code stringlengths 31 1.05M | apis list | extract_api stringlengths 97 1.91M |
|---|---|---|
import pandas as pd
import numpy as np
from matplotlib import pyplot as plt
import seaborn as sns
import math
import re
from ml_pipeline_lch import isolate_categoricals, is_category
def view_dist(df, geo_columns = True, fig_size=(20,15), labels = None):
    '''
    Plot histograms of every non-categorical column of a dataframe.

    Inputs:
        df: pandas dataframe
        geo_columns: indicator forwarded to isolate_categoricals to flag
            numeric geography columns (ex: zipcodes)
        fig_size: (width, height) tuple for the whole figure
        labels: optional [title, xlabel, ylabel] applied to the plot
    '''
    numeric_cols = isolate_categoricals(
        df, categoricals_fcn=is_category, ret_categoricals=False,
        geos_indicator=geo_columns)
    df[numeric_cols].hist(bins=10, figsize=fig_size, color='blue')
    if labels:
        title, x_label, y_label = labels[0], labels[1], labels[2]
        plt.title(title)
        plt.xlabel(x_label)
        plt.ylabel(y_label)
    plt.show()
def plot_value_counts(df, type_dict, category, norm = False, plot_kind = 'bar'):
    '''
    Plot the value-count distribution of each column listed under `category`.

    Inputs:
        df: pandas dataframe
        type_dict: dict mapping category names to lists of column names
        category: key into type_dict selecting the columns to plot
        norm: when True, plot relative frequencies instead of raw counts
        plot_kind: matplotlib plot kind (e.g. 'bar')
    '''
    for column in type_dict[category]:
        counts = df[column].value_counts(normalize=norm)
        counts.plot(kind=plot_kind)
        plt.title(column + " distribution")
        plt.xlabel(column)
        plt.ylabel("frequency")
        plt.show()
def check_corr(df, geo_columns = True, cat_cols = None):
    '''
    Display heatmap of linear correlation between non-categorical columns in a
    given dataframe. If detecting the non-categorical columns fails, fall back
    to correlating the dataframe with `cat_cols` cast to pandas category dtype.

    Inputs:
        df: pandas dataframe
        geo_columns: indicator forwarded to isolate_categoricals to flag
            numeric geographical columns (ex: zipcodes)
        cat_cols: optional list of columns to cast to category dtype in the
            fallback path

    Attribution: Colormap Attribution: adapted from gradiated dataframe at
    https://www.datascience.com/blog/introduction-to-correlation-learn-data-science-tutorials and correlation heatmap at https://stackoverflow.com/questions/29432629/correlation-matrix-using-pandas
    '''
    try:
        non_categoricals = isolate_categoricals(df, categoricals_fcn = is_category,
                              ret_categoricals = False, geos_indicator = geo_columns)
        corr = df[non_categoricals].corr(method="pearson")
        _plot_corr_heatmap(corr, non_categoricals)
    # was a bare `except:` which also swallowed KeyboardInterrupt/SystemExit
    except Exception:
        if cat_cols:
            cat_df = df[df.columns]
            for col in cat_cols:
                # 'category' is the valid pandas dtype name;
                # the previous 'categorical' raises TypeError
                cat_df[col] = cat_df[col].astype('category')
            corr = cat_df.corr(method="pearson")
            _plot_corr_heatmap(corr, list(cat_df.columns))

def _plot_corr_heatmap(corr, tick_labels):
    '''Render an annotated correlation heatmap with rotated tick labels.'''
    fig, ax = plt.subplots(figsize=(12, 12))
    # np.bool was a deprecated alias (removed in NumPy 1.24); use builtin bool
    sns.heatmap(corr, mask=np.zeros_like(corr, dtype=bool),
            cmap=plt.get_cmap("coolwarm"), square=True, ax=ax, annot=True)
    ax.set_xticks(range(len(tick_labels)))
    ax.set_yticks(range(len(tick_labels)))
    ax.tick_params(direction='inout')
    ax.set_xticklabels(tick_labels, rotation=45, ha='right')
    ax.set_yticklabels(tick_labels, rotation=45, va='top')
    plt.title('Feature Correlation')
    plt.show()
def discretize_cols(df, num_bins, geo_columns=True, specific_cols = False, split = False):
    '''
    Add a "<col>_bin" companion column for each non-categorical column
    (or each explicitly listed column) of a dataframe, in place.

    Inputs:
        df: pandas dataframe (modified in place)
        num_bins: number of groups into which column values should be
            discretized (the "age" column instead uses decade-wide bins)
        geo_columns: indicator forwarded to isolate_categoricals to flag
            numeric geographical columns (ex: zipcodes)
        specific_cols: optional explicit list of columns to bin; when falsy,
            the detected non-categorical columns are used
        split: when truthy, pd.cut also returns bin edges and the edges of
            the last column processed are returned
    '''
    if specific_cols:
        columns = specific_cols
    else:
        columns = isolate_categoricals(
            df, categoricals_fcn=is_category, ret_categoricals=False,
            geos_indicator=geo_columns)
    for col in columns:
        bin_col = col + "_bin"
        if col == "age":
            # one bin per decade of the observed age range
            decade_bins = math.ceil((df[col].max() - df[col].min()) / 10)
            cut_kwargs = dict(bins=decade_bins, right=False, precision=0,
                              retbins=split)
        else:
            cut_kwargs = dict(bins=num_bins, precision=0, retbins=split)
        if split:
            df[bin_col], train_bins = pd.cut(df[col], **cut_kwargs)
        else:
            df[bin_col] = pd.cut(df[col], **cut_kwargs)
    if split:
        return train_bins
def discretize_train_test(train_test_tuples, still_blanks):
    '''
    Bin columns on each training set into 4 groups and apply the learned
    bin edges to the matching test set (both modified in place).
    '''
    for idx, (train_df, test_df) in enumerate(train_test_tuples):
        for col in still_blanks[idx]:
            bin_col = col + '_bin'
            train_df[bin_col], edges = pd.cut(
                train_df[col], bins=4, precision=0, retbins=True)
            test_df[bin_col] = pd.cut(test_df[col], bins=edges, precision=0)
def confirm_train_test_discretization(train_test_tuples, still_blanks):
    '''
    Print the unique bin labels seen in each train/test pair so the
    discretization can be eyeballed for consistency.
    '''
    for idx, (train_df, test_df) in enumerate(train_test_tuples):
        for col in still_blanks[idx]:
            bin_col = col + '_bin'
            print("set {} {} train: {}.".format(idx, col, train_df[bin_col].unique()))
            print()
            print("set {} {} test: {}.".format(idx, col, test_df[bin_col].unique()))
            print()
def drop_tt_binned(train_test_tuples, to_drop):
    '''
    Drop the given columns from every train and test dataframe, in place.

    Inputs:
        train_test_tuples: list of (train, test) dataframe tuples
        to_drop: list of columns to drop
    '''
    for pair in train_test_tuples:
        for frame in pair:
            frame.drop(to_drop, axis=1, inplace=True)
def create_binary_vars(df, cols_to_dummy, keyword_list):
    '''
    Add boolean "tf_<name>" columns flagging values above zero for the
    selected columns; the new name is the old one with any keyword removed.

    Inputs:
        df: pandas dataframe (modified in place)
        cols_to_dummy: (list of strings) columns in the data frame to be
            evaluated into dummy variables
        keyword_list: (list of strings) words or phrases stripped from the
            source column name to form the new column name
    '''
    pattern = "|".join(keyword_list)
    for col in cols_to_dummy:
        stripped = re.sub(pattern, '', col)
        df['tf_' + stripped] = df[col].apply(lambda val: val > 0)
def plot_corr(df, color_category, geo_columns=True):
    '''
    Show a seaborn pairplot of the non-categorical columns, colored by
    one categorical column.

    Inputs:
        df: pandas dataframe
        color_category: column name used to color the points
        geo_columns: indicator forwarded to isolate_categoricals to flag
            numeric geography columns (ex: zip codes)
    '''
    numeric_cols = isolate_categoricals(
        df, categoricals_fcn=is_category, ret_categoricals=False,
        geos_indicator=geo_columns)
    sns.pairplot(df[numeric_cols + [color_category]],
                 hue=color_category, palette="Set2")
def plot_relationship(df, feature_x, xlabel,feature_y, ylabel, xlimit = None,
                        ylimit = None, color_cat = None, filter_col = None,
                        filter_criteria = None, filter_param = None,
                        filter_param2 = None):
    '''
    Plot two features in a given data frame against each other to view
    relationship and outliers.

    Inputs:
        df: pandas dataframe
        feature_x, feature_y: column names plotted on x / y
        xlabel, ylabel: axis labels (also used to build the title)
        xlimit, ylimit: optional axis limit tuples
        color_cat: optional column used to color the points
        filter_col, filter_criteria, filter_param, filter_param2: optional
            row filter; criteria is one of 'geq', 'gt', 'leq', 'lt', 'eq',
            'neq', 'between' (exclusive range from filter_param to
            filter_param2)

    Raises:
        ValueError: for an unrecognized filter_criteria (previously an
            opaque NameError on the undefined use_df)

    Attribution: adapted from https://s3.amazonaws.com/assets.datacamp.com/blog_assets/Python_Seaborn_Cheat_Sheet.pdf
    '''
    use_df = df
    # `is not None` so 0 is usable as a threshold; the old truthiness test
    # silently skipped filtering when filter_param was 0.
    if filter_col and filter_criteria and filter_param is not None:
        series = df[filter_col]
        if filter_criteria == 'geq':
            mask = series >= filter_param
        elif filter_criteria == 'gt':
            mask = series > filter_param
        elif filter_criteria == 'leq':
            mask = series <= filter_param
        elif filter_criteria == 'lt':
            mask = series < filter_param
        elif filter_criteria == 'eq':
            mask = series == filter_param
        elif filter_criteria == 'neq':
            mask = series != filter_param
        elif filter_criteria == 'between':
            # element-wise & — the previous `and` raises ValueError
            # ("truth value of a Series is ambiguous") on pandas Series
            mask = (series > filter_param) & (series < filter_param2)
        else:
            raise ValueError("unknown filter_criteria: %r" % filter_criteria)
        use_df = df[mask]
    g = sns.lmplot(x = feature_x, y = feature_y, data = use_df, aspect = 3,
                    hue = color_cat)
    g = (g.set_axis_labels(xlabel,ylabel)).set(xlim = xlimit , ylim = ylimit)
    plot_title = ylabel + " by " + xlabel
    plt.title(plot_title)
    plt.show(g)
def eval_ratios(df, include_cols, category_cols, method = "count",
                pct = False):
    '''
    Evaluate specific features via grouping on one or more category
    Inputs:
        df: (dataframe) pandas dataframe
        include_cols: (list of strings) column names to be aggregated or
            grouped
        category_cols: (list of strings) column name(s) for variable(s) used
            for grouping
        method: (string) groupby aggregation method: "count" or "sum"
        pct: (bool) when True, append a "<last include col> Percentage"
            column holding each group's share of the column total
    Output:
        ratio_df: pandas data frame of grouped data
    Raises:
        ValueError: if method is neither "count" nor "sum" (previously this
            fell through to an opaque NameError on ratio_df)
    '''
    # Build the grouper once; the original recomputed the same groupby
    # aggregation up to three times for the pct branch.
    grouped = df[include_cols].groupby(category_cols)
    if method == "count":
        ratio_df = grouped.count()
    elif method == "sum":
        ratio_df = grouped.sum()
    else:
        raise ValueError("method must be 'count' or 'sum', got %r" % method)
    if pct:
        single_col = include_cols[-1] + " Percentage"
        # ratio_df still holds the raw aggregate here, so this matches the
        # original (aggregate / column total) * 100 computation.
        ratio_df[single_col] = (ratio_df / ratio_df.sum()) * 100
    return ratio_df
def feature_by_geo(df, geo, expl_var, num_var, method = "median"):
    '''
    Aggregate one numeric feature by geography (ex: zip code), split out
    by an exploratory variable.

    Inputs:
        df: (dataframe) pandas dataframe
        geo: (string) column name of the geography used for grouping
            (rows where it equals 0 are dropped first)
        expl_var: (list of strings) exploratory variable column name(s)
            that become the output columns
        num_var: (string) numeric column to aggregate
        method: (string) "median" or "count"
    Output:
        geo_features: pandas data frame of grouped data, missing cells
            filled with the empty string
    '''
    nonzero_geo = df[df[geo] != 0]
    group_keys = [geo] + expl_var
    grouped = nonzero_geo.groupby(group_keys)[num_var]
    if method == "median":
        geo_features = grouped.median().unstack(level=1)
    if method == "count":
        geo_features = grouped.count().unstack(level=1)
    geo_features.fillna(value="", inplace=True)
    return geo_features
def plot_top_distros(train_test_tuples, var_dict, set_num):
    '''
    Plot sorted value counts of every 'tops' column for one training set.
    '''
    train, _test = train_test_tuples[set_num]
    for col in var_dict['tops']:
        title = "Projects by {} for Training Set {}".format(col, set_num)
        counts = train[col].value_counts().sort_index()
        counts.plot(kind='bar', title=title)
        plt.show()
| [
"seaborn.lmplot",
"ml_pipeline_lch.isolate_categoricals",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"pandas.cut",
"matplotlib.pyplot.subplots",
"re.sub",
"matplotlib.pyplot.title",
"matplotlib.pyplot.get_cmap",
"numpy.zeros_like",
"seaborn.pairplot",
"matplotlib.pyplot.show"
] | [((613, 724), 'ml_pipeline_lch.isolate_categoricals', 'isolate_categoricals', (['df'], {'categoricals_fcn': 'is_category', 'ret_categoricals': '(False)', 'geos_indicator': 'geo_columns'}), '(df, categoricals_fcn=is_category, ret_categoricals=\n False, geos_indicator=geo_columns)\n', (633, 724), False, 'from ml_pipeline_lch import isolate_categoricals, is_category\n'), ((1063, 1073), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1071, 1073), True, 'from matplotlib import pyplot as plt\n'), ((7487, 7598), 'ml_pipeline_lch.isolate_categoricals', 'isolate_categoricals', (['df'], {'categoricals_fcn': 'is_category', 'ret_categoricals': '(False)', 'geos_indicator': 'geo_columns'}), '(df, categoricals_fcn=is_category, ret_categoricals=\n False, geos_indicator=geo_columns)\n', (7507, 7598), False, 'from ml_pipeline_lch import isolate_categoricals, is_category\n'), ((7676, 7739), 'seaborn.pairplot', 'sns.pairplot', (['df[plot_list]'], {'hue': 'color_category', 'palette': '"""Set2"""'}), "(df[plot_list], hue=color_category, palette='Set2')\n", (7688, 7739), True, 'import seaborn as sns\n'), ((978, 998), 'matplotlib.pyplot.title', 'plt.title', (['labels[0]'], {}), '(labels[0])\n', (987, 998), True, 'from matplotlib import pyplot as plt\n'), ((1007, 1028), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['labels[1]'], {}), '(labels[1])\n', (1017, 1028), True, 'from matplotlib import pyplot as plt\n'), ((1037, 1058), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['labels[2]'], {}), '(labels[2])\n', (1047, 1058), True, 'from matplotlib import pyplot as plt\n'), ((1312, 1333), 'matplotlib.pyplot.title', 'plt.title', (['plot_title'], {}), '(plot_title)\n', (1321, 1333), True, 'from matplotlib import pyplot as plt\n'), ((1342, 1357), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['col'], {}), '(col)\n', (1352, 1357), True, 'from matplotlib import pyplot as plt\n'), ((1366, 1389), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""frequency"""'], {}), "('frequency')\n", (1376, 
1389), True, 'from matplotlib import pyplot as plt\n'), ((1398, 1408), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1406, 1408), True, 'from matplotlib import pyplot as plt\n'), ((2068, 2179), 'ml_pipeline_lch.isolate_categoricals', 'isolate_categoricals', (['df'], {'categoricals_fcn': 'is_category', 'ret_categoricals': '(False)', 'geos_indicator': 'geo_columns'}), '(df, categoricals_fcn=is_category, ret_categoricals=\n False, geos_indicator=geo_columns)\n', (2088, 2179), False, 'from ml_pipeline_lch import isolate_categoricals, is_category\n'), ((2213, 2243), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(12, 12)'}), '(figsize=(12, 12))\n', (2225, 2243), True, 'from matplotlib import pyplot as plt\n'), ((2756, 2788), 'matplotlib.pyplot.title', 'plt.title', (['"""Feature Correlation"""'], {}), "('Feature Correlation')\n", (2765, 2788), True, 'from matplotlib import pyplot as plt\n'), ((2797, 2807), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2805, 2807), True, 'from matplotlib import pyplot as plt\n'), ((4207, 4318), 'ml_pipeline_lch.isolate_categoricals', 'isolate_categoricals', (['df'], {'categoricals_fcn': 'is_category', 'ret_categoricals': '(False)', 'geos_indicator': 'geo_columns'}), '(df, categoricals_fcn=is_category, ret_categoricals=\n False, geos_indicator=geo_columns)\n', (4227, 4318), False, 'from ml_pipeline_lch import isolate_categoricals, is_category\n'), ((7024, 7055), 're.sub', 're.sub', (['keyword_string', '""""""', 'col'], {}), "(keyword_string, '', col)\n", (7030, 7055), False, 'import re\n'), ((9032, 9106), 'seaborn.lmplot', 'sns.lmplot', ([], {'x': 'feature_x', 'y': 'feature_y', 'data': 'use_df', 'aspect': '(3)', 'hue': 'color_cat'}), '(x=feature_x, y=feature_y, data=use_df, aspect=3, hue=color_cat)\n', (9042, 9106), True, 'import seaborn as sns\n'), ((9278, 9299), 'matplotlib.pyplot.title', 'plt.title', (['plot_title'], {}), '(plot_title)\n', (9287, 9299), True, 'from matplotlib import pyplot as 
plt\n'), ((9308, 9319), 'matplotlib.pyplot.show', 'plt.show', (['g'], {}), '(g)\n', (9316, 9319), True, 'from matplotlib import pyplot as plt\n'), ((9347, 9417), 'seaborn.lmplot', 'sns.lmplot', ([], {'x': 'feature_x', 'y': 'feature_y', 'data': 'df', 'aspect': '(3)', 'hue': 'color_cat'}), '(x=feature_x, y=feature_y, data=df, aspect=3, hue=color_cat)\n', (9357, 9417), True, 'import seaborn as sns\n'), ((9589, 9610), 'matplotlib.pyplot.title', 'plt.title', (['plot_title'], {}), '(plot_title)\n', (9598, 9610), True, 'from matplotlib import pyplot as plt\n'), ((9619, 9630), 'matplotlib.pyplot.show', 'plt.show', (['g'], {}), '(g)\n', (9627, 9630), True, 'from matplotlib import pyplot as plt\n'), ((12215, 12225), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (12223, 12225), True, 'from matplotlib import pyplot as plt\n'), ((5412, 5465), 'pandas.cut', 'pd.cut', (['train[col]'], {'bins': '(4)', 'precision': '(0)', 'retbins': '(True)'}), '(train[col], bins=4, precision=0, retbins=True)\n', (5418, 5465), True, 'import pandas as pd\n'), ((5500, 5547), 'pandas.cut', 'pd.cut', (['test[col]'], {'bins': 'train_bins', 'precision': '(0)'}), '(test[col], bins=train_bins, precision=0)\n', (5506, 5547), True, 'import pandas as pd\n'), ((2334, 2368), 'numpy.zeros_like', 'np.zeros_like', (['corr'], {'dtype': 'np.bool'}), '(corr, dtype=np.bool)\n', (2347, 2368), True, 'import numpy as np\n'), ((2396, 2420), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['"""coolwarm"""'], {}), "('coolwarm')\n", (2408, 2420), True, 'from matplotlib import pyplot as plt\n'), ((3015, 3045), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(12, 12)'}), '(figsize=(12, 12))\n', (3027, 3045), True, 'from matplotlib import pyplot as plt\n'), ((3576, 3608), 'matplotlib.pyplot.title', 'plt.title', (['"""Feature Correlation"""'], {}), "('Feature Correlation')\n", (3585, 3608), True, 'from matplotlib import pyplot as plt\n'), ((3621, 3631), 'matplotlib.pyplot.show', 'plt.show', ([], {}), 
'()\n', (3629, 3631), True, 'from matplotlib import pyplot as plt\n'), ((4575, 4646), 'pandas.cut', 'pd.cut', (['df[col]'], {'bins': 'age_bins', 'right': '(False)', 'precision': '(0)', 'retbins': 'split'}), '(df[col], bins=age_bins, right=False, precision=0, retbins=split)\n', (4581, 4646), True, 'import pandas as pd\n'), ((4720, 4791), 'pandas.cut', 'pd.cut', (['df[col]'], {'bins': 'age_bins', 'right': '(False)', 'precision': '(0)', 'retbins': 'split'}), '(df[col], bins=age_bins, right=False, precision=0, retbins=split)\n', (4726, 4791), True, 'import pandas as pd\n'), ((4895, 4953), 'pandas.cut', 'pd.cut', (['df[col]'], {'bins': 'num_bins', 'precision': '(0)', 'retbins': 'split'}), '(df[col], bins=num_bins, precision=0, retbins=split)\n', (4901, 4953), True, 'import pandas as pd\n'), ((5025, 5083), 'pandas.cut', 'pd.cut', (['df[col]'], {'bins': 'num_bins', 'precision': '(0)', 'retbins': 'split'}), '(df[col], bins=num_bins, precision=0, retbins=split)\n', (5031, 5083), True, 'import pandas as pd\n'), ((3130, 3164), 'numpy.zeros_like', 'np.zeros_like', (['corr'], {'dtype': 'np.bool'}), '(corr, dtype=np.bool)\n', (3143, 3164), True, 'import numpy as np\n'), ((3196, 3220), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['"""coolwarm"""'], {}), "('coolwarm')\n", (3208, 3220), True, 'from matplotlib import pyplot as plt\n')] |
from typing import Dict, List, Union
import pandas as pd
import numpy as np
import random
import matplotlib.pyplot as plt
import dill
from pathlib import Path
from pysentimiento import create_analyzer
from lime.lime_text import LimeTextExplainer
from pysentimiento.analyzer import AnalyzerOutput
def sort_sentiment(res: AnalyzerOutput) -> np.array:
    """Return the result's class probabilities as a (1, n_classes) row,
    ordered by the module-level analyzer's id2label mapping."""
    probs = [res.probas[label] for label in sentiment.id2label.values()]
    return np.array(probs).reshape(1, -1)
def list_to_arr(result: List[AnalyzerOutput]) -> np.ndarray:
    """Stack one probability row per analyzer output into an (n, classes) array."""
    rows = [sort_sentiment(item) for item in result]
    return np.vstack(rows)
def format_output(result: Union[List[AnalyzerOutput], AnalyzerOutput]) -> np.ndarray:
    """Normalize a single output or a list of outputs to a 2-D probability array."""
    try:
        # A single AnalyzerOutput exposes .probas directly.
        return sort_sentiment(result)
    except AttributeError:
        # Lists have no .probas attribute; stack them row by row instead.
        return list_to_arr(result)
def dict_to_arr(dct: dict) -> np.ndarray:
    """Pack a dict's values into a single-row (1, n_feats) array."""
    values = list(dct.values())
    return np.array(values).reshape((-1, len(values)))
def predict_pos_proba(sentence: str) -> np.ndarray:
    """Run the module-level sentiment analyzer and return probabilities
    as a 2-D ndarray (LIME-compatible signature)."""
    return format_output(sentiment.predict(sentence))
# Build an English sentiment analyzer; its id2label ordering defines the
# class order used by the helpers above.
sentiment = create_analyzer("sentiment", lang="en")
# Smoke-test the analyzer and the wrapper helpers on two toy sentences.
sentence = ["I'm tweeting and I'm happy!", "I'm sad"]
output = sentiment.predict(sentence)
predict_pos_proba(sentence)
labels = list(sentiment.id2label.values())
list_to_arr(output)
# Explain the first toy sentence with LIME.
explainer = LimeTextExplainer(class_names=labels)
explains = explainer.explain_instance(sentence[0], predict_pos_proba, num_features=3)
# Test on real data
test_dat = pd.read_csv(
    Path("./output/clean_tweets.csv"),
    header=0,
    # keep the header row, then keep each data row with ~1% probability
    skiprows=lambda i: i > 0 and random.random() > 0.01,
)
# Explain one randomly sampled tweet against its highest-probability class.
testy = test_dat.sample(1)
sentence = testy["cleantext"].tolist()[0]
top_label = np.argmax(predict_pos_proba(sentence))
explains = explainer.explain_instance(
    sentence, predict_pos_proba, num_features=5, labels=[top_label]
)
explains.as_list()
fig = explains.as_pyplot_figure()
plt.show()
| [
"lime.lime_text.LimeTextExplainer",
"pathlib.Path",
"numpy.array",
"pysentimiento.create_analyzer",
"random.random",
"matplotlib.pyplot.show"
] | [((1055, 1094), 'pysentimiento.create_analyzer', 'create_analyzer', (['"""sentiment"""'], {'lang': '"""en"""'}), "('sentiment', lang='en')\n", (1070, 1094), False, 'from pysentimiento import create_analyzer\n'), ((1292, 1329), 'lime.lime_text.LimeTextExplainer', 'LimeTextExplainer', ([], {'class_names': 'labels'}), '(class_names=labels)\n', (1309, 1329), False, 'from lime.lime_text import LimeTextExplainer\n'), ((1861, 1871), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1869, 1871), True, 'import matplotlib.pyplot as plt\n'), ((1466, 1499), 'pathlib.Path', 'Path', (['"""./output/clean_tweets.csv"""'], {}), "('./output/clean_tweets.csv')\n", (1470, 1499), False, 'from pathlib import Path\n'), ((426, 440), 'numpy.array', 'np.array', (['vals'], {}), '(vals)\n', (434, 440), True, 'import numpy as np\n'), ((1548, 1563), 'random.random', 'random.random', ([], {}), '()\n', (1561, 1563), False, 'import random\n')] |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Part of the Keras training engine related to plain array data.
"""
# pylint: disable=protected-access
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import numpy as np
from tensorflow.python.framework import errors
from tensorflow.python.keras import backend as K
from tensorflow.python.keras import callbacks as cbks
from tensorflow.python.keras.engine import training_utils
from tensorflow.python.keras.utils.generic_utils import make_batches
from tensorflow.python.keras.utils.generic_utils import Progbar
from tensorflow.python.keras.utils.generic_utils import slice_arrays
from tensorflow.python.platform import tf_logging as logging
try:
from scipy.sparse import issparse # pylint: disable=g-import-not-at-top
except ImportError:
issparse = None
def fit_loop(model,
             inputs,
             targets,
             sample_weights=None,
             batch_size=None,
             epochs=100,
             verbose=1,
             callbacks=None,
             val_inputs=None,
             val_targets=None,
             val_sample_weights=None,
             shuffle=True,
             callback_metrics=None,
             initial_epoch=0,
             steps_per_epoch=None,
             validation_steps=None):
  """Abstract fit function for arrays of data.

  Arguments:
      model: Keras Model instance.
      inputs: List of input arrays.
      targets: List of target arrays.
      sample_weights: Optional list of sample weight arrays.
      batch_size: Integer batch size or None if unknown.
      epochs: Number of times to iterate over the data
      verbose: Verbosity mode, 0, 1 or 2
      callbacks: List of callbacks to be called during training
      val_inputs: List of input arrays.
      val_targets: List of target arrays.
      val_sample_weights: Optional list of sample weight arrays.
      shuffle: Whether to shuffle the data at the beginning of each epoch
      callback_metrics: List of strings, the display names of the metrics
          passed to the callbacks. They should be the
          concatenation of list the display names of the outputs of
          `f` and the list of display names of the outputs of `f_val`.
      initial_epoch: Epoch at which to start training
          (useful for resuming a previous training run)
      steps_per_epoch: Total number of steps (batches of samples)
          before declaring one epoch finished and starting the
          next epoch. Ignored with the default value of `None`.
      validation_steps: Number of steps to run validation for
          (only if doing validation from data tensors).
          Ignored with the default value of `None`.

  Returns:
      `History` object.

  Raises:
      ValueError: in case of invalid arguments.
  """
  model._make_train_function()
  f = model.train_function

  sample_weights = sample_weights or []
  val_sample_weights = val_sample_weights or []
  # Append the learning-phase flag (1 = training) unless the phase is a
  # fixed int baked into the graph.
  if model.uses_learning_phase and not isinstance(K.learning_phase(), int):
    ins = inputs + targets + sample_weights + [1]
    if val_inputs:
      val_ins = val_inputs + val_targets + val_sample_weights + [1]
  else:
    ins = inputs + targets + sample_weights
    if val_inputs:
      val_ins = val_inputs + val_targets + val_sample_weights
  if not val_inputs:
    val_ins = []

  do_validation = False
  if val_inputs:
    do_validation = True
    if (steps_per_epoch is None and verbose and inputs and
        hasattr(inputs[0], 'shape') and hasattr(val_inputs[0], 'shape')):
      print('Train on %d samples, validate on %d samples' %
            (inputs[0].shape[0], val_inputs[0].shape[0]))
  if validation_steps:
    do_validation = True
    if steps_per_epoch is None:
      raise ValueError('Can only use `validation_steps` '
                       'when doing step-wise '
                       'training, i.e. `steps_per_epoch` '
                       'must be set.')

  out_labels = model.metrics_names
  if do_validation:
    callback_metrics = copy.copy(out_labels) + [
        'val_' + n for n in out_labels
    ]
    if callbacks is not None and any(
        [isinstance(callback, cbks.TensorBoard) for callback in callbacks]):
      # need to create the test_function before start of the first epoch
      # because TensorBoard callback on_epoch_begin adds summary to the
      # list of fetches of the test_function
      model._make_test_function()
  else:
    callback_metrics = copy.copy(out_labels)

  num_train_samples = training_utils.check_num_samples(
      ins, batch_size, steps_per_epoch, 'steps_per_epoch')
  if num_train_samples is not None:
    index_array = np.arange(num_train_samples)

  model.history = cbks.History()
  all_callbacks = [cbks.BaseLogger(
      stateful_metrics=model.stateful_metric_names)]
  if verbose:
    if steps_per_epoch is not None:
      count_mode = 'steps'
    else:
      count_mode = 'samples'
    all_callbacks.append(
        cbks.ProgbarLogger(
            count_mode, stateful_metrics=model.stateful_metric_names))
  all_callbacks += (callbacks or []) + [model.history]
  callbacks = cbks.CallbackList(all_callbacks)
  out_labels = out_labels or []

  # it's possible to callback a different model than self
  # (used by Sequential models)
  if hasattr(model, 'callback_model') and model.callback_model:
    callback_model = model.callback_model
  else:
    callback_model = model

  callbacks.set_model(callback_model)

  callbacks.set_params({
      'batch_size': batch_size,
      'epochs': epochs,
      'steps': steps_per_epoch,
      'samples': num_train_samples,
      'verbose': verbose,
      'do_validation': do_validation,
      'metrics': callback_metrics or [],
  })
  callbacks.on_train_begin()
  callback_model.stop_training = False
  for cbk in callbacks:
    cbk.validation_data = val_ins

  # To prevent a slowdown, we find beforehand the arrays that need conversion.
  feed = model._feed_inputs + model._feed_targets + model._feed_sample_weights
  indices_for_conversion_to_dense = []
  for i in range(len(feed)):
    if issparse is not None and issparse(ins[i]) and not K.is_sparse(feed[i]):
      indices_for_conversion_to_dense.append(i)

  for epoch in range(initial_epoch, epochs):
    # Reset stateful metrics
    for m in model.stateful_metric_functions:
      m.reset_states()
    # Update callbacks
    callbacks.on_epoch_begin(epoch)
    epoch_logs = {}
    if steps_per_epoch is not None:
      # Step-wise fit loop.
      for step_index in range(steps_per_epoch):
        batch_logs = {}
        batch_logs['batch'] = step_index
        batch_logs['size'] = 1
        callbacks.on_batch_begin(step_index, batch_logs)
        try:
          outs = f(ins)
        except errors.OutOfRangeError:
          # The product must be parenthesized: `%` and `*` share precedence
          # and bind left-to-right, so the unparenthesized form repeated the
          # formatted string `epochs` times and reported only
          # steps_per_epoch in the message.
          logging.warning('Your dataset iterator ran out of data; '
                          'interrupting training. Make sure that your dataset '
                          'can generate at least `steps_per_epoch * epochs` '
                          'batches (in this case, %d batches).' %
                          (steps_per_epoch * epochs))
          break

        if not isinstance(outs, list):
          outs = [outs]
        for l, o in zip(out_labels, outs):
          batch_logs[l] = o

        callbacks.on_batch_end(step_index, batch_logs)
        if callback_model.stop_training:
          break

      if do_validation:
        val_outs = test_loop(
            model,
            val_inputs,
            val_targets,
            sample_weights=val_sample_weights,
            steps=validation_steps,
            verbose=0)
        if not isinstance(val_outs, list):
          val_outs = [val_outs]
        # Same labels assumed.
        for l, o in zip(out_labels, val_outs):
          epoch_logs['val_' + l] = o
    else:
      # Sample-wise fit loop.
      if shuffle == 'batch':
        index_array = training_utils.batch_shuffle(index_array, batch_size)
      elif shuffle:
        np.random.shuffle(index_array)

      batches = make_batches(num_train_samples, batch_size)

      for batch_index, (batch_start, batch_end) in enumerate(batches):
        batch_ids = index_array[batch_start:batch_end]
        try:
          if isinstance(ins[-1], int):
            # Do not slice the training phase flag.
            ins_batch = slice_arrays(ins[:-1], batch_ids) + [ins[-1]]
          else:
            ins_batch = slice_arrays(ins, batch_ids)
        except TypeError:
          raise TypeError('TypeError while preparing batch. '
                          'If using HDF5 input data, '
                          'pass shuffle="batch".')
        batch_logs = {}
        batch_logs['batch'] = batch_index
        batch_logs['size'] = len(batch_ids)
        callbacks.on_batch_begin(batch_index, batch_logs)
        for i in indices_for_conversion_to_dense:
          ins_batch[i] = ins_batch[i].toarray()

        outs = f(ins_batch)
        if not isinstance(outs, list):
          outs = [outs]
        for l, o in zip(out_labels, outs):
          batch_logs[l] = o

        callbacks.on_batch_end(batch_index, batch_logs)
        if callback_model.stop_training:
          break

        if batch_index == len(batches) - 1:  # Last batch.
          if do_validation:
            val_outs = test_loop(
                model,
                val_inputs,
                val_targets,
                sample_weights=val_sample_weights,
                batch_size=batch_size,
                verbose=0)
            if not isinstance(val_outs, list):
              val_outs = [val_outs]
            # Same labels assumed.
            for l, o in zip(out_labels, val_outs):
              epoch_logs['val_' + l] = o
    callbacks.on_epoch_end(epoch, epoch_logs)
    if callback_model.stop_training:
      break
  callbacks.on_train_end()
  return model.history
def predict_loop(model, inputs, batch_size=32, verbose=0, steps=None):
  """Abstract method to loop over some data in batches.

  Arguments:
      model: Keras Model instance.
      inputs: list of tensors to be fed to `f`.
      batch_size: integer batch size.
      verbose: verbosity mode.
      steps: Total number of steps (batches of samples)
          before declaring `_predict_loop` finished.
          Ignored with the default value of `None`.

  Returns:
      Array of predictions (if the model has a single output)
      or list of arrays of predictions
      (if the model has multiple outputs).
  """
  model._make_predict_function()
  f = model.predict_function
  # Append the learning-phase flag (0 = inference) unless the phase is a
  # fixed int baked into the graph.
  if model.uses_learning_phase and not isinstance(K.learning_phase(), int):
    ins = inputs + [0]
  else:
    ins = inputs
  num_samples = training_utils.check_num_samples(
      inputs, batch_size, steps, 'steps')
  if verbose == 1:
    # Progress is reported in steps when step-based, else in samples.
    if steps is not None:
      progbar = Progbar(target=steps)
    else:
      progbar = Progbar(target=num_samples)
  # Find beforehand which feeds are sparse in the data but dense in the
  # graph, so only those batches get densified inside the loop.
  indices_for_conversion_to_dense = []
  for i in range(len(model._feed_inputs)):
    if (issparse is not None and issparse(inputs[i]) and
        not K.is_sparse(model._feed_inputs[i])):
      indices_for_conversion_to_dense.append(i)
  if steps is not None:
    # Step-based predictions.
    # Since we do not know how many samples
    # we will see, we cannot pre-allocate
    # the returned Numpy arrays.
    # Instead, we store one array per batch seen
    # and concatenate them upon returning.
    unconcatenated_outs = []
    for step in range(steps):
      batch_outs = f(ins)
      if not isinstance(batch_outs, list):
        batch_outs = [batch_outs]
      if step == 0:
        # One accumulator list per model output, sized on the first batch.
        for batch_out in batch_outs:
          unconcatenated_outs.append([])
      for i, batch_out in enumerate(batch_outs):
        unconcatenated_outs[i].append(batch_out)
      if verbose == 1:
        progbar.update(step + 1)
    if len(unconcatenated_outs) == 1:
      # Single-output model: return the array, not a one-element list.
      return np.concatenate(unconcatenated_outs[0], axis=0)
    return [
        np.concatenate(unconcatenated_outs[i], axis=0)
        for i in range(len(unconcatenated_outs))
    ]
  else:
    # Sample-based predictions.
    outs = []
    batches = make_batches(num_samples, batch_size)
    index_array = np.arange(num_samples)
    for batch_index, (batch_start, batch_end) in enumerate(batches):
      batch_ids = index_array[batch_start:batch_end]
      if ins and isinstance(ins[-1], int):
        # Do not slice the training phase flag.
        ins_batch = slice_arrays(ins[:-1], batch_ids) + [ins[-1]]
      else:
        ins_batch = slice_arrays(ins, batch_ids)
      for i in indices_for_conversion_to_dense:
        ins_batch[i] = ins_batch[i].toarray()
      batch_outs = f(ins_batch)
      if not isinstance(batch_outs, list):
        batch_outs = [batch_outs]
      if batch_index == 0:
        # Pre-allocate the results arrays.
        for batch_out in batch_outs:
          shape = (num_samples,) + batch_out.shape[1:]
          outs.append(np.zeros(shape, dtype=batch_out.dtype))
      for i, batch_out in enumerate(batch_outs):
        # Write this batch's rows into its slot of the pre-allocated output.
        outs[i][batch_start:batch_end] = batch_out
      if verbose == 1:
        progbar.update(batch_end)
    if len(outs) == 1:
      return outs[0]
    return outs
def test_loop(model, inputs, targets,
              sample_weights=None,
              batch_size=None,
              verbose=0,
              steps=None):
  """Abstract method to loop over some data in batches.
  Arguments:
      model: Keras Model instance.
      inputs: List of input arrays.
      targets: List of target arrays.
      sample_weights: Optional list of sample weight arrays.
      batch_size: integer batch size or `None`.
      verbose: verbosity mode.
      steps: Total number of steps (batches of samples)
          before declaring predictions finished.
          Ignored with the default value of `None`.
  Returns:
      Scalar loss (if the model has a single output and no metrics)
      or list of scalars (if the model has multiple outputs
      and/or metrics). The attribute `model.metrics_names` will give you
      the display labels for the scalar outputs.
  """
  model._make_test_function()
  f = model.test_function
  sample_weights = sample_weights or []
  # Append the learning-phase flag (0 = test mode) only when the model uses
  # it and it is a symbolic placeholder rather than a fixed int.
  if model.uses_learning_phase and not isinstance(K.learning_phase(), int):
    ins = inputs + targets + sample_weights + [0]
  else:
    ins = inputs + targets + sample_weights
  if hasattr(model, 'metrics'):
    # Stateful metrics accumulate across batches; reset them before the run.
    # Their outputs are reported as-is below, never averaged.
    for m in model.stateful_metric_functions:
      m.reset_states()
    stateful_metric_indices = [
        i for i, name in enumerate(model.metrics_names)
        if str(name) in model.stateful_metric_names
    ]
  else:
    stateful_metric_indices = []
  num_samples = training_utils.check_num_samples(
      ins, batch_size, steps, 'steps')
  outs = []
  if verbose == 1:
    if steps is not None:
      progbar = Progbar(target=steps)
    else:
      progbar = Progbar(target=num_samples)
  # To prevent a slowdown, we find beforehand the arrays that need conversion.
  feed = model._feed_inputs + model._feed_targets + model._feed_sample_weights
  indices_for_conversion_to_dense = []
  for i in range(len(feed)):
    if issparse is not None and issparse(ins[i]) and not K.is_sparse(feed[i]):
      indices_for_conversion_to_dense.append(i)
  if steps is not None:
    # Step-based evaluation: every step carries equal weight, so
    # non-stateful outputs are averaged over `steps` at the end.
    for step in range(steps):
      batch_outs = f(ins)
      if isinstance(batch_outs, list):
        if step == 0:
          # One accumulator per output (iterate the list directly; the
          # previous `enumerate` produced unused index/value tuples).
          for _ in batch_outs:
            outs.append(0.)
        for i, batch_out in enumerate(batch_outs):
          if i in stateful_metric_indices:
            outs[i] = batch_out
          else:
            outs[i] += batch_out
      else:
        if step == 0:
          outs.append(0.)
        outs[0] += batch_outs
      if verbose == 1:
        progbar.update(step + 1)
    for i in range(len(outs)):
      if i not in stateful_metric_indices:
        outs[i] /= steps
  else:
    # Sample-based evaluation: batches may differ in size, so batch outputs
    # are weighted by batch size and normalized by `num_samples` at the end.
    batches = make_batches(num_samples, batch_size)
    index_array = np.arange(num_samples)
    for batch_index, (batch_start, batch_end) in enumerate(batches):
      batch_ids = index_array[batch_start:batch_end]
      if isinstance(ins[-1], int):
        # Do not slice the training phase flag.
        ins_batch = slice_arrays(ins[:-1], batch_ids) + [ins[-1]]
      else:
        ins_batch = slice_arrays(ins, batch_ids)
      for i in indices_for_conversion_to_dense:
        ins_batch[i] = ins_batch[i].toarray()
      batch_outs = f(ins_batch)
      if isinstance(batch_outs, list):
        if batch_index == 0:
          # One accumulator per output (was `for batch_out in
          # enumerate(batch_outs)`, which bound unused (i, value) tuples
          # to a name shadowing the real `batch_out` loop variable).
          for _ in batch_outs:
            outs.append(0.)
        for i, batch_out in enumerate(batch_outs):
          if i in stateful_metric_indices:
            outs[i] = batch_out
          else:
            outs[i] += batch_out * len(batch_ids)
      else:
        if batch_index == 0:
          outs.append(0.)
        outs[0] += batch_outs * len(batch_ids)
      if verbose == 1:
        progbar.update(batch_end)
    for i in range(len(outs)):
      if i not in stateful_metric_indices:
        outs[i] /= num_samples
  if len(outs) == 1:
    return outs[0]
  return outs
| [
"tensorflow.python.keras.engine.training_utils.check_num_samples",
"tensorflow.python.keras.callbacks.BaseLogger",
"tensorflow.python.keras.backend.is_sparse",
"tensorflow.python.keras.engine.training_utils.batch_shuffle",
"tensorflow.python.keras.utils.generic_utils.make_batches",
"tensorflow.python.kera... | [((5206, 5295), 'tensorflow.python.keras.engine.training_utils.check_num_samples', 'training_utils.check_num_samples', (['ins', 'batch_size', 'steps_per_epoch', '"""steps_per_epoch"""'], {}), "(ins, batch_size, steps_per_epoch,\n 'steps_per_epoch')\n", (5238, 5295), False, 'from tensorflow.python.keras.engine import training_utils\n'), ((5401, 5415), 'tensorflow.python.keras.callbacks.History', 'cbks.History', ([], {}), '()\n', (5413, 5415), True, 'from tensorflow.python.keras import callbacks as cbks\n'), ((5815, 5847), 'tensorflow.python.keras.callbacks.CallbackList', 'cbks.CallbackList', (['all_callbacks'], {}), '(all_callbacks)\n', (5832, 5847), True, 'from tensorflow.python.keras import callbacks as cbks\n'), ((11350, 11418), 'tensorflow.python.keras.engine.training_utils.check_num_samples', 'training_utils.check_num_samples', (['inputs', 'batch_size', 'steps', '"""steps"""'], {}), "(inputs, batch_size, steps, 'steps')\n", (11382, 11418), False, 'from tensorflow.python.keras.engine import training_utils\n'), ((15316, 15381), 'tensorflow.python.keras.engine.training_utils.check_num_samples', 'training_utils.check_num_samples', (['ins', 'batch_size', 'steps', '"""steps"""'], {}), "(ins, batch_size, steps, 'steps')\n", (15348, 15381), False, 'from tensorflow.python.keras.engine import training_utils\n'), ((5161, 5182), 'copy.copy', 'copy.copy', (['out_labels'], {}), '(out_labels)\n', (5170, 5182), False, 'import copy\n'), ((5353, 5381), 'numpy.arange', 'np.arange', (['num_train_samples'], {}), '(num_train_samples)\n', (5362, 5381), True, 'import numpy as np\n'), ((5435, 5496), 'tensorflow.python.keras.callbacks.BaseLogger', 'cbks.BaseLogger', ([], {'stateful_metrics': 'model.stateful_metric_names'}), '(stateful_metrics=model.stateful_metric_names)\n', (5450, 5496), True, 'from tensorflow.python.keras import callbacks as cbks\n'), ((12769, 12806), 'tensorflow.python.keras.utils.generic_utils.make_batches', 'make_batches', 
(['num_samples', 'batch_size'], {}), '(num_samples, batch_size)\n', (12781, 12806), False, 'from tensorflow.python.keras.utils.generic_utils import make_batches\n'), ((12825, 12847), 'numpy.arange', 'np.arange', (['num_samples'], {}), '(num_samples)\n', (12834, 12847), True, 'import numpy as np\n'), ((16546, 16583), 'tensorflow.python.keras.utils.generic_utils.make_batches', 'make_batches', (['num_samples', 'batch_size'], {}), '(num_samples, batch_size)\n', (16558, 16583), False, 'from tensorflow.python.keras.utils.generic_utils import make_batches\n'), ((16602, 16624), 'numpy.arange', 'np.arange', (['num_samples'], {}), '(num_samples)\n', (16611, 16624), True, 'import numpy as np\n'), ((4720, 4741), 'copy.copy', 'copy.copy', (['out_labels'], {}), '(out_labels)\n', (4729, 4741), False, 'import copy\n'), ((5655, 5731), 'tensorflow.python.keras.callbacks.ProgbarLogger', 'cbks.ProgbarLogger', (['count_mode'], {'stateful_metrics': 'model.stateful_metric_names'}), '(count_mode, stateful_metrics=model.stateful_metric_names)\n', (5673, 5731), True, 'from tensorflow.python.keras import callbacks as cbks\n'), ((6796, 6812), 'scipy.sparse.issparse', 'issparse', (['ins[i]'], {}), '(ins[i])\n', (6804, 6812), False, 'from scipy.sparse import issparse\n'), ((8703, 8746), 'tensorflow.python.keras.utils.generic_utils.make_batches', 'make_batches', (['num_train_samples', 'batch_size'], {}), '(num_train_samples, batch_size)\n', (8715, 8746), False, 'from tensorflow.python.keras.utils.generic_utils import make_batches\n'), ((11487, 11508), 'tensorflow.python.keras.utils.generic_utils.Progbar', 'Progbar', ([], {'target': 'steps'}), '(target=steps)\n', (11494, 11508), False, 'from tensorflow.python.keras.utils.generic_utils import Progbar\n'), ((11535, 11562), 'tensorflow.python.keras.utils.generic_utils.Progbar', 'Progbar', ([], {'target': 'num_samples'}), '(target=num_samples)\n', (11542, 11562), False, 'from tensorflow.python.keras.utils.generic_utils import Progbar\n'), ((11679, 
11698), 'scipy.sparse.issparse', 'issparse', (['inputs[i]'], {}), '(inputs[i])\n', (11687, 11698), False, 'from scipy.sparse import issparse\n'), ((12531, 12577), 'numpy.concatenate', 'np.concatenate', (['unconcatenated_outs[0]'], {'axis': '(0)'}), '(unconcatenated_outs[0], axis=0)\n', (12545, 12577), True, 'import numpy as np\n'), ((12599, 12645), 'numpy.concatenate', 'np.concatenate', (['unconcatenated_outs[i]'], {'axis': '(0)'}), '(unconcatenated_outs[i], axis=0)\n', (12613, 12645), True, 'import numpy as np\n'), ((15462, 15483), 'tensorflow.python.keras.utils.generic_utils.Progbar', 'Progbar', ([], {'target': 'steps'}), '(target=steps)\n', (15469, 15483), False, 'from tensorflow.python.keras.utils.generic_utils import Progbar\n'), ((15510, 15537), 'tensorflow.python.keras.utils.generic_utils.Progbar', 'Progbar', ([], {'target': 'num_samples'}), '(target=num_samples)\n', (15517, 15537), False, 'from tensorflow.python.keras.utils.generic_utils import Progbar\n'), ((15797, 15813), 'scipy.sparse.issparse', 'issparse', (['ins[i]'], {}), '(ins[i])\n', (15805, 15813), False, 'from scipy.sparse import issparse\n'), ((3706, 3724), 'tensorflow.python.keras.backend.learning_phase', 'K.learning_phase', ([], {}), '()\n', (3722, 3724), True, 'from tensorflow.python.keras import backend as K\n'), ((6821, 6841), 'tensorflow.python.keras.backend.is_sparse', 'K.is_sparse', (['feed[i]'], {}), '(feed[i])\n', (6832, 6841), True, 'from tensorflow.python.keras import backend as K\n'), ((8573, 8626), 'tensorflow.python.keras.engine.training_utils.batch_shuffle', 'training_utils.batch_shuffle', (['index_array', 'batch_size'], {}), '(index_array, batch_size)\n', (8601, 8626), False, 'from tensorflow.python.keras.engine import training_utils\n'), ((11259, 11277), 'tensorflow.python.keras.backend.learning_phase', 'K.learning_phase', ([], {}), '()\n', (11275, 11277), True, 'from tensorflow.python.keras import backend as K\n'), ((11715, 11749), 'tensorflow.python.keras.backend.is_sparse', 
'K.is_sparse', (['model._feed_inputs[i]'], {}), '(model._feed_inputs[i])\n', (11726, 11749), True, 'from tensorflow.python.keras import backend as K\n'), ((13159, 13187), 'tensorflow.python.keras.utils.generic_utils.slice_arrays', 'slice_arrays', (['ins', 'batch_ids'], {}), '(ins, batch_ids)\n', (13171, 13187), False, 'from tensorflow.python.keras.utils.generic_utils import slice_arrays\n'), ((14882, 14900), 'tensorflow.python.keras.backend.learning_phase', 'K.learning_phase', ([], {}), '()\n', (14898, 14900), True, 'from tensorflow.python.keras import backend as K\n'), ((15822, 15842), 'tensorflow.python.keras.backend.is_sparse', 'K.is_sparse', (['feed[i]'], {}), '(feed[i])\n', (15833, 15842), True, 'from tensorflow.python.keras import backend as K\n'), ((16928, 16956), 'tensorflow.python.keras.utils.generic_utils.slice_arrays', 'slice_arrays', (['ins', 'batch_ids'], {}), '(ins, batch_ids)\n', (16940, 16956), False, 'from tensorflow.python.keras.utils.generic_utils import slice_arrays\n'), ((8655, 8685), 'numpy.random.shuffle', 'np.random.shuffle', (['index_array'], {}), '(index_array)\n', (8672, 8685), True, 'import numpy as np\n'), ((13081, 13114), 'tensorflow.python.keras.utils.generic_utils.slice_arrays', 'slice_arrays', (['ins[:-1]', 'batch_ids'], {}), '(ins[:-1], batch_ids)\n', (13093, 13114), False, 'from tensorflow.python.keras.utils.generic_utils import slice_arrays\n'), ((16850, 16883), 'tensorflow.python.keras.utils.generic_utils.slice_arrays', 'slice_arrays', (['ins[:-1]', 'batch_ids'], {}), '(ins[:-1], batch_ids)\n', (16862, 16883), False, 'from tensorflow.python.keras.utils.generic_utils import slice_arrays\n'), ((7465, 7695), 'tensorflow.python.platform.tf_logging.warning', 'logging.warning', (["(\n 'Your dataset iterator ran out of data; interrupting training. 
Make sure that your dataset can generate at least `steps_per_epoch * epochs` batches (in this case, %d batches).'\n % steps_per_epoch * epochs)"], {}), "(\n 'Your dataset iterator ran out of data; interrupting training. Make sure that your dataset can generate at least `steps_per_epoch * epochs` batches (in this case, %d batches).'\n % steps_per_epoch * epochs)\n", (7480, 7695), True, 'from tensorflow.python.platform import tf_logging as logging\n'), ((9088, 9116), 'tensorflow.python.keras.utils.generic_utils.slice_arrays', 'slice_arrays', (['ins', 'batch_ids'], {}), '(ins, batch_ids)\n', (9100, 9116), False, 'from tensorflow.python.keras.utils.generic_utils import slice_arrays\n'), ((13576, 13614), 'numpy.zeros', 'np.zeros', (['shape'], {'dtype': 'batch_out.dtype'}), '(shape, dtype=batch_out.dtype)\n', (13584, 13614), True, 'import numpy as np\n'), ((9002, 9035), 'tensorflow.python.keras.utils.generic_utils.slice_arrays', 'slice_arrays', (['ins[:-1]', 'batch_ids'], {}), '(ins[:-1], batch_ids)\n', (9014, 9035), False, 'from tensorflow.python.keras.utils.generic_utils import slice_arrays\n')] |
from copy import deepcopy
import logging
import os
import pickle
from bids.layout import BIDSImageFile
from bids.layout.writing import build_path as bids_build_path
import nibabel as nib
import numpy as np
import pandas as pd
import pytest
from rtCommon.bidsCommon import (
BIDS_DIR_PATH_PATTERN,
BIDS_FILE_PATTERN,
PYBIDS_PSEUDO_ENTITIES,
BidsFileExtension,
getNiftiData,
metadataFromProtocolName,
)
from rtCommon.bidsIncremental import BidsIncremental
from rtCommon.bidsArchive import BidsArchive
from rtCommon.errors import MissingMetadataError
from tests.common import isValidBidsArchive
logger = logging.getLogger(__name__)
# Test that construction fails for image metadata missing required fields
def testInvalidConstruction(sample2DNifti1, samplePseudo2DNifti1,
                            sample4DNifti1, imageMetadata):
    """Construction must fail for invalid images or incomplete metadata.

    Covers: a missing image, 2-D images (plain and masquerading as 4-D),
    each missing required metadata field, out-of-range timing metadata,
    a non-image object, and non-functional (non-'func') datatypes.
    """
    # Test empty image
    with pytest.raises(TypeError):
        BidsIncremental(image=None,
                        imageMetadata=imageMetadata)
    # Test 2-D image
    with pytest.raises(ValueError) as err:
        BidsIncremental(image=sample2DNifti1,
                        imageMetadata=imageMetadata)
    assert "Image must have at least 3 dimensions" in str(err.value)
    # Test 2-D image masquerading as 4-D image
    with pytest.raises(ValueError) as err:
        BidsIncremental(image=samplePseudo2DNifti1,
                        imageMetadata=imageMetadata)
    assert ("Image's 3rd (and any higher) dimensions are <= 1, which means "
            "it is a 2D image; images must have at least 3 dimensions" in
            str(err.value))
    # Test incomplete metadata: removing any single required field must make
    # the metadata incomplete and construction fail; each field is restored
    # before the next iteration.
    protocolName = imageMetadata.pop("ProtocolName")
    for key in BidsIncremental.REQUIRED_IMAGE_METADATA:
        value = imageMetadata.pop(key)
        assert not BidsIncremental.isCompleteImageMetadata(imageMetadata)
        with pytest.raises(MissingMetadataError):
            BidsIncremental(image=sample4DNifti1,
                            imageMetadata=imageMetadata)
        imageMetadata[key] = value
    imageMetadata["ProtocolName"] = protocolName
    # Test too-large repetition and echo times
    for key in ["RepetitionTime", "EchoTime"]:
        original = imageMetadata[key]
        imageMetadata[key] = 10**6
        with pytest.raises(ValueError):
            BidsIncremental(image=sample4DNifti1,
                            imageMetadata=imageMetadata)
        imageMetadata[key] = original
    # Test non-image object
    with pytest.raises(TypeError) as err:
        notImage = "definitely not an image"
        BidsIncremental(image=notImage,
                        imageMetadata=imageMetadata)
    assert ("Image must be one of [nib.Nifti1Image, nib.Nifti2Image, "
            f"BIDSImageFile (got {type(notImage)})" in str(err.value))
    # Test non-functional data
    with pytest.raises(NotImplementedError) as err:
        original = imageMetadata['datatype']
        invalidType = 'anat'
        imageMetadata['datatype'] = invalidType
        BidsIncremental(image=sample4DNifti1,
                        imageMetadata=imageMetadata)
    imageMetadata['datatype'] = original
    assert ("BIDS Incremental for BIDS datatypes other than 'func' is not "
            f"yet implemented (got '{invalidType}')") in str(err.value)
# Test that valid arguments produce a BIDS incremental
def testValidConstruction(sample3DNifti1, sample3DNifti2,
                          sample4DNifti1, sampleNifti2, bidsArchive4D,
                          imageMetadata):
    """Construction succeeds for every supported image source."""
    # 3-D images are promoted to 4-D; both NIfTI-1 and NIfTI-2 are accepted
    for niftiImage in (sample3DNifti1, sample3DNifti2,
                       sample4DNifti1, sampleNifti2):
        assert BidsIncremental(niftiImage, imageMetadata) is not None

    # Metadata whose RepetitionTime/EchoTime already works without
    # adjustment must also construct cleanly
    savedRepetitionTime = imageMetadata["RepetitionTime"]
    imageMetadata["RepetitionTime"] = 1.5
    assert BidsIncremental(sample4DNifti1, imageMetadata) is not None
    imageMetadata["RepetitionTime"] = savedRepetitionTime

    # A BIDSImageFile drawn from an archive is a valid image source too
    archiveImage = bidsArchive4D.getImages()[0]
    assert type(archiveImage) is BIDSImageFile
    assert BidsIncremental(archiveImage, imageMetadata) is not None
# Test that metadata values are of the correct types, if required by BIDS
def testMetadataTypes(validBidsI):
    """Timing metadata required by BIDS to be numeric is stored as float."""
    for fieldName in ("RepetitionTime", "EchoTime"):
        assert type(validBidsI.getMetadataField(fieldName)) is float
# Test that the provided image metadata dictionary takes precedence over the
# metadata parsed from the protocol name, if any
def testConstructionMetadataPrecedence(sample4DNifti1, imageMetadata):
    """Explicitly supplied metadata overrides ProtocolName-derived values."""
    protocolName = imageMetadata.get('ProtocolName', None)
    assert protocolName is not None
    parsedMetadata = metadataFromProtocolName(protocolName)
    assert len(parsedMetadata) > 0
    assert parsedMetadata.get('run', None) is not None

    # Deliberately conflict with the protocol-derived run number
    bumpedRunNumber = int(parsedMetadata['run']) + 1
    imageMetadata['run'] = bumpedRunNumber
    assert parsedMetadata['run'] != imageMetadata['run']

    # The explicitly supplied value must win
    incremental = BidsIncremental(sample4DNifti1, imageMetadata)
    assert incremental.getMetadataField('run') == bumpedRunNumber
# Test that the string output of the BIDS-I is as expected
def testStringOutput(validBidsI):
    """__str__ reports image shape, metadata key count, and version."""
    expected = (f"Image shape: {validBidsI.getImageDimensions()}; "
                f"Metadata Key Count: {len(validBidsI._imgMetadata.keys())}; "
                f"BIDS-I Version: {validBidsI.version}")
    assert str(validBidsI) == expected
# Test that equality comparison is as expected
def testEquals(sample4DNifti1, sample3DNifti1, imageMetadata):
    """Incrementals differing in any component must compare unequal."""
    # Different image headers (4-D vs 3-D source)
    assert BidsIncremental(sample4DNifti1, imageMetadata) != \
        BidsIncremental(sample3DNifti1, imageMetadata)

    # Same header, different voxel data
    doubledData = 2 * getNiftiData(sample4DNifti1)
    alteredNifti = nib.Nifti1Image(doubledData, sample4DNifti1.affine,
                                    header=sample4DNifti1.header)
    assert BidsIncremental(sample4DNifti1, imageMetadata) != \
        BidsIncremental(alteredNifti, imageMetadata)

    # Different image metadata
    changedMetadata = deepcopy(imageMetadata)
    changedMetadata["subject"] = "newSubject"
    assert BidsIncremental(sample4DNifti1, imageMetadata) != \
        BidsIncremental(sample4DNifti1, changedMetadata)

    # Different dataset metadata
    description1 = {"Name": "Dataset_1", "BIDSVersion": "1.0"}
    description2 = {"Name": "Dataset_2", "BIDSVersion": "2.0"}
    assert BidsIncremental(sample4DNifti1, imageMetadata, description1) != \
        BidsIncremental(sample4DNifti1, imageMetadata, description2)

    # Different README contents
    readmeInc1 = BidsIncremental(sample4DNifti1, imageMetadata)
    readmeInc2 = BidsIncremental(sample4DNifti1, imageMetadata)
    readmeInc1.readme = "README 1"
    readmeInc2.readme = "README 2"
    assert readmeInc1 != readmeInc2

    # Different events files
    eventsInc1 = BidsIncremental(sample4DNifti1, imageMetadata)
    eventsInc2 = BidsIncremental(sample4DNifti1, imageMetadata)
    baseEvents = {'onset': [1, 25, 50], 'duration': [10, 10, 10],
                  'response_time': [15, 36, 70]}
    shiftedEvents = {column: [value + 5 for value in values]
                     for column, values in baseEvents.items()}
    eventsInc1.events = pd.DataFrame(data=baseEvents)
    eventsInc2.events = pd.DataFrame(data=shiftedEvents)
    assert eventsInc1 != eventsInc2
# Test that image metadata dictionaries can be properly created by the class
def testImageMetadataDictCreation(imageMetadata):
    """createImageMetadataDict echoes its arguments and required fields."""
    builtDict = BidsIncremental.createImageMetadataDict(
        subject=imageMetadata["subject"],
        task=imageMetadata["task"],
        suffix=imageMetadata["suffix"],
        repetitionTime=imageMetadata["RepetitionTime"],
        datatype='func')
    assert all(builtDict.get(key) == imageMetadata.get(key)
               for key in builtDict)

    # Keep the helper in sync with REQUIRED_IMAGE_METADATA: pass every
    # required field (as a lowerCamelCase kwarg) and expect it echoed back
    placeholder = 'n/a'
    requiredKwargs = {fieldName[0].lower() + fieldName[1:]: placeholder
                      for fieldName in BidsIncremental.REQUIRED_IMAGE_METADATA}
    builtDict = BidsIncremental.createImageMetadataDict(**requiredKwargs)
    assert all(builtDict[fieldName] == placeholder
               for fieldName in BidsIncremental.REQUIRED_IMAGE_METADATA)
# Test that internal metadata dictionary is independent from the argument dict
def testMetadataDictionaryIndependence(sample4DNifti1, imageMetadata):
    """The incremental keeps its own copy of the metadata dictionary."""
    incremental = BidsIncremental(sample4DNifti1, imageMetadata)

    fieldName = 'subject'
    storedValue = incremental.getMetadataField(fieldName)
    assert storedValue == imageMetadata[fieldName]

    # Mutating the caller's dict must not leak into the incremental
    imageMetadata[fieldName] = 'a brand-new subject'
    assert incremental.getMetadataField(fieldName) == storedValue
    assert incremental.getMetadataField(fieldName) != imageMetadata[fieldName]
# Test that invalid dataset.json fields are rejected and valid ones are accepted
def testDatasetMetadata(sample4DNifti1, imageMetadata):
    """Invalid dataset descriptions are rejected; valid ones are stored."""
    # Description missing required fields -> rejected
    with pytest.raises(MissingMetadataError):
        BidsIncremental(image=sample4DNifti1,
                        imageMetadata=imageMetadata,
                        datasetDescription={"random_field": "doesnt work"})

    # Complete description -> accepted and queryable
    datasetName = "Test dataset"
    incremental = BidsIncremental(image=sample4DNifti1,
                                imageMetadata=imageMetadata,
                                datasetDescription={"Name": datasetName,
                                                    "BIDSVersion": "1.0"})
    assert incremental.getDatasetName() == datasetName
# Test that extracting metadata from the BIDS-I using its provided API returns
# the correct values
def testMetadataOutput(validBidsI, imageMetadata):
    """Metadata getters return stored values and fail on unknown names."""
    # Unknown entity: strict mode raises ValueError, lax mode KeyError
    with pytest.raises(ValueError):
        validBidsI.getMetadataField("InvalidEntityName", strict=True)
    with pytest.raises(KeyError):
        validBidsI.getMetadataField("InvalidEntityName")

    # Data type - always 'func' currently
    assert validBidsI.getDatatype() == "func"
    # Entities round-trip from the source metadata
    for entityName in ('subject', 'task'):
        assert validBidsI.getMetadataField(entityName) == \
            imageMetadata[entityName]
    # So does the suffix
    assert validBidsI.getSuffix() == imageMetadata["suffix"]
# Test setting BIDS-I metadata API works as expected
def testSetMetadata(validBidsI):
    """setMetadataField validates names and updates values reversibly."""
    # Non-official BIDS entity: rejected when strict, accepted otherwise
    with pytest.raises(ValueError):
        validBidsI.setMetadataField("nonentity", "value", strict=True)
    validBidsI.setMetadataField("nonentity", "value", strict=False)
    assert validBidsI.getMetadataField("nonentity", strict=False) == "value"
    validBidsI.removeMetadataField("nonentity", strict=False)

    # A None field name is never valid
    with pytest.raises(ValueError):
        validBidsI.setMetadataField(None, "test")

    # Round-trip an official entity through a new value and back
    fieldName = "subject"
    previousValue = validBidsI.getMetadataField(fieldName)
    validBidsI.setMetadataField(fieldName, "newValue")
    assert validBidsI.getMetadataField(fieldName) == "newValue"
    validBidsI.setMetadataField(fieldName, previousValue)
    assert validBidsI.getMetadataField(fieldName) == previousValue
# Test removing BIDS-I metadata API works as expected
def testRemoveMetadata(validBidsI):
    """removeMetadataField rejects unknown/required fields, removes others."""
    # Unknown entity fails in strict mode
    with pytest.raises(ValueError):
        validBidsI.removeMetadataField("nonentity", strict=True)
    # Required entities cannot be removed
    with pytest.raises(RuntimeError):
        validBidsI.removeMetadataField("subject")

    # Optional fields are removed, after which lookup raises, and the
    # field can be restored
    fieldName = "ProtocolName"
    previousValue = validBidsI.getMetadataField(fieldName)
    validBidsI.removeMetadataField(fieldName)
    with pytest.raises(KeyError):
        validBidsI.getMetadataField(fieldName)
    validBidsI.setMetadataField(fieldName, previousValue)
    assert validBidsI.getMetadataField(fieldName) == previousValue
# Test that the BIDS-I interface methods for extracting internal NIfTI data
# return the correct values
def testQueryNifti(validBidsI):
    """Image data and header exposed by the BIDS-I match the wrapped NIfTI.

    Fix: the failure message previously computed
    ``np.sum(np.where(queriedData != exactData))`` — the sum of the mismatch
    *indices* (``np.where`` on a boolean mask returns index arrays) — not the
    count of unequal elements the message claims. Summing the boolean mask
    itself yields the actual mismatch count.
    """
    # Image data
    queriedData = validBidsI.getImageData()
    exactData = getNiftiData(validBidsI.image)
    assert np.array_equal(queriedData, exactData), "{} elements not equal" \
        .format(np.sum(queriedData != exactData))

    # Header Data
    queriedHeader = validBidsI.getImageHeader()
    exactHeader = validBidsI.image.header

    # Compare full image header
    assert queriedHeader.keys() == exactHeader.keys()
    for (field, queryValue) in queriedHeader.items():
        exactValue = exactHeader.get(field)
        if queryValue.dtype.char == 'S':
            # Byte-string fields cannot go through allclose
            assert queryValue == exactValue
        else:
            assert np.allclose(queryValue, exactValue, atol=0.0, equal_nan=True)

    # Compare Header field: Dimensions
    FIELD = "dim"
    assert np.array_equal(queriedHeader.get(FIELD), exactHeader.get(FIELD))
# Test that constructing BIDS-compatible filenames from internal metadata
# returns the correct filenames
def testFilenameConstruction(validBidsI, imageMetadata):
    """
    General format:
    sub-<label>[_ses-<label>]_task-<label>[_acq-<label>] [_ce-<label>]
    [_dir-<label>][_rec-<label>][_run-<index>]
    [_echo-<index>]_<contrast_label >.ext
    """
    expectedBase = bids_build_path(imageMetadata, BIDS_FILE_PATTERN)
    extensionMap = {BidsFileExtension.IMAGE: ".nii",
                    BidsFileExtension.METADATA: ".json"}
    for extension, suffix in extensionMap.items():
        assert validBidsI.makeBidsFileName(extension) == expectedBase + suffix
# Test that the hypothetical path for the BIDS-I if it were in an archive is
# correct based on the metadata within it
def testArchivePathConstruction(validBidsI, imageMetadata):
    """Data directory path follows the BIDS directory pattern for the metadata."""
    expectedPath = bids_build_path(imageMetadata, BIDS_DIR_PATH_PATTERN)
    assert validBidsI.getDataDirPath() == expectedPath
# Test that writing the BIDS-I to disk returns a properly formatted BIDS archive
# in the correct location with all the data in the BIDS-I
def testDiskOutput(validBidsI, tmpdir):
    """writeToDisk yields a valid BIDS archive round-trippable to a BIDS-I."""
    # Write the full archive
    datasetRoot = os.path.join(tmpdir, "bids-pytest-dataset")
    validBidsI.writeToDisk(datasetRoot)

    # Re-open with BidsArchive and verify the round trip against the source
    archive = BidsArchive(datasetRoot)
    archiveImage = archive.getImages()[0]
    metadata = archive.getSidecarMetadata(archiveImage, includeEntities=True)
    # Pseudo entities would spuriously differ from the source incremental
    for pseudoEntity in PYBIDS_PSEUDO_ENTITIES:
        metadata.pop(pseudoEntity)
    assert BidsIncremental(archiveImage, metadata) == validBidsI
    assert isValidBidsArchive(archive.rootPath)

    # Data-only output omits the README and dataset description
    dataOnlyRoot = os.path.join(tmpdir, "bids-pytest-dataset-2")
    validBidsI.writeToDisk(dataOnlyRoot, onlyData=True)
    for omittedFile in ("README", "dataset_description.json"):
        assert not os.path.exists(os.path.join(dataOnlyRoot, omittedFile))
# Test serialization results in equivalent BIDS-I object
def testSerialization(validBidsI, sample4DNifti1, imageMetadata, tmpdir):
    """Pickling survives removal of the backing image file on disk."""
    # Build an incremental from a NIfTI freshly written to a known location
    sourceFilePath = os.path.join(tmpdir, 'test.nii')
    nib.save(sample4DNifti1, sourceFilePath)
    incremental = BidsIncremental(nib.load(sourceFilePath), imageMetadata)

    # validBidsI is derived from a file elsewhere on disk, so it remains a
    # valid reference after sourceFilePath is deleted. Transitivity:
    # incremental == validBidsI and validBidsI == deserialized together
    # imply incremental == deserialized.
    assert incremental == validBidsI

    # Serialize, then remove the backing file so deserialization cannot
    # rely on it
    serialized = pickle.dumps(incremental)
    del incremental
    os.remove(sourceFilePath)

    deserialized = pickle.loads(serialized)
    assert validBidsI == deserialized
    # The deserialized image must not reference any on-disk file
    assert deserialized.image.file_map['image'].filename is None
| [
"logging.getLogger",
"nibabel.load",
"pickle.dumps",
"copy.deepcopy",
"pickle.loads",
"os.remove",
"rtCommon.bidsArchive.BidsArchive",
"tests.common.isValidBidsArchive",
"numpy.where",
"bids.layout.writing.build_path",
"pandas.DataFrame",
"numpy.allclose",
"rtCommon.bidsCommon.metadataFromPr... | [((628, 655), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (645, 655), False, 'import logging\n'), ((5059, 5114), 'rtCommon.bidsCommon.metadataFromProtocolName', 'metadataFromProtocolName', (["imageMetadata['ProtocolName']"], {}), "(imageMetadata['ProtocolName'])\n", (5083, 5114), False, 'from rtCommon.bidsCommon import BIDS_DIR_PATH_PATTERN, BIDS_FILE_PATTERN, PYBIDS_PSEUDO_ENTITIES, BidsFileExtension, getNiftiData, metadataFromProtocolName\n'), ((5348, 5394), 'rtCommon.bidsIncremental.BidsIncremental', 'BidsIncremental', (['sample4DNifti1', 'imageMetadata'], {}), '(sample4DNifti1, imageMetadata)\n', (5363, 5394), False, 'from rtCommon.bidsIncremental import BidsIncremental\n'), ((6283, 6360), 'nibabel.Nifti1Image', 'nib.Nifti1Image', (['newData', 'sample4DNifti1.affine'], {'header': 'sample4DNifti1.header'}), '(newData, sample4DNifti1.affine, header=sample4DNifti1.header)\n', (6298, 6360), True, 'import nibabel as nib\n'), ((6581, 6604), 'copy.deepcopy', 'deepcopy', (['imageMetadata'], {}), '(imageMetadata)\n', (6589, 6604), False, 'from copy import deepcopy\n'), ((7148, 7194), 'rtCommon.bidsIncremental.BidsIncremental', 'BidsIncremental', (['sample4DNifti1', 'imageMetadata'], {}), '(sample4DNifti1, imageMetadata)\n', (7163, 7194), False, 'from rtCommon.bidsIncremental import BidsIncremental\n'), ((7214, 7260), 'rtCommon.bidsIncremental.BidsIncremental', 'BidsIncremental', (['sample4DNifti1', 'imageMetadata'], {}), '(sample4DNifti1, imageMetadata)\n', (7229, 7260), False, 'from rtCommon.bidsIncremental import BidsIncremental\n'), ((7473, 7519), 'rtCommon.bidsIncremental.BidsIncremental', 'BidsIncremental', (['sample4DNifti1', 'imageMetadata'], {}), '(sample4DNifti1, imageMetadata)\n', (7488, 7519), False, 'from rtCommon.bidsIncremental import BidsIncremental\n'), ((7539, 7585), 'rtCommon.bidsIncremental.BidsIncremental', 'BidsIncremental', (['sample4DNifti1', 'imageMetadata'], {}), 
'(sample4DNifti1, imageMetadata)\n', (7554, 7585), False, 'from rtCommon.bidsIncremental import BidsIncremental\n'), ((7800, 7826), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'events1'}), '(data=events1)\n', (7812, 7826), True, 'import pandas as pd\n'), ((7853, 7879), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'events2'}), '(data=events2)\n', (7865, 7879), True, 'import pandas as pd\n'), ((8067, 8273), 'rtCommon.bidsIncremental.BidsIncremental.createImageMetadataDict', 'BidsIncremental.createImageMetadataDict', ([], {'subject': "imageMetadata['subject']", 'task': "imageMetadata['task']", 'suffix': "imageMetadata['suffix']", 'repetitionTime': "imageMetadata['RepetitionTime']", 'datatype': '"""func"""'}), "(subject=imageMetadata['subject'],\n task=imageMetadata['task'], suffix=imageMetadata['suffix'],\n repetitionTime=imageMetadata['RepetitionTime'], datatype='func')\n", (8106, 8273), False, 'from rtCommon.bidsIncremental import BidsIncremental\n'), ((8785, 8840), 'rtCommon.bidsIncremental.BidsIncremental.createImageMetadataDict', 'BidsIncremental.createImageMetadataDict', ([], {}), '(**metadataDict)\n', (8824, 8840), False, 'from rtCommon.bidsIncremental import BidsIncremental\n'), ((9118, 9164), 'rtCommon.bidsIncremental.BidsIncremental', 'BidsIncremental', (['sample4DNifti1', 'imageMetadata'], {}), '(sample4DNifti1, imageMetadata)\n', (9133, 9164), False, 'from rtCommon.bidsIncremental import BidsIncremental\n'), ((9943, 10078), 'rtCommon.bidsIncremental.BidsIncremental', 'BidsIncremental', ([], {'image': 'sample4DNifti1', 'imageMetadata': 'imageMetadata', 'datasetDescription': "{'Name': dataset_name, 'BIDSVersion': '1.0'}"}), "(image=sample4DNifti1, imageMetadata=imageMetadata,\n datasetDescription={'Name': dataset_name, 'BIDSVersion': '1.0'})\n", (9958, 10078), False, 'from rtCommon.bidsIncremental import BidsIncremental\n'), ((12830, 12860), 'rtCommon.bidsCommon.getNiftiData', 'getNiftiData', (['validBidsI.image'], {}), '(validBidsI.image)\n', 
(12842, 12860), False, 'from rtCommon.bidsCommon import BIDS_DIR_PATH_PATTERN, BIDS_FILE_PATTERN, PYBIDS_PSEUDO_ENTITIES, BidsFileExtension, getNiftiData, metadataFromProtocolName\n'), ((12872, 12910), 'numpy.array_equal', 'np.array_equal', (['queriedData', 'exactData'], {}), '(queriedData, exactData)\n', (12886, 12910), True, 'import numpy as np\n'), ((13994, 14043), 'bids.layout.writing.build_path', 'bids_build_path', (['imageMetadata', 'BIDS_FILE_PATTERN'], {}), '(imageMetadata, BIDS_FILE_PATTERN)\n', (14009, 14043), True, 'from bids.layout.writing import build_path as bids_build_path\n'), ((14757, 14800), 'os.path.join', 'os.path.join', (['tmpdir', '"""bids-pytest-dataset"""'], {}), "(tmpdir, 'bids-pytest-dataset')\n", (14769, 14800), False, 'import os\n'), ((14966, 14990), 'rtCommon.bidsArchive.BidsArchive', 'BidsArchive', (['datasetRoot'], {}), '(datasetRoot)\n', (14977, 14990), False, 'from rtCommon.bidsArchive import BidsArchive\n'), ((15280, 15319), 'rtCommon.bidsIncremental.BidsIncremental', 'BidsIncremental', (['archiveImage', 'metadata'], {}), '(archiveImage, metadata)\n', (15295, 15319), False, 'from rtCommon.bidsIncremental import BidsIncremental\n'), ((15380, 15416), 'tests.common.isValidBidsArchive', 'isValidBidsArchive', (['archive.rootPath'], {}), '(archive.rootPath)\n', (15398, 15416), False, 'from tests.common import isValidBidsArchive\n'), ((15464, 15509), 'os.path.join', 'os.path.join', (['tmpdir', '"""bids-pytest-dataset-2"""'], {}), "(tmpdir, 'bids-pytest-dataset-2')\n", (15476, 15509), False, 'import os\n'), ((16004, 16040), 'os.path.join', 'os.path.join', (['tmpdir', 'sourceFileName'], {}), '(tmpdir, sourceFileName)\n', (16016, 16040), False, 'import os\n'), ((16045, 16085), 'nibabel.save', 'nib.save', (['sample4DNifti1', 'sourceFilePath'], {}), '(sample4DNifti1, sourceFilePath)\n', (16053, 16085), True, 'import nibabel as nib\n'), ((16105, 16129), 'nibabel.load', 'nib.load', (['sourceFilePath'], {}), '(sourceFilePath)\n', (16113, 16129), 
True, 'import nibabel as nib\n'), ((16148, 16191), 'rtCommon.bidsIncremental.BidsIncremental', 'BidsIncremental', (['sourceNifti', 'imageMetadata'], {}), '(sourceNifti, imageMetadata)\n', (16163, 16191), False, 'from rtCommon.bidsIncremental import BidsIncremental\n'), ((16568, 16593), 'pickle.dumps', 'pickle.dumps', (['incremental'], {}), '(incremental)\n', (16580, 16593), False, 'import pickle\n'), ((16690, 16715), 'os.remove', 'os.remove', (['sourceFilePath'], {}), '(sourceFilePath)\n', (16699, 16715), False, 'import os\n'), ((16765, 16789), 'pickle.loads', 'pickle.loads', (['serialized'], {}), '(serialized)\n', (16777, 16789), False, 'import pickle\n'), ((890, 914), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (903, 914), False, 'import pytest\n'), ((924, 980), 'rtCommon.bidsIncremental.BidsIncremental', 'BidsIncremental', ([], {'image': 'None', 'imageMetadata': 'imageMetadata'}), '(image=None, imageMetadata=imageMetadata)\n', (939, 980), False, 'from rtCommon.bidsIncremental import BidsIncremental\n'), ((1036, 1061), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (1049, 1061), False, 'import pytest\n'), ((1078, 1144), 'rtCommon.bidsIncremental.BidsIncremental', 'BidsIncremental', ([], {'image': 'sample2DNifti1', 'imageMetadata': 'imageMetadata'}), '(image=sample2DNifti1, imageMetadata=imageMetadata)\n', (1093, 1144), False, 'from rtCommon.bidsIncremental import BidsIncremental\n'), ((1299, 1324), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (1312, 1324), False, 'import pytest\n'), ((1341, 1413), 'rtCommon.bidsIncremental.BidsIncremental', 'BidsIncremental', ([], {'image': 'samplePseudo2DNifti1', 'imageMetadata': 'imageMetadata'}), '(image=samplePseudo2DNifti1, imageMetadata=imageMetadata)\n', (1356, 1413), False, 'from rtCommon.bidsIncremental import BidsIncremental\n'), ((2519, 2543), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (2532, 2543), False, 
'import pytest\n'), ((2605, 2665), 'rtCommon.bidsIncremental.BidsIncremental', 'BidsIncremental', ([], {'image': 'notImage', 'imageMetadata': 'imageMetadata'}), '(image=notImage, imageMetadata=imageMetadata)\n', (2620, 2665), False, 'from rtCommon.bidsIncremental import BidsIncremental\n'), ((2880, 2914), 'pytest.raises', 'pytest.raises', (['NotImplementedError'], {}), '(NotImplementedError)\n', (2893, 2914), False, 'import pytest\n'), ((3053, 3119), 'rtCommon.bidsIncremental.BidsIncremental', 'BidsIncremental', ([], {'image': 'sample4DNifti1', 'imageMetadata': 'imageMetadata'}), '(image=sample4DNifti1, imageMetadata=imageMetadata)\n', (3068, 3119), False, 'from rtCommon.bidsIncremental import BidsIncremental\n'), ((3621, 3667), 'rtCommon.bidsIncremental.BidsIncremental', 'BidsIncremental', (['sample3DNifti1', 'imageMetadata'], {}), '(sample3DNifti1, imageMetadata)\n', (3636, 3667), False, 'from rtCommon.bidsIncremental import BidsIncremental\n'), ((3691, 3737), 'rtCommon.bidsIncremental.BidsIncremental', 'BidsIncremental', (['sample3DNifti2', 'imageMetadata'], {}), '(sample3DNifti2, imageMetadata)\n', (3706, 3737), False, 'from rtCommon.bidsIncremental import BidsIncremental\n'), ((3810, 3856), 'rtCommon.bidsIncremental.BidsIncremental', 'BidsIncremental', (['sample4DNifti1', 'imageMetadata'], {}), '(sample4DNifti1, imageMetadata)\n', (3825, 3856), False, 'from rtCommon.bidsIncremental import BidsIncremental\n'), ((3880, 3924), 'rtCommon.bidsIncremental.BidsIncremental', 'BidsIncremental', (['sampleNifti2', 'imageMetadata'], {}), '(sampleNifti2, imageMetadata)\n', (3895, 3924), False, 'from rtCommon.bidsIncremental import BidsIncremental\n'), ((4213, 4259), 'rtCommon.bidsIncremental.BidsIncremental', 'BidsIncremental', (['sample4DNifti1', 'imageMetadata'], {}), '(sample4DNifti1, imageMetadata)\n', (4228, 4259), False, 'from rtCommon.bidsIncremental import BidsIncremental\n'), ((4457, 4494), 'rtCommon.bidsIncremental.BidsIncremental', 'BidsIncremental', (['image', 
'imageMetadata'], {}), '(image, imageMetadata)\n', (4472, 4494), False, 'from rtCommon.bidsIncremental import BidsIncremental\n'), ((6045, 6091), 'rtCommon.bidsIncremental.BidsIncremental', 'BidsIncremental', (['sample4DNifti1', 'imageMetadata'], {}), '(sample4DNifti1, imageMetadata)\n', (6060, 6091), False, 'from rtCommon.bidsIncremental import BidsIncremental\n'), ((6108, 6154), 'rtCommon.bidsIncremental.BidsIncremental', 'BidsIncremental', (['sample3DNifti1', 'imageMetadata'], {}), '(sample3DNifti1, imageMetadata)\n', (6123, 6154), False, 'from rtCommon.bidsIncremental import BidsIncremental\n'), ((6233, 6261), 'rtCommon.bidsCommon.getNiftiData', 'getNiftiData', (['sample4DNifti1'], {}), '(sample4DNifti1)\n', (6245, 6261), False, 'from rtCommon.bidsCommon import BIDS_DIR_PATH_PATTERN, BIDS_FILE_PATTERN, PYBIDS_PSEUDO_ENTITIES, BidsFileExtension, getNiftiData, metadataFromProtocolName\n'), ((6409, 6455), 'rtCommon.bidsIncremental.BidsIncremental', 'BidsIncremental', (['sample4DNifti1', 'imageMetadata'], {}), '(sample4DNifti1, imageMetadata)\n', (6424, 6455), False, 'from rtCommon.bidsIncremental import BidsIncremental\n'), ((6469, 6515), 'rtCommon.bidsIncremental.BidsIncremental', 'BidsIncremental', (['reversedNifti1', 'imageMetadata'], {}), '(reversedNifti1, imageMetadata)\n', (6484, 6515), False, 'from rtCommon.bidsIncremental import BidsIncremental\n'), ((6668, 6714), 'rtCommon.bidsIncremental.BidsIncremental', 'BidsIncremental', (['sample4DNifti1', 'imageMetadata'], {}), '(sample4DNifti1, imageMetadata)\n', (6683, 6714), False, 'from rtCommon.bidsIncremental import BidsIncremental\n'), ((6731, 6785), 'rtCommon.bidsIncremental.BidsIncremental', 'BidsIncremental', (['sample4DNifti1', 'modifiedImageMetadata'], {}), '(sample4DNifti1, modifiedImageMetadata)\n', (6746, 6785), False, 'from rtCommon.bidsIncremental import BidsIncremental\n'), ((6962, 7022), 'rtCommon.bidsIncremental.BidsIncremental', 'BidsIncremental', (['sample4DNifti1', 'imageMetadata', 
'datasetMeta1'], {}), '(sample4DNifti1, imageMetadata, datasetMeta1)\n', (6977, 7022), False, 'from rtCommon.bidsIncremental import BidsIncremental\n'), ((7039, 7099), 'rtCommon.bidsIncremental.BidsIncremental', 'BidsIncremental', (['sample4DNifti1', 'imageMetadata', 'datasetMeta2'], {}), '(sample4DNifti1, imageMetadata, datasetMeta2)\n', (7054, 7099), False, 'from rtCommon.bidsIncremental import BidsIncremental\n'), ((9648, 9683), 'pytest.raises', 'pytest.raises', (['MissingMetadataError'], {}), '(MissingMetadataError)\n', (9661, 9683), False, 'import pytest\n'), ((9693, 9815), 'rtCommon.bidsIncremental.BidsIncremental', 'BidsIncremental', ([], {'image': 'sample4DNifti1', 'imageMetadata': 'imageMetadata', 'datasetDescription': "{'random_field': 'doesnt work'}"}), "(image=sample4DNifti1, imageMetadata=imageMetadata,\n datasetDescription={'random_field': 'doesnt work'})\n", (9708, 9815), False, 'from rtCommon.bidsIncremental import BidsIncremental\n'), ((10399, 10424), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (10412, 10424), False, 'import pytest\n'), ((10505, 10528), 'pytest.raises', 'pytest.raises', (['KeyError'], {}), '(KeyError)\n', (10518, 10528), False, 'import pytest\n'), ((11031, 11056), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (11044, 11056), False, 'import pytest\n'), ((11430, 11455), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (11443, 11455), False, 'import pytest\n'), ((12009, 12034), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (12022, 12034), False, 'import pytest\n'), ((12177, 12204), 'pytest.raises', 'pytest.raises', (['RuntimeError'], {}), '(RuntimeError)\n', (12190, 12204), False, 'import pytest\n'), ((12406, 12429), 'pytest.raises', 'pytest.raises', (['KeyError'], {}), '(KeyError)\n', (12419, 12429), False, 'import pytest\n'), ((14480, 14533), 'bids.layout.writing.build_path', 'bids_build_path', (['imageMetadata', 
'BIDS_DIR_PATH_PATTERN'], {}), '(imageMetadata, BIDS_DIR_PATH_PATTERN)\n', (14495, 14533), True, 'from bids.layout.writing import build_path as bids_build_path\n'), ((1829, 1883), 'rtCommon.bidsIncremental.BidsIncremental.isCompleteImageMetadata', 'BidsIncremental.isCompleteImageMetadata', (['imageMetadata'], {}), '(imageMetadata)\n', (1868, 1883), False, 'from rtCommon.bidsIncremental import BidsIncremental\n'), ((1897, 1932), 'pytest.raises', 'pytest.raises', (['MissingMetadataError'], {}), '(MissingMetadataError)\n', (1910, 1932), False, 'import pytest\n'), ((1946, 2012), 'rtCommon.bidsIncremental.BidsIncremental', 'BidsIncremental', ([], {'image': 'sample4DNifti1', 'imageMetadata': 'imageMetadata'}), '(image=sample4DNifti1, imageMetadata=imageMetadata)\n', (1961, 2012), False, 'from rtCommon.bidsIncremental import BidsIncremental\n'), ((2308, 2333), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (2321, 2333), False, 'import pytest\n'), ((2347, 2413), 'rtCommon.bidsIncremental.BidsIncremental', 'BidsIncremental', ([], {'image': 'sample4DNifti1', 'imageMetadata': 'imageMetadata'}), '(image=sample4DNifti1, imageMetadata=imageMetadata)\n', (2362, 2413), False, 'from rtCommon.bidsIncremental import BidsIncremental\n'), ((12961, 12995), 'numpy.where', 'np.where', (['(queriedData != exactData)'], {}), '(queriedData != exactData)\n', (12969, 12995), True, 'import numpy as np\n'), ((13410, 13471), 'numpy.allclose', 'np.allclose', (['queryValue', 'exactValue'], {'atol': '(0.0)', 'equal_nan': '(True)'}), '(queryValue, exactValue, atol=0.0, equal_nan=True)\n', (13421, 13471), True, 'import numpy as np\n'), ((15595, 15630), 'os.path.join', 'os.path.join', (['datasetRoot', '"""README"""'], {}), "(datasetRoot, 'README')\n", (15607, 15630), False, 'import os\n'), ((15662, 15715), 'os.path.join', 'os.path.join', (['datasetRoot', '"""dataset_description.json"""'], {}), "(datasetRoot, 'dataset_description.json')\n", (15674, 15715), False, 'import 
os\n')] |
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
import numpy as np
from scipy.ndimage.interpolation import map_coordinates
from scipy.ndimage.filters import gaussian_filter
def flip(imagelist, axis=1):
    """Randomly flip spatial dimensions
    Args:
        imagelist (np.ndarray or list or tuple): image(s) to be flipped
        axis (int): axis along which to flip the images
    Returns:
        np.ndarray or list or tuple: same as imagelist but randomly flipped
            along axis
    """
    # A bare ndarray is wrapped in a list so the loop below works uniformly
    single_input = isinstance(imagelist, np.ndarray)
    if single_input:
        imagelist = [imagelist]
    # With probability 0.5, flip every image across `axis` (one draw for all
    # images, so paired inputs such as image/label stay aligned)
    if np.random.random(1) > 0.5:
        for pos, img in enumerate(imagelist):
            imagelist[pos] = np.flip(img, axis=axis)
    return imagelist[0] if single_input else imagelist
def add_gaussian_offset(image, sigma=0.1):
    """
    Add Gaussian offset to an image. Adds the offset to each channel
    independently.
    Args:
        image (np.ndarray): image to add noise to
        sigma (float): stddev of the Gaussian distribution to generate noise
            from
    Returns:
        np.ndarray: same as image but with added offset to each channel
    """
    # One offset per channel; leading singleton axes broadcast it over all
    # spatial dimensions. The addition mutates `image` in place.
    offset_shape = [1] * (image.ndim - 1) + [image.shape[-1]]
    image += np.random.normal(loc=0, scale=sigma, size=offset_shape)
    return image
def add_gaussian_noise(image, sigma=0.05):
    """
    Add Gaussian noise to an image
    Args:
        image (np.ndarray): image to add noise to
        sigma (float): stddev of the Gaussian distribution to generate noise
            from
    Returns:
        np.ndarray: same as image but with added offset to each channel
    """
    # Independent noise sample per element, added in place
    noise = np.random.normal(loc=0, scale=sigma, size=image.shape)
    image += noise
    return image
def elastic_transform(image, alpha, sigma):
    """
    Elastic deformation of images as described in [1].
    [1] Simard, Steinkraus and Platt, "Best Practices for Convolutional
    Neural Networks applied to Visual Document Analysis", in Proc. of the
    International Conference on Document Analysis and Recognition, 2003.
    Based on gist https://gist.github.com/erniejunior/601cdf56d2b424757de5
    Args:
        image (np.ndarray): image to be deformed
        alpha (list): scale of transformation for each dimension, where larger
            values have more deformation
        sigma (list): Gaussian window of deformation for each dimension, where
            smaller values have more localised deformation
    Returns:
        np.ndarray: deformed image
    """
    assert len(alpha) == len(sigma), \
        "Dimensions of alpha and sigma are different"
    # Number of trailing dimensions (e.g. channels) not covered by alpha/sigma
    channelbool = image.ndim - len(alpha)
    # One displacement field per coordinate axis; fields for the trailing
    # channel dimensions are left all-zero so channels are not displaced
    out = np.zeros((len(alpha) + channelbool, ) + image.shape)
    # Generate a Gaussian filter, leaving channel dimensions zeroes
    for jj in range(len(alpha)):
        # Uniform random field in [-1, 1), smoothed by sigma[jj] and scaled
        # by alpha[jj]
        array = (np.random.rand(*image.shape) * 2 - 1)
        out[jj] = gaussian_filter(array, sigma[jj],
                                  mode="constant", cval=0) * alpha[jj]
    # Map mask to indices
    shapes = list(map(lambda x: slice(0, x, None), image.shape))
    grid = np.broadcast_arrays(*np.ogrid[shapes])
    # Add the displacement fields to the identity grid and flatten each
    # coordinate into a column vector, as map_coordinates expects
    indices = list(map((lambda x: np.reshape(x, (-1, 1))), grid + np.array(out)))
    # Transform image based on masked indices
    # order=0 is nearest-neighbour sampling, so discrete label values survive
    transformed_image = map_coordinates(image, indices, order=0,
                                       mode='reflect').reshape(image.shape)
    return transformed_image
def extract_class_balanced_example_array(image,
                                         label,
                                         example_size=[1, 64, 64],
                                         n_examples=1,
                                         classes=2,
                                         class_weights=None):
    """Extract training examples from an image (and corresponding label) subject
    to class balancing. Returns an image example array and the
    corresponding label array.
    Args:
        image (np.ndarray): image to extract class-balanced patches from
        label (np.ndarray): labels to use for balancing the classes
        example_size (list or tuple): shape of the patches to extract
        n_examples (int): number of patches to extract in total
        classes (int or list or tuple): number of classes or list of classes
            to extract
        class_weights (list or None): relative sampling weight per class;
            None means uniform weighting across classes
    Returns:
        np.ndarray, np.ndarray: class-balanced patches extracted from full
            images with the shape [batch, example_size..., image_channels]
    """
    assert image.shape[:-1] == label.shape, 'Image and label shape must match'
    assert image.ndim - 1 == len(example_size), \
        'Example size doesnt fit image size'
    assert all([i_s >= e_s for i_s, e_s in zip(image.shape, example_size)]), \
        'Image must be larger than example shape'
    rank = len(example_size)

    if isinstance(classes, int):
        classes = tuple(range(classes))
    n_classes = len(classes)

    assert n_examples >= n_classes, \
        'n_examples need to be greater than n_classes'

    # Decide how many patches to draw for each class
    if class_weights is None:
        n_ex_per_class = np.ones(n_classes).astype(int) * int(np.round(n_examples / n_classes))
    else:
        assert len(class_weights) == n_classes, \
            'Class_weights must match number of classes'
        class_weights = np.array(class_weights)
        n_ex_per_class = np.round((class_weights / class_weights.sum())
                                 * n_examples).astype(int)

    # Compute an example radius to define the region to extract around a
    # center location: floor(size/2) before the center, ceil(size/2) after.
    # Use the builtin int here: the np.int alias was removed in NumPy >= 1.24.
    ex_rad = np.array(list(zip(np.floor(np.array(example_size) / 2.0),
                           np.ceil(np.array(example_size) / 2.0))),
                      dtype=int)

    class_ex_images = []
    class_ex_lbls = []
    min_ratio = 1.
    for c_idx, c in enumerate(classes):
        # Get valid, random center locations belonging to that class
        idx = np.argwhere(label == c)

        ex_images = []
        ex_lbls = []

        if len(idx) == 0 or n_ex_per_class[c_idx] == 0:
            class_ex_images.append([])
            class_ex_lbls.append([])
            continue

        # Extract random locations (without replacement, capped by how many
        # voxels of this class actually exist)
        r_idx_idx = np.random.choice(len(idx),
                                     size=min(n_ex_per_class[c_idx], len(idx)),
                                     replace=False).astype(int)
        r_idx = idx[r_idx_idx]

        # Shift centers that are too close to a volume boundary so a full
        # patch always fits inside the image
        r_idx = np.array(
            [np.array([max(min(r[dim], image.shape[dim] - ex_rad[dim][1]),
                          ex_rad[dim][0]) for dim in range(rank)])
             for r in r_idx])

        for i in range(len(r_idx)):
            # Extract class-balanced examples from the original image.
            # Use a tuple of slices: indexing with a list of slices is an
            # error in modern NumPy.
            slicer = tuple(slice(r_idx[i][dim] - ex_rad[dim][0],
                                 r_idx[i][dim] + ex_rad[dim][1])
                           for dim in range(rank))

            ex_image = image[slicer][np.newaxis, :]
            ex_lbl = label[slicer][np.newaxis, :]

            # Concatenate them and return the examples
            ex_images = np.concatenate((ex_images, ex_image), axis=0) \
                if (len(ex_images) != 0) else ex_image
            ex_lbls = np.concatenate((ex_lbls, ex_lbl), axis=0) \
                if (len(ex_lbls) != 0) else ex_lbl

        class_ex_images.append(ex_images)
        class_ex_lbls.append(ex_lbls)

        ratio = n_ex_per_class[c_idx] / len(ex_images)
        min_ratio = ratio if ratio < min_ratio else min_ratio

    # Trim every class to the same achievable ratio so the output stays
    # balanced even when one class had fewer voxels than requested
    indices = np.floor(n_ex_per_class * min_ratio).astype(int)

    ex_images = np.concatenate([cimage[:idxs] for cimage, idxs in zip(class_ex_images, indices)
                                if len(cimage) > 0], axis=0)
    ex_lbls = np.concatenate([clbl[:idxs] for clbl, idxs in zip(class_ex_lbls, indices)
                              if len(clbl) > 0], axis=0)

    return ex_images, ex_lbls
def extract_random_example_array(image_list,
                                 example_size=[1, 64, 64],
                                 n_examples=1):
    """Randomly extract training examples from image (and a corresponding label).
    Returns an image example array and the corresponding label array.
    Args:
        image_list (np.ndarray or list or tuple): image(s) to extract random
            patches from
        example_size (list or tuple): shape of the patches to extract
        n_examples (int): number of patches to extract in total
    Returns:
        np.ndarray or list of np.ndarray: patches with shape
            [n_examples, example_size..., image_channels]; a single array if
            a single image was passed, otherwise one array per input image
    """
    assert n_examples > 0

    was_singular = False
    if isinstance(image_list, np.ndarray):
        image_list = [image_list]
        was_singular = True

    assert all([i_s >= e_s for i_s, e_s in zip(image_list[0].shape, example_size)]), \
        'Image must be bigger than example shape'
    assert (image_list[0].ndim - 1 == len(example_size) or
            image_list[0].ndim == len(example_size)), \
        'Example size doesnt fit image size'

    # All images must share the same spatial shape so a single set of random
    # locations can be reused for every image (keeps image/label aligned)
    for i in image_list:
        if len(image_list) > 1:
            assert (i.ndim - 1 == image_list[0].ndim or
                    i.ndim == image_list[0].ndim or
                    i.ndim + 1 == image_list[0].ndim), \
                'Example size doesnt fit image size'
            assert all([i0_s == i_s for i0_s, i_s in zip(image_list[0].shape, i.shape)]), \
                'Image shapes must match'

    rank = len(example_size)

    # Extract random examples from image and label
    valid_loc_range = [image_list[0].shape[i] - example_size[i] for i in range(rank)]

    rnd_loc = [np.random.randint(valid_loc_range[dim], size=n_examples)
               if valid_loc_range[dim] > 0
               else np.zeros(n_examples, dtype=int) for dim in range(rank)]

    # One independent accumulator per image (avoid `[[]] * n`, which shares a
    # single list object between all entries)
    examples = [[] for _ in image_list]
    for i in range(n_examples):
        # Use a tuple of slices: indexing with a list of slices is an error
        # in modern NumPy
        slicer = tuple(slice(rnd_loc[dim][i], rnd_loc[dim][i] + example_size[dim])
                       for dim in range(rank))

        for j in range(len(image_list)):
            ex_image = image_list[j][slicer][np.newaxis]
            # Concatenate and return the examples
            examples[j] = np.concatenate((examples[j], ex_image), axis=0) \
                if (len(examples[j]) != 0) else ex_image

    if was_singular:
        return examples[0]
    return examples
| [
"numpy.random.normal",
"numpy.flip",
"numpy.reshape",
"numpy.random.rand",
"scipy.ndimage.filters.gaussian_filter",
"scipy.ndimage.interpolation.map_coordinates",
"numpy.random.random",
"numpy.ones",
"numpy.floor",
"numpy.array",
"numpy.random.randint",
"numpy.argwhere",
"numpy.zeros",
"nu... | [((877, 896), 'numpy.random.random', 'np.random.random', (['(1)'], {}), '(1)\n', (893, 896), True, 'import numpy as np\n'), ((1494, 1564), 'numpy.random.normal', 'np.random.normal', (['(0)', 'sigma', '([1] * (image.ndim - 1) + [image.shape[-1]])'], {}), '(0, sigma, [1] * (image.ndim - 1) + [image.shape[-1]])\n', (1510, 1564), True, 'import numpy as np\n'), ((1956, 1995), 'numpy.random.normal', 'np.random.normal', (['(0)', 'sigma', 'image.shape'], {}), '(0, sigma, image.shape)\n', (1972, 1995), True, 'import numpy as np\n'), ((3384, 3422), 'numpy.broadcast_arrays', 'np.broadcast_arrays', (['*np.ogrid[shapes]'], {}), '(*np.ogrid[shapes])\n', (3403, 3422), True, 'import numpy as np\n'), ((5596, 5619), 'numpy.array', 'np.array', (['class_weights'], {}), '(class_weights)\n', (5604, 5619), True, 'import numpy as np\n'), ((6184, 6207), 'numpy.argwhere', 'np.argwhere', (['(label == c)'], {}), '(label == c)\n', (6195, 6207), True, 'import numpy as np\n'), ((986, 1018), 'numpy.flip', 'np.flip', (['imagelist[i]'], {'axis': 'axis'}), '(imagelist[i], axis=axis)\n', (993, 1018), True, 'import numpy as np\n'), ((3176, 3234), 'scipy.ndimage.filters.gaussian_filter', 'gaussian_filter', (['array', 'sigma[jj]'], {'mode': '"""constant"""', 'cval': '(0)'}), "(array, sigma[jj], mode='constant', cval=0)\n", (3191, 3234), False, 'from scipy.ndimage.filters import gaussian_filter\n'), ((3576, 3632), 'scipy.ndimage.interpolation.map_coordinates', 'map_coordinates', (['image', 'indices'], {'order': '(0)', 'mode': '"""reflect"""'}), "(image, indices, order=0, mode='reflect')\n", (3591, 3632), False, 'from scipy.ndimage.interpolation import map_coordinates\n'), ((7821, 7857), 'numpy.floor', 'np.floor', (['(n_ex_per_class * min_ratio)'], {}), '(n_ex_per_class * min_ratio)\n', (7829, 7857), True, 'import numpy as np\n'), ((9980, 10036), 'numpy.random.randint', 'np.random.randint', (['valid_loc_range[dim]'], {'size': 'n_examples'}), '(valid_loc_range[dim], size=n_examples)\n', (9997, 
10036), True, 'import numpy as np\n'), ((10100, 10131), 'numpy.zeros', 'np.zeros', (['n_examples'], {'dtype': 'int'}), '(n_examples, dtype=int)\n', (10108, 10131), True, 'import numpy as np\n'), ((3120, 3148), 'numpy.random.rand', 'np.random.rand', (['*image.shape'], {}), '(*image.shape)\n', (3134, 3148), True, 'import numpy as np\n'), ((3457, 3479), 'numpy.reshape', 'np.reshape', (['x', '(-1, 1)'], {}), '(x, (-1, 1))\n', (3467, 3479), True, 'import numpy as np\n'), ((3489, 3502), 'numpy.array', 'np.array', (['out'], {}), '(out)\n', (3497, 3502), True, 'import numpy as np\n'), ((5421, 5453), 'numpy.round', 'np.round', (['(n_examples / n_classes)'], {}), '(n_examples / n_classes)\n', (5429, 5453), True, 'import numpy as np\n'), ((7387, 7432), 'numpy.concatenate', 'np.concatenate', (['(ex_images, ex_image)'], {'axis': '(0)'}), '((ex_images, ex_image), axis=0)\n', (7401, 7432), True, 'import numpy as np\n'), ((7512, 7553), 'numpy.concatenate', 'np.concatenate', (['(ex_lbls, ex_lbl)'], {'axis': '(0)'}), '((ex_lbls, ex_lbl), axis=0)\n', (7526, 7553), True, 'import numpy as np\n'), ((10522, 10569), 'numpy.concatenate', 'np.concatenate', (['(examples[j], ex_image)'], {'axis': '(0)'}), '((examples[j], ex_image), axis=0)\n', (10536, 10569), True, 'import numpy as np\n'), ((5384, 5402), 'numpy.ones', 'np.ones', (['n_classes'], {}), '(n_classes)\n', (5391, 5402), True, 'import numpy as np\n'), ((5854, 5876), 'numpy.array', 'np.array', (['example_size'], {}), '(example_size)\n', (5862, 5876), True, 'import numpy as np\n'), ((5924, 5946), 'numpy.array', 'np.array', (['example_size'], {}), '(example_size)\n', (5932, 5946), True, 'import numpy as np\n')] |
from __future__ import print_function
import numpy as np
from scipy.optimize import minimize
import scipy.special
from tqdm import tqdm
from amico.util import get_verbose
# Kaden's functionals
def F_norm_Diff_K(E0, Signal, sigma_diff):
    """Squared-residual SMT functional for Rician debiasing.

    Compares the measured Signal against the expected Rician-biased
    magnitude of the candidate noise-free signal E0 and returns the sum of
    squared differences.
    """
    variance = sigma_diff ** 2.0
    scale = np.sqrt(np.pi * variance / 2.0)
    # Expected magnitude of a Rician-distributed measurement with true
    # amplitude E0 (Laguerre polynomial of order 1/2)
    rician_mean = scale * scipy.special.eval_laguerre(
        0.5, -(E0 ** 2.0) / (2.0 * variance), out=None)
    return np.array(np.sum((Signal - rician_mean) ** 2.0))
def der_Diff(E0, Signal, sigma_diff):
    """Analytic gradient of F_norm_Diff_K with respect to E0 (elementwise)."""
    E0 = np.array(E0)
    variance = sigma_diff ** 2.0
    scale = np.sqrt(np.pi * variance / 2.0)
    arg = -(E0 ** 2.0) / (2.0 * variance)
    # d/dr of the squared residual: 2 * (Signal - model(E0))
    residual_part = 2.0 * (Signal - scale * scipy.special.eval_laguerre(0.5, arg))
    # Chain rule: derivative of the Rician mean with respect to E0
    model_part = scale * scipy.special.hyp1f1(0.5, 2.0, arg) * (-0.5 / (2.0 * variance)) * E0
    return residual_part * model_part
def debiasRician(DWI, SNR, mask, scheme):
    """Remove the Rician noise-floor bias from diffusion-weighted data.

    For every voxel inside `mask`, the noise level is estimated from the mean
    b0 signal and the given SNR, then the SMT functional `F_norm_Diff_K` is
    minimized (L-BFGS-B with the analytic gradient `der_Diff`) to recover the
    bias-corrected signal.

    Args:
        DWI (np.ndarray): 4D diffusion data (x, y, z, volumes)
        SNR (float): signal-to-noise ratio used to estimate sigma from b0
        mask (np.ndarray): 3D mask of voxels to process
        scheme: acquisition scheme; must expose `b0_idx` (indices of the b0
            volumes along the last DWI axis)

    Returns:
        np.ndarray: same shape as DWI, holding the debiased signal; voxels
        outside the mask are left as zeros.
    """
    debiased_DWI = np.zeros(DWI.shape)
    with tqdm(total=mask.sum(), ncols=70, bar_format='    |{bar}| {percentage:4.1f}%', disable=(get_verbose()<3)) as progress:
        for ix in range(DWI.shape[0]):
            for iy in range(DWI.shape[1]):
                for iz in range(DWI.shape[2]):
                    if mask[ix, iy, iz]:
                        # Noise sigma estimated from the voxel's mean b0 signal
                        b0 = DWI[ix, iy, iz, scheme.b0_idx].mean()
                        sigma_diff = b0 / SNR
                        # The measured signal is both the initial guess and
                        # the fixed data term of the functional
                        init_guess = DWI[ix, iy, iz, :].copy()
                        tmp = minimize(F_norm_Diff_K, init_guess,
                                       args=(init_guess, sigma_diff),
                                       method='L-BFGS-B', jac=der_Diff)
                        debiased_DWI[ix, iy, iz] = tmp.x
                        progress.update()
    return debiased_DWI
| [
"numpy.sqrt",
"scipy.optimize.minimize",
"numpy.array",
"numpy.zeros",
"amico.util.get_verbose"
] | [((456, 472), 'numpy.array', 'np.array', (['F_norm'], {}), '(F_norm)\n', (464, 472), True, 'import numpy as np\n'), ((521, 533), 'numpy.array', 'np.array', (['E0'], {}), '(E0)\n', (529, 533), True, 'import numpy as np\n'), ((572, 599), 'numpy.sqrt', 'np.sqrt', (['(np.pi * sig2 / 2.0)'], {}), '(np.pi * sig2 / 2.0)\n', (579, 599), True, 'import numpy as np\n'), ((866, 885), 'numpy.zeros', 'np.zeros', (['DWI.shape'], {}), '(DWI.shape)\n', (874, 885), True, 'import numpy as np\n'), ((327, 354), 'numpy.sqrt', 'np.sqrt', (['(np.pi * sig2 / 2.0)'], {}), '(np.pi * sig2 / 2.0)\n', (334, 354), True, 'import numpy as np\n'), ((993, 1006), 'amico.util.get_verbose', 'get_verbose', ([], {}), '()\n', (1004, 1006), False, 'from amico.util import get_verbose\n'), ((1390, 1494), 'scipy.optimize.minimize', 'minimize', (['F_norm_Diff_K', 'init_guess'], {'args': '(init_guess, sigma_diff)', 'method': '"""L-BFGS-B"""', 'jac': 'der_Diff'}), "(F_norm_Diff_K, init_guess, args=(init_guess, sigma_diff), method=\n 'L-BFGS-B', jac=der_Diff)\n", (1398, 1494), False, 'from scipy.optimize import minimize\n')] |
'''
Created by <NAME> 2020
# Read in WAV file into Python Class
sound1 = AudioProcessing('input.wav')
# Set the speed of the audio
sound1.set_audio_speed(0.5)
# Set the pitch of the audio
sound1.set_audio_pitch(2)
# Reverse the content of the audio
sound1.set_reverse()
# Add an echo to the audio
sound1.set_echo(1)
# Applies a bandpass filter between the (<low>, <high>) range of frequencies
sound.set_bandpass(50, 2600)
# Save the resulting processed audio data into a file
sound1.save_to_file('out.wav')
'''
import sys, wave
import numpy as np
from numpy import array, int16
from scipy.signal import lfilter, butter
from scipy.io.wavfile import read,write
from scipy import signal
import random
class AudioProcessing(object):
    """In-memory processor for WAV audio loaded via scipy.io.wavfile.

    Each `set_*` method transforms `self.audio_data` in place; call
    `save_to_file` to write the current state back to disk as 16-bit PCM.
    """
    # audio_data: numpy array of samples; sample_freq: sampling rate in Hz
    __slots__ = ('audio_data', 'sample_freq')
    def __init__(self, input_audio_path):
        '''Loads the sample rate and sample data from a WAV file'''
        self.sample_freq, self.audio_data = read(input_audio_path)
        # self.audio_data = AudioProcessing.convert_to_mono_audio(self.audio_data)
    def save_to_file(self, output_path):
        '''Writes a WAV file representation of the processed audio data'''
        # Cast to int16 so the output is standard 16-bit PCM
        write(output_path, self.sample_freq, array(self.audio_data, dtype = int16))
    def set_audio_speed(self, speed_factor):
        '''Sets the speed of the audio by a floating-point factor'''
        # Resample by striding through the samples at `speed_factor` steps;
        # indices beyond the end are dropped
        sound_index = np.round(np.arange(0, len(self.audio_data), speed_factor))
        self.audio_data = self.audio_data[sound_index[sound_index < len(self.audio_data)].astype(int)]
    def set_echo(self, delay):
        '''Applies an echo that is 0...<input audio duration in seconds> seconds from the beginning'''
        output_audio = np.zeros(len(self.audio_data))
        # Echo offset in samples
        output_delay = delay * self.sample_freq
        for count, e in enumerate(self.audio_data):
            # NOTE(review): for count < delay samples the negative index wraps
            # to the end of the array (Python indexing), so the echo source is
            # the tail of the clip there -- confirm this is intended.
            output_audio[count] = e + self.audio_data[count - int(output_delay)]
        self.audio_data = output_audio
    def set_volume(self, level):
        '''Sets the overall volume of the data via floating-point factor'''
        output_audio = np.zeros(len(self.audio_data))
        # Scale every sample by the gain factor
        for count, e in enumerate(self.audio_data):
            output_audio[count] = (e * level)
        self.audio_data = output_audio
    def set_reverse(self):
        '''Reverses the audio'''
        self.audio_data = self.audio_data[::-1]
    def set_audio_pitch(self, n, window_size=2**13, h=2**11):
        '''Sets the pitch of the audio to a certain threshold'''
        # Shift by n semitones: time-stretch by 1/factor, then speed up by
        # factor so duration is preserved while pitch changes
        factor = 2 ** (1.0 * n / 12.0)
        self._set_stretch(1.0 / factor, window_size, h)
        self.audio_data = self.audio_data[window_size:]
        self.set_audio_speed(factor)
    def _set_stretch(self, factor, window_size, h):
        '''Phase-vocoder time stretch: overlap-add of rephased FFT windows'''
        phase = np.zeros(window_size)
        hanning_window = np.hanning(window_size)
        result = np.zeros(int(len(self.audio_data) / factor + window_size))
        for i in np.arange(0, len(self.audio_data) - (window_size + h), h*factor):
            # Two potentially overlapping subarrays
            a1 = self.audio_data[int(i): int(i + window_size)]
            a2 = self.audio_data[int(i + h): int(i + window_size + h)]
            # The spectra of these arrays
            s1 = np.fft.fft(hanning_window * a1)
            s2 = np.fft.fft(hanning_window * a2)
            # Rephase all frequencies
            # NOTE(review): `% 2*np.pi` parses as `(... % 2) * np.pi`, not
            # modulo 2*pi -- looks like an operator-precedence slip inherited
            # from the source gist; confirm before changing.
            phase = (phase + np.angle(s2/s1)) % 2*np.pi
            a2_rephased = np.fft.ifft(np.abs(s2)*np.exp(1j*phase))
            i2 = int(i / factor)
            # Overlap-add the rephased window into the output buffer
            result[i2: i2 + window_size] += hanning_window*a2_rephased.real
        # normalize (16bit)
        result = ((2 ** (16 - 4)) * result/result.max())
        self.audio_data = result.astype('int16')
    def set_lowpass(self, cutoff_low, order=5):
        '''Applies a low pass filter'''
        # Butterworth design expects cutoffs normalized to the Nyquist rate
        nyquist = self.sample_freq / 2.0
        cutoff = cutoff_low / nyquist
        x, y = signal.butter(order, cutoff, btype='lowpass', analog=False)
        # filtfilt applies the filter forward and backward (zero phase shift)
        self.audio_data = signal.filtfilt(x, y, self.audio_data)
    def set_highpass(self, cutoff_high, order=5):
        '''Applies a high pass filter'''
        nyquist = self.sample_freq / 2.0
        cutoff = cutoff_high / nyquist
        x, y = signal.butter(order, cutoff, btype='highpass', analog=False)
        self.audio_data = signal.filtfilt(x, y, self.audio_data)
    def set_bandpass(self, cutoff_low, cutoff_high, order=5):
        '''Applies a band pass filter'''
        # Two normalized cutoffs: [low, high] edges of the pass band
        cutoff = np.zeros(2)
        nyquist = self.sample_freq / 2.0
        cutoff[0] = cutoff_low / nyquist
        cutoff[1] = cutoff_high / nyquist
        x, y = signal.butter(order, cutoff, btype='bandpass', analog=False)
        self.audio_data = signal.filtfilt(x, y, self.audio_data)
    @staticmethod
    def convert_to_mono_audio(input_audio):
        '''Returns a numpy array that represents the mono version of a stereo input'''
        output_audio = []
        temp_audio = input_audio.astype(float)
        # Average the two stereo channels sample by sample
        for e in temp_audio:
            output_audio.append((e[0] / 2) + (e[1] / 2))
        return np.array(output_audio, dtype = 'int16')
# Example
# applyDSP_worsenRand("1.wav", "out.wav")
def applyDSP_worsenRand(file_in, file_out):
    """Apply a random chain of quality-degrading DSP effects to a WAV file.

    Reads `file_in`, randomly perturbs volume/echo/speed, applies band-pass
    and high-pass filtering with random cutoffs, and writes the result to
    `file_out`.
    """
    sound1 = AudioProcessing(file_in)
    # Random gain in [0.95, 1.05]
    vol = 1-random.randint(-5, 5)/100
    # Echo delay in [0.03, 0.05] seconds
    echo = random.randint(3, 5)/100
    # NOTE(review): randint(5, 5) always returns 5, so speed is fixed at
    # 0.95 -- possibly meant randint(-5, 5) like `vol` above; confirm intent.
    speed = 1-random.randint(5, 5)/100
    sound1.set_volume(vol)
    # Apply the echo only half the time
    if random.randint(0, 1) == 1:
        sound1.set_echo(echo) # can cause audio crackling
    sound1.set_audio_speed(speed)
    band_1 = random.randint(50, 200)
    band_2 = random.randint(3000, 10000)
    highpass = random.randint(10, 350)
    sound1.set_highpass(highpass)
    sound1.set_bandpass(band_1, band_2)
    sound1.save_to_file(file_out)
| [
"numpy.hanning",
"numpy.abs",
"scipy.signal.filtfilt",
"numpy.fft.fft",
"scipy.signal.butter",
"numpy.angle",
"numpy.exp",
"numpy.array",
"numpy.zeros",
"scipy.io.wavfile.read",
"random.randint"
] | [((4956, 4979), 'random.randint', 'random.randint', (['(50)', '(200)'], {}), '(50, 200)\n', (4970, 4979), False, 'import random\n'), ((4990, 5017), 'random.randint', 'random.randint', (['(3000)', '(10000)'], {}), '(3000, 10000)\n', (5004, 5017), False, 'import random\n'), ((5030, 5053), 'random.randint', 'random.randint', (['(10)', '(350)'], {}), '(10, 350)\n', (5044, 5053), False, 'import random\n'), ((873, 895), 'scipy.io.wavfile.read', 'read', (['input_audio_path'], {}), '(input_audio_path)\n', (877, 895), False, 'from scipy.io.wavfile import read, write\n'), ((2510, 2531), 'numpy.zeros', 'np.zeros', (['window_size'], {}), '(window_size)\n', (2518, 2531), True, 'import numpy as np\n'), ((2551, 2574), 'numpy.hanning', 'np.hanning', (['window_size'], {}), '(window_size)\n', (2561, 2574), True, 'import numpy as np\n'), ((3496, 3555), 'scipy.signal.butter', 'signal.butter', (['order', 'cutoff'], {'btype': '"""lowpass"""', 'analog': '(False)'}), "(order, cutoff, btype='lowpass', analog=False)\n", (3509, 3555), False, 'from scipy import signal\n'), ((3576, 3614), 'scipy.signal.filtfilt', 'signal.filtfilt', (['x', 'y', 'self.audio_data'], {}), '(x, y, self.audio_data)\n', (3591, 3614), False, 'from scipy import signal\n'), ((3775, 3835), 'scipy.signal.butter', 'signal.butter', (['order', 'cutoff'], {'btype': '"""highpass"""', 'analog': '(False)'}), "(order, cutoff, btype='highpass', analog=False)\n", (3788, 3835), False, 'from scipy import signal\n'), ((3856, 3894), 'scipy.signal.filtfilt', 'signal.filtfilt', (['x', 'y', 'self.audio_data'], {}), '(x, y, self.audio_data)\n', (3871, 3894), False, 'from scipy import signal\n'), ((4001, 4012), 'numpy.zeros', 'np.zeros', (['(2)'], {}), '(2)\n', (4009, 4012), True, 'import numpy as np\n'), ((4128, 4188), 'scipy.signal.butter', 'signal.butter', (['order', 'cutoff'], {'btype': '"""bandpass"""', 'analog': '(False)'}), "(order, cutoff, btype='bandpass', analog=False)\n", (4141, 4188), False, 'from scipy import signal\n'), 
((4209, 4247), 'scipy.signal.filtfilt', 'signal.filtfilt', (['x', 'y', 'self.audio_data'], {}), '(x, y, self.audio_data)\n', (4224, 4247), False, 'from scipy import signal\n'), ((4529, 4566), 'numpy.array', 'np.array', (['output_audio'], {'dtype': '"""int16"""'}), "(output_audio, dtype='int16')\n", (4537, 4566), True, 'import numpy as np\n'), ((4745, 4765), 'random.randint', 'random.randint', (['(3)', '(5)'], {}), '(3, 5)\n', (4759, 4765), False, 'import random\n'), ((4835, 4855), 'random.randint', 'random.randint', (['(0)', '(1)'], {}), '(0, 1)\n', (4849, 4855), False, 'import random\n'), ((1120, 1155), 'numpy.array', 'array', (['self.audio_data'], {'dtype': 'int16'}), '(self.audio_data, dtype=int16)\n', (1125, 1155), False, 'from numpy import array, int16\n'), ((2924, 2955), 'numpy.fft.fft', 'np.fft.fft', (['(hanning_window * a1)'], {}), '(hanning_window * a1)\n', (2934, 2955), True, 'import numpy as np\n'), ((2964, 2995), 'numpy.fft.fft', 'np.fft.fft', (['(hanning_window * a2)'], {}), '(hanning_window * a2)\n', (2974, 2995), True, 'import numpy as np\n'), ((4711, 4732), 'random.randint', 'random.randint', (['(-5)', '(5)'], {}), '(-5, 5)\n', (4725, 4732), False, 'import random\n'), ((4781, 4801), 'random.randint', 'random.randint', (['(5)', '(5)'], {}), '(5, 5)\n', (4795, 4801), False, 'import random\n'), ((3103, 3113), 'numpy.abs', 'np.abs', (['s2'], {}), '(s2)\n', (3109, 3113), True, 'import numpy as np\n'), ((3114, 3134), 'numpy.exp', 'np.exp', (['(1.0j * phase)'], {}), '(1.0j * phase)\n', (3120, 3134), True, 'import numpy as np\n'), ((3046, 3063), 'numpy.angle', 'np.angle', (['(s2 / s1)'], {}), '(s2 / s1)\n', (3054, 3063), True, 'import numpy as np\n')] |
import csv
import re
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from PIL import Image
import pandas as pd
from bokeh.document import Document
from bokeh.embed import file_html
from bokeh.layouts import gridplot
from bokeh.models import (BasicTicker, Circle, ColumnDataSource, DataRange1d,
Grid, LinearAxis, PanTool, Plot, WheelZoomTool,)
from bokeh.resources import INLINE
from bokeh.util.browser import view
def splom(data_csv):
    """Build and display a scatter-plot matrix (SPLOM) of per-row scores.

    Each row of ``data_csv`` is expected to look like
    ``name, <ignored>, score_1, score_2, ...``; rows whose first field
    contains "Generated" are drawn in red, all others in blue.  The grid of
    pairwise scatter plots is written to ``augmentation_splom.html`` and
    opened in the default browser.
    """
    # Read the CSV: one color per row, plus the numeric score columns.
    colors = []
    scores = []
    with open(data_csv, newline='') as csvfile:
        datareader = csv.reader(csvfile)
        for row in datareader:
            if "Generated" in row[0]:
                colors.append('red')
            else:
                colors.append('blue')
            scores.append(row[2:])
    scores = np.array(scores)
    # Build one scatter plot per (x, y) score pair; axis labels are only
    # drawn on the left column and bottom row of the grid.
    plots = []
    n_dims = len(scores[0])
    for y in range(n_dims):
        row = []
        for x in range(n_dims):
            xax = (y == n_dims - 1)
            yax = (x == 0)
            xscores = [float(item) for item in scores[:, x]]
            yscores = [float(item) for item in scores[:, y]]
            source = ColumnDataSource(dict(x=xscores, y=yscores, colors=colors))
            plot = make_plot(source, x, y, xax, yax)
            row.append(plot)
        plots.append(row)
    grid = gridplot(plots)
    doc = Document()
    doc.add_root(grid)
    doc.validate()
    filename = "augmentation_splom.html"
    with open(filename, "w") as f:
        f.write(file_html(doc, INLINE, "Data SPLOM"))
    print("Wrote %s" % filename)
    view(filename)
def make_plot(source, xindex, yindex, xax=False, yax=False):
    """Create one scatter-plot cell of the SPLOM grid.

    ``source`` must provide ``x``, ``y`` and ``colors`` columns.  Axes are
    only attached when ``xax`` / ``yax`` are true, so interior cells of the
    grid stay compact.
    """
    x_range = DataRange1d(bounds=None)
    y_range = DataRange1d(bounds=None)
    # Reserve extra border room on the cells that carry axis labels.
    left_margin = 40 if yax else 0
    bottom_margin = 40 if xax else 0
    plot = Plot(
        x_range=x_range, y_range=y_range, background_fill_color="#efe8e2",
        border_fill_color='white',
        plot_width=200 + left_margin, plot_height=200 + bottom_margin,
        min_border_left=2 + left_margin, min_border_right=2,
        min_border_top=2, min_border_bottom=2 + bottom_margin)
    marker = Circle(x='x', y='y', fill_color="colors", fill_alpha=0.2,
                    size=4, line_color="colors")
    renderer = plot.add_glyph(source, marker)
    x_range.renderers.append(renderer)
    y_range.renderers.append(renderer)
    # Horizontal axis/grid: only bottom-row cells get a labelled axis.
    x_ticker = BasicTicker()
    if xax:
        x_axis = LinearAxis()
        x_axis.axis_label = str(xindex)
        plot.add_layout(x_axis, 'below')
        x_ticker = x_axis.ticker
    plot.add_layout(Grid(dimension=0, ticker=x_ticker))
    # Vertical axis/grid: only left-column cells get a labelled axis.
    y_ticker = BasicTicker()
    if yax:
        y_axis = LinearAxis()
        y_axis.axis_label = str(yindex)
        y_axis.major_label_orientation = 'vertical'
        plot.add_layout(y_axis, 'left')
        y_ticker = y_axis.ticker
    plot.add_layout(Grid(dimension=1, ticker=y_ticker))
    plot.add_tools(PanTool(), WheelZoomTool())
    return plot
def violin(data_csv):
    """Plot per-mode PCA score distributions as split violin plots.

    Each row of ``data_csv`` is ``name, <ignored>, score_1, score_2, ...``;
    rows whose first field contains "Generated" are compared against the
    remaining ("Original") rows, one split violin per PCA mode.  The figure
    is saved to ``violin.png``.
    """
    # Flatten the CSV into long form: one (type, mode, score) record per cell.
    types = []
    dims = []
    scores = []
    with open(data_csv, newline='') as csvfile:
        datareader = csv.reader(csvfile)
        for row in datareader:
            current_type = "Original"
            if "Generated" in row[0]:
                current_type = "Generated"
            for index in range(2, len(row)):
                types.append(current_type)
                dims.append(str(index - 1))  # modes are 1-based in the plot
                scores.append(float(row[index]))
    df = pd.DataFrame({'Data_Type': types, 'PCA_Mode': dims, "PCA_Score": scores})
    # Split violins: Generated vs Original share each violin, scaled by count.
    sns.set_style("whitegrid")
    sns.violinplot(x=df.PCA_Mode, y=df.PCA_Score, hue=df.Data_Type,
                   data=df, palette="Set2", split=True, scale="count")
    # Save and show
    plt.savefig("violin.png")
# img.show() | [
"bokeh.models.Circle",
"matplotlib.pyplot.savefig",
"pandas.DataFrame",
"bokeh.models.Grid",
"bokeh.layouts.gridplot",
"seaborn.set_style",
"numpy.array",
"bokeh.models.LinearAxis",
"bokeh.models.PanTool",
"seaborn.violinplot",
"bokeh.models.BasicTicker",
"bokeh.models.Plot",
"bokeh.document... | [((853, 869), 'numpy.array', 'np.array', (['scores'], {}), '(scores)\n', (861, 869), True, 'import numpy as np\n'), ((1398, 1413), 'bokeh.layouts.gridplot', 'gridplot', (['plots'], {}), '(plots)\n', (1406, 1413), False, 'from bokeh.layouts import gridplot\n'), ((1425, 1435), 'bokeh.document.Document', 'Document', ([], {}), '()\n', (1433, 1435), False, 'from bokeh.document import Document\n'), ((1646, 1660), 'bokeh.util.browser.view', 'view', (['filename'], {}), '(filename)\n', (1650, 1660), False, 'from bokeh.util.browser import view\n'), ((1734, 1758), 'bokeh.models.DataRange1d', 'DataRange1d', ([], {'bounds': 'None'}), '(bounds=None)\n', (1745, 1758), False, 'from bokeh.models import BasicTicker, Circle, ColumnDataSource, DataRange1d, Grid, LinearAxis, PanTool, Plot, WheelZoomTool\n'), ((1769, 1793), 'bokeh.models.DataRange1d', 'DataRange1d', ([], {'bounds': 'None'}), '(bounds=None)\n', (1780, 1793), False, 'from bokeh.models import BasicTicker, Circle, ColumnDataSource, DataRange1d, Grid, LinearAxis, PanTool, Plot, WheelZoomTool\n'), ((1859, 2096), 'bokeh.models.Plot', 'Plot', ([], {'x_range': 'xdr', 'y_range': 'ydr', 'background_fill_color': '"""#efe8e2"""', 'border_fill_color': '"""white"""', 'plot_width': '(200 + mbl)', 'plot_height': '(200 + mbb)', 'min_border_left': '(2 + mbl)', 'min_border_right': '(2)', 'min_border_top': '(2)', 'min_border_bottom': '(2 + mbb)'}), "(x_range=xdr, y_range=ydr, background_fill_color='#efe8e2',\n border_fill_color='white', plot_width=200 + mbl, plot_height=200 + mbb,\n min_border_left=2 + mbl, min_border_right=2, min_border_top=2,\n min_border_bottom=2 + mbb)\n", (1863, 2096), False, 'from bokeh.models import BasicTicker, Circle, ColumnDataSource, DataRange1d, Grid, LinearAxis, PanTool, Plot, WheelZoomTool\n'), ((2120, 2210), 'bokeh.models.Circle', 'Circle', ([], {'x': '"""x"""', 'y': '"""y"""', 'fill_color': '"""colors"""', 'fill_alpha': '(0.2)', 'size': '(4)', 'line_color': '"""colors"""'}), "(x='x', 
y='y', fill_color='colors', fill_alpha=0.2, size=4,\n line_color='colors')\n", (2126, 2210), False, 'from bokeh.models import BasicTicker, Circle, ColumnDataSource, DataRange1d, Grid, LinearAxis, PanTool, Plot, WheelZoomTool\n'), ((2318, 2331), 'bokeh.models.BasicTicker', 'BasicTicker', ([], {}), '()\n', (2329, 2331), False, 'from bokeh.models import BasicTicker, Circle, ColumnDataSource, DataRange1d, Grid, LinearAxis, PanTool, Plot, WheelZoomTool\n'), ((2553, 2566), 'bokeh.models.BasicTicker', 'BasicTicker', ([], {}), '()\n', (2564, 2566), False, 'from bokeh.models import BasicTicker, Circle, ColumnDataSource, DataRange1d, Grid, LinearAxis, PanTool, Plot, WheelZoomTool\n'), ((3489, 3507), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {}), '(data)\n', (3501, 3507), True, 'import pandas as pd\n'), ((3524, 3550), 'seaborn.set_style', 'sns.set_style', (['"""whitegrid"""'], {}), "('whitegrid')\n", (3537, 3550), True, 'import seaborn as sns\n'), ((3560, 3679), 'seaborn.violinplot', 'sns.violinplot', ([], {'x': 'df.PCA_Mode', 'y': 'df.PCA_Score', 'hue': 'df.Data_Type', 'data': 'df', 'palette': '"""Set2"""', 'split': '(True)', 'scale': '"""count"""'}), "(x=df.PCA_Mode, y=df.PCA_Score, hue=df.Data_Type, data=df,\n palette='Set2', split=True, scale='count')\n", (3574, 3679), True, 'import seaborn as sns\n'), ((3724, 3749), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""violin.png"""'], {}), "('violin.png')\n", (3735, 3749), True, 'import matplotlib.pyplot as plt\n'), ((605, 624), 'csv.reader', 'csv.reader', (['csvfile'], {}), '(csvfile)\n', (615, 624), False, 'import csv\n'), ((2360, 2372), 'bokeh.models.LinearAxis', 'LinearAxis', ([], {}), '()\n', (2370, 2372), False, 'from bokeh.models import BasicTicker, Circle, ColumnDataSource, DataRange1d, Grid, LinearAxis, PanTool, Plot, WheelZoomTool\n'), ((2503, 2536), 'bokeh.models.Grid', 'Grid', ([], {'dimension': '(0)', 'ticker': 'xticker'}), '(dimension=0, ticker=xticker)\n', (2507, 2536), False, 'from bokeh.models import 
BasicTicker, Circle, ColumnDataSource, DataRange1d, Grid, LinearAxis, PanTool, Plot, WheelZoomTool\n'), ((2595, 2607), 'bokeh.models.LinearAxis', 'LinearAxis', ([], {}), '()\n', (2605, 2607), False, 'from bokeh.models import BasicTicker, Circle, ColumnDataSource, DataRange1d, Grid, LinearAxis, PanTool, Plot, WheelZoomTool\n'), ((2788, 2821), 'bokeh.models.Grid', 'Grid', ([], {'dimension': '(1)', 'ticker': 'yticker'}), '(dimension=1, ticker=yticker)\n', (2792, 2821), False, 'from bokeh.models import BasicTicker, Circle, ColumnDataSource, DataRange1d, Grid, LinearAxis, PanTool, Plot, WheelZoomTool\n'), ((2843, 2852), 'bokeh.models.PanTool', 'PanTool', ([], {}), '()\n', (2850, 2852), False, 'from bokeh.models import BasicTicker, Circle, ColumnDataSource, DataRange1d, Grid, LinearAxis, PanTool, Plot, WheelZoomTool\n'), ((2854, 2869), 'bokeh.models.WheelZoomTool', 'WheelZoomTool', ([], {}), '()\n', (2867, 2869), False, 'from bokeh.models import BasicTicker, Circle, ColumnDataSource, DataRange1d, Grid, LinearAxis, PanTool, Plot, WheelZoomTool\n'), ((3045, 3064), 'csv.reader', 'csv.reader', (['csvfile'], {}), '(csvfile)\n', (3055, 3064), False, 'import csv\n'), ((1571, 1607), 'bokeh.embed.file_html', 'file_html', (['doc', 'INLINE', '"""Data SPLOM"""'], {}), "(doc, INLINE, 'Data SPLOM')\n", (1580, 1607), False, 'from bokeh.embed import file_html\n')] |
import time
import unittest
import numpy as np
from collections import defaultdict
from sklearn.datasets import make_classification, make_regression
from sklearn.metrics import f1_score
from sklearn.model_selection import KFold
from sklearn.svm import SVC
from ITMO_FS.ensembles.measure_based import *
from ITMO_FS.ensembles.ranking_based import *
from ITMO_FS.filters.univariate import *
class MyTestCase(unittest.TestCase):
    """Smoke tests and micro-benchmarks for the ITMO_FS ensemble selectors."""

    # Shared synthetic datasets, generated once when the class is defined.
    wide_classification = make_classification(n_features=2000, n_informative=100, n_redundant=500)
    tall_classification = make_classification(n_samples=50000, n_features=100, n_informative=23, n_redundant=30)
    wide_regression = make_regression(n_features=2000, n_informative=100)
    tall_regression = make_regression(n_samples=50000, n_features=200, n_informative=50)

    def test_ranking_based_ensemble(self):
        """Mixed ensemble + borda fusion should rank features deterministically."""
        data, target = self.wide_classification[0], self.wide_classification[1]
        filters = [gini_index,
                   fechner_corr,
                   spearman_corr,
                   pearson_corr]
        ensemble = Mixed(filters)
        ensemble.fit(data, target)
        ensemble.transform(data, 100, borda_fusion)
        # Five identical rankings must fuse into the same (reversed) order.
        d = [{'f' + str(i): i for i in range(100)}.items()] * 5
        self.assertEqual(borda_fusion(d, 100), ['f' + str(i) for i in reversed(range(100))])
        ensemble.transform(data, 100)
        self.assertEqual(borda_fusion(d, 100), ['f' + str(i) for i in reversed(range(100))])

    def test_weight_based_ensemble(self):
        """WeightBased ensemble should accept explicit per-filter weights."""
        data, target = self.wide_classification[0], self.wide_classification[1]
        filters = [UnivariateFilter(gini_index),
                   UnivariateFilter(fechner_corr),
                   UnivariateFilter(spearman_corr),
                   UnivariateFilter(pearson_corr)]
        ensemble = WeightBased(filters)
        ensemble.fit(data, target)
        weights = [0.5, 0.5, 0.5, 0.5]
        ensemble.transform(data, select_k_best(100), weights=weights)

    def test_benching_ensembles(self):
        """Benchmark the Mixed ensemble against individual filters.

        For each synthetic dataset size this measures wall-clock selection
        time and 10-fold SVC f1 score with (a) no selection, (b) each filter
        alone and (c) the ensemble.  Results are printed, not asserted.
        """
        datasets = [make_classification(n_samples=2000, n_features=20 * i, n_informative=i, n_redundant=5 * i) for i in
                    [2, 10, 20, 50, 100, 200, 500, 1000]]
        filters = [gini_index,
                   fechner_corr,
                   spearman_corr,
                   pearson_corr]
        kfold = KFold(n_splits=10)
        for dataset in datasets:
            X, y = dataset
            k = int(X.shape[1] * 0.1)  # keep the top 10% of features
            time_ens_start = []
            time_ens_end = []
            time_filter_start = defaultdict(list)
            time_filter_end = defaultdict(list)
            scores_ens = []
            scores_filters = defaultdict(list)
            scores_no_fs = []
            for train_index, test_index in kfold.split(X):
                # Baseline: no feature selection at all.
                svm = SVC()
                svm.fit(X[train_index], y[train_index])
                y_pred = svm.predict(X[test_index])
                scores_no_fs.append(f1_score(y[test_index], y_pred))

                # Ensemble-based selection.
                time_ens_start.append(time.time())
                ensemble = Mixed(filters)
                ensemble.fit(X[train_index], y[train_index])
                X_transformed = ensemble.transform(X, k, borda_fusion)
                time_ens_end.append(time.time())
                svm = SVC()
                svm.fit(X_transformed[train_index], y[train_index])
                y_pred = svm.predict(X_transformed[test_index])
                scores_ens.append(f1_score(y[test_index], y_pred))

                # Each filter on its own.  ("fltr" avoids shadowing the
                # builtin ``filter``.)
                for fltr in filters:
                    time_filter_start[fltr.__name__].append(time.time())
                    univ_filter = UnivariateFilter(fltr, cutting_rule=("K best", k))
                    univ_filter.fit(X[train_index], y[train_index])
                    X_transformed = univ_filter.transform(X)
                    time_filter_end[fltr.__name__].append(time.time())
                    svm = SVC()
                    svm.fit(X_transformed[train_index], y[train_index])
                    y_pred = svm.predict(X_transformed[test_index])
                    scores_filters[fltr.__name__].append(f1_score(y[test_index], y_pred))

            print('Dataset size', X.shape)
            sum_time = 0
            for fltr in filters:
                filter_dif = np.array(time_filter_end[fltr.__name__]) - np.array(time_filter_start[fltr.__name__])
                print('Filter ' + fltr.__name__ + ' time', np.mean(filter_dif), np.std(filter_dif))
                sum_time += np.mean(filter_dif)
            ens_dif = np.array(time_ens_end) - np.array(time_ens_start)
            print('Ensemble time', np.mean(ens_dif), np.std(ens_dif))
            print('Sum of filter time', sum_time)
            print('No fs score', np.mean(scores_no_fs), np.std(scores_no_fs))
            for fltr in filters:
                # Fixed label: this line previously said "time" while
                # printing the f1 scores.
                print('Filter ' + fltr.__name__ + ' score', np.mean(scores_filters[fltr.__name__]),
                      np.std(scores_filters[fltr.__name__]))
            print('Ensemble score', np.mean(scores_ens), np.std(scores_ens))
            print()
# Run the test suite when this module is executed directly.
if __name__ == '__main__':
    unittest.main()
| [
"numpy.mean",
"sklearn.svm.SVC",
"sklearn.datasets.make_regression",
"sklearn.metrics.f1_score",
"numpy.array",
"collections.defaultdict",
"numpy.std",
"unittest.main",
"sklearn.model_selection.KFold",
"time.time",
"sklearn.datasets.make_classification"
] | [((456, 528), 'sklearn.datasets.make_classification', 'make_classification', ([], {'n_features': '(2000)', 'n_informative': '(100)', 'n_redundant': '(500)'}), '(n_features=2000, n_informative=100, n_redundant=500)\n', (475, 528), False, 'from sklearn.datasets import make_classification, make_regression\n'), ((555, 645), 'sklearn.datasets.make_classification', 'make_classification', ([], {'n_samples': '(50000)', 'n_features': '(100)', 'n_informative': '(23)', 'n_redundant': '(30)'}), '(n_samples=50000, n_features=100, n_informative=23,\n n_redundant=30)\n', (574, 645), False, 'from sklearn.datasets import make_classification, make_regression\n'), ((664, 715), 'sklearn.datasets.make_regression', 'make_regression', ([], {'n_features': '(2000)', 'n_informative': '(100)'}), '(n_features=2000, n_informative=100)\n', (679, 715), False, 'from sklearn.datasets import make_classification, make_regression\n'), ((738, 804), 'sklearn.datasets.make_regression', 'make_regression', ([], {'n_samples': '(50000)', 'n_features': '(200)', 'n_informative': '(50)'}), '(n_samples=50000, n_features=200, n_informative=50)\n', (753, 804), False, 'from sklearn.datasets import make_classification, make_regression\n'), ((5147, 5162), 'unittest.main', 'unittest.main', ([], {}), '()\n', (5160, 5162), False, 'import unittest\n'), ((2347, 2365), 'sklearn.model_selection.KFold', 'KFold', ([], {'n_splits': '(10)'}), '(n_splits=10)\n', (2352, 2365), False, 'from sklearn.model_selection import KFold\n'), ((2040, 2134), 'sklearn.datasets.make_classification', 'make_classification', ([], {'n_samples': '(2000)', 'n_features': '(20 * i)', 'n_informative': 'i', 'n_redundant': '(5 * i)'}), '(n_samples=2000, n_features=20 * i, n_informative=i,\n n_redundant=5 * i)\n', (2059, 2134), False, 'from sklearn.datasets import make_classification, make_regression\n'), ((2560, 2577), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (2571, 2577), False, 'from collections import defaultdict\n'), 
((2608, 2625), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (2619, 2625), False, 'from collections import defaultdict\n'), ((2684, 2701), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (2695, 2701), False, 'from collections import defaultdict\n'), ((2814, 2819), 'sklearn.svm.SVC', 'SVC', ([], {}), '()\n', (2817, 2819), False, 'from sklearn.svm import SVC\n'), ((3295, 3300), 'sklearn.svm.SVC', 'SVC', ([], {}), '()\n', (3298, 3300), False, 'from sklearn.svm import SVC\n'), ((4523, 4542), 'numpy.mean', 'np.mean', (['filter_dif'], {}), '(filter_dif)\n', (4530, 4542), True, 'import numpy as np\n'), ((4566, 4588), 'numpy.array', 'np.array', (['time_ens_end'], {}), '(time_ens_end)\n', (4574, 4588), True, 'import numpy as np\n'), ((4591, 4615), 'numpy.array', 'np.array', (['time_ens_start'], {}), '(time_ens_start)\n', (4599, 4615), True, 'import numpy as np\n'), ((4651, 4667), 'numpy.mean', 'np.mean', (['ens_dif'], {}), '(ens_dif)\n', (4658, 4667), True, 'import numpy as np\n'), ((4669, 4684), 'numpy.std', 'np.std', (['ens_dif'], {}), '(ens_dif)\n', (4675, 4684), True, 'import numpy as np\n'), ((4770, 4791), 'numpy.mean', 'np.mean', (['scores_no_fs'], {}), '(scores_no_fs)\n', (4777, 4791), True, 'import numpy as np\n'), ((4793, 4813), 'numpy.std', 'np.std', (['scores_no_fs'], {}), '(scores_no_fs)\n', (4799, 4813), True, 'import numpy as np\n'), ((5054, 5073), 'numpy.mean', 'np.mean', (['scores_ens'], {}), '(scores_ens)\n', (5061, 5073), True, 'import numpy as np\n'), ((5075, 5093), 'numpy.std', 'np.std', (['scores_ens'], {}), '(scores_ens)\n', (5081, 5093), True, 'import numpy as np\n'), ((2964, 2995), 'sklearn.metrics.f1_score', 'f1_score', (['y[test_index]', 'y_pred'], {}), '(y[test_index], y_pred)\n', (2972, 2995), False, 'from sklearn.metrics import f1_score\n'), ((3036, 3047), 'time.time', 'time.time', ([], {}), '()\n', (3045, 3047), False, 'import time\n'), ((3259, 3270), 'time.time', 'time.time', ([], {}), '()\n', 
(3268, 3270), False, 'import time\n'), ((3467, 3498), 'sklearn.metrics.f1_score', 'f1_score', (['y[test_index]', 'y_pred'], {}), '(y[test_index], y_pred)\n', (3475, 3498), False, 'from sklearn.metrics import f1_score\n'), ((3931, 3936), 'sklearn.svm.SVC', 'SVC', ([], {}), '()\n', (3934, 3936), False, 'from sklearn.svm import SVC\n'), ((4303, 4345), 'numpy.array', 'np.array', (['time_filter_end[filter.__name__]'], {}), '(time_filter_end[filter.__name__])\n', (4311, 4345), True, 'import numpy as np\n'), ((4348, 4392), 'numpy.array', 'np.array', (['time_filter_start[filter.__name__]'], {}), '(time_filter_start[filter.__name__])\n', (4356, 4392), True, 'import numpy as np\n'), ((4454, 4473), 'numpy.mean', 'np.mean', (['filter_dif'], {}), '(filter_dif)\n', (4461, 4473), True, 'import numpy as np\n'), ((4475, 4493), 'numpy.std', 'np.std', (['filter_dif'], {}), '(filter_dif)\n', (4481, 4493), True, 'import numpy as np\n'), ((4912, 4952), 'numpy.mean', 'np.mean', (['scores_filters[filter.__name__]'], {}), '(scores_filters[filter.__name__])\n', (4919, 4952), True, 'import numpy as np\n'), ((4976, 5015), 'numpy.std', 'np.std', (['scores_filters[filter.__name__]'], {}), '(scores_filters[filter.__name__])\n', (4982, 5015), True, 'import numpy as np\n'), ((3602, 3613), 'time.time', 'time.time', ([], {}), '()\n', (3611, 3613), False, 'import time\n'), ((3891, 3902), 'time.time', 'time.time', ([], {}), '()\n', (3900, 3902), False, 'import time\n'), ((4136, 4167), 'sklearn.metrics.f1_score', 'f1_score', (['y[test_index]', 'y_pred'], {}), '(y[test_index], y_pred)\n', (4144, 4167), False, 'from sklearn.metrics import f1_score\n')] |
import torch
import os
import math
import numpy as np
from copy import deepcopy
from pycls.core.config import cfg
import pycls.utils.distributed as du
from tqdm import tqdm
class AdversarySampler:
    """VAAL-style adversarial active-learning sampler.

    Scores unlabelled points with a VAE encoder plus a discriminator and
    picks the ``budget`` points most likely to be unlabelled; also provides
    greedy k-center (core-set) utilities over the latent space.
    """

    def __init__(self, budget):
        # Number of points to move from uSet to activeSet per AL round.
        self.budget = budget
        self.cuda_id = torch.cuda.current_device()

    def compute_dists(self, X, X_train):
        """Squared Euclidean distances between rows of X and rows of X_train.

        Uses the expansion ||a - b||^2 = ||a||^2 - 2 a.b + ||b||^2 and
        returns a (len(X), len(X_train)) matrix.
        """
        dists = (
            -2 * np.dot(X, X_train.T)
            + np.sum(X_train**2, axis=1)
            + np.sum(X**2, axis=1)[:, np.newaxis]
        )
        return dists

    def greedy_k_center(self, labeled, unlabeled):
        """Greedy k-center (core-set) selection.

        Repeatedly picks the unlabelled point farthest from the labelled
        pool (plus everything picked so far),
        ``cfg.ACTIVE_LEARNING.BUDGET_SIZE`` times in total.

        Returns (greedy_indices, remainSet) as indices into ``unlabeled``.
        """
        greedy_indices = []
        # Minimum distance from each unlabelled point to the labelled pool,
        # accumulated chunk by chunk to avoid building the full matrix.
        min_dist = np.min(
            self.compute_dists(labeled[0, :].reshape((1, labeled.shape[1])), unlabeled),
            axis=0,
        )
        min_dist = min_dist.reshape((1, min_dist.shape[0]))
        temp_range = 1000
        for j in range(1, labeled.shape[0], temp_range):
            if j + temp_range < labeled.shape[0]:
                dist = self.compute_dists(labeled[j : j + temp_range, :], unlabeled)
            else:
                # last (possibly shorter) chunk
                dist = self.compute_dists(labeled[j:, :], unlabeled)
            min_dist = np.vstack(
                (min_dist, np.min(dist, axis=0).reshape((1, min_dist.shape[1])))
            )
            min_dist = np.min(min_dist, axis=0)
            min_dist = min_dist.reshape((1, min_dist.shape[0]))

        # Iteratively add the farthest point and refresh the minima.
        farthest = np.argmax(min_dist)
        greedy_indices.append(farthest)
        amount = cfg.ACTIVE_LEARNING.BUDGET_SIZE - 1
        for i in range(amount):
            # Fixed: was ``i is not 0`` -- identity comparison against an
            # int literal is undefined behaviour; compare by value.
            if i != 0 and i % 500 == 0:
                print("{} Sampled out of {}".format(i, amount + 1))
            dist = self.compute_dists(
                unlabeled[greedy_indices[-1], :].reshape((1, unlabeled.shape[1])),
                unlabeled,
            )
            min_dist = np.vstack((min_dist, dist.reshape((1, min_dist.shape[1]))))
            min_dist = np.min(min_dist, axis=0)
            min_dist = min_dist.reshape((1, min_dist.shape[0]))
            farthest = np.argmax(min_dist)
            greedy_indices.append(farthest)

        remainSet = set(np.arange(unlabeled.shape[0])) - set(greedy_indices)
        remainSet = np.array(list(remainSet))
        return greedy_indices, remainSet

    def get_vae_activations(self, vae, dataLoader):
        """Return the VAE latent means (mu) for every batch in ``dataLoader``."""
        acts = []
        vae.eval()
        temp_max_iter = len(dataLoader)
        print("len(dataloader): {}".format(temp_max_iter))
        temp_iter = 0
        for x, y in dataLoader:
            x = x.type(torch.cuda.FloatTensor)
            x = x.cuda(self.cuda_id)
            _, _, mu, _ = vae(x)
            acts.append(mu.cpu().numpy())
            if temp_iter % 100 == 0:
                print(f"Iteration [{temp_iter}/{temp_max_iter}] Done!!")
            temp_iter += 1
        acts = np.concatenate(acts, axis=0)
        return acts

    def get_predictions(self, vae, discriminator, data, cuda):
        """Discriminator scores for every point served by ``data``.

        Both models must already be in eval mode.  Returns a flat numpy
        array of scores in dataloader order.
        """
        all_preds = []
        assert vae.training == False, "Expected vae model to be in eval mode"
        assert (
            discriminator.training == False
        ), "Expected discriminator model to be in eval mode"
        for images, _ in data:
            if cuda:
                images = images.cuda()
            with torch.no_grad():
                _, _, mu, _ = vae(images)
                preds = discriminator(mu)
                preds = preds.cpu().data
            all_preds.extend(preds)
        all_preds = torch.stack(all_preds)
        all_preds = all_preds.view(-1)
        all_preds = all_preds.cpu().numpy()
        return all_preds

    def gpu_compute_dists(self, M1, M2):
        """
        Computes L2 norm square on gpu
        Assume
        M1: M x D matrix
        M2: N x D matrix
        output: M x N matrix
        """
        M1_norm = (M1**2).sum(1).reshape(-1, 1)
        M2_t = torch.transpose(M2, 0, 1)
        M2_norm = (M2**2).sum(1).reshape(1, -1)
        dists = M1_norm + M2_norm - 2.0 * torch.mm(M1, M2_t)
        return dists

    def efficient_compute_dists(self, labeled, unlabeled):
        """Min distance from each unlabelled point to the labelled set.

        Distances are computed on the GPU in chunks of 1000 labelled points;
        returns an (N_U, 1) numpy array of per-point minima.
        """
        N_L = labeled.shape[0]
        dist_matrix = None
        temp_range = 1000

        unlabeled = torch.from_numpy(unlabeled).cuda(self.cuda_id)
        for i in tqdm(range(0, N_L, temp_range), desc="Computing Distance Matrix"):
            end_index = i + temp_range if i + temp_range < N_L else N_L
            temp_labeled = labeled[i:end_index, :]
            temp_labeled = torch.from_numpy(temp_labeled).cuda(self.cuda_id)
            temp_dist_matrix = self.gpu_compute_dists(unlabeled, temp_labeled)
            # Reduce the chunk to one min column, then fold it into the
            # running minimum.
            temp_dist_matrix = torch.min(temp_dist_matrix, dim=1)[0]
            temp_dist_matrix = torch.reshape(
                temp_dist_matrix, (temp_dist_matrix.shape[0], 1)
            )
            if dist_matrix is None:
                dist_matrix = temp_dist_matrix
            else:
                dist_matrix = torch.cat((dist_matrix, temp_dist_matrix), dim=1)
            dist_matrix = torch.min(dist_matrix, dim=1)[0]
            dist_matrix = torch.reshape(dist_matrix, (dist_matrix.shape[0], 1))
        return dist_matrix.cpu().numpy()

    @torch.no_grad()
    def vae_sample_for_labeling(
        self, vae, uSet, lSet, unlabeled_dataloader, lSetLoader
    ):
        """Core-set style sampling in VAE latent space.

        Picks the ``budget`` unlabelled points with the largest distance to
        their nearest labelled point.
        """
        vae.eval()
        print("Computing activattions for uset....")
        u_scores = self.get_vae_activations(vae, unlabeled_dataloader)
        print("Computing activattions for lset....")
        l_scores = self.get_vae_activations(vae, lSetLoader)

        print("l_scores.shape: ", l_scores.shape)
        print("u_scores.shape: ", u_scores.shape)

        dist_matrix = self.efficient_compute_dists(l_scores, u_scores)
        print("Dist_matrix.shape: ", dist_matrix.shape)

        min_scores = np.min(dist_matrix, axis=1)
        sorted_idx = np.argsort(min_scores)[::-1]

        activeSet = uSet[sorted_idx[0 : self.budget]]
        remainSet = uSet[sorted_idx[self.budget :]]
        return activeSet, remainSet

    def sample_vaal_plus(self, vae, disc_task, data, cuda):
        """VAAL+ sampling: pick the budget points the task-aware
        discriminator scores as most likely to be unlabelled."""
        all_preds = []
        assert vae.training == False, "Expected vae model to be in eval mode"
        assert (
            disc_task.training == False
        ), "Expected disc_task model to be in eval mode"
        temp_idx = 0
        for images, _ in data:
            if cuda:
                images = images.cuda()
            with torch.no_grad():
                _, _, mu, _ = vae(images)
                preds, _ = disc_task(mu)
                preds = preds.cpu().data
            all_preds.extend(preds)
            temp_idx += images.shape[0]
        all_indices = np.arange(temp_idx)
        all_preds = torch.stack(all_preds)
        all_preds = all_preds.view(-1)
        # need to multiply by -1 to be able to use torch.topk
        all_preds *= -1
        # select the points which the discriminator thinks are the most
        # likely to be unlabeled
        _, querry_indices = torch.topk(all_preds, int(self.budget))
        querry_indices = querry_indices.numpy()
        remain_indices = np.asarray(list(set(all_indices) - set(querry_indices)))
        assert len(remain_indices) + len(querry_indices) == len(
            all_indices
        ), " Indices are overlapped between activeSet and uSet"
        activeSet = all_indices[querry_indices]
        uSet = all_indices[remain_indices]
        return activeSet, uSet

    def sample(self, vae, discriminator, data, uSet, cfg):
        """Score every point in ``data`` with the discriminator and split
        the (positional) indices into (activeSet, uSet).

        The raw scores are also dumped to ``<OUT_DIR>/actualScores.txt``.
        """
        all_preds = []
        assert vae.training == False, "Expected vae model to be in eval mode"
        assert (
            discriminator.training == False
        ), "Expected discriminator model to be in eval mode"
        temp_idx = 0
        for images, _ in tqdm(data, desc="Constructing VAE ActiveSet"):
            images = images.type(torch.cuda.FloatTensor)
            images = images.cuda()
            with torch.no_grad():
                _, _, mu, _ = vae(images)
                preds = discriminator(mu)
                preds = preds.cpu().data
            all_preds.extend(preds)
            temp_idx += images.shape[0]
        all_indices = np.arange(temp_idx)
        all_preds = torch.stack(all_preds)
        all_preds = all_preds.view(-1)

        # Persist the raw discriminator scores for later inspection.
        scores_save_path = cfg.OUT_DIR
        os.makedirs(scores_save_path, exist_ok=True)  # just to be safe
        with open(os.path.join(scores_save_path, "actualScores.txt"), "w") as fpw:
            for temp_idx, temp_rank in zip(uSet, all_preds):
                fpw.write(f"{temp_idx}\t{temp_rank:.6f}\n")
        # need to multiply by -1 to be able to use torch.topk
        all_preds *= -1
        # select the points which the discriminator thinks are the most
        # likely to be unlabeled
        _, querry_indices = torch.topk(all_preds, int(self.budget))
        querry_indices = querry_indices.numpy()
        remain_indices = np.asarray(list(set(all_indices) - set(querry_indices)))
        assert len(remain_indices) + len(querry_indices) == len(
            all_indices
        ), " Indices are overlapped between activeSet and uSet"
        activeSet = all_indices[querry_indices]
        uSet = all_indices[remain_indices]
        return activeSet, uSet

    @torch.no_grad()
    def sample_for_labeling(self, vae, discriminator, unlabeled_dataloader, uSet, cfg):
        """
        Picks samples from uSet to form activeSet.
        INPUT
        ------
        vae: object of model VAE
        discriminator: object of model discriminator
        unlabeled_dataloader: Sequential dataloader iterating over uSet
        uSet: Collection of unlabelled datapoints
        NOTE: Please pass the unlabelled dataloader as sequential dataloader else the
        results won't be appropriate.
        OUTPUT
        -------
        Returns activeSet, [remaining]uSet
        """
        print("Sampling....")
        activeSet, remainSet = self.sample(
            vae,
            discriminator,
            unlabeled_dataloader,
            uSet,
            cfg,
        )
        # ``sample`` returns positional indices; map them back to ids.
        activeSet = uSet[activeSet]
        remainSet = uSet[remainSet]
        return activeSet, remainSet
| [
"torch.from_numpy",
"torch.min",
"numpy.argsort",
"numpy.arange",
"numpy.dot",
"numpy.empty",
"numpy.concatenate",
"numpy.min",
"torch.cuda.current_device",
"numpy.argmax",
"torch.transpose",
"torch.reshape",
"torch.cat",
"os.makedirs",
"torch.stack",
"tqdm.tqdm",
"os.path.join",
"... | [((5994, 6009), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (6007, 6009), False, 'import torch\n'), ((11991, 12006), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (12004, 12006), False, 'import torch\n'), ((283, 310), 'torch.cuda.current_device', 'torch.cuda.current_device', ([], {}), '()\n', (308, 310), False, 'import torch\n'), ((1724, 1743), 'numpy.argmax', 'np.argmax', (['min_dist'], {}), '(min_dist)\n', (1733, 1743), True, 'import numpy as np\n'), ((3286, 3314), 'numpy.concatenate', 'np.concatenate', (['acts'], {'axis': '(0)'}), '(acts, axis=0)\n', (3300, 3314), True, 'import numpy as np\n'), ((4017, 4036), 'numpy.arange', 'np.arange', (['temp_idx'], {}), '(temp_idx)\n', (4026, 4036), True, 'import numpy as np\n'), ((4057, 4079), 'torch.stack', 'torch.stack', (['all_preds'], {}), '(all_preds)\n', (4068, 4079), False, 'import torch\n'), ((4542, 4567), 'torch.transpose', 'torch.transpose', (['M2', '(0)', '(1)'], {}), '(M2, 0, 1)\n', (4557, 4567), False, 'import torch\n'), ((4987, 5014), 'numpy.empty', 'np.empty', (['(N_U, temp_range)'], {}), '((N_U, temp_range))\n', (4995, 5014), True, 'import numpy as np\n'), ((6686, 6713), 'numpy.min', 'np.min', (['dist_matrix'], {'axis': '(1)'}), '(dist_matrix, axis=1)\n', (6692, 6713), True, 'import numpy as np\n'), ((7578, 7597), 'numpy.arange', 'np.arange', (['temp_idx'], {}), '(temp_idx)\n', (7587, 7597), True, 'import numpy as np\n'), ((7618, 7640), 'torch.stack', 'torch.stack', (['all_preds'], {}), '(all_preds)\n', (7629, 7640), False, 'import torch\n'), ((8691, 8736), 'tqdm.tqdm', 'tqdm', (['data'], {'desc': '"""Constructing VAE ActiveSet"""'}), "(data, desc='Constructing VAE ActiveSet')\n", (8695, 8736), False, 'from tqdm import tqdm\n'), ((9086, 9105), 'numpy.arange', 'np.arange', (['temp_idx'], {}), '(temp_idx)\n', (9095, 9105), True, 'import numpy as np\n'), ((9126, 9148), 'torch.stack', 'torch.stack', (['all_preds'], {}), '(all_preds)\n', (9137, 9148), False, 'import torch\n'), ((9236, 9280), 
'os.makedirs', 'os.makedirs', (['scores_save_path'], {'exist_ok': '(True)'}), '(scores_save_path, exist_ok=True)\n', (9247, 9280), False, 'import os\n'), ((1528, 1552), 'numpy.min', 'np.min', (['min_dist'], {'axis': '(0)'}), '(min_dist, axis=0)\n', (1534, 1552), True, 'import numpy as np\n'), ((2388, 2412), 'numpy.min', 'np.min', (['min_dist'], {'axis': '(0)'}), '(min_dist, axis=0)\n', (2394, 2412), True, 'import numpy as np\n'), ((2500, 2519), 'numpy.argmax', 'np.argmax', (['min_dist'], {}), '(min_dist)\n', (2509, 2519), True, 'import numpy as np\n'), ((5524, 5587), 'torch.reshape', 'torch.reshape', (['temp_dist_matrix', '(temp_dist_matrix.shape[0], 1)'], {}), '(temp_dist_matrix, (temp_dist_matrix.shape[0], 1))\n', (5537, 5587), False, 'import torch\n'), ((6735, 6757), 'numpy.argsort', 'np.argsort', (['min_scores'], {}), '(min_scores)\n', (6745, 6757), True, 'import numpy as np\n'), ((423, 451), 'numpy.sum', 'np.sum', (['(X_train ** 2)'], {'axis': '(1)'}), '(X_train ** 2, axis=1)\n', (429, 451), True, 'import numpy as np\n'), ((464, 486), 'numpy.sum', 'np.sum', (['(X ** 2)'], {'axis': '(1)'}), '(X ** 2, axis=1)\n', (470, 486), True, 'import numpy as np\n'), ((2589, 2618), 'numpy.arange', 'np.arange', (['unlabeled.shape[0]'], {}), '(unlabeled.shape[0])\n', (2598, 2618), True, 'import numpy as np\n'), ((3779, 3794), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (3792, 3794), False, 'import torch\n'), ((4658, 4676), 'torch.mm', 'torch.mm', (['M1', 'M2_t'], {}), '(M1, M2_t)\n', (4666, 4676), False, 'import torch\n'), ((4913, 4940), 'torch.from_numpy', 'torch.from_numpy', (['unlabeled'], {}), '(unlabeled)\n', (4929, 4940), False, 'import torch\n'), ((5455, 5489), 'torch.min', 'torch.min', (['temp_dist_matrix'], {'dim': '(1)'}), '(temp_dist_matrix, dim=1)\n', (5464, 5489), False, 'import torch\n'), ((5749, 5798), 'torch.cat', 'torch.cat', (['(dist_matrix, temp_dist_matrix)'], {'dim': '(1)'}), '((dist_matrix, temp_dist_matrix), dim=1)\n', (5758, 5798), False, 
'import torch\n'), ((5892, 5945), 'torch.reshape', 'torch.reshape', (['dist_matrix', '(dist_matrix.shape[0], 1)'], {}), '(dist_matrix, (dist_matrix.shape[0], 1))\n', (5905, 5945), False, 'import torch\n'), ((7341, 7356), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (7354, 7356), False, 'import torch\n'), ((8848, 8863), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (8861, 8863), False, 'import torch\n'), ((9318, 9368), 'os.path.join', 'os.path.join', (['scores_save_path', '"""actualScores.txt"""'], {}), "(scores_save_path, 'actualScores.txt')\n", (9330, 9368), False, 'import os\n'), ((388, 408), 'numpy.dot', 'np.dot', (['X', 'X_train.T'], {}), '(X, X_train.T)\n', (394, 408), True, 'import numpy as np\n'), ((5295, 5325), 'torch.from_numpy', 'torch.from_numpy', (['temp_labeled'], {}), '(temp_labeled)\n', (5311, 5325), False, 'import torch\n'), ((5829, 5858), 'torch.min', 'torch.min', (['dist_matrix'], {'dim': '(1)'}), '(dist_matrix, dim=1)\n', (5838, 5858), False, 'import torch\n'), ((1437, 1457), 'numpy.min', 'np.min', (['dist'], {'axis': '(0)'}), '(dist, axis=0)\n', (1443, 1457), True, 'import numpy as np\n')] |
from os.path import join
import cv2
import numpy as np
from PIL import Image
from torch.utils import data
def prepare_image_PIL(im):
    """Convert an RGB float image (H x W x C) into a mean-subtracted BGR
    array laid out channel-first (C x H x W).

    The per-channel offsets are BGR channel means (presumably the
    VGG/ImageNet pixel means used by the pretrained backbone -- TODO
    confirm against the model weights).
    """
    # Reverse the channel axis (RGB -> BGR) and copy, so the in-place
    # subtraction below cannot touch the caller's array.
    bgr = im[:, :, ::-1].copy()
    bgr -= np.array((104.00698793, 116.66876762, 122.67891434))
    # Channels-first layout for the network: (H, W, C) -> (C, H, W).
    return np.transpose(bgr, (2, 0, 1))
def prepare_image_cv2(im):
    """Mean-subtract a float image in place and return it channel-first.

    Unlike ``prepare_image_PIL`` there is no channel flip here (cv2
    already delivers BGR order).

    NOTE: the subtraction is in place, so the caller's array is modified
    as a side effect.
    """
    channel_means = np.array((104.00698793, 116.66876762, 122.67891434))
    im -= channel_means  # in-place: mutates the argument
    # (H, W, C) -> (C, H, W) for the network input.
    return im.transpose((2, 0, 1))
class BSDS_RCFLoader(data.Dataset):
    """Dataset for the BSDS500 / PASCAL edge-detection benchmark.

    ``split='train'`` yields ``(image, label)`` pairs listed in
    ``bsds_pascal_train_pair.lst``; ``split='test'`` yields images only,
    listed in ``test.lst``.
    """

    def __init__(self, root='data/HED-BSDS_PASCAL', split='train', transform=False):
        self.root = root
        self.split = split
        self.transform = transform
        # Choose the list file for the requested split, then store its
        # parsed lines (one entry per sample) as self.filelist.
        if self.split == 'train':
            list_path = join(self.root, 'bsds_pascal_train_pair.lst')
        elif self.split == 'test':
            list_path = join(self.root, 'test.lst')
        else:
            raise ValueError("Invalid split type!")
        with open(list_path, 'r') as handle:
            self.filelist = handle.readlines()

    def __len__(self):
        """Number of entries in the split's list file."""
        return len(self.filelist)

    def __getitem__(self, index):
        """Return ``(image, label)`` for train, or just ``image`` for test."""
        if self.split == "train":
            img_file, lb_file = self.filelist[index].split()
            # Ground-truth annotation, loaded as a float32 array.
            label = np.array(Image.open(join(self.root, lb_file)), dtype=np.float32)
            if label.ndim == 3:
                label = np.squeeze(label[:, :, 0])
            assert label.ndim == 2
            label = label[np.newaxis, :, :]
            # Collapse raw annotation values into three classes:
            # 0 stays 0, values in (0, 128) become 2, values >= 128 become 1.
            label[label == 0] = 0
            label[np.logical_and(label > 0, label < 128)] = 2
            label[label >= 128] = 1
            image = np.array(cv2.imread(join(self.root, img_file)), dtype=np.float32)
            return prepare_image_cv2(image), label
        # Test split: image only, read via PIL and converted by the helper.
        img_file = self.filelist[index].rstrip()
        image = np.array(Image.open(join(self.root, img_file)), dtype=np.float32)
        return prepare_image_PIL(image)
| [
"numpy.logical_and",
"os.path.join",
"numpy.squeeze",
"numpy.array",
"numpy.transpose",
"numpy.zeros_like"
] | [((201, 253), 'numpy.array', 'np.array', (['(104.00698793, 116.66876762, 122.67891434)'], {}), '((104.00698793, 116.66876762, 122.67891434))\n', (209, 253), True, 'import numpy as np\n'), ((261, 288), 'numpy.transpose', 'np.transpose', (['im', '(2, 0, 1)'], {}), '(im, (2, 0, 1))\n', (273, 288), True, 'import numpy as np\n'), ((371, 423), 'numpy.array', 'np.array', (['(104.00698793, 116.66876762, 122.67891434)'], {}), '((104.00698793, 116.66876762, 122.67891434))\n', (379, 423), True, 'import numpy as np\n'), ((431, 458), 'numpy.transpose', 'np.transpose', (['im', '(2, 0, 1)'], {}), '(im, (2, 0, 1))\n', (443, 458), True, 'import numpy as np\n'), ((160, 177), 'numpy.zeros_like', 'np.zeros_like', (['im'], {}), '(im)\n', (173, 177), True, 'import numpy as np\n'), ((813, 858), 'os.path.join', 'join', (['self.root', '"""bsds_pascal_train_pair.lst"""'], {}), "(self.root, 'bsds_pascal_train_pair.lst')\n", (817, 858), False, 'from os.path import join\n'), ((922, 949), 'os.path.join', 'join', (['self.root', '"""test.lst"""'], {}), "(self.root, 'test.lst')\n", (926, 949), False, 'from os.path import join\n'), ((1426, 1449), 'numpy.squeeze', 'np.squeeze', (['lb[:, :, 0]'], {}), '(lb[:, :, 0])\n', (1436, 1449), True, 'import numpy as np\n'), ((1563, 1595), 'numpy.logical_and', 'np.logical_and', (['(lb > 0)', '(lb < 128)'], {}), '(lb > 0, lb < 128)\n', (1577, 1595), True, 'import numpy as np\n'), ((1331, 1355), 'os.path.join', 'join', (['self.root', 'lb_file'], {}), '(self.root, lb_file)\n', (1335, 1355), False, 'from os.path import join\n'), ((1780, 1805), 'os.path.join', 'join', (['self.root', 'img_file'], {}), '(self.root, img_file)\n', (1784, 1805), False, 'from os.path import join\n'), ((1946, 1971), 'os.path.join', 'join', (['self.root', 'img_file'], {}), '(self.root, img_file)\n', (1950, 1971), False, 'from os.path import join\n')] |
#! /usr/bin/env python
# -*- mode: python; coding: utf-8 -*
# Copyright (c) 2019 <NAME>, <NAME>
# Licensed under the 2-clause BSD License
"""Code for plotting EoR Limits."""
import glob
import os
import copy
import yaml
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm as cmx
import matplotlib.colors as colors
from eor_limits.data import DATA_PATH
# Default set of theory curves drawn by make_plot when the caller does not
# supply its own theory_params. Each key is a unique label; each value is a
# dict of options handed to the processing module selected by "paper"
# (e.g. eor_limits.process_mesinger_2016). Common options: "model" (theory
# variant), "nf" (neutral fraction), "redshift", and "linewidth" (make_plot
# suppresses legend entries for lines with linewidth 0, using them only for
# shading).
default_theory_params = {
    "munoz_2021_AllGalaxies_z8.5": {
        "paper": "munoz_2021",
        "model": "EOS",
        "redshift": 8.5,
        "linewidth": 3,
    },
    "mesinger_2016_faint_nf0.8": {
        "paper": "mesinger_2016",
        "model": "faint",
        "nf": 0.8,
        "linewidth": 2,
    },
    "mesinger_2016_bright_nf0.8": {
        "paper": "mesinger_2016",
        "model": "bright",
        "nf": 0.8,
        "linewidth": 2,
    },
    "mesinger_2016_faint_nf0.5": {
        "paper": "mesinger_2016",
        "model": "faint",
        "nf": 0.5,
        "linewidth": 3,
    },
    "mesinger_2016_bright_nf0.5": {
        "paper": "mesinger_2016",
        "model": "bright",
        "nf": 0.5,
        "linewidth": 2,
    },
    "pagano_beta1_z8.5": {"paper": "pagano_liu_2020", "beta": 1, "redshift": 8.5},
    "pagano_beta-1_z8.5": {"paper": "pagano_liu_2020", "beta": -1, "redshift": 8.5},
}
def read_data_yaml(paper_name, theory=False):
"""
Read in the data from a paper yaml file.
Parameters
----------
paper_name : str
Short name of paper (usually author_year) which corresponds to a file
in the data directory named <paper_name>.yaml
theory : bool
Flag that this is a theory paper and so is in the theory folder.
Returns
-------
dict
Dictionary with the parsed yaml for use in the plotting code.
"""
if theory:
file_name = os.path.join(DATA_PATH, "theory", paper_name + ".yaml")
else:
file_name = os.path.join(DATA_PATH, paper_name + ".yaml")
with open(file_name, "r") as pfile:
paper_dict = yaml.safe_load(pfile)
if isinstance(paper_dict["delta_squared"][0], (str,)):
try:
paper_dict["delta_squared"] = [
float(val) for val in paper_dict["delta_squared"]
]
except (ValueError):
val_list = []
for val in paper_dict["delta_squared"]:
if "**" in val:
val_split = val.split("**")
val_list.append(float(val_split[0]) ** float(val_split[1]))
else:
val_list.append(float(val))
paper_dict["delta_squared"] = val_list
elif isinstance(paper_dict["delta_squared"][0], (list,)) and isinstance(
paper_dict["delta_squared"][0][0], (str,)
):
for ind, elem in enumerate(paper_dict["delta_squared"]):
try:
paper_dict["delta_squared"][ind] = [float(val) for val in elem]
except (ValueError):
val_list = []
for val in paper_dict["delta_squared"][ind]:
if "**" in val:
val_split = val.split("**")
val_list.append(float(val_split[0]) ** float(val_split[1]))
else:
val_list.append(float(val))
paper_dict["delta_squared"][ind] = val_list
return paper_dict
def make_plot(
    papers=None,
    include_theory=True,
    theory_legend=True,
    theory_params=default_theory_params,
    plot_as_points=["patil_2017", "mertens_2020"],
    plot_filename="eor_limits.pdf",
    delta_squared_range=None,
    redshift_range=None,
    k_range=None,
    shade_limits="generational",
    shade_theory="flat",
    colormap="Spectral_r",
    bold_papers=None,
    fontsize=15,
):
    """
    Plot the current EoR Limits as a function of k and redshift.

    Parameters
    ----------
    papers : list of str
        List of papers to include in the plot (specified as 'author_year',
        must be present in the data folder).
        Defaults to `None` meaning include all papers in the data folder.
    include_theory : bool
        Flag to include theory lines on plots.
    theory_params : dict
        Dictionary specifying theory lines to include on the plot. Dictionary
        parameters depend on the theory paper. E.g. for lines from Mesinger et al. 2016,
        the options are 'model' which can be 'bright' or 'faint', 'nf' which specifies
        a neutral fraction and 'redshift'. See the paper specific modules for more
        examples. Only used if `include_theory` is True.
    theory_legend : bool
        Option to exclude theory lines from the legend. Used by some users who prefer
        to add the annotations on the lines by hand to improve readability.
    plot_as_points : list of str
        List of papers that have a line type data model to be plotted as points rather
        that a line.
    delta_squared_range : list of float
        Range of delta squared values to include in plot (yaxis range). Must be
        length 2 with second element greater than first element. Defaults to [1e3, 1e6]
        if include_theory is False and [1e0, 1e6] otherwise.
    redshift_range : list of float
        Range of redshifts to include in the plot. Must be length 2 with the second
        element greater than the first element.
    k_range : list of float
        Range of ks to include in the plot. Must be length 2 with the second element
        greater than the first element.
    shade_limits : {'generational', 'alpha', False}
        How to shade above plotted limits. 'generational' shading shades dark grey for
        all generation 1 papers and light grey for later generation papers. 'alpha'
        shading shades all papers with semi-transparent grey. Setting this to False
        results in no shading.
    shade_theory : {'flat', 'alpha', False}
        How to shade below theory lines. 'flat' shading shades light grey below all
        theory lines. 'alpha' shading shades below all theory lines with
        semi-transparent grey. Setting this to False results in no shading.
    colormap : str
        Matplotlib colormap to use for redshift.
    plot_filename : str
        File name to save plot to.
    bold_papers : list of str
        List of papers to bold in caption.
    fontsize : int
        Base font size used for axis labels, tick labels, legend and colorbar.
    """
    if papers is None:
        # use all the papers. This gives weird ordering which we will fix later
        papers_sorted = False
        papers = [
            os.path.splitext(os.path.basename(p))[0]
            for p in glob.glob(os.path.join(DATA_PATH, "*.yaml"))
        ]
    else:
        # if a list is passed in by hand, don't reorder it
        papers_sorted = True

    if delta_squared_range is None:
        if include_theory:
            delta_squared_range = [1e0, 1e6]
        else:
            delta_squared_range = [1e3, 1e6]

    if bold_papers is None:
        bold_papers = []

    # Papers considered "generation 1" for the generational shading style.
    generation1 = [
        "paciga_2013",
        "dillon_2014",
        "dillon_2015",
        "beardsley_2016",
        "patil_2017",
        "kolopanis_2019",
    ]

    # Read each measurement paper and tag it with plotting metadata.
    paper_list = []
    for paper_name in papers:
        paper_dict = read_data_yaml(paper_name)
        paper_dict["bold"] = paper_name in bold_papers
        paper_dict["plot_as_point"] = paper_name in plot_as_points
        paper_dict["generation1"] = paper_name in generation1
        paper_list.append(paper_dict)
    if not papers_sorted:
        paper_list.sort(key=lambda paper: paper["year"])

    # Assemble theory curves, either from data/theory yamls or from the
    # paper-specific processing modules.
    if include_theory:
        theory_paper_list = []
        for name, theory in theory_params.items():
            theory_paper_yamls = [
                os.path.splitext(os.path.basename(p))[0]
                for p in glob.glob(os.path.join(DATA_PATH, "theory", "*.yaml"))
            ]
            if theory["paper"] in theory_paper_yamls:
                paper_dict = read_data_yaml(theory["paper"], theory=True)
            elif theory["paper"] == "mesinger_2016":
                from eor_limits.process_mesinger_2016 import get_mesinger_2016_line

                dict_use = copy.deepcopy(theory)
                dict_use.pop("paper")
                paper_dict = get_mesinger_2016_line(**dict_use)
            elif theory["paper"] == "pagano_liu_2020":
                from eor_limits.process_pagano_2020 import get_pagano_2020_line

                dict_use = copy.deepcopy(theory)
                dict_use.pop("paper")
                paper_dict = get_pagano_2020_line(**dict_use)
            elif theory["paper"] == "munoz_2021":
                from eor_limits.process_munoz_2021 import get_munoz_2021_line

                dict_use = copy.deepcopy(theory)
                dict_use.pop("paper")
                paper_dict = get_munoz_2021_line(**dict_use)
            else:
                raise ValueError(
                    "Theory paper " + theory["paper"] + " is not a yaml in the "
                    "data/theory folder and is not a paper with a known processing "
                    "module."
                )
            theory_paper_list.append(paper_dict)

    # Establish the redshift color normalization: either the explicit range,
    # or the span of redshifts that survive the delta^2 / k cuts.
    if redshift_range is not None:
        if len(redshift_range) != 2:
            raise ValueError(
                "redshift range must have 2 elements with the second element greater "
                "than the first element."
            )
        if redshift_range[0] >= redshift_range[1]:
            raise ValueError(
                "redshift range must have 2 elements with the second element greater "
                "than the first element."
            )

        norm = colors.Normalize(vmin=redshift_range[0], vmax=redshift_range[1])
    else:
        redshift_list = []
        for paper in paper_list:
            if paper["type"] == "point":
                delta_array = np.array(paper["delta_squared"])
                paper_redshifts = np.array(paper["redshift"])
                if paper_redshifts.size == 1 and delta_array.size > 1:
                    paper_redshifts = np.repeat(paper_redshifts[0], delta_array.size)
                if k_range is not None:
                    k_vals = np.asarray(paper["k"])
                    inds_use = np.nonzero(
                        (delta_array <= delta_squared_range[1])
                        & (k_vals <= k_range[1])
                        & (k_vals >= k_range[0])
                    )[0]
                else:
                    inds_use = np.nonzero(delta_array <= delta_squared_range[1])[0]
                if len(paper["redshift"]) == 1 and inds_use.size > 0:
                    inds_use = np.asarray([0])
                redshift_list += list(paper_redshifts[inds_use])
            else:
                if not isinstance(paper["k"][0], list):
                    redshifts = [paper["redshift"][0]]
                    k_vals = [paper["k"]]
                    delta_squared = [paper["delta_squared"]]
                else:
                    redshifts = list(np.squeeze(paper["redshift"]))
                    k_vals = paper["k"]
                    delta_squared = paper["delta_squared"]
                for ind, elem in enumerate(redshifts):
                    delta_array = np.asarray(delta_squared[ind])
                    if k_range is not None:
                        k_array = np.asarray(k_vals[ind])
                        if np.nanmin(delta_array) <= delta_squared_range[1] or (
                            np.min(k_array) <= k_range[1]
                            and np.max(k_array) >= k_range[0]
                        ):
                            redshift_list.append(elem)
                    else:
                        if np.nanmin(delta_array) <= delta_squared_range[1]:
                            redshift_list.append(elem)
        redshift_list = sorted(set(redshift_list))

        if np.min(redshift_list) < np.max(redshift_list):
            redshift_range_use = [redshift_list[0], redshift_list[-1]]
        else:
            # if only 1 redshift and no range specified, use a range of 2 centered on
            # redshift of data.
            redshift_range_use = [redshift_list[0] - 1, redshift_list[0] + 1]
        norm = colors.Normalize(vmin=redshift_range_use[0], vmax=redshift_range_use[1])

    scalar_map = cmx.ScalarMappable(norm=norm, cmap=colormap)

    if include_theory:
        fig_height = 20
    else:
        fig_height = 10
    fig_width = 20
    fig = plt.figure(figsize=(fig_width, fig_height))
    legend_names = []
    lines = []
    paper_ks = []
    skipped_papers = []
    # Draw each measurement: scatter for point-type papers, stepped lines
    # (or points) for line-type papers, with optional shading above limits.
    for paper in paper_list:
        if paper["bold"]:
            label_start = " $\\bf{"
        else:
            label_start = " $\\rm{"
        label_end = "}$"
        label = (
            label_start
            + r"\ ".join(paper["telescope"].split(" "))
            + r"\ ("
            + paper["author"]
            + r",\ "
            + str(paper["year"])
            + ")"
            + label_end
        )
        if paper["type"] == "point":
            if len(paper["redshift"]) == 1 and len(paper["delta_squared"]) > 1:
                paper["redshift"] = paper["redshift"] * len(paper["delta_squared"])
            elif len(paper["redshift"]) != len(paper["delta_squared"]):
                raise ValueError(f"{label} has the wrong number of redshift values.")
            delta_squared = np.asarray(paper["delta_squared"])
            if redshift_range is not None:
                redshift_array = np.asarray(paper["redshift"])
                points_use = np.where(
                    (redshift_array >= redshift_range[0])
                    & (redshift_array <= redshift_range[1])
                    & (delta_squared >= delta_squared_range[0])
                    & (delta_squared <= delta_squared_range[1])
                )[0]
            else:
                points_use = np.where(
                    (delta_squared >= delta_squared_range[0])
                    & (delta_squared <= delta_squared_range[1])
                )[0]
            if points_use.size == 0:
                skipped_papers.append(paper)
                continue
            else:
                paper_ks.extend(list(np.asarray(paper["k"])[points_use]))
                delta_squared = np.asarray(paper["delta_squared"])[points_use]
                line = plt.scatter(
                    np.asarray(paper["k"])[points_use],
                    delta_squared,
                    marker=paper["marker"],
                    c=np.asarray(paper["redshift"])[points_use].tolist(),
                    cmap=colormap,
                    norm=norm,
                    edgecolors="black",
                    label=label,
                    s=150,
                    zorder=10,
                )
                if shade_limits is not False:
                    if shade_limits == "generational":
                        if paper["generation1"]:
                            color_use = "grey"
                            zorder = 1
                            alpha = 1
                        else:
                            color_use = "lightgrey"
                            zorder = 0
                            alpha = 1
                    else:
                        color_use = "grey"
                        zorder = 0
                        alpha = 0.5
                    # Shade upward from each point over its k bin.
                    for index in points_use:
                        k_edges = [paper["k_lower"][index], paper["k_upper"][index]]
                        delta_edges = [
                            paper["delta_squared"][index],
                            paper["delta_squared"][index],
                        ]
                        plt.fill_between(
                            k_edges,
                            delta_edges,
                            delta_squared_range[1],
                            color=color_use,
                            alpha=alpha,
                            zorder=zorder,
                        )
                lines.append(line)
        else:
            if not isinstance(paper["k"][0], list):
                redshifts = [paper["redshift"][0]]
                k_vals = [paper["k"]]
                k_lower = [paper["k_lower"]]
                k_upper = [paper["k_upper"]]
                delta_squared = [paper["delta_squared"]]
            else:
                redshifts = list(np.squeeze(paper["redshift"]))
                k_vals = paper["k"]
                k_lower = paper["k_lower"]
                k_upper = paper["k_upper"]
                delta_squared = paper["delta_squared"]
            if redshift_range is not None:
                redshift_array = np.asarray(redshifts)
                lines_use = np.where(
                    (redshift_array >= redshift_range[0])
                    & (redshift_array <= redshift_range[1])
                )[0]
                if lines_use.size == 0:
                    skipped_papers.append(paper)
                    continue
            else:
                lines_use = np.arange(len(redshifts))
            for ind, redshift in enumerate(np.asarray(redshifts)[lines_use]):
                paper_ks.extend(k_vals[ind])
                # Build step-style edges by duplicating each bin boundary.
                k_edges = np.stack(
                    (np.asarray(k_lower[ind]), np.asarray(k_upper[ind]))
                ).T.flatten()
                delta_edges = np.stack(
                    (np.asarray(delta_squared[ind]), np.asarray(delta_squared[ind]))
                ).T.flatten()
                if paper["plot_as_point"]:
                    line = plt.scatter(
                        k_vals[ind],
                        delta_squared[ind],
                        marker=paper["marker"],
                        c=np.zeros(len(k_vals[ind])) + redshift,
                        cmap=colormap,
                        norm=norm,
                        edgecolors="black",
                        label=label,
                        s=150,
                        zorder=10,
                    )
                else:
                    color_val = scalar_map.to_rgba(redshift)
                    # make black outline by plotting thicker black line first
                    plt.plot(
                        k_edges,
                        delta_edges,
                        c="black",
                        linewidth=paper["linewidth"] + 2,
                        zorder=2,
                    )
                    (line,) = plt.plot(
                        k_edges,
                        delta_edges,
                        c=color_val,
                        linewidth=paper["linewidth"],
                        label=label,
                        zorder=2,
                    )
                if shade_limits is not False:
                    if shade_limits == "generational":
                        if paper["generation1"]:
                            color_use = "grey"
                            zorder = 1
                            alpha = 1
                        else:
                            color_use = "lightgrey"
                            zorder = 0
                            alpha = 1
                    else:
                        color_use = "grey"
                        zorder = 0
                        alpha = 0.5
                    plt.fill_between(
                        k_edges,
                        delta_edges,
                        delta_squared_range[1],
                        color=color_use,
                        alpha=alpha,
                        zorder=zorder,
                    )
                if ind == 0:
                    lines.append(line)
        legend_names.append(label)

    if len(skipped_papers) == len(paper_list):
        raise ValueError("No papers in specified redshift and/or delta squared range.")

    theory_line_inds = []
    if include_theory:
        # we want to supress legend labels for theories with linewidth=0
        # which are only used for shading
        # fix ordering to put them at the end
        linewidths = np.asarray([paper["linewidth"] for paper in theory_paper_list])
        ordering = np.argsort(linewidths == 0)
        theory_paper_list = [theory_paper_list[p] for p in ordering]
        for paper in theory_paper_list:
            label_start = " $\\bf{Theory:} \\rm{ "
            label_end = "}$"
            label = (
                label_start
                + r"\ ".join(paper["model"].split(" "))
                + r"\ ("
                + r"\ ".join(paper["author"].split(" "))
                + r",\ "
                + str(paper["year"])
                + ")"
                + label_end
            )
            k_vals = paper["k"]
            delta_squared = paper["delta_squared"]
            (line,) = plt.plot(
                k_vals,
                delta_squared,
                c="lightsteelblue",
                linewidth=paper["linewidth"],
                linestyle=paper["linestyle"],
                zorder=2,
            )
            if shade_theory is not False:
                if shade_theory == "flat":
                    color_use = "aliceblue"
                    zorder = 0
                    alpha = 1
                else:
                    color_use = "lightsteelblue"
                    zorder = 0
                    alpha = 1.0 / len(theory_paper_list)
                plt.fill_between(
                    k_vals,
                    delta_squared,
                    delta_squared_range[0],
                    color=color_use,
                    alpha=alpha,
                    zorder=zorder,
                )
            theory_line_inds.append(len(lines))
            lines.append(line)
            if paper["linewidth"] > 0 and theory_legend:
                legend_names.append(label)

    point_size = 1 / 72.0  # typography standard (points/inch)
    font_inch = fontsize * point_size

    plt.rcParams.update({"font.size": fontsize})
    plt.xlabel("k ($h Mpc^{-1}$)", fontsize=fontsize)
    # raw string so that \D is not treated as a (invalid) string escape
    plt.ylabel(r"$\Delta^2$ ($mK^2$)", fontsize=fontsize)
    plt.yscale("log")
    plt.xscale("log")
    plt.ylim(*delta_squared_range)
    if k_range is None:
        # Round the data-driven k range outward to "nice" decade-scaled values.
        k_range = [np.min(paper_ks), np.max(paper_ks)]
        min_factor = 10 ** np.ceil(np.log10(k_range[0]) * -1)
        max_factor = 10 ** np.ceil(np.log10(k_range[1]) * -1)
        k_range = [
            np.floor(k_range[0] * min_factor) / min_factor,
            np.ceil(k_range[1] * max_factor) / max_factor,
        ]

    plt.xlim(*k_range)
    plt.tick_params(labelsize=fontsize)
    cb = plt.colorbar(scalar_map, fraction=0.1, pad=0.08, label="Redshift")
    cb.ax.yaxis.set_label_position("left")
    cb.ax.yaxis.set_ticks_position("left")
    cb.set_label(label="Redshift", fontsize=fontsize)
    plt.grid(axis="y")

    # Reserve figure-fraction space below the axes for the legend, sized
    # from the font size (in inches) and the number of legend rows.
    if fontsize > 20:
        leg_columns = 2
    else:
        leg_columns = 3
    leg_rows = int(np.ceil(len(legend_names) / leg_columns))

    legend_height = (2 * leg_rows) * font_inch
    legend_height_norm = legend_height / fig_height  # 0.25
    axis_height = 3 * fontsize * point_size
    axis_height_norm = axis_height / fig_height
    plot_bottom = legend_height_norm + axis_height_norm

    leg = plt.legend(
        lines,
        legend_names,
        bbox_to_anchor=(0.45, legend_height_norm / 2.0),
        loc="center",
        bbox_transform=fig.transFigure,
        ncol=leg_columns,
        frameon=False,
    )
    # NOTE(review): Legend.legendHandles was renamed legend_handles in
    # matplotlib 3.7 -- confirm the supported matplotlib versions before
    # changing this attribute access.
    for ind in range(len(leg.legendHandles)):
        if ind not in theory_line_inds:
            leg.legendHandles[ind].set_color("gray")

    plt.subplots_adjust(bottom=plot_bottom)
    fig.tight_layout()
    plt.savefig(plot_filename)
if __name__ == "__main__":
    import argparse

    # Command-line front end for make_plot: every plotting option is exposed
    # as a flag, and the per-theory option lists are assembled into the
    # theory_params dict that make_plot expects.
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter
    )
    parser.add_argument(
        "--papers",
        type=str,
        nargs="+",
        default=None,
        help="Papers to include on plot "
        "(must be in data directory). Defaults to all papers "
        "in the data directory.",
    )
    parser.add_argument(
        "--no_theory",
        action="store_true",
        help="Flag to not plot theory lines. If True, default range is modified.",
    )
    parser.add_argument(
        "--theories",
        type=str,
        nargs="+",
        default=None,
        help="Theories to plot. Theory-specific options can be set to control which "
        "lines are drawn.",
    )
    parser.add_argument(
        "--theory_model",
        nargs="+",
        type=str,
        default=None,
        help="Model type to select from theories (e.g. 'bright' or 'faint' for "
        "Mesinger et al. 2016).",
    )
    parser.add_argument(
        "--theory_nf",
        nargs="+",
        type=str,
        default=None,
        help="Neutral fractions to select from theories.",
    )
    parser.add_argument(
        "--theory_redshift",
        nargs="+",
        type=str,
        default=None,
        help="Redshifts to select from theories.",
    )
    parser.add_argument(
        "--theory_linewidth",
        nargs="+",
        type=float,
        default=None,
        help="Linewidths for theory lines.",
    )
    parser.add_argument(
        "--file",
        type=str,
        dest="filename",
        help="Filename to save plot to.",
        default="eor_limits.pdf",
    )
    parser.add_argument(
        "--aspoints",
        type=str,
        nargs="+",
        default=["patil_2017", "mertens_2020"],
        help="Papers to plot as points rather than lines.",
    )
    parser.add_argument(
        "--range",
        type=float,
        help="Range of Delta Squared to include on plot (yaxis range). "
        "Defaults to [1e3, 1e6] if include_theory is false and [1e0, 1e6] otherwise",
        default=None,
        nargs="+",
    )
    parser.add_argument(
        "--redshift",
        type=float,
        help="Range of redshifts to include on plot.",
        default=None,
        nargs="+",
    )
    parser.add_argument(
        "--k_range",
        type=float,
        help="Range of k values to include on plot (xaxis range).",
        default=None,
        nargs="+",
    )
    parser.add_argument(
        "--shade_limits",
        type=str,
        default="generational",
        help="Type of shading above limits to apply, one of: 'generational', 'alpha' "
        "or False.",
    )
    parser.add_argument(
        "--shade_theory",
        type=str,
        default="flat",
        help="Type of shading below theories to apply, one of: 'flat', 'alpha' "
        "or False.",
    )
    parser.add_argument(
        "--colormap", type=str, help="Matplotlib colormap to use.", default="Spectral_r"
    )
    parser.add_argument(
        "--bold",
        type=str,
        nargs="+",
        help="List of papers to bold in caption.",
        default=None,
    )
    parser.add_argument("--fontsize", type=int, help="Font size to use.", default=15)
    args = parser.parse_args()
    # The shading flags arrive as strings; the literal "False" disables shading.
    if args.shade_limits == "False":
        args.shade_limits = False
    if args.shade_theory == "False":
        args.shade_theory = False
    if args.theories is not None:
        # Normalize the per-theory option lists: the literal string "None"
        # maps to None, and missing options become single-element lists so
        # they can be broadcast below.
        if args.theory_nf is None:
            args.theory_nf = [None]
        else:
            args.theory_nf = [
                float(val) if val != "None" else None for val in args.theory_nf
            ]
        if args.theory_redshift is None:
            args.theory_redshift = [None]
        if args.theory_model is None:
            args.theory_model = [None]
        theory_params = {}
        num_theories = len(args.theories)
        num_models = len(args.theory_model)
        num_nf = len(args.theory_nf)
        num_redshift = len(args.theory_redshift)
        # Broadcast every length-1 option list to the common line count;
        # any other length mismatch is a user error.
        num_theory_lines = max([num_theories, num_models, num_nf, num_redshift])
        if num_theory_lines > 1:
            if num_theories == 1:
                args.theories = args.theories * num_theory_lines
            elif num_theories != num_theory_lines:
                raise ValueError(
                    "Number of theories must be one or match the max length of "
                    "theory_model, theory_nf or theory_redshift."
                )
            if num_models == 1:
                args.theory_model = args.theory_model * num_theory_lines
            elif num_models != num_theory_lines:
                raise ValueError(
                    "Number of theory_models must be one or match the max length of "
                    "theories, theory_nf or theory_redshift."
                )
            if num_nf == 1:
                args.theory_nf = args.theory_nf * num_theory_lines
            elif num_nf != num_theory_lines:
                raise ValueError(
                    "Number of theory_nfs must be one or match the max length of "
                    "theories, theory_model or theory_redshift."
                )
            if num_redshift == 1:
                args.theory_redshift = args.theory_redshift * num_theory_lines
            elif num_redshift != num_theory_lines:
                raise ValueError(
                    "Number of theory_redshifts must be one or match the max length of "
                    "theories, theory_model or theory_nf."
                )
        if args.theory_linewidth is not None:
            if len(args.theory_linewidth) == 1:
                args.theory_linewidth = args.theory_linewidth * num_theory_lines
            elif len(args.theory_linewidth) != num_theory_lines:
                raise ValueError(
                    "Number of theory lines must be one or match the max length of "
                    "theories, theory_model, theory_nf or theory_redshift."
                )
        # Build one theory_params entry per requested line, keyed by a
        # unique name combining paper, model, nf and redshift.
        for index, theory in enumerate(args.theories):
            name = (
                theory
                + "_"
                + str(args.theory_model[index])
                + "_nf_"
                + str(args.theory_nf[index])
                + "_z_"
                + str(args.theory_redshift[index])
            )
            theory_params[name] = {
                "paper": theory,
                "model": args.theory_model[index],
                "nf": args.theory_nf[index],
                "redshift": args.theory_redshift[index],
            }
            if args.theory_linewidth is not None:
                theory_params[name]["linewidth"] = args.theory_linewidth[index]
    else:
        theory_params = default_theory_params
    make_plot(
        papers=args.papers,
        include_theory=not args.no_theory,
        theory_params=theory_params,
        plot_as_points=args.aspoints,
        delta_squared_range=args.range,
        redshift_range=args.redshift,
        k_range=args.k_range,
        shade_limits=args.shade_limits,
        shade_theory=args.shade_theory,
        colormap=args.colormap,
        plot_filename=args.filename,
        bold_papers=args.bold,
        fontsize=args.fontsize,
    )
| [
"matplotlib.pyplot.grid",
"numpy.log10",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.fill_between",
"numpy.argsort",
"numpy.array",
"copy.deepcopy",
"numpy.nanmin",
"eor_limits.process_mesinger_2016.get_mesinger_2016_line",
"numpy.repeat",
"argparse.ArgumentParser",
"numpy.where",
"matplo... | [((12492, 12536), 'matplotlib.cm.ScalarMappable', 'cmx.ScalarMappable', ([], {'norm': 'norm', 'cmap': 'colormap'}), '(norm=norm, cmap=colormap)\n', (12510, 12536), True, 'import matplotlib.cm as cmx\n'), ((12648, 12691), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(fig_width, fig_height)'}), '(figsize=(fig_width, fig_height))\n', (12658, 12691), True, 'import matplotlib.pyplot as plt\n'), ((22102, 22146), 'matplotlib.pyplot.rcParams.update', 'plt.rcParams.update', (["{'font.size': fontsize}"], {}), "({'font.size': fontsize})\n", (22121, 22146), True, 'import matplotlib.pyplot as plt\n'), ((22151, 22200), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""k ($h Mpc^{-1}$)"""'], {'fontsize': 'fontsize'}), "('k ($h Mpc^{-1}$)', fontsize=fontsize)\n", (22161, 22200), True, 'import matplotlib.pyplot as plt\n'), ((22205, 22258), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$\\\\Delta^2$ ($mK^2$)"""'], {'fontsize': 'fontsize'}), "('$\\\\Delta^2$ ($mK^2$)', fontsize=fontsize)\n", (22215, 22258), True, 'import matplotlib.pyplot as plt\n'), ((22270, 22287), 'matplotlib.pyplot.yscale', 'plt.yscale', (['"""log"""'], {}), "('log')\n", (22280, 22287), True, 'import matplotlib.pyplot as plt\n'), ((22292, 22309), 'matplotlib.pyplot.xscale', 'plt.xscale', (['"""log"""'], {}), "('log')\n", (22302, 22309), True, 'import matplotlib.pyplot as plt\n'), ((22314, 22344), 'matplotlib.pyplot.ylim', 'plt.ylim', (['*delta_squared_range'], {}), '(*delta_squared_range)\n', (22322, 22344), True, 'import matplotlib.pyplot as plt\n'), ((22702, 22720), 'matplotlib.pyplot.xlim', 'plt.xlim', (['*k_range'], {}), '(*k_range)\n', (22710, 22720), True, 'import matplotlib.pyplot as plt\n'), ((22726, 22761), 'matplotlib.pyplot.tick_params', 'plt.tick_params', ([], {'labelsize': 'fontsize'}), '(labelsize=fontsize)\n', (22741, 22761), True, 'import matplotlib.pyplot as plt\n'), ((22771, 22837), 'matplotlib.pyplot.colorbar', 'plt.colorbar', (['scalar_map'], {'fraction': '(0.1)', 
'pad': '(0.08)', 'label': '"""Redshift"""'}), "(scalar_map, fraction=0.1, pad=0.08, label='Redshift')\n", (22783, 22837), True, 'import matplotlib.pyplot as plt\n'), ((22982, 23000), 'matplotlib.pyplot.grid', 'plt.grid', ([], {'axis': '"""y"""'}), "(axis='y')\n", (22990, 23000), True, 'import matplotlib.pyplot as plt\n'), ((23413, 23581), 'matplotlib.pyplot.legend', 'plt.legend', (['lines', 'legend_names'], {'bbox_to_anchor': '(0.45, legend_height_norm / 2.0)', 'loc': '"""center"""', 'bbox_transform': 'fig.transFigure', 'ncol': 'leg_columns', 'frameon': '(False)'}), "(lines, legend_names, bbox_to_anchor=(0.45, legend_height_norm / \n 2.0), loc='center', bbox_transform=fig.transFigure, ncol=leg_columns,\n frameon=False)\n", (23423, 23581), True, 'import matplotlib.pyplot as plt\n'), ((23780, 23819), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'bottom': 'plot_bottom'}), '(bottom=plot_bottom)\n', (23799, 23819), True, 'import matplotlib.pyplot as plt\n'), ((23847, 23873), 'matplotlib.pyplot.savefig', 'plt.savefig', (['plot_filename'], {}), '(plot_filename)\n', (23858, 23873), True, 'import matplotlib.pyplot as plt\n'), ((23937, 24016), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'formatter_class': 'argparse.ArgumentDefaultsHelpFormatter'}), '(formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n', (23960, 24016), False, 'import argparse\n'), ((1828, 1883), 'os.path.join', 'os.path.join', (['DATA_PATH', '"""theory"""', "(paper_name + '.yaml')"], {}), "(DATA_PATH, 'theory', paper_name + '.yaml')\n", (1840, 1883), False, 'import os\n'), ((1914, 1959), 'os.path.join', 'os.path.join', (['DATA_PATH', "(paper_name + '.yaml')"], {}), "(DATA_PATH, paper_name + '.yaml')\n", (1926, 1959), False, 'import os\n'), ((2022, 2043), 'yaml.safe_load', 'yaml.safe_load', (['pfile'], {}), '(pfile)\n', (2036, 2043), False, 'import yaml\n'), ((9843, 9907), 'matplotlib.colors.Normalize', 'colors.Normalize', ([], {'vmin': 'redshift_range[0]', 'vmax': 
'redshift_range[1]'}), '(vmin=redshift_range[0], vmax=redshift_range[1])\n', (9859, 9907), True, 'import matplotlib.colors as colors\n'), ((12402, 12474), 'matplotlib.colors.Normalize', 'colors.Normalize', ([], {'vmin': 'redshift_range_use[0]', 'vmax': 'redshift_range_use[1]'}), '(vmin=redshift_range_use[0], vmax=redshift_range_use[1])\n', (12418, 12474), True, 'import matplotlib.colors as colors\n'), ((20249, 20312), 'numpy.asarray', 'np.asarray', (["[paper['linewidth'] for paper in theory_paper_list]"], {}), "([paper['linewidth'] for paper in theory_paper_list])\n", (20259, 20312), True, 'import numpy as np\n'), ((20332, 20359), 'numpy.argsort', 'np.argsort', (['(linewidths == 0)'], {}), '(linewidths == 0)\n', (20342, 20359), True, 'import numpy as np\n'), ((12058, 12079), 'numpy.min', 'np.min', (['redshift_list'], {}), '(redshift_list)\n', (12064, 12079), True, 'import numpy as np\n'), ((12082, 12103), 'numpy.max', 'np.max', (['redshift_list'], {}), '(redshift_list)\n', (12088, 12103), True, 'import numpy as np\n'), ((13599, 13633), 'numpy.asarray', 'np.asarray', (["paper['delta_squared']"], {}), "(paper['delta_squared'])\n", (13609, 13633), True, 'import numpy as np\n'), ((20970, 21096), 'matplotlib.pyplot.plot', 'plt.plot', (['k_vals', 'delta_squared'], {'c': '"""lightsteelblue"""', 'linewidth': "paper['linewidth']", 'linestyle': "paper['linestyle']", 'zorder': '(2)'}), "(k_vals, delta_squared, c='lightsteelblue', linewidth=paper[\n 'linewidth'], linestyle=paper['linestyle'], zorder=2)\n", (20978, 21096), True, 'import matplotlib.pyplot as plt\n'), ((22389, 22405), 'numpy.min', 'np.min', (['paper_ks'], {}), '(paper_ks)\n', (22395, 22405), True, 'import numpy as np\n'), ((22407, 22423), 'numpy.max', 'np.max', (['paper_ks'], {}), '(paper_ks)\n', (22413, 22423), True, 'import numpy as np\n'), ((10049, 10081), 'numpy.array', 'np.array', (["paper['delta_squared']"], {}), "(paper['delta_squared'])\n", (10057, 10081), True, 'import numpy as np\n'), ((10116, 10143), 
'numpy.array', 'np.array', (["paper['redshift']"], {}), "(paper['redshift'])\n", (10124, 10143), True, 'import numpy as np\n'), ((13710, 13739), 'numpy.asarray', 'np.asarray', (["paper['redshift']"], {}), "(paper['redshift'])\n", (13720, 13739), True, 'import numpy as np\n'), ((16877, 16898), 'numpy.asarray', 'np.asarray', (['redshifts'], {}), '(redshifts)\n', (16887, 16898), True, 'import numpy as np\n'), ((21568, 21681), 'matplotlib.pyplot.fill_between', 'plt.fill_between', (['k_vals', 'delta_squared', 'delta_squared_range[0]'], {'color': 'color_use', 'alpha': 'alpha', 'zorder': 'zorder'}), '(k_vals, delta_squared, delta_squared_range[0], color=\n color_use, alpha=alpha, zorder=zorder)\n', (21584, 21681), True, 'import matplotlib.pyplot as plt\n'), ((22581, 22614), 'numpy.floor', 'np.floor', (['(k_range[0] * min_factor)'], {}), '(k_range[0] * min_factor)\n', (22589, 22614), True, 'import numpy as np\n'), ((22641, 22673), 'numpy.ceil', 'np.ceil', (['(k_range[1] * max_factor)'], {}), '(k_range[1] * max_factor)\n', (22648, 22673), True, 'import numpy as np\n'), ((6530, 6549), 'os.path.basename', 'os.path.basename', (['p'], {}), '(p)\n', (6546, 6549), False, 'import os\n'), ((6585, 6618), 'os.path.join', 'os.path.join', (['DATA_PATH', '"""*.yaml"""'], {}), "(DATA_PATH, '*.yaml')\n", (6597, 6618), False, 'import os\n'), ((8355, 8376), 'copy.deepcopy', 'copy.deepcopy', (['theory'], {}), '(theory)\n', (8368, 8376), False, 'import copy\n'), ((8444, 8478), 'eor_limits.process_mesinger_2016.get_mesinger_2016_line', 'get_mesinger_2016_line', ([], {}), '(**dict_use)\n', (8466, 8478), False, 'from eor_limits.process_mesinger_2016 import get_mesinger_2016_line\n'), ((10253, 10300), 'numpy.repeat', 'np.repeat', (['paper_redshifts[0]', 'delta_array.size'], {}), '(paper_redshifts[0], delta_array.size)\n', (10262, 10300), True, 'import numpy as np\n'), ((10370, 10392), 'numpy.asarray', 'np.asarray', (["paper['k']"], {}), "(paper['k'])\n", (10380, 10392), True, 'import numpy as 
np\n'), ((10830, 10845), 'numpy.asarray', 'np.asarray', (['[0]'], {}), '([0])\n', (10840, 10845), True, 'import numpy as np\n'), ((11421, 11451), 'numpy.asarray', 'np.asarray', (['delta_squared[ind]'], {}), '(delta_squared[ind])\n', (11431, 11451), True, 'import numpy as np\n'), ((13769, 13953), 'numpy.where', 'np.where', (['((redshift_array >= redshift_range[0]) & (redshift_array <= redshift_range[\n 1]) & (delta_squared >= delta_squared_range[0]) & (delta_squared <=\n delta_squared_range[1]))'], {}), '((redshift_array >= redshift_range[0]) & (redshift_array <=\n redshift_range[1]) & (delta_squared >= delta_squared_range[0]) & (\n delta_squared <= delta_squared_range[1]))\n', (13777, 13953), True, 'import numpy as np\n'), ((14093, 14192), 'numpy.where', 'np.where', (['((delta_squared >= delta_squared_range[0]) & (delta_squared <=\n delta_squared_range[1]))'], {}), '((delta_squared >= delta_squared_range[0]) & (delta_squared <=\n delta_squared_range[1]))\n', (14101, 14192), True, 'import numpy as np\n'), ((14482, 14516), 'numpy.asarray', 'np.asarray', (["paper['delta_squared']"], {}), "(paper['delta_squared'])\n", (14492, 14516), True, 'import numpy as np\n'), ((16592, 16621), 'numpy.squeeze', 'np.squeeze', (["paper['redshift']"], {}), "(paper['redshift'])\n", (16602, 16621), True, 'import numpy as np\n'), ((16927, 17018), 'numpy.where', 'np.where', (['((redshift_array >= redshift_range[0]) & (redshift_array <= redshift_range[1]))'], {}), '((redshift_array >= redshift_range[0]) & (redshift_array <=\n redshift_range[1]))\n', (16935, 17018), True, 'import numpy as np\n'), ((17310, 17331), 'numpy.asarray', 'np.asarray', (['redshifts'], {}), '(redshifts)\n', (17320, 17331), True, 'import numpy as np\n'), ((18386, 18475), 'matplotlib.pyplot.plot', 'plt.plot', (['k_edges', 'delta_edges'], {'c': '"""black"""', 'linewidth': "(paper['linewidth'] + 2)", 'zorder': '(2)'}), "(k_edges, delta_edges, c='black', linewidth=paper['linewidth'] + 2,\n zorder=2)\n", (18394, 18475), 
True, 'import matplotlib.pyplot as plt\n'), ((18646, 18746), 'matplotlib.pyplot.plot', 'plt.plot', (['k_edges', 'delta_edges'], {'c': 'color_val', 'linewidth': "paper['linewidth']", 'label': 'label', 'zorder': '(2)'}), "(k_edges, delta_edges, c=color_val, linewidth=paper['linewidth'],\n label=label, zorder=2)\n", (18654, 18746), True, 'import matplotlib.pyplot as plt\n'), ((19503, 19615), 'matplotlib.pyplot.fill_between', 'plt.fill_between', (['k_edges', 'delta_edges', 'delta_squared_range[1]'], {'color': 'color_use', 'alpha': 'alpha', 'zorder': 'zorder'}), '(k_edges, delta_edges, delta_squared_range[1], color=\n color_use, alpha=alpha, zorder=zorder)\n', (19519, 19615), True, 'import matplotlib.pyplot as plt\n'), ((22460, 22480), 'numpy.log10', 'np.log10', (['k_range[0]'], {}), '(k_range[0])\n', (22468, 22480), True, 'import numpy as np\n'), ((22522, 22542), 'numpy.log10', 'np.log10', (['k_range[1]'], {}), '(k_range[1])\n', (22530, 22542), True, 'import numpy as np\n'), ((7944, 7963), 'os.path.basename', 'os.path.basename', (['p'], {}), '(p)\n', (7960, 7963), False, 'import os\n'), ((8003, 8046), 'os.path.join', 'os.path.join', (['DATA_PATH', '"""theory"""', '"""*.yaml"""'], {}), "(DATA_PATH, 'theory', '*.yaml')\n", (8015, 8046), False, 'import os\n'), ((8642, 8663), 'copy.deepcopy', 'copy.deepcopy', (['theory'], {}), '(theory)\n', (8655, 8663), False, 'import copy\n'), ((8731, 8763), 'eor_limits.process_pagano_2020.get_pagano_2020_line', 'get_pagano_2020_line', ([], {}), '(**dict_use)\n', (8751, 8763), False, 'from eor_limits.process_pagano_2020 import get_pagano_2020_line\n'), ((10424, 10529), 'numpy.nonzero', 'np.nonzero', (['((delta_array <= delta_squared_range[1]) & (k_vals <= k_range[1]) & (k_vals >=\n k_range[0]))'], {}), '((delta_array <= delta_squared_range[1]) & (k_vals <= k_range[1]) &\n (k_vals >= k_range[0]))\n', (10434, 10529), True, 'import numpy as np\n'), ((10676, 10725), 'numpy.nonzero', 'np.nonzero', (['(delta_array <= delta_squared_range[1])'], 
{}), '(delta_array <= delta_squared_range[1])\n', (10686, 10725), True, 'import numpy as np\n'), ((11202, 11231), 'numpy.squeeze', 'np.squeeze', (["paper['redshift']"], {}), "(paper['redshift'])\n", (11212, 11231), True, 'import numpy as np\n'), ((11530, 11553), 'numpy.asarray', 'np.asarray', (['k_vals[ind]'], {}), '(k_vals[ind])\n', (11540, 11553), True, 'import numpy as np\n'), ((14585, 14607), 'numpy.asarray', 'np.asarray', (["paper['k']"], {}), "(paper['k'])\n", (14595, 14607), True, 'import numpy as np\n'), ((15900, 16012), 'matplotlib.pyplot.fill_between', 'plt.fill_between', (['k_edges', 'delta_edges', 'delta_squared_range[1]'], {'color': 'color_use', 'alpha': 'alpha', 'zorder': 'zorder'}), '(k_edges, delta_edges, delta_squared_range[1], color=\n color_use, alpha=alpha, zorder=zorder)\n', (15916, 16012), True, 'import matplotlib.pyplot as plt\n'), ((8920, 8941), 'copy.deepcopy', 'copy.deepcopy', (['theory'], {}), '(theory)\n', (8933, 8941), False, 'import copy\n'), ((9009, 9040), 'eor_limits.process_munoz_2021.get_munoz_2021_line', 'get_munoz_2021_line', ([], {}), '(**dict_use)\n', (9028, 9040), False, 'from eor_limits.process_munoz_2021 import get_munoz_2021_line\n'), ((11890, 11912), 'numpy.nanmin', 'np.nanmin', (['delta_array'], {}), '(delta_array)\n', (11899, 11912), True, 'import numpy as np\n'), ((14413, 14435), 'numpy.asarray', 'np.asarray', (["paper['k']"], {}), "(paper['k'])\n", (14423, 14435), True, 'import numpy as np\n'), ((11581, 11603), 'numpy.nanmin', 'np.nanmin', (['delta_array'], {}), '(delta_array)\n', (11590, 11603), True, 'import numpy as np\n'), ((11663, 11678), 'numpy.min', 'np.min', (['k_array'], {}), '(k_array)\n', (11669, 11678), True, 'import numpy as np\n'), ((11725, 11740), 'numpy.max', 'np.max', (['k_array'], {}), '(k_array)\n', (11731, 11740), True, 'import numpy as np\n'), ((14722, 14751), 'numpy.asarray', 'np.asarray', (["paper['redshift']"], {}), "(paper['redshift'])\n", (14732, 14751), True, 'import numpy as np\n'), ((17448, 
17472), 'numpy.asarray', 'np.asarray', (['k_lower[ind]'], {}), '(k_lower[ind])\n', (17458, 17472), True, 'import numpy as np\n'), ((17474, 17498), 'numpy.asarray', 'np.asarray', (['k_upper[ind]'], {}), '(k_upper[ind])\n', (17484, 17498), True, 'import numpy as np\n'), ((17591, 17621), 'numpy.asarray', 'np.asarray', (['delta_squared[ind]'], {}), '(delta_squared[ind])\n', (17601, 17621), True, 'import numpy as np\n'), ((17623, 17653), 'numpy.asarray', 'np.asarray', (['delta_squared[ind]'], {}), '(delta_squared[ind])\n', (17633, 17653), True, 'import numpy as np\n')] |
'''
`dtApp/dtCode/unquant.py`
:Author:
<NAME>
:Organisation:
University of Liverpool
:Copyright:
BSD Licence
This single python file ``unquant.py`` is the backend code for the uncertainty page.
A single function ``unquant()`` wrangles all the data requests from the html template.
The function takes care of the data sent by user performing a few actions:
* The data is converted in SI units;
* The slider position is converted to an interval;
* The uncertainty propagation function is invoked;
* The output is captured and plotted with Plotly.
This file makes sure that when no data are provided by the user, the page displays and plots the nominal values.
The default values that populate the page on load are defined at the top of document as persistent variables.
.. code-block:: python
MASS = [5.362, 5.144, 5.142] # *1e4 kg
STFF = [3.846, 4.464, 4.589] # *1e8 N/m
DAMP = [1.699, 1.016, 1.34] # *1e4 Ns/m
This page makes use of the following dependencies.
External dependencies
.. code-block:: python
from flask import render_template, request, redirect, Response, url_for
import importlib.util
import numpy
import plotly
import plotly.graph_objects as go
from plotly.subplots import make_subplots
import json
The internal dependency is imported from the scientific code library.
.. code-block:: python
import dtLib.unquant.msd3 as msd3
If not otherwise specified:
| * The excitation is applied at floor 2.
| * The frequency range is [5,200] Hz.
| * The FRF plots 350 frequencies.
| * The number of MonteCarlo samples is 50.
'''
# Import external packages
from flask import render_template, request, redirect, Response, url_for
import importlib.util
import numpy
import plotly
import plotly.graph_objects as go
from plotly.subplots import make_subplots
import json
# Import internal packages
from dtApp import app
from dtApp import date
import dtLib.unquant.msd3 as msd3
# Nominal (midpoint) floor properties shown on page load.
# Units follow the SI conversions applied in unquant(): masses are *1e4 kg,
# stiffnesses *1e8 N/m, damping coefficients *1e4 Ns/m (see module docstring).
M_INP_1, M_INP_2, M_INP_3 = 5.36, 5.14, 5.14
K_INP_1, K_INP_2, K_INP_3 = 3.85, 4.46, 4.59
C_INP_1, C_INP_2, C_INP_3 = 1.70, 1.02, 1.34
SLIDER_SCALE = 10000  # html slider positions are divided by this to get relative half-widths
PLOT_DEFINITION = 350  # number of frequencies plotted in the nominal FRF
PLOT_WIDTH = 900  # figure size in pixels
PLOT_HEIGHT = 800
W_PLOT_RANGE = [5,200]  # frequency range of the FRF plot [Hz]
@app.route('/unquant', methods=['GET','POST'])
def unquant(): # Properties displayed on landing
    '''
    Although this function takes explicit inputs, it is responsible for dispatching the data requests from the html page.
    The html inputs are dispatched from the Flask's request object ``request``, as follows:
    .. code-block:: python
        for key,val in request.form.items():
            if key == "input1":
                input1 = int(val)
            if key == 'input2':
                input2 = float(val)

    On POST the submitted midpoints and slider positions are converted to SI
    intervals and the FRF bounds are computed (Cartesian and Monte Carlo);
    on GET (page load / reload) only the nominal FRF is plotted.
    Returns the rendered ``unquant.html`` template in both branches.
    '''
    # Defaults shown on first load: nominal floor properties, zero slider
    # half-widths, 50 Monte Carlo samples, 10% maximum uncertainty.
    M_inp_1, M_inp_2, M_inp_3 = M_INP_1, M_INP_2, M_INP_3
    eM_slider_1, eM_slider_2, eM_slider_3 = 0,0,0
    K_inp_1, K_inp_2, K_inp_3 = K_INP_1, K_INP_2, K_INP_3
    eK_slider_1,eK_slider_2,eK_slider_3 = 0,0,0
    C_inp_1, C_inp_2, C_inp_3 = C_INP_1, C_INP_2, C_INP_3
    eC_slider_1, eC_slider_2, eC_slider_3 = 0,0,0
    MCsamp = 50
    maxUnc = int(10) # percent
    Exci = '2' # excitation at floor 2
    if request.method=='POST':
        fig = make_subplots(rows=3, cols=1, subplot_titles=("Floor 3", "Floor 2", "Floor 1"), shared_xaxes=False)
        fig.update_layout(width=PLOT_WIDTH, height=PLOT_HEIGHT)
        # Dispatch every submitted form field into the matching local variable.
        for key,val in request.form.items():
            if key == "maxU":
                maxUnc = int(val)
            if key == 'exci':
                Exci = val
            # Mass
            if key == "M_centre_3":
                M_inp_3 = float(val)
            if key == "M_centre_2":
                M_inp_2 = float(val)
            if key == "M_centre_1":
                M_inp_1 = float(val)
            if key == "eM_slider_3":
                eM_slider_3 = float(val)
            if key == "eM_slider_2":
                eM_slider_2 = float(val)
            if key == "eM_slider_1":
                eM_slider_1 = float(val)
            # Stiff
            if key == "K_centre_3":
                K_inp_3 = float(val)
            if key == "K_centre_2":
                K_inp_2 = float(val)
            if key == "K_centre_1":
                K_inp_1 = float(val)
            if key == "eK_slider_3":
                eK_slider_3 = float(val)
            if key == "eK_slider_2":
                eK_slider_2 = float(val)
            if key == "eK_slider_1":
                eK_slider_1 = float(val)
            # Damp
            if key == "C_centre_3":
                C_inp_3 = float(val)
            if key == "C_centre_2":
                C_inp_2 = float(val)
            if key == "C_centre_1":
                C_inp_1 = float(val)
            if key == "eC_slider_3":
                eC_slider_3 = float(val)
            if key == "eC_slider_2":
                eC_slider_2 = float(val)
            if key == "eC_slider_1":
                eC_slider_1 = float(val)
            if key == "MC_samples":
                MCsamp = int(val)
            if key == "Subintervals":
                Subintervals = val  # NOTE(review): stored but never used afterwards
        def Lo(c,e):
            '''
                Function retrieving the lower bound of an interval provided in central notation.
                :param c: Midpoint of the interval
                :param e: Relative half-width of the interval
                :returns: The lower bound of the interval
                '''
            return c * (1-e)
        def Hi(c,e):
            '''
                Function retrieving the upper bound of an interval provided in central notation.
                :param c: Midpoint of the interval
                :param e: Relative half-width of the interval
                :returns: The upper bound of the interval
                '''
            return c * (1+e)
        # Convert inputs to SI units (mass *1e4 kg, stiffness *1e8 N/m,
        # damping *1e4 Ns/m) and build [lower, upper] intervals; the slider
        # position is mapped to a relative half-width via maxUnc/SLIDER_SCALE.
        M_inp = [M_inp_1, M_inp_2, M_inp_3]
        M_inp_SI = [1e4 * mi for mi in M_inp]
        M_slider = [eM_slider_1,eM_slider_2,eM_slider_3]
        M_e = [float(ms) * maxUnc / SLIDER_SCALE for ms in M_slider]
        mI = [[Lo(m,e), Hi(m,e)] for m,e in zip(M_inp_SI,M_e)]
        K_inp = [ K_inp_1, K_inp_2, K_inp_3]
        K_inp_SI = [1e8 * ki for ki in K_inp]
        K_slider = [eK_slider_1,eK_slider_2,eK_slider_3]
        K_e = [ks * maxUnc / SLIDER_SCALE for ks in K_slider]
        kI = [[Lo(k,e), Hi(k,e)] for k,e in zip(K_inp_SI,K_e)]
        C_inp = [ C_inp_1, C_inp_2, C_inp_3]
        C_inp_SI = [1e4 * ci for ci in C_inp]
        C_slider = [eC_slider_1,eC_slider_2,eC_slider_3]
        C_e = [cs * maxUnc / SLIDER_SCALE for cs in C_slider]
        cI = [[Lo(c,e), Hi(c,e)] for c,e in zip(C_inp_SI,C_e)]
        # If every half-width is (numerically) zero there is no uncertainty
        # and the interval/Monte Carlo bounds are skipped entirely.
        U = sum([em + ek + ec for em,ek,ec in zip(M_e,K_e,C_e)])
        uncertainty = True
        if abs(U)<1e-5:
            uncertainty = False
        if uncertainty:
            kwargs = { # inputs required by the library module
                'w_range':W_PLOT_RANGE,
                'mI':mI,
                'kI':kI,
                'cI':cI,
                'exci_floor':Exci,
                }
            kwargs['n1'] = 200
            kwargs['n2'] = 30
            # Cartesian (grid) propagation: per floor, trace [:,floor,0] is one
            # bound and [:,floor,1] is filled down to it (fill='tonexty').
            ww,Y_cart = msd3.displacement_bounds_cartesian_MK(**kwargs)
            fig.add_trace(go.Scatter(name='',x=ww, y=numpy.log10(Y_cart)[:,0,0],
                fill=None,
                mode='lines',
                line_color='indigo',
                showlegend=False), row=1, col=1)
            fig.add_trace(go.Scatter(name='',x=ww, y=numpy.log10(Y_cart)[:,1,0],
                fill=None,
                mode='lines',
                line_color='indigo',
                showlegend=False), row=2, col=1)
            fig.add_trace(go.Scatter(name='',x=ww, y=numpy.log10(Y_cart)[:,2,0],
                fill=None,
                mode='lines',
                line_color='indigo',
                showlegend=False), row=3, col=1)
            fig.add_trace(go.Scatter(name='Cartesian',x=ww, y=numpy.log10(Y_cart)[:,0,1],
                fill='tonexty',
                mode='lines',
                line_color='indigo'), row=1, col=1)
            fig.add_trace(go.Scatter(name='',x=ww, y=numpy.log10(Y_cart)[:,1,1],
                fill='tonexty',
                mode='lines',
                line_color='indigo',
                showlegend=False), row=2, col=1)
            fig.add_trace(go.Scatter(name='',x=ww, y=numpy.log10(Y_cart)[:,2,1],
                fill='tonexty',
                mode='lines',
                line_color='indigo',
                showlegend=False), row=3, col=1)
            # Monte Carlo propagation with the user-selected number of samples.
            kwargs['n1'] = 200
            kwargs['n2'] = MCsamp
            ww,Y_in = msd3.displacement_bounds_montecarlo(**kwargs)
            fig.add_trace(go.Scatter(name='',x=ww, y=numpy.log10(Y_in)[:,0,0],
                fill=None,
                mode='lines',
                line_color='limegreen',
                showlegend=False), row=1, col=1)
            fig.add_trace(go.Scatter(name='',x=ww, y=numpy.log10(Y_in)[:,1,0],
                fill=None,
                mode='lines',
                line_color='limegreen',
                showlegend=False), row=2, col=1)
            fig.add_trace(go.Scatter(name='',x=ww, y=numpy.log10(Y_in)[:,2,0],
                fill=None,
                mode='lines',
                line_color='limegreen',
                showlegend=False), row=3, col=1)
            fig.add_trace(go.Scatter(name='MonteCarlo',x=ww, y=numpy.log10(Y_in)[:,0,1],
                fill='tonexty',
                mode='lines',
                line_color='limegreen'), row=1, col=1)
            fig.add_trace(go.Scatter(name='',x=ww, y=numpy.log10(Y_in)[:,1,1],
                fill='tonexty',
                mode='lines',
                line_color='limegreen',
                showlegend=False), row=2, col=1)
            fig.add_trace(go.Scatter(name='',x=ww, y=numpy.log10(Y_in)[:,2,1],
                fill='tonexty',
                mode='lines',
                line_color='limegreen',
                showlegend=False), row=3, col=1)
        # Case without uncertainty
        kwargs = { # inputs required by the library module
            'w_range':W_PLOT_RANGE,
            'm':M_inp_SI,
            'k':K_inp_SI,
            'c':C_inp_SI,
            'n':PLOT_DEFINITION,
            'exci_floor':Exci,
            }
        # Nominal (crisp) FRF is always drawn on top of any bounds.
        ww,Y_pr = msd3.displacement_msd_numpy_abs_ww(**kwargs)
        fig.add_trace(go.Scatter(name = 'Nominal',x=ww, y=numpy.log10(Y_pr)[:,0],line_color='orangered'), row=1, col=1)
        fig.add_trace(go.Scatter(x=ww, y=numpy.log10(Y_pr)[:,1],line_color='orangered',showlegend=False), row=2, col=1)
        fig.add_trace(go.Scatter(x=ww, y=numpy.log10(Y_pr)[:,2],line_color='orangered',showlegend=False), row=3, col=1)
        # Update xaxis properties
        fig.update_xaxes(title_text='[Hz]', titlefont=dict(size=14), row=3, col=1) # fig.update_xaxes(type="log")
        fig.update_yaxes(title_text='[dB]', titlefont=dict(size=14), row=1, col=1)
        fig.update_yaxes(title_text='[dB]', titlefont=dict(size=14), row=2, col=1)
        fig.update_yaxes(title_text='[dB]', titlefont=dict(size=14), row=3, col=1)
        # fig.update_yaxes(range=[-150, 0])
        fig.update_layout(title_text="Bounds on displacement Frequency Response Function (FRF)",\
            showlegend=True,\
            font=dict(size=14),\
            plot_bgcolor= 'rgba(0, 0, 0, 0.1)',paper_bgcolor= 'rgba(0, 0, 0, 0)') #paper_bgcolor= 'rgba(0, 0, 0, 0.05)'
        sideplot = json.dumps(fig, cls=plotly.utils.PlotlyJSONEncoder)
        # NOTE(review): these shadow the earlier Lo/Hi; they apply the same
        # formulas to the raw (non-SI) inputs so the template can display the
        # interval bounds in the user's units.
        def Lo(inp,e):
            return inp * (1-e)
        def Hi(inp,e):
            return inp * (1+e)
        M_lo = [Lo(mi,me) for mi,me in zip(M_inp,M_e)]#, Lo(M_inp_2,M_e), Lo(M_inp_3,M_e)]
        M_hi = [Hi(mi,me) for mi,me in zip(M_inp,M_e)]#, Hi(M_inp_2,M_e), Hi(M_inp_3,M_e)]
        K_lo = [Lo(ki,ke) for ki,ke in zip(K_inp,K_e)]#[Lo(K_inp_1,K_e), Lo(K_inp_2,K_e), Lo(K_inp_3,K_e)]
        K_hi = [Hi(ki,ke) for ki,ke in zip(K_inp,K_e)]#[Lo(K_inp_1,K_e), Lo(K_inp_2,K_e), Lo(K_inp_3,K_e)]
        C_lo = [Lo(ci,ce) for ci,ce in zip(C_inp,C_e)]#C_lo = [Lo(C_inp_1,C_e), Lo(C_inp_2,C_e), Lo(C_inp_3,C_e)]
        C_hi = [Hi(ci,ce) for ci,ce in zip(C_inp,C_e)]#C_hi = [Hi(C_inp_1,C_e), Hi(C_inp_2,C_e), Hi(C_inp_3,C_e)]
        M_val = [float(M_inp_1), float(M_inp_2), float(M_inp_3)]
        K_val = [float(K_inp_1), float(K_inp_2), float(K_inp_3)]
        C_val = [float(C_inp_1), float(C_inp_2), float(C_inp_3)]
        return render_template("unquant.html", UNC = maxUnc, MCsamp=MCsamp, \
            M_val = M_val, M_e = M_e, M_slider = M_slider, M_lo = M_lo, M_hi = M_hi,\
            K_val = K_val, K_e = K_e, K_slider = K_slider, K_lo = K_lo, K_hi = K_hi,\
            C_val = C_val, C_e = C_e, C_slider = C_slider, C_lo = C_lo, C_hi = C_hi,\
            Exci = Exci, plot = sideplot,date=date) #, \
    else: # on page re-load and landing
        # GET branch: plot only the nominal FRF with the default properties.
        fig = make_subplots(rows=3, cols=1, subplot_titles=("Floor 3", "Floor 2", "Floor 1"),shared_xaxes=False)
        fig.update_layout(width=PLOT_WIDTH, height=PLOT_HEIGHT)
        M_inp = [M_inp_1, M_inp_2, M_inp_3]
        M_inp_SI = [1e4 * mi for mi in M_inp]
        K_inp = [ K_inp_1, K_inp_2, K_inp_3]
        K_inp_SI = [1e8 * ki for ki in K_inp]
        C_inp = [ C_inp_1, C_inp_2, C_inp_3]
        C_inp_SI = [1e4 * ci for ci in C_inp]
        kwargs = { # inputs required by the library module
            'w_range':W_PLOT_RANGE,
            'm':M_inp_SI,
            'k':K_inp_SI,
            'c':C_inp_SI,
            'n':PLOT_DEFINITION,
            'exci_floor':Exci,
            }
        ww,Y_pr = msd3.displacement_msd_numpy_abs_ww(**kwargs)
        fig.add_scatter(x=ww, y=numpy.log10(Y_pr)[:,0], name='', mode = 'lines', row=1, col=1)
        fig.add_scatter(x=ww, y=numpy.log10(Y_pr)[:,1], name='', mode = 'lines', row=2, col=1)
        fig.add_scatter(x=ww, y=numpy.log10(Y_pr)[:,2], name='', mode = 'lines', row=3, col=1)
        # Update xaxis properties
        fig.update_xaxes(title_text='Frequency [Hz]', titlefont=dict(size=14), row=3, col=1)
        fig.update_yaxes(title_text='[dB]', titlefont=dict(size=14), row=1, col=1)
        fig.update_yaxes(title_text='[dB]', titlefont=dict(size=14), row=2, col=1)
        fig.update_yaxes(title_text='[dB]', titlefont=dict(size=14), row=3, col=1)
        fig.update_layout(title_text="Bounds on displacement Frequency Response Function (FRF)",\
            showlegend=False,\
            font=dict(size=14),\
            plot_bgcolor= 'rgba(0, 0, 0, 0.1)', paper_bgcolor= 'rgba(0, 0, 0, 0.0)')
        sideplot = json.dumps(fig, cls=plotly.utils.PlotlyJSONEncoder)
        M_val = [float(M_INP_1), float(M_INP_2), float(M_INP_3)]
        K_val = [float(K_INP_1), float(K_INP_2), float(K_INP_3)]
        C_val = [float(C_INP_1), float(C_INP_2), float(C_INP_3)]
        return render_template("unquant.html", UNC = maxUnc, MCsamp=MCsamp,\
            M_val = M_val, M_e = [0]*3, M_slider = [0]*3, M_lo = M_val, M_hi = M_val,\
            K_val = K_val, K_e = [0]*3, K_slider = [0]*3, K_lo = K_val, K_hi = K_val,\
            C_val = C_val, C_e = [0]*3, C_slider = [0]*3, C_lo = C_val, C_hi = C_val,\
            Exci=Exci, plot=sideplot,date=date) | [
"flask.render_template",
"numpy.log10",
"plotly.subplots.make_subplots",
"json.dumps",
"flask.request.form.items",
"dtApp.app.route",
"dtLib.unquant.msd3.displacement_msd_numpy_abs_ww",
"dtLib.unquant.msd3.displacement_bounds_cartesian_MK",
"dtLib.unquant.msd3.displacement_bounds_montecarlo"
] | [((2263, 2309), 'dtApp.app.route', 'app.route', (['"""/unquant"""'], {'methods': "['GET', 'POST']"}), "('/unquant', methods=['GET', 'POST'])\n", (2272, 2309), False, 'from dtApp import app\n'), ((3252, 3355), 'plotly.subplots.make_subplots', 'make_subplots', ([], {'rows': '(3)', 'cols': '(1)', 'subplot_titles': "('Floor 3', 'Floor 2', 'Floor 1')", 'shared_xaxes': '(False)'}), "(rows=3, cols=1, subplot_titles=('Floor 3', 'Floor 2',\n 'Floor 1'), shared_xaxes=False)\n", (3265, 3355), False, 'from plotly.subplots import make_subplots\n'), ((3440, 3460), 'flask.request.form.items', 'request.form.items', ([], {}), '()\n', (3458, 3460), False, 'from flask import render_template, request, redirect, Response, url_for\n'), ((10901, 10945), 'dtLib.unquant.msd3.displacement_msd_numpy_abs_ww', 'msd3.displacement_msd_numpy_abs_ww', ([], {}), '(**kwargs)\n', (10935, 10945), True, 'import dtLib.unquant.msd3 as msd3\n'), ((12050, 12101), 'json.dumps', 'json.dumps', (['fig'], {'cls': 'plotly.utils.PlotlyJSONEncoder'}), '(fig, cls=plotly.utils.PlotlyJSONEncoder)\n', (12060, 12101), False, 'import json\n'), ((13048, 13346), 'flask.render_template', 'render_template', (['"""unquant.html"""'], {'UNC': 'maxUnc', 'MCsamp': 'MCsamp', 'M_val': 'M_val', 'M_e': 'M_e', 'M_slider': 'M_slider', 'M_lo': 'M_lo', 'M_hi': 'M_hi', 'K_val': 'K_val', 'K_e': 'K_e', 'K_slider': 'K_slider', 'K_lo': 'K_lo', 'K_hi': 'K_hi', 'C_val': 'C_val', 'C_e': 'C_e', 'C_slider': 'C_slider', 'C_lo': 'C_lo', 'C_hi': 'C_hi', 'Exci': 'Exci', 'plot': 'sideplot', 'date': 'date'}), "('unquant.html', UNC=maxUnc, MCsamp=MCsamp, M_val=M_val, M_e\n =M_e, M_slider=M_slider, M_lo=M_lo, M_hi=M_hi, K_val=K_val, K_e=K_e,\n K_slider=K_slider, K_lo=K_lo, K_hi=K_hi, C_val=C_val, C_e=C_e, C_slider\n =C_slider, C_lo=C_lo, C_hi=C_hi, Exci=Exci, plot=sideplot, date=date)\n", (13063, 13346), False, 'from flask import render_template, request, redirect, Response, url_for\n'), ((13481, 13584), 'plotly.subplots.make_subplots', 
'make_subplots', ([], {'rows': '(3)', 'cols': '(1)', 'subplot_titles': "('Floor 3', 'Floor 2', 'Floor 1')", 'shared_xaxes': '(False)'}), "(rows=3, cols=1, subplot_titles=('Floor 3', 'Floor 2',\n 'Floor 1'), shared_xaxes=False)\n", (13494, 13584), False, 'from plotly.subplots import make_subplots\n'), ((14188, 14232), 'dtLib.unquant.msd3.displacement_msd_numpy_abs_ww', 'msd3.displacement_msd_numpy_abs_ww', ([], {}), '(**kwargs)\n', (14222, 14232), True, 'import dtLib.unquant.msd3 as msd3\n'), ((15161, 15212), 'json.dumps', 'json.dumps', (['fig'], {'cls': 'plotly.utils.PlotlyJSONEncoder'}), '(fig, cls=plotly.utils.PlotlyJSONEncoder)\n', (15171, 15212), False, 'import json\n'), ((15424, 15741), 'flask.render_template', 'render_template', (['"""unquant.html"""'], {'UNC': 'maxUnc', 'MCsamp': 'MCsamp', 'M_val': 'M_val', 'M_e': '([0] * 3)', 'M_slider': '([0] * 3)', 'M_lo': 'M_val', 'M_hi': 'M_val', 'K_val': 'K_val', 'K_e': '([0] * 3)', 'K_slider': '([0] * 3)', 'K_lo': 'K_val', 'K_hi': 'K_val', 'C_val': 'C_val', 'C_e': '([0] * 3)', 'C_slider': '([0] * 3)', 'C_lo': 'C_val', 'C_hi': 'C_val', 'Exci': 'Exci', 'plot': 'sideplot', 'date': 'date'}), "('unquant.html', UNC=maxUnc, MCsamp=MCsamp, M_val=M_val, M_e\n =[0] * 3, M_slider=[0] * 3, M_lo=M_val, M_hi=M_val, K_val=K_val, K_e=[0\n ] * 3, K_slider=[0] * 3, K_lo=K_val, K_hi=K_val, C_val=C_val, C_e=[0] *\n 3, C_slider=[0] * 3, C_lo=C_val, C_hi=C_val, Exci=Exci, plot=sideplot,\n date=date)\n", (15439, 15741), False, 'from flask import render_template, request, redirect, Response, url_for\n'), ((7173, 7220), 'dtLib.unquant.msd3.displacement_bounds_cartesian_MK', 'msd3.displacement_bounds_cartesian_MK', ([], {}), '(**kwargs)\n', (7210, 7220), True, 'import dtLib.unquant.msd3 as msd3\n'), ((8934, 8979), 'dtLib.unquant.msd3.displacement_bounds_montecarlo', 'msd3.displacement_bounds_montecarlo', ([], {}), '(**kwargs)\n', (8969, 8979), True, 'import dtLib.unquant.msd3 as msd3\n'), ((14267, 14284), 'numpy.log10', 'numpy.log10', 
(['Y_pr'], {}), '(Y_pr)\n', (14278, 14284), False, 'import numpy\n'), ((14362, 14379), 'numpy.log10', 'numpy.log10', (['Y_pr'], {}), '(Y_pr)\n', (14373, 14379), False, 'import numpy\n'), ((14457, 14474), 'numpy.log10', 'numpy.log10', (['Y_pr'], {}), '(Y_pr)\n', (14468, 14474), False, 'import numpy\n'), ((11005, 11022), 'numpy.log10', 'numpy.log10', (['Y_pr'], {}), '(Y_pr)\n', (11016, 11022), False, 'import numpy\n'), ((11108, 11125), 'numpy.log10', 'numpy.log10', (['Y_pr'], {}), '(Y_pr)\n', (11119, 11125), False, 'import numpy\n'), ((11228, 11245), 'numpy.log10', 'numpy.log10', (['Y_pr'], {}), '(Y_pr)\n', (11239, 11245), False, 'import numpy\n'), ((7275, 7294), 'numpy.log10', 'numpy.log10', (['Y_cart'], {}), '(Y_cart)\n', (7286, 7294), False, 'import numpy\n'), ((7547, 7566), 'numpy.log10', 'numpy.log10', (['Y_cart'], {}), '(Y_cart)\n', (7558, 7566), False, 'import numpy\n'), ((7819, 7838), 'numpy.log10', 'numpy.log10', (['Y_cart'], {}), '(Y_cart)\n', (7830, 7838), False, 'import numpy\n'), ((8113, 8132), 'numpy.log10', 'numpy.log10', (['Y_cart'], {}), '(Y_cart)\n', (8124, 8132), False, 'import numpy\n'), ((8344, 8363), 'numpy.log10', 'numpy.log10', (['Y_cart'], {}), '(Y_cart)\n', (8355, 8363), False, 'import numpy\n'), ((8621, 8640), 'numpy.log10', 'numpy.log10', (['Y_cart'], {}), '(Y_cart)\n', (8632, 8640), False, 'import numpy\n'), ((9034, 9051), 'numpy.log10', 'numpy.log10', (['Y_in'], {}), '(Y_in)\n', (9045, 9051), False, 'import numpy\n'), ((9307, 9324), 'numpy.log10', 'numpy.log10', (['Y_in'], {}), '(Y_in)\n', (9318, 9324), False, 'import numpy\n'), ((9581, 9598), 'numpy.log10', 'numpy.log10', (['Y_in'], {}), '(Y_in)\n', (9592, 9598), False, 'import numpy\n'), ((9865, 9882), 'numpy.log10', 'numpy.log10', (['Y_in'], {}), '(Y_in)\n', (9876, 9882), False, 'import numpy\n'), ((10097, 10114), 'numpy.log10', 'numpy.log10', (['Y_in'], {}), '(Y_in)\n', (10108, 10114), False, 'import numpy\n'), ((10375, 10392), 'numpy.log10', 'numpy.log10', (['Y_in'], {}), 
'(Y_in)\n', (10386, 10392), False, 'import numpy\n')] |
if __name__ == "__main__":
    import cv2
    import numpy as np
    import face_recognition as fr
    import os
    import json

    # --- Configuration ----------------------------------------------------
    path = 'face_recognition/basic_api/images/known'
    tolerance = 0.6      # maximum face distance accepted as a match
    scaleFactor = 0.5    # frames are downscaled by this factor before detection
    maxFrames = 120      # stop automatically after this many frames
    fpsReport = 0        # exponentially smoothed FPS estimate

    # Known identities come from the image file names (without extension).
    # The images themselves are no longer loaded: the encodings are read from
    # the pre-computed JSON file below, so decoding every image was wasted work.
    classNames = [os.path.splitext(cls)[0] for cls in os.listdir(path)]

    # Load the pre-computed face encodings (read-only access is sufficient).
    with open("face_recognition/basic_api/encodings.json", 'r') as f:
        encodeListKnown = list(json.load(f).values())

    def annotate(img, faceLoc, label, box_color, thickness, font_scale, offset):
        """Draw a bounding box and name banner for one detected face.

        faceLoc is (top, right, bottom, left) in the *downscaled* frame, so
        every coordinate is rescaled back to the full-resolution image.
        """
        y1, x2, y2, x1 = (int(v / scaleFactor) for v in faceLoc)
        cv2.rectangle(img, (x1, y1), (x2, y2), box_color, thickness)
        cv2.rectangle(img, (x1, y2 - 35), (x2, y2), (0, 255, 255), cv2.FILLED)
        cv2.putText(img, label, (x1 + offset, y2 - offset),
                    cv2.FONT_HERSHEY_COMPLEX, font_scale, (0, 0, 0), thickness)

    cap = cv2.VideoCapture(0)
    # cap = cv2.VideoCapture('rtsp://admin:admin123@192.168.0.104:554/')
    count = 0
    while True:
        timeStamp = cv2.getTickCount()
        success, img = cap.read()
        if not success:  # camera disconnected / stream ended
            break
        imgS = cv2.resize(img, (0, 0), None, scaleFactor, scaleFactor)
        imgS = cv2.cvtColor(imgS, cv2.COLOR_BGR2RGB)
        facesCurFrame = fr.face_locations(imgS, number_of_times_to_upsample=1)
        encodeCurFrame = fr.face_encodings(imgS, facesCurFrame)
        for encodeFace, faceLoc in zip(encodeCurFrame, facesCurFrame):
            matches = fr.compare_faces(encodeListKnown, encodeFace)
            faceDis = fr.face_distance(encodeListKnown, encodeFace)
            matchIndex = np.argmin(faceDis)
            if faceDis[matchIndex] < tolerance and matches[matchIndex]:
                name = classNames[matchIndex]
                annotate(img, faceLoc, name, (0, 255, 0), 1, 0.75, 10)
            else:
                # Bug fix: the label previously kept the *last* matched name;
                # an unrecognised face must always be reported as "Unknown".
                annotate(img, faceLoc, "Unknown", (0, 0, 255), 2, 1, 10)
        cv2.imshow("Webcam", img)
        count += 1  # bug fix: count was incremented twice per frame
        fps = cv2.getTickFrequency() / (cv2.getTickCount() - timeStamp)
        fpsReport = 0.95 * fpsReport + 0.05 * fps
        print(fpsReport)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
        if count > maxFrames:
            break
    cap.release()
    cv2.destroyAllWindows()
| [
"cv2.rectangle",
"cv2.imshow",
"cv2.destroyAllWindows",
"os.listdir",
"face_recognition.face_distance",
"numpy.argmin",
"cv2.waitKey",
"cv2.getTickFrequency",
"face_recognition.face_locations",
"os.path.splitext",
"cv2.putText",
"cv2.cvtColor",
"cv2.resize",
"cv2.imread",
"cv2.getTickCou... | [((373, 389), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (383, 389), False, 'import os\n'), ((740, 759), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(0)'], {}), '(0)\n', (756, 759), False, 'import cv2\n'), ((3611, 3634), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (3632, 3634), False, 'import cv2\n'), ((430, 457), 'cv2.imread', 'cv2.imread', (['f"""{path}/{cls}"""'], {}), "(f'{path}/{cls}')\n", (440, 457), False, 'import cv2\n'), ((673, 685), 'json.load', 'json.load', (['f'], {}), '(f)\n', (682, 685), False, 'import json\n'), ((910, 928), 'cv2.getTickCount', 'cv2.getTickCount', ([], {}), '()\n', (926, 928), False, 'import cv2\n'), ((1007, 1062), 'cv2.resize', 'cv2.resize', (['img', '(0, 0)', 'None', 'scaleFactor', 'scaleFactor'], {}), '(img, (0, 0), None, scaleFactor, scaleFactor)\n', (1017, 1062), False, 'import cv2\n'), ((1077, 1114), 'cv2.cvtColor', 'cv2.cvtColor', (['imgS', 'cv2.COLOR_BGR2RGB'], {}), '(imgS, cv2.COLOR_BGR2RGB)\n', (1089, 1114), False, 'import cv2\n'), ((1166, 1220), 'face_recognition.face_locations', 'fr.face_locations', (['imgS'], {'number_of_times_to_upsample': '(1)'}), '(imgS, number_of_times_to_upsample=1)\n', (1183, 1220), True, 'import face_recognition as fr\n'), ((1246, 1284), 'face_recognition.face_encodings', 'fr.face_encodings', (['imgS', 'facesCurFrame'], {}), '(imgS, facesCurFrame)\n', (1263, 1284), True, 'import face_recognition as fr\n'), ((3055, 3080), 'cv2.imshow', 'cv2.imshow', (['"""Webcam"""', 'img'], {}), "('Webcam', img)\n", (3065, 3080), False, 'import cv2\n'), ((1378, 1423), 'face_recognition.compare_faces', 'fr.compare_faces', (['encodeListKnown', 'encodeFace'], {}), '(encodeListKnown, encodeFace)\n', (1394, 1423), True, 'import face_recognition as fr\n'), ((1446, 1491), 'face_recognition.face_distance', 'fr.face_distance', (['encodeListKnown', 'encodeFace'], {}), '(encodeListKnown, encodeFace)\n', (1462, 1491), True, 'import face_recognition as fr\n'), ((1518, 
1536), 'numpy.argmin', 'np.argmin', (['faceDis'], {}), '(faceDis)\n', (1527, 1536), True, 'import numpy as np\n'), ((3151, 3173), 'cv2.getTickFrequency', 'cv2.getTickFrequency', ([], {}), '()\n', (3171, 3173), False, 'import cv2\n'), ((514, 535), 'os.path.splitext', 'os.path.splitext', (['cls'], {}), '(cls)\n', (530, 535), False, 'import os\n'), ((2818, 2872), 'cv2.rectangle', 'cv2.rectangle', (['img', '(x1, y1)', '(x2, y2)', '(0, 0, 255)', '(2)'], {}), '(img, (x1, y1), (x2, y2), (0, 0, 255), 2)\n', (2831, 2872), False, 'import cv2\n'), ((2884, 2954), 'cv2.rectangle', 'cv2.rectangle', (['img', '(x1, y2 - 35)', '(x2, y2)', '(0, 255, 255)', 'cv2.FILLED'], {}), '(img, (x1, y2 - 35), (x2, y2), (0, 255, 255), cv2.FILLED)\n', (2897, 2954), False, 'import cv2\n'), ((2964, 3053), 'cv2.putText', 'cv2.putText', (['img', 'name', '(x1 + 10, y2 - 10)', 'cv2.FONT_HERSHEY_COMPLEX', '(1)', '(0, 0, 0)', '(1)'], {}), '(img, name, (x1 + 10, y2 - 10), cv2.FONT_HERSHEY_COMPLEX, 1, (0,\n 0, 0), 1)\n', (2975, 3053), False, 'import cv2\n'), ((3175, 3193), 'cv2.getTickCount', 'cv2.getTickCount', ([], {}), '()\n', (3191, 3193), False, 'import cv2\n'), ((3289, 3303), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (3300, 3303), False, 'import cv2\n'), ((1858, 1912), 'cv2.rectangle', 'cv2.rectangle', (['img', '(x1, y1)', '(x2, y2)', '(0, 255, 0)', '(1)'], {}), '(img, (x1, y1), (x2, y2), (0, 255, 0), 1)\n', (1871, 1912), False, 'import cv2\n'), ((1928, 1998), 'cv2.rectangle', 'cv2.rectangle', (['img', '(x1, y2 - 35)', '(x2, y2)', '(0, 255, 255)', 'cv2.FILLED'], {}), '(img, (x1, y2 - 35), (x2, y2), (0, 255, 255), cv2.FILLED)\n', (1941, 1998), False, 'import cv2\n'), ((2012, 2104), 'cv2.putText', 'cv2.putText', (['img', 'name', '(x1 + 10, y2 - 10)', 'cv2.FONT_HERSHEY_COMPLEX', '(0.75)', '(0, 0, 0)', '(1)'], {}), '(img, name, (x1 + 10, y2 - 10), cv2.FONT_HERSHEY_COMPLEX, 0.75,\n (0, 0, 0), 1)\n', (2023, 2104), False, 'import cv2\n'), ((2396, 2450), 'cv2.rectangle', 'cv2.rectangle', (['img', 
'(x1, y1)', '(x2, y2)', '(0, 0, 255)', '(2)'], {}), '(img, (x1, y1), (x2, y2), (0, 0, 255), 2)\n', (2409, 2450), False, 'import cv2\n'), ((2466, 2536), 'cv2.rectangle', 'cv2.rectangle', (['img', '(x1, y2 - 35)', '(x2, y2)', '(0, 255, 255)', 'cv2.FILLED'], {}), '(img, (x1, y2 - 35), (x2, y2), (0, 255, 255), cv2.FILLED)\n', (2479, 2536), False, 'import cv2\n'), ((2550, 2637), 'cv2.putText', 'cv2.putText', (['img', 'name', '(x1 + 6, y2 - 6)', 'cv2.FONT_HERSHEY_COMPLEX', '(1)', '(0, 0, 0)', '(2)'], {}), '(img, name, (x1 + 6, y2 - 6), cv2.FONT_HERSHEY_COMPLEX, 1, (0, 0,\n 0), 2)\n', (2561, 2637), False, 'import cv2\n')] |
import cv2
import numpy as np
'''def read_file(filename):
img = cv2.imread(filename)
cv2_imshow(img)
return img'''
def color_quantization(img, k):
    """Reduce the image to k representative colors using OpenCV k-means.

    :param img: BGR image as a numpy array
    :param k: number of color clusters to keep
    :return: image of the same shape with each pixel replaced by its
        cluster's mean color
    """
    # k-means works on a flat list of 3-channel float samples
    samples = np.float32(img).reshape((-1, 3))
    # stop after 20 iterations or when centers move by less than 0.001
    stop_criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 20, 0.001)
    _, labels, centers = cv2.kmeans(
        samples, k, None, stop_criteria, 10, cv2.KMEANS_RANDOM_CENTERS
    )
    # map every pixel back to its (integer) cluster-center color
    palette = np.uint8(centers)
    quantized = palette[labels.flatten()]
    return quantized.reshape(img.shape)
def edge_mask(img, line_size, blur_value):
    """Build a binary edge mask for the cartoon effect.

    Converts the image to grayscale, median-blurs it to suppress noise,
    then applies adaptive mean thresholding; line_size controls the
    neighborhood (and hence edge thickness).
    """
    grayscale = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    denoised = cv2.medianBlur(grayscale, blur_value)
    return cv2.adaptiveThreshold(
        denoised,
        255,
        cv2.ADAPTIVE_THRESH_MEAN_C,
        cv2.THRESH_BINARY,
        line_size,
        blur_value,
    )
#uploaded = files.upload()
#filename = next(iter(uploaded))
def filter_func(filename, edges_path="blackwhite.jpg", cartoon_path="toonify.jpg",
                line_size=5, blur_value=7, total_color=9):
    """Cartoonify the image stored at `filename`.

    Writes the intermediate edge mask to `edges_path` and the final cartoon
    image to `cartoon_path` (defaults preserve the previously hard-coded
    file names, so existing callers are unaffected).

    :param filename: path of the input image
    :param edges_path: where to save the black-and-white edge mask
    :param cartoon_path: where to save the cartoonified result
    :param line_size: edge thickness passed to the adaptive threshold
    :param blur_value: median-blur kernel size used by the edge mask
    :param total_color: number of colors kept by k-means quantization
    :return: the cartoonified image array
    :raises FileNotFoundError: if the input image cannot be read
    """
    img = cv2.imread(filename)
    if img is None:
        # cv2.imread silently returns None for a missing/unreadable file,
        # which previously caused an obscure failure further down
        raise FileNotFoundError("could not read image: %s" % filename)
    edges = edge_mask(img, line_size, blur_value)
    cv2.imwrite(edges_path, edges)
    # smooth colors while keeping edges sharp
    blurred = cv2.bilateralFilter(img, d=7, sigmaColor=200, sigmaSpace=200)
    blurred = color_quantization(blurred, total_color)
    # combine the flattened colors with the dark edge lines
    cartoon = cv2.bitwise_and(blurred, blurred, mask=edges)
    cv2.imwrite(cartoon_path, cartoon)
    return cartoon
| [
"numpy.uint8",
"cv2.imwrite",
"cv2.bilateralFilter",
"cv2.kmeans",
"cv2.medianBlur",
"cv2.bitwise_and",
"cv2.adaptiveThreshold",
"cv2.cvtColor",
"cv2.imread",
"numpy.float32"
] | [((364, 430), 'cv2.kmeans', 'cv2.kmeans', (['data', 'k', 'None', 'criteria', '(10)', 'cv2.KMEANS_RANDOM_CENTERS'], {}), '(data, k, None, criteria, 10, cv2.KMEANS_RANDOM_CENTERS)\n', (374, 430), False, 'import cv2\n'), ((442, 458), 'numpy.uint8', 'np.uint8', (['center'], {}), '(center)\n', (450, 458), True, 'import numpy as np\n'), ((600, 637), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2GRAY'], {}), '(img, cv2.COLOR_BGR2GRAY)\n', (612, 637), False, 'import cv2\n'), ((652, 684), 'cv2.medianBlur', 'cv2.medianBlur', (['gray', 'blur_value'], {}), '(gray, blur_value)\n', (666, 684), False, 'import cv2\n'), ((695, 807), 'cv2.adaptiveThreshold', 'cv2.adaptiveThreshold', (['gray_blur', '(255)', 'cv2.ADAPTIVE_THRESH_MEAN_C', 'cv2.THRESH_BINARY', 'line_size', 'blur_value'], {}), '(gray_blur, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.\n THRESH_BINARY, line_size, blur_value)\n', (716, 807), False, 'import cv2\n'), ((919, 939), 'cv2.imread', 'cv2.imread', (['filename'], {}), '(filename)\n', (929, 939), False, 'import cv2\n'), ((1064, 1100), 'cv2.imwrite', 'cv2.imwrite', (['"""blackwhite.jpg"""', 'edges'], {}), "('blackwhite.jpg', edges)\n", (1075, 1100), False, 'import cv2\n'), ((1114, 1175), 'cv2.bilateralFilter', 'cv2.bilateralFilter', (['img'], {'d': '(7)', 'sigmaColor': '(200)', 'sigmaSpace': '(200)'}), '(img, d=7, sigmaColor=200, sigmaSpace=200)\n', (1133, 1175), False, 'import cv2\n'), ((1378, 1423), 'cv2.bitwise_and', 'cv2.bitwise_and', (['blurred', 'blurred'], {'mask': 'edges'}), '(blurred, blurred, mask=edges)\n', (1393, 1423), False, 'import cv2\n'), ((1462, 1497), 'cv2.imwrite', 'cv2.imwrite', (['"""toonify.jpg"""', 'cartoon'], {}), "('toonify.jpg', cartoon)\n", (1473, 1497), False, 'import cv2\n'), ((185, 200), 'numpy.float32', 'np.float32', (['img'], {}), '(img)\n', (195, 200), True, 'import numpy as np\n')] |
import copy
import itertools
from functools import lru_cache
from typing import List, Dict
import numpy as np
import numpy
from summer.constants import (
Compartment,
Flow,
BirthApproach,
Stratification,
IntegrationType,
)
from .epi_model import EpiModel
from .utils import (
convert_boolean_list_to_indices,
create_cumulative_dict,
create_function_of_function,
create_multiplicative_function,
create_stratified_name,
create_stratum_name,
create_time_variant_multiplicative_function,
element_list_multiplication,
element_list_division,
extract_reversed_x_positions,
find_name_components,
find_stem,
increment_list_by_index,
)
# Rate used when nudging strata proportions towards requested target proportions.
STRATA_EQUILIBRATION_FACTOR = 0.01
# Trailing character on a stratum key marking its value as an over-write (absolute) parameter.
OVERWRITE_CHARACTER = "W"
# Dictionary key under which the strata to be over-written are collected.
OVERWRITE_KEY = "overwrite"
class StratifiedModel(EpiModel):
"""
stratified version of the epidemiological model that inherits from EpiModel above, which is a concrete class and
could in theory run stratified models independently
however, this class should make the stratification process more algorithmic, easier and more reliable
:attribute adaptation_functions: dict
single stage functions representing each stratified parameter component, from which to build the final functions
(i.e. final_parameter_functions)
:attribute all_stratifications: dictionary
keys are all the stratification names implemented so far. values are the list of strata for each stratification
:attribute available_death_rates: list
single strata names for which population_wide mortality will be adjusted (or over-written)
:attribute compartment_types_to_stratify: list
the compartments that are being stratified at this round of model stratification
:attribute final_parameter_functions: dict
a function representing each parameter that will be implemented during integration,
constructed recursively for stratification
:attribute full_stratifications_list: list
all the stratification names implemented so far that apply to all of the compartment types
:attribute heterogeneous_mixing: bool
whether any stratification has requested heterogeneous mixing, such that it will be implemented
:attribute infectious_compartments: tuple
all of the compartment stems that represent compartments with some degree of infectiousness
:attribute infectious_indices: dict
keys are strains being implemented with "all_strains" an additional standard key, such that models that are not
stratified by strain will only have the key "all_strains"
values are lists of the indices of the compartments that are infectious for that strain (or overall)
:attribute infectious_denominators: float
total size of the population, which effective infectious population will be divided through by in the case of
frequency-dependent transmission
:attribute infectious_populations: dict
keys are strains
values are lists with each list element representing a mixing category, so that this can be multiplied through
by a row of the mixing matrix
:attribute infectiousness_adjustments: dict
user-submitted adjustments to infectiousness for the stratification currently being implemented
:attribute infectiousness_levels: dict
keys are any strata for any stratification for which infectiousness will be adjusted, which does not need to be
exhaustive
values are their relative multipliers
:attribute infectiousness_multipliers: list
multipliers for the relative infectiousness of each compartment attributable to stratification, regardless of
whether they are actually infectious compartments or not and with arbitrary values which start from one and
are then modified by the user requests
:attribute mixing_categories: list
the effective mixing categories, which consists of all the possible combinations of all the strata within the
model's full stratifications that incorporate heterogeneous mixing
contents are strings joined with the standard linking character
:attribute mixing_denominator_indices: dict
keys are te mixing categories
values are lists of the indices that should be used to calculate the infectious population for that mixing
category
:attribute mixing_matrix: numpy array
array formed by taking the kronecker product of all the mixing matrices provided for full stratifications for
which heterogeneous mixing was requested
:attribute mortality_components: dict
keys for the name of each compartment, values the list of functions needed to recursively create the functions
to calculate the mortality rates for each compartment
:attribute overwrite_character: str
standard string (usually single character and currently "W") to indicate that a stratum request is intended to
over-write less stratified parameters
:attribute overwrite_key: str
standard string used by model to identify the dictionary element that represents the over-write parameters,
rather than a request to a particular stratum
:attribute overwrite_parameters: list
parameters which will result in all the less stratified parameters closer to the stratification tree's trunk
being ignored
:attribute parameter_components: dict
keys for the name of each transition parameter, values the list of functions needed to recursively create the
functions to create these parameter values
:attribute parameters: dict
same format as for EpiModel (but described here again given the other parameter-related attributes)
unprocessed parameters, which may be either float values or strings pointing to the keys of time_variants
:attribute removed_compartments: list
all unstratified compartments that have been removed through the stratification process
:attribute overwrite_parameters: list
any parameters that are intended as absolute values to be applied to that stratum and not multipliers for the
unstratified parameter further up the tree
:attribute strain_mixing_elements: dict
first tier of keys is strains
second tier of keys is mixing categories
content of lists at lowest/third tier is the indices of the compartments that are relevant to this strain and
category
:attribute strain_mixing_multipliers: dict
first tier of keys is strains
second tier of keys is mixing categories
content of lists at lowest/third tier is the final infectiousness multiplier for the compartments for this
strain and category
:attribute strains: list
the strata to the strains stratification with specific behaviour
"""
"""
general methods
"""
def add_compartment(self, new_compartment_name, new_compartment_value):
"""
add a compartment by specifying its name and the starting value for it to take
:param new_compartment_name: str
name of the new compartment to be created
:param new_compartment_value: float
initial value to be assigned to the new compartment before integration
"""
self.compartment_names.append(new_compartment_name)
self.compartment_values.append(new_compartment_value)
self.output_to_user("adding compartment: %s" % new_compartment_name)
def remove_compartment(self, compartment_name):
"""
remove a compartment by taking the element out of the compartment_names and compartment_values attributes
store name of removed compartment in a separate attribute
:param compartment_name: str
name of compartment to be removed
"""
self.removed_compartments.append(compartment_name)
del self.compartment_values[self.compartment_names.index(compartment_name)]
del self.compartment_names[self.compartment_names.index(compartment_name)]
self.output_to_user("removing compartment: %s" % compartment_name)
def __init__(
self,
times,
compartment_types,
initial_conditions,
parameters,
requested_flows,
infectious_compartment=(Compartment.EARLY_INFECTIOUS,),
birth_approach=BirthApproach.NO_BIRTH,
verbose=False,
reporting_sigfigs=4,
entry_compartment=Compartment.SUSCEPTIBLE,
starting_population=1,
output_connections=None,
death_output_categories=None,
derived_output_functions=None,
ticker=False,
):
super().__init__(
times,
compartment_types,
initial_conditions,
parameters,
requested_flows,
infectious_compartment,
birth_approach,
verbose,
reporting_sigfigs,
entry_compartment,
starting_population,
output_connections,
death_output_categories,
derived_output_functions,
ticker,
)
self.full_stratification_list = []
self.removed_compartments = []
self.overwrite_parameters = []
self.compartment_types_to_stratify = []
self.strains = []
self.mixing_categories = []
self.unstratified_compartment_names = []
self.all_stratifications = {}
self.infectiousness_adjustments = {}
self.final_parameter_functions = {}
self.adaptation_functions = {}
self.infectiousness_levels = {}
self.infectious_indices = {}
self.infectious_compartments = {}
self.infectiousness_multipliers = {}
self.parameter_components = {}
self.mortality_components = {}
self.infectious_populations = {}
self.strain_mixing_elements = {}
self.strain_mixing_multipliers = {}
self.strata_indices = {}
self.target_props = {}
self.cumulative_target_props = {}
self.individual_infectiousness_adjustments = []
self.heterogeneous_mixing = False
self.mixing_matrix = None
self.available_death_rates = [""]
self.dynamic_mixing_matrix = False
self.mixing_indices = {}
self.infectious_denominators = []
"""
stratification methods
"""
def stratify(
self,
stratification_name,
strata_request,
compartment_types_to_stratify,
requested_proportions,
entry_proportions={},
adjustment_requests=(),
infectiousness_adjustments={},
mixing_matrix=None,
target_props=None,
verbose=False,
):
"""
calls to initial preparation, checks and methods that stratify the various aspects of the model
:param stratification_name:
see prepare_and_check_stratification
:param strata_request:
see find_strata_names_from_input
:param compartment_types_to_stratify:
see check_compartment_request
:param adjustment_requests:
see incorporate_alternative_overwrite_approach and check_parameter_adjustment_requests
:param requested_proportions:
see prepare_starting_proportions
:param entry_proportions:
:param infectiousness_adjustments:
:param mixing_matrix:
see check_mixing
:param verbose: bool
whether to report on progress
note that this can be changed at this stage from what was requested at the original unstratified model
construction
:param target_props: dict
keys are the strata being implemented at this call to stratify
values are the desired proportions to target
"""
# check inputs correctly specified
strata_names, adjustment_requests = self.prepare_and_check_stratification(
stratification_name,
strata_request,
compartment_types_to_stratify,
adjustment_requests,
target_props,
verbose,
)
# work out ageing flows - comes first, so that the compartment names remain in the unstratified form
if stratification_name == "age":
self.set_ageing_rates(strata_names)
# retain copy of compartment names in their stratified form to refer back to during stratification process
self.unstratified_compartment_names = copy.copy(self.compartment_names)
# stratify the compartments
requested_proportions = self.prepare_starting_proportions(
strata_names, requested_proportions
)
self.stratify_compartments(
stratification_name,
strata_names,
requested_proportions,
self.compartment_types_to_stratify,
)
# stratify the flows
self.stratify_transition_flows(
stratification_name,
strata_names,
adjustment_requests,
self.compartment_types_to_stratify,
)
self.stratify_entry_flows(
stratification_name, strata_names, entry_proportions, requested_proportions
)
if self.death_flows.shape[0] > 0:
self.stratify_death_flows(stratification_name, strata_names, adjustment_requests)
self.stratify_universal_death_rate(
stratification_name, strata_names, adjustment_requests, compartment_types_to_stratify,
)
# if stratifying by strain
self.strains = strata_names if stratification_name == "strain" else self.strains
# check submitted mixing matrix and combine with existing matrix, if any
self.prepare_mixing_matrix(mixing_matrix, stratification_name, strata_names)
# prepare infectiousness levels attribute
self.prepare_infectiousness_levels(
stratification_name, strata_names, infectiousness_adjustments
)
# prepare strata equilibration target proportions
if target_props:
self.prepare_and_check_target_props(target_props, stratification_name, strata_names)
"""
stratification checking methods
"""
def prepare_and_check_stratification(
self,
_stratification_name,
_strata_names,
_compartment_types_to_stratify,
_adjustment_requests,
_target_props,
_verbose,
):
"""
initial preparation and checks of user-submitted arguments
:param _stratification_name: str
the name of the stratification - i.e. the reason for implementing this type of stratification
:param _strata_names:
see find_strata_names_from_input
:param _compartment_types_to_stratify:
see check_compartment_request
:param _adjustment_requests:
see incorporate_alternative_overwrite_approach and check_parameter_adjustment_requests
:param _verbose:
see stratify
:param _target_props:
see stratify
:return:
_strata_names: list
revised version of user request after adaptation to class requirements
adjustment_requests:
revised version of _adjustment_requests after adaptation to class requirements
"""
# collate all the stratifications that have been implemented so far
if not _compartment_types_to_stratify:
self.full_stratification_list.append(_stratification_name)
# report progress
self.verbose = _verbose
self.output_to_user(
"\n___________________\nimplementing stratification for: %s" % _stratification_name
)
# deal with stratifications that have specific behaviour
if _stratification_name == "age":
_strata_names = self.check_age_stratification(
_strata_names, _compartment_types_to_stratify
)
elif _stratification_name == "strain":
self.output_to_user("implementing strain stratification with specific behaviour")
# make sure the stratification name is a string
if not isinstance(_stratification_name, str):
_stratification_name = str(_stratification_name)
self.output_to_user(
"converting stratification name %s to string" % _stratification_name
)
# check target proportions correctly specified
if _target_props:
for restriction in _target_props:
if not type(_target_props[restriction]) == dict:
raise TypeError("target proportions not provided as dictionary")
elif type(_target_props[restriction]) == dict and any(
[
target_key not in _strata_names
for target_key in _target_props[restriction].keys()
]
):
raise ValueError("requested target proportion strata not in requested strata")
# ensure requested stratification hasn't previously been implemented
if _stratification_name in self.all_stratifications.keys():
raise ValueError(
"requested stratification has already been implemented, please choose a different name"
)
# record stratification as model attribute, find the names to apply strata and check requests
_strata_names = self.find_strata_names_from_input(_strata_names)
self.all_stratifications[_stratification_name] = _strata_names
_adjustment_requests = self.incorporate_alternative_overwrite_approach(_adjustment_requests)
self.check_compartment_request(_compartment_types_to_stratify)
self.check_parameter_adjustment_requests(_adjustment_requests, _strata_names)
return _strata_names, _adjustment_requests
def check_age_stratification(self, _strata_names, _compartment_types_to_stratify):
"""
check that the user request meets the requirements for stratification by age
:parameters: all parameters have come directly from the stratification (stratify) method unchanged and have been
renamed with a preceding _ character
:return: _strata_names: list
revised names of the strata tiers to be implemented
"""
self.output_to_user("implementing age stratification with specific behaviour")
if len(_compartment_types_to_stratify) > 0:
raise ValueError(
"requested age stratification, but compartment request should be passed as empty vector "
+ "in order to apply to all compartments"
)
elif not all([isinstance(stratum, (int, float)) for stratum in _strata_names]):
raise ValueError("inputs for age strata breakpoints are not numeric")
if 0 not in _strata_names:
_strata_names.append(0)
self.output_to_user(
"adding age stratum called '0' because not requested, which represents those aged "
+ "less than %s" % min(_strata_names)
)
if _strata_names != sorted(_strata_names):
_strata_names = sorted(_strata_names)
self.output_to_user(
"requested age strata not ordered, so have been sorted to: %s" % _strata_names
)
return _strata_names
def find_strata_names_from_input(self, _strata_names):
"""
find the names of the strata to be implemented from a particular user request
:parameters: list or alternative format to be adapted
strata requested in the format provided by the user (except for age, which is dealth with in the preceding
method)
:return: strata_names: list
modified list of strata to be implemented in model
"""
if type(_strata_names) == int:
_strata_names = numpy.arange(1, _strata_names + 1)
self.output_to_user(
"single integer provided as strata labels for stratification, hence strata "
+ "implemented will be integers from one to %s" % _strata_names
)
elif type(_strata_names) == float:
raise ValueError(
"single value passed as request for strata labels, but not an integer greater than "
+ "one, so unclear what to do - stratification failed"
)
elif type(_strata_names) == list and len(_strata_names) > 0:
pass
else:
raise ValueError(
"requested to stratify, but strata-level names not submitted in correct format"
)
for name in range(len(_strata_names)):
_strata_names[name] = str(_strata_names[name])
self.output_to_user("adding stratum: %s" % _strata_names[name])
return _strata_names
def incorporate_alternative_overwrite_approach(self, _adjustment_requests):
"""
alternative approach to working out which parameters to overwrite
can put a capital W at the string's end to indicate that it is an overwrite parameter, as an alternative to
submitting a separate dictionary key to represent the strata which need to be overwritten
:param _adjustment_requests: dict
user-submitted version of adjustment requests
:return: revised_adjustments: dict
modified version of _adjustment_requests after working out whether any parameters began with W
"""
# has to be constructed as a separate dictionary to avoid change of size during iteration
revised_adjustments = {}
for parameter in _adjustment_requests:
revised_adjustments[parameter] = {}
# ignore overwrite if submitted with the standard approach
for stratum in _adjustment_requests[parameter]:
if stratum == OVERWRITE_KEY:
continue
# if the parameter ends in W, interpret as an overwrite parameter and added to this key
elif stratum[-1] == OVERWRITE_CHARACTER:
if OVERWRITE_KEY not in revised_adjustments[parameter]:
revised_adjustments[parameter][OVERWRITE_KEY] = []
revised_adjustments[parameter][stratum[:-1]] = _adjustment_requests[parameter][
stratum
]
revised_adjustments[parameter][OVERWRITE_KEY].append(stratum[:-1])
# otherwise just accept the parameter in its submitted form
else:
revised_adjustments[parameter][stratum] = _adjustment_requests[parameter][
stratum
]
if OVERWRITE_KEY not in revised_adjustments:
revised_adjustments[OVERWRITE_KEY] = []
return revised_adjustments
def check_compartment_request(self, _compartment_types_to_stratify):
"""
check the requested compartments to be stratified has been requested correctly
:param _compartment_types_to_stratify: list
the names of the compartment types that the requested stratification is intended to apply to
"""
# if list of length zero passed, stratify all the compartment types in the model
if len(_compartment_types_to_stratify) == 0:
self.compartment_types_to_stratify = self.compartment_types
self.output_to_user(
"no compartment names specified for this stratification, "
+ "so stratification applied to all model compartments"
)
# otherwise check all the requested compartments are available and implement the user request
elif any(
[
compartment not in self.compartment_types
for compartment in self.compartment_types_to_stratify
]
):
raise ValueError(
"requested compartment or compartments to be stratified are not available in this model"
)
else:
self.compartment_types_to_stratify = _compartment_types_to_stratify
def check_parameter_adjustment_requests(self, _adjustment_requests, _strata_names):
"""
check parameter adjustments have been requested appropriately and add parameter for any strata not referred to
:param _adjustment_requests: dict
version of the submitted adjustment_requests modified by incorporate_alternative_overwrite_approach
:param _strata_names:
see find_strata_names_from_input
"""
for parameter in _adjustment_requests:
if any(
requested_stratum not in _strata_names + [OVERWRITE_KEY]
for requested_stratum in _adjustment_requests[parameter]
):
raise ValueError(
"a stratum was requested in adjustments that is not available in this stratification"
)
"""
stratification preparation methods
"""
def set_ageing_rates(self, strata_names):
"""
Set inter-compartmental flows for ageing from one stratum to the next.
The ageing rate is proportional to the width of the age bracket.
"""
ageing_flows = []
for strata_idx in range(len(strata_names) - 1):
start_age = int(strata_names[strata_idx])
end_age = int(strata_names[strata_idx + 1])
ageing_parameter_name = f"ageing{start_age}to{end_age}"
ageing_rate = 1.0 / (end_age - start_age)
self.parameters[ageing_parameter_name] = ageing_rate
for compartment in self.compartment_names:
ageing_flow = {
"type": Flow.STANDARD,
"parameter": ageing_parameter_name,
"origin": create_stratified_name(compartment, "age", start_age),
"to": create_stratified_name(compartment, "age", end_age),
"implement": len(self.all_stratifications),
}
ageing_flows.append(ageing_flow)
self.transition_flows = self.transition_flows.append(ageing_flows)
def prepare_starting_proportions(self, _strata_names, _requested_proportions):
"""
prepare user inputs for starting proportions for the initial conditions to apply to the exact set of strata
requested
if one or more strata not specified, the proportion of the initial conditions allocated to that group will be
the total unallocated population divided by the number of strata for which no request was specified
:param _strata_names:
see find_strata_names_from_input
:param _requested_proportions: dict
dictionary with keys for the stratum to assign starting population to and values the proportions to assign
:return: dict
revised dictionary of starting proportions after cleaning
"""
self.output_to_user(
"\n-----\ncalculating proportions of initial conditions to assign to each stratified starting compartment"
)
if any(stratum not in _strata_names for stratum in _requested_proportions):
raise ValueError(
"requested starting proportion for stratum that does not appear in requested strata"
)
if sum(_requested_proportions.values()) > 1.0:
raise ValueError("requested starting proportions sum to a value greater than one")
# assuming an equal proportion of the unallocated population if no request specified
unrequested_strata = [
stratum for stratum in _strata_names if stratum not in _requested_proportions
]
unrequested_proportions = {}
for stratum in unrequested_strata:
starting_proportion = (1.0 - sum(_requested_proportions.values())) / len(
unrequested_strata
)
unrequested_proportions[stratum] = starting_proportion
self.output_to_user(
"no starting proportion requested for %s stratum so provisionally allocated %s of total"
% (stratum, round(starting_proportion, self.reporting_sigfigs))
)
# update specified proportions with inferred unspecified proportions
_requested_proportions.update(unrequested_proportions)
return _requested_proportions
def stratify_compartments(
self,
stratification_name: str,
strata_names: List[str],
strata_proportions: Dict[str, float],
compartments_to_stratify: List[str],
):
"""
Stratify the model compartments into sub-compartments, based on the strata names provided,
splitting the population according to the provided proprotions. Stratification will be applied
to compartment_names and compartment_values.
Only compartments specified in `self.compartment_types_to_stratify` will be stratified.
"""
# Find the existing compartments that need stratification
compartments_to_stratify = [
c for c in self.compartment_names if find_stem(c) in compartments_to_stratify
]
for compartment in compartments_to_stratify:
# Add newm stratified compartment.
for stratum in strata_names:
name = create_stratified_name(compartment, stratification_name, stratum)
idx = self.compartment_names.index(compartment)
value = self.compartment_values[idx] * strata_proportions[stratum]
self.add_compartment(name, value)
# Remove the original compartment, since it has now been stratified.
self.remove_compartment(compartment)
    def stratify_transition_flows(
        self,
        stratification_name: str,
        strata_names: List[str],
        adjustment_requests: Dict[str, Dict[str, float]],
        compartments_to_stratify: List[str],
    ):
        """
        Stratify flows depending on whether inflow, outflow or both need replication

        :param stratification_name: name of the stratification being applied
        :param strata_names: strata over which affected flows are replicated
        :param adjustment_requests: per-parameter, per-stratum parameter adjustments
        :param compartments_to_stratify: compartment stems this stratification applies to
        """
        flow_idxs = self.find_transition_indices_to_implement(back_one=1, include_change=True)
        all_new_flows = []
        for n_flow in flow_idxs:
            new_flows = []
            flow = self.transition_flows.iloc[n_flow]
            # a flow is affected if either of its endpoint stems is being stratified
            stratify_from = find_stem(flow.origin) in compartments_to_stratify
            stratify_to = find_stem(flow.to) in compartments_to_stratify
            if stratify_from or stratify_to:
                # replicate the flow once per stratum
                for stratum in strata_names:
                    # Find the flow's parameter name
                    parameter_name = self.add_adjusted_parameter(
                        flow.parameter, stratification_name, stratum, adjustment_requests,
                    )
                    # fall back when no adjustment was requested for this stratum
                    if not parameter_name:
                        parameter_name = self.sort_absent_transition_parameter(
                            stratification_name,
                            strata_names,
                            stratum,
                            stratify_from,
                            stratify_to,
                            flow.parameter,
                        )
                    # Determine whether to and/or from compartments are stratified
                    from_compartment = (
                        create_stratified_name(flow.origin, stratification_name, stratum)
                        if stratify_from
                        else flow.origin
                    )
                    to_compartment = (
                        create_stratified_name(flow.to, stratification_name, stratum)
                        if stratify_to
                        else flow.to
                    )
                    # Add the new flow; strain-stratified flows record their stratum as the strain
                    strain = (
                        stratum
                        if stratification_name == "strain" and flow.type != Flow.STRATA_CHANGE
                        else flow.strain
                    )
                    new_flow = {
                        "type": flow.type,
                        "parameter": parameter_name,
                        "origin": from_compartment,
                        "to": to_compartment,
                        "implement": len(self.all_stratifications),
                        "strain": strain,
                    }
                    new_flows.append(new_flow)

            else:
                # If flow applies to a transition not involved in the stratification,
                # still increment to ensure that it is implemented.
                new_flow = flow.to_dict()
                new_flow["implement"] += 1
                new_flows.append(new_flow)

            # Update the customised flow functions, re-keying each custom flow by the
            # index it will occupy once all pending flows have been appended.
            num_flows = len(self.transition_flows) + len(all_new_flows)
            for idx, new_flow in enumerate(new_flows):
                if new_flow["type"] == Flow.CUSTOM:
                    new_idx = num_flows + idx
                    self.customised_flow_functions[new_idx] = self.customised_flow_functions[n_flow]

            all_new_flows += new_flows

        if all_new_flows:
            self.transition_flows = self.transition_flows.append(all_new_flows, ignore_index=True)
    def add_adjusted_parameter(
        self, _unadjusted_parameter, _stratification_name, _stratum, _adjustment_requests,
    ):
        """
        find the adjustment request that is relevant to a particular unadjusted parameter and stratum and add the
        parameter value (str for function or float) to the parameters dictionary attribute
        otherwise allow return of None

        :param _unadjusted_parameter:
            name of the unadjusted parameter value
        :param _stratification_name:
            see prepare_and_check_stratification
        :param _stratum:
            stratum being considered by the method calling this method
        :param _adjustment_requests:
            see incorporate_alternative_overwrite_approach and check_parameter_adjustment_requests
        :return: parameter_adjustment_name: str or None
            if returned as None, assumption will be that the original, unstratified parameter should be used
            otherwise create a new parameter name and value and store away in the appropriate model structure
        """
        parameter_adjustment_name = None
        # the most stratified submitted request that is a prefix of this parameter, if any
        relevant_adjustment_request = self.find_relevant_adjustment_request(
            _adjustment_requests, _unadjusted_parameter
        )
        if relevant_adjustment_request is not None:
            # only mint a stratified name when this stratum was explicitly adjusted,
            # otherwise keep referring to the unadjusted parameter
            parameter_adjustment_name = (
                create_stratified_name(_unadjusted_parameter, _stratification_name, _stratum)
                if _stratum in _adjustment_requests[relevant_adjustment_request]
                else _unadjusted_parameter
            )
            self.output_to_user(
                "\t parameter for %s stratum of %s stratification is called %s"
                % (_stratum, _stratification_name, parameter_adjustment_name)
            )
            # store the requested value (float, or str naming a time-variant function)
            if _stratum in _adjustment_requests[relevant_adjustment_request]:
                self.parameters[parameter_adjustment_name] = _adjustment_requests[
                    relevant_adjustment_request
                ][_stratum]

            # record the parameters that over-write the less stratified parameters closer to the trunk of the tree
            if (
                OVERWRITE_KEY in _adjustment_requests[relevant_adjustment_request]
                and _stratum in _adjustment_requests[relevant_adjustment_request][OVERWRITE_KEY]
            ):
                self.overwrite_parameters.append(parameter_adjustment_name)
        return parameter_adjustment_name
def find_relevant_adjustment_request(self, _adjustment_requests, _unadjusted_parameter):
"""
find the adjustment requests that are extensions of the base parameter type being considered
expected behaviour is as follows:
* if there are no submitted requests (keys to the adjustment requests) that are extensions of the unadjusted
parameter, will return None
* if there is one submitted request that is an extension of the unadjusted parameter, will return that parameter
* if there are multiple submitted requests that are extensions to the unadjusted parameter and one is more
stratified than any of the others (i.e. more instances of the "X" string), will return this most stratified
parameter
* if there are multiple submitted requests that are extensions to the unadjusted parameter and several of them
are equal in having the greatest extent of stratification, will return the longest string
:param _unadjusted_parameter:
see add_adjusted_parameter
:param _adjustment_requests:
see prepare_and_check_stratification
:return: str or None
the key of the adjustment request that is applicable to the parameter of interest if any, otherwise None
"""
# find all the requests that start with the parameter of interest and their level of stratification
applicable_params = [
param for param in _adjustment_requests if _unadjusted_parameter.startswith(param)
]
applicable_param_n_stratifications = [
len(find_name_components(param)) for param in applicable_params
]
if applicable_param_n_stratifications:
max_length_indices = [
i_p
for i_p, p in enumerate(applicable_param_n_stratifications)
if p == max(applicable_param_n_stratifications)
]
candidate_params = [applicable_params[i] for i in max_length_indices]
return max(candidate_params, key=len)
else:
return None
def sort_absent_transition_parameter(
self,
_stratification_name,
_strata_names,
_stratum,
_stratify_from,
_stratify_to,
unstratified_name,
):
"""
work out what to do if a specific transition parameter adjustment has not been requested
:param _stratification_name:
see prepare_and_check_stratification
:param _strata_names:
see find_strata_names_from_input
:param _stratum:
:param _stratify_from:
see add_stratified_flows
:param _stratify_to:
see add_stratified_flows
:param unstratified_name: str
the name of the parameter before the stratification is implemented
:return: str
parameter name for revised parameter than wasn't provided
"""
# default behaviour if not specified is to split the parameter into equal parts if to compartment is split
if not _stratify_from and _stratify_to:
self.output_to_user(
"\t splitting existing parameter value %s into %s equal parts"
% (unstratified_name, len(_strata_names))
)
parameter_name = create_stratified_name(
unstratified_name, _stratification_name, _stratum
)
self.parameters[parameter_name] = 1.0 / len(_strata_names)
self.adaptation_functions[parameter_name] = create_multiplicative_function(
1.0 / len(_strata_names)
)
return parameter_name
# otherwise if no request, retain the existing parameter
else:
self.output_to_user("\tretaining existing parameter value %s" % unstratified_name)
return unstratified_name
def stratify_entry_flows(
self, _stratification_name, _strata_names, _entry_proportions, _requested_proportions,
):
"""
stratify entry/recruitment/birth flows according to requested entry proportion adjustments
again, may need to revise behaviour for what is done if some strata are requested but not others
:param _stratification_name:
see prepare_and_check_stratification
:param _strata_names:
see find_strata_names_from_input
:param _entry_proportions: dict
user requested proportions to enter to each stratum
:param _requested_proportions:
see prepare_starting_proportions
:return:
normalised dictionary of the compartments that the new entry flows should come in to
"""
if self.entry_compartment in self.compartment_types_to_stratify:
self.output_to_user(
"\n-----\ncalculating proportions of births/recruitment to assign to each stratified entry compartment"
)
for stratum in _strata_names:
entry_fraction_name = create_stratified_name(
"entry_fraction", _stratification_name, stratum
)
# specific behaviour for age stratification
if _stratification_name == "age" and str(stratum) == "0":
self.parameters[entry_fraction_name] = 1.0
continue
elif _stratification_name == "age":
self.parameters[entry_fraction_name] = 0.0
continue
# where a request for splitting entry rates has been submitted
elif stratum in _entry_proportions and type(_entry_proportions[stratum]) == float:
self.parameters[entry_fraction_name] = _entry_proportions[stratum]
self.output_to_user(
"assigning requested proportion %s of births/recruitment to %s stratum"
% (_entry_proportions[stratum], stratum)
)
# if an incorrect string has been submitted by the user
elif (
stratum in _entry_proportions
and type(_entry_proportions[stratum]) == str
and _entry_proportions[stratum] not in self.time_variants
):
raise ValueError(
"requested entry fraction function for %s stratum not available in time variants"
)
# otherwise it must already be a defined function that can be called during integration
elif stratum in _entry_proportions and type(_entry_proportions[stratum]) == str:
self.time_variants[entry_fraction_name] = self.time_variants[
_entry_proportions[stratum]
]
self.output_to_user(
"function %s submitted for proportion of births assigned to %s"
% (_entry_proportions[stratum], stratum)
)
continue
# otherwise if no request made
else:
self.parameters[entry_fraction_name] = 1.0 / len(_strata_names)
    def stratify_death_flows(self, _stratification_name, _strata_names, _adjustment_requests):
        """
        add compartment-specific death flows to the death_flows data frame attribute
        each death flow from a compartment being stratified is replaced by one flow per stratum,
        while flows from unstratified compartments are carried forward with an incremented
        implement value
        :param _stratification_name:
            see prepare_and_check_stratification
        :param _strata_names:
            see find_strata_names_from_input
        :param _adjustment_requests:
            see incorporate_alternative_overwrite_approach and check_parameter_adjustment_requests
        """
        # back_one=1: presumably because the stratification in progress has already been registered
        # in all_stratifications - TODO confirm
        for n_flow in self.find_death_indices_to_implement(back_one=1):
            # if the compartment with an additional death flow is being stratified
            if find_stem(self.death_flows.origin[n_flow]) in self.compartment_types_to_stratify:
                for stratum in _strata_names:
                    # get stratified parameter name if requested to stratify, otherwise use the unstratified one
                    parameter_name = self.add_adjusted_parameter(
                        self.death_flows.parameter[n_flow],
                        _stratification_name,
                        stratum,
                        _adjustment_requests,
                    )
                    if not parameter_name:
                        parameter_name = self.death_flows.parameter[n_flow]
                    # add the stratified flow to the death flows data frame
                    self.death_flows = self.death_flows.append(
                        {
                            "type": self.death_flows.type[n_flow],
                            "parameter": parameter_name,
                            "origin": create_stratified_name(
                                self.death_flows.origin[n_flow], _stratification_name, stratum,
                            ),
                            "implement": len(self.all_stratifications),
                        },
                        ignore_index=True,
                    )
            # otherwise if not part of the stratification, accept the existing flow and increment the implement value
            else:
                new_flow = self.death_flows.loc[n_flow, :].to_dict()
                new_flow["implement"] += 1
                self.death_flows = self.death_flows.append(new_flow, ignore_index=True)
def stratify_universal_death_rate(
self,
_stratification_name,
_strata_names,
_adjustment_requests,
_compartment_types_to_stratify,
):
"""
stratify the approach to universal, population-wide deaths (which can be made to vary by stratum)
adjust every parameter that refers to the universal death rate, according to user request if submitted and
otherwise populated with a value of one by default
:param _stratification_name:
see prepare_and_check_stratification
:param _strata_names:
see find_strata_names_from_input
:param _adjustment_requests:
see incorporate_alternative_overwrite_approach and check_parameter_adjustment_requests
:param _compartment_types_to_stratify:
see above
"""
if (
_stratification_name not in self.full_stratification_list
and "universal_death_rate" in _adjustment_requests
):
raise ValueError(
"universal death rate can only be stratified when applied to all compartment types"
)
elif _stratification_name not in self.full_stratification_list:
self.output_to_user(
"universal death rate not adjusted as stratification not applied to all compartments"
)
return
# ensure baseline function available for modification in universal death rates
self.adaptation_functions["universal_death_rateX"] = (
self.time_variants["universal_death_rate"]
if "universal_death_rate" in self.time_variants
else lambda time: self.parameters["universal_death_rate"]
)
# if stratification applied to all compartment types
for stratum in _strata_names:
if (
"universal_death_rate" in _adjustment_requests
and stratum in _adjustment_requests["universal_death_rate"]
):
stratum_name = create_stratum_name(_stratification_name, stratum, joining_string="")
self.available_death_rates.append(stratum_name)
# use existing function or create new one from constant as needed
if type(_adjustment_requests["universal_death_rate"][stratum]) == str:
self.adaptation_functions[
"universal_death_rateX" + stratum_name
] = self.time_variants[_adjustment_requests["universal_death_rate"][stratum]]
elif isinstance(
_adjustment_requests["universal_death_rate"][stratum], (int, float)
):
self.adaptation_functions[
"universal_death_rateX" + stratum_name
] = create_multiplicative_function(
self.time_variants[_adjustment_requests["universal_death_rate"][stratum]]
)
# record the parameters that over-write the less stratified parameters closer to the trunk of the tree
if (
OVERWRITE_KEY in _adjustment_requests["universal_death_rate"]
and stratum in _adjustment_requests["universal_death_rate"][OVERWRITE_KEY]
):
self.overwrite_parameters.append(
create_stratified_name(
"universal_death_rate", _stratification_name, stratum
)
)
def prepare_mixing_matrix(self, _mixing_matrix, _stratification_name, _strata_names):
"""
check that the mixing matrix has been correctly specified and call the other relevant functions
:param _mixing_matrix: numpy array
must be square
represents the mixing of the strata within this stratification
:param _stratification_name: str
the name of the stratification - i.e. the reason for implementing this type of stratification
:param _strata_names: list
see find_strata_names_from_input
"""
if _mixing_matrix is None:
return
elif type(_mixing_matrix) != numpy.ndarray:
raise ValueError("submitted mixing matrix is wrong data type")
elif len(_mixing_matrix.shape) != 2:
raise ValueError("submitted mixing matrix is not two-dimensional")
elif _mixing_matrix.shape[0] != _mixing_matrix.shape[1]:
raise ValueError("submitted mixing is not square")
elif _mixing_matrix.shape[0] != len(_strata_names):
raise ValueError("mixing matrix does not sized to number of strata being implemented")
self.combine_new_mixing_matrix_with_existing(
_mixing_matrix, _stratification_name, _strata_names
)
def combine_new_mixing_matrix_with_existing(
self, _mixing_matrix, _stratification_name, _strata_names
):
"""
master mixing matrix function to take in a new mixing matrix and combine with the existing ones
:param _mixing_matrix: numpy array
array, which must be square representing the mixing of the strata within this stratification
:param _stratification_name: str
the name of the stratification - i.e. the reason for implementing this type of stratification
:param _strata_names: list
see find_strata_names_from_input
"""
# if no mixing matrix yet, just convert the existing one to a dataframe
if self.mixing_matrix is None:
self.mixing_categories = [_stratification_name + "_" + i for i in _strata_names]
self.mixing_matrix = _mixing_matrix
# otherwise take the kronecker product to get the new mixing matrix
else:
self.mixing_categories = [
old_strata + "X" + _stratification_name + "_" + new_strata
for old_strata, new_strata in itertools.product(
self.mixing_categories, _strata_names
)
]
self.mixing_matrix = numpy.kron(self.mixing_matrix, _mixing_matrix)
def prepare_infectiousness_levels(
self, _stratification_name, _strata_names, _infectiousness_adjustments
):
"""
store infectiousness adjustments as dictionary attribute to the model object, with first tier of keys the
stratification and second tier the strata to be modified
:param _stratification_name:
see prepare_and_check_stratification
:param _strata_names:
see find_strata_names_from_input
:param _infectiousness_adjustments: dict
requested adjustments to infectiousness for this stratification
"""
if type(_infectiousness_adjustments) != dict:
raise ValueError("infectiousness adjustments not submitted as dictionary")
elif not all(key in _strata_names for key in _infectiousness_adjustments.keys()):
raise ValueError("infectiousness adjustment key not in strata being implemented")
else:
for stratum in _infectiousness_adjustments:
self.infectiousness_levels[
create_stratum_name(_stratification_name, stratum, joining_string="")
] = _infectiousness_adjustments[stratum]
def prepare_and_check_target_props(self, _target_props, _stratification_name, _strata_names):
"""
create the dictionary of dictionaries that contains the target values for equlibration
:parameters:
_target_props: dict
user submitted dictionary with keys the restrictions by previously implemented strata that apply
_stratification_name: str
name of stratification process currently being implemented
_strata_names: list
list of the names of the strata being implemented under the current stratification process
"""
self.target_props[_stratification_name] = {}
for restriction in _target_props:
self.target_props[_stratification_name][restriction] = {}
# only need parameter values for the first n-1 strata, as the last one will be the remainder
for stratum in _strata_names[:-1]:
if stratum not in _target_props[restriction]:
raise ValueError(
"one or more of first n-1 strata being applied not in the target prop request"
)
elif isinstance(_target_props[restriction][stratum], (float, int, str)):
self.target_props[_stratification_name][restriction][stratum] = _target_props[
restriction
][stratum]
else:
raise ValueError("target proportions specified with incorrect format for value")
if (
type(_target_props[restriction][stratum]) == str
and _target_props[restriction][stratum] not in self.time_variants
):
raise ValueError("function for prevalence of %s not found" % stratum)
if _strata_names[-1] in self.target_props:
self.output_to_user(
"target proportion requested for stratum %s, but as last stratum"
% _strata_names[-1]
+ " in request, this will be ignored and assigned the remainder to ensure sum to one"
)
# add the necessary flows to the transition data frame
self.link_strata_with_flows(_stratification_name, _strata_names, restriction)
    def link_strata_with_flows(self, _stratification_name, _strata_names, _restriction):
        """
        add in sequential series of flows between neighbouring strata that transition people between the strata being
        implemented in this stratification stage
        one strata-change flow is appended to transition_flows per applicable compartment for each
        pair of neighbouring strata
        :parameters:
            _stratification_name: str
                name of stratification currently being implemented
            _strata_names: list
                list of the strata being implemented in this stratification process
            _restriction: str
                name of previously implemented stratum that this equilibration flow applies to, if any, otherwise "all"
        """
        for compartment in self.unstratified_compartment_names:
            # apply only to compartments carrying the restriction stratum, or to all when unrestricted
            if _restriction in find_name_components(compartment) or _restriction == "all":
                for n_stratum in range(len(_strata_names[:-1])):
                    # one flow from each stratum to the next in the requested ordering
                    self.transition_flows = self.transition_flows.append(
                        {
                            "type": Flow.STRATA_CHANGE,
                            "parameter": _stratification_name
                            + "X"
                            + _restriction
                            + "X"
                            + _strata_names[n_stratum]
                            + "_"
                            + _strata_names[n_stratum + 1],
                            "origin": create_stratified_name(
                                compartment, _stratification_name, _strata_names[n_stratum],
                            ),
                            "to": create_stratified_name(
                                compartment, _stratification_name, _strata_names[n_stratum + 1],
                            ),
                            "implement": len(self.all_stratifications),
                            "strain": float("nan"),
                        },
                        ignore_index=True,
                    )
"""
pre-integration methods
"""
    def prepare_to_run(self):
        """
        methods that can be run prior to integration to save various function calls being made at every time step
        the call order matters: parameter functions are built before infectiousness structures,
        which are built before the index look-ups
        """
        self.prepare_stratified_parameter_calculations()
        self.prepare_infectiousness_calculations()
        self.transition_indices_to_implement = self.find_transition_indices_to_implement()
        self.death_indices_to_implement = self.find_death_indices_to_implement()
        self.change_indices_to_implement = self.find_change_indices_to_implement()
        # ensure there is a universal death rate available even if the model hasn't been stratified at all
        if len(self.all_stratifications) == 0 and isinstance(
            self.parameters["universal_death_rate"], (float, int)
        ):
            self.final_parameter_functions["universal_death_rate"] = lambda time: self.parameters[
                "universal_death_rate"
            ]
        elif (
            len(self.all_stratifications) == 0
            and type(self.parameters["universal_death_rate"]) == str
        ):
            # presumably a time-variant function registered under the same name - TODO confirm
            self.final_parameter_functions["universal_death_rate"] = self.adaptation_functions[
                "universal_death_rate"
            ]
        self.find_strata_indices()
        self.prepare_lookup_tables()
def find_strata_indices(self):
for stratif in self.all_stratifications:
self.strata_indices[stratif] = {}
for i_stratum, stratum in enumerate(self.all_stratifications[stratif]):
self.strata_indices[stratif][stratum] = [
i_comp
for i_comp in range(len(self.compartment_names))
if create_stratum_name(
stratif, self.all_stratifications[stratif][i_stratum], joining_string="",
)
in find_name_components(self.compartment_names[i_comp])
]
def prepare_stratified_parameter_calculations(self):
"""
prior to integration commencing, work out what the components are of each parameter being implemented
populates self.parameter_components even though it is not needed elsewhere, to allow that the components that
were used to create each given parameter can be determined later
"""
# create list of all the parameters that we need to find the set of adjustment functions for
parameters_to_adjust = []
transition_flow_indices = [
n_flow
for n_flow, flow in enumerate(self.transition_flows.type)
if "change" not in flow
and self.transition_flows.implement[n_flow] == len(self.all_stratifications)
]
for n_flow in transition_flow_indices:
if (
self.transition_flows.implement[n_flow] == len(self.all_stratifications)
and self.transition_flows.parameter[n_flow] not in parameters_to_adjust
):
parameters_to_adjust.append(self.transition_flows.parameter[n_flow])
for n_flow in range(self.death_flows.shape[0]):
if (
self.death_flows.implement[n_flow] == len(self.all_stratifications)
and self.death_flows.parameter[n_flow] not in parameters_to_adjust
):
parameters_to_adjust.append(self.death_flows.parameter[n_flow])
# and adjust
for parameter in parameters_to_adjust:
self.parameter_components[parameter] = self.find_transition_components(parameter)
self.create_transition_functions(parameter, self.parameter_components[parameter])
# similarly for all model compartments
for compartment in self.compartment_names:
self.mortality_components[compartment] = self.find_mortality_components(compartment)
if len(self.all_stratifications) > 0:
self.create_mortality_functions(compartment, self.mortality_components[compartment])
def find_mortality_components(self, _compartment):
"""
find the sub-parameters for population-wide natural mortality that are relevant to a particular compartment
used in prepare_stratified_parameter_calculations for creating functions to find the mortality rate for each
compartment
similar to find_transition_components, except being applied by compartment rather than parameter
:param _compartment: str
name of the compartment of interest
:return: all_sub_parameters: list
list of all the mortality-related sub-parameters for the compartment of interest
"""
all_sub_parameters = []
compartments_strata = find_name_components(_compartment)[1:]
compartments_strata.reverse()
compartments_strata.append("")
# loop through each stratification of the parameter and adapt if the parameter is available
for stratum in compartments_strata:
if stratum in self.available_death_rates:
all_sub_parameters.append("universal_death_rateX" + stratum)
if "universal_death_rateX" + stratum in self.overwrite_parameters:
break
all_sub_parameters.reverse()
return all_sub_parameters
def create_mortality_functions(self, _compartment, _sub_parameters):
"""
loop through all the components to the population-wide mortality and create the recursive functions
:param _compartment: str
name of the compartment of interest
:param _sub_parameters: list
the names of the functions that need to update the upstream parameters
:return:
"""
self.final_parameter_functions[
"universal_death_rateX" + _compartment
] = self.adaptation_functions[_sub_parameters[0]]
for component in _sub_parameters[1:]:
# get the new function to act on the less stratified function (closer to the "tree-trunk")
if component not in self.parameters:
raise ValueError(
"parameter component %s not found in parameters attribute" % component
)
elif type(self.parameters[component]) == float:
self.adaptation_functions[component] = create_multiplicative_function(
self.parameters[component]
)
elif type(self.parameters[component]) == str:
self.adaptation_functions[component] = create_time_variant_multiplicative_function(
self.adaptation_functions[component]
)
else:
raise ValueError("parameter component %s not appropriate format" % component)
# create the composite function
self.final_parameter_functions[
"universal_death_rateX" + _compartment
] = create_function_of_function(
self.adaptation_functions[component],
self.final_parameter_functions["universal_death_rateX" + _compartment],
)
def find_transition_components(self, _parameter):
"""
finds each of the strings for the functions acting on the next function in the sequence
:param _parameter: str
full name of the parameter of interest
"""
sub_parameters = []
# work backwards to allow stopping for overwriting requests, then reverse in preparation for function creation
for x_instance in extract_reversed_x_positions(_parameter):
component = _parameter[:x_instance]
sub_parameters.append(component)
if component in self.overwrite_parameters:
break
sub_parameters.reverse()
return sub_parameters
    def create_transition_functions(self, _parameter, _sub_parameters):
        """
        builds up each parameter to be implemented as a function, recursively creating an outer function that calls the
        inner function
        :param _parameter: str
            full name of the parameter of interest
        :param _sub_parameters: list
            list of the strings representing the sub-parameters, including the base parameter as the stem and with all
            of the relevant strata in the stratification sequence following
        :raises ValueError:
            if a component is missing from the parameters attribute or is neither numeric nor str
        """
        # start from base value as a function of time, even if the time argument is ignored
        if isinstance(self.parameters[_sub_parameters[0]], (float, int)):
            self.final_parameter_functions[_parameter] = lambda time: self.parameters[
                _sub_parameters[0]
            ]
        elif type(self.parameters[_sub_parameters[0]]) == str:
            self.final_parameter_functions[_parameter] = self.adaptation_functions[
                _sub_parameters[0]
            ]
        # then cycle through other applicable components and extend function recursively, only if component available
        for component in _sub_parameters[1:]:
            # get the new function to act on the less stratified function (closer to the "tree-trunk")
            if component not in self.parameters:
                raise ValueError(
                    "parameter component %s not found in parameters attribute" % component
                )
            elif isinstance(self.parameters[component], float) or isinstance(
                self.parameters[component], int
            ):
                self.adaptation_functions[component] = create_multiplicative_function(
                    self.parameters[component]
                )
            elif type(self.parameters[component]) == str:
                # a string component names a time variant to multiply through over time
                self.adaptation_functions[component] = create_time_variant_multiplicative_function(
                    self.time_variants[self.parameters[component]]
                )
            else:
                raise ValueError("parameter component %s not appropriate format" % component)
            # create the composite function
            self.final_parameter_functions[_parameter] = create_function_of_function(
                self.adaptation_functions[component], self.final_parameter_functions[_parameter],
            )
    def prepare_infectiousness_calculations(self):
        """
        master method to run all the code concerned with preparation for force of infection calculations
        the call order matters: multipliers and infectious indices must exist before the mixing and
        strain structures that combine them
        """
        # infectiousness preparations
        self.prepare_all_infectiousness_multipliers()
        self.find_infectious_indices()
        # mixing preparations, only needed when a mixing matrix has been provided
        if self.mixing_matrix is not None:
            self.add_force_indices_to_transitions()
        self.find_mixing_denominators()
        # reconciling the strains and the mixing attributes together into one structure
        self.find_strain_mixing_multipliers()
def prepare_all_infectiousness_multipliers(self):
"""
find the infectiousness multipliers for each compartment being implemented in the model
"""
# start from assumption that each compartment is fully and equally infectious
self.infectiousness_multipliers = [1.0] * len(self.compartment_names)
# if infectiousness modification requested for the compartment type, multiply through by the current value
for n_comp, compartment in enumerate(self.compartment_names):
for modifier in self.infectiousness_levels:
if modifier in find_name_components(compartment):
self.infectiousness_multipliers[n_comp] *= self.infectiousness_levels[modifier]
self.make_further_infectiousness_adjustments()
def make_further_infectiousness_adjustments(self):
"""
Work through specific requests for specific adjustments, to escape the requirement to only adjust compartment
infectiousness according to stratification process - with all infectious compartments having the same
adjustment.
"""
for i_adjustment in range(len(self.individual_infectiousness_adjustments)):
for i_comp, comp in enumerate(self.compartment_names):
if all(
[
component in find_name_components(comp)
for component in self.individual_infectiousness_adjustments[i_adjustment][0]
]
):
self.infectiousness_multipliers[
i_comp
] = self.individual_infectiousness_adjustments[i_adjustment][1]
def find_infectious_indices(self):
"""
find the infectious indices by strain and overall, as opposed to just overall in EpiModel
note that this changes the structure by one hierarchical level compared to EpiModel - in that previously we had
self.infectious_indices a list of infectious indices and now it is has a dictionary structure at the highest
level, followed by keys for each strain with values being lists that are equivalent to the
self.infectious_indices list for the unstratified version
"""
# find the indices for the compartments that are infectious across all strains
self.infectious_indices["all_strains"] = self.find_all_infectious_indices()
# then find the infectious compartment for each strain separately
for strain in self.strains:
self.infectious_indices[strain] = convert_boolean_list_to_indices(
[
create_stratum_name("strain", strain, joining_string="")
in find_name_components(comp)
and i_comp in self.infectious_indices["all_strains"]
for i_comp, comp in enumerate(self.compartment_names)
]
)
    def add_force_indices_to_transitions(self):
        """
        find the indices from the force of infection vector to be applied for each infection flow and populate to the
        force_index column of the flows frame
        raises if a flow matches no mixing category, or more than one
        """
        # identify the indices of all the infection-related flows to be implemented
        infection_flow_indices = [
            n_flow
            for n_flow, flow in enumerate(self.transition_flows.type)
            if "infection" in flow
            and self.transition_flows.implement[n_flow] == len(self.all_stratifications)
        ]
        # loop through and find the index of the mixing matrix applicable to the flow, of which there should be only one
        for n_flow in infection_flow_indices:
            found = False
            for i_group, force_group in enumerate(self.mixing_categories):
                # a flow belongs to a category when its origin contains every component of the category
                if all(
                    stratum in find_name_components(self.transition_flows.origin[n_flow])
                    for stratum in find_name_components(force_group)
                ):
                    # NOTE(review): force_index is overwritten before the duplicate check below fires,
                    # and chained assignment on a pandas frame may not write through - confirm
                    self.transition_flows.force_index[n_flow] = i_group
                    if found:
                        raise ValueError(
                            "mixing group found twice for transition flow number %s" % n_flow
                        )
                    found = True
                    continue
            if not found:
                raise ValueError("mixing group not found for transition flow number %s" % n_flow)
def find_mixing_denominators(self):
"""
for each mixing category, create a list of the compartment numbers that are relevant
:return mixing_indices: list
indices of the compartments that are applicable to a particular mixing category
"""
if self.mixing_matrix is None:
self.mixing_indices = {"all_population": range(len(self.compartment_names))}
else:
for category in self.mixing_categories:
self.mixing_indices[category] = [
i_comp
for i_comp, compartment in enumerate(self.compartment_names)
if all(
[
component in find_name_components(compartment)
for component in find_name_components(category)
]
)
]
self.mixing_indices_arr = np.array(list(self.mixing_indices.values()))
def find_strain_mixing_multipliers(self):
"""
find the relevant indices to be used to calculate the force of infection contribution to each strain from each
mixing category as a list of indices - and separately find multipliers as a list of the same length for
their relative infectiousness extracted from self.infectiousness_multipliers
"""
for strain in self.strains + ["all_strains"]:
(self.strain_mixing_elements[strain], self.strain_mixing_multipliers[strain],) = (
{},
{},
)
for category in (
["all_population"] if self.mixing_matrix is None else self.mixing_categories
):
self.strain_mixing_elements[strain][category] = numpy.array(
[
index
for index in self.mixing_indices[category]
if index in self.infectious_indices[strain]
]
)
self.strain_mixing_multipliers[strain][category] = numpy.array(
[
self.infectiousness_multipliers[i_comp]
for i_comp in self.strain_mixing_elements[strain][category]
]
)
def find_transition_indices_to_implement(
self, back_one: int = 0, include_change: bool = False
) -> List[int]:
"""
Finds all the indices of the transition flows that need to be stratified,
Overrides the version in the unstratified EpiModel
:parameters:
back_one: int
number to subtract from self.all_stratification, which will be one if this method is being called after the
stratification has been added
include_change: bool
whether to include the strata_change transition flows
:return: list
list of indices of the flows that need to be stratified
"""
return [
idx
for idx, flow in self.transition_flows.iterrows()
if (flow.type != Flow.STRATA_CHANGE or include_change)
and flow.implement == len(self.all_stratifications) - back_one
]
def find_change_indices_to_implement(self, back_one=0):
"""
find the indices of the equilibration flows to be applied in the transitions data frame
:parameters:
back_one: int
see find_transition_indices_to_implement
"""
return [
idx
for idx, flow in self.transition_flows.iterrows()
if flow.type == Flow.STRATA_CHANGE
and flow.implement == len(self.all_stratifications) - back_one
]
def find_death_indices_to_implement(self, back_one=0):
"""
find all the indices of the death flows that need to be stratified
separated out as very short method in order that it can over-ride the version in the unstratified EpiModel
:param back_one: int
number to subtract from self.all_stratification, which will be one if this method is being called after the
stratification has been added
:return: list
list of indices of the flows that need to be stratified
"""
return self.death_flows[
self.death_flows.implement == len(self.all_stratifications) - back_one
].index
"""
methods to be called during the process of model running
"""
    # Cache return values to prevent wasteful re-computation - cache size is huge.
    # Floating point return type is 8 bytes, meaning 2**17 values is ~1MB of memory.
    # N.B this will leak memory, which is fine.
    @lru_cache(maxsize=2 ** 17)
    def get_parameter_value(self, _parameter, _time):
        """
        returns a parameter value by calling the function represented by its string within the parameter_functions
        attribute
        note: lru_cache keys on (self, _parameter, _time), so the cache also keeps the model
        instance alive for its lifetime (accepted trade-off per the comment above)
        :param _parameter: str
            name of the parameter to be called (key to the parameter_functions dictionary)
        :param _time: float
            current time of model integration
        :return: float
            the parameter value needed
        """
        return self.final_parameter_functions[_parameter](_time)
def find_infectious_population(self, compartment_values):
"""
find vectors for the total infectious populations and the total population that is needed in the case of
frequency-dependent transmission
:param compartment_values: numpy array
current values for the compartment sizes
"""
strains = self.strains if self.strains else ["all_strains"]
if self.mixing_matrix is None:
mixing_categories = ["all_population"]
else:
mixing_categories = self.mixing_categories
self.infectious_denominators = compartment_values[self.mixing_indices_arr].sum(axis=1)
self.infectious_populations = find_infectious_populations(
compartment_values,
strains,
mixing_categories,
self.strain_mixing_elements,
self.strain_mixing_multipliers,
)
def find_infectious_multiplier(self, n_flow):
"""
find the multiplier to account for the infectious population in dynamic flows
:param n_flow: int
index for the row of the transition_flows data frame
:return:
the total infectious quantity, whether that is the number or proportion of infectious persons
needs to return as one for flows that are not transmission dynamic infectiousness flows
"""
flow_type = self.transition_flows_dict["type"][n_flow]
strain = self.transition_flows_dict["strain"][n_flow]
force_index = self.transition_flows_dict["force_index"][n_flow]
if "infection" not in flow_type:
return 1.0
strain = "all_strains" if not self.strains else strain
mixing_elements = (
[1.0] if self.mixing_matrix is None else self.mixing_matrix[force_index, :]
)
denominator = (
[1.0] * len(self.infectious_denominators)
if "_density" in flow_type
else self.infectious_denominators
)
return sum(
element_list_division(
element_list_multiplication(self.infectious_populations[strain], mixing_elements),
denominator,
)
)
def prepare_time_step(self, _time):
"""
Perform any tasks needed for execution of each integration time step
"""
if self.dynamic_mixing_matrix:
self.mixing_matrix = self.find_dynamic_mixing_matrix(_time)
def find_dynamic_mixing_matrix(self, _time):
"""
Function for overwriting in application to create time-variant mixing matrix
"""
return self.mixing_matrix
def get_compartment_death_rate(self, _compartment, _time):
"""
find the universal or population-wide death rate for a particular compartment
:param _compartment: str
name of the compartment
:param _time: float
current integration time
:return: float
death rate
"""
return (
self.get_parameter_value("universal_death_rateX" + _compartment, _time)
if len(self.all_stratifications) > 0
else self.get_parameter_value("universal_death_rate", _time)
)
def apply_birth_rate(self, _ode_equations, _compartment_values, _time):
"""
apply a population-wide death rate to all compartments
all the entry_fraction proportions should be present in either parameters or time_variants given how they are
created in the process of implementing stratification
:parameters: all parameters have come directly from the apply_all_flow_types_to_odes method unchanged
"""
# find the total number of births entering the system at the current time point
total_births = self.find_total_births(_compartment_values, _time)
# split the total births across entry compartments
for compartment in [
comp for comp in self.compartment_names if find_stem(comp) == self.entry_compartment
]:
# calculate adjustment to original stem entry rate
entry_fraction = 1.0
for stratum in find_name_components(compartment)[1:]:
entry_fraction *= self.get_single_parameter_component(
"entry_fractionX%s" % stratum, _time
)
# apply to that compartment
_ode_equations = increment_list_by_index(
_ode_equations,
self.compartment_names.index(compartment),
total_births * entry_fraction,
)
return _ode_equations
    def apply_change_rates(self, _ode_equations, _compartment_values, _time):
        """
        apply the transition rates that relate to equilibrating prevalence values for a particular stratification
        :parameters:
            _ode_equations: list
                working ode equations, to which transitions are being applied
            _compartment_values: list
                working compartment values
            _time: float
                current integration time value
        :return: list
            the working ode equations with the equilibration flows applied
        """
        # for each change flow being implemented
        for i_change in self.change_indices_to_implement:
            # split out the components of the transition string, which follow the standard 6-character string "change"
            stratification, restriction, transition = find_name_components(
                self.transition_flows.parameter[i_change]
            )
            origin_stratum, _ = transition.split("_")
            # find the distribution of the population across strata to be targeted
            _cumulative_target_props = self.find_target_strata_props(
                _time, restriction, stratification
            )
            # find the proportional distribution of the population across strata at the current time point
            _cumulative_strata_props = self.find_current_strata_props(
                _compartment_values, stratification, restriction
            )
            # work out which stratum and compartment transitions should be going from and to
            if _cumulative_strata_props[origin_stratum] > _cumulative_target_props[origin_stratum]:
                take_compartment, give_compartment, numerator, denominator = (
                    self.transition_flows.origin[i_change],
                    self.transition_flows.to[i_change],
                    _cumulative_strata_props[origin_stratum],
                    _cumulative_target_props[origin_stratum],
                )
            else:
                # flow direction reversed: move population into the origin stratum
                take_compartment, give_compartment, numerator, denominator = (
                    self.transition_flows.to[i_change],
                    self.transition_flows.origin[i_change],
                    1.0 - _cumulative_strata_props[origin_stratum],
                    1.0 - _cumulative_target_props[origin_stratum],
                )
            # calculate net flow; the log-ratio of current to target proportion is
            # damped by STRATA_EQUILIBRATION_FACTOR and scaled by the size of the
            # donating compartment (presumably tuned for stability - see constant's definition)
            net_flow = (
                numpy.log(numerator / denominator)
                / STRATA_EQUILIBRATION_FACTOR
                * _compartment_values[self.compartment_names.index(take_compartment)]
            )
            # update equations: subtract from the donating compartment, add to the receiving one
            _ode_equations = increment_list_by_index(
                _ode_equations, self.compartment_names.index(take_compartment), -net_flow,
            )
            _ode_equations = increment_list_by_index(
                _ode_equations, self.compartment_names.index(give_compartment), net_flow
            )
        return _ode_equations
def find_target_strata_props(self, _time, _restriction, _stratification):
"""
calculate the requested distribution of the population over the stratification that needs to be equilibrated
over
:parameters:
_time: float
current time value in integration
_stratification: str
name of the stratification over which the distribution of population is to be calculated
_restriction: str
name of the restriction stratification and the stratum joined with "_", if this is being applied
if this is submitted as "all", the equilibration will be applied across all other strata
"""
# for each applicable stratification, find target value for all strata, except the last one
target_prop_values = {}
for stratum in self.target_props[_stratification][_restriction]:
target_prop_values[stratum] = (
self.target_props[_stratification][_restriction][stratum]
if type(self.target_props[_stratification][_restriction][stratum]) == float
else self.time_variants[self.target_props[_stratification][_restriction][stratum]](
_time
)
)
# check that prevalence values (including time-variant values) fall between zero and one
if sum(target_prop_values.values()) > 1.0:
raise ValueError(
"total prevalence of first n-1 strata sums to more than one at time %s" % _time
)
elif any(target_prop_values.values()) < 0.0:
raise ValueError("prevalence request of less than zero at time %s" % _time)
# convert to dictionary of cumulative totals
cumulative_target_props = create_cumulative_dict(target_prop_values)
# add in a cumulative value of one for the last stratum
cumulative_target_props.update({self.all_stratifications[_stratification][-1]: 1.0})
return cumulative_target_props
def find_current_strata_props(self, _compartment_values, _stratification, _restriction):
"""
find the current distribution of the population across a particular stratification, which may or may not be
restricted to a stratum of a previously implemented stratification process
:parameters:
_compartment_values: list
current compartment values achieved during integration
_stratification: str
name of the stratification over which the distribution of population is to be calculated
_restriction: str
name of the restriction stratification and the stratum joined with "_", if this is being applied
if this is submitted as "all", the equilibration will be applied across all other strata
"""
# find the compartment indices applicable to the cross-stratification of interest (which may be all of them)
if _restriction == "all":
restriction_compartments = list(range(len(self.compartment_names)))
else:
restrict_stratification, restrict_stratum = _restriction.split("_")
restriction_compartments = self.strata_indices[restrict_stratification][
restrict_stratum
]
# find current values of prevalence for the stratification for which prevalence values targeted
current_strata_props = {}
for stratum in self.all_stratifications[_stratification]:
current_strata_props[stratum] = sum(
[
_compartment_values[i_comp]
for i_comp in restriction_compartments
if i_comp in self.strata_indices[_stratification][stratum]
]
) / sum([_compartment_values[i_comp] for i_comp in restriction_compartments])
return create_cumulative_dict(current_strata_props)
from numba import jit
def find_infectious_populations(
    compartment_values: np.ndarray,
    strains: List[str],
    mixing_categories: List[str],
    strain_mixing_elements: Dict[str, Dict[str, List[int]]],
    strain_mixing_multipliers: Dict[str, Dict[str, np.ndarray]],
):
    """
    Build, for each strain, the list of multiplier-weighted infectious
    population sizes, one entry per mixing category.

    :return: dict mapping strain name to a list of weighted sums in
        mixing-category order
    """
    infectious_populations = {}
    for strain in strains:
        infectious_populations[strain] = [
            _find_infectious_populations_weighted_sum(
                compartment_values,
                strain_mixing_elements[strain][category],
                strain_mixing_multipliers[strain][category],
            )
            for category in mixing_categories
        ]
    return infectious_populations
@jit(nopython=True)
def _find_infectious_populations_weighted_sum(
    compartment_values: np.ndarray, mixing_element_idxs: np.ndarray, mixing_multipliers: np.ndarray,
):
    """
    Numba-compiled kernel: gather the compartments selected by
    mixing_element_idxs and return their multiplier-weighted sum.
    Kept to plain numpy indexing and arithmetic so it compiles in nopython mode.
    """
    mixing_elements = compartment_values[mixing_element_idxs]
    return (mixing_elements * mixing_multipliers).sum()
| [
"itertools.product",
"numpy.log",
"numpy.kron",
"numpy.array",
"numba.jit",
"functools.lru_cache",
"copy.copy",
"numpy.arange"
] | [((90506, 90524), 'numba.jit', 'jit', ([], {'nopython': '(True)'}), '(nopython=True)\n', (90509, 90524), False, 'from numba import jit\n'), ((77487, 77513), 'functools.lru_cache', 'lru_cache', ([], {'maxsize': '(2 ** 17)'}), '(maxsize=2 ** 17)\n', (77496, 77513), False, 'from functools import lru_cache\n'), ((12654, 12687), 'copy.copy', 'copy.copy', (['self.compartment_names'], {}), '(self.compartment_names)\n', (12663, 12687), False, 'import copy\n'), ((20160, 20194), 'numpy.arange', 'numpy.arange', (['(1)', '(_strata_names + 1)'], {}), '(1, _strata_names + 1)\n', (20172, 20194), False, 'import numpy\n'), ((51806, 51852), 'numpy.kron', 'numpy.kron', (['self.mixing_matrix', '_mixing_matrix'], {}), '(self.mixing_matrix, _mixing_matrix)\n', (51816, 51852), False, 'import numpy\n'), ((74499, 74610), 'numpy.array', 'numpy.array', (['[index for index in self.mixing_indices[category] if index in self.\n infectious_indices[strain]]'], {}), '([index for index in self.mixing_indices[category] if index in\n self.infectious_indices[strain]])\n', (74510, 74610), False, 'import numpy\n'), ((74806, 74925), 'numpy.array', 'numpy.array', (['[self.infectiousness_multipliers[i_comp] for i_comp in self.\n strain_mixing_elements[strain][category]]'], {}), '([self.infectiousness_multipliers[i_comp] for i_comp in self.\n strain_mixing_elements[strain][category]])\n', (74817, 74925), False, 'import numpy\n'), ((51664, 51720), 'itertools.product', 'itertools.product', (['self.mixing_categories', '_strata_names'], {}), '(self.mixing_categories, _strata_names)\n', (51681, 51720), False, 'import itertools\n'), ((85100, 85134), 'numpy.log', 'numpy.log', (['(numerator / denominator)'], {}), '(numerator / denominator)\n', (85109, 85134), False, 'import numpy\n')] |
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import numpy as np
import paddle.fluid as fluid
import unittest
from paddle.fluid.dygraph.jit import dygraph_to_static_output
# Fix the NumPy RNG seed so the randomly generated test inputs are reproducible.
np.random.seed(1)
def dyfunc_with_if_else(x_v):
    # Toy dygraph function whose branch condition depends on a tensor value.
    # Its exact if/else shape is the input to the AST-to-static transformation
    # under test, so the structure is deliberately left in this simple form.
    if fluid.layers.mean(x_v).numpy()[0] > 5:
        x_v = x_v - 1
    else:
        x_v = x_v + 1
    return x_v
def dyfunc_with_if_else2(x):
    # Branch condition mixes a reduced tensor value with indexed numpy access;
    # each branch produces y through different fluid layer ops. The syntactic
    # shape is the input to the AST-to-static transformation under test.
    i, j = 0, 0
    if fluid.layers.reduce_mean(x).numpy()[0] > x.numpy()[i][j]:
        y = fluid.layers.relu(x)
    else:
        x_pow = fluid.layers.pow(x, 2)
        y = fluid.layers.tanh(x_pow)
    return y
def nested_if_else(x_v):
    # Exercises nested tensor-dependent if/else branches (three levels deep);
    # the structure is deliberately preserved as input to the dygraph-to-static
    # transformer under test.
    batch_size = x_v.shape[0]
    feat_size = x_v.shape[-1]
    bias = fluid.layers.fill_constant([feat_size], dtype='float32', value=1)
    if fluid.layers.mean(x_v).numpy()[0] < 0:
        y = x_v + bias
        w = fluid.layers.fill_constant([feat_size], dtype='float32', value=10)
        # second-level branch depends on an eagerly evaluated tensor element
        if y.numpy()[0] < 10:
            tmp = y * w
            y = fluid.layers.relu(tmp)
            # third-level branch compares a tensor mean with a Python int
            if fluid.layers.mean(y).numpy()[0] < batch_size:
                y = fluid.layers.abs(y)
            else:
                tmp = fluid.layers.fill_constant(
                    [feat_size], dtype='float32', value=-1)
                y = y - tmp
    else:
        y = x_v - bias
    return y
class TestDygraphIfElse(unittest.TestCase):
    """
    TestCase for the transformation from control flow `if/else`
    dependent on tensor in Dygraph into Static `fluid.layers.cond`.
    """

    def setUp(self):
        # random input and the dygraph function whose transform is verified
        self.x = np.random.random([10, 16]).astype('float32')
        self.dyfunc = dyfunc_with_if_else

    def _run_static(self):
        main_program = fluid.Program()
        with fluid.program_guard(main_program):
            input_var = fluid.layers.assign(self.x)
            # Transform the dygraph function into a static graph program
            static_out = dygraph_to_static_output(self.dyfunc)(input_var)
        executor = fluid.Executor(fluid.CPUPlace())
        return executor.run(main_program, fetch_list=static_out)

    def _run_dygraph(self):
        with fluid.dygraph.guard():
            out = self.dyfunc(fluid.dygraph.to_variable(self.x))
        return out.numpy()

    def test_ast_to_func(self):
        # the static-graph result must match the eager (dygraph) result exactly
        static_res = self._run_static()
        dygraph_res = self._run_dygraph()
        self.assertTrue((dygraph_res == static_res).all())
class TestDygraphIfElse2(TestDygraphIfElse):
    """Same check as the base case, exercising dyfunc_with_if_else2."""

    def setUp(self):
        self.dyfunc = dyfunc_with_if_else2
        self.x = np.random.random([10, 16]).astype('float32')
class TestDygraphIfElse3(TestDygraphIfElse):
    """Same check as the base case, exercising the nested if/else function."""

    def setUp(self):
        self.dyfunc = nested_if_else
        self.x = np.random.random([10, 16]).astype('float32')
# Run the test suite when the module is executed directly as a script.
if __name__ == '__main__':
    unittest.main()
| [
"paddle.fluid.layers.fill_constant",
"paddle.fluid.Program",
"paddle.fluid.dygraph.jit.dygraph_to_static_output",
"paddle.fluid.dygraph.guard",
"paddle.fluid.layers.tanh",
"paddle.fluid.dygraph.to_variable",
"numpy.random.random",
"paddle.fluid.layers.reduce_mean",
"paddle.fluid.CPUPlace",
"paddle... | [((780, 797), 'numpy.random.seed', 'np.random.seed', (['(1)'], {}), '(1)\n', (794, 797), True, 'import numpy as np\n'), ((1287, 1352), 'paddle.fluid.layers.fill_constant', 'fluid.layers.fill_constant', (['[feat_size]'], {'dtype': '"""float32"""', 'value': '(1)'}), "([feat_size], dtype='float32', value=1)\n", (1313, 1352), True, 'import paddle.fluid as fluid\n'), ((3275, 3290), 'unittest.main', 'unittest.main', ([], {}), '()\n', (3288, 3290), False, 'import unittest\n'), ((1069, 1089), 'paddle.fluid.layers.relu', 'fluid.layers.relu', (['x'], {}), '(x)\n', (1086, 1089), True, 'import paddle.fluid as fluid\n'), ((1116, 1138), 'paddle.fluid.layers.pow', 'fluid.layers.pow', (['x', '(2)'], {}), '(x, 2)\n', (1132, 1138), True, 'import paddle.fluid as fluid\n'), ((1151, 1175), 'paddle.fluid.layers.tanh', 'fluid.layers.tanh', (['x_pow'], {}), '(x_pow)\n', (1168, 1175), True, 'import paddle.fluid as fluid\n'), ((1434, 1500), 'paddle.fluid.layers.fill_constant', 'fluid.layers.fill_constant', (['[feat_size]'], {'dtype': '"""float32"""', 'value': '(10)'}), "([feat_size], dtype='float32', value=10)\n", (1460, 1500), True, 'import paddle.fluid as fluid\n'), ((2268, 2283), 'paddle.fluid.Program', 'fluid.Program', ([], {}), '()\n', (2281, 2283), True, 'import paddle.fluid as fluid\n'), ((1571, 1593), 'paddle.fluid.layers.relu', 'fluid.layers.relu', (['tmp'], {}), '(tmp)\n', (1588, 1593), True, 'import paddle.fluid as fluid\n'), ((2297, 2330), 'paddle.fluid.program_guard', 'fluid.program_guard', (['main_program'], {}), '(main_program)\n', (2316, 2330), True, 'import paddle.fluid as fluid\n'), ((2350, 2377), 'paddle.fluid.layers.assign', 'fluid.layers.assign', (['self.x'], {}), '(self.x)\n', (2369, 2377), True, 'import paddle.fluid as fluid\n'), ((2653, 2674), 'paddle.fluid.dygraph.guard', 'fluid.dygraph.guard', ([], {}), '()\n', (2672, 2674), True, 'import paddle.fluid as fluid\n'), ((2694, 2727), 'paddle.fluid.dygraph.to_variable', 'fluid.dygraph.to_variable', 
(['self.x'], {}), '(self.x)\n', (2719, 2727), True, 'import paddle.fluid as fluid\n'), ((1675, 1694), 'paddle.fluid.layers.abs', 'fluid.layers.abs', (['y'], {}), '(y)\n', (1691, 1694), True, 'import paddle.fluid as fluid\n'), ((1735, 1801), 'paddle.fluid.layers.fill_constant', 'fluid.layers.fill_constant', (['[feat_size]'], {'dtype': '"""float32"""', 'value': '(-1)'}), "([feat_size], dtype='float32', value=-1)\n", (1761, 1801), True, 'import paddle.fluid as fluid\n'), ((2130, 2156), 'numpy.random.random', 'np.random.random', (['[10, 16]'], {}), '([10, 16])\n', (2146, 2156), True, 'import numpy as np\n'), ((2438, 2475), 'paddle.fluid.dygraph.jit.dygraph_to_static_output', 'dygraph_to_static_output', (['self.dyfunc'], {}), '(self.dyfunc)\n', (2462, 2475), False, 'from paddle.fluid.dygraph.jit import dygraph_to_static_output\n'), ((2514, 2530), 'paddle.fluid.CPUPlace', 'fluid.CPUPlace', ([], {}), '()\n', (2528, 2530), True, 'import paddle.fluid as fluid\n'), ((2987, 3013), 'numpy.random.random', 'np.random.random', (['[10, 16]'], {}), '([10, 16])\n', (3003, 3013), True, 'import numpy as np\n'), ((3160, 3186), 'numpy.random.random', 'np.random.random', (['[10, 16]'], {}), '([10, 16])\n', (3176, 3186), True, 'import numpy as np\n'), ((837, 859), 'paddle.fluid.layers.mean', 'fluid.layers.mean', (['x_v'], {}), '(x_v)\n', (854, 859), True, 'import paddle.fluid as fluid\n'), ((999, 1026), 'paddle.fluid.layers.reduce_mean', 'fluid.layers.reduce_mean', (['x'], {}), '(x)\n', (1023, 1026), True, 'import paddle.fluid as fluid\n'), ((1360, 1382), 'paddle.fluid.layers.mean', 'fluid.layers.mean', (['x_v'], {}), '(x_v)\n', (1377, 1382), True, 'import paddle.fluid as fluid\n'), ((1609, 1629), 'paddle.fluid.layers.mean', 'fluid.layers.mean', (['y'], {}), '(y)\n', (1626, 1629), True, 'import paddle.fluid as fluid\n')] |
import numpy as np
import pandas as pd
import matplotlib.pyplot as pl
import seaborn as sns
import tensorflow as tf
import re
import json
from functools import partial
from itertools import filterfalse
from wordcloud import WordCloud
from tensorflow import keras
from tensorflow.keras import layers
df = pd.read_csv('data.csv')
# Keep only the columns used downstream.
columns = ['speaker','headline','description','event','duration','date_published','views_as_of_06162017','tags','transcript']
df = df[columns]
# Convert duration to seconds and the publication date to a datetime.
df['duration'] = pd.to_timedelta(df['duration']).dt.total_seconds()
df['date_published'] = pd.to_datetime(df['date_published'])
df = df.rename(columns={'views_as_of_06162017':'views'})
df = df.dropna()
# The WordCloud instance is only used for its built-in stopword list below.
wc = WordCloud()
def transcript_to_tokens(s):
    """Split a raw transcript into an (emotes, speech) pair of token lists.

    Emotes are the parenthesised stage directions, e.g. "(Applause)", plus the
    bare words "applause"/"laughter" that are inconsistently annotated in the
    transcripts. Speech is the remaining lower-cased, punctuation-stripped,
    stopword-filtered word list.
    """
    # drop empty fragments and trim whitespace around each paragraph
    s = list(map(lambda s: s.strip(), filter(len, s.split('\r'))))
    # remove the "mm:ss" timestamp fragments; raw string avoids the invalid
    # "\:" escape warning from the original pattern
    s = ' '.join(filterfalse(partial(re.match, r'[0-9]+\:[0-9]+'), s))
    # strip all punctuation in a single C-level pass instead of chained replaces
    s = s.translate(str.maketrans('', '', '.,!?:;"')).lower()
    emotes = re.findall(r'\(([^)]+)\)', s)
    speech = ' '.join(re.split(r'\(([^)]+)\)', s)).split()
    # Inconsistent annotation in transcript: some emotes appear as bare words
    emotes = emotes + list(filter(lambda w: w in ['applause', 'laughter'], speech))
    speech = filter(lambda w: w not in ['applause', 'laughter'], speech)
    speech = list(filter(lambda w: w not in wc.stopwords, speech))
    return (emotes, speech)
def word_count(s):
    """Return the number of distinct tokens in *s*.

    Replaces the deprecated top-level ``pd.value_counts`` call: counting
    distinct hashable tokens only needs a set.
    """
    return len(set(s))
def translate_df(df):
    """Tokenise the transcripts and add derived feature columns.

    Mutates *df* in place and also returns it: adds 'emotes', 'words',
    'unique_words', 'year_published' and 'month_published' columns.
    """
    tokenised = df['transcript'].apply(transcript_to_tokens).to_list()
    emote_lists, word_lists = zip(*tokenised)
    df.loc[:, 'emotes'] = list(emote_lists)
    df.loc[:, 'words'] = list(word_lists)
    df['unique_words'] = df['words'].apply(word_count)
    published = df['date_published'].dt
    df['year_published'] = published.year
    df['month_published'] = published.month
    return df
df = translate_df(df)
# Flatten all token lists to compute global word/emote frequencies.
all_words = [ x for xs in df['words'].to_list() for x in xs ]
word_counts = pd.value_counts(all_words)
all_emotes = [ x for xs in df['emotes'] for x in xs ]
emote_counts = pd.value_counts(all_emotes)
# One occurrence-count feature per top word and per top emote.
n_words_analyse = 50
for word in word_counts.head(n=n_words_analyse).keys():
    df[f'num_{word}'] = df['words'].apply(lambda xs: xs.count(word))
n_emotes_analyse = 2
for emote in emote_counts.head(n=n_emotes_analyse).keys():
    df[f'times_{emote}'] = df['emotes'].apply(lambda xs: xs.count(emote))
numerical_columns = df.select_dtypes(include='number').columns
# Train/validation/test split fractions; train+val are sampled together and
# the remainder is held out as the test set.
val_frac = 0.2
test_frac = 0.2
train_frac = 1.0 - val_frac - test_frac
df_model = df[numerical_columns]
df_full_train = df_model.sample(frac=train_frac + val_frac,random_state=0)
df_test = df_model.drop(df_full_train.index)
# log1p-transform the view counts, which are the regression target.
y_full_train = np.log1p(df_full_train.pop('views'))
y_test = np.log1p(df_test .pop('views'))
def train_NN(df_train,y_train,df_val,y_val,inner_layers=[64],learning_rate=0.1,droprate=None,input_droprate=None):
    """Build, compile and fit a dense regression network.

    :param df_train, y_train: training features and (log-transformed) targets
    :param df_val, y_val: validation features and targets
    :param inner_layers: sequence of hidden-layer widths (read-only, never mutated)
    :param learning_rate: Adam learning rate
    :param droprate: dropout rate applied after each hidden layer (None/0 disables)
    :param input_droprate: dropout rate applied to the normalised inputs
        (None/0 disables)
    :return: the Keras History object returned by model.fit
    """
    normalizer = tf.keras.layers.Normalization(axis=-1)
    normalizer.adapt(np.asarray(df_train))
    model = tf.keras.Sequential()
    model.add(normalizer)
    if input_droprate:
        # bug fix: this layer previously used `droprate` instead of
        # `input_droprate`, so the input dropout rate was silently ignored
        model.add(layers.Dropout(input_droprate))
    for layer_size in inner_layers:
        model.add(layers.Dense(layer_size, activation='relu'))
        if droprate:
            model.add(layers.Dropout(droprate))
    model.add(layers.Dense(units=1))
    model.summary()
    model.compile(optimizer=tf.optimizers.Adam(learning_rate=learning_rate)
                 ,loss='mean_squared_error')
    history = model.fit(df_train,y_train,epochs=200,validation_data=(np.asarray(df_val),y_val))
    return history
# Hyper-parameters selected from an earlier tuning run.
best_ddn2_layer_size = [16,16]
best_ddn2_learning_rate = 0.33
best_ddn2_droprate = 0.4
best_ddn2_input_droprate = 0.0
# Train the final model on train+val and validate against the held-out test set.
best = train_NN(df_full_train,y_full_train,df_test,y_test
               ,inner_layers=best_ddn2_layer_size
               ,droprate=best_ddn2_droprate
               ,learning_rate=best_ddn2_learning_rate
               ,input_droprate=best_ddn2_input_droprate)
#best.model.save('keras_model')
# Export the trained model plus the spec needed to rebuild its input features.
tf.saved_model.save(best.model, 'view-model')
model_spec = { 'columns': list(filter(lambda x: x != 'views',df[numerical_columns].columns.to_list())),
               'trained_words': word_counts.head(n=n_words_analyse).keys().to_list(),
               'trained_emotes': emote_counts.head(n=n_emotes_analyse).keys().to_list()}
open('keras_model_spec.json','w+').write(json.dumps(model_spec))
| [
"re.split",
"pandas.to_timedelta",
"tensorflow.keras.layers.Normalization",
"pandas.read_csv",
"tensorflow.keras.Sequential",
"json.dumps",
"numpy.asarray",
"tensorflow.keras.layers.Dropout",
"pandas.value_counts",
"tensorflow.saved_model.save",
"wordcloud.WordCloud",
"tensorflow.keras.layers.... | [((374, 397), 'pandas.read_csv', 'pd.read_csv', (['"""data.csv"""'], {}), "('data.csv')\n", (385, 397), True, 'import pandas as pd\n'), ((632, 668), 'pandas.to_datetime', 'pd.to_datetime', (["df['date_published']"], {}), "(df['date_published'])\n", (646, 668), True, 'import pandas as pd\n'), ((748, 759), 'wordcloud.WordCloud', 'WordCloud', ([], {}), '()\n', (757, 759), False, 'from wordcloud import WordCloud\n'), ((1955, 1981), 'pandas.value_counts', 'pd.value_counts', (['all_words'], {}), '(all_words)\n', (1970, 1981), True, 'import pandas as pd\n'), ((2052, 2079), 'pandas.value_counts', 'pd.value_counts', (['all_emotes'], {}), '(all_emotes)\n', (2067, 2079), True, 'import pandas as pd\n'), ((4011, 4056), 'tensorflow.saved_model.save', 'tf.saved_model.save', (['best.model', '"""view-model"""'], {}), "(best.model, 'view-model')\n", (4030, 4056), True, 'import tensorflow as tf\n'), ((1068, 1098), 're.findall', 're.findall', (['"""\\\\(([^)]+)\\\\)"""', 's'], {}), "('\\\\(([^)]+)\\\\)', s)\n", (1078, 1098), False, 'import re\n'), ((2920, 2958), 'tensorflow.keras.layers.Normalization', 'tf.keras.layers.Normalization', ([], {'axis': '(-1)'}), '(axis=-1)\n', (2949, 2958), True, 'import tensorflow as tf\n'), ((3015, 3036), 'tensorflow.keras.Sequential', 'tf.keras.Sequential', ([], {}), '()\n', (3034, 3036), True, 'import tensorflow as tf\n'), ((4378, 4400), 'json.dumps', 'json.dumps', (['model_spec'], {}), '(model_spec)\n', (4388, 4400), False, 'import json\n'), ((1475, 1493), 'pandas.value_counts', 'pd.value_counts', (['s'], {}), '(s)\n', (1490, 1493), True, 'import pandas as pd\n'), ((2980, 3000), 'numpy.asarray', 'np.asarray', (['df_train'], {}), '(df_train)\n', (2990, 3000), True, 'import numpy as np\n'), ((3312, 3333), 'tensorflow.keras.layers.Dense', 'layers.Dense', ([], {'units': '(1)'}), '(units=1)\n', (3324, 3333), False, 'from tensorflow.keras import layers\n'), ((558, 589), 'pandas.to_timedelta', 'pd.to_timedelta', 
(["df['duration']"], {}), "(df['duration'])\n", (573, 589), True, 'import pandas as pd\n'), ((886, 922), 'functools.partial', 'partial', (['re.match', '"""[0-9]+\\\\:[0-9]+"""'], {}), "(re.match, '[0-9]+\\\\:[0-9]+')\n", (893, 922), False, 'from functools import partial\n'), ((3104, 3128), 'tensorflow.keras.layers.Dropout', 'layers.Dropout', (['droprate'], {}), '(droprate)\n', (3118, 3128), False, 'from tensorflow.keras import layers\n'), ((3184, 3227), 'tensorflow.keras.layers.Dense', 'layers.Dense', (['layer_size'], {'activation': '"""relu"""'}), "(layer_size, activation='relu')\n", (3196, 3227), False, 'from tensorflow.keras import layers\n'), ((3384, 3431), 'tensorflow.optimizers.Adam', 'tf.optimizers.Adam', ([], {'learning_rate': 'learning_rate'}), '(learning_rate=learning_rate)\n', (3402, 3431), True, 'import tensorflow as tf\n'), ((1118, 1146), 're.split', 're.split', (['"""\\\\(([^)]+)\\\\)"""', 's'], {}), "('\\\\(([^)]+)\\\\)', s)\n", (1126, 1146), False, 'import re\n'), ((3272, 3296), 'tensorflow.keras.layers.Dropout', 'layers.Dropout', (['droprate'], {}), '(droprate)\n', (3286, 3296), False, 'from tensorflow.keras import layers\n'), ((3549, 3567), 'numpy.asarray', 'np.asarray', (['df_val'], {}), '(df_val)\n', (3559, 3567), True, 'import numpy as np\n')] |
""" Document Localization using Recursive CNN
Maintainer : <NAME>
Email : <EMAIL> """
import imgaug.augmenters as iaa
import csv
import logging
import os
import xml.etree.ElementTree as ET
import numpy as np
from torchvision import transforms
import utils.utils as utils
# To incdude a new Dataset, inherit from Dataset and add all the Dataset specific parameters here.
# Goal : Remove any data specific parameters from the rest of the code
logger = logging.getLogger("iCARL")
class Dataset:
    """
    Minimal base class for a dataset: a name plus parallel lists of samples
    and ground-truth labels, which subclasses populate.
    """

    def __init__(self, name):
        # name identifies the dataset; data/labels start empty
        self.name = name
        self.data = []
        self.labels = []
def getTransformsByImgaug():
    """
    Build the training-time augmentation pipeline and return its
    ``augment_image`` callable.

    The image is always resized to 32x32; then, each with 5% probability,
    one blur, one colour, one weather and one contrast augmentation
    (randomly chosen from the alternatives below) may be applied.
    """
    return iaa.Sequential(
        [
            iaa.Resize(32),
            # Add blur
            iaa.Sometimes(
                0.05,
                iaa.OneOf(
                    [
                        iaa.GaussianBlur(
                            (0, 3.0)
                        ),  # blur images with a sigma between 0 and 3.0
                        iaa.AverageBlur(
                            k=(2, 11)
                        ),  # blur image using local means with kernel sizes between 2 and 11
                        iaa.MedianBlur(
                            k=(3, 11)
                        ),  # blur image using local medians with kernel sizes between 3 and 11
                        iaa.MotionBlur(k=15, angle=[-45, 45]),
                    ]
                ),
            ),
            # Add color
            iaa.Sometimes(
                0.05,
                iaa.OneOf(
                    [
                        iaa.WithHueAndSaturation(iaa.WithChannels(0, iaa.Add((0, 50)))),
                        iaa.AddToBrightness((-30, 30)),
                        iaa.MultiplyBrightness((0.5, 1.5)),
                        iaa.AddToHueAndSaturation((-50, 50), per_channel=True),
                        iaa.Grayscale(alpha=(0.0, 1.0)),
                        iaa.ChangeColorTemperature((1100, 10000)),
                        iaa.KMeansColorQuantization(),
                    ]
                ),
            ),
            # Add weather
            iaa.Sometimes(
                0.05,
                iaa.OneOf(
                    [
                        iaa.Clouds(),
                        iaa.Fog(),
                        iaa.Snowflakes(flake_size=(0.1, 0.4), speed=(0.01, 0.05)),
                        iaa.Rain(speed=(0.1, 0.3)),
                    ]
                ),
            ),
            # Add contrast
            iaa.Sometimes(
                0.05,
                iaa.OneOf(
                    [
                        iaa.GammaContrast((0.5, 2.0)),
                        iaa.GammaContrast((0.5, 2.0), per_channel=True),
                        iaa.SigmoidContrast(gain=(3, 10), cutoff=(0.4, 0.6)),
                        iaa.SigmoidContrast(
                            gain=(3, 10), cutoff=(0.4, 0.6), per_channel=True
                        ),
                        iaa.LogContrast(gain=(0.6, 1.4)),
                        iaa.LogContrast(gain=(0.6, 1.4), per_channel=True),
                        iaa.LinearContrast((0.4, 1.6)),
                        iaa.LinearContrast((0.4, 1.6), per_channel=True),
                        iaa.AllChannelsCLAHE(),
                        iaa.AllChannelsCLAHE(clip_limit=(1, 10)),
                        iaa.AllChannelsCLAHE(clip_limit=(1, 10), per_channel=True),
                        iaa.Alpha((0.0, 1.0), iaa.HistogramEqualization()),
                        iaa.Alpha((0.0, 1.0), iaa.AllChannelsHistogramEqualization()),
                        iaa.AllChannelsHistogramEqualization(),
                    ]
                ),
            )
        ]
    ).augment_image
class SmartDoc(Dataset):
    """
    SmartDoc document-localization dataset: images listed in a per-directory
    "gt.csv" file with eight ground-truth corner coordinates per image.
    """

    def __init__(self, directory="data"):
        """
        :param directory: iterable of dataset directories, each containing a
            "gt.csv" file with rows of (image name, corner-array string)
        """
        super().__init__("smartdoc")
        import ast

        self.data = []
        self.labels = []
        # transforms are directory-independent, so build them once up front
        self.train_transform = transforms.Compose(
            [
                getTransformsByImgaug(),
                transforms.ToTensor(),
            ]
        )
        self.test_transform = transforms.Compose(
            [
                iaa.Sequential([iaa.Resize(32)]).augment_image,
                transforms.ToTensor(),
            ]
        )
        self.classes_list = {}
        for d in directory:
            self.directory = d
            logger.info("Pass train/test data paths here")
            file_names = []
            print(self.directory, "gt.csv")
            with open(os.path.join(self.directory, "gt.csv"), "r") as csvfile:
                spamreader = csv.reader(
                    csvfile, delimiter=",", quotechar="|", quoting=csv.QUOTE_MINIMAL
                )
                for row in spamreader:
                    file_names.append(row[0])
                    self.data.append(os.path.join(self.directory, row[0]))
                    # ground truth is stored as a stringified numpy array
                    test = row[1].replace("array", "")
                    self.labels.append(ast.literal_eval(test))
        # bug fix: the ndarray conversion used to happen inside the directory
        # loop, so a second directory would call .append on an ndarray and fail
        self.labels = np.array(self.labels)
        self.labels = np.reshape(self.labels, (-1, 8))
        logger.debug("Ground Truth Shape: %s", str(self.labels.shape))
        logger.debug("Data shape %s", str(len(self.data)))
        self.myData = [self.data, self.labels]
class SmartDocDirectories(Dataset):
    """
    SmartDoc video-frame dataset laid out as directories: each video directory
    contains its frames as .jpg files plus a "<name>.gt" XML file with one
    <frame> element of corner <point>s per frame.
    """

    def __init__(self, directory="data"):
        super().__init__("smartdoc")
        self.data = []
        self.labels = []
        # walk background-condition folders, then video folders inside them
        for folder in os.listdir(directory):
            if os.path.isdir(directory + "/" + folder):
                for file in os.listdir(directory + "/" + folder):
                    images_dir = directory + "/" + folder + "/" + file
                    if os.path.isdir(images_dir):
                        # collect the per-frame ground-truth elements from the XML
                        list_gt = []
                        tree = ET.parse(images_dir + "/" + file + ".gt")
                        root = tree.getroot()
                        for a in root.iter("frame"):
                            list_gt.append(a)
                        im_no = 0
                        for image in os.listdir(images_dir):
                            if image.endswith(".jpg"):
                                # print(im_no)
                                im_no += 1
                                # Now we have opened the file and GT. Write code to create multiple files and scale gt
                                list_of_points = {}
                                # img = cv2.imread(images_dir + "/" + image)
                                self.data.append(os.path.join(images_dir, image))
                                # frame files are named by 1-based frame number,
                                # so "<n>.jpg" maps to list_gt[n - 1]
                                for point in list_gt[int(float(image[0:-4])) - 1].iter(
                                    "point"
                                ):
                                    myDict = point.attrib
                                    list_of_points[myDict["name"]] = (
                                        int(float(myDict["x"])),
                                        int(float(myDict["y"])),
                                    )
                                # order: top-left, top-right, bottom-right, bottom-left
                                ground_truth = np.asarray(
                                    (
                                        list_of_points["tl"],
                                        list_of_points["tr"],
                                        list_of_points["br"],
                                        list_of_points["bl"],
                                    )
                                )
                                ground_truth = utils.sort_gt(ground_truth)
                                self.labels.append(ground_truth)
        self.labels = np.array(self.labels)
        # four (x, y) corner pairs flattened into eight values per sample
        self.labels = np.reshape(self.labels, (-1, 8))
        logger.debug("Ground Truth Shape: %s", str(self.labels.shape))
        logger.debug("Data shape %s", str(len(self.data)))
        self.myData = []
        for a in range(len(self.data)):
            self.myData.append([self.data[a], self.labels[a]])
class SelfCollectedDataset(Dataset):
    """
    Self-collected document dataset: a flat directory of .jpg/.JPG images,
    each with a sibling "<image>.csv" file of space-separated ground-truth
    corner coordinates.
    """

    def __init__(self, directory="data"):
        super().__init__("smartdoc")
        self.data = []
        self.labels = []
        for image in os.listdir(directory):
            # print (image)
            if image.endswith("jpg") or image.endswith("JPG"):
                # only images with an accompanying ground-truth csv are usable
                if os.path.isfile(os.path.join(directory, image + ".csv")):
                    with open(os.path.join(directory, image + ".csv"), "r") as csvfile:
                        spamwriter = csv.reader(
                            csvfile,
                            delimiter=" ",
                            quotechar="|",
                            quoting=csv.QUOTE_MINIMAL,
                        )
                        img_path = os.path.join(directory, image)
                        gt = []
                        for row in spamwriter:
                            gt.append(row)
                        gt = np.array(gt).astype(np.float32)
                        ground_truth = utils.sort_gt(gt)
                        self.labels.append(ground_truth)
                        self.data.append(img_path)
        self.labels = np.array(self.labels)
        # four (x, y) corner pairs flattened into eight values per sample
        self.labels = np.reshape(self.labels, (-1, 8))
        logger.debug("Ground Truth Shape: %s", str(self.labels.shape))
        logger.debug("Data shape %s", str(len(self.data)))
        self.myData = []
        for a in range(len(self.data)):
            self.myData.append([self.data[a], self.labels[a]])
class SmartDocCorner(Dataset):
    """
    Class to include MNIST specific details
    """
    # NOTE(review): docstring looks copy-pasted from a template — this class
    # actually loads document-corner samples (image path + one 2-D point)
    # listed in a gt.csv per directory.

    def __init__(self, directory="data"):
        super().__init__("smartdoc")
        self.data = []    # image file paths
        self.labels = []  # one (x, y) corner coordinate per image
        # NOTE(review): `directory` is iterated element-wise, so callers are
        # presumably expected to pass a list of dataset paths; the default
        # string "data" would iterate its characters — confirm against callers.
        for d in directory:
            self.directory = d
            # Training pipeline: imgaug augmentations, then tensor conversion.
            self.train_transform = transforms.Compose(
                [
                    getTransformsByImgaug(),
                    transforms.ToTensor(),
                ]
            )
            # Test pipeline: deterministic resize to 32 only, then to tensor.
            self.test_transform = transforms.Compose(
                [
                    iaa.Sequential(
                        [
                            iaa.Resize(32),
                        ]
                    ).augment_image,
                    transforms.ToTensor(),
                ]
            )
            logger.info("Pass train/test data paths here")
            self.classes_list = {}
            file_names = []
            # gt.csv rows: <relative image path>, <stringified label array>.
            with open(os.path.join(self.directory, "gt.csv"), "r") as csvfile:
                spamreader = csv.reader(
                    csvfile, delimiter=",", quotechar="|", quoting=csv.QUOTE_MINIMAL
                )
                import ast
                for row in spamreader:
                    file_names.append(row[0])
                    self.data.append(os.path.join(self.directory, row[0]))
                    # Labels are stored like "array([x, y])"; strip the
                    # "array" wrapper so ast.literal_eval can parse the rest.
                    test = row[1].replace("array", "")
                    self.labels.append((ast.literal_eval(test)))
        self.labels = np.array(self.labels)
        # One (x, y) point per sample.
        self.labels = np.reshape(self.labels, (-1, 2))
        logger.debug("Ground Truth Shape: %s", str(self.labels.shape))
        logger.debug("Data shape %s", str(len(self.data)))
        # NOTE(review): unlike SelfCollectedDataset, myData here is the pair
        # [paths, labels] rather than a per-sample list of pairs.
        self.myData = [self.data, self.labels]
| [
"logging.getLogger",
"imgaug.augmenters.AverageBlur",
"utils.utils.sort_gt",
"imgaug.augmenters.AllChannelsHistogramEqualization",
"imgaug.augmenters.GaussianBlur",
"numpy.array",
"imgaug.augmenters.Resize",
"imgaug.augmenters.Snowflakes",
"imgaug.augmenters.LogContrast",
"imgaug.augmenters.Graysc... | [((457, 483), 'logging.getLogger', 'logging.getLogger', (['"""iCARL"""'], {}), "('iCARL')\n", (474, 483), False, 'import logging\n'), ((5406, 5427), 'numpy.array', 'np.array', (['self.labels'], {}), '(self.labels)\n', (5414, 5427), True, 'import numpy as np\n'), ((5451, 5483), 'numpy.reshape', 'np.reshape', (['self.labels', '(-1, 8)'], {}), '(self.labels, (-1, 8))\n', (5461, 5483), True, 'import numpy as np\n'), ((5911, 5932), 'os.listdir', 'os.listdir', (['directory'], {}), '(directory)\n', (5921, 5932), False, 'import os\n'), ((8053, 8074), 'numpy.array', 'np.array', (['self.labels'], {}), '(self.labels)\n', (8061, 8074), True, 'import numpy as np\n'), ((8098, 8130), 'numpy.reshape', 'np.reshape', (['self.labels', '(-1, 8)'], {}), '(self.labels, (-1, 8))\n', (8108, 8130), True, 'import numpy as np\n'), ((8639, 8660), 'os.listdir', 'os.listdir', (['directory'], {}), '(directory)\n', (8649, 8660), False, 'import os\n'), ((9609, 9630), 'numpy.array', 'np.array', (['self.labels'], {}), '(self.labels)\n', (9617, 9630), True, 'import numpy as np\n'), ((9654, 9686), 'numpy.reshape', 'np.reshape', (['self.labels', '(-1, 8)'], {}), '(self.labels, (-1, 8))\n', (9664, 9686), True, 'import numpy as np\n'), ((11414, 11435), 'numpy.array', 'np.array', (['self.labels'], {}), '(self.labels)\n', (11422, 11435), True, 'import numpy as np\n'), ((11459, 11491), 'numpy.reshape', 'np.reshape', (['self.labels', '(-1, 2)'], {}), '(self.labels, (-1, 2))\n', (11469, 11491), True, 'import numpy as np\n'), ((5949, 5988), 'os.path.isdir', 'os.path.isdir', (["(directory + '/' + folder)"], {}), "(directory + '/' + folder)\n", (5962, 5988), False, 'import os\n'), ((739, 753), 'imgaug.augmenters.Resize', 'iaa.Resize', (['(32)'], {}), '(32)\n', (749, 753), True, 'import imgaug.augmenters as iaa\n'), ((4961, 5037), 'csv.reader', 'csv.reader', (['csvfile'], {'delimiter': '""","""', 'quotechar': '"""|"""', 'quoting': 'csv.QUOTE_MINIMAL'}), "(csvfile, delimiter=',', 
quotechar='|', quoting=csv.QUOTE_MINIMAL)\n", (4971, 5037), False, 'import csv\n'), ((6018, 6054), 'os.listdir', 'os.listdir', (["(directory + '/' + folder)"], {}), "(directory + '/' + folder)\n", (6028, 6054), False, 'import os\n'), ((10969, 11045), 'csv.reader', 'csv.reader', (['csvfile'], {'delimiter': '""","""', 'quotechar': '"""|"""', 'quoting': 'csv.QUOTE_MINIMAL'}), "(csvfile, delimiter=',', quotechar='|', quoting=csv.QUOTE_MINIMAL)\n", (10979, 11045), False, 'import csv\n'), ((4312, 4333), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (4331, 4333), False, 'from torchvision import transforms\n'), ((4629, 4650), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (4648, 4650), False, 'from torchvision import transforms\n'), ((4875, 4913), 'os.path.join', 'os.path.join', (['self.directory', '"""gt.csv"""'], {}), "(self.directory, 'gt.csv')\n", (4887, 4913), False, 'import os\n'), ((6150, 6175), 'os.path.isdir', 'os.path.isdir', (['images_dir'], {}), '(images_dir)\n', (6163, 6175), False, 'import os\n'), ((8787, 8826), 'os.path.join', 'os.path.join', (['directory', "(image + '.csv')"], {}), "(directory, image + '.csv')\n", (8799, 8826), False, 'import os\n'), ((10364, 10385), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (10383, 10385), False, 'from torchvision import transforms\n'), ((10681, 10702), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (10700, 10702), False, 'from torchvision import transforms\n'), ((10883, 10921), 'os.path.join', 'os.path.join', (['self.directory', '"""gt.csv"""'], {}), "(self.directory, 'gt.csv')\n", (10895, 10921), False, 'import os\n'), ((5226, 5262), 'os.path.join', 'os.path.join', (['self.directory', 'row[0]'], {}), '(self.directory, row[0])\n', (5238, 5262), False, 'import os\n'), ((5359, 5381), 'ast.literal_eval', 'ast.literal_eval', (['test'], {}), '(test)\n', (5375, 5381), False, 'import ast\n'), ((6246, 6287), 
'xml.etree.ElementTree.parse', 'ET.parse', (["(images_dir + '/' + file + '.gt')"], {}), "(images_dir + '/' + file + '.gt')\n", (6254, 6287), True, 'import xml.etree.ElementTree as ET\n'), ((6505, 6527), 'os.listdir', 'os.listdir', (['images_dir'], {}), '(images_dir)\n', (6515, 6527), False, 'import os\n'), ((8954, 9030), 'csv.reader', 'csv.reader', (['csvfile'], {'delimiter': '""" """', 'quotechar': '"""|"""', 'quoting': 'csv.QUOTE_MINIMAL'}), "(csvfile, delimiter=' ', quotechar='|', quoting=csv.QUOTE_MINIMAL)\n", (8964, 9030), False, 'import csv\n'), ((9206, 9236), 'os.path.join', 'os.path.join', (['directory', 'image'], {}), '(directory, image)\n', (9218, 9236), False, 'import os\n'), ((9460, 9477), 'utils.utils.sort_gt', 'utils.sort_gt', (['gt'], {}), '(gt)\n', (9473, 9477), True, 'import utils.utils as utils\n'), ((11234, 11270), 'os.path.join', 'os.path.join', (['self.directory', 'row[0]'], {}), '(self.directory, row[0])\n', (11246, 11270), False, 'import os\n'), ((11367, 11389), 'ast.literal_eval', 'ast.literal_eval', (['test'], {}), '(test)\n', (11383, 11389), False, 'import ast\n'), ((900, 926), 'imgaug.augmenters.GaussianBlur', 'iaa.GaussianBlur', (['(0, 3.0)'], {}), '((0, 3.0))\n', (916, 926), True, 'import imgaug.augmenters as iaa\n'), ((1052, 1078), 'imgaug.augmenters.AverageBlur', 'iaa.AverageBlur', ([], {'k': '(2, 11)'}), '(k=(2, 11))\n', (1067, 1078), True, 'import imgaug.augmenters as iaa\n'), ((1224, 1249), 'imgaug.augmenters.MedianBlur', 'iaa.MedianBlur', ([], {'k': '(3, 11)'}), '(k=(3, 11))\n', (1238, 1249), True, 'import imgaug.augmenters as iaa\n'), ((1397, 1434), 'imgaug.augmenters.MotionBlur', 'iaa.MotionBlur', ([], {'k': '(15)', 'angle': '[-45, 45]'}), '(k=15, angle=[-45, 45])\n', (1411, 1434), True, 'import imgaug.augmenters as iaa\n'), ((1727, 1757), 'imgaug.augmenters.AddToBrightness', 'iaa.AddToBrightness', (['(-30, 30)'], {}), '((-30, 30))\n', (1746, 1757), True, 'import imgaug.augmenters as iaa\n'), ((1783, 1817), 
'imgaug.augmenters.MultiplyBrightness', 'iaa.MultiplyBrightness', (['(0.5, 1.5)'], {}), '((0.5, 1.5))\n', (1805, 1817), True, 'import imgaug.augmenters as iaa\n'), ((1843, 1897), 'imgaug.augmenters.AddToHueAndSaturation', 'iaa.AddToHueAndSaturation', (['(-50, 50)'], {'per_channel': '(True)'}), '((-50, 50), per_channel=True)\n', (1868, 1897), True, 'import imgaug.augmenters as iaa\n'), ((1923, 1954), 'imgaug.augmenters.Grayscale', 'iaa.Grayscale', ([], {'alpha': '(0.0, 1.0)'}), '(alpha=(0.0, 1.0))\n', (1936, 1954), True, 'import imgaug.augmenters as iaa\n'), ((1980, 2021), 'imgaug.augmenters.ChangeColorTemperature', 'iaa.ChangeColorTemperature', (['(1100, 10000)'], {}), '((1100, 10000))\n', (2006, 2021), True, 'import imgaug.augmenters as iaa\n'), ((2047, 2076), 'imgaug.augmenters.KMeansColorQuantization', 'iaa.KMeansColorQuantization', ([], {}), '()\n', (2074, 2076), True, 'import imgaug.augmenters as iaa\n'), ((2281, 2293), 'imgaug.augmenters.Clouds', 'iaa.Clouds', ([], {}), '()\n', (2291, 2293), True, 'import imgaug.augmenters as iaa\n'), ((2319, 2328), 'imgaug.augmenters.Fog', 'iaa.Fog', ([], {}), '()\n', (2326, 2328), True, 'import imgaug.augmenters as iaa\n'), ((2354, 2411), 'imgaug.augmenters.Snowflakes', 'iaa.Snowflakes', ([], {'flake_size': '(0.1, 0.4)', 'speed': '(0.01, 0.05)'}), '(flake_size=(0.1, 0.4), speed=(0.01, 0.05))\n', (2368, 2411), True, 'import imgaug.augmenters as iaa\n'), ((2437, 2463), 'imgaug.augmenters.Rain', 'iaa.Rain', ([], {'speed': '(0.1, 0.3)'}), '(speed=(0.1, 0.3))\n', (2445, 2463), True, 'import imgaug.augmenters as iaa\n'), ((2670, 2699), 'imgaug.augmenters.GammaContrast', 'iaa.GammaContrast', (['(0.5, 2.0)'], {}), '((0.5, 2.0))\n', (2687, 2699), True, 'import imgaug.augmenters as iaa\n'), ((2725, 2772), 'imgaug.augmenters.GammaContrast', 'iaa.GammaContrast', (['(0.5, 2.0)'], {'per_channel': '(True)'}), '((0.5, 2.0), per_channel=True)\n', (2742, 2772), True, 'import imgaug.augmenters as iaa\n'), ((2798, 2850), 
'imgaug.augmenters.SigmoidContrast', 'iaa.SigmoidContrast', ([], {'gain': '(3, 10)', 'cutoff': '(0.4, 0.6)'}), '(gain=(3, 10), cutoff=(0.4, 0.6))\n', (2817, 2850), True, 'import imgaug.augmenters as iaa\n'), ((2876, 2946), 'imgaug.augmenters.SigmoidContrast', 'iaa.SigmoidContrast', ([], {'gain': '(3, 10)', 'cutoff': '(0.4, 0.6)', 'per_channel': '(True)'}), '(gain=(3, 10), cutoff=(0.4, 0.6), per_channel=True)\n', (2895, 2946), True, 'import imgaug.augmenters as iaa\n'), ((3026, 3058), 'imgaug.augmenters.LogContrast', 'iaa.LogContrast', ([], {'gain': '(0.6, 1.4)'}), '(gain=(0.6, 1.4))\n', (3041, 3058), True, 'import imgaug.augmenters as iaa\n'), ((3084, 3134), 'imgaug.augmenters.LogContrast', 'iaa.LogContrast', ([], {'gain': '(0.6, 1.4)', 'per_channel': '(True)'}), '(gain=(0.6, 1.4), per_channel=True)\n', (3099, 3134), True, 'import imgaug.augmenters as iaa\n'), ((3160, 3190), 'imgaug.augmenters.LinearContrast', 'iaa.LinearContrast', (['(0.4, 1.6)'], {}), '((0.4, 1.6))\n', (3178, 3190), True, 'import imgaug.augmenters as iaa\n'), ((3216, 3264), 'imgaug.augmenters.LinearContrast', 'iaa.LinearContrast', (['(0.4, 1.6)'], {'per_channel': '(True)'}), '((0.4, 1.6), per_channel=True)\n', (3234, 3264), True, 'import imgaug.augmenters as iaa\n'), ((3290, 3312), 'imgaug.augmenters.AllChannelsCLAHE', 'iaa.AllChannelsCLAHE', ([], {}), '()\n', (3310, 3312), True, 'import imgaug.augmenters as iaa\n'), ((3338, 3378), 'imgaug.augmenters.AllChannelsCLAHE', 'iaa.AllChannelsCLAHE', ([], {'clip_limit': '(1, 10)'}), '(clip_limit=(1, 10))\n', (3358, 3378), True, 'import imgaug.augmenters as iaa\n'), ((3404, 3462), 'imgaug.augmenters.AllChannelsCLAHE', 'iaa.AllChannelsCLAHE', ([], {'clip_limit': '(1, 10)', 'per_channel': '(True)'}), '(clip_limit=(1, 10), per_channel=True)\n', (3424, 3462), True, 'import imgaug.augmenters as iaa\n'), ((3651, 3689), 'imgaug.augmenters.AllChannelsHistogramEqualization', 'iaa.AllChannelsHistogramEqualization', ([], {}), '()\n', (3687, 3689), True, 'import 
imgaug.augmenters as iaa\n'), ((8859, 8898), 'os.path.join', 'os.path.join', (['directory', "(image + '.csv')"], {}), "(directory, image + '.csv')\n", (8871, 8898), False, 'import os\n'), ((3510, 3537), 'imgaug.augmenters.HistogramEqualization', 'iaa.HistogramEqualization', ([], {}), '()\n', (3535, 3537), True, 'import imgaug.augmenters as iaa\n'), ((3586, 3624), 'imgaug.augmenters.AllChannelsHistogramEqualization', 'iaa.AllChannelsHistogramEqualization', ([], {}), '()\n', (3622, 3624), True, 'import imgaug.augmenters as iaa\n'), ((4530, 4544), 'imgaug.augmenters.Resize', 'iaa.Resize', (['(32)'], {}), '(32)\n', (4540, 4544), True, 'import imgaug.augmenters as iaa\n'), ((7520, 7625), 'numpy.asarray', 'np.asarray', (["(list_of_points['tl'], list_of_points['tr'], list_of_points['br'],\n list_of_points['bl'])"], {}), "((list_of_points['tl'], list_of_points['tr'], list_of_points['br'\n ], list_of_points['bl']))\n", (7530, 7625), True, 'import numpy as np\n'), ((7937, 7964), 'utils.utils.sort_gt', 'utils.sort_gt', (['ground_truth'], {}), '(ground_truth)\n', (7950, 7964), True, 'import utils.utils as utils\n'), ((9389, 9401), 'numpy.array', 'np.array', (['gt'], {}), '(gt)\n', (9397, 9401), True, 'import numpy as np\n'), ((10582, 10596), 'imgaug.augmenters.Resize', 'iaa.Resize', (['(32)'], {}), '(32)\n', (10592, 10596), True, 'import imgaug.augmenters as iaa\n'), ((1683, 1699), 'imgaug.augmenters.Add', 'iaa.Add', (['(0, 50)'], {}), '((0, 50))\n', (1690, 1699), True, 'import imgaug.augmenters as iaa\n'), ((6973, 7004), 'os.path.join', 'os.path.join', (['images_dir', 'image'], {}), '(images_dir, image)\n', (6985, 7004), False, 'import os\n')] |
# -*- coding: utf-8 -*-
"""
Created on Sat Aug 22 12:07:01 2020
@author: <NAME>
"""
from nltk.cluster.util import cosine_distance
import numpy as np
import networkx as nx
import math
def get_doc(nlp, file_name, encoding_='utf-8'):
    """Read *file_name* with encoding *encoding_* and parse it with *nlp*.

    :param nlp: callable (e.g. a spaCy pipeline) applied to the file text.
    :param file_name: path of the text file to read.
    :param encoding_: text encoding used to decode the file.
    :returns: whatever *nlp* returns for the file's contents.
    """
    # Use a context manager so the file handle is closed even if nlp() raises;
    # the previous open(...).read() call leaked the handle.
    with open(file_name, 'r', encoding=encoding_) as source:
        return nlp(source.read())
def get_sentences(doc):
    """Return the document's sentences as a list."""
    return list(doc.sents)
def get_tokens_of_sentence(sentence):
    """Return the sentence's tokens as a list."""
    return list(sentence)
def token_to_vector(token):
    """Return the embedding vector stored on *token*."""
    return getattr(token, 'vector')
def sentence_to_vectors(sentence):
    """Vectors of every token in *sentence* that is neither punctuation
    nor whitespace."""
    vectors = []
    for token in get_tokens_of_sentence(sentence):
        # Punctuation and whitespace tokens carry no useful semantics here.
        if token.is_punct or token.is_space:
            continue
        vectors.append(token_to_vector(token))
    return vectors
def sentence_mean_vector(sentence):
    """Mean of the sentence's token vectors; an all-zero vector if the
    sentence has no usable tokens."""
    vectors = sentence_to_vectors(sentence)
    if not vectors:
        # 300 matches the embedding width assumed elsewhere in this module
        # — TODO confirm it matches the loaded model's vector size.
        return np.zeros(300, np.float32)
    return np.mean(vectors, axis=0)
def sentences_similarity(sentence_l, sentence_r):
    """Cosine similarity between the two sentences' mean vectors.

    Returns 0.0 when either mean vector is all zeros (cosine distance
    would be undefined there).
    """
    mean_l = sentence_mean_vector(sentence_l)
    mean_r = sentence_mean_vector(sentence_r)
    # A zero vector has a zero self-dot-product; bail out before dividing.
    if np.dot(mean_l, mean_l) == 0 or np.dot(mean_r, mean_r) == 0:
        return 0.0
    return 1 - cosine_distance(mean_l, mean_r)
def similar_sentence_pairs(sentences, threshold):
    """All unordered sentence pairs whose similarity exceeds *threshold*,
    in document order."""
    return [
        (first, second)
        for i, first in enumerate(sentences)
        for second in sentences[i + 1:]
        if sentences_similarity(first, second) > threshold
    ]
def similar_matrix(sentences, threshold):
    """Strictly-upper-triangular 0/1 matrix marking sentence pairs whose
    similarity exceeds *threshold*."""
    size = len(sentences)
    matrix = np.zeros((size, size))
    for i in range(size):
        for j in range(i + 1, size):
            if sentences_similarity(sentences[i], sentences[j]) > threshold:
                matrix[i, j] += 1
    return matrix
def summarize(nlp, file_name, threshold=0.95, top_most=10):
    """Extractive summary: the *top_most* sentences with the highest
    similarity row-sums, returned in document order."""
    sents = get_sentences(get_doc(nlp, file_name))
    matrix = similar_matrix(sents, threshold)
    row_scores = [np.sum(row) for row in matrix]
    # Rank (score, index) pairs; highest score first, ties by higher index.
    ranked = sorted(zip(row_scores, range(len(row_scores))), reverse=True)
    top = ranked[:top_most]
    # Restore document order of the selected sentences.
    indices = sorted(idx for _, idx in top)
    return [sents[idx] for idx in indices]
def summarize_pretty(nlp, file_name, threshold=0.95, top_most=10):
    """Print each summary sentence, stripped, followed by a blank line."""
    for sent in summarize(nlp, file_name, threshold, top_most):
        print("{0}\n".format(sent.text.strip()))
def summarize_as_adj_edges(sents, threshold=0.95):
    """Similarity graph as (vertex list, edge list).

    Vertices are sentence indices; an edge (i, j) exists when the two
    sentences' similarity exceeds *threshold* (both directions are tested,
    so symmetric pairs appear twice).
    """
    vert_list = list(range(len(sents)))
    edge_list = [
        (idx1, idx2)
        for idx1 in range(len(sents))
        for idx2 in range(len(sents))
        if idx1 != idx2 and sentences_similarity(sents[idx1], sents[idx2]) > threshold
    ]
    return (vert_list, edge_list)
def summarize_as_adj_G(sents, threshold=0.95):
    """Build a networkx Graph from the sentence similarity structure."""
    verts, edges = summarize_as_adj_edges(sents, threshold)
    G = nx.Graph()
    G.add_nodes_from(verts)
    G.add_edges_from(edges)
    return G
def summarize_with_adj_grahp(nlp, file_name, threshold=0.95):
    """Summarise by keeping the sentences in the largest connected
    component of the similarity graph.

    (The misspelled name is kept for API compatibility with callers.)
    """
    sents = get_sentences(get_doc(nlp, file_name))
    components = nx.connected_components(summarize_as_adj_G(sents, threshold))
    largest = max(components, key=len)
    return [sents[i] for i in largest]
"numpy.mean",
"networkx.Graph",
"networkx.connected_components",
"numpy.sum",
"numpy.zeros",
"numpy.dot",
"nltk.cluster.util.cosine_distance"
] | [((3283, 3293), 'networkx.Graph', 'nx.Graph', ([], {}), '()\n', (3291, 3293), True, 'import networkx as nx\n'), ((3646, 3680), 'networkx.connected_components', 'nx.connected_components', (['adj_graph'], {}), '(adj_graph)\n', (3669, 3680), True, 'import networkx as nx\n'), ((822, 847), 'numpy.zeros', 'np.zeros', (['(300)', 'np.float32'], {}), '(300, np.float32)\n', (830, 847), True, 'import numpy as np\n'), ((873, 906), 'numpy.mean', 'np.mean', (['sentence_vectors'], {'axis': '(0)'}), '(sentence_vectors, axis=0)\n', (880, 906), True, 'import numpy as np\n'), ((2307, 2318), 'numpy.sum', 'np.sum', (['row'], {}), '(row)\n', (2313, 2318), True, 'import numpy as np\n'), ((1115, 1169), 'numpy.dot', 'np.dot', (['sentence_l_mean_vector', 'sentence_l_mean_vector'], {}), '(sentence_l_mean_vector, sentence_l_mean_vector)\n', (1121, 1169), True, 'import numpy as np\n'), ((1180, 1234), 'numpy.dot', 'np.dot', (['sentence_r_mean_vector', 'sentence_r_mean_vector'], {}), '(sentence_r_mean_vector, sentence_r_mean_vector)\n', (1186, 1234), True, 'import numpy as np\n'), ((1319, 1382), 'nltk.cluster.util.cosine_distance', 'cosine_distance', (['sentence_l_mean_vector', 'sentence_r_mean_vector'], {}), '(sentence_l_mean_vector, sentence_r_mean_vector)\n', (1334, 1382), False, 'from nltk.cluster.util import cosine_distance\n')] |
import numpy as np
import musher
def test_hpcp():
    """HPCP of a 100 Hz tone plus its first three overtones lights up the
    expected pitch-class bins."""
    tone = 100.
    frequencies = [tone * harmonic for harmonic in (1, 2, 3, 4)]
    magnitudes = [1.] * 4
    actual_hpcp = musher.hpcp(frequencies,
                              magnitudes,
                              harmonics=3,
                              band_preset=False,
                              min_frequency=50.0,
                              max_frequency=500.0)
    # Sparse 12-bin expectation: everything zero except three bins.
    expected_hpcp = [0.] * 12
    expected_hpcp[3] = 0.13404962
    expected_hpcp[5] = 0.24760914
    expected_hpcp[10] = 1.
    assert np.allclose(actual_hpcp, expected_hpcp, rtol=1e-8)
def test_hpcp_from_peaks():
    """Same expectation as test_hpcp, but driven through the spectral-peak
    picking entry point instead of explicit frequency/magnitude lists."""
    buffer = [0.] * 401
    for peak_bin in (100, 200, 300, 400):
        buffer[peak_bin] = 1.
    spectral_peaks = musher.spectral_peaks(buffer, sample_rate=0)
    actual_hpcp = musher.hpcp_from_peaks(spectral_peaks,
                                         harmonics=3,
                                         band_preset=False,
                                         min_frequency=50.0,
                                         max_frequency=500.0)
    # Sparse 12-bin expectation: everything zero except three bins.
    expected_hpcp = [0.] * 12
    expected_hpcp[3] = 0.13404962
    expected_hpcp[5] = 0.24760914
    expected_hpcp[10] = 1.
    assert np.allclose(actual_hpcp, expected_hpcp, rtol=1e-8)
| [
"musher.hpcp_from_peaks",
"musher.hpcp",
"musher.spectral_peaks",
"numpy.allclose"
] | [((270, 415), 'musher.hpcp', 'musher.hpcp', (['frequencies', 'magnitudes'], {'harmonics': 'harmonics', 'band_preset': 'band_preset', 'min_frequency': 'min_frequency', 'max_frequency': 'max_frequency'}), '(frequencies, magnitudes, harmonics=harmonics, band_preset=\n band_preset, min_frequency=min_frequency, max_frequency=max_frequency)\n', (281, 415), False, 'import musher\n'), ((890, 941), 'numpy.allclose', 'np.allclose', (['actual_hpcp', 'expected_hpcp'], {'rtol': '(1e-08)'}), '(actual_hpcp, expected_hpcp, rtol=1e-08)\n', (901, 941), True, 'import numpy as np\n'), ((1201, 1245), 'musher.spectral_peaks', 'musher.spectral_peaks', (['buffer'], {'sample_rate': '(0)'}), '(buffer, sample_rate=0)\n', (1222, 1245), False, 'import musher\n'), ((1264, 1411), 'musher.hpcp_from_peaks', 'musher.hpcp_from_peaks', (['spectral_peaks'], {'harmonics': 'harmonics', 'band_preset': 'band_preset', 'min_frequency': 'min_frequency', 'max_frequency': 'max_frequency'}), '(spectral_peaks, harmonics=harmonics, band_preset=\n band_preset, min_frequency=min_frequency, max_frequency=max_frequency)\n', (1286, 1411), False, 'import musher\n'), ((1900, 1951), 'numpy.allclose', 'np.allclose', (['actual_hpcp', 'expected_hpcp'], {'rtol': '(1e-08)'}), '(actual_hpcp, expected_hpcp, rtol=1e-08)\n', (1911, 1951), True, 'import numpy as np\n')] |
# The code is based on original repository https://github.com/OctoberChang/klcpd_code
# !/usr/bin/env python
# encoding: utf-8
import math
import numpy as np
import random
import sklearn.metrics
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import torch.backends.cudnn as cudnn
from sklearn.metrics.pairwise import euclidean_distances
from .data import HankelDataset
from types import SimpleNamespace
from tqdm import trange
from torch.utils.data import DataLoader
def median_heuristic(X, beta=0.5):
    """Median-heuristic kernel bandwidths.

    Takes the median pairwise squared Euclidean distance over (at most
    30000) rows of X and returns it scaled by powers of *beta*, from
    beta**2 down to beta**-2.
    """
    n_rows = min(30000, X.shape[0])
    sq_dists = euclidean_distances(X[:n_rows], squared=True)
    # Median over the strict upper triangle, i.e. each unordered pair once.
    med_sqdist = np.median(sq_dists[np.triu_indices_from(sq_dists, k=1)])
    scales = [beta ** 2, beta ** 1, 1, (1.0 / beta) ** 1, (1.0 / beta) ** 2]
    return [med_sqdist * s for s in scales]
class NetG(nn.Module):
    """Generator: GRU encoder/decoder over sliding windows.

    The encoder summarises the past window X_p into a hidden state; the
    decoder reconstructs the future window X_f from that state perturbed
    by additive noise, teacher-forced on the right-shifted X_f.
    """

    def __init__(self, var_dim, RNN_hid_dim, num_layers: int = 1):
        super().__init__()
        self.var_dim = var_dim
        self.RNN_hid_dim = RNN_hid_dim
        # Encoder and decoder both consume var_dim-sized steps.
        self.rnn_enc_layer = nn.GRU(self.var_dim, self.RNN_hid_dim, num_layers=num_layers, batch_first=True)
        self.rnn_dec_layer = nn.GRU(self.var_dim, self.RNN_hid_dim, num_layers=num_layers, batch_first=True)
        self.fc_layer = nn.Linear(self.RNN_hid_dim, self.var_dim)

    def forward(self, X_p, X_f, noise):
        """Encode X_p, then decode a reconstruction of X_f.

        X_p / X_f: batch_size x wnd_dim x var_dim.
        noise: 1 x batch_size x RNN_hid_dim, added to the encoder state.
        """
        _, last_hidden = self.rnn_enc_layer(X_p)
        decoder_input = self.shft_right_one(X_f)
        decoded, _ = self.rnn_dec_layer(decoder_input, last_hidden + noise)
        return self.fc_layer(decoded)

    def shft_right_one(self, X):
        """Shift the sequence right by one step, zero-filling position 0."""
        leading_zeros = torch.zeros_like(X[:, :1, :])
        return torch.cat((leading_zeros, X[:, :-1, :]), dim=1)
class NetD(nn.Module):
    """Critic: GRU autoencoder whose code space feeds the MMD statistic."""

    def __init__(self, var_dim, RNN_hid_dim, num_layers: int = 1):
        super().__init__()
        self.var_dim = var_dim
        self.RNN_hid_dim = RNN_hid_dim
        # Encoder maps data -> code; decoder maps code -> reconstruction.
        self.rnn_enc_layer = nn.GRU(self.var_dim, self.RNN_hid_dim, num_layers=num_layers, batch_first=True)
        self.rnn_dec_layer = nn.GRU(self.RNN_hid_dim, self.var_dim, num_layers=num_layers, batch_first=True)

    def forward(self, X):
        """Return (encoding, reconstruction) of the window X."""
        encoded, _ = self.rnn_enc_layer(X)
        reconstructed, _ = self.rnn_dec_layer(encoded)
        return encoded, reconstructed
class KL_CPD(nn.Module):
    """Kernel learning change-point detection (KL-CPD) model.

    A GRU critic (netD) embeds a past window X_p and a future window X_f;
    the change-point score is a random-Fourier-feature estimate of the
    squared MMD between the two embeddings.  A GRU generator (netG)
    produces surrogate future windows used to train the critic adversarially.
    """

    def __init__(self, D: int, critic_iters: int = 5,
                 lambda_ae: float = 0.001, lambda_real: float = 0.1,
                 p_wnd_dim: int = 25, f_wnd_dim: int = 10, sub_dim: int = 1, RNN_hid_dim: int = 10):
        """Configure windows, loss weights, and the two sub-networks.

        :param D: dimensionality of one observation of the time series.
        :param critic_iters: average number of critic updates per generator
            update (see fit()).
        :param lambda_ae: weight of the autoencoder reconstruction terms
            in the critic loss.
        :param lambda_real: weight of the real-data MMD term in the critic loss.
        :param p_wnd_dim: past window length.
        :param f_wnd_dim: future window length.
        :param sub_dim: Hankel sub-embedding factor; var_dim = D * sub_dim.
        :param RNN_hid_dim: GRU hidden size for both netD and netG.
        """
        super().__init__()
        self.p_wnd_dim = p_wnd_dim
        self.f_wnd_dim = f_wnd_dim
        self.sub_dim = sub_dim
        self.D = D
        self.var_dim = D * sub_dim
        self.critic_iters = critic_iters
        self.lambda_ae, self.lambda_real = lambda_ae, lambda_real
        self.RNN_hid_dim = RNN_hid_dim
        self.netD = NetD(self.var_dim, RNN_hid_dim)
        self.netG = NetG(self.var_dim, RNN_hid_dim)

    @property
    def device(self):
        """Device of the model parameters; used to place freshly created tensors."""
        return next(self.parameters()).device

    def __mmd2_loss(self, X_p_enc, X_f_enc):
        """Batched MMD^2 estimate between encodings via random Fourier features.

        X_p_enc / X_f_enc: batch_size x seq_len x nz critic encodings.
        Returns a 1-D tensor of length batch_size.
        NOTE(review): self.sigma_var is only set in fit(), so this (and hence
        forward()/predict()) fails if called before fit().
        """
        sigma_var = self.sigma_var

        # some constants
        n_basis = 1024
        gumbel_lmd = 1e+6
        cnst = math.sqrt(1. / n_basis)
        n_mixtures = sigma_var.size(0)
        n_samples = n_basis * n_mixtures
        batch_size, seq_len, nz = X_p_enc.size()

        # gumbel trick to get masking matrix to uniformly sample sigma
        # input: (batch_size*n_samples, nz)
        # output: (batch_size, n_samples, nz)
        def sample_gmm(W, batch_size):
            # softmax over huge logits (gumbel_lmd * U) is effectively
            # one-hot, so each row picks one bandwidth from sigma_var.
            U = torch.FloatTensor(batch_size * n_samples, n_mixtures).uniform_().to(self.device)
            sigma_samples = F.softmax(U * gumbel_lmd).matmul(sigma_var)
            W_gmm = W.mul(1. / sigma_samples.unsqueeze(1))
            W_gmm = W_gmm.view(batch_size, n_samples, nz)
            return W_gmm

        W = Variable(torch.FloatTensor(batch_size * n_samples, nz).normal_(0, 1).to(self.device))
        W_gmm = sample_gmm(W, batch_size)  # batch_size x n_samples x nz
        W_gmm = torch.transpose(W_gmm, 1, 2).contiguous()  # batch_size x nz x n_samples
        XW_p = torch.bmm(X_p_enc, W_gmm)  # batch_size x seq_len x n_samples
        XW_f = torch.bmm(X_f_enc, W_gmm)  # batch_size x seq_len x n_samples
        # Random Fourier feature map z(x) = cnst * [cos(Wx); sin(Wx)].
        z_XW_p = cnst * torch.cat((torch.cos(XW_p), torch.sin(XW_p)), 2)
        z_XW_f = cnst * torch.cat((torch.cos(XW_f), torch.sin(XW_f)), 2)
        # Squared distance between mean feature maps, one value per batch item.
        batch_mmd2_rff = torch.sum((z_XW_p.mean(1) - z_XW_f.mean(1)) ** 2, 1)
        return batch_mmd2_rff

    def forward(self, X_p: torch.Tensor, X_f: torch.Tensor):
        """Change-point score per window pair: MMD^2 between critic encodings."""
        batch_size = X_p.size(0)
        X_p_enc, _ = self.netD(X_p)
        X_f_enc, _ = self.netD(X_f)
        Y_pred_batch = self.__mmd2_loss(X_p_enc, X_f_enc)
        return Y_pred_batch

    def predict(self, ts):
        """Score every window position of time series *ts*.

        Returns a 1-D numpy array of MMD^2 scores (one per dataset window).
        Must be called after fit(), which sets self.sigma_var.
        """
        dataset = HankelDataset(ts, self.p_wnd_dim, self.f_wnd_dim, self.sub_dim)
        dataloader = DataLoader(dataset, batch_size=128, shuffle=False)
        preds = []
        with torch.no_grad():
            for batch in dataloader:
                X_p, X_f = [batch[key].float().to(self.device) for key in ['X_p', 'X_f']]
                preds.append(self.forward(X_p, X_f).detach().cpu().numpy())
        return np.concatenate(preds)

    def fit(self, ts, epoches: int = 100, lr: float = 3e-4, weight_clip: float = .1, weight_decay: float = 0.,
            momentum: float = 0.):
        """Adversarial training loop (WGAN-style, with critic weight clipping).

        NOTE(review): *momentum* is accepted but never used (AdamW takes no
        momentum argument here) — kept for interface compatibility.
        """
        # must be defined in fit() method
        optG = torch.optim.AdamW(self.netG.parameters(), lr=lr, weight_decay=weight_decay)
        optD = torch.optim.AdamW(self.netD.parameters(), lr=lr, weight_decay=weight_decay)

        dataset = HankelDataset(ts, self.p_wnd_dim, self.f_wnd_dim, self.sub_dim)
        dataloader = DataLoader(dataset, batch_size=64, shuffle=True)
        # Kernel bandwidths via the median heuristic on the Hankel-embedded data.
        sigma_list = median_heuristic(dataset.Y_hankel, beta=.5)
        self.sigma_var = torch.FloatTensor(sigma_list).to(self.device)

        tbar = trange(epoches, disable=True)
        for epoch in tbar:
            for batch in dataloader:
                # Fit critic
                for p in self.netD.parameters():
                    p.requires_grad = True
                # Lipschitz control: clip the critic's encoder weights.
                for p in self.netD.rnn_enc_layer.parameters():
                    p.data.clamp_(-weight_clip, weight_clip)
                self.

Oh wait.
                self._optimizeD(batch, optD)
                # Generator updated once per critic_iters batches on average.
                if np.random.choice(np.arange(self.critic_iters)) == 0:
                    # Fit generator
                    for p in self.netD.parameters():
                        p.requires_grad = False  # to avoid computation
                    self._optimizeG(batch, optG)

    def _optimizeG(self, batch, opt, grad_clip: int = 10):
        """One generator step: minimise MMD^2 between real and generated futures."""
        X_p, X_f = [batch[key].float().to(self.device) for key in ['X_p', 'X_f']]
        batch_size = X_p.size(0)

        # real data
        X_f_enc, X_f_dec = self.netD(X_f)
        # fake data
        noise = torch.FloatTensor(1, batch_size, self.RNN_hid_dim).normal_(0, 1).to(self.device)
        noise = Variable(noise)
        Y_f = self.netG(X_p, X_f, noise)
        Y_f_enc, Y_f_dec = self.netD(Y_f)
        # batchwise MMD2 loss between X_f and Y_f
        G_mmd2 = self.__mmd2_loss(X_f_enc, Y_f_enc)

        # update netG
        self.netG.zero_grad()
        lossG = G_mmd2.mean()
        # lossG = 0.0 * G_mmd2.mean()
        lossG.backward()
        torch.nn.utils.clip_grad_norm_(self.netG.parameters(), grad_clip)
        opt.step()

    def _optimizeD(self, batch, opt, grad_clip: int = 10):
        """One critic step: raise fake MMD^2, keep reconstructions and the
        real-vs-real MMD small (sign-flipped for the minimising optimizer)."""
        X_p, X_f, Y_true = [batch[key].float().to(self.device) for key in ['X_p', 'X_f', 'Y']]
        batch_size = X_p.size(0)

        # real data
        X_p_enc, X_p_dec = self.netD(X_p)
        X_f_enc, X_f_dec = self.netD(X_f)
        # fake data
        noise = torch.FloatTensor(1, batch_size, self.netG.RNN_hid_dim).normal_(0, 1).to(self.device)
        # NOTE(review): Variable(..., volatile=True) is a no-op in modern
        # PyTorch (torch.no_grad() replaced it) — kept as-is.
        noise = Variable(noise, volatile=True)  # total freeze netG
        Y_f = Variable(self.netG(X_p, X_f, noise).data)
        Y_f_enc, Y_f_dec = self.netD(Y_f)

        # batchwise MMD2 loss between X_f and Y_f
        D_mmd2 = self.__mmd2_loss(X_f_enc, Y_f_enc)
        # batchwise MMD loss between X_p and X_f
        mmd2_real = self.__mmd2_loss(X_p_enc, X_f_enc)
        # reconstruction loss
        real_L2_loss = torch.mean((X_f - X_f_dec) ** 2)
        fake_L2_loss = torch.mean((Y_f - Y_f_dec) ** 2)

        # update netD
        lossD = D_mmd2.mean() - self.lambda_ae * (real_L2_loss + fake_L2_loss) - self.lambda_real * mmd2_real.mean()
        self.netD.zero_grad()
        # Critic *maximises* lossD; the optimizer minimises, hence the flip.
        lossD = -lossD
        lossD.backward()
        torch.nn.utils.clip_grad_norm_(self.netD.parameters(), grad_clip)
        opt.step()
if __name__ == '__main__':
    # Smoke test: fit the detector on random noise and score the same series.
    dim, seq_length = 1, 100
    ts = np.random.randn(seq_length, dim)
    model = KL_CPD(dim).to(torch.device('cuda'))
    model.fit(ts)
    print(model.predict(ts))
| [
"math.sqrt",
"torch.sin",
"torch.cos",
"torch.bmm",
"torch.nn.functional.softmax",
"numpy.arange",
"torch.nn.GRU",
"torch.mean",
"numpy.concatenate",
"torch.autograd.Variable",
"sklearn.metrics.pairwise.euclidean_distances",
"numpy.triu_indices_from",
"torch.transpose",
"numpy.random.randn... | [((605, 649), 'sklearn.metrics.pairwise.euclidean_distances', 'euclidean_distances', (['X[:max_n]'], {'squared': '(True)'}), '(X[:max_n], squared=True)\n', (624, 649), False, 'from sklearn.metrics.pairwise import euclidean_distances\n'), ((9219, 9251), 'numpy.random.randn', 'np.random.randn', (['seq_length', 'dim'], {}), '(seq_length, dim)\n', (9234, 9251), True, 'import numpy as np\n'), ((9265, 9285), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (9277, 9285), False, 'import torch\n'), ((1066, 1145), 'torch.nn.GRU', 'nn.GRU', (['self.var_dim', 'self.RNN_hid_dim'], {'num_layers': 'num_layers', 'batch_first': '(True)'}), '(self.var_dim, self.RNN_hid_dim, num_layers=num_layers, batch_first=True)\n', (1072, 1145), True, 'import torch.nn as nn\n'), ((1175, 1254), 'torch.nn.GRU', 'nn.GRU', (['self.var_dim', 'self.RNN_hid_dim'], {'num_layers': 'num_layers', 'batch_first': '(True)'}), '(self.var_dim, self.RNN_hid_dim, num_layers=num_layers, batch_first=True)\n', (1181, 1254), True, 'import torch.nn as nn\n'), ((1279, 1320), 'torch.nn.Linear', 'nn.Linear', (['self.RNN_hid_dim', 'self.var_dim'], {}), '(self.RNN_hid_dim, self.var_dim)\n', (1288, 1320), True, 'import torch.nn as nn\n'), ((2213, 2292), 'torch.nn.GRU', 'nn.GRU', (['self.var_dim', 'self.RNN_hid_dim'], {'num_layers': 'num_layers', 'batch_first': '(True)'}), '(self.var_dim, self.RNN_hid_dim, num_layers=num_layers, batch_first=True)\n', (2219, 2292), True, 'import torch.nn as nn\n'), ((2322, 2401), 'torch.nn.GRU', 'nn.GRU', (['self.RNN_hid_dim', 'self.var_dim'], {'num_layers': 'num_layers', 'batch_first': '(True)'}), '(self.RNN_hid_dim, self.var_dim, num_layers=num_layers, batch_first=True)\n', (2328, 2401), True, 'import torch.nn as nn\n'), ((3500, 3524), 'math.sqrt', 'math.sqrt', (['(1.0 / n_basis)'], {}), '(1.0 / n_basis)\n', (3509, 3524), False, 'import math\n'), ((4457, 4482), 'torch.bmm', 'torch.bmm', (['X_p_enc', 'W_gmm'], {}), '(X_p_enc, W_gmm)\n', (4466, 
4482), False, 'import torch\n'), ((4534, 4559), 'torch.bmm', 'torch.bmm', (['X_f_enc', 'W_gmm'], {}), '(X_f_enc, W_gmm)\n', (4543, 4559), False, 'import torch\n'), ((5260, 5310), 'torch.utils.data.DataLoader', 'DataLoader', (['dataset'], {'batch_size': '(128)', 'shuffle': '(False)'}), '(dataset, batch_size=128, shuffle=False)\n', (5270, 5310), False, 'from torch.utils.data import DataLoader\n'), ((5578, 5599), 'numpy.concatenate', 'np.concatenate', (['preds'], {}), '(preds)\n', (5592, 5599), True, 'import numpy as np\n'), ((6096, 6144), 'torch.utils.data.DataLoader', 'DataLoader', (['dataset'], {'batch_size': '(64)', 'shuffle': '(True)'}), '(dataset, batch_size=64, shuffle=True)\n', (6106, 6144), False, 'from torch.utils.data import DataLoader\n'), ((6305, 6334), 'tqdm.trange', 'trange', (['epoches'], {'disable': '(True)'}), '(epoches, disable=True)\n', (6311, 6334), False, 'from tqdm import trange\n'), ((7363, 7378), 'torch.autograd.Variable', 'Variable', (['noise'], {}), '(noise)\n', (7371, 7378), False, 'from torch.autograd import Variable\n'), ((8290, 8320), 'torch.autograd.Variable', 'Variable', (['noise'], {'volatile': '(True)'}), '(noise, volatile=True)\n', (8298, 8320), False, 'from torch.autograd import Variable\n'), ((8726, 8758), 'torch.mean', 'torch.mean', (['((X_f - X_f_dec) ** 2)'], {}), '((X_f - X_f_dec) ** 2)\n', (8736, 8758), False, 'import torch\n'), ((8782, 8814), 'torch.mean', 'torch.mean', (['((Y_f - Y_f_dec) ** 2)'], {}), '((Y_f - Y_f_dec) ** 2)\n', (8792, 8814), False, 'import torch\n'), ((680, 709), 'numpy.triu_indices_from', 'np.triu_indices_from', (['D2'], {'k': '(1)'}), '(D2, k=1)\n', (700, 709), True, 'import numpy as np\n'), ((5343, 5358), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (5356, 5358), False, 'import torch\n'), ((4369, 4397), 'torch.transpose', 'torch.transpose', (['W_gmm', '(1)', '(2)'], {}), '(W_gmm, 1, 2)\n', (4384, 4397), False, 'import torch\n'), ((6235, 6264), 'torch.FloatTensor', 'torch.FloatTensor', 
(['sigma_list'], {}), '(sigma_list)\n', (6252, 6264), False, 'import torch\n'), ((3987, 4012), 'torch.nn.functional.softmax', 'F.softmax', (['(U * gumbel_lmd)'], {}), '(U * gumbel_lmd)\n', (3996, 4012), True, 'import torch.nn.functional as F\n'), ((4631, 4646), 'torch.cos', 'torch.cos', (['XW_p'], {}), '(XW_p)\n', (4640, 4646), False, 'import torch\n'), ((4648, 4663), 'torch.sin', 'torch.sin', (['XW_p'], {}), '(XW_p)\n', (4657, 4663), False, 'import torch\n'), ((4704, 4719), 'torch.cos', 'torch.cos', (['XW_f'], {}), '(XW_f)\n', (4713, 4719), False, 'import torch\n'), ((4721, 4736), 'torch.sin', 'torch.sin', (['XW_f'], {}), '(XW_f)\n', (4730, 4736), False, 'import torch\n'), ((6725, 6753), 'numpy.arange', 'np.arange', (['self.critic_iters'], {}), '(self.critic_iters)\n', (6734, 6753), True, 'import numpy as np\n'), ((7266, 7316), 'torch.FloatTensor', 'torch.FloatTensor', (['(1)', 'batch_size', 'self.RNN_hid_dim'], {}), '(1, batch_size, self.RNN_hid_dim)\n', (7283, 7316), False, 'import torch\n'), ((8188, 8243), 'torch.FloatTensor', 'torch.FloatTensor', (['(1)', 'batch_size', 'self.netG.RNN_hid_dim'], {}), '(1, batch_size, self.netG.RNN_hid_dim)\n', (8205, 8243), False, 'import torch\n'), ((3878, 3931), 'torch.FloatTensor', 'torch.FloatTensor', (['(batch_size * n_samples)', 'n_mixtures'], {}), '(batch_size * n_samples, n_mixtures)\n', (3895, 3931), False, 'import torch\n'), ((4203, 4248), 'torch.FloatTensor', 'torch.FloatTensor', (['(batch_size * n_samples)', 'nz'], {}), '(batch_size * n_samples, nz)\n', (4220, 4248), False, 'import torch\n')] |
"""
Image classifier based in InceptionV3 (keras implementation).
"""
from PIL import Image
from keras.preprocessing import image
import keras.applications.inception_v3 as inception_v3
import keras.backend
import tensorflow as tf
import numpy as np
import pprint
# Reset any existing Keras session state before building the model.
keras.backend.clear_session()
MODEL_INPUT_SIZE_DEFAULT = (299, 299)  # default size expected by inception v3 input.
class InceptionV3Classifier(object):
    """Image classifier backed by Keras' InceptionV3 model."""
    def __init__(self, weights='imagenet', target_size=MODEL_INPUT_SIZE_DEFAULT):
        """Constructor.

        :param weights: weight set to load (e.g. 'imagenet').
        :param target_size: (width, height) every input image is resized to.
        """
        self.inception_model = inception_v3.InceptionV3(weights=weights)
        self.target_size = target_size
        # Keep a handle to the graph so predict() can run under it later.
        # NOTE(review): tf.get_default_graph is a TF1-style API — confirm the
        # runtime uses TF1 / compat mode.
        self.graph = tf.get_default_graph()
    def classify(self, fp, top=5):
        """Classify image and return top matches.
        :param fp: image filename (str), pathlib.Path object or a file object.
        :param top: number of top results to return.
        :returns: predictions about detected classes in image.
        :rtype: list[list[tuple(str: class_id, str: class_name, float: score)]]
        """
        # Open image
        img = Image.open(fp)
        # Image resizing and preparation for keras.
        if img.size != self.target_size:
            img = img.resize(self.target_size)
        x = image.img_to_array(img)
        x = np.expand_dims(x, axis=0)  # add the batch dimension
        x = inception_v3.preprocess_input(x)  # model-specific pixel preprocessing
        # Predictions (run under the graph captured at construction time)
        preds = []
        with self.graph.as_default():
            preds = self.inception_model.predict(x)
        # Decode predictions
        return inception_v3.decode_predictions(preds, top=top) | [
"keras.preprocessing.image.img_to_array",
"PIL.Image.open",
"keras.applications.inception_v3.preprocess_input",
"keras.applications.inception_v3.decode_predictions",
"numpy.expand_dims",
"keras.applications.inception_v3.InceptionV3",
"tensorflow.get_default_graph"
] | [((584, 625), 'keras.applications.inception_v3.InceptionV3', 'inception_v3.InceptionV3', ([], {'weights': 'weights'}), '(weights=weights)\n', (608, 625), True, 'import keras.applications.inception_v3 as inception_v3\n'), ((674, 696), 'tensorflow.get_default_graph', 'tf.get_default_graph', ([], {}), '()\n', (694, 696), True, 'import tensorflow as tf\n'), ((1069, 1083), 'PIL.Image.open', 'Image.open', (['fp'], {}), '(fp)\n', (1079, 1083), False, 'from PIL import Image\n'), ((1226, 1249), 'keras.preprocessing.image.img_to_array', 'image.img_to_array', (['img'], {}), '(img)\n', (1244, 1249), False, 'from keras.preprocessing import image\n'), ((1259, 1284), 'numpy.expand_dims', 'np.expand_dims', (['x'], {'axis': '(0)'}), '(x, axis=0)\n', (1273, 1284), True, 'import numpy as np\n'), ((1294, 1326), 'keras.applications.inception_v3.preprocess_input', 'inception_v3.preprocess_input', (['x'], {}), '(x)\n', (1323, 1326), True, 'import keras.applications.inception_v3 as inception_v3\n'), ((1483, 1530), 'keras.applications.inception_v3.decode_predictions', 'inception_v3.decode_predictions', (['preds'], {'top': 'top'}), '(preds, top=top)\n', (1514, 1530), True, 'import keras.applications.inception_v3 as inception_v3\n')] |
# -*- coding: utf-8 -*-
"""
Created on Tue Feb 6 19:33:27 2018
@author: yume
"""
import numpy as np
import matplotlib.pyplot as plt
def load_default_trajectory():
    """Return a canned 2-D trajectory for demos/tests.

    Returns:
        (ps, ps_for_d): ``ps`` is a (40, 2) array of x/y positions;
        ``ps_for_d`` is a (1, 40, 2) array of per-step displacement-like
        values (presumably finite differences of a reference path —
        confirm against the caller).
    """
    # Hard-coded x/y positions, one row per step.
    ps = np.array(([
        [-0.77703479856881415, 1.4993181096841063],
        [-0.70776038682731871, 1.4170221119724254],
        [-0.68260690865884658, 1.4206095214452887],
        [-0.61961335350444722, 1.396403374501471],
        [-0.52452975175408619, 1.3120215099603865],
        [-0.41054581005311593, 1.2300884769965503],
        [-0.38567688612738783, 1.1262364239010458],
        [-0.30115968064468291, 1.0616980649371949],
        [-0.22130623319182521, 0.933914655648039],
        [-0.14595070460897966, 0.82531833169704305],
        [-0.073066997284054381, 0.71924080684385058],
        [0.010223062162865558, 0.61421203570745217],
        [0.065266998498120728, 0.50483296951356215],
        [0.12787204045661326, 0.39646585780470696],
        [0.18682160531477655, 0.26910195060268484],
        [0.19791426786241169, 0.14344376640017222],
        [0.30767184917455126, 0.63371568346808751],
        [0.38202960019823439, -0.0133898000261839],
        [0.44891710879876359, -0.080474866781608853],
        [0.53705971610686458, -0.17921153739275405],
        [0.62670128183993188, -0.27472851793244945],
        [0.73939023996717992, -0.35063782321276187],
        [0.80149477851952372, -0.34912297166337368],
        [0.87046846035186309, -0.36280663000355622],
        [0.98847297805636788, -0.41429711277275322],
        [1.0520553474302326, -0.40133954562272993],
        [1.1723587292417847, -0.37754650997976182],
        [1.3530005099056032, -0.41721106503568262],
        [1.3546287979263141, -0.38136422040072085],
        [1.4902199436937522, -0.38493808898552663],
        [1.6353635418584515, -0.34246197144551019],
        [1.946088334976178, -0.34526603325326118],
        [1.8313254083319275, -0.27676211872652652],
        [2.1328239494248364, -0.27541768265452879],
        [2.146202914656159, -0.22981560969963232],
        [2.3364057587098828, -0.18565649458770156],
        [2.4240777509727509, -0.14133234626209409],
        [1.9430180862343152, -0.094173374304326487],
        [2.337419604694207, -0.092612605522184211],
        [2.4721016275926509, -0.07533703002757332]
        ]))
    # Companion per-step values (note the extra leading axis: shape (1, 40, 2)).
    ps_for_d = np.array([[[-0.03665494, 0.12333371],
       [ 0.06927441, -0.082296 ],
       [ 0.02515348, 0.00358741],
       [ 0.06299356, -0.02420615],
       [ 0.0950836 , -0.08438186],
       [ 0.11398394, -0.08193303],
       [ 0.02486892, -0.10385205],
       [ 0.08451721, -0.06453836],
       [ 0.07985345, -0.12778341],
       [ 0.07535553, -0.10859632],
       [ 0.07288371, -0.10607752],
       [ 0.08329006, -0.10502877],
       [ 0.05504394, -0.10937907],
       [ 0.06260504, -0.10836711],
       [ 0.05894956, -0.12736391],
       [ 0.01109266, -0.12565818],
       [ 0.10975758, 0.49027192],
       [ 0.07435775, -0.64710548],
       [ 0.06688751, -0.06708507],
       [ 0.08814261, -0.09873667],
       [ 0.08964157, -0.09551698],
       [ 0.11268896, -0.07590931],
       [ 0.06210454, 0.00151485],
       [ 0.06897368, -0.01368366],
       [ 0.11800452, -0.05149048],
       [ 0.06358237, 0.01295757],
       [ 0.12030338, 0.02379304],
       [ 0.18064178, -0.03966456],
       [ 0.00162829, 0.03584684],
       [ 0.13559115, -0.00357387],
       [ 0.1451436 , 0.04247612],
       [ 0.31072479, -0.00280406],
       [-0.11476293, 0.06850391],
       [ 0.30149854, 0.00134444],
       [ 0.01337897, 0.04560207],
       [ 0.19020284, 0.04415912],
       [ 0.08767199, 0.04432415],
       [-0.48105966, 0.04715897],
       [ 0.39440152, 0.00156077],
       [ 0.13468202, 0.01727558]]])
    return (ps, ps_for_d)
if __name__ == '__main__':
    # Visual smoke test: scatter-plot the canned trajectory points.
    ps, ds_for_d = load_default_trajectory()
    for p in ps:
        plt.plot(p[0], p[1], "*", color='#ff7f00') | [
"numpy.array",
"matplotlib.pyplot.plot"
] | [((177, 2042), 'numpy.array', 'np.array', (['[[-0.7770347985688142, 1.4993181096841064], [-0.7077603868273187, \n 1.4170221119724253], [-0.6826069086588465, 1.4206095214452887], [-\n 0.6196133535044472, 1.396403374501471], [-0.5245297517540862, \n 1.3120215099603865], [-0.4105458100531159, 1.2300884769965503], [-\n 0.3856768861273878, 1.1262364239010458], [-0.3011596806446829, \n 1.061698064937195], [-0.2213062331918252, 0.933914655648039], [-\n 0.14595070460897966, 0.825318331697043], [-0.07306699728405439, \n 0.7192408068438506], [0.010223062162865558, 0.6142120357074522], [\n 0.06526699849812073, 0.5048329695135622], [0.12787204045661327, \n 0.39646585780470694], [0.18682160531477654, 0.2691019506026848], [\n 0.1979142678624117, 0.14344376640017223], [0.30767184917455126, \n 0.6337156834680875], [0.3820296001982344, -0.0133898000261839], [\n 0.44891710879876356, -0.08047486678160885], [0.5370597161068645, -\n 0.17921153739275406], [0.6267012818399319, -0.27472851793244946], [\n 0.7393902399671799, -0.3506378232127619], [0.8014947785195237, -\n 0.3491229716633737], [0.8704684603518631, -0.36280663000355623], [\n 0.9884729780563679, -0.4142971127727532], [1.0520553474302325, -\n 0.40133954562272994], [1.1723587292417847, -0.3775465099797618], [\n 1.3530005099056033, -0.4172110650356826], [1.354628797926314, -\n 0.3813642204007209], [1.4902199436937522, -0.3849380889855266], [\n 1.6353635418584516, -0.3424619714455102], [1.946088334976178, -\n 0.34526603325326116], [1.8313254083319275, -0.2767621187265265], [\n 2.132823949424836, -0.2754176826545288], [2.146202914656159, -\n 0.22981560969963233], [2.336405758709883, -0.18565649458770156], [\n 2.4240777509727507, -0.1413323462620941], [1.9430180862343152, -\n 0.09417337430432648], [2.337419604694207, -0.09261260552218421], [\n 2.472101627592651, -0.07533703002757332]]'], {}), '([[-0.7770347985688142, 1.4993181096841064], [-0.7077603868273187, \n 1.4170221119724253], [-0.6826069086588465, 1.4206095214452887], 
[-\n 0.6196133535044472, 1.396403374501471], [-0.5245297517540862, \n 1.3120215099603865], [-0.4105458100531159, 1.2300884769965503], [-\n 0.3856768861273878, 1.1262364239010458], [-0.3011596806446829, \n 1.061698064937195], [-0.2213062331918252, 0.933914655648039], [-\n 0.14595070460897966, 0.825318331697043], [-0.07306699728405439, \n 0.7192408068438506], [0.010223062162865558, 0.6142120357074522], [\n 0.06526699849812073, 0.5048329695135622], [0.12787204045661327, \n 0.39646585780470694], [0.18682160531477654, 0.2691019506026848], [\n 0.1979142678624117, 0.14344376640017223], [0.30767184917455126, \n 0.6337156834680875], [0.3820296001982344, -0.0133898000261839], [\n 0.44891710879876356, -0.08047486678160885], [0.5370597161068645, -\n 0.17921153739275406], [0.6267012818399319, -0.27472851793244946], [\n 0.7393902399671799, -0.3506378232127619], [0.8014947785195237, -\n 0.3491229716633737], [0.8704684603518631, -0.36280663000355623], [\n 0.9884729780563679, -0.4142971127727532], [1.0520553474302325, -\n 0.40133954562272994], [1.1723587292417847, -0.3775465099797618], [\n 1.3530005099056033, -0.4172110650356826], [1.354628797926314, -\n 0.3813642204007209], [1.4902199436937522, -0.3849380889855266], [\n 1.6353635418584516, -0.3424619714455102], [1.946088334976178, -\n 0.34526603325326116], [1.8313254083319275, -0.2767621187265265], [\n 2.132823949424836, -0.2754176826545288], [2.146202914656159, -\n 0.22981560969963233], [2.336405758709883, -0.18565649458770156], [\n 2.4240777509727507, -0.1413323462620941], [1.9430180862343152, -\n 0.09417337430432648], [2.337419604694207, -0.09261260552218421], [\n 2.472101627592651, -0.07533703002757332]])\n', (185, 2042), True, 'import numpy as np\n'), ((2510, 3659), 'numpy.array', 'np.array', (['[[[-0.03665494, 0.12333371], [0.06927441, -0.082296], [0.02515348, \n 0.00358741], [0.06299356, -0.02420615], [0.0950836, -0.08438186], [\n 0.11398394, -0.08193303], [0.02486892, -0.10385205], [0.08451721, -\n 0.06453836], 
[0.07985345, -0.12778341], [0.07535553, -0.10859632], [\n 0.07288371, -0.10607752], [0.08329006, -0.10502877], [0.05504394, -\n 0.10937907], [0.06260504, -0.10836711], [0.05894956, -0.12736391], [\n 0.01109266, -0.12565818], [0.10975758, 0.49027192], [0.07435775, -\n 0.64710548], [0.06688751, -0.06708507], [0.08814261, -0.09873667], [\n 0.08964157, -0.09551698], [0.11268896, -0.07590931], [0.06210454, \n 0.00151485], [0.06897368, -0.01368366], [0.11800452, -0.05149048], [\n 0.06358237, 0.01295757], [0.12030338, 0.02379304], [0.18064178, -\n 0.03966456], [0.00162829, 0.03584684], [0.13559115, -0.00357387], [\n 0.1451436, 0.04247612], [0.31072479, -0.00280406], [-0.11476293, \n 0.06850391], [0.30149854, 0.00134444], [0.01337897, 0.04560207], [\n 0.19020284, 0.04415912], [0.08767199, 0.04432415], [-0.48105966, \n 0.04715897], [0.39440152, 0.00156077], [0.13468202, 0.01727558]]]'], {}), '([[[-0.03665494, 0.12333371], [0.06927441, -0.082296], [0.02515348,\n 0.00358741], [0.06299356, -0.02420615], [0.0950836, -0.08438186], [\n 0.11398394, -0.08193303], [0.02486892, -0.10385205], [0.08451721, -\n 0.06453836], [0.07985345, -0.12778341], [0.07535553, -0.10859632], [\n 0.07288371, -0.10607752], [0.08329006, -0.10502877], [0.05504394, -\n 0.10937907], [0.06260504, -0.10836711], [0.05894956, -0.12736391], [\n 0.01109266, -0.12565818], [0.10975758, 0.49027192], [0.07435775, -\n 0.64710548], [0.06688751, -0.06708507], [0.08814261, -0.09873667], [\n 0.08964157, -0.09551698], [0.11268896, -0.07590931], [0.06210454, \n 0.00151485], [0.06897368, -0.01368366], [0.11800452, -0.05149048], [\n 0.06358237, 0.01295757], [0.12030338, 0.02379304], [0.18064178, -\n 0.03966456], [0.00162829, 0.03584684], [0.13559115, -0.00357387], [\n 0.1451436, 0.04247612], [0.31072479, -0.00280406], [-0.11476293, \n 0.06850391], [0.30149854, 0.00134444], [0.01337897, 0.04560207], [\n 0.19020284, 0.04415912], [0.08767199, 0.04432415], [-0.48105966, \n 0.04715897], [0.39440152, 0.00156077], [0.13468202, 
0.01727558]]])\n', (2518, 3659), True, 'import numpy as np\n'), ((4079, 4121), 'matplotlib.pyplot.plot', 'plt.plot', (['p[0]', 'p[1]', '"""*"""'], {'color': '"""#ff7f00"""'}), "(p[0], p[1], '*', color='#ff7f00')\n", (4087, 4121), True, 'import matplotlib.pyplot as plt\n')] |
import io
import numpy as np
import pandas as pd
import cirq
def assert_json_roundtrip_works(obj, text_should_be=None, resolvers=None):
    """Assert that ``obj`` survives a JSON serialize/deserialize round trip.

    Args:
        obj: The object whose round-trip behaviour is being checked.
        text_should_be: If given, the exact JSON text the serialization must
            produce.
        resolvers: Optional resolvers handed to the JSON reader; ``None``
            uses the defaults.

    Raises:
        AssertionError: The serialized text or the restored object does not
            match expectations.
    """
    stream = io.StringIO()
    cirq.protocols.to_json(obj, stream)
    if text_should_be is not None:
        stream.seek(0)
        serialized = stream.read()
        assert serialized == text_should_be, serialized
    stream.seek(0)
    restored = cirq.protocols.read_json(stream, resolvers=resolvers)
    # numpy/pandas objects overload `==` elementwise, so route them through
    # the dedicated testing helpers; everything else uses plain equality.
    comparators = (
        (np.ndarray, np.testing.assert_equal),
        (pd.DataFrame, pd.testing.assert_frame_equal),
        (pd.Index, pd.testing.assert_index_equal),
    )
    for cls, compare in comparators:
        if isinstance(obj, cls):
            compare(restored, obj)
            break
    else:
        assert restored == obj
| [
"numpy.testing.assert_equal",
"cirq.protocols.read_json",
"pandas.testing.assert_index_equal",
"pandas.testing.assert_frame_equal",
"io.StringIO",
"cirq.protocols.to_json"
] | [((585, 598), 'io.StringIO', 'io.StringIO', ([], {}), '()\n', (596, 598), False, 'import io\n'), ((603, 638), 'cirq.protocols.to_json', 'cirq.protocols.to_json', (['obj', 'buffer'], {}), '(obj, buffer)\n', (625, 638), False, 'import cirq\n'), ((810, 863), 'cirq.protocols.read_json', 'cirq.protocols.read_json', (['buffer'], {'resolvers': 'resolvers'}), '(buffer, resolvers=resolvers)\n', (834, 863), False, 'import cirq\n'), ((908, 950), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['restored_obj', 'obj'], {}), '(restored_obj, obj)\n', (931, 950), True, 'import numpy as np\n'), ((999, 1047), 'pandas.testing.assert_frame_equal', 'pd.testing.assert_frame_equal', (['restored_obj', 'obj'], {}), '(restored_obj, obj)\n', (1028, 1047), True, 'import pandas as pd\n'), ((1092, 1140), 'pandas.testing.assert_index_equal', 'pd.testing.assert_index_equal', (['restored_obj', 'obj'], {}), '(restored_obj, obj)\n', (1121, 1140), True, 'import pandas as pd\n')] |
import argparse
import os
import numpy as np
import glob
from sklearn.linear_model import LogisticRegression
from sklearn.externals import joblib
#import joblib
from azureml.core import Run
from utils import load_data
# let user feed in 2 parameters, the dataset to mount or download, and the regularization rate of the logistic regression model
# Parse the two user-supplied parameters: the dataset location and the
# regularization rate of the logistic regression model.
parser = argparse.ArgumentParser()
parser.add_argument('--data-folder', type=str, dest='data_folder', help='data folder mounting point')
parser.add_argument('--regularization', type=float, dest='reg', default=0.01, help='regularization rate')
args = parser.parse_args()

data_folder = args.data_folder
print('Data folder:', data_folder)

# load train and test set into numpy arrays
# note we scale the pixel intensity values to 0-1 (by dividing it with 255.0) so the model can converge faster.
X_train = load_data(glob.glob(os.path.join(data_folder, '**/train-images-idx3-ubyte.gz'), recursive=True)[0], False) / 255.0
X_test = load_data(glob.glob(os.path.join(data_folder, '**/t10k-images-idx3-ubyte.gz'), recursive=True)[0], False) / 255.0
y_train = load_data(glob.glob(os.path.join(data_folder, '**/train-labels-idx1-ubyte.gz'), recursive=True)[0], True).reshape(-1)
y_test = load_data(glob.glob(os.path.join(data_folder, '**/t10k-labels-idx1-ubyte.gz'), recursive=True)[0], True).reshape(-1)
print(X_train.shape, y_train.shape, X_test.shape, y_test.shape, sep = '\n')

# get hold of the current run
run = Run.get_context()

print('Train a logistic regression model with regularization rate of', args.reg)
clf = LogisticRegression(C=1.0/args.reg, solver="liblinear", multi_class="auto", random_state=42)
clf.fit(X_train, y_train)

print('Predict the test set')
y_hat = clf.predict(X_test)

# calculate accuracy on the prediction
acc = np.average(y_hat == y_test)
print('Accuracy is', acc)

# BUG FIX: np.float was a deprecated alias of the builtin float and was
# removed in NumPy 1.24; use float() directly (identical behavior).
run.log('regularization rate', float(args.reg))
run.log('accuracy', float(acc))

os.makedirs('outputs', exist_ok=True)
# note file saved in the outputs folder is automatically uploaded into experiment record
joblib.dump(value=clf, filename='outputs/sklearn_mnist_model.pkl') | [
"numpy.float",
"os.makedirs",
"numpy.average",
"argparse.ArgumentParser",
"os.path.join",
"azureml.core.Run.get_context",
"sklearn.linear_model.LogisticRegression",
"sklearn.externals.joblib.dump"
] | [((358, 383), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (381, 383), False, 'import argparse\n'), ((1459, 1476), 'azureml.core.Run.get_context', 'Run.get_context', ([], {}), '()\n', (1474, 1476), False, 'from azureml.core import Run\n'), ((1565, 1662), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {'C': '(1.0 / args.reg)', 'solver': '"""liblinear"""', 'multi_class': '"""auto"""', 'random_state': '(42)'}), "(C=1.0 / args.reg, solver='liblinear', multi_class='auto',\n random_state=42)\n", (1583, 1662), False, 'from sklearn.linear_model import LogisticRegression\n'), ((1788, 1815), 'numpy.average', 'np.average', (['(y_hat == y_test)'], {}), '(y_hat == y_test)\n', (1798, 1815), True, 'import numpy as np\n'), ((1930, 1967), 'os.makedirs', 'os.makedirs', (['"""outputs"""'], {'exist_ok': '(True)'}), "('outputs', exist_ok=True)\n", (1941, 1967), False, 'import os\n'), ((2057, 2123), 'sklearn.externals.joblib.dump', 'joblib.dump', ([], {'value': 'clf', 'filename': '"""outputs/sklearn_mnist_model.pkl"""'}), "(value=clf, filename='outputs/sklearn_mnist_model.pkl')\n", (2068, 2123), False, 'from sklearn.externals import joblib\n'), ((1874, 1892), 'numpy.float', 'np.float', (['args.reg'], {}), '(args.reg)\n', (1882, 1892), True, 'import numpy as np\n'), ((1914, 1927), 'numpy.float', 'np.float', (['acc'], {}), '(acc)\n', (1922, 1927), True, 'import numpy as np\n'), ((873, 931), 'os.path.join', 'os.path.join', (['data_folder', '"""**/train-images-idx3-ubyte.gz"""'], {}), "(data_folder, '**/train-images-idx3-ubyte.gz')\n", (885, 931), False, 'import os\n'), ((997, 1054), 'os.path.join', 'os.path.join', (['data_folder', '"""**/t10k-images-idx3-ubyte.gz"""'], {}), "(data_folder, '**/t10k-images-idx3-ubyte.gz')\n", (1009, 1054), False, 'import os\n'), ((1121, 1179), 'os.path.join', 'os.path.join', (['data_folder', '"""**/train-labels-idx1-ubyte.gz"""'], {}), "(data_folder, '**/train-labels-idx1-ubyte.gz')\n", (1133, 1179), 
False, 'import os\n'), ((1248, 1305), 'os.path.join', 'os.path.join', (['data_folder', '"""**/t10k-labels-idx1-ubyte.gz"""'], {}), "(data_folder, '**/t10k-labels-idx1-ubyte.gz')\n", (1260, 1305), False, 'import os\n')] |
import numpy as np
import random
import os
import sys
from subprocess import call
import nnabla as nn
import nnabla.logger as logger
import nnabla.functions as F
import nnabla.parametric_functions as PF
import nnabla.solver as S
import nnabla.initializer as I
from args import get_args
class LSTMWrapper(PF.LSTMCell, object):
    """LSTM cell that keeps handles to its initial hidden/cell states so they
    can be re-seeded from the final states of the previous segment."""

    def __init__(self, batch_size, state_size, h=None, c=None):
        super(LSTMWrapper, self).__init__(batch_size, state_size, h, c)
        # Remember the initial states and start them at zero.
        self.h0, self.c0 = self.h, self.c
        for state in (self.h0, self.c0):
            state.data.zero()

    def share_data(self):
        """Point the initial cells at the data of the last cells so that
        learning continues across unrolled segments."""
        self.h0.data = self.h.data
        self.c0.data = self.c.data
def gradient_clipping(params, max_norm, norm_type=2):
    """Scale parameter gradients in place so their global norm is <= max_norm.

    Args:
        params: iterable of parameters; only those with ``need_grad`` set
            are considered.
        max_norm: maximum allowed total gradient norm.
        norm_type: order of the norm; ``float('inf')`` selects max-norm.
    """
    params = [p for p in params if p.need_grad]
    norm_type = float(norm_type)
    if norm_type == float('inf'):
        # Max-norm: the largest absolute gradient entry across parameters.
        # BUG FIX: the result here is a plain numpy scalar, so the old code's
        # float(total_norm.data) raised (np scalars expose `.data` as a
        # memoryview). Compute the scalar value directly instead.
        total_norm_value = max(float(np.abs(p.g).max()) for p in params)
    else:
        total_norm = 0.
        for p in params:
            param_norm = F.pow_scalar(
                F.sum(p.grad ** norm_type), 1. / norm_type)
            total_norm += param_norm ** norm_type
        total_norm = total_norm ** (1. / norm_type)
        total_norm_value = float(total_norm.data)
    # The epsilon guards against division by zero when all gradients vanish.
    clip_coeff = max_norm / (total_norm_value + 1e-6)
    if clip_coeff < 1:
        for p in params:
            p.g = p.g * clip_coeff
def perplexity(loss):
    """Return the perplexity corresponding to a cross-entropy ``loss`` (e**loss)."""
    return np.exp(loss)
def get_data():
    """Download (if needed) and tokenize the Penn Treebank splits.

    Returns:
        (train_data, val_data, test_data): lists of integer word ids.
        The vocabulary is built from the training split only, and newlines
        are treated as a regular '<eos>' token.
    """
    fnames = ['ptb.train.txt', 'ptb.valid.txt', 'ptb.test.txt']
    for fname in fnames:
        if not os.path.exists(fname):
            url = 'https://raw.githubusercontent.com/wojzaremba/lstm/master/data/'+fname
            call(['wget', url])

    def _read_words(fname):
        # Context manager ensures the file handle is closed (the previous
        # version leaked open file objects).
        with open(fname) as f:
            return f.read().replace('\n', '<eos>').split()

    train_words = _read_words('ptb.train.txt')
    words_as_set = set(train_words)
    word_to_id = {w: i for i, w in enumerate(words_as_set)}
    train_data = [word_to_id[w] for w in train_words]
    val_data = [word_to_id[w] for w in _read_words('ptb.valid.txt')]
    # BUG FIX: the test split was previously tokenized from the *validation*
    # words despite reading ptb.test.txt; use the actual test file.
    test_data = [word_to_id[w] for w in _read_words('ptb.test.txt')]
    return train_data, val_data, test_data
def get_batch(data, itr, bs, num_steps):
    """Cut ``bs`` parallel windows of ``num_steps`` tokens out of ``data``.

    Row i starts at an evenly spaced base offset plus ``itr``, wrapped
    modulo the usable length, so successive calls walk through the corpus.

    Returns:
        (inputs, targets): two (bs, num_steps) int arrays where targets are
        the inputs shifted one token to the right.
    """
    span = len(data) - num_steps
    starts = [(i * span // bs + itr) % span for i in range(bs)]
    inputs = np.array([data[s:s + num_steps] for s in starts]).reshape([bs, num_steps])
    targets = np.array([data[s + 1:s + num_steps + 1] for s in starts]).reshape([bs, num_steps])
    return inputs, targets
def get_loss(l1, l2, x, t, w_init, b_init, num_words, batch_size, state_size, dropout=False, dropout_rate=0.5, embed_name='embed', pred_name='pred'):
    """Build the unrolled two-layer LSTM graph and return the mean loss.

    Args:
        l1, l2: stacked LSTM cells (callables taking input, w_init, b_init).
        x: (batch_size, num_steps) Variable of current-word ids.
        t: (batch_size, num_steps) Variable of next-word ids (targets).
        w_init, b_init: weight / bias initializers for the LSTM cells.
        num_words: vocabulary size (embedding rows and output units).
        batch_size: minibatch size.
        state_size: embedding and LSTM state dimensionality.
        dropout: if True, apply dropout between layers (training graph).
        dropout_rate: dropout probability when `dropout` is True.
        embed_name, pred_name: parameter scopes so all time steps share the
            embedding and output-affine weights.

    Returns:
        A scalar Variable: cross-entropy averaged over the unrolled steps.
    """
    # One embedded input per time step; parameters are shared via embed_name.
    e_list = [PF.embed(x_elm, num_words, state_size, name=embed_name)
              for x_elm in F.split(x, axis=1)]
    t_list = F.split(t, axis=1)
    loss = 0
    for i, (e_t, t_t) in enumerate(zip(e_list, t_list)):
        if dropout:
            h1 = l1(F.dropout(e_t, dropout_rate), w_init, b_init)
            h2 = l2(F.dropout(h1, dropout_rate), w_init, b_init)
            y = PF.affine(F.dropout(h2, dropout_rate),
                          num_words, name=pred_name)
        else:
            h1 = l1(e_t, w_init, b_init)
            h2 = l2(h1, w_init, b_init)
            y = PF.affine(h2, num_words, name=pred_name)
        t_t = F.reshape(t_t, [batch_size, 1])
        loss += F.mean(F.softmax_cross_entropy(y, t_t))
    # `i` still holds the last step index here, so this averages over steps.
    loss /= float(i+1)
    return loss
def main():
    """Train a word-level LSTM language model on PTB, validate every epoch,
    checkpoint on improvement, and report final test perplexity."""
    args = get_args()
    state_size = args.state_size
    batch_size = args.batch_size
    num_steps = args.num_steps
    num_layers = args.num_layers  # NOTE(review): read but never used below
    max_epoch = args.max_epoch
    max_norm = args.gradient_clipping_max_norm
    num_words = 10000
    lr = args.learning_rate
    train_data, val_data, test_data = get_data()
    # Get context.
    from nnabla.ext_utils import get_extension_context
    logger.info("Running in %s" % args.context)
    ctx = get_extension_context(
        args.context, device_id=args.device_id, type_config=args.type_config)
    nn.set_default_context(ctx)
    # Monitors log per-iteration training perplexity and per-epoch
    # validation/test perplexity to args.work_dir.
    from nnabla.monitor import Monitor, MonitorSeries
    monitor = Monitor(args.work_dir)
    monitor_perplexity = MonitorSeries(
        "Training perplexity", monitor, interval=10)
    monitor_vperplexity = MonitorSeries("Validation perplexity", monitor, interval=(
        len(val_data)//(num_steps*batch_size)))
    monitor_tperplexity = MonitorSeries(
        "Test perplexity", monitor, interval=(len(test_data)//(num_steps*1)))
    l1 = LSTMWrapper(batch_size, state_size)
    l2 = LSTMWrapper(batch_size, state_size)
    # train graph
    x = nn.Variable((batch_size, num_steps))
    t = nn.Variable((batch_size, num_steps))
    w = I.UniformInitializer((-0.1, 0.1))
    b = I.ConstantInitializer(1)
    loss = get_loss(l1, l2, x, t, w, b, num_words,
                    batch_size, state_size, True)
    # Carry the LSTM state across unrolled segments.
    l1.share_data()
    l2.share_data()
    # validation graph
    vx = nn.Variable((batch_size, num_steps))
    vt = nn.Variable((batch_size, num_steps))
    vloss = get_loss(l1, l2, vx, vt, w, b, num_words, batch_size, state_size)
    solver = S.Sgd(lr)
    solver.set_parameters(nn.get_parameters())
    if not os.path.exists(args.save_dir):
        os.makedirs(args.save_dir)
    best_val = 10000  # best validation perplexity seen so far
    for epoch in range(max_epoch):
        l1.reset_state()
        l2.reset_state()
        for i in range(len(train_data)//(num_steps*batch_size)):
            x.d, t.d = get_batch(train_data, i*num_steps,
                                   batch_size, num_steps)
            solver.zero_grad()
            loss.forward()
            loss.backward(clear_buffer=True)
            solver.weight_decay(1e-5)
            # Clip the global gradient norm before the SGD step.
            gradient_clipping(nn.get_parameters().values(), max_norm)
            solver.update()
            perp = perplexity(loss.d.copy())
            monitor_perplexity.add(
                (len(train_data)//(num_steps*batch_size))*(epoch)+i, perp)
        # Validation pass (no parameter updates).
        l1.reset_state()
        l2.reset_state()
        vloss_avg = 0
        for i in range(len(val_data)//(num_steps * batch_size)):
            vx.d, vt.d = get_batch(val_data, i*num_steps,
                                     batch_size, num_steps)
            vloss.forward()
            vloss_avg += vloss.d.copy()
        vloss_avg /= float((len(val_data)//(num_steps*batch_size)))
        vper = perplexity(vloss_avg)
        if vper < best_val:
            best_val = vper
            # Only checkpoint once perplexity drops below 200.
            if vper < 200:
                save_name = "params_epoch_{:02d}.h5".format(epoch)
                nn.save_parameters(os.path.join(args.save_dir, save_name))
        else:
            # No improvement: anneal the learning rate.
            solver.set_learning_rate(solver.learning_rate()*0.25)
            logger.info("Decreased learning rate to {:05f}".format(
                solver.learning_rate()))
        monitor_vperplexity.add(
            (len(val_data)//(num_steps*batch_size))*(epoch)+i, vper)
    # for final test split
    t_batch_size = 1
    tl1 = LSTMWrapper(t_batch_size, state_size)
    tl2 = LSTMWrapper(t_batch_size, state_size)
    tloss_avg = 0
    tx = nn.Variable((t_batch_size, num_steps))
    tt = nn.Variable((t_batch_size, num_steps))
    tloss = get_loss(tl1, tl2, tx, tt, w, b, num_words, 1, state_size)
    tl1.share_data()
    tl2.share_data()
    for i in range(len(test_data)//(num_steps * t_batch_size)):
        tx.d, tt.d = get_batch(test_data, i*num_steps, 1, num_steps)
        tloss.forward()
        tloss_avg += tloss.d.copy()
    tloss_avg /= float((len(test_data)//(num_steps*t_batch_size)))
    tper = perplexity(tloss_avg)
    monitor_tperplexity.add(
        (len(test_data)//(num_steps*t_batch_size))*(epoch)+i, tper)
# Train, validate and test the PTB language model when run as a script.
if __name__ == '__main__':
    main()
| [
"nnabla.monitor.MonitorSeries",
"nnabla.initializer.ConstantInitializer",
"numpy.array",
"os.path.exists",
"nnabla.get_parameters",
"numpy.exp",
"nnabla.functions.sum",
"nnabla.ext_utils.get_extension_context",
"nnabla.functions.dropout",
"subprocess.call",
"args.get_args",
"nnabla.parametric_... | [((1466, 1478), 'numpy.exp', 'np.exp', (['loss'], {}), '(loss)\n', (1472, 1478), True, 'import numpy as np\n'), ((3113, 3131), 'nnabla.functions.split', 'F.split', (['t'], {'axis': '(1)'}), '(t, axis=1)\n', (3120, 3131), True, 'import nnabla.functions as F\n'), ((3781, 3791), 'args.get_args', 'get_args', ([], {}), '()\n', (3789, 3791), False, 'from args import get_args\n'), ((4179, 4222), 'nnabla.logger.info', 'logger.info', (["('Running in %s' % args.context)"], {}), "('Running in %s' % args.context)\n", (4190, 4222), True, 'import nnabla.logger as logger\n'), ((4233, 4329), 'nnabla.ext_utils.get_extension_context', 'get_extension_context', (['args.context'], {'device_id': 'args.device_id', 'type_config': 'args.type_config'}), '(args.context, device_id=args.device_id, type_config=\n args.type_config)\n', (4254, 4329), False, 'from nnabla.ext_utils import get_extension_context\n'), ((4338, 4365), 'nnabla.set_default_context', 'nn.set_default_context', (['ctx'], {}), '(ctx)\n', (4360, 4365), True, 'import nnabla as nn\n'), ((4435, 4457), 'nnabla.monitor.Monitor', 'Monitor', (['args.work_dir'], {}), '(args.work_dir)\n', (4442, 4457), False, 'from nnabla.monitor import Monitor, MonitorSeries\n'), ((4483, 4541), 'nnabla.monitor.MonitorSeries', 'MonitorSeries', (['"""Training perplexity"""', 'monitor'], {'interval': '(10)'}), "('Training perplexity', monitor, interval=10)\n", (4496, 4541), False, 'from nnabla.monitor import Monitor, MonitorSeries\n'), ((4922, 4958), 'nnabla.Variable', 'nn.Variable', (['(batch_size, num_steps)'], {}), '((batch_size, num_steps))\n', (4933, 4958), True, 'import nnabla as nn\n'), ((4967, 5003), 'nnabla.Variable', 'nn.Variable', (['(batch_size, num_steps)'], {}), '((batch_size, num_steps))\n', (4978, 5003), True, 'import nnabla as nn\n'), ((5012, 5045), 'nnabla.initializer.UniformInitializer', 'I.UniformInitializer', (['(-0.1, 0.1)'], {}), '((-0.1, 0.1))\n', (5032, 5045), True, 'import nnabla.initializer as I\n'), 
((5054, 5078), 'nnabla.initializer.ConstantInitializer', 'I.ConstantInitializer', (['(1)'], {}), '(1)\n', (5075, 5078), True, 'import nnabla.initializer as I\n'), ((5254, 5290), 'nnabla.Variable', 'nn.Variable', (['(batch_size, num_steps)'], {}), '((batch_size, num_steps))\n', (5265, 5290), True, 'import nnabla as nn\n'), ((5300, 5336), 'nnabla.Variable', 'nn.Variable', (['(batch_size, num_steps)'], {}), '((batch_size, num_steps))\n', (5311, 5336), True, 'import nnabla as nn\n'), ((5428, 5437), 'nnabla.solver.Sgd', 'S.Sgd', (['lr'], {}), '(lr)\n', (5433, 5437), True, 'import nnabla.solver as S\n'), ((7358, 7396), 'nnabla.Variable', 'nn.Variable', (['(t_batch_size, num_steps)'], {}), '((t_batch_size, num_steps))\n', (7369, 7396), True, 'import nnabla as nn\n'), ((7406, 7444), 'nnabla.Variable', 'nn.Variable', (['(t_batch_size, num_steps)'], {}), '((t_batch_size, num_steps))\n', (7417, 7444), True, 'import nnabla as nn\n'), ((2997, 3052), 'nnabla.parametric_functions.embed', 'PF.embed', (['x_elm', 'num_words', 'state_size'], {'name': 'embed_name'}), '(x_elm, num_words, state_size, name=embed_name)\n', (3005, 3052), True, 'import nnabla.parametric_functions as PF\n'), ((3627, 3658), 'nnabla.functions.reshape', 'F.reshape', (['t_t', '[batch_size, 1]'], {}), '(t_t, [batch_size, 1])\n', (3636, 3658), True, 'import nnabla.functions as F\n'), ((5464, 5483), 'nnabla.get_parameters', 'nn.get_parameters', ([], {}), '()\n', (5481, 5483), True, 'import nnabla as nn\n'), ((5497, 5526), 'os.path.exists', 'os.path.exists', (['args.save_dir'], {}), '(args.save_dir)\n', (5511, 5526), False, 'import os\n'), ((5536, 5562), 'os.makedirs', 'os.makedirs', (['args.save_dir'], {}), '(args.save_dir)\n', (5547, 5562), False, 'import os\n'), ((1623, 1644), 'os.path.exists', 'os.path.exists', (['fname'], {}), '(fname)\n', (1637, 1644), False, 'import os\n'), ((1747, 1766), 'subprocess.call', 'call', (["['wget', url]"], {}), "(['wget', url])\n", (1751, 1766), False, 'from subprocess import 
call\n'), ((3080, 3098), 'nnabla.functions.split', 'F.split', (['x'], {'axis': '(1)'}), '(x, axis=1)\n', (3087, 3098), True, 'import nnabla.functions as F\n'), ((3572, 3612), 'nnabla.parametric_functions.affine', 'PF.affine', (['h2', 'num_words'], {'name': 'pred_name'}), '(h2, num_words, name=pred_name)\n', (3581, 3612), True, 'import nnabla.parametric_functions as PF\n'), ((3682, 3713), 'nnabla.functions.softmax_cross_entropy', 'F.softmax_cross_entropy', (['y', 't_t'], {}), '(y, t_t)\n', (3705, 3713), True, 'import nnabla.functions as F\n'), ((1136, 1162), 'nnabla.functions.sum', 'F.sum', (['(p.grad ** norm_type)'], {}), '(p.grad ** norm_type)\n', (1141, 1162), True, 'import nnabla.functions as F\n'), ((2739, 2758), 'numpy.array', 'np.array', (['cur_words'], {}), '(cur_words)\n', (2747, 2758), True, 'import numpy as np\n'), ((2785, 2805), 'numpy.array', 'np.array', (['next_words'], {}), '(next_words)\n', (2793, 2805), True, 'import numpy as np\n'), ((3242, 3270), 'nnabla.functions.dropout', 'F.dropout', (['e_t', 'dropout_rate'], {}), '(e_t, dropout_rate)\n', (3251, 3270), True, 'import nnabla.functions as F\n'), ((3308, 3335), 'nnabla.functions.dropout', 'F.dropout', (['h1', 'dropout_rate'], {}), '(h1, dropout_rate)\n', (3317, 3335), True, 'import nnabla.functions as F\n'), ((3379, 3406), 'nnabla.functions.dropout', 'F.dropout', (['h2', 'dropout_rate'], {}), '(h2, dropout_rate)\n', (3388, 3406), True, 'import nnabla.functions as F\n'), ((6855, 6893), 'os.path.join', 'os.path.join', (['args.save_dir', 'save_name'], {}), '(args.save_dir, save_name)\n', (6867, 6893), False, 'import os\n'), ((987, 998), 'numpy.abs', 'np.abs', (['p.g'], {}), '(p.g)\n', (993, 998), True, 'import numpy as np\n'), ((6019, 6038), 'nnabla.get_parameters', 'nn.get_parameters', ([], {}), '()\n', (6036, 6038), True, 'import nnabla as nn\n')] |
import os, sys, numpy
from scipy.interpolate import RectBivariateSpline, interp2d
from scipy.optimize import curve_fit
from matplotlib import cm
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg
from matplotlib.figure import Figure
try:
from mpl_toolkits.mplot3d import Axes3D # necessario per caricare i plot 3D
except:
pass
from PyQt5.QtWidgets import QApplication
from PyQt5.QtCore import QSettings
from orangewidget import gui
from orangewidget.settings import Setting
from oasys.widgets import gui as oasysgui
from oasys.widgets import congruence
from oasys.util.oasys_util import TriggerIn
from orangecontrib.shadow.util.shadow_objects import ShadowOpticalElement, ShadowBeam, ShadowPreProcessorData
from orangecontrib.shadow.util.shadow_util import ShadowPreProcessor
from orangecontrib.shadow.widgets.gui import ow_ellipsoid_element, ow_optical_element
from Shadow import ShadowTools as ST
# Bender cross-section shapes (values of the `shape` setting; see the
# "Shape" combo box: ["Trapezium", "Rectangle"])
TRAPEZIUM = 0
RECTANGLE = 1
# Bender actuation modes (values of the `kind_of_bender` setting; see the
# "Kind Of Bender" combo box: ["Single Momentum", "Double Momentum"])
SINGLE_MOMENTUM = 0
DOUBLE_MOMENTUM = 1
class BendableEllipsoidMirror(ow_ellipsoid_element.EllipsoidElement):
    """
    OASYS widget: Shadow ellipsoid mirror (used as a cylinder, see checkFields)
    bent by one or two mechanical momenta. The widget fits the analytical
    bender shape against the ideal ellipse profile and ray-traces using the
    residual (ideal - bender) as a figure-error file.
    """
    name = "Bendable Ellipsoid Mirror"
    description = "Shadow OE: Bendable Ellipsoid Mirror"
    icon = "icons/bendable_ellipsoid_mirror.png"
    maintainer = "<NAME>"
    maintainer_email = "<EMAIL>"
    priority = 6
    category = "Optical Elements"
    keywords = ["data", "file", "load", "read"]
    # Output channels: an extra "Footprint" channel is added only when the
    # "output/send-footprint" QSettings flag is set.
    send_footprint_beam = QSettings().value("output/send-footprint", 0, int) == 1
    if send_footprint_beam:
        outputs = [{"name":"Beam",
                    "type":ShadowBeam,
                    "doc":"Shadow Beam",
                    "id":"beam"},
                   {"name":"Footprint",
                    "type":list,
                    "doc":"Footprint",
                    "id":"beam"},
                   {"name":"Trigger",
                    "type": TriggerIn,
                    "doc":"Feedback signal to start a new beam simulation",
                    "id":"Trigger"},
                   {"name": "PreProcessor_Data",
                    "type": ShadowPreProcessorData,
                    "doc": "PreProcessor Data",
                    "id": "PreProcessor_Data"}
                   ]
    else:
        outputs = [{"name":"Beam",
                    "type":ShadowBeam,
                    "doc":"Shadow Beam",
                    "id":"beam"},
                   {"name":"Trigger",
                    "type": TriggerIn,
                    "doc":"Feedback signal to start a new beam simulation",
                    "id":"Trigger"},
                   {"name": "PreProcessor_Data",
                    "type": ShadowPreProcessorData,
                    "doc": "PreProcessor Data",
                    "id": "PreProcessor_Data"}
                   ]
    # --- persisted widget settings (orangewidget.Setting) ---
    show_bender_plots = Setting(0)        # 0 = "No", 1 = "Yes" (plot1D/plot3D are no-ops when 0)
    # Sampling of the bender-correction surface
    bender_bin_x = Setting(100)           # sagittal bins
    bender_bin_y = Setting(500)           # transversal (longitudinal) bins
    # Substrate mechanical properties; units appended in after_change_workspace_units
    E = Setting(131000)                   # Young's modulus [N/<workspace unit>^2]
    h = Setting(10)                       # thickness [<workspace unit>]
    kind_of_bender = Setting(1)           # SINGLE_MOMENTUM (0) / DOUBLE_MOMENTUM (1)
    shape = Setting(0)                    # TRAPEZIUM (0) / RECTANGLE (1)
    output_file_name = Setting("mirror_bender.dat")  # figure-error surface written for Shadow
    which_length = Setting(0)             # fit over 0 = "Total" length, 1 = "Partial" (optimized_length)
    optimized_length = Setting(0.0)       # fit window length when which_length == 1
    # Initial guesses for the fit parameters
    M1 = Setting(0.0)                     # upstream momentum
    # NOTE(review): the GUI labels `ratio` as "M1/M2", but the bender model
    # computes M2 = M1 * ratio (i.e. ratio = M2/M1) — verify which is intended.
    ratio = Setting(0.5)
    e = Setting(0.3)                      # trapezium shape parameter (TRAPEZIUM only)
    # Fitted values, displayed read-only and copied back via the "<- Use" buttons
    M1_out = 0.0
    ratio_out = 0.0
    e_out = 0.0
    # Per-parameter fit controls: fix the value, or bound it in [min, max]
    M1_fixed = Setting(False)
    ratio_fixed = Setting(False)
    e_fixed = Setting(False)
    M1_min = Setting(0.0)
    ratio_min = Setting(0.0)
    e_min = Setting(0.0)
    M1_max = Setting(1000.0)
    ratio_max = Setting(10.0)
    e_max = Setting(1.0)
    def __init__(self):
        """Build the widget GUI: Bender/Fit setting tabs plus the bender plot area."""
        graphical_Options=ow_optical_element.GraphicalOptions(is_mirror=True)
        super().__init__(graphical_Options)
        # "Bender" page inside the basic-settings tabs, itself split into sub-tabs.
        tabs = gui.tabWidget(oasysgui.createTabPage(self.tabs_basic_setting, "Bender"))
        tab_bender = oasysgui.createTabPage(tabs, "Bender Setting")
        # Sampling of the correction surface
        surface_box = oasysgui.widgetBox(tab_bender, "Surface Setting", addSpace=False, orientation="vertical")
        oasysgui.lineEdit(surface_box, self, "bender_bin_x", "bins Sagittal", labelWidth=260, valueType=int, orientation="horizontal")
        oasysgui.lineEdit(surface_box, self, "bender_bin_y", "bins Transversal", labelWidth=260, valueType=int, orientation="horizontal")
        # Mechanical properties and bender type
        material_box = oasysgui.widgetBox(tab_bender, "Bender Setting", addSpace=False, orientation="vertical")
        self.le_E = oasysgui.lineEdit(material_box, self, "E", "Young's Modulus ", labelWidth=260, valueType=float, orientation="horizontal")
        self.le_h = oasysgui.lineEdit(material_box, self, "h", "Thickness ", labelWidth=260, valueType=float, orientation="horizontal")
        gui.comboBox(material_box, self, "kind_of_bender", label="Kind Of Bender ", items=["Single Momentum", "Double Momentum"],
                     labelWidth=150, orientation="horizontal", callback=self.set_kind_of_bender)
        gui.comboBox(material_box, self, "shape", label="Shape ", items=["Trapezium", "Rectangle"],
                     labelWidth=150, orientation="horizontal", callback=self.set_shape)
        # "Fit Setting" sub-tab: output file, fit window and fit parameters
        tab_fit = oasysgui.createTabPage(tabs, "Fit Setting")
        fit_box = oasysgui.widgetBox(tab_fit, "", addSpace=False, orientation="vertical")
        file_box = oasysgui.widgetBox(fit_box, "", addSpace=False, orientation="horizontal", height=25)
        self.le_output_file_name = oasysgui.lineEdit(file_box, self, "output_file_name", "Out File Name", labelWidth=100, valueType=str, orientation="horizontal")
        gui.button(file_box, self, "...", callback=self.select_output_file, width=20)
        length_box = oasysgui.widgetBox(fit_box, "", addSpace=False, orientation="horizontal")
        self.cb_optimized_length = gui.comboBox(length_box, self, "which_length", label="Optimized Length ", items=["Total", "Partial"],
                                                labelWidth=150, orientation="horizontal", callback=self.set_which_length)
        self.le_optimized_length = oasysgui.lineEdit(length_box, self, "optimized_length", " ", labelWidth=10, valueType=float, orientation="horizontal")
        self.set_which_length()
        gui.separator(fit_box)
        # Builds one fit-parameter group: value, [min, max] bounds + "Fixed"
        # checkbox, and a read-only "Fitted" field with a "<- Use" copy-back button.
        def add_parameter_box(container_box, variable, label):
            box = oasysgui.widgetBox(container_box, "", addSpace=False, orientation="horizontal")
            oasysgui.lineEdit(box, self, variable, label, labelWidth=50, valueType=float, orientation="horizontal")
            gui.label(box, self, " ", labelWidth=58)
            box = oasysgui.widgetBox(container_box, "", addSpace=False, orientation="horizontal")
            # Keep references as self.le_<variable>_min/_max so the
            # set_<variable> callbacks can enable/disable them.
            setattr(self, "le_" + variable + "_min", oasysgui.lineEdit(box, self, variable + "_min", "Min",
                                                              labelWidth=50, valueType=float, orientation="horizontal"))
            setattr(self, "le_" + variable + "_max", oasysgui.lineEdit(box, self, variable + "_max", "Max",
                                                              labelWidth=35, valueType=float, orientation="horizontal"))
            gui.checkBox(box, self, variable + "_fixed", "Fixed", callback=getattr(self, "set_" + variable))
            box = oasysgui.widgetBox(container_box, "", addSpace=False, orientation="horizontal")
            le = oasysgui.lineEdit(box, self, variable + "_out", "Fitted", labelWidth=50, valueType=float, orientation="horizontal")
            le.setEnabled(False)
            le.setStyleSheet("color: blue; background-color: rgb(254, 244, 205); font:bold")
            # Copy the fitted value back into the initial-guess field.
            def set_variable_fit(): setattr(self, variable, getattr(self, variable + "_out"))
            gui.button(box, self, "<- Use", width=58, callback=set_variable_fit)
            getattr(self, "set_" + variable)()
        m1_box = oasysgui.widgetBox(fit_box, "", addSpace=False, orientation="vertical")
        gui.separator(fit_box, 10)
        self.ratio_box = oasysgui.widgetBox(fit_box, "", addSpace=False, orientation="vertical")
        gui.separator(fit_box, 10)
        self.e_box = oasysgui.widgetBox(fit_box, "", addSpace=False, orientation="vertical")
        gui.separator(fit_box, 10)
        add_parameter_box(m1_box, "M1", "M1")
        add_parameter_box(self.ratio_box, "ratio", "M1/M2")
        add_parameter_box(self.e_box, "e", "e")
        # Apply initial visibility of the ratio/e groups.
        self.set_kind_of_bender()
        self.set_shape()
        #######################################################
        # "Bender Plots" page in the main tab area
        plot_tab = oasysgui.createTabPage(self.main_tabs, "Bender Plots")
        view_box = oasysgui.widgetBox(plot_tab, "Plotting Style", addSpace=False, orientation="vertical", width=350)
        self.view_type_combo = gui.comboBox(view_box, self, "show_bender_plots", label="Show Plots", labelWidth=220,
                                            items=["No", "Yes"], sendSelectedValue=False, orientation="horizontal")
        bender_tabs = oasysgui.tabWidget(plot_tab)
        # One tab per plot; order must match the `index` argument of plot1D/plot3D.
        tabs = [oasysgui.createTabPage(bender_tabs, "Bender vs. Ideal (1D)"),
                oasysgui.createTabPage(bender_tabs, "Ideal - Bender (1D)"),
                oasysgui.createTabPage(bender_tabs, "Ideal - Bender (3D)"),
                oasysgui.createTabPage(bender_tabs, "Figure Error (3D)"),
                oasysgui.createTabPage(bender_tabs, "Ideal - Bender + Figure Error (3D)")]
        # Matplotlib canvas factory; "3D" canvases get a 3d-projection subplot.
        def create_figure_canvas(mode="3D"):
            figure = Figure(figsize=(100, 100))
            figure.patch.set_facecolor('white')
            if mode == "3D": figure.add_subplot(111, projection='3d')
            else: figure.add_subplot(111)
            figure_canvas = FigureCanvasQTAgg(figure)
            figure_canvas.setFixedWidth(self.IMAGE_WIDTH)
            figure_canvas.setFixedHeight(self.IMAGE_HEIGHT-10)
            return figure_canvas
        self.figure_canvas = [create_figure_canvas("1D"), create_figure_canvas("1D"),
                              create_figure_canvas(), create_figure_canvas(), create_figure_canvas()]
        for tab, figure_canvas in zip(tabs, self.figure_canvas): tab.layout().addWidget(figure_canvas)
        gui.rubber(self.controlArea)
        gui.rubber(self.mainArea)
################################################################
#
# SHADOW MANAGEMENT
#
################################################################
def select_output_file(self):
self.le_output_file_name.setText(oasysgui.selectFileFromDialog(self, self.output_file_name, "Select Output File", file_extension_filter="Data Files (*.dat)"))
def set_kind_of_bender(self):
self.ratio_box.setVisible(self.kind_of_bender==1)
def set_shape(self):
self.e_box.setVisible(self.shape==0)
def set_which_length(self):
self.le_optimized_length.setEnabled(self.which_length==1)
def set_M1(self):
self.le_M1_min.setEnabled(self.M1_fixed==False)
self.le_M1_max.setEnabled(self.M1_fixed==False)
def set_ratio(self):
self.le_ratio_min.setEnabled(self.ratio_fixed==False)
self.le_ratio_max.setEnabled(self.ratio_fixed==False)
def set_e(self):
self.le_e_min.setEnabled(self.e_fixed==False)
self.le_e_max.setEnabled(self.e_fixed==False)
    def after_change_workspace_units(self):
        """Append the current workspace unit to the dimensioned field labels."""
        super().after_change_workspace_units()
        # Young's modulus: force per unit area -> [N/<unit>^2]
        label = self.le_E.parent().layout().itemAt(0).widget()
        label.setText(label.text() + " [N/" + self.workspace_units_label + "^2]")
        # Substrate thickness -> [<unit>]
        label = self.le_h.parent().layout().itemAt(0).widget()
        label.setText(label.text() + " [" + self.workspace_units_label + "]")
        # Optimized fit length -> [<unit>]
        label = self.cb_optimized_length.parent().layout().itemAt(0).widget()
        label.setText(label.text() + " [" + self.workspace_units_label + "]")
    def checkFields(self):
        """
        Validate the widget inputs before tracing; raises ValueError (or the
        congruence-check exceptions) on invalid configuration, and caches the
        validated output path in self.output_file_name_full.
        """
        super().checkFields()
        # The bender model only applies to a finite cylindrical ellipsoid
        # with the default (0) cylinder orientation.
        if self.is_cylinder != 1: raise ValueError("Bender Ellipse must be a cylinder")
        if self.cylinder_orientation != 0: raise ValueError("Cylinder orientation must be 0")
        if self.is_infinite == 0: raise ValueError("This OE can't have infinite dimensions")
        if self.which_length==1:
            # Partial fit: window must be positive and within the mirror length.
            congruence.checkStrictlyPositiveNumber(self.optimized_length, "Optimized Length")
            congruence.checkLessOrEqualThan(self.optimized_length, self.dim_y_plus+self.dim_y_minus, "Optimized Length", "Total Length")
        if self.modified_surface > 0:
            # Only preprocessor-generated error profiles can be combined with the bender.
            if not (self.modified_surface == 1 and self.ms_type_of_defect == 2):
                raise ValueError("Only Preprocessor generated error profiles are admitted")
        congruence.checkStrictlyPositiveNumber(self.bender_bin_x, "Bins X")
        congruence.checkStrictlyPositiveNumber(self.bender_bin_y, "Bins Y")
        self.output_file_name_full = congruence.checkFileName(self.output_file_name)
    def completeOperations(self, shadow_oe):
        """
        Run the bender workflow: trace a throwaway copy to get the ideal
        surface, fit the bender parameters, write the residual (plus any
        preprocessor figure error) as a Shadow surface file, then re-trace
        `shadow_oe` with that file applied as ripple/figure error.
        """
        # Trace a duplicate first so the real OE is traced only once, below,
        # with the bender correction already attached.
        shadow_oe_temp = shadow_oe.duplicate()
        input_beam_temp = self.input_beam.duplicate(history=False)
        self.manage_acceptance_slits(shadow_oe_temp)
        ShadowBeam.traceFromOE(input_beam_temp,
                               shadow_oe_temp,
                               write_start_file=0,
                               write_end_file=0,
                               widget_class_name=type(self).__name__)
        x, y, z = self.calculate_ideal_surface(shadow_oe_temp)
        bender_parameter, z_bender_correction = self.calculate_bender_correction(y, z, self.kind_of_bender, self.shape)
        # Publish fitted values to the read-only GUI fields; the meaning of
        # bender_parameter[1:] depends on shape/kind (see calculate_bender_correction).
        self.M1_out = round(bender_parameter[0], int(6*self.workspace_units_to_mm))
        if self.shape == TRAPEZIUM:
            self.e_out = round(bender_parameter[1], 5)
            if self.kind_of_bender == DOUBLE_MOMENTUM: self.ratio_out = round(bender_parameter[2], 5)
        elif self.shape == RECTANGLE:
            if self.kind_of_bender == DOUBLE_MOMENTUM: self.ratio_out = round(bender_parameter[1], 5)
        self.plot3D(x, y, z_bender_correction, 2, "Ideal - Bender Surfaces")
        if self.modified_surface > 0:
            # Combine the preprocessor figure error with the bender residual,
            # resampling it onto the bender grid when the grids differ.
            x_e, y_e, z_e = ShadowPreProcessor.read_surface_error_file(self.ms_defect_file_name)
            if len(x) == len(x_e) and len(y) == len(y_e) and \
                    x[0] == x_e[0] and x[-1] == x_e[-1] and \
                    y[0] == y_e[0] and y[-1] == y_e[-1]:
                z_figure_error = z_e
            else:
                z_figure_error = interp2d(y_e, x_e, z_e, kind='cubic')(y, x)
            z_bender_correction += z_figure_error
            self.plot3D(x, y, z_figure_error, 3, "Figure Error Surface")
            self.plot3D(x, y, z_bender_correction, 4, "Ideal - Bender + Figure Error Surfaces")
        ST.write_shadow_surface(z_bender_correction.T, numpy.round(x, 6), numpy.round(y, 6), self.output_file_name_full)
        # Add new surface as figure error
        shadow_oe._oe.F_RIPPLE = 1
        shadow_oe._oe.F_G_S = 2
        shadow_oe._oe.FILE_RIP = bytes(self.output_file_name_full, 'utf-8')
        # Redo Raytracing with the new file
        super().completeOperations(shadow_oe)
        self.send("PreProcessor_Data", ShadowPreProcessorData(error_profile_data_file=self.output_file_name,
                                                              error_profile_x_dim=self.dim_x_plus+self.dim_x_minus,
                                                              error_profile_y_dim=self.dim_y_plus+self.dim_y_minus))
    def instantiateShadowOE(self):
        """Create the bare Shadow ellipsoid-mirror element this widget configures."""
        return ShadowOpticalElement.create_ellipsoid_mirror()
def calculate_ideal_surface(self, shadow_oe, sign=-1):
x = numpy.linspace(-self.dim_x_minus, self.dim_x_plus, self.bender_bin_x + 1)
y = numpy.linspace(-self.dim_y_minus, self.dim_y_plus, self.bender_bin_y + 1)
c1 = round(shadow_oe._oe.CCC[0], 10)
c2 = round(shadow_oe._oe.CCC[1], 10)
c3 = round(shadow_oe._oe.CCC[2], 10)
c4 = round(shadow_oe._oe.CCC[3], 10)
c5 = round(shadow_oe._oe.CCC[4], 10)
c6 = round(shadow_oe._oe.CCC[5], 10)
c7 = round(shadow_oe._oe.CCC[6], 10)
c8 = round(shadow_oe._oe.CCC[7], 10)
c9 = round(shadow_oe._oe.CCC[8], 10)
c10 = round(shadow_oe._oe.CCC[9], 10)
xx, yy = numpy.meshgrid(x, y)
c = c1*(xx**2) + c2*(yy**2) + c4*xx*yy + c7*xx + c8*yy + c10
b = c5*yy + c6*xx + c9
a = c3
z = (-b + sign*numpy.sqrt(b**2 - 4*a*c))/(2*a)
z[b**2 - 4*a*c < 0] = numpy.nan
return x, y, z.T
def calculate_bender_correction(self, y, z, kind_of_bender, shape):
b0 = self.dim_x_plus + self.dim_x_minus
L = self.dim_y_plus + self.dim_y_minus # add optimization length
# flip the coordinate system to be consistent with Mike's formulas
ideal_profile = z[0, :][::-1] # one row is the profile of the cylinder, enough for the minimizer
ideal_profile += -ideal_profile[0] + ((L/2 + y)*(ideal_profile[0]-ideal_profile[-1]))/L # Rotation
if self.which_length == 0:
y_fit = y
ideal_profile_fit = ideal_profile
else:
cursor = numpy.where(numpy.logical_and(y >= -self.optimized_length/2,
y <= self.optimized_length/2) )
y_fit = y[cursor]
ideal_profile_fit = ideal_profile[cursor]
epsilon_minus = 1 - 1e-8
epsilon_plus = 1 + 1e-8
Eh_3 = self.E * self.h ** 3
initial_guess = None
constraints = None
bender_function = None
if shape == TRAPEZIUM:
def general_bender_function(Y, M1, e, ratio):
M2 = M1 * ratio
A = (M1 + M2) / 2
B = (M1 - M2) / L
C = Eh_3 * (2 * b0 + e * b0) / 24
D = Eh_3 * e * b0 / (12 * L)
H = (A * D + B * C) / D ** 2
CDLP = C + D * L / 2
CDLM = C - D * L / 2
F = (H / L) * ((CDLM * numpy.log(CDLM) - CDLP * numpy.log(CDLP)) / D + L)
G = (-H * ((CDLM * numpy.log(CDLM) + CDLP * numpy.log(CDLP))) + (B * L ** 2) / 4) / (2 * D)
CDY = C + D * Y
return H * ((CDY / D) * numpy.log(CDY) - Y) - (B * Y ** 2) / (2 * D) + F * Y + G
def bender_function_2m(Y, M1, e, ratio): return general_bender_function(Y, M1, e, ratio)
def bender_function_1m(Y, M1, e): return general_bender_function(Y, M1, e, 1.0)
if kind_of_bender == SINGLE_MOMENTUM:
bender_function = bender_function_1m
initial_guess = [self.M1, self.e]
constraints = [[self.M1_min if self.M1_fixed == False else (self.M1 * epsilon_minus),
self.e_min if self.e_fixed == False else (self.e * epsilon_minus)],
[self.M1_max if self.M1_fixed == False else (self.M1 * epsilon_plus),
self.e_max if self.e_fixed == False else (self.e * epsilon_plus)]]
elif kind_of_bender == DOUBLE_MOMENTUM:
bender_function = bender_function_2m
initial_guess = [self.M1, self.e, self.ratio]
constraints = [[self.M1_min if self.M1_fixed == False else (self.M1*epsilon_minus),
self.e_min if self.e_fixed == False else (self.e*epsilon_minus),
self.ratio_min if self.ratio_fixed == False else (self.ratio*epsilon_minus)],
[self.M1_max if self.M1_fixed == False else (self.M1*epsilon_plus),
self.e_max if self.e_fixed == False else (self.e*epsilon_plus),
self.ratio_max if self.ratio_fixed == False else (self.ratio*epsilon_plus)]]
elif shape == RECTANGLE:
def general_bender_function(Y, M1, ratio):
M2 = M1 * ratio
A = (M1 + M2) / 2
B = (M1 - M2) / L
C = Eh_3 * b0 / 12
F = (B * L**2) / (24 * C)
G = -(A * L**2) / (8 * C)
return -(B * Y**3) / (6 * C) + (A * Y**2) / (2 * C) + F * Y + G
def bender_function_2m(Y, M1, ratio): return general_bender_function(Y, M1, ratio)
def bender_function_1m(Y, M1): return general_bender_function(Y, M1, 1.0)
if kind_of_bender == SINGLE_MOMENTUM:
bender_function = bender_function_1m
initial_guess = [self.M1]
constraints = [[self.M1_min if self.M1_fixed == False else (self.M1 * epsilon_minus)],
[self.M1_max if self.M1_fixed == False else (self.M1 * epsilon_plus)]]
elif kind_of_bender == DOUBLE_MOMENTUM:
bender_function = bender_function_2m
initial_guess = [self.M1, self.ratio]
constraints = [[self.M1_min if self.M1_fixed == False else (self.M1*epsilon_minus),
self.ratio_min if self.ratio_fixed == False else (self.ratio*epsilon_minus)],
[self.M1_max if self.M1_fixed == False else (self.M1*epsilon_plus),
self.ratio_max if self.ratio_fixed == False else (self.ratio*epsilon_plus)]]
parameters, _ = curve_fit(f=bender_function,
xdata=y_fit,
ydata=ideal_profile_fit,
p0=initial_guess,
bounds=constraints,
method='trf')
if len(parameters) == 1: bender_profile = bender_function(y, parameters[0])
elif len(parameters) == 2: bender_profile = bender_function(y, parameters[0], parameters[1])
else: bender_profile = bender_function(y, parameters[0], parameters[1], parameters[2])
# rotate back to Shadow system
bender_profile = bender_profile[::-1]
ideal_profile = ideal_profile[::-1]
# from here it's Shadow Axis system
correction_profile = ideal_profile - bender_profile
if self.which_length == 1: correction_profile_fit = correction_profile[cursor]
# r-squared = 1 - residual sum of squares / total sum of squares
r_squared = 1 - (numpy.sum(correction_profile**2) / numpy.sum((ideal_profile - numpy.mean(ideal_profile))**2))
rms = round(correction_profile.std()*1e9*self.workspace_units_to_m, 6)
if self.which_length == 1: rms_opt = round(correction_profile_fit.std()*1e9*self.workspace_units_to_m, 6)
self.plot1D(y, bender_profile, y_values_2=ideal_profile, index=0, title = "Bender vs. Ideal Profiles" + "\n" + r'$R^2$ = ' + str(r_squared), um=1)
self.plot1D(y, correction_profile, index=1, title="Correction Profile 1D, r.m.s. = " + str(rms) + " nm" +
("" if self.which_length == 0 else (", " + str(rms_opt) + " nm (optimized)")))
z_bender_correction = numpy.zeros(z.shape)
for i in range(z_bender_correction.shape[0]): z_bender_correction[i, :] = numpy.copy(correction_profile)
return parameters, z_bender_correction
def plot1D(self, x_coords, y_values, y_values_2=None, index=0, title="", um=0):
if self.show_bender_plots == 1:
figure = self.figure_canvas[index].figure
axis = figure.gca()
axis.clear()
axis.set_xlabel("Y [" + self.workspace_units_label + "]")
axis.set_ylabel("Z [" + ("nm" if um==0 else "\u03bcm") + "]")
axis.set_title(title)
axis.plot(x_coords, (y_values * self.workspace_units_to_m * (1e9 if um==0 else 1e6)), color="blue", label="bender", linewidth=2)
if not y_values_2 is None: axis.plot(x_coords, (y_values_2 * self.workspace_units_to_m * (1e9 if um==0 else 1e6)), "-.r", label="ideal")
axis.legend(loc=0, fontsize='small')
figure.canvas.draw()
def plot3D(self, x_coords, y_coords, z_values, index, title=""):
if self.show_bender_plots == 1:
figure = self.figure_canvas[index].figure
x_to_plot, y_to_plot = numpy.meshgrid(x_coords, y_coords)
z_to_plot = z_values.T
axis = figure.gca()
axis.clear()
axis.set_xlabel("X [" + self.workspace_units_label + "]")
axis.set_ylabel("Y [" + self.workspace_units_label + "]")
axis.set_zlabel("Z [nm]")
axis.set_title(title)
axis.plot_surface(x_to_plot, y_to_plot, (z_to_plot * self.workspace_units_to_m * 1e9),
rstride=1, cstride=1, cmap=cm.autumn, linewidth=0.5, antialiased=True)
figure.canvas.draw()
axis.mouse_init()
if __name__ == "__main__":
    # Standalone smoke test: run the widget outside the OASYS canvas.
    a = QApplication(sys.argv)
    ow = BendableEllipsoidMirror()
    ow.show()
    a.exec_()
    ow.saveSettings()
| [
"oasys.widgets.gui.widgetBox",
"numpy.sqrt",
"orangecontrib.shadow.util.shadow_objects.ShadowOpticalElement.create_ellipsoid_mirror",
"oasys.widgets.gui.createTabPage",
"oasys.widgets.congruence.checkFileName",
"numpy.log",
"oasys.widgets.congruence.checkStrictlyPositiveNumber",
"oasys.widgets.gui.tab... | [((2776, 2786), 'orangewidget.settings.Setting', 'Setting', (['(0)'], {}), '(0)\n', (2783, 2786), False, 'from orangewidget.settings import Setting\n'), ((2807, 2819), 'orangewidget.settings.Setting', 'Setting', (['(100)'], {}), '(100)\n', (2814, 2819), False, 'from orangewidget.settings import Setting\n'), ((2839, 2851), 'orangewidget.settings.Setting', 'Setting', (['(500)'], {}), '(500)\n', (2846, 2851), False, 'from orangewidget.settings import Setting\n'), ((2861, 2876), 'orangewidget.settings.Setting', 'Setting', (['(131000)'], {}), '(131000)\n', (2868, 2876), False, 'from orangewidget.settings import Setting\n'), ((2885, 2896), 'orangewidget.settings.Setting', 'Setting', (['(10)'], {}), '(10)\n', (2892, 2896), False, 'from orangewidget.settings import Setting\n'), ((2919, 2929), 'orangewidget.settings.Setting', 'Setting', (['(1)'], {}), '(1)\n', (2926, 2929), False, 'from orangewidget.settings import Setting\n'), ((2942, 2952), 'orangewidget.settings.Setting', 'Setting', (['(0)'], {}), '(0)\n', (2949, 2952), False, 'from orangewidget.settings import Setting\n'), ((2977, 3005), 'orangewidget.settings.Setting', 'Setting', (['"""mirror_bender.dat"""'], {}), "('mirror_bender.dat')\n", (2984, 3005), False, 'from orangewidget.settings import Setting\n'), ((3026, 3036), 'orangewidget.settings.Setting', 'Setting', (['(0)'], {}), '(0)\n', (3033, 3036), False, 'from orangewidget.settings import Setting\n'), ((3060, 3072), 'orangewidget.settings.Setting', 'Setting', (['(0.0)'], {}), '(0.0)\n', (3067, 3072), False, 'from orangewidget.settings import Setting\n'), ((3086, 3098), 'orangewidget.settings.Setting', 'Setting', (['(0.0)'], {}), '(0.0)\n', (3093, 3098), False, 'from orangewidget.settings import Setting\n'), ((3111, 3123), 'orangewidget.settings.Setting', 'Setting', (['(0.5)'], {}), '(0.5)\n', (3118, 3123), False, 'from orangewidget.settings import Setting\n'), ((3136, 3148), 'orangewidget.settings.Setting', 'Setting', (['(0.3)'], {}), 
'(0.3)\n', (3143, 3148), False, 'from orangewidget.settings import Setting\n'), ((3229, 3243), 'orangewidget.settings.Setting', 'Setting', (['(False)'], {}), '(False)\n', (3236, 3243), False, 'from orangewidget.settings import Setting\n'), ((3262, 3276), 'orangewidget.settings.Setting', 'Setting', (['(False)'], {}), '(False)\n', (3269, 3276), False, 'from orangewidget.settings import Setting\n'), ((3295, 3309), 'orangewidget.settings.Setting', 'Setting', (['(False)'], {}), '(False)\n', (3302, 3309), False, 'from orangewidget.settings import Setting\n'), ((3327, 3339), 'orangewidget.settings.Setting', 'Setting', (['(0.0)'], {}), '(0.0)\n', (3334, 3339), False, 'from orangewidget.settings import Setting\n'), ((3356, 3368), 'orangewidget.settings.Setting', 'Setting', (['(0.0)'], {}), '(0.0)\n', (3363, 3368), False, 'from orangewidget.settings import Setting\n'), ((3385, 3397), 'orangewidget.settings.Setting', 'Setting', (['(0.0)'], {}), '(0.0)\n', (3392, 3397), False, 'from orangewidget.settings import Setting\n'), ((3415, 3430), 'orangewidget.settings.Setting', 'Setting', (['(1000.0)'], {}), '(1000.0)\n', (3422, 3430), False, 'from orangewidget.settings import Setting\n'), ((3447, 3460), 'orangewidget.settings.Setting', 'Setting', (['(10.0)'], {}), '(10.0)\n', (3454, 3460), False, 'from orangewidget.settings import Setting\n'), ((3477, 3489), 'orangewidget.settings.Setting', 'Setting', (['(1.0)'], {}), '(1.0)\n', (3484, 3489), False, 'from orangewidget.settings import Setting\n'), ((24862, 24884), 'PyQt5.QtWidgets.QApplication', 'QApplication', (['sys.argv'], {}), '(sys.argv)\n', (24874, 24884), False, 'from PyQt5.QtWidgets import QApplication\n'), ((3541, 3592), 'orangecontrib.shadow.widgets.gui.ow_optical_element.GraphicalOptions', 'ow_optical_element.GraphicalOptions', ([], {'is_mirror': '(True)'}), '(is_mirror=True)\n', (3576, 3592), False, 'from orangecontrib.shadow.widgets.gui import ow_ellipsoid_element, ow_optical_element\n'), ((3749, 3795), 
'oasys.widgets.gui.createTabPage', 'oasysgui.createTabPage', (['tabs', '"""Bender Setting"""'], {}), "(tabs, 'Bender Setting')\n", (3771, 3795), True, 'from oasys.widgets import gui as oasysgui\n'), ((3819, 3912), 'oasys.widgets.gui.widgetBox', 'oasysgui.widgetBox', (['tab_bender', '"""Surface Setting"""'], {'addSpace': '(False)', 'orientation': '"""vertical"""'}), "(tab_bender, 'Surface Setting', addSpace=False,\n orientation='vertical')\n", (3837, 3912), True, 'from oasys.widgets import gui as oasysgui\n'), ((3918, 4048), 'oasys.widgets.gui.lineEdit', 'oasysgui.lineEdit', (['surface_box', 'self', '"""bender_bin_x"""', '"""bins Sagittal"""'], {'labelWidth': '(260)', 'valueType': 'int', 'orientation': '"""horizontal"""'}), "(surface_box, self, 'bender_bin_x', 'bins Sagittal',\n labelWidth=260, valueType=int, orientation='horizontal')\n", (3935, 4048), True, 'from oasys.widgets import gui as oasysgui\n'), ((4053, 4186), 'oasys.widgets.gui.lineEdit', 'oasysgui.lineEdit', (['surface_box', 'self', '"""bender_bin_y"""', '"""bins Transversal"""'], {'labelWidth': '(260)', 'valueType': 'int', 'orientation': '"""horizontal"""'}), "(surface_box, self, 'bender_bin_y', 'bins Transversal',\n labelWidth=260, valueType=int, orientation='horizontal')\n", (4070, 4186), True, 'from oasys.widgets import gui as oasysgui\n'), ((4207, 4299), 'oasys.widgets.gui.widgetBox', 'oasysgui.widgetBox', (['tab_bender', '"""Bender Setting"""'], {'addSpace': '(False)', 'orientation': '"""vertical"""'}), "(tab_bender, 'Bender Setting', addSpace=False,\n orientation='vertical')\n", (4225, 4299), True, 'from oasys.widgets import gui as oasysgui\n'), ((4317, 4443), 'oasys.widgets.gui.lineEdit', 'oasysgui.lineEdit', (['material_box', 'self', '"""E"""', '"""Young\'s Modulus """'], {'labelWidth': '(260)', 'valueType': 'float', 'orientation': '"""horizontal"""'}), '(material_box, self, \'E\', "Young\'s Modulus ", labelWidth=\n 260, valueType=float, orientation=\'horizontal\')\n', (4334, 4443), True, 'from 
oasys.widgets import gui as oasysgui\n'), ((4459, 4578), 'oasys.widgets.gui.lineEdit', 'oasysgui.lineEdit', (['material_box', 'self', '"""h"""', '"""Thickness """'], {'labelWidth': '(260)', 'valueType': 'float', 'orientation': '"""horizontal"""'}), "(material_box, self, 'h', 'Thickness ', labelWidth=260,\n valueType=float, orientation='horizontal')\n", (4476, 4578), True, 'from oasys.widgets import gui as oasysgui\n'), ((4584, 4789), 'orangewidget.gui.comboBox', 'gui.comboBox', (['material_box', 'self', '"""kind_of_bender"""'], {'label': '"""Kind Of Bender """', 'items': "['Single Momentum', 'Double Momentum']", 'labelWidth': '(150)', 'orientation': '"""horizontal"""', 'callback': 'self.set_kind_of_bender'}), "(material_box, self, 'kind_of_bender', label='Kind Of Bender ',\n items=['Single Momentum', 'Double Momentum'], labelWidth=150,\n orientation='horizontal', callback=self.set_kind_of_bender)\n", (4596, 4789), False, 'from orangewidget import gui\n'), ((4812, 4979), 'orangewidget.gui.comboBox', 'gui.comboBox', (['material_box', 'self', '"""shape"""'], {'label': '"""Shape """', 'items': "['Trapezium', 'Rectangle']", 'labelWidth': '(150)', 'orientation': '"""horizontal"""', 'callback': 'self.set_shape'}), "(material_box, self, 'shape', label='Shape ', items=[\n 'Trapezium', 'Rectangle'], labelWidth=150, orientation='horizontal',\n callback=self.set_shape)\n", (4824, 4979), False, 'from orangewidget import gui\n'), ((5011, 5054), 'oasys.widgets.gui.createTabPage', 'oasysgui.createTabPage', (['tabs', '"""Fit Setting"""'], {}), "(tabs, 'Fit Setting')\n", (5033, 5054), True, 'from oasys.widgets import gui as oasysgui\n'), ((5074, 5145), 'oasys.widgets.gui.widgetBox', 'oasysgui.widgetBox', (['tab_fit', '""""""'], {'addSpace': '(False)', 'orientation': '"""vertical"""'}), "(tab_fit, '', addSpace=False, orientation='vertical')\n", (5092, 5145), True, 'from oasys.widgets import gui as oasysgui\n'), ((5166, 5254), 'oasys.widgets.gui.widgetBox', 'oasysgui.widgetBox', 
(['fit_box', '""""""'], {'addSpace': '(False)', 'orientation': '"""horizontal"""', 'height': '(25)'}), "(fit_box, '', addSpace=False, orientation='horizontal',\n height=25)\n", (5184, 5254), True, 'from oasys.widgets import gui as oasysgui\n'), ((5286, 5417), 'oasys.widgets.gui.lineEdit', 'oasysgui.lineEdit', (['file_box', 'self', '"""output_file_name"""', '"""Out File Name"""'], {'labelWidth': '(100)', 'valueType': 'str', 'orientation': '"""horizontal"""'}), "(file_box, self, 'output_file_name', 'Out File Name',\n labelWidth=100, valueType=str, orientation='horizontal')\n", (5303, 5417), True, 'from oasys.widgets import gui as oasysgui\n'), ((5422, 5499), 'orangewidget.gui.button', 'gui.button', (['file_box', 'self', '"""..."""'], {'callback': 'self.select_output_file', 'width': '(20)'}), "(file_box, self, '...', callback=self.select_output_file, width=20)\n", (5432, 5499), False, 'from orangewidget import gui\n'), ((5522, 5595), 'oasys.widgets.gui.widgetBox', 'oasysgui.widgetBox', (['fit_box', '""""""'], {'addSpace': '(False)', 'orientation': '"""horizontal"""'}), "(fit_box, '', addSpace=False, orientation='horizontal')\n", (5540, 5595), True, 'from oasys.widgets import gui as oasysgui\n'), ((5632, 5815), 'orangewidget.gui.comboBox', 'gui.comboBox', (['length_box', 'self', '"""which_length"""'], {'label': '"""Optimized Length """', 'items': "['Total', 'Partial']", 'labelWidth': '(150)', 'orientation': '"""horizontal"""', 'callback': 'self.set_which_length'}), "(length_box, self, 'which_length', label='Optimized Length ',\n items=['Total', 'Partial'], labelWidth=150, orientation='horizontal',\n callback=self.set_which_length)\n", (5644, 5815), False, 'from orangewidget import gui\n'), ((5891, 6013), 'oasys.widgets.gui.lineEdit', 'oasysgui.lineEdit', (['length_box', 'self', '"""optimized_length"""', '""" """'], {'labelWidth': '(10)', 'valueType': 'float', 'orientation': '"""horizontal"""'}), "(length_box, self, 'optimized_length', ' ', labelWidth=10,\n 
valueType=float, orientation='horizontal')\n", (5908, 6013), True, 'from oasys.widgets import gui as oasysgui\n'), ((6051, 6073), 'orangewidget.gui.separator', 'gui.separator', (['fit_box'], {}), '(fit_box)\n', (6064, 6073), False, 'from orangewidget import gui\n'), ((7693, 7764), 'oasys.widgets.gui.widgetBox', 'oasysgui.widgetBox', (['fit_box', '""""""'], {'addSpace': '(False)', 'orientation': '"""vertical"""'}), "(fit_box, '', addSpace=False, orientation='vertical')\n", (7711, 7764), True, 'from oasys.widgets import gui as oasysgui\n'), ((7773, 7799), 'orangewidget.gui.separator', 'gui.separator', (['fit_box', '(10)'], {}), '(fit_box, 10)\n', (7786, 7799), False, 'from orangewidget import gui\n'), ((7825, 7896), 'oasys.widgets.gui.widgetBox', 'oasysgui.widgetBox', (['fit_box', '""""""'], {'addSpace': '(False)', 'orientation': '"""vertical"""'}), "(fit_box, '', addSpace=False, orientation='vertical')\n", (7843, 7896), True, 'from oasys.widgets import gui as oasysgui\n'), ((7905, 7931), 'orangewidget.gui.separator', 'gui.separator', (['fit_box', '(10)'], {}), '(fit_box, 10)\n', (7918, 7931), False, 'from orangewidget import gui\n'), ((7953, 8024), 'oasys.widgets.gui.widgetBox', 'oasysgui.widgetBox', (['fit_box', '""""""'], {'addSpace': '(False)', 'orientation': '"""vertical"""'}), "(fit_box, '', addSpace=False, orientation='vertical')\n", (7971, 8024), True, 'from oasys.widgets import gui as oasysgui\n'), ((8033, 8059), 'orangewidget.gui.separator', 'gui.separator', (['fit_box', '(10)'], {}), '(fit_box, 10)\n', (8046, 8059), False, 'from orangewidget import gui\n'), ((8368, 8422), 'oasys.widgets.gui.createTabPage', 'oasysgui.createTabPage', (['self.main_tabs', '"""Bender Plots"""'], {}), "(self.main_tabs, 'Bender Plots')\n", (8390, 8422), True, 'from oasys.widgets import gui as oasysgui\n'), ((8443, 8545), 'oasys.widgets.gui.widgetBox', 'oasysgui.widgetBox', (['plot_tab', '"""Plotting Style"""'], {'addSpace': '(False)', 'orientation': '"""vertical"""', 'width': 
'(350)'}), "(plot_tab, 'Plotting Style', addSpace=False, orientation=\n 'vertical', width=350)\n", (8461, 8545), True, 'from oasys.widgets import gui as oasysgui\n'), ((8573, 8738), 'orangewidget.gui.comboBox', 'gui.comboBox', (['view_box', 'self', '"""show_bender_plots"""'], {'label': '"""Show Plots"""', 'labelWidth': '(220)', 'items': "['No', 'Yes']", 'sendSelectedValue': '(False)', 'orientation': '"""horizontal"""'}), "(view_box, self, 'show_bender_plots', label='Show Plots',\n labelWidth=220, items=['No', 'Yes'], sendSelectedValue=False,\n orientation='horizontal')\n", (8585, 8738), False, 'from orangewidget import gui\n'), ((8798, 8826), 'oasys.widgets.gui.tabWidget', 'oasysgui.tabWidget', (['plot_tab'], {}), '(plot_tab)\n', (8816, 8826), True, 'from oasys.widgets import gui as oasysgui\n'), ((9989, 10017), 'orangewidget.gui.rubber', 'gui.rubber', (['self.controlArea'], {}), '(self.controlArea)\n', (9999, 10017), False, 'from orangewidget import gui\n'), ((10026, 10051), 'orangewidget.gui.rubber', 'gui.rubber', (['self.mainArea'], {}), '(self.mainArea)\n', (10036, 10051), False, 'from orangewidget import gui\n'), ((12474, 12541), 'oasys.widgets.congruence.checkStrictlyPositiveNumber', 'congruence.checkStrictlyPositiveNumber', (['self.bender_bin_x', '"""Bins X"""'], {}), "(self.bender_bin_x, 'Bins X')\n", (12512, 12541), False, 'from oasys.widgets import congruence\n'), ((12550, 12617), 'oasys.widgets.congruence.checkStrictlyPositiveNumber', 'congruence.checkStrictlyPositiveNumber', (['self.bender_bin_y', '"""Bins Y"""'], {}), "(self.bender_bin_y, 'Bins Y')\n", (12588, 12617), False, 'from oasys.widgets import congruence\n'), ((12655, 12702), 'oasys.widgets.congruence.checkFileName', 'congruence.checkFileName', (['self.output_file_name'], {}), '(self.output_file_name)\n', (12679, 12702), False, 'from oasys.widgets import congruence\n'), ((15341, 15387), 'orangecontrib.shadow.util.shadow_objects.ShadowOpticalElement.create_ellipsoid_mirror', 
'ShadowOpticalElement.create_ellipsoid_mirror', ([], {}), '()\n', (15385, 15387), False, 'from orangecontrib.shadow.util.shadow_objects import ShadowOpticalElement, ShadowBeam, ShadowPreProcessorData\n'), ((15461, 15534), 'numpy.linspace', 'numpy.linspace', (['(-self.dim_x_minus)', 'self.dim_x_plus', '(self.bender_bin_x + 1)'], {}), '(-self.dim_x_minus, self.dim_x_plus, self.bender_bin_x + 1)\n', (15475, 15534), False, 'import os, sys, numpy\n'), ((15547, 15620), 'numpy.linspace', 'numpy.linspace', (['(-self.dim_y_minus)', 'self.dim_y_plus', '(self.bender_bin_y + 1)'], {}), '(-self.dim_y_minus, self.dim_y_plus, self.bender_bin_y + 1)\n', (15561, 15620), False, 'import os, sys, numpy\n'), ((16100, 16120), 'numpy.meshgrid', 'numpy.meshgrid', (['x', 'y'], {}), '(x, y)\n', (16114, 16120), False, 'import os, sys, numpy\n'), ((21289, 21412), 'scipy.optimize.curve_fit', 'curve_fit', ([], {'f': 'bender_function', 'xdata': 'y_fit', 'ydata': 'ideal_profile_fit', 'p0': 'initial_guess', 'bounds': 'constraints', 'method': '"""trf"""'}), "(f=bender_function, xdata=y_fit, ydata=ideal_profile_fit, p0=\n initial_guess, bounds=constraints, method='trf')\n", (21298, 21412), False, 'from scipy.optimize import curve_fit\n'), ((23035, 23055), 'numpy.zeros', 'numpy.zeros', (['z.shape'], {}), '(z.shape)\n', (23046, 23055), False, 'import os, sys, numpy\n'), ((3668, 3725), 'oasys.widgets.gui.createTabPage', 'oasysgui.createTabPage', (['self.tabs_basic_setting', '"""Bender"""'], {}), "(self.tabs_basic_setting, 'Bender')\n", (3690, 3725), True, 'from oasys.widgets import gui as oasysgui\n'), ((6156, 6235), 'oasys.widgets.gui.widgetBox', 'oasysgui.widgetBox', (['container_box', '""""""'], {'addSpace': '(False)', 'orientation': '"""horizontal"""'}), "(container_box, '', addSpace=False, orientation='horizontal')\n", (6174, 6235), True, 'from oasys.widgets import gui as oasysgui\n'), ((6248, 6356), 'oasys.widgets.gui.lineEdit', 'oasysgui.lineEdit', (['box', 'self', 'variable', 'label'], 
{'labelWidth': '(50)', 'valueType': 'float', 'orientation': '"""horizontal"""'}), "(box, self, variable, label, labelWidth=50, valueType=\n float, orientation='horizontal')\n", (6265, 6356), True, 'from oasys.widgets import gui as oasysgui\n'), ((6364, 6404), 'orangewidget.gui.label', 'gui.label', (['box', 'self', '""" """'], {'labelWidth': '(58)'}), "(box, self, ' ', labelWidth=58)\n", (6373, 6404), False, 'from orangewidget import gui\n'), ((6424, 6503), 'oasys.widgets.gui.widgetBox', 'oasysgui.widgetBox', (['container_box', '""""""'], {'addSpace': '(False)', 'orientation': '"""horizontal"""'}), "(container_box, '', addSpace=False, orientation='horizontal')\n", (6442, 6503), True, 'from oasys.widgets import gui as oasysgui\n'), ((7111, 7190), 'oasys.widgets.gui.widgetBox', 'oasysgui.widgetBox', (['container_box', '""""""'], {'addSpace': '(False)', 'orientation': '"""horizontal"""'}), "(container_box, '', addSpace=False, orientation='horizontal')\n", (7129, 7190), True, 'from oasys.widgets import gui as oasysgui\n'), ((7209, 7328), 'oasys.widgets.gui.lineEdit', 'oasysgui.lineEdit', (['box', 'self', "(variable + '_out')", '"""Fitted"""'], {'labelWidth': '(50)', 'valueType': 'float', 'orientation': '"""horizontal"""'}), "(box, self, variable + '_out', 'Fitted', labelWidth=50,\n valueType=float, orientation='horizontal')\n", (7226, 7328), True, 'from oasys.widgets import gui as oasysgui\n'), ((7558, 7626), 'orangewidget.gui.button', 'gui.button', (['box', 'self', '"""<- Use"""'], {'width': '(58)', 'callback': 'set_variable_fit'}), "(box, self, '<- Use', width=58, callback=set_variable_fit)\n", (7568, 7626), False, 'from orangewidget import gui\n'), ((8844, 8904), 'oasys.widgets.gui.createTabPage', 'oasysgui.createTabPage', (['bender_tabs', '"""Bender vs. Ideal (1D)"""'], {}), "(bender_tabs, 'Bender vs. 
Ideal (1D)')\n", (8866, 8904), True, 'from oasys.widgets import gui as oasysgui\n'), ((8922, 8980), 'oasys.widgets.gui.createTabPage', 'oasysgui.createTabPage', (['bender_tabs', '"""Ideal - Bender (1D)"""'], {}), "(bender_tabs, 'Ideal - Bender (1D)')\n", (8944, 8980), True, 'from oasys.widgets import gui as oasysgui\n'), ((8998, 9056), 'oasys.widgets.gui.createTabPage', 'oasysgui.createTabPage', (['bender_tabs', '"""Ideal - Bender (3D)"""'], {}), "(bender_tabs, 'Ideal - Bender (3D)')\n", (9020, 9056), True, 'from oasys.widgets import gui as oasysgui\n'), ((9074, 9130), 'oasys.widgets.gui.createTabPage', 'oasysgui.createTabPage', (['bender_tabs', '"""Figure Error (3D)"""'], {}), "(bender_tabs, 'Figure Error (3D)')\n", (9096, 9130), True, 'from oasys.widgets import gui as oasysgui\n'), ((9148, 9221), 'oasys.widgets.gui.createTabPage', 'oasysgui.createTabPage', (['bender_tabs', '"""Ideal - Bender + Figure Error (3D)"""'], {}), "(bender_tabs, 'Ideal - Bender + Figure Error (3D)')\n", (9170, 9221), True, 'from oasys.widgets import gui as oasysgui\n'), ((9290, 9316), 'matplotlib.figure.Figure', 'Figure', ([], {'figsize': '(100, 100)'}), '(figsize=(100, 100))\n', (9296, 9316), False, 'from matplotlib.figure import Figure\n'), ((9506, 9531), 'matplotlib.backends.backend_qt5agg.FigureCanvasQTAgg', 'FigureCanvasQTAgg', (['figure'], {}), '(figure)\n', (9523, 9531), False, 'from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg\n'), ((10304, 10432), 'oasys.widgets.gui.selectFileFromDialog', 'oasysgui.selectFileFromDialog', (['self', 'self.output_file_name', '"""Select Output File"""'], {'file_extension_filter': '"""Data Files (*.dat)"""'}), "(self, self.output_file_name,\n 'Select Output File', file_extension_filter='Data Files (*.dat)')\n", (10333, 10432), True, 'from oasys.widgets import gui as oasysgui\n'), ((12034, 12119), 'oasys.widgets.congruence.checkStrictlyPositiveNumber', 'congruence.checkStrictlyPositiveNumber', (['self.optimized_length', '"""Optimized 
Length"""'], {}), "(self.optimized_length,\n 'Optimized Length')\n", (12072, 12119), False, 'from oasys.widgets import congruence\n'), ((12128, 12258), 'oasys.widgets.congruence.checkLessOrEqualThan', 'congruence.checkLessOrEqualThan', (['self.optimized_length', '(self.dim_y_plus + self.dim_y_minus)', '"""Optimized Length"""', '"""Total Length"""'], {}), "(self.optimized_length, self.dim_y_plus +\n self.dim_y_minus, 'Optimized Length', 'Total Length')\n", (12159, 12258), False, 'from oasys.widgets import congruence\n'), ((13932, 14000), 'orangecontrib.shadow.util.shadow_util.ShadowPreProcessor.read_surface_error_file', 'ShadowPreProcessor.read_surface_error_file', (['self.ms_defect_file_name'], {}), '(self.ms_defect_file_name)\n', (13974, 14000), False, 'from orangecontrib.shadow.util.shadow_util import ShadowPreProcessor\n'), ((14598, 14615), 'numpy.round', 'numpy.round', (['x', '(6)'], {}), '(x, 6)\n', (14609, 14615), False, 'import os, sys, numpy\n'), ((14617, 14634), 'numpy.round', 'numpy.round', (['y', '(6)'], {}), '(y, 6)\n', (14628, 14634), False, 'import os, sys, numpy\n'), ((14987, 15176), 'orangecontrib.shadow.util.shadow_objects.ShadowPreProcessorData', 'ShadowPreProcessorData', ([], {'error_profile_data_file': 'self.output_file_name', 'error_profile_x_dim': '(self.dim_x_plus + self.dim_x_minus)', 'error_profile_y_dim': '(self.dim_y_plus + self.dim_y_minus)'}), '(error_profile_data_file=self.output_file_name,\n error_profile_x_dim=self.dim_x_plus + self.dim_x_minus,\n error_profile_y_dim=self.dim_y_plus + self.dim_y_minus)\n', (15009, 15176), False, 'from orangecontrib.shadow.util.shadow_objects import ShadowOpticalElement, ShadowBeam, ShadowPreProcessorData\n'), ((23138, 23168), 'numpy.copy', 'numpy.copy', (['correction_profile'], {}), '(correction_profile)\n', (23148, 23168), False, 'import os, sys, numpy\n'), ((24219, 24253), 'numpy.meshgrid', 'numpy.meshgrid', (['x_coords', 'y_coords'], {}), '(x_coords, y_coords)\n', (24233, 24253), False, 'import 
os, sys, numpy\n'), ((1398, 1409), 'PyQt5.QtCore.QSettings', 'QSettings', ([], {}), '()\n', (1407, 1409), False, 'from PyQt5.QtCore import QSettings\n'), ((6558, 6674), 'oasys.widgets.gui.lineEdit', 'oasysgui.lineEdit', (['box', 'self', "(variable + '_min')", '"""Min"""'], {'labelWidth': '(50)', 'valueType': 'float', 'orientation': '"""horizontal"""'}), "(box, self, variable + '_min', 'Min', labelWidth=50,\n valueType=float, orientation='horizontal')\n", (6575, 6674), True, 'from oasys.widgets import gui as oasysgui\n'), ((6796, 6912), 'oasys.widgets.gui.lineEdit', 'oasysgui.lineEdit', (['box', 'self', "(variable + '_max')", '"""Max"""'], {'labelWidth': '(35)', 'valueType': 'float', 'orientation': '"""horizontal"""'}), "(box, self, variable + '_max', 'Max', labelWidth=35,\n valueType=float, orientation='horizontal')\n", (6813, 6912), True, 'from oasys.widgets import gui as oasysgui\n'), ((17018, 17105), 'numpy.logical_and', 'numpy.logical_and', (['(y >= -self.optimized_length / 2)', '(y <= self.optimized_length / 2)'], {}), '(y >= -self.optimized_length / 2, y <= self.\n optimized_length / 2)\n', (17035, 17105), False, 'import os, sys, numpy\n'), ((22304, 22338), 'numpy.sum', 'numpy.sum', (['(correction_profile ** 2)'], {}), '(correction_profile ** 2)\n', (22313, 22338), False, 'import os, sys, numpy\n'), ((14272, 14309), 'scipy.interpolate.interp2d', 'interp2d', (['y_e', 'x_e', 'z_e'], {'kind': '"""cubic"""'}), "(y_e, x_e, z_e, kind='cubic')\n", (14280, 14309), False, 'from scipy.interpolate import RectBivariateSpline, interp2d\n'), ((16261, 16291), 'numpy.sqrt', 'numpy.sqrt', (['(b ** 2 - 4 * a * c)'], {}), '(b ** 2 - 4 * a * c)\n', (16271, 16291), False, 'import os, sys, numpy\n'), ((22366, 22391), 'numpy.mean', 'numpy.mean', (['ideal_profile'], {}), '(ideal_profile)\n', (22376, 22391), False, 'import os, sys, numpy\n'), ((17891, 17906), 'numpy.log', 'numpy.log', (['CDLM'], {}), '(CDLM)\n', (17900, 17906), False, 'import os, sys, numpy\n'), ((17916, 17931), 
'numpy.log', 'numpy.log', (['CDLP'], {}), '(CDLP)\n', (17925, 17931), False, 'import os, sys, numpy\n'), ((17977, 17992), 'numpy.log', 'numpy.log', (['CDLM'], {}), '(CDLM)\n', (17986, 17992), False, 'import os, sys, numpy\n'), ((18002, 18017), 'numpy.log', 'numpy.log', (['CDLP'], {}), '(CDLP)\n', (18011, 18017), False, 'import os, sys, numpy\n'), ((18123, 18137), 'numpy.log', 'numpy.log', (['CDY'], {}), '(CDY)\n', (18132, 18137), False, 'import os, sys, numpy\n')] |
# -*- coding: utf-8 -*-
import time,sys,os
from netCDF4 import Dataset
import numpy as np
from scipy.interpolate import griddata
import matplotlib.pyplot as plt
import matplotlib.cm as cm
def readlatlon(file_path):
    """Parse a whitespace-delimited text file into a list of float rows.

    Each line of the file becomes one list of floats; empty lines become
    empty lists (split() of an empty line yields no tokens).
    """
    with open(file_path, 'r') as fin:
        return [[float(tok) for tok in row.split()] for row in fin]
def readASCIIfile(ASCIIfile):
    """Read an ESRI ASCII grid file.

    The first 6 lines are header pairs like ``ncols 3667``; the second token
    of each is collected (as float) into ``geoRefer``.  Remaining lines are
    parsed into float rows.

    Fixes over the previous version:
    - the file is now closed even on error (``with`` block; previously the
      handle leaked on any parse exception),
    - the convoluted ``iter``/``next``/``StopIteration`` flow is replaced by
      plain iteration,
    - blank interior lines no longer produce spurious empty ``[]`` rows.

    Returns:
        (arr, geoRefer): list of float rows, and the 6 header values
        [ncols, nrows, xllcorner, yllcorner, cellsize, NODATA_value].
    """
    arr = []
    geoRefer = []
    with open(ASCIIfile, 'r') as fh:
        for _ in range(6):
            header_line = fh.readline()
            if not header_line:  # truncated header: stop early, as before
                break
            geoRefer.append(float(header_line.split()[1]))
        for line in fh:
            if line.strip():
                arr.append([float(tok) for tok in line.split()])
    return arr, geoRefer
def FYToArray(fyfile):
    """Load the three SSI components from an FY-4A NetCDF product.

    Reads the first 1400 rows of the 'SSI' (global), 'DirSSI' (direct) and
    'DifSSI' (diffuse) variables, masking physically impossible values
    (> 2000 W/m^2) and the +/-9999 fill values as NaN.

    Fix: the dataset handle was previously never closed; it is now released
    in a ``finally`` block even if a variable is missing.

    Returns:
        numpy array of shape (3, 1400, ncols): [SSI, DirSSI, DifSSI].
    """
    data = Dataset(fyfile)
    try:
        value = []
        for var_name in ['SSI', 'DirSSI', 'DifSSI']:
            dataarr = data.variables[var_name][:1400]
            dataarr[dataarr > 2000] = np.nan
            dataarr[dataarr == -9999] = np.nan
            dataarr[dataarr == 9999] = np.nan
            value.append(dataarr)
    finally:
        data.close()
    return np.array(value)
def geoRefer2xy(geoRefer):
    """Build the grid axis coordinates from ESRI ASCII header values.

    Args:
        geoRefer: [ncols, nrows, xllcorner, yllcorner, cellsize, NODATA_value]
            as returned by readASCIIfile.

    Returns:
        (x, y): 1-D coordinate arrays of exactly ncols / nrows cell-corner
        positions.

    Fix: the previous ``np.arange(start, start + n*step, step)`` form can
    yield n+1 elements when float rounding pushes the last value just under
    the stop — using an integer-count arange guarantees the exact length.
    """
    ncols, nrows, xll, yll, cellsize, NODATA_value = geoRefer
    x = xll + np.arange(int(ncols)) * cellsize
    y = yll + np.arange(int(nrows)) * cellsize
    return x, y
def interpolat(points, values, x, y):
    """Interpolate scattered samples onto the regular grid defined by x, y.

    Args:
        points: (N, 2) array of (lon, lat) sample positions.
        values: (N,) array of sample values.
        x, y: 1-D target grid axes.

    Returns:
        2-D array of shape (len(y), len(x)); NaN outside the convex hull of
        the samples (behavior of ``griddata(..., method='linear')``).

    Fix: removed the leftover debug ``print('t01')`` / ``print('t02')``
    statements.
    """
    xv, yv = np.meshgrid(x, y)
    return griddata(points, values, (xv, yv), method='linear')
def modiGHI(a, b, r):
    """Apply the altitude-dependent percentage correction to radiation ``a``.

    Args:
        a: uncorrected radiation value(s).
        b: altitude difference(s) in meters.
        r: two coefficients [slope, offset] for the latitude band.
    """
    slope, offset = r[0], r[1]
    percent = slope * b / 1000 + offset
    return a * (1 + percent * 0.01)
def lat2row(lat):
    """Map a latitude (degrees) to a row index of the 0.01-degree grid
    whose first row center sits at latitude 9.995."""
    return int((lat - 9.995) / 0.01)
def topoCorrection(radiaArray, deltHgt):
    """Apply the per-latitude-band altitude correction to a radiation grid.

    The grid has 0.01-degree rows starting at latitude 9.995; rows are
    grouped into 5-degree bands (below 17.5, 17.5-22.5, ..., above 52.5),
    each with its own (slope, offset) coefficient pair.  Per row the
    correction is ``v * (1 + (slope*dh/1000 + offset) * 0.01)``.

    Args:
        radiaArray: 2-D radiation values, one row per grid latitude.
        deltHgt: 2-D altitude differences (m), same shape as radiaArray.

    Returns:
        numpy array of the corrected grid, same shape as the inputs.

    Raises:
        ValueError: if the two inputs have different row counts (the old
            code silently returned an empty array in that case).

    Changes: removed the leftover ``print(5)``; the nine copy-pasted branch
    blocks are replaced by a single band lookup with identical boundaries.
    """
    rr = [[2.6036, 0.0365], [2.6204, 0.0365], [2.6553, 0.0362],
          [2.6973, 0.0356], [2.7459, 0.0343], [2.8012, 0.0324],
          [2.8616, 0.0299], [2.9236, 0.0257], [2.9870, 0.0204]]
    if len(deltHgt) != len(radiaArray):
        raise ValueError("radiaArray and deltHgt must have the same number "
                         "of rows")
    # Row index of each band's lower latitude edge (17.5, 22.5, ..., 52.5);
    # row index i maps to band = number of edges at or below i.
    band_rows = [int((lat - 9.995) / 0.01)
                 for lat in (17.5, 22.5, 27.5, 32.5, 37.5, 42.5, 47.5, 52.5)]
    ghi_ri = []
    for i in range(len(deltHgt)):
        band = sum(i >= edge for edge in band_rows)
        slope, offset = rr[band]
        radia = np.array(radiaArray[i])
        dh = np.array(deltHgt[i])
        ghi_ri.append(radia * (1 + (slope * dh / 1000 + offset) * 0.01))
    return np.array(ghi_ri)
def array2NC(ncfile, value, x, y):
    """Write three 2-D grids plus their lon/lat axes into a NetCDF file.

    Args:
        ncfile: output path.
        value: sequence of three 2-D arrays shaped (len(y), len(x)),
            stored as variables V1, V2, V3.
        x: longitude axis (columns).
        y: latitude axis (rows).

    Fix: the file is now closed in a ``finally`` block, so a failed write
    no longer leaks the handle / leaves the file locked.
    """
    ncf = Dataset(ncfile, "w")
    try:
        ncf.createDimension("lat", len(y))
        ncf.createDimension("lon", len(x))
        latitudes = ncf.createVariable("lat", "f4", ("lat",))
        longitudes = ncf.createVariable("lon", "f4", ("lon",))
        value1 = ncf.createVariable("V1", "f4", ("lat", "lon"))
        value2 = ncf.createVariable("V2", "f4", ("lat", "lon"))
        value3 = ncf.createVariable("V3", "f4", ("lat", "lon"))
        latitudes[:] = y
        longitudes[:] = x
        value1[:] = value[0]
        value2[:] = value[1]
        value3[:] = value[2]
    finally:
        ncf.close()
def maparray(value1, value2, value3):
    """Display the three grids as 2x2-figure subplots and show the window."""
    fig = plt.figure()
    panels = ((221, value1, 'value1'),
              (222, value2, 'value2'),
              (223, value3, 'value3'))
    for position, grid, label in panels:
        plt.subplot(position)
        plt.imshow(grid, origin='lower')
        plt.title(label)
    plt.show()
def selectpoint(lon, lat, nrows=1400):
    """Collect grid indices whose coordinates fall inside the region of
    interest (lon in (70, 140), lat in (0, 55) — roughly China).

    Args:
        lon, lat: 2-D coordinate arrays indexed [row][col].
        nrows: number of leading rows to scan.  The default 1400 matches
            the FY-4 crop used in FYToArray; previously this was a
            hard-coded constant.

    Returns:
        list of [row, col, lat_value, lon_value] for each selected pixel,
        in row-major order.
    """
    ijList = []
    for i in range(nrows):
        for j in range(len(lat[0])):
            if (float(lon[i][j]) > 70 and float(lon[i][j]) < 140
                    and float(lat[i][j]) < 55 and float(lat[i][j]) > 0):
                ijList.append([i, j, lat[i][j], lon[i][j]])
    return ijList
def getvalue(ijList, radiValue):
    """Extract the three radiation components at each selected pixel.

    Args:
        ijList: iterable of [row, col, lat, lon] entries (indices may be
            floats, e.g. loaded back via np.loadtxt).
        radiValue: array shaped (3, nrows, ncols) with SSI/DirSSI/DifSSI.

    Returns:
        list of [lon, lat, SSI, DirSSI, DifSSI] records.
    """
    records = []
    for row, col, lat, lon in ijList:
        i = int(row)
        j = int(col)
        records.append([lon, lat,
                        radiValue[0, i, j],
                        radiValue[1, i, j],
                        radiValue[2, i, j]])
    return records
def main():
    """End-to-end FY-4A SSI pipeline: load the satellite product, extract
    the pixels inside the region of interest, interpolate them onto the
    DEM's regular lon/lat grid, apply the terrain correction, then write a
    NetCDF file and plot the results.

    NOTE(review): all input/output paths are hard-coded Windows paths, so
    this is a single-workstation script; the single-letter prints are
    progress markers.
    """
    #lat_path = r'I:\do\FY4\lat-4000-2WEI.txt'
    #lon_path = r'I:\do\FY4\lon-4000-2JING.txt'
    FY_path = r'I:\do\FY4\FY4A-_AGRI--_N_DISK_1047E_L2-_SSI-_MULT_NOM_20170801040000_20170801041459_4000M_V0001.NC'
    ddem_path = r'I:\do\FY4\D_DEM.txt'
    ncfile = r'I:\do\FY4\FY4SSI.nc'
    file3 =r'I:\do\FY4\latlonfile.txt'
    #prepare scat
    print('a')
    # radiValue: (3, 1400, ncols) array of SSI/DirSSI/DifSSI with fills as NaN
    radiValue = FYToArray(FY_path) #SSI_data,DirSSI_data,DifSSI_data=FYToArray(FY_path)
    #lat = np.loadtxt(lat_path) #np.array(readlatlon(lat_path))
    #lon = np.loadtxt(lon_path) #np.array(readlatlon(lon_path))
    print('b')
    #ijList = np.array(selectpoint(lon,lat))
    #np.savetxt(file3,ijList,fmt='%.6f',comments=' ')
    # (row, col, lat, lon) pixel table precomputed once by selectpoint()
    # (see commented-out lines above) and reloaded here to save time
    ijList = np.loadtxt(file3)
    lines = getvalue(ijList,radiValue)
    '''
    ipath = r'I:\do\FY4\201708010400_cssi.txt'
    with open(ipath,'r') as f:
        for Line in f:
            #print len(latLine.split())
            lines.append(list(map(float, Line.split())))
    '''
    print('c',lines[1000])
    #points = np.dstack((lon.ravel(),lat.ravel()))[0]
    #points = np.dstack((lon.ravel(),lat.ravel(),SSI_data.ravel(),DirSSI_data.ravel(),DifSSI_data.ravel()).squeeze()
    # columns of points: lon, lat, SSI, DirSSI, DifSSI
    points = np.array(lines).squeeze()
    print('d',points[0])
    #prepare grid
    ddem,geoRefer = readASCIIfile(ddem_path)
    nx,ny = geoRefer2xy(geoRefer)
    print('f',nx,ny,geoRefer)
    # flip rows so the grid runs south-to-north, matching the y axis
    #ddemArr = np.flipud(np.array(ddem))
    ddemArr = np.array(ddem)[::-1]
    ddemArr[ddemArr==-9999]=np.nan
    print('g',points[:,0:2].shape,points[:,2].shape)
    # interpolate the SSI column (points[:,2]) only
    interArray = interpolat(points[:,0:2],points[:,2],nx,ny)
    #terrain Correction
    print('h',interArray.shape,ddemArr.shape)
    topocorrArray = topoCorrection(interArray,ddemArr)
    #save array
    print('i',topocorrArray.shape)
    value = [interArray,ddemArr,topocorrArray]
    array2NC(ncfile,value,nx,ny)
    print('j',topocorrArray.shape,len(nx),len(ny))
    maparray(interArray,topocorrArray,ddemArr)
def test():
    """Ad-hoc scratch routine: load the DEM-difference grid directly with
    np.loadtxt (skipping the 6-line ESRI header) and render it as a filled
    contour plot.  Not part of the main pipeline.
    """
    ddem_path = r'I:\do\FY4\D_DEM.txt'
    '''
    x,y = np.meshgrid(np.arange(70, 140, 0.05), np.arange(10, 50, 0.05))
    value = np.sqrt(x ** 2 + y ** 2)
    points = np.dstack((x.ravel(),y.ravel()))[0]
    va = value.ravel()
    '''
    ddemArr = np.loadtxt(ddem_path,skiprows=6)
    #ddem,geoRefer = readASCIIfile(ddem_path)
    #ddemArr = np.array(ddem)
    #ddemArr[ddemArr==-9999.0]=np.nan
    print(ddemArr.shape,ddemArr.max(),ddemArr.min(),ddemArr[2000:2010,2000:2010])
    #print(array.type)
    #print(ddemArr,array)
    fig = plt.figure()
    # plot against row/column indices, not geographic coordinates
    y = np.arange(ddemArr.shape[0])
    x = np.arange(ddemArr.shape[1])
    xv,yv = np.meshgrid(x,y)
    print(x,y)
    plt.contourf(xv,yv,ddemArr)
    #plt.imshow(ddemArr, vmax=abs(ddemArr).max(), vmin=-abs(ddemArr).max(),cmap=cm.RdYlGn,origin='lower')
    plt.show()
# Run the full FY-4 processing pipeline when executed as a script.
if __name__ == '__main__':
    main()
| [
"matplotlib.pyplot.imshow",
"matplotlib.pyplot.contourf",
"matplotlib.pyplot.title",
"scipy.interpolate.griddata",
"netCDF4.Dataset",
"numpy.array",
"matplotlib.pyplot.figure",
"numpy.meshgrid",
"numpy.loadtxt",
"matplotlib.pyplot.subplot",
"numpy.arange",
"matplotlib.pyplot.show"
] | [((923, 938), 'netCDF4.Dataset', 'Dataset', (['fyfile'], {}), '(fyfile)\n', (930, 938), False, 'from netCDF4 import Dataset\n'), ((1216, 1231), 'numpy.array', 'np.array', (['value'], {}), '(value)\n', (1224, 1231), True, 'import numpy as np\n'), ((1329, 1377), 'numpy.arange', 'np.arange', (['xll', '(xll + ncols * cellsize)', 'cellsize'], {}), '(xll, xll + ncols * cellsize, cellsize)\n', (1338, 1377), True, 'import numpy as np\n'), ((1380, 1428), 'numpy.arange', 'np.arange', (['yll', '(yll + nrows * cellsize)', 'cellsize'], {}), '(yll, yll + nrows * cellsize, cellsize)\n', (1389, 1428), True, 'import numpy as np\n'), ((1509, 1526), 'numpy.meshgrid', 'np.meshgrid', (['x', 'y'], {}), '(x, y)\n', (1520, 1526), True, 'import numpy as np\n'), ((1602, 1653), 'scipy.interpolate.griddata', 'griddata', (['points', 'values', '(xv, yv)'], {'method': '"""linear"""'}), "(points, values, (xv, yv), method='linear')\n", (1610, 1653), False, 'from scipy.interpolate import griddata\n'), ((3422, 3438), 'numpy.array', 'np.array', (['ghi_ri'], {}), '(ghi_ri)\n', (3430, 3438), True, 'import numpy as np\n'), ((3531, 3551), 'netCDF4.Dataset', 'Dataset', (['ncfile', '"""w"""'], {}), "(ncfile, 'w')\n", (3538, 3551), False, 'from netCDF4 import Dataset\n'), ((4222, 4234), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (4232, 4234), True, 'import matplotlib.pyplot as plt\n'), ((4239, 4255), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(221)'], {}), '(221)\n', (4250, 4255), True, 'import matplotlib.pyplot as plt\n'), ((4260, 4294), 'matplotlib.pyplot.imshow', 'plt.imshow', (['value1'], {'origin': '"""lower"""'}), "(value1, origin='lower')\n", (4270, 4294), True, 'import matplotlib.pyplot as plt\n'), ((4299, 4318), 'matplotlib.pyplot.title', 'plt.title', (['"""value1"""'], {}), "('value1')\n", (4308, 4318), True, 'import matplotlib.pyplot as plt\n'), ((4323, 4339), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(222)'], {}), '(222)\n', (4334, 4339), True, 'import 
matplotlib.pyplot as plt\n'), ((4344, 4378), 'matplotlib.pyplot.imshow', 'plt.imshow', (['value2'], {'origin': '"""lower"""'}), "(value2, origin='lower')\n", (4354, 4378), True, 'import matplotlib.pyplot as plt\n'), ((4383, 4402), 'matplotlib.pyplot.title', 'plt.title', (['"""value2"""'], {}), "('value2')\n", (4392, 4402), True, 'import matplotlib.pyplot as plt\n'), ((4407, 4423), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(223)'], {}), '(223)\n', (4418, 4423), True, 'import matplotlib.pyplot as plt\n'), ((4428, 4462), 'matplotlib.pyplot.imshow', 'plt.imshow', (['value3'], {'origin': '"""lower"""'}), "(value3, origin='lower')\n", (4438, 4462), True, 'import matplotlib.pyplot as plt\n'), ((4467, 4486), 'matplotlib.pyplot.title', 'plt.title', (['"""value3"""'], {}), "('value3')\n", (4476, 4486), True, 'import matplotlib.pyplot as plt\n'), ((4495, 4505), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4503, 4505), True, 'import matplotlib.pyplot as plt\n'), ((5786, 5803), 'numpy.loadtxt', 'np.loadtxt', (['file3'], {}), '(file3)\n', (5796, 5803), True, 'import numpy as np\n'), ((7328, 7361), 'numpy.loadtxt', 'np.loadtxt', (['ddem_path'], {'skiprows': '(6)'}), '(ddem_path, skiprows=6)\n', (7338, 7361), True, 'import numpy as np\n'), ((7626, 7638), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (7636, 7638), True, 'import matplotlib.pyplot as plt\n'), ((7647, 7674), 'numpy.arange', 'np.arange', (['ddemArr.shape[0]'], {}), '(ddemArr.shape[0])\n', (7656, 7674), True, 'import numpy as np\n'), ((7683, 7710), 'numpy.arange', 'np.arange', (['ddemArr.shape[1]'], {}), '(ddemArr.shape[1])\n', (7692, 7710), True, 'import numpy as np\n'), ((7723, 7740), 'numpy.meshgrid', 'np.meshgrid', (['x', 'y'], {}), '(x, y)\n', (7734, 7740), True, 'import numpy as np\n'), ((7759, 7788), 'matplotlib.pyplot.contourf', 'plt.contourf', (['xv', 'yv', 'ddemArr'], {}), '(xv, yv, ddemArr)\n', (7771, 7788), True, 'import matplotlib.pyplot as plt\n'), ((7897, 7907), 
'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7905, 7907), True, 'import matplotlib.pyplot as plt\n'), ((6510, 6524), 'numpy.array', 'np.array', (['ddem'], {}), '(ddem)\n', (6518, 6524), True, 'import numpy as np\n'), ((6272, 6287), 'numpy.array', 'np.array', (['lines'], {}), '(lines)\n', (6280, 6287), True, 'import numpy as np\n'), ((2227, 2250), 'numpy.array', 'np.array', (['radiaArray[i]'], {}), '(radiaArray[i])\n', (2235, 2250), True, 'import numpy as np\n'), ((2251, 2271), 'numpy.array', 'np.array', (['deltHgt[i]'], {}), '(deltHgt[i])\n', (2259, 2271), True, 'import numpy as np\n'), ((2371, 2394), 'numpy.array', 'np.array', (['radiaArray[i]'], {}), '(radiaArray[i])\n', (2379, 2394), True, 'import numpy as np\n'), ((2395, 2415), 'numpy.array', 'np.array', (['deltHgt[i]'], {}), '(deltHgt[i])\n', (2403, 2415), True, 'import numpy as np\n'), ((2515, 2538), 'numpy.array', 'np.array', (['radiaArray[i]'], {}), '(radiaArray[i])\n', (2523, 2538), True, 'import numpy as np\n'), ((2539, 2559), 'numpy.array', 'np.array', (['deltHgt[i]'], {}), '(deltHgt[i])\n', (2547, 2559), True, 'import numpy as np\n'), ((2659, 2682), 'numpy.array', 'np.array', (['radiaArray[i]'], {}), '(radiaArray[i])\n', (2667, 2682), True, 'import numpy as np\n'), ((2683, 2703), 'numpy.array', 'np.array', (['deltHgt[i]'], {}), '(deltHgt[i])\n', (2691, 2703), True, 'import numpy as np\n'), ((2803, 2826), 'numpy.array', 'np.array', (['radiaArray[i]'], {}), '(radiaArray[i])\n', (2811, 2826), True, 'import numpy as np\n'), ((2827, 2847), 'numpy.array', 'np.array', (['deltHgt[i]'], {}), '(deltHgt[i])\n', (2835, 2847), True, 'import numpy as np\n'), ((2947, 2970), 'numpy.array', 'np.array', (['radiaArray[i]'], {}), '(radiaArray[i])\n', (2955, 2970), True, 'import numpy as np\n'), ((2971, 2991), 'numpy.array', 'np.array', (['deltHgt[i]'], {}), '(deltHgt[i])\n', (2979, 2991), True, 'import numpy as np\n'), ((3091, 3114), 'numpy.array', 'np.array', (['radiaArray[i]'], {}), '(radiaArray[i])\n', 
(3099, 3114), True, 'import numpy as np\n'), ((3115, 3135), 'numpy.array', 'np.array', (['deltHgt[i]'], {}), '(deltHgt[i])\n', (3123, 3135), True, 'import numpy as np\n'), ((3235, 3258), 'numpy.array', 'np.array', (['radiaArray[i]'], {}), '(radiaArray[i])\n', (3243, 3258), True, 'import numpy as np\n'), ((3259, 3279), 'numpy.array', 'np.array', (['deltHgt[i]'], {}), '(deltHgt[i])\n', (3267, 3279), True, 'import numpy as np\n'), ((3358, 3381), 'numpy.array', 'np.array', (['radiaArray[i]'], {}), '(radiaArray[i])\n', (3366, 3381), True, 'import numpy as np\n'), ((3382, 3402), 'numpy.array', 'np.array', (['deltHgt[i]'], {}), '(deltHgt[i])\n', (3390, 3402), True, 'import numpy as np\n')] |
from __future__ import print_function
import collections
import math
import os
import pickle
import sys
import time
import numpy
import torch
from sklearn.utils import compute_class_weight
from torch.nn.utils import clip_grad_norm
from torch.utils.data import DataLoader
from nldrp.dnn.config import DNN_BASE_PATH
from nldrp.dnn.logger.experiment import Metric, Experiment
from nldrp.dnn.logger.inspection import Inspector
from nldrp.dnn.util.multi_gpu import get_gpu_id
def sort_batch(lengths):
"""
Sort batch data and labels by length.
Useful for variable length inputs, for utilizing PackedSequences
Args:
lengths (nn.Tensor): tensor containing the lengths for the data
Returns:
- sorted lengths Tensor
- sort (callable) which will sort a given iterable according to lengths
- unsort (callable) which will revert a given iterable to its
original order
"""
batch_size = lengths.size(0)
sorted_lengths, sorted_idx = lengths.sort()
_, original_idx = sorted_idx.sort(0, descending=True)
reverse_idx = torch.linspace(batch_size - 1, 0, batch_size).long()
sorted_lengths = sorted_lengths[reverse_idx]
def sort(iterable):
if iterable.is_cuda:
return iterable[sorted_idx.cuda(get_gpu_id())][
reverse_idx.cuda(get_gpu_id())]
else:
return iterable[sorted_idx][reverse_idx]
def unsort(iterable):
if iterable.is_cuda:
return iterable[reverse_idx.cuda(get_gpu_id())][
original_idx.cuda(get_gpu_id())][
reverse_idx.cuda(get_gpu_id())]
else:
return iterable[reverse_idx][original_idx][reverse_idx]
return sorted_lengths, sort, unsort
def epoch_progress(loss, epoch, batch, batch_size, dataset_size):
batches = math.ceil(float(dataset_size) / batch_size)
count = batch * batch_size
bar_len = 40
filled_len = int(round(bar_len * count / float(dataset_size)))
bar = '=' * filled_len + '-' * (bar_len - filled_len)
status = 'Epoch {}, Batch Loss ({}): {:.4f}'.format(epoch, batch, loss)
_progress_str = "\r \r [{}] ...{}".format(bar, status)
sys.stdout.write(_progress_str)
sys.stdout.flush()
if batch == batches:
print()
def get_class_labels(y):
"""
Get the class labels
:param y: list of labels, ex. ['positive', 'negative', 'positive',
'neutral', 'positive', ...]
:return: sorted unique class labels
"""
return numpy.unique(y)
def get_class_weights(y):
"""
Returns the normalized weights for each class
based on the frequencies of the samples
:param y: list of true labels (the labels must be hashable)
:return: dictionary with the weight for each class
"""
weights = compute_class_weight('balanced', numpy.unique(y), y)
d = {c: w for c, w in zip(numpy.unique(y), weights)}
return d
def class_weigths(targets, to_pytorch=False):
w = get_class_weights(targets)
labels = get_class_labels(targets)
if to_pytorch:
return torch.FloatTensor([w[l] for l in sorted(labels)])
return labels
def _get_predictions(posteriors, task):
"""
Args:
posteriors (numpy.array):
Returns:
"""
if task == "clf":
if posteriors.shape[1] > 1:
predicted = numpy.argmax(posteriors, 1)
else:
predicted = numpy.clip(numpy.sign(posteriors), a_min=0,
a_max=None)
elif task == "multi-clf":
predicted = numpy.clip(numpy.sign(posteriors), a_min=0,
a_max=None)
elif task == "reg":
predicted = posteriors
else:
raise ValueError
return predicted
def predict(model, pipeline, dataloader, task,
mode="eval",
label_transformer=None):
"""
Pass a dataset(dataloader) to the model and get the predictions
Args:
dataloader (DataLoader): a torch DataLoader which will be used for
evaluating the performance of the model
mode (): set the operation mode of the model.
- "eval" : disable regularization layers
- "train" : enable regularization layers (MC eval)
model ():
pipeline ():
task ():
label_transformer ():
Returns:
"""
if mode == "eval":
model.eval()
elif mode == "train":
model.train()
else:
raise ValueError
posteriors = []
y_pred = []
y = []
attentions = []
total_loss = 0
for i_batch, sample_batched in enumerate(dataloader, 1):
outputs, labels, atts, loss = pipeline(model, sample_batched)
if loss is not None:
total_loss += loss.data[0]
# get the model posteriors
posts = outputs.data.cpu().numpy()
# get the actual predictions (classes and so on...)
if len(posts.shape) == 1:
predicted = _get_predictions(numpy.expand_dims(posts, axis=0), task)
else:
predicted = _get_predictions(posts, task)
# to numpy
labels = labels.data.cpu().numpy().squeeze().tolist()
predicted = predicted.squeeze().tolist()
posts = posts.squeeze().tolist()
if atts is not None:
atts = atts.data.cpu().numpy().squeeze().tolist()
if not isinstance(labels, collections.Iterable):
labels = [labels]
predicted = [predicted]
posts = [posts]
if atts is not None:
atts = [atts]
# make transformations to the predictions
if label_transformer is not None:
labels = [label_transformer.inverse(x) for x in labels]
labels = numpy.array(labels)
predicted = [label_transformer.inverse(x) for x in predicted]
predicted = numpy.array(predicted)
y.extend(labels)
y_pred.extend(predicted)
posteriors.extend(posts)
if atts is not None:
attentions.extend(atts)
avg_loss = total_loss / i_batch
return avg_loss, (y, y_pred), posteriors, attentions
def mc_predict(model, pipeline, dataloader, task, label_transformer=None,
runs=100):
"""
Monte Carlo predict
Args:
model ():
pipeline ():
dataloader ():
task ():
label_transformer ():
runs ():
Returns:
"""
y = None
posteriors = []
avg_losses = []
for i in range(runs):
avg_loss, (y, _), _posteriors, attentions = predict(model, pipeline,
dataloader,
task, "train",
label_transformer)
posteriors.append(_posteriors)
avg_losses.append(avg_loss)
# convert to numpy.ndarray in order to utilize scipy's methods
posteriors = numpy.array(posteriors)
means = numpy.mean(posteriors, axis=0)
# stds = numpy.std(posteriors, axis=0)
predictions = _get_predictions(means, task)
return numpy.mean(avg_losses), (y, predictions)
class LabelTransformer:
def __init__(self, map, inv_map=None):
"""
Class for creating a custom mapping of the labels to ids and back
Args:
map (dict):
inv_map (dict):
"""
self.map = map
self.inv_map = inv_map
if self.inv_map is None:
self.inv_map = {v: k for k, v in self.map.items()}
def transform(self, label):
return self.map[label]
def inverse(self, label):
return self.inv_map[label]
class MetricWatcher:
"""
Base class which monitors a given metric on a Trainer object
and check whether the model has been improved according to this metric
"""
def __init__(self, metric, mode="min", base=None):
self.best = base
self.metric = metric
self.mode = mode
self.scores = None # will be filled by the Trainer instance
def has_improved(self):
# get the latest value for the desired metric
value = self.scores[self.metric][-1]
# init best value
if self.best is None or math.isnan(self.best):
self.best = value
return True
if (
self.mode == "min" and value < self.best
or
self.mode == "max" and value > self.best
): # the performance of the model has been improved :)
self.best = value
return True
else:
# no improvement :(
return False
class EarlyStop(MetricWatcher):
def __init__(self, metric, mode="min", patience=0):
"""
Args:
patience (int): for how many epochs to wait, for the performance
to improve.
mode (str, optional): Possible values {"min","max"}.
- "min": save the model if the monitored metric is decreased.
- "max": save the model if the monitored metric is increased.
"""
MetricWatcher.__init__(self, metric, mode)
self.patience = patience
self.patience_left = patience
self.best = None
def stop(self):
"""
Check whether we should stop the training
"""
if self.has_improved():
self.patience_left = self.patience # reset patience
else:
self.patience_left -= 1 # decrease patience
print(
"patience left:{}, best({})".format(self.patience_left, self.best))
# if no more patience left, then stop training
return self.patience_left < 0
class Checkpoint(MetricWatcher):
def __init__(self, name, model, metric, model_conf, mode="min",
dir=None,
base=None,
timestamp=False,
scorestamp=False,
keep_best=False):
"""
Args:
model (nn.Module):
name (str): the name of the model
mode (str, optional): Possible values {"min","max"}.
- "min": save the model if the monitored metric is decreased.
- "max": save the model if the monitored metric is increased.
keep_best (bool): if True then keep only the best checkpoint
timestamp (bool): if True add a timestamp to the checkpoint files
scorestamp (bool): if True add the score to the checkpoint files
dir (str): the directory in which the checkpoint files will be saved
"""
MetricWatcher.__init__(self, metric, mode, base)
self.name = name
self.dir = dir
self.model = model
self.model_conf = model_conf
self.timestamp = timestamp
self.scorestamp = scorestamp
self.keep_best = keep_best
self.last_saved = None
if self.dir is None:
self.dir = os.path.join(DNN_BASE_PATH, "trained")
def _define_cp_name(self):
"""
Define the checkpoint name
Returns:
"""
fname = [self.name]
if self.scorestamp:
score_str = "{:.4f}".format(self.best)
fname.append(score_str)
if self.timestamp:
date_str = time.strftime("%Y-%m-%d_%H:%M")
fname.append(date_str)
return "_".join(fname)
def _save_checkpoint(self):
"""
A checkpoint saves:
- the model itself
- the model's config, which is required for loading related data,
such the word embeddings, on which it was trained
Returns:
"""
if not os.path.exists(self.dir):
os.makedirs(self.dir)
name = self._define_cp_name()
file_cp = os.path.join(self.dir, name + ".model")
file_conf = os.path.join(self.dir, name + ".conf")
# remove previous checkpoint files, if keep_best is True
if self.keep_best and self.last_saved is not None:
os.remove(self.last_saved["model"])
os.remove(self.last_saved["config"])
# update last saved checkpoint files
self.last_saved = {
"model": file_cp,
"config": file_conf
}
# save the checkpoint files (model, model config)
torch.save(self.model, file_cp)
with open(file_conf, 'wb') as f:
pickle.dump(self.model_conf, f)
def check(self):
"""
Check whether the model has improved and if so, then save a checkpoint
Returns:
"""
if self.has_improved():
print("Improved model ({}:{:.4f})! "
"Saving checkpoint...".format(self.metric, self.best))
self._save_checkpoint()
class Trainer:
def __init__(self, model,
train_set,
optimizer,
pipeline,
config,
train_batch_size=128,
eval_batch_size=512,
task="clf",
use_exp=False,
inspect_weights=False,
metrics=None,
val_set=None,
eval_train=True,
checkpoint=None,
early_stopping=None):
"""
The Trainer is responsible for training a model.
It is a stateful object.
It holds a set of variables that helps us to abstract
the training process.
Args:
use_exp (bool): if True, use the integrated experiment
manager. In order to utilize the visualizations provided
by the experiment manager you should:
- run `python -m visdom.server` in a terminal.
- access visdom by going to http://localhost:8097
https://github.com/facebookresearch/visdom#usage
model (nn.Module): the pytorch model
train_set (BaseDataset, dict): a
optimizer ():
pipeline (callable): a callback function, which defines the training
pipeline. it must return 3 things (outputs, labels, loss):
- outputs: the outputs (predictions) of the model
- labels: the gold labels
- loss: the loss
config (): the config instance with the hyperparams of the model
train_batch_size (int): the batch size that will be used when
training a model
eval_batch_size (int): the batch size that will be used when
evaluating a model
task (string): you can choose between {"clf", "reg"},
for classification and regression respectively.
metrics (dict): a dictionary with the metrics that will be used
for evaluating the performance of the model.
- key: string with the name of the metric.
- value: a callable, with arguments (y, y_hat) tha returns a
score.
val_set (BaseDataset, dict): optional validation dataset
eval_train (bool): if True, the at the end of each epoch evaluate
the performance of the model on the training dataset.
early_stopping (EarlyStop):
checkpoint (Checkpoint):
"""
self.use_exp = use_exp
self.inspect_weights = inspect_weights
self.model = model
self.task = task
self.train_set = train_set
self.val_set = val_set
self.config = config
self.eval_train = eval_train
self.train_batch_size = train_batch_size
self.eval_batch_size = eval_batch_size
self.optimizer = optimizer
self.pipeline = pipeline
self.checkpoint = checkpoint
self.early_stopping = early_stopping
self.metrics = {} if metrics is None else metrics
self.running_loss = 0.0
self.epoch = 0
self._init_watched_metrics()
self.train_loaders, self.val_loader = self._init_dataloaders()
if use_exp:
self.experiment = self._init_experiment()
if self.inspect_weights:
self.inspector = Inspector(model, ["std", "mean"])
def _validate_config(self):
pass
def _init_watched_metrics(self):
self.scores = {k: [] for k, v in self.metrics.items()}
# we need to attach the metrics dictionary
# on checkpoint and early_stopping objects
if self.checkpoint is not None:
self.checkpoint.scores = self.scores
if self.early_stopping is not None:
self.early_stopping.scores = self.scores
def _update_watched_metrics(self):
pass
def _init_experiment_tag(self, dataset, tags, tag):
if isinstance(dataset, dict):
for _name, _dataset in dataset.items():
tags.append("{}_{}".format(tag, _name))
else:
tags.append(tag)
def _init_experiment(self):
"""
init the experiment,
which will visualize and log the performance of the model
Returns:
"""
# 1 - define tags
tags = []
if self.eval_train:
self._init_experiment_tag(self.train_set, tags, "train")
if self.val_set is not None:
self._init_experiment_tag(self.val_set, tags, "val")
# 2 - define experiment
experiment = Experiment(name=self.config["name"],
desc=str(self.model),
hparams=self.config)
# 3 - define metrics
for name, metric in self.metrics.items():
experiment.add_metric(Metric(name=name, tags=tags,
vis_type="line"))
experiment.add_metric(Metric(name="loss", tags=tags,
vis_type="line"))
return experiment
def _update_experiment(self, scores, tag):
pass
def _init_dataloaders_train(self, dataset, num_workers=4):
loader = {
"train": DataLoader(dataset,
batch_size=self.train_batch_size,
shuffle=True,
num_workers=num_workers),
"val": DataLoader(dataset,
batch_size=self.eval_batch_size,
num_workers=num_workers),
}
return loader
def _init_dataloaders(self, num_workers=4):
"""
define different dataloaders, for each dataset and mode
we use a different dataloader for training and evaluation, in order
to be able to use different batch sizes, due to a limitation of
the pytorch's DataLoader class.
Returns:
"""
train_loader = None
val_loader = None
# train_loader can be:
# - a dict of (train, val) dataloaders for a single dataset
# - a dict of (dataset, (train, val)) dataloaders
# for a collection of datasets
if isinstance(self.train_set, dict):
train_loader = {}
for name, dataset in self.train_set.items():
train_loader[name] = self._init_dataloaders_train(dataset,
num_workers)
else:
train_loader = self._init_dataloaders_train(self.train_set,
num_workers)
if self.val_set is not None:
if isinstance(self.val_set, dict):
val_loader = {}
# loop over all validation datasets
for name, dataset in self.val_set.items():
val_loader[name] = DataLoader(
dataset,
batch_size=self.eval_batch_size,
num_workers=num_workers)
else:
val_loader = DataLoader(self.val_set,
batch_size=self.eval_batch_size,
num_workers=num_workers)
return train_loader, val_loader
def _model_train_loader(self, loader):
"""
Run a pass of the model on a given dataloader
Args:
loader ():
Returns:
"""
running_loss = 0.0
for i_batch, sample_batched in enumerate(loader, 1):
# 1 - zero the gradients
self.optimizer.zero_grad()
# 2 - compute loss using the provided pipeline
outputs, labels, attentions, loss = self.pipeline(self.model,
sample_batched)
# 3 - backward pass: compute gradient wrt model parameters
loss.backward()
# just to be sure... clip gradients with norm > N.
# apply it only if the model has an RNN in it.
if len([m for m in self.model.modules()
if hasattr(m, 'bidirectional')]) > 0:
clip_grad_norm(self.model.parameters(),
self.config["clip_norm"])
# 4 - update weights
self.optimizer.step()
running_loss += loss.data[0]
# print statistics
epoch_progress(loss=loss.data[0],
epoch=self.epoch,
batch=i_batch,
batch_size=loader.batch_size,
dataset_size=len(self.train_set))
return running_loss
def model_train(self):
"""
Train the model for one epoch (on one or more dataloaders)
Returns:
"""
# switch to train mode -> enable regularization layers, such as Dropout
self.model.train()
self.epoch += 1
running_loss = 0.0
if isinstance(self.train_set, dict):
for name, loader in self.train_loaders.items():
running_loss += self._model_train_loader(loader["train"])
else:
running_loss += self._model_train_loader(
self.train_loaders["train"])
return running_loss
def _calc_scores(self, y, y_pred):
return {name: metric(y, y_pred)
for name, metric in self.metrics.items()}
def _model_eval_loader(self, loader, tag):
"""
Evaluate a dataloader
and update the corresponding scores and metrics
Args:
loader ():
tag ():
Returns:
"""
# 1 - evaluate the dataloader
label_transformer = self._infer_label_transformer()
avg_loss, (y, y_pred), posteriors, attentions = predict(
self.model,
self.pipeline,
loader,
self.task,
"eval",
label_transformer)
# 2 - calculate its performance according to each metric
scores = self._calc_scores(y, y_pred)
# 3 - print the scores
self._print_scores(scores, avg_loss, tag.upper())
# TEST Monte Carlo Evaluation
# if tag == "val":
# mc_avg_loss, (mc_y, mc_y_pred) = self.mc_predict(loader)
# mc_scores = self._calc_scores(mc_y, mc_y_pred)
# self.print_scores(mc_scores, mc_avg_loss, "MC_" + tag.upper())
# 4 - update the corresponding values in the experiment
if self.use_exp:
for score, value in scores.items():
self.experiment.metrics[score].append(tag, value)
self.experiment.metrics["loss"].append(tag, avg_loss)
return avg_loss, scores
def _aggregate_scores(self, scores, aggregate):
aggs = {k: [score[k] for score in scores]
for k in scores[0].keys()}
aggs = {k: aggregate(v) for k, v in aggs.items()}
return aggs
def model_eval(self):
"""
Evaluate the model on each dataset and update the corresponding metrics.
The function is normally called at the end of each epoch.
Returns:
"""
# 1 - evaluate on train datasets
if self.eval_train:
if isinstance(self.train_set, dict):
for name, loader in self.train_loaders.items():
tag = "train_{}".format(name)
self._model_eval_loader(loader["val"], tag)
else:
self._model_eval_loader(self.train_loaders["val"], "train")
# 2 - evaluate on validation datasets
if self.val_loader is not None:
if isinstance(self.val_set, dict):
loss = []
scores = []
for name, loader in self.val_loader.items():
tag = "val_{}".format(name)
_loss, _scores = self._model_eval_loader(loader, tag)
loss.append(_loss)
scores.append(_scores)
agg_scores = self._aggregate_scores(scores, numpy.mean)
for name, value in agg_scores.items():
self.scores[name].append(value)
else:
loss, scores = self._model_eval_loader(self.val_loader, "val")
for name, value in scores.items():
self.scores[name].append(value)
if self.use_exp:
self.experiment.update_plots()
if self.inspect_weights:
self.inspector.update_state(self.model)
def _print_scores(self, scores, loss, tag):
"""
Log the scores of a dataset (tag) on the console
Args:
scores (): a dictionary of (metric_name, value)
loss (): the loss of the model on an epoch
tag (): the dataset (name)
Returns:
"""
print("\t{:6s} - ".format(tag), end=" ")
for name, value in scores.items():
print(name, '{:.4f}'.format(value), end=", ")
print(" Loss:{:.4f}".format(loss))
def _infer_label_transformer(self):
# both datasets (train,val) should have the same transformer
if isinstance(self.train_set, dict):
# pick any dataset from the dict.
# All should have the same transformer
dataset = self.train_set[list(self.train_set.keys())[0]]
return dataset.label_transformer
else:
return self.train_set.label_transformer
| [
"sys.stdout.write",
"numpy.array",
"os.remove",
"numpy.mean",
"os.path.exists",
"nldrp.dnn.logger.experiment.Metric",
"nldrp.dnn.util.multi_gpu.get_gpu_id",
"sys.stdout.flush",
"numpy.argmax",
"numpy.sign",
"torch.save",
"pickle.dump",
"numpy.unique",
"os.makedirs",
"time.strftime",
"o... | [((2200, 2231), 'sys.stdout.write', 'sys.stdout.write', (['_progress_str'], {}), '(_progress_str)\n', (2216, 2231), False, 'import sys\n'), ((2236, 2254), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (2252, 2254), False, 'import sys\n'), ((2551, 2566), 'numpy.unique', 'numpy.unique', (['y'], {}), '(y)\n', (2563, 2566), False, 'import numpy\n'), ((7031, 7054), 'numpy.array', 'numpy.array', (['posteriors'], {}), '(posteriors)\n', (7042, 7054), False, 'import numpy\n'), ((7068, 7098), 'numpy.mean', 'numpy.mean', (['posteriors'], {'axis': '(0)'}), '(posteriors, axis=0)\n', (7078, 7098), False, 'import numpy\n'), ((2872, 2887), 'numpy.unique', 'numpy.unique', (['y'], {}), '(y)\n', (2884, 2887), False, 'import numpy\n'), ((7203, 7225), 'numpy.mean', 'numpy.mean', (['avg_losses'], {}), '(avg_losses)\n', (7213, 7225), False, 'import numpy\n'), ((11915, 11954), 'os.path.join', 'os.path.join', (['self.dir', "(name + '.model')"], {}), "(self.dir, name + '.model')\n", (11927, 11954), False, 'import os\n'), ((11975, 12013), 'os.path.join', 'os.path.join', (['self.dir', "(name + '.conf')"], {}), "(self.dir, name + '.conf')\n", (11987, 12013), False, 'import os\n'), ((12449, 12480), 'torch.save', 'torch.save', (['self.model', 'file_cp'], {}), '(self.model, file_cp)\n', (12459, 12480), False, 'import torch\n'), ((1090, 1135), 'torch.linspace', 'torch.linspace', (['(batch_size - 1)', '(0)', 'batch_size'], {}), '(batch_size - 1, 0, batch_size)\n', (1104, 1135), False, 'import torch\n'), ((3389, 3416), 'numpy.argmax', 'numpy.argmax', (['posteriors', '(1)'], {}), '(posteriors, 1)\n', (3401, 3416), False, 'import numpy\n'), ((5799, 5818), 'numpy.array', 'numpy.array', (['labels'], {}), '(labels)\n', (5810, 5818), False, 'import numpy\n'), ((5917, 5939), 'numpy.array', 'numpy.array', (['predicted'], {}), '(predicted)\n', (5928, 5939), False, 'import numpy\n'), ((8329, 8350), 'math.isnan', 'math.isnan', (['self.best'], {}), '(self.best)\n', (8339, 8350), False, 
'import math\n'), ((11067, 11105), 'os.path.join', 'os.path.join', (['DNN_BASE_PATH', '"""trained"""'], {}), "(DNN_BASE_PATH, 'trained')\n", (11079, 11105), False, 'import os\n'), ((11410, 11441), 'time.strftime', 'time.strftime', (['"""%Y-%m-%d_%H:%M"""'], {}), "('%Y-%m-%d_%H:%M')\n", (11423, 11441), False, 'import time\n'), ((11798, 11822), 'os.path.exists', 'os.path.exists', (['self.dir'], {}), '(self.dir)\n', (11812, 11822), False, 'import os\n'), ((11836, 11857), 'os.makedirs', 'os.makedirs', (['self.dir'], {}), '(self.dir)\n', (11847, 11857), False, 'import os\n'), ((12151, 12186), 'os.remove', 'os.remove', (["self.last_saved['model']"], {}), "(self.last_saved['model'])\n", (12160, 12186), False, 'import os\n'), ((12199, 12235), 'os.remove', 'os.remove', (["self.last_saved['config']"], {}), "(self.last_saved['config'])\n", (12208, 12235), False, 'import os\n'), ((12534, 12565), 'pickle.dump', 'pickle.dump', (['self.model_conf', 'f'], {}), '(self.model_conf, f)\n', (12545, 12565), False, 'import pickle\n'), ((18250, 18346), 'torch.utils.data.DataLoader', 'DataLoader', (['dataset'], {'batch_size': 'self.train_batch_size', 'shuffle': '(True)', 'num_workers': 'num_workers'}), '(dataset, batch_size=self.train_batch_size, shuffle=True,\n num_workers=num_workers)\n', (18260, 18346), False, 'from torch.utils.data import DataLoader\n'), ((18459, 18536), 'torch.utils.data.DataLoader', 'DataLoader', (['dataset'], {'batch_size': 'self.eval_batch_size', 'num_workers': 'num_workers'}), '(dataset, batch_size=self.eval_batch_size, num_workers=num_workers)\n', (18469, 18536), False, 'from torch.utils.data import DataLoader\n'), ((2923, 2938), 'numpy.unique', 'numpy.unique', (['y'], {}), '(y)\n', (2935, 2938), False, 'import numpy\n'), ((3466, 3488), 'numpy.sign', 'numpy.sign', (['posteriors'], {}), '(posteriors)\n', (3476, 3488), False, 'import numpy\n'), ((3608, 3630), 'numpy.sign', 'numpy.sign', (['posteriors'], {}), '(posteriors)\n', (3618, 3630), False, 'import numpy\n'), 
((5031, 5063), 'numpy.expand_dims', 'numpy.expand_dims', (['posts'], {'axis': '(0)'}), '(posts, axis=0)\n', (5048, 5063), False, 'import numpy\n'), ((16351, 16384), 'nldrp.dnn.logger.inspection.Inspector', 'Inspector', (['model', "['std', 'mean']"], {}), "(model, ['std', 'mean'])\n", (16360, 16384), False, 'from nldrp.dnn.logger.inspection import Inspector\n'), ((17846, 17891), 'nldrp.dnn.logger.experiment.Metric', 'Metric', ([], {'name': 'name', 'tags': 'tags', 'vis_type': '"""line"""'}), "(name=name, tags=tags, vis_type='line')\n", (17852, 17891), False, 'from nldrp.dnn.logger.experiment import Metric, Experiment\n'), ((17968, 18015), 'nldrp.dnn.logger.experiment.Metric', 'Metric', ([], {'name': '"""loss"""', 'tags': 'tags', 'vis_type': '"""line"""'}), "(name='loss', tags=tags, vis_type='line')\n", (17974, 18015), False, 'from nldrp.dnn.logger.experiment import Metric, Experiment\n'), ((20134, 20221), 'torch.utils.data.DataLoader', 'DataLoader', (['self.val_set'], {'batch_size': 'self.eval_batch_size', 'num_workers': 'num_workers'}), '(self.val_set, batch_size=self.eval_batch_size, num_workers=\n num_workers)\n', (20144, 20221), False, 'from torch.utils.data import DataLoader\n'), ((1340, 1352), 'nldrp.dnn.util.multi_gpu.get_gpu_id', 'get_gpu_id', ([], {}), '()\n', (1350, 1352), False, 'from nldrp.dnn.util.multi_gpu import get_gpu_id\n'), ((1622, 1634), 'nldrp.dnn.util.multi_gpu.get_gpu_id', 'get_gpu_id', ([], {}), '()\n', (1632, 1634), False, 'from nldrp.dnn.util.multi_gpu import get_gpu_id\n'), ((19936, 20013), 'torch.utils.data.DataLoader', 'DataLoader', (['dataset'], {'batch_size': 'self.eval_batch_size', 'num_workers': 'num_workers'}), '(dataset, batch_size=self.eval_batch_size, num_workers=num_workers)\n', (19946, 20013), False, 'from torch.utils.data import DataLoader\n'), ((1291, 1303), 'nldrp.dnn.util.multi_gpu.get_gpu_id', 'get_gpu_id', ([], {}), '()\n', (1301, 1303), False, 'from nldrp.dnn.util.multi_gpu import get_gpu_id\n'), ((1573, 1585), 
'nldrp.dnn.util.multi_gpu.get_gpu_id', 'get_gpu_id', ([], {}), '()\n', (1583, 1585), False, 'from nldrp.dnn.util.multi_gpu import get_gpu_id\n'), ((1523, 1535), 'nldrp.dnn.util.multi_gpu.get_gpu_id', 'get_gpu_id', ([], {}), '()\n', (1533, 1535), False, 'from nldrp.dnn.util.multi_gpu import get_gpu_id\n')] |
"""Visualize the annotated SVs"""
# standard libraries
import argparse
import pathlib
import numpy as np
import pandas as pd
# own libraries
from lib import plotting
# plotting
import matplotlib.pyplot as plt
def argparser():
parser = argparse.ArgumentParser(description="Visualize annotated CNVs.")
parser.add_argument('-c1', '--cnvs_1',
help='Path to the first CNV csv-file', required=True)
parser.add_argument('-c2', '--cnvs_2',
help='Path to the second CNV csv-file', required=True)
parser.add_argument('-o', '--output',
help='Output path for generated figure.', required=True)
return parser.parse_args()
def main():
# parse input arguments
args = argparser()
cnv_1_file_path = pathlib.Path(args.cnvs_1)
cnv_2_file_path = pathlib.Path(args.cnvs_2)
# read annotated CNV dataframes
cnvs_1 = pd.read_csv(cnv_1_file_path,header=0,index_col=False,sep='\t')
cnvs_2 = pd.read_csv(cnv_2_file_path,header=0,index_col=False,sep='\t')
# add label columns to both dataframes
cnvs_1['label'] = np.repeat(['Non Pathogenic'],cnvs_1.shape[0])
cnvs_2['label'] = np.repeat(['Pathogenic'],cnvs_2.shape[0])
# concatenate the two dataframes
merged_cnvs = pd.concat([cnvs_1,cnvs_2])
# plot distrubtion by label exlucding the location columns
plotting.plot_feature_dist(merged_cnvs,exclude_features=['CHR','START','END'],by='label')
# save fig to output location
plt.savefig(pathlib.Path(args.output) / 'feature_dist.png',bbox_inches='tight')
if __name__ == "__main__":
main()
| [
"numpy.repeat",
"pandas.read_csv",
"argparse.ArgumentParser",
"pathlib.Path",
"lib.plotting.plot_feature_dist",
"pandas.concat"
] | [((244, 308), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Visualize annotated CNVs."""'}), "(description='Visualize annotated CNVs.')\n", (267, 308), False, 'import argparse\n'), ((793, 818), 'pathlib.Path', 'pathlib.Path', (['args.cnvs_1'], {}), '(args.cnvs_1)\n', (805, 818), False, 'import pathlib\n'), ((841, 866), 'pathlib.Path', 'pathlib.Path', (['args.cnvs_2'], {}), '(args.cnvs_2)\n', (853, 866), False, 'import pathlib\n'), ((917, 982), 'pandas.read_csv', 'pd.read_csv', (['cnv_1_file_path'], {'header': '(0)', 'index_col': '(False)', 'sep': '"""\t"""'}), "(cnv_1_file_path, header=0, index_col=False, sep='\\t')\n", (928, 982), True, 'import pandas as pd\n'), ((993, 1058), 'pandas.read_csv', 'pd.read_csv', (['cnv_2_file_path'], {'header': '(0)', 'index_col': '(False)', 'sep': '"""\t"""'}), "(cnv_2_file_path, header=0, index_col=False, sep='\\t')\n", (1004, 1058), True, 'import pandas as pd\n'), ((1123, 1169), 'numpy.repeat', 'np.repeat', (["['Non Pathogenic']", 'cnvs_1.shape[0]'], {}), "(['Non Pathogenic'], cnvs_1.shape[0])\n", (1132, 1169), True, 'import numpy as np\n'), ((1191, 1233), 'numpy.repeat', 'np.repeat', (["['Pathogenic']", 'cnvs_2.shape[0]'], {}), "(['Pathogenic'], cnvs_2.shape[0])\n", (1200, 1233), True, 'import numpy as np\n'), ((1289, 1316), 'pandas.concat', 'pd.concat', (['[cnvs_1, cnvs_2]'], {}), '([cnvs_1, cnvs_2])\n', (1298, 1316), True, 'import pandas as pd\n'), ((1384, 1481), 'lib.plotting.plot_feature_dist', 'plotting.plot_feature_dist', (['merged_cnvs'], {'exclude_features': "['CHR', 'START', 'END']", 'by': '"""label"""'}), "(merged_cnvs, exclude_features=['CHR', 'START',\n 'END'], by='label')\n", (1410, 1481), False, 'from lib import plotting\n'), ((1525, 1550), 'pathlib.Path', 'pathlib.Path', (['args.output'], {}), '(args.output)\n', (1537, 1550), False, 'import pathlib\n')] |
"""
Trainer for semi-supervised GAN
"""
import numpy as np
import torch
from torch.autograd import Variable
from tqdm import tqdm
from torchlib.common import FloatTensor, LongTensor
from torchlib.utils.plot import get_visdom_line_plotter
class Trainer(object):
def __init__(self, trick_dict=None):
if trick_dict is None:
self.trick_dict = {}
else:
self.trick_dict = trick_dict
self.global_step = 0
self.plotter = get_visdom_line_plotter('main')
def _create_real_data(self, raw_real_data):
noisy_input = self.trick_dict.get('noisy_input', None)
if noisy_input:
raw_real_data = raw_real_data + torch.from_numpy(
np.random.randn(*raw_real_data.shape) * noisy_input['sigma']).type(torch.FloatTensor)
noisy_input['sigma'] = max(0, noisy_input['sigma'] - noisy_input['decay'])
real_data = Variable(raw_real_data.type(FloatTensor))
return real_data
def _create_valid(self, batch_size):
soft_label = self.trick_dict.get('label_smooth', None)
if soft_label:
valid_range = soft_label['valid_range']
else:
valid_range = 1.
if isinstance(valid_range, list):
valid = Variable(FloatTensor(batch_size, 1).uniform_(*valid_range), requires_grad=False)
else:
valid = Variable(FloatTensor(batch_size, 1).fill_(valid_range), requires_grad=False)
return valid
def _create_fake(self, batch_size):
soft_label = self.trick_dict.get('label_smooth', None)
if soft_label:
fake_range = soft_label['fake_range']
else:
fake_range = 0.
if isinstance(fake_range, list):
fake = Variable(FloatTensor(batch_size, 1).uniform_(*fake_range), requires_grad=False)
else:
fake = Variable(FloatTensor(batch_size, 1).fill_(fake_range), requires_grad=False)
return fake
def train(self, num_epoch, data_loader, data_loader_label, gan_model, disc_iter, checkpoint_path, epoch_per_save,
callbacks):
assert disc_iter > 0, 'Discriminator update iteration must be greater than zero'
for epoch in range(num_epoch):
gan_model._set_to_train()
# we sample a batch after each epoch
dis_loss_lst = []
gen_loss_lst = []
D_x_lst = []
D_G_z1_lst = []
D_G_z2_lst = []
# plot smoothing
smooth_factor = 0.95
plot_dis_s = 0
plot_gen_s = 0
plot_D_x = 0
plot_D_G_z1 = 0
plot_D_G_z2 = 0
plot_ws = 0
print('Epoch {}'.format(epoch + 1))
for input_and_aux in tqdm(data_loader):
# We assume the input_and_label is a tuple containing data and auxiliary information
# Adversarial ground truths
batch_size = input_and_aux[0].shape[0]
valid = self._create_valid(batch_size)
fake = self._create_fake(batch_size)
flip_label = self.trick_dict.get('flip_label', None)
if flip_label and (self.global_step + 1) % flip_label['num_steps_per_flip'] == 0:
valid, fake = fake, valid
real_data = self._create_real_data(input_and_aux[0])
for _ in range(disc_iter):
# train discriminator
d_real_loss, D_x = gan_model._train_dis_with_real(real_data, valid)
d_fake_loss, D_G_z1 = gan_model._train_dis_with_fake(fake)
# train generator
valid = self._create_valid(2 * batch_size)
g_loss, D_G_z2 = gan_model._train_gen(valid)
# gan_model.update_parameters()
dis_loss = (d_real_loss.item() + d_fake_loss.item()) / 2
gen_loss = g_loss.item()
plot_dis_s = plot_dis_s * smooth_factor + dis_loss * (1 - smooth_factor)
plot_gen_s = plot_gen_s * smooth_factor + gen_loss * (1 - smooth_factor)
plot_D_x = plot_D_x * smooth_factor + D_x.item() * (1 - smooth_factor)
plot_D_G_z1 = plot_D_G_z1 * smooth_factor + D_G_z1.item() * (1 - smooth_factor)
plot_D_G_z2 = plot_D_G_z2 * smooth_factor + D_G_z2.item() * (1 - smooth_factor)
plot_ws = plot_ws * smooth_factor + (1 - smooth_factor)
dis_loss_lst.append(plot_dis_s / plot_ws)
gen_loss_lst.append(plot_gen_s / plot_ws)
D_x_lst.append(plot_D_x / plot_ws)
D_G_z1_lst.append(plot_D_G_z1 / plot_ws)
D_G_z2_lst.append(plot_D_G_z2 / plot_ws)
self.global_step += 1
# train label classifier
loss_lst = []
total = 0
total_correct = 0
for data, label in tqdm(data_loader_label):
batch_size = data.size(0)
total += batch_size
data = data.type(FloatTensor)
labels = label.type(LongTensor)
loss, correct = gan_model._train_label(data, labels)
total_correct += correct
loss_lst.append(loss)
avg_loss = np.mean(loss_lst)
avg_accuracy = total_correct / total
print('loss: {:.4f}. acc: {:.4f}'.format(avg_loss, avg_accuracy))
if gan_model.optimizer_G_scheduler:
gan_model.optimizer_G_scheduler.step()
if gan_model.optimizer_D_scheduler:
gan_model.optimizer_D_scheduler.step()
noisy_input = self.trick_dict.get('noisy_input', None)
if noisy_input:
print('Noisy input sigma: {:.4f}'.format(noisy_input['sigma']))
if checkpoint_path and (epoch + 1) % epoch_per_save == 0:
gan_model.save_checkpoint(checkpoint_path)
# plot loss figure
step = [a for a in range(self.global_step - len(dis_loss_lst), self.global_step)]
data = np.array([dis_loss_lst, gen_loss_lst]).transpose()
legend = ['dis_loss', 'gen_loss']
self.plotter.plot('gan_loss', legend, step, data)
data = np.array([D_x_lst, D_G_z1_lst, D_G_z2_lst]).transpose()
legend = ['D_x', 'D_G_z1', 'D_G_z2']
self.plotter.plot('gan_output', legend, step, data)
# callbacks
for callback in callbacks:
callback(self, gan_model)
if checkpoint_path:
gan_model.save_checkpoint(checkpoint_path)
| [
"numpy.mean",
"torchlib.common.FloatTensor",
"tqdm.tqdm",
"numpy.array",
"torchlib.utils.plot.get_visdom_line_plotter",
"numpy.random.randn"
] | [((477, 508), 'torchlib.utils.plot.get_visdom_line_plotter', 'get_visdom_line_plotter', (['"""main"""'], {}), "('main')\n", (500, 508), False, 'from torchlib.utils.plot import get_visdom_line_plotter\n'), ((2773, 2790), 'tqdm.tqdm', 'tqdm', (['data_loader'], {}), '(data_loader)\n', (2777, 2790), False, 'from tqdm import tqdm\n'), ((4956, 4979), 'tqdm.tqdm', 'tqdm', (['data_loader_label'], {}), '(data_loader_label)\n', (4960, 4979), False, 'from tqdm import tqdm\n'), ((5325, 5342), 'numpy.mean', 'np.mean', (['loss_lst'], {}), '(loss_lst)\n', (5332, 5342), True, 'import numpy as np\n'), ((6129, 6167), 'numpy.array', 'np.array', (['[dis_loss_lst, gen_loss_lst]'], {}), '([dis_loss_lst, gen_loss_lst])\n', (6137, 6167), True, 'import numpy as np\n'), ((6307, 6350), 'numpy.array', 'np.array', (['[D_x_lst, D_G_z1_lst, D_G_z2_lst]'], {}), '([D_x_lst, D_G_z1_lst, D_G_z2_lst])\n', (6315, 6350), True, 'import numpy as np\n'), ((1277, 1303), 'torchlib.common.FloatTensor', 'FloatTensor', (['batch_size', '(1)'], {}), '(batch_size, 1)\n', (1288, 1303), False, 'from torchlib.common import FloatTensor, LongTensor\n'), ((1392, 1418), 'torchlib.common.FloatTensor', 'FloatTensor', (['batch_size', '(1)'], {}), '(batch_size, 1)\n', (1403, 1418), False, 'from torchlib.common import FloatTensor, LongTensor\n'), ((1769, 1795), 'torchlib.common.FloatTensor', 'FloatTensor', (['batch_size', '(1)'], {}), '(batch_size, 1)\n', (1780, 1795), False, 'from torchlib.common import FloatTensor, LongTensor\n'), ((1882, 1908), 'torchlib.common.FloatTensor', 'FloatTensor', (['batch_size', '(1)'], {}), '(batch_size, 1)\n', (1893, 1908), False, 'from torchlib.common import FloatTensor, LongTensor\n'), ((723, 760), 'numpy.random.randn', 'np.random.randn', (['*raw_real_data.shape'], {}), '(*raw_real_data.shape)\n', (738, 760), True, 'import numpy as np\n')] |
from __future__ import print_function
import tensorflow as tf
import numpy as np
import cPickle
from tensorflow.contrib import slim
#Load features and labels
# Use context managers so the pickle file handles are closed deterministically
# instead of leaking until garbage collection.
with open('nn_features.p', 'rb') as _fin:
    features = cPickle.load(_fin)
with open('labels.p', 'rb') as _fin:
    labels = cPickle.load(_fin)
# Shuffle features and labels jointly via one random permutation of row indices.
mask = np.random.choice(features.shape[0], features.shape[0], replace=False)
features = features[mask]
labels = labels[mask]
# Hold out the first 10k shuffled rows for validation; train on the rest.
val_features = features[:10000]
train_features = features[10000:]
val_labels = labels[:10000]
train_labels = labels[10000:]
# Partition training rows by class (one-hot [0,1] == positive) so that
# class-balanced batches can be sampled during training.
positive_mask = []
negative_mask = []
for i in range(train_labels.shape[0]):
    if np.array_equal(train_labels[i], [0,1]):
        positive_mask.append(i)
    else:
        negative_mask.append(i)
pos_features = train_features[positive_mask]
pos_labels = train_labels[positive_mask]
neg_features = train_features[negative_mask]
neg_labels = train_labels[negative_mask]
#change these values later
learning_rate = 0.001
training_epochs = 10
display_step = 1
in_dim = features.shape[1]
n_samples = train_features.shape[0]
batch_size = 512
num_features = features.shape[1]
num_classes = labels.shape[1]
num_iter = 1000
n_hidden1 = 256
n_hidden2 = 256
n_hidden3 = 256
reg_strength = 5e-4
dropout_rate = 0.5
#define placeholder for our input
X = tf.placeholder("float", [None, num_features])
Y = tf.placeholder("float", [None, num_classes])
#drop_p = tf.placeholder(tf.float32)
def model(x):
    """Build a three-hidden-layer MLP over input batch ``x``.

    The first two hidden blocks are FC -> batch norm -> dropout; the third
    hidden layer is a plain FC; the output layer is linear and returns
    unnormalized class logits of shape (batch, num_classes).
    """
    w_init = tf.truncated_normal_initializer(stddev=0.01)
    w_reg = slim.l2_regularizer(reg_strength)
    net = x
    # First two hidden blocks share the same structure; scopes stay
    # 'hidden1'/'bn1'/'dropout1' and 'hidden2'/'bn2'/'dropout2'.
    for idx, width in ((1, n_hidden1), (2, n_hidden2)):
        net = slim.fully_connected(net, width, weights_initializer=w_init,
                                   weights_regularizer=w_reg,
                                   scope='hidden%d' % idx)
        net = slim.batch_norm(net, scope='bn%d' % idx)
        net = slim.dropout(net, dropout_rate, scope='dropout%d' % idx)
    # Third hidden layer: no batch norm or dropout.
    net = slim.fully_connected(net, n_hidden3, weights_initializer=w_init,
                               weights_regularizer=w_reg, scope='hidden3')
    # Linear output layer producing the logits.
    return slim.fully_connected(net, num_classes, activation_fn=None,
                                weights_initializer=w_init,
                                weights_regularizer=w_reg, scope='out_layer')
"""
# Hidden layer with RELU activation
layer = slim.fully_connected(x,n_hidden1, weights_initializer=tf.truncated_normal_initializer(stddev=0.01),
weights_regularizer=slim.l2_regularizer(reg_strength),scope='hidden1')
layer = slim.fully_connected(layer,n_hidden2, weights_initializer=tf.truncated_normal_initializer(stddev=0.01),
weights_regularizer=slim.l2_regularizer(reg_strength),scope='hidden2')
out_layer = slim.fully_connected(layer,num_classes, activation_fn=None, weights_initializer=tf.truncated_normal_initializer(stddev=0.01),
weights_regularizer=slim.l2_regularizer(reg_strength),scope='out_layer')
return out_layer
layer_1 = tf.add(tf.matmul(x, weights['h1']), biases['b1'])
layer_1 = tf.nn.relu(layer_1)
# Hidden layer with RELU activation
layer_2 = tf.add(tf.matmul(layer_1, weights['h2']), biases['b2'])
layer_2 = tf.nn.relu(layer_2)
# Output layer with linear activation
out_layer = tf.add(tf.matmul(layer_2, weights['out']), biases['out'])
return out_layer
"""
# Store layers weight & bias
weights = {
'h1': tf.Variable(tf.random_normal([num_features, n_hidden1])),
'h2': tf.Variable(tf.random_normal([n_hidden1, n_hidden2])),
'out': tf.Variable(tf.random_normal([n_hidden2, num_classes]))
}
biases = {
'b1': tf.Variable(tf.random_normal([n_hidden1])),
'b2': tf.Variable(tf.random_normal([n_hidden2])),
'out': tf.Variable(tf.random_normal([num_classes]))
}
recommendor = model(X)
loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(recommendor, Y))
#regularizers = (tf.nn.l2_loss(weights['h1']) + tf.nn.l2_loss(biases['b1']) + tf.nn.l2_loss(weights['h2']) + tf.nn.l2_loss(biases['b2']))
#loss += reg_strength * regularizers
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(loss)
# Test model
correct_prediction = tf.equal(tf.argmax(recommendor, 1), tf.argmax(Y, 1))
# Calculate accuracy
accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
probabilities = tf.nn.softmax(recommendor)
# Initializing the variables
init = tf.initialize_all_variables()
# Launch the graph
with tf.Session() as sess:
    sess.run(init)
    # Training cycle
    for epoch in range(training_epochs):
        avg_loss = 0.
        total_batch = int(train_features.shape[0]/batch_size)
        # Loop over all batches
        start = 0
        end = batch_size
        for i in range(total_batch):
            # Build a class-balanced batch: half positive and half negative
            # rows, each sampled without replacement within its class.
            # Integer division keeps the sample size an int — `batch_size/2`
            # is a float under Python 3 and np.random.choice rejects it.
            pos_mask = np.random.choice(pos_features.shape[0], batch_size // 2, replace=False)
            neg_mask = np.random.choice(neg_features.shape[0], batch_size // 2, replace=False)
            batch_x = np.vstack((pos_features[pos_mask], neg_features[neg_mask]))
            batch_y = np.vstack((pos_labels[pos_mask], neg_labels[neg_mask]))
            # Shuffle within the batch so positives/negatives are interleaved.
            shuffle = np.random.choice(batch_x.shape[0], batch_x.shape[0], replace=False)
            batch_x = batch_x[shuffle]
            batch_y = batch_y[shuffle]
            # Run optimization op (backprop) and loss op (to get loss value)
            _, c = sess.run([optimizer, loss], feed_dict={X: batch_x,
                                                          Y: batch_y})
            # Compute average loss
            avg_loss += c / total_batch
            start = end
            end += batch_size
        # Display logs per epoch step
        if epoch % display_step == 0:
            print("Epoch:", '%04d' % (epoch+1), "loss=", \
                "{:.9f}".format(avg_loss))
    print("Optimization Finished!")
    acc, p = sess.run([accuracy, probabilities], feed_dict={X: val_features, Y: val_labels})
    print('Val Accuracy: ', acc)
print('probabilities: ', p[:,1]) | [
"tensorflow.contrib.slim.batch_norm",
"tensorflow.initialize_all_variables",
"tensorflow.random_normal",
"tensorflow.contrib.slim.l2_regularizer",
"numpy.random.choice",
"tensorflow.placeholder",
"tensorflow.Session",
"tensorflow.truncated_normal_initializer",
"tensorflow.argmax",
"numpy.array_equ... | [((268, 337), 'numpy.random.choice', 'np.random.choice', (['features.shape[0]', 'features.shape[0]'], {'replace': '(False)'}), '(features.shape[0], features.shape[0], replace=False)\n', (284, 337), True, 'import numpy as np\n'), ((1255, 1300), 'tensorflow.placeholder', 'tf.placeholder', (['"""float"""', '[None, num_features]'], {}), "('float', [None, num_features])\n", (1269, 1300), True, 'import tensorflow as tf\n'), ((1305, 1349), 'tensorflow.placeholder', 'tf.placeholder', (['"""float"""', '[None, num_classes]'], {}), "('float', [None, num_classes])\n", (1319, 1349), True, 'import tensorflow as tf\n'), ((4656, 4682), 'tensorflow.nn.softmax', 'tf.nn.softmax', (['recommendor'], {}), '(recommendor)\n', (4669, 4682), True, 'import tensorflow as tf\n'), ((4719, 4748), 'tensorflow.initialize_all_variables', 'tf.initialize_all_variables', ([], {}), '()\n', (4746, 4748), True, 'import tensorflow as tf\n'), ((596, 635), 'numpy.array_equal', 'np.array_equal', (['train_labels[i]', '[0, 1]'], {}), '(train_labels[i], [0, 1])\n', (610, 635), True, 'import numpy as np\n'), ((1628, 1663), 'tensorflow.contrib.slim.batch_norm', 'slim.batch_norm', (['layer'], {'scope': '"""bn1"""'}), "(layer, scope='bn1')\n", (1643, 1663), False, 'from tensorflow.contrib import slim\n'), ((1676, 1727), 'tensorflow.contrib.slim.dropout', 'slim.dropout', (['layer', 'dropout_rate'], {'scope': '"""dropout1"""'}), "(layer, dropout_rate, scope='dropout1')\n", (1688, 1727), False, 'from tensorflow.contrib import slim\n'), ((1958, 1993), 'tensorflow.contrib.slim.batch_norm', 'slim.batch_norm', (['layer'], {'scope': '"""bn2"""'}), "(layer, scope='bn2')\n", (1973, 1993), False, 'from tensorflow.contrib import slim\n'), ((2006, 2057), 'tensorflow.contrib.slim.dropout', 'slim.dropout', (['layer', 'dropout_rate'], {'scope': '"""dropout2"""'}), "(layer, dropout_rate, scope='dropout2')\n", (2018, 2057), False, 'from tensorflow.contrib import slim\n'), ((4156, 4211), 
'tensorflow.nn.softmax_cross_entropy_with_logits', 'tf.nn.softmax_cross_entropy_with_logits', (['recommendor', 'Y'], {}), '(recommendor, Y)\n', (4195, 4211), True, 'import tensorflow as tf\n'), ((4511, 4536), 'tensorflow.argmax', 'tf.argmax', (['recommendor', '(1)'], {}), '(recommendor, 1)\n', (4520, 4536), True, 'import tensorflow as tf\n'), ((4538, 4553), 'tensorflow.argmax', 'tf.argmax', (['Y', '(1)'], {}), '(Y, 1)\n', (4547, 4553), True, 'import tensorflow as tf\n'), ((4602, 4638), 'tensorflow.cast', 'tf.cast', (['correct_prediction', '"""float"""'], {}), "(correct_prediction, 'float')\n", (4609, 4638), True, 'import tensorflow as tf\n'), ((4774, 4786), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (4784, 4786), True, 'import tensorflow as tf\n'), ((3753, 3796), 'tensorflow.random_normal', 'tf.random_normal', (['[num_features, n_hidden1]'], {}), '([num_features, n_hidden1])\n', (3769, 3796), True, 'import tensorflow as tf\n'), ((3821, 3861), 'tensorflow.random_normal', 'tf.random_normal', (['[n_hidden1, n_hidden2]'], {}), '([n_hidden1, n_hidden2])\n', (3837, 3861), True, 'import tensorflow as tf\n'), ((3887, 3929), 'tensorflow.random_normal', 'tf.random_normal', (['[n_hidden2, num_classes]'], {}), '([n_hidden2, num_classes])\n', (3903, 3929), True, 'import tensorflow as tf\n'), ((3966, 3995), 'tensorflow.random_normal', 'tf.random_normal', (['[n_hidden1]'], {}), '([n_hidden1])\n', (3982, 3995), True, 'import tensorflow as tf\n'), ((4020, 4049), 'tensorflow.random_normal', 'tf.random_normal', (['[n_hidden2]'], {}), '([n_hidden2])\n', (4036, 4049), True, 'import tensorflow as tf\n'), ((4075, 4106), 'tensorflow.random_normal', 'tf.random_normal', (['[num_classes]'], {}), '([num_classes])\n', (4091, 4106), True, 'import tensorflow as tf\n'), ((4400, 4451), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', ([], {'learning_rate': 'learning_rate'}), '(learning_rate=learning_rate)\n', (4422, 4451), True, 'import tensorflow as tf\n'), ((1468, 1512), 
'tensorflow.truncated_normal_initializer', 'tf.truncated_normal_initializer', ([], {'stddev': '(0.01)'}), '(stddev=0.01)\n', (1499, 1512), True, 'import tensorflow as tf\n'), ((1565, 1598), 'tensorflow.contrib.slim.l2_regularizer', 'slim.l2_regularizer', (['reg_strength'], {}), '(reg_strength)\n', (1584, 1598), False, 'from tensorflow.contrib import slim\n'), ((1798, 1842), 'tensorflow.truncated_normal_initializer', 'tf.truncated_normal_initializer', ([], {'stddev': '(0.01)'}), '(stddev=0.01)\n', (1829, 1842), True, 'import tensorflow as tf\n'), ((1895, 1928), 'tensorflow.contrib.slim.l2_regularizer', 'slim.l2_regularizer', (['reg_strength'], {}), '(reg_strength)\n', (1914, 1928), False, 'from tensorflow.contrib import slim\n'), ((2128, 2172), 'tensorflow.truncated_normal_initializer', 'tf.truncated_normal_initializer', ([], {'stddev': '(0.01)'}), '(stddev=0.01)\n', (2159, 2172), True, 'import tensorflow as tf\n'), ((2225, 2258), 'tensorflow.contrib.slim.l2_regularizer', 'slim.l2_regularizer', (['reg_strength'], {}), '(reg_strength)\n', (2244, 2258), False, 'from tensorflow.contrib import slim\n'), ((2372, 2416), 'tensorflow.truncated_normal_initializer', 'tf.truncated_normal_initializer', ([], {'stddev': '(0.01)'}), '(stddev=0.01)\n', (2403, 2416), True, 'import tensorflow as tf\n'), ((2469, 2502), 'tensorflow.contrib.slim.l2_regularizer', 'slim.l2_regularizer', (['reg_strength'], {}), '(reg_strength)\n', (2488, 2502), False, 'from tensorflow.contrib import slim\n'), ((5096, 5166), 'numpy.random.choice', 'np.random.choice', (['pos_features.shape[0]', '(batch_size / 2)'], {'replace': '(False)'}), '(pos_features.shape[0], batch_size / 2, replace=False)\n', (5112, 5166), True, 'import numpy as np\n'), ((5188, 5258), 'numpy.random.choice', 'np.random.choice', (['neg_features.shape[0]', '(batch_size / 2)'], {'replace': '(False)'}), '(neg_features.shape[0], batch_size / 2, replace=False)\n', (5204, 5258), True, 'import numpy as np\n'), ((5279, 5338), 'numpy.vstack', 
'np.vstack', (['(pos_features[pos_mask], neg_features[neg_mask])'], {}), '((pos_features[pos_mask], neg_features[neg_mask]))\n', (5288, 5338), True, 'import numpy as np\n'), ((5361, 5416), 'numpy.vstack', 'np.vstack', (['(pos_labels[pos_mask], neg_labels[neg_mask])'], {}), '((pos_labels[pos_mask], neg_labels[neg_mask]))\n', (5370, 5416), True, 'import numpy as np\n'), ((5439, 5506), 'numpy.random.choice', 'np.random.choice', (['batch_x.shape[0]', 'batch_x.shape[0]'], {'replace': '(False)'}), '(batch_x.shape[0], batch_x.shape[0], replace=False)\n', (5455, 5506), True, 'import numpy as np\n')] |
from collections import defaultdict
import json
from pandas.core import frame
import torch
import pandas as pd
import os
import pickle as pkl
import numpy as np
import cv2
import h5py
import tqdm
import lmdb
from functools import lru_cache
class EPIC_KITCHENS_DATASET(torch.utils.data.Dataset):
    """Action-anticipation dataset over pre-extracted EPIC-KITCHENS features.

    Supports EPIC-KITCHENS-55 and EPIC-KITCHENS-100 annotation layouts. Each
    item is a window of per-frame features ending ``tau_a`` seconds before an
    annotated action's start, together with that action's verb/noun/action
    labels (omitted for challenge/test splits). Features are read from an
    LMDB file keyed by ``'<video_id>_frame_<fid>.jpg'``.
    """
    def __init__(self, logger, config):
        """Load annotations for the configured split and build segment index.

        Parameters
        ----------
        logger : logging.Logger
            Used for summary/progress messages.
        config : object
            Attribute bag; fields read here: name, split, fps, tau_a,
            feature, feature_fps, feature_dim, weight, past_frame,
            forward_frame, drop, feat_file, cache.
        """
        super().__init__()
        self.data_root = './data/EK55' if config.name == 'EPIC-KITCHENS-55' else './data/EK100'
        self.name = config.name
        self.split = config.split
        self.config = config
        # Challenge mode (any split containing 'test') has no labels.
        self.challenge = 'test' in config.split
        self.model_fps = config.fps
        self.tau_a = config.tau_a
        self.feature = config.feature
        self.feature_fps = config.feature_fps
        self.feature_dim = config.feature_dim
        # NOTE(review): the open() calls feeding pkl.load/json.load below are
        # never closed explicitly; handles are reclaimed only by GC.
        if config.name == 'EPIC-KITCHENS-55':
            if config.split == 'train':
                # EK55 train/valid share one label file, filtered by a video list.
                video_list = pd.read_csv(os.path.join(self.data_root,'training_videos.csv'),header = None)[0]
                self.action_info = pkl.load(open(os.path.join(self.data_root,'EPIC_train_action_labels.pkl'),'rb'))
                self.action_info = self.action_info.loc[self.action_info['video_id'].isin(video_list)]
            elif config.split == 'valid':
                video_list = pd.read_csv(os.path.join(self.data_root,'validation_videos.csv'),header = None)[0]
                self.action_info = pkl.load(open(os.path.join(self.data_root,'EPIC_train_action_labels.pkl'),'rb'))
                self.action_info = self.action_info.loc[self.action_info['video_id'].isin(video_list)]
            elif config.split == 'trainval':
                self.action_info = pkl.load(open(os.path.join(self.data_root,'EPIC_train_action_labels.pkl'),'rb'))
            elif config.split == 'test_seen':
                self.action_info = pkl.load(open(os.path.join(self.data_root,'EPIC_test_s1_timestamps.pkl'),'rb'))
            elif config.split == 'test_unseen':
                self.action_info = pkl.load(open(os.path.join(self.data_root,'EPIC_test_s2_timestamps.pkl'),'rb'))
            else:
                raise NotImplementedError('Unknow split [%s] for dataset [%s]' % (config.split, config.name))
            self.num_noun = pd.read_csv(os.path.join(self.data_root,'EPIC_noun_classes.csv')).shape[0]
            self.num_verb = pd.read_csv(os.path.join(self.data_root,'EPIC_verb_classes.csv')).shape[0]
            # Action classes are (verb, noun) pairs; vn2action maps pair -> index.
            self.action_composition = json.load(open(os.path.join(self.data_root, 'EK55_action_composition.json'),'r'))
            self.num_action = len(self.action_composition)
            self.vn2action = {(v,n): i for i, (v,n) in enumerate(self.action_composition)}
        elif config.name == 'EPIC-KITCHENS-100':
            if config.split == 'train':
                self.action_info = pkl.load(open(os.path.join(self.data_root,'EPIC_100_train.pkl'),'rb'))
            elif config.split == 'valid':
                self.action_info = pkl.load(open(os.path.join(self.data_root,'EPIC_100_validation.pkl'),'rb'))
            elif config.split == 'trainval':
                # trainval concatenates the train and validation annotations.
                self.action_info = pkl.load(open(os.path.join(self.data_root,'EPIC_100_train.pkl'),'rb'))
                self.action_info = self.action_info.append(
                    pkl.load(open(os.path.join(self.data_root,'EPIC_100_validation.pkl'),'rb')))
            elif config.split == 'test':
                self.action_info = pkl.load(open(os.path.join(self.data_root,'EPIC_100_test_timestamps.pkl'),'rb'))
            else:
                raise NotImplementedError('Unknow split [%s] for dataset [%s]' % (config.split, config.name))
            self.num_noun = pd.read_csv(os.path.join(self.data_root,'EPIC_100_noun_classes.csv')).shape[0]
            self.num_verb = pd.read_csv(os.path.join(self.data_root,'EPIC_100_verb_classes.csv')).shape[0]
            self.action_composition = json.load(open(os.path.join(self.data_root, 'EK100_action_composition.json'),'r'))
            self.num_action = len(self.action_composition)
            self.vn2action = {(v,n): i for i, (v,n) in enumerate(self.action_composition)}
        else:
            raise NotImplementedError('Unknow dataset: %s' % config.name)
        # Optional per-class weights (e.g. for loss re-weighting downstream).
        if config.weight:
            self.verb_weight = np.array(pkl.load(open(os.path.join(self.data_root,'verb_weight.pkl'),'rb')))
            self.noun_weight = np.array(pkl.load(open(os.path.join(self.data_root,'noun_weight.pkl'),'rb')))
            self.action_weight = np.array(pkl.load(open(os.path.join(self.data_root,'action_weight.pkl'),'rb')))
        else:
            self.verb_weight, self.noun_weight, self.action_weight = None, None, None
        ##### store source frame index
        assert config.past_frame >= 0
        self.data = []
        # frame_label[video_id][frame_id] -> (verb_class, noun_class) for
        # every feature frame inside an annotated action (non-challenge only).
        self.frame_label = defaultdict(dict)
        for video_id, group in self.action_info.groupby('video_id'):
            for idx, a in group.iterrows():
                segment = {
                    'id' : idx,
                    'participant_id' : a.participant_id,
                    'video_id': video_id,
                }
                # Annotation timestamps converted to feature-frame indices.
                start_frame = int(self.timestr_to_second(a.start_timestamp) * self.feature_fps)
                end_frame = int(self.timestr_to_second(a.stop_timestamp) * self.feature_fps)
                if not self.challenge:
                    for fid in range(start_frame,end_frame):
                        self.frame_label[video_id][fid] = (a.verb_class,a.noun_class)
                # Optionally drop actions that start too early to have a full
                # anticipation window of tau_a seconds before them.
                if config.drop and start_frame<=self.tau_a * self.feature_fps:
                    continue
                # Build past_frame + forward_frame feature-frame indices
                # sampled at the model fps, ending tau_a seconds before the
                # action start (plus forward_frame steps beyond that point);
                # arange runs backwards with a negative step, then [::-1]
                # restores chronological order.
                frame_index = np.arange(
                    start_frame - self.tau_a * self.feature_fps + config.forward_frame * self.feature_fps / self.model_fps,
                    start_frame - self.tau_a * self.feature_fps - config.past_frame * self.feature_fps / self.model_fps,
                    - self.feature_fps / self.model_fps
                ).astype(int)[::-1]
                assert len(frame_index) == config.past_frame + config.forward_frame
                # Clamp to the first frame; frame ids are 1-based.
                frame_index[frame_index < 1] = 1
                segment['frame_index'] = frame_index
                if not self.challenge:
                    segment['next_verb_class'] = a.verb_class
                    segment['next_noun_class'] = a.noun_class
                    segment['next_action_class'] = self.vn2action[(a.verb_class,a.noun_class)]
                self.data.append(segment)
                # debug
                # break
        ##### feature
        assert config.feat_file
        self.f = lmdb.open(config.feat_file, readonly=True, lock=False)
        logger.info('[%s] # Frame: Past %d. Forward %d.' % (
            config.split, config.past_frame,config.forward_frame))
        logger.info('[%s] # segment %d. verb %d. noun %d. action %d.' % (
            config.split, len(self.data), self.num_verb, self.num_noun, self.num_action))
        self.cache = {}
        if config.cache:
            self.make_cache(logger)
    def make_cache(self,logger):
        """Pre-read every needed frame feature from LMDB into an in-memory dict."""
        logger.info('Cache: Load all feature into memory')
        for segment in self.data:
            for fid in segment['frame_index']:
                key = '%s_frame_%010d.jpg' % (segment['video_id'],fid)
                if key not in self.cache:
                    res = self._read_one_frame_feat(key)
                    self.cache[key] = res
        logger.info('Cache: Finish loading. Cache Size %d' % len(self.cache))
    def timestr_to_second(self,x):
        """Convert an 'HH:MM:SS(.fff)' timestamp string to seconds (float)."""
        a,b,c = list(map(float,x.split(':')))
        return c + 60 * b + 3600 * a
    def _read_one_frame_feat(self,key):
        """Return the float32 feature vector stored under ``key``.

        Serves from the in-memory cache when present; otherwise reads from
        LMDB. Returns None when the key is missing from the database.
        """
        if key in self.cache:
            return self.cache[key]
        with self.f.begin() as e:
            buf = e.get(key.strip().encode('utf-8'))
            if buf is not None:
                res = np.frombuffer(buf,'float32')
            else:
                res = None
        return res
    def _load_feat(self,video_id, frame_ids):
        """Stack the features for ``frame_ids`` into a (T, feature_dim) float tensor.

        Missing frames are filled with the most recent available frame, or
        zeros when no frame has been read yet.
        """
        frames = []
        dim = self.feature_dim
        for fid in frame_ids:
            # handling special case for irCSN feature provided by AVT
            # (those features exist only every 3rd / at shifted 6-frame ids).
            if self.feature == 'irCSN10':
                if fid %3!=0:
                    fid = (fid // 3) * 3
            if self.feature == 'irCSN25':
                if fid % 6 == 3:
                    fid = fid -1
            key = '%s_frame_%010d.jpg' % (video_id,fid)
            frame_feat = self._read_one_frame_feat(key)
            if frame_feat is not None:
                frames.append(frame_feat)
            elif len(frames) > 0:
                # Missing frame: repeat the previous one.
                frames.append(frames[-1])
                # print('Copy frame: %s' % key)
            else:
                # Missing frame with no predecessor: zero-fill.
                frames.append(np.zeros(dim))
                # print('Zero frame: %s' % key)
        return torch.from_numpy(np.stack(frames,0)).float()
    def __len__(self):
        return len(self.data)
    def __getitem__(self,i):
        """Return one sample dict: id/index, labels (non-challenge) and features."""
        segment = self.data[i]
        out = {
            'id' : segment['id'],
            'index' : i
        }
        if not self.challenge:
            out['next_action_class'] = segment['next_action_class']
            out['next_verb_class'] = segment['next_verb_class']
            out['next_noun_class'] = segment['next_noun_class']
        out['past_frame'] = self._load_feat(
            segment['video_id'],
            segment['frame_index'],
        )
        return out
| [
"os.path.join",
"numpy.stack",
"numpy.zeros",
"collections.defaultdict",
"lmdb.open",
"numpy.frombuffer",
"numpy.arange"
] | [((4832, 4849), 'collections.defaultdict', 'defaultdict', (['dict'], {}), '(dict)\n', (4843, 4849), False, 'from collections import defaultdict\n'), ((6648, 6702), 'lmdb.open', 'lmdb.open', (['config.feat_file'], {'readonly': '(True)', 'lock': '(False)'}), '(config.feat_file, readonly=True, lock=False)\n', (6657, 6702), False, 'import lmdb\n'), ((7928, 7957), 'numpy.frombuffer', 'np.frombuffer', (['buf', '"""float32"""'], {}), "(buf, 'float32')\n", (7941, 7957), True, 'import numpy as np\n'), ((2502, 2562), 'os.path.join', 'os.path.join', (['self.data_root', '"""EK55_action_composition.json"""'], {}), "(self.data_root, 'EK55_action_composition.json')\n", (2514, 2562), False, 'import os\n'), ((8952, 8971), 'numpy.stack', 'np.stack', (['frames', '(0)'], {}), '(frames, 0)\n', (8960, 8971), True, 'import numpy as np\n'), ((968, 1019), 'os.path.join', 'os.path.join', (['self.data_root', '"""training_videos.csv"""'], {}), "(self.data_root, 'training_videos.csv')\n", (980, 1019), False, 'import os\n'), ((1086, 1146), 'os.path.join', 'os.path.join', (['self.data_root', '"""EPIC_train_action_labels.pkl"""'], {}), "(self.data_root, 'EPIC_train_action_labels.pkl')\n", (1098, 1146), False, 'import os\n'), ((2283, 2336), 'os.path.join', 'os.path.join', (['self.data_root', '"""EPIC_noun_classes.csv"""'], {}), "(self.data_root, 'EPIC_noun_classes.csv')\n", (2295, 2336), False, 'import os\n'), ((2386, 2439), 'os.path.join', 'os.path.join', (['self.data_root', '"""EPIC_verb_classes.csv"""'], {}), "(self.data_root, 'EPIC_verb_classes.csv')\n", (2398, 2439), False, 'import os\n'), ((3930, 3991), 'os.path.join', 'os.path.join', (['self.data_root', '"""EK100_action_composition.json"""'], {}), "(self.data_root, 'EK100_action_composition.json')\n", (3942, 3991), False, 'import os\n'), ((4327, 4374), 'os.path.join', 'os.path.join', (['self.data_root', '"""verb_weight.pkl"""'], {}), "(self.data_root, 'verb_weight.pkl')\n", (4339, 4374), False, 'import os\n'), ((4436, 4483), 
'os.path.join', 'os.path.join', (['self.data_root', '"""noun_weight.pkl"""'], {}), "(self.data_root, 'noun_weight.pkl')\n", (4448, 4483), False, 'import os\n'), ((4547, 4596), 'os.path.join', 'os.path.join', (['self.data_root', '"""action_weight.pkl"""'], {}), "(self.data_root, 'action_weight.pkl')\n", (4559, 4596), False, 'import os\n'), ((8854, 8867), 'numpy.zeros', 'np.zeros', (['dim'], {}), '(dim)\n', (8862, 8867), True, 'import numpy as np\n'), ((1339, 1392), 'os.path.join', 'os.path.join', (['self.data_root', '"""validation_videos.csv"""'], {}), "(self.data_root, 'validation_videos.csv')\n", (1351, 1392), False, 'import os\n'), ((1459, 1519), 'os.path.join', 'os.path.join', (['self.data_root', '"""EPIC_train_action_labels.pkl"""'], {}), "(self.data_root, 'EPIC_train_action_labels.pkl')\n", (1471, 1519), False, 'import os\n'), ((2859, 2909), 'os.path.join', 'os.path.join', (['self.data_root', '"""EPIC_100_train.pkl"""'], {}), "(self.data_root, 'EPIC_100_train.pkl')\n", (2871, 2909), False, 'import os\n'), ((3703, 3760), 'os.path.join', 'os.path.join', (['self.data_root', '"""EPIC_100_noun_classes.csv"""'], {}), "(self.data_root, 'EPIC_100_noun_classes.csv')\n", (3715, 3760), False, 'import os\n'), ((3810, 3867), 'os.path.join', 'os.path.join', (['self.data_root', '"""EPIC_100_verb_classes.csv"""'], {}), "(self.data_root, 'EPIC_100_verb_classes.csv')\n", (3822, 3867), False, 'import os\n'), ((5689, 5954), 'numpy.arange', 'np.arange', (['(start_frame - self.tau_a * self.feature_fps + config.forward_frame * self.\n feature_fps / self.model_fps)', '(start_frame - self.tau_a * self.feature_fps - config.past_frame * self.\n feature_fps / self.model_fps)', '(-self.feature_fps / self.model_fps)'], {}), '(start_frame - self.tau_a * self.feature_fps + config.\n forward_frame * self.feature_fps / self.model_fps, start_frame - self.\n tau_a * self.feature_fps - config.past_frame * self.feature_fps / self.\n model_fps, -self.feature_fps / self.model_fps)\n', (5698, 5954), 
True, 'import numpy as np\n'), ((1723, 1783), 'os.path.join', 'os.path.join', (['self.data_root', '"""EPIC_train_action_labels.pkl"""'], {}), "(self.data_root, 'EPIC_train_action_labels.pkl')\n", (1735, 1783), False, 'import os\n'), ((3007, 3062), 'os.path.join', 'os.path.join', (['self.data_root', '"""EPIC_100_validation.pkl"""'], {}), "(self.data_root, 'EPIC_100_validation.pkl')\n", (3019, 3062), False, 'import os\n'), ((1885, 1944), 'os.path.join', 'os.path.join', (['self.data_root', '"""EPIC_test_s1_timestamps.pkl"""'], {}), "(self.data_root, 'EPIC_test_s1_timestamps.pkl')\n", (1897, 1944), False, 'import os\n'), ((3163, 3213), 'os.path.join', 'os.path.join', (['self.data_root', '"""EPIC_100_train.pkl"""'], {}), "(self.data_root, 'EPIC_100_train.pkl')\n", (3175, 3213), False, 'import os\n'), ((2048, 2107), 'os.path.join', 'os.path.join', (['self.data_root', '"""EPIC_test_s2_timestamps.pkl"""'], {}), "(self.data_root, 'EPIC_test_s2_timestamps.pkl')\n", (2060, 2107), False, 'import os\n'), ((3314, 3369), 'os.path.join', 'os.path.join', (['self.data_root', '"""EPIC_100_validation.pkl"""'], {}), "(self.data_root, 'EPIC_100_validation.pkl')\n", (3326, 3369), False, 'import os\n'), ((3467, 3527), 'os.path.join', 'os.path.join', (['self.data_root', '"""EPIC_100_test_timestamps.pkl"""'], {}), "(self.data_root, 'EPIC_100_test_timestamps.pkl')\n", (3479, 3527), False, 'import os\n')] |
import io
import os
import time
import argparse
import random
import logging
import warnings
import multiprocessing
import numpy as np
import mxnet as mx
from mxnet import gluon
from mxnet.gluon import Block, nn
from mxnet.gluon.data.sampler import Sampler, SequentialSampler
import gluonnlp as nlp
from gluonnlp.model import get_model
from gluonnlp.data import BERTTokenizer
from gluonnlp.data.dataset import SimpleDataset, Dataset
import json
import collections
from tmnt.preprocess.vectorizer import TMNTVectorizer
from tmnt.data_loading import to_label_matrix, PairedDataLoader, RoundRobinDataLoader
from typing import Dict
from gluonnlp.data import BERTSentenceTransform
from itertools import accumulate
class JsonlDataset(SimpleDataset):
    """Dataset backed by one or more jsonlines (.jsonl) files.

    Every line of each file is parsed as a JSON object; the text under
    ``txt_key`` and the (optionally remapped) label under ``label_key``
    become one ``(text, label)`` sample.

    Parameters:
        filename : Path (or tuple/list of paths) to the .jsonl file(s).
        txt_key: Json attribute key to use for seleting text document strings
        label_key: Json attribute key to use to get string labels
        encoding : File encoding format. (default 'utf8')
        label_remap : Dictionary to map labels.
        random_drop_pct : Probability with which each line is skipped.
    """
    def __init__(self, filename: str, txt_key: str, label_key: str,
                 encoding: str = 'utf8', label_remap: Dict[str,str] = None, random_drop_pct: float = 0.0):
        paths = filename if isinstance(filename, (tuple, list)) else (filename, )
        self._filenames = [os.path.expanduser(p) for p in paths]
        self._encoding = encoding
        self._txt_key = txt_key
        self._label_key = label_key
        self._label_remap = label_remap
        self._random_drop_pct = random_drop_pct
        self._random_drop = random_drop_pct > 0.0
        super(JsonlDataset, self).__init__(self._read())

    def _keep_line(self):
        # Random subsampling predicate; only draws from the RNG when dropping
        # is enabled (preserves the original short-circuit behavior).
        return (not self._random_drop) or random.uniform(0, 1) > self._random_drop_pct

    def _read(self):
        """Parse all configured files into a list of (text, label) pairs."""
        all_samples = []
        for path in self._filenames:
            file_samples = []
            with open(path, 'r', encoding=self._encoding) as fin:
                for line in fin:
                    if not self._keep_line():
                        continue
                    record = json.loads(line, object_pairs_hook=collections.OrderedDict)
                    label = record.get(self._label_key)
                    if self._label_remap is not None:
                        label = self._label_remap.get(label)
                    file_samples.append((record[self._txt_key], label))
            all_samples += file_samples
        return all_samples
class UnevenArrayDataset(Dataset):
    """Combine several dataset-like objects of possibly different lengths.

    Unlike a stock array dataset, the inputs need NOT share a length: the
    combined length is that of the longest input, and shorter inputs wrap
    around, i.e. the i-th sample is ``(x1[i % len(x1)], x2[i % len(x2)], ...)``.

    Parameters:
        *args : one or more dataset-like objects. The data arrays.
    """
    def __init__(self, *args):
        assert len(args) > 0, "Needs at least 1 arrays"
        self._sub_lengths = [len(a) for a in args]
        # Overall length follows the longest sub-dataset.
        self._length = max(self._sub_lengths)
        converted = []
        for arr in args:
            # 1-D mxnet arrays are materialized as numpy up front.
            if isinstance(arr, mx.nd.NDArray) and len(arr.shape) == 1:
                arr = arr.asnumpy()
            converted.append(arr)
        self._data = converted

    def __getitem__(self, idx):
        # NOTE(review): raising StopIteration (rather than the conventional
        # IndexError) is preserved from the original; callers appear to rely
        # on it to end iteration.
        if idx >= self._length:
            raise StopIteration
        if len(self._data) == 1:
            return self._data[0][idx]
        # Shorter sub-datasets wrap around via modular indexing.
        return tuple(arr[idx % n] for arr, n in zip(self._data, self._sub_lengths))

    def __len__(self):
        return self._length
class BERTDatasetTransform(object):
    """Dataset transformation for BERT-style sentence classification or regression.
    Parameters
    ----------
    tokenizer : BERTTokenizer.
        Tokenizer for the sentences.
    max_seq_length : int.
        Maximum sequence length of the sentences.
    labels : list of int , float or None. defaults None
        List of all label ids for the classification task and regressing task.
        If labels is None, the default task is regression
    pad : bool, default True
        Whether to pad the sentences to maximum length.
    pair : bool, default True
        Whether to transform sentences or sentence pairs.
    has_label: bool.
        Whether labels are present for supervised learning
    vectorizer: TMNTVectorizer
        TMNTVectorizer to generate bag of words
    bert_vocab_size: int
        Use the raw BERT word-pieces as the bag-of-words vocabulary
    num_classes: int
        Must be provided if class_labels isn't provided
    """
    def __init__(self,
                 tokenizer,
                 max_seq_length,
                 class_labels=None,
                 label_alias=None,
                 pad=True,
                 pair=True,
                 has_label=True,
                 vectorizer=None,
                 bert_vocab_size=0,
                 num_classes=None):
        self.class_labels = class_labels
        self.has_label = has_label
        # A positive BERT vocab size switches the bag-of-words representation
        # to raw word-piece counts (see __call__).
        self.use_bert_bow = bert_vocab_size > 0
        self.bert_vocab_size = bert_vocab_size
        self._label_dtype = 'int32' if class_labels else 'float32'
        self.num_classes = len(class_labels) if class_labels else num_classes
        if has_label and class_labels:
            # Map each class-label string to its index; aliases resolve to the
            # index of the label they alias.
            self._label_map = {}
            for (i, label) in enumerate(class_labels):
                self._label_map[label] = i
            if label_alias:
                for key in label_alias:
                    if label_alias[key] in self._label_map:
                        self._label_map[key] = self._label_map[label_alias[key]]
        self._bert_xform = BERTSentenceTransform(
            tokenizer, max_seq_length, pad=pad, pair=pair)
        self.vectorizer = vectorizer

    def __call__(self, line):
        """Perform transformation for sequence pairs or single sequences.
        The transformation is processed in the following steps:
        - tokenize the input sequences
        - insert [CLS], [SEP] as necessary
        - generate type ids to indicate whether a token belongs to the first
          sequence or the second sequence.
        - generate valid length
        For sequence pairs, the input is a tuple of 3 strings:
        text_a, text_b and label.
        Inputs:
            text_a: 'is this jacksonville ?'
            text_b: 'no it is not'
            label: '0'
        Tokenization:
            text_a: 'is this jack ##son ##ville ?'
            text_b: 'no it is not .'
        Processed:
            tokens: '[CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]'
            type_ids: 0     0  0    0    0     0       0 0     1  1  1  1   1 1
            valid_length: 14
            label: 0
        For single sequences, the input is a tuple of 2 strings: text_a and label.
        Inputs:
            text_a: 'the dog is hairy .'
            label: '1'
        Tokenization:
            text_a: 'the dog is hairy .'
        Processed:
            text_a: '[CLS] the dog is hairy . [SEP]'
            type_ids: 0     0   0   0  0     0 0
            valid_length: 7
            label: 1
        Parameters
        ----------
        line: tuple of str
            Input strings. For sequence pairs, the input is a tuple of 3 strings:
            (text_a, text_b, label). For single sequences, the input is a tuple
            of 2 strings: (text_a, label).
        Returns
        -------
        np.array: input token ids in 'int32', shape (seq_length,)
        np.array: valid length in 'int32', shape (1,)
        np.array: input token type ids in 'int32', shape (seq_length,)
        np.array: classification task: label id in 'int32', shape (num_classes,),
            regression task: label in 'float32', shape (1,)
        """
        if self.has_label:
            input_ids, valid_length, segment_ids = self._bert_xform(line[:-1])
            label_str = line[-1]
            # map to int if class labels are available
            if self.class_labels:
                if label_str:
                    # Comma-separated multi-label strings; unknown labels map to 0.
                    labels = [ self._label_map.get(label,0) for label in label_str.split(',') ]
                    if labels is None or len(labels) == 0:
                        labels = [0]
                else:
                    labels = [0]
            else:
                # No class-label list: interpret the string directly as an int,
                # falling back to 0 on parse failure.
                try:
                    labels=[int(label_str)]
                except:
                    labels=[0]
            #label = np.array(labels, dtype=self._label_dtype)
            if self.num_classes is not None and self.num_classes > 1:
                # Multi-hot (1, num_classes) label matrix via project helper.
                label_mat, _ = to_label_matrix([labels], num_labels=self.num_classes)
            else:
                label_mat = np.array([[0.0]]) # just fill with zeros; assumption is that labels will be ignored
            bow = None
            if self.use_bert_bow:
                # Bag-of-words over the raw BERT word-piece ids of this example.
                bow = np.zeros(self.bert_vocab_size)
                inds, cnts = np.unique(input_ids, return_counts=True)
                bow[inds] = cnts
                bow = mx.nd.array(np.expand_dims(bow, 0), dtype='float32')
            elif self.vectorizer:
                # Bag-of-words from the external TMNT vectorizer's vocabulary.
                bow,_ = self.vectorizer.transform(line[:-1])
                bow = mx.nd.array(bow, dtype='float32')
            return input_ids, valid_length, segment_ids, bow, label_mat[0]
        else:
            # Unlabeled input: return the plain BERT transform output triple.
            return self._bert_xform(line)
class FixedSeedRandomSampler(Sampler):
    """Samples elements from [0, length) randomly without replacement, but
    reproducibly: each call to ``__iter__`` reseeds numpy's global RNG with
    ``rng + call_count`` so epoch k always yields the same permutation
    across runs while different epochs differ.

    Parameters
    ----------
    length : int
        Length of the sequence.
    rng : int
        Base seed combined with the per-epoch call counter.
    """
    def __init__(self, length, rng=1234):
        self._length = length
        self._rng = rng
        self._calls = 0

    def __iter__(self):
        self._calls += 1
        # Intentionally seeds the *global* numpy RNG (as the original did),
        # so downstream global draws are affected identically.
        np.random.seed(self._rng + self._calls)
        # permutation(n) == shuffle(arange(n)) under the same legacy seed.
        return iter(np.random.permutation(self._length))

    def __len__(self):
        return self._length
def preprocess_seq_data(trans, class_labels, dataset, batch_size, max_len, train_mode=True, pad=False, aux_dataset=None):
    """Transform a raw dataset with ``trans`` and wrap it in a Gluon DataLoader.

    Parameters
    ----------
    trans : callable
        Per-example transform (e.g. ``BERTDatasetTransform``) producing
        (input_ids, valid_length, segment_ids, bow, label).
    class_labels, max_len, pad, aux_dataset :
        Unused here; kept for signature compatibility with existing callers.
    dataset : iterable
        Raw examples to transform.
    batch_size : int
        Target batch size.
    train_mode : bool
        If True, use a shuffling length-bucket sampler; otherwise a plain
        sequential loader.

    Returns
    -------
    (DataLoader, int)
        The data loader and the number of transformed examples.
    """
    label_dtype = 'float32' # if not task.class_labels else 'int32'
    bow_count_dtype = 'float32'
    # Run the transform across worker processes; the context manager releases
    # the pool's workers afterwards (the previous version leaked a Pool per call).
    with multiprocessing.Pool() as pool:
        data_ds = mx.gluon.data.SimpleDataset(pool.map(trans, dataset))
    # Wrap each example in a singleton tuple so the nested batchify below applies.
    final_ds = data_ds.transform( lambda a,b,c,d,e: ((a,b,c,d,e),) ) # singleton tuple
    final_ds_len = data_ds.transform(lambda input_id, length, segment_id, bow, label_id: length, lazy=False)
    batchify_fn = nlp.data.batchify.Tuple(
        nlp.data.batchify.Tuple(
            nlp.data.batchify.Pad(axis=0), nlp.data.batchify.Stack(),
            nlp.data.batchify.Pad(axis=0), nlp.data.batchify.Stack(bow_count_dtype), nlp.data.batchify.Stack(label_dtype)))
    if train_mode:
        # Bucket sampler groups examples of similar length to limit padding.
        num_buckets = min(6, len(data_ds) // batch_size)
        batch_sampler = nlp.data.sampler.FixedBucketSampler(
            final_ds_len,
            batch_size=batch_size,
            num_buckets=num_buckets,
            ratio=0.2, # may avoid batches with size = 1 (which may trigger a bug)
            shuffle=True)
        # data loader for training
        loader = gluon.data.DataLoader(
            dataset=final_ds,
            num_workers=4,
            batch_sampler=batch_sampler,
            batchify_fn=batchify_fn)
    else:
        loader = gluon.data.DataLoader(
            dataset=final_ds,
            batch_size=batch_size,
            num_workers=4,
            shuffle=False,
            batchify_fn=batchify_fn)
    return loader, len(data_ds)
def get_aux_dataloader(trans, batch_size, aux_dataset):
    """Build a shuffling DataLoader over an auxiliary dataset.

    Uses ``last_batch='rollover'`` so every emitted batch has exactly
    ``batch_size`` examples (required by callers that pair batches).

    Returns
    -------
    DataLoader
    """
    label_dtype = 'float32' # if not task.class_labels else 'int32'
    bow_count_dtype = 'float32'
    # Transform in parallel; the context manager releases the worker
    # processes afterwards (the previous version leaked the Pool).
    with multiprocessing.Pool() as pool:
        aux_ds = mx.gluon.data.SimpleDataset(pool.map(trans, aux_dataset))
    a_batchify_fn = nlp.data.batchify.Tuple(
        nlp.data.batchify.Pad(axis=0), nlp.data.batchify.Stack(),
        nlp.data.batchify.Pad(axis=0), nlp.data.batchify.Stack(bow_count_dtype), nlp.data.batchify.Stack(label_dtype))
    loader_aux = gluon.data.DataLoader(
        dataset=aux_ds,
        num_workers=4,
        last_batch = 'rollover', ## need to ensure all batches are the same size here
        shuffle=True, # shuffle optional (for training)
        batch_size = batch_size,
        batchify_fn = a_batchify_fn)
    return loader_aux
def get_bert_datasets(class_labels,
                      vectorizer,
                      train_ds,
                      dev_ds,
                      batch_size,
                      max_len,
                      aux_ds = None,
                      bert_model_name = 'bert_12_768_12',
                      bert_dataset = 'book_corpus_wiki_en_uncased',
                      pad=False,
                      use_bert_vocab=False,
                      label_alias=None,
                      num_classes = None,
                      ctx=mx.cpu()):
    """Build train/dev (and optional auxiliary) dataloaders backed by a
    pretrained BERT encoder.

    Returns
    -------
    (train_loader, dev_loader, aux_loader_or_None, num_train_examples,
     bert_model, bert_vocabulary)
    """
    if class_labels is None and num_classes is None:
        raise Exception("Must provide class_labels or num_classes")
    # Fetch the pretrained encoder and its vocabulary.
    model, vocabulary = get_model(
        name=bert_model_name,
        dataset_name=bert_dataset,
        pretrained=True,
        ctx=ctx,
        use_pooler=True,
        use_decoder=False,
        use_classifier=False)
    tokenizer = BERTTokenizer(vocabulary, lower=('uncased' in bert_dataset))
    transform = BERTDatasetTransform(tokenizer, max_len,
                                     class_labels=class_labels,
                                     label_alias=label_alias,
                                     pad=pad, pair=False,
                                     has_label=True,
                                     vectorizer=vectorizer,
                                     bert_vocab_size = (len(vocabulary) if use_bert_vocab else 0),
                                     num_classes = num_classes)
    train_data, num_train_examples = preprocess_seq_data(transform, class_labels, train_ds, batch_size, max_len, train_mode=True, pad=pad)
    dev_data, _ = preprocess_seq_data(transform, class_labels, dev_ds, batch_size, max_len, train_mode=False, pad=pad)
    aux_data = get_aux_dataloader(transform, batch_size, aux_ds) if aux_ds is not None else None
    return train_data, dev_data, aux_data, num_train_examples, model, vocabulary
############
# Handle dataloading for Smoothed Deep Metric Loss with parallel batching
############
def preprocess_data_metriclearn(trans, class_labels, train_a_ds, train_b_ds, batch_size, max_len, pad=False, bucket_sample=False, aux_dataset=None):
    """Train/eval Data preparation function.

    Transforms the two parallel datasets with ``trans``, zips them with
    ``UnevenArrayDataset`` so batches stay paired, and returns a DataLoader
    yielding (a, b[, aux]) batches plus the joined dataset length.
    """
    label_dtype = 'float32' # if not task.class_labels else 'int32'
    bow_count_dtype = 'float32'
    # Transform in parallel; use the pool as a context manager so worker
    # processes are reclaimed (the previous version leaked one Pool per call).
    with multiprocessing.Pool() as pool:
        a_data_train = mx.gluon.data.SimpleDataset(pool.map(trans, train_a_ds))
        b_data_train = mx.gluon.data.SimpleDataset(pool.map(trans, train_b_ds))
        aux_ds = mx.gluon.data.SimpleDataset(pool.map(trans, aux_dataset)) if aux_dataset is not None else None
    # magic that "zips" these two datasets and pairs batches
    joined_data_train = UnevenArrayDataset(a_data_train, b_data_train)
    joined_len = joined_data_train.transform( lambda a, b: a[1] + b[1], lazy=False ) ## a[1] and b[1] are lengths, bucket by sum
    if aux_dataset is None:
        final_ds = joined_data_train.transform( lambda a,b: ((a,b),) ) # singleton tuple
        final_len = joined_len
        batchify_fn = nlp.data.batchify.Tuple(
            nlp.data.batchify.Tuple(
                ## tuple for a_data: (ids, lengths, segments, bow vector, label)
                nlp.data.batchify.Tuple(
                    nlp.data.batchify.Pad(axis=0), nlp.data.batchify.Stack(),
                    nlp.data.batchify.Pad(axis=0), nlp.data.batchify.Stack(bow_count_dtype), nlp.data.batchify.Stack(label_dtype)),
                ## tuple for b_data
                nlp.data.batchify.Tuple(
                    nlp.data.batchify.Pad(axis=0), nlp.data.batchify.Stack(),
                    nlp.data.batchify.Pad(axis=0), nlp.data.batchify.Stack(bow_count_dtype), nlp.data.batchify.Stack(label_dtype))))
    else:
        final_ds = UnevenArrayDataset(joined_data_train, aux_ds)
        logging.info("Uneven dataset created, size = {} (from data_ds = {}, aux_ds = {})".format(len(final_ds), len(joined_data_train), len(aux_ds)))
        final_len = final_ds.transform( lambda a, b: a[0][1] + a[1][1] + b[1], lazy=False )
        batchify_fn = nlp.data.batchify.Tuple(
            nlp.data.batchify.Tuple(
                ## tuple for a_data: (ids, lengths, segments, bow vector, label)
                nlp.data.batchify.Tuple(
                    nlp.data.batchify.Pad(axis=0), nlp.data.batchify.Stack(),
                    nlp.data.batchify.Pad(axis=0), nlp.data.batchify.Stack(bow_count_dtype), nlp.data.batchify.Stack(label_dtype)),
                ## tuple for b_data
                nlp.data.batchify.Tuple(
                    nlp.data.batchify.Pad(axis=0), nlp.data.batchify.Stack(),
                    nlp.data.batchify.Pad(axis=0), nlp.data.batchify.Stack(bow_count_dtype), nlp.data.batchify.Stack(label_dtype))),
            # tuple for auxilliary data
            nlp.data.batchify.Tuple(
                nlp.data.batchify.Pad(axis=0), nlp.data.batchify.Stack(),
                nlp.data.batchify.Pad(axis=0), nlp.data.batchify.Stack(bow_count_dtype), nlp.data.batchify.Stack(label_dtype)))
    if bucket_sample:
        batch_sampler = nlp.data.sampler.FixedBucketSampler(
            final_len,
            batch_size=batch_size,
            num_buckets=4,
            ratio=0.2,
            shuffle=True)
        loader = gluon.data.DataLoader(
            dataset=final_ds,
            num_workers=4,
            batch_sampler=batch_sampler,
            batchify_fn=batchify_fn)
    else:
        loader = gluon.data.DataLoader(
            dataset=final_ds,
            num_workers=4,
            shuffle=False, batch_size = batch_size,
            batchify_fn=batchify_fn)
    return loader, len(final_ds)
def preprocess_data_metriclearn_separate(trans1, trans2, class_labels, train_a_ds, train_b_ds, batch_size, shuffle_both=False, shuffle_a_only=True):
    """Train/eval Data preparation function.

    Builds two separate DataLoaders (one per dataset/transform) whose
    samplers stay in lock-step, and pairs them with ``PairedDataLoader``.

    Returns
    -------
    (PairedDataLoader, int)
        The paired loader and the length of the `a` dataset.
    """
    label_dtype = 'float32' # if not task.class_labels else 'int32'
    bow_count_dtype = 'float32'
    # Transform in parallel; the context manager releases the worker
    # processes afterwards (the previous version leaked the Pool).
    with multiprocessing.Pool() as pool:
        a_data_train = mx.gluon.data.SimpleDataset(pool.map(trans1, train_a_ds))
        b_data_train = mx.gluon.data.SimpleDataset(pool.map(trans2, train_b_ds))
    a_batchify_fn = nlp.data.batchify.Tuple(
        nlp.data.batchify.Pad(axis=0), nlp.data.batchify.Stack(),
        nlp.data.batchify.Pad(axis=0), nlp.data.batchify.Stack(bow_count_dtype), nlp.data.batchify.Stack(label_dtype))
    b_batchify_fn = nlp.data.batchify.Tuple(
        nlp.data.batchify.Pad(axis=0), nlp.data.batchify.Stack(),
        nlp.data.batchify.Pad(axis=0), nlp.data.batchify.Stack(bow_count_dtype), nlp.data.batchify.Stack(label_dtype))
    ## set up 'parallel' samplers that always stay in sync
    if shuffle_both:
        # Same fixed seed on both sides => identical permutations.
        a_sampler = FixedSeedRandomSampler(len(a_data_train), rng=1234)
        b_sampler = FixedSeedRandomSampler(len(b_data_train), rng=1234)
    elif shuffle_a_only:
        a_sampler = FixedSeedRandomSampler(len(a_data_train), rng=1234)
        b_sampler = SequentialSampler(len(b_data_train))
    else:
        a_sampler = SequentialSampler(len(a_data_train))
        b_sampler = SequentialSampler(len(b_data_train))
    a_loader_train = gluon.data.DataLoader(
        dataset=a_data_train,
        num_workers=4,
        last_batch = 'discard', ## need to ensure all batches are the same size here AND stay synchronized
        sampler = a_sampler,
        batch_size = batch_size,
        batchify_fn = a_batchify_fn)
    b_loader_train = gluon.data.DataLoader(
        dataset=b_data_train,
        num_workers=4,
        sampler = b_sampler,
        batch_size = batch_size,
        batchify_fn = b_batchify_fn)
    paired_loader = PairedDataLoader(a_loader_train, b_loader_train)
    return paired_loader, len(a_data_train)
def get_dual_bert_datasets(class_labels,
                           vectorizer,
                           train_ds1,
                           train_ds2,
                           model_name,
                           dataset,
                           max_len1,
                           max_len2,
                           pad,
                           use_bert_vocab=False,
                           shuffle_both=False,
                           shuffle_a_only=False,
                           aux_dataset = None,
                           forced_batch_size = 0,
                           aux_batch_size = 32,
                           ctx=mx.cpu()):
    """Build paired BERT dataloaders over two parallel datasets (metric learning).

    ``train_ds1``/``train_ds2`` may be either single datasets or lists of
    parallel datasets (same length); in the list case one paired loader is
    created per pair and they are round-robined.  When ``forced_batch_size``
    is 0, the batch size defaults to the size of the corresponding ``train_ds2``
    dataset (i.e. the whole `b` side becomes one batch).

    Returns
    -------
    (train_data, aux_dataloader_or_None, num_train_examples, bert, bert_vocabulary)
    """
    bert, bert_vocabulary = get_model(
        name=model_name,
        dataset_name=dataset,
        pretrained=True,
        ctx=ctx,
        use_pooler=True,
        use_decoder=False,
        use_classifier=False)
    do_lower_case = 'uncased' in dataset
    bert_tokenizer = BERTTokenizer(bert_vocabulary, lower=do_lower_case)
    def get_transform(_class_labels, _max_len):
        # Helper: per-example transform bound to the shared tokenizer/vocab.
        trans = BERTDatasetTransform(bert_tokenizer,
                                     _max_len,
                                     class_labels = _class_labels,
                                     label_alias=None,
                                     pad=pad, pair=False,
                                     has_label=True,
                                     vectorizer=vectorizer,
                                     bert_vocab_size=len(bert_vocabulary) if use_bert_vocab else 0)
        return trans
    if isinstance(train_ds1, list) and isinstance(train_ds2, list) and len(train_ds1) == len(train_ds2):
        # Multiple parallel dataset pairs: one transform + paired loader per pair.
        # NOTE(review): this branch indexes class_labels[i], so class_labels is
        # presumably a parallel list here — confirm against callers.
        trans1s = [ get_transform(class_labels[i], max_len1) for i in range(len(train_ds1)) ]
        trans2s = [ get_transform(class_labels[i], max_len2) for i in range(len(train_ds2)) ]
        train_sets = [
            preprocess_data_metriclearn_separate(trans1s[i], trans2s[i], class_labels[i], train_ds1[i], train_ds2[i],
                                                 (forced_batch_size or len(train_ds2[i])), shuffle_a_only=shuffle_a_only, shuffle_both=shuffle_both)
            for i in range(len(train_ds1)) ]
        # Sum the per-pair example counts (accumulate yields running totals; pop the last).
        num_train_examples = list(accumulate([ s for _,s in train_sets], lambda x,y: x + y)).pop()
        loaders = [l for l,_ in train_sets]
        train_data = RoundRobinDataLoader(loaders)
    else:
        batch_size = forced_batch_size or len(train_ds2)
        trans1 = get_transform(class_labels, max_len1)
        trans2 = get_transform(class_labels, max_len2)
        train_data, num_train_examples = preprocess_data_metriclearn_separate(
            trans1, trans2, class_labels, train_ds1, train_ds2, batch_size, shuffle_a_only=shuffle_a_only, shuffle_both=shuffle_both)
    if aux_dataset is not None:
        # Auxiliary data is unlabeled here (empty class-label list).
        aux_trans = get_transform([], max_len1)
        aux_dataloader = get_aux_dataloader(aux_trans, aux_batch_size, aux_dataset)
    else:
        aux_dataloader = None
    return train_data, aux_dataloader, num_train_examples, bert, bert_vocabulary
| [
"gluonnlp.data.batchify.Pad",
"numpy.array",
"numpy.arange",
"gluonnlp.data.sampler.FixedBucketSampler",
"numpy.random.seed",
"mxnet.nd.array",
"mxnet.gluon.data.DataLoader",
"os.path.expanduser",
"json.loads",
"gluonnlp.model.get_model",
"random.uniform",
"tmnt.data_loading.PairedDataLoader",... | [((10160, 10182), 'multiprocessing.Pool', 'multiprocessing.Pool', ([], {}), '()\n', (10180, 10182), False, 'import multiprocessing\n'), ((11883, 11905), 'multiprocessing.Pool', 'multiprocessing.Pool', ([], {}), '()\n', (11903, 11905), False, 'import multiprocessing\n'), ((12329, 12472), 'mxnet.gluon.data.DataLoader', 'gluon.data.DataLoader', ([], {'dataset': 'aux_ds', 'num_workers': '(4)', 'last_batch': '"""rollover"""', 'shuffle': '(True)', 'batch_size': 'batch_size', 'batchify_fn': 'a_batchify_fn'}), "(dataset=aux_ds, num_workers=4, last_batch='rollover',\n shuffle=True, batch_size=batch_size, batchify_fn=a_batchify_fn)\n", (12350, 12472), False, 'from mxnet import gluon\n'), ((13183, 13191), 'mxnet.cpu', 'mx.cpu', ([], {}), '()\n', (13189, 13191), True, 'import mxnet as mx\n'), ((13343, 13489), 'gluonnlp.model.get_model', 'get_model', ([], {'name': 'bert_model_name', 'dataset_name': 'bert_dataset', 'pretrained': '(True)', 'ctx': 'ctx', 'use_pooler': '(True)', 'use_decoder': '(False)', 'use_classifier': '(False)'}), '(name=bert_model_name, dataset_name=bert_dataset, pretrained=True,\n ctx=ctx, use_pooler=True, use_decoder=False, use_classifier=False)\n', (13352, 13489), False, 'from gluonnlp.model import get_model\n'), ((13614, 13665), 'gluonnlp.data.BERTTokenizer', 'BERTTokenizer', (['bert_vocabulary'], {'lower': 'do_lower_case'}), '(bert_vocabulary, lower=do_lower_case)\n', (13627, 13665), False, 'from gluonnlp.data import BERTTokenizer\n'), ((14931, 14953), 'multiprocessing.Pool', 'multiprocessing.Pool', ([], {}), '()\n', (14951, 14953), False, 'import multiprocessing\n'), ((18536, 18558), 'multiprocessing.Pool', 'multiprocessing.Pool', ([], {}), '()\n', (18556, 18558), False, 'import multiprocessing\n'), ((19805, 19964), 'mxnet.gluon.data.DataLoader', 'gluon.data.DataLoader', ([], {'dataset': 'a_data_train', 'num_workers': '(4)', 'last_batch': '"""discard"""', 'sampler': 'a_sampler', 'batch_size': 'batch_size', 
'batchify_fn': 'a_batchify_fn'}), "(dataset=a_data_train, num_workers=4, last_batch=\n 'discard', sampler=a_sampler, batch_size=batch_size, batchify_fn=\n a_batchify_fn)\n", (19826, 19964), False, 'from mxnet import gluon\n'), ((20112, 20244), 'mxnet.gluon.data.DataLoader', 'gluon.data.DataLoader', ([], {'dataset': 'b_data_train', 'num_workers': '(4)', 'sampler': 'b_sampler', 'batch_size': 'batch_size', 'batchify_fn': 'b_batchify_fn'}), '(dataset=b_data_train, num_workers=4, sampler=\n b_sampler, batch_size=batch_size, batchify_fn=b_batchify_fn)\n', (20133, 20244), False, 'from mxnet import gluon\n'), ((20308, 20356), 'tmnt.data_loading.PairedDataLoader', 'PairedDataLoader', (['a_loader_train', 'b_loader_train'], {}), '(a_loader_train, b_loader_train)\n', (20324, 20356), False, 'from tmnt.data_loading import to_label_matrix, PairedDataLoader, RoundRobinDataLoader\n'), ((21063, 21071), 'mxnet.cpu', 'mx.cpu', ([], {}), '()\n', (21069, 21071), True, 'import mxnet as mx\n'), ((21102, 21238), 'gluonnlp.model.get_model', 'get_model', ([], {'name': 'model_name', 'dataset_name': 'dataset', 'pretrained': '(True)', 'ctx': 'ctx', 'use_pooler': '(True)', 'use_decoder': '(False)', 'use_classifier': '(False)'}), '(name=model_name, dataset_name=dataset, pretrained=True, ctx=ctx,\n use_pooler=True, use_decoder=False, use_classifier=False)\n', (21111, 21238), False, 'from gluonnlp.model import get_model\n'), ((21358, 21409), 'gluonnlp.data.BERTTokenizer', 'BERTTokenizer', (['bert_vocabulary'], {'lower': 'do_lower_case'}), '(bert_vocabulary, lower=do_lower_case)\n', (21371, 21409), False, 'from gluonnlp.data import BERTTokenizer\n'), ((5708, 5776), 'gluonnlp.data.BERTSentenceTransform', 'BERTSentenceTransform', (['tokenizer', 'max_seq_length'], {'pad': 'pad', 'pair': 'pair'}), '(tokenizer, max_seq_length, pad=pad, pair=pair)\n', (5729, 5776), False, 'from gluonnlp.data import BERTSentenceTransform\n'), ((9820, 9859), 'numpy.random.seed', 'np.random.seed', (['(self._rng + 
self._calls)'], {}), '(self._rng + self._calls)\n', (9834, 9859), True, 'import numpy as np\n'), ((9878, 9901), 'numpy.arange', 'np.arange', (['self._length'], {}), '(self._length)\n', (9887, 9901), True, 'import numpy as np\n'), ((9910, 9936), 'numpy.random.shuffle', 'np.random.shuffle', (['indices'], {}), '(indices)\n', (9927, 9936), True, 'import numpy as np\n'), ((11123, 11249), 'gluonnlp.data.sampler.FixedBucketSampler', 'nlp.data.sampler.FixedBucketSampler', (['final_ds_len'], {'batch_size': 'batch_size', 'num_buckets': 'num_buckets', 'ratio': '(0.2)', 'shuffle': '(True)'}), '(final_ds_len, batch_size=batch_size,\n num_buckets=num_buckets, ratio=0.2, shuffle=True)\n', (11158, 11249), True, 'import gluonnlp as nlp\n'), ((11418, 11531), 'mxnet.gluon.data.DataLoader', 'gluon.data.DataLoader', ([], {'dataset': 'final_ds', 'num_workers': '(4)', 'batch_sampler': 'batch_sampler', 'batchify_fn': 'batchify_fn'}), '(dataset=final_ds, num_workers=4, batch_sampler=\n batch_sampler, batchify_fn=batchify_fn)\n', (11439, 11531), False, 'from mxnet import gluon\n'), ((11603, 11725), 'mxnet.gluon.data.DataLoader', 'gluon.data.DataLoader', ([], {'dataset': 'final_ds', 'batch_size': 'batch_size', 'num_workers': '(4)', 'shuffle': '(False)', 'batchify_fn': 'batchify_fn'}), '(dataset=final_ds, batch_size=batch_size, num_workers=\n 4, shuffle=False, batchify_fn=batchify_fn)\n', (11624, 11725), False, 'from mxnet import gluon\n'), ((12135, 12164), 'gluonnlp.data.batchify.Pad', 'nlp.data.batchify.Pad', ([], {'axis': '(0)'}), '(axis=0)\n', (12156, 12164), True, 'import gluonnlp as nlp\n'), ((12166, 12191), 'gluonnlp.data.batchify.Stack', 'nlp.data.batchify.Stack', ([], {}), '()\n', (12189, 12191), True, 'import gluonnlp as nlp\n'), ((12201, 12230), 'gluonnlp.data.batchify.Pad', 'nlp.data.batchify.Pad', ([], {'axis': '(0)'}), '(axis=0)\n', (12222, 12230), True, 'import gluonnlp as nlp\n'), ((12232, 12272), 'gluonnlp.data.batchify.Stack', 'nlp.data.batchify.Stack', (['bow_count_dtype'], 
{}), '(bow_count_dtype)\n', (12255, 12272), True, 'import gluonnlp as nlp\n'), ((12274, 12310), 'gluonnlp.data.batchify.Stack', 'nlp.data.batchify.Stack', (['label_dtype'], {}), '(label_dtype)\n', (12297, 12310), True, 'import gluonnlp as nlp\n'), ((17751, 17864), 'gluonnlp.data.sampler.FixedBucketSampler', 'nlp.data.sampler.FixedBucketSampler', (['final_len'], {'batch_size': 'batch_size', 'num_buckets': '(4)', 'ratio': '(0.2)', 'shuffle': '(True)'}), '(final_len, batch_size=batch_size,\n num_buckets=4, ratio=0.2, shuffle=True)\n', (17786, 17864), True, 'import gluonnlp as nlp\n'), ((17939, 18052), 'mxnet.gluon.data.DataLoader', 'gluon.data.DataLoader', ([], {'dataset': 'final_ds', 'num_workers': '(4)', 'batch_sampler': 'batch_sampler', 'batchify_fn': 'batchify_fn'}), '(dataset=final_ds, num_workers=4, batch_sampler=\n batch_sampler, batchify_fn=batchify_fn)\n', (17960, 18052), False, 'from mxnet import gluon\n'), ((18124, 18245), 'mxnet.gluon.data.DataLoader', 'gluon.data.DataLoader', ([], {'dataset': 'final_ds', 'num_workers': '(4)', 'shuffle': '(False)', 'batch_size': 'batch_size', 'batchify_fn': 'batchify_fn'}), '(dataset=final_ds, num_workers=4, shuffle=False,\n batch_size=batch_size, batchify_fn=batchify_fn)\n', (18145, 18245), False, 'from mxnet import gluon\n'), ((18868, 18897), 'gluonnlp.data.batchify.Pad', 'nlp.data.batchify.Pad', ([], {'axis': '(0)'}), '(axis=0)\n', (18889, 18897), True, 'import gluonnlp as nlp\n'), ((18899, 18924), 'gluonnlp.data.batchify.Stack', 'nlp.data.batchify.Stack', ([], {}), '()\n', (18922, 18924), True, 'import gluonnlp as nlp\n'), ((18934, 18963), 'gluonnlp.data.batchify.Pad', 'nlp.data.batchify.Pad', ([], {'axis': '(0)'}), '(axis=0)\n', (18955, 18963), True, 'import gluonnlp as nlp\n'), ((18965, 19005), 'gluonnlp.data.batchify.Stack', 'nlp.data.batchify.Stack', (['bow_count_dtype'], {}), '(bow_count_dtype)\n', (18988, 19005), True, 'import gluonnlp as nlp\n'), ((19007, 19043), 'gluonnlp.data.batchify.Stack', 
'nlp.data.batchify.Stack', (['label_dtype'], {}), '(label_dtype)\n', (19030, 19043), True, 'import gluonnlp as nlp\n'), ((19103, 19132), 'gluonnlp.data.batchify.Pad', 'nlp.data.batchify.Pad', ([], {'axis': '(0)'}), '(axis=0)\n', (19124, 19132), True, 'import gluonnlp as nlp\n'), ((19134, 19159), 'gluonnlp.data.batchify.Stack', 'nlp.data.batchify.Stack', ([], {}), '()\n', (19157, 19159), True, 'import gluonnlp as nlp\n'), ((19169, 19198), 'gluonnlp.data.batchify.Pad', 'nlp.data.batchify.Pad', ([], {'axis': '(0)'}), '(axis=0)\n', (19190, 19198), True, 'import gluonnlp as nlp\n'), ((19200, 19240), 'gluonnlp.data.batchify.Stack', 'nlp.data.batchify.Stack', (['bow_count_dtype'], {}), '(bow_count_dtype)\n', (19223, 19240), True, 'import gluonnlp as nlp\n'), ((19242, 19278), 'gluonnlp.data.batchify.Stack', 'nlp.data.batchify.Stack', (['label_dtype'], {}), '(label_dtype)\n', (19265, 19278), True, 'import gluonnlp as nlp\n'), ((22781, 22810), 'tmnt.data_loading.RoundRobinDataLoader', 'RoundRobinDataLoader', (['loaders'], {}), '(loaders)\n', (22801, 22810), False, 'from tmnt.data_loading import to_label_matrix, PairedDataLoader, RoundRobinDataLoader\n'), ((1442, 1463), 'os.path.expanduser', 'os.path.expanduser', (['f'], {}), '(f)\n', (1460, 1463), False, 'import os\n'), ((10811, 10840), 'gluonnlp.data.batchify.Pad', 'nlp.data.batchify.Pad', ([], {'axis': '(0)'}), '(axis=0)\n', (10832, 10840), True, 'import gluonnlp as nlp\n'), ((10842, 10867), 'gluonnlp.data.batchify.Stack', 'nlp.data.batchify.Stack', ([], {}), '()\n', (10865, 10867), True, 'import gluonnlp as nlp\n'), ((10885, 10914), 'gluonnlp.data.batchify.Pad', 'nlp.data.batchify.Pad', ([], {'axis': '(0)'}), '(axis=0)\n', (10906, 10914), True, 'import gluonnlp as nlp\n'), ((10916, 10956), 'gluonnlp.data.batchify.Stack', 'nlp.data.batchify.Stack', (['bow_count_dtype'], {}), '(bow_count_dtype)\n', (10939, 10956), True, 'import gluonnlp as nlp\n'), ((10958, 10994), 'gluonnlp.data.batchify.Stack', 'nlp.data.batchify.Stack', 
(['label_dtype'], {}), '(label_dtype)\n', (10981, 10994), True, 'import gluonnlp as nlp\n'), ((8648, 8702), 'tmnt.data_loading.to_label_matrix', 'to_label_matrix', (['[labels]'], {'num_labels': 'self.num_classes'}), '([labels], num_labels=self.num_classes)\n', (8663, 8702), False, 'from tmnt.data_loading import to_label_matrix, PairedDataLoader, RoundRobinDataLoader\n'), ((8749, 8766), 'numpy.array', 'np.array', (['[[0.0]]'], {}), '([[0.0]])\n', (8757, 8766), True, 'import numpy as np\n'), ((8912, 8942), 'numpy.zeros', 'np.zeros', (['self.bert_vocab_size'], {}), '(self.bert_vocab_size)\n', (8920, 8942), True, 'import numpy as np\n'), ((8972, 9012), 'numpy.unique', 'np.unique', (['input_ids'], {'return_counts': '(True)'}), '(input_ids, return_counts=True)\n', (8981, 9012), True, 'import numpy as np\n'), ((17515, 17544), 'gluonnlp.data.batchify.Pad', 'nlp.data.batchify.Pad', ([], {'axis': '(0)'}), '(axis=0)\n', (17536, 17544), True, 'import gluonnlp as nlp\n'), ((17546, 17571), 'gluonnlp.data.batchify.Stack', 'nlp.data.batchify.Stack', ([], {}), '()\n', (17569, 17571), True, 'import gluonnlp as nlp\n'), ((17593, 17622), 'gluonnlp.data.batchify.Pad', 'nlp.data.batchify.Pad', ([], {'axis': '(0)'}), '(axis=0)\n', (17614, 17622), True, 'import gluonnlp as nlp\n'), ((17624, 17664), 'gluonnlp.data.batchify.Stack', 'nlp.data.batchify.Stack', (['bow_count_dtype'], {}), '(bow_count_dtype)\n', (17647, 17664), True, 'import gluonnlp as nlp\n'), ((17666, 17702), 'gluonnlp.data.batchify.Stack', 'nlp.data.batchify.Stack', (['label_dtype'], {}), '(label_dtype)\n', (17689, 17702), True, 'import gluonnlp as nlp\n'), ((9080, 9102), 'numpy.expand_dims', 'np.expand_dims', (['bow', '(0)'], {}), '(bow, 0)\n', (9094, 9102), True, 'import numpy as np\n'), ((9238, 9271), 'mxnet.nd.array', 'mx.nd.array', (['bow'], {'dtype': '"""float32"""'}), "(bow, dtype='float32')\n", (9249, 9271), True, 'import mxnet as mx\n'), ((15844, 15873), 'gluonnlp.data.batchify.Pad', 'nlp.data.batchify.Pad', ([], 
{'axis': '(0)'}), '(axis=0)\n', (15865, 15873), True, 'import gluonnlp as nlp\n'), ((15875, 15900), 'gluonnlp.data.batchify.Stack', 'nlp.data.batchify.Stack', ([], {}), '()\n', (15898, 15900), True, 'import gluonnlp as nlp\n'), ((15922, 15951), 'gluonnlp.data.batchify.Pad', 'nlp.data.batchify.Pad', ([], {'axis': '(0)'}), '(axis=0)\n', (15943, 15951), True, 'import gluonnlp as nlp\n'), ((15953, 15993), 'gluonnlp.data.batchify.Stack', 'nlp.data.batchify.Stack', (['bow_count_dtype'], {}), '(bow_count_dtype)\n', (15976, 15993), True, 'import gluonnlp as nlp\n'), ((15995, 16031), 'gluonnlp.data.batchify.Stack', 'nlp.data.batchify.Stack', (['label_dtype'], {}), '(label_dtype)\n', (16018, 16031), True, 'import gluonnlp as nlp\n'), ((16131, 16160), 'gluonnlp.data.batchify.Pad', 'nlp.data.batchify.Pad', ([], {'axis': '(0)'}), '(axis=0)\n', (16152, 16160), True, 'import gluonnlp as nlp\n'), ((16162, 16187), 'gluonnlp.data.batchify.Stack', 'nlp.data.batchify.Stack', ([], {}), '()\n', (16185, 16187), True, 'import gluonnlp as nlp\n'), ((16209, 16238), 'gluonnlp.data.batchify.Pad', 'nlp.data.batchify.Pad', ([], {'axis': '(0)'}), '(axis=0)\n', (16230, 16238), True, 'import gluonnlp as nlp\n'), ((16240, 16280), 'gluonnlp.data.batchify.Stack', 'nlp.data.batchify.Stack', (['bow_count_dtype'], {}), '(bow_count_dtype)\n', (16263, 16280), True, 'import gluonnlp as nlp\n'), ((16282, 16318), 'gluonnlp.data.batchify.Stack', 'nlp.data.batchify.Stack', (['label_dtype'], {}), '(label_dtype)\n', (16305, 16318), True, 'import gluonnlp as nlp\n'), ((16940, 16969), 'gluonnlp.data.batchify.Pad', 'nlp.data.batchify.Pad', ([], {'axis': '(0)'}), '(axis=0)\n', (16961, 16969), True, 'import gluonnlp as nlp\n'), ((16971, 16996), 'gluonnlp.data.batchify.Stack', 'nlp.data.batchify.Stack', ([], {}), '()\n', (16994, 16996), True, 'import gluonnlp as nlp\n'), ((17018, 17047), 'gluonnlp.data.batchify.Pad', 'nlp.data.batchify.Pad', ([], {'axis': '(0)'}), '(axis=0)\n', (17039, 17047), True, 'import gluonnlp 
as nlp\n'), ((17049, 17089), 'gluonnlp.data.batchify.Stack', 'nlp.data.batchify.Stack', (['bow_count_dtype'], {}), '(bow_count_dtype)\n', (17072, 17089), True, 'import gluonnlp as nlp\n'), ((17091, 17127), 'gluonnlp.data.batchify.Stack', 'nlp.data.batchify.Stack', (['label_dtype'], {}), '(label_dtype)\n', (17114, 17127), True, 'import gluonnlp as nlp\n'), ((17227, 17256), 'gluonnlp.data.batchify.Pad', 'nlp.data.batchify.Pad', ([], {'axis': '(0)'}), '(axis=0)\n', (17248, 17256), True, 'import gluonnlp as nlp\n'), ((17258, 17283), 'gluonnlp.data.batchify.Stack', 'nlp.data.batchify.Stack', ([], {}), '()\n', (17281, 17283), True, 'import gluonnlp as nlp\n'), ((17305, 17334), 'gluonnlp.data.batchify.Pad', 'nlp.data.batchify.Pad', ([], {'axis': '(0)'}), '(axis=0)\n', (17326, 17334), True, 'import gluonnlp as nlp\n'), ((17336, 17376), 'gluonnlp.data.batchify.Stack', 'nlp.data.batchify.Stack', (['bow_count_dtype'], {}), '(bow_count_dtype)\n', (17359, 17376), True, 'import gluonnlp as nlp\n'), ((17378, 17414), 'gluonnlp.data.batchify.Stack', 'nlp.data.batchify.Stack', (['label_dtype'], {}), '(label_dtype)\n', (17401, 17414), True, 'import gluonnlp as nlp\n'), ((22651, 22709), 'itertools.accumulate', 'accumulate', (['[s for _, s in train_sets]', '(lambda x, y: x + y)'], {}), '([s for _, s in train_sets], lambda x, y: x + y)\n', (22661, 22709), False, 'from itertools import accumulate\n'), ((2131, 2190), 'json.loads', 'json.loads', (['line'], {'object_pairs_hook': 'collections.OrderedDict'}), '(line, object_pairs_hook=collections.OrderedDict)\n', (2141, 2190), False, 'import json\n'), ((2057, 2077), 'random.uniform', 'random.uniform', (['(0)', '(1)'], {}), '(0, 1)\n', (2071, 2077), False, 'import random\n')] |
import numpy as np
from skimage.transform import pyramid_gaussian
from lv import get_contour_points, area2cont, cont2area, interpolate_contour
def window_image(img, cent_point, window):
    """Extract a ``window`` x ``window`` patch of ``img`` centred on ``cent_point``.

    The centre coordinates are rounded to the nearest pixel.  Wherever the
    window extends beyond the image boundary the patch is zero-padded on that
    side, so the returned array always has shape ``(window, window)`` for
    in-image centre points (``window`` is expected to be odd).

    Parameters
    ----------
    img : 2-D ndarray
        Source image.
    cent_point : (row, col)
        Centre of the window (may be fractional).
    window : int
        Side length of the square patch.

    Returns
    -------
    ndarray
        The (zero-padded) patch.
    """
    half = window // 2
    cy = int(np.round(cent_point[0]))
    cx = int(np.round(cent_point[1]))
    y0, y1 = cy - half, cy + half + 1
    x0, x1 = cx - half, cx + half + 1
    # Zero padding needed on each side where the window sticks out of the image.
    # (Bug fix: the original compared y1/x1 against the shape of the *already
    # sliced* patch, so windows overhanging the bottom/right edge came back
    # truncated instead of zero-padded.)
    pad_top = max(0, -y0)
    pad_left = max(0, -x0)
    pad_bottom = max(0, y1 - img.shape[0])
    pad_right = max(0, x1 - img.shape[1])
    patch = img[max(y0, 0):min(y1, img.shape[0]), max(x0, 0):min(x1, img.shape[1])]
    if pad_top or pad_bottom or pad_left or pad_right:
        patch = np.pad(patch, ((pad_top, pad_bottom), (pad_left, pad_right)),
                       mode='constant')
    return patch
class LucasKanade:
    """Pyramidal Lucas-Kanade optical-flow tracker for contour points.

    Tracks a fixed set of contour points through an image sequence using
    a coarse-to-fine Lucas-Kanade flow estimate on a Gaussian pyramid,
    then rebuilds a segmentation mask from the tracked contour.
    """
    def __init__(self, gauss_layers = 0, window = 7, num_points = 9):
        # Number of extra Gaussian-pyramid levels (0 = single scale).
        self.gauss_layers = gauss_layers
        # Side length of the square patch used for each flow estimate.
        self.window = window
        # Number of contour points extracted from the initial mask.
        self.num_points = num_points
    def get_points(self, imgs, points):
        """Track ``points`` across consecutive frames of ``imgs``.

        Returns a list of point lists: the initial points followed by the
        tracked points after each frame transition (one entry per frame).
        """
        points_results = [points]
        for ind in range(0, len(imgs)-1):
            # Gaussian pyramids for the current and next frame.
            r_1_imgs = list(pyramid_gaussian(imgs[ind], max_layer=self.gauss_layers))
            r_2_imgs = list(pyramid_gaussian(imgs[ind+1], max_layer=self.gauss_layers))
            new_points = []
            for point in points:
                # Accumulated (x, y) displacement as a 2x1 column vector.
                flow = np.array([[0], [0]])
                # Coarse-to-fine: iterate from the smallest pyramid level up.
                for l, (img_1, img_2) in enumerate(zip(r_1_imgs[::-1], r_2_imgs[::-1])):
                    img1 = window_image(img_1,
                                        (point[0] / 2 ** (self.gauss_layers - l),
                                         point[1] / 2 ** (self.gauss_layers - l)),
                                        self.window)
                    img2 = window_image(img_2,
                                        ((point[0] + flow[1]) / 2 ** (self.gauss_layers - l),
                                         (point[1] + flow[0]) / 2 ** (self.gauss_layers - l)),
                                        self.window)
                    # Spatial gradients of the first patch and temporal difference.
                    f_y, f_x = np.gradient(img1)
                    f_t = img1 - img2
                    # Normal equations of the Lucas-Kanade least-squares system.
                    A = np.array([[np.sum(f_x ** 2), np.sum(f_x * f_y)],
                                  [np.sum(f_x * f_y), np.sum(f_y ** 2)]])
                    B = np.array([[np.sum(f_x * f_t)],
                                  [np.sum(f_y * f_t)]
                                  ])
                    # lstsq is used instead of an explicit inverse for stability.
                    solv_flow = np.linalg.lstsq(A, B, rcond=None)[0]#np.matmul(np.linalg.inv(A), B)
                    # Doubling rescales the flow to the next (finer) pyramid level.
                    flow = 2 * (flow + solv_flow)
                new_points.append((point[0] + int(flow[1]), point[1] + int(flow[0])))
            points = new_points
            points_results.append(points)
        return points_results
    def predict(self, imgs, true_msk):
        """Track the contour of ``true_msk`` through ``imgs``.

        Returns (masks, point_lists): one filled mask and one point list per
        frame, where masks are rebuilt from the interpolated tracked contour.
        """
        cont_x, cont_y, *_ = get_contour_points(area2cont(true_msk), kind='contour', num = self.num_points)
        # Points are stored (row, col); contour helpers use (x, y).
        points = [(y, x) for x, y in zip(cont_x, cont_y)]
        results = self.get_points(imgs, points)
        msks = []
        for res in results:
            x = [p[1] for p in res]
            y = [p[0] for p in res]
            # NOTE(review): assumes 512x512 frames — confirm against callers.
            mask = np.zeros((512,512))
            p_x, p_y = interpolate_contour(np.array(x), np.array(y),)
            mask[p_y, p_x] = 1
            msks.append(cont2area(mask))
        return msks, results
| [
"lv.cont2area",
"lv.area2cont",
"skimage.transform.pyramid_gaussian",
"numpy.array",
"numpy.zeros",
"numpy.sum",
"numpy.linalg.lstsq",
"numpy.gradient",
"numpy.round"
] | [((204, 227), 'numpy.round', 'np.round', (['cent_point[0]'], {}), '(cent_point[0])\n', (212, 227), True, 'import numpy as np\n'), ((320, 343), 'numpy.round', 'np.round', (['cent_point[1]'], {}), '(cent_point[1])\n', (328, 343), True, 'import numpy as np\n'), ((3407, 3426), 'lv.area2cont', 'area2cont', (['true_msk'], {}), '(true_msk)\n', (3416, 3426), False, 'from lv import get_contour_points, area2cont, cont2area, interpolate_contour\n'), ((3710, 3730), 'numpy.zeros', 'np.zeros', (['(512, 512)'], {}), '((512, 512))\n', (3718, 3730), True, 'import numpy as np\n'), ((260, 283), 'numpy.round', 'np.round', (['cent_point[0]'], {}), '(cent_point[0])\n', (268, 283), True, 'import numpy as np\n'), ((376, 399), 'numpy.round', 'np.round', (['cent_point[1]'], {}), '(cent_point[1])\n', (384, 399), True, 'import numpy as np\n'), ((1630, 1686), 'skimage.transform.pyramid_gaussian', 'pyramid_gaussian', (['imgs[ind]'], {'max_layer': 'self.gauss_layers'}), '(imgs[ind], max_layer=self.gauss_layers)\n', (1646, 1686), False, 'from skimage.transform import pyramid_gaussian\n'), ((1716, 1776), 'skimage.transform.pyramid_gaussian', 'pyramid_gaussian', (['imgs[ind + 1]'], {'max_layer': 'self.gauss_layers'}), '(imgs[ind + 1], max_layer=self.gauss_layers)\n', (1732, 1776), False, 'from skimage.transform import pyramid_gaussian\n'), ((1860, 1880), 'numpy.array', 'np.array', (['[[0], [0]]'], {}), '([[0], [0]])\n', (1868, 1880), True, 'import numpy as np\n'), ((3773, 3784), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (3781, 3784), True, 'import numpy as np\n'), ((3786, 3797), 'numpy.array', 'np.array', (['y'], {}), '(y)\n', (3794, 3797), True, 'import numpy as np\n'), ((3855, 3870), 'lv.cont2area', 'cont2area', (['mask'], {}), '(mask)\n', (3864, 3870), False, 'from lv import get_contour_points, area2cont, cont2area, interpolate_contour\n'), ((748, 795), 'numpy.zeros', 'np.zeros', (['(window - img.shape[0], img.shape[1])'], {}), '((window - img.shape[0], img.shape[1]))\n', (756, 795), 
True, 'import numpy as np\n'), ((1046, 1093), 'numpy.zeros', 'np.zeros', (['(img.shape[0], window - img.shape[1])'], {}), '((img.shape[0], window - img.shape[1]))\n', (1054, 1093), True, 'import numpy as np\n'), ((2604, 2621), 'numpy.gradient', 'np.gradient', (['img1'], {}), '(img1)\n', (2615, 2621), True, 'import numpy as np\n'), ((891, 938), 'numpy.zeros', 'np.zeros', (['(window - img.shape[0], img.shape[1])'], {}), '((window - img.shape[0], img.shape[1]))\n', (899, 938), True, 'import numpy as np\n'), ((1189, 1236), 'numpy.zeros', 'np.zeros', (['(img.shape[0], window - img.shape[1])'], {}), '((img.shape[0], window - img.shape[1]))\n', (1197, 1236), True, 'import numpy as np\n'), ((2981, 3014), 'numpy.linalg.lstsq', 'np.linalg.lstsq', (['A', 'B'], {'rcond': 'None'}), '(A, B, rcond=None)\n', (2996, 3014), True, 'import numpy as np\n'), ((2695, 2711), 'numpy.sum', 'np.sum', (['(f_x ** 2)'], {}), '(f_x ** 2)\n', (2701, 2711), True, 'import numpy as np\n'), ((2713, 2730), 'numpy.sum', 'np.sum', (['(f_x * f_y)'], {}), '(f_x * f_y)\n', (2719, 2730), True, 'import numpy as np\n'), ((2767, 2784), 'numpy.sum', 'np.sum', (['(f_x * f_y)'], {}), '(f_x * f_y)\n', (2773, 2784), True, 'import numpy as np\n'), ((2786, 2802), 'numpy.sum', 'np.sum', (['(f_y ** 2)'], {}), '(f_y ** 2)\n', (2792, 2802), True, 'import numpy as np\n'), ((2841, 2858), 'numpy.sum', 'np.sum', (['(f_x * f_t)'], {}), '(f_x * f_t)\n', (2847, 2858), True, 'import numpy as np\n'), ((2894, 2911), 'numpy.sum', 'np.sum', (['(f_y * f_t)'], {}), '(f_y * f_t)\n', (2900, 2911), True, 'import numpy as np\n')] |
# Copyright 2019 <NAME>.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from typing import Any
import numpy as np
import torch
from torch import tensor as t
import mt.mvae.ops.spherical as S
import mt.mvae.ops.spherical_projected as SP
from mt.mvae.ops.poincare import pm
from mt.mvae.ops.common import eps
import tests.mvae.ops.test_spherical as TS
# Seed NumPy's global RNG so any randomised test data is reproducible.
np.random.seed(42)
# 100 random floats in [1, 101); NOTE(review): appears unused in the visible module — confirm before removing.
random_nums = np.random.random_sample(100) * 100 + 1
# Absolute tolerance shared by the approximate comparisons in these tests.
test_eps = 5e-6
# Radius of the projected sphere used as the default by every test below.
radius = torch.tensor(2., dtype=torch.float64)
def spherical_projected_distance_backprojection(x: torch.Tensor,
                                                y: torch.Tensor,
                                                radius: torch.Tensor = radius,
                                                **kwargs: Any) -> torch.Tensor:
    """Distance between two projected points, computed by mapping both back
    onto the sphere and measuring the spherical distance there."""
    x_on_sphere = SP.projected_to_spherical(x, radius)
    y_on_sphere = SP.projected_to_spherical(y, radius)
    return TS.spherical_distance(x_on_sphere, y_on_sphere, radius)
def parallel_transport(x: torch.Tensor, src: torch.Tensor, dst: torch.Tensor, radius: torch.Tensor) -> torch.Tensor:
    """Gyro parallel transport of the tangent vector ``x`` from ``src`` to ``dst``."""
    curvature = SP._c(radius)
    scale = SP.lambda_x_c(src, curvature) / SP.lambda_x_c(dst, curvature)
    return scale * SP.gyration(dst, -src, x, curvature)
def inverse_parallel_transport(x: torch.Tensor, src: torch.Tensor, dst: torch.Tensor,
                               radius: torch.Tensor) -> torch.Tensor:
    """Undo ``parallel_transport`` by transporting ``x`` with the endpoints swapped."""
    return parallel_transport(x, src=dst, dst=src, radius=radius)
def is_in_hyp_space(x: torch.Tensor, eps: float = eps, radius: torch.Tensor = radius) -> torch.Tensor:
    """Projected coordinates cover the whole ambient space, so membership is
    always true; returns a scalar uint8 tensor evaluating to True."""
    membership = torch.ones_like(x, dtype=torch.uint8)
    return membership.all()
def is_in_tangent_space(x: torch.Tensor, at_point: torch.Tensor, eps: float = eps,
                        radius: torch.Tensor = radius) -> torch.Tensor:
    """Approximately check that ``x`` is orthogonal to ``at_point``.

    TODO-LATER: this is most likely wrong; it only matters for tests, not the VAE.
    """
    assert is_in_hyp_space(at_point, radius=radius, eps=eps)
    inner = x.dot(at_point)
    return inner.allclose(torch.zeros_like(inner), atol=eps)
def test_mu_0() -> None:
    """mu_0 returns an all-zero tensor of the requested shape and dtype."""
    expected = t([[0, 0, 0], [0, 0, 0], [0, 0, 0]])
    actual = SP.mu_0((3, 3), dtype=torch.int64)
    assert actual.allclose(expected)
def test_is_in_hyp_space() -> None:
    """Every scaled point should be reported as lying on the manifold."""
    candidates = (
        t([1., 0, 0]),
        t([0.1, 2, 3]),
        t([1, 1, np.sqrt(2)]),
        t([1, 1, np.sqrt(2)]),
        t([0, 2, -np.sqrt(2)]),
    )
    for point in candidates:
        assert is_in_hyp_space(radius * point)
def test_is_in_tangent_space() -> None:
    # Disabled: is_in_tangent_space is known to be inexact (see its TODO);
    # the original assertions are kept below for reference.
    # assert is_in_tangent_space(t([0., 2, 3]), radius * t([1., 0, 0]))
    # assert is_in_tangent_space(t([0.1, 2, 3]), radius * t([1., 0, 0]))
    # # -0*2 + 2*1 - 2 = 0
    # assert is_in_tangent_space(t([0, 2, -np.sqrt(2)]), t([2, 1, np.sqrt(2)]), eps=test_eps)
    pass
def test_spherical_projected_distance_backproj() -> None:
    """Back-projected distance is zero on the diagonal and symmetric."""
    origin = t([0., 0, 0])
    point = t([2., 1., np.sqrt(2)])
    zero = t(0.)
    assert spherical_projected_distance_backprojection(origin, origin, radius).allclose(zero, atol=5e-4)
    assert spherical_projected_distance_backprojection(point, point, radius).allclose(zero, atol=5e-4)
    forward = spherical_projected_distance_backprojection(origin, point, radius)
    backward = spherical_projected_distance_backprojection(point, origin, radius)
    assert forward == backward
def test_spherical_projected_distance_normal() -> None:
    """Projected distance is zero on the diagonal and symmetric."""
    origin = t([0., 0, 0])
    point = t([2., 1., np.sqrt(2)])
    curvature = SP._c(radius)
    zero = t(0.)
    assert SP.spherical_projected_distance(origin, origin, curvature).allclose(zero, atol=5e-4)
    assert SP.spherical_projected_distance(point, point, curvature).allclose(zero, atol=5e-4)
    assert SP.spherical_projected_distance(origin, point, curvature) == SP.spherical_projected_distance(point, origin, curvature)
def test_spherical_projected_distance_gyr() -> None:
    """Gyro distance is zero on the diagonal and symmetric."""
    origin = t([0., 0, 0])
    point = t([2., 1., np.sqrt(2)])
    curvature = SP._c(radius)
    zero = t(0.)
    assert SP.spherical_projected_gyro_distance(origin, origin, curvature).allclose(zero, atol=5e-4)
    assert SP.spherical_projected_gyro_distance(point, point, curvature).allclose(zero, atol=5e-4)
    assert SP.spherical_projected_gyro_distance(origin, point, curvature) == SP.spherical_projected_gyro_distance(point, origin, curvature)
def test_spherical_projected_distance_all() -> None:
    """All three distance formulations agree on a sample pair of points."""
    origin = t([0., 0, 0])
    point = t([2., 1., np.sqrt(2)])
    curvature = SP._c(radius)
    reference = SP.spherical_projected_gyro_distance(origin, point, curvature)
    assert reference.allclose(SP.spherical_projected_distance(origin, point, curvature))
    assert reference.allclose(spherical_projected_distance_backprojection(origin, point, radius))
def test_mob_add() -> None:
    """x (+) (-x) = 0 for any curvature; for c=1 it matches the Poincaré
    implementation called with negated curvature."""
    a = t([2., 1, np.sqrt(2)]).double()
    b = t([2., 0.6, np.sqrt(3)]).double()
    zeros = torch.zeros_like(a)
    for c in (1., 0.1, 10):
        assert SP.mob_add(a, -a, c).allclose(zeros)
    assert SP.mob_add(a, b, 1).allclose(pm.mobius_add(a, b, c=-1))
def test_gyration() -> None:
    """Gyration by (x, -x) is the identity; for c=1 it matches Poincaré."""
    a = t([2., 1, np.sqrt(2)]).double()
    b = t([2., 0.6, np.sqrt(3)]).double()
    v = t([1., 1., 1.]).double()
    for c in (1., 0.1, 10):
        assert SP.gyration(a, -a, v, c).allclose(v)
    spherical_result = SP.gyration(a, b, v, c=1)
    poincare_result = pm.gyration(a, b, v, c=-1)
    assert spherical_result.allclose(poincare_result)
def test_parallel_transport() -> None:
    """Transport src->dst followed by dst->src is the identity."""
    point_a = t([2., 1, np.sqrt(2)]).double()
    point_b = t([np.sqrt(5), 1, np.sqrt(3)]).double()
    assert is_in_hyp_space(point_a)
    assert is_in_hyp_space(point_b)
    vec = t([1., 1., 1.]).double()
    # assert is_in_tangent_space(vec, at_point=point_a, eps=test_eps)
    assert parallel_transport(vec, src=point_a, dst=point_a, radius=radius).allclose(vec, atol=test_eps)
    transported = parallel_transport(vec, src=point_a, dst=point_b, radius=radius)
    # assert is_in_tangent_space(transported, at_point=point_b, eps=test_eps)
    round_trip = parallel_transport(transported, src=point_b, dst=point_a, radius=radius)
    inverse = inverse_parallel_transport(transported, src=point_a, dst=point_b, radius=radius)
    assert round_trip.allclose(inverse)
    # assert is_in_tangent_space(round_trip, at_point=point_a, eps=test_eps)
    assert vec.allclose(round_trip, atol=test_eps, rtol=test_eps)
def test_parallel_transport_batch() -> None:
    """Batched transport round-trips through the inverse transport."""
    src = t([2., 1, np.sqrt(2)]) / radius
    dst = t([np.sqrt(5), 1, np.sqrt(3)]) / radius
    vec_a = t([0, 2, -np.sqrt(2)])
    vec_b = t([0, 4, -2 * np.sqrt(2)])
    batch = torch.stack((vec_a, vec_b), dim=0)
    transported = parallel_transport(batch, src=src, dst=dst, radius=radius)
    recovered = inverse_parallel_transport(transported, src=src, dst=dst, radius=radius)
    assert batch.allclose(recovered, atol=test_eps)
def test_parallel_transport_mu0() -> None:
    """Specialised mu0 transport agrees with the generic transport from mu0."""
    origin = t([0., 0, 0])
    target = t([np.sqrt(5), 1, np.sqrt(3)]) / radius
    vec = t([0, 2, -np.sqrt(2)])
    assert SP.parallel_transport_mu0(vec, dst=origin, radius=radius).allclose(vec)
    transported = SP.parallel_transport_mu0(vec, dst=target, radius=radius)
    assert parallel_transport(vec, src=origin, dst=target, radius=radius).allclose(transported, atol=test_eps)
    recovered = SP.inverse_parallel_transport_mu0(transported, src=target, radius=radius)
    assert vec.allclose(recovered)
def test_parallel_transport_mu0_batch() -> None:
    """Batched mu0 transport round-trips through its inverse."""
    target = radius * t([np.sqrt(5), 1, np.sqrt(3)])
    vec_a = t([0, 2, -np.sqrt(2)])
    vec_b = t([0, 4, -2 * np.sqrt(2)])
    batch = torch.stack((vec_a, vec_b), dim=0)
    transported = SP.parallel_transport_mu0(batch, dst=target, radius=radius)
    recovered = SP.inverse_parallel_transport_mu0(transported, src=target, radius=radius)
    assert batch.allclose(recovered)
def test_exp_map() -> None:
    """exp map round-trips through its inverse; the image lies at distance
    lambda_x * |u| from the base point."""
    base = t([2., 1, np.sqrt(2)]) / radius
    vec = t([0, 2, -np.sqrt(2)])
    assert is_in_tangent_space(vec, at_point=base, eps=test_eps)
    mapped = SP.exp_map(vec, at_point=base, radius=radius)
    recovered = SP.inverse_exp_map(mapped, at_point=base, radius=radius)
    assert vec.allclose(recovered, atol=test_eps)
    curvature = SP._c(radius)
    expected_dist = SP.lambda_x_c(base, curvature) * torch.norm(vec, p=2)
    assert SP.spherical_projected_distance(base, mapped, curvature).allclose(expected_dist)
def test_exp_map_mu0() -> None:
    """exp map at mu0 matches the specialised mu0 variant and its inverse."""
    origin = SP.mu_0((3,))
    vec = t([0, 2, -np.sqrt(2)])
    assert is_in_tangent_space(vec, at_point=origin, eps=test_eps)
    mapped = SP.exp_map(vec, at_point=origin, radius=radius)
    mapped_mu0 = SP.exp_map_mu0(vec, radius=radius)
    assert mapped.allclose(mapped_mu0)
    recovered = SP.inverse_exp_map(mapped, at_point=origin, radius=radius)
    recovered_mu0 = SP.inverse_exp_map_mu0(mapped_mu0, radius=radius)
    assert vec.allclose(recovered, atol=test_eps)
    assert vec.allclose(recovered_mu0, atol=test_eps)
    curvature = SP._c(radius)
    expected_dist = 2 * torch.norm(vec, p=2)
    assert SP.spherical_projected_distance(origin, mapped, curvature).allclose(expected_dist)
    assert SP.spherical_projected_distance(origin, mapped_mu0, curvature).allclose(expected_dist)
def test_exp_map_large() -> None:
    """exp map still round-trips for a tangent vector of larger magnitude."""
    base = t([2., 1, np.sqrt(2)])
    vec = 2.5 * t([0, 2, -np.sqrt(2)])
    assert is_in_tangent_space(vec, at_point=base, eps=test_eps)  # This should hold.
    mapped = SP.exp_map(vec, at_point=base, radius=radius)
    recovered = SP.inverse_exp_map(mapped, at_point=base, radius=radius)
    assert vec.allclose(recovered, atol=test_eps)
def test_exp_map_batch() -> None:
    """Batched exp map round-trips through its inverse."""
    base = t([2., 1, np.sqrt(2)]).double() / radius
    vec_a = t([0, 2, -np.sqrt(2)]).double() / radius
    vec_b = t([0, 4, -2 * np.sqrt(2)]).double() / radius
    assert is_in_tangent_space(vec_a, at_point=base, eps=test_eps)
    assert is_in_tangent_space(vec_b, at_point=base, eps=test_eps)
    batch = torch.stack((vec_a, vec_b), dim=0)
    mapped = SP.exp_map(batch, at_point=base, radius=radius)
    recovered = SP.inverse_exp_map(mapped, at_point=base, radius=radius)
    assert batch.allclose(recovered, atol=test_eps)
def test_sample_projection() -> None:
    """Sample projection maps a tangent vector at mu0 onto the manifold
    and its inverse recovers the vector."""
    tangent = t([0., 1, 2])
    origin = t([0., 0, 0])
    assert is_in_hyp_space(origin)
    assert is_in_tangent_space(tangent, at_point=origin)
    base = t([2., 1, np.sqrt(2)]) / radius
    assert is_in_hyp_space(base)
    projected, _ = SP.sample_projection_mu0(tangent, at_point=base, radius=radius)
    assert is_in_hyp_space(projected, eps=test_eps)
    _, recovered = SP.inverse_sample_projection_mu0(projected, at_point=base, radius=radius)
    assert tangent.allclose(recovered, atol=test_eps)
    assert is_in_tangent_space(recovered, at_point=origin, eps=test_eps)
def test_projections() -> None:
    """Projected <-> spherical coordinate conversions invert each other.

    Fix: removed a stray ``torch.norm(mu_s)`` expression statement whose
    result was discarded (leftover debugging, a no-op).
    """
    mu0_d = t([0., 0])
    mu0_s = radius * t([1., 0, 0])
    # The origin of the projected plane corresponds to one pole of the sphere.
    assert SP.projected_to_spherical(mu0_d, radius).allclose(mu0_s)
    assert S.spherical_to_projected(mu0_s, radius).allclose(mu0_d)
    mu_d = t([1, np.sqrt(2)]) / radius
    assert S.spherical_to_projected(SP.projected_to_spherical(mu_d, radius), radius).allclose(mu_d)
    mu_s = t([2., 1, np.sqrt(2)])
    mu_s = mu_s / mu_s.norm() * radius  # normalise onto the sphere of the given radius
    mu_s_in_d = S.spherical_to_projected(mu_s, radius)
    mu_s_ = SP.projected_to_spherical(mu_s_in_d, radius)
    assert mu_s_.allclose(mu_s)
| [
"mt.mvae.ops.spherical_projected.exp_map_mu0",
"numpy.sqrt",
"mt.mvae.ops.spherical_projected.exp_map",
"mt.mvae.ops.poincare.pm.gyration",
"mt.mvae.ops.spherical_projected.sample_projection_mu0",
"mt.mvae.ops.spherical_projected.inverse_sample_projection_mu0",
"mt.mvae.ops.spherical_projected.gyration"... | [((934, 952), 'numpy.random.seed', 'np.random.seed', (['(42)'], {}), '(42)\n', (948, 952), True, 'import numpy as np\n'), ((1031, 1069), 'torch.tensor', 'torch.tensor', (['(2.0)'], {'dtype': 'torch.float64'}), '(2.0, dtype=torch.float64)\n', (1043, 1069), False, 'import torch\n'), ((1604, 1617), 'mt.mvae.ops.spherical_projected._c', 'SP._c', (['radius'], {}), '(radius)\n', (1609, 1617), True, 'import mt.mvae.ops.spherical_projected as SP\n'), ((2510, 2544), 'mt.mvae.ops.spherical_projected.mu_0', 'SP.mu_0', (['(3, 3)'], {'dtype': 'torch.int64'}), '((3, 3), dtype=torch.int64)\n', (2517, 2544), True, 'import mt.mvae.ops.spherical_projected as SP\n'), ((2560, 2596), 'torch.tensor', 't', (['[[0, 0, 0], [0, 0, 0], [0, 0, 0]]'], {}), '([[0, 0, 0], [0, 0, 0], [0, 0, 0]])\n', (2561, 2596), True, 'from torch import tensor as t\n'), ((3337, 3351), 'torch.tensor', 't', (['[0.0, 0, 0]'], {}), '([0.0, 0, 0])\n', (3338, 3351), True, 'from torch import tensor as t\n'), ((3795, 3809), 'torch.tensor', 't', (['[0.0, 0, 0]'], {}), '([0.0, 0, 0])\n', (3796, 3809), True, 'from torch import tensor as t\n'), ((3850, 3863), 'mt.mvae.ops.spherical_projected._c', 'SP._c', (['radius'], {}), '(radius)\n', (3855, 3863), True, 'import mt.mvae.ops.spherical_projected as SP\n'), ((4195, 4209), 'torch.tensor', 't', (['[0.0, 0, 0]'], {}), '([0.0, 0, 0])\n', (4196, 4209), True, 'from torch import tensor as t\n'), ((4250, 4263), 'mt.mvae.ops.spherical_projected._c', 'SP._c', (['radius'], {}), '(radius)\n', (4255, 4263), True, 'import mt.mvae.ops.spherical_projected as SP\n'), ((4615, 4629), 'torch.tensor', 't', (['[0.0, 0, 0]'], {}), '([0.0, 0, 0])\n', (4616, 4629), True, 'from torch import tensor as t\n'), ((4670, 4683), 'mt.mvae.ops.spherical_projected._c', 'SP._c', (['radius'], {}), '(radius)\n', (4675, 4683), True, 'import mt.mvae.ops.spherical_projected as SP\n'), ((4699, 4747), 'mt.mvae.ops.spherical_projected.spherical_projected_gyro_distance', 
'SP.spherical_projected_gyro_distance', (['mu0', 'mu', 'K'], {}), '(mu0, mu, K)\n', (4735, 4747), True, 'import mt.mvae.ops.spherical_projected as SP\n'), ((5666, 5695), 'mt.mvae.ops.spherical_projected.gyration', 'SP.gyration', (['mu1', 'mu2', 'u'], {'c': '(1)'}), '(mu1, mu2, u, c=1)\n', (5677, 5695), True, 'import mt.mvae.ops.spherical_projected as SP\n'), ((5706, 5736), 'mt.mvae.ops.poincare.pm.gyration', 'pm.gyration', (['mu1', 'mu2', 'u'], {'c': '(-1)'}), '(mu1, mu2, u, c=-1)\n', (5717, 5736), False, 'from mt.mvae.ops.poincare import pm\n'), ((6801, 6828), 'torch.stack', 'torch.stack', (['(u, u2)'], {'dim': '(0)'}), '((u, u2), dim=0)\n', (6812, 6828), False, 'import torch\n'), ((7064, 7078), 'torch.tensor', 't', (['[0.0, 0, 0]'], {}), '([0.0, 0, 0])\n', (7065, 7078), True, 'from torch import tensor as t\n'), ((7248, 7300), 'mt.mvae.ops.spherical_projected.parallel_transport_mu0', 'SP.parallel_transport_mu0', (['u'], {'dst': 'mu2', 'radius': 'radius'}), '(u, dst=mu2, radius=radius)\n', (7273, 7300), True, 'import mt.mvae.ops.spherical_projected as SP\n'), ((7410, 7473), 'mt.mvae.ops.spherical_projected.inverse_parallel_transport_mu0', 'SP.inverse_parallel_transport_mu0', (['pt_u'], {'src': 'mu2', 'radius': 'radius'}), '(pt_u, src=mu2, radius=radius)\n', (7443, 7473), True, 'import mt.mvae.ops.spherical_projected as SP\n'), ((7680, 7707), 'torch.stack', 'torch.stack', (['(u, u2)'], {'dim': '(0)'}), '((u, u2), dim=0)\n', (7691, 7707), False, 'import torch\n'), ((7718, 7770), 'mt.mvae.ops.spherical_projected.parallel_transport_mu0', 'SP.parallel_transport_mu0', (['U'], {'dst': 'mu2', 'radius': 'radius'}), '(U, dst=mu2, radius=radius)\n', (7743, 7770), True, 'import mt.mvae.ops.spherical_projected as SP\n'), ((7780, 7842), 'mt.mvae.ops.spherical_projected.inverse_parallel_transport_mu0', 'SP.inverse_parallel_transport_mu0', (['res'], {'src': 'mu2', 'radius': 'radius'}), '(res, src=mu2, radius=radius)\n', (7813, 7842), True, 'import mt.mvae.ops.spherical_projected 
as SP\n'), ((8048, 8089), 'mt.mvae.ops.spherical_projected.exp_map', 'SP.exp_map', (['u'], {'at_point': 'mu', 'radius': 'radius'}), '(u, at_point=mu, radius=radius)\n', (8058, 8089), True, 'import mt.mvae.ops.spherical_projected as SP\n'), ((8099, 8155), 'mt.mvae.ops.spherical_projected.inverse_exp_map', 'SP.inverse_exp_map', (['u_mapped'], {'at_point': 'mu', 'radius': 'radius'}), '(u_mapped, at_point=mu, radius=radius)\n', (8117, 8155), True, 'import mt.mvae.ops.spherical_projected as SP\n'), ((8206, 8219), 'mt.mvae.ops.spherical_projected._c', 'SP._c', (['radius'], {}), '(radius)\n', (8211, 8219), True, 'import mt.mvae.ops.spherical_projected as SP\n'), ((8376, 8389), 'mt.mvae.ops.spherical_projected.mu_0', 'SP.mu_0', (['(3,)'], {}), '((3,))\n', (8383, 8389), True, 'import mt.mvae.ops.spherical_projected as SP\n'), ((8499, 8541), 'mt.mvae.ops.spherical_projected.exp_map', 'SP.exp_map', (['u'], {'at_point': 'mu0', 'radius': 'radius'}), '(u, at_point=mu0, radius=radius)\n', (8509, 8541), True, 'import mt.mvae.ops.spherical_projected as SP\n'), ((8561, 8593), 'mt.mvae.ops.spherical_projected.exp_map_mu0', 'SP.exp_map_mu0', (['u'], {'radius': 'radius'}), '(u, radius=radius)\n', (8575, 8593), True, 'import mt.mvae.ops.spherical_projected as SP\n'), ((8646, 8703), 'mt.mvae.ops.spherical_projected.inverse_exp_map', 'SP.inverse_exp_map', (['u_mapped'], {'at_point': 'mu0', 'radius': 'radius'}), '(u_mapped, at_point=mu0, radius=radius)\n', (8664, 8703), True, 'import mt.mvae.ops.spherical_projected as SP\n'), ((8716, 8767), 'mt.mvae.ops.spherical_projected.inverse_exp_map_mu0', 'SP.inverse_exp_map_mu0', (['u_mu0_mapped'], {'radius': 'radius'}), '(u_mu0_mapped, radius=radius)\n', (8738, 8767), True, 'import mt.mvae.ops.spherical_projected as SP\n'), ((8862, 8875), 'mt.mvae.ops.spherical_projected._c', 'SP._c', (['radius'], {}), '(radius)\n', (8867, 8875), True, 'import mt.mvae.ops.spherical_projected as SP\n'), ((9270, 9311), 'mt.mvae.ops.spherical_projected.exp_map', 
'SP.exp_map', (['u'], {'at_point': 'mu', 'radius': 'radius'}), '(u, at_point=mu, radius=radius)\n', (9280, 9311), True, 'import mt.mvae.ops.spherical_projected as SP\n'), ((9321, 9377), 'mt.mvae.ops.spherical_projected.inverse_exp_map', 'SP.inverse_exp_map', (['u_mapped'], {'at_point': 'mu', 'radius': 'radius'}), '(u_mapped, at_point=mu, radius=radius)\n', (9339, 9377), True, 'import mt.mvae.ops.spherical_projected as SP\n'), ((9740, 9767), 'torch.stack', 'torch.stack', (['(u, u2)'], {'dim': '(0)'}), '((u, u2), dim=0)\n', (9751, 9767), False, 'import torch\n'), ((9783, 9824), 'mt.mvae.ops.spherical_projected.exp_map', 'SP.exp_map', (['U'], {'at_point': 'mu', 'radius': 'radius'}), '(U, at_point=mu, radius=radius)\n', (9793, 9824), True, 'import mt.mvae.ops.spherical_projected as SP\n'), ((9834, 9890), 'mt.mvae.ops.spherical_projected.inverse_exp_map', 'SP.inverse_exp_map', (['U_mapped'], {'at_point': 'mu', 'radius': 'radius'}), '(U_mapped, at_point=mu, radius=radius)\n', (9852, 9890), True, 'import mt.mvae.ops.spherical_projected as SP\n'), ((9980, 9994), 'torch.tensor', 't', (['[0.0, 1, 2]'], {}), '([0.0, 1, 2])\n', (9981, 9994), True, 'from torch import tensor as t\n'), ((10005, 10019), 'torch.tensor', 't', (['[0.0, 0, 0]'], {}), '([0.0, 0, 0])\n', (10006, 10019), True, 'from torch import tensor as t\n'), ((10189, 10244), 'mt.mvae.ops.spherical_projected.sample_projection_mu0', 'SP.sample_projection_mu0', (['v'], {'at_point': 'mu', 'radius': 'radius'}), '(v, at_point=mu, radius=radius)\n', (10213, 10244), True, 'import mt.mvae.ops.spherical_projected as SP\n'), ((10307, 10375), 'mt.mvae.ops.spherical_projected.inverse_sample_projection_mu0', 'SP.inverse_sample_projection_mu0', (['v_proj'], {'at_point': 'mu', 'radius': 'radius'}), '(v_proj, at_point=mu, radius=radius)\n', (10339, 10375), True, 'import mt.mvae.ops.spherical_projected as SP\n'), ((10526, 10537), 'torch.tensor', 't', (['[0.0, 0]'], {}), '([0.0, 0])\n', (10527, 10537), True, 'from torch import tensor 
as t\n'), ((10925, 10941), 'torch.norm', 'torch.norm', (['mu_s'], {}), '(mu_s)\n', (10935, 10941), False, 'import torch\n'), ((10958, 10996), 'mt.mvae.ops.spherical.spherical_to_projected', 'S.spherical_to_projected', (['mu_s', 'radius'], {}), '(mu_s, radius)\n', (10982, 10996), True, 'import mt.mvae.ops.spherical as S\n'), ((11009, 11053), 'mt.mvae.ops.spherical_projected.projected_to_spherical', 'SP.projected_to_spherical', (['mu_s_in_d', 'radius'], {}), '(mu_s_in_d, radius)\n', (11034, 11053), True, 'import mt.mvae.ops.spherical_projected as SP\n'), ((967, 995), 'numpy.random.random_sample', 'np.random.random_sample', (['(100)'], {}), '(100)\n', (990, 995), True, 'import numpy as np\n'), ((1393, 1429), 'mt.mvae.ops.spherical_projected.projected_to_spherical', 'SP.projected_to_spherical', (['x', 'radius'], {}), '(x, radius)\n', (1418, 1429), True, 'import mt.mvae.ops.spherical_projected as SP\n'), ((1431, 1467), 'mt.mvae.ops.spherical_projected.projected_to_spherical', 'SP.projected_to_spherical', (['y', 'radius'], {}), '(y, radius)\n', (1456, 1467), True, 'import mt.mvae.ops.spherical_projected as SP\n'), ((1679, 1707), 'mt.mvae.ops.spherical_projected.gyration', 'SP.gyration', (['dst', '(-src)', 'x', 'c'], {}), '(dst, -src, x, c)\n', (1690, 1707), True, 'import mt.mvae.ops.spherical_projected as SP\n'), ((2439, 2461), 'torch.zeros_like', 'torch.zeros_like', (['prod'], {}), '(prod)\n', (2455, 2461), False, 'import torch\n'), ((3466, 3472), 'torch.tensor', 't', (['(0.0)'], {}), '(0.0)\n', (3467, 3472), True, 'from torch import tensor as t\n'), ((3564, 3570), 'torch.tensor', 't', (['(0.0)'], {}), '(0.0)\n', (3565, 3570), True, 'from torch import tensor as t\n'), ((3929, 3935), 'torch.tensor', 't', (['(0.0)'], {}), '(0.0)\n', (3930, 3935), True, 'from torch import tensor as t\n'), ((4010, 4016), 'torch.tensor', 't', (['(0.0)'], {}), '(0.0)\n', (4011, 4016), True, 'from torch import tensor as t\n'), ((4039, 4082), 
'mt.mvae.ops.spherical_projected.spherical_projected_distance', 'SP.spherical_projected_distance', (['mu0', 'mu', 'K'], {}), '(mu0, mu, K)\n', (4070, 4082), True, 'import mt.mvae.ops.spherical_projected as SP\n'), ((4086, 4129), 'mt.mvae.ops.spherical_projected.spherical_projected_distance', 'SP.spherical_projected_distance', (['mu', 'mu0', 'K'], {}), '(mu, mu0, K)\n', (4117, 4129), True, 'import mt.mvae.ops.spherical_projected as SP\n'), ((4334, 4340), 'torch.tensor', 't', (['(0.0)'], {}), '(0.0)\n', (4335, 4340), True, 'from torch import tensor as t\n'), ((4420, 4426), 'torch.tensor', 't', (['(0.0)'], {}), '(0.0)\n', (4421, 4426), True, 'from torch import tensor as t\n'), ((4449, 4497), 'mt.mvae.ops.spherical_projected.spherical_projected_gyro_distance', 'SP.spherical_projected_gyro_distance', (['mu0', 'mu', 'K'], {}), '(mu0, mu, K)\n', (4485, 4497), True, 'import mt.mvae.ops.spherical_projected as SP\n'), ((4501, 4549), 'mt.mvae.ops.spherical_projected.spherical_projected_gyro_distance', 'SP.spherical_projected_gyro_distance', (['mu', 'mu0', 'K'], {}), '(mu, mu0, K)\n', (4537, 4549), True, 'import mt.mvae.ops.spherical_projected as SP\n'), ((4777, 4820), 'mt.mvae.ops.spherical_projected.spherical_projected_distance', 'SP.spherical_projected_distance', (['mu0', 'mu', 'K'], {}), '(mu0, mu, K)\n', (4808, 4820), True, 'import mt.mvae.ops.spherical_projected as SP\n'), ((5109, 5130), 'torch.zeros_like', 'torch.zeros_like', (['mu1'], {}), '(mu1)\n', (5125, 5130), False, 'import torch\n'), ((5179, 5200), 'torch.zeros_like', 'torch.zeros_like', (['mu1'], {}), '(mu1)\n', (5195, 5200), False, 'import torch\n'), ((5248, 5269), 'torch.zeros_like', 'torch.zeros_like', (['mu1'], {}), '(mu1)\n', (5264, 5269), False, 'import torch\n'), ((5315, 5344), 'mt.mvae.ops.poincare.pm.mobius_add', 'pm.mobius_add', (['mu1', 'mu2'], {'c': '(-1)'}), '(mu1, mu2, c=-1)\n', (5328, 5344), False, 'from mt.mvae.ops.poincare import pm\n'), ((10558, 10572), 'torch.tensor', 't', (['[1.0, 0, 0]'], 
{}), '([1.0, 0, 0])\n', (10559, 10572), True, 'from torch import tensor as t\n'), ((1630, 1651), 'mt.mvae.ops.spherical_projected.lambda_x_c', 'SP.lambda_x_c', (['src', 'c'], {}), '(src, c)\n', (1643, 1651), True, 'import mt.mvae.ops.spherical_projected as SP\n'), ((1654, 1675), 'mt.mvae.ops.spherical_projected.lambda_x_c', 'SP.lambda_x_c', (['dst', 'c'], {}), '(dst, c)\n', (1667, 1675), True, 'import mt.mvae.ops.spherical_projected as SP\n'), ((2033, 2070), 'torch.ones_like', 'torch.ones_like', (['x'], {'dtype': 'torch.uint8'}), '(x, dtype=torch.uint8)\n', (2048, 2070), False, 'import torch\n'), ((2705, 2719), 'torch.tensor', 't', (['[1.0, 0, 0]'], {}), '([1.0, 0, 0])\n', (2706, 2719), True, 'from torch import tensor as t\n'), ((2756, 2770), 'torch.tensor', 't', (['[0.1, 2, 3]'], {}), '([0.1, 2, 3])\n', (2757, 2770), True, 'from torch import tensor as t\n'), ((3371, 3381), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (3378, 3381), True, 'import numpy as np\n'), ((3829, 3839), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (3836, 3839), True, 'import numpy as np\n'), ((3875, 3919), 'mt.mvae.ops.spherical_projected.spherical_projected_distance', 'SP.spherical_projected_distance', (['mu0', 'mu0', 'K'], {}), '(mu0, mu0, K)\n', (3906, 3919), True, 'import mt.mvae.ops.spherical_projected as SP\n'), ((3958, 4000), 'mt.mvae.ops.spherical_projected.spherical_projected_distance', 'SP.spherical_projected_distance', (['mu', 'mu', 'K'], {}), '(mu, mu, K)\n', (3989, 4000), True, 'import mt.mvae.ops.spherical_projected as SP\n'), ((4229, 4239), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (4236, 4239), True, 'import numpy as np\n'), ((4275, 4324), 'mt.mvae.ops.spherical_projected.spherical_projected_gyro_distance', 'SP.spherical_projected_gyro_distance', (['mu0', 'mu0', 'K'], {}), '(mu0, mu0, K)\n', (4311, 4324), True, 'import mt.mvae.ops.spherical_projected as SP\n'), ((4363, 4410), 'mt.mvae.ops.spherical_projected.spherical_projected_gyro_distance', 
'SP.spherical_projected_gyro_distance', (['mu', 'mu', 'K'], {}), '(mu, mu, K)\n', (4399, 4410), True, 'import mt.mvae.ops.spherical_projected as SP\n'), ((4649, 4659), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (4656, 4659), True, 'import numpy as np\n'), ((5074, 5100), 'mt.mvae.ops.spherical_projected.mob_add', 'SP.mob_add', (['mu1', '(-mu1)', '(1.0)'], {}), '(mu1, -mu1, 1.0)\n', (5084, 5100), True, 'import mt.mvae.ops.spherical_projected as SP\n'), ((5143, 5169), 'mt.mvae.ops.spherical_projected.mob_add', 'SP.mob_add', (['mu1', '(-mu1)', '(0.1)'], {}), '(mu1, -mu1, 0.1)\n', (5153, 5169), True, 'import mt.mvae.ops.spherical_projected as SP\n'), ((5213, 5238), 'mt.mvae.ops.spherical_projected.mob_add', 'SP.mob_add', (['mu1', '(-mu1)', '(10)'], {}), '(mu1, -mu1, 10)\n', (5223, 5238), True, 'import mt.mvae.ops.spherical_projected as SP\n'), ((5282, 5305), 'mt.mvae.ops.spherical_projected.mob_add', 'SP.mob_add', (['mu1', 'mu2', '(1)'], {}), '(mu1, mu2, 1)\n', (5292, 5305), True, 'import mt.mvae.ops.spherical_projected as SP\n'), ((5471, 5489), 'torch.tensor', 't', (['[1.0, 1.0, 1.0]'], {}), '([1.0, 1.0, 1.0])\n', (5472, 5489), True, 'from torch import tensor as t\n'), ((5507, 5537), 'mt.mvae.ops.spherical_projected.gyration', 'SP.gyration', (['mu1', '(-mu1)', 'u', '(1.0)'], {}), '(mu1, -mu1, u, 1.0)\n', (5518, 5537), True, 'import mt.mvae.ops.spherical_projected as SP\n'), ((5560, 5590), 'mt.mvae.ops.spherical_projected.gyration', 'SP.gyration', (['mu1', '(-mu1)', 'u', '(0.1)'], {}), '(mu1, -mu1, u, 0.1)\n', (5571, 5590), True, 'import mt.mvae.ops.spherical_projected as SP\n'), ((5614, 5643), 'mt.mvae.ops.spherical_projected.gyration', 'SP.gyration', (['mu1', '(-mu1)', 'u', '(10)'], {}), '(mu1, -mu1, u, 10)\n', (5625, 5643), True, 'import mt.mvae.ops.spherical_projected as SP\n'), ((5972, 5990), 'torch.tensor', 't', (['[1.0, 1.0, 1.0]'], {}), '([1.0, 1.0, 1.0])\n', (5973, 5990), True, 'from torch import tensor as t\n'), ((7171, 7223), 
'mt.mvae.ops.spherical_projected.parallel_transport_mu0', 'SP.parallel_transport_mu0', (['u'], {'dst': 'mu0', 'radius': 'radius'}), '(u, dst=mu0, radius=radius)\n', (7196, 7223), True, 'import mt.mvae.ops.spherical_projected as SP\n'), ((8231, 8279), 'mt.mvae.ops.spherical_projected.spherical_projected_distance', 'SP.spherical_projected_distance', (['mu', 'u_mapped', 'c'], {}), '(mu, u_mapped, c)\n', (8262, 8279), True, 'import mt.mvae.ops.spherical_projected as SP\n'), ((8289, 8309), 'mt.mvae.ops.spherical_projected.lambda_x_c', 'SP.lambda_x_c', (['mu', 'c'], {}), '(mu, c)\n', (8302, 8309), True, 'import mt.mvae.ops.spherical_projected as SP\n'), ((8312, 8330), 'torch.norm', 'torch.norm', (['u'], {'p': '(2)'}), '(u, p=2)\n', (8322, 8330), False, 'import torch\n'), ((8887, 8936), 'mt.mvae.ops.spherical_projected.spherical_projected_distance', 'SP.spherical_projected_distance', (['mu0', 'u_mapped', 'K'], {}), '(mu0, u_mapped, K)\n', (8918, 8936), True, 'import mt.mvae.ops.spherical_projected as SP\n'), ((8950, 8968), 'torch.norm', 'torch.norm', (['u'], {'p': '(2)'}), '(u, p=2)\n', (8960, 8968), False, 'import torch\n'), ((8981, 9034), 'mt.mvae.ops.spherical_projected.spherical_projected_distance', 'SP.spherical_projected_distance', (['mu0', 'u_mu0_mapped', 'K'], {}), '(mu0, u_mu0_mapped, K)\n', (9012, 9034), True, 'import mt.mvae.ops.spherical_projected as SP\n'), ((9048, 9066), 'torch.norm', 'torch.norm', (['u'], {'p': '(2)'}), '(u, p=2)\n', (9058, 9066), False, 'import torch\n'), ((9123, 9133), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (9130, 9133), True, 'import numpy as np\n'), ((10584, 10624), 'mt.mvae.ops.spherical_projected.projected_to_spherical', 'SP.projected_to_spherical', (['mu0_d', 'radius'], {}), '(mu0_d, radius)\n', (10609, 10624), True, 'import mt.mvae.ops.spherical_projected as SP\n'), ((10652, 10691), 'mt.mvae.ops.spherical.spherical_to_projected', 'S.spherical_to_projected', (['mu0_s', 'radius'], {}), '(mu0_s, radius)\n', (10676, 10691), 
True, 'import mt.mvae.ops.spherical as S\n'), ((10869, 10879), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (10876, 10879), True, 'import numpy as np\n'), ((6653, 6663), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (6660, 6663), True, 'import numpy as np\n'), ((6688, 6698), 'numpy.sqrt', 'np.sqrt', (['(5)'], {}), '(5)\n', (6695, 6698), True, 'import numpy as np\n'), ((6703, 6713), 'numpy.sqrt', 'np.sqrt', (['(3)'], {}), '(3)\n', (6710, 6713), True, 'import numpy as np\n'), ((6743, 6753), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (6750, 6753), True, 'import numpy as np\n'), ((6779, 6789), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (6786, 6789), True, 'import numpy as np\n'), ((7091, 7101), 'numpy.sqrt', 'np.sqrt', (['(5)'], {}), '(5)\n', (7098, 7101), True, 'import numpy as np\n'), ((7106, 7116), 'numpy.sqrt', 'np.sqrt', (['(3)'], {}), '(3)\n', (7113, 7116), True, 'import numpy as np\n'), ((7146, 7156), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (7153, 7156), True, 'import numpy as np\n'), ((7576, 7586), 'numpy.sqrt', 'np.sqrt', (['(5)'], {}), '(5)\n', (7583, 7586), True, 'import numpy as np\n'), ((7591, 7601), 'numpy.sqrt', 'np.sqrt', (['(3)'], {}), '(3)\n', (7598, 7601), True, 'import numpy as np\n'), ((7622, 7632), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (7629, 7632), True, 'import numpy as np\n'), ((7658, 7668), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (7665, 7668), True, 'import numpy as np\n'), ((7918, 7928), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (7925, 7928), True, 'import numpy as np\n'), ((7958, 7968), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (7965, 7968), True, 'import numpy as np\n'), ((8408, 8418), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (8415, 8418), True, 'import numpy as np\n'), ((10119, 10129), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (10126, 10129), True, 'import numpy as np\n'), ((10726, 10736), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (10733, 10736), True, 
'import numpy as np\n'), ((10784, 10823), 'mt.mvae.ops.spherical_projected.projected_to_spherical', 'SP.projected_to_spherical', (['mu_d', 'radius'], {}), '(mu_d, radius)\n', (10809, 10823), True, 'import mt.mvae.ops.spherical_projected as SP\n'), ((2817, 2827), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (2824, 2827), True, 'import numpy as np\n'), ((2876, 2886), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (2883, 2886), True, 'import numpy as np\n'), ((4997, 5007), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (5004, 5007), True, 'import numpy as np\n'), ((5041, 5051), 'numpy.sqrt', 'np.sqrt', (['(3)'], {}), '(3)\n', (5048, 5051), True, 'import numpy as np\n'), ((5397, 5407), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (5404, 5407), True, 'import numpy as np\n'), ((5441, 5451), 'numpy.sqrt', 'np.sqrt', (['(3)'], {}), '(3)\n', (5448, 5451), True, 'import numpy as np\n'), ((5827, 5837), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (5834, 5837), True, 'import numpy as np\n'), ((5862, 5872), 'numpy.sqrt', 'np.sqrt', (['(5)'], {}), '(5)\n', (5869, 5872), True, 'import numpy as np\n'), ((5877, 5887), 'numpy.sqrt', 'np.sqrt', (['(3)'], {}), '(3)\n', (5884, 5887), True, 'import numpy as np\n'), ((9160, 9170), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (9167, 9170), True, 'import numpy as np\n'), ((2936, 2946), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (2943, 2946), True, 'import numpy as np\n'), ((9474, 9484), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (9481, 9484), True, 'import numpy as np\n'), ((9523, 9533), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (9530, 9533), True, 'import numpy as np\n'), ((9577, 9587), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (9584, 9587), True, 'import numpy as np\n')] |
"""
Copyright (c) 2018-2019 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import io
import numpy as np
import os
import struct
from mo.utils.error import Error
from mo.utils.utils import refer_to_faq_msg
# Sentinel tags that delimit the network / individual components in a
# Kaldi nnet stream; the scanning helpers below match against these.
end_of_nnet_tag = '</Nnet>'
end_of_component_tag = '<!EndOfComponent>'
# Lower-case names of the Kaldi component types this loader understands;
# find_next_component() accepts a tag only if its name appears here.
supported_components = [
    'addshift',
    'affinecomponent',
    'affinetransform',
    'convolutional1dcomponent',
    'convolutionalcomponent',
    'copy',
    'fixedaffinecomponent',
    'lstmprojected',
    'lstmprojectedstreams',
    'maxpoolingcomponent',
    'parallelcomponent',
    'rescale',
    'sigmoid',
    'softmax',
    'softmaxcomponent',
    'splicecomponent',
    'tanhcomponent',
    'normalizecomponent',
    'affinecomponentpreconditionedonline',
    'rectifiedlinearcomponent',
    'batchnormcomponent',
    'naturalgradientaffinecomponent',
    'logsoftmaxcomponent',
    'naturalgradientperelementscalecomponent',
    'sigmoidcomponent',
    'tanhcomponent',
    'elementwiseproductcomponent',
    'clipgradientcomponent',
    'noopcomponent',
    'lstmnonlinearitycomponent',
    'backproptruncationcomponent',
]
def get_bool(s: bytes) -> bool:
    """
    Get bool value from bytes.

    Kaldi encodes booleans either as the ASCII characters 'F'/'T' or as a
    raw one-byte boolean, so both encodings are handled here.

    :param s: bytes array containing the bool value
    :return: bool value from the bytes array
    """
    # Fix: the original compared str(s) against the literal reprs "b'F'" /
    # "b'T'"; comparing the bytes directly is equivalent and idiomatic.
    if s == b'F':
        return False
    if s == b'T':
        return True
    # Fall back to a raw packed boolean byte.
    return struct.unpack('?', s)[0]
def get_uint16(s: bytes) -> int:
    """
    Decode a 2-byte buffer as an unsigned 16-bit integer.

    Uses native byte order (struct format 'H').

    :param s: bytes array containing the packed value
    :return: the decoded unsigned int16
    """
    (value,) = struct.unpack('H', s)
    return value
def get_uint32(s: bytes) -> int:
    """
    Decode a 4-byte buffer as an unsigned 32-bit integer.

    Uses native byte order (struct format 'I').

    :param s: bytes array containing the packed value
    :return: the decoded unsigned int32
    """
    (value,) = struct.unpack('I', s)
    return value
def get_uint64(s: bytes) -> int:
    """
    Get unsigned int64 value from bytes.

    :param s: bytes array containing the unsigned int64 value
    :return: unsigned int64 value from the bytes array
    """
    # Fix: 'q' unpacks a *signed* 64-bit integer, contradicting the
    # function's name and docstring; 'Q' decodes the full unsigned range.
    # Results are unchanged for every value below 2**63.
    return struct.unpack('Q', s)[0]
def read_binary_bool_token(file_desc: io.BufferedReader) -> bool:
    """
    Read the next boolean token (one byte) from the file.
    The carriage moves forward by 1 position.
    :param file_desc: file descriptor
    :return: next boolean value in file
    """
    raw = file_desc.read(1)
    return get_bool(raw)
def read_binary_integer32_token(file_desc: io.BufferedReader) -> int:
    """
    Read the next int32 token from the file.
    The carriage moves forward by 5 positions (1 size byte + 4 payload bytes).
    :param file_desc: file descriptor
    :return: next uint32 value in file
    """
    # The first byte encodes how many payload bytes the integer occupies.
    num_bytes = file_desc.read(1)[0]
    return get_uint32(file_desc.read(num_bytes))
def read_binary_integer64_token(file_desc: io.BufferedReader) -> int:
    """
    Read the next int64 token from the file.
    The carriage moves forward by 9 positions (1 size byte + 8 payload bytes).
    :param file_desc: file descriptor
    :return: next uint64 value in file
    """
    # The first byte encodes how many payload bytes the integer occupies.
    num_bytes = file_desc.read(1)[0]
    return get_uint64(file_desc.read(num_bytes))
def read_binary_float_token(file_desc: io.BufferedReader) -> float:
    """
    Read the next float32 token from the file.
    The carriage moves forward by 5 positions (1 size byte + 4 payload bytes).
    :param file_desc: file descriptor
    :return: next float32 value in file
    """
    buffer_size = file_desc.read(1)
    s = file_desc.read(buffer_size[0])
    # Fix: np.fromstring is deprecated (removed for binary data in modern
    # NumPy); np.frombuffer is the supported replacement for decoding raw
    # bytes and yields the identical value here.
    return np.frombuffer(s, dtype=np.float32)[0]
def read_string(file_desc: io.BufferedReader) -> bytes:
    """Read the next whitespace-delimited token and return it as raw bytes.

    Note: the original annotation said ``int``, but
    collect_until_whitespace() returns a ``bytes`` token.
    """
    return collect_until_whitespace(file_desc)
def find_next_tag(file_desc: io.BufferedReader) -> str:
    """
    Scan forward for the next tag in the file.
    :param file_desc: file descriptor
    :return: string like '<sometag>'

    Raises Error if end of file is reached before a complete tag is found.
    """
    tag = b''
    while True:
        ch = file_desc.read(1)
        if ch == b'':
            raise Error('Unexpected end of Kaldi model')
        if ch == b'<':
            # A '<' always (re)starts tag accumulation, even mid-tag.
            tag = b'<'
            continue
        if not tag:
            # Skip bytes that precede the opening '<'.
            continue
        tag += ch
        if ch != b'>':
            continue
        try:
            return tag.decode('ascii')
        except UnicodeDecodeError:
            # Kaldi tags are always ASCII; restart the search on garbage.
            tag = b''
def read_placeholder(file_desc: io.BufferedReader, size=3) -> bytes:
    """
    Consume and return *size* bytes from the file.
    :param file_desc: file descriptor
    :param size: number of bytes to read (default 3)
    :return: the bytes read
    """
    data = file_desc.read(size)
    return data
def find_next_component(file_desc: io.BufferedReader) -> str:
    """
    Advance through the file until a supported component tag (or the
    end-of-network tag) is found, and return its lower-case name.
    All supported components are listed in supported_components.
    :param file_desc: file descriptor
    :return: component name, e.g. 'affinecomponent'

    Raises Error when a '<ComponentName>' tag is hit, i.e. the component
    type is unsupported or unspecified.
    """
    while True:
        tag = find_next_tag(file_desc)
        # Strip the surrounding '<' and '>' to get the bare name.
        name = tag[1:-1].lower()
        if name in supported_components or tag == end_of_nnet_tag:
            # Skip the single whitespace byte that follows the name.
            read_placeholder(file_desc, 1)
            return name
        if tag == '<ComponentName>':
            raise Error('Component has unsupported or not specified type')
def get_name_from_path(path: str) -> str:
    """
    Return the file's name from a path, without directory or extension.
    :param path: path to the file
    :return: name of the file
    """
    base_name = os.path.basename(path)
    stem, _extension = os.path.splitext(base_name)
    return stem
def find_end_of_component(file_desc: io.BufferedReader, component: str, end_tags: tuple = ()):
    """
    Find the tag and stream position of the end of the component.
    :param file_desc: file descriptor
    :param component: component name from supported_components
    :param end_tags: extra tags that also terminate this component
    :return: tuple of (terminating tag, file position after that tag)
    """
    # Any of: the component's own closing tag, the generic end-of-component /
    # end-of-network markers, caller-supplied tags, or the opening tag of any
    # other supported component (a new component implicitly ends this one).
    # NOTE(review): the comprehension variable below shadows the 'component'
    # parameter; harmless here since the parameter is not used afterwards.
    end_tags_of_component = ['</{}>'.format(component),
                             end_of_component_tag.lower(),
                             end_of_nnet_tag.lower(),
                             *end_tags,
                             *['<{}>'.format(component) for component in supported_components]]
    # Scan forward tag-by-tag until one of the terminators appears.
    next_tag = find_next_tag(file_desc)
    while next_tag.lower() not in end_tags_of_component:
        next_tag = find_next_tag(file_desc)
    return next_tag, file_desc.tell()
def get_parameters(file_desc: io.BufferedReader, start_index: int, end_index: int):
    """
    Return a slice of the file as an in-memory stream.
    :param start_index: position where reading starts
    :param end_index: position where reading ends
    :return: io.BytesIO wrapping the bytes between the two positions
    """
    file_desc.seek(start_index)
    chunk = file_desc.read(end_index - start_index)
    return io.BytesIO(chunk)
def read_token_value(file_desc: io.BufferedReader, token: bytes = b'', value_type: type = np.uint32):
    """
    Get value of the token.
    Read next token (until whitespace) and check that it equals the expected
    token, then read the typed value that follows it.
    :param file_desc: file descriptor
    :param token: expected token name (b'' disables the check)
    :param value_type: type of the value to read (selects the reader below)
    :return: value of the token
    """
    # Dispatch table from requested value type to the matching binary reader.
    # NOTE(review): np.bool is a deprecated alias for the builtin bool and is
    # removed in NumPy >= 1.24 — this dict may fail to build on modern NumPy.
    getters = {
        np.uint32: read_binary_integer32_token,
        np.uint64: read_binary_integer64_token,
        np.bool: read_binary_bool_token
    }
    current_token = collect_until_whitespace(file_desc)
    if token != b'' and token != current_token:
        raise Error('Can not load token {} from Kaldi model'.format(token) +
                    refer_to_faq_msg(94))
    return getters[value_type](file_desc)
def collect_until_whitespace(file_desc: io.BufferedReader):
    """
    Accumulate bytes from the file up to the next whitespace.
    The terminating space (or end of file) is consumed but not returned.
    :param file_desc: file descriptor
    :return: the collected bytes token
    """
    token = b''
    while True:
        ch = file_desc.read(1)
        if ch in (b' ', b''):
            break
        token += ch
    return token
def collect_until_token(file_desc: io.BufferedReader, token):
    """
    Read from file until the token is found.
    :param file_desc: file descriptor
    :param token: token that we find
    :return: None; the stream is left positioned just past the token
    :raises Error: if end of file is reached without finding the token
    """
    while True:
        # usually there is the following structure <CellDim> DIM<ClipGradient> VALUEFM
        res = collect_until_whitespace(file_desc)
        # Accept either an exact match or a token glued to the end of the
        # previous value (as in 'DIM<ClipGradient>' above).
        if res == token or res[-len(token):] == token:
            return
        # Determine the total stream size to detect end of file.
        size = 0
        if isinstance(file_desc, io.BytesIO):
            size = len(file_desc.getbuffer())
        elif isinstance(file_desc, io.BufferedReader):
            size = os.fstat(file_desc.fileno()).st_size
        if file_desc.tell() == size:
            raise Error('End of the file. Token {} not found. {}'.format(token, file_desc.tell()))
def collect_until_token_and_read(file_desc: io.BufferedReader, token, value_type: type = np.uint32):
    """
    Read from file until the token, then read the typed value that follows.
    :param file_desc: file descriptor
    :param token: token to find and read
    :param value_type: type of value to read (selects the reader below)
    :return: the value read after the token
    """
    # Dispatch table from requested value type to the matching binary reader.
    # NOTE(review): np.bool / np.string_ are deprecated aliases removed in
    # modern NumPy (>= 1.24) — this dict may fail to build there.
    getters = {
        np.uint32: read_binary_integer32_token,
        np.uint64: read_binary_integer64_token,
        np.bool: read_binary_bool_token,
        np.string_: read_string
    }
    collect_until_token(file_desc, token)
    return getters[value_type](file_desc)
def create_edge_attrs(prev_layer_id: str, next_layer_id: str, in_port=0, out_port=0) -> dict:
    """
    Build the common attribute dictionary for a graph edge.
    :param prev_layer_id: id of previous layer
    :param next_layer_id: id of next layer
    :param in_port: 'in' port
    :param out_port: 'out' port
    :return: dictionary containing the common edge attributes
    """
    attrs = {
        'out': out_port,
        'in': in_port,
        'name': next_layer_id,
        # Debug info ties the edge back to the framework tensor it came from.
        'fw_tensor_debug_info': [(prev_layer_id, next_layer_id)],
        'in_attrs': ['in', 'name'],
        'out_attrs': ['out', 'name'],
        'data_attrs': ['fw_tensor_debug_info'],
    }
    return attrs
def read_blob(file_desc: io.BufferedReader, size: int, dtype=np.float32):
    """
    Read a blob of values from the file.
    :param file_desc: file descriptor
    :param size: number of elements in the blob
    :param dtype: element type of the blob (np.float32 or np.int32)
    :return: np array containing the blob
    """
    # Bytes per element for the supported dtypes.
    dsizes = {
        np.float32: 4,
        np.int32: 4
    }
    data = file_desc.read(size * dsizes[dtype])
    # Fix: np.fromstring is deprecated; np.frombuffer is the supported
    # replacement.  .copy() keeps the result writable, matching the
    # behaviour of fromstring (frombuffer alone returns a read-only view).
    return np.frombuffer(data, dtype=dtype).copy()
def get_args_for_specifier(string):
    """
    Parse a bytes specifier of the form ``(<arg1>, <arg2>, .., <argn>)`` and
    return the list of top-level arguments. Arguments may themselves contain
    balanced parentheses (nested specifiers), which are kept intact.
    :param string: bytes in format (<arg1>, <arg2>, .., <argn>)
    :return: list of bytes arguments
    :raises Error: on unbalanced brackets in the input
    """
    # open_bracket tracks the current parenthesis nesting depth; depth 1 is
    # the outermost specifier's own brackets.
    open_bracket = 1
    pos = 1
    args = []
    prev_arg_pos = 1
    while pos < len(string):
        pos_open = string.find(b'(', pos)
        pos_close = string.find(b')', pos)
        pos_sep = string.find(b',', pos)
        if pos_open == -1:
            # No more nested specifiers ahead of us.
            if open_bracket == 1:
                # Remaining text is plain comma-separated args up to the
                # final closing bracket.
                args = args + string[prev_arg_pos:pos_close].replace(b' ', b'').split(b',')
                pos = len(string)
            else:
                # We are inside a nested specifier: skip forward over its
                # closing brackets until we are back at depth 1.
                open_bracket = open_bracket - 1
                while open_bracket > 1:
                    pos_close = string.find(b')', pos_close+1)
                    if pos_close != -1:
                        open_bracket = open_bracket - 1
                    else:
                        raise Error("Syntax error in model: incorrect number of brackets")
                # The whole nested specifier becomes a single argument.
                args.append(string[prev_arg_pos:pos_close+1].strip())
                prev_arg_pos = string.find(b',', pos_close+1) + 1
                # Collect any plain args that follow the nested specifier.
                if prev_arg_pos != 0 and string[prev_arg_pos:-2].replace(b' ', b'').split(b',') != [b'']:
                    args = args + string[prev_arg_pos:-2].replace(b' ', b'').split(b',')
                pos = len(string)
        else:
            if pos_sep < pos_open and open_bracket == 1:
                # Plain args appear before the next nested specifier:
                # consume everything up to the last comma before its '('.
                pos_sep = string[pos_sep:pos_open].rfind(b',') + pos_sep
                args = args + string[prev_arg_pos:pos_sep].replace(b' ', b'').split(b',')
                prev_arg_pos = pos_sep + 1
            if pos_open < pos_close:
                # An opening bracket comes first: descend one level.
                open_bracket = open_bracket + 1
                pos = pos_open + 1
            else:
                # A closing bracket comes first: ascend one level.
                open_bracket = open_bracket - 1
                if open_bracket == 1:
                    # Back at the top level: the nested specifier just
                    # closed is one complete argument.
                    args.append(string[prev_arg_pos:pos_close + 1].strip())
                    prev_arg_pos = string.find(b',', pos_close+1) + 1
                    pos = prev_arg_pos
                else:
                    pos = pos_close + 1
    return args
| [
"mo.utils.error.Error",
"io.BytesIO",
"mo.utils.utils.refer_to_faq_msg",
"struct.unpack",
"os.path.basename",
"numpy.fromstring"
] | [((7192, 7210), 'io.BytesIO', 'io.BytesIO', (['buffer'], {}), '(buffer)\n', (7202, 7210), False, 'import io\n'), ((10726, 10758), 'numpy.fromstring', 'np.fromstring', (['data'], {'dtype': 'dtype'}), '(data, dtype=dtype)\n', (10739, 10758), True, 'import numpy as np\n'), ((2156, 2177), 'struct.unpack', 'struct.unpack', (['"""H"""', 's'], {}), "('H', s)\n", (2169, 2177), False, 'import struct\n'), ((2390, 2411), 'struct.unpack', 'struct.unpack', (['"""I"""', 's'], {}), "('I', s)\n", (2403, 2411), False, 'import struct\n'), ((2624, 2645), 'struct.unpack', 'struct.unpack', (['"""q"""', 's'], {}), "('q', s)\n", (2637, 2645), False, 'import struct\n'), ((3935, 3969), 'numpy.fromstring', 'np.fromstring', (['s'], {'dtype': 'np.float32'}), '(s, dtype=np.float32)\n', (3948, 3969), True, 'import numpy as np\n'), ((4362, 4400), 'mo.utils.error.Error', 'Error', (['"""Unexpected end of Kaldi model"""'], {}), "('Unexpected end of Kaldi model')\n", (4367, 4400), False, 'from mo.utils.error import Error\n'), ((5928, 5950), 'os.path.basename', 'os.path.basename', (['path'], {}), '(path)\n', (5944, 5950), False, 'import os\n'), ((1922, 1943), 'struct.unpack', 'struct.unpack', (['"""?"""', 's'], {}), "('?', s)\n", (1935, 1943), False, 'import struct\n'), ((5684, 5740), 'mo.utils.error.Error', 'Error', (['"""Component has unsupported or not specified type"""'], {}), "('Component has unsupported or not specified type')\n", (5689, 5740), False, 'from mo.utils.error import Error\n'), ((7936, 7956), 'mo.utils.utils.refer_to_faq_msg', 'refer_to_faq_msg', (['(94)'], {}), '(94)\n', (7952, 7956), False, 'from mo.utils.utils import refer_to_faq_msg\n'), ((11704, 11764), 'mo.utils.error.Error', 'Error', (['"""Syntax error in model: incorrect number of brackets"""'], {}), "('Syntax error in model: incorrect number of brackets')\n", (11709, 11764), False, 'from mo.utils.error import Error\n')] |
# -*- coding: utf-8 -*-
"""Classes for 2d U-net training and prediction.
"""
import json
from loguru import logger
import os
import sys
import warnings
from functools import partial
from pathlib import Path
from zipfile import ZipFile
import numpy as np
#from pytorch3dunet.unet3d.losses import GeneralizedDiceLoss
import torch
import torch.nn.functional as F
from fastai.callbacks import CSVLogger, SaveModelCallback
from fastai.utils.mem import gpu_mem_get_free_no_cache
from fastai.vision import (SegmentationItemList, dice, get_transforms,
imagenet_stats, lr_find, models, open_image,
unet_learner, crop_pad, Image, pil2tensor)
import matplotlib as mpl
mpl.use('Agg')
from matplotlib import pyplot as plt
from skimage import exposure, img_as_ubyte, io, img_as_float
from tqdm import tqdm
warnings.filterwarnings("ignore", category=UserWarning)
class Unet2dTrainer:
    """Class that takes in 2d images and corresponding segmentations and
    trains a 2d U-net with a pretrained ResNet34 encoder.
    Args:
        data_im_out_dir (pathlib.Path): Path to directory containing image slices.
        seg_im_out_dir (pathlib.Path): Path to directory containing label slices.
        codes (dict or list): Names of the label classes.
            NOTE(review): the class docstring originally said "list of str",
            but create_training_dataset() calls ``self.codes.keys()``, which
            requires a dict — confirm against callers.
        use_gdl (bool): Whether to use generalised dice loss (currently a
            no-op — see the commented-out GeneralizedDiceLoss below).
    """
    def __init__(self, data_im_out_dir, seg_im_out_dir, codes, use_gdl=False):
        self.data_dir = data_im_out_dir
        self.label_dir = seg_im_out_dir
        self.codes = codes
        # More than 2 classes means multilabel segmentation.
        self.multilabel = len(codes) > 2
        self.image_size = 256
        # Params for learning rate finder
        self.lr_find_lr_diff = 15
        self.lr_find_loss_threshold = 0.05
        self.lr_find_adjust_value = 1
        # Generalised dice loss is disabled (import commented out above).
        self.gdl = None
        # if use_gdl:
        #     self.gdl = GeneralizedDiceLoss(sigmoid_normalization=False)
        # Params for model training
        self.weight_decay = float(1e-2)
        self.pct_lr_inc = 0.4
        # Set up model ready for training
        self.batch_size = self.get_batchsize()
        self.create_training_dataset()
        self.setup_metrics_and_loss()
        self.create_model()
    def setup_metrics_and_loss(self):
        """Sets instance attributes for loss function and evaluation metrics
        according to whether binary or multilabel segmentation is being
        performed.
        """
        if self.multilabel:
            logger.info("Setting up for multilabel segmentation since there are "
                        f"{len(self.codes)} classes")
            self.metrics = self.accuracy
            self.monitor = 'accuracy'
            # None lets fastai pick its default loss for the data.
            self.loss_func = None
        else:
            logger.info("Setting up for binary segmentation since there are "
                        f"{len(self.codes)} classes")
            self.metrics = [partial(dice, iou=True)]
            self.monitor = 'dice'
            self.loss_func = self.bce_loss
        # If Generalised dice loss is selected, overwrite loss function
        if self.gdl:
            logger.info("Using generalised dice loss.")
            self.loss_func = self.generalised_dice_loss
    def create_training_dataset(self):
        """Creates a fastai segmentation dataset and stores it as an instance
        attribute.
        """
        logger.info("Creating training dataset from saved images.")
        # NOTE(review): self.codes.keys() implies codes is a dict here.
        src = (SegmentationItemList.from_folder(self.data_dir)
               .split_by_rand_pct()
               .label_from_func(self.get_label_name, classes=list(self.codes.keys())))
        self.data = (src.transform(get_transforms(), size=self.image_size, tfm_y=True)
                     .databunch(bs=self.batch_size)
                     .normalize(imagenet_stats))
    def create_model(self):
        """Creates a deep learning model linked to the dataset and stores it as
        an instance attribute.
        """
        logger.info("Creating 2d U-net model for training.")
        # CSVLogger records training history; SaveModelCallback keeps the
        # weights of the best epoch according to self.monitor.
        self.model = unet_learner(self.data, models.resnet34, metrics=self.metrics,
                                  wd=self.weight_decay, loss_func=self.loss_func,
                                  callback_fns=[partial(CSVLogger,
                                                        filename='unet_training_history',
                                                        append=True),
                                                partial(SaveModelCallback,
                                                        monitor=self.monitor, mode='max',
                                                        name="best_unet_model")])
    def train_model(self, num_cyc_frozen=10, num_cyc_unfrozen=5):
        """Performs transfer learning training of the model for a number of
        cycles with encoder parameters frozen or unfrozen and a learning rate
        that is determined automatically.

        Args:
            num_cyc_frozen (int): one-cycle epochs with the encoder frozen.
            num_cyc_unfrozen (int): one-cycle epochs with all layers trainable.
        """
        if num_cyc_frozen > 0:
            logger.info("Finding learning rate for frozen Unet model.")
            lr_to_use = self.find_appropriate_lr()
            logger.info(
                f"Training frozen Unet for {num_cyc_frozen} cycles with learning rate of {lr_to_use}.")
            self.model.fit_one_cycle(num_cyc_frozen, slice(
                lr_to_use/50, lr_to_use), pct_start=self.pct_lr_inc)
        if num_cyc_unfrozen > 0:
            self.model.unfreeze()
            logger.info("Finding learning rate for unfrozen Unet model.")
            lr_to_use = self.find_appropriate_lr()
            logger.info(
                f"Training unfrozen Unet for {num_cyc_unfrozen} cycles with learning rate of {lr_to_use}.")
            self.model.fit_one_cycle(num_cyc_unfrozen, slice(
                lr_to_use/50, lr_to_use), pct_start=self.pct_lr_inc)
    def save_model_weights(self, model_filepath):
        """Saves the model weights plus a JSON of the label codes, bundled
        into a single zip archive; the intermediate files are removed.
        Args:
            model_filepath (pathlib.Path): Full path to location to save model
            weights excluding file extension.
        """
        self.model.save(model_filepath)
        json_path = model_filepath.parent/f"{model_filepath.name}_codes.json"
        zip_path = model_filepath.with_suffix('.zip')
        logger.info(
            f"Zipping the model weights to: {zip_path}")
        with open(json_path, 'w') as jf:
            json.dump(self.codes, jf)
        with ZipFile(zip_path, mode='w') as zf:
            zf.write(json_path, arcname=json_path.name)
            zf.write(model_filepath.with_suffix('.pth'),
                     arcname=model_filepath.with_suffix('.pth').name)
        # Clean up the intermediate files now that they are in the archive.
        os.remove(json_path)
        os.remove(model_filepath.with_suffix('.pth'))
    def predict_single_slice(self, data):
        """Takes in a 2d data array and returns the max and argmax of the predicted probabilities.
        Args:
            data (numpy.array): The 2d data array to be fed into the U-net.
        Returns:
            torch.tensor: A 3d torch tensor containing a 2d array with max probabilities
            and a 2d array with argmax indices.
        """
        data = img_as_float(data)
        data = Image(pil2tensor(data, dtype=np.float32))
        # Pad any odd dimension to even so the U-net accepts the image.
        self.fix_odd_sides(data)
        # predict() returns (category, class indices, raw probs); take probs.
        prediction = self.model.predict(data)[2]
        return torch.max(prediction, dim=0)
    def output_prediction_figure(self, model_path):
        """Saves a figure containing image slice data for three images
        from the validation dataset along with the corresponding ground truth
        label image and corresponding prediction output from the model attached
        to this class instance. The image is saved to the same directory as the
        model weights.
        Args:
            model_path (pathlib.Path): Full path to the model weights file,
            this is used to get the directory and name of the model, not to
            load and predict.
        """
        # Remove the restriction on the model prediction size
        self.model.data.single_ds.tfmargs['size'] = None
        filename_list = self.data.valid_ds.items[:3]
        img_list = []
        pred_list = []
        gt_list = []
        for fn in filename_list:
            img_list.append(open_image(fn))
            gt_list.append(io.imread(self.get_label_name(fn)))
        for img in img_list:
            self.fix_odd_sides(img)
            pred_list.append(img_as_ubyte(self.model.predict(img)[1][0]))
        # Horrible conversion from Fastai image to uint8 data array
        img_list = [img_as_ubyte(exposure.rescale_intensity(
            x.data.numpy()[0, :, :])) for x in img_list]
        # Create the plot: one row per image, columns are data/GT/prediction.
        fig = plt.figure(figsize=(12, 12))
        columns = 3
        rows = 3
        j = 0
        for i in range(columns*rows)[::3]:
            img = img_list[j]
            gt = gt_list[j]
            pred = pred_list[j]
            col1 = fig.add_subplot(rows, columns, i + 1)
            plt.imshow(img, cmap='gray')
            col2 = fig.add_subplot(rows, columns, i + 2)
            plt.imshow(gt, cmap='gray')
            col3 = fig.add_subplot(rows, columns, i + 3)
            plt.imshow(pred, cmap='gray')
            j += 1
            if i == 0:
                col1.title.set_text('Data')
                col2.title.set_text('Ground Truth')
                col3.title.set_text('Prediction')
        plt.suptitle(f"Predictions for {model_path.name}", fontsize=16)
        plt_out_pth = model_path.parent/f'{model_path.stem}_prediction_image.png'
        logger.info(f"Saving example image predictions to {plt_out_pth}")
        plt.savefig(plt_out_pth, dpi=300)
    def return_fast_prediction_volume(self, data_volume):
        """Predicts each z-slice of a 3d volume and returns the segmented
        volume (argmax class index per pixel).

        NOTE(review): odd y/x dimensions are padded by one in the output
        volume to match the even-padded predictions from
        predict_single_slice — confirm callers expect the padded shape.
        """
        logger.info("Predicting output for training volume.")
        zdim, ydim, xdim = data_volume.shape
        if ydim%2 != 0:
            ydim += 1
        if xdim%2 != 0:
            xdim += 1
        vol_out = np.zeros((zdim, ydim, xdim))
        for i, z_slice in enumerate(data_volume):
            # [1] selects the argmax (class index) plane.
            vol_out[i] = self.predict_single_slice(z_slice)[1]
        return vol_out
    def find_appropriate_lr(self):
        """Function taken from https://forums.fast.ai/t/automated-learning-rate-suggester/44199
        which attempts to automatically find a learning rate from the fastai lr_find function.
        Returns:
            float: A value for a sensible learning rate to use for training.
        """
        lr_find(self.model)
        #Get loss values and their corresponding gradients, and get lr values
        losses = np.array(self.model.recorder.losses)
        assert(self.lr_find_lr_diff < len(losses))
        loss_grad = np.gradient(losses)
        learning_rates = self.model.recorder.lrs
        #Search for index in gradients where loss is lowest before the loss spike
        #Initialize right and left idx using the lr_diff as a spacing unit
        #Set the local min lr as -1 to signify if threshold is too low
        local_min_lr = 0.001 # Add as default value to fix bug
        r_idx = -1
        l_idx = r_idx - self.lr_find_lr_diff
        # Walk the gradient curve backwards until the change between the two
        # probes drops below the threshold.
        while (l_idx >= -len(losses)) and (abs(loss_grad[r_idx] - loss_grad[l_idx])
                                           > self.lr_find_loss_threshold):
            local_min_lr = learning_rates[l_idx]
            r_idx -= 1
            l_idx -= 1
        lr_to_use = local_min_lr * self.lr_find_adjust_value
        return lr_to_use
    def get_batchsize(self):
        """Provides an appropriate batch size based upon free GPU memory.
        Returns:
            int: A batch size for model training.
        """
        gpu_free_mem = gpu_mem_get_free_no_cache()
        # Threshold in MB: roughly an 8 GB card gets the larger batch.
        if gpu_free_mem > 8200:
            batch_size = 8
        else:
            batch_size = 4
        logger.info(f"Using batch size of {batch_size}, have {gpu_free_mem} MB" \
            " of GPU RAM free.")
        return batch_size
    def fix_odd_sides(self, example_image):
        """Replaces an odd image dimension with an even dimension by padding.
        Taken from https://forums.fast.ai/t/segmentation-mask-prediction-on-different-input-image-sizes/44389/7.
        Args:
            example_image (fastai.vision.Image): The image to be fixed.

        NOTE(review): the padded image is rebound to a local name; callers
        appear to rely on fastai's crop_pad affecting the passed-in Image —
        confirm this mutates in place.
        """
        if (list(example_image.size)[0] % 2) != 0:
            example_image = crop_pad(example_image,
                                     size=(list(example_image.size)[
                                         0]+1, list(example_image.size)[1]),
                                     padding_mode='reflection')
        if (list(example_image.size)[1] % 2) != 0:
            example_image = crop_pad(example_image,
                                     size=(list(example_image.size)[0], list(
                                         example_image.size)[1] + 1),
                                     padding_mode='reflection')
    def bce_loss(self, logits, labels):
        """Function to calculate Binary Cross Entropy loss from predictions.
        Args:
            logits (torch.Tensor): output from network.
            labels (torch.Tensor): ground truth label values.
        Returns:
            torch.Tensor: The BCE loss calculated on the predictions.
        """
        # Use only the foreground-channel logits against the binary labels.
        logits = logits[:, 1, :, :].float()
        labels = labels.squeeze(1).float()
        return F.binary_cross_entropy_with_logits(logits, labels)
    def generalised_dice_loss(self, logits, labels):
        # One-hot encode the labels to (batch, classes, H, W) for the GDL.
        labels = F.one_hot(torch.squeeze(labels), len(self.codes))
        labels = labels.permute((0, 3, 1, 2))
        return self.gdl(logits, labels)
    def accuracy(self, input, target):
        """Calculates a pixel-accuracy metric between predictions and ground
        truth labels.
        Args:
            input (torch.Tensor): The predictions.
            target (torch.Tensor): The desired output (ground truth).
        Returns:
            torch.Tensor: scalar mean fraction of correctly classified pixels.
        """
        target = target.squeeze(1)
        return (input.argmax(dim=1) == target).float().mean()
    def get_label_name(self, img_fname):
        """Converts a path of an image slice to a path for the corresponding
        label slice.
        Args:
            img_fname (pathlib.Path): Path to an image slice file.
        Returns:
            pathlib.Path: Path to the corresponding segmentation label slice file.
        """
        # Image files are named 'data_*'; labels are 'seg_*' in label_dir.
        return self.label_dir/f'{"seg" + img_fname.stem[4:]}{img_fname.suffix}'
class Unet2dPredictor:
    """Class that can either load in fastai 2d Unet model weights or take an
    instance of a trained fastai Unet learner. It can then predict 2d
    segmentations of image slices provided and save them to disk.
    """
    def __init__(self, root_dir, model_path=None):
        # root_dir (pathlib.Path): working directory for dummy images and
        # extracted model files.  model_path is currently unused.
        self.dummy_fns = ['data_z_stack_0.png', 'seg_z_stack_0.png']
        self.dummy_dir = root_dir/'dummy_imgs'
        self.root_dir = root_dir
    def create_dummy_files(self):
        """Write one random data/label image pair used to build a dummy
        fastai dataset (required before model weights can be loaded)."""
        logger.info(f"Creating dummy images in {self.dummy_dir}.")
        os.makedirs(self.dummy_dir, exist_ok=True)
        for fn in self.dummy_fns:
            dummy_im = np.random.randint(256, size=(256, 256))
            io.imsave(self.dummy_dir/fn, img_as_ubyte(dummy_im))
    def create_dummy_dataset(self):
        """Creates a fastai segmentation dataset and stores it as an instance
        attribute.

        NOTE(review): relies on self.codes being set beforehand (done in
        create_model_from_zip).
        """
        logger.info("Creating training dataset from dummy images.")
        src = (SegmentationItemList.from_folder(self.dummy_dir)
               .split_by_rand_pct()
               .label_from_func(self.get_label_name, classes=self.codes))
        self.data = (src.transform(get_transforms(), size=256, tfm_y=True)
                     .databunch()
                     .normalize(imagenet_stats))
    def get_label_name(self, img_fname):
        """Converts a path of an image slice to a path for the corresponding
        label slice.
        Args:
            img_fname (pathlib.Path): Path to an image slice file.
        Returns:
            pathlib.Path: Path to the corresponding segmentation label slice file.
        """
        return self.dummy_dir/f'{"seg" + img_fname.stem[4:]}{img_fname.suffix}'
    def create_model_from_zip(self, weights_fn):
        """Creates a deep learning model linked to the dataset and stores it as
        an instance attribute. Returns labels saved with model (dict or list).
        """
        weights_fn = weights_fn.resolve()
        logger.info(f"Unzipping the model weights and label classes from {weights_fn}")
        output_dir = self.root_dir/"extracted_model_files"
        output_dir.mkdir(exist_ok=True)
        with ZipFile(weights_fn, mode='r') as zf:
            zf.extractall(output_dir)
        # Load in the label classes from the json file
        with open(output_dir/f"{weights_fn.stem}_codes.json") as jf:
            codes = json.load(jf)
        # Instance variable should always be a list
        if isinstance(codes, dict):
            logger.info("Converting label dictionary into list.")
            self.codes = [f"label_val_{i}" for i in codes]
        else:
            self.codes = codes
        # Have to create dummy files and dataset before loading in model weights
        self.create_dummy_files()
        self.create_dummy_dataset()
        logger.info("Creating 2d U-net model for prediction.")
        self.model = unet_learner(
            self.data, models.resnet34, model_dir=output_dir)
        logger.info("Loading in the saved weights.")
        self.model.load(weights_fn.stem)
        # Remove the restriction on the model prediction size
        self.model.data.single_ds.tfmargs['size'] = None
        return codes
    def get_model_from_trainer(self, trainer):
        # Adopt the already-trained learner from a Unet2dTrainer instance.
        self.model = trainer.model
| [
"fastai.vision.unet_learner",
"zipfile.ZipFile",
"torch.max",
"fastai.vision.SegmentationItemList.from_folder",
"skimage.img_as_float",
"numpy.array",
"torch.squeeze",
"numpy.gradient",
"fastai.vision.pil2tensor",
"os.remove",
"matplotlib.pyplot.imshow",
"fastai.utils.mem.gpu_mem_get_free_no_c... | [((712, 726), 'matplotlib.use', 'mpl.use', (['"""Agg"""'], {}), "('Agg')\n", (719, 726), True, 'import matplotlib as mpl\n'), ((848, 903), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {'category': 'UserWarning'}), "('ignore', category=UserWarning)\n", (871, 903), False, 'import warnings\n'), ((3489, 3548), 'loguru.logger.info', 'logger.info', (['"""Creating training dataset from saved images."""'], {}), "('Creating training dataset from saved images.')\n", (3500, 3548), False, 'from loguru import logger\n'), ((4083, 4135), 'loguru.logger.info', 'logger.info', (['"""Creating 2d U-net model for training."""'], {}), "('Creating 2d U-net model for training.')\n", (4094, 4135), False, 'from loguru import logger\n'), ((6310, 6366), 'loguru.logger.info', 'logger.info', (['f"""Zipping the model weights to: {zip_path}"""'], {}), "(f'Zipping the model weights to: {zip_path}')\n", (6321, 6366), False, 'from loguru import logger\n'), ((6698, 6718), 'os.remove', 'os.remove', (['json_path'], {}), '(json_path)\n', (6707, 6718), False, 'import os\n'), ((7188, 7206), 'skimage.img_as_float', 'img_as_float', (['data'], {}), '(data)\n', (7200, 7206), False, 'from skimage import exposure, img_as_ubyte, io, img_as_float\n'), ((7361, 7389), 'torch.max', 'torch.max', (['prediction'], {'dim': '(0)'}), '(prediction, dim=0)\n', (7370, 7389), False, 'import torch\n'), ((8735, 8763), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(12, 12)'}), '(figsize=(12, 12))\n', (8745, 8763), True, 'from matplotlib import pyplot as plt\n'), ((9438, 9501), 'matplotlib.pyplot.suptitle', 'plt.suptitle', (['f"""Predictions for {model_path.name}"""'], {'fontsize': '(16)'}), "(f'Predictions for {model_path.name}', fontsize=16)\n", (9450, 9501), True, 'from matplotlib import pyplot as plt\n'), ((9592, 9657), 'loguru.logger.info', 'logger.info', (['f"""Saving example image predictions to {plt_out_pth}"""'], {}), "(f'Saving 
example image predictions to {plt_out_pth}')\n", (9603, 9657), False, 'from loguru import logger\n'), ((9666, 9699), 'matplotlib.pyplot.savefig', 'plt.savefig', (['plt_out_pth'], {'dpi': '(300)'}), '(plt_out_pth, dpi=300)\n', (9677, 9699), True, 'from matplotlib import pyplot as plt\n'), ((9848, 9901), 'loguru.logger.info', 'logger.info', (['"""Predicting output for training volume."""'], {}), "('Predicting output for training volume.')\n", (9859, 9901), False, 'from loguru import logger\n'), ((10066, 10094), 'numpy.zeros', 'np.zeros', (['(zdim, ydim, xdim)'], {}), '((zdim, ydim, xdim))\n', (10074, 10094), True, 'import numpy as np\n'), ((10595, 10614), 'fastai.vision.lr_find', 'lr_find', (['self.model'], {}), '(self.model)\n', (10602, 10614), False, 'from fastai.vision import SegmentationItemList, dice, get_transforms, imagenet_stats, lr_find, models, open_image, unet_learner, crop_pad, Image, pil2tensor\n'), ((10710, 10746), 'numpy.array', 'np.array', (['self.model.recorder.losses'], {}), '(self.model.recorder.losses)\n', (10718, 10746), True, 'import numpy as np\n'), ((10818, 10837), 'numpy.gradient', 'np.gradient', (['losses'], {}), '(losses)\n', (10829, 10837), True, 'import numpy as np\n'), ((11765, 11792), 'fastai.utils.mem.gpu_mem_get_free_no_cache', 'gpu_mem_get_free_no_cache', ([], {}), '()\n', (11790, 11792), False, 'from fastai.utils.mem import gpu_mem_get_free_no_cache\n'), ((11901, 12000), 'loguru.logger.info', 'logger.info', (['f"""Using batch size of {batch_size}, have {gpu_free_mem} MB of GPU RAM free."""'], {}), "(\n f'Using batch size of {batch_size}, have {gpu_free_mem} MB of GPU RAM free.'\n )\n", (11912, 12000), False, 'from loguru import logger\n'), ((13451, 13501), 'torch.nn.functional.binary_cross_entropy_with_logits', 'F.binary_cross_entropy_with_logits', (['logits', 'labels'], {}), '(logits, labels)\n', (13485, 13501), True, 'import torch.nn.functional as F\n'), ((15047, 15105), 'loguru.logger.info', 'logger.info', (['f"""Creating dummy 
images in {self.dummy_dir}."""'], {}), "(f'Creating dummy images in {self.dummy_dir}.')\n", (15058, 15105), False, 'from loguru import logger\n'), ((15114, 15156), 'os.makedirs', 'os.makedirs', (['self.dummy_dir'], {'exist_ok': '(True)'}), '(self.dummy_dir, exist_ok=True)\n', (15125, 15156), False, 'import os\n'), ((15473, 15532), 'loguru.logger.info', 'logger.info', (['"""Creating training dataset from dummy images."""'], {}), "('Creating training dataset from dummy images.')\n", (15484, 15532), False, 'from loguru import logger\n'), ((16550, 16629), 'loguru.logger.info', 'logger.info', (['f"""Unzipping the model weights and label classes from {weights_fn}"""'], {}), "(f'Unzipping the model weights and label classes from {weights_fn}')\n", (16561, 16629), False, 'from loguru import logger\n'), ((17392, 17446), 'loguru.logger.info', 'logger.info', (['"""Creating 2d U-net model for prediction."""'], {}), "('Creating 2d U-net model for prediction.')\n", (17403, 17446), False, 'from loguru import logger\n'), ((17468, 17530), 'fastai.vision.unet_learner', 'unet_learner', (['self.data', 'models.resnet34'], {'model_dir': 'output_dir'}), '(self.data, models.resnet34, model_dir=output_dir)\n', (17480, 17530), False, 'from fastai.vision import SegmentationItemList, dice, get_transforms, imagenet_stats, lr_find, models, open_image, unet_learner, crop_pad, Image, pil2tensor\n'), ((17552, 17596), 'loguru.logger.info', 'logger.info', (['"""Loading in the saved weights."""'], {}), "('Loading in the saved weights.')\n", (17563, 17596), False, 'from loguru import logger\n'), ((3221, 3264), 'loguru.logger.info', 'logger.info', (['"""Using generalised dice loss."""'], {}), "('Using generalised dice loss.')\n", (3232, 3264), False, 'from loguru import logger\n'), ((5042, 5101), 'loguru.logger.info', 'logger.info', (['"""Finding learning rate for frozen Unet model."""'], {}), "('Finding learning rate for frozen Unet model.')\n", (5053, 5101), False, 'from loguru import logger\n'), 
((5165, 5274), 'loguru.logger.info', 'logger.info', (['f"""Training frozen Unet for {num_cyc_frozen} cycles with learning rate of {lr_to_use}."""'], {}), "(\n f'Training frozen Unet for {num_cyc_frozen} cycles with learning rate of {lr_to_use}.'\n )\n", (5176, 5274), False, 'from loguru import logger\n'), ((5490, 5551), 'loguru.logger.info', 'logger.info', (['"""Finding learning rate for unfrozen Unet model."""'], {}), "('Finding learning rate for unfrozen Unet model.')\n", (5501, 5551), False, 'from loguru import logger\n'), ((5615, 5728), 'loguru.logger.info', 'logger.info', (['f"""Training unfrozen Unet for {num_cyc_unfrozen} cycles with learning rate of {lr_to_use}."""'], {}), "(\n f'Training unfrozen Unet for {num_cyc_unfrozen} cycles with learning rate of {lr_to_use}.'\n )\n", (5626, 5728), False, 'from loguru import logger\n'), ((6433, 6458), 'json.dump', 'json.dump', (['self.codes', 'jf'], {}), '(self.codes, jf)\n', (6442, 6458), False, 'import json\n'), ((6472, 6499), 'zipfile.ZipFile', 'ZipFile', (['zip_path'], {'mode': '"""w"""'}), "(zip_path, mode='w')\n", (6479, 6499), False, 'from zipfile import ZipFile\n'), ((7228, 7262), 'fastai.vision.pil2tensor', 'pil2tensor', (['data'], {'dtype': 'np.float32'}), '(data, dtype=np.float32)\n', (7238, 7262), False, 'from fastai.vision import SegmentationItemList, dice, get_transforms, imagenet_stats, lr_find, models, open_image, unet_learner, crop_pad, Image, pil2tensor\n'), ((9017, 9045), 'matplotlib.pyplot.imshow', 'plt.imshow', (['img'], {'cmap': '"""gray"""'}), "(img, cmap='gray')\n", (9027, 9045), True, 'from matplotlib import pyplot as plt\n'), ((9115, 9142), 'matplotlib.pyplot.imshow', 'plt.imshow', (['gt'], {'cmap': '"""gray"""'}), "(gt, cmap='gray')\n", (9125, 9142), True, 'from matplotlib import pyplot as plt\n'), ((9212, 9241), 'matplotlib.pyplot.imshow', 'plt.imshow', (['pred'], {'cmap': '"""gray"""'}), "(pred, cmap='gray')\n", (9222, 9241), True, 'from matplotlib import pyplot as plt\n'), ((13583, 
13604), 'torch.squeeze', 'torch.squeeze', (['labels'], {}), '(labels)\n', (13596, 13604), False, 'import torch\n'), ((15214, 15253), 'numpy.random.randint', 'np.random.randint', (['(256)'], {'size': '(256, 256)'}), '(256, size=(256, 256))\n', (15231, 15253), True, 'import numpy as np\n'), ((16742, 16771), 'zipfile.ZipFile', 'ZipFile', (['weights_fn'], {'mode': '"""r"""'}), "(weights_fn, mode='r')\n", (16749, 16771), False, 'from zipfile import ZipFile\n'), ((16961, 16974), 'json.load', 'json.load', (['jf'], {}), '(jf)\n', (16970, 16974), False, 'import json\n'), ((17075, 17128), 'loguru.logger.info', 'logger.info', (['"""Converting label dictionary into list."""'], {}), "('Converting label dictionary into list.')\n", (17086, 17128), False, 'from loguru import logger\n'), ((3014, 3037), 'functools.partial', 'partial', (['dice'], {'iou': '(True)'}), '(dice, iou=True)\n', (3021, 3037), False, 'from functools import partial\n'), ((8290, 8304), 'fastai.vision.open_image', 'open_image', (['fn'], {}), '(fn)\n', (8300, 8304), False, 'from fastai.vision import SegmentationItemList, dice, get_transforms, imagenet_stats, lr_find, models, open_image, unet_learner, crop_pad, Image, pil2tensor\n'), ((15295, 15317), 'skimage.img_as_ubyte', 'img_as_ubyte', (['dummy_im'], {}), '(dummy_im)\n', (15307, 15317), False, 'from skimage import exposure, img_as_ubyte, io, img_as_float\n'), ((4350, 4415), 'functools.partial', 'partial', (['CSVLogger'], {'filename': '"""unet_training_history"""', 'append': '(True)'}), "(CSVLogger, filename='unet_training_history', append=True)\n", (4357, 4415), False, 'from functools import partial\n'), ((4561, 4650), 'functools.partial', 'partial', (['SaveModelCallback'], {'monitor': 'self.monitor', 'mode': '"""max"""', 'name': '"""best_unet_model"""'}), "(SaveModelCallback, monitor=self.monitor, mode='max', name=\n 'best_unet_model')\n", (4568, 4650), False, 'from functools import partial\n'), ((3564, 3611), 'fastai.vision.SegmentationItemList.from_folder', 
'SegmentationItemList.from_folder', (['self.data_dir'], {}), '(self.data_dir)\n', (3596, 3611), False, 'from fastai.vision import SegmentationItemList, dice, get_transforms, imagenet_stats, lr_find, models, open_image, unet_learner, crop_pad, Image, pil2tensor\n'), ((15548, 15596), 'fastai.vision.SegmentationItemList.from_folder', 'SegmentationItemList.from_folder', (['self.dummy_dir'], {}), '(self.dummy_dir)\n', (15580, 15596), False, 'from fastai.vision import SegmentationItemList, dice, get_transforms, imagenet_stats, lr_find, models, open_image, unet_learner, crop_pad, Image, pil2tensor\n'), ((3770, 3786), 'fastai.vision.get_transforms', 'get_transforms', ([], {}), '()\n', (3784, 3786), False, 'from fastai.vision import SegmentationItemList, dice, get_transforms, imagenet_stats, lr_find, models, open_image, unet_learner, crop_pad, Image, pil2tensor\n'), ((15743, 15759), 'fastai.vision.get_transforms', 'get_transforms', ([], {}), '()\n', (15757, 15759), False, 'from fastai.vision import SegmentationItemList, dice, get_transforms, imagenet_stats, lr_find, models, open_image, unet_learner, crop_pad, Image, pil2tensor\n')] |
# -*- coding: utf-8 -*-
"""
Routines and Class definitions for the diffusion maps algorithm.
"""
from __future__ import absolute_import
import numpy as np
import scipy.sparse as sps
import scipy.sparse.linalg as spsl
import warnings
from . import kernel
from . import utils
class DiffusionMap(object):
    """
    Diffusion Map object for data analysis
    Parameters
    ----------
    kernel_object : Kernel object.
        Kernel object that outputs the values of the kernel. Must have the method .fit(X) and .compute() methods.
        Any epsilon desired for normalization should be stored at kernel_object.epsilon_fitted and any bandwidths
        should be located at kernel_object.bandwidths.
    alpha : scalar, optional
        Exponent to be used for the left normalization in constructing the diffusion map.
    n_evecs : int, optional
        Number of diffusion map eigenvectors to return
    weight_fxn : callable or None, optional
        Callable function that take in a point, and outputs the value of the weight matrix at those points.
    density_fxn : callable or None, optional
        Callable function that take in X, and outputs the value of the density of X. Used instead of kernel density estimation in the normalisation.
    bandwidth_normalize: boolean, optional
        If true, normalize the final constructed transition matrix by the bandwidth as described in Berry and Harlim. [1]_
    oos : 'nystroem' or 'power', optional
        Method to use for out-of-sample extension.
    References
    ----------
    .. [1] <NAME>, and <NAME>, Applied and Computational Harmonic Analysis 40, 68-96
       (2016).
    """
    def __init__(self, kernel_object, alpha=0.5, n_evecs=1,
                 weight_fxn=None, density_fxn=None,
                 bandwidth_normalize=False, oos='nystroem'):
        """
        Initializes Diffusion Map, sets parameters.
        """
        self.alpha = alpha
        self.n_evecs = n_evecs
        # Populated once construct_Lmat()/fit() has run on data.
        self.epsilon_fitted = None
        self.weight_fxn = weight_fxn
        self.bandwidth_normalize = bandwidth_normalize
        self.oos = oos
        self.density_fxn = density_fxn
        self.local_kernel = kernel_object
    @classmethod
    def from_sklearn(cls, alpha=0.5, k=64, kernel_type='gaussian', epsilon='bgh', n_evecs=1, neighbor_params=None,
                     metric='euclidean', metric_params=None, weight_fxn=None, density_fxn=None, bandwidth_type=None,
                     bandwidth_normalize=False, oos='nystroem'):
        """
        Builds the diffusion map using a kernel constructed using the Scikit-learn nearest neighbor object.
        Parameters are largely the same as the constructor, but in place of the kernel object it take
        the following parameters.
        Parameters
        ----------
        k : int, optional
            Number of nearest neighbors over which to construct the kernel.
        kernel_type : string, optional
            Type of kernel to construct. Currently the only option is 'gaussian', but more will be implemented.
        epsilon: string or scalar, optional
            Method for choosing the epsilon. Currently, the only options are to provide a scalar (epsilon is set to the provided scalar) 'bgh' (Berry, Giannakis and Harlim), and 'bgh_generous' ('bgh' method, with answer multiplied by 2.
        neighbor_params : dict or None, optional
            Optional parameters for the nearest Neighbor search. See scikit-learn NearestNeighbors class for details.
        metric : string, optional
            Metric for distances in the kernel. Default is 'euclidean'. The callable should take two arrays as input and return one value indicating the distance between them.
        metric_params : dict or None, optional
            Optional parameters required for the metric given.
        bandwidth_type: callable, number, string, or None, optional
            Type of bandwidth to use in the kernel. If None (default), a fixed bandwidth kernel is used. If a callable function, the data is passed to the function, and the bandwidth is output (note that the function must take in an entire dataset, not the points 1-by-1). If a number, e.g. -.25, a kernel density estimate is performed, and the bandwidth is taken to be q**(input_number). For a string input, the input is assumed to be an evaluatable expression in terms of the dimension d, e.g. "-1/(d+2)". The dimension is then estimated, and the bandwidth is set to q**(evaluated input string).
        Examples
        --------
        # setup neighbor_params list with as many jobs as CPU cores and kd_tree neighbor search.
        >>> neighbor_params = {'n_jobs': -1, 'algorithm': 'kd_tree'}
        # initialize diffusion map object with the top two eigenvalues being computed, epsilon set to 0.1
        # and alpha set to 1.0.
        >>> mydmap = DiffusionMap.from_sklearn(n_evecs = 2, epsilon = .1, alpha = 1.0, neighbor_params = neighbor_params)
        References
        ----------
        .. [1] <NAME>, and <NAME>, Applied and Computational Harmonic Analysis 40, 68-96
           (2016).
        """
        # Build a nearest-neighbor based kernel, then delegate to the main constructor.
        buendia = kernel.Kernel(kernel_type=kernel_type, k=k, epsilon=epsilon, neighbor_params=neighbor_params, metric=metric, metric_params=metric_params, bandwidth_type=bandwidth_type)
        dmap = cls(buendia, alpha=alpha, n_evecs=n_evecs, weight_fxn=weight_fxn, density_fxn=density_fxn, bandwidth_normalize=bandwidth_normalize, oos=oos)
        # if ((bandwidth_type is None) and (bandwidth_normalize is True)):
        #     warnings.warn('Bandwith normalization set to true, but no bandwidth function provided. Setting to False.')
        return dmap
    def _build_kernel(self, X, my_kernel):
        """Fit the kernel to X and return the symmetrized kernel matrix together with the fitted kernel."""
        my_kernel.fit(X)
        kernel_matrix = utils._symmetrize_matrix(my_kernel.compute())
        return kernel_matrix, my_kernel
    def _compute_weights(self, X):
        """Evaluate the weight function at each data point, or return None when no weight function was given."""
        if self.weight_fxn is not None:
            N = np.shape(X)[0]
            return np.array([self.weight_fxn(Xi) for Xi in X]).reshape(N)
        else:
            return None
    def _make_right_norm_vec(self, kernel_matrix, q=None, bandwidths=None):
        """Build the right-normalization vector q**(-alpha).

        When no density q is supplied, a kernel density estimate is taken
        from the kernel row sums (scaled by bandwidth**-2 for
        variable-bandwidth kernels).
        """
        if q is None:
            # perform kde
            q = np.array(kernel_matrix.sum(axis=1)).ravel()
            if bandwidths is not None:
                q /= bandwidths**2
        right_norm_vec = np.power(q, -self.alpha)
        return q, right_norm_vec
    def _right_normalize(self, kernel_matrix, right_norm_vec, weights):
        """Right-multiply the kernel by diag(right_norm_vec) and, if given, diag(weights)."""
        m = right_norm_vec.shape[0]
        Dalpha = sps.spdiags(right_norm_vec, 0, m, m)
        kernel_matrix = kernel_matrix * Dalpha
        if weights is not None:
            weight_mat = sps.spdiags(weights, 0, m, m)
            kernel_matrix = kernel_matrix * weight_mat
        return kernel_matrix
    def _left_normalize(self, kernel_matrix):
        """Row-normalize the kernel matrix so each row sums to one (Markov transition matrix)."""
        row_sum = kernel_matrix.sum(axis=1).transpose()
        n = row_sum.shape[1]
        Dalpha = sps.spdiags(np.power(row_sum, -1), 0, n, n)
        P = Dalpha * kernel_matrix
        return P
    def _build_generator(self, P, epsilon_fitted, bandwidths=None, bandwidth_normalize=False):
        """Convert the transition matrix P into an approximate generator L = (P - I) / epsilon."""
        m, n = P.shape
        # k=(n - m) offsets the identity so it lines up with the training-data
        # columns when P is rectangular (out-of-sample case).
        L = (P - sps.eye(m, n, k=(n - m))) / epsilon_fitted
        if bandwidth_normalize:
            if bandwidths is not None:
                # Variable-bandwidth normalization: scale rows by bandwidth**-2.
                bw_diag = sps.spdiags(np.power(bandwidths, -2), 0, m, m)
                L = bw_diag * L
            else:
                warnings.warn('Bandwith normalization set to true, but no bandwidth function was found in normalization. Not performing normalization')
        return L
    def _make_diffusion_coords(self, L):
        """Solve for the leading eigenpairs of L and scale them into diffusion coordinates."""
        # Compute n_evecs+1 eigenpairs with largest real part; the trivial
        # constant eigenvector (first after sorting) is dropped below.
        evals, evecs = spsl.eigs(L, k=(self.n_evecs+1), which='LR')
        ix = evals.argsort()[::-1][1:]
        evals = np.real(evals[ix])
        evecs = np.real(evecs[:, ix])
        # Scale each eigenvector by sqrt(-1/eigenvalue) (eigenvalues of the
        # generator are negative, so the argument of sqrt is positive).
        dmap = np.dot(evecs, np.diag(np.sqrt(-1. / evals)))
        return dmap, evecs, evals
    def construct_Lmat(self, X):
        """
        Builds the transition matrix, but does NOT compute the eigenvectors. This is useful for applications where the transition matrix itself is the object of interest.
        Parameters
        ----------
        X : array-like, shape (n_query, n_features)
            Data upon which to construct the diffusion map.
        Returns
        -------
        self : the object itself
        """
        kernel_matrix, my_kernel = self._build_kernel(X, self.local_kernel)
        weights = self._compute_weights(X)
        # Use the explicit density if provided, otherwise KDE is performed
        # inside _make_right_norm_vec.
        if self.density_fxn is not None:
            density = self.density_fxn(X)
        else:
            density = None
        # Fixed-bandwidth kernels have no bandwidths attribute.
        try:
            bandwidths = my_kernel.bandwidths
        except AttributeError:
            bandwidths = None
        q, right_norm_vec = self._make_right_norm_vec(kernel_matrix, q=density, bandwidths=bandwidths)
        P = self._right_normalize(kernel_matrix, right_norm_vec, weights)
        P = self._left_normalize(P)
        L = self._build_generator(P, my_kernel.epsilon_fitted, bandwidths, bandwidth_normalize=self.bandwidth_normalize)
        # Save data
        self.local_kernel = my_kernel
        self.epsilon_fitted = my_kernel.epsilon_fitted
        self.data = X
        self.weights = weights
        self.kernel_matrix = kernel_matrix
        self.L = L
        self.q = q
        self.right_norm_vec = right_norm_vec
        return self
    def fit(self, X):
        """
        Fits the data.
        Parameters
        ----------
        X : array-like, shape (n_query, n_features)
            Data upon which to construct the diffusion map.
        Returns
        -------
        self : the object itself
        """
        self.construct_Lmat(X)
        dmap, evecs, evals = self._make_diffusion_coords(self.L)
        # Save constructed data.
        self.evals = evals
        self.evecs = evecs
        self.dmap = dmap
        return self
    def transform(self, Y):
        """
        Performs Nystroem out-of-sample extension to calculate the values of the diffusion coordinates at each given point.
        Parameters
        ----------
        Y : array-like, shape (n_query, n_features)
            Data for which to perform the out-of-sample extension.
        Returns
        -------
        phi : numpy array, shape (n_query, n_eigenvectors)
            Transformed value of the given values.
        """
        # If the query points are exactly the training data, return the
        # fitted coordinates without any extension.
        if np.array_equal(self.data, Y):
            return self.dmap
        else:
            # turn Y into 2D array if needed
            if (Y.ndim == 1):
                Y = Y[np.newaxis, :]
            if self.oos == "nystroem":
                return nystroem_oos(self, Y)
            elif self.oos == "power":
                return power_oos(self, Y)
            else:
                raise ValueError('Did not understand the OOS algorithm specified')
    def fit_transform(self, X):
        """
        Fits the data and returns diffusion coordinates. equivalent to calling dmap.fit(X).transform(x).
        Parameters
        ----------
        X : array-like, shape (n_query, n_features)
            Data upon which to construct the diffusion map.
        Returns
        -------
        phi : numpy array, shape (n_query, n_eigenvectors)
            Transformed value of the given values.
        """
        self.fit(X)
        return self.dmap
class TMDmap(DiffusionMap):
    """
    Target-measure diffusion map (TMDmap).

    A convenience subclass of :class:`DiffusionMap` that builds its own
    scikit-learn based kernel and accepts a change-of-measure function
    instead of a raw weight function; the weight applied internally is the
    square root of the change of measure.  All other parameters are the
    same as for the base Diffusion Map object.

    Parameters
    ----------
    change_of_measure : callable, optional
        Function that takes in a point and evaluates the change of measure
        between the density otherwise stationary to the diffusion map and
        the desired target density.
    """
    def __init__(self, alpha=0.5, k=64, kernel_type='gaussian', epsilon='bgh',
                 n_evecs=1, neighbor_params=None, metric='euclidean',
                 metric_params=None, change_of_measure=None, density_fxn=None,
                 bandwidth_type=None, bandwidth_normalize=False, oos='nystroem'):
        # The TMDmap weight is the square root of the change of measure.
        def weight_fxn(y_i):
            return np.sqrt(change_of_measure(y_i))

        kernel_object = kernel.Kernel(kernel_type=kernel_type, k=k,
                                      epsilon=epsilon,
                                      neighbor_params=neighbor_params,
                                      metric=metric,
                                      metric_params=metric_params,
                                      bandwidth_type=bandwidth_type)
        super(TMDmap, self).__init__(kernel_object, alpha=alpha,
                                     n_evecs=n_evecs, weight_fxn=weight_fxn,
                                     density_fxn=density_fxn,
                                     bandwidth_normalize=bandwidth_normalize,
                                     oos=oos)
def nystroem_oos(dmap_object, Y):
    """Nystroem out-of-sample extension of a fitted diffusion map.

    Evaluates the diffusion coordinates at new query points by applying the
    (normalized) kernel between the query points and the training data to
    the precomputed diffusion coordinates.

    Parameters
    ----------
    dmap_object : DiffusionMap object
        A fitted diffusion map whose coordinates are to be extended.
    Y : array-like, shape (n_query, n_features)
        Points at which to evaluate the diffusion coordinates.

    Returns
    -------
    phi : numpy array, shape (n_query, n_eigenvectors)
        Estimated diffusion coordinates at the query points.
    """
    # Kernel between the query points and the original training data.
    cross_kernel = dmap_object.local_kernel.compute(Y)
    # Weights are evaluated on the training data, matching the fit.
    train_weights = dmap_object._compute_weights(dmap_object.local_kernel.data)
    # Apply the same right- and left-normalizations used during fitting.
    right_normalized = dmap_object._right_normalize(
        cross_kernel, dmap_object.right_norm_vec, train_weights)
    transition = dmap_object._left_normalize(right_normalized)
    # Propagate the fitted diffusion coordinates through the transition matrix.
    return transition * dmap_object.dmap
def power_oos(dmap_object, Y):
    """
    Performs out-of-sample extension to calculate the values of the diffusion coordinates at each given point using the power-like method.
    Parameters
    ----------
    dmap_object : DiffusionMap object
        Diffusion map upon which to perform the out-of-sample extension.
    Y : array-like, shape (n_query, n_features)
        Data for which to perform the out-of-sample extension.
    Returns
    -------
    phi : numpy array, shape (n_query, n_eigenvectors)
        Transformed value of the given values.
    """
    m = int(Y.shape[0])
    k_yx, y_bandwidths = dmap_object.local_kernel.compute(Y, return_bandwidths=True)  # Evaluate on ref points
    # NOTE(review): y_bandwidths is passed positionally here, so it binds to
    # the q= (density) parameter of _make_right_norm_vec rather than
    # bandwidths= -- confirm whether bandwidths=y_bandwidths was intended.
    yy_right_norm_vec = dmap_object._make_right_norm_vec(k_yx, y_bandwidths)[1]
    # Kernel value of each query point with itself (zero distance).
    k_yy_diag = dmap_object.local_kernel.kernel_fxn(0, dmap_object.epsilon_fitted)
    # Augment the training data with the query points and assemble the
    # combined m x (N + m) kernel: cross-kernel block plus a diagonal
    # self-kernel block for the query points.
    data_full = np.vstack([dmap_object.local_kernel.data, Y])
    k_full = sps.hstack([k_yx, sps.eye(m) * k_yy_diag])
    right_norm_full = np.hstack([dmap_object.right_norm_vec, yy_right_norm_vec])
    weights = dmap_object._compute_weights(data_full)
    P = dmap_object._left_normalize(dmap_object._right_normalize(k_full, right_norm_full, weights))
    L = dmap_object._build_generator(P, dmap_object.epsilon_fitted, y_bandwidths)
    # Split the generator into the block acting on the training points (L_yx)
    # and the diagonal acting on the query points themselves (L_yy).
    L_yx = L[:, :-m]
    L_yy = np.array(L[:, -m:].diagonal())
    # One power-iteration-like solve: phi_Y = L_yx . phi_X / (lambda - L_yy).
    adj_evals = dmap_object.evals - L_yy.reshape(-1, 1)
    dot_part = np.array(L_yx.dot(dmap_object.dmap))
    return (1. / adj_evals) * dot_part
| [
"numpy.shape",
"numpy.sqrt",
"scipy.sparse.eye",
"numpy.power",
"numpy.hstack",
"numpy.real",
"numpy.array_equal",
"numpy.vstack",
"warnings.warn",
"scipy.sparse.linalg.eigs",
"scipy.sparse.spdiags"
] | [((14676, 14721), 'numpy.vstack', 'np.vstack', (['[dmap_object.local_kernel.data, Y]'], {}), '([dmap_object.local_kernel.data, Y])\n', (14685, 14721), True, 'import numpy as np\n'), ((14800, 14858), 'numpy.hstack', 'np.hstack', (['[dmap_object.right_norm_vec, yy_right_norm_vec]'], {}), '([dmap_object.right_norm_vec, yy_right_norm_vec])\n', (14809, 14858), True, 'import numpy as np\n'), ((6357, 6381), 'numpy.power', 'np.power', (['q', '(-self.alpha)'], {}), '(q, -self.alpha)\n', (6365, 6381), True, 'import numpy as np\n'), ((6541, 6577), 'scipy.sparse.spdiags', 'sps.spdiags', (['right_norm_vec', '(0)', 'm', 'm'], {}), '(right_norm_vec, 0, m, m)\n', (6552, 6577), True, 'import scipy.sparse as sps\n'), ((7650, 7694), 'scipy.sparse.linalg.eigs', 'spsl.eigs', (['L'], {'k': '(self.n_evecs + 1)', 'which': '"""LR"""'}), "(L, k=self.n_evecs + 1, which='LR')\n", (7659, 7694), True, 'import scipy.sparse.linalg as spsl\n'), ((7750, 7768), 'numpy.real', 'np.real', (['evals[ix]'], {}), '(evals[ix])\n', (7757, 7768), True, 'import numpy as np\n'), ((7785, 7806), 'numpy.real', 'np.real', (['evecs[:, ix]'], {}), '(evecs[:, ix])\n', (7792, 7806), True, 'import numpy as np\n'), ((10365, 10393), 'numpy.array_equal', 'np.array_equal', (['self.data', 'Y'], {}), '(self.data, Y)\n', (10379, 10393), True, 'import numpy as np\n'), ((6682, 6711), 'scipy.sparse.spdiags', 'sps.spdiags', (['weights', '(0)', 'm', 'm'], {}), '(weights, 0, m, m)\n', (6693, 6711), True, 'import scipy.sparse as sps\n'), ((6957, 6978), 'numpy.power', 'np.power', (['row_sum', '(-1)'], {}), '(row_sum, -1)\n', (6965, 6978), True, 'import numpy as np\n'), ((5946, 5957), 'numpy.shape', 'np.shape', (['X'], {}), '(X)\n', (5954, 5957), True, 'import numpy as np\n'), ((7177, 7199), 'scipy.sparse.eye', 'sps.eye', (['m', 'n'], {'k': '(n - m)'}), '(m, n, k=n - m)\n', (7184, 7199), True, 'import scipy.sparse as sps\n'), ((7430, 7576), 'warnings.warn', 'warnings.warn', (['"""Bandwith normalization set to true, but no bandwidth 
function was found in normalization. Not performing normalization"""'], {}), "(\n 'Bandwith normalization set to true, but no bandwidth function was found in normalization. Not performing normalization'\n )\n", (7443, 7576), False, 'import warnings\n'), ((7844, 7865), 'numpy.sqrt', 'np.sqrt', (['(-1.0 / evals)'], {}), '(-1.0 / evals)\n', (7851, 7865), True, 'import numpy as np\n'), ((14753, 14763), 'scipy.sparse.eye', 'sps.eye', (['m'], {}), '(m)\n', (14760, 14763), True, 'import scipy.sparse as sps\n'), ((7329, 7353), 'numpy.power', 'np.power', (['bandwidths', '(-2)'], {}), '(bandwidths, -2)\n', (7337, 7353), True, 'import numpy as np\n')] |
"""
Use the ``bokeh serve`` command to run the example by executing:
bokeh serve --show gui
in your browser.
"""
from os.path import dirname, join
import numpy as np
from bokeh.io import curdoc
from bokeh.layouts import layout, Spacer
from bokeh.models import ColumnDataSource, CustomJS
from bokeh.models import HoverTool
from bokeh.models.widgets import (
Slider,
TextInput,
Select,
Paragraph,
AutocompleteInput,
CheckboxGroup,
TableColumn,
DataTable,
Button,
CheckboxGroup,
Toggle,
Select,
Div
)
from bokeh.plotting import figure
from bokeh.events import ButtonClick
import astropy.units as u
from astropy import constants as const
from astropy.units.imperial import deg_F, inch, foot, mil
from roentgen.absorption import Material, Response
from roentgen.util import get_density, density_ideal_gas
import roentgen
# Enable imperial units (inch, foot, mil) so they are valid in the length
# unit selectors below.
u.imperial.enable()
DEFAULT_MATERIAL = ["silicon"]
DEFAULT_THICKNESS = [100.0]
DEFAULT_ENERGY_LOW = 1.0
DEFAULT_ENERGY_HIGH = 200.0
DEFAULT_DENSITY = [get_density(DEFAULT_MATERIAL[0]).value]
NUMBER_OF_MATERIALS = len(DEFAULT_MATERIAL)
DEFAULT_DETECTOR_MATERIAL = 'cdte'
DEFAULT_DETECTOR_THICKNESS = 1
DEFAULT_DETECTOR_DENSITY = get_density(DEFAULT_DETECTOR_MATERIAL).value
DEFAULT_ENERGY_RESOLUTION = 0.25
DEFAULT_AIR_THICKNESS = 1
DEFAULT_AIR_PRESSURE = 1
DEFAULT_AIR_TEMPERATURE = 25
PLOT_HEIGHT = 400
PLOT_WIDTH = 900
TOOLS = "pan,wheel_zoom,box_zoom,box_select,undo,redo,save,reset"
# Hover tool that shows energy/transmission at the cursor's x position.
custom_hover = HoverTool(
    tooltips=[
        ('energy [keV]', '@{x}{0.2f}'),
        ('transmission', '@{y}{0.3f}'),  # use @{ } for field names with spaces
    ],
    # display a tooltip whenever the cursor is vertically in line with a glyph
    mode='vline'
)
# defaults
material_list = []
this_material = Material(DEFAULT_MATERIAL[0], DEFAULT_THICKNESS[0] * u.micron)
air_density = density_ideal_gas(DEFAULT_AIR_PRESSURE * const.atm,
                                DEFAULT_AIR_TEMPERATURE * u.Celsius)
air = Material('air', DEFAULT_AIR_THICKNESS * u.m, density=air_density)
this_detector = Material(DEFAULT_DETECTOR_MATERIAL,
                         DEFAULT_DETECTOR_THICKNESS * u.mm)
# Full instrument response: attenuating optical path (material + air) in
# front of an absorbing detector.
response = Response(optical_path=[this_material, air], detector=this_detector)
energy = u.Quantity(np.arange(DEFAULT_ENERGY_LOW, DEFAULT_ENERGY_HIGH,
                             DEFAULT_ENERGY_RESOLUTION), "keV")
x = energy.value
y = response.response(energy)
source = ColumnDataSource(data={"x": x, "y": y})
# All materials known to roentgen (elements plus compounds), lowercased for
# the autocomplete widgets.
all_materials = list(roentgen.elements["name"]) + \
    list(roentgen.compounds["symbol"])
all_materials.sort()
# NOTE: the comprehension variable reuses the name this_material; the
# module-level this_material above is unaffected (comprehensions scope).
all_materials = [this_material.lower() for this_material in all_materials]
# Set up the plot
plot = figure(
    plot_height=PLOT_HEIGHT,
    plot_width=PLOT_WIDTH,
    tools=TOOLS,
    x_range=[1, 50],
    y_range=[0, 1],
)
plot.yaxis.axis_label = "Transmission fraction"
plot.xaxis.axis_label = "Energy [keV]"
plot.line("x", "y", source=source, line_width=3, line_alpha=0.6)
plot.title.text = f"{response}"
plot.add_tools(custom_hover)
# Set up the inputs
ylog_checkbox = CheckboxGroup(labels=["y-log"], active=[0])
# materials in the path
material_input = AutocompleteInput(title="Material (lowercase)", value=DEFAULT_MATERIAL[0])
material_input.completions = all_materials
material_thickness_input = TextInput(title="thickness", value=str(DEFAULT_THICKNESS[0]))
material_density_input = TextInput(title="density", value=str(this_material.density.value))
air_thickness_input = TextInput(title="air path length", value=str(DEFAULT_AIR_THICKNESS))
air_pressure_input = TextInput(title='air pressure', value=str(DEFAULT_AIR_PRESSURE))
air_temperature_input = TextInput(title='air temperature', value=str(DEFAULT_AIR_TEMPERATURE))
detector_material_input = AutocompleteInput(title='Detector', value=DEFAULT_DETECTOR_MATERIAL)
detector_material_input.completions = all_materials
detector_thickness_input = TextInput(title='thickness', value=str(DEFAULT_DETECTOR_THICKNESS))
detector_density_input = TextInput(title='density', value=str(DEFAULT_DETECTOR_DENSITY))
# Status line shown at the bottom of the layout.
p = Paragraph(text="", width=500)
p.text = f"Running roentgen version {roentgen.__version__}"
# Data table mirroring the plotted transmission curve.
columns = [
    TableColumn(field="x", title="energy [keV]"),
    TableColumn(field="y", title="Percent"),
]
data_table = DataTable(source=source, columns=columns, width=400, height=700)
# Button that downloads the plotted data via client-side JavaScript.
download_button = Button(label="Download", button_type="success")
download_button.js_on_event(ButtonClick, CustomJS(args=dict(source=source),
                            code=open(join(dirname(__file__), "download.js")).read()))
def convert_air_pressure(value, current_unit, new_unit):
    """Convert an air-pressure value between units.

    Parameters
    ----------
    value : str or float
        The pressure reading.  Widget callbacks pass ``TextInput.value``,
        which is a string, so the value is coerced to float first.
    current_unit : str
        The unit the value is currently in ("atm", "torr", or any unit
        string astropy understands, e.g. "Pa").
    new_unit : str
        The unit to convert to ("atm", "torr", or an astropy unit string).

    Returns
    -------
    float
        The converted pressure as a plain number (no unit attached), ready
        to be placed back into a text widget.
    """
    value = float(value)  # TextInput widgets supply strings
    # "atm" and "torr" are handled via const.atm since they are treated as
    # derived from the standard atmosphere here.
    if current_unit == "atm":
        air_pressure = u.Quantity(value * const.atm, "Pa")
    elif current_unit == "torr":
        air_pressure = u.Quantity(value * const.atm / 760., "Pa")
    else:
        air_pressure = u.Quantity(value, current_unit)
    if new_unit == "atm":
        return (air_pressure.to("Pa") / const.atm).value
    elif new_unit == "torr":
        return (air_pressure.to("Pa") / const.atm).value * 760.0
    else:
        # Return a plain float for consistency with the branches above; the
        # original returned a Quantity here, whose str() form ("X Pa") broke
        # later numeric parsing of the text field.
        return air_pressure.to(new_unit).value
def update_response(attrname, old, new):
    """Rebuild the global Response object from the current widget state.

    Bokeh on_change callback signature (attrname, old, new); the arguments
    are ignored and the widget values are read directly.  Widget groups
    disabled via the Material/Air/Detector checkboxes contribute a
    zero-thickness material, zero-thickness air, or no detector.
    """
    # check whether the input variables have changed and update the response
    global response
    # Fallbacks used when a widget group is disabled or the material name is
    # not recognised.  The original code left these names unassigned when a
    # group was disabled, which raised UnboundLocalError at the final
    # Response(...) call.
    this_material = Material('Al', 0 * u.mm)
    this_detector = None
    if not material_input.disabled:
        if str(material_input.value).lower() in all_materials:
            this_thickness = u.Quantity(material_thickness_input.value,
                                       material_thick_unit.value)
            this_density = u.Quantity(material_density_input.value,
                                     material_density_unit.value)
            this_material = Material(str(material_input.value).lower(), this_thickness,
                                     density=this_density)
    if not air_pressure_input.disabled:
        # NOTE(review): TextInput.value is a string; the string * const.atm
        # products below rely on astropy accepting that -- confirm numeric
        # coercion upstream.
        if air_pressure_unit.value == "atm":
            air_pressure = u.Quantity(air_pressure_input.value * const.atm, "Pa")
        elif air_pressure_unit.value == "torr":
            air_pressure = u.Quantity(air_pressure_input.value * const.atm / 760.,
                                    "Pa")
        else:
            air_pressure = u.Quantity(air_pressure_input.value,
                                    air_pressure_unit.value)
        air_path_length = u.Quantity(air_thickness_input.value,
                                   air_thick_unit.value)
        air_temperature = u.Quantity(air_temperature_input.value,
                                   air_temp_unit.value).to("Celsius",
                                   equivalencies=u.temperature())
        air_density = density_ideal_gas(air_pressure, air_temperature)
        air = Material('air', air_path_length, density=air_density)
    else:
        # if air is not selected then just add bogus air with no thickness
        air = Material('air', 0 * u.mm, density=0 * u.g / u.cm**3)
    if not detector_material_input.disabled:
        if str(detector_material_input.value).lower() in all_materials:
            this_thickness = u.Quantity(detector_thickness_input.value,
                                       detector_thick_unit.value)
            this_density = u.Quantity(detector_density_input.value,
                                     detector_density_unit.value)
            this_detector = Material(str(detector_material_input.value).lower(), this_thickness,
                                     density=this_density)
    response = Response(optical_path=[this_material, air], detector=this_detector)
def update_data(attrname, old, new):
    """Recompute the response over the visible x-range and push it to the plot.

    Bokeh on_change callback signature (attrname, old, new); the arguments
    are not used -- the current plot range and checkbox state are read
    directly from the module-level widgets.
    """
    # Never evaluate below the minimum supported energy.
    if plot.x_range.start < DEFAULT_ENERGY_LOW:
        energy = u.Quantity(np.arange(DEFAULT_ENERGY_LOW, plot.x_range.end,
                                     DEFAULT_ENERGY_RESOLUTION), "keV")
    else:
        energy = u.Quantity(np.arange(plot.x_range.start, plot.x_range.end,
                                     DEFAULT_ENERGY_RESOLUTION), "keV")
    y = response.response(energy)
    plot.title.text = f"{response}"
    if plot_checkbox_group.active:
        # y-log requested: plot log10 of the transmission and rescale the axis.
        y = np.log10(response.response(energy))
        plot.y_range.start = -4
        plot.y_range.end = 0
        plot.yaxis.axis_label = 'log(Transmission fraction)'
    else:
        plot.y_range.start = 0
        plot.y_range.end = 1
        plot.yaxis.axis_label = 'Transmission fraction'
    source.data = dict(x=energy, y=y)
def toggle_active(new):
    """Enable or disable widget groups based on the active checkbox indices.

    Parameters
    ----------
    new : list of int
        Active checkbox indices from the Material/Air/Detector group
        (0 = material, 1 = air, 2 = detector).

    Returns
    -------
    list of int
        The ``new`` list, unchanged.
    """
    # Widgets controlled by each checkbox index.
    widget_groups = {
        0: (material_input, material_thick_unit, material_density_input),
        1: (air_pressure_input, air_thickness_input, air_temperature_input),
        2: (detector_material_input, detector_thickness_input,
            detector_density_input),
    }
    for index, widgets in widget_groups.items():
        enabled = index in new
        for widget in widgets:
            widget.disabled = not enabled
    return new
# Checkboxes controlling which component groups (attenuating material, air
# path, detector) are active; toggling enables/disables the matching widgets.
checkbox_group = CheckboxGroup(labels=["Material", "Air", "Detector"],
                              active=[0, 1, 2])
checkbox_group.on_click(toggle_active)
def update_button_action():
    """Recompute the response from the current widgets and refresh the plot."""
    update_response("update_plot_button", 0, 0)
    update_data("update", 0, 0)
update_plot_button = Button(label="Update Plot", button_type="success")
update_plot_button.on_click(update_button_action)
# Recompute the plotted curve whenever the user pans/zooms the x axis.
plot.x_range.on_change('start', update_data)
plot.x_range.on_change('end', update_data)
plot_checkbox_group = CheckboxGroup(labels=["ylog"], active=[])
# Unit options offered by the Select widgets below.
length_units = ["m", "mm", "micron", "inch", "foot", "mil"]
pressure_units = ["Pa", "torr", "atm"]
density_units = ["g / cm ** 3", "kg / m ** 3"]
temperature_units = ["K", "deg_C", "deg_F"]
# Each unit Select is paired with a callback that converts the associated
# text field in place when the unit changes, so the displayed number always
# matches the selected unit.
material_thick_unit = Select(title="unit", value=length_units[2], options=length_units)
def update_mat_thick_units(attr, old, new):
    """Convert the material thickness text field from the old unit to the new one."""
    material_thickness_input.value = str(u.Quantity(material_thickness_input.value, old).to(new).value)
material_thick_unit.on_change('value', update_mat_thick_units)
detector_thick_unit = Select(title="unit", value=length_units[1],
                            options=length_units)
def update_det_thick_units(attr, old, new):
    """Convert the detector thickness text field from the old unit to the new one."""
    detector_thickness_input.value = str(u.Quantity(detector_thickness_input.value, old).to(new).value)
detector_thick_unit.on_change('value', update_det_thick_units)
air_thick_unit = Select(title="unit", value=length_units[0],
                       options=length_units)
def update_air_thick_units(attr, old, new):
    """Convert the air path length text field from the old unit to the new one."""
    air_thickness_input.value = str(u.Quantity(air_thickness_input.value, old).to(new).value)
air_thick_unit.on_change('value', update_air_thick_units)
material_density_unit = Select(title="unit", value=density_units[0],
                              options=density_units)
def update_mat_density_units(attr, old, new):
    """Convert the material density text field from the old unit to the new one."""
    material_density_input.value = str(u.Quantity(material_density_input.value, old).to(new).value)
material_density_unit.on_change('value', update_mat_density_units)
detector_density_unit = Select(title="unit", value=density_units[0], options=density_units)
def update_det_density_units(attr, old, new):
    """Convert the detector density text field from the old unit to the new one."""
    detector_density_input.value = str(u.Quantity(detector_density_input.value, old).to(new).value)
detector_density_unit.on_change('value', update_det_density_units)
air_pressure_unit = Select(title="unit", value=pressure_units[2],
                          options=pressure_units)
def update_air_pressure_units(attr, old, new):
    """Convert the air pressure text field from the old unit to the new one."""
    # "atm" and "torr" need special handling, done in convert_air_pressure.
    air_pressure = convert_air_pressure(air_pressure_input.value, old, new)
    air_pressure_input.value = str(air_pressure)
air_pressure_unit.on_change('value', update_air_pressure_units)
air_temp_unit = Select(title="unit", value=temperature_units[1],
                      options=temperature_units)
def update_air_temp_units(attr, old, new):
    """Convert the air temperature text field between temperature scales."""
    # u.temperature() provides the K <-> deg_C <-> deg_F equivalencies.
    air_temperature_input.value = str(u.Quantity(air_temperature_input.value, old).to(new, equivalencies=u.temperature()).value)
air_temp_unit.on_change('value', update_air_temp_units)
def update_material_density(attr, old, new):
# if the material is changed then update the density
material_density_input.value = str(get_density(material_input.value).value)
material_input.on_change('value', update_material_density)
def update_detector_density(attr, old, new):
# if the material is changed then update the density
detector_density_input.value = str(get_density(detector_material_input.value).value)
detector_material_input.on_change('value', update_detector_density)
curdoc().add_root(
layout(
[
[plot],
[checkbox_group, plot_checkbox_group],
[material_input, material_thickness_input, material_thick_unit, material_density_input, material_density_unit],
[air_pressure_input, air_pressure_unit, air_thickness_input, air_thick_unit, air_temperature_input, air_temp_unit],
[detector_material_input, detector_thickness_input, detector_thick_unit, detector_density_input, detector_density_unit],
#[energy_low, energy_high, energy_step],
[download_button, Spacer(), update_plot_button],
[p]
],
sizing_mode="scale_width",
)
)
curdoc().title = "Roentgen"
| [
"bokeh.plotting.figure",
"bokeh.models.widgets.Button",
"bokeh.models.widgets.CheckboxGroup",
"roentgen.absorption.Response",
"roentgen.util.get_density",
"numpy.arange",
"bokeh.io.curdoc",
"astropy.units.imperial.enable",
"bokeh.models.widgets.TableColumn",
"bokeh.models.widgets.DataTable",
"bo... | [((876, 895), 'astropy.units.imperial.enable', 'u.imperial.enable', ([], {}), '()\n', (893, 895), True, 'import astropy.units as u\n'), ((1484, 1586), 'bokeh.models.HoverTool', 'HoverTool', ([], {'tooltips': "[('energy [keV]', '@{x}{0.2f}'), ('transmission', '@{y}{0.3f}')]", 'mode': '"""vline"""'}), "(tooltips=[('energy [keV]', '@{x}{0.2f}'), ('transmission',\n '@{y}{0.3f}')], mode='vline')\n", (1493, 1586), False, 'from bokeh.models import HoverTool\n'), ((1786, 1848), 'roentgen.absorption.Material', 'Material', (['DEFAULT_MATERIAL[0]', '(DEFAULT_THICKNESS[0] * u.micron)'], {}), '(DEFAULT_MATERIAL[0], DEFAULT_THICKNESS[0] * u.micron)\n', (1794, 1848), False, 'from roentgen.absorption import Material, Response\n'), ((1863, 1955), 'roentgen.util.density_ideal_gas', 'density_ideal_gas', (['(DEFAULT_AIR_PRESSURE * const.atm)', '(DEFAULT_AIR_TEMPERATURE * u.Celsius)'], {}), '(DEFAULT_AIR_PRESSURE * const.atm, DEFAULT_AIR_TEMPERATURE *\n u.Celsius)\n', (1880, 1955), False, 'from roentgen.util import get_density, density_ideal_gas\n'), ((1990, 2055), 'roentgen.absorption.Material', 'Material', (['"""air"""', '(DEFAULT_AIR_THICKNESS * u.m)'], {'density': 'air_density'}), "('air', DEFAULT_AIR_THICKNESS * u.m, density=air_density)\n", (1998, 2055), False, 'from roentgen.absorption import Material, Response\n'), ((2072, 2142), 'roentgen.absorption.Material', 'Material', (['DEFAULT_DETECTOR_MATERIAL', '(DEFAULT_DETECTOR_THICKNESS * u.mm)'], {}), '(DEFAULT_DETECTOR_MATERIAL, DEFAULT_DETECTOR_THICKNESS * u.mm)\n', (2080, 2142), False, 'from roentgen.absorption import Material, Response\n'), ((2180, 2247), 'roentgen.absorption.Response', 'Response', ([], {'optical_path': '[this_material, air]', 'detector': 'this_detector'}), '(optical_path=[this_material, air], detector=this_detector)\n', (2188, 2247), False, 'from roentgen.absorption import Material, Response\n'), ((2442, 2481), 'bokeh.models.ColumnDataSource', 'ColumnDataSource', ([], {'data': "{'x': x, 'y': y}"}), 
"(data={'x': x, 'y': y})\n", (2458, 2481), False, 'from bokeh.models import ColumnDataSource, CustomJS\n'), ((2708, 2813), 'bokeh.plotting.figure', 'figure', ([], {'plot_height': 'PLOT_HEIGHT', 'plot_width': 'PLOT_WIDTH', 'tools': 'TOOLS', 'x_range': '[1, 50]', 'y_range': '[0, 1]'}), '(plot_height=PLOT_HEIGHT, plot_width=PLOT_WIDTH, tools=TOOLS, x_range\n =[1, 50], y_range=[0, 1])\n', (2714, 2813), False, 'from bokeh.plotting import figure\n'), ((3081, 3124), 'bokeh.models.widgets.CheckboxGroup', 'CheckboxGroup', ([], {'labels': "['y-log']", 'active': '[0]'}), "(labels=['y-log'], active=[0])\n", (3094, 3124), False, 'from bokeh.models.widgets import Slider, TextInput, Select, Paragraph, AutocompleteInput, CheckboxGroup, TableColumn, DataTable, Button, CheckboxGroup, Toggle, Select, Div\n'), ((3167, 3241), 'bokeh.models.widgets.AutocompleteInput', 'AutocompleteInput', ([], {'title': '"""Material (lowercase)"""', 'value': 'DEFAULT_MATERIAL[0]'}), "(title='Material (lowercase)', value=DEFAULT_MATERIAL[0])\n", (3184, 3241), False, 'from bokeh.models.widgets import Slider, TextInput, Select, Paragraph, AutocompleteInput, CheckboxGroup, TableColumn, DataTable, Button, CheckboxGroup, Toggle, Select, Div\n'), ((3766, 3834), 'bokeh.models.widgets.AutocompleteInput', 'AutocompleteInput', ([], {'title': '"""Detector"""', 'value': 'DEFAULT_DETECTOR_MATERIAL'}), "(title='Detector', value=DEFAULT_DETECTOR_MATERIAL)\n", (3783, 3834), False, 'from bokeh.models.widgets import Slider, TextInput, Select, Paragraph, AutocompleteInput, CheckboxGroup, TableColumn, DataTable, Button, CheckboxGroup, Toggle, Select, Div\n'), ((4076, 4105), 'bokeh.models.widgets.Paragraph', 'Paragraph', ([], {'text': '""""""', 'width': '(500)'}), "(text='', width=500)\n", (4085, 4105), False, 'from bokeh.models.widgets import Slider, TextInput, Select, Paragraph, AutocompleteInput, CheckboxGroup, TableColumn, DataTable, Button, CheckboxGroup, Toggle, Select, Div\n'), ((4289, 4353), 
'bokeh.models.widgets.DataTable', 'DataTable', ([], {'source': 'source', 'columns': 'columns', 'width': '(400)', 'height': '(700)'}), '(source=source, columns=columns, width=400, height=700)\n', (4298, 4353), False, 'from bokeh.models.widgets import Slider, TextInput, Select, Paragraph, AutocompleteInput, CheckboxGroup, TableColumn, DataTable, Button, CheckboxGroup, Toggle, Select, Div\n'), ((4373, 4420), 'bokeh.models.widgets.Button', 'Button', ([], {'label': '"""Download"""', 'button_type': '"""success"""'}), "(label='Download', button_type='success')\n", (4379, 4420), False, 'from bokeh.models.widgets import Slider, TextInput, Select, Paragraph, AutocompleteInput, CheckboxGroup, TableColumn, DataTable, Button, CheckboxGroup, Toggle, Select, Div\n'), ((9576, 9647), 'bokeh.models.widgets.CheckboxGroup', 'CheckboxGroup', ([], {'labels': "['Material', 'Air', 'Detector']", 'active': '[0, 1, 2]'}), "(labels=['Material', 'Air', 'Detector'], active=[0, 1, 2])\n", (9589, 9647), False, 'from bokeh.models.widgets import Slider, TextInput, Select, Paragraph, AutocompleteInput, CheckboxGroup, TableColumn, DataTable, Button, CheckboxGroup, Toggle, Select, Div\n'), ((9851, 9901), 'bokeh.models.widgets.Button', 'Button', ([], {'label': '"""Update Plot"""', 'button_type': '"""success"""'}), "(label='Update Plot', button_type='success')\n", (9857, 9901), False, 'from bokeh.models.widgets import Slider, TextInput, Select, Paragraph, AutocompleteInput, CheckboxGroup, TableColumn, DataTable, Button, CheckboxGroup, Toggle, Select, Div\n'), ((10065, 10106), 'bokeh.models.widgets.CheckboxGroup', 'CheckboxGroup', ([], {'labels': "['ylog']", 'active': '[]'}), "(labels=['ylog'], active=[])\n", (10078, 10106), False, 'from bokeh.models.widgets import Slider, TextInput, Select, Paragraph, AutocompleteInput, CheckboxGroup, TableColumn, DataTable, Button, CheckboxGroup, Toggle, Select, Div\n'), ((10322, 10387), 'bokeh.models.widgets.Select', 'Select', ([], {'title': '"""unit"""', 'value': 
'length_units[2]', 'options': 'length_units'}), "(title='unit', value=length_units[2], options=length_units)\n", (10328, 10387), False, 'from bokeh.models.widgets import Slider, TextInput, Select, Paragraph, AutocompleteInput, CheckboxGroup, TableColumn, DataTable, Button, CheckboxGroup, Toggle, Select, Div\n'), ((10625, 10690), 'bokeh.models.widgets.Select', 'Select', ([], {'title': '"""unit"""', 'value': 'length_units[1]', 'options': 'length_units'}), "(title='unit', value=length_units[1], options=length_units)\n", (10631, 10690), False, 'from bokeh.models.widgets import Slider, TextInput, Select, Paragraph, AutocompleteInput, CheckboxGroup, TableColumn, DataTable, Button, CheckboxGroup, Toggle, Select, Div\n'), ((10952, 11017), 'bokeh.models.widgets.Select', 'Select', ([], {'title': '"""unit"""', 'value': 'length_units[0]', 'options': 'length_units'}), "(title='unit', value=length_units[0], options=length_units)\n", (10958, 11017), False, 'from bokeh.models.widgets import Slider, TextInput, Select, Paragraph, AutocompleteInput, CheckboxGroup, TableColumn, DataTable, Button, CheckboxGroup, Toggle, Select, Div\n'), ((11267, 11334), 'bokeh.models.widgets.Select', 'Select', ([], {'title': '"""unit"""', 'value': 'density_units[0]', 'options': 'density_units'}), "(title='unit', value=density_units[0], options=density_units)\n", (11273, 11334), False, 'from bokeh.models.widgets import Slider, TextInput, Select, Paragraph, AutocompleteInput, CheckboxGroup, TableColumn, DataTable, Button, CheckboxGroup, Toggle, Select, Div\n'), ((11608, 11675), 'bokeh.models.widgets.Select', 'Select', ([], {'title': '"""unit"""', 'value': 'density_units[0]', 'options': 'density_units'}), "(title='unit', value=density_units[0], options=density_units)\n", (11614, 11675), False, 'from bokeh.models.widgets import Slider, TextInput, Select, Paragraph, AutocompleteInput, CheckboxGroup, TableColumn, DataTable, Button, CheckboxGroup, Toggle, Select, Div\n'), ((11913, 11982), 
'bokeh.models.widgets.Select', 'Select', ([], {'title': '"""unit"""', 'value': 'pressure_units[2]', 'options': 'pressure_units'}), "(title='unit', value=pressure_units[2], options=pressure_units)\n", (11919, 11982), False, 'from bokeh.models.widgets import Slider, TextInput, Select, Paragraph, AutocompleteInput, CheckboxGroup, TableColumn, DataTable, Button, CheckboxGroup, Toggle, Select, Div\n'), ((12267, 12342), 'bokeh.models.widgets.Select', 'Select', ([], {'title': '"""unit"""', 'value': 'temperature_units[1]', 'options': 'temperature_units'}), "(title='unit', value=temperature_units[1], options=temperature_units)\n", (12273, 12342), False, 'from bokeh.models.widgets import Slider, TextInput, Select, Paragraph, AutocompleteInput, CheckboxGroup, TableColumn, DataTable, Button, CheckboxGroup, Toggle, Select, Div\n'), ((1206, 1244), 'roentgen.util.get_density', 'get_density', (['DEFAULT_DETECTOR_MATERIAL'], {}), '(DEFAULT_DETECTOR_MATERIAL)\n', (1217, 1244), False, 'from roentgen.util import get_density, density_ideal_gas\n'), ((2269, 2346), 'numpy.arange', 'np.arange', (['DEFAULT_ENERGY_LOW', 'DEFAULT_ENERGY_HIGH', 'DEFAULT_ENERGY_RESOLUTION'], {}), '(DEFAULT_ENERGY_LOW, DEFAULT_ENERGY_HIGH, DEFAULT_ENERGY_RESOLUTION)\n', (2278, 2346), True, 'import numpy as np\n'), ((4183, 4227), 'bokeh.models.widgets.TableColumn', 'TableColumn', ([], {'field': '"""x"""', 'title': '"""energy [keV]"""'}), "(field='x', title='energy [keV]')\n", (4194, 4227), False, 'from bokeh.models.widgets import Slider, TextInput, Select, Paragraph, AutocompleteInput, CheckboxGroup, TableColumn, DataTable, Button, CheckboxGroup, Toggle, Select, Div\n'), ((4233, 4272), 'bokeh.models.widgets.TableColumn', 'TableColumn', ([], {'field': '"""y"""', 'title': '"""Percent"""'}), "(field='y', title='Percent')\n", (4244, 4272), False, 'from bokeh.models.widgets import Slider, TextInput, Select, Paragraph, AutocompleteInput, CheckboxGroup, TableColumn, DataTable, Button, CheckboxGroup, Toggle, Select, 
Div\n'), ((7679, 7746), 'roentgen.absorption.Response', 'Response', ([], {'optical_path': '[this_material, air]', 'detector': 'this_detector'}), '(optical_path=[this_material, air], detector=this_detector)\n', (7687, 7746), False, 'from roentgen.absorption import Material, Response\n'), ((13788, 13796), 'bokeh.io.curdoc', 'curdoc', ([], {}), '()\n', (13794, 13796), False, 'from bokeh.io import curdoc\n'), ((1028, 1060), 'roentgen.util.get_density', 'get_density', (['DEFAULT_MATERIAL[0]'], {}), '(DEFAULT_MATERIAL[0])\n', (1039, 1060), False, 'from roentgen.util import get_density, density_ideal_gas\n'), ((4718, 4753), 'astropy.units.Quantity', 'u.Quantity', (['(value * const.atm)', '"""Pa"""'], {}), "(value * const.atm, 'Pa')\n", (4728, 4753), True, 'import astropy.units as u\n'), ((5923, 5947), 'roentgen.absorption.Material', 'Material', (['"""Al"""', '(0 * u.mm)'], {}), "('Al', 0 * u.mm)\n", (5931, 5947), False, 'from roentgen.absorption import Material, Response\n'), ((6459, 6518), 'astropy.units.Quantity', 'u.Quantity', (['air_thickness_input.value', 'air_thick_unit.value'], {}), '(air_thickness_input.value, air_thick_unit.value)\n', (6469, 6518), True, 'import astropy.units as u\n'), ((6808, 6856), 'roentgen.util.density_ideal_gas', 'density_ideal_gas', (['air_pressure', 'air_temperature'], {}), '(air_pressure, air_temperature)\n', (6825, 6856), False, 'from roentgen.util import get_density, density_ideal_gas\n'), ((6871, 6924), 'roentgen.absorption.Material', 'Material', (['"""air"""', 'air_path_length'], {'density': 'air_density'}), "('air', air_path_length, density=air_density)\n", (6879, 6924), False, 'from roentgen.absorption import Material, Response\n'), ((7024, 7078), 'roentgen.absorption.Material', 'Material', (['"""air"""', '(0 * u.mm)'], {'density': '(0 * u.g / u.cm ** 3)'}), "('air', 0 * u.mm, density=0 * u.g / u.cm ** 3)\n", (7032, 7078), False, 'from roentgen.absorption import Material, Response\n'), ((13107, 13115), 'bokeh.io.curdoc', 'curdoc', 
([], {}), '()\n', (13113, 13115), False, 'from bokeh.io import curdoc\n'), ((4810, 4853), 'astropy.units.Quantity', 'u.Quantity', (['(value * const.atm / 760.0)', '"""Pa"""'], {}), "(value * const.atm / 760.0, 'Pa')\n", (4820, 4853), True, 'import astropy.units as u\n'), ((4886, 4917), 'astropy.units.Quantity', 'u.Quantity', (['value', 'current_unit'], {}), '(value, current_unit)\n', (4896, 4917), True, 'import astropy.units as u\n'), ((5416, 5485), 'astropy.units.Quantity', 'u.Quantity', (['material_thickness_input.value', 'material_thick_unit.value'], {}), '(material_thickness_input.value, material_thick_unit.value)\n', (5426, 5485), True, 'import astropy.units as u\n'), ((5553, 5622), 'astropy.units.Quantity', 'u.Quantity', (['material_density_input.value', 'material_density_unit.value'], {}), '(material_density_input.value, material_density_unit.value)\n', (5563, 5622), True, 'import astropy.units as u\n'), ((6061, 6115), 'astropy.units.Quantity', 'u.Quantity', (['(air_pressure_input.value * const.atm)', '"""Pa"""'], {}), "(air_pressure_input.value * const.atm, 'Pa')\n", (6071, 6115), True, 'import astropy.units as u\n'), ((7224, 7293), 'astropy.units.Quantity', 'u.Quantity', (['detector_thickness_input.value', 'detector_thick_unit.value'], {}), '(detector_thickness_input.value, detector_thick_unit.value)\n', (7234, 7293), True, 'import astropy.units as u\n'), ((7361, 7430), 'astropy.units.Quantity', 'u.Quantity', (['detector_density_input.value', 'detector_density_unit.value'], {}), '(detector_density_input.value, detector_density_unit.value)\n', (7371, 7430), True, 'import astropy.units as u\n'), ((7863, 7937), 'numpy.arange', 'np.arange', (['DEFAULT_ENERGY_LOW', 'plot.x_range.end', 'DEFAULT_ENERGY_RESOLUTION'], {}), '(DEFAULT_ENERGY_LOW, plot.x_range.end, DEFAULT_ENERGY_RESOLUTION)\n', (7872, 7937), True, 'import numpy as np\n'), ((8018, 8092), 'numpy.arange', 'np.arange', (['plot.x_range.start', 'plot.x_range.end', 'DEFAULT_ENERGY_RESOLUTION'], {}), 
'(plot.x_range.start, plot.x_range.end, DEFAULT_ENERGY_RESOLUTION)\n', (8027, 8092), True, 'import numpy as np\n'), ((12741, 12774), 'roentgen.util.get_density', 'get_density', (['material_input.value'], {}), '(material_input.value)\n', (12752, 12774), False, 'from roentgen.util import get_density, density_ideal_gas\n'), ((12986, 13028), 'roentgen.util.get_density', 'get_density', (['detector_material_input.value'], {}), '(detector_material_input.value)\n', (12997, 13028), False, 'from roentgen.util import get_density, density_ideal_gas\n'), ((6191, 6253), 'astropy.units.Quantity', 'u.Quantity', (['(air_pressure_input.value * const.atm / 760.0)', '"""Pa"""'], {}), "(air_pressure_input.value * const.atm / 760.0, 'Pa')\n", (6201, 6253), True, 'import astropy.units as u\n'), ((6332, 6393), 'astropy.units.Quantity', 'u.Quantity', (['air_pressure_input.value', 'air_pressure_unit.value'], {}), '(air_pressure_input.value, air_pressure_unit.value)\n', (6342, 6393), True, 'import astropy.units as u\n'), ((6582, 6642), 'astropy.units.Quantity', 'u.Quantity', (['air_temperature_input.value', 'air_temp_unit.value'], {}), '(air_temperature_input.value, air_temp_unit.value)\n', (6592, 6642), True, 'import astropy.units as u\n'), ((6769, 6784), 'astropy.units.temperature', 'u.temperature', ([], {}), '()\n', (6782, 6784), True, 'import astropy.units as u\n'), ((13687, 13695), 'bokeh.layouts.Spacer', 'Spacer', ([], {}), '()\n', (13693, 13695), False, 'from bokeh.layouts import layout, Spacer\n'), ((10475, 10522), 'astropy.units.Quantity', 'u.Quantity', (['material_thickness_input.value', 'old'], {}), '(material_thickness_input.value, old)\n', (10485, 10522), True, 'import astropy.units as u\n'), ((10807, 10854), 'astropy.units.Quantity', 'u.Quantity', (['detector_thickness_input.value', 'old'], {}), '(detector_thickness_input.value, old)\n', (10817, 10854), True, 'import astropy.units as u\n'), ((11124, 11166), 'astropy.units.Quantity', 'u.Quantity', (['air_thickness_input.value', 
'old'], {}), '(air_thickness_input.value, old)\n', (11134, 11166), True, 'import astropy.units as u\n'), ((11453, 11498), 'astropy.units.Quantity', 'u.Quantity', (['material_density_input.value', 'old'], {}), '(material_density_input.value, old)\n', (11463, 11498), True, 'import astropy.units as u\n'), ((11763, 11808), 'astropy.units.Quantity', 'u.Quantity', (['detector_density_input.value', 'old'], {}), '(detector_density_input.value, old)\n', (11773, 11808), True, 'import astropy.units as u\n'), ((12449, 12493), 'astropy.units.Quantity', 'u.Quantity', (['air_temperature_input.value', 'old'], {}), '(air_temperature_input.value, old)\n', (12459, 12493), True, 'import astropy.units as u\n'), ((12516, 12531), 'astropy.units.temperature', 'u.temperature', ([], {}), '()\n', (12529, 12531), True, 'import astropy.units as u\n'), ((4562, 4579), 'os.path.dirname', 'dirname', (['__file__'], {}), '(__file__)\n', (4569, 4579), False, 'from os.path import dirname, join\n')] |
# -*- coding: utf-8 -*-
"""
Created on Wed Nov 11 14:01:00 2020
@author: hvf811
"""
# Single seed shared by TensorFlow, NumPy and the stdlib RNG for reproducibility.
seed_val = 1234
import os
import tensorflow as tf
from tensorflow.keras.layers import Dense, Input, Dropout,Multiply, LSTM, Add, Concatenate, TimeDistributed
from tensorflow.keras.layers import Conv1D, Flatten, Lambda, MaxPooling1D, GRU, SimpleRNN, PReLU
from tensorflow.keras.layers import Reshape
from tensorflow.keras.models import Model
from keras.regularizers import l2, l1
from tensorflow.keras.losses import BinaryCrossentropy
from tensorflow.keras.initializers import RandomNormal, RandomUniform, Constant
from tensorflow.keras import backend as K
from tensorflow.keras.constraints import non_neg
from tensorflow.keras.models import load_model
from tensorflow.keras.activations import relu
from tensorflow.keras.optimizers import Adam
import random
import pickle
import numpy as np
import matplotlib.pyplot as plt
from sklearn.metrics import matthews_corrcoef, roc_auc_score, precision_score, recall_score,f1_score,cohen_kappa_score,accuracy_score
import time
### Controlling randomness
tf.random.set_seed(seed_val)
np.random.seed(seed_val)
random.seed(seed_val)
#### Reading peptide sequence data
with open("total_classes_seq_bins32.pkl","rb") as f:
    tot_bins = pickle.load(f)
with open("total_classes_seq32.pkl","rb") as f:
    encoder = pickle.load(f)
with open("branches32.pkl","rb") as f:
    branches = pickle.load(f)
branches = [list(i) for i in branches]
# Class hierarchy: level 1 splits the five branches into two super-groups,
# deeper levels split further, and each lvl4_* is a single leaf branch.
lvl1 = [list(branches[0]+branches[1]), list(branches[2]+branches[3]+branches[4])]
lvl2_1 = [list(branches[0]),list(branches[1])]
lvl2_2 = [list(branches[2]),list(branches[3]+branches[4])]
lvl3 = [list(branches[3]),list(branches[4])]
lvl4_1 = [list(branches[0])]
lvl4_2 = [list(branches[1])]
lvl4_3 = [list(branches[2])]
lvl4_4 = [list(branches[3])]
lvl4_5 = [list(branches[4])]
levels = [lvl1, lvl2_1, lvl2_2, lvl3, lvl4_1, lvl4_2, lvl4_3, lvl4_4, lvl4_5]
# The 20 standard amino-acid one-letter codes used for one-hot encoding.
vocab = ['A',
'R',
'N',
'D',
'C',
'Q',
'E',
'G',
'H',
'I',
'L',
'K',
'M',
'F',
'P',
'S',
'T',
'W',
'Y',
'V',]
# 10-fold cross-validation splits: [[val bin], [test bin], [train bins]].
folds = [
[[0], [1], [2, 3, 4, 5, 6, 7, 8, 9]],
[[1], [2], [0, 3, 4, 5, 6, 7, 8, 9]],
[[2], [3], [0, 1, 4, 5, 6, 7, 8, 9]],
[[3], [4], [0, 1, 2, 5, 6, 7, 8, 9]],
[[4], [5], [0, 1, 2, 3, 6, 7, 8, 9]],
[[5], [6], [0, 1, 2, 3, 4, 7, 8, 9]],
[[6], [7], [0, 1, 2, 3, 4, 5, 8, 9]],
[[7], [8], [0, 1, 2, 3, 4, 5, 6, 9]],
[[8], [9], [0, 1, 2, 3, 4, 5, 6, 7]],
[[9], [0], [1, 2, 3, 4, 5, 6, 7, 8]]
]
## Getting the labels
labels1 = set()
for i in list(encoder.values()):
    labels1.update(i)
labels1 = list(labels1)
labels1.sort()
labels = set()
for k,v in tot_bins.items():
    #print(v)
    tmp = k.split(" | ")
    tmp.remove("")
    labels.update(tmp)
labels = list(labels)
labels.sort()
print(labels1)
print("------")
print(labels)
# Sanity check: labels derived from the bin keys must match those derived
# from the per-sequence encoder.
assert labels == labels1
del labels1
#############################################################
## Functions
def calc_real_acc(yt, tmp_val):
    """Element-wise accuracy of the rounded predictions against binary targets."""
    total_cells = yt.shape[0] * yt.shape[1]
    correct = np.sum(np.round(tmp_val) == yt)
    return correct / total_cells
def print_function(name, x, y):
    """Pretty-print a named metric row: each label/value pair, then the average.

    Parameters:
        name: metric name printed at the start of the row.
        x: list of labels (e.g. level names).
        y: list of values aligned with x; entries may be empty lists, in
           which case the average falls back to y[1:].
    """
    print(name, "||", sep=" ", end=" ")
    for i in range(len(x)):
        print(x[i], np.round(y[i], 4), "|", sep=" ", end=" ")
    print("")
    try:
        print("average:", np.average(y))
    # Bug fix: was a bare `except:`, which also swallows KeyboardInterrupt /
    # SystemExit. Only runtime failures of np.average (ragged/empty entries)
    # should trigger the fallback.
    except Exception:
        print("average:", np.average(y[1:]))
    print("\n")
def calc_score(yt, tmp_val, funk):
    """Average funk(pred, true) over the rows of each level that have at least
    one positive label; returns one score per level."""
    scores = []
    for i in range(len(yt)):
        has_label = np.sum(yt[i], axis=-1) > 0
        scores.append(np.average(funk(tmp_val[i][has_label], yt[i][has_label])))
    return scores
def calc_score_wzero(yt, tmp_val, funk):
    """Apply funk(true, pred) per level, separately on labelled rows and on
    all-zero rows.

    Returns (labelled_scores, zero_scores); zero_scores holds an empty list
    for any level that has no all-zero row.
    """
    labelled_scores = []
    zero_scores = []
    for i in range(len(yt)):
        row_sums = np.sum(yt[i], axis=-1)
        labelled = row_sums > 0
        zeros = row_sums == 0
        labelled_scores.append(funk(yt[i][labelled], tmp_val[i][labelled]))
        if np.sum(zeros) > 0:
            zero_scores.append(funk(yt[i][zeros], tmp_val[i][zeros]))
        else:
            zero_scores.append([])
    return labelled_scores, zero_scores
def calc_roc(yt, tmp_val, funk):
    """Apply funk(true, pred) per level, once restricted to rows with at least
    one positive label and once on every row. Returns both score lists."""
    labelled_only = []
    all_rows = []
    for i in range(len(yt)):
        mask = np.sum(yt[i], axis=-1) > 0
        labelled_only.append(funk(yt[i][mask], tmp_val[i][mask]))
        all_rows.append(funk(yt[i], tmp_val[i]))
    return labelled_only, all_rows
def calc_score_wzero_round(yt, tmp_val, funk):
    """Like calc_score_wzero, but predictions are rounded to {0, 1} before
    funk(true, rounded_pred) is applied."""
    labelled_scores = []
    zero_scores = []
    for i in range(len(yt)):
        row_sums = np.sum(yt[i], axis=-1)
        labelled = row_sums > 0
        zeros = row_sums == 0
        labelled_scores.append(funk(yt[i][labelled], np.round(tmp_val[i][labelled])))
        if np.sum(zeros) > 0:
            zero_scores.append(funk(yt[i][zeros], np.round(tmp_val[i][zeros])))
        else:
            zero_scores.append([])
    return labelled_scores, zero_scores
def per_pred(yv, tmp_val, funk, name):
    """Score every output column of every level with funk(true_col,
    rounded_pred_col), print the per-level table plus overall and leaf-only
    averages/products, and return the overall mean.

    Relies on module-level `level_names` and `labels`.
    """
    per_level = []
    for lvl in range(len(yv)):
        n_cols = len(yv[lvl][0])
        per_level.append([funk(yv[lvl][:, col], np.round(tmp_val[lvl][:, col])) for col in range(n_cols)])
    for lvl in range(len(per_level)):
        print(level_names[lvl])
        for score in per_level[lvl]:
            print(np.round(score, 4), sep=" ", end=" ")
        print("")
    flat = []
    for scores in per_level:
        flat += scores
    prod_all = np.prod(flat)
    mean_all = np.average(flat)
    print("\naverage {}:".format(name), mean_all, "| prod", prod_all)
    print("\naverage {} for leaves:".format(name), np.average(flat[-len(labels):]), "| prod", np.prod(flat[-len(labels):]))
    return mean_all
def per_pred2(yv, tmp_val, funk, name):
    """Silent variant of per_pred: prints only the per-level score table and
    returns the mean over all columns (no aggregate printout).

    `name` is kept for interface parity with per_pred; relies on
    module-level `level_names`.
    """
    per_level = []
    for lvl in range(len(yv)):
        n_cols = len(yv[lvl][0])
        per_level.append([funk(yv[lvl][:, col], np.round(tmp_val[lvl][:, col])) for col in range(n_cols)])
    for lvl in range(len(per_level)):
        print(level_names[lvl])
        for score in per_level[lvl]:
            print(np.round(score, 4), sep=" ", end=" ")
        print("")
    flat = []
    for scores in per_level:
        flat += scores
    return np.average(flat)
def printer_stuff(yv, xv, modd):
    # Evaluate model `modd` on (xv, yv) and print a battery of metrics:
    # per-level loss and exact-match accuracy, rank-overlap accuracy,
    # element-wise accuracy, ROC-AUC, and per-column precision / recall /
    # F1 / kappa / MCC. Depends on the module-level `losser`, `level_names`
    # and the metric helpers defined above.
    tmp_val = modd.predict(xv)
    # Per-level loss values (assumes losser returns a TF tensor -> .numpy()).
    val_loss = [losser(yv[iq],tmp_val[iq]).numpy() for iq in range(len(yv))]
    print_function("val_loss",level_names,val_loss)
    # Exact-match (subset) accuracy per level on rounded predictions.
    val_acc = [accuracy_score(yv[iq],np.round(tmp_val[iq])) for iq in range(len(yv))]
    print_function("val_exact_ACC",level_names,val_acc)
    ac1, ac2 = calc_score_wzero_round(yv,tmp_val,accuracy_score)
    print_function("exact_acc_labels",level_names,ac1)
    print_function("exact_acc_zeros",level_names,ac2)
    print_function("TP_ranked",level_names,calc_score(yv,tmp_val,estimate_acc))
    ac1, ac2 = calc_score_wzero(yv,tmp_val,calc_real_acc)
    print_function("real_acc_labels",level_names,ac1)
    print_function("real_acc_zeros",level_names,ac2)
    roc1, roc0 = calc_roc(yv,tmp_val,roc_auc_score)
    print_function("roc_labels",level_names,roc1)
    print_function("roc_with_zeros",level_names,roc0)
    _ = per_pred(yv,tmp_val,precision_score,"PREC")
    _ = per_pred(yv,tmp_val,recall_score,"REC")
    _ = per_pred(yv,tmp_val,f1_score,"F1")
    _ = per_pred(yv,tmp_val,cohen_kappa_score,"KAPPA")
    all_mcc2 = per_pred(yv,tmp_val,matthews_corrcoef,"MCC")
### Functions for sequence encoding
def making_ys(activity, levels):
    """Build the per-level target vector for a single activity label.

    Each entry of `levels` is either a pair of label groups (the target is a
    2-vector marking which group contains the activity) or a single leaf
    group (the target is a one-hot vector over that group's labels). Levels
    of any other size get no vector.
    """
    targets = [[] for _ in levels]
    for level_idx, groups in enumerate(levels):
        if len(groups) == 2:
            vec = np.zeros((len(groups),))
            for group_idx, group in enumerate(groups):
                if activity in group:
                    vec[group_idx] = 1
            targets[level_idx].append(vec)
        if len(groups) == 1:
            leaf = groups[0]
            vec = np.zeros((len(leaf),))
            if activity in leaf:
                vec[leaf.index(activity)] = 1
            targets[level_idx].append(vec)
    return targets
def encode_seqs(sq_dct, encoder, max_, voc, ac_over, levels):
    # One-hot encode peptide sequences and build their multi-level targets.
    # Each sequence is centre-padded to `max_` characters with "9" (which is
    # not in `voc`, so padding positions stay all-zero) and flattened into a
    # single (max_*len(voc),) vector. Targets are the OR over the per-activity
    # vectors returned by making_ys for every activity of the sequence.
    # `ac_over` is currently unused (kept for interface compatibility).
    # Returns (X matrix, list of per-sequence level targets, list of sequences).
    lnv = len(voc)
    #dims = len(ac_over)
    alsqs = []
    y_list = []
    x_list = []
    for sq in sq_dct:
        tmp = []
        for ii in encoder[sq]:
            tmp2 = making_ys(ii, levels)
            if len(tmp) == 0:
                for iii in tmp2:
                    tmp.append(iii)
            else:
                # Accumulate (OR-merge) targets across this sequence's activities.
                for iii in range(len(tmp2)):
                    #print(sq,tmp[iii])
                    tmp[iii][0] += tmp2[iii][0]
        for ii in tmp:
            # Clamp accumulated counts back to a binary multi-hot vector.
            ii[0][ii[0] > 0] = 1
        y_list.append(tmp)
        # Centre the sequence: extra pad character goes on the right when the
        # total padding is odd.
        diff = max_ - len(sq)
        if diff % 2 == 0:
            tmps = "9"*int(diff/2) + sq + "9"*int(diff/2)
        if diff % 2 != 0:
            tmps = "9"*int((diff-1)/2) + sq + "9"*int((diff-1)/2 + 1)
        #tmps = sq + "9"*int(diff)
        alsqs.append(sq)
        tmp_x = np.zeros((max_,lnv))
        for ii in range(len(tmp_x)):
            if tmps[ii] in voc:
                tmp_x[ii][voc.index(tmps[ii])] = 1.
        x_list.append([tmp_x.flatten()])
    return np.concatenate(x_list,axis=0), y_list, alsqs
def folder(folder, tot_bins):
    """Split the per-class sequence bins into train/val/test lists.

    folder: [[val_bin_idx], [test_bin_idx], [train_bin_idx, ...]].
    tot_bins: mapping class-key -> list of bins (each a list of sequences).
    """
    train_seqs = []
    val_seqs = []
    test_seqs = []
    val_bin = folder[0][0]
    test_bin = folder[1][0]
    for bins in tot_bins.values():
        for train_bin in folder[-1]:
            train_seqs += bins[train_bin]
        val_seqs += bins[val_bin]
        test_seqs += bins[test_bin]
    return train_seqs, val_seqs, test_seqs
####
def make_rn(x):
    """Return a randomly shuffled index array for the elements of x."""
    indices = np.arange(len(x))
    np.random.shuffle(indices)
    return indices
def res(X):
    """Append a trailing channel axis: (n, m) -> (n, m, 1)."""
    n_samples = len(X)
    n_features = len(X[0])
    return X.reshape((n_samples, n_features, 1))
def y_sets(y, levels):
    """Regroup per-sample target arrays into one stacked array per level.

    y: list of samples, each a list with one array per level.
    levels: only its length (the number of levels) is used.
    """
    per_level = [[] for _ in levels]
    for sample in y:
        for level_idx, arr in enumerate(sample):
            per_level[level_idx].append(arr)
    for level_idx in range(len(per_level)):
        per_level[level_idx] = np.concatenate(per_level[level_idx], axis=0)
    return per_level
def sorter(a, b):
    """Sort b by the paired values in a, descending; return (a_desc, b_sorted)
    as two lists."""
    paired = sorted(zip(a, b))
    a_sorted = [val for val, _ in paired][::-1]
    b_sorted = [tag for _, tag in paired][::-1]
    return a_sorted, b_sorted
def estimate_acc(pred, y):
    """Per-sample top-k ranking overlap, with k = the number of positives in
    the true row: |top-k(true) ∩ top-k(pred)| / k. Uses the sibling sorter()."""
    acc = []
    for i in range(len(y)):
        _, pred_rank = sorter(pred[i], np.arange(len(pred[i])))
        _, true_rank = sorter(y[i], np.arange(len(y[i])))
        k = len(y[i][y[i] > 0])
        overlap = set(true_rank[:k]).intersection(set(pred_rank[:k]))
        acc.append(len(overlap) / len(true_rank[:k]))
    return acc
# Binary cross-entropy in two forms: the Keras loss object and the raw
# backend function (used inside the custom losses below).
binary_cross = BinaryCrossentropy()#reduction="sum")
binnz = K.binary_crossentropy
def mcc_norm_rev_sumXxX(y_true, y_pred):
    # Custom per-label penalty derived from the Matthews correlation
    # coefficient, computed column-wise over the batch; returns one value per
    # output label. NOTE(review): the clip-based guards below appear to handle
    # degenerate columns (all-positive / all-negative targets or predictions)
    # by forcing the confusion counts away from 0 — confirm against the
    # original derivation before modifying.
    def mcc_loss_binary_mean_cor(y_true, y_pred):
        # Per-label positive counts for targets (y) and predictions (yh).
        y = K.sum(K.cast(y_true, 'float32'), axis=0)
        # q / q_: masks for labels whose targets are all 1 / all 0 in the batch.
        q = K.cast(K.equal(y/K.cast(K.shape(y_true)[0],'float32'),1),'float32')
        q_ = K.cast(K.equal(y/K.cast(K.shape(y_true)[0],'float32'),0),'float32')
        yh = K.sum(K.cast(y_pred, 'float32'), axis=0)
        # qq / qq_: same masks for the predictions.
        qq = K.cast(K.equal(yh/K.cast(K.shape(y_true)[0],'float32'),1),'float32')
        qq_ = K.cast(K.equal(yh/K.cast(K.shape(y_true)[0],'float32'),0),'float32')
        # e: 1 for labels with at least one prediction error, 0 for perfect ones.
        e_ = K.sum(K.cast(K.abs(y_true-y_pred), 'float32'), axis=0)
        e = K.cast(K.not_equal(e_,0),'float32')
        # Confusion-matrix counts per label, clipped to avoid degenerate zeros.
        tp = K.clip(K.sum(K.cast(y_true*y_pred, 'float32'), axis=0),K.clip(q_,0,1), K.cast(K.shape(y_true)[0],'float32'))
        tn = K.clip(K.sum(K.cast((1-y_true)*(1-y_pred), 'float32'), axis=0),K.clip(q,0,1), K.cast(K.shape(y_true)[0],'float32'))
        fp = K.clip(K.sum(K.cast((1-y_true)*y_pred, 'float32'), axis=0),K.clip(qq_,0,1), K.cast(K.shape(y_true)[0],'float32'))
        fn = K.clip(K.sum(K.cast(y_true*(1-y_pred), 'float32'), axis=0),K.clip(qq,0,1), K.cast(K.shape(y_true)[0],'float32'))
        up = tp*tn - fp*fn
        down = K.sqrt((tp+fp) * (tp+fn) * (tn+fp) * (tn+fn))
        mcc = up / down
        # (1 - MCC) penalty, zeroed for labels predicted perfectly.
        return (1-mcc)*e
    # e: 2 for labels where every single prediction is wrong, else 0; used as
    # a floor so completely-wrong labels get the maximum penalty.
    e_ = K.sum(K.cast(K.abs(y_true-y_pred), 'float32'), axis=0)
    e = K.cast(K.equal(e_,K.cast(K.shape(y_true)[0],'float32')),'float32')
    e = e * 2
    m1 = mcc_loss_binary_mean_cor(y_true, y_pred)
    return K.clip(m1,e,2)
def upper_loss(y_true, y_pred):
    """MCC-style penalty (mcc_norm_rev_sumXxX) summed over labels, on
    float32-cast inputs."""
    yt = K.cast(y_true, 'float32')
    yp = K.cast(y_pred, 'float32')
    return K.sum(mcc_norm_rev_sumXxX(yt, yp))
def loss_1(y_true, y_pred):
    """Training loss: per-label batch mean of binary cross-entropy plus the
    MCC-style penalty, summed over labels."""
    yt = K.cast(y_true, 'float32')
    yp = K.cast(y_pred, 'float32')
    bce_term = K.mean(binnz(yt, yp), axis=0)
    mcc_term = mcc_norm_rev_sumXxX(yt, yp)
    return K.sum(bce_term + mcc_term)
#############################################################
#### Intializing the network
# Weight initializers (all seeded for reproducibility): tiny symmetric,
# constant one, small positive (used for the sigmoid heads), wide symmetric,
# and a small constant.
init3 = RandomUniform(minval=-0.001, maxval=0.001, seed=seed_val)
init4 = Constant(1)
init5 = RandomUniform(minval=0.001, maxval=0.05, seed=seed_val)
init6 = RandomUniform(minval=-1, maxval=1, seed=seed_val)
init7 = Constant(0.001)
def max_sec_ax_keep_dims(inp):
    """Maximum over the second-to-last axis, keeping that axis in the output."""
    pooled = K.max(inp, axis=-2, keepdims=True)
    return pooled
def divider(inp):
    """L1-normalize along the last axis; the epsilon keeps the division finite
    when a row is all zeros."""
    norm = K.sum(K.abs(inp), axis=-1, keepdims=True) + K.epsilon()
    return inp / norm
def bottum_up(inp):
    """Collapse a group of child-node scores into one parent score by taking
    the maximum over the last axis (kept as a size-1 axis)."""
    parent_score = K.max(inp, axis=-1, keepdims=True)
    return parent_score
def small_cnn(x,kernels,LR,init2):
    # Multi-scale 1-D conv feature extractor over flattened one-hot sequences.
    # Parallel Conv1D branches scan windows of 4..40 residues (kernel_size and
    # stride are in one-hot units, hence the len(vocab) factor), each branch is
    # max-pooled over positions, branches are concatenated, flattened and
    # L1-normalised, then passed through three 500-unit PReLU layers with
    # dropout. Returns the final hidden tensor.
    l1x = []
    l2_reg = 0.0   # unused here
    drop = 0.5
    for i in [4,6,10,16,22,30,40]:
        cxc = len(vocab)
        x4 = Conv1D(kernels,kernel_size=cxc*i, strides=cxc, padding="valid", activation=LR,kernel_initializer=init2, use_bias=False)(x)
        x4 = PReLU(alpha_initializer=Constant(value=0.3))(x4)
        # Global max over sequence positions for this window size.
        x4 = Lambda(max_sec_ax_keep_dims)(x4)
        l1x.append(x4)
    x4 = Concatenate(axis=-2)(l1x)
    z41x = Flatten()(x4)
    z41x = Lambda(divider)(z41x)
    z41 = Dropout(0.2)(z41x)
    z42 = Dense(500, activation='linear',kernel_initializer=init6, use_bias=True)(z41)
    z42 = PReLU(alpha_initializer=Constant(value=0.3))(z42)
    #z42 = Lambda(divider)(z42)
    #z42x = Concatenate()([z41x, z42])
    z42 = Dropout(drop)(z42)
    z43 = Dense(500, activation='linear',kernel_initializer=init6, use_bias=True)(z42)
    z43 = PReLU(alpha_initializer=Constant(value=0.3))(z43)
    #z43 = Lambda(divider)(z43)
    #z43x = Concatenate()([z42x, z43])
    z43 = Dropout(drop)(z43)
    z44 = Dense(500, activation='linear',kernel_initializer=init6, use_bias=True)(z43)
    z44 = PReLU(alpha_initializer=Constant(value=0.3))(z44)
    #z44 = Lambda(divider)(z44)
    #z44x = Concatenate()([z43x, z44])
    z4 = Dropout(drop)(z44)
    return z4
def activation3(x):
    """Piecewise-linear gate: 0 for x <= 0.25, linear ramp up to 1 at
    x >= 0.75 (relu(x/0.5 - 0.5) clipped at 1)."""
    shifted = x / 0.5 - 0.5
    return K.relu(shifted, max_value=1)
# Build the hierarchical multi-output model: one independent CNN branch with
# a sigmoid head per leaf level (outputs5..9); parent-level outputs are
# derived bottom-up by max-pooling their children's scores, so the hierarchy
# is consistent by construction.
inputs = Input(shape=(len(vocab)*200,1))
x = inputs
xx = Flatten()(x)
init2 = "orthogonal"
name1 = "fold1_"
kernels = 40
LR = "linear"
l2_reg = 0.0
z4 = small_cnn(x,kernels,LR,init2)
lvl3_1 = Dense(len(levels[4][0]), activation='sigmoid',kernel_initializer=init5, use_bias=True, name="outputs5")
outputs5 = lvl3_1(z4)
#outputs5 = Lambda(activation3)(outputs5)
z4 = small_cnn(x,kernels,LR,init2)
lvl3_1 = Dense(len(levels[5][0]), activation='sigmoid',kernel_initializer=init5, use_bias=True, name="outputs6")
outputs6 = lvl3_1(z4)
#outputs6 = Lambda(activation3)(outputs6)
z7 = small_cnn(x,kernels,LR,init2)
lvl3_2 = Dense(len(levels[6][0]), activation='sigmoid',kernel_initializer=init5, use_bias=True, name="outputs7")
outputs7 = lvl3_2(z7)
#outputs7 = Lambda(activation3)(outputs7)
z5 = small_cnn(x,kernels,LR,init2)
lvl3_3 = Dense(len(levels[7][0]), activation='sigmoid',kernel_initializer=init5, use_bias=True, name="outputs8")
outputs8 = lvl3_3(z5)
#outputs8 = Lambda(activation3)(outputs8)
z6 = small_cnn(x,kernels,LR,init2)
lvl3_4 = Dense(len(levels[8][0]), activation='sigmoid',kernel_initializer=init5, use_bias=True, name="outputs9")
outputs9 = lvl3_4(z6)
#outputs9 = Lambda(activation3)(outputs9)
# Bottom-up aggregation: each parent score is the max of its children.
outputs51 = Lambda(bottum_up)(outputs8)
outputs52 = Lambda(bottum_up)(outputs9)
outputs4 = Concatenate(axis=-1)([outputs51,outputs52])
outputs31 = Lambda(bottum_up)(outputs7)
outputs32 = Lambda(bottum_up)(outputs4)
outputs3 = Concatenate(axis=-1)([outputs31,outputs32])
outputs21 = Lambda(bottum_up)(outputs5)
outputs22 = Lambda(bottum_up)(outputs6)
outputs2 = Concatenate(axis=-1)([outputs21,outputs22])
outputs11 = Lambda(bottum_up)(outputs2)
outputs12 = Lambda(bottum_up)(outputs3)
outputs1 = Concatenate(axis=-1)([outputs11,outputs12])
# Output order matches `levels` / `level_names`.
outputs = [outputs1,outputs2,outputs3,outputs4,outputs5,outputs6,outputs7, outputs8, outputs9]
def special_loss_mean_n_sum(y_true, y_pred):
    """Combined loss: per-label batch mean of binnz plus the reversed
    MCC-style term, summed over labels."""
    yt = K.cast(y_true, "float32")
    yp = K.cast(y_pred, "float32")
    combined = binnz(yt, yp) + mcc_norm_rev_sumXxX(yt, yp)
    per_label = K.mean(combined, axis=0)
    return K.sum(per_label)
def mena_binn_loss(y_true, y_pred):
    """Monitoring loss: per-label batch mean of binnz, summed over labels."""
    yt = K.cast(y_true, "float32")
    yp = K.cast(y_pred, "float32")
    per_label = K.mean(binnz(yt, yp), axis=0)
    return K.sum(per_label)
# ---------------------------------------------------------------------------
# Loss selection and model compilation. The loss lists are one entry per
# output level, positionally matched to `outputs` above.
# ---------------------------------------------------------------------------
losser = special_loss_mean_n_sum  # NOTE(review): bound but not used in the visible code
losses = [loss_1 for i in levels]
#losses = [upper_loss,upper_loss,upper_loss,upper_loss, loss_1,loss_1,loss_1,loss_1,loss_1]
#losses = [upper_loss,upper_loss,upper_loss,upper_loss, upper_loss,upper_loss,upper_loss,upper_loss,upper_loss]
lossesx = [mena_binn_loss for i in levels]  # binary-only losses, used for monitoring below
train_losses = [loss_1 for i in levels]     # the losses actually compiled into the model
#train_losses = [upper_loss,upper_loss,upper_loss,upper_loss, loss_1,loss_1,loss_1,loss_1,loss_1]
#train_losses = [upper_loss,upper_loss,upper_loss,upper_loss, upper_loss,upper_loss,upper_loss,upper_loss,upper_loss]
opt = Adam(learning_rate=0.0005)
decoderx = Model(inputs, outputs)
decoderx.compile(optimizer=opt, loss=train_losses, loss_weights=[1,1,1,1, 1,1,1,1,1])
decoderx.summary()
#############################################################
#### Running the network
# Sentinel "best validation loss so far" per head (lower is better);
# reset again at the top of every fold.
accc = 100000.
toxl = 100000.
ambl = 100000.
pepl = 100000.
hypl = 100000.
enzl = 100000.
total_loss = 100000
level_names = ["lvl1", "lvl2_1", "lvl2_2","lvl3", "lvl4_1", "lvl4_2", "lvl4_3", "lvl4_4", "lvl4_5"]
ws = decoderx.get_weights()     # running "best" weight set, patched per head below
start_ws = ws.copy()            # pristine initial weights, restored at each fold start
order = np.arange(len(levels))  # NOTE(review): not used in the visible code
breaker = 0                     # early-stopping counter: epochs since last improvement
losseZ = []                     # per-epoch [val_loss, val_loss_bin] history
losseZ_train = []               # NOTE(review): saved but never appended to in the visible code
# ---------------------------------------------------------------------------
# Cross-validation training loop: for each fold, rebuild the data, reset the
# model to its initial weights, train for up to 500 epochs with per-head
# checkpointing and early stopping after 20 epochs without improvement.
# (Indentation reconstructed from semantics; the extracted source was flat.)
# ---------------------------------------------------------------------------
for fs in range(len(folds)):#2,3):#len(folds)):
    # Reset the per-head "best so far" trackers for this fold.
    accc = 100000.
    toxl = 100000.
    ambl = 100000.
    pepl = 100000.
    hypl = 100000.
    enzl = 100000.
    total_loss = 100000
    # Split and encode this fold's sequences (200-long windows over vocab).
    train_to_encode, val_to_encode, test_to_encode = folder(folds[fs], tot_bins)
    xtr, ytr, xtrsq = encode_seqs(train_to_encode,encoder, 200, vocab, labels,levels)
    xv, yv, xvsq = encode_seqs(val_to_encode,encoder, 200, vocab, labels,levels)
    xt, yt ,xtsq = encode_seqs(test_to_encode,encoder, 200, vocab, labels,levels)
    print(ytr[:10])
    # Expand each label array into the per-level label sets.
    tmp = []
    for i in [ytr,yv,yt]:
        tmp.append(y_sets(i,levels))
    ytr,yv,yt = tmp
    # Persist the encoded fold so it can be reloaded without re-encoding.
    with open("numpy_mullab_data_tree_final_{}.pkl".format(fs),"wb") as f:
        pickle.dump([xtr, ytr, xtrsq, xv, yv, xvsq, xt, yt ,xtsq], f)
    print("numpy data saved")
    # Sanity check: every encoded matrix must align with its sequence list.
    tmpenc = [xtr,xv,xt]
    tmpsq = [xtrsq,xvsq,xtsq]
    for i in range(3):
        assert len(tmpenc[i]) == len(tmpsq[i])
        for ii in range(len(tmpenc[i])):
            assert len(tmpsq[i][ii]) == np.sum(tmpenc[i][ii])
    del(tmpenc)
    del(tmpsq)
    # Shuffled index orders (presumably; see make_rn) for each split.
    rntr = make_rn(xtr)
    rnv = make_rn(xv)
    rnt = make_rn(xt)
    all_ys = [i[rntr] for i in ytr]  # labels permuted to match the training order
    xtr = res(xtr)
    xv = res(xv)
    xt = res(xt)
    # Log the shapes of every split and its per-level labels.
    print(xtr.shape)
    for i in (ytr):
        print(i.shape)
    print("\n")
    print(xv.shape)
    for i in (yv):
        print(i.shape)
    print("\n")
    print(xt.shape)
    for i in (yt):
        print(i.shape)
    # Fresh start for this fold: restore the untrained weights.
    decoderx.set_weights(start_ws)
    name1 = "fold_"+str(fs)+"_save_model_based_on_MCC_loss_and_bin_"
    for i in range(500):
        print("\nEPOCH", i)
        decoderx.fit(xtr[rntr], all_ys, verbose=0, #validation_data=(xv, yv),
        epochs=1,
        batch_size=128, use_multiprocessing=True, workers=2)#, class_weight=wdct)#,callbacks=my_callbacks)
        # Reshuffle the training order for the next epoch.
        rntr = make_rn(xtr)
        all_ys = [i[rntr] for i in ytr]
        start = time.time()
        tmp_val = decoderx.predict(xv, batch_size=128)#, use_multiprocessing=True, workers=3)
        end = time.time()
        print(end - start)
        # Per-level validation losses: MCC-based (loss_1) and binary-only.
        val_loss = [loss_1(yv[iq],tmp_val[iq]).numpy() for iq in range(len(yv))]
        val_loss_bin = [mena_binn_loss(yv[iq],tmp_val[iq]).numpy() for iq in range(len(yv))]
        print_function("val_loss_bin",level_names,val_loss_bin)
        print_function("val_loss",level_names,val_loss)
        losseZ.append([val_loss, val_loss_bin])
        av_loss = np.average(val_loss[4:])
        ww = decoderx.get_weights()
        # NOTE(review): from here on val_loss_bin is REPLACED by the MCC-based
        # val_loss, so all checkpoint comparisons below use loss_1 values.
        val_loss_bin = val_loss
        print("\n")
        all_mcc2 = per_pred(yv,tmp_val,matthews_corrcoef,"VAL_MCC")
        # Per-head checkpointing: when a head improves, save the full model
        # under that head's name and patch only that head's weight slices
        # into ws. NOTE(review): the hard-coded index ranges presumably map
        # to each branch's layers in decoderx.get_weights() order -- confirm
        # against the model summary before changing the architecture.
        if val_loss_bin[4] < toxl:
            print("\n\tsaving best tox_hem model",val_loss_bin[4])
            decoderx.save_weights(name1+"best_toxhem_plusMCC5.h5")
            toxl = val_loss_bin[4]
            breaker = -1
            ws[28:35] = ww[28:35]
            ws[49:56] = ww[49:56]
            ws[76] = ww[76]
            ws[77] = ww[77]
            ws[82] = ww[82]
            ws[91:93] = ww[91:93]
            ws[97] = ww[97]
            ws[106:108] = ww[106:108]
            ws[112] = ww[112]
            ws[119:121] = ww[119:121]
        if val_loss_bin[5] < ambl:
            print("\n\tsaving best ambl model",val_loss_bin[5])
            decoderx.save_weights(name1+"best_ambl_plusMCC5.h5")
            ambl = val_loss_bin[5]
            breaker = -1
            ws[35:42] = ww[35:42]
            ws[56:63] = ww[56:63]
            ws[78] = ww[78]
            ws[79] = ww[79]
            ws[83] = ww[83]
            ws[93:95] = ww[93:95]
            ws[98] = ww[98]
            ws[108:110] = ww[108:110]
            ws[113] = ww[113]
            ws[121:123] = ww[121:123]
        if val_loss_bin[6] < pepl:
            print("\n\tsaving best pepl model",val_loss_bin[6])
            decoderx.save_weights(name1+"best_pepl_plusMCC5.h5")
            pepl = val_loss_bin[6]
            breaker = -1
            ws[42:49] = ww[42:49]
            ws[63:70] = ww[63:70]
            ws[80] = ww[80]
            ws[81] = ww[81]
            ws[84] = ww[84]
            ws[95:97] = ww[95:97]
            ws[99] = ww[99]
            ws[110:112] = ww[110:112]
            ws[114] = ww[114]
            ws[123:125] = ww[123:125]
        if val_loss_bin[7] < hypl:
            print("\n\tsaving best hyp model",val_loss_bin[7])
            decoderx.save_weights(name1+"best_hyp_plusMCC5.h5")
            hypl = val_loss_bin[7]
            breaker = -1
            ws[7:14] = ww[7:14]
            ws[21:28] = ww[21:28]
            ws[72] = ww[72]
            ws[73] = ww[73]
            ws[75] = ww[75]
            ws[87:89] = ww[87:89]
            ws[90] = ww[90]
            ws[102:104] = ww[102:104]
            ws[105] = ww[105]
            ws[117:119] = ww[117:119]
        if val_loss_bin[8] < enzl:
            print("\n\tsaving best enz model",val_loss_bin[8])
            decoderx.save_weights(name1+"best_enz_plusMCC5.h5")
            enzl = val_loss_bin[8]
            breaker = -1
            ws[:7] = ww[:7]
            ws[14:21] = ww[14:21]
            ws[70] = ww[70]
            ws[71] = ww[71]
            ws[74] = ww[74]
            ws[85:87] = ww[85:87]
            ws[89] = ww[89]
            ws[100:102] = ww[100:102]
            ws[104] = ww[104]
            ws[115:117] = ww[115:117]
        # Overall best: save the model and the patched per-head weight set.
        if np.sum(val_loss_bin) < total_loss:
            total_loss = np.sum(val_loss_bin)
            print("\n\tsaving best overal best model",np.sum(val_loss_bin))
            decoderx.save_weights(name1+"best_overall_best_plusMCC5.h5")
            with open(name1+"best_all_plusMCC5.pkl", "wb") as f:
                pickle.dump(ws,f)
        #decoderx.set_weights(ws)
        # Persist the loss history every epoch so a crash loses nothing.
        with open(name1+"losses.pkl", "wb") as f:
            pickle.dump(losseZ,f)
        with open(name1+"losses_train.pkl", "wb") as f:
            pickle.dump(losseZ_train,f)
        # Early stopping: any improvement above resets breaker to -1, so this
        # counts consecutive epochs without improvement.
        breaker += 1
        if breaker == 20:
            break
    print("\n")
| [
"numpy.prod",
"tensorflow.keras.initializers.RandomUniform",
"tensorflow.keras.backend.epsilon",
"tensorflow.keras.layers.Dense",
"tensorflow.keras.backend.not_equal",
"tensorflow.keras.backend.shape",
"tensorflow.keras.backend.max",
"numpy.random.seed",
"tensorflow.keras.backend.cast",
"numpy.con... | [((1119, 1147), 'tensorflow.random.set_seed', 'tf.random.set_seed', (['seed_val'], {}), '(seed_val)\n', (1137, 1147), True, 'import tensorflow as tf\n'), ((1149, 1173), 'numpy.random.seed', 'np.random.seed', (['seed_val'], {}), '(seed_val)\n', (1163, 1173), True, 'import numpy as np\n'), ((1175, 1196), 'random.seed', 'random.seed', (['seed_val'], {}), '(seed_val)\n', (1186, 1196), False, 'import random\n'), ((10584, 10604), 'tensorflow.keras.losses.BinaryCrossentropy', 'BinaryCrossentropy', ([], {}), '()\n', (10602, 10604), False, 'from tensorflow.keras.losses import BinaryCrossentropy\n'), ((12889, 12946), 'tensorflow.keras.initializers.RandomUniform', 'RandomUniform', ([], {'minval': '(-0.001)', 'maxval': '(0.001)', 'seed': 'seed_val'}), '(minval=-0.001, maxval=0.001, seed=seed_val)\n', (12902, 12946), False, 'from tensorflow.keras.initializers import RandomNormal, RandomUniform, Constant\n'), ((12956, 12967), 'tensorflow.keras.initializers.Constant', 'Constant', (['(1)'], {}), '(1)\n', (12964, 12967), False, 'from tensorflow.keras.initializers import RandomNormal, RandomUniform, Constant\n'), ((12977, 13032), 'tensorflow.keras.initializers.RandomUniform', 'RandomUniform', ([], {'minval': '(0.001)', 'maxval': '(0.05)', 'seed': 'seed_val'}), '(minval=0.001, maxval=0.05, seed=seed_val)\n', (12990, 13032), False, 'from tensorflow.keras.initializers import RandomNormal, RandomUniform, Constant\n'), ((13042, 13091), 'tensorflow.keras.initializers.RandomUniform', 'RandomUniform', ([], {'minval': '(-1)', 'maxval': '(1)', 'seed': 'seed_val'}), '(minval=-1, maxval=1, seed=seed_val)\n', (13055, 13091), False, 'from tensorflow.keras.initializers import RandomNormal, RandomUniform, Constant\n'), ((13101, 13116), 'tensorflow.keras.initializers.Constant', 'Constant', (['(0.001)'], {}), '(0.001)\n', (13109, 13116), False, 'from tensorflow.keras.initializers import RandomNormal, RandomUniform, Constant\n'), ((17860, 17886), 'tensorflow.keras.optimizers.Adam', 
'Adam', ([], {'learning_rate': '(0.0005)'}), '(learning_rate=0.0005)\n', (17864, 17886), False, 'from tensorflow.keras.optimizers import Adam\n'), ((17903, 17925), 'tensorflow.keras.models.Model', 'Model', (['inputs', 'outputs'], {}), '(inputs, outputs)\n', (17908, 17925), False, 'from tensorflow.keras.models import Model\n'), ((1310, 1324), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (1321, 1324), False, 'import pickle\n'), ((1395, 1409), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (1406, 1409), False, 'import pickle\n'), ((1468, 1482), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (1479, 1482), False, 'import pickle\n'), ((5267, 5283), 'numpy.prod', 'np.prod', (['all_mcc'], {}), '(all_mcc)\n', (5274, 5283), True, 'import numpy as np\n'), ((5300, 5319), 'numpy.average', 'np.average', (['all_mcc'], {}), '(all_mcc)\n', (5310, 5319), True, 'import numpy as np\n'), ((5982, 5998), 'numpy.prod', 'np.prod', (['all_mcc'], {}), '(all_mcc)\n', (5989, 5998), True, 'import numpy as np\n'), ((6015, 6034), 'numpy.average', 'np.average', (['all_mcc'], {}), '(all_mcc)\n', (6025, 6034), True, 'import numpy as np\n'), ((9701, 9722), 'numpy.random.shuffle', 'np.random.shuffle', (['rn'], {}), '(rn)\n', (9718, 9722), True, 'import numpy as np\n'), ((12241, 12257), 'tensorflow.keras.backend.clip', 'K.clip', (['m1', 'e', '(2)'], {}), '(m1, e, 2)\n', (12247, 12257), True, 'from tensorflow.keras import backend as K\n'), ((12307, 12332), 'tensorflow.keras.backend.cast', 'K.cast', (['y_pred', '"""float32"""'], {}), "(y_pred, 'float32')\n", (12313, 12332), True, 'from tensorflow.keras import backend as K\n'), ((12347, 12372), 'tensorflow.keras.backend.cast', 'K.cast', (['y_true', '"""float32"""'], {}), "(y_true, 'float32')\n", (12353, 12372), True, 'from tensorflow.keras import backend as K\n'), ((12473, 12498), 'tensorflow.keras.backend.cast', 'K.cast', (['y_pred', '"""float32"""'], {}), "(y_pred, 'float32')\n", (12479, 12498), True, 'from tensorflow.keras import 
backend as K\n'), ((12513, 12538), 'tensorflow.keras.backend.cast', 'K.cast', (['y_true', '"""float32"""'], {}), "(y_true, 'float32')\n", (12519, 12538), True, 'from tensorflow.keras import backend as K\n'), ((13163, 13197), 'tensorflow.keras.backend.max', 'K.max', (['inp'], {'axis': '(-2)', 'keepdims': '(True)'}), '(inp, axis=-2, keepdims=True)\n', (13168, 13197), True, 'from tensorflow.keras import backend as K\n'), ((13404, 13438), 'tensorflow.keras.backend.max', 'K.max', (['inp'], {'axis': '(-1)', 'keepdims': '(True)'}), '(inp, axis=-1, keepdims=True)\n', (13409, 13438), True, 'from tensorflow.keras import backend as K\n'), ((14911, 14945), 'tensorflow.keras.backend.relu', 'K.relu', (['(x / 0.5 - 0.5)'], {'max_value': '(1)'}), '(x / 0.5 - 0.5, max_value=1)\n', (14917, 14945), True, 'from tensorflow.keras import backend as K\n'), ((15005, 15014), 'tensorflow.keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (15012, 15014), False, 'from tensorflow.keras.layers import Conv1D, Flatten, Lambda, MaxPooling1D, GRU, SimpleRNN, PReLU\n'), ((16220, 16237), 'tensorflow.keras.layers.Lambda', 'Lambda', (['bottum_up'], {}), '(bottum_up)\n', (16226, 16237), False, 'from tensorflow.keras.layers import Conv1D, Flatten, Lambda, MaxPooling1D, GRU, SimpleRNN, PReLU\n'), ((16261, 16278), 'tensorflow.keras.layers.Lambda', 'Lambda', (['bottum_up'], {}), '(bottum_up)\n', (16267, 16278), False, 'from tensorflow.keras.layers import Conv1D, Flatten, Lambda, MaxPooling1D, GRU, SimpleRNN, PReLU\n'), ((16301, 16321), 'tensorflow.keras.layers.Concatenate', 'Concatenate', ([], {'axis': '(-1)'}), '(axis=-1)\n', (16312, 16321), False, 'from tensorflow.keras.layers import Dense, Input, Dropout, Multiply, LSTM, Add, Concatenate, TimeDistributed\n'), ((16360, 16377), 'tensorflow.keras.layers.Lambda', 'Lambda', (['bottum_up'], {}), '(bottum_up)\n', (16366, 16377), False, 'from tensorflow.keras.layers import Conv1D, Flatten, Lambda, MaxPooling1D, GRU, SimpleRNN, PReLU\n'), ((16401, 16418), 
'tensorflow.keras.layers.Lambda', 'Lambda', (['bottum_up'], {}), '(bottum_up)\n', (16407, 16418), False, 'from tensorflow.keras.layers import Conv1D, Flatten, Lambda, MaxPooling1D, GRU, SimpleRNN, PReLU\n'), ((16441, 16461), 'tensorflow.keras.layers.Concatenate', 'Concatenate', ([], {'axis': '(-1)'}), '(axis=-1)\n', (16452, 16461), False, 'from tensorflow.keras.layers import Dense, Input, Dropout, Multiply, LSTM, Add, Concatenate, TimeDistributed\n'), ((16500, 16517), 'tensorflow.keras.layers.Lambda', 'Lambda', (['bottum_up'], {}), '(bottum_up)\n', (16506, 16517), False, 'from tensorflow.keras.layers import Conv1D, Flatten, Lambda, MaxPooling1D, GRU, SimpleRNN, PReLU\n'), ((16541, 16558), 'tensorflow.keras.layers.Lambda', 'Lambda', (['bottum_up'], {}), '(bottum_up)\n', (16547, 16558), False, 'from tensorflow.keras.layers import Conv1D, Flatten, Lambda, MaxPooling1D, GRU, SimpleRNN, PReLU\n'), ((16581, 16601), 'tensorflow.keras.layers.Concatenate', 'Concatenate', ([], {'axis': '(-1)'}), '(axis=-1)\n', (16592, 16601), False, 'from tensorflow.keras.layers import Dense, Input, Dropout, Multiply, LSTM, Add, Concatenate, TimeDistributed\n'), ((16640, 16657), 'tensorflow.keras.layers.Lambda', 'Lambda', (['bottum_up'], {}), '(bottum_up)\n', (16646, 16657), False, 'from tensorflow.keras.layers import Conv1D, Flatten, Lambda, MaxPooling1D, GRU, SimpleRNN, PReLU\n'), ((16681, 16698), 'tensorflow.keras.layers.Lambda', 'Lambda', (['bottum_up'], {}), '(bottum_up)\n', (16687, 16698), False, 'from tensorflow.keras.layers import Conv1D, Flatten, Lambda, MaxPooling1D, GRU, SimpleRNN, PReLU\n'), ((16721, 16741), 'tensorflow.keras.layers.Concatenate', 'Concatenate', ([], {'axis': '(-1)'}), '(axis=-1)\n', (16732, 16741), False, 'from tensorflow.keras.layers import Dense, Input, Dropout, Multiply, LSTM, Add, Concatenate, TimeDistributed\n'), ((16929, 16954), 'tensorflow.keras.backend.cast', 'K.cast', (['y_true', '"""float32"""'], {}), "(y_true, 'float32')\n", (16935, 16954), True, 'from 
tensorflow.keras import backend as K\n'), ((16969, 16994), 'tensorflow.keras.backend.cast', 'K.cast', (['y_pred', '"""float32"""'], {}), "(y_pred, 'float32')\n", (16975, 16994), True, 'from tensorflow.keras import backend as K\n'), ((17142, 17167), 'tensorflow.keras.backend.cast', 'K.cast', (['y_true', '"""float32"""'], {}), "(y_true, 'float32')\n", (17148, 17167), True, 'from tensorflow.keras import backend as K\n'), ((17182, 17207), 'tensorflow.keras.backend.cast', 'K.cast', (['y_pred', '"""float32"""'], {}), "(y_pred, 'float32')\n", (17188, 17207), True, 'from tensorflow.keras import backend as K\n'), ((3556, 3578), 'numpy.sum', 'np.sum', (['yt[i]'], {'axis': '(-1)'}), '(yt[i], axis=-1)\n', (3562, 3578), True, 'import numpy as np\n'), ((3802, 3824), 'numpy.sum', 'np.sum', (['yt[i]'], {'axis': '(-1)'}), '(yt[i], axis=-1)\n', (3808, 3824), True, 'import numpy as np\n'), ((4214, 4236), 'numpy.sum', 'np.sum', (['yt[i]'], {'axis': '(-1)'}), '(yt[i], axis=-1)\n', (4220, 4236), True, 'import numpy as np\n'), ((4508, 4530), 'numpy.sum', 'np.sum', (['yt[i]'], {'axis': '(-1)'}), '(yt[i], axis=-1)\n', (4514, 4530), True, 'import numpy as np\n'), ((9023, 9044), 'numpy.zeros', 'np.zeros', (['(max_, lnv)'], {}), '((max_, lnv))\n', (9031, 9044), True, 'import numpy as np\n'), ((9226, 9256), 'numpy.concatenate', 'np.concatenate', (['x_list'], {'axis': '(0)'}), '(x_list, axis=0)\n', (9240, 9256), True, 'import numpy as np\n'), ((10031, 10062), 'numpy.concatenate', 'np.concatenate', (['lvls[j]'], {'axis': '(0)'}), '(lvls[j], axis=0)\n', (10045, 10062), True, 'import numpy as np\n'), ((11915, 11968), 'tensorflow.keras.backend.sqrt', 'K.sqrt', (['((tp + fp) * (tp + fn) * (tn + fp) * (tn + fn))'], {}), '((tp + fp) * (tp + fn) * (tn + fp) * (tn + fn))\n', (11921, 11968), True, 'from tensorflow.keras import backend as K\n'), ((13897, 13917), 'tensorflow.keras.layers.Concatenate', 'Concatenate', ([], {'axis': '(-2)'}), '(axis=-2)\n', (13908, 13917), False, 'from tensorflow.keras.layers 
import Dense, Input, Dropout, Multiply, LSTM, Add, Concatenate, TimeDistributed\n'), ((13935, 13944), 'tensorflow.keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (13942, 13944), False, 'from tensorflow.keras.layers import Conv1D, Flatten, Lambda, MaxPooling1D, GRU, SimpleRNN, PReLU\n'), ((13961, 13976), 'tensorflow.keras.layers.Lambda', 'Lambda', (['divider'], {}), '(divider)\n', (13967, 13976), False, 'from tensorflow.keras.layers import Conv1D, Flatten, Lambda, MaxPooling1D, GRU, SimpleRNN, PReLU\n'), ((13994, 14006), 'tensorflow.keras.layers.Dropout', 'Dropout', (['(0.2)'], {}), '(0.2)\n', (14001, 14006), False, 'from tensorflow.keras.layers import Dense, Input, Dropout, Multiply, LSTM, Add, Concatenate, TimeDistributed\n'), ((14030, 14102), 'tensorflow.keras.layers.Dense', 'Dense', (['(500)'], {'activation': '"""linear"""', 'kernel_initializer': 'init6', 'use_bias': '(True)'}), "(500, activation='linear', kernel_initializer=init6, use_bias=True)\n", (14035, 14102), False, 'from tensorflow.keras.layers import Dense, Input, Dropout, Multiply, LSTM, Add, Concatenate, TimeDistributed\n'), ((14252, 14265), 'tensorflow.keras.layers.Dropout', 'Dropout', (['drop'], {}), '(drop)\n', (14259, 14265), False, 'from tensorflow.keras.layers import Dense, Input, Dropout, Multiply, LSTM, Add, Concatenate, TimeDistributed\n'), ((14288, 14360), 'tensorflow.keras.layers.Dense', 'Dense', (['(500)'], {'activation': '"""linear"""', 'kernel_initializer': 'init6', 'use_bias': '(True)'}), "(500, activation='linear', kernel_initializer=init6, use_bias=True)\n", (14293, 14360), False, 'from tensorflow.keras.layers import Dense, Input, Dropout, Multiply, LSTM, Add, Concatenate, TimeDistributed\n'), ((14510, 14523), 'tensorflow.keras.layers.Dropout', 'Dropout', (['drop'], {}), '(drop)\n', (14517, 14523), False, 'from tensorflow.keras.layers import Dense, Input, Dropout, Multiply, LSTM, Add, Concatenate, TimeDistributed\n'), ((14546, 14618), 'tensorflow.keras.layers.Dense', 'Dense', 
(['(500)'], {'activation': '"""linear"""', 'kernel_initializer': 'init6', 'use_bias': '(True)'}), "(500, activation='linear', kernel_initializer=init6, use_bias=True)\n", (14551, 14618), False, 'from tensorflow.keras.layers import Dense, Input, Dropout, Multiply, LSTM, Add, Concatenate, TimeDistributed\n'), ((14767, 14780), 'tensorflow.keras.layers.Dropout', 'Dropout', (['drop'], {}), '(drop)\n', (14774, 14780), False, 'from tensorflow.keras.layers import Dense, Input, Dropout, Multiply, LSTM, Add, Concatenate, TimeDistributed\n'), ((19267, 19328), 'pickle.dump', 'pickle.dump', (['[xtr, ytr, xtrsq, xv, yv, xvsq, xt, yt, xtsq]', 'f'], {}), '([xtr, ytr, xtrsq, xv, yv, xvsq, xt, yt, xtsq], f)\n', (19278, 19328), False, 'import pickle\n'), ((20620, 20631), 'time.time', 'time.time', ([], {}), '()\n', (20629, 20631), False, 'import time\n'), ((20742, 20753), 'time.time', 'time.time', ([], {}), '()\n', (20751, 20753), False, 'import time\n'), ((21158, 21182), 'numpy.average', 'np.average', (['val_loss[4:]'], {}), '(val_loss[4:])\n', (21168, 21182), True, 'import numpy as np\n'), ((3276, 3293), 'numpy.round', 'np.round', (['y[i]', '(4)'], {}), '(y[i], 4)\n', (3284, 3293), True, 'import numpy as np\n'), ((3367, 3380), 'numpy.average', 'np.average', (['y'], {}), '(y)\n', (3377, 3380), True, 'import numpy as np\n'), ((3941, 3953), 'numpy.sum', 'np.sum', (['ind0'], {}), '(ind0)\n', (3947, 3953), True, 'import numpy as np\n'), ((4033, 4045), 'numpy.sum', 'np.sum', (['ind0'], {}), '(ind0)\n', (4039, 4045), True, 'import numpy as np\n'), ((4657, 4669), 'numpy.sum', 'np.sum', (['ind0'], {}), '(ind0)\n', (4663, 4669), True, 'import numpy as np\n'), ((4759, 4771), 'numpy.sum', 'np.sum', (['ind0'], {}), '(ind0)\n', (4765, 4771), True, 'import numpy as np\n'), ((6507, 6528), 'numpy.round', 'np.round', (['tmp_val[iq]'], {}), '(tmp_val[iq])\n', (6515, 6528), True, 'import numpy as np\n'), ((10787, 10812), 'tensorflow.keras.backend.cast', 'K.cast', (['y_true', '"""float32"""'], {}), 
"(y_true, 'float32')\n", (10793, 10812), True, 'from tensorflow.keras import backend as K\n'), ((11007, 11032), 'tensorflow.keras.backend.cast', 'K.cast', (['y_pred', '"""float32"""'], {}), "(y_pred, 'float32')\n", (11013, 11032), True, 'from tensorflow.keras import backend as K\n'), ((11300, 11318), 'tensorflow.keras.backend.not_equal', 'K.not_equal', (['e_', '(0)'], {}), '(e_, 0)\n', (11311, 11318), True, 'from tensorflow.keras import backend as K\n'), ((11400, 11416), 'tensorflow.keras.backend.clip', 'K.clip', (['q_', '(0)', '(1)'], {}), '(q_, 0, 1)\n', (11406, 11416), True, 'from tensorflow.keras import backend as K\n'), ((11543, 11558), 'tensorflow.keras.backend.clip', 'K.clip', (['q', '(0)', '(1)'], {}), '(q, 0, 1)\n', (11549, 11558), True, 'from tensorflow.keras import backend as K\n'), ((11674, 11691), 'tensorflow.keras.backend.clip', 'K.clip', (['qq_', '(0)', '(1)'], {}), '(qq_, 0, 1)\n', (11680, 11691), True, 'from tensorflow.keras import backend as K\n'), ((11809, 11825), 'tensorflow.keras.backend.clip', 'K.clip', (['qq', '(0)', '(1)'], {}), '(qq, 0, 1)\n', (11815, 11825), True, 'from tensorflow.keras import backend as K\n'), ((12041, 12063), 'tensorflow.keras.backend.abs', 'K.abs', (['(y_true - y_pred)'], {}), '(y_true - y_pred)\n', (12046, 12063), True, 'from tensorflow.keras import backend as K\n'), ((13356, 13367), 'tensorflow.keras.backend.epsilon', 'K.epsilon', ([], {}), '()\n', (13365, 13367), True, 'from tensorflow.keras import backend as K\n'), ((13623, 13750), 'tensorflow.keras.layers.Conv1D', 'Conv1D', (['kernels'], {'kernel_size': '(cxc * i)', 'strides': 'cxc', 'padding': '"""valid"""', 'activation': 'LR', 'kernel_initializer': 'init2', 'use_bias': '(False)'}), "(kernels, kernel_size=cxc * i, strides=cxc, padding='valid',\n activation=LR, kernel_initializer=init2, use_bias=False)\n", (13629, 13750), False, 'from tensorflow.keras.layers import Conv1D, Flatten, Lambda, MaxPooling1D, GRU, SimpleRNN, PReLU\n'), ((13824, 13852), 
'tensorflow.keras.layers.Lambda', 'Lambda', (['max_sec_ax_keep_dims'], {}), '(max_sec_ax_keep_dims)\n', (13830, 13852), False, 'from tensorflow.keras.layers import Conv1D, Flatten, Lambda, MaxPooling1D, GRU, SimpleRNN, PReLU\n'), ((24243, 24263), 'numpy.sum', 'np.sum', (['val_loss_bin'], {}), '(val_loss_bin)\n', (24249, 24263), True, 'import numpy as np\n'), ((24304, 24324), 'numpy.sum', 'np.sum', (['val_loss_bin'], {}), '(val_loss_bin)\n', (24310, 24324), True, 'import numpy as np\n'), ((24565, 24583), 'pickle.dump', 'pickle.dump', (['ws', 'f'], {}), '(ws, f)\n', (24576, 24583), False, 'import pickle\n'), ((24692, 24714), 'pickle.dump', 'pickle.dump', (['losseZ', 'f'], {}), '(losseZ, f)\n', (24703, 24714), False, 'import pickle\n'), ((24802, 24830), 'pickle.dump', 'pickle.dump', (['losseZ_train', 'f'], {}), '(losseZ_train, f)\n', (24813, 24830), False, 'import pickle\n'), ((3107, 3124), 'numpy.round', 'np.round', (['tmp_val'], {}), '(tmp_val)\n', (3115, 3124), True, 'import numpy as np\n'), ((3423, 3440), 'numpy.average', 'np.average', (['y[1:]'], {}), '(y[1:])\n', (3433, 3440), True, 'import numpy as np\n'), ((4617, 4642), 'numpy.round', 'np.round', (['tmp_val[i][ind]'], {}), '(tmp_val[i][ind])\n', (4625, 4642), True, 'import numpy as np\n'), ((5135, 5151), 'numpy.round', 'np.round', (['iqq', '(4)'], {}), '(iqq, 4)\n', (5143, 5151), True, 'import numpy as np\n'), ((5850, 5866), 'numpy.round', 'np.round', (['iqq', '(4)'], {}), '(iqq, 4)\n', (5858, 5866), True, 'import numpy as np\n'), ((11238, 11260), 'tensorflow.keras.backend.abs', 'K.abs', (['(y_true - y_pred)'], {}), '(y_true - y_pred)\n', (11243, 11260), True, 'from tensorflow.keras import backend as K\n'), ((11358, 11392), 'tensorflow.keras.backend.cast', 'K.cast', (['(y_true * y_pred)', '"""float32"""'], {}), "(y_true * y_pred, 'float32')\n", (11364, 11392), True, 'from tensorflow.keras import backend as K\n'), ((11493, 11539), 'tensorflow.keras.backend.cast', 'K.cast', (['((1 - y_true) * (1 - y_pred))', 
'"""float32"""'], {}), "((1 - y_true) * (1 - y_pred), 'float32')\n", (11499, 11539), True, 'from tensorflow.keras import backend as K\n'), ((11628, 11668), 'tensorflow.keras.backend.cast', 'K.cast', (['((1 - y_true) * y_pred)', '"""float32"""'], {}), "((1 - y_true) * y_pred, 'float32')\n", (11634, 11668), True, 'from tensorflow.keras import backend as K\n'), ((11763, 11803), 'tensorflow.keras.backend.cast', 'K.cast', (['(y_true * (1 - y_pred))', '"""float32"""'], {}), "(y_true * (1 - y_pred), 'float32')\n", (11769, 11803), True, 'from tensorflow.keras import backend as K\n'), ((13319, 13329), 'tensorflow.keras.backend.abs', 'K.abs', (['inp'], {}), '(inp)\n', (13324, 13329), True, 'from tensorflow.keras import backend as K\n'), ((14142, 14161), 'tensorflow.keras.initializers.Constant', 'Constant', ([], {'value': '(0.3)'}), '(value=0.3)\n', (14150, 14161), False, 'from tensorflow.keras.initializers import RandomNormal, RandomUniform, Constant\n'), ((14400, 14419), 'tensorflow.keras.initializers.Constant', 'Constant', ([], {'value': '(0.3)'}), '(value=0.3)\n', (14408, 14419), False, 'from tensorflow.keras.initializers import RandomNormal, RandomUniform, Constant\n'), ((14658, 14677), 'tensorflow.keras.initializers.Constant', 'Constant', ([], {'value': '(0.3)'}), '(value=0.3)\n', (14666, 14677), False, 'from tensorflow.keras.initializers import RandomNormal, RandomUniform, Constant\n'), ((19588, 19609), 'numpy.sum', 'np.sum', (['tmpenc[i][ii]'], {}), '(tmpenc[i][ii])\n', (19594, 19609), True, 'import numpy as np\n'), ((24380, 24400), 'numpy.sum', 'np.sum', (['val_loss_bin'], {}), '(val_loss_bin)\n', (24386, 24400), True, 'import numpy as np\n'), ((4718, 4744), 'numpy.round', 'np.round', (['tmp_val[i][ind0]'], {}), '(tmp_val[i][ind0])\n', (4726, 4744), True, 'import numpy as np\n'), ((4956, 4985), 'numpy.round', 'np.round', (['tmp_val[iq][:, iqq]'], {}), '(tmp_val[iq][:, iqq])\n', (4964, 4985), True, 'import numpy as np\n'), ((5671, 5700), 'numpy.round', 'np.round', 
(['tmp_val[iq][:, iqq]'], {}), '(tmp_val[iq][:, iqq])\n', (5679, 5700), True, 'import numpy as np\n'), ((11435, 11450), 'tensorflow.keras.backend.shape', 'K.shape', (['y_true'], {}), '(y_true)\n', (11442, 11450), True, 'from tensorflow.keras import backend as K\n'), ((11570, 11585), 'tensorflow.keras.backend.shape', 'K.shape', (['y_true'], {}), '(y_true)\n', (11577, 11585), True, 'from tensorflow.keras import backend as K\n'), ((11705, 11720), 'tensorflow.keras.backend.shape', 'K.shape', (['y_true'], {}), '(y_true)\n', (11712, 11720), True, 'from tensorflow.keras import backend as K\n'), ((11840, 11855), 'tensorflow.keras.backend.shape', 'K.shape', (['y_true'], {}), '(y_true)\n', (11847, 11855), True, 'from tensorflow.keras import backend as K\n'), ((12117, 12132), 'tensorflow.keras.backend.shape', 'K.shape', (['y_true'], {}), '(y_true)\n', (12124, 12132), True, 'from tensorflow.keras import backend as K\n'), ((13785, 13804), 'tensorflow.keras.initializers.Constant', 'Constant', ([], {'value': '(0.3)'}), '(value=0.3)\n', (13793, 13804), False, 'from tensorflow.keras.initializers import RandomNormal, RandomUniform, Constant\n'), ((10859, 10874), 'tensorflow.keras.backend.shape', 'K.shape', (['y_true'], {}), '(y_true)\n', (10866, 10874), True, 'from tensorflow.keras import backend as K\n'), ((10941, 10956), 'tensorflow.keras.backend.shape', 'K.shape', (['y_true'], {}), '(y_true)\n', (10948, 10956), True, 'from tensorflow.keras import backend as K\n'), ((11081, 11096), 'tensorflow.keras.backend.shape', 'K.shape', (['y_true'], {}), '(y_true)\n', (11088, 11096), True, 'from tensorflow.keras import backend as K\n'), ((11165, 11180), 'tensorflow.keras.backend.shape', 'K.shape', (['y_true'], {}), '(y_true)\n', (11172, 11180), True, 'from tensorflow.keras import backend as K\n')] |
"""
Code for processing operations for numpy arrays of tif stacks
"""
#Import packages
#Dependences
import numpy as np
from numpy.fft import fft2, ifft2, fftshift
from scipy.ndimage import median_filter, gaussian_filter, shift
import itertools
import gc
def doMedianFilter(imgstack, med_fsize=3):
    '''
    Apply a 2D median filter to every frame of an image stack.

    imgstack is (nframes, height, width) numpy array of images
    med_fsize is the size of the square median-filter footprint (default 3)
    Returns a (nframes, height, width) uint16 numpy array of median filtered images
    '''
    # Preallocate the output in the raw tif pixel type and filter each
    # frame independently.
    result = np.empty_like(imgstack, dtype=np.uint16)
    for frame_idx in range(result.shape[0]):
        result[frame_idx] = median_filter(imgstack[frame_idx], size=med_fsize)
    return result
def doHomomorphicFilter(imgstack, sigmaVal=7):
    '''
    Homomorphic-filter every frame of an image stack: log-transform, subtract
    the low-frequency (illumination) component estimated with a Gaussian blur,
    then map the result back toward the original intensity range.

    imgstack is (nframes, height, width) numpy array of images
    sigmaVal is the Gaussian sigma used to estimate the low-frequency component
    Returns a (nframes, height, width) numpy array of homomorphic filtered images
    '''
    # Machine epsilon via the classic 7/3 - 4/3 - 1 trick: floor applied
    # before the log so log1p never sees a non-positive argument.
    eps = 7./3 - 4./3 - 1
    baseline = imgstack.min()
    scale = 1. / imgstack.max()
    # Shift to a zero baseline, scale down, and take log(1 + x).
    logged = np.log1p(np.maximum((imgstack - baseline) * scale, eps))
    # Estimate the low-frequency illumination component frame by frame.
    low_freq = np.empty(logged.shape)
    for frame_idx, frame in enumerate(logged):
        low_freq[frame_idx, ...] = gaussian_filter(frame, sigma=sigmaVal)
    detail = logged - low_freq
    del logged, low_freq
    gc.collect()  # release the large intermediates before building the output
    # Re-anchor at zero so the smallest log-difference maps back to baseline,
    # then invert the log and the scaling.
    detail = detail - detail.min()
    return (np.expm1(detail) / scale) + baseline
def registerImages(imgstack, Ref=None, method='CrossCorrelation'):
    '''
    Perform frame-by-frame image registration to a reference image, defaulting
    to FFT-based cross correlation.

    imgstack is (nframes, height, width) numpy array of images
    Ref is a (height, width) numpy array used as the registration reference;
        if None, the mean across all frames is used
    method selects the registration algorithm; only 'CrossCorrelation' is
        currently implemented

    Returns stackshift, a (nframes, height, width) uint16 numpy array of
        motion corrected (shifted) images
    Returns yshift, the per-frame shift in the y-direction (height)
    Returns xshift, the per-frame shift in the x-direction (width)

    For an unknown method, returns the string
    "ERROR: No function defined for Provided Method" (original contract kept).
    '''
    def CrossCorrelation(imgstack, Ref):
        # Cross-correlate each frame against the reference via FFT; the
        # displacement of the correlation peak (after fftshift) relative to
        # the image center gives the motion to undo.
        if Ref is None:
            Ref = imgstack.mean(axis=0)
        imshape = Ref.shape
        nframes = imgstack.shape[0]
        imcenter = np.array(imshape)/2
        yshift = np.empty((nframes,1)); xshift = np.empty((nframes,1));
        Ref_fft = fft2(Ref).conjugate()  # precompute: constant across frames
        stackshift = np.zeros_like(imgstack, dtype=np.uint16)
        for idx, frame in enumerate(imgstack):
            xcfft = fft2(frame) * Ref_fft
            xcim = abs(ifft2(xcfft))
            xcpeak = np.array(np.unravel_index(np.argmax(fftshift(xcim)), imshape))
            disps = imcenter - xcpeak
            stackshift[idx,...] = np.uint16(shift(frame, disps))
            yshift[idx] = disps[0]
            xshift[idx] = disps[1]
        return stackshift, yshift, xshift
    # BUG FIX: the original dict mapped method names to *called* results
    # ({'CrossCorrelation': CrossCorrelation(imgstack, Ref)}), so the full
    # registration ran even when `method` was unknown. Dispatch lazily:
    # store the callables and invoke only the selected one.
    method_select = {
        'CrossCorrelation': CrossCorrelation,
    }
    func = method_select.get(method)
    if func is None:
        # Preserve the original error contract (a string, not an exception).
        return "ERROR: No function defined for Provided Method"
    return func(imgstack, Ref)
def calculateFramewiseCrossCorrelation(imgstack1, imgstack2):
    '''
    Calculate frame-by-frame Cross Correlation between two image stacks (465.43 sec. 7 min 45 sec)
    imgstack1 is (nframes, height, width) numpy array of images
    imgstack2 is (nframes, height, width) numpy array of images
    imgstack1 and imgstack2 should be the same dimensions, however if one video is shorter than the other, then the values will be calculated for all of the length of the shorter video
    Returns yshift is the number of pixels to shift each frame in the y-direction (height)
    Returns xshift is the number of pixels to shift each frame in the x-direction (width)
    '''
    #Precalculate Static Values
    nframes = imgstack1.shape[0]
    imshape = imgstack1.shape[1:]
    imcenter = np.array(imshape)/2
    yshift = np.empty((nframes,1)); xshift = np.empty((nframes,1));
    #Loop through frames and compute cross correlation between each frame in the stack
    # BUG FIX: itertools.izip was removed in Python 3; the builtin zip is lazy
    # in Python 3 and stops at the shorter stack, matching the docstring.
    for idx, (frame1, frame2) in enumerate(zip(imgstack1, imgstack2)):
        xcfft = fft2(frame1) * fft2(frame2).conjugate()
        xcim = abs(ifft2(xcfft))
        # Peak of the cross-correlation (after fftshift) gives the relative
        # displacement between the two frames.
        xcpeak = np.array(np.unravel_index(np.argmax(fftshift(xcim)), imshape))
        disps = imcenter - xcpeak
        yshift[idx] = disps[0]
        xshift[idx] = disps[1]
    return yshift, xshift
def applyFrameShifts(imgstack, yshift, xshift):
    '''
    Apply frame shifts to each frame of an image stack (301.28 sec. 5 min 2 sec)
    imgstack is (nframes, height, width) numpy array of images
    yshift is the number of pixels to shift each frame in the y-direction (height)
    xshift is the number of pixels to shift each frame in the x-direction (width)
    Returns stackshift, a (nframes, height, width) numpy array of images shifted according to yshift & xshift
    '''
    # Allocate the output once, then translate every frame by its own offset.
    stackshift = np.zeros_like(imgstack, dtype=np.uint16)
    for frame_idx in range(len(imgstack)):
        displacement = (yshift[frame_idx], xshift[frame_idx])
        stackshift[frame_idx, ...] = np.uint16(shift(imgstack[frame_idx], displacement))
    return stackshift
| [
"numpy.fft.fftshift",
"numpy.fft.ifft2",
"numpy.expm1",
"numpy.fft.fft2",
"scipy.ndimage.shift",
"numpy.array",
"numpy.empty",
"gc.collect",
"scipy.ndimage.gaussian_filter",
"itertools.izip",
"scipy.ndimage.median_filter",
"numpy.maximum",
"numpy.zeros_like"
] | [((571, 612), 'numpy.empty', 'np.empty', (['imgstack.shape'], {'dtype': 'np.uint16'}), '(imgstack.shape, dtype=np.uint16)\n', (579, 612), True, 'import numpy as np\n'), ((1522, 1545), 'numpy.empty', 'np.empty', (['logimgs.shape'], {}), '(logimgs.shape)\n', (1530, 1545), True, 'import numpy as np\n'), ((1782, 1794), 'gc.collect', 'gc.collect', ([], {}), '()\n', (1792, 1794), False, 'import gc\n'), ((5053, 5075), 'numpy.empty', 'np.empty', (['(nframes, 1)'], {}), '((nframes, 1))\n', (5061, 5075), True, 'import numpy as np\n'), ((5085, 5107), 'numpy.empty', 'np.empty', (['(nframes, 1)'], {}), '((nframes, 1))\n', (5093, 5107), True, 'import numpy as np\n'), ((6106, 6146), 'numpy.zeros_like', 'np.zeros_like', (['imgstack'], {'dtype': 'np.uint16'}), '(imgstack, dtype=np.uint16)\n', (6119, 6146), True, 'import numpy as np\n'), ((684, 720), 'scipy.ndimage.median_filter', 'median_filter', (['frame'], {'size': 'med_fsize'}), '(frame, size=med_fsize)\n', (697, 720), False, 'from scipy.ndimage import median_filter, gaussian_filter, shift\n'), ((1398, 1450), 'numpy.maximum', 'np.maximum', (['((imgstack - Baseline) * ScaleFactor)', 'eps'], {}), '((imgstack - Baseline) * ScaleFactor, eps)\n', (1408, 1450), True, 'import numpy as np\n'), ((1619, 1657), 'scipy.ndimage.gaussian_filter', 'gaussian_filter', (['frame'], {'sigma': 'sigmaVal'}), '(frame, sigma=sigmaVal)\n', (1634, 1657), False, 'from scipy.ndimage import median_filter, gaussian_filter, shift\n'), ((3253, 3275), 'numpy.empty', 'np.empty', (['(nframes, 1)'], {}), '((nframes, 1))\n', (3261, 3275), True, 'import numpy as np\n'), ((3285, 3307), 'numpy.empty', 'np.empty', (['(nframes, 1)'], {}), '((nframes, 1))\n', (3293, 3307), True, 'import numpy as np\n'), ((3451, 3491), 'numpy.zeros_like', 'np.zeros_like', (['imgstack'], {'dtype': 'np.uint16'}), '(imgstack, dtype=np.uint16)\n', (3464, 3491), True, 'import numpy as np\n'), ((5020, 5037), 'numpy.array', 'np.array', (['imshape'], {}), '(imshape)\n', (5028, 5037), True, 
'import numpy as np\n'), ((5238, 5274), 'itertools.izip', 'itertools.izip', (['imgstack1', 'imgstack2'], {}), '(imgstack1, imgstack2)\n', (5252, 5274), False, 'import itertools\n'), ((1996, 2013), 'numpy.expm1', 'np.expm1', (['adjimgs'], {}), '(adjimgs)\n', (2004, 2013), True, 'import numpy as np\n'), ((3216, 3233), 'numpy.array', 'np.array', (['imshape'], {}), '(imshape)\n', (3224, 3233), True, 'import numpy as np\n'), ((5292, 5304), 'numpy.fft.fft2', 'fft2', (['frame1'], {}), '(frame1)\n', (5296, 5304), False, 'from numpy.fft import fft2, ifft2, fftshift\n'), ((5351, 5363), 'numpy.fft.ifft2', 'ifft2', (['xcfft'], {}), '(xcfft)\n', (5356, 5363), False, 'from numpy.fft import fft2, ifft2, fftshift\n'), ((6230, 6270), 'scipy.ndimage.shift', 'shift', (['frame', '(yshift[idx], xshift[idx])'], {}), '(frame, (yshift[idx], xshift[idx]))\n', (6235, 6270), False, 'from scipy.ndimage import median_filter, gaussian_filter, shift\n'), ((3326, 3335), 'numpy.fft.fft2', 'fft2', (['Ref'], {}), '(Ref)\n', (3330, 3335), False, 'from numpy.fft import fft2, ifft2, fftshift\n'), ((3559, 3570), 'numpy.fft.fft2', 'fft2', (['frame'], {}), '(frame)\n', (3563, 3570), False, 'from numpy.fft import fft2, ifft2, fftshift\n'), ((3604, 3616), 'numpy.fft.ifft2', 'ifft2', (['xcfft'], {}), '(xcfft)\n', (3609, 3616), False, 'from numpy.fft import fft2, ifft2, fftshift\n'), ((3784, 3803), 'scipy.ndimage.shift', 'shift', (['frame', 'disps'], {}), '(frame, disps)\n', (3789, 3803), False, 'from scipy.ndimage import median_filter, gaussian_filter, shift\n'), ((5307, 5319), 'numpy.fft.fft2', 'fft2', (['frame2'], {}), '(frame2)\n', (5311, 5319), False, 'from numpy.fft import fft2, ifft2, fftshift\n'), ((5418, 5432), 'numpy.fft.fftshift', 'fftshift', (['xcim'], {}), '(xcim)\n', (5426, 5432), False, 'from numpy.fft import fft2, ifft2, fftshift\n'), ((3675, 3689), 'numpy.fft.fftshift', 'fftshift', (['xcim'], {}), '(xcim)\n', (3683, 3689), False, 'from numpy.fft import fft2, ifft2, fftshift\n')] |
import codecs
import numpy as np
import os
# Role labels that count as "core" arguments in both PropBank naming schemes
# (ARG0..ARG5/ARGA and the abbreviated A0..A5/AA forms).
_CORE_ARGS = { "ARG0", "ARG1", "ARG2", "ARG3", "ARG4", "ARG5", "ARGA",
    "A0", "A1", "A2", "A3", "A4", "A5", "AA" }

def logsumexp(arr):
  """Return the softmax of arr, computed stably via the log-sum-exp trick.

  Note: despite the name, this returns exp(arr - logsumexp(arr)), i.e. the
  normalized probabilities, not the scalar log-sum-exp itself.
  """
  maxv = np.max(arr)
  lognorm = maxv + np.log(np.sum(np.exp(arr - maxv)))
  arr2 = np.exp(arr - lognorm)
  return arr2

def srl_constraint_tracker(pred_to_args):
  """Count SRL structural-constraint violations in predicted arguments.

  pred_to_args maps a predicate key to a list of (start, end, role) spans.
  Returns (unique_core_role_violations, continuation_role_violations,
  reference_role_violations).
  """
  unique_core_role_violations = 0
  continuation_role_violations = 0
  reference_role_violations = 0
  for pred_ids, args in pred_to_args.items():
    # Sort by span start, assuming they are not overlapping.
    # NOTE(review): reverse=True iterates spans in *descending* start order,
    # so a continuation (C-) span is always seen before its base span --
    # confirm this ordering is intended for the continuation check.
    sorted_args = sorted(args, key=lambda x: x[0], reverse=True)
    core_args = set()
    base_args = set()
    for start, end, role in sorted_args:
      if role in _CORE_ARGS:
        if role in core_args:
          unique_core_role_violations += 1
        core_args.update([role])
      elif role.startswith("C-") and not role[2:] in base_args:
        continuation_role_violations += 1
      if not role.startswith("C-") and not role.startswith("R-"):
        # BUG FIX: set.update(role) added the individual *characters* of the
        # role string; add() stores the whole role name so the membership
        # tests on role[2:] below can ever succeed.
        base_args.add(role)
    for start, end, role in sorted_args:
      # A reference (R-) argument must refer to a base argument that exists.
      if role.startswith("R-") and not role[2:] in base_args:
        reference_role_violations += 1
  return unique_core_role_violations, continuation_role_violations, reference_role_violations
def print_sentence_to_conll(fout, tokens, labels, head_scores, raw_head_scores=None):
  """Write one sentence in CoNLL-style tab-separated columns to fout.

  Each token row contains the token, optional raw head scores, and one
  (label, score) column pair per label column; positive scores are printed,
  non-positive ones are blanked. A blank line terminates the sentence.
  """
  for column in labels:
    assert len(column) == len(tokens)
  for idx, token in enumerate(tokens):
    cells = [token.ljust(10)]
    if raw_head_scores:
      cells.extend(str(round(raw, 3)).rjust(4) for raw in raw_head_scores[idx])
    for column, scores in zip(labels, head_scores):
      cells.append(column[idx].rjust(10))
      if scores[idx] > 0:
        cells.append(str(round(scores[idx], 2)).rjust(4))
      else:
        cells.append(" ".rjust(4))
    fout.write("\t".join(cells) + "\t\n")
  fout.write("\n")
| [
"numpy.exp",
"numpy.max"
] | [((204, 215), 'numpy.max', 'np.max', (['arr'], {}), '(arr)\n', (210, 215), True, 'import numpy as np\n'), ((279, 300), 'numpy.exp', 'np.exp', (['(arr - lognorm)'], {}), '(arr - lognorm)\n', (285, 300), True, 'import numpy as np\n'), ((249, 267), 'numpy.exp', 'np.exp', (['(arr - maxv)'], {}), '(arr - maxv)\n', (255, 267), True, 'import numpy as np\n')] |
from aitlas.datasets.crops_classification import CropsDataset
import os
import zipfile
import tarfile
import urllib
import numpy as np
import pandas as pd
from tqdm import tqdm
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
import seaborn as sns
import h5py
from ..base import BaseDataset
from .urls import CODESURL, CLASSMAPPINGURL, INDEX_FILE_URLs, FILESIZES, SHP_URLs, H5_URLs, RAW_CSV_URL
from eolearn.core import EOPatch, FeatureType
from eolearn.geometry import VectorToRasterTask
import matplotlib.pyplot as plt
class DownloadProgressBar(tqdm):
    """tqdm progress bar whose update_to matches urlretrieve's reporthook signature."""
    def update_to(self, b=1, bsize=1, tsize=None):
        """Advance the bar given block index `b`, block size `bsize`, total `tsize`."""
        if tsize is not None:
            self.total = tsize
        downloaded = b * bsize
        self.update(downloaded - self.n)
def download_file(url, output_path, overwrite=False):
    """Download `url` to `output_path`, showing a progress bar.

    Raises ValueError if url is None. If the target file already exists the
    download is skipped unless overwrite=True.
    """
    # BUG FIX: the file only does `import urllib`, which in Python 3 does not
    # make urllib.request available; import the submodule explicitly here.
    import urllib.request
    if url is None:
        raise ValueError("download_file: provided url is None!")
    if not os.path.exists(output_path) or overwrite:
        with DownloadProgressBar(unit='B', unit_scale=True,
                                miniters=1, desc=url.split('/')[-1]) as t:
            urllib.request.urlretrieve(url, filename=output_path, reporthook=t.update_to)
    else:
        print(f"file exists in {output_path}. specify overwrite=True if intended")
# Sentinel-2 band / derived-index names used as the selected time-series features.
BANDS = ['B3', 'B4', 'B5', 'B6', 'B7', 'B8', 'B11', 'B12', 'NDVI', 'NDWI', 'Brightness']
class EOPatchCrops(CropsDataset):
    """EOPatchCrops - a crop type classification dataset built from eo-learn EOPatches.

    Scans EOPatch directories under `<root>/eopatches`, turns each annotated
    crop polygon into a spatially aggregated Sentinel-2 time series stored in
    per-split HDF5 files (train/test/val), and keeps a CSV index describing
    every sample.
    """

    def __init__(self, config):
        """Initialise from `config` (expects root, regions, csv_file_path);
        builds the index and HDF5 files on first use."""
        CropsDataset.__init__(self, config)
        self.root = self.config.root
        self.regions = self.config.regions
        self.indexfile = self.config.root + os.sep + self.config.csv_file_path
        # One HDF5 file with the aggregated time series per data split.
        self.h5path = {}
        self.split_sets = ['train', 'test', 'val']
        for region in self.split_sets:
            self.h5path[region] = self.config.root + os.sep + region + '.hdf5'
        self.classmappingfile = self.config.root + os.sep + "classmapping.csv"
        self.load_classmapping(self.classmappingfile)
        # Only do the (expensive) timeseries file-structure generation once,
        # when no general index exists yet.
        if not os.path.isfile(self.indexfile):
            self.preprocess()
        self.selected_bands = BANDS
        self.index = pd.read_csv(self.root + os.sep + self.regions[0] + ".csv", index_col=None)
        for region in self.regions[1:]:
            region_ind = pd.read_csv(self.root + os.sep + region + ".csv", index_col=None)
            # BUG FIX: pd.concatenate does not exist; pd.concat appends the
            # per-region indices into one frame.
            self.index = pd.concat([self.index, region_ind], axis=0)
        # self.index now summarizes all chosen regions; index.csv on disk is
        # the entire index for all existing regions.
        self.X_list = None
        self.show_timeseries(0)
        plt.show()

    def preprocess(self):
        """Build the sample index and the per-split HDF5 feature files.

        Pass 1 collects one index row per annotated polygon (with a mapped
        crop class); pass 2 rasterizes each polygon, masks the Sentinel-2
        feature cube with it, sums over space and writes the resulting
        (time, band) series into the HDF5 file of its assigned split.
        """
        self.eopatches = [f.name for f in os.scandir(self.root + os.sep + 'eopatches') if f.is_dir()]
        self.indexfile = self.root + os.sep + 'index.csv'
        print(self.eopatches)
        columns = ['path', 'eopatch', 'polygon_id', 'CODE_CULTU', 'sequencelength', 'classid', 'classname', 'region']
        list_index = list()
        for patch in self.eopatches:
            eop = EOPatch.load(self.root + os.sep + 'eopatches' + os.sep + patch)
            polygons = eop.vector_timeless["CROP_TYPE_GDF"]
            for row in polygons.itertuples():
                # Skip crop codes that are not part of the class mapping.
                if row.ct_eu_code not in self.mapping.index.values:
                    continue
                poly_id = int(row.polygon_id)
                classid = self.mapping.loc[row.ct_eu_code].id
                classname = self.mapping.loc[row.ct_eu_code].classname
                list_index.append(
                    {
                        columns[0]: patch + os.sep + str(poly_id),
                        columns[1]: patch,
                        columns[2]: poly_id,
                        columns[3]: row.ct_eu_code,
                        columns[4]: 0,  # sequencelength placeholder
                        columns[5]: classid,
                        columns[6]: classname,
                        columns[7]: ''  # region (split name) is assigned by split()
                    }
                )
        self.index = pd.DataFrame(list_index)
        self.split()
        # Open one writable HDF5 file per split (avoid shadowing builtin set).
        h5files = {}
        for split_name in self.split_sets:
            h5files[split_name] = h5py.File(self.h5path[split_name], "w")
        self.index.set_index("path", drop=False, inplace=True)
        for patch in self.eopatches:
            eop = EOPatch.load(self.root + os.sep + 'eopatches' + os.sep + patch)
            polygons = eop.vector_timeless["CROP_TYPE_GDF"]
            for row in polygons.itertuples():
                if row.ct_eu_code not in self.mapping.index.values:
                    continue
                poly_id = int(row.polygon_id)
                print(self.index)
                index_row = self.index.loc[patch + os.sep + str(poly_id)]
                polygon = polygons[polygons.polygon_id == poly_id]
                # Rasterize this polygon into a binary mask on the CROP_TYPE grid.
                temp = VectorToRasterTask(vector_input=polygon,
                                          raster_feature=(FeatureType.MASK_TIMELESS, 'poly'),
                                          values=1,
                                          raster_shape=(FeatureType.MASK_TIMELESS, 'CROP_TYPE')
                                          )
                polygon_indicator_mask = temp.execute(eop).mask_timeless['poly']
                print("num_pixels orig " + str(np.sum(polygon_indicator_mask)))
                seq_length = eop.data["FEATURES_S2"].shape[0]
                num_bands = eop.data["FEATURES_S2"].shape[3]
                # Broadcast the (h, w, 1) mask over time and bands so it lines
                # up with the (time, h, w, bands) feature cube.
                polygon_indicator_mask_ts = np.repeat(polygon_indicator_mask[np.newaxis, :, :, :], seq_length, axis=0)
                polygon_indicator_mask_ts = np.repeat(polygon_indicator_mask_ts, num_bands, axis=3)
                print(polygon_indicator_mask_ts.shape)
                print("num_pixels " + str(np.sum(polygon_indicator_mask_ts)))
                print("aggregation_test " + str(np.sum(polygon_indicator_mask_ts, axis=(1, 2))))
                print("aggregation_test_shape " + str(np.sum(polygon_indicator_mask_ts, axis=(1, 2)).shape))
                # Sum masked pixels over space -> one (time, band) series per polygon.
                temp_X = np.sum(np.multiply(polygon_indicator_mask_ts, eop.data["FEATURES_S2"]), axis=(1, 2))
                dset = h5files[index_row.region].create_dataset(patch + os.sep + str(poly_id), data=temp_X)
        self.index.reset_index(inplace=True, drop=True)
        self.write_index()

    def split(self):
        """Randomly split the index into train/val/test (15% test, then 15%
        of the remainder as val) and persist each split to `<root>/<split>.csv`,
        tagging each row's region column with its split name."""
        print(self.index)
        X_train, X_test, y_train, y_test = train_test_split(self.index.values, self.index.classid.values, test_size=0.15, random_state=1)
        X_train, X_val, y_train, y_val = train_test_split(X_train, y_train, test_size=0.15, random_state=1)
        X_train = pd.DataFrame(X_train, columns=self.index.columns)
        X_train['region'] = 'train'
        X_train.to_csv(self.root + os.sep + 'train.csv')
        X_test = pd.DataFrame(X_test, columns=self.index.columns)
        X_test['region'] = 'test'
        X_test.to_csv(self.root + os.sep + 'test.csv')
        X_val = pd.DataFrame(X_val, columns=self.index.columns)
        X_val['region'] = 'val'
        X_val.to_csv(self.root + os.sep + 'val.csv')
        self.index = pd.concat([X_train, X_val, X_test], ignore_index=True)
        print(self.index)

    def write_index(self):
        """Write the full sample index to the CSV index file."""
        self.index.to_csv(self.indexfile)
| [
"aitlas.datasets.crops_classification.CropsDataset.__init__",
"os.path.exists",
"numpy.multiply",
"numpy.repeat",
"pandas.read_csv",
"urllib.request.urlretrieve",
"sklearn.model_selection.train_test_split",
"os.scandir",
"pandas.concatenate",
"eolearn.core.EOPatch.load",
"h5py.File",
"os.path.... | [((1493, 1528), 'aitlas.datasets.crops_classification.CropsDataset.__init__', 'CropsDataset.__init__', (['self', 'config'], {}), '(self, config)\n', (1514, 1528), False, 'from aitlas.datasets.crops_classification import CropsDataset\n'), ((2327, 2401), 'pandas.read_csv', 'pd.read_csv', (["(self.root + os.sep + self.regions[0] + '.csv')"], {'index_col': 'None'}), "(self.root + os.sep + self.regions[0] + '.csv', index_col=None)\n", (2338, 2401), True, 'import pandas as pd\n'), ((2819, 2829), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2827, 2829), True, 'import matplotlib.pyplot as plt\n'), ((5250, 5274), 'pandas.DataFrame', 'pd.DataFrame', (['list_index'], {}), '(list_index)\n', (5262, 5274), True, 'import pandas as pd\n'), ((7692, 7791), 'sklearn.model_selection.train_test_split', 'train_test_split', (['self.index.values', 'self.index.classid.values'], {'test_size': '(0.15)', 'random_state': '(1)'}), '(self.index.values, self.index.classid.values, test_size=\n 0.15, random_state=1)\n', (7708, 7791), False, 'from sklearn.model_selection import train_test_split\n'), ((7828, 7894), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X_train', 'y_train'], {'test_size': '(0.15)', 'random_state': '(1)'}), '(X_train, y_train, test_size=0.15, random_state=1)\n', (7844, 7894), False, 'from sklearn.model_selection import train_test_split\n'), ((7933, 7982), 'pandas.DataFrame', 'pd.DataFrame', (['X_train'], {'columns': 'self.index.columns'}), '(X_train, columns=self.index.columns)\n', (7945, 7982), True, 'import pandas as pd\n'), ((8089, 8137), 'pandas.DataFrame', 'pd.DataFrame', (['X_test'], {'columns': 'self.index.columns'}), '(X_test, columns=self.index.columns)\n', (8101, 8137), True, 'import pandas as pd\n'), ((8239, 8286), 'pandas.DataFrame', 'pd.DataFrame', (['X_val'], {'columns': 'self.index.columns'}), '(X_val, columns=self.index.columns)\n', (8251, 8286), True, 'import pandas as pd\n'), ((8390, 8444), 'pandas.concat', 
'pd.concat', (['[X_train, X_val, X_test]'], {'ignore_index': '(True)'}), '([X_train, X_val, X_test], ignore_index=True)\n', (8399, 8444), True, 'import pandas as pd\n'), ((904, 931), 'os.path.exists', 'os.path.exists', (['output_path'], {}), '(output_path)\n', (918, 931), False, 'import os\n'), ((1094, 1171), 'urllib.request.urlretrieve', 'urllib.request.urlretrieve', (['url'], {'filename': 'output_path', 'reporthook': 't.update_to'}), '(url, filename=output_path, reporthook=t.update_to)\n', (1120, 1171), False, 'import urllib\n'), ((2207, 2237), 'os.path.isfile', 'os.path.isfile', (['self.indexfile'], {}), '(self.indexfile)\n', (2221, 2237), False, 'import os\n'), ((2470, 2535), 'pandas.read_csv', 'pd.read_csv', (["(self.root + os.sep + region + '.csv')"], {'index_col': 'None'}), "(self.root + os.sep + region + '.csv', index_col=None)\n", (2481, 2535), True, 'import pandas as pd\n'), ((2555, 2603), 'pandas.concatenate', 'pd.concatenate', (['[self.index, region_ind]'], {'axis': '(0)'}), '([self.index, region_ind], axis=0)\n', (2569, 2603), True, 'import pandas as pd\n'), ((4056, 4119), 'eolearn.core.EOPatch.load', 'EOPatch.load', (["(self.root + os.sep + 'eopatches' + os.sep + patch)"], {}), "(self.root + os.sep + 'eopatches' + os.sep + patch)\n", (4068, 4119), False, 'from eolearn.core import EOPatch, FeatureType\n'), ((5370, 5402), 'h5py.File', 'h5py.File', (['self.h5path[set]', '"""w"""'], {}), "(self.h5path[set], 'w')\n", (5379, 5402), False, 'import h5py\n'), ((5523, 5586), 'eolearn.core.EOPatch.load', 'EOPatch.load', (["(self.root + os.sep + 'eopatches' + os.sep + patch)"], {}), "(self.root + os.sep + 'eopatches' + os.sep + patch)\n", (5535, 5586), False, 'from eolearn.core import EOPatch, FeatureType\n'), ((3668, 3712), 'os.scandir', 'os.scandir', (["(self.root + os.sep + 'eopatches')"], {}), "(self.root + os.sep + 'eopatches')\n", (3678, 3712), False, 'import os\n'), ((6021, 6188), 'eolearn.geometry.VectorToRasterTask', 'VectorToRasterTask', ([], 
{'vector_input': 'polygon', 'raster_feature': "(FeatureType.MASK_TIMELESS, 'poly')", 'values': '(1)', 'raster_shape': "(FeatureType.MASK_TIMELESS, 'CROP_TYPE')"}), "(vector_input=polygon, raster_feature=(FeatureType.\n MASK_TIMELESS, 'poly'), values=1, raster_shape=(FeatureType.\n MASK_TIMELESS, 'CROP_TYPE'))\n", (6039, 6188), False, 'from eolearn.geometry import VectorToRasterTask\n'), ((6806, 6880), 'numpy.repeat', 'np.repeat', (['polygon_indicator_mask[np.newaxis, :, :, :]', 'seq_length'], {'axis': '(0)'}), '(polygon_indicator_mask[np.newaxis, :, :, :], seq_length, axis=0)\n', (6815, 6880), True, 'import numpy as np\n'), ((6922, 6977), 'numpy.repeat', 'np.repeat', (['polygon_indicator_mask_ts', 'num_bands'], {'axis': '(3)'}), '(polygon_indicator_mask_ts, num_bands, axis=3)\n', (6931, 6977), True, 'import numpy as np\n'), ((7341, 7404), 'numpy.multiply', 'np.multiply', (['polygon_indicator_mask_ts', "eop.data['FEATURES_S2']"], {}), "(polygon_indicator_mask_ts, eop.data['FEATURES_S2'])\n", (7352, 7404), True, 'import numpy as np\n'), ((6589, 6619), 'numpy.sum', 'np.sum', (['polygon_indicator_mask'], {}), '(polygon_indicator_mask)\n', (6595, 6619), True, 'import numpy as np\n'), ((7074, 7107), 'numpy.sum', 'np.sum', (['polygon_indicator_mask_ts'], {}), '(polygon_indicator_mask_ts)\n', (7080, 7107), True, 'import numpy as np\n'), ((7156, 7202), 'numpy.sum', 'np.sum', (['polygon_indicator_mask_ts'], {'axis': '(1, 2)'}), '(polygon_indicator_mask_ts, axis=(1, 2))\n', (7162, 7202), True, 'import numpy as np\n'), ((7256, 7302), 'numpy.sum', 'np.sum', (['polygon_indicator_mask_ts'], {'axis': '(1, 2)'}), '(polygon_indicator_mask_ts, axis=(1, 2))\n', (7262, 7302), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# coding: utf-8
# ### - Calculate the signature strength and Transcriptional Activity Score for each compound based on its replicates for Cell painting Level-4 profiles
#
#
# #### Definitions from [clue.io](https://clue.io/connectopedia/signature_quality_metrics)
#
# - **Signature strength -** Signature strength is a measure of the magnitude of the response elicited by a given treatment and is computed as the number of landmark genes (out of 978) with absolute z-score greater than or equal to 2. SS helps to further discriminate signatures that were consistent (high CC) from those that did or did not impact many genes.
#
# - **Transcriptional Activity Score (TAS) -** is an aggregate measure of signature strength (SS) and median replicate correlation (CC) that is intended to represent a perturbagen's transcriptional activity. The more transcriptionally active a perturbagen, the higher its TAS.
#
# In[1]:
import os
import argparse
import pandas as pd
import numpy as np
import re
from os import walk
from collections import Counter
import random
import shutil
from statistics import median
import math
from math import sqrt
from functools import reduce
import matplotlib.pyplot as plt
get_ipython().run_line_magic('matplotlib', 'inline')
import seaborn as sns
sns.set_style("darkgrid")
import pickle
from statistics import median
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
np.warnings.filterwarnings('ignore', category=np.VisibleDeprecationWarning)
# In[2]:
# Folder containing the Level-4 L1000 compound replicate datasets.
L1000_level4_path = "L1000_lvl4_cpd_replicate_datasets"
# In[3]:
# Load the level-4 replicate profiles and the per-compound median replicate scores.
df_level4 = pd.read_csv(os.path.join(L1000_level4_path, 'L1000_level4_cpd_replicates.csv.gz'),
                        compression='gzip',low_memory = False)
df_cpd_med_scores = pd.read_csv(os.path.join(L1000_level4_path, 'cpd_replicate_median_scores.csv'))
# In[4]:
##cpds_replicates_dict = dict(zip(df_cpd_med_scores['cpd'], df_cpd_med_scores['no_of_replicates']))
# In[5]:
# Non-feature (metadata) columns to drop before computing signature metrics.
metadata_cols = ['replicate_id', 'Metadata_broad_sample', 'pert_id', 'dose', 'pert_idose',
                 'pert_iname', 'moa', 'det_plate', 'det_well', 'sig_id']
# In[6]:
# Number of L1000 feature columns (all columns minus the metadata columns).
n_L1000_feats = df_level4.drop(metadata_cols, axis=1).shape[1]
# In[7]:
def compute_signature_strength(cpds_list, df, metadata_cols = metadata_cols):
    """Compute the signature strength (SS) of each compound from its replicates.

    SS counts feature entries whose scaled absolute z-score is >= 2 across all
    replicates (after multiplying by sqrt of the replicate count), normalized
    by the number of replicates.
    """
    signature_strengths = {}
    for compound in cpds_list:
        replicates = df[df['pert_iname'] == compound].copy()
        replicates.drop(metadata_cols, axis = 1, inplace = True)
        # Scale z-scores by sqrt(#replicates) before applying the threshold.
        scaled = abs((replicates * sqrt(replicates.shape[0])).T)
        strong_entries = scaled[scaled >= 2.0].stack().count()
        signature_strengths[compound] = strong_entries / len(scaled.columns)
    return signature_strengths
# In[8]:
def compute_tas(cpds_SS, cpds_median_score, dose, num_feats):
    """Compute the Transcriptional Activity Score (TAS) per compound.

    TAS = sqrt(max(median replicate correlation, 0) * signature strength
    / num_feats), using the median score at the given dose (1-indexed).
    """
    tas_scores = {}
    for compound, ss_value in cpds_SS.items():
        # Negative replicate correlations are clipped to zero before the sqrt.
        clipped_cc = max(cpds_median_score[compound][dose - 1], 0)
        tas_scores[compound] = sqrt(clipped_cc * ss_value / num_feats)
    return tas_scores
# In[9]:
def compute_SS_TAS(df, cpds_median_score, num_L1000_feats = n_L1000_feats):
    """
    Computes both Transcriptional activity score (TAS) and
    signature strength per compound based on its replicates across all doses"""
    # NOTE(review): relies on set() iteration order over the integer dose
    # codes and slices off the first element with [1:7] -- presumably dropping
    # a control dose; confirm against the dose encoding used upstream.
    dose_list = list(set(df['dose'].unique().tolist()))[1:7]
    for dose in dose_list:
        df_dose = df[df['dose'] == dose].copy()
        cpds_ss = compute_signature_strength(list(cpds_median_score.keys()), df_dose)
        cpds_tas = compute_tas(cpds_ss, cpds_median_score, dose, num_L1000_feats)
        # Sort by compound name so column values line up across doses.
        sorted_ss = {key:value for key, value in sorted(cpds_ss.items(), key=lambda item: item[0])}
        sorted_tas = {key:value for key, value in sorted(cpds_tas.items(), key=lambda item: item[0])}
        if dose == 1:
            # First dose creates the result frames; later doses add columns.
            # NOTE(review): df_cpd_ss/df_cpd_tas are only bound if dose 1 is
            # present in dose_list -- otherwise the return raises NameError.
            df_cpd_ss = pd.DataFrame.from_dict(sorted_ss, orient='index', columns = ['dose_1'])
            df_cpd_tas = pd.DataFrame.from_dict(sorted_tas, orient='index', columns = ['dose_1'])
        else:
            df_cpd_ss['dose_' + str(dose)] = sorted_ss.values()
            df_cpd_tas['dose_' + str(dose)] = sorted_tas.values()
    return df_cpd_ss, df_cpd_tas
# In[10]:
# Map each compound to its per-dose median replicate-correlation scores.
df_med_scores = df_cpd_med_scores.set_index('cpd').rename_axis(None, axis=0).drop(['no_of_replicates'], axis = 1)
cpd_med_scores = df_med_scores.T.to_dict('list')
# In[11]:
df_ss_score, df_tas_score = compute_SS_TAS(df_level4, cpd_med_scores)
# In[12]:
df_cpd_med_scores.drop(['no_of_replicates'],axis = 1, inplace = True)
# In[13]:
# Move the compound index back into a regular 'cpd' column.
df_ss_score = df_ss_score.reset_index().rename({'index':'cpd'}, axis = 1)
df_tas_score = df_tas_score.reset_index().rename({'index':'cpd'}, axis = 1)
# In[14]:
def rename_cols(df):
    """Rename dose-number columns (dose_1..dose_6) to micromolar dose labels, in place."""
    dose_labels = {'dose_1' : '0.04 uM', 'dose_2':'0.12 uM', 'dose_3':'0.37 uM',
                   'dose_4': '1.11 uM', 'dose_5':'3.33 uM', 'dose_6':'10 uM'}
    df.rename(columns=dose_labels, inplace = True)
    return df
# In[15]:
# Replace the dose_N column names with human-readable micromolar doses.
df_cpd_med_scores = rename_cols(df_cpd_med_scores)
df_ss_score = rename_cols(df_ss_score)
df_tas_score = rename_cols(df_tas_score)
# In[16]:
def melt_df(df, col_name):
    """
    Reshape df to long format with 3 columns: cpd, dose, and the per-dose
    value column named `col_name` (median score or p-value).
    """
    return df.melt(id_vars=['cpd'], var_name="dose", value_name=col_name)
# In[17]:
def merge_ss_tas_med_scores(df_med_scores, df_ss_scores, df_tas_scores):
    """
    Merge the median-score (replicate correlation), signature strength (SS)
    and TAS (transcriptional activity score) dataframes on compound and dose,
    keeping only (cpd, dose) pairs present in all three.
    """
    long_frames = [
        melt_df(df_med_scores, 'replicate_correlation'),
        melt_df(df_ss_scores, 'signature_strength'),
        melt_df(df_tas_scores, 'TAS'),
    ]
    merged = long_frames[0]
    for frame in long_frames[1:]:
        merged = pd.merge(merged, frame, on=['cpd', 'dose'], how='inner')
    return merged
# In[18]:
# Combine all three metrics into one long-format table per (cpd, dose).
df_all_vals = merge_ss_tas_med_scores(df_cpd_med_scores, df_ss_score, df_tas_score)
# In[19]:
df_all_vals.head(10)
# In[20]:
def save_to_csv(df, path, file_name, compress=None):
    """Save dataframe `df` to `path/file_name` as csv, creating `path` if needed."""
    # makedirs with exist_ok avoids the check-then-create race of the original
    # exists()+mkdir() pair and also creates missing intermediate directories.
    os.makedirs(path, exist_ok=True)
    df.to_csv(os.path.join(path, file_name), index=False, compression=compress)
# In[21]:
save_to_csv(df_all_vals, L1000_level4_path, 'L1000_all_scores.csv')
# ### - DMSO MAS and replicate correlation
#
# - Calculate 95th percentile of DMSO MAS score
# In[22]:
# Isolate the DMSO (negative control) replicates for the null distribution.
df_dmso = df_level4[df_level4['pert_iname'] == 'DMSO'].copy()
# In[23]:
df_dmso['det_plate'].unique()
# In[24]:
len(df_dmso['det_plate'].unique())
# In[25]:
def compute_dmso_SS_median_score(df):
    """
    Compute, per plate, the signature strength (SS) and the median Spearman
    replicate-correlation score for DMSO replicates.
    Plates with fewer than two replicates are skipped.
    """
    median_scores = {}
    ss_scores = {}
    meta_columns = ['replicate_id', 'Metadata_broad_sample', 'pert_id', 'dose', 'pert_idose',
                    'pert_iname', 'moa', 'det_plate', 'det_well', 'sig_id']
    for plate in df['det_plate'].unique():
        replicates = df[df['det_plate'] == plate].copy()
        if replicates.shape[0] <= 1:
            continue
        replicates.drop(meta_columns, axis = 1, inplace = True)
        # Median of the pairwise Spearman correlations (upper triangle only).
        corr_matrix = replicates.astype('float64').T.corr(method = 'spearman').values
        upper_triangle = corr_matrix[np.triu_indices(len(corr_matrix), k = 1)]
        median_scores[plate] = median(list(upper_triangle))
        # Signature strength: scaled |z| >= 2 entries per replicate column.
        scaled = abs((replicates * sqrt(replicates.shape[0])).T)
        strong_entries = scaled[scaled >= 2.0].stack().count()
        ss_scores[plate] = strong_entries / len(scaled.columns)
    return median_scores, ss_scores
# In[26]:
# Per-plate DMSO replicate correlation and signature strength.
dmso_median_scores, dmso_ss_scores = compute_dmso_SS_median_score(df_dmso)
# In[27]:
def compute_dmso_TAS(dmso_median, dmso_ss, num_feats = n_L1000_feats):
    """
    Compute the Transcriptional Activity Score (TAS) per plate for DMSO
    replicates: sqrt(|median correlation| * SS / num_feats).
    """
    tas_scores = {}
    for plate, median_value in dmso_median.items():
        # DMSO uses the absolute median correlation (no clipping at zero).
        tas_scores[plate] = sqrt(abs(median_value) * dmso_ss[plate] / num_feats)
    return tas_scores
# In[28]:
dmso_tas_scores = compute_dmso_TAS(dmso_median_scores, dmso_ss_scores)
# In[29]:
# 95th percentile of the DMSO TAS null distribution (activity threshold).
dmso_95pct = np.percentile(list(dmso_tas_scores.values()),95)
# In[30]:
print(dmso_95pct)
# In[31]:
def save_to_pickle(value, path, file_name):
    """Pickle `value` to `path/file_name`, creating `path` if needed."""
    # makedirs with exist_ok avoids the check-then-create race of the original
    # exists()+mkdir() pair and also creates missing intermediate directories.
    os.makedirs(path, exist_ok=True)
    with open(os.path.join(path, file_name), 'wb') as handle:
        pickle.dump(value, handle, protocol=pickle.HIGHEST_PROTOCOL)
# In[32]:
# Persist the DMSO 95th-percentile TAS threshold for downstream notebooks.
save_to_pickle(dmso_95pct, L1000_level4_path, 'L1000_dmso_95_percentile_TAS.pickle')
# In[ ]:
| [
"os.path.exists",
"pickle.dump",
"pandas.merge",
"numpy.warnings.filterwarnings",
"os.path.join",
"math.sqrt",
"pandas.DataFrame.from_dict",
"seaborn.set_style",
"os.mkdir",
"warnings.simplefilter"
] | [((1304, 1329), 'seaborn.set_style', 'sns.set_style', (['"""darkgrid"""'], {}), "('darkgrid')\n", (1317, 1329), True, 'import seaborn as sns\n'), ((1390, 1452), 'warnings.simplefilter', 'warnings.simplefilter', ([], {'action': '"""ignore"""', 'category': 'FutureWarning'}), "(action='ignore', category=FutureWarning)\n", (1411, 1452), False, 'import warnings\n'), ((1453, 1528), 'numpy.warnings.filterwarnings', 'np.warnings.filterwarnings', (['"""ignore"""'], {'category': 'np.VisibleDeprecationWarning'}), "('ignore', category=np.VisibleDeprecationWarning)\n", (1479, 1528), True, 'import numpy as np\n'), ((1635, 1704), 'os.path.join', 'os.path.join', (['L1000_level4_path', '"""L1000_level4_cpd_replicates.csv.gz"""'], {}), "(L1000_level4_path, 'L1000_level4_cpd_replicates.csv.gz')\n", (1647, 1704), False, 'import os\n'), ((1802, 1868), 'os.path.join', 'os.path.join', (['L1000_level4_path', '"""cpd_replicate_median_scores.csv"""'], {}), "(L1000_level4_path, 'cpd_replicate_median_scores.csv')\n", (1814, 1868), False, 'import os\n'), ((6509, 6529), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (6523, 6529), False, 'import os\n'), ((6539, 6553), 'os.mkdir', 'os.mkdir', (['path'], {}), '(path)\n', (6547, 6553), False, 'import os\n'), ((6573, 6602), 'os.path.join', 'os.path.join', (['path', 'file_name'], {}), '(path, file_name)\n', (6585, 6602), False, 'import os\n'), ((9034, 9054), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (9048, 9054), False, 'import os\n'), ((9064, 9078), 'os.mkdir', 'os.mkdir', (['path'], {}), '(path)\n', (9072, 9078), False, 'import os\n'), ((9158, 9218), 'pickle.dump', 'pickle.dump', (['value', 'handle'], {'protocol': 'pickle.HIGHEST_PROTOCOL'}), '(value, handle, protocol=pickle.HIGHEST_PROTOCOL)\n', (9169, 9218), False, 'import pickle\n'), ((2626, 2655), 'math.sqrt', 'sqrt', (['cpd_replicates.shape[0]'], {}), '(cpd_replicates.shape[0])\n', (2630, 2655), False, 'from math import sqrt\n'), ((4027, 4096), 
'pandas.DataFrame.from_dict', 'pd.DataFrame.from_dict', (['sorted_ss'], {'orient': '"""index"""', 'columns': "['dose_1']"}), "(sorted_ss, orient='index', columns=['dose_1'])\n", (4049, 4096), True, 'import pandas as pd\n'), ((4124, 4194), 'pandas.DataFrame.from_dict', 'pd.DataFrame.from_dict', (['sorted_tas'], {'orient': '"""index"""', 'columns': "['dose_1']"}), "(sorted_tas, orient='index', columns=['dose_1'])\n", (4146, 4194), True, 'import pandas as pd\n'), ((6172, 6226), 'pandas.merge', 'pd.merge', (['left', 'right'], {'on': "['cpd', 'dose']", 'how': '"""inner"""'}), "(left, right, on=['cpd', 'dose'], how='inner')\n", (6180, 6226), True, 'import pandas as pd\n'), ((9102, 9131), 'os.path.join', 'os.path.join', (['path', 'file_name'], {}), '(path, file_name)\n', (9114, 9131), False, 'import os\n'), ((7938, 7967), 'math.sqrt', 'sqrt', (['plt_replicates.shape[0]'], {}), '(plt_replicates.shape[0])\n', (7942, 7967), False, 'from math import sqrt\n')] |
import numpy as np
def convert_weights(d, cfg):
    """Convert a Detectron2-style PyTorch state dict into a flat name->array dict.

    @param {dict} d State dict (parameter name -> array); entries are popped as
        they are converted, and the function asserts d is empty at the end.
    @param cfg Detectron2-style config node; only cfg.MODEL.* fields are read.
    @returns {dict} ret Mapping of '/'-separated destination names to arrays.
        Conv kernels are transposed (2, 3, 1, 0) (OIHW -> HWIO) and FC weights
        are transposed, presumably for a TensorFlow-style consumer — verify
        against the target framework.
    """
    has_fpn = cfg.MODEL.NECK.NAME == "FPN"
    use_res5_in_stage2 = cfg.MODEL.ROI_HEADS.NAME == "Res5ROIHeads"
    is_retina = cfg.MODEL.NECK.TOP_BLOCK_TYPE == "P6P7"
    ret = {}
    # Convert one conv layer: kernel, optional norm parameters, optional
    # deformable-offset branch, optional bias. Pops consumed entries from d.
    def _convert_conv(src, dst):
        src_w = d.pop(src + ".weight").transpose(2, 3, 1, 0)
        ret[dst + "/weights"] = src_w
        if src + ".norm.weight" in d: # has norm
            ret[dst + "/norm/gamma"] = d.pop(src + ".norm.weight")
            ret[dst + "/norm/beta"] = d.pop(src + ".norm.bias")
        if src + ".norm.running_var" in d: # batch norm
            ret[dst + "/norm/moving_variance"] = d.pop(src + ".norm.running_var")
            ret[dst + "/norm/moving_mean"] = d.pop(src + ".norm.running_mean")
        if src + ".norm.num_batches_tracked" in d:
            # bookkeeping counter only; discarded, not converted
            d.pop(src + ".norm.num_batches_tracked")
        if src + "_offset.weight" in d:
            ret[dst + "/offset_weights"] = d.pop(src + "_offset.weight").transpose(2, 3, 1, 0)
            ret[dst + "/offset_bias"] = d.pop(src + "_offset.bias")
        if src + ".bias" in d:
            ret[dst + "/bias"] = d.pop(src + ".bias")
    # Convert one fully-connected layer (weight transposed to in x out).
    def _convert_fc(src, dst):
        ret[dst + "/weights"] = d.pop(src + ".weight").transpose()
        ret[dst + "/bias"] = d.pop(src + ".bias")
    if has_fpn:
        backbone_prefix = "backbone.bottom_up."
        dst_prefix = "backbone/"
    else:
        backbone_prefix = "backbone."
        dst_prefix = "backbone/"
    _convert_conv(backbone_prefix + "stem.conv1", dst_prefix + "stem/conv1")
    # ResNet stages res2..res5; for Res5ROIHeads the last stage lives under
    # roi_heads.* in the source dict instead of the backbone.
    for grpid in range(4):
        if use_res5_in_stage2 and grpid == 3 and not is_retina:
            backbone_prefix = "roi_heads."
            dst_prefix = "roi_heads/"
        num_blocks_per_stage = {
            50: [3, 4, 6, 3], 101: [3, 4, 23, 3], 152: [3, 8, 36, 3]
        }[cfg.MODEL.RESNETS.DEPTH]
        for blkid in range(num_blocks_per_stage[grpid]):
            _convert_conv(backbone_prefix + f"res{grpid + 2}.{blkid}.conv1",
                          dst_prefix + f"res{grpid + 2}/block_{blkid + 1}/conv1")
            _convert_conv(backbone_prefix + f"res{grpid + 2}.{blkid}.conv2",
                          dst_prefix + f"res{grpid + 2}/block_{blkid + 1}/conv2")
            _convert_conv(backbone_prefix + f"res{grpid + 2}.{blkid}.conv3",
                          dst_prefix + f"res{grpid + 2}/block_{blkid + 1}/conv3")
            if blkid == 0:
                # first block of each stage has a projection shortcut
                _convert_conv(backbone_prefix + f"res{grpid + 2}.{blkid}.shortcut",
                              dst_prefix + f"res{grpid + 2}/block_{blkid + 1}/shortcut")
    # Neck / FPN lateral and output convs (plus P6/P7 top block for RetinaNet).
    if is_retina:
        for lvl in range(6, 8):
            _convert_conv(f"backbone.top_block.p{lvl}", f"neck/top_block/p{lvl}")
        for lvl in range(3, 6):
            _convert_conv(f"backbone.fpn_lateral{lvl}", f"neck/fpn_lateral{lvl}")
            _convert_conv(f"backbone.fpn_output{lvl}", f"neck/fpn_output{lvl}")
    elif has_fpn:
        for lvl in range(2, 6):
            _convert_conv(f"backbone.fpn_lateral{lvl}", f"neck/fpn_lateral{lvl}")
            _convert_conv(f"backbone.fpn_output{lvl}", f"neck/fpn_output{lvl}")
    # Build an index permutation that reorders each group of 4 box deltas from
    # (xmin, ymin, xmax, ymax) layout to (ymin, xmin, ymax, xmax).
    def get_box_indices(num_reg_classes):
        idx_xmin = np.arange(num_reg_classes) * 4
        idx_ymin = idx_xmin + 1
        idx_xmax = idx_xmin + 2
        idx_ymax = idx_xmin + 3
        idxs = np.stack([idx_ymin, idx_xmin, idx_ymax, idx_xmax], axis=-1)
        idxs = np.reshape(idxs, [num_reg_classes * 4])
        return idxs
    if is_retina:
        # RetinaNet head: shared cls/bbox subnets plus final predictors.
        for i in range(cfg.MODEL.RETINANET.NUM_CONVS):
            _convert_conv(f"head.cls_subnet.{2*i}", f"head/cls_subnet{2*i}")
            _convert_conv(f"head.bbox_subnet.{2*i}", f"head/bbox_subnet{2*i}")
        _convert_conv("head.cls_score", "head/cls_score")
        _convert_conv("head.bbox_pred", "head/bbox_pred")
        num_anchors = ret["head/bbox_pred/bias"].shape[0] // 4
        idxs = get_box_indices(num_anchors)
        v = ret["head/bbox_pred/bias"]
        ret["head/bbox_pred/bias"] = v[idxs]
        v = ret["head/bbox_pred/weights"]
        ret["head/bbox_pred/weights"] = v[..., idxs]
    elif cfg.MODEL.META_ARCHITECTURE != "SemanticSegmentor":
        # RPN:
        def _convert_rpn(src, dst):
            _convert_conv(src + ".conv", dst + "/share")
            _convert_conv(src + ".objectness_logits", dst + "/objectness_logits")
            _convert_conv(src + ".anchor_deltas", dst + "/anchor_deltas")
            # reorder anchor deltas into y-first box layout
            num_anchors = ret[dst + "/objectness_logits/bias"].shape[0]
            idxs = get_box_indices(num_anchors)
            v = ret[dst + "/anchor_deltas/bias"]
            ret[dst + "/anchor_deltas/bias"] = v[idxs]
            v = ret[dst + "/anchor_deltas/weights"]
            ret[dst + "/anchor_deltas/weights"] = v[..., idxs]
        _convert_rpn("proposal_generator.rpn_head", "proposal_generator/rpn_head")
        # Box predictor: class logits FC plus reordered box-delta FC.
        def _convert_box_predictor(src, dst):
            num_reg_classes = cfg.MODEL.ROI_HEADS.NUM_CLASSES
            if cfg.MODEL.ROI_BOX_HEAD.CLS_AGNOSTIC_BBOX_REG:
                num_reg_classes = 1
            idxs = get_box_indices(num_reg_classes)
            v = d.pop(src + ".bbox_pred.bias")
            ret[dst + "/box_deltas/bias"] = v[idxs]
            v = d.pop(src + ".bbox_pred.weight")
            ret[dst + "/box_deltas/weights"] = v.transpose()[..., idxs]
            _convert_fc(src + ".cls_score", dst + "/class_logits")
        # Fast R-CNN: box head
        has_cascade = cfg.MODEL.ROI_HEADS.NAME in ["CascadeROIHeads", "CascadeLCCHeads"]
        pooler_resolution = cfg.MODEL.ROI_BOX_HEAD.POOLER_RESOLUTION
        fc_in_channels = cfg.MODEL.NECK.OUT_CHANNELS
        if not has_fpn:
            fc_in_channels = cfg.MODEL.RESNETS.RES2_OUT_CHANNELS * 2 ** 3
        if cfg.MODEL.ROI_BOX_HEAD.NUM_CONV > 0:
            fc_in_channels = cfg.MODEL.ROI_BOX_HEAD.CONV_DIM
        fc_out_channels = cfg.MODEL.ROI_BOX_HEAD.FC_DIM
        if has_cascade:
            assert cfg.MODEL.ROI_BOX_HEAD.CLS_AGNOSTIC_BBOX_REG
            # one box head + predictor per cascade stage (3 stages)
            for k in range(3):
                for i in range(cfg.MODEL.ROI_BOX_HEAD.NUM_CONV):
                    _convert_conv(f"roi_heads.box_head.{k}.conv{i+1}",
                                  f"roi_heads/box_head_stage{k+1}/conv{i+1}")
                for i in range(cfg.MODEL.ROI_BOX_HEAD.NUM_FC):
                    _convert_fc(f"roi_heads.box_head.{k}.fc{i+1}",
                                f"roi_heads/box_head_stage{k+1}/fc{i+1}")
                    if i == 0:
                        # first FC follows flattened pooled features: reshape
                        # CHW-flattened weights into HWC-flattened order
                        w = ret[f"roi_heads/box_head_stage{k+1}/fc{i+1}/weights"]
                        w = w.reshape(
                            [fc_in_channels, pooler_resolution, pooler_resolution, fc_out_channels]
                        )
                        w = w.transpose(1, 2, 0, 3)
                        w = w.reshape(
                            [
                                pooler_resolution * pooler_resolution * fc_in_channels,
                                fc_out_channels
                            ]
                        )
                        ret[f"roi_heads/box_head_stage{k+1}/fc{i+1}/weights"] = w
                _convert_box_predictor(f"roi_heads.box_predictor.{k}", f"roi_heads/box_predictor_stage{k+1}")
        else:
            for i in range(cfg.MODEL.ROI_BOX_HEAD.NUM_CONV):
                _convert_conv(f"roi_heads.box_head.conv{i+1}", f"roi_heads/box_head/conv{i+1}")
            for i in range(cfg.MODEL.ROI_BOX_HEAD.NUM_FC):
                _convert_fc(f"roi_heads.box_head.fc{i+1}", f"roi_heads/box_head/fc{i+1}")
                if i == 0:
                    # same CHW -> HWC reshuffle as in the cascade branch above
                    w = ret[f"roi_heads/box_head/fc{i+1}/weights"]
                    w = w.reshape(
                        [fc_in_channels, pooler_resolution, pooler_resolution, fc_out_channels]
                    )
                    w = w.transpose(1, 2, 0, 3)
                    w = w.reshape(
                        [pooler_resolution * pooler_resolution * fc_in_channels, fc_out_channels]
                    )
                    ret[f"roi_heads/box_head/fc{i+1}/weights"] = w
            dst = "roi_heads/fastrcnn" if use_res5_in_stage2 else "roi_heads/box_predictor"
            _convert_box_predictor("roi_heads.box_predictor", dst)
        # mask head
        if cfg.MODEL.MASK_ON:
            for fcn in range(cfg.MODEL.ROI_MASK_HEAD.NUM_CONV):
                _convert_conv(f"roi_heads.mask_head.mask_fcn{fcn+1}",
                              f"roi_heads/mask_head/mask_fcn{fcn+1}")
            _convert_conv("roi_heads.mask_head.deconv", "roi_heads/mask_head/deconv")
            _convert_conv("roi_heads.mask_head.predictor", "roi_heads/mask_head/predictor")
    # semantic segmentation head
    if cfg.MODEL.META_ARCHITECTURE in ["PanopticFPN", "SemanticSegmentor"]:
        for i, in_feature in enumerate(cfg.MODEL.SEM_SEG_HEAD.IN_FEATURES):
            head_length = max(1, int(i + 2 - np.log2(cfg.MODEL.SEM_SEG_HEAD.COMMON_STRIDE)))
            for k in range(head_length):
                _convert_conv(f"sem_seg_head.{in_feature}.{2 * k}",
                              f"sem_seg_head/{in_feature}_{2 * k}")
        _convert_conv(f"sem_seg_head.predictor", f"sem_seg_head/predictor")
    # anchor buffers are generated, not learned; drop any leftovers
    for k in list(d.keys()):
        if "cell_anchors" in k:
            d.pop(k)
    # every source entry must have been consumed or explicitly discarded
    assert len(d) == 0, d.keys()
    return ret
| [
"numpy.stack",
"numpy.log2",
"numpy.reshape",
"numpy.arange"
] | [((3357, 3416), 'numpy.stack', 'np.stack', (['[idx_ymin, idx_xmin, idx_ymax, idx_xmax]'], {'axis': '(-1)'}), '([idx_ymin, idx_xmin, idx_ymax, idx_xmax], axis=-1)\n', (3365, 3416), True, 'import numpy as np\n'), ((3432, 3471), 'numpy.reshape', 'np.reshape', (['idxs', '[num_reg_classes * 4]'], {}), '(idxs, [num_reg_classes * 4])\n', (3442, 3471), True, 'import numpy as np\n'), ((3215, 3241), 'numpy.arange', 'np.arange', (['num_reg_classes'], {}), '(num_reg_classes)\n', (3224, 3241), True, 'import numpy as np\n'), ((8906, 8951), 'numpy.log2', 'np.log2', (['cfg.MODEL.SEM_SEG_HEAD.COMMON_STRIDE'], {}), '(cfg.MODEL.SEM_SEG_HEAD.COMMON_STRIDE)\n', (8913, 8951), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
Created on Mon Dec 9 01:58:58 2019
@author: iqbalsublime
"""
#================================================================================================================
#----------------------------------------------------------------------------------------------------------------
# K NEAREST NEIGHBOURS
#----------------------------------------------------------------------------------------------------------------
#================================================================================================================
# Details of implementation/tutorial is in : http://madhugnadig.com/articles/machine-learning/2017/01/13/implementing-k-nearest-neighbours-from-scratch-in-python.html
import math
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import style
import pandas as pd
import random
from collections import Counter
from sklearn import preprocessing
import time
#for plotting
plt.style.use('ggplot')
class CustomKNN:
    '''K-nearest-neighbours classifier using Euclidean distance and majority vote.'''
    def __init__(self):
        self.accurate_predictions = 0
        self.total_predictions = 0
        self.accuracy = 0.0

    def predict(self, training_data, to_predict, k = 3):
        '''
        Classify `to_predict` against `training_data` ({class: [feature vectors]}).

        Returns (predicted_class, confidence), where confidence is the fraction
        of the k nearest neighbours that voted for the winning class.
        '''
        # BUGFIX: the original `return`ed None here, which made callers that
        # unpack `predicted_class, confidence` crash with a TypeError.
        # Warn and continue instead (prediction is still well-defined).
        if len(training_data) >= k:
            print("K cannot be smaller than the total voting groups(ie. number of training data points)")
        distributions = []
        for group in training_data:
            for features in training_data[group]:
                euclidean_distance = np.linalg.norm(np.array(features) - np.array(to_predict))
                distributions.append([euclidean_distance, group])
        # classes of the k closest training points
        results = [i[1] for i in sorted(distributions)[:k]]
        # single most_common call instead of the original duplicated one
        result, votes = Counter(results).most_common(1)[0]
        confidence = votes / k
        return result, confidence

    def test(self, test_set, training_set):
        '''Run predict() over test_set ({class: [vectors]}) and record accuracy.'''
        for group in test_set:
            for data in test_set[group]:
                predicted_class, confidence = self.predict(training_set, data, k =3)
                if predicted_class == group:
                    self.accurate_predictions += 1
                else:
                    print("Wrong classification with confidence " + str(confidence * 100) + " and class " + str(predicted_class))
                self.total_predictions += 1
        self.accuracy = 100*(self.accurate_predictions/self.total_predictions)
        print("\nAcurracy :", str(self.accuracy) + "%")
def mod_data(df):
    '''Encode the CKD dataset's string/categorical values as numbers, in place.'''
    # df.replace matches whole cell values (not substrings), so e.g. 'notckd'
    # is unaffected by the 'ckd' rule; a single dict-based call is equivalent
    # to the original chain of replace() calls.
    encoding = {
        '?': -999999,
        'yes': 4, 'no': 2,
        'notpresent': 4, 'present': 2,
        'abnormal': 4, 'normal': 2,
        'poor': 4, 'good': 2,
        'ckd': 4, 'notckd': 2,
    }
    df.replace(encoding, inplace=True)
def main():
    '''Load the CKD dataset, split train/test, and report CustomKNN accuracy and runtime.'''
    df = pd.read_csv("chronic_kidney_disease.csv")
    mod_data(df)
    dataset = df.astype(float).values.tolist()
    # Normalize the data
    x = df.values  # returns a numpy array
    min_max_scaler = preprocessing.MinMaxScaler()
    x_scaled = min_max_scaler.fit_transform(x)
    df = pd.DataFrame(x_scaled)  # Replace df with normalized values
    # NOTE(review): `dataset` above was built from the *un-normalized* frame, so
    # the normalized df is never used downstream — confirm whether that is intended.
    # Shuffle the dataset
    random.shuffle(dataset)
    # 20% of the available data will be used for testing
    test_size = 0.2
    # The keys of the dict are the classes that the data is classified into
    training_set = {2: [], 4: []}
    test_set = {2: [], 4: []}
    # Split data into training and test for cross validation
    training_data = dataset[:-int(test_size * len(dataset))]
    test_data = dataset[-int(test_size * len(dataset)):]
    # Insert data into the training set; the last element of each record is the class label
    for record in training_data:
        training_set[record[-1]].append(record[:-1])
    # Insert data into the test set
    for record in test_data:
        test_set[record[-1]].append(record[:-1])
    # BUGFIX: time.clock() was deprecated in 3.3 and *removed* in Python 3.8;
    # perf_counter() is the documented replacement for interval timing.
    s = time.perf_counter()
    knn = CustomKNN()
    knn.test(test_set, training_set)
    e = time.perf_counter()
    print("Exec Time:", e - s)
if __name__ == "__main__":
main() | [
"random.shuffle",
"pandas.read_csv",
"time.clock",
"matplotlib.pyplot.style.use",
"collections.Counter",
"numpy.array",
"pandas.DataFrame",
"sklearn.preprocessing.MinMaxScaler"
] | [((993, 1016), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""ggplot"""'], {}), "('ggplot')\n", (1006, 1016), True, 'import matplotlib.pyplot as plt\n'), ((2826, 2867), 'pandas.read_csv', 'pd.read_csv', (['"""chronic_kidney_disease.csv"""'], {}), "('chronic_kidney_disease.csv')\n", (2837, 2867), True, 'import pandas as pd\n'), ((3011, 3039), 'sklearn.preprocessing.MinMaxScaler', 'preprocessing.MinMaxScaler', ([], {}), '()\n', (3037, 3039), False, 'from sklearn import preprocessing\n'), ((3092, 3114), 'pandas.DataFrame', 'pd.DataFrame', (['x_scaled'], {}), '(x_scaled)\n', (3104, 3114), True, 'import pandas as pd\n'), ((3178, 3201), 'random.shuffle', 'random.shuffle', (['dataset'], {}), '(dataset)\n', (3192, 3201), False, 'import random\n'), ((3982, 3994), 'time.clock', 'time.clock', ([], {}), '()\n', (3992, 3994), False, 'import time\n'), ((4056, 4068), 'time.clock', 'time.clock', ([], {}), '()\n', (4066, 4068), False, 'import time\n'), ((1485, 1503), 'numpy.array', 'np.array', (['features'], {}), '(features)\n', (1493, 1503), True, 'import numpy as np\n'), ((1505, 1525), 'numpy.array', 'np.array', (['to_predict'], {}), '(to_predict)\n', (1513, 1525), True, 'import numpy as np\n'), ((1653, 1669), 'collections.Counter', 'Counter', (['results'], {}), '(results)\n', (1660, 1669), False, 'from collections import Counter\n'), ((1707, 1723), 'collections.Counter', 'Counter', (['results'], {}), '(results)\n', (1714, 1723), False, 'from collections import Counter\n')] |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np
import tensorflow as tf
from cleverhans.utils_tf import model_train, model_eval, tf_model_load
from cleverhans.utils import AccuracyReport, set_log_level
class Trainer():
    '''Thin wrapper tying a model inference graph to cleverhans train/eval/restore.'''

    def __init__(self, sess, inference, data_params, train_params):
        """Store session, model and parameter dicts; build placeholders and RNG."""
        self.session = sess
        self.inference = inference
        self.data_params = data_params    # x/y shapes plus train/test arrays
        self.train_params = train_params  # batch size, save flags, etc.
        # accuracy bookkeeping across clean/adversarial evaluations
        self.report = AccuracyReport()
        # input/label placeholders shaped from the data params
        x_shape = self.data_params['x_shape']
        y_shape = self.data_params['y_shape']
        self.x = tf.placeholder(tf.float32, shape=x_shape)
        self.y = tf.placeholder(tf.float32, shape=y_shape)
        # fixed-seed RNG for reproducible training
        self.rng = np.random.RandomState([2017, 8, 30])

    def train(self, save=False):
        """Wrapper around cleverhans model_train with pre-setup."""
        model = self.inference
        self.preds = model.get_probs(self.x)
        model_train(
            sess=self.session,
            x=self.x,
            y=self.y,
            X_train=self.data_params['X_train'],
            Y_train=self.data_params['Y_train'],
            predictions=self.preds,
            evaluate=self.evaluate,
            save=self.train_params['save_model'],
            args=self.train_params,
            rng=self.rng,
            var_list=model.get_params())

    def evaluate(self):
        """Wrapper around cleverhans model_eval; records clean-data accuracy."""
        eval_params = {'batch_size': self.train_params['batch_size']}
        acc = model_eval(
            self.session, self.x, self.y, self.preds,
            self.data_params['X_test'], self.data_params['Y_test'], args=eval_params)
        self.report.clean_train_clean_eval = acc
        print('Test accuracy on legitimate examples: %0.4f' % acc)

    def restore(self, path):
        """Wrapper around cleverhans tf_model_load; restores weights from path."""
        return tf_model_load(self.session, path)
| [
"cleverhans.utils_tf.tf_model_load",
"cleverhans.utils_tf.model_eval",
"tensorflow.placeholder",
"cleverhans.utils.AccuracyReport",
"numpy.random.RandomState"
] | [((704, 720), 'cleverhans.utils.AccuracyReport', 'AccuracyReport', ([], {}), '()\n', (718, 720), False, 'from cleverhans.utils import AccuracyReport, set_log_level\n'), ((768, 829), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': "self.data_params['x_shape']"}), "(tf.float32, shape=self.data_params['x_shape'])\n", (782, 829), True, 'import tensorflow as tf\n'), ((847, 908), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': "self.data_params['y_shape']"}), "(tf.float32, shape=self.data_params['y_shape'])\n", (861, 908), True, 'import tensorflow as tf\n'), ((970, 1006), 'numpy.random.RandomState', 'np.random.RandomState', (['[2017, 8, 30]'], {}), '([2017, 8, 30])\n', (991, 1006), True, 'import numpy as np\n'), ((1867, 1998), 'cleverhans.utils_tf.model_eval', 'model_eval', (['self.session', 'self.x', 'self.y', 'self.preds', "self.data_params['X_test']", "self.data_params['Y_test']"], {'args': 'eval_params'}), "(self.session, self.x, self.y, self.preds, self.data_params[\n 'X_test'], self.data_params['Y_test'], args=eval_params)\n", (1877, 1998), False, 'from cleverhans.utils_tf import model_train, model_eval, tf_model_load\n'), ((2240, 2273), 'cleverhans.utils_tf.tf_model_load', 'tf_model_load', (['self.session', 'path'], {}), '(self.session, path)\n', (2253, 2273), False, 'from cleverhans.utils_tf import model_train, model_eval, tf_model_load\n')] |
# Modified by Microsoft Corporation.
# Licensed under the MIT license.
import json
import operator
import os
import pickle
import subprocess
import sys
import time
from collections import deque
from contextlib import contextmanager
from datetime import datetime
from importlib import reload
from pprint import pformat
import numpy as np
import pandas as pd
import pydash as ps
import regex as re
import torch
import torch.multiprocessing as mp
import ujson
import yaml
from convlab import ROOT_DIR, EVAL_MODES
NUM_CPUS = mp.cpu_count()
FILE_TS_FORMAT = '%Y_%m_%d_%H%M%S'
RE_FILE_TS = re.compile(r'(\d{4}_\d{2}_\d{2}_\d{6})')
class LabJsonEncoder(json.JSONEncoder):
    '''JSON encoder that handles numpy/pandas values json can't natively serialize.'''

    def default(self, obj):
        # numpy scalars -> native numbers; arrays/Series -> lists; anything else -> str
        if isinstance(obj, np.integer):
            return int(obj)
        if isinstance(obj, np.floating):
            return float(obj)
        if isinstance(obj, (np.ndarray, pd.Series)):
            return obj.tolist()
        return str(obj)
def batch_get(arr, idxs):
    '''Get multi-idxs from an array depending if it's a python list or np.array

    @param {list|deque|np.ndarray} arr Source container
    @param {list} idxs Indices to gather
    @returns {np.ndarray} the gathered values
    '''
    if isinstance(arr, (list, deque)):
        # BUGFIX: operator.itemgetter(*idxs) returns a bare element (not a
        # tuple) when idxs has exactly one index, which produced a 0-d array
        # here. Gather explicitly so a single index yields a 1-element array,
        # consistent with the np.ndarray branch below.
        return np.array([arr[i] for i in idxs])
    else:
        return arr[idxs]
def calc_srs_mean_std(sr_list):
    '''Given a list of series, calculate their row-wise mean and std.'''
    stacked = pd.DataFrame({idx: sr for idx, sr in enumerate(sr_list)})
    return stacked.mean(axis=1), stacked.std(axis=1)
def calc_ts_diff(ts2, ts1):
    '''
    Calculate the time from ts ts1 to ts2
    @param {str} ts2 Later ts in the FILE_TS_FORMAT
    @param {str} ts1 Earlier ts in the FILE_TS_FORMAT
    @returns {str} delta_t in %H:%M:%S format
    @example

    ts1 = '2017_10_17_084739'
    ts2 = '2017_10_17_084740'
    ts_diff = util.calc_ts_diff(ts2, ts1)
    # => '0:00:01'
    '''
    later = datetime.strptime(ts2, FILE_TS_FORMAT)
    earlier = datetime.strptime(ts1, FILE_TS_FORMAT)
    return str(later - earlier)
def cast_df(val):
    '''missing pydash method to cast value as DataFrame'''
    # pass DataFrames through untouched; wrap anything else
    return val if isinstance(val, pd.DataFrame) else pd.DataFrame(val)
def cast_list(val):
    '''missing pydash method to cast value as list'''
    # isinstance(list) is exactly what ps.is_list checks
    return val if isinstance(val, list) else [val]
def clear_periodic_ckpt(prepath):
    '''Clear periodic (with -epi) ckpt files in prepath

    Deletes every file matching `{prepath}*` via a shell `rm`, but only when
    the prepath contains '-epi' (i.e. it is a periodic checkpoint, not a
    final/best one).
    '''
    if '-epi' in prepath:
        run_cmd(f'rm {prepath}*')
def concat_batches(batches):
    '''
    Concat batch objects from body.memory.sample() into one batch, when all bodies experience similar envs
    Also concat any nested epi sub-batches into flat batch
    {k: arr1} + {k: arr2} = {k: arr1 + arr2}
    '''
    # a nested 'dones' entry means episodic format: each key holds per-episode sub-arrays
    is_episodic = isinstance(batches[0]['dones'][0], (list, np.ndarray))

    def _as_flat(data):
        # flatten an episodic entry into a plain array; pass through otherwise
        return np.concatenate(data) if is_episodic else data

    return {
        k: np.concatenate([_as_flat(batch[k]) for batch in batches])
        for k in batches[0]
    }
def downcast_float32(df):
    '''Downcast any float64 col to float32 to allow safer pandas comparison'''
    float_cols = [col for col in df.columns if df[col].dtype == 'float']
    for col in float_cols:
        df[col] = df[col].astype('float32')
    return df
def epi_done(done):
    '''
    General method to check if episode is done for both single and vectorized env
    Only return True for singleton done since vectorized env does not have a natural episode boundary
    '''
    # vectorized done (array/list) never terminates an episode here
    if not np.isscalar(done):
        return False
    return done
def find_ckpt(prepath):
    '''Find the ckpt-lorem-ipsum in a string and return lorem-ipsum'''
    if 'ckpt' not in prepath:
        return None
    # first '_'-separated segment starting with 'ckpt' (None if absent,
    # matching the pydash find used originally)
    segment = next((s for s in prepath.split('_') if s.startswith('ckpt')), None)
    return segment.replace('ckpt-', '')
def frame_mod(frame, frequency, num_envs):
    '''
    Generic mod for (frame % frequency == 0) for when num_envs is 1 or more.
    A vector env advances frame by num_envs per tick, so accept any remainder
    below num_envs instead of requiring an exact multiple.
    '''
    window = num_envs if num_envs else 1
    return frame % frequency < window
def flatten_dict(obj, delim='.'):
    '''Missing pydash method to flatten dict

    Flattens nested dicts into a single level with delim-joined keys, e.g.
    {'a': {'b': 1}} -> {'a.b': 1}. Lists whose first element is a dict are
    expanded with their index as a key segment.

    NOTE(review): in the list branch, flatten_dict is re-run over the whole
    accumulator `nobj` for each object element — this looks intentional but
    re-flattens previously added entries; confirm before changing.
    '''
    nobj = {}
    for key, val in obj.items():
        if ps.is_dict(val) and not ps.is_empty(val):
            # recurse into non-empty sub-dicts and prefix their keys
            strip = flatten_dict(val, delim)
            for k, v in strip.items():
                nobj[key + delim + k] = v
        elif ps.is_list(val) and not ps.is_empty(val) and ps.is_dict(val[0]):
            # list of dicts: use the element index as a key segment
            for idx, v in enumerate(val):
                nobj[key + delim + str(idx)] = v
                if ps.is_object(v):
                    nobj = flatten_dict(nobj, delim)
        else:
            # scalars, empty containers, and plain lists pass through unchanged
            nobj[key] = val
    return nobj
def get_class_name(obj, lower=False):
    '''Get the class name of an object, optionally lowercased.'''
    name = obj.__class__.__name__
    return name.lower() if lower else name
def get_class_attr(obj):
    '''Get the class attr of an object as dict.

    Complex values (anything with a __dict__, and tuples) are stringified so
    the result is safely printable/serializable.
    '''
    return {
        k: str(v) if hasattr(v, '__dict__') or isinstance(v, tuple) else v
        for k, v in obj.__dict__.items()
    }
def get_file_ext(data_path):
    '''get the `.ext` of file.ext (empty string when there is none)'''
    _, ext = os.path.splitext(data_path)
    return ext
def get_fn_list(a_cls):
    '''
    Get the callable, non-private functions of a class
    @returns {[*str]} A list of strings of fn names
    '''
    return [
        fn for fn in dir(a_cls)
        if not fn.endswith('__') and callable(getattr(a_cls, fn))
    ]
def get_git_sha():
    '''Return the current git HEAD commit sha of ROOT_DIR as a stripped string.'''
    return subprocess.check_output(['git', 'rev-parse', 'HEAD'], close_fds=True, cwd=ROOT_DIR).decode().strip()
def get_lab_mode():
    '''Read the current lab mode from the `lab_mode` env var (None if unset).'''
    return os.environ.get('lab_mode')
def get_prepath(spec, unit='experiment'):
    '''Build the output path prefix for a lab unit from its spec.

    Shape: output/{name}_{experiment_ts}/{name}[_t{trial}][_s{session}][_ckpt-{ckpt}]
    with the trial/session suffixes included per `unit`.
    '''
    meta = spec['meta']
    name = spec['name']
    predir = f'output/{name}_{meta["experiment_ts"]}'
    t_str = '' if meta['trial'] is None else f'_t{meta["trial"]}'
    s_str = '' if meta['session'] is None else f'_s{meta["session"]}'
    if unit == 'trial':
        prename = f'{name}{t_str}'
    elif unit == 'session':
        prename = f'{name}{t_str}{s_str}'
    else:
        prename = name
    if meta['ckpt'] is not None:
        prename += f'_ckpt-{meta["ckpt"]}'
    return f'{predir}/{prename}'
def get_ts(pattern=FILE_TS_FORMAT):
    '''
    Get current ts, defaults to format used for filename
    @param {str} pattern To format the ts
    @returns {str} ts
    @example

    util.get_ts()
    # => '2017_10_17_084739'
    '''
    ts = datetime.now().strftime(pattern)
    # sanity-check: the result must look like a file timestamp
    assert RE_FILE_TS.search(ts)
    return ts
def insert_folder(prepath, folder):
    '''Insert a folder into prepath, just before the final path component.'''
    parts = prepath.split('/')
    parts.insert(len(parts) - 1, folder)
    return '/'.join(parts)
def in_eval_lab_modes():
    '''Check if lab_mode is one of EVAL_MODES

    @returns {bool} True when the `lab_mode` env var is one of the package's
    evaluation modes (EVAL_MODES from convlab).
    '''
    return get_lab_mode() in EVAL_MODES
def is_jupyter():
    '''Check if process is in Jupyter kernel.

    @returns {bool} True iff `get_ipython` exists (only defined inside an
    IPython/Jupyter kernel).
    '''
    try:
        get_ipython().config
        return True
    except NameError:
        # get_ipython is undefined outside IPython -> plain Python process.
        # (The original also had an unreachable trailing `return False`; removed.)
        return False
@contextmanager
def ctx_lab_mode(lab_mode):
    '''
    Creates context to run method with a specific lab_mode
    @example
    with util.ctx_lab_mode('eval'):
        foo()

    @util.ctx_lab_mode('eval')
    def foo():
        ...
    '''
    saved = os.environ.get('lab_mode')
    os.environ['lab_mode'] = lab_mode
    yield
    # restore the previous value (or remove the key if it did not exist)
    if saved is None:
        os.environ.pop('lab_mode')
    else:
        os.environ['lab_mode'] = saved
def monkey_patch(base_cls, extend_cls):
    '''Monkey patch a base class with methods from extend_cls

    Copies every public callable of extend_cls (per get_fn_list) onto
    base_cls, overwriting any existing attribute of the same name.
    '''
    ext_fn_list = get_fn_list(extend_cls)
    for fn in ext_fn_list:
        setattr(base_cls, fn, getattr(extend_cls, fn))
def parallelize(fn, args, num_cpus=NUM_CPUS):
    '''
    Parallelize a method fn, args and return results with order preserved per args.
    args should be a list of tuples.
    @param fn Picklable callable to run in worker processes
    @param {list} args List of argument tuples, one per call
    @param {int} num_cpus Worker count (defaults to machine CPU count)
    @returns {list} results Order preserved output from fn.
    '''
    pool = mp.Pool(num_cpus, maxtasksperchild=1)
    try:
        # BUGFIX: the pool was leaked if starmap raised; always close and
        # join so worker processes are reaped on both success and failure.
        results = pool.starmap(fn, args)
    finally:
        pool.close()
        pool.join()
    return results
def prepath_split(prepath):
    '''
    Split prepath into useful names. Works with predir (prename will be None)
    prepath: output/dqn_pong_2018_12_02_082510/dqn_pong_t0_s0
    predir: output/dqn_pong_2018_12_02_082510
    prefolder: dqn_pong_2018_12_02_082510
    prename: dqn_pong_t0_s0
    spec_name: dqn_pong
    experiment_ts: 2018_12_02_082510
    ckpt: ckpt-best of dqn_pong_t0_s0_ckpt-best if available
    '''
    prepath = prepath.strip('_')
    # drop everything up to and including the 'output/' root
    tail = prepath.split('output/')[-1]
    ckpt = find_ckpt(tail)
    if ckpt is not None:  # separate ckpt
        tail = tail.replace(f'_ckpt-{ckpt}', '')
    if '/' in tail:  # tail = prefolder/prename
        prefolder, prename = tail.split('/', 1)
    else:
        prefolder, prename = tail, None
    predir = f'output/{prefolder}'
    # spec_name is the prefolder with its timestamp removed; experiment_ts is
    # the first timestamp match in the prefolder
    spec_name = RE_FILE_TS.sub('', prefolder).strip('_')
    experiment_ts = RE_FILE_TS.findall(prefolder)[0]
    return predir, prefolder, prename, spec_name, experiment_ts, ckpt
def prepath_to_idxs(prepath):
    '''Extract trial index and session index from prepath if available

    @param {str} prepath e.g. output/dqn_..._t0_s1
    @returns {(int|None, int|None)} (trial_index, session_index); both None
        when the prename carries no _t/_s suffixes.
    '''
    _, _, prename, spec_name, _, _ = prepath_split(prepath)
    # what remains after removing the spec name is e.g. 't0_s1'
    idxs_tail = prename.replace(spec_name, '').strip('_')
    idxs_strs = ps.compact(idxs_tail.split('_')[:2])
    if ps.is_empty(idxs_strs):
        return None, None
    tidx = idxs_strs[0]
    assert tidx.startswith('t')
    trial_index = int(tidx.strip('t'))
    if len(idxs_strs) == 1:  # has session
        session_index = None
    else:
        sidx = idxs_strs[1]
        assert sidx.startswith('s')
        session_index = int(sidx.strip('s'))
    return trial_index, session_index
def prepath_to_spec(prepath):
    '''
    Given a prepath, read the correct spec recover the meta_spec that will return the same prepath for eval lab modes
    example: output/a2c_cartpole_2018_06_13_220436/a2c_cartpole_t0_s0

    @param {str} prepath
    @returns {dict} spec with its meta fields (experiment_ts, ckpt, trial, session) restored
    '''
    predir, _, prename, _, experiment_ts, ckpt = prepath_split(prepath)
    # FIX: use a raw string for the regex; '\d' in a plain literal is an
    # invalid escape sequence (DeprecationWarning on modern Python).
    sidx_res = re.search(r'_s\d+', prename)
    if sidx_res:  # replace the _s0 if any
        prename = prename.replace(sidx_res[0], '')
    spec_path = f'{predir}/{prename}_spec.json'
    # read the spec of prepath
    spec = read(spec_path)
    # recover meta_spec
    trial_index, session_index = prepath_to_idxs(prepath)
    meta_spec = spec['meta']
    meta_spec['experiment_ts'] = experiment_ts
    meta_spec['ckpt'] = ckpt
    meta_spec['experiment'] = 0
    meta_spec['trial'] = trial_index
    meta_spec['session'] = session_index
    # round-trip check: the recovered spec must regenerate this prepath
    check_prepath = get_prepath(spec, unit='session')
    assert check_prepath in prepath, f'{check_prepath}, {prepath}'
    return spec
def read(data_path, **kwargs):
    '''
    Universal data reading method with smart data parsing
    - {.csv} to DataFrame
    - {.json} to dict, list
    - {.yml} to dict
    - {*} to str
    @param {str} data_path The data path to read from
    @returns {data} The read data in sensible format
    @raises {FileNotFoundError} when data_path does not exist as a file
    @example

    data_df = util.read('test/fixture/lib/util/test_df.csv')
    # => <DataFrame>

    data_dict = util.read('test/fixture/lib/util/test_dict.json')
    data_dict = util.read('test/fixture/lib/util/test_dict.yml')
    # => <dict>

    data_list = util.read('test/fixture/lib/util/test_list.json')
    # => <list>

    data_str = util.read('test/fixture/lib/util/test_str.txt')
    # => <str>
    '''
    data_path = smart_path(data_path)
    try:
        assert os.path.isfile(data_path)
    except AssertionError:
        raise FileNotFoundError(data_path)
    # dispatch on extension to the matching reader submethod
    ext = get_file_ext(data_path)
    if ext == '.csv':
        data = read_as_df(data_path, **kwargs)
    elif ext == '.pkl':
        data = read_as_pickle(data_path, **kwargs)
    else:
        data = read_as_plain(data_path, **kwargs)
    return data
def read_as_df(data_path, **kwargs):
    '''Submethod to read data as DataFrame

    @param {str} data_path Path to a CSV file
    @param kwargs Forwarded to pd.read_csv
    @returns {pd.DataFrame}
    '''
    # the unused `ext = get_file_ext(data_path)` local has been removed
    return pd.read_csv(data_path, **kwargs)
def read_as_pickle(data_path, **kwargs):
    '''Submethod to read data as pickle (kwargs accepted for API symmetry, unused).'''
    with open(data_path, 'rb') as f:
        return pickle.load(f)
def read_as_plain(data_path, **kwargs):
    '''Submethod to read data as plain type

    @param {str} data_path .json -> ujson.load, .yml -> yaml.load, else raw text
    @param kwargs Forwarded to the json/yaml loader
    @returns {dict|list|str}
    '''
    ext = get_file_ext(data_path)
    # FIX: use a context manager so the handle is closed even when the
    # json/yaml parser raises (the original leaked it on error).
    with open(data_path, 'r') as open_file:
        if ext == '.json':
            data = ujson.load(open_file, **kwargs)
        elif ext == '.yml':
            # NOTE(review): yaml.load without an explicit Loader is unsafe on
            # untrusted input; consider yaml.safe_load if tags aren't needed.
            data = yaml.load(open_file, **kwargs)
        else:
            data = open_file.read()
    return data
def run_cmd(cmd):
    '''Run shell command

    Echoes the command, launches it detached (non-blocking) with stdout and
    stderr merged into a pipe, and returns the Popen handle; pair with
    run_cmd_wait() to collect output.
    NOTE(review): shell=True executes `cmd` through the shell — only pass
    trusted strings.
    '''
    print(f'+ {cmd}')
    proc = subprocess.Popen(cmd, cwd=ROOT_DIR, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, close_fds=True)
    return proc
def run_cmd_wait(proc):
    '''Wait on a running process created by util.run_cmd and print its stdout

    @param {subprocess.Popen} proc Process with stdout piped
    @returns {bytes} remaining output after streaming stdout
    @raises {subprocess.CalledProcessError} when the process exits non-zero
    '''
    for line in proc.stdout:
        print(line.decode(), end='')
    output = proc.communicate()[0]
    if proc.returncode != 0:
        # BUGFIX: CalledProcessError's signature is (returncode, cmd, output);
        # the original passed (proc.args, proc.returncode, ...) — swapped.
        raise subprocess.CalledProcessError(proc.returncode, proc.args, output)
    else:
        return output
def self_desc(cls):
    '''Method to get self description, used at init.

    Builds a human-readable multi-line summary "ClassName:\n- attr = value"
    from the object's attributes (via get_class_attr). The 'spec' attribute is
    shortened to its name; dicts (or lists of dicts) are pretty-printed.
    '''
    desc_list = [f'{get_class_name(cls)}:']
    for k, v in get_class_attr(cls).items():
        if k == 'spec':
            # specs are large; show only their name
            desc_v = v['name']
        elif ps.is_dict(v) or ps.is_dict(ps.head(v)):
            desc_v = pformat(v)
        else:
            desc_v = v
        desc_list.append(f'- {k} = {desc_v}')
    desc = '\n'.join(desc_list)
    return desc
def set_attr(obj, attr_dict, keys=None):
    '''Set attribute of an object from a dict, optionally restricted to `keys`.'''
    if keys is not None:
        # keep only the requested keys that actually exist (like ps.pick)
        attr_dict = {k: attr_dict[k] for k in keys if k in attr_dict}
    for name, value in attr_dict.items():
        setattr(obj, name, value)
    return obj
def set_cuda_id(spec):
    '''Use trial and session id to hash and modulo cuda device count for a cuda_id to maximize device usage. Sets the net_spec for the base Net class to pick up.'''
    # Don't trigger any cuda call if not using GPU. Otherwise will break multiprocessing on machines with CUDA.
    # see issues https://github.com/pytorch/pytorch/issues/334 https://github.com/pytorch/pytorch/issues/3491 https://github.com/pytorch/pytorch/issues/9996
    for agent_spec in spec['agent']:
        net_spec = agent_spec.get('net')
        if not net_spec or not net_spec.get('gpu'):
            return
    meta_spec = spec['meta']
    trial_idx = meta_spec['trial'] or 0
    session_idx = meta_spec['session'] or 0
    if meta_spec['distributed'] == 'shared':
        # shared hogwild uses only global networks, offset them to idx 0
        session_idx = 0
    job_idx = trial_idx * meta_spec['max_session'] + session_idx + meta_spec['cuda_offset']
    device_count = torch.cuda.device_count()
    cuda_id = job_idx % device_count if device_count else None
    for agent_spec in spec['agent']:
        agent_spec['net']['cuda_id'] = cuda_id
def set_logger(spec, logger, unit=None):
    '''Set the logger for a lab unit given its spec

    Exposes the unit's log path prefix via the LOG_PREPATH env var
    (prepath with a 'log' folder inserted), then reloads the logger module
    so it picks the new path up.
    '''
    os.environ['LOG_PREPATH'] = insert_folder(get_prepath(spec, unit=unit), 'log')
    reload(logger)  # to set session-specific logger
def set_random_seed(spec):
    '''Generate and set random seed for relevant modules, and record it in spec.meta.random_seed'''
    torch.set_num_threads(1)  # prevent multithread slowdown, set again for hogwild
    meta = spec['meta']
    # mix trial, session and wall-clock into one seed
    random_seed = int(1e5 * (meta['trial'] or 0) + 1e3 * (meta['session'] or 0) + time.time())
    torch.cuda.manual_seed_all(random_seed)
    torch.manual_seed(random_seed)
    np.random.seed(random_seed)
    meta['random_seed'] = random_seed
    return random_seed
def _sizeof(obj, seen=None):
'''Recursively finds size of objects'''
size = sys.getsizeof(obj)
if seen is None:
seen = set()
obj_id = id(obj)
if obj_id in seen:
return 0
# Important mark as seen *before* entering recursion to gracefully handle
# self-referential objects
seen.add(obj_id)
if isinstance(obj, dict):
size += sum([_sizeof(v, seen) for v in obj.values()])
size += sum([_sizeof(k, seen) for k in obj.keys()])
elif hasattr(obj, '__dict__'):
size += _sizeof(obj.__dict__, seen)
elif hasattr(obj, '__iter__') and not isinstance(obj, (str, bytes, bytearray)):
size += sum([_sizeof(i, seen) for i in obj])
return size
def sizeof(obj, divisor=1e6):
    '''Return the size of object, in MB by default

    @param obj Any object; measured recursively via _sizeof
    @param {float} divisor Unit divisor (1e6 -> MB, 1e3 -> KB, 1 -> bytes)
    @returns {float} size in the chosen unit
    '''
    return _sizeof(obj) / divisor
def smart_path(data_path, as_dir=False):
    '''
    Resolve data_path into abspath with fallback to join from ROOT_DIR
    @param {str} data_path The input data path to resolve
    @param {bool} as_dir Whether to return as dirname
    @returns {str} The normalized absolute data_path
    @example

    util.smart_path('convlab/lib')
    # => '/Users/ANON/Documents/convlab/convlab/lib'

    util.smart_path('/tmp')
    # => '/tmp'
    '''
    if not os.path.isabs(data_path):
        # prefer a path that exists relative to the cwd; else anchor at ROOT_DIR
        candidate = os.path.abspath(data_path)
        data_path = candidate if os.path.exists(candidate) else os.path.join(ROOT_DIR, data_path)
    if as_dir:
        data_path = os.path.dirname(data_path)
    return os.path.normpath(data_path)
def split_minibatch(batch, mb_size):
    '''Split a batch into minibatches of mb_size or smaller, without replacement'''
    size = len(batch['rewards'])
    assert mb_size < size, f'Minibatch size {mb_size} must be < batch size {size}'
    shuffled_idxs = np.arange(size)
    np.random.shuffle(shuffled_idxs)
    n_chunks = int(size / mb_size)
    # each chunk of indices selects one minibatch slice from every batch array
    return [
        {key: val[chunk_idxs] for key, val in batch.items()}
        for chunk_idxs in np.array_split(shuffled_idxs, n_chunks)
    ]
def to_json(d, indent=2):
    '''Shorthand method for stringify JSON with indent'''
    return json.dumps(d, cls=LabJsonEncoder, indent=indent)
def to_render():
    '''True when the lab mode allows rendering and the RENDER env flag is enabled'''
    mode_allows_render = get_lab_mode() in ('dev', 'enjoy')
    return mode_allows_render and os.environ.get('RENDER', 'true') == 'true'
def to_torch_batch(batch, device, is_episodic):
    '''Mutate a batch (dict) to make its values from numpy into PyTorch tensor'''
    for key in batch:
        if is_episodic:  # episodic format: flatten the list of per-episode arrays
            batch[key] = np.concatenate(batch[key])
        elif isinstance(batch[key], list):
            batch[key] = np.array(batch[key])
        batch[key] = torch.from_numpy(batch[key].astype(np.float32)).to(device)
    return batch
def write(data, data_path):
    '''
    Universal data writing method with smart data parsing
    - {.csv} from DataFrame
    - {.json} from dict, list
    - {.yml} from dict
    - {*} from str(*)
    @param {*} data The data to write
    @param {str} data_path The data path to write to
    @returns {data_path} The data path written to
    @example
    data_path = util.write(data_df, 'test/fixture/lib/util/test_df.csv')
    data_path = util.write(data_dict, 'test/fixture/lib/util/test_dict.json')
    data_path = util.write(data_dict, 'test/fixture/lib/util/test_dict.yml')
    data_path = util.write(data_list, 'test/fixture/lib/util/test_list.json')
    data_path = util.write(data_str, 'test/fixture/lib/util/test_str.txt')
    '''
    data_path = smart_path(data_path)
    os.makedirs(os.path.dirname(data_path), exist_ok=True)
    # pick the writer submethod from the file extension
    ext = get_file_ext(data_path)
    if ext == '.csv':
        writer = write_as_df
    elif ext == '.pkl':
        writer = write_as_pickle
    else:
        writer = write_as_plain
    writer(data, data_path)
    return data_path
def write_as_df(data, data_path):
    '''Submethod to write data as DataFrame (CSV, no index column)

    @param {*} data Data castable to DataFrame via cast_df
    @param {str} data_path The .csv path to write to
    @returns {str} data_path
    '''
    df = cast_df(data)
    # removed unused local `ext = get_file_ext(data_path)` (dead code)
    df.to_csv(data_path, index=False)
    return data_path
def write_as_pickle(data, data_path):
    '''Submethod to write data as a pickle file and return the written path'''
    with open(data_path, 'wb') as out_file:
        pickle.dump(data, out_file)
    return data_path
def write_as_plain(data, data_path):
    '''Submethod to write data as plain type (.json, .yml, or str(*) fallback)

    @param {*} data The data to serialize
    @param {str} data_path The path to write to; extension selects the format
    @returns {str} data_path
    '''
    ext = get_file_ext(data_path)
    # use a context manager so the handle is closed even if dump raises
    # (original leaked the file descriptor on exception)
    with open(data_path, 'w') as open_file:
        if ext == '.json':
            json.dump(data, open_file, indent=2, cls=LabJsonEncoder)
        elif ext == '.yml':
            yaml.dump(data, open_file)
        else:
            open_file.write(str(data))
    return data_path
| [
"pandas.read_csv",
"torch.cuda.device_count",
"yaml.load",
"numpy.array_split",
"numpy.array",
"pydash.is_empty",
"operator.itemgetter",
"torch.multiprocessing.cpu_count",
"numpy.arange",
"ujson.load",
"regex.search",
"os.path.exists",
"pydash.is_dict",
"numpy.isscalar",
"subprocess.Pope... | [((525, 539), 'torch.multiprocessing.cpu_count', 'mp.cpu_count', ([], {}), '()\n', (537, 539), True, 'import torch.multiprocessing as mp\n'), ((588, 631), 'regex.compile', 're.compile', (['"""(\\\\d{4}_\\\\d{2}_\\\\d{2}_\\\\d{6})"""'], {}), "('(\\\\d{4}_\\\\d{2}_\\\\d{2}_\\\\d{6})')\n", (598, 631), True, 'import regex as re\n'), ((2094, 2111), 'pandas.DataFrame', 'pd.DataFrame', (['val'], {}), '(val)\n', (2106, 2111), True, 'import pandas as pd\n'), ((2195, 2210), 'pydash.is_list', 'ps.is_list', (['val'], {}), '(val)\n', (2205, 2210), True, 'import pydash as ps\n'), ((5865, 5891), 'os.environ.get', 'os.environ.get', (['"""lab_mode"""'], {}), "('lab_mode')\n", (5879, 5891), False, 'import os\n'), ((6800, 6814), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (6812, 6814), False, 'from datetime import datetime\n'), ((7673, 7699), 'os.environ.get', 'os.environ.get', (['"""lab_mode"""'], {}), "('lab_mode')\n", (7687, 7699), False, 'import os\n'), ((8357, 8394), 'torch.multiprocessing.Pool', 'mp.Pool', (['num_cpus'], {'maxtasksperchild': '(1)'}), '(num_cpus, maxtasksperchild=1)\n', (8364, 8394), True, 'import torch.multiprocessing as mp\n'), ((9744, 9766), 'pydash.is_empty', 'ps.is_empty', (['idxs_strs'], {}), '(idxs_strs)\n', (9755, 9766), True, 'import pydash as ps\n'), ((10441, 10469), 'regex.search', 're.search', (['"""_s\\\\d+"""', 'prename'], {}), "('_s\\\\d+', prename)\n", (10450, 10469), True, 'import regex as re\n'), ((12357, 12389), 'pandas.read_csv', 'pd.read_csv', (['data_path'], {}), '(data_path, **kwargs)\n', (12368, 12389), True, 'import pandas as pd\n'), ((13036, 13153), 'subprocess.Popen', 'subprocess.Popen', (['cmd'], {'cwd': 'ROOT_DIR', 'shell': '(True)', 'stdout': 'subprocess.PIPE', 'stderr': 'subprocess.STDOUT', 'close_fds': '(True)'}), '(cmd, cwd=ROOT_DIR, shell=True, stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT, close_fds=True)\n', (13052, 13153), False, 'import subprocess\n'), ((15162, 15187), 
'torch.cuda.device_count', 'torch.cuda.device_count', ([], {}), '()\n', (15185, 15187), False, 'import torch\n'), ((15524, 15538), 'importlib.reload', 'reload', (['logger'], {}), '(logger)\n', (15530, 15538), False, 'from importlib import reload\n'), ((15706, 15730), 'torch.set_num_threads', 'torch.set_num_threads', (['(1)'], {}), '(1)\n', (15727, 15730), False, 'import torch\n'), ((15941, 15980), 'torch.cuda.manual_seed_all', 'torch.cuda.manual_seed_all', (['random_seed'], {}), '(random_seed)\n', (15967, 15980), False, 'import torch\n'), ((15985, 16015), 'torch.manual_seed', 'torch.manual_seed', (['random_seed'], {}), '(random_seed)\n', (16002, 16015), False, 'import torch\n'), ((16020, 16047), 'numpy.random.seed', 'np.random.seed', (['random_seed'], {}), '(random_seed)\n', (16034, 16047), True, 'import numpy as np\n'), ((16203, 16221), 'sys.getsizeof', 'sys.getsizeof', (['obj'], {}), '(obj)\n', (16216, 16221), False, 'import sys\n'), ((17699, 17726), 'os.path.normpath', 'os.path.normpath', (['data_path'], {}), '(data_path)\n', (17715, 17726), False, 'import os\n'), ((17977, 17992), 'numpy.arange', 'np.arange', (['size'], {}), '(size)\n', (17986, 17992), True, 'import numpy as np\n'), ((17997, 18020), 'numpy.random.shuffle', 'np.random.shuffle', (['idxs'], {}), '(idxs)\n', (18014, 18020), True, 'import numpy as np\n'), ((18072, 18100), 'numpy.array_split', 'np.array_split', (['idxs', 'chunks'], {}), '(idxs, chunks)\n', (18086, 18100), True, 'import numpy as np\n'), ((18391, 18439), 'json.dumps', 'json.dumps', (['d'], {'indent': 'indent', 'cls': 'LabJsonEncoder'}), '(d, indent=indent, cls=LabJsonEncoder)\n', (18401, 18439), False, 'import json\n'), ((19768, 19794), 'os.path.dirname', 'os.path.dirname', (['data_path'], {}), '(data_path)\n', (19783, 19794), False, 'import os\n'), ((19799, 19835), 'os.makedirs', 'os.makedirs', (['data_dir'], {'exist_ok': '(True)'}), '(data_dir, exist_ok=True)\n', (19810, 19835), False, 'import os\n'), ((1843, 1881), 
'datetime.datetime.strptime', 'datetime.strptime', (['ts2', 'FILE_TS_FORMAT'], {}), '(ts2, FILE_TS_FORMAT)\n', (1860, 1881), False, 'from datetime import datetime\n'), ((1884, 1922), 'datetime.datetime.strptime', 'datetime.strptime', (['ts1', 'FILE_TS_FORMAT'], {}), '(ts1, FILE_TS_FORMAT)\n', (1901, 1922), False, 'from datetime import datetime\n'), ((3081, 3102), 'numpy.concatenate', 'np.concatenate', (['datas'], {}), '(datas)\n', (3095, 3102), True, 'import numpy as np\n'), ((3593, 3610), 'numpy.isscalar', 'np.isscalar', (['done'], {}), '(done)\n', (3604, 3610), True, 'import numpy as np\n'), ((5395, 5422), 'os.path.splitext', 'os.path.splitext', (['data_path'], {}), '(data_path)\n', (5411, 5422), False, 'import os\n'), ((11877, 11902), 'os.path.isfile', 'os.path.isfile', (['data_path'], {}), '(data_path)\n', (11891, 11902), False, 'import os\n'), ((12544, 12558), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (12555, 12558), False, 'import pickle\n'), ((12773, 12804), 'ujson.load', 'ujson.load', (['open_file'], {}), '(open_file, **kwargs)\n', (12783, 12804), False, 'import ujson\n'), ((13417, 13482), 'subprocess.CalledProcessError', 'subprocess.CalledProcessError', (['proc.args', 'proc.returncode', 'output'], {}), '(proc.args, proc.returncode, output)\n', (13446, 13482), False, 'import subprocess\n'), ((14091, 14115), 'pydash.pick', 'ps.pick', (['attr_dict', 'keys'], {}), '(attr_dict, keys)\n', (14098, 14115), True, 'import pydash as ps\n'), ((17412, 17436), 'os.path.isabs', 'os.path.isabs', (['data_path'], {}), '(data_path)\n', (17425, 17436), False, 'import os\n'), ((17457, 17483), 'os.path.abspath', 'os.path.abspath', (['data_path'], {}), '(data_path)\n', (17472, 17483), False, 'import os\n'), ((17495, 17519), 'os.path.exists', 'os.path.exists', (['abs_path'], {}), '(abs_path)\n', (17509, 17519), False, 'import os\n'), ((17661, 17687), 'os.path.dirname', 'os.path.dirname', (['data_path'], {}), '(data_path)\n', (17676, 17687), False, 'import os\n'), 
((20393, 20413), 'pickle.dump', 'pickle.dump', (['data', 'f'], {}), '(data, f)\n', (20404, 20413), False, 'import pickle\n'), ((20624, 20680), 'json.dump', 'json.dump', (['data', 'open_file'], {'indent': '(2)', 'cls': 'LabJsonEncoder'}), '(data, open_file, indent=2, cls=LabJsonEncoder)\n', (20633, 20680), False, 'import json\n'), ((4340, 4355), 'pydash.is_dict', 'ps.is_dict', (['val'], {}), '(val)\n', (4350, 4355), True, 'import pydash as ps\n'), ((5193, 5207), 'pydash.is_tuple', 'ps.is_tuple', (['v'], {}), '(v)\n', (5204, 5207), True, 'import pydash as ps\n'), ((12844, 12874), 'yaml.load', 'yaml.load', (['open_file'], {}), '(open_file, **kwargs)\n', (12853, 12874), False, 'import yaml\n'), ((15924, 15935), 'time.time', 'time.time', ([], {}), '()\n', (15933, 15935), False, 'import time\n'), ((17592, 17625), 'os.path.join', 'os.path.join', (['ROOT_DIR', 'data_path'], {}), '(ROOT_DIR, data_path)\n', (17604, 17625), False, 'import os\n'), ((18509, 18541), 'os.environ.get', 'os.environ.get', (['"""RENDER"""', '"""true"""'], {}), "('RENDER', 'true')\n", (18523, 18541), False, 'import os\n'), ((18774, 18798), 'numpy.concatenate', 'np.concatenate', (['batch[k]'], {}), '(batch[k])\n', (18788, 18798), True, 'import numpy as np\n'), ((18812, 18832), 'pydash.is_list', 'ps.is_list', (['batch[k]'], {}), '(batch[k])\n', (18822, 18832), True, 'import pydash as ps\n'), ((20713, 20739), 'yaml.dump', 'yaml.dump', (['data', 'open_file'], {}), '(data, open_file)\n', (20722, 20739), False, 'import yaml\n'), ((1143, 1169), 'operator.itemgetter', 'operator.itemgetter', (['*idxs'], {}), '(*idxs)\n', (1162, 1169), False, 'import operator\n'), ((3003, 3023), 'numpy.concatenate', 'np.concatenate', (['data'], {}), '(data)\n', (3017, 3023), True, 'import numpy as np\n'), ((4364, 4380), 'pydash.is_empty', 'ps.is_empty', (['val'], {}), '(val)\n', (4375, 4380), True, 'import pydash as ps\n'), ((4521, 4536), 'pydash.is_list', 'ps.is_list', (['val'], {}), '(val)\n', (4531, 4536), True, 'import 
pydash as ps\n'), ((4566, 4584), 'pydash.is_dict', 'ps.is_dict', (['val[0]'], {}), '(val[0])\n', (4576, 4584), True, 'import pydash as ps\n'), ((13750, 13763), 'pydash.is_dict', 'ps.is_dict', (['v'], {}), '(v)\n', (13760, 13763), True, 'import pydash as ps\n'), ((13812, 13822), 'pprint.pformat', 'pformat', (['v'], {}), '(v)\n', (13819, 13822), False, 'from pprint import pformat\n'), ((18857, 18875), 'numpy.array', 'np.array', (['batch[k]'], {}), '(batch[k])\n', (18865, 18875), True, 'import numpy as np\n'), ((4545, 4561), 'pydash.is_empty', 'ps.is_empty', (['val'], {}), '(val)\n', (4556, 4561), True, 'import pydash as ps\n'), ((4696, 4711), 'pydash.is_object', 'ps.is_object', (['v'], {}), '(v)\n', (4708, 4711), True, 'import pydash as ps\n'), ((5731, 5819), 'subprocess.check_output', 'subprocess.check_output', (["['git', 'rev-parse', 'HEAD']"], {'close_fds': '(True)', 'cwd': 'ROOT_DIR'}), "(['git', 'rev-parse', 'HEAD'], close_fds=True, cwd=\n ROOT_DIR)\n", (5754, 5819), False, 'import subprocess\n'), ((13778, 13788), 'pydash.head', 'ps.head', (['v'], {}), '(v)\n', (13785, 13788), True, 'import pydash as ps\n')] |
# Copyright 2018 Yahoo Inc.
# Licensed under the terms of the Apache 2.0 license.
# Please see LICENSE file in the project root for terms.
# This example demonstrates how to leverage Spark for parallel inferencing from a SavedModel.
#
# Normally, you can use TensorFlowOnSpark to just form a TensorFlow cluster for training and inferencing.
# However, in some situations, you may have a SavedModel without the original code for defining the inferencing
# graph. In these situations, we can use Spark to instantiate a single-node TensorFlow instance on each executor,
# where each executor can independently load the model and inference on input data.
#
# Note: this particular example demonstrates use of `tf.data.Dataset` to read the input data for inferencing,
# but it could also be adapted to just use an RDD of TFRecords from Spark.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import numpy as np
import tensorflow as tf
def inference(args, ctx):
    """Load a SavedModel on this executor and write predictions for its shard.

    Each Spark executor runs this independently: it shards the TFRecord input
    by (num_workers, worker_num) and writes one output part-file.

    Args:
        args: parsed CLI args with export_dir, images_labels, output.
        ctx: TensorFlowOnSpark node context (num_workers, worker_num).
    """
    # load saved_model
    saved_model = tf.saved_model.load(args.export_dir, tags='serve')
    predict = saved_model.signatures['serving_default']

    # parse function for TFRecords
    def parse_tfr(example_proto):
        feature_def = {"label": tf.io.FixedLenFeature(1, tf.int64),
                       "image": tf.io.FixedLenFeature(784, tf.int64)}
        features = tf.io.parse_single_example(serialized=example_proto, features=feature_def)
        image = tf.cast(features['image'], dtype=tf.float32) / 255.0
        image = tf.reshape(image, [28, 28, 1])
        label = tf.cast(features['label'], dtype=tf.float32)
        return (image, label)

    # define a new tf.data.Dataset (for inferencing)
    ds = tf.data.Dataset.list_files("{}/part-*".format(args.images_labels), shuffle=False)
    ds = ds.shard(ctx.num_workers, ctx.worker_num)
    ds = ds.interleave(tf.data.TFRecordDataset)
    ds = ds.map(parse_tfr)
    ds = ds.batch(10)

    # create an output file per spark worker for the predictions
    tf.io.gfile.makedirs(args.output)
    output_file = tf.io.gfile.GFile("{}/part-{:05d}".format(args.output, ctx.worker_num), mode='w')
    for batch in ds:
        predictions = predict(conv2d_input=batch[0])
        # np.int was removed in NumPy 1.24; the builtin int is the equivalent dtype
        labels = np.reshape(batch[1], -1).astype(int)
        preds = np.argmax(predictions['dense_1'], axis=1)
        for x in zip(labels, preds):
            output_file.write("{} {}\n".format(x[0], x[1]))
    output_file.close()
if __name__ == '__main__':
    from pyspark.context import SparkContext
    from pyspark.conf import SparkConf
    from tensorflowonspark import TFParallel

    sc = SparkContext(conf=SparkConf().setAppName("mnist_inference"))
    # default cluster size to the number of configured Spark executors
    executors = sc._conf.get("spark.executor.instances")
    num_executors = int(executors) if executors is not None else 1

    parser = argparse.ArgumentParser()
    # help text fixed: was garbled as "for S with labelspark Standalone"
    parser.add_argument("--cluster_size", help="number of nodes in the cluster (for Spark Standalone)", type=int, default=num_executors)
    parser.add_argument('--images_labels', type=str, help='Directory for input images with labels')
    parser.add_argument("--export_dir", help="HDFS path to export model", type=str, default="mnist_export")
    parser.add_argument("--output", help="HDFS path to save predictions", type=str, default="predictions")
    args, _ = parser.parse_known_args()
    print("args: {}".format(args))

    # Running single-node TF instances on each executor
    TFParallel.run(sc, inference, args, args.cluster_size)
| [
"tensorflow.saved_model.load",
"numpy.reshape",
"argparse.ArgumentParser",
"tensorflow.io.parse_single_example",
"numpy.argmax",
"tensorflow.io.gfile.makedirs",
"tensorflow.io.FixedLenFeature",
"pyspark.conf.SparkConf",
"tensorflow.reshape",
"tensorflowonspark.TFParallel.run",
"tensorflow.cast"
... | [((1076, 1126), 'tensorflow.saved_model.load', 'tf.saved_model.load', (['args.export_dir'], {'tags': '"""serve"""'}), "(args.export_dir, tags='serve')\n", (1095, 1126), True, 'import tensorflow as tf\n'), ((2005, 2038), 'tensorflow.io.gfile.makedirs', 'tf.io.gfile.makedirs', (['args.output'], {}), '(args.output)\n', (2025, 2038), True, 'import tensorflow as tf\n'), ((2776, 2801), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (2799, 2801), False, 'import argparse\n'), ((3386, 3440), 'tensorflowonspark.TFParallel.run', 'TFParallel.run', (['sc', 'inference', 'args', 'args.cluster_size'], {}), '(sc, inference, args, args.cluster_size)\n', (3400, 3440), False, 'from tensorflowonspark import TFParallel\n'), ((1392, 1466), 'tensorflow.io.parse_single_example', 'tf.io.parse_single_example', ([], {'serialized': 'example_proto', 'features': 'feature_def'}), '(serialized=example_proto, features=feature_def)\n', (1418, 1466), True, 'import tensorflow as tf\n'), ((1544, 1574), 'tensorflow.reshape', 'tf.reshape', (['image', '[28, 28, 1]'], {}), '(image, [28, 28, 1])\n', (1554, 1574), True, 'import tensorflow as tf\n'), ((1587, 1631), 'tensorflow.cast', 'tf.cast', (["features['label']"], {'dtype': 'tf.float32'}), "(features['label'], dtype=tf.float32)\n", (1594, 1631), True, 'import tensorflow as tf\n'), ((2271, 2312), 'numpy.argmax', 'np.argmax', (["predictions['dense_1']"], {'axis': '(1)'}), "(predictions['dense_1'], axis=1)\n", (2280, 2312), True, 'import numpy as np\n'), ((1275, 1309), 'tensorflow.io.FixedLenFeature', 'tf.io.FixedLenFeature', (['(1)', 'tf.int64'], {}), '(1, tf.int64)\n', (1296, 1309), True, 'import tensorflow as tf\n'), ((1339, 1375), 'tensorflow.io.FixedLenFeature', 'tf.io.FixedLenFeature', (['(784)', 'tf.int64'], {}), '(784, tf.int64)\n', (1360, 1375), True, 'import tensorflow as tf\n'), ((1479, 1523), 'tensorflow.cast', 'tf.cast', (["features['image']"], {'dtype': 'tf.float32'}), "(features['image'], dtype=tf.float32)\n", 
(1486, 1523), True, 'import tensorflow as tf\n'), ((2219, 2243), 'numpy.reshape', 'np.reshape', (['batch[1]', '(-1)'], {}), '(batch[1], -1)\n', (2229, 2243), True, 'import numpy as np\n'), ((2601, 2612), 'pyspark.conf.SparkConf', 'SparkConf', ([], {}), '()\n', (2610, 2612), False, 'from pyspark.conf import SparkConf\n')] |
# coding: utf-8
# pylint: disable=invalid-name, no-member, too-many-arguments
""" wrapper function of distmesh for EIT """
# Copyright (c) <NAME>. All rights reserved.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
from __future__ import division, absolute_import, print_function
import numpy as np
from .distmesh import build
from .mesh_circle import MeshCircle
from .utils import check_order
from .shape import circle, area_uniform, ball, thorax, L_shaped
from .shape import fix_points_fd, fix_points_ball
def create(n_el=16, fd=None, fh=area_uniform, h0=0.1, p_fix=None, bbox=None):
    """
    Generating 2D/3D meshes using distmesh (pyEIT built-in)
    Parameters
    ----------
    n_el: int
        number of electrodes (point-type electrode)
    fd: function
        distance function (circle in 2D, ball in 3D)
    fh: function
        mesh size quality control function
    p_fix: NDArray
        fixed points
    bbox: NDArray
        bounding box
    h0: float
        initial mesh size, default=0.1
    Returns
    -------
    mesh_obj: dict
        {'element', 'node', 'perm'}
    """
    # test conditions if fd or/and bbox are none
    if bbox is None:
        if fd != ball:
            bbox = np.array([[-1, -1], [1, 1]])
        else:
            bbox = [[-1.2, -1.2, -1.2], [1.2, 1.2, 1.2]]
    bbox = np.array(
        bbox
    )  # list is converted to Numpy array so we can use it then (calling shape method..)
    n_dim = bbox.shape[1]  # bring dimension
    # infer dim
    if fd is None:
        if n_dim == 2:
            fd = circle
        elif n_dim == 3:
            fd = ball
    if n_dim not in [2, 3]:
        raise TypeError("distmesh only supports 2D or 3D")
    if bbox.shape[0] != 2:
        raise TypeError("please specify lower and upper bound of bbox")
    if p_fix is None:
        if n_dim == 2:
            if fd == thorax:
                # thorax shape is generated so far without fixed points (to be updated later)
                p_fix = [
                    (-0.098, -0.6463),
                    (-0.4181, -0.6074),
                    (-0.7207, -0.4946),
                    (-0.933, -0.2647),
                    (-0.9147, 0.0543),
                    (-0.8022, 0.3565),
                    (-0.5791, 0.5864),
                    (-0.1653, 0.6819),
                    (0.1564, 0.6571),
                    (0.5814, 0.6353),
                    (0.8298, 0.433),
                    (0.9698, 0.1431),
                    (0.9914, -0.1767),
                    (0.8359, -0.449),
                    (0.5419, -0.5833),
                    (0.2243, -0.6456),
                ]
                p_fix = np.array(p_fix)
            elif fd == L_shaped:
                p_fix = [
                    [1, 0],
                    [1, -1],
                    [0, -1],
                    [-1, -1],
                    [-1, 0],
                    [-1, 1],
                    [0, 1],
                    [0, 0],
                ]  # values brought from distmesh2D L shaped mesh example
                p_fix = np.array(p_fix)
                h0 = 0.15  # NOTE: overrides the caller-supplied h0 for the L-shaped mesh
            else:
                p_fix = fix_points_fd(fd, n_el=n_el)
        elif n_dim == 3:
            p_fix = fix_points_ball(n_el=n_el)
    # 1. build mesh
    p, t = build(fd, fh, pfix=p_fix, bbox=bbox, h0=h0)
    # 2. check whether t is counter-clock-wise, otherwise reshape it
    t = check_order(p, t)
    # 3. generate electrodes, the same as p_fix (top n_el)
    el_pos = np.arange(n_el)
    # 4. init uniform element permittivity (sigma)
    # np.float was removed in NumPy 1.24; builtin float is the equivalent alias
    perm = np.ones(t.shape[0], dtype=float)
    # 5. build output structure
    mesh = {"element": t, "node": p, "perm": perm}
    return mesh, el_pos
def set_perm(mesh, anomaly=None, background=None):
    """wrapper for pyEIT interface
    Note
    ----
    update permittivity of mesh, if specified.
    Parameters
    ----------
    mesh: dict
        mesh structure
    anomaly: dict, optional
        anomaly is a dictionary (or arrays of dictionary) contains,
        {'x': val, 'y': val, 'd': val, 'perm': val}
        all permittivity on triangles whose distance to (x,y) are less than (d)
        will be replaced with a new value, 'perm' may be a complex value.
    background: float, optional
        set background permittivity
    Returns
    -------
    mesh_obj: dict
        updated mesh structure, {'element', 'node', 'perm'}
    """
    pts = mesh["element"]
    tri = mesh["node"]
    perm = mesh["perm"].copy()
    # centroid of each element (equivalent to per-row np.mean over its vertices)
    tri_centers = np.mean(tri[pts], axis=1)
    n = np.size(mesh["perm"])
    # reset background if needed
    if background is not None:
        perm = background * np.ones(n)
    if anomaly is not None:
        # promote dtype to complex when any anomaly has complex permittivity
        if any(np.iscomplex(attr["perm"]) for attr in anomaly):
            perm = perm.astype("complex")
        # assign anomaly values to elements within distance d of each center
        for attr in anomaly:
            d = attr["d"]
            if "z" in attr:
                dist = np.sqrt(
                    (tri_centers[:, 0] - attr["x"]) ** 2
                    + (tri_centers[:, 1] - attr["y"]) ** 2
                    + (tri_centers[:, 2] - attr["z"]) ** 2
                )
            else:
                dist = np.sqrt(
                    (tri_centers[:, 0] - attr["x"]) ** 2
                    + (tri_centers[:, 1] - attr["y"]) ** 2
                )
            # update permittivity within indices
            perm[dist < d] = attr["perm"]
    mesh_new = {"node": tri, "element": pts, "perm": perm}
    return mesh_new
def layer_circle(n_el=16, n_fan=8, n_layer=8):
    """generate mesh on unit-circle"""
    model = MeshCircle(n_fan=n_fan, n_layer=n_layer, n_el=n_el)
    p, e, el_pos = model.create()
    uniform_perm = np.ones(e.shape[0])
    return {"element": e, "node": p, "perm": uniform_perm}, el_pos
| [
"numpy.mean",
"numpy.iscomplex",
"numpy.sqrt",
"numpy.ones",
"numpy.size",
"numpy.array",
"numpy.arange"
] | [((1358, 1372), 'numpy.array', 'np.array', (['bbox'], {}), '(bbox)\n', (1366, 1372), True, 'import numpy as np\n'), ((3520, 3535), 'numpy.arange', 'np.arange', (['n_el'], {}), '(n_el)\n', (3529, 3535), True, 'import numpy as np\n'), ((3598, 3633), 'numpy.ones', 'np.ones', (['t.shape[0]'], {'dtype': 'np.float'}), '(t.shape[0], dtype=np.float)\n', (3605, 3633), True, 'import numpy as np\n'), ((4544, 4569), 'numpy.mean', 'np.mean', (['tri[pts]'], {'axis': '(1)'}), '(tri[pts], axis=1)\n', (4551, 4569), True, 'import numpy as np\n'), ((4794, 4815), 'numpy.size', 'np.size', (["mesh['perm']"], {}), "(mesh['perm'])\n", (4801, 4815), True, 'import numpy as np\n'), ((6337, 6356), 'numpy.ones', 'np.ones', (['e.shape[0]'], {}), '(e.shape[0])\n', (6344, 6356), True, 'import numpy as np\n'), ((1246, 1274), 'numpy.array', 'np.array', (['[[-1, -1], [1, 1]]'], {}), '([[-1, -1], [1, 1]])\n', (1254, 1274), True, 'import numpy as np\n'), ((4909, 4919), 'numpy.ones', 'np.ones', (['n'], {}), '(n)\n', (4916, 4919), True, 'import numpy as np\n'), ((5057, 5083), 'numpy.iscomplex', 'np.iscomplex', (["attr['perm']"], {}), "(attr['perm'])\n", (5069, 5083), True, 'import numpy as np\n'), ((2689, 2704), 'numpy.array', 'np.array', (['p_fix'], {}), '(p_fix)\n', (2697, 2704), True, 'import numpy as np\n'), ((3092, 3107), 'numpy.array', 'np.array', (['p_fix'], {}), '(p_fix)\n', (3100, 3107), True, 'import numpy as np\n'), ((5451, 5579), 'numpy.sqrt', 'np.sqrt', (["((tri_centers[:, 0] - attr['x']) ** 2 + (tri_centers[:, 1] - attr['y']) ** \n 2 + (tri_centers[:, 2] - attr['z']) ** 2)"], {}), "((tri_centers[:, 0] - attr['x']) ** 2 + (tri_centers[:, 1] - attr[\n 'y']) ** 2 + (tri_centers[:, 2] - attr['z']) ** 2)\n", (5458, 5579), True, 'import numpy as np\n'), ((5775, 5864), 'numpy.sqrt', 'np.sqrt', (["((tri_centers[:, 0] - attr['x']) ** 2 + (tri_centers[:, 1] - attr['y']) ** 2)"], {}), "((tri_centers[:, 0] - attr['x']) ** 2 + (tri_centers[:, 1] - attr[\n 'y']) ** 2)\n", (5782, 5864), True, 'import 
numpy as np\n')] |
import numpy as np
import mxnet as mx
from collections import namedtuple
def get_network_fc(network_path, network_epoch, normalize_inputs):
batch_def = namedtuple('Batch', ['data'])
sym, arg_params, aux_params = mx.model.load_checkpoint(network_path, network_epoch)
network = mx.mod.Module(symbol=sym.get_internals()['flatten0_output'],
label_names=None,
context=mx.gpu())
network.bind(for_training=False,
data_shapes=[("data", (6, 3, 224, 224))])
network.set_params(arg_params, aux_params)
def fc(image):
image = image.astype(np.float32)
if normalize_inputs: # true for resnext101
image = image - np.array([[[[123.68, 116.779, 103.939]]]], dtype=np.float32)
image = np.transpose(image, [0, 3, 1, 2])
inputs = batch_def([mx.nd.array(image)])
network.forward(inputs)
return network.get_outputs()[0].asnumpy()
return fc
resnext = get_network_fc("//diplomova_praca_lib/resnet/resnext-101-1", 40, True)
resnet = get_network_fc("//diplomova_praca_lib/resnet/resnet-152", 0, False)
image = np.zeros([6, 224, 224, 3], dtype=np.uint8)
features_for_image = np.concatenate([resnext(image), resnet(image)], 1)
print(features_for_image.shape)
| [
"collections.namedtuple",
"numpy.array",
"numpy.zeros",
"mxnet.gpu",
"mxnet.nd.array",
"mxnet.model.load_checkpoint",
"numpy.transpose"
] | [((1153, 1195), 'numpy.zeros', 'np.zeros', (['[6, 224, 224, 3]'], {'dtype': 'np.uint8'}), '([6, 224, 224, 3], dtype=np.uint8)\n', (1161, 1195), True, 'import numpy as np\n'), ((157, 186), 'collections.namedtuple', 'namedtuple', (['"""Batch"""', "['data']"], {}), "('Batch', ['data'])\n", (167, 186), False, 'from collections import namedtuple\n'), ((221, 274), 'mxnet.model.load_checkpoint', 'mx.model.load_checkpoint', (['network_path', 'network_epoch'], {}), '(network_path, network_epoch)\n', (245, 274), True, 'import mxnet as mx\n'), ((804, 837), 'numpy.transpose', 'np.transpose', (['image', '[0, 3, 1, 2]'], {}), '(image, [0, 3, 1, 2])\n', (816, 837), True, 'import numpy as np\n'), ((433, 441), 'mxnet.gpu', 'mx.gpu', ([], {}), '()\n', (439, 441), True, 'import mxnet as mx\n'), ((727, 787), 'numpy.array', 'np.array', (['[[[[123.68, 116.779, 103.939]]]]'], {'dtype': 'np.float32'}), '([[[[123.68, 116.779, 103.939]]]], dtype=np.float32)\n', (735, 787), True, 'import numpy as np\n'), ((866, 884), 'mxnet.nd.array', 'mx.nd.array', (['image'], {}), '(image)\n', (877, 884), True, 'import mxnet as mx\n')] |
"""Basic Breakpoint API Examples."""
import numpy as np
import pandas as pd
from portformer import BreakpointAPI
def examples():
"""List of major Breakpoint API examples"""
# Read environment variable = BREAKPOINT_API_KEY
api = BreakpointAPI(api_key=None)
# Get Latest AAPL forecasts
breakpoint_forecast = api.forecast("AAPL")
print(
"breakpoint_forecast for {} on {}:\t\tmu:{}, std:{}".format(
breakpoint_forecast["ticker"],
breakpoint_forecast["as_of_date"],
breakpoint_forecast["mu"],
breakpoint_forecast["std"],
)
)
# Get Historical TSLA forecasts
historical_breakpoints = api.historical_forecasts(
"TSLA", start_date="2020-02-01", end_date="2020-04-01"
)
print(
"number of historical_breakpoints:\t\t\t{}".format(
len(historical_breakpoints["agg"])
)
)
# Get Latest SPY AGG GLD forecasts
breakpoint_cross_section = api.cross_sectional_forecasts(
tickers=["SPY", "AGG", "GLD"]
)
print(
"breakpoint_cross_section forecasted sharpe ratios:\t",
{x["ticker"]: x["sharpe"] for x in breakpoint_cross_section},
)
# Get Latest Bitcoin forecasts
btc = api.crypto_forecasts(ticker="BTCUSD")
print(
"BTCUSD for {} on {}:\t\t\tmu:{}, std:{}".format(
btc["ticker"], btc["as_of_date"], btc["mu"], btc["std"],
)
)
# Get Crypto Universe Bitcoin forecasts
universe = api.crypto_universe()
print("number of crypto tickers in universe:\t\t\t{}".format(len(universe)))
def custom_examples():
"""API request with custom timeseries data"""
# Read environment variable = BREAKPOINT_API_KEY
api = BreakpointAPI(api_key=None)
N = 200
seed = 42
# Generate a random price series
np.random.seed(seed)
data = np.exp(pd.Series(np.random.normal(size=(N,)) * 0.01).cumsum())
data.index = pd.bdate_range("2020-01-01", periods=N)
bp = api.custom_timeseries_forecasts(
data,
name=None,
history_timedelta=None,
tform="log-diff",
no_whitten=False,
seed=seed,
)
print(bp)
if __name__ == "__main__":
# examples()
custom_examples()
| [
"numpy.random.normal",
"numpy.random.seed",
"portformer.BreakpointAPI",
"pandas.bdate_range"
] | [((244, 271), 'portformer.BreakpointAPI', 'BreakpointAPI', ([], {'api_key': 'None'}), '(api_key=None)\n', (257, 271), False, 'from portformer import BreakpointAPI\n'), ((1749, 1776), 'portformer.BreakpointAPI', 'BreakpointAPI', ([], {'api_key': 'None'}), '(api_key=None)\n', (1762, 1776), False, 'from portformer import BreakpointAPI\n'), ((1846, 1866), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (1860, 1866), True, 'import numpy as np\n'), ((1958, 1997), 'pandas.bdate_range', 'pd.bdate_range', (['"""2020-01-01"""'], {'periods': 'N'}), "('2020-01-01', periods=N)\n", (1972, 1997), True, 'import pandas as pd\n'), ((1895, 1922), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(N,)'}), '(size=(N,))\n', (1911, 1922), True, 'import numpy as np\n')] |
"""
Test smd0 and eventbuilder for handling step dgrams.
See https://docs.google.com/spreadsheets/d/1VlVCwEVGahab3omAFJLaF8DJWFcz-faI9Q9aHa7QTUw/edit?usp=sharing for test setup.
"""
import os, time, glob, sys
from psana.smdreader import SmdReader
from psana.dgram import Dgram
from setup_input_files import setup_input_files
from psana import DataSource
import numpy as np
from mpi4py import MPI
# MPI context: every rank runs this test script; rank selects per-process work
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
size = comm.Get_size()
# Root for the generated test xtc2 data; override via TEST_XTC_DIR (defaults to cwd)
test_xtc_dir = os.environ.get('TEST_XTC_DIR', '.')
xtc_dir = os.path.join(test_xtc_dir, '.tmp_smd0')
def run_smd0(n_events):
    """Read smalldata files in chunks of n_events and tally per-read byte counts.

    Parameters
    ----------
    n_events: int
        no. of events requested per SmdReader.get() call

    Returns
    -------
    dict with:
        each_read: list of [got_events, smd_chunk_nbytes, step_chunk_nbytes]
                   for every non-empty chunk read
        total_n_events: total events read across all chunks
    """
    filenames = glob.glob(os.path.join(xtc_dir, '.tmp', 'smalldata', '*.xtc2'))
    fds = [os.open(filename, os.O_RDONLY) for filename in filenames]

    # Move file ptrs to datagram part (Dgram construction advances each fd)
    configs = [Dgram(file_descriptor=fd) for fd in fds]

    # Optionally limit the no. of streams via the first CLI argument
    limit = len(filenames)
    if len(sys.argv) > 1:
        limit = int(sys.argv[1])

    # removed dead locals: st/en (unused timing) and cn_i (unused counter)
    smdr = SmdReader(fds[:limit])
    processed_events = 0
    smdr.get(n_events)
    got_events = smdr.got_events
    result = {'each_read': [], 'total_n_events': 0}
    while got_events != 0:
        step_chunk_nbytes = 0
        smd_chunk_nbytes = 0
        for i in range(limit):
            smd_view = smdr.view(i)
            if smd_view:
                smd_chunk_nbytes += smd_view.nbytes
            step_view = smdr.view(i, update=True)
            if step_view:
                step_chunk_nbytes += step_view.nbytes
        result['each_read'].append([got_events, smd_chunk_nbytes, step_chunk_nbytes])
        processed_events += got_events
        # Read more events
        smdr.get(n_events)
        got_events = smdr.got_events

    result['total_n_events'] = processed_events
    for fd in fds:
        os.close(fd)
    return result
def my_filter(evt):
    """Trivial event filter used by the test cases below; accepts everything."""
    return True
def run_serial_read(n_events, batch_size=1, filter_fn=0):
    """Consume a run serially through DataSource and count events per step.

    Sets PS_SMD_N_EVENTS so smd0 reads `n_events` per chunk, then walks
    run -> steps -> events and returns a summary dict with keys
    'evt_per_step' (list of 3 counts), 'n_steps' and 'n_events'.
    """
    os.environ['PS_SMD_N_EVENTS'] = str(n_events)
    ds = DataSource(exp='xpptut13', run=1,
                    dir=os.path.join(xtc_dir, '.tmp'),
                    batch_size=batch_size, filter=filter_fn)
    summary = {'evt_per_step': [0, 0, 0], 'n_steps': 0, 'n_events': 0}
    total_steps = 0
    total_events = 0
    for run in ds.runs():
        # Detectors are created but their values are never read here;
        # presumably this exercises the Detector interface -- confirm.
        edet = run.Detector('HX2:DVD:GCC:01:PMON')
        sdet = run.Detector('motor2')
        for step_no, step in enumerate(run.steps()):
            step_events = sum(1 for _ in step.events())
            total_events += step_events
            total_steps += 1
            summary['evt_per_step'][step_no] = step_events
    summary['n_steps'] = total_steps
    summary['n_events'] = total_events
    return summary
def check_results(results, expected_result):
    """Assert that every entry of `results` equals `expected_result`."""
    assert all(res == expected_result for res in results)
def test_runsinglefile_steps():
    """Count steps/events when reading one xtc2 file via DataSource(files=...)."""
    ds = DataSource(files=os.path.join(xtc_dir, '.tmp', 'data-r0001-s00.xtc2'))
    summary = {'evt_per_step': [0, 0, 0], 'n_steps': 0, 'n_events': 0}
    total_steps = 0
    total_events = 0
    for run in ds.runs():
        for step_no, step in enumerate(run.steps()):
            step_events = sum(1 for _ in step.events())
            total_events += step_events
            total_steps += 1
            summary['evt_per_step'][step_no] = step_events
    summary['n_steps'] = total_steps
    summary['n_events'] = total_events
    return summary
if __name__ == "__main__":
import pathlib
p = pathlib.Path(xtc_dir)
if not p.exists():
if rank == 0:
p.mkdir()
setup_input_files(p, n_files=2, slow_update_freq=4, n_motor_steps=3, n_events_per_step=10, gen_run2=False)
comm.Barrier()
"""
# Expected result:
# each_read n_events, smd_chunk_nbytes, step_chunk_nbytes
# total_n_events
# Test 1: No. of chunk-read events covers the entire smds
expected_result = {'each_read': [[30, 7896, 3108]], 'total_n_events': 30}
result = run_smd0(51)
assert result == expected_result
# Test 2: No. of chunk-read events covers beyond the next BeginStep
expected_result = {'each_read': [[20, 5208, 1848], [10, 2688, 1260]], 'total_n_events': 30}
result = run_smd0(20)
assert result == expected_result
# Test 3: No. of chunk-read events covers the next BeginStep
expected_result = {'each_read': [[19, 5040, 1848], [11, 2856, 1260]], 'total_n_events': 30}
result = run_smd0(19)
assert result == expected_result
"""
# Test run.steps()
test_cases = [\
(51, 1, 0), \
(51, 1, my_filter), \
(51, 5, 0), \
(51, 5, my_filter), \
(20, 1, 0), \
(19, 1, 0), \
(1, 1, my_filter), \
(1, 1, 0), \
(3, 4, my_filter),
(3, 4, 0), \
]
for test_case in test_cases:
result = run_serial_read(test_case[0], batch_size=test_case[1], filter_fn=test_case[2])
result = comm.gather(result, root=0)
if rank == 0:
sum_events_per_step = np.zeros(3, dtype=np.int)
sum_events = 0
n_steps = 0
for i in range(size):
if result[i]['evt_per_step']:
sum_events_per_step += np.asarray(result[i]['evt_per_step'], dtype=np.int)
sum_events += result[i]['n_events']
n_steps = np.max([n_steps, result[i]['n_steps']])
assert all(sum_events_per_step == [10,10,10])
assert sum_events == 30
assert n_steps == 3
# Test run.steps() for RunSingleFile
if size == 1:
result = test_runsinglefile_steps()
assert result == {'evt_per_step': [10, 10, 10], 'n_steps': 3, 'n_events': 30}
| [
"psana.DataSource",
"pathlib.Path",
"psana.dgram.Dgram",
"os.close",
"psana.smdreader.SmdReader",
"os.open",
"os.path.join",
"os.environ.get",
"numpy.asarray",
"numpy.max",
"numpy.zeros",
"setup_input_files.setup_input_files",
"time.time"
] | [((481, 516), 'os.environ.get', 'os.environ.get', (['"""TEST_XTC_DIR"""', '"""."""'], {}), "('TEST_XTC_DIR', '.')\n", (495, 516), False, 'import os, time, glob, sys\n'), ((527, 566), 'os.path.join', 'os.path.join', (['test_xtc_dir', '""".tmp_smd0"""'], {}), "(test_xtc_dir, '.tmp_smd0')\n", (539, 566), False, 'import os, time, glob, sys\n'), ((941, 952), 'time.time', 'time.time', ([], {}), '()\n', (950, 952), False, 'import os, time, glob, sys\n'), ((964, 986), 'psana.smdreader.SmdReader', 'SmdReader', (['fds[:limit]'], {}), '(fds[:limit])\n', (973, 986), False, 'from psana.smdreader import SmdReader\n'), ((1763, 1774), 'time.time', 'time.time', ([], {}), '()\n', (1772, 1774), False, 'import os, time, glob, sys\n'), ((1997, 2026), 'os.path.join', 'os.path.join', (['xtc_dir', '""".tmp"""'], {}), "(xtc_dir, '.tmp')\n", (2009, 2026), False, 'import os, time, glob, sys\n'), ((2086, 2181), 'psana.DataSource', 'DataSource', ([], {'exp': '"""xpptut13"""', 'run': '(1)', 'dir': 'exp_xtc_dir', 'batch_size': 'batch_size', 'filter': 'filter_fn'}), "(exp='xpptut13', run=1, dir=exp_xtc_dir, batch_size=batch_size,\n filter=filter_fn)\n", (2096, 2181), False, 'from psana import DataSource\n'), ((3556, 3577), 'pathlib.Path', 'pathlib.Path', (['xtc_dir'], {}), '(xtc_dir)\n', (3568, 3577), False, 'import pathlib\n'), ((618, 670), 'os.path.join', 'os.path.join', (['xtc_dir', '""".tmp"""', '"""smalldata"""', '"""*.xtc2"""'], {}), "(xtc_dir, '.tmp', 'smalldata', '*.xtc2')\n", (630, 670), False, 'import os, time, glob, sys\n'), ((683, 713), 'os.open', 'os.open', (['filename', 'os.O_RDONLY'], {}), '(filename, os.O_RDONLY)\n', (690, 713), False, 'import os, time, glob, sys\n'), ((795, 820), 'psana.dgram.Dgram', 'Dgram', ([], {'file_descriptor': 'fd'}), '(file_descriptor=fd)\n', (800, 820), False, 'from psana.dgram import Dgram\n'), ((1851, 1863), 'os.close', 'os.close', (['fd'], {}), '(fd)\n', (1859, 1863), False, 'import os, time, glob, sys\n'), ((2946, 2998), 'os.path.join', 
'os.path.join', (['xtc_dir', '""".tmp"""', '"""data-r0001-s00.xtc2"""'], {}), "(xtc_dir, '.tmp', 'data-r0001-s00.xtc2')\n", (2958, 2998), False, 'import os, time, glob, sys\n'), ((3657, 3767), 'setup_input_files.setup_input_files', 'setup_input_files', (['p'], {'n_files': '(2)', 'slow_update_freq': '(4)', 'n_motor_steps': '(3)', 'n_events_per_step': '(10)', 'gen_run2': '(False)'}), '(p, n_files=2, slow_update_freq=4, n_motor_steps=3,\n n_events_per_step=10, gen_run2=False)\n', (3674, 3767), False, 'from setup_input_files import setup_input_files\n'), ((5157, 5182), 'numpy.zeros', 'np.zeros', (['(3)'], {'dtype': 'np.int'}), '(3, dtype=np.int)\n', (5165, 5182), True, 'import numpy as np\n'), ((5487, 5526), 'numpy.max', 'np.max', (["[n_steps, result[i]['n_steps']]"], {}), "([n_steps, result[i]['n_steps']])\n", (5493, 5526), True, 'import numpy as np\n'), ((5357, 5408), 'numpy.asarray', 'np.asarray', (["result[i]['evt_per_step']"], {'dtype': 'np.int'}), "(result[i]['evt_per_step'], dtype=np.int)\n", (5367, 5408), True, 'import numpy as np\n')] |
import copy
import numpy as np
import math
class Transformation:
    """Accumulate an affine transform as a 4x4 homogeneous matrix.

    Every operation pre-multiplies ``self.transformation``, so transforms
    take effect in the order the methods are called.  The old commented-out
    per-component state (tx/ty/.../sz) was dead code and has been removed.
    """

    def __init__(self, is_array = False, num_instances = 1):
        # Start from the identity.  is_array/num_instances describe a
        # 'transformation array' (the '-a' flag) -- see expandToList().
        self.transformation = np.eye(4)
        self.isArray = is_array
        self.num_instances = num_instances

    def translate(self, tx_, ty_, tz_):
        """Translate by (tx_, ty_, tz_)."""
        translation = np.array([[1, 0, 0, tx_],
                                [0, 1, 0, ty_],
                                [0, 0, 1, tz_],
                                [0, 0, 0, 1]])
        self.transformation = np.matmul(translation, self.transformation)

    def rotate_x(self, rx):
        """Rotate about the x axis by `rx` degrees.

        NOTE(review): the sign placement here (and in rotate_y) is the
        transpose of the convention rotate_z uses.  Preserved exactly as
        written -- confirm the intended handedness.
        """
        c = math.cos(math.radians(rx))
        s = math.sin(math.radians(rx))
        rotation = np.array([[1, 0, 0, 0],
                             [0, c, s, 0],
                             [0, -s, c, 0],
                             [0, 0, 0, 1]])
        self.transformation = np.matmul(rotation, self.transformation)

    def rotate_y(self, ry):
        """Rotate about the y axis by `ry` degrees (same convention as rotate_x)."""
        c = math.cos(math.radians(ry))
        s = math.sin(math.radians(ry))
        rotation = np.array([[c, 0, -s, 0],
                             [0, 1, 0, 0],
                             [s, 0, c, 0],
                             [0, 0, 0, 1]])
        self.transformation = np.matmul(rotation, self.transformation)

    def rotate_z(self, rz):
        """Rotate about the z axis by `rz` degrees (counter-clockwise)."""
        c = math.cos(math.radians(rz))
        s = math.sin(math.radians(rz))
        rotation = np.array([[c, -s, 0, 0],
                             [s, c, 0, 0],
                             [0, 0, 1, 0],
                             [0, 0, 0, 1]])
        self.transformation = np.matmul(rotation, self.transformation)

    def scale(self, s):
        """Scale uniformly by factor `s`."""
        scaling = np.array([[s, 0, 0, 0],
                            [0, s, 0, 0],
                            [0, 0, s, 0],
                            [0, 0, 0, 1]])
        self.transformation = np.matmul(scaling, self.transformation)

    def mirror_x(self):
        """Mirror across the yz plane (negate the x scale)."""
        self.transformation[0,0] *= -1

    def mirror_y(self):
        """Mirror across the xz plane (negate the y scale)."""
        self.transformation[1,1] *= -1

    def mirror_z(self):
        """Mirror across the xy plane (negate the z scale)."""
        self.transformation[2,2] *= -1

    def transform(self, tform):
        """Compose another Transformation onto this one (applied after it)."""
        self.transformation = np.matmul(tform.transformation, self.transformation)

    def expandToList(self):
        """Expand a transformation array ('-a' flag) into separate Transformations.

        Returns `num_instances` Transformation objects; instance i carries i
        applications of this transform, so the first one is the identity.
        """
        xform_array = []
        base_xform = Transformation()
        for _ in range(self.num_instances):
            xform_array.append(copy.deepcopy(base_xform))
            base_xform.transform(self)
        return xform_array

    def __str__(self):
        # Short human-readable summary.
        return f"Xform(isArray: {self.isArray}, num_instances:{self.num_instances})"

    def __repr__(self):
        # Detailed dump including the full 4x4 matrix.
        return (
            f"Xform(isArray: {self.isArray}, num_instances:{self.num_instances},\n"
            "Transformation:\n"
            f"{self.transformation[0][0]} {self.transformation[0][1]} {self.transformation[0][2]} {self.transformation[0][3]}\n"
            f"{self.transformation[1][0]} {self.transformation[1][1]} {self.transformation[1][2]} {self.transformation[1][3]}\n"
            f"{self.transformation[2][0]} {self.transformation[2][1]} {self.transformation[2][2]} {self.transformation[2][3]}\n"
            f"{self.transformation[3][0]} {self.transformation[3][1]} {self.transformation[3][2]} {self.transformation[3][3]}\n"
        )
| [
"numpy.eye",
"math.radians",
"numpy.array",
"numpy.matmul",
"copy.deepcopy"
] | [((373, 382), 'numpy.eye', 'np.eye', (['(4)'], {}), '(4)\n', (379, 382), True, 'import numpy as np\n'), ((640, 712), 'numpy.array', 'np.array', (['[[1, 0, 0, tx_], [0, 1, 0, ty_], [0, 0, 1, tz_], [0, 0, 0, 1]]'], {}), '([[1, 0, 0, tx_], [0, 1, 0, ty_], [0, 0, 1, tz_], [0, 0, 0, 1]])\n', (648, 712), True, 'import numpy as np\n'), ((840, 883), 'numpy.matmul', 'np.matmul', (['translation', 'self.transformation'], {}), '(translation, self.transformation)\n', (849, 883), True, 'import numpy as np\n'), ((1134, 1201), 'numpy.array', 'np.array', (['[[1, 0, 0, 0], [0, c, s, 0], [0, -s, c, 0], [0, 0, 0, 1]]'], {}), '([[1, 0, 0, 0], [0, c, s, 0], [0, -s, c, 0], [0, 0, 0, 1]])\n', (1142, 1201), True, 'import numpy as np\n'), ((1320, 1360), 'numpy.matmul', 'np.matmul', (['rotation', 'self.transformation'], {}), '(rotation, self.transformation)\n', (1329, 1360), True, 'import numpy as np\n'), ((1487, 1554), 'numpy.array', 'np.array', (['[[c, 0, -s, 0], [0, 1, 0, 0], [s, 0, c, 0], [0, 0, 0, 1]]'], {}), '([[c, 0, -s, 0], [0, 1, 0, 0], [s, 0, c, 0], [0, 0, 0, 1]])\n', (1495, 1554), True, 'import numpy as np\n'), ((1673, 1713), 'numpy.matmul', 'np.matmul', (['rotation', 'self.transformation'], {}), '(rotation, self.transformation)\n', (1682, 1713), True, 'import numpy as np\n'), ((1840, 1907), 'numpy.array', 'np.array', (['[[c, -s, 0, 0], [s, c, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]]'], {}), '([[c, -s, 0, 0], [s, c, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]])\n', (1848, 1907), True, 'import numpy as np\n'), ((2026, 2066), 'numpy.matmul', 'np.matmul', (['rotation', 'self.transformation'], {}), '(rotation, self.transformation)\n', (2035, 2066), True, 'import numpy as np\n'), ((2187, 2253), 'numpy.array', 'np.array', (['[[s, 0, 0, 0], [0, s, 0, 0], [0, 0, s, 0], [0, 0, 0, 1]]'], {}), '([[s, 0, 0, 0], [0, s, 0, 0], [0, 0, s, 0], [0, 0, 0, 1]])\n', (2195, 2253), True, 'import numpy as np\n'), ((2369, 2408), 'numpy.matmul', 'np.matmul', (['scaling', 'self.transformation'], {}), '(scaling, 
self.transformation)\n', (2378, 2408), True, 'import numpy as np\n'), ((3021, 3073), 'numpy.matmul', 'np.matmul', (['tform.transformation', 'self.transformation'], {}), '(tform.transformation, self.transformation)\n', (3030, 3073), True, 'import numpy as np\n'), ((1058, 1074), 'math.radians', 'math.radians', (['rx'], {}), '(rx)\n', (1070, 1074), False, 'import math\n'), ((1097, 1113), 'math.radians', 'math.radians', (['rx'], {}), '(rx)\n', (1109, 1113), False, 'import math\n'), ((1411, 1427), 'math.radians', 'math.radians', (['ry'], {}), '(ry)\n', (1423, 1427), False, 'import math\n'), ((1450, 1466), 'math.radians', 'math.radians', (['ry'], {}), '(ry)\n', (1462, 1466), False, 'import math\n'), ((1764, 1780), 'math.radians', 'math.radians', (['rz'], {}), '(rz)\n', (1776, 1780), False, 'import math\n'), ((1803, 1819), 'math.radians', 'math.radians', (['rz'], {}), '(rz)\n', (1815, 1819), False, 'import math\n'), ((3442, 3467), 'copy.deepcopy', 'copy.deepcopy', (['base_xform'], {}), '(base_xform)\n', (3455, 3467), False, 'import copy\n')] |
import logging
import unittest
import numpy as np
from astropy import units as u
from flarestack.cosmo import get_rate
from flarestack.cosmo.rates import source_maps
from flarestack.cosmo.rates.tde_rates import tde_evolutions, local_tde_rates
from flarestack.cosmo.rates.sfr_rates import sfr_evolutions, local_sfr_rates
from flarestack.cosmo.rates.ccsn_rates import kcc_rates, sn_subclass_rates
from flarestack.cosmo.rates.grb_rates import grb_evolutions, local_grb_rates
from flarestack.cosmo.rates.fbot_rates import local_fbot_rates
from flarestack.cosmo.rates.frb_rates import local_frb_rates
# Redshift grid (z = 0..8, 5 points) shared by every rate-evolution test below.
zrange = np.linspace(0.0, 8.0, 5)
class TestCosmoRates(unittest.TestCase):
    """Check every registered cosmological rate model evaluates on the
    shared redshift grid and reproduces a known value at z = 1."""

    def setUp(self):
        pass

    def test_get_rates(self):
        """Every rate name registered in source_maps resolves and evaluates."""
        logging.info("Testing get_rates util functions.")
        for vals in source_maps.values():
            for val in vals:
                f = get_rate(val)
                f(1.0)

    def test_tde_rates(self):
        """All TDE evolution/rate combinations, plus a jetted-TDE spot check."""
        for evolution in tde_evolutions.keys():
            for rate in local_tde_rates.keys():
                f = get_rate("tde", evolution_name=evolution, rate_name=rate)
                f(zrange)
        f = get_rate("tde", evolution_name="biehl_18_jetted", m=-2)
        true = 2.e-07 / (u.Mpc**3 * u.yr)
        self.assertAlmostEqual(f(1.0)/true, 1.0, delta=0.05)

    def test_sfr_rates(self):
        """All SFR evolution/rate combinations, plus the default-rate value."""
        for evolution in sfr_evolutions.keys():
            for rate in local_sfr_rates.keys():
                f = get_rate("sfr", evolution_name=evolution, rate_name=rate)
                f(zrange)
        f = get_rate("sfr")
        true = 0.08687592762508031 * u.solMass / (u.Mpc**3 * u.yr)
        self.assertAlmostEqual(f(1.0)/true, 1.0, delta=0.05)

    def test_ccsn_rates(self):
        """All CCSN kcc/subclass combinations, plus an Ibc spot check."""
        for kcc_name in kcc_rates.keys():
            for (subclass_fractions_name, (sn_type, _)) in sn_subclass_rates.items():
                for sn_subclass in sn_type.keys():
                    f = get_rate(
                        "ccsn",
                        kcc_name=kcc_name,
                        sn_subclass=sn_subclass,
                        subclass_fractions_name=subclass_fractions_name
                    )
                    f(zrange)
        f = get_rate("ccsn", sn_subclass="Ibc", fraction=0.5)
        true = 7.236764771169189e-05 / (u.Mpc**3 * u.yr)
        self.assertAlmostEqual(f(1.0)/true, 1.0, delta=0.05)

    def test_grb_rates(self):
        """All GRB evolution/rate combinations, plus a lien_14 spot check.

        (Renamed from the typo `test_grn_rates`; still discovered by
        unittest via the test_ prefix.)
        """
        for evolution in grb_evolutions.keys():
            for rate in local_grb_rates.keys():
                f = get_rate("grb", evolution_name=evolution, rate_name=rate)
                f(zrange)
        f = get_rate("grb", evolution_name="lien_14")
        true = 1.7635240284867526e-09 / (u.Mpc**3 * u.yr)
        self.assertAlmostEqual(f(1.0)/true, 1.0, delta=0.05)

    def test_fbot_rates(self):
        """All FBOT evolution/rate combinations, plus the default-rate value."""
        for evolution in sfr_evolutions.keys():
            for rate in local_fbot_rates.keys():
                f = get_rate("fbot", evolution_name=evolution, rate_name=rate)
                f(zrange)
        f = get_rate("fbot")
        true = 4.054209955837081e-06 / (u.Mpc**3 * u.yr)
        self.assertAlmostEqual(f(1.0)/true, 1.0, delta=0.05)

    def test_frb_rates(self):
        """All FRB evolution/rate combinations, plus the default-rate value."""
        for evolution in sfr_evolutions.keys():
            for rate in local_frb_rates.keys():
                f = get_rate("frb", evolution_name=evolution, rate_name=rate)
                f(zrange)
        f = get_rate("frb")
        true = 0.418741971152887 / (u.Mpc**3 * u.yr)
        self.assertAlmostEqual(f(1.0)/true, 1.0, delta=0.05)
if __name__ == '__main__':
unittest.main() | [
"flarestack.cosmo.rates.sfr_rates.sfr_evolutions.keys",
"flarestack.cosmo.rates.ccsn_rates.sn_subclass_rates.items",
"flarestack.cosmo.rates.grb_rates.grb_evolutions.keys",
"flarestack.cosmo.rates.frb_rates.local_frb_rates.keys",
"flarestack.cosmo.rates.ccsn_rates.kcc_rates.keys",
"flarestack.cosmo.get_ra... | [((606, 630), 'numpy.linspace', 'np.linspace', (['(0.0)', '(8.0)', '(5)'], {}), '(0.0, 8.0, 5)\n', (617, 630), True, 'import numpy as np\n'), ((3597, 3612), 'unittest.main', 'unittest.main', ([], {}), '()\n', (3610, 3612), False, 'import unittest\n'), ((748, 797), 'logging.info', 'logging.info', (['"""Testing get_rates util functions."""'], {}), "('Testing get_rates util functions.')\n", (760, 797), False, 'import logging\n'), ((819, 839), 'flarestack.cosmo.rates.source_maps.values', 'source_maps.values', ([], {}), '()\n', (837, 839), False, 'from flarestack.cosmo.rates import source_maps\n'), ((984, 1005), 'flarestack.cosmo.rates.tde_rates.tde_evolutions.keys', 'tde_evolutions.keys', ([], {}), '()\n', (1003, 1005), False, 'from flarestack.cosmo.rates.tde_rates import tde_evolutions, local_tde_rates\n'), ((1172, 1227), 'flarestack.cosmo.get_rate', 'get_rate', (['"""tde"""'], {'evolution_name': '"""biehl_18_jetted"""', 'm': '(-2)'}), "('tde', evolution_name='biehl_18_jetted', m=-2)\n", (1180, 1227), False, 'from flarestack.cosmo import get_rate\n'), ((1389, 1410), 'flarestack.cosmo.rates.sfr_rates.sfr_evolutions.keys', 'sfr_evolutions.keys', ([], {}), '()\n', (1408, 1410), False, 'from flarestack.cosmo.rates.sfr_rates import sfr_evolutions, local_sfr_rates\n'), ((1577, 1592), 'flarestack.cosmo.get_rate', 'get_rate', (['"""sfr"""'], {}), "('sfr')\n", (1585, 1592), False, 'from flarestack.cosmo import get_rate\n'), ((1779, 1795), 'flarestack.cosmo.rates.ccsn_rates.kcc_rates.keys', 'kcc_rates.keys', ([], {}), '()\n', (1793, 1795), False, 'from flarestack.cosmo.rates.ccsn_rates import kcc_rates, sn_subclass_rates\n'), ((2229, 2278), 'flarestack.cosmo.get_rate', 'get_rate', (['"""ccsn"""'], {'sn_subclass': '"""Ibc"""', 'fraction': '(0.5)'}), "('ccsn', sn_subclass='Ibc', fraction=0.5)\n", (2237, 2278), False, 'from flarestack.cosmo import get_rate\n'), ((2456, 2477), 'flarestack.cosmo.rates.grb_rates.grb_evolutions.keys', 
'grb_evolutions.keys', ([], {}), '()\n', (2475, 2477), False, 'from flarestack.cosmo.rates.grb_rates import grb_evolutions, local_grb_rates\n'), ((2644, 2685), 'flarestack.cosmo.get_rate', 'get_rate', (['"""grb"""'], {'evolution_name': '"""lien_14"""'}), "('grb', evolution_name='lien_14')\n", (2652, 2685), False, 'from flarestack.cosmo import get_rate\n'), ((2864, 2885), 'flarestack.cosmo.rates.sfr_rates.sfr_evolutions.keys', 'sfr_evolutions.keys', ([], {}), '()\n', (2883, 2885), False, 'from flarestack.cosmo.rates.sfr_rates import sfr_evolutions, local_sfr_rates\n'), ((3054, 3070), 'flarestack.cosmo.get_rate', 'get_rate', (['"""fbot"""'], {}), "('fbot')\n", (3062, 3070), False, 'from flarestack.cosmo import get_rate\n'), ((3246, 3267), 'flarestack.cosmo.rates.sfr_rates.sfr_evolutions.keys', 'sfr_evolutions.keys', ([], {}), '()\n', (3265, 3267), False, 'from flarestack.cosmo.rates.sfr_rates import sfr_evolutions, local_sfr_rates\n'), ((3434, 3449), 'flarestack.cosmo.get_rate', 'get_rate', (['"""frb"""'], {}), "('frb')\n", (3442, 3449), False, 'from flarestack.cosmo import get_rate\n'), ((1031, 1053), 'flarestack.cosmo.rates.tde_rates.local_tde_rates.keys', 'local_tde_rates.keys', ([], {}), '()\n', (1051, 1053), False, 'from flarestack.cosmo.rates.tde_rates import tde_evolutions, local_tde_rates\n'), ((1436, 1458), 'flarestack.cosmo.rates.sfr_rates.local_sfr_rates.keys', 'local_sfr_rates.keys', ([], {}), '()\n', (1456, 1458), False, 'from flarestack.cosmo.rates.sfr_rates import sfr_evolutions, local_sfr_rates\n'), ((1856, 1881), 'flarestack.cosmo.rates.ccsn_rates.sn_subclass_rates.items', 'sn_subclass_rates.items', ([], {}), '()\n', (1879, 1881), False, 'from flarestack.cosmo.rates.ccsn_rates import kcc_rates, sn_subclass_rates\n'), ((2503, 2525), 'flarestack.cosmo.rates.grb_rates.local_grb_rates.keys', 'local_grb_rates.keys', ([], {}), '()\n', (2523, 2525), False, 'from flarestack.cosmo.rates.grb_rates import grb_evolutions, local_grb_rates\n'), ((2911, 2934), 
'flarestack.cosmo.rates.fbot_rates.local_fbot_rates.keys', 'local_fbot_rates.keys', ([], {}), '()\n', (2932, 2934), False, 'from flarestack.cosmo.rates.fbot_rates import local_fbot_rates\n'), ((3293, 3315), 'flarestack.cosmo.rates.frb_rates.local_frb_rates.keys', 'local_frb_rates.keys', ([], {}), '()\n', (3313, 3315), False, 'from flarestack.cosmo.rates.frb_rates import local_frb_rates\n'), ((890, 903), 'flarestack.cosmo.get_rate', 'get_rate', (['val'], {}), '(val)\n', (898, 903), False, 'from flarestack.cosmo import get_rate\n'), ((1075, 1132), 'flarestack.cosmo.get_rate', 'get_rate', (['"""tde"""'], {'evolution_name': 'evolution', 'rate_name': 'rate'}), "('tde', evolution_name=evolution, rate_name=rate)\n", (1083, 1132), False, 'from flarestack.cosmo import get_rate\n'), ((1480, 1537), 'flarestack.cosmo.get_rate', 'get_rate', (['"""sfr"""'], {'evolution_name': 'evolution', 'rate_name': 'rate'}), "('sfr', evolution_name=evolution, rate_name=rate)\n", (1488, 1537), False, 'from flarestack.cosmo import get_rate\n'), ((2547, 2604), 'flarestack.cosmo.get_rate', 'get_rate', (['"""grb"""'], {'evolution_name': 'evolution', 'rate_name': 'rate'}), "('grb', evolution_name=evolution, rate_name=rate)\n", (2555, 2604), False, 'from flarestack.cosmo import get_rate\n'), ((2956, 3014), 'flarestack.cosmo.get_rate', 'get_rate', (['"""fbot"""'], {'evolution_name': 'evolution', 'rate_name': 'rate'}), "('fbot', evolution_name=evolution, rate_name=rate)\n", (2964, 3014), False, 'from flarestack.cosmo import get_rate\n'), ((3337, 3394), 'flarestack.cosmo.get_rate', 'get_rate', (['"""frb"""'], {'evolution_name': 'evolution', 'rate_name': 'rate'}), "('frb', evolution_name=evolution, rate_name=rate)\n", (3345, 3394), False, 'from flarestack.cosmo import get_rate\n'), ((1958, 2071), 'flarestack.cosmo.get_rate', 'get_rate', (['"""ccsn"""'], {'kcc_name': 'kcc_name', 'sn_subclass': 'sn_subclass', 'subclass_fractions_name': 'subclass_fractions_name'}), "('ccsn', kcc_name=kcc_name, 
sn_subclass=sn_subclass,\n subclass_fractions_name=subclass_fractions_name)\n", (1966, 2071), False, 'from flarestack.cosmo import get_rate\n')] |
import matplotlib.pyplot as plt
import numpy as np
from scipy.integrate import quad
def smooth_product(u, v, domain=(0, 1)):
    """L2 inner product <u, v> over `domain`, computed by numerical quadrature."""
    value, _err = quad(lambda t: u(t) * v(t), *domain)
    return value
def least_squares(func, basis, inner, **kwargs):
    """Solve the normal equations for the best basis coefficients.

    `inner(p, q, **kwargs)` returns the inner product of two basis elements
    (or of a basis element with `func`).  Returns the coefficient vector c
    solving G c = b, where G is the Gram matrix of `basis` and
    b_i = <basis_i, func>.
    """
    gram = np.array([[inner(p, q, **kwargs) for p in basis] for q in basis])
    moments = np.array([inner(p, func, **kwargs) for p in basis])
    return np.linalg.solve(gram, moments)
def Q1():
    """Least-squares fit of exp(x) on [0, 1] in the shifted Legendre basis."""
    legendre_strs = ('1', '2*x - 1', '6*x**2 - 6*x + 1')
    basis = [np.vectorize(eval('lambda x:' + poly)) for poly in legendre_strs]
    coeffs = least_squares(np.exp, basis, smooth_product, domain=(0, 1))
    xs = np.linspace(0, 1)
    fitted = coeffs @ [p(xs) for p in basis]
    fig, ax = plt.subplots()
    ax.plot(xs, np.exp(xs), label='Exact')
    ax.plot(xs, fitted, label='Approx')
def Q2():
    """Quadratic least-squares fit to the points in data_points.txt."""
    basis_funcs = [np.vectorize(eval('lambda x:' + poly))
                   for poly in ('1', 'x', 'x**2')]
    x_data, y_data = np.loadtxt('data_points.txt', unpack=True)
    sampled_basis = [p(x_data) for p in basis_funcs]
    coeffs = least_squares(y_data, sampled_basis, np.inner)
    xs = np.linspace(x_data.min(), x_data.max())
    fitted = coeffs @ [p(xs) for p in basis_funcs]
    fig, ax = plt.subplots()
    ax.scatter(x_data, y_data, label='Exact')
    ax.plot(xs, fitted, 'k-', label='Approx')
def Q3():
    """Fourier-sine fit of x*(pi - x) on [0, pi]; prints coefficient errors."""
    basis = [eval('lambda x:' + expr)
             for expr in ('np.sin(x)', 'np.sin(2*x)', 'np.sin(3*x)')]

    def target(x):
        return x*(np.pi-x)

    coeffs = least_squares(
        target, basis,
        smooth_product, domain=(0, np.pi)
    )
    # Exact Fourier-sine coefficients of x*(pi - x).
    for coeff, exact in zip(coeffs, (8/np.pi, 0, 8/(27*np.pi))):
        print(f'Coefficient error: {abs(coeff-exact):.3e}')
    xs = np.linspace(0, np.pi)
    fitted = coeffs @ [p(xs) for p in basis]
    fig, ax = plt.subplots()
    ax.plot(xs, target(xs), label='Exact')
    ax.plot(xs, fitted, label='Approx')
if __name__ == '__main__':
    # Run each question interactively, closing old figures between them.
    questions = (Q1, Q2, Q3)
    for question in questions:
        input(f'Press `Enter` to run {question.__name__} ')
        plt.close('all')  # <- Close all existing figures
        question()
        plt.show(block=False)  # <- Allow code execution to continue
    input('Press `Enter` to quit the program.')
| [
"numpy.linalg.solve",
"numpy.exp",
"matplotlib.pyplot.close",
"numpy.linspace",
"numpy.loadtxt",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.show"
] | [((593, 618), 'numpy.linalg.solve', 'np.linalg.solve', (['lhs', 'rhs'], {}), '(lhs, rhs)\n', (608, 618), True, 'import numpy as np\n'), ((923, 940), 'numpy.linspace', 'np.linspace', (['(0)', '(1)'], {}), '(0, 1)\n', (934, 940), True, 'import numpy as np\n'), ((1005, 1019), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (1017, 1019), True, 'import matplotlib.pyplot as plt\n'), ((1305, 1347), 'numpy.loadtxt', 'np.loadtxt', (['"""data_points.txt"""'], {'unpack': '(True)'}), "('data_points.txt', unpack=True)\n", (1315, 1347), True, 'import numpy as np\n'), ((1589, 1603), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (1601, 1603), True, 'import matplotlib.pyplot as plt\n'), ((2164, 2185), 'numpy.linspace', 'np.linspace', (['(0)', 'np.pi'], {}), '(0, np.pi)\n', (2175, 2185), True, 'import numpy as np\n'), ((2250, 2264), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (2262, 2264), True, 'import matplotlib.pyplot as plt\n'), ((1040, 1054), 'numpy.exp', 'np.exp', (['domain'], {}), '(domain)\n', (1046, 1054), True, 'import numpy as np\n'), ((2520, 2536), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (2529, 2536), True, 'import matplotlib.pyplot as plt\n'), ((2603, 2624), 'matplotlib.pyplot.show', 'plt.show', ([], {'block': '(False)'}), '(block=False)\n', (2611, 2624), True, 'import matplotlib.pyplot as plt\n')] |
import numpy as np
import matplotlib.pyplot as plt
import itertools
import threading
import imp
"""This module is for developing the machinery required to make neural nets and analyse local and global codes
This module does stuff.
"""
__version__ = '0.1'
__author__ = '<NAME>'
__date__ = 'Jan 2017'
class ThreadedRunner(object):
    """Run a collection of tasks on threads, at most `maxparallel` at a time."""

    def __init__(self, tasks, maxparallel=8):
        """
        tasks: an iterable of (function, kwargs) tuples; each becomes a
               thread created as Thread(target=function, kwargs=kwargs)
        maxparallel: the maximum number of threads to be running at once
        """
        self.threads = [threading.Thread(target=f, kwargs=k) for (f, k) in tasks]
        # BUG FIX: the maxparallel argument used to be ignored (hard-coded 8).
        self.maxparallel = maxparallel
        # Index of the next not-yet-started thread; lets run() be resumable.
        self.next_thread = 0

    def run(self, threadrunlimit=None):
        """Run up to `threadrunlimit` of the remaining threads (all if None),
        in batches of at most `maxparallel`, waiting for each batch to finish.

        NOTE: the Python-2-era imp.lock_held()/release_lock() juggling was
        removed -- Python 3.3+ uses per-module import locks so starting
        threads during import is safe, and the `imp` module itself was
        removed in Python 3.12.
        """
        remaining = len(self.threads) - self.next_thread
        runcount = remaining if threadrunlimit is None else min(remaining, threadrunlimit)
        while runcount > 0:
            batch_size = min(runcount, self.maxparallel)
            batch = self.threads[self.next_thread:self.next_thread + batch_size]
            # Start all threads in this batch
            for thread in batch:
                thread.start()
            # Wait for them all to finish.
            # BUG FIX: this was `thread.join` (missing parentheses), so the
            # runner never actually waited for the batch to complete.
            for thread in batch:
                thread.join()
            runcount -= len(batch)
            self.next_thread += len(batch)
def HelloWorld(word):
    """Print `word` to stdout (smoke-test helper)."""
    print(word)
def normalise_to_zero_one_interval(y, ymin, ymax):
    """Map y linearly so that ymin -> 0 and ymax -> 1."""
    if ymin > ymax:
        raise TypeError('min and max values the wrong way round!')
    return (y - ymin) / (ymax - ymin)
def global_charactor(y):
    """Global-character measure from Karplus & Brooks 1995 (J. Comput. Chem.).

    Expects a 1-D vector.  NOTE: the original author flagged this
    implementation as incorrect; the formula is preserved unchanged here.
    """
    print('You are using global_character WHICH IS NOT CORRECT!!\n')
    if len(y.shape) != 1:
        raise TypeError('globalCharacter() not handed a vector ')
    unit = y / np.linalg.norm(y)
    r = unit.shape[0]
    # Original formula, then rescaled onto [0, 1].
    gc = (r ** -0.5 * sum(abs(unit))) ** 4.0
    return normalise_to_zero_one_interval(gc, 1, r ** 4)
def fk_plotter(dks, noOfK, lRange=None, error=0.15, xaxis=1, title=None, xlabel=None, ylabel=None, showPlots=1, savePlots=0):
    """Produce F(k) plots, one figure per layer of neurons.

    dks       : dks[l][n] has an `.fs` sequence of length noOfK for neuron n
                of layer l
    noOfK     : number of K values (x axis runs 1..noOfK)
    lRange    : iterable of layer indices to plot (default: all layers)
    error     : a reference line is drawn at 1 - error
    xaxis     : y value at which to draw a horizontal reference line
    showPlots / savePlots : display and/or save each figure as Fk<l>.png
    """
    if lRange is None:
        lRange = range(len(dks))
    for l in lRange:
        fig = plt.figure(l)
        x_data = np.array(range(noOfK)) + 1
        marker = itertools.cycle(['o', '>', '<', 'v', '8', 'd', 's', 'p', '*'])
        for n in range(len(dks[l])):
            # n is the neuron index within the layer
            y_data = dks[l][n].fs
            # BUG FIX: `marker.next()` is Python 2 only; use next(marker).
            plt.plot(x_data, y_data, label=str(n), marker=next(marker), alpha=1)
        if xaxis is not None:
            # note, if you don't want an xaxis, set xaxis='off'
            plt.axhline(xaxis)
        else:
            plt.axhline(0)
        plt.xlim([min(x_data)-0.25, max(x_data)+1])
        # BUG FIX: Legend.draggable() was removed in Matplotlib 3.7;
        # set_draggable(True) is the supported replacement.
        plt.legend(bbox_to_anchor=(0.9, 1.1), loc='best', ncol=2, framealpha=0.5).set_draggable(True)
        plt.plot([0., noOfK], [1-error, 1-error])
        if title is None:
            plt.title('Layer ' + str(l+1))
        else:
            plt.title(title)
        if xlabel is None:
            plt.xlabel('K')
        else:
            plt.xlabel(xlabel)
        if ylabel is None:
            plt.ylabel('f(K)')
        else:
            plt.ylabel(ylabel)
        if showPlots == 1:
            plt.show()
        if savePlots == 1:
            fig.savefig('Fk' + str(l) + '.png', dpi=fig.dpi)
def jitterer(out, z):
    """Build jittered x coordinates for a strip plot of layer activations.

    out : list of layer activation matrices
    z   : index of the layer to use
    Returns an array shaped like out[z].T whose row i holds i+1 plus
    uniform noise drawn from [-0.25, 0.25).
    """
    n_neurons, n_data = out[z].T.shape
    Jx = np.ones((n_neurons, n_data))
    for neuron in range(n_neurons):
        for sample in range(n_data):
            Jx[neuron, sample] = neuron + 1 + np.random.uniform(-0.25, 0.25)
    return Jx
def local_charactor(y):
    """Local-character measure from Karplus & Brooks 1995 (Comput. Chem.).

    Expects a 1-D vector; the raw sum of fourth powers of the normalised
    components is rescaled onto [0, 1].
    """
    if len(y.shape) != 1:
        raise TypeError('localCharacter() not handed a vector ')
    unit = y / np.linalg.norm(y)
    raw = sum(unit ** 4.0)
    return normalise_to_zero_one_interval(raw, y.shape[0] ** -1, 1)
def plotter(x, y, labels=['x','y'], legend=None, linestyle=['o-', '+-', '*.-'], xaxis=None, showPlots=1, savePlots=0):
    """Plot each row of `y` against `x` with automatic styling.

    x        : 1-D array of x values
    y        : 2-D array; each row is one curve
    labels   : [xlabel, ylabel]
    legend   : optional list of legend labels, one per row of y
    linestyle: line styles; the style advances every three curves
    xaxis    : y value for a horizontal reference line (default 0)
    NOTE: the mutable default arguments are kept for interface
    compatibility; this function does not mutate them.
    """
    fig = plt.figure(1)
    x_span = max(x) - min(x)
    y_span = max(y.flatten()) - min(y.flatten())
    for i in range(len(y)):
        # BUG FIX: `linestyle[i/3]` -- in Python 3, i/3 is a float and is
        # not a valid list index (TypeError); use integer division.
        style = linestyle[i // 3]
        if legend is not None:
            plt.plot(x, y[i], style, label=legend[i])
        else:
            plt.plot(x, y[i], style)
    if xaxis is not None:
        # note, if you don't want an xaxis, set xaxis='off'
        plt.axhline(xaxis)
    else:
        plt.axhline(0)
    plt.axis([min(x.flatten())-0.1*x_span, max(x.flatten())+0.1*x_span,
              min(y.flatten())-0.1*y_span, max(y.flatten())+0.1*y_span])
    plt.ylabel(labels[1])
    plt.xlabel(labels[0])
    if legend is not None:
        plt.legend(framealpha=0.5)
    if showPlots == 1:
        plt.show()
    if savePlots == 1:
        fig.savefig('Hk' + str(x[0]) + '.png', dpi=fig.dpi)
def simple_layer_activation_plotter(out):
    """Strip-plot the activations of every layer, one figure per layer.

    out : list of activation matrices, one per layer (rows = data points).

    BUG FIX: this used to call `a.jitterer(...)`, but no name `a` is
    imported or defined in this module (presumably a stale module alias),
    so the call raised NameError; it now calls the module-level jitterer()
    directly.
    """
    noOfLayers = len(out)
    for l in range(noOfLayers):
        plt.figure(l)
        t = jitterer(out, l)
        plt.plot(t, out[l].T, '+', label='training')
        plt.ylabel('Activation')
        plt.xlabel('Neuron no.')
        plt.show()
####################################################
## downloaded code
"""
Demo of a function to create Hinton diagrams.
Hinton diagrams are useful for visualizing the values of a 2D array (e.g.
a weight matrix): Positive and negative values are represented by white and
black squares, respectively, and the size of each square represents the
magnitude of each value.
Initial idea from <NAME> on the SciPy Cookbook
"""
def hinton(matrix, max_weight=None, ax=None):
    """Draw a Hinton diagram of *matrix* on the axes *ax*.

    White squares mark positive weights, black squares negative ones, and
    the square side length encodes magnitude relative to *max_weight*.
    """
    if ax is None:
        ax = plt.gca()
    if not max_weight:
        # default scale: the power of two just above the largest magnitude
        max_weight = 2 ** np.ceil(np.log(np.abs(matrix).max()) / np.log(2))
    ax.patch.set_facecolor('gray')
    ax.set_aspect('equal', 'box')
    ax.xaxis.set_major_locator(plt.NullLocator())
    ax.yaxis.set_major_locator(plt.NullLocator())
    for (row, col), weight in np.ndenumerate(matrix):
        colour = 'white' if weight > 0 else 'black'
        side = np.sqrt(np.abs(weight) / max_weight)
        square = plt.Rectangle([row - side / 2, col - side / 2], side, side,
                               facecolor=colour, edgecolor=colour)
        ax.add_patch(square)
    ax.autoscale_view()
    ax.invert_yaxis()
if __name__ == '__main__':
    # Demo: render a Hinton diagram for a random 20x20 weight matrix
    # centred on zero (values in [-0.5, 0.5)).
    hinton(np.random.rand(20, 20) - 0.5)
    plt.show()
## writing model to file and reading it back in test
| [
"numpy.random.rand",
"matplotlib.pyplot.ylabel",
"numpy.log",
"imp.lock_held",
"numpy.linalg.norm",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"numpy.ndenumerate",
"matplotlib.pyplot.axhline",
"imp.acquire_lock",
"matplotlib.pyplot.Rectangle",
"matplotlib.pyplot.NullLocator",
"num... | [((4652, 4675), 'numpy.ones', 'np.ones', (['out[z].T.shape'], {}), '(out[z].T.shape)\n', (4659, 4675), True, 'import numpy as np\n'), ((5523, 5536), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {}), '(1)\n', (5533, 5536), True, 'import matplotlib.pyplot as plt\n'), ((6121, 6142), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['labels[1]'], {}), '(labels[1])\n', (6131, 6142), True, 'import matplotlib.pyplot as plt\n'), ((6147, 6168), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['labels[0]'], {}), '(labels[0])\n', (6157, 6168), True, 'import matplotlib.pyplot as plt\n'), ((7742, 7764), 'numpy.ndenumerate', 'np.ndenumerate', (['matrix'], {}), '(matrix)\n', (7756, 7764), True, 'import numpy as np\n'), ((2477, 2494), 'numpy.linalg.norm', 'np.linalg.norm', (['y'], {}), '(y)\n', (2491, 2494), True, 'import numpy as np\n'), ((3211, 3224), 'matplotlib.pyplot.figure', 'plt.figure', (['l'], {}), '(l)\n', (3221, 3224), True, 'import matplotlib.pyplot as plt\n'), ((3286, 3348), 'itertools.cycle', 'itertools.cycle', (["['o', '>', '<', 'v', '8', 'd', 's', 'p', '*']"], {}), "(['o', '>', '<', 'v', '8', 'd', 's', 'p', '*'])\n", (3301, 3348), False, 'import itertools\n'), ((3926, 3972), 'matplotlib.pyplot.plot', 'plt.plot', (['[0.0, noOfK]', '[1 - error, 1 - error]'], {}), '([0.0, noOfK], [1 - error, 1 - error])\n', (3934, 3972), True, 'import matplotlib.pyplot as plt\n'), ((5121, 5138), 'numpy.linalg.norm', 'np.linalg.norm', (['y'], {}), '(y)\n', (5135, 5138), True, 'import numpy as np\n'), ((5915, 5933), 'matplotlib.pyplot.axhline', 'plt.axhline', (['xaxis'], {}), '(xaxis)\n', (5926, 5933), True, 'import matplotlib.pyplot as plt\n'), ((5952, 5966), 'matplotlib.pyplot.axhline', 'plt.axhline', (['(0)'], {}), '(0)\n', (5963, 5966), True, 'import matplotlib.pyplot as plt\n'), ((6202, 6228), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'framealpha': '(0.5)'}), '(framealpha=0.5)\n', (6212, 6228), True, 'import matplotlib.pyplot as plt\n'), ((6260, 6270), 
'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6268, 6270), True, 'import matplotlib.pyplot as plt\n'), ((6539, 6552), 'matplotlib.pyplot.figure', 'plt.figure', (['l'], {}), '(l)\n', (6549, 6552), True, 'import matplotlib.pyplot as plt\n'), ((6629, 6673), 'matplotlib.pyplot.plot', 'plt.plot', (['t', 'out[l].T', '"""+"""'], {'label': '"""training"""'}), "(t, out[l].T, '+', label='training')\n", (6637, 6673), True, 'import matplotlib.pyplot as plt\n'), ((6682, 6706), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Activation"""'], {}), "('Activation')\n", (6692, 6706), True, 'import matplotlib.pyplot as plt\n'), ((6715, 6739), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Neuron no."""'], {}), "('Neuron no.')\n", (6725, 6739), True, 'import matplotlib.pyplot as plt\n'), ((6858, 6868), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6866, 6868), True, 'import matplotlib.pyplot as plt\n'), ((7440, 7449), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (7447, 7449), True, 'import matplotlib.pyplot as plt\n'), ((7651, 7668), 'matplotlib.pyplot.NullLocator', 'plt.NullLocator', ([], {}), '()\n', (7666, 7668), True, 'import matplotlib.pyplot as plt\n'), ((7701, 7718), 'matplotlib.pyplot.NullLocator', 'plt.NullLocator', ([], {}), '()\n', (7716, 7718), True, 'import matplotlib.pyplot as plt\n'), ((7874, 7967), 'matplotlib.pyplot.Rectangle', 'plt.Rectangle', (['[x - size / 2, y - size / 2]', 'size', 'size'], {'facecolor': 'color', 'edgecolor': 'color'}), '([x - size / 2, y - size / 2], size, size, facecolor=color,\n edgecolor=color)\n', (7887, 7967), True, 'import matplotlib.pyplot as plt\n'), ((8153, 8163), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (8161, 8163), True, 'import matplotlib.pyplot as plt\n'), ((663, 699), 'threading.Thread', 'threading.Thread', ([], {'target': 'f', 'kwargs': 'k'}), '(target=f, kwargs=k)\n', (679, 699), False, 'import threading\n'), ((1394, 1409), 'imp.lock_held', 'imp.lock_held', ([], {}), '()\n', (1407, 
1409), False, 'import imp\n'), ((3657, 3675), 'matplotlib.pyplot.axhline', 'plt.axhline', (['xaxis'], {}), '(xaxis)\n', (3668, 3675), True, 'import matplotlib.pyplot as plt\n'), ((3702, 3716), 'matplotlib.pyplot.axhline', 'plt.axhline', (['(0)'], {}), '(0)\n', (3713, 3716), True, 'import matplotlib.pyplot as plt\n'), ((4062, 4078), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (4071, 4078), True, 'import matplotlib.pyplot as plt\n'), ((4118, 4133), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""K"""'], {}), "('K')\n", (4128, 4133), True, 'import matplotlib.pyplot as plt\n'), ((4160, 4178), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['xlabel'], {}), '(xlabel)\n', (4170, 4178), True, 'import matplotlib.pyplot as plt\n'), ((4218, 4236), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""f(K)"""'], {}), "('f(K)')\n", (4228, 4236), True, 'import matplotlib.pyplot as plt\n'), ((4263, 4281), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['ylabel'], {}), '(ylabel)\n', (4273, 4281), True, 'import matplotlib.pyplot as plt\n'), ((4321, 4331), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4329, 4331), True, 'import matplotlib.pyplot as plt\n'), ((5682, 5734), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y[i]', 'linestyle[i / 3]'], {'label': 'legend[i]'}), '(x, y[i], linestyle[i / 3], label=legend[i])\n', (5690, 5734), True, 'import matplotlib.pyplot as plt\n'), ((5787, 5822), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y[i]', 'linestyle[i / 3]'], {}), '(x, y[i], linestyle[i / 3])\n', (5795, 5822), True, 'import matplotlib.pyplot as plt\n'), ((1449, 1467), 'imp.release_lock', 'imp.release_lock', ([], {}), '()\n', (1465, 1467), False, 'import imp\n'), ((1753, 1771), 'imp.acquire_lock', 'imp.acquire_lock', ([], {}), '()\n', (1769, 1771), False, 'import imp\n'), ((3799, 3872), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'bbox_to_anchor': '(0.9, 1.1)', 'loc': '"""best"""', 'ncol': '(2)', 'framealpha': '(0.5)'}), "(bbox_to_anchor=(0.9, 1.1), 
loc='best', ncol=2, framealpha=0.5)\n", (3809, 3872), True, 'import matplotlib.pyplot as plt\n'), ((4870, 4900), 'numpy.random.uniform', 'np.random.uniform', (['(-0.25)', '(0.25)'], {}), '(-0.25, 0.25)\n', (4887, 4900), True, 'import numpy as np\n'), ((7835, 7844), 'numpy.abs', 'np.abs', (['w'], {}), '(w)\n', (7841, 7844), True, 'import numpy as np\n'), ((8115, 8137), 'numpy.random.rand', 'np.random.rand', (['(20)', '(20)'], {}), '(20, 20)\n', (8129, 8137), True, 'import numpy as np\n'), ((7539, 7548), 'numpy.log', 'np.log', (['(2)'], {}), '(2)\n', (7545, 7548), True, 'import numpy as np\n'), ((7515, 7529), 'numpy.abs', 'np.abs', (['matrix'], {}), '(matrix)\n', (7521, 7529), True, 'import numpy as np\n')] |
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from sklearn import linear_model

plt.style.use('fivethirtyeight')

datafile = 'datafile.txt'
# columns 0-1 are the two exam scores, column 2 is the admission label
data = np.loadtxt(datafile, delimiter=',', usecols=(0, 1, 2), unpack=True)
X = np.transpose(np.array(data[:-1]))
Y = np.transpose(np.array(data[-1:]))
# BUG FIX: the original used Python 2's ``xrange``, which raises NameError
# on Python 3.  Boolean-mask indexing selects the same rows and is clearer.
pos = X[Y.flatten() == 1]
neg = X[Y.flatten() == 0]

def plot_data():
    """Scatter the two classes and decorate the axes of the current figure."""
    plt.plot(pos[:, 0], pos[:, 1], 'wo', label='Admitted')
    plt.plot(neg[:, 0], neg[:, 1], 'bo', label='Not Admitted')
    plt.xlabel('Exam 1 Score')
    plt.ylabel('Exam 2 Score')
    # xx/yy are the module-level meshgrid built below; plot_data() is only
    # called after they have been defined.
    plt.xlim(xx.min(), xx.max())
    plt.ylim(yy.min(), yy.max())
    plt.legend()
    plt.grid(True)

y_arr = data[-1:].flatten()

# step value for creating meshgrid
h = 0.5
logreg = linear_model.LogisticRegression(C=1e5)
logreg.fit(X, y_arr)

x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))

# predict over the whole grid to draw the decision regions
Z = logreg.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)

plt.figure(figsize=(10, 8))
plt.pcolormesh(xx, yy, Z, cmap=plt.cm.Paired)
plt.xticks(())
plt.yticks(())
plot_data()
plt.show()
| [
"matplotlib.pyplot.grid",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.ylabel",
"numpy.arange",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.style.use",
"matplotlib.pyplot.pcolormesh",
"sklearn.linear_model.LogisticRegression",
"numpy.array",
"matplotlib.pyplot.figu... | [((104, 136), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""fivethirtyeight"""'], {}), "('fivethirtyeight')\n", (117, 136), True, 'import matplotlib.pyplot as plt\n'), ((171, 238), 'numpy.loadtxt', 'np.loadtxt', (['datafile'], {'delimiter': '""","""', 'usecols': '(0, 1, 2)', 'unpack': '(True)'}), "(datafile, delimiter=',', usecols=(0, 1, 2), unpack=True)\n", (181, 238), True, 'import numpy as np\n'), ((937, 980), 'sklearn.linear_model.LogisticRegression', 'linear_model.LogisticRegression', ([], {'C': '(100000.0)'}), '(C=100000.0)\n', (968, 980), False, 'from sklearn import linear_model\n'), ((1248, 1275), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 8)'}), '(figsize=(10, 8))\n', (1258, 1275), True, 'import matplotlib.pyplot as plt\n'), ((1275, 1320), 'matplotlib.pyplot.pcolormesh', 'plt.pcolormesh', (['xx', 'yy', 'Z'], {'cmap': 'plt.cm.Paired'}), '(xx, yy, Z, cmap=plt.cm.Paired)\n', (1289, 1320), True, 'import matplotlib.pyplot as plt\n'), ((1320, 1334), 'matplotlib.pyplot.xticks', 'plt.xticks', (['()'], {}), '(())\n', (1330, 1334), True, 'import matplotlib.pyplot as plt\n'), ((1335, 1349), 'matplotlib.pyplot.yticks', 'plt.yticks', (['()'], {}), '(())\n', (1345, 1349), True, 'import matplotlib.pyplot as plt\n'), ((1363, 1373), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1371, 1373), True, 'import matplotlib.pyplot as plt\n'), ((252, 271), 'numpy.array', 'np.array', (['data[:-1]'], {}), '(data[:-1])\n', (260, 271), True, 'import numpy as np\n'), ((290, 309), 'numpy.array', 'np.array', (['data[-1:]'], {}), '(data[-1:])\n', (298, 309), True, 'import numpy as np\n'), ((532, 586), 'matplotlib.pyplot.plot', 'plt.plot', (['pos[:, 0]', 'pos[:, 1]', '"""wo"""'], {'label': '"""Admitted"""'}), "(pos[:, 0], pos[:, 1], 'wo', label='Admitted')\n", (540, 586), True, 'import matplotlib.pyplot as plt\n'), ((586, 644), 'matplotlib.pyplot.plot', 'plt.plot', (['neg[:, 0]', 'neg[:, 1]', '"""bo"""'], {'label': '"""Not 
Admitted"""'}), "(neg[:, 0], neg[:, 1], 'bo', label='Not Admitted')\n", (594, 644), True, 'import matplotlib.pyplot as plt\n'), ((644, 670), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Exam 1 Score"""'], {}), "('Exam 1 Score')\n", (654, 670), True, 'import matplotlib.pyplot as plt\n'), ((675, 701), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Exam 2 Score"""'], {}), "('Exam 2 Score')\n", (685, 701), True, 'import matplotlib.pyplot as plt\n'), ((773, 785), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (783, 785), True, 'import matplotlib.pyplot as plt\n'), ((790, 804), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (798, 804), True, 'import matplotlib.pyplot as plt\n'), ((1123, 1149), 'numpy.arange', 'np.arange', (['x_min', 'x_max', 'h'], {}), '(x_min, x_max, h)\n', (1132, 1149), True, 'import numpy as np\n'), ((1148, 1174), 'numpy.arange', 'np.arange', (['y_min', 'y_max', 'h'], {}), '(y_min, y_max, h)\n', (1157, 1174), True, 'import numpy as np\n')] |
from dataclasses import dataclass
import hashlib
import blosc
import numpy as np
def HashedKey(*args, version=None):
    """Build a BOSS object key.

    The non-None arguments are joined with '&', the MD5 hex digest of that
    string is prepended (again '&'-separated), and the optional *version*
    is appended last.  Because *version* is added after hashing, it never
    influences the digest.

    Args (Common usage):
        parent_iso (None or 'ISO')
        collection_id
        experiment_id
        channel_id
        resolution
        time_sample
        morton (str): Morton ID of cube
    Keyword Args:
        version : Optional Object version, not part of the hashed value
    Example:
        >>> print(HashedKey(None, 48, 168, 994, 0, 0, 21117301, version=0))
        00000004f98cd89f2034b4a78b5a4558&48&168&994&0&0&21117301&0
    """
    body = "&".join(str(arg) for arg in args if arg is not None)
    hashed = "{}&{}".format(hashlib.md5(body.encode()).hexdigest(), body)
    if version is None:
        return hashed
    return "{}&{}".format(hashed, version)
@dataclass
class BossKey:
    """Decomposed BOSS S3 object key (see ``HashedKey`` for the layout).

    Field order matches the '&'-separated segments of the key string.
    """
    s3key: str        # the full original key string
    digest: str       # leading MD5 digest segment
    parent_iso: int   # isotropy segment; set to None when absent from the key
    col_id: int       # collection id
    exp_id: int       # experiment id
    chan_id: int      # channel id
    res: int          # resolution level
    t: int            # time sample
    mortonid: int     # Morton-order cube id
    version: int = 0  # object version; defaults to 0 when not present
def ret_boss_key(col_id, exp_id, chan_id, res, t, mortonid, version=0, parent_iso=None):
    """Convenience wrapper: return the hashed s3 key used inside BOSS."""
    key = HashedKey(parent_iso, col_id, exp_id, chan_id, res, t, mortonid,
                    version=version)
    return key
def parts_from_bosskey(s3key):
    """Parse an s3 key string back into a :class:`BossKey`.

    Note: ``parent_iso`` is filled with None when the key carried no such
    segment (i.e. fewer than 8 numeric fields after the digest).
    """
    digest, *fields = s3key.split("&")
    fields = [int(field) for field in fields]
    if len(fields) < 8:
        # no parent_iso segment present in this key
        fields.insert(0, None)
    return BossKey(s3key, digest, *fields)
def get_boss_data(s3resource, s3Bucket, s3Key, dtype, cube_size):
    """Download one blosc-compressed cuboid from a BOSS S3 bucket.

    Args:
        s3resource: boto3 S3 resource used for the download.
        s3Bucket: name of the bucket holding the cuboids.
        s3Key: full object key (see ``HashedKey``).
        dtype: numpy dtype string of the stored voxels, e.g. "uint8".
        cube_size: (x, y, z) cuboid dimensions; the returned array is
            reshaped with the axes reversed, i.e. (z, y, x).

    >> data = get_boss_data(session.resource("s3"), "cuboids.production.neurodata", "89bb785630a9446b6a564c8779b3678d&51&174&1005&0&0&12282054&0", "uint8", (512, 512, 16) )
    >> data.shape
    (16, 512, 512)
    """
    response = s3resource.Object(s3Bucket, s3Key).get()
    raw = blosc.decompress(response["Body"].read())
    voxels = np.frombuffer(raw, dtype=dtype)
    return voxels.reshape(cube_size[::-1])
def get_scale(x_voxel_size, y_voxel_size, z_voxel_size, voxel_unit):
    """Return the (x, y, z) voxel scale in nanometers at base resolution."""
    # nanometers per unit; any unrecognised unit is treated as nanometers
    unit_to_nm = {
        "micrometers": 1000,
        "millimeters": 1000000,
        "centimeters": 1e7,
    }
    factor = unit_to_nm.get(voxel_unit, 1)
    scale = [round(v * factor, 2)
             for v in (x_voxel_size, y_voxel_size, z_voxel_size)]
    # data comes from a pandas DF where all values start as floats;
    # demote whole numbers to int
    return [int(s) if s == int(s) else s for s in scale]
| [
"numpy.frombuffer"
] | [((2670, 2705), 'numpy.frombuffer', 'np.frombuffer', (['rawdata'], {'dtype': 'dtype'}), '(rawdata, dtype=dtype)\n', (2683, 2705), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""Tools for loading, shuffling, and batching ANI datasets
The `torchani.data.load(path)` creates an iterable of raw data,
where species are strings, and coordinates are numpy ndarrays.
You can transform these iterable by using transformations.
To do transformation, just do `it.transformation_name()`.
Available transformations are listed below:
- `species_to_indices` accepts two different kinds of arguments. It converts
species from elements (e. g. "H", "C", "Cl", etc) into internal torchani
indices (as returned by :class:`torchani.utils.ChemicalSymbolsToInts` or
the ``species_to_tensor`` method of a :class:`torchani.models.BuiltinModel`
and :class:`torchani.neurochem.Constants`), if its argument is an iterable
of species. By default species_to_indices behaves this way, with an
argument of ``('H', 'C', 'N', 'O', 'F', 'S', 'Cl')`` However, if its
argument is the string "periodic_table", then elements are converted into
atomic numbers ("periodic table indices") instead. This last option is
meant to be used when training networks that already perform a forward pass
of :class:`torchani.nn.SpeciesConverter` on their inputs in order to
convert elements to internal indices, before processing the coordinates.
- `subtract_self_energies` subtracts self energies from all molecules of the
dataset. It accepts two different kinds of arguments: You can pass a dict
of self energies, in which case self energies are directly subtracted
according to the key-value pairs, or a
:class:`torchani.utils.EnergyShifter`, in which case the self energies are
calculated by linear regression and stored inside the class in the order
specified by species_order. By default the function orders by atomic
number if no extra argument is provided, but a specific order may be requested.
- `remove_outliers`
- `shuffle`
- `cache` cache the result of previous transformations.
- `collate` pad the dataset, convert it to tensor, and stack them
together to get a batch. `collate` uses a default padding dictionary
``{'species': -1, 'coordinates': 0.0, 'forces': 0.0, 'energies': 0.0}`` for
padding, but a custom padding dictionary can be passed as an optional
parameter, which overrides this default padding.
- `pin_memory` copy the tensor to pinned memory so that later transfer
to cuda could be faster.
Note that orderings used in :class:`torchani.utils.ChemicalSymbolsToInts` and
:class:`torchani.nn.SpeciesConverter` should be consistent with orderings used
in `species_to_indices` and `subtract_self_energies`. To prevent confusion it
is recommended that arguments to intialize converters and arguments to these
functions all order elements *by their atomic number* (e. g. if you are working
with hydrogen, nitrogen and bromine always use ['H', 'N', 'Br'] and never ['N',
'H', 'Br'] or other variations). It is possible to specify a different custom
ordering, mainly due to backwards compatibility and to fully custom atom types,
but doing so is NOT recommended, since it is very error prone.
you can also use `split` to split the iterable to pieces. use `split` as:
.. code-block:: python
it.split(ratio1, ratio2, None)
where the None in the end indicate that we want to use all of the the rest
Example:
.. code-block:: python
energy_shifter = torchani.utils.EnergyShifter(None)
training, validation = torchani.data.load(dspath).subtract_self_energies(energy_shifter).species_to_indices().shuffle().split(int(0.8 * size), None)
training = training.collate(batch_size).cache()
validation = validation.collate(batch_size).cache()
If the above approach takes too much memory for you, you can then use dataloader
with multiprocessing to achieve comparable performance with less memory usage:
.. code-block:: python
training, validation = torchani.data.load(dspath).subtract_self_energies(energy_shifter).species_to_indices().shuffle().split(0.8, None)
training = torch.utils.data.DataLoader(list(training), batch_size=batch_size, collate_fn=torchani.data.collate_fn, num_workers=64)
validation = torch.utils.data.DataLoader(list(validation), batch_size=batch_size, collate_fn=torchani.data.collate_fn, num_workers=64)
"""
from os.path import join, isfile, isdir
import os
from ._pyanitools import anidataloader
from .. import utils
import importlib
import functools
import math
import random
from collections import Counter
import numpy
import gc
# Optional progress-bar support: pkbar is only imported when it is installed.
PKBAR_INSTALLED = importlib.util.find_spec('pkbar') is not None  # type: ignore
if PKBAR_INSTALLED:
    import pkbar

# When True (and pkbar is installed), dataset loading shows a progress bar.
verbose = True

# Properties read from each conformation in addition to species/coordinates.
PROPERTIES = ('energies',)

# Default per-key padding values used by collate_fn when batching.
PADDING = {
    'species': -1,
    'coordinates': 0.0,
    'forces': 0.0,
    'energies': 0.0
}
def collate_fn(samples, padding=None):
    """Pad a list of conformations to a common size and stack into a batch.

    When *padding* is None, the module-level PADDING defaults are used.
    """
    return utils.stack_with_padding(samples, PADDING if padding is None else padding)
class IterableAdapter:
    """Reenterable iterable built from a zero-argument factory.

    Every ``iter()`` call invokes the factory again, so the same instance
    can be iterated any number of times (https://stackoverflow.com/a/39564774).
    """

    def __init__(self, iterable_factory, length=None):
        # length is unknown (None) unless a subclass provides it
        self.iterable_factory = iterable_factory
        self.length = length

    def __iter__(self):
        return iter(self.iterable_factory())
class IterableAdapterWithLength(IterableAdapter):
    """An :class:`IterableAdapter` that additionally reports ``len()``."""

    def __init__(self, iterable_factory, length):
        super().__init__(iterable_factory)
        self.length = length

    def __len__(self):
        return self.length
class Transformations:
    """Convert one reenterable iterable to another reenterable iterable.

    Every transformation is a static method that takes a reenterable
    iterable of conformations (dicts with at least 'species' and
    'coordinates') and returns a transformed iterable.  When the input
    supports ``len()``, the known length is propagated through
    ``IterableAdapterWithLength``.
    """

    @staticmethod
    def species_to_indices(reenterable_iterable, species_order=('H', 'C', 'N', 'O', 'F', 'S', 'Cl')):
        """Convert element symbols to integer indices.

        With the special value ``species_order='periodic_table'``, symbols
        are mapped to atomic numbers instead of positions in the tuple.
        """
        if species_order == 'periodic_table':
            species_order = utils.PERIODIC_TABLE
        idx = {k: i for i, k in enumerate(species_order)}

        def reenterable_iterable_factory():
            for d in reenterable_iterable:
                d['species'] = numpy.array([idx[s] for s in d['species']], dtype='i8')
                yield d
        try:
            return IterableAdapterWithLength(reenterable_iterable_factory, len(reenterable_iterable))
        except TypeError:
            # the input iterable has no len(); return a length-less adapter
            return IterableAdapter(reenterable_iterable_factory)

    @staticmethod
    def subtract_self_energies(reenterable_iterable, self_energies=None, species_order=None):
        """Subtract atomic self energies from every conformation's energy.

        ``self_energies`` is either a dict mapping species to self energy,
        or a :class:`torchani.utils.EnergyShifter`; in the latter case the
        self energies (and optionally an intercept) are obtained by linear
        least squares on per-species atom counts vs. total energies, and
        are stored back into the shifter.
        """
        intercept = 0.0
        shape_inference = False
        if isinstance(self_energies, utils.EnergyShifter):
            shape_inference = True
            shifter = self_energies
            self_energies = {}
            # counts[s][i] = number of atoms of species s in molecule i
            counts = {}
            Y = []
            for n, d in enumerate(reenterable_iterable):
                species = d['species']
                count = Counter()
                for s in species:
                    count[s] += 1
                for s, c in count.items():
                    if s not in counts:
                        counts[s] = [0] * n
                    counts[s].append(c)
                for s in counts:
                    if len(counts[s]) != n + 1:
                        counts[s].append(0)
                Y.append(d['energies'])
            num_mols = len(Y)  # n is the last 0-based loop index, not a count
            # sort based on the order in periodic table by default
            if species_order is None:
                species_order = utils.PERIODIC_TABLE
            species = sorted(list(counts.keys()), key=lambda x: species_order.index(x))
            X = [counts[s] for s in species]
            if shifter.fit_intercept:
                # BUG FIX: the intercept column must have one entry per
                # molecule; the original ``[1] * n`` was one row short and
                # produced a ragged design matrix.
                X.append([1] * num_mols)
            X = numpy.array(X).transpose()
            Y = numpy.array(Y)
            if Y.shape[0] == 0:
                raise RuntimeError("subtract_self_energies could not find any energies in the provided dataset.\n"
                                   "Please make sure the path provided to data.load() points to a dataset has energies and is not empty or corrupted.")
            sae, _, _, _ = numpy.linalg.lstsq(X, Y, rcond=None)
            sae_ = sae
            if shifter.fit_intercept:
                intercept = sae[-1]
                sae_ = sae[:-1]
            for s, e in zip(species, sae_):
                self_energies[s] = e
            # store the fitted self energies back into the caller's shifter
            shifter.__init__(sae, shifter.fit_intercept)
        gc.collect()

        def reenterable_iterable_factory():
            for d in reenterable_iterable:
                e = intercept
                for s in d['species']:
                    e += self_energies[s]
                d['energies'] -= e
                yield d
        if shape_inference:
            # BUG FIX: report the number of molecules (n + 1), not the last
            # 0-based index n, as the dataset length.
            return IterableAdapterWithLength(reenterable_iterable_factory, n + 1)
        return IterableAdapter(reenterable_iterable_factory)

    @staticmethod
    def remove_outliers(reenterable_iterable, threshold1=15.0, threshold2=8.0):
        """Drop conformations with anomalously large (zero-centred) energies.

        Must run after ``subtract_self_energies`` so that energies are
        roughly centred at zero.  (The original code tried to assert this
        with ``assert 'subtract_self_energies', ...`` -- a non-empty string
        literal, which is always true -- so no runtime check is possible
        here; callers are responsible for the ordering.)
        """
        # pass 1: remove everything that has per-atom energy > threshold1

        def scaled_energy(x):
            num_atoms = len(x['species'])
            return abs(x['energies']) / math.sqrt(num_atoms)
        filtered = IterableAdapter(lambda: (x for x in reenterable_iterable if scaled_energy(x) < threshold1))

        # pass 2: compute those that are outside the mean by threshold2 * std
        n = 0
        mean = 0
        std = 0
        for m in filtered:
            n += 1
            mean += m['energies']
            std += m['energies'] ** 2
        mean /= n
        std = math.sqrt(std / n - mean ** 2)
        return IterableAdapter(lambda: filter(lambda x: abs(x['energies'] - mean) < threshold2 * std, filtered))

    @staticmethod
    def shuffle(reenterable_iterable):
        """Materialize the dataset into a list and shuffle it in place."""
        list_ = list(reenterable_iterable)
        del reenterable_iterable
        gc.collect()
        random.shuffle(list_)
        return list_

    @staticmethod
    def cache(reenterable_iterable):
        """Materialize the (possibly lazy) iterable into a list."""
        ret = list(reenterable_iterable)
        del reenterable_iterable
        gc.collect()
        return ret

    @staticmethod
    def collate(reenterable_iterable, batch_size, padding=None):
        """Group conformations into padded, stacked batches of *batch_size*."""
        def reenterable_iterable_factory(padding=None):
            batch = []
            i = 0
            for d in reenterable_iterable:
                batch.append(d)
                i += 1
                if i == batch_size:
                    i = 0
                    yield collate_fn(batch, padding)
                    batch = []
            if len(batch) > 0:
                # final, possibly smaller, batch
                yield collate_fn(batch, padding)
        reenterable_iterable_factory = functools.partial(reenterable_iterable_factory,
                                                    padding)
        try:
            length = (len(reenterable_iterable) + batch_size - 1) // batch_size
            return IterableAdapterWithLength(reenterable_iterable_factory, length)
        except TypeError:
            return IterableAdapter(reenterable_iterable_factory)

    @staticmethod
    def pin_memory(reenterable_iterable):
        """Copy every tensor of every batch into pinned (page-locked) memory."""
        def reenterable_iterable_factory():
            for d in reenterable_iterable:
                yield {k: d[k].pin_memory() for k in d}
        try:
            return IterableAdapterWithLength(reenterable_iterable_factory, len(reenterable_iterable))
        except TypeError:
            return IterableAdapter(reenterable_iterable_factory)
class TransformableIterable:
    """Iterable wrapper that chains Transformations and records their names.

    Attribute access for an unknown name is resolved against the
    ``Transformations`` class: ``it.shuffle()`` wraps the result in a new
    TransformableIterable tagged with ``'shuffle'``.
    """

    def __init__(self, wrapped_iterable, transformations=()):
        self.wrapped_iterable = wrapped_iterable
        # tuple of transformation names applied so far, in order
        self.transformations = transformations

    def __iter__(self):
        return iter(self.wrapped_iterable)

    def __getattr__(self, name):
        transformation = getattr(Transformations, name)

        @functools.wraps(transformation)
        def f(*args, **kwargs):
            return TransformableIterable(
                transformation(self.wrapped_iterable, *args, **kwargs),
                self.transformations + (name,))
        return f

    def split(self, *nums):
        """Split into consecutive pieces; a None ratio takes all the rest."""
        length = len(self)
        source = iter(self)
        pieces = []
        for num in nums:
            chunk = []
            if num is None:
                chunk.extend(source)
            else:
                for _ in range(int(num * length)):
                    chunk.append(next(source))
            pieces.append(TransformableIterable(chunk, self.transformations + ('split',)))
        del source
        gc.collect()
        return pieces

    def __len__(self):
        return len(self.wrapped_iterable)
def load(path, additional_properties=()):
    """Load ANI-format HDF5 data from *path* (a file or a directory tree).

    Returns a TransformableIterable over individual conformations, i.e.
    dicts holding 'species', 'coordinates' and whichever keys from
    PROPERTIES + additional_properties are present in the files.
    """
    properties = PROPERTIES + additional_properties

    def h5_files(path):
        """yield file name of all h5 files in a path"""
        if isdir(path):
            for f in os.listdir(path):
                f = join(path, f)
                yield from h5_files(f)  # recurse into subdirectories
        elif isfile(path) and (path.endswith('.h5') or path.endswith('.hdf5')):
            yield path

    def molecules():
        # yields one group ("molecule") at a time across all h5 files,
        # with an optional pkbar progress bar per file
        for f in h5_files(path):
            anidata = anidataloader(f)
            anidata_size = anidata.group_size()
            use_pbar = PKBAR_INSTALLED and verbose
            if use_pbar:
                pbar = pkbar.Pbar('=> loading {}, total molecules: {}'.format(f, anidata_size), anidata_size)
            for i, m in enumerate(anidata):
                yield m
                if use_pbar:
                    pbar.update(i)

    def conformations():
        # flattens each molecule's conformer axis into individual samples
        for m in molecules():
            species = m['species']
            coordinates = m['coordinates']
            for i in range(coordinates.shape[0]):
                ret = {'species': species, 'coordinates': coordinates[i]}
                for k in properties:
                    if k in m:
                        ret[k] = m[k][i]
                yield ret

    return TransformableIterable(IterableAdapter(lambda: conformations()))
__all__ = ['load', 'collate_fn']
| [
"os.listdir",
"random.shuffle",
"importlib.util.find_spec",
"math.sqrt",
"os.path.join",
"functools.wraps",
"collections.Counter",
"numpy.array",
"os.path.isfile",
"os.path.isdir",
"functools.partial",
"gc.collect",
"numpy.linalg.lstsq"
] | [((4524, 4557), 'importlib.util.find_spec', 'importlib.util.find_spec', (['"""pkbar"""'], {}), "('pkbar')\n", (4548, 4557), False, 'import importlib\n'), ((8171, 8183), 'gc.collect', 'gc.collect', ([], {}), '()\n', (8181, 8183), False, 'import gc\n'), ((9419, 9449), 'math.sqrt', 'math.sqrt', (['(std / n - mean ** 2)'], {}), '(std / n - mean ** 2)\n', (9428, 9449), False, 'import math\n'), ((9706, 9718), 'gc.collect', 'gc.collect', ([], {}), '()\n', (9716, 9718), False, 'import gc\n'), ((9727, 9748), 'random.shuffle', 'random.shuffle', (['list_'], {}), '(list_)\n', (9741, 9748), False, 'import random\n'), ((9908, 9920), 'gc.collect', 'gc.collect', ([], {}), '()\n', (9918, 9920), False, 'import gc\n'), ((10485, 10541), 'functools.partial', 'functools.partial', (['reenterable_iterable_factory', 'padding'], {}), '(reenterable_iterable_factory, padding)\n', (10502, 10541), False, 'import functools\n'), ((11633, 11664), 'functools.wraps', 'functools.wraps', (['transformation'], {}), '(transformation)\n', (11648, 11664), False, 'import functools\n'), ((12368, 12380), 'gc.collect', 'gc.collect', ([], {}), '()\n', (12378, 12380), False, 'import gc\n'), ((12656, 12667), 'os.path.isdir', 'isdir', (['path'], {}), '(path)\n', (12661, 12667), False, 'from os.path import join, isfile, isdir\n'), ((7518, 7532), 'numpy.array', 'numpy.array', (['Y'], {}), '(Y)\n', (7529, 7532), False, 'import numpy\n'), ((7859, 7895), 'numpy.linalg.lstsq', 'numpy.linalg.lstsq', (['X', 'Y'], {'rcond': 'None'}), '(X, Y, rcond=None)\n', (7877, 7895), False, 'import numpy\n'), ((12690, 12706), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (12700, 12706), False, 'import os\n'), ((5904, 5959), 'numpy.array', 'numpy.array', (["[idx[s] for s in d['species']]"], {'dtype': '"""i8"""'}), "([idx[s] for s in d['species']], dtype='i8')\n", (5915, 5959), False, 'import numpy\n'), ((6683, 6692), 'collections.Counter', 'Counter', ([], {}), '()\n', (6690, 6692), False, 'from collections import 
Counter\n'), ((9011, 9031), 'math.sqrt', 'math.sqrt', (['num_atoms'], {}), '(num_atoms)\n', (9020, 9031), False, 'import math\n'), ((12728, 12741), 'os.path.join', 'join', (['path', 'f'], {}), '(path, f)\n', (12732, 12741), False, 'from os.path import join, isfile, isdir\n'), ((12794, 12806), 'os.path.isfile', 'isfile', (['path'], {}), '(path)\n', (12800, 12806), False, 'from os.path import join, isfile, isdir\n'), ((7475, 7489), 'numpy.array', 'numpy.array', (['X'], {}), '(X)\n', (7486, 7489), False, 'import numpy\n')] |
import numpy as np
from torch.autograd import Variable
import torch as torch
import copy
from torch.autograd.gradcheck import zero_gradients
def deepfool(image, net, num_classes, overshoot, max_iter):

    """Compute a minimal adversarial perturbation via the DeepFool algorithm.

       :param image: Image of size HxWx3
       :param net: network (input: images, output: values of activation **BEFORE** softmax).
       :param num_classes: num_classes (limits the number of classes to test against, by default = 10)
       :param overshoot: used as a termination criterion to prevent vanishing updates (default = 0.02).
       :param max_iter: maximum number of iterations for deepfool (default = 50)
       :return: tuple ``(r_tot, loop_i, label, k_i, pert_image)`` -- the
           accumulated perturbation scaled by (1 + overshoot), the number of
           iterations used, the original top-1 label, the new estimated
           label, and the perturbed image.
    """
    is_cuda = torch.cuda.is_available()

    if is_cuda:
        image = image.cuda()
        net = net.cuda()

    # forward pass on the clean image; keep only the top num_classes logits
    f_image = net.forward(Variable(image[None, :, :, :], requires_grad=True)).data.cpu().numpy().flatten()
    I = f_image.argsort()[::-1]

    I = I[0:num_classes]
    label = I[0]  # original top-1 prediction

    input_shape = image.cpu().numpy().shape
    pert_image = copy.deepcopy(image)
    w = np.zeros(input_shape)      # direction of the chosen decision boundary
    r_tot = np.zeros(input_shape)  # accumulated perturbation

    loop_i = 0

    x = Variable(pert_image[None, :], requires_grad=True)
    fs = net.forward(x)
    k_i = label

    # iterate until the predicted label flips or max_iter is reached
    while k_i == label and loop_i < max_iter:

        pert = np.inf
        fs[0, I[0]].backward(retain_graph=True)
        grad_orig = x.grad.data.cpu().numpy().copy()

        for k in range(1, num_classes):
            # NOTE(review): zero_gradients was removed from
            # torch.autograd.gradcheck in recent PyTorch releases --
            # confirm the pinned torch version supports this import.
            zero_gradients(x)

            fs[0, I[k]].backward(retain_graph=True)
            cur_grad = x.grad.data.cpu().numpy().copy()

            # set new w_k and new f_k
            w_k = cur_grad - grad_orig
            f_k = (fs[0, I[k]] - fs[0, I[0]]).data.cpu().numpy()

            # distance to the linearized boundary between class I[k] and I[0]
            pert_k = abs(f_k)/np.linalg.norm(w_k.flatten())

            # determine which w_k to use
            if pert_k < pert:
                pert = pert_k
                w = w_k

        # compute r_i and r_tot
        # Added 1e-4 for numerical stability
        r_i = (pert+1e-4) * w / np.linalg.norm(w)
        r_tot = np.float32(r_tot + r_i)

        if is_cuda:
            pert_image = image + (1+overshoot)*torch.from_numpy(r_tot).cuda()
        else:
            pert_image = image + (1+overshoot)*torch.from_numpy(r_tot)

        # re-evaluate the network on the perturbed image
        x = Variable(pert_image, requires_grad=True)
        fs = net.forward(x)
        k_i = np.argmax(fs.data.cpu().numpy().flatten())

        loop_i += 1

    return (1+overshoot)*r_tot, loop_i, label, k_i, pert_image
| [
"torch.autograd.gradcheck.zero_gradients",
"numpy.linalg.norm",
"torch.from_numpy",
"numpy.zeros",
"torch.cuda.is_available",
"copy.deepcopy",
"torch.autograd.Variable",
"numpy.float32"
] | [((799, 824), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (822, 824), True, 'import torch as torch\n'), ((1140, 1160), 'copy.deepcopy', 'copy.deepcopy', (['image'], {}), '(image)\n', (1153, 1160), False, 'import copy\n'), ((1169, 1190), 'numpy.zeros', 'np.zeros', (['input_shape'], {}), '(input_shape)\n', (1177, 1190), True, 'import numpy as np\n'), ((1203, 1224), 'numpy.zeros', 'np.zeros', (['input_shape'], {}), '(input_shape)\n', (1211, 1224), True, 'import numpy as np\n'), ((1250, 1299), 'torch.autograd.Variable', 'Variable', (['pert_image[None, :]'], {'requires_grad': '(True)'}), '(pert_image[None, :], requires_grad=True)\n', (1258, 1299), False, 'from torch.autograd import Variable\n'), ((2166, 2189), 'numpy.float32', 'np.float32', (['(r_tot + r_i)'], {}), '(r_tot + r_i)\n', (2176, 2189), True, 'import numpy as np\n'), ((2387, 2427), 'torch.autograd.Variable', 'Variable', (['pert_image'], {'requires_grad': '(True)'}), '(pert_image, requires_grad=True)\n', (2395, 2427), False, 'from torch.autograd import Variable\n'), ((1564, 1581), 'torch.autograd.gradcheck.zero_gradients', 'zero_gradients', (['x'], {}), '(x)\n', (1578, 1581), False, 'from torch.autograd.gradcheck import zero_gradients\n'), ((2132, 2149), 'numpy.linalg.norm', 'np.linalg.norm', (['w'], {}), '(w)\n', (2146, 2149), True, 'import numpy as np\n'), ((2350, 2373), 'torch.from_numpy', 'torch.from_numpy', (['r_tot'], {}), '(r_tot)\n', (2366, 2373), True, 'import torch as torch\n'), ((2258, 2281), 'torch.from_numpy', 'torch.from_numpy', (['r_tot'], {}), '(r_tot)\n', (2274, 2281), True, 'import torch as torch\n'), ((922, 972), 'torch.autograd.Variable', 'Variable', (['image[None, :, :, :]'], {'requires_grad': '(True)'}), '(image[None, :, :, :], requires_grad=True)\n', (930, 972), False, 'from torch.autograd import Variable\n')] |
#!/usr/bin/env python
# encoding: utf-8
# The MIT License (MIT)
# Copyright (c) 2018-2020 CNRS
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# AUTHORS
# Hervé Bredin - http://herve.niderb.fr
"""Speech activity detection"""
from typing import Optional
from typing import Text
import numpy as np
import torch
import torch.nn as nn
from .base import LabelingTask
from .base import LabelingTaskGenerator
from pyannote.audio.train.task import Task, TaskType, TaskOutput
from ..gradient_reversal import GradientReversal
from pyannote.audio.models.models import RNN
from pyannote.audio.features.wrapper import Wrappable
from pyannote.database import Protocol
from pyannote.database import Subset
from pyannote.audio.train.model import Resolution
from pyannote.audio.train.model import Alignment
class SpeechActivityDetectionGenerator(LabelingTaskGenerator):
    """Batch generator for training speech activity detection

    Parameters
    ----------
    task : Task
        Task
    feature_extraction : Wrappable
        Describes how features should be obtained.
        See pyannote.audio.features.wrapper.Wrapper documentation for details.
    protocol : Protocol
    subset : {'train', 'development', 'test'}, optional
        Protocol and subset.
    resolution : `pyannote.core.SlidingWindow`, optional
        Override `feature_extraction.sliding_window`. This is useful for
        models that include the feature extraction step (e.g. SincNet) and
        therefore output a lower sample rate than that of the input.
        Defaults to `feature_extraction.sliding_window`
    alignment : {'center', 'loose', 'strict'}, optional
        Which mode to use when cropping labels. This is useful for models that
        include the feature extraction step (e.g. SincNet) and therefore use a
        different cropping mode. Defaults to 'center'.
    duration : float, optional
        Duration of audio chunks. Defaults to 2s.
    batch_size : int, optional
        Batch size. Defaults to 32.
    per_epoch : float, optional
        Force total audio duration per epoch, in days.
        Defaults to total duration of protocol subset.
    mask : str, optional
        When provided, protocol files are expected to contain a key named after
        this `mask` variable and providing a `SlidingWindowFeature` instance.
        Generated batches will contain an additional "mask" key (on top of
        existing "X" and "y" keys) computed as an excerpt of `current_file[mask]`
        time-aligned with "y". Defaults to not add any "mask" key.
    """

    def __init__(
        self,
        task: Task,
        feature_extraction: Wrappable,
        protocol: Protocol,
        subset: Subset = "train",
        resolution: Optional[Resolution] = None,
        alignment: Optional[Alignment] = None,
        duration: float = 2.0,
        batch_size: int = 32,
        per_epoch: float = None,
        mask: Text = None,
    ):
        super().__init__(
            task,
            feature_extraction,
            protocol,
            subset=subset,
            resolution=resolution,
            alignment=alignment,
            duration=duration,
            batch_size=batch_size,
            per_epoch=per_epoch,
            exhaustive=False,
            mask=mask,
            local_labels=True,
        )

    def postprocess_y(self, Y: np.ndarray) -> np.ndarray:
        """Generate labels for speech activity detection

        Parameters
        ----------
        Y : (n_samples, n_speakers) numpy.ndarray
            Discretized annotation returned by
            `pyannote.core.utils.numpy.one_hot_encoding`.

        Returns
        -------
        y : (n_samples, 1) numpy.ndarray

        See also
        --------
        `pyannote.core.utils.numpy.one_hot_encoding`
        """
        # number of speakers active in each frame
        speaker_count = np.sum(Y, axis=1, keepdims=True)

        # a frame is "speech" iff at least one speaker is active.
        # FIX: use .astype(np.int64) instead of calling np.int64(...) on an
        # array — numpy scalar types are not meant to be used as array
        # converters and that legacy behaviour is deprecated in recent numpy
        # releases; the result (an int64 array of 0/1) is unchanged.
        return (speaker_count > 0).astype(np.int64)

    @property
    def specifications(self):
        # task specification consumed by the training loop: input feature
        # dimension and the binary speech / non-speech target classes
        specs = {
            "task": self.task,
            "X": {"dimension": self.feature_extraction.dimension},
            "y": {"classes": ["non_speech", "speech"]},
        }

        for key, classes in self.file_labels_.items():
            # TODO. add an option to handle this list
            # TODO. especially useful for domain-adversarial stuff
            # skip file metadata that cannot serve as classification targets
            if key in ["duration", "audio", "uri"]:
                continue
            specs[key] = {"classes": classes}

        return specs
class SpeechActivityDetection(LabelingTask):
    """Train speech activity (and overlap) detection

    Parameters
    ----------
    duration : float, optional
        Duration of sub-sequences. Defaults to 3.2s.
    batch_size : int, optional
        Batch size. Defaults to 32.
    per_epoch : float, optional
        Total audio duration per epoch, in days.
        Defaults to one day (1).
    """

    def get_batch_generator(
        self,
        feature_extraction,
        protocol,
        subset: Subset = "train",
        resolution=None,
        alignment=None,
    ):
        """Build the batch generator used to train this task.

        resolution : `pyannote.core.SlidingWindow`, optional
            Override `feature_extraction.sliding_window`. This is useful for
            models that include the feature extraction step (e.g. SincNet) and
            therefore output a lower sample rate than that of the input.
        alignment : {'center', 'loose', 'strict'}, optional
            Which mode to use when cropping labels. This is useful for models
            that include the feature extraction step (e.g. SincNet) and
            therefore use a different cropping mode. Defaults to 'center'.
        """
        # chunking/batching options come from the task configuration itself
        generator_options = dict(
            subset=subset,
            resolution=resolution,
            alignment=alignment,
            duration=self.duration,
            per_epoch=self.per_epoch,
            batch_size=self.batch_size,
        )
        return SpeechActivityDetectionGenerator(
            self.task, feature_extraction, protocol, **generator_options
        )
class DomainAwareSpeechActivityDetection(SpeechActivityDetection):
    """Domain-aware speech activity detection

    Trains speech activity detection and domain classification jointly.

    Parameters
    ----------
    domain : `str`, optional
        Batch key to use as domain. Defaults to 'domain'.
        Could be 'database' or 'uri' for instance.
    attachment : `int`, optional
        Intermediate level where to attach the domain classifier.
        Defaults to -1. Passed to `return_intermediate` in models supporting it.
    rnn: `dict`, optional
        Parameters of the RNN used in the domain classifier.
        See `pyannote.audio.models.models.RNN` for details.
    domain_loss : `str`, optional
        Loss function to use. Defaults to 'NLLLoss'.
    """

    # template for the path where the domain classifier weights are saved
    DOMAIN_PT = "{train_dir}/weights/{epoch:04d}.domain.pt"

    def __init__(
        self, domain="domain", attachment=-1, rnn=None, domain_loss="NLLLoss", **kwargs
    ):
        super().__init__(**kwargs)
        self.domain = domain
        self.attachment = attachment

        if rnn is None:
            rnn = dict()
        self.rnn = rnn

        self.domain_loss = domain_loss
        if self.domain_loss == "NLLLoss":
            # Default value
            # NLLLoss expects log-probabilities, hence LogSoftmax activation
            self.domain_loss_ = nn.NLLLoss()
            self.activation_ = nn.LogSoftmax(dim=1)

        elif self.domain_loss == "MSELoss":
            # MSELoss is computed against one-hot targets (see subclass
            # batch_loss); Sigmoid keeps scores in [0, 1]
            self.domain_loss_ = nn.MSELoss()
            self.activation_ = nn.Sigmoid()

        else:
            msg = f"{domain_loss} has not been implemented yet."
            raise NotImplementedError(msg)

    def more_parameters(self):
        """Initialize trainable trainer parameters

        Builds the domain classifier (RNN head + linear layer) on top of the
        model's intermediate representation at `self.attachment`.

        Yields
        ------
        parameter : nn.Parameter
            Trainable trainer parameters
        """
        domain_classifier_rnn = RNN(
            n_features=self.model.intermediate_dimension(self.attachment), **self.rnn
        )

        # one output unit per domain class found in the protocol files
        n_classes = len(self.specifications[self.domain]["classes"])
        domain_classifier_linear = nn.Linear(
            domain_classifier_rnn.dimension, n_classes, bias=True
        ).to(self.device)

        self.domain_classifier_ = nn.Sequential(
            domain_classifier_rnn, domain_classifier_linear
        ).to(self.device)

        # TODO: check if we really need to do this .to(self.device) twice

        return self.domain_classifier_.parameters()

    def load_more(self, model_pt=None) -> bool:
        """Load classifier from disk

        When `model_pt` is None, the path is derived from the current
        training directory and epoch; otherwise the domain weights are
        expected next to `model_pt` with a '.domain.pt' suffix.
        """
        if model_pt is None:
            domain_pt = self.DOMAIN_PT.format(
                train_dir=self.train_dir_, epoch=self.epoch_
            )
        else:
            domain_pt = model_pt.with_suffix(".domain.pt")

        # map_location keeps tensors on their original (CPU) storage so
        # weights saved on GPU can be loaded anywhere
        domain_classifier_state = torch.load(
            domain_pt, map_location=lambda storage, loc: storage
        )
        self.domain_classifier_.load_state_dict(domain_classifier_state)

        # FIXME add support for different domains
        return True

    def save_more(self):
        """Save domain classifier to disk"""
        domain_pt = self.DOMAIN_PT.format(train_dir=self.train_dir_, epoch=self.epoch_)
        torch.save(self.domain_classifier_.state_dict(), domain_pt)

    def batch_loss(self, batch):
        """Compute loss for current `batch`

        Combines the speech activity detection loss with a domain
        classification loss computed on the model's intermediate
        representation.

        Parameters
        ----------
        batch : `dict`
            ['X'] (`numpy.ndarray`)
            ['y'] (`numpy.ndarray`)

        Returns
        -------
        batch_loss : `dict`
            ['loss'] (`torch.Tensor`) : Loss
        """
        # forward pass; `intermediate` is the representation at
        # `self.attachment`, fed to the domain classifier below
        X = torch.tensor(batch["X"], dtype=torch.float32, device=self.device_)
        fX, intermediate = self.model_(X, return_intermediate=self.attachment)

        # speech activity detection
        fX = fX.view((-1, self.n_classes_))

        target = (
            torch.tensor(batch["y"], dtype=torch.int64, device=self.device_)
            .contiguous()
            .view((-1,))
        )

        # optional per-class weights (from the parent task, if configured)
        weight = self.weight
        if weight is not None:
            weight = weight.to(device=self.device_)

        loss = self.loss_func_(fX, target, weight=weight)

        # domain classification
        domain_target = torch.tensor(
            batch[self.domain], dtype=torch.int64, device=self.device_
        )

        domain_scores = self.activation_(self.domain_classifier_(intermediate))

        domain_loss = self.domain_loss_(domain_scores, domain_target)

        # both losses are summed with equal weight here (contrast with the
        # adversarial subclass, which scales the domain loss by alpha)
        return {
            "loss": loss + domain_loss,
            "loss_domain": domain_loss,
            "loss_task": loss,
        }
class DomainAdversarialSpeechActivityDetection(DomainAwareSpeechActivityDetection):
    """Domain Adversarial speech activity detection

    Like `DomainAwareSpeechActivityDetection`, but the domain loss gradient
    is passed through a gradient reversal layer before reaching the shared
    layers, scaled by `alpha`.

    Parameters
    ----------
    domain : `str`, optional
        Batch key to use as domain. Defaults to 'domain'.
        Could be 'database' or 'uri' for instance.
    attachment : `int`, optional
        Intermediate level where to attach the domain classifier.
        Defaults to -1. Passed to `return_intermediate` in models supporting it.
    alpha : `float`, optional
        Coefficient multiplied with the domain loss
    """

    def __init__(self, domain="domain", attachment=-1, alpha=1.0, **kwargs):
        super().__init__(domain=domain, attachment=attachment, **kwargs)
        self.alpha = alpha
        self.gradient_reversal_ = GradientReversal()

    def batch_loss(self, batch):
        """Compute loss for current `batch`

        Parameters
        ----------
        batch : `dict`
            ['X'] (`numpy.ndarray`)
            ['y'] (`numpy.ndarray`)

        Returns
        -------
        batch_loss : `dict`
            ['loss'] (`torch.Tensor`) : Loss
        """
        # run the model, keeping the intermediate representation at
        # `self.attachment` for the domain classifier
        sequences = torch.tensor(batch["X"], dtype=torch.float32, device=self.device_)
        scores, hidden = self.model_(sequences, return_intermediate=self.attachment)

        # speech activity detection loss
        scores = scores.view((-1, self.n_classes_))
        labels = torch.tensor(batch["y"], dtype=torch.int64, device=self.device_)
        labels = labels.contiguous().view((-1,))

        weight = self.weight
        weight = None if weight is None else weight.to(device=self.device_)
        task_loss = self.loss_func_(scores, labels, weight=weight)

        # domain classification loss; gradients flowing back through
        # `hidden` are reversed by the gradient reversal layer
        domain_target = torch.tensor(
            batch[self.domain], dtype=torch.int64, device=self.device_
        )
        domain_scores = self.activation_(
            self.domain_classifier_(self.gradient_reversal_(hidden))
        )

        if self.domain_loss == "MSELoss":
            # one-hot encode domain_target for Mean Squared Error Loss
            n_domains = domain_scores.shape[1]
            one_hot = torch.sparse.torch.eye(n_domains, device=self.device_)
            domain_target = one_hot.index_select(dim=0, index=domain_target)

        domain_loss = self.domain_loss_(domain_scores, domain_target)

        return {
            "loss": task_loss + self.alpha * domain_loss,
            "loss_domain": domain_loss,
            "loss_task": task_loss,
        }
| [
"torch.nn.Sigmoid",
"numpy.int64",
"torch.nn.Sequential",
"torch.load",
"numpy.sum",
"torch.tensor",
"torch.nn.MSELoss",
"torch.nn.NLLLoss",
"torch.sparse.torch.eye",
"torch.nn.LogSoftmax",
"torch.nn.Linear"
] | [((4880, 4912), 'numpy.sum', 'np.sum', (['Y'], {'axis': '(1)', 'keepdims': '(True)'}), '(Y, axis=1, keepdims=True)\n', (4886, 4912), True, 'import numpy as np\n'), ((4967, 4994), 'numpy.int64', 'np.int64', (['(speaker_count > 0)'], {}), '(speaker_count > 0)\n', (4975, 4994), True, 'import numpy as np\n'), ((9820, 9884), 'torch.load', 'torch.load', (['domain_pt'], {'map_location': '(lambda storage, loc: storage)'}), '(domain_pt, map_location=lambda storage, loc: storage)\n', (9830, 9884), False, 'import torch\n'), ((10645, 10711), 'torch.tensor', 'torch.tensor', (["batch['X']"], {'dtype': 'torch.float32', 'device': 'self.device_'}), "(batch['X'], dtype=torch.float32, device=self.device_)\n", (10657, 10711), False, 'import torch\n'), ((11257, 11329), 'torch.tensor', 'torch.tensor', (['batch[self.domain]'], {'dtype': 'torch.int64', 'device': 'self.device_'}), '(batch[self.domain], dtype=torch.int64, device=self.device_)\n', (11269, 11329), False, 'import torch\n'), ((12816, 12882), 'torch.tensor', 'torch.tensor', (["batch['X']"], {'dtype': 'torch.float32', 'device': 'self.device_'}), "(batch['X'], dtype=torch.float32, device=self.device_)\n", (12828, 12882), False, 'import torch\n'), ((13431, 13503), 'torch.tensor', 'torch.tensor', (['batch[self.domain]'], {'dtype': 'torch.int64', 'device': 'self.device_'}), '(batch[self.domain], dtype=torch.int64, device=self.device_)\n', (13443, 13503), False, 'import torch\n'), ((8343, 8355), 'torch.nn.NLLLoss', 'nn.NLLLoss', ([], {}), '()\n', (8353, 8355), True, 'import torch.nn as nn\n'), ((8387, 8407), 'torch.nn.LogSoftmax', 'nn.LogSoftmax', ([], {'dim': '(1)'}), '(dim=1)\n', (8400, 8407), True, 'import torch.nn as nn\n'), ((13843, 13898), 'torch.sparse.torch.eye', 'torch.sparse.torch.eye', (['nb_domains'], {'device': 'self.device_'}), '(nb_domains, device=self.device_)\n', (13865, 13898), False, 'import torch\n'), ((8485, 8497), 'torch.nn.MSELoss', 'nn.MSELoss', ([], {}), '()\n', (8495, 8497), True, 'import torch.nn as 
nn\n'), ((8529, 8541), 'torch.nn.Sigmoid', 'nn.Sigmoid', ([], {}), '()\n', (8539, 8541), True, 'import torch.nn as nn\n'), ((9104, 9168), 'torch.nn.Linear', 'nn.Linear', (['domain_classifier_rnn.dimension', 'n_classes'], {'bias': '(True)'}), '(domain_classifier_rnn.dimension, n_classes, bias=True)\n', (9113, 9168), True, 'import torch.nn as nn\n'), ((9242, 9304), 'torch.nn.Sequential', 'nn.Sequential', (['domain_classifier_rnn', 'domain_classifier_linear'], {}), '(domain_classifier_rnn, domain_classifier_linear)\n', (9255, 9304), True, 'import torch.nn as nn\n'), ((10903, 10967), 'torch.tensor', 'torch.tensor', (["batch['y']"], {'dtype': 'torch.int64', 'device': 'self.device_'}), "(batch['y'], dtype=torch.int64, device=self.device_)\n", (10915, 10967), False, 'import torch\n'), ((13076, 13140), 'torch.tensor', 'torch.tensor', (["batch['y']"], {'dtype': 'torch.int64', 'device': 'self.device_'}), "(batch['y'], dtype=torch.int64, device=self.device_)\n", (13088, 13140), False, 'import torch\n')] |
import os
import re
import dgl
import numpy as np
from data import *
def get_edgelists(edgelist_expression, directory):
    """Resolve an edge-list specification to a list of file names.

    If `edgelist_expression` contains commas, it is treated as an explicit
    comma-separated list and returned as-is (split on ','). Otherwise it is
    treated as a regular expression and matched (via `re.match`, i.e.
    anchored at the start) against the entries of `directory`.
    """
    if "," in edgelist_expression:
        return edgelist_expression.split(",")
    pattern = re.compile(edgelist_expression)
    return list(filter(pattern.match, os.listdir(directory)))
def construct_graph(training_dir, edges, nodes, target_node_type, heterogeneous=True):
    """Build a DGL graph from edge-list and node files under `training_dir`.

    Parameters
    ----------
    training_dir : str
        Directory containing the edge-list and node files.
    edges : list of str
        Edge-list file names (relative to `training_dir`).
    nodes : str
        Node/feature file name (relative to `training_dir`).
    target_node_type : str
        Node type to rename to 'target' and attach features to.
    heterogeneous : bool, optional
        If True, build a heterograph with one relation per edge list plus
        reverse and self relations; otherwise build a homogeneous graph
        from the first edge list only.

    Returns
    -------
    (g, features, id_to_node) : the constructed graph, the target-node
        feature tensor/array, and the node-name -> node-id mapping for the
        target node type.

    NOTE(review): `parse_edgelist`, `get_features` and `read_edges` come
    from `from data import *`; their exact return semantics are assumed
    from usage here — confirm against the `data` module.
    """
    if heterogeneous:
        print("Getting relation graphs from the following edge lists : {} ".format(edges))
        edgelists, id_to_node = {}, {}
        # id_to_node is threaded through parse_edgelist calls so node ids
        # stay consistent across all edge lists
        for i, edge in enumerate(edges):
            edgelist, id_to_node, src, dst = parse_edgelist(os.path.join(training_dir, edge), id_to_node, header=True)
            # rename the target node type to the canonical 'target' name
            if src == target_node_type:
                src = 'target'
            if dst == target_node_type:
                dst = 'target'
            edgelists[(src, 'relation{}'.format(i), dst)] = edgelist
            print("Read edges for relation{} from edgelist: {}".format(i, os.path.join(training_dir, edge)))
            # reverse edge list so that relation is undirected
            edgelists[(dst, 'reverse_relation{}'.format(i), src)] = [(b, a) for a, b in edgelist]
        # get features for target nodes
        features, new_nodes = get_features(id_to_node[target_node_type], os.path.join(training_dir, nodes))
        print("Read in features for target nodes")
        # handle target nodes that have features but don't have any connections
        # if new_nodes:
        #     edgelists[('target', 'relation'.format(i+1), 'none')] = [(node, 0) for node in new_nodes]
        #     edgelists[('none', 'reverse_relation{}'.format(i + 1), 'target')] = [(0, node) for node in new_nodes]

        # add self relation
        edgelists[('target', 'self_relation', 'target')] = [(t, t) for t in id_to_node[target_node_type].values()]

        g = dgl.heterograph(edgelists)
        print(
            "Constructed heterograph with the following metagraph structure: Node types {}, Edge types{}".format(
                g.ntypes, g.canonical_etypes))
        print("Number of nodes of type target : {}".format(g.number_of_nodes('target')))

        g.nodes['target'].data['features'] = features

        # only the target node type's mapping is returned to the caller
        id_to_node = id_to_node[target_node_type]
    else:
        sources, sinks, features, id_to_node = read_edges(os.path.join(training_dir, edges[0]),
                                                           os.path.join(training_dir, nodes))
        # add self relation
        all_nodes = sorted(id_to_node.values())
        sources.extend(all_nodes)
        sinks.extend(all_nodes)

        g = dgl.graph((sources, sinks))

        if features:
            g.ndata['features'] = np.array(features).astype('float32')
        print('read graph from node list and edge list')

        # NOTE(review): if `features` is empty, g.ndata['features'] below
        # will raise a KeyError since nothing was assigned — confirm callers
        # always provide features in the homogeneous case
        features = g.ndata['features']
    return g, features, id_to_node
| [
"os.listdir",
"dgl.heterograph",
"dgl.graph",
"re.compile",
"os.path.join",
"numpy.array"
] | [((216, 237), 'os.listdir', 'os.listdir', (['directory'], {}), '(directory)\n', (226, 237), False, 'import os\n'), ((264, 295), 're.compile', 're.compile', (['edgelist_expression'], {}), '(edgelist_expression)\n', (274, 295), False, 'import re\n'), ((1942, 1968), 'dgl.heterograph', 'dgl.heterograph', (['edgelists'], {}), '(edgelists)\n', (1957, 1968), False, 'import dgl\n'), ((2696, 2723), 'dgl.graph', 'dgl.graph', (['(sources, sinks)'], {}), '((sources, sinks))\n', (2705, 2723), False, 'import dgl\n'), ((1375, 1408), 'os.path.join', 'os.path.join', (['training_dir', 'nodes'], {}), '(training_dir, nodes)\n', (1387, 1408), False, 'import os\n'), ((2409, 2445), 'os.path.join', 'os.path.join', (['training_dir', 'edges[0]'], {}), '(training_dir, edges[0])\n', (2421, 2445), False, 'import os\n'), ((2505, 2538), 'os.path.join', 'os.path.join', (['training_dir', 'nodes'], {}), '(training_dir, nodes)\n', (2517, 2538), False, 'import os\n'), ((720, 752), 'os.path.join', 'os.path.join', (['training_dir', 'edge'], {}), '(training_dir, edge)\n', (732, 752), False, 'import os\n'), ((1064, 1096), 'os.path.join', 'os.path.join', (['training_dir', 'edge'], {}), '(training_dir, edge)\n', (1076, 1096), False, 'import os\n'), ((2780, 2798), 'numpy.array', 'np.array', (['features'], {}), '(features)\n', (2788, 2798), True, 'import numpy as np\n')] |
'''
MAP Client, a program to generate detailed musculoskeletal models for OpenSim.
Copyright (C) 2012 University of Auckland
This file is part of MAP Client. (http://launchpad.net/mapclient)
MAP Client is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
MAP Client is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with MAP Client. If not, see <http://www.gnu.org/licenses/>..
'''
import os
os.environ['ETS_TOOLKIT'] = 'qt5'
from PySide2.QtWidgets import QDialog, QAbstractItemView, QTableWidgetItem
from PySide2.QtGui import QIntValidator
from PySide2.QtCore import Qt
from mapclientplugins.pelvislandmarkshjcpredictionstep.ui_hjcpredictionviewerwidget import Ui_Dialog
from traits.api import HasTraits, Instance, on_trait_change, \
Int, Dict
from gias2.mappluginutils.mayaviviewer import MayaviViewerObjectsContainer, MayaviViewerLandmark, colours
import numpy as np
class MayaviHJCPredictionViewerWidget(QDialog):
    '''
    Configure dialog to present the user with the options to configure this step.

    Displays pelvis landmarks in an embedded Mayavi scene and lets the user
    pick input landmarks, a prediction method and a population class before
    running hip-joint-centre (HJC) prediction.
    '''
    defaultColor = colours['bone']
    # column index of the landmark name/visibility column in the table widget
    objectTableHeaderColumns = {'landmarks': 0}
    backgroundColour = (0.0, 0.0, 0.0)
    # glyph settings: green spheres for ordinary landmarks, larger red
    # spheres for predicted HJCs
    _landmarkRenderArgs = {'mode': 'sphere', 'scale_factor': 5.0, 'color': (0, 1, 0)}
    _hjcRenderArgs = {'mode': 'sphere', 'scale_factor': 10.0, 'color': (1, 0, 0)}

    def __init__(self, landmarks, config, predictFunc, predMethods, popClasses, parent=None):
        '''
        Constructor

        Parameters
        ----------
        landmarks : dict
            Maps landmark names to coordinates; expected to contain
            'HJC_left' and 'HJC_right' entries (see _initViewerObjects).
        config : dict
            Step configuration; updated in place by the combo box handlers.
        predictFunc : callable
            Runs the HJC prediction; assumed to write its results back into
            `landmarks` (see _predict) — confirm against the owning step.
        predMethods : list of str
            Available prediction method names.
        popClasses : list of str
            Available population class names.
        parent : QWidget, optional
            Parent widget.
        '''
        QDialog.__init__(self, parent)
        self._ui = Ui_Dialog()
        self._ui.setupUi(self)
        self._scene = self._ui.MayaviScene.visualisation.scene
        self._scene.background = self.backgroundColour
        self.selectedObjectName = None
        self._landmarks = landmarks
        self._landmarkNames = sorted(self._landmarks.keys())
        self._predictFunc = predictFunc
        self._predMethods = predMethods
        self._popClasses = popClasses
        self._config = config

        self._initViewerObjects()
        self._setupGui()
        self._makeConnections()
        self._initialiseObjectTable()
        self._initialiseSettings()
        self._refresh()
        print('finished init...', self._config)

    def _initViewerObjects(self):
        """Create a viewer object for every landmark; mark HJCs with the
        red render style."""
        self._objects = MayaviViewerObjectsContainer()
        for ln in self._landmarkNames:
            self._objects.addObject(ln, MayaviViewerLandmark(ln, self._landmarks[ln],
                                                              renderArgs=self._landmarkRenderArgs)
                                    )
        # NOTE(review): assumes 'HJC_left' and 'HJC_right' are present in
        # the input landmarks — confirm against the calling step
        hjcl = self._objects.getObject('HJC_left')
        hjcl.setRenderArgs(self._hjcRenderArgs)
        hjcr = self._objects.getObject('HJC_right')
        hjcr.setRenderArgs(self._hjcRenderArgs)

    def _setupGui(self):
        """Populate combo boxes and restrict screenshot size fields to ints."""
        self._ui.screenshotPixelXLineEdit.setValidator(QIntValidator())
        self._ui.screenshotPixelYLineEdit.setValidator(QIntValidator())
        for l in self._landmarkNames:
            self._ui.comboBoxLASIS.addItem(l)
            self._ui.comboBoxRASIS.addItem(l)
            self._ui.comboBoxLPSIS.addItem(l)
            self._ui.comboBoxRPSIS.addItem(l)
            self._ui.comboBoxPS.addItem(l)
        for m in self._predMethods:
            self._ui.comboBoxPredMethod.addItem(m)
        for p in self._popClasses:
            self._ui.comboBoxPopClass.addItem(p)

    def _makeConnections(self):
        """Wire UI signals to their handlers."""
        self._ui.tableWidget.itemClicked.connect(self._tableItemClicked)
        self._ui.tableWidget.itemChanged.connect(self._visibleBoxChanged)
        self._ui.screenshotSaveButton.clicked.connect(self._saveScreenShot)
        self._ui.predictButton.clicked.connect(self._predict)
        self._ui.resetButton.clicked.connect(self._reset)
        self._ui.abortButton.clicked.connect(self._abort)
        self._ui.acceptButton.clicked.connect(self._accept)
        self._ui.comboBoxPredMethod.activated.connect(self._updateConfigPredMethod)
        self._ui.comboBoxPopClass.activated.connect(self._updateConfigPopClass)
        self._ui.comboBoxLASIS.activated.connect(self._updateConfigLASIS)
        self._ui.comboBoxRASIS.activated.connect(self._updateConfigRASIS)
        self._ui.comboBoxLPSIS.activated.connect(self._updateConfigLPSIS)
        self._ui.comboBoxRPSIS.activated.connect(self._updateConfigRPSIS)
        self._ui.comboBoxPS.activated.connect(self._updateConfigPS)

    def _setLandmarkCombo(self, comboBox, configKey):
        """Select the configured landmark in `comboBox`; fall back to the
        first entry when the configured name is not a known landmark."""
        if self._config[configKey] in self._landmarkNames:
            comboBox.setCurrentIndex(self._landmarkNames.index(self._config[configKey]))
        else:
            comboBox.setCurrentIndex(0)

    def _initialiseSettings(self):
        """Restore combo box selections from the step configuration."""
        self._ui.comboBoxPredMethod.setCurrentIndex(self._predMethods.index(self._config['Prediction Method']))
        self._ui.comboBoxPopClass.setCurrentIndex(self._popClasses.index(self._config['Population Class']))
        # refactored: the five identical if/else blocks are now one helper
        self._setLandmarkCombo(self._ui.comboBoxLASIS, 'LASIS')
        self._setLandmarkCombo(self._ui.comboBoxRASIS, 'RASIS')
        self._setLandmarkCombo(self._ui.comboBoxLPSIS, 'LPSIS')
        self._setLandmarkCombo(self._ui.comboBoxRPSIS, 'RPSIS')
        self._setLandmarkCombo(self._ui.comboBoxPS, 'PS')

    def _initialiseObjectTable(self):
        """Fill the object table with one checkable row per landmark; HJC
        rows start unchecked (they are drawn only after prediction)."""
        self._ui.tableWidget.setRowCount(self._objects.getNumberOfObjects())
        self._ui.tableWidget.verticalHeader().setVisible(False)
        self._ui.tableWidget.setEditTriggers(QAbstractItemView.NoEditTriggers)
        self._ui.tableWidget.setSelectionBehavior(QAbstractItemView.SelectRows)
        self._ui.tableWidget.setSelectionMode(QAbstractItemView.SingleSelection)
        r = 0
        for ln in self._landmarkNames:
            self._addObjectToTable(r, ln, self._objects.getObject(ln))
            r += 1
        hjclTableItem = self._ui.tableWidget.item(self._landmarkNames.index('HJC_left'),
                                                  self.objectTableHeaderColumns['landmarks'])
        hjclTableItem.setCheckState(Qt.Unchecked)
        hjcrTableItem = self._ui.tableWidget.item(self._landmarkNames.index('HJC_right'),
                                                  self.objectTableHeaderColumns['landmarks'])
        hjcrTableItem.setCheckState(Qt.Unchecked)
        self._ui.tableWidget.resizeColumnToContents(self.objectTableHeaderColumns['landmarks'])

    def _addObjectToTable(self, row, name, obj, checked=True):
        """Insert a checkable table row for viewer object `obj` at `row`."""
        typeName = obj.typeName
        print('adding to table: %s (%s)' % (name, typeName))
        tableItem = QTableWidgetItem(name)
        if checked:
            tableItem.setCheckState(Qt.Checked)
        else:
            tableItem.setCheckState(Qt.Unchecked)

        self._ui.tableWidget.setItem(row, self.objectTableHeaderColumns['landmarks'], tableItem)

    def _tableItemClicked(self):
        """Record the name of the object selected in the table."""
        selectedRow = self._ui.tableWidget.currentRow()
        self.selectedObjectName = self._ui.tableWidget.item(
            selectedRow,
            self.objectTableHeaderColumns['landmarks']
        ).text()
        print(selectedRow)
        print(self.selectedObjectName)

    def _visibleBoxChanged(self, tableItem):
        """Toggle scene visibility of the object whose checkbox changed."""
        # the checked/changed item is the checkbox in the landmarks column
        if tableItem.column() == self.objectTableHeaderColumns['landmarks']:
            # get visible status
            name = tableItem.text()
            visible = tableItem.checkState().name == 'Checked'
            print('visibleboxchanged name', name)
            print('visibleboxchanged visible', visible)

            # toggle visibility; draw the object first if it has never
            # been added to the scene
            obj = self._objects.getObject(name)
            print(obj.name)
            if obj.sceneObject:
                print('changing existing visibility')
                obj.setVisibility(visible)
            else:
                print('drawing new')
                obj.draw(self._scene)

    def _getSelectedObjectName(self):
        """Return the name of the currently selected table object."""
        return self.selectedObjectName

    def _getSelectedScalarName(self):
        # scalars are not used by this viewer
        return 'none'

    def drawObjects(self):
        """Draw every viewer object into the Mayavi scene."""
        for name in self._objects.getObjectNames():
            self._objects.getObject(name).draw(self._scene)

    # --- combo box handlers: each mirrors its selection into the config ---
    def _updateConfigPredMethod(self):
        self._config['Prediction Method'] = self._ui.comboBoxPredMethod.currentText()

    def _updateConfigPopClass(self):
        self._config['Population Class'] = self._ui.comboBoxPopClass.currentText()

    def _updateConfigLASIS(self):
        self._config['LASIS'] = self._ui.comboBoxLASIS.currentText()

    def _updateConfigRASIS(self):
        self._config['RASIS'] = self._ui.comboBoxRASIS.currentText()

    def _updateConfigLPSIS(self):
        self._config['LPSIS'] = self._ui.comboBoxLPSIS.currentText()

    def _updateConfigRPSIS(self):
        self._config['RPSIS'] = self._ui.comboBoxRPSIS.currentText()

    def _updateConfigPS(self):
        self._config['PS'] = self._ui.comboBoxPS.currentText()

    def _predict(self):
        """Run the prediction and show/update the predicted HJC glyphs."""
        self._predictFunc()

        # update predicted HJCs from the (now updated) landmarks dict
        hjclObj = self._objects.getObject('HJC_left')
        hjclObj.updateGeometry(self._landmarks['HJC_left'], self._scene)
        hjclTableItem = self._ui.tableWidget.item(self._landmarkNames.index('HJC_left'),
                                                  self.objectTableHeaderColumns['landmarks'])
        hjclTableItem.setCheckState(Qt.Checked)

        hjcrObj = self._objects.getObject('HJC_right')
        hjcrObj.updateGeometry(self._landmarks['HJC_right'], self._scene)
        hjcrTableItem = self._ui.tableWidget.item(self._landmarkNames.index('HJC_right'),
                                                  self.objectTableHeaderColumns['landmarks'])
        hjcrTableItem.setCheckState(Qt.Checked)

    def _reset(self):
        """Move predicted HJCs back to the origin and hide their rows."""
        hjclObj = self._objects.getObject('HJC_left')
        hjclObj.updateGeometry(np.array([0, 0, 0]), self._scene)
        hjclTableItem = self._ui.tableWidget.item(self._landmarkNames.index('HJC_left'),
                                                  self.objectTableHeaderColumns['landmarks'])
        hjclTableItem.setCheckState(Qt.Unchecked)

        hjcrObj = self._objects.getObject('HJC_right')
        hjcrObj.updateGeometry(np.array([0, 0, 0]), self._scene)
        hjcrTableItem = self._ui.tableWidget.item(self._landmarkNames.index('HJC_right'),
                                                  self.objectTableHeaderColumns['landmarks'])
        hjcrTableItem.setCheckState(Qt.Unchecked)

    def _accept(self):
        self._close()

    def _abort(self):
        """Discard the predicted HJCs and close."""
        self._reset()
        del self._landmarks['HJC_left']
        del self._landmarks['HJC_right']
        self._close()

    def _close(self):
        """Remove all scene objects and release the objects container."""
        for name in self._objects.getObjectNames():
            self._objects.getObject(name).remove()

        self._objects._objects = {}
        # BUG FIX: this line previously read `self._objects == None`, a
        # no-op comparison; the intent (dropping the reference to the
        # container) requires an assignment.
        self._objects = None

    def _refresh(self):
        """Sync scene visibility of every object with its table checkbox."""
        for r in range(self._ui.tableWidget.rowCount()):
            tableItem = self._ui.tableWidget.item(r, self.objectTableHeaderColumns['landmarks'])
            name = tableItem.text()
            visible = tableItem.checkState().name == 'Checked'
            obj = self._objects.getObject(name)
            print(obj.name)
            if obj.sceneObject:
                print('changing existing visibility')
                obj.setVisibility(visible)
            else:
                print('drawing new')
                obj.draw(self._scene)

    def _saveScreenShot(self):
        """Save a screenshot of the scene at the size given in the UI."""
        filename = self._ui.screenshotFilenameLineEdit.text()
        width = int(self._ui.screenshotPixelXLineEdit.text())
        height = int(self._ui.screenshotPixelYLineEdit.text())
        self._scene.mlab.savefig(filename, size=(width, height))

    # ================================================================#
    @on_trait_change('scene.activated')
    def testPlot(self):
        # This function is called when the view is opened. We don't
        # populate the scene when the view is not yet open, as some
        # VTK features require a GLContext.
        # NOTE(review): this draws mlab's demo points into the scene on
        # activation — looks like leftover debug code; confirm it is wanted.
        print('trait_changed')

        # We can do normal mlab calls on the embedded scene.
        self._scene.mlab.test_points3d()
| [
"gias2.mappluginutils.mayaviviewer.MayaviViewerObjectsContainer",
"traits.api.on_trait_change",
"PySide2.QtGui.QIntValidator",
"PySide2.QtWidgets.QTableWidgetItem",
"numpy.array",
"PySide2.QtWidgets.QDialog.__init__",
"mapclientplugins.pelvislandmarkshjcpredictionstep.ui_hjcpredictionviewerwidget.Ui_Dia... | [((13893, 13927), 'traits.api.on_trait_change', 'on_trait_change', (['"""scene.activated"""'], {}), "('scene.activated')\n", (13908, 13927), False, 'from traits.api import HasTraits, Instance, on_trait_change, Int, Dict\n'), ((1983, 2013), 'PySide2.QtWidgets.QDialog.__init__', 'QDialog.__init__', (['self', 'parent'], {}), '(self, parent)\n', (1999, 2013), False, 'from PySide2.QtWidgets import QDialog, QAbstractItemView, QTableWidgetItem\n'), ((2034, 2045), 'mapclientplugins.pelvislandmarkshjcpredictionstep.ui_hjcpredictionviewerwidget.Ui_Dialog', 'Ui_Dialog', ([], {}), '()\n', (2043, 2045), False, 'from mapclientplugins.pelvislandmarkshjcpredictionstep.ui_hjcpredictionviewerwidget import Ui_Dialog\n'), ((2965, 2995), 'gias2.mappluginutils.mayaviviewer.MayaviViewerObjectsContainer', 'MayaviViewerObjectsContainer', ([], {}), '()\n', (2993, 2995), False, 'from gias2.mappluginutils.mayaviviewer import MayaviViewerObjectsContainer, MayaviViewerLandmark, colours\n'), ((8114, 8136), 'PySide2.QtWidgets.QTableWidgetItem', 'QTableWidgetItem', (['name'], {}), '(name)\n', (8130, 8136), False, 'from PySide2.QtWidgets import QDialog, QAbstractItemView, QTableWidgetItem\n'), ((3796, 3811), 'PySide2.QtGui.QIntValidator', 'QIntValidator', ([], {}), '()\n', (3809, 3811), False, 'from PySide2.QtGui import QIntValidator\n'), ((3869, 3884), 'PySide2.QtGui.QIntValidator', 'QIntValidator', ([], {}), '()\n', (3882, 3884), False, 'from PySide2.QtGui import QIntValidator\n'), ((11786, 11805), 'numpy.array', 'np.array', (['[0, 0, 0]'], {}), '([0, 0, 0])\n', (11794, 11805), True, 'import numpy as np\n'), ((12146, 12165), 'numpy.array', 'np.array', (['[0, 0, 0]'], {}), '([0, 0, 0])\n', (12154, 12165), True, 'import numpy as np\n'), ((3077, 3164), 'gias2.mappluginutils.mayaviviewer.MayaviViewerLandmark', 'MayaviViewerLandmark', (['ln', 'self._landmarks[ln]'], {'renderArgs': 'self._landmarkRenderArgs'}), 
'(ln, self._landmarks[ln], renderArgs=self.\n _landmarkRenderArgs)\n', (3097, 3164), False, 'from gias2.mappluginutils.mayaviviewer import MayaviViewerObjectsContainer, MayaviViewerLandmark, colours\n')] |
# To add a new cell, type '# %%'
# To add a new markdown cell, type '# %% [markdown]'
# # 3.3 线性回归的简洁实现
import torch
from torch import nn
import numpy as np
torch.manual_seed(1)

print(torch.__version__)
torch.set_default_tensor_type('torch.FloatTensor')

# ## 3.3.1 Generate the dataset
num_inputs = 2
num_examples = 1000
# ground-truth parameters of the linear model: y = X @ w + b + noise
true_w = [2, -3.4]
true_b = 4.2
features = torch.tensor(np.random.normal(0, 1, (num_examples, num_inputs)), dtype=torch.float)
labels = true_w[0] * features[:, 0] + true_w[1] * features[:, 1] + true_b
# add Gaussian observation noise (std 0.01) to the labels
labels += torch.tensor(np.random.normal(0, 0.01, size=labels.size()), dtype=torch.float)

# ## 3.3.2 Read the data
import torch.utils.data as Data

batch_size = 10

# combine the features and labels of the training data
dataset = Data.TensorDataset(features, labels)

# put the dataset into a DataLoader
data_iter = Data.DataLoader(
    dataset=dataset,  # torch TensorDataset format
    batch_size=batch_size,  # mini batch size
    shuffle=True,  # whether to shuffle the data (shuffling is recommended)
    num_workers=2,  # number of subprocesses used to load the data
)

# peek at one mini-batch
for X, y in data_iter:
    print(X, '\n', y)
    break

# ## 3.3.3 Define the model
class LinearNet(nn.Module):
    """Single-layer linear regression network: n_feature inputs -> 1 output."""

    def __init__(self, n_feature):
        super(LinearNet, self).__init__()
        # One fully-connected layer holding the weights and bias.
        self.linear = nn.Linear(n_feature, 1)

    def forward(self, x):
        """Apply the linear layer to a batch x of shape (N, n_feature)."""
        return self.linear(x)
net = LinearNet(num_inputs)
print(net)  # printing the module shows the network structure

# Style 1: pass the layers directly to nn.Sequential
net = nn.Sequential(
    nn.Linear(num_inputs, 1)
    # additional layers could be passed here
    )

# Style 2: start empty and add named modules
net = nn.Sequential()
net.add_module('linear', nn.Linear(num_inputs, 1))
# net.add_module ......

# Style 3: construct from an OrderedDict of named layers
from collections import OrderedDict
net = nn.Sequential(OrderedDict([
          ('linear', nn.Linear(num_inputs, 1))
          # ......
        ]))

print(net)
print(net[0])
for param in net.parameters():
    print(param)

# ## 3.3.4 Initialize the model parameters
from torch.nn import init
init.normal_(net[0].weight, mean=0.0, std=0.01)
init.constant_(net[0].bias, val=0.0)  # could also set the data directly: net[0].bias.data.fill_(0)
for param in net.parameters():
    print(param)

# ## 3.3.5 Define the loss function
loss = nn.MSELoss()

# ## 3.3.6 Define the optimizer
import torch.optim as optim
optimizer = optim.SGD(net.parameters(), lr=0.03)
print(optimizer)

# Setting different learning rates for different sub-networks:
# optimizer =optim.SGD([
#                 # parameters without an explicit lr use the outer default
#                 {'params': net.subnet1.parameters()}, # lr=0.03
#                 {'params': net.subnet2.parameters(), 'lr': 0.01}
#             ], lr=0.03)

# # Adjusting the learning rate afterwards:
# for param_group in optimizer.param_groups:
#     param_group['lr'] *= 0.1  # decay lr to 0.1x its previous value

# ## 3.3.7 Train the model
num_epochs = 3
for epoch in range(1, num_epochs + 1):
    for X, y in data_iter:
        output = net(X)
        # reshape y to (batch, 1) to match the network output
        l = loss(output, y.view(-1, 1))
        optimizer.zero_grad()  # clear gradients; equivalent to net.zero_grad()
        l.backward()
        optimizer.step()
    print('epoch %d, loss: %f' % (epoch, l.item()))

# Compare the learned parameters against the ground truth.
dense = net[0]
print(true_w, dense.weight.data)
print(true_b, dense.bias.data)
| [
"numpy.random.normal",
"torch.manual_seed",
"torch.nn.init.constant_",
"torch.nn.Sequential",
"torch.utils.data.TensorDataset",
"torch.set_default_tensor_type",
"torch.nn.MSELoss",
"torch.nn.Linear",
"torch.utils.data.DataLoader",
"torch.nn.init.normal_"
] | [((158, 178), 'torch.manual_seed', 'torch.manual_seed', (['(1)'], {}), '(1)\n', (175, 178), False, 'import torch\n'), ((205, 255), 'torch.set_default_tensor_type', 'torch.set_default_tensor_type', (['"""torch.FloatTensor"""'], {}), "('torch.FloatTensor')\n", (234, 255), False, 'import torch\n'), ((695, 731), 'torch.utils.data.TensorDataset', 'Data.TensorDataset', (['features', 'labels'], {}), '(features, labels)\n', (713, 731), True, 'import torch.utils.data as Data\n'), ((771, 859), 'torch.utils.data.DataLoader', 'Data.DataLoader', ([], {'dataset': 'dataset', 'batch_size': 'batch_size', 'shuffle': '(True)', 'num_workers': '(2)'}), '(dataset=dataset, batch_size=batch_size, shuffle=True,\n num_workers=2)\n', (786, 859), True, 'import torch.utils.data as Data\n'), ((1442, 1457), 'torch.nn.Sequential', 'nn.Sequential', ([], {}), '()\n', (1455, 1457), False, 'from torch import nn\n'), ((1812, 1859), 'torch.nn.init.normal_', 'init.normal_', (['net[0].weight'], {'mean': '(0.0)', 'std': '(0.01)'}), '(net[0].weight, mean=0.0, std=0.01)\n', (1824, 1859), False, 'from torch.nn import init\n'), ((1860, 1896), 'torch.nn.init.constant_', 'init.constant_', (['net[0].bias'], {'val': '(0.0)'}), '(net[0].bias, val=0.0)\n', (1874, 1896), False, 'from torch.nn import init\n'), ((2021, 2033), 'torch.nn.MSELoss', 'nn.MSELoss', ([], {}), '()\n', (2031, 2033), False, 'from torch import nn\n'), ((367, 417), 'numpy.random.normal', 'np.random.normal', (['(0)', '(1)', '(num_examples, num_inputs)'], {}), '(0, 1, (num_examples, num_inputs))\n', (383, 417), True, 'import numpy as np\n'), ((1381, 1405), 'torch.nn.Linear', 'nn.Linear', (['num_inputs', '(1)'], {}), '(num_inputs, 1)\n', (1390, 1405), False, 'from torch import nn\n'), ((1483, 1507), 'torch.nn.Linear', 'nn.Linear', (['num_inputs', '(1)'], {}), '(num_inputs, 1)\n', (1492, 1507), False, 'from torch import nn\n'), ((1189, 1212), 'torch.nn.Linear', 'nn.Linear', (['n_feature', '(1)'], {}), '(n_feature, 1)\n', (1198, 1212), False, 
'from torch import nn\n'), ((1631, 1655), 'torch.nn.Linear', 'nn.Linear', (['num_inputs', '(1)'], {}), '(num_inputs, 1)\n', (1640, 1655), False, 'from torch import nn\n')] |
from matplotlib.pyplot import figure
import xarray
import numpy as np
__all__ = ["precip", "ver"]
def density(iono: xarray.Dataset):
    """Plot neutral and ion number-density profiles vs. altitude on two shared-y panels."""
    fig = figure()
    axes = fig.subplots(1, 2, sharey=True)
    fig.suptitle("Number Density")

    panels = (
        (axes[0], ("O", "N2", "O2", "NO"), "Neutrals"),
        (axes[1], ("O+", "O2+", "NO+", "N2D"), "Ions"),
    )
    for ax, species, title in panels:
        for name in species:
            ax.plot(iono[name], iono[name].alt_km, label=name)
        ax.set_xscale("log")
        ax.set_title(title)
        ax.grid(True)
        ax.set_xlim(1, None)
        ax.legend(loc="best")

    # Shared-y layout: label only the left panel.
    axes[0].set_ylabel("altitude [km]")
    axes[0].set_xlabel("Density [cm$^{-3}$]")
def precip(precip: xarray.DataArray):
    """Plot the differential number flux of the precipitation spectrum."""
    ax = figure().gca()
    # Energy bin centers arrive in eV; display them in keV.
    energy_kev = precip["energy"] / 1e3
    ax.plot(energy_kev, precip)
    ax.set_xlabel("Energy bin centers [keV]")
    ax.set_ylabel("hemispherical flux [cm$^{-2}$ s$^{-1}$ eV$^{-1}$]")
    ax.set_title("precipitation: differential number flux")
    ax.grid(True)
def temperature(iono: xarray.Dataset):
    """Plot ion, electron and neutral temperature profiles vs. altitude."""
    tail = f"\n{iono.time} {iono.glatlon}"

    ax = figure().gca()
    for key, label in (("Ti", "$T_i$"), ("Te", "$T_e$"), ("Tn", "$T_n$")):
        ax.plot(iono[key], iono[key].alt_km, label=label)
    ax.set_xlabel("Temperature [K]")
    ax.set_ylabel("altitude [km]")
    ax.set_title("Ion, Electron, Neutral temperatures" + tail)
    ax.grid(True)
    ax.legend()
def altitude(iono: xarray.Dataset):
    """Plot the altitude grid to visualise cell spacing vs. grid index."""
    fig = figure()
    ax = fig.gca()
    ax.plot(iono.alt_km)
    ax.set_xlabel("altitude grid index #")
    ax.set_ylabel("altitude [km]")
    ax.set_title("altitude grid cells")
    ax.grid(True)
def ver(iono: xarray.Dataset):
    """Plot volume emission rate profiles grouped into three spectral bands."""
    tail = f"\n{iono.time} {iono.glatlon}"

    fig = figure(constrained_layout=True)
    axs = fig.subplots(1, 3, sharey=True)
    fig.suptitle(tail)

    bands = (
        ("Visible", ["4278", "5577", "6300", "5200"]),
        ("Infrared", ["7320", "7774", "8446", "10400"]),
        ("Ultraviolet", ["3371", "3644", "3726", "1356", "1493", "1304", "LBH"]),
    )
    for ax, (band_name, wavelengths) in zip(axs, bands):
        ver_group(iono["ver"].loc[:, wavelengths], band_name, ax)

    # Shared-y layout: label only the left panel.
    axs[0].set_ylabel("altitude [km]")
    axs[0].set_xlabel("Volume Emission Rate [Rayleigh]")
def ver_group(iono: xarray.DataArray, ttxt: str, ax):
    """Plot one group of emission lines on ax; skip groups with no finite emission."""
    peak = np.nanmax(iono)
    if peak == 0 or np.isnan(peak):
        return

    # Fixed colors for a few well-known lines; others get matplotlib defaults.
    line_colors = {
        "4278": "blue",
        "5577": "xkcd:dark lime green",
        "5200": "xkcd:golden yellow",
        "6300": "red",
    }
    for w in iono.wavelength:
        wl = w.item()
        ax.plot(iono.loc[:, w], iono.alt_km, label=wl, color=line_colors.get(wl))
    ax.set_xscale("log")
    ax.set_ylim(90, 500)
    ax.set_title(ttxt)
    ax.grid(True)
    ax.legend()
| [
"matplotlib.pyplot.figure",
"numpy.isnan",
"numpy.nanmax"
] | [((146, 154), 'matplotlib.pyplot.figure', 'figure', ([], {}), '()\n', (152, 154), False, 'from matplotlib.pyplot import figure\n'), ((1909, 1940), 'matplotlib.pyplot.figure', 'figure', ([], {'constrained_layout': '(True)'}), '(constrained_layout=True)\n', (1915, 1940), False, 'from matplotlib.pyplot import figure\n'), ((2489, 2504), 'numpy.nanmax', 'np.nanmax', (['iono'], {}), '(iono)\n', (2498, 2504), True, 'import numpy as np\n'), ((2523, 2535), 'numpy.isnan', 'np.isnan', (['nm'], {}), '(nm)\n', (2531, 2535), True, 'import numpy as np\n'), ((814, 822), 'matplotlib.pyplot.figure', 'figure', ([], {}), '()\n', (820, 822), False, 'from matplotlib.pyplot import figure\n'), ((1201, 1209), 'matplotlib.pyplot.figure', 'figure', ([], {}), '()\n', (1207, 1209), False, 'from matplotlib.pyplot import figure\n'), ((1606, 1614), 'matplotlib.pyplot.figure', 'figure', ([], {}), '()\n', (1612, 1614), False, 'from matplotlib.pyplot import figure\n')] |
import os
import numpy as np
def getParamsFromInfo(folder):
    """Load all saved training/test parameters from the *_info.txt files in folder.

    Bug fix: the loop previously indexed ``infoFiles`` with the undefined
    name ``i`` (NameError on first iteration); it now uses the enumerate
    index ``idx``.

    NOTE(review): "opimizer" looks like a typo for "optimizer", but it is a
    runtime filename that must match the files written by the saver -- left
    unchanged. The parsed objects are currently bound to locals and
    discarded; presumably a return statement is still to be added -- confirm
    with callers.
    """
    infoFiles = ["criterion", "test_criterion", "test_loader", "train_loader", "opimizer", "test_data_set", "train_data_set", "weight"]
    # Turn each base name into a full "<folder>/<name>_info.txt" path in place.
    for idx, f in enumerate(infoFiles):
        infoFiles[idx] = os.path.join(folder, f+"_info.txt")
    criterion = getCriterionFromInfo(infoFiles[0])
    test_criterion = getCriterionFromInfo(infoFiles[1])
    test_loader = getLoaderFromInfo(infoFiles[2])
    train_loader = getLoaderFromInfo(infoFiles[3])
    optimizer = getOptimizerFromInfo(infoFiles[4])
    test_data_set = getDataSetFromInfo(infoFiles[5])
    train_data_set = getDataSetFromInfo(infoFiles[6])
    weight = getWeightFromInfo(infoFiles[7])
def getVariablesFromDataSetInfo(filename):
    """Return the variable names listed after 'variables' in an info file.

    Every matching line is expected to contain a bracketed, comma-separated
    list of quoted names, e.g. ``variables :: ['temp', 'pres']``.  Names
    from all matching lines are accumulated.
    """
    names = []
    with open(filename, "r") as f:
        for line in f:
            if "variables" not in line:
                continue
            after = line.split("variables")[1]
            inner = after.split("[")[1].split("]")[0]
            # Strip surrounding whitespace, then the quote characters.
            names.extend(item.strip()[1:-1] for item in inner.split(","))
    return names
def getNormTypeFromDataSetInfo(filename):
    """Return the normalize_type value from an info file.

    Returns the raw (unstripped) text between the first ':' and the first
    ',' on the first line containing 'normalize_type', or None if no such
    line exists.
    """
    with open(filename, "r") as f:
        for line in f:
            if "normalize_type" in line:
                after = line.split("normalize_type")[1]
                return after.split(":")[1].split(",")[0]
def getLevelRangeFromDataSetInfo(filename):
    """Return the bracketed 'levelrange' integers from an info file as an ndarray.

    Integers from every matching line are accumulated, in file order.
    """
    bounds = []
    with open(filename, "r") as f:
        for line in f:
            if "levelrange" not in line:
                continue
            inner = line.split("levelrange")[1].split("[")[1].split("]")[0]
            bounds.extend(int(tok.strip()) for tok in inner.split(","))
    return np.array(bounds)
def getDataSetInformationFromInfo(filename):
    """Parse a data-set info file into a dict of {key: parsed value}.

    The first line (the data-set type) is consumed and echoed; every later
    line is expected to be " :: "-separated as ``key :: datatype :: value``.
    Lines that cannot be parsed (e.g. fragments of multi-line objects) are
    skipped with a diagnostic, keeping the original best-effort behaviour.
    """
    dataInfo = dict()
    with open(filename, "r") as f:
        datasetType = f.readline()
        print(datasetType)
        for line in f:
            parts = line.split(" :: ")
            try:
                key, datatype, value, end = parts[0], parts[1], parts[2:-1], parts[-1]
                dataInfo[key] = formatValue(datatype, value)
            # Bug fix: the original bare 'except:' also swallowed
            # KeyboardInterrupt/SystemExit; only genuine runtime errors
            # (bad split, failed parse) should be skipped.
            except Exception:
                print("wrongly formatted line. Potentially a multiline object")
    return dataInfo
def formatValue(datatype, valueString):
    """Recursively parse ``valueString`` according to the textual ``datatype`` spec.

    Supports the scalar specs 'str'/'int'/'float' plus arbitrarily nested
    'list(...)', 'tuple(...)' and 'dict(key: value)' container specs.
    Returns the parsed Python object, or the sentinel string "NonenoN" on a
    malformed/unknown spec.

    Bug fix: the non-nested dict branch called ``zip([...])`` without
    unpacking, so ``keys, values`` could never be assigned correctly
    (it raised or produced garbage for any real input); it now uses
    ``zip(*[...])`` like the nested dict branch.
    """
    if isinstance(valueString, list):
        valueString = ' :: '.join(valueString)
    valueString = valueString.strip()
    datatype = datatype.strip()

    # handle basic datatypes
    if datatype == 'str':
        valueString = valueString.strip("'")
        return valueString.strip()
    elif datatype == 'int':
        return int(valueString)
    elif datatype == 'float':
        return float(valueString)

    # handle container datatypes (lists, tuples, dictionaries)
    typeparts = datatype.split("(")
    if len(typeparts) <= 1:
        print("Wrong format, exiting now")
        return "NonenoN"
    # more than one '(' means the element type is itself a container
    nested = len(typeparts) > 2

    if not nested:
        if typeparts[0] == "list":
            values = valueString[1:-1].split(',')
            types = typeparts[1][:-1]
            return [formatValue(types, value) for value in values]
        elif typeparts[0] == "tuple":
            values = valueString[1:-1].split(',')
            types = typeparts[1][:-1].split(',')
            return tuple(formatValue(types[idx], value)
                         for idx, value in enumerate(values))
        elif typeparts[0] == "dict":
            entries = valueString[1:-1].split(',')
            # Fixed: unpack with zip(*...) so keys/values are column-wise.
            keys, values = list(zip(*[entry.split(':') for entry in entries]))
            keytype, valuetype = typeparts[1][:-1].split(': ')
            return {formatValue(keytype, keys[idx]): formatValue(valuetype, values[idx])
                    for idx in range(len(keys))}
        else:
            print("unknown Type encountered, line will be ignored")
            return "NonenoN"
    else:
        # a list nested with other complex types
        if typeparts[0] == "list":
            values = getValueListFromNested(valueString[1:-1])
            # re-join to recover the inner (container) type spec
            types = "(".join(typeparts[1:])[:-1]
            return [formatValue(types, value) for value in values]
        # a tuple nested with other complex types
        elif typeparts[0] == "tuple":
            values = getValueListFromNested(valueString[1:-1])
            typeList = "(".join(typeparts[1:])[:-1]
            types = getValueListFromNested(typeList)
            return tuple(formatValue(types[idx], value)
                         for idx, value in enumerate(values))
        # a dict nested with other complex types as values
        elif typeparts[0] == "dict":
            entries = getValueListFromNested(valueString[1:-1])
            print(entries)
            keys, values = list(zip(*[entry.split(': ') for entry in entries]))
            types = "(".join(typeparts[1:])[:-1]
            typeParts = types.split(': ')
            keytype = typeParts[0]
            valuetype = ":".join(typeParts[1:])
            print(keytype, valuetype)
            print(values)
            return {formatValue(keytype, keys[idx]): formatValue(valuetype, values[idx])
                    for idx in range(len(keys))}
        else:
            print("unknown Type encountered, line will be ignored")
            return "NonenoN"
def getValueListFromNested(valueString):
    """Split valueString on top-level commas only.

    Commas nested inside (), {} or [] do not start a new element; such
    fragments are re-joined (with the comma restored) onto the current
    element.
    """
    values = []
    depth = 0
    for fragment in valueString.split(','):
        if depth == 0:
            values.append(fragment)
        elif depth > 0:
            # still inside an open bracket: restore the comma we split on
            values[-1] += "," + fragment
        depth += sum(fragment.count(ch) for ch in "({[")
        depth -= sum(fragment.count(ch) for ch in ")}]")
    return values
#getVariablesFromDataSetInfo(sys.argv[1])
| [
"numpy.array",
"os.path.join"
] | [((1844, 1864), 'numpy.array', 'np.array', (['levelrange'], {}), '(levelrange)\n', (1852, 1864), True, 'import numpy as np\n'), ((260, 297), 'os.path.join', 'os.path.join', (['folder', "(f + '_info.txt')"], {}), "(folder, f + '_info.txt')\n", (272, 297), False, 'import os\n')] |
from __future__ import division
import numpy as np
from scipy.optimize import fmin_bfgs
from itertools import combinations_with_replacement
import causalinference.utils.tools as tools
from .data import Dict
class Propensity(Dict):
    """
    Dictionary-like class containing propensity score data.

    Propensity score related data includes estimated logistic regression
    coefficients, maximized log-likelihood, predicted propensity scores,
    and lists of the linear and quadratic terms that are included in the
    logistic regression.
    """

    def __init__(self, data, lin, qua):
        # Design matrix: intercept, the covariate columns listed in `lin`,
        # and one product column per (i, j) pair in `qua` (see form_matrix).
        Z = form_matrix(data['X'], lin, qua)
        # Split rows into control and treated observations.
        Z_c, Z_t = Z[data['controls']], Z[data['treated']]
        # Logit coefficients by maximum likelihood (BFGS, see calc_coef).
        beta = calc_coef(Z_c, Z_t)

        self._data = data
        self._dict = dict()
        self._dict['lin'], self._dict['qua'] = lin, qua
        self._dict['coef'] = beta
        # neg_loglike returns the negated log-likelihood; store its negation.
        self._dict['loglike'] = -neg_loglike(beta, Z_c, Z_t)
        # Fitted propensity scores for every observation.
        self._dict['fitted'] = sigmoid(Z.dot(beta))
        self._dict['se'] = calc_se(Z, self._dict['fitted'])

    def __str__(self):
        """Render the estimated-parameter table as a fixed-width string."""
        table_width = 80

        coefs = self._dict['coef']
        ses = self._dict['se']

        output = '\n'
        output += 'Estimated Parameters of Propensity Score\n\n'

        # Header row; the final entry spans two columns (the conf. interval).
        entries1 = ['', 'Coef.', 'S.e.', 'z', 'P>|z|',
                    '[95% Conf. int.]']
        entry_types1 = ['string']*6
        col_spans1 = [1]*5 + [2]
        output += tools.add_row(entries1, entry_types1,
                                col_spans1, table_width)
        output += tools.add_line(table_width)

        # Intercept row (coefs[0]/ses[0] are the intercept's estimates).
        entries2 = tools.gen_reg_entries('Intercept', coefs[0], ses[0])
        entry_types2 = ['string'] + ['float']*6
        col_spans2 = [1]*7
        output += tools.add_row(entries2, entry_types2,
                                col_spans2, table_width)

        # One row per linear term, labelled X<index>.
        lin = self._dict['lin']
        for (lin_term, coef, se) in zip(lin, coefs[1:], ses[1:]):
            entries3 = tools.gen_reg_entries('X'+str(lin_term),
                                             coef, se)
            output += tools.add_row(entries3, entry_types2,
                                    col_spans2, table_width)

        # One row per quadratic/interaction term, labelled Xi*Xj; the
        # quadratic coefficients start after the intercept + linear terms.
        qua = self._dict['qua']
        lin_num = len(lin)+1  # including intercept
        for (qua_term, coef, se) in zip(qua, coefs[lin_num:],
                                        ses[lin_num:]):
            name = 'X'+str(qua_term[0])+'*X'+str(qua_term[1])
            entries4 = tools.gen_reg_entries(name, coef, se)
            output += tools.add_row(entries4, entry_types2,
                                    col_spans2, table_width)

        return output
class PropensitySelect(Propensity):
    """
    Propensity score estimation with data-driven term selection.

    Chooses which covariates enter linearly via likelihood-ratio tests
    against the threshold C_lin (starting from the basic set lin_B), then
    which quadratic/interaction terms to add (threshold C_qua), and fits
    the logistic regression exactly as Propensity does.
    """

    def __init__(self, data, lin_B, C_lin, C_qua):
        covariates_c = data['X_c']
        covariates_t = data['X_t']
        chosen_lin = select_lin_terms(covariates_c, covariates_t, lin_B, C_lin)
        chosen_qua = select_qua_terms(covariates_c, covariates_t,
                                      chosen_lin, C_qua)
        super(PropensitySelect, self).__init__(data, chosen_lin, chosen_qua)
def form_matrix(X, lin, qua):
    """Build the logistic-regression design matrix.

    Columns are: an intercept of ones, the covariate columns of X listed
    in lin, then one product column X[:, i] * X[:, j] for each pair
    (i, j) in qua.
    """
    N = X.shape[0]
    mat = np.empty((N, 1 + len(lin) + len(qua)))
    mat[:, 0] = 1  # intercept column
    col = 1
    if lin:
        mat[:, col:col + len(lin)] = X[:, lin]
        col += len(lin)
    for (i, j) in qua:  # qua is a list of tuples of column numbers
        mat[:, col] = X[:, i] * X[:, j]
        col += 1
    return mat
def sigmoid(x, top_threshold=100, bottom_threshold=-100):
    """Elementwise logistic function 1/(1+exp(-x)), clamped to avoid overflow.

    Inputs >= top_threshold map to 1.0, inputs <= bottom_threshold map to
    0.0; only the interior entries are computed with np.exp.
    """
    out = np.empty(x.shape[0])
    saturated_hi = x >= top_threshold
    saturated_lo = x <= bottom_threshold
    interior = ~(saturated_hi | saturated_lo)
    out[saturated_hi] = 1.0
    out[saturated_lo] = 0.0
    out[interior] = 1 / (1 + np.exp(-x[interior]))
    return out
def log1exp(x, top_threshold=100, bottom_threshold=-100):
    """Elementwise log(1 + exp(-x)), clamped for numerical stability.

    For x >= top_threshold the result is taken as 0; for
    x <= bottom_threshold it is approximated by -x (the dominant term).
    """
    out = np.empty(x.shape[0])
    saturated_hi = x >= top_threshold
    saturated_lo = x <= bottom_threshold
    interior = ~(saturated_hi | saturated_lo)
    out[saturated_hi] = 0.0
    out[saturated_lo] = -x[saturated_lo]
    out[interior] = np.log(1 + np.exp(-x[interior]))
    return out
def neg_loglike(beta, X_c, X_t):
    """Negative log-likelihood of the logit model (controls X_c, treated X_t)."""
    treated_part = log1exp(X_t.dot(beta)).sum()
    control_part = log1exp(-X_c.dot(beta)).sum()
    return treated_part + control_part
def neg_gradient(beta, X_c, X_t):
    """Gradient of neg_loglike with respect to beta."""
    control_part = (sigmoid(X_c.dot(beta)) * X_c.T).sum(1)
    treated_part = (sigmoid(-X_t.dot(beta)) * X_t.T).sum(1)
    return control_part - treated_part
def calc_coef(X_c, X_t):
    """Estimate logit coefficients by BFGS minimization of the negative log-likelihood."""
    K = X_c.shape[1]

    def objective(b):
        return neg_loglike(b, X_c, X_t)

    def gradient(b):
        return neg_gradient(b, X_c, X_t)

    # Start from beta = 0; with full_output=True fmin_bfgs returns a tuple
    # whose first element is the minimizing parameter vector.
    result = fmin_bfgs(objective, np.zeros(K), gradient,
                       full_output=True, disp=False)
    return result[0]
def calc_se(X, phat):
    """Standard errors of the logit coefficients from the information matrix."""
    weights = phat * (1 - phat)
    info = np.dot(weights * X.T, X)
    return np.sqrt(np.diag(np.linalg.inv(info)))
def get_excluded_lin(K, included):
    """Return the covariate indices in range(K) not already in included."""
    chosen = set(included)
    return [idx for idx in range(K) if idx not in chosen]
def get_excluded_qua(lin, included):
    """Return candidate quadratic terms (index pairs from lin, with
    replacement) that are not already in included."""
    chosen = set(included)
    return [pair for pair in combinations_with_replacement(lin, 2)
            if pair not in chosen]
def calc_loglike(X_c, X_t, lin, qua):
    """Maximized log-likelihood of a logit with the given lin/qua terms."""
    design_c = form_matrix(X_c, lin, qua)
    design_t = form_matrix(X_t, lin, qua)
    beta_hat = calc_coef(design_c, design_t)
    return -neg_loglike(beta_hat, design_c, design_t)
def select_lin(X_c, X_t, lin_B, C_lin):
    """Greedily add linear terms via likelihood-ratio tests.

    Starting from lin_B, repeatedly adds the excluded covariate with the
    largest LR statistic until no candidate exceeds C_lin.
    """
    candidates = get_excluded_lin(X_c.shape[1], lin_B)
    if not candidates:
        return lin_B

    ll_null = calc_loglike(X_c, X_t, lin_B, [])

    def lr_stat(term):
        # Twice the log-likelihood gain from adding this single term.
        return 2 * (calc_loglike(X_c, X_t, lin_B + [term], []) - ll_null)

    stats = np.array([lr_stat(term) for term in candidates])
    best = stats.argmax()
    if stats[best] < C_lin:
        return lin_B
    return select_lin(X_c, X_t, lin_B + [candidates[best]], C_lin)
def select_lin_terms(X_c, X_t, lin_B, C_lin):
    """Wrapper around select_lin handling the trivial thresholds cheaply.

    C_lin <= 0 includes every covariate; C_lin == inf keeps only the
    basic terms lin_B; otherwise run the stepwise selection.
    """
    if C_lin <= 0:
        return lin_B + get_excluded_lin(X_c.shape[1], lin_B)
    if C_lin == np.inf:
        return lin_B
    return select_lin(X_c, X_t, lin_B, C_lin)
def select_qua(X_c, X_t, lin, qua_B, C_qua):
    """Greedily add quadratic/interaction terms via likelihood-ratio tests.

    Starting from qua_B, repeatedly adds the excluded pair with the
    largest LR statistic until no candidate exceeds C_qua.
    """
    candidates = get_excluded_qua(lin, qua_B)
    if not candidates:
        return qua_B

    ll_null = calc_loglike(X_c, X_t, lin, qua_B)

    def lr_stat(term):
        # Twice the log-likelihood gain from adding this single term.
        return 2 * (calc_loglike(X_c, X_t, lin, qua_B + [term]) - ll_null)

    stats = np.array([lr_stat(term) for term in candidates])
    best = stats.argmax()
    if stats[best] < C_qua:
        return qua_B
    return select_qua(X_c, X_t, lin, qua_B + [candidates[best]], C_qua)
def select_qua_terms(X_c, X_t, lin, C_qua):
    """Wrapper around select_qua handling the trivial cases cheaply.

    No linear terms means no quadratic candidates; C_qua <= 0 includes
    every candidate pair; C_qua == inf includes none.
    """
    if not lin:
        return []
    if C_qua <= 0:
        return get_excluded_qua(lin, [])
    if C_qua == np.inf:
        return []
    return select_qua(X_c, X_t, lin, [], C_qua)
| [
"causalinference.utils.tools.gen_reg_entries",
"causalinference.utils.tools.add_line",
"numpy.exp",
"numpy.dot",
"numpy.zeros",
"numpy.empty",
"numpy.linalg.inv",
"causalinference.utils.tools.add_row",
"itertools.combinations_with_replacement"
] | [((3478, 3498), 'numpy.empty', 'np.empty', (['x.shape[0]'], {}), '(x.shape[0])\n', (3486, 3498), True, 'import numpy as np\n'), ((3762, 3782), 'numpy.empty', 'np.empty', (['x.shape[0]'], {}), '(x.shape[0])\n', (3770, 3782), True, 'import numpy as np\n'), ((4408, 4442), 'numpy.dot', 'np.dot', (['(phat * (1 - phat) * X.T)', 'X'], {}), '(phat * (1 - phat) * X.T, X)\n', (4414, 4442), True, 'import numpy as np\n'), ((1297, 1359), 'causalinference.utils.tools.add_row', 'tools.add_row', (['entries1', 'entry_types1', 'col_spans1', 'table_width'], {}), '(entries1, entry_types1, col_spans1, table_width)\n', (1310, 1359), True, 'import causalinference.utils.tools as tools\n'), ((1398, 1425), 'causalinference.utils.tools.add_line', 'tools.add_line', (['table_width'], {}), '(table_width)\n', (1412, 1425), True, 'import causalinference.utils.tools as tools\n'), ((1440, 1492), 'causalinference.utils.tools.gen_reg_entries', 'tools.gen_reg_entries', (['"""Intercept"""', 'coefs[0]', 'ses[0]'], {}), "('Intercept', coefs[0], ses[0])\n", (1461, 1492), True, 'import causalinference.utils.tools as tools\n'), ((1568, 1630), 'causalinference.utils.tools.add_row', 'tools.add_row', (['entries2', 'entry_types2', 'col_spans2', 'table_width'], {}), '(entries2, entry_types2, col_spans2, table_width)\n', (1581, 1630), True, 'import causalinference.utils.tools as tools\n'), ((4302, 4313), 'numpy.zeros', 'np.zeros', (['K'], {}), '(K)\n', (4310, 4313), True, 'import numpy as np\n'), ((4664, 4701), 'itertools.combinations_with_replacement', 'combinations_with_replacement', (['lin', '(2)'], {}), '(lin, 2)\n', (4693, 4701), False, 'from itertools import combinations_with_replacement\n'), ((1858, 1920), 'causalinference.utils.tools.add_row', 'tools.add_row', (['entries3', 'entry_types2', 'col_spans2', 'table_width'], {}), '(entries3, entry_types2, col_spans2, table_width)\n', (1871, 1920), True, 'import causalinference.utils.tools as tools\n'), ((2194, 2231), 
'causalinference.utils.tools.gen_reg_entries', 'tools.gen_reg_entries', (['name', 'coef', 'se'], {}), '(name, coef, se)\n', (2215, 2231), True, 'import causalinference.utils.tools as tools\n'), ((2245, 2307), 'causalinference.utils.tools.add_row', 'tools.add_row', (['entries4', 'entry_types2', 'col_spans2', 'table_width'], {}), '(entries4, entry_types2, col_spans2, table_width)\n', (2258, 2307), True, 'import causalinference.utils.tools as tools\n'), ((3564, 3581), 'numpy.exp', 'np.exp', (['(-x[mid_x])'], {}), '(-x[mid_x])\n', (3570, 3581), True, 'import numpy as np\n'), ((3860, 3877), 'numpy.exp', 'np.exp', (['(-x[mid_x])'], {}), '(-x[mid_x])\n', (3866, 3877), True, 'import numpy as np\n'), ((4463, 4479), 'numpy.linalg.inv', 'np.linalg.inv', (['H'], {}), '(H)\n', (4476, 4479), True, 'import numpy as np\n')] |
#!/usr/bin/env python

import wx

# use the numpy code instead of the raw access code for comparison
USE_NUMPY = False

# time the execution of making a bitmap?
TIMEIT = False

# how big to make the bitmaps
DIM = 100

# should we use a wx.GraphicsContext for painting?
TEST_GC = False

#----------------------------------------------------------------------

# attempt to import a numeric module if requested to; prefer numpy,
# fall back to the older numarray, and disable USE_NUMPY entirely if
# neither is installed
if USE_NUMPY:
    try:
        import numpy
        def makeByteArray(shape):
            # uninitialized uint8 array; callers overwrite every byte
            return numpy.empty(shape, numpy.uint8)
        numtype = 'numpy'
    except ImportError:
        try:
            import numarray
            def makeByteArray(shape):
                # numarray has no empty(); allocate and zero-fill
                arr = numarray.array(shape=shape, typecode='u1')
                arr[:] = 0
                return arr
            numtype = 'numarray'
        except ImportError:
            USE_NUMPY = False
#----------------------------------------------------------------------
class TestPanel(wx.Panel):
    """Demo panel drawing three semi-transparent bitmaps built either via
    raw pixel access (MakeBitmap) or numpy/numarray (MakeBitmap2)."""

    def __init__(self, parent, log):
        self.log = log
        wx.Panel.__init__(self, parent, -1)
        self.Bind(wx.EVT_PAINT, self.OnPaint)

        if TIMEIT:
            import timeit
            timeit.s = self   # Put self in timeit's global namespace as
                              # 's' so it can be found in the code
                              # snippets being tested.
            if not USE_NUMPY:
                t = timeit.Timer("bmp = s.MakeBitmap(10, 20, 30)")
            else:
                t = timeit.Timer("bmp = s.MakeBitmap2(10, 20, 30)")
            log.write("Timing...\n")
            num = 100
            tm = t.timeit(num)
            log.write("%d passes in %f seconds == %f seconds per pass " %
                      (num, tm, tm/num))

        # Build the three demo bitmaps once, up front.
        if not USE_NUMPY:
            log.write("using raw access\n")
            self.redBmp = self.MakeBitmap(178, 34, 34)
            self.greenBmp = self.MakeBitmap( 35, 142, 35)
            self.blueBmp = self.MakeBitmap( 0, 0, 139)
        else:
            log.write("using %s\n" % numtype)
            self.redBmp = self.MakeBitmap2(178, 34, 34)
            self.greenBmp = self.MakeBitmap2( 35, 142, 35)
            self.blueBmp = self.MakeBitmap2( 0, 0, 139)

    def OnPaint(self, evt):
        """Paint the three bitmaps, overlapping so alpha blending is visible."""
        dc = wx.PaintDC(self)
        if not TEST_GC:
            dc.DrawBitmap(self.redBmp, 50, 50, True)
            dc.DrawBitmap(self.greenBmp, 110, 110, True)
            dc.DrawBitmap(self.blueBmp, 170, 50, True)
            self.log.write("using wx.DC\n")
        else:
            gc = wx.GraphicsContext.Create(dc)
            gc.DrawBitmap(self.redBmp, 50, 50, DIM,DIM)
            gc.DrawBitmap(self.greenBmp, 110, 110, DIM,DIM)
            gc.DrawBitmap(self.blueBmp, 170, 50, DIM,DIM)
            self.log.write("using wx.GraphicsContext\n")

    def MakeBitmap(self, red, green, blue, alpha=128):
        """Build a DIM x DIM 32-bit bitmap of one colour via raw pixel access."""
        # Create the bitmap that we will stuff pixel values into using
        # the raw bitmap access classes.
        bmp = wx.Bitmap(DIM, DIM, 32)

        # Create an object that facilitates access to the bitmap's
        # pixel buffer
        pixelData = wx.AlphaPixelData(bmp)
        if not pixelData:
            raise RuntimeError("Failed to gain raw access to bitmap data.")

        # We have two ways to access each pixel, first we'll use an
        # iterator to set every pixel to the colour and alpha values
        # passed in.
        for pixel in pixelData:
            pixel.Set(red, green, blue, alpha)

        # This block of code is another way to do the same as above,
        # but with the accessor interface instead of the Python
        # iterator.  It is a bit faster than the above because it
        # avoids the iterator/generator magic, but it is not nearly as
        # 'clean' looking ;-)
        #pixels = pixelData.GetPixels()
        #for y in range(DIM):
        #    pixels.MoveTo(pixelData, 0, y)
        #    for x in range(DIM):
        #        pixels.Set(red, green, blue, alpha)
        #        pixels.nextPixel()

        # Next we'll use the pixel accessor to set the border pixels
        # to be fully opaque
        pixels = pixelData.GetPixels()
        for x in range(DIM):
            pixels.MoveTo(pixelData, x, 0)
            pixels.Set(red, green, blue, wx.ALPHA_OPAQUE)
            pixels.MoveTo(pixelData, x, DIM-1)
            pixels.Set(red, green, blue, wx.ALPHA_OPAQUE)
        for y in range(DIM):
            pixels.MoveTo(pixelData, 0, y)
            pixels.Set(red, green, blue, wx.ALPHA_OPAQUE)
            pixels.MoveTo(pixelData, DIM-1, y)
            pixels.Set(red, green, blue, wx.ALPHA_OPAQUE)

        return bmp

    def MakeBitmap2(self, red, green, blue, alpha=128):
        """Build the same bitmap as MakeBitmap, but via a numeric array
        and wx.BitmapFromBufferRGBA (requires USE_NUMPY)."""
        # Make an array of bytes that is DIM*DIM in size, with enough
        # slots for each pixel to have a RGB and A value
        arr = makeByteArray( (DIM,DIM, 4) )

        # just some indexes to keep track of which byte is which
        R, G, B, A = range(4)

        # initialize all pixel values to the values passed in
        arr[:,:,R] = red
        arr[:,:,G] = green
        arr[:,:,B] = blue
        arr[:,:,A] = alpha

        # Set the alpha for the border pixels to be fully opaque
        arr[0, 0:DIM, A] = wx.ALPHA_OPAQUE      # first row
        arr[DIM-1, 0:DIM, A] = wx.ALPHA_OPAQUE  # last row
        arr[0:DIM, 0, A] = wx.ALPHA_OPAQUE      # first col
        arr[0:DIM, DIM-1, A] = wx.ALPHA_OPAQUE  # last col

        # finally, use the array to create a bitmap
        bmp = wx.BitmapFromBufferRGBA(DIM, DIM, arr)
        return bmp
#----------------------------------------------------------------------
def runTest(frame, nb, log):
    """Demo-framework entry point: build and return the test panel."""
    return TestPanel(nb, log)
#----------------------------------------------------------------------
# HTML shown in the demo framework's "Overview" tab.  This is a runtime
# string consumed by the framework, so its content is left untouched.
overview = """<html><body>
<h2><center>Raw Bitmap Access</center></h2>
wx.NativePixelData and wx.AlphaPixelData provide a cross-platform way
to access the platform-specific pixel buffer within a wx.Bitmap. They
provide both a random access method, and an iterator interface.
<p>Unfortunately, although these classes are convienient ways to access
and update the contents of a wx.Bitmap, we lose most of the efficiency
of the C++ classes by requiring one or more Python-to-C++ transitions
for each pixel. In fact it can be <b>much</b> slower than the other
ways of creating a bitmap from scratch, especially now that
wx.BitmapFromBuffer exists and can save the time needed to copy from a
wx.Image.
<p>To see this difference for yourself this module has been
instrumented to allow you to experiment with using either the raw
access or numpy/numarray, and also to time how long it takes to create
100 bitmaps like you see on the screen. Simply edit this module in
the \"Demo Code\" tab and set TIMEIT to True and then watch
the log window when the sample is reloaded. To try numpy or numarray
(if you have them installed) then set USE_NUMPY to True as well, and
watch the log window again. On my machines there is about <b>an
order of magnitude</b> difference between the raw access functions
and using a numarray.array with wx.BitmapFromBufferRGBA! Almost
another order of magnitude improvement can be gained with using the
new numpy module!
</body></html>
"""


# Run standalone via the demo framework's `run` helper.
if __name__ == '__main__':
    import sys,os
    import run
    run.main(['', os.path.basename(sys.argv[0])] + sys.argv[1:])
| [
"wx.PaintDC",
"timeit.Timer",
"wx.BitmapFromBufferRGBA",
"numpy.empty",
"wx.AlphaPixelData",
"wx.GraphicsContext.Create",
"os.path.basename",
"numarray.array",
"wx.Bitmap",
"wx.Panel.__init__"
] | [((1051, 1086), 'wx.Panel.__init__', 'wx.Panel.__init__', (['self', 'parent', '(-1)'], {}), '(self, parent, -1)\n', (1068, 1086), False, 'import wx\n'), ((2284, 2300), 'wx.PaintDC', 'wx.PaintDC', (['self'], {}), '(self)\n', (2294, 2300), False, 'import wx\n'), ((3016, 3039), 'wx.Bitmap', 'wx.Bitmap', (['DIM', 'DIM', '(32)'], {}), '(DIM, DIM, 32)\n', (3025, 3039), False, 'import wx\n'), ((3151, 3173), 'wx.AlphaPixelData', 'wx.AlphaPixelData', (['bmp'], {}), '(bmp)\n', (3168, 3173), False, 'import wx\n'), ((5546, 5584), 'wx.BitmapFromBufferRGBA', 'wx.BitmapFromBufferRGBA', (['DIM', 'DIM', 'arr'], {}), '(DIM, DIM, arr)\n', (5569, 5584), False, 'import wx\n'), ((509, 540), 'numpy.empty', 'numpy.empty', (['shape', 'numpy.uint8'], {}), '(shape, numpy.uint8)\n', (520, 540), False, 'import numpy\n'), ((2571, 2600), 'wx.GraphicsContext.Create', 'wx.GraphicsContext.Create', (['dc'], {}), '(dc)\n', (2596, 2600), False, 'import wx\n'), ((1418, 1464), 'timeit.Timer', 'timeit.Timer', (['"""bmp = s.MakeBitmap(10, 20, 30)"""'], {}), "('bmp = s.MakeBitmap(10, 20, 30)')\n", (1430, 1464), False, 'import timeit\n'), ((1503, 1550), 'timeit.Timer', 'timeit.Timer', (['"""bmp = s.MakeBitmap2(10, 20, 30)"""'], {}), "('bmp = s.MakeBitmap2(10, 20, 30)')\n", (1515, 1550), False, 'import timeit\n'), ((7378, 7407), 'os.path.basename', 'os.path.basename', (['sys.argv[0]'], {}), '(sys.argv[0])\n', (7394, 7407), False, 'import sys, os\n'), ((693, 735), 'numarray.array', 'numarray.array', ([], {'shape': 'shape', 'typecode': '"""u1"""'}), "(shape=shape, typecode='u1')\n", (707, 735), False, 'import numarray\n')] |
# <NAME> (github: @elaguerta)
# LBNL GIG
# File created: 19 February 2021
# Create NR3 Solution class, a namespace for calculations used by nr3
from . solution import Solution
from . circuit import Circuit
import numpy as np
from . nr3_lib.compute_NR3FT import compute_NR3FT
from . nr3_lib.compute_NR3JT import compute_NR3JT
from . nr3_lib.map_output import map_output
class SolutionNR3(Solution):
CONVERGENCE_TOLERANCE = 10**-6
    @classmethod
    def set_zip_values(cls, zip_v):
        """
        Sets zip values for the Solution class.

        :param zip_v: List or nd.array with 7 values
            [a_z_p, a_i_p, a_pq_p, a_z_q, a_i_q, a_pq_q, min voltage pu]

        Note that zip values are set both on the Solution class and Circuit
        class (presumably the Circuit update happens inside
        Solution.set_zip_values -- only the delegation is visible here).
        """
        Solution.set_zip_values(zip_v)
    def __init__(self, dss_fp: str, **kwargs):
        """
        Build an NR3 solution for the circuit described by the dss file.

        The base constructor parses the dss file (setting self.circuit);
        orientation is then fixed to column-major, and the Newton-Raphson
        matrices are precomputed: the initial state vector XNR, plus the
        slack-bus, KVL, KCL, and voltage-regulator KVL matrices.
        """
        super().__init__(dss_fp, **kwargs)  # sets self.circuit
        self._set_orient('cols')
        self._init_XNR()
        self._init_slack_bus_matrices()
        self._init_KVL_matrices()
        self._init_KCL_matrices()
        self._init_KVL_matrices_vregs()
def _init_XNR(self):
"""
adapted from
https://github.com/msankur/LinDist3Flow/blob/vectorized/20180601/PYTHON/lib/basematrices.py
written by @kathleenchang
"""
V0, I0 = None, None
Vslack = self.__class__.VSLACK
nline = self.circuit.lines.num_elements
nnode = self.circuit.buses.num_elements
tf_lines = self.circuit.transformers.get_num_lines_x_ph()
vr_lines = self.circuit.voltage_regulators.get_num_lines_x_ph()
# XNR order is bus voltages, line currents, transformer line currents,
# voltage regulator line currents * 2
XNR = np.zeros((2*3*(nnode + nline) + 2*tf_lines + 2*2*vr_lines, 1),
dtype=float)
# intialize node voltage portion of XNR
if V0 is None or len(V0) == 0:
for ph in range(3):
for k1 in range(nnode):
XNR[2*ph*nnode + 2*k1] = Vslack[ph].real
XNR[2*ph*nnode + 2*k1+1] = Vslack[ph].imag
# If initial V is given (usually from CVX)
elif len(V0) != 0:
for ph in range(3):
for k1 in range(nnode):
XNR[2*ph*nnode + 2*k1] = V0[ph, k1].real
XNR[2*ph*nnode + 2*k1+1] = V0[ph, k1].imag
# intialize line current portion of XNR
if I0 is None or len(I0) == 0:
XNR[(2*3*nnode):] = 0.0*np.ones((6*nline + 2*tf_lines + 2*2* vr_lines, 1), dtype = float)
# If initial I is given
elif len(I0) != 0:
for ph in range(3):
for k1 in range(nline):
XNR[(2*3*nnode) + 2*ph*nline + 2*k1] = I0[ph, k1].real
XNR[(2*3*nnode) + 2*ph*nline + 2*k1+1] = I0[ph, k1].imag
XNR[(2*3*nnode + 2*3*nline):] = np.zeros((len(XNR) - 2*3*nnode - 2*3*nline), 1)
self.XNR = XNR
def _init_slack_bus_matrices(self):
"""
Initializes g_SB and b_SB
adapted from
https://github.com/msankur/LinDist3Flow/blob/vectorized/20180601/PYTHON/lib/basematrices.py
written by @kathleenchang
"""
tf_lines = self.circuit.transformers.get_num_lines_x_ph()
vr_lines = self.circuit.voltage_regulators.get_num_lines_x_ph()
nline = self.circuit.lines.num_elements
nnode = self.circuit.buses.num_elements
Vslack = self.__class__.VSLACK
# ------------ Slack Bus ------------------
self.g_SB = np.zeros((6, 2*3*(nnode+nline) + 2*tf_lines + 2*2*vr_lines),
dtype=float)
sb_idx = [0, 1, 2*nnode, (2*nnode)+1, 4*nnode, (4*nnode)+1]
for i in range(len(sb_idx)):
self.g_SB[i, sb_idx[i]] = 1
self.b_SB = np.zeros((6, 1), dtype=float)
for i in range(3):
self.b_SB[2*i, 0] = Vslack[i].real
self.b_SB[(2*i) + 1] = Vslack[i].imag
    def _init_KVL_matrices(self):
        """
        Build the KVL constraint matrices ``self.G_KVL`` and ``self.b_KVL``.

        Each line (m, n) contributes, per phase, a pair of rows (real/imag)
        relating the endpoint bus voltages to the line current through the
        line's R and X matrices. Transformer lines contribute voltage-equality
        rows (no impedance terms here).

        copied from
        https://github.com/msankur/LinDist3Flow/blob/vectorized/20180601/PYTHON/lib/compute_SBKVL_matrices.py
        written by @kathleenchang
        """
        tf_bus = self.circuit.transformers.get_bus_ph_matrix()
        tf_lines = self.circuit.transformers.get_num_lines_x_ph()
        vr_lines = self.circuit.voltage_regulators.get_num_lines_x_ph()
        nline = self.circuit.lines.num_elements
        nnode = self.circuit.buses.num_elements
        X_matrix = self.circuit.lines.get_X_matrix()
        R_matrix = self.circuit.lines.get_R_matrix()
        # ------- Residuals for KVL across line (m,n) ----------
        # b_KVL is all zeros: KVL residuals have no constant term.
        self.b_KVL = np.zeros((2*3*(nline) + 2*tf_lines + 2*2*vr_lines, 1),
                              dtype=float)
        G_KVL = np.zeros((2*3*(nline) + 2*tf_lines + 2*2*vr_lines,
                          2*3*(nnode+nline) + 2*tf_lines + 2*2*vr_lines),
                         dtype=float)
        for ph in range(3):
            for line in range(nline):  # line = line index
                line_ele = self.circuit.lines.get_element(line)
                bus1_idx = self.circuit.buses.get_idx(line_ele.tx)
                bus2_idx = self.circuit.buses.get_idx(line_ele.rx)
                bus1_phases = line_ele.phase_matrix
                if bus1_phases[ph] == 1:
                    # Phase exists on this line: V_m - V_n - Z*I = 0,
                    # expanded into real (A, C) and imaginary (B, D) parts.
                    G_KVL[2*ph*nline + 2*line][2*(nnode)*ph + 2*(bus1_idx)] = 1 #A_m
                    G_KVL[2*ph*nline + 2*line][2*(nnode)*ph + 2*(bus2_idx)] = -1 #A_n
                    G_KVL[2*ph*nline + 2*line+1][2*(nnode)*ph + 2*(bus1_idx) + 1] = 1 #B_m
                    G_KVL[2*ph*nline + 2*line+1][2*(nnode)*ph + 2*(bus2_idx) + 1] = -1 #B_n
                    G_KVL[2*ph*nline + 2*line][2*3*(nnode) + 2*line] = -R_matrix[line][ph*3] * bus1_phases[0] #C_mn for a
                    G_KVL[2*ph*nline + 2*line][2*3*(nnode) + 2*line + 1] = X_matrix[line][ph*3] * bus1_phases[0] #D_mn for a
                    G_KVL[2*ph*nline + 2*line][2*3*(nnode) + 2*nline + 2*line] = -R_matrix[line][ph*3 + 1] * bus1_phases[1] #C_mn for b
                    G_KVL[2*ph*nline + 2*line][2*3*(nnode) + 2*nline + 2*line + 1] = X_matrix[line][ph*3 + 1] * bus1_phases[1] #D_mn for b
                    G_KVL[2*ph*nline + 2*line][2*3*(nnode) + 4*nline + 2*line] = -R_matrix[line][ph*3 + 2] * bus1_phases[2] #C_mn for c
                    G_KVL[2*ph*nline + 2*line][2*3*(nnode) + 4*nline + 2*line + 1] = X_matrix[line][ph*3 + 2] * bus1_phases[2] #D_mn for c
                    G_KVL[2*ph*nline + 2*line+1][2*3*(nnode) + 2*line] = -X_matrix[line][ph*3] * bus1_phases[0] #C_mn for a
                    G_KVL[2*ph*nline + 2*line+1][2*3*(nnode) + 2*line + 1] = -R_matrix[line][ph*3] * bus1_phases[0] #D_mn for a
                    G_KVL[2*ph*nline + 2*line+1][2*3*(nnode) + 2*nline + 2*line] = -X_matrix[line][ph*3 + 1] * bus1_phases[1] #C_mn for b
                    G_KVL[2*ph*nline + 2*line+1][2*3*(nnode) + 2*nline + 2*line + 1] = -R_matrix[line][ph*3 + 1] * bus1_phases[1] #D_mn for b
                    G_KVL[2*ph*nline + 2*line+1][2*3*(nnode) + 4*nline + 2*line] = -X_matrix[line][ph*3 + 2] * bus1_phases[2] #C_mn for c
                    G_KVL[2*ph*nline + 2*line+1][2*3*(nnode) + 4*nline + 2*line + 1] = -R_matrix[line][ph*3 + 2] * bus1_phases[2] #D_mn for c
                else:
                    # Phase absent: constrain this line current entry to 0.
                    G_KVL[2*ph*nline + 2*line][2*(nnode)*3 + 2*ph*nline + 2*line] = 1 #C_mn
                    G_KVL[2*ph*nline + 2*line+1][2*(nnode)*3 + 2*ph*nline + 2*line+1] = 1 #D_mn

        #------- Residuals for Transformer KVL ----------
        # Transformer primary/secondary voltages are set equal per existing
        # phase; kvl_count walks the per-phase transformer line index.
        line_idx_tf = range(0, tf_lines)
        kvl_count = 0
        for tfbs in range(len(tf_bus[0])):
            for ph in range(0, 3):
                if tf_bus[ph + 2, tfbs] != 0:
                    line = line_idx_tf[kvl_count]
                    G_KVL[2*3*nline + 2*line][2*nnode*ph + 2*int(tf_bus[0, tfbs])] = 1 #A_m
                    G_KVL[2*3*nline + 2*line][2*nnode*ph + 2*int(tf_bus[1, tfbs])] = -1 #A_n
                    G_KVL[2*3*nline + 2*line+1][2*nnode*ph + 2*int(tf_bus[0, tfbs]) + 1] = 1 #B_m
                    G_KVL[2*3*nline + 2*line+1][2*nnode*ph + 2*int(tf_bus[1, tfbs]) + 1] = -1 #B_n
                    kvl_count += 1
        self.G_KVL = G_KVL
    def _init_KCL_matrices(self, der=0, capacitance=0):
        """
        Build the KCL constraint tensors ``self.H`` (quadratic terms),
        ``self.g`` (linear terms) and ``self.b`` (constant terms), one
        real/imag residual pair per phase per non-slack bus.

        :param der: complex DER injection adjustment (default 0, i.e. none)
        :param capacitance: capacitance adjustment (default 0)

        copied from 20180601/PYTHON/lib/compute_KCL_matrices.py
        written by @kathleenchang
        """
        tf_bus = self.circuit.transformers.get_bus_ph_matrix()
        vr_bus = self.circuit.voltage_regulators.get_bus_ph_matrix()
        tf_lines = self.circuit.transformers.get_num_lines_x_ph()
        # NOTE(review): tf_no and vr_count are both reassigned from dss
        # queries further down before first use -- these two assignments are
        # effectively dead.
        tf_no = self.circuit.transformers.num_elements
        vr_lines = self.circuit.voltage_regulators.get_num_lines_x_ph()
        vr_count = self.circuit.voltage_regulators.num_elements
        nnode = self.circuit.buses.num_elements
        nline = self.circuit.lines.num_elements

        # Line Indices Associated with Voltage Regulators and Transformers
        # Each regulator contributes an (in, out) pair of lines, interleaved.
        line_in_idx_vr = range(0, 2*vr_lines, 2)
        line_out_idx_vr = range(1, 2*vr_lines, 2)
        line_idx_tf = range(0, tf_lines)

        load_kw = self.circuit.loads.get_ppu_matrix()
        load_kvar = self.circuit.loads.get_qpu_matrix()
        caparr = self.circuit.get_cappu_matrix()

        # ----------Residuals for KCL at a bus (m) ----------
        bp = self.circuit.buses.get_phase_matrix_dict()
        dss = self.dss

        # Zip Parameters
        # Load
        beta_S = Circuit.aPQ_p
        beta_I = Circuit.aI_p
        beta_Z = Circuit.aZ_p
        # Capacitors
        gamma_S = Circuit.aPQ_p
        gamma_I = Circuit.aI_p
        gamma_Z = Circuit.aZ_p

        H = np.zeros((2*3*(nnode-1), 2*3*(nnode+nline) + 2*tf_lines +
                      2*2*vr_lines, 2*3*(nnode+nline) + 2*tf_lines +
                      2*2*vr_lines), dtype=float)
        g = np.zeros((2*3*(nnode-1), 1, 2*3*(nnode+nline) + 2*tf_lines +
                      2*2*vr_lines), dtype=float)
        b = np.zeros((2*3*(nnode-1), 1, 1), dtype=float)

        # --------------- Quadratic Terms -----------------
        for ph in range(0,3):
            if ph == 0: #set nominal voltage based on phase
                A0 = 1
                B0 = 0
            elif ph == 1:
                A0 = -1/2
                B0 = -1 * np.sqrt(3)/2
            elif ph == 2:
                A0 = -1/2
                B0 = np.sqrt(3)/2
            for k2 in range(1, len(dss.Circuit.AllBusNames())): #skip slack bus
                dss.Circuit.SetActiveBus(dss.Circuit.AllBusNames()[k2]) #set the bus
                bus_name = dss.Circuit.AllBusNames()[k2]
                in_lines = self.circuit.lines.get_line_list(bus_name, 'in') # upstream buses
                out_lines = self.circuit.lines.get_line_list(bus_name, 'out') # downstream buses
                for cplx in range(0,2):
                    idxbs = dss.Circuit.AllBusNames().index(dss.Circuit.AllBusNames()[k2])
                    # cplx == 0: real residual (kW); cplx == 1: imaginary (kvar)
                    if cplx == 0:
                        load_val = load_kw[ph,idxbs]
                        cap_val = 0
                    else:
                        load_val = load_kvar[ph,idxbs]
                        cap_val = caparr[ph][idxbs]
                    #gradient_mag = np.array([A0 * ((A0**2+B0**2) ** (-1/2)), B0 * ((A0**2+B0**2) ** (-1/2))]) #some derivatives
                    # Hessian of |V| at the nominal phase voltage (A0, B0),
                    # used for the constant-current (beta_I/gamma_I) ZIP term.
                    hessian_mag = np.array([[-((A0**2)*(A0**2+B0**2)**(-3/2))+(A0**2+B0**2)**(-1/2), -A0*B0*(A0**2+B0**2)**(-3/2)],
                                            [-A0*B0*(A0**2+B0**2)**(-3/2), -((B0**2)*(A0**2+B0**2)**(-3/2))+((A0**2+B0**2)**(-1/2))]], dtype=float)
                    available_phases = bp[dss.Circuit.AllBusNames()[k2]] #phase array at specific bus
                    if available_phases[ph] == 1: #quadratic terms
                        H[2*ph*(nnode-1) + (k2-1)*2 + cplx][2*(nnode)*ph + 2*k2][2*(nnode)*ph + 2*k2] = \
                            -load_val * (beta_Z + (0.5 * beta_I* hessian_mag[0][0])) + \
                            cap_val * (gamma_Z + (0.5 * gamma_I * hessian_mag[0][0]))# TE replace assignment w/ -load_val * beta_Z; #a**2
                        H[2*ph*(nnode-1) + (k2-1)*2 + cplx][2*(nnode)*ph + 2*k2 + 1][2*(nnode)*ph + 2*k2 + 1] = \
                            -load_val * (beta_Z + (0.5 * beta_I * hessian_mag[1][1])) + \
                            cap_val * (gamma_Z + (0.5 * gamma_I * hessian_mag[1][1]))# TE replace assignment w/ -load_val * beta_Z; #b**2
                        #H[2*ph*(nnode-1) + (k2-1)*2 + cplx][2*(nnode)*ph + 2*k2][2*(nnode)*ph + 2*k2 + 1] = -load_val * beta_I * hessian_mag[0][1] / 2 #remove for TE
                        #H[2*ph*(nnode-1) + (k2-1)*2 + cplx][2*(nnode)*ph + 2*k2 + 1][2*(nnode)*ph + 2*k2] = -load_val * beta_I * hessian_mag[1][0] / 2 #remove for TE
                    # Power flowing in: + (1/2) * V x I cross terms.
                    for i in range(len(in_lines)): # in lines
                        line_idx = self.circuit.lines.get_idx(in_lines[i])
                        if available_phases[ph] == 1:
                            if cplx == 0: #real residual
                                #A_m and C_lm
                                H[2*ph*(nnode-1) + (k2-1)*2 + cplx][2*(nnode)*ph + 2*k2][2*3*(nnode) + 2*ph*nline + 2*line_idx] = 1/2
                                H[2*ph*(nnode-1) + (k2-1)*2 + cplx][2*3*(nnode) + 2*ph*nline + 2*line_idx][2*(nnode)*ph + 2*k2] = 1/2
                                #B_m and D_lm
                                H[2*ph*(nnode-1) + (k2-1)*2+ cplx][2*(nnode)*ph + 2*k2 + 1][2*3*(nnode) + 2*ph*nline + 2*line_idx + 1] = 1/2
                                H[2*ph*(nnode-1) + (k2-1)*2+ cplx][2*3*(nnode) + 2*ph*nline + 2*line_idx + 1][2*(nnode)*ph + 2*k2 + 1] = 1/2
                            if cplx == 1: #imaginary residual
                                # #A_m, D_lm
                                H[2*ph*(nnode-1) + (k2-1)*2+ cplx][2*(nnode)*ph + 2*k2][2*3*(nnode) + 2*ph*nline + 2*line_idx + 1] = -1/2
                                H[2*ph*(nnode-1) + (k2-1)*2+ cplx][2*3*(nnode) + 2*ph*nline + 2*line_idx + 1][2*(nnode)*ph + 2*k2] = -1/2
                                #B_m and C_lm
                                H[2*ph*(nnode-1) + (k2-1)*2+ cplx][2*(nnode)*ph + 2*k2 + 1][2*3*(nnode) + 2*ph*nline + 2*line_idx] = 1/2
                                H[2*ph*(nnode-1) + (k2-1)*2+ cplx][2*3*(nnode) + 2*ph*nline + 2*line_idx][2*(nnode)*ph + 2*k2 + 1] = 1/2
                    # Power flowing out: same cross terms with opposite sign.
                    for j in range(len(out_lines)): # out lines
                        line_idx = self.circuit.lines.get_idx(out_lines[j])
                        if available_phases[ph] == 1:
                            if cplx == 0: #real residual
                                #A_m and C_mn
                                H[2*ph*(nnode-1) + (k2-1)*2+ cplx][2*(nnode)*ph + 2*k2][2*3*(nnode) + 2*ph*nline + 2*line_idx] = -1/2
                                H[2*ph*(nnode-1) + (k2-1)*2+ cplx][2*3*(nnode) + 2*ph*nline + 2*line_idx][2*(nnode)*ph + 2*k2] = -1/2
                                #B_m and D_mn
                                H[2*ph*(nnode-1) + (k2-1)*2+ cplx][2*(nnode)*ph + 2*k2 + 1][2*3*(nnode) + 2*ph*nline + 2*line_idx + 1] = -1/2
                                H[2*ph*(nnode-1) + (k2-1)*2+ cplx][2*3*(nnode) + 2*ph*nline + 2*line_idx + 1][2*(nnode)*ph + 2*k2 + 1] = -1/2
                            if cplx == 1: #imaginary residual
                                #A_m and D_mn
                                H[2*ph*(nnode-1) + (k2-1)*2+ cplx][2*(nnode)*ph + 2*k2][2*3*(nnode) + 2*ph*nline + 2*line_idx + 1]= 1/2
                                H[2*ph*(nnode-1) + (k2-1)*2+ cplx][2*3*(nnode) + 2*ph*nline + 2*line_idx + 1][2*(nnode)*ph + 2*k2] = 1/2
                                #C_m and B_mn
                                H[2*ph*(nnode-1) + (k2-1)*2+cplx][2*(nnode)*ph + 2*k2 + 1][2*3*(nnode) + 2*ph*nline + 2*line_idx] = -1/2
                                H[2*ph*(nnode-1) + (k2-1)*2+cplx][2*3*(nnode) + 2*ph*nline + 2*line_idx][2*(nnode)*ph + 2*k2 + 1] = -1/2

        # ----------------------- Transformer KCL -----------------------
        tf_no = len(dss.Transformers.AllNames()) - len(dss.RegControls.AllNames())
        count_tf = 0
        count_tf2 = 0
        for i in range(tf_no):
            for ph in range(0,3):
                k2 = int(tf_bus[1, i]) #in bus index of transformer [out bus: a0] --line-a0-a1-- [in bus: a1]
                if k2 == 0: #if source bus, need to skip line
                    count_tf += 1
                if k2 != 0 and tf_bus[ph + 2, i] != 0: #if not source bus, perform KCL
                    line_idx = line_idx_tf[count_tf]
                    #A_m and C_lm
                    H[2*ph*(nnode-1) + (k2-1)*2 ][2*(nnode)*ph + 2*k2][2*3*(nnode+nline) + 2*line_idx] = 1/2
                    H[2*ph*(nnode-1) + (k2-1)*2 ][2*3*(nnode+nline) + 2*line_idx][2*(nnode)*ph + 2*k2] = 1/2
                    #B_m and D_lm
                    H[2*ph*(nnode-1) + (k2-1)*2][2*(nnode)*ph + 2*k2 + 1][2*3*(nnode+nline) + 2*line_idx + 1] = 1/2
                    H[2*ph*(nnode-1) + (k2-1)*2][2*3*(nnode+nline) + 2*line_idx + 1][2*(nnode)*ph + 2*k2 + 1] = 1/2
                    #A_m, D_lm
                    H[2*ph*(nnode-1) + (k2-1)*2+ 1][2*(nnode)*ph + 2*k2][2*3*(nnode+nline) + 2*line_idx + 1] = -1/2
                    H[2*ph*(nnode-1) + (k2-1)*2+ 1][2*3*(nnode+nline) + 2*line_idx + 1][2*(nnode)*ph + 2*k2] = -1/2
                    #B_m and C_lm
                    H[2*ph*(nnode-1) + (k2-1)*2+ 1][2*(nnode)*ph + 2*k2 + 1][2*3*(nnode+nline) + 2*line_idx] = 1/2
                    H[2*ph*(nnode-1) + (k2-1)*2+ 1][2*3*(nnode+nline) + 2*line_idx][2*(nnode)*ph + 2*k2 + 1] = 1/2
                    count_tf += 1 #go to next line

        for j in range(tf_no): #fill in H for the outlines
            for ph in range(0,3):
                k2 = int(tf_bus[0, j]) #out bus index of transformer
                if k2 == 0:
                    count_tf2 += 1
                if k2 != 0 and tf_bus[ph + 2, j] != 0:
                    line_idx = line_idx_tf[count_tf2]
                    #real residual
                    #A_m and C_mn
                    H[2*ph*(nnode-1) + (k2-1)*2][2*(nnode)*ph + 2*k2][2*3*(nnode+nline) + 2*line_idx] = -1/2
                    H[2*ph*(nnode-1) + (k2-1)*2][2*3*(nnode+nline) + 2*line_idx][2*(nnode)*ph + 2*k2] = -1/2
                    #B_m and D_mn
                    H[2*ph*(nnode-1) + (k2-1)*2][2*(nnode)*ph + 2*k2 + 1][2*3*(nnode+nline) + 2*line_idx + 1] = -1/2
                    H[2*ph*(nnode-1) + (k2-1)*2][2*3*(nnode+nline) + 2*line_idx + 1][2*(nnode)*ph + 2*k2 + 1] = -1/2
                    #imaginary residual
                    #A_m and D_mn
                    H[2*ph*(nnode-1) + (k2-1)*2 + 1][2*(nnode)*ph + 2*k2][2*3*(nnode+nline) + 2*line_idx + 1]= 1/2
                    H[2*ph*(nnode-1) + (k2-1)*2 + 1][2*3*(nnode+nline) + 2*line_idx + 1][2*(nnode)*ph + 2*k2] = 1/2
                    #C_m and B_mn
                    H[2*ph*(nnode-1) + (k2-1)*2 + 1][2*(nnode)*ph + 2*k2 + 1][2*3*(nnode+nline) + 2*line_idx] = -1/2
                    H[2*ph*(nnode-1) + (k2-1)*2 + 1][2*3*(nnode+nline) + 2*line_idx][2*(nnode)*ph + 2*k2 + 1] = -1/2
                    count_tf2+=1

        # ----------------------- Voltage Regulator KCL -----------------------
        vr_count = len(dss.RegControls.AllNames())
        count_vr = 0
        count_vr2 = 0
        if vr_count > 0:
            for i in range(vr_count): #in lines
                for ph in range(0,3):
                    k2 = int(vr_bus[1, i])
                    if k2 == 0:
                        count_vr += 1
                    if k2 != 0 and vr_bus[ph + 2, i] != 0:
                        line_idx = line_in_idx_vr[count_vr]
                        #real residual
                        #A_m and C_lm
                        H[2*ph*(nnode-1) + (k2-1)*2 + 0][2*(nnode)*ph + 2*k2][2*3*(nnode+nline) + 2*tf_lines + 2*line_idx] = 1/2
                        H[2*ph*(nnode-1) + (k2-1)*2 + 0][2*3*(nnode+nline) + 2*tf_lines + 2*line_idx][2*(nnode)*ph + 2*k2] = 1/2
                        #B_m and D_lm
                        H[2*ph*(nnode-1) + (k2-1)*2 + 0][2*(nnode)*ph + 2*k2 + 1][2*3*(nnode+nline) + 2*tf_lines + 2*line_idx + 1] = 1/2
                        H[2*ph*(nnode-1) + (k2-1)*2 + 0][2*3*(nnode+nline) + 2*tf_lines + 2*line_idx + 1][2*(nnode)*ph + 2*k2 + 1] = 1/2
                        #imaginary residual
                        # #A_m, D_lm
                        H[2*ph*(nnode-1) + (k2-1)*2 + 1][2*(nnode)*ph + 2*k2][2*3*(nnode+nline) + 2*tf_lines + 2*line_idx + 1] = -1/2
                        H[2*ph*(nnode-1) + (k2-1)*2 + 1][2*3*(nnode+nline) + 2*tf_lines + 2*line_idx + 1][2*(nnode)*ph + 2*k2] = -1/2
                        #B_m and C_lm
                        H[2*ph*(nnode-1) + (k2-1)*2 + 1][2*(nnode)*ph + 2*k2 + 1][2*3*(nnode+nline) + 2*tf_lines + 2*line_idx] = 1/2
                        H[2*ph*(nnode-1) + (k2-1)*2 + 1][2*3*(nnode+nline) + 2*tf_lines + 2*line_idx][2*(nnode)*ph + 2*k2 + 1] = 1/2
                        count_vr += 1

            for j in range(vr_count): #out lines
                for ph in range(0,3):
                    k2 = int(vr_bus[0, j])
                    if k2 == 0:
                        count_vr2 += 1
                    if k2 != 0 and vr_bus[ph + 2, j] != 0:
                        line_idx = line_out_idx_vr[count_vr2]
                        #real residual
                        #A_m and C_mn
                        H[2*ph*(nnode-1) + (k2-1)*2][2*(nnode)*ph + 2*k2][2*3*(nnode+nline) + 2*tf_lines + 2*line_idx] = -1/2
                        H[2*ph*(nnode-1) + (k2-1)*2][2*3*(nnode+nline) + 2*tf_lines + 2*line_idx][2*(nnode)*ph + 2*k2] = -1/2
                        #B_m and D_mn
                        H[2*ph*(nnode-1) + (k2-1)*2][2*(nnode)*ph + 2*k2 + 1][2*3*(nnode+nline) + 2*tf_lines + 2*line_idx + 1] = -1/2
                        H[2*ph*(nnode-1) + (k2-1)*2][2*3*(nnode+nline) + 2*tf_lines + 2*line_idx + 1][2*(nnode)*ph + 2*k2 + 1] = -1/2
                        #imaginary residual
                        #A_m and D_mn
                        H[2*ph*(nnode-1) + (k2-1)*2 + 1][2*(nnode)*ph + 2*k2][2*3*(nnode+nline) + 2*tf_lines + 2*line_idx + 1] = 1/2
                        H[2*ph*(nnode-1) + (k2-1)*2 + 1][2*3*(nnode+nline) + 2*tf_lines + 2*line_idx + 1][2*(nnode)*ph + 2*k2] = 1/2
                        #C_m and B_mn
                        H[2*ph*(nnode-1) + (k2-1)*2 + 1][2*(nnode)*ph + 2*k2 + 1][2*3*(nnode+nline) + 2*tf_lines + 2*line_idx] = -1/2
                        H[2*ph*(nnode-1) + (k2-1)*2 + 1][2*3*(nnode+nline) + 2*tf_lines + 2*line_idx][2*(nnode)*ph + 2*k2 + 1] = -1/2
                        count_vr2 += 1

        # ----------------------- Linear Term & Constant Term -----------------------
        for ph in range(0,3):
            for k2 in range(1, len(dss.Circuit.AllBusNames())):
                for cplx in range(0,2):
                    available_phases = bp[dss.Circuit.AllBusNames()[k2]] #phase array at specific bus
                    idxbs = dss.Circuit.AllBusNames().index(dss.Circuit.AllBusNames()[k2])
                    if cplx == 0:
                        load_val = load_kw[ph][idxbs]
                    else:
                        load_val = load_kvar[ph][idxbs]
                    # Linear terms
                    g_temp = np.zeros(2*3*(nnode+nline) + 2*tf_lines +
                                      2*2*vr_lines, dtype=float)
                    if available_phases[ph] == 0: #if phase does not exist
                        g_temp[2*(ph)*nnode + 2*k2 + cplx] = 1
                    g[2*(nnode-1)*ph + 2*(k2-1) + cplx, 0,:] = g_temp
                    # Constant terms
                    # NOTE(review): in both non-default branches below
                    # b_factor is computed and then immediately overwritten
                    # with 0, zeroing out the der/capacitance adjustment --
                    # looks deliberate (TE experiment?) but worth confirming.
                    if cplx == 0:
                        if der.real != 0:
                            b_factor = der.real
                            b_factor = 0
                        else:
                            b_factor = 0
                    else:
                        if capacitance != 0 or der.imag != 0:
                            b_factor = capacitance - der.imag
                            b_factor = 0
                        else:
                            b_factor = caparr[ph][k2]
                    if available_phases[ph] == 0: #if phase does not exist
                        b_temp = 0
                    else:
                        b_temp = (-load_val * beta_S) + (b_factor * gamma_S)
                    b[2*(nnode-1)*ph + 2*(k2-1) + cplx][0][0] = b_temp

        self.H, self.g, self.b = H, g, b
    def _init_KVL_matrices_vregs(self):
        """
        Build the voltage-regulator constraint matrices ``self.H_reg``
        (quadratic, power conservation across the regulator) and
        ``self.G_reg`` (linear, voltage gain between regulator buses).

        copied from 20180601/PYTHON/lib/compute_SBKVL_matrices.py
        written by @kathleenchang
        """
        # ---------- Voltage Regulator -----------
        tf_bus = self.circuit.transformers.get_bus_ph_matrix()
        vr_bus = self.circuit.voltage_regulators.get_bus_ph_matrix()
        tf_lines = self.circuit.transformers.get_num_lines_x_ph()
        tf_no = self.circuit.transformers.num_elements
        vr_lines = self.circuit.voltage_regulators.get_num_lines_x_ph()
        vr_count = self.circuit.voltage_regulators.num_elements
        nnode = self.circuit.buses.num_elements
        nline = self.circuit.lines.num_elements
        gain = self.circuit.voltage_regulators.get_gain_matrix()

        H_reg = np.zeros((2*vr_lines, 2*3*(nnode+nline) + 2*tf_lines +
                           2*2*vr_lines, 2*3*(nnode+nline) +
                           2*tf_lines + 2*2*vr_lines), dtype=float)
        G_reg = np.zeros((2*vr_lines, 2*3*(nnode+nline) + 2*tf_lines +
                           2*2*vr_lines), dtype=float)

        # voltage ratio: V_bus2 - gamma V_bus1 = 0
        # Regulator (in, out) current lines are interleaved in the state
        # vector: even indices = in-line, odd indices = out-line.
        line_in_idx = range(0, 2*vr_lines, 2)
        line_out_idx = range(1, 2*vr_lines, 2)
        vr_counter = 0
        for m in range(vr_count):
            bus1_idx = vr_bus[0, m]
            bus2_idx = vr_bus[1, m]
            for ph in range(0,3):
                if vr_bus[ph + 2,m] != 0:
                    # voltage gain: gamma*A_in = A_out
                    # gamma * B_in = B_out
                    # Negative not shown below because inserted into gain term
                    G_reg[2*vr_counter][2*nnode*ph + 2*bus1_idx] = gain[m] #A_in
                    G_reg[2*vr_counter][2*nnode*ph + 2*bus2_idx] = 1 #A_out
                    G_reg[2*vr_counter + 1][2*nnode*ph + 2*bus1_idx + 1] = gain[m] #B_in
                    G_reg[2*vr_counter + 1][2*nnode*ph + 2*bus2_idx + 1] = 1 #B_out

                    #conservation of power: V_bus1 (I_bus1,out)* - V_bus2 (I_bus2,in)* = 0
                    # A_1 * C_out + B_1 * D_out - (A_2 * C_in + B_2 * D_in)
                    # j(B_1 * C_out - A_1 * D_out) - j(B_2 * C_in - A_2 * D_in)
                    #A_1 C_out
                    H_reg[2*vr_counter][2*nnode*ph + 2*bus1_idx][2*3*(nnode+nline) + 2*tf_lines + 2*line_out_idx[vr_counter]] = 1
                    H_reg[2*vr_counter][2*3*(nnode+nline) + 2*tf_lines + 2*line_out_idx[vr_counter]][2*nnode*ph + 2*bus1_idx] = 1
                    #B_1 D_out
                    H_reg[2*vr_counter][2*nnode*ph + 2*bus1_idx + 1][2*3*(nnode+nline) + 2*tf_lines + 2*line_out_idx[vr_counter] + 1] = 1
                    H_reg[2*vr_counter][2*3*(nnode+nline) + 2*tf_lines + 2*line_out_idx[vr_counter]+ 1][2*nnode*ph + 2*bus1_idx + 1] = 1
                    #A_2 C_in
                    H_reg[2*vr_counter][2*nnode*ph + 2*bus2_idx][2*3*(nnode+nline) + 2*tf_lines + 2*line_in_idx[vr_counter]] = -1
                    H_reg[2*vr_counter][2*3*(nnode+nline) + 2*tf_lines + 2*line_in_idx[vr_counter]][2*nnode*ph + 2*bus2_idx] = -1
                    #B_2 D_in
                    H_reg[2*vr_counter][2*nnode*ph + 2*bus2_idx + 1][2*3*(nnode+nline) + 2*tf_lines + 2*line_in_idx[vr_counter] + 1] = -1
                    H_reg[2*vr_counter][2*3*(nnode+nline) + 2*tf_lines + 2*line_in_idx[vr_counter] + 1][2*nnode*ph + 2*bus2_idx + 1] = -1
                    #B_1 * C_out
                    H_reg[2*vr_counter + 1][2*nnode*ph + 2*bus1_idx + 1][2*3*(nnode+nline) + 2*tf_lines + 2*line_out_idx[vr_counter]] = 1
                    H_reg[2*vr_counter + 1][2*3*(nnode+nline) + 2*tf_lines+ 2*line_out_idx[vr_counter]][2*nnode*ph + 2*bus1_idx + 1] = 1
                    # A_1 * D_out
                    H_reg[2*vr_counter + 1][2*nnode*ph + 2*bus1_idx][2*3*(nnode+nline) + 2*tf_lines + 2*line_out_idx[vr_counter] + 1] = -1
                    H_reg[2*vr_counter + 1][2*3*(nnode+nline) + 2*tf_lines + 2*line_out_idx[vr_counter] + 1][2*nnode*ph + 2*bus1_idx] = -1
                    #B_2 * C_in
                    H_reg[2*vr_counter + 1][2*nnode*ph + 2*bus2_idx + 1][2*3*(nnode+nline) + 2*tf_lines + 2*line_in_idx[vr_counter]] = -1
                    H_reg[2*vr_counter + 1][2*3*(nnode+nline) + 2*tf_lines + 2*line_in_idx[vr_counter]][2*nnode*ph + 2*bus2_idx + 1] = -1
                    # A_2 * D_in
                    H_reg[2*vr_counter + 1][2*nnode*ph + 2*bus2_idx][2*3*(nnode+nline) + 2*tf_lines + 2*line_in_idx[vr_counter] + 1] = 1
                    H_reg[2*vr_counter + 1][2*3*(nnode+nline) + 2*tf_lines + 2*line_in_idx[vr_counter] + 1][2*nnode*ph + 2*bus2_idx] = 1
                    vr_counter += 1

        self.H_reg, self.G_reg = H_reg, G_reg
    def change_KCL_matrices(self, der=0, capacitance=0, t=-1):
        """
        Adjust the KCL matrices for time-varying loads, DER injections and
        capacitance changes, mutating ``self.H`` / ``self.b`` in place and
        returning them.

        :param der: complex DER injection adjustment (0 = none)
        :param capacitance: capacitance adjustment (0 = none)
        :param t: timestep for time-varying load scaling (-1 = static load)
        :return: (H, b) -- the (mutated) quadratic and constant KCL terms

        NOTE(review): the constant term subtracts ``wpu[ph][k2]`` --
        presumably a controller/disturbance power term from the circuit;
        confirm its semantics in Circuit.get_wpu_matrix.
        """
        H, g, b = self.H, self.g, self.b
        wpu = self.circuit.get_wpu_matrix()
        nnode = self.circuit.buses.num_elements
        # load_kw, load_kvar = nominal_load_values(t)
        # caparr = nominal_cap_arr()
        load_kw = self.circuit.loads.get_ppu_matrix()
        load_kvar = self.circuit.loads.get_qpu_matrix()
        caparr = self.circuit.get_cappu_matrix()

        # ----------Residuals for KCL at a bus (m) ----------
        #Zip Parameters
        beta_S = Circuit.aPQ_p
        beta_I = Circuit.aI_p
        beta_Z = Circuit.aZ_p
        # Capacitors
        gamma_S = Circuit.aPQ_p
        gamma_I = Circuit.aI_p
        gamma_Z = Circuit.aZ_p

        # Quadratic Terms
        # Time varying load: scale the existing diagonal voltage-squared
        # terms by a sinusoidal load profile.
        if t != -1:
            for ph in range(0, 3):
                for k2 in range(1, nnode):  # skip slack bus
                    bus = self.circuit.buses.get_element(k2)
                    #dss.Circuit.SetActiveBus(dss.Circuit.AllBusNames()[k2]) #set the bus
                    available_phases = bus.phase_matrix  # phase array at specific bus
                    for cplx in range(0, 2):
                        if available_phases[ph] == 1:  # quadratic terms
                            H[2*ph*(nnode-1) + (k2-1)*2 + cplx][2*(nnode)*ph + 2*k2][2 *
                                (nnode)*ph + 2*k2] *= (1 + 0.1*np.sin(2*np.pi*0.01*t))
                            #-load_val * (beta_Z + (0.5 * beta_I* hessian_mag[0][0])) # TE replace assignment w/ -load_val * beta_Z; #a**2
                            H[2*ph*(nnode-1) + (k2-1)*2 + cplx][2*(nnode)*ph + 2*k2 + 1][2 *
                                (nnode)*ph + 2*k2 + 1] *= (1 + 0.1*np.sin(2*np.pi*0.01*t))
                            #-load_val * (beta_Z + (0.5 * beta_I * hessian_mag[1][1])) # TE replace assignment w/ -load_val * beta_Z; #b**2

        # Constant Term
        if t != -1 or der != 0 or capacitance != 0:
            for ph in range(0, 3):
                for k2 in range(1, nnode):
                    bus = self.circuit.buses.get_element(k2)
                    available_phases = bus.phase_matrix
                    if available_phases[ph] == 1:
                        for cplx in range(0, 2):
                            if cplx == 0:
                                load_val = load_kw[ph][k2]
                                if der.real != 0:
                                    b_factor = der.real
                                else:
                                    b_factor = 0
                            else:
                                load_val = load_kvar[ph][k2]
                                if capacitance != 0 or der.imag != 0:
                                    b_factor = capacitance - der.imag
                                else:
                                    b_factor = caparr[ph][k2]

                            if t != -1:
                                print('Should not enter')
                                load_val *= (1 + 0.1*np.sin(2*np.pi*0.01*t))

                            b_temp = (-load_val * beta_S) + \
                                (b_factor * gamma_S)
                            # TODO: resolve numpy warning here
                            b[2*(nnode-1)*ph + 2*(k2-1) +
                              cplx][0][0] = b_temp - wpu[ph][k2]

        return H, b
def solve(self):
"""
Solves powerflow once, updates self.XNR with solved XNR
From src/nr3_python/lib/NR3.py
Written by @kathleenchang
"""
# get pointers to basematrices
XNR, g_SB, b_SB = self.XNR, self.g_SB, self.b_SB
G_KVL, b_KVL, = self.G_KVL, self.b_KVL
H, g, b = self.H, self.g, self.b
H_reg, G_reg = self.H_reg, self.G_reg
# get index and Sbase info from self.circuit
nline = self.circuit.lines.num_elements
nnode = self.circuit.buses.num_elements
Sbase = self.circuit.Sbase
tf_lines = self.circuit.transformers.get_num_lines_x_ph()
vr_lines = self.circuit.voltage_regulators.get_num_lines_x_ph()
tol = self.__class__.CONVERGENCE_TOLERANCE
maxiter = self.__class__.maxiter
FT = 1e99
itercount = 0
# solve power-flow.
while np.amax(np.abs(FT)) >= 1e-9 and itercount < maxiter:
# print("Iteration number for Original NR3 %f" % (itercount))
FT = compute_NR3FT(XNR, g_SB, b_SB, G_KVL, b_KVL, H,
g, b, nnode, nline, H_reg, G_reg, vr_lines)
JT = compute_NR3JT(XNR, g_SB, G_KVL, H, g, nnode,
nline, H_reg, G_reg, tf_lines, vr_lines)
if JT.shape[0] >= JT.shape[1]:
XNR = XNR - np.linalg.inv(JT.T@JT)@JT.T@FT
itercount += 1
XNR_final = XNR
self.XNR = XNR
self.converged = True
self.map_XNR()
def map_XNR(self):
"""
Set self,V, self.I, self.Stx, self.Srx, self.i, self.s
based on the current value of self.XNR
Written by @kathleenchang
"""
TXnum = self.circuit.get_tx_idx_matrix()
RXnum = self.circuit.get_rx_idx_matrix()
nnode = self.circuit.buses.num_elements
nline = self.circuit.lines.num_elements
PH = self.circuit.buses.get_phase_matrix(self._orient)
spu = self.circuit.get_spu_matrix()
APQ = self.circuit.get_aPQ_matrix()
AZ = self.circuit.get_aZ_matrix()
AI = self.circuit.get_aI_matrix()
cappu = self.circuit.get_cappu_matrix()
wpu = self.circuit.get_wpu_matrix()
XNR = self.XNR
# TODO: can/should we include transformers and voltage regulators in
# in INR?
VNR, INR, STXNR, SRXNR, iNR, sNR = map_output(self.circuit, XNR)
self.V = VNR
self.Vmag = np.abs(VNR)
self.I = INR
self.Stx = STXNR
self.Srx = SRXNR
self.i_Node = iNR
self.sV = sNR
| [
"numpy.abs",
"numpy.sqrt",
"numpy.ones",
"numpy.array",
"numpy.zeros",
"numpy.linalg.inv",
"numpy.sin"
] | [((1776, 1865), 'numpy.zeros', 'np.zeros', (['(2 * 3 * (nnode + nline) + 2 * tf_lines + 2 * 2 * vr_lines, 1)'], {'dtype': 'float'}), '((2 * 3 * (nnode + nline) + 2 * tf_lines + 2 * 2 * vr_lines, 1),\n dtype=float)\n', (1784, 1865), True, 'import numpy as np\n'), ((3627, 3716), 'numpy.zeros', 'np.zeros', (['(6, 2 * 3 * (nnode + nline) + 2 * tf_lines + 2 * 2 * vr_lines)'], {'dtype': 'float'}), '((6, 2 * 3 * (nnode + nline) + 2 * tf_lines + 2 * 2 * vr_lines),\n dtype=float)\n', (3635, 3716), True, 'import numpy as np\n'), ((3897, 3926), 'numpy.zeros', 'np.zeros', (['(6, 1)'], {'dtype': 'float'}), '((6, 1), dtype=float)\n', (3905, 3926), True, 'import numpy as np\n'), ((4802, 4877), 'numpy.zeros', 'np.zeros', (['(2 * 3 * nline + 2 * tf_lines + 2 * 2 * vr_lines, 1)'], {'dtype': 'float'}), '((2 * 3 * nline + 2 * tf_lines + 2 * 2 * vr_lines, 1), dtype=float)\n', (4810, 4877), True, 'import numpy as np\n'), ((4918, 5053), 'numpy.zeros', 'np.zeros', (['(2 * 3 * nline + 2 * tf_lines + 2 * 2 * vr_lines, 2 * 3 * (nnode + nline) +\n 2 * tf_lines + 2 * 2 * vr_lines)'], {'dtype': 'float'}), '((2 * 3 * nline + 2 * tf_lines + 2 * 2 * vr_lines, 2 * 3 * (nnode +\n nline) + 2 * tf_lines + 2 * 2 * vr_lines), dtype=float)\n', (4926, 5053), True, 'import numpy as np\n'), ((9842, 10013), 'numpy.zeros', 'np.zeros', (['(2 * 3 * (nnode - 1), 2 * 3 * (nnode + nline) + 2 * tf_lines + 2 * 2 *\n vr_lines, 2 * 3 * (nnode + nline) + 2 * tf_lines + 2 * 2 * vr_lines)'], {'dtype': 'float'}), '((2 * 3 * (nnode - 1), 2 * 3 * (nnode + nline) + 2 * tf_lines + 2 *\n 2 * vr_lines, 2 * 3 * (nnode + nline) + 2 * tf_lines + 2 * 2 * vr_lines\n ), dtype=float)\n', (9850, 10013), True, 'import numpy as np\n'), ((10031, 10142), 'numpy.zeros', 'np.zeros', (['(2 * 3 * (nnode - 1), 1, 2 * 3 * (nnode + nline) + 2 * tf_lines + 2 * 2 *\n vr_lines)'], {'dtype': 'float'}), '((2 * 3 * (nnode - 1), 1, 2 * 3 * (nnode + nline) + 2 * tf_lines + \n 2 * 2 * vr_lines), dtype=float)\n', (10039, 10142), True, 'import numpy as 
np\n'), ((10155, 10205), 'numpy.zeros', 'np.zeros', (['(2 * 3 * (nnode - 1), 1, 1)'], {'dtype': 'float'}), '((2 * 3 * (nnode - 1), 1, 1), dtype=float)\n', (10163, 10205), True, 'import numpy as np\n'), ((26277, 26440), 'numpy.zeros', 'np.zeros', (['(2 * vr_lines, 2 * 3 * (nnode + nline) + 2 * tf_lines + 2 * 2 * vr_lines, 2 *\n 3 * (nnode + nline) + 2 * tf_lines + 2 * 2 * vr_lines)'], {'dtype': 'float'}), '((2 * vr_lines, 2 * 3 * (nnode + nline) + 2 * tf_lines + 2 * 2 *\n vr_lines, 2 * 3 * (nnode + nline) + 2 * tf_lines + 2 * 2 * vr_lines),\n dtype=float)\n', (26285, 26440), True, 'import numpy as np\n'), ((26477, 26577), 'numpy.zeros', 'np.zeros', (['(2 * vr_lines, 2 * 3 * (nnode + nline) + 2 * tf_lines + 2 * 2 * vr_lines)'], {'dtype': 'float'}), '((2 * vr_lines, 2 * 3 * (nnode + nline) + 2 * tf_lines + 2 * 2 *\n vr_lines), dtype=float)\n', (26485, 26577), True, 'import numpy as np\n'), ((36226, 36237), 'numpy.abs', 'np.abs', (['VNR'], {}), '(VNR)\n', (36232, 36237), True, 'import numpy as np\n'), ((2559, 2629), 'numpy.ones', 'np.ones', (['(6 * nline + 2 * tf_lines + 2 * 2 * vr_lines, 1)'], {'dtype': 'float'}), '((6 * nline + 2 * tf_lines + 2 * 2 * vr_lines, 1), dtype=float)\n', (2566, 2629), True, 'import numpy as np\n'), ((11585, 11873), 'numpy.array', 'np.array', (['[[-(A0 ** 2 * (A0 ** 2 + B0 ** 2) ** (-3 / 2)) + (A0 ** 2 + B0 ** 2) ** (-1 /\n 2), -A0 * B0 * (A0 ** 2 + B0 ** 2) ** (-3 / 2)], [-A0 * B0 * (A0 ** 2 +\n B0 ** 2) ** (-3 / 2), -(B0 ** 2 * (A0 ** 2 + B0 ** 2) ** (-3 / 2)) + (\n A0 ** 2 + B0 ** 2) ** (-1 / 2)]]'], {'dtype': 'float'}), '([[-(A0 ** 2 * (A0 ** 2 + B0 ** 2) ** (-3 / 2)) + (A0 ** 2 + B0 ** \n 2) ** (-1 / 2), -A0 * B0 * (A0 ** 2 + B0 ** 2) ** (-3 / 2)], [-A0 * B0 *\n (A0 ** 2 + B0 ** 2) ** (-3 / 2), -(B0 ** 2 * (A0 ** 2 + B0 ** 2) ** (-3 /\n 2)) + (A0 ** 2 + B0 ** 2) ** (-1 / 2)]], dtype=float)\n', (11593, 11873), True, 'import numpy as np\n'), ((24211, 24296), 'numpy.zeros', 'np.zeros', (['(2 * 3 * (nnode + nline) + 2 * tf_lines + 2 * 2 * 
vr_lines)'], {'dtype': 'float'}), '(2 * 3 * (nnode + nline) + 2 * tf_lines + 2 * 2 * vr_lines, dtype=float\n )\n', (24219, 24296), True, 'import numpy as np\n'), ((34651, 34661), 'numpy.abs', 'np.abs', (['FT'], {}), '(FT)\n', (34657, 34661), True, 'import numpy as np\n'), ((10475, 10485), 'numpy.sqrt', 'np.sqrt', (['(3)'], {}), '(3)\n', (10482, 10485), True, 'import numpy as np\n'), ((10561, 10571), 'numpy.sqrt', 'np.sqrt', (['(3)'], {}), '(3)\n', (10568, 10571), True, 'import numpy as np\n'), ((35110, 35134), 'numpy.linalg.inv', 'np.linalg.inv', (['(JT.T @ JT)'], {}), '(JT.T @ JT)\n', (35123, 35134), True, 'import numpy as np\n'), ((31686, 31714), 'numpy.sin', 'np.sin', (['(2 * np.pi * 0.01 * t)'], {}), '(2 * np.pi * 0.01 * t)\n', (31692, 31714), True, 'import numpy as np\n'), ((32066, 32094), 'numpy.sin', 'np.sin', (['(2 * np.pi * 0.01 * t)'], {}), '(2 * np.pi * 0.01 * t)\n', (32072, 32094), True, 'import numpy as np\n'), ((33382, 33410), 'numpy.sin', 'np.sin', (['(2 * np.pi * 0.01 * t)'], {}), '(2 * np.pi * 0.01 * t)\n', (33388, 33410), True, 'import numpy as np\n')] |
from typing import List, Tuple
import numpy as np
from l5kit.data import ChunkedDataset
from l5kit.data.filter import (filter_agents_by_frames, filter_agents_by_labels, filter_tl_faces_by_frames,
filter_tl_faces_by_status)
from l5kit.data.labels import PERCEPTION_LABELS
from l5kit.data.map_api import MapAPI, TLFacesColors
from l5kit.environment.envs.l5_env import EpisodeOutputGym
from l5kit.geometry import transform_points
from l5kit.rasterization.box_rasterizer import get_box_world_coords, get_ego_as_agent
from l5kit.rasterization.semantic_rasterizer import indices_in_bounds
from l5kit.sampling.agent_sampling import get_relative_poses
from l5kit.simulation.unroll import SimulationOutput, UnrollInputOutput
from l5kit.visualization.visualizer.common import (AgentVisualization, CWVisualization, EgoVisualization,
FrameVisualization, LaneVisualization, TrajectoryVisualization)
# TODO: this should not be here (maybe a config?)
# Hex color map used by the visualizer: traffic-light face colors keyed by
# TLFacesColors member name, plus per-category agent colors keyed by
# perception label string.
COLORS = {
    TLFacesColors.GREEN.name: "#33CC33",
    TLFacesColors.RED.name: "#FF3300",
    TLFacesColors.YELLOW.name: "#FFFF66",
    "PERCEPTION_LABEL_CAR": "#1F77B4",
    "PERCEPTION_LABEL_CYCLIST": "#CC33FF",
    "PERCEPTION_LABEL_PEDESTRIAN": "#66CCFF",
}
def _get_frame_trajectories(frames: np.ndarray, agents_frames: List[np.ndarray], track_ids: np.ndarray,
                            frame_index: int) -> List[TrajectoryVisualization]:
    """Build future trajectories starting at frame_index: one per requested agent track plus ego's.

    Ego's trajectory is labelled ego_trajectory, agents' are labelled agent_trajectory.

    :param frames: all frames from the scene
    :param agents_frames: all agents from the scene, one array per frame
    :param track_ids: track ids for which an agent trajectory is built
    :param frame_index: index of the frame each trajectory starts from
    :return: a list of trajectory for visualisation
    """
    # TODO: factor out the two future lengths
    agent_horizon = 20
    ego_horizon = 100

    def _future_xy(horizon, track_id):
        # NOTE: with an identity transform and 0 yaw the poses are not really relative
        window = slice(frame_index, frame_index + horizon)
        positions, *_, availability = get_relative_poses(
            horizon, frames[window], track_id, agents_frames[window], np.eye(3), 0)
        valid = availability > 0
        return positions[valid, 0], positions[valid, 1]

    trajectories: List[TrajectoryVisualization] = []
    for track_id in track_ids:
        xs, ys = _future_xy(agent_horizon, track_id)
        trajectories.append(TrajectoryVisualization(xs=xs,
                                                    ys=ys,
                                                    color="blue",
                                                    legend_label="agent_trajectory",
                                                    track_id=int(track_id)))
    xs, ys = _future_xy(ego_horizon, None)
    trajectories.append(TrajectoryVisualization(xs=xs,
                                                ys=ys,
                                                color="red",
                                                legend_label="ego_trajectory",
                                                track_id=-1))
    return trajectories
def _get_frame_data(mapAPI: MapAPI, frame: np.ndarray, agents_frame: np.ndarray,
                    tls_frame: np.ndarray) -> FrameVisualization:
    """Get visualisation objects for the current frame.

    :param mapAPI: mapAPI object (used for lanes, crosswalks etc..)
    :param frame: the current frame (used for ego)
    :param agents_frame: agents in this frame
    :param tls_frame: the tls of this frame
    :return: A FrameVisualization object. NOTE: trajectory are not included here
    """
    ego_xy = frame["ego_translation"][:2]
    #################
    # plot lanes (only those within 50 units of ego)
    lane_indices = indices_in_bounds(ego_xy, mapAPI.bounds_info["lanes"]["bounds"], 50)
    active_tl_ids = set(filter_tl_faces_by_status(tls_frame, "ACTIVE")["face_id"].tolist())
    lanes_vis: List[LaneVisualization] = []
    # the enumerate index used here previously was unused; iterate bounds indices directly
    for lane_idx in lane_indices:
        lane_id = mapAPI.bounds_info["lanes"]["ids"][lane_idx]
        lane_tl_ids = set(mapAPI.get_lane_traffic_control_ids(lane_id))
        lane_colour = "gray"
        # if any active traffic light controls this lane, colour the lane by its colour
        # (iteration order of the set intersection is arbitrary if there are several)
        for tl_id in lane_tl_ids.intersection(active_tl_ids):
            lane_colour = COLORS[mapAPI.get_color_for_face(tl_id)]
        lane_coords = mapAPI.get_lane_coords(lane_id)
        left_lane = lane_coords["xyz_left"][:, :2]
        right_lane = lane_coords["xyz_right"][::-1, :2]
        # left boundary followed by the reversed right boundary forms a closed outline
        lanes_vis.append(LaneVisualization(xs=np.hstack((left_lane[:, 0], right_lane[:, 0])),
                                           ys=np.hstack((left_lane[:, 1], right_lane[:, 1])),
                                           color=lane_colour))
    #################
    # plot crosswalks (only those within 50 units of ego)
    crosswalk_indices = indices_in_bounds(ego_xy, mapAPI.bounds_info["crosswalks"]["bounds"], 50)
    crosswalks_vis: List[CWVisualization] = []
    for idx in crosswalk_indices:
        crosswalk = mapAPI.get_crosswalk_coords(mapAPI.bounds_info["crosswalks"]["ids"][idx])
        crosswalks_vis.append(CWVisualization(xs=crosswalk["xyz"][:, 0],
                                              ys=crosswalk["xyz"][:, 1],
                                              color="yellow"))
    #################
    # plot ego and agents: prepend ego as a synthetic agent so box coords are computed in one go
    agents_frame = np.insert(agents_frame, 0, get_ego_as_agent(frame))
    box_world_coords = get_box_world_coords(agents_frame)
    # ego (first entry after the insert above)
    ego_vis = EgoVisualization(xs=box_world_coords[0, :, 0], ys=box_world_coords[0, :, 1],
                            color="red", center_x=agents_frame["centroid"][0, 0],
                            center_y=agents_frame["centroid"][0, 1])
    # agents (everything but the ego entry)
    agents_frame = agents_frame[1:]
    box_world_coords = box_world_coords[1:]
    agents_vis: List[AgentVisualization] = []
    for agent, box_coord in zip(agents_frame, box_world_coords):
        # pick the most likely perception label for colouring and metadata
        label_index = np.argmax(agent["label_probabilities"])
        agent_type = PERCEPTION_LABELS[label_index]
        agents_vis.append(AgentVisualization(xs=box_coord[..., 0],
                                             ys=box_coord[..., 1],
                                             color="#1F77B4" if agent_type not in COLORS else COLORS[agent_type],
                                             track_id=agent["track_id"],
                                             agent_type=PERCEPTION_LABELS[label_index],
                                             prob=agent["label_probabilities"][label_index]))
    return FrameVisualization(ego=ego_vis, agents=agents_vis, lanes=lanes_vis,
                              crosswalks=crosswalks_vis, trajectories=[])
def zarr_to_visualizer_scene(scene_dataset: ChunkedDataset, mapAPI: MapAPI,
                             with_trajectories: bool = True) -> List[FrameVisualization]:
    """Convert a zarr scene into a list of FrameVisualization which can be used by the visualiser.

    :param scene_dataset: a scene dataset. This must contain a single scene
    :param mapAPI: mapAPI object
    :param with_trajectories: if to enable trajectories or not
    :return: a list of FrameVisualization objects
    """
    if len(scene_dataset.scenes) != 1:
        raise ValueError(f"we can convert only a single scene, found {len(scene_dataset.scenes)}")
    frames = scene_dataset.frames
    agents_frames = filter_agents_by_frames(frames, scene_dataset.agents)
    tls_frames = filter_tl_faces_by_frames(frames, scene_dataset.tl_faces)
    frames_vis: List[FrameVisualization] = []
    for frame_idx in range(len(frames)):
        # TODO: hardcoded threshold, it would be great to have a slider filtering on this
        agents_frame = filter_agents_by_labels(agents_frames[frame_idx], 0.1)
        frame_vis = _get_frame_data(mapAPI, frames[frame_idx], agents_frame, tls_frames[frame_idx])
        if with_trajectories:
            # FrameVisualization is immutable here, so rebuild it with trajectories attached
            traj_vis = _get_frame_trajectories(frames, agents_frames, agents_frame["track_id"], frame_idx)
            frame_vis = FrameVisualization(ego=frame_vis.ego, agents=frame_vis.agents,
                                           lanes=frame_vis.lanes, crosswalks=frame_vis.crosswalks,
                                           trajectories=traj_vis)
        frames_vis.append(frame_vis)
    return frames_vis
def _get_in_out_as_trajectories(in_out: UnrollInputOutput) -> Tuple[np.ndarray, np.ndarray]:
    """Bring the input (log-replayed) and output (simulated) trajectories into world space.

    Availability filtering is applied to the log-replayed trajectory only.

    :param in_out: an UnrollInputOutput object
    :return: the replayed and simulated trajectory as numpy arrays
    """
    world_from_agent = in_out.inputs["world_from_agent"]
    available = in_out.inputs["target_availabilities"] > 0
    replay_traj = transform_points(in_out.inputs["target_positions"], world_from_agent)[available]
    sim_traj = transform_points(in_out.outputs["positions"], world_from_agent)
    return replay_traj, sim_traj
def simulation_out_to_visualizer_scene(sim_out: SimulationOutput, mapAPI: MapAPI) -> List[FrameVisualization]:
    """Convert a simulation output into a scene we can visualize.
    The scene will include replayed and simulated trajectories for ego and agents when these are
    simulated.
    :param sim_out: the simulation output
    :param mapAPI: a MapAPI object
    :return: a list of FrameVisualization for the scene
    """
    frames = sim_out.simulated_ego
    agents_frames = filter_agents_by_frames(frames, sim_out.simulated_agents)
    tls_frames = filter_tl_faces_by_frames(frames, sim_out.simulated_dataset.dataset.tl_faces)
    # probability threshold used to drop low-confidence agent detections per frame
    agents_th = sim_out.simulated_dataset.cfg["raster_params"]["filter_agents_threshold"]
    ego_ins_outs = sim_out.ego_ins_outs
    agents_ins_outs = sim_out.agents_ins_outs
    # trajectories are only drawn when the corresponding in/out records exist
    has_ego_info = len(ego_ins_outs) > 0
    has_agents_info = len(agents_ins_outs) > 0
    frames_vis: List[FrameVisualization] = []
    for frame_idx in range(len(frames)):
        frame = frames[frame_idx]
        tls_frame = tls_frames[frame_idx]
        agents_frame = agents_frames[frame_idx]
        agents_frame = filter_agents_by_labels(agents_frame, agents_th)
        # static scene content (lanes, crosswalks, ego/agent boxes) for this frame
        frame_vis = _get_frame_data(mapAPI, frame, agents_frame, tls_frame)
        trajectories = []
        if has_ego_info:
            # ego: blue = log replay, red = simulated
            ego_in_out = ego_ins_outs[frame_idx]
            replay_traj, sim_traj = _get_in_out_as_trajectories(ego_in_out)
            trajectories.append(TrajectoryVisualization(xs=replay_traj[:, 0], ys=replay_traj[:, 1],
                                                        color="blue", legend_label="ego_replay", track_id=-1))
            trajectories.append(TrajectoryVisualization(xs=sim_traj[:, 0], ys=sim_traj[:, 1],
                                                        color="red", legend_label="ego_simulated", track_id=-1))
        if has_agents_info:
            # agents: orange = log replay, purple = simulated (one pair per agent record)
            agents_in_out = agents_ins_outs[frame_idx]
            for agent_in_out in agents_in_out:
                track_id = agent_in_out.inputs["track_id"]
                replay_traj, sim_traj = _get_in_out_as_trajectories(agent_in_out)
                trajectories.append(TrajectoryVisualization(xs=replay_traj[:, 0], ys=replay_traj[:, 1],
                                                            color="orange", legend_label="agent_replay",
                                                            track_id=track_id))
                trajectories.append(TrajectoryVisualization(xs=sim_traj[:, 0], ys=sim_traj[:, 1],
                                                            color="purple", legend_label="agent_simulated",
                                                            track_id=track_id))
        # FrameVisualization is rebuilt (not mutated) to attach the trajectories
        frame_vis = FrameVisualization(ego=frame_vis.ego, agents=frame_vis.agents,
                                       lanes=frame_vis.lanes, crosswalks=frame_vis.crosswalks,
                                       trajectories=trajectories)
        frames_vis.append(frame_vis)
    return frames_vis
def episode_out_to_visualizer_scene_gym_cle(sim_out: EpisodeOutputGym,
                                            mapAPI: MapAPI) -> List[FrameVisualization]:
    """Convert a episode output of closed loop gym into a scene we can visualize.
    The scene will include replayed and simulated trajectories for ego and agents when these are
    simulated.
    :param sim_out: the simulation output of L5 gym close loop environment
    :param mapAPI: a MapAPI object
    :return: a list of FrameVisualization for the scene
    """
    frames = sim_out.simulated_ego
    agents_frames = filter_agents_by_frames(frames, sim_out.simulated_agents)
    tls_frames = filter_tl_faces_by_frames(frames, sim_out.tls_frames)
    # probability threshold used to drop low-confidence agent detections per frame
    agents_th = sim_out.agents_th
    ego_ins_outs = sim_out.ego_ins_outs
    agents_ins_outs = sim_out.agents_ins_outs
    # trajectories are only drawn when the corresponding in/out records exist
    has_ego_info = len(ego_ins_outs) > 0
    has_agents_info = len(agents_ins_outs) > 0
    frames_vis: List[FrameVisualization] = []
    # NOTE(review): the last two frames are skipped here (unlike the SimulationOutput
    # variant above) — presumably because the gym episode lacks in/out records for
    # them; confirm against EpisodeOutputGym
    for frame_idx in range(len(frames) - 2):
        frame = frames[frame_idx]
        tls_frame = tls_frames[frame_idx]
        agents_frame = agents_frames[frame_idx]
        agents_frame = filter_agents_by_labels(agents_frame, agents_th)
        # static scene content (lanes, crosswalks, ego/agent boxes) for this frame
        frame_vis = _get_frame_data(mapAPI, frame, agents_frame, tls_frame)
        trajectories = []
        if has_ego_info:
            ego_in_out = ego_ins_outs[frame_idx]
            replay_traj, sim_traj = _get_in_out_as_trajectories(ego_in_out)
            # draw only the first future step, stretched 10x from the ego centroid
            # so the single-step displacement is visible at scene scale
            scale = 10
            ego_centroid = np.array([[frame_vis.ego.center_x, frame_vis.ego.center_y]])
            ego_next_step_replay = ego_centroid + scale * (replay_traj[0:1] - ego_centroid)
            ego_next_step_sim = ego_centroid + scale * (sim_traj[0:1] - ego_centroid)
            # two-point segments: centroid -> scaled next step
            single_step_replay = np.concatenate([ego_centroid, ego_next_step_replay])
            single_step_sim = np.concatenate([ego_centroid, ego_next_step_sim])
            trajectories.append(TrajectoryVisualization(xs=single_step_sim[:, 0], ys=single_step_sim[:, 1],
                                                        color="green", legend_label="ego_simulated (10x scale)",
                                                        track_id=-1))
            trajectories.append(TrajectoryVisualization(xs=single_step_replay[:, 0], ys=single_step_replay[:, 1],
                                                        color="yellow", legend_label="ego_replay (10x scale)",
                                                        track_id=-1))
        if has_agents_info:
            # agents: orange = log replay, purple = simulated (one pair per agent record)
            agents_in_out = agents_ins_outs[frame_idx]
            for agent_in_out in agents_in_out:
                track_id = agent_in_out.inputs["track_id"]
                replay_traj, sim_traj = _get_in_out_as_trajectories(agent_in_out)
                trajectories.append(TrajectoryVisualization(xs=replay_traj[:, 0], ys=replay_traj[:, 1],
                                                            color="orange", legend_label="agent_replay",
                                                            track_id=track_id))
                trajectories.append(TrajectoryVisualization(xs=sim_traj[:, 0], ys=sim_traj[:, 1],
                                                            color="purple", legend_label="agent_simulated",
                                                            track_id=track_id))
        # FrameVisualization is rebuilt (not mutated) to attach the trajectories
        frame_vis = FrameVisualization(ego=frame_vis.ego, agents=frame_vis.agents,
                                       lanes=frame_vis.lanes, crosswalks=frame_vis.crosswalks,
                                       trajectories=trajectories)
        frames_vis.append(frame_vis)
    return frames_vis
| [
"l5kit.rasterization.semantic_rasterizer.indices_in_bounds",
"numpy.eye",
"l5kit.data.filter.filter_agents_by_frames",
"numpy.hstack",
"l5kit.visualization.visualizer.common.EgoVisualization",
"l5kit.rasterization.box_rasterizer.get_ego_as_agent",
"l5kit.data.filter.filter_tl_faces_by_status",
"numpy.... | [((4222, 4290), 'l5kit.rasterization.semantic_rasterizer.indices_in_bounds', 'indices_in_bounds', (['ego_xy', "mapAPI.bounds_info['lanes']['bounds']", '(50)'], {}), "(ego_xy, mapAPI.bounds_info['lanes']['bounds'], 50)\n", (4239, 4290), False, 'from l5kit.rasterization.semantic_rasterizer import indices_in_bounds\n'), ((5258, 5331), 'l5kit.rasterization.semantic_rasterizer.indices_in_bounds', 'indices_in_bounds', (['ego_xy', "mapAPI.bounds_info['crosswalks']['bounds']", '(50)'], {}), "(ego_xy, mapAPI.bounds_info['crosswalks']['bounds'], 50)\n", (5275, 5331), False, 'from l5kit.rasterization.semantic_rasterizer import indices_in_bounds\n'), ((5859, 5893), 'l5kit.rasterization.box_rasterizer.get_box_world_coords', 'get_box_world_coords', (['agents_frame'], {}), '(agents_frame)\n', (5879, 5893), False, 'from l5kit.rasterization.box_rasterizer import get_box_world_coords, get_ego_as_agent\n'), ((5919, 6099), 'l5kit.visualization.visualizer.common.EgoVisualization', 'EgoVisualization', ([], {'xs': 'box_world_coords[0, :, 0]', 'ys': 'box_world_coords[0, :, 1]', 'color': '"""red"""', 'center_x': "agents_frame['centroid'][0, 0]", 'center_y': "agents_frame['centroid'][0, 1]"}), "(xs=box_world_coords[0, :, 0], ys=box_world_coords[0, :, 1],\n color='red', center_x=agents_frame['centroid'][0, 0], center_y=\n agents_frame['centroid'][0, 1])\n", (5935, 6099), False, 'from l5kit.visualization.visualizer.common import AgentVisualization, CWVisualization, EgoVisualization, FrameVisualization, LaneVisualization, TrajectoryVisualization\n'), ((6988, 7103), 'l5kit.visualization.visualizer.common.FrameVisualization', 'FrameVisualization', ([], {'ego': 'ego_vis', 'agents': 'agents_vis', 'lanes': 'lanes_vis', 'crosswalks': 'crosswalks_vis', 'trajectories': '[]'}), '(ego=ego_vis, agents=agents_vis, lanes=lanes_vis,\n crosswalks=crosswalks_vis, trajectories=[])\n', (7006, 7103), False, 'from l5kit.visualization.visualizer.common import AgentVisualization, CWVisualization, 
EgoVisualization, FrameVisualization, LaneVisualization, TrajectoryVisualization\n'), ((7820, 7873), 'l5kit.data.filter.filter_agents_by_frames', 'filter_agents_by_frames', (['frames', 'scene_dataset.agents'], {}), '(frames, scene_dataset.agents)\n', (7843, 7873), False, 'from l5kit.data.filter import filter_agents_by_frames, filter_agents_by_labels, filter_tl_faces_by_frames, filter_tl_faces_by_status\n'), ((7891, 7948), 'l5kit.data.filter.filter_tl_faces_by_frames', 'filter_tl_faces_by_frames', (['frames', 'scene_dataset.tl_faces'], {}), '(frames, scene_dataset.tl_faces)\n', (7916, 7948), False, 'from l5kit.data.filter import filter_agents_by_frames, filter_agents_by_labels, filter_tl_faces_by_frames, filter_tl_faces_by_status\n'), ((9222, 9313), 'l5kit.geometry.transform_points', 'transform_points', (["in_out.inputs['target_positions']", "in_out.inputs['world_from_agent']"], {}), "(in_out.inputs['target_positions'], in_out.inputs[\n 'world_from_agent'])\n", (9238, 9313), False, 'from l5kit.geometry import transform_points\n'), ((9433, 9518), 'l5kit.geometry.transform_points', 'transform_points', (["in_out.outputs['positions']", "in_out.inputs['world_from_agent']"], {}), "(in_out.outputs['positions'], in_out.inputs['world_from_agent']\n )\n", (9449, 9518), False, 'from l5kit.geometry import transform_points\n'), ((10068, 10125), 'l5kit.data.filter.filter_agents_by_frames', 'filter_agents_by_frames', (['frames', 'sim_out.simulated_agents'], {}), '(frames, sim_out.simulated_agents)\n', (10091, 10125), False, 'from l5kit.data.filter import filter_agents_by_frames, filter_agents_by_labels, filter_tl_faces_by_frames, filter_tl_faces_by_status\n'), ((10143, 10220), 'l5kit.data.filter.filter_tl_faces_by_frames', 'filter_tl_faces_by_frames', (['frames', 'sim_out.simulated_dataset.dataset.tl_faces'], {}), '(frames, sim_out.simulated_dataset.dataset.tl_faces)\n', (10168, 10220), False, 'from l5kit.data.filter import filter_agents_by_frames, filter_agents_by_labels, 
filter_tl_faces_by_frames, filter_tl_faces_by_status\n'), ((13181, 13238), 'l5kit.data.filter.filter_agents_by_frames', 'filter_agents_by_frames', (['frames', 'sim_out.simulated_agents'], {}), '(frames, sim_out.simulated_agents)\n', (13204, 13238), False, 'from l5kit.data.filter import filter_agents_by_frames, filter_agents_by_labels, filter_tl_faces_by_frames, filter_tl_faces_by_status\n'), ((13256, 13309), 'l5kit.data.filter.filter_tl_faces_by_frames', 'filter_tl_faces_by_frames', (['frames', 'sim_out.tls_frames'], {}), '(frames, sim_out.tls_frames)\n', (13281, 13309), False, 'from l5kit.data.filter import filter_agents_by_frames, filter_agents_by_labels, filter_tl_faces_by_frames, filter_tl_faces_by_status\n'), ((3200, 3209), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (3206, 3209), True, 'import numpy as np\n'), ((3244, 3373), 'l5kit.visualization.visualizer.common.TrajectoryVisualization', 'TrajectoryVisualization', ([], {'xs': 'pos[avail > 0, 0]', 'ys': 'pos[avail > 0, 1]', 'color': '"""red"""', 'legend_label': '"""ego_trajectory"""', 'track_id': '(-1)'}), "(xs=pos[avail > 0, 0], ys=pos[avail > 0, 1], color=\n 'red', legend_label='ego_trajectory', track_id=-1)\n", (3267, 3373), False, 'from l5kit.visualization.visualizer.common import AgentVisualization, CWVisualization, EgoVisualization, FrameVisualization, LaneVisualization, TrajectoryVisualization\n'), ((5811, 5834), 'l5kit.rasterization.box_rasterizer.get_ego_as_agent', 'get_ego_as_agent', (['frame'], {}), '(frame)\n', (5827, 5834), False, 'from l5kit.rasterization.box_rasterizer import get_box_world_coords, get_ego_as_agent\n'), ((6381, 6420), 'numpy.argmax', 'np.argmax', (["agent['label_probabilities']"], {}), "(agent['label_probabilities'])\n", (6390, 6420), True, 'import numpy as np\n'), ((8275, 8317), 'l5kit.data.filter.filter_agents_by_labels', 'filter_agents_by_labels', (['agents_frame', '(0.1)'], {}), '(agents_frame, 0.1)\n', (8298, 8317), False, 'from l5kit.data.filter import 
filter_agents_by_frames, filter_agents_by_labels, filter_tl_faces_by_frames, filter_tl_faces_by_status\n'), ((10722, 10770), 'l5kit.data.filter.filter_agents_by_labels', 'filter_agents_by_labels', (['agents_frame', 'agents_th'], {}), '(agents_frame, agents_th)\n', (10745, 10770), False, 'from l5kit.data.filter import filter_agents_by_frames, filter_agents_by_labels, filter_tl_faces_by_frames, filter_tl_faces_by_status\n'), ((12310, 12465), 'l5kit.visualization.visualizer.common.FrameVisualization', 'FrameVisualization', ([], {'ego': 'frame_vis.ego', 'agents': 'frame_vis.agents', 'lanes': 'frame_vis.lanes', 'crosswalks': 'frame_vis.crosswalks', 'trajectories': 'trajectories'}), '(ego=frame_vis.ego, agents=frame_vis.agents, lanes=\n frame_vis.lanes, crosswalks=frame_vis.crosswalks, trajectories=trajectories\n )\n', (12328, 12465), False, 'from l5kit.visualization.visualizer.common import AgentVisualization, CWVisualization, EgoVisualization, FrameVisualization, LaneVisualization, TrajectoryVisualization\n'), ((13759, 13807), 'l5kit.data.filter.filter_agents_by_labels', 'filter_agents_by_labels', (['agents_frame', 'agents_th'], {}), '(agents_frame, agents_th)\n', (13782, 13807), False, 'from l5kit.data.filter import filter_agents_by_frames, filter_agents_by_labels, filter_tl_faces_by_frames, filter_tl_faces_by_status\n'), ((15971, 16126), 'l5kit.visualization.visualizer.common.FrameVisualization', 'FrameVisualization', ([], {'ego': 'frame_vis.ego', 'agents': 'frame_vis.agents', 'lanes': 'frame_vis.lanes', 'crosswalks': 'frame_vis.crosswalks', 'trajectories': 'trajectories'}), '(ego=frame_vis.ego, agents=frame_vis.agents, lanes=\n frame_vis.lanes, crosswalks=frame_vis.crosswalks, trajectories=trajectories\n )\n', (15989, 16126), False, 'from l5kit.visualization.visualizer.common import AgentVisualization, CWVisualization, EgoVisualization, FrameVisualization, LaneVisualization, TrajectoryVisualization\n'), ((2462, 2471), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', 
(2468, 2471), True, 'import numpy as np\n'), ((5538, 5628), 'l5kit.visualization.visualizer.common.CWVisualization', 'CWVisualization', ([], {'xs': "crosswalk['xyz'][:, 0]", 'ys': "crosswalk['xyz'][:, 1]", 'color': '"""yellow"""'}), "(xs=crosswalk['xyz'][:, 0], ys=crosswalk['xyz'][:, 1], color\n ='yellow')\n", (5553, 5628), False, 'from l5kit.visualization.visualizer.common import AgentVisualization, CWVisualization, EgoVisualization, FrameVisualization, LaneVisualization, TrajectoryVisualization\n'), ((6499, 6764), 'l5kit.visualization.visualizer.common.AgentVisualization', 'AgentVisualization', ([], {'xs': 'box_coord[..., 0]', 'ys': 'box_coord[..., 1]', 'color': "('#1F77B4' if agent_type not in COLORS else COLORS[agent_type])", 'track_id': "agent['track_id']", 'agent_type': 'PERCEPTION_LABELS[label_index]', 'prob': "agent['label_probabilities'][label_index]"}), "(xs=box_coord[..., 0], ys=box_coord[..., 1], color=\n '#1F77B4' if agent_type not in COLORS else COLORS[agent_type], track_id\n =agent['track_id'], agent_type=PERCEPTION_LABELS[label_index], prob=\n agent['label_probabilities'][label_index])\n", (6517, 6764), False, 'from l5kit.visualization.visualizer.common import AgentVisualization, CWVisualization, EgoVisualization, FrameVisualization, LaneVisualization, TrajectoryVisualization\n'), ((8557, 8703), 'l5kit.visualization.visualizer.common.FrameVisualization', 'FrameVisualization', ([], {'ego': 'frame_vis.ego', 'agents': 'frame_vis.agents', 'lanes': 'frame_vis.lanes', 'crosswalks': 'frame_vis.crosswalks', 'trajectories': 'traj_vis'}), '(ego=frame_vis.ego, agents=frame_vis.agents, lanes=\n frame_vis.lanes, crosswalks=frame_vis.crosswalks, trajectories=traj_vis)\n', (8575, 8703), False, 'from l5kit.visualization.visualizer.common import AgentVisualization, CWVisualization, EgoVisualization, FrameVisualization, LaneVisualization, TrajectoryVisualization\n'), ((14111, 14171), 'numpy.array', 'np.array', (['[[frame_vis.ego.center_x, frame_vis.ego.center_y]]'], 
{}), '([[frame_vis.ego.center_x, frame_vis.ego.center_y]])\n', (14119, 14171), True, 'import numpy as np\n'), ((14383, 14435), 'numpy.concatenate', 'np.concatenate', (['[ego_centroid, ego_next_step_replay]'], {}), '([ego_centroid, ego_next_step_replay])\n', (14397, 14435), True, 'import numpy as np\n'), ((14466, 14515), 'numpy.concatenate', 'np.concatenate', (['[ego_centroid, ego_next_step_sim]'], {}), '([ego_centroid, ego_next_step_sim])\n', (14480, 14515), True, 'import numpy as np\n'), ((11056, 11182), 'l5kit.visualization.visualizer.common.TrajectoryVisualization', 'TrajectoryVisualization', ([], {'xs': 'replay_traj[:, 0]', 'ys': 'replay_traj[:, 1]', 'color': '"""blue"""', 'legend_label': '"""ego_replay"""', 'track_id': '(-1)'}), "(xs=replay_traj[:, 0], ys=replay_traj[:, 1], color=\n 'blue', legend_label='ego_replay', track_id=-1)\n", (11079, 11182), False, 'from l5kit.visualization.visualizer.common import AgentVisualization, CWVisualization, EgoVisualization, FrameVisualization, LaneVisualization, TrajectoryVisualization\n'), ((11267, 11388), 'l5kit.visualization.visualizer.common.TrajectoryVisualization', 'TrajectoryVisualization', ([], {'xs': 'sim_traj[:, 0]', 'ys': 'sim_traj[:, 1]', 'color': '"""red"""', 'legend_label': '"""ego_simulated"""', 'track_id': '(-1)'}), "(xs=sim_traj[:, 0], ys=sim_traj[:, 1], color='red',\n legend_label='ego_simulated', track_id=-1)\n", (11290, 11388), False, 'from l5kit.visualization.visualizer.common import AgentVisualization, CWVisualization, EgoVisualization, FrameVisualization, LaneVisualization, TrajectoryVisualization\n'), ((14549, 14698), 'l5kit.visualization.visualizer.common.TrajectoryVisualization', 'TrajectoryVisualization', ([], {'xs': 'single_step_sim[:, 0]', 'ys': 'single_step_sim[:, 1]', 'color': '"""green"""', 'legend_label': '"""ego_simulated (10x scale)"""', 'track_id': '(-1)'}), "(xs=single_step_sim[:, 0], ys=single_step_sim[:, 1],\n color='green', legend_label='ego_simulated (10x scale)', track_id=-1)\n", 
(14572, 14698), False, 'from l5kit.visualization.visualizer.common import AgentVisualization, CWVisualization, EgoVisualization, FrameVisualization, LaneVisualization, TrajectoryVisualization\n'), ((14840, 14994), 'l5kit.visualization.visualizer.common.TrajectoryVisualization', 'TrajectoryVisualization', ([], {'xs': 'single_step_replay[:, 0]', 'ys': 'single_step_replay[:, 1]', 'color': '"""yellow"""', 'legend_label': '"""ego_replay (10x scale)"""', 'track_id': '(-1)'}), "(xs=single_step_replay[:, 0], ys=single_step_replay[\n :, 1], color='yellow', legend_label='ego_replay (10x scale)', track_id=-1)\n", (14863, 14994), False, 'from l5kit.visualization.visualizer.common import AgentVisualization, CWVisualization, EgoVisualization, FrameVisualization, LaneVisualization, TrajectoryVisualization\n'), ((4315, 4361), 'l5kit.data.filter.filter_tl_faces_by_status', 'filter_tl_faces_by_status', (['tls_frame', '"""ACTIVE"""'], {}), "(tls_frame, 'ACTIVE')\n", (4340, 4361), False, 'from l5kit.data.filter import filter_agents_by_frames, filter_agents_by_labels, filter_tl_faces_by_frames, filter_tl_faces_by_status\n'), ((4984, 5030), 'numpy.hstack', 'np.hstack', (['(left_lane[:, 0], right_lane[:, 0])'], {}), '((left_lane[:, 0], right_lane[:, 0]))\n', (4993, 5030), True, 'import numpy as np\n'), ((5078, 5124), 'numpy.hstack', 'np.hstack', (['(left_lane[:, 1], right_lane[:, 1])'], {}), '((left_lane[:, 1], right_lane[:, 1]))\n', (5087, 5124), True, 'import numpy as np\n'), ((11750, 11886), 'l5kit.visualization.visualizer.common.TrajectoryVisualization', 'TrajectoryVisualization', ([], {'xs': 'replay_traj[:, 0]', 'ys': 'replay_traj[:, 1]', 'color': '"""orange"""', 'legend_label': '"""agent_replay"""', 'track_id': 'track_id'}), "(xs=replay_traj[:, 0], ys=replay_traj[:, 1], color=\n 'orange', legend_label='agent_replay', track_id=track_id)\n", (11773, 11886), False, 'from l5kit.visualization.visualizer.common import AgentVisualization, CWVisualization, EgoVisualization, 
FrameVisualization, LaneVisualization, TrajectoryVisualization\n'), ((12039, 12172), 'l5kit.visualization.visualizer.common.TrajectoryVisualization', 'TrajectoryVisualization', ([], {'xs': 'sim_traj[:, 0]', 'ys': 'sim_traj[:, 1]', 'color': '"""purple"""', 'legend_label': '"""agent_simulated"""', 'track_id': 'track_id'}), "(xs=sim_traj[:, 0], ys=sim_traj[:, 1], color=\n 'purple', legend_label='agent_simulated', track_id=track_id)\n", (12062, 12172), False, 'from l5kit.visualization.visualizer.common import AgentVisualization, CWVisualization, EgoVisualization, FrameVisualization, LaneVisualization, TrajectoryVisualization\n'), ((15411, 15547), 'l5kit.visualization.visualizer.common.TrajectoryVisualization', 'TrajectoryVisualization', ([], {'xs': 'replay_traj[:, 0]', 'ys': 'replay_traj[:, 1]', 'color': '"""orange"""', 'legend_label': '"""agent_replay"""', 'track_id': 'track_id'}), "(xs=replay_traj[:, 0], ys=replay_traj[:, 1], color=\n 'orange', legend_label='agent_replay', track_id=track_id)\n", (15434, 15547), False, 'from l5kit.visualization.visualizer.common import AgentVisualization, CWVisualization, EgoVisualization, FrameVisualization, LaneVisualization, TrajectoryVisualization\n'), ((15700, 15833), 'l5kit.visualization.visualizer.common.TrajectoryVisualization', 'TrajectoryVisualization', ([], {'xs': 'sim_traj[:, 0]', 'ys': 'sim_traj[:, 1]', 'color': '"""purple"""', 'legend_label': '"""agent_simulated"""', 'track_id': 'track_id'}), "(xs=sim_traj[:, 0], ys=sim_traj[:, 1], color=\n 'purple', legend_label='agent_simulated', track_id=track_id)\n", (15723, 15833), False, 'from l5kit.visualization.visualizer.common import AgentVisualization, CWVisualization, EgoVisualization, FrameVisualization, LaneVisualization, TrajectoryVisualization\n')] |
# Implementation of the Gaborfilter
# https://en.wikipedia.org/wiki/Gabor_filter
import numpy as np
from cv2 import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filter2D, imread, imshow, waitKey
def gabor_filter_kernel(
    ksize: int, sigma: int, theta: int, lambd: int, gamma: int, psi: int
) -> np.ndarray:
    """
    Build a (ksize x ksize) Gabor kernel: a Gaussian envelope modulated by a
    sinusoidal carrier, rotated by theta.

    :param ksize: The kernelsize of the convolutional filter (ksize x ksize)
    :param sigma: standard deviation of the gaussian bell curve
    :param theta: The orientation of the normal to the parallel stripes
    of Gabor function, in degrees.
    :param lambd: Wavelength of the sinusoidal component.
    :param gamma: The spatial aspect ratio and specifies the ellipticity
    of the support of Gabor function.
    :param psi: The phase offset of the sinusoidal function.
    :return: the kernel as a float32 numpy array

    >>> gabor_filter_kernel(3, 8, 0, 10, 0, 0).tolist()
    [[0.8027212023735046, 1.0, 0.8027212023735046], [0.8027212023735046, 1.0, \
0.8027212023735046], [0.8027212023735046, 1.0, 0.8027212023735046]]
    """
    # prepare kernel
    # the kernel size has to be odd so the kernel has a well-defined center pixel
    if (ksize % 2) == 0:
        ksize = ksize + 1
    gabor = np.zeros((ksize, ksize), dtype=np.float32)

    # the rotation angle is loop-invariant: convert degree to radiant and
    # evaluate its sine/cosine once instead of once per pixel
    _theta = theta / 180 * np.pi
    cos_theta = np.cos(_theta)
    sin_theta = np.sin(_theta)

    # each value
    for y in range(ksize):
        for x in range(ksize):
            # distance from center
            px = x - ksize // 2
            py = y - ksize // 2
            # rotate the coordinates into the stripe-aligned frame
            _x = cos_theta * px + sin_theta * py
            _y = -sin_theta * px + cos_theta * py
            # fill kernel: gaussian envelope times sinusoidal carrier
            gabor[y, x] = np.exp(
                -(_x ** 2 + gamma ** 2 * _y ** 2) / (2 * sigma ** 2)
            ) * np.cos(2 * np.pi * _x / lambd + psi)
    return gabor
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    # read original image
    # NOTE(review): path is relative to the script's working directory — imread
    # returns None (and cvtColor will raise) if the file is missing
    img = imread("../image_data/lena.jpg")
    # turn image in gray scale value
    gray = cvtColor(img, COLOR_BGR2GRAY)
    # Apply multiple Kernel to detect edges: accumulate the filter responses
    # for six stripe orientations (every 30 degrees)
    out = np.zeros(gray.shape[:2])
    for theta in [0, 30, 60, 90, 120, 150]:
        """
        ksize = 10
        sigma = 8
        lambd = 10
        gamma = 0
        psi = 0
        """
        # ksize=10 is even, so gabor_filter_kernel bumps it to an 11x11 kernel
        kernel_10 = gabor_filter_kernel(10, 8, theta, 10, 0, 0)
        out += filter2D(gray, CV_8UC3, kernel_10)
    # normalize the accumulated response to the displayable 0..255 uint8 range
    out = out / out.max() * 255
    out = out.astype(np.uint8)
    imshow("Original", gray)
    # NOTE(review): the window title says "20x20 mask" but the kernel used above is 11x11
    imshow("Gabor filter with 20x20 mask and 6 directions", out)
    waitKey(0)
| [
"cv2.filter2D",
"cv2.imshow",
"numpy.exp",
"numpy.zeros",
"cv2.waitKey",
"doctest.testmod",
"numpy.cos",
"cv2.cvtColor",
"numpy.sin",
"cv2.imread"
] | [((1158, 1200), 'numpy.zeros', 'np.zeros', (['(ksize, ksize)'], {'dtype': 'np.float32'}), '((ksize, ksize), dtype=np.float32)\n', (1166, 1200), True, 'import numpy as np\n'), ((1937, 1954), 'doctest.testmod', 'doctest.testmod', ([], {}), '()\n', (1952, 1954), False, 'import doctest\n'), ((1991, 2023), 'cv2.imread', 'imread', (['"""../image_data/lena.jpg"""'], {}), "('../image_data/lena.jpg')\n", (1997, 2023), False, 'from cv2 import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filter2D, imread, imshow, waitKey\n'), ((2072, 2101), 'cv2.cvtColor', 'cvtColor', (['img', 'COLOR_BGR2GRAY'], {}), '(img, COLOR_BGR2GRAY)\n', (2080, 2101), False, 'from cv2 import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filter2D, imread, imshow, waitKey\n'), ((2157, 2181), 'numpy.zeros', 'np.zeros', (['gray.shape[:2]'], {}), '(gray.shape[:2])\n', (2165, 2181), True, 'import numpy as np\n'), ((2522, 2546), 'cv2.imshow', 'imshow', (['"""Original"""', 'gray'], {}), "('Original', gray)\n", (2528, 2546), False, 'from cv2 import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filter2D, imread, imshow, waitKey\n'), ((2551, 2611), 'cv2.imshow', 'imshow', (['"""Gabor filter with 20x20 mask and 6 directions"""', 'out'], {}), "('Gabor filter with 20x20 mask and 6 directions', out)\n", (2557, 2611), False, 'from cv2 import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filter2D, imread, imshow, waitKey\n'), ((2617, 2627), 'cv2.waitKey', 'waitKey', (['(0)'], {}), '(0)\n', (2624, 2627), False, 'from cv2 import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filter2D, imread, imshow, waitKey\n'), ((2419, 2453), 'cv2.filter2D', 'filter2D', (['gray', 'CV_8UC3', 'kernel_10'], {}), '(gray, CV_8UC3, kernel_10)\n', (2427, 2453), False, 'from cv2 import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filter2D, imread, imshow, waitKey\n'), ((1474, 1488), 'numpy.cos', 'np.cos', (['_theta'], {}), '(_theta)\n', (1480, 1488), True, 'import numpy as np\n'), ((1513, 1527), 'numpy.sin', 'np.sin', (['_theta'], {}), '(_theta)\n', (1519, 1527), True, 'import numpy as np\n'), ((1736, 
1796), 'numpy.exp', 'np.exp', (['(-(_x ** 2 + gamma ** 2 * _y ** 2) / (2 * sigma ** 2))'], {}), '(-(_x ** 2 + gamma ** 2 * _y ** 2) / (2 * sigma ** 2))\n', (1742, 1796), True, 'import numpy as np\n'), ((1829, 1865), 'numpy.cos', 'np.cos', (['(2 * np.pi * _x / lambd + psi)'], {}), '(2 * np.pi * _x / lambd + psi)\n', (1835, 1865), True, 'import numpy as np\n')] |
"""This example simulates the start-up behavior of the squirrel cage induction motor connected to
an ideal three-phase grid. The state and action space is continuous.
Running the example will create a formatted plot that show the motor's angular velocity, the drive torque,
the applied voltage in three-phase abc-coordinates, and the measured current in field-oriented dq-coordinates.
"""
import numpy as np
import gym_electric_motor as gem
import matplotlib.pyplot as plt
def parameterize_three_phase_grid(amplitude, frequency, initial_phase):
    """Build a time-dependent voltage function for an ideal three-phase grid.

    Parameters
    ----------
    amplitude : float
        Voltage amplitude as a fraction of the DC-link voltage.
    frequency : float
        Grid frequency in Hertz.
    initial_phase : float
        Initial phase angle in degrees.

    Returns
    -------
    callable
        ``grid_voltage(t)`` returning the momentary phase voltages
        ``[u_a, u_b, u_c]`` at time ``t`` (seconds).
    """
    angular_frequency = 2 * np.pi * frequency  # rad/s
    phase_shift = 2 * np.pi / 3  # 120 degree offset between the three phases
    phase_0 = np.deg2rad(initial_phase)

    def grid_voltage(t):
        base_angle = angular_frequency * t + phase_0
        return [
            amplitude * np.sin(base_angle),
            amplitude * np.sin(base_angle - phase_shift),
            amplitude * np.sin(base_angle + phase_shift),
        ]

    return grid_voltage
# Create the environment
env = gem.make(
    # Choose the squirrel cage induction motor (SCIM) with continuous-control-set
    "AbcCont-CC-SCIM-v0",
    # Define the numerical solver for the simulation
    ode_solver="scipy.ode",
    # Define which state variables are to be monitored concerning limit violations
    # "()" means, that limit violation will not necessitate an env.reset()
    constraints=(),
    # Set the sampling time
    tau=1e-5
)
# Read the sampling time and the per-state normalization limits back from the environment
tau = env.physical_system.tau
limits = env.physical_system.limits
# reset the environment such that the simulation can be started
(state, reference) = env.reset()
# We define these arrays in order to save our simulation results in them
# Initial state and initial time are directly inserted
# STATE holds one row per state variable; multiplying by the limits de-normalizes the values
STATE = np.transpose(np.array([state * limits]))
TIME = np.array([0])
# Use the previously defined function to parameterize a three-phase grid with an amplitude of
# 80 % of the DC-link voltage and a frequency of 50 Hertz
f_grid = 50  # Hertz
u_abc = parameterize_three_phase_grid(amplitude=0.8, frequency=f_grid, initial_phase=0)
# Set a time horizon to simulate, in this case 60 ms
time_horizon = 0.06
step_horizon = int(time_horizon / tau)
for idx in range(step_horizon):
    # calculate the time of this simulation step
    time = idx * tau
    # apply the voltage as given by the grid
    # env.step returns ((state, reference), reward, done, info)
    (state, reference), reward, done, _ = env.step(u_abc(time))
    # save the results of this simulation step
    # (np.append copies the arrays each step; acceptable for this short demo)
    STATE = np.append(STATE, np.transpose([state * limits]), axis=1)
    TIME = np.append(TIME, time)
# convert the timescale from s to ms
TIME *= 1e3
# the rest of the code is for plotting the results in a nice way
# the state indices for the SCIM are:
# STATE[0]: omega (mechanical angular velocity)
# STATE[1]: T (drive torque)
# STATE[2] - STATE[4]: i_sa, i_sb, i_sc (three-phase stator currents)
# STATE[5] - STATE[6]: i_sd, i_sq (stator currents in field oriented dq-coordinates)
# STATE[7] - STATE[9]: u_sa, u_sb, u_sc (three-phase stator voltages)
# STATE[10] - STATE[11]: u_sd, u_sq (stator voltages in field oriented dq-coordinates)
# STATE[12]: epsilon (rotor angular position)
# STATE[13]: u_sup (DC-link supply voltage)
# 2x2 plot grid: angular velocity (top left), abc voltages (top right),
# drive torque (bottom left), dq currents (bottom right)
plt.subplots(2, 2, figsize=(7.45, 2.5))
plt.subplots_adjust(left=None, bottom=None, right=None, top=None, wspace=0.08, hspace=0.05)
plt.rcParams.update({'font.size': 8})
# subplot 1: mechanical angular velocity
plt.subplot(2, 2, 1)
plt.plot(TIME, STATE[0])
plt.ylabel(r"$\omega_\mathrm{me} \, / \, \frac{1}{\mathrm{s}}$")
plt.xlim([0, 60])
plt.yticks([0, 50, 100, 150])
plt.tick_params(axis='x', which='both', labelbottom=False)
plt.tick_params(axis='both', direction="in", left=True, right=False, bottom=True, top=True)
plt.grid()
# subplot 2: three-phase stator voltages (y-axis labeled on the right)
ax = plt.subplot(2, 2, 2)
plt.plot(TIME, STATE[7], label=r"$u_a$")
plt.plot(TIME, STATE[8], label=r"$u_b$")
plt.plot(TIME, STATE[9], label=r"$u_c$")
plt.ylabel(r"$u \, / \, \mathrm{V}$")
plt.xlim([0, 60])
plt.yticks([-200, 0, 200])
ax.yaxis.set_label_position("right")
ax.yaxis.tick_right()
plt.tick_params(axis='x', which='both', labelbottom=False)
plt.tick_params(axis='both', direction="in", left=False, right=True, bottom=True, top=True)
plt.grid()
plt.legend(loc="lower right", ncol=3)
# subplot 3: drive torque
plt.subplot(2, 2, 3)
plt.plot(TIME, STATE[1])
plt.xlabel(r"$t \, / \, \mathrm{ms}$")
plt.ylabel(r"$T \, / \, \mathrm{Nm}$")
plt.xlim([0, 60])
plt.yticks([0, 20])
plt.tick_params(axis='both', direction="in", left=True, right=False, bottom=True, top=True)
plt.grid()
# subplot 4: dq stator currents (y-axis labeled on the right)
ax = plt.subplot(2, 2, 4)
plt.plot(TIME, STATE[5], label=r"$i_d$")
plt.plot(TIME, STATE[6], label=r"$i_q$")
plt.xlabel(r"$t \, / \, \mathrm{ms}$")
plt.ylabel(r"$i \, / \, \mathrm{A}$")
plt.xlim([0, 60])
ax.yaxis.set_label_position("right")
ax.yaxis.tick_right()
plt.tick_params(axis='both', direction="in", left=False, right=True, bottom=True, top=True)
plt.yticks([0, 10, 20, 30])
plt.grid()
plt.legend(loc="upper right", ncol=2)
plt.show()
| [
"matplotlib.pyplot.grid",
"gym_electric_motor.make",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.tick_params",
"numpy.append",
"numpy.array",
"matplotlib.pyplot.rcParams.update",
"matplotlib.pyplot.yticks",
"n... | [((1312, 1398), 'gym_electric_motor.make', 'gem.make', (['"""AbcCont-CC-SCIM-v0"""'], {'ode_solver': '"""scipy.ode"""', 'constraints': '()', 'tau': '(1e-05)'}), "('AbcCont-CC-SCIM-v0', ode_solver='scipy.ode', constraints=(), tau=\n 1e-05)\n", (1320, 1398), True, 'import gym_electric_motor as gem\n'), ((2085, 2098), 'numpy.array', 'np.array', (['[0]'], {}), '([0])\n', (2093, 2098), True, 'import numpy as np\n'), ((3470, 3509), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)', '(2)'], {'figsize': '(7.45, 2.5)'}), '(2, 2, figsize=(7.45, 2.5))\n', (3482, 3509), True, 'import matplotlib.pyplot as plt\n'), ((3510, 3606), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'left': 'None', 'bottom': 'None', 'right': 'None', 'top': 'None', 'wspace': '(0.08)', 'hspace': '(0.05)'}), '(left=None, bottom=None, right=None, top=None, wspace=\n 0.08, hspace=0.05)\n', (3529, 3606), True, 'import matplotlib.pyplot as plt\n'), ((3602, 3639), 'matplotlib.pyplot.rcParams.update', 'plt.rcParams.update', (["{'font.size': 8}"], {}), "({'font.size': 8})\n", (3621, 3639), True, 'import matplotlib.pyplot as plt\n'), ((3641, 3661), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(2)', '(1)'], {}), '(2, 2, 1)\n', (3652, 3661), True, 'import matplotlib.pyplot as plt\n'), ((3662, 3686), 'matplotlib.pyplot.plot', 'plt.plot', (['TIME', 'STATE[0]'], {}), '(TIME, STATE[0])\n', (3670, 3686), True, 'import matplotlib.pyplot as plt\n'), ((3687, 3756), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$\\\\omega_\\\\mathrm{me} \\\\, / \\\\, \\\\frac{1}{\\\\mathrm{s}}$"""'], {}), "('$\\\\omega_\\\\mathrm{me} \\\\, / \\\\, \\\\frac{1}{\\\\mathrm{s}}$')\n", (3697, 3756), True, 'import matplotlib.pyplot as plt\n'), ((3752, 3769), 'matplotlib.pyplot.xlim', 'plt.xlim', (['[0, 60]'], {}), '([0, 60])\n', (3760, 3769), True, 'import matplotlib.pyplot as plt\n'), ((3770, 3799), 'matplotlib.pyplot.yticks', 'plt.yticks', (['[0, 50, 100, 150]'], {}), '([0, 50, 100, 150])\n', (3780, 
3799), True, 'import matplotlib.pyplot as plt\n'), ((3800, 3858), 'matplotlib.pyplot.tick_params', 'plt.tick_params', ([], {'axis': '"""x"""', 'which': '"""both"""', 'labelbottom': '(False)'}), "(axis='x', which='both', labelbottom=False)\n", (3815, 3858), True, 'import matplotlib.pyplot as plt\n'), ((3859, 3955), 'matplotlib.pyplot.tick_params', 'plt.tick_params', ([], {'axis': '"""both"""', 'direction': '"""in"""', 'left': '(True)', 'right': '(False)', 'bottom': '(True)', 'top': '(True)'}), "(axis='both', direction='in', left=True, right=False, bottom\n =True, top=True)\n", (3874, 3955), True, 'import matplotlib.pyplot as plt\n'), ((3951, 3961), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (3959, 3961), True, 'import matplotlib.pyplot as plt\n'), ((3968, 3988), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(2)', '(2)'], {}), '(2, 2, 2)\n', (3979, 3988), True, 'import matplotlib.pyplot as plt\n'), ((3989, 4028), 'matplotlib.pyplot.plot', 'plt.plot', (['TIME', 'STATE[7]'], {'label': '"""$u_a$"""'}), "(TIME, STATE[7], label='$u_a$')\n", (3997, 4028), True, 'import matplotlib.pyplot as plt\n'), ((4030, 4069), 'matplotlib.pyplot.plot', 'plt.plot', (['TIME', 'STATE[8]'], {'label': '"""$u_b$"""'}), "(TIME, STATE[8], label='$u_b$')\n", (4038, 4069), True, 'import matplotlib.pyplot as plt\n'), ((4071, 4110), 'matplotlib.pyplot.plot', 'plt.plot', (['TIME', 'STATE[9]'], {'label': '"""$u_c$"""'}), "(TIME, STATE[9], label='$u_c$')\n", (4079, 4110), True, 'import matplotlib.pyplot as plt\n'), ((4112, 4151), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$u \\\\, / \\\\, \\\\mathrm{V}$"""'], {}), "('$u \\\\, / \\\\, \\\\mathrm{V}$')\n", (4122, 4151), True, 'import matplotlib.pyplot as plt\n'), ((4150, 4167), 'matplotlib.pyplot.xlim', 'plt.xlim', (['[0, 60]'], {}), '([0, 60])\n', (4158, 4167), True, 'import matplotlib.pyplot as plt\n'), ((4168, 4194), 'matplotlib.pyplot.yticks', 'plt.yticks', (['[-200, 0, 200]'], {}), '([-200, 0, 200])\n', (4178, 4194), True, 
'import matplotlib.pyplot as plt\n'), ((4254, 4312), 'matplotlib.pyplot.tick_params', 'plt.tick_params', ([], {'axis': '"""x"""', 'which': '"""both"""', 'labelbottom': '(False)'}), "(axis='x', which='both', labelbottom=False)\n", (4269, 4312), True, 'import matplotlib.pyplot as plt\n'), ((4313, 4409), 'matplotlib.pyplot.tick_params', 'plt.tick_params', ([], {'axis': '"""both"""', 'direction': '"""in"""', 'left': '(False)', 'right': '(True)', 'bottom': '(True)', 'top': '(True)'}), "(axis='both', direction='in', left=False, right=True, bottom\n =True, top=True)\n", (4328, 4409), True, 'import matplotlib.pyplot as plt\n'), ((4405, 4415), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (4413, 4415), True, 'import matplotlib.pyplot as plt\n'), ((4416, 4453), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""lower right"""', 'ncol': '(3)'}), "(loc='lower right', ncol=3)\n", (4426, 4453), True, 'import matplotlib.pyplot as plt\n'), ((4455, 4475), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(2)', '(3)'], {}), '(2, 2, 3)\n', (4466, 4475), True, 'import matplotlib.pyplot as plt\n'), ((4476, 4500), 'matplotlib.pyplot.plot', 'plt.plot', (['TIME', 'STATE[1]'], {}), '(TIME, STATE[1])\n', (4484, 4500), True, 'import matplotlib.pyplot as plt\n'), ((4501, 4541), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$t \\\\, / \\\\, \\\\mathrm{ms}$"""'], {}), "('$t \\\\, / \\\\, \\\\mathrm{ms}$')\n", (4511, 4541), True, 'import matplotlib.pyplot as plt\n'), ((4540, 4580), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$T \\\\, / \\\\, \\\\mathrm{Nm}$"""'], {}), "('$T \\\\, / \\\\, \\\\mathrm{Nm}$')\n", (4550, 4580), True, 'import matplotlib.pyplot as plt\n'), ((4579, 4596), 'matplotlib.pyplot.xlim', 'plt.xlim', (['[0, 60]'], {}), '([0, 60])\n', (4587, 4596), True, 'import matplotlib.pyplot as plt\n'), ((4597, 4616), 'matplotlib.pyplot.yticks', 'plt.yticks', (['[0, 20]'], {}), '([0, 20])\n', (4607, 4616), True, 'import matplotlib.pyplot as plt\n'), ((4617, 
4713), 'matplotlib.pyplot.tick_params', 'plt.tick_params', ([], {'axis': '"""both"""', 'direction': '"""in"""', 'left': '(True)', 'right': '(False)', 'bottom': '(True)', 'top': '(True)'}), "(axis='both', direction='in', left=True, right=False, bottom\n =True, top=True)\n", (4632, 4713), True, 'import matplotlib.pyplot as plt\n'), ((4709, 4719), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (4717, 4719), True, 'import matplotlib.pyplot as plt\n'), ((4726, 4746), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(2)', '(4)'], {}), '(2, 2, 4)\n', (4737, 4746), True, 'import matplotlib.pyplot as plt\n'), ((4747, 4786), 'matplotlib.pyplot.plot', 'plt.plot', (['TIME', 'STATE[5]'], {'label': '"""$i_d$"""'}), "(TIME, STATE[5], label='$i_d$')\n", (4755, 4786), True, 'import matplotlib.pyplot as plt\n'), ((4788, 4827), 'matplotlib.pyplot.plot', 'plt.plot', (['TIME', 'STATE[6]'], {'label': '"""$i_q$"""'}), "(TIME, STATE[6], label='$i_q$')\n", (4796, 4827), True, 'import matplotlib.pyplot as plt\n'), ((4829, 4869), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$t \\\\, / \\\\, \\\\mathrm{ms}$"""'], {}), "('$t \\\\, / \\\\, \\\\mathrm{ms}$')\n", (4839, 4869), True, 'import matplotlib.pyplot as plt\n'), ((4868, 4907), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$i \\\\, / \\\\, \\\\mathrm{A}$"""'], {}), "('$i \\\\, / \\\\, \\\\mathrm{A}$')\n", (4878, 4907), True, 'import matplotlib.pyplot as plt\n'), ((4906, 4923), 'matplotlib.pyplot.xlim', 'plt.xlim', (['[0, 60]'], {}), '([0, 60])\n', (4914, 4923), True, 'import matplotlib.pyplot as plt\n'), ((4983, 5079), 'matplotlib.pyplot.tick_params', 'plt.tick_params', ([], {'axis': '"""both"""', 'direction': '"""in"""', 'left': '(False)', 'right': '(True)', 'bottom': '(True)', 'top': '(True)'}), "(axis='both', direction='in', left=False, right=True, bottom\n =True, top=True)\n", (4998, 5079), True, 'import matplotlib.pyplot as plt\n'), ((5075, 5102), 'matplotlib.pyplot.yticks', 'plt.yticks', (['[0, 10, 20, 30]'], {}), 
'([0, 10, 20, 30])\n', (5085, 5102), True, 'import matplotlib.pyplot as plt\n'), ((5103, 5113), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (5111, 5113), True, 'import matplotlib.pyplot as plt\n'), ((5115, 5152), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper right"""', 'ncol': '(2)'}), "(loc='upper right', ncol=2)\n", (5125, 5152), True, 'import matplotlib.pyplot as plt\n'), ((5153, 5163), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5161, 5163), True, 'import matplotlib.pyplot as plt\n'), ((2050, 2076), 'numpy.array', 'np.array', (['[state * limits]'], {}), '([state * limits])\n', (2058, 2076), True, 'import numpy as np\n'), ((2814, 2835), 'numpy.append', 'np.append', (['TIME', 'time'], {}), '(TIME, time)\n', (2823, 2835), True, 'import numpy as np\n'), ((2763, 2793), 'numpy.transpose', 'np.transpose', (['[state * limits]'], {}), '([state * limits])\n', (2775, 2793), True, 'import numpy as np\n'), ((1065, 1096), 'numpy.sin', 'np.sin', (['(omega * t + phi_initial)'], {}), '(omega * t + phi_initial)\n', (1071, 1096), True, 'import numpy as np\n'), ((1122, 1159), 'numpy.sin', 'np.sin', (['(omega * t + phi_initial - phi)'], {}), '(omega * t + phi_initial - phi)\n', (1128, 1159), True, 'import numpy as np\n'), ((1185, 1222), 'numpy.sin', 'np.sin', (['(omega * t + phi_initial + phi)'], {}), '(omega * t + phi_initial + phi)\n', (1191, 1222), True, 'import numpy as np\n')] |
"""generator.py
Created by <NAME>, <NAME>.
Copyright (c) NREL. All rights reserved.
Electromagnetic design based on conventional magnetic circuit laws
Structural design based on McDonald's thesis """
import numpy as np
import openmdao.api as om
import wisdem.drivetrainse.generator_models as gm
# ----------------------------------------------------------------------------------------------
class Constraints(om.ExplicitComponent):
    """
    Computes structural and electromagnetic constraint margins for an
    arm-supported PMSG generator design.

    The ``con_*`` outputs are formulated as differences (typically
    ``allowable - actual``) so the corresponding constraint is satisfied
    when the output is non-negative.

    Parameters
    ----------
    u_allow_s : float, [m]
    u_as : float, [m]
    z_allow_s : float, [m]
    z_as : float, [m]
    y_allow_s : float, [m]
    y_as : float, [m]
    b_allow_s : float, [m]
    b_st : float, [m]
    u_allow_r : float, [m]
    u_ar : float, [m]
    y_allow_r : float, [m]
    y_ar : float, [m]
    z_allow_r : float, [m]
    z_ar : float, [m]
    b_allow_r : float, [m]
    b_arm : float, [m]
    TC1 : float, [m**3]
    TC2r : float, [m**3]
    TC2s : float, [m**3]
    B_g : float, [T]
    B_smax : float, [T]
    K_rad : float
    K_rad_LL : float
    K_rad_UL : float
    D_ratio : float
    D_ratio_LL : float
    D_ratio_UL : float

    Returns
    -------
    con_uas : float, [m]
    con_zas : float, [m]
    con_yas : float, [m]
    con_bst : float, [m]
    con_uar : float, [m]
    con_yar : float, [m]
    con_zar : float, [m]
    con_br : float, [m]
    TCr : float, [m**3]
    TCs : float, [m**3]
    con_TC2r : float, [m**3]
    con_TC2s : float, [m**3]
    con_Bsmax : float, [T]
    K_rad_L : float
    K_rad_U : float
    D_ratio_L : float
    D_ratio_U : float

    """

    def setup(self):
        self.add_input("u_allow_s", val=0.0, units="m")
        self.add_input("u_as", val=0.0, units="m")
        self.add_input("z_allow_s", val=0.0, units="m")
        self.add_input("z_as", val=0.0, units="m")
        self.add_input("y_allow_s", val=0.0, units="m")
        self.add_input("y_as", val=0.0, units="m")
        self.add_input("b_allow_s", val=0.0, units="m")
        self.add_input("b_st", val=0.0, units="m")
        self.add_input("u_allow_r", val=0.0, units="m")
        self.add_input("u_ar", val=0.0, units="m")
        self.add_input("y_allow_r", val=0.0, units="m")
        self.add_input("y_ar", val=0.0, units="m")
        self.add_input("z_allow_r", val=0.0, units="m")
        self.add_input("z_ar", val=0.0, units="m")
        self.add_input("b_allow_r", val=0.0, units="m")
        self.add_input("b_arm", val=0.0, units="m")
        self.add_input("TC1", val=0.0, units="m**3")
        self.add_input("TC2r", val=0.0, units="m**3")
        self.add_input("TC2s", val=0.0, units="m**3")
        self.add_input("B_g", val=0.0, units="T")
        self.add_input("B_smax", val=0.0, units="T")
        self.add_input("K_rad", val=0.0)
        self.add_input("K_rad_LL", val=0.0)
        self.add_input("K_rad_UL", val=0.0)
        self.add_input("D_ratio", val=0.0)
        self.add_input("D_ratio_LL", val=0.0)
        self.add_input("D_ratio_UL", val=0.0)

        self.add_output("con_uas", val=0.0, units="m")
        self.add_output("con_zas", val=0.0, units="m")
        self.add_output("con_yas", val=0.0, units="m")
        self.add_output("con_bst", val=0.0, units="m")
        self.add_output("con_uar", val=0.0, units="m")
        self.add_output("con_yar", val=0.0, units="m")
        self.add_output("con_zar", val=0.0, units="m")
        self.add_output("con_br", val=0.0, units="m")
        self.add_output("TCr", val=0.0, units="m**3")
        self.add_output("TCs", val=0.0, units="m**3")
        self.add_output("con_TC2r", val=0.0, units="m**3")
        self.add_output("con_TC2s", val=0.0, units="m**3")
        self.add_output("con_Bsmax", val=0.0, units="T")
        self.add_output("K_rad_L", val=0.0)
        self.add_output("K_rad_U", val=0.0)
        self.add_output("D_ratio_L", val=0.0)
        self.add_output("D_ratio_U", val=0.0)

    def compute(self, inputs, outputs):
        # Stator arm deflection margins (allowable - actual)
        outputs["con_uas"] = inputs["u_allow_s"] - inputs["u_as"]
        outputs["con_zas"] = inputs["z_allow_s"] - inputs["z_as"]
        outputs["con_yas"] = inputs["y_allow_s"] - inputs["y_as"]
        outputs["con_bst"] = inputs["b_allow_s"] - inputs["b_st"]  # b_st={'units':'m'}
        # Rotor arm deflection margins (allowable - actual)
        outputs["con_uar"] = inputs["u_allow_r"] - inputs["u_ar"]
        outputs["con_yar"] = inputs["y_allow_r"] - inputs["y_ar"]
        outputs["con_zar"] = inputs["z_allow_r"] - inputs["z_ar"]
        outputs["con_br"] = inputs["b_allow_r"] - inputs["b_arm"]  # b_r={'units':'m'}
        # Torque-capacity margins.
        # BUG FIX: con_TC2r previously used TC2s, duplicating con_TC2s; the
        # rotor constraint must compare the rotor term TC2r against TC1,
        # consistent with TCr below.
        outputs["con_TC2r"] = inputs["TC2r"] - inputs["TC1"]
        outputs["con_TC2s"] = inputs["TC2s"] - inputs["TC1"]
        outputs["TCr"] = inputs["TC2r"] - inputs["TC1"]
        outputs["TCs"] = inputs["TC2s"] - inputs["TC1"]
        # Flux-density margin: air-gap flux density vs. maximum stator flux density
        outputs["con_Bsmax"] = inputs["B_g"] - inputs["B_smax"]
        # Aspect-ratio and diameter-ratio bounds relative to their lower/upper limits
        outputs["K_rad_L"] = inputs["K_rad"] - inputs["K_rad_LL"]
        outputs["K_rad_U"] = inputs["K_rad"] - inputs["K_rad_UL"]
        outputs["D_ratio_L"] = inputs["D_ratio"] - inputs["D_ratio_LL"]
        outputs["D_ratio_U"] = inputs["D_ratio"] - inputs["D_ratio_UL"]
# ----------------------------------------------------------------------------------------------
class MofI(om.ExplicitComponent):
    """
    Estimate mass moments of inertia for the generator and its rotor and
    stator sub-assemblies.

    The generator is approximated as a solid cylinder of mass
    ``generator_mass``, radius ``R_out`` and length ``len_s``; rotor and
    stator inertias are the total inertia scaled by the mass fractions.

    Parameters
    ----------
    R_out : float, [m]
        Outer radius
    stator_mass : float, [kg]
        Total stator mass
    rotor_mass : float, [kg]
        Total rotor mass
    generator_mass : float, [kg]
        Actual mass
    len_s : float, [m]
        Stator core length

    Returns
    -------
    generator_I : numpy array[3], [kg*m**2]
        Moments of Inertia for the component [Ixx, Iyy, Izz] around its center of mass
    rotor_I : numpy array[3], [kg*m**2]
        Moments of Inertia for the rotor about its center of mass
    stator_I : numpy array[3], [kg*m**2]
        Moments of Inertia for the stator about its center of mass
    """

    def setup(self):
        self.add_input("R_out", val=0.0, units="m")
        self.add_input("stator_mass", val=0.0, units="kg")
        self.add_input("rotor_mass", val=0.0, units="kg")
        self.add_input("generator_mass", val=0.0, units="kg")
        self.add_input("len_s", val=0.0, units="m")

        for out_name in ("generator_I", "rotor_I", "stator_I"):
            self.add_output(out_name, val=np.zeros(3), units="kg*m**2")

    def compute(self, inputs, outputs):
        radius = inputs["R_out"]
        length = inputs["len_s"]
        total_mass = inputs["generator_mass"]

        # Solid-cylinder approximation: Ixx about the shaft axis; Iyy = Izz
        # additionally carry the rod-length contribution.
        inertia = np.zeros(3)
        inertia[0] = 0.5 * total_mass * radius ** 2
        inertia[1] = inertia[2] = 0.5 * inertia[0] + total_mass * length ** 2 / 12.0
        outputs["generator_I"] = inertia

        # Distribute the total inertia by mass fraction; fall back to an even
        # 50/50 split when the sub-assembly mass is not provided.
        for mass_name, out_name in (("stator_mass", "stator_I"), ("rotor_mass", "rotor_I")):
            part_mass = inputs[mass_name]
            fraction = part_mass / total_mass if part_mass > 0.0 else 0.5
            outputs[out_name] = fraction * inertia
# ----------------------------------------------------------------------------------------------
class Cost(om.ExplicitComponent):
    """
    Material cost estimate for a PMSG _arms generator; manufacturing costs
    are excluded.

    Each material's cost combines its specific price (scaled by a waste
    factor) with the cost of the electricity consumed during processing.

    Parameters
    ----------
    C_Cu : float, [USD/kg]
        Specific cost of copper
    C_Fe : float, [USD/kg]
        Specific cost of magnetic steel/iron
    C_Fes : float, [USD/kg]
        Specific cost of structural steel
    C_PM : float, [USD/kg]
        Specific cost of Magnet
    Copper : float, [kg]
        Copper mass
    Iron : float, [kg]
        Iron mass
    mass_PM : float, [kg]
        Magnet mass
    Structural_mass : float, [kg]
        Structural mass

    Returns
    -------
    generator_cost : float, [USD]
        Total cost
    """

    def setup(self):
        # Specific cost of material by type
        self.add_input("C_Cu", val=0.0, units="USD/kg")
        self.add_input("C_Fe", val=0.0, units="USD/kg")
        self.add_input("C_Fes", val=0.0, units="USD/kg")
        self.add_input("C_PM", val=0.0, units="USD/kg")

        # Mass of each material type
        self.add_input("Copper", val=0.0, units="kg")
        self.add_input("Iron", val=0.0, units="kg")
        self.add_input("mass_PM", val=0.0, units="kg")
        self.add_input("Structural_mass", val=0.0, units="kg")

        # Outputs
        self.add_output("generator_cost", val=0.0, units="USD")

    def compute(self, inputs, outputs):
        # Industrial electricity rate $/kWh
        # https://www.eia.gov/electricity/monthly/epm_table_grapher.php?t=epmt_5_6_a
        k_e = 0.064

        # (mass input, specific-cost input, waste factor, electricity use in kWh/kg)
        material_table = (
            ("Copper", "C_Cu", 1.26, 96.2),
            ("Iron", "C_Fe", 1.21, 26.9),
            ("mass_PM", "C_PM", 1.0, 79.0),
            ("Structural_mass", "C_Fes", 1.21, 15.9),
        )
        K_copper, K_iron, K_pm, K_steel = (
            inputs[mass] * (waste * inputs[cost] + energy * k_e)
            for mass, cost, waste, energy in material_table
        )

        # Account for capital cost and labor share from BLS MFP by NAICS
        outputs["generator_cost"] = (K_copper + K_pm) / 0.619 + (K_iron + K_steel) / 0.684
# ----------------------------------------------------------------------------------------------
class PowerElectronicsEff(om.ExplicitComponent):
    """
    Compute representative efficiency of power electronics (converter and
    transformer) over the generator power curve.

    Parameters
    ----------
    machine_rating : float, [W]
        Machine rating
    shaft_rpm : numpy array[n_pc], [rpm]
        rated speed of input shaft (lss for direct, hss for geared)
    eandm_efficiency : numpy array[n_pc]
        Generator electromagnetic efficiency values (<1)

    Returns
    -------
    converter_efficiency : numpy array[n_pc]
        Converter efficiency values (<1)
    transformer_efficiency : numpy array[n_pc]
        Transformer efficiency values (<1)
    generator_efficiency : numpy array[n_pc]
        Full generator and power electronics efficiency values (<1)
    """

    def initialize(self):
        # Number of points in the power curve
        self.options.declare("n_pc", default=20)

    def setup(self):
        n_pc = self.options["n_pc"]

        self.add_input("machine_rating", val=0.0, units="W")
        self.add_input("shaft_rpm", val=np.zeros(n_pc), units="rpm")
        self.add_input("eandm_efficiency", val=np.zeros(n_pc))

        self.add_output("converter_efficiency", val=np.zeros(n_pc))
        self.add_output("transformer_efficiency", val=np.zeros(n_pc))
        self.add_output("generator_efficiency", val=np.zeros(n_pc))

    def compute(self, inputs, outputs):
        # Unpack inputs
        rating = inputs["machine_rating"]
        rpmData = inputs["shaft_rpm"]
        # Fraction of rated speed; assumes the last entry of shaft_rpm is the
        # rated speed (monotonically increasing power curve) -- TODO confirm
        rpmRatio = rpmData / rpmData[-1]

        # This converter efficiency is from the APEEM Group in 2020
        # (author names were redacted from this copy of the source)
        # Converter constants
        v_dc, v_dc0, c0, c1, c2, c3 = 6600, 6200, -2.1e-10, 1.2e-5, 1.46e-3, -2e-4
        p_ac0, p_dc0 = 0.99 * rating, rating
        p_s0 = 1e-3 * p_dc0

        # calculated parameters, corrected for the DC-link voltage offset from v_dc0
        a = p_dc0 * (1.0 + c1 * (v_dc - v_dc0))
        b = p_s0 * (1.0 + c2 * (v_dc - v_dc0))
        c = c0 * (1.0 + c3 * (v_dc - v_dc0))

        # Converter efficiency: quadratic AC-power fit evaluated along the power curve
        p_dc = rpmRatio * p_dc0
        p_ac = (p_ac0 / (a - b) - c * (a - b)) * (p_dc - b) + c * ((p_dc - b) ** 2)
        conv_eff = p_ac / p_dc

        # Transformer loss model is P_loss = P_0 + a^2 * P_k
        # a is output power/rated
        p0, pk, rT = 16.0, 111.0, 5.0 / 3.0
        a = rpmRatio * (1 / rT)

        # This gives loss in kW, so need to convert to efficiency
        trans_eff = 1.0 - (p0 + a * a * pk) / (1e-3 * rating)

        # Store outputs: the overall efficiency chains converter, transformer,
        # and electromagnetic efficiencies
        outputs["converter_efficiency"] = conv_eff
        outputs["transformer_efficiency"] = trans_eff
        outputs["generator_efficiency"] = conv_eff * trans_eff * inputs["eandm_efficiency"]
# ----------------------------------------------------------------------------------------------
class Generator(om.Group):
    """
    OpenMDAO group assembling a complete generator analysis: the selected
    electromagnetic design model (via the ``design`` option), mass moments
    of inertia, material cost, design constraints, and power-electronics
    efficiency.

    Options
    -------
    design : str
        Generator type; one of scig, dfig, eesg, pmsg_arms, pmsg_disc,
        pmsg_outer (upper-case spellings are accepted as well).
    n_pc : int
        Number of points in the power curve (default 20).
    """

    def initialize(self):
        genTypes = ["scig", "dfig", "eesg", "pmsg_arms", "pmsg_disc", "pmsg_outer"]
        # Accept both lower- and upper-case spellings of the design type
        self.options.declare("design", values=genTypes + [m.upper() for m in genTypes])
        self.options.declare("n_pc", default=20)

    def setup(self):
        genType = self.options["design"]
        n_pc = self.options["n_pc"]

        # ivc = om.IndepVarComp()
        # sivc = om.IndepVarComp()

        # Shared default values for the electromagnetic design inputs
        self.set_input_defaults("B_r", val=1.2, units="T")
        self.set_input_defaults("P_Fe0e", val=1.0, units="W/kg")
        self.set_input_defaults("P_Fe0h", val=4.0, units="W/kg")
        self.set_input_defaults("S_N", val=-0.002)
        self.set_input_defaults("alpha_p", val=0.5 * np.pi * 0.7)
        self.set_input_defaults("b_r_tau_r", val=0.45)
        self.set_input_defaults("b_ro", val=0.004, units="m")
        self.set_input_defaults("b_s_tau_s", val=0.45)
        self.set_input_defaults("b_so", val=0.004, units="m")
        self.set_input_defaults("cofi", val=0.85)
        self.set_input_defaults("freq", val=60, units="Hz")
        self.set_input_defaults("h_i", val=0.001, units="m")
        self.set_input_defaults("h_sy0", val=0.0)
        self.set_input_defaults("h_w", val=0.005, units="m")
        self.set_input_defaults("k_fes", val=0.9)
        self.set_input_defaults("k_fillr", val=0.7)
        self.set_input_defaults("k_fills", val=0.65)
        self.set_input_defaults("k_s", val=0.2)
        self.set_input_defaults("m", val=3)
        self.set_input_defaults("mu_0", val=np.pi * 4e-7, units="m*kg/s**2/A**2")
        self.set_input_defaults("mu_r", val=1.06, units="m*kg/s**2/A**2")
        self.set_input_defaults("p", val=3.0)
        self.set_input_defaults("phi", val=np.deg2rad(90), units="rad")
        self.set_input_defaults("q1", val=6)
        self.set_input_defaults("q2", val=4)
        self.set_input_defaults("ratio_mw2pp", val=0.7)
        self.set_input_defaults("resist_Cu", val=1.8e-8 * 1.4, units="ohm/m")
        self.set_input_defaults("sigma", val=40e3, units="Pa")
        self.set_input_defaults("y_tau_p", val=1.0)
        self.set_input_defaults("y_tau_pr", val=10.0 / 12)

        # self.set_input_defaults('I_0', val=0.0, units='A')
        # self.set_input_defaults('d_r', val=0.0, units='m')
        # self.set_input_defaults('h_m', val=0.0, units='m')
        # self.set_input_defaults('h_0', val=0.0, units ='m')
        # self.set_input_defaults('h_s', val=0.0, units='m')
        # self.set_input_defaults('len_s', val=0.0, units='m')
        # self.set_input_defaults('n_r', val=0.0)
        # self.set_input_defaults('rad_ag', val=0.0, units='m')
        # self.set_input_defaults('t_wr', val=0.0, units='m')
        # self.set_input_defaults('n_s', val=0.0)
        # self.set_input_defaults('b_st', val=0.0, units='m')
        # self.set_input_defaults('d_s', val=0.0, units='m')
        # self.set_input_defaults('t_ws', val=0.0, units='m')
        # self.set_input_defaults('rho_Copper', val=0.0, units='kg*m**-3')
        # self.set_input_defaults('rho_Fe', val=0.0, units='kg*m**-3')
        # self.set_input_defaults('rho_Fes', val=0.0, units='kg*m**-3')
        # self.set_input_defaults('rho_PM', val=0.0, units='kg*m**-3')
        # self.set_input_defaults('C_Cu', val=0.0, units='USD/kg')
        # self.set_input_defaults('C_Fe', val=0.0, units='USD/kg')
        # self.set_input_defaults('C_Fes', val=0.0, units='USD/kg')
        # self.set_input_defaults('C_PM', val=0.0, units='USD/kg')

        # if genType.lower() in ['pmsg_outer']:
        #    self.set_input_defaults('r_g',0.0, units ='m')
        #    self.set_input_defaults('N_c',0.0)
        #    self.set_input_defaults('b',0.0)
        #    self.set_input_defaults('c',0.0)
        #    self.set_input_defaults('E_p',0.0, units ='V')
        #    self.set_input_defaults('h_yr', val=0.0, units ='m')
        #    self.set_input_defaults('h_ys', val=0.0, units ='m')
        #    self.set_input_defaults('h_sr',0.0,units='m',desc='Structural Mass')
        #    self.set_input_defaults('h_ss',0.0, units ='m')
        #    self.set_input_defaults('t_r',0.0, units ='m')
        #    self.set_input_defaults('t_s',0.0, units ='m')
        #    self.set_input_defaults('u_allow_pcent',0.0)
        #    self.set_input_defaults('y_allow_pcent',0.0)
        #    self.set_input_defaults('z_allow_deg',0.0,units='deg')
        #    self.set_input_defaults('B_tmax',0.0, units='T')
        #    self.set_input_defaults('P_mech', 0.0, units='W')
        #    self.set_input_defaults('y_sh', units ='m')
        #    self.set_input_defaults('theta_sh', 0.0, units='rad')
        #    self.set_input_defaults('D_nose',0.0, units ='m')
        #    self.set_input_defaults('y_bd', units ='m')
        #    self.set_input_defaults('theta_bd', 0.0, units='rad')

        # if genType.lower() in ['eesg','pmsg_arms','pmsg_disc']:
        #    self.set_input_defaults('tau_p', val=0.0, units='m')
        #    self.set_input_defaults('h_ys', val=0.0, units='m')
        #    self.set_input_defaults('h_yr', val=0.0, units='m')
        #    self.set_input_defaults('b_arm', val=0.0, units='m')

        # elif genType.lower() in ['scig','dfig']:
        #    self.set_input_defaults('B_symax', val=0.0, units='T')
        #    self.set_input_defaults('S_Nmax', val=-0.2)

        # if topLevelFlag:
        #    self.add_subsystem('ivc', ivc, promotes=['*'])
        #    self.set_input_defaults('machine_rating', 0.0, units='W')
        #    self.set_input_defaults('shaft_rpm', np.linspace(1.0, 10.0, n_pc), units='rpm')
        #    self.set_input_defaults('rated_torque', 0.0, units='N*m')
        #    self.set_input_defaults('D_shaft', val=0.0, units='m')
        # Structural steel elastic properties
        self.set_input_defaults("E", val=210e9, units="Pa")
        self.set_input_defaults("G", val=81e9, units="Pa")
        # self.add_subsystem('sivc', sivc, promotes=['*'])

        # Easy Poisson ratio assuming isotropic
        self.add_subsystem(
            "poisson", om.ExecComp("v = 0.5*E/G - 1.0", E={"units": "Pa"}, G={"units": "Pa"}), promotes=["*"]
        )

        # Map the design-type option to the corresponding generator model class
        # Add generator design component and cost
        if genType.lower() == "scig":
            mygen = gm.SCIG

        elif genType.lower() == "dfig":
            mygen = gm.DFIG

        elif genType.lower() == "eesg":
            mygen = gm.EESG

        elif genType.lower() == "pmsg_arms":
            mygen = gm.PMSG_Arms

        elif genType.lower() == "pmsg_disc":
            mygen = gm.PMSG_Disc

        elif genType.lower() == "pmsg_outer":
            mygen = gm.PMSG_Outer

        # Electromagnetic model plus inertia, cost, constraint, and efficiency components
        self.add_subsystem("generator", mygen(n_pc=n_pc), promotes=["*"])
        self.add_subsystem("mofi", MofI(), promotes=["*"])
        self.add_subsystem("gen_cost", Cost(), promotes=["*"])
        self.add_subsystem("constr", Constraints(), promotes=["*"])
        self.add_subsystem("eff", PowerElectronicsEff(n_pc=n_pc), promotes=["*"])
| [
"numpy.deg2rad",
"numpy.zeros",
"openmdao.api.ExecComp"
] | [((6801, 6812), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (6809, 6812), True, 'import numpy as np\n'), ((18493, 18563), 'openmdao.api.ExecComp', 'om.ExecComp', (['"""v = 0.5*E/G - 1.0"""'], {'E': "{'units': 'Pa'}", 'G': "{'units': 'Pa'}"}), "('v = 0.5*E/G - 1.0', E={'units': 'Pa'}, G={'units': 'Pa'})\n", (18504, 18563), True, 'import openmdao.api as om\n'), ((6394, 6405), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (6402, 6405), True, 'import numpy as np\n'), ((6463, 6474), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (6471, 6474), True, 'import numpy as np\n'), ((6533, 6544), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (6541, 6544), True, 'import numpy as np\n'), ((10696, 10710), 'numpy.zeros', 'np.zeros', (['n_pc'], {}), '(n_pc)\n', (10704, 10710), True, 'import numpy as np\n'), ((10772, 10786), 'numpy.zeros', 'np.zeros', (['n_pc'], {}), '(n_pc)\n', (10780, 10786), True, 'import numpy as np\n'), ((10841, 10855), 'numpy.zeros', 'np.zeros', (['n_pc'], {}), '(n_pc)\n', (10849, 10855), True, 'import numpy as np\n'), ((10911, 10925), 'numpy.zeros', 'np.zeros', (['n_pc'], {}), '(n_pc)\n', (10919, 10925), True, 'import numpy as np\n'), ((10979, 10993), 'numpy.zeros', 'np.zeros', (['n_pc'], {}), '(n_pc)\n', (10987, 10993), True, 'import numpy as np\n'), ((14212, 14226), 'numpy.deg2rad', 'np.deg2rad', (['(90)'], {}), '(90)\n', (14222, 14226), True, 'import numpy as np\n')] |
from flask import Flask, current_app, request, send_file, Response
import json
import io
import base64
import numpy as np
import tensorflow as tf
from PIL import Image
import cv2
from scipy.spatial import distance
import scipy.misc
from keras.preprocessing import image
from Model.bone_variational_auto_encoder import create_variational_bone_auto_encoder
from Model.bone_auto_encoder import create_bone_auto_encoder
# encoder_model = tf.keras.models.load_model('Saved_Models/0300_encoder_model.h5')
# bone_decoder_model = tf.keras.models.load_model('Saved_Models/0300_bone_decoder_model.h5')
img_dim = 128
encoder, bone_decoder, auto_encoder = create_variational_bone_auto_encoder(
dims=img_dim, latent_dim = 128)
# encoder, bone_decoder, auto_encoder = create_bone_auto_encoder(
# dims=img_dim , latent_dim = 128)
auto_encoder.load_weights('Saved_Models/bone_auto_encoder_model.h5')
app = Flask(__name__)
@app.route('/suggest', methods=['POST'])
def suggest():
try:
data = request.form['img']
except Exception:
return jsonify(status_code='400', msg='Bad Request'), 400
b64_decoded_img = base64.b64decode(data)
byte_img = io.BytesIO(b64_decoded_img)
pil_img= Image.open(byte_img)
cv2.imwrite('test.jpg',np.array(pil_img))
pil_img = pil_img.resize((img_dim,img_dim))
np_img = image.img_to_array(pil_img)
np_img = np_img/255.
sample = np.expand_dims(np_img, axis=0)
empty_CSV = np.empty((1,52,3))
# prediction = bone_decoder_model(encoder_model(sample))
prediction = auto_encoder([sample,empty_CSV,empty_CSV])
response = {"bones": prediction[0].numpy().flatten().tolist()}
return json.dumps(response)
if __name__ == '__main__':
app.run(host='0.0.0.0', port=9000, debug=True) | [
"keras.preprocessing.image.img_to_array",
"Model.bone_variational_auto_encoder.create_variational_bone_auto_encoder",
"PIL.Image.open",
"flask.Flask",
"json.dumps",
"io.BytesIO",
"base64.b64decode",
"numpy.array",
"numpy.empty",
"numpy.expand_dims"
] | [((647, 713), 'Model.bone_variational_auto_encoder.create_variational_bone_auto_encoder', 'create_variational_bone_auto_encoder', ([], {'dims': 'img_dim', 'latent_dim': '(128)'}), '(dims=img_dim, latent_dim=128)\n', (683, 713), False, 'from Model.bone_variational_auto_encoder import create_variational_bone_auto_encoder\n'), ((915, 930), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (920, 930), False, 'from flask import Flask, current_app, request, send_file, Response\n'), ((1143, 1165), 'base64.b64decode', 'base64.b64decode', (['data'], {}), '(data)\n', (1159, 1165), False, 'import base64\n'), ((1182, 1209), 'io.BytesIO', 'io.BytesIO', (['b64_decoded_img'], {}), '(b64_decoded_img)\n', (1192, 1209), False, 'import io\n'), ((1224, 1244), 'PIL.Image.open', 'Image.open', (['byte_img'], {}), '(byte_img)\n', (1234, 1244), False, 'from PIL import Image\n'), ((1355, 1382), 'keras.preprocessing.image.img_to_array', 'image.img_to_array', (['pil_img'], {}), '(pil_img)\n', (1373, 1382), False, 'from keras.preprocessing import image\n'), ((1427, 1457), 'numpy.expand_dims', 'np.expand_dims', (['np_img'], {'axis': '(0)'}), '(np_img, axis=0)\n', (1441, 1457), True, 'import numpy as np\n'), ((1475, 1495), 'numpy.empty', 'np.empty', (['(1, 52, 3)'], {}), '((1, 52, 3))\n', (1483, 1495), True, 'import numpy as np\n'), ((1699, 1719), 'json.dumps', 'json.dumps', (['response'], {}), '(response)\n', (1709, 1719), False, 'import json\n'), ((1273, 1290), 'numpy.array', 'np.array', (['pil_img'], {}), '(pil_img)\n', (1281, 1290), True, 'import numpy as np\n')] |
"""
Plot graph structures
---------------------
This functions show how to plot graph structures, such as the transition matrix.
"""
import cellrank as cr
import numpy as np
adata = cr.datasets.pancreas_preprocessed("../example.h5ad")
adata
# %%
# First, we create a forward transition matrix using the high-level pipeline.
cr.tl.transition_matrix(
adata, show_progress_bar=False, weight_connectivities=0.2, softmax_scale=4
)
# %%
# We can now plot the transition matrix. Below we don't show any arrows, which dramatically speeds up the plotting.
cr.pl.graph(
adata,
"T_fwd",
edge_alpha=0.1,
node_size=5,
arrows=False,
keys="clusters",
keylocs="obs",
)
# %%
# To further illustrate the functionalities, let us only consider the `'Delta`' cluster. We can also filter the edges
# by their weights, as shown below. Only transitions with probability at least 0.1 are plotted.
ixs = np.where(adata.obs["clusters"] == "Delta")[0]
cr.pl.graph(adata, "T_fwd", ixs=ixs, arrows=True, node_size=200, filter_edges=(0.1, 1))
# %%
# Lastly, we can visualize different edge aggregations, such as minimum or maximum. Here we take at most 3 outgoing
# edges restricted to ``ixs`` for each node in descending order and color the nodes by the maximum outgoing weights.
# Aggregated values are always computed before any filtering happens, such as shown above.
#
# Here we also specify ``edge_reductions_restrict_to_ixs`` (by default, it is the same as ``ixs``) that computes the
# statistic between the cells marked with ``ixs`` and ``edge_reductions_restrict_to_ixs``.
#
# Below we compare the maximum transition from each of the `"Delta"` cells to any of the `"Beta"` cells.
cr.pl.graph(
adata,
"T_fwd",
ixs=ixs,
edge_alpha=0.5,
node_size=200,
keys="outgoing",
arrows=False,
top_n_edges=(3, False, "outgoing"),
title="outgoing to Beta",
edge_reductions=np.max,
edge_reductions_restrict_to_ixs=np.where(adata.obs["clusters"] == "Beta")[0],
)
| [
"numpy.where",
"cellrank.tl.transition_matrix",
"cellrank.datasets.pancreas_preprocessed",
"cellrank.pl.graph"
] | [((186, 238), 'cellrank.datasets.pancreas_preprocessed', 'cr.datasets.pancreas_preprocessed', (['"""../example.h5ad"""'], {}), "('../example.h5ad')\n", (219, 238), True, 'import cellrank as cr\n'), ((330, 433), 'cellrank.tl.transition_matrix', 'cr.tl.transition_matrix', (['adata'], {'show_progress_bar': '(False)', 'weight_connectivities': '(0.2)', 'softmax_scale': '(4)'}), '(adata, show_progress_bar=False,\n weight_connectivities=0.2, softmax_scale=4)\n', (353, 433), True, 'import cellrank as cr\n'), ((558, 665), 'cellrank.pl.graph', 'cr.pl.graph', (['adata', '"""T_fwd"""'], {'edge_alpha': '(0.1)', 'node_size': '(5)', 'arrows': '(False)', 'keys': '"""clusters"""', 'keylocs': '"""obs"""'}), "(adata, 'T_fwd', edge_alpha=0.1, node_size=5, arrows=False, keys\n ='clusters', keylocs='obs')\n", (569, 665), True, 'import cellrank as cr\n'), ((964, 1055), 'cellrank.pl.graph', 'cr.pl.graph', (['adata', '"""T_fwd"""'], {'ixs': 'ixs', 'arrows': '(True)', 'node_size': '(200)', 'filter_edges': '(0.1, 1)'}), "(adata, 'T_fwd', ixs=ixs, arrows=True, node_size=200,\n filter_edges=(0.1, 1))\n", (975, 1055), True, 'import cellrank as cr\n'), ((918, 960), 'numpy.where', 'np.where', (["(adata.obs['clusters'] == 'Delta')"], {}), "(adata.obs['clusters'] == 'Delta')\n", (926, 960), True, 'import numpy as np\n'), ((1961, 2002), 'numpy.where', 'np.where', (["(adata.obs['clusters'] == 'Beta')"], {}), "(adata.obs['clusters'] == 'Beta')\n", (1969, 2002), True, 'import numpy as np\n')] |
#!/usr/local/bin/python
"""
vector to matrix
"""
from __future__ import print_function
from __future__ import division
import sys
import argparse
import subprocess
import shlex
import logging
import itertools
import time
import gzip
import re
import os
import math
import uuid
import socket
from datetime import datetime
import numpy as np
import scipy as sp
import scipy.stats
import itertools
from collections import *
from math import cos,log,sin,sqrt
from sklearn.decomposition import PCA
from sklearn import decomposition
verboseprint=lambda *a, **k: None
__version__ = "1.0"
debug = None
def main():
parser=argparse.ArgumentParser(description='convert 1 or 2 vectors into a matrix (TXT - matrix.gz)',formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--vxy', '--vector_xy', dest='vector_xy', type=str, default=None, help='vector file - bedGraph or 1 col')
parser.add_argument('--vx', '--vector_x', dest='vector_x', type=str, default=None, help='x axis vector file - bedGraph or 1 col')
parser.add_argument('--vy', '--vector_y', dest='vector_y', type=str, default=None, help='y axis vector file - bedGraph or 1 col')
parser.add_argument('-a', '--assembly', dest='assembly', type=str, default="NA", help='genome assembly')
parser.add_argument('--cm', '--contrustion_mode', dest='construction_mode', type=str, default="mean", choices=['mean','multiply','add','subtract'], help='matrix constructor mode')
parser.add_argument('-v', '--verbose', dest='verbose', action='count', help='Increase verbosity (specify multiple times for more)')
parser.add_argument('--version', action='version', version='%(prog)s 1.0')
args=parser.parse_args()
vector_xy=args.vector_xy
vector_x=args.vector_x
vector_y=args.vector_y
assembly=args.assembly
construction_mode=args.construction_mode
verbose=args.verbose
log_level = logging.WARNING
if verbose == 1:
log_level = logging.INFO
elif verbose >= 2:
log_level = logging.DEBUG
logging.basicConfig(level=log_level)
global verboseprint
verboseprint = print if verbose else lambda *a, **k: None
if(vector_xy == None) and (vector_x != None) and (vector_y != None):
if not os.path.isfile(vector_x):
sys.exit('invalid vector_x file! (non-existant)')
if not os.path.isfile(vector_y):
sys.exit('invalid vector_y file! (non-existant)')
elif(vector_x == None) and (vector_y == None) and (vector_xy != None):
if not os.path.isfile(vector_xy):
sys.exit('invalid vector_xy file! (non-existant)')
vector_x=vector_xy
vector_y=vector_xy
else:
sys.exit('incorrect usage')
scriptPath=os.path.realpath(__file__)
scriptPath="/".join(scriptPath.split("/")[0:-2])
name_y=os.path.basename(vector_y)
name_y=re.sub(".gz", "", name_y)
name_y=re.sub(".matrix", "", name_y)
name_x=os.path.basename(vector_x)
name_x=re.sub(".gz", "", name_x)
name_x=re.sub(".matrix", "", name_x)
if(name_y == name_x):
name=name_y+'__'+construction_mode
else:
name=name_y+'__'+name_x+'__'+construction_mode
verboseprint("")
verboseprint(name)
verboseprint("")
header_rows,vy=load_vector(vector_y,assembly)
header_cols,vx=load_vector(vector_x,assembly)
rows=vy.shape[0]
cols=vx.shape[0]
matrix=np.zeros((rows,cols),dtype="float32")
verboseprint("building matrix ... ",end="")
for vy_idx,vy_v in enumerate(vy):
for vx_idx,vx_v in enumerate(vx):
if(construction_mode == 'mean'):
matrix[vy_idx,vx_idx]=(vy_v+vx_v)/2
elif(construction_mode == 'multiply'):
matrix[vy_idx,vx_idx]=(vy_v*vx_v)
# speed up later via matrix math
elif(construction_mode == 'add'):
matrix[vy_idx,vx_idx]=(vy_v+vx_v)
elif(construction_mode == 'subtract'):
matrix[vy_idx,vx_idx]=(vy_v-vx_v)
verboseprint("done")
verboseprint("")
matrixFile=name+'.matrix.gz'
verboseprint("writing matrix ... ",end="")
writeMatrix(header_rows,header_cols,matrix,matrixFile)
verboseprint("done")
def load_vector(v,assembly):
with input_wrapper(v)as fh:
headers=[]
vector=[]
i=0
for line in fh:
l=line.rstrip("\n").split("\t")
if line.startswith("#") or line.startswith("track"):
continue
if(len(l) == 4):
score=l[3]
header='vector_'+str(i)+'|'+str(assembly)+'|'+str(l[0])+':'+str(l[1])+'-'+str(l[2])
elif(len(l) == 1):
score=l[1]
header='vector_'+str(i)
if score == 'NA':
score=np.nan
else:
score=np.float(score)
headers.append(header)
vector.append(score)
i+=1
return(headers,np.array(vector))
def input_wrapper(infile):
if infile.endswith('.gz'):
fh=gzip.open(infile,'r')
else:
fh=open(infile,'r')
return fh
def output_wrapper(outfile,append=False,suppress_comments=False):
if outfile.endswith('.gz'):
if append:
fh=gzip.open(outfile,'a')
else:
fh=gzip.open(outfile,'w')
else:
if append:
fh=open(outfile,'a')
else:
fh=open(outfile,'w')
# disable comment(s)if (UCSC format file)
if outfile.endswith('.bed'):
suppress_comments = True
if outfile.endswith('.bed.gz'):
suppress_comments = True
if outfile.endswith('.bedGraph'):
suppress_comments = True
if outfile.endswith('.bedGraph.gz'):
suppress_comments = True
if outfile.endswith('.wig'):
suppress_comments = True
if outfile.endswith('.wig.gz'):
suppress_comments = True
if outfile.endswith('.sam'):
suppress_comments = True
if outfile.endswith('.sam.gz'):
suppress_comments = True
if outfile.endswith('.bam'):
suppress_comments = True
if outfile.endswith('.fastq'):
suppress_comments = True
if outfile.endswith('.fastq.gz'):
suppress_comments = True
if not suppress_comments:
print("## ",os.path.basename(__file__),sep="",file=fh)
print("## ",sep="",file=fh)
print("## Dekker Lab",sep="",file=fh)
print("## Contact: <NAME>",sep="",file=fh)
print("## https://github.com/blajoie",sep="",file=fh)
print("## ",sep="",file=fh)
print("## Version:\t",__version__,sep="",file=fh)
print("## Date:\t",get_date(),sep="",file=fh)
print("## Host:\t",get_compute_resource(),sep="",file=fh)
return(fh)
def get_date():
time=datetime.now()
date=time.strftime('%I:%M:%S %p, %m/%d/%Y')
return date
def get_compute_resource():
return(socket.gethostname())
def writeMatrix(header_rows,header_cols,matrix,matrixFile,precision=4):
"""
write a np matrix with row/col headers - my5C file format - txt formatted gzipped file
"""
nrows=len(header_rows)
ncols=len(header_cols)
# interaction matrix output
out_fh=output_wrapper(matrixFile)
# write matrix col headers
header=[str(i) for i in header_cols]
print(str(nrows)+"x"+str(ncols)+"\t"+"\t".join(header),file=out_fh)
format_func=("{:0."+str(precision)+"f}").format
k=0
for i in xrange(nrows):
print(header_rows[i]+"\t"+"\t".join(map(format_func,matrix[i,:])),file=out_fh)
out_fh.close()
if __name__=="__main__":
main() | [
"logging.basicConfig",
"numpy.float",
"argparse.ArgumentParser",
"gzip.open",
"time.strftime",
"os.path.realpath",
"datetime.datetime.now",
"numpy.zeros",
"numpy.array",
"os.path.isfile",
"os.path.basename",
"sys.exit",
"re.sub",
"socket.gethostname"
] | [((630, 788), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""convert 1 or 2 vectors into a matrix (TXT - matrix.gz)"""', 'formatter_class': 'argparse.ArgumentDefaultsHelpFormatter'}), "(description=\n 'convert 1 or 2 vectors into a matrix (TXT - matrix.gz)',\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n", (653, 788), False, 'import argparse\n'), ((2058, 2094), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'log_level'}), '(level=log_level)\n', (2077, 2094), False, 'import logging\n'), ((2774, 2800), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (2790, 2800), False, 'import os\n'), ((2870, 2896), 'os.path.basename', 'os.path.basename', (['vector_y'], {}), '(vector_y)\n', (2886, 2896), False, 'import os\n'), ((2908, 2933), 're.sub', 're.sub', (['""".gz"""', '""""""', 'name_y'], {}), "('.gz', '', name_y)\n", (2914, 2933), False, 'import re\n'), ((2949, 2978), 're.sub', 're.sub', (['""".matrix"""', '""""""', 'name_y'], {}), "('.matrix', '', name_y)\n", (2955, 2978), False, 'import re\n'), ((2995, 3021), 'os.path.basename', 'os.path.basename', (['vector_x'], {}), '(vector_x)\n', (3011, 3021), False, 'import os\n'), ((3033, 3058), 're.sub', 're.sub', (['""".gz"""', '""""""', 'name_x'], {}), "('.gz', '', name_x)\n", (3039, 3058), False, 'import re\n'), ((3074, 3103), 're.sub', 're.sub', (['""".matrix"""', '""""""', 'name_x'], {}), "('.matrix', '', name_x)\n", (3080, 3103), False, 'import re\n'), ((3491, 3530), 'numpy.zeros', 'np.zeros', (['(rows, cols)'], {'dtype': '"""float32"""'}), "((rows, cols), dtype='float32')\n", (3499, 3530), True, 'import numpy as np\n'), ((7010, 7024), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (7022, 7024), False, 'from datetime import datetime\n'), ((7034, 7072), 'time.strftime', 'time.strftime', (['"""%I:%M:%S %p, %m/%d/%Y"""'], {}), "('%I:%M:%S %p, %m/%d/%Y')\n", (7047, 7072), False, 'import time\n'), ((7134, 7154), 
'socket.gethostname', 'socket.gethostname', ([], {}), '()\n', (7152, 7154), False, 'import socket\n'), ((5151, 5167), 'numpy.array', 'np.array', (['vector'], {}), '(vector)\n', (5159, 5167), True, 'import numpy as np\n'), ((5243, 5265), 'gzip.open', 'gzip.open', (['infile', '"""r"""'], {}), "(infile, 'r')\n", (5252, 5265), False, 'import gzip\n'), ((2279, 2303), 'os.path.isfile', 'os.path.isfile', (['vector_x'], {}), '(vector_x)\n', (2293, 2303), False, 'import os\n'), ((2317, 2366), 'sys.exit', 'sys.exit', (['"""invalid vector_x file! (non-existant)"""'], {}), "('invalid vector_x file! (non-existant)')\n", (2325, 2366), False, 'import sys\n'), ((2382, 2406), 'os.path.isfile', 'os.path.isfile', (['vector_y'], {}), '(vector_y)\n', (2396, 2406), False, 'import os\n'), ((2420, 2469), 'sys.exit', 'sys.exit', (['"""invalid vector_y file! (non-existant)"""'], {}), "('invalid vector_y file! (non-existant)')\n", (2428, 2469), False, 'import sys\n'), ((2722, 2749), 'sys.exit', 'sys.exit', (['"""incorrect usage"""'], {}), "('incorrect usage')\n", (2730, 2749), False, 'import sys\n'), ((5468, 5491), 'gzip.open', 'gzip.open', (['outfile', '"""a"""'], {}), "(outfile, 'a')\n", (5477, 5491), False, 'import gzip\n'), ((5520, 5543), 'gzip.open', 'gzip.open', (['outfile', '"""w"""'], {}), "(outfile, 'w')\n", (5529, 5543), False, 'import gzip\n'), ((6512, 6538), 'os.path.basename', 'os.path.basename', (['__file__'], {}), '(__file__)\n', (6528, 6538), False, 'import os\n'), ((2560, 2585), 'os.path.isfile', 'os.path.isfile', (['vector_xy'], {}), '(vector_xy)\n', (2574, 2585), False, 'import os\n'), ((2599, 2649), 'sys.exit', 'sys.exit', (['"""invalid vector_xy file! (non-existant)"""'], {}), "('invalid vector_xy file! (non-existant)')\n", (2607, 2649), False, 'import sys\n'), ((5009, 5024), 'numpy.float', 'np.float', (['score'], {}), '(score)\n', (5017, 5024), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# wujian@2020
"""
Compute directional/angle feature using steer vector (based on array geometry)
"""
import argparse
import numpy as np
from libs.data_handler import SpectrogramReader, ArchiveWriter, ScpReader
from libs.opts import StftParser
from libs.spatial import directional_feats
from libs.utils import get_logger
logger = get_logger(__name__)
def run(args):
stft_kwargs = {
"frame_len": args.frame_len,
"frame_hop": args.frame_hop,
"round_power_of_two": args.round_power_of_two,
"window": args.window,
"center": args.center, # false to comparable with kaldi
"transpose": False # F x T
}
stft_reader = SpectrogramReader(args.wav_scp, **stft_kwargs)
if args.utt2idx:
utt2idx = ScpReader(args.utt2idx, value_processor=int)
logger.info(f"Using --utt2idx={args.utt2idx}")
else:
utt2idx = None
logger.info(f"Using --doa-idx={args.doa_idx}")
df_pair = [tuple(map(int, p.split(","))) for p in args.df_pair.split(";")]
if not len(df_pair):
raise RuntimeError(f"Bad configurations with --pair {args.pair}")
logger.info(f"Compute directional feature with {df_pair}")
# A x M x F
steer_vector = np.load(args.steer_vector)
num_done = 0
with ArchiveWriter(args.dup_ark, args.scp) as writer:
for key, stft in stft_reader:
# sv: M x F
if utt2idx is None:
idx = [int(v) for v in args.doa_idx.split(",")]
dfs = [
directional_feats(stft, steer_vector[i], df_pair=df_pair)
for i in idx
]
if len(dfs) == 1:
df = dfs[0]
else:
# N x T x F
dfs = np.stack(dfs)
df = dfs.transpose(1, 0, 2).reshape(dfs.shape[1], -1)
elif key in utt2idx:
# stft: M x F x T
df = directional_feats(stft,
steer_vector[utt2idx[key]],
df_pair=df_pair)
else:
logger.warn(f"Missing utt2idx for utterance {key}")
continue
writer.write(key, df)
num_done += 1
if not num_done % 1000:
logger.info(f"Processed {num_done:d} utterance...")
logger.info(f"Processed {num_done:d} utterances over {len(stft_reader):d}")
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Command to compute directional features for linear arrays, "
"based on given steer vector. Also see scripts/sptk/compute_steer_vector.py",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
parents=[StftParser.parser])
parser.add_argument("wav_scp",
type=str,
help="Multi-Channel wave scripts in kaldi format")
parser.add_argument("steer_vector",
type=str,
help="Pre-computed steer vector in each "
"directions (in shape A x M x F, A: number "
"of DoAs, M: microphone number, F: FFT bins)")
parser.add_argument("dup_ark",
type=str,
help="Location to dump features (in ark format)")
parser.add_argument("--utt2idx",
type=str,
default="",
help="utt2idx for index (between "
"[0, A - 1]) of the DoA.")
parser.add_argument("--doa-idx",
type=str,
default=0,
help="DoA index for all utterances if --utt2idx=\"\"")
parser.add_argument("--scp",
type=str,
default="",
help="If assigned, generate corresponding "
"feature scripts")
parser.add_argument("--df-pair",
type=str,
default="0,1",
help="Microphone pairs for directional "
"feature computation")
args = parser.parse_args()
run(args)
| [
"libs.data_handler.ScpReader",
"argparse.ArgumentParser",
"libs.data_handler.ArchiveWriter",
"libs.spatial.directional_feats",
"numpy.stack",
"libs.utils.get_logger",
"numpy.load",
"libs.data_handler.SpectrogramReader"
] | [((356, 376), 'libs.utils.get_logger', 'get_logger', (['__name__'], {}), '(__name__)\n', (366, 376), False, 'from libs.utils import get_logger\n'), ((699, 745), 'libs.data_handler.SpectrogramReader', 'SpectrogramReader', (['args.wav_scp'], {}), '(args.wav_scp, **stft_kwargs)\n', (716, 745), False, 'from libs.data_handler import SpectrogramReader, ArchiveWriter, ScpReader\n'), ((1251, 1277), 'numpy.load', 'np.load', (['args.steer_vector'], {}), '(args.steer_vector)\n', (1258, 1277), True, 'import numpy as np\n'), ((2531, 2803), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Command to compute directional features for linear arrays, based on given steer vector. Also see scripts/sptk/compute_steer_vector.py"""', 'formatter_class': 'argparse.ArgumentDefaultsHelpFormatter', 'parents': '[StftParser.parser]'}), "(description=\n 'Command to compute directional features for linear arrays, based on given steer vector. Also see scripts/sptk/compute_steer_vector.py'\n , formatter_class=argparse.ArgumentDefaultsHelpFormatter, parents=[\n StftParser.parser])\n", (2554, 2803), False, 'import argparse\n'), ((785, 829), 'libs.data_handler.ScpReader', 'ScpReader', (['args.utt2idx'], {'value_processor': 'int'}), '(args.utt2idx, value_processor=int)\n', (794, 829), False, 'from libs.data_handler import SpectrogramReader, ArchiveWriter, ScpReader\n'), ((1305, 1342), 'libs.data_handler.ArchiveWriter', 'ArchiveWriter', (['args.dup_ark', 'args.scp'], {}), '(args.dup_ark, args.scp)\n', (1318, 1342), False, 'from libs.data_handler import SpectrogramReader, ArchiveWriter, ScpReader\n'), ((1556, 1613), 'libs.spatial.directional_feats', 'directional_feats', (['stft', 'steer_vector[i]'], {'df_pair': 'df_pair'}), '(stft, steer_vector[i], df_pair=df_pair)\n', (1573, 1613), False, 'from libs.spatial import directional_feats\n'), ((1811, 1824), 'numpy.stack', 'np.stack', (['dfs'], {}), '(dfs)\n', (1819, 1824), True, 'import numpy as np\n'), ((1987, 2055), 
'libs.spatial.directional_feats', 'directional_feats', (['stft', 'steer_vector[utt2idx[key]]'], {'df_pair': 'df_pair'}), '(stft, steer_vector[utt2idx[key]], df_pair=df_pair)\n', (2004, 2055), False, 'from libs.spatial import directional_feats\n')] |
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
import json
# import tensorflow.keras
from tensorflow.keras.utils import to_categorical
import numpy as np
import os
import random
import scipy.io as sio
import tqdm
STEP = 256
def data_generator(batch_size, preproc, x, y):
num_examples = len(x)
examples = zip(x, y)
examples = sorted(examples, key = lambda x: x[0].shape[0])
end = num_examples - batch_size + 1
batches = [examples[i:i+batch_size]
for i in range(0, end, batch_size)]
random.shuffle(batches)
while True:
for batch in batches:
x, y = zip(*batch)
yield preproc.process(x, y)
class Preproc:
def __init__(self, ecg, labels,app=True,skip=False,CINC=False):
self.mean, self.std = compute_mean_std(ecg)
def generage_classes(labels):
classes = set()
for label in labels:
if isinstance(label,list):
for l in label:
classes.add(l)
else:
classes.add(label)
return sorted(classes)
self.classes = generage_classes(labels) #sorted(set(l for label in labels for l in label))
self.int_to_class = dict(zip(range(len(self.classes)), self.classes))
self.class_to_int = {c : i for i, c in self.int_to_class.items()}
self.app = app
self.skip = skip
self.CINC=CINC
def process(self, x, y):
if self.skip is True and self.app is True:
return self.process_x(x), [self.process_x(x), self.process_x(x), self.process_y(y), self.process_y(y)] # TODO change to 2 outputs
elif self.app:
return self.process_x(x),[self.process_x(x), self.process_y(y)] # TODO change to 2 outputs
else:
return self.process_x(x), self.process_x(x) # TODO change to 2 outputs
def process_x(self, x):
x = pad(x)
x = (x - self.mean) / self.std
x = x[:, :, None]
return x
def process_y(self, y):
# TODO, awni, fix hack pad with noise for cinc
if self.CINC:
y = pad([[self.class_to_int[c] for c in s] for s in y], val=3, dtype=np.int32)
y = to_categorical(
y, num_classes=len(self.classes))
return y
def pad(x, val=0, dtype=np.float32):
max_len = max(len(i) for i in x)
padded = np.full((len(x), max_len), val, dtype=dtype)
for e, i in enumerate(x):
padded[e, :len(i)] = i
return padded
def compute_mean_std(x):
x = np.hstack(x)
return (np.mean(x).astype(np.float32),
np.std(x).astype(np.float32))
def load_dataset(data_json):
with open(data_json, 'r') as fid:
data = [json.loads(l) for l in fid]
labels = []; ecgs = []
for d in tqdm.tqdm(data):
loaded_ecg = load_ecg(d['ecg'])
if (loaded_ecg.shape[0]==8960):
labels.append(d['labels'])
ecgs.append(loaded_ecg)
return ecgs, labels
def load_ecg(record):
if os.path.splitext(record)[1] == ".npy":
ecg = np.load(record)
elif os.path.splitext(record)[1] == ".mat":
ecg = sio.loadmat(record)['val'].squeeze()
else: # Assumes binary 16 bit integers
with open(record, 'r') as fid:
ecg = np.fromfile(fid, dtype=np.int16)
trunc_samp = STEP * int(len(ecg) / STEP)
return ecg[:trunc_samp]
if __name__ == "__main__":
data_json = "examples/cinc17/train.json"
train = load_dataset(data_json)
preproc = Preproc(*train)
gen = data_generator(32, preproc, *train)
for x, y in gen:
print(x.shape, y.shape)
break
| [
"numpy.mean",
"json.loads",
"numpy.fromfile",
"random.shuffle",
"numpy.hstack",
"tqdm.tqdm",
"os.path.splitext",
"scipy.io.loadmat",
"numpy.std",
"numpy.load"
] | [((586, 609), 'random.shuffle', 'random.shuffle', (['batches'], {}), '(batches)\n', (600, 609), False, 'import random\n'), ((2617, 2629), 'numpy.hstack', 'np.hstack', (['x'], {}), '(x)\n', (2626, 2629), True, 'import numpy as np\n'), ((2866, 2881), 'tqdm.tqdm', 'tqdm.tqdm', (['data'], {}), '(data)\n', (2875, 2881), False, 'import tqdm\n'), ((3145, 3160), 'numpy.load', 'np.load', (['record'], {}), '(record)\n', (3152, 3160), True, 'import numpy as np\n'), ((2798, 2811), 'json.loads', 'json.loads', (['l'], {}), '(l)\n', (2808, 2811), False, 'import json\n'), ((3092, 3116), 'os.path.splitext', 'os.path.splitext', (['record'], {}), '(record)\n', (3108, 3116), False, 'import os\n'), ((2642, 2652), 'numpy.mean', 'np.mean', (['x'], {}), '(x)\n', (2649, 2652), True, 'import numpy as np\n'), ((2684, 2693), 'numpy.std', 'np.std', (['x'], {}), '(x)\n', (2690, 2693), True, 'import numpy as np\n'), ((3170, 3194), 'os.path.splitext', 'os.path.splitext', (['record'], {}), '(record)\n', (3186, 3194), False, 'import os\n'), ((3360, 3392), 'numpy.fromfile', 'np.fromfile', (['fid'], {'dtype': 'np.int16'}), '(fid, dtype=np.int16)\n', (3371, 3392), True, 'import numpy as np\n'), ((3223, 3242), 'scipy.io.loadmat', 'sio.loadmat', (['record'], {}), '(record)\n', (3234, 3242), True, 'import scipy.io as sio\n')] |
import json
import numpy as np
import torch
from torch.utils.data import Dataset, DataLoader
import sentencepiece as spm
from .import FairseqDataset
from .fairseq_dataset import TAG_DICT
from .indexed_dataset import IndexedRawTextDataset
from .collaters import Seq2SeqCollater
class TaggedDataset(IndexedRawTextDataset):
def __init__(self, path, dictionary, append_eos=True, reverse_order=False):
self.src_tokens = []
self.src_sizes = []
self.tgt_tokens = []
self.tgt_sizes = []
self.src = []
self.tgt = []
self.sizes = []
self.append_eos = append_eos
self.reverse_order = reverse_order
self.speakers = []
self.ids = []
self.dictionary = dictionary
self.read_data(path)
self.size = len(self.ids)
def read_data(self, path):
with open(path, 'r', encoding='utf-8') as f:
chat_dict = json.load(f)
for chat in chat_dict.values():
for turn in chat:
src = turn['source']
self.src.append(src)
src_tokens = torch.Tensor(self.dictionary.encode(src))
self.src_tokens.append(src_tokens)
self.src_sizes.append(len(src_tokens))
tgt = turn['target']
self.tgt.append(tgt)
tgt_tokens = torch.Tensor(self.dictionary.encode(tgt)).long()
self.tgt_tokens.append(tgt_tokens)
self.tgt_sizes.append(len(tgt_tokens))
self.speakers.append(turn['speaker'])
self.ids.append(turn['utteranceID'])
self.src_sizes = np.array(self.src_sizes)
self.tgt_sizes = np.array(self.tgt_sizes)
self.sizes = self.src_sizes
def __getitem__(self, idx):
src_speaker = torch.Tensor([self.dictionary.model.piece_to_id(TAG_DICT[self.speakers[idx]])])
source, target = torch.cat((torch.Tensor([self.dictionary.bos()]).long(), src_speaker.long(), self.src_tokens[idx].long(), torch.Tensor([self.dictionary.eos()]).long())) , torch.cat((torch.Tensor([self.dictionary.bos()]).long(), self.tgt_tokens[idx].long(), torch.Tensor([self.dictionary.eos()]).long()))
return {"id": idx, "source": source, "target": target}
def collater(self, samples):
collate_fn = Seq2SeqCollater(pad_index=self.dictionary.model.piece_to_id("<pad>"), eos_index=self.dictionary.model.piece_to_id("</s>"))
samples = collate_fn.collate(samples)
return samples
if __name__ == "__main__":
model = spm.SentencePieceProcessor(model_file="../../../data/wmtchat2020/spm.model")
dataset = TwoToOneDataset("../../../data/wmtchat2020/valid.json", model)
sample = dataset[5]
print(dataset.src[5])
print(spm.decode(sample["source"]))
print(dataset.tgt[5])
print(spm.decode(sample["target"])) | [
"sentencepiece.decode",
"numpy.array",
"sentencepiece.SentencePieceProcessor",
"json.load"
] | [((2590, 2666), 'sentencepiece.SentencePieceProcessor', 'spm.SentencePieceProcessor', ([], {'model_file': '"""../../../data/wmtchat2020/spm.model"""'}), "(model_file='../../../data/wmtchat2020/spm.model')\n", (2616, 2666), True, 'import sentencepiece as spm\n'), ((1680, 1704), 'numpy.array', 'np.array', (['self.src_sizes'], {}), '(self.src_sizes)\n', (1688, 1704), True, 'import numpy as np\n'), ((1730, 1754), 'numpy.array', 'np.array', (['self.tgt_sizes'], {}), '(self.tgt_sizes)\n', (1738, 1754), True, 'import numpy as np\n'), ((2804, 2832), 'sentencepiece.decode', 'spm.decode', (["sample['source']"], {}), "(sample['source'])\n", (2814, 2832), True, 'import sentencepiece as spm\n'), ((2870, 2898), 'sentencepiece.decode', 'spm.decode', (["sample['target']"], {}), "(sample['target'])\n", (2880, 2898), True, 'import sentencepiece as spm\n'), ((953, 965), 'json.load', 'json.load', (['f'], {}), '(f)\n', (962, 965), False, 'import json\n')] |
# Lint as: python3
"""
Main module to run the algorithms.
"""
import os
import atexit
import csv
import itertools
import multiprocessing
import socket
import random
import time
import psutil
# absl needs to be upgraded to >= 0.10.0, otherwise joblib might not work
from absl import app
from absl import flags
import numpy as np
import shutil
from optimal_stopping.utilities import configs_getter
from optimal_stopping.algorithms.backward_induction import DOS
from optimal_stopping.payoffs import payoff
from optimal_stopping.algorithms.backward_induction import LSM
from optimal_stopping.algorithms.backward_induction import RLSM
from optimal_stopping.algorithms.backward_induction import RRLSM
from optimal_stopping.data import stock_model
from optimal_stopping.algorithms.backward_induction import NLSM
from optimal_stopping.algorithms.reinforcement_learning import RFQI
from optimal_stopping.algorithms.reinforcement_learning import FQI
from optimal_stopping.algorithms.reinforcement_learning import LSPI
from optimal_stopping.run import write_figures
import joblib
# GLOBAL CLASSES
class SendBotMessage:
    """Fallback notifier used when ``telegram_notifications`` is not installed.

    Mirrors the interface of ``telegram_notifications.send_bot_message`` by
    simply printing the message text to stdout.
    """

    def __init__(self):
        pass

    @staticmethod
    def send_notification(text, *args, **kwargs):
        # Extra positional/keyword args (e.g. chat_id, files) are accepted
        # and ignored so call sites work unchanged with either backend.
        print(text)


# Prefer the real Telegram notifier; fall back to printing if it is missing
# or fails to import for any reason.
try:
    from telegram_notifications import send_bot_message as SBM
except Exception:
    SBM = SendBotMessage()
# Total number of CPU cores on this machine.
NUM_PROCESSORS = multiprocessing.cpu_count()
# Detect compute servers by hostname; one core is kept free in either case.
if 'ada-' in socket.gethostname() or 'arago' in socket.gethostname():
    SERVER = True
    NB_JOBS = int(NUM_PROCESSORS) - 1
else:
    SERVER = False
    NB_JOBS = int(NUM_PROCESSORS) - 1
# Telegram notifications are only sent when running on a server.
SEND = False
if SERVER:
    SEND = True
FLAGS = flags.FLAGS
flags.DEFINE_list("nb_stocks", None, "List of number of Stocks")
flags.DEFINE_list("algos", None, "Name of the algos to run.")
flags.DEFINE_bool("print_errors", False, "Set to True to print errors if any.")
flags.DEFINE_integer("nb_jobs", NB_JOBS, "Number of CPUs to use parallelly")
flags.DEFINE_bool("generate_pdf", False, "Whether to generate latex tables")
# Column order of the metrics CSV written by _run_algos() / _run_algo().
_CSV_HEADERS = ['algo', 'model', 'payoff', 'drift', 'volatility', 'mean',
                'speed', 'correlation', 'hurst', 'nb_stocks',
                'nb_paths', 'nb_dates', 'spot', 'strike', 'dividend',
                'maturity', 'nb_epochs', 'hidden_size', 'factors',
                'ridge_coeff',
                'train_ITM_only', 'use_path',
                'price', 'duration']
# Mapping from config strings to payoff classes.
_PAYOFFS = {
    "MaxPut": payoff.MaxPut,
    "MaxCall": payoff.MaxCall,
    "GeometricPut": payoff.GeometricPut,
    "BasketCall": payoff.BasketCall,
    "Identity": payoff.Identity,
    "Max": payoff.Max,
    "Mean": payoff.Mean,
}
# Mapping from config strings to stock-model classes.
_STOCK_MODELS = {
    "BlackScholes": stock_model.BlackScholes,
    "FractionalBlackScholes": stock_model.FractionalBlackScholes,
    "FractionalBrownianMotion": stock_model.FractionalBrownianMotion,
    'FractionalBrownianMotionPathDep':
        stock_model.FractionalBrownianMotionPathDep,
    "Heston": stock_model.Heston,
}
# Mapping from config strings to pricing-algorithm classes.
_ALGOS = {
    "LSM": LSM.LeastSquaresPricer,
    "LSMLaguerre": LSM.LeastSquarePricerLaguerre,
    "LSMRidge": LSM.LeastSquarePricerRidge,
    "FQI": FQI.FQIFast,
    "FQILaguerre": FQI.FQIFastLaguerre,
    "LSPI": LSPI.LSPI,  # TODO: this is a slow version -> update similar to FQI
    "NLSM": NLSM.NeuralNetworkPricer,
    "DOS": DOS.DeepOptimalStopping,
    "RLSM": RLSM.ReservoirLeastSquarePricerFast,
    "RLSMRidge": RLSM.ReservoirLeastSquarePricerFastRidge,
    "RRLSM": RRLSM.ReservoirRNNLeastSquarePricer,
    "RFQI": RFQI.FQI_ReservoirFast,
    "RRFQI": RFQI.FQI_ReservoirFastRNN,
}
# Number of scaling factors each randomized algo expects (see `factors`).
_NUM_FACTORS = {
    "RRLSMmix": 3,
    "RRLSM": 2,
    "RLSM": 1,
}
def init_seed():
    """Reset Python's and NumPy's global RNGs to seed 0 for reproducibility."""
    for seed_fn in (random.seed, np.random.seed):
        seed_fn(0)
def _run_algos():
    """Run every algo/hyper-parameter combination of all configs in parallel.

    Each task writes its metrics row to its own temp file; afterwards the rows
    are merged into one CSV whose path is returned.
    """
    # Result CSV named by the current time in ms to avoid collisions.
    fpath = os.path.join(os.path.dirname(__file__), "../../output/metrics_draft",
                         f'{int(time.time()*1000)}.csv')
    tmp_dirpath = f'{fpath}.tmp_results'
    os.makedirs(tmp_dirpath, exist_ok=True)
    # Best-effort cleanup of the temp dir when the interpreter exits.
    atexit.register(shutil.rmtree, tmp_dirpath)
    tmp_files_idx = 0
    delayed_jobs = []
    nb_stocks_flag = [int(nb) for nb in FLAGS.nb_stocks or []]
    for config_name, config in configs_getter.get_configs():
        print(f'Config {config_name}', config)
        # Command-line flags optionally restrict which algos / nb_stocks run.
        config.algos = [a for a in config.algos
                        if FLAGS.algos is None or a in FLAGS.algos]
        if nb_stocks_flag:
            config.nb_stocks = [a for a in config.nb_stocks
                                if a in nb_stocks_flag]
        # Cartesian product over all hyper-parameter lists of the config.
        combinations = list(itertools.product(
            config.algos, config.dividends, config.maturities, config.nb_dates,
            config.nb_paths, config.nb_stocks, config.payoffs, config.drift,
            config.spots, config.stock_models, config.strikes, config.volatilities,
            config.mean, config.speed, config.correlation, config.hurst,
            config.nb_epochs, config.hidden_size, config.factors,
            config.ridge_coeff,
            config.train_ITM_only, config.use_path))
        # random.shuffle(combinations)
        for params in combinations:
            for i in range(config.nb_runs):
                # Each task gets its own output file, indexed sequentially.
                tmp_file_path = os.path.join(tmp_dirpath, str(tmp_files_idx))
                tmp_files_idx += 1
                delayed_jobs.append(joblib.delayed(_run_algo)(
                    tmp_file_path, *params, fail_on_error=FLAGS.print_errors)
                )
    print(f"Running {len(delayed_jobs)} tasks using "
          f"{FLAGS.nb_jobs}/{NUM_PROCESSORS} CPUs...")
    joblib.Parallel(n_jobs=FLAGS.nb_jobs)(delayed_jobs)
    print(f'Writing results to {fpath}...')
    # Merge the per-task rows; tasks that failed left no file (FileNotFoundError).
    with open(fpath, "w") as csvfile:
        writer = csv.DictWriter(csvfile, fieldnames=_CSV_HEADERS)
        writer.writeheader()
        for idx in range(tmp_files_idx):
            tmp_file_path = os.path.join(tmp_dirpath, str(idx))
            try:
                with open(tmp_file_path, "r") as read_f:
                    csvfile.write(read_f.read())
            except FileNotFoundError:
                pass
    return fpath
def _run_algo(
        metrics_fpath, algo, dividend, maturity, nb_dates, nb_paths,
        nb_stocks, payoff, drift, spot, stock_model, strike, volatility, mean,
        speed, correlation, hurst, nb_epochs, hidden_size=10,
        factors=(1., 1., 1.), ridge_coeff=1.,
        train_ITM_only=True, use_path=False,
        fail_on_error=False):
    """
    This functions runs one algo for option pricing. It is called by _run_algos()
    which is called in main(). Below the inputs are listed which have to be
    specified in the config that is passed to main().
    Args:
        metrics_fpath: file path, automatically generated & passed by
            _run_algos()
        algo (str): the algo to train. See dict _ALGOS above.
        dividend (float): the dividend of the stock model.
        maturity (float): the maturity of the option.
        nb_dates (int): number of equidistance dates at which option can be
            exercised up to maturity.
        nb_paths (int): number of paths that are simulated from stock model.
            Half is used to learn the weigths, half to estimate the option
            price.
        nb_stocks (int): number of stocks used for the option (e.g. max call).
        payoff (str): see dict _PAYOFFS.
        drift (float): the drift of the stock model
        spot (float): the value of the stocks at t=0.
        stock_model (str): see dict _STOCK_MODELS.
        strike (float): the strike price of the option, if used by the payoff.
        volatility (float): the volatility of the stock model.
        mean (float): parameter for Heston stock model.
        speed (float): parameter for Heston stock model.
        correlation (float): parameter for Heston stock model.
        hurst (float): in (0,1) the hurst parameter for the
            FractionalBrownianMotion and FractionalBrownianMotionPathDep stock
            models.
        nb_epochs (int): number of epochs to train the algos which are based on
            iterative updates (FQI, RFQI etc.) or SGD (NLSM, DOS). Otherwise
            unused.
        hidden_size (int): the number of nodes in the hidden layers of NN based
            algos.
        factors (list of floats, optional. Contains scaling coeffs for the
            randomized NN (i.e. scaling of the randomly sampled and fixed
            weights -> changes the std of the sampling distribution). Depending
            on the algo, the factors are used differently. See there directly.
        ridge_coeff (float, regression coeff for the algos using Ridge
            regression.
        train_ITM_only (bool): whether to train weights on all paths or only on
            those where payoff is positive (i.e. where it makes sense to stop).
            This should be set to False when using FractionalBrownianMotion with
            Identity payoff, since there the payoff can actually become negative
            and therefore training on those paths is important.
        use_path (bool): for DOS algo only. If true, the algo uses the entire
            path up to the current time of the stock (instead of the current
            value only) as input. This is used for Non-Markovian stock models,
            i.e. FractionalBrownianMotion, where the decisions depend on the
            history.
        fail_on_error (bool): whether to continue when errors occure or not.
            Automatically passed from _run_algos(), with the value of
            FLAGS.print_errors.
    """
    print(algo, spot, volatility, maturity, nb_paths, '... ', end="")
    payoff_ = _PAYOFFS[payoff](strike)
    stock_model_ = _STOCK_MODELS[stock_model](
        drift=drift, volatility=volatility, mean=mean, speed=speed, hurst=hurst,
        correlation=correlation, nb_stocks=nb_stocks,
        nb_paths=nb_paths, nb_dates=nb_dates,
        spot=spot, dividend=dividend,
        maturity=maturity)
    # Each algo family accepts a different subset of keyword arguments; build
    # that subset here and instantiate the pricer once.
    if algo in ("NLSM",):
        extra_kwargs = dict(hidden_size=hidden_size,
                            train_ITM_only=train_ITM_only)
    elif algo in ("DOS",):
        extra_kwargs = dict(hidden_size=hidden_size, use_path=use_path)
    elif algo in ("RLSM", "RRLSM", "RRFQI", "RFQI"):
        extra_kwargs = dict(hidden_size=hidden_size, factors=factors,
                            train_ITM_only=train_ITM_only)
    elif algo in ("RLSMRidge",):
        extra_kwargs = dict(hidden_size=hidden_size, factors=factors,
                            train_ITM_only=train_ITM_only,
                            ridge_coeff=ridge_coeff)
    elif algo in ("LSM", "LSMLaguerre"):
        extra_kwargs = dict(train_ITM_only=train_ITM_only)
    elif algo in ("LSMRidge",):
        extra_kwargs = dict(train_ITM_only=train_ITM_only,
                            ridge_coeff=ridge_coeff)
    else:
        extra_kwargs = {}
    pricer = _ALGOS[algo](stock_model_, payoff_, nb_epochs=nb_epochs,
                          **extra_kwargs)
    t_begin = time.time()
    try:
        price = pricer.price()
        duration = time.time() - t_begin
    except BaseException as err:
        if fail_on_error:
            raise
        # Best-effort mode: report and return without writing a metrics file,
        # so _run_algos() simply skips this task when merging.
        print(err)
        return
    # One row of the results CSV; keys must match _CSV_HEADERS.
    metrics_ = {
        'algo': algo, 'model': stock_model, 'payoff': payoff, 'drift': drift,
        'volatility': volatility, 'mean': mean, 'speed': speed,
        'correlation': correlation, 'hurst': hurst, 'nb_stocks': nb_stocks,
        'nb_paths': nb_paths, 'nb_dates': nb_dates, 'spot': spot,
        'strike': strike, 'dividend': dividend, 'maturity': maturity,
        'price': price, 'duration': duration, 'hidden_size': hidden_size,
        'factors': factors, 'ridge_coeff': ridge_coeff,
        'nb_epochs': nb_epochs, 'train_ITM_only': train_ITM_only,
        'use_path': use_path,
    }
    print("price: ", price, "duration: ", duration)
    with open(metrics_fpath, "w") as metrics_f:
        writer = csv.DictWriter(metrics_f, fieldnames=_CSV_HEADERS)
        writer.writerow(metrics_)
def main(argv):
    """absl entry point: run all experiments and optionally build the PDF.

    Sends Telegram notifications on start/finish/error when SEND is set.
    """
    del argv  # unused; required by absl.app
    chat = "-399803347"
    try:
        if SEND:
            SBM.send_notification(
                text='start running AMC2 with config:\n{}'.format(FLAGS.configs),
                chat_id=chat
            )
        filepath = _run_algos()
        if FLAGS.generate_pdf:
            write_figures.write_figures()
            write_figures.generate_pdf()
        if SEND:
            time.sleep(1)
            SBM.send_notification(
                text='finished',
                files=[filepath],
                chat_id=chat
            )
    except Exception as e:
        message = 'ERROR\n{}'.format(e)
        if SEND:
            SBM.send_notification(
                text=message,
                chat_id=chat
            )
        else:
            print(message)


if __name__ == "__main__":
    app.run(main)
| [
"csv.DictWriter",
"multiprocessing.cpu_count",
"time.sleep",
"absl.flags.DEFINE_list",
"itertools.product",
"optimal_stopping.run.write_figures.write_figures",
"absl.app.run",
"telegram_notifications.send_bot_message.send_notification",
"numpy.random.seed",
"socket.gethostname",
"atexit.register... | [((1372, 1399), 'multiprocessing.cpu_count', 'multiprocessing.cpu_count', ([], {}), '()\n', (1397, 1399), False, 'import multiprocessing\n'), ((1657, 1721), 'absl.flags.DEFINE_list', 'flags.DEFINE_list', (['"""nb_stocks"""', 'None', '"""List of number of Stocks"""'], {}), "('nb_stocks', None, 'List of number of Stocks')\n", (1674, 1721), False, 'from absl import flags\n'), ((1722, 1783), 'absl.flags.DEFINE_list', 'flags.DEFINE_list', (['"""algos"""', 'None', '"""Name of the algos to run."""'], {}), "('algos', None, 'Name of the algos to run.')\n", (1739, 1783), False, 'from absl import flags\n'), ((1784, 1863), 'absl.flags.DEFINE_bool', 'flags.DEFINE_bool', (['"""print_errors"""', '(False)', '"""Set to True to print errors if any."""'], {}), "('print_errors', False, 'Set to True to print errors if any.')\n", (1801, 1863), False, 'from absl import flags\n'), ((1864, 1940), 'absl.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""nb_jobs"""', 'NB_JOBS', '"""Number of CPUs to use parallelly"""'], {}), "('nb_jobs', NB_JOBS, 'Number of CPUs to use parallelly')\n", (1884, 1940), False, 'from absl import flags\n'), ((1941, 2017), 'absl.flags.DEFINE_bool', 'flags.DEFINE_bool', (['"""generate_pdf"""', '(False)', '"""Whether to generate latex tables"""'], {}), "('generate_pdf', False, 'Whether to generate latex tables')\n", (1958, 2017), False, 'from absl import flags\n'), ((3661, 3675), 'random.seed', 'random.seed', (['(0)'], {}), '(0)\n', (3672, 3675), False, 'import random\n'), ((3678, 3695), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (3692, 3695), True, 'import numpy as np\n'), ((3892, 3931), 'os.makedirs', 'os.makedirs', (['tmp_dirpath'], {'exist_ok': '(True)'}), '(tmp_dirpath, exist_ok=True)\n', (3903, 3931), False, 'import os\n'), ((3934, 3977), 'atexit.register', 'atexit.register', (['shutil.rmtree', 'tmp_dirpath'], {}), '(shutil.rmtree, tmp_dirpath)\n', (3949, 3977), False, 'import atexit\n'), ((4110, 4138), 
'optimal_stopping.utilities.configs_getter.get_configs', 'configs_getter.get_configs', ([], {}), '()\n', (4136, 4138), False, 'from optimal_stopping.utilities import configs_getter\n'), ((10929, 10940), 'time.time', 'time.time', ([], {}), '()\n', (10938, 10940), False, 'import time\n'), ((12887, 12900), 'absl.app.run', 'app.run', (['main'], {}), '(main)\n', (12894, 12900), False, 'from absl import app\n'), ((1413, 1433), 'socket.gethostname', 'socket.gethostname', ([], {}), '()\n', (1431, 1433), False, 'import socket\n'), ((1448, 1468), 'socket.gethostname', 'socket.gethostname', ([], {}), '()\n', (1466, 1468), False, 'import socket\n'), ((3739, 3764), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (3754, 3764), False, 'import os\n'), ((5343, 5380), 'joblib.Parallel', 'joblib.Parallel', ([], {'n_jobs': 'FLAGS.nb_jobs'}), '(n_jobs=FLAGS.nb_jobs)\n', (5358, 5380), False, 'import joblib\n'), ((5487, 5535), 'csv.DictWriter', 'csv.DictWriter', (['csvfile'], {'fieldnames': '_CSV_HEADERS'}), '(csvfile, fieldnames=_CSV_HEADERS)\n', (5501, 5535), False, 'import csv\n'), ((12024, 12074), 'csv.DictWriter', 'csv.DictWriter', (['metrics_f'], {'fieldnames': '_CSV_HEADERS'}), '(metrics_f, fieldnames=_CSV_HEADERS)\n', (12038, 12074), False, 'import csv\n'), ((4442, 4864), 'itertools.product', 'itertools.product', (['config.algos', 'config.dividends', 'config.maturities', 'config.nb_dates', 'config.nb_paths', 'config.nb_stocks', 'config.payoffs', 'config.drift', 'config.spots', 'config.stock_models', 'config.strikes', 'config.volatilities', 'config.mean', 'config.speed', 'config.correlation', 'config.hurst', 'config.nb_epochs', 'config.hidden_size', 'config.factors', 'config.ridge_coeff', 'config.train_ITM_only', 'config.use_path'], {}), '(config.algos, config.dividends, config.maturities, config\n .nb_dates, config.nb_paths, config.nb_stocks, config.payoffs, config.\n drift, config.spots, config.stock_models, config.strikes, config.\n volatilities, 
config.mean, config.speed, config.correlation, config.\n hurst, config.nb_epochs, config.hidden_size, config.factors, config.\n ridge_coeff, config.train_ITM_only, config.use_path)\n', (4459, 4864), False, 'import itertools\n'), ((10990, 11001), 'time.time', 'time.time', ([], {}), '()\n', (10999, 11001), False, 'import time\n'), ((12389, 12418), 'optimal_stopping.run.write_figures.write_figures', 'write_figures.write_figures', ([], {}), '()\n', (12416, 12418), False, 'from optimal_stopping.run import write_figures\n'), ((12429, 12457), 'optimal_stopping.run.write_figures.generate_pdf', 'write_figures.generate_pdf', ([], {}), '()\n', (12455, 12457), False, 'from optimal_stopping.run import write_figures\n'), ((12484, 12497), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (12494, 12497), False, 'import time\n'), ((12508, 12586), 'telegram_notifications.send_bot_message.send_notification', 'SBM.send_notification', ([], {'text': '"""finished"""', 'files': '[filepath]', 'chat_id': '"""-399803347"""'}), "(text='finished', files=[filepath], chat_id='-399803347')\n", (12529, 12586), True, 'from telegram_notifications import send_bot_message as SBM\n'), ((3826, 3837), 'time.time', 'time.time', ([], {}), '()\n', (3835, 3837), False, 'import time\n'), ((5128, 5153), 'joblib.delayed', 'joblib.delayed', (['_run_algo'], {}), '(_run_algo)\n', (5142, 5153), False, 'import joblib\n')] |
#!/usr/bin/env python3
"""Categorical Feature Encoding Challengeの実験用コード。"""
import pathlib
import numpy as np
import pandas as pd
import sklearn.metrics
import pytoolkit as tk
# Number of cross-validation folds.
nfold = 5
# LightGBM training parameters.
params = {
    "objective": "binary",
    "metric": "auc",
    "learning_rate": 0.01,
    "nthread": -1,
    # "verbosity": -1,
    # "max_bin": 511,
    # "num_leaves": 31,
    # "min_data_in_leaf": 10,
    "feature_fraction": "sqrt",
    "bagging_freq": 0,
    # "max_depth": 4,
}
# Seeds 1..5 for seed averaging.
seeds = np.arange(5) + 1
# split_seeds = np.arange(5) + 1

data_dir = pathlib.Path("data/kaggle_cat-in-the-dat")
# Model artifacts are stored under models/<this script's stem>.
models_dir = pathlib.Path(f"models/{pathlib.Path(__file__).stem}")
app = tk.cli.App(output_dir=models_dir)
logger = tk.log.get(__name__)
@app.command()
def train():
    """Train the cross-validated models on the training set, then validate."""
    train_set = load_train_data()
    folds = tk.validation.split(train_set, nfold, split_seed=1)
    create_model().cv(train_set, folds)
    validate()
@app.command()
def validate():
    """Compute out-of-fold predictions, report their AUC, then predict on test."""
    train_set = load_train_data()
    folds = tk.validation.split(train_set, nfold, split_seed=1)
    model = create_model().load()
    oofp = model.predict_oof(train_set, folds)
    tk.notifications.post_evals(
        {"auc": sklearn.metrics.roc_auc_score(train_set.labels, oofp)}
    )
    predict()
@app.command()
def predict():
    """Predict on the test set and write models_dir/submission.csv."""
    # TODO: ValueError: Unknown values in column 'nom_8': {'2be51c868', '1f0a80e1d', 'ec337ce4c', 'a9bf3dc47'}
    test_set = load_test_data()
    pred = create_model().load().predict(test_set)
    submission = pd.DataFrame()
    submission["id"] = test_set.ids
    submission["target"] = pred
    submission.to_csv(models_dir / "submission.csv", index=False)
def load_train_data():
    """Read the training CSV and return it preprocessed as a Dataset."""
    return _preprocess(pd.read_csv(data_dir / "train.csv"))
def load_test_data():
    """Read the test CSV and return it preprocessed as a Dataset."""
    return _preprocess(pd.read_csv(data_dir / "test.csv"))
def _preprocess(df):
    """Encode the raw competition columns into numeric features.

    Returns a tk.data.Dataset; labels are attached when a 'target' column is
    present (train split), otherwise only features (test split).
    """
    # Binary columns -> 0/1.
    df["bin_0"] = tk.preprocessing.encode_binary(df["bin_0"], 1, 0)
    df["bin_1"] = tk.preprocessing.encode_binary(df["bin_1"], 1, 0)
    df["bin_2"] = tk.preprocessing.encode_binary(df["bin_2"], 1, 0)
    df["bin_3"] = tk.preprocessing.encode_binary(df["bin_3"], "T", "F")
    df["bin_4"] = tk.preprocessing.encode_binary(df["bin_4"], "Y", "N")
    # Ordinal columns: map ordered category names to their rank.
    df["ord_1"] = tk.preprocessing.encode_ordinal(
        df["ord_1"], ["Novice", "Contributor", "Expert", "Master", "Grandmaster"]
    )
    df["ord_2"] = tk.preprocessing.encode_ordinal(
        df["ord_2"], ["Freezing", "Cold", "Warm", "Hot", "Boiling Hot", "Lava Hot"]
    )
    # Single letters -> their code point (preserves alphabetical order).
    df["ord_3"] = df["ord_3"].map(ord).astype(np.int32)
    df["ord_4"] = df["ord_4"].map(ord).astype(np.int32)
    # Two-character codes -> ord(first) * 255 + ord(second).
    df["ord_5"] = (
        df["ord_5"].apply(lambda s: ord(s[0]) * 255 + ord(s[1])).astype(np.int32)
    )
    # Cyclic (sin/cos) encodings for day-of-week and month.
    df[["day_sin", "day_cos"]] = tk.preprocessing.encode_cyclic(df["day"], 1, 7 + 1)
    df[["month_sin", "month_cos"]] = tk.preprocessing.encode_cyclic(
        df["month"], 1, 12 + 1
    )
    if "target" in df.columns.values:
        return tk.data.Dataset(
            data=df.drop(columns=["target"]), labels=df["target"].values
        )
    else:
        return tk.data.Dataset(data=df)
def create_model():
    """Create the (untrained) LightGBM pipeline model with feature encoding."""
    return tk.pipeline.LGBModel(
        params=params,
        nfold=nfold,
        models_dir=models_dir,
        seeds=seeds,
        preprocessors=[tk.preprocessing.FeaturesEncoder()],
    )


if __name__ == "__main__":
    app.run(default="train")
| [
"pandas.read_csv",
"pathlib.Path",
"pytoolkit.preprocessing.encode_cyclic",
"pytoolkit.data.Dataset",
"pytoolkit.validation.split",
"pytoolkit.log.get",
"pytoolkit.preprocessing.encode_ordinal",
"pytoolkit.preprocessing.FeaturesEncoder",
"pandas.DataFrame",
"pytoolkit.cli.App",
"pytoolkit.prepro... | [((541, 583), 'pathlib.Path', 'pathlib.Path', (['"""data/kaggle_cat-in-the-dat"""'], {}), "('data/kaggle_cat-in-the-dat')\n", (553, 583), False, 'import pathlib\n'), ((657, 690), 'pytoolkit.cli.App', 'tk.cli.App', ([], {'output_dir': 'models_dir'}), '(output_dir=models_dir)\n', (667, 690), True, 'import pytoolkit as tk\n'), ((700, 720), 'pytoolkit.log.get', 'tk.log.get', (['__name__'], {}), '(__name__)\n', (710, 720), True, 'import pytoolkit as tk\n'), ((480, 492), 'numpy.arange', 'np.arange', (['(5)'], {}), '(5)\n', (489, 492), True, 'import numpy as np\n'), ((797, 848), 'pytoolkit.validation.split', 'tk.validation.split', (['train_set', 'nfold'], {'split_seed': '(1)'}), '(train_set, nfold, split_seed=1)\n', (816, 848), True, 'import pytoolkit as tk\n'), ((983, 1034), 'pytoolkit.validation.split', 'tk.validation.split', (['train_set', 'nfold'], {'split_seed': '(1)'}), '(train_set, nfold, split_seed=1)\n', (1002, 1034), True, 'import pytoolkit as tk\n'), ((1493, 1507), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (1505, 1507), True, 'import pandas as pd\n'), ((1652, 1687), 'pandas.read_csv', 'pd.read_csv', (["(data_dir / 'train.csv')"], {}), "(data_dir / 'train.csv')\n", (1663, 1687), True, 'import pandas as pd\n'), ((1748, 1782), 'pandas.read_csv', 'pd.read_csv', (["(data_dir / 'test.csv')"], {}), "(data_dir / 'test.csv')\n", (1759, 1782), True, 'import pandas as pd\n'), ((1851, 1900), 'pytoolkit.preprocessing.encode_binary', 'tk.preprocessing.encode_binary', (["df['bin_0']", '(1)', '(0)'], {}), "(df['bin_0'], 1, 0)\n", (1881, 1900), True, 'import pytoolkit as tk\n'), ((1919, 1968), 'pytoolkit.preprocessing.encode_binary', 'tk.preprocessing.encode_binary', (["df['bin_1']", '(1)', '(0)'], {}), "(df['bin_1'], 1, 0)\n", (1949, 1968), True, 'import pytoolkit as tk\n'), ((1987, 2036), 'pytoolkit.preprocessing.encode_binary', 'tk.preprocessing.encode_binary', (["df['bin_2']", '(1)', '(0)'], {}), "(df['bin_2'], 1, 0)\n", (2017, 2036), 
True, 'import pytoolkit as tk\n'), ((2055, 2108), 'pytoolkit.preprocessing.encode_binary', 'tk.preprocessing.encode_binary', (["df['bin_3']", '"""T"""', '"""F"""'], {}), "(df['bin_3'], 'T', 'F')\n", (2085, 2108), True, 'import pytoolkit as tk\n'), ((2127, 2180), 'pytoolkit.preprocessing.encode_binary', 'tk.preprocessing.encode_binary', (["df['bin_4']", '"""Y"""', '"""N"""'], {}), "(df['bin_4'], 'Y', 'N')\n", (2157, 2180), True, 'import pytoolkit as tk\n'), ((2199, 2309), 'pytoolkit.preprocessing.encode_ordinal', 'tk.preprocessing.encode_ordinal', (["df['ord_1']", "['Novice', 'Contributor', 'Expert', 'Master', 'Grandmaster']"], {}), "(df['ord_1'], ['Novice', 'Contributor',\n 'Expert', 'Master', 'Grandmaster'])\n", (2230, 2309), True, 'import pytoolkit as tk\n'), ((2338, 2450), 'pytoolkit.preprocessing.encode_ordinal', 'tk.preprocessing.encode_ordinal', (["df['ord_2']", "['Freezing', 'Cold', 'Warm', 'Hot', 'Boiling Hot', 'Lava Hot']"], {}), "(df['ord_2'], ['Freezing', 'Cold', 'Warm',\n 'Hot', 'Boiling Hot', 'Lava Hot'])\n", (2369, 2450), True, 'import pytoolkit as tk\n'), ((2714, 2765), 'pytoolkit.preprocessing.encode_cyclic', 'tk.preprocessing.encode_cyclic', (["df['day']", '(1)', '(7 + 1)'], {}), "(df['day'], 1, 7 + 1)\n", (2744, 2765), True, 'import pytoolkit as tk\n'), ((2803, 2857), 'pytoolkit.preprocessing.encode_cyclic', 'tk.preprocessing.encode_cyclic', (["df['month']", '(1)', '(12 + 1)'], {}), "(df['month'], 1, 12 + 1)\n", (2833, 2857), True, 'import pytoolkit as tk\n'), ((3050, 3074), 'pytoolkit.data.Dataset', 'tk.data.Dataset', ([], {'data': 'df'}), '(data=df)\n', (3065, 3074), True, 'import pytoolkit as tk\n'), ((620, 642), 'pathlib.Path', 'pathlib.Path', (['__file__'], {}), '(__file__)\n', (632, 642), False, 'import pathlib\n'), ((3249, 3283), 'pytoolkit.preprocessing.FeaturesEncoder', 'tk.preprocessing.FeaturesEncoder', ([], {}), '()\n', (3281, 3283), True, 'import pytoolkit as tk\n')] |
import os
import pickle
import numpy as np
import torch
from loguru import logger
from tqdm import tqdm
def make_adj_list(N, edge_index_transposed):
    """Build a boolean N x N adjacency matrix including self-loops.

    Args:
        N: number of nodes.
        edge_index_transposed: iterable of (src, dst) node-index pairs.

    Returns:
        Boolean numpy array where entry [i, j] is True iff i == j or the
        edge (i, j) exists.
    """
    adj = np.eye(N, dtype=bool)  # self-loops on the diagonal
    edges = np.asarray(edge_index_transposed)
    if edges.size:
        # Vectorized fancy-index assignment instead of a Python loop.
        adj[edges[:, 0], edges[:, 1]] = True
    return adj
def make_adj_list_wrapper(x):
    """Unpack one graph dict and delegate to make_adj_list."""
    transposed_edges = x["edge_index"].T
    return make_adj_list(x["num_nodes"], transposed_edges)
def compute_adjacency_list(data):
    """Compute an adjacency matrix for every graph dict in *data*."""
    return [make_adj_list_wrapper(graph)
            for graph in tqdm(data, "adjacency list", leave=False)]
def combine_results(data, adj_list):
    """Attach each precomputed adjacency matrix to its graph dict.

    Note: the dicts in *data* are mutated in place; the returned list contains
    those same dicts.
    """
    combined = []
    for graph, adj in tqdm(zip(data, adj_list), "assembling adj_list result",
                          total=len(data), leave=False):
        graph["adj_list"] = adj
        combined.append(graph)
    return combined
def compute_adjacency_list_cached(data, key, root="/data/zhwu/tmp"):
    """Compute adjacency lists for *data*, caching the result as a pickle.

    Args:
        data: list of graph dicts (each needs "num_nodes" and "edge_index").
        key: cache key identifying this dataset/split.
        root: directory in which the cache files live.

    Returns:
        The graph dicts with an "adj_list" entry attached to each.
    """
    cachefile = f"{root}/OGB_ADJLIST_{key}.pickle"
    if os.path.exists(cachefile):
        with open(cachefile, "rb") as cachehandle:
            logger.debug("using cached result from '%s'" % cachefile)
            result = pickle.load(cachehandle)
        return combine_results(data, result)

    result = compute_adjacency_list(data)
    # Cache only the raw adjacency matrices, not the combined dicts.
    with open(cachefile, "wb") as cachehandle:
        logger.debug("saving result to cache '%s'" % cachefile)
        pickle.dump(result, cachehandle)
    logger.info("Got adjacency list data for key %s" % key)
    return combine_results(data, result)
| [
"os.path.exists",
"numpy.eye",
"pickle.dump",
"loguru.logger.debug",
"loguru.logger.info",
"tqdm.tqdm",
"pickle.load"
] | [((160, 169), 'numpy.eye', 'np.eye', (['N'], {}), '(N)\n', (166, 169), True, 'import numpy as np\n'), ((437, 478), 'tqdm.tqdm', 'tqdm', (['data', '"""adjacency list"""'], {'leave': '(False)'}), "(data, 'adjacency list', leave=False)\n", (441, 478), False, 'from tqdm import tqdm\n'), ((902, 927), 'os.path.exists', 'os.path.exists', (['cachefile'], {}), '(cachefile)\n', (916, 927), False, 'import os\n'), ((1339, 1394), 'loguru.logger.info', 'logger.info', (["('Got adjacency list data for key %s' % key)"], {}), "('Got adjacency list data for key %s' % key)\n", (1350, 1394), False, 'from loguru import logger\n'), ((1238, 1293), 'loguru.logger.debug', 'logger.debug', (['("saving result to cache \'%s\'" % cachefile)'], {}), '("saving result to cache \'%s\'" % cachefile)\n', (1250, 1293), False, 'from loguru import logger\n'), ((1302, 1334), 'pickle.dump', 'pickle.dump', (['result', 'cachehandle'], {}), '(result, cachehandle)\n', (1313, 1334), False, 'import pickle\n'), ((992, 1049), 'loguru.logger.debug', 'logger.debug', (['("using cached result from \'%s\'" % cachefile)'], {}), '("using cached result from \'%s\'" % cachefile)\n', (1004, 1049), False, 'from loguru import logger\n'), ((1071, 1095), 'pickle.load', 'pickle.load', (['cachehandle'], {}), '(cachehandle)\n', (1082, 1095), False, 'import pickle\n')] |
import json
import math
import os
import tempfile
from os import remove
from os.path import isfile
import numpy as np
import pandas as pd
from pandapower.auxiliary import _add_ppc_options, _add_opf_options, _add_auxiliary_elements
from pandapower.build_branch import _calc_line_parameter
from pandapower.pd2ppc import _pd2ppc
from pandapower.pypower.idx_brch import ANGMIN, ANGMAX, BR_R, BR_X, BR_B, RATE_A, RATE_B, RATE_C, TAP, SHIFT, \
branch_cols, F_BUS, T_BUS, BR_STATUS
from pandapower.pypower.idx_bus import ZONE, VA, BASE_KV, BS, GS, BUS_I, BUS_TYPE, VMAX, VMIN, VM, PD, QD
from pandapower.pypower.idx_cost import MODEL, NCOST, COST
from pandapower.pypower.idx_gen import PG, QG, GEN_BUS, VG, GEN_STATUS, QMAX, QMIN, PMIN, PMAX
from pandapower.results import init_results
# Column index in the ppc "ne_branch" table that holds the construction cost
# of a candidate line for the TNEP (transmission network expansion) problem.
CONSTRUCTION_COST = 23
# Prefer the pandapower "pplog" logger if available; otherwise fall back to
# the standard library logging module.
try:
    import pplog as logging
except ImportError:
    import logging
def convert_pp_to_pm(net, pm_file_path=None, correct_pm_network_data=True, calculate_voltage_angles=True, ac=True,
                     trafo_model="t", delta=1e-8, trafo3w_losses="hv", check_connectivity=True,
                     pp_to_pm_callback=None, pm_model="ACPPowerModel", pm_solver="ipopt",
                     pm_mip_solver="cbc", pm_nl_solver="ipopt"):
    """
    Converts a pandapower net to a PowerModels.jl datastructure and saves it to a json file
    INPUT:
        **net** - pandapower net
    OPTIONAL:
        **pm_file_path** (str, None) - file path to *.json file to store pm data to
        **correct_pm_network_data** (bool, True) - correct some input data (e.g. angles, p.u. conversion)
        **delta** (float, 1e-8) - (small) offset to set for "hard" OPF limits.
        **pp_to_pm_callback** (function, None) - callback function to add data to the PowerModels data structure
        **pm_model** (str, "ACPPowerModel") - model to use. Default is AC model
        **pm_solver** (str, "ipopt") - default solver to use.
        **pm_nl_solver** (str, "ipopt") - default nonlinear solver to use.
        **pm_mip_solver** (str, "cbc") - default mip solver to use.
        **correct_pm_network_data** (bool, True) - checks if network data is correct. If not tries to correct it
    Returns
    -------
    **pm** (json str) - PowerModels.jl data structure
    """
    # Standard pandapower option setup: OPF mode with flat start and enforced
    # reactive power limits; the options are consumed by the converters below.
    net._options = {}
    _add_ppc_options(net, calculate_voltage_angles=calculate_voltage_angles,
                     trafo_model=trafo_model, check_connectivity=check_connectivity,
                     mode="opf", switch_rx_ratio=2, init_vm_pu="flat", init_va_degree="flat",
                     enforce_q_lims=True, recycle=dict(_is_elements=False, ppc=False, Ybus=False),
                     voltage_depend_loads=False, delta=delta, trafo3w_losses=trafo3w_losses)
    _add_opf_options(net, trafo_loading='power', ac=ac, init="flat", numba=True,
                     pp_to_pm_callback=pp_to_pm_callback, pm_solver=pm_solver, pm_model=pm_model,
                     correct_pm_network_data=correct_pm_network_data, pm_mip_solver=pm_mip_solver,
                     pm_nl_solver=pm_nl_solver)
    net, pm, ppc, ppci = convert_to_pm_structure(net)
    # Serialize pm to JSON; if the caller did not request a persistent file,
    # the temporary buffer file is removed again right away.
    buffer_file = dump_pm_json(pm, pm_file_path)
    if pm_file_path is None and isfile(buffer_file):
        remove(buffer_file)
    return pm
logger = logging.getLogger(__name__)


def convert_to_pm_structure(net):
    """Convert a pandapower net into the PowerModels.jl dict structure.

    Expects net._options to be populated (see convert_pp_to_pm). The resulting
    pm dict is also stored on the net as net._pm.

    Returns:
        Tuple (net, pm, ppc, ppci).
    """
    net["OPF_converged"] = False
    net["converged"] = False
    _add_auxiliary_elements(net)
    init_results(net)
    ppc, ppci = _pd2ppc(net)
    # Add the "ne_branch" table with candidate lines for TNEP problems.
    ppci = build_ne_branch(net, ppci)
    net["_ppc_opf"] = ppci
    pm = ppc_to_pm(net, ppci)
    pm = add_pm_options(pm, net)
    net._pm = pm
    return net, pm, ppc, ppci
def dump_pm_json(pm, buffer_file=None):
    """Write the PowerModels dict *pm* to a JSON file and return the file path.

    If *buffer_file* is None, a unique "pp_to_pm_*.json" file is created in
    the system temp directory; the caller is responsible for deleting it.
    """
    if buffer_file is None:
        # Use the public tempfile API (mkstemp) instead of the private
        # tempfile._get_candidate_names() helper, which is an implementation
        # detail that may change between Python versions. mkstemp also
        # creates the file atomically, avoiding name races.
        fd, buffer_file = tempfile.mkstemp(prefix="pp_to_pm_", suffix=".json",
                                          dir=tempfile.gettempdir())
        os.close(fd)  # only the path is needed; the file is reopened below
    logger.debug("writing PowerModels data structure to %s" % buffer_file)
    with open(buffer_file, 'w') as outfile:
        json.dump(pm, outfile)
    return buffer_file
def _pp_element_to_pm(net, pm, element, pd_bus, qd_bus, load_idx):
    """Convert the pandapower table *element* ("load"/"sgen"/"storage") to pm loads.

    Non-controllable elements become fixed PowerModels loads; sgens enter with
    negated sign (generation modeled as negative load). pd_bus/qd_bus map the
    1-based pm bus index to the accumulated P/Q and are mutated in place.

    Returns:
        Tuple (next free pm load index, lookup array mapping the pandapower
        element index to its pm load index, -1 where not converted).
    """
    bus_lookup = net._pd2ppc_lookups["bus"]
    # Lookup initialized to -1 (= element not converted / controllable).
    pm_lookup = np.ones(max(net[element].index) + 1, dtype=int) * -1 if len(net[element].index) \
        else np.array([], dtype=int)
    for idx in net[element].index:
        if "controllable" in net[element] and net[element].at[idx, "controllable"]:
            # Controllable elements are modeled as generators, not fixed loads.
            continue
        pp_bus = net[element].at[idx, "bus"]
        pm_bus = bus_lookup[pp_bus] + 1  # PowerModels (julia) indexes from 1
        scaling = net[element].at[idx, "scaling"]
        if element == "sgen":
            pd = -net[element].at[idx, "p_mw"] * scaling
            qd = -net[element].at[idx, "q_mvar"] * scaling
        else:
            pd = net[element].at[idx, "p_mw"] * scaling
            qd = net[element].at[idx, "q_mvar"] * scaling
        in_service = net[element].at[idx, "in_service"]
        # .item() converts the numpy scalars to plain Python types for JSON.
        pm["load"][str(load_idx)] = {"pd": pd.item(), "qd": qd.item(), "load_bus": pm_bus.item(),
                                     "status": int(in_service), "index": load_idx}
        if pm_bus not in pd_bus:
            pd_bus[pm_bus] = pd
            qd_bus[pm_bus] = qd
        else:
            pd_bus[pm_bus] += pd
            qd_bus[pm_bus] += qd
        pm_lookup[idx] = load_idx
        load_idx += 1
    return load_idx, pm_lookup
def get_branch_angles(row, correct_pm_network_data):
    """Return the (angmin, angmax) voltage-angle limits of a ppc branch row, in radians.

    With correct_pm_network_data set, limits wider than +/-60 degrees are
    clamped to +/-60 before conversion (PowerModels OPF recommendation).
    """
    angle_min = row[ANGMIN].real
    angle_max = row[ANGMAX].real
    if correct_pm_network_data:
        branch_id = int(row[0].real)
        if angle_min < -60.:
            logger.debug("changed voltage angle minimum of branch {}, "
                         "to -60 from {} degrees".format(branch_id, angle_min))
            angle_min = -60.
        if angle_max > 60.:
            logger.debug("changed voltage angle maximum of branch {} to 60. "
                         "from {} degrees".format(branch_id, angle_max))
            angle_max = 60.
    # PowerModels expects the limits in radians.
    return math.radians(angle_min), math.radians(angle_max)
def create_pm_lookups(net, pm_lookup):
    """Translate the 0-based pandapower->ppc lookups into 1-based pm lookups.

    Julia (PowerModels) indexes from 1, so every valid index is shifted by +1;
    the marker -1 for "element does not exist in the ppc" is preserved.

    Args:
        net: pandapower net with net._pd2ppc_lookups populated.
        pm_lookup: dict of already converted lookups (e.g. loads); the shifted
            ppc lookups are added to it (mutated in place).

    Returns:
        net, with the combined lookup stored as net._pd2pm_lookups.
    """
    for key, val in net._pd2ppc_lookups.items():
        if isinstance(val, dict):
            # Lookup like "branch": subdict of index tuples -> shift each entry.
            pm_val = dict()
            for subkey, subval in val.items():
                pm_val[subkey] = tuple((v + 1 for v in subval))
        elif isinstance(val, np.ndarray):
            # Numpy lookup array: shift to 1-based, then restore the -1
            # markers (a previous -1 becomes 0 after the shift).
            pm_val = val + 1
            pm_val[pm_val == 0] = -1
        elif isinstance(val, int):
            # Scalar lookup: same shift. The previous implementation applied
            # the array mask here as well, which raised a TypeError on ints.
            pm_val = val + 1
            if pm_val == 0:
                pm_val = -1
        else:
            # Unsupported lookup type -> skip.
            continue
        pm_lookup[key] = pm_val
    net._pd2pm_lookups = pm_lookup
    return net
def ppc_to_pm(net, ppci):
    """Convert the internal pandapower ``ppci`` structure into a
    PowerModels.jl data dictionary.

    The result mirrors a matpower case file: per-element sub-dicts keyed by
    1-based string indices ("gen", "branch", "bus", "load", "shunt", ...).
    ``ne_branch`` is only filled for a tnep case.  Returns the ``pm`` dict.
    """
    # create power models dict. Similar to matpower case file. ne_branch is for a tnep case
    pm = {"gen": dict(), "branch": dict(), "bus": dict(), "dcline": dict(), "load": dict(), "storage": dict(),
          "ne_branch": dict(), "switch": dict(),
          "baseMVA": ppci["baseMVA"], "source_version": "2.0.0", "shunt": dict(),
          "sourcetype": "matpower", "per_unit": True, "name": net.name}
    load_idx = 1
    shunt_idx = 1
    # PowerModels has a load model -> add loads and sgens to pm["load"]
    # temp dicts which hold the sum of p, q of loads + sgens
    pd_bus = dict()
    qd_bus = dict()
    load_idx, load_lookup = _pp_element_to_pm(net, pm, "load", pd_bus, qd_bus, load_idx)
    load_idx, sgen_lookup = _pp_element_to_pm(net, pm, "sgen", pd_bus, qd_bus, load_idx)
    load_idx, storage_lookup = _pp_element_to_pm(net, pm, "storage", pd_bus, qd_bus, load_idx)
    pm_lookup = {"load": load_lookup, "sgen": sgen_lookup, "storage": storage_lookup}
    net = create_pm_lookups(net, pm_lookup)
    correct_pm_network_data = net._options["correct_pm_network_data"]
    for row in ppci["bus"]:
        bus = dict()
        # julia starts counting at 1, so shift the 0-based ppc bus index
        idx = int(row[BUS_I]) + 1
        bus["index"] = idx
        bus["bus_i"] = idx
        bus["zone"] = int(row[ZONE])
        bus["bus_type"] = int(row[BUS_TYPE])
        bus["vmax"] = row[VMAX]
        bus["vmin"] = row[VMIN]
        bus["va"] = row[VA]
        bus["vm"] = row[VM]
        bus["base_kv"] = row[BASE_KV]
        # NOTE(review): "pd"/"qd" shadow the common pandas alias inside this loop
        pd = row[PD]
        qd = row[QD]
        # pd and qd are the PQ values in the ppci, if they are equal to the sum in load data is consistent
        if idx in pd_bus:
            pd -= pd_bus[idx]
            qd -= qd_bus[idx]
        # if not we have to add more loads wit the remaining value
        pq_mismatch = not np.allclose(pd, 0.) or not np.allclose(qd, 0.)
        if pq_mismatch:
            # This will be called if ppc PQ != sum at bus.
            logger.info("PQ mismatch. Adding another load at idx {}".format(load_idx))
            pm["load"][str(load_idx)] = {"pd": pd, "qd": qd, "load_bus": idx,
                                         "status": True, "index": load_idx}
            load_idx += 1
        # if bs or gs != 0. -> shunt element at this bus
        bs = row[BS]
        gs = row[GS]
        if not np.allclose(bs, 0.) or not np.allclose(gs, 0.):
            pm["shunt"][str(shunt_idx)] = {"gs": gs, "bs": bs, "shunt_bus": idx,
                                           "status": True, "index": shunt_idx}
            shunt_idx += 1
        pm["bus"][str(idx)] = bus
    # branch rows are ordered lines first, then transformers
    n_lines = net.line.in_service.sum()
    for idx, row in enumerate(ppci["branch"], start=1):
        branch = dict()
        branch["index"] = idx
        branch["transformer"] = bool(idx > n_lines)
        branch["br_r"] = row[BR_R].real
        branch["br_x"] = row[BR_X].real
        # line charging is split symmetrically between the from and to side
        branch["g_fr"] = - row[BR_B].imag / 2.0
        branch["g_to"] = - row[BR_B].imag / 2.0
        branch["b_fr"] = row[BR_B].real / 2.0
        branch["b_to"] = row[BR_B].real / 2.0
        # fall back to rate_b when rate_a is not set (<= 0)
        branch["rate_a"] = row[RATE_A].real if row[RATE_A] > 0 else row[RATE_B].real
        branch["rate_b"] = row[RATE_B].real
        branch["rate_c"] = row[RATE_C].real
        branch["f_bus"] = int(row[F_BUS].real) + 1
        branch["t_bus"] = int(row[T_BUS].real) + 1
        branch["br_status"] = int(row[BR_STATUS].real)
        branch["angmin"], branch["angmax"] = get_branch_angles(row, correct_pm_network_data)
        branch["tap"] = row[TAP].real
        branch["shift"] = math.radians(row[SHIFT].real)
        pm["branch"][str(idx)] = branch
    for idx, row in enumerate(ppci["gen"], start=1):
        gen = dict()
        gen["pg"] = row[PG]
        gen["qg"] = row[QG]
        gen["gen_bus"] = int(row[GEN_BUS]) + 1
        gen["vg"] = row[VG]
        gen["qmax"] = row[QMAX]
        gen["gen_status"] = int(row[GEN_STATUS])
        gen["qmin"] = row[QMIN]
        gen["pmin"] = row[PMIN]
        gen["pmax"] = row[PMAX]
        gen["index"] = idx
        pm["gen"][str(idx)] = gen
    # candidate branches for the tnep problem, same layout as "branch"
    if "ne_branch" in ppci:
        for idx, row in enumerate(ppci["ne_branch"], start=1):
            branch = dict()
            branch["index"] = idx
            branch["transformer"] = False
            branch["br_r"] = row[BR_R].real
            branch["br_x"] = row[BR_X].real
            branch["g_fr"] = - row[BR_B].imag / 2.0
            branch["g_to"] = - row[BR_B].imag / 2.0
            branch["b_fr"] = row[BR_B].real / 2.0
            branch["b_to"] = row[BR_B].real / 2.0
            branch["rate_a"] = row[RATE_A].real if row[RATE_A] > 0 else row[RATE_B].real
            branch["rate_b"] = row[RATE_B].real
            branch["rate_c"] = row[RATE_C].real
            branch["f_bus"] = int(row[F_BUS].real) + 1
            branch["t_bus"] = int(row[T_BUS].real) + 1
            branch["br_status"] = int(row[BR_STATUS].real)
            branch["angmin"], branch["angmax"] = get_branch_angles(row, correct_pm_network_data)
            branch["tap"] = row[TAP].real
            branch["shift"] = math.radians(row[SHIFT].real)
            branch["construction_cost"] = row[CONSTRUCTION_COST].real
            pm["ne_branch"][str(idx)] = branch
    # PowerModels only supports active power costs -> drop the q cost rows
    if len(ppci["gencost"]) > len(ppci["gen"]):
        logger.warning("PowerModels.jl does not consider reactive power cost - costs are ignored")
        ppci["gencost"] = ppci["gencost"][:ppci["gen"].shape[0], :]
    for idx, row in enumerate(ppci["gencost"], start=1):
        gen = pm["gen"][str(idx)]
        gen["model"] = int(row[MODEL])
        if gen["model"] == 1:
            # piecewise linear cost: ncost (x, y) breakpoints
            gen["ncost"] = int(row[NCOST])
            gen["cost"] = row[COST:COST + gen["ncost"] * 2].tolist()
        elif gen["model"] == 2:
            # polynomial cost: pad to a quadratic [c2, c1, c0]
            gen["ncost"] = 3
            gen["cost"] = [0] * 3
            costs = row[COST:]
            if len(costs) > 3:
                logger.info(costs)
                raise ValueError("Maximum quadratic cost function allowed")
            gen["cost"][-len(costs):] = costs
    return pm
def add_pm_options(pm, net):
    """Copy the PowerModels-related options from ``net._options`` into ``pm``.

    Options missing from ``net._options`` fall back to their defaults; time
    limits are taken from the ``pm_time_limits`` dict when present, otherwise
    initialised to infinity.  Returns the updated ``pm`` dict.
    """
    # solver/model options together with their fallback defaults
    option_defaults = (
        ("pm_solver", "ipopt"),
        ("pm_mip_solver", "cbc"),
        ("pm_nl_solver", "ipopt"),
        ("pm_model", "DCPPowerModel"),
        ("pm_log_level", 0),
    )
    for option, fallback in option_defaults:
        pm[option] = net._options[option] if option in net._options else fallback
    time_limits = net._options["pm_time_limits"] if "pm_time_limits" in net._options else None
    if isinstance(time_limits, dict):
        # write time limits to power models data structure
        pm.update(time_limits)
    else:
        for limit_key in ("pm_time_limit", "pm_nl_time_limit", "pm_mip_time_limit"):
            pm[limit_key] = np.inf
    pm["correct_pm_network_data"] = net._options["correct_pm_network_data"]
    return pm
def build_ne_branch(net, ppc):
    """Fill ``ppc["ne_branch"]`` from ``net["ne_line"]`` for the PowerModels
    tnep (transmission network expansion planning) case.

    Does nothing when the net has no ``ne_line`` table.  Returns ``ppc``.
    """
    if "ne_line" not in net:
        return ppc
    n_new_lines = len(net["ne_line"])
    # create branch array ne_branch like the common branch array in the ppc,
    # with one extra column for the construction cost
    ne_branch = np.zeros(shape=(n_new_lines, branch_cols + 1), dtype=np.complex128)
    ne_branch[:, :13] = np.array([0, 0, 0, 0, 0, 250, 250, 250, 1, 0, 1, -60, 60])
    ppc["ne_branch"] = ne_branch
    net._pd2ppc_lookups["ne_branch"] = {"ne_line": (0, n_new_lines)}
    _calc_line_parameter(net, ppc, "ne_line", "ne_branch")
    ppc["ne_branch"][:, CONSTRUCTION_COST] = net["ne_line"].loc[:, "construction_cost"].values
    return ppc
def init_ne_line(net, new_line_index, construction_costs=None):
    """
    Init function for the new-line dataframe, which specifies the possible new
    lines being built by the PowerModels tnep optimization.

    Parameters
    ----------
    net - pp net
    new_line_index (list) - indices of new lines. These are copied to the new
        dataframe net["ne_line"] from net["line"]
    construction_costs (list, 0.) - costs of newly constructed lines

    Returns
    -------
    None (net is modified in place)
    """
    # init dataframe; BUGFIX: take an explicit copy so the later .loc writes
    # cannot trigger chained-assignment issues / SettingWithCopyWarning
    net["ne_line"] = net["line"].loc[new_line_index, :].copy()
    # add costs, if None -> init with zeros
    if construction_costs is None:
        construction_costs = np.zeros(len(new_line_index))
    net["ne_line"].loc[new_line_index, "construction_cost"] = construction_costs
    # set in service, but only in ne line dataframe
    net["ne_line"].loc[new_line_index, "in_service"] = True
    # init res_ne_line to save built status afterwards
    net["res_ne_line"] = pd.DataFrame(data=0, index=new_line_index, columns=["built"], dtype=int)
| [
"logging.getLogger",
"pandapower.pd2ppc._pd2ppc",
"numpy.allclose",
"json.dump",
"pandapower.results.init_results",
"math.radians",
"os.path.isfile",
"numpy.array",
"numpy.zeros",
"pandapower.build_branch._calc_line_parameter",
"tempfile.gettempdir",
"pandas.item",
"tempfile._get_candidate_n... | [((3286, 3313), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (3303, 3313), False, 'import logging\n'), ((2754, 3025), 'pandapower.auxiliary._add_opf_options', '_add_opf_options', (['net'], {'trafo_loading': '"""power"""', 'ac': 'ac', 'init': '"""flat"""', 'numba': '(True)', 'pp_to_pm_callback': 'pp_to_pm_callback', 'pm_solver': 'pm_solver', 'pm_model': 'pm_model', 'correct_pm_network_data': 'correct_pm_network_data', 'pm_mip_solver': 'pm_mip_solver', 'pm_nl_solver': 'pm_nl_solver'}), "(net, trafo_loading='power', ac=ac, init='flat', numba=True,\n pp_to_pm_callback=pp_to_pm_callback, pm_solver=pm_solver, pm_model=\n pm_model, correct_pm_network_data=correct_pm_network_data,\n pm_mip_solver=pm_mip_solver, pm_nl_solver=pm_nl_solver)\n", (2770, 3025), False, 'from pandapower.auxiliary import _add_ppc_options, _add_opf_options, _add_auxiliary_elements\n'), ((3416, 3444), 'pandapower.auxiliary._add_auxiliary_elements', '_add_auxiliary_elements', (['net'], {}), '(net)\n', (3439, 3444), False, 'from pandapower.auxiliary import _add_ppc_options, _add_opf_options, _add_auxiliary_elements\n'), ((3449, 3466), 'pandapower.results.init_results', 'init_results', (['net'], {}), '(net)\n', (3461, 3466), False, 'from pandapower.results import init_results\n'), ((3483, 3495), 'pandapower.pd2ppc._pd2ppc', '_pd2ppc', (['net'], {}), '(net)\n', (3490, 3495), False, 'from pandapower.pd2ppc import _pd2ppc\n'), ((6171, 6191), 'math.radians', 'math.radians', (['angmin'], {}), '(angmin)\n', (6183, 6191), False, 'import math\n'), ((6205, 6225), 'math.radians', 'math.radians', (['angmax'], {}), '(angmax)\n', (6217, 6225), False, 'import math\n'), ((15771, 15843), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': '(0)', 'index': 'new_line_index', 'columns': "['built']", 'dtype': 'int'}), "(data=0, index=new_line_index, columns=['built'], dtype=int)\n", (15783, 15843), True, 'import pandas as pd\n'), ((3212, 3231), 'os.path.isfile', 
'isfile', (['buffer_file'], {}), '(buffer_file)\n', (3218, 3231), False, 'from os.path import isfile\n'), ((3241, 3260), 'os.remove', 'remove', (['buffer_file'], {}), '(buffer_file)\n', (3247, 3260), False, 'from os import remove\n'), ((4135, 4157), 'json.dump', 'json.dump', (['pm', 'outfile'], {}), '(pm, outfile)\n', (4144, 4157), False, 'import json\n'), ((4406, 4429), 'numpy.array', 'np.array', (['[]'], {'dtype': 'int'}), '([], dtype=int)\n', (4414, 4429), True, 'import numpy as np\n'), ((10596, 10625), 'math.radians', 'math.radians', (['row[SHIFT].real'], {}), '(row[SHIFT].real)\n', (10608, 10625), False, 'import math\n'), ((14275, 14337), 'numpy.zeros', 'np.zeros', ([], {'shape': '(length, branch_cols + 1)', 'dtype': 'np.complex128'}), '(shape=(length, branch_cols + 1), dtype=np.complex128)\n', (14283, 14337), True, 'import numpy as np\n'), ((14373, 14431), 'numpy.array', 'np.array', (['[0, 0, 0, 0, 0, 250, 250, 250, 1, 0, 1, -60, 60]'], {}), '([0, 0, 0, 0, 0, 250, 250, 250, 1, 0, 1, -60, 60])\n', (14381, 14431), True, 'import numpy as np\n'), ((14636, 14690), 'pandapower.build_branch._calc_line_parameter', '_calc_line_parameter', (['net', 'ppc', '"""ne_line"""', '"""ne_branch"""'], {}), "(net, ppc, 'ne_line', 'ne_branch')\n", (14656, 14690), False, 'from pandapower.build_branch import _calc_line_parameter\n'), ((3881, 3912), 'tempfile._get_candidate_names', 'tempfile._get_candidate_names', ([], {}), '()\n', (3910, 3912), False, 'import tempfile\n'), ((3949, 3970), 'tempfile.gettempdir', 'tempfile.gettempdir', ([], {}), '()\n', (3968, 3970), False, 'import tempfile\n'), ((5081, 5090), 'pandas.item', 'pd.item', ([], {}), '()\n', (5088, 5090), True, 'import pandas as pd\n'), ((12121, 12150), 'math.radians', 'math.radians', (['row[SHIFT].real'], {}), '(row[SHIFT].real)\n', (12133, 12150), False, 'import math\n'), ((8858, 8878), 'numpy.allclose', 'np.allclose', (['pd', '(0.0)'], {}), '(pd, 0.0)\n', (8869, 8878), True, 'import numpy as np\n'), ((8885, 8905), 
'numpy.allclose', 'np.allclose', (['qd', '(0.0)'], {}), '(qd, 0.0)\n', (8896, 8905), True, 'import numpy as np\n'), ((9369, 9389), 'numpy.allclose', 'np.allclose', (['bs', '(0.0)'], {}), '(bs, 0.0)\n', (9380, 9389), True, 'import numpy as np\n'), ((9396, 9416), 'numpy.allclose', 'np.allclose', (['gs', '(0.0)'], {}), '(gs, 0.0)\n', (9407, 9416), True, 'import numpy as np\n')] |
from tfcgp.config import Config
from tfcgp.chromosome import Chromosome
from tfcgp.classifier import Classifier
from tfcgp.problem import Problem
import numpy as np
import tensorflow as tf
from sklearn import datasets
# --- module-level test fixtures, built once at import time and shared by
# --- every test function below
c = Config()
c.update("cfg/test.yaml")
data = datasets.load_iris()
p = Problem(data.data, data.target)
ch = Chromosome(p.nin, p.nout)
ch.random(c)
# snapshot of the genes before any training; used below to detect whether
# training wrote learned parameters back into the chromosome (lamarckian mode)
orig_genes = np.copy(ch.genes)
clf = Classifier(ch, p.x_train, p.x_test, p.y_train, p.y_test,
                 batch_size=p.batch_size, epochs=p.epochs, seed=p.seed,
                 lamarckian=p.lamarckian)
fit = 0.0
def test_eval():
    """An untrained classifier must report an accuracy inside [0, 1]."""
    acc = clf.evaluate()
    print("Accuracy: ", acc)
    print("1 Params: ", clf.get_params())
    assert 0.0 <= acc <= 1.0
    fit = acc
    assert True
def test_train():
    """Training without lamarckian inheritance must leave the genes untouched."""
    weights = clf.get_params()
    print("2 Params: ", weights)
    history = clf.train()
    weights = clf.get_params()
    print("3 Params: ", weights)
    # genes are only written back to the chromosome in lamarckian mode
    assert np.all(np.equal(orig_genes, ch.genes))
    assert True
def test_improvement():
    """Smoke-test a full evaluate/train/evaluate cycle on the shared classifier."""
    print("Test improvement")
    print("4 Params: ", clf.get_params())
    before = clf.evaluate()
    print("5 Params: ", clf.get_params())
    history = clf.train()
    print("6 Params: ", clf.get_params())
    after = clf.evaluate()
    print("Trained accuracy: ", before, after)
    # training is stochastic, so an actual improvement is not asserted here
    assert True
def test_lamarckian():
    """Lamarckian training must write the learned parameters back into the
    chromosome, and a chromosome rebuilt from those genes must reproduce the
    same parameters."""
    clf.lamarckian = True
    params = clf.get_params()
    print("7 Params: ", params)
    print("Node pids", ch.param_id)
    print(len(ch.genes))
    print("Before: ", ch.genes)
    history = clf.train()
    print("After: ", ch.genes)
    params = clf.get_params()
    acc = clf.evaluate()
    print("8 Params: ", params)
    print("Changed genes: ", ch.genes[orig_genes != ch.genes])
    # lamarckian training must have modified at least one gene
    assert np.any(orig_genes != ch.genes)
    # rebuild a chromosome from the trained genes and wrap it in a classifier
    ch2 = Chromosome(p.nin, p.nout)
    ch2.from_genes(ch.genes, c)
    clf2 = Classifier(ch2, p.x_train, p.x_test, p.y_train, p.y_test,
                  batch_size=p.batch_size, epochs=p.epochs, seed=p.seed,
                  lamarckian=p.lamarckian)
    # NOTE(review): acc2/params2 are read from clf, not clf2 -- presumably
    # clf2 was intended here; confirm before relying on these comparisons
    acc2 = clf.evaluate()
    params2 = clf.get_params()
    print("Accuracies: ", acc, acc2)
    print("Params: ", params, params2)
    print("Node pids: ", ch.param_id, ch2.param_id)
    print("Genes: ", ch.genes, ch2.genes)
    # assert acc == acc2
    assert np.all(params == params2)
    assert ch.param_id == ch2.param_id
    for nid in range(len(ch.nodes)):
        assert ch.nodes[nid].param == ch2.nodes[nid].param
| [
"sklearn.datasets.load_iris",
"numpy.copy",
"tfcgp.chromosome.Chromosome",
"tfcgp.config.Config",
"numpy.any",
"tfcgp.classifier.Classifier",
"tfcgp.problem.Problem",
"numpy.all"
] | [((223, 231), 'tfcgp.config.Config', 'Config', ([], {}), '()\n', (229, 231), False, 'from tfcgp.config import Config\n'), ((266, 286), 'sklearn.datasets.load_iris', 'datasets.load_iris', ([], {}), '()\n', (284, 286), False, 'from sklearn import datasets\n'), ((292, 323), 'tfcgp.problem.Problem', 'Problem', (['data.data', 'data.target'], {}), '(data.data, data.target)\n', (299, 323), False, 'from tfcgp.problem import Problem\n'), ((329, 354), 'tfcgp.chromosome.Chromosome', 'Chromosome', (['p.nin', 'p.nout'], {}), '(p.nin, p.nout)\n', (339, 354), False, 'from tfcgp.chromosome import Chromosome\n'), ((381, 398), 'numpy.copy', 'np.copy', (['ch.genes'], {}), '(ch.genes)\n', (388, 398), True, 'import numpy as np\n'), ((405, 546), 'tfcgp.classifier.Classifier', 'Classifier', (['ch', 'p.x_train', 'p.x_test', 'p.y_train', 'p.y_test'], {'batch_size': 'p.batch_size', 'epochs': 'p.epochs', 'seed': 'p.seed', 'lamarckian': 'p.lamarckian'}), '(ch, p.x_train, p.x_test, p.y_train, p.y_test, batch_size=p.\n batch_size, epochs=p.epochs, seed=p.seed, lamarckian=p.lamarckian)\n', (415, 546), False, 'from tfcgp.classifier import Classifier\n'), ((1082, 1112), 'numpy.all', 'np.all', (['(orig_genes == ch.genes)'], {}), '(orig_genes == ch.genes)\n', (1088, 1112), True, 'import numpy as np\n'), ((2137, 2167), 'numpy.any', 'np.any', (['(orig_genes != ch.genes)'], {}), '(orig_genes != ch.genes)\n', (2143, 2167), True, 'import numpy as np\n'), ((2178, 2203), 'tfcgp.chromosome.Chromosome', 'Chromosome', (['p.nin', 'p.nout'], {}), '(p.nin, p.nout)\n', (2188, 2203), False, 'from tfcgp.chromosome import Chromosome\n'), ((2247, 2389), 'tfcgp.classifier.Classifier', 'Classifier', (['ch2', 'p.x_train', 'p.x_test', 'p.y_train', 'p.y_test'], {'batch_size': 'p.batch_size', 'epochs': 'p.epochs', 'seed': 'p.seed', 'lamarckian': 'p.lamarckian'}), '(ch2, p.x_train, p.x_test, p.y_train, p.y_test, batch_size=p.\n batch_size, epochs=p.epochs, seed=p.seed, lamarckian=p.lamarckian)\n', (2257, 2389), False, 
'from tfcgp.classifier import Classifier\n'), ((2688, 2713), 'numpy.all', 'np.all', (['(params == params2)'], {}), '(params == params2)\n', (2694, 2713), True, 'import numpy as np\n')] |
#!/usr/bin/env python
import numpy,genutil
import unittest
class GENUTIL(unittest.TestCase):
    """Sanity checks for genutil.statistics on a constant input array."""

    def assertArraysEqual(self, A, B):
        """Assert that A and B are element-wise equal."""
        self.assertTrue(numpy.all(numpy.equal(A, B)))

    def testStatisticsNumpy(self):
        data = numpy.ones((15, 25), 'd')
        # expected per-row ranks; every row of the constant input gets the same ranks
        expected_ranks = [0.0, 91.66666666666667, 87.5, 83.33333333333333, 79.16666666666667, 75.0, 70.83333333333333, 66.66666666666667, 62.5, 58.333333333333336, 54.166666666666664, 95.83333333333333, 50.0, 41.666666666666664, 37.5, 33.333333333333336, 29.166666666666668, 25.0, 20.833333333333332, 16.666666666666668, 12.5, 8.333333333333334, 4.166666666666667, 45.833333333333336, 100.0]
        ranks = genutil.statistics.rank(data, axis=1)
        self.assertArraysEqual(ranks, expected_ranks)
        # a constant field has zero variance along every axis
        variance = genutil.statistics.variance(data, axis=0)
        self.assertTrue(numpy.allclose(variance, 0.))
| [
"numpy.equal",
"numpy.ones",
"genutil.statistics.rank",
"genutil.statistics.variance"
] | [((230, 255), 'numpy.ones', 'numpy.ones', (['(15, 25)', '"""d"""'], {}), "((15, 25), 'd')\n", (240, 255), False, 'import numpy, genutil\n'), ((707, 741), 'genutil.statistics.rank', 'genutil.statistics.rank', (['a'], {'axis': '(1)'}), '(a, axis=1)\n', (730, 741), False, 'import numpy, genutil\n'), ((166, 183), 'numpy.equal', 'numpy.equal', (['A', 'B'], {}), '(A, B)\n', (177, 183), False, 'import numpy, genutil\n'), ((784, 822), 'genutil.statistics.variance', 'genutil.statistics.variance', (['a'], {'axis': '(0)'}), '(a, axis=0)\n', (811, 822), False, 'import numpy, genutil\n')] |
from __future__ import division
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
from os import path, listdir
import os
import pickle as pkl
import argparse
import re
import numpy as np
import xgboost as xgb
from scipy.special import expit
from utils import *
# fix the RNG seed so stacking runs are reproducible
np.random.seed(998)
def parse_args(argv=None):
    """Parse the command-line options of this script.

    Parameters
    ----------
    argv : list of str, optional
        Argument list to parse.  ``None`` (the default) keeps the original
        behaviour of reading ``sys.argv[1:]``, so existing ``parse_args()``
        callers are unaffected; passing an explicit list makes the parser
        testable.

    Returns
    -------
    argparse.Namespace with the integer attribute ``yix`` (default 0).
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--yix', type=int, default=0)
    return parser.parse_args(argv)
# --- assemble the level-4 stacking feature matrices for target args.yix ---
args = parse_args()
data_dir = '../level3-feature/' + str(args.yix)
# level-3 feature matrices and training labels
X_train = np.load(path.join(data_dir, 'X_train.npy'))
X_test = np.load(path.join(data_dir, 'X_test.npy'))
y_train = np.load(path.join(data_dir, 'y_train.npy'))
print(X_train.shape, X_test.shape, y_train.shape)
# extra hand-crafted features for the same target
X_train_ext = np.load('../extra_ftrs/' + str(args.yix) + '/X_train_ext.npy')
X_test_ext = np.load('../extra_ftrs/' + str(args.yix) + '/X_test_ext.npy')
print(X_train_ext.shape, X_test_ext.shape)
X_train = np.hstack((X_train, X_train_ext))
X_test = np.hstack((X_test, X_test_ext))
print('Add Extra')
print(X_train.shape, X_test.shape, y_train.shape)
# out-of-fold / test predictions of the three level-3 models, one column each
model_dir = '../level3-model-final/' + str(args.yix)
X_train_pred = np.vstack((
    np.load(path.join(model_dir, 'outFold.npy')),
    np.load(path.join(model_dir, 'outFold_rf.npy')),
    np.load(path.join(model_dir, 'outFold_ext.npy'))
)).T
X_test_pred = np.vstack((
    np.load(path.join(model_dir, 'pred.npy')),
    np.load(path.join(model_dir, 'pred_rf.npy')),
    np.load(path.join(model_dir, 'pred_ext.npy'))
)).T
X_train_all = np.hstack((X_train, X_train_pred))
X_test_all = np.hstack((X_test, X_test_pred))
print(X_train_all.shape)
# NOTE(review): X_train_all.shape is printed twice; the second print was
# probably meant to show X_test_all.shape -- confirm
print(X_train_all.shape)
save_dir = path.join("../level4-feature/" + str(args.yix))
if not path.exists(save_dir):
    os.makedirs(save_dir)
np.save(path.join(save_dir, "X_train.npy"), X_train_all)
np.save(path.join(save_dir, "X_test.npy"), X_test_all)
| [
"os.path.exists",
"argparse.ArgumentParser",
"numpy.hstack",
"os.makedirs",
"os.path.join",
"numpy.random.seed"
] | [((332, 351), 'numpy.random.seed', 'np.random.seed', (['(998)'], {}), '(998)\n', (346, 351), True, 'import numpy as np\n'), ((983, 1016), 'numpy.hstack', 'np.hstack', (['(X_train, X_train_ext)'], {}), '((X_train, X_train_ext))\n', (992, 1016), True, 'import numpy as np\n'), ((1026, 1057), 'numpy.hstack', 'np.hstack', (['(X_test, X_test_ext)'], {}), '((X_test, X_test_ext))\n', (1035, 1057), True, 'import numpy as np\n'), ((1562, 1596), 'numpy.hstack', 'np.hstack', (['(X_train, X_train_pred)'], {}), '((X_train, X_train_pred))\n', (1571, 1596), True, 'import numpy as np\n'), ((1610, 1642), 'numpy.hstack', 'np.hstack', (['(X_test, X_test_pred)'], {}), '((X_test, X_test_pred))\n', (1619, 1642), True, 'import numpy as np\n'), ((385, 410), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (408, 410), False, 'import argparse\n'), ((584, 618), 'os.path.join', 'path.join', (['data_dir', '"""X_train.npy"""'], {}), "(data_dir, 'X_train.npy')\n", (593, 618), False, 'from os import path, listdir\n'), ((637, 670), 'os.path.join', 'path.join', (['data_dir', '"""X_test.npy"""'], {}), "(data_dir, 'X_test.npy')\n", (646, 670), False, 'from os import path, listdir\n'), ((690, 724), 'os.path.join', 'path.join', (['data_dir', '"""y_train.npy"""'], {}), "(data_dir, 'y_train.npy')\n", (699, 724), False, 'from os import path, listdir\n'), ((1761, 1782), 'os.path.exists', 'path.exists', (['save_dir'], {}), '(save_dir)\n', (1772, 1782), False, 'from os import path, listdir\n'), ((1788, 1809), 'os.makedirs', 'os.makedirs', (['save_dir'], {}), '(save_dir)\n', (1799, 1809), False, 'import os\n'), ((1819, 1853), 'os.path.join', 'path.join', (['save_dir', '"""X_train.npy"""'], {}), "(save_dir, 'X_train.npy')\n", (1828, 1853), False, 'from os import path, listdir\n'), ((1876, 1909), 'os.path.join', 'path.join', (['save_dir', '"""X_test.npy"""'], {}), "(save_dir, 'X_test.npy')\n", (1885, 1909), False, 'from os import path, listdir\n'), ((1220, 1255), 'os.path.join', 
'path.join', (['model_dir', '"""outFold.npy"""'], {}), "(model_dir, 'outFold.npy')\n", (1229, 1255), False, 'from os import path, listdir\n'), ((1270, 1308), 'os.path.join', 'path.join', (['model_dir', '"""outFold_rf.npy"""'], {}), "(model_dir, 'outFold_rf.npy')\n", (1279, 1308), False, 'from os import path, listdir\n'), ((1323, 1362), 'os.path.join', 'path.join', (['model_dir', '"""outFold_ext.npy"""'], {}), "(model_dir, 'outFold_ext.npy')\n", (1332, 1362), False, 'from os import path, listdir\n'), ((1407, 1439), 'os.path.join', 'path.join', (['model_dir', '"""pred.npy"""'], {}), "(model_dir, 'pred.npy')\n", (1416, 1439), False, 'from os import path, listdir\n'), ((1454, 1489), 'os.path.join', 'path.join', (['model_dir', '"""pred_rf.npy"""'], {}), "(model_dir, 'pred_rf.npy')\n", (1463, 1489), False, 'from os import path, listdir\n'), ((1504, 1540), 'os.path.join', 'path.join', (['model_dir', '"""pred_ext.npy"""'], {}), "(model_dir, 'pred_ext.npy')\n", (1513, 1540), False, 'from os import path, listdir\n')] |
import numpy as np
import os.path
class IdentityMetadata():
    """One image of one identity inside a directory-per-person dataset."""

    def __init__(self, base, name, file):
        self.base = base  # dataset base directory
        self.name = name  # identity (sub-directory) name
        self.file = file  # image file name

    def __repr__(self):
        # a metadata record is best identified by the path it points to
        return self.image_path()

    def image_path(self):
        """Full path of the image: <base>/<name>/<file>."""
        return os.path.join(self.base, self.name, self.file)
def load_metadata(path):
    """Scan ``path`` (one sub-directory per identity) and collect an
    IdentityMetadata record for every jpg/jpeg/png image found."""
    metadata = []
    for i in os.listdir(path):
        # skip macOS Finder artifacts
        if '.DS_Store' in i:
            continue
        for f in os.listdir(os.path.join(path, i)):
            # Check file extension. Allow only jpg/jpeg' files.
            ext = os.path.splitext(f)[1]
            if ext == '.jpg' or ext == '.jpeg' or ext == '.png':
                metadata.append(IdentityMetadata(path, i, f))
return np.array(metadata) | [
"numpy.array"
] | [((828, 846), 'numpy.array', 'np.array', (['metadata'], {}), '(metadata)\n', (836, 846), True, 'import numpy as np\n')] |
"""performs procrustes analysis on the two embeddings given, calculates distance between them, returns values as a pandas dataframe. Can also return a procrustes analysis figure for you (if clade membership is given, it will be colored by that"""
import argparse
import sys

import numpy as np
import pandas as pd
from scipy.spatial import procrustes
import seaborn as sns
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import colors as mcolors
from matplotlib.collections import LineCollection

from augur.utils import read_node_data
from augur.utils import write_json
if __name__ == "__main__":
    # command-line interface of the procrustes comparison script
    parser = argparse.ArgumentParser()
    parser.add_argument("--method", help="name of embedding type")
    parser.add_argument("--embeddings", nargs=2, help="embeddings to perform procrustes analysis on")
    parser.add_argument("--columns", nargs=2, help="names of columns in both embeddings to use in the procrustes analysis")
    parser.add_argument("--metadata", help="the node data for clades membership for the figure")
    parser.add_argument("--colors", nargs="+", help="a list of the colors to use in the analysis")
    parser.add_argument("--domain", nargs="+", help="the clade names")
    parser.add_argument("--output-distance-metric", help="JSON file of the distance for exporting to the tree")
    parser.add_argument("--output-figure", help="PNG figure of the procrustes analysis with lines connecting the points. If clade membership is given in the metadata, it will be used")
    parser.add_argument("--output-boxplot", help="PNG figure of the distances for boxplot split by the first embeddings clade membership")
    parser.add_argument("--output-metadata", help="extra information about the data given")
    args = parser.parse_args()
    # NOTE(review): sys is used below but never imported at the top of the
    # file -- these error paths would raise NameError; confirm and add the import
    if args.output_distance_metric is None and args.output_boxplot is not None:
        print("You must create the distance metric to create the boxplot", file=sys.stderr)
        sys.exit(1)
    if args.metadata is None and args.output_boxplot is not None:
        print("You must have metadata to create the boxplot", file=sys.stderr)
        sys.exit(1)
    embedding_1_df = pd.read_csv(args.embeddings[0])
    embedding_2_df = pd.read_csv(args.embeddings[1])
    # optionally annotate both embeddings with clade membership per strain
    if args.metadata is not None:
        node_data = read_node_data(args.metadata)
        metadata_df = clade_annotations = pd.DataFrame([
            {"strain": strain, "clade_membership": annotations["clade_membership"]}
            for strain, annotations in node_data["nodes"].items()])
        embedding_1_df = metadata_df.merge(embedding_1_df, on="strain")
        embedding_2_df = metadata_df.merge(embedding_2_df, on="strain")
    #procrustes analysis on the embeddings
    a = np.array([list(a) for a in zip(embedding_1_df[args.columns[0]].values.tolist(), embedding_1_df[args.columns[1]].values.tolist())])
    b = np.array([list(a) for a in zip(embedding_2_df[args.columns[0]].values.tolist(), embedding_2_df[args.columns[1]].values.tolist())])
    mtx1, mtx2, disparity = procrustes(a, b)
    df1 = pd.DataFrame(mtx1.tolist(), columns =["1_scaled_x", "1_scaled_y"])
    df2 = pd.DataFrame(mtx2.tolist(), columns =["2_scaled_x", "2_scaled_y"])
    merged_scaled_df = pd.merge(df1, df2, left_index=True, right_index=True)
    merged_scaled_df["strain"] = embedding_1_df["strain"]
    # NOTE(review): this line assumes --metadata was given; without it
    # embedding_1_df has no "clade_membership" column and this raises KeyError
    merged_scaled_df["clade_membership"] = embedding_1_df["clade_membership"]
    if args.output_distance_metric is not None:
        # per-strain euclidean distance between the two aligned embeddings
        Ax = merged_scaled_df["1_scaled_x"].to_numpy()
        Ay = merged_scaled_df["1_scaled_y"].to_numpy()
        Ox = merged_scaled_df["2_scaled_x"].to_numpy()
        Oy = merged_scaled_df["2_scaled_y"].to_numpy()
        distance = np.sqrt(np.sum(((Ax-Ox)**2, (Ay-Oy)**2), axis=0))
        merged_scaled_df["distance"] = distance
        if args.output_metadata is not None:
            merged_scaled_df.to_csv(args.output_metadata)
        # strains within mean + 1 std are labelled -1, outliers +1
        classifier_threshold = (np.mean(distance) + (1*np.std(distance)))
        estimated_outlier_status = np.where(distance < classifier_threshold, -1, 1)
        distance_df = pd.DataFrame()
        distance_df["distance_" + str(args.method)] = estimated_outlier_status
        #distance_df["distance_" + str(args.method)] = distance
        distance_df.index = merged_scaled_df["strain"]
        distance_dict = distance_df.transpose().to_dict()
        write_json({"nodes": distance_dict}, args.output_distance_metric)
    if args.output_boxplot is not None:
        sns_plot = sns.catplot(x="clade_membership", y="distance", kind="box", data=merged_scaled_df, height=4, aspect = 2)
        sns_plot.savefig(args.output_boxplot)
    if args.output_figure is not None:
        # build a colored legend from the user-supplied clade/color mapping
        if args.metadata is not None:
            from matplotlib.lines import Line2D
            domain = args.domain
            range_ = args.colors
            print(range_)
            legend_elements = [Line2D([0], [0], color=range_[i], lw=4, label=domain[i]) for i in range(0, len(domain))]
            print(legend_elements)
            df = merged_scaled_df.copy()
            df.replace(dict(zip(domain,range_)), inplace=True)
            val = df["clade_membership"].to_numpy()
            print(val)
        # one line segment per strain connecting its two aligned positions
        line_segments = []
        for i in range(0, len(mtx1)):
            line_segments.append([(mtx1[i,0], mtx1[i,1]), (mtx2[i,0], mtx2[i,1])])
        x = merged_scaled_df["1_scaled_x"].values
        y = merged_scaled_df["1_scaled_y"].values
        pos_x = merged_scaled_df["2_scaled_x"].values
        pos_y = merged_scaled_df["2_scaled_y"].values
        fig, ax = plt.subplots(figsize=(8,8))
        if args.metadata is not None:
            ax.scatter(pos_x, pos_y, c=val)
            ax.legend(handles=legend_elements, loc=1)
        else:
            ax.scatter(pos_x, pos_y)
        ax.scatter(x, y, c="#696969")
        if args.metadata is not None:
            line_segments = LineCollection(line_segments, linewidths=(0.5, 1, 1.5, 2), color=val, linestyle='solid', alpha=0.5)
        else:
            line_segments = LineCollection(line_segments, linewidths=(0.5, 1, 1.5, 2), linestyle='solid', alpha=0.5)
        ax.add_collection(line_segments)
        ax.set_xlim(min(pos_x) - .015 , max(pos_x) + .015)
        ax.set_ylim(min(pos_y) - .015 , max(pos_y) + .015)
plt.savefig(args.output_figure, dpi=300) | [
"augur.utils.write_json",
"numpy.mean",
"matplotlib.pyplot.savefig",
"argparse.ArgumentParser",
"pandas.read_csv",
"numpy.where",
"pandas.merge",
"augur.utils.read_node_data",
"seaborn.catplot",
"matplotlib.collections.LineCollection",
"numpy.sum",
"matplotlib.pyplot.subplots",
"numpy.std",
... | [((618, 643), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (641, 643), False, 'import argparse\n'), ((2148, 2179), 'pandas.read_csv', 'pd.read_csv', (['args.embeddings[0]'], {}), '(args.embeddings[0])\n', (2159, 2179), True, 'import pandas as pd\n'), ((2201, 2232), 'pandas.read_csv', 'pd.read_csv', (['args.embeddings[1]'], {}), '(args.embeddings[1])\n', (2212, 2232), True, 'import pandas as pd\n'), ((3021, 3037), 'scipy.spatial.procrustes', 'procrustes', (['a', 'b'], {}), '(a, b)\n', (3031, 3037), False, 'from scipy.spatial import procrustes\n'), ((3220, 3273), 'pandas.merge', 'pd.merge', (['df1', 'df2'], {'left_index': '(True)', 'right_index': '(True)'}), '(df1, df2, left_index=True, right_index=True)\n', (3228, 3273), True, 'import pandas as pd\n'), ((2288, 2317), 'augur.utils.read_node_data', 'read_node_data', (['args.metadata'], {}), '(args.metadata)\n', (2302, 2317), False, 'from augur.utils import read_node_data\n'), ((4011, 4059), 'numpy.where', 'np.where', (['(distance < classifier_threshold)', '(-1)', '(1)'], {}), '(distance < classifier_threshold, -1, 1)\n', (4019, 4059), True, 'import numpy as np\n'), ((4083, 4097), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (4095, 4097), True, 'import pandas as pd\n'), ((4363, 4428), 'augur.utils.write_json', 'write_json', (["{'nodes': distance_dict}", 'args.output_distance_metric'], {}), "({'nodes': distance_dict}, args.output_distance_metric)\n", (4373, 4428), False, 'from augur.utils import write_json\n'), ((5591, 5619), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(8, 8)'}), '(figsize=(8, 8))\n', (5603, 5619), True, 'import matplotlib.pyplot as plt\n'), ((6319, 6359), 'matplotlib.pyplot.savefig', 'plt.savefig', (['args.output_figure'], {'dpi': '(300)'}), '(args.output_figure, dpi=300)\n', (6330, 6359), True, 'import matplotlib.pyplot as plt\n'), ((3707, 3755), 'numpy.sum', 'np.sum', (['((Ax - Ox) ** 2, (Ay - Oy) ** 2)'], {'axis': '(0)'}), '(((Ax - Ox) ** 
2, (Ay - Oy) ** 2), axis=0)\n', (3713, 3755), True, 'import numpy as np\n'), ((3934, 3951), 'numpy.mean', 'np.mean', (['distance'], {}), '(distance)\n', (3941, 3951), True, 'import numpy as np\n'), ((4498, 4605), 'seaborn.catplot', 'sns.catplot', ([], {'x': '"""clade_membership"""', 'y': '"""distance"""', 'kind': '"""box"""', 'data': 'merged_scaled_df', 'height': '(4)', 'aspect': '(2)'}), "(x='clade_membership', y='distance', kind='box', data=\n merged_scaled_df, height=4, aspect=2)\n", (4509, 4605), True, 'import seaborn as sns\n'), ((5911, 6014), 'matplotlib.collections.LineCollection', 'LineCollection', (['line_segments'], {'linewidths': '(0.5, 1, 1.5, 2)', 'color': 'val', 'linestyle': '"""solid"""', 'alpha': '(0.5)'}), "(line_segments, linewidths=(0.5, 1, 1.5, 2), color=val,\n linestyle='solid', alpha=0.5)\n", (5925, 6014), False, 'from matplotlib.collections import LineCollection\n'), ((6053, 6146), 'matplotlib.collections.LineCollection', 'LineCollection', (['line_segments'], {'linewidths': '(0.5, 1, 1.5, 2)', 'linestyle': '"""solid"""', 'alpha': '(0.5)'}), "(line_segments, linewidths=(0.5, 1, 1.5, 2), linestyle=\n 'solid', alpha=0.5)\n", (6067, 6146), False, 'from matplotlib.collections import LineCollection\n'), ((3957, 3973), 'numpy.std', 'np.std', (['distance'], {}), '(distance)\n', (3963, 3973), True, 'import numpy as np\n'), ((4902, 4958), 'matplotlib.lines.Line2D', 'Line2D', (['[0]', '[0]'], {'color': 'range_[i]', 'lw': '(4)', 'label': 'domain[i]'}), '([0], [0], color=range_[i], lw=4, label=domain[i])\n', (4908, 4958), False, 'from matplotlib.lines import Line2D\n')] |
import numpy as np
import tensorflow as tf
from basic_nn import Linear
def mse(y_pred, y_true):
    """Mean squared error between predictions and ground-truth targets."""
    squared_error = (y_pred - y_true) ** 2
    return tf.reduce_mean(squared_error)
if __name__ == "__main__":
    # Toy linear-regression demo: each input row is (value, bias-term 1.0)
    # and each target row is (value, 2 * value).
    f = np.asarray([[1, 1],[2, 1], [3, 1], [4, 1], [5, 1]], dtype=float)
    t = np.asarray([[1, 2], [2, 4], [3, 6], [4, 8], [5, 10]], dtype=float)
    # graph placeholders for a batch of 2-d inputs and 2-d targets
    x = tf.placeholder(tf.float32, [None, 2])
    y = tf.placeholder(tf.float32, [None, 2])
    # single linear layer mapping 2 inputs -> 2 outputs
    l1 = Linear(2, 2, "LinearRegression")
    cost = mse(l1(x), y)
    # variables must be initialised before any training step runs
    init = tf.global_variables_initializer()
    optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.01).minimize(cost)
    with tf.Session() as sess:
        sess.run(init)
        feed_dict = {
            x: f,
            y: t
        }
        # 100 full-batch gradient-descent updates, then report final loss
        for i in range(100):
            sess.run(optimizer, feed_dict=feed_dict)
        print(sess.run(cost, feed_dict=feed_dict))
"basic_nn.Linear",
"tensorflow.placeholder",
"tensorflow.Session",
"numpy.asarray",
"tensorflow.global_variables_initializer",
"tensorflow.train.GradientDescentOptimizer",
"tensorflow.reduce_mean"
] | [((108, 146), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['((y_pred - y_true) ** 2)'], {}), '((y_pred - y_true) ** 2)\n', (122, 146), True, 'import tensorflow as tf\n'), ((181, 246), 'numpy.asarray', 'np.asarray', (['[[1, 1], [2, 1], [3, 1], [4, 1], [5, 1]]'], {'dtype': 'float'}), '([[1, 1], [2, 1], [3, 1], [4, 1], [5, 1]], dtype=float)\n', (191, 246), True, 'import numpy as np\n'), ((254, 320), 'numpy.asarray', 'np.asarray', (['[[1, 2], [2, 4], [3, 6], [4, 8], [5, 10]]'], {'dtype': 'float'}), '([[1, 2], [2, 4], [3, 6], [4, 8], [5, 10]], dtype=float)\n', (264, 320), True, 'import numpy as np\n'), ((329, 366), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None, 2]'], {}), '(tf.float32, [None, 2])\n', (343, 366), True, 'import tensorflow as tf\n'), ((375, 412), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None, 2]'], {}), '(tf.float32, [None, 2])\n', (389, 412), True, 'import tensorflow as tf\n'), ((422, 454), 'basic_nn.Linear', 'Linear', (['(2)', '(2)', '"""LinearRegression"""'], {}), "(2, 2, 'LinearRegression')\n", (428, 454), False, 'from basic_nn import Linear\n'), ((491, 524), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (522, 524), True, 'import tensorflow as tf\n'), ((624, 636), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (634, 636), True, 'import tensorflow as tf\n'), ((541, 594), 'tensorflow.train.GradientDescentOptimizer', 'tf.train.GradientDescentOptimizer', ([], {'learning_rate': '(0.01)'}), '(learning_rate=0.01)\n', (574, 594), True, 'import tensorflow as tf\n')] |
import numpy as np
import numba as nb
import scrtbp.exceptions as exceptions
from scrtbp.taylor import expansion
from scrtbp.taylor import steppers
from scrtbp.util import root
def generate_event_observer(StepperClass, FuncAdapter, one_way_mode=True):
    """Create a numba jitclass that watches an event function along a flow.

    Args:
        StepperClass: jit-compiled stepper class driving the integration.
        FuncAdapter: jit-compiled adapter evaluating the event function on
            the stepper's Taylor expansion.
        one_way_mode: if True, only sign changes from negative to positive
            count as events; otherwise every sign crossing does.

    Returns:
        EventObserver jitclass; an instance caches the event-function value
        at both endpoints of the current step and can detect, locate and
        extract the event root within that step.
    """
    if one_way_mode:
        # only - to + roots are detected
        def py_root_condition(fa, fb):
            return fa < 0.0 and 0.0 < fb

    else:
        # all sign crossings are detected as roots
        def py_root_condition(fa, fb):
            return fa * fb < 0.0

    root_condition = nb.njit(py_root_condition)

    event_observer_spec = dict(
        stepper=StepperClass.class_type.instance_type,
        func=FuncAdapter.class_type.instance_type,
        t=nb.float64,
        f=nb.float64,
        next_t=nb.float64,
        next_f=nb.float64,
    )

    @nb.jitclass(event_observer_spec)
    class EventObserver:
        def __init__(self, stepper):
            self.stepper = stepper
            self.func = FuncAdapter(stepper.expansion.series)
            self.update()

        def update(self):
            # event-function values at the start and end of the current step
            self.t = self.stepper.t
            self.f = self.func.eval_from_state(self.stepper.expansion.state)
            self.next_t = self.stepper.next_t
            self.next_f = self.func.eval(self.stepper.step)

        def cached_update(self):
            # after a single advance, the previous "next" values become the
            # current ones; only the new step endpoint needs re-evaluation
            if self.next_t == self.stepper.t:
                self.t = self.next_t
                self.f = self.next_f
                self.next_t = self.stepper.next_t
                self.next_f = self.func.eval(self.stepper.step)
            else:
                self.update()

        def event_detected(self):
            if self.t != self.stepper.t:
                self.cached_update()
            return self.f == 0.0 or root_condition(self.f, self.next_f)

        def get_brackets(self):
            return root.Brackets(0.0, self.f, self.stepper.step, self.next_f)

        def resolve_event(self):
            # returns the step offset (stepper-local time) of the event root
            if self.f == 0.0:
                return 0.0
            else:
                brackets = self.get_brackets()
                return root.solve(self.func, brackets)

        def extract_event(self, output):
            # write the event state into `output` and return the event time
            if self.f == 0.0:
                # BUGFIX: copy in place into the caller's array; the old
                # `output = ...` merely rebound the local name, leaving the
                # caller's buffer untouched when the event sits exactly on
                # the step start.
                output[:] = self.stepper.expansion.state
                return self.t
            else:
                root_step = self.resolve_event()
                self.stepper.expansion.eval(root_step, output)
                return self.t + root_step

    return EventObserver
def generate_event_solver(
    taylor_coeff_func,
    state_dim,
    extra_dim,
    event_func,
    step=0.01,
    order=20,
    max_event_steps=1000000,
    max_steps=1000000000,
    one_way_mode=True,
):
    """Build a jit-compiled fixed-step solver that collects event points.

    The returned ``solve_points(input_state, n_points, t0=0.0)`` integrates
    the flow with a fixed step size and records the state and time of the
    first ``n_points`` roots of ``event_func``, raising
    ``exceptions.MaxStepsExceeded`` if the step budget runs out first.
    """
    # assemble the compiled building blocks for this ODE / event pair
    series_expansion = expansion.generate_taylor_expansion(
        taylor_coeff_func, state_dim, extra_dim
    )
    event_adapter = expansion.generate_func_adapter(event_func)
    FixedStepper = steppers.generate_fixed_stepper(series_expansion)
    LimiterProxy = steppers.generate_step_limter_proxy(FixedStepper)
    Observer = generate_event_observer(FixedStepper, event_adapter, one_way_mode)

    @nb.njit
    def solve_points(input_state, n_points, t0=0.0):
        points = np.empty((n_points, state_dim))
        times = np.empty(n_points)

        stepper = FixedStepper(input_state, t0, step, order)
        observer = Observer(stepper)
        limiter = LimiterProxy(stepper, max_event_steps, max_steps)

        found = 0
        while limiter.valid():
            if observer.event_detected():
                times[found] = observer.extract_event(points[found])
                found += 1
                if found == n_points:
                    break
                limiter.reset_constraint()
            limiter.advance()
        else:
            # loop exhausted its step budget before n_points events occurred
            raise exceptions.MaxStepsExceeded
        return points, times

    return solve_points
def generate_adaptive_event_solver(
    taylor_coeff_func,
    state_dim,
    extra_dim,
    event_func,
    order=20,
    tol_abs=1e-16,
    tol_rel=1e-10,
    max_event_steps=1000000,
    max_steps=1000000000,
    one_way_mode=True,
):
    """Build a jit-compiled adaptive-step solver that collects event points.

    Like ``generate_event_solver``, but the returned ``solve_points``
    integrates with an error-controlled adaptive step size governed by
    ``tol_abs`` / ``tol_rel`` instead of a fixed step.
    """
    # assemble the compiled building blocks for this ODE / event pair
    series_expansion = expansion.generate_taylor_expansion(
        taylor_coeff_func, state_dim, extra_dim
    )
    event_adapter = expansion.generate_func_adapter(event_func)
    AdaptiveStepper = steppers.generate_adaptive_stepper(series_expansion)
    LimiterProxy = steppers.generate_step_limter_proxy(AdaptiveStepper)
    Observer = generate_event_observer(AdaptiveStepper, event_adapter, one_way_mode)

    @nb.njit
    def solve_points(input_state, n_points, t0=0.0):
        points = np.empty((n_points, state_dim))
        times = np.empty(n_points)

        stepper = AdaptiveStepper(input_state, t0, order, tol_abs, tol_rel)
        observer = Observer(stepper)
        limiter = LimiterProxy(stepper, max_event_steps, max_steps)

        found = 0
        while limiter.valid():
            if observer.event_detected():
                times[found] = observer.extract_event(points[found])
                found += 1
                if found == n_points:
                    break
                limiter.reset_constraint()
            limiter.advance()
        else:
            # ran out of allowed steps before collecting n_points events
            raise exceptions.MaxStepsExceeded
        return points, times

    return solve_points
| [
"scrtbp.util.root.Brackets",
"scrtbp.taylor.expansion.generate_taylor_expansion",
"scrtbp.taylor.steppers.generate_step_limter_proxy",
"scrtbp.taylor.steppers.generate_fixed_stepper",
"numba.njit",
"scrtbp.util.root.solve",
"numba.jitclass",
"numpy.empty",
"scrtbp.taylor.steppers.generate_adaptive_s... | [((553, 579), 'numba.njit', 'nb.njit', (['py_root_condition'], {}), '(py_root_condition)\n', (560, 579), True, 'import numba as nb\n'), ((829, 861), 'numba.jitclass', 'nb.jitclass', (['event_observer_spec'], {}), '(event_observer_spec)\n', (840, 861), True, 'import numba as nb\n'), ((2702, 2778), 'scrtbp.taylor.expansion.generate_taylor_expansion', 'expansion.generate_taylor_expansion', (['taylor_coeff_func', 'state_dim', 'extra_dim'], {}), '(taylor_coeff_func, state_dim, extra_dim)\n', (2737, 2778), False, 'from scrtbp.taylor import expansion\n'), ((2811, 2854), 'scrtbp.taylor.expansion.generate_func_adapter', 'expansion.generate_func_adapter', (['event_func'], {}), '(event_func)\n', (2842, 2854), False, 'from scrtbp.taylor import expansion\n'), ((2869, 2917), 'scrtbp.taylor.steppers.generate_fixed_stepper', 'steppers.generate_fixed_stepper', (['TaylorExpansion'], {}), '(TaylorExpansion)\n', (2900, 2917), False, 'from scrtbp.taylor import steppers\n'), ((2941, 2985), 'scrtbp.taylor.steppers.generate_step_limter_proxy', 'steppers.generate_step_limter_proxy', (['Stepper'], {}), '(Stepper)\n', (2976, 2985), False, 'from scrtbp.taylor import steppers\n'), ((4095, 4171), 'scrtbp.taylor.expansion.generate_taylor_expansion', 'expansion.generate_taylor_expansion', (['taylor_coeff_func', 'state_dim', 'extra_dim'], {}), '(taylor_coeff_func, state_dim, extra_dim)\n', (4130, 4171), False, 'from scrtbp.taylor import expansion\n'), ((4204, 4247), 'scrtbp.taylor.expansion.generate_func_adapter', 'expansion.generate_func_adapter', (['event_func'], {}), '(event_func)\n', (4235, 4247), False, 'from scrtbp.taylor import expansion\n'), ((4262, 4313), 'scrtbp.taylor.steppers.generate_adaptive_stepper', 'steppers.generate_adaptive_stepper', (['TaylorExpansion'], {}), '(TaylorExpansion)\n', (4296, 4313), False, 'from scrtbp.taylor import steppers\n'), ((4337, 4381), 'scrtbp.taylor.steppers.generate_step_limter_proxy', 
'steppers.generate_step_limter_proxy', (['Stepper'], {}), '(Stepper)\n', (4372, 4381), False, 'from scrtbp.taylor import steppers\n'), ((3150, 3181), 'numpy.empty', 'np.empty', (['(n_points, state_dim)'], {}), '((n_points, state_dim))\n', (3158, 3181), True, 'import numpy as np\n'), ((3198, 3216), 'numpy.empty', 'np.empty', (['n_points'], {}), '(n_points)\n', (3206, 3216), True, 'import numpy as np\n'), ((4546, 4577), 'numpy.empty', 'np.empty', (['(n_points, state_dim)'], {}), '((n_points, state_dim))\n', (4554, 4577), True, 'import numpy as np\n'), ((4594, 4612), 'numpy.empty', 'np.empty', (['n_points'], {}), '(n_points)\n', (4602, 4612), True, 'import numpy as np\n'), ((1848, 1906), 'scrtbp.util.root.Brackets', 'root.Brackets', (['(0.0)', 'self.f', 'self.stepper.step', 'self.next_f'], {}), '(0.0, self.f, self.stepper.step, self.next_f)\n', (1861, 1906), False, 'from scrtbp.util import root\n'), ((2086, 2117), 'scrtbp.util.root.solve', 'root.solve', (['self.func', 'brackets'], {}), '(self.func, brackets)\n', (2096, 2117), False, 'from scrtbp.util import root\n')] |
###############################################################################
# PyDial: Multi-domain Statistical Spoken Dialogue System Software
###############################################################################
#
# Copyright 2015 - 2017
# Cambridge University Engineering Department Dialogue Systems Group
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
###############################################################################
'''
sclstm.py - Semantically Conditioned LSTM Generator
===========================================================
Copyright CUED Dialogue Systems Group 2015 - 2017
.. seealso:: CUED Imports/Dependencies:
import :class:`semo.RNNLG.nn.basic` |.|
************************
'''
import operator
import numpy as np
import theano.tensor as T
from Queue import PriorityQueue
from basic import *
class sclstm(BaseRLG):
    '''
    Semantically Conditioned LSTM Generator (SC-LSTM).

    An LSTM language generator whose cell update is additionally
    conditioned on a dialogue-act (DA) encoding: a learned "reading gate"
    rg progressively consumes a 1-hot slot-value vector sv as words are
    emitted, and a semantic-alignment cost penalises leftover or
    too-quickly-consumed semantics.  Training uses theano; decoding
    (beam search / sampling) runs on cached numpy copies of the weights.
    '''
    def __init__(self, gentype, vocab, beamwidth, overgen,
            vocab_size, hidden_size, batch_size, da_sizes):
        """Initialise the generator and all weight matrices.

        Args:
            gentype:     generator type tag (forwarded to BaseRLG).
            vocab:       vocabulary used for decoding.
            beamwidth:   beam width during decoding.
            overgen:     number of over-generated candidate sentences.
            vocab_size:  vocabulary size (di).
            hidden_size: LSTM hidden size (dh).
            batch_size:  training batch size (db).
            da_sizes:    feature slice boundaries (becomes self.dfs).
        """
        # calling superclass constructor
        BaseRLG.__init__(self, gentype, beamwidth, overgen,
                vocab_size, hidden_size, batch_size, da_sizes)
        # slot-value vector size and dialogue-act size, derived from the
        # slice boundaries in self.dfs (set by BaseRLG)
        self.dsv = self.dfs[2]-self.dfs[1]
        self.da = self.dfs[1]-self.dfs[0]
        self.vocab = vocab
        # init params
        self._init_params()

    def _init_params(self):
        """Create theano shared variables for all trainable parameters.

        Weights are sampled uniformly from [-0.3, 0.3]; the initial
        hidden state h0 and memory cell c0 are zeros.
        """
        # word embedding weight matrix, shape (di, dh)
        self.Wemb = theano.shared(0.3 * np.random.uniform(-1.0,1.0,\
                (self.di,self.dh)).astype(theano.config.floatX))
        # lstm gate weight matrix: input/forget/output gates computed
        # jointly from [word vec, h_tm1, sv_tm1]
        self.Wgate = theano.shared(0.3 * np.random.uniform(-1.0,1.0,\
                (self.dh*2+self.dsv,self.dh*3)).astype(theano.config.floatX))
        # for reading gate
        self.Wrgate = theano.shared(0.3 * np.random.uniform(-1.0,1.0,\
                (self.dh*2+self.dsv,self.dsv)).\
                astype(theano.config.floatX))
        # for overriding the memory cell
        self.Wcx = theano.shared(0.3 * np.random.uniform(-1.0,1.0,\
                (self.dh*2,self.dh)).astype(theano.config.floatX))
        # 1hot DA to distributed vector weight matrix
        self.Wfc= theano.shared(0.3 * np.random.uniform(-1.0,1.0,\
                (self.da+self.dsv,self.dh)).astype(theano.config.floatX))
        # hidden to output weight matrix
        self.Who= theano.shared(0.3 * np.random.uniform(-1.0,1.0,\
                (self.dh,self.di)).astype(theano.config.floatX))
        # initial memory cell and hidden layer
        self.h0 = theano.shared(np.zeros((self.db,self.dh),
            dtype=theano.config.floatX))
        self.c0 = theano.shared(np.zeros((self.db,self.dh),
            dtype=theano.config.floatX))
        # all parameters
        self.params = [
                self.Wemb,
                self.Wgate, self.Wrgate,
                self.Wcx,   self.Wfc,
                self.Who ]

    def setWordVec(self,word2vec):
        '''
        Set pretrained word embeddings.

        Overwrites the embedding rows given in the word2vec mapping
        (word index -> vector); python2 dict.iteritems() is used.
        '''
        self.Wemb_np = self.Wemb.get_value()
        for w,v in word2vec.iteritems():
            self.Wemb_np[w,:] = v
        self.Wemb.set_value(self.Wemb_np)

    def _form1hot(self, hot_x, idx_x, cutoff_x):
        '''
        Form 1-hot representation for DA: set the first cutoff_x indices
        from idx_x to 1.0 in hot_x (symbolic, used inside theano.scan).
        '''
        update_x = T.set_subtensor(hot_x[idx_x[:cutoff_x]],1.0)
        return update_x

    def unroll(self,a,sv,words,cutoff_f,cutoff_b):
        """Build the training graph: unroll the SC-LSTM over a batch.

        Args:
            a, sv:    dialogue-act / slot-value index matrices per example.
            words:    word-index matrix, time-major (first axis is time).
            cutoff_f: per-example lengths (DA, SV, ..., sentence length).
            cutoff_b: batch cutoff.

        Returns:
            (cost, cutoff_logp): total cost (negative log-likelihood plus
            the semantic-alignment penalty) and per-sentence log-probs.
        """
        # form 1-hot representation
        a_1hot = theano.shared(np.zeros((self.db,self.da),
            dtype=theano.config.floatX))
        sv_1hot= theano.shared(np.zeros((self.db,self.dsv),
            dtype=theano.config.floatX))
        a_1hot ,_= theano.scan(fn=self._form1hot,
                sequences=[a_1hot,a,cutoff_f[0]])
        sv_1hot,_= theano.scan(fn=self._form1hot,
                sequences=[sv_1hot,sv,cutoff_f[1]])
        # recurrence over time steps: input word at t-1, target word at t
        [f,h,c,p],_ = theano.scan(fn=self._recur,
                sequences=[words[:-1,:],words[1:,:]],
                outputs_info=[sv_1hot,self.h0,self.c0,None],
                non_sequences=[a_1hot])
        # compute desired sent_logp by slicing
        cutoff_logp = collectSentLogp(p,cutoff_f[4],cutoff_b)
        # semantic alignment cost: penalise (1) initial reading-gate drift,
        # (2) unconsumed semantics at the end, (3) too-fast consumption
        semcost = T.sum(abs(f[0,:,:]-sv_1hot))+\
                T.sum(abs(f[-1,:,:]))+\
                T.sum(0.0001*(100.0**abs(f[:-1,:,:]-f[1:,:,:])))
        cost = -T.sum(cutoff_logp) + semcost
        return cost, cutoff_logp

    def _recur(self, w_t, y_t, sv_tm1, h_tm1, c_tm1, a):
        """One symbolic SC-LSTM step (used inside theano.scan).

        Args:
            w_t:    current input word indices.
            y_t:    next (target) word indices.
            sv_tm1: remaining slot-value vector from the previous step.
            h_tm1, c_tm1: previous hidden state / memory cell.
            a:      1-hot dialogue-act matrix (non-sequence).

        Returns:
            (sv_t, h_t, c_t, p_t): updated SV vector, hidden state,
            memory cell, and probability of the target word.
        """
        # input word embedding
        wv_t = T.nnet.sigmoid(self.Wemb[w_t,:])
        # compute ig, fg, og together and slice it
        gates_t = T.dot( T.concatenate([wv_t,h_tm1,sv_tm1],axis=1),self.Wgate)
        ig  = T.nnet.sigmoid(gates_t[:,:self.dh])
        fg  = T.nnet.sigmoid(gates_t[:,self.dh:self.dh*2])
        og  = T.nnet.sigmoid(gates_t[:,self.dh*2:self.dh*3])
        # compute reading rg
        rg  = T.nnet.sigmoid(T.dot(
            T.concatenate([wv_t,h_tm1,sv_tm1],axis=1),self.Wrgate))
        # compute proposed cell value
        cx_t= T.tanh(T.dot(T.concatenate([wv_t,h_tm1],axis=1),self.Wcx))
        # update DA 1-hot vector: reading gate consumes the semantics
        sv_t = rg*sv_tm1
        # update lstm internal state (extra DA-conditioned term vs vanilla LSTM)
        c_t = ig*cx_t + fg*c_tm1 + \
                T.tanh(T.dot(T.concatenate([a,sv_t],axis=1),self.Wfc))
        # obtain new hiddne layer
        h_t = og*T.tanh(c_t)
        # compute output distribution target word prob
        o_t = T.nnet.softmax( T.dot(h_t,self.Who) )
        p_t = o_t[T.arange(self.db),y_t]
        return sv_t, h_t, c_t, p_t

    def _get1hot(self,idxes,dim):
        """Return a numpy 1-hot vector of length dim with idxes set to 1."""
        vec = np.zeros(dim)
        vec[idxes] = 1.0
        return vec

    def beamSearch(self,a,sv):
        """Beam-search decoding for one dialogue act.

        Args:
            a:  dialogue-act indices.
            sv: slot-value indices.

        Returns:
            list of (score, word-index list) tuples, best first.
            Word index 1 is treated as the sentence-end token.
        """
        # get 1 hot vector
        a = self._get1hot(a,self.da)
        sv= self._get1hot(sv,self.dsv)
        # end nodes
        endnodes = []
        # initial layers
        h0,c0 = np.zeros(self.dh),np.zeros(self.dh)
        # starting node
        node = BeamSearchNode(h0,c0,None,1,0,1)
        node.sv = sv
        node.a = a
        # queue for beam search; entries are (-score, node) so the best
        # (highest-score) node is popped first
        nodes= PriorityQueue()
        nodes.put((-node.eval(),node))
        qsize = 1
        # start beam search
        while True:
            # give up when decoding takes too long
            if qsize>10000: break
            # fetch the best node
            score, n = nodes.get()
            # if end of sentence token
            if n.wordid==1 and n.prevNode!=None:
                # update score with sem cost (unconsumed semantics penalty)
                n.logp -= np.sum(abs(n.sv))
                score = -n.eval()
                endnodes.append((score,n))
                # if reach maximum # of sentences required
                if len(endnodes)>=self.overgen: break
                else: continue
            # decode for one step using decoder
            words, probs, sv, c, h = self._gen(n)
            # put them into a queue
            for i in range(len(words)):
                node = BeamSearchNode(h,c,n,words[i],
                        n.logp+np.log10(probs[i])-
                        np.sum(0.0001*(100.0**abs(sv-n.sv)))
                        ,n.leng+1)
                node.sv = sv
                node.a = a
                nodes.put( (-node.eval(),node) )
            # increase qsize
            qsize += len(words)-1
        # if no finished nodes, choose the top scored paths
        if len(endnodes)==0:
            endnodes = [nodes.get() for n in range(self.overgen)]
        # choose nbest paths, back trace them
        utts = []
        for score,n in sorted(endnodes,key=operator.itemgetter(0)):
            utt = [n.wordid]
            while n.prevNode!=None:
                # back trace
                n = n.prevNode
                utt.append(n.wordid)
            utt = utt[::-1]
            utts.append((score,utt))
        return utts

    def sample(self,a,sv):
        """Random-sampling decoding for one dialogue act.

        Draws up to self.overgen sampled sentences (duplicates removed)
        and returns them as (score, word-index list) tuples sorted by
        score.  Word index 1 ends a sentence; sampling also stops after
        40 words to avoid runaway utterances.
        """
        # get 1 hot vector
        a0 = self._get1hot(a,self.da)
        sv0= self._get1hot(sv,self.dsv)
        # initial state
        h0,c0 = np.zeros(self.dh),np.zeros(self.dh)
        # container
        gens = []
        # to obtain topk generations
        for i in range(self.overgen):
            # starting node
            node = BeamSearchNode(h0,c0,None,1,0,1)
            node.sv = sv0
            node.a = a0
            # put in queue
            nodes = [[-node.eval(),node]]
            # start sampling
            while True:
                # check stopping criteria
                last_node = nodes[-1][-1]
                if (last_node.wordid==1 and len(nodes)>1) or\
                        len(nodes)>40: # undesirable long utt
                    # update score with sem cost
                    last_node.logp -= np.sum(abs(last_node.sv))
                    score = -last_node.eval()
                    nodes[-1] = [score,last_node]
                    break
                # expand for one time step
                words, probs, sv, c, h = self._gen(last_node)
                # sampling according to probability
                o_sample = np.argmax(np.random.multinomial(1,probs,1))
                # put new node into the queue
                node = BeamSearchNode(h,c,last_node,words[o_sample],
                        last_node.logp+np.log10(probs[o_sample])-
                        np.sum(0.0001*(100.0**abs(sv-last_node.sv))),
                        last_node.leng+1)
                node.sv = sv
                node.a = a0
                nodes.append( [-node.eval(),node] )
            # obtain sentence
            gen = [n.wordid for s,n in nodes]
            score = nodes[-1][0]
            # score the sentences
            # make sure the generated sentence doesn't repeat
            if (score,gen) not in gens:
                gens.append((score,gen))
        # ranking generation according to score
        overgen = self.overgen if len(gens)>self.overgen else len(gens)
        gens = sorted(gens,key=operator.itemgetter(0))[:overgen]
        return gens

    def _gen(self,node):
        """One numpy SC-LSTM decoding step from a beam/sample node.

        Uses the cached *_np weight copies (see loadConverseParams).

        Returns:
            (selected_words, their probs, sv_t, c_t, h_t): the top
            self.beamwidth word indices by probability plus the updated
            slot-value vector, memory cell and hidden state.
        """
        # input word embedding
        wv_t = sigmoid(self.Wemb_np[node.wordid,:])
        # compute ig, fg, og together and slice it
        gates_t = np.dot( np.concatenate(
                [wv_t,node.h,node.sv],axis=0),self.Wgate_np)
        ig  = sigmoid(gates_t[:self.dh])
        fg  = sigmoid(gates_t[self.dh:self.dh*2])
        og  = sigmoid(gates_t[self.dh*2:self.dh*3])
        # compute reading rg
        rg  = sigmoid(np.dot(np.concatenate(
            [wv_t,node.h,node.sv],axis=0),self.Wrgate_np))
        # compute proposed cell value
        cx_t= np.tanh(np.dot(np.concatenate(
            [wv_t,node.h],axis=0),self.Wcx_np))
        # update DA 1-hot vector
        sv_t = np.multiply(rg,node.sv)
        # update lstm internal state
        c_t = np.multiply(ig,cx_t) +\
                np.multiply(fg,node.c)+\
                tanh(np.dot(np.concatenate([node.a,sv_t],axis=0),self.Wfc_np))
        # obtain new hidden layer
        h_t = np.multiply(og,tanh(c_t))
        # compute output distribution target word prob
        o_t = softmax( np.dot(h_t,self.Who_np) )
        # make sure we won't sample unknown word (index 0)
        o_t[0] = 0.0
        selected_words = np.argsort(o_t)[::-1][:self.beamwidth].tolist()
        # return results
        return selected_words, o_t[selected_words], sv_t, c_t, h_t

    def loadConverseParams(self):
        """Cache numpy copies of all parameters for fast CPU decoding."""
        self.Wemb_np    = self.params[0].get_value()
        self.Wgate_np   = self.params[1].get_value()
        self.Wrgate_np  = self.params[2].get_value()
        self.Wcx_np     = self.params[3].get_value()
        self.Wfc_np     = self.params[4].get_value()
        self.Who_np     = self.params[5].get_value()
| [
"numpy.multiply",
"Queue.PriorityQueue",
"numpy.log10",
"theano.tensor.sum",
"numpy.random.multinomial",
"numpy.argsort",
"theano.tensor.arange",
"numpy.zeros",
"numpy.dot",
"numpy.random.uniform",
"theano.tensor.set_subtensor",
"theano.tensor.tanh",
"numpy.concatenate",
"operator.itemgett... | [((3864, 3909), 'theano.tensor.set_subtensor', 'T.set_subtensor', (['hot_x[idx_x[:cutoff_x]]', '(1.0)'], {}), '(hot_x[idx_x[:cutoff_x]], 1.0)\n', (3879, 3909), True, 'import theano.tensor as T\n'), ((5150, 5183), 'theano.tensor.nnet.sigmoid', 'T.nnet.sigmoid', (['self.Wemb[w_t, :]'], {}), '(self.Wemb[w_t, :])\n', (5164, 5183), True, 'import theano.tensor as T\n'), ((5327, 5363), 'theano.tensor.nnet.sigmoid', 'T.nnet.sigmoid', (['gates_t[:, :self.dh]'], {}), '(gates_t[:, :self.dh])\n', (5341, 5363), True, 'import theano.tensor as T\n'), ((5377, 5424), 'theano.tensor.nnet.sigmoid', 'T.nnet.sigmoid', (['gates_t[:, self.dh:self.dh * 2]'], {}), '(gates_t[:, self.dh:self.dh * 2])\n', (5391, 5424), True, 'import theano.tensor as T\n'), ((5436, 5487), 'theano.tensor.nnet.sigmoid', 'T.nnet.sigmoid', (['gates_t[:, self.dh * 2:self.dh * 3]'], {}), '(gates_t[:, self.dh * 2:self.dh * 3])\n', (5450, 5487), True, 'import theano.tensor as T\n'), ((6226, 6239), 'numpy.zeros', 'np.zeros', (['dim'], {}), '(dim)\n', (6234, 6239), True, 'import numpy as np\n'), ((6698, 6713), 'Queue.PriorityQueue', 'PriorityQueue', ([], {}), '()\n', (6711, 6713), False, 'from Queue import PriorityQueue\n'), ((11344, 11368), 'numpy.multiply', 'np.multiply', (['rg', 'node.sv'], {}), '(rg, node.sv)\n', (11355, 11368), True, 'import numpy as np\n'), ((3110, 3166), 'numpy.zeros', 'np.zeros', (['(self.db, self.dh)'], {'dtype': 'theano.config.floatX'}), '((self.db, self.dh), dtype=theano.config.floatX)\n', (3118, 3166), True, 'import numpy as np\n'), ((3211, 3267), 'numpy.zeros', 'np.zeros', (['(self.db, self.dh)'], {'dtype': 'theano.config.floatX'}), '((self.db, self.dh), dtype=theano.config.floatX)\n', (3219, 3267), True, 'import numpy as np\n'), ((4053, 4109), 'numpy.zeros', 'np.zeros', (['(self.db, self.da)'], {'dtype': 'theano.config.floatX'}), '((self.db, self.da), dtype=theano.config.floatX)\n', (4061, 4109), True, 'import numpy as np\n'), ((4153, 4210), 'numpy.zeros', 
'np.zeros', (['(self.db, self.dsv)'], {'dtype': 'theano.config.floatX'}), '((self.db, self.dsv), dtype=theano.config.floatX)\n', (4161, 4210), True, 'import numpy as np\n'), ((5259, 5303), 'theano.tensor.concatenate', 'T.concatenate', (['[wv_t, h_tm1, sv_tm1]'], {'axis': '(1)'}), '([wv_t, h_tm1, sv_tm1], axis=1)\n', (5272, 5303), True, 'import theano.tensor as T\n'), ((5981, 5992), 'theano.tensor.tanh', 'T.tanh', (['c_t'], {}), '(c_t)\n', (5987, 5992), True, 'import theano.tensor as T\n'), ((6078, 6098), 'theano.tensor.dot', 'T.dot', (['h_t', 'self.Who'], {}), '(h_t, self.Who)\n', (6083, 6098), True, 'import theano.tensor as T\n'), ((6502, 6519), 'numpy.zeros', 'np.zeros', (['self.dh'], {}), '(self.dh)\n', (6510, 6519), True, 'import numpy as np\n'), ((6520, 6537), 'numpy.zeros', 'np.zeros', (['self.dh'], {}), '(self.dh)\n', (6528, 6537), True, 'import numpy as np\n'), ((8653, 8670), 'numpy.zeros', 'np.zeros', (['self.dh'], {}), '(self.dh)\n', (8661, 8670), True, 'import numpy as np\n'), ((8671, 8688), 'numpy.zeros', 'np.zeros', (['self.dh'], {}), '(self.dh)\n', (8679, 8688), True, 'import numpy as np\n'), ((10816, 10863), 'numpy.concatenate', 'np.concatenate', (['[wv_t, node.h, node.sv]'], {'axis': '(0)'}), '([wv_t, node.h, node.sv], axis=0)\n', (10830, 10863), True, 'import numpy as np\n'), ((11717, 11741), 'numpy.dot', 'np.dot', (['h_t', 'self.Who_np'], {}), '(h_t, self.Who_np)\n', (11723, 11741), True, 'import numpy as np\n'), ((4975, 4993), 'theano.tensor.sum', 'T.sum', (['cutoff_logp'], {}), '(cutoff_logp)\n', (4980, 4993), True, 'import theano.tensor as T\n'), ((5560, 5604), 'theano.tensor.concatenate', 'T.concatenate', (['[wv_t, h_tm1, sv_tm1]'], {'axis': '(1)'}), '([wv_t, h_tm1, sv_tm1], axis=1)\n', (5573, 5604), True, 'import theano.tensor as T\n'), ((5681, 5717), 'theano.tensor.concatenate', 'T.concatenate', (['[wv_t, h_tm1]'], {'axis': '(1)'}), '([wv_t, h_tm1], axis=1)\n', (5694, 5717), True, 'import theano.tensor as T\n'), ((6118, 6135), 
'theano.tensor.arange', 'T.arange', (['self.db'], {}), '(self.db)\n', (6126, 6135), True, 'import theano.tensor as T\n'), ((8206, 8228), 'operator.itemgetter', 'operator.itemgetter', (['(0)'], {}), '(0)\n', (8225, 8228), False, 'import operator\n'), ((11090, 11137), 'numpy.concatenate', 'np.concatenate', (['[wv_t, node.h, node.sv]'], {'axis': '(0)'}), '([wv_t, node.h, node.sv], axis=0)\n', (11104, 11137), True, 'import numpy as np\n'), ((11232, 11270), 'numpy.concatenate', 'np.concatenate', (['[wv_t, node.h]'], {'axis': '(0)'}), '([wv_t, node.h], axis=0)\n', (11246, 11270), True, 'import numpy as np\n'), ((11421, 11442), 'numpy.multiply', 'np.multiply', (['ig', 'cx_t'], {}), '(ig, cx_t)\n', (11432, 11442), True, 'import numpy as np\n'), ((11461, 11484), 'numpy.multiply', 'np.multiply', (['fg', 'node.c'], {}), '(fg, node.c)\n', (11472, 11484), True, 'import numpy as np\n'), ((5888, 5920), 'theano.tensor.concatenate', 'T.concatenate', (['[a, sv_t]'], {'axis': '(1)'}), '([a, sv_t], axis=1)\n', (5901, 5920), True, 'import theano.tensor as T\n'), ((9692, 9726), 'numpy.random.multinomial', 'np.random.multinomial', (['(1)', 'probs', '(1)'], {}), '(1, probs, 1)\n', (9713, 9726), True, 'import numpy as np\n'), ((10566, 10588), 'operator.itemgetter', 'operator.itemgetter', (['(0)'], {}), '(0)\n', (10585, 10588), False, 'import operator\n'), ((11514, 11552), 'numpy.concatenate', 'np.concatenate', (['[node.a, sv_t]'], {'axis': '(0)'}), '([node.a, sv_t], axis=0)\n', (11528, 11552), True, 'import numpy as np\n'), ((2017, 2065), 'numpy.random.uniform', 'np.random.uniform', (['(-1.0)', '(1.0)', '(self.di, self.dh)'], {}), '(-1.0, 1.0, (self.di, self.dh))\n', (2034, 2065), True, 'import numpy as np\n'), ((2187, 2254), 'numpy.random.uniform', 'np.random.uniform', (['(-1.0)', '(1.0)', '(self.dh * 2 + self.dsv, self.dh * 3)'], {}), '(-1.0, 1.0, (self.dh * 2 + self.dsv, self.dh * 3))\n', (2204, 2254), True, 'import numpy as np\n'), ((2363, 2427), 'numpy.random.uniform', 
'np.random.uniform', (['(-1.0)', '(1.0)', '(self.dh * 2 + self.dsv, self.dsv)'], {}), '(-1.0, 1.0, (self.dh * 2 + self.dsv, self.dsv))\n', (2380, 2427), True, 'import numpy as np\n'), ((2567, 2619), 'numpy.random.uniform', 'np.random.uniform', (['(-1.0)', '(1.0)', '(self.dh * 2, self.dh)'], {}), '(-1.0, 1.0, (self.dh * 2, self.dh))\n', (2584, 2619), True, 'import numpy as np\n'), ((2755, 2814), 'numpy.random.uniform', 'np.random.uniform', (['(-1.0)', '(1.0)', '(self.da + self.dsv, self.dh)'], {}), '(-1.0, 1.0, (self.da + self.dsv, self.dh))\n', (2772, 2814), True, 'import numpy as np\n'), ((2937, 2985), 'numpy.random.uniform', 'np.random.uniform', (['(-1.0)', '(1.0)', '(self.dh, self.di)'], {}), '(-1.0, 1.0, (self.dh, self.di))\n', (2954, 2985), True, 'import numpy as np\n'), ((11838, 11853), 'numpy.argsort', 'np.argsort', (['o_t'], {}), '(o_t)\n', (11848, 11853), True, 'import numpy as np\n'), ((7659, 7677), 'numpy.log10', 'np.log10', (['probs[i]'], {}), '(probs[i])\n', (7667, 7677), True, 'import numpy as np\n'), ((9880, 9905), 'numpy.log10', 'np.log10', (['probs[o_sample]'], {}), '(probs[o_sample])\n', (9888, 9905), True, 'import numpy as np\n')] |
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import division, print_function, unicode_literals, \
absolute_import
import os
import unittest
import numpy as np
from pymatgen.io.lammps.output import LammpsRun, LammpsLog, LammpsDump
__author__ = '<NAME>'
__email__ = '<EMAIL>'
test_dir = os.path.join(os.path.dirname(__file__), "..", "..", "..", "..",
"test_files", "lammps")
class TestLammpsDump(unittest.TestCase):
    """Unit tests for parsing LAMMPS dump files."""

    def test_init(self):
        """Check gzipped, wildcard-expanded and triclinic dump parsing."""
        # gzipped dump written every 10 timesteps
        gz_path = os.path.join(test_dir, "dump.rdx.gz")
        dump_every_10 = LammpsDump(filename=gz_path)
        np.testing.assert_array_equal(dump_every_10.timesteps,
                                      np.arange(0, 101, 10))
        first_box = dump_every_10[0]["box"]
        np.testing.assert_array_equal(first_box.bounds,
                                      np.array([(35, 48)] * 3))
        last_atom = dump_every_10[-1]["atoms_data"][-1]
        np.testing.assert_array_equal(last_atom,
                                      [19, 2, 0.42369, 0.47347, 0.555425])
        # wildcard filename expands to one file per timestep; box parsing off
        wc_path = os.path.join(test_dir, "dump.rdx_wc.*")
        dump_every_25 = LammpsDump(filename=wc_path, parse_box=False)
        self.assertEqual(len(dump_every_25), 5)
        np.testing.assert_array_equal(dump_every_25.timesteps,
                                      np.arange(0, 101, 25))
        self.assertNotIn("box", dump_every_25[0])
        # triclinic (tilted) simulation box
        tatb_dump = LammpsDump(filename=os.path.join(test_dir, "dump.tatb"))
        tilted_box = tatb_dump[0]["box"]
        expected_bounds = [[0, 13.624], [0, 17.1149153805], [0, 15.1826391451]]
        expected_tilt = [-5.75315630927, -6.325466, 7.4257288]
        np.testing.assert_array_almost_equal(tilted_box.bounds, expected_bounds)
        np.testing.assert_array_almost_equal(tilted_box.tilt, expected_tilt)
class TestLammpsRun(unittest.TestCase):
    """Unit tests for LammpsRun / LammpsLog parsing of an NVT run."""

    @classmethod
    def setUpClass(cls):
        # parse the fixture files once for all tests in this class
        data_file = os.path.join(test_dir, "nvt.data")
        traj_file = os.path.join(test_dir, "nvt.dump")
        log_file = os.path.join(test_dir, "nvt.log")
        cls.lmps_log = LammpsLog(log_file=log_file)
        cls.lammpsrun = LammpsRun(data_file, traj_file, log_file)

    def test_lammps_log(self):
        """Thermo data: field names, step count and values vs reference."""
        fields = "step vol temp press ke pe etotal enthalpy evdwl ecoul epair " \
                 "ebond eangle edihed eimp " \
                 "emol elong etail lx ly lz xy xz yz density"
        fields = fields.split()
        # reference values exported from the same run
        thermo_data_ans = np.loadtxt(
            os.path.join(test_dir, "thermo_data.txt"))
        thermo_data = self.lammpsrun.log.thermo_data
        self.assertEqual(sorted(list(thermo_data.keys())), sorted(fields))
        # one thermo row per MD step plus the initial state
        self.assertEqual(self.lammpsrun.log.nmdsteps + 1,
                         len(thermo_data['step']))
        data = [thermo_data[k] for k in fields]
        np.testing.assert_almost_equal(data, np.transpose(thermo_data_ans), decimal=10)

    def test_lammps_trajectory(self):
        """Atom rows of one trajectory snapshot match the sorted reference."""
        fields = "Atoms_id atom_type x y z vx vy vz mol mass"
        fields = fields.split()
        timestep_ans = 82
        trajectory_ans = np.loadtxt(os.path.join(test_dir,
                                             "trajectory_timestep_82_sorted.txt"))
        # snapshots are stored every other timestep, natoms rows each;
        # slice out the rows belonging to timestep 82
        begin = int(timestep_ans / 2) * self.lammpsrun.natoms
        end = (int(timestep_ans / 2) + 1) * self.lammpsrun.natoms
        trajectory = self.lammpsrun.trajectory[begin:end]
        # atom ids in the trajectory starts from 0
        np.testing.assert_almost_equal(trajectory[:][fields[0]],
                                       trajectory_ans[:, 0] - 1, decimal=10)
        for i, fld in enumerate(fields[1:]):
            np.testing.assert_almost_equal(trajectory[:][fld],
                                           trajectory_ans[:, i + 1],
                                           decimal=10)

    def test_get_structures_from_trajectory(self):
        # one structure per stored timestep
        structures = self.lammpsrun.get_structures_from_trajectory()
        self.assertEqual(len(structures), len(self.lammpsrun.timesteps))

    def test_get_displacements(self):
        # displacements: (natoms, nsteps - 1, 3); spot-check the last entry
        structure, disp = self.lammpsrun.get_displacements()
        self.assertEqual(disp.shape[0], len(structure))
        self.assertEqual(disp.shape[1], len(self.lammpsrun.timesteps) - 1)
        self.assertEqual(disp.shape[2], 3)
        self.assertAlmostEqual(disp[-1, -1, -1], 0.077079999999999788)

    def test_serialization(self):
        # as_dict / from_dict must round-trip for both run and log objects
        d = self.lammpsrun.as_dict()
        lmps_run = LammpsRun.from_dict(d)
        self.assertDictEqual(d, lmps_run.as_dict())
        d2 = self.lmps_log.as_dict()
        lmps_log = LammpsLog.from_dict(d2)
        self.assertDictEqual(d2, lmps_log.as_dict())
if __name__ == "__main__":
    # discover and run all test cases defined in this module
    unittest.main()
| [
"pymatgen.io.lammps.output.LammpsLog",
"numpy.testing.assert_array_almost_equal",
"pymatgen.io.lammps.output.LammpsRun.from_dict",
"numpy.arange",
"os.path.join",
"os.path.dirname",
"pymatgen.io.lammps.output.LammpsRun",
"numpy.testing.assert_almost_equal",
"pymatgen.io.lammps.output.LammpsLog.from_... | [((388, 413), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (403, 413), False, 'import os\n'), ((4580, 4595), 'unittest.main', 'unittest.main', ([], {}), '()\n', (4593, 4595), False, 'import unittest\n'), ((906, 978), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['atom', '[19, 2, 0.42369, 0.47347, 0.555425]'], {}), '(atom, [19, 2, 0.42369, 0.47347, 0.555425])\n', (935, 978), True, 'import numpy as np\n'), ((1587, 1644), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['tbox.bounds', 'bounds'], {}), '(tbox.bounds, bounds)\n', (1623, 1644), True, 'import numpy as np\n'), ((1653, 1706), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['tbox.tilt', 'tilt'], {}), '(tbox.tilt, tilt)\n', (1689, 1706), True, 'import numpy as np\n'), ((1811, 1845), 'os.path.join', 'os.path.join', (['test_dir', '"""nvt.data"""'], {}), "(test_dir, 'nvt.data')\n", (1823, 1845), False, 'import os\n'), ((1866, 1900), 'os.path.join', 'os.path.join', (['test_dir', '"""nvt.dump"""'], {}), "(test_dir, 'nvt.dump')\n", (1878, 1900), False, 'import os\n'), ((1920, 1953), 'os.path.join', 'os.path.join', (['test_dir', '"""nvt.log"""'], {}), "(test_dir, 'nvt.log')\n", (1932, 1953), False, 'import os\n'), ((1977, 2005), 'pymatgen.io.lammps.output.LammpsLog', 'LammpsLog', ([], {'log_file': 'log_file'}), '(log_file=log_file)\n', (1986, 2005), False, 'from pymatgen.io.lammps.output import LammpsRun, LammpsLog, LammpsDump\n'), ((2030, 2071), 'pymatgen.io.lammps.output.LammpsRun', 'LammpsRun', (['data_file', 'traj_file', 'log_file'], {}), '(data_file, traj_file, log_file)\n', (2039, 2071), False, 'from pymatgen.io.lammps.output import LammpsRun, LammpsLog, LammpsDump\n'), ((3343, 3442), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['trajectory[:][fields[0]]', '(trajectory_ans[:, 0] - 1)'], {'decimal': '(10)'}), 
'(trajectory[:][fields[0]], trajectory_ans[:, \n 0] - 1, decimal=10)\n', (3373, 3442), True, 'import numpy as np\n'), ((4339, 4361), 'pymatgen.io.lammps.output.LammpsRun.from_dict', 'LammpsRun.from_dict', (['d'], {}), '(d)\n', (4358, 4361), False, 'from pymatgen.io.lammps.output import LammpsRun, LammpsLog, LammpsDump\n'), ((4470, 4493), 'pymatgen.io.lammps.output.LammpsLog.from_dict', 'LammpsLog.from_dict', (['d2'], {}), '(d2)\n', (4489, 4493), False, 'from pymatgen.io.lammps.output import LammpsRun, LammpsLog, LammpsDump\n'), ((722, 743), 'numpy.arange', 'np.arange', (['(0)', '(101)', '(10)'], {}), '(0, 101, 10)\n', (731, 743), True, 'import numpy as np\n'), ((828, 852), 'numpy.array', 'np.array', (['([(35, 48)] * 3)'], {}), '([(35, 48)] * 3)\n', (836, 852), True, 'import numpy as np\n'), ((1265, 1286), 'numpy.arange', 'np.arange', (['(0)', '(101)', '(25)'], {}), '(0, 101, 25)\n', (1274, 1286), True, 'import numpy as np\n'), ((2377, 2418), 'os.path.join', 'os.path.join', (['test_dir', '"""thermo_data.txt"""'], {}), "(test_dir, 'thermo_data.txt')\n", (2389, 2418), False, 'import os\n'), ((2750, 2779), 'numpy.transpose', 'np.transpose', (['thermo_data_ans'], {}), '(thermo_data_ans)\n', (2762, 2779), True, 'import numpy as np\n'), ((2988, 3047), 'os.path.join', 'os.path.join', (['test_dir', '"""trajectory_timestep_82_sorted.txt"""'], {}), "(test_dir, 'trajectory_timestep_82_sorted.txt')\n", (3000, 3047), False, 'import os\n'), ((3534, 3626), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['trajectory[:][fld]', 'trajectory_ans[:, i + 1]'], {'decimal': '(10)'}), '(trajectory[:][fld], trajectory_ans[:, i + 1],\n decimal=10)\n', (3564, 3626), True, 'import numpy as np\n'), ((627, 664), 'os.path.join', 'os.path.join', (['test_dir', '"""dump.rdx.gz"""'], {}), "(test_dir, 'dump.rdx.gz')\n", (639, 664), False, 'import os\n'), ((1082, 1121), 'os.path.join', 'os.path.join', (['test_dir', '"""dump.rdx_wc.*"""'], {}), "(test_dir, 'dump.rdx_wc.*')\n", 
(1094, 1121), False, 'import os\n'), ((1387, 1422), 'os.path.join', 'os.path.join', (['test_dir', '"""dump.tatb"""'], {}), "(test_dir, 'dump.tatb')\n", (1399, 1422), False, 'import os\n')] |
# -*- coding:utf-8 -*-
# author:平手友梨奈ii
# e-mail:<EMAIL>
# datetime:1993/12/01
# filename:configs.py
# software: PyCharm
import numpy as np
import tensorflow as tf
import keras.backend as K
from keras.layers import Input, Lambda
from keras.models import Model
from keras.optimizers import Adam
from keras.callbacks import TensorBoard, ModelCheckpoint, ReduceLROnPlateau, EarlyStopping
from nets.yolo4 import yolo_body
from nets.yolo4_loss import yolo_loss
from keras.backend.tensorflow_backend import set_session
from utils.utils import get_random_data, get_random_mosaic_data, get_random_mosaic_data_v2
from my_queue import GeneratorEnqueuer
import time
import math
from cosine_anneal import WarmUpCosineDecayScheduler
from config.configs import CONFIG
def get_classes(classes_path):
    """Load class names from a text file (one name per line).

    Args:
        classes_path: path to the class-name file.

    Returns:
        A list of class-name strings with surrounding whitespace removed.
    """
    with open(classes_path) as handle:
        return [line.strip() for line in handle.readlines()]
def get_anchors(anchors_path):
    """Load anchor sizes from a file.

    The first line of the file holds comma-separated numbers that are
    interpreted as consecutive (width, height) pairs.

    Args:
        anchors_path: path to the anchor file.

    Returns:
        A numpy array of shape (num_anchors, 2).
    """
    with open(anchors_path) as handle:
        first_line = handle.readline()
    values = [float(token) for token in first_line.split(',')]
    return np.array(values).reshape(-1, 2)
def data_generator(annotation_lines,
                   batch_size,
                   input_shape,
                   anchors,
                   num_classes):
    """Endless batch generator for Keras fit_generator.

    Ground-truth tensors are produced by preprocess_true_boxes_iou_thres,
    i.e. each gt box is matched to every anchor whose IoU exceeds
    CONFIG.TRAIN.IOU_THRESHOLD (plus the single best anchor).

    Args:
        annotation_lines: list of annotation strings [anno1, anno2, ...].
        batch_size: number of samples per yielded batch.
        input_shape: network resolution (h, w).
        anchors: anchor boxes.
        num_classes: number of classes.

    Yields:
        ([image_data, *y_true], np.zeros(batch_size)) tuples.
    """
    total = len(annotation_lines)
    cursor = 0
    while True:
        images = []
        boxes = []
        for _ in range(batch_size):
            if cursor == 0:
                # reshuffle the dataset at the start of every pass
                np.random.shuffle(annotation_lines)
            img, box = get_random_data(annotation_lines[cursor], input_shape)
            images.append(img)
            boxes.append(box)
            cursor = (cursor + 1) % total
        image_batch = np.array(images)
        box_batch = np.array(boxes)
        # get true_boxes
        # y_true = preprocess_true_boxes(box_batch, input_shape, anchors, num_classes)
        y_true = preprocess_true_boxes_iou_thres(box_batch, input_shape, anchors, num_classes,
                                                 iou_threshold=CONFIG.TRAIN.IOU_THRESHOLD)
        # yield keeps this a lazy generator
        yield [image_batch, *y_true], np.zeros(batch_size)
def data_generator_mosaic_iou_thres(annotation_lines,
                                    batch_size,
                                    input_shape,
                                    anchors,
                                    num_classes):
    """Endless batch generator using 4-image mosaic augmentation.

    Assignment strategy: one gt box is matched to every anchor whose IoU
    exceeds CONFIG.TRAIN.IOU_THRESHOLD (plus the single best anchor).

    Args:
        annotation_lines: list of annotation strings [anno1, anno2, ...].
        batch_size: number of samples per yielded batch.
        input_shape: network resolution (h, w).
        anchors: anchor boxes.
        num_classes: number of classes.

    Yields:
        ([image_data, *y_true], np.zeros(batch_size)) tuples.
    """
    n = len(annotation_lines)
    # every mosaic sample consumes four consecutive annotations
    groups = n // 4
    cursor = 0
    while True:
        images = []
        boxes = []
        for _ in range(batch_size):
            if cursor == 0:
                # reshuffle the dataset at the start of every pass
                np.random.shuffle(annotation_lines)
            img, box = get_random_mosaic_data(annotation_lines[4 * cursor:4 * cursor + 4], input_shape)
            images.append(img)
            boxes.append(box)
            cursor = (cursor + 1) % groups
        image_batch = np.array(images)
        box_batch = np.array(boxes)
        y_true = preprocess_true_boxes_iou_thres(box_batch, input_shape, anchors, num_classes,
                                                 iou_threshold=CONFIG.TRAIN.IOU_THRESHOLD)
        # yield keeps this a lazy generator
        yield [image_batch, *y_true], np.zeros(batch_size)
def preprocess_true_boxes(true_boxes,
                          input_shape,
                          anchors,
                          num_classes):
    """Build per-scale ground-truth tensors, assigning each gt box to the
    single anchor whose shape has the highest IoU with the box.

    Args:
        true_boxes: [batch, max_boxes, 5] of (x1, y1, x2, y2, class_id) in
            pixels; rows with zero width are treated as padding.
        input_shape: network resolution (h, w).
        anchors: [num_anchors, 2] anchor sizes in pixels.
        num_classes: number of classes.

    Returns:
        A list, one entry per detection scale, of arrays shaped
        [batch, grid_h, grid_w, anchors_per_scale, 5 + num_classes].
    """
    assert (true_boxes[..., 4] < num_classes).all(), 'class id must be less than num_classes'
    num_layers = len(anchors) // 3
    anchor_mask = [[6, 7, 8], [3, 4, 5], [0, 1, 2]] if num_layers == 3 else [[3, 4, 5], [1, 2, 3]]

    boxes = np.array(true_boxes, dtype='float32')
    net_shape = np.array(input_shape, dtype='int32')  # e.g. 416,416
    # corner coordinates -> (center, size), then normalize by the resolution
    centers = (boxes[..., 0:2] + boxes[..., 2:4]) // 2
    sizes = boxes[..., 2:4] - boxes[..., 0:2]
    boxes[..., 0:2] = centers / net_shape[:]
    boxes[..., 2:4] = sizes / net_shape[:]

    batch = boxes.shape[0]
    grids = [net_shape // {0: 32, 1: 16, 2: 8}[l] for l in range(num_layers)]
    # [(m, 13, 13, 3, 5+c), (m, 26, 26, 3, 5+c), (m, 52, 52, 3, 5+c)] for 416x416
    y_true = [np.zeros((batch, grids[l][0], grids[l][1], len(anchor_mask[l]), 5 + num_classes),
                       dtype='float32') for l in range(num_layers)]

    # (1, num_anchors, 2) so the IoU broadcasts over all gt boxes at once
    anchor_wh = np.expand_dims(anchors, 0)
    anchor_half = anchor_wh / 2.
    # padding rows have zero width
    has_box = sizes[..., 0] > 0

    for b in range(batch):
        wh = sizes[b, has_box[b]]
        if len(wh) == 0:
            continue
        # (n, 1, 2)
        wh = np.expand_dims(wh, -2)
        box_half = wh / 2.
        # shape-only IoU between each gt box and each anchor (centers aligned)
        inter_min = np.maximum(-box_half, -anchor_half)
        inter_max = np.minimum(box_half, anchor_half)
        inter_wh = np.maximum(inter_max - inter_min, 0.)
        inter_area = inter_wh[..., 0] * inter_wh[..., 1]
        gt_area = wh[..., 0] * wh[..., 1]
        a_area = anchor_wh[..., 0] * anchor_wh[..., 1]
        iou = inter_area / (gt_area + a_area - inter_area)

        best = np.argmax(iou, axis=-1)
        for t, n in enumerate(best):
            for l in range(num_layers):
                if n not in anchor_mask[l]:
                    continue
                # grid cell that holds the box center
                gi = np.floor(boxes[b, t, 0] * grids[l][1]).astype('int32')
                gj = np.floor(boxes[b, t, 1] * grids[l][0]).astype('int32')
                # position of the anchor within this scale
                k = anchor_mask[l].index(n)
                cls = boxes[b, t, 4].astype('int32')
                y_true[l][b, gj, gi, k, 0:4] = boxes[b, t, 0:4]
                # objectness = 1 and one-hot class label
                y_true[l][b, gj, gi, k, 4] = 1
                y_true[l][b, gj, gi, k, 5 + cls] = 1
    return y_true
def preprocess_true_boxes_iou_thres(true_boxes,
                                    input_shape,
                                    anchors,
                                    num_classes,
                                    iou_threshold=0.3):
    """Build per-scale ground-truth tensors with multi-anchor assignment.

    Every anchor whose shape-IoU with a gt box exceeds ``iou_threshold``
    becomes a positive for that box; additionally the single best-matching
    anchor is always assigned, so each valid gt gets at least one positive.

    Args:
        true_boxes: [batch, max_boxes, 5] of (x1, y1, x2, y2, class_id) in
            pixels; rows with zero width are treated as padding.
        input_shape: network resolution (h, w).
        anchors: [num_anchors, 2] anchor sizes in pixels.
        num_classes: number of classes.
        iou_threshold: anchors with IoU above this value become positives.

    Returns:
        A list, one entry per detection scale, of arrays shaped
        [batch, grid_h, grid_w, anchors_per_scale, 5 + num_classes].
    """
    assert (true_boxes[..., 4] < num_classes).all(), 'class id must be less than num_classes'
    num_layers = len(anchors) // 3
    anchor_mask = [[6, 7, 8], [3, 4, 5], [0, 1, 2]] if num_layers == 3 else [[3, 4, 5], [1, 2, 3]]

    true_boxes = np.array(true_boxes, dtype='float32')
    input_shape = np.array(input_shape, dtype='int32')  # e.g. 416,416
    # corner coordinates -> (center, size), normalized by the resolution
    boxes_xy = (true_boxes[..., 0:2] + true_boxes[..., 2:4]) // 2
    boxes_wh = true_boxes[..., 2:4] - true_boxes[..., 0:2]
    true_boxes[..., 0:2] = boxes_xy / input_shape[:]
    true_boxes[..., 2:4] = boxes_wh / input_shape[:]

    m = true_boxes.shape[0]
    grid_shapes = [input_shape // {0: 32, 1: 16, 2: 8}[l] for l in range(num_layers)]
    y_true = [np.zeros((m, grid_shapes[l][0], grid_shapes[l][1], len(anchor_mask[l]), 5 + num_classes),
                       dtype='float32') for l in range(num_layers)]

    # (1, num_anchors, 2) so the IoU broadcasts over all gt boxes at once
    anchors = np.expand_dims(anchors, 0)
    anchor_maxes = anchors / 2.
    anchor_mins = -anchor_maxes
    # padding rows have zero width
    valid_mask = boxes_wh[..., 0] > 0

    for b in range(m):
        wh = boxes_wh[b, valid_mask[b]]
        if len(wh) == 0:
            continue
        # (n, 1, 2)
        wh = np.expand_dims(wh, -2)
        box_maxes = wh / 2.
        box_mins = -box_maxes
        # shape-only IoU between each gt box and each anchor (centers aligned)
        intersect_mins = np.maximum(box_mins, anchor_mins)
        intersect_maxes = np.minimum(box_maxes, anchor_maxes)
        intersect_wh = np.maximum(intersect_maxes - intersect_mins, 0.)
        intersect_area = intersect_wh[..., 0] * intersect_wh[..., 1]
        box_area = wh[..., 0] * wh[..., 1]
        anchor_area = anchors[..., 0] * anchors[..., 1]
        iou = intersect_area / (box_area + anchor_area - intersect_area)

        # 1. every anchor with iou > iou_threshold becomes a positive
        positive = iou > iou_threshold  # [num_true_boxes, num_anchors]
        # NOTE(review): t indexes the *filtered* iou rows but true_boxes is
        # unfiltered; this matches the original and is correct as long as
        # padding rows come after the real boxes — confirm with the loader.
        for t, n in enumerate(positive):
            n = np.array(n, dtype=np.int32)
            pos_index = np.argwhere(n == 1)
            # BUGFIX: the original read "if len(pos_index): continue", which
            # skipped every box that *had* positive anchors and made the loop
            # below dead code.  Skip only when there is no positive anchor.
            if len(pos_index) == 0:
                continue
            for anchor_id in pos_index:
                anchor_id = anchor_id[0]
                for l in range(num_layers):
                    if anchor_id in anchor_mask[l]:
                        # grid cell that holds the box center
                        i = np.floor(true_boxes[b, t, 0] * grid_shapes[l][1]).astype('int32')
                        j = np.floor(true_boxes[b, t, 1] * grid_shapes[l][0]).astype('int32')
                        k = anchor_mask[l].index(anchor_id)
                        c = true_boxes[b, t, 4].astype('int32')
                        y_true[l][b, j, i, k, 0:4] = true_boxes[b, t, 0:4]
                        y_true[l][b, j, i, k, 4] = 1
                        y_true[l][b, j, i, k, 5 + c] = 1
        # 2. always assign the best anchor too, guaranteeing at least one
        # positive per gt even when no IoU clears the threshold
        best_anchor = np.argmax(iou, axis=-1)
        for t, n in enumerate(best_anchor):
            for l in range(num_layers):
                if n in anchor_mask[l]:
                    i = np.floor(true_boxes[b, t, 0] * grid_shapes[l][1]).astype('int32')
                    j = np.floor(true_boxes[b, t, 1] * grid_shapes[l][0]).astype('int32')
                    k = anchor_mask[l].index(n)
                    c = true_boxes[b, t, 4].astype('int32')
                    y_true[l][b, j, i, k, 0:4] = true_boxes[b, t, 0:4]
                    y_true[l][b, j, i, k, 4] = 1
                    y_true[l][b, j, i, k, 5 + c] = 1
    return y_true
def get_batch(num_workers,
              max_queue_size=32,
              use_mosaic_iout_generator=CONFIG.DATASET.MOSAIC_AUG,
              multiprocessing=CONFIG.DATASET.MULTIPROCESS,
              **kwargs):
    """Wrap a data generator in a background GeneratorEnqueuer and yield batches.

    Args:
        num_workers: number of workers filling the queue.
        max_queue_size: maximum number of prepared batches kept in the queue.
        use_mosaic_iout_generator: if True, use data_generator_mosaic_iou_thres,
            otherwise the plain data_generator.
        multiprocessing: passed to GeneratorEnqueuer (true in linux and false
            in windows).
        **kwargs: forwarded to the chosen data generator.

    Yields:
        Whatever the underlying data generator produces.
    """
    enqueuer = None
    try:
        if use_mosaic_iout_generator:
            enqueuer = GeneratorEnqueuer(data_generator_mosaic_iou_thres(**kwargs),
                                          use_multiprocessing=multiprocessing)
        else:
            enqueuer = GeneratorEnqueuer(data_generator(**kwargs),
                                          use_multiprocessing=multiprocessing)
        enqueuer.start(max_queue_size=max_queue_size, workers=num_workers)
        generator_output = None
        while True:
            # poll the queue (with a short sleep) until a batch is available
            while enqueuer.is_running():
                if not enqueuer.queue.empty():
                    generator_output = enqueuer.queue.get()
                    break
                else:
                    time.sleep(0.01)
            yield generator_output
            generator_output = None
    finally:
        # make sure the workers are torn down when the generator is closed
        # or an error propagates out
        if enqueuer is not None:
            enqueuer.stop()
# ----- TensorFlow session configuration (executed at import time) -----
config = tf.ConfigProto()
# A "Best-fit with coalescing" algorithm, simplified from a version of dlmalloc.
config.gpu_options.allocator_type = 'BFC'
# permit this process to use up to the whole GPU memory...
config.gpu_options.per_process_gpu_memory_fraction = 1
# ...but claim it incrementally rather than allocating everything upfront
config.gpu_options.allow_growth = True
set_session(tf.Session(config=config))
if __name__ == "__main__":
    # All paths and hyper-parameters are centralized in config/configs.py.
    annotation_path = CONFIG.TRAIN.ANNO_PATH
    valid_anno_path = CONFIG.TRAIN.VALID_PATH
    classes_path = CONFIG.TRAIN.CLASS_PATH
    anchors_path = CONFIG.TRAIN.ANCHOR_PATH
    # pretrained model path
    weights_path = CONFIG.TRAIN.PRE_TRAINED_MODEL
    class_names = get_classes(classes_path)
    anchors = get_anchors(anchors_path)
    num_classes = len(class_names)
    num_anchors = len(anchors)
    # checkpoint path
    log_dir = CONFIG.TRAIN.SAVE_PATH
    # resolution
    input_shape = CONFIG.TRAIN.RESOLUTION
    # clear previous tf graph
    K.clear_session()
    image_input = Input(shape=(None, None, 3))
    h, w = input_shape
    # create model
    print('Create YOLOv4 model with {} anchors and {} classes.'.format(num_anchors, num_classes))
    model_body = yolo_body(image_input, num_anchors // 3, num_classes)
    print('Load weights {}.'.format(weights_path))
    # by_name/skip_mismatch let a backbone trained with a different head load cleanly
    model_body.load_weights(weights_path, by_name=True, skip_mismatch=True)
    # one ground-truth input tensor per detection scale (strides 32/16/8)
    y_true = [Input(shape=(h // {0: 32, 1: 16, 2: 8}[l], w // {0: 32, 1: 16, 2: 8}[l],
                          num_anchors // 3, num_classes + 5)) for l in range(3)]
    loss_input = [*model_body.output, *y_true]
    # merge custom loss layer into model
    model_loss = Lambda(yolo_loss, output_shape=(1,), name='yolo_loss',
                        arguments={'anchors': anchors,
                                   'num_classes': num_classes,
                                   'ignore_thresh': CONFIG.TRAIN.IGNORE_THRES,
                                   'use_focal_confidence_loss': CONFIG.TRAIN.CONFIDENCE_FOCAL,
                                   'use_focal_class_loss': CONFIG.TRAIN.CLASS_FOCAL,
                                   'use_diou': CONFIG.TRAIN.DIOU,
                                   'use_ciou': CONFIG.TRAIN.CIOU,
                                   'print_loss': False})(loss_input)
    # create model_loss
    model = Model([model_body.input, *y_true], model_loss)
    # freeze_layers = 249
    freeze_layers = CONFIG.TRAIN.FREEZE_LAYERS
    for i in range(freeze_layers):
        model_body.layers[i].trainable = False
    print('Freeze the first {} layers of total {} layers.'.format(freeze_layers, len(model_body.layers)))
    # checkpoint
    logging = TensorBoard(log_dir=log_dir)
    checkpoint = ModelCheckpoint(log_dir + 'ep{epoch:03d}-loss{loss:.3f}.h5',
                                 monitor='loss',
                                 save_weights_only=True,
                                 save_best_only=False, period=CONFIG.TRAIN.SAVE_PERIOD)
    # reduce lr on plateau
    # this lr decay is worse than cosine anneal
    reduce_lr = ReduceLROnPlateau(monitor='loss', factor=0.1, patience=5, verbose=1)
    # i don't use early stopping frequently because it is not orthogonal.
    early_stopping = EarlyStopping(monitor='loss', min_delta=0, patience=10, verbose=1)
    # get training annotations
    with open(annotation_path) as f:
        lines = f.readlines()
    # fixed seed so the training-set shuffle is reproducible across runs
    np.random.seed(10101)
    np.random.shuffle(lines)
    num_train = len(lines)
    # get validating annotations
    with open(valid_anno_path) as f:
        valid_lines = f.readlines()
    np.random.shuffle(valid_lines)
    num_val = len(valid_lines)
    np.random.seed(None)
    # one stage training
    if CONFIG.TRAIN.TRANSFER:
        # the real loss is computed inside the yolo_loss Lambda layer, so the
        # Keras "loss" simply passes y_pred through
        model.compile(optimizer=Adam(lr=CONFIG.TRAIN.LR_STAGE1),
                      loss={'yolo_loss': lambda y_true, y_pred: y_pred})
        batch_size = CONFIG.TRAIN.BATCH1
        print('Train on {} samples, val on {} samples, with batch size {}.'.format(num_train, num_val, batch_size))
        model.fit_generator(get_batch(num_workers=CONFIG.DATASET.WORKERS,
                                      max_queue_size=CONFIG.DATASET.MAX_QUEUE,
                                      annotation_lines=lines, batch_size=batch_size,
                                      input_shape=input_shape, anchors=anchors,
                                      num_classes=num_classes),
                            steps_per_epoch=max(1, num_train // batch_size),
                            # validation_data=get_batch(1, annotation_lines=valid_lines, batch_size=batch_size,
                            #                           input_shape=input_shape, anchors=anchors,
                            #                           num_classes=num_classes),
                            # validation steps: at the end of epoch, generate validation_steps * batch data
                            # validation_steps=max(1, num_val // batch_size),
                            epochs=CONFIG.TRAIN.EPOCH1,
                            initial_epoch=0,
                            callbacks=[checkpoint])
        model.save_weights(log_dir + 'trained_weights_stage_1.h5')
    # unfreeze in second stage
    for i in range(freeze_layers):
        model_body.layers[i].trainable = True
    print('layers have been unfrozen!!')
    # training in second stage
    # fine tune
    if True:
        model.compile(optimizer=Adam(lr=CONFIG.TRAIN.LR_STAGE2),
                      loss={'yolo_loss': lambda y_true, y_pred: y_pred})
        batch_size = CONFIG.TRAIN.BATCH2
        # cosine anneal
        total_epoch = CONFIG.TRAIN.EPOCH2 - CONFIG.TRAIN.EPOCH1
        cosine_anneal = WarmUpCosineDecayScheduler(learning_rate_base=CONFIG.TRAIN.LR_STAGE2,
                                                   total_steps=total_epoch * math.ceil(num_train / batch_size),
                                                   interval_epoch=CONFIG.TRAIN.COS_INTERVAL)
        print('Train on {} samples, val on {} samples, with batch size {}.'.format(num_train, num_val, batch_size))
        model.fit_generator(get_batch(num_workers=CONFIG.DATASET.WORKERS,
                                      max_queue_size=CONFIG.DATASET.MAX_QUEUE,
                                      annotation_lines=lines, batch_size=batch_size,
                                      input_shape=input_shape, anchors=anchors,
                                      num_classes=num_classes),
                            steps_per_epoch=max(1, num_train // batch_size),
                            # validation_data=get_batch(annotation_lines=valid_lines, batch_size=batch_size,
                            #                           input_shape=input_shape, anchors=anchors,
                            #                           num_classes=num_classes),
                            # validation_steps=max(1, num_val // batch_size),
                            epochs=CONFIG.TRAIN.EPOCH2,
                            initial_epoch=CONFIG.TRAIN.EPOCH1,
                            callbacks=[checkpoint, cosine_anneal])
        model.save_weights(log_dir + 'last1.h5')
| [
"utils.utils.get_random_mosaic_data",
"time.sleep",
"numpy.array",
"utils.utils.get_random_data",
"tensorflow.Session",
"keras.backend.clear_session",
"keras.models.Model",
"keras.callbacks.EarlyStopping",
"numpy.random.seed",
"tensorflow.ConfigProto",
"numpy.maximum",
"keras.optimizers.Adam",... | [((12705, 12721), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {}), '()\n', (12719, 12721), True, 'import tensorflow as tf\n'), ((5218, 5255), 'numpy.array', 'np.array', (['true_boxes'], {'dtype': '"""float32"""'}), "(true_boxes, dtype='float32')\n", (5226, 5255), True, 'import numpy as np\n'), ((5274, 5310), 'numpy.array', 'np.array', (['input_shape'], {'dtype': '"""int32"""'}), "(input_shape, dtype='int32')\n", (5282, 5310), True, 'import numpy as np\n'), ((5940, 5966), 'numpy.expand_dims', 'np.expand_dims', (['anchors', '(0)'], {}), '(anchors, 0)\n', (5954, 5966), True, 'import numpy as np\n'), ((8117, 8154), 'numpy.array', 'np.array', (['true_boxes'], {'dtype': '"""float32"""'}), "(true_boxes, dtype='float32')\n", (8125, 8154), True, 'import numpy as np\n'), ((8173, 8209), 'numpy.array', 'np.array', (['input_shape'], {'dtype': '"""int32"""'}), "(input_shape, dtype='int32')\n", (8181, 8209), True, 'import numpy as np\n'), ((8771, 8797), 'numpy.expand_dims', 'np.expand_dims', (['anchors', '(0)'], {}), '(anchors, 0)\n', (8785, 8797), True, 'import numpy as np\n'), ((12951, 12976), 'tensorflow.Session', 'tf.Session', ([], {'config': 'config'}), '(config=config)\n', (12961, 12976), True, 'import tensorflow as tf\n'), ((13568, 13585), 'keras.backend.clear_session', 'K.clear_session', ([], {}), '()\n', (13583, 13585), True, 'import keras.backend as K\n'), ((13605, 13633), 'keras.layers.Input', 'Input', ([], {'shape': '(None, None, 3)'}), '(shape=(None, None, 3))\n', (13610, 13633), False, 'from keras.layers import Input, Lambda\n'), ((13792, 13845), 'nets.yolo4.yolo_body', 'yolo_body', (['image_input', '(num_anchors // 3)', 'num_classes'], {}), '(image_input, num_anchors // 3, num_classes)\n', (13801, 13845), False, 'from nets.yolo4 import yolo_body\n'), ((14921, 14967), 'keras.models.Model', 'Model', (['[model_body.input, *y_true]', 'model_loss'], {}), '([model_body.input, *y_true], model_loss)\n', (14926, 14967), False, 'from 
keras.models import Model\n'), ((15262, 15290), 'keras.callbacks.TensorBoard', 'TensorBoard', ([], {'log_dir': 'log_dir'}), '(log_dir=log_dir)\n', (15273, 15290), False, 'from keras.callbacks import TensorBoard, ModelCheckpoint, ReduceLROnPlateau, EarlyStopping\n'), ((15308, 15472), 'keras.callbacks.ModelCheckpoint', 'ModelCheckpoint', (["(log_dir + 'ep{epoch:03d}-loss{loss:.3f}.h5')"], {'monitor': '"""loss"""', 'save_weights_only': '(True)', 'save_best_only': '(False)', 'period': 'CONFIG.TRAIN.SAVE_PERIOD'}), "(log_dir + 'ep{epoch:03d}-loss{loss:.3f}.h5', monitor='loss',\n save_weights_only=True, save_best_only=False, period=CONFIG.TRAIN.\n SAVE_PERIOD)\n", (15323, 15472), False, 'from keras.callbacks import TensorBoard, ModelCheckpoint, ReduceLROnPlateau, EarlyStopping\n'), ((15654, 15722), 'keras.callbacks.ReduceLROnPlateau', 'ReduceLROnPlateau', ([], {'monitor': '"""loss"""', 'factor': '(0.1)', 'patience': '(5)', 'verbose': '(1)'}), "(monitor='loss', factor=0.1, patience=5, verbose=1)\n", (15671, 15722), False, 'from keras.callbacks import TensorBoard, ModelCheckpoint, ReduceLROnPlateau, EarlyStopping\n'), ((15818, 15884), 'keras.callbacks.EarlyStopping', 'EarlyStopping', ([], {'monitor': '"""loss"""', 'min_delta': '(0)', 'patience': '(10)', 'verbose': '(1)'}), "(monitor='loss', min_delta=0, patience=10, verbose=1)\n", (15831, 15884), False, 'from keras.callbacks import TensorBoard, ModelCheckpoint, ReduceLROnPlateau, EarlyStopping\n'), ((15988, 16009), 'numpy.random.seed', 'np.random.seed', (['(10101)'], {}), '(10101)\n', (16002, 16009), True, 'import numpy as np\n'), ((16014, 16038), 'numpy.random.shuffle', 'np.random.shuffle', (['lines'], {}), '(lines)\n', (16031, 16038), True, 'import numpy as np\n'), ((16176, 16206), 'numpy.random.shuffle', 'np.random.shuffle', (['valid_lines'], {}), '(valid_lines)\n', (16193, 16206), True, 'import numpy as np\n'), ((16242, 16262), 'numpy.random.seed', 'np.random.seed', (['None'], {}), '(None)\n', (16256, 16262), True, 
'import numpy as np\n'), ((2591, 2611), 'numpy.array', 'np.array', (['image_data'], {}), '(image_data)\n', (2599, 2611), True, 'import numpy as np\n'), ((2631, 2649), 'numpy.array', 'np.array', (['box_data'], {}), '(box_data)\n', (2639, 2649), True, 'import numpy as np\n'), ((4476, 4496), 'numpy.array', 'np.array', (['image_data'], {}), '(image_data)\n', (4484, 4496), True, 'import numpy as np\n'), ((4516, 4534), 'numpy.array', 'np.array', (['box_data'], {}), '(box_data)\n', (4524, 4534), True, 'import numpy as np\n'), ((6239, 6261), 'numpy.expand_dims', 'np.expand_dims', (['wh', '(-2)'], {}), '(wh, -2)\n', (6253, 6261), True, 'import numpy as np\n'), ((6364, 6397), 'numpy.maximum', 'np.maximum', (['box_mins', 'anchor_mins'], {}), '(box_mins, anchor_mins)\n', (6374, 6397), True, 'import numpy as np\n'), ((6424, 6459), 'numpy.minimum', 'np.minimum', (['box_maxes', 'anchor_maxes'], {}), '(box_maxes, anchor_maxes)\n', (6434, 6459), True, 'import numpy as np\n'), ((6483, 6532), 'numpy.maximum', 'np.maximum', (['(intersect_maxes - intersect_mins)', '(0.0)'], {}), '(intersect_maxes - intersect_mins, 0.0)\n', (6493, 6532), True, 'import numpy as np\n'), ((6796, 6819), 'numpy.argmax', 'np.argmax', (['iou'], {'axis': '(-1)'}), '(iou, axis=-1)\n', (6805, 6819), True, 'import numpy as np\n'), ((9071, 9093), 'numpy.expand_dims', 'np.expand_dims', (['wh', '(-2)'], {}), '(wh, -2)\n', (9085, 9093), True, 'import numpy as np\n'), ((9178, 9211), 'numpy.maximum', 'np.maximum', (['box_mins', 'anchor_mins'], {}), '(box_mins, anchor_mins)\n', (9188, 9211), True, 'import numpy as np\n'), ((9238, 9273), 'numpy.minimum', 'np.minimum', (['box_maxes', 'anchor_maxes'], {}), '(box_maxes, anchor_maxes)\n', (9248, 9273), True, 'import numpy as np\n'), ((9297, 9346), 'numpy.maximum', 'np.maximum', (['(intersect_maxes - intersect_mins)', '(0.0)'], {}), '(intersect_maxes - intersect_mins, 0.0)\n', (9307, 9346), True, 'import numpy as np\n'), ((10620, 10643), 'numpy.argmax', 'np.argmax', (['iou'], 
{'axis': '(-1)'}), '(iou, axis=-1)\n', (10629, 10643), True, 'import numpy as np\n'), ((13989, 14114), 'keras.layers.Input', 'Input', ([], {'shape': '(h // {(0): 32, (1): 16, (2): 8}[l], w // {(0): 32, (1): 16, (2): 8}[l], \n num_anchors // 3, num_classes + 5)'}), '(shape=(h // {(0): 32, (1): 16, (2): 8}[l], w // {(0): 32, (1): 16, (2\n ): 8}[l], num_anchors // 3, num_classes + 5))\n', (13994, 14114), False, 'from keras.layers import Input, Lambda\n'), ((14251, 14624), 'keras.layers.Lambda', 'Lambda', (['yolo_loss'], {'output_shape': '(1,)', 'name': '"""yolo_loss"""', 'arguments': "{'anchors': anchors, 'num_classes': num_classes, 'ignore_thresh': CONFIG.\n TRAIN.IGNORE_THRES, 'use_focal_confidence_loss': CONFIG.TRAIN.\n CONFIDENCE_FOCAL, 'use_focal_class_loss': CONFIG.TRAIN.CLASS_FOCAL,\n 'use_diou': CONFIG.TRAIN.DIOU, 'use_ciou': CONFIG.TRAIN.CIOU,\n 'print_loss': False}"}), "(yolo_loss, output_shape=(1,), name='yolo_loss', arguments={'anchors':\n anchors, 'num_classes': num_classes, 'ignore_thresh': CONFIG.TRAIN.\n IGNORE_THRES, 'use_focal_confidence_loss': CONFIG.TRAIN.\n CONFIDENCE_FOCAL, 'use_focal_class_loss': CONFIG.TRAIN.CLASS_FOCAL,\n 'use_diou': CONFIG.TRAIN.DIOU, 'use_ciou': CONFIG.TRAIN.CIOU,\n 'print_loss': False})\n", (14257, 14624), False, 'from keras.layers import Input, Lambda\n'), ((1278, 1295), 'numpy.array', 'np.array', (['anchors'], {}), '(anchors)\n', (1286, 1295), True, 'import numpy as np\n'), ((2422, 2471), 'utils.utils.get_random_data', 'get_random_data', (['annotation_lines[i]', 'input_shape'], {}), '(annotation_lines[i], input_shape)\n', (2437, 2471), False, 'from utils.utils import get_random_data, get_random_mosaic_data, get_random_mosaic_data_v2\n'), ((4276, 4346), 'utils.utils.get_random_mosaic_data', 'get_random_mosaic_data', (['annotation_lines[4 * i:4 * i + 4]', 'input_shape'], {}), '(annotation_lines[4 * i:4 * i + 4], input_shape)\n', (4298, 4346), False, 'from utils.utils import get_random_data, get_random_mosaic_data, 
get_random_mosaic_data_v2\n'), ((9749, 9776), 'numpy.array', 'np.array', (['n'], {'dtype': 'np.int32'}), '(n, dtype=np.int32)\n', (9757, 9776), True, 'import numpy as np\n'), ((9801, 9820), 'numpy.argwhere', 'np.argwhere', (['(n == 1)'], {}), '(n == 1)\n', (9812, 9820), True, 'import numpy as np\n'), ((2361, 2396), 'numpy.random.shuffle', 'np.random.shuffle', (['annotation_lines'], {}), '(annotation_lines)\n', (2378, 2396), True, 'import numpy as np\n'), ((3020, 3040), 'numpy.zeros', 'np.zeros', (['batch_size'], {}), '(batch_size)\n', (3028, 3040), True, 'import numpy as np\n'), ((4215, 4250), 'numpy.random.shuffle', 'np.random.shuffle', (['annotation_lines'], {}), '(annotation_lines)\n', (4232, 4250), True, 'import numpy as np\n'), ((4795, 4815), 'numpy.zeros', 'np.zeros', (['batch_size'], {}), '(batch_size)\n', (4803, 4815), True, 'import numpy as np\n'), ((16351, 16382), 'keras.optimizers.Adam', 'Adam', ([], {'lr': 'CONFIG.TRAIN.LR_STAGE1'}), '(lr=CONFIG.TRAIN.LR_STAGE1)\n', (16355, 16382), False, 'from keras.optimizers import Adam\n'), ((18019, 18050), 'keras.optimizers.Adam', 'Adam', ([], {'lr': 'CONFIG.TRAIN.LR_STAGE2'}), '(lr=CONFIG.TRAIN.LR_STAGE2)\n', (18023, 18050), False, 'from keras.optimizers import Adam\n'), ((12532, 12548), 'time.sleep', 'time.sleep', (['(0.01)'], {}), '(0.01)\n', (12542, 12548), False, 'import time\n'), ((18427, 18460), 'math.ceil', 'math.ceil', (['(num_train / batch_size)'], {}), '(num_train / batch_size)\n', (18436, 18460), False, 'import math\n'), ((7014, 7063), 'numpy.floor', 'np.floor', (['(true_boxes[b, t, 0] * grid_shapes[l][1])'], {}), '(true_boxes[b, t, 0] * grid_shapes[l][1])\n', (7022, 7063), True, 'import numpy as np\n'), ((7104, 7153), 'numpy.floor', 'np.floor', (['(true_boxes[b, t, 1] * grid_shapes[l][0])'], {}), '(true_boxes[b, t, 1] * grid_shapes[l][0])\n', (7112, 7153), True, 'import numpy as np\n'), ((10792, 10841), 'numpy.floor', 'np.floor', (['(true_boxes[b, t, 0] * grid_shapes[l][1])'], {}), '(true_boxes[b, t, 
0] * grid_shapes[l][1])\n', (10800, 10841), True, 'import numpy as np\n'), ((10882, 10931), 'numpy.floor', 'np.floor', (['(true_boxes[b, t, 1] * grid_shapes[l][0])'], {}), '(true_boxes[b, t, 1] * grid_shapes[l][0])\n', (10890, 10931), True, 'import numpy as np\n'), ((10054, 10103), 'numpy.floor', 'np.floor', (['(true_boxes[b, t, 0] * grid_shapes[l][1])'], {}), '(true_boxes[b, t, 0] * grid_shapes[l][1])\n', (10062, 10103), True, 'import numpy as np\n'), ((10148, 10197), 'numpy.floor', 'np.floor', (['(true_boxes[b, t, 1] * grid_shapes[l][0])'], {}), '(true_boxes[b, t, 1] * grid_shapes[l][0])\n', (10156, 10197), True, 'import numpy as np\n')] |
# Copyright (c) 2021 <NAME>
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
import torch
import cv2
class InputPipeLine:
    """Camera capture pipeline.

    Captures video from the first available camera, resizes every frame to
    (target_height, target_width) and returns it channel-first, optionally
    as a torch.Tensor.
    """

    def __init__(self, target_height, target_width):
        # desired output resolution for every captured frame
        self.target_height = target_height
        self.target_width = target_width
        self.__cam = None
        self.__cam_running = False

    def start(self):
        """
        Open the first available camera and begin capturing.
        """
        self.__cam = cv2.VideoCapture(0)
        self.__cam_running = True

    def stop(self):
        """
        Stop capturing and release the camera back to the OS.
        """
        if self.__cam is None:
            raise RuntimeError('Camera is not started yet! So, it is not defined.')
        self.__cam_running = False
        self.__cam.release()

    def frames(self, as_tensor=True):
        """
        Yield camera frames resized to the target resolution, channel-first.

        Raises RuntimeError if the camera has not been started.

        :param as_tensor: If True, yield a torch.Tensor of shape (3, H, W);
            otherwise yield a numpy array of shape (3, H, W).
        :return: torch.Tensor, or a Numpy array.
        """
        if self.__cam is None:
            raise RuntimeError('Camera is not started yet! So, it is not defined.')
        cam = self.__cam
        while cam.isOpened():
            if not self.__cam_running:
                break
            success, frame = cam.read()
            if not success:
                break
            resized = cv2.resize(frame, (self.target_width, self.target_height))
            chw = np.transpose(resized, [2, 0, 1])  # HWC -> CHW
            yield torch.as_tensor(chw) if as_tensor else chw
"torch.as_tensor",
"cv2.resize",
"numpy.transpose",
"cv2.VideoCapture"
] | [((746, 765), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(0)'], {}), '(0)\n', (762, 765), False, 'import cv2\n'), ((1933, 1991), 'cv2.resize', 'cv2.resize', (['frame', '(self.target_width, self.target_height)'], {}), '(frame, (self.target_width, self.target_height))\n', (1943, 1991), False, 'import cv2\n'), ((2017, 2049), 'numpy.transpose', 'np.transpose', (['resized', '[2, 0, 1]'], {}), '(resized, [2, 0, 1])\n', (2029, 2049), True, 'import numpy as np\n'), ((2110, 2134), 'torch.as_tensor', 'torch.as_tensor', (['img_out'], {}), '(img_out)\n', (2125, 2134), False, 'import torch\n')] |
import numpy as np
try:
import cupy as cp
except:
cp = np
#CupyScalars = NumpyScalars
import pytissueoptics.vectors as vc
class NativeScalars:
    """An array of scalars compatible with operations on Vectors.

    numpy.array is not used directly because problem-specific helpers are
    added here and Python does not allow extending system classes.
    Works with float, int and bool values; for booleans, True is 1.
    """

    def __init__(self, array=None, N=None):
        """Wrap an existing array, or allocate N zeros."""
        if array is not None:
            self.v = np.array(array)
        elif N is not None:
            self.v = np.array([0] * N)
        else:
            raise ValueError("You must provide an array or N")
        self.selected = [True] * len(self.v)
        self._iteration = 0

    @classmethod
    def random(cls, N):
        return Scalars([np.random.random() for _ in range(N)])

    def __iter__(self):
        self._iteration = 0
        return self

    def __next__(self):
        if self._iteration >= len(self):
            raise StopIteration
        value = self.v[self._iteration]
        self._iteration += 1
        return value

    def __len__(self) -> int:
        return len(self.v)

    def __mul__(self, scale) -> 'Scalars':
        return Scalars(self.v * scale)

    def __rmul__(self, scale) -> 'Scalars':
        return Scalars(self.v * scale)

    def __truediv__(self, scale) -> 'Scalars':
        return Scalars(self.v / scale)

    def __add__(self, rhs) -> 'Scalars':
        return Scalars([a + b for a, b in zip(self.v, rhs.v)])

    def __neg__(self) -> 'Scalars':
        return Scalars([-a for a in self.v])

    def __sub__(self, rhs) -> 'Scalars':
        return Scalars([a - b for a, b in zip(self.v, rhs.v)])

    def __getitem__(self, index):
        return self.v[index]

    def __setitem__(self, index, newvalue):
        self.v[index] = newvalue

    def __eq__(self, rhs) -> bool:
        # accept either another Scalars instance or any plain iterable
        other = rhs.v if isinstance(rhs, Scalars) else rhs
        return np.array([a == b for a, b in zip(self.v, other)]).all()

    def logicalNot(self) -> 'Scalars':
        return Scalars([not bool(a) for a in self.v])

    def logicalAnd(self, rhs) -> 'Scalars':
        return Scalars([bool(a) and bool(b) for a, b in zip(self.v, rhs)])

    def logicalOr(self, rhs) -> 'Scalars':
        return Scalars([bool(a) or bool(b) for a, b in zip(self.v, rhs)])

    def all(self) -> bool:
        return self.v.all()

    def any(self) -> bool:
        return self.v.any()

    def none(self) -> bool:
        return not self.v.any()
class NumpyScalars:
    """An array of float64 scalars backed by a numpy (or cupy) array.

    Arithmetic operators broadcast element-wise and return new NumpyScalars;
    __mul__/__truediv__ also accept vc.NumpyVectors.  Storage is either 1-D
    with shape (N,) (when built from a sequence/array) or 2-D with shape
    (1, N) (when built with the ``N`` argument); both layouts are supported.
    """

    def __init__(self, array=None, N=None):
        """Wrap ``array`` as float64, or allocate ``N`` zeros of shape (1, N).

        NOTE(review): when neither argument is given, ``self.v`` stays
        undefined (unchanged from the original) — confirm callers never do that.
        """
        if array is not None:
            if type(array) == np.ndarray:
                self.v = array.astype('float64')
            elif type(array) == cp.ndarray:
                self.v = array.astype(np.float64)
            else:
                self.v = np.asarray(array, dtype=np.float64)
        elif N is not None:
            self.v = np.zeros((1, N), dtype=np.float64)
        self._iteration = 0

    def __len__(self):
        # Bugfix: instances built from a plain sequence hold a 1-D array, for
        # which the original ``self.v.shape[1]`` raised IndexError.
        # shape[-1] is correct for both the (N,) and (1, N) layouts.
        return self.v.shape[-1]

    def __add__(self, other):
        if isinstance(other, NumpyScalars):
            return NumpyScalars(np.add(self.v, other.v))
        else:
            return NumpyScalars(np.add(self.v, other))

    def __sub__(self, other):
        if isinstance(other, NumpyScalars):
            return NumpyScalars(np.subtract(self.v, other.v))
        else:
            return NumpyScalars(np.subtract(self.v, other))

    def __mul__(self, other):
        if isinstance(other, NumpyScalars):
            return NumpyScalars(np.multiply(self.v, other.v))
        elif isinstance(other, vc.NumpyVectors):
            return NumpyScalars(np.multiply(self.v[:, None], other.v))
        else:
            return NumpyScalars(np.multiply(self.v, other))

    def __truediv__(self, other):
        if isinstance(other, NumpyScalars):
            return NumpyScalars(np.true_divide(self.v, other.v))
        elif isinstance(other, vc.NumpyVectors):
            # NOTE(review): this branch multiplies rather than divides
            # (kept as in the original) — confirm the intended semantics
            # against vc.NumpyVectors before changing it.
            return NumpyScalars(np.multiply(self.v[:, None], other.v))
        else:
            return NumpyScalars(np.true_divide(self.v, other))

    def __neg__(self):
        return NumpyScalars(np.negative(self.v))

    def __getitem__(self, item):
        return self.v[item]

    def __setitem__(self, key, value: float):
        self.v[key] = value

    def __eq__(self, other):
        # Returns an element-wise boolean array, not a single bool.
        if isinstance(other, NumpyScalars):
            return np.equal(self.v, other.v)
        else:
            return np.equal(self.v, other)

    def __iter__(self):
        self._iteration = 0
        return self

    def __next__(self):
        if self._iteration < len(self):
            # Ellipsis indexing works for both 1-D and (1, N) storage;
            # the original ``self.v[:, i]`` failed on 1-D arrays.
            result = self.v[..., self._iteration]
            self._iteration += 1
            return result
        else:
            raise StopIteration

    def __contains__(self, item):
        if item in self.v:
            return True
        else:
            return False

    @classmethod
    def setAll(cls, value, N):
        """Return N scalars all equal to ``value``."""
        return NumpyScalars(np.full((N), value))

    @classmethod
    def random(cls, N: int):
        """N random numbers uniformly drawn from [0, 1)."""
        return NumpyScalars(np.random.rand(N))

    @classmethod
    def random2(cls, N: int):
        """N random numbers uniformly drawn from [-1, 1)."""
        return NumpyScalars((np.random.rand(N) * 2) - 1)

    def isEqualTo(self, other):
        """Element-wise approximate equality (|a - b| <= 1e-9), as NumpyScalars."""
        if isinstance(other, NumpyScalars):
            return NumpyScalars(np.less_equal(np.abs(np.subtract(self.v, other.v)), 1e-9))
        else:
            return NumpyScalars(np.less_equal(np.abs(np.subtract(self.v, other)), 1e-9))
class CupyScalars:
    """A batch of scalar values stored as a CuPy float64 array on the GPU.

    Mirrors the NumpyScalars API: arithmetic operators broadcast element-wise
    and return new CupyScalars; iteration yields one column of the stored
    (1, N) array per step.
    """

    def __init__(self, array=None, N=None):
        """Wrap *array* (converted to float64) or allocate a (1, N) zero array."""
        if array is not None:
            # Bug fix: the original tested `type(array) == cp.ndarray` twice,
            # leaving the elif branch unreachable. Both branches converted to
            # float64, so they are collapsed into one.
            if type(array) == cp.ndarray:
                self.v = array.astype(cp.float64)
            else:
                self.v = cp.asarray(array, dtype=cp.float64)
        elif N is not None:
            self.v = cp.zeros((1, N), dtype=cp.float64)
        self._iteration = 0  # cursor used by __iter__/__next__

    def __len__(self):
        # Length is the number of columns (second axis) of the stored array.
        return self.v.shape[1]

    def __add__(self, other):
        if isinstance(other, CupyScalars):
            return CupyScalars(cp.add(self.v, other.v))
        else:
            return CupyScalars(cp.add(self.v, other))

    def __sub__(self, other):
        if isinstance(other, CupyScalars):
            return CupyScalars(cp.subtract(self.v, other.v))
        else:
            return CupyScalars(cp.subtract(self.v, other))

    def __mul__(self, other):
        if isinstance(other, CupyScalars):
            return CupyScalars(cp.multiply(self.v, other.v))
        elif isinstance(other, vc.CupyVectors):
            # Broadcast the scalars across each vector component.
            return CupyScalars(cp.multiply(self.v[:, None], other.v))
        else:
            return CupyScalars(cp.multiply(self.v, other))

    def __truediv__(self, other):
        if isinstance(other, CupyScalars):
            return CupyScalars(cp.true_divide(self.v, other.v))
        elif isinstance(other, vc.CupyVectors):
            # Bug fix: this branch previously used cp.multiply (copy-pasted
            # from __mul__); true division must divide by the vector.
            return CupyScalars(cp.true_divide(self.v[:, None], other.v))
        else:
            return CupyScalars(cp.true_divide(self.v, other))

    def __neg__(self):
        return CupyScalars(cp.negative(self.v))

    def __getitem__(self, item):
        return self.v[item]

    def __setitem__(self, key, value: cp.float32):
        self.v[key] = value

    def __eq__(self, other):
        # Returns an element-wise boolean array, not a single bool.
        if isinstance(other, CupyScalars):
            return cp.equal(self.v, other.v)
        else:
            return cp.equal(self.v, other)

    def __iter__(self):
        self._iteration = 0
        return self

    def __next__(self):
        # Yields one column per step (length given by __len__).
        if self._iteration < len(self):
            result = self.v[:, self._iteration]
            self._iteration += 1
            return result
        else:
            raise StopIteration

    def __contains__(self, item):
        # Delegates membership to the underlying array.
        return item in self.v

    @classmethod
    def setAll(cls, value, N):
        """Return N scalars, all equal to *value*."""
        return CupyScalars(cp.full(N, value))

    @classmethod
    def random(cls, N: int):
        """Random number between [0, 1]"""
        return CupyScalars(cp.random.rand(N))

    @classmethod
    def random2(cls, N: int):
        """Random number between [-1, 1]"""
        return CupyScalars((cp.random.rand(N) * 2) - 1)

    def isEqualTo(self, other):
        """Element-wise approximate equality within an absolute tolerance of 1e-9."""
        if isinstance(other, CupyScalars):
            return CupyScalars(cp.less_equal(cp.abs(cp.subtract(self.v, other.v)), 1e-9))
        else:
            return CupyScalars(cp.less_equal(cp.abs(cp.subtract(self.v, other)), 1e-9))
Scalars = NativeScalars
| [
"numpy.random.rand",
"numpy.equal",
"cupy.subtract",
"numpy.array",
"cupy.equal",
"cupy.negative",
"cupy.full",
"cupy.true_divide",
"numpy.multiply",
"cupy.random.rand",
"numpy.random.random",
"numpy.asarray",
"numpy.subtract",
"cupy.multiply",
"cupy.asarray",
"numpy.add",
"cupy.add"... | [((658, 673), 'numpy.array', 'np.array', (['array'], {}), '(array)\n', (666, 673), True, 'import numpy as np\n'), ((4508, 4527), 'numpy.negative', 'np.negative', (['self.v'], {}), '(self.v)\n', (4519, 4527), True, 'import numpy as np\n'), ((4764, 4789), 'numpy.equal', 'np.equal', (['self.v', 'other.v'], {}), '(self.v, other.v)\n', (4772, 4789), True, 'import numpy as np\n'), ((4823, 4846), 'numpy.equal', 'np.equal', (['self.v', 'other'], {}), '(self.v, other)\n', (4831, 4846), True, 'import numpy as np\n'), ((5340, 5357), 'numpy.full', 'np.full', (['N', 'value'], {}), '(N, value)\n', (5347, 5357), True, 'import numpy as np\n'), ((5479, 5496), 'numpy.random.rand', 'np.random.rand', (['N'], {}), '(N)\n', (5493, 5496), True, 'import numpy as np\n'), ((7564, 7583), 'cupy.negative', 'cp.negative', (['self.v'], {}), '(self.v)\n', (7575, 7583), True, 'import cupy as cp\n'), ((7819, 7844), 'cupy.equal', 'cp.equal', (['self.v', 'other.v'], {}), '(self.v, other.v)\n', (7827, 7844), True, 'import cupy as cp\n'), ((7878, 7901), 'cupy.equal', 'cp.equal', (['self.v', 'other'], {}), '(self.v, other)\n', (7886, 7901), True, 'import cupy as cp\n'), ((8394, 8411), 'cupy.full', 'cp.full', (['N', 'value'], {}), '(N, value)\n', (8401, 8411), True, 'import cupy as cp\n'), ((8532, 8549), 'cupy.random.rand', 'cp.random.rand', (['N'], {}), '(N)\n', (8546, 8549), True, 'import cupy as cp\n'), ((723, 740), 'numpy.array', 'np.array', (['([0] * N)'], {}), '([0] * N)\n', (731, 740), True, 'import numpy as np\n'), ((955, 973), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (971, 973), True, 'import numpy as np\n'), ((2296, 2310), 'numpy.array', 'np.array', (['each'], {}), '(each)\n', (2304, 2310), True, 'import numpy as np\n'), ((3253, 3287), 'numpy.zeros', 'np.zeros', (['(1, N)'], {'dtype': 'np.float64'}), '((1, N), dtype=np.float64)\n', (3261, 3287), True, 'import numpy as np\n'), ((3479, 3502), 'numpy.add', 'np.add', (['self.v', 'other.v'], {}), '(self.v, 
other.v)\n', (3485, 3502), True, 'import numpy as np\n'), ((3550, 3571), 'numpy.add', 'np.add', (['self.v', 'other'], {}), '(self.v, other)\n', (3556, 3571), True, 'import numpy as np\n'), ((3680, 3708), 'numpy.subtract', 'np.subtract', (['self.v', 'other.v'], {}), '(self.v, other.v)\n', (3691, 3708), True, 'import numpy as np\n'), ((3756, 3782), 'numpy.subtract', 'np.subtract', (['self.v', 'other'], {}), '(self.v, other)\n', (3767, 3782), True, 'import numpy as np\n'), ((3891, 3919), 'numpy.multiply', 'np.multiply', (['self.v', 'other.v'], {}), '(self.v, other.v)\n', (3902, 3919), True, 'import numpy as np\n'), ((4226, 4257), 'numpy.true_divide', 'np.true_divide', (['self.v', 'other.v'], {}), '(self.v, other.v)\n', (4240, 4257), True, 'import numpy as np\n'), ((6326, 6360), 'cupy.zeros', 'cp.zeros', (['(1, N)'], {'dtype': 'cp.float64'}), '((1, N), dtype=cp.float64)\n', (6334, 6360), True, 'import cupy as cp\n'), ((6550, 6573), 'cupy.add', 'cp.add', (['self.v', 'other.v'], {}), '(self.v, other.v)\n', (6556, 6573), True, 'import cupy as cp\n'), ((6620, 6641), 'cupy.add', 'cp.add', (['self.v', 'other'], {}), '(self.v, other)\n', (6626, 6641), True, 'import cupy as cp\n'), ((6748, 6776), 'cupy.subtract', 'cp.subtract', (['self.v', 'other.v'], {}), '(self.v, other.v)\n', (6759, 6776), True, 'import cupy as cp\n'), ((6823, 6849), 'cupy.subtract', 'cp.subtract', (['self.v', 'other'], {}), '(self.v, other)\n', (6834, 6849), True, 'import cupy as cp\n'), ((6956, 6984), 'cupy.multiply', 'cp.multiply', (['self.v', 'other.v'], {}), '(self.v, other.v)\n', (6967, 6984), True, 'import cupy as cp\n'), ((7286, 7317), 'cupy.true_divide', 'cp.true_divide', (['self.v', 'other.v'], {}), '(self.v, other.v)\n', (7300, 7317), True, 'import cupy as cp\n'), ((3168, 3203), 'numpy.asarray', 'np.asarray', (['array'], {'dtype': 'np.float64'}), '(array, dtype=np.float64)\n', (3178, 3203), True, 'import numpy as np\n'), ((4002, 4039), 'numpy.multiply', 'np.multiply', (['self.v[:, None]', 
'other.v'], {}), '(self.v[:, None], other.v)\n', (4013, 4039), True, 'import numpy as np\n'), ((4087, 4113), 'numpy.multiply', 'np.multiply', (['self.v', 'other'], {}), '(self.v, other)\n', (4098, 4113), True, 'import numpy as np\n'), ((4340, 4377), 'numpy.multiply', 'np.multiply', (['self.v[:, None]', 'other.v'], {}), '(self.v[:, None], other.v)\n', (4351, 4377), True, 'import numpy as np\n'), ((4425, 4454), 'numpy.true_divide', 'np.true_divide', (['self.v', 'other'], {}), '(self.v, other)\n', (4439, 4454), True, 'import numpy as np\n'), ((5619, 5636), 'numpy.random.rand', 'np.random.rand', (['N'], {}), '(N)\n', (5633, 5636), True, 'import numpy as np\n'), ((6241, 6276), 'cupy.asarray', 'cp.asarray', (['array'], {'dtype': 'cp.float64'}), '(array, dtype=cp.float64)\n', (6251, 6276), True, 'import cupy as cp\n'), ((7065, 7102), 'cupy.multiply', 'cp.multiply', (['self.v[:, None]', 'other.v'], {}), '(self.v[:, None], other.v)\n', (7076, 7102), True, 'import cupy as cp\n'), ((7149, 7175), 'cupy.multiply', 'cp.multiply', (['self.v', 'other'], {}), '(self.v, other)\n', (7160, 7175), True, 'import cupy as cp\n'), ((7398, 7435), 'cupy.multiply', 'cp.multiply', (['self.v[:, None]', 'other.v'], {}), '(self.v[:, None], other.v)\n', (7409, 7435), True, 'import cupy as cp\n'), ((7482, 7511), 'cupy.true_divide', 'cp.true_divide', (['self.v', 'other'], {}), '(self.v, other)\n', (7496, 7511), True, 'import cupy as cp\n'), ((8671, 8688), 'cupy.random.rand', 'cp.random.rand', (['N'], {}), '(N)\n', (8685, 8688), True, 'import cupy as cp\n'), ((5777, 5805), 'numpy.subtract', 'np.subtract', (['self.v', 'other.v'], {}), '(self.v, other.v)\n', (5788, 5805), True, 'import numpy as np\n'), ((5882, 5908), 'numpy.subtract', 'np.subtract', (['self.v', 'other'], {}), '(self.v, other)\n', (5893, 5908), True, 'import numpy as np\n'), ((8827, 8855), 'cupy.subtract', 'cp.subtract', (['self.v', 'other.v'], {}), '(self.v, other.v)\n', (8838, 8855), True, 'import cupy as cp\n'), ((8931, 8957), 
'cupy.subtract', 'cp.subtract', (['self.v', 'other'], {}), '(self.v, other)\n', (8942, 8957), True, 'import cupy as cp\n')] |
import numpy as np
from matplotlib.colors import ListedColormap
import matplotlib.pyplot as plt
def plot_surface(X, y, clf, title="", xlabel="", ylabel=""):
    """Plot a classifier's decision surface over the first two columns of X.

    Draws a filled contour of clf's decision scores on a 50x50 grid spanning
    the data range, then overlays the samples colored by label *y*.
    Uses decision_function when available, otherwise the positive-class
    column of predict_proba.
    """
    lo, hi = X.min(), X.max()
    axis0 = np.linspace(lo[0], hi[0], num=50)
    axis1 = np.linspace(lo[1], hi[1], num=50)
    xx, yy = np.meshgrid(axis0, axis1)

    # Score every grid point, then shape the scores back onto the grid.
    grid = np.c_[xx.ravel(), yy.ravel()]
    if hasattr(clf, "decision_function"):
        scores = clf.decision_function(grid)
    else:
        scores = clf.predict_proba(grid)[:, 1]
    Z = scores.reshape(xx.shape)

    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.contourf(xx, yy, Z, cmap=plt.cm.RdBu, alpha=.8)
    point_colors = ListedColormap(['#FF0000', '#0000FF'])
    ax.scatter(X.iloc[:, 0], X.iloc[:, 1], c=y, cmap=point_colors, edgecolors='k')
    ax.set_xlim(left=lo[0], right=hi[0])
    ax.set_ylim(bottom=lo[1], top=hi[1])
    ax.set_title(title)
    ax.set_xlabel(xlabel)
    ax.set_ylabel(ylabel)
    plt.show()
"matplotlib.colors.ListedColormap",
"matplotlib.pyplot.figure",
"numpy.linspace",
"matplotlib.pyplot.show"
] | [((514, 526), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (524, 526), True, 'import matplotlib.pyplot as plt\n'), ((875, 885), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (883, 885), True, 'import matplotlib.pyplot as plt\n'), ((214, 253), 'numpy.linspace', 'np.linspace', (['x_min[0]', 'x_max[0]'], {'num': '(50)'}), '(x_min[0], x_max[0], num=50)\n', (225, 253), True, 'import numpy as np\n'), ((262, 301), 'numpy.linspace', 'np.linspace', (['x_min[1]', 'x_max[1]'], {'num': '(50)'}), '(x_min[1], x_max[1], num=50)\n', (273, 301), True, 'import numpy as np\n'), ((659, 697), 'matplotlib.colors.ListedColormap', 'ListedColormap', (["['#FF0000', '#0000FF']"], {}), "(['#FF0000', '#0000FF'])\n", (673, 697), False, 'from matplotlib.colors import ListedColormap\n')] |
import time
import numpy as np
import tensorflow as tf
import awesome_gans.image_utils as iu
import awesome_gans.segan.segan_model as segan
from awesome_gans.datasets import MNISTDataSet
# Output locations: generated image directory plus checkpoint/model paths.
results = {'output': './gen_img/', 'checkpoint': './model/checkpoint', 'model': './model/SEGAN-model.ckpt'}

# Training schedule: total optimization steps and how often to log/sample/save.
train_step = {
    'global_step': 150001,
    'logging_interval': 1500,
}
def main():
    """Train the SEGAN model on MNIST.

    Alternates one discriminator and one generator update per step; every
    `train_step['logging_interval']` steps it logs losses, writes summaries,
    exports two grids of generated sample images, and checkpoints the model.
    """
    start_time = time.time()  # Clocking start

    # Dataset load.
    # NOTE(review): the original comment said "UrbanSound8K Dataset load" but
    # the code loads MNIST — comment appeared stale.
    mnist = MNISTDataSet().data

    # GPU configure: allocate GPU memory on demand rather than all at once.
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True

    with tf.Session(config=config) as s:
        # Model construction.
        # NOTE(review): the original comment said "CoGAN Model" but segan.SEGAN
        # is instantiated — comment appeared stale.
        model = segan.SEGAN(s)

        # Initializing
        s.run(tf.global_variables_initializer())

        sample_x, _ = mnist.test.next_batch(model.sample_num)
        # One-hot labels for the sample grid; currently unused because all
        # model.y feeds below are commented out.
        sample_y = np.zeros(shape=[model.sample_num, model.n_classes])
        for i in range(10):
            sample_y[10 * i : 10 * (i + 1), i] = 1

        for step in range(train_step['global_step']):
            batch_x, batch_y = mnist.train.next_batch(model.batch_size)
            batch_x = np.reshape(batch_x, model.image_shape)
            batch_z = np.random.uniform(-1.0, 1.0, [model.batch_size, model.z_dim]).astype(np.float32)

            # Update D network
            _, d_loss = s.run(
                [model.d_op, model.d_loss],
                feed_dict={
                    model.x_1: batch_x,
                    model.x_2: batch_x,
                    # model.y: batch_y,
                    model.z: batch_z,
                },
            )

            # Update G network
            _, g_loss = s.run(
                [model.g_op, model.g_loss],
                feed_dict={
                    model.x_1: batch_x,
                    model.x_2: batch_x,
                    # model.y: batch_y,
                    model.z: batch_z,
                },
            )

            if step % train_step['logging_interval'] == 0:
                # Draw a fresh batch purely for loss reporting and summaries.
                batch_x, batch_y = mnist.train.next_batch(model.batch_size)
                batch_x = np.reshape(batch_x, model.image_shape)
                batch_z = np.random.uniform(-1.0, 1.0, [model.batch_size, model.z_dim]).astype(np.float32)

                d_loss, g_loss, summary = s.run(
                    [model.d_loss, model.g_loss, model.merged],
                    feed_dict={
                        model.x_1: batch_x,
                        model.x_2: batch_x,
                        # model.y: batch_y,
                        model.z: batch_z,
                    },
                )

                # Print loss
                print("[+] Step %08d => " % step, " D loss : {:.8f}".format(d_loss), " G loss : {:.8f}".format(g_loss))

                sample_z = np.random.uniform(-1.0, 1.0, [model.sample_num, model.z_dim]).astype(np.float32)

                # Training G model with sample image and noise
                samples_1 = s.run(
                    model.g_sample_1,
                    feed_dict={
                        # model.y: sample_y,
                        model.z: sample_z,
                    },
                )
                samples_2 = s.run(
                    model.g_sample_2,
                    feed_dict={
                        # model.y: sample_y,
                        model.z: sample_z,
                    },
                )
                samples_1 = np.reshape(samples_1, [-1] + model.image_shape[1:])
                samples_2 = np.reshape(samples_2, [-1] + model.image_shape[1:])

                # Summary saver
                model.writer.add_summary(summary, global_step=step)

                # Export image generated by model G
                sample_image_height = model.sample_size
                sample_image_width = model.sample_size
                sample_dir_1 = results['output'] + 'train_1_{:08d}.png'.format(step)
                sample_dir_2 = results['output'] + 'train_2_{:08d}.png'.format(step)

                # Generated image save
                iu.save_images(samples_1, size=[sample_image_height, sample_image_width], image_path=sample_dir_1)
                iu.save_images(samples_2, size=[sample_image_height, sample_image_width], image_path=sample_dir_2)

                # Model save
                model.saver.save(s, results['model'], global_step=step)

    # NOTE: end_time actually holds the elapsed duration, not an end timestamp.
    end_time = time.time() - start_time  # Clocking end

    # Elapsed time
    print("[+] Elapsed time {:.8f}s".format(end_time))

    # Close tf.Session — redundant after the `with` block, which already
    # closed the session; kept for behavior parity.
    s.close()


if __name__ == '__main__':
    main()
| [
"awesome_gans.segan.segan_model.SEGAN",
"numpy.reshape",
"tensorflow.Session",
"tensorflow.global_variables_initializer",
"numpy.zeros",
"awesome_gans.datasets.MNISTDataSet",
"numpy.random.uniform",
"tensorflow.ConfigProto",
"awesome_gans.image_utils.save_images",
"time.time"
] | [((404, 415), 'time.time', 'time.time', ([], {}), '()\n', (413, 415), False, 'import time\n'), ((533, 549), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {}), '()\n', (547, 549), True, 'import tensorflow as tf\n'), ((479, 493), 'awesome_gans.datasets.MNISTDataSet', 'MNISTDataSet', ([], {}), '()\n', (491, 493), False, 'from awesome_gans.datasets import MNISTDataSet\n'), ((603, 628), 'tensorflow.Session', 'tf.Session', ([], {'config': 'config'}), '(config=config)\n', (613, 628), True, 'import tensorflow as tf\n'), ((673, 687), 'awesome_gans.segan.segan_model.SEGAN', 'segan.SEGAN', (['s'], {}), '(s)\n', (684, 687), True, 'import awesome_gans.segan.segan_model as segan\n'), ((843, 894), 'numpy.zeros', 'np.zeros', ([], {'shape': '[model.sample_num, model.n_classes]'}), '(shape=[model.sample_num, model.n_classes])\n', (851, 894), True, 'import numpy as np\n'), ((4363, 4374), 'time.time', 'time.time', ([], {}), '()\n', (4372, 4374), False, 'import time\n'), ((726, 759), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (757, 759), True, 'import tensorflow as tf\n'), ((1123, 1161), 'numpy.reshape', 'np.reshape', (['batch_x', 'model.image_shape'], {}), '(batch_x, model.image_shape)\n', (1133, 1161), True, 'import numpy as np\n'), ((2079, 2117), 'numpy.reshape', 'np.reshape', (['batch_x', 'model.image_shape'], {}), '(batch_x, model.image_shape)\n', (2089, 2117), True, 'import numpy as np\n'), ((3407, 3458), 'numpy.reshape', 'np.reshape', (['samples_1', '([-1] + model.image_shape[1:])'], {}), '(samples_1, [-1] + model.image_shape[1:])\n', (3417, 3458), True, 'import numpy as np\n'), ((3487, 3538), 'numpy.reshape', 'np.reshape', (['samples_2', '([-1] + model.image_shape[1:])'], {}), '(samples_2, [-1] + model.image_shape[1:])\n', (3497, 3538), True, 'import numpy as np\n'), ((4031, 4133), 'awesome_gans.image_utils.save_images', 'iu.save_images', (['samples_1'], {'size': '[sample_image_height, sample_image_width]', 
'image_path': 'sample_dir_1'}), '(samples_1, size=[sample_image_height, sample_image_width],\n image_path=sample_dir_1)\n', (4045, 4133), True, 'import awesome_gans.image_utils as iu\n'), ((4146, 4248), 'awesome_gans.image_utils.save_images', 'iu.save_images', (['samples_2'], {'size': '[sample_image_height, sample_image_width]', 'image_path': 'sample_dir_2'}), '(samples_2, size=[sample_image_height, sample_image_width],\n image_path=sample_dir_2)\n', (4160, 4248), True, 'import awesome_gans.image_utils as iu\n'), ((1184, 1245), 'numpy.random.uniform', 'np.random.uniform', (['(-1.0)', '(1.0)', '[model.batch_size, model.z_dim]'], {}), '(-1.0, 1.0, [model.batch_size, model.z_dim])\n', (1201, 1245), True, 'import numpy as np\n'), ((2144, 2205), 'numpy.random.uniform', 'np.random.uniform', (['(-1.0)', '(1.0)', '[model.batch_size, model.z_dim]'], {}), '(-1.0, 1.0, [model.batch_size, model.z_dim])\n', (2161, 2205), True, 'import numpy as np\n'), ((2764, 2825), 'numpy.random.uniform', 'np.random.uniform', (['(-1.0)', '(1.0)', '[model.sample_num, model.z_dim]'], {}), '(-1.0, 1.0, [model.sample_num, model.z_dim])\n', (2781, 2825), True, 'import numpy as np\n')] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.