code stringlengths 31 1.05M | apis list | extract_api stringlengths 97 1.91M |
|---|---|---|
import numpy as np
from metod_alg import objective_functions as mt_obj
def test_1():
    """Computational check of mt_obj.shekel_function() in dimension d=2."""
    p = 3
    # Three identical 2x2 identity matrices, one per Shekel term.
    identity = np.array([[1, 0],
                         [0, 1]])
    matrix_test = np.array([identity, identity, identity])
    C = np.array([[10, 3, 0.5],
                  [11, 5, 1]])
    b = np.array([1, 1, 1])
    x = np.array([2, 4])
    func_val = mt_obj.shekel_function(x, p, matrix_test, C, b)
    # Known reference value for this configuration, to 4 decimal places.
    assert np.round(func_val, 4) == -0.2119
| [
"numpy.array",
"metod_alg.objective_functions.shekel_function",
"numpy.round"
] | [((184, 248), 'numpy.array', 'np.array', (['[[[1, 0], [0, 1]], [[1, 0], [0, 1]], [[1, 0], [0, 1]]]'], {}), '([[[1, 0], [0, 1]], [[1, 0], [0, 1]], [[1, 0], [0, 1]]])\n', (192, 248), True, 'import numpy as np\n'), ((397, 433), 'numpy.array', 'np.array', (['[[10, 3, 0.5], [11, 5, 1]]'], {}), '([[10, 3, 0.5], [11, 5, 1]])\n', (405, 433), True, 'import numpy as np\n'), ((460, 479), 'numpy.array', 'np.array', (['[1, 1, 1]'], {}), '([1, 1, 1])\n', (468, 479), True, 'import numpy as np\n'), ((488, 504), 'numpy.array', 'np.array', (['[2, 4]'], {}), '([2, 4])\n', (496, 504), True, 'import numpy as np\n'), ((552, 584), 'metod_alg.objective_functions.shekel_function', 'mt_obj.shekel_function', (['x', '*args'], {}), '(x, *args)\n', (574, 584), True, 'from metod_alg import objective_functions as mt_obj\n'), ((596, 617), 'numpy.round', 'np.round', (['func_val', '(4)'], {}), '(func_val, 4)\n', (604, 617), True, 'import numpy as np\n')] |
import numpy as np
import tensorflow as tf
__author__ = '<NAME>'
def print_metrics_dict(metrics):
    """Pretty-print a dict of metric values, one labelled section per metric."""
    for metric_name, metric_val in metrics.items():
        print('--------------', metric_name, '--------------')
        # Convert tensors so numpy-style formatting can be applied below.
        if isinstance(metric_val, tf.Tensor):
            metric_val = metric_val.numpy()
        if metric_name == 'confusion':
            # Confusion matrices get compact, comma-separated formatting.
            formatted = np.array2string(metric_val, separator=', ', precision=2)
            print(formatted)
        else:
            print(metric_val)
| [
"numpy.array2string"
] | [((314, 363), 'numpy.array2string', 'np.array2string', (['val'], {'separator': '""", """', 'precision': '(2)'}), "(val, separator=', ', precision=2)\n", (329, 363), True, 'import numpy as np\n')] |
import matplotlib as mpl
import matplotlib.pyplot as plt
import datetime
import numpy as np
import pandas as pd
import seaborn as sns
import yaml
import math
import os
from skopt.plots import plot_objective
from fbprophet.plot import add_changepoints_to_plot
# Set some matplotlib parameters
mpl.rcParams['figure.figsize'] = (20, 15)
cfg = yaml.full_load(open(os.getcwd() + "/config.yml", 'r'))
def visualize_silhouette_plot(k_range, silhouette_scores, optimal_k, save_fig=False):
    '''
    Plot average silhouette score for all samples at different values of k. Use this to determine optimal number of
    clusters (k). The optimal k is the one that maximizes the average Silhouette Score over the range of k provided.
    :param k_range: Range of k explored
    :param silhouette_scores: Average Silhouette Score corresponding to values in k range
    :param optimal_k: The value of k that has the highest average Silhouette Score
    :param save_fig: Flag indicating whether to save the figure
    '''
    ax = plt.subplot()
    # Average Silhouette Score as a function of k
    ax.plot(k_range, silhouette_scores)
    ax.set_xlabel("k (# of clusters)", labelpad=10, size=15)
    ax.set_ylabel("Average Silhouette Score", labelpad=10, size=15)
    ax.set_xticks(k_range, minor=False)
    # Dashed vertical line marks the chosen (optimal) k
    ax.axvline(x=optimal_k, linestyle='--')
    ax.set_title("Silhouette Plot", fontsize=25)
    ax.text(0.5, 0.92, "Average Silhouette Score over a range of k-values", size=15, ha='center')
    if save_fig:
        timestamp = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
        file_path = cfg['PATHS']['DATA_VISUALIZATIONS'] + 'silhouette_plot_' + timestamp + '.png'
        plt.savefig(file_path)
    return
def plot_model_evaluation(forecast_df, model_name, metrics, save_dir=None, figsize=(20,13), save_fig=False, train_date=''):
    '''
    Plot model's predictions on training and test sets, along with key performance metrics.
    :param forecast_df: DataFrame consisting of predicted and ground truth consumption values
    :param model_name: model identifier
    :param metrics: key performance metrics
    :param save_dir: directory in which to save the figure (defaults to config forecast visualizations path)
    :param figsize: size of matplotlib figure
    :param save_fig: Flag indicating whether to save the figure
    :param train_date: string representing date model was trained
    '''
    fig = plt.figure(figsize=figsize)
    fig.suptitle(model_name + ' Forecast', fontsize=20)
    # 2x2 grid: training fit, test forecast, residuals, residual distribution
    ax1 = fig.add_subplot(2, 2, 1)
    ax2 = fig.add_subplot(2, 2, 2)
    ax3 = fig.add_subplot(2, 2, 3)
    ax4 = fig.add_subplot(2, 2, 4)
    # Plot training performance (rows where a fitted "model" value exists)
    forecast_df[pd.notnull(forecast_df["model"])][["gt", "model"]].plot(color=["black", "green"], title="Training Set Predictions",
                                                           grid=True, ax=ax1)
    ax1.set(xlabel=None)
    # Plot test performance (rows with no fitted value, i.e. the forecast horizon)
    if "test_pred" in forecast_df.columns:
        forecast_df[pd.isnull(forecast_df["model"])][["gt", "forecast", "test_pred"]].plot(color=["black", "red", "yellow"],
                                                                          title="Test Set Forecast", grid=True, ax=ax2)
    else:
        forecast_df[pd.isnull(forecast_df["model"])][["gt", "forecast"]].plot(color=["black", "red"],
                                                          title="Test Set Forecast", grid=True, ax=ax2)
    # Shade prediction and confidence interval bands around the forecast
    ax2.fill_between(x=forecast_df.index, y1=forecast_df['pred_int_low'], y2=forecast_df['pred_int_up'], color='b', alpha=0.2)
    ax2.fill_between(x=forecast_df.index, y1=forecast_df['conf_int_low'], y2=forecast_df['conf_int_up'], color='b', alpha=0.3)
    ax2.set(xlabel=None)
    # Plot residuals
    forecast_df[["residuals", "error"]].plot(ax=ax3, color=["green", "red"], title="Residuals", grid=True)
    ax3.set(xlabel=None)
    # Plot residuals distribution (kernel density estimate)
    forecast_df[["residuals", "error"]].plot(ax=ax4, color=["green", "red"], kind='kde',
                                             title="Residuals Distribution", grid=True)
    ax4.set(ylabel=None)
    print("Training --> Residuals mean:", np.round(metrics['residuals_mean']), " | std:", np.round(metrics['residuals_std']))
    print("Test --> Error mean:", np.round(metrics['error_mean']), " | std:", np.round(metrics['error_std']),
          " | mae:", np.round(metrics['MAE']), " | mape:", np.round(metrics['MAPE'] * 100), "% | mse:", np.round(metrics['MSE']),
          " | rmse:", np.round(metrics['RMSE']))
    if save_fig:
        save_dir = cfg['PATHS']['FORECAST_VISUALIZATIONS'] if save_dir is None else save_dir
        plt.savefig(save_dir + '/' + model_name + '_eval_' +
                    train_date + '.png')
    return
def correlation_matrix(dataset, save_fig=False):
    '''
    Produces a correlation matrix for a dataset
    :param dataset: A DataFrame
    :param save_fig: Flag indicating whether to save the figure
    '''
    corr_mat = dataset.corr()
    # Mask the redundant upper-right triangle of the symmetric matrix.
    upper_mask = np.triu(np.ones_like(corr_mat, dtype=bool))
    palette = sns.diverging_palette(230, 20, as_cmap=True)  # custom diverging colour map
    fig, axes = plt.subplots()
    # Heatmap with mask and correct aspect ratio
    sns.heatmap(corr_mat, mask=upper_mask, cmap=palette, vmax=.3, center=0,
                square=True, linewidths=.5, cbar_kws={"shrink": .5})
    axes.set_title('Correlation Matrix', fontsize=20)
    plt.tight_layout(pad=1.2)
    if save_fig:
        timestamp = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
        plt.savefig(cfg['PATHS']['DATA_VISUALIZATIONS'] + 'correlation_matrix' + timestamp + '.png')
    return
def client_box_plot(client_df, save_fig=False):
    '''
    Produces a box plot for all features in the dataset
    :param client_df: A DataFrame indexed by client identifier
    :param save_fig: Flag indicating whether to save the figure
    '''
    # Only plot categorical/boolean features actually present in the DataFrame
    cat_feats = [f for f in cfg['DATA']['CATEGORICAL_FEATS'] if f in client_df.columns]
    bool_feats = [f for f in cfg['DATA']['BOOLEAN_FEATS'] if f in client_df.columns]
    feats = cat_feats + bool_feats
    # Arrange subplots on a near-square grid (n_rows * n_cols >= len(feats))
    n_rows = math.floor(math.sqrt(len(feats)))
    n_cols = math.ceil(math.sqrt(len(feats)))
    fig, axes = plt.subplots(n_rows, n_cols)
    idx = 0
    for i in range(n_rows):
        for j in range(n_cols):
            # Box plot of recent-month consumption grouped by this feature
            sns.boxplot(x=client_df[feats[idx]], y=client_df['CONS_0m_AGO'], palette="Set2", ax=axes[i, j])
            axes[i, j].set_yscale('log')  # consumption spans orders of magnitude
            axes[i, j].set_title(feats[idx], fontsize=14)
            axes[i, j].set_xticklabels(axes[i, j].get_xticklabels(), rotation=45, ha='right')
            if idx < len(feats) - 1:
                idx += 1
            else:
                # Final feature reached; stop filling this row.
                # NOTE(review): only the inner loop breaks, so later rows
                # re-plot the last feature once each — confirm if intended.
                break
    fig.suptitle('Box Plots for consumption in recent month grouped by categorical variables', fontsize=20, y=0.99)
    fig.tight_layout(pad=1, rect=(0,0,1,0.95))
    if save_fig:
        plt.savefig(cfg['PATHS']['DATA_VISUALIZATIONS'] + 'client_box_plot' +
                    datetime.datetime.now().strftime("%Y%m%d-%H%M%S") + '.png')
    return
def client_cmptn_by_rc_violin_plot(client_df, save_fig=False):
    '''
    Produces a violin plot for consumption by client in the most recent month stratified by rate class
    :param client_df: A DataFrame indexed by client identifier
    :param save_fig: Flag indicating whether to save the figure
    '''
    fig, ax = plt.subplots()
    # Horizontal violins, one per rate class, scaled by total area
    sns.violinplot(x=client_df['CONS_0m_AGO'], y=client_df['RATE_CLASS'], palette="Set2", scale='area', orient='h',
                   linewidth=0.2, ax=ax)
    ax.set_yticklabels(ax.get_yticklabels(), fontsize=12)
    ax.set_xlabel('Consumption in last month [m^3]', fontsize=20, labelpad=10)
    ax.set_ylabel('Rate Class', fontsize=20, labelpad=10)
    fig.suptitle('Violin plot for consumption in recent month grouped by rate class', fontsize=30)
    fig.tight_layout(pad=1, rect=(0,0.05,1,0.95))
    if save_fig:
        timestamp = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
        plt.savefig(cfg['PATHS']['DATA_VISUALIZATIONS'] + 'violin_plot_rate_class' + timestamp + '.png')
    return
def visualize_client_dataset_stats(client_df, save_fig=False):
    '''
    Obtain general statistics for features in the client dataset and create a summary figure
    :param client_df: A DataFrame indexed by client identifier
    :param save_fig: Flag indicating whether to save the figure
    '''
    # Only consider configured features actually present in the DataFrame
    cat_feats = [f for f in cfg['DATA']['CATEGORICAL_FEATS'] if f in client_df.columns]
    bool_feats = [f for f in cfg['DATA']['BOOLEAN_FEATS'] if f in client_df.columns]
    num_feats = [f for f in cfg['DATA']['NUMERICAL_FEATS'] if f in client_df.columns]
    feats = cat_feats + bool_feats + num_feats
    n_feats = len(feats)
    # Arrange subplots on a near-square grid (n_rows * n_cols >= n_feats)
    n_rows = math.floor(math.sqrt(n_feats))
    n_cols = math.ceil(math.sqrt(n_feats))
    fig, axes = plt.subplots(n_rows, n_cols)
    idx = 0
    for i in range(n_rows):
        for j in range(n_cols):
            if feats[idx] in num_feats:
                # Numerical feature: density estimate with mean/median/std markers
                sns.kdeplot(data=client_df, x=feats[idx], palette="Set2", ax=axes[i, j])
                mean = client_df[feats[idx]].mean()
                median = client_df[feats[idx]].median()
                std = client_df[feats[idx]].std()
                axes[i, j].axvline(mean, color='r', linestyle='-', linewidth=0.8, label='mean=' + '{:.1e}'.format(mean))
                axes[i, j].axvline(median, color='g', linestyle='-', linewidth=0.8, label='median=' + '{:.1e}'.format(median))
                axes[i, j].axvline(mean - std, color='r', linestyle='--', linewidth=0.8, label='+/- std' + '{:.1e}'.format(std))
                axes[i, j].axvline(mean + std, color='r', linestyle='--', linewidth=0.8)
                axes[i, j].legend(fontsize=8)
                axes[i, j].set_title(feats[idx], fontsize=14)
            else:
                # Categorical/boolean feature: count plot annotated with the mode
                mode = client_df[feats[idx]].mode()
                sns.countplot(data=client_df, x=feats[idx], ax=axes[i, j], palette='Set3')
                axes[i, j].set_xticklabels(axes[i, j].get_xticklabels(), rotation=45, ha='right')
                axes[i, j].text(0.6, 0.9, 'mode=' + str(mode[0]), transform=axes[i, j].transAxes, fontsize=8)
                axes[i, j].set_title(feats[idx], fontsize=14)
            if idx < n_feats - 1:
                idx += 1
            else:
                # Final feature reached; stop filling this row.
                # NOTE(review): only the inner loop breaks, so later rows
                # re-plot the last feature once each — confirm if intended.
                break
    fig.suptitle('General statistics for client data', fontsize=20, y=0.99)
    fig.tight_layout(pad=2, rect=(0, 0, 1, 0.95))
    if save_fig:
        plt.savefig(cfg['PATHS']['DATA_VISUALIZATIONS'] + 'client_general_visualization' +
                    datetime.datetime.now().strftime("%Y%m%d-%H%M%S") + '.png')
    return
def produce_data_visualizations(preprocessed_path=None, client_path=None):
    '''
    Produces a series of data visualizations for client data and preprocessed consumption data.
    :param preprocessed_path: Path of preprocessed data CSV (defaults to path in config)
    :param client_path: Path of client data CSV (defaults to path in config)
    '''
    # Fall back to the configured default paths only when none are supplied.
    # Previously a caller-supplied path left the corresponding DataFrame
    # undefined, raising a NameError below.
    if preprocessed_path is None:
        preprocessed_path = cfg['PATHS']['PREPROCESSED_DATA']
    if client_path is None:
        client_path = cfg['PATHS']['CLIENT_DATA']
    preprocessed_df = pd.read_csv(preprocessed_path)
    client_df = pd.read_csv(client_path)
    # Clear the current figure before each visualization so plots don't overlap
    plt.clf()
    correlation_matrix(preprocessed_df, save_fig=True)
    plt.clf()
    client_box_plot(client_df, save_fig=True)
    plt.clf()
    visualize_client_dataset_stats(client_df, save_fig=True)
    return
def plot_bayesian_hparam_opt(model_name, hparam_names, search_results, save_fig=False):
    '''
    Plot all 2D hyperparameter comparisons from the logs of a Bayesian hyperparameter optimization.
    :param model_name: Name of the model
    :param hparam_names: List of hyperparameter identifiers
    :param search_results: The object resulting from a Bayesian hyperparameter optimization with the skopt package
    :param save_fig: Flag indicating whether to save the figure
    :return:
    '''
    # Abbreviate hyperparameters to improve plot readability
    axis_labels = hparam_names.copy()
    for i in range(len(axis_labels)):
        if len(axis_labels[i]) >= 12:
            axis_labels[i] = axis_labels[i][:4] + '...' + axis_labels[i][-4:]
    # Plot pairwise partial-dependence panels of the objective (skopt)
    axes = plot_objective(result=search_results, dimensions=axis_labels)
    # Create a title
    fig = plt.gcf()
    fig.suptitle('Bayesian Hyperparameter\n Optimization for ' + model_name, fontsize=15, x=0.65, y=0.97)
    # Indicate which hyperparameter abbreviations correspond with which hyperparameter
    hparam_abbrs_text = ''
    for i in range(len(hparam_names)):
        hparam_abbrs_text += axis_labels[i] + ':\n'
    fig.text(0.50, 0.8, hparam_abbrs_text, fontsize=10, style='italic', color='mediumblue')
    hparam_names_text = ''
    for i in range(len(hparam_names)):
        hparam_names_text += hparam_names[i] + '\n'
    fig.text(0.65, 0.8, hparam_names_text, fontsize=10, color='darkblue')
    fig.tight_layout()
    if save_fig:
        plt.savefig(cfg['PATHS']['EXPERIMENT_VISUALIZATIONS'] + 'Bayesian_opt_' + model_name + '_' +
                    datetime.datetime.now().strftime("%Y%m%d-%H%M%S") + '.png')
def plot_prophet_components(prophet_model, forecast, save_dir=None, train_date=''):
    '''
    Plot Prophet model's forecast components. This plot visualizes trend, yearly seasonality, weekly seasonality,
    holiday effects
    :param prophet_model: Fitted Prophet model
    :param forecast: A forecast from a Prophet model
    :param save_dir: directory in which to save the figure (defaults to config interpretability path)
    :param train_date: string representing date model was trained
    '''
    fig = prophet_model.plot_components(forecast)
    fig.suptitle('Prophet Model Components', fontsize=15)
    fig.tight_layout(pad=2, rect=(0, 0, 1, 0.95))
    save_dir = cfg['PATHS']['INTERPRETABILITY_VISUALIZATIONS'] if save_dir is None else save_dir
    # NOTE(review): no '/' is inserted between save_dir and the filename —
    # presumably the configured path ends with '/'; verify against config.
    plt.savefig(save_dir + 'Prophet_components' +
                train_date + '.png')
    return
def plot_prophet_forecast(prophet_model, prophet_pred, save_dir=None, train_date=''):
    '''
    Plot Prophet model's forecast using the Prophet API, including changepoints
    :param prophet_model: Fitted Prophet model
    :param prophet_pred: A forecast from a Prophet model (result of a prophet.predict() call)
    :param save_dir: directory in which to save the figure (defaults to config forecast visualizations path)
    :param train_date: string representing date model was trained
    '''
    fig = prophet_model.plot(prophet_pred)
    ax = fig.gca()
    # Overlay detected trend changepoints on the forecast plot
    add_changepoints_to_plot(ax, prophet_model, prophet_pred)
    ax = fig.gca()  # NOTE(review): redundant — ax was already fetched above
    ax.set_xlabel('Date')
    ax.set_ylabel('Consumption [m^3]')
    fig.suptitle('Prophet Model Forecast', fontsize=15)
    fig.tight_layout(pad=2, rect=(0, 0, 1, 0.95))
    save_dir = cfg['PATHS']['FORECAST_VISUALIZATIONS'] if save_dir is None else save_dir
    plt.savefig(save_dir + 'Prophet_API_forecast' +
                train_date + '.png')
return | [
"fbprophet.plot.add_changepoints_to_plot",
"pandas.read_csv",
"math.sqrt",
"seaborn.violinplot",
"pandas.notnull",
"skopt.plots.plot_objective",
"numpy.round",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.gcf",
"seaborn.diverging_palette",
"seaborn.heatmap",
"numpy.ones_like",
"pandas.isn... | [((1068, 1081), 'matplotlib.pyplot.subplot', 'plt.subplot', ([], {}), '()\n', (1079, 1081), True, 'import matplotlib.pyplot as plt\n'), ((2322, 2349), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': 'figsize'}), '(figsize=figsize)\n', (2332, 2349), True, 'import matplotlib.pyplot as plt\n'), ((5033, 5077), 'seaborn.diverging_palette', 'sns.diverging_palette', (['(230)', '(20)'], {'as_cmap': '(True)'}), '(230, 20, as_cmap=True)\n', (5054, 5077), True, 'import seaborn as sns\n'), ((5128, 5142), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (5140, 5142), True, 'import matplotlib.pyplot as plt\n'), ((5147, 5269), 'seaborn.heatmap', 'sns.heatmap', (['corr_mat'], {'mask': 'mask', 'cmap': 'cmap', 'vmax': '(0.3)', 'center': '(0)', 'square': '(True)', 'linewidths': '(0.5)', 'cbar_kws': "{'shrink': 0.5}"}), "(corr_mat, mask=mask, cmap=cmap, vmax=0.3, center=0, square=True,\n linewidths=0.5, cbar_kws={'shrink': 0.5})\n", (5158, 5269), True, 'import seaborn as sns\n'), ((5392, 5417), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {'pad': '(1.2)'}), '(pad=1.2)\n', (5408, 5417), True, 'import matplotlib.pyplot as plt\n'), ((6175, 6203), 'matplotlib.pyplot.subplots', 'plt.subplots', (['n_rows', 'n_cols'], {}), '(n_rows, n_cols)\n', (6187, 6203), True, 'import matplotlib.pyplot as plt\n'), ((7356, 7370), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (7368, 7370), True, 'import matplotlib.pyplot as plt\n'), ((7375, 7514), 'seaborn.violinplot', 'sns.violinplot', ([], {'x': "client_df['CONS_0m_AGO']", 'y': "client_df['RATE_CLASS']", 'palette': '"""Set2"""', 'scale': '"""area"""', 'orient': '"""h"""', 'linewidth': '(0.2)', 'ax': 'axes'}), "(x=client_df['CONS_0m_AGO'], y=client_df['RATE_CLASS'],\n palette='Set2', scale='area', orient='h', linewidth=0.2, ax=axes)\n", (7389, 7514), True, 'import seaborn as sns\n'), ((8811, 8839), 'matplotlib.pyplot.subplots', 'plt.subplots', (['n_rows', 'n_cols'], {}), '(n_rows, 
n_cols)\n', (8823, 8839), True, 'import matplotlib.pyplot as plt\n'), ((11126, 11135), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (11133, 11135), True, 'import matplotlib.pyplot as plt\n'), ((11195, 11204), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (11202, 11204), True, 'import matplotlib.pyplot as plt\n'), ((11255, 11264), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (11262, 11264), True, 'import matplotlib.pyplot as plt\n'), ((12070, 12131), 'skopt.plots.plot_objective', 'plot_objective', ([], {'result': 'search_results', 'dimensions': 'axis_labels'}), '(result=search_results, dimensions=axis_labels)\n', (12084, 12131), False, 'from skopt.plots import plot_objective\n'), ((12164, 12173), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (12171, 12173), True, 'import matplotlib.pyplot as plt\n'), ((13654, 13720), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(save_dir + 'Prophet_components' + train_date + '.png')"], {}), "(save_dir + 'Prophet_components' + train_date + '.png')\n", (13665, 13720), True, 'import matplotlib.pyplot as plt\n'), ((14140, 14197), 'fbprophet.plot.add_changepoints_to_plot', 'add_changepoints_to_plot', (['ax', 'prophet_model', 'prophet_pred'], {}), '(ax, prophet_model, prophet_pred)\n', (14164, 14197), False, 'from fbprophet.plot import add_changepoints_to_plot\n'), ((14481, 14549), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(save_dir + 'Prophet_API_forecast' + train_date + '.png')"], {}), "(save_dir + 'Prophet_API_forecast' + train_date + '.png')\n", (14492, 14549), True, 'import matplotlib.pyplot as plt\n'), ((1753, 1775), 'matplotlib.pyplot.savefig', 'plt.savefig', (['file_path'], {}), '(file_path)\n', (1764, 1775), True, 'import matplotlib.pyplot as plt\n'), ((4088, 4123), 'numpy.round', 'np.round', (["metrics['residuals_mean']"], {}), "(metrics['residuals_mean'])\n", (4096, 4123), True, 'import numpy as np\n'), ((4136, 4170), 'numpy.round', 'np.round', (["metrics['residuals_std']"], {}), 
"(metrics['residuals_std'])\n", (4144, 4170), True, 'import numpy as np\n'), ((4206, 4237), 'numpy.round', 'np.round', (["metrics['error_mean']"], {}), "(metrics['error_mean'])\n", (4214, 4237), True, 'import numpy as np\n'), ((4250, 4280), 'numpy.round', 'np.round', (["metrics['error_std']"], {}), "(metrics['error_std'])\n", (4258, 4280), True, 'import numpy as np\n'), ((4303, 4327), 'numpy.round', 'np.round', (["metrics['MAE']"], {}), "(metrics['MAE'])\n", (4311, 4327), True, 'import numpy as np\n'), ((4341, 4372), 'numpy.round', 'np.round', (["(metrics['MAPE'] * 100)"], {}), "(metrics['MAPE'] * 100)\n", (4349, 4372), True, 'import numpy as np\n'), ((4387, 4411), 'numpy.round', 'np.round', (["metrics['MSE']"], {}), "(metrics['MSE'])\n", (4395, 4411), True, 'import numpy as np\n'), ((4435, 4460), 'numpy.round', 'np.round', (["metrics['RMSE']"], {}), "(metrics['RMSE'])\n", (4443, 4460), True, 'import numpy as np\n'), ((4581, 4654), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(save_dir + '/' + model_name + '_eval_' + train_date + '.png')"], {}), "(save_dir + '/' + model_name + '_eval_' + train_date + '.png')\n", (4592, 4654), True, 'import matplotlib.pyplot as plt\n'), ((4940, 4974), 'numpy.ones_like', 'np.ones_like', (['corr_mat'], {'dtype': 'bool'}), '(corr_mat, dtype=bool)\n', (4952, 4974), True, 'import numpy as np\n'), ((8732, 8750), 'math.sqrt', 'math.sqrt', (['n_feats'], {}), '(n_feats)\n', (8741, 8750), False, 'import math\n'), ((8775, 8793), 'math.sqrt', 'math.sqrt', (['n_feats'], {}), '(n_feats)\n', (8784, 8793), False, 'import math\n'), ((10986, 11032), 'pandas.read_csv', 'pd.read_csv', (["cfg['PATHS']['PREPROCESSED_DATA']"], {}), "(cfg['PATHS']['PREPROCESSED_DATA'])\n", (10997, 11032), True, 'import pandas as pd\n'), ((11081, 11121), 'pandas.read_csv', 'pd.read_csv', (["cfg['PATHS']['CLIENT_DATA']"], {}), "(cfg['PATHS']['CLIENT_DATA'])\n", (11092, 11121), True, 'import pandas as pd\n'), ((361, 372), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (370, 
372), False, 'import os\n'), ((6289, 6389), 'seaborn.boxplot', 'sns.boxplot', ([], {'x': 'client_df[feats[idx]]', 'y': "client_df['CONS_0m_AGO']", 'palette': '"""Set2"""', 'ax': 'axes[i, j]'}), "(x=client_df[feats[idx]], y=client_df['CONS_0m_AGO'], palette=\n 'Set2', ax=axes[i, j])\n", (6300, 6389), True, 'import seaborn as sns\n'), ((8969, 9041), 'seaborn.kdeplot', 'sns.kdeplot', ([], {'data': 'client_df', 'x': 'feats[idx]', 'palette': '"""Set2"""', 'ax': 'axes[i, j]'}), "(data=client_df, x=feats[idx], palette='Set2', ax=axes[i, j])\n", (8980, 9041), True, 'import seaborn as sns\n'), ((9860, 9934), 'seaborn.countplot', 'sns.countplot', ([], {'data': 'client_df', 'x': 'feats[idx]', 'ax': 'axes[i, j]', 'palette': '"""Set3"""'}), "(data=client_df, x=feats[idx], ax=axes[i, j], palette='Set3')\n", (9873, 9934), True, 'import seaborn as sns\n'), ((2595, 2627), 'pandas.notnull', 'pd.notnull', (["forecast_df['model']"], {}), "(forecast_df['model'])\n", (2605, 2627), True, 'import pandas as pd\n'), ((1686, 1709), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (1707, 1709), False, 'import datetime\n'), ((2919, 2950), 'pandas.isnull', 'pd.isnull', (["forecast_df['model']"], {}), "(forecast_df['model'])\n", (2928, 2950), True, 'import pandas as pd\n'), ((3178, 3209), 'pandas.isnull', 'pd.isnull', (["forecast_df['model']"], {}), "(forecast_df['model'])\n", (3187, 3209), True, 'import pandas as pd\n'), ((5536, 5559), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (5557, 5559), False, 'import datetime\n'), ((6958, 6981), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (6979, 6981), False, 'import datetime\n'), ((8004, 8027), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (8025, 8027), False, 'import datetime\n'), ((10558, 10581), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (10579, 10581), False, 'import datetime\n'), ((12932, 12955), 'datetime.datetime.now', 
'datetime.datetime.now', ([], {}), '()\n', (12953, 12955), False, 'import datetime\n')] |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-variable, line-too-long
"""Depthwise convolution in python"""
import numpy as np
from scipy import signal
def depthwise_conv2d_python_nchw(input_np, filter_np, stride, padding):
    """Depthwise convolution operator in NCHW layout.

    Parameters
    ----------
    input_np : numpy.ndarray
        4-D with shape [batch, in_channel, in_height, in_width]

    filter_np : numpy.ndarray
        4-D with shape [in_channel, channel_multiplier, filter_height, filter_width]

    stride : list / tuple of 2 ints
        [stride_height, stride_width]

    padding : str
        'VALID' or 'SAME'

    Returns
    -------
    output_np : np.ndarray
        4-D with shape [batch, out_channel, out_height, out_width]
    """
    batch, in_channel, in_height, in_width = input_np.shape
    _, channel_multiplier, filter_height, filter_width = filter_np.shape
    if isinstance(stride, int):
        stride_h = stride_w = stride
    else:
        stride_h, stride_w = stride
    out_channel = in_channel * channel_multiplier
    # calculate output shape
    if padding == "VALID":
        out_height = (in_height - filter_height) // stride_h + 1
        out_width = (in_width - filter_width) // stride_w + 1
        output_np = np.zeros((batch, out_channel, out_height, out_width))
        for i in range(batch):
            for j in range(out_channel):
                # Cross-correlation expressed via convolve2d with a
                # 180-degree rotated filter, then strided slicing.
                output_np[i, j, :, :] = signal.convolve2d(
                    input_np[i, j // channel_multiplier, :, :],
                    np.rot90(filter_np[j // channel_multiplier, j % channel_multiplier, :, :], 2),
                    mode="valid",
                )[
                    0 : (in_height - filter_height + 1) : stride_h,
                    0 : (in_width - filter_width + 1) : stride_w,
                ]
    elif padding == "SAME":
        # int() replaces np.int, which was removed in NumPy 1.24.
        out_height = int(np.ceil(float(in_height) / float(stride_h)))
        out_width = int(np.ceil(float(in_width) / float(stride_w)))
        output_np = np.zeros((batch, out_channel, out_height, out_width))
        # Total padding needed, floored at zero. The original used
        # np.max(x, 0), where 0 is interpreted as the *axis* argument,
        # not a lower bound; builtin max(x, 0) is the intended clamp.
        pad_along_height = max((out_height - 1) * stride_h + filter_height - in_height, 0)
        pad_along_width = max((out_width - 1) * stride_w + filter_width - in_width, 0)
        pad_top_tvm = int(np.ceil(float(pad_along_height) / 2))
        pad_left_tvm = int(np.ceil(float(pad_along_width) / 2))
        pad_top_scipy = int(np.ceil(float(filter_height - 1) / 2))
        pad_left_scipy = int(np.ceil(float(filter_width - 1) / 2))
        # Offset between scipy's centering and TVM's padding convention
        index_h = pad_top_scipy - pad_top_tvm
        index_w = pad_left_scipy - pad_left_tvm
        for i in range(batch):
            for j in range(out_channel):
                output_np[i, j, :, :] = signal.convolve2d(
                    input_np[i, j // channel_multiplier, :, :],
                    np.rot90(filter_np[j // channel_multiplier, j % channel_multiplier, :, :], 2),
                    mode="same",
                )[index_h:in_height:stride_h, index_w:in_width:stride_w]
    return output_np
def depthwise_conv2d_python_nhwc(input_np, filter_np, stride, padding):
    """Depthwise convolution operator in NHWC layout.

    Parameters
    ----------
    input_np : numpy.ndarray
        4-D with shape [batch, in_height, in_width, in_channel]

    filter_np : numpy.ndarray
        4-D with shape [filter_height, filter_width, in_channel, channel_multiplier]

    stride : list / tuple of 2 ints
        [stride_height, stride_width]

    padding : str
        'VALID' or 'SAME'

    Returns
    -------
    output_np : np.ndarray
        4-D with shape [batch, out_height, out_width, out_channel]
    """
    batch, in_height, in_width, in_channel = input_np.shape
    filter_height, filter_width, _, channel_multiplier = filter_np.shape
    if isinstance(stride, int):
        stride_h = stride_w = stride
    else:
        stride_h, stride_w = stride
    out_channel = in_channel * channel_multiplier
    # calculate output shape
    if padding == "VALID":
        out_height = (in_height - filter_height) // stride_h + 1
        out_width = (in_width - filter_width) // stride_w + 1
        output_np = np.zeros((batch, out_height, out_width, out_channel))
        for i in range(batch):
            for j in range(out_channel):
                # Cross-correlation expressed via convolve2d with a
                # 180-degree rotated filter, then strided slicing.
                output_np[i, :, :, j] = signal.convolve2d(
                    input_np[i, :, :, j // channel_multiplier],
                    np.rot90(filter_np[:, :, j // channel_multiplier, j % channel_multiplier], 2),
                    mode="valid",
                )[
                    0 : (in_height - filter_height + 1) : stride_h,
                    0 : (in_width - filter_width + 1) : stride_w,
                ]
    # elif (rather than a second if) matches the NCHW variant above
    elif padding == "SAME":
        # int() replaces np.int, which was removed in NumPy 1.24.
        out_height = int(np.ceil(float(in_height) / float(stride_h)))
        out_width = int(np.ceil(float(in_width) / float(stride_w)))
        output_np = np.zeros((batch, out_height, out_width, out_channel))
        # Total padding needed, floored at zero. The original used
        # np.max(x, 0), where 0 is interpreted as the *axis* argument,
        # not a lower bound; builtin max(x, 0) is the intended clamp.
        pad_along_height = max((out_height - 1) * stride_h + filter_height - in_height, 0)
        pad_along_width = max((out_width - 1) * stride_w + filter_width - in_width, 0)
        pad_top_tvm = int(np.ceil(float(pad_along_height) / 2))
        pad_left_tvm = int(np.ceil(float(pad_along_width) / 2))
        pad_top_scipy = int(np.ceil(float(filter_height - 1) / 2))
        pad_left_scipy = int(np.ceil(float(filter_width - 1) / 2))
        # Offset between scipy's centering and TVM's padding convention
        index_h = pad_top_scipy - pad_top_tvm
        index_w = pad_left_scipy - pad_left_tvm
        for i in range(batch):
            for j in range(out_channel):
                output_np[i, :, :, j] = signal.convolve2d(
                    input_np[i, :, :, j // channel_multiplier],
                    np.rot90(filter_np[:, :, j // channel_multiplier, j % channel_multiplier], 2),
                    mode="same",
                )[index_h:in_height:stride_h, index_w:in_width:stride_w]
    return output_np
| [
"numpy.zeros",
"numpy.rot90",
"numpy.max"
] | [((2051, 2104), 'numpy.zeros', 'np.zeros', (['(batch, out_channel, out_height, out_width)'], {}), '((batch, out_channel, out_height, out_width))\n', (2059, 2104), True, 'import numpy as np\n'), ((5037, 5090), 'numpy.zeros', 'np.zeros', (['(batch, out_height, out_width, out_channel)'], {}), '((batch, out_height, out_width, out_channel))\n', (5045, 5090), True, 'import numpy as np\n'), ((5834, 5887), 'numpy.zeros', 'np.zeros', (['(batch, out_height, out_width, out_channel)'], {}), '((batch, out_height, out_width, out_channel))\n', (5842, 5887), True, 'import numpy as np\n'), ((2850, 2903), 'numpy.zeros', 'np.zeros', (['(batch, out_channel, out_height, out_width)'], {}), '((batch, out_channel, out_height, out_width))\n', (2858, 2903), True, 'import numpy as np\n'), ((5935, 6001), 'numpy.max', 'np.max', (['((out_height - 1) * stride_h + filter_height - in_height)', '(0)'], {}), '((out_height - 1) * stride_h + filter_height - in_height, 0)\n', (5941, 6001), True, 'import numpy as np\n'), ((6045, 6108), 'numpy.max', 'np.max', (['((out_width - 1) * stride_w + filter_width - in_width)', '(0)'], {}), '((out_width - 1) * stride_w + filter_width - in_width, 0)\n', (6051, 6108), True, 'import numpy as np\n'), ((2951, 3017), 'numpy.max', 'np.max', (['((out_height - 1) * stride_h + filter_height - in_height)', '(0)'], {}), '((out_height - 1) * stride_h + filter_height - in_height, 0)\n', (2957, 3017), True, 'import numpy as np\n'), ((3061, 3124), 'numpy.max', 'np.max', (['((out_width - 1) * stride_w + filter_width - in_width)', '(0)'], {}), '((out_width - 1) * stride_w + filter_width - in_width, 0)\n', (3067, 3124), True, 'import numpy as np\n'), ((2320, 2397), 'numpy.rot90', 'np.rot90', (['filter_np[j // channel_multiplier, j % channel_multiplier, :, :]', '(2)'], {}), '(filter_np[j // channel_multiplier, j % channel_multiplier, :, :], 2)\n', (2328, 2397), True, 'import numpy as np\n'), ((5306, 5383), 'numpy.rot90', 'np.rot90', (['filter_np[:, :, j // channel_multiplier, j % 
channel_multiplier]', '(2)'], {}), '(filter_np[:, :, j // channel_multiplier, j % channel_multiplier], 2)\n', (5314, 5383), True, 'import numpy as np\n'), ((6693, 6770), 'numpy.rot90', 'np.rot90', (['filter_np[:, :, j // channel_multiplier, j % channel_multiplier]', '(2)'], {}), '(filter_np[:, :, j // channel_multiplier, j % channel_multiplier], 2)\n', (6701, 6770), True, 'import numpy as np\n'), ((3709, 3786), 'numpy.rot90', 'np.rot90', (['filter_np[j // channel_multiplier, j % channel_multiplier, :, :]', '(2)'], {}), '(filter_np[j // channel_multiplier, j % channel_multiplier, :, :], 2)\n', (3717, 3786), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Script to implement the network in Google Colab for access to hardware
accelerator i.e. GPUs. With GPUs, the training is accelerated manifold.
@author: rpm1412
"""
#%% Cell 1: Import libraries
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable
#Import libraries for the Neural Network Section
from tensorflow import keras
from tensorflow.keras import layers
from tensorflow.keras.layers import Input, Conv2D, Reshape, Multiply, Lambda, BatchNormalization
from tensorflow.keras.models import Model
import tensorflow.keras.backend as K
from google.colab import drive
from scipy.signal import hilbert
#%% Cell 2: Setting variables
start_epoch = 0
epochs = 5
stop_epoch = start_epoch + epochs
train_scans = 600 #Number of scanning data frames
# Image/Time of Flight data dimensions. Modify according to need
rows = 374
cols = 128
channels = 128
'''
# It is preferable to name the scan data as follows:
# Time of flight data : Dimension : Rows x Cols x Channels
# MVDR Data prior to hilbert transform & log compression : Dimension : Rows x Cols
#Name them with numbers as indexes. An example is. tofc_1,tofc_2 and mvdr_1,mvdr_2
#Naming them with numerical index gives an advantage to pick them for training.
'''
#%% Cell 3: Making the network
class Antirectifier(layers.Layer):
def compute_output_shape(self, input_shape):
shape = list(input_shape)
assert len(shape) == 3 # make sure it is a 3D tensors
shape[-1] *= 2
return tuple(shape)
def call(self, inputs):
inputs -= K.mean(inputs, axis=-1, keepdims=True)
inputs = K.l2_normalize(inputs, axis=-1)
pos = K.relu(inputs)
neg = K.relu(-inputs)
return K.concatenate([pos, neg], axis=-1)
#CNN Model
inputs = Input(shape=(rows,cols,channels)) #Input layer
# Normalizing inputs using channel as axis
inputs_norm = Lambda(lambda x : K.l2_normalize(x,axis=-1))(inputs)
output_1 = Conv2D(32,(3,3), padding='same',kernel_initializer='glorot_normal')(inputs_norm)
act_1 = Antirectifier()(output_1)
B1 = BatchNormalization()(act_1)
output_2 = Conv2D(32,(3,3), padding='same',kernel_initializer='glorot_normal')(B1)
act_2 = Antirectifier()(output_2)
B2 = BatchNormalization()(act_2)
output_3 = Conv2D(64,(3,3), padding='same',kernel_initializer='glorot_normal')(B2)
act_3 = Antirectifier()(output_3)
B3 = BatchNormalization()(act_3)
output_4 = Conv2D(64,(3,3), padding='same',kernel_initializer='glorot_normal')(B3)
act_4 = Antirectifier()(output_4)
B4 = BatchNormalization()(act_4)
# Adaptive weights
output_5 = Conv2D(channels,(3,3), activation = "softmax", padding='same')(B4)
#Beamforming
beamform = Multiply()([inputs,output_5])
beamform_sum = Lambda(lambda x: K.sum(x, axis=-1))(beamform)
output_fig = Reshape((rows,cols))(beamform_sum)
model = Model(inputs=inputs, outputs=output_fig)
# Print a model summary
model.summary()
#%% Cell 4: Compiling the model
def msle(y_true, y_pred):
y_true = K.cast(y_true, y_pred.dtype)
y_true = K.flatten(y_true)
y_pred = K.flatten(y_pred)
first_log = K.log(K.clip(K.abs(y_pred), K.epsilon(), None) )
second_log = K.log(K.clip(K.abs(y_true), K.epsilon(), None) )
return K.mean(K.square(first_log - second_log), axis=-1)
def mse(y_true, y_pred):
y_true = K.cast(y_true, y_pred.dtype)
y_true = K.flatten(y_true)
y_pred = K.flatten(y_pred)
return K.mean(K.square(y_true - y_pred), axis=-1)
learning_rate = 1e-4
adam_lr = keras.optimizers.Adam(lr = learning_rate)
model.compile(optimizer = adam_lr, loss= mse)
#%% Cell 5: Train the model
# Loading Dataset and Training
drive.mount('/content/gdrive')
'''
# Best to save data with the number 1 to 600 as it will be easy to pick them
#to train as mentioned above in cell 2.
'''
# Produces indexed for picking data to train
list_train = np.add(1,np.arange(train_scans))
X_final = np.zeros((30,rows,cols,channels)) # Fixing batch size to be 30
y_final = np.zeros((30,rows,cols)) # Fixing batch size to be 30
for epoch in range(start_epoch, stop_epoch): # Run through all epochs
np.random.shuffle(list_train) # Randomize the entry of data to train
for batch in range(0,600,30):
# To load one batch at a time to prevent memory issues
for scan in range(30):
j=list_train[batch+scan]
# Load your data here one by one by using j as index.
# As mentioned in cell 2, eg. 'tofc_'+str(j) or 'mvdr_'+str(j)
#X= load_ToFC_data_using_j_as_index #dimensions:(rows,cols,channels)
#y= load_mvdr_data_using_j_as_index #dimensions:(rows,cols)
X_final[scan,:,:,:] = X #storing loaded data as a batch
y_final[scan,:,:] = y #storing loaded data as a batch
model.fit(X_final, y_final, batch_size = 30, epochs=epoch+1, initial_epoch=epoch, shuffle=True, verbose = 1) # starts training
file_name_w = '/content/gdrive/My Drive/<your_path>/model.h5' #Enter your path
model.save(file_name_w)
#%% Cell 6: Loading model from Google drive for prediction during validation
#Pre loading saved model
drive.mount('/content/gdrive')
#Please enter your filepath to load saved model file.
file_name = '/content/gdrive/My Drive/<your_path>/model.h5'
model.load_weights(file_name)
#%% Cell 7: Predicting on validation data
#X = load_validation_time_of_flight_data #dimensions:(rows,cols,channels)
Xf[0,:,:,:] = X # Converting to suit entry into the network
#y = load_validation_mvdr_data #dimensions:(rows,cols)
test = model.predict(Xf, batch_size = 1, verbose = 1)
test_image = np.reshape(test[0],(rows,cols))
validation_image = y
#Hilbert transform for B mode imaging and Log compression
test_image = 20*np.log10(np.abs(hilbert(test_image)))
MVDR_image = 20*np.log10(np.abs(hilbert(validation_image)))
#Plot the images
fig, (ax1, ax2) = plt.subplots(1,2,figsize=(10,10))
ax1.set_title("CNN")
im1 = ax1.imshow(test_image, cmap='gray')
divider = make_axes_locatable(ax1)
cax1 = divider.append_axes("right", size="5%", pad=0.05)
fig.colorbar(im1, cax = cax1)
ax2.set_title("MVDR")
im2 = ax2.imshow(MVDR_image, cmap='gray')
divider = make_axes_locatable(ax2)
cax2 = divider.append_axes("right", size="5%", pad=0.05)
fig.colorbar(im2, cax= cax2)
| [
"tensorflow.keras.backend.epsilon",
"tensorflow.keras.layers.Multiply",
"google.colab.drive.mount",
"tensorflow.keras.backend.flatten",
"tensorflow.keras.layers.BatchNormalization",
"numpy.arange",
"tensorflow.keras.layers.Input",
"numpy.reshape",
"tensorflow.keras.layers.Conv2D",
"tensorflow.kera... | [((1858, 1893), 'tensorflow.keras.layers.Input', 'Input', ([], {'shape': '(rows, cols, channels)'}), '(shape=(rows, cols, channels))\n', (1863, 1893), False, 'from tensorflow.keras.layers import Input, Conv2D, Reshape, Multiply, Lambda, BatchNormalization\n'), ((2899, 2939), 'tensorflow.keras.models.Model', 'Model', ([], {'inputs': 'inputs', 'outputs': 'output_fig'}), '(inputs=inputs, outputs=output_fig)\n', (2904, 2939), False, 'from tensorflow.keras.models import Model\n'), ((3533, 3572), 'tensorflow.keras.optimizers.Adam', 'keras.optimizers.Adam', ([], {'lr': 'learning_rate'}), '(lr=learning_rate)\n', (3554, 3572), False, 'from tensorflow import keras\n'), ((3682, 3712), 'google.colab.drive.mount', 'drive.mount', (['"""/content/gdrive"""'], {}), "('/content/gdrive')\n", (3693, 3712), False, 'from google.colab import drive\n'), ((3942, 3978), 'numpy.zeros', 'np.zeros', (['(30, rows, cols, channels)'], {}), '((30, rows, cols, channels))\n', (3950, 3978), True, 'import numpy as np\n'), ((4015, 4041), 'numpy.zeros', 'np.zeros', (['(30, rows, cols)'], {}), '((30, rows, cols))\n', (4023, 4041), True, 'import numpy as np\n'), ((5106, 5136), 'google.colab.drive.mount', 'drive.mount', (['"""/content/gdrive"""'], {}), "('/content/gdrive')\n", (5117, 5136), False, 'from google.colab import drive\n'), ((5590, 5623), 'numpy.reshape', 'np.reshape', (['test[0]', '(rows, cols)'], {}), '(test[0], (rows, cols))\n', (5600, 5623), True, 'import numpy as np\n'), ((5855, 5891), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(2)'], {'figsize': '(10, 10)'}), '(1, 2, figsize=(10, 10))\n', (5867, 5891), True, 'import matplotlib.pyplot as plt\n'), ((5962, 5986), 'mpl_toolkits.axes_grid1.make_axes_locatable', 'make_axes_locatable', (['ax1'], {}), '(ax1)\n', (5981, 5986), False, 'from mpl_toolkits.axes_grid1 import make_axes_locatable\n'), ((6149, 6173), 'mpl_toolkits.axes_grid1.make_axes_locatable', 'make_axes_locatable', (['ax2'], {}), '(ax2)\n', (6168, 
6173), False, 'from mpl_toolkits.axes_grid1 import make_axes_locatable\n'), ((2027, 2097), 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(32)', '(3, 3)'], {'padding': '"""same"""', 'kernel_initializer': '"""glorot_normal"""'}), "(32, (3, 3), padding='same', kernel_initializer='glorot_normal')\n", (2033, 2097), False, 'from tensorflow.keras.layers import Input, Conv2D, Reshape, Multiply, Lambda, BatchNormalization\n'), ((2147, 2167), 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (2165, 2167), False, 'from tensorflow.keras.layers import Input, Conv2D, Reshape, Multiply, Lambda, BatchNormalization\n'), ((2187, 2257), 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(32)', '(3, 3)'], {'padding': '"""same"""', 'kernel_initializer': '"""glorot_normal"""'}), "(32, (3, 3), padding='same', kernel_initializer='glorot_normal')\n", (2193, 2257), False, 'from tensorflow.keras.layers import Input, Conv2D, Reshape, Multiply, Lambda, BatchNormalization\n'), ((2298, 2318), 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (2316, 2318), False, 'from tensorflow.keras.layers import Input, Conv2D, Reshape, Multiply, Lambda, BatchNormalization\n'), ((2338, 2408), 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(64)', '(3, 3)'], {'padding': '"""same"""', 'kernel_initializer': '"""glorot_normal"""'}), "(64, (3, 3), padding='same', kernel_initializer='glorot_normal')\n", (2344, 2408), False, 'from tensorflow.keras.layers import Input, Conv2D, Reshape, Multiply, Lambda, BatchNormalization\n'), ((2449, 2469), 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (2467, 2469), False, 'from tensorflow.keras.layers import Input, Conv2D, Reshape, Multiply, Lambda, BatchNormalization\n'), ((2489, 2559), 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(64)', '(3, 3)'], {'padding': '"""same"""', 'kernel_initializer': '"""glorot_normal"""'}), "(64, (3, 3), padding='same', 
kernel_initializer='glorot_normal')\n", (2495, 2559), False, 'from tensorflow.keras.layers import Input, Conv2D, Reshape, Multiply, Lambda, BatchNormalization\n'), ((2600, 2620), 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (2618, 2620), False, 'from tensorflow.keras.layers import Input, Conv2D, Reshape, Multiply, Lambda, BatchNormalization\n'), ((2659, 2721), 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['channels', '(3, 3)'], {'activation': '"""softmax"""', 'padding': '"""same"""'}), "(channels, (3, 3), activation='softmax', padding='same')\n", (2665, 2721), False, 'from tensorflow.keras.layers import Input, Conv2D, Reshape, Multiply, Lambda, BatchNormalization\n'), ((2751, 2761), 'tensorflow.keras.layers.Multiply', 'Multiply', ([], {}), '()\n', (2759, 2761), False, 'from tensorflow.keras.layers import Input, Conv2D, Reshape, Multiply, Lambda, BatchNormalization\n'), ((2856, 2877), 'tensorflow.keras.layers.Reshape', 'Reshape', (['(rows, cols)'], {}), '((rows, cols))\n', (2863, 2877), False, 'from tensorflow.keras.layers import Input, Conv2D, Reshape, Multiply, Lambda, BatchNormalization\n'), ((3052, 3080), 'tensorflow.keras.backend.cast', 'K.cast', (['y_true', 'y_pred.dtype'], {}), '(y_true, y_pred.dtype)\n', (3058, 3080), True, 'import tensorflow.keras.backend as K\n'), ((3092, 3109), 'tensorflow.keras.backend.flatten', 'K.flatten', (['y_true'], {}), '(y_true)\n', (3101, 3109), True, 'import tensorflow.keras.backend as K\n'), ((3121, 3138), 'tensorflow.keras.backend.flatten', 'K.flatten', (['y_pred'], {}), '(y_pred)\n', (3130, 3138), True, 'import tensorflow.keras.backend as K\n'), ((3362, 3390), 'tensorflow.keras.backend.cast', 'K.cast', (['y_true', 'y_pred.dtype'], {}), '(y_true, y_pred.dtype)\n', (3368, 3390), True, 'import tensorflow.keras.backend as K\n'), ((3402, 3419), 'tensorflow.keras.backend.flatten', 'K.flatten', (['y_true'], {}), '(y_true)\n', (3411, 3419), True, 'import tensorflow.keras.backend as K\n'), 
((3431, 3448), 'tensorflow.keras.backend.flatten', 'K.flatten', (['y_pred'], {}), '(y_pred)\n', (3440, 3448), True, 'import tensorflow.keras.backend as K\n'), ((3907, 3929), 'numpy.arange', 'np.arange', (['train_scans'], {}), '(train_scans)\n', (3916, 3929), True, 'import numpy as np\n'), ((4142, 4171), 'numpy.random.shuffle', 'np.random.shuffle', (['list_train'], {}), '(list_train)\n', (4159, 4171), True, 'import numpy as np\n'), ((1639, 1677), 'tensorflow.keras.backend.mean', 'K.mean', (['inputs'], {'axis': '(-1)', 'keepdims': '(True)'}), '(inputs, axis=-1, keepdims=True)\n', (1645, 1677), True, 'import tensorflow.keras.backend as K\n'), ((1695, 1726), 'tensorflow.keras.backend.l2_normalize', 'K.l2_normalize', (['inputs'], {'axis': '(-1)'}), '(inputs, axis=-1)\n', (1709, 1726), True, 'import tensorflow.keras.backend as K\n'), ((1741, 1755), 'tensorflow.keras.backend.relu', 'K.relu', (['inputs'], {}), '(inputs)\n', (1747, 1755), True, 'import tensorflow.keras.backend as K\n'), ((1770, 1785), 'tensorflow.keras.backend.relu', 'K.relu', (['(-inputs)'], {}), '(-inputs)\n', (1776, 1785), True, 'import tensorflow.keras.backend as K\n'), ((1801, 1835), 'tensorflow.keras.backend.concatenate', 'K.concatenate', (['[pos, neg]'], {'axis': '(-1)'}), '([pos, neg], axis=-1)\n', (1814, 1835), True, 'import tensorflow.keras.backend as K\n'), ((3282, 3314), 'tensorflow.keras.backend.square', 'K.square', (['(first_log - second_log)'], {}), '(first_log - second_log)\n', (3290, 3314), True, 'import tensorflow.keras.backend as K\n'), ((3465, 3490), 'tensorflow.keras.backend.square', 'K.square', (['(y_true - y_pred)'], {}), '(y_true - y_pred)\n', (3473, 3490), True, 'import tensorflow.keras.backend as K\n'), ((1980, 2006), 'tensorflow.keras.backend.l2_normalize', 'K.l2_normalize', (['x'], {'axis': '(-1)'}), '(x, axis=-1)\n', (1994, 2006), True, 'import tensorflow.keras.backend as K\n'), ((2813, 2830), 'tensorflow.keras.backend.sum', 'K.sum', (['x'], {'axis': '(-1)'}), '(x, axis=-1)\n', 
(2818, 2830), True, 'import tensorflow.keras.backend as K\n'), ((3166, 3179), 'tensorflow.keras.backend.abs', 'K.abs', (['y_pred'], {}), '(y_pred)\n', (3171, 3179), True, 'import tensorflow.keras.backend as K\n'), ((3181, 3192), 'tensorflow.keras.backend.epsilon', 'K.epsilon', ([], {}), '()\n', (3190, 3192), True, 'import tensorflow.keras.backend as K\n'), ((3230, 3243), 'tensorflow.keras.backend.abs', 'K.abs', (['y_true'], {}), '(y_true)\n', (3235, 3243), True, 'import tensorflow.keras.backend as K\n'), ((3245, 3256), 'tensorflow.keras.backend.epsilon', 'K.epsilon', ([], {}), '()\n', (3254, 3256), True, 'import tensorflow.keras.backend as K\n'), ((5736, 5755), 'scipy.signal.hilbert', 'hilbert', (['test_image'], {}), '(test_image)\n', (5743, 5755), False, 'from scipy.signal import hilbert\n'), ((5791, 5816), 'scipy.signal.hilbert', 'hilbert', (['validation_image'], {}), '(validation_image)\n', (5798, 5816), False, 'from scipy.signal import hilbert\n')] |
'''
Various types of "assist", i.e. different methods for shared control
between neural control and machine control. Only applies in cases where
some knowledge of the task goals is available.
'''
import numpy as np
from riglib.stereo_opengl import ik
from riglib.bmi import feedback_controllers
import pickle
from utils.angle_utils import *
from utils.constants import *
class Assister(object):
'''
Parent class for various methods of assistive BMI. Children of this class
can compute an "optimal" input to the system, which is mixed in with the input
derived from the subject's neural input. The parent exists primarily for
interface standardization and type-checking.
'''
def calc_assisted_BMI_state(self, current_state, target_state, assist_level, mode=None, **kwargs):
'''
Main assist calculation function
Parameters
----------
current_state: np.ndarray of shape (n_states, 1)
Vector representing the current state of the prosthesis
target_state: np.ndarray of shape (n_states, 1)
Vector representing the target state of the prosthesis, i.e. the optimal state for the prosthesis to be in
assist_level: float
Number indicating the level of the assist. This can in general have arbitrary units but most assisters
will have this be a number in the range (0, 1) where 0 is no assist and 1 is full assist
mode: hashable type, optional, default=None
Indicator of which mode of the assistive controller to use. When applied, this 'mode' is used as a dictionary key and must be hashable
kwargs: additional keyword arguments
These are ignored
Returns
-------
'''
pass
def __call__(self, *args, **kwargs):
'''
Wrapper for self.calc_assisted_BMI_state
'''
return self.calc_assisted_BMI_state(*args, **kwargs)
class FeedbackControllerAssist(Assister):
'''
Assister where the machine control is an LQR controller, possibly with different 'modes' depending on the state of the task
'''
def __init__(self, fb_ctrl, style='additive'):
'''
Parameters
----------
fb_ctrl : feedback_controllers.FeedbackController instance
TODO
Returns
-------
FeedbackControllerAssist instance
'''
self.fb_ctrl = fb_ctrl
self.style = style
assert self.style in ['additive', 'mixing', 'additive_cov']
def calc_assisted_BMI_state(self, current_state, target_state, assist_level, mode=None, **kwargs):
'''
See docs for Assister.calc_assisted_BMI_state
'''
if self.style == 'additive':
Bu = assist_level * self.fb_ctrl(current_state, target_state, mode=mode)
return dict(Bu=Bu, assist_level=0)
elif self.style == 'mixing':
x_assist = self.fb_ctrl.calc_next_state(current_state, target_state, mode=mode)
return dict(x_assist=x_assist, assist_level=assist_level)
elif self.style == 'additive_cov':
F = self.get_F(assist_level)
return dict(F=F, x_target=target_state)
class FeedbackControllerAssist_StateSpecAssistLevels(FeedbackControllerAssist):
'''
Assister where machine controller is LQR controller, but different assist_levels for
different control variables (e.g. X,Y,PSI in ArmAssist vs. Rehand)
'''
def __init__(self, fb_ctrl, style='additive', **kwargs):
super(FeedbackControllerAssist_StateSpecAssistLevels, self).__init__(fb_ctrl, style)
# Currently this assister assumes that plant is IsMore Plant:
self.assist_level_state_ix = dict()
self.assist_level_state_ix[0] = np.array([0, 1, 2, 7, 8, 9]) # ARM ASSIST
self.assist_level_state_ix[1] = np.array([3, 4, 5, 6, 10, 11, 12, 13]) # REHAND
def calc_assisted_BMI_state(self, current_state, target_state, assist_level, mode=None, **kwargs):
if self.style == 'additive':
Bu = self.fb_ctrl(current_state, target_state, mode=mode)
for ia, al in enumerate(assist_level):
Bu[self.assist_level_state_ix[ia]] = al*Bu[self.assist_level_state_ix[ia]]
return dict(Bu=Bu, assist_level=0)
elif self.style == 'mixing':
x_assist = self.fb_ctrl.calc_next_state(current_state, target_state, mode=mode)
return dict(x_assist=x_assist, assist_level=assist_level, assist_level_ix=self.assist_level_state_ix)
class SSMLFCAssister(FeedbackControllerAssist):
'''
An LFC assister where the state-space matrices (A, B) are specified from the Decoder's 'ssm' attribute
'''
def __init__(self, ssm, Q, R, **kwargs):
'''
Constructor for SSMLFCAssister
Parameters
----------
ssm: riglib.bmi.state_space_models.StateSpace instance
The state-space model's A and B matrices represent the system to be controlled
args: positional arguments
These are ignored (none are necessary)
kwargs: keyword arguments
The constructor must be supplied with the 'kin_chain' kwarg, which must have the attribute 'link_lengths'
This is specific to 'KinematicChain' plants.
Returns
-------
SSMLFCAssister instance
'''
if ssm is None:
raise ValueError("SSMLFCAssister requires a state space model!")
A, B, W = ssm.get_ssm_matrices()
self.lqr_controller = feedback_controllers.LQRController(A, B, Q, R)
| [
"numpy.array",
"riglib.bmi.feedback_controllers.LQRController"
] | [((3812, 3840), 'numpy.array', 'np.array', (['[0, 1, 2, 7, 8, 9]'], {}), '([0, 1, 2, 7, 8, 9])\n', (3820, 3840), True, 'import numpy as np\n'), ((3894, 3932), 'numpy.array', 'np.array', (['[3, 4, 5, 6, 10, 11, 12, 13]'], {}), '([3, 4, 5, 6, 10, 11, 12, 13])\n', (3902, 3932), True, 'import numpy as np\n'), ((5627, 5673), 'riglib.bmi.feedback_controllers.LQRController', 'feedback_controllers.LQRController', (['A', 'B', 'Q', 'R'], {}), '(A, B, Q, R)\n', (5661, 5673), False, 'from riglib.bmi import feedback_controllers\n')] |
import os
import numpy as np
from typing import List
from paddle.io import Dataset
# Input sequences begin with '[CLS]' and use '[SEP]' to split conversation
# content (previous part, current part, following part, etc.). If a split
# part contains multiple conversation turns, INNER_SEP further separates them.
INNER_SEP = '[unused0]'  # reserved vocab token used as an intra-segment separator
def get_label_map(label_list):
    """Return a dict mapping each label in ``label_list`` to its index."""
    return {label: index for index, label in enumerate(label_list)}
class UDCv1(Dataset):
    """
    The UDCv1 dataset is using in task Dialogue Response Selection.
    The source dataset is UDCv1(Ubuntu Dialogue Corpus v1.0). See detail at
    http://dataset.cs.mcgill.ca/ubuntu-corpus-1.0/
    """
    # Responses longer than this many tokens are truncated so the
    # conversation context keeps most of the sequence budget.
    MAX_LEN_OF_RESPONSE = 60
    LABEL_MAP = get_label_map(['0', '1'])

    def __init__(self, data_dir, mode='train'):
        super(UDCv1, self).__init__()
        self._data_dir = data_dir
        self._mode = mode
        self.read_data()

    def read_data(self):
        """Load tab-separated rows (label, context turns..., response) for the
        split selected by ``self._mode``.

        Raises:
            ValueError: if ``self._mode`` is not 'train', 'dev' or 'test'.
        """
        if self._mode == 'train':
            data_path = os.path.join(self._data_dir, 'train.txt')
        elif self._mode == 'dev':
            # NOTE(review): the dev split intentionally reads the reduced
            # 'dev.txt-small' file -- confirm this matches the data layout.
            data_path = os.path.join(self._data_dir, 'dev.txt-small')
        elif self._mode == 'test':
            data_path = os.path.join(self._data_dir, 'test.txt')
        else:
            # Previously an unknown mode crashed later with UnboundLocalError
            # when data_path was used; fail fast with a clear message instead.
            raise ValueError('Unsupported mode: %s' % self._mode)
        self.data = []
        with open(data_path, 'r', encoding='utf8') as fin:
            for line in fin:
                if not line:
                    continue
                arr = line.rstrip('\n').split('\t')
                if len(arr) < 3:
                    # Malformed rows are reported and skipped, not fatal.
                    print('Data format error: %s' % '\t'.join(arr))
                    print(
                        'Data row contains at least three parts: label\tconversation1\t.....\tresponse.'
                    )
                    continue
                label = arr[0]
                text_a = arr[1:-1]  # all conversation turns before the response
                text_b = arr[-1]    # candidate response
                self.data.append([label, text_a, text_b])

    @classmethod
    def get_label(cls, label):
        """Translate a raw label string into its integer id."""
        return cls.LABEL_MAP[label]

    @classmethod
    def num_classes(cls):
        """Number of distinct classes (binary: response selected or not)."""
        return len(cls.LABEL_MAP)

    @classmethod
    def convert_example(cls, example, tokenizer, max_seq_length=512):
        """ Convert a glue example into necessary features. """

        def _truncate_and_concat(text_a: List[str], text_b: str, tokenizer,
                                 max_seq_length):
            # Cap the response first so the context keeps most of the budget.
            tokens_b = tokenizer(text_b)
            tokens_b = tokens_b[:min(cls.MAX_LEN_OF_RESPONSE, len(tokens_b))]
            tokens_a = []
            for text in text_a:
                tokens_a.extend(tokenizer(text))
                tokens_a.append(INNER_SEP)
            tokens_a = tokens_a[:-1]  # drop trailing separator
            # Keep the most recent context tokens; 3 accounts for
            # [CLS], [SEP] and the final [SEP].
            if len(tokens_a) > max_seq_length - len(tokens_b) - 3:
                tokens_a = tokens_a[len(tokens_a) - max_seq_length +
                                    len(tokens_b) + 3:]
            tokens, segment_ids = [], []
            tokens.extend([tokenizer.cls_token] + tokens_a +
                          [tokenizer.sep_token])
            segment_ids.extend([0] * len(tokens))
            tokens.extend(tokens_b + [tokenizer.sep_token])
            segment_ids.extend([1] * (len(tokens_b) + 1))
            input_ids = tokenizer.convert_tokens_to_ids(tokens)
            return input_ids, segment_ids

        label, text_a, text_b = example
        label = np.array([cls.get_label(label)], dtype='int64')
        input_ids, segment_ids = _truncate_and_concat(text_a, text_b,
                                                      tokenizer,
                                                      max_seq_length)
        return input_ids, segment_ids, label

    def __getitem__(self, index):
        return self.data[index]

    def __len__(self):
        return len(self.data)
class DSTC2(Dataset):
    """
    The dataset DSTC2 is using in task Dialogue State Tracking.
    The source dataset is DSTC2(Dialog State Tracking Challenges 2). See detail at
    https://github.com/matthen/dstc
    """
    LABEL_MAP = get_label_map([str(i) for i in range(217)])

    def __init__(self, data_dir, mode='train'):
        super(DSTC2, self).__init__()
        self._data_dir = data_dir
        self._mode = mode
        self.read_data()

    def read_data(self):
        """Load dialogues, attaching up to 20 turns of history to each example.

        Raises:
            ValueError: if ``self._mode`` is not 'train', 'dev' or 'test'.
        """

        def _concat_dialogues(examples):
            """concat multi turns dialogues"""
            new_examples = []
            max_turns = 20
            for i in range(len(examples)):
                multi_turns = examples[max(i - max_turns, 0):i + 1]
                new_qa = '\1'.join([example[0] for example in multi_turns])
                new_examples.append((new_qa.split('\1'), examples[i][1]))
            return new_examples

        if self._mode == 'train':
            data_path = os.path.join(self._data_dir, 'train.txt')
        elif self._mode == 'dev':
            data_path = os.path.join(self._data_dir, 'dev.txt')
        elif self._mode == 'test':
            data_path = os.path.join(self._data_dir, 'test.txt')
        else:
            # Previously an unknown mode crashed later with UnboundLocalError
            # when data_path was used; fail fast with a clear message instead.
            raise ValueError('Unsupported mode: %s' % self._mode)
        self.data = []
        with open(data_path, 'r', encoding='utf8') as fin:
            pre_idx = -1
            examples = []
            for line in fin:
                if not line:
                    continue
                arr = line.rstrip('\n').split('\t')
                if len(arr) != 3:
                    print('Data format error: %s' % '\t'.join(arr))
                    print(
                        'Data row should contains three parts: id\tquestion\1answer\tlabel1 label2 ...'
                    )
                    continue
                idx = arr[0]
                qa = arr[1]
                label_list = arr[2].split()
                if idx != pre_idx:
                    # A new dialogue id starts: flush the finished dialogue.
                    # (The old guard `idx != 0` compared a str with an int and
                    # was always true; flushing an empty list is a no-op, so
                    # `if examples` is behaviorally equivalent and clearer.)
                    if examples:
                        examples = _concat_dialogues(examples)
                        self.data.extend(examples)
                        examples = []
                    pre_idx = idx
                examples.append((qa, label_list))
            if examples:
                examples = _concat_dialogues(examples)
                self.data.extend(examples)

    @classmethod
    def get_label(cls, label):
        """Translate a raw label string into its integer id."""
        return cls.LABEL_MAP[label]

    @classmethod
    def num_classes(cls):
        """Number of distinct dialogue-state labels."""
        return len(cls.LABEL_MAP)

    @classmethod
    def convert_example(cls, example, tokenizer, max_seq_length=512):
        """ Convert a glue example into necessary features. """

        def _truncate_and_concat(texts: List[str], tokenizer, max_seq_length):
            # Join all turns with INNER_SEP, keeping the most recent tokens.
            tokens = []
            for text in texts:
                tokens.extend(tokenizer(text))
                tokens.append(INNER_SEP)
            tokens = tokens[:-1]  # drop trailing separator
            if len(tokens) > max_seq_length - 2:
                tokens = tokens[len(tokens) - max_seq_length + 2:]
            tokens = [tokenizer.cls_token] + tokens + [tokenizer.sep_token]
            segment_ids = [0] * len(tokens)
            input_ids = tokenizer.convert_tokens_to_ids(tokens)
            return input_ids, segment_ids

        texts, labels = example
        input_ids, segment_ids = _truncate_and_concat(texts, tokenizer,
                                                      max_seq_length)
        labels = [cls.get_label(l) for l in labels]
        # Multi-label task: emit a multi-hot vector over all classes.
        label = np.zeros(cls.num_classes(), dtype='int64')
        for l in labels:
            label[l] = 1
        return input_ids, segment_ids, label

    def __getitem__(self, index):
        return self.data[index]

    def __len__(self):
        return len(self.data)
class ATIS_DSF(Dataset):
    """
    The dataset ATIS_DSF is using in task Dialogue Slot Filling.
    The source dataset is ATIS(Airline Travel Information System). See detail at
    https://www.kaggle.com/siddhadev/ms-cntk-atis
    """
    LABEL_MAP = get_label_map([str(i) for i in range(130)])

    def __init__(self, data_dir, mode='train'):
        super(ATIS_DSF, self).__init__()
        self._data_dir = data_dir
        self._mode = mode
        self.read_data()

    def read_data(self):
        """Load tab-separated rows (text, space-separated slot labels) for the
        split selected by ``self._mode``.

        Raises:
            ValueError: if ``self._mode`` is not 'train', 'dev' or 'test'.
        """
        if self._mode == 'train':
            data_path = os.path.join(self._data_dir, 'train.txt')
        elif self._mode == 'dev':
            data_path = os.path.join(self._data_dir, 'dev.txt')
        elif self._mode == 'test':
            data_path = os.path.join(self._data_dir, 'test.txt')
        else:
            # Previously an unknown mode crashed later with UnboundLocalError
            # when data_path was used; fail fast with a clear message instead.
            raise ValueError('Unsupported mode: %s' % self._mode)
        self.data = []
        with open(data_path, 'r', encoding='utf8') as fin:
            for line in fin:
                if not line:
                    continue
                arr = line.rstrip('\n').split('\t')
                if len(arr) != 2:
                    print('Data format error: %s' % '\t'.join(arr))
                    print(
                        'Data row should contains two parts: conversation_content\tlabel1 label2 label3.'
                    )
                    continue
                text = arr[0]
                label_list = arr[1].split()  # one slot label per word
                self.data.append([text, label_list])

    @classmethod
    def get_label(cls, label):
        """Translate a raw label string into its integer id."""
        return cls.LABEL_MAP[label]

    @classmethod
    def num_classes(cls):
        """Number of distinct slot labels."""
        return len(cls.LABEL_MAP)

    @classmethod
    def convert_example(cls, example, tokenizer, max_seq_length=512):
        """ Convert a glue example into necessary features. """
        text, labels = example
        tokens, label_list = [], []
        words = text.split()
        assert len(words) == len(labels)
        for word, label in zip(words, labels):
            # A word may be split into several word pieces; repeat its label
            # for every piece so tokens and labels stay aligned.
            piece_words = tokenizer(word)
            tokens.extend(piece_words)
            label = cls.get_label(label)
            label_list.extend([label] * len(piece_words))
        if len(tokens) > max_seq_length - 2:
            # BUGFIX: compute the cut offset BEFORE truncating tokens. The old
            # code sliced label_list with len(tokens) measured after tokens had
            # already been shortened, so the offset evaluated to 0, label_list
            # was never truncated, and labels fell out of alignment with
            # tokens.
            offset = len(tokens) - max_seq_length + 2
            tokens = tokens[offset:]
            label_list = label_list[offset:]
        tokens = [tokenizer.cls_token] + tokens + [tokenizer.sep_token]
        label_list = [0] + label_list + [0]  # [CLS]/[SEP] carry label 0
        segment_ids = [0] * len(tokens)
        input_ids = tokenizer.convert_tokens_to_ids(tokens)
        label = np.array(label_list, dtype='int64')
        return input_ids, segment_ids, label

    def __getitem__(self, index):
        return self.data[index]

    def __len__(self):
        return len(self.data)
class ATIS_DID(Dataset):
    """
    The dataset ATIS_ID is using in task Dialogue Intent Detection.
    The source dataset is ATIS(Airline Travel Information System). See detail at
    https://www.kaggle.com/siddhadev/ms-cntk-atis
    """
    LABEL_MAP = get_label_map([str(i) for i in range(26)])

    def __init__(self, data_dir, mode='train'):
        super(ATIS_DID, self).__init__()
        self._data_dir = data_dir
        self._mode = mode
        self.read_data()

    def read_data(self):
        """Read tab-separated (label, text) rows of the selected split."""
        if self._mode == 'train':
            data_path = os.path.join(self._data_dir, 'train.txt')
        elif self._mode == 'dev':
            data_path = os.path.join(self._data_dir, 'dev.txt')
        elif self._mode == 'test':
            data_path = os.path.join(self._data_dir, 'test.txt')
        self.data = []
        with open(data_path, 'r', encoding='utf8') as fin:
            for raw_line in fin:
                if not raw_line:
                    continue
                fields = raw_line.rstrip('\n').split('\t')
                if len(fields) != 2:
                    # Report and skip malformed rows.
                    print('Data format error: %s' % '\t'.join(fields))
                    print(
                        'Data row should contains two parts: label\tconversation_content.'
                    )
                    continue
                self.data.append([fields[0], fields[1]])

    @classmethod
    def get_label(cls, label):
        """Translate a raw label string into its integer id."""
        return cls.LABEL_MAP[label]

    @classmethod
    def num_classes(cls):
        """Number of distinct intent classes."""
        return len(cls.LABEL_MAP)

    @classmethod
    def convert_example(cls, example, tokenizer, max_seq_length=512):
        """ Convert a glue example into necessary features. """
        label, text = example
        tokens = tokenizer(text)
        limit = max_seq_length - 2  # room for [CLS] and [SEP]
        if len(tokens) > limit:
            tokens = tokens[-limit:]  # keep the most recent tokens
        tokens = [tokenizer.cls_token] + tokens + [tokenizer.sep_token]
        segment_ids = [0] * len(tokens)
        input_ids = tokenizer.convert_tokens_to_ids(tokens)
        label = np.array([cls.get_label(label)], dtype='int64')
        return input_ids, segment_ids, label

    def __getitem__(self, index):
        return self.data[index]

    def __len__(self):
        return len(self.data)
def read_da_data(data_dir, mode):
    """Read dialogue-act data of the given split from ``data_dir``.

    Each input row has four tab-separated parts: id, label, caller, text.
    Rows sharing an id form one dialogue; every utterance becomes one sample
    together with up to 5 previous and 2 following utterances as context.

    Args:
        data_dir: directory containing train.txt / dev.txt / test.txt.
        mode: one of 'train', 'dev', 'test'.

    Returns:
        A list of [label, pre_txt, cur_txt, suf_txt] samples, where pre_txt
        and suf_txt are lists of "caller : text" strings.

    Raises:
        ValueError: if ``mode`` is not 'train', 'dev' or 'test'.
    """

    def _concat_dialogues(examples):
        """concat multi turns dialogues"""
        new_examples = []
        for i in range(len(examples)):
            label, caller, text = examples[i]
            cur_txt = "%s : %s" % (caller, text)
            pre_txt = [
                "%s : %s" % (item[1], item[2])
                for item in examples[max(0, i - 5):i]
            ]
            suf_txt = [
                "%s : %s" % (item[1], item[2])
                for item in examples[i + 1:min(len(examples), i + 3)]
            ]
            sample = [label, pre_txt, cur_txt, suf_txt]
            new_examples.append(sample)
        return new_examples

    if mode == 'train':
        data_path = os.path.join(data_dir, 'train.txt')
    elif mode == 'dev':
        data_path = os.path.join(data_dir, 'dev.txt')
    elif mode == 'test':
        data_path = os.path.join(data_dir, 'test.txt')
    else:
        # Previously an unknown mode crashed later with UnboundLocalError
        # when data_path was used; fail fast with a clear message instead.
        raise ValueError('Unsupported mode: %s' % mode)
    data = []
    with open(data_path, 'r', encoding='utf8') as fin:
        pre_idx = -1
        examples = []
        for line in fin:
            if not line:
                continue
            arr = line.rstrip('\n').split('\t')
            if len(arr) != 4:
                print('Data format error: %s' % '\t'.join(arr))
                print(
                    'Data row should contains four parts: id\tlabel\tcaller\tconversation_content.'
                )
                continue
            idx, label, caller, text = arr
            if idx != pre_idx:
                # A new dialogue starts: flush the finished one. (The old
                # guard `idx != 0` compared a str with an int and was always
                # true; flushing an empty list is a no-op, so `if examples`
                # is behaviorally equivalent and clearer.)
                if examples:
                    examples = _concat_dialogues(examples)
                    data.extend(examples)
                    examples = []
                pre_idx = idx
            examples.append((label, caller, text))
        if examples:
            examples = _concat_dialogues(examples)
            data.extend(examples)
    return data
def truncate_and_concat(pre_txt: List[str],
                        cur_txt: str,
                        suf_txt: List[str],
                        tokenizer,
                        max_seq_length,
                        max_len_of_cur_text):
    """Tokenize and join previous/current/following texts into one input.

    The current utterance is capped at ``max_len_of_cur_text`` tokens; the
    surrounding context is truncated so everything fits in
    ``max_seq_length``. Returns (input_ids, segment_ids); the current
    utterance uses segment id 1, the context segments use 0.
    """

    def _tokenize_parts(parts):
        # Tokenize each part, separating parts with INNER_SEP.
        joined = []
        for part in parts:
            joined.extend(tokenizer(part))
            joined.append(INNER_SEP)
        return joined[:-1]  # drop trailing separator ([] stays [])

    cur_tokens = tokenizer(cur_txt)[:max_len_of_cur_text]
    pre_tokens = _tokenize_parts(pre_txt)
    suf_tokens = _tokenize_parts(suf_txt)
    # Budget for context: max_seq_length minus [CLS], three [SEP]s, and the
    # current utterance.
    budget = max_seq_length - 4 - len(cur_tokens)
    if len(pre_tokens) + len(suf_tokens) > budget:
        # Split the budget between both context sides, favouring the longer
        # one; keep the END of pre_tokens and the START of suf_tokens.
        if len(pre_tokens) > len(suf_tokens):
            suf_tokens = suf_tokens[:int(budget / 2)]
            keep_pre = budget - len(suf_tokens)
            pre_tokens = pre_tokens[max(0, len(pre_tokens) - keep_pre):]
        else:
            keep_pre = int(budget / 2)
            pre_tokens = pre_tokens[max(0, len(pre_tokens) - keep_pre):]
            suf_tokens = suf_tokens[:budget - len(pre_tokens)]
    tokens = [tokenizer.cls_token] + pre_tokens + [tokenizer.sep_token]
    segment_ids = [0] * len(tokens)
    tokens += cur_tokens + [tokenizer.sep_token]
    segment_ids += [1] * (len(cur_tokens) + 1)
    if suf_tokens:
        tokens += suf_tokens + [tokenizer.sep_token]
        segment_ids += [0] * (len(suf_tokens) + 1)
    return tokenizer.convert_tokens_to_ids(tokens), segment_ids
class MRDA(Dataset):
    """Dialogue-act dataset built from MRDA (Meeting Recorder Dialogue Act).

    See https://www.aclweb.org/anthology/W04-2319.pdf for details on the
    source corpus.  Five dialogue-act labels are used.
    """
    MAX_LEN_OF_CUR_TEXT = 50
    LABEL_MAP = get_label_map([str(i) for i in range(5)])
    def __init__(self, data_dir, mode='train'):
        super(MRDA, self).__init__()
        self.data = read_da_data(data_dir, mode)
    @classmethod
    def get_label(cls, label):
        # Map the raw label string to its integer id.
        return cls.LABEL_MAP[label]
    @classmethod
    def num_classes(cls):
        return len(cls.LABEL_MAP)
    @classmethod
    def convert_example(cls, example, tokenizer, max_seq_length=512):
        """Turn one ``(label, pre, cur, suf)`` example into model features."""
        raw_label, pre_txt, cur_txt, suf_txt = example
        label_id = np.array([cls.get_label(raw_label)], dtype='int64')
        input_ids, segment_ids = truncate_and_concat(
            pre_txt, cur_txt, suf_txt, tokenizer, max_seq_length,
            cls.MAX_LEN_OF_CUR_TEXT)
        return input_ids, segment_ids, label_id
    def __getitem__(self, index):
        return self.data[index]
    def __len__(self):
        return len(self.data)
class SwDA(Dataset):
    """Dialogue-act dataset built from SwDA (Switchboard Dialog Act).

    See http://compprag.christopherpotts.net/swda.html for details on the
    source corpus.  Forty-two dialogue-act labels are used.
    """
    MAX_LEN_OF_CUR_TEXT = 50
    LABEL_MAP = get_label_map([str(i) for i in range(42)])
    def __init__(self, data_dir, mode='train'):
        super(SwDA, self).__init__()
        self.data = read_da_data(data_dir, mode)
    @classmethod
    def get_label(cls, label):
        # Map the raw label string to its integer id.
        return cls.LABEL_MAP[label]
    @classmethod
    def num_classes(cls):
        return len(cls.LABEL_MAP)
    @classmethod
    def convert_example(cls, example, tokenizer, max_seq_length=512):
        """Turn one ``(label, pre, cur, suf)`` example into model features."""
        raw_label, pre_txt, cur_txt, suf_txt = example
        label_id = np.array([cls.get_label(raw_label)], dtype='int64')
        input_ids, segment_ids = truncate_and_concat(
            pre_txt, cur_txt, suf_txt, tokenizer, max_seq_length,
            cls.MAX_LEN_OF_CUR_TEXT)
        return input_ids, segment_ids, label_id
    def __getitem__(self, index):
        return self.data[index]
    def __len__(self):
        return len(self.data)
| [
"numpy.array",
"os.path.join"
] | [((10047, 10082), 'numpy.array', 'np.array', (['label_list'], {'dtype': '"""int64"""'}), "(label_list, dtype='int64')\n", (10055, 10082), True, 'import numpy as np\n'), ((13277, 13312), 'os.path.join', 'os.path.join', (['data_dir', '"""train.txt"""'], {}), "(data_dir, 'train.txt')\n", (13289, 13312), False, 'import os\n'), ((1062, 1103), 'os.path.join', 'os.path.join', (['self._data_dir', '"""train.txt"""'], {}), "(self._data_dir, 'train.txt')\n", (1074, 1103), False, 'import os\n'), ((4786, 4827), 'os.path.join', 'os.path.join', (['self._data_dir', '"""train.txt"""'], {}), "(self._data_dir, 'train.txt')\n", (4798, 4827), False, 'import os\n'), ((8083, 8124), 'os.path.join', 'os.path.join', (['self._data_dir', '"""train.txt"""'], {}), "(self._data_dir, 'train.txt')\n", (8095, 8124), False, 'import os\n'), ((10809, 10850), 'os.path.join', 'os.path.join', (['self._data_dir', '"""train.txt"""'], {}), "(self._data_dir, 'train.txt')\n", (10821, 10850), False, 'import os\n'), ((13357, 13390), 'os.path.join', 'os.path.join', (['data_dir', '"""dev.txt"""'], {}), "(data_dir, 'dev.txt')\n", (13369, 13390), False, 'import os\n'), ((1162, 1207), 'os.path.join', 'os.path.join', (['self._data_dir', '"""dev.txt-small"""'], {}), "(self._data_dir, 'dev.txt-small')\n", (1174, 1207), False, 'import os\n'), ((4886, 4925), 'os.path.join', 'os.path.join', (['self._data_dir', '"""dev.txt"""'], {}), "(self._data_dir, 'dev.txt')\n", (4898, 4925), False, 'import os\n'), ((8183, 8222), 'os.path.join', 'os.path.join', (['self._data_dir', '"""dev.txt"""'], {}), "(self._data_dir, 'dev.txt')\n", (8195, 8222), False, 'import os\n'), ((10909, 10948), 'os.path.join', 'os.path.join', (['self._data_dir', '"""dev.txt"""'], {}), "(self._data_dir, 'dev.txt')\n", (10921, 10948), False, 'import os\n'), ((13436, 13470), 'os.path.join', 'os.path.join', (['data_dir', '"""test.txt"""'], {}), "(data_dir, 'test.txt')\n", (13448, 13470), False, 'import os\n'), ((1267, 1307), 'os.path.join', 'os.path.join', 
(['self._data_dir', '"""test.txt"""'], {}), "(self._data_dir, 'test.txt')\n", (1279, 1307), False, 'import os\n'), ((4985, 5025), 'os.path.join', 'os.path.join', (['self._data_dir', '"""test.txt"""'], {}), "(self._data_dir, 'test.txt')\n", (4997, 5025), False, 'import os\n'), ((8282, 8322), 'os.path.join', 'os.path.join', (['self._data_dir', '"""test.txt"""'], {}), "(self._data_dir, 'test.txt')\n", (8294, 8322), False, 'import os\n'), ((11008, 11048), 'os.path.join', 'os.path.join', (['self._data_dir', '"""test.txt"""'], {}), "(self._data_dir, 'test.txt')\n", (11020, 11048), False, 'import os\n')] |
from IPython.terminal.embed import embed
from numpy.lib.function_base import _angle_dispatcher
import torch
import numpy as np
class AverageValueMeter(object):
    """Running-average tracker.

    Accumulates weighted observations and exposes the last value (``val``),
    the weighted sum (``sum``), the observation count (``count``) and the
    running mean (``avg``).
    """
    def __init__(self):
        self.reset()
    def reset(self):
        """Clear all accumulated statistics."""
        self.val, self.avg, self.sum = 0, 0, 0
        self.count = 0.0
    def update(self, val, n=1):
        """Record ``val`` observed ``n`` times and refresh the mean."""
        self.val = val
        self.count = self.count + n
        self.sum = self.sum + val * n
        self.avg = self.sum / self.count
def set_requires_grad(nets, requires_grad=False):
    """Toggle ``requires_grad`` on every parameter of the given network(s).

    ``nets`` may be a single module or a list of modules; ``None`` entries
    in a list are skipped.
    """
    module_list = nets if isinstance(nets, list) else [nets]
    for module in module_list:
        if module is None:
            continue
        for param in module.parameters():
            param.requires_grad = requires_grad
def save_model(path, net, net_d=None):
    """Serialize DataParallel-wrapped network weights to ``path``.

    Stores the unwrapped (``.module``) state dict of ``net`` under
    ``'net_state_dict'`` and, when a discriminator ``net_d`` is given, its
    unwrapped state dict under ``'D_state_dict'``.
    """
    checkpoint = {'net_state_dict': net.module.state_dict()}
    if net_d is not None:
        checkpoint['D_state_dict'] = net_d.module.state_dict()
    torch.save(checkpoint, path)
def save_model_nonmodule(path, net, net_d=None):
    """Serialize plain (non-DataParallel) network weights to ``path``.

    Same layout as :func:`save_model`, but the networks are used directly
    instead of through a ``.module`` attribute.
    """
    checkpoint = {'net_state_dict': net.state_dict()}
    if net_d is not None:
        checkpoint['D_state_dict'] = net_d.state_dict()
    torch.save(checkpoint, path)
def generator_step(net_d, out2, net_loss, optimizer):
    """One generator update: LSGAN adversarial loss plus weighted ``net_loss``.

    Only the first 2048 points of the generator output ``out2`` are fed to
    the discriminator ``net_d``.  Requires CUDA: the backward call seeds
    gradients with one ``ones`` entry per visible GPU.  Returns the
    discriminator scores on the fake sample so the discriminator step can
    reuse them without a second forward pass.
    """
    # Freeze the discriminator while the generator is being updated.
    set_requires_grad(net_d, False)
    d_fake = net_d(out2[:, 0:2048, :])
    # LSGAN generator objective: push fake scores towards 1.
    errG_loss_batch = torch.mean((d_fake - 1) ** 2)
    # NOTE(review): the reconstruction term is weighted by a hard-coded 200.
    total_gen_loss_batch = errG_loss_batch + net_loss * 200
    # retain_graph=True so discriminator_step can later backprop through d_fake.
    total_gen_loss_batch.backward(torch.ones(torch.cuda.device_count()).cuda(), retain_graph=True, )
    optimizer.step()
    return d_fake
def discriminator_step(net_d, gt, d_fake, optimizer_d):
    """One discriminator update with the LSGAN objective.

    ``gt`` is the ground-truth point cloud (first 2048 points used) and
    ``d_fake`` the discriminator output on the generated sample produced by
    :func:`generator_step`.  Requires CUDA.
    """
    # Unfreeze the discriminator (it was frozen during the generator step).
    set_requires_grad(net_d, True)
    d_real = net_d(gt[:, 0:2048, :])
    # LSGAN discriminator objective: fake scores towards 0, real towards 1.
    d_loss_fake = torch.mean(d_fake ** 2)
    d_loss_real = torch.mean((d_real - 1) ** 2)
    errD_loss_batch = 0.5 * (d_loss_real + d_loss_fake)
    total_dis_loss_batch = errD_loss_batch
    # One gradient seed per visible GPU.
    total_dis_loss_batch.backward(torch.ones(torch.cuda.device_count()).cuda())
    optimizer_d.step()
def getResult(dataset, numpy_list):
    """Reassemble per-class point clouds back into dataset order.

    ``numpy_list[c]`` holds the point clouds produced for class ``c`` in the
    order the dataset yields them; for each dataset item (whose label is the
    first element of ``dataset.__getitem__(i)``) the next unused cloud of
    that class is copied into the output.

    :param dataset: sized object whose ``__getitem__`` returns
        ``(label, _, _)``.
    :param numpy_list: per-class arrays, each of shape (count_c, 2048, 3).
    :return: array of shape (len(dataset), 2048, 3) in dataset order.
    """
    size = len(dataset)
    ans = np.zeros((size, 2048, 3))
    # Next-unused index per class.  Plain ``int`` instead of the removed
    # ``np.int`` alias (gone since NumPy 1.24).
    index = np.zeros(len(numpy_list), dtype=int)
    for i in range(size):
        label, _, _ = dataset.__getitem__(i)
        ans[i] = numpy_list[label][index[label]]
        index[label] += 1
    return ans
| [
"torch.mean",
"numpy.zeros",
"torch.cuda.device_count"
] | [((1419, 1448), 'torch.mean', 'torch.mean', (['((d_fake - 1) ** 2)'], {}), '((d_fake - 1) ** 2)\n', (1429, 1448), False, 'import torch\n'), ((1797, 1820), 'torch.mean', 'torch.mean', (['(d_fake ** 2)'], {}), '(d_fake ** 2)\n', (1807, 1820), False, 'import torch\n'), ((1839, 1868), 'torch.mean', 'torch.mean', (['((d_real - 1) ** 2)'], {}), '((d_real - 1) ** 2)\n', (1849, 1868), False, 'import torch\n'), ((2144, 2169), 'numpy.zeros', 'np.zeros', (['(size, 2048, 3)'], {}), '((size, 2048, 3))\n', (2152, 2169), True, 'import numpy as np\n'), ((1554, 1579), 'torch.cuda.device_count', 'torch.cuda.device_count', ([], {}), '()\n', (1577, 1579), False, 'import torch\n'), ((2013, 2038), 'torch.cuda.device_count', 'torch.cuda.device_count', ([], {}), '()\n', (2036, 2038), False, 'import torch\n')] |
# -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2017.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
# pylint: disable=invalid-name
"""
Two-pulse single-qubit gate.
"""
import numpy
from qiskit.circuit import Gate
from qiskit.circuit import QuantumCircuit
class U3Gate(Gate):
    """Generic single-qubit rotation gate with three Euler angles."""

    def __init__(self, theta, phi, lam, label=None):
        """Create a new u3 gate from the angles ``theta``, ``phi``, ``lam``."""
        super().__init__("u3", 1, [theta, phi, lam], label=label)

    def inverse(self):
        r"""Return the inverse gate.

        :math:`u3(\theta,\phi,\lambda)^{\dagger} = u3(-\theta,-\lambda,-\phi)`
        """
        theta, phi, lam = self.params
        return U3Gate(-theta, -lam, -phi)

    def to_matrix(self):
        """Return the 2x2 unitary of this gate as a numpy array."""
        theta, phi, lam = (float(p) for p in self.params)
        half = theta / 2
        cos, sin = numpy.cos(half), numpy.sin(half)
        return numpy.array(
            [[cos, -numpy.exp(1j * lam) * sin],
             [numpy.exp(1j * phi) * sin, numpy.exp(1j * (phi + lam)) * cos]],
            dtype=complex)
def u3(self, theta, phi, lam, q):
    """Apply a :class:`U3Gate` with the given angles to qubit ``q``."""
    return self.append(U3Gate(theta, phi, lam), [q], [])


# Monkey-patch: expose u3 as a QuantumCircuit method.
QuantumCircuit.u3 = u3
| [
"numpy.sin",
"numpy.exp",
"numpy.cos"
] | [((1331, 1351), 'numpy.cos', 'numpy.cos', (['(theta / 2)'], {}), '(theta / 2)\n', (1340, 1351), False, 'import numpy\n'), ((1392, 1412), 'numpy.sin', 'numpy.sin', (['(theta / 2)'], {}), '(theta / 2)\n', (1401, 1412), False, 'import numpy\n'), ((1460, 1481), 'numpy.exp', 'numpy.exp', (['(1.0j * phi)'], {}), '(1.0j * phi)\n', (1469, 1481), False, 'import numpy\n'), ((1482, 1502), 'numpy.sin', 'numpy.sin', (['(theta / 2)'], {}), '(theta / 2)\n', (1491, 1502), False, 'import numpy\n'), ((1521, 1550), 'numpy.exp', 'numpy.exp', (['(1.0j * (phi + lam))'], {}), '(1.0j * (phi + lam))\n', (1530, 1550), False, 'import numpy\n'), ((1551, 1571), 'numpy.cos', 'numpy.cos', (['(theta / 2)'], {}), '(theta / 2)\n', (1560, 1571), False, 'import numpy\n'), ((1370, 1391), 'numpy.exp', 'numpy.exp', (['(1.0j * lam)'], {}), '(1.0j * lam)\n', (1379, 1391), False, 'import numpy\n')] |
from numbers import Real
from typing import Optional
import numpy as np
import mygrad._utils.graph_tracking as _tracking
from mygrad.operation_base import Operation
from mygrad.tensor_base import Tensor, asarray
from mygrad.typing import ArrayLike
class MarginRanking(Operation):
    def __call__(self, x1, x2, y, margin):
        """Compute the mean margin ranking loss between ``x1`` and ``x2``.

        Parameters
        ----------
        x1 : mygrad.Tensor, shape=(N,) or (N, D)
        x2 : mygrad.Tensor, shape=(N,) or (N, D)
        y : numpy.ndarray
        margin : float

        Returns
        -------
        numpy.ndarray, shape=()
        """
        self.variables = (x1, x2)
        self.y = y
        hinge = margin - y * (x1.data - x2.data)
        active = hinge > 0
        if _tracking.TRACK_GRAPH:
            # Gradient of the mean hinge: 1/size where the margin is
            # violated, 0 elsewhere.
            self._grad = active.astype(hinge.dtype) / hinge.size
        return np.mean(hinge * active)

    def backward_var(self, grad, index, **kwargs):
        # d/dx1 carries -y, d/dx2 carries +y (index 0 vs. 1).
        direction = self.y if index else -self.y
        return grad * (direction * self._grad)
def margin_ranking_loss(
    x1: ArrayLike,
    x2: ArrayLike,
    y: ArrayLike,
    margin: float,
    *,
    constant: Optional[bool] = None
) -> Tensor:
    r"""Compute the mean margin ranking loss.

    Equivalent to::

        >>> import mygrad as mg
        >>> mg.mean(mg.maximum(0, margin - y * (x1 - x2)))

    Parameters
    ----------
    x1 : ArrayLike, shape=(N,) or (N, D)
        A batch of scores or descriptors compared against those in ``x2``.
    x2 : ArrayLike, shape=(N,) or (N, D)
        A batch of scores or descriptors compared against those in ``x1``.
    y : Union[int, ArrayLike], scalar or shape=(N,)
        1 or -1; chooses whether the margin is compared against
        ``(x1 - x2)`` or ``(x2 - x1)`` for each of the N comparisons.
    margin : float
        Non-negative margin for the loss.
    constant : bool, optional(default=False)
        If ``True``, the returned tensor is a constant (it does not
        back-propagate a gradient).

    Returns
    -------
    mygrad.Tensor, shape=()
        The mean margin ranking loss.
    """
    # Validation order is preserved so callers see the same error first.
    if not 0 < x1.ndim < 3:
        raise ValueError("`x1` must have shape (N,) or (N, D)")
    if x1.shape != x2.shape:
        raise ValueError("`x1` and `x2` must have the same shape")
    if not np.issubdtype(x1.dtype, np.floating):
        raise TypeError("`x1` must contain floats")
    if not np.issubdtype(x2.dtype, np.floating):
        raise TypeError("`x2` must contain floats")
    if not isinstance(margin, Real) or margin < 0:
        raise ValueError("`margin` must be a non-negative scalar")
    y = asarray(y)
    if y.size == 1:
        y = np.array(y.item())
    if y.ndim != 0 and not (y.ndim == 1 and len(y) == len(x1)):
        raise ValueError("`y` must be a scalar or shape-(N,) array of ones")
    if y.ndim and x1.ndim == 2:
        # Broadcast the per-row sign across the D columns of x1/x2.
        y = y.reshape(-1, 1)
    return Tensor._op(MarginRanking, x1, x2, op_args=(y, margin), constant=constant)
| [
"numpy.ones_like",
"mygrad.tensor_base.asarray",
"numpy.mean",
"mygrad.tensor_base.Tensor._op",
"numpy.issubdtype"
] | [((2831, 2841), 'mygrad.tensor_base.asarray', 'asarray', (['y'], {}), '(y)\n', (2838, 2841), False, 'from mygrad.tensor_base import Tensor, asarray\n'), ((3125, 3198), 'mygrad.tensor_base.Tensor._op', 'Tensor._op', (['MarginRanking', 'x1', 'x2'], {'op_args': '(y, margin)', 'constant': 'constant'}), '(MarginRanking, x1, x2, op_args=(y, margin), constant=constant)\n', (3135, 3198), False, 'from mygrad.tensor_base import Tensor, asarray\n'), ((1049, 1062), 'numpy.mean', 'np.mean', (['loss'], {}), '(loss)\n', (1056, 1062), True, 'import numpy as np\n'), ((2513, 2549), 'numpy.issubdtype', 'np.issubdtype', (['x1.dtype', 'np.floating'], {}), '(x1.dtype, np.floating)\n', (2526, 2549), True, 'import numpy as np\n'), ((2614, 2650), 'numpy.issubdtype', 'np.issubdtype', (['x2.dtype', 'np.floating'], {}), '(x2.dtype, np.floating)\n', (2627, 2650), True, 'import numpy as np\n'), ((944, 959), 'numpy.ones_like', 'np.ones_like', (['M'], {}), '(M)\n', (956, 959), True, 'import numpy as np\n')] |
import tensorflow as tf
tf.compat.v1.enable_eager_execution()
import numpy as np
import pickle
from prepare_lyft_data_v2 import class2angle, class2size
from model_util import NUM_HEADING_BIN, NUM_SIZE_CLUSTER
from prepare_lyft_data import get_sensor_to_world_transform_matrix_from_sample_data_token, \
convert_box_to_world_coord_with_sample_data_token
from lyft_dataset_sdk.utils.data_classes import Box, Quaternion
from lyft_dataset_sdk.lyftdataset import LyftDataset
from prepare_lyft_data_v2 import parse_inference_record
def rotate_pc_along_y(pc, rot_angle):
    """Rotate a point cloud about the camera y-axis (yaw in the x-z plane).

    Input:
        pc: numpy array (N, C); the first 3 channels are XYZ, with z facing
            forward, x leftward and y downward.  Not modified in place.
        rot_angle: rotation angle in radians.
    Output:
        A copy of ``pc`` with its x and z channels rotated.
    """
    rotated = pc.copy()
    c, s = np.cos(rot_angle), np.sin(rot_angle)
    # 2x2 rotation applied to the (x, z) columns only.
    rot = np.array([[c, -s], [s, c]])
    rotated[:, [0, 2]] = rotated[:, [0, 2]] @ rot.T
    return rotated
def read_frustum_pointnet_output_v2(ldt: LyftDataset, inference_tfrecord_file):
    """Yield ``(inferred_box, sample_token)`` pairs from an inference TFRecord.

    Each record is decoded with :func:`parse_inference_record` and its
    class/residual outputs are converted back into a world-frame ``Box`` via
    :func:`get_box_from_inference`.

    :param ldt: the Lyft dataset used to resolve tokens.
    :param inference_tfrecord_file: path(s) to the TFRecord file(s).
    """
    raw_dataset = tf.data.TFRecordDataset(inference_tfrecord_file)
    for raw_record in raw_dataset:
        example = parse_inference_record(raw_record)
        # Decode the camera token once; it is needed both for the box
        # transform and for looking up the owning sample.
        camera_token = example['camera_token'].numpy().decode('utf8')
        inferred_box = get_box_from_inference(
            lyftd=ldt,
            heading_cls=example['rot_heading_angle_class'].numpy(),
            heading_res=example['rot_heading_angle_residual'].numpy(),
            rot_angle=example['frustum_angle'].numpy(),
            size_cls=example['size_class'].numpy(),
            size_res=example['size_residual'].numpy(),
            center_coord=example['rot_box_center'].numpy(),
            sample_data_token=camera_token,
            score=example['score'].numpy(),
            type_name=example['type_name'].numpy().decode('utf8'))
        pc_record = ldt.get("sample_data", camera_token)
        yield inferred_box, pc_record['sample_token']
def read_frustum_pointnet_output(ldt: LyftDataset, inference_pickle_file, token_pickle_file, from_rgb_detection: bool):
    """Load frustum-pointnet inference results from the legacy pickle format.

    ``inference_pickle_file`` holds parallel lists of network outputs (one
    entry per detection) pickled in a fixed order; ``token_pickle_file``
    holds the matching dataset tokens.

    NOTE(review): ``pickle.load`` executes arbitrary code on untrusted data;
    only read files produced by this pipeline.

    :return: ``(boxes, gt_boxes, sample_token_list)``; ``gt_boxes`` is empty
        when ``from_rgb_detection`` is True.
    """
    with open(inference_pickle_file, 'rb') as fp:
        # Must be read back in exactly the order the inference script wrote.
        ps_list = pickle.load(fp)
        if not from_rgb_detection:
            seg_list = pickle.load(fp)
        segp_list = pickle.load(fp)
        center_list = pickle.load(fp)
        heading_cls_list = pickle.load(fp)
        heading_res_list = pickle.load(fp)
        size_cls_list = pickle.load(fp)
        size_res_list = pickle.load(fp)
        rot_angle_list = pickle.load(fp)
        score_list = pickle.load(fp)
        if from_rgb_detection:
            onehot_list = pickle.load(fp)
    with open(token_pickle_file, 'rb') as fp:
        sample_token_list = pickle.load(fp)
        annotation_token_list = pickle.load(fp)
        camera_data_token_list = pickle.load(fp)
        type_list = pickle.load(fp)
    # Touch one record of each kind so invalid tokens fail early.
    ldt.get('sample', sample_token_list[0])
    if annotation_token_list:
        ldt.get('sample_annotation', annotation_token_list[0])
    print("length of sample token:", len(sample_token_list))
    print("length of ps list:", len(ps_list))
    assert len(sample_token_list) == len(ps_list)
    boxes = []
    gt_boxes = []
    for data_idx in range(len(ps_list)):
        # Convert each class/residual prediction back into a world-frame Box.
        inferred_box = get_box_from_inference(lyftd=ldt, heading_cls=heading_cls_list[data_idx],
                                               heading_res=heading_res_list[data_idx],
                                               rot_angle=rot_angle_list[data_idx],
                                               size_cls=size_cls_list[data_idx],
                                               size_res=size_res_list[data_idx],
                                               center_coord=center_list[data_idx],
                                               sample_data_token=camera_data_token_list[data_idx],
                                               score=score_list[data_idx])
        inferred_box.name = type_list[data_idx]
        boxes.append(inferred_box)
        if not from_rgb_detection:
            gt_boxes.append(ldt.get_box(annotation_token_list[data_idx]))
    return boxes, gt_boxes, sample_token_list
def get_heading_angle(heading_cls, heading_res, rot_angle):
    """Recover the absolute heading angle (radians) from its class/residual
    encoding, re-applying the frustum rotation ``rot_angle``."""
    return class2angle(heading_cls, heading_res, NUM_HEADING_BIN) + rot_angle
def get_size(size_cls, size_res) -> np.ndarray:
    """
    compute size(l,w,h) from size class and residuals
    :param size_cls: predicted size-template class index
    :param size_res: predicted residual offsets from that template
    :return: np.ndarray([l,w,h])
    """
    return class2size(size_cls, size_res)
def get_center_in_sensor_coord(center_coord, rot_angle):
    """Undo the frustum rotation on a 3-vector box center, giving the
    center expressed in the sensor (camera) frame."""
    center_row = np.expand_dims(center_coord, 0)
    return rotate_pc_along_y(center_row, rot_angle=-rot_angle).squeeze()
def get_center_in_world_coord(center_in_sensor_coord, sample_data_token: str):
    """Transform a box center from the sensor frame to the world frame.

    :param center_in_sensor_coord: 3-vector center in sensor coordinates
    :param sample_data_token: token identifying the sensor sample
    :return: 3-vector center in world coordinates
    """
    transform = get_sensor_to_world_transform_matrix_from_sample_data_token(
        sample_data_token)
    # Append the homogeneous coordinate before applying the 4x4 transform.
    homogeneous = np.append(center_in_sensor_coord, 1.0)
    return (transform @ homogeneous).ravel()[:3]
def get_box_from_inference(lyftd: LyftDataset, heading_cls, heading_res, rot_angle,
                           size_cls, size_res, center_coord, sample_data_token, score,type_name:str) -> Box:
    """Assemble a world-frame ``Box`` from the network's class/residual
    outputs for heading, size and (frustum-rotated) center.

    ``rot_angle`` is the frustum rotation applied before inference; it is
    undone here so the box lands in the sensor frame before being
    transformed to world coordinates.
    """
    heading_angle = get_heading_angle(heading_cls, heading_res, rot_angle)
    size = get_size(size_cls, size_res)
    # Extra quarter turn to match the frustum-angle convention used when the
    # center was rotated -- TODO confirm against the data-preparation code.
    rot_angle += np.pi / 2
    center_sensor_coord = get_center_in_sensor_coord(center_coord=center_coord, rot_angle=rot_angle)
    # Make Box
    # The rationale of doing this: to conform the convention of Box class, the car is originally heading to +x axis,
    # with y(left) and z(top). To make the car heading to the angle it should be in the camera coordinate,
    # we have to rotate it by 90 degree around x axis and [theta] degree around y axis, where [theta] is the heading angle
    l, w, h = size
    first_rot = Quaternion(axis=[1, 0, 0], angle=np.pi / 2)
    second_rot = Quaternion(axis=[0, -1, 0], angle=-heading_angle)
    box_in_sensor_coord = Box(center=center_sensor_coord, size=[w, l, h],
                            orientation=second_rot * first_rot, score=score,name=type_name)
    box_in_world_coord = convert_box_to_world_coord_with_sample_data_token(box_in_sensor_coord, sample_data_token,
                                                                          lyftd)
    # assert np.abs(box_in_world_coord.orientation.axis[0]) <= 0.02
    # assert np.abs(box_in_world_coord.orientation.axis[1]) <= 0.02
    return box_in_world_coord
| [
"tensorflow.data.TFRecordDataset",
"numpy.copy",
"lyft_dataset_sdk.utils.data_classes.Box",
"lyft_dataset_sdk.utils.data_classes.Quaternion",
"numpy.ones",
"prepare_lyft_data_v2.parse_inference_record",
"pickle.load",
"prepare_lyft_data_v2.class2angle",
"numpy.array",
"prepare_lyft_data.get_sensor... | [((25, 62), 'tensorflow.compat.v1.enable_eager_execution', 'tf.compat.v1.enable_eager_execution', ([], {}), '()\n', (60, 62), True, 'import tensorflow as tf\n'), ((845, 856), 'numpy.copy', 'np.copy', (['pc'], {}), '(pc)\n', (852, 856), True, 'import numpy as np\n'), ((870, 887), 'numpy.cos', 'np.cos', (['rot_angle'], {}), '(rot_angle)\n', (876, 887), True, 'import numpy as np\n'), ((901, 918), 'numpy.sin', 'np.sin', (['rot_angle'], {}), '(rot_angle)\n', (907, 918), True, 'import numpy as np\n'), ((932, 979), 'numpy.array', 'np.array', (['[[cosval, -sinval], [sinval, cosval]]'], {}), '([[cosval, -sinval], [sinval, cosval]])\n', (940, 979), True, 'import numpy as np\n'), ((1161, 1209), 'tensorflow.data.TFRecordDataset', 'tf.data.TFRecordDataset', (['inference_tfrecord_file'], {}), '(inference_tfrecord_file)\n', (1184, 1209), True, 'import tensorflow as tf\n'), ((5054, 5084), 'prepare_lyft_data_v2.class2size', 'class2size', (['size_cls', 'size_res'], {}), '(size_cls, size_res)\n', (5064, 5084), False, 'from prepare_lyft_data_v2 import class2angle, class2size\n'), ((5497, 5575), 'prepare_lyft_data.get_sensor_to_world_transform_matrix_from_sample_data_token', 'get_sensor_to_world_transform_matrix_from_sample_data_token', (['sample_data_token'], {}), '(sample_data_token)\n', (5556, 5575), False, 'from prepare_lyft_data import get_sensor_to_world_transform_matrix_from_sample_data_token, convert_box_to_world_coord_with_sample_data_token\n'), ((6562, 6605), 'lyft_dataset_sdk.utils.data_classes.Quaternion', 'Quaternion', ([], {'axis': '[1, 0, 0]', 'angle': '(np.pi / 2)'}), '(axis=[1, 0, 0], angle=np.pi / 2)\n', (6572, 6605), False, 'from lyft_dataset_sdk.utils.data_classes import Box, Quaternion\n'), ((6623, 6672), 'lyft_dataset_sdk.utils.data_classes.Quaternion', 'Quaternion', ([], {'axis': '[0, -1, 0]', 'angle': '(-heading_angle)'}), '(axis=[0, -1, 0], angle=-heading_angle)\n', (6633, 6672), False, 'from 
lyft_dataset_sdk.utils.data_classes import Box, Quaternion\n'), ((6699, 6815), 'lyft_dataset_sdk.utils.data_classes.Box', 'Box', ([], {'center': 'center_sensor_coord', 'size': '[w, l, h]', 'orientation': '(second_rot * first_rot)', 'score': 'score', 'name': 'type_name'}), '(center=center_sensor_coord, size=[w, l, h], orientation=second_rot *\n first_rot, score=score, name=type_name)\n', (6702, 6815), False, 'from lyft_dataset_sdk.utils.data_classes import Box, Quaternion\n'), ((6867, 6967), 'prepare_lyft_data.convert_box_to_world_coord_with_sample_data_token', 'convert_box_to_world_coord_with_sample_data_token', (['box_in_sensor_coord', 'sample_data_token', 'lyftd'], {}), '(box_in_sensor_coord,\n sample_data_token, lyftd)\n', (6916, 6967), False, 'from prepare_lyft_data import get_sensor_to_world_transform_matrix_from_sample_data_token, convert_box_to_world_coord_with_sample_data_token\n'), ((1024, 1044), 'numpy.transpose', 'np.transpose', (['rotmat'], {}), '(rotmat)\n', (1036, 1044), True, 'import numpy as np\n'), ((1263, 1297), 'prepare_lyft_data_v2.parse_inference_record', 'parse_inference_record', (['raw_record'], {}), '(raw_record)\n', (1285, 1297), False, 'from prepare_lyft_data_v2 import parse_inference_record\n'), ((2625, 2640), 'pickle.load', 'pickle.load', (['fp'], {}), '(fp)\n', (2636, 2640), False, 'import pickle\n'), ((2735, 2750), 'pickle.load', 'pickle.load', (['fp'], {}), '(fp)\n', (2746, 2750), False, 'import pickle\n'), ((2773, 2788), 'pickle.load', 'pickle.load', (['fp'], {}), '(fp)\n', (2784, 2788), False, 'import pickle\n'), ((2816, 2831), 'pickle.load', 'pickle.load', (['fp'], {}), '(fp)\n', (2827, 2831), False, 'import pickle\n'), ((2859, 2874), 'pickle.load', 'pickle.load', (['fp'], {}), '(fp)\n', (2870, 2874), False, 'import pickle\n'), ((2899, 2914), 'pickle.load', 'pickle.load', (['fp'], {}), '(fp)\n', (2910, 2914), False, 'import pickle\n'), ((2939, 2954), 'pickle.load', 'pickle.load', (['fp'], {}), '(fp)\n', (2950, 2954), False, 'import 
pickle\n'), ((2980, 2995), 'pickle.load', 'pickle.load', (['fp'], {}), '(fp)\n', (2991, 2995), False, 'import pickle\n'), ((3017, 3032), 'pickle.load', 'pickle.load', (['fp'], {}), '(fp)\n', (3028, 3032), False, 'import pickle\n'), ((3180, 3195), 'pickle.load', 'pickle.load', (['fp'], {}), '(fp)\n', (3191, 3195), False, 'import pickle\n'), ((3228, 3243), 'pickle.load', 'pickle.load', (['fp'], {}), '(fp)\n', (3239, 3243), False, 'import pickle\n'), ((3277, 3292), 'pickle.load', 'pickle.load', (['fp'], {}), '(fp)\n', (3288, 3292), False, 'import pickle\n'), ((3313, 3328), 'pickle.load', 'pickle.load', (['fp'], {}), '(fp)\n', (3324, 3328), False, 'import pickle\n'), ((4713, 4767), 'prepare_lyft_data_v2.class2angle', 'class2angle', (['heading_cls', 'heading_res', 'NUM_HEADING_BIN'], {}), '(heading_cls, heading_res, NUM_HEADING_BIN)\n', (4724, 4767), False, 'from prepare_lyft_data_v2 import class2angle, class2size\n'), ((2699, 2714), 'pickle.load', 'pickle.load', (['fp'], {}), '(fp)\n', (2710, 2714), False, 'import pickle\n'), ((3090, 3105), 'pickle.load', 'pickle.load', (['fp'], {}), '(fp)\n', (3101, 3105), False, 'import pickle\n'), ((5648, 5658), 'numpy.ones', 'np.ones', (['(1)'], {}), '(1)\n', (5655, 5658), True, 'import numpy as np\n'), ((5191, 5222), 'numpy.expand_dims', 'np.expand_dims', (['center_coord', '(0)'], {}), '(center_coord, 0)\n', (5205, 5222), True, 'import numpy as np\n'), ((5673, 5710), 'numpy.dot', 'np.dot', (['mtx', 'center_in_sensor_coord_h'], {}), '(mtx, center_in_sensor_coord_h)\n', (5679, 5710), True, 'import numpy as np\n')] |
__author__ = 'sibirrer'
import pytest
import lenstronomy.Util.simulation_util as sim_util
from lenstronomy.ImSim.image_model import ImageModel
import lenstronomy.Util.param_util as param_util
from lenstronomy.PointSource.point_source import PointSource
from lenstronomy.LensModel.lens_model import LensModel
from lenstronomy.LightModel.light_model import LightModel
from lenstronomy.Plots.model_plot import ModelPlot
import lenstronomy.Plots.model_plot as output_plots
from lenstronomy.Data.imaging_data import ImageData
from lenstronomy.Data.psf import PSF
from lenstronomy.Plots import chain_plot
import unittest
import matplotlib
matplotlib.use('agg')
import matplotlib.pyplot as plt
import numpy as np
class TestChainPlots(object):
    """
    smoke tests for lenstronomy's chain_plot plotting helpers
    """
    def setup(self):
        """Build a pixelized PSF from a Gaussian kernel, reused by the tests."""
        # data specifics
        deltaPix = 0.5  # pixel size in arcsec (area per pixel = deltaPix**2)
        fwhm = 0.5  # full width half max of PSF
        kwargs_psf_gaussian = {'psf_type': 'GAUSSIAN', 'fwhm': fwhm, 'truncation': 5, 'pixel_size': deltaPix}
        psf_gaussian = PSF(**kwargs_psf_gaussian)
        self.kwargs_psf = {'psf_type': 'PIXEL', 'kernel_point_source': psf_gaussian.kernel_point_source}
    def test_psf_iteration_compare(self):
        """psf_iteration_compare runs with and without explicit color limits."""
        kwargs_psf = self.kwargs_psf
        kwargs_psf['kernel_point_source_init'] = kwargs_psf['kernel_point_source']
        f, ax = chain_plot.psf_iteration_compare(kwargs_psf=kwargs_psf, vmin=-1, vmax=1)
        plt.close()
        f, ax = chain_plot.psf_iteration_compare(kwargs_psf=kwargs_psf)
        plt.close()
    def test_plot_chain(self):
        """plot_chain accepts a PSO-style (chi2, positions, velocities) chain."""
        X2_list = [1, 1, 2]
        pos_list = [[1, 0], [2, 0], [3, 0]]
        vel_list = [[-1, 0], [0, 0], [1, 0]]
        param_list = ['test1', 'test2']
        chain = X2_list, pos_list, vel_list, None
        chain_plot.plot_chain(chain=chain, param_list=param_list)
        plt.close()
    def test_plot_mcmc_behaviour(self):
        """plot_mcmc_behaviour draws averaged MCMC samples onto a given axis."""
        f, ax = plt.subplots(1, 1, figsize=(4, 4))
        param_mcmc = ['a', 'b']
        samples_mcmc = np.random.random((10, 1000))
        dist_mcmc = np.random.random(1000)
        chain_plot.plot_mcmc_behaviour(ax, samples_mcmc, param_mcmc, dist_mcmc, num_average=10)
        plt.close()
    def test_chain_list(self):
        """plot_chain_list handles PSO, EMCEE and MULTINEST chain entries."""
        param = ['a', 'b']
        X2_list = [1, 1, 2]
        pos_list = [[1, 0], [2, 0], [3, 0]]
        vel_list = [[-1, 0], [0, 0], [1, 0]]
        chain = X2_list, pos_list, vel_list, None
        samples_mcmc = np.random.random((10, 1000))
        dist_mcmc = np.random.random(1000)
        chain_list = [['PSO', chain, param],
                      ['EMCEE', samples_mcmc, param, dist_mcmc],
                      ['MULTINEST', samples_mcmc, param, dist_mcmc]
                      ]
        chain_plot.plot_chain_list(chain_list, index=0)
        plt.close()
        chain_plot.plot_chain_list(chain_list, index=1, num_average=10)
        plt.close()
        chain_plot.plot_chain_list(chain_list, index=2, num_average=10)
        plt.close()
class TestRaise(unittest.TestCase):
    def test_raise(self):
        """plot_chain_list must reject an unknown chain-type label."""
        with self.assertRaises(ValueError):
            chain_plot.plot_chain_list(chain_list=[['WRONG']], index=0)
# Allow running this test module directly.
if __name__ == '__main__':
    pytest.main()
| [
"matplotlib.use",
"numpy.random.random",
"lenstronomy.Plots.chain_plot.plot_mcmc_behaviour",
"lenstronomy.Plots.chain_plot.psf_iteration_compare",
"lenstronomy.Data.psf.PSF",
"matplotlib.pyplot.close",
"pytest.main",
"lenstronomy.Plots.chain_plot.plot_chain",
"lenstronomy.Plots.chain_plot.plot_chain... | [((636, 657), 'matplotlib.use', 'matplotlib.use', (['"""agg"""'], {}), "('agg')\n", (650, 657), False, 'import matplotlib\n'), ((3252, 3265), 'pytest.main', 'pytest.main', ([], {}), '()\n', (3263, 3265), False, 'import pytest\n'), ((1094, 1120), 'lenstronomy.Data.psf.PSF', 'PSF', ([], {}), '(**kwargs_psf_gaussian)\n', (1097, 1120), False, 'from lenstronomy.Data.psf import PSF\n'), ((1405, 1477), 'lenstronomy.Plots.chain_plot.psf_iteration_compare', 'chain_plot.psf_iteration_compare', ([], {'kwargs_psf': 'kwargs_psf', 'vmin': '(-1)', 'vmax': '(1)'}), '(kwargs_psf=kwargs_psf, vmin=-1, vmax=1)\n', (1437, 1477), False, 'from lenstronomy.Plots import chain_plot\n'), ((1486, 1497), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (1495, 1497), True, 'import matplotlib.pyplot as plt\n'), ((1514, 1569), 'lenstronomy.Plots.chain_plot.psf_iteration_compare', 'chain_plot.psf_iteration_compare', ([], {'kwargs_psf': 'kwargs_psf'}), '(kwargs_psf=kwargs_psf)\n', (1546, 1569), False, 'from lenstronomy.Plots import chain_plot\n'), ((1578, 1589), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (1587, 1589), True, 'import matplotlib.pyplot as plt\n'), ((1838, 1895), 'lenstronomy.Plots.chain_plot.plot_chain', 'chain_plot.plot_chain', ([], {'chain': 'chain', 'param_list': 'param_list'}), '(chain=chain, param_list=param_list)\n', (1859, 1895), False, 'from lenstronomy.Plots import chain_plot\n'), ((1904, 1915), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (1913, 1915), True, 'import matplotlib.pyplot as plt\n'), ((1973, 2007), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {'figsize': '(4, 4)'}), '(1, 1, figsize=(4, 4))\n', (1985, 2007), True, 'import matplotlib.pyplot as plt\n'), ((2063, 2091), 'numpy.random.random', 'np.random.random', (['(10, 1000)'], {}), '((10, 1000))\n', (2079, 2091), True, 'import numpy as np\n'), ((2112, 2134), 'numpy.random.random', 'np.random.random', (['(1000)'], 
{}), '(1000)\n', (2128, 2134), True, 'import numpy as np\n'), ((2143, 2234), 'lenstronomy.Plots.chain_plot.plot_mcmc_behaviour', 'chain_plot.plot_mcmc_behaviour', (['ax', 'samples_mcmc', 'param_mcmc', 'dist_mcmc'], {'num_average': '(10)'}), '(ax, samples_mcmc, param_mcmc, dist_mcmc,\n num_average=10)\n', (2173, 2234), False, 'from lenstronomy.Plots import chain_plot\n'), ((2239, 2250), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (2248, 2250), True, 'import matplotlib.pyplot as plt\n'), ((2502, 2530), 'numpy.random.random', 'np.random.random', (['(10, 1000)'], {}), '((10, 1000))\n', (2518, 2530), True, 'import numpy as np\n'), ((2551, 2573), 'numpy.random.random', 'np.random.random', (['(1000)'], {}), '(1000)\n', (2567, 2573), True, 'import numpy as np\n'), ((2786, 2833), 'lenstronomy.Plots.chain_plot.plot_chain_list', 'chain_plot.plot_chain_list', (['chain_list'], {'index': '(0)'}), '(chain_list, index=0)\n', (2812, 2833), False, 'from lenstronomy.Plots import chain_plot\n'), ((2842, 2853), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (2851, 2853), True, 'import matplotlib.pyplot as plt\n'), ((2862, 2925), 'lenstronomy.Plots.chain_plot.plot_chain_list', 'chain_plot.plot_chain_list', (['chain_list'], {'index': '(1)', 'num_average': '(10)'}), '(chain_list, index=1, num_average=10)\n', (2888, 2925), False, 'from lenstronomy.Plots import chain_plot\n'), ((2934, 2945), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (2943, 2945), True, 'import matplotlib.pyplot as plt\n'), ((2954, 3017), 'lenstronomy.Plots.chain_plot.plot_chain_list', 'chain_plot.plot_chain_list', (['chain_list'], {'index': '(2)', 'num_average': '(10)'}), '(chain_list, index=2, num_average=10)\n', (2980, 3017), False, 'from lenstronomy.Plots import chain_plot\n'), ((3026, 3037), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (3035, 3037), True, 'import matplotlib.pyplot as plt\n'), ((3159, 3218), 'lenstronomy.Plots.chain_plot.plot_chain_list', 
'chain_plot.plot_chain_list', ([], {'chain_list': "[['WRONG']]", 'index': '(0)'}), "(chain_list=[['WRONG']], index=0)\n", (3185, 3218), False, 'from lenstronomy.Plots import chain_plot\n')] |
from numpy.random.mtrand import RandomState
import numpy as np
from .abstract import Agent
# Default configuration for the EpsilonGreedy agent wrapper below.
epsilon_greedy_args = {
    # Probability of replacing the greedy action with an exploratory one.
    'epsilon': 0.01,
    # Seed for the wrapper's private RNG; drawn once at import time.
    'random_seed': np.random.randint(2 ** 31 - 1),
    # Select an Action that is ABSOLUTELY different to the Action
    # that would have been selected in case when Epsilon-Greedy Policy Selection
    # had not been applied.
    'epsilon_pure_new': True,
    # Try to select the worse case in epsilon-case.
    'epsilon_select_worse': False,
}
class EpsilonGreedy(Agent):
    """Epsilon-greedy wrapper around another agent.

    With probability ``epsilon`` the wrapped agent's greedy action is
    replaced by a randomly sampled product; otherwise the greedy action
    is returned with its propensities rescaled by ``1 - epsilon``.
    """

    def __init__(self, config, agent):
        super(EpsilonGreedy, self).__init__(config)
        self.agent = agent
        self.rng = RandomState(self.config.random_seed)

    def train(self, observation, action, reward, done = False):
        # Training is delegated untouched to the wrapped agent.
        self.agent.train(observation, action, reward, done)

    def act(self, observation, reward, done):
        greedy_action = self.agent.act(observation, reward, done)
        explore = self.rng.choice(
            [True, False],
            p = [self.config.epsilon, 1.0 - self.config.epsilon]
        )
        if not explore:
            # Greedy branch: keep the wrapped agent's decision and rescale
            # its propensities by the probability of taking this branch.
            return {
                **greedy_action,
                'greedy': True,
                'ps': (1.0 - self.config.epsilon) * greedy_action['ps'],
                'ps-a': (1.0 - self.config.epsilon) * greedy_action['ps-a'],
            }
        # Exploration branch: build the sampling distribution over products.
        if self.config.epsilon_select_worse:
            # Invert the greedy propensities to favour unlikely products.
            weights = 1.0 - greedy_action['ps-a']
        else:
            weights = np.ones(self.config.num_products)
        if self.config.epsilon_pure_new:
            # Never re-select the action the greedy policy would have taken.
            weights[greedy_action['a']] = 0.0
        weights = weights / np.sum(weights)
        sampled_action = self.rng.choice(
            self.config.num_products,
            p = weights
        )
        return {
            **super().act(observation, reward, done),
            **{
                'a': sampled_action,
                'ps': self.config.epsilon * weights[sampled_action],
                'ps-a': self.config.epsilon * weights,
                'greedy': False,
                'h0': greedy_action['a']
            }
        }

    def reset(self):
        self.agent.reset()
| [
"numpy.sum",
"numpy.random.randint",
"numpy.random.mtrand.RandomState",
"numpy.ones"
] | [((157, 187), 'numpy.random.randint', 'np.random.randint', (['(2 ** 31 - 1)'], {}), '(2 ** 31 - 1)\n', (174, 187), True, 'import numpy as np\n'), ((652, 688), 'numpy.random.mtrand.RandomState', 'RandomState', (['self.config.random_seed'], {}), '(self.config.random_seed)\n', (663, 688), False, 'from numpy.random.mtrand import RandomState\n'), ((1267, 1300), 'numpy.ones', 'np.ones', (['self.config.num_products'], {}), '(self.config.num_products)\n', (1274, 1300), True, 'import numpy as np\n'), ((1450, 1472), 'numpy.sum', 'np.sum', (['product_probas'], {}), '(product_probas)\n', (1456, 1472), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
"""
Compares pairwise performance through independent t-test
of SVM models with different hyperparameters C.
Saves results into `svm_params_ttest.csv` and `svm_params_values.csv`
"""
import argparse
import itertools
from pathlib import Path
import numpy as np
import pandas as pd
from joblib import load
from utils import ttest_ind_corrected
# All output paths are resolved relative to the current working directory.
PROJECT_ROOT = Path.cwd()
# Command-line interface: the experiment name selects the output subfolder.
parser = argparse.ArgumentParser()
parser.add_argument('-E', '--experiment_name',
                    dest='experiment_name',
                    help='Name of the experiment.')
args = parser.parse_args()
def main(experiment_name):
    """Pairwise comparison of SVM classifier performances with different hyperparameters C.

    Loads the per-repetition/per-fold cross-validation score means, runs a
    corrected repeated k-fold cv independent t-test for every pair of C
    values (with Bonferroni correction), and writes two csv files into the
    experiment's SVM output directory: `svm_params_ttest.csv` (pairwise
    test results) and `svm_params_values.csv` (mean/std per C value).
    """
    # ----------------------------------------------------------------------------------------
    svm_dir = PROJECT_ROOT / 'outputs' / experiment_name / 'SVM'
    cv_dir = svm_dir / 'cv'

    n_repetitions = 10
    n_folds = 10

    search_space = {'C': [2 ** -7, 2 ** -5, 2 ** -3, 2 ** -1, 2 ** 0, 2 ** 1, 2 ** 3, 2 ** 5, 2 ** 7]}

    # One row of mean scores (one entry per C value) per repetition/fold pair.
    scores_params = []
    for i_repetition in range(n_repetitions):
        for i_fold in range(n_folds):
            params_dict = load(cv_dir / f'{i_repetition:02d}_{i_fold:02d}_params.joblib')
            scores_params.append(params_dict['means'])

    scores_params = np.array(scores_params)

    combinations = list(itertools.combinations(range(scores_params.shape[1]), 2))

    # Bonferroni correction for multiple comparisons
    corrected_alpha = 0.05 / len(combinations)

    # Corrected repeated k-fold cv test to compare pairwise performance of the SVM classifiers
    # through independent t-test
    results_rows = []
    for param_a, param_b in combinations:
        statistic, pvalue = ttest_ind_corrected(scores_params[:, param_a], scores_params[:, param_b],
                                                k=n_folds, r=n_repetitions)

        print(f"{search_space['C'][param_a]} vs. {search_space['C'][param_b]} pvalue: {pvalue:6.3}", end='')
        # Mark pairs that are significant after correction with an asterisk.
        if pvalue <= corrected_alpha:
            print('*')
        else:
            print('')

        results_rows.append({'params': f"{search_space['C'][param_a]} vs. {search_space['C'][param_b]}",
                             'p-value': pvalue,
                             'stats': statistic})

    # DataFrame.append was removed in pandas 2.0; build the frame in one go.
    results_df = pd.DataFrame(results_rows, columns=['params', 'p-value', 'stats'])

    # Output to csv
    results_df.to_csv(svm_dir / 'svm_params_ttest.csv', index=False)

    values_df = pd.DataFrame(columns=['measures'] + list(search_space['C']))
    scores_params_mean = np.mean(scores_params, axis=0)
    scores_params_std = np.std(scores_params, axis=0)
    values_df.loc[0] = ['mean'] + list(scores_params_mean)
    values_df.loc[1] = ['std'] + list(scores_params_std)
    values_df.to_csv(svm_dir / 'svm_params_values.csv', index=False)
if __name__ == '__main__':
    # Entry point: the experiment name comes from the -E/--experiment_name flag.
    main(args.experiment_name)
| [
"numpy.mean",
"argparse.ArgumentParser",
"pathlib.Path.cwd",
"utils.ttest_ind_corrected",
"joblib.load",
"numpy.array",
"numpy.std",
"pandas.DataFrame"
] | [((383, 393), 'pathlib.Path.cwd', 'Path.cwd', ([], {}), '()\n', (391, 393), False, 'from pathlib import Path\n'), ((404, 429), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (427, 429), False, 'import argparse\n'), ((1333, 1356), 'numpy.array', 'np.array', (['scores_params'], {}), '(scores_params)\n', (1341, 1356), True, 'import numpy as np\n'), ((1559, 1611), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['params', 'p-value', 'stats']"}), "(columns=['params', 'p-value', 'stats'])\n", (1571, 1611), True, 'import pandas as pd\n'), ((2656, 2686), 'numpy.mean', 'np.mean', (['scores_params'], {'axis': '(0)'}), '(scores_params, axis=0)\n', (2663, 2686), True, 'import numpy as np\n'), ((2711, 2740), 'numpy.std', 'np.std', (['scores_params'], {'axis': '(0)'}), '(scores_params, axis=0)\n', (2717, 2740), True, 'import numpy as np\n'), ((1811, 1917), 'utils.ttest_ind_corrected', 'ttest_ind_corrected', (['scores_params[:, param_a]', 'scores_params[:, param_b]'], {'k': 'n_folds', 'r': 'n_repetitions'}), '(scores_params[:, param_a], scores_params[:, param_b], k\n =n_folds, r=n_repetitions)\n', (1830, 1917), False, 'from utils import ttest_ind_corrected\n'), ((1193, 1256), 'joblib.load', 'load', (["(cv_dir / f'{i_repetition:02d}_{i_fold:02d}_params.joblib')"], {}), "(cv_dir / f'{i_repetition:02d}_{i_fold:02d}_params.joblib')\n", (1197, 1256), False, 'from joblib import load\n')] |
import gym
from gym import error, spaces, utils
from gym.utils import seeding
import cpufreq
import pyRAPL
import time
import numpy as np
from math import ceil
class FinalEnv02(gym.Env):
    """Gym environment that keeps CPU package power near a target cap by
    stepping the core frequency (cpufreq 'userspace' governor) and observing
    the power interval measured with pyRAPL.

    Observation: 1-based index of the power interval the measured power
                 falls into (see get_state / get_intervals).
    Actions:     0 hold, 1 lower, 2 raise the frequency (see step).
    Reward:      +2 inside the goal interval, +1 when moving towards it,
                 -1 when moving away (see get_reward).
    """
    ### DEFAULT PERSONAL VALUES
    DEF_POWER = 65.0
    DEF_SOCKET = 0
    DEF_CORES = [0,1,2,3,4,5,6,7]
    DEF_MAXSTEPS = 20
    DEF_SEED = None
    DEF_MINPOWER = 15.0
    DEF_MAXPOWER = 115.0
    DEF_POWERSTEP = 3.0
    DEF_DECISION = 0.25 # 4 decisions / sec
    def __init__(self, **config):
        """Configure the environment from keyword arguments; every option
        falls back to the matching DEF_* class default above."""
        ### CPUEnv constant values.
        #   POWER    power cap to reach
        self.POWER = config.get('power', self.DEF_POWER)
        #   SOCKET   socket to get pyRAPL measures
        #   CORES    CPU cores assigned to SOCKET
        self.SOCKET = config.get('socket', self.DEF_SOCKET)
        self.CORES = config.get('cores', self.DEF_CORES)
        #   MAXSTEPS maximum iterations for environment
        #   SEED     seed for RNG reproducibility
        self.MAXSTEPS = config.get('maxsteps', self.DEF_MAXSTEPS)
        self.SEED = config.get('seed', self.DEF_SEED)
        #   MINPOWER minimum in power bandwidth
        #   MAXPOWER maximum in power bandwidth
        self.MINPOWER = config.get('minpower', self.DEF_MINPOWER)
        self.MAXPOWER = config.get('maxpower', self.DEF_MAXPOWER)
        assert(self.MINPOWER < self.MAXPOWER)
        #   DECISION_TIME    time spent between actions (frequency change and power measure)
        #   MEASURE_TIME     time spent measuring energy data
        #   SLEEP_TIME*      waiting time after frequency change
        self.DECISION_TIME = config.get('decision_time', self.DEF_DECISION)
        self.MEASURE_TIME = config.get('measure_time', self.DECISION_TIME)
        self.SLEEP_TIME = self.DECISION_TIME - self.MEASURE_TIME
        #   POWERSTEP    size of intervals of observation space
        #   POWERPOINTS  extrema of power intervals
        #   INTERVALS    list power intervals
        self.POWERSTEP = config.get('powstep', self.DEF_POWERSTEP)
        self.POWERPOINTS = self.get_powerpoints(self.POWERSTEP)
        self.INTERVALS = self.get_intervals(self.POWERPOINTS)
        ### Default metadata.
        self.metadata = { 'render.modes': ['human'] }
        ### Frequency control.
        # _cpu          cpufreq class control
        # _frequencies  list of available frequencies (<= order)
        # _freqpos      position of current frequency
        self._cpu = cpufreq.cpuFreq()
        # NOTE(review): the highest available frequency is dropped ([:-1]) —
        # presumably to exclude the turbo frequency; confirm intent.
        self._frequencies = sorted( self._cpu.available_frequencies )[:-1]
        self._freqpos = -1
        # Set used cores to 'userspace' scheme for frequency modification.
        self._cpu.set_governors('userspace', self.CORES)
        ### Power measure.
        pyRAPL.setup(
            devices = [pyRAPL.Device.PKG],
            socket_ids = [self.SOCKET]
            )
        ### Action space.
        # 0: hold frequency
        # 1: lower frequency
        # 2: raise frequency
        self.action_space = gym.spaces.Discrete(3)
        self.HOLD_FREQ = 0
        self.LOWER_FREQ = 1
        self.RAISE_FREQ = 2
        ### Action rewards:
        # See 'get_reward()'
        #   REWARD_CLOSER   given when action approaches goal
        #   REWARD_FARTHER  given when action gets farther from goal
        #   REWARD_GOAL     given when action gets to goal state
        self.REWARD_CLOSER = +1
        self.REWARD_FARTHER = -1
        self.REWARD_GOAL = +2
        ### Observation space:
        # Interval partition of power range of CPU.
        # Shape of intervals: (power_i, power_i+1]
        self.observation_space = gym.spaces.Discrete( len(self.INTERVALS) + 1 )
        # _power: current power consumption
        # _state: interval of current power consumption
        # _goal:  interval of self.LIMIT
        self._power = 0.0
        self._state = 0
        self._goal = self.get_state(self.POWER)
        ### CPUEnv: random number generator.
        # RNG   random number generator
        self.RNG = None
        self.seed( self.SEED )
        ### CPUEnv: general environment variables.
        # _reward:      accumulated environment reward
        # _done:        boolean value to indicate if goal or max steps were reached
        # _info:        dict for auxiliary debug values
        # _count:       counts the number of steps taken during environment action
        self._reward = None
        self._acc_reward = None
        self._done = None
        self._info = None
        self._count = None
        self.reset()
    def reset(self, reset_freqpos = None):
        """Reset counters, pick an initial frequency (random unless
        reset_freqpos is given), apply it, measure power and return the
        resulting interval index as the initial state."""
        ### General environment variables.
        self._reward = 0
        self._acc_reward = 0
        self._done = False
        self._info = {}
        self._count = 0
        ### Choose preset or random initial frequency.
        if reset_freqpos is None:
            self._freqpos = self.RNG.choice( np.arange( len(self._frequencies) ) )
        else:
            self._freqpos = reset_freqpos
        freq = self._frequencies[ self._freqpos ]
        ### Set frequency, wait sleep time and measure.
        self._power = self.set_wait_measure(freq, 'Reset')
        ### Set state from measured power.
        self._state = self.get_state( self._power )
        self.update_info()
        return self._state
    def step(self, action):
        """Apply one action (hold/lower/raise frequency), wait, measure
        power, and return [state, reward, done, info]. The episode is
        flagged done once MAXSTEPS steps have been taken."""
        ### Check if max steps reached.
        if self._count == self.MAXSTEPS:
            self._done = True
            return self._state, self._reward, self._done, self._info
        assert self.action_space.contains(action)
        ### DECIDE ACTION:
        # Frequency position saturates at both ends of the available list.
        if action == self.HOLD_FREQ:
            pass
        elif action == self.RAISE_FREQ:
            if self._freqpos == len(self._frequencies) - 1:
                pass
            else:
                self._freqpos += 1
        elif action == self.LOWER_FREQ:
            if self._freqpos == 0:
                pass
            else:
                self._freqpos -= 1
        ### DO ACTION, WAIT AND MEASURE:
        freq = self._frequencies[ self._freqpos ]
        label = f"Iter {self._count + 1}"
        next_power = self.set_wait_measure(freq, label)
        next_state = self.get_state( next_power )
        ### REWARD:
        self._reward = self.get_reward(next_state, self._state)
        self._acc_reward += self._reward
        ### GOAL: no goal.
        ### INFO AND STATE UPDATE:
        self._power = next_power
        self._state = next_state
        self._count += 1
        self.update_info()
        ### RETURN:
        return [self._state, self._reward, self._done, self._info]
    def render(self, mode='human'):
        """Print the current environment debug info dict."""
        ### Print current environtment state info.
        print(self._info)
    def seed(self, seed=None):
        """Seed the environment's RNG; returns the seed actually used."""
        ### Make random number generator from seed.
        self.RNG, seed = seeding.np_random(seed)
        return [seed]
    def close(self):
        """Restore the CPU frequency configuration to system defaults."""
        ### Reset CPU to default system values.
        self._cpu.reset()
    ### AUXILIARY ENV METHODS
    def get_powerpoints(self, pstep):
        """Return the ascending list of interval boundaries, from MINPOWER
        up to (at least) MAXPOWER, in steps of pstep watts."""
        powers = []
        ppoint = self.MINPOWER
        powers.append(ppoint)
        while ppoint < self.MAXPOWER:
            ppoint += pstep
            powers.append(ppoint)
        return powers
    def get_intervals(self, powerpoints):
        """Build the list of power intervals [lo, hi] from the boundary
        points; the first interval is open below and the last open above."""
        intervals = []
        # First interval.
        ppoint = powerpoints[0]
        intervals.append( [None, ppoint] )
        for i in range(1, len(powerpoints)):
            intervals.append( [ppoint, powerpoints[i]] )
            ppoint = powerpoints[i]
        # Last interval.
        intervals.append( [ppoint, None] )
        return intervals
    def get_state(self, power):
        """Map a power reading (watts) to its 1-based interval index."""
        pos = np.searchsorted(self.POWERPOINTS, power, side='right')
        return pos + 1
    def get_reward(self, state, prev_state):
        """Reward +2 in the goal interval, +1 when the new state moved
        towards the goal relative to prev_state, -1 otherwise."""
        ### Positive while on goal.
        if state == self._goal:
            return self.REWARD_GOAL
        if state < self._goal:
            if state - prev_state > 0:
                return self.REWARD_CLOSER
            else:
                return self.REWARD_FARTHER
        if state > self._goal:
            if state - prev_state < 0:
                return self.REWARD_CLOSER
            else:
                return self.REWARD_FARTHER
    def update_info(self):
        """Refresh the debug info dict from the current internal state."""
        self._info['step'] = self._count
        self._info['state'] = self._state
        self._info['interval'] = self.INTERVALS[self._state - 1]
        self._info['reward'] = self._reward
        self._info['acc_reward'] = self._acc_reward
        self._info['freqpos'] = self._freqpos
        self._info['frequency'] = self._frequencies[ self._freqpos ]
        self._info['power'] = self._power
    ### AUXILIARY FREQUENCY/MEASURE METHODS
    def set_frequency(self, freq):
        """Pin all managed cores to freq, ordering the min/max updates so
        the min <= max invariant holds during the transition."""
        ### Check if current frequency is above or below
        current_freq = self._cpu.get_min_freq()[ self.CORES[0] ]
        if current_freq < freq:
            # Above
            self._cpu.set_max_frequencies(freq, self.CORES)
            self._cpu.set_min_frequencies(freq, self.CORES)
        else:
            # Below
            self._cpu.set_min_frequencies(freq, self.CORES)
            self._cpu.set_max_frequencies(freq, self.CORES)
        self._cpu.set_frequencies(freq, self.CORES)
    def measure_power(self, label):
        """Measure average package power (watts) over MEASURE_TIME seconds
        with pyRAPL, retrying until a valid reading is obtained."""
        meter = pyRAPL.Measurement(label=label)
        # Retry until pyRAPL yields a package-domain result.
        while meter._results is None or meter._results.pkg is None:
            meter.begin()
            time.sleep(self.MEASURE_TIME)
            meter.end()
        m_energy = meter._results.pkg[self.SOCKET] # micro-J
        m_time = meter._results.duration # micro-s
        power = m_energy / m_time # watts
        return power
    def set_wait_measure(self, freq, label):
        """Apply freq, wait SLEEP_TIME for it to settle, then return the
        measured power."""
        self.set_frequency(freq)
        time.sleep(self.SLEEP_TIME)
        power = self.measure_power(label)
        return power
| [
"numpy.searchsorted",
"pyRAPL.setup",
"gym.spaces.Discrete",
"time.sleep",
"cpufreq.cpuFreq",
"pyRAPL.Measurement",
"gym.utils.seeding.np_random"
] | [((2521, 2538), 'cpufreq.cpuFreq', 'cpufreq.cpuFreq', ([], {}), '()\n', (2536, 2538), False, 'import cpufreq\n'), ((2811, 2878), 'pyRAPL.setup', 'pyRAPL.setup', ([], {'devices': '[pyRAPL.Device.PKG]', 'socket_ids': '[self.SOCKET]'}), '(devices=[pyRAPL.Device.PKG], socket_ids=[self.SOCKET])\n', (2823, 2878), False, 'import pyRAPL\n'), ((3071, 3093), 'gym.spaces.Discrete', 'gym.spaces.Discrete', (['(3)'], {}), '(3)\n', (3090, 3093), False, 'import gym\n'), ((6966, 6989), 'gym.utils.seeding.np_random', 'seeding.np_random', (['seed'], {}), '(seed)\n', (6983, 6989), False, 'from gym.utils import seeding\n'), ((7837, 7891), 'numpy.searchsorted', 'np.searchsorted', (['self.POWERPOINTS', 'power'], {'side': '"""right"""'}), "(self.POWERPOINTS, power, side='right')\n", (7852, 7891), True, 'import numpy as np\n'), ((9495, 9526), 'pyRAPL.Measurement', 'pyRAPL.Measurement', ([], {'label': 'label'}), '(label=label)\n', (9513, 9526), False, 'import pyRAPL\n'), ((9964, 9991), 'time.sleep', 'time.sleep', (['self.SLEEP_TIME'], {}), '(self.SLEEP_TIME)\n', (9974, 9991), False, 'import time\n'), ((9634, 9663), 'time.sleep', 'time.sleep', (['self.MEASURE_TIME'], {}), '(self.MEASURE_TIME)\n', (9644, 9663), False, 'import time\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2021/1/29 16:03
# @Author : zhuzhaowen
# @email : <EMAIL>
# @File : gen_gif_andvideo.py
# @Software: PyCharm
# @desc : "在当前目录下根据图片自动生成gif 图片与视频"
from PIL import Image
import numpy as np
import imageio
import os
def imgs2mp4(imgs, filename, w, h, fps=3, ):
    """Resize every frame to (w, h) and write both an MP4 and a GIF.

    Args:
        imgs: iterable of images (anything np.array can convert to uint8).
        filename: output basename; '.mp4' and '.gif' suffixes are appended.
        w, h: target frame size in pixels.
        fps: frames per second for both outputs.

    Returns:
        Path of the generated GIF file.
    """
    print("imgs {}".format(len(imgs)))
    mp4_path = filename + ".mp4"
    target_size = [w, h]
    resized_frames = []
    with imageio.get_writer(mp4_path, fps=fps) as video:
        for frame in imgs:
            # Normalise to uint8, resize via PIL, then back to an array.
            pixels = np.array(frame, dtype=np.uint8)
            resized = Image.fromarray(pixels).resize(target_size)
            pixels = np.array(resized)
            resized_frames.append(pixels)
            video.append_data(pixels)
    print("save path:{}".format(mp4_path))
    gif_path = filename + ".gif"
    imageio.mimsave(gif_path, resized_frames, fps=fps)
    return gif_path
import pyautogui
if __name__ == '__main__':
    # Confirmation dialog (Chinese): "combine the jpg and png files in the
    # current directory into a gif and an mp4".
    s = pyautogui.confirm('组合当前目录下的jpg 与 png 文件形成gif与mp4')
    # Prompt for the normalised output resolution (default 1920x1080).
    # NOTE(review): pyautogui.prompt returns None when the dialog is
    # cancelled, which would make len(s) raise — confirm intended behaviour.
    s = pyautogui.prompt("请输入图片归一化构成的分辨率默认为{}x{},如不需要更改请按ok,".format(480 * 4, 270 * 4))
    if len(s) > 0:
        w, h = s.split("x")
        w, h = int(w), int(h)
    else:
        w, h = 480 * 4, 270 * 4
    fps = 3
    # Prompt for the frame rate; keep the default of 3 fps on empty input.
    s = pyautogui.prompt("请输入fps默认为{},".format(3))
    if len(s) > 0:
        fps = int(s)
    # Collect every jpg/png/jpeg under the current directory, recursively.
    paths = []
    imgs = []
    for root, dirs, files in os.walk("./"):
        for name in files:
            if name.endswith(".jpg") or name.endswith(".png") or name.endswith(".jpeg"):
                k = os.path.join(root, name)
                paths.append(k)
    paths.sort()
    # Load and resize each image to the chosen resolution
    # (resample=0 is nearest-neighbour).
    for path in paths:
        image = Image.open(path)
        image = np.array(image.resize([w, h], resample=0))
        imgs.append(image)
    imgs2mp4(imgs, "result", w, h, fps=fps)
pyautogui.confirm('文件保存在当前目录下的result.mp4,result.gif') | [
"PIL.Image.fromarray",
"PIL.Image.open",
"os.walk",
"os.path.join",
"numpy.array",
"imageio.mimsave",
"pyautogui.confirm",
"imageio.get_writer"
] | [((880, 922), 'imageio.mimsave', 'imageio.mimsave', (['filename_', 'imgs_'], {'fps': 'fps'}), '(filename_, imgs_, fps=fps)\n', (895, 922), False, 'import imageio\n'), ((1000, 1050), 'pyautogui.confirm', 'pyautogui.confirm', (['"""组合当前目录下的jpg 与 png 文件形成gif与mp4"""'], {}), "('组合当前目录下的jpg 与 png 文件形成gif与mp4')\n", (1017, 1050), False, 'import pyautogui\n'), ((1420, 1433), 'os.walk', 'os.walk', (['"""./"""'], {}), "('./')\n", (1427, 1433), False, 'import os\n'), ((1835, 1888), 'pyautogui.confirm', 'pyautogui.confirm', (['"""文件保存在当前目录下的result.mp4,result.gif"""'], {}), "('文件保存在当前目录下的result.mp4,result.gif')\n", (1852, 1888), False, 'import pyautogui\n'), ((445, 483), 'imageio.get_writer', 'imageio.get_writer', (['filename_'], {'fps': 'fps'}), '(filename_, fps=fps)\n', (463, 483), False, 'import imageio\n'), ((1684, 1700), 'PIL.Image.open', 'Image.open', (['path'], {}), '(path)\n', (1694, 1700), False, 'from PIL import Image\n'), ((556, 585), 'numpy.array', 'np.array', (['img'], {'dtype': 'np.uint8'}), '(img, dtype=np.uint8)\n', (564, 585), True, 'import numpy as np\n'), ((609, 634), 'PIL.Image.fromarray', 'Image.fromarray', (['img_data'], {}), '(img_data)\n', (624, 634), False, 'from PIL import Image\n'), ((704, 722), 'numpy.array', 'np.array', (['img_data'], {}), '(img_data)\n', (712, 722), True, 'import numpy as np\n'), ((1571, 1595), 'os.path.join', 'os.path.join', (['root', 'name'], {}), '(root, name)\n', (1583, 1595), False, 'import os\n')] |
import numpy as np
'''
Class that generates potential field given obstacle map.
'''
class PotentialField:
    '''
    Generates attractive and repulsive potential fields given an obstacle map.

    obstacle_map: Numpy array, 2d map. Cells equal to OBSTACLE_NUM (0) are
                  treated as obstacles; any other value is free space.
    params: Dictionary of potential field parameters: zeta, d_goal, eta,
            q_star, max_potential, max_gradient.
    resolution: Configuration space resolution, unit/pixel.
    topology: Topological space the map represents, could be
              'torus' or 'euclidean'.
    dimension: Either 2 or 3 (only 2 is implemented).
    connectivity: For a cell, under what condition is the adjacent cell considered
                  connected to this cell. Connected by 'edge' or 'vertex'.
    '''

    def __init__(self, obstacle_map, params, resolution=1.0, topology='euclidean', dimension=2, connectivity='edge'):
        self.obstacle_map = obstacle_map
        self.params = params
        self.resolution = resolution
        self.topology = topology
        self.dimension = dimension
        self.connectivity = connectivity
        # Lazily computed caches, built on first access.
        self.obstacle_dist_map = None
        self.repulsive_potential_field = None
        self.attractive_potential_field = None
        self.negative_gradient_field_x = None
        self.negative_gradient_field_y = None
        # The goal must be supplied through set_goal() before the attractive
        # field can be requested; initializing it makes the guard in
        # get_attractive_potential_field raise the intended Exception
        # instead of an AttributeError.
        self.goal = None
        self.OBSTACLE_NUM = 0
        if dimension != 2:
            # NotImplementedError is the raisable exception class;
            # `NotImplemented` is a non-callable singleton.
            raise NotImplementedError("Dimension other than 2 is not implemented!")

    def _compute_obstacle_dist_map(self):
        '''
        Generate a map, each cell containing minimum manhattan distance to adjacent obstacle.
        Runs a multi-source BFS starting from every obstacle cell.
        '''
        h, w = self.obstacle_map.shape
        # -1 means uninitialized distance value.
        UNINITIALIZED_VAL = -1
        self.obstacle_dist_map = np.full((h, w), UNINITIALIZED_VAL)
        # Seed the frontier with every obstacle cell at distance 0.
        frontier = []
        for row in range(h):
            for col in range(w):
                # Obstacle.
                if self.obstacle_map[row, col] == self.OBSTACLE_NUM:
                    frontier.append((row, col))
                    self.obstacle_dist_map[row, col] = 0
        while len(frontier) > 0:
            row, col = frontier.pop(0)
            dist = self.obstacle_dist_map[row, col]
            neighbors = [(row + 1, col), (row - 1, col), (row, col + 1), (row, col - 1)]
            if self.connectivity == 'vertex':
                neighbors = neighbors + [(row + 1, col + 1), (row + 1, col - 1), (row - 1, col - 1), (row - 1, col + 1)]
            # For each neighbor, calculate distance.
            for n in neighbors:
                n_row, n_col = n
                # Euclidean topology cannot wrap around.
                if self.topology == 'euclidean':
                    if n_row < 0 or n_row >= h or n_col < 0 or n_col >= w:
                        # Skip only this out-of-range neighbor; a `break`
                        # here would wrongly discard the remaining
                        # neighbors and corrupt distances near the edges.
                        continue
                # Torus topology can wrap around.
                elif self.topology == 'torus':
                    n_row = (n_row + h) % h
                    n_col = (n_col + w) % w
                # If neighbor is uninitialized or having larger obstacle distance values,
                # update that value, and put it into frontier.
                if self.obstacle_dist_map[n_row, n_col] == UNINITIALIZED_VAL \
                        or dist + 1 < self.obstacle_dist_map[n_row, n_col]:
                    self.obstacle_dist_map[n_row, n_col] = dist + 1
                    frontier.append((n_row, n_col))

    def _compute_repulsive_potential_field(self):
        '''
        Generate a potential field as a result of presence of obstacle.
        Cells on obstacles get max_potential; cells within q_star of an
        obstacle get the standard quadratic repulsive potential; others 0.
        '''
        if self.obstacle_dist_map is None:
            self._compute_obstacle_dist_map()
        self.repulsive_potential_field = np.zeros(self.obstacle_dist_map.shape)
        h, w = self.obstacle_dist_map.shape
        for row in range(h):
            for col in range(w):
                dist = self.obstacle_dist_map[row, col] * self.resolution
                potential = 0
                if dist == 0:
                    potential = self.params['max_potential']
                elif dist <= self.params['q_star']:
                    potential = 0.5 * self.params['eta'] * (1.0 / dist - 1.0 / self.params['q_star']) ** 2
                    potential = min(self.params['max_potential'], potential)
                self.repulsive_potential_field[row, col] = potential

    def _compute_attractive_potential_field(self, goal):
        '''
        Generate attractive potential field due to presence of goal.
        Quadratic within d_goal of the goal, conic (linear) beyond it.
        '''
        h, w = self.obstacle_map.shape
        self.attractive_potential_field = np.zeros((h, w))
        x_goal, y_goal = goal
        for row in range(h):
            for col in range(w):
                x_curr = col * self.resolution
                y_curr = row * self.resolution
                if self.topology == 'torus':
                    # Wrap-around distance on a torus.
                    x_diff = min(np.fabs(x_curr - x_goal), np.fabs(x_curr + w * self.resolution - x_goal))
                    y_diff = min(np.fabs(y_curr - y_goal), np.fabs(y_curr + h * self.resolution - y_goal))
                else:
                    x_diff = x_curr - x_goal
                    y_diff = y_curr - y_goal
                distance = np.sqrt(x_diff ** 2 + y_diff ** 2)
                zeta = self.params['zeta']
                d_goal = self.params['d_goal']
                if distance <= self.params['d_goal']:
                    potential = 0.5 * zeta * distance ** 2
                else:
                    potential = d_goal * zeta * distance - 0.5 * zeta * d_goal ** 2
                self.attractive_potential_field[row, col] = potential

    def get_potential_field(self):
        '''Total potential: repulsive plus attractive fields.'''
        return self.get_repulsive_potential_field() + self.get_attractive_potential_field()

    def get_obstacle_distance_map(self):
        '''Cached per-cell distance (in cells) to the nearest obstacle.'''
        if self.obstacle_dist_map is None:
            self._compute_obstacle_dist_map()
        return self.obstacle_dist_map

    def get_repulsive_potential_field(self):
        '''Cached repulsive potential field.'''
        if self.repulsive_potential_field is None:
            self._compute_repulsive_potential_field()
        return self.repulsive_potential_field

    def get_attractive_potential_field(self):
        '''Cached attractive potential field; requires set_goal() first.'''
        if self.goal is None:
            raise Exception("You need to set goal using set_goal function.")
        if self.attractive_potential_field is None:
            self._compute_attractive_potential_field(self.goal)
        return self.attractive_potential_field

    def get_negative_gradient(self, config):
        '''
        config: Current configuration (x, y) in workspace units.
        return: Gradient vector (numpy array of 2) pointing towards the
                lowest-potential pixel on a ring of radius ~3 pixels,
                with magnitude capped at params['max_gradient'].
        '''
        potential_field = self.get_potential_field()
        h, w = potential_field.shape
        x, y = config
        col, row = int(x / self.resolution), int(y / self.resolution)
        # A ring of pixels surrounding current configuration.
        neighbor_pixels = [(col, row - 3), (col + 1, row - 3), (col + 2, row - 2), (col + 3, row - 1), (col + 3, row), \
                           (col + 3, row + 1), (col + 2, row + 2), (col + 1, row + 3), (col, row + 3), (col - 1, row + 3),\
                           (col - 2, row + 2), (col - 3, row + 1), (col - 3, row), (col - 3, row - 1), (col - 2, row - 2), \
                           (col - 1, row - 3)]
        min_potential = np.Inf
        min_pixel = None
        # Filter through these neighboring pixels,
        # out of range pixels will not be considered in the next step.
        # NOTE(review): on a small euclidean map every ring pixel can be out
        # of range, leaving min_pixel None — confirm maps are large enough.
        for n in neighbor_pixels:
            col_n, row_n = n
            if self.topology == 'torus':
                col_n = (col_n + w) % w
                row_n = (row_n + h) % h
            if col_n < 0 or col_n >= w or row_n < 0 or row_n >= h:
                continue
            # This pixel is valid, find minimum potential.
            if potential_field[row_n, col_n] < min_potential:
                min_potential = potential_field[row_n, col_n]
                min_pixel = n
        # Compute gradient vector.
        col_min, row_min = min_pixel
        x_min, y_min = col_min * self.resolution, row_min * self.resolution
        x_diff = x_min - x
        y_diff = y_min - y
        diff_vector = np.array([x_diff, y_diff])
        diff_norm = np.linalg.norm(diff_vector)
        direction = diff_vector / diff_norm
        potential_diff = potential_field[row, col] - potential_field[row_min, col_min]
        gradient_mag = potential_diff / diff_norm
        if gradient_mag > self.params["max_gradient"]:
            gradient_mag = self.params["max_gradient"]
        gradient = direction * gradient_mag
        return gradient

    def get_negative_gradient_field(self):
        '''
        Return an NxN grid of 2d vectors as two arrays (x and y components),
        one gradient per map cell. Cached until the goal changes.
        '''
        if self.negative_gradient_field_x is not None and self.negative_gradient_field_y is not None:
            return self.negative_gradient_field_x, self.negative_gradient_field_y
        h, w = self.obstacle_map.shape
        self.negative_gradient_field_x = np.zeros((h, w))
        self.negative_gradient_field_y = np.zeros((h, w))
        for row in range(h):
            for col in range(w):
                gradient = self.get_negative_gradient((col * self.resolution, row * self.resolution))
                self.negative_gradient_field_x[row, col] = gradient[0]
                self.negative_gradient_field_y[row, col] = gradient[1]
        return self.negative_gradient_field_x, self.negative_gradient_field_y

    def set_goal(self, goal):
        '''
        goal: Goal in configuration space.
        Invalidates the goal-dependent caches.
        '''
        self.goal = goal
        # Reset attractive potential field because the goal changes.
        self.attractive_potential_field = None
        self.negative_gradient_field_x = None
        self.negative_gradient_field_y = None
self.negative_gradient_field_y = None | [
"numpy.fabs",
"numpy.sqrt",
"numpy.array",
"numpy.zeros",
"numpy.linalg.norm",
"numpy.full"
] | [((1700, 1734), 'numpy.full', 'np.full', (['(h, w)', 'UNINITIALIZED_VAL'], {}), '((h, w), UNINITIALIZED_VAL)\n', (1707, 1734), True, 'import numpy as np\n'), ((3647, 3685), 'numpy.zeros', 'np.zeros', (['self.obstacle_dist_map.shape'], {}), '(self.obstacle_dist_map.shape)\n', (3655, 3685), True, 'import numpy as np\n'), ((4513, 4529), 'numpy.zeros', 'np.zeros', (['(h, w)'], {}), '((h, w))\n', (4521, 4529), True, 'import numpy as np\n'), ((8027, 8053), 'numpy.array', 'np.array', (['[x_diff, y_diff]'], {}), '([x_diff, y_diff])\n', (8035, 8053), True, 'import numpy as np\n'), ((8074, 8101), 'numpy.linalg.norm', 'np.linalg.norm', (['diff_vector'], {}), '(diff_vector)\n', (8088, 8101), True, 'import numpy as np\n'), ((8826, 8842), 'numpy.zeros', 'np.zeros', (['(h, w)'], {}), '((h, w))\n', (8834, 8842), True, 'import numpy as np\n'), ((8884, 8900), 'numpy.zeros', 'np.zeros', (['(h, w)'], {}), '((h, w))\n', (8892, 8900), True, 'import numpy as np\n'), ((5117, 5151), 'numpy.sqrt', 'np.sqrt', (['(x_diff ** 2 + y_diff ** 2)'], {}), '(x_diff ** 2 + y_diff ** 2)\n', (5124, 5151), True, 'import numpy as np\n'), ((4797, 4821), 'numpy.fabs', 'np.fabs', (['(x_curr - x_goal)'], {}), '(x_curr - x_goal)\n', (4804, 4821), True, 'import numpy as np\n'), ((4823, 4869), 'numpy.fabs', 'np.fabs', (['(x_curr + w * self.resolution - x_goal)'], {}), '(x_curr + w * self.resolution - x_goal)\n', (4830, 4869), True, 'import numpy as np\n'), ((4904, 4928), 'numpy.fabs', 'np.fabs', (['(y_curr - y_goal)'], {}), '(y_curr - y_goal)\n', (4911, 4928), True, 'import numpy as np\n'), ((4930, 4976), 'numpy.fabs', 'np.fabs', (['(y_curr + h * self.resolution - y_goal)'], {}), '(y_curr + h * self.resolution - y_goal)\n', (4937, 4976), True, 'import numpy as np\n')] |
###
# Introspective Autoencoder Main training Function
# <NAME>, 2016
import argparse
import imp
import time
import logging
# import sys
# sys.path.insert(0, 'C:\Users\Andy\Generative-and-Discriminative-Voxel-Modeling')
import numpy as np
from path import Path
import theano
import theano.tensor as T
import lasagne
from utils import checkpoints, npytar, metrics_logging
from collections import OrderedDict
import matplotlib
matplotlib.use('Agg') # Turn this off if you want to display plots on your own computer or have X11 forwarding set up.
import matplotlib.pyplot as plt
#####################
# Training Functions#
#####################
#
# This function compiles all theano functions and returns
# two dicts containing the functions and theano variables.
#
def make_training_functions(cfg,model):
# Input Array
X = T.TensorType('float32', [False]*5)('X')
# Class Vector, for classification or augmenting the latent space vector
y = T.TensorType('float32', [False]*2)('y')
# Shared variable for input array
X_shared = lasagne.utils.shared_empty(5, dtype='float32')
# Shared variable for class vector
y_shared = lasagne.utils.shared_empty(2, dtype='float32')
# Input layer
l_in = model['l_in']
# Output layer
l_out = model['l_out']
# Latent Layer
l_latents = model['l_latents']
# Latent Means
l_mu = model['l_mu']
# Log-sigmas
l_ls = model['l_ls']
# Classifier
l_classifier = model['l_classifier']
# Class-conditional latents
l_cc = model['l_cc']
# Decoder Layers, including final output layer
l_decoder = lasagne.layers.get_all_layers(l_out)[len(lasagne.layers.get_all_layers(l_latents)):]
# Batch Parameters
batch_index = T.iscalar('batch_index')
batch_slice = slice(batch_index*cfg['batch_size'], (batch_index+1)*cfg['batch_size'])
#####################################
# Step 1: Compute full forward pass #
#####################################
#
# Note that calling get_output() builds a new graph each time.
# Get outputs
outputs = lasagne.layers.get_output([l_out]+[l_mu]+[l_ls]+[l_classifier]+lasagne.layers.get_all_layers(l_classifier),
{l_in:X, model['l_cc']:y}) # Consider swapping l_classifier in for l_latents
# Get the reconstruction
X_hat = outputs[0]
# Get latent means
Z_mu = outputs[1]
# Get latent logsigmas
Z_ls = outputs[2]
# Get classification guesses
y_hat = outputs[3]
# Get the outputs of the encoder layers, given the training input
g_X = outputs[5:]
# Get the outputs of the feature layers of the encoder given the reconstruction
g_X_hat = lasagne.layers.get_output(lasagne.layers.get_all_layers(l_classifier)[1:],lasagne.nonlinearities.tanh(X_hat))
# Get testing outputs
[X_hat_deterministic,latent_values,y_hat_deterministic] = lasagne.layers.get_output([l_out,l_latents,l_classifier],
{l_in:X, model['l_cc']:y},deterministic=True)
# Latent values at a given
# latent_values = lasagne.layers.get_output(l_latents,deterministic=True)
# For classification
# class_prediction = softmax_out = T.nnet.softmax(g_X[-1])
#################################
# Step 2: Define loss functions #
#################################
# L2 normalization for all params
l2_all = lasagne.regularization.regularize_network_params(l_out,
lasagne.regularization.l2)
# Weighted binary cross-entropy for use in voxel loss. Allows weighting of false positives relative to false negatives.
# Nominally set to strongly penalize false negatives
def weighted_binary_crossentropy(output,target):
return -(98.0*target * T.log(output) + 2.0*(1.0 - target) * T.log(1.0 - output))/100.0
# Voxel-Wise Reconstruction Loss
# Note that the output values are clipped to prevent the BCE from evaluating log(0).
voxel_loss = T.cast(T.mean(weighted_binary_crossentropy(T.clip(lasagne.nonlinearities.sigmoid( X_hat ), 1e-7, 1.0 - 1e-7), X)),'float32')
# KL Divergence from isotropic gaussian prior
kl_div = -0.5 * T.mean(1 + 2*Z_ls - T.sqr(Z_mu) - T.exp(2 * Z_ls))
# Compute classification loss if augmenting with a classification objective
if cfg['discriminative']:
print('discriminating')
classifier_loss = T.cast(T.mean(T.nnet.categorical_crossentropy(T.nnet.softmax(y_hat), y)), 'float32')
classifier_error_rate = T.cast( T.mean( T.neq(T.argmax(y_hat,axis=1), T.argmax(y,axis=1)) ), 'float32' )
classifier_test_error_rate = T.cast( T.mean( T.neq(T.argmax(y_hat_deterministic,axis=1), T.argmax(y,axis=1))), 'float32' )
# Sum the reconstruction loss, the regularization term, the KL divergence over the prior, and the classifier loss.
# Optionally ignore the kl divergence term.
reg_voxel_loss = voxel_loss + cfg['reg']*l2_all +classifier_loss+kl_div if cfg['kl_div'] else voxel_loss + cfg['reg']*l2_all +classifier_loss
# If not, ignore classifier
else:
classifier_loss = None
classifier_error_rate = None
classifier_test_error_rate = None
# Sum the reconstruction loss, the regularization term, and the KL divergence over the prior.
# Optionally ignore the kl divergence term.
reg_voxel_loss = voxel_loss + cfg['reg']*l2_all+kl_div if cfg['kl_div'] else voxel_loss + cfg['reg']*l2_all
##########################
# Step 3: Define Updates #
##########################
# Define learning rate in case of annealing or decay.
if isinstance(cfg['learning_rate'], dict):
learning_rate = theano.shared(np.float32(cfg['learning_rate'][0]))
else:
learning_rate = theano.shared(np.float32(cfg['learning_rate']))
# All network params
params = lasagne.layers.get_all_params(l_out,trainable=True)
# Decoder params
decoder_params = lasagne.layers.get_all_params(l_out,trainable=True)[len(lasagne.layers.get_all_params(l_latents,trainable=True)):]
# Update dict
updates = OrderedDict()
# Reconstruction and Regularization SGD terms
# Note that momentum (or a variant such as Adam) is added further down.
voxel_grads = lasagne.updates.get_or_compute_grads(reg_voxel_loss,params)
for param,grad in zip(params,voxel_grads):
updates[param] = param - learning_rate * grad
# Feature SGD Terms (AKA Introspective SGD Terms)
# Note that momentum (or a variant such as Adam) is added further down.
# Optionally add scale term to weight deeper layers more heavily.
if cfg['introspect']:
# To scale weights differently, add /sum(xrange(1,len(g_X_hat)-1))
# Also (i+1) to scale weights
feature_loss = T.cast(T.mean([T.mean(lasagne.objectives.squared_error(g_X[i],g_X_hat[i])) for i in xrange(0,len(g_X_hat)-2)]),'float32')
feature_grads = lasagne.updates.get_or_compute_grads(feature_loss,decoder_params)
for param,grad in zip(decoder_params,feature_grads):
updates[param] += - learning_rate * grad
else:
feature_loss = None
# Apply nesterov momentum to all updates.
updates = lasagne.updates.apply_nesterov_momentum(updates,momentum=cfg['momentum'])
# Reconstruction Accuracy Term
error_rate = T.cast( T.mean( T.neq(T.ge(X_hat,0), T.ge(X,0))), 'float32' )
# Test Reconstruction Accuracy
test_error_rate = T.cast( T.mean( T.neq(T.ge(X_hat_deterministic,0), T.ge(X,0))), 'float32' )
# Test Reconstruction True Positives
true_positives = T.cast(T.mean(T.eq(T.ge(X_hat_deterministic,0), T.ge(X,0.5))*T.ge(X,0.5))/T.mean(T.ge(X,0.5)),'float32')
# Test Reconstruction True Negatives
true_negatives = T.cast(T.mean(T.eq(T.ge(X_hat_deterministic,0), T.ge(X,0.5))*T.lt(X,0.5))/T.mean(T.lt(X,0.5)),'float32')
# List comprehension to define which outputs are available during training
update_outs = [x for x in [voxel_loss,
feature_loss,
classifier_loss,
kl_div,
classifier_error_rate,
error_rate] if x is not None]
# Training function
update_iter = theano.function([batch_index],update_outs,
updates=updates, givens={
X: X_shared[batch_slice],
y: y_shared[batch_slice]
},on_unused_input='warn' )
# List comprehension to define which outputs are available during testing
test_outs = [x for x in [test_error_rate,
classifier_test_error_rate,
latent_values,true_positives,true_negatives] if x is not None]
# Test function
test_error_fn = theano.function([batch_index],
test_outs, givens={
X: X_shared[batch_slice],
y: y_shared[batch_slice]
},on_unused_input='warn' )
# Dictionary of theano functions
tfuncs = {'update_iter':update_iter,
'test_function':test_error_fn,
}
# Dictionary of theano variables
tvars = {'X' : X,
'y' : y,
'X_shared' : X_shared,
'y_shared' : y_shared,
'batch_slice' : batch_slice,
'batch_index' : batch_index,
'learning_rate' : learning_rate,
}
return tfuncs, tvars
## Data augmentation function from Voxnet, which randomly translates
## and/or horizontally flips a chunk of data.
def jitter_chunk(src, cfg):
dst = src.copy()
if np.random.binomial(1, .2):
dst[:, :, ::-1, :, :] = dst
if np.random.binomial(1, .2):
dst[:, :, :, ::-1, :] = dst
max_ij = cfg['max_jitter_ij']
max_k = cfg['max_jitter_k']
shift_ijk = [np.random.random_integers(-max_ij, max_ij),
np.random.random_integers(-max_ij, max_ij),
np.random.random_integers(-max_k, max_k)]
for axis, shift in enumerate(shift_ijk):
if shift != 0:
# beware wraparound
dst = np.roll(dst, shift, axis+2)
return dst
## Data loading function, originally from VoxNet.
def data_loader(cfg, fname):
dims = cfg['dims']
chunk_size = cfg['batch_size']*cfg['batches_per_chunk']//2
xc = np.zeros((chunk_size, cfg['n_channels'],)+dims, dtype=np.float32)
reader = npytar.NpyTarReader(fname)
yc = np.zeros((chunk_size,cfg['n_classes']),dtype = np.float32)
counter = []
for ix, (x, name) in enumerate(reader):
cix = ix % chunk_size
xc[cix] = x.astype(np.float32)
yc[cix,(int(name.split('.')[0])-1)] = 1
counter.append(int(name.split('.')[0])-1)
if len(counter) == chunk_size:
indices = np.random.permutation(2*len(xc))
yield (3.0 * np.append(xc,jitter_chunk(xc, cfg),axis=0)[indices] - 1.0, np.append(yc,yc,axis=0)[indices])
counter = []
yc.fill(0)
xc.fill(0)
if len(counter) > 0:
# pad to nearest multiple of batch_size
if len(counter)%cfg['batch_size'] != 0:
new_size = int(np.ceil(len(counter)/float(cfg['batch_size'])))*cfg['batch_size']
xc = xc[:new_size]
xc[len(counter):] = xc[:(new_size-len(counter))]
yc = yc[:new_size]
yc[len(counter):] = yc[:(new_size-len(counter))]
counter = counter + counter[:(new_size-len(counter))]
indices = np.random.permutation(2*len(xc))
yield (3.0 * np.append(xc,jitter_chunk(xc, cfg),axis=0)[indices] - 1.0, np.append(yc,yc,axis=0)[indices])
# Test data loading function, originally from VoxNet
def test_data_loader(cfg,fname):
dims = cfg['dims']
chunk_size = cfg['batch_size']*cfg['batches_per_chunk']
xc = np.zeros((chunk_size, cfg['n_channels'],)+dims, dtype=np.float32)
reader = npytar.NpyTarReader(fname)
yc = np.zeros((chunk_size,cfg['n_classes']),dtype = np.float32)
counter = []
for ix, (x, name) in enumerate(reader):
cix = ix % chunk_size
xc[cix] = x.astype(np.float32)
yc[cix,(int(name.split('.')[0])-1)] = 1
counter.append(int(name.split('.')[0])-1)
if len(counter) == chunk_size:
yield (3.0*xc-1.0, yc)
counter = []
yc.fill(0)
xc.fill(0)
if len(counter) > 0:
# pad to nearest multiple of batch_size
if len(counter)%cfg['batch_size'] != 0:
new_size = int(np.ceil(len(counter)/float(cfg['batch_size'])))*cfg['batch_size']
xc = xc[:new_size]
xc[len(counter):] = xc[:(new_size-len(counter))]
yc = yc[:new_size]
yc[len(counter):] = yc[:(new_size-len(counter))]
counter = counter + counter[:(new_size-len(counter))]
yield (3.0*xc-1.0, yc)
# Main Function
def main(args):
# Load config file
config_module = imp.load_source('config', args.config_path)
cfg = config_module.cfg
# Define weights file name
weights_fname = str(args.config_path)[:-3]+'.npz'
# Define training metrics filename
metrics_fname = weights_fname[:-4]+'METRICS.jsonl'
# Prepare Logs
logging.basicConfig(level=logging.INFO, format='%(asctime)s %(levelname)s| %(message)s')
logging.info('Metrics will be saved to {}'.format(metrics_fname))
mlog = metrics_logging.MetricsLogger(metrics_fname, reinitialize=True)
# Get model and compile theano functions
model = config_module.get_model()
logging.info('Compiling theano functions...')
tfuncs, tvars = make_training_functions(cfg,model)
logging.info('Training...')
# Iteration Counter. One iteration corresponds to one minibatch.
itr = 0
# Best true-positive rate
best_tp = 0
for epoch in xrange(cfg['max_epochs']):
# Prepare data loader
loader = (data_loader(cfg,args.train_file))
# Update Learning Rate. Note that this version of the function does not support a decay rate;
# See other training files in the discriminative section for this.
if isinstance(cfg['learning_rate'], dict) and epoch > 0:
if any(x==epoch for x in cfg['learning_rate'].keys()):
lr = np.float32(tvars['learning_rate'].get_value())
new_lr = cfg['learning_rate'][epoch]
logging.info('Changing learning rate from {} to {}'.format(lr, new_lr))
tvars['learning_rate'].set_value(np.float32(new_lr))
# Initialize epoch-wise chunk counter
iter_counter = 0;
# Initialize Epoch-wise metrics
vloss_e, floss_e, closs_e, d_kl_e, c_acc_e, acc_e = 0, 0, 0, 0, 0, 0
# Train!
for x_shared, y_shared in loader: # Loop across chunks
# Increment chunk counter
iter_counter+=1
# Determine number of batches in this chunk; this should only vary from
# cfg['batches_per_chunk'] if we're at the end of the dataset.
num_batches = len(x_shared)//cfg['batch_size']
# Load chunk into memory
tvars['X_shared'].set_value(x_shared, borrow=True)
tvars['y_shared'].set_value(y_shared, borrow=True)
# Initialize Chunk-wise metrics
voxel_lvs,feature_lvs,class_lvs,kl_divs,class_accs,accs = [],[],[],[],[],[]
for bi in xrange(num_batches): # Loop across batches within chunk
# Update!
results = tfuncs['update_iter'](bi)
# Assign results
# This could definitely be done more cleanly with a list comprehension.
voxel_loss = results[0]
feature_loss = results[1] if cfg['introspect'] else 0
classifier_loss = results[1+cfg['introspect']] if cfg['discriminative'] else 0
kl_div = results[1+cfg['introspect']+cfg['discriminative']]
class_acc = results[2+cfg['introspect']+cfg['discriminative']] if cfg['discriminative'] else 0
acc = results[2+cfg['introspect']+2*cfg['discriminative']]
# Append results to chunk-wise result list; these will be averaged later.
voxel_lvs.append(voxel_loss)
feature_lvs.append(feature_loss)
class_lvs.append(classifier_loss)
kl_divs.append(kl_div)
class_accs.append(class_acc)
accs.append(acc)
# Increment batch counter
itr += 1
# Average metrics across chunk
[vloss, floss,closs, d_kl,c_acc,acc] = [float(np.mean(voxel_lvs)), float(np.mean(feature_lvs)),
float(np.mean(class_lvs)), float(np.mean(kl_divs)),
1.0-float(np.mean(class_accs)), 1.0-float(np.mean(accs))]
# Update epoch-wise metrics
vloss_e, floss_e, closs_e, d_kl_e, c_acc_e, acc_e = [vloss_e+vloss, floss_e+floss, closs_e+closs, d_kl_e+d_kl, c_acc_e+c_acc, acc_e+acc]
# Report and Log chunk-wise metrics
logging.info('epoch: {}, itr: {}, v_loss: {}, f_loss: {}, c_loss: {}, D_kl: {}, class_acc: {}, acc: {}'.format(epoch, itr, vloss, floss,
closs, d_kl, c_acc, acc))
mlog.log(epoch=epoch, itr=itr, vloss=vloss,floss=floss, acc=acc,d_kl=d_kl,c_acc=c_acc)
# Average metrics across epoch
vloss_e, floss_e, closs_e, d_kl_e, c_acc_e, acc_e = [vloss_e/iter_counter, floss_e/iter_counter,
closs_e/iter_counter, d_kl_e/iter_counter,
c_acc_e/iter_counter, acc_e/iter_counter]
# Report and log epoch-wise metrics
logging.info('Training metrics, Epoch {}, v_loss: {}, f_loss: {}, c_loss: {}, D_kl: {}, class_acc: {}, acc: {}'.format(epoch, vloss_e, floss_e,closs_e,d_kl_e,c_acc_e,acc_e))
mlog.log(epoch=epoch, vloss_e=vloss_e, floss_e=floss_e, closs_e=closs_e, d_kl_e=d_kl_e, c_acc_e=c_acc_e, acc_e=acc_e)
# Every Nth epoch, save weights
if not (epoch%cfg['checkpoint_every_nth']):
checkpoints.save_weights(weights_fname, model['l_out'],
{'itr': itr, 'ts': time.time()})
# When training is complete, check test performance
test_loader = test_data_loader(cfg,'shapenet10_test_nr.tar')
logging.info('Examining performance on test set')
# Initialize test metrics
test_error,test_class_error,latent_values,tp,tn = [],[],[],[],[]
# Initialize true class array for 2D manifold plots
true_class = np.array([],dtype=np.int)
for x_shared,y_shared in test_loader: # Loop across test chunks
# Calculate number of batches
num_batches = len(x_shared)//cfg['batch_size']
# Load test chunk into memory
tvars['X_shared'].set_value(x_shared, borrow=True)
tvars['y_shared'].set_value(y_shared, borrow=True)
# Update true class array for 2D Manifold Plots
true_class = np.append(true_class,np.argmax(y_shared,axis=1))
for bi in xrange(num_batches): # Loop across minibatches
# Get test results
test_results = tfuncs['test_function'](bi)
# Assign test results
# This could be done more cleanly with a list comprehension
batch_test_error=test_results[0]
batch_test_class_error = test_results[1] if cfg['discriminative'] else 0
latents = test_results[1+cfg['discriminative']]
batch_tp = test_results[2+cfg['discriminative']]
batch_tn = test_results[3+cfg['discriminative']]
test_error.append(batch_test_error)
test_class_error.append(batch_test_class_error)
latent_values.append(latents)
tp.append(batch_tp)
tn.append(batch_tn)
# Average results
t_error = 1-float(np.mean(test_error))
true_positives = float(np.mean(tp))
true_negatives = float(np.mean(tn))
t_class_error = 1-float(np.mean(test_class_error))
Zs = np.asarray(latent_values,np.float32)
# Report and log results
logging.info('Test Accuracy: {}, Classification Test Accuracy: {}, True Positives: {}, True Negatives: {}'.format(t_error,t_class_error,true_positives,true_negatives))
mlog.log(test_error=t_error,t_class_error = t_class_error,true_positives=true_positives,true_negatives=true_negatives)
# Optionally plot and save 2D manifold if using only 2 latent variables.
if np.shape(Zs)[2]==2:
Zs = np.reshape(Zs,(np.shape(Zs)[0]*np.shape(Zs)[1],1,2))
ygnd = np.asarray(true_class,np.int)
plt.scatter(Zs[:,0,0],Zs[:,0,1],s = 30, c=ygnd,alpha = 0.5)
plt.savefig('figs/'+weights_fname[:-4]+str(epoch)+'.png')
plt.clf()
logging.info('training done')
checkpoints.save_weights(weights_fname, model['l_out'],
{'itr': itr, 'ts': time.time()})
### TODO: Clean this up and add the necessary arguments to enable all of the options we want.
if __name__=='__main__':
parser = argparse.ArgumentParser()
parser.add_argument('config_path', type=Path, help='config .py file')
parser.add_argument('train_file', type=Path,default='shapenet10_train_nr.tar')
parser.add_argument('test_file', type=Path, default = 'shapenet10_test_nr.tar')
args = parser.parse_args()
main(args)
| [
"theano.tensor.exp",
"theano.tensor.iscalar",
"imp.load_source",
"numpy.array",
"theano.tensor.nnet.softmax",
"theano.tensor.argmax",
"utils.metrics_logging.MetricsLogger",
"theano.tensor.TensorType",
"lasagne.objectives.squared_error",
"logging.info",
"numpy.random.binomial",
"lasagne.layers.... | [((430, 451), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (444, 451), False, 'import matplotlib\n'), ((1069, 1115), 'lasagne.utils.shared_empty', 'lasagne.utils.shared_empty', (['(5)'], {'dtype': '"""float32"""'}), "(5, dtype='float32')\n", (1095, 1115), False, 'import lasagne\n'), ((1175, 1221), 'lasagne.utils.shared_empty', 'lasagne.utils.shared_empty', (['(2)'], {'dtype': '"""float32"""'}), "(2, dtype='float32')\n", (1201, 1221), False, 'import lasagne\n'), ((1806, 1830), 'theano.tensor.iscalar', 'T.iscalar', (['"""batch_index"""'], {}), "('batch_index')\n", (1815, 1830), True, 'import theano.tensor as T\n'), ((3017, 3130), 'lasagne.layers.get_output', 'lasagne.layers.get_output', (['[l_out, l_latents, l_classifier]', "{l_in: X, model['l_cc']: y}"], {'deterministic': '(True)'}), "([l_out, l_latents, l_classifier], {l_in: X, model\n ['l_cc']: y}, deterministic=True)\n", (3042, 3130), False, 'import lasagne\n'), ((3582, 3669), 'lasagne.regularization.regularize_network_params', 'lasagne.regularization.regularize_network_params', (['l_out', 'lasagne.regularization.l2'], {}), '(l_out, lasagne.\n regularization.l2)\n', (3630, 3669), False, 'import lasagne\n'), ((6090, 6142), 'lasagne.layers.get_all_params', 'lasagne.layers.get_all_params', (['l_out'], {'trainable': '(True)'}), '(l_out, trainable=True)\n', (6119, 6142), False, 'import lasagne\n'), ((6332, 6345), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (6343, 6345), False, 'from collections import OrderedDict\n'), ((6495, 6555), 'lasagne.updates.get_or_compute_grads', 'lasagne.updates.get_or_compute_grads', (['reg_voxel_loss', 'params'], {}), '(reg_voxel_loss, params)\n', (6531, 6555), False, 'import lasagne\n'), ((7457, 7531), 'lasagne.updates.apply_nesterov_momentum', 'lasagne.updates.apply_nesterov_momentum', (['updates'], {'momentum': "cfg['momentum']"}), "(updates, momentum=cfg['momentum'])\n", (7496, 7531), False, 'import lasagne\n'), ((8465, 
8614), 'theano.function', 'theano.function', (['[batch_index]', 'update_outs'], {'updates': 'updates', 'givens': '{X: X_shared[batch_slice], y: y_shared[batch_slice]}', 'on_unused_input': '"""warn"""'}), "([batch_index], update_outs, updates=updates, givens={X:\n X_shared[batch_slice], y: y_shared[batch_slice]}, on_unused_input='warn')\n", (8480, 8614), False, 'import theano\n'), ((8961, 9091), 'theano.function', 'theano.function', (['[batch_index]', 'test_outs'], {'givens': '{X: X_shared[batch_slice], y: y_shared[batch_slice]}', 'on_unused_input': '"""warn"""'}), "([batch_index], test_outs, givens={X: X_shared[batch_slice],\n y: y_shared[batch_slice]}, on_unused_input='warn')\n", (8976, 9091), False, 'import theano\n'), ((9791, 9817), 'numpy.random.binomial', 'np.random.binomial', (['(1)', '(0.2)'], {}), '(1, 0.2)\n', (9809, 9817), True, 'import numpy as np\n'), ((9861, 9887), 'numpy.random.binomial', 'np.random.binomial', (['(1)', '(0.2)'], {}), '(1, 0.2)\n', (9879, 9887), True, 'import numpy as np\n'), ((10517, 10583), 'numpy.zeros', 'np.zeros', (["((chunk_size, cfg['n_channels']) + dims)"], {'dtype': 'np.float32'}), "((chunk_size, cfg['n_channels']) + dims, dtype=np.float32)\n", (10525, 10583), True, 'import numpy as np\n'), ((10596, 10622), 'utils.npytar.NpyTarReader', 'npytar.NpyTarReader', (['fname'], {}), '(fname)\n', (10615, 10622), False, 'from utils import checkpoints, npytar, metrics_logging\n'), ((10632, 10690), 'numpy.zeros', 'np.zeros', (["(chunk_size, cfg['n_classes'])"], {'dtype': 'np.float32'}), "((chunk_size, cfg['n_classes']), dtype=np.float32)\n", (10640, 10690), True, 'import numpy as np\n'), ((12023, 12089), 'numpy.zeros', 'np.zeros', (["((chunk_size, cfg['n_channels']) + dims)"], {'dtype': 'np.float32'}), "((chunk_size, cfg['n_channels']) + dims, dtype=np.float32)\n", (12031, 12089), True, 'import numpy as np\n'), ((12102, 12128), 'utils.npytar.NpyTarReader', 'npytar.NpyTarReader', (['fname'], {}), '(fname)\n', (12121, 12128), False, 'from 
utils import checkpoints, npytar, metrics_logging\n'), ((12138, 12196), 'numpy.zeros', 'np.zeros', (["(chunk_size, cfg['n_classes'])"], {'dtype': 'np.float32'}), "((chunk_size, cfg['n_classes']), dtype=np.float32)\n", (12146, 12196), True, 'import numpy as np\n'), ((13142, 13185), 'imp.load_source', 'imp.load_source', (['"""config"""', 'args.config_path'], {}), "('config', args.config_path)\n", (13157, 13185), False, 'import imp\n'), ((13430, 13523), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO', 'format': '"""%(asctime)s %(levelname)s| %(message)s"""'}), "(level=logging.INFO, format=\n '%(asctime)s %(levelname)s| %(message)s')\n", (13449, 13523), False, 'import logging\n'), ((13600, 13663), 'utils.metrics_logging.MetricsLogger', 'metrics_logging.MetricsLogger', (['metrics_fname'], {'reinitialize': '(True)'}), '(metrics_fname, reinitialize=True)\n', (13629, 13663), False, 'from utils import checkpoints, npytar, metrics_logging\n'), ((13756, 13801), 'logging.info', 'logging.info', (['"""Compiling theano functions..."""'], {}), "('Compiling theano functions...')\n", (13768, 13801), False, 'import logging\n'), ((13862, 13889), 'logging.info', 'logging.info', (['"""Training..."""'], {}), "('Training...')\n", (13874, 13889), False, 'import logging\n'), ((22113, 22142), 'logging.info', 'logging.info', (['"""training done"""'], {}), "('training done')\n", (22125, 22142), False, 'import logging\n'), ((22405, 22430), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (22428, 22430), False, 'import argparse\n'), ((841, 877), 'theano.tensor.TensorType', 'T.TensorType', (['"""float32"""', '([False] * 5)'], {}), "('float32', [False] * 5)\n", (853, 877), True, 'import theano.tensor as T\n'), ((971, 1007), 'theano.tensor.TensorType', 'T.TensorType', (['"""float32"""', '([False] * 2)'], {}), "('float32', [False] * 2)\n", (983, 1007), True, 'import theano.tensor as T\n'), ((1674, 1710), 'lasagne.layers.get_all_layers', 
'lasagne.layers.get_all_layers', (['l_out'], {}), '(l_out)\n', (1703, 1710), False, 'import lasagne\n'), ((2888, 2922), 'lasagne.nonlinearities.tanh', 'lasagne.nonlinearities.tanh', (['X_hat'], {}), '(X_hat)\n', (2915, 2922), False, 'import lasagne\n'), ((6184, 6236), 'lasagne.layers.get_all_params', 'lasagne.layers.get_all_params', (['l_out'], {'trainable': '(True)'}), '(l_out, trainable=True)\n', (6213, 6236), False, 'import lasagne\n'), ((7170, 7236), 'lasagne.updates.get_or_compute_grads', 'lasagne.updates.get_or_compute_grads', (['feature_loss', 'decoder_params'], {}), '(feature_loss, decoder_params)\n', (7206, 7236), False, 'import lasagne\n'), ((10007, 10049), 'numpy.random.random_integers', 'np.random.random_integers', (['(-max_ij)', 'max_ij'], {}), '(-max_ij, max_ij)\n', (10032, 10049), True, 'import numpy as np\n'), ((10068, 10110), 'numpy.random.random_integers', 'np.random.random_integers', (['(-max_ij)', 'max_ij'], {}), '(-max_ij, max_ij)\n', (10093, 10110), True, 'import numpy as np\n'), ((10129, 10169), 'numpy.random.random_integers', 'np.random.random_integers', (['(-max_k)', 'max_k'], {}), '(-max_k, max_k)\n', (10154, 10169), True, 'import numpy as np\n'), ((2230, 2273), 'lasagne.layers.get_all_layers', 'lasagne.layers.get_all_layers', (['l_classifier'], {}), '(l_classifier)\n', (2259, 2273), False, 'import lasagne\n'), ((2840, 2883), 'lasagne.layers.get_all_layers', 'lasagne.layers.get_all_layers', (['l_classifier'], {}), '(l_classifier)\n', (2869, 2883), False, 'import lasagne\n'), ((5927, 5962), 'numpy.float32', 'np.float32', (["cfg['learning_rate'][0]"], {}), "(cfg['learning_rate'][0])\n", (5937, 5962), True, 'import numpy as np\n'), ((6012, 6044), 'numpy.float32', 'np.float32', (["cfg['learning_rate']"], {}), "(cfg['learning_rate'])\n", (6022, 6044), True, 'import numpy as np\n'), ((10289, 10318), 'numpy.roll', 'np.roll', (['dst', 'shift', '(axis + 2)'], {}), '(dst, shift, axis + 2)\n', (10296, 10318), True, 'import numpy as np\n'), ((19151, 
19200), 'logging.info', 'logging.info', (['"""Examining performance on test set"""'], {}), "('Examining performance on test set')\n", (19163, 19200), False, 'import logging\n'), ((19432, 19458), 'numpy.array', 'np.array', (['[]'], {'dtype': 'np.int'}), '([], dtype=np.int)\n', (19440, 19458), True, 'import numpy as np\n'), ((21273, 21310), 'numpy.asarray', 'np.asarray', (['latent_values', 'np.float32'], {}), '(latent_values, np.float32)\n', (21283, 21310), True, 'import numpy as np\n'), ((22258, 22269), 'time.time', 'time.time', ([], {}), '()\n', (22267, 22269), False, 'import time\n'), ((1715, 1755), 'lasagne.layers.get_all_layers', 'lasagne.layers.get_all_layers', (['l_latents'], {}), '(l_latents)\n', (1744, 1755), False, 'import lasagne\n'), ((4405, 4420), 'theano.tensor.exp', 'T.exp', (['(2 * Z_ls)'], {}), '(2 * Z_ls)\n', (4410, 4420), True, 'import theano.tensor as T\n'), ((6240, 6296), 'lasagne.layers.get_all_params', 'lasagne.layers.get_all_params', (['l_latents'], {'trainable': '(True)'}), '(l_latents, trainable=True)\n', (6269, 6296), False, 'import lasagne\n'), ((7619, 7633), 'theano.tensor.ge', 'T.ge', (['X_hat', '(0)'], {}), '(X_hat, 0)\n', (7623, 7633), True, 'import theano.tensor as T\n'), ((7634, 7644), 'theano.tensor.ge', 'T.ge', (['X', '(0)'], {}), '(X, 0)\n', (7638, 7644), True, 'import theano.tensor as T\n'), ((7744, 7772), 'theano.tensor.ge', 'T.ge', (['X_hat_deterministic', '(0)'], {}), '(X_hat_deterministic, 0)\n', (7748, 7772), True, 'import theano.tensor as T\n'), ((7773, 7783), 'theano.tensor.ge', 'T.ge', (['X', '(0)'], {}), '(X, 0)\n', (7777, 7783), True, 'import theano.tensor as T\n'), ((7946, 7958), 'theano.tensor.ge', 'T.ge', (['X', '(0.5)'], {}), '(X, 0.5)\n', (7950, 7958), True, 'import theano.tensor as T\n'), ((8118, 8130), 'theano.tensor.lt', 'T.lt', (['X', '(0.5)'], {}), '(X, 0.5)\n', (8122, 8130), True, 'import theano.tensor as T\n'), ((21132, 21143), 'numpy.mean', 'np.mean', (['tp'], {}), '(tp)\n', (21139, 21143), True, 'import 
numpy as np\n'), ((21180, 21191), 'numpy.mean', 'np.mean', (['tn'], {}), '(tn)\n', (21187, 21191), True, 'import numpy as np\n'), ((21897, 21927), 'numpy.asarray', 'np.asarray', (['true_class', 'np.int'], {}), '(true_class, np.int)\n', (21907, 21927), True, 'import numpy as np\n'), ((21943, 22005), 'matplotlib.pyplot.scatter', 'plt.scatter', (['Zs[:, 0, 0]', 'Zs[:, 0, 1]'], {'s': '(30)', 'c': 'ygnd', 'alpha': '(0.5)'}), '(Zs[:, 0, 0], Zs[:, 0, 1], s=30, c=ygnd, alpha=0.5)\n', (21954, 22005), True, 'import matplotlib.pyplot as plt\n'), ((22093, 22102), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (22100, 22102), True, 'import matplotlib.pyplot as plt\n'), ((4222, 4259), 'lasagne.nonlinearities.sigmoid', 'lasagne.nonlinearities.sigmoid', (['X_hat'], {}), '(X_hat)\n', (4252, 4259), False, 'import lasagne\n'), ((4391, 4402), 'theano.tensor.sqr', 'T.sqr', (['Z_mu'], {}), '(Z_mu)\n', (4396, 4402), True, 'import theano.tensor as T\n'), ((4642, 4663), 'theano.tensor.nnet.softmax', 'T.nnet.softmax', (['y_hat'], {}), '(y_hat)\n', (4656, 4663), True, 'import theano.tensor as T\n'), ((4735, 4758), 'theano.tensor.argmax', 'T.argmax', (['y_hat'], {'axis': '(1)'}), '(y_hat, axis=1)\n', (4743, 4758), True, 'import theano.tensor as T\n'), ((4759, 4778), 'theano.tensor.argmax', 'T.argmax', (['y'], {'axis': '(1)'}), '(y, axis=1)\n', (4767, 4778), True, 'import theano.tensor as T\n'), ((4853, 4890), 'theano.tensor.argmax', 'T.argmax', (['y_hat_deterministic'], {'axis': '(1)'}), '(y_hat_deterministic, axis=1)\n', (4861, 4890), True, 'import theano.tensor as T\n'), ((4891, 4910), 'theano.tensor.argmax', 'T.argmax', (['y'], {'axis': '(1)'}), '(y, axis=1)\n', (4899, 4910), True, 'import theano.tensor as T\n'), ((7926, 7938), 'theano.tensor.ge', 'T.ge', (['X', '(0.5)'], {}), '(X, 0.5)\n', (7930, 7938), True, 'import theano.tensor as T\n'), ((8098, 8110), 'theano.tensor.lt', 'T.lt', (['X', '(0.5)'], {}), '(X, 0.5)\n', (8102, 8110), True, 'import theano.tensor as T\n'), ((11805, 
11830), 'numpy.append', 'np.append', (['yc', 'yc'], {'axis': '(0)'}), '(yc, yc, axis=0)\n', (11814, 11830), True, 'import numpy as np\n'), ((14735, 14753), 'numpy.float32', 'np.float32', (['new_lr'], {}), '(new_lr)\n', (14745, 14753), True, 'import numpy as np\n'), ((17015, 17033), 'numpy.mean', 'np.mean', (['voxel_lvs'], {}), '(voxel_lvs)\n', (17022, 17033), True, 'import numpy as np\n'), ((17042, 17062), 'numpy.mean', 'np.mean', (['feature_lvs'], {}), '(feature_lvs)\n', (17049, 17062), True, 'import numpy as np\n'), ((17123, 17141), 'numpy.mean', 'np.mean', (['class_lvs'], {}), '(class_lvs)\n', (17130, 17141), True, 'import numpy as np\n'), ((17150, 17166), 'numpy.mean', 'np.mean', (['kl_divs'], {}), '(kl_divs)\n', (17157, 17166), True, 'import numpy as np\n'), ((18990, 19001), 'time.time', 'time.time', ([], {}), '()\n', (18999, 19001), False, 'import time\n'), ((20001, 20028), 'numpy.argmax', 'np.argmax', (['y_shared'], {'axis': '(1)'}), '(y_shared, axis=1)\n', (20010, 20028), True, 'import numpy as np\n'), ((21076, 21095), 'numpy.mean', 'np.mean', (['test_error'], {}), '(test_error)\n', (21083, 21095), True, 'import numpy as np\n'), ((21229, 21254), 'numpy.mean', 'np.mean', (['test_class_error'], {}), '(test_class_error)\n', (21236, 21254), True, 'import numpy as np\n'), ((21780, 21792), 'numpy.shape', 'np.shape', (['Zs'], {}), '(Zs)\n', (21788, 21792), True, 'import numpy as np\n'), ((3955, 3968), 'theano.tensor.log', 'T.log', (['output'], {}), '(output)\n', (3960, 3968), True, 'import theano.tensor as T\n'), ((3992, 4011), 'theano.tensor.log', 'T.log', (['(1.0 - output)'], {}), '(1.0 - output)\n', (3997, 4011), True, 'import theano.tensor as T\n'), ((7046, 7098), 'lasagne.objectives.squared_error', 'lasagne.objectives.squared_error', (['g_X[i]', 'g_X_hat[i]'], {}), '(g_X[i], g_X_hat[i])\n', (7078, 7098), False, 'import lasagne\n'), ((7884, 7912), 'theano.tensor.ge', 'T.ge', (['X_hat_deterministic', '(0)'], {}), '(X_hat_deterministic, 0)\n', (7888, 7912), 
True, 'import theano.tensor as T\n'), ((7913, 7925), 'theano.tensor.ge', 'T.ge', (['X', '(0.5)'], {}), '(X, 0.5)\n', (7917, 7925), True, 'import theano.tensor as T\n'), ((8056, 8084), 'theano.tensor.ge', 'T.ge', (['X_hat_deterministic', '(0)'], {}), '(X_hat_deterministic, 0)\n', (8060, 8084), True, 'import theano.tensor as T\n'), ((8085, 8097), 'theano.tensor.ge', 'T.ge', (['X', '(0.5)'], {}), '(X, 0.5)\n', (8089, 8097), True, 'import theano.tensor as T\n'), ((11097, 11122), 'numpy.append', 'np.append', (['yc', 'yc'], {'axis': '(0)'}), '(yc, yc, axis=0)\n', (11106, 11122), True, 'import numpy as np\n'), ((17231, 17250), 'numpy.mean', 'np.mean', (['class_accs'], {}), '(class_accs)\n', (17238, 17250), True, 'import numpy as np\n'), ((17263, 17276), 'numpy.mean', 'np.mean', (['accs'], {}), '(accs)\n', (17270, 17276), True, 'import numpy as np\n'), ((21836, 21848), 'numpy.shape', 'np.shape', (['Zs'], {}), '(Zs)\n', (21844, 21848), True, 'import numpy as np\n'), ((21852, 21864), 'numpy.shape', 'np.shape', (['Zs'], {}), '(Zs)\n', (21860, 21864), True, 'import numpy as np\n')] |
import os
import math
import numpy as np
#import itertools
#import open3d as o3d
# import pandas as pd
# from tqdm import tqdm
# import joblib
# import time
import rosbag
import sensor_msgs.point_cloud2 as pc2
import torch
import yaml
'''
-
name: "x"
offset: 0
datatype: 7
count: 1
-
name: "y"
offset: 4
datatype: 7
count: 1
-
name: "z"
offset: 8
datatype: 7
count: 1
-
name: "intensity"
offset: 16
datatype: 7
count: 1
-
name: "t"
offset: 20
datatype: 6
count: 1
-
name: "reflectivity"
offset: 24
datatype: 4
count: 1
-
name: "ring"
offset: 26
datatype: 2
count: 1
-
name: "noise"
offset: 28
datatype: 4
count: 1
-
name: "range"
offset: 32
datatype: 6
count: 1
---
'''
#FILENAME = '/home/hanli/Documents/waymo/segment-15533468984793020049_800_000_820_000_with_camera_labels.tfrecord'
# Class-id (as string) -> human-readable label for detected objects.
# NOTE(review): the key "0" appears four times; Python keeps only the LAST
# duplicate, so at runtime this dict resolves to
# {"0": "Pedestrian", "1": "Cyclist", "2": "Car", "3": "Motorcycle"} and the
# "Truck" entry is silently dropped. The intended keys (presumably "4", "5",
# "6") need to be confirmed against the detector's class-id scheme.
labelMapping = {
    "0": "Pedestrian",
    "1": "Cyclist",
    "2": "Car",
    "3": "Motorcycle",
    "0": "Truck",
    "0": "Pedestrian",
    "0": "Pedestrian",
}
def reshape_torch(pointcloud, num_field):
    """Reshape a flat point buffer into an (N, num_field) float32 numpy array.

    The data is round-tripped through a CPU torch tensor so the dtype
    handling matches the rest of the torch-based pipeline.
    """
    cpu = torch.device("cpu")
    as_tensor = torch.tensor(pointcloud, dtype=torch.float32, device=cpu)
    reshaped = as_tensor.reshape((-1, num_field))
    return reshaped.detach().numpy().astype(np.float32)
def euler_from_quaternion(x, y, z, w):
    """Convert a quaternion (x, y, z, w) to (roll, pitch, yaw) in radians."""
    # Roll: rotation about the x axis.
    sinr_cosp = 2.0 * (w * x + y * z)
    cosr_cosp = 1.0 - 2.0 * (x * x + y * y)
    roll = math.atan2(sinr_cosp, cosr_cosp)

    # Pitch: rotation about the y axis. Clamp so asin never sees a value
    # pushed outside [-1, 1] by floating-point round-off.
    sinp = 2.0 * (w * y - z * x)
    if sinp > 1.0:
        sinp = 1.0
    elif sinp < -1.0:
        sinp = -1.0
    pitch = math.asin(sinp)

    # Yaw: rotation about the z axis (heading).
    siny_cosp = 2.0 * (w * z + x * y)
    cosy_cosp = 1.0 - 2.0 * (y * y + z * z)
    yaw = math.atan2(siny_cosp, cosy_cosp)

    return roll, pitch, yaw
def obj_to_quad(obj):
    """Return the four BEV corner points of *obj*'s bounding box plus its yaw.

    Corners are computed in the x/y plane from the box center, its x/y
    dimensions and the heading extracted from the pose quaternion.

    Returns:
        (A_x, A_y, B_x, B_y, C_x, C_y, D_x, D_y, yaw) — the corners in the
        same order the original producer emitted them, then the yaw angle
        in radians.
    """
    x_0 = obj.pose.position.x
    y_0 = obj.pose.position.y
    # Only the heading (yaw) component of the orientation is needed in BEV.
    w = obj.pose.orientation.w
    x = obj.pose.orientation.x
    y = obj.pose.orientation.y
    z = obj.pose.orientation.z
    siny_cosp = 2.0 * (w * z + x * y)
    cosy_cosp = 1.0 - 2.0 * (y * y + z * z)
    yaw = math.atan2(siny_cosp, cosy_cosp)

    # Hoist the trig calls and half-dimensions: the original recomputed
    # math.cos(yaw)/math.sin(yaw) sixteen times. Results are bit-identical
    # (IEEE negation and division by 2 are exact).
    c = math.cos(yaw)
    s = math.sin(yaw)
    hx = obj.dimensions.x / 2
    hy = obj.dimensions.y / 2

    A_x = hx * c - hy * s + x_0
    A_y = hx * s + hy * c + y_0
    B_x = -hx * c - hy * s + x_0
    B_y = -hx * s + hy * c + y_0
    C_x = -hx * c + hy * s + x_0
    C_y = -hx * s - hy * c + y_0
    D_x = hx * c + hy * s + x_0
    D_y = hx * s - hy * c + y_0
    return A_x, A_y, B_x, B_y, C_x, C_y, D_x, D_y, yaw
def in_obj(point, obj):
    """Return ``(inside, yaw)``: whether *point* lies inside *obj*'s BEV box.

    A point is accepted when the four edge cross-products all share the same
    sign, i.e. the point lies on the same side of every edge of the quad.
    """
    A_x, A_y, B_x, B_y, C_x, C_y, D_x, D_y, yaw = obj_to_quad(obj)
    # Cheap early rejects.  NOTE(review): these bounds are one-sided (only
    # the +20 direction and a z floor are tested) -- presumably a coarse
    # crop pre-filter; confirm this matches the intended region.
    if point[0] > obj.pose.position.x + 20:
        return False, yaw
    if point[1] > obj.pose.position.y + 20:
        return False, yaw
    if point[2] < (obj.pose.position.z - obj.dimensions.z / 2 + 0.1):
        return False, yaw
    px, py = point[0], point[1]
    cross_ab = (B_x - A_x) * (py - A_y) - (B_y - A_y) * (px - A_x)
    cross_bc = (C_x - B_x) * (py - B_y) - (C_y - B_y) * (px - B_x)
    cross_cd = (D_x - C_x) * (py - C_y) - (D_y - C_y) * (px - C_x)
    cross_da = (A_x - D_x) * (py - D_y) - (A_y - D_y) * (px - D_x)
    crosses = (cross_ab, cross_bc, cross_cd, cross_da)
    inside = all(v > 0 for v in crosses) or all(v < 0 for v in crosses)
    return inside, yaw
def ProcessRosbag():
    """Export each labelled object in the bag as its own point cloud + label.

    Pass 1 collects the object list of every ``/ld_object_lists`` message.
    Pass 2 walks the synchronized ``/os_cloud_node/points`` frames; for every
    annotated object, the lidar points falling inside its box are written to
    ``<save_path>/<bag>_<frame>_<obj>.bin`` (float32 rows of x, y, z,
    intensity) and the box metadata to a matching YAML file under
    ``save_label_path``.
    """
    base_path = "/media/sikun/ld_harddisk/sikun/LD_compass/dataset/org"
    save_path = '/media/sikun/ld_harddisk/sikun/LD_compass/dataset/data/'
    save_label_path = '/media/sikun/ld_harddisk/sikun/LD_compass/dataset/label/'
    bag_PATH = base_path + '/split_ouster_128_20210608141208_004_processed_sync_00_OD.bag'
    save_validation_path = save_path + '/validation'
    bag_num = 0
    index = 0
    topic_name = ['/ld_object_lists', '/os_cloud_node/points']
    object_lists = []
    # BUGFIX: the original wrapped each ``with rosbag.Bag(...)`` block in an
    # outer try/finally that called ``bag.close()`` again.  The context
    # manager already closes the bag, and if ``rosbag.Bag(...)`` itself
    # raised, the finally clause died with a NameError on the unbound
    # ``bag``.  The ``with`` alone is both sufficient and safe.
    with rosbag.Bag(bag_PATH, 'r') as bag:
        for topic, msg, t in bag.read_messages(topics='/ld_object_lists'):
            object_lists.append(msg.objects)
    with rosbag.Bag(bag_PATH, 'r') as bag:
        for topic, msg, t in bag.read_messages(topics=topic_name):
            if topic != '/os_cloud_node/points':
                continue
            for i, obj in enumerate(object_lists[index]):
                lidar = pc2.read_points(msg, skip_nans=True)
                index_num = str(bag_num).zfill(4) + "_" + str(
                    index).zfill(4) + "_" + str(i).zfill(4)
                bin_file = save_path + index_num + ".bin"
                yaml_file = save_label_path + index_num + ".yaml"
                point_arry = []
                # ROBUSTNESS FIX: the original read ``yaw`` from the last
                # in_obj() call and crashed with NameError on an empty
                # cloud; yaw is a property of the box, so take it directly.
                yaw = obj_to_quad(obj)[-1]
                for point in lidar:
                    in_box, yaw = in_obj(point, obj)
                    if in_box:
                        point_arry.append(
                            [point[0], point[1], point[2], point[3]])
                obj_yaml = {
                    'Yaw': yaw,
                    'Dim_x': obj.dimensions.x,
                    'Dim_y': obj.dimensions.y,
                    'Dim_z': obj.dimensions.z,
                    'Pose_x': obj.pose.position.x,
                    'Pose_y': obj.pose.position.y,
                    'Pose_z': obj.pose.position.z,
                    'Velocity': obj.velocity.linear.x,
                    'class_name': obj.class_label_true,
                    'Num_of_points': len(point_arry),
                    'Difficulty': 0
                }
                with open(yaml_file, "w", encoding="utf8") as f:
                    yaml.dump(obj_yaml, f)
                np.array(point_arry).astype(np.float32).tofile(bin_file)
            index += 1
            print(index)
    # NOTE: the large dead reference block (commented-out label loop and the
    # trailing triple-quoted Waymo-export snippet) that used to follow here
    # was never executed and has been removed.
#%%
# Script entry point: run the full rosbag -> per-object .bin/.yaml export.
if __name__ == '__main__':
    ProcessRosbag()
# %% | [
"yaml.dump",
"math.asin",
"rosbag.Bag",
"math.cos",
"torch.tensor",
"numpy.array",
"math.atan2",
"math.sin",
"sensor_msgs.point_cloud2.read_points",
"torch.device"
] | [((1089, 1108), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (1101, 1108), False, 'import torch\n'), ((1127, 1187), 'torch.tensor', 'torch.tensor', (['pointcloud'], {'dtype': 'torch.float32', 'device': 'device'}), '(pointcloud, dtype=torch.float32, device=device)\n', (1139, 1187), False, 'import torch\n'), ((1405, 1423), 'math.atan2', 'math.atan2', (['t0', 't1'], {}), '(t0, t1)\n', (1415, 1423), False, 'import math\n'), ((1541, 1554), 'math.asin', 'math.asin', (['t2'], {}), '(t2)\n', (1550, 1554), False, 'import math\n'), ((1638, 1656), 'math.atan2', 'math.atan2', (['t3', 't4'], {}), '(t3, t4)\n', (1648, 1656), False, 'import math\n'), ((2074, 2106), 'math.atan2', 'math.atan2', (['siny_cosp', 'cosy_cosp'], {}), '(siny_cosp, cosy_cosp)\n', (2084, 2106), False, 'import math\n'), ((4038, 4063), 'rosbag.Bag', 'rosbag.Bag', (['bag_PATH', '"""r"""'], {}), "(bag_PATH, 'r')\n", (4048, 4063), False, 'import rosbag\n'), ((4340, 4365), 'rosbag.Bag', 'rosbag.Bag', (['bag_PATH', '"""r"""'], {}), "(bag_PATH, 'r')\n", (4350, 4365), False, 'import rosbag\n'), ((2128, 2141), 'math.cos', 'math.cos', (['yaw'], {}), '(yaw)\n', (2136, 2141), False, 'import math\n'), ((2154, 2167), 'math.sin', 'math.sin', (['yaw'], {}), '(yaw)\n', (2162, 2167), False, 'import math\n'), ((2194, 2207), 'math.sin', 'math.sin', (['yaw'], {}), '(yaw)\n', (2202, 2207), False, 'import math\n'), ((2220, 2233), 'math.cos', 'math.cos', (['yaw'], {}), '(yaw)\n', (2228, 2233), False, 'import math\n'), ((2262, 2275), 'math.cos', 'math.cos', (['yaw'], {}), '(yaw)\n', (2270, 2275), False, 'import math\n'), ((2288, 2301), 'math.sin', 'math.sin', (['yaw'], {}), '(yaw)\n', (2296, 2301), False, 'import math\n'), ((2329, 2342), 'math.sin', 'math.sin', (['yaw'], {}), '(yaw)\n', (2337, 2342), False, 'import math\n'), ((2355, 2368), 'math.cos', 'math.cos', (['yaw'], {}), '(yaw)\n', (2363, 2368), False, 'import math\n'), ((2397, 2410), 'math.cos', 'math.cos', (['yaw'], {}), '(yaw)\n', (2405, 2410), 
False, 'import math\n'), ((2423, 2436), 'math.sin', 'math.sin', (['yaw'], {}), '(yaw)\n', (2431, 2436), False, 'import math\n'), ((2464, 2477), 'math.sin', 'math.sin', (['yaw'], {}), '(yaw)\n', (2472, 2477), False, 'import math\n'), ((2490, 2503), 'math.cos', 'math.cos', (['yaw'], {}), '(yaw)\n', (2498, 2503), False, 'import math\n'), ((2531, 2544), 'math.cos', 'math.cos', (['yaw'], {}), '(yaw)\n', (2539, 2544), False, 'import math\n'), ((2557, 2570), 'math.sin', 'math.sin', (['yaw'], {}), '(yaw)\n', (2565, 2570), False, 'import math\n'), ((2597, 2610), 'math.sin', 'math.sin', (['yaw'], {}), '(yaw)\n', (2605, 2610), False, 'import math\n'), ((2623, 2636), 'math.cos', 'math.cos', (['yaw'], {}), '(yaw)\n', (2631, 2636), False, 'import math\n'), ((4601, 4637), 'sensor_msgs.point_cloud2.read_points', 'pc2.read_points', (['msg'], {'skip_nans': '(True)'}), '(msg, skip_nans=True)\n', (4616, 4637), True, 'import sensor_msgs.point_cloud2 as pc2\n'), ((6092, 6114), 'yaml.dump', 'yaml.dump', (['obj_yaml', 'f'], {}), '(obj_yaml, f)\n', (6101, 6114), False, 'import yaml\n'), ((6139, 6159), 'numpy.array', 'np.array', (['point_arry'], {}), '(point_arry)\n', (6147, 6159), True, 'import numpy as np\n')] |
import copy
import math
import numpy as np
import paddle
import paddle.nn as nn
import paddle.nn.functional as F
from paddlenlp.transformers import PretrainedModel, register_base_model
# Public symbols exported by ``from <module> import *``.
__all__ = [
    'NeZhaModel',
    "NeZhaPretrainedModel",
    'NeZhaForPretraining',
    'NeZhaForSequenceClassification',
    'NeZhaPretrainingHeads',
    'NeZhaForTokenClassification',
    'NeZhaForQuestionAnswering',
    'NeZhaForMultipleChoice'
]
def get_activation(activation_string):
    """Resolve an activation name to its callable via the ACT2FN registry.

    Raises:
        KeyError: if ``activation_string`` is not a registered activation.
    """
    if activation_string not in ACT2FN:
        raise KeyError("function {} not found in ACT2FN mapping {}".format(
            activation_string, list(ACT2FN.keys())))
    return ACT2FN[activation_string]
def mish(x):
    """Mish activation: ``x * tanh(softplus(x))``."""
    softplus_x = F.softplus(x)
    return x * F.tanh(softplus_x)
def linear_act(x):
    """Identity activation: return the input unchanged."""
    return x
def swish(x):
    """Swish/SiLU activation: ``x * sigmoid(x)``."""
    return F.sigmoid(x) * x
def gelu_new(x):
    """
    Gaussian Error Linear Unit, tanh approximation -- identical to the
    variant in the Google BERT repo / OpenAI GPT.  See the GELU paper:
    https://arxiv.org/abs/1606.08415
    """
    inner = math.sqrt(2.0 / math.pi) * (x + 0.044715 * paddle.pow(x, 3.0))
    return 0.5 * x * (1.0 + paddle.tanh(inner))
# Registry mapping activation names (as they appear in model configs) to
# callables; looked up by ``get_activation`` and the layer constructors.
ACT2FN = {
    "relu": F.relu,
    "gelu": F.gelu,
    "gelu_new": gelu_new,
    "tanh": F.tanh,
    "sigmoid": F.sigmoid,
    "mish": mish,
    "linear": linear_act,
    "swish": swish,
}
class NeZhaAttention(nn.Layer):
    """Multi-head self-attention with NeZha's functional relative-position
    encodings.

    On top of the usual query/key dot product, a fixed sinusoidal table of
    relative-position embeddings is mixed into both the attention scores
    (via the keys) and the output context (via the values).
    """
    def __init__(self,
                 hidden_size,
                 num_attention_heads,
                 hidden_dropout_prob,
                 attention_probs_dropout_prob,
                 max_relative_position,
                 layer_norm_eps):
        super(NeZhaAttention, self).__init__()
        if hidden_size % num_attention_heads != 0:
            raise ValueError(
                "The hidden size (%d) is not a multiple of the number of attention "
                "heads (%d)" % (hidden_size, num_attention_heads))
        self.num_attention_heads = num_attention_heads
        self.attention_head_size = int(hidden_size / num_attention_heads)
        self.all_head_size = self.num_attention_heads * self.attention_head_size
        self.query = nn.Linear(hidden_size, self.all_head_size)
        self.key = nn.Linear(hidden_size, self.all_head_size)
        self.value = nn.Linear(hidden_size, self.all_head_size)
        # Precomputed (non-trainable) sinusoidal relative-position table,
        # sized for sequences up to 512 tokens.
        self.relative_positions_embeddings = self.generate_relative_positions_embeddings(
            length=512, depth=self.attention_head_size, max_relative_position=max_relative_position
        )
        self.attention_dropout = nn.Dropout(attention_probs_dropout_prob)
        # Output projection followed by dropout and a residual LayerNorm.
        self.dense = nn.Linear(hidden_size, hidden_size)
        self.layer_norm = nn.LayerNorm(hidden_size, epsilon=layer_norm_eps)
        self.output_dropout = nn.Dropout(hidden_dropout_prob)
    def generate_relative_positions_embeddings(self, length, depth, max_relative_position=127):
        """Build a (length, length, depth) tensor where entry [i, j] is the
        sinusoidal embedding of the clipped relative distance j - i."""
        vocab_size = max_relative_position * 2 + 1
        range_vec = paddle.arange(length)
        range_mat = paddle.tile(
            range_vec, repeat_times=[length]
        ).reshape((length, length))
        # distance_mat[i, j] = j - i, then clipped to +/- max_relative_position.
        distance_mat = range_mat - paddle.t(range_mat)
        distance_mat_clipped = paddle.clip(
            distance_mat.astype( 'float32'),
            -max_relative_position,
            max_relative_position
        )
        # Shift distances into [0, 2 * max_relative_position] so they can
        # index the embedding table.
        final_mat = distance_mat_clipped + max_relative_position
        # Standard transformer sinusoid table over the distance "vocabulary".
        embeddings_table = np.zeros([vocab_size, depth])
        for pos in range(vocab_size):
            for i in range(depth // 2):
                embeddings_table[pos, 2 * i] = np.sin(pos / np.power(10000, 2 * i / depth))
                embeddings_table[pos, 2 * i + 1] = np.cos(pos / np.power(10000, 2 * i / depth))
        embeddings_table_tensor = paddle.to_tensor(embeddings_table, dtype='float32')
        flat_relative_positions_matrix = final_mat.reshape((-1,))
        # One-hot matmul acts as a row gather: select the sinusoid row for
        # each (i, j) position pair.
        one_hot_relative_positions_matrix = paddle.nn.functional.one_hot(
            flat_relative_positions_matrix.astype('int64'),
            num_classes=vocab_size
        )
        embeddings = paddle.matmul(
            one_hot_relative_positions_matrix,
            embeddings_table_tensor
        )
        my_shape = final_mat.shape
        my_shape.append(depth)
        embeddings = embeddings.reshape(my_shape)
        return embeddings
    def transpose_for_scores(self, x):
        """Split heads: (batch, seq, hidden) -> (batch, heads, seq, head_size)."""
        new_x_shape = x.shape[:-1] + [self.num_attention_heads, self.attention_head_size]
        x = x.reshape(new_x_shape)
        return x.transpose((0, 2, 1, 3))
    def forward(self, hidden_states, attention_mask):
        """Return (residual-normalized attention output, raw attention scores).

        ``attention_mask`` is additive (added to the scores before softmax).
        """
        mixed_query_layer = self.query(hidden_states)
        mixed_key_layer = self.key(hidden_states)
        mixed_value_layer = self.value(hidden_states)
        query_layer = self.transpose_for_scores(mixed_query_layer)
        key_layer = self.transpose_for_scores(mixed_key_layer)
        value_layer = self.transpose_for_scores(mixed_value_layer)
        # Take the dot product between "query" and "key" to get the raw attention scores.
        attention_scores = paddle.matmul(
            query_layer,
            key_layer.transpose((0, 1, 3, 2))
        )
        batch_size, num_attention_heads, from_seq_length, to_seq_length = attention_scores.shape
        # Query-to-relative-position term: score each query against the
        # relative-position embedding of every key position.
        relations_keys = self.relative_positions_embeddings.detach().clone()[:to_seq_length, :to_seq_length, :]
        query_layer_t = query_layer.transpose((2, 0, 1, 3))
        query_layer_r = query_layer_t.reshape(
            (from_seq_length, batch_size *
             num_attention_heads, self.attention_head_size)
        )
        key_position_scores = paddle.matmul(
            query_layer_r,
            relations_keys.transpose((0, 2, 1))
        )
        key_position_scores_r = key_position_scores.reshape(
            (from_seq_length, batch_size, num_attention_heads, from_seq_length)
        )
        key_position_scores_r_t = key_position_scores_r.transpose((1, 2, 0, 3))
        attention_scores = attention_scores + key_position_scores_r_t
        attention_scores = attention_scores / math.sqrt(self.attention_head_size)
        attention_scores = attention_scores + attention_mask
        # Normalize the attention scores to probabilities.
        attention_probs = nn.Softmax(axis=-1)(attention_scores)
        # This is actually dropping out entire tokens to attend to, which might
        # seem a bit unusual, but is taken from the original Transformer paper.
        attention_probs = self.attention_dropout(attention_probs)
        context_layer = paddle.matmul(attention_probs, value_layer)
        # Probability-to-relative-position term: mix relative-position
        # embeddings into the context, weighted by the attention probs.
        relations_values = self.relative_positions_embeddings.clone()[:to_seq_length, :to_seq_length, :]
        attention_probs_t = attention_probs.transpose((2, 0, 1, 3))
        attentions_probs_r = attention_probs_t.reshape(
            (from_seq_length, batch_size * num_attention_heads, to_seq_length)
        )
        value_position_scores = paddle.matmul(attentions_probs_r, relations_values)
        value_position_scores_r = value_position_scores.reshape(
            (from_seq_length, batch_size,
             num_attention_heads, self.attention_head_size)
        )
        value_position_scores_r_t = value_position_scores_r.transpose((1, 2, 0, 3))
        context_layer = context_layer + value_position_scores_r_t
        # Merge heads back: (batch, heads, seq, head_size) -> (batch, seq, hidden).
        context_layer = context_layer.transpose((0, 2, 1, 3))
        new_context_layer_shape = context_layer.shape[:-2] + [self.all_head_size]
        context_layer = context_layer.reshape(new_context_layer_shape)
        # Output projection, dropout, then residual connection + LayerNorm.
        projected_context_layer = self.dense(context_layer)
        projected_context_layer_dropout = self.output_dropout(projected_context_layer)
        layer_normed_context_layer = self.layer_norm(
            hidden_states + projected_context_layer_dropout
        )
        return layer_normed_context_layer, attention_scores
class NeZhaLayer(nn.Layer):
    """One encoder block: NeZha self-attention followed by a position-wise
    feed-forward network with dropout and a residual LayerNorm."""

    def __init__(self,
                 hidden_size,
                 num_attention_heads,
                 intermediate_size,
                 hidden_act,
                 hidden_dropout_prob,
                 attention_probs_dropout_prob,
                 max_relative_position,
                 layer_norm_eps):
        super(NeZhaLayer, self).__init__()
        self.seq_len_dim = 1
        self.layer_norm = nn.LayerNorm(hidden_size, epsilon=layer_norm_eps)
        self.attention = NeZhaAttention(
            hidden_size,
            num_attention_heads,
            hidden_dropout_prob,
            attention_probs_dropout_prob,
            max_relative_position,
            layer_norm_eps,
        )
        # Position-wise feed-forward sub-layer: expand -> activate -> project.
        self.ffn = nn.Linear(hidden_size, intermediate_size)
        self.ffn_output = nn.Linear(intermediate_size, hidden_size)
        self.activation = ACT2FN[hidden_act]
        self.dropout = nn.Dropout(hidden_dropout_prob)

    def forward(self, hidden_states, attention_mask=None):
        """Return ``(hidden_states, attention_scores)`` for this block."""
        attn_out, attn_scores = self.attention(hidden_states, attention_mask)
        inner = self.activation(self.ffn(attn_out))
        projected = self.dropout(self.ffn_output(inner))
        return self.layer_norm(projected + attn_out), attn_scores
class NeZhaEncoder(nn.Layer):
    """A stack of ``num_hidden_layers`` identical NeZhaLayer blocks."""

    def __init__(self,
                 hidden_size,
                 num_hidden_layers,
                 num_attention_heads,
                 intermediate_size,
                 hidden_act,
                 hidden_dropout_prob,
                 attention_probs_dropout_prob,
                 max_relative_position,
                 layer_norm_eps):
        super(NeZhaEncoder, self).__init__()
        prototype = NeZhaLayer(
            hidden_size,
            num_attention_heads,
            intermediate_size,
            hidden_act,
            hidden_dropout_prob,
            attention_probs_dropout_prob,
            max_relative_position,
            layer_norm_eps,
        )
        # Deep-copy the prototype so every block gets independent parameters.
        self.layer = nn.LayerList(
            [copy.deepcopy(prototype) for _ in range(num_hidden_layers)])

    def forward(self, hidden_states, attention_mask):
        """Return (hidden states of every layer incl. input and final output,
        attention scores of every layer)."""
        hidden_trace = []
        attention_trace = []
        for block in self.layer:
            hidden_trace.append(hidden_states)
            hidden_states, scores = block(hidden_states, attention_mask)
            attention_trace.append(scores)
        hidden_trace.append(hidden_states)
        return hidden_trace, attention_trace
class NeZhaEmbeddings(nn.Layer):
    """Token / (optional) absolute-position / token-type embeddings followed
    by LayerNorm and dropout.

    With ``use_relative_position=True`` (NeZha's default) absolute position
    embeddings are skipped entirely -- relative positions are handled inside
    the attention layers instead.
    """

    def __init__(self,
                 vocab_size,
                 hidden_size=768,
                 hidden_dropout_prob=0.1,
                 max_position_embeddings=512,
                 type_vocab_size=16,
                 use_relative_position=True):
        super(NeZhaEmbeddings, self).__init__()
        self.use_relative_position = use_relative_position
        self.word_embeddings = nn.Embedding(vocab_size, hidden_size)
        if not use_relative_position:
            self.position_embeddings = nn.Embedding(
                max_position_embeddings, hidden_size)
        self.token_type_embeddings = nn.Embedding(type_vocab_size, hidden_size)
        self.layer_norm = nn.LayerNorm(hidden_size)
        self.dropout = nn.Dropout(hidden_dropout_prob)

    def forward(self, input_ids, token_type_ids=None):
        """Embed ``input_ids`` (plus token types, plus absolute positions
        when enabled); ``token_type_ids`` defaults to all zeros."""
        if token_type_ids is None:
            token_type_ids = paddle.zeros_like(input_ids, dtype="int64")
        embeddings = self.word_embeddings(input_ids)
        if not self.use_relative_position:
            # PERF FIX: position ids were previously built (arange + expand)
            # on every call even in relative-position mode, where they are
            # never used; build them only when actually needed.
            seq_length = input_ids.shape[1]
            position_ids = paddle.arange(seq_length, dtype='int64')
            position_ids = position_ids.unsqueeze(0).expand_as(input_ids)
            embeddings += self.position_embeddings(position_ids)
        embeddings += self.token_type_embeddings(token_type_ids)
        embeddings = self.layer_norm(embeddings)
        embeddings = self.dropout(embeddings)
        return embeddings
class NeZhaPooler(nn.Layer):
    """Pool a sequence by passing its first ([CLS]) hidden state through a
    dense layer with tanh."""

    def __init__(self, hidden_size):
        super(NeZhaPooler, self).__init__()
        self.dense = nn.Linear(hidden_size, hidden_size)
        self.activation = nn.Tanh()

    def forward(self, hidden_states):
        """Return ``tanh(W * h_first + b)`` for the first token of each
        sequence."""
        cls_state = hidden_states[:, 0]
        return self.activation(self.dense(cls_state))
class NeZhaPretrainedModel(PretrainedModel):
    """Base class wiring NeZha checkpoints into PaddleNLP's pretrained-model
    machinery: named hyper-parameter configs, weight-file URLs, and the
    weight-initialization hook shared by all task heads below.
    """
    model_config_file = "model_config.json"
    # Hyper-parameter sets for the published NeZha checkpoints.
    pretrained_init_configuration = {
        "nezha-base-chinese": {
            "vocab_size": 21128,
            "hidden_size": 768,
            "num_hidden_layers": 12,
            "num_attention_heads": 12,
            "intermediate_size": 3072,
            "hidden_act": "gelu",
            "hidden_dropout_prob": 0.1,
            "attention_probs_dropout_prob": 0.1,
            "max_position_embeddings": 512,
            "max_relative_position": 64,
            "type_vocab_size": 2,
            "initializer_range": 0.02,
            "use_relative_position": True
        },
        "nezha-large-chinese": {
            "vocab_size": 21128,
            "hidden_size": 1024,
            "num_hidden_layers": 24,
            "num_attention_heads": 16,
            "intermediate_size": 4096,
            "hidden_act": "gelu",
            "hidden_dropout_prob": 0.1,
            "attention_probs_dropout_prob": 0.1,
            "max_position_embeddings": 512,
            "max_relative_position": 64,
            "type_vocab_size": 2,
            "initializer_range": 0.02,
            "use_relative_position": True
        },
        "nezha-base-wwm-chinese": {
            "vocab_size": 21128,
            "hidden_size": 768,
            "num_hidden_layers": 12,
            "num_attention_heads": 12,
            "intermediate_size": 3072,
            "hidden_act": "gelu",
            "hidden_dropout_prob": 0.1,
            "attention_probs_dropout_prob": 0.1,
            "max_position_embeddings": 512,
            "max_relative_position": 64,
            "type_vocab_size": 2,
            "initializer_range": 0.02,
            "use_relative_position": True
        },
        "nezha-large-wwm-chinese": {
            "vocab_size": 21128,
            "hidden_size": 1024,
            "num_hidden_layers": 24,
            "num_attention_heads": 16,
            "intermediate_size": 4096,
            "hidden_act": "gelu",
            "hidden_dropout_prob": 0.1,
            "attention_probs_dropout_prob": 0.1,
            "max_position_embeddings": 512,
            "max_relative_position": 64,
            "type_vocab_size": 2,
            "initializer_range": 0.02,
            "use_relative_position": True
        },
    }
    resource_files_names = {"model_state": "model_state.pdparams"}
    # Download locations of the pretrained weight files, keyed by model name.
    pretrained_resource_files_map = {
        "model_state": {
            "nezha-base-chinese":
            "https://paddlenlp.bj.bcebos.com/models/transformers/nezha/nezha-base-chinese.pdparams",
            "nezha-large-chinese":
            "https://paddlenlp.bj.bcebos.com/models/transformers/nezha/nezha-large-chinese.pdparams",
            "nezha-base-wwm-chinese":
            "https://paddlenlp.bj.bcebos.com/models/transformers/nezha/nezha-base-wwm-chinese.pdparams",
            "nezha-large-wwm-chinese":
            "https://paddlenlp.bj.bcebos.com/models/transformers/nezha/nezha-large-wwm-chinese.pdparams",
        }
    }
    base_model_prefix = "nezha"
    def init_weights(self, layer):
        """ Initialization hook """
        # Linear/Embedding weights get a normal(0, initializer_range) init;
        # the range comes from the instance when set, else the base model's
        # config.  LayerNorm epsilon is pinned to the BERT default.
        if isinstance(layer, (nn.Linear, nn.Embedding)):
            # In the dygraph mode, use the `set_value` to reset the parameter directly,
            # and reset the `state_dict` to update parameter in static mode.
            if isinstance(layer.weight, paddle.Tensor):
                layer.weight.set_value(
                    paddle.tensor.normal(
                        mean=0.0,
                        std=self.initializer_range
                        if hasattr(self, "initializer_range") else
                        self.nezha.config["initializer_range"],
                        shape=layer.weight.shape))
        elif isinstance(layer, nn.LayerNorm):
            layer._epsilon = 1e-12
@register_base_model
class NeZhaModel(NeZhaPretrainedModel):
    """The bare NeZha transformer: embeddings -> encoder stack -> pooler.

    ``forward`` returns ``(sequence_output, pooled_output)``.
    """
    def __init__(self,
                 vocab_size,
                 hidden_size=768,
                 num_hidden_layers=12,
                 num_attention_heads=12,
                 intermediate_size=3072,
                 hidden_act="gelu",
                 hidden_dropout_prob=0.1,
                 attention_probs_dropout_prob=0.1,
                 max_position_embeddings=512,
                 type_vocab_size=2,
                 initializer_range=0.02,
                 max_relative_position=64,
                 layer_norm_eps=1e-12,
                 use_relative_position=True):
        super(NeZhaModel, self).__init__()
        # Read by the init_weights hook of the base class.
        self.initializer_range = initializer_range
        self.embeddings = NeZhaEmbeddings(
            vocab_size,
            hidden_size,
            hidden_dropout_prob,
            max_position_embeddings,
            type_vocab_size,
            use_relative_position
        )
        self.encoder = NeZhaEncoder(
            hidden_size,
            num_hidden_layers,
            num_attention_heads,
            intermediate_size,
            hidden_act,
            hidden_dropout_prob,
            attention_probs_dropout_prob,
            max_relative_position,
            layer_norm_eps
        )
        self.pooler = NeZhaPooler(hidden_size)
        self.apply(self.init_weights)
    def forward(self, input_ids, token_type_ids=None, attention_mask=None):
        """Encode ``input_ids``; a missing mask defaults to all-ones and
        missing token types to all-zeros."""
        if attention_mask is None:
            attention_mask = paddle.ones_like(input_ids)
        if token_type_ids is None:
            token_type_ids = paddle.zeros_like(input_ids)
        # Broadcastable additive mask: 0 where attended, -10000 where masked.
        extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)
        extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
        embedding_output = self.embeddings(input_ids, token_type_ids)
        encoder_outputs, _ = self.encoder(embedding_output, extended_attention_mask)
        # The encoder returns per-layer hidden states; the last entry is the
        # final sequence representation.
        sequence_output = encoder_outputs[-1]
        pooled_output = self.pooler(sequence_output)
        return sequence_output, pooled_output
class NeZhaLMPredictionHead(nn.Layer):
    """Masked-LM head: transform hidden states, then decode to vocabulary
    logits by multiplying with the (tied) word-embedding matrix + bias."""

    def __init__(self,
                 hidden_size,
                 vocab_size,
                 hidden_act,
                 embedding_weights=None,
                 layer_norm_eps=1e-12):
        super(NeZhaLMPredictionHead, self).__init__()
        self.dense = nn.Linear(hidden_size, hidden_size)
        self.activation = ACT2FN[hidden_act]
        self.layer_norm = nn.LayerNorm(hidden_size, epsilon=layer_norm_eps)
        # The decoder weight is tied to the input word embeddings; only the
        # bias is a fresh parameter of this head.
        self.decoder_weight = embedding_weights
        self.decoder_bias = self.create_parameter(
            shape=[vocab_size],
            dtype=self.decoder_weight.dtype,
            is_bias=True)

    def forward(self, hidden_states):
        """Return vocabulary logits of shape (..., vocab_size)."""
        transformed = self.layer_norm(
            self.activation(self.dense(hidden_states)))
        logits = paddle.tensor.matmul(
            transformed, self.decoder_weight, transpose_y=True)
        return logits + self.decoder_bias
class NeZhaPretrainingHeads(nn.Layer):
    """Bundles the masked-LM head and the next-sentence (sequence
    relationship) classifier used during pre-training."""

    def __init__(self,
                 hidden_size,
                 vocab_size,
                 hidden_act,
                 embedding_weights=None):
        super(NeZhaPretrainingHeads, self).__init__()
        self.predictions = NeZhaLMPredictionHead(
            hidden_size, vocab_size, hidden_act, embedding_weights)
        self.seq_relationship = nn.Linear(hidden_size, 2)

    def forward(self, sequence_output, pooled_output):
        """Return (per-token vocab logits, 2-way next-sentence logits)."""
        mlm_logits = self.predictions(sequence_output)
        nsp_logits = self.seq_relationship(pooled_output)
        return mlm_logits, nsp_logits
class NeZhaForPretraining(NeZhaPretrainedModel):
    """NeZha with both pre-training heads (masked LM + next sentence)."""

    def __init__(self, nezha):
        super(NeZhaForPretraining, self).__init__()
        self.nezha = nezha
        # The LM head decoder is tied to the input word embeddings.
        self.cls = NeZhaPretrainingHeads(
            self.nezha.config["hidden_size"],
            self.nezha.config["vocab_size"],
            self.nezha.config["hidden_act"],
            self.nezha.embeddings.word_embeddings.weight)
        self.apply(self.init_weights)

    def forward(self, input_ids, token_type_ids=None, attention_mask=None,
                masked_lm_labels=None, next_sentence_label=None):
        """Return a loss when labels are given, else the raw logits.

        * both labels given            -> masked-LM loss + next-sentence loss
        * only ``masked_lm_labels``    -> masked-LM loss
        * ``masked_lm_labels`` is None -> (prediction_scores, seq_relationship_score)
        """
        sequence_output, pooled_output = self.nezha(input_ids, token_type_ids, attention_mask)
        prediction_scores, seq_relationship_score = self.cls(sequence_output, pooled_output)
        if masked_lm_labels is None:
            return prediction_scores, seq_relationship_score
        # CONSISTENCY FIX: the masked-LM loss was computed identically in two
        # separate branches; compute it once and add the NSP term only when a
        # next-sentence label is supplied.
        loss_fct = nn.CrossEntropyLoss(ignore_index=-1)
        masked_lm_loss = loss_fct(
            prediction_scores.reshape((-1, self.nezha.config["vocab_size"])),
            masked_lm_labels.reshape((-1,)))
        if next_sentence_label is None:
            return masked_lm_loss
        next_sentence_loss = loss_fct(
            seq_relationship_score.reshape((-1, 2)),
            next_sentence_label.reshape((-1,)))
        return masked_lm_loss + next_sentence_loss
class NeZhaForQuestionAnswering(NeZhaPretrainedModel):
    """NeZha with a span-extraction head producing start/end logits."""

    def __init__(self, nezha, dropout=None):
        super(NeZhaForQuestionAnswering, self).__init__()
        self.nezha = nezha
        self.classifier = nn.Linear(self.nezha.config["hidden_size"], 2)
        self.apply(self.init_weights)

    def forward(self, input_ids, token_type_ids=None, attention_mask=None):
        """Return ``(start_logits, end_logits)``, each (batch, seq_len)."""
        sequence_output, _ = self.nezha(input_ids, token_type_ids, attention_mask)
        span_logits = self.classifier(sequence_output)
        # (batch, seq, 2) -> (2, batch, seq) so the pair can be unstacked.
        span_logits = paddle.transpose(span_logits, perm=[2, 0, 1])
        start_logits, end_logits = paddle.unstack(x=span_logits, axis=0)
        return start_logits, end_logits
class NeZhaForSequenceClassification(NeZhaPretrainedModel):
    """NeZha with a pooled-output classification head (sequence-level)."""

    def __init__(self, nezha, num_classes=2, dropout=None):
        super(NeZhaForSequenceClassification, self).__init__()
        self.num_classes = num_classes
        self.nezha = nezha
        dropout_prob = (dropout if dropout is not None
                        else self.nezha.config["hidden_dropout_prob"])
        self.dropout = nn.Dropout(dropout_prob)
        self.classifier = nn.Linear(self.nezha.config["hidden_size"], num_classes)
        self.apply(self.init_weights)

    def forward(self, input_ids, token_type_ids=None, attention_mask=None):
        """Return class logits of shape (batch, num_classes)."""
        _, pooled_output = self.nezha(input_ids, token_type_ids, attention_mask)
        return self.classifier(self.dropout(pooled_output))
class NeZhaForTokenClassification(NeZhaPretrainedModel):
    """NeZha with a per-token classification head (e.g. sequence tagging)."""

    def __init__(self, nezha, num_classes=2, dropout=None):
        super(NeZhaForTokenClassification, self).__init__()
        self.num_classes = num_classes
        self.nezha = nezha
        dropout_prob = (dropout if dropout is not None
                        else self.nezha.config["hidden_dropout_prob"])
        self.dropout = nn.Dropout(dropout_prob)
        self.classifier = nn.Linear(self.nezha.config["hidden_size"], num_classes)
        self.apply(self.init_weights)

    def forward(self, input_ids, token_type_ids=None, attention_mask=None):
        """Return per-token logits of shape (batch, seq_len, num_classes)."""
        sequence_output, _ = self.nezha(input_ids, token_type_ids, attention_mask)
        return self.classifier(self.dropout(sequence_output))
class NeZhaForMultipleChoice(NeZhaPretrainedModel):
    """NeZha with a multiple-choice head: each (context, choice) pair is
    encoded independently and scored with a single linear unit."""

    def __init__(self, nezha, num_choices=2, dropout=None):
        super(NeZhaForMultipleChoice, self).__init__()
        self.num_choices = num_choices
        self.nezha = nezha
        self.dropout = nn.Dropout(dropout if dropout is not None else self.nezha.config["hidden_dropout_prob"])
        self.classifier = nn.Linear(self.nezha.config["hidden_size"], 1)
        self.apply(self.init_weights)

    def forward(self, input_ids, token_type_ids=None, attention_mask=None):
        """Return choice logits of shape (batch, num_choices).

        ``input_ids`` arrives as (batch, num_choices, seq_len) and is
        flattened to (batch * num_choices, seq_len) before encoding.
        """
        # input_ids: [bs, num_choice, seq_l]
        input_ids = input_ids.reshape((-1, input_ids.shape[-1]))  # flat_input_ids: [bs*num_choice,seq_l]
        # BUGFIX: the original tested ``if token_type_ids:`` / ``if
        # attention_mask:`` -- truthiness of a multi-element Tensor is
        # ill-defined and raises at runtime; test against None instead.
        if token_type_ids is not None:
            token_type_ids = token_type_ids.reshape((-1, token_type_ids.shape[-1]))
        if attention_mask is not None:
            attention_mask = attention_mask.reshape((-1, attention_mask.shape[-1]))
        _, pooled_output = self.nezha(input_ids, token_type_ids, attention_mask)
        pooled_output = self.dropout(pooled_output)
        logits = self.classifier(pooled_output)  # logits: (bs*num_choice,1)
        reshaped_logits = logits.reshape((-1, self.num_choices))  # logits: (bs, num_choice)
        return reshaped_logits
| [
"paddle.pow",
"paddle.matmul",
"paddle.nn.Tanh",
"paddle.nn.LayerNorm",
"paddle.nn.CrossEntropyLoss",
"math.sqrt",
"paddle.arange",
"paddle.nn.Embedding",
"copy.deepcopy",
"paddle.ones_like",
"paddle.transpose",
"paddle.tile",
"paddle.to_tensor",
"paddle.tensor.matmul",
"paddle.nn.functi... | [((854, 866), 'paddle.nn.functional.sigmoid', 'F.sigmoid', (['x'], {}), '(x)\n', (863, 866), True, 'import paddle.nn.functional as F\n'), ((2227, 2269), 'paddle.nn.Linear', 'nn.Linear', (['hidden_size', 'self.all_head_size'], {}), '(hidden_size, self.all_head_size)\n', (2236, 2269), True, 'import paddle.nn as nn\n'), ((2290, 2332), 'paddle.nn.Linear', 'nn.Linear', (['hidden_size', 'self.all_head_size'], {}), '(hidden_size, self.all_head_size)\n', (2299, 2332), True, 'import paddle.nn as nn\n'), ((2355, 2397), 'paddle.nn.Linear', 'nn.Linear', (['hidden_size', 'self.all_head_size'], {}), '(hidden_size, self.all_head_size)\n', (2364, 2397), True, 'import paddle.nn as nn\n'), ((2635, 2675), 'paddle.nn.Dropout', 'nn.Dropout', (['attention_probs_dropout_prob'], {}), '(attention_probs_dropout_prob)\n', (2645, 2675), True, 'import paddle.nn as nn\n'), ((2700, 2735), 'paddle.nn.Linear', 'nn.Linear', (['hidden_size', 'hidden_size'], {}), '(hidden_size, hidden_size)\n', (2709, 2735), True, 'import paddle.nn as nn\n'), ((2763, 2812), 'paddle.nn.LayerNorm', 'nn.LayerNorm', (['hidden_size'], {'epsilon': 'layer_norm_eps'}), '(hidden_size, epsilon=layer_norm_eps)\n', (2775, 2812), True, 'import paddle.nn as nn\n'), ((2844, 2875), 'paddle.nn.Dropout', 'nn.Dropout', (['hidden_dropout_prob'], {}), '(hidden_dropout_prob)\n', (2854, 2875), True, 'import paddle.nn as nn\n'), ((3048, 3069), 'paddle.arange', 'paddle.arange', (['length'], {}), '(length)\n', (3061, 3069), False, 'import paddle\n'), ((3513, 3542), 'numpy.zeros', 'np.zeros', (['[vocab_size, depth]'], {}), '([vocab_size, depth])\n', (3521, 3542), True, 'import numpy as np\n'), ((3852, 3903), 'paddle.to_tensor', 'paddle.to_tensor', (['embeddings_table'], {'dtype': '"""float32"""'}), "(embeddings_table, dtype='float32')\n", (3868, 3903), False, 'import paddle\n'), ((4177, 4250), 'paddle.matmul', 'paddle.matmul', (['one_hot_relative_positions_matrix', 'embeddings_table_tensor'], {}), 
'(one_hot_relative_positions_matrix, embeddings_table_tensor)\n', (4190, 4250), False, 'import paddle\n'), ((6698, 6741), 'paddle.matmul', 'paddle.matmul', (['attention_probs', 'value_layer'], {}), '(attention_probs, value_layer)\n', (6711, 6741), False, 'import paddle\n'), ((7100, 7151), 'paddle.matmul', 'paddle.matmul', (['attentions_probs_r', 'relations_values'], {}), '(attentions_probs_r, relations_values)\n', (7113, 7151), False, 'import paddle\n'), ((8504, 8553), 'paddle.nn.LayerNorm', 'nn.LayerNorm', (['hidden_size'], {'epsilon': 'layer_norm_eps'}), '(hidden_size, epsilon=layer_norm_eps)\n', (8516, 8553), True, 'import paddle.nn as nn\n'), ((8828, 8869), 'paddle.nn.Linear', 'nn.Linear', (['hidden_size', 'intermediate_size'], {}), '(hidden_size, intermediate_size)\n', (8837, 8869), True, 'import paddle.nn as nn\n'), ((8897, 8938), 'paddle.nn.Linear', 'nn.Linear', (['intermediate_size', 'hidden_size'], {}), '(intermediate_size, hidden_size)\n', (8906, 8938), True, 'import paddle.nn as nn\n'), ((9009, 9040), 'paddle.nn.Dropout', 'nn.Dropout', (['hidden_dropout_prob'], {}), '(hidden_dropout_prob)\n', (9019, 9040), True, 'import paddle.nn as nn\n'), ((11267, 11304), 'paddle.nn.Embedding', 'nn.Embedding', (['vocab_size', 'hidden_size'], {}), '(vocab_size, hidden_size)\n', (11279, 11304), True, 'import paddle.nn as nn\n'), ((11495, 11537), 'paddle.nn.Embedding', 'nn.Embedding', (['type_vocab_size', 'hidden_size'], {}), '(type_vocab_size, hidden_size)\n', (11507, 11537), True, 'import paddle.nn as nn\n'), ((11565, 11590), 'paddle.nn.LayerNorm', 'nn.LayerNorm', (['hidden_size'], {}), '(hidden_size)\n', (11577, 11590), True, 'import paddle.nn as nn\n'), ((11615, 11646), 'paddle.nn.Dropout', 'nn.Dropout', (['hidden_dropout_prob'], {}), '(hidden_dropout_prob)\n', (11625, 11646), True, 'import paddle.nn as nn\n'), ((11770, 11810), 'paddle.arange', 'paddle.arange', (['seq_length'], {'dtype': '"""int64"""'}), "(seq_length, dtype='int64')\n", (11783, 11810), False, 'import 
paddle\n'), ((12652, 12687), 'paddle.nn.Linear', 'nn.Linear', (['hidden_size', 'hidden_size'], {}), '(hidden_size, hidden_size)\n', (12661, 12687), True, 'import paddle.nn as nn\n'), ((12715, 12724), 'paddle.nn.Tanh', 'nn.Tanh', ([], {}), '()\n', (12722, 12724), True, 'import paddle.nn as nn\n'), ((19509, 19544), 'paddle.nn.Linear', 'nn.Linear', (['hidden_size', 'hidden_size'], {}), '(hidden_size, hidden_size)\n', (19518, 19544), True, 'import paddle.nn as nn\n'), ((19618, 19667), 'paddle.nn.LayerNorm', 'nn.LayerNorm', (['hidden_size'], {'epsilon': 'layer_norm_eps'}), '(hidden_size, epsilon=layer_norm_eps)\n', (19630, 19667), True, 'import paddle.nn as nn\n'), ((20759, 20784), 'paddle.nn.Linear', 'nn.Linear', (['hidden_size', '(2)'], {}), '(hidden_size, 2)\n', (20768, 20784), True, 'import paddle.nn as nn\n'), ((22987, 23033), 'paddle.nn.Linear', 'nn.Linear', (["self.nezha.config['hidden_size']", '(2)'], {}), "(self.nezha.config['hidden_size'], 2)\n", (22996, 23033), True, 'import paddle.nn as nn\n'), ((23307, 23347), 'paddle.transpose', 'paddle.transpose', (['logits'], {'perm': '[2, 0, 1]'}), '(logits, perm=[2, 0, 1])\n', (23323, 23347), False, 'import paddle\n'), ((23386, 23418), 'paddle.unstack', 'paddle.unstack', ([], {'x': 'logits', 'axis': '(0)'}), '(x=logits, axis=0)\n', (23400, 23418), False, 'import paddle\n'), ((23744, 23837), 'paddle.nn.Dropout', 'nn.Dropout', (["(dropout if dropout is not None else self.nezha.config['hidden_dropout_prob'])"], {}), "(dropout if dropout is not None else self.nezha.config[\n 'hidden_dropout_prob'])\n", (23754, 23837), True, 'import paddle.nn as nn\n'), ((23860, 23916), 'paddle.nn.Linear', 'nn.Linear', (["self.nezha.config['hidden_size']", 'num_classes'], {}), "(self.nezha.config['hidden_size'], num_classes)\n", (23869, 23916), True, 'import paddle.nn as nn\n'), ((24523, 24616), 'paddle.nn.Dropout', 'nn.Dropout', (["(dropout if dropout is not None else self.nezha.config['hidden_dropout_prob'])"], {}), "(dropout if dropout 
is not None else self.nezha.config[\n 'hidden_dropout_prob'])\n", (24533, 24616), True, 'import paddle.nn as nn\n'), ((24639, 24695), 'paddle.nn.Linear', 'nn.Linear', (["self.nezha.config['hidden_size']", 'num_classes'], {}), "(self.nezha.config['hidden_size'], num_classes)\n", (24648, 24695), True, 'import paddle.nn as nn\n'), ((25299, 25392), 'paddle.nn.Dropout', 'nn.Dropout', (["(dropout if dropout is not None else self.nezha.config['hidden_dropout_prob'])"], {}), "(dropout if dropout is not None else self.nezha.config[\n 'hidden_dropout_prob'])\n", (25309, 25392), True, 'import paddle.nn as nn\n'), ((25415, 25461), 'paddle.nn.Linear', 'nn.Linear', (["self.nezha.config['hidden_size']", '(1)'], {}), "(self.nezha.config['hidden_size'], 1)\n", (25424, 25461), True, 'import paddle.nn as nn\n'), ((766, 779), 'paddle.nn.functional.softplus', 'F.softplus', (['x'], {}), '(x)\n', (776, 779), True, 'import paddle.nn.functional as F\n'), ((3223, 3242), 'paddle.t', 'paddle.t', (['range_mat'], {}), '(range_mat)\n', (3231, 3242), False, 'import paddle\n'), ((6215, 6250), 'math.sqrt', 'math.sqrt', (['self.attention_head_size'], {}), '(self.attention_head_size)\n', (6224, 6250), False, 'import math\n'), ((6402, 6421), 'paddle.nn.Softmax', 'nn.Softmax', ([], {'axis': '(-1)'}), '(axis=-1)\n', (6412, 6421), True, 'import paddle.nn as nn\n'), ((11386, 11436), 'paddle.nn.Embedding', 'nn.Embedding', (['max_position_embeddings', 'hidden_size'], {}), '(max_position_embeddings, hidden_size)\n', (11398, 11436), True, 'import paddle.nn as nn\n'), ((11950, 11993), 'paddle.zeros_like', 'paddle.zeros_like', (['input_ids'], {'dtype': '"""int64"""'}), "(input_ids, dtype='int64')\n", (11967, 11993), False, 'import paddle\n'), ((18598, 18625), 'paddle.ones_like', 'paddle.ones_like', (['input_ids'], {}), '(input_ids)\n', (18614, 18625), False, 'import paddle\n'), ((18692, 18720), 'paddle.zeros_like', 'paddle.zeros_like', (['input_ids'], {}), '(input_ids)\n', (18709, 18720), False, 'import 
paddle\n'), ((20120, 20194), 'paddle.tensor.matmul', 'paddle.tensor.matmul', (['hidden_states', 'self.decoder_weight'], {'transpose_y': '(True)'}), '(hidden_states, self.decoder_weight, transpose_y=True)\n', (20140, 20194), False, 'import paddle\n'), ((21933, 21969), 'paddle.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {'ignore_index': '(-1)'}), '(ignore_index=-1)\n', (21952, 21969), True, 'import paddle.nn as nn\n'), ((3091, 3136), 'paddle.tile', 'paddle.tile', (['range_vec'], {'repeat_times': '[length]'}), '(range_vec, repeat_times=[length])\n', (3102, 3136), False, 'import paddle\n'), ((10295, 10315), 'copy.deepcopy', 'copy.deepcopy', (['layer'], {}), '(layer)\n', (10308, 10315), False, 'import copy\n'), ((22425, 22461), 'paddle.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {'ignore_index': '(-1)'}), '(ignore_index=-1)\n', (22444, 22461), True, 'import paddle.nn as nn\n'), ((1143, 1167), 'math.sqrt', 'math.sqrt', (['(2.0 / math.pi)'], {}), '(2.0 / math.pi)\n', (1152, 1167), False, 'import math\n'), ((3686, 3716), 'numpy.power', 'np.power', (['(10000)', '(2 * i / depth)'], {}), '(10000, 2 * i / depth)\n', (3694, 3716), True, 'import numpy as np\n'), ((3783, 3813), 'numpy.power', 'np.power', (['(10000)', '(2 * i / depth)'], {}), '(10000, 2 * i / depth)\n', (3791, 3813), True, 'import numpy as np\n'), ((1186, 1204), 'paddle.pow', 'paddle.pow', (['x', '(3.0)'], {}), '(x, 3.0)\n', (1196, 1204), False, 'import paddle\n')] |
# Released under The MIT License (MIT)
# http://opensource.org/licenses/MIT
# Copyright (c) 2013-2015 SCoT Development Team
import unittest
from importlib import import_module
import numpy as np
from numpy.testing import assert_allclose
import scot
from scot import varica, datatools
from scot.var import VAR
class TestMVARICA(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def testInterface(self):
self.assertRaises(TypeError, varica.mvarica)
# simply pass in different data shapes and see if the functions runs without error
varica.mvarica(np.sin(np.arange(30)).reshape((10, 3)), VAR(1)) # 10 samples, 3 channels
varica.mvarica(np.sin(np.arange(30)).reshape((5, 3, 2)), VAR(1)) # 5 samples, 3 channels, 2 trials
def testFit(self):
""" Test submodel fitting on instationary data
"""
np.random.seed(42)
# original model coefficients
b01 = np.array([[0.0, 0], [0, 0]])
b02 = np.array([[0.5, 0.3], [0.3, 0.5]])
b03 = np.array([[0.1, 0.1], [0.1, 0.1]])
t, m, l = 10, 2, 100
noisefunc = lambda: np.random.normal(size=(1, m)) ** 3 / 1e3
var = VAR(1)
var.coef = b01
sources1 = var.simulate([l, t], noisefunc)
var.coef = b02
sources2 = var.simulate([l, t], noisefunc)
var.coef = b03
sources3 = var.simulate([l, t * 2], noisefunc)
sources = np.vstack([sources1, sources2, sources3])
cl = [1] * t + [2] * t + [1, 2] * t
var = VAR(1)
r_trial = varica.mvarica(sources, var, cl, reducedim='no_pca', varfit='trial')
r_class = varica.mvarica(sources, var, cl, reducedim='no_pca', varfit='class')
r_ensemble = varica.mvarica(sources, var, cl, reducedim='no_pca', varfit='ensemble')
vars = [np.var(r.var_residuals) for r in [r_trial, r_class, r_ensemble]]
# class one consists of trials generated with b01 and b03
# class two consists of trials generated with b02 and b03
#
# ensemble fitting cannot resolve any model -> highest residual variance
# class fitting cannot only resolve (b01+b03) vs (b02+b03) -> medium residual variance
# trial fitting can resolve all three models -> lowest residual variance
self.assertLess(vars[0], vars[1])
self.assertLess(vars[1], vars[2])
def testModelIdentification(self):
""" generate VAR signals, mix them, and see if MVARICA can reconstruct the signals
do this for every backend """
# original model coefficients
b0 = np.zeros((3, 6))
b0[1:3, 2:6] = [[0.4, -0.2, 0.3, 0.0],
[-0.7, 0.0, 0.9, 0.0]]
m0 = b0.shape[0]
l, t = 1000, 100
# generate VAR sources with non-gaussian innovation process, otherwise ICA won't work
noisefunc = lambda: np.random.normal(size=(1, m0)) ** 3 / 1e3
var = VAR(2)
var.coef = b0
sources = var.simulate([l, t], noisefunc)
# simulate volume conduction... 3 sources measured with 7 channels
mix = [[0.5, 1.0, 0.5, 0.2, 0.0, 0.0, 0.0],
[0.0, 0.2, 0.5, 1.0, 0.5, 0.2, 0.0],
[0.0, 0.0, 0.0, 0.2, 0.5, 1.0, 0.5]]
data = datatools.dot_special(np.transpose(mix), sources)
for backend_name, backend_gen in scot.backend.items():
# apply MVARICA
# - default setting of 0.99 variance should reduce to 3 channels with this data
# - automatically determine delta (enough data, so it should most likely be 0)
result = varica.mvarica(data, var, optimize_var=True, backend=backend_gen())
# ICA does not define the ordering and sign of components
# so wee need to test all combinations to find if one of them fits the original coefficients
permutations = np.array(
[[0, 1, 2, 3, 4, 5], [0, 1, 4, 5, 2, 3], [2, 3, 4, 5, 0, 1], [2, 3, 0, 1, 4, 5], [4, 5, 0, 1, 2, 3],
[4, 5, 2, 3, 0, 1]])
signperms = np.array(
[[1, 1, 1, 1, 1, 1], [1, 1, 1, 1, -1, -1], [1, 1, -1, -1, 1, 1], [1, 1, -1, -1, -1, -1],
[-1, -1, 1, 1, 1, 1], [-1, -1, 1, 1, -1, -1], [-1, -1, -1, -1, 1, 1], [-1, -1, -1, -1, -1, -1]])
best, d = np.inf, None
for perm in permutations:
b = result.b.coef[perm[::2] // 2, :]
b = b[:, perm]
for sgn in signperms:
c = b * np.repeat([sgn], 3, 0) * np.repeat([sgn[::2]], 6, 0).T
err = np.sum((c - b0) ** 2)
if err < best:
best = err
d = c
assert_allclose(d, b0, rtol=1e-2, atol=1e-2)
class TestCSPVARICA(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def testInterface(self):
# self.assertRaises(TypeError, varica.cspvarica)
# simply pass in different data shapes and see if the functions runs without error
self.assertRaises(AttributeError, varica.cspvarica, np.sin(np.arange(30)).reshape((10, 3)), VAR(1), [0])
# varica.cspvarica(np.sin(np.arange(30)).reshape((2, 3, 5)), VAR(1), ['A', 'B']) # 5 samples, 3 channels, 2 trials
def testFit(self):
""" Test submodel fitting on instationary data
"""
np.random.seed(42)
# original model coefficients
b01 = np.array([[0.0, 0], [0, 0]])
b02 = np.array([[0.5, 0.3], [0.3, 0.5]])
b03 = np.array([[0.1, 0.1], [0.1, 0.1]])
t, m, l = 10, 2, 100
noisefunc = lambda: np.random.normal(size=(1, m)) ** 3 / 1e3
var = VAR(1)
var.coef = b01
sources1 = var.simulate([l, t], noisefunc)
var.coef = b02
sources2 = var.simulate([l, t], noisefunc)
var.coef = b03
sources3 = var.simulate([l, t * 2], noisefunc)
sources = np.vstack([sources1, sources2, sources3])
cl = [1] * t + [2] * t + [1, 2] * t
var = VAR(1)
r_trial = varica.cspvarica(sources, var, cl, reducedim=None, varfit='trial')
r_class = varica.cspvarica(sources, var, cl, reducedim=None, varfit='class')
r_ensemble = varica.cspvarica(sources, var, cl, reducedim=None, varfit='ensemble')
vars = [np.var(r.var_residuals) for r in [r_trial, r_class, r_ensemble]]
# class one consists of trials generated with b01 and b03
# class two consists of trials generated with b02 and b03
#
# ensemble fitting cannot resolve any model -> highest residual variance
# class fitting cannot only resolve (b01+b03) vs (b02+b03) -> medium residual variance
# trial fitting can resolve all three models -> lowest residual variance
print(vars)
self.assertLess(vars[0], vars[1])
self.assertLess(vars[1], vars[2])
| [
"numpy.random.normal",
"numpy.repeat",
"numpy.arange",
"scot.varica.cspvarica",
"numpy.testing.assert_allclose",
"numpy.array",
"numpy.zeros",
"numpy.var",
"numpy.sum",
"numpy.vstack",
"numpy.random.seed",
"scot.backend.items",
"numpy.transpose",
"scot.varica.mvarica",
"scot.var.VAR"
] | [((904, 922), 'numpy.random.seed', 'np.random.seed', (['(42)'], {}), '(42)\n', (918, 922), True, 'import numpy as np\n'), ((976, 1004), 'numpy.array', 'np.array', (['[[0.0, 0], [0, 0]]'], {}), '([[0.0, 0], [0, 0]])\n', (984, 1004), True, 'import numpy as np\n'), ((1019, 1053), 'numpy.array', 'np.array', (['[[0.5, 0.3], [0.3, 0.5]]'], {}), '([[0.5, 0.3], [0.3, 0.5]])\n', (1027, 1053), True, 'import numpy as np\n'), ((1068, 1102), 'numpy.array', 'np.array', (['[[0.1, 0.1], [0.1, 0.1]]'], {}), '([[0.1, 0.1], [0.1, 0.1]])\n', (1076, 1102), True, 'import numpy as np\n'), ((1217, 1223), 'scot.var.VAR', 'VAR', (['(1)'], {}), '(1)\n', (1220, 1223), False, 'from scot.var import VAR\n'), ((1469, 1510), 'numpy.vstack', 'np.vstack', (['[sources1, sources2, sources3]'], {}), '([sources1, sources2, sources3])\n', (1478, 1510), True, 'import numpy as np\n'), ((1570, 1576), 'scot.var.VAR', 'VAR', (['(1)'], {}), '(1)\n', (1573, 1576), False, 'from scot.var import VAR\n'), ((1595, 1663), 'scot.varica.mvarica', 'varica.mvarica', (['sources', 'var', 'cl'], {'reducedim': '"""no_pca"""', 'varfit': '"""trial"""'}), "(sources, var, cl, reducedim='no_pca', varfit='trial')\n", (1609, 1663), False, 'from scot import varica, datatools\n'), ((1682, 1750), 'scot.varica.mvarica', 'varica.mvarica', (['sources', 'var', 'cl'], {'reducedim': '"""no_pca"""', 'varfit': '"""class"""'}), "(sources, var, cl, reducedim='no_pca', varfit='class')\n", (1696, 1750), False, 'from scot import varica, datatools\n'), ((1772, 1843), 'scot.varica.mvarica', 'varica.mvarica', (['sources', 'var', 'cl'], {'reducedim': '"""no_pca"""', 'varfit': '"""ensemble"""'}), "(sources, var, cl, reducedim='no_pca', varfit='ensemble')\n", (1786, 1843), False, 'from scot import varica, datatools\n'), ((2636, 2652), 'numpy.zeros', 'np.zeros', (['(3, 6)'], {}), '((3, 6))\n', (2644, 2652), True, 'import numpy as np\n'), ((2977, 2983), 'scot.var.VAR', 'VAR', (['(2)'], {}), '(2)\n', (2980, 2983), False, 'from scot.var import VAR\n'), 
((3395, 3415), 'scot.backend.items', 'scot.backend.items', ([], {}), '()\n', (3413, 3415), False, 'import scot\n'), ((5455, 5473), 'numpy.random.seed', 'np.random.seed', (['(42)'], {}), '(42)\n', (5469, 5473), True, 'import numpy as np\n'), ((5527, 5555), 'numpy.array', 'np.array', (['[[0.0, 0], [0, 0]]'], {}), '([[0.0, 0], [0, 0]])\n', (5535, 5555), True, 'import numpy as np\n'), ((5570, 5604), 'numpy.array', 'np.array', (['[[0.5, 0.3], [0.3, 0.5]]'], {}), '([[0.5, 0.3], [0.3, 0.5]])\n', (5578, 5604), True, 'import numpy as np\n'), ((5619, 5653), 'numpy.array', 'np.array', (['[[0.1, 0.1], [0.1, 0.1]]'], {}), '([[0.1, 0.1], [0.1, 0.1]])\n', (5627, 5653), True, 'import numpy as np\n'), ((5768, 5774), 'scot.var.VAR', 'VAR', (['(1)'], {}), '(1)\n', (5771, 5774), False, 'from scot.var import VAR\n'), ((6020, 6061), 'numpy.vstack', 'np.vstack', (['[sources1, sources2, sources3]'], {}), '([sources1, sources2, sources3])\n', (6029, 6061), True, 'import numpy as np\n'), ((6121, 6127), 'scot.var.VAR', 'VAR', (['(1)'], {}), '(1)\n', (6124, 6127), False, 'from scot.var import VAR\n'), ((6146, 6212), 'scot.varica.cspvarica', 'varica.cspvarica', (['sources', 'var', 'cl'], {'reducedim': 'None', 'varfit': '"""trial"""'}), "(sources, var, cl, reducedim=None, varfit='trial')\n", (6162, 6212), False, 'from scot import varica, datatools\n'), ((6231, 6297), 'scot.varica.cspvarica', 'varica.cspvarica', (['sources', 'var', 'cl'], {'reducedim': 'None', 'varfit': '"""class"""'}), "(sources, var, cl, reducedim=None, varfit='class')\n", (6247, 6297), False, 'from scot import varica, datatools\n'), ((6319, 6388), 'scot.varica.cspvarica', 'varica.cspvarica', (['sources', 'var', 'cl'], {'reducedim': 'None', 'varfit': '"""ensemble"""'}), "(sources, var, cl, reducedim=None, varfit='ensemble')\n", (6335, 6388), False, 'from scot import varica, datatools\n'), ((661, 667), 'scot.var.VAR', 'VAR', (['(1)'], {}), '(1)\n', (664, 667), False, 'from scot.var import VAR\n'), ((762, 768), 'scot.var.VAR', 
'VAR', (['(1)'], {}), '(1)\n', (765, 768), False, 'from scot.var import VAR\n'), ((1861, 1884), 'numpy.var', 'np.var', (['r.var_residuals'], {}), '(r.var_residuals)\n', (1867, 1884), True, 'import numpy as np\n'), ((3325, 3342), 'numpy.transpose', 'np.transpose', (['mix'], {}), '(mix)\n', (3337, 3342), True, 'import numpy as np\n'), ((3923, 4057), 'numpy.array', 'np.array', (['[[0, 1, 2, 3, 4, 5], [0, 1, 4, 5, 2, 3], [2, 3, 4, 5, 0, 1], [2, 3, 0, 1, 4,\n 5], [4, 5, 0, 1, 2, 3], [4, 5, 2, 3, 0, 1]]'], {}), '([[0, 1, 2, 3, 4, 5], [0, 1, 4, 5, 2, 3], [2, 3, 4, 5, 0, 1], [2, 3,\n 0, 1, 4, 5], [4, 5, 0, 1, 2, 3], [4, 5, 2, 3, 0, 1]])\n', (3931, 4057), True, 'import numpy as np\n'), ((4112, 4316), 'numpy.array', 'np.array', (['[[1, 1, 1, 1, 1, 1], [1, 1, 1, 1, -1, -1], [1, 1, -1, -1, 1, 1], [1, 1, -1,\n -1, -1, -1], [-1, -1, 1, 1, 1, 1], [-1, -1, 1, 1, -1, -1], [-1, -1, -1,\n -1, 1, 1], [-1, -1, -1, -1, -1, -1]]'], {}), '([[1, 1, 1, 1, 1, 1], [1, 1, 1, 1, -1, -1], [1, 1, -1, -1, 1, 1], [\n 1, 1, -1, -1, -1, -1], [-1, -1, 1, 1, 1, 1], [-1, -1, 1, 1, -1, -1], [-\n 1, -1, -1, -1, 1, 1], [-1, -1, -1, -1, -1, -1]])\n', (4120, 4316), True, 'import numpy as np\n'), ((4782, 4826), 'numpy.testing.assert_allclose', 'assert_allclose', (['d', 'b0'], {'rtol': '(0.01)', 'atol': '(0.01)'}), '(d, b0, rtol=0.01, atol=0.01)\n', (4797, 4826), False, 'from numpy.testing import assert_allclose\n'), ((5219, 5225), 'scot.var.VAR', 'VAR', (['(1)'], {}), '(1)\n', (5222, 5225), False, 'from scot.var import VAR\n'), ((6406, 6429), 'numpy.var', 'np.var', (['r.var_residuals'], {}), '(r.var_residuals)\n', (6412, 6429), True, 'import numpy as np\n'), ((1161, 1190), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(1, m)'}), '(size=(1, m))\n', (1177, 1190), True, 'import numpy as np\n'), ((2920, 2950), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(1, m0)'}), '(size=(1, m0))\n', (2936, 2950), True, 'import numpy as np\n'), ((4647, 4668), 'numpy.sum', 'np.sum', (['((c - b0) ** 2)'], 
{}), '((c - b0) ** 2)\n', (4653, 4668), True, 'import numpy as np\n'), ((5712, 5741), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(1, m)'}), '(size=(1, m))\n', (5728, 5741), True, 'import numpy as np\n'), ((628, 641), 'numpy.arange', 'np.arange', (['(30)'], {}), '(30)\n', (637, 641), True, 'import numpy as np\n'), ((727, 740), 'numpy.arange', 'np.arange', (['(30)'], {}), '(30)\n', (736, 740), True, 'import numpy as np\n'), ((5186, 5199), 'numpy.arange', 'np.arange', (['(30)'], {}), '(30)\n', (5195, 5199), True, 'import numpy as np\n'), ((4566, 4588), 'numpy.repeat', 'np.repeat', (['[sgn]', '(3)', '(0)'], {}), '([sgn], 3, 0)\n', (4575, 4588), True, 'import numpy as np\n'), ((4591, 4618), 'numpy.repeat', 'np.repeat', (['[sgn[::2]]', '(6)', '(0)'], {}), '([sgn[::2]], 6, 0)\n', (4600, 4618), True, 'import numpy as np\n')] |
#================================LabFuncs.py===================================#
# Created by <NAME> 2019
# Description:
# Contains an assortment of functions that are all related to the 'Lab' somehow
# e.g. the nuclear form factor, lab velocity etc.
# Contains:
#####
# FormFactorHelm: Only Form factor being used atm
#####
##### Resolutions
# Smear: Applies angular resolution to a recoil map as a function of direction
# SmearE: Applies energy resolution to a recoil spectrum as a function of energy
#####
##### Lab velocity
# LabVelocity: Full lab velocity in (N,W,Z) with Earth rotation
# LabVelocitySimple: Simplified Lab velocity in galactic coordinates
# JulianDay: JulianDay at dd-mm-yyyy hh:hh
# EarthVelocity: Earth velocity to second order in eccentricity
# EarthVector: Earth radius vector to second order in eccentricity
# v_infinity: transforms velocity to the value outside the Sun's potential
# v_infinity_alt: same as v_inficity but with a different velocity discretisation
#####
##### Solar direction:
# EarthSunDistance: Distance between Earth and Sun as a function of time
# SolarDirection: Direction of the sun at a given time
#####
##### Co-ordinate transformations
# eqt2lab: Equatorial system to laboratory system
# gal2eqt: Galactic system to equatorial system
# gal2lab: Galactic system to lab system
#####
#==============================================================================#
import numpy as np
from numpy import cos, sin, pi, floor, exp, sqrt, size, zeros, shape, arccos
from numpy import array, trapz
import Params
#==============================Form Factors====================================#
def FormFactorHelm(E_r,A):
    """Helm nuclear form factor for a recoil energy E_r off a nucleus of
    mass number A.

    E_r : recoil energy (keV)
    A   : mass number of the target nucleus

    The momentum transfer q is built from the nuclear mass ~ A*931.5 MeV;
    the 1.0e-12/1.97e-7 factors are unit conversions (presumably via
    hbar*c = 197 MeV fm -- confirm against the source paper).
    """
    q = sqrt(2*A*931.5*1000*E_r)*1.0e-12/1.97e-7
    skin = 0.9                       # nuclear skin thickness parameter
    c_r = 1.23*A**(1.0/3.0)-0.6      # half-density radius parameterisation
    R_eff = sqrt(c_r**2 + (7.0/3.0)*pi**2.0*(0.52**2.0) - 5*skin**2.0)
    qR = q*R_eff
    # F(q) = 3 [sin(qR) - qR cos(qR)] / (qR)^3 * exp(-(q*skin)^2/2)
    return 3*(sin(qR) - qR*cos(qR))*exp(-q*q*skin*skin/2.0)/qR**3
#------------------------------------------------------------------------------#
#=======================Apply Angular Resolution===============================#
def Smear(x,dR,sig_gamma):
    """Apply a Gaussian angular resolution to a directional recoil map.

    x         : (npix, 3) array of cartesian unit vectors (one per pixel)
    dR        : rate evaluated at each direction in x
    sig_gamma : Gaussian angular width (radians) to smear dR by

    Returns the smeared map, rescaled so its total equals that of the
    input map.
    """
    npix = size(dR)
    smeared = zeros(shape=shape(dR))
    denom = 2*sig_gamma**2.0
    for k in range(0,npix):
        centre = x[k,:]
        # cosine of the angle between pixel k and every other pixel
        cosang = centre[0]*x[:,0] + centre[1]*x[:,1] + centre[2]*x[:,2]
        cosang[k] = 1.0   # guard the self-term against round-off outside [-1,1]
        ang = arccos(cosang)
        smeared[k] = sum(dR*exp(-ang**2.0/denom))
    # Rescale so the smearing conserves the summed rate
    smeared = smeared*sum(dR)/sum(smeared)
    return smeared
#------------------------------------------------------------------------------#
#===========================Apply Energy Res===================================#
def SmearE(E,dR,sig_E):
    """Apply a Gaussian energy resolution to a recoil energy spectrum.

    E     : array of recoil energies at which dR is tabulated
    dR    : rate evaluated at the energies in E
    sig_E : Gaussian energy resolution; a single scalar used at every
            energy, or an array of the same length as E

    Returns the smeared spectrum, renormalised so its trapezium-rule
    integral over E matches that of the input spectrum.
    """
    nE = size(dR)
    dR_smeared = zeros(shape=shape(dR))
    # Promote a scalar resolution to a per-energy array.
    # (Bug fix: the original used the bare name `ones`, which is not
    # imported at module level, so a scalar sig_E raised NameError.)
    if size(sig_E)==1:
        sig_E = sig_E*np.ones(shape=shape(dR))
    for i in range(0,nE):
        Ediff = abs(E-E[i])
        dR_smeared[i] = trapz(dR*exp(-Ediff**2.0/(2*sig_E**2.0)),E)
    # Renormalise so the smearing conserves the integrated rate
    dR_smeared = dR_smeared*trapz(dR,E)/trapz(dR_smeared,E)
    return dR_smeared
#------------------------------------------------------------------------------#
#==============================Lab Velocity====================================#
# Peculiar velocity of the Sun with respect to the LSR (km/s, galactic coords)
v_pec = array([11.1,12.2,7.3])

# Earth orbital params
vv_earthrev = 29.79        # Earth's orbital (revolution) speed (km/s)
eccentricity = 0.016722    # eccentricity of Earth's orbit
eccentricity_deg = 0.9574
orb_long_ecliptic = 13.0+1.0    # used as lambda_0 in LabVelocity (degrees)
lat_ecl_gal = np.array([-5.5303,59.575,29.812])      # used as beta in LabVelocity (degrees)
long_ecl_gal = np.array([266.141,-13.3485,179.3212]) # used as lambda_i in LabVelocity (degrees)
e1 = array([0.9941,0.1088,0.0042])    # orbital-plane basis vector (galactic coords), see EarthVelocity/EarthVector
e2 = array([-0.0504,0.4946,-0.8677])  # orbital-plane basis vector (galactic coords), see EarthVelocity/EarthVector
w_p = 2*pi/365 # orbital freq. (radians/day)
t1 = 79        # phase-origin day in EarthVelocity (presumably spring equinox -- confirm)
ve = 29.79 # Earth's revolution (km/s)
vrot = 0.47 # Earth's rotation (km/s)

# Other constants
AstronomicalUnit = 1.49597892e11 # Astronomical Unit (m)
EarthRadius = 6371.01*1000.0 # Earth Radius (m)
Msun = 2.0e30 # Solar mass (kg)
bigG = 6.67e-11*(1.0e3)**(-3) # Newton's G converted to km^3 kg^-1 s^-2
Jan1 = 2458849.5 # Julian date of the "day 0" epoch
                 # NOTE(review): JD 2458849.5 is 2020-01-01 00:00 UT, not Jan 1 2019 -- confirm intended epoch
#------------------------------------------------------------------------------#
def LabVelocity(day, Loc=Params.Boulby, v_LSR=233.0):
    """Full lab velocity (km/s) in laboratory coordinates (N,W,Z).

    Sums the galactic (LSR) rotation, the Sun's peculiar motion, the
    Earth's orbital revolution and the Earth's rotation, each transformed
    into the lab frame at the given location and time.

    day   : time in days measured from the epoch Jan1 (module constant)
    Loc   : location object providing .Latitude and .Longitude (degrees)
    v_LSR : Local Standard of Rest speed (km/s)
    """
    JD = day+Jan1
    lat = Loc.Latitude
    lon = Loc.Longitude
    # Convert day into phase of Earth rotation t_lab
    UT = 24*(JD+0.5-floor(JD+0.5)) #Universal time (hours)
    MJD = JD - 2400000.5 #Modified Julian Day
    T_0 = (floor(MJD)-55197.5)/36525.0
    # t_GAST: Greenwich apparent sidereal time in hours (low-precision
    # polynomial -- TODO confirm epoch offset 55197.5 against the source)
    t_GAST = (101.0308 + 36000.770*T_0 + 15.04107*UT)/15.0
    t_lab = t_GAST + lon/15
    t_lab = 15*t_lab #Lab time in degrees
    # Galactic (LSR) Rotation
    vtemp = np.array([0.0,v_LSR,0.0])
    v_galrot = gal2lab(vtemp,t_lab, lat) #transform to lab co-ords
    # Peculiar solar Motion
    vtemp1 = v_pec
    v_solar = gal2lab(vtemp1,t_lab, lat) # transform to lab co-ords
    #Earth's revolution (first calculate in galactic frame then transform)
    e = eccentricity
    lambda_0 = orb_long_ecliptic
    # L, g: solar mean longitude and mean anomaly in degrees (presumably
    # standard low-precision solar ephemeris expressions -- confirm)
    L = 281.0298 + 36000.77*T_0 + 0.04107*UT
    g = 357.9258 + 35999.05*T_0 + 0.04107*UT
    lambda_sun = L + (1.915 - 0.0048*T_0)*sin(g*pi/180.0)\
        + 0.020*sin(2*g*pi/180.0)
    beta = lat_ecl_gal
    lambda_i = long_ecl_gal
    v_earthrev1 = vv_earthrev*(1-e*sin(pi/180.0*(lambda_sun-lambda_0)))*\
        (cos(beta*pi/180.0)*sin(pi/180.0*(lambda_sun-lambda_i)))
    v_earthrev = gal2lab(v_earthrev1,t_lab, lat) #transform to lab co-ords
    # Earth's rotation (already in lab co-ords); scales with cos(latitude)
    v_earthrot = 0.465102*cos(lat*pi/180)*np.array([0.0,-1.0,0.0])
    # Add them all together (delete as needed)
    v_lab = np.array([0.,0.,0.])
    v_lab += v_earthrot
    v_lab += v_earthrev
    v_lab += v_solar
    v_lab += v_galrot
    return v_lab
def JulianDay(month, day, year, hour):
    """Julian Day for a Gregorian calendar date and UT hour.

    Standard integer-arithmetic algorithm: January/February are treated
    as months 13/14 of the previous year; the fractional part comes from
    the hour relative to noon.
    """
    shift = floor((14-month)/12.0)       # 1 for Jan/Feb, else 0
    y = year + 4800 - shift
    m = month + 12*shift - 3
    jd = (day + floor((153*m+2)/5.0) + 365*y
          + floor(y/4.0) - floor(y/100.0) + floor(y/400.0)
          - 32045 + (hour-12.0)/24.0)
    return jd
def LabVelocitySimple(day,v_LSR=233.0):
    """Simplified lab velocity in galactic coordinates (km/s).

    Sum of the LSR rotation, the solar peculiar velocity and the Earth's
    orbital velocity; ignores the Earth's rotation.

    day : time in days measured from the epoch Jan1
    """
    return array([0.0,v_LSR,0.0]) + v_pec + EarthVelocity(day)
def EarthVelocity(day):
    """Earth's orbital velocity (km/s) in galactic coordinates, expanded
    to second order in the orbital eccentricity.

    day : time in days measured from the epoch Jan1
    """
    lam_p = 102.93*pi/180.0          # longitude of perihelion (radians)
    phase = w_p*(day-t1)
    sl = sin(lam_p)
    cl = cos(lam_p)
    first = cos(phase)*(e1-2*eccentricity*sl*e2)
    second = sin(phase)*(e2+2*eccentricity*sl*e1)
    third = eccentricity*(cos(2*phase)*(cl*e1-sl*e2)
                          + sin(2*phase)*(sl*e1+cl*e2))
    return vv_earthrev*(first + second - third)
def EarthVector(day):
    """Earth's heliocentric orbital radius vector (km).

    day : time in days measured from the epoch Jan1

    Uses the equation of the center to second order in the Earth's
    orbital eccentricity to get the true anomaly, then builds the
    position in the orbital-plane basis (e1, e2).
    """
    a_earth = AstronomicalUnit/1.0e3   # semi-major axis in km
    tp = 3                             # day of perihelion (~Jan 3)
    lamb_p = 102*pi/180                # longitude of perihelion (radians)
    g = w_p*(day-tp)                   # mean anomaly
    # Equation of the center: nu ~ g + 2 e sin(g) + (5/4) e^2 sin(2g).
    # (Bug fix: the 5/4 factor belongs to the e^2 sin(2g) term; it was
    # previously applied to the first-order 2 e sin(g) term.)
    nu = g + 2.*eccentricity*sin(g) + (5.0/4.0)*eccentricity**2.0*sin(2*g)
    r = a_earth*(1-eccentricity**2.0)/(1+eccentricity*cos(nu))
    r_earth = r*(-sin(lamb_p+nu)*e1 + cos(lamb_p+nu)*e2)
    return r_earth
def v_infinity(v,costh,phi,day):
    """Velocity at infinity (outside the Sun's potential) for a velocity
    given as a speed and direction angles -- used for gravitational
    focusing.

    v     : speed (km/s)
    costh : cosine of the polar angle of the velocity direction
    phi   : azimuthal angle of the velocity direction (radians)
    day   : time in days measured from the epoch Jan1

    Returns the (x, y, z) components of the velocity at infinity.
    """
    r_vec = EarthVector(day)
    r_mag = sqrt(sum(r_vec**2.0))
    r_hat = r_vec/r_mag                # unit vector towards the Earth
    u_E = EarthVelocity(day)
    uu_esc = 2*bigG*Msun/r_mag         # escape speed squared at Earth's orbit
    sinth = sqrt(1.0-costh**2.0)
    # velocity components including the Earth's motion
    vx = v*sinth*cos(phi)+u_E[0]
    vy = v*sinth*sin(phi)+u_E[1]
    vz = v*costh+u_E[2]
    vv_inf = (vx**2.0+vy**2.0+vz**2.0)-uu_esc
    vv_inf = (vv_inf+abs(vv_inf))/2.0  # clamp negative values (bound orbits) to zero
    vdotr = vx*r_hat[0]+vy*r_hat[1]+vz*r_hat[2]
    v_inf = sqrt(vv_inf)
    denom = vv_inf + 0.5*uu_esc - v_inf*vdotr
    v_infx = (vv_inf*vx + 0.5*v_inf*uu_esc*r_hat[0] - v_inf*vx*vdotr)/denom
    v_infy = (vv_inf*vy + 0.5*v_inf*uu_esc*r_hat[1] - v_inf*vy*vdotr)/denom
    v_infz = (vv_inf*vz + 0.5*v_inf*uu_esc*r_hat[2] - v_inf*vz*vdotr)/denom
    return v_infx,v_infy,v_infz
def v_infinity_alt(v3,day):
    """Velocity at infinity (outside the Sun's potential) for an array of
    cartesian velocity vectors -- used for gravitational focusing.

    Same transformation as v_infinity, but takes cartesian vectors v3
    (e.g. a Healpix discretisation of directions) instead of a speed and
    angles. Tends to be a bit faster and more accurate.

    v3  : (npix, 3) array of velocity vectors in galactic coordinates
    day : time in days measured from the epoch Jan1

    Returns the (vx, vy, vz) components of the velocities at infinity.
    """
    x_earth = EarthVector(day)
    r_earth = sqrt(sum(x_earth**2.0))
    x_earth /= r_earth # unit vector towards Earth
    v_earth = EarthVelocity(day)
    uu_esc = 2*bigG*Msun/r_earth # escape speed squared at Earth's orbit
    vx = v3[:,0]+v_earth[0] # galactic x-component
    vy = v3[:,1]+v_earth[1] # galactic y-component
    vz = v3[:,2]+v_earth[2] # galactic z-component
    vv_inf = (vx**2.0+vy**2.0+vz**2.0)-uu_esc
    vv_inf = (vv_inf+abs(vv_inf))/2.0 # clamp negative values (bound orbits) to zero
    #vv_inf[vv_inf<0.0] = 0.0
    vdotr = vx*x_earth[0]+vy*x_earth[1]+vz*x_earth[2] # (v.x_earth)
    v_inf = sqrt(vv_inf)
    denom = vv_inf + 0.5*uu_esc - v_inf*vdotr
    v_infx = (vv_inf*vx + 0.5*v_inf*uu_esc*x_earth[0] - v_inf*vx*vdotr)/denom
    v_infy = (vv_inf*vy + 0.5*v_inf*uu_esc*x_earth[1] - v_inf*vy*vdotr)/denom
    v_infz = (vv_inf*vz + 0.5*v_inf*uu_esc*x_earth[2] - v_inf*vz*vdotr)/denom
    return v_infx,v_infy,v_infz
#==========================Solar direction=====================================#
def EarthSunDistance(JD): # Earth-sun distance at Julian Day (JD)
    """Earth-Sun distance (metres) at Julian Day JD, from a low-precision
    formula in the solar mean anomaly."""
    days = JD-2451545.0                       # days since J2000.0
    g = (357.529 + 0.98560028*days)*pi/180.0  # mean anomaly (radians)
    r_au = 1.00014 - 0.01671*cos(g) - 0.00014*cos(2*g)
    return r_au*AstronomicalUnit
#------------------------------------------------------------------------------#
def SolarDirection(JD,Loc): # Solar direction in lab coords at Julian Day (JD)
    """Unit vector towards the Sun in laboratory (N,W,Z) coordinates.

    JD  : Julian Day (absolute, not relative to Jan1)
    Loc : location object providing .Latitude and .Longitude (degrees)

    Computes a low-precision solar right ascension and declination from
    JD (angles in radians), builds the equatorial unit vector, then
    rotates it into the lab frame with eqt2lab.
    """
    lat = Loc.Latitude
    lon = Loc.Longitude
    # Compute RA and dec of Sun
    #JD = day+Jan1
    n = JD - 2451545.0                   # days since J2000.0
    Omega = 2.1429-0.0010394594*n
    L = 4.8950630 + 0.017202791698*n     # presumably solar mean longitude (rad) -- confirm
    g = 6.2400600 + 0.0172019699*n       # presumably solar mean anomaly (rad) -- confirm
    ll = L+0.03341607*sin(g) + 0.00034894*sin(2*g)\
        - 0.0001134 - 0.0000203*sin(Omega)
    ep = 0.4090928 - 6.214e-9*n + 0.0000396*cos(Omega) # obliquity of the ecliptic (rad)
    ra = np.arctan2((cos(ep)*sin(ll)),cos(ll)) # Right ascension of Sun
    dec = np.arcsin(sin(ep)*sin(ll)) # Declination of sun
    # Solar unit vector in equatorial coordinates
    x_sun1 = np.array([0.,0.,0.])
    x_sun1[0] = cos(dec)*cos(ra)
    x_sun1[1] = cos(dec)*sin(ra)
    x_sun1[2] = sin(dec)
    # Lab time conversion (same sidereal-time expression as LabVelocity)
    UT = 24*(JD+0.5-floor(JD+0.5))
    MJD = JD - 2400000.5
    T_0 = (floor(MJD)-55197.5)/36525.0
    t_GAST = (101.0308 + 36000.770*T_0 + 15.04107*UT)/15.0
    t_lab = t_GAST + lon/15.0
    t_lab = 15*t_lab # DEGREES
    # Convert vector from equatorial system into lab system
    x_sun = eqt2lab(x_sun1,t_lab,lat)
    return x_sun
def EarthSunDistanceMod(JD):
    """Seasonal modulation factor for the solar neutrino flux.

    The flux scales as 1/R(t)^2, but tabulated fluxes are yearly
    averages, so divide by the one-year integral of 1/R^2 (the constant
    below, also defined in params.f95) to get a unit-mean factor.
    """
    Integral_inv_EarthSun_sq = 4.468864372000642e-23 # integral(1/R^2) over 1 year
    return (1.0/Integral_inv_EarthSun_sq)*(1.0/EarthSunDistance(JD)**2.0)
#------------------------------------------------------------------------------#
#==============================================================================#
#---------------------------Coordinate trans.----------------------------------#
def eqt2lab(vp,t_lab,lat): # Equatorial (x_e,y_e,z_e) to Laboratory (N,W,Z)
    """Rotate a 3-vector from equatorial coordinates into the lab frame.

    vp    : 3-vector in equatorial coordinates (numpy array)
    t_lab : lab time angle in degrees (local sidereal time * 15)
    lat   : laboratory latitude in degrees
    """
    t = t_lab*pi/180.0
    latr = lat*pi/180.0
    st, ct = sin(t), cos(t)
    slat, clat = sin(latr), cos(latr)
    out = vp*0.0
    out[0] = -ct*slat*vp[0] - st*slat*vp[1] + clat*vp[2]
    out[1] = st*vp[0] - ct*vp[1]
    out[2] = ct*clat*vp[0] + clat*st*vp[1] + slat*vp[2]
    return out
def gal2eqt(vp): # Galactic (x_g,y_g,z_g) to Equatorial (x_e,y_e,z_e)
    """Rotate a 3-vector from galactic into equatorial coordinates using
    the fixed galactic->equatorial rotation matrix, applied row by row."""
    M = array([[-0.06699, 0.4927, -0.8676],
               [-0.8728, -0.4503, -0.1884],
               [-0.4835,  0.7446,  0.4602]])
    out = 0.0*vp
    for r in range(3):
        out[r] = M[r,0]*vp[0] + M[r,1]*vp[1] + M[r,2]*vp[2]
    return out
def gal2lab(v, t_lab, lat):
    """Rotate a 3-vector from galactic (x_g, y_g, z_g) to the laboratory
    frame (N, W, Z): first galactic -> equatorial, then equatorial -> lab.
    """
    equatorial = gal2eqt(v)
    return eqt2lab(equatorial, t_lab, lat)
#==============================================================================#
# def BinEvents(Expt,dRfunc,*Args):
# # Expt = Detector class
# # dRfunc = differential recoil rate that is being binned
# # Args = anything else needed by dRfunc
#
# # Energy and time binning:
# E_bins = Expt.Energies
# t_bins = Expt.Times
# Efficiency = Expt.Efficiency
# ne = size(E_bins)
# nt = size(t_bins)
#
# # DIRECTIONAL LIMITS
# if Expt.Directional:
# q = Expt.Directions
# sig_gamma = Expt.AngularResolution
# HeadTail = Expt.HeadTailEfficiency
#
# npix = size(q)/3
# E_r = zeros(shape=(ne*nt*npix))
# E = zeros(shape=(ne*nt*npix,3))
# eff = zeros(shape=(ne*nt*npix))
# t = zeros(shape=(ne*nt*npix))
# eff_HT = zeros(shape=(ne*nt*npix))
# ii = 0
# for i in range(0,nt):
# for j in range(0,ne):
# for k in range(0,npix):
# E_r[ii] = E_bins[j]
# E[ii,:] = E_bins[j]*q[k,:]
# t[ii] = t_bins[i]
# eff[ii] = Efficiency[j]
# eff_HT[ii] = HeadTail[j]
# ii += 1
#
# # Correct for Head-Tail
# if HeadTail[0]>0.99:
# dR = dRfunc(E,t,Expt,Args[0],Args[1])
# else:
# dR = (1.0-eff_HT)*dRfunc(E,t,Expt,Args[0],Args[1])\
# +eff_HT*dRfunc(-1.0*E,t,Expt,Args[0],Args[1])
#
# dR = dR*4*pi/(1.0*npix*nt)
# # Correct for Efficiency
# if Efficiency[0]<0.99:
# dR = dR*eff
#
# # Correct for Angular resolution
# if sig_gamma[0]>0.01:
# i1 = 0
# dR_smear = zeros(shape=shape(dR))
# for i in range(0,nt):
# for j in range(0,ne):
# i2 = i1 + npix - 1
# if sum(dR[i1:i2+1])>0.0:
# dR_smear[i1:i2+1] = Smear(q,dR[i1:i2+1],sig_gamma[j])
# i1 = i2+1
# dR = dR_smear
#
# # Bin events
# i1 = 0
# RD = zeros(shape=(ne-1)*nt*npix)
# for i in range(0,nt):
# for j in range(0,ne-1):
# i2 = i1 + npix - 1
# dR1 = dR[(t==t_bins[i])&(E_r==E_bins[j])]
# dR2 = dR[(t==t_bins[i])&(E_r==E_bins[j+1])]
# RD[i1:i2+1] = 0.5*(E_bins[j+1] - E_bins[j])*(dR1+dR2)
# i1 = i2+1
# # Last step: turn energy off if needed
# if Expt.EnergyOff:
# i1 = 0
# RD_reduced = zeros(shape=(ne-1)*nt)
# it1 = 0
# for i in range(0,nt):
# it2 = it1 + npix -1
# for j in range(0,ne-1):
# i2 = i1 + npix - 1
# RD_reduced[it1:it2+1] += RD[i1:i2]
# i1 = i2 + 1
# it1 = it2+1
# RD = RD_reduced
#
# # Non-directional limits
# else:
# E = zeros(shape=(ne*nt))
# t = zeros(shape=(ne*nt))
# eff = zeros(shape=(ne*nt))
# ii = 0
# for i in range(0,nt):
# for j in range(0,ne):
# E[ii] = E_bins[j]
# t[ii] = t_bins[i]
# eff[ii] = Efficiency[j]
# ii += 1
#
# dR = dRfunc(E,t,Expt,Args[0],Args[1])
# dR = dR/(1.0*nt)
# # Correct for Efficiency
# if Efficiency[0]<0.99:
# dR = dR*eff
#
# # Bin events
# i1 = 0
# RD = zeros(shape=(ne-1)*nt)
# for i in range(0,nt):
# i2 = i1 + ne - 2
# dR1 = dR[(t==t_bins[i])]
# RD[i1:i2+1] = 0.5*(E_bins[1:] - E_bins[0:-1])*(dR1[1:]+dR1[0:-1])
# i1 = i2 + 1
#
# RD *= Expt.Exposure
# return RD
# #------------------------------------------------------------------------------#
#
#
def GravFocusAngles(vv, costh, phi, day, sig=164.75, v_LSR=233.0, v_shift=None):
    """Perturbative gravitational-focusing factor J for a given velocity.

    Parameters
    ----------
    vv : float
        Speed (presumably km/s, matching sig/v_LSR -- TODO confirm).
    costh, phi : float
        Direction of the velocity: cosine of the polar angle and azimuth.
    day : float
        Day used to evaluate the Earth's velocity and position vectors.
    sig : float, optional
        Velocity dispersion; v0 = sqrt(2)*sig.
    v_LSR : float, optional
        Local-standard-of-rest speed.
    v_shift : array-like of shape (3,), optional
        Offset subtracted from the solar velocity. Defaults to the zero
        vector. (Previously a mutable numpy-array default evaluated once at
        definition time -- replaced by a None sentinel; callers passing an
        explicit array are unaffected.)

    Returns
    -------
    J : float or ndarray
        Gravitational-focusing correction factor.
    """
    if v_shift is None:
        v_shift = array([0.0, 0.0, 0.0])
    # Cartesian velocity components from (vv, costh, phi)
    v1 = vv*sqrt(1.0-costh**2.0)*cos(phi)
    v2 = vv*sqrt(1.0-costh**2.0)*sin(phi)
    v3 = vv*costh
    v0 = sqrt(2.0)*sig
    # Solar velocity in the galactic frame (v_pec is a module-level constant)
    vsun = array([0.0,v_LSR,0.0])+v_pec-v_shift
    vearth = EarthVelocity(day)
    rearth = EarthVector(day)
    r = sqrt(sum(rearth**2.0))   # distance to Earth
    xearth = rearth/r            # unit vector towards Earth
    vsum1 = v1+vearth[0]
    vsum2 = v2+vearth[1]
    vsum3 = v3+vearth[2]
    normvsum = sqrt(vsum1**2 + vsum2**2 + vsum3**2)
    xsum1 = vsum1/normvsum
    xsum2 = vsum2/normvsum
    xsum3 = vsum3/normvsum
    # rx = 1 - cos(angle between Earth direction and summed-velocity direction)
    rx = 1.0-(xearth[0]*xsum1 + xearth[1]*xsum2 + xearth[2]*xsum3)
    vrx = (v1+vearth[0]+vsun[0])*(xearth[0]-xsum1)\
        +(v2+vearth[1]+vsun[1])*(xearth[1]-xsum2)\
        +(v3+vearth[2]+vsun[2])*(xearth[2]-xsum3)
    J = (-2*bigG*Msun/(r*v0**2.0*vv))*(vrx/rx)
    return J
| [
"numpy.trapz",
"numpy.sqrt",
"numpy.arccos",
"numpy.size",
"numpy.floor",
"numpy.exp",
"numpy.array",
"numpy.cos",
"numpy.sin",
"numpy.shape"
] | [((3541, 3565), 'numpy.array', 'array', (['[11.1, 12.2, 7.3]'], {}), '([11.1, 12.2, 7.3])\n', (3546, 3565), False, 'from numpy import array, trapz\n'), ((3701, 3736), 'numpy.array', 'np.array', (['[-5.5303, 59.575, 29.812]'], {}), '([-5.5303, 59.575, 29.812])\n', (3709, 3736), True, 'import numpy as np\n'), ((3750, 3789), 'numpy.array', 'np.array', (['[266.141, -13.3485, 179.3212]'], {}), '([266.141, -13.3485, 179.3212])\n', (3758, 3789), True, 'import numpy as np\n'), ((3793, 3824), 'numpy.array', 'array', (['[0.9941, 0.1088, 0.0042]'], {}), '([0.9941, 0.1088, 0.0042])\n', (3798, 3824), False, 'from numpy import array, trapz\n'), ((3828, 3861), 'numpy.array', 'array', (['[-0.0504, 0.4946, -0.8677]'], {}), '([-0.0504, 0.4946, -0.8677])\n', (3833, 3861), False, 'from numpy import array, trapz\n'), ((1779, 1845), 'numpy.sqrt', 'sqrt', (['(c1 ** 2 + 7.0 / 3.0 * pi ** 2.0 * 0.52 ** 2.0 - 5 * s ** 2.0)'], {}), '(c1 ** 2 + 7.0 / 3.0 * pi ** 2.0 * 0.52 ** 2.0 - 5 * s ** 2.0)\n', (1783, 1845), False, 'from numpy import cos, sin, pi, floor, exp, sqrt, size, zeros, shape, arccos\n'), ((2245, 2253), 'numpy.size', 'size', (['dR'], {}), '(dR)\n', (2249, 2253), False, 'from numpy import cos, sin, pi, floor, exp, sqrt, size, zeros, shape, arccos\n'), ((2965, 2973), 'numpy.size', 'size', (['dR'], {}), '(dR)\n', (2969, 2973), False, 'from numpy import cos, sin, pi, floor, exp, sqrt, size, zeros, shape, arccos\n'), ((4753, 4780), 'numpy.array', 'np.array', (['[0.0, v_LSR, 0.0]'], {}), '([0.0, v_LSR, 0.0])\n', (4761, 4780), True, 'import numpy as np\n'), ((5718, 5743), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0]'], {}), '([0.0, 0.0, 0.0])\n', (5726, 5743), True, 'import numpy as np\n'), ((7904, 7916), 'numpy.sqrt', 'sqrt', (['vv_inf'], {}), '(vv_inf)\n', (7908, 7916), False, 'from numpy import cos, sin, pi, floor, exp, sqrt, size, zeros, shape, arccos\n'), ((9056, 9068), 'numpy.sqrt', 'sqrt', (['vv_inf'], {}), '(vv_inf)\n', (9060, 9068), False, 'from numpy import cos, sin, pi, 
floor, exp, sqrt, size, zeros, shape, arccos\n'), ((10408, 10433), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0]'], {}), '([0.0, 0.0, 0.0])\n', (10416, 10433), True, 'import numpy as np\n'), ((10511, 10519), 'numpy.sin', 'sin', (['dec'], {}), '(dec)\n', (10514, 10519), False, 'from numpy import cos, sin, pi, floor, exp, sqrt, size, zeros, shape, arccos\n'), ((16298, 16320), 'numpy.array', 'array', (['[0.0, 0.0, 0.0]'], {}), '([0.0, 0.0, 0.0])\n', (16303, 16320), False, 'from numpy import array, trapz\n'), ((16702, 16744), 'numpy.sqrt', 'sqrt', (['(vsum1 ** 2 + vsum2 ** 2 + vsum3 ** 2)'], {}), '(vsum1 ** 2 + vsum2 ** 2 + vsum3 ** 2)\n', (16706, 16744), False, 'from numpy import cos, sin, pi, floor, exp, sqrt, size, zeros, shape, arccos\n'), ((2440, 2453), 'numpy.arccos', 'arccos', (['gamma'], {}), '(gamma)\n', (2446, 2453), False, 'from numpy import cos, sin, pi, floor, exp, sqrt, size, zeros, shape, arccos\n'), ((3021, 3032), 'numpy.size', 'size', (['sig_E'], {}), '(sig_E)\n', (3025, 3032), False, 'from numpy import cos, sin, pi, floor, exp, sqrt, size, zeros, shape, arccos\n'), ((3306, 3326), 'numpy.trapz', 'trapz', (['dR_smeared', 'E'], {}), '(dR_smeared, E)\n', (3311, 3326), False, 'from numpy import array, trapz\n'), ((5633, 5659), 'numpy.array', 'np.array', (['[0.0, -1.0, 0.0]'], {}), '([0.0, -1.0, 0.0])\n', (5641, 5659), True, 'import numpy as np\n'), ((5953, 5979), 'numpy.floor', 'floor', (['((14 - month) / 12.0)'], {}), '((14 - month) / 12.0)\n', (5958, 5979), False, 'from numpy import cos, sin, pi, floor, exp, sqrt, size, zeros, shape, arccos\n'), ((6313, 6337), 'numpy.array', 'array', (['[0.0, v_LSR, 0.0]'], {}), '([0.0, v_LSR, 0.0])\n', (6318, 6337), False, 'from numpy import array, trapz\n'), ((10283, 10290), 'numpy.cos', 'cos', (['ll'], {}), '(ll)\n', (10286, 10290), False, 'from numpy import cos, sin, pi, floor, exp, sqrt, size, zeros, shape, arccos\n'), ((10445, 10453), 'numpy.cos', 'cos', (['dec'], {}), '(dec)\n', (10448, 10453), False, 'from numpy 
import cos, sin, pi, floor, exp, sqrt, size, zeros, shape, arccos\n'), ((10454, 10461), 'numpy.cos', 'cos', (['ra'], {}), '(ra)\n', (10457, 10461), False, 'from numpy import cos, sin, pi, floor, exp, sqrt, size, zeros, shape, arccos\n'), ((10478, 10486), 'numpy.cos', 'cos', (['dec'], {}), '(dec)\n', (10481, 10486), False, 'from numpy import cos, sin, pi, floor, exp, sqrt, size, zeros, shape, arccos\n'), ((10487, 10494), 'numpy.sin', 'sin', (['ra'], {}), '(ra)\n', (10490, 10494), False, 'from numpy import cos, sin, pi, floor, exp, sqrt, size, zeros, shape, arccos\n'), ((16354, 16362), 'numpy.cos', 'cos', (['phi'], {}), '(phi)\n', (16357, 16362), False, 'from numpy import cos, sin, pi, floor, exp, sqrt, size, zeros, shape, arccos\n'), ((16396, 16404), 'numpy.sin', 'sin', (['phi'], {}), '(phi)\n', (16399, 16404), False, 'from numpy import cos, sin, pi, floor, exp, sqrt, size, zeros, shape, arccos\n'), ((16433, 16442), 'numpy.sqrt', 'sqrt', (['(2.0)'], {}), '(2.0)\n', (16437, 16442), False, 'from numpy import cos, sin, pi, floor, exp, sqrt, size, zeros, shape, arccos\n'), ((1685, 1717), 'numpy.sqrt', 'sqrt', (['(2 * A * 931.5 * 1000 * E_r)'], {}), '(2 * A * 931.5 * 1000 * E_r)\n', (1689, 1717), False, 'from numpy import cos, sin, pi, floor, exp, sqrt, size, zeros, shape, arccos\n'), ((1877, 1902), 'numpy.exp', 'exp', (['(-q * q * s * s / 2.0)'], {}), '(-q * q * s * s / 2.0)\n', (1880, 1902), False, 'from numpy import cos, sin, pi, floor, exp, sqrt, size, zeros, shape, arccos\n'), ((2283, 2292), 'numpy.shape', 'shape', (['dR'], {}), '(dR)\n', (2288, 2292), False, 'from numpy import cos, sin, pi, floor, exp, sqrt, size, zeros, shape, arccos\n'), ((3003, 3012), 'numpy.shape', 'shape', (['dR'], {}), '(dR)\n', (3008, 3012), False, 'from numpy import cos, sin, pi, floor, exp, sqrt, size, zeros, shape, arccos\n'), ((3294, 3306), 'numpy.trapz', 'trapz', (['dR', 'E'], {}), '(dR, E)\n', (3299, 3306), False, 'from numpy import array, trapz\n'), ((4464, 4479), 'numpy.floor', 
'floor', (['(JD + 0.5)'], {}), '(JD + 0.5)\n', (4469, 4479), False, 'from numpy import cos, sin, pi, floor, exp, sqrt, size, zeros, shape, arccos\n'), ((4552, 4562), 'numpy.floor', 'floor', (['MJD'], {}), '(MJD)\n', (4557, 4562), False, 'from numpy import cos, sin, pi, floor, exp, sqrt, size, zeros, shape, arccos\n'), ((5258, 5281), 'numpy.sin', 'sin', (['(2 * g * pi / 180.0)'], {}), '(2 * g * pi / 180.0)\n', (5261, 5281), False, 'from numpy import cos, sin, pi, floor, exp, sqrt, size, zeros, shape, arccos\n'), ((5411, 5433), 'numpy.cos', 'cos', (['(beta * pi / 180.0)'], {}), '(beta * pi / 180.0)\n', (5414, 5433), False, 'from numpy import cos, sin, pi, floor, exp, sqrt, size, zeros, shape, arccos\n'), ((5430, 5471), 'numpy.sin', 'sin', (['(pi / 180.0 * (lambda_sun - lambda_i))'], {}), '(pi / 180.0 * (lambda_sun - lambda_i))\n', (5433, 5471), False, 'from numpy import cos, sin, pi, floor, exp, sqrt, size, zeros, shape, arccos\n'), ((5617, 5636), 'numpy.cos', 'cos', (['(lat * pi / 180)'], {}), '(lat * pi / 180)\n', (5620, 5636), False, 'from numpy import cos, sin, pi, floor, exp, sqrt, size, zeros, shape, arccos\n'), ((7102, 7112), 'numpy.sin', 'sin', (['(2 * g)'], {}), '(2 * g)\n', (7105, 7112), False, 'from numpy import cos, sin, pi, floor, exp, sqrt, size, zeros, shape, arccos\n'), ((7651, 7659), 'numpy.cos', 'cos', (['phi'], {}), '(phi)\n', (7654, 7659), False, 'from numpy import cos, sin, pi, floor, exp, sqrt, size, zeros, shape, arccos\n'), ((7703, 7711), 'numpy.sin', 'sin', (['phi'], {}), '(phi)\n', (7706, 7711), False, 'from numpy import cos, sin, pi, floor, exp, sqrt, size, zeros, shape, arccos\n'), ((9647, 9657), 'numpy.cos', 'cos', (['(2 * g)'], {}), '(2 * g)\n', (9650, 9657), False, 'from numpy import cos, sin, pi, floor, exp, sqrt, size, zeros, shape, arccos\n'), ((10179, 10189), 'numpy.sin', 'sin', (['Omega'], {}), '(Omega)\n', (10182, 10189), False, 'from numpy import cos, sin, pi, floor, exp, sqrt, size, zeros, shape, arccos\n'), ((10234, 10244), 
'numpy.cos', 'cos', (['Omega'], {}), '(Omega)\n', (10237, 10244), False, 'from numpy import cos, sin, pi, floor, exp, sqrt, size, zeros, shape, arccos\n'), ((10266, 10273), 'numpy.cos', 'cos', (['ep'], {}), '(ep)\n', (10269, 10273), False, 'from numpy import cos, sin, pi, floor, exp, sqrt, size, zeros, shape, arccos\n'), ((10274, 10281), 'numpy.sin', 'sin', (['ll'], {}), '(ll)\n', (10277, 10281), False, 'from numpy import cos, sin, pi, floor, exp, sqrt, size, zeros, shape, arccos\n'), ((10337, 10344), 'numpy.sin', 'sin', (['ep'], {}), '(ep)\n', (10340, 10344), False, 'from numpy import cos, sin, pi, floor, exp, sqrt, size, zeros, shape, arccos\n'), ((10345, 10352), 'numpy.sin', 'sin', (['ll'], {}), '(ll)\n', (10348, 10352), False, 'from numpy import cos, sin, pi, floor, exp, sqrt, size, zeros, shape, arccos\n'), ((10567, 10582), 'numpy.floor', 'floor', (['(JD + 0.5)'], {}), '(JD + 0.5)\n', (10572, 10582), False, 'from numpy import cos, sin, pi, floor, exp, sqrt, size, zeros, shape, arccos\n'), ((10618, 10628), 'numpy.floor', 'floor', (['MJD'], {}), '(MJD)\n', (10623, 10628), False, 'from numpy import cos, sin, pi, floor, exp, sqrt, size, zeros, shape, arccos\n'), ((11746, 11755), 'numpy.cos', 'cos', (['latr'], {}), '(latr)\n', (11749, 11755), False, 'from numpy import cos, sin, pi, floor, exp, sqrt, size, zeros, shape, arccos\n'), ((11773, 11779), 'numpy.sin', 'sin', (['t'], {}), '(t)\n', (11776, 11779), False, 'from numpy import cos, sin, pi, floor, exp, sqrt, size, zeros, shape, arccos\n'), ((11788, 11794), 'numpy.cos', 'cos', (['t'], {}), '(t)\n', (11791, 11794), False, 'from numpy import cos, sin, pi, floor, exp, sqrt, size, zeros, shape, arccos\n'), ((11862, 11871), 'numpy.sin', 'sin', (['latr'], {}), '(latr)\n', (11865, 11871), False, 'from numpy import cos, sin, pi, floor, exp, sqrt, size, zeros, shape, arccos\n'), ((16333, 16357), 'numpy.sqrt', 'sqrt', (['(1.0 - costh ** 2.0)'], {}), '(1.0 - costh ** 2.0)\n', (16337, 16357), False, 'from numpy import cos, 
sin, pi, floor, exp, sqrt, size, zeros, shape, arccos\n'), ((16375, 16399), 'numpy.sqrt', 'sqrt', (['(1.0 - costh ** 2.0)'], {}), '(1.0 - costh ** 2.0)\n', (16379, 16399), False, 'from numpy import cos, sin, pi, floor, exp, sqrt, size, zeros, shape, arccos\n'), ((16459, 16483), 'numpy.array', 'array', (['[0.0, v_LSR, 0.0]'], {}), '([0.0, v_LSR, 0.0])\n', (16464, 16483), False, 'from numpy import array, trapz\n'), ((2485, 2528), 'numpy.exp', 'exp', (['(-gamma ** 2.0 / (2 * sig_gamma ** 2.0))'], {}), '(-gamma ** 2.0 / (2 * sig_gamma ** 2.0))\n', (2488, 2528), False, 'from numpy import cos, sin, pi, floor, exp, sqrt, size, zeros, shape, arccos\n'), ((3065, 3074), 'numpy.shape', 'shape', (['dR'], {}), '(dR)\n', (3070, 3074), False, 'from numpy import cos, sin, pi, floor, exp, sqrt, size, zeros, shape, arccos\n'), ((3163, 3202), 'numpy.exp', 'exp', (['(-Ediff ** 2.0 / (2 * sig_E ** 2.0))'], {}), '(-Ediff ** 2.0 / (2 * sig_E ** 2.0))\n', (3166, 3202), False, 'from numpy import cos, sin, pi, floor, exp, sqrt, size, zeros, shape, arccos\n'), ((5224, 5243), 'numpy.sin', 'sin', (['(g * pi / 180.0)'], {}), '(g * pi / 180.0)\n', (5227, 5243), False, 'from numpy import cos, sin, pi, floor, exp, sqrt, size, zeros, shape, arccos\n'), ((5999, 6025), 'numpy.floor', 'floor', (['((14 - month) / 12.0)'], {}), '((14 - month) / 12.0)\n', (6004, 6025), False, 'from numpy import cos, sin, pi, floor, exp, sqrt, size, zeros, shape, arccos\n'), ((6164, 6185), 'numpy.floor', 'floor', (['(year_r / 400.0)'], {}), '(year_r / 400.0)\n', (6169, 6185), False, 'from numpy import cos, sin, pi, floor, exp, sqrt, size, zeros, shape, arccos\n'), ((6549, 6556), 'numpy.cos', 'cos', (['th'], {}), '(th)\n', (6552, 6556), False, 'from numpy import cos, sin, pi, floor, exp, sqrt, size, zeros, shape, arccos\n'), ((6607, 6614), 'numpy.sin', 'sin', (['th'], {}), '(th)\n', (6610, 6614), False, 'from numpy import cos, sin, pi, floor, exp, sqrt, size, zeros, shape, arccos\n'), ((7165, 7172), 'numpy.cos', 'cos', 
(['nu'], {}), '(nu)\n', (7168, 7172), False, 'from numpy import cos, sin, pi, floor, exp, sqrt, size, zeros, shape, arccos\n'), ((7212, 7228), 'numpy.cos', 'cos', (['(lamb_p + nu)'], {}), '(lamb_p + nu)\n', (7215, 7228), False, 'from numpy import cos, sin, pi, floor, exp, sqrt, size, zeros, shape, arccos\n'), ((7630, 7654), 'numpy.sqrt', 'sqrt', (['(1.0 - costh ** 2.0)'], {}), '(1.0 - costh ** 2.0)\n', (7634, 7654), False, 'from numpy import cos, sin, pi, floor, exp, sqrt, size, zeros, shape, arccos\n'), ((7682, 7706), 'numpy.sqrt', 'sqrt', (['(1.0 - costh ** 2.0)'], {}), '(1.0 - costh ** 2.0)\n', (7686, 7706), False, 'from numpy import cos, sin, pi, floor, exp, sqrt, size, zeros, shape, arccos\n'), ((9630, 9636), 'numpy.cos', 'cos', (['g'], {}), '(g)\n', (9633, 9636), False, 'from numpy import cos, sin, pi, floor, exp, sqrt, size, zeros, shape, arccos\n'), ((1846, 1858), 'numpy.sin', 'sin', (['(q * R_1)'], {}), '(q * R_1)\n', (1849, 1858), False, 'from numpy import cos, sin, pi, floor, exp, sqrt, size, zeros, shape, arccos\n'), ((5362, 5403), 'numpy.sin', 'sin', (['(pi / 180.0 * (lambda_sun - lambda_0))'], {}), '(pi / 180.0 * (lambda_sun - lambda_0))\n', (5365, 5403), False, 'from numpy import cos, sin, pi, floor, exp, sqrt, size, zeros, shape, arccos\n'), ((6125, 6146), 'numpy.floor', 'floor', (['(year_r / 100.0)'], {}), '(year_r / 100.0)\n', (6130, 6146), False, 'from numpy import cos, sin, pi, floor, exp, sqrt, size, zeros, shape, arccos\n'), ((6679, 6690), 'numpy.cos', 'cos', (['(2 * th)'], {}), '(2 * th)\n', (6682, 6690), False, 'from numpy import cos, sin, pi, floor, exp, sqrt, size, zeros, shape, arccos\n'), ((6738, 6749), 'numpy.sin', 'sin', (['(2 * th)'], {}), '(2 * th)\n', (6741, 6749), False, 'from numpy import cos, sin, pi, floor, exp, sqrt, size, zeros, shape, arccos\n'), ((7067, 7073), 'numpy.sin', 'sin', (['g'], {}), '(g)\n', (7070, 7073), False, 'from numpy import cos, sin, pi, floor, exp, sqrt, size, zeros, shape, arccos\n'), ((7192, 7208), 
'numpy.sin', 'sin', (['(lamb_p + nu)'], {}), '(lamb_p + nu)\n', (7195, 7208), False, 'from numpy import cos, sin, pi, floor, exp, sqrt, size, zeros, shape, arccos\n'), ((10137, 10147), 'numpy.sin', 'sin', (['(2 * g)'], {}), '(2 * g)\n', (10140, 10147), False, 'from numpy import cos, sin, pi, floor, exp, sqrt, size, zeros, shape, arccos\n'), ((11703, 11712), 'numpy.sin', 'sin', (['latr'], {}), '(latr)\n', (11706, 11712), False, 'from numpy import cos, sin, pi, floor, exp, sqrt, size, zeros, shape, arccos\n'), ((11721, 11727), 'numpy.sin', 'sin', (['t'], {}), '(t)\n', (11724, 11727), False, 'from numpy import cos, sin, pi, floor, exp, sqrt, size, zeros, shape, arccos\n'), ((11728, 11737), 'numpy.sin', 'sin', (['latr'], {}), '(latr)\n', (11731, 11737), False, 'from numpy import cos, sin, pi, floor, exp, sqrt, size, zeros, shape, arccos\n'), ((11812, 11818), 'numpy.cos', 'cos', (['t'], {}), '(t)\n', (11815, 11818), False, 'from numpy import cos, sin, pi, floor, exp, sqrt, size, zeros, shape, arccos\n'), ((11819, 11828), 'numpy.cos', 'cos', (['latr'], {}), '(latr)\n', (11822, 11828), False, 'from numpy import cos, sin, pi, floor, exp, sqrt, size, zeros, shape, arccos\n'), ((11837, 11846), 'numpy.cos', 'cos', (['latr'], {}), '(latr)\n', (11840, 11846), False, 'from numpy import cos, sin, pi, floor, exp, sqrt, size, zeros, shape, arccos\n'), ((11847, 11853), 'numpy.sin', 'sin', (['t'], {}), '(t)\n', (11850, 11853), False, 'from numpy import cos, sin, pi, floor, exp, sqrt, size, zeros, shape, arccos\n'), ((1865, 1877), 'numpy.cos', 'cos', (['(q * R_1)'], {}), '(q * R_1)\n', (1868, 1877), False, 'from numpy import cos, sin, pi, floor, exp, sqrt, size, zeros, shape, arccos\n'), ((6105, 6124), 'numpy.floor', 'floor', (['(year_r / 4.0)'], {}), '(year_r / 4.0)\n', (6110, 6124), False, 'from numpy import cos, sin, pi, floor, exp, sqrt, size, zeros, shape, arccos\n'), ((10117, 10123), 'numpy.sin', 'sin', (['g'], {}), '(g)\n', (10120, 10123), False, 'from numpy import cos, sin, 
pi, floor, exp, sqrt, size, zeros, shape, arccos\n'), ((11696, 11702), 'numpy.cos', 'cos', (['t'], {}), '(t)\n', (11699, 11702), False, 'from numpy import cos, sin, pi, floor, exp, sqrt, size, zeros, shape, arccos\n'), ((6576, 6589), 'numpy.sin', 'sin', (['lambda_p'], {}), '(lambda_p)\n', (6579, 6589), False, 'from numpy import cos, sin, pi, floor, exp, sqrt, size, zeros, shape, arccos\n'), ((6634, 6647), 'numpy.sin', 'sin', (['lambda_p'], {}), '(lambda_p)\n', (6637, 6647), False, 'from numpy import cos, sin, pi, floor, exp, sqrt, size, zeros, shape, arccos\n'), ((6690, 6703), 'numpy.cos', 'cos', (['lambda_p'], {}), '(lambda_p)\n', (6693, 6703), False, 'from numpy import cos, sin, pi, floor, exp, sqrt, size, zeros, shape, arccos\n'), ((6707, 6720), 'numpy.sin', 'sin', (['lambda_p'], {}), '(lambda_p)\n', (6710, 6720), False, 'from numpy import cos, sin, pi, floor, exp, sqrt, size, zeros, shape, arccos\n'), ((6749, 6762), 'numpy.sin', 'sin', (['lambda_p'], {}), '(lambda_p)\n', (6752, 6762), False, 'from numpy import cos, sin, pi, floor, exp, sqrt, size, zeros, shape, arccos\n'), ((6766, 6779), 'numpy.cos', 'cos', (['lambda_p'], {}), '(lambda_p)\n', (6769, 6779), False, 'from numpy import cos, sin, pi, floor, exp, sqrt, size, zeros, shape, arccos\n'), ((6046, 6078), 'numpy.floor', 'floor', (['((153 * month_r + 2) / 5.0)'], {}), '((153 * month_r + 2) / 5.0)\n', (6051, 6078), False, 'from numpy import cos, sin, pi, floor, exp, sqrt, size, zeros, shape, arccos\n')] |
#!/usr/bin/python
#-*- coding: utf-8 -*
# SAMPLE FOR SIMPLE CONTROL LOOP TO IMPLEMENT BAXTER_CONTROL MPC ALGORITHMS
"""
MPC sample tracking for Baxter's right limb with specific references.
Authors: <NAME> and <NAME>.
"""
# Built-int imports
import time
import random
# Own imports
import baxter_essentials.baxter_class as bc
import baxter_essentials.transformation as transf
import baxter_control.mpc_controller as b_mpc
# General module imports
import numpy as np
import matplotlib.pyplot as plt
def create_plots(iteration_vector, x_matrix, u_matrix, sample_time, title, name1, name2):
    """
    Render one subplot per row of x_matrix/u_matrix against the iteration
    index, producing a pop-out matplotlib figure with per-axis labels,
    legends, watermark text and a grid.
    """
    figure, axes_list = plt.subplots(x_matrix.shape[0], 1)
    for row, axis in enumerate(axes_list):
        # State (blue) and input (green) traces for this DOF
        axis.plot(iteration_vector, x_matrix[row, :].T, 'b', linewidth=1)
        axis.plot(iteration_vector, u_matrix[row, :].T, 'g', linewidth=1)
        label_idx = row + 1
        # First three subplots may be cartesian positions [m]; the rest are [rad]
        if label_idx <= 3 and name1 != "x":
            axis.set_ylabel("[m]")
        else:
            axis.set_ylabel("[rad]")
        axis.legend(["{}{}".format(name1, label_idx),
                     "{}{}".format(name2, label_idx)])
        # Only outer axes keep their tick labels
        axis.label_outer()
        # Watermark at the lower-right corner of each subplot
        axis.text(0.98,
                  0.02,
                  'SGA-EJGG',
                  verticalalignment='bottom',
                  horizontalalignment='right',
                  transform=axis.transAxes,
                  color='black',
                  fontsize=6
                  )
        axis.grid(color='black', linestyle='-', alpha=0.2, linewidth=1)
    # External background colour, title on top, shared x-label at the bottom
    figure.patch.set_facecolor((0.2, 1, 1))
    axes_list[0].set_title(title)
    axes_list[-1].set_xlabel("Iterations [k] (Ts={} seconds)".format(sample_time))
def calculate_cartesian_vectors(current_thetas):
    """Return the current 6x1 cartesian pose [x, y, z, ax, ay, az] of
    Baxter's right limb from the given joint angles (forward kinematics)."""
    transformation_matrix = bc.BaxterClass().fpk(current_thetas, "right", 7)
    position = transformation_matrix[0:3, 3]
    orientation = transf.Transformation(
        0, 0, 0, [0, 0, 0]).get_fixed_angles_from_tm(transformation_matrix)
    pose = np.concatenate([position, orientation], axis=0)
    return pose.reshape(6, 1)
def test_1_step_response_without_feedback(show_results=True):
    """
    Open-loop demo: apply a constant joint-velocity input to every DOF
    (no controller at all) and plot the resulting drift of Baxter's joint
    states and cartesian pose ("chaos" response).
    """
    # Initial joint states and the constant input applied at every step
    x_k = np.matrix([[0.1], [0.15], [0.2], [0.25], [0.3], [0.35], [0.4]])
    u_k = np.matrix([[0.01], [0.01], [0.01], [0.01], [0.01], [0.01], [0.01]])
    # Placeholder cartesian goal [x_g, y_g, z_g, ax_g, ay_g, az_g]
    # (NOT a real goal, only needed so the goal trace can be plotted)
    cartesian_goal = np.array([0, 0, 0, 0, 0, 0]).reshape(6, 1)
    iteration_vector = []
    x_matrix = np.zeros((x_k.shape[0], 0))
    u_matrix = np.zeros((u_k.shape[0], 0))
    cartesian_matrix = np.zeros((cartesian_goal.shape[0], 0))
    cartesian_goal_matrix = np.zeros((cartesian_goal.shape[0], 0))
    total_time_in_seconds = 5
    sample_time_in_seconds = 0.01
    final_time = time.time() + total_time_in_seconds
    last_sample_instant = 0
    iteration = 0
    # Real-time loop: advance the system once every sample period
    while time.time() < final_time:
        if time.time() - last_sample_instant < sample_time_in_seconds:
            continue  # sample period not yet elapsed
        last_sample_instant = time.time()
        iteration_vector.append(iteration)
        if show_results == True:
            print("Iteration (k): ", iteration)
        iteration = iteration + 1
        # Discrete integrator model: x[k+1] = x[k] + u[k]
        x_k = x_k + u_k
        cartesian_k = calculate_cartesian_vectors(x_k)
        # Accumulate trajectories column by column for the final plots
        x_matrix = np.hstack((x_matrix, x_k))
        u_matrix = np.hstack((u_matrix, u_k))
        cartesian_matrix = np.hstack((cartesian_matrix, cartesian_k))
        cartesian_goal_matrix = np.hstack(
            (cartesian_goal_matrix, cartesian_goal))
    if show_results == True:
        print("iteration_vector:")
        print(iteration_vector)
        print("len(iteration_vector):")
        print(len(iteration_vector))
        print("u_matrix:")
        print(u_matrix)
        print("x_matrix:")
        print(x_matrix)
        print("x_matrix.shape:")
        print(x_matrix.shape)
    create_plots(
        iteration_vector,
        cartesian_matrix,
        cartesian_goal_matrix,
        sample_time_in_seconds,
        "Cartesian Values responses based on step respone no feedback",
        "current",
        "fake-goal"
    )
    create_plots(
        iteration_vector,
        x_matrix,
        u_matrix,
        sample_time_in_seconds,
        "X and U vectors response based on step respone no feedback",
        "x",
        "u"
    )
    plt.show()
def test_2_mpc_first_attempt(show_results=True):
    """
    Closed-loop MPC demo for Baxter's right limb: track a slowly drifting
    cartesian goal with prediction horizon N, inject uniform measurement
    noise, and plot the cartesian and joint-space responses.
    """
    # MPC / simulation settings
    N = 1  # Prediction horizon
    total_time_in_seconds = 20
    sample_time_in_seconds = 0.1
    # Initial joint states and inputs
    x0 = np.array(
        [
            0.39500005288049406,
            -1.2831749290661485,
            -0.18867963690990588,
            2.5905100555414924,
            -0.11428156869746332,
            -1.3506700837331067,
            0.11504855909140603
        ]
    ).reshape(7, 1)
    u0 = np.array([0, 0, 0, 0, 0, 0, 0]).reshape(7, 1)
    nu = u0.shape[0]  # number of inputs == degrees of freedom
    # Base cartesian goal, one column per horizon step
    cartesian_goal = np.array(
        [
            [
                -0.9,
                -1.0,
                1.1,
                0.6660425877100662,
                1.5192944057794895,
                -1.3616725381467032
            ],
        ] * N
    ).transpose().reshape(6, N)
    # ---------- Main Control loop -------------
    x_k = x0
    u_k = u0
    iteration_vector = []
    x_matrix = np.zeros((x_k.shape[0], 0))
    u_matrix = np.zeros((u_k.shape[0], 0))
    cartesian_matrix = np.zeros((cartesian_goal.shape[0], 0))
    cartesian_goal_matrix = np.zeros((cartesian_goal.shape[0], 0))
    # Run a fixed number of discrete iterations rather than a wall-clock loop
    iteration = 0
    total_iterations = int(total_time_in_seconds / sample_time_in_seconds)
    for _ in range(total_iterations):
        iteration_vector.append(iteration)
        if show_results == True:
            print("Iteration (k): ", iteration)
        iteration = iteration + 1
        # Fresh controller each step, then solve the MPC problem
        mpc = b_mpc.MpcController(N, True, True)
        # Sinusoidal drift of the x/y/z goal components
        wave = 0.001 * np.sin(iteration / 5)
        cartesian_goal = cartesian_goal + np.array(
            [
                [wave, wave, wave, 0, 0, 0],
            ] * N
        ).transpose().reshape((6, N))
        dict_results = mpc.execute_mpc(cartesian_goal, x_k)
        u = dict_results["optimal_dthetas"]
        # Apply only the first predicted input of the horizon
        u_k = u[:, 0].reshape((nu, 1))
        # Plant model: simple discrete integrator x[k+1] = x[k] + u[k]
        x_k_plus_1 = x_k + u_k
        # Inject uniform measurement noise (emulates real sensors)
        x_k_plus_1 = x_k_plus_1 + np.array(
            [random.uniform(-0.005, 0.005) for _ in range(7)]
        ).reshape((7, 1))
        x_k = x_k_plus_1
        cartesian_k = calculate_cartesian_vectors(x_k)
        # Accumulate trajectories column by column for the final plots
        x_matrix = np.hstack((x_matrix, x_k))
        u_matrix = np.hstack((u_matrix, u_k))
        cartesian_matrix = np.hstack((cartesian_matrix, cartesian_k))
        cartesian_goal_matrix = np.hstack(
            (cartesian_goal_matrix, cartesian_goal[:, 0].reshape(6, 1)))
    if show_results == True:
        print("len(iteration_vector):")
        print(len(iteration_vector))
        print("iteration_vector:")
        print(iteration_vector)
        print("u_matrix.shape:")
        print(u_matrix.shape)
        print("u_matrix:")
        print(u_matrix)
        print("x_matrix.shape:")
        print(x_matrix.shape)
        print("x_matrix:")
        print(x_matrix)
    create_plots(
        iteration_vector,
        cartesian_matrix,
        cartesian_goal_matrix,
        sample_time_in_seconds,
        "Cartesian Values responses based on MPC with N={}".format(N),
        "current",
        "goal"
    )
    create_plots(
        iteration_vector,
        x_matrix,
        u_matrix,
        sample_time_in_seconds,
        "X and U responses based on MPC with N={}".format(N),
        "x",
        "u"
    )
    plt.show()
def test_3_mpc_with_control_horizon(show_results=True):
    """
    Closed-loop MPC demo with a control horizon M on top of the prediction
    horizon N: the optimisation runs only at the start of each control-horizon
    cycle, and the stored input sequence is applied over the next M steps.
    """
    # MPC / simulation settings
    N = 1  # Prediction horizon
    M = 1  # Control horizon
    m_count = 1  # Control-horizon counter (cycles 1, 2, ..., M, 1, 2, ..., M)
    total_time_in_seconds = 10
    sample_time_in_seconds = 0.1
    # Initial joint states and inputs
    x0 = np.array(
        [
            0.39500005288049406,
            -1.2831749290661485,
            -0.18867963690990588,
            2.5905100555414924,
            -0.11428156869746332,
            -1.3506700837331067,
            0.11504855909140603
        ]
    ).reshape(7, 1)
    u0 = np.array([0, 0, 0, 0, 0, 0, 0]).reshape(7, 1)
    nu = u0.shape[0]  # number of inputs == degrees of freedom
    # Base cartesian goal, one column per horizon step
    cartesian_goal = np.array(
        [
            [
                -0.9,
                -1.0,
                1.1,
                0.6660425877100662,
                1.5192944057794895,
                -1.3616725381467032
            ],
        ] * N
    ).transpose().reshape(6, N)
    # ---------- Main Control loop -------------
    x_k = x0
    u_k = u0
    iteration_vector = []
    x_matrix = np.zeros((x_k.shape[0], 0))
    u_matrix = np.zeros((u_k.shape[0], 0))
    cartesian_matrix = np.zeros((cartesian_goal.shape[0], 0))
    cartesian_goal_matrix = np.zeros((cartesian_goal.shape[0], 0))
    # Run a fixed number of discrete iterations rather than a wall-clock loop
    iteration = 0
    total_iterations = int(total_time_in_seconds / sample_time_in_seconds)
    for _ in range(total_iterations):
        iteration_vector.append(iteration)
        if show_results == True:
            print("Iteration (k): ", iteration)
        iteration = iteration + 1
        # Restart the control-horizon cycle when it is exhausted
        if m_count >= M:
            m_count = 1
        if m_count == 1:
            # Solve the MPC problem only at the start of each cycle
            mpc = b_mpc.MpcController(N, True, True)
            # Goal drift disabled here: amplitude multiplier is 0
            wave = 0 * np.sin(iteration / 5)
            cartesian_goal = cartesian_goal + np.array(
                [
                    [wave, wave, wave, 0, 0, 0],
                ] * N
            ).transpose().reshape((6, N))
            dict_results = mpc.execute_mpc(cartesian_goal, x_k)
            u = dict_results["optimal_dthetas"]
        # Apply the m_count-th input from the stored horizon
        u_k = u[:, m_count - 1].reshape((nu, 1))
        m_count = m_count + 1
        # Plant model: simple discrete integrator x[k+1] = x[k] + u[k]
        x_k_plus_1 = x_k + u_k
        # Inject uniform measurement noise (emulates real sensors)
        x_k_plus_1 = x_k_plus_1 + np.array(
            [random.uniform(-0.005, 0.005) for _ in range(7)]
        ).reshape((7, 1))
        x_k = x_k_plus_1
        cartesian_k = calculate_cartesian_vectors(x_k)
        # Accumulate trajectories column by column for the final plots
        x_matrix = np.hstack((x_matrix, x_k))
        u_matrix = np.hstack((u_matrix, u_k))
        cartesian_matrix = np.hstack((cartesian_matrix, cartesian_k))
        cartesian_goal_matrix = np.hstack(
            (cartesian_goal_matrix, cartesian_goal[:, 0].reshape(6, 1)))
    if show_results == True:
        print("len(iteration_vector):")
        print(len(iteration_vector))
        print("iteration_vector:")
        print(iteration_vector)
        print("u_matrix.shape:")
        print(u_matrix.shape)
        print("u_matrix:")
        print(u_matrix)
        print("x_matrix.shape:")
        print(x_matrix.shape)
        print("x_matrix:")
        print(x_matrix)
    create_plots(
        iteration_vector,
        cartesian_matrix,
        cartesian_goal_matrix,
        sample_time_in_seconds,
        "Cartesian Values responses based on MPC with N={}".format(N),
        "current",
        "goal"
    )
    create_plots(
        iteration_vector,
        x_matrix,
        u_matrix,
        sample_time_in_seconds,
        "X and U responses based on MPC with N={}".format(N),
        "x",
        "u"
    )
    plt.show()
if __name__ == '__main__':
    # Entry point: run only the MPC test that uses a control horizon,
    # with verbose output/plots enabled.
    test_3_mpc_with_control_horizon(True)
| [
"random.uniform",
"baxter_control.mpc_controller.MpcController",
"numpy.hstack",
"baxter_essentials.transformation.Transformation",
"numpy.array",
"numpy.zeros",
"numpy.concatenate",
"numpy.sin",
"numpy.matrix",
"baxter_essentials.baxter_class.BaxterClass",
"time.time",
"matplotlib.pyplot.subp... | [((787, 821), 'matplotlib.pyplot.subplots', 'plt.subplots', (['x_matrix.shape[0]', '(1)'], {}), '(x_matrix.shape[0], 1)\n', (799, 821), True, 'import matplotlib.pyplot as plt\n'), ((3166, 3229), 'numpy.matrix', 'np.matrix', (['[[0.1], [0.15], [0.2], [0.25], [0.3], [0.35], [0.4]]'], {}), '([[0.1], [0.15], [0.2], [0.25], [0.3], [0.35], [0.4]])\n', (3175, 3229), True, 'import numpy as np\n'), ((3240, 3307), 'numpy.matrix', 'np.matrix', (['[[0.01], [0.01], [0.01], [0.01], [0.01], [0.01], [0.01]]'], {}), '([[0.01], [0.01], [0.01], [0.01], [0.01], [0.01], [0.01]])\n', (3249, 3307), True, 'import numpy as np\n'), ((3543, 3570), 'numpy.zeros', 'np.zeros', (['(x_k.shape[0], 0)'], {}), '((x_k.shape[0], 0))\n', (3551, 3570), True, 'import numpy as np\n'), ((3586, 3613), 'numpy.zeros', 'np.zeros', (['(u_k.shape[0], 0)'], {}), '((u_k.shape[0], 0))\n', (3594, 3613), True, 'import numpy as np\n'), ((3637, 3675), 'numpy.zeros', 'np.zeros', (['(cartesian_goal.shape[0], 0)'], {}), '((cartesian_goal.shape[0], 0))\n', (3645, 3675), True, 'import numpy as np\n'), ((3704, 3742), 'numpy.zeros', 'np.zeros', (['(cartesian_goal.shape[0], 0)'], {}), '((cartesian_goal.shape[0], 0))\n', (3712, 3742), True, 'import numpy as np\n'), ((5434, 5444), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5442, 5444), True, 'import matplotlib.pyplot as plt\n'), ((6860, 6887), 'numpy.zeros', 'np.zeros', (['(x_k.shape[0], 0)'], {}), '((x_k.shape[0], 0))\n', (6868, 6887), True, 'import numpy as np\n'), ((6903, 6930), 'numpy.zeros', 'np.zeros', (['(u_k.shape[0], 0)'], {}), '((u_k.shape[0], 0))\n', (6911, 6930), True, 'import numpy as np\n'), ((6954, 6992), 'numpy.zeros', 'np.zeros', (['(cartesian_goal.shape[0], 0)'], {}), '((cartesian_goal.shape[0], 0))\n', (6962, 6992), True, 'import numpy as np\n'), ((7021, 7059), 'numpy.zeros', 'np.zeros', (['(cartesian_goal.shape[0], 0)'], {}), '((cartesian_goal.shape[0], 0))\n', (7029, 7059), True, 'import numpy as np\n'), ((10087, 
10097), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (10095, 10097), True, 'import matplotlib.pyplot as plt\n'), ((11643, 11670), 'numpy.zeros', 'np.zeros', (['(x_k.shape[0], 0)'], {}), '((x_k.shape[0], 0))\n', (11651, 11670), True, 'import numpy as np\n'), ((11686, 11713), 'numpy.zeros', 'np.zeros', (['(u_k.shape[0], 0)'], {}), '((u_k.shape[0], 0))\n', (11694, 11713), True, 'import numpy as np\n'), ((11737, 11775), 'numpy.zeros', 'np.zeros', (['(cartesian_goal.shape[0], 0)'], {}), '((cartesian_goal.shape[0], 0))\n', (11745, 11775), True, 'import numpy as np\n'), ((11804, 11842), 'numpy.zeros', 'np.zeros', (['(cartesian_goal.shape[0], 0)'], {}), '((cartesian_goal.shape[0], 0))\n', (11812, 11842), True, 'import numpy as np\n'), ((15101, 15111), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (15109, 15111), True, 'import matplotlib.pyplot as plt\n'), ((3825, 3836), 'time.time', 'time.time', ([], {}), '()\n', (3834, 3836), False, 'import time\n'), ((3909, 3920), 'time.time', 'time.time', ([], {}), '()\n', (3918, 3920), False, 'import time\n'), ((7535, 7569), 'baxter_control.mpc_controller.MpcController', 'b_mpc.MpcController', (['N', '(True)', '(True)'], {}), '(N, True, True)\n', (7554, 7569), True, 'import baxter_control.mpc_controller as b_mpc\n'), ((8968, 8994), 'numpy.hstack', 'np.hstack', (['(x_matrix, x_k)'], {}), '((x_matrix, x_k))\n', (8977, 8994), True, 'import numpy as np\n'), ((9014, 9040), 'numpy.hstack', 'np.hstack', (['(u_matrix, u_k)'], {}), '((u_matrix, u_k))\n', (9023, 9040), True, 'import numpy as np\n'), ((9068, 9110), 'numpy.hstack', 'np.hstack', (['(cartesian_matrix, cartesian_k)'], {}), '((cartesian_matrix, cartesian_k))\n', (9077, 9110), True, 'import numpy as np\n'), ((13982, 14008), 'numpy.hstack', 'np.hstack', (['(x_matrix, x_k)'], {}), '((x_matrix, x_k))\n', (13991, 14008), True, 'import numpy as np\n'), ((14028, 14054), 'numpy.hstack', 'np.hstack', (['(u_matrix, u_k)'], {}), '((u_matrix, u_k))\n', (14037, 14054), True, 
'import numpy as np\n'), ((14082, 14124), 'numpy.hstack', 'np.hstack', (['(cartesian_matrix, cartesian_k)'], {}), '((cartesian_matrix, cartesian_k))\n', (14091, 14124), True, 'import numpy as np\n'), ((2603, 2619), 'baxter_essentials.baxter_class.BaxterClass', 'bc.BaxterClass', ([], {}), '()\n', (2617, 2619), True, 'import baxter_essentials.baxter_class as bc\n'), ((2720, 2761), 'baxter_essentials.transformation.Transformation', 'transf.Transformation', (['(0)', '(0)', '(0)', '[0, 0, 0]'], {}), '(0, 0, 0, [0, 0, 0])\n', (2741, 2761), True, 'import baxter_essentials.transformation as transf\n'), ((2820, 2883), 'numpy.concatenate', 'np.concatenate', (['[current_position, current_orientation]'], {'axis': '(0)'}), '([current_position, current_orientation], axis=0)\n', (2834, 2883), True, 'import numpy as np\n'), ((3454, 3482), 'numpy.array', 'np.array', (['[0, 0, 0, 0, 0, 0]'], {}), '([0, 0, 0, 0, 0, 0])\n', (3462, 3482), True, 'import numpy as np\n'), ((4024, 4035), 'time.time', 'time.time', ([], {}), '()\n', (4033, 4035), False, 'import time\n'), ((4361, 4394), 'numpy.hstack', 'np.hstack', (['(x_matrix, x_k_plus_1)'], {}), '((x_matrix, x_k_plus_1))\n', (4370, 4394), True, 'import numpy as np\n'), ((4418, 4444), 'numpy.hstack', 'np.hstack', (['(u_matrix, u_k)'], {}), '((u_matrix, u_k))\n', (4427, 4444), True, 'import numpy as np\n'), ((4476, 4518), 'numpy.hstack', 'np.hstack', (['(cartesian_matrix, cartesian_k)'], {}), '((cartesian_matrix, cartesian_k))\n', (4485, 4518), True, 'import numpy as np\n'), ((4555, 4605), 'numpy.hstack', 'np.hstack', (['(cartesian_goal_matrix, cartesian_goal)'], {}), '((cartesian_goal_matrix, cartesian_goal))\n', (4564, 4605), True, 'import numpy as np\n'), ((5952, 6120), 'numpy.array', 'np.array', (['[0.39500005288049406, -1.2831749290661485, -0.18867963690990588, \n 2.5905100555414924, -0.11428156869746332, -1.3506700837331067, \n 0.11504855909140603]'], {}), '([0.39500005288049406, -1.2831749290661485, -0.18867963690990588, \n 
2.5905100555414924, -0.11428156869746332, -1.3506700837331067, \n 0.11504855909140603])\n', (5960, 6120), True, 'import numpy as np\n'), ((6243, 6274), 'numpy.array', 'np.array', (['[0, 0, 0, 0, 0, 0, 0]'], {}), '([0, 0, 0, 0, 0, 0, 0])\n', (6251, 6274), True, 'import numpy as np\n'), ((10735, 10903), 'numpy.array', 'np.array', (['[0.39500005288049406, -1.2831749290661485, -0.18867963690990588, \n 2.5905100555414924, -0.11428156869746332, -1.3506700837331067, \n 0.11504855909140603]'], {}), '([0.39500005288049406, -1.2831749290661485, -0.18867963690990588, \n 2.5905100555414924, -0.11428156869746332, -1.3506700837331067, \n 0.11504855909140603])\n', (10743, 10903), True, 'import numpy as np\n'), ((11026, 11057), 'numpy.array', 'np.array', (['[0, 0, 0, 0, 0, 0, 0]'], {}), '([0, 0, 0, 0, 0, 0, 0])\n', (11034, 11057), True, 'import numpy as np\n'), ((12401, 12435), 'baxter_control.mpc_controller.MpcController', 'b_mpc.MpcController', (['N', '(True)', '(True)'], {}), '(N, True, True)\n', (12420, 12435), True, 'import baxter_control.mpc_controller as b_mpc\n'), ((3948, 3959), 'time.time', 'time.time', ([], {}), '()\n', (3957, 3959), False, 'import time\n'), ((6437, 6536), 'numpy.array', 'np.array', (['([[-0.9, -1.0, 1.1, 0.6660425877100662, 1.5192944057794895, -\n 1.3616725381467032]] * N)'], {}), '([[-0.9, -1.0, 1.1, 0.6660425877100662, 1.5192944057794895, -\n 1.3616725381467032]] * N)\n', (6445, 6536), True, 'import numpy as np\n'), ((11220, 11319), 'numpy.array', 'np.array', (['([[-0.9, -1.0, 1.1, 0.6660425877100662, 1.5192944057794895, -\n 1.3616725381467032]] * N)'], {}), '([[-0.9, -1.0, 1.1, 0.6660425877100662, 1.5192944057794895, -\n 1.3616725381467032]] * N)\n', (11228, 11319), True, 'import numpy as np\n'), ((8383, 8412), 'random.uniform', 'random.uniform', (['(-0.005)', '(0.005)'], {}), '(-0.005, 0.005)\n', (8397, 8412), False, 'import random\n'), ((8434, 8463), 'random.uniform', 'random.uniform', (['(-0.005)', '(0.005)'], {}), '(-0.005, 0.005)\n', (8448, 
8463), False, 'import random\n'), ((8485, 8514), 'random.uniform', 'random.uniform', (['(-0.005)', '(0.005)'], {}), '(-0.005, 0.005)\n', (8499, 8514), False, 'import random\n'), ((8536, 8565), 'random.uniform', 'random.uniform', (['(-0.005)', '(0.005)'], {}), '(-0.005, 0.005)\n', (8550, 8565), False, 'import random\n'), ((8587, 8616), 'random.uniform', 'random.uniform', (['(-0.005)', '(0.005)'], {}), '(-0.005, 0.005)\n', (8601, 8616), False, 'import random\n'), ((8638, 8667), 'random.uniform', 'random.uniform', (['(-0.005)', '(0.005)'], {}), '(-0.005, 0.005)\n', (8652, 8667), False, 'import random\n'), ((8689, 8718), 'random.uniform', 'random.uniform', (['(-0.005)', '(0.005)'], {}), '(-0.005, 0.005)\n', (8703, 8718), False, 'import random\n'), ((13397, 13426), 'random.uniform', 'random.uniform', (['(-0.005)', '(0.005)'], {}), '(-0.005, 0.005)\n', (13411, 13426), False, 'import random\n'), ((13448, 13477), 'random.uniform', 'random.uniform', (['(-0.005)', '(0.005)'], {}), '(-0.005, 0.005)\n', (13462, 13477), False, 'import random\n'), ((13499, 13528), 'random.uniform', 'random.uniform', (['(-0.005)', '(0.005)'], {}), '(-0.005, 0.005)\n', (13513, 13528), False, 'import random\n'), ((13550, 13579), 'random.uniform', 'random.uniform', (['(-0.005)', '(0.005)'], {}), '(-0.005, 0.005)\n', (13564, 13579), False, 'import random\n'), ((13601, 13630), 'random.uniform', 'random.uniform', (['(-0.005)', '(0.005)'], {}), '(-0.005, 0.005)\n', (13615, 13630), False, 'import random\n'), ((13652, 13681), 'random.uniform', 'random.uniform', (['(-0.005)', '(0.005)'], {}), '(-0.005, 0.005)\n', (13666, 13681), False, 'import random\n'), ((13703, 13732), 'random.uniform', 'random.uniform', (['(-0.005)', '(0.005)'], {}), '(-0.005, 0.005)\n', (13717, 13732), False, 'import random\n'), ((7682, 7703), 'numpy.sin', 'np.sin', (['(iteration / 5)'], {}), '(iteration / 5)\n', (7688, 7703), True, 'import numpy as np\n'), ((7731, 7752), 'numpy.sin', 'np.sin', (['(iteration / 5)'], {}), '(iteration / 
5)\n', (7737, 7752), True, 'import numpy as np\n'), ((7780, 7801), 'numpy.sin', 'np.sin', (['(iteration / 5)'], {}), '(iteration / 5)\n', (7786, 7801), True, 'import numpy as np\n'), ((12560, 12581), 'numpy.sin', 'np.sin', (['(iteration / 5)'], {}), '(iteration / 5)\n', (12566, 12581), True, 'import numpy as np\n'), ((12609, 12630), 'numpy.sin', 'np.sin', (['(iteration / 5)'], {}), '(iteration / 5)\n', (12615, 12630), True, 'import numpy as np\n'), ((12658, 12679), 'numpy.sin', 'np.sin', (['(iteration / 5)'], {}), '(iteration / 5)\n', (12664, 12679), True, 'import numpy as np\n')] |
from __future__ import print_function
import time
import numpy as np
import numpy.random as rnd
from pymanopt import Problem
from pymanopt.solvers.steepest_descent import SteepestDescent
from pymanopt.solvers.solver import Solver
def compute_centroid(man, x):
    """
    Return the centroid (Karcher mean) of the points x on the manifold
    man, obtained by a short steepest-descent minimization of the
    Frechet variance.
    """
    def cost(y):
        # Frechet variance: half the sum of squared geodesic distances
        # from y to every point of x.
        total = 0
        for point in x:
            total += man.dist(y, point) ** 2
        return total / 2

    def grad(y):
        # Riemannian gradient of the Frechet variance at y.
        g = man.zerovec(y)
        for point in x:
            g -= man.log(y, point)
        return g

    # XXX: manopt runs a few TR iterations here. For us to do this, we either
    #      need to work out the Hessian of the Frechet variance by hand or
    #      implement approximations for the Hessian to use in the TR solver.
    #      This is because we cannot implement the Frechet variance with
    #      theano and compute the Hessian automatically due to dependency on
    #      the manifold-dependent distance function.
    descent = SteepestDescent(maxiter=15)
    problem = Problem(man, cost=cost, grad=grad, verbosity=0)
    return descent.solve(problem)
class NelderMead(Solver):
    """
    Nelder-Mead minimization algorithm for derivative-free minimization
    based on neldermead.m and centroid.m from the manopt MATLAB package.
    """
    def __init__(self, maxcostevals=None, maxiter=None, reflection=1,
                 expansion=2, contraction=0.5, *args, **kwargs):
        """
        Instantiate Nelder-Mead method solver class.
        Variable attributes (defaults in brackets):
            - maxcostevals (max(5000, 2 * dim))
                Maximum number of allowed cost evaluations
            - maxiter (max(500, 4 * dim))
                Maximum number of allowed iterations
            - reflection (1)
                Determines how far to reflect away from the worst vertex;
                stretched (reflection > 1), compressed (0 < reflection < 1),
                or exact (reflection = 1)
            - expansion (2)
                Factor by which to expand the reflected simplex
            - contraction (0.5)
                Factor by which to contract the reflected simplex
        """
        super(NelderMead, self).__init__(*args, **kwargs)
        # None limits are resolved in solve(), once the manifold
        # dimension is known.
        self._maxcostevals = maxcostevals
        self._maxiter = maxiter
        self._reflection = reflection
        self._expansion = expansion
        self._contraction = contraction
    def solve(self, problem, x=None):
        """
        Perform optimization using a Nelder-Mead minimization algorithm.
        Arguments:
            - problem
                Pymanopt problem setup using the Problem class, this must
                have a .manifold attribute specifying the manifold to optimize
                over, as well as a cost and enough information to compute
                the gradient of that cost.
            - x=None
                Optional parameter. Initial population of elements on the
                manifold. If None then an initial population will be randomly
                generated
        Returns:
            - x
                Local minimum of obj, or if algorithm terminated before
                convergence x will be the point at which it terminated
        """
        man = problem.manifold
        verbosity = problem.verbosity
        objective = problem.cost
        # Choose proper default algorithm parameters. We need to know about the
        # dimension of the manifold to limit the parameter range, so we have to
        # defer proper initialization until this point.
        dim = man.dim
        if self._maxcostevals is None:
            self._maxcostevals = max(1000, 2 * dim)
        if self._maxiter is None:
            self._maxiter = max(2000, 4 * dim)
        # If no initial simplex x is given by the user, generate one at random.
        # A simplex on a dim-dimensional manifold has dim + 1 vertices.
        if x is None:
            x = [man.rand() for i in range(int(dim + 1))]
        elif not hasattr(x, "__iter__"):
            raise ValueError("The initial simplex x must be iterable")
        else:
            # XXX: Is this necessary?
            if len(x) != dim + 1:
                print("The simplex size was adapted to the dimension "
                      "of the manifold")
                x = x[:dim + 1]
        # Compute objective-related quantities for x, and setup a function
        # evaluations counter.
        costs = np.array([objective(xi) for xi in x])
        # NOTE(review): fy is computed but never used afterwards.
        fy = list(costs)
        costevals = dim + 1
        # Sort simplex points by cost.
        order = np.argsort(costs)
        costs = costs[order]
        x = [x[i] for i in order] # XXX: Probably inefficient
        # Iteration counter (at any point, iter is the number of fully executed
        # iterations so far).  NOTE(review): the name shadows the builtin iter.
        iter = 0
        time0 = time.time()
        self._start_optlog()
        while True:
            iter += 1
            if verbosity >= 2:
                print("Cost evals: %7d\t"
                      "Best cost: %+.8e" % (costevals, costs[0]))
            # Sort simplex points by cost (best first, worst last).
            order = np.argsort(costs)
            costs = costs[order]
            x = [x[i] for i in order] # XXX: Probably inefficient
            # Stop on max iterations / max cost evaluations / time budget,
            # as configured on the Solver base class.
            stop_reason = self._check_stopping_criterion(
                time0, iter=iter, costevals=costevals)
            if stop_reason:
                if verbosity >= 1:
                    print(stop_reason)
                    print('')
                break
            # Compute a centroid for the dim best points.
            xbar = compute_centroid(man, x[:-1])
            # Compute the direction for moving along the axis xbar - worst x.
            vec = man.log(xbar, x[-1])
            # Reflection step: move away from the worst vertex through the
            # centroid by the reflection factor.
            xr = man.exp(xbar, -self._reflection * vec)
            costr = objective(xr)
            costevals += 1
            # If the reflected point is honorable, drop the worst point,
            # replace it by the reflected point and start a new iteration.
            if costr >= costs[0] and costr < costs[-2]:
                if verbosity >= 2:
                    print("Reflection")
                costs[-1] = costr
                x[-1] = xr
                continue
            # If the reflected point is better than the best point, expand.
            if costr < costs[0]:
                xe = man.exp(xbar, -self._expansion * vec)
                coste = objective(xe)
                costevals += 1
                if coste < costr:
                    # Expansion improved on the reflection: keep it.
                    if verbosity >= 2:
                        print("Expansion")
                    costs[-1] = coste
                    x[-1] = xe
                    continue
                else:
                    # Expansion did not help: fall back to the reflected point.
                    if verbosity >= 2:
                        print("Reflection (failed expansion)")
                    costs[-1] = costr
                    x[-1] = xr
                    continue
            # If the reflected point is worse than the second to worst point,
            # contract.
            if costr >= costs[-2]:
                if costr < costs[-1]:
                    # do an outside contraction (towards xbar, on the
                    # reflected side)
                    xoc = man.exp(xbar, -self._contraction * vec)
                    costoc = objective(xoc)
                    costevals += 1
                    if costoc <= costr:
                        if verbosity >= 2:
                            print("Outside contraction")
                        costs[-1] = costoc
                        x[-1] = xoc
                        continue
                else:
                    # do an inside contraction (towards the worst vertex)
                    xic = man.exp(xbar, self._contraction * vec)
                    costic = objective(xic)
                    costevals += 1
                    if costic <= costs[-1]:
                        if verbosity >= 2:
                            print("Inside contraction")
                        costs[-1] = costic
                        x[-1] = xic
                        continue
            # If we get here, shrink the simplex around x[0] (the best vertex),
            # replacing every other vertex with its pairmean with x[0].
            if verbosity >= 2:
                print("Shrinkage")
            x0 = x[0]
            for i in np.arange(1, dim + 1):
                x[i] = man.pairmean(x0, x[i])
                costs[i] = objective(x[i])
            costevals += dim
        if self._logverbosity <= 0:
            return x[0]
        else:
            self._stop_optlog(x[0], objective(x[0]), stop_reason, time0,
                              costevals=costevals, iter=iter)
            return x[0], self._optlog
| [
"pymanopt.Problem",
"pymanopt.solvers.steepest_descent.SteepestDescent",
"numpy.argsort",
"time.time",
"numpy.arange"
] | [((1118, 1145), 'pymanopt.solvers.steepest_descent.SteepestDescent', 'SteepestDescent', ([], {'maxiter': '(15)'}), '(maxiter=15)\n', (1133, 1145), False, 'from pymanopt.solvers.steepest_descent import SteepestDescent\n'), ((1160, 1216), 'pymanopt.Problem', 'Problem', (['man'], {'cost': 'objective', 'grad': 'gradient', 'verbosity': '(0)'}), '(man, cost=objective, grad=gradient, verbosity=0)\n', (1167, 1216), False, 'from pymanopt import Problem\n'), ((4674, 4691), 'numpy.argsort', 'np.argsort', (['costs'], {}), '(costs)\n', (4684, 4691), True, 'import numpy as np\n'), ((4929, 4940), 'time.time', 'time.time', ([], {}), '()\n', (4938, 4940), False, 'import time\n'), ((5218, 5235), 'numpy.argsort', 'np.argsort', (['costs'], {}), '(costs)\n', (5228, 5235), True, 'import numpy as np\n'), ((8279, 8300), 'numpy.arange', 'np.arange', (['(1)', '(dim + 1)'], {}), '(1, dim + 1)\n', (8288, 8300), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
from ctypes import addressof
import cv2
import numpy as np
import pickle
import requests
import json
import urllib
import hashlib
import urllib.parse
from hashlib import md5
import sys
from xlrd import open_workbook # xlrd用于读取xld
import xlwt # 用于写入xls
import PIL
from PIL import ImageFile
ImageFile.LOAD_TRUNCATED_IMAGES = True
PIL.Image.MAX_IMAGE_PIXELS = None
import matplotlib.pyplot as plt
from scipy.interpolate import interp1d #引入scipy中的一维插值库
from scipy.interpolate import griddata#引入scipy中的二维插值库
def load_map(dir, out_path='BJ.pkl'):
    """
    Load the Beijing map image, build a binary mask of the green land
    parcels (pixels with RGB close to (0, 97, 0)) and pickle the mask.

    Parameters
    ----------
    dir : str
        Path of the map image, readable by cv2.imread.
    out_path : str, optional
        Destination pickle file. Defaults to 'BJ.pkl', which matches the
        previously hard-coded output path, so existing callers are
        unaffected.
    """
    bj_map = cv2.imread(dir)
    # OpenCV loads images as BGR; convert so the channel tests are RGB.
    bj_map = cv2.cvtColor(bj_map, cv2.COLOR_BGR2RGB)
    # Green parcels: R in [0, 10], G in [95, 100], B in [0, 10].
    green_mask = (bj_map[:, :, 0] >= 0) * \
        (bj_map[:, :, 0] <= 10) * \
        (bj_map[:, :, 1] >= 95) * \
        (bj_map[:, :, 1] <= 100) * \
        (bj_map[:, :, 2] >= 0) * \
        (bj_map[:, :, 2] <= 10)
    # 1 where the pixel is a green parcel, 0 elsewhere.
    use_map = np.zeros_like(green_mask, dtype=np.uint8)
    use_map[green_mask] = 1
    with open(out_path, 'wb') as f:
        pickle.dump(use_map, f)
def get_coord(address, ak='<KEY>'):
    """
    Geocode an address with the Baidu Maps geocoding v3 web API and
    return its location dict (keys 'lng' and 'lat').
    """
    base = 'http://api.map.baidu.com/geocoding/v3/'
    query = '?address={inputAddress}&output=json&ak={myAk}'.format(
        inputAddress=address, myAk=ak)
    response = requests.get(base + query)
    payload = json.loads(response.text)
    return payload['result']['location']
def load_xls():
    """
    Read the address fragments (columns 1-4) of every row in the 2020
    rent spreadsheet, concatenate them into full address strings and
    pickle the resulting list to './data/20年房租address.pkl'.

    Returns None.
    """
    workbook = open_workbook('./data/20年房租.xls')  # open the xls workbook
    sheet = workbook.sheet_by_index(0)  # all records live on the first sheet
    print(sheet.name, sheet.nrows, sheet.ncols)  # name, row count, column count
    # Columns 1-4 hold the pieces of each address.
    column_1 = sheet.col_values(1)
    column_2 = sheet.col_values(2)
    column_3 = sheet.col_values(3)
    column_4 = sheet.col_values(4)
    # Row 0 (the header) is concatenated too, matching the original
    # behaviour; downstream consumers skip the first entry.
    address = [column_1[i] + column_2[i] + column_3[i] + column_4[i]
               for i in range(sheet.nrows)]
    with open('./data/20年房租address.pkl', 'wb') as f:
        # BUG FIX: the original called pickle.dump(add, f) where `add`
        # was never defined, raising NameError before anything was saved.
        pickle.dump(address, f)
    return None
def get_coords_from_xls(add_file):
    """
    Geocode every pickled address (the first entry is a header and is
    skipped) and pickle the resulting [lng, lat] pairs to 'coord.pkl'.
    """
    with open(add_file, 'rb') as f:
        addresses = pickle.load(f)
    coords = []
    # addresses[0] is the header row; geocode the rest.
    for entry in addresses[1:]:
        location = get_coord(address=entry)
        coords.append([location['lng'], location['lat']])
    with open('coord.pkl', 'wb') as f:
        pickle.dump(coords, f)
def cal_distance(A, B):
    """
    Given two points A and B as [longitude, latitude] in degrees, return
    [dx, dy] in km, treating the Beijing area as a flat plane so the
    great-circle distance is approximated by a straight line
    (Earth radius R = 6378.137 km):
        dx = (A longitude - B longitude) * R * pi / 180
        dy = (A latitude - B latitude) * R * pi / 180
    """
    EARTH_RADIUS_KM = 6378.137
    lon_a, lat_a = A
    lon_b, lat_b = B
    dx = EARTH_RADIUS_KM * (lon_a - lon_b) * np.pi / 180
    dy = EARTH_RADIUS_KM * (lat_a - lat_b) * np.pi / 180
    return [dx, dy]
def cal_coord(A):
    '''
    Compute the image-matrix index of point A = [A_longitude, A_latitude]
    from two reference points with known indices:
      - "Tiananmen" is at [11750, 10250] in BJ_map_crop.jpg,
        lon/lat [116.403882, 39.914824]
      - "Shuangqing Rd / Heqing Rd intersection" is at [7830, 8250],
        lon/lat [116.343885, 40.004397]
    Image-matrix axis conventions:
      axis 0, index +1 -> one cell south, latitude decreases, displacement -dy km
      axis 0, index -1 -> one cell north, latitude increases, displacement  dy km
      axis 1, index +1 -> one cell east,  longitude increases, displacement  dx km
      axis 1, index -1 -> one cell west,  longitude decreases, displacement -dx km
    '''
    # Reference point 1: Tiananmen (lon/lat and its image index).
    Tiananmen = [116.403882,39.914824]
    Tam_coord = [11750,10250]
    # Reference point 2: Shuangqing/Heqing intersection.
    Shuangqingheqing = [116.343885,40.004397]
    Sqhq_coord = [7830,8250]
    # Wudaokou = [116.337742,39.992894]
    # Xizhimen = [116.355426,39.940474]
    # Per-cell steps in the longitude (dx) and latitude (dy) directions.
    # Presumably km per image cell, matching cal_distance's km output —
    # TODO confirm units.
    dx = 0.003339417744561774 # longitude direction
    dy = 0.0025436787624554245
    # Displacements (km) from each reference point to A.
    dx1,dy1 = cal_distance(Tiananmen,A)
    dx2,dy2 = cal_distance(Shuangqingheqing,A)
    # Average the two reference-based displacements and truncate to int.
    dx1 = int(0.5*dx1 + 0.5*dx2)
    dy1 = int(0.5*dy1 + 0.5*dy2)
    # NOTE(review): only Tiananmen's index is used as the anchor below,
    # even though both references contribute to the displacement.
    A_coord = [0,0]
    A_coord[0] = Tam_coord[0] + int(np.round(dy1/dy))
    A_coord[1] = Tam_coord[1] - int(np.round(dx1/dx))
    return A_coord
def get_coord_railway_station(file=r'C:\Users\44670\Documents\GitHub\ABMRL\data\地铁站点.xls'):
    """
    Read subway-station longitude/latitude pairs (columns 4 and 5) from
    the Excel file, convert each to an image-matrix index via
    cal_coord(), drop stations outside the map bounds, pickle the
    surviving indices to 'railway_station_coord.pkl' and return them.
    """
    # Image bounds of the cropped Beijing map (axis 0, axis 1).
    max_x = 21290
    max_y = 20890
    workbook = open_workbook(file)
    sheet = workbook.sheet_by_index(0)
    longitude = sheet.col_values(4)
    latitude = sheet.col_values(5)
    longitude.pop(0)  # drop the header row
    latitude.pop(0)   # drop the header row
    railway_station_coord = []
    for lng, lat in zip(longitude, latitude):
        coord = cal_coord([lng, lat])
        # Discard stations whose index falls outside the simulated map.
        if coord[0] < 0 or coord[0] > max_x or coord[1] < 0 or coord[1] > max_y:
            continue
        railway_station_coord.append(coord)
    with open('railway_station_coord.pkl', 'wb') as f:
        pickle.dump(railway_station_coord, f)
    return railway_station_coord
    # Removed: an unreachable copy-pasted code fragment (a triple-quoted
    # string after the return) and the unused sheet_name/sheet1 locals.
def plot_railway_station(r_coord_list):
    """
    Draw every railway station as a black 100x100 square on the Beijing
    map image and display the top-left 20000x20000 region.

    Parameters
    ----------
    r_coord_list : list of [x, y]
        Image-matrix indices, e.g. from get_coord_railway_station().
    """
    # BUG FIX: plt.imread can return a read-only array (for PIL-decoded
    # images), which makes the in-place assignment below raise; copy to
    # get a writable array.
    BJ = plt.imread('BJ_map_crop.jpg').copy()
    for x, y in r_coord_list:
        BJ[x:x + 100, y:y + 100, :] = 0
    plt.imshow(BJ[0:20000, 0:20000, :])
    plt.pause(5000)
def rent_index_price(file=r'./data/20年房租coord.pkl'):
    '''
    Compute the image-matrix index of each 2020 rent record, the average
    transaction price per rented unit, and interpolate the (index, price)
    samples onto a (21290, 20890) grid to obtain a price-distribution map.
    Writes './data/20年房租index+price.pkl' and './data/val_map_origin.pkl'.
    '''
    # Image bounds of the cropped Beijing map (axis 0, axis 1).
    max_x = 21290
    max_y = 20890
    # Load the lon/lat coordinates.
    with open(file,'rb') as f:
        rent_2020 = pickle.load(f) # 65535 records of [lng, lat]
    # Load the rent prices from the spreadsheet.
    file = r'./data/20年房租.xls'
    workbook = open_workbook(file)  # open the xls workbook
    sheet = workbook.sheet_by_index(0)  # all records live on the first sheet
    c16 = sheet.col_values(16)  # number of units leased
    c17 = sheet.col_values(17)  # total leased area
    c18 = sheet.col_values(18)  # average price per square metre
    c16.pop(0)  # drop the header row
    c17.pop(0)  # drop the header row
    c18.pop(0)  # drop the header row
    # Average price per unit = total area * price-per-sqm / unit count.
    rent_price = np.array(c17) * np.array(c18) / np.array(c16)  # 65535 values
    # Combine the data, discarding records outside the map.
    # NOTE(review): this local shadows the function's own name.
    rent_index_price = []
    for i in range(len(rent_2020)):
        # Convert lon/lat to an image-matrix index.
        x,y = rent_2020[i]
        coord = cal_coord([x,y])
        # Skip points outside the simulation boundary.
        if coord[0]<0 or coord[0]>max_x or coord[1]<0 or coord[1]>max_y:
            continue
        # Keep the valid [row, col, price] record.
        rent_index_price.append([coord[0],coord[1],rent_price[i]])
    with open('./data/20年房租index+price.pkl','wb') as f:
        pickle.dump(rent_index_price,f)
    r_i_p = np.array(rent_index_price)
    xy = r_i_p[:,0:2]
    p = r_i_p[:,2]
    # Interpolate the scattered samples onto the full grid; cells outside
    # the convex hull get fill_value=5.
    grid_x, grid_y = np.mgrid[0:max_x, 0:max_y]
    val_map_origin = griddata(xy, p, (grid_x, grid_y), method='cubic',fill_value=5)
    with open('./data/val_map_origin.pkl','wb') as f:
        pickle.dump(val_map_origin,f)
    print(val_map_origin.shape)
if __name__ == '__main__':
    # Entry point: build the interpolated 2020 rent-price map.
    rent_index_price()
| [
"matplotlib.pyplot.imshow",
"json.loads",
"pickle.dump",
"xlrd.open_workbook",
"matplotlib.pyplot.imread",
"scipy.interpolate.griddata",
"pickle.load",
"requests.get",
"numpy.array",
"cv2.cvtColor",
"matplotlib.pyplot.pause",
"numpy.zeros_like",
"numpy.round",
"cv2.imread"
] | [((587, 602), 'cv2.imread', 'cv2.imread', (['dir'], {}), '(dir)\n', (597, 602), False, 'import cv2\n'), ((616, 655), 'cv2.cvtColor', 'cv2.cvtColor', (['BJ_map', 'cv2.COLOR_BGR2RGB'], {}), '(BJ_map, cv2.COLOR_BGR2RGB)\n', (628, 655), False, 'import cv2\n'), ((885, 917), 'numpy.zeros_like', 'np.zeros_like', (['a'], {'dtype': 'np.uint8'}), '(a, dtype=np.uint8)\n', (898, 917), True, 'import numpy as np\n'), ((1244, 1261), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (1256, 1261), False, 'import requests\n'), ((1271, 1291), 'json.loads', 'json.loads', (['res.text'], {}), '(res.text)\n', (1281, 1291), False, 'import json\n'), ((1404, 1437), 'xlrd.open_workbook', 'open_workbook', (['"""./data/20年房租.xls"""'], {}), "('./data/20年房租.xls')\n", (1417, 1437), False, 'from xlrd import open_workbook\n'), ((4217, 4236), 'xlrd.open_workbook', 'open_workbook', (['file'], {}), '(file)\n', (4230, 4236), False, 'from xlrd import open_workbook\n'), ((5388, 5417), 'matplotlib.pyplot.imread', 'plt.imread', (['"""BJ_map_crop.jpg"""'], {}), "('BJ_map_crop.jpg')\n", (5398, 5417), True, 'import matplotlib.pyplot as plt\n'), ((5485, 5520), 'matplotlib.pyplot.imshow', 'plt.imshow', (['BJ[0:20000, 0:20000, :]'], {}), '(BJ[0:20000, 0:20000, :])\n', (5495, 5520), True, 'import matplotlib.pyplot as plt\n'), ((5523, 5538), 'matplotlib.pyplot.pause', 'plt.pause', (['(5000)'], {}), '(5000)\n', (5532, 5538), True, 'import matplotlib.pyplot as plt\n'), ((5898, 5917), 'xlrd.open_workbook', 'open_workbook', (['file'], {}), '(file)\n', (5911, 5917), False, 'from xlrd import open_workbook\n'), ((6753, 6779), 'numpy.array', 'np.array', (['rent_index_price'], {}), '(rent_index_price)\n', (6761, 6779), True, 'import numpy as np\n'), ((6895, 6958), 'scipy.interpolate.griddata', 'griddata', (['xy', 'p', '(grid_x, grid_y)'], {'method': '"""cubic"""', 'fill_value': '(5)'}), "(xy, p, (grid_x, grid_y), method='cubic', fill_value=5)\n", (6903, 6958), False, 'from scipy.interpolate import griddata\n'), 
((979, 1002), 'pickle.dump', 'pickle.dump', (['use_map', 'f'], {}), '(use_map, f)\n', (990, 1002), False, 'import pickle\n'), ((2123, 2142), 'pickle.dump', 'pickle.dump', (['add', 'f'], {}), '(add, f)\n', (2134, 2142), False, 'import pickle\n'), ((2296, 2310), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (2307, 2310), False, 'import pickle\n'), ((2559, 2580), 'pickle.dump', 'pickle.dump', (['coord', 'f'], {}), '(coord, f)\n', (2570, 2580), False, 'import pickle\n'), ((5034, 5071), 'pickle.dump', 'pickle.dump', (['railway_station_coord', 'f'], {}), '(railway_station_coord, f)\n', (5045, 5071), False, 'import pickle\n'), ((5803, 5817), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (5814, 5817), False, 'import pickle\n'), ((6245, 6258), 'numpy.array', 'np.array', (['c16'], {}), '(c16)\n', (6253, 6258), True, 'import numpy as np\n'), ((6704, 6736), 'pickle.dump', 'pickle.dump', (['rent_index_price', 'f'], {}), '(rent_index_price, f)\n', (6715, 6736), False, 'import pickle\n'), ((7020, 7050), 'pickle.dump', 'pickle.dump', (['val_map_origin', 'f'], {}), '(val_map_origin, f)\n', (7031, 7050), False, 'import pickle\n'), ((3921, 3939), 'numpy.round', 'np.round', (['(dy1 / dy)'], {}), '(dy1 / dy)\n', (3929, 3939), True, 'import numpy as np\n'), ((3975, 3993), 'numpy.round', 'np.round', (['(dx1 / dx)'], {}), '(dx1 / dx)\n', (3983, 3993), True, 'import numpy as np\n'), ((6213, 6226), 'numpy.array', 'np.array', (['c17'], {}), '(c17)\n', (6221, 6226), True, 'import numpy as np\n'), ((6229, 6242), 'numpy.array', 'np.array', (['c18'], {}), '(c18)\n', (6237, 6242), True, 'import numpy as np\n')] |
import numpy as np
from hamiltonian import SingleParticle, DiscreteSpace
def test_solver():
    """
    Smoke-test SingleParticle.solve for harmonic-oscillator potentials in
    1D, 2D and 3D, with both the 'eigsh' and 'lobpcg' eigensolvers,
    printing the wall-clock time and the eigenvalues for each case.
    """
    import time
    # define test parameters
    n_eigs = 10
    support = (-10, 10)
    dtype = np.float64

    # Harmonic-oscillator potentials, V = 1/2 * r^2, built per dimension
    # via outer sums so they broadcast over the grid axes.
    def potential_1d(x):
        return 1 / 2 * x ** 2

    # BUG FIX: the original bound this to the misspelled name
    # `opotential_2d`, so the 2D cases below raised NameError.
    def potential_2d(x, y):
        return 1 / 2 * np.add.outer(x ** 2, y ** 2)

    def potential_3d(x, y, z):
        return 1 / 2 * np.add.outer(np.add.outer(x ** 2, y ** 2), z ** 2)

    test_suite_1 = [dict(dim=1, v=potential_1d, grid=1000, solver='eigsh'),
                    dict(dim=1, v=potential_1d, grid=1000, solver='lobpcg'),
                    dict(dim=2, v=potential_2d, grid=200, solver='eigsh'),
                    dict(dim=2, v=potential_2d, grid=200, solver='lobpcg'),
                    dict(dim=3, v=potential_3d, grid=25, solver='eigsh'),
                    dict(dim=3, v=potential_3d, grid=25, solver='lobpcg')]
    # run tests
    for t in test_suite_1:
        print("-" * 20)
        print("Solving with", t)
        dim = t['dim']
        v = t['v']
        grid = t['grid']
        solver = t['solver']
        space = DiscreteSpace(dim, support, grid, dtype)
        ham = SingleParticle(space, v, solver=solver)
        ti = time.time()
        eigs, vecs = ham.solve(n_eigs)
        tf = time.time()
        print(f"{tf - ti:0.3} seconds to solve.")
        print("Eigenvals:", eigs)
| [
"hamiltonian.DiscreteSpace",
"numpy.add.outer",
"time.time",
"hamiltonian.SingleParticle"
] | [((1080, 1120), 'hamiltonian.DiscreteSpace', 'DiscreteSpace', (['dim', 'support', 'grid', 'dtype'], {}), '(dim, support, grid, dtype)\n', (1093, 1120), False, 'from hamiltonian import SingleParticle, DiscreteSpace\n'), ((1135, 1174), 'hamiltonian.SingleParticle', 'SingleParticle', (['space', 'v'], {'solver': 'solver'}), '(space, v, solver=solver)\n', (1149, 1174), False, 'from hamiltonian import SingleParticle, DiscreteSpace\n'), ((1188, 1199), 'time.time', 'time.time', ([], {}), '()\n', (1197, 1199), False, 'import time\n'), ((1252, 1263), 'time.time', 'time.time', ([], {}), '()\n', (1261, 1263), False, 'import time\n'), ((289, 317), 'numpy.add.outer', 'np.add.outer', (['(x ** 2)', '(y ** 2)'], {}), '(x ** 2, y ** 2)\n', (301, 317), True, 'import numpy as np\n'), ((374, 402), 'numpy.add.outer', 'np.add.outer', (['(x ** 2)', '(y ** 2)'], {}), '(x ** 2, y ** 2)\n', (386, 402), True, 'import numpy as np\n')] |
import gfa_reduce.common as common
import numpy as np
import gfa_reduce.analysis.util as util
def adu_to_surface_brightness(sky_adu_1pixel, acttime, extname):
"""
convert from ADU (per pixel) to mag per square asec (AB)
note that this is meant to be applied to an average sky value across
an entire GFA camera; this function does not take into account
platescale variations within a camera
"""
if (sky_adu_1pixel <= 0) or (acttime <= 0):
return np.nan
par = common.gfa_misc_params()
pixel_area_sq_asec = util.nominal_pixel_area_sq_asec(extname)
sky_adu_per_sq_asec = sky_adu_1pixel/pixel_area_sq_asec
sky_adu_per_sec_sq_asec = sky_adu_per_sq_asec/acttime
sky_e_per_sec_sq_asec = sky_adu_per_sec_sq_asec*common.gfa_camera_gain(extname)
return (par['nominal_zeropoint'] - 2.5*np.log10(sky_e_per_sec_sq_asec))
| [
"gfa_reduce.common.gfa_camera_gain",
"gfa_reduce.common.gfa_misc_params",
"numpy.log10",
"gfa_reduce.analysis.util.nominal_pixel_area_sq_asec"
] | [((502, 526), 'gfa_reduce.common.gfa_misc_params', 'common.gfa_misc_params', ([], {}), '()\n', (524, 526), True, 'import gfa_reduce.common as common\n'), ((553, 593), 'gfa_reduce.analysis.util.nominal_pixel_area_sq_asec', 'util.nominal_pixel_area_sq_asec', (['extname'], {}), '(extname)\n', (584, 593), True, 'import gfa_reduce.analysis.util as util\n'), ((767, 798), 'gfa_reduce.common.gfa_camera_gain', 'common.gfa_camera_gain', (['extname'], {}), '(extname)\n', (789, 798), True, 'import gfa_reduce.common as common\n'), ((843, 874), 'numpy.log10', 'np.log10', (['sky_e_per_sec_sq_asec'], {}), '(sky_e_per_sec_sq_asec)\n', (851, 874), True, 'import numpy as np\n')] |
from CNN_architecture import *
from LSTM_NN_architecture import *
import numpy as np
from keras.models import Sequential
from matplotlib import pyplot as plt
from testsets import *
from math import *
import sys
sys.path.insert(0, "../")
class CNN_LSTM_Ensemble():
def __init__(self, cnn_model_weights_file, lstm_model_weights_file, train_data_file, word_to_vec_mapping, max_len, train_we):
self.word_to_vec_mapping = word_to_vec_mapping
self.max_len = max_len
self.train_we = train_we
self.cnn_model = Conv_NN(self.word_to_vec_mapping, self.max_len, self.train_we)
self.cnn_model.model.load_weights(cnn_model_weights_file)
self.lstm_model = LSTM_NN(train_data_file, self.word_to_vec_mapping, self.max_len, self.train_we)
self.lstm_model.model.load_weights(lstm_model_weights_file)
self.model = Sequential()
self.model.add(Dense(128, activation='relu', input_dim=6))
self.model.add(Dropout(0.5))
self.model.add(Dense(64, activation='relu'))
self.model.add(Dropout(0.5))
self.model.add(Dense(3, activation='softmax'))
self.model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
print(self.model.summary())
def train(self, train_data_file, dev_data_file=None):
_, X_train, Y_train = csv_to_np(train_data_file)
cnn_X = example_to_indices(X_train, self.cnn_model.word_to_vec_mapping, self.cnn_model.max_len)
lstm_X = example_to_indices_v2(X_train, self.lstm_model.vocabulary, self.lstm_model.max_len)
Y_train = integer_to_one_hot(Y_train)
cnn_Y_hat = self.cnn_model.model.predict(cnn_X)
lstm_Y_hat = self.lstm_model.model.predict(lstm_X)
X_train = np.dstack((cnn_Y_hat, lstm_Y_hat))
X_train = X_train.reshape((X_train.shape[0], X_train.shape[1]*X_train.shape[2]))
save_filename = "GloVe_"+str(self.word_to_vec_mapping.vector_size)+"d_"+type(self).__name__+"_padding"+str(self.max_len)+"d_model"
if dev_data_file == None:
checkpoint = ModelCheckpoint("../models_weights/"+save_filename+".{epoch:02d}-{acc:.4f}.hdf5", monitor='acc', verbose=1, save_best_only=True, mode='max')
history = self.model.fit(X_train, Y_train, epochs = 100, batch_size = 32, shuffle=True, callbacks=[checkpoint])
elif dev_data_file != None:
_, X_dev, Y_dev = csv_to_np(dev_data_file)
cnn_X_dev = example_to_indices(X_dev, self.cnn_model.word_to_vec_mapping, self.cnn_model.max_len)
lstm_X_dev = example_to_indices_v2(X_dev, self.lstm_model.vocabulary, self.lstm_model.max_len)
cnn_Y_hat_dev = self.cnn_model.model.predict(cnn_X_dev)
lstm_Y_hat_dev = self.lstm_model.model.predict(lstm_X_dev)
X_dev = np.dstack((cnn_Y_hat_dev, lstm_Y_hat_dev))
X_dev = X_dev.reshape((X_dev.shape[0], X_dev.shape[1]*X_dev.shape[2]))
Y_dev = integer_to_one_hot(Y_dev)
checkpoint = ModelCheckpoint("../models_weights/"+save_filename+".{epoch:02d}-{val_acc:.4f}.hdf5", monitor='val_acc', verbose=1, save_best_only=True, mode='max')
early_stop = EarlyStopping(monitor='val_acc', patience=10, mode='max')
callbacks_list = [checkpoint, early_stop]
history = self.model.fit(X_train, Y_train, validation_data=(X_dev, Y_dev), epochs = 100, batch_size = 32, shuffle=True, callbacks=callbacks_list)
plt.plot(history.history['acc'], label='train_acc')
plt.plot(history.history['val_acc'], label='dev_acc')
plt.xlabel("#epochs")
plt.ylabel("accuracy")
plt.legend()
plt.savefig("../training_plots/"+save_filename+".png", bbox_inches='tight')
plt.show()
def evaluate(self, test_data_file):
ID_test, _, _ = csv_to_np(test_data_file[0])
cnn_X_test, _ = self.cnn_model.evaluate(test_data_file)
lstm_X_test, Y_test = self.lstm_model.evaluate(test_data_file)
X_test = np.dstack((cnn_X_test, lstm_X_test))
X_test = X_test.reshape((X_test.shape[0], X_test.shape[1]*X_test.shape[2]))
predictions = self.model.predict(X_test)
pred_dict = dict()
for i in range(len(X_test)):
pred_dict[str(ID_test[i])] = label_to_sentiment(np.argmax(predictions[i]))
loss, accuracy = self.model.evaluate(X_test, Y_test)
print()
print("Loss = ", loss)
print("Test accuracy = " + str(accuracy*100) + "%")
evaluation.evaluate(pred_dict, test_data_file[1], type(self).__name__)
evaluation.confusion(pred_dict, test_data_file[1], type(self).__name__)
return (predictions, Y_test)
def show_errors(self, data_file):
cnn_X, _ = self.cnn_model.evaluate(train_data_file)
lstm_X, Y = self.lstm_model.evaluate(train_data_file)
X = np.dstack((cnn_X, lstm_X))
X = X.reshape((X.shape[0], X.shape[1]*X.shape[2]))
predictions = self.model.predict(X)
for i in range(len(X)):
num = np.argmax(predictions[i])
if(num != Y[i]):
print('TWEET: ' + X[i])
print('PREDICTION: ' + label_to_sentiment(num))
print('REAL: '+ label_to_sentiment(Y[i])+ '\n')
# def prediction_on_new_example(self, example):
# X_test = np.array([example])
# X_test_indices = example_to_indices(X_test, self.word_to_vec_mapping, self.max_len)
# print(X_test[0] + ' -> ' + label_to_sentiment(np.argmax(self.model.predict(X_test_indices))))
| [
"numpy.dstack",
"sys.path.insert",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"numpy.argmax",
"keras.models.Sequential",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.show"
] | [((211, 236), 'sys.path.insert', 'sys.path.insert', (['(0)', '"""../"""'], {}), "(0, '../')\n", (226, 236), False, 'import sys\n'), ((815, 827), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (825, 827), False, 'from keras.models import Sequential\n'), ((1629, 1663), 'numpy.dstack', 'np.dstack', (['(cnn_Y_hat, lstm_Y_hat)'], {}), '((cnn_Y_hat, lstm_Y_hat))\n', (1638, 1663), True, 'import numpy as np\n'), ((3188, 3239), 'matplotlib.pyplot.plot', 'plt.plot', (["history.history['acc']"], {'label': '"""train_acc"""'}), "(history.history['acc'], label='train_acc')\n", (3196, 3239), True, 'from matplotlib import pyplot as plt\n'), ((3242, 3295), 'matplotlib.pyplot.plot', 'plt.plot', (["history.history['val_acc']"], {'label': '"""dev_acc"""'}), "(history.history['val_acc'], label='dev_acc')\n", (3250, 3295), True, 'from matplotlib import pyplot as plt\n'), ((3298, 3319), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""#epochs"""'], {}), "('#epochs')\n", (3308, 3319), True, 'from matplotlib import pyplot as plt\n'), ((3322, 3344), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""accuracy"""'], {}), "('accuracy')\n", (3332, 3344), True, 'from matplotlib import pyplot as plt\n'), ((3347, 3359), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (3357, 3359), True, 'from matplotlib import pyplot as plt\n'), ((3362, 3441), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('../training_plots/' + save_filename + '.png')"], {'bbox_inches': '"""tight"""'}), "('../training_plots/' + save_filename + '.png', bbox_inches='tight')\n", (3373, 3441), True, 'from matplotlib import pyplot as plt\n'), ((3440, 3450), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3448, 3450), True, 'from matplotlib import pyplot as plt\n'), ((3671, 3707), 'numpy.dstack', 'np.dstack', (['(cnn_X_test, lstm_X_test)'], {}), '((cnn_X_test, lstm_X_test))\n', (3680, 3707), True, 'import numpy as np\n'), ((4438, 4464), 'numpy.dstack', 'np.dstack', (['(cnn_X, lstm_X)'], {}), '((cnn_X, 
lstm_X))\n', (4447, 4464), True, 'import numpy as np\n'), ((4596, 4621), 'numpy.argmax', 'np.argmax', (['predictions[i]'], {}), '(predictions[i])\n', (4605, 4621), True, 'import numpy as np\n'), ((2595, 2637), 'numpy.dstack', 'np.dstack', (['(cnn_Y_hat_dev, lstm_Y_hat_dev)'], {}), '((cnn_Y_hat_dev, lstm_Y_hat_dev))\n', (2604, 2637), True, 'import numpy as np\n'), ((3933, 3958), 'numpy.argmax', 'np.argmax', (['predictions[i]'], {}), '(predictions[i])\n', (3942, 3958), True, 'import numpy as np\n')] |
"""
Test core functionality of normaliser objects
"""
import numpy
import sys
import unittest
sys.path.append("..")
from nPYc.utilities.normalisation._nullNormaliser import NullNormaliser
from nPYc.utilities.normalisation._totalAreaNormaliser import TotalAreaNormaliser
from nPYc.utilities.normalisation._probabilisticQuotientNormaliser import ProbabilisticQuotientNormaliser
class test_utilities_normalisation(unittest.TestCase):
"""
Test class for the normalisation objects. Contains tests covering the basic functionality of individual objects
and their interaction and usage inside the nPYc Dataset objects.
"""
def setUp(self):
# Simulate some data
self.noSamp = numpy.random.randint(5, high=50, size=None)
self.noFeat = numpy.random.randint(60, high=200, size=None)
self.X = numpy.random.randn(self.noSamp, self.noFeat)
# Object test
def test_nullNormaliser(self):
"""
Check that the NullNormaliser works
"""
# Check if output data = input data (its not supposed to do anything)
numpy.testing.assert_array_equal(self.X, NullNormaliser().normalise(self.X), err_msg="Null Normaliser not working as expected")
self.assertEqual(1, NullNormaliser().normalisation_coefficients)
def test_nullNormaliser_eq_(self):
"""
Check that the NullNormaliser equality testing works
"""
with self.subTest():
norm = NullNormaliser()
norm2 = NullNormaliser()
self.assertEqual(norm, norm2)
pqn = ProbabilisticQuotientNormaliser()
tanorm = TotalAreaNormaliser(keepMagnitude=False)
tanorm2 = TotalAreaNormaliser(keepMagnitude=True)
notEqualList = [1, True, 'str', 1.1, list(), dict(), tanorm, tanorm2, pqn]
norm = NullNormaliser()
for comparison in notEqualList:
with self.subTest(msg=comparison):
self.assertNotEqual(norm, comparison)
class test_utilities_totalAreaNormaliser(unittest.TestCase):
def setUp(self):
# Simulate some data
self.noSamp = numpy.random.randint(5, high=50, size=None)
self.noFeat = numpy.random.randint(60, high=200, size=None)
self.X = numpy.random.randn(self.noSamp, self.noFeat)
# Object test
def test_totalAreaNormaliser(self):
"""
Check that the TotalAreaNormaliser works
"""
# Check if algorithm is being performed correctly
tanorm = TotalAreaNormaliser(keepMagnitude=False)
X = numpy.copy(self.X)
x_normed = X/X.sum(axis=1)[:, None]
numpy.testing.assert_array_almost_equal(x_normed, tanorm.normalise(X), err_msg="Total Area normaliser not working correctly")
numpy.testing.assert_array_equal(X.sum(axis=1), tanorm.normalisation_coefficients)
def test_eq_(self):
"""
Check that the TotalAreaNormaliser equality testing works
"""
with self.subTest(msg='Test keepMagnitude is respected'):
tanorm = TotalAreaNormaliser(keepMagnitude=False)
tanorm2 = TotalAreaNormaliser(keepMagnitude=False)
tanorm3 = TotalAreaNormaliser(keepMagnitude=True)
tanorm4 = TotalAreaNormaliser(keepMagnitude=True)
self.assertEqual(tanorm, tanorm2)
self.assertEqual(tanorm3, tanorm3)
self.assertNotEqual(tanorm, tanorm3)
pqn = ProbabilisticQuotientNormaliser()
notEqualList = [1, True, 'str', 1.1, list(), dict(), NullNormaliser(), pqn]
tanorm = TotalAreaNormaliser()
for comparison in notEqualList:
with self.subTest(msg=comparison):
self.assertNotEqual(tanorm, comparison)
def test_raises(self):
tanorm = TotalAreaNormaliser(keepMagnitude=False)
with self.subTest(msg='Not two dimensions'):
X = numpy.random.randn(2,2,2)
self.assertRaises(ValueError, tanorm.normalise, X)
X = numpy.random.randn(2)
self.assertRaises(ValueError, tanorm.normalise, X)
def test_repr(self):
with self.subTest(msg='Preserving magnitude'):
tanorm = TotalAreaNormaliser(keepMagnitude=False)
strform = str(tanorm)
self.assertEqual(strform, 'Normalised to unit area.')
with self.subTest(msg='Preserving magnitude'):
tanorm = TotalAreaNormaliser(keepMagnitude=True)
strform = str(tanorm)
self.assertEqual(strform, 'Normalised to constant area, preserving magnitude.')
class test_utilities_probabilisticQuotientNormaliser(unittest.TestCase):
def setUp(self):
# Simulate some data
self.noSamp = numpy.random.randint(5, high=50, size=None)
self.noFeat = numpy.random.randint(60, high=200, size=None)
self.X = numpy.random.randn(self.noSamp, self.noFeat)
# Object test
def test_probabilisticQuotientNormaliser(self):
"""
Check that the ProbabilisticQuotientNormaliser
"""
X = numpy.copy(self.X)
reference = numpy.nanmedian(X, axis=0)
pqn_norm = ProbabilisticQuotientNormaliser()
fold_change_matrix = X / reference
pqn_norm_coefs = numpy.absolute(numpy.median(fold_change_matrix, axis=1))
pqn_normed = X / pqn_norm_coefs[:, None]
numpy.testing.assert_array_almost_equal(pqn_normed, pqn_norm.normalise(self.X), err_msg="PQN normaliser not working correctly - mismatching normalised data")
# Run twice to pick up the hashed coefficients
numpy.testing.assert_array_almost_equal(pqn_normed, pqn_norm.normalise(self.X), err_msg="PQN normaliser not working correctly - mismatching normalised data")
numpy.testing.assert_array_almost_equal(pqn_norm_coefs, pqn_norm.normalisation_coefficients, err_msg="PQN normaliser not working correctly - non-matching PQN coefficients")
numpy.testing.assert_array_equal(reference, pqn_norm.reference)
def test_nans(self):
self.X[0, 0] = numpy.nan
pqn_norm = ProbabilisticQuotientNormaliser()
pqn_norm.normalise(self.X)
def test_repr(self):
with self.subTest(msg='Default reference profile'):
pqn_norm = ProbabilisticQuotientNormaliser()
strform = str(pqn_norm)
self.assertEqual(strform, 'Normalised to median fold-change, reference profile was the median profile.')
def test_delete_reference(self):
pqn_norm = ProbabilisticQuotientNormaliser()
pqn_norm.normalise(self.X)
del pqn_norm.reference
self.assertIsNone(pqn_norm.normalisation_coefficients)
def test_pass_reference(self):
X = numpy.copy(self.X)
reference = numpy.abs(numpy.random.randn(self.noFeat))
pqn_norm = ProbabilisticQuotientNormaliser(reference=reference)
pqn_norm.normalise(X)
featureMask = numpy.logical_and(numpy.isfinite(reference), reference != 0)
fold_change_matrix = X[:, featureMask] / reference[featureMask]
fold_change_matrix[fold_change_matrix == 0] = numpy.nan
pqn_norm_coefs = numpy.absolute(numpy.median(fold_change_matrix, axis=1))
# Set 0 cofficients to 1
pqn_norm_coefs[pqn_norm_coefs == 0] = 1
numpy.testing.assert_array_almost_equal(pqn_norm_coefs,
pqn_norm.normalisation_coefficients,
err_msg="Change of reference does not work")
def test_raises(self):
pqn_norm = ProbabilisticQuotientNormaliser()
with self.subTest(msg='1D X matrix'):
X = numpy.array([2])
self.assertRaises(ValueError, pqn_norm.normalise, X)
with self.subTest(msg='3D X matrix'):
X = numpy.array([2,2,2])
self.assertRaises(ValueError, pqn_norm.normalise, X)
with self.subTest(msg='Reference wrong size'):
X = numpy.array([5,5])
pqn_norm.reference = numpy.array([4])
self.assertRaises(ValueError, pqn_norm.normalise, X)
if __name__ == '__main__':
unittest.main()
| [
"nPYc.utilities.normalisation._totalAreaNormaliser.TotalAreaNormaliser",
"numpy.copy",
"numpy.testing.assert_array_almost_equal",
"numpy.median",
"numpy.nanmedian",
"nPYc.utilities.normalisation._nullNormaliser.NullNormaliser",
"numpy.array",
"numpy.random.randint",
"numpy.isfinite",
"unittest.mai... | [((96, 117), 'sys.path.append', 'sys.path.append', (['""".."""'], {}), "('..')\n", (111, 117), False, 'import sys\n'), ((7176, 7191), 'unittest.main', 'unittest.main', ([], {}), '()\n', (7189, 7191), False, 'import unittest\n'), ((684, 727), 'numpy.random.randint', 'numpy.random.randint', (['(5)'], {'high': '(50)', 'size': 'None'}), '(5, high=50, size=None)\n', (704, 727), False, 'import numpy\n'), ((744, 789), 'numpy.random.randint', 'numpy.random.randint', (['(60)'], {'high': '(200)', 'size': 'None'}), '(60, high=200, size=None)\n', (764, 789), False, 'import numpy\n'), ((802, 846), 'numpy.random.randn', 'numpy.random.randn', (['self.noSamp', 'self.noFeat'], {}), '(self.noSamp, self.noFeat)\n', (820, 846), False, 'import numpy\n'), ((1444, 1477), 'nPYc.utilities.normalisation._probabilisticQuotientNormaliser.ProbabilisticQuotientNormaliser', 'ProbabilisticQuotientNormaliser', ([], {}), '()\n', (1475, 1477), False, 'from nPYc.utilities.normalisation._probabilisticQuotientNormaliser import ProbabilisticQuotientNormaliser\n'), ((1489, 1529), 'nPYc.utilities.normalisation._totalAreaNormaliser.TotalAreaNormaliser', 'TotalAreaNormaliser', ([], {'keepMagnitude': '(False)'}), '(keepMagnitude=False)\n', (1508, 1529), False, 'from nPYc.utilities.normalisation._totalAreaNormaliser import TotalAreaNormaliser\n'), ((1542, 1581), 'nPYc.utilities.normalisation._totalAreaNormaliser.TotalAreaNormaliser', 'TotalAreaNormaliser', ([], {'keepMagnitude': '(True)'}), '(keepMagnitude=True)\n', (1561, 1581), False, 'from nPYc.utilities.normalisation._totalAreaNormaliser import TotalAreaNormaliser\n'), ((1669, 1685), 'nPYc.utilities.normalisation._nullNormaliser.NullNormaliser', 'NullNormaliser', ([], {}), '()\n', (1683, 1685), False, 'from nPYc.utilities.normalisation._nullNormaliser import NullNormaliser\n'), ((1920, 1963), 'numpy.random.randint', 'numpy.random.randint', (['(5)'], {'high': '(50)', 'size': 'None'}), '(5, high=50, size=None)\n', (1940, 1963), False, 
'import numpy\n'), ((1980, 2025), 'numpy.random.randint', 'numpy.random.randint', (['(60)'], {'high': '(200)', 'size': 'None'}), '(60, high=200, size=None)\n', (2000, 2025), False, 'import numpy\n'), ((2038, 2082), 'numpy.random.randn', 'numpy.random.randn', (['self.noSamp', 'self.noFeat'], {}), '(self.noSamp, self.noFeat)\n', (2056, 2082), False, 'import numpy\n'), ((2254, 2294), 'nPYc.utilities.normalisation._totalAreaNormaliser.TotalAreaNormaliser', 'TotalAreaNormaliser', ([], {'keepMagnitude': '(False)'}), '(keepMagnitude=False)\n', (2273, 2294), False, 'from nPYc.utilities.normalisation._totalAreaNormaliser import TotalAreaNormaliser\n'), ((2301, 2319), 'numpy.copy', 'numpy.copy', (['self.X'], {}), '(self.X)\n', (2311, 2319), False, 'import numpy\n'), ((3065, 3098), 'nPYc.utilities.normalisation._probabilisticQuotientNormaliser.ProbabilisticQuotientNormaliser', 'ProbabilisticQuotientNormaliser', ([], {}), '()\n', (3096, 3098), False, 'from nPYc.utilities.normalisation._probabilisticQuotientNormaliser import ProbabilisticQuotientNormaliser\n'), ((3188, 3209), 'nPYc.utilities.normalisation._totalAreaNormaliser.TotalAreaNormaliser', 'TotalAreaNormaliser', ([], {}), '()\n', (3207, 3209), False, 'from nPYc.utilities.normalisation._totalAreaNormaliser import TotalAreaNormaliser\n'), ((3364, 3404), 'nPYc.utilities.normalisation._totalAreaNormaliser.TotalAreaNormaliser', 'TotalAreaNormaliser', ([], {'keepMagnitude': '(False)'}), '(keepMagnitude=False)\n', (3383, 3404), False, 'from nPYc.utilities.normalisation._totalAreaNormaliser import TotalAreaNormaliser\n'), ((4179, 4222), 'numpy.random.randint', 'numpy.random.randint', (['(5)'], {'high': '(50)', 'size': 'None'}), '(5, high=50, size=None)\n', (4199, 4222), False, 'import numpy\n'), ((4239, 4284), 'numpy.random.randint', 'numpy.random.randint', (['(60)'], {'high': '(200)', 'size': 'None'}), '(60, high=200, size=None)\n', (4259, 4284), False, 'import numpy\n'), ((4297, 4341), 'numpy.random.randn', 
'numpy.random.randn', (['self.noSamp', 'self.noFeat'], {}), '(self.noSamp, self.noFeat)\n', (4315, 4341), False, 'import numpy\n'), ((4477, 4495), 'numpy.copy', 'numpy.copy', (['self.X'], {}), '(self.X)\n', (4487, 4495), False, 'import numpy\n'), ((4510, 4536), 'numpy.nanmedian', 'numpy.nanmedian', (['X'], {'axis': '(0)'}), '(X, axis=0)\n', (4525, 4536), False, 'import numpy\n'), ((4550, 4583), 'nPYc.utilities.normalisation._probabilisticQuotientNormaliser.ProbabilisticQuotientNormaliser', 'ProbabilisticQuotientNormaliser', ([], {}), '()\n', (4581, 4583), False, 'from nPYc.utilities.normalisation._probabilisticQuotientNormaliser import ProbabilisticQuotientNormaliser\n'), ((5113, 5295), 'numpy.testing.assert_array_almost_equal', 'numpy.testing.assert_array_almost_equal', (['pqn_norm_coefs', 'pqn_norm.normalisation_coefficients'], {'err_msg': '"""PQN normaliser not working correctly - non-matching PQN coefficients"""'}), "(pqn_norm_coefs, pqn_norm.\n normalisation_coefficients, err_msg=\n 'PQN normaliser not working correctly - non-matching PQN coefficients')\n", (5152, 5295), False, 'import numpy\n'), ((5288, 5351), 'numpy.testing.assert_array_equal', 'numpy.testing.assert_array_equal', (['reference', 'pqn_norm.reference'], {}), '(reference, pqn_norm.reference)\n', (5320, 5351), False, 'import numpy\n'), ((5416, 5449), 'nPYc.utilities.normalisation._probabilisticQuotientNormaliser.ProbabilisticQuotientNormaliser', 'ProbabilisticQuotientNormaliser', ([], {}), '()\n', (5447, 5449), False, 'from nPYc.utilities.normalisation._probabilisticQuotientNormaliser import ProbabilisticQuotientNormaliser\n'), ((5790, 5823), 'nPYc.utilities.normalisation._probabilisticQuotientNormaliser.ProbabilisticQuotientNormaliser', 'ProbabilisticQuotientNormaliser', ([], {}), '()\n', (5821, 5823), False, 'from nPYc.utilities.normalisation._probabilisticQuotientNormaliser import ProbabilisticQuotientNormaliser\n'), ((5977, 5995), 'numpy.copy', 'numpy.copy', (['self.X'], {}), '(self.X)\n', 
(5987, 5995), False, 'import numpy\n'), ((6066, 6118), 'nPYc.utilities.normalisation._probabilisticQuotientNormaliser.ProbabilisticQuotientNormaliser', 'ProbabilisticQuotientNormaliser', ([], {'reference': 'reference'}), '(reference=reference)\n', (6097, 6118), False, 'from nPYc.utilities.normalisation._probabilisticQuotientNormaliser import ProbabilisticQuotientNormaliser\n'), ((6493, 6635), 'numpy.testing.assert_array_almost_equal', 'numpy.testing.assert_array_almost_equal', (['pqn_norm_coefs', 'pqn_norm.normalisation_coefficients'], {'err_msg': '"""Change of reference does not work"""'}), "(pqn_norm_coefs, pqn_norm.\n normalisation_coefficients, err_msg='Change of reference does not work')\n", (6532, 6635), False, 'import numpy\n'), ((6694, 6727), 'nPYc.utilities.normalisation._probabilisticQuotientNormaliser.ProbabilisticQuotientNormaliser', 'ProbabilisticQuotientNormaliser', ([], {}), '()\n', (6725, 6727), False, 'from nPYc.utilities.normalisation._probabilisticQuotientNormaliser import ProbabilisticQuotientNormaliser\n'), ((1353, 1369), 'nPYc.utilities.normalisation._nullNormaliser.NullNormaliser', 'NullNormaliser', ([], {}), '()\n', (1367, 1369), False, 'from nPYc.utilities.normalisation._nullNormaliser import NullNormaliser\n'), ((1381, 1397), 'nPYc.utilities.normalisation._nullNormaliser.NullNormaliser', 'NullNormaliser', ([], {}), '()\n', (1395, 1397), False, 'from nPYc.utilities.normalisation._nullNormaliser import NullNormaliser\n'), ((2739, 2779), 'nPYc.utilities.normalisation._totalAreaNormaliser.TotalAreaNormaliser', 'TotalAreaNormaliser', ([], {'keepMagnitude': '(False)'}), '(keepMagnitude=False)\n', (2758, 2779), False, 'from nPYc.utilities.normalisation._totalAreaNormaliser import TotalAreaNormaliser\n'), ((2793, 2833), 'nPYc.utilities.normalisation._totalAreaNormaliser.TotalAreaNormaliser', 'TotalAreaNormaliser', ([], {'keepMagnitude': '(False)'}), '(keepMagnitude=False)\n', (2812, 2833), False, 'from 
nPYc.utilities.normalisation._totalAreaNormaliser import TotalAreaNormaliser\n'), ((2847, 2886), 'nPYc.utilities.normalisation._totalAreaNormaliser.TotalAreaNormaliser', 'TotalAreaNormaliser', ([], {'keepMagnitude': '(True)'}), '(keepMagnitude=True)\n', (2866, 2886), False, 'from nPYc.utilities.normalisation._totalAreaNormaliser import TotalAreaNormaliser\n'), ((2900, 2939), 'nPYc.utilities.normalisation._totalAreaNormaliser.TotalAreaNormaliser', 'TotalAreaNormaliser', ([], {'keepMagnitude': '(True)'}), '(keepMagnitude=True)\n', (2919, 2939), False, 'from nPYc.utilities.normalisation._totalAreaNormaliser import TotalAreaNormaliser\n'), ((3154, 3170), 'nPYc.utilities.normalisation._nullNormaliser.NullNormaliser', 'NullNormaliser', ([], {}), '()\n', (3168, 3170), False, 'from nPYc.utilities.normalisation._nullNormaliser import NullNormaliser\n'), ((3461, 3488), 'numpy.random.randn', 'numpy.random.randn', (['(2)', '(2)', '(2)'], {}), '(2, 2, 2)\n', (3479, 3488), False, 'import numpy\n'), ((3549, 3570), 'numpy.random.randn', 'numpy.random.randn', (['(2)'], {}), '(2)\n', (3567, 3570), False, 'import numpy\n'), ((3712, 3752), 'nPYc.utilities.normalisation._totalAreaNormaliser.TotalAreaNormaliser', 'TotalAreaNormaliser', ([], {'keepMagnitude': '(False)'}), '(keepMagnitude=False)\n', (3731, 3752), False, 'from nPYc.utilities.normalisation._totalAreaNormaliser import TotalAreaNormaliser\n'), ((3898, 3937), 'nPYc.utilities.normalisation._totalAreaNormaliser.TotalAreaNormaliser', 'TotalAreaNormaliser', ([], {'keepMagnitude': '(True)'}), '(keepMagnitude=True)\n', (3917, 3937), False, 'from nPYc.utilities.normalisation._totalAreaNormaliser import TotalAreaNormaliser\n'), ((4656, 4696), 'numpy.median', 'numpy.median', (['fold_change_matrix'], {'axis': '(1)'}), '(fold_change_matrix, axis=1)\n', (4668, 4696), False, 'import numpy\n'), ((5572, 5605), 'nPYc.utilities.normalisation._probabilisticQuotientNormaliser.ProbabilisticQuotientNormaliser', 'ProbabilisticQuotientNormaliser', 
([], {}), '()\n', (5603, 5605), False, 'from nPYc.utilities.normalisation._probabilisticQuotientNormaliser import ProbabilisticQuotientNormaliser\n'), ((6020, 6051), 'numpy.random.randn', 'numpy.random.randn', (['self.noFeat'], {}), '(self.noFeat)\n', (6038, 6051), False, 'import numpy\n'), ((6178, 6203), 'numpy.isfinite', 'numpy.isfinite', (['reference'], {}), '(reference)\n', (6192, 6203), False, 'import numpy\n'), ((6379, 6419), 'numpy.median', 'numpy.median', (['fold_change_matrix'], {'axis': '(1)'}), '(fold_change_matrix, axis=1)\n', (6391, 6419), False, 'import numpy\n'), ((6775, 6791), 'numpy.array', 'numpy.array', (['[2]'], {}), '([2])\n', (6786, 6791), False, 'import numpy\n'), ((6896, 6918), 'numpy.array', 'numpy.array', (['[2, 2, 2]'], {}), '([2, 2, 2])\n', (6907, 6918), False, 'import numpy\n'), ((7030, 7049), 'numpy.array', 'numpy.array', (['[5, 5]'], {}), '([5, 5])\n', (7041, 7049), False, 'import numpy\n'), ((7073, 7089), 'numpy.array', 'numpy.array', (['[4]'], {}), '([4])\n', (7084, 7089), False, 'import numpy\n'), ((1170, 1186), 'nPYc.utilities.normalisation._nullNormaliser.NullNormaliser', 'NullNormaliser', ([], {}), '()\n', (1184, 1186), False, 'from nPYc.utilities.normalisation._nullNormaliser import NullNormaliser\n'), ((1061, 1077), 'nPYc.utilities.normalisation._nullNormaliser.NullNormaliser', 'NullNormaliser', ([], {}), '()\n', (1075, 1077), False, 'from nPYc.utilities.normalisation._nullNormaliser import NullNormaliser\n')] |
# -*- coding: utf-8 -*-
"""
Provides common utility functions.
"""
from __future__ import division, print_function
from __future__ import absolute_import, unicode_literals
import inspect
from functools import wraps
import numpy as np
def all_parameters_as_numpy_arrays(fn):
"""Converts all of a function's arguments to numpy arrays.
Used as a decorator to reduce duplicate code.
"""
# wraps allows us to pass the docstring back
# or the decorator will hide the function from our doc generator
@wraps(fn)
def wrapper(*args, **kwargs):
args = list(args)
for i, v in enumerate(args):
if v is not None:
args[i] = np.array(v)
for k, v in kwargs.items():
if v is not None:
kwargs[k] = np.array(v)
return fn(*args, **kwargs)
return wrapper
def parameters_as_numpy_arrays(*args_to_convert):
"""
Converts specific arguments to numpy arrays.
Used as a decorator to reduce duplicate code.
Arguments are specified by their argument name.
For example
::
@parameters_as_numpy_arrays('a', 'b', 'optional')
def myfunc(a, b, *args, **kwargs):
pass
myfunc(1, [2,2], optional=[3,3,3])
"""
def decorator(fn):
# wraps allows us to pass the docstring back
# or the decorator will hide the function from our doc generator
@wraps(fn)
def wrapper(*args, **kwargs):
# get the arguments of the function we're decorating
fn_args = inspect.getargspec(fn)
# convert any values that are specified
# if the argument isn't in our list, just pass it through
# convert the *args list
# we zip the args with the argument names we received from
# the inspect function
args = list(args)
for i, (k, v) in enumerate(zip(fn_args.args, args)):
if k in args_to_convert and v is not None:
args[i] = np.array(v)
# convert the **kwargs dict
for k, v in kwargs.items():
if k in args_to_convert and v is not None:
kwargs[k] = np.array(v)
# pass the converted values to our function
return fn(*args, **kwargs)
return wrapper
return decorator
| [
"numpy.array",
"functools.wraps",
"inspect.getargspec"
] | [((522, 531), 'functools.wraps', 'wraps', (['fn'], {}), '(fn)\n', (527, 531), False, 'from functools import wraps\n'), ((1428, 1437), 'functools.wraps', 'wraps', (['fn'], {}), '(fn)\n', (1433, 1437), False, 'from functools import wraps\n'), ((1563, 1585), 'inspect.getargspec', 'inspect.getargspec', (['fn'], {}), '(fn)\n', (1581, 1585), False, 'import inspect\n'), ((685, 696), 'numpy.array', 'np.array', (['v'], {}), '(v)\n', (693, 696), True, 'import numpy as np\n'), ((791, 802), 'numpy.array', 'np.array', (['v'], {}), '(v)\n', (799, 802), True, 'import numpy as np\n'), ((2037, 2048), 'numpy.array', 'np.array', (['v'], {}), '(v)\n', (2045, 2048), True, 'import numpy as np\n'), ((2221, 2232), 'numpy.array', 'np.array', (['v'], {}), '(v)\n', (2229, 2232), True, 'import numpy as np\n')] |
""" Helper class and functions for loading KITTI objects
Author: <NAME>
Date: September 2017
"""
import os
import sys
import numpy as np
import cv2
# Directory containing this file; its parent is treated as the project root.
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
ROOT_DIR = os.path.dirname(BASE_DIR)
# Extend the import path with the bundled "mayavi" folder —
# presumably where kitti_util (imported below) lives; TODO confirm layout.
sys.path.append(os.path.join(ROOT_DIR, "mayavi"))
import kitti_util as utils
import argparse
# Object classes of interest; detections/labels of any other type are
# skipped when drawing boxes (see show_image_with_boxes).
care_types = ['Car', 'Pedestrian', 'Cyclist']
class kitti_object(object):
    """Load and parse KITTI object-detection data into a usable format.

    Wraps a KITTI-style directory layout (``image_2``, ``label_2``, ``calib``,
    ``velodyne``, ``depth``, ``depth_pc`` under ``training``/``testing``) and
    exposes per-sample accessors indexed by position in the split's sample list.
    """

    def __init__(self, root_dir, split='trainval', pred_dir=None, pred_only=False):
        """root_dir contains training and testing folders.

        Args:
            root_dir: Dataset root containing the ``training``/``testing`` dirs.
            split: Split name; must contain "train", "val", or "test". A file
                ``./data/splits/<split>.txt`` listing sample ids must exist.
            pred_dir: Optional folder of prediction ``.txt`` files; its base
                name must contain the split name for consistency.
            pred_only: If True (and pred_dir given), restrict samples to those
                that actually have a prediction file.

        Raises:
            ValueError: Irregular split or prediction-folder name.
            FileNotFoundError: Missing split file.
        """
        self.root_dir = root_dir
        self.split = split
        # Map split name -> subset folder: train/val splits live under
        # "training", test splits under "testing".
        if 'train' in self.split or 'val' in self.split:
            self.subset = 'training'
        elif 'test' in self.split:
            self.subset = 'testing'
        else:
            raise ValueError('Irregular name of split! Should include \"train\", \"val\", or \"test\" to indicate its subset.')
        self.split_file = './data/splits/{}.txt'.format(split)
        if not os.path.exists(self.split_file):
            raise FileNotFoundError('Not found split file! Please include {}.txt in ./data/splits.'.format(self.split))
        self.pred_dir = pred_dir
        if self.pred_dir is not None:
            # Require the split name in the prediction folder name so
            # predictions cannot silently be paired with the wrong split.
            if self.split not in os.path.basename(os.path.abspath(self.pred_dir)):
                raise ValueError('Irregular name of prediction folder! Should include split name \"{}\" for consistency.'.format(self.split))
        self.pred_only = pred_only
        if self.pred_only and self.pred_dir is not None:
            # Keep only samples that have a corresponding prediction file.
            self.sample_ids = []
            for pred_file in os.listdir(self.pred_dir):
                if pred_file.endswith('.txt'):
                    self.sample_ids.append(int(pred_file[:-4]))
        else:
            with open(self.split_file, 'r') as f:
                self.sample_ids = [int(id) for id in f.read().splitlines()]
        self.sample_ids = sorted(self.sample_ids)
        self.subset_dir = os.path.join(self.root_dir, self.subset)
        self.image_dir = os.path.join(self.subset_dir, "image_2")
        self.label_dir = os.path.join(self.subset_dir, "label_2")
        self.calib_dir = os.path.join(self.subset_dir, "calib")
        self.depthpc_dir = os.path.join(self.subset_dir, "depth_pc")
        self.lidar_dir = os.path.join(self.subset_dir, "velodyne")
        self.depth_dir = os.path.join(self.subset_dir, "depth")

    def __len__(self):
        """Number of samples in this split."""
        return len(self.sample_ids)

    def _sample_path(self, directory, idx, suffix):
        """Resolve a dataset position to its KITTI sample id and build the path.

        Shared by all getters: ``idx`` is a position into ``self.sample_ids``;
        the resulting file name is the zero-padded 6-digit sample id + suffix.
        """
        return os.path.join(directory, "%06d%s" % (self.sample_ids[idx], suffix))

    def get_image(self, idx):
        """Load the left color image for sample at position ``idx``."""
        return utils.load_image(self._sample_path(self.image_dir, idx, ".png"))

    def get_lidar(self, idx, dtype=np.float32, n_vec=4):
        """Load the velodyne point cloud (``n_vec`` values per point)."""
        return utils.load_velo_scan(self._sample_path(self.lidar_dir, idx, ".bin"), dtype, n_vec)

    def get_calibration(self, idx):
        """Load the calibration object for sample at position ``idx``."""
        return utils.Calibration(self._sample_path(self.calib_dir, idx, ".txt"))

    def get_label_objects(self, idx):
        """Load ground-truth label objects; None for the testing subset."""
        if self.subset == "training":
            return utils.read_label(self._sample_path(self.label_dir, idx, ".txt"))
        else:
            # KITTI does not ship labels for the test set.
            print('WARNING: Testing set does not have label!')
            return None

    def get_pred_objects(self, idx):
        """Load predicted objects from ``pred_dir``; None if the file is missing.

        Raises:
            RuntimeError: If no prediction folder was provided at construction.
        """
        if self.pred_dir is None:
            raise RuntimeError('Prediction folder not provided!')
        pred_filename = self._sample_path(self.pred_dir, idx, ".txt")
        if os.path.exists(pred_filename):
            return utils.read_label(pred_filename)
        else:
            print('WARNING: Prediction file not found!')
            return None

    def get_depth(self, idx):
        """Load the depth map for sample at position ``idx``."""
        return utils.load_depth(self._sample_path(self.depth_dir, idx, ".png"))
def show_image_with_boxes(img, calib, objects=[], objects_pred=[]):
""" Show image with 2D bounding boxes """
img_2d = np.copy(img) # for 2d bbox
img_3d = np.copy(img) # for 3d bbox
for obj in objects:
if obj.type not in care_types:
continue
cv2.rectangle(
img_2d,
(int(obj.xmin), int(obj.ymin)),
(int(obj.xmax), int(obj.ymax)),
(0, 255, 0),
2,
)
box3d_pts_2d, _ = utils.compute_box_3d(obj, calib.P)
img_3d = utils.draw_projected_box3d(img_3d, box3d_pts_2d)
for obj in objects_pred:
if obj.type not in care_types:
continue
cv2.rectangle(
img_2d,
(int(obj.xmin), int(obj.ymin)),
(int(obj.xmax), int(obj.ymax)),
(0, 0, 255),
2,
)
box3d_pts_2d, _ = utils.compute_box_3d(obj, calib.P)
img_3d = utils.draw_projected_box3d(img_3d, box3d_pts_2d, color=(0, 0, 255))
return img_2d, img_3d
def get_lidar_index_in_image_fov(
pc_velo, calib, xmin, ymin, xmax, ymax, return_more=False, clip_distance=2.0
):
""" Filter lidar points, keep those in image FOV """
pts_2d = calib.project_velo_to_image(pc_velo)
fov_inds = (
(pts_2d[:, 0] < xmax)
& (pts_2d[:, 0] >= xmin)
& (pts_2d[:, 1] < ymax)
& (pts_2d[:, 1] >= ymin)
)
fov_inds = fov_inds & (pc_velo[:, 0] > clip_distance)
return fov_inds
def show_lidar_with_depth(
pc_velo,
calib,
fig,
objects=None,
img_fov=False,
img_width=None,
img_height=None,
objects_pred=None,
depth=None,
cam_img=None,
constraint_box=False,
pc_label=False,
save=False,
):
""" Show all LiDAR points.
Draw 3d box in LiDAR point cloud (in velo coord system) """
if "mlab" not in sys.modules:
import mayavi.mlab as mlab
from viz_util import draw_lidar_simple, draw_lidar, draw_gt_boxes3d
#print(("All point num: ", pc_velo.shape[0]))
if img_fov:
pc_velo_index = get_lidar_index_in_image_fov(
pc_velo[:, :3], calib, 0, 0, img_width, img_height
)
pc_velo = pc_velo[pc_velo_index, :]
#print(("FOV point num: ", pc_velo.shape))
#print("pc_velo", pc_velo.shape)
draw_lidar(pc_velo, fig=fig, pc_label=pc_label)
# Draw depth
if depth is not None:
depth_pc_velo = calib.project_depth_to_velo(depth, constraint_box)
indensity = np.ones((depth_pc_velo.shape[0], 1)) * 0.5
depth_pc_velo = np.hstack((depth_pc_velo, indensity))
print("depth_pc_velo:", depth_pc_velo.shape)
print("depth_pc_velo:", type(depth_pc_velo))
print(depth_pc_velo[:5])
draw_lidar(depth_pc_velo, fig=fig, pts_color=(1, 1, 1))
if save:
data_idx = 0
vely_dir = "data/object/training/depth_pc"
save_filename = os.path.join(vely_dir, "%06d.bin" % (data_idx))
print(save_filename)
# np.save(save_filename+".npy", np.array(depth_pc_velo))
depth_pc_velo = depth_pc_velo.astype(np.float32)
depth_pc_velo.tofile(save_filename)
color = (0, 1, 0)
if objects is not None:
for obj in objects:
if obj.type not in care_types:
continue
# Draw gt 3d bounding box
_, box3d_pts_3d = utils.compute_box_3d(obj, calib.P)
box3d_pts_3d_velo = calib.project_rect_to_velo(box3d_pts_3d)
draw_gt_boxes3d([box3d_pts_3d_velo], fig=fig, color=color, label=obj.type)
# Draw heading arrow
_, ori3d_pts_3d = utils.compute_orientation_3d(obj, calib.P)
ori3d_pts_3d_velo = calib.project_rect_to_velo(ori3d_pts_3d)
x1, y1, z1 = ori3d_pts_3d_velo[0, :]
x2, y2, z2 = ori3d_pts_3d_velo[1, :]
mlab.plot3d(
[x1, x2],
[y1, y2],
[z1, z2],
color=color,
tube_radius=None,
line_width=1,
figure=fig,
)
if objects_pred is not None:
color = (1, 0, 0)
for obj in objects_pred:
if obj.type not in care_types:
continue
# Draw 3d bounding box
_, box3d_pts_3d = utils.compute_box_3d(obj, calib.P)
box3d_pts_3d_velo = calib.project_rect_to_velo(box3d_pts_3d)
#print("box3d_pts_3d_velo:")
#print(box3d_pts_3d_velo)
draw_gt_boxes3d([box3d_pts_3d_velo], fig=fig, color=color, label=obj.type)
# Draw heading arrow
_, ori3d_pts_3d = utils.compute_orientation_3d(obj, calib.P)
ori3d_pts_3d_velo = calib.project_rect_to_velo(ori3d_pts_3d)
x1, y1, z1 = ori3d_pts_3d_velo[0, :]
x2, y2, z2 = ori3d_pts_3d_velo[1, :]
mlab.plot3d(
[x1, x2],
[y1, y2],
[z1, z2],
color=color,
tube_radius=None,
line_width=1,
figure=fig,
)
return fig | [
"kitti_util.read_label",
"numpy.hstack",
"kitti_util.load_velo_scan",
"viz_util.draw_lidar",
"os.path.exists",
"kitti_util.load_image",
"os.listdir",
"kitti_util.compute_orientation_3d",
"viz_util.draw_gt_boxes3d",
"numpy.ones",
"os.path.dirname",
"os.path.abspath",
"numpy.copy",
"kitti_ut... | [((216, 241), 'os.path.dirname', 'os.path.dirname', (['BASE_DIR'], {}), '(BASE_DIR)\n', (231, 241), False, 'import os\n'), ((178, 203), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (193, 203), False, 'import os\n'), ((258, 290), 'os.path.join', 'os.path.join', (['ROOT_DIR', '"""mayavi"""'], {}), "(ROOT_DIR, 'mayavi')\n", (270, 290), False, 'import os\n'), ((4279, 4291), 'numpy.copy', 'np.copy', (['img'], {}), '(img)\n', (4286, 4291), True, 'import numpy as np\n'), ((4320, 4332), 'numpy.copy', 'np.copy', (['img'], {}), '(img)\n', (4327, 4332), True, 'import numpy as np\n'), ((6477, 6524), 'viz_util.draw_lidar', 'draw_lidar', (['pc_velo'], {'fig': 'fig', 'pc_label': 'pc_label'}), '(pc_velo, fig=fig, pc_label=pc_label)\n', (6487, 6524), False, 'from viz_util import draw_lidar_simple, draw_lidar, draw_gt_boxes3d\n'), ((2079, 2119), 'os.path.join', 'os.path.join', (['self.root_dir', 'self.subset'], {}), '(self.root_dir, self.subset)\n', (2091, 2119), False, 'import os\n'), ((2146, 2186), 'os.path.join', 'os.path.join', (['self.subset_dir', '"""image_2"""'], {}), "(self.subset_dir, 'image_2')\n", (2158, 2186), False, 'import os\n'), ((2212, 2252), 'os.path.join', 'os.path.join', (['self.subset_dir', '"""label_2"""'], {}), "(self.subset_dir, 'label_2')\n", (2224, 2252), False, 'import os\n'), ((2278, 2316), 'os.path.join', 'os.path.join', (['self.subset_dir', '"""calib"""'], {}), "(self.subset_dir, 'calib')\n", (2290, 2316), False, 'import os\n'), ((2345, 2386), 'os.path.join', 'os.path.join', (['self.subset_dir', '"""depth_pc"""'], {}), "(self.subset_dir, 'depth_pc')\n", (2357, 2386), False, 'import os\n'), ((2412, 2453), 'os.path.join', 'os.path.join', (['self.subset_dir', '"""velodyne"""'], {}), "(self.subset_dir, 'velodyne')\n", (2424, 2453), False, 'import os\n'), ((2479, 2517), 'os.path.join', 'os.path.join', (['self.subset_dir', '"""depth"""'], {}), "(self.subset_dir, 'depth')\n", (2491, 2517), False, 'import os\n'), 
((2667, 2713), 'os.path.join', 'os.path.join', (['self.image_dir', "('%06d.png' % idx)"], {}), "(self.image_dir, '%06d.png' % idx)\n", (2679, 2713), False, 'import os\n'), ((2731, 2761), 'kitti_util.load_image', 'utils.load_image', (['img_filename'], {}), '(img_filename)\n', (2747, 2761), True, 'import kitti_util as utils\n'), ((2880, 2926), 'os.path.join', 'os.path.join', (['self.lidar_dir', "('%06d.bin' % idx)"], {}), "(self.lidar_dir, '%06d.bin' % idx)\n", (2892, 2926), False, 'import os\n'), ((2944, 2994), 'kitti_util.load_velo_scan', 'utils.load_velo_scan', (['lidar_filename', 'dtype', 'n_vec'], {}), '(lidar_filename, dtype, n_vec)\n', (2964, 2994), True, 'import kitti_util as utils\n'), ((3092, 3138), 'os.path.join', 'os.path.join', (['self.calib_dir', "('%06d.txt' % idx)"], {}), "(self.calib_dir, '%06d.txt' % idx)\n", (3104, 3138), False, 'import os\n'), ((3156, 3189), 'kitti_util.Calibration', 'utils.Calibration', (['calib_filename'], {}), '(calib_filename)\n', (3173, 3189), True, 'import kitti_util as utils\n'), ((3730, 3775), 'os.path.join', 'os.path.join', (['self.pred_dir', "('%06d.txt' % idx)"], {}), "(self.pred_dir, '%06d.txt' % idx)\n", (3742, 3775), False, 'import os\n'), ((3789, 3818), 'os.path.exists', 'os.path.exists', (['pred_filename'], {}), '(pred_filename)\n', (3803, 3818), False, 'import os\n'), ((4055, 4101), 'os.path.join', 'os.path.join', (['self.depth_dir', "('%06d.png' % idx)"], {}), "(self.depth_dir, '%06d.png' % idx)\n", (4067, 4101), False, 'import os\n'), ((4119, 4149), 'kitti_util.load_depth', 'utils.load_depth', (['img_filename'], {}), '(img_filename)\n', (4135, 4149), True, 'import kitti_util as utils\n'), ((4640, 4674), 'kitti_util.compute_box_3d', 'utils.compute_box_3d', (['obj', 'calib.P'], {}), '(obj, calib.P)\n', (4660, 4674), True, 'import kitti_util as utils\n'), ((4692, 4740), 'kitti_util.draw_projected_box3d', 'utils.draw_projected_box3d', (['img_3d', 'box3d_pts_2d'], {}), '(img_3d, box3d_pts_2d)\n', (4718, 4740), True, 
'import kitti_util as utils\n'), ((5038, 5072), 'kitti_util.compute_box_3d', 'utils.compute_box_3d', (['obj', 'calib.P'], {}), '(obj, calib.P)\n', (5058, 5072), True, 'import kitti_util as utils\n'), ((5090, 5157), 'kitti_util.draw_projected_box3d', 'utils.draw_projected_box3d', (['img_3d', 'box3d_pts_2d'], {'color': '(0, 0, 255)'}), '(img_3d, box3d_pts_2d, color=(0, 0, 255))\n', (5116, 5157), True, 'import kitti_util as utils\n'), ((6732, 6769), 'numpy.hstack', 'np.hstack', (['(depth_pc_velo, indensity)'], {}), '((depth_pc_velo, indensity))\n', (6741, 6769), True, 'import numpy as np\n'), ((6917, 6972), 'viz_util.draw_lidar', 'draw_lidar', (['depth_pc_velo'], {'fig': 'fig', 'pts_color': '(1, 1, 1)'}), '(depth_pc_velo, fig=fig, pts_color=(1, 1, 1))\n', (6927, 6972), False, 'from viz_util import draw_lidar_simple, draw_lidar, draw_gt_boxes3d\n'), ((1065, 1096), 'os.path.exists', 'os.path.exists', (['self.split_file'], {}), '(self.split_file)\n', (1079, 1096), False, 'import os\n'), ((1641, 1666), 'os.listdir', 'os.listdir', (['self.pred_dir'], {}), '(self.pred_dir)\n', (1651, 1666), False, 'import os\n'), ((3331, 3377), 'os.path.join', 'os.path.join', (['self.label_dir', "('%06d.txt' % idx)"], {}), "(self.label_dir, '%06d.txt' % idx)\n", (3343, 3377), False, 'import os\n'), ((3399, 3431), 'kitti_util.read_label', 'utils.read_label', (['label_filename'], {}), '(label_filename)\n', (3415, 3431), True, 'import kitti_util as utils\n'), ((3839, 3870), 'kitti_util.read_label', 'utils.read_label', (['pred_filename'], {}), '(pred_filename)\n', (3855, 3870), True, 'import kitti_util as utils\n'), ((6665, 6701), 'numpy.ones', 'np.ones', (['(depth_pc_velo.shape[0], 1)'], {}), '((depth_pc_velo.shape[0], 1))\n', (6672, 6701), True, 'import numpy as np\n'), ((7099, 7144), 'os.path.join', 'os.path.join', (['vely_dir', "('%06d.bin' % data_idx)"], {}), "(vely_dir, '%06d.bin' % data_idx)\n", (7111, 7144), False, 'import os\n'), ((7573, 7607), 'kitti_util.compute_box_3d', 
'utils.compute_box_3d', (['obj', 'calib.P'], {}), '(obj, calib.P)\n', (7593, 7607), True, 'import kitti_util as utils\n'), ((7694, 7768), 'viz_util.draw_gt_boxes3d', 'draw_gt_boxes3d', (['[box3d_pts_3d_velo]'], {'fig': 'fig', 'color': 'color', 'label': 'obj.type'}), '([box3d_pts_3d_velo], fig=fig, color=color, label=obj.type)\n', (7709, 7768), False, 'from viz_util import draw_lidar_simple, draw_lidar, draw_gt_boxes3d\n'), ((7832, 7874), 'kitti_util.compute_orientation_3d', 'utils.compute_orientation_3d', (['obj', 'calib.P'], {}), '(obj, calib.P)\n', (7860, 7874), True, 'import kitti_util as utils\n'), ((8058, 8160), 'mayavi.mlab.plot3d', 'mlab.plot3d', (['[x1, x2]', '[y1, y2]', '[z1, z2]'], {'color': 'color', 'tube_radius': 'None', 'line_width': '(1)', 'figure': 'fig'}), '([x1, x2], [y1, y2], [z1, z2], color=color, tube_radius=None,\n line_width=1, figure=fig)\n', (8069, 8160), True, 'import mayavi.mlab as mlab\n'), ((8510, 8544), 'kitti_util.compute_box_3d', 'utils.compute_box_3d', (['obj', 'calib.P'], {}), '(obj, calib.P)\n', (8530, 8544), True, 'import kitti_util as utils\n'), ((8709, 8783), 'viz_util.draw_gt_boxes3d', 'draw_gt_boxes3d', (['[box3d_pts_3d_velo]'], {'fig': 'fig', 'color': 'color', 'label': 'obj.type'}), '([box3d_pts_3d_velo], fig=fig, color=color, label=obj.type)\n', (8724, 8783), False, 'from viz_util import draw_lidar_simple, draw_lidar, draw_gt_boxes3d\n'), ((8847, 8889), 'kitti_util.compute_orientation_3d', 'utils.compute_orientation_3d', (['obj', 'calib.P'], {}), '(obj, calib.P)\n', (8875, 8889), True, 'import kitti_util as utils\n'), ((9073, 9175), 'mayavi.mlab.plot3d', 'mlab.plot3d', (['[x1, x2]', '[y1, y2]', '[z1, z2]'], {'color': 'color', 'tube_radius': 'None', 'line_width': '(1)', 'figure': 'fig'}), '([x1, x2], [y1, y2], [z1, z2], color=color, tube_radius=None,\n line_width=1, figure=fig)\n', (9084, 9175), True, 'import mayavi.mlab as mlab\n'), ((1340, 1370), 'os.path.abspath', 'os.path.abspath', (['self.pred_dir'], {}), 
'(self.pred_dir)\n', (1355, 1370), False, 'import os\n')] |
#!/usr/bin/python3
import os
import sys
import numpy as np
import rospy
import ros_numpy
import tf2_ros
from shapely.geometry import Point
from geometry_msgs.msg import PoseWithCovarianceStamped
from sensor_msgs.msg import PointCloud2
from nav_msgs.msg import Odometry
from darknet_ros_msgs.msg import BoundingBoxes
from mapless_mcl.drmcl import DRMCL
from mapless_mcl.trajectory import Trajectory
from mapless_mcl.hlmap import HLMap
from helpers.convert import tf_from_arrays
class DRNode:
def spin(self) -> None:
rate = rospy.Rate(10)
while not rospy.is_shutdown():
self.publish()
rate.sleep()
# INIT
#==========================================================================
def __init__(self):
rospy.init_node("mapless_mcl_ros_runner")
self.load_args()
self.init_communication()
self.setup()
def load_args(self) -> None:
self.n_init_particles = rospy.get_param("~particles")
self.frame = rospy.get_param("~frame", "base_footprint")
self.flag_publish_map_to_frame_tf = rospy.get_param("~publish_map_to_frame_tf", False)
self.flag_publish_utm_to_map_tf = rospy.get_param("~publish_utm_to_map_tf", False)
self.path_to_map = os.path.abspath( rospy.get_param("~map_path") )
self.model_sensitivity = float( rospy.get_param("~model_sensitivity") )
self.model_false_positive_rate = float( rospy.get_param("~model_false_positive_rate") )
self.path_to_trajectory = os.path.abspath( rospy.get_param("~trajectory_path") ) # TODO: pass as a service
def init_communication(self) -> None:
self.offset_subscriber = rospy.Subscriber("/odometry/offset",PoseWithCovarianceStamped, self.offset_callback, queue_size= 1000)
self.object_detection_subscriber = rospy.Subscriber("/darknet_ros/bounding_boxes", BoundingBoxes, self.object_detection_callback, queue_size=1000)
self.particles_publisher = rospy.Publisher("/mcl/drmcl/particles", PointCloud2, queue_size=10)
self.state_publisher = rospy.Publisher("/mcl/drmcl/pose", Odometry, queue_size=1)
self.transform_broadcaster = tf2_ros.TransformBroadcaster()
self.static_tf_broadcaster = tf2_ros.StaticTransformBroadcaster()
def setup(self) -> None:
# Load the map
rospy.loginfo("Loading HL map.")
hlmap = HLMap()
hlmap.load(self.path_to_map)
# Load the initial trajectory
rospy.loginfo("Loading trajectory.")
trajectory = Trajectory(self.path_to_trajectory)
self.set_origin(trajectory.get_origin())
# Starts the filter
self.mcl = DRMCL()
self.mcl.assign_map(hlmap)
self.mcl.assign_trajectory(trajectory)
rospy.loginfo("Loading trajectory intersections.")
self.mcl.load_trajectory_intersections()
self.mcl.sample(n_particles = self.n_init_particles)
if self.flag_publish_utm_to_map_tf:
# Publish utm->map transform
origin = self.origin
translation = np.array([origin.x, origin.y, 0])
rotation = np.array([1.0, 0.0, 0.0, 0.0])
tf_from_utm_to_map = tf_from_arrays(translation, rotation, "utm", "map")
tf_from_utm_to_map.header.stamp = rospy.Time.now()
self.static_tf_broadcaster.sendTransform(tf_from_utm_to_map)
rospy.loginfo("Published utm->map transform")
def set_origin(self, origin : Point) -> None:
self.origin = origin
#==========================================================================
# CALLBACKS
#==========================================================================
def offset_callback(self, msg : PoseWithCovarianceStamped) -> None:
if self.mcl is None:
return
# TODO: perform checking for frame id
translation = msg.pose.pose.position
#covariance = np.array( msg.pose.covariance ).reshape(6,6)
# TODO: use full translation and rotation as input
control_cmd = np.array( ( translation.x, translation.y ) )
if np.any(np.isnan(control_cmd)):
rospy.logwarn("Ignored nan offset.")
return
if np.linalg.norm(control_cmd) > 10.0:
rospy.logwarn("Ignored large offset.")
return
covariance = np.diag([0.5,0.5])
self.mcl.predict(control_cmd, covariance)
def object_detection_callback(self, msg : BoundingBoxes) -> None:
bboxes = msg.bounding_boxes
for bbox in bboxes:
detected_label = bbox.Class
if detected_label in ["stop sign", "traffic light"]:
weights = self.mcl.weigh_by_intersection_evidence(
detected_label,
model_sensitivity = self.model_sensitivity,
model_false_positive_rate = self.model_false_positive_rate
)
self.mcl.resample(weights)
#==========================================================================
# PUBLISHERS
#==========================================================================
def publish(self) -> None:
# Map origin
origin = self.origin
# Retrieve estimation
mean, covariance = self.mcl.get_position()
self.publish_state(mean, covariance, origin)
self.publish_particles(origin)
self.publish_tf(mean, origin)
def publish_state(self, mean : Point, covariance : np.ndarray, origin : Point) -> None:
msg = Odometry()
# Fill metadata
msg.header.frame_id = "map"
msg.child_frame_id = self.frame
msg.header.stamp = rospy.Time.now()
# Fill position
msg.pose.pose.position.x = mean.x - origin.x
msg.pose.pose.position.y = mean.y - origin.y
msg.pose.pose.position.z = 0.0 # TODO
# Fill orientation
msg.pose.pose.orientation.w = 1.0
msg.pose.pose.orientation.x = 0.
msg.pose.pose.orientation.y = 0.
msg.pose.pose.orientation.z = 0.
# Fill covariance
out_covariance = np.diag([1e9] * 6)#, dtype=float)
out_covariance[:2,:2] = covariance[:2,:2]
msg.pose.covariance = out_covariance.flatten()
# Publish!
self.state_publisher.publish(msg)
def publish_tf(self, mean : Point, origin : Point) -> None:
# Publish map->frame transform
if self.flag_publish_map_to_frame_tf:
translation = np.array([ mean.x - origin.x, mean.y - origin.y, 0.0 ])
rotation = rotation # TODO: fill with a true rotation
rospy.loginfo(mean)
tf_from_map_to_frame = tf_from_arrays(translation, rotation, "map", self.frame)
tf_from_map_to_frame.header.stamp = rospy.Time.now()
self.transform_broadcaster.sendTransform(tf_from_map_to_frame)
def publish_particles(self, origin : Point) -> None:
points = self.mcl.get_particles()
# The particles' point cloud.
point_cloud_array = np.zeros(
len(points),
dtype=[
("x", np.float32),
("y", np.float32),
("z", np.float32)
]
)
point_cloud_array["x"] = [ p.x - origin.x for p in points ]
point_cloud_array["y"] = [ p.y - origin.y for p in points ]
point_cloud_array["z"] = np.zeros(len(points))
cloud_msg = ros_numpy.msgify(ros_numpy.numpy_msg(PointCloud2), point_cloud_array)
# The particles are referenced based on the map origin
cloud_msg.header.frame_id = "map"
self.particles_publisher.publish(cloud_msg)
#==========================================================================
def main() -> int:
node = DRNode()
node.spin()
return 0
if __name__ == "__main__":
sys.exit(main()) | [
"rospy.logwarn",
"rospy.init_node",
"tf2_ros.StaticTransformBroadcaster",
"numpy.array",
"rospy.Rate",
"numpy.linalg.norm",
"mapless_mcl.hlmap.HLMap",
"helpers.convert.tf_from_arrays",
"nav_msgs.msg.Odometry",
"ros_numpy.numpy_msg",
"rospy.Subscriber",
"rospy.get_param",
"tf2_ros.TransformBr... | [((567, 581), 'rospy.Rate', 'rospy.Rate', (['(10)'], {}), '(10)\n', (577, 581), False, 'import rospy\n'), ((802, 843), 'rospy.init_node', 'rospy.init_node', (['"""mapless_mcl_ros_runner"""'], {}), "('mapless_mcl_ros_runner')\n", (817, 843), False, 'import rospy\n'), ((992, 1021), 'rospy.get_param', 'rospy.get_param', (['"""~particles"""'], {}), "('~particles')\n", (1007, 1021), False, 'import rospy\n'), ((1056, 1099), 'rospy.get_param', 'rospy.get_param', (['"""~frame"""', '"""base_footprint"""'], {}), "('~frame', 'base_footprint')\n", (1071, 1099), False, 'import rospy\n'), ((1146, 1196), 'rospy.get_param', 'rospy.get_param', (['"""~publish_map_to_frame_tf"""', '(False)'], {}), "('~publish_map_to_frame_tf', False)\n", (1161, 1196), False, 'import rospy\n'), ((1243, 1291), 'rospy.get_param', 'rospy.get_param', (['"""~publish_utm_to_map_tf"""', '(False)'], {}), "('~publish_utm_to_map_tf', False)\n", (1258, 1291), False, 'import rospy\n'), ((1780, 1887), 'rospy.Subscriber', 'rospy.Subscriber', (['"""/odometry/offset"""', 'PoseWithCovarianceStamped', 'self.offset_callback'], {'queue_size': '(1000)'}), "('/odometry/offset', PoseWithCovarianceStamped, self.\n offset_callback, queue_size=1000)\n", (1796, 1887), False, 'import rospy\n'), ((1929, 2045), 'rospy.Subscriber', 'rospy.Subscriber', (['"""/darknet_ros/bounding_boxes"""', 'BoundingBoxes', 'self.object_detection_callback'], {'queue_size': '(1000)'}), "('/darknet_ros/bounding_boxes', BoundingBoxes, self.\n object_detection_callback, queue_size=1000)\n", (1945, 2045), False, 'import rospy\n'), ((2087, 2154), 'rospy.Publisher', 'rospy.Publisher', (['"""/mcl/drmcl/particles"""', 'PointCloud2'], {'queue_size': '(10)'}), "('/mcl/drmcl/particles', PointCloud2, queue_size=10)\n", (2102, 2154), False, 'import rospy\n'), ((2201, 2259), 'rospy.Publisher', 'rospy.Publisher', (['"""/mcl/drmcl/pose"""', 'Odometry'], {'queue_size': '(1)'}), "('/mcl/drmcl/pose', Odometry, queue_size=1)\n", (2216, 2259), 
False, 'import rospy\n'), ((2306, 2336), 'tf2_ros.TransformBroadcaster', 'tf2_ros.TransformBroadcaster', ([], {}), '()\n', (2334, 2336), False, 'import tf2_ros\n'), ((2383, 2419), 'tf2_ros.StaticTransformBroadcaster', 'tf2_ros.StaticTransformBroadcaster', ([], {}), '()\n', (2417, 2419), False, 'import tf2_ros\n'), ((2489, 2521), 'rospy.loginfo', 'rospy.loginfo', (['"""Loading HL map."""'], {}), "('Loading HL map.')\n", (2502, 2521), False, 'import rospy\n'), ((2538, 2545), 'mapless_mcl.hlmap.HLMap', 'HLMap', ([], {}), '()\n', (2543, 2545), False, 'from mapless_mcl.hlmap import HLMap\n'), ((2638, 2674), 'rospy.loginfo', 'rospy.loginfo', (['"""Loading trajectory."""'], {}), "('Loading trajectory.')\n", (2651, 2674), False, 'import rospy\n'), ((2696, 2731), 'mapless_mcl.trajectory.Trajectory', 'Trajectory', (['self.path_to_trajectory'], {}), '(self.path_to_trajectory)\n', (2706, 2731), False, 'from mapless_mcl.trajectory import Trajectory\n'), ((2829, 2836), 'mapless_mcl.drmcl.DRMCL', 'DRMCL', ([], {}), '()\n', (2834, 2836), False, 'from mapless_mcl.drmcl import DRMCL\n'), ((2927, 2977), 'rospy.loginfo', 'rospy.loginfo', (['"""Loading trajectory intersections."""'], {}), "('Loading trajectory intersections.')\n", (2940, 2977), False, 'import rospy\n'), ((4220, 4260), 'numpy.array', 'np.array', (['(translation.x, translation.y)'], {}), '((translation.x, translation.y))\n', (4228, 4260), True, 'import numpy as np\n'), ((4514, 4533), 'numpy.diag', 'np.diag', (['[0.5, 0.5]'], {}), '([0.5, 0.5])\n', (4521, 4533), True, 'import numpy as np\n'), ((5713, 5723), 'nav_msgs.msg.Odometry', 'Odometry', ([], {}), '()\n', (5721, 5723), False, 'from nav_msgs.msg import Odometry\n'), ((5852, 5868), 'rospy.Time.now', 'rospy.Time.now', ([], {}), '()\n', (5866, 5868), False, 'import rospy\n'), ((6288, 6315), 'numpy.diag', 'np.diag', (['([1000000000.0] * 6)'], {}), '([1000000000.0] * 6)\n', (6295, 6315), True, 'import numpy as np\n'), ((600, 619), 'rospy.is_shutdown', 'rospy.is_shutdown', 
([], {}), '()\n', (617, 619), False, 'import rospy\n'), ((1351, 1379), 'rospy.get_param', 'rospy.get_param', (['"""~map_path"""'], {}), "('~map_path')\n", (1366, 1379), False, 'import rospy\n'), ((1431, 1468), 'rospy.get_param', 'rospy.get_param', (['"""~model_sensitivity"""'], {}), "('~model_sensitivity')\n", (1446, 1468), False, 'import rospy\n'), ((1520, 1565), 'rospy.get_param', 'rospy.get_param', (['"""~model_false_positive_rate"""'], {}), "('~model_false_positive_rate')\n", (1535, 1565), False, 'import rospy\n'), ((1627, 1662), 'rospy.get_param', 'rospy.get_param', (['"""~trajectory_path"""'], {}), "('~trajectory_path')\n", (1642, 1662), False, 'import rospy\n'), ((3233, 3266), 'numpy.array', 'np.array', (['[origin.x, origin.y, 0]'], {}), '([origin.x, origin.y, 0])\n', (3241, 3266), True, 'import numpy as np\n'), ((3290, 3320), 'numpy.array', 'np.array', (['[1.0, 0.0, 0.0, 0.0]'], {}), '([1.0, 0.0, 0.0, 0.0])\n', (3298, 3320), True, 'import numpy as np\n'), ((3354, 3405), 'helpers.convert.tf_from_arrays', 'tf_from_arrays', (['translation', 'rotation', '"""utm"""', '"""map"""'], {}), "(translation, rotation, 'utm', 'map')\n", (3368, 3405), False, 'from helpers.convert import tf_from_arrays\n'), ((3452, 3468), 'rospy.Time.now', 'rospy.Time.now', ([], {}), '()\n', (3466, 3468), False, 'import rospy\n'), ((3554, 3599), 'rospy.loginfo', 'rospy.loginfo', (['"""Published utm->map transform"""'], {}), "('Published utm->map transform')\n", (3567, 3599), False, 'import rospy\n'), ((4283, 4304), 'numpy.isnan', 'np.isnan', (['control_cmd'], {}), '(control_cmd)\n', (4291, 4304), True, 'import numpy as np\n'), ((4319, 4355), 'rospy.logwarn', 'rospy.logwarn', (['"""Ignored nan offset."""'], {}), "('Ignored nan offset.')\n", (4332, 4355), False, 'import rospy\n'), ((4386, 4413), 'numpy.linalg.norm', 'np.linalg.norm', (['control_cmd'], {}), '(control_cmd)\n', (4400, 4413), True, 'import numpy as np\n'), ((4434, 4472), 'rospy.logwarn', 'rospy.logwarn', (['"""Ignored large 
offset."""'], {}), "('Ignored large offset.')\n", (4447, 4472), False, 'import rospy\n'), ((6673, 6726), 'numpy.array', 'np.array', (['[mean.x - origin.x, mean.y - origin.y, 0.0]'], {}), '([mean.x - origin.x, mean.y - origin.y, 0.0])\n', (6681, 6726), True, 'import numpy as np\n'), ((6807, 6826), 'rospy.loginfo', 'rospy.loginfo', (['mean'], {}), '(mean)\n', (6820, 6826), False, 'import rospy\n'), ((6862, 6918), 'helpers.convert.tf_from_arrays', 'tf_from_arrays', (['translation', 'rotation', '"""map"""', 'self.frame'], {}), "(translation, rotation, 'map', self.frame)\n", (6876, 6918), False, 'from helpers.convert import tf_from_arrays\n'), ((6967, 6983), 'rospy.Time.now', 'rospy.Time.now', ([], {}), '()\n', (6981, 6983), False, 'import rospy\n'), ((7663, 7695), 'ros_numpy.numpy_msg', 'ros_numpy.numpy_msg', (['PointCloud2'], {}), '(PointCloud2)\n', (7682, 7695), False, 'import ros_numpy\n')] |
import numpy as np
#python 3 自动继承object
class GaussianFeatures(object):
"""
Gaussian Features
Parameters
----------
mean: (n_features, ndim) or (n_features,) ndarray
places to locate gaussian function at
var: float
variance of the gaussian function
"""
# python 中 __init__ 方法第一参数永远是self, 代表实例
def __init__(self, mean, var):
# be sure that mean is ndarray and not scalar
if mean.ndim == 1:
mean = mean[:, None]
else:
assert mean.ndim == 2
# be sure that var has the right type
assert isinstance(var, (int,float))
#python 中 实例的参数想加就加
self.__mean = mean
self.__var = var
def _gauss(self, x, mean):
"""
compute the gaussian function
Parameters
----------
x: (sample_size, ndim) or (sample_size, )
input array
"""
result = np.exp(-0.5 * np.sum(np.square(x - mean), axis = -1) / self.__var)
print("result shape",result.shape)
return result
def transform(self, x):
"""
transform input array with gaussian features
Parameters
----------
x: (sample_size, ndim) or (sample_size, )
input array
Returns
-------
output : (sample_size, n_features)
gaussian features
"""
if x.ndim == 1:
x = x[:, None]
else:
assert x.ndim == 2
assert np.size(x, axis = 1) == np.size(self.__mean, axis = 1)
basis = [np.ones(len(x))]
for m in self.__mean:
basis.append(self._gauss(x, m))
return np.asarray(basis).transpose()
| [
"numpy.size",
"numpy.asarray",
"numpy.square"
] | [((1528, 1546), 'numpy.size', 'np.size', (['x'], {'axis': '(1)'}), '(x, axis=1)\n', (1535, 1546), True, 'import numpy as np\n'), ((1552, 1580), 'numpy.size', 'np.size', (['self.__mean'], {'axis': '(1)'}), '(self.__mean, axis=1)\n', (1559, 1580), True, 'import numpy as np\n'), ((1717, 1734), 'numpy.asarray', 'np.asarray', (['basis'], {}), '(basis)\n', (1727, 1734), True, 'import numpy as np\n'), ((973, 992), 'numpy.square', 'np.square', (['(x - mean)'], {}), '(x - mean)\n', (982, 992), True, 'import numpy as np\n')] |
from mnist import MNIST
import numpy as np
import math
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.metrics import accuracy_score
from collections import Counter
import matplotlib.pyplot as plt
import time
# --------------------------------------------------------
# Global Variables
training_size = 3000
validation_size = 1000
testing_size = 1000
# --------------------------------------------------------
"""
Predicts the instances labels using top k neighbours
"""
def predict(neighbours, k):
top_k = [Counter(x[:k]) for x in neighbours]
predicted_labels = [x.most_common(1)[0][0] for x in top_k]
return predicted_labels
# --------------------------------------------------------
"""
Finds the optimal value of k using cross validation.
The value of k with minimum error is the optimal one
"""
def find_k(neighbours, real_validation_labels, similarity_measure):
k_values = []
error_values = []
real_validation_labels = list(real_validation_labels)
"""
Its a convention to start from k = 1 to k = sqrt(N) where N is the size of training data
"""
for k in range(math.ceil(math.sqrt(training_size))):
k += 1
predicted_labels = predict(neighbours, k)
# check accuracy
acc = accuracy_score(real_validation_labels, predicted_labels)
k_values.append(k)
error_values.append(1 - acc)
if similarity_measure == 1:
s = "Cosine Similarity"
else:
s = "Euclidean Distance"
k = k_values[np.argmin(error_values)]
""" Plotting the Validation Error Curve """
plt.ylabel('Validation Error', fontsize=14)
plt.xlabel('K', fontsize=14)
plt.title("Validation Error Curve using %s" % s, fontsize=16, color='green')
plt.plot(k_values, error_values, 'bo--')
figure = plt.gcf() # get current figure
figure.set_size_inches(13, 7)
plt.savefig("Validation Error Curve using %s.png" % s, dpi=300)
plt.clf()
"""
The value of K which gave minimum validation error is the optimal value of k
"""
return k_values[np.argmin(error_values)]
# --------------------------------------------------------
def knn(train_images, test_images, train_labels, similarity_measure):
if similarity_measure == 1:
# compute cosine similarity
v = [[np.dot(x, y)/(np.linalg.norm(x) * np.linalg.norm(y)) for y in train_images] for x in test_images]
# v = cosine_similarity(test_images, train_images)
r = True
else:
# compute euclidean distance
v = [[np.sum((x - y) ** 2) for y in train_images] for x in test_images]
r = False
# append labels
v = [[(x[i], train_labels[i]) for i in range(len(x))] for x in v]
# sort in descending order
[x.sort(key=lambda y: y[0], reverse=r) for x in v]
# get all neighbours
neighbours = [[n for similarity, n in x] for x in v]
return neighbours
# --------------------------------------------------------
"""
Note: This is an experiment in which first the optimal value of K is determined using the
cross validation technique and then its used to classify test images. This experiment
is run twice. Once for Cosine Similarity and once for Euclidean Distance.
"""
def run_experiment(train_images, train_labels, test_images, test_labels, validation_images, validation_labels,
similarity_measure):
"""
First finding the optimal value of K using validation images
and then using it to classify test images
"""
if similarity_measure == 1:
s = "Cosine Similarity"
else:
s = "Euclidean Distance"
print("------------------------------------------")
print("Running Experiment using %s" % s)
print("------------------------------------------")
print("Finding Optimal Value of K ...")
neighbours_labels = knn(train_images, validation_images, train_labels, similarity_measure)
k = find_k(neighbours_labels, validation_labels, similarity_measure)
print("Optimal Value of K using Cross Validation is: %d" % k)
print("Classifying Test Images ...")
start_time = time.clock()
neighbours_labels = knn(train_images, test_images, train_labels, similarity_measure)
predicted_labels = predict(neighbours_labels, k)
print("Prediction Time: %.2f seconds" % (time.clock() - start_time))
print("Test Images Classified!")
accuracy = accuracy_score(test_labels, predicted_labels) * 100
print("KNN with k = %d" % k)
print("Accuracy: %f" % accuracy, "%")
print("---------------------\n")
# --------------------------------------------------------
def main():
# load data
data = MNIST('samples')
train_images, train_labels = data.load_training()
test_images, test_labels = data.load_testing()
validation_images = np.array(train_images[training_size:training_size + validation_size])
validation_labels = np.array(train_labels[training_size:training_size + validation_size])
train_images = np.array(train_images[:training_size])
train_labels = np.array(train_labels[:training_size])
test_images = np.array(test_images[:testing_size])
test_labels = np.array(test_labels[:testing_size])
"""Rescaling Data"""
train_images = train_images/255
test_images = test_images/255
validation_images = validation_images/255
""" run knn with cosine similarity as similarity measure """
run_experiment(train_images, train_labels, test_images, test_labels, validation_images, validation_labels, 1)
""" run knn with euclidean distance as similarity measure """
run_experiment(train_images, train_labels, test_images, test_labels, validation_images, validation_labels, 2)
# --------------------------------------------------------
# get things rolling
main()
| [
"mnist.MNIST",
"matplotlib.pyplot.savefig",
"time.clock",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.gcf",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.clf",
"math.sqrt",
"collections.Counter",
"numpy.array",
"numpy.sum",
"numpy.dot",
"numpy.linalg.norm",
... | [((1616, 1659), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Validation Error"""'], {'fontsize': '(14)'}), "('Validation Error', fontsize=14)\n", (1626, 1659), True, 'import matplotlib.pyplot as plt\n'), ((1664, 1692), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""K"""'], {'fontsize': '(14)'}), "('K', fontsize=14)\n", (1674, 1692), True, 'import matplotlib.pyplot as plt\n'), ((1697, 1773), 'matplotlib.pyplot.title', 'plt.title', (["('Validation Error Curve using %s' % s)"], {'fontsize': '(16)', 'color': '"""green"""'}), "('Validation Error Curve using %s' % s, fontsize=16, color='green')\n", (1706, 1773), True, 'import matplotlib.pyplot as plt\n'), ((1778, 1818), 'matplotlib.pyplot.plot', 'plt.plot', (['k_values', 'error_values', '"""bo--"""'], {}), "(k_values, error_values, 'bo--')\n", (1786, 1818), True, 'import matplotlib.pyplot as plt\n'), ((1832, 1841), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (1839, 1841), True, 'import matplotlib.pyplot as plt\n'), ((1903, 1966), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('Validation Error Curve using %s.png' % s)"], {'dpi': '(300)'}), "('Validation Error Curve using %s.png' % s, dpi=300)\n", (1914, 1966), True, 'import matplotlib.pyplot as plt\n'), ((1971, 1980), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (1978, 1980), True, 'import matplotlib.pyplot as plt\n'), ((4159, 4171), 'time.clock', 'time.clock', ([], {}), '()\n', (4169, 4171), False, 'import time\n'), ((4709, 4725), 'mnist.MNIST', 'MNIST', (['"""samples"""'], {}), "('samples')\n", (4714, 4725), False, 'from mnist import MNIST\n'), ((4857, 4926), 'numpy.array', 'np.array', (['train_images[training_size:training_size + validation_size]'], {}), '(train_images[training_size:training_size + validation_size])\n', (4865, 4926), True, 'import numpy as np\n'), ((4951, 5020), 'numpy.array', 'np.array', (['train_labels[training_size:training_size + validation_size]'], {}), '(train_labels[training_size:training_size + 
validation_size])\n', (4959, 5020), True, 'import numpy as np\n'), ((5041, 5079), 'numpy.array', 'np.array', (['train_images[:training_size]'], {}), '(train_images[:training_size])\n', (5049, 5079), True, 'import numpy as np\n'), ((5099, 5137), 'numpy.array', 'np.array', (['train_labels[:training_size]'], {}), '(train_labels[:training_size])\n', (5107, 5137), True, 'import numpy as np\n'), ((5157, 5193), 'numpy.array', 'np.array', (['test_images[:testing_size]'], {}), '(test_images[:testing_size])\n', (5165, 5193), True, 'import numpy as np\n'), ((5212, 5248), 'numpy.array', 'np.array', (['test_labels[:testing_size]'], {}), '(test_labels[:testing_size])\n', (5220, 5248), True, 'import numpy as np\n'), ((541, 555), 'collections.Counter', 'Counter', (['x[:k]'], {}), '(x[:k])\n', (548, 555), False, 'from collections import Counter\n'), ((1287, 1343), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['real_validation_labels', 'predicted_labels'], {}), '(real_validation_labels, predicted_labels)\n', (1301, 1343), False, 'from sklearn.metrics import accuracy_score\n'), ((1535, 1558), 'numpy.argmin', 'np.argmin', (['error_values'], {}), '(error_values)\n', (1544, 1558), True, 'import numpy as np\n'), ((2099, 2122), 'numpy.argmin', 'np.argmin', (['error_values'], {}), '(error_values)\n', (2108, 2122), True, 'import numpy as np\n'), ((4442, 4487), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['test_labels', 'predicted_labels'], {}), '(test_labels, predicted_labels)\n', (4456, 4487), False, 'from sklearn.metrics import accuracy_score\n'), ((1153, 1177), 'math.sqrt', 'math.sqrt', (['training_size'], {}), '(training_size)\n', (1162, 1177), False, 'import math\n'), ((2574, 2594), 'numpy.sum', 'np.sum', (['((x - y) ** 2)'], {}), '((x - y) ** 2)\n', (2580, 2594), True, 'import numpy as np\n'), ((4361, 4373), 'time.clock', 'time.clock', ([], {}), '()\n', (4371, 4373), False, 'import time\n'), ((2339, 2351), 'numpy.dot', 'np.dot', (['x', 'y'], {}), '(x, y)\n', (2345, 
2351), True, 'import numpy as np\n'), ((2353, 2370), 'numpy.linalg.norm', 'np.linalg.norm', (['x'], {}), '(x)\n', (2367, 2370), True, 'import numpy as np\n'), ((2373, 2390), 'numpy.linalg.norm', 'np.linalg.norm', (['y'], {}), '(y)\n', (2387, 2390), True, 'import numpy as np\n')] |
import torch
import os
import argparse
from glob import glob
import soundfile as sf
from torchaudio.compliance.kaldi import mfcc
from osdc.utils.oladd import overlap_add
import numpy as np
from osdc.features.ola_feats import compute_feats_windowed
import yaml
from train import OSDC_AMI
parser = argparse.ArgumentParser("Single-Channel inference, average logits")
parser.add_argument("exp_dir", type=str)
parser.add_argument("checkpoint_name", type=str)
parser.add_argument("wav_dir", type=str)
parser.add_argument("out_dir", type=str)
parser.add_argument("gpus", type=str, default="0")
parser.add_argument("--window_size", type=int, default=200)
parser.add_argument("--lookahead", type=int, default=200)
parser.add_argument("--lookbehind", type=int, default=200)
parser.add_argument("--regex", type=str, default="")
def plain_single_file_predict(model, wav_dir, train_configs, out_dir, window_size=400, lookahead=200, lookbehind=200, regex=""):
model = model.eval().cuda()
wavs = glob(os.path.join(wav_dir, "**/*{}*.wav".format(regex)), recursive=True)
assert len(wavs) > 0, "No file found"
for wav in wavs:
print("Processing File {}".format(wav))
audio, _ = sf.read(wav)
if train_configs["feats"]["type"] == "mfcc_kaldi":
feats_func = lambda x: mfcc(torch.from_numpy(x.astype("float32").reshape(1, -1)),
**train_configs["mfcc_kaldi"]).transpose(0, 1)
else:
raise NotImplementedError
tot_feats = compute_feats_windowed(feats_func, audio)
tot_feats = tot_feats.detach().cpu().numpy()
pred_func = lambda x : model(torch.from_numpy(x).unsqueeze(0).cuda()).detach().cpu().numpy()
preds = overlap_add(tot_feats, pred_func, window_size, window_size // 2, lookahead=lookahead, lookbehind=lookbehind)
out_file = os.path.join(out_dir, wav.split("/")[-1].split(".wav")[0] + ".logits")
np.save(out_file, preds)
if __name__ == "__main__":
args = parser.parse_args()
with open(os.path.join(args.exp_dir, "confs.yml"), "r") as f:
confs = yaml.load(f)
# test if compatible with lightning
confs.update(args.__dict__)
model = OSDC_AMI(confs)
if confs["checkpoint_name"].startswith("avg"):
state_dict = torch.load(os.path.join(confs["exp_dir"], confs["checkpoint_name"]),
map_location='cpu')
else:
state_dict = torch.load(os.path.join(confs["exp_dir"], confs["checkpoint_name"]),
map_location='cpu')["state_dict"]
model.load_state_dict(state_dict)
model = model.model
os.makedirs(confs["out_dir"], exist_ok=True)
plain_single_file_predict(model, confs["wav_dir"],
confs, confs["out_dir"], window_size=args.window_size,
lookahead=args.lookahead, lookbehind=args.lookbehind, regex=args.regex)
| [
"argparse.ArgumentParser",
"osdc.features.ola_feats.compute_feats_windowed",
"os.makedirs",
"os.path.join",
"yaml.load",
"train.OSDC_AMI",
"torch.from_numpy",
"soundfile.read",
"numpy.save",
"osdc.utils.oladd.overlap_add"
] | [((297, 364), 'argparse.ArgumentParser', 'argparse.ArgumentParser', (['"""Single-Channel inference, average logits"""'], {}), "('Single-Channel inference, average logits')\n", (320, 364), False, 'import argparse\n'), ((2214, 2229), 'train.OSDC_AMI', 'OSDC_AMI', (['confs'], {}), '(confs)\n', (2222, 2229), False, 'from train import OSDC_AMI\n'), ((2654, 2698), 'os.makedirs', 'os.makedirs', (["confs['out_dir']"], {'exist_ok': '(True)'}), "(confs['out_dir'], exist_ok=True)\n", (2665, 2698), False, 'import os\n'), ((1197, 1209), 'soundfile.read', 'sf.read', (['wav'], {}), '(wav)\n', (1204, 1209), True, 'import soundfile as sf\n'), ((1530, 1571), 'osdc.features.ola_feats.compute_feats_windowed', 'compute_feats_windowed', (['feats_func', 'audio'], {}), '(feats_func, audio)\n', (1552, 1571), False, 'from osdc.features.ola_feats import compute_feats_windowed\n'), ((1742, 1855), 'osdc.utils.oladd.overlap_add', 'overlap_add', (['tot_feats', 'pred_func', 'window_size', '(window_size // 2)'], {'lookahead': 'lookahead', 'lookbehind': 'lookbehind'}), '(tot_feats, pred_func, window_size, window_size // 2, lookahead=\n lookahead, lookbehind=lookbehind)\n', (1753, 1855), False, 'from osdc.utils.oladd import overlap_add\n'), ((1949, 1973), 'numpy.save', 'np.save', (['out_file', 'preds'], {}), '(out_file, preds)\n', (1956, 1973), True, 'import numpy as np\n'), ((2116, 2128), 'yaml.load', 'yaml.load', (['f'], {}), '(f)\n', (2125, 2128), False, 'import yaml\n'), ((2048, 2087), 'os.path.join', 'os.path.join', (['args.exp_dir', '"""confs.yml"""'], {}), "(args.exp_dir, 'confs.yml')\n", (2060, 2087), False, 'import os\n'), ((2313, 2369), 'os.path.join', 'os.path.join', (["confs['exp_dir']", "confs['checkpoint_name']"], {}), "(confs['exp_dir'], confs['checkpoint_name'])\n", (2325, 2369), False, 'import os\n'), ((2467, 2523), 'os.path.join', 'os.path.join', (["confs['exp_dir']", "confs['checkpoint_name']"], {}), "(confs['exp_dir'], confs['checkpoint_name'])\n", (2479, 2523), False, 
'import os\n'), ((1662, 1681), 'torch.from_numpy', 'torch.from_numpy', (['x'], {}), '(x)\n', (1678, 1681), False, 'import torch\n')] |
import argparse
import os
import cv2
import csv
import sys
import operator
import numpy as np
import config as cf
import torch
import torch.nn as nn
import torch.backends.cudnn as cudnn
import torchvision
from torchvision import datasets, models, transforms
from networks import *
from torch.autograd import Variable
from PIL import Image
count = 0
parser = argparse.ArgumentParser(description='Pytorch Cell Classification weight upload')
parser.add_argument('--net_type', default='resnet', type=str, help='model')
parser.add_argument('--depth', default=50, type=int, help='depth of model')
parser.add_argument('--start', default=1, type=int, help='starting index')
parser.add_argument('--finish', default=5, type=int, help='finishing index')
args = parser.parse_args()
if (sys.version_info > (3,0)):
test_transform = transforms.Compose([
transforms.Resize(224),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize(cf.mean, cf.std)
])
else:
test_transform = transforms.Compose([
transforms.Scale(224),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize(cf.mean, cf.std)
])
def getNetwork(args):
if (args.net_type == 'alexnet'):
file_name = 'alexnet'
elif (args.net_type == 'vggnet'):
file_name = 'vgg-%s' %(args.depth)
elif (args.net_type == 'resnet'):
file_name = 'resnet-%s' %(args.depth)
else:
print('[Error]: Network should be either [alexnet / vgget / resnet]')
sys.exit(1)
return file_name
def check_and_mkdir(in_dir):
if not os.path.exists(in_dir):
print("Creating %s..." %in_dir)
os.makedirs(in_dir)
def softmax(x):
return np.exp(x) / np.sum(np.exp(x), axis=0)
def inference_crop(model, cropped_img):
# check if cropped img is a torch Variable, if not, convert
use_gpu = torch.cuda.is_available()
if test_transform is not None:
img = test_transform(Image.fromarray(cropped_img, mode='RGB'))
inputs = img
with torch.no_grad():
inputs = Variable(inputs)
if use_gpu:
inputs = inputs.cuda()
inputs = inputs.view(1, inputs.size(0), inputs.size(1), inputs.size(2))
outputs = model(inputs)
softmax_res = softmax(outputs.data.cpu().numpy()[0])
index, score = max(enumerate(softmax_res), key=operator.itemgetter(1))
return index, score
def inference(original_img, mask_img, inference_csv, model):
global count
ret, threshed_img = cv2.threshold(cv2.cvtColor(mask_img, cv2.COLOR_BGR2GRAY), 100, 255, cv2.THRESH_BINARY)
kernel = np.ones((3,3), np.uint8)
closing = cv2.morphologyEx(threshed_img, cv2.MORPH_CLOSE, kernel, iterations=4)
_, contours, _ = cv2.findContours(closing, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
fieldnames = ['prediction', 'x', 'y', 'w', 'h']
writer = csv.DictWriter(inference_csv, fieldnames=fieldnames)
for cnt in contours:
area = cv2.contourArea(cnt)
x, y, w, h = cv2.boundingRect(cnt)
crop = original_img[y:y+h, x:x+w]
crop = cv2.cvtColor(crop, cv2.COLOR_BGR2RGB)
idx, score = inference_crop(model, crop)
answ = dset_classes[idx]
#print("./%s_%s.png" %(answ, str(count)))
#cv2.imwrite("./%s_%s.png" %(answ, str(count)), crop)
count += 1
writer.writerow({
'prediction': answ,
'x': x,
'y': y,
'w': w,
'h': h
})
def compute_IoU(img, back_img, pred_csv, answ_csv):
"""
@ Input:
csv files : ['prediction', 'x', 'y', 'w', 'h']
"""
IoU = 0
pred_reader = csv.reader(pred_csv)
answ_reader = csv.reader(answ_csv)
lst_A, lst_B = [], []
for row in pred_reader:
#print("Predictions")
#print(row)
lst_A.append(row)
pred = row[0]
for row in answ_reader:
#print("Answers")
#print(row)
lst_B.append(row)
label = row[0]
has_printed_label, count_label, count_pred, IoU_lst = False, 0, 0, []
for comp_A in lst_A:
A_x, A_y, A_w, A_h = map(int, comp_A[1:])
pred = comp_A[0]
#print(pred)
if (pred == 'RBC'):
continue
count_pred += 1
cv2.putText(back_img, "Pred = %s" %str(pred), (A_x+A_w, A_y+A_h),
cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0,255,0), 2, cv2.LINE_AA)
cv2.rectangle(back_img, (A_x, A_y), (A_x+A_w, A_y+A_h), (0,255,0), 2)
for comp_B in lst_B:
B_x, B_y, B_w, B_h = map(int, comp_B[1:])
label = comp_B[0]
in_x1 = max(A_x, B_x)
in_x2 = min(A_x+A_w, B_x+B_w)
in_w = in_x2 - in_x1
in_y1 = max(A_y, B_y)
in_y2 = min(A_y+A_h, B_y+B_h)
in_h = in_y2 - in_y1
if (in_w < 0 or in_h <0):
interArea = 0
else:
interArea = in_w * in_h
unionArea = (A_w*A_h) + (B_w*B_h) - interArea
IoU = float(interArea) / float(unionArea)
if (has_printed_label == False):
count_label += 1
cv2.putText(back_img, "Label = %s" %str(label), (B_x, B_y),
cv2.FONT_HERSHEY_SIMPLEX, 0.8, (255,0,0), 2, cv2.LINE_AA)
cv2.rectangle(back_img, (B_x, B_y), (B_x+B_w, B_y+B_h), (255, 0, 0), 2)
if(IoU > 0):
cv2.rectangle(back_img, (in_x1, in_y1), (in_x2, in_y2), (0,0,255), 2)
cv2.putText(back_img, "IoU = %s" %str(IoU), (in_x1+int(in_w/2), in_y1+int(in_h/2)),
cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0,0,255), 2, cv2.LINE_AA)
IoU_lst.append(IoU)
has_printed_label = True
#return float(sum(IoU_lst))/len(IoU_lst), back_img
if count_pred > count_label:
count_gt = count_pred
else:
count_gt = count_label
return float(sum(IoU_lst))/float(count_gt), back_img
if __name__ == "__main__":
for i in range(args.start, args.finish+1):
trainset_dir = cf.data_base.split("/")[-1]+os.sep
dsets = datasets.ImageFolder(os.path.join(cf.aug_base, 'train'))
dset_classes = dsets.classes
model_dir = '../3_classifier/checkpoint/'
model_name = model_dir + trainset_dir
file_name = getNetwork(args)
assert os.path.isdir(model_dir), '[Error]: No checkpoint dir found!'
assert os.path.isdir(model_name), '[Error]: There is no model weight to upload!'
checkpoint = torch.load(model_name+file_name+".t7")
model = checkpoint['model']
in_dir = './results/%s/' %cf.name
if not os.path.exists(in_dir):
print("There is no result directory")
sys.exit(1)
img = cv2.imread(cf.test_dir + 'TEST%d/TEST%d.png' %(i,i))
mask_img = cv2.imread(in_dir + 'masks/TEST%d.png' %i)
check_and_mkdir(in_dir + '/inferenced/')
with open(in_dir + '/inferenced/TEST%d.csv' %i, 'w') as csvfile:
inference(img, mask_img, csvfile, model)
with open(in_dir + '/inferenced/TEST%d.csv' %i, 'r') as pred_csv:
with open(cf.test_dir + 'TEST%d/TEST%d.csv' %(i,i)) as answ_csv:
back_img = img
IoU, marked_img = compute_IoU(img, back_img, pred_csv, answ_csv)
print("TEST#%d : Average IOU = %s" %(i, str(IoU)))
cv2.imwrite(in_dir + "/inferenced/TEST%d.png" %i, marked_img)
| [
"csv.DictWriter",
"cv2.rectangle",
"torch.cuda.is_available",
"sys.exit",
"operator.itemgetter",
"os.path.exists",
"argparse.ArgumentParser",
"cv2.contourArea",
"numpy.exp",
"os.path.isdir",
"torchvision.transforms.ToTensor",
"torch.autograd.Variable",
"csv.reader",
"numpy.ones",
"torchv... | [((361, 446), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Pytorch Cell Classification weight upload"""'}), "(description='Pytorch Cell Classification weight upload'\n )\n", (384, 446), False, 'import argparse\n'), ((1902, 1927), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (1925, 1927), False, 'import torch\n'), ((2653, 2678), 'numpy.ones', 'np.ones', (['(3, 3)', 'np.uint8'], {}), '((3, 3), np.uint8)\n', (2660, 2678), True, 'import numpy as np\n'), ((2692, 2761), 'cv2.morphologyEx', 'cv2.morphologyEx', (['threshed_img', 'cv2.MORPH_CLOSE', 'kernel'], {'iterations': '(4)'}), '(threshed_img, cv2.MORPH_CLOSE, kernel, iterations=4)\n', (2708, 2761), False, 'import cv2\n'), ((2784, 2853), 'cv2.findContours', 'cv2.findContours', (['closing', 'cv2.RETR_EXTERNAL', 'cv2.CHAIN_APPROX_SIMPLE'], {}), '(closing, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n', (2800, 2853), False, 'import cv2\n'), ((2920, 2972), 'csv.DictWriter', 'csv.DictWriter', (['inference_csv'], {'fieldnames': 'fieldnames'}), '(inference_csv, fieldnames=fieldnames)\n', (2934, 2972), False, 'import csv\n'), ((3706, 3726), 'csv.reader', 'csv.reader', (['pred_csv'], {}), '(pred_csv)\n', (3716, 3726), False, 'import csv\n'), ((3745, 3765), 'csv.reader', 'csv.reader', (['answ_csv'], {}), '(answ_csv)\n', (3755, 3765), False, 'import csv\n'), ((1625, 1647), 'os.path.exists', 'os.path.exists', (['in_dir'], {}), '(in_dir)\n', (1639, 1647), False, 'import os\n'), ((1697, 1716), 'os.makedirs', 'os.makedirs', (['in_dir'], {}), '(in_dir)\n', (1708, 1716), False, 'import os\n'), ((1745, 1754), 'numpy.exp', 'np.exp', (['x'], {}), '(x)\n', (1751, 1754), True, 'import numpy as np\n'), ((2063, 2078), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (2076, 2078), False, 'import torch\n'), ((2097, 2113), 'torch.autograd.Variable', 'Variable', (['inputs'], {}), '(inputs)\n', (2105, 2113), False, 'from torch.autograd import Variable\n'), ((2567, 2609), 
'cv2.cvtColor', 'cv2.cvtColor', (['mask_img', 'cv2.COLOR_BGR2GRAY'], {}), '(mask_img, cv2.COLOR_BGR2GRAY)\n', (2579, 2609), False, 'import cv2\n'), ((3013, 3033), 'cv2.contourArea', 'cv2.contourArea', (['cnt'], {}), '(cnt)\n', (3028, 3033), False, 'import cv2\n'), ((3056, 3077), 'cv2.boundingRect', 'cv2.boundingRect', (['cnt'], {}), '(cnt)\n', (3072, 3077), False, 'import cv2\n'), ((3135, 3172), 'cv2.cvtColor', 'cv2.cvtColor', (['crop', 'cv2.COLOR_BGR2RGB'], {}), '(crop, cv2.COLOR_BGR2RGB)\n', (3147, 3172), False, 'import cv2\n'), ((4471, 4546), 'cv2.rectangle', 'cv2.rectangle', (['back_img', '(A_x, A_y)', '(A_x + A_w, A_y + A_h)', '(0, 255, 0)', '(2)'], {}), '(back_img, (A_x, A_y), (A_x + A_w, A_y + A_h), (0, 255, 0), 2)\n', (4484, 4546), False, 'import cv2\n'), ((6414, 6438), 'os.path.isdir', 'os.path.isdir', (['model_dir'], {}), '(model_dir)\n', (6427, 6438), False, 'import os\n'), ((6491, 6516), 'os.path.isdir', 'os.path.isdir', (['model_name'], {}), '(model_name)\n', (6504, 6516), False, 'import os\n'), ((6586, 6628), 'torch.load', 'torch.load', (["(model_name + file_name + '.t7')"], {}), "(model_name + file_name + '.t7')\n", (6596, 6628), False, 'import torch\n'), ((6832, 6886), 'cv2.imread', 'cv2.imread', (["(cf.test_dir + 'TEST%d/TEST%d.png' % (i, i))"], {}), "(cf.test_dir + 'TEST%d/TEST%d.png' % (i, i))\n", (6842, 6886), False, 'import cv2\n'), ((6904, 6947), 'cv2.imread', 'cv2.imread', (["(in_dir + 'masks/TEST%d.png' % i)"], {}), "(in_dir + 'masks/TEST%d.png' % i)\n", (6914, 6947), False, 'import cv2\n'), ((855, 877), 'torchvision.transforms.Resize', 'transforms.Resize', (['(224)'], {}), '(224)\n', (872, 877), False, 'from torchvision import datasets, models, transforms\n'), ((887, 913), 'torchvision.transforms.CenterCrop', 'transforms.CenterCrop', (['(224)'], {}), '(224)\n', (908, 913), False, 'from torchvision import datasets, models, transforms\n'), ((923, 944), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (942, 944), 
False, 'from torchvision import datasets, models, transforms\n'), ((954, 991), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['cf.mean', 'cf.std'], {}), '(cf.mean, cf.std)\n', (974, 991), False, 'from torchvision import datasets, models, transforms\n'), ((1055, 1076), 'torchvision.transforms.Scale', 'transforms.Scale', (['(224)'], {}), '(224)\n', (1071, 1076), False, 'from torchvision import datasets, models, transforms\n'), ((1086, 1112), 'torchvision.transforms.CenterCrop', 'transforms.CenterCrop', (['(224)'], {}), '(224)\n', (1107, 1112), False, 'from torchvision import datasets, models, transforms\n'), ((1122, 1143), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (1141, 1143), False, 'from torchvision import datasets, models, transforms\n'), ((1153, 1190), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['cf.mean', 'cf.std'], {}), '(cf.mean, cf.std)\n', (1173, 1190), False, 'from torchvision import datasets, models, transforms\n'), ((1764, 1773), 'numpy.exp', 'np.exp', (['x'], {}), '(x)\n', (1770, 1773), True, 'import numpy as np\n'), ((1993, 2033), 'PIL.Image.fromarray', 'Image.fromarray', (['cropped_img'], {'mode': '"""RGB"""'}), "(cropped_img, mode='RGB')\n", (2008, 2033), False, 'from PIL import Image\n'), ((6191, 6225), 'os.path.join', 'os.path.join', (['cf.aug_base', '"""train"""'], {}), "(cf.aug_base, 'train')\n", (6203, 6225), False, 'import os\n'), ((6719, 6741), 'os.path.exists', 'os.path.exists', (['in_dir'], {}), '(in_dir)\n', (6733, 6741), False, 'import os\n'), ((6805, 6816), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (6813, 6816), False, 'import sys\n'), ((1550, 1561), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (1558, 1561), False, 'import sys\n'), ((2400, 2422), 'operator.itemgetter', 'operator.itemgetter', (['(1)'], {}), '(1)\n', (2419, 2422), False, 'import operator\n'), ((5369, 5444), 'cv2.rectangle', 'cv2.rectangle', (['back_img', '(B_x, B_y)', '(B_x + B_w, B_y + B_h)', 
'(255, 0, 0)', '(2)'], {}), '(back_img, (B_x, B_y), (B_x + B_w, B_y + B_h), (255, 0, 0), 2)\n', (5382, 5444), False, 'import cv2\n'), ((5482, 5553), 'cv2.rectangle', 'cv2.rectangle', (['back_img', '(in_x1, in_y1)', '(in_x2, in_y2)', '(0, 0, 255)', '(2)'], {}), '(back_img, (in_x1, in_y1), (in_x2, in_y2), (0, 0, 255), 2)\n', (5495, 5553), False, 'import cv2\n'), ((6119, 6142), 'config.data_base.split', 'cf.data_base.split', (['"""/"""'], {}), "('/')\n", (6137, 6142), True, 'import config as cf\n'), ((7471, 7533), 'cv2.imwrite', 'cv2.imwrite', (["(in_dir + '/inferenced/TEST%d.png' % i)", 'marked_img'], {}), "(in_dir + '/inferenced/TEST%d.png' % i, marked_img)\n", (7482, 7533), False, 'import cv2\n')] |
import tensorflow as tf
import numpy as np
from nascd.xorandor.load_data import load_data
(x, y), _ = load_data()
class MyModel(tf.keras.Model):
def __init__(self):
super(MyModel, self).__init__()
self.dense1 = tf.keras.layers.Dense(10, activation=tf.nn.relu)
self.dense11 = tf.keras.layers.Dense(10, activation=tf.nn.relu)
self.dense12 = tf.keras.layers.Dense(10, activation=tf.nn.relu)
self.dense2 = tf.keras.layers.Dense(3, activation=tf.nn.sigmoid)
# self.dropout = tf.keras.layers.Dropout(0.5)
def call(self, inputs, training=False):
x = self.dense1(inputs)
x = self.dense11(x)
x = self.dense12(x)
# if training:
# x = self.dropout(x, training=training)
return self.dense2(x)
model = MyModel()
# code to bce loss: https://github.com/tensorflow/tensorflow/blob/v2.1.0/tensorflow/python/keras/backend.py#L4585-L4615
model.compile(loss="binary_crossentropy", metrics=[tf.keras.metrics.binary_accuracy])
model.fit(x, y, epochs=200, batch_size=2)
y_pred = np.array(model.predict(x))
sig = lambda x: 1 / (1 + np.exp(-x))
y_pred = (y_pred >= 0.5).astype(int)
y_true = np.array(y, dtype=np.int32)
acc = (y_true.flatten() == y_pred.flatten()).astype(int).sum() / len(y_true.flatten())
print(f"acc: {acc}")
for yp, yt in zip(y_pred, y_true):
print(yp, yt)
| [
"numpy.exp",
"numpy.array",
"nascd.xorandor.load_data.load_data",
"tensorflow.keras.layers.Dense"
] | [((103, 114), 'nascd.xorandor.load_data.load_data', 'load_data', ([], {}), '()\n', (112, 114), False, 'from nascd.xorandor.load_data import load_data\n'), ((1183, 1210), 'numpy.array', 'np.array', (['y'], {'dtype': 'np.int32'}), '(y, dtype=np.int32)\n', (1191, 1210), True, 'import numpy as np\n'), ((234, 282), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(10)'], {'activation': 'tf.nn.relu'}), '(10, activation=tf.nn.relu)\n', (255, 282), True, 'import tensorflow as tf\n'), ((306, 354), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(10)'], {'activation': 'tf.nn.relu'}), '(10, activation=tf.nn.relu)\n', (327, 354), True, 'import tensorflow as tf\n'), ((378, 426), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(10)'], {'activation': 'tf.nn.relu'}), '(10, activation=tf.nn.relu)\n', (399, 426), True, 'import tensorflow as tf\n'), ((449, 499), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(3)'], {'activation': 'tf.nn.sigmoid'}), '(3, activation=tf.nn.sigmoid)\n', (470, 499), True, 'import tensorflow as tf\n'), ((1125, 1135), 'numpy.exp', 'np.exp', (['(-x)'], {}), '(-x)\n', (1131, 1135), True, 'import numpy as np\n')] |
from sklearn.base import BaseEstimator
from sklearn.pipeline import Pipeline
import logging
import numpy as np
def build(bins=10, density=None):
pipeline = Pipeline([('transformer',
SentiWSPolarityDistribution(bins=bins, density=density)),
])
return ('polarity_sentiws_distribution', pipeline)
def extract_polarity_tokens(document):
polarity_tokens = []
for token in document.tokens:
if token.polarity is not None:
polarity_tokens.append(token.polarity_sentiws)
if not polarity_tokens:
return [0]
else:
return polarity_tokens
class SentiWSPolarityDistribution(BaseEstimator):
def __init__(self, bins=10, density=None):
self.bins = bins
self.density = density
self.logger = logging.getLogger()
def fit(self, X, y):
all_polarity_values = []
for thf_sentence in X:
all_polarity_values.extend(extract_polarity_tokens(thf_sentence))
histogram, edges = np.histogram(all_polarity_values, bins=self.bins, density=self.density)
self.edges = edges
return self
def transform(self, X):
transformed = list(map(lambda x: self.transform_document(x), X))
return transformed
def transform_document(self, document):
polarity_tokens = extract_polarity_tokens(document)
histogram, edges = np.histogram(polarity_tokens, bins=self.edges, density=False)
return histogram
| [
"logging.getLogger",
"numpy.histogram"
] | [((818, 837), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (835, 837), False, 'import logging\n'), ((1033, 1104), 'numpy.histogram', 'np.histogram', (['all_polarity_values'], {'bins': 'self.bins', 'density': 'self.density'}), '(all_polarity_values, bins=self.bins, density=self.density)\n', (1045, 1104), True, 'import numpy as np\n'), ((1413, 1474), 'numpy.histogram', 'np.histogram', (['polarity_tokens'], {'bins': 'self.edges', 'density': '(False)'}), '(polarity_tokens, bins=self.edges, density=False)\n', (1425, 1474), True, 'import numpy as np\n')] |
import numpy as np
import scipy
from ._simsig_tools import _check_list,_rand_uniform
from ._generator_base import generator_base
#------------------------------------------------------------------------------------
__all__=['harmonics','Harmonics']
#------------------------------------------------------------------------------------
# Default per-component signal parameters: amplitude, carrier frequency (f0),
# frequency band (delta_f), delay, initial phase and modulation callback.
_SIGNAL_PARAMETER_DEFAULT = {'amp':1, 'f0':1, 'delta_f':0, 'delay':0,'phase0':0,'callback': None}
# Default system parameters: sampling frequency (fs) and number of samples (length).
_SYSTEM_PARAMETER_DEFAULT = {'fs':10, 'length':512}
#------------------------------------------------------------------------------------
def harmonics(amplitude = None,
              f0 = None,
              delta_f = None,
              delay = None,
              phase0 = None,
              callback = None,
              fs=10,
              length=512, 
              snr_db = None):
    '''
    Harmonic signal generation.

    Parameters
    ------------
    * amplitude: 1d ndarray,
        amplitudes of the signal components (default [1]).
    * f0: 1d ndarray,
        initial (carried) frequencies of the components (default [1]).
    * delta_f: 1d ndarray,
        frequency bands of the components (default [0],
        i.e. unmodulated harmonics).
    * delay: 1d ndarray,
        delays of the components (default [0]).
    * phase0: 1d ndarray,
        initial phases of the components (default [0]).
    * callback: 1d ndarray,
        callbacks for special operations on signals (default [None]).
    * fs: float,
        is the sampling frequency.
    * length: int,
        is the signal length.
    * snr_db: float,
        signal-to-noise ratio in dB (None means noise-free).

    Returns:
    -------------
    * signal: 1d ndarray (complex),
        harmonic signal.

    Notes
    ---------
    * fs and length are the system parameters.
    * Simulates a harmonic (actually frequency modulated) signal
      in the following form:
      ..math::        
      s = sum{f_i(a_i*exp[j2pi(f_0_i(t-tau_i)+
                  Delta_f_i(t-tau_i)^2/(N/fs))+j varphi_0_i])}+noises,
      where:
      * i = 0,.., are the signal numbers in the superposition
        (actually the number of the set initial frequencies (f0));
      * a_i is the amplitude;
      * f_0_i is the initial frequency;
      * tau_i is the signal delay;    
      * Delta f_i is the frequency band (from f_0 to f_0+Delta_f);
      * varphi_0_i is the initial phase;
      * f_i is the modulation callback;
      * t is the time (up to N/fs);
      * N is the length (size) of the signal samples;
      * fs is the sampling frequency;
      * noises are the Gaussian white noises.

    Example
    -----------
    import dsatools.generator 
    from dsatools.generator import callbacks 
    import dsatools.utilits as ut
    
    #Example1----------------------------------------
    signal = dsatools.generator.harmonics()
    ut.probe(signal)
    
    #Example2----------------------------------------
    signal = dsatools.generator.harmonics(amplitude=[1],
                                         f0=[1,2,3],
                                         delta_f=[0.3],
                                         delay=[0],
                                         phase0=[0],
                                         callback=[None],
                                         fs=10,
                                         length=512,
                                         snr_db=None,)
    ut.probe(signal)
    
    #Example3----------------------------------------
    cb1 = callbacks.harmonic_modulataion(amp_am=0.5,freq_am=9.5,phase_shift=0)
    cb2 = callbacks.harmonic_modulataion(amp_am=0.7,freq_am=8.2,phase_shift=0)
    signal = dsatools.generator.harmonics(amplitude=[1,1,0.4,0.3],
                                         f0=[1,2,3,4],
                                         delta_f=[0.2,1.3,],
                                         delay =[0,0,0,4],
                                         phase0=[0,1.2],
                                         callback=[cb1,None,cb2],
                                         fs=10,
                                         length=512,
                                         snr_db=20,)
    ut.probe(signal)
    '''
    # Use None sentinels instead of mutable list defaults: fresh lists are
    # built on every call, so mutation downstream can never leak state
    # between calls. The effective defaults are unchanged.
    if amplitude is None: amplitude = [1]
    if f0       is None: f0       = [1]
    if delta_f  is None: delta_f  = [0]
    if delay    is None: delay    = [0]
    if phase0   is None: phase0   = [0]
    if callback is None: callback = [None]

    # Delegate to the Harmonics generator object: configure the
    # per-component parameters, then synthesize the (optionally noisy) signal.
    signal = Harmonics(fs, length)
    signal.set_signal_parameters(amplitude = amplitude,
                                 f0        = f0,
                                 delta_f   = delta_f,
                                 delay     = delay,
                                 phase0    = phase0,
                                 callback  = callback,)
    return signal.get_signal(snr_db = snr_db)
#------------------------------------------------------------------------------------
class Harmonics(generator_base):
    '''
    Generator of harmonic (frequency-modulated) signals.

    Attributes
    ----------------
    > system_parameters = {fs, length}:
        * fs: float,
            sampling frequency;
        * length: int,
            signal length in samples.
    > signal_parameters: list of dicts
        {amp, f0, delta_f, delay, phase0, callback}:
        * amp: amplitude of a component;
        * f0: initial (carrier) frequency of a component;
        * delta_f: frequency band of a component;
        * delay: component delay;
        * phase0: initial phase of a component;
        * callback: optional callable applied to the simulated component.

    Methods
    -----------
    * set_system_parameters;
    * get_system_parameters;
    * set_signal_parameters;
    * add_signal_parameters;
    * print_signal_parameters;
    * get_signal.

    Notes
    ---------
    * The generated signal is the superposition
      ..math::
      s = sum_i{f_i(a_i*exp[j2pi(f_0_i(t-tau_i)+
          Delta_f_i(t-tau_i)^2/(N/fs))+j varphi_0_i])}+noises,
      where, for each component i:
        * a_i is the amplitude;
        * f_0_i is the initial frequency;
        * tau_i is the delay;
        * Delta_f_i is the frequency band (from f_0 to f_0 + Delta_f);
        * varphi_0_i is the initial phase;
        * f_i is the modulation callback;
        * t is the time axis (up to N/fs);
        * N is the number of samples, fs the sampling frequency;
        * noises are the gaussian white noises.

    Example
    -----------
    import dsatools.generator
    from dsatools.generator import callbacks
    import dsatools.utilits as ut

    cb1 = callbacks.harmonic_modulataion(amp_am=0.1,freq_am=0.5,phase_shift=0)
    callbacks.probe_modulation(cb1,512)
    cb2 = callbacks.pulse_modulataion(200,400)
    callbacks.probe_modulation(cb2,512)
    signal1 = dsatools.generator.Harmonics()
    signal1.get_system_parameters()
    signal1.set_signal_parameters(amplitude=[1,0.5],
                                  f0=[1,2,3],
                                  delta_f=[0.4,0.1],
                                  delay=[0],
                                  phase0=[0],
                                  callback=[cb1,cb2],)
    sig1 = signal1.get_signal(snr_db = 200)
    ut.probe(sig1)
    '''
#@override
    def __init__(self,
                 fs = _SYSTEM_PARAMETER_DEFAULT['fs'],
                 length = _SYSTEM_PARAMETER_DEFAULT['length']
                 ):
        '''
        Initialize the generator with the given system parameters and
        the default signal-parameter template.

        Parameters
        -------------
        * fs: float,
            sampling frequency.
        * length: int,
            signal length in samples.
        '''
        # Copy the module-level defaults so instances never mutate them.
        self._signal_parameters_dict_default = _SIGNAL_PARAMETER_DEFAULT.copy()
        self._system_parameters_dict_default = _SYSTEM_PARAMETER_DEFAULT.copy()
        self.set_system_parameters(fs, length)
        self.set_signal_parameters_dict_default()
#------------------------------------------------------------------------------------
#@override
def set_system_parameters(self,
fs=_SYSTEM_PARAMETER_DEFAULT['fs'],
length = _SYSTEM_PARAMETER_DEFAULT['fs']):
'''
Set system parameters.
Parameters
-------------
* fs: float,
is the sampling frequency.
* length: int,
is the length of signal.
'''
self._system_parameters['fs'] = fs
self._system_parameters['length'] = length
#------------------------------------------------------------------------------------
#@override
def make_signal_parameters_dict(self,
amplitude = _SIGNAL_PARAMETER_DEFAULT['amp'],
f0 = _SIGNAL_PARAMETER_DEFAULT['f0'],
delta_f = _SIGNAL_PARAMETER_DEFAULT['delta_f'],
delay = _SIGNAL_PARAMETER_DEFAULT['delay'],
phase0 = _SIGNAL_PARAMETER_DEFAULT['phase0'],
callback = _SIGNAL_PARAMETER_DEFAULT['callback']):
'''
Make the signal parameters dictionary.
Parameters
------------
* amplitude: 1d ndarray,
amplitude of signal components.
* f0: 1d ndarray,
initial components frequency
(carried frequency).
* delta_f: 1d ndarray,
delta_f frequency components band.
* delay: 1d ndarray,
signal components delay.
* phase0: 1d ndarray,
initla phase of components.
* callback: 1d ndarray,
callback for special operations on signals.
Returns
----------
* signal_parameters_dict: dict,
signal parameters dictionary.
'''
signal_parameters_dict = self.get_signal_parameters_dict_default()
signal_parameters_dict['amp'] = amplitude
signal_parameters_dict['f0'] = f0
signal_parameters_dict['delta_f'] = delta_f
signal_parameters_dict['delay'] = delay
signal_parameters_dict['phase0'] = phase0
signal_parameters_dict['callback'] = callback
return signal_parameters_dict
#------------------------------------------------------------------------------------
#@override
def add_signal_parameters(self,
amplitude = [_SIGNAL_PARAMETER_DEFAULT['amp']],
f0 = [_SIGNAL_PARAMETER_DEFAULT['f0']],
delta_f = [_SIGNAL_PARAMETER_DEFAULT['delta_f']],
delay = [_SIGNAL_PARAMETER_DEFAULT['delay']],
phase0 = [_SIGNAL_PARAMETER_DEFAULT['phase0']],
callback = [_SIGNAL_PARAMETER_DEFAULT['callback']]):
'''
Add signal parameters.
Parameters
------------
* amplitude: 1d ndarray,
amplitude of signal components.
* f0: 1d ndarray,
initial components frequency
(carried frequency).
* delta_f: 1d ndarray,
delta_f frequency components band.
* delay: 1d ndarray,
signal components delay.
* phase0: 1d ndarray,
initla phase of components.
* callback: 1d ndarray,
callback for special operations on signals.
Notes
----------
* formats of the input: float, list, tuple.
* in the case of different length of array,
all will be resized to f0_s length.
'''
# main array - f0
f0 = _check_list(f0,-1)
len_list = len(f0) #required length for all other arrays
amplitude = _check_list(amplitude, len_list, 'last')
delta_f = _check_list(delta_f, len_list, 0)
delay = _check_list(delay, len_list, 0)
phase0 = _check_list(phase0, len_list, 0)
callback = _check_list(callback, len_list, 'None')
dict2add = []
for (amplitude_,
f0_,
delta_f_,
delay_,
phase0_,
callback_) in \
zip(amplitude,
f0,
delta_f,
delay,
phase0,
callback):
dict2add += [self.make_signal_parameters_dict(amplitude_,
f0_,
delta_f_,
delay_,
phase0_,
callback_)]
self.add_signal_parameters_dicts(dict2add)
#------------------------------------------------------------------------------------
#@override
def set_signal_parameters(self,
amplitude = [_SIGNAL_PARAMETER_DEFAULT['amp']],
f0 = [_SIGNAL_PARAMETER_DEFAULT['f0']],
delta_f = [_SIGNAL_PARAMETER_DEFAULT['delta_f']],
delay = [_SIGNAL_PARAMETER_DEFAULT['delay']],
phase0 = [_SIGNAL_PARAMETER_DEFAULT['phase0']],
callback = [_SIGNAL_PARAMETER_DEFAULT['callback']]):
'''
Set signal parameters.
Parameters
------------
* amplitude: 1d ndarray,
amplitude of signal components.
* f0: 1d ndarray,
initial components frequency
(carried frequency).
* delta_f: 1d ndarray,
delta_f frequency components band.
* delay: 1d ndarray,
signal components delay.
* phase0: 1d ndarray,
initla phase of components.
* callback: 1d ndarray,
callback for special operations on signals.
Notes
----------
* formats of the input: float, list, tuple.
* in the case of different length of array,
all will be resized to f0_s length.
'''
self.clear_signal_parameters()
self.add_signal_parameters(amplitude,
f0,
delta_f,
delay,
phase0,
callback)
#------------------------------------------------------------------------------------
#@override
    def add_random_signal_parameters(self,
                                     n_of_params = 1,
                                     amplitude_range = [0,_SIGNAL_PARAMETER_DEFAULT['amp']],
                                     f0_range = [0,_SIGNAL_PARAMETER_DEFAULT['f0']],
                                     delta_f_range = [0,_SIGNAL_PARAMETER_DEFAULT['delta_f']],
                                     delay_range = [0,_SIGNAL_PARAMETER_DEFAULT['delay']],
                                     phase0_range = [0,_SIGNAL_PARAMETER_DEFAULT['phase0']]):
        '''
        Add n_of_params random, uniformly distributed signal components.

        Parameters
        -------------
        * n_of_params: int,
            number of random components to add.
        * amplitude_range: [float, float],
            range of the amplitudes.
        * f0_range: [float, float],
            range of the initial (carrier) frequencies.
        * delta_f_range: [float, float],
            range of the frequency bands.
        * delay_range: [float, float],
            range of the signal delays.
        * phase0_range: [float, float],
            range of the initial phases.

        Notes
        -------
        * Callbacks are not applied by this function
          (every new component gets callback=None).
        '''
        # NOTE(review): _rand_uniform and _SCALE_TO_FLOAT_ are module-level
        # helpers not visible here; _SCALE_TO_FLOAT_ presumably controls the
        # resolution of the uniform draws -- confirm against their definitions.
        scale_float = _SCALE_TO_FLOAT_
        amplitude = _rand_uniform(amplitude_range, n_of_params, scale_float)
        f0 = _rand_uniform(f0_range, n_of_params, scale_float)
        delta_f = _rand_uniform(delta_f_range, n_of_params, scale_float)
        delay = _rand_uniform(delay_range, n_of_params, scale_float)
        phase0 = _rand_uniform(phase0_range, n_of_params, scale_float)

        self.add_signal_parameters(amplitude,
                                   f0,
                                   delta_f,
                                   delay,
                                   phase0,
                                   callback = n_of_params * [None])
#------------------------------------------------------------------------------------
#@override
def _sim_one_sig(self, sig_param):
'''
Simulate one harmonic (actually frequency modulated signal).
Parameters
-----------
* sig_param: dict,
dictionary of signal parameters, whcih include
(a,f_0,\Delta f,\tau,phi0,callback).
Returns
-----------
* sig: 1d ndarray (complex),
simulated signal.
Notes
---------
* Fs and N are system parameters.
* In harmonic signal \tau and \varphi_0/2/pi
are play the same role.
* If callback is not None: s = callback(s)
(format of callback = f(x)),
if callback is None it does not applied.
* Signal in form:
..math::
s = f(a*exp[j2pi(f_0(t-tau)+Delta_f(t-tau)^2/(N/fs))+j varphi_0]),
where:
* a is the amplitude;
* f_0 is the initial frequency;
* tau is the signal delay;
* Delta_f is the frequency band
(from f_0 to f_0+\Delta f);
* N is length (size) of signals samples;
* fs is the sampling frequency;
* t is the time (up to N/fs);
* varphi_0 is the initial phase
* f modulation callback.
'''
fs = self._system_parameters['fs']
N = self._system_parameters['length']
f0 = sig_param['f0']
incF = sig_param['delta_f']
tau = sig_param['delay']
phi0 = sig_param['phase0']
A = sig_param['amp']
callback = sig_param['callback']
t = np.arange(N)/fs - tau
Tm = N/fs
sig = A*np.exp(2j*np.pi*( f0*t + incF*np.square(t)/2/Tm )+ phi0*1j )
sig = np.asarray(sig,dtype= np.complex)
if (callback in ['None', None]):
return sig
elif type(callback ) is not list:
callback = list([callback])
for callback_i in callback:
sig = callback_i(sig)
return sig | [
"numpy.asarray",
"numpy.arange",
"numpy.square"
] | [((19060, 19093), 'numpy.asarray', 'np.asarray', (['sig'], {'dtype': 'np.complex'}), '(sig, dtype=np.complex)\n', (19070, 19093), True, 'import numpy as np\n'), ((18916, 18928), 'numpy.arange', 'np.arange', (['N'], {}), '(N)\n', (18925, 18928), True, 'import numpy as np\n'), ((19005, 19017), 'numpy.square', 'np.square', (['t'], {}), '(t)\n', (19014, 19017), True, 'import numpy as np\n')] |
from ...main.CV import BPtCV, CVStrategy
import numpy as np
def test_basic():
    """Smoke test: BPtLGBMClassifier/Regressor fit and predict without error."""
    try:
        from ..BPtLGBM import BPtLGBMClassifier, BPtLGBMRegressor
    # FIX: bare `except:` also swallowed SystemExit/KeyboardInterrupt;
    # only a missing optional dependency should skip the test.
    except ImportError:
        return

    X = np.ones((20, 20))
    y = np.ones(20)
    y[:10] = 0

    # Just shouldn't fail
    regr = BPtLGBMRegressor()
    regr.fit(X, y)
    regr.predict(X)

    clasif = BPtLGBMClassifier()
    clasif.fit(X, y)
    clasif.predict(X)
def test_with_bpt_cv():
    """Check BPtLGBMRegressor eval-set splitting driven by a BPtCV object."""
    try:
        from ..BPtLGBM import BPtLGBMRegressor
    # FIX: bare `except:` also swallowed SystemExit/KeyboardInterrupt;
    # only a missing optional dependency should skip the test.
    except ImportError:
        return

    cv = BPtCV(splits=.5, n_repeats=1, cv_strategy=CVStrategy(),
          splits_vals=None, random_state=1)

    X = np.ones((20, 20))
    y = np.ones(20)

    # Make sure fit works w/ custom CV
    regr = BPtLGBMRegressor(eval_split=cv, early_stopping_rounds=10)
    regr.fit(X, y, fit_index=np.arange(20))

    # Half of the data should be split off as the eval set.
    X_eval, y_eval, eval_set =\
        regr._get_eval_set(X, y, fit_index=np.arange(20))
    assert X_eval.shape == (10, 20)
    assert y_eval.shape == (10, )
    assert eval_set[0].shape == (10, 20)
    assert eval_set[1].shape == (10, )

    # Regressor w/o early stop rounds: no eval split should be made.
    regr = BPtLGBMRegressor(eval_split=cv, early_stopping_rounds=None)
    X_eval, y_eval, eval_set =\
        regr._get_eval_set(X, y, fit_index=np.arange(20))
    assert X_eval.shape == (20, 20)
    assert eval_set is None
def test_cv_as_size():
    """Check BPtLGBMRegressor eval-set splitting with a plain float size."""
    try:
        from ..BPtLGBM import BPtLGBMRegressor
    # FIX: bare `except:` also swallowed SystemExit/KeyboardInterrupt;
    # only a missing optional dependency should skip the test.
    except ImportError:
        return

    X = np.ones((20, 20))
    y = np.ones(20)

    # Check works with cv as size
    regr = BPtLGBMRegressor(eval_split=.5, early_stopping_rounds=10)
    regr.fit(X, y, fit_index=np.arange(20))

    X_eval, y_eval, eval_set =\
        regr._get_eval_set(X, y, fit_index=np.arange(20))
    assert X_eval.shape == (10, 20)
    assert y_eval.shape == (10, )
    assert eval_set[0].shape == (10, 20)
    assert eval_set[1].shape == (10, )
def test_with_cat_features():
    """Check mapping of categorical feature indices through a data mapping."""
    try:
        from ..BPtLGBM import BPtLGBMRegressor
    # FIX: bare `except:` also swallowed SystemExit/KeyboardInterrupt;
    # only a missing optional dependency should skip the test.
    except ImportError:
        return

    X = np.ones((5, 5))
    y = np.ones(5)

    # Identity mapping keeps the declared categorical indices unchanged.
    base_mapping = {0: 0, 1: 1, 2: 2, 3: 3, 4: 4, 5: 5}

    regr = BPtLGBMRegressor(cat_inds=[0, 1])
    regr.fit(X, y)

    categorical_feature = regr._get_categorical_feature(base_mapping)
    assert categorical_feature == [0, 1]

    # One-to-many mapping expands each categorical index (None dropped).
    mapping = {0: [0, 1, 2], 1: [1, 2, None], 2: None, 3: 3, 4: 4, 5: [1, 2]}
    categorical_feature = regr._get_categorical_feature(mapping)
    assert categorical_feature == [0, 1, 2]
| [
"numpy.ones",
"numpy.arange"
] | [((192, 209), 'numpy.ones', 'np.ones', (['(20, 20)'], {}), '((20, 20))\n', (199, 209), True, 'import numpy as np\n'), ((218, 229), 'numpy.ones', 'np.ones', (['(20)'], {}), '(20)\n', (225, 229), True, 'import numpy as np\n'), ((654, 671), 'numpy.ones', 'np.ones', (['(20, 20)'], {}), '((20, 20))\n', (661, 671), True, 'import numpy as np\n'), ((680, 691), 'numpy.ones', 'np.ones', (['(20)'], {}), '(20)\n', (687, 691), True, 'import numpy as np\n'), ((1472, 1489), 'numpy.ones', 'np.ones', (['(20, 20)'], {}), '((20, 20))\n', (1479, 1489), True, 'import numpy as np\n'), ((1498, 1509), 'numpy.ones', 'np.ones', (['(20)'], {}), '(20)\n', (1505, 1509), True, 'import numpy as np\n'), ((2027, 2042), 'numpy.ones', 'np.ones', (['(5, 5)'], {}), '((5, 5))\n', (2034, 2042), True, 'import numpy as np\n'), ((2051, 2061), 'numpy.ones', 'np.ones', (['(5)'], {}), '(5)\n', (2058, 2061), True, 'import numpy as np\n'), ((832, 845), 'numpy.arange', 'np.arange', (['(20)'], {}), '(20)\n', (841, 845), True, 'import numpy as np\n'), ((923, 936), 'numpy.arange', 'np.arange', (['(20)'], {}), '(20)\n', (932, 936), True, 'import numpy as np\n'), ((1274, 1287), 'numpy.arange', 'np.arange', (['(20)'], {}), '(20)\n', (1283, 1287), True, 'import numpy as np\n'), ((1645, 1658), 'numpy.arange', 'np.arange', (['(20)'], {}), '(20)\n', (1654, 1658), True, 'import numpy as np\n'), ((1736, 1749), 'numpy.arange', 'np.arange', (['(20)'], {}), '(20)\n', (1745, 1749), True, 'import numpy as np\n')] |
# ---
# jupyter:
# jupytext:
# formats: ipynb,py:percent
# text_representation:
# extension: .py
# format_name: percent
# format_version: '1.3'
# jupytext_version: 1.13.0
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %%
# %matplotlib inline
# %load_ext autoreload
# %autoreload 2
import networkx as nx
import matplotlib.pyplot as plt
import time
# modules specific to this project
from context import network as nw
from context import physics
from context import timemarching as tm
from context import plotter
from context import logger
# %% [markdown]
# ### 1. Define the broadcasting channels of the network
# This is done by creating a list of the channel names. The names are arbitrary and can be set by the user, such as 'postive', 'negative' or explicit wavelenghts like '870 nm', '700 nm'. Here I chose the colors 'red' and 'blue'.
# %%
channel_list = ['red', 'blue']
# Automatically generate the object that handles them
channels = {channel_list[v] : v for v in range(len(channel_list))}
# %% [markdown]
# ### 2. Define the layers
# Define the layers of nodes in terms of how they are connected to the channels. Layers and weights are organized in dictionaries. The input and output layers do not need to be changed, but for the hidden layer we need to specify the number of nodes N and assign the correct channels to the input/output of the node.
# %%
# Create layers ordered from 0 to P organized in a dictionary
layers = {}
Nring=5
# An input layer automatically creates on node for each channel that we define
layers[0] = nw.InputLayer(input_channels=channels)
# Forward signal layer
layers[1] = nw.HiddenLayer(N=1, output_channel='blue',excitation_channel='blue',inhibition_channel='red')
# Inhibiting memory layer
layers[2] = nw.HiddenLayer(N=1, output_channel='red' ,excitation_channel='blue',inhibition_channel='red')
layers[3] = nw.OutputLayer(output_channels=channels) # similar to input layer
# %% [markdown]
# ### 3. Define existing connections between layers
# The weights are set in two steps.
# First the connetions between layers are defined. This should be done using the keys defined for each layer above, i.e. 0, 1, 2 ... for input, hidden and output layers, respectively. The `connect_layers` function returns a weight matrix object that we store under a chosen key, for example `'inp->hid'`.
# Second, the specific connections on the node-to-node level are specified using the node index in each layer
# %%
# Define the overall connectivity
weights = {}
# The syntax is connect_layers(from_layer, to_layer, layers, channels)
weights['inp->hd0'] = nw.connect_layers(0, 1, layers, channels)
#weights['inp->hd1'] = nw.connect_layers(0, 2, layers, channels)
#weights['hd0->hd1'] = nw.connect_layers(1, 2, layers, channels)
weights['hd0->out'] = nw.connect_layers(1, 3, layers, channels)
#weights['hd1->out'] = nw.connect_layers(2, 3, layers, channels)
# Backwards connection from the memory
#weights['hd1->hd0'] = nw.connect_layers(2, 1, layers, channels)
# Teacher forcing connection back into the network
weights['out->hd1'] = nw.connect_layers(3, 2, layers, channels)
# Define the specific node-to-node connections in the weight matrices
low_weight = 1.0 # 0.02
# The syntax is connect_nodes(from_node, to_node, channel=label, weight=value in weight matrix)
# Draw a ring network with Nring nodes (Nring defined above)
# Input to first ring layer node
weights['inp->hd0'].connect_nodes(channels['blue'] ,0, channel='blue', weight=1.0) # channels['blue']=1
weights['inp->hd0'].connect_nodes(channels['red'] ,0, channel='red', weight=1.0) # channels['blue']=1
# Connect second hidden layer
#weights['inp->hd1'].connect_nodes(channels['blue'] ,0, channel='blue', weight=1.0) # channels['blue']=1
# Output layer connections back to network
#weights['out->hd1'].connect_nodes(channels['blue'] ,0 , channel='blue', weight=low_weight)
# Add damping connection
#weights['hd1->hd0'].connect_nodes(0 ,0 , channel='red', weight=low_weight)
# Connect to output
weights['hd0->out'].connect_nodes(0, channels['blue'], channel='blue', weight=0.9)
#weights['hd1->out'].connect_nodes(0, channels['red'], channel='red', weight=0.9)
# %% [markdown]
# ### 4. Visualize the network
# The `plotter` module supplies functions to visualize the network structure. The nodes are named by the layer type (Input, Hidden or Output) and the index. To supress the printing of weight values on each connection, please supply `show_edge_labels=False`.
#
# #### Available layouts:
# **multipartite**: Standard neural network appearance. Hard to see recurrent couplings within layers.
# **circular**: Nodes drawn as a circle
# **shell**: Layers drawn as concetric circles
# **kamada_kawai**: Optimization to minimize weighted internode distance in graph
# **spring**: Spring layout which is standard in `networkx`
#
# #### Shell layout
# This is my current favorite. It is configured to plot the input and output nodes on the outside of the hidden layer circle, in a combined outer concentric circle.
# %%
plotter.visualize_network(layers, weights, layout='shell', show_edge_labels=False)
# %% [markdown]
# ### 5. Specify the physics of the nodes
# Before running any simulations, we need to specify the input currents and the physics of the hidden layer nodes. Parameters can either be specified directly or coupled from the `physics` module.
# %%
# Specify two types of devices for the hidden layer
# 1. Propagator (standard parameters)
propagator = physics.Device('device_parameters.txt')
propagator.print_parameter('Cstore')
#propagator.set_parameter('Rstore',1e6)
# 2. Memory (modify the parameters)
memory = physics.Device('device_parameters.txt')
#memory.set_parameter('Rstore',1e6)
#memory.set_parameter('Cstore',2e-15)
# a 3e-15 F capacitor can be build by 800x900 plates 20 nm apart
memory.print_parameter('Cstore')
# %%
# Specify the internal dynamics by supplying the RC constants to the hidden layer (six parameters)
layers[1].assign_device(propagator)
layers[2].assign_device(memory)
# Tweak the threshold voltage
Vthres=0.5
layers[1].Vthres=Vthres
layers[2].Vthres=Vthres
# Calculate the unity_coeff to scale the weights accordingly
unity_coeff, Imax = propagator.inverse_gain_coefficient(propagator.eta_ABC, Vthres)
print(f'Unity coupling coefficient calculated as unity_coeff={unity_coeff:.4f}')
print(f'Imax is found to be {Imax} nA')
# %% [markdown]
# Produce Bode plots to determine what our frequency cutoff will be
# %%
# Setup the gain function
eigvals = propagator.setup_gain(propagator.gammas)
# The eigenvalues
print('System eigenvalues:')
k=0
for l in eigvals :
print(f'eig[{k}]={l:.2f} ns^-1 ')
k+=1
# Regarding the eigenvalues, eig[0]=-25.81 ns^-1, eig[1]=-5.00 ns^-1, eig[2]=-0.19 ns^-1
# eig[1] regards the charge collecting subsystem, this is their RC constant
# eig[0] regards the exchange between the collectors and the storage, I believe. For A33=20 it's zero
# eig[2] regards the time scale of the storage unit, the longest timescale in the system.
# PUT THIS IN THE PLOTTER MODULE
import numpy as np
# Visualize the response function
Ns = 100
# Units are already in GHz so this creates a space from 1 MHz to 10 GHz
s_exp = np.linspace(-3,1,Ns)
s = 1j*10**s_exp
# Sample the gain function
G11, _ = propagator.gain(s,eigvals,propagator.gammas)
mag_G11 = np.absolute(G11) / np.absolute(G11[0])
arg_G11 = np.angle(G11)
mag_G11_dB = 20*np.log10(mag_G11)
# %%
# Produce Bode plots of the results
# Define parameters
my_dpi = 300
prop_cycle = plt.rcParams['axes.prop_cycle']
colors = prop_cycle.by_key()['color']
# Figure sizes
inchmm = 25.4
nature_single = 89.0 / 25.4
nature_double = 183.0 / 25.4
nature_full = 247.0 / 25.4
# Plot options
font = {'family' : 'sans',
'weight' : 'normal',
'size' : 12}
plt.rc('font', **font)
fig, (ax1, ax2) = plt.subplots(2,1,figsize=(nature_double,nature_single),sharex=True)
f_min = abs(s[0])
plot_f_max = 10 # GHz
ax1.plot(abs(s),mag_G11_dB)
ax1.plot([f_min,plot_f_max],[-3,-3],'k--')
ax1.grid('True')
ax1.set_xscale('log')
ax1.set_title('Bode plots for dynamic node')
#ax1.set_xlabel('Frequency (GHz)')
ax1.set_ylabel('|G11|/|G11[0]| (dB)')
ax1.set_ylabel('|H| (dB)')
ax1.set_xlim(f_min,plot_f_max)
ax1.set_ylim(-20,2)
ax2.plot(abs(s),arg_G11*180/np.pi)
ax2.grid('True')
#ax2.set_xscale('log')
ax2.set_xlabel('Frequency (GHz)')
ax2.set_ylabel('Phase (radians)')
ax2.set_ylim(-180,10)
figname = 'sine_test_bode'
# Finalize layout and save the Bode figure in the three formats.
plt.tight_layout()
#plt.legend()
# BUG FIX: the keyword was misspelled 'bbox_inhces', so the tight
# bounding box was never applied (rejected or silently ignored,
# depending on the matplotlib version).
plt.savefig(figname+'.eps', bbox_inches='tight')
plt.savefig(figname+'.png', dpi=my_dpi)
plt.savefig(figname+'.svg')
plt.show()
# %% [markdown]
# Use the physics of the device to configure a sine wave of changing frequency to use as an input signal
# %%
# Generate a time series using an increasing frequency series
def freqeuncy_step_generator(tend,fmin,fmax,factor=2,res=10) :
# determine the size of the sequence
dt = fmax**-1/res
N = int(tend/dt)
# chop it up into parts
Nint = np.log(fmax/fmin)/np.log(factor)+1
Nstep = int(N/Nint)
changepoints = np.insert(np.arange(0,N,step=Nstep),int(Nint+1),N)
# These are our frequencies
freq = fmin*factor**np.arange(int(Nint)+1)
# From here on we use the pyESN example code, with some modifications
const_intervals = list(zip(changepoints,np.roll(changepoints,-1)))[:-1]
frequency_control = np.zeros((N,1))
for k, (t0,t1) in enumerate(const_intervals): # enumerate here
frequency_control[t0:t1] = freq[k]
# run time update through a sine, while changing the freqeuncy
frequency_output = np.zeros((N,1))
z = 0
for i in range(N):
z = z + frequency_control[i]*dt
frequency_output[i] = (np.sin(z) + 1)/2
tseries = np.arange(0,tend,step=dt)
return np.hstack([np.ones((N,1)),frequency_control]),frequency_output,tseries
T = 1000 # ns
fmin = 0.05 # GHz
fmax = 1.6 # GHz
frequency_input, frequency_output, tseries = freqeuncy_step_generator(T,fmin,fmax)
#print(f'Length of time: {len(time)}, Length of frequency output: {len(frequency_output)}')
# Now we use interpolation to get a function handle from these data
from scipy.interpolate import interp1d
increase_freq = interp1d(tseries,frequency_output,axis=0)
# %%
# Specify an exciting current based on the frequency step function
def step_freq(t, I0):
    """Drive current at time t: amplitude I0 modulated by the
    module-level interpolated stepped-frequency waveform."""
    return increase_freq(t)*I0
# Try to modulate the nodes with red input
t_red = [(8.0,9.0), (12.0,13.0)] # at 6 ns, 11 ns, and 16 ns
# Constant inhibition to stabilize circuit
I_red = 0.0 # nA
# Use the square pulse function and specify which node in the input layer gets which pulse
layers[0].set_input_func(channel='blue',func_handle=step_freq, func_args=(Imax,))
# Use the costant function to specify the inhibition from I0 to H0
#layers[0].set_input_func(channel='red', func_handle=physics.constant, func_args=I_red)
layers[0].set_input_func(channel='red', func_handle=physics.square_pulse, func_args=(t_red, I_red))
# %% [markdown]
# ### 6. Evolve in time
# %%
# Start time t, end time T
t = 0.0
T = 1000.0 # ns
# To sample result over a fixed time-step, use savetime
savestep = 1
savetime = savestep
# These parameters are used to determine an appropriate time step each update
dtmax = 0.1 # ns
dVmax = 0.005 # V
nw.reset(layers)
# Create a log over the dynamic data
time_log = logger.Logger(layers,channels) # might need some flags
start = time.time()
while t < T:
# evolve by calculating derivatives, provides dt
dt = tm.evolve(t, layers, dVmax, dtmax )
# update with explicit Euler using dt
# supplying the unity_coeff here to scale the weights
tm.update(dt, t, layers, weights, unity_coeff)
t += dt
# Log the progress
if t > savetime :
# Put log update here to have (more or less) fixed sample rate
time_log.add_tstep(t, layers, unity_coeff)
# Now this is only to check progress
print(f'Time at t={t} ns')
savetime += savestep
end = time.time()
print('Time used:',end-start)
# This is a large pandas data frame of all system variables
result = time_log.get_timelog()
# %% [markdown]
# ### 7. Visualize results
# Plot results specific to certain nodes
# %%
#nodes = ['H0','H1','H2','H3','H4']
nodes = ['H0']
plotter.plot_nodes(result, nodes)
# %% [markdown]
# For this system it's quite elegant to use the `plot_chainlist` function, taking as arguments a graph object, the source node (I1 for blue) and a target node (O1 for blue)
# %%
# Variable G contains a graph object descibing the network
G = plotter.retrieve_G(layers, weights)
plotter.plot_chainlist(result,G,'I1','O1')
# %% [markdown]
# Plot specific attributes
# %%
attr_list = ['Vgate','Vexc']
plotter.plot_attributes(result, attr_list)
# %% [markdown]
# We can be totally specific if we want. First we list the available columns to choose from
# %%
print(result.columns)
# %%
plotter.visualize_dynamic_result(result, ['I0-Iout-red','I1-Iout-blue'])
# %%
# The unit of H0-Iexc is V/ns, which is different compared to the other quantaties.
# Consider removing or restructuring.
plotter.visualize_dynamic_result(result, ['H0-Iexc','H0-Iinh'])
# %%
plotter.visualize_transistor(propagator.transistorIV_example())
# %%
plotter.visualize_LED_efficiency(propagator.eta_example(propagator.eta_ABC))
# %%
| [
"numpy.log10",
"context.plotter.retrieve_G",
"context.network.OutputLayer",
"numpy.log",
"context.network.reset",
"scipy.interpolate.interp1d",
"context.plotter.plot_attributes",
"numpy.sin",
"numpy.arange",
"context.plotter.visualize_network",
"context.network.HiddenLayer",
"context.network.I... | [((1626, 1664), 'context.network.InputLayer', 'nw.InputLayer', ([], {'input_channels': 'channels'}), '(input_channels=channels)\n', (1639, 1664), True, 'from context import network as nw\n'), ((1700, 1799), 'context.network.HiddenLayer', 'nw.HiddenLayer', ([], {'N': '(1)', 'output_channel': '"""blue"""', 'excitation_channel': '"""blue"""', 'inhibition_channel': '"""red"""'}), "(N=1, output_channel='blue', excitation_channel='blue',\n inhibition_channel='red')\n", (1714, 1799), True, 'from context import network as nw\n'), ((1832, 1930), 'context.network.HiddenLayer', 'nw.HiddenLayer', ([], {'N': '(1)', 'output_channel': '"""red"""', 'excitation_channel': '"""blue"""', 'inhibition_channel': '"""red"""'}), "(N=1, output_channel='red', excitation_channel='blue',\n inhibition_channel='red')\n", (1846, 1930), True, 'from context import network as nw\n'), ((1938, 1978), 'context.network.OutputLayer', 'nw.OutputLayer', ([], {'output_channels': 'channels'}), '(output_channels=channels)\n', (1952, 1978), True, 'from context import network as nw\n'), ((2671, 2712), 'context.network.connect_layers', 'nw.connect_layers', (['(0)', '(1)', 'layers', 'channels'], {}), '(0, 1, layers, channels)\n', (2688, 2712), True, 'from context import network as nw\n'), ((2865, 2906), 'context.network.connect_layers', 'nw.connect_layers', (['(1)', '(3)', 'layers', 'channels'], {}), '(1, 3, layers, channels)\n', (2882, 2906), True, 'from context import network as nw\n'), ((3149, 3190), 'context.network.connect_layers', 'nw.connect_layers', (['(3)', '(2)', 'layers', 'channels'], {}), '(3, 2, layers, channels)\n', (3166, 3190), True, 'from context import network as nw\n'), ((5115, 5202), 'context.plotter.visualize_network', 'plotter.visualize_network', (['layers', 'weights'], {'layout': '"""shell"""', 'show_edge_labels': '(False)'}), "(layers, weights, layout='shell', show_edge_labels\n =False)\n", (5140, 5202), False, 'from context import plotter\n'), ((5564, 5603), 
'context.physics.Device', 'physics.Device', (['"""device_parameters.txt"""'], {}), "('device_parameters.txt')\n", (5578, 5603), False, 'from context import physics\n'), ((5726, 5765), 'context.physics.Device', 'physics.Device', (['"""device_parameters.txt"""'], {}), "('device_parameters.txt')\n", (5740, 5765), False, 'from context import physics\n'), ((7291, 7313), 'numpy.linspace', 'np.linspace', (['(-3)', '(1)', 'Ns'], {}), '(-3, 1, Ns)\n', (7302, 7313), True, 'import numpy as np\n'), ((7471, 7484), 'numpy.angle', 'np.angle', (['G11'], {}), '(G11)\n', (7479, 7484), True, 'import numpy as np\n'), ((7887, 7909), 'matplotlib.pyplot.rc', 'plt.rc', (['"""font"""'], {}), "('font', **font)\n", (7893, 7909), True, 'import matplotlib.pyplot as plt\n'), ((7929, 8000), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)', '(1)'], {'figsize': '(nature_double, nature_single)', 'sharex': '(True)'}), '(2, 1, figsize=(nature_double, nature_single), sharex=True)\n', (7941, 8000), True, 'import matplotlib.pyplot as plt\n'), ((8540, 8558), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (8556, 8558), True, 'import matplotlib.pyplot as plt\n'), ((8573, 8623), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(figname + '.eps')"], {'bbox_inhces': '"""tight"""'}), "(figname + '.eps', bbox_inhces='tight')\n", (8584, 8623), True, 'import matplotlib.pyplot as plt\n'), ((8621, 8662), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(figname + '.png')"], {'dpi': 'my_dpi'}), "(figname + '.png', dpi=my_dpi)\n", (8632, 8662), True, 'import matplotlib.pyplot as plt\n'), ((8660, 8689), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(figname + '.svg')"], {}), "(figname + '.svg')\n", (8671, 8689), True, 'import matplotlib.pyplot as plt\n'), ((8688, 8698), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (8696, 8698), True, 'import matplotlib.pyplot as plt\n'), ((10304, 10347), 'scipy.interpolate.interp1d', 'interp1d', (['tseries', 'frequency_output'], {'axis': '(0)'}), 
'(tseries, frequency_output, axis=0)\n', (10312, 10347), False, 'from scipy.interpolate import interp1d\n'), ((11370, 11386), 'context.network.reset', 'nw.reset', (['layers'], {}), '(layers)\n', (11378, 11386), True, 'from context import network as nw\n'), ((11435, 11466), 'context.logger.Logger', 'logger.Logger', (['layers', 'channels'], {}), '(layers, channels)\n', (11448, 11466), False, 'from context import logger\n'), ((11499, 11510), 'time.time', 'time.time', ([], {}), '()\n', (11508, 11510), False, 'import time\n'), ((12076, 12087), 'time.time', 'time.time', ([], {}), '()\n', (12085, 12087), False, 'import time\n'), ((12354, 12387), 'context.plotter.plot_nodes', 'plotter.plot_nodes', (['result', 'nodes'], {}), '(result, nodes)\n', (12372, 12387), False, 'from context import plotter\n'), ((12647, 12682), 'context.plotter.retrieve_G', 'plotter.retrieve_G', (['layers', 'weights'], {}), '(layers, weights)\n', (12665, 12682), False, 'from context import plotter\n'), ((12683, 12728), 'context.plotter.plot_chainlist', 'plotter.plot_chainlist', (['result', 'G', '"""I1"""', '"""O1"""'], {}), "(result, G, 'I1', 'O1')\n", (12705, 12728), False, 'from context import plotter\n'), ((12805, 12847), 'context.plotter.plot_attributes', 'plotter.plot_attributes', (['result', 'attr_list'], {}), '(result, attr_list)\n', (12828, 12847), False, 'from context import plotter\n'), ((12991, 13064), 'context.plotter.visualize_dynamic_result', 'plotter.visualize_dynamic_result', (['result', "['I0-Iout-red', 'I1-Iout-blue']"], {}), "(result, ['I0-Iout-red', 'I1-Iout-blue'])\n", (13023, 13064), False, 'from context import plotter\n'), ((13193, 13257), 'context.plotter.visualize_dynamic_result', 'plotter.visualize_dynamic_result', (['result', "['H0-Iexc', 'H0-Iinh']"], {}), "(result, ['H0-Iexc', 'H0-Iinh'])\n", (13225, 13257), False, 'from context import plotter\n'), ((7422, 7438), 'numpy.absolute', 'np.absolute', (['G11'], {}), '(G11)\n', (7433, 7438), True, 'import numpy as np\n'), 
((7441, 7460), 'numpy.absolute', 'np.absolute', (['G11[0]'], {}), '(G11[0])\n', (7452, 7460), True, 'import numpy as np\n'), ((7502, 7519), 'numpy.log10', 'np.log10', (['mag_G11'], {}), '(mag_G11)\n', (7510, 7519), True, 'import numpy as np\n'), ((9458, 9474), 'numpy.zeros', 'np.zeros', (['(N, 1)'], {}), '((N, 1))\n', (9466, 9474), True, 'import numpy as np\n'), ((9679, 9695), 'numpy.zeros', 'np.zeros', (['(N, 1)'], {}), '((N, 1))\n', (9687, 9695), True, 'import numpy as np\n'), ((9839, 9866), 'numpy.arange', 'np.arange', (['(0)', 'tend'], {'step': 'dt'}), '(0, tend, step=dt)\n', (9848, 9866), True, 'import numpy as np\n'), ((11587, 11621), 'context.timemarching.evolve', 'tm.evolve', (['t', 'layers', 'dVmax', 'dtmax'], {}), '(t, layers, dVmax, dtmax)\n', (11596, 11621), True, 'from context import timemarching as tm\n'), ((11728, 11774), 'context.timemarching.update', 'tm.update', (['dt', 't', 'layers', 'weights', 'unity_coeff'], {}), '(dt, t, layers, weights, unity_coeff)\n', (11737, 11774), True, 'from context import timemarching as tm\n'), ((9164, 9191), 'numpy.arange', 'np.arange', (['(0)', 'N'], {'step': 'Nstep'}), '(0, N, step=Nstep)\n', (9173, 9191), True, 'import numpy as np\n'), ((9076, 9095), 'numpy.log', 'np.log', (['(fmax / fmin)'], {}), '(fmax / fmin)\n', (9082, 9095), True, 'import numpy as np\n'), ((9094, 9108), 'numpy.log', 'np.log', (['factor'], {}), '(factor)\n', (9100, 9108), True, 'import numpy as np\n'), ((9402, 9427), 'numpy.roll', 'np.roll', (['changepoints', '(-1)'], {}), '(changepoints, -1)\n', (9409, 9427), True, 'import numpy as np\n'), ((9799, 9808), 'numpy.sin', 'np.sin', (['z'], {}), '(z)\n', (9805, 9808), True, 'import numpy as np\n'), ((9887, 9902), 'numpy.ones', 'np.ones', (['(N, 1)'], {}), '((N, 1))\n', (9894, 9902), True, 'import numpy as np\n')] |
"""
File: figureS01.py
Purpose: Generates figure S01.
Figure S01 analyzes heterogeneous (2 state), uncensored,
single lineages (no more than one lineage per population).
"""
import numpy as np
from .figureCommon import (
getSetup,
subplotLabel,
commonAnalyze,
figureMaker,
pi,
T,
E,
max_desired_num_cells,
num_data_points,
min_desired_num_cells,
)
from ..LineageTree import LineageTree
# Creating a list of populations to analyze over
cells = np.linspace(min_desired_num_cells, max_desired_num_cells, num_data_points)
list_of_fpi = [pi] * cells.size
# Generate populations
list_of_populations = [[LineageTree.init_from_parameters(pi, T, E, cell_num)] for cell_num in cells]
def makeFigure():
"""
Makes figure 2.
"""
# Get list of axis objects
ax, f = getSetup((10, 10), (3, 3))
figureMaker(ax, *commonAnalyze(list_of_populations, 2, list_of_fpi=list_of_fpi))
subplotLabel(ax)
return f
| [
"numpy.linspace"
] | [((485, 559), 'numpy.linspace', 'np.linspace', (['min_desired_num_cells', 'max_desired_num_cells', 'num_data_points'], {}), '(min_desired_num_cells, max_desired_num_cells, num_data_points)\n', (496, 559), True, 'import numpy as np\n')] |
import numpy as np
import torch
import logging
import time
import os
import ujson as json
from config import config
from scripts.config_args import parse_args
from common.dataset.lc_quad import LC_QuAD
from common.dataset.qald_7_ml import Qald_7_ml
from common.model.runner import Runner
np.random.seed(6)
torch.manual_seed(6)
torch.backends.cudnn.deterministic = True
if __name__ == '__main__':
start = time.time()
args = parse_args()
logger = logging.getLogger('main')
logger.setLevel(logging.INFO)
formatter = logging.Formatter("%(levelname)s:%(name)s:%(message)s")
ch = logging.StreamHandler()
ch.setFormatter(formatter)
logger.addHandler(ch)
logger.info(args)
dataset = None
if args.dataset == 'lcquad':
dataset = LC_QuAD(config['lc_quad']['train'], config['lc_quad']['test'], config['lc_quad']['vocab'],
False, args.remove_stop_words)
elif args.dataset == 'qald_7_ml':
dataset = Qald_7_ml(config['qald_7_ml']['train'], config['qald_7_ml']['test'], config['qald_7_ml']['vocab'],
False, False)
if args.mode == 'test':
eval_results = {}
for k in range(0, 11):
file_name = '{}-{}'.format(args.dataset, k)
args.k = k
try:
runner = Runner(dataset, args)
runner.load_checkpoint(os.path.join(config['chk_path'], args.checkpoint))
print(args)
results = runner.test(dataset, args, use_elastic=True) # use_EARL=True)
eval_results[file_name] = results
finish = time.time()
print('total runtime:', finish - start)
except Exception as e:
print(e)
print(file_name)
with open('eval-mrr-{}.json'.format(args.dataset), 'wt') as json_file:
json.dump(eval_results, json_file)
| [
"logging.getLogger",
"torch.manual_seed",
"common.model.runner.Runner",
"scripts.config_args.parse_args",
"logging.StreamHandler",
"common.dataset.lc_quad.LC_QuAD",
"common.dataset.qald_7_ml.Qald_7_ml",
"logging.Formatter",
"ujson.dump",
"os.path.join",
"numpy.random.seed",
"time.time"
] | [((290, 307), 'numpy.random.seed', 'np.random.seed', (['(6)'], {}), '(6)\n', (304, 307), True, 'import numpy as np\n'), ((308, 328), 'torch.manual_seed', 'torch.manual_seed', (['(6)'], {}), '(6)\n', (325, 328), False, 'import torch\n'), ((411, 422), 'time.time', 'time.time', ([], {}), '()\n', (420, 422), False, 'import time\n'), ((434, 446), 'scripts.config_args.parse_args', 'parse_args', ([], {}), '()\n', (444, 446), False, 'from scripts.config_args import parse_args\n'), ((460, 485), 'logging.getLogger', 'logging.getLogger', (['"""main"""'], {}), "('main')\n", (477, 485), False, 'import logging\n'), ((536, 591), 'logging.Formatter', 'logging.Formatter', (['"""%(levelname)s:%(name)s:%(message)s"""'], {}), "('%(levelname)s:%(name)s:%(message)s')\n", (553, 591), False, 'import logging\n'), ((601, 624), 'logging.StreamHandler', 'logging.StreamHandler', ([], {}), '()\n', (622, 624), False, 'import logging\n'), ((775, 901), 'common.dataset.lc_quad.LC_QuAD', 'LC_QuAD', (["config['lc_quad']['train']", "config['lc_quad']['test']", "config['lc_quad']['vocab']", '(False)', 'args.remove_stop_words'], {}), "(config['lc_quad']['train'], config['lc_quad']['test'], config[\n 'lc_quad']['vocab'], False, args.remove_stop_words)\n", (782, 901), False, 'from common.dataset.lc_quad import LC_QuAD\n'), ((1876, 1910), 'ujson.dump', 'json.dump', (['eval_results', 'json_file'], {}), '(eval_results, json_file)\n', (1885, 1910), True, 'import ujson as json\n'), ((979, 1096), 'common.dataset.qald_7_ml.Qald_7_ml', 'Qald_7_ml', (["config['qald_7_ml']['train']", "config['qald_7_ml']['test']", "config['qald_7_ml']['vocab']", '(False)', '(False)'], {}), "(config['qald_7_ml']['train'], config['qald_7_ml']['test'], config\n ['qald_7_ml']['vocab'], False, False)\n", (988, 1096), False, 'from common.dataset.qald_7_ml import Qald_7_ml\n'), ((1327, 1348), 'common.model.runner.Runner', 'Runner', (['dataset', 'args'], {}), '(dataset, args)\n', (1333, 1348), False, 'from common.model.runner import 
Runner\n'), ((1631, 1642), 'time.time', 'time.time', ([], {}), '()\n', (1640, 1642), False, 'import time\n'), ((1388, 1437), 'os.path.join', 'os.path.join', (["config['chk_path']", 'args.checkpoint'], {}), "(config['chk_path'], args.checkpoint)\n", (1400, 1437), False, 'import os\n')] |
#!/usr/bin/env python3
# JM: 05 Sep 2018
# plot the eke variable in log scale
# (designed for the GEOMETRIC depth-integrated eddy energy but ok for NEMO
# generated too probably)
# styling is default and this script is intended to be used for quick and dirty
# visualisations
import matplotlib as mpl
mpl.use('agg')
import matplotlib.pyplot as plt
from numpy import maximum, sum, nan, linspace, log10, arange, newaxis
from orca_plotting_commands import *
from midpointnorm import *
import netCDF4, sys
# need iris if wanting to use cartopy_command
import iris
import iris.analysis.cartography
# for editing
from cartopy.mpl.gridliner import LONGITUDE_FORMATTER, LATITUDE_FORMATTER
# style settings
plt.rcParams["font.family"] = "DejaVu Serif"
plt.rcParams["mathtext.fontset"] = "cm"
plt.rcParams["mathtext.rm"] = "serif"
plt.rcParams["image.cmap"] = "RdBu_r" # "*_r" is reverse of standard colour
#--------------------------------------------------------
# define the argument parser
import argparse
parser = argparse.ArgumentParser(description = """Plot the eke variable in log scale with Plate Carree projection
(needs Iris and Cartopy package)""")
# fixed arguments
parser.add_argument("data_dir", type = str,
help = "specify data directory")
parser.add_argument("fileT", type = str,
help = "specify data filename")
# optional arguments
parser.add_argument("--t_var", type = str,
help = "specify T variable name (default = eke)", default = "eke")
parser.add_argument("--lprint",
help = "print out the variables available in fileT", action = "store_true")
parser.add_argument("--kt", type = int,
help = "plot a specified time slice (default = last time entry in variable)")
parser.add_argument("--level", nargs = 2, type = float,
help = "specify the limits of levels to plot if any (default: 1000 to 100000)")
parser.add_argument("--cshift", type = float,
help = "specify a shift of the data to emphasis high/low values (set to low value to emphasise high colours)")
parser.add_argument("--file_out", type = str,
help = "specify output name (default = fileT + _eke.png)")
# collect arguments
args = parser.parse_args()
if args.level is None:
args.level = []
if args.lzavg:
args.level.append(1e-4)
args.level.append(1e-1)
else:
args.level.append(1e3)
args.level.append(1e6)
#--------------------------------------------------------
# load files if necessary
data_netcdf4 = netCDF4.Dataset(args.data_dir + args.fileT)
if args.lprint:
print(data_netcdf4)
data_netcdf4.close()
sys.exit("finished displaying data in file, exiting...")
if args.kt is None:
kt = data_netcdf4.dimensions["time_counter"].size - 1 # default load the last time level
eE_raw = data_netcdf4.variables[args.t_var][kt, :, :]
latT = data_netcdf4.variables["nav_lat"][:, :]
lonT = data_netcdf4.variables["nav_lon"][:, :]
depthT = data_netcdf4.variables["deptht"][:]
e3T = data_netcdf4.variables["e3t"][kt, :, :, :]
data_netcdf4.close()
data_netcdf4 = netCDF4.Dataset(args.data_dir + "mesh_mask.nc")
tmask = data_netcdf4.variables["tmask"][0, :, : ,:]
data_netcdf4.close()
#--------------------------------------------------------
# Main plotting commands
# process and projection step
depth = sum(depthT[:, newaxis, newaxis] * tmask, axis = 0)
#eE_raw /= maximum(depth, 1e-16)
rho0 = 1024.0
eE_raw *= rho0
iris.FUTURE.netcdf_promote = True
pcarree = ccrs.PlateCarree()
target_proj = pcarree
lat = iris.coords.AuxCoord(latT, standard_name = "latitude", units = "degrees")
lon = iris.coords.AuxCoord(lonT, standard_name = "longitude", units = "degrees")
data_cube = iris.cube.Cube(eE_raw,
long_name = "geom_eke_depth_avg",
units = "m2 s-2",
aux_coords_and_dims = [(lat, (0, 1)), (lon, (0,1))])
data_proj, extent = iris.analysis.cartography.project(data_cube[:, :], pcarree, nx = 600, ny = 300)
x = data_proj.coord('projection_x_coordinate').points
y = data_proj.coord('projection_y_coordinate').points
plot_data = data_proj.data
# plot
plot_data[(plot_data == 0)] = nan
fig = plt.figure(figsize=(12, 7))
misc_format = {"levels" : linspace(log10(args.level[0]), log10(args.level[1]), 21),
"extend" : "both",
"cmap" : "Spectral_r"}
if args.cshift is not None:
misc_format["norm"] = MidPointNorm(midpoint = log10(args.cshift))
ax, mesh = cartopy_contourf(x, y, log10(plot_data), proj = target_proj, **misc_format)
ax.set_extent([-180, 180, -75, 80], crs = ccrs.PlateCarree())
# set axes, title and add gridlines
gl = ax.gridlines(crs=ccrs.PlateCarree(),
draw_labels = True, linewidth = 1, linestyle = '--')
gl.xlabels_top = False
gl.ylabels_right = False
gl.ylocator = mpl.ticker.FixedLocator([-90, -60, -30, 0, 30, 60, 90])
gl.xformatter = LONGITUDE_FORMATTER
gl.yformatter = LATITUDE_FORMATTER
ax.text(-0.05, 0.5, r'Lat $\left( {}^\circ \right)$',
va='bottom', ha='center',
rotation=90, rotation_mode='anchor',
transform=ax.transAxes)
ax.text(0.5, -0.1, r'Lon $\left( {}^\circ \right)$',
va='bottom', ha='center',
rotation='horizontal', rotation_mode='anchor',
transform=ax.transAxes)
# add colorbar
divider = make_axes_locatable(ax)
ax_cb = divider.new_horizontal(size="1%", pad=0.2, axes_class=plt.Axes)
fig.add_axes(ax_cb)
cb = plt.colorbar(mesh, cax=ax_cb, orientation = "vertical")
cb.ax.set_title(r"$\mathrm{J}\ \mathrm{m}^{-2}$")
cb.set_ticks(arange(int(log10(args.level[0])), int(log10(args.level[1])) + 1, 1))
cb.set_ticklabels([r"$10^{%s}$" % i for i in range(int(log10(args.level[0])), int(log10(args.level[1])) + 1, 1)])
#--------------------------------------------------------
# saving commands
if args.file_out is None:
args.file_out = args.fileT.replace(".nc", "") + "_eke.png"
fig.savefig(args.file_out, dpi = 300, bbox_inches = "tight")
plt.close(fig)
print("generated %s , exiting..." % args.file_out)
| [
"numpy.log10",
"matplotlib.ticker.FixedLocator",
"argparse.ArgumentParser",
"matplotlib.use",
"netCDF4.Dataset",
"iris.coords.AuxCoord",
"matplotlib.pyplot.colorbar",
"matplotlib.pyplot.close",
"numpy.sum",
"matplotlib.pyplot.figure",
"sys.exit",
"iris.analysis.cartography.project",
"iris.cu... | [((306, 320), 'matplotlib.use', 'mpl.use', (['"""agg"""'], {}), "('agg')\n", (313, 320), True, 'import matplotlib as mpl\n'), ((1021, 1220), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Plot the eke variable in log scale with Plate Carree projection\n (needs Iris and Cartopy package)"""'}), '(description=\n """Plot the eke variable in log scale with Plate Carree projection\n (needs Iris and Cartopy package)"""\n )\n', (1044, 1220), False, 'import argparse\n'), ((2673, 2716), 'netCDF4.Dataset', 'netCDF4.Dataset', (['(args.data_dir + args.fileT)'], {}), '(args.data_dir + args.fileT)\n', (2688, 2716), False, 'import netCDF4, sys\n'), ((3227, 3274), 'netCDF4.Dataset', 'netCDF4.Dataset', (["(args.data_dir + 'mesh_mask.nc')"], {}), "(args.data_dir + 'mesh_mask.nc')\n", (3242, 3274), False, 'import netCDF4, sys\n'), ((3472, 3520), 'numpy.sum', 'sum', (['(depthT[:, newaxis, newaxis] * tmask)'], {'axis': '(0)'}), '(depthT[:, newaxis, newaxis] * tmask, axis=0)\n', (3475, 3520), False, 'from numpy import maximum, sum, nan, linspace, log10, arange, newaxis\n'), ((3680, 3749), 'iris.coords.AuxCoord', 'iris.coords.AuxCoord', (['latT'], {'standard_name': '"""latitude"""', 'units': '"""degrees"""'}), "(latT, standard_name='latitude', units='degrees')\n", (3700, 3749), False, 'import iris\n'), ((3760, 3830), 'iris.coords.AuxCoord', 'iris.coords.AuxCoord', (['lonT'], {'standard_name': '"""longitude"""', 'units': '"""degrees"""'}), "(lonT, standard_name='longitude', units='degrees')\n", (3780, 3830), False, 'import iris\n'), ((3847, 3973), 'iris.cube.Cube', 'iris.cube.Cube', (['eE_raw'], {'long_name': '"""geom_eke_depth_avg"""', 'units': '"""m2 s-2"""', 'aux_coords_and_dims': '[(lat, (0, 1)), (lon, (0, 1))]'}), "(eE_raw, long_name='geom_eke_depth_avg', units='m2 s-2',\n aux_coords_and_dims=[(lat, (0, 1)), (lon, (0, 1))])\n", (3861, 3973), False, 'import iris\n'), ((4078, 4153), 'iris.analysis.cartography.project', 
'iris.analysis.cartography.project', (['data_cube[:, :]', 'pcarree'], {'nx': '(600)', 'ny': '(300)'}), '(data_cube[:, :], pcarree, nx=600, ny=300)\n', (4111, 4153), False, 'import iris\n'), ((4343, 4370), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(12, 7)'}), '(figsize=(12, 7))\n', (4353, 4370), True, 'import matplotlib.pyplot as plt\n'), ((5002, 5057), 'matplotlib.ticker.FixedLocator', 'mpl.ticker.FixedLocator', (['[-90, -60, -30, 0, 30, 60, 90]'], {}), '([-90, -60, -30, 0, 30, 60, 90])\n', (5025, 5057), True, 'import matplotlib as mpl\n'), ((5618, 5671), 'matplotlib.pyplot.colorbar', 'plt.colorbar', (['mesh'], {'cax': 'ax_cb', 'orientation': '"""vertical"""'}), "(mesh, cax=ax_cb, orientation='vertical')\n", (5630, 5671), True, 'import matplotlib.pyplot as plt\n'), ((6152, 6166), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (6161, 6166), True, 'import matplotlib.pyplot as plt\n'), ((2780, 2836), 'sys.exit', 'sys.exit', (['"""finished displaying data in file, exiting..."""'], {}), "('finished displaying data in file, exiting...')\n", (2788, 2836), False, 'import netCDF4, sys\n'), ((4677, 4693), 'numpy.log10', 'log10', (['plot_data'], {}), '(plot_data)\n', (4682, 4693), False, 'from numpy import maximum, sum, nan, linspace, log10, arange, newaxis\n'), ((4407, 4427), 'numpy.log10', 'log10', (['args.level[0]'], {}), '(args.level[0])\n', (4412, 4427), False, 'from numpy import maximum, sum, nan, linspace, log10, arange, newaxis\n'), ((4429, 4449), 'numpy.log10', 'log10', (['args.level[1]'], {}), '(args.level[1])\n', (4434, 4449), False, 'from numpy import maximum, sum, nan, linspace, log10, arange, newaxis\n'), ((4622, 4640), 'numpy.log10', 'log10', (['args.cshift'], {}), '(args.cshift)\n', (4627, 4640), False, 'from numpy import maximum, sum, nan, linspace, log10, arange, newaxis\n'), ((5751, 5771), 'numpy.log10', 'log10', (['args.level[0]'], {}), '(args.level[0])\n', (5756, 5771), False, 'from numpy import maximum, sum, nan, 
linspace, log10, arange, newaxis\n'), ((5778, 5798), 'numpy.log10', 'log10', (['args.level[1]'], {}), '(args.level[1])\n', (5783, 5798), False, 'from numpy import maximum, sum, nan, linspace, log10, arange, newaxis\n'), ((5864, 5884), 'numpy.log10', 'log10', (['args.level[0]'], {}), '(args.level[0])\n', (5869, 5884), False, 'from numpy import maximum, sum, nan, linspace, log10, arange, newaxis\n'), ((5891, 5911), 'numpy.log10', 'log10', (['args.level[1]'], {}), '(args.level[1])\n', (5896, 5911), False, 'from numpy import maximum, sum, nan, linspace, log10, arange, newaxis\n')] |
from pandas_datareader import DataReader
import numpy as np
import pandas as pd
import datetime
# Grab time series data for 5-year history for the stock (here AAPL)
# and for S&P-500 Index
start_date = datetime.datetime.now() - datetime.timedelta(days=1826)
end_date = datetime.date.today()
stock = 'MSFT'
index = '^GSPC'
# Grab time series data for 5-year history for the stock
# and for S&P-500 Index
df = DataReader(stock,'yahoo', start_date, end_date)
dfb = DataReader(index,'yahoo', start_date, end_date)
# create a time-series of monthly data points
rts = df.resample('M').last()
rbts = dfb.resample('M').last()
dfsm = pd.DataFrame({'s_adjclose' : rts['Adj Close'],
'b_adjclose' : rbts['Adj Close']},
index=rts.index)
# compute returns
dfsm[['s_returns','b_returns']] = dfsm[['s_adjclose','b_adjclose']]/\
dfsm[['s_adjclose','b_adjclose']].shift(1) -1
dfsm = dfsm.dropna()
covmat = np.cov(dfsm["s_returns"],dfsm["b_returns"])
# calculate measures now
beta = covmat[0,1]/covmat[1,1]
alpha= np.mean(dfsm["s_returns"])-beta*np.mean(dfsm["b_returns"])
# r_squared = 1. - SS_res/SS_tot
ypred = alpha + beta * dfsm["b_returns"]
SS_res = np.sum(np.power(ypred-dfsm["s_returns"],2))
SS_tot = covmat[0,0]*(len(dfsm)-1) # SS_tot is sample_variance*(n-1)
r_squared = 1. - SS_res/SS_tot
# 5- year volatiity and 1-year momentum
volatility = np.sqrt(covmat[0,0])
momentum = np.prod(1+dfsm["s_returns"].tail(12).values) -1
# annualize the numbers
prd = 12. # used monthly returns; 12 periods to annualize
alpha = alpha*prd
volatility = volatility*np.sqrt(prd)
print (f'beta = {beta}')
print (f'alpha = {alpha}')
print (f'r_squared = {r_squared}')
print (f'volatility = {volatility}')
print (f'momentum = {momentum}')
volume = df.Volume
volume = volume.tail(60).mean()
print (volume)
| [
"numpy.mean",
"numpy.sqrt",
"numpy.power",
"pandas_datareader.DataReader",
"datetime.timedelta",
"datetime.datetime.now",
"numpy.cov",
"pandas.DataFrame",
"datetime.date.today"
] | [((278, 299), 'datetime.date.today', 'datetime.date.today', ([], {}), '()\n', (297, 299), False, 'import datetime\n'), ((427, 475), 'pandas_datareader.DataReader', 'DataReader', (['stock', '"""yahoo"""', 'start_date', 'end_date'], {}), "(stock, 'yahoo', start_date, end_date)\n", (437, 475), False, 'from pandas_datareader import DataReader\n'), ((482, 530), 'pandas_datareader.DataReader', 'DataReader', (['index', '"""yahoo"""', 'start_date', 'end_date'], {}), "(index, 'yahoo', start_date, end_date)\n", (492, 530), False, 'from pandas_datareader import DataReader\n'), ((651, 752), 'pandas.DataFrame', 'pd.DataFrame', (["{'s_adjclose': rts['Adj Close'], 'b_adjclose': rbts['Adj Close']}"], {'index': 'rts.index'}), "({'s_adjclose': rts['Adj Close'], 'b_adjclose': rbts[\n 'Adj Close']}, index=rts.index)\n", (663, 752), True, 'import pandas as pd\n'), ((975, 1019), 'numpy.cov', 'np.cov', (["dfsm['s_returns']", "dfsm['b_returns']"], {}), "(dfsm['s_returns'], dfsm['b_returns'])\n", (981, 1019), True, 'import numpy as np\n'), ((1439, 1460), 'numpy.sqrt', 'np.sqrt', (['covmat[0, 0]'], {}), '(covmat[0, 0])\n', (1446, 1460), True, 'import numpy as np\n'), ((210, 233), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (231, 233), False, 'import datetime\n'), ((236, 265), 'datetime.timedelta', 'datetime.timedelta', ([], {'days': '(1826)'}), '(days=1826)\n', (254, 265), False, 'import datetime\n'), ((1087, 1113), 'numpy.mean', 'np.mean', (["dfsm['s_returns']"], {}), "(dfsm['s_returns'])\n", (1094, 1113), True, 'import numpy as np\n'), ((1245, 1283), 'numpy.power', 'np.power', (["(ypred - dfsm['s_returns'])", '(2)'], {}), "(ypred - dfsm['s_returns'], 2)\n", (1253, 1283), True, 'import numpy as np\n'), ((1650, 1662), 'numpy.sqrt', 'np.sqrt', (['prd'], {}), '(prd)\n', (1657, 1662), True, 'import numpy as np\n'), ((1119, 1145), 'numpy.mean', 'np.mean', (["dfsm['b_returns']"], {}), "(dfsm['b_returns'])\n", (1126, 1145), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# mirto_code_main.py
import numpy as np
from scipy.linalg import lu , solve
import cProfile, pstats
import mirto_code_configuration
import mirto_code_compute_F
import sys
import ctypes
# Empty class to store result
class mirto_state:
pass
class mirto_residuals:
pass
class mirto_history:
def __init__(self):
self.measurement_space_only = []
class mirto_norm:
def __init__(self):
self.measurement_space_only = np.NAN
class mirto:
def __init__(self,datapath,oss):
# Load configuration parameters and Input data for retrieval
self.cx = mirto_code_configuration.mirto_config(datapath,oss)
self.obsErr = mirto_code_configuration.mirto_obsErr(self.cx)
self.apriori = mirto_code_configuration.mirto_apriori(self.cx)
self.state = mirto_state()
self.norm = mirto_norm()
self.hist = mirto_history()
self.residuals = mirto_residuals()
self.converge = 0.03 * len(self.cx.state_var_indx)
def compute_chi_square(self):
self.norm.measurement_space_only = (
np.dot(np.dot(self.residuals.yobs_minus_yhat.T,self.obsErr.SeInv),
self.residuals.yobs_minus_yhat/len(self.residuals.yobs_minus_yhat)) )
def update_solution(self,fm):
use_mkl = True
psize = np.shape(fm.K.T)
if ( use_mkl ):
mkl = ctypes.cdll.LoadLibrary('./libmkl_rt.so')
cblas_dgemm = mkl.cblas_dgemm
CblasRowMajor = ctypes.c_int(101)
CblasNoTrans = ctypes.c_int(111)
c_double_p = ctypes.POINTER(ctypes.c_double)
KtSeInv = np.zeros(shape=(psize[0],psize[1]))
cblas_dgemm(CblasRowMajor,CblasNoTrans,CblasNoTrans,
ctypes.c_int(psize[0]),ctypes.c_int(psize[1]),
ctypes.c_int(psize[1]),ctypes.c_double(1.0),
fm.K.T.ctypes.data_as(c_double_p),
ctypes.c_int(psize[1]),
self.obsErr.SeInv.ctypes.data_as(c_double_p),
ctypes.c_int(psize[1]), ctypes.c_double(0.0),
KtSeInv.ctypes.data_as(c_double_p),
ctypes.c_int(psize[1]))
else:
KtSeInv = np.dot(fm.K.T,self.obsErr.SeInv)
KtSeInvK = np.dot(KtSeInv,fm.K)
A = (KtSeInvK+(1.0+self.cx.gamma)*self.state.SaInv_ret)
dx = (self.state.xhat-self.state.xa)
d = (np.dot(KtSeInv,self.residuals.yobs_minus_yhat) -
np.dot(self.state.SaInv_ret,dx))
# Use iterative LU decomposition to determine the solution
# First iteration
L,U = lu(A,permute_l=True)
y = solve(L,d)
x = solve(U,y)
# Second iteration
r = d - np.dot(A,x)
dz = solve(L,r)
ddx = solve(U,dz)
# Solution
totx = x+ddx
self.state.xhat_new = self.state.xhat+totx
self.state.d2 = np.dot(totx.T,d)
def invert(self,profiling=None):
if ( profiling is not None ):
if ( profiling == True ):
pr = cProfile.Profile()
pr.enable()
# Assing values for the state vector
xhat = self.apriori.x0
# Iteration of the Newton-Gauss method to find the zero of the first
# derivative of the Gaussian PDF
Iteration = 0
# Initialize state vector per previous iteration to apriori.xa
# (same as apriori.X0)
xhat_pre = self.apriori.xa
fm = mirto_code_compute_F.radiance(self.cx)
jj = self.cx.state_var_indx
self.state.SaInv_ret = self.apriori.SaInv[jj,:]
self.state.SaInv_ret = self.state.SaInv_ret[:,jj]
self.state.xa = self.apriori.xa[jj]
while (Iteration < self.cx.Iteration_limit):
#
# Compute F (forward model)
#
# print('... Compute_F')
fm.compute_forward(xhat)
fm.estimate_K(xhat)
fm.compute_residuals(self.residuals)
# Subselect from forward model output the channels used for the
# inversion
ii = self.cx.indx
fm.K = fm.K[ii,:]
fm.F = fm.F[ii]
fm.wnF = fm.wnF[ii]
fm.wnK = fm.wnK[ii]
# Subselect from forward model output (Jacobians) and from
# whole apriori the variables actually retrieved.
fm.K = fm.K[:,jj]
self.state.xhat = xhat[jj]
self.state.xhat_pre = xhat_pre[jj]
self.compute_chi_square()
self.update_solution(fm)
if ( Iteration > 0 ):
ref_norm = min(self.hist.measurement_space_only)
print ('Actual value of Norm : ',self.norm.measurement_space_only)
print ('Last low value of Norm : ',ref_norm)
if (self.norm.measurement_space_only <= ref_norm):
self.cx.gamma = self.cx.gamma/2.0
xxdel = (100.0 * (ref_norm - self.norm.measurement_space_only) /
self.norm.measurement_space_only)
print('UWPHYSRET residuals decreased by ',xxdel,'%')
else:
self.cx.gamma = self.cx.gamma*5.0
xxdel = (100.0 * (self.norm.measurement_space_only - ref_norm) /
self.norm.measurement_space_only)
print('UWPHYSRET residuals increased by ',xxdel,'%')
xhat[jj] = self.state.xhat
if (abs(self.state.d2) < self.converge):
print('**** CONVERGED!!! ****')
break
else:
if (Iteration < self.cx.Iteration_limit):
# assign new value to the solution and continue the iterative solution
# Note: Don't update xhat if this is the final iteration or it
# will overwrite the solution
xhat_pre[jj] = self.state.xhat
xhat[jj] = self.state.xhat_new
self.hist.measurement_space_only.append(self.norm.measurement_space_only)
Iteration = Iteration + 1
print ('Iteration = ', Iteration)
print ('New Gamma = ', self.cx.gamma)
print ('Distance = ', abs(self.state.d2))
print ('Wanted = ', self.converge)
if ( profiling is not None ):
if ( profiling == True ):
pr.disable()
s = ()
try:
# Default to Python 3
import io
s = io.StringIO()
ps = pstats.Stats(pr, stream=s)
ps.strip_dirs().sort_stats('cumulative').print_stats()
print(s.getvalue())
except:
# May be this is Python 2?
import StringIO
s = StringIO.StringIO()
ps = pstats.Stats(pr, stream=s)
ps.strip_dirs().sort_stats('cumulative').print_stats()
print(s.getvalue())
return(self.state)
if ( __name__ == '__main__' ):
sys.path.append('/home/ggiuliani/pythoncode/oss')
from oss4SHIS import oss4SHIS
from os import path
import time
#
# OSS init input
#
solar = 'solar_irradiances.nc'
precomputed = 'leo.cris.0.05.nc'
datapath = '/home/ggiuliani/pythoncode/data'
oss = oss4SHIS(path.join(datapath,solar),path.join(datapath,precomputed))
# This part of code must be repeated for each input profile in data
# directory. Must find a way to have names here. Probably the errors
# also can be preloaded.
start = time.clock()
profiling = True
check_output = False
inverter = mirto(datapath,oss)
solution = inverter.invert(profiling)
print('Elapsed Time in the Inversion: ',time.clock() - start,' s')
if ( check_output ):
print('Profiles')
print('Pressure Temperature Water Vapor O3')
for i in range(0,61):
print(inverter.cx.pressure_grid[i],solution.xhat[i],
solution.xhat[i+61],solution.xhat[i+122])
print('Skin temperature : ',solution.xhat[183])
print('Surface Emissivity : ')
for i in range(184,189):
print(solution.xhat[i])
print('Value of distance : ',solution.d2)
try:
import pylab as p
except:
print('Cannot use pylab. Is it installed?')
sys.exit()
x = solution.xhat[0:61]
y = np.log(inverter.cx.pressure_grid[0:61])
p.ylabel("Log Pressure")
p.xlabel("Temperature")
p.plot(x,y)
p.plt.gca().invert_yaxis()
p.show()
p.ylabel("Pressure")
p.xlabel("Mixing Ratio")
x = np.exp(solution.xhat[61:122])
y = inverter.cx.pressure_grid[0:61]
p.plot(x,y)
p.plt.gca().invert_yaxis()
p.show()
| [
"mirto_code_configuration.mirto_config",
"time.clock",
"numpy.log",
"pylab.xlabel",
"sys.exit",
"mirto_code_configuration.mirto_obsErr",
"sys.path.append",
"StringIO.StringIO",
"ctypes.cdll.LoadLibrary",
"pylab.plot",
"numpy.exp",
"numpy.dot",
"ctypes.c_int",
"cProfile.Profile",
"io.Stri... | [((6345, 6394), 'sys.path.append', 'sys.path.append', (['"""/home/ggiuliani/pythoncode/oss"""'], {}), "('/home/ggiuliani/pythoncode/oss')\n", (6360, 6394), False, 'import sys\n'), ((6860, 6872), 'time.clock', 'time.clock', ([], {}), '()\n', (6870, 6872), False, 'import time\n'), ((586, 638), 'mirto_code_configuration.mirto_config', 'mirto_code_configuration.mirto_config', (['datapath', 'oss'], {}), '(datapath, oss)\n', (623, 638), False, 'import mirto_code_configuration\n'), ((656, 702), 'mirto_code_configuration.mirto_obsErr', 'mirto_code_configuration.mirto_obsErr', (['self.cx'], {}), '(self.cx)\n', (693, 702), False, 'import mirto_code_configuration\n'), ((722, 769), 'mirto_code_configuration.mirto_apriori', 'mirto_code_configuration.mirto_apriori', (['self.cx'], {}), '(self.cx)\n', (760, 769), False, 'import mirto_code_configuration\n'), ((1247, 1263), 'numpy.shape', 'np.shape', (['fm.K.T'], {}), '(fm.K.T)\n', (1255, 1263), True, 'import numpy as np\n'), ((2136, 2157), 'numpy.dot', 'np.dot', (['KtSeInv', 'fm.K'], {}), '(KtSeInv, fm.K)\n', (2142, 2157), True, 'import numpy as np\n'), ((2454, 2475), 'scipy.linalg.lu', 'lu', (['A'], {'permute_l': '(True)'}), '(A, permute_l=True)\n', (2456, 2475), False, 'from scipy.linalg import lu, solve\n'), ((2483, 2494), 'scipy.linalg.solve', 'solve', (['L', 'd'], {}), '(L, d)\n', (2488, 2494), False, 'from scipy.linalg import lu, solve\n'), ((2502, 2513), 'scipy.linalg.solve', 'solve', (['U', 'y'], {}), '(U, y)\n', (2507, 2513), False, 'from scipy.linalg import lu, solve\n'), ((2569, 2580), 'scipy.linalg.solve', 'solve', (['L', 'r'], {}), '(L, r)\n', (2574, 2580), False, 'from scipy.linalg import lu, solve\n'), ((2590, 2602), 'scipy.linalg.solve', 'solve', (['U', 'dz'], {}), '(U, dz)\n', (2595, 2602), False, 'from scipy.linalg import lu, solve\n'), ((2701, 2718), 'numpy.dot', 'np.dot', (['totx.T', 'd'], {}), '(totx.T, d)\n', (2707, 2718), True, 'import numpy as np\n'), ((3215, 3253), 
'mirto_code_compute_F.radiance', 'mirto_code_compute_F.radiance', (['self.cx'], {}), '(self.cx)\n', (3244, 3253), False, 'import mirto_code_compute_F\n'), ((6622, 6648), 'os.path.join', 'path.join', (['datapath', 'solar'], {}), '(datapath, solar)\n', (6631, 6648), False, 'from os import path\n'), ((6648, 6680), 'os.path.join', 'path.join', (['datapath', 'precomputed'], {}), '(datapath, precomputed)\n', (6657, 6680), False, 'from os import path\n'), ((7659, 7698), 'numpy.log', 'np.log', (['inverter.cx.pressure_grid[0:61]'], {}), '(inverter.cx.pressure_grid[0:61])\n', (7665, 7698), True, 'import numpy as np\n'), ((7703, 7727), 'pylab.ylabel', 'p.ylabel', (['"""Log Pressure"""'], {}), "('Log Pressure')\n", (7711, 7727), True, 'import pylab as p\n'), ((7732, 7755), 'pylab.xlabel', 'p.xlabel', (['"""Temperature"""'], {}), "('Temperature')\n", (7740, 7755), True, 'import pylab as p\n'), ((7760, 7772), 'pylab.plot', 'p.plot', (['x', 'y'], {}), '(x, y)\n', (7766, 7772), True, 'import pylab as p\n'), ((7807, 7815), 'pylab.show', 'p.show', ([], {}), '()\n', (7813, 7815), True, 'import pylab as p\n'), ((7820, 7840), 'pylab.ylabel', 'p.ylabel', (['"""Pressure"""'], {}), "('Pressure')\n", (7828, 7840), True, 'import pylab as p\n'), ((7845, 7869), 'pylab.xlabel', 'p.xlabel', (['"""Mixing Ratio"""'], {}), "('Mixing Ratio')\n", (7853, 7869), True, 'import pylab as p\n'), ((7878, 7907), 'numpy.exp', 'np.exp', (['solution.xhat[61:122]'], {}), '(solution.xhat[61:122])\n', (7884, 7907), True, 'import numpy as np\n'), ((7952, 7964), 'pylab.plot', 'p.plot', (['x', 'y'], {}), '(x, y)\n', (7958, 7964), True, 'import pylab as p\n'), ((7999, 8007), 'pylab.show', 'p.show', ([], {}), '()\n', (8005, 8007), True, 'import pylab as p\n'), ((1043, 1102), 'numpy.dot', 'np.dot', (['self.residuals.yobs_minus_yhat.T', 'self.obsErr.SeInv'], {}), '(self.residuals.yobs_minus_yhat.T, self.obsErr.SeInv)\n', (1049, 1102), True, 'import numpy as np\n'), ((1296, 1337), 'ctypes.cdll.LoadLibrary', 
'ctypes.cdll.LoadLibrary', (['"""./libmkl_rt.so"""'], {}), "('./libmkl_rt.so')\n", (1319, 1337), False, 'import ctypes\n'), ((1396, 1413), 'ctypes.c_int', 'ctypes.c_int', (['(101)'], {}), '(101)\n', (1408, 1413), False, 'import ctypes\n'), ((1435, 1452), 'ctypes.c_int', 'ctypes.c_int', (['(111)'], {}), '(111)\n', (1447, 1452), False, 'import ctypes\n'), ((1472, 1503), 'ctypes.POINTER', 'ctypes.POINTER', (['ctypes.c_double'], {}), '(ctypes.c_double)\n', (1486, 1503), False, 'import ctypes\n'), ((1520, 1556), 'numpy.zeros', 'np.zeros', ([], {'shape': '(psize[0], psize[1])'}), '(shape=(psize[0], psize[1]))\n', (1528, 1556), True, 'import numpy as np\n'), ((2088, 2121), 'numpy.dot', 'np.dot', (['fm.K.T', 'self.obsErr.SeInv'], {}), '(fm.K.T, self.obsErr.SeInv)\n', (2094, 2121), True, 'import numpy as np\n'), ((2267, 2314), 'numpy.dot', 'np.dot', (['KtSeInv', 'self.residuals.yobs_minus_yhat'], {}), '(KtSeInv, self.residuals.yobs_minus_yhat)\n', (2273, 2314), True, 'import numpy as np\n'), ((2326, 2358), 'numpy.dot', 'np.dot', (['self.state.SaInv_ret', 'dx'], {}), '(self.state.SaInv_ret, dx)\n', (2332, 2358), True, 'import numpy as np\n'), ((2548, 2560), 'numpy.dot', 'np.dot', (['A', 'x'], {}), '(A, x)\n', (2554, 2560), True, 'import numpy as np\n'), ((7030, 7042), 'time.clock', 'time.clock', ([], {}), '()\n', (7040, 7042), False, 'import time\n'), ((1633, 1655), 'ctypes.c_int', 'ctypes.c_int', (['psize[0]'], {}), '(psize[0])\n', (1645, 1655), False, 'import ctypes\n'), ((1656, 1678), 'ctypes.c_int', 'ctypes.c_int', (['psize[1]'], {}), '(psize[1])\n', (1668, 1678), False, 'import ctypes\n'), ((1698, 1720), 'ctypes.c_int', 'ctypes.c_int', (['psize[1]'], {}), '(psize[1])\n', (1710, 1720), False, 'import ctypes\n'), ((1721, 1741), 'ctypes.c_double', 'ctypes.c_double', (['(1.0)'], {}), '(1.0)\n', (1736, 1741), False, 'import ctypes\n'), ((1814, 1836), 'ctypes.c_int', 'ctypes.c_int', (['psize[1]'], {}), '(psize[1])\n', (1826, 1836), False, 'import ctypes\n'), ((1920, 1942), 
'ctypes.c_int', 'ctypes.c_int', (['psize[1]'], {}), '(psize[1])\n', (1932, 1942), False, 'import ctypes\n'), ((1944, 1964), 'ctypes.c_double', 'ctypes.c_double', (['(0.0)'], {}), '(0.0)\n', (1959, 1964), False, 'import ctypes\n'), ((2038, 2060), 'ctypes.c_int', 'ctypes.c_int', (['psize[1]'], {}), '(psize[1])\n', (2050, 2060), False, 'import ctypes\n'), ((2838, 2856), 'cProfile.Profile', 'cProfile.Profile', ([], {}), '()\n', (2854, 2856), False, 'import cProfile, pstats\n'), ((7612, 7622), 'sys.exit', 'sys.exit', ([], {}), '()\n', (7620, 7622), False, 'import sys\n'), ((7776, 7787), 'pylab.plt.gca', 'p.plt.gca', ([], {}), '()\n', (7785, 7787), True, 'import pylab as p\n'), ((7968, 7979), 'pylab.plt.gca', 'p.plt.gca', ([], {}), '()\n', (7977, 7979), True, 'import pylab as p\n'), ((5886, 5899), 'io.StringIO', 'io.StringIO', ([], {}), '()\n', (5897, 5899), False, 'import io\n'), ((5915, 5941), 'pstats.Stats', 'pstats.Stats', (['pr'], {'stream': 's'}), '(pr, stream=s)\n', (5927, 5941), False, 'import cProfile, pstats\n'), ((6130, 6149), 'StringIO.StringIO', 'StringIO.StringIO', ([], {}), '()\n', (6147, 6149), False, 'import StringIO\n'), ((6165, 6191), 'pstats.Stats', 'pstats.Stats', (['pr'], {'stream': 's'}), '(pr, stream=s)\n', (6177, 6191), False, 'import cProfile, pstats\n')] |
import time
import numpy as np
import matplotlib.pyplot as plt
import torch
from torch import nn
from torch.utils.data import TensorDataset, DataLoader
from torchvision.utils import save_image
class Generator(nn.Module):
def __init__(self, n_noise=62, n_disc=10, n_cont=2):
super().__init__()
self.n_latent = n_noise + n_disc + n_cont
self.fc1 = nn.Sequential(nn.Linear(self.n_latent, 1024, bias=False),
nn.BatchNorm1d(1024),
nn.ReLU())
self.fc2 = nn.Sequential(nn.Linear(1024, 128 * 7 * 7, bias=False),
nn.BatchNorm1d(128 * 7 * 7),
nn.ReLU())
self.conv_transpose1 = nn.Sequential(nn.ConvTranspose2d(128, 64, 4, 2, padding=1, bias=False),
nn.BatchNorm2d(64),
nn.ReLU())
self.conv_transpose2 = nn.Sequential(nn.ConvTranspose2d(64, 1, 4, 2, padding=1),
nn.Sigmoid()) # paper中无激活函数
def forward(self, x):
x = nn.Sequential(self.fc1, self.fc2)(x)
x = x.view(-1, 128, 7, 7)
x = nn.Sequential(self.conv_transpose1, self.conv_transpose2)(x)
return x
class Share(nn.Module):
"""The common part for Discriminator and net Q."""
def __init__(self):
super().__init__()
# 1 * 28 * 28
self.conv1 = nn.Sequential(nn.Conv2d(1, 64, 4, 2, padding=1),
nn.LeakyReLU(0.1))
# 64 * 14 * 14
self.conv2 = nn.Sequential(nn.Conv2d(64, 128, 4, 2, padding=1, bias=False),
nn.BatchNorm2d(128),
nn.LeakyReLU(0.1))
# 128 * 7 * 7
self.fc = nn.Sequential(nn.Linear(128 * 7 * 7, 1024, bias=False),
nn.BatchNorm1d(1024),
nn.LeakyReLU(0.1))
# 1024
def forward(self, x):
x = nn.Sequential(self.conv1, self.conv2)(x)
x = x.view(-1, 128 * 7 * 7)
x = self.fc(x)
return x
class Discriminator(nn.Module):
def __init__(self):
super().__init__()
self.fc = nn.Sequential(nn.Linear(1024, 1), nn.Sigmoid())
def forward(self, x):
x = self.fc(x)
return x
class Q(nn.Module):
def __init__(self, n_disc=10, n_cont=2):
super().__init__()
self.n_disc = n_disc
self.n_cont = n_cont
self.fc = nn.Sequential(nn.Linear(1024, 128, bias=False),
nn.BatchNorm1d(128),
nn.LeakyReLU(0.1))
self.fc_disc = nn.Linear(128, self.n_disc)
self.fc_cont_mu = nn.Linear(128, self.n_cont)
self.fc_cont_var = nn.Linear(128, self.n_cont)
def forward(self, x):
"""回头试试这种:
# mu = x[:, self.n_disc:self.n_disc+self.n_cont]
# var = x[:, self.n_disc+self.n_cont:]
"""
x = self.fc(x)
disc = self.fc_disc(x)
cont_mu = self.fc_cont_mu(x)
cont_var = torch.exp(self.fc_cont_var(x))
return disc, cont_mu, cont_var
def weights_init(layer):
classname = layer.__class__.__name__
if classname.find('Conv') != -1:
layer.weight.data.normal_(0.0, 0.02)
elif classname.find('BatchNorm') != -1:
layer.weight.data.normal_(1.0, 0.02)
layer.bias.data.fill_(0)
class NormalNLLLoss:
"""
Calculate the negative log likelihood
of normal distribution.
This needs to be minimised.
Treating Q(cj | x) as a factored Gaussian.
"""
def __call__(self, x, mu, var):
logli = -0.5 * (var.mul(2 * np.pi) + 1e-6).log() - \
(x - mu).pow(2).div(var.mul(2.0) + 1e-6)
nll = -(logli.sum(1).mean())
return nll
def sample_z(n_noise, n_disc, n_cont, batch_size, device):
noise = torch.randn(batch_size, n_noise, device=device)
# torch.multinomial()?
# one-hot vectors
disc = torch.as_tensor(np.random.multinomial(1, n_disc * [1 / n_disc], size=batch_size),
dtype=torch.float32, device=device)
cont = torch.empty(batch_size, n_cont, dtype=torch.float32,
device=device).uniform_(-1, 1)
z = torch.cat((noise, disc, cont), dim=1)
return z
N_NOISE = 62
N_DISC = 10
N_CONT = 2
N_EPOCH = 50
BS = 64
LR_D = 0.0002 # 2e-4
LR_G = 0.001 # 1e-3
BETA1 = 0.5
BETA2 = 0.999
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# source_path = '../VAE/data/all.npy'
# source = torch.from_numpy(np.load(source_path)) # device(type='cpu')
source = torch.load(
r'E:\研究生文件\Code\GAN\MNIST\processed\training.pt')
source = torch.unsqueeze(source[0], 1)
train_set = DataLoader(TensorDataset(source), batch_size=BS, shuffle=True)
net_Share = Share().to(device)
net_D = Discriminator().to(device)
net_Q = Q(N_DISC, N_CONT).to(device)
net_G = Generator(N_NOISE, N_DISC, N_CONT).to(device)
for net in [net_Share, net_D, net_Q, net_G]:
net.apply(weights_init)
optim_D = torch.optim.Adam([{"params": net_Share.parameters()}, {
"params": net_D.parameters()}], lr=LR_D, betas=(BETA1, BETA2))
optim_G = torch.optim.Adam([{"params": net_G.parameters()}, {
"params": net_Q.parameters()}], lr=LR_G, betas=(BETA1, BETA2))
criterion_D = nn.BCELoss()
criterion_disc = nn.CrossEntropyLoss()
criterion_cont = NormalNLLLoss() # log_gaussian()
real_label = 1
fake_label = 0
# 100 fixed latent codes
fixed_noise = torch.randn(100, N_NOISE, device=device)
fixed_disc = torch.cat((torch.eye(10, device=device),)*10, dim=0)
cont_template = torch.repeat_interleave(
torch.linspace(-1, 1, 10), 10).view(-1, 1)
fixed_cont1 = torch.cat(
(cont_template, torch.zeros_like(cont_template)), dim=1).to(device)
fixed_cont2 = torch.cat(
(torch.zeros_like(cont_template), cont_template), dim=1).to(device)
fixed_z1 = torch.cat((fixed_noise, fixed_disc, fixed_cont1), dim=1)
fixed_z2 = torch.cat((fixed_noise, fixed_disc, fixed_cont2), dim=1)
losses_D = []
losses_G = []
time_begin = time.time()
for epoch in range(N_EPOCH):
for i, (x,) in enumerate(train_set):
# udpate Discriminator
optim_D.zero_grad()
# 判真
x = x.to(dtype=torch.float, device=device)
torch.div(x, 255, out=x)
label = torch.full((x.size(0), 1), real_label, device=device)
judgement_real = net_D(net_Share(x))
loss_D_real = criterion_D(judgement_real, label)
loss_D_real.backward()
# 判假
z = sample_z(N_NOISE, N_DISC, N_CONT, x.size(0), device)
label.fill_(fake_label)
fake = net_G(z)
judgement_fake = net_D(net_Share(fake.detach()))
loss_D_fake = criterion_D(judgement_fake, label)
loss_D_fake.backward()
# 综合
loss_D = loss_D_real + loss_D_fake
optim_D.step()
# update Generator and Q
optim_G.zero_grad()
share_out = net_Share(fake)
# update Generator part
judgement = net_D(share_out)
label.fill_(real_label)
# treat fake data as real
loss_G_reconstruct = criterion_D(judgement, label)
# update Q part
q_disc, q_cont_mu, q_cont_var = net_Q(share_out)
disc = z[:, N_NOISE: N_NOISE + N_DISC]
# torch.max(disc, 1)[1]是nn.CrossEntropyLoss()的target
loss_G_disc = criterion_disc(q_disc, torch.max(disc, 1)[1])
cont = z[:, -N_CONT:]
# cont本采样自均匀分布,现在却又用均值和方差来衡量其与正态分布的距离???
# 迷之操作???
loss_G_cont = 0.1 * criterion_cont(cont, q_cont_mu, q_cont_var)
loss_G = loss_G_reconstruct + loss_G_disc + loss_G_cont
loss_G.backward()
optim_G.step()
losses_D.append(loss_D.item())
losses_G.append(loss_G.item())
if (i + 1) % 30 == 0 or (i + 1) == len(train_set):
time_cost = int(time.time() - time_begin)
print('Time cost so far: {}h {}min {}s'.format(
time_cost // 3600, time_cost % 3600 // 60, time_cost % 3600 % 60 // 1))
print("Epoch[{}/{}], Step [{}/{}], Loss_D: {:.4f}, Loss_G: {:.4f}, Loss_Info: {:.4f}".
format(epoch + 1, N_EPOCH, i + 1, len(train_set), loss_D.item(), loss_G.item(), (loss_G_disc + loss_G_cont).item()))
with torch.no_grad():
generated_images = net_G(fixed_z1)
save_image(generated_images, "{}-1.png".format(epoch + 1), nrow=10)
generated_images = net_G(fixed_z2)
save_image(generated_images, "{}-2.png".format(epoch + 1), nrow=10)
# Plot the training losses.
plt.figure(figsize=(10, 5))
plt.title("Generator and Discriminator Loss During Training")
plt.plot(losses_G, label="G")
plt.plot(losses_D, label="D")
plt.xlabel("iterations")
plt.ylabel("Loss")
plt.legend()
plt.savefig("Loss Curve")
| [
"torch.nn.ReLU",
"torch.nn.CrossEntropyLoss",
"matplotlib.pyplot.ylabel",
"torch.nn.Sequential",
"torch.max",
"torch.nn.BatchNorm1d",
"torch.cuda.is_available",
"torch.nn.BatchNorm2d",
"torch.nn.Sigmoid",
"torch.unsqueeze",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"torch.eye",
... | [((4852, 4917), 'torch.load', 'torch.load', (['"""E:\\\\研究生文件\\\\Code\\\\GAN\\\\MNIST\\\\processed\\\\training.pt"""'], {}), "('E:\\\\研究生文件\\\\Code\\\\GAN\\\\MNIST\\\\processed\\\\training.pt')\n", (4862, 4917), False, 'import torch\n'), ((4929, 4958), 'torch.unsqueeze', 'torch.unsqueeze', (['source[0]', '(1)'], {}), '(source[0], 1)\n', (4944, 4958), False, 'import torch\n'), ((5604, 5616), 'torch.nn.BCELoss', 'nn.BCELoss', ([], {}), '()\n', (5614, 5616), False, 'from torch import nn\n'), ((5635, 5656), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (5654, 5656), False, 'from torch import nn\n'), ((5786, 5826), 'torch.randn', 'torch.randn', (['(100)', 'N_NOISE'], {'device': 'device'}), '(100, N_NOISE, device=device)\n', (5797, 5826), False, 'import torch\n'), ((6194, 6250), 'torch.cat', 'torch.cat', (['(fixed_noise, fixed_disc, fixed_cont1)'], {'dim': '(1)'}), '((fixed_noise, fixed_disc, fixed_cont1), dim=1)\n', (6203, 6250), False, 'import torch\n'), ((6263, 6319), 'torch.cat', 'torch.cat', (['(fixed_noise, fixed_disc, fixed_cont2)'], {'dim': '(1)'}), '((fixed_noise, fixed_disc, fixed_cont2), dim=1)\n', (6272, 6319), False, 'import torch\n'), ((6368, 6379), 'time.time', 'time.time', ([], {}), '()\n', (6377, 6379), False, 'import time\n'), ((8928, 8955), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 5)'}), '(figsize=(10, 5))\n', (8938, 8955), True, 'import matplotlib.pyplot as plt\n'), ((8957, 9018), 'matplotlib.pyplot.title', 'plt.title', (['"""Generator and Discriminator Loss During Training"""'], {}), "('Generator and Discriminator Loss During Training')\n", (8966, 9018), True, 'import matplotlib.pyplot as plt\n'), ((9020, 9049), 'matplotlib.pyplot.plot', 'plt.plot', (['losses_G'], {'label': '"""G"""'}), "(losses_G, label='G')\n", (9028, 9049), True, 'import matplotlib.pyplot as plt\n'), ((9051, 9080), 'matplotlib.pyplot.plot', 'plt.plot', (['losses_D'], {'label': '"""D"""'}), "(losses_D, label='D')\n", (9059, 
9080), True, 'import matplotlib.pyplot as plt\n'), ((9082, 9106), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""iterations"""'], {}), "('iterations')\n", (9092, 9106), True, 'import matplotlib.pyplot as plt\n'), ((9108, 9126), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Loss"""'], {}), "('Loss')\n", (9118, 9126), True, 'import matplotlib.pyplot as plt\n'), ((9128, 9140), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (9138, 9140), True, 'import matplotlib.pyplot as plt\n'), ((9142, 9167), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""Loss Curve"""'], {}), "('Loss Curve')\n", (9153, 9167), True, 'import matplotlib.pyplot as plt\n'), ((4077, 4124), 'torch.randn', 'torch.randn', (['batch_size', 'n_noise'], {'device': 'device'}), '(batch_size, n_noise, device=device)\n', (4088, 4124), False, 'import torch\n'), ((4463, 4500), 'torch.cat', 'torch.cat', (['(noise, disc, cont)'], {'dim': '(1)'}), '((noise, disc, cont), dim=1)\n', (4472, 4500), False, 'import torch\n'), ((4983, 5004), 'torch.utils.data.TensorDataset', 'TensorDataset', (['source'], {}), '(source)\n', (4996, 5004), False, 'from torch.utils.data import TensorDataset, DataLoader\n'), ((2809, 2836), 'torch.nn.Linear', 'nn.Linear', (['(128)', 'self.n_disc'], {}), '(128, self.n_disc)\n', (2818, 2836), False, 'from torch import nn\n'), ((2864, 2891), 'torch.nn.Linear', 'nn.Linear', (['(128)', 'self.n_cont'], {}), '(128, self.n_cont)\n', (2873, 2891), False, 'from torch import nn\n'), ((2920, 2947), 'torch.nn.Linear', 'nn.Linear', (['(128)', 'self.n_cont'], {}), '(128, self.n_cont)\n', (2929, 2947), False, 'from torch import nn\n'), ((4204, 4268), 'numpy.random.multinomial', 'np.random.multinomial', (['(1)', '(n_disc * [1 / n_disc])'], {'size': 'batch_size'}), '(1, n_disc * [1 / n_disc], size=batch_size)\n', (4225, 4268), True, 'import numpy as np\n'), ((4691, 4716), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (4714, 4716), False, 'import torch\n'), ((6588, 6612), 
'torch.div', 'torch.div', (['x', '(255)'], {'out': 'x'}), '(x, 255, out=x)\n', (6597, 6612), False, 'import torch\n'), ((8637, 8652), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (8650, 8652), False, 'import torch\n'), ((405, 447), 'torch.nn.Linear', 'nn.Linear', (['self.n_latent', '(1024)'], {'bias': '(False)'}), '(self.n_latent, 1024, bias=False)\n', (414, 447), False, 'from torch import nn\n'), ((483, 503), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['(1024)'], {}), '(1024)\n', (497, 503), False, 'from torch import nn\n'), ((539, 548), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (546, 548), False, 'from torch import nn\n'), ((584, 624), 'torch.nn.Linear', 'nn.Linear', (['(1024)', '(128 * 7 * 7)'], {'bias': '(False)'}), '(1024, 128 * 7 * 7, bias=False)\n', (593, 624), False, 'from torch import nn\n'), ((660, 687), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['(128 * 7 * 7)'], {}), '(128 * 7 * 7)\n', (674, 687), False, 'from torch import nn\n'), ((723, 732), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (730, 732), False, 'from torch import nn\n'), ((780, 836), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', (['(128)', '(64)', '(4)', '(2)'], {'padding': '(1)', 'bias': '(False)'}), '(128, 64, 4, 2, padding=1, bias=False)\n', (798, 836), False, 'from torch import nn\n'), ((884, 902), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(64)'], {}), '(64)\n', (898, 902), False, 'from torch import nn\n'), ((950, 959), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (957, 959), False, 'from torch import nn\n'), ((1007, 1049), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', (['(64)', '(1)', '(4)', '(2)'], {'padding': '(1)'}), '(64, 1, 4, 2, padding=1)\n', (1025, 1049), False, 'from torch import nn\n'), ((1097, 1109), 'torch.nn.Sigmoid', 'nn.Sigmoid', ([], {}), '()\n', (1107, 1109), False, 'from torch import nn\n'), ((1168, 1201), 'torch.nn.Sequential', 'nn.Sequential', (['self.fc1', 'self.fc2'], {}), '(self.fc1, self.fc2)\n', (1181, 1201), False, 'from torch import 
nn\n'), ((1253, 1310), 'torch.nn.Sequential', 'nn.Sequential', (['self.conv_transpose1', 'self.conv_transpose2'], {}), '(self.conv_transpose1, self.conv_transpose2)\n', (1266, 1310), False, 'from torch import nn\n'), ((1531, 1564), 'torch.nn.Conv2d', 'nn.Conv2d', (['(1)', '(64)', '(4)', '(2)'], {'padding': '(1)'}), '(1, 64, 4, 2, padding=1)\n', (1540, 1564), False, 'from torch import nn\n'), ((1602, 1619), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.1)'], {}), '(0.1)\n', (1614, 1619), False, 'from torch import nn\n'), ((1681, 1728), 'torch.nn.Conv2d', 'nn.Conv2d', (['(64)', '(128)', '(4)', '(2)'], {'padding': '(1)', 'bias': '(False)'}), '(64, 128, 4, 2, padding=1, bias=False)\n', (1690, 1728), False, 'from torch import nn\n'), ((1766, 1785), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(128)'], {}), '(128)\n', (1780, 1785), False, 'from torch import nn\n'), ((1823, 1840), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.1)'], {}), '(0.1)\n', (1835, 1840), False, 'from torch import nn\n'), ((1898, 1938), 'torch.nn.Linear', 'nn.Linear', (['(128 * 7 * 7)', '(1024)'], {'bias': '(False)'}), '(128 * 7 * 7, 1024, bias=False)\n', (1907, 1938), False, 'from torch import nn\n'), ((1973, 1993), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['(1024)'], {}), '(1024)\n', (1987, 1993), False, 'from torch import nn\n'), ((2028, 2045), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.1)'], {}), '(0.1)\n', (2040, 2045), False, 'from torch import nn\n'), ((2105, 2142), 'torch.nn.Sequential', 'nn.Sequential', (['self.conv1', 'self.conv2'], {}), '(self.conv1, self.conv2)\n', (2118, 2142), False, 'from torch import nn\n'), ((2348, 2366), 'torch.nn.Linear', 'nn.Linear', (['(1024)', '(1)'], {}), '(1024, 1)\n', (2357, 2366), False, 'from torch import nn\n'), ((2368, 2380), 'torch.nn.Sigmoid', 'nn.Sigmoid', ([], {}), '()\n', (2378, 2380), False, 'from torch import nn\n'), ((2645, 2677), 'torch.nn.Linear', 'nn.Linear', (['(1024)', '(128)'], {'bias': '(False)'}), '(1024, 128, bias=False)\n', (2654, 2677), 
False, 'from torch import nn\n'), ((2712, 2731), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['(128)'], {}), '(128)\n', (2726, 2731), False, 'from torch import nn\n'), ((2766, 2783), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.1)'], {}), '(0.1)\n', (2778, 2783), False, 'from torch import nn\n'), ((4346, 4413), 'torch.empty', 'torch.empty', (['batch_size', 'n_cont'], {'dtype': 'torch.float32', 'device': 'device'}), '(batch_size, n_cont, dtype=torch.float32, device=device)\n', (4357, 4413), False, 'import torch\n'), ((5852, 5880), 'torch.eye', 'torch.eye', (['(10)'], {'device': 'device'}), '(10, device=device)\n', (5861, 5880), False, 'import torch\n'), ((5941, 5966), 'torch.linspace', 'torch.linspace', (['(-1)', '(1)', '(10)'], {}), '(-1, 1, 10)\n', (5955, 5966), False, 'import torch\n'), ((6031, 6062), 'torch.zeros_like', 'torch.zeros_like', (['cont_template'], {}), '(cont_template)\n', (6047, 6062), False, 'import torch\n'), ((6115, 6146), 'torch.zeros_like', 'torch.zeros_like', (['cont_template'], {}), '(cont_template)\n', (6131, 6146), False, 'import torch\n'), ((7728, 7746), 'torch.max', 'torch.max', (['disc', '(1)'], {}), '(disc, 1)\n', (7737, 7746), False, 'import torch\n'), ((8213, 8224), 'time.time', 'time.time', ([], {}), '()\n', (8222, 8224), False, 'import time\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Nov 11 12:50:49 2018
@author: ead2019
"""
def read_txt_file(txtfile):
import numpy as np
lines = []
with open(txtfile, "r") as f:
for line in f:
line = line.strip()
lines.append(line)
return np.array(lines)
def read_boxes(txtfile):
import numpy as np
lines = []
with open(txtfile, "r") as f:
for line in f:
line = line.strip()
box = np.hstack(line.split()).astype(np.float)
box[0] = int(box[0])
lines.append(box)
return np.array(lines)
def read_obj_names(textfile):
classnames = []
with open(textfile) as f:
for line in f:
line = line.strip('\n')
if len(line)>0:
classnames.append(line)
return np.hstack(classnames)
debug = 0
useParsing=0
if __name__=="__main__":
import numpy as np
# import pandas as pd
import argparse
import glob
annotationImagePaths=''
classnames=''
if useParsing:
parser = argparse.ArgumentParser()
parser.add_argument('-bounding_boxes_folder', action='store', help='please include the full path of the folder with bounding boxes (.txt)', type=str)
parser.add_argument('-class_lists', action='store', help='class list file (fullpath)', type=str)
args = parser.parse_args()
classtextfile= args.class_lists
annotationImagePaths=args.bounding_boxes_folder
# read class names (equivalent to indexes stored in annotation Oth index)
classnames=read_obj_names(classtextfile)
else:
#use you explicit path settings here if you dont want to parse
bbox_base_path='../all_bbox_test/'
bbox_subFolder='bbox_txt/'
#
classFileName='class_list.txt'
classtextfile = classFileName
classnames=read_obj_names(classtextfile)
annotationImagePaths=bbox_base_path+bbox_subFolder
if debug:
fileName='00002.txt'
textfile = bbox_base_path+bbox_subFolder+fileName
bboxes=read_boxes(textfile)
print('box class identified:', bboxes[0][0])
i =(int)(bboxes[0][0])
print('corresponding name of the class type:', classnames[i])
print('total boxes annotated in this file:', len(bboxes))
# histogram
ext = ['*.txt']
classCounter = [ 0, 0, 0, 0, 0, 0, 0 ]
print(annotationImagePaths)
for filename in sorted(glob.glob(annotationImagePaths + ext[0], recursive = True)):
if debug:
print(filename)
#1) read
bboxes=read_boxes(filename)
#2) loop throgh for class types in array
for i in range (len(bboxes)):
for j in range (len(classnames)):
if (j == (int)(bboxes[i][0])):
classCounter[j]=((int)(classCounter[j])+1)
# percentage
label = (classCounter/np.sum(classCounter))*100
# plot
import matplotlib.pyplot as plt
fig, ax = plt.subplots()
x = np.arange(7)
plt.bar(x, classCounter)
plt.xticks(x, (classnames))
plt.xticks(rotation=45)
for i in range(len(classCounter)):
plt.text(-0.35+i , classCounter[i]+10, s = str(round(label[i],2)), size = 10,color='red', fontweight='bold')
plt.show()
fig.savefig('bboxclasses_test.png', bbox_inches='tight')
| [
"matplotlib.pyplot.xticks",
"numpy.hstack",
"argparse.ArgumentParser",
"numpy.array",
"matplotlib.pyplot.bar",
"numpy.sum",
"glob.glob",
"matplotlib.pyplot.subplots",
"numpy.arange",
"matplotlib.pyplot.show"
] | [((327, 342), 'numpy.array', 'np.array', (['lines'], {}), '(lines)\n', (335, 342), True, 'import numpy as np\n'), ((633, 648), 'numpy.array', 'np.array', (['lines'], {}), '(lines)\n', (641, 648), True, 'import numpy as np\n'), ((871, 892), 'numpy.hstack', 'np.hstack', (['classnames'], {}), '(classnames)\n', (880, 892), True, 'import numpy as np\n'), ((3134, 3148), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (3146, 3148), True, 'import matplotlib.pyplot as plt\n'), ((3157, 3169), 'numpy.arange', 'np.arange', (['(7)'], {}), '(7)\n', (3166, 3169), True, 'import numpy as np\n'), ((3174, 3198), 'matplotlib.pyplot.bar', 'plt.bar', (['x', 'classCounter'], {}), '(x, classCounter)\n', (3181, 3198), True, 'import matplotlib.pyplot as plt\n'), ((3203, 3228), 'matplotlib.pyplot.xticks', 'plt.xticks', (['x', 'classnames'], {}), '(x, classnames)\n', (3213, 3228), True, 'import matplotlib.pyplot as plt\n'), ((3235, 3258), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'rotation': '(45)'}), '(rotation=45)\n', (3245, 3258), True, 'import matplotlib.pyplot as plt\n'), ((3428, 3438), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3436, 3438), True, 'import matplotlib.pyplot as plt\n'), ((1117, 1142), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (1140, 1142), False, 'import argparse\n'), ((2578, 2634), 'glob.glob', 'glob.glob', (['(annotationImagePaths + ext[0])'], {'recursive': '(True)'}), '(annotationImagePaths + ext[0], recursive=True)\n', (2587, 2634), False, 'import glob\n'), ((3030, 3050), 'numpy.sum', 'np.sum', (['classCounter'], {}), '(classCounter)\n', (3036, 3050), True, 'import numpy as np\n')] |
import os
import cv2
import sys
import pdb
import six
import glob
import time
import torch
import random
import pandas
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
import numpy as np
# import pyarrow as pa
from PIL import Image
import torch.utils.data as data
import matplotlib.pyplot as plt
from utils import video_augmentation
from torch.utils.data.sampler import Sampler
sys.path.append("..")
class BaseFeeder(data.Dataset):
def __init__(self, prefix, gloss_dict, drop_ratio=1, num_gloss=-1, mode="train", transform_mode=True,
datatype="lmdb"):
self.mode = mode
self.ng = num_gloss
self.prefix = prefix
self.dict = gloss_dict
self.data_type = datatype
self.feat_prefix = f"{prefix}/features/fullFrame-256x256px/{mode}"
self.transform_mode = "train" if transform_mode else "test"
self.inputs_list = np.load(f"./preprocess/phoenix2014/{mode}_info.npy", allow_pickle=True).item()
# self.inputs_list = np.load(f"{prefix}/annotations/manual/{mode}.corpus.npy", allow_pickle=True).item()
# self.inputs_list = np.load(f"{prefix}/annotations/manual/{mode}.corpus.npy", allow_pickle=True).item()
# self.inputs_list = dict([*filter(lambda x: isinstance(x[0], str) or x[0] < 10, self.inputs_list.items())])
print(mode, len(self))
self.data_aug = self.transform()
print("")
def __getitem__(self, idx):
if self.data_type == "video":
input_data, label, fi = self.read_video(idx)
input_data, label = self.normalize(input_data, label)
# input_data, label = self.normalize(input_data, label, fi['fileid'])
return input_data, torch.LongTensor(label), self.inputs_list[idx]['original_info']
elif self.data_type == "lmdb":
input_data, label, fi = self.read_lmdb(idx)
input_data, label = self.normalize(input_data, label)
return input_data, torch.LongTensor(label), self.inputs_list[idx]['original_info']
else:
input_data, label = self.read_features(idx)
return input_data, label, self.inputs_list[idx]['original_info']
def read_video(self, index, num_glosses=-1):
# load file info
fi = self.inputs_list[index]
img_folder = os.path.join(self.prefix, "features/fullFrame-256x256px/" + fi['folder'])
img_list = sorted(glob.glob(img_folder))
label_list = []
for phase in fi['label'].split(" "):
if phase == '':
continue
if phase in self.dict.keys():
label_list.append(self.dict[phase][0])
return [cv2.cvtColor(cv2.imread(img_path), cv2.COLOR_BGR2RGB) for img_path in img_list], label_list, fi
def read_features(self, index):
# load file info
fi = self.inputs_list[index]
data = np.load(f"./features/{self.mode}/{fi['fileid']}_features.npy", allow_pickle=True).item()
return data['features'], data['label']
def normalize(self, video, label, file_id=None):
video, label = self.data_aug(video, label, file_id)
video = video.float() / 127.5 - 1
return video, label
def transform(self):
if self.transform_mode == "train":
print("Apply training transform.")
return video_augmentation.Compose([
# video_augmentation.CenterCrop(224),
# video_augmentation.WERAugment('/lustre/wangtao/current_exp/exp/baseline/boundary.npy'),
video_augmentation.RandomCrop(224),
video_augmentation.RandomHorizontalFlip(0.5),
video_augmentation.ToTensor(),
video_augmentation.TemporalRescale(0.2),
# video_augmentation.Resize(0.5),
])
else:
print("Apply testing transform.")
return video_augmentation.Compose([
video_augmentation.CenterCrop(224),
# video_augmentation.Resize(0.5),
video_augmentation.ToTensor(),
])
def byte_to_img(self, byteflow):
unpacked = pa.deserialize(byteflow)
imgbuf = unpacked[0]
buf = six.BytesIO()
buf.write(imgbuf)
buf.seek(0)
img = Image.open(buf).convert('RGB')
return img
@staticmethod
def collate_fn(batch):
batch = [item for item in sorted(batch, key=lambda x: len(x[0]), reverse=True)]
video, label, info = list(zip(*batch))
if len(video[0].shape) > 3:
max_len = len(video[0])
video_length = torch.LongTensor([np.ceil(len(vid) / 4.0) * 4 + 12 for vid in video])
left_pad = 6
right_pad = int(np.ceil(max_len / 4.0)) * 4 - max_len + 6
max_len = max_len + left_pad + right_pad
padded_video = [torch.cat(
(
vid[0][None].expand(left_pad, -1, -1, -1),
vid,
vid[-1][None].expand(max_len - len(vid) - left_pad, -1, -1, -1),
)
, dim=0)
for vid in video]
padded_video = torch.stack(padded_video)
else:
max_len = len(video[0])
video_length = torch.LongTensor([len(vid) for vid in video])
padded_video = [torch.cat(
(
vid,
vid[-1][None].expand(max_len - len(vid), -1),
)
, dim=0)
for vid in video]
padded_video = torch.stack(padded_video).permute(0, 2, 1)
label_length = torch.LongTensor([len(lab) for lab in label])
if max(label_length) == 0:
return padded_video, video_length, [], [], info
else:
padded_label = []
for lab in label:
padded_label.extend(lab)
padded_label = torch.LongTensor(padded_label)
return padded_video, video_length, padded_label, label_length, info
def __len__(self):
return len(self.inputs_list) - 1
def record_time(self):
self.cur_time = time.time()
return self.cur_time
def split_time(self):
split_time = time.time() - self.cur_time
self.record_time()
return split_time
if __name__ == "__main__":
feeder = BaseFeeder()
dataloader = torch.utils.data.DataLoader(
dataset=feeder,
batch_size=1,
shuffle=True,
drop_last=True,
num_workers=0,
)
for data in dataloader:
pdb.set_trace()
| [
"torch.LongTensor",
"utils.video_augmentation.RandomCrop",
"utils.video_augmentation.TemporalRescale",
"sys.path.append",
"utils.video_augmentation.CenterCrop",
"warnings.simplefilter",
"glob.glob",
"utils.video_augmentation.ToTensor",
"numpy.ceil",
"time.time",
"cv2.imread",
"PIL.Image.open",... | [((136, 198), 'warnings.simplefilter', 'warnings.simplefilter', ([], {'action': '"""ignore"""', 'category': 'FutureWarning'}), "(action='ignore', category=FutureWarning)\n", (157, 198), False, 'import warnings\n'), ((411, 432), 'sys.path.append', 'sys.path.append', (['""".."""'], {}), "('..')\n", (426, 432), False, 'import sys\n'), ((6412, 6518), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', ([], {'dataset': 'feeder', 'batch_size': '(1)', 'shuffle': '(True)', 'drop_last': '(True)', 'num_workers': '(0)'}), '(dataset=feeder, batch_size=1, shuffle=True,\n drop_last=True, num_workers=0)\n', (6439, 6518), False, 'import torch\n'), ((2344, 2417), 'os.path.join', 'os.path.join', (['self.prefix', "('features/fullFrame-256x256px/' + fi['folder'])"], {}), "(self.prefix, 'features/fullFrame-256x256px/' + fi['folder'])\n", (2356, 2417), False, 'import os\n'), ((4236, 4249), 'six.BytesIO', 'six.BytesIO', ([], {}), '()\n', (4247, 4249), False, 'import six\n'), ((6170, 6181), 'time.time', 'time.time', ([], {}), '()\n', (6179, 6181), False, 'import time\n'), ((6598, 6613), 'pdb.set_trace', 'pdb.set_trace', ([], {}), '()\n', (6611, 6613), False, 'import pdb\n'), ((2444, 2465), 'glob.glob', 'glob.glob', (['img_folder'], {}), '(img_folder)\n', (2453, 2465), False, 'import glob\n'), ((5192, 5217), 'torch.stack', 'torch.stack', (['padded_video'], {}), '(padded_video)\n', (5203, 5217), False, 'import torch\n'), ((5942, 5972), 'torch.LongTensor', 'torch.LongTensor', (['padded_label'], {}), '(padded_label)\n', (5958, 5972), False, 'import torch\n'), ((6259, 6270), 'time.time', 'time.time', ([], {}), '()\n', (6268, 6270), False, 'import time\n'), ((925, 996), 'numpy.load', 'np.load', (['f"""./preprocess/phoenix2014/{mode}_info.npy"""'], {'allow_pickle': '(True)'}), "(f'./preprocess/phoenix2014/{mode}_info.npy', allow_pickle=True)\n", (932, 996), True, 'import numpy as np\n'), ((1744, 1767), 'torch.LongTensor', 'torch.LongTensor', (['label'], {}), 
'(label)\n', (1760, 1767), False, 'import torch\n'), ((2912, 2998), 'numpy.load', 'np.load', (['f"""./features/{self.mode}/{fi[\'fileid\']}_features.npy"""'], {'allow_pickle': '(True)'}), '(f"./features/{self.mode}/{fi[\'fileid\']}_features.npy", allow_pickle\n =True)\n', (2919, 2998), True, 'import numpy as np\n'), ((4310, 4325), 'PIL.Image.open', 'Image.open', (['buf'], {}), '(buf)\n', (4320, 4325), False, 'from PIL import Image\n'), ((2000, 2023), 'torch.LongTensor', 'torch.LongTensor', (['label'], {}), '(label)\n', (2016, 2023), False, 'import torch\n'), ((2715, 2735), 'cv2.imread', 'cv2.imread', (['img_path'], {}), '(img_path)\n', (2725, 2735), False, 'import cv2\n'), ((3572, 3606), 'utils.video_augmentation.RandomCrop', 'video_augmentation.RandomCrop', (['(224)'], {}), '(224)\n', (3601, 3606), False, 'from utils import video_augmentation\n'), ((3624, 3668), 'utils.video_augmentation.RandomHorizontalFlip', 'video_augmentation.RandomHorizontalFlip', (['(0.5)'], {}), '(0.5)\n', (3663, 3668), False, 'from utils import video_augmentation\n'), ((3686, 3715), 'utils.video_augmentation.ToTensor', 'video_augmentation.ToTensor', ([], {}), '()\n', (3713, 3715), False, 'from utils import video_augmentation\n'), ((3733, 3772), 'utils.video_augmentation.TemporalRescale', 'video_augmentation.TemporalRescale', (['(0.2)'], {}), '(0.2)\n', (3767, 3772), False, 'from utils import video_augmentation\n'), ((3963, 3997), 'utils.video_augmentation.CenterCrop', 'video_augmentation.CenterCrop', (['(224)'], {}), '(224)\n', (3992, 3997), False, 'from utils import video_augmentation\n'), ((4065, 4094), 'utils.video_augmentation.ToTensor', 'video_augmentation.ToTensor', ([], {}), '()\n', (4092, 4094), False, 'from utils import video_augmentation\n'), ((5593, 5618), 'torch.stack', 'torch.stack', (['padded_video'], {}), '(padded_video)\n', (5604, 5618), False, 'import torch\n'), ((4763, 4785), 'numpy.ceil', 'np.ceil', (['(max_len / 4.0)'], {}), '(max_len / 4.0)\n', (4770, 4785), True, 
'import numpy as np\n')] |
from pyiron_atomistics import Project as PyironProject
import numpy as np
from collections import defaultdict
from spin_space_averaging.sqs import SQSInteractive
from pyiron_contrib.atomistics.atomistics.master.qha import QuasiHarmonicApproximation
def get_bfgs(s, y, H):
    """Return the BFGS rank-two update to the Hessian approximation ``H``.

    Implements ``dH = y y^T / (s^T y) - (H s)(H s)^T / (s^T H s)``, the
    standard BFGS correction.  Leading batch dimensions are supported via
    einsum broadcasting (``...``).

    Args:
        s (numpy.ndarray): displacement vector(s), shape (..., n)
        y (numpy.ndarray): gradient-difference vector(s), shape (..., n)
        H (numpy.ndarray): current Hessian approximation(s), shape (..., n, n)

    Returns:
        numpy.ndarray: Hessian update ``dH`` of shape (..., n, n)
    """
    # Curvature s.y — denominator of the rank-one (y y^T) term
    curvature = np.einsum('...i,...i->...', s, y)
    rank_one = np.einsum('...i,...j,...->...ij', y, y, 1 / curvature)
    # Quadratic form s^T H s — denominator of the (H s)(H s)^T term
    quad_form = np.einsum('...ij,...i,...j->...', H, s, s)
    rank_two = np.einsum(
        '...ij,...kl,...j,...l,...->...ik', H, H, s, s, 1 / quad_form,
        optimize=True
    )
    return rank_one - rank_two
class Project(PyironProject):
    """Pyiron project for spin-space-averaging calculations.

    Combines SQS-generated magnetic configurations, spin-constrained SPHInX
    (DFT) jobs and a LAMMPS-based phonon Hessian into a coupled
    position/magnetic-moment Hessian that is refined via BFGS updates.
    """

    def __init__(
        self,
        path='',
        user=None,
        sql_query=None,
        default_working_directory=False,
    ):
        super().__init__(
            path=path,
            user=user,
            sql_query=sql_query,
            default_working_directory=default_working_directory,
        )
        self.structure = None  # reference structure used by all jobs
        self.potential = None  # LAMMPS potential; auto-selected when None
        self.n_cores = 80  # cores requested per DFT job
        self.interpolate_h_mag = True
        self.ready_to_run = True
        self.magmom_magnitudes = None  # magnitudes scanned in run_init_magmoms
        self._magmoms = None  # SQS spin patterns (n_copy, n_atoms); set by run_sqs
        self.n_copy = None  # number of SQS replicas
        self._symmetry = None  # lazily computed structure symmetry
        self.mixing_parameter = 0.3  # SPHInX density/spin residual scaling

    @property
    def symmetry(self):
        """Symmetry object of ``self.structure`` (computed once, then cached)."""
        if self._symmetry is None:
            self._symmetry = self.structure.get_symmetry()
        return self._symmetry

    def get_relaxed_job(self, structure, pressure=False):
        """Minimize ``structure`` with LAMMPS and return the job.

        Args:
            structure: structure to minimize.
            pressure (bool): if True, relax the cell to zero pressure as well.

        Returns:
            Lammps job (the already-finished job if it exists).
        """
        job = self.create.job.Lammps(('lmp_relax', pressure))
        if job.status.finished:
            return job
        job.structure = structure
        if self.potential is None:
            self.potential = job.list_potentials()[0]
        # Fix: the potential was never assigned to the job (cf. lmp_hessian)
        job.potential = self.potential
        if pressure:
            job.calc_minimize(pressure=[0, 0, 0])
        else:
            job.calc_minimize()
        job.run()
        return job

    @property
    def lmp_hessian(self):
        """Force-constant matrix of ``self.structure`` from a LAMMPS QHA job."""
        if self.structure is None:
            raise AssertionError('Structure not set')
        lmp = self.create.job.Lammps(('lmp_qha', self.structure))
        lmp.structure = self.structure
        if self.potential is None:
            self.potential = lmp.list_potentials()[0]
        lmp.potential = self.potential
        lmp.interactive_open()
        qha = self.create_job(QuasiHarmonicApproximation, lmp.job_name.replace('lmp_', ''))
        qha.ref_job = lmp
        # single volume point: plain harmonic force constants, no QHA sweep
        qha.input['num_points'] = 1
        if qha.status.initialized:
            qha.run()
        return qha['output/force_constants'][0]

    def set_input(self, job, fix_spin=True):
        """Apply the common SPHInX static-calculation settings to ``job``."""
        job.set_convergence_precision(electronic_energy=1e-6)
        job.set_encut(encut=550)
        job.set_kpoints(k_mesh_spacing=0.1)
        job.set_mixing_parameters(
            density_residual_scaling=self.mixing_parameter,
            spin_residual_scaling=self.mixing_parameter
        )
        if fix_spin:
            job.fix_spin_constraint = True
        job.server.cores = self.n_cores
        job.server.queue = 'cm'
        job.calc_static()

    @property
    def magmoms(self):
        """SQS spin patterns of shape (n_copy, n_atoms); requires ``run_sqs``."""
        if self._magmoms is None:
            raise ValueError('magmoms not set yet - execute run_sqs')
        return self._magmoms

    def run_sqs(
        self,
        cutoff,
        n_copy,
        nonmag_ids=None,
        n_steps=5000,
        sigma=0.05,
        max_sigma=4,
        n_points=100,
        min_sample_value=1.0e-8
    ):
        """Generate quasi-random spin patterns via SQS Monte Carlo.

        Args:
            cutoff (float): interaction cutoff radius for the SQS correlations.
            n_copy (int): number of independent spin patterns to generate.
            nonmag_ids: indices of non-magnetic atoms (their spin stays 0).
            n_steps (int): number of Monte-Carlo steps.
            sigma, max_sigma, n_points, min_sample_value: smearing parameters
                forwarded to ``SQSInteractive``.
        """
        indices = np.arange(len(self.structure))
        if nonmag_ids is not None:
            indices = np.delete(indices, nonmag_ids)
        sqs = SQSInteractive(
            structure=self.structure[indices],
            concentration=0.5,
            cutoff=cutoff,
            n_copy=n_copy,
            sigma=sigma,
            max_sigma=max_sigma,
            n_points=n_points,
            min_sample_value=min_sample_value
        )
        self.n_copy = n_copy
        sqs.run_mc(n_steps)
        # non-magnetic sites keep spin 0
        self._magmoms = np.zeros((n_copy, len(self.structure)))
        self._magmoms.T[indices] = sqs.spins.T

    def run_init_magmoms(self):
        """Run spin-constrained DFT for every magnitude/SQS-pattern pair."""
        if self.magmom_magnitudes is None:
            raise ValueError('magmom magnitudes not defined')
        for mag in self.magmom_magnitudes:
            for ii, mm in enumerate(self.magmoms):
                job = self.create.job.Sphinx(('spx_v', self.structure, mag, ii, 0))
                job.structure = self.structure
                job.structure.set_initial_magnetic_moments(mag * mm)
                self.set_input(job)
                job.run()

    def get_output(self, job_list, pr=None, shape=None):
        """Collect energies, magnetic forces, magmoms, forces and positions.

        Args:
            job_list: job names (or name tuples) to load.
            pr: project (or list of projects, one per job) to load from;
                defaults to ``self``.
            shape (tuple): if given, reshape array outputs to ``shape + (...)``.

        Returns:
            dict: lists (or reshaped arrays) keyed by output quantity.
        """
        if pr is None:
            pr = len(job_list) * [self]
        if not isinstance(pr, list):
            pr = len(job_list) * [pr]
        output = defaultdict(list)
        for job_name, prr in zip(job_list, pr):
            job = prr.load(job_name)
            output['energy'].append(job.output.energy_pot[-1])
            # last SCF energy difference as a convergence indicator
            output['ediff'].append(np.diff(job['output/generic/dft/scf_energy_free'][0])[-1])
            output['nu'].append(job['output/generic/dft/magnetic_forces'][0])
            output['magmoms'].append(job['output/generic/dft/atom_spins'][0])
            output['forces'].append(job['output/generic/forces'][0])
            output['positions'].append(job['output/generic/positions'][0])
        if shape is not None:
            output['magmoms'] = np.array(output['magmoms']).reshape(shape + (-1,))
            output['nu'] = np.array(output['nu']).reshape(shape + (-1,))
            output['forces'] = np.array(output['forces']).reshape(shape + (-1, 3,))
            output['positions'] = np.array(output['positions']).reshape(shape + (-1, 3,))
        return output

    def get_init_hessian_mag(self, pr=None):
        """Build the initial magnetic Hessian from the ``run_init_magmoms`` jobs."""
        job_lst = [
            ('spx_v', self.structure, mag, ii, 0)
            for mag in self.magmom_magnitudes
            for ii in range(self.n_copy)
        ]
        output = self.get_output(
            job_lst, pr=pr, shape=(len(self.magmom_magnitudes), self.n_copy)
        )
        self.set_initial_H_mag(output['nu'], output['magmoms'])

    def symmetrize_magmoms(self, magmoms, signs=None):
        """Average magmom-like quantities over SQS copies and symmetry orbits.

        Args:
            magmoms: values of shape (..., n_copy, n_atoms) (flattened is ok).
            signs: optional sign pattern; defaults to ``sign(magmoms)`` so that
                up/down sublattices are folded onto each other.

        Returns:
            ndarray: symmetry- and copy-averaged values.
        """
        if signs is None:
            signs = np.sign(magmoms)
        signs = np.sign(signs)
        magmoms = np.reshape(magmoms, (-1, self.n_copy, len(self.structure)))
        signs = np.reshape(signs, (-1, self.n_copy, len(self.structure)))
        return np.mean([
            mm[self.symmetry.permutations] for mm in np.mean(magmoms * signs, axis=1)
        ], axis=1).squeeze()

    def update_hessian(self, magnetic_forces, magmoms, positions, forces, n_cycle):
        """BFGS-update ``self.H_current`` from successive ionic/magnetic steps.

        Args:
            magnetic_forces: per-step magnetic forces (nu).
            magmoms: per-step magnetic moments.
            positions: per-step atomic positions.
            forces: per-step atomic forces.
            n_cycle (int): number of steps contained in the arrays.
        """
        nu = self.symmetrize_magmoms(magnetic_forces, magmoms)
        magmoms = self.symmetrize_magmoms(magmoms)
        f_sym = self.symmetry.symmetrize_vectors(forces.mean(axis=1)).reshape(n_cycle, -1)
        x_diff = np.diff(positions, axis=0)
        # minimum-image convention for the position differences
        x_diff = self.structure.find_mic(x_diff).reshape(n_cycle - 1, -1)
        x_diff = np.append(
            x_diff, np.diff(magmoms, axis=0), axis=1
        )
        # combined gradient: (-forces, magnetic forces)
        dUdx = np.append(-f_sym, nu, axis=1)
        for xx, ff in zip(x_diff, np.diff(dUdx, axis=0)):
            self.H_current += get_bfgs(xx, ff, self.H_current)

    def set_initial_H_mag(self, magnetic_forces=None, magmoms=None, hessian=None):
        """Initialize the magnetic Hessian, either from data or explicitly.

        When ``magnetic_forces`` and ``magmoms`` are given, the diagonal
        magnetic Hessian is obtained from a per-atom least-squares fit of
        nu vs. magmom; otherwise ``hessian`` is used directly.

        Raises:
            ValueError: if neither data nor an explicit Hessian is given.
            AssertionError: if the resulting Hessian size does not match the
                structure.
        """
        if magnetic_forces is not None and magmoms is not None:
            # least-squares slope: (N*Sxy - Sx*Sy) / (N*Sxx - Sx**2)
            mm = np.sum(magmoms**2, axis=0)
            mn = np.sum(magmoms * magnetic_forces, axis=0)
            # Fix: this term is Sx = sum(x); it was previously sum(x**2),
            # which made the denominator N*Sxx - Sxx**2 dimensionally wrong.
            m = np.sum(magmoms, axis=0)
            n = np.sum(magnetic_forces, axis=0)
            H = (len(magmoms) * mn - m * n) / (len(magmoms) * mm - m**2)
            self.H_mag_init = np.mean(
                np.mean(H, axis=0)[self.symmetry.permutations], axis=0
            )
        elif hessian is not None:
            self.H_mag_init = hessian
        else:
            raise ValueError('input values not set')
        # promote per-atom values to a diagonal matrix
        self.H_mag_init = np.eye(len(self.H_mag_init)) * self.H_mag_init
        if len(self.H_mag_init) != len(self.structure):
            raise AssertionError('Length not correct')
        self.set_initial_H(self.lmp_hessian, self.H_mag_init)

    def get_dx(self, forces, magnetic_forces, magmoms=None, symmetrize=False):
        """Newton step for positions and magmoms from the current Hessian.

        Args:
            forces: atomic forces.
            magnetic_forces: magnetic forces (nu).
            magmoms: magnetic moments; required when ``symmetrize`` is True.
            symmetrize (bool): if True, symmetry-average forces and nu first.

        Returns:
            tuple: (dx, dm) position displacement (n_atoms, 3) and magmom change.
        """
        if symmetrize:
            if magmoms is None:
                raise ValueError('when symmetrize is on magmoms is required')
            magnetic_forces = self.symmetrize_magmoms(magnetic_forces, magmoms)
            forces = self.symmetry.symmetrize_vectors(forces.mean(axis=0))
        xm_new = np.einsum('ij,j->i', np.linalg.inv(self.H_current), np.append(-forces, magnetic_forces))
        dx = -xm_new[:3 * len(self.structure)].reshape(-1, 3)
        dm = -xm_new[3 * len(self.structure):]
        return dx, dm

    def set_initial_H(self, H_phonon, H_magnon):
        """Assemble the block-diagonal (phonon, magnon) starting Hessian."""
        n = len(self.structure)
        self.H_init = np.eye(4 * n)
        self.H_init[:3 * n, :3 * n] = H_phonon.copy()
        self.H_init[3 * n:, 3 * n:] *= H_magnon
        self.H_current = self.H_init.copy()
| [
"numpy.mean",
"numpy.eye",
"numpy.delete",
"numpy.diff",
"numpy.append",
"numpy.sum",
"numpy.array",
"numpy.linalg.inv",
"collections.defaultdict",
"numpy.sign",
"spin_space_averaging.sqs.SQSInteractive",
"numpy.einsum"
] | [((3527, 3723), 'spin_space_averaging.sqs.SQSInteractive', 'SQSInteractive', ([], {'structure': 'self.structure[indices]', 'concentration': '(0.5)', 'cutoff': 'cutoff', 'n_copy': 'n_copy', 'sigma': 'sigma', 'max_sigma': 'max_sigma', 'n_points': 'n_points', 'min_sample_value': 'min_sample_value'}), '(structure=self.structure[indices], concentration=0.5, cutoff\n =cutoff, n_copy=n_copy, sigma=sigma, max_sigma=max_sigma, n_points=\n n_points, min_sample_value=min_sample_value)\n', (3541, 3723), False, 'from spin_space_averaging.sqs import SQSInteractive\n'), ((4695, 4712), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (4706, 4712), False, 'from collections import defaultdict\n'), ((6170, 6184), 'numpy.sign', 'np.sign', (['signs'], {}), '(signs)\n', (6177, 6184), True, 'import numpy as np\n'), ((6784, 6810), 'numpy.diff', 'np.diff', (['positions'], {'axis': '(0)'}), '(positions, axis=0)\n', (6791, 6810), True, 'import numpy as np\n'), ((6991, 7020), 'numpy.append', 'np.append', (['(-f_sym)', 'nu'], {'axis': '(1)'}), '(-f_sym, nu, axis=1)\n', (7000, 7020), True, 'import numpy as np\n'), ((8775, 8788), 'numpy.eye', 'np.eye', (['(4 * n)'], {}), '(4 * n)\n', (8781, 8788), True, 'import numpy as np\n'), ((331, 364), 'numpy.einsum', 'np.einsum', (['"""...i,...i->..."""', 's', 'y'], {}), "('...i,...i->...', s, y)\n", (340, 364), True, 'import numpy as np\n'), ((455, 503), 'numpy.einsum', 'np.einsum', (['"""...ij,...i,...j->..."""', 'H', '*(2 * [s])'], {}), "('...ij,...i,...j->...', H, *(2 * [s]))\n", (464, 503), True, 'import numpy as np\n'), ((3482, 3512), 'numpy.delete', 'np.delete', (['indices', 'nonmag_ids'], {}), '(indices, nonmag_ids)\n', (3491, 3512), True, 'import numpy as np\n'), ((6137, 6153), 'numpy.sign', 'np.sign', (['magmoms'], {}), '(magmoms)\n', (6144, 6153), True, 'import numpy as np\n'), ((6933, 6957), 'numpy.diff', 'np.diff', (['magmoms'], {'axis': '(0)'}), '(magmoms, axis=0)\n', (6940, 6957), True, 'import numpy as np\n'), 
((7055, 7076), 'numpy.diff', 'np.diff', (['dUdx'], {'axis': '(0)'}), '(dUdx, axis=0)\n', (7062, 7076), True, 'import numpy as np\n'), ((7307, 7335), 'numpy.sum', 'np.sum', (['(magmoms ** 2)'], {'axis': '(0)'}), '(magmoms ** 2, axis=0)\n', (7313, 7335), True, 'import numpy as np\n'), ((7351, 7392), 'numpy.sum', 'np.sum', (['(magmoms * magnetic_forces)'], {'axis': '(0)'}), '(magmoms * magnetic_forces, axis=0)\n', (7357, 7392), True, 'import numpy as np\n'), ((7409, 7437), 'numpy.sum', 'np.sum', (['(magmoms ** 2)'], {'axis': '(0)'}), '(magmoms ** 2, axis=0)\n', (7415, 7437), True, 'import numpy as np\n'), ((7452, 7483), 'numpy.sum', 'np.sum', (['magnetic_forces'], {'axis': '(0)'}), '(magnetic_forces, axis=0)\n', (7458, 7483), True, 'import numpy as np\n'), ((8472, 8501), 'numpy.linalg.inv', 'np.linalg.inv', (['self.H_current'], {}), '(self.H_current)\n', (8485, 8501), True, 'import numpy as np\n'), ((8503, 8538), 'numpy.append', 'np.append', (['(-forces)', 'magnetic_forces'], {}), '(-forces, magnetic_forces)\n', (8512, 8538), True, 'import numpy as np\n'), ((4896, 4949), 'numpy.diff', 'np.diff', (["job['output/generic/dft/scf_energy_free'][0]"], {}), "(job['output/generic/dft/scf_energy_free'][0])\n", (4903, 4949), True, 'import numpy as np\n'), ((5317, 5344), 'numpy.array', 'np.array', (["output['magmoms']"], {}), "(output['magmoms'])\n", (5325, 5344), True, 'import numpy as np\n'), ((5395, 5417), 'numpy.array', 'np.array', (["output['nu']"], {}), "(output['nu'])\n", (5403, 5417), True, 'import numpy as np\n'), ((5472, 5498), 'numpy.array', 'np.array', (["output['forces']"], {}), "(output['forces'])\n", (5480, 5498), True, 'import numpy as np\n'), ((5559, 5588), 'numpy.array', 'np.array', (["output['positions']"], {}), "(output['positions'])\n", (5567, 5588), True, 'import numpy as np\n'), ((7612, 7630), 'numpy.mean', 'np.mean', (['H'], {'axis': '(0)'}), '(H, axis=0)\n', (7619, 7630), True, 'import numpy as np\n'), ((6415, 6447), 'numpy.mean', 'np.mean', (['(magmoms 
* signs)'], {'axis': '(1)'}), '(magmoms * signs, axis=1)\n', (6422, 6447), True, 'import numpy as np\n')] |
import numpy as np
import json
from mlflow.models.evaluation import evaluate
from mlflow.models.evaluation.default_evaluator import (
_get_classifier_global_metrics,
_infer_model_type_by_labels,
_extract_raw_model_and_predict_fn,
_get_regressor_metrics,
_get_binary_sum_up_label_pred_prob,
_get_classifier_per_class_metrics,
_gen_classifier_curve,
)
import mlflow
from sklearn.linear_model import LogisticRegression
# pylint: disable=unused-import
from tests.models.test_evaluation import (
get_run_data,
linear_regressor_model_uri,
diabetes_dataset,
multiclass_logistic_regressor_model_uri,
iris_dataset,
binary_logistic_regressor_model_uri,
breast_cancer_dataset,
spark_linear_regressor_model_uri,
diabetes_spark_dataset,
svm_model_uri,
)
from mlflow.models.utils import plot_lines
def assert_dict_equal(d1, d2, rtol):
    """Assert that two numeric dicts are equal within a relative tolerance.

    Fix: the original only checked that every key of ``d1`` exists in ``d2``,
    so extra keys in ``d2`` passed silently; the key sets are now required to
    match exactly, and failures report the offending key/values.

    Args:
        d1: first dict of numeric values.
        d2: second dict of numeric values.
        rtol (float): relative tolerance for the value comparison.
    """
    assert d1.keys() == d2.keys(), f"key sets differ: {sorted(d1)} vs {sorted(d2)}"
    for k in d1:
        assert np.isclose(d1[k], d2[k], rtol=rtol), f"{k}: {d1[k]} != {d2[k]}"
def test_regressor_evaluation(linear_regressor_model_uri, diabetes_dataset):
    """End-to-end check of the default evaluator for a sklearn regressor.

    Verifies that logged metrics match values recomputed directly via
    ``_get_regressor_metrics``, that the dataset metadata tag is logged,
    and that exactly the expected SHAP artifacts are produced.
    """
    with mlflow.start_run() as run:
        result = evaluate(
            linear_regressor_model_uri,
            diabetes_dataset._constructor_args["data"],
            model_type="regressor",
            targets=diabetes_dataset._constructor_args["targets"],
            dataset_name=diabetes_dataset.name,
            evaluators="default",
        )
    # metrics/tags/artifacts that were actually logged to the run
    _, metrics, tags, artifacts = get_run_data(run.info.run_id)
    model = mlflow.pyfunc.load_model(linear_regressor_model_uri)
    y = diabetes_dataset.labels_data
    y_pred = model.predict(diabetes_dataset.features_data)
    # reference metrics computed directly from the model's predictions
    expected_metrics = _get_regressor_metrics(y, y_pred)
    for metric_key in expected_metrics:
        # logged metric names carry the "_on_data_<dataset>" suffix
        assert np.isclose(
            expected_metrics[metric_key],
            metrics[f'{metric_key}_on_data_diabetes_dataset'],
            rtol=1e-3,
        )
        assert np.isclose(expected_metrics[metric_key], result.metrics[metric_key], rtol=1e-3)
    # the dataset tag records the dataset metadata plus the model uuid
    assert json.loads(tags["mlflow.datasets"]) == [
        {**diabetes_dataset._metadata, "model": model.metadata.model_uuid}
    ]
    assert set(artifacts) == {
        "shap_beeswarm_plot_on_data_diabetes_dataset.png",
        "shap_feature_importance_plot_on_data_diabetes_dataset.png",
        "shap_summary_plot_on_data_diabetes_dataset.png",
    }
    assert result.artifacts.keys() == {
        "shap_beeswarm_plot",
        "shap_feature_importance_plot",
        "shap_summary_plot",
    }
def test_multi_classifier_evaluation(multiclass_logistic_regressor_model_uri, iris_dataset):
    """End-to-end check of the default evaluator for a multiclass classifier.

    Compares logged metrics against ``_get_classifier_global_metrics`` and
    verifies the logged dataset tag and the full artifact set (curves,
    confusion matrix, SHAP plots, explainer).
    """
    with mlflow.start_run() as run:
        result = evaluate(
            multiclass_logistic_regressor_model_uri,
            iris_dataset._constructor_args["data"],
            model_type="classifier",
            targets=iris_dataset._constructor_args["targets"],
            dataset_name=iris_dataset.name,
            evaluators="default",
        )
    # metrics/tags/artifacts that were actually logged to the run
    _, metrics, tags, artifacts = get_run_data(run.info.run_id)
    model = mlflow.pyfunc.load_model(multiclass_logistic_regressor_model_uri)
    _, _, predict_fn, predict_proba_fn = _extract_raw_model_and_predict_fn(model)
    y = iris_dataset.labels_data
    y_pred = predict_fn(iris_dataset.features_data)
    y_probs = predict_proba_fn(iris_dataset.features_data)
    # reference metrics computed directly from the raw model's outputs
    expected_metrics = _get_classifier_global_metrics(False, y, y_pred, y_probs, labels=None)
    for metric_key in expected_metrics:
        assert np.isclose(
            expected_metrics[metric_key], metrics[metric_key + "_on_data_iris_dataset"], rtol=1e-3
        )
        assert np.isclose(expected_metrics[metric_key], result.metrics[metric_key], rtol=1e-3)
    assert json.loads(tags["mlflow.datasets"]) == [
        {**iris_dataset._metadata, "model": model.metadata.model_uuid}
    ]
    assert set(artifacts) == {
        "shap_beeswarm_plot_on_data_iris_dataset.png",
        "per_class_metrics_on_data_iris_dataset.csv",
        "roc_curve_plot_on_data_iris_dataset.png",
        "precision_recall_curve_plot_on_data_iris_dataset.png",
        "shap_feature_importance_plot_on_data_iris_dataset.png",
        "explainer_on_data_iris_dataset",
        "confusion_matrix_on_data_iris_dataset.png",
        "shap_summary_plot_on_data_iris_dataset.png",
    }
    assert result.artifacts.keys() == {
        "per_class_metrics",
        "roc_curve_plot",
        "precision_recall_curve_plot",
        "confusion_matrix",
        "shap_beeswarm_plot",
        "shap_summary_plot",
        "shap_feature_importance_plot",
    }
def test_bin_classifier_evaluation(binary_logistic_regressor_model_uri, breast_cancer_dataset):
    """End-to-end check of the default evaluator for a binary classifier.

    Compares logged metrics against ``_get_classifier_global_metrics`` and
    verifies the dataset tag and the binary-specific artifact set (including
    the lift curve, which only exists for binary problems).
    """
    with mlflow.start_run() as run:
        result = evaluate(
            binary_logistic_regressor_model_uri,
            breast_cancer_dataset._constructor_args["data"],
            model_type="classifier",
            targets=breast_cancer_dataset._constructor_args["targets"],
            dataset_name=breast_cancer_dataset.name,
            evaluators="default",
        )
    # metrics/tags/artifacts that were actually logged to the run
    _, metrics, tags, artifacts = get_run_data(run.info.run_id)
    model = mlflow.pyfunc.load_model(binary_logistic_regressor_model_uri)
    _, _, predict_fn, predict_proba_fn = _extract_raw_model_and_predict_fn(model)
    y = breast_cancer_dataset.labels_data
    y_pred = predict_fn(breast_cancer_dataset.features_data)
    y_probs = predict_proba_fn(breast_cancer_dataset.features_data)
    # reference metrics computed directly from the raw model's outputs
    expected_metrics = _get_classifier_global_metrics(True, y, y_pred, y_probs, labels=None)
    for metric_key in expected_metrics:
        assert np.isclose(
            expected_metrics[metric_key],
            metrics[f'{metric_key}_on_data_breast_cancer_dataset'],
            rtol=1e-3,
        )
        assert np.isclose(expected_metrics[metric_key], result.metrics[metric_key], rtol=1e-3)
    assert json.loads(tags["mlflow.datasets"]) == [
        {**breast_cancer_dataset._metadata, "model": model.metadata.model_uuid}
    ]
    assert set(artifacts) == {
        "shap_feature_importance_plot_on_data_breast_cancer_dataset.png",
        "lift_curve_plot_on_data_breast_cancer_dataset.png",
        "shap_beeswarm_plot_on_data_breast_cancer_dataset.png",
        "precision_recall_curve_plot_on_data_breast_cancer_dataset.png",
        "confusion_matrix_on_data_breast_cancer_dataset.png",
        "shap_summary_plot_on_data_breast_cancer_dataset.png",
        "roc_curve_plot_on_data_breast_cancer_dataset.png",
    }
    assert result.artifacts.keys() == {
        "roc_curve_plot",
        "precision_recall_curve_plot",
        "lift_curve_plot",
        "confusion_matrix",
        "shap_beeswarm_plot",
        "shap_summary_plot",
        "shap_feature_importance_plot",
    }
def test_spark_regressor_model_evaluation(spark_linear_regressor_model_uri, diabetes_spark_dataset):
    """End-to-end check of the default evaluator on a Spark regressor.

    Metrics must match ``_get_regressor_metrics``; no artifacts are expected
    because SHAP explainability is not produced for Spark models even when
    ``log_model_explainability`` is requested.
    """
    with mlflow.start_run() as run:
        result = evaluate(
            spark_linear_regressor_model_uri,
            diabetes_spark_dataset._constructor_args["data"],
            model_type="regressor",
            targets=diabetes_spark_dataset._constructor_args["targets"],
            dataset_name=diabetes_spark_dataset.name,
            evaluators="default",
            evaluator_config={"log_model_explainability": True},
        )
    # metrics/tags/artifacts that were actually logged to the run
    _, metrics, tags, artifacts = get_run_data(run.info.run_id)
    model = mlflow.pyfunc.load_model(spark_linear_regressor_model_uri)
    X = diabetes_spark_dataset.features_data
    y = diabetes_spark_dataset.labels_data
    y_pred = model.predict(X)
    # reference metrics computed directly from the model's predictions
    expected_metrics = _get_regressor_metrics(y, y_pred)
    for metric_key in expected_metrics:
        assert np.isclose(
            expected_metrics[metric_key],
            metrics[f'{metric_key}_on_data_diabetes_spark_dataset'],
            rtol=1e-3,
        )
        assert np.isclose(expected_metrics[metric_key], result.metrics[metric_key], rtol=1e-3)
    # NOTE(review): the model was already loaded above; this reload looks
    # redundant — confirm it is not needed for a fresh model_uuid.
    model = mlflow.pyfunc.load_model(spark_linear_regressor_model_uri)
    assert json.loads(tags["mlflow.datasets"]) == [
        {**diabetes_spark_dataset._metadata, "model": model.metadata.model_uuid}
    ]
    # Spark models get no explainability artifacts
    assert not set(artifacts)
    assert result.artifacts == {}
def test_svm_classifier_evaluation(svm_model_uri, breast_cancer_dataset):
    """End-to-end check of the default evaluator on an SVM classifier.

    SVMs expose no ``predict_proba``, so probability-based artifacts
    (ROC / PR / lift curves) are absent; only the confusion matrix and
    SHAP plots are expected.
    """
    with mlflow.start_run() as run:
        result = evaluate(
            svm_model_uri,
            breast_cancer_dataset._constructor_args["data"],
            model_type="classifier",
            targets=breast_cancer_dataset._constructor_args["targets"],
            dataset_name=breast_cancer_dataset.name,
            evaluators="default",
        )
    # metrics/tags/artifacts that were actually logged to the run
    _, metrics, tags, artifacts = get_run_data(run.info.run_id)
    model = mlflow.pyfunc.load_model(svm_model_uri)
    _, _, predict_fn, _ = _extract_raw_model_and_predict_fn(model)
    y = breast_cancer_dataset.labels_data
    y_pred = predict_fn(breast_cancer_dataset.features_data)
    # reference metrics; y_probs is None since SVM has no predict_proba
    expected_metrics = _get_classifier_global_metrics(True, y, y_pred, None, labels=None)
    for metric_key in expected_metrics:
        assert np.isclose(
            expected_metrics[metric_key],
            metrics[f'{metric_key}_on_data_breast_cancer_dataset'],
            rtol=1e-3,
        )
        assert np.isclose(expected_metrics[metric_key], result.metrics[metric_key], rtol=1e-3)
    assert json.loads(tags["mlflow.datasets"]) == [
        {**breast_cancer_dataset._metadata, "model": model.metadata.model_uuid}
    ]
    assert set(artifacts) == {
        "confusion_matrix_on_data_breast_cancer_dataset.png",
        "shap_feature_importance_plot_on_data_breast_cancer_dataset.png",
        "shap_beeswarm_plot_on_data_breast_cancer_dataset.png",
        "shap_summary_plot_on_data_breast_cancer_dataset.png",
    }
    assert result.artifacts.keys() == {
        "confusion_matrix",
        "shap_beeswarm_plot",
        "shap_summary_plot",
        "shap_feature_importance_plot",
    }
def test_infer_model_type_by_labels():
    """String labels and small discrete integer sets infer "classifier";
    floats and many distinct values infer "regressor"."""
    cases = [
        (["a", "b"], "classifier"),
        ([1, 2.5], "regressor"),
        (list(range(2000)), "regressor"),
        ([1, 2, 3], "classifier"),
    ]
    for labels, expected_type in cases:
        assert _infer_model_type_by_labels(labels) == expected_type
def test_extract_raw_model_and_predict_fn(binary_logistic_regressor_model_uri):
    """The raw sklearn model and its bound predict/predict_proba methods are
    correctly extracted from a logged pyfunc model."""
    model = mlflow.pyfunc.load_model(binary_logistic_regressor_model_uri)
    (
        model_loader_module,
        raw_model,
        predict_fn,
        predict_proba_fn,
    ) = _extract_raw_model_and_predict_fn(model)
    assert model_loader_module == "mlflow.sklearn"
    assert isinstance(raw_model, LogisticRegression)
    # the extracted callables must be the raw model's own bound methods
    assert predict_fn == raw_model.predict
    assert predict_proba_fn == raw_model.predict_proba
def test_get_regressor_metrics():
    """Regression metrics on a tiny hand-checked example."""
    y_true = [1.1, 2.1, -3.5]
    y_hat = [1.5, 2.0, -3.0]
    reference = {
        "example_count": 3,
        "mean_absolute_error": 0.3333333333333333,
        "mean_squared_error": 0.13999999999999999,
        "root_mean_squared_error": 0.3741657386773941,
        "sum_on_label": -0.2999999999999998,
        "mean_on_label": -0.09999999999999994,
        "r2_score": 0.976457399103139,
        "max_error": 0.5,
        "mean_absolute_percentage_error": 0.18470418470418468,
    }
    computed = _get_regressor_metrics(y_true, y_hat)
    assert_dict_equal(computed, reference, rtol=1e-3)
def test_get_binary_sum_up_label_pred_prob():
    """One-vs-rest binarization of multiclass labels, predictions and
    probabilities: for each class, labels/predictions collapse to 0/1 and the
    probability column of that class is selected.

    Fix: removed a leftover debug ``print(results)``; the assertion failure
    output already shows the mismatch.
    """
    y = [0, 1, 2]
    y_pred = [0, 2, 1]
    y_probs = [[0.7, 0.1, 0.2], [0.2, 0.3, 0.5], [0.25, 0.4, 0.35]]

    results = []
    for idx, label in enumerate([0, 1, 2]):
        y_bin, y_pred_bin, y_prob_bin = _get_binary_sum_up_label_pred_prob(
            idx, label, y, y_pred, y_probs
        )
        results.append((list(y_bin), list(y_pred_bin), list(y_prob_bin)))

    assert results == [
        ([1, 0, 0], [1, 0, 0], [0.7, 0.2, 0.25]),
        ([0, 1, 0], [0, 0, 1], [0.1, 0.3, 0.4]),
        ([0, 0, 1], [0, 1, 0], [0.2, 0.5, 0.35]),
    ]
def test_get_classifier_per_class_metrics():
    """Confusion-matrix counts and derived precision/recall/F1 for one class."""
    labels = [0, 1, 0, 1, 0, 1, 0, 1, 1, 0]
    predictions = [0, 1, 1, 0, 1, 1, 0, 1, 1, 0]
    computed = _get_classifier_per_class_metrics(labels, predictions)
    reference = {
        "true_negatives": 3,
        "false_positives": 2,
        "false_negatives": 1,
        "true_positives": 4,
        "recall": 0.8,
        "precision": 0.6666666666666666,
        "f1_score": 0.7272727272727272,
    }
    assert_dict_equal(computed, reference, rtol=1e-3)
def test_multiclass_get_classifier_global_metrics():
    """Global accuracy / micro- and macro-F1 / log-loss for a 3-class case."""
    labels = [0, 1, 2, 1, 2]
    predictions = [0, 2, 1, 1, 0]
    probabilities = [
        [0.7, 0.1, 0.2],
        [0.2, 0.3, 0.5],
        [0.25, 0.4, 0.35],
        [0.3, 0.4, 0.3],
        [0.8, 0.1, 0.1],
    ]
    computed = _get_classifier_global_metrics(
        is_binomial=False, y=labels, y_pred=predictions, y_probs=probabilities, labels=[0, 1, 2]
    )
    reference = {
        "accuracy": 0.4,
        "example_count": 5,
        "f1_score_micro": 0.4,
        "f1_score_macro": 0.38888888888888884,
        "log_loss": 1.1658691395263094,
    }
    assert_dict_equal(computed, reference, 1e-3)
def test_binary_get_classifier_global_metrics():
    """Global metrics for a binary case; probabilities are expanded to
    two-column [P(neg), P(pos)] form."""
    labels = [0, 1, 0, 1, 0, 1, 0, 1, 1, 0]
    predictions = [0, 1, 1, 0, 1, 1, 0, 1, 1, 0]
    positive_probs = [0.1, 0.9, 0.8, 0.2, 0.7, 0.8, 0.3, 0.6, 0.65, 0.4]
    probabilities = [[1 - p, p] for p in positive_probs]
    computed = _get_classifier_global_metrics(
        is_binomial=True, y=labels, y_pred=predictions, y_probs=probabilities, labels=[0, 1]
    )
    reference = {"accuracy": 0.7, "example_count": 10, "log_loss": 0.6665822319387167}
    assert_dict_equal(computed, reference, 1e-3)
def test_gen_binary_precision_recall_curve():
    """PR-curve data, axis labels, plot kwargs and AP for a binary case."""
    labels = [0, 1, 0, 1, 0, 1, 0, 1, 1, 0]
    positive_probs = [0.1, 0.9, 0.8, 0.2, 0.7, 0.8, 0.3, 0.6, 0.65, 0.4]
    results = _gen_classifier_curve(
        is_binomial=True, y=labels, y_probs=positive_probs, labels=[0, 1], curve_type="pr"
    )
    series = results.plot_fn_args["data_series"][0]
    expected_recall = np.array([1.0, 0.8, 0.8, 0.8, 0.6, 0.4, 0.4, 0.2, 0.0])
    expected_precision = np.array(
        [0.55555556, 0.5, 0.57142857, 0.66666667, 0.6, 0.5, 0.66666667, 1.0, 1.0]
    )
    assert np.allclose(series[1], expected_recall, rtol=1e-3)
    assert np.allclose(series[2], expected_precision, rtol=1e-3)
    assert results.plot_fn_args["xlabel"] == "recall"
    assert results.plot_fn_args["ylabel"] == "precision"
    assert results.plot_fn_args["line_kwargs"] == {"drawstyle": "steps-post", "linewidth": 1}
    assert np.isclose(results.auc, 0.7088888888888889, rtol=1e-3)
def test_gen_binary_roc_curve():
    """ROC-curve data, axis labels, plot kwargs and AUC for a binary case."""
    labels = [0, 1, 0, 1, 0, 1, 0, 1, 1, 0]
    positive_probs = [0.1, 0.9, 0.8, 0.2, 0.7, 0.8, 0.3, 0.6, 0.65, 0.4]
    results = _gen_classifier_curve(
        is_binomial=True, y=labels, y_probs=positive_probs, labels=[0, 1], curve_type="roc"
    )
    series = results.plot_fn_args["data_series"][0]
    expected_fpr = np.array([0.0, 0.0, 0.2, 0.4, 0.4, 0.8, 0.8, 1.0])
    expected_tpr = np.array([0.0, 0.2, 0.4, 0.4, 0.8, 0.8, 1.0, 1.0])
    assert np.allclose(series[1], expected_fpr, rtol=1e-3)
    assert np.allclose(series[2], expected_tpr, rtol=1e-3)
    assert results.plot_fn_args["xlabel"] == "False Positive Rate"
    assert results.plot_fn_args["ylabel"] == "True Positive Rate"
    assert results.plot_fn_args["line_kwargs"] == {"drawstyle": "steps-post", "linewidth": 1}
    assert np.isclose(results.auc, 0.66, rtol=1e-3)
def test_gen_multiclass_precision_recall_curve():
    """One-vs-rest PR curves for a 3-class problem: one data series per
    class with an "label=<l>,AP=<ap>" legend entry, plus per-class AP."""
    y = [0, 1, 2, 1, 2]
    y_probs = [
        [0.7, 0.1, 0.2],
        [0.2, 0.3, 0.5],
        [0.25, 0.4, 0.35],
        [0.3, 0.4, 0.3],
        [0.8, 0.1, 0.1],
    ]
    results = _gen_classifier_curve(
        is_binomial=False, y=y, y_probs=y_probs, labels=[0, 1, 2], curve_type="pr"
    )
    # expected (recall, precision) points per class
    expected_x_data_list = [[1.0, 0.0, 0.0], [1.0, 0.5, 0.0], [1.0, 0.5, 0.5, 0.5, 0.0, 0.0]]
    expected_y_data_list = [
        [0.5, 0.0, 1.0],
        [0.66666667, 0.5, 1.0],
        [0.4, 0.25, 0.33333333, 0.5, 0.0, 1.0],
    ]
    line_labels = ["label=0,AP=0.500", "label=1,AP=0.722", "label=2,AP=0.414"]
    for index, (name, x_data, y_data) in enumerate(results.plot_fn_args["data_series"]):
        assert name == line_labels[index]
        assert np.allclose(x_data, expected_x_data_list[index], rtol=1e-3)
        assert np.allclose(y_data, expected_y_data_list[index], rtol=1e-3)
    assert results.plot_fn_args["xlabel"] == "recall"
    assert results.plot_fn_args["ylabel"] == "precision"
    assert results.plot_fn_args["line_kwargs"] == {"drawstyle": "steps-post", "linewidth": 1}
    # results.auc holds the per-class average precision values
    expected_auc = [0.25, 0.6666666666666666, 0.2875]
    assert np.allclose(results.auc, expected_auc, rtol=1e-3)
def test_gen_multiclass_roc_curve():
    """One-vs-rest ROC curves for a 3-class problem: one data series per
    class with an "label=<l>,AUC=<auc>" legend entry, plus per-class AUC.

    Fix: removed a leftover debug ``print(results)``; the assertion failure
    output already shows the mismatch.
    """
    y = [0, 1, 2, 1, 2]
    y_probs = [
        [0.7, 0.1, 0.2],
        [0.2, 0.3, 0.5],
        [0.25, 0.4, 0.35],
        [0.3, 0.4, 0.3],
        [0.8, 0.1, 0.1],
    ]

    results = _gen_classifier_curve(
        is_binomial=False, y=y, y_probs=y_probs, labels=[0, 1, 2], curve_type="roc"
    )

    # expected (fpr, tpr) points per class
    expected_x_data_list = [
        [0.0, 0.25, 0.25, 1.0],
        [0.0, 0.33333333, 0.33333333, 1.0],
        [0.0, 0.33333333, 0.33333333, 1.0, 1.0],
    ]
    expected_y_data_list = [[0.0, 0.0, 1.0, 1.0], [0.0, 0.5, 1.0, 1.0], [0.0, 0.0, 0.5, 0.5, 1.0]]
    line_labels = ["label=0,AUC=0.750", "label=1,AUC=0.750", "label=2,AUC=0.333"]
    for index, (name, x_data, y_data) in enumerate(results.plot_fn_args["data_series"]):
        assert name == line_labels[index]
        assert np.allclose(x_data, expected_x_data_list[index], rtol=1e-3)
        assert np.allclose(y_data, expected_y_data_list[index], rtol=1e-3)

    assert results.plot_fn_args["xlabel"] == "False Positive Rate"
    assert results.plot_fn_args["ylabel"] == "True Positive Rate"
    assert results.plot_fn_args["line_kwargs"] == {"drawstyle": "steps-post", "linewidth": 1}
    # results.auc holds the per-class AUC values
    expected_auc = [0.75, 0.75, 0.3333]
    assert np.allclose(results.auc, expected_auc, rtol=1e-3)
| [
"mlflow.models.evaluation.default_evaluator._get_classifier_per_class_metrics",
"json.loads",
"numpy.allclose",
"numpy.isclose",
"mlflow.models.evaluation.default_evaluator._gen_classifier_curve",
"tests.models.test_evaluation.get_run_data",
"mlflow.models.evaluation.default_evaluator._infer_model_type_... | [((1453, 1482), 'tests.models.test_evaluation.get_run_data', 'get_run_data', (['run.info.run_id'], {}), '(run.info.run_id)\n', (1465, 1482), False, 'from tests.models.test_evaluation import get_run_data, linear_regressor_model_uri, diabetes_dataset, multiclass_logistic_regressor_model_uri, iris_dataset, binary_logistic_regressor_model_uri, breast_cancer_dataset, spark_linear_regressor_model_uri, diabetes_spark_dataset, svm_model_uri\n'), ((1496, 1548), 'mlflow.pyfunc.load_model', 'mlflow.pyfunc.load_model', (['linear_regressor_model_uri'], {}), '(linear_regressor_model_uri)\n', (1520, 1548), False, 'import mlflow\n'), ((1670, 1703), 'mlflow.models.evaluation.default_evaluator._get_regressor_metrics', '_get_regressor_metrics', (['y', 'y_pred'], {}), '(y, y_pred)\n', (1692, 1703), False, 'from mlflow.models.evaluation.default_evaluator import _get_classifier_global_metrics, _infer_model_type_by_labels, _extract_raw_model_and_predict_fn, _get_regressor_metrics, _get_binary_sum_up_label_pred_prob, _get_classifier_per_class_metrics, _gen_classifier_curve\n'), ((2994, 3023), 'tests.models.test_evaluation.get_run_data', 'get_run_data', (['run.info.run_id'], {}), '(run.info.run_id)\n', (3006, 3023), False, 'from tests.models.test_evaluation import get_run_data, linear_regressor_model_uri, diabetes_dataset, multiclass_logistic_regressor_model_uri, iris_dataset, binary_logistic_regressor_model_uri, breast_cancer_dataset, spark_linear_regressor_model_uri, diabetes_spark_dataset, svm_model_uri\n'), ((3037, 3102), 'mlflow.pyfunc.load_model', 'mlflow.pyfunc.load_model', (['multiclass_logistic_regressor_model_uri'], {}), '(multiclass_logistic_regressor_model_uri)\n', (3061, 3102), False, 'import mlflow\n'), ((3145, 3185), 'mlflow.models.evaluation.default_evaluator._extract_raw_model_and_predict_fn', '_extract_raw_model_and_predict_fn', (['model'], {}), '(model)\n', (3178, 3185), False, 'from 
mlflow.models.evaluation.default_evaluator import _get_classifier_global_metrics, _infer_model_type_by_labels, _extract_raw_model_and_predict_fn, _get_regressor_metrics, _get_binary_sum_up_label_pred_prob, _get_classifier_per_class_metrics, _gen_classifier_curve\n'), ((3354, 3424), 'mlflow.models.evaluation.default_evaluator._get_classifier_global_metrics', '_get_classifier_global_metrics', (['(False)', 'y', 'y_pred', 'y_probs'], {'labels': 'None'}), '(False, y, y_pred, y_probs, labels=None)\n', (3384, 3424), False, 'from mlflow.models.evaluation.default_evaluator import _get_classifier_global_metrics, _infer_model_type_by_labels, _extract_raw_model_and_predict_fn, _get_regressor_metrics, _get_binary_sum_up_label_pred_prob, _get_classifier_per_class_metrics, _gen_classifier_curve\n'), ((5082, 5111), 'tests.models.test_evaluation.get_run_data', 'get_run_data', (['run.info.run_id'], {}), '(run.info.run_id)\n', (5094, 5111), False, 'from tests.models.test_evaluation import get_run_data, linear_regressor_model_uri, diabetes_dataset, multiclass_logistic_regressor_model_uri, iris_dataset, binary_logistic_regressor_model_uri, breast_cancer_dataset, spark_linear_regressor_model_uri, diabetes_spark_dataset, svm_model_uri\n'), ((5125, 5186), 'mlflow.pyfunc.load_model', 'mlflow.pyfunc.load_model', (['binary_logistic_regressor_model_uri'], {}), '(binary_logistic_regressor_model_uri)\n', (5149, 5186), False, 'import mlflow\n'), ((5229, 5269), 'mlflow.models.evaluation.default_evaluator._extract_raw_model_and_predict_fn', '_extract_raw_model_and_predict_fn', (['model'], {}), '(model)\n', (5262, 5269), False, 'from mlflow.models.evaluation.default_evaluator import _get_classifier_global_metrics, _infer_model_type_by_labels, _extract_raw_model_and_predict_fn, _get_regressor_metrics, _get_binary_sum_up_label_pred_prob, _get_classifier_per_class_metrics, _gen_classifier_curve\n'), ((5465, 5534), 'mlflow.models.evaluation.default_evaluator._get_classifier_global_metrics', 
'_get_classifier_global_metrics', (['(True)', 'y', 'y_pred', 'y_probs'], {'labels': 'None'}), '(True, y, y_pred, y_probs, labels=None)\n', (5495, 5534), False, 'from mlflow.models.evaluation.default_evaluator import _get_classifier_global_metrics, _infer_model_type_by_labels, _extract_raw_model_and_predict_fn, _get_regressor_metrics, _get_binary_sum_up_label_pred_prob, _get_classifier_per_class_metrics, _gen_classifier_curve\n'), ((7322, 7351), 'tests.models.test_evaluation.get_run_data', 'get_run_data', (['run.info.run_id'], {}), '(run.info.run_id)\n', (7334, 7351), False, 'from tests.models.test_evaluation import get_run_data, linear_regressor_model_uri, diabetes_dataset, multiclass_logistic_regressor_model_uri, iris_dataset, binary_logistic_regressor_model_uri, breast_cancer_dataset, spark_linear_regressor_model_uri, diabetes_spark_dataset, svm_model_uri\n'), ((7365, 7423), 'mlflow.pyfunc.load_model', 'mlflow.pyfunc.load_model', (['spark_linear_regressor_model_uri'], {}), '(spark_linear_regressor_model_uri)\n', (7389, 7423), False, 'import mlflow\n'), ((7567, 7600), 'mlflow.models.evaluation.default_evaluator._get_regressor_metrics', '_get_regressor_metrics', (['y', 'y_pred'], {}), '(y, y_pred)\n', (7589, 7600), False, 'from mlflow.models.evaluation.default_evaluator import _get_classifier_global_metrics, _infer_model_type_by_labels, _extract_raw_model_and_predict_fn, _get_regressor_metrics, _get_binary_sum_up_label_pred_prob, _get_classifier_per_class_metrics, _gen_classifier_curve\n'), ((7922, 7980), 'mlflow.pyfunc.load_model', 'mlflow.pyfunc.load_model', (['spark_linear_regressor_model_uri'], {}), '(spark_linear_regressor_model_uri)\n', (7946, 7980), False, 'import mlflow\n'), ((8654, 8683), 'tests.models.test_evaluation.get_run_data', 'get_run_data', (['run.info.run_id'], {}), '(run.info.run_id)\n', (8666, 8683), False, 'from tests.models.test_evaluation import get_run_data, linear_regressor_model_uri, diabetes_dataset, 
multiclass_logistic_regressor_model_uri, iris_dataset, binary_logistic_regressor_model_uri, breast_cancer_dataset, spark_linear_regressor_model_uri, diabetes_spark_dataset, svm_model_uri\n'), ((8697, 8736), 'mlflow.pyfunc.load_model', 'mlflow.pyfunc.load_model', (['svm_model_uri'], {}), '(svm_model_uri)\n', (8721, 8736), False, 'import mlflow\n'), ((8764, 8804), 'mlflow.models.evaluation.default_evaluator._extract_raw_model_and_predict_fn', '_extract_raw_model_and_predict_fn', (['model'], {}), '(model)\n', (8797, 8804), False, 'from mlflow.models.evaluation.default_evaluator import _get_classifier_global_metrics, _infer_model_type_by_labels, _extract_raw_model_and_predict_fn, _get_regressor_metrics, _get_binary_sum_up_label_pred_prob, _get_classifier_per_class_metrics, _gen_classifier_curve\n'), ((8932, 8998), 'mlflow.models.evaluation.default_evaluator._get_classifier_global_metrics', '_get_classifier_global_metrics', (['(True)', 'y', 'y_pred', 'None'], {'labels': 'None'}), '(True, y, y_pred, None, labels=None)\n', (8962, 8998), False, 'from mlflow.models.evaluation.default_evaluator import _get_classifier_global_metrics, _infer_model_type_by_labels, _extract_raw_model_and_predict_fn, _get_regressor_metrics, _get_binary_sum_up_label_pred_prob, _get_classifier_per_class_metrics, _gen_classifier_curve\n'), ((10324, 10385), 'mlflow.pyfunc.load_model', 'mlflow.pyfunc.load_model', (['binary_logistic_regressor_model_uri'], {}), '(binary_logistic_regressor_model_uri)\n', (10348, 10385), False, 'import mlflow\n'), ((10494, 10534), 'mlflow.models.evaluation.default_evaluator._extract_raw_model_and_predict_fn', '_extract_raw_model_and_predict_fn', (['model'], {}), '(model)\n', (10527, 10534), False, 'from mlflow.models.evaluation.default_evaluator import _get_classifier_global_metrics, _infer_model_type_by_labels, _extract_raw_model_and_predict_fn, _get_regressor_metrics, _get_binary_sum_up_label_pred_prob, _get_classifier_per_class_metrics, _gen_classifier_curve\n'), 
((10843, 10876), 'mlflow.models.evaluation.default_evaluator._get_regressor_metrics', '_get_regressor_metrics', (['y', 'y_pred'], {}), '(y, y_pred)\n', (10865, 10876), False, 'from mlflow.models.evaluation.default_evaluator import _get_classifier_global_metrics, _infer_model_type_by_labels, _extract_raw_model_and_predict_fn, _get_regressor_metrics, _get_binary_sum_up_label_pred_prob, _get_classifier_per_class_metrics, _gen_classifier_curve\n'), ((12392, 12436), 'mlflow.models.evaluation.default_evaluator._get_classifier_per_class_metrics', '_get_classifier_per_class_metrics', (['y', 'y_pred'], {}), '(y, y_pred)\n', (12425, 12436), False, 'from mlflow.models.evaluation.default_evaluator import _get_classifier_global_metrics, _infer_model_type_by_labels, _extract_raw_model_and_predict_fn, _get_regressor_metrics, _get_binary_sum_up_label_pred_prob, _get_classifier_per_class_metrics, _gen_classifier_curve\n'), ((12769, 12877), 'mlflow.models.evaluation.default_evaluator._get_classifier_global_metrics', '_get_classifier_global_metrics', ([], {'is_binomial': '(False)', 'y': 'y', 'y_pred': 'y_pred', 'y_probs': 'y_probs', 'labels': '[0, 1, 2]'}), '(is_binomial=False, y=y, y_pred=y_pred,\n y_probs=y_probs, labels=[0, 1, 2])\n', (12799, 12877), False, 'from mlflow.models.evaluation.default_evaluator import _get_classifier_global_metrics, _infer_model_type_by_labels, _extract_raw_model_and_predict_fn, _get_regressor_metrics, _get_binary_sum_up_label_pred_prob, _get_classifier_per_class_metrics, _gen_classifier_curve\n'), ((13401, 13505), 'mlflow.models.evaluation.default_evaluator._get_classifier_global_metrics', '_get_classifier_global_metrics', ([], {'is_binomial': '(True)', 'y': 'y', 'y_pred': 'y_pred', 'y_probs': 'y_probs', 'labels': '[0, 1]'}), '(is_binomial=True, y=y, y_pred=y_pred,\n y_probs=y_probs, labels=[0, 1])\n', (13431, 13505), False, 'from mlflow.models.evaluation.default_evaluator import _get_classifier_global_metrics, _infer_model_type_by_labels, 
_extract_raw_model_and_predict_fn, _get_regressor_metrics, _get_binary_sum_up_label_pred_prob, _get_classifier_per_class_metrics, _gen_classifier_curve\n'), ((13832, 13928), 'mlflow.models.evaluation.default_evaluator._gen_classifier_curve', '_gen_classifier_curve', ([], {'is_binomial': '(True)', 'y': 'y', 'y_probs': 'y_prob', 'labels': '[0, 1]', 'curve_type': '"""pr"""'}), "(is_binomial=True, y=y, y_probs=y_prob, labels=[0, 1],\n curve_type='pr')\n", (13853, 13928), False, 'from mlflow.models.evaluation.default_evaluator import _get_classifier_global_metrics, _infer_model_type_by_labels, _extract_raw_model_and_predict_fn, _get_regressor_metrics, _get_binary_sum_up_label_pred_prob, _get_classifier_per_class_metrics, _gen_classifier_curve\n'), ((14513, 14568), 'numpy.isclose', 'np.isclose', (['results.auc', '(0.7088888888888889)'], {'rtol': '(0.001)'}), '(results.auc, 0.7088888888888889, rtol=0.001)\n', (14523, 14568), True, 'import numpy as np\n'), ((14722, 14819), 'mlflow.models.evaluation.default_evaluator._gen_classifier_curve', '_gen_classifier_curve', ([], {'is_binomial': '(True)', 'y': 'y', 'y_probs': 'y_prob', 'labels': '[0, 1]', 'curve_type': '"""roc"""'}), "(is_binomial=True, y=y, y_probs=y_prob, labels=[0, 1],\n curve_type='roc')\n", (14743, 14819), False, 'from mlflow.models.evaluation.default_evaluator import _get_classifier_global_metrics, _infer_model_type_by_labels, _extract_raw_model_and_predict_fn, _get_regressor_metrics, _get_binary_sum_up_label_pred_prob, _get_classifier_per_class_metrics, _gen_classifier_curve\n'), ((15388, 15429), 'numpy.isclose', 'np.isclose', (['results.auc', '(0.66)'], {'rtol': '(0.001)'}), '(results.auc, 0.66, rtol=0.001)\n', (15398, 15429), True, 'import numpy as np\n'), ((15669, 15770), 'mlflow.models.evaluation.default_evaluator._gen_classifier_curve', '_gen_classifier_curve', ([], {'is_binomial': '(False)', 'y': 'y', 'y_probs': 'y_probs', 'labels': '[0, 1, 2]', 'curve_type': '"""pr"""'}), "(is_binomial=False, y=y, 
y_probs=y_probs, labels=[0, 1,\n 2], curve_type='pr')\n", (15690, 15770), False, 'from mlflow.models.evaluation.default_evaluator import _get_classifier_global_metrics, _infer_model_type_by_labels, _extract_raw_model_and_predict_fn, _get_regressor_metrics, _get_binary_sum_up_label_pred_prob, _get_classifier_per_class_metrics, _gen_classifier_curve\n'), ((16647, 16697), 'numpy.allclose', 'np.allclose', (['results.auc', 'expected_auc'], {'rtol': '(0.001)'}), '(results.auc, expected_auc, rtol=0.001)\n', (16658, 16697), True, 'import numpy as np\n'), ((16924, 17026), 'mlflow.models.evaluation.default_evaluator._gen_classifier_curve', '_gen_classifier_curve', ([], {'is_binomial': '(False)', 'y': 'y', 'y_probs': 'y_probs', 'labels': '[0, 1, 2]', 'curve_type': '"""roc"""'}), "(is_binomial=False, y=y, y_probs=y_probs, labels=[0, 1,\n 2], curve_type='roc')\n", (16945, 17026), False, 'from mlflow.models.evaluation.default_evaluator import _get_classifier_global_metrics, _infer_model_type_by_labels, _extract_raw_model_and_predict_fn, _get_regressor_metrics, _get_binary_sum_up_label_pred_prob, _get_classifier_per_class_metrics, _gen_classifier_curve\n'), ((17959, 18009), 'numpy.allclose', 'np.allclose', (['results.auc', 'expected_auc'], {'rtol': '(0.001)'}), '(results.auc, expected_auc, rtol=0.001)\n', (17970, 18009), True, 'import numpy as np\n'), ((949, 984), 'numpy.isclose', 'np.isclose', (['d1[k]', 'd2[k]'], {'rtol': 'rtol'}), '(d1[k], d2[k], rtol=rtol)\n', (959, 984), True, 'import numpy as np\n'), ((1073, 1091), 'mlflow.start_run', 'mlflow.start_run', ([], {}), '()\n', (1089, 1091), False, 'import mlflow\n'), ((1117, 1348), 'mlflow.models.evaluation.evaluate', 'evaluate', (['linear_regressor_model_uri', "diabetes_dataset._constructor_args['data']"], {'model_type': '"""regressor"""', 'targets': "diabetes_dataset._constructor_args['targets']", 'dataset_name': 'diabetes_dataset.name', 'evaluators': '"""default"""'}), "(linear_regressor_model_uri, 
diabetes_dataset._constructor_args[\n 'data'], model_type='regressor', targets=diabetes_dataset.\n _constructor_args['targets'], dataset_name=diabetes_dataset.name,\n evaluators='default')\n", (1125, 1348), False, 'from mlflow.models.evaluation import evaluate\n'), ((1759, 1867), 'numpy.isclose', 'np.isclose', (['expected_metrics[metric_key]', "metrics[f'{metric_key}_on_data_diabetes_dataset']"], {'rtol': '(0.001)'}), "(expected_metrics[metric_key], metrics[\n f'{metric_key}_on_data_diabetes_dataset'], rtol=0.001)\n", (1769, 1867), True, 'import numpy as np\n'), ((1925, 2010), 'numpy.isclose', 'np.isclose', (['expected_metrics[metric_key]', 'result.metrics[metric_key]'], {'rtol': '(0.001)'}), '(expected_metrics[metric_key], result.metrics[metric_key], rtol=0.001\n )\n', (1935, 2010), True, 'import numpy as np\n'), ((2017, 2052), 'json.loads', 'json.loads', (["tags['mlflow.datasets']"], {}), "(tags['mlflow.datasets'])\n", (2027, 2052), False, 'import json\n'), ((2612, 2630), 'mlflow.start_run', 'mlflow.start_run', ([], {}), '()\n', (2628, 2630), False, 'import mlflow\n'), ((2656, 2890), 'mlflow.models.evaluation.evaluate', 'evaluate', (['multiclass_logistic_regressor_model_uri', "iris_dataset._constructor_args['data']"], {'model_type': '"""classifier"""', 'targets': "iris_dataset._constructor_args['targets']", 'dataset_name': 'iris_dataset.name', 'evaluators': '"""default"""'}), "(multiclass_logistic_regressor_model_uri, iris_dataset.\n _constructor_args['data'], model_type='classifier', targets=\n iris_dataset._constructor_args['targets'], dataset_name=iris_dataset.\n name, evaluators='default')\n", (2664, 2890), False, 'from mlflow.models.evaluation import evaluate\n'), ((3481, 3584), 'numpy.isclose', 'np.isclose', (['expected_metrics[metric_key]', "metrics[metric_key + '_on_data_iris_dataset']"], {'rtol': '(0.001)'}), "(expected_metrics[metric_key], metrics[metric_key +\n '_on_data_iris_dataset'], rtol=0.001)\n", (3491, 3584), True, 'import numpy as np\n'), 
((3617, 3702), 'numpy.isclose', 'np.isclose', (['expected_metrics[metric_key]', 'result.metrics[metric_key]'], {'rtol': '(0.001)'}), '(expected_metrics[metric_key], result.metrics[metric_key], rtol=0.001\n )\n', (3627, 3702), True, 'import numpy as np\n'), ((3709, 3744), 'json.loads', 'json.loads', (["tags['mlflow.datasets']"], {}), "(tags['mlflow.datasets'])\n", (3719, 3744), False, 'import json\n'), ((4677, 4695), 'mlflow.start_run', 'mlflow.start_run', ([], {}), '()\n', (4693, 4695), False, 'import mlflow\n'), ((4721, 4978), 'mlflow.models.evaluation.evaluate', 'evaluate', (['binary_logistic_regressor_model_uri', "breast_cancer_dataset._constructor_args['data']"], {'model_type': '"""classifier"""', 'targets': "breast_cancer_dataset._constructor_args['targets']", 'dataset_name': 'breast_cancer_dataset.name', 'evaluators': '"""default"""'}), "(binary_logistic_regressor_model_uri, breast_cancer_dataset.\n _constructor_args['data'], model_type='classifier', targets=\n breast_cancer_dataset._constructor_args['targets'], dataset_name=\n breast_cancer_dataset.name, evaluators='default')\n", (4729, 4978), False, 'from mlflow.models.evaluation import evaluate\n'), ((5591, 5704), 'numpy.isclose', 'np.isclose', (['expected_metrics[metric_key]', "metrics[f'{metric_key}_on_data_breast_cancer_dataset']"], {'rtol': '(0.001)'}), "(expected_metrics[metric_key], metrics[\n f'{metric_key}_on_data_breast_cancer_dataset'], rtol=0.001)\n", (5601, 5704), True, 'import numpy as np\n'), ((5762, 5847), 'numpy.isclose', 'np.isclose', (['expected_metrics[metric_key]', 'result.metrics[metric_key]'], {'rtol': '(0.001)'}), '(expected_metrics[metric_key], result.metrics[metric_key], rtol=0.001\n )\n', (5772, 5847), True, 'import numpy as np\n'), ((5854, 5889), 'json.loads', 'json.loads', (["tags['mlflow.datasets']"], {}), "(tags['mlflow.datasets'])\n", (5864, 5889), False, 'import json\n'), ((6853, 6871), 'mlflow.start_run', 'mlflow.start_run', ([], {}), '()\n', (6869, 6871), False, 'import 
mlflow\n'), ((6897, 7211), 'mlflow.models.evaluation.evaluate', 'evaluate', (['spark_linear_regressor_model_uri', "diabetes_spark_dataset._constructor_args['data']"], {'model_type': '"""regressor"""', 'targets': "diabetes_spark_dataset._constructor_args['targets']", 'dataset_name': 'diabetes_spark_dataset.name', 'evaluators': '"""default"""', 'evaluator_config': "{'log_model_explainability': True}"}), "(spark_linear_regressor_model_uri, diabetes_spark_dataset.\n _constructor_args['data'], model_type='regressor', targets=\n diabetes_spark_dataset._constructor_args['targets'], dataset_name=\n diabetes_spark_dataset.name, evaluators='default', evaluator_config={\n 'log_model_explainability': True})\n", (6905, 7211), False, 'from mlflow.models.evaluation import evaluate\n'), ((7657, 7771), 'numpy.isclose', 'np.isclose', (['expected_metrics[metric_key]', "metrics[f'{metric_key}_on_data_diabetes_spark_dataset']"], {'rtol': '(0.001)'}), "(expected_metrics[metric_key], metrics[\n f'{metric_key}_on_data_diabetes_spark_dataset'], rtol=0.001)\n", (7667, 7771), True, 'import numpy as np\n'), ((7829, 7914), 'numpy.isclose', 'np.isclose', (['expected_metrics[metric_key]', 'result.metrics[metric_key]'], {'rtol': '(0.001)'}), '(expected_metrics[metric_key], result.metrics[metric_key], rtol=0.001\n )\n', (7839, 7914), True, 'import numpy as np\n'), ((7993, 8028), 'json.loads', 'json.loads', (["tags['mlflow.datasets']"], {}), "(tags['mlflow.datasets'])\n", (8003, 8028), False, 'import json\n'), ((8271, 8289), 'mlflow.start_run', 'mlflow.start_run', ([], {}), '()\n', (8287, 8289), False, 'import mlflow\n'), ((8315, 8548), 'mlflow.models.evaluation.evaluate', 'evaluate', (['svm_model_uri', "breast_cancer_dataset._constructor_args['data']"], {'model_type': '"""classifier"""', 'targets': "breast_cancer_dataset._constructor_args['targets']", 'dataset_name': 'breast_cancer_dataset.name', 'evaluators': '"""default"""'}), "(svm_model_uri, breast_cancer_dataset._constructor_args['data'],\n 
model_type='classifier', targets=breast_cancer_dataset.\n _constructor_args['targets'], dataset_name=breast_cancer_dataset.name,\n evaluators='default')\n", (8323, 8548), False, 'from mlflow.models.evaluation import evaluate\n'), ((9055, 9168), 'numpy.isclose', 'np.isclose', (['expected_metrics[metric_key]', "metrics[f'{metric_key}_on_data_breast_cancer_dataset']"], {'rtol': '(0.001)'}), "(expected_metrics[metric_key], metrics[\n f'{metric_key}_on_data_breast_cancer_dataset'], rtol=0.001)\n", (9065, 9168), True, 'import numpy as np\n'), ((9226, 9311), 'numpy.isclose', 'np.isclose', (['expected_metrics[metric_key]', 'result.metrics[metric_key]'], {'rtol': '(0.001)'}), '(expected_metrics[metric_key], result.metrics[metric_key], rtol=0.001\n )\n', (9236, 9311), True, 'import numpy as np\n'), ((9318, 9353), 'json.loads', 'json.loads', (["tags['mlflow.datasets']"], {}), "(tags['mlflow.datasets'])\n", (9328, 9353), False, 'import json\n'), ((9971, 10010), 'mlflow.models.evaluation.default_evaluator._infer_model_type_by_labels', '_infer_model_type_by_labels', (["['a', 'b']"], {}), "(['a', 'b'])\n", (9998, 10010), False, 'from mlflow.models.evaluation.default_evaluator import _get_classifier_global_metrics, _infer_model_type_by_labels, _extract_raw_model_and_predict_fn, _get_regressor_metrics, _get_binary_sum_up_label_pred_prob, _get_classifier_per_class_metrics, _gen_classifier_curve\n'), ((10038, 10075), 'mlflow.models.evaluation.default_evaluator._infer_model_type_by_labels', '_infer_model_type_by_labels', (['[1, 2.5]'], {}), '([1, 2.5])\n', (10065, 10075), False, 'from mlflow.models.evaluation.default_evaluator import _get_classifier_global_metrics, _infer_model_type_by_labels, _extract_raw_model_and_predict_fn, _get_regressor_metrics, _get_binary_sum_up_label_pred_prob, _get_classifier_per_class_metrics, _gen_classifier_curve\n'), ((10175, 10213), 'mlflow.models.evaluation.default_evaluator._infer_model_type_by_labels', '_infer_model_type_by_labels', (['[1, 2, 3]'], 
{}), '([1, 2, 3])\n', (10202, 10213), False, 'from mlflow.models.evaluation.default_evaluator import _get_classifier_global_metrics, _infer_model_type_by_labels, _extract_raw_model_and_predict_fn, _get_regressor_metrics, _get_binary_sum_up_label_pred_prob, _get_classifier_per_class_metrics, _gen_classifier_curve\n'), ((11632, 11698), 'mlflow.models.evaluation.default_evaluator._get_binary_sum_up_label_pred_prob', '_get_binary_sum_up_label_pred_prob', (['idx', 'label', 'y', 'y_pred', 'y_probs'], {}), '(idx, label, y, y_pred, y_probs)\n', (11666, 11698), False, 'from mlflow.models.evaluation.default_evaluator import _get_classifier_global_metrics, _infer_model_type_by_labels, _extract_raw_model_and_predict_fn, _get_regressor_metrics, _get_binary_sum_up_label_pred_prob, _get_classifier_per_class_metrics, _gen_classifier_curve\n'), ((14022, 14077), 'numpy.array', 'np.array', (['[1.0, 0.8, 0.8, 0.8, 0.6, 0.4, 0.4, 0.2, 0.0]'], {}), '([1.0, 0.8, 0.8, 0.8, 0.6, 0.4, 0.4, 0.2, 0.0])\n', (14030, 14077), True, 'import numpy as np\n'), ((14187, 14275), 'numpy.array', 'np.array', (['[0.55555556, 0.5, 0.57142857, 0.66666667, 0.6, 0.5, 0.66666667, 1.0, 1.0]'], {}), '([0.55555556, 0.5, 0.57142857, 0.66666667, 0.6, 0.5, 0.66666667, \n 1.0, 1.0])\n', (14195, 14275), True, 'import numpy as np\n'), ((14913, 14963), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.2, 0.4, 0.4, 0.8, 0.8, 1.0]'], {}), '([0.0, 0.0, 0.2, 0.4, 0.4, 0.8, 0.8, 1.0])\n', (14921, 14963), True, 'import numpy as np\n'), ((15073, 15123), 'numpy.array', 'np.array', (['[0.0, 0.2, 0.4, 0.4, 0.8, 0.8, 1.0, 1.0]'], {}), '([0.0, 0.2, 0.4, 0.4, 0.8, 0.8, 1.0, 1.0])\n', (15081, 15123), True, 'import numpy as np\n'), ((16240, 16300), 'numpy.allclose', 'np.allclose', (['x_data', 'expected_x_data_list[index]'], {'rtol': '(0.001)'}), '(x_data, expected_x_data_list[index], rtol=0.001)\n', (16251, 16300), True, 'import numpy as np\n'), ((16315, 16375), 'numpy.allclose', 'np.allclose', (['y_data', 'expected_y_data_list[index]'], 
{'rtol': '(0.001)'}), '(y_data, expected_y_data_list[index], rtol=0.001)\n', (16326, 16375), True, 'import numpy as np\n'), ((17544, 17604), 'numpy.allclose', 'np.allclose', (['x_data', 'expected_x_data_list[index]'], {'rtol': '(0.001)'}), '(x_data, expected_x_data_list[index], rtol=0.001)\n', (17555, 17604), True, 'import numpy as np\n'), ((17619, 17679), 'numpy.allclose', 'np.allclose', (['y_data', 'expected_y_data_list[index]'], {'rtol': '(0.001)'}), '(y_data, expected_y_data_list[index], rtol=0.001)\n', (17630, 17679), True, 'import numpy as np\n')] |
import skimage.io
import numpy as np
import matplotlib.pyplot as plt
from skimage.segmentation import active_contour
from skimage.filters import gaussian
import load_images

# Load the first curated movie and pull out a single frame to segment.
prefix = '../Curated Images/'
images = load_images.images
movie = skimage.io.imread(''.join([prefix, images[0]['filename']]))
img = movie[1, 1, :, :]

# Initial snake: a circle of 50 points centred at (250, 250), radius 100.
center = np.array([250, 250])
radius = 100
angles = np.linspace(0, 2 * np.pi, 50)
init_snake = np.column_stack((center[0] + radius * np.sin(angles),
                             center[1] + radius * np.cos(angles)))

# Relax the contour onto the image.
snake = active_contour(img, init_snake, alpha=0.01, beta=0.01, w_line=1,
                       max_iterations=5000, convergence=0.1)

# Overlay the initial and the converged snake on the frame.
plt.imshow(img, cmap='gray')
plt.plot(init_snake[:, 0], init_snake[:, 1])
plt.plot(snake[:, 0], snake[:, 1])
plt.show() | [
"matplotlib.pyplot.imshow",
"matplotlib.pyplot.plot",
"numpy.array",
"numpy.linspace",
"skimage.segmentation.active_contour",
"numpy.cos",
"numpy.sin",
"matplotlib.pyplot.show"
] | [((342, 362), 'numpy.array', 'np.array', (['[250, 250]'], {}), '([250, 250])\n', (350, 362), True, 'import numpy as np\n'), ((376, 405), 'numpy.linspace', 'np.linspace', (['(0)', '(2 * np.pi)', '(50)'], {}), '(0, 2 * np.pi, 50)\n', (387, 405), True, 'import numpy as np\n'), ((477, 576), 'skimage.segmentation.active_contour', 'active_contour', (['img', 'si'], {'alpha': '(0.01)', 'beta': '(0.01)', 'w_line': '(1)', 'max_iterations': '(5000)', 'convergence': '(0.1)'}), '(img, si, alpha=0.01, beta=0.01, w_line=1, max_iterations=\n 5000, convergence=0.1)\n', (491, 576), False, 'from skimage.segmentation import active_contour\n'), ((566, 594), 'matplotlib.pyplot.imshow', 'plt.imshow', (['img'], {'cmap': '"""gray"""'}), "(img, cmap='gray')\n", (576, 594), True, 'import matplotlib.pyplot as plt\n'), ((594, 622), 'matplotlib.pyplot.plot', 'plt.plot', (['si[:, 0]', 'si[:, 1]'], {}), '(si[:, 0], si[:, 1])\n', (602, 622), True, 'import matplotlib.pyplot as plt\n'), ((620, 646), 'matplotlib.pyplot.plot', 'plt.plot', (['s[:, 0]', 's[:, 1]'], {}), '(s[:, 0], s[:, 1])\n', (628, 646), True, 'import matplotlib.pyplot as plt\n'), ((645, 655), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (653, 655), True, 'import matplotlib.pyplot as plt\n'), ((428, 441), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (434, 441), True, 'import numpy as np\n'), ((456, 469), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (462, 469), True, 'import numpy as np\n')] |
import tensorflow as tf
import numpy as np
import random
from IPython.display import clear_output
import progressbar
import os
from mujoco_py import GlfwContext
class DataCollector:
    """Collects transitions from a MuJoCo environment and persists them.

    Every step is forwarded to ``clientWrapper.storeOnlineData`` and — when a
    ``path`` is given — also appended to per-field ``.npy`` files on disk
    (state, action, image, reward, next_state, next_image, terminated).
    """

    def __init__(self, id, clientWrapper, agent, environment, action_space_policy, state_policy, reward_policy, path="/data", cluster=False):
        self.agent = agent
        self.environment = environment
        self.clientWrapper = clientWrapper
        # Each collector instance writes to its own directory suffix so that
        # parallel collectors do not clobber each other's files.
        if path is not None:
            self.path = path + "_" + str(id)
        else:
            self.path = None
        self.id = id
        self.cluster = cluster
        # Init variables
        self.policyFunction = action_space_policy
        self.max_step_size = 1000
        self.get_state = state_policy
        self.reward_policy = reward_policy
        self.target1Network = None
        self.updateTarget1Network()
        self.episode = 0
        # Smallest row count across the persisted .npy files; used to keep all
        # per-field files the same length when appending (None = not yet known).
        self.minSize = None

    def start(self, lock, train=True):
        """Entry point: init offscreen rendering (off-cluster) and collect forever."""
        if not self.cluster:
            GlfwContext(offscreen=True)  # Create a window to init GLFW.
        self.collectData(train, lock)

    def getTarget1Network(self):
        """Return the cached target network, fetching it from the agent on first use."""
        if self.target1Network:
            return self.target1Network
        self.updateTarget1Network()
        return self.target1Network

    def updateTarget1Network(self):
        """Refresh the cached target network from the agent."""
        self.target1Network = self.agent.getTarget1Network()

    def loadNumpy(self, path):
        """Load a .npy file; return None (with a warning) if it does not exist."""
        if not os.path.exists(path):
            print("Path does not exist", path)
            return None
        return np.load(path)

    def getPath(self, path, output_filename):
        """Resolve ``path/output_filename`` relative to this source file."""
        dir_path = os.path.dirname(os.path.realpath(__file__))
        return os.path.join(dir_path, path, output_filename)

    def safeRewards(self, path, data, output_filename="rewardsPerEpoch.npy"):
        """Append one episode reward to the rewards history file under ``path``."""
        if path is None:
            return
        dir_path = os.path.dirname(os.path.realpath(__file__))
        pathset = os.path.join(dir_path, path)
        if not os.path.exists(pathset):
            # Create the directory we want to save to.
            os.makedirs(pathset)
        path_to_store = os.path.join(pathset, output_filename)
        oldData = self.loadNumpy(path_to_store)
        newData = [data]
        if oldData is not None:
            newData = np.concatenate((oldData, [data]), axis=0)
        np.save(path_to_store, newData)

    def numpySave(self, path, output_filename, data):
        """Append ``data`` as one row to ``path/output_filename``.

        The existing array is first truncated to ``self.minSize`` rows so that
        all per-field files stay aligned after an interrupted/partial write.
        """
        dir_path = os.path.dirname(os.path.realpath(__file__))
        pathset = os.path.join(dir_path, path)
        if not os.path.exists(pathset):
            # Create the directory we want to save to.
            os.makedirs(pathset)
        path_to_store = os.path.join(pathset, output_filename)
        oldData = self.loadNumpy(path_to_store)
        # Bug fix: the original used ``self.minSize is not 0`` (an identity test
        # on an int literal) and sliced before checking for None, which crashed
        # when the file did not exist yet.
        if oldData is not None and self.minSize:
            oldData = oldData[0:self.minSize]
        newData = [data]
        if oldData is not None:
            newData = np.concatenate((oldData, [data]), axis=0)
        np.save(path_to_store, newData)

    def getMinSize(self, paths):
        """Return (and cache) the smallest row count among the given .npy files."""
        if self.minSize is not None:
            return self.minSize
        sizes = []
        for filename in paths:
            data = self.loadNumpy(self.getPath(self.path, filename))
            sizes.append(len(data) if data is not None else 0)
        self.minSize = min(sizes)
        return self.minSize

    def storeData(self, state, action, image, reward, next_state, next_image, terminated):
        """Forward one transition to the client and mirror it to disk."""
        self.clientWrapper.storeOnlineData(state, action, image, reward, next_state, next_image, terminated)
        if self.path is not None:
            paths = ["state.npy", "action.npy", "image.npy", "reward.npy", "next_state.npy", "next_image.npy", "terminated.npy"]
            self.getMinSize(paths)
            self.numpySave(self.path, "state.npy", state)
            self.numpySave(self.path, "action.npy", action)
            self.numpySave(self.path, "image.npy", image)
            self.numpySave(self.path, "reward.npy", np.array([reward]))
            self.numpySave(self.path, "next_state.npy", next_state)
            self.numpySave(self.path, "next_image.npy", next_image)
            self.numpySave(self.path, "terminated.npy", np.array([terminated]))

    def getActionByStates(self, state):
        """Scripted policy: move straight towards the goal, scaled by 10.

        Assumes state[10:13] is the achieved goal position and state[13:] the
        target position — TODO confirm against the environment's observation
        layout.
        """
        archieved_goal = state[10:13]
        goal = state[13:]
        movement = goal - archieved_goal
        return np.array([movement[0] * 10, movement[1] * 10, movement[2] * 10, 10])

    def collectData(self, train, lock):
        """Run episodes forever, storing every transition via storeData."""
        enviroment = self.environment
        i = 0
        print("Collect Data")
        # Begin new episode
        while True:
            i += 1
            # Reset the environment
            state = self.environment.reset()
            state = self.get_state(state)
            # Initialize per-episode variables
            rewardSum = 0
            terminated = False
            step = 0
            bar = progressbar.ProgressBar(maxval=self.max_step_size, widgets=[progressbar.Bar('=', '[', ']'), ' ', progressbar.Percentage()])
            bar.start()
            # Periodically (and always when evaluating) refresh the target network.
            if i % 10 == 0 or not train:
                self.updateTarget1Network()
                print("fetch new TargetNetwork")
            # Rendering is not safe to run concurrently -> guard with the shared lock.
            with lock:
                lastImage = enviroment.render(mode="rgb_array")
                if not self.cluster:
                    enviroment.render()
            camera = lastImage
            # Run episode
            while not terminated:
                # The agent sees the two most recent camera frames stacked vertically.
                concatenatedImage = np.concatenate((lastImage, camera), axis=0)
                action = self.agent.get_Action(enviroment, state, self.agent.getReshapedImg(concatenatedImage), train, self.getTarget1Network())
                action = self.policyFunction(action)
                # Warm-up phase: mix random actions with the scripted
                # goal-seeking policy instead of the learned action.
                if i < 10000:
                    if np.random.rand() <= 0.4:
                        next_state, reward, terminated, info = enviroment.step(enviroment.action_space.sample())
                    else:
                        next_state, reward, terminated, info = enviroment.step(self.getActionByStates(state))
                else:
                    next_state, reward, terminated, info = enviroment.step(action)
                # -reward is falsy exactly when reward == 0; presumably the raw
                # env reward is 0 on success and negative otherwise — verify.
                isTerminated = not bool(-reward)
                reward = self.reward_policy(next_state, reward)
                if isTerminated:
                    # Bonus reward when the goal is reached.
                    reward = np.float64(3.0)
                next_state = self.get_state(next_state)
                with lock:
                    next_camera = enviroment.render(mode="rgb_array")
                    if not self.cluster:
                        enviroment.render()
                next_concatenatedImage = np.concatenate((camera, next_camera), axis=0)
                self.storeData(state, action, self.agent.getReshapedImg(concatenatedImage), reward, next_state, self.agent.getReshapedImg(next_concatenatedImage), terminated)
                # Update counters and slide the image window forward.
                step += 1
                rewardSum += reward
                state = next_state
                lastImage = camera
                camera = next_camera
                bar.update(step)
                if isTerminated or terminated or step >= self.max_step_size:
                    bar.finish()
                    print("**********************************")
                    print("Episode {} Reward {}".format(self.episode, rewardSum))
                    self.safeRewards(self.path, rewardSum)
                    print("**********************************")
                    break
            self.episode += 1
| [
"os.path.expanduser",
"os.path.exists",
"progressbar.Bar",
"numpy.random.rand",
"os.makedirs",
"numpy.float64",
"os.path.join",
"os.path.realpath",
"numpy.array",
"mujoco_py.GlfwContext",
"progressbar.Percentage",
"numpy.concatenate",
"numpy.load",
"numpy.save"
] | [((1614, 1627), 'numpy.load', 'np.load', (['path'], {}), '(path)\n', (1621, 1627), True, 'import numpy as np\n'), ((1720, 1743), 'os.path.expanduser', 'os.path.expanduser', (['"""~"""'], {}), "('~')\n", (1738, 1743), False, 'import os\n'), ((1866, 1894), 'os.path.join', 'os.path.join', (['dir_path', 'path'], {}), '(dir_path, path)\n', (1878, 1894), False, 'import os\n'), ((1919, 1957), 'os.path.join', 'os.path.join', (['pathset', 'output_filename'], {}), '(pathset, output_filename)\n', (1931, 1957), False, 'import os\n'), ((2130, 2153), 'os.path.expanduser', 'os.path.expanduser', (['"""~"""'], {}), "('~')\n", (2148, 2153), False, 'import os\n'), ((2276, 2304), 'os.path.join', 'os.path.join', (['dir_path', 'path'], {}), '(dir_path, path)\n', (2288, 2304), False, 'import os\n'), ((2602, 2640), 'os.path.join', 'os.path.join', (['pathset', 'output_filename'], {}), '(pathset, output_filename)\n', (2614, 2640), False, 'import os\n'), ((3006, 3037), 'numpy.save', 'np.save', (['path_to_store', 'newData'], {}), '(path_to_store, newData)\n', (3013, 3037), True, 'import numpy as np\n'), ((3125, 3148), 'os.path.expanduser', 'os.path.expanduser', (['"""~"""'], {}), "('~')\n", (3143, 3148), False, 'import os\n'), ((3271, 3299), 'os.path.join', 'os.path.join', (['dir_path', 'path'], {}), '(dir_path, path)\n', (3283, 3299), False, 'import os\n'), ((3597, 3635), 'os.path.join', 'os.path.join', (['pathset', 'output_filename'], {}), '(pathset, output_filename)\n', (3609, 3635), False, 'import os\n'), ((4090, 4121), 'numpy.save', 'np.save', (['path_to_store', 'newData'], {}), '(path_to_store, newData)\n', (4097, 4121), True, 'import numpy as np\n'), ((5631, 5699), 'numpy.array', 'np.array', (['[movement[0] * 10, movement[1] * 10, movement[2] * 10, 10]'], {}), '([movement[0] * 10, movement[1] * 10, movement[2] * 10, 10])\n', (5639, 5699), True, 'import numpy as np\n'), ((1054, 1081), 'mujoco_py.GlfwContext', 'GlfwContext', ([], {'offscreen': '(True)'}), '(offscreen=True)\n', (1065, 
1081), False, 'from mujoco_py import GlfwContext\n'), ((1497, 1517), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (1511, 1517), False, 'import os\n'), ((1820, 1846), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (1836, 1846), False, 'import os\n'), ((2230, 2256), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (2246, 2256), False, 'import os\n'), ((2373, 2396), 'os.path.exists', 'os.path.exists', (['pathset'], {}), '(pathset)\n', (2387, 2396), False, 'import os\n'), ((2466, 2486), 'os.makedirs', 'os.makedirs', (['pathset'], {}), '(pathset)\n', (2477, 2486), False, 'import os\n'), ((2819, 2860), 'numpy.concatenate', 'np.concatenate', (['(oldData, [data])'], {'axis': '(0)'}), '((oldData, [data]), axis=0)\n', (2833, 2860), True, 'import numpy as np\n'), ((3225, 3251), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (3241, 3251), False, 'import os\n'), ((3368, 3391), 'os.path.exists', 'os.path.exists', (['pathset'], {}), '(pathset)\n', (3382, 3391), False, 'import os\n'), ((3461, 3481), 'os.makedirs', 'os.makedirs', (['pathset'], {}), '(pathset)\n', (3472, 3481), False, 'import os\n'), ((3903, 3944), 'numpy.concatenate', 'np.concatenate', (['(oldData, [data])'], {'axis': '(0)'}), '((oldData, [data]), axis=0)\n', (3917, 3944), True, 'import numpy as np\n'), ((5219, 5237), 'numpy.array', 'np.array', (['[reward]'], {}), '([reward])\n', (5227, 5237), True, 'import numpy as np\n'), ((5428, 5450), 'numpy.array', 'np.array', (['[terminated]'], {}), '([terminated])\n', (5436, 5450), True, 'import numpy as np\n'), ((6763, 6806), 'numpy.concatenate', 'np.concatenate', (['(lastImage, camera)'], {'axis': '(0)'}), '((lastImage, camera), axis=0)\n', (6777, 6806), True, 'import numpy as np\n'), ((8075, 8120), 'numpy.concatenate', 'np.concatenate', (['(camera, next_camera)'], {'axis': '(0)'}), '((camera, next_camera), axis=0)\n', (8089, 8120), True, 'import numpy as np\n'), ((7776, 
7791), 'numpy.float64', 'np.float64', (['(3.0)'], {}), '(3.0)\n', (7786, 7791), True, 'import numpy as np\n'), ((6215, 6245), 'progressbar.Bar', 'progressbar.Bar', (['"""="""', '"""["""', '"""]"""'], {}), "('=', '[', ']')\n", (6230, 6245), False, 'import progressbar\n'), ((6252, 6276), 'progressbar.Percentage', 'progressbar.Percentage', ([], {}), '()\n', (6274, 6276), False, 'import progressbar\n'), ((7137, 7153), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (7151, 7153), True, 'import numpy as np\n')] |
from utils import *
from utils import DatasetFolderV12 as DatasetFolder
import numpy as np
from fastprogress import master_bar,progress_bar
import time
import h5py
import os
import argparse
def write_data(data, filename):
    """Write *data* to *filename* as one gzip-compressed HDF5 dataset named 'array'.

    Parameters
    ----------
    data : numpy.ndarray
        Array to persist; its shape is used verbatim for the dataset.
    filename : str
        Destination HDF5 file path (overwritten if it exists).

    The context manager guarantees the file handle is closed even when
    ``create_dataset`` raises (the original leaked the handle on error).
    """
    with h5py.File(filename, 'w', libver='latest') as f:
        f.create_dataset('array', shape=data.shape, data=data,
                         compression='gzip', compression_opts=9)
def getArgs():
    """Parse the command-line options controlling I/O paths, city, step and model version.

    Returns
    -------
    argparse.Namespace
        Parsed options with the same names and defaults as before.
    """
    parser = argparse.ArgumentParser()
    # (flags, value type, default) — one row per supported option.
    option_spec = [
        (('-i', '--input_dir'), str, './processed/'),
        (('-o', '--output_dir'), str, './'),
        (('-m', '--model_dir'), str, './'),
        (('-c', '--city'), str, 'Berlin'),
        (('--step',), int, 12),
        (('--version',), str, 'v17'),
    ]
    for flags, value_type, default in option_spec:
        parser.add_argument(*flags, type=value_type, default=default)
    return parser.parse_args()
# Former hard-coded defaults, kept for reference:
#IN_PATH = '/data/data20180901/processed/'
#OUT_PATH = './'
#CITY = 'Berlin'
#DEVICE = 'cuda:0'
#STEP = 3
#VERSION = '0'
# Module-level configuration: parsed once from the command line at import time.
args = getArgs()
IN_PATH = args.input_dir      # directory holding the preprocessed inputs
OUT_PATH = args.output_dir    # root for result/numpy and result/output trees
MODEL_PATH = args.model_dir   # model directory (not used in this chunk)
CITY = args.city              # one of the VERSION_MAP keys below
#DEVICE = f'cuda:{args.device}'
STEP = args.step              # number of input frames per sample
VERSION = args.version        # model version tag for channels 1 and 2
#IS_LEAK = args.no_leak
#LEAK_STEP = 18 if IS_LEAK else 0
#ACTIVATION = args.activation
# Maps city -> {channel index: model version whose predictions are loaded}.
# Channel 0 always comes from the fixed 'v13' model; channels 1 and 2 use the
# CLI-selected VERSION.
VERSION_MAP={
    'Moscow':{0:'v13',1:VERSION,2:VERSION},
    'Berlin':{0:'v13',1:VERSION,2:VERSION},
    'Istanbul':{0:'v13',1:VERSION,2:VERSION},
}
if __name__=='__main__':
    # Time indices of the frames to predict for this city.
    index = getPredictIndex(CITY)
    #index = [i+j for i in index for j in range(3)]
    print(index)
    folder = DatasetFolder(IN_PATH, CITY, 'test', index, STEP, 0,
                          is_transform=False, predict_length=1, skip=0)
    for DATE in folder.meta:
        # For each test date, load the per-frame predictions of every
        # channel and stack them along the last (channel) axis.
        d_arr = []
        for CHANNEL in [0, 1, 2]:
            arr = [np.load(f'{OUT_PATH}/result/numpy/{VERSION_MAP[CITY][CHANNEL]}/{CITY}/{DATE}/{CHANNEL}/{ids}.npy')[None, :]
                   for ids in index]
            d_arr.append(np.concatenate(arr))
        d_arr = np.concatenate(d_arr, -1)
        # exist_ok replaces the original bare `try/except: pass`, which
        # silently swallowed *every* error (including permission failures).
        os.makedirs(f'{OUT_PATH}/result/output/{VERSION}/{CITY}/{CITY}_test/',
                    exist_ok=True)
        filename = f'{OUT_PATH}/result/output/{VERSION}/{CITY}/{CITY}_test/{DATE}_100m_bins.h5'
        write_data(d_arr, filename)
| [
"os.makedirs",
"argparse.ArgumentParser",
"utils.DatasetFolderV12",
"h5py.File",
"numpy.concatenate",
"numpy.load"
] | [((234, 275), 'h5py.File', 'h5py.File', (['filename', '"""w"""'], {'libver': '"""latest"""'}), "(filename, 'w', libver='latest')\n", (243, 275), False, 'import h5py\n'), ((429, 454), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (452, 454), False, 'import argparse\n'), ((1782, 1884), 'utils.DatasetFolderV12', 'DatasetFolder', (['IN_PATH', 'CITY', '"""test"""', 'index', 'STEP', '(0)'], {'is_transform': '(False)', 'predict_length': '(1)', 'skip': '(0)'}), "(IN_PATH, CITY, 'test', index, STEP, 0, is_transform=False,\n predict_length=1, skip=0)\n", (1795, 1884), True, 'from utils import DatasetFolderV12 as DatasetFolder\n'), ((2785, 2810), 'numpy.concatenate', 'np.concatenate', (['d_arr', '(-1)'], {}), '(d_arr, -1)\n', (2799, 2810), True, 'import numpy as np\n'), ((2181, 2200), 'numpy.concatenate', 'np.concatenate', (['arr'], {}), '(arr)\n', (2195, 2200), True, 'import numpy as np\n'), ((2863, 2933), 'os.makedirs', 'os.makedirs', (['f"""{OUT_PATH}/result/output/{VERSION}/{CITY}/{CITY}_test/"""'], {}), "(f'{OUT_PATH}/result/output/{VERSION}/{CITY}/{CITY}_test/')\n", (2874, 2933), False, 'import os\n'), ((2055, 2163), 'numpy.load', 'np.load', (['f"""{OUT_PATH}/result/numpy/{VERSION_MAP[CITY][CHANNEL]}/{CITY}/{DATE}/{CHANNEL}/{ids}.npy"""'], {}), "(\n f'{OUT_PATH}/result/numpy/{VERSION_MAP[CITY][CHANNEL]}/{CITY}/{DATE}/{CHANNEL}/{ids}.npy'\n )\n", (2062, 2163), True, 'import numpy as np\n')] |
import numpy as np
from hypothesis import given, settings, strategies as st
from metod_alg import objective_functions as mt_obj
from metod_alg import metod_algorithm_functions as mt_alg
from metod_alg import check_metod_class as prev_mt_alg
def calc_minimizer_sev_quad(point, p, store_x0, matrix_test):
    """
    Finding the position of the local minimizer which point is closest
    to, using the minimum of several Quadratic forms function.

    Parameters
    ----------
    point : 1-D array with shape (d, )
            A point used to evaluate the function.
    p : integer
        Number of local minima.
    store_x0 : 2-D arrays with shape (p, d).
    matrix_test : 3-D arrays with shape (p, d, d).

    Returns
    -------
    position_minimum : integer
                       Position of the local minimizer which produces the
                       smallest function value at point.
    """
    # Evaluate all p quadratic forms in one vectorized pass:
    # f_i = 0.5 * (point - x0_i)^T A_i (point - x0_i).
    displacements = point - store_x0
    quad_values = 0.5 * np.einsum('ij,ijk,ik->i',
                                  displacements, matrix_test, displacements)
    return np.argmin(quad_values)
@settings(max_examples=10, deadline=None)
@given(st.integers(2, 20), st.integers(10, 50))
def test_1(p, d):
    """
    Check whether a local minimizer has already been identified by the METOD
    algorithm by applying prev_mt_alg.check_if_new_minimizer().
    The local minimizer has previously been discovered, so the function is
    expected to report "not new" (num == 0).
    """
    # Extreme eigenvalues shared by every quadratic form.
    lambda_1 = 1
    lambda_2 = 10
    store_A = np.zeros((p, d, d))
    store_x0 = np.zeros((p, d))
    store_rotation = np.zeros((p, d, d))
    for i in range(p):
        # Eigenvalue diagonal: the extremes are fixed, the remaining
        # (d - 2) values are drawn strictly inside (lambda_1, lambda_2).
        diag_vals = np.zeros(d)
        diag_vals[:2] = np.array([lambda_1, lambda_2])
        diag_vals[2:] = np.random.uniform(lambda_1 + 1,
                                          lambda_2 - 1, (d - 2))
        store_A[i] = np.diag(diag_vals)
        store_x0[i] = np.random.uniform(0, 1, (d))
        store_rotation[i] = mt_obj.calculate_rotation_matrix(d, 3)
    # Rotate each diagonal form: A_i = R_i^T diag_i R_i.
    matrix_test = (np.transpose(store_rotation, (0, 2, 1)) @ store_A @
                   store_rotation)
    func_args = (p, store_x0, matrix_test)
    # Random starting point inside the unit hypercube.
    x = np.random.uniform(0, 1, (d, ))
    projection = False
    tolerance = 0.001
    option = 'minimize_scalar'
    met = 'brent'
    initial_guess = 0.005
    const = 0.1
    bound_1, bound_2 = 0, 1
    usage = 'metod_algorithm'
    relax_sd_it = 1
    f = mt_obj.several_quad_function
    g = mt_obj.several_quad_gradient
    # NOTE(review): check_func is assigned but unused in this test.
    check_func = mt_obj.calc_minimizer_sev_quad
    pos = calc_minimizer_sev_quad(x, p, store_x0, matrix_test)
    # Seed the discovered list with the minimizer closest to x (plus a few
    # others), so check_if_new_minimizer should recognise it as known.
    discovered_minimizers = [store_x0[pos]]
    for j in range(int(p/2)):
        if j != pos:
            discovered_minimizers.append(store_x0[j])
    store_grad_warm_up = g(x, *func_args)
    num = prev_mt_alg.check_if_new_minimizer(x, d, projection, tolerance,
                                             option, met, initial_guess,
                                             func_args, f, g, bound_1, bound_2,
                                             usage, relax_sd_it, store_grad_warm_up,
                                             discovered_minimizers, const)
    assert(num == 0)
@settings(max_examples=10, deadline=None)
@given(st.integers(2, 20), st.integers(10, 50))
def test_2(p, d):
    """
    Check whether a local minimizer has already been identified by the METOD
    algorithm by applying prev_mt_alg.check_if_new_minimizer().
    The local minimizer has not been discovered previously, so the function
    is expected to report "new" (num == 1).
    """
    lambda_1, lambda_2 = 1, 10
    store_A = np.zeros((p, d, d))
    store_x0 = np.zeros((p, d))
    store_rotation = np.zeros((p, d, d))
    for idx in range(p):
        # Eigenvalues: both extremes plus (d - 2) draws strictly between them.
        eigenvalues = np.zeros(d)
        eigenvalues[:2] = np.array([lambda_1, lambda_2])
        eigenvalues[2:] = np.random.uniform(lambda_1 + 1, lambda_2 - 1, (d - 2))
        store_A[idx] = np.diag(eigenvalues)
        store_x0[idx] = np.random.uniform(0, 1, (d,))
        store_rotation[idx] = mt_obj.calculate_rotation_matrix(d, 3)
    # Rotate each diagonal form: A_i = R_i^T diag_i R_i.
    rotation_T = np.transpose(store_rotation, (0, 2, 1))
    matrix_test = rotation_T @ store_A @ store_rotation
    func_args = (p, store_x0, matrix_test)
    x = np.random.uniform(0, 1, (d,))
    projection = False
    tolerance = 0.001
    option = 'minimize_scalar'
    met = 'brent'
    initial_guess = 0.005
    const = 0.1
    bound_1, bound_2 = 0, 1
    usage = 'metod_algorithm'
    relax_sd_it = 1
    f = mt_obj.several_quad_function
    g = mt_obj.several_quad_gradient
    pos = calc_minimizer_sev_quad(x, p, store_x0, matrix_test)
    # Record some minimizers, deliberately excluding the one closest to x.
    discovered_minimizers = [store_x0[j] for j in range(p // 2) if j != pos]
    store_grad_warm_up = g(x, *func_args)
    num = prev_mt_alg.check_if_new_minimizer(
        x, d, projection, tolerance, option, met, initial_guess,
        func_args, f, g, bound_1, bound_2, usage, relax_sd_it,
        store_grad_warm_up, discovered_minimizers, const)
    assert(num == 1)
| [
"hypothesis.strategies.integers",
"numpy.diag",
"numpy.array",
"numpy.zeros",
"metod_alg.check_metod_class.check_if_new_minimizer",
"hypothesis.settings",
"numpy.random.uniform",
"numpy.argmin",
"numpy.transpose",
"metod_alg.objective_functions.calculate_rotation_matrix"
] | [((1253, 1293), 'hypothesis.settings', 'settings', ([], {'max_examples': '(10)', 'deadline': 'None'}), '(max_examples=10, deadline=None)\n', (1261, 1293), False, 'from hypothesis import given, settings, strategies as st\n'), ((3287, 3327), 'hypothesis.settings', 'settings', ([], {'max_examples': '(10)', 'deadline': 'None'}), '(max_examples=10, deadline=None)\n', (3295, 3327), False, 'from hypothesis import given, settings, strategies as st\n'), ((981, 992), 'numpy.zeros', 'np.zeros', (['p'], {}), '(p)\n', (989, 992), True, 'import numpy as np\n'), ((1193, 1221), 'numpy.argmin', 'np.argmin', (['store_func_values'], {}), '(store_func_values)\n', (1202, 1221), True, 'import numpy as np\n'), ((1622, 1641), 'numpy.zeros', 'np.zeros', (['(p, d, d)'], {}), '((p, d, d))\n', (1630, 1641), True, 'import numpy as np\n'), ((1657, 1673), 'numpy.zeros', 'np.zeros', (['(p, d)'], {}), '((p, d))\n', (1665, 1673), True, 'import numpy as np\n'), ((1695, 1714), 'numpy.zeros', 'np.zeros', (['(p, d, d)'], {}), '((p, d, d))\n', (1703, 1714), True, 'import numpy as np\n'), ((2257, 2286), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(1)', '(d,)'], {}), '(0, 1, (d,))\n', (2274, 2286), True, 'import numpy as np\n'), ((2890, 3094), 'metod_alg.check_metod_class.check_if_new_minimizer', 'prev_mt_alg.check_if_new_minimizer', (['x', 'd', 'projection', 'tolerance', 'option', 'met', 'initial_guess', 'func_args', 'f', 'g', 'bound_1', 'bound_2', 'usage', 'relax_sd_it', 'store_grad_warm_up', 'discovered_minimizers', 'const'], {}), '(x, d, projection, tolerance, option, met,\n initial_guess, func_args, f, g, bound_1, bound_2, usage, relax_sd_it,\n store_grad_warm_up, discovered_minimizers, const)\n', (2924, 3094), True, 'from metod_alg import check_metod_class as prev_mt_alg\n'), ((1301, 1319), 'hypothesis.strategies.integers', 'st.integers', (['(2)', '(20)'], {}), '(2, 20)\n', (1312, 1319), True, 'from hypothesis import given, settings, strategies as st\n'), ((1321, 1340), 
'hypothesis.strategies.integers', 'st.integers', (['(10)', '(50)'], {}), '(10, 50)\n', (1332, 1340), True, 'from hypothesis import given, settings, strategies as st\n'), ((3660, 3679), 'numpy.zeros', 'np.zeros', (['(p, d, d)'], {}), '((p, d, d))\n', (3668, 3679), True, 'import numpy as np\n'), ((3695, 3711), 'numpy.zeros', 'np.zeros', (['(p, d)'], {}), '((p, d))\n', (3703, 3711), True, 'import numpy as np\n'), ((3733, 3752), 'numpy.zeros', 'np.zeros', (['(p, d, d)'], {}), '((p, d, d))\n', (3741, 3752), True, 'import numpy as np\n'), ((4295, 4324), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(1)', '(d,)'], {}), '(0, 1, (d,))\n', (4312, 4324), True, 'import numpy as np\n'), ((4866, 5070), 'metod_alg.check_metod_class.check_if_new_minimizer', 'prev_mt_alg.check_if_new_minimizer', (['x', 'd', 'projection', 'tolerance', 'option', 'met', 'initial_guess', 'func_args', 'f', 'g', 'bound_1', 'bound_2', 'usage', 'relax_sd_it', 'store_grad_warm_up', 'discovered_minimizers', 'const'], {}), '(x, d, projection, tolerance, option, met,\n initial_guess, func_args, f, g, bound_1, bound_2, usage, relax_sd_it,\n store_grad_warm_up, discovered_minimizers, const)\n', (4900, 5070), True, 'from metod_alg import check_metod_class as prev_mt_alg\n'), ((3335, 3353), 'hypothesis.strategies.integers', 'st.integers', (['(2)', '(20)'], {}), '(2, 20)\n', (3346, 3353), True, 'from hypothesis import given, settings, strategies as st\n'), ((3355, 3374), 'hypothesis.strategies.integers', 'st.integers', (['(10)', '(50)'], {}), '(10, 50)\n', (3366, 3374), True, 'from hypothesis import given, settings, strategies as st\n'), ((1758, 1769), 'numpy.zeros', 'np.zeros', (['d'], {}), '(d)\n', (1766, 1769), True, 'import numpy as np\n'), ((1794, 1824), 'numpy.array', 'np.array', (['[lambda_1, lambda_2]'], {}), '([lambda_1, lambda_2])\n', (1802, 1824), True, 'import numpy as np\n'), ((1849, 1901), 'numpy.random.uniform', 'np.random.uniform', (['(lambda_1 + 1)', '(lambda_2 - 1)', '(d - 2)'], {}), 
'(lambda_1 + 1, lambda_2 - 1, d - 2)\n', (1866, 1901), True, 'import numpy as np\n'), ((1965, 1983), 'numpy.diag', 'np.diag', (['diag_vals'], {}), '(diag_vals)\n', (1972, 1983), True, 'import numpy as np\n'), ((2006, 2032), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(1)', 'd'], {}), '(0, 1, d)\n', (2023, 2032), True, 'import numpy as np\n'), ((2063, 2101), 'metod_alg.objective_functions.calculate_rotation_matrix', 'mt_obj.calculate_rotation_matrix', (['d', '(3)'], {}), '(d, 3)\n', (2095, 2101), True, 'from metod_alg import objective_functions as mt_obj\n'), ((3796, 3807), 'numpy.zeros', 'np.zeros', (['d'], {}), '(d)\n', (3804, 3807), True, 'import numpy as np\n'), ((3832, 3862), 'numpy.array', 'np.array', (['[lambda_1, lambda_2]'], {}), '([lambda_1, lambda_2])\n', (3840, 3862), True, 'import numpy as np\n'), ((3887, 3939), 'numpy.random.uniform', 'np.random.uniform', (['(lambda_1 + 1)', '(lambda_2 - 1)', '(d - 2)'], {}), '(lambda_1 + 1, lambda_2 - 1, d - 2)\n', (3904, 3939), True, 'import numpy as np\n'), ((4003, 4021), 'numpy.diag', 'np.diag', (['diag_vals'], {}), '(diag_vals)\n', (4010, 4021), True, 'import numpy as np\n'), ((4044, 4070), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(1)', 'd'], {}), '(0, 1, d)\n', (4061, 4070), True, 'import numpy as np\n'), ((4101, 4139), 'metod_alg.objective_functions.calculate_rotation_matrix', 'mt_obj.calculate_rotation_matrix', (['d', '(3)'], {}), '(d, 3)\n', (4133, 4139), True, 'from metod_alg import objective_functions as mt_obj\n'), ((2121, 2160), 'numpy.transpose', 'np.transpose', (['store_rotation', '(0, 2, 1)'], {}), '(store_rotation, (0, 2, 1))\n', (2133, 2160), True, 'import numpy as np\n'), ((4159, 4198), 'numpy.transpose', 'np.transpose', (['store_rotation', '(0, 2, 1)'], {}), '(store_rotation, (0, 2, 1))\n', (4171, 4198), True, 'import numpy as np\n'), ((1056, 1089), 'numpy.transpose', 'np.transpose', (['(point - store_x0[i])'], {}), '(point - store_x0[i])\n', (1068, 1089), True, 'import 
numpy as np\n')] |
import time
import numpy
from mt2 import mt2, mt2_lally
def main():
    """Time the Lester and Lally MT2 implementations on a 400x400 grid of
    trial invisible masses and verify that both produce the same values."""
    n1 = 400
    n2 = 400
    # mass_1 varies along the first axis, mass_2 along the second, so the
    # two broadcast against each other inside the MT2 call.
    mass_1 = numpy.linspace(1, 200, n1)[:, numpy.newaxis]
    mass_2 = numpy.linspace(1, 200, n2)[numpy.newaxis, :]
    args = (
        100, 410, 20,     # Visible 1: mass, px, py
        150, -210, -300,  # Visible 2: mass, px, py
        -200, 280,        # Missing transverse momentum: x, y
        mass_1, mass_2,   # Invisible 1 mass, invisible 2 mass
    )
    # Pre-allocate the (n1, n2) output buffers so that only MT2 itself is
    # timed, not the allocation.
    out_lester = numpy.zeros((n1, n2))
    out_lally = numpy.zeros((n1, n2))
    start_lester = time.time()
    mt2(*args, out=out_lester)
    elapsed_lester = time.time() - start_lester
    start_lally = time.time()
    mt2_lally(*args, out=out_lally)
    elapsed_lally = time.time() - start_lally
    # Both implementations must agree element-wise.
    numpy.testing.assert_array_almost_equal(out_lester, out_lally)
    print("Elapsed time Lester: {} seconds".format(elapsed_lester))
    print("Elapsed time Lally : {} seconds".format(elapsed_lally))
# Run the benchmark only when executed as a script, not on import.
if __name__ == "__main__":
    main()
| [
"numpy.testing.assert_array_almost_equal",
"numpy.zeros",
"mt2.mt2",
"numpy.linspace",
"mt2.mt2_lally",
"time.time"
] | [((668, 689), 'numpy.zeros', 'numpy.zeros', (['(n1, n2)'], {}), '((n1, n2))\n', (679, 689), False, 'import numpy\n'), ((706, 727), 'numpy.zeros', 'numpy.zeros', (['(n1, n2)'], {}), '((n1, n2))\n', (717, 727), False, 'import numpy\n'), ((821, 832), 'time.time', 'time.time', ([], {}), '()\n', (830, 832), False, 'import time\n'), ((837, 863), 'mt2.mt2', 'mt2', (['*args'], {'out': 'out_lester'}), '(*args, out=out_lester)\n', (840, 863), False, 'from mt2 import mt2, mt2_lally\n'), ((883, 894), 'time.time', 'time.time', ([], {}), '()\n', (892, 894), False, 'import time\n'), ((920, 931), 'time.time', 'time.time', ([], {}), '()\n', (929, 931), False, 'import time\n'), ((936, 967), 'mt2.mt2_lally', 'mt2_lally', (['*args'], {'out': 'out_lally'}), '(*args, out=out_lally)\n', (945, 967), False, 'from mt2 import mt2, mt2_lally\n'), ((986, 997), 'time.time', 'time.time', ([], {}), '()\n', (995, 997), False, 'import time\n'), ((1047, 1109), 'numpy.testing.assert_array_almost_equal', 'numpy.testing.assert_array_almost_equal', (['out_lester', 'out_lally'], {}), '(out_lester, out_lally)\n', (1086, 1109), False, 'import numpy\n'), ((193, 219), 'numpy.linspace', 'numpy.linspace', (['(1)', '(200)', 'n1'], {}), '(1, 200, n1)\n', (207, 219), False, 'import numpy\n'), ((250, 276), 'numpy.linspace', 'numpy.linspace', (['(1)', '(200)', 'n2'], {}), '(1, 200, n2)\n', (264, 276), False, 'import numpy\n')] |
import unittest
from unittest.mock import Mock, MagicMock, patch, call
import numpy as np
from pymatgen.core.lattice import Lattice
from pymatgen.core.structure import Molecule, Structure
from pymatgen.core.operations import SymmOp
from bsym.interface.pymatgen import ( unique_symmetry_operations_as_vectors_from_structure,
space_group_from_structure,
parse_site_distribution,
unique_structure_substitutions,
new_structure_from_substitution,
configuration_space_from_structure,
space_group_symbol_from_structure,
configuration_space_from_molecule,
structure_cartesian_coordinates_mapping,
molecule_cartesian_coordinates_mapping )
from itertools import permutations
from bsym import SymmetryOperation, Configuration, SpaceGroup, PointGroup, ConfigurationSpace
class TestPymatgenInterface( unittest.TestCase ):
    """Unit tests for the bsym pymatgen interface helpers."""

    def setUp( self ):
        """Create shared fixtures: an FCC Li structure and a square Li4 molecule."""
        # construct a pymatgen Structure instance using the site fractional
        # coordinates of a face-centered cubic lattice
        coords = np.array( [ [ 0.0, 0.0, 0.0 ],
                            [ 0.5, 0.5, 0.0 ],
                            [ 0.0, 0.5, 0.5 ],
                            [ 0.5, 0.0, 0.5 ] ] )
        atom_list = [ 'Li' ] * len( coords )
        lattice = Lattice.from_parameters( a=3.0, b=3.0, c=3.0, alpha=90, beta=90, gamma=90 )
        self.structure = Structure( lattice, atom_list, coords )
        # construct a pymatgen Molecule instance: a square molecule (D4h),
        # recentred so its centre of mass sits at the origin
        m_coords = np.array( [ [ 0.0, 0.0, 0.0 ],
                              [ 1.0, 0.0, 0.0 ],
                              [ 0.0, 1.0, 0.0 ],
                              [ 1.0, 1.0, 0.0 ] ] )
        molecule = Molecule( atom_list, m_coords )
        molecule = Molecule( molecule.species, molecule.cart_coords - molecule.center_of_mass )
        self.molecule = molecule

    def test_new_structure_from_substitution( self ):
        """Substituted sites carry the new species in the returned structure."""
        substitution_index = [ 2, 3 ]
        new_species_list = [ 'Mg', 'Fe' ]
        s_new = new_structure_from_substitution( self.structure, substitution_index, new_species_list )
        self.assertEqual( s_new[2].species_string, 'Mg' )
        self.assertEqual( s_new[3].species_string, 'Fe' )

    def test_new_structure_from_substitution_raises_ValueError_with_oversize_index( self ):
        """An index list longer than the structure raises ValueError."""
        substitution_index = [ 0, 1, 2, 3, 4 ]
        new_species_list = [ 'Mg', 'Fe' ]
        with self.assertRaises( ValueError ):
            new_structure_from_substitution( self.structure, substitution_index, new_species_list )

    def test_new_structure_from_substitution_raises_ValueError_with_invalid_index( self ):
        """An out-of-range site index raises ValueError."""
        substitution_index = [ 2, 4 ]
        new_species_list = [ 'Mg', 'Fe' ]
        with self.assertRaises( ValueError ):
            new_structure_from_substitution( self.structure, substitution_index, new_species_list )

    def test_parse_site_distribution( self ):
        """The numeric relabelling returned is consistent with the input counts."""
        site_distribution = { 'Mg': 1, 'Li': 3 }
        n, d = parse_site_distribution( site_distribution )
        for k, v in n.items():
            self.assertEqual( site_distribution[ d[ k ] ], v )

    def test_structure_cartesian_coordinates_mapping( self ):
        """The symm op is applied to fractional coords, then converted to cartesian."""
        mock_symmop = Mock( spec=SymmOp )
        new_coords = np.array( [ [ 0.5, 0.5, 0.5 ] ] )
        mock_symmop.operate_multi = Mock( return_value=new_coords )
        self.structure.lattice.get_cartesian_coords = Mock( return_value=np.array( [ [ 2.0, 2.0, 2.0 ] ] ) )
        mapped_coords = structure_cartesian_coordinates_mapping( self.structure, mock_symmop )
        np.testing.assert_array_equal( mapped_coords, np.array( [ [ 2.0, 2.0, 2.0 ] ] ) )
        np.testing.assert_array_equal( mock_symmop.operate_multi.call_args[0][0], self.structure.frac_coords )

    def test_molecule_cartesian_coordinates_mapping( self ):
        """The symm op is applied directly to the molecule's cartesian coords."""
        mock_symmop = Mock( spec=SymmOp )
        # Bug fix: the original literal read `[ 0.5, 0.5, 0,5 ]`, silently
        # producing a four-component coordinate instead of (0.5, 0.5, 0.5).
        new_coords = np.array( [ [ 0.5, 0.5, 0.5 ] ] )
        mock_symmop.operate_multi = Mock( return_value=new_coords )
        mapped_coords = molecule_cartesian_coordinates_mapping( self.molecule, mock_symmop )
        np.testing.assert_array_equal( mapped_coords, new_coords )
        np.testing.assert_array_equal( mock_symmop.operate_multi.call_args[0][0], self.molecule.cart_coords )
# Allow running this test module directly with `python <file>`.
if __name__ == '__main__':
    unittest.main()
| [
"pymatgen.core.lattice.Lattice.from_parameters",
"bsym.interface.pymatgen.parse_site_distribution",
"unittest.mock.Mock",
"pymatgen.core.structure.Structure",
"bsym.interface.pymatgen.structure_cartesian_coordinates_mapping",
"pymatgen.core.structure.Molecule",
"bsym.interface.pymatgen.new_structure_fro... | [((4590, 4605), 'unittest.main', 'unittest.main', ([], {}), '()\n', (4603, 4605), False, 'import unittest\n'), ((1328, 1406), 'numpy.array', 'np.array', (['[[0.0, 0.0, 0.0], [0.5, 0.5, 0.0], [0.0, 0.5, 0.5], [0.5, 0.0, 0.5]]'], {}), '([[0.0, 0.0, 0.0], [0.5, 0.5, 0.0], [0.0, 0.5, 0.5], [0.5, 0.0, 0.5]])\n', (1336, 1406), True, 'import numpy as np\n'), ((1569, 1642), 'pymatgen.core.lattice.Lattice.from_parameters', 'Lattice.from_parameters', ([], {'a': '(3.0)', 'b': '(3.0)', 'c': '(3.0)', 'alpha': '(90)', 'beta': '(90)', 'gamma': '(90)'}), '(a=3.0, b=3.0, c=3.0, alpha=90, beta=90, gamma=90)\n', (1592, 1642), False, 'from pymatgen.core.lattice import Lattice\n'), ((1670, 1707), 'pymatgen.core.structure.Structure', 'Structure', (['lattice', 'atom_list', 'coords'], {}), '(lattice, atom_list, coords)\n', (1679, 1707), False, 'from pymatgen.core.structure import Molecule, Structure\n'), ((1810, 1888), 'numpy.array', 'np.array', (['[[0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [1.0, 1.0, 0.0]]'], {}), '([[0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [1.0, 1.0, 0.0]])\n', (1818, 1888), True, 'import numpy as np\n'), ((2013, 2042), 'pymatgen.core.structure.Molecule', 'Molecule', (['atom_list', 'm_coords'], {}), '(atom_list, m_coords)\n', (2021, 2042), False, 'from pymatgen.core.structure import Molecule, Structure\n'), ((2064, 2138), 'pymatgen.core.structure.Molecule', 'Molecule', (['molecule.species', '(molecule.cart_coords - molecule.center_of_mass)'], {}), '(molecule.species, molecule.cart_coords - molecule.center_of_mass)\n', (2072, 2138), False, 'from pymatgen.core.structure import Molecule, Structure\n'), ((2326, 2415), 'bsym.interface.pymatgen.new_structure_from_substitution', 'new_structure_from_substitution', (['self.structure', 'substitution_index', 'new_species_list'], {}), '(self.structure, substitution_index,\n new_species_list)\n', (2357, 2415), False, 'from bsym.interface.pymatgen import 
unique_symmetry_operations_as_vectors_from_structure, space_group_from_structure, parse_site_distribution, unique_structure_substitutions, new_structure_from_substitution, configuration_space_from_structure, space_group_symbol_from_structure, configuration_space_from_molecule, structure_cartesian_coordinates_mapping, molecule_cartesian_coordinates_mapping\n'), ((3288, 3330), 'bsym.interface.pymatgen.parse_site_distribution', 'parse_site_distribution', (['site_distribution'], {}), '(site_distribution)\n', (3311, 3330), False, 'from bsym.interface.pymatgen import unique_symmetry_operations_as_vectors_from_structure, space_group_from_structure, parse_site_distribution, unique_structure_substitutions, new_structure_from_substitution, configuration_space_from_structure, space_group_symbol_from_structure, configuration_space_from_molecule, structure_cartesian_coordinates_mapping, molecule_cartesian_coordinates_mapping\n'), ((3512, 3529), 'unittest.mock.Mock', 'Mock', ([], {'spec': 'SymmOp'}), '(spec=SymmOp)\n', (3516, 3529), False, 'from unittest.mock import Mock, MagicMock, patch, call\n'), ((3553, 3580), 'numpy.array', 'np.array', (['[[0.5, 0.5, 0.5]]'], {}), '([[0.5, 0.5, 0.5]])\n', (3561, 3580), True, 'import numpy as np\n'), ((3623, 3652), 'unittest.mock.Mock', 'Mock', ([], {'return_value': 'new_coords'}), '(return_value=new_coords)\n', (3627, 3652), False, 'from unittest.mock import Mock, MagicMock, patch, call\n'), ((3788, 3856), 'bsym.interface.pymatgen.structure_cartesian_coordinates_mapping', 'structure_cartesian_coordinates_mapping', (['self.structure', 'mock_symmop'], {}), '(self.structure, mock_symmop)\n', (3827, 3856), False, 'from bsym.interface.pymatgen import unique_symmetry_operations_as_vectors_from_structure, space_group_from_structure, parse_site_distribution, unique_structure_substitutions, new_structure_from_substitution, configuration_space_from_structure, space_group_symbol_from_structure, configuration_space_from_molecule, 
structure_cartesian_coordinates_mapping, molecule_cartesian_coordinates_mapping\n'), ((3957, 4061), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['mock_symmop.operate_multi.call_args[0][0]', 'self.structure.frac_coords'], {}), '(mock_symmop.operate_multi.call_args[0][0],\n self.structure.frac_coords)\n', (3986, 4061), True, 'import numpy as np\n'), ((4144, 4161), 'unittest.mock.Mock', 'Mock', ([], {'spec': 'SymmOp'}), '(spec=SymmOp)\n', (4148, 4161), False, 'from unittest.mock import Mock, MagicMock, patch, call\n'), ((4185, 4213), 'numpy.array', 'np.array', (['[[0.5, 0.5, 0, 5]]'], {}), '([[0.5, 0.5, 0, 5]])\n', (4193, 4213), True, 'import numpy as np\n'), ((4255, 4284), 'unittest.mock.Mock', 'Mock', ([], {'return_value': 'new_coords'}), '(return_value=new_coords)\n', (4259, 4284), False, 'from unittest.mock import Mock, MagicMock, patch, call\n'), ((4311, 4377), 'bsym.interface.pymatgen.molecule_cartesian_coordinates_mapping', 'molecule_cartesian_coordinates_mapping', (['self.molecule', 'mock_symmop'], {}), '(self.molecule, mock_symmop)\n', (4349, 4377), False, 'from bsym.interface.pymatgen import unique_symmetry_operations_as_vectors_from_structure, space_group_from_structure, parse_site_distribution, unique_structure_substitutions, new_structure_from_substitution, configuration_space_from_structure, space_group_symbol_from_structure, configuration_space_from_molecule, structure_cartesian_coordinates_mapping, molecule_cartesian_coordinates_mapping\n'), ((4389, 4445), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['mapped_coords', 'new_coords'], {}), '(mapped_coords, new_coords)\n', (4418, 4445), True, 'import numpy as np\n'), ((4456, 4559), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['mock_symmop.operate_multi.call_args[0][0]', 'self.molecule.cart_coords'], {}), '(mock_symmop.operate_multi.call_args[0][0],\n self.molecule.cart_coords)\n', (4485, 4559), True, 'import numpy as np\n'), 
((2771, 2860), 'bsym.interface.pymatgen.new_structure_from_substitution', 'new_structure_from_substitution', (['self.structure', 'substitution_index', 'new_species_list'], {}), '(self.structure, substitution_index,\n new_species_list)\n', (2802, 2860), False, 'from bsym.interface.pymatgen import unique_symmetry_operations_as_vectors_from_structure, space_group_from_structure, parse_site_distribution, unique_structure_substitutions, new_structure_from_substitution, configuration_space_from_structure, space_group_symbol_from_structure, configuration_space_from_molecule, structure_cartesian_coordinates_mapping, molecule_cartesian_coordinates_mapping\n'), ((3089, 3178), 'bsym.interface.pymatgen.new_structure_from_substitution', 'new_structure_from_substitution', (['self.structure', 'substitution_index', 'new_species_list'], {}), '(self.structure, substitution_index,\n new_species_list)\n', (3120, 3178), False, 'from bsym.interface.pymatgen import unique_symmetry_operations_as_vectors_from_structure, space_group_from_structure, parse_site_distribution, unique_structure_substitutions, new_structure_from_substitution, configuration_space_from_structure, space_group_symbol_from_structure, configuration_space_from_molecule, structure_cartesian_coordinates_mapping, molecule_cartesian_coordinates_mapping\n'), ((3913, 3940), 'numpy.array', 'np.array', (['[[2.0, 2.0, 2.0]]'], {}), '([[2.0, 2.0, 2.0]])\n', (3921, 3940), True, 'import numpy as np\n'), ((3728, 3755), 'numpy.array', 'np.array', (['[[2.0, 2.0, 2.0]]'], {}), '([[2.0, 2.0, 2.0]])\n', (3736, 3755), True, 'import numpy as np\n')] |
import sys
import argparse
import numpy as np
import time
# Artificial target algorithm used to exercise algorithm configurators such
# as GPS, SMAC and ParamILS.  Every invocation logs its arguments, simulates
# a stochastic running time that depends on the instance "difficulty" and on
# the parameters x0, x1 and heuristic, then prints a GPS-style result line.
stats_file = 'stats_old.log'
with open(stats_file, 'a') as f_out:
    f_out.write(str(sys.argv) + '\n')
time.sleep(0)
runtime = 0
try:
    parser = argparse.ArgumentParser()
    parser.add_argument('instance',
                        help='The name of the instance to run (here we treat it as a random seed to determine '
                             'the difficulty of the instance, but it is often a filename).',
                        type=str)
    # NOTE(review): 'instance-spefics' [sic] is kept unchanged so the help
    # output and positional order stay compatible with existing wrappers.
    parser.add_argument('instance-spefics',
                        help='Additional information that your target algorithm needs specific to the instance. '
                             'This field is currently not supported by GPS, which will always pass 0 to your '
                             'target algorithm. It is included for compatability with SMAC and ParamILS.',
                        type=str)
    parser.add_argument('running-time-cutoff',
                        help='The running time cutoff for the target algorithm run. As best as possible, your '
                             'target algorithm should respect this cutoff.',
                        type=float)
    parser.add_argument('run-length',
                        help='The maximum number of iterations for your target algorithm. Currently GPS does '
                             'support this parameter, and will always pass 0 to your target algorithm.',
                        type=str)
    parser.add_argument('seed',
                        help='The random seed to be used by your target algorithm for this run.',
                        type=int)
    parser.add_argument('--x0', type=int)
    parser.add_argument('--x1', type=float)
    parser.add_argument('--heuristic', type=str)
    # For some reason, SMAC and ParamILS use a single dash to represent the
    # parameters of the algorithm.  This is a pain when using argparse, so we
    # double the dash of every long option before parsing.
    new_argv = []
    for arg in sys.argv:
        if arg.startswith('-') and len(arg) > 2:
            arg = '-' + arg
        new_argv.append(arg)
    sys.argv = new_argv
    args = vars(parser.parse_args())
    instance_seed = int(args['instance'])
    cutoff = args['running-time-cutoff']
    seed = args['seed']
    x0 = args['x0']
    x1 = args['x1']
    heuristic = args['heuristic']
    # Let's assume that the difficulty of our instances are distributed
    # according to a normal distribution with a mean of pi and a standard
    # deviation of 0.1.  In practice whether or not this is realistic
    # depends strongly on the homogeneity of the instance set.
    np.random.seed(instance_seed)
    instance_difficulty = np.random.normal(np.pi, 0.1)
    # The algorithm has an exponential running time distribution on this
    # particular instance, with mean equal to the difficulty drawn above.
    np.random.seed(seed)
    run_cost = np.random.exponential(instance_difficulty)
    # Parameter responses are multiplicative on the running time.
    # x0: quadratic penalty with a minimum at 5.
    if x0 < 0 or x0 > 20:
        raise ValueError('x0 must be in [0, 20]. Provided {}.'.format(x0))
    p1 = (x0 - 5)**2 + 1
    # x1: lop-sided penalty with a minimum at 1.  (x1 == 0 deliberately
    # falls through to a ZeroDivisionError, reported as CRASHED below.)
    if x1 < 0 or x1 > 20:
        raise ValueError('x1 must be in [0, 20]. Provided {}.'.format(x1))
    p2 = 1/x1 + x1 - 1
    # heuristic: categorical penalty; 'a' is optimal.
    if heuristic == 'a':
        p3 = 1
    elif heuristic == 'b':
        p3 = 20
    elif heuristic == 'c':
        p3 = 3
    else:
        raise ValueError('heuristic must be in [a, b, c]. Provided {}.'.format(heuristic))
    # Each response has a minimum value of 1, so the optimal configuration
    # (5, 1, 'a') has an expected running time of pi.
    runtime = run_cost*p1*p2*p3
    deterministic_runtime = np.pi*p1*p2*p3
    result = 'SUCCESS'
    if runtime > cutoff:
        runtime = cutoff
        result = 'TIMEOUT'
    misc = ('Miscellaneous extra data fro the run (ignored by GPS) '
            '- deterministic running time {0:.4f} - factor worse than optimal '
            '{1:.10f}'.format(deterministic_runtime, deterministic_runtime/np.pi))
except Exception as e:
    result = 'CRASHED'
    # Commas are replaced with dashes: SMAC and ParamILS don't support
    # commas in the miscellaneous data, so GPS doesn't either.
    # Bug fix: Python 3 exceptions have no `.message` attribute, so the
    # original `e.message` raised an AttributeError that escaped this
    # handler entirely; `str(e)` is the portable spelling.
    misc = str(e).replace(',', ' -')
except:
    # Catches BaseException subclasses (e.g. SystemExit raised by argparse)
    # that the `except Exception` clause above does not.
    result = 'CRASHED'
    misc = 'The artificial algorithm crashed for an unknown reason'
print('Result for GPS: {result}, {runtime}, {solution_quality}, {misc}'
      ''.format(result=result,
                runtime=runtime,
                solution_quality=0,  # Not needed here, and not yet supported by GPS
                misc=misc))
with open(stats_file, 'a') as f_out:
    f_out.write(str(runtime) + '\n')
| [
"numpy.random.normal",
"argparse.ArgumentParser",
"numpy.random.exponential",
"time.sleep",
"numpy.random.seed"
] | [((165, 178), 'time.sleep', 'time.sleep', (['(0)'], {}), '(0)\n', (175, 178), False, 'import time\n'), ((211, 236), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (234, 236), False, 'import argparse\n'), ((2791, 2820), 'numpy.random.seed', 'np.random.seed', (['instance_seed'], {}), '(instance_seed)\n', (2805, 2820), True, 'import numpy as np\n'), ((2847, 2875), 'numpy.random.normal', 'np.random.normal', (['np.pi', '(0.1)'], {}), '(np.pi, 0.1)\n', (2863, 2875), True, 'import numpy as np\n'), ((3140, 3160), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (3154, 3160), True, 'import numpy as np\n'), ((3176, 3218), 'numpy.random.exponential', 'np.random.exponential', (['instance_difficulty'], {}), '(instance_difficulty)\n', (3197, 3218), True, 'import numpy as np\n')] |
import numpy as np
import matplotlib.pyplot as plt

from prefig import Prefig


def _scatter_with_fit(xs, ys, y_errors, slope, intercept, outfile):
    """Draw an errorbar scatter of (xs, ys), overlay the true line, and save."""
    plt.errorbar(xs, ys, y_errors, xerr=None, fmt=' ', marker='D')
    plt.plot(xs, slope * xs + intercept)
    plt.xlabel('measured')
    plt.ylabel('observed')
    plt.savefig(outfile)


# Synthetic linear data y = m*x + c with Gaussian noise on both axes.
x = np.linspace(8, 10, 50) + np.random.normal(0, 0.5, 50)
m, c = 1.5, -3
y = m * x + c + np.random.normal(0, 0.5, 50)
yerr = np.random.normal(0, 0.1, 50)

# Once with Prefig styling applied, once with default matplotlib for comparison.
Prefig()
_scatter_with_fit(x, y, yerr, m, c, 'test_prefig.png')
plt.figure()
_scatter_with_fit(x, y, yerr, m, c, 'test_orig.png')
| [
"numpy.random.normal",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"prefig.Prefig",
"matplotlib.pyplot.figure",
"numpy.linspace",
"matplotlib.pyplot.errorbar"
] | [((195, 223), 'numpy.random.normal', 'np.random.normal', (['(0)', '(0.1)', '(50)'], {}), '(0, 0.1, 50)\n', (211, 223), True, 'import numpy as np\n'), ((223, 231), 'prefig.Prefig', 'Prefig', ([], {}), '()\n', (229, 231), False, 'from prefig import Prefig\n'), ((232, 288), 'matplotlib.pyplot.errorbar', 'plt.errorbar', (['x', 'y', 'yerr'], {'xerr': 'None', 'fmt': '""" """', 'marker': '"""D"""'}), "(x, y, yerr, xerr=None, fmt=' ', marker='D')\n", (244, 288), True, 'import matplotlib.pyplot as plt\n'), ((288, 310), 'matplotlib.pyplot.plot', 'plt.plot', (['x', '(m * x + c)'], {}), '(x, m * x + c)\n', (296, 310), True, 'import matplotlib.pyplot as plt\n'), ((309, 331), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""measured"""'], {}), "('measured')\n", (319, 331), True, 'import matplotlib.pyplot as plt\n'), ((332, 354), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""observed"""'], {}), "('observed')\n", (342, 354), True, 'import matplotlib.pyplot as plt\n'), ((355, 385), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""test_prefig.png"""'], {}), "('test_prefig.png')\n", (366, 385), True, 'import matplotlib.pyplot as plt\n'), ((387, 399), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (397, 399), True, 'import matplotlib.pyplot as plt\n'), ((400, 456), 'matplotlib.pyplot.errorbar', 'plt.errorbar', (['x', 'y', 'yerr'], {'xerr': 'None', 'fmt': '""" """', 'marker': '"""D"""'}), "(x, y, yerr, xerr=None, fmt=' ', marker='D')\n", (412, 456), True, 'import matplotlib.pyplot as plt\n'), ((456, 478), 'matplotlib.pyplot.plot', 'plt.plot', (['x', '(m * x + c)'], {}), '(x, m * x + c)\n', (464, 478), True, 'import matplotlib.pyplot as plt\n'), ((477, 499), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""measured"""'], {}), "('measured')\n", (487, 499), True, 'import matplotlib.pyplot as plt\n'), ((500, 522), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""observed"""'], {}), "('observed')\n", (510, 522), True, 'import matplotlib.pyplot as plt\n'), ((523, 551), 
'matplotlib.pyplot.savefig', 'plt.savefig', (['"""test_orig.png"""'], {}), "('test_orig.png')\n", (534, 551), True, 'import matplotlib.pyplot as plt\n'), ((82, 104), 'numpy.linspace', 'np.linspace', (['(8)', '(10)', '(50)'], {}), '(8, 10, 50)\n', (93, 104), True, 'import numpy as np\n'), ((104, 132), 'numpy.random.normal', 'np.random.normal', (['(0)', '(0.5)', '(50)'], {}), '(0, 0.5, 50)\n', (120, 132), True, 'import numpy as np\n'), ((161, 189), 'numpy.random.normal', 'np.random.normal', (['(0)', '(0.5)', '(50)'], {}), '(0, 0.5, 50)\n', (177, 189), True, 'import numpy as np\n')] |
import pandas as pd
import numpy as np
import gpxpy
import math
import urllib.error
from sharingMobilityAPI import sharingMobilityAroundLocation
from stadtRadApi import amountStadtRadAvailable
class HVVCoordinateMapper:
def __init__(self):
self.df = None
self.stop_to_index = {}
self.lat_lon_to_index = {}
self.index_to_lat_lon = {}
self.bike_coordinates = []
self.stop_to_parent = {}
self._load_bus_data()
self._load_bike_data()
def _load_bus_data(self, file="data/stops.txt"):
print('Loading bus data')
self.df = pd.read_csv(filepath_or_buffer=file, sep=",")
for i, row in self.df.iterrows():
# only record coordinates of 'parent stations'
if isinstance(row["parent_station"], float) and math.isnan(row["parent_station"]):
self.stop_to_index[row["stop_name"]] = i
self.lat_lon_to_index[(row["stop_lat"], row["stop_lon"])] = i
self.index_to_lat_lon[i] = (row["stop_lat"], row["stop_lon"])
else:
self.stop_to_parent[row["stop_id"]] = row["parent_station"]
def _load_bike_data(self, file='data/1-StadtRAD_Hamburg_Stationen.gpx'):
print('Loading bike data')
with open(file, 'r') as f:
gpx = gpxpy.parse(f)
for p in gpx.waypoints:
self.bike_coordinates.append((p.latitude, p.longitude))
def stop_to_coordinates(self, stop_name):
row = self.df.iloc[self.stop_to_index[stop_name]]
return row["stop_lat"], row["stop_lon"]
def coordinates_to_stop(self, latitude, longitude):
row = self.df.iloc[self.lat_lon_to_index[(latitude, longitude)]]
return row["stop_name"]
@staticmethod
def get_distance(lat1, lon1, lat2, lon2):
""" Calculate euclidean distance between two points, defined by their latitude and longitude"""
start_vec = np.array([lat1, lon1])
dest_vec = np.array([lat2, lon2])
dist = np.linalg.norm(start_vec - dest_vec) # euclidean distance between start and destination coordinates
return dist
def bike_stations_in_range(self, lat_start, lon_start, range=0.01):
bike_stations = []
for (lat, lon) in amountStadtRadAvailable().keys():
dist = self.get_distance(lat_start, lon_start, lat, lon)
if dist <= range:
bike_stations.append((lat, lon, dist))
return bike_stations
def bus_stations_in_range(self, lat_start, lon_start, range=0.011):
stations = []
for name, index in self.stop_to_index.items():
(lat, lon) = self.index_to_lat_lon[index]
dist = self.get_distance(lat_start, lon_start, lat, lon)
if dist <= range:
stations.append((name, dist))
return stations
def cars_in_range(self, lat_start, lon_start, range=0.008):
nearby_cars = []
try:
for car in sharingMobilityAroundLocation(lat_start, lon_start):
dist = self.get_distance(lat_start, lon_start, float(car[0]), float(car[1]))
if dist <= range:
nearby_cars.append((float(car[0]), float(car[1]), dist))
except urllib.error.HTTPError:
return []
return nearby_cars
def get_bike_capacity(self, lat, lon):
bikes = amountStadtRadAvailable()
num_available_bikes = 0
for station in self.bike_stations_in_range(lat, lon):
num_available_bikes += bikes[(station[0], station[1])]
return max(num_available_bikes, 40)
def get_car_capacity(self, lat, lon):
return len(self.cars_in_range(lat, lon))
def get_opvn_capacity(self, lat, lon, stranded_ppl):
return 250 - stranded_ppl
def get_passenger_distribution(self, lat, lon, num_passengers, predictor,
scenarios, weights):
"""Wichtig.
lat, lon - wo die Strecke ausfällt
num_passengers - absolute Anzahl an Passagieren, die Strecke genutzt hätte
predictor - Predictor-Objekt, welches das Modell geladen hat
scenarios - model inputs (einer für jede mögliche Zielstation)
weights - Gewichte für jede Zielstation (Wahrscheinlichkeiten)
"""
distribution = predictor.predict(scenarios, weights)
abs_distribution = distribution * num_passengers
actual_distribution = self.get_actual_distribution(abs_distribution, lat, lon)
return actual_distribution
def get_actual_distribution(self, abs_dist, lat, lon):
"""Caps the absolute distribution at the max capacities
Also returns the intended distribution for calculating the passengers
that need to be transported by different means.
"""
bike_capacity = self.get_bike_capacity(lat, lon)
car_capacity = self.get_car_capacity(lat, lon)
opvn_capacity = self.get_opvn_capacity(lat, lon)
foot_capacity = abs_dist["foot"]
deficits = {"bike_actual": min(bike_capacity, abs_dist["bike"]),
"car_actual": min(car_capacity, abs_dist["car"]),
"opvn_actual": min(opvn_capacity, abs_dist["opvn"]),
"foot_actual": foot_capacity}
# return (abs_distribution, capped_distribution)
if __name__ == "__main__":
mapper = HVVCoordinateMapper()
lat, lon = mapper.stop_to_coordinates("Bornkampsweg")
print(lat, lon)
bike_stations = mapper.bike_stations_in_range(lat, lon)
print(bike_stations)
lat, lon = mapper.stop_to_coordinates("Bornkampsweg")
stations = mapper.bus_stations_in_range(lat, lon)
for s in stations:
print(s)
| [
"stadtRadApi.amountStadtRadAvailable",
"pandas.read_csv",
"math.isnan",
"numpy.array",
"numpy.linalg.norm",
"sharingMobilityAPI.sharingMobilityAroundLocation",
"gpxpy.parse"
] | [((606, 651), 'pandas.read_csv', 'pd.read_csv', ([], {'filepath_or_buffer': 'file', 'sep': '""","""'}), "(filepath_or_buffer=file, sep=',')\n", (617, 651), True, 'import pandas as pd\n'), ((1940, 1962), 'numpy.array', 'np.array', (['[lat1, lon1]'], {}), '([lat1, lon1])\n', (1948, 1962), True, 'import numpy as np\n'), ((1982, 2004), 'numpy.array', 'np.array', (['[lat2, lon2]'], {}), '([lat2, lon2])\n', (1990, 2004), True, 'import numpy as np\n'), ((2020, 2056), 'numpy.linalg.norm', 'np.linalg.norm', (['(start_vec - dest_vec)'], {}), '(start_vec - dest_vec)\n', (2034, 2056), True, 'import numpy as np\n'), ((3388, 3413), 'stadtRadApi.amountStadtRadAvailable', 'amountStadtRadAvailable', ([], {}), '()\n', (3411, 3413), False, 'from stadtRadApi import amountStadtRadAvailable\n'), ((1321, 1335), 'gpxpy.parse', 'gpxpy.parse', (['f'], {}), '(f)\n', (1332, 1335), False, 'import gpxpy\n'), ((2983, 3034), 'sharingMobilityAPI.sharingMobilityAroundLocation', 'sharingMobilityAroundLocation', (['lat_start', 'lon_start'], {}), '(lat_start, lon_start)\n', (3012, 3034), False, 'from sharingMobilityAPI import sharingMobilityAroundLocation\n'), ((813, 846), 'math.isnan', 'math.isnan', (["row['parent_station']"], {}), "(row['parent_station'])\n", (823, 846), False, 'import math\n'), ((2267, 2292), 'stadtRadApi.amountStadtRadAvailable', 'amountStadtRadAvailable', ([], {}), '()\n', (2290, 2292), False, 'from stadtRadApi import amountStadtRadAvailable\n')] |
'''
20160112 <NAME>
Plot the original Snobal vs pySnobal
'''
import numpy as np
import pandas as pd
# from mpl_toolkits.axes_grid1 import host_subplot
import matplotlib.pyplot as plt
import os
#------------------------------------------------------------------------------
# read the input and output files
output_label = np.array(['time_s','R_n','H','L_v_E','G','M','delta_Q','G_0','delta_Q_0',
'cc_s_0','cc_s_l','cc_s','E_s','melt','ro_predict','z_s_0','z_s_l',
'z_s','rho','m_s_0','m_s_l','m_s','h2o','T_s_0','T_s_l','T_s'])
# org = np.loadtxt('snobal.output.all')
# new = np.loadtxt('snobal.out')
org = pd.read_csv('snobal.original', sep=' ', index_col=[0], names=output_label)
# new = pd.read_csv('snobal.exact.out', sep=',', index_col=[0], names=output_label)
# new = pd.read_csv('snobal.out', sep=',', index_col=[0], names=output_label)
new = pd.read_csv('../test_data_spatial/snobal.out', sep=',', index_col=[0], names=output_label)
d = org - new
#------------------------------------------------------------------------------
# labels and such
data_label = np.array(['S_n', 'I_lw', 'T_a', 'e_a', 'u', 'T_g'])
ppt_label = np.array(['m_ppt','%_snow','rho_snow','T_pp'])
output_em = np.array([1,2,3,4,5,6,7,8,11,12,13])-1
output_snow = np.array([17,18,21,23,24,25,16,22])-1
#------------------------------------------------------------------------------
# plot the original outputs
f, axo = plt.subplots(3, 2, sharex=True)
org.plot(y=output_em, ax=axo[0][0], ylim=(-1500,1000))
axo[0][0].set_title('EM original')
org.plot(y=output_snow, ax=axo[0][1])
axo[0][1].set_title('SNOW original')
new.plot(y=output_em, ax=axo[1][0], ylim=(-1500,1000))
axo[1][0].set_title('EM new')
new.plot(y=output_snow, ax=axo[1][1])
axo[1][1].set_title('SNOW new')
d.plot(y=output_em, ax=axo[2][0], ylim=(-1500,1000))
axo[2][0].set_title('EM diff')
d.plot(y=output_snow, ax=axo[2][1])
axo[2][1].set_title('SNOW diff')
plt.show()
# # axo[0][0].plot(org_time, org[:,output_em])
# # axo[0][0].legend(output_label[output_em], loc=2)
# # axo[0][0].set_ylim([-1500,1500])
# # axo[0][0].set_title('EM original')
#
# axo[0][1].plot(org_time, org[:,output_snow])
# # axo[0][1].legend(output_label[output_snow], loc=2)
# axo[0][1].set_title('SNOW original')
#
# #------------------------------------------------------------------------------
# # plot the new outputs
#
# axo[1][0].plot(new_time, new[:,output_em])
# # axo[1][0].legend(output_label[output_em], loc=2)
# axo[1][0].set_ylim([-1500,1500])
# axo[1][0].set_title('EM new')
#
# axo[1][1].plot(new_time, new[:,output_snow])
# # axo[1][1].legend(output_label[output_snow], loc=2)
# axo[1][1].set_title('SNOW new')
#------------------------------------------------------------------------------
# plot the difference
# axo[2][0].plot(org[:,output_em]-new[:,output_em])
# # axo[2][0].legend(output_label[output_em], loc=2)
# axo[2][0].set_ylim([-1500,1500])
# axo[2][0].set_title('EM difference')
#
# axo[2][1].plot(org[:,output_snow]-new[:,output_snow])
# # axo[2][1].legend(output_label[output_snow], loc=2)
# axo[2][1].set_title('SNOW difference')
plt.show()
| [
"numpy.array",
"matplotlib.pyplot.subplots",
"pandas.read_csv",
"matplotlib.pyplot.show"
] | [((329, 569), 'numpy.array', 'np.array', (["['time_s', 'R_n', 'H', 'L_v_E', 'G', 'M', 'delta_Q', 'G_0', 'delta_Q_0',\n 'cc_s_0', 'cc_s_l', 'cc_s', 'E_s', 'melt', 'ro_predict', 'z_s_0',\n 'z_s_l', 'z_s', 'rho', 'm_s_0', 'm_s_l', 'm_s', 'h2o', 'T_s_0', 'T_s_l',\n 'T_s']"], {}), "(['time_s', 'R_n', 'H', 'L_v_E', 'G', 'M', 'delta_Q', 'G_0',\n 'delta_Q_0', 'cc_s_0', 'cc_s_l', 'cc_s', 'E_s', 'melt', 'ro_predict',\n 'z_s_0', 'z_s_l', 'z_s', 'rho', 'm_s_0', 'm_s_l', 'm_s', 'h2o', 'T_s_0',\n 'T_s_l', 'T_s'])\n", (337, 569), True, 'import numpy as np\n'), ((639, 713), 'pandas.read_csv', 'pd.read_csv', (['"""snobal.original"""'], {'sep': '""" """', 'index_col': '[0]', 'names': 'output_label'}), "('snobal.original', sep=' ', index_col=[0], names=output_label)\n", (650, 713), True, 'import pandas as pd\n'), ((882, 976), 'pandas.read_csv', 'pd.read_csv', (['"""../test_data_spatial/snobal.out"""'], {'sep': '""","""', 'index_col': '[0]', 'names': 'output_label'}), "('../test_data_spatial/snobal.out', sep=',', index_col=[0],\n names=output_label)\n", (893, 976), True, 'import pandas as pd\n'), ((1102, 1153), 'numpy.array', 'np.array', (["['S_n', 'I_lw', 'T_a', 'e_a', 'u', 'T_g']"], {}), "(['S_n', 'I_lw', 'T_a', 'e_a', 'u', 'T_g'])\n", (1110, 1153), True, 'import numpy as np\n'), ((1166, 1215), 'numpy.array', 'np.array', (["['m_ppt', '%_snow', 'rho_snow', 'T_pp']"], {}), "(['m_ppt', '%_snow', 'rho_snow', 'T_pp'])\n", (1174, 1215), True, 'import numpy as np\n'), ((1437, 1468), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(3)', '(2)'], {'sharex': '(True)'}), '(3, 2, sharex=True)\n', (1449, 1468), True, 'import matplotlib.pyplot as plt\n'), ((1949, 1959), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1957, 1959), True, 'import matplotlib.pyplot as plt\n'), ((3144, 3154), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3152, 3154), True, 'import matplotlib.pyplot as plt\n'), ((1227, 1273), 'numpy.array', 'np.array', (['[1, 2, 3, 4, 5, 6, 7, 8, 11, 12, 13]'], 
{}), '([1, 2, 3, 4, 5, 6, 7, 8, 11, 12, 13])\n', (1235, 1273), True, 'import numpy as np\n'), ((1280, 1322), 'numpy.array', 'np.array', (['[17, 18, 21, 23, 24, 25, 16, 22]'], {}), '([17, 18, 21, 23, 24, 25, 16, 22])\n', (1288, 1322), True, 'import numpy as np\n')] |
import pandas as pd
import numpy as np
# Loads embeddings stored in Glove-vector text format
def loadEmbeddings(fileName, sep='\t'):
    """Read Glove-style embeddings: one word per row, followed by its vector.

    Returns a pair (words, vectors) where `vectors` is a float32 2-D array
    whose rows align with `words`.
    """
    frame = pd.read_csv(fileName, sep=sep, header=None)
    vocab = frame[0].values
    vectors = frame.drop(columns=[0]).values.astype(np.float32)
    return vocab, vectors
def createEmbedMap(words):
    """Map each word to its row index in the embedding matrix.

    If a word occurs more than once, the last occurrence wins (same as the
    sequential-assignment original).
    """
    return {word: idx for idx, word in enumerate(words)}
def robustCosineSimil(x, y, eps=1e-10):
    """Cosine similarity that, unlike scipy's, does not choke on zero vectors.

    Each norm is floored at `eps`, so a zero vector yields similarity 0
    instead of a division-by-zero NaN.
    """
    normX = max(np.sqrt(np.sum(x * x)), eps)
    normY = max(np.sqrt(np.sum(y * y)), eps)
    return np.sum(x * y) / (normX * normY)
| [
"numpy.sum",
"pandas.read_csv"
] | [((149, 192), 'pandas.read_csv', 'pd.read_csv', (['fileName'], {'sep': 'sep', 'header': 'None'}), '(fileName, sep=sep, header=None)\n', (160, 192), True, 'import pandas as pd\n'), ((552, 565), 'numpy.sum', 'np.sum', (['(x * x)'], {}), '(x * x)\n', (558, 565), True, 'import numpy as np\n'), ((586, 599), 'numpy.sum', 'np.sum', (['(y * y)'], {}), '(y * y)\n', (592, 599), True, 'import numpy as np\n'), ((664, 677), 'numpy.sum', 'np.sum', (['(x * y)'], {}), '(x * y)\n', (670, 677), True, 'import numpy as np\n')] |
import numpy as np
import tensorflow as tf
from tensorflow.keras.models import Model
from tensorflow.keras.layers import *
from tensorflow.keras.preprocessing import sequence
from tf2bert.layers import MaskedGlobalMaxPooling1D
from tf2bert.text.tokenizers import Tokenizer
from tf2bert.models import build_transformer
import dataset
# BERT在文本匹配问题中的应用,siamese双塔架构
# https://arxiv.org/pdf/1908.10084.pdf
def batch_pad(X, maxlen=None, dtype="int32"):
    """Right-pad (and right-truncate) every sequence in X to one common length.

    When `maxlen` is None, the longest sequence in the batch sets the length.
    """
    if maxlen is None:
        maxlen = max(len(seq) for seq in X)
    return sequence.pad_sequences(
        X,
        maxlen=maxlen,
        dtype=dtype,
        padding="post",
        truncating="post",
        value=0,
    )
class DataGenerator(tf.keras.utils.Sequence):
    """Keras Sequence yielding tokenized sentence-pair batches for the siamese model.

    Each item is ([(token_ids1, segment_ids1), (token_ids2, segment_ids2)],
    one_hot_labels). X1/X2 are parallel lists of sentences, y the labels.
    """

    def __init__(self, X1, X2, y, tokenizer, num_classes, batch_size, maxlen):
        self.X1 = X1
        self.X2 = X2
        self.y = y
        self.tokenizer = tokenizer
        self.num_classes = num_classes
        self.batch_size = batch_size
        self.maxlen = maxlen

    def __len__(self):
        # Number of full batches; the trailing partial batch is dropped.
        return len(self.y) // self.batch_size

    def _encode(self, texts):
        """Tokenize a slice of sentences and pad both id arrays to maxlen."""
        token_ids, segment_ids = self.tokenizer.batch_encode(texts, maxlen=self.maxlen)
        return batch_pad(token_ids, self.maxlen), batch_pad(segment_ids, self.maxlen)

    def __getitem__(self, index):
        i = index * self.batch_size
        j = i + self.batch_size
        batch_token_ids1, batch_segment_ids1 = self._encode(self.X1[i:j])
        batch_token_ids2, batch_segment_ids2 = self._encode(self.X2[i:j])
        # Bug fix: use the instance's num_classes instead of the module-level
        # global that happened to share its name.
        batch_labels = tf.keras.utils.to_categorical(self.y[i:j], self.num_classes)
        return [(batch_token_ids1, batch_segment_ids1),
                (batch_token_ids2, batch_segment_ids2)], batch_labels

    def on_epoch_end(self):
        # Shuffle all three parallel lists with the same fixed seed so the
        # sentence pairs and their labels stay aligned.
        for seq in (self.X1, self.X2, self.y):
            np.random.RandomState(773).shuffle(seq)
def split_kfolds(X1, X2, y, n_splits=8):
    """Deterministically split three parallel lists into train/test folds.

    Every sample whose position satisfies i % n_splits == 1 goes to the test
    fold; everything else is train. Returns
    ((X1_train, X2_train, y_train), (X1_test, X2_test, y_test)).
    """
    train, test = [], []
    for seq in (X1, X2, y):
        train.append([item for i, item in enumerate(seq) if i % n_splits != 1])
        test.append([item for i, item in enumerate(seq) if i % n_splits == 1])
    return tuple(train), tuple(test)
# Local paths to the pretrained Chinese BERT-base checkpoint (config, vocab, weights).
config_path = "/home/zhiwen/workspace/dataset/bert/chinese_L-12_H-768_A-12/bert_config.json"
token_dict_path = "/home/zhiwen/workspace/dataset/bert/chinese_L-12_H-768_A-12/vocab.txt"
checkpoint_path = "/home/zhiwen/workspace/dataset/bert/chinese_L-12_H-768_A-12/bert_model.ckpt"

# LCQMC sentence-pair data: X1/X2 are the two sentence lists, y the labels.
X1, X2, y, classes = dataset.load_lcqmc()
num_classes = len(classes)
maxlen = 32 # mind memory usage: activations grow with sequence length
# Load the tokenizer
tokenizer = Tokenizer(token_dict_path, use_lower_case=True)
# The backbone transformer can be swapped as needed
bert = build_transformer(
    model="bert",
    config_path=config_path,
    checkpoint_path=checkpoint_path,
    verbose=False
)
# Shared layers reused by both towers (siamese weight sharing).
pool = MaskedGlobalMaxPooling1D(return_scores=False)
dropout = Dropout(rate=0.2)
layernorm = LayerNormalization()

def bert_encode(x):
    """Encode one (token_ids, segment_ids) pair into a fixed-size sentence vector."""
    x = bert(x)
    x = pool(x)
    # x = dropout(x)
    return x

def matching(x1, x2):
    """Combine the two sentence vectors as [x1, x2, x1*x2, |x1-x2|] (SBERT-style)."""
    # x * y: element-wise interaction term
    x3 = Multiply()([x1, x2])
    # |x - y|: element-wise absolute difference
    x4 = Lambda(lambda x: tf.abs(x[0] - x[1]))([x1, x2])
    x = Concatenate()([x1, x2, x3, x4])
    x = layernorm(x)
    return x

# Two towers; each takes the token ids and segment ids of one sentence.
x1_input = Input(shape=(maxlen,), dtype=tf.int32)
s1_input = Input(shape=(maxlen,), dtype=tf.int32)
x2_input = Input(shape=(maxlen,), dtype=tf.int32)
s2_input = Input(shape=(maxlen,), dtype=tf.int32)
input1 = [x1_input, s1_input]
input2 = [x2_input, s2_input]
x1 = bert_encode(input1)
x2 = bert_encode(input2)
x = matching(x1, x2)
outputs = Dense(num_classes, activation="softmax")(x)

model = Model(inputs=[input1, input2], outputs=outputs)
model.summary()
model.compile(
    loss="categorical_crossentropy",
    optimizer=tf.keras.optimizers.Adam(1e-5),
    metrics=["accuracy"]
)
if __name__ == "__main__":
    print(__file__)
    batch_size = 32
    epochs = 10
    # Deterministic split: every sample with index % 5 == 1 is held out.
    (X1_train, X2_train, y_train), \
    (X1_test, X2_test, y_test) = split_kfolds(X1, X2, y, 5)
    dataset_train = DataGenerator(X1_train, X2_train, y_train, tokenizer, num_classes, batch_size, maxlen)
    # Bug fix: the validation generator was built from (X2_test, X2_test),
    # pairing the second sentence with itself; it must see (X1_test, X2_test).
    dataset_val = DataGenerator(X1_test, X2_test, y_test, tokenizer, num_classes, batch_size, maxlen)
    model.fit(
        dataset_train,
        batch_size=batch_size,
        epochs=epochs,
        validation_data=dataset_val,
        validation_batch_size=batch_size
    )
| [
"tf2bert.layers.MaskedGlobalMaxPooling1D",
"tensorflow.keras.utils.to_categorical",
"tf2bert.text.tokenizers.Tokenizer",
"tensorflow.keras.preprocessing.sequence.pad_sequences",
"tf2bert.models.build_transformer",
"tensorflow.keras.optimizers.Adam",
"numpy.random.RandomState",
"tensorflow.keras.models... | [((2917, 2937), 'dataset.load_lcqmc', 'dataset.load_lcqmc', ([], {}), '()\n', (2935, 2937), False, 'import dataset\n'), ((3011, 3058), 'tf2bert.text.tokenizers.Tokenizer', 'Tokenizer', (['token_dict_path'], {'use_lower_case': '(True)'}), '(token_dict_path, use_lower_case=True)\n', (3020, 3058), False, 'from tf2bert.text.tokenizers import Tokenizer\n'), ((3079, 3188), 'tf2bert.models.build_transformer', 'build_transformer', ([], {'model': '"""bert"""', 'config_path': 'config_path', 'checkpoint_path': 'checkpoint_path', 'verbose': '(False)'}), "(model='bert', config_path=config_path, checkpoint_path=\n checkpoint_path, verbose=False)\n", (3096, 3188), False, 'from tf2bert.models import build_transformer\n'), ((3212, 3257), 'tf2bert.layers.MaskedGlobalMaxPooling1D', 'MaskedGlobalMaxPooling1D', ([], {'return_scores': '(False)'}), '(return_scores=False)\n', (3236, 3257), False, 'from tf2bert.layers import MaskedGlobalMaxPooling1D\n'), ((4007, 4054), 'tensorflow.keras.models.Model', 'Model', ([], {'inputs': '[input1, input2]', 'outputs': 'outputs'}), '(inputs=[input1, input2], outputs=outputs)\n', (4012, 4054), False, 'from tensorflow.keras.models import Model\n'), ((523, 624), 'tensorflow.keras.preprocessing.sequence.pad_sequences', 'sequence.pad_sequences', (['X'], {'maxlen': 'maxlen', 'dtype': 'dtype', 'padding': '"""post"""', 'truncating': '"""post"""', 'value': '(0)'}), "(X, maxlen=maxlen, dtype=dtype, padding='post',\n truncating='post', value=0)\n", (545, 624), False, 'from tensorflow.keras.preprocessing import sequence\n'), ((1748, 1803), 'tensorflow.keras.utils.to_categorical', 'tf.keras.utils.to_categorical', (['self.y[i:j]', 'num_classes'], {}), '(self.y[i:j], num_classes)\n', (1777, 1803), True, 'import tensorflow as tf\n'), ((4137, 4168), 'tensorflow.keras.optimizers.Adam', 'tf.keras.optimizers.Adam', (['(1e-05)'], {}), '(1e-05)\n', (4161, 4168), True, 'import tensorflow as tf\n'), ((1968, 1994), 'numpy.random.RandomState', 
'np.random.RandomState', (['(773)'], {}), '(773)\n', (1989, 1994), True, 'import numpy as np\n'), ((2020, 2046), 'numpy.random.RandomState', 'np.random.RandomState', (['(773)'], {}), '(773)\n', (2041, 2046), True, 'import numpy as np\n'), ((2072, 2098), 'numpy.random.RandomState', 'np.random.RandomState', (['(773)'], {}), '(773)\n', (2093, 2098), True, 'import numpy as np\n'), ((3507, 3526), 'tensorflow.abs', 'tf.abs', (['(x[0] - x[1])'], {}), '(x[0] - x[1])\n', (3513, 3526), True, 'import tensorflow as tf\n')] |
from tslearn.utils import to_time_series_dataset
from tslearn.clustering import silhouette_score
import tslearn.clustering as clust
from scipy import signal
import itertools
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from gippy import GeoImage
import gippy.algorithms as alg
import re
from os import listdir, walk
def calulate_indices(filepath, asset_dict, indices):
    ''' Create image files for spectral indices from downloaded satellite scenes.

    :param filepath (str): Full path to directory containing satellite scenes in default structure created
        by sat-search load --download
    :param asset_dict (dict): Keys = asset (band) names in scene files (e.g. 'B01', 'B02'); Values = value names
        corresponding to keys (e.g. 'red', 'nir')
    :param indices (list): Which indices to generate? Options include any index included in gippy.alg.indices
    :return: None (writes one 'index_<name>.tif' per requested index into each scene folder)
    '''
    # walk() yields the root directory first; drop it so only scene subfolders remain.
    subdirs = [x[0] for x in walk(filepath)]
    subdirs = subdirs[1:len(subdirs)]
    for folder in subdirs:
        # Filepath points to folder of geotiffs of Sentinel 2 time-series of bands 4 (red) and 8 (nir)
        files = [folder + '/' + f for f in listdir(folder) if not f.startswith('.')]
        # Asset (band) names: the regex captures the token between the last '_' and the extension.
        pattern = '[^_.]+(?=\.[^_.]*$)'
        bands = [re.search(pattern, f).group(0) for f in files]
        # Map raw asset names to semantic band names; unknown names pass through unchanged.
        bands = [asset_dict.get(band, band) for band in bands]
        img = GeoImage.open(filenames=files, bandnames=bands, nodata=0)
        for ind in indices:
            alg.indices(img, products=[ind], filename=folder + '/index_' + ind + '.tif')
        # Drop the image handle before moving to the next folder.
        img = None
def apply_savgol(x, value, window, poly):
    """Apply Savitzky-Golay smoothing to one column of a (grouped) dataframe.

    Parameters
    ----------
    x: (pd.DataFrame.groupby) Grouped dataframe object
    value (str): name of the column to smooth
    window (int): passed to signal.savgol_filter as 'window_length'
    poly (int): passed to signal.savgol_filter as 'polyorder'

    Returns
    -------
    x: the same object with column `value` replaced by its smoothed version
    """
    smoothed = signal.savgol_filter(x[value], window_length=window, polyorder=poly)
    x[value] = smoothed
    return x
class TimeSeriesSample:
    """Random per-pixel sample of a long-format time-series dataframe.

    Expects `time_series_df` with at least the columns ('lc', 'pixel',
    'array_index', 'date') plus the value column named by `ts_var`.
    """

    def __init__(self, time_series_df, n_samples, ts_var, seed):
        # Take random `n_samples` of pixels from time-series dataframe
        self.ts_var = ts_var
        self.group = time_series_df.groupby(['lc', 'pixel', 'array_index'])
        self.arranged_group = np.arange(self.group.ngroups)
        # Ensure same pixels are sampled each time function is run when same `n_samples` parameter is supplied
        np.random.seed(seed)
        np.random.shuffle(self.arranged_group)
        # Take the random sample
        self.sample = time_series_df[self.group.ngroup().isin(self.arranged_group[:n_samples])]
        # Normalise dates to 'YYYY-MM-DD' strings when they are datetimes (dtype != object).
        if self.sample['date'].dtype != 'O':
            self.sample['date'] = self.sample['date'].dt.strftime('%Y-%m-%d')
        self.sample_dates = self.sample['date'].unique()
        # One list of `ts_var` values per (lc, pixel, array_index) group.
        self.tslist = self.sample.groupby(['lc', 'pixel', 'array_index'])[self.ts_var].apply(list)
        self.dataset = None

    def smooth(self, window=7, poly=3):
        """Savitzky-Golay smooth each per-pixel series; returns self for chaining."""
        self.sample = self.sample.groupby(['lc', 'pixel', 'array_index']).apply(apply_savgol, self.ts_var, window, poly)
        self.tslist = self.sample.groupby(['lc', 'pixel', 'array_index'])[self.ts_var].apply(list)
        return self

    @ property
    def ts_dataset(self):
        """tslearn-formatted 3-D dataset built from the (possibly smoothed) sample."""
        #tslist = self.sample.groupby(['lc', 'pixel', 'array_index'])[self.ts_var].apply(list)
        self.dataset = to_time_series_dataset(self.tslist)
        return self.dataset
def cluster_time_series(ts_sample, cluster_alg, n_clusters, cluster_metric, score=False):
    """Cluster the sampled time-series and attach the predicted labels.

    :param ts_sample: TimeSeriesSample providing `tslist`, `sample_dates` and `ts_dataset`
    :param cluster_alg: 'GAKM' (global-alignment kernel k-means) or 'TSKM' (time-series k-means)
    :param n_clusters: number of clusters to fit
    :param cluster_metric: metric passed to TimeSeriesKMeans (ignored for GAKM)
    :param score: also return the silhouette score of the labelling
    :return: dataframe of series with a 'cluster' column; plus the silhouette
        score when `score` is True
    """
    # One row per sampled series, one column per observation date.
    clust_df = pd.DataFrame(ts_sample.tslist.tolist(), index=ts_sample.tslist.index).reset_index()
    clust_df.columns.values[3:] = ts_sample.sample_dates
    # Fit model. Bug fix: an unrecognised algorithm previously fell through and
    # crashed later with NameError on `km`; fail fast with a clear message.
    if cluster_alg == "GAKM":
        km = clust.GlobalAlignmentKernelKMeans(n_clusters=n_clusters)
    elif cluster_alg == "TSKM":
        km = clust.TimeSeriesKMeans(n_clusters=n_clusters, metric=cluster_metric)
    else:
        raise ValueError("cluster_alg must be 'GAKM' or 'TSKM', got %r" % (cluster_alg,))
    # Add predicted cluster labels to cluster results dataframe
    labels = km.fit_predict(ts_sample.ts_dataset)
    clust_df['cluster'] = labels
    if score:
        s = silhouette_score(ts_sample.ts_dataset, labels)
        return clust_df, s
    return clust_df
def cluster_grid_search(parameter_grid):
    '''Explore every parameter combination of `cluster_time_series`.

    :param parameter_grid: (dict) parameter grid containing all parameter values to explore
    :return: 1) dictionary with cluster labels and silhouette scores 2) dataframe with parameter combinations
        and corresponding silhouette score
    '''
    # Materialise the full cartesian product of the grid once.
    combos = [dict(zip(parameter_grid, combo))
              for combo in itertools.product(*parameter_grid.values())]
    # Parameter table for the scores; the sample object itself is not a parameter.
    results = pd.DataFrame(combos).drop(['ts_sample'], axis=1)
    # Run the clustering for every combination, collecting labels and scores.
    output = {'clusters': [], 'scores': []}
    for params in combos:
        clusters, score = cluster_time_series(**params)
        output['clusters'].append(clusters)
        output['scores'].append(score)
    # Add silhouette scores to dataframe
    results['sil_score'] = output['scores']
    return output, results
def cluster_mean_quantiles(df):
    '''Calculate mean and 10th, 90th percentile for each cluster at all dates in time series

    :param df: dataframe output from `cluster_ndvi_ts`; the first three columns are
        group keys, the last column is 'cluster', everything in between is one
        value per date
    :return: two dataframes: one for mean time-series per-cluster, one for quantile time-series per-cluster
    '''
    # Columns with ndvi values
    cols = df.columns[3:-1]
    # Cluster means at each time-step; the transpose turns dates into the index
    # with one column per cluster.
    m = df.groupby('cluster', as_index=False)[cols].mean().T.reset_index()
    # Drop the leading 'cluster' row produced by the transpose.
    m = m.iloc[1:]
    m.rename(columns={'index':'date'}, inplace=True)
    m.set_index('date', drop=True, inplace=True)
    m.index = pd.to_datetime(m.index)
    # Cluster 10th and 90th percentile at each time-step (two adjacent columns
    # per cluster after the transpose).
    q = df.groupby('cluster', as_index=False)[cols].quantile([.1, 0.9]).T.reset_index()
    q.rename(columns={'index':'date'}, inplace=True)
    q.set_index('date', drop=True, inplace=True)
    q.index = pd.to_datetime(q.index)
    return m, q
def plot_clusters(obj, index=None, fill=True, title=None, save=False, filename=None):
    """Plot the per-cluster mean time-series, optionally with a shaded
    10th-90th percentile band.

    :param obj: either the dict returned by `cluster_grid_search` (then `index`
        selects which result to plot) or a single cluster dataframe
    :param index: position in obj['clusters'] when obj is a grid-search dict
    :param fill: shade the 10th-90th percentile band around each mean line
    :param title: figure title
    :param save: write the figure to `filename` (must end in .png)
    :param filename: output path used when save=True
    """
    if type(obj) is dict:
        cluster_df = obj['clusters'][index]
    else:
        cluster_df = obj
    # Get cluster means and 10th, 90th quantiles
    m, q = cluster_mean_quantiles(cluster_df)
    # Plot cluster results, one colour per cluster.
    nclusts = len(cluster_df.cluster.unique())
    color = iter(plt.cm.Set2(np.linspace(0, 1, nclusts)))
    fig = plt.figure(figsize=(10, 8))
    cnt = 0
    for i in range(0, nclusts):
        # Plot mean time-series for each cluster
        c = next(color)
        plt.plot(m.index, m[i], 'k', color=c)
        # Fill 10th and 90th quantile time-series of each cluster; `q` holds two
        # adjacent columns (10th, 90th) per cluster, hence cnt advances by 2.
        if fill:
            plt.fill_between(m.index, q.iloc[:, [cnt]].values.flatten(), q.iloc[:, [cnt+1]].values.flatten(),
                             alpha=0.5, edgecolor=c, facecolor=c)
            cnt += 2
    # Legend and title
    # NOTE(review): the plotted lines carry no `label=`, so this legend is
    # likely empty — confirm whether labels should be added.
    plt.legend(loc='upper left')
    plt.title(title)
    # Axis labels
    ax = fig.add_subplot(111)
    ax.set_xlabel('Date')
    ax.set_ylabel('NDVI')
    if save:
        pattern = '.png'
        if not pattern in filename:
            raise ValueError('File type should be .png')
        fig.savefig(filename)
| [
"tslearn.clustering.GlobalAlignmentKernelKMeans",
"scipy.signal.savgol_filter",
"numpy.arange",
"pandas.to_datetime",
"os.walk",
"re.search",
"os.listdir",
"matplotlib.pyplot.plot",
"numpy.linspace",
"numpy.random.seed",
"pandas.DataFrame",
"matplotlib.pyplot.title",
"tslearn.clustering.silh... | [((2237, 2305), 'scipy.signal.savgol_filter', 'signal.savgol_filter', (['x[value]'], {'window_length': 'window', 'polyorder': 'poly'}), '(x[value], window_length=window, polyorder=poly)\n', (2257, 2305), False, 'from scipy import signal\n'), ((5226, 5241), 'pandas.DataFrame', 'pd.DataFrame', (['d'], {}), '(d)\n', (5238, 5241), True, 'import pandas as pd\n'), ((6498, 6521), 'pandas.to_datetime', 'pd.to_datetime', (['m.index'], {}), '(m.index)\n', (6512, 6521), True, 'import pandas as pd\n'), ((6784, 6807), 'pandas.to_datetime', 'pd.to_datetime', (['q.index'], {}), '(q.index)\n', (6798, 6807), True, 'import pandas as pd\n'), ((7259, 7286), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 8)'}), '(figsize=(10, 8))\n', (7269, 7286), True, 'import matplotlib.pyplot as plt\n'), ((7755, 7783), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper left"""'}), "(loc='upper left')\n", (7765, 7783), True, 'import matplotlib.pyplot as plt\n'), ((7788, 7804), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (7797, 7804), True, 'import matplotlib.pyplot as plt\n'), ((1485, 1542), 'gippy.GeoImage.open', 'GeoImage.open', ([], {'filenames': 'files', 'bandnames': 'bands', 'nodata': '(0)'}), '(filenames=files, bandnames=bands, nodata=0)\n', (1498, 1542), False, 'from gippy import GeoImage\n'), ((2617, 2646), 'numpy.arange', 'np.arange', (['self.group.ngroups'], {}), '(self.group.ngroups)\n', (2626, 2646), True, 'import numpy as np\n'), ((2767, 2787), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (2781, 2787), True, 'import numpy as np\n'), ((2796, 2834), 'numpy.random.shuffle', 'np.random.shuffle', (['self.arranged_group'], {}), '(self.arranged_group)\n', (2813, 2834), True, 'import numpy as np\n'), ((3777, 3812), 'tslearn.utils.to_time_series_dataset', 'to_time_series_dataset', (['self.tslist'], {}), '(self.tslist)\n', (3799, 3812), False, 'from tslearn.utils import 
to_time_series_dataset\n'), ((4191, 4247), 'tslearn.clustering.GlobalAlignmentKernelKMeans', 'clust.GlobalAlignmentKernelKMeans', ([], {'n_clusters': 'n_clusters'}), '(n_clusters=n_clusters)\n', (4224, 4247), True, 'import tslearn.clustering as clust\n'), ((4292, 4360), 'tslearn.clustering.TimeSeriesKMeans', 'clust.TimeSeriesKMeans', ([], {'n_clusters': 'n_clusters', 'metric': 'cluster_metric'}), '(n_clusters=n_clusters, metric=cluster_metric)\n', (4314, 4360), True, 'import tslearn.clustering as clust\n'), ((4536, 4582), 'tslearn.clustering.silhouette_score', 'silhouette_score', (['ts_sample.ts_dataset', 'labels'], {}), '(ts_sample.ts_dataset, labels)\n', (4552, 4582), False, 'from tslearn.clustering import silhouette_score\n'), ((7412, 7449), 'matplotlib.pyplot.plot', 'plt.plot', (['m.index', 'm[i]', '"""k"""'], {'color': 'c'}), "(m.index, m[i], 'k', color=c)\n", (7420, 7449), True, 'import matplotlib.pyplot as plt\n'), ((974, 988), 'os.walk', 'walk', (['filepath'], {}), '(filepath)\n', (978, 988), False, 'from os import listdir, walk\n'), ((1584, 1660), 'gippy.algorithms.indices', 'alg.indices', (['img'], {'products': '[ind]', 'filename': "(folder + '/index_' + ind + '.tif')"}), "(img, products=[ind], filename=folder + '/index_' + ind + '.tif')\n", (1595, 1660), True, 'import gippy.algorithms as alg\n'), ((7219, 7245), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'nclusts'], {}), '(0, 1, nclusts)\n', (7230, 7245), True, 'import numpy as np\n'), ((1203, 1218), 'os.listdir', 'listdir', (['folder'], {}), '(folder)\n', (1210, 1218), False, 'from os import listdir, walk\n'), ((1332, 1353), 're.search', 're.search', (['pattern', 'f'], {}), '(pattern, f)\n', (1341, 1353), False, 'import re\n')] |
import numpy as np
import sys
import time
from sklearn.model_selection import RepeatedKFold
from sklearn.model_selection import GroupKFold
from sklearn.base import BaseEstimator
from scipy.linalg import cholesky, solve_triangular
from sklearn.metrics.pairwise import euclidean_distances
from sklearn.decomposition import PCA
from ml_dft.kernel_functions import RBFKernel, MaternKernel
import os
import warnings
def get_alpha_add(n_basis, n_grid, delta, v):
    """Build a per-basis-function regularization scaling vector.

    For each of the ``n_basis / 2`` frequency indices ``k`` the factor
    ``pi * ((k / (n_grid * delta))**2 + v**2) / v`` is computed, and each
    factor is then duplicated so that consecutive pairs of basis functions
    share the same scaling.

    Returns a 1-D numpy array of length ``n_basis`` (for even ``n_basis``).
    """
    freqs = np.arange(n_basis / 2) / (n_grid * delta)
    scaling = np.pi * (freqs ** 2 + v ** 2) / v
    return np.repeat(scaling, 2)
class MultivariateGaussianProcessCV(BaseEstimator):
    """Multivariate kernel-ridge / Gaussian-process regressor with grid-search CV.

    Fits one kernel model per output dimension (optionally in PCA-compressed
    output space), selecting the kernel width ``gamma``, ridge ``alpha``,
    importance-weight exponent ``lambda`` and (optionally) the frequency
    parameter ``v`` by cross-validated mean (squared or absolute) error.
    Hyper-parameter candidates come from ``krr_param_grid``.

    Learned attributes set by :meth:`fit`: ``dual_coefs_``, ``alphas_``,
    ``gammas_``, ``lambdas_``, ``L_fit_`` (Cholesky factors), ``X_fit_``,
    ``y_fit_``, ``errors``.
    """
    def __init__(self, krr_param_grid=None, cv_type=None, cv_nfolds=5, cv_groups=None,
                 cv_shuffles=1, n_components=None, single_combo=True,
                 verbose=0, copy_X=True, v=None, n_basis=None, n_grid=None, delta=None,
                 id=1, cleanup=True, kernel=None, squared_dist=False, kernel_params=None,
                 delta_learning=False, mae=False, replace_fit=True):
        # Store constructor arguments (sklearn BaseEstimator convention:
        # no validation or computation in __init__ beyond kernel selection).
        self.krr_param_grid = krr_param_grid
        self.verbose = verbose
        self.cv_nfolds = cv_nfolds
        self.cv_type = cv_type
        self.cv_groups = cv_groups
        self.cv_shuffles = cv_shuffles
        self.n_components = n_components
        self.single_combo = single_combo
        self.copy_X = copy_X
        self.n_grid = n_grid
        self.delta = delta
        self.n_basis = n_basis
        self.id = id
        self.cleanup = cleanup
        self.kernel = kernel
        self.squared_dist = squared_dist
        self.device = None
        self.replace_fit = replace_fit
        self.delta_learning = delta_learning
        self.mae = mae
        # Resolve the kernel: default RBF, or build from a name + params.
        if self.kernel is None:
            self.kernel = RBFKernel()
        elif self.kernel == 'rbf':
            self.kernel = RBFKernel(**kernel_params)
        elif self.kernel == 'matern':
            self.kernel = MaternKernel(**kernel_params)
        # NOTE(review): this chained comparison parses as
        # ('v' in self.krr_param_grid) and (self.krr_param_grid is not None),
        # and raises TypeError when krr_param_grid is None — probably intended
        # "krr_param_grid is not None and 'v' in krr_param_grid". Left as-is.
        if 'v' in self.krr_param_grid is not None and not single_combo:
            raise ValueError('Can only add to alpha if single_combo=True')

    def score(self, y_true, y_pred):
        """Return the mean squared error between ``y_true`` and ``y_pred``.

        NOTE: unlike the sklearn ``score`` convention, lower is better here.
        """
        return np.mean((y_true - y_pred) ** 2)

    def fit(self, X, y, labels=None, dist=None, importance_weights=None, cv_indices=None,
            dist_savename=None):
        """Grid-search hyper-parameters by cross-validation, then refit on all data.

        Args:
            X: training inputs, shape (n_samples, n_features).
            y: training targets; a 1-D array is reshaped to (n_samples, 1).
            labels: unsupported (raises RuntimeError if given).
            dist: optional precomputed pairwise distance matrix for X.
            importance_weights: optional per-sample weights; when omitted the
                'lambda' grid is forced to [0] and unit weights are used.
            cv_indices: subset of sample indices used for CV splitting; all
                remaining samples are appended to every training fold.
            dist_savename: if given (and dist was computed here), the distance
                matrix is saved to this .npy path.

        Returns:
            None; sets the fitted ``*_`` attributes on self.
        """
        t = time.time()
        if y.ndim < 2:
            y = y.reshape(-1, 1)
        # Optionally compress the output space with PCA; CV then runs on the
        # principal components y_ rather than on raw y.
        if self.n_components is not None:
            if self.verbose > 0:
                elapsed = time.time() - t
                print('PCA [%dmin %dsec]' % (int(elapsed / 60),
                                             int(elapsed % 60)))
                sys.stdout.flush()
            self.pca = PCA(n_components=self.n_components, svd_solver='arpack')
            y_ = self.pca.fit_transform(y)
            if self.verbose > 0:
                print('Lost %.1f%% information ' % (self.pca.noise_variance_) +
                      '[%dmin %dsec]' % (int(elapsed / 60), int(elapsed % 60)))
                elapsed = time.time() - t
        else:
            y_ = y
        if labels is not None:
            raise RuntimeError('Not implemented.')
        if cv_indices is None:
            cv_indices = np.arange(X.shape[0])
        # Build the CV fold iterator according to cv_type.
        if self.cv_type is None:
            kfold = RepeatedKFold(n_splits=self.cv_nfolds, n_repeats=self.cv_shuffles)
            cv_folds = kfold.split(X[cv_indices])
            n_cv_folds = kfold.get_n_splits()
        elif self.cv_type == 'iter':
            # cv_groups is itself an iterable of (train, test) index pairs.
            cv_folds = self.cv_groups
            n_cv_folds = len(self.cv_groups)
        elif self.cv_type == 'group':
            groups = self.cv_groups
            if self.cv_nfolds is None:
                self.cv_nfolds = len(np.unique(groups))
            kfold = GroupKFold(n_splits=self.cv_nfolds)
            cv_folds = kfold.split(X[cv_indices], y[cv_indices], groups)
            n_cv_folds = kfold.get_n_splits()
        else:
            raise Exception('Cross-validation type not supported')
        # Samples outside cv_indices are always part of the training fold,
        # never of a test fold.
        add_train_inds = np.setdiff1d(np.arange(X.shape[0]), cv_indices)
        cv_folds = list(cv_folds)
        cv_folds = [(np.concatenate((train_fold, add_train_inds)), test_fold) for train_fold, test_fold in cv_folds]
        if self.verbose > 0:
            elapsed = time.time() - t
            print('Computing distance matrix [%dmin %dsec]' % (
                int(elapsed / 60), int(elapsed % 60)))
            sys.stdout.flush()
        if dist is None:
            dist = euclidean_distances(X, None, squared=self.squared_dist)
            if dist_savename is not None:
                if self.verbose > 0:
                    print('Saving distance matrix to file:', dist_savename)
                np.save(dist_savename, dist)
        if importance_weights is None:
            # No weights given: disable the lambda search and use unit weights.
            self.krr_param_grid['lambda'] = [0]
            importance_weights = np.ones((X.shape[0], ))
        # Square root once; weights enter the kernel as iw**lamb on each side.
        importance_weights = importance_weights**(0.5)
        errors = []
        if 'v' in self.krr_param_grid:
            # Grid search over (v, gamma, alpha) with a per-output additive
            # regularizer alpha * get_alpha_add(...)[y_i]; lambda axis has
            # size 1 in this branch.
            for fold_i, (train_i, test_i) in enumerate(cv_folds):
                fold_errors = np.empty((len(self.krr_param_grid['v']),
                                         len(self.krr_param_grid['gamma']),
                                         1,
                                         len(self.krr_param_grid['alpha']), y_.shape[1]))
                if self.verbose > 0:
                    elapsed = time.time() - t
                    print('CV %d of %d [%dmin %dsec]' % (fold_i + 1,
                                                         n_cv_folds,
                                                         int(elapsed / 60),
                                                         int(elapsed % 60)))
                    sys.stdout.flush()
                for v_i, v in enumerate(self.krr_param_grid['v']):
                    for gamma_i, gamma in enumerate(self.krr_param_grid['gamma']):
                        for lamb_i, lamb in enumerate(self.krr_param_grid['lambda']):
                            iw = importance_weights**lamb
                            iw = iw[:, None]
                            K_train = self.kernel.apply_to_dist(dist[np.ix_(train_i, train_i)], gamma=gamma)
                            K_train *= np.outer(iw[train_i], iw[train_i])
                            K_test = self.kernel.apply_to_dist(dist[np.ix_(test_i, train_i)], gamma=gamma)
                            if self.verbose > 0:
                                sys.stdout.write('.')
                                sys.stdout.flush()
                            for alpha_i, alpha in enumerate(self.krr_param_grid['alpha']):
                                if self.verbose > 0:
                                    sys.stdout.write(',')
                                    sys.stdout.flush()
                                # One Cholesky solve per output dimension,
                                # since the ridge differs per output here.
                                for y_i in np.arange(y_.shape[1]):
                                    K_train_ = K_train.copy()
                                    alpha_add = get_alpha_add(self.n_basis, self.n_grid, self.delta, v)
                                    # Add ridge to the diagonal in place.
                                    K_train_.flat[::K_train_.shape[0] + 1] += alpha * alpha_add[y_i]
                                    try:
                                        L_ = cholesky(K_train_, lower=True)
                                        x = solve_triangular(L_, y_[train_i, y_i], lower=True)
                                        dual_coef_ = solve_triangular(L_.T, x)
                                        pred_mean = np.dot(K_test, dual_coef_)
                                        if self.mae:
                                            e = np.mean(np.abs(pred_mean - y_[test_i, y_i]), 0)
                                        else:
                                            e = np.mean((pred_mean - y_[test_i, y_i]) ** 2, 0)
                                    except np.linalg.LinAlgError:
                                        # Non-PSD system: mark this cell as
                                        # unusable rather than aborting.
                                        e = np.inf
                                    fold_errors[v_i, gamma_i, 0, alpha_i, y_i] = e
                if self.verbose > 0:
                    sys.stdout.write('\n')
                    sys.stdout.flush()
                errors.append(fold_errors)
            errors = np.array(errors)
            errors = np.mean(errors, 0)  # average over folds
        else:
            # Grid search over (gamma, lambda, alpha); all outputs share one
            # linear system per parameter combination.
            for fold_i, (train_i, test_i) in enumerate(cv_folds):
                fold_errors = np.empty((len(self.krr_param_grid['gamma']),
                                         len(self.krr_param_grid['lambda']),
                                         len(self.krr_param_grid['alpha']), y_.shape[1]))
                if self.verbose > 0:
                    elapsed = time.time() - t
                    print('CV %d of %d [%dmin %dsec]' % (fold_i + 1,
                                                         n_cv_folds,
                                                         int(elapsed / 60),
                                                         int(elapsed % 60)))
                    sys.stdout.flush()
                for gamma_i, gamma in enumerate(self.krr_param_grid['gamma']):
                    if self.verbose > 0:
                        sys.stdout.write('.')
                        sys.stdout.flush()
                    for lamb_i, lamb in enumerate(self.krr_param_grid['lambda']):
                        iw = importance_weights**lamb
                        iw = iw[:, None]
                        K_train = self.kernel.apply_to_dist(dist[np.ix_(train_i, train_i)], gamma=gamma)
                        # Importance-weighted kernel: D K D with D = diag(iw).
                        K_train *= np.outer(iw[train_i], iw[train_i])
                        K_test = self.kernel.apply_to_dist(dist[np.ix_(test_i, train_i)], gamma=gamma)
                        for alpha_i, alpha in enumerate(self.krr_param_grid['alpha']):
                            if self.verbose > 0:
                                sys.stdout.write(',')
                                sys.stdout.flush()
                            K_train_ = K_train.copy()
                            # Add ridge alpha to the diagonal in place.
                            K_train_.flat[::K_train_.shape[0] + 1] += alpha
                            try:
                                L_ = cholesky(K_train_, lower=True)
                                x = solve_triangular(L_, iw[train_i] * y_[train_i], lower=True)
                                dual_coef_ = iw[train_i] * solve_triangular(L_.T, x)
                                pred_mean = np.dot(K_test, dual_coef_)
                                # Test error is re-weighted by the squared
                                # (un-exponentiated) importance weights.
                                if self.mae:
                                    e = np.mean(np.abs(pred_mean - y_[test_i]) * importance_weights[test_i, None]**2, 0)
                                else:
                                    e = np.mean(((pred_mean - y_[test_i]) ** 2) * importance_weights[test_i, None]**2, 0)
                            except np.linalg.LinAlgError:
                                e = np.inf
                            fold_errors[gamma_i, lamb_i, alpha_i] = e
                if self.verbose > 0:
                    sys.stdout.write('\n')
                    sys.stdout.flush()
                errors.append(fold_errors)
            errors = np.array(errors)
            errors = np.mean(errors, 0)  # average over folds
        # Per-output selected hyper-parameters for the final refit.
        self.dual_coefs_ = np.empty((y_.shape[1], X.shape[0]))
        self.alphas_ = np.empty(y_.shape[1])
        self.lambdas_ = np.empty(y_.shape[1])
        self.gammas_ = np.empty(y_.shape[1])
        if self.verbose > 0:
            elapsed = time.time() - t
            print('Refit [%dmin %dsec]' % (int(elapsed / 60),
                                           int(elapsed % 60)))
            sys.stdout.flush()
        print_count = 0
        if not self.single_combo:
            # Pick the best (gamma, lambda, alpha) independently per output.
            for i in range(y_.shape[1]):
                min_params = np.argsort(errors[:, :, :, i], axis=None)
                # NOTE(review): unravel_index over errors.shape[:2] yields a
                # 2-tuple but is unpacked into 3 names — this branch would
                # raise; errors.shape[:3] looks intended. Left as-is.
                gamma_i, lamb_i, alpha_i = np.unravel_index(min_params[0],
                                                            errors.shape[:2])
                gamma = self.krr_param_grid['gamma'][gamma_i]
                lamb = self.krr_param_grid['lambda'][lamb_i]
                alpha = self.krr_param_grid['alpha'][alpha_i]
                self.alphas_[i] = alpha
                self.gammas_[i] = gamma
                self.lambdas_[i] = lamb
                # Warn when the winning parameter sits at a grid edge (the
                # grid probably needs to be extended in that direction).
                if (gamma_i in (0, len(self.krr_param_grid['gamma']) - 1) or
                        lamb_i in (0, len(self.krr_param_grid['lambda']) - 1) or
                        alpha_i in (0, len(self.krr_param_grid['alpha']) - 1)):
                    if print_count <= 200:
                        fmtstr = '%d: gamma=%g\talpha=%g\tlambda=%g\terror=%g\tmean=%g'
                        print(fmtstr % (i, gamma, alpha, lamb,
                                        errors[gamma_i, lamb_i, alpha_i, i],
                                        errors[gamma_i, lamb_i, alpha_i, i] /
                                        np.mean(np.abs(y_[:, i]))))
                        print_count += 1
        else:
            # Single parameter combination shared by every output: average
            # the CV errors over outputs first, then pick the minimum.
            errors = np.mean(errors, -1)  # average over outputs
            if self.verbose > 1:
                print('CV errors:')
                print(errors)
                print('Alpha params:')
                print(self.krr_param_grid['alpha'])
                print('Gamma params:')
                print(self.krr_param_grid['gamma'])
                print('Lambda params:')
                print(self.krr_param_grid['lambda'])
            if self.verbose > 0:
                print('Min error: ', np.min(errors))
            min_params = np.argsort(errors, axis=None)
            if 'v' in self.krr_param_grid:
                v_i, gamma_i, lamb_i, alpha_i = np.unravel_index(min_params[0],
                                                                 errors.shape)
            else:
                gamma_i, lamb_i, alpha_i = np.unravel_index(min_params[0],
                                                            errors.shape)
            if 'v' in self.krr_param_grid:
                v = self.krr_param_grid['v'][v_i]
                print('v=', v)
            gamma = self.krr_param_grid['gamma'][gamma_i]
            alpha = self.krr_param_grid['alpha'][alpha_i]
            lamb = self.krr_param_grid['lambda'][lamb_i]
            # Edge-of-grid warnings for each searched axis.
            if 'v' in self.krr_param_grid:
                if v == self.krr_param_grid['v'][0]:
                    print('v at lower edge.')
                if v == self.krr_param_grid['v'][-1]:
                    print('v at upper edge.')
            if len(self.krr_param_grid['gamma']) > 1:
                if gamma == self.krr_param_grid['gamma'][0]:
                    print('Gamma at lower edge.')
                if gamma == self.krr_param_grid['gamma'][-1]:
                    print('Gamma at upper edge.')
            if len(self.krr_param_grid['alpha']) > 1:
                if alpha == self.krr_param_grid['alpha'][0]:
                    print('Alpha at lower edge.')
                if alpha == self.krr_param_grid['alpha'][-1]:
                    print('Alpha at upper edge.')
            if len(self.krr_param_grid['lambda']) > 1:
                if lamb == self.krr_param_grid['lambda'][0]:
                    print('Lambda at lower edge.')
                if lamb == self.krr_param_grid['lambda'][-1]:
                    print('Lambda at upper edge.')
            self.alphas_[:] = alpha
            self.gammas_[:] = gamma
            self.lambdas_[:] = lamb
            if 'v' in self.krr_param_grid:
                # Per-output additive ridge derived from the selected v.
                alpha_add = get_alpha_add(self.n_basis, self.n_grid, self.delta, v)
                self.alphas_ *= alpha_add
        # Refit on all data, solving one Cholesky system per unique
        # (alpha, gamma, lambda) combination and sharing it across all
        # outputs that selected that combination.
        combos = list(zip(self.alphas_, self.gammas_, self.lambdas_))
        n_unique_combos = len(set(combos))
        self.L_fit_ = [None] * n_unique_combos
        for i, (alpha, gamma, lamb) in enumerate(set(combos)):
            if self.verbose > 0:
                elapsed = time.time() - t
                print('Parameter combinations ' +
                      '%d of %d [%dmin %dsec]' % (i + 1, n_unique_combos,
                                                  int(elapsed / 60),
                                                  int(elapsed % 60)))
                sys.stdout.flush()
            y_list = [i for i in range(y_.shape[1]) if
                      self.alphas_[i] == alpha and self.gammas_[i] == gamma and self.lambdas_[i] == lamb]
            iw = importance_weights**lamb
            iw = iw[:, None]
            K = self.kernel.apply_to_dist(dist, gamma=gamma)
            K *= np.outer(iw, iw)
            while True:
                # Increase the diagonal and retry with a 10x larger ridge
                # whenever the Cholesky factorization fails (non-PSD K).
                K.flat[::K.shape[0] + 1] += alpha - (alpha / 10)
                try:
                    if self.verbose > 0:
                        print('trying cholesky decomposition, alpha', alpha)
                    L_ = cholesky(K, lower=True)
                    self.L_fit_[i] = L_
                    x = solve_triangular(L_, iw * y_[:, y_list], lower=True)
                    dual_coef_ = solve_triangular(L_.T, x)
                    self.dual_coefs_[y_list] = iw.T * dual_coef_.T.copy()
                    break
                except np.linalg.LinAlgError:
                    if self.verbose > 0:
                        print('LinalgError, increasing alpha')
                    alpha *= 10
                    # NOTE(review): only alphas_[0] is updated on retry even
                    # though the combo may cover other outputs — verify.
                    self.alphas_[0] = alpha
        if self.copy_X:
            self.X_fit_ = X.copy()
            self.y_fit_ = y.copy()
        else:
            self.X_fit_ = X
            self.y_fit_ = y
        self.errors = errors
        if self.verbose > 0:
            elapsed = time.time() - t
            print('Done [%dmin %dsec]' % (int(elapsed / 60), int(elapsed % 60)))
            sys.stdout.flush()

    def add_sample(self, x, y):
        """Append one training sample via a rank-one Cholesky update.

        Args:
            x: the input sample to be added (2-D, one row).
            y: the corresponding target value(s).

        NOTE(review): after fit(), ``self.L_fit_`` is a Python list, so
        ``self.L_fit_.shape`` below would raise AttributeError unless the
        factors were converted to an ndarray elsewhere — confirm the intended
        calling sequence.
        """
        n = self.X_fit_.shape[0]
        print('n', n)
        if self.verbose > 1:
            print("adding training datapoint")
        self.X_fit_ = np.concatenate((self.X_fit_, x), axis=0)
        if self.verbose > 1:
            print("adding training label")
        self.y_fit_ = np.concatenate((self.y_fit_, y), axis=0)
        L_k = np.empty((self.L_fit_.shape[0], n + 1, n + 1))
        self.dual_coefs_ = np.empty((self.dual_coefs_.shape[0], n + 1))
        print(L_k.shape)
        for i, gamma in enumerate(np.unique(self.gammas_)):
            alpha = self.alphas_[i]
            if self.verbose > 1:
                print('Calculating kernel entries for new point')
            dist = euclidean_distances(x, self.X_fit_, squared=self.squared_dist)
            k = self.kernel.apply_to_dist(dist, gamma=gamma).T
            # Split kernel column into cross terms (k1) and the new
            # diagonal entry (k2, ridge-regularized).
            k1 = k[:n]
            k2 = k[n:] + alpha
            if self.verbose > 1:
                print('Updating Cholesky factor')
            # Extend the lower-triangular factor by one row/column.
            L_k[i, :n, :n] = self.L_fit_[i]
            L_k[i, :n, -1:] = 0
            L_k[i, -1:, :n] = solve_triangular(self.L_fit_[i], k1, lower=True).T
            L_k[i, -1:, -1:] = np.sqrt(k2 - np.dot(L_k[i, -1:, :n], L_k[i, -1:, :n].T))
            self.L_fit_ = L_k
            if self.verbose > 1:
                print('Updating dual_coefs')
            # Re-solve the dual coefficients against the enlarged system.
            v = solve_triangular(L_k[i], self.y_fit_, lower=True)
            self.dual_coefs_[i] = solve_triangular(L_k[i].T, v).T

    def predict(self, X, verbose=None, variance=False, dist=None):
        """Predict targets for ``X`` with the fitted model.

        Args:
            X: query inputs, shape (n_samples, n_features).
            verbose: overrides ``self.verbose`` when not None.
            variance: if True, also return the predictive variance.
            dist: optional precomputed distance matrix between X and X_fit_.

        Returns:
            Predictions ``y`` (flattened when single-output), or
            ``(y, pred_var)`` when ``variance`` is True.
        """
        t = time.time()
        if verbose is None:
            verbose = self.verbose
        y_ = np.empty(shape=(X.shape[0], len(self.alphas_)))
        if verbose > 0:
            elapsed = time.time() - t
            print('Computing distance matrix [%dmin %dsec]' % (
                int(elapsed / 60), int(elapsed % 60)))
            sys.stdout.flush()
        if dist is None:
            # Predicting on the training set itself uses the symmetric
            # (slightly cheaper) distance computation.
            if X.shape == self.X_fit_.shape and np.allclose(X, self.X_fit_):
                dist = euclidean_distances(self.X_fit_, squared=self.squared_dist)
            else:
                dist = euclidean_distances(X, self.X_fit_, squared=self.squared_dist)
        if variance:
            if verbose > 0:
                elapsed = time.time() - t
                print('Test distances [%dmin %dsec]' % (int(elapsed / 60),
                                                        int(elapsed % 60)))
                sys.stdout.flush()
            dist_test = euclidean_distances(X, X, squared=self.squared_dist)
            pred_var = np.zeros((X.shape[0],))
        # One kernel evaluation per distinct gamma; outputs sharing a gamma
        # are predicted together.
        for i, gamma in enumerate(np.unique(self.gammas_)):
            if verbose > 0:
                print('Gamma %d of %d [%dmin %dsec]' % (i + 1,
                                                        len(np.unique(self.gammas_)), int(elapsed / 60),
                                                        int(elapsed % 60)))
                sys.stdout.flush()
            y_list = [i for i in range(len(self.gammas_)) if
                      self.gammas_[i] == gamma]
            K = self.kernel.apply_to_dist(dist, gamma=gamma)
            y_[:, y_list] = np.dot(K, self.dual_coefs_[y_list].T)
            if variance:
                # GP predictive variance: k(x,x) - k(x,X) K^-1 k(X,x),
                # computed via the stored Cholesky factor.
                K_test = self.kernel.apply_to_dist(dist_test, gamma=gamma)
                V = solve_triangular(self.L_fit_[i], K.T, lower=True)
                v = np.sum(V * V, axis=0)
                pred_var = K_test.flat[::X.shape[0] + 1] - v
        # Map predictions from PCA space back to the original output space.
        if self.n_components is not None:
            y = self.pca.inverse_transform(y_)
        else:
            y = y_
        if y.shape[1] == 1:
            y = y.flatten()
        if verbose > 0:
            elapsed = time.time() - t
            print('Done [%dmin %dsec]' % (int(elapsed / 60), int(elapsed % 60)))
            sys.stdout.flush()
        if variance:
            return y, pred_var
        else:
            return y

    def save(self, filename):
        """Save the fitted model state to ``filename + '_<attr>.npy'`` files.

        The (potentially large) X/y training arrays are only rewritten when
        missing or when ``replace_fit`` is True.
        """
        np.save(filename + '_alphas', self.alphas_)
        np.save(filename + '_dual_coefs', self.dual_coefs_)
        np.save(filename + '_gammas', self.gammas_)
        np.save(filename + '_lambdas', self.lambdas_)
        if not os.path.exists(filename + '_X_fit.npy') or self.replace_fit:
            np.save(filename + '_X_fit', self.X_fit_)
            np.save(filename + '_y_fit', self.y_fit_)
        np.save(filename + '_errors', self.errors)
        np.save(filename + '_kernel', self.kernel)

    def load(self, filename):
        """Restore model state previously written by :meth:`save`.

        Missing optional files (labels, lambdas) produce a warning instead
        of an error, for compatibility with older saves.
        """
        self.alphas_ = np.load(filename + '_alphas.npy', allow_pickle=True)
        self.dual_coefs_ = np.load(filename + '_dual_coefs.npy', allow_pickle=True)
        self.gammas_ = np.load(filename + '_gammas.npy', allow_pickle=True)
        self.X_fit_ = np.load(filename + '_X_fit.npy', allow_pickle=True)
        self.errors = np.load(filename + '_errors.npy', allow_pickle=True)
        # The kernel was pickled as a 0-d object array; [()] unwraps it.
        self.kernel = np.load(filename + '_kernel.npy', allow_pickle=True)[()]
        if os.path.exists(filename + '_y_fit.npy'):
            self.y_fit_ = np.load(filename + '_y_fit.npy', allow_pickle=True)
        else:
            warnings.warn('No labels file found, not adding labels to model')
        if os.path.exists(filename + '_lambdas.npy'):
            self.lambdas_ = np.load(filename + '_lambdas.npy', allow_pickle=True)
        else:
            warnings.warn('No lambdas file found, not adding importance weights to model')
| [
"numpy.argsort",
"numpy.array",
"scipy.linalg.cholesky",
"ml_dft.kernel_functions.RBFKernel",
"numpy.arange",
"numpy.save",
"numpy.mean",
"os.path.exists",
"numpy.repeat",
"sklearn.decomposition.PCA",
"numpy.ix_",
"numpy.dot",
"sklearn.model_selection.GroupKFold",
"numpy.empty",
"scipy.l... | [((559, 582), 'numpy.repeat', 'np.repeat', (['alpha_add', '(2)'], {}), '(alpha_add, 2)\n', (568, 582), True, 'import numpy as np\n'), ((2185, 2216), 'numpy.mean', 'np.mean', (['((y_true - y_pred) ** 2)'], {}), '((y_true - y_pred) ** 2)\n', (2192, 2216), True, 'import numpy as np\n'), ((2353, 2364), 'time.time', 'time.time', ([], {}), '()\n', (2362, 2364), False, 'import time\n'), ((11168, 11203), 'numpy.empty', 'np.empty', (['(y_.shape[1], X.shape[0])'], {}), '((y_.shape[1], X.shape[0]))\n', (11176, 11203), True, 'import numpy as np\n'), ((11227, 11248), 'numpy.empty', 'np.empty', (['y_.shape[1]'], {}), '(y_.shape[1])\n', (11235, 11248), True, 'import numpy as np\n'), ((11273, 11294), 'numpy.empty', 'np.empty', (['y_.shape[1]'], {}), '(y_.shape[1])\n', (11281, 11294), True, 'import numpy as np\n'), ((11318, 11339), 'numpy.empty', 'np.empty', (['y_.shape[1]'], {}), '(y_.shape[1])\n', (11326, 11339), True, 'import numpy as np\n'), ((18444, 18484), 'numpy.concatenate', 'np.concatenate', (['(self.X_fit_, x)'], {'axis': '(0)'}), '((self.X_fit_, x), axis=0)\n', (18458, 18484), True, 'import numpy as np\n'), ((18579, 18619), 'numpy.concatenate', 'np.concatenate', (['(self.y_fit_, y)'], {'axis': '(0)'}), '((self.y_fit_, y), axis=0)\n', (18593, 18619), True, 'import numpy as np\n'), ((18635, 18681), 'numpy.empty', 'np.empty', (['(self.L_fit_.shape[0], n + 1, n + 1)'], {}), '((self.L_fit_.shape[0], n + 1, n + 1))\n', (18643, 18681), True, 'import numpy as np\n'), ((18709, 18753), 'numpy.empty', 'np.empty', (['(self.dual_coefs_.shape[0], n + 1)'], {}), '((self.dual_coefs_.shape[0], n + 1))\n', (18717, 18753), True, 'import numpy as np\n'), ((20035, 20046), 'time.time', 'time.time', ([], {}), '()\n', (20044, 20046), False, 'import time\n'), ((21047, 21070), 'numpy.zeros', 'np.zeros', (['(X.shape[0],)'], {}), '((X.shape[0],))\n', (21055, 21070), True, 'import numpy as np\n'), ((22424, 22467), 'numpy.save', 'np.save', (["(filename + '_alphas')", 'self.alphas_'], 
{}), "(filename + '_alphas', self.alphas_)\n", (22431, 22467), True, 'import numpy as np\n'), ((22476, 22527), 'numpy.save', 'np.save', (["(filename + '_dual_coefs')", 'self.dual_coefs_'], {}), "(filename + '_dual_coefs', self.dual_coefs_)\n", (22483, 22527), True, 'import numpy as np\n'), ((22536, 22579), 'numpy.save', 'np.save', (["(filename + '_gammas')", 'self.gammas_'], {}), "(filename + '_gammas', self.gammas_)\n", (22543, 22579), True, 'import numpy as np\n'), ((22588, 22633), 'numpy.save', 'np.save', (["(filename + '_lambdas')", 'self.lambdas_'], {}), "(filename + '_lambdas', self.lambdas_)\n", (22595, 22633), True, 'import numpy as np\n'), ((22772, 22813), 'numpy.save', 'np.save', (["(filename + '_y_fit')", 'self.y_fit_'], {}), "(filename + '_y_fit', self.y_fit_)\n", (22779, 22813), True, 'import numpy as np\n'), ((22874, 22916), 'numpy.save', 'np.save', (["(filename + '_errors')", 'self.errors'], {}), "(filename + '_errors', self.errors)\n", (22881, 22916), True, 'import numpy as np\n'), ((22925, 22967), 'numpy.save', 'np.save', (["(filename + '_kernel')", 'self.kernel'], {}), "(filename + '_kernel', self.kernel)\n", (22932, 22967), True, 'import numpy as np\n'), ((23022, 23074), 'numpy.load', 'np.load', (["(filename + '_alphas.npy')"], {'allow_pickle': '(True)'}), "(filename + '_alphas.npy', allow_pickle=True)\n", (23029, 23074), True, 'import numpy as np\n'), ((23102, 23158), 'numpy.load', 'np.load', (["(filename + '_dual_coefs.npy')"], {'allow_pickle': '(True)'}), "(filename + '_dual_coefs.npy', allow_pickle=True)\n", (23109, 23158), True, 'import numpy as np\n'), ((23182, 23234), 'numpy.load', 'np.load', (["(filename + '_gammas.npy')"], {'allow_pickle': '(True)'}), "(filename + '_gammas.npy', allow_pickle=True)\n", (23189, 23234), True, 'import numpy as np\n'), ((23257, 23308), 'numpy.load', 'np.load', (["(filename + '_X_fit.npy')"], {'allow_pickle': '(True)'}), "(filename + '_X_fit.npy', allow_pickle=True)\n", (23264, 23308), True, 'import numpy as 
np\n'), ((23407, 23459), 'numpy.load', 'np.load', (["(filename + '_errors.npy')"], {'allow_pickle': '(True)'}), "(filename + '_errors.npy', allow_pickle=True)\n", (23414, 23459), True, 'import numpy as np\n'), ((23550, 23589), 'os.path.exists', 'os.path.exists', (["(filename + '_y_fit.npy')"], {}), "(filename + '_y_fit.npy')\n", (23564, 23589), False, 'import os\n'), ((23772, 23813), 'os.path.exists', 'os.path.exists', (["(filename + '_lambdas.npy')"], {}), "(filename + '_lambdas.npy')\n", (23786, 23813), False, 'import os\n'), ((1790, 1801), 'ml_dft.kernel_functions.RBFKernel', 'RBFKernel', ([], {}), '()\n', (1799, 1801), False, 'from ml_dft.kernel_functions import RBFKernel, MaternKernel\n'), ((2681, 2699), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (2697, 2699), False, 'import sys\n'), ((2723, 2779), 'sklearn.decomposition.PCA', 'PCA', ([], {'n_components': 'self.n_components', 'svd_solver': '"""arpack"""'}), "(n_components=self.n_components, svd_solver='arpack')\n", (2726, 2779), False, 'from sklearn.decomposition import PCA\n'), ((3231, 3252), 'numpy.arange', 'np.arange', (['X.shape[0]'], {}), '(X.shape[0])\n', (3240, 3252), True, 'import numpy as np\n'), ((3306, 3372), 'sklearn.model_selection.RepeatedKFold', 'RepeatedKFold', ([], {'n_splits': 'self.cv_nfolds', 'n_repeats': 'self.cv_shuffles'}), '(n_splits=self.cv_nfolds, n_repeats=self.cv_shuffles)\n', (3319, 3372), False, 'from sklearn.model_selection import RepeatedKFold\n'), ((4053, 4074), 'numpy.arange', 'np.arange', (['X.shape[0]'], {}), '(X.shape[0])\n', (4062, 4074), True, 'import numpy as np\n'), ((4438, 4456), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (4454, 4456), False, 'import sys\n'), ((4502, 4557), 'sklearn.metrics.pairwise.euclidean_distances', 'euclidean_distances', (['X', 'None'], {'squared': 'self.squared_dist'}), '(X, None, squared=self.squared_dist)\n', (4521, 4557), False, 'from sklearn.metrics.pairwise import euclidean_distances\n'), ((4879, 4901), 
'numpy.ones', 'np.ones', (['(X.shape[0],)'], {}), '((X.shape[0],))\n', (4886, 4901), True, 'import numpy as np\n'), ((8181, 8197), 'numpy.array', 'np.array', (['errors'], {}), '(errors)\n', (8189, 8197), True, 'import numpy as np\n'), ((8219, 8237), 'numpy.mean', 'np.mean', (['errors', '(0)'], {}), '(errors, 0)\n', (8226, 8237), True, 'import numpy as np\n'), ((11061, 11077), 'numpy.array', 'np.array', (['errors'], {}), '(errors)\n', (11069, 11077), True, 'import numpy as np\n'), ((11099, 11117), 'numpy.mean', 'np.mean', (['errors', '(0)'], {}), '(errors, 0)\n', (11106, 11117), True, 'import numpy as np\n'), ((11544, 11562), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (11560, 11562), False, 'import sys\n'), ((12961, 12980), 'numpy.mean', 'np.mean', (['errors', '(-1)'], {}), '(errors, -1)\n', (12968, 12980), True, 'import numpy as np\n'), ((13877, 13906), 'numpy.argsort', 'np.argsort', (['errors'], {'axis': 'None'}), '(errors, axis=None)\n', (13887, 13906), True, 'import numpy as np\n'), ((16836, 16852), 'numpy.outer', 'np.outer', (['iw', 'iw'], {}), '(iw, iw)\n', (16844, 16852), True, 'import numpy as np\n'), ((18088, 18106), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (18104, 18106), False, 'import sys\n'), ((18813, 18836), 'numpy.unique', 'np.unique', (['self.gammas_'], {}), '(self.gammas_)\n', (18822, 18836), True, 'import numpy as np\n'), ((18995, 19057), 'sklearn.metrics.pairwise.euclidean_distances', 'euclidean_distances', (['x', 'self.X_fit_'], {'squared': 'self.squared_dist'}), '(x, self.X_fit_, squared=self.squared_dist)\n', (19014, 19057), False, 'from sklearn.metrics.pairwise import euclidean_distances\n'), ((19839, 19888), 'scipy.linalg.solve_triangular', 'solve_triangular', (['L_k[i]', 'self.y_fit_'], {'lower': '(True)'}), '(L_k[i], self.y_fit_, lower=True)\n', (19855, 19888), False, 'from scipy.linalg import cholesky, solve_triangular\n'), ((20366, 20384), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (20382, 
20384), False, 'import sys\n'), ((20975, 21027), 'sklearn.metrics.pairwise.euclidean_distances', 'euclidean_distances', (['X', 'X'], {'squared': 'self.squared_dist'}), '(X, X, squared=self.squared_dist)\n', (20994, 21027), False, 'from sklearn.metrics.pairwise import euclidean_distances\n'), ((21106, 21129), 'numpy.unique', 'np.unique', (['self.gammas_'], {}), '(self.gammas_)\n', (21115, 21129), True, 'import numpy as np\n'), ((21570, 21607), 'numpy.dot', 'np.dot', (['K', 'self.dual_coefs_[y_list].T'], {}), '(K, self.dual_coefs_[y_list].T)\n', (21576, 21607), True, 'import numpy as np\n'), ((22278, 22296), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (22294, 22296), False, 'import sys\n'), ((22722, 22763), 'numpy.save', 'np.save', (["(filename + '_X_fit')", 'self.X_fit_'], {}), "(filename + '_X_fit', self.X_fit_)\n", (22729, 22763), True, 'import numpy as np\n'), ((23482, 23534), 'numpy.load', 'np.load', (["(filename + '_kernel.npy')"], {'allow_pickle': '(True)'}), "(filename + '_kernel.npy', allow_pickle=True)\n", (23489, 23534), True, 'import numpy as np\n'), ((23617, 23668), 'numpy.load', 'np.load', (["(filename + '_y_fit.npy')"], {'allow_pickle': '(True)'}), "(filename + '_y_fit.npy', allow_pickle=True)\n", (23624, 23668), True, 'import numpy as np\n'), ((23695, 23760), 'warnings.warn', 'warnings.warn', (['"""No labels file found, not adding labels to model"""'], {}), "('No labels file found, not adding labels to model')\n", (23708, 23760), False, 'import warnings\n'), ((23843, 23896), 'numpy.load', 'np.load', (["(filename + '_lambdas.npy')"], {'allow_pickle': '(True)'}), "(filename + '_lambdas.npy', allow_pickle=True)\n", (23850, 23896), True, 'import numpy as np\n'), ((23923, 24001), 'warnings.warn', 'warnings.warn', (['"""No lambdas file found, not adding importance weights to model"""'], {}), "('No lambdas file found, not adding importance weights to model')\n", (23936, 24001), False, 'import warnings\n'), ((1863, 1889), 
'ml_dft.kernel_functions.RBFKernel', 'RBFKernel', ([], {}), '(**kernel_params)\n', (1872, 1889), False, 'from ml_dft.kernel_functions import RBFKernel, MaternKernel\n'), ((4143, 4187), 'numpy.concatenate', 'np.concatenate', (['(train_fold, add_train_inds)'], {}), '((train_fold, add_train_inds))\n', (4157, 4187), True, 'import numpy as np\n'), ((4291, 4302), 'time.time', 'time.time', ([], {}), '()\n', (4300, 4302), False, 'import time\n'), ((4729, 4757), 'numpy.save', 'np.save', (['dist_savename', 'dist'], {}), '(dist_savename, dist)\n', (4736, 4757), True, 'import numpy as np\n'), ((11391, 11402), 'time.time', 'time.time', ([], {}), '()\n', (11400, 11402), False, 'import time\n'), ((11692, 11733), 'numpy.argsort', 'np.argsort', (['errors[:, :, :, i]'], {'axis': 'None'}), '(errors[:, :, :, i], axis=None)\n', (11702, 11733), True, 'import numpy as np\n'), ((11814, 11863), 'numpy.unravel_index', 'np.unravel_index', (['min_params[0]', 'errors.shape[:2]'], {}), '(min_params[0], errors.shape[:2])\n', (11830, 11863), True, 'import numpy as np\n'), ((13998, 14043), 'numpy.unravel_index', 'np.unravel_index', (['min_params[0]', 'errors.shape'], {}), '(min_params[0], errors.shape)\n', (14014, 14043), True, 'import numpy as np\n'), ((14170, 14215), 'numpy.unravel_index', 'np.unravel_index', (['min_params[0]', 'errors.shape'], {}), '(min_params[0], errors.shape)\n', (14186, 14215), True, 'import numpy as np\n'), ((16506, 16524), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (16522, 16524), False, 'import sys\n'), ((17979, 17990), 'time.time', 'time.time', ([], {}), '()\n', (17988, 17990), False, 'import time\n'), ((19394, 19442), 'scipy.linalg.solve_triangular', 'solve_triangular', (['self.L_fit_[i]', 'k1'], {'lower': '(True)'}), '(self.L_fit_[i], k1, lower=True)\n', (19410, 19442), False, 'from scipy.linalg import cholesky, solve_triangular\n'), ((19923, 19952), 'scipy.linalg.solve_triangular', 'solve_triangular', (['L_k[i].T', 'v'], {}), '(L_k[i].T, v)\n', (19939, 
19952), False, 'from scipy.linalg import cholesky, solve_triangular\n'), ((20219, 20230), 'time.time', 'time.time', ([], {}), '()\n', (20228, 20230), False, 'import time\n'), ((20458, 20485), 'numpy.allclose', 'np.allclose', (['X', 'self.X_fit_'], {}), '(X, self.X_fit_)\n', (20469, 20485), True, 'import numpy as np\n'), ((20510, 20569), 'sklearn.metrics.pairwise.euclidean_distances', 'euclidean_distances', (['self.X_fit_'], {'squared': 'self.squared_dist'}), '(self.X_fit_, squared=self.squared_dist)\n', (20529, 20569), False, 'from sklearn.metrics.pairwise import euclidean_distances\n'), ((20611, 20673), 'sklearn.metrics.pairwise.euclidean_distances', 'euclidean_distances', (['X', 'self.X_fit_'], {'squared': 'self.squared_dist'}), '(X, self.X_fit_, squared=self.squared_dist)\n', (20630, 20673), False, 'from sklearn.metrics.pairwise import euclidean_distances\n'), ((20932, 20950), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (20948, 20950), False, 'import sys\n'), ((21352, 21370), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (21368, 21370), False, 'import sys\n'), ((21728, 21777), 'scipy.linalg.solve_triangular', 'solve_triangular', (['self.L_fit_[i]', 'K.T'], {'lower': '(True)'}), '(self.L_fit_[i], K.T, lower=True)\n', (21744, 21777), False, 'from scipy.linalg import cholesky, solve_triangular\n'), ((21859, 21880), 'numpy.sum', 'np.sum', (['(V * V)'], {'axis': '(0)'}), '(V * V, axis=0)\n', (21865, 21880), True, 'import numpy as np\n'), ((22169, 22180), 'time.time', 'time.time', ([], {}), '()\n', (22178, 22180), False, 'import time\n'), ((22649, 22688), 'os.path.exists', 'os.path.exists', (["(filename + '_X_fit.npy')"], {}), "(filename + '_X_fit.npy')\n", (22663, 22688), False, 'import os\n'), ((1954, 1983), 'ml_dft.kernel_functions.MaternKernel', 'MaternKernel', ([], {}), '(**kernel_params)\n', (1966, 1983), False, 'from ml_dft.kernel_functions import RBFKernel, MaternKernel\n'), ((2524, 2535), 'time.time', 'time.time', ([], {}), '()\n', 
(2533, 2535), False, 'import time\n'), ((3042, 3053), 'time.time', 'time.time', ([], {}), '()\n', (3051, 3053), False, 'import time\n'), ((3778, 3813), 'sklearn.model_selection.GroupKFold', 'GroupKFold', ([], {'n_splits': 'self.cv_nfolds'}), '(n_splits=self.cv_nfolds)\n', (3788, 3813), False, 'from sklearn.model_selection import GroupKFold\n'), ((5757, 5775), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (5773, 5775), False, 'import sys\n'), ((8055, 8077), 'sys.stdout.write', 'sys.stdout.write', (['"""\n"""'], {}), "('\\n')\n", (8071, 8077), False, 'import sys\n'), ((8098, 8116), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (8114, 8116), False, 'import sys\n'), ((8974, 8992), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (8990, 8992), False, 'import sys\n'), ((10935, 10957), 'sys.stdout.write', 'sys.stdout.write', (['"""\n"""'], {}), "('\\n')\n", (10951, 10957), False, 'import sys\n'), ((10978, 10996), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (10994, 10996), False, 'import sys\n'), ((13449, 13463), 'numpy.min', 'np.min', (['errors'], {}), '(errors)\n', (13455, 13463), True, 'import numpy as np\n'), ((16211, 16222), 'time.time', 'time.time', ([], {}), '()\n', (16220, 16222), False, 'import time\n'), ((17133, 17156), 'scipy.linalg.cholesky', 'cholesky', (['K'], {'lower': '(True)'}), '(K, lower=True)\n', (17141, 17156), False, 'from scipy.linalg import cholesky, solve_triangular\n'), ((17221, 17273), 'scipy.linalg.solve_triangular', 'solve_triangular', (['L_', '(iw * y_[:, y_list])'], {'lower': '(True)'}), '(L_, iw * y_[:, y_list], lower=True)\n', (17237, 17273), False, 'from scipy.linalg import cholesky, solve_triangular\n'), ((17381, 17406), 'scipy.linalg.solve_triangular', 'solve_triangular', (['L_.T', 'x'], {}), '(L_.T, x)\n', (17397, 17406), False, 'from scipy.linalg import cholesky, solve_triangular\n'), ((19670, 19712), 'numpy.dot', 'np.dot', (['L_k[i, -1:, :n]', 'L_k[i, -1:, :n].T'], {}), '(L_k[i, -1:, :n], 
L_k[i, -1:, :n].T)\n', (19676, 19712), True, 'import numpy as np\n'), ((20749, 20760), 'time.time', 'time.time', ([], {}), '()\n', (20758, 20760), False, 'import time\n'), ((485, 507), 'numpy.arange', 'np.arange', (['(n_basis / 2)'], {}), '(n_basis / 2)\n', (494, 507), True, 'import numpy as np\n'), ((5430, 5441), 'time.time', 'time.time', ([], {}), '()\n', (5439, 5441), False, 'import time\n'), ((8647, 8658), 'time.time', 'time.time', ([], {}), '()\n', (8656, 8658), False, 'import time\n'), ((9137, 9158), 'sys.stdout.write', 'sys.stdout.write', (['"""."""'], {}), "('.')\n", (9153, 9158), False, 'import sys\n'), ((9183, 9201), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (9199, 9201), False, 'import sys\n'), ((9519, 9553), 'numpy.outer', 'np.outer', (['iw[train_i]', 'iw[train_i]'], {}), '(iw[train_i], iw[train_i])\n', (9527, 9553), True, 'import numpy as np\n'), ((3739, 3756), 'numpy.unique', 'np.unique', (['groups'], {}), '(groups)\n', (3748, 3756), True, 'import numpy as np\n'), ((6263, 6297), 'numpy.outer', 'np.outer', (['iw[train_i]', 'iw[train_i]'], {}), '(iw[train_i], iw[train_i])\n', (6271, 6297), True, 'import numpy as np\n'), ((6478, 6499), 'sys.stdout.write', 'sys.stdout.write', (['"""."""'], {}), "('.')\n", (6494, 6499), False, 'import sys\n'), ((6528, 6546), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (6544, 6546), False, 'import sys\n'), ((6847, 6869), 'numpy.arange', 'np.arange', (['y_.shape[1]'], {}), '(y_.shape[1])\n', (6856, 6869), True, 'import numpy as np\n'), ((9444, 9468), 'numpy.ix_', 'np.ix_', (['train_i', 'train_i'], {}), '(train_i, train_i)\n', (9450, 9468), True, 'import numpy as np\n'), ((9618, 9641), 'numpy.ix_', 'np.ix_', (['test_i', 'train_i'], {}), '(test_i, train_i)\n', (9624, 9641), True, 'import numpy as np\n'), ((9825, 9846), 'sys.stdout.write', 'sys.stdout.write', (['""","""'], {}), "(',')\n", (9841, 9846), False, 'import sys\n'), ((9879, 9897), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', 
(9895, 9897), False, 'import sys\n'), ((10098, 10128), 'scipy.linalg.cholesky', 'cholesky', (['K_train_'], {'lower': '(True)'}), '(K_train_, lower=True)\n', (10106, 10128), False, 'from scipy.linalg import cholesky, solve_triangular\n'), ((10165, 10224), 'scipy.linalg.solve_triangular', 'solve_triangular', (['L_', '(iw[train_i] * y_[train_i])'], {'lower': '(True)'}), '(L_, iw[train_i] * y_[train_i], lower=True)\n', (10181, 10224), False, 'from scipy.linalg import cholesky, solve_triangular\n'), ((10354, 10380), 'numpy.dot', 'np.dot', (['K_test', 'dual_coef_'], {}), '(K_test, dual_coef_)\n', (10360, 10380), True, 'import numpy as np\n'), ((21249, 21272), 'numpy.unique', 'np.unique', (['self.gammas_'], {}), '(self.gammas_)\n', (21258, 21272), True, 'import numpy as np\n'), ((6184, 6208), 'numpy.ix_', 'np.ix_', (['train_i', 'train_i'], {}), '(train_i, train_i)\n', (6190, 6208), True, 'import numpy as np\n'), ((6366, 6389), 'numpy.ix_', 'np.ix_', (['test_i', 'train_i'], {}), '(test_i, train_i)\n', (6372, 6389), True, 'import numpy as np\n'), ((6727, 6748), 'sys.stdout.write', 'sys.stdout.write', (['""","""'], {}), "(',')\n", (6743, 6748), False, 'import sys\n'), ((6785, 6803), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (6801, 6803), False, 'import sys\n'), ((10284, 10309), 'scipy.linalg.solve_triangular', 'solve_triangular', (['L_.T', 'x'], {}), '(L_.T, x)\n', (10300, 10309), False, 'from scipy.linalg import cholesky, solve_triangular\n'), ((10625, 10710), 'numpy.mean', 'np.mean', (['((pred_mean - y_[test_i]) ** 2 * importance_weights[test_i, None] ** 2)', '(0)'], {}), '((pred_mean - y_[test_i]) ** 2 * importance_weights[test_i, None] **\n 2, 0)\n', (10632, 10710), True, 'import numpy as np\n'), ((7224, 7254), 'scipy.linalg.cholesky', 'cholesky', (['K_train_'], {'lower': '(True)'}), '(K_train_, lower=True)\n', (7232, 7254), False, 'from scipy.linalg import cholesky, solve_triangular\n'), ((7299, 7349), 'scipy.linalg.solve_triangular', 'solve_triangular', 
(['L_', 'y_[train_i, y_i]'], {'lower': '(True)'}), '(L_, y_[train_i, y_i], lower=True)\n', (7315, 7349), False, 'from scipy.linalg import cholesky, solve_triangular\n'), ((7403, 7428), 'scipy.linalg.solve_triangular', 'solve_triangular', (['L_.T', 'x'], {}), '(L_.T, x)\n', (7419, 7428), False, 'from scipy.linalg import cholesky, solve_triangular\n'), ((7481, 7507), 'numpy.dot', 'np.dot', (['K_test', 'dual_coef_'], {}), '(K_test, dual_coef_)\n', (7487, 7507), True, 'import numpy as np\n'), ((7751, 7797), 'numpy.mean', 'np.mean', (['((pred_mean - y_[test_i, y_i]) ** 2)', '(0)'], {}), '((pred_mean - y_[test_i, y_i]) ** 2, 0)\n', (7758, 7797), True, 'import numpy as np\n'), ((10474, 10504), 'numpy.abs', 'np.abs', (['(pred_mean - y_[test_i])'], {}), '(pred_mean - y_[test_i])\n', (10480, 10504), True, 'import numpy as np\n'), ((12865, 12881), 'numpy.abs', 'np.abs', (['y_[:, i]'], {}), '(y_[:, i])\n', (12871, 12881), True, 'import numpy as np\n'), ((7617, 7652), 'numpy.abs', 'np.abs', (['(pred_mean - y_[test_i, y_i])'], {}), '(pred_mean - y_[test_i, y_i])\n', (7623, 7652), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
RGB Colourspace Derivation
==========================
Defines objects related to *RGB* colourspace derivation, essentially
calculating the normalised primary matrix for given *RGB* colourspace primaries
and whitepoint.
See Also
--------
`RGB Colourspaces IPython Notebook
<http://nbviewer.ipython.org/github/colour-science/colour-ipython/blob/master/notebooks/models/rgb.ipynb>`_ # noqa
References
----------
.. [1] `RP 177-1993 SMPTE RECOMMENDED PRACTICE - Television Color Equations
<http://car.france3.mars.free.fr/HD/INA-%2026%20jan%2006/SMPTE%20normes%20et%20confs/rp177.pdf>`_, # noqa
DOI: http://dx.doi.org/10.5594/S9781614821915
(Last accessed 24 February 2014)
"""
from __future__ import division, unicode_literals
import numpy as np
__author__ = 'Colour Developers'
__copyright__ = 'Copyright (C) 2013 - 2014 - Colour Developers'
__license__ = 'New BSD License - http://opensource.org/licenses/BSD-3-Clause'
__maintainer__ = 'Colour Developers'
__email__ = '<EMAIL>'
__status__ = 'Production'
__all__ = ['xy_to_z',
'normalised_primary_matrix',
'RGB_luminance_equation',
'RGB_luminance']
def xy_to_z(xy):
    """
    Returns the *z* coordinate complementing given *xy* chromaticity
    coordinates, i.e. :math:`z = 1 - x - y`.

    Parameters
    ----------
    xy : array_like
        *xy* chromaticity coordinates.

    Returns
    -------
    numeric
        *z* coordinate.

    Examples
    --------
    >>> xy_to_z((0.25, 0.25))
    0.5
    """

    x, y = xy[0], xy[1]
    return 1 - x - y
def normalised_primary_matrix(primaries, whitepoint):
    """
    Returns the *normalised primary matrix* using given *primaries* and
    *whitepoint* matrices.

    Parameters
    ----------
    primaries : array_like
        Primaries chromaticity coordinate matrix, (3, 2).
    whitepoint : array_like
        Illuminant / whitepoint chromaticity coordinates.

    Returns
    -------
    ndarray, (3, 3)
        Normalised primary matrix.

    Examples
    --------
    >>> pms = np.array([0.73470, 0.26530, 0.00000, 1.00000, 0.00010, -0.07700])
    >>> whitepoint = (0.32168, 0.33767)
    >>> normalised_primary_matrix(pms, whitepoint)  # doctest: +ELLIPSIS
    array([[  9.5255239...e-01,   0.0000000...e+00,   9.3678631...e-05],
           [  3.4396645...e-01,   7.2816609...e-01,  -7.2132546...e-02],
           [  0.0000000...e+00,   0.0000000...e+00,   1.0088251...e+00]])
    """

    # Append the z = 1 - x - y coordinate to each primary and transpose so
    # that primaries become columns.
    xy = np.reshape(primaries, (3, 2))
    z = 1 - xy[:, 0] - xy[:, 1]
    P = np.transpose(np.hstack((xy, z.reshape((3, 1)))))

    # Whitepoint expressed as an XYZ column vector normalised to Y = 1.
    wx, wy = whitepoint[0], whitepoint[1]
    W = np.array([wx / wy, 1, (1 - wx - wy) / wy]).reshape((3, 1))

    # Scale each primary column so the primaries mix to the whitepoint.
    S = np.dot(np.linalg.inv(P), W)
    return np.dot(P, np.diagflat(S))
def RGB_luminance_equation(primaries, whitepoint):
    """
    Returns the *luminance equation* from given *primaries* and *whitepoint*
    matrices.

    Parameters
    ----------
    primaries : array_like, (3, 2)
        Primaries chromaticity coordinate matrix.
    whitepoint : array_like
        Illuminant / whitepoint chromaticity coordinates.

    Returns
    -------
    unicode
        *Luminance* equation.

    Examples
    --------
    >>> pms = np.array([0.73470, 0.26530, 0.00000, 1.00000, 0.00010, -0.07700])
    >>> whitepoint = (0.32168, 0.33767)
    >>> RGB_luminance_equation(pms, whitepoint)  # doctest: +SKIP
    'Y = 0.3439664...(R) + 0.7281660...(G) + -0.0721325...(B)'
    """

    # The second row of the normalised primary matrix holds the Y (luminance)
    # contributions of R, G and B.
    npm = normalised_primary_matrix(primaries, whitepoint)
    y_row = np.ravel(npm)[3:6]
    return 'Y = {0}(R) + {1}(G) + {2}(B)'.format(y_row[0], y_row[1], y_row[2])
def RGB_luminance(RGB, primaries, whitepoint):
    """
    Returns the *luminance* :math:`y` of given *RGB* components from given
    *primaries* and *whitepoint* matrices.

    Parameters
    ----------
    RGB : array_like, (3,)
        *RGB* chromaticity coordinate matrix.
    primaries : array_like, (3, 2)
        Primaries chromaticity coordinate matrix.
    whitepoint : array_like
        Illuminant / whitepoint chromaticity coordinates.

    Returns
    -------
    numeric
        *Luminance* :math:`y`.

    Examples
    --------
    >>> RGB = np.array([40.6, 4.2, 67.4])
    >>> pms = np.array([0.73470, 0.26530, 0.00000, 1.00000, 0.00010, -0.07700])
    >>> whitepoint = (0.32168, 0.33767)
    >>> RGB_luminance(RGB, pms, whitepoint)  # doctest: +ELLIPSIS
    12.1616018...
    """

    red, green, blue = np.ravel(RGB)
    # Luminance weights are the second row of the normalised primary matrix.
    weights = np.ravel(normalised_primary_matrix(primaries, whitepoint))[3:6]
    return weights[0] * red + weights[1] * green + weights[2] * blue
| [
"numpy.dot",
"numpy.linalg.inv",
"numpy.ravel",
"numpy.diagflat",
"numpy.transpose"
] | [((3348, 3371), 'numpy.transpose', 'np.transpose', (['primaries'], {}), '(primaries)\n', (3360, 3371), True, 'import numpy as np\n'), ((3597, 3622), 'numpy.diagflat', 'np.diagflat', (['coefficients'], {}), '(coefficients)\n', (3608, 3622), True, 'import numpy as np\n'), ((3634, 3665), 'numpy.dot', 'np.dot', (['primaries', 'coefficients'], {}), '(primaries, coefficients)\n', (3640, 3665), True, 'import numpy as np\n'), ((6004, 6017), 'numpy.ravel', 'np.ravel', (['RGB'], {}), '(RGB)\n', (6012, 6017), True, 'import numpy as np\n'), ((3540, 3564), 'numpy.linalg.inv', 'np.linalg.inv', (['primaries'], {}), '(primaries)\n', (3553, 3564), True, 'import numpy as np\n'), ((3227, 3244), 'numpy.ravel', 'np.ravel', (['primary'], {}), '(primary)\n', (3235, 3244), True, 'import numpy as np\n')] |
import argparse
import numpy as np
import pandas as pd
import os
import pickle
import langdetect as lang
import time
from datetime import datetime
import json
# Module-level defaults; `parse_arguments` uses the first two as argparse
# defaults, and `verbose` gates progress printing in merge()/filter().
directory = 'data/twitter'
outfile = 'output.csv'
verbose = False
def parse_arguments():
    """Parse the command line: a data directory and an output file path."""
    parser = argparse.ArgumentParser()
    parser.add_argument('directory', type=str, default=directory)
    parser.add_argument('outfile', type=str, default=outfile)
    return parser.parse_args()
def DF_to_Dict(df, saveAs=''):
    """Convert a scraped-posts DataFrame into a dict of numpy arrays.

    Tries the twitter column layout first (date / retweets / favorites /
    text / hashtags / id / permalink); if a column is missing it falls back
    to the reddit layout (id / date / text / upvotes).

    Parameters
    ----------
    df : pandas.DataFrame
        Scraped data in either the twitter or the reddit column layout.
    saveAs : str
        Optional path; when non-empty the resulting dict is pickled there.

    Returns
    -------
    dict
        Column name -> numpy array of that column's values.
    """
    out = {}
    try:
        # Twitter layout.
        out['date'] = np.array(df['date'].tolist())
        out['retweets'] = np.array(df['retweets'].tolist())
        out['favorites'] = np.array(df['favorites'].tolist())
        out['text'] = np.array(df['text'].tolist())
        out['hashtags'] = np.array(df['hashtags'].tolist())
        out['id'] = np.array(df['id'].tolist())
        out['permalink'] = np.array(df['permalink'].tolist())
    except KeyError:
        # Reddit layout.  Bug fix: this was a bare `except:`, which would
        # also swallow unrelated errors (e.g. KeyboardInterrupt); only a
        # missing column should trigger the fallback.
        out['id'] = np.array(df['id'].tolist())
        out['date'] = np.array(df['date'].tolist())
        out['text'] = np.array(df['text'].tolist())
        out['upvotes'] = np.array(df['upvotes'].tolist())
    if saveAs != '':
        pickle.dump(out, open(saveAs, 'wb'))
    return out
def get_files_of_type(type, directory):
    """Return full paths of the regular files in *directory* whose
    extension matches *type* (leading dot optional)."""
    ext = type if type.startswith('.') else '.' + type
    matches = []
    for name in os.listdir(directory):
        path = os.path.join(directory, name)
        if os.path.isfile(path) and os.path.splitext(name)[1] == ext:
            matches.append(path)
    return matches
def json_to_csv(jsonfile, saveAs='output.csv', index=0, opt='w'):
    """Flatten a reddit-style JSON dump into a semicolon-separated CSV.

    Writes one row per submission and one row per comment, numbering rows
    from *index*.  A header line is written only when opening in 'w' mode,
    so repeated calls with opt='a' can append to the same file.

    Parameters
    ----------
    jsonfile : str
        Path of the JSON dump (list of submissions with 'Comments').
    saveAs : str
        Output CSV path.
    index : int
        First row id to use; pass the previous call's return value when
        appending.
    opt : str
        File mode for the output ('w' or 'a').

    Returns
    -------
    int
        The next unused row id.
    """
    with open(jsonfile) as file:
        data = json.load(file)
    # Bug fix: the output handle used to be opened without a context
    # manager and leaked on any exception; `with` guarantees it is closed.
    with open(saveAs, opt) as out:
        if opt == 'w':
            out.write('id;date;text;upvotes')
        for submission in data:
            d = str(datetime.fromtimestamp(submission['SubmissionTime']))
            t = '"' + str(submission['SubmissionTitle']) + '"'
            u = str(submission['SubmitUpvotes'])
            out.write('\n' + ";".join([str(index), d, t, u]))
            index += 1
            for comment in submission['Comments']:
                d = str(datetime.fromtimestamp(comment['CommentTime']))
                t = '"' + str(comment['CommentText']) + '"'
                u = str(comment['CommentUpvotes'])
                out.write('\n' + ";".join([str(index), d, t, u]))
                index += 1
    return index
def json_to_Dict(jsonfile, dict=None, saveAs=''):
    """Append comments from a reddit-style JSON dump to a dict of arrays.

    Bug fix: the accumulator previously defaulted to a *mutable* ``dict={}``,
    so repeated calls without an explicit dict silently shared and kept
    growing the same object.  ``None`` now stands in for "start fresh";
    callers that pass their own dict are unaffected.

    Parameters
    ----------
    jsonfile : str
        Path of the JSON dump (list of submissions with 'Comments').
    dict : dict or None
        Accumulator with keys id/date/text/upvotes; a fresh one is created
        when omitted.
    saveAs : str
        Optional path; when non-empty the dict is pickled there.

    Returns
    -------
    dict
        The accumulator with the new comments appended.
    """
    if dict is None:
        dict = {}
    with open(jsonfile) as file:
        data = json.load(file)
    if len(list(dict.keys())) == 0:
        dict['id'] = np.array([])
        dict['date'] = np.array([])
        dict['text'] = np.array([])
        dict['upvotes'] = np.array([])
    index = len(dict['id'])
    newids = []
    newdates = []
    newtext = []
    newupvotes = []
    for submission in data:
        # Submissions themselves are skipped; only their comments are kept.
        for comment in submission['Comments']:
            newids += [int(index)]
            newdates += [float(comment['CommentTime'])]
            newtext += [comment['CommentText']]
            newupvotes += [int(comment['CommentUpvotes'])]
            index += 1
        # NOTE(review): hard cap of ~100 entries per call — looks like a
        # debugging limit; confirm it is intentional.
        if index > 100: break
    dict['id'] = np.concatenate((dict['id'], np.array(newids)))
    dict['date'] = np.concatenate((dict['date'], np.array(newdates)))
    dict['text'] = np.concatenate((dict['text'], np.array(newtext)))
    dict['upvotes'] = np.concatenate((dict['upvotes'], np.array(newupvotes)))
    if saveAs != '':
        pickle.dump(dict, open(saveAs, 'wb'))
    return dict
def merge(directory, fileType='csv', saveAs=''):
    """Merge all files of *fileType* found in *directory* into one dataset.

    Supports three branches keyed by substring match on *fileType*:
    'csv' (semicolon-separated scrape files, concatenated with pandas),
    'json' (reddit dumps, flattened via json_to_csv / json_to_Dict) and
    'dict' (pickled dicts of numpy arrays, concatenated key by key).
    The merged data is also written next to *saveAs* as a '.csv' (csv/json
    branches) and pickled to *saveAs* itself when it is non-empty.

    NOTE(review): if *fileType* matches none of the three branches, `dict`
    is never bound and the final `return dict` raises — presumably callers
    always pass a supported type; verify.
    """
    if verbose: print("Merging files...", flush=True)
    files = get_files_of_type(fileType, directory)
    if 'csv' in fileType.lower():
        # Derive the companion .csv path from saveAs (same stem).
        split = os.path.split(saveAs)
        csvFile = os.path.join(split[0], split[1].split('.')[0]) + '.csv'
        df = pd.read_csv(files[0], header=0, sep=";", quoting=1, quotechar='"', error_bad_lines=False, warn_bad_lines=False)
        for i in range(1, len(files)):
            add = pd.read_csv(files[i], header=0, delimiter=";", quoting=1, quotechar='"', error_bad_lines=False, warn_bad_lines=False)
            df = pd.concat([df, add])
        df.to_csv(csvFile, sep=';', index=False, quoting=2)
        if verbose: print("Successfully merged {} files with {} lines.".format(len(files), df.shape[0]), flush=True)
        dict = DF_to_Dict(df)
    if 'json' in fileType.lower():
        split = os.path.split(saveAs)
        csvFile = os.path.join(split[0], split[1].split('.')[0]) + '.csv'
        # First file initialises the csv (header) and the dict; the rest
        # append, threading `index` / `dict` through the calls.
        index = json_to_csv(files[0], saveAs=csvFile)
        dict = json_to_Dict(files[0])
        for i in range(1, len(files)):
            index = json_to_csv(files[i], saveAs=csvFile, index=index, opt='a')
            dict = json_to_Dict(files[i], dict=dict)
    if 'dict' in fileType.lower():
        # Concatenate each key's numpy array across all pickled dicts.
        dict = pickle.load(open(files[0], 'rb'))
        keys = list(dict.keys())
        for i in range(1, len(files)):
            add = pickle.load(open(files[i], 'rb'))
            for key in keys:
                dict[key] = np.concatenate((dict[key], add[key]))
    if saveAs != '':
        pickle.dump(dict, open(saveAs, 'wb'))
    return dict
def filter(dict, with_words=[], without_words=[], language = 'en', saveAs='', startFrame=-1, endFrame=-1):
    """Drop entries from a dict-of-arrays dataset by language and keywords.

    Keeps only entries whose text langdetect classifies as *language*
    (undetectable text is dropped), that contain every word in
    *with_words* and none in *without_words*.  All arrays in the dict are
    sliced with the same boolean mask so they stay aligned.

    NOTE(review): shadows the builtin `filter`; the mutable default lists
    are shared across calls (harmless here since they are never mutated);
    and `endFrame` is accepted but never used — only `d[key][startFrame:]`
    is applied.  Worth confirming/cleaning with the author.
    """
    d = dict.copy()
    keys = list(d.keys())
    if startFrame != -1 and endFrame != -1:
        for key in keys:
            d[key] = d[key][startFrame:]
    text = d['text']
    # True at position i means entry i will be removed.
    remove_indices = np.zeros(len(text), dtype=bool)
    start = len(text)
    if verbose: print("Filtering file with {} entries...".format(start), flush=True)
    # if verbose: print("Time estimated to filter: {:.0f} minutes.".format(start*.011//60+1), flush=True)
    language_filter = []
    i = 0
    z = 0
    startTime = time.time()
    for t in text:
        try:
            # langdetect raises on empty/garbled text; treat that as
            # "not the wanted language" and drop the entry.
            language_filter.append(lang.detect(t) != language)
        except:
            language_filter.append(True)
        i += 1
        # Progress report roughly once per elapsed minute.
        if verbose and (time.time()-startTime)//60 > z:
            z += 1
            print("{:.2f}% of text filtered after {} minutes. Estimated {:.0f} minutes remaining.".format(i/start*100, z, (start-i)/i * z+1), flush=True)
    # Boolean += acts as logical OR with the accumulated mask.
    remove_indices += language_filter
    if len(with_words) != 0:
        for word in with_words:
            remove_indices += [word not in t for t in text]
    if len(without_words) != 0:
        for word in without_words:
            remove_indices += [word in t for t in text]
    for key in keys:
        d[key] = d[key][~remove_indices]
    if saveAs != '':
        pickle.dump(d, open(saveAs, 'wb'))
    end = len(d[keys[0]])
    if verbose: print("Successfully filtered file from {} entries to {}.".format(start,end), flush=True)
    return d
def merge_stocks(directory, saveAs=''):
    """Element-wise sum of all per-symbol OHLCV csv files in *directory*.

    Each file is headerless with the first column as index; the volume
    column is converted to a money volume (shares * mid price) before the
    frames are added.  The combined frame is written next to *saveAs* as a
    '.csv' and, when *saveAs* is non-empty, pickled to *saveAs* as well.
    """
    files = get_files_of_type('csv', directory)
    head, tail = os.path.split(saveAs)
    csvFile = os.path.join(head, tail.split('.')[0]) + '.csv'
    cols = ['time', 'open', 'high', 'low', 'close', 'volume']

    def _load(path):
        # Read one symbol and convert share volume to money volume.
        frame = pd.read_csv(path, index_col=0, header=None, sep=',')
        frame.columns = cols
        frame['volume'] *= ((frame['close'] - frame['open']) / 2 + frame['open'])
        return frame

    df = _load(files[0])
    for path in files[1:]:
        df = df.add(_load(path), fill_value=0)
    df.to_csv(csvFile, sep=',', index=True, index_label='date', quoting=3)
    if saveAs != '':
        pickle.dump(df, open(saveAs, 'wb'))
    return df
def stock_to_DF(stockCSVFile, saveAs=''):
    """Load a Yahoo-style index CSV, re-index it by yyyymmdd integers and
    keep only the 'Close' column; optionally pickle the result."""
    frame = pd.read_csv(stockCSVFile, header=0, sep=',')
    frame['Date'] = [int(day.replace('-', '')) for day in frame['Date']]
    frame = frame.set_index(frame['Date']).filter(['Close'])
    if saveAs != '':
        pickle.dump(frame, open(saveAs, 'wb'))
    return frame
if __name__ == "__main__":
    # Script entry point: currently only converts the S&P 500 index CSV to a
    # pickled DataFrame.  The commented lines below are the author's ad-hoc
    # invocations of the other pipeline steps, kept for reference.
    stock_to_DF('data/indexes/sp500.csv', saveAs='data/indexes/sp500.df')
    # dict = pickle.load(open('data/reddit/raw_finance.dict', 'rb'))
    # verbose = True
    # filter(dict, saveAs='data/reddit/filtered_finance.dict')
    # keys = list(dict.keys())
    # print(dict['text'])
    # print(len(dict[keys[0]]), len(keys))
    # json_to_Dict('data/reddit/apple-.json', saveAs='data/reddit/raw_AAPL.dict')
    # merge('data/reddit/finance_folder', fileType='json', saveAs='data/reddit/raw_finance.dict')
    # json_to_Dict('data/reddit/apple-.json', saveAs='data/reddit/raw_AAPL.dict')
    # json_to_csv('data/reddit/apple-.json', saveAs='data/reddit/raw_AAPL.csv')
    # dict = pickle.load(open('data/reddit/raw_finance.dict', 'rb'))
    # verbose = False
    # dir = 'data/twitter/merge_folder'
    # df = merge(dir, fileType='dict', saveAs='data/twitter/filtered_finance.dict')
    # outfile = 'testCSV.csv'
    # index = json_to_csv('data/reddit/market-.json', saveAs=outfile)
    # df = pd.read_csv('data/reddit/raw_finance.csv', header=0, sep=";", quoting=1, quotechar='"', error_bad_lines=False, warn_bad_lines=False)
    # print(len(df.index))
    # json_to_csv('data/reddit/market-.json', saveAs=outfile, index=index, opt='a')
    # df = pd.read_csv(outfile, header=0, sep=";", quoting=1, quotechar='"', error_bad_lines=False, warn_bad_lines=False)
    # print(len(df.index))
    # dir = 'data/reddit/merge_folder'
    # df = merge(dir, fileType='json')
    # dict = DF_to_Dict(df, saveAs='data/twitter/raw_twitter.dict')
    # startFrame = 400000
    # endFrame = 500000
    # dict = pickle.load(open('data/twitter/raw_twitter.dict', 'rb'))
    # # print("Finished loading dict")
    # filtered = filter(dict,saveAs=os.path.join('data/twitter','filtered_AAPL.dict'))
| [
"os.listdir",
"datetime.datetime.fromtimestamp",
"pandas.read_csv",
"argparse.ArgumentParser",
"os.path.join",
"os.path.splitext",
"os.path.split",
"langdetect.detect",
"numpy.array",
"pandas.concat",
"numpy.concatenate",
"json.load",
"time.time"
] | [((263, 288), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (286, 288), False, 'import argparse\n'), ((5899, 5910), 'time.time', 'time.time', ([], {}), '()\n', (5908, 5910), False, 'import time\n'), ((6971, 6992), 'os.path.split', 'os.path.split', (['saveAs'], {}), '(saveAs)\n', (6984, 6992), False, 'import os\n'), ((7135, 7191), 'pandas.read_csv', 'pd.read_csv', (['files[0]'], {'index_col': '(0)', 'header': 'None', 'sep': '""","""'}), "(files[0], index_col=0, header=None, sep=',')\n", (7146, 7191), True, 'import pandas as pd\n'), ((7735, 7779), 'pandas.read_csv', 'pd.read_csv', (['stockCSVFile'], {'header': '(0)', 'sep': '""","""'}), "(stockCSVFile, header=0, sep=',')\n", (7746, 7779), True, 'import pandas as pd\n'), ((1330, 1356), 'os.path.join', 'os.path.join', (['directory', 'f'], {}), '(directory, f)\n', (1342, 1356), False, 'import os\n'), ((1590, 1605), 'json.load', 'json.load', (['file'], {}), '(file)\n', (1599, 1605), False, 'import json\n'), ((2401, 2416), 'json.load', 'json.load', (['file'], {}), '(file)\n', (2410, 2416), False, 'import json\n'), ((2475, 2487), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (2483, 2487), True, 'import numpy as np\n'), ((2511, 2523), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (2519, 2523), True, 'import numpy as np\n'), ((2547, 2559), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (2555, 2559), True, 'import numpy as np\n'), ((2586, 2598), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (2594, 2598), True, 'import numpy as np\n'), ((3837, 3858), 'os.path.split', 'os.path.split', (['saveAs'], {}), '(saveAs)\n', (3850, 3858), False, 'import os\n'), ((3946, 4061), 'pandas.read_csv', 'pd.read_csv', (['files[0]'], {'header': '(0)', 'sep': '""";"""', 'quoting': '(1)', 'quotechar': '"""\\""""', 'error_bad_lines': '(False)', 'warn_bad_lines': '(False)'}), '(files[0], header=0, sep=\';\', quoting=1, quotechar=\'"\',\n error_bad_lines=False, warn_bad_lines=False)\n', (3957, 4061), 
True, 'import pandas as pd\n'), ((4532, 4553), 'os.path.split', 'os.path.split', (['saveAs'], {}), '(saveAs)\n', (4545, 4553), False, 'import os\n'), ((7330, 7386), 'pandas.read_csv', 'pd.read_csv', (['files[i]'], {'index_col': '(0)', 'header': 'None', 'sep': '""","""'}), "(files[i], index_col=0, header=None, sep=',')\n", (7341, 7386), True, 'import pandas as pd\n'), ((1366, 1387), 'os.listdir', 'os.listdir', (['directory'], {}), '(directory)\n', (1376, 1387), False, 'import os\n'), ((1732, 1784), 'datetime.datetime.fromtimestamp', 'datetime.fromtimestamp', (["submission['SubmissionTime']"], {}), "(submission['SubmissionTime'])\n", (1754, 1784), False, 'from datetime import datetime\n'), ((3310, 3326), 'numpy.array', 'np.array', (['newids'], {}), '(newids)\n', (3318, 3326), True, 'import numpy as np\n'), ((3378, 3396), 'numpy.array', 'np.array', (['newdates'], {}), '(newdates)\n', (3386, 3396), True, 'import numpy as np\n'), ((3448, 3465), 'numpy.array', 'np.array', (['newtext'], {}), '(newtext)\n', (3456, 3465), True, 'import numpy as np\n'), ((3523, 3543), 'numpy.array', 'np.array', (['newupvotes'], {}), '(newupvotes)\n', (3531, 3543), True, 'import numpy as np\n'), ((4116, 4237), 'pandas.read_csv', 'pd.read_csv', (['files[i]'], {'header': '(0)', 'delimiter': '""";"""', 'quoting': '(1)', 'quotechar': '"""\\""""', 'error_bad_lines': '(False)', 'warn_bad_lines': '(False)'}), '(files[i], header=0, delimiter=\';\', quoting=1, quotechar=\'"\',\n error_bad_lines=False, warn_bad_lines=False)\n', (4127, 4237), True, 'import pandas as pd\n'), ((4251, 4271), 'pandas.concat', 'pd.concat', (['[df, add]'], {}), '([df, add])\n', (4260, 4271), True, 'import pandas as pd\n'), ((2034, 2080), 'datetime.datetime.fromtimestamp', 'datetime.fromtimestamp', (["comment['CommentTime']"], {}), "(comment['CommentTime'])\n", (2056, 2080), False, 'from datetime import datetime\n'), ((5161, 5198), 'numpy.concatenate', 'np.concatenate', (['(dict[key], add[key])'], {}), '((dict[key], 
add[key]))\n', (5175, 5198), True, 'import numpy as np\n'), ((1411, 1437), 'os.path.join', 'os.path.join', (['directory', 'f'], {}), '(directory, f)\n', (1423, 1437), False, 'import os\n'), ((5978, 5992), 'langdetect.detect', 'lang.detect', (['t'], {}), '(t)\n', (5989, 5992), True, 'import langdetect as lang\n'), ((1443, 1462), 'os.path.splitext', 'os.path.splitext', (['f'], {}), '(f)\n', (1459, 1462), False, 'import os\n'), ((6103, 6114), 'time.time', 'time.time', ([], {}), '()\n', (6112, 6114), False, 'import time\n')] |
import subprocess
import pandas as pd
import io
import random
from multiprocessing import Pool, Value, cpu_count
import time
import os
import json
import numpy as np
# Genetic-algorithm configuration.
PARAM_DIR = "./params"            # where per-generation parameter files are written
GENERATION_SIZE = 4*12            # individuals per generation
MUTATION_SCALE = .5               # scale of the Cauchy perturbation in mutate_cell()
N_ELITE = 2                       # elites copied unchanged into the next generation
POTTS_SEED = 1                    # seed written into the simulation's JSON config
CHANGE_POTTS_SEED_PER_GEN = True  # bump POTTS_SEED after every generation
GENERATION_NO = 1                 # current generation counter (1-based)

np.random.seed(POTTS_SEED)        # seed the GA's own RNG for reproducibility
def mutate_cell(cell):
    """Return a mutated copy of *cell*.

    Every parameter except P, V, LAMBDA_P and LAMBDA_V is perturbed by a
    Cauchy-distributed step scaled by MUTATION_SCALE, rounded to two
    decimals and floored at 2.  The input dict is not modified.
    """
    mutant = cell.copy()
    for key in mutant.keys():
        if key in ("P", "V", "LAMBDA_P", "LAMBDA_V"):
            # Physical cell parameters are held fixed.
            continue
        # NOTE: choice([True]) is always True; the draw is kept so the RNG
        # stream stays identical to the original implementation.
        if np.random.choice([True]):
            perturbed = mutant[key] + MUTATION_SCALE * np.random.standard_cauchy()
            mutant[key] = max(2, np.round(perturbed, decimals=2))
    return mutant
def fitness(history):
    """Fitness of one run: Euclidean distance between the first and last
    recorded (x, y) positions.  Returns -10000 when the history is empty
    (the simulation produced no output)."""
    try:
        x0, y0 = history["x"].iloc[0], history["y"].iloc[0]
        x1, y1 = history["x"].iloc[-1], history["y"].iloc[-1]
    except IndexError:
        print("Sim failed")
        # Simulation failed - no output
        return -10000
    return np.linalg.norm(np.array([x1, y1]) - np.array([x0, y0]))
def fitness_from_tuple(output):
    """Parse the simulator's final stdout line ("a,b,c") and combine it
    into a scalar fitness: a + b + (200 + c).  Tokens are parsed as int
    when possible, float otherwise."""
    last_line = output.splitlines()[-1].rstrip("\n")
    values = []
    for token in last_line.split(","):
        try:
            values.append(int(token))
        except ValueError:
            values.append(float(token))
    # fitness = time alive-minimum lifetime + livelihood left
    #           + (200 - distance to nearest food)
    return values[0] + values[1] + (200 + values[2])
def run_js_simulation(args):
    """Run one Node.js simulation and return its fitness.

    Parameters
    ----------
    args : tuple
        (modelname, paramname): the JS model file and the JSON parameter
        file passed to it on the command line.

    Returns
    -------
    int or float
        Fitness parsed from the simulator's last stdout line, or 0 when
        the run fails (missing node/model, empty or unparsable output).
    """
    (modelname, paramname) = args
    cmd = f"node ./{modelname} {paramname}"
    proc = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE)
    try:
        output = proc.communicate()[0]
        output = output.decode("utf-8")
        return fitness_from_tuple(output)
    except Exception:
        # Bug fix: was a bare `except:`, which also swallowed
        # KeyboardInterrupt/SystemExit.  Any failed run scores 0.
        return 0
def create_param_files(generation, prefix="", seed=None):
    """Write one JSON parameter file per individual in *generation*.

    Files go to PARAM_DIR/gen<GENERATION_NO>/<prefix><i>.json.  Each file
    is the template below with the last entry of every parameter array
    replaced by the individual's value.  The simulation seed is the module
    global POTTS_SEED unless an explicit *seed* is given (used for the
    smoothing re-run with the previous seed).

    Returns the list of written file paths, in generation order.
    """
    j = json.loads('''
    {
    "conf":{
        "MAX_ACT": [0, 0, 0, 30],
        "V": [0, 30, 0, 500],
        "P": [0, 5, 0, 260],
        "LAMBDA_ACT": [0, 0, 0, 300],
        "LAMBDA_V": [0, 1000, 0, 5],
        "LAMBDA_P": [0, 1, 0, 2],
        "LAMBDA_CH": [0, 0, 0, 500],
        "seed": 1
    }
    }''')
    paramnames = []
    global POTTS_SEED
    if seed is None:
        j["conf"]["seed"] = POTTS_SEED
        seed = POTTS_SEED
        print("seed:", POTTS_SEED)
    else:
        j["conf"]["seed"] = seed
        print("seed:", POTTS_SEED, "using previous seed for run 2:", seed)
    directory =f"{PARAM_DIR}/gen{GENERATION_NO}/"
    os.makedirs(directory, exist_ok=True)
    for i, cell in enumerate(generation):
        # Only the last (cell-type) slot of each parameter array is evolved.
        j["conf"]["MAX_ACT"][-1] = cell["MAX_ACT"]
        j["conf"]["V"][-1] = cell["V"]
        j["conf"]["P"][-1] = cell["P"]
        j["conf"]["LAMBDA_ACT"][-1] = cell["LAMBDA_ACT"]
        j["conf"]["LAMBDA_V"][-1] = cell["LAMBDA_V"]
        j["conf"]["LAMBDA_P"][-1] = cell["LAMBDA_P"]
        j["conf"]["LAMBDA_CH"][-1] = cell["LAMBDA_CH"]
        filename = f"{directory}/{prefix}{i}.json"
        paramnames.append(filename)
        with open(filename, "w+") as f:
            f.write(json.dumps(j))
    return paramnames
def simulate_two(obj):
    """Average fitness of one individual evaluated under two seeds.

    *obj* is (modelname, (param_file_seed1, param_file_seed2)); each
    parameter file is simulated once and the two fitnesses are averaged.
    """
    (modelname, (param1, param2)) = obj
    first = run_js_simulation((modelname, param1))
    second = run_js_simulation((modelname, param2))
    return (first + second) / 2
def simulate_generation(generation, modelname, num_procs=12):
    """Evaluate every individual of *generation* in parallel.

    Each individual is simulated twice — once with the current POTTS_SEED
    and once with the previous one — and the two fitnesses are averaged
    (see simulate_two) to smooth out seed-specific noise.  When
    CHANGE_POTTS_SEED_PER_GEN is set, the global POTTS_SEED is incremented
    as a side effect.

    Returns a list of (individual, fitness) pairs in generation order.
    """
    global POTTS_SEED
    paramnames = create_param_files(generation)
    # Also run with previous seed to smooth out errors
    paramnames2 = create_param_files(generation, "seed2_", POTTS_SEED-1)
    if CHANGE_POTTS_SEED_PER_GEN:
        POTTS_SEED = POTTS_SEED + 1
    paramnametuple = zip(paramnames, paramnames2)
    args = list(map(lambda tup: (modelname, tup), paramnametuple))
    with Pool(num_procs) as p:
        sim_results = p.map(simulate_two, args)
    fitnesses = sim_results
    gen_fitnesses = list(zip(generation, fitnesses))
    return gen_fitnesses
def init():
    """Ensure the parameter output directory exists."""
    os.makedirs(PARAM_DIR, exist_ok=True)
def next_gen_elites_only(generation_with_fitnesses):
    """Build the next generation from the top N_ELITE individuals only:
    the elites are kept as-is and the rest of the generation is filled
    with mutated copies of them, cycling through the elites in order."""
    ranked = sorted(generation_with_fitnesses, key=lambda pair: pair[1], reverse=True)
    offspring = [cell for cell, _ in ranked][:N_ELITE]
    parent = 0
    while len(offspring) < GENERATION_SIZE:
        offspring.append(mutate_cell(offspring[parent % N_ELITE]))
        parent += 1
    return offspring
def next_generation_elitism_and_roulette(generation_with_fitnesses):
    """Elitism plus fitness-proportionate (roulette-wheel) selection.

    The top N_ELITE individuals survive unchanged; the remaining slots are
    filled with mutated copies of individuals sampled with probability
    proportional to their (shifted) fitness.  Also prints per-generation
    fitness statistics.
    """
    # Sort by decreasing fitness (best first).
    gen_w_f = sorted(generation_with_fitnesses, key=lambda x: x[1], reverse=True)
    print(gen_w_f)
    fitnesses = list(map(lambda x: x[1], gen_w_f))
    print(f"Genfitness min: ", np.min(fitnesses), " mean: ", np.mean(fitnesses), " median: ", np.median(fitnesses), " max: ", np.max(fitnesses), " std dev: ", np.std(fitnesses))
    gen_w_f = list(map(lambda x: x[0], gen_w_f))
    elites = gen_w_f[:N_ELITE]
    i = 0
    # while len(gen_w_f) < GENERATION_SIZE:
    #     gen_w_f.append(mutate_cell(gen_w_f[i%N_ELITE]))
    #     i += 1
    # Shift fitnesses so the minimum maps to +50 (all weights positive),
    # then normalise into a probability distribution.
    sample_weights = np.array(fitnesses)
    sample_weights = sample_weights - np.min(sample_weights) + 50
    sample_weights = sample_weights/sum(sample_weights)
    print(sample_weights)
    sampled_cells = np.random.choice(gen_w_f, size=GENERATION_SIZE-N_ELITE, p=sample_weights)
    next_gen = elites + [mutate_cell(c) for c in sampled_cells]
    return next_gen
def next_generation_elitism_and_inverse_position_sample(generation_with_fitnesses):
    """Elitism plus rank-based selection: the top N_ELITE survive
    unchanged and the rest of the generation is filled with mutated copies
    sampled with probability proportional to 1/(rank + 3)^2."""
    ranked = sorted(generation_with_fitnesses, key=lambda pair: pair[1], reverse=True)
    print(ranked[0])
    cells = [cell for cell, _ in ranked]
    elites = cells[:N_ELITE]
    # Rank-based weights: earlier (fitter) positions get larger weights.
    weights = np.array([(1/(rank+3))**2 for rank in range(GENERATION_SIZE)])
    weights = weights/sum(weights)
    chosen = np.random.choice(cells, size=GENERATION_SIZE-N_ELITE, p=weights)
    return elites + [mutate_cell(cell) for cell in chosen]
def init_individual():
start = {'MAX_ACT': 2, 'P': 250, 'V': 500, 'LAMBDA_ACT': 5, 'LAMBDA_P': 2, 'LAMBDA_V': 5, 'LAMBDA_CH': 5}
for p in ['MAX_ACT', 'LAMBDA_ACT', 'LAMBDA_CH']:
start[p] = np.round(np.random.uniform(low=2, high=50), decimals=2)
return start
def evolve(filename, num_generations, seed=None):
    """Run the full genetic algorithm for *num_generations* generations.

    *filename* is the Node.js model passed to the simulator; *seed*, when
    given, overrides the starting POTTS_SEED.  Each iteration evaluates
    the generation in parallel (one worker per CPU core), bumps the global
    GENERATION_NO and breeds the next generation via elitism + roulette
    selection.  The final generation is not returned — results live in the
    parameter files and the printed statistics.
    """
    global POTTS_SEED
    if seed is not None:
        POTTS_SEED = int(seed)
    print(f"Starting to simulate for (unknown), {num_generations}")
    print(f"Starting seed: {POTTS_SEED}")
    init()
    generation = [init_individual() for i in range(GENERATION_SIZE)]
    #generation = list(map(mutate_cell, generation))
    for i in range(num_generations):
        global GENERATION_NO
        print(f"Simulation generation: {GENERATION_NO}")
        gen_fitnesses = simulate_generation(generation, filename, num_procs=cpu_count())
        GENERATION_NO = GENERATION_NO+1
        generation = next_generation_elitism_and_roulette(gen_fitnesses)
| [
"numpy.mean",
"json.loads",
"numpy.median",
"os.makedirs",
"numpy.random.choice",
"subprocess.Popen",
"numpy.std",
"json.dumps",
"numpy.min",
"multiprocessing.cpu_count",
"numpy.max",
"numpy.random.standard_cauchy",
"numpy.array",
"numpy.random.uniform",
"numpy.random.seed",
"multiproc... | [((311, 337), 'numpy.random.seed', 'np.random.seed', (['POTTS_SEED'], {}), '(POTTS_SEED)\n', (325, 337), True, 'import numpy as np\n'), ((993, 1026), 'numpy.linalg.norm', 'np.linalg.norm', (['(endpos - startpos)'], {}), '(endpos - startpos)\n', (1007, 1026), True, 'import numpy as np\n'), ((1611, 1668), 'subprocess.Popen', 'subprocess.Popen', (['cmd'], {'shell': '(True)', 'stdout': 'subprocess.PIPE'}), '(cmd, shell=True, stdout=subprocess.PIPE)\n', (1627, 1668), False, 'import subprocess\n'), ((2057, 2605), 'json.loads', 'json.loads', (['"""\n {\n "conf":{\n "MAX_ACT": [0, 0, 0, 30],\n "V": [0, 30, 0, 500],\n "P": [0, 5, 0, 260],\n "LAMBDA_ACT": [0, 0, 0, 300],\n "LAMBDA_V": [0, 1000, 0, 5],\n "LAMBDA_P": [0, 1, 0, 2],\n "LAMBDA_CH": [0, 0, 0, 500],\n "seed": 1\n }\n }"""'], {}), '(\n """\n {\n "conf":{\n "MAX_ACT": [0, 0, 0, 30],\n "V": [0, 30, 0, 500],\n "P": [0, 5, 0, 260],\n "LAMBDA_ACT": [0, 0, 0, 300],\n "LAMBDA_V": [0, 1000, 0, 5],\n "LAMBDA_P": [0, 1, 0, 2],\n "LAMBDA_CH": [0, 0, 0, 500],\n "seed": 1\n }\n }"""\n )\n', (2067, 2605), False, 'import json\n'), ((2932, 2969), 'os.makedirs', 'os.makedirs', (['directory'], {'exist_ok': '(True)'}), '(directory, exist_ok=True)\n', (2943, 2969), False, 'import os\n'), ((4391, 4433), 'os.makedirs', 'os.makedirs', (['f"""{PARAM_DIR}"""'], {'exist_ok': '(True)'}), "(f'{PARAM_DIR}', exist_ok=True)\n", (4402, 4433), False, 'import os\n'), ((5497, 5516), 'numpy.array', 'np.array', (['fitnesses'], {}), '(fitnesses)\n', (5505, 5516), True, 'import numpy as np\n'), ((5685, 5760), 'numpy.random.choice', 'np.random.choice', (['gen_w_f'], {'size': '(GENERATION_SIZE - N_ELITE)', 'p': 'sample_weights'}), '(gen_w_f, size=GENERATION_SIZE - N_ELITE, p=sample_weights)\n', (5701, 5760), True, 'import numpy as np\n'), ((6429, 6504), 'numpy.random.choice', 'np.random.choice', (['gen_w_f'], {'size': '(GENERATION_SIZE - N_ELITE)', 'p': 'sample_weights'}), '(gen_w_f, size=GENERATION_SIZE - N_ELITE, p=sample_weights)\n', 
(6445, 6504), True, 'import numpy as np\n'), ((541, 565), 'numpy.random.choice', 'np.random.choice', (['[True]'], {}), '([True])\n', (557, 565), True, 'import numpy as np\n'), ((762, 806), 'numpy.array', 'np.array', (["(df['x'].iloc[0], df['y'].iloc[0])"], {}), "((df['x'].iloc[0], df['y'].iloc[0]))\n", (770, 806), True, 'import numpy as np\n'), ((823, 869), 'numpy.array', 'np.array', (["(df['x'].iloc[-1], df['y'].iloc[-1])"], {}), "((df['x'].iloc[-1], df['y'].iloc[-1]))\n", (831, 869), True, 'import numpy as np\n'), ((4198, 4213), 'multiprocessing.Pool', 'Pool', (['num_procs'], {}), '(num_procs)\n', (4202, 4213), False, 'from multiprocessing import Pool, Value, cpu_count\n'), ((5119, 5136), 'numpy.min', 'np.min', (['fitnesses'], {}), '(fitnesses)\n', (5125, 5136), True, 'import numpy as np\n'), ((5149, 5167), 'numpy.mean', 'np.mean', (['fitnesses'], {}), '(fitnesses)\n', (5156, 5167), True, 'import numpy as np\n'), ((5182, 5202), 'numpy.median', 'np.median', (['fitnesses'], {}), '(fitnesses)\n', (5191, 5202), True, 'import numpy as np\n'), ((5214, 5231), 'numpy.max', 'np.max', (['fitnesses'], {}), '(fitnesses)\n', (5220, 5231), True, 'import numpy as np\n'), ((5247, 5264), 'numpy.std', 'np.std', (['fitnesses'], {}), '(fitnesses)\n', (5253, 5264), True, 'import numpy as np\n'), ((5555, 5577), 'numpy.min', 'np.min', (['sample_weights'], {}), '(sample_weights)\n', (5561, 5577), True, 'import numpy as np\n'), ((6802, 6835), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(2)', 'high': '(50)'}), '(low=2, high=50)\n', (6819, 6835), True, 'import numpy as np\n'), ((3506, 3519), 'json.dumps', 'json.dumps', (['j'], {}), '(j)\n', (3516, 3519), False, 'import json\n'), ((7438, 7449), 'multiprocessing.cpu_count', 'cpu_count', ([], {}), '()\n', (7447, 7449), False, 'from multiprocessing import Pool, Value, cpu_count\n'), ((636, 663), 'numpy.random.standard_cauchy', 'np.random.standard_cauchy', ([], {}), '()\n', (661, 663), True, 'import numpy as np\n')] |
"""
Implementation of data fuzzification
Author: <NAME> (www.kaizhang.us)
https://github.com/taokz
"""
import numpy as np
from fuzzyset import FuzzySet
from gaussian_mf import gaussmf
import math
class FuzzyData(object):
    """Fuzzify a labelled data table: one fuzzy set per cell.

    Wraps a pandas-style DataFrame (``data``) whose class column is named
    ``target`` and turns every entry into a :class:`FuzzySet` whose
    membership degree is derived from per-class quantiles.
    """
    # Class-level defaults; instances overwrite these in __init__ /
    # quantile_fuzzification.
    _data = None              # raw table (pandas DataFrame expected)
    _fuzzydata = None         # array of FuzzySet objects, filled by quantile_fuzzification
    _epistemic_values = None  # membership degrees, same shape as _data
    _target = None            # name of the class/label column

    def __init__(self, data=None, target=None):
        """Store the table and the name of its label column (both optional)."""
        if data is not None:
            self._data = data
            self._target = target

    def quantile_fuzzification(self):
        """Compute per-class membership degrees from quantiles.

        Reference: Guevara et al., "Cross product kernels for fuzzy set
        similarity", FUZZ-IEEE 2017.  Each value gets a Gaussian membership
        centred on its class median; the width is derived from the
        inter-quartile range (FWHM-to-sigma conversion), with the +0.001
        term guarding against a zero IQR when the quantiles coincide.
        """
        grouped = self._data.groupby([self._target])
        self._epistemic_values = grouped.transform(
            lambda x: np.exp(-np.square(x - x.quantile(0.5))
                             /
                             (np.abs(x.quantile(0.75) - x.quantile(0.25)) /
                              (2 * np.sqrt(2 * np.log(2))) + 0.001) ** 2))
        # Join data and epistemic values cell by cell into FuzzySet objects.
        num_rows = self._epistemic_values.shape[0]
        num_cols = self._epistemic_values.shape[1]
        self._fuzzydata = np.asarray([[FuzzySet(elements=self._data.iloc[j, i],
                                                md=self._epistemic_values.iloc[j, i])
                                      for i in range(num_cols)]
                                     for j in range(num_rows)])

    def get_fuzzydata(self):
        """Return the FuzzySet array (None before quantile_fuzzification)."""
        return self._fuzzydata

    def get_data(self):
        """Return the raw data table."""
        return self._data

    def get_epistemic_values(self):
        """Return the membership degrees (None before fuzzification)."""
        return self._epistemic_values

    def get_target(self):
        """Return the label column of the data."""
        return self._data[self._target]

    def show_class(self):
        """Print all members for debugging.

        Bug fix: the original referenced the bare names ``_data``,
        ``_fuzzydata``, ``_epistemic_values`` and ``_target``, which raised
        NameError -- they are attributes and must be read through ``self``.
        """
        print("(data) \n", self._data, "\n")
        print("(fuzzydata) \n", self._fuzzydata, "\n")
        print("(epistemic_values) \n", self._epistemic_values, "\n")
        print("(target) \n", self._target, "\n")
| [
"numpy.log",
"fuzzyset.FuzzySet"
] | [((1098, 1176), 'fuzzyset.FuzzySet', 'FuzzySet', ([], {'elements': 'self._data.iloc[j, i]', 'md': 'self._epistemic_values.iloc[j, i]'}), '(elements=self._data.iloc[j, i], md=self._epistemic_values.iloc[j, i])\n', (1106, 1176), False, 'from fuzzyset import FuzzySet\n'), ((781, 790), 'numpy.log', 'np.log', (['(2)'], {}), '(2)\n', (787, 790), True, 'import numpy as np\n')] |
#!/usr/bin/python3
#log_graph.py
# Plot two logged ADC channels from a tab-separated log file on stacked axes.
import numpy as np
import matplotlib.pyplot as plt
filename = "data.log"
# Index of the first data column in the header row
# (columns 0 and 1 hold 'sample' and 'date').
OFFSET=2
# Grab the header line so the channel names can label the plots.
with open(filename) as f:
    header = f.readline().split('\t')
# skip_header=1 skips the header row just read above.
data = np.genfromtxt(filename, delimiter='\t', skip_header=1,
                      names=['sample', 'date', 'DATA0',
                             'DATA1', 'DATA2', 'DATA3'])
fig = plt.figure(1)
ax1 = fig.add_subplot(211)#numrows, numcols, fignum
ax2 = fig.add_subplot(212)
# Channel 0 on the top axes (red), channel 1 on the bottom (blue);
# legend labels come straight from the file header.
ax1.plot(data['sample'],data['DATA0'],'r',
         label=header[OFFSET+0])
ax2.plot(data['sample'],data['DATA1'],'b',
         label=header[OFFSET+1])
ax1.set_title("ADC Samples")
ax1.set_xlabel('Samples')
ax1.set_ylabel('Reading')
ax2.set_xlabel('Samples')
ax2.set_ylabel('Reading')
leg1 = ax1.legend()
leg2 = ax2.legend()
plt.show()
#End
"matplotlib.pyplot.figure",
"numpy.genfromtxt",
"matplotlib.pyplot.show"
] | [((202, 322), 'numpy.genfromtxt', 'np.genfromtxt', (['filename'], {'delimiter': '"""\t"""', 'skip_header': '(1)', 'names': "['sample', 'date', 'DATA0', 'DATA1', 'DATA2', 'DATA3']"}), "(filename, delimiter='\\t', skip_header=1, names=['sample',\n 'date', 'DATA0', 'DATA1', 'DATA2', 'DATA3'])\n", (215, 322), True, 'import numpy as np\n'), ((375, 388), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {}), '(1)\n', (385, 388), True, 'import matplotlib.pyplot as plt\n'), ((815, 825), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (823, 825), True, 'import matplotlib.pyplot as plt\n')] |
import numpy as np
import matplotlib.pyplot as plt
from hpe3d.filter import filter_variable
def test_filter_variable():
    """Smoke-test filter_variable for the three dynamic models (c/v/a)."""
    # Constant-position model must leave a constant signal untouched.
    const_sig = np.ones((100, 1), dtype=float)
    np.testing.assert_allclose(const_sig, filter_variable(const_sig, mode='c'))

    # Constant-velocity model should track a linear ramp.
    ramp = np.arange(100, dtype=float)[:, np.newaxis]
    np.testing.assert_allclose(ramp, filter_variable(ramp, mode='v'), rtol=0.5)

    # Constant-acceleration model should track a quadratic signal.
    quad = ramp ** 2
    np.testing.assert_allclose(quad, filter_variable(quad, mode='a'), rtol=8.)

    # Output shape must match input shape for any number of columns.
    n_dim = np.random.randint(1, 20)
    multi = np.ones((200, n_dim), dtype=float)
    for mode in ('c', 'v', 'a'):
        assert multi.shape == filter_variable(multi, mode=mode).shape

    print('test_filter_variable:\tsuccessful!')
# Run the self-test when the module is imported/executed.
test_filter_variable()
| [
"numpy.ones",
"numpy.testing.assert_allclose",
"numpy.random.randint",
"hpe3d.filter.filter_variable",
"numpy.arange"
] | [((176, 206), 'numpy.ones', 'np.ones', (['(100, 1)'], {'dtype': 'float'}), '((100, 1), dtype=float)\n', (183, 206), True, 'import numpy as np\n'), ((221, 250), 'hpe3d.filter.filter_variable', 'filter_variable', (['x1'], {'mode': '"""c"""'}), "(x1, mode='c')\n", (236, 250), False, 'from hpe3d.filter import filter_variable\n'), ((255, 294), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['x1', 'x1_filt'], {}), '(x1, x1_filt)\n', (281, 294), True, 'import numpy as np\n'), ((424, 453), 'hpe3d.filter.filter_variable', 'filter_variable', (['x2'], {'mode': '"""v"""'}), "(x2, mode='v')\n", (439, 453), False, 'from hpe3d.filter import filter_variable\n'), ((458, 507), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['x2', 'x2_filt'], {'rtol': '(0.5)'}), '(x2, x2_filt, rtol=0.5)\n', (484, 507), True, 'import numpy as np\n'), ((609, 638), 'hpe3d.filter.filter_variable', 'filter_variable', (['x3'], {'mode': '"""a"""'}), "(x3, mode='a')\n", (624, 638), False, 'from hpe3d.filter import filter_variable\n'), ((643, 692), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['x3', 'x3_filt'], {'rtol': '(8.0)'}), '(x3, x3_filt, rtol=8.0)\n', (669, 692), True, 'import numpy as np\n'), ((731, 755), 'numpy.random.randint', 'np.random.randint', (['(1)', '(20)'], {}), '(1, 20)\n', (748, 755), True, 'import numpy as np\n'), ((764, 798), 'numpy.ones', 'np.ones', (['(200, n_dim)'], {'dtype': 'float'}), '((200, n_dim), dtype=float)\n', (771, 798), True, 'import numpy as np\n'), ((813, 842), 'hpe3d.filter.filter_variable', 'filter_variable', (['x4'], {'mode': '"""c"""'}), "(x4, mode='c')\n", (828, 842), False, 'from hpe3d.filter import filter_variable\n'), ((895, 924), 'hpe3d.filter.filter_variable', 'filter_variable', (['x4'], {'mode': '"""v"""'}), "(x4, mode='v')\n", (910, 924), False, 'from hpe3d.filter import filter_variable\n'), ((977, 1006), 'hpe3d.filter.filter_variable', 'filter_variable', (['x4'], {'mode': '"""a"""'}), "(x4, 
mode='a')\n", (992, 1006), False, 'from hpe3d.filter import filter_variable\n'), ((367, 394), 'numpy.arange', 'np.arange', (['(100)'], {'dtype': 'float'}), '(100, dtype=float)\n', (376, 394), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Demo / manual test for the glmatrix helpers: builds translation and
# rotation matrices, combines them in both orders, and exercises the
# matstack push/pop API, printing every intermediate matrix.
import math
from glmatrix import *
import numpy as np
print("#########################################")
#np.set_printoptions(precision=3)
# Fixed-width float formatting so the printed matrices line up.
np.set_printoptions(formatter={'float': '{: 8.3f}'.format})
#np.set_printoptions(suppress=True)
# A translation matrix to (5, 6, 7).
location_v = vec3_create([5.0, 6.0, 7.0])
location_m = gl_mat4_from_translation(location_v)
print("Location Matrix")
print("")
#print(location_m)
transform_array = np.array(location_m, np.float32)
print(transform_array)
print("")
print("#########################################")
# -10 degree rotation about X, built via a quaternion.
deg = -10
rad = (deg * math.pi / 180)
q_rot = gl_quat_from_x_rotation(rad)
rotation_m = mat4_create(None)
gl_mat4_from_quat(q_rot, rotation_m)
print("Rotation Matrix - X")
print("")
transform_array = np.array(q_rot, np.float32)
print(transform_array)
transform_array = np.array(rotation_m, np.float32)
print(transform_array)
print("")
print("#########################################")
# Same rotation about Y.
deg = -10
rad = (deg * math.pi / 180)
q_rot = gl_quat_from_y_rotation(rad)
rotation_m = mat4_create(None)
gl_mat4_from_quat(q_rot, rotation_m)
print("Rotation Matrix - Y")
print("")
transform_array = np.array(q_rot, np.float32)
print(transform_array)
transform_array = np.array(rotation_m, np.float32)
print(transform_array)
print("")
print("#########################################")
# Same rotation about Z; rotation_m keeps this value for the sections below.
deg = -10
rad = (deg * math.pi / 180)
q_rot = gl_quat_from_z_rotation(rad)
rotation_m = mat4_create(None)
gl_mat4_from_quat(q_rot, rotation_m)
print("Rotation Matrix - Z")
print("")
transform_array = np.array(q_rot, np.float32)
print(transform_array)
transform_array = np.array(rotation_m, np.float32)
print(transform_array)
print("")
print("#########################################")
# A second translation, by (10, 0, 0).
displacement_v = vec3_create([10.0, 0.0, 0])
displacement_m = gl_mat4_from_translation(displacement_v)
print("Translate Matrix")
print("")
#print(displacement_m)
transform_array = np.array(displacement_m, np.float32)
print(transform_array)
print("")
print("#########################################")
# Compose on a matrix stack: translate first, then rotate.
print("Translate and Rotate")
ms = matstack()
print("")
print("")
ms.loadMatrix(location_m)
mvMatrix_tmp = mat4_create(None)
ms.multMatrix(displacement_m)
ms.getMatrix(mvMatrix_tmp)
transform_array = np.array(mvMatrix_tmp, np.float32)
print(transform_array)
print("")
mvMatrix = mat4_create(None)
ms.multMatrix(rotation_m)
ms.getMatrix(mvMatrix)
transform_array = np.array(mvMatrix, np.float32)
print(transform_array)
print("")
print("#########################################")
# Same factors multiplied in the opposite order.
print("Rotate and Translate")
ms = matstack()
print("")
transform_array = np.array(rotation_m, np.float32)
print(transform_array)
print("")
transform_array = np.array(location_m, np.float32)
print(transform_array)
print("")
ms.loadMatrix(location_m)
mvMatrix_tmp = mat4_create(None)
ms.multMatrix(rotation_m)
ms.getMatrix(mvMatrix_tmp)
transform_array = np.array(mvMatrix_tmp, np.float32)
print(transform_array)
print("")
mvMatrix = mat4_create(None)
ms.multMatrix(displacement_m)
ms.getMatrix(mvMatrix)
transform_array = np.array(mvMatrix, np.float32)
print(transform_array)
print("")
print("#########################################")
print("#########################################")
# Exercise pushMatrix/popMatrix: each pop should restore the matrix that
# was current at the matching push.
print("Push / Pop version")
print("")
ms = matstack()
print("Initialise")
ms.loadMatrix(location_m)
mvMatrix = mat4_create(None)
ms.getMatrix(mvMatrix)
transform_array = np.array(mvMatrix, np.float32)
print(transform_array)
print("Push")
mvMatrix = mat4_create(None)
ms.pushMatrix()
ms.getMatrix(mvMatrix)
transform_array = np.array(mvMatrix, np.float32)
print(transform_array)
print("Translate")
mvMatrix = mat4_create(None)
ms.multMatrix(displacement_m)
ms.getMatrix(mvMatrix)
transform_array = np.array(mvMatrix, np.float32)
print(transform_array)
print("Pop")
mvMatrix = mat4_create(None)
ms.popMatrix()
ms.getMatrix(mvMatrix)
transform_array = np.array(mvMatrix, np.float32)
print(transform_array)
print("#########################################")
# Push, then translate followed by rotate, then pop back.
print("Push")
mvMatrix = mat4_create(None)
ms.pushMatrix()
ms.getMatrix(mvMatrix)
transform_array = np.array(mvMatrix, np.float32)
print(transform_array)
print("Translate")
mvMatrix = mat4_create(None)
ms.multMatrix(displacement_m)
ms.getMatrix(mvMatrix)
transform_array = np.array(mvMatrix, np.float32)
print(transform_array)
print("Rotate")
mvMatrix = mat4_create(None)
ms.multMatrix(rotation_m)
ms.getMatrix(mvMatrix)
transform_array = np.array(mvMatrix, np.float32)
print(transform_array)
print("Pop")
mvMatrix = mat4_create(None)
ms.popMatrix()
ms.getMatrix(mvMatrix)
transform_array = np.array(mvMatrix, np.float32)
print(transform_array)
print("#########################################")
# Push, then rotate followed by translate (reverse order), then pop back.
print("Push")
mvMatrix = mat4_create(None)
ms.pushMatrix()
ms.getMatrix(mvMatrix)
transform_array = np.array(mvMatrix, np.float32)
print(transform_array)
print("Rotate")
mvMatrix = mat4_create(None)
ms.multMatrix(rotation_m)
ms.getMatrix(mvMatrix)
transform_array = np.array(mvMatrix, np.float32)
print(transform_array)
print("Translate")
mvMatrix = mat4_create(None)
ms.multMatrix(displacement_m)
ms.getMatrix(mvMatrix)
transform_array = np.array(mvMatrix, np.float32)
print(transform_array)
print("Pop")
mvMatrix = mat4_create(None)
ms.popMatrix()
ms.getMatrix(mvMatrix)
transform_array = np.array(mvMatrix, np.float32)
print(transform_array)
| [
"numpy.array",
"numpy.set_printoptions"
] | [((191, 250), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'formatter': "{'float': '{: 8.3f}'.format}"}), "(formatter={'float': '{: 8.3f}'.format})\n", (210, 250), True, 'import numpy as np\n'), ((455, 487), 'numpy.array', 'np.array', (['location_m', 'np.float32'], {}), '(location_m, np.float32)\n', (463, 487), True, 'import numpy as np\n'), ((777, 804), 'numpy.array', 'np.array', (['q_rot', 'np.float32'], {}), '(q_rot, np.float32)\n', (785, 804), True, 'import numpy as np\n'), ((847, 879), 'numpy.array', 'np.array', (['rotation_m', 'np.float32'], {}), '(rotation_m, np.float32)\n', (855, 879), True, 'import numpy as np\n'), ((1169, 1196), 'numpy.array', 'np.array', (['q_rot', 'np.float32'], {}), '(q_rot, np.float32)\n', (1177, 1196), True, 'import numpy as np\n'), ((1239, 1271), 'numpy.array', 'np.array', (['rotation_m', 'np.float32'], {}), '(rotation_m, np.float32)\n', (1247, 1271), True, 'import numpy as np\n'), ((1561, 1588), 'numpy.array', 'np.array', (['q_rot', 'np.float32'], {}), '(q_rot, np.float32)\n', (1569, 1588), True, 'import numpy as np\n'), ((1631, 1663), 'numpy.array', 'np.array', (['rotation_m', 'np.float32'], {}), '(rotation_m, np.float32)\n', (1639, 1663), True, 'import numpy as np\n'), ((1934, 1970), 'numpy.array', 'np.array', (['displacement_m', 'np.float32'], {}), '(displacement_m, np.float32)\n', (1942, 1970), True, 'import numpy as np\n'), ((2264, 2298), 'numpy.array', 'np.array', (['mvMatrix_tmp', 'np.float32'], {}), '(mvMatrix_tmp, np.float32)\n', (2272, 2298), True, 'import numpy as np\n'), ((2431, 2461), 'numpy.array', 'np.array', (['mvMatrix', 'np.float32'], {}), '(mvMatrix, np.float32)\n', (2439, 2461), True, 'import numpy as np\n'), ((2626, 2658), 'numpy.array', 'np.array', (['rotation_m', 'np.float32'], {}), '(rotation_m, np.float32)\n', (2634, 2658), True, 'import numpy as np\n'), ((2712, 2744), 'numpy.array', 'np.array', (['location_m', 'np.float32'], {}), '(location_m, np.float32)\n', (2720, 2744), True, 'import numpy 
as np\n'), ((2912, 2946), 'numpy.array', 'np.array', (['mvMatrix_tmp', 'np.float32'], {}), '(mvMatrix_tmp, np.float32)\n', (2920, 2946), True, 'import numpy as np\n'), ((3084, 3114), 'numpy.array', 'np.array', (['mvMatrix', 'np.float32'], {}), '(mvMatrix, np.float32)\n', (3092, 3114), True, 'import numpy as np\n'), ((3429, 3459), 'numpy.array', 'np.array', (['mvMatrix', 'np.float32'], {}), '(mvMatrix, np.float32)\n', (3437, 3459), True, 'import numpy as np\n'), ((3587, 3617), 'numpy.array', 'np.array', (['mvMatrix', 'np.float32'], {}), '(mvMatrix, np.float32)\n', (3595, 3617), True, 'import numpy as np\n'), ((3764, 3794), 'numpy.array', 'np.array', (['mvMatrix', 'np.float32'], {}), '(mvMatrix, np.float32)\n', (3772, 3794), True, 'import numpy as np\n'), ((3920, 3950), 'numpy.array', 'np.array', (['mvMatrix', 'np.float32'], {}), '(mvMatrix, np.float32)\n', (3928, 3950), True, 'import numpy as np\n'), ((4130, 4160), 'numpy.array', 'np.array', (['mvMatrix', 'np.float32'], {}), '(mvMatrix, np.float32)\n', (4138, 4160), True, 'import numpy as np\n'), ((4307, 4337), 'numpy.array', 'np.array', (['mvMatrix', 'np.float32'], {}), '(mvMatrix, np.float32)\n', (4315, 4337), True, 'import numpy as np\n'), ((4477, 4507), 'numpy.array', 'np.array', (['mvMatrix', 'np.float32'], {}), '(mvMatrix, np.float32)\n', (4485, 4507), True, 'import numpy as np\n'), ((4633, 4663), 'numpy.array', 'np.array', (['mvMatrix', 'np.float32'], {}), '(mvMatrix, np.float32)\n', (4641, 4663), True, 'import numpy as np\n'), ((4843, 4873), 'numpy.array', 'np.array', (['mvMatrix', 'np.float32'], {}), '(mvMatrix, np.float32)\n', (4851, 4873), True, 'import numpy as np\n'), ((5013, 5043), 'numpy.array', 'np.array', (['mvMatrix', 'np.float32'], {}), '(mvMatrix, np.float32)\n', (5021, 5043), True, 'import numpy as np\n'), ((5190, 5220), 'numpy.array', 'np.array', (['mvMatrix', 'np.float32'], {}), '(mvMatrix, np.float32)\n', (5198, 5220), True, 'import numpy as np\n'), ((5346, 5376), 'numpy.array', 'np.array', 
(['mvMatrix', 'np.float32'], {}), '(mvMatrix, np.float32)\n', (5354, 5376), True, 'import numpy as np\n')] |
# ############ Curves, Knots, Chord Diagrams, Graphs, Polynomials ############
"""
This subsubmodule contains functions dealing with :
Chord diagrams of loops in the plane (for instance coming from knot diagrams)
Interlace graphs of chord diagrams (with orientations and signs)
Graph labeled tree factorisation (aka Cunningham's modular decomposition)
Fast computations for energy partition functions
The aim is to understand the relationships between various polynomial invariants
associated to those topological and combinatorial objects such as
Fricke polynomials of modular geodesics, Kauffman brackets of modular knots
Energy partition functions of chord diagrams, Tutte polynomials of interlace graphs
Main applications in mind:
Computing partition functions of chord diagrams coming from knotted DNA
Arithmetics of Gauss class groups and sandpile groups
"""
### ### LIBRARIES ### ###
from typing import List, Callable
import numpy as np
import cypari
#import matplotlib.pyplot as plt
## ###### Classes, choix des structures de données, et conversions ######
class ChorDiag(object):
    """Oriented, signed chord diagram of a closed plane curve.

    Placeholder -- no behaviour is implemented yet (see chordiag_of_lyndon
    for the intended constructor from a Lyndon word).

    :code: ChorDiag(['a', 'b', 'c', 'd', 'b', 'a', 'd', 'c'],
                    orientations = [1,0,1,0,1,0,1,0],
                    signs = [-1,1,-1,1,-1,1,-1,1])

    Args:
        labels (list): double-occurrence word of even length; each label
            appears exactly twice (once per strand through the crossing).
        orientations (list of bool): crossing orientation flags, one per
            symbol (first vector / second vector).
        signs (list of bool): crossing signs, one per symbol.
    """
    pass
class Graph(object):
    """Placeholder for interlace graphs (adjacency-based); not implemented yet."""
    pass
## ###### FROM LYNDON WORDS TO CHORD DIAGRAMS ######
"""
FONCTION 2 : De la courbe au diagramme de cordes
ENTREE : Mot L&R)
SORTIE : son diagramme de cordes (linéaire), c'est à dire un mot dans lequel chaque symbole apparait exactement deux fois par exemple abcbdadc
PROCEDE :
on parcoure la courbe paramétréé,
à chaque fois qu'on rencontre une intersection :
soit elle n'a pas de label, on lui en donne un et on l'append au mot, on continue
soit elle a déjà déjà un label, on l'append au mot, on continue
arrivée au point de départ on a notre diagramme de cordes
"""
def chordiag_of_lyndon(word : str) -> "ChorDiag":
    """Return the chord diagram of the closed plane curve coded by *word*.

    Stub -- not implemented yet.  Intended algorithm (FONCTION 2 above):
    traverse the parametrised curve; give each crossing a fresh label on
    its first visit and append the label to the output word on every
    visit, yielding a double-occurrence word once back at the start.
    """
## ###### FROM CHORD DIAGRAMS TO INTERLACE GRAPHS ######
    pass
"""
FONCTION 3 : Du diagramme de cordes au graphe d'enlacement
ENTREE : un diagramme de cordes
SORTIE : un graphe
PROCEDE :
pour chaque paire de lettres semblable (same-label) il y a un sommet,
deux lettre sont reliées par une arête lorsqu'elles s'enlacent cycliquemenet
par exemple ..a...b...b...a.. ne s'enlacent pas mais ..a...b...a...b.. s'enlacent
"""
def gradma_of_chordiag(chordiag : List[str]) -> np.ndarray:
    """Return the adjacency matrix of the interlace graph of a chord diagram.

    The diagram is given as a double-occurrence word: a sequence in which
    every label appears exactly twice.  Two chords are linked (adjacent in
    the interlace graph) when their endpoints alternate along the word,
    i.e. ..a...b...a...b.. is linked while ..a...b...b...a.. is not.
    (The original stub only documented this behaviour; it is implemented
    here.)

    Args:
        chordiag: double-occurrence word listing the chord labels in the
            order they are met along the curve; each label must appear
            exactly twice.

    Returns:
        np.ndarray: symmetric 0/1 matrix with one row/column per distinct
        label, ordered by first occurrence; entry 1 marks interlaced
        chords.  NOTE(review): the orientations/signs of an oriented
        ChorDiag are not encoded yet -- only the unsigned interlacement.

    :Example:
        >>> gradma_of_chordiag(['a', 'b', 'a', 'b']).tolist()
        [[0, 1], [1, 0]]
    """
    # Endpoints of each chord, keyed by label, in order of first occurrence.
    endpoints = {}
    for position, label in enumerate(chordiag):
        endpoints.setdefault(label, []).append(position)
    labels = list(endpoints)
    n = len(labels)
    gradma = np.zeros((n, n), dtype=int)
    for i in range(n):
        a0, a1 = endpoints[labels[i]]
        for j in range(i + 1, n):
            b0, b1 = endpoints[labels[j]]
            # Chords interlace iff exactly one endpoint of chord j lies
            # strictly between the endpoints of chord i (alternation test).
            if (a0 < b0 < a1) != (a0 < b1 < a1):
                gradma[i, j] = gradma[j, i] = 1
    return gradma
def chordiag_genus(chordiag : List[str]) -> int:
    """Return the GF(2) rank of the interlace adjacency matrix of *chordiag*.

    Bug fix: the original called ``np.rank``, which was an alias of
    ``numpy.ndim`` (never a matrix rank) and has been removed from NumPy.
    The rank relevant here is over GF(2), so it is computed by Gaussian
    elimination modulo 2 rather than ``np.linalg.matrix_rank`` (which works
    over the reals).

    NOTE(review): the genus of a chord diagram is conventionally half this
    rank; the function returns the rank itself, matching the original
    ``return rank`` -- confirm which convention callers expect.
    """
    adj_mod_2 = np.asarray(gradma_of_chordiag(chordiag), dtype=np.uint8) % 2  # adjacency matrix mod 2
    matrix = adj_mod_2.copy()
    rank = 0
    n_rows, n_cols = matrix.shape
    for col in range(n_cols):
        # Find a pivot row with a 1 in this column, below the current rank.
        pivot = None
        for row in range(rank, n_rows):
            if matrix[row, col]:
                pivot = row
                break
        if pivot is None:
            continue
        matrix[[rank, pivot]] = matrix[[pivot, rank]]
        # XOR-eliminate every other 1 in this column (arithmetic mod 2).
        for row in range(n_rows):
            if row != rank and matrix[row, col]:
                matrix[row] ^= matrix[rank]
        rank += 1
    return rank
def chordiag_is_gauss(chordiag : List[str]) -> bool:
    """Check that *chordiag* is a well-formed double-occurrence word.

    A chord diagram word must use every label exactly twice; the empty
    word counts as a (trivially valid) empty diagram.

    Bug fix: in the original file the ``def`` line was followed only by a
    column-0 comment and string, leaving the function without a body and
    making the module unparseable.

    NOTE(review): full Gauss-code realizability (whether the diagram comes
    from a closed plane curve, e.g. via the Lovasz / trip-matrix
    criterion) is NOT tested here yet -- only this necessary condition.
    """
    counts = {}
    for label in chordiag:
        counts[label] = counts.get(label, 0) + 1
    return all(count == 2 for count in counts.values())
## ###### CUNNINGHAM GRAPH LABELED TREE FACTORISATION OF GRAPHS ######
"""
FONCTION 4 : Décomposition de Cunningham du graphes
ENTREE : un graphe
SORTIE : un arbre de graphes "premiers"
PROCEDE : c'est un algo pénible avec des "split",
il faudrait se servir d'un truc déjà implémenté
"""
| [
"numpy.rank"
] | [((3513, 3531), 'numpy.rank', 'np.rank', (['adj_mod_2'], {}), '(adj_mod_2)\n', (3520, 3531), True, 'import numpy as np\n')] |
import numpy as np
import tensorflow as tf
from collections import deque, namedtuple
from typing import Tuple
import random
# One training trace stored in the replay memory: the action/reward/gradient
# histories plus the dataset and labels the logit was being fitted on.
Transition = namedtuple('Transition',
                        ('actions', 'rewards', 'gradients', 'data', 'targets'))
# Destination directory for TensorBoard summaries.
logs_path = '/tmp/tensorflow_logs/example/'
class ReplayMemory(object):
    """
    Bounded FIFO buffer of training traces with uniform random sampling.
    Once ``capacity`` transitions are stored, pushing a new one silently
    evicts the oldest.

    :param int capacity: Size of the replay memory
    """
    def __init__(self, capacity: int):
        self.capacity = capacity
        self.memory = deque([], maxlen=capacity)
        self.position = 0

    def push(self, transition: Transition):
        """
        Appends one observation, evicting the oldest when the buffer is full.

        :param transition: Transition object
        :return: none
        """
        self.memory.append(transition)

    def sample(self, batchsize: int):
        """
        Draws ``batchsize`` stored transitions uniformly, without replacement.

        :param batchsize:
        :return: sample of Transition objects
        """
        return random.sample(self.memory, batchsize)

    def __len__(self):
        return len(self.memory)
class Agent(object):
    """
    Agent that learns the update rule for a logistic regression.

    A small policy network (built in TF1 graph mode) maps the recent history
    of weight vectors, meta-losses and weight updates onto the next weight
    update.  Optimization traces are collected in a ReplayMemory and
    replayed to train the policy.

    :param sess: Tensorflow session.
    """
    def __init__(self, sess: tf.Session):
        self.memory = ReplayMemory(5000)  # replay buffer of optimization traces
        self.batch_size = 30  # traces sampled per meta-training step
        self.sess = sess
        self.mode = tf.estimator.ModeKeys.TRAIN
        self._init_graph()
        # Plain SGD on the meta-loss defined in _init_graph.
        self.optimizer = tf.train.GradientDescentOptimizer(learning_rate=1e-4)
        init_op = tf.global_variables_initializer()
        self.sess.run(init_op)
        self.summary_writer = tf.summary.FileWriter(logs_path, graph=tf.get_default_graph())
    def _init_graph(self):
        # Dataset placeholders: 100 samples, feature dimension left open.
        self.data = tf.placeholder(name='data',
                                   dtype=tf.float32,
                                   shape=[100, None])
        self.targets = tf.placeholder(name='target',
                                      dtype=tf.float32,
                                      shape=[100, None])
        # History placeholders: 25 time steps of 3-dim weight vectors
        # (flattened to 75), scalar rewards, and weight updates.
        self.actions = tf.placeholder(name='action',
                                      shape=[None, 3 * 25],
                                      dtype=tf.float32)
        self.reward = tf.placeholder(name='reward',
                                     shape=[None, 25],
                                     dtype=tf.float32)
        self.grads = tf.placeholder(name='gradients',
                                    shape=[None, 3 * 25],
                                    dtype=tf.float32)
        self.last_action = tf.placeholder(name='last_action',
                                          shape=[None, 3],
                                          dtype=tf.float32)
        # We encode the bias by adding a third column to the data filled with 1's
        self.weights = tf.Variable(initial_value=[[1e-5, 1e-4, 0]],
                                   name="logit_weights",
                                   dtype=tf.float32,
                                   expected_shape=[1, 3],
                                   trainable=True)
        with tf.name_scope("policy"):
            # Subgraph to define the policy network. We construct the input from
            # atomic observation objects
            self.input_layer = tf.concat([self.actions, self.reward, self.grads],
                                         axis=1,
                                         name="state_concatenation")
            self.dense = tf.layers.dense(inputs=self.input_layer,
                                         units=50,
                                         activation=tf.nn.softmax,
                                         name='dense_1')
            self.dropout = tf.layers.dropout(inputs=self.dense,
                                             rate=0.4,
                                             training=(self.mode == tf.estimator.ModeKeys.TRAIN),
                                             name='dropout')
            self.policy = tf.layers.dense(inputs=self.dropout,
                                          units=3,
                                          name='output_layer')
        with tf.name_scope('update_weights'):
            # We update the weights variables using the policy output and the weights from the
            # previous transaction
            # NOTE(review): this rebinds self.weights from the tf.Variable
            # above to a derived tensor; the variable itself is no longer
            # referenced by the loss afterwards -- confirm this is intended.
            self.weights = self.last_action - self.policy
        with tf.name_scope("meta_loss"):
            # The meta-loss is constructed by varying the input data of a logit and then generally
            # trying to find the right weights:
            self.logits = tf.log(tf.nn.sigmoid(tf.matmul(self.data, self.weights, transpose_b=True)))
            self.loss = -1. * tf.reduce_mean(tf.matmul(self.targets, self.logits, transpose_a=True))
    def _train_minibatch(self):
        """
        Samples from the ReplayMemory and trains the policy on the sampled observations
        :return: None
        """
        batch: Tuple[Transition] = self.memory.sample(self.batch_size)
        for obs in batch:
            action = obs.actions
            reward = obs.rewards
            grad = obs.gradients
            data = obs.data
            targets = obs.targets
            # NOTE(review): minimize() is invoked inside the loop, so new
            # optimizer ops are added to the graph on every iteration --
            # confirm this graph growth is acceptable.
            self.sess.run(self.optimizer.minimize(self.loss),
                          feed_dict={
                              self.actions: np.array(action).flatten().reshape(-1, 75),
                              self.reward: np.array(reward).flatten().reshape(-1, 25),
                              self.grads: np.array(grad).flatten().reshape(-1, 75),
                              self.last_action: np.array(action[-1]).flatten().reshape(-1, 3),
                              self.data: data,
                              self.targets: np.array(targets).reshape(100, -1)
                          })
    def _run_single_round(self, x0: list):
        """
        Runs a single optimization round on a fixed dataset to create new memories to train on.
        :param x0: Initial value for the weights.
        :return: None
        """
        # initialize round with new data
        mean0 = [.1, .1]
        cov0 = [[1, .01], [.01, 1]]
        mean1 = [-.1, -.1]
        cov1 = [[1, .02], [.02, 1]]
        data, targets = create_data_for_metaloss(mean0, mean1, cov0, cov1)
        # augment the data with a constant np.ones field to incorporate bias term
        data = np.concatenate([data, np.ones(data.shape[0]).reshape(data.shape[0], 1)], axis=1)
        # Initialize finite state space with a maximum FIFO queue
        action = deque([], maxlen=25)
        reward = deque([], maxlen=25)
        grad = deque([], maxlen=25)
        for _ in range(25):
            action.append([0, 0, 0])  # 2 weights + 1 bias
            reward.append(0)
            grad.append([0, 0, 0])  # updates to the actions, a.k.a. logit weights
        action.append(x0)
        rew = 0
        reward.append(rew)
        grad.append(len(x0) * [0.0])
        # Run a single event by doing 100 iterations of the update rule.
        for idx in range(101):
            rew, grad_update, weight = self.sess.run(
                [self.loss, self.policy, self.weights],
                feed_dict={
                    self.actions: np.array(action).flatten().reshape(-1, 75),
                    self.reward: np.array(reward).flatten().reshape(-1, 25),
                    self.grads: np.array(grad).flatten().reshape(-1, 75),
                    self.last_action: np.array(action[-1]).flatten().reshape(-1, 3),
                    self.data: data,
                    self.targets: np.array(targets).reshape(100, -1)
                })
            if idx % 20 == 0:
                print(rew, weight)
            # adjust tensorflow output and push it to the ReplayMemory as observation
            action.append(weight.squeeze().flatten().tolist())
            reward.append(rew.flatten().tolist()[0])
            grad.append(grad_update.squeeze().flatten().tolist())
            # NOTE(review): the same deque objects are stored in every
            # Transition of this round; later mutations are visible through
            # previously pushed observations -- confirm this is intended.
            obs = Transition(actions=action,
                             gradients=grad,
                             rewards=reward,
                             data=data,
                             targets=targets)
            self.memory.push(obs)
    def learn(self):
        """Alternate between generating traces and meta-training the policy."""
        for _ in range(500):
            self._run_single_round(list(np.random.normal(0, 1, size=3)))
            if len(self.memory) >= self.batch_size:
                self._train_minibatch()
def create_data_for_metaloss(mean0, mean1, cov0, cov1):
    """Draw a two-class Gaussian toy dataset, 50 samples per class.

    Args:
        mean0, mean1: class means (length-2 sequences for 2-D features).
        cov0, cov1: 2x2 covariance matrices of the two classes.

    Returns:
        tuple: ``(data, targets)`` where ``data`` stacks the two classes
        row-wise (class 0 first) and ``targets`` holds label 0 for the
        first 50 rows and label 1 for the last 50.
    """
    class0 = np.random.multivariate_normal(mean0, cov0, size=50)
    class1 = np.random.multivariate_normal(mean1, cov1, size=50)
    data = np.concatenate((class0, class1), axis=0)
    targets = np.concatenate((np.zeros(50), np.ones(50)))
    return data, targets
if __name__ == "__main__":
sess = tf.Session()
agent = Agent(sess)
agent.learn()
| [
"numpy.hstack",
"numpy.array",
"collections.deque",
"tensorflow.Session",
"tensorflow.placeholder",
"tensorflow.concat",
"numpy.vstack",
"tensorflow.layers.dropout",
"tensorflow.matmul",
"tensorflow.get_default_graph",
"numpy.random.normal",
"random.sample",
"collections.namedtuple",
"nump... | [((138, 223), 'collections.namedtuple', 'namedtuple', (['"""Transition"""', "('actions', 'rewards', 'gradients', 'data', 'targets')"], {}), "('Transition', ('actions', 'rewards', 'gradients', 'data', 'targets')\n )\n", (148, 223), False, 'from collections import deque, namedtuple\n'), ((8596, 8647), 'numpy.random.multivariate_normal', 'np.random.multivariate_normal', (['mean0', 'cov0'], {'size': '(50)'}), '(mean0, cov0, size=50)\n', (8625, 8647), True, 'import numpy as np\n'), ((8660, 8711), 'numpy.random.multivariate_normal', 'np.random.multivariate_normal', (['mean1', 'cov1'], {'size': '(50)'}), '(mean1, cov1, size=50)\n', (8689, 8711), True, 'import numpy as np\n'), ((8723, 8748), 'numpy.vstack', 'np.vstack', (['[data0, data1]'], {}), '([data0, data1])\n', (8732, 8748), True, 'import numpy as np\n'), ((8764, 8782), 'numpy.zeros', 'np.zeros', ([], {'shape': '(50)'}), '(shape=50)\n', (8772, 8782), True, 'import numpy as np\n'), ((8797, 8814), 'numpy.ones', 'np.ones', ([], {'shape': '(50)'}), '(shape=50)\n', (8804, 8814), True, 'import numpy as np\n'), ((8829, 8858), 'numpy.hstack', 'np.hstack', (['[target0, target1]'], {}), '([target0, target1])\n', (8838, 8858), True, 'import numpy as np\n'), ((8925, 8937), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (8935, 8937), True, 'import tensorflow as tf\n'), ((578, 609), 'collections.deque', 'deque', (['[]'], {'maxlen': 'self.capacity'}), '([], maxlen=self.capacity)\n', (583, 609), False, 'from collections import deque, namedtuple\n'), ((1070, 1107), 'random.sample', 'random.sample', (['self.memory', 'batchsize'], {}), '(self.memory, batchsize)\n', (1083, 1107), False, 'import random\n'), ((1544, 1599), 'tensorflow.train.GradientDescentOptimizer', 'tf.train.GradientDescentOptimizer', ([], {'learning_rate': '(0.0001)'}), '(learning_rate=0.0001)\n', (1577, 1599), True, 'import tensorflow as tf\n'), ((1617, 1650), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), 
'()\n', (1648, 1650), True, 'import tensorflow as tf\n'), ((1825, 1889), 'tensorflow.placeholder', 'tf.placeholder', ([], {'name': '"""data"""', 'dtype': 'tf.float32', 'shape': '[100, None]'}), "(name='data', dtype=tf.float32, shape=[100, None])\n", (1839, 1889), True, 'import tensorflow as tf\n'), ((1983, 2049), 'tensorflow.placeholder', 'tf.placeholder', ([], {'name': '"""target"""', 'dtype': 'tf.float32', 'shape': '[100, None]'}), "(name='target', dtype=tf.float32, shape=[100, None])\n", (1997, 2049), True, 'import tensorflow as tf\n'), ((2150, 2219), 'tensorflow.placeholder', 'tf.placeholder', ([], {'name': '"""action"""', 'shape': '[None, 3 * 25]', 'dtype': 'tf.float32'}), "(name='action', shape=[None, 3 * 25], dtype=tf.float32)\n", (2164, 2219), True, 'import tensorflow as tf\n'), ((2318, 2383), 'tensorflow.placeholder', 'tf.placeholder', ([], {'name': '"""reward"""', 'shape': '[None, 25]', 'dtype': 'tf.float32'}), "(name='reward', shape=[None, 25], dtype=tf.float32)\n", (2332, 2383), True, 'import tensorflow as tf\n'), ((2479, 2551), 'tensorflow.placeholder', 'tf.placeholder', ([], {'name': '"""gradients"""', 'shape': '[None, 3 * 25]', 'dtype': 'tf.float32'}), "(name='gradients', shape=[None, 3 * 25], dtype=tf.float32)\n", (2493, 2551), True, 'import tensorflow as tf\n'), ((2651, 2720), 'tensorflow.placeholder', 'tf.placeholder', ([], {'name': '"""last_action"""', 'shape': '[None, 3]', 'dtype': 'tf.float32'}), "(name='last_action', shape=[None, 3], dtype=tf.float32)\n", (2665, 2720), True, 'import tensorflow as tf\n'), ((2911, 3042), 'tensorflow.Variable', 'tf.Variable', ([], {'initial_value': '[[1e-05, 0.0001, 0]]', 'name': '"""logit_weights"""', 'dtype': 'tf.float32', 'expected_shape': '[1, 3]', 'trainable': '(True)'}), "(initial_value=[[1e-05, 0.0001, 0]], name='logit_weights', dtype\n =tf.float32, expected_shape=[1, 3], trainable=True)\n", (2922, 3042), True, 'import tensorflow as tf\n'), ((6636, 6656), 'collections.deque', 'deque', (['[]'], {'maxlen': 
'(25)'}), '([], maxlen=25)\n', (6641, 6656), False, 'from collections import deque, namedtuple\n'), ((6674, 6694), 'collections.deque', 'deque', (['[]'], {'maxlen': '(25)'}), '([], maxlen=25)\n', (6679, 6694), False, 'from collections import deque, namedtuple\n'), ((6710, 6730), 'collections.deque', 'deque', (['[]'], {'maxlen': '(25)'}), '([], maxlen=25)\n', (6715, 6730), False, 'from collections import deque, namedtuple\n'), ((3189, 3212), 'tensorflow.name_scope', 'tf.name_scope', (['"""policy"""'], {}), "('policy')\n", (3202, 3212), True, 'import tensorflow as tf\n'), ((3367, 3458), 'tensorflow.concat', 'tf.concat', (['[self.actions, self.reward, self.grads]'], {'axis': '(1)', 'name': '"""state_concatenation"""'}), "([self.actions, self.reward, self.grads], axis=1, name=\n 'state_concatenation')\n", (3376, 3458), True, 'import tensorflow as tf\n'), ((3562, 3658), 'tensorflow.layers.dense', 'tf.layers.dense', ([], {'inputs': 'self.input_layer', 'units': '(50)', 'activation': 'tf.nn.softmax', 'name': '"""dense_1"""'}), "(inputs=self.input_layer, units=50, activation=tf.nn.softmax,\n name='dense_1')\n", (3577, 3658), True, 'import tensorflow as tf\n'), ((3806, 3924), 'tensorflow.layers.dropout', 'tf.layers.dropout', ([], {'inputs': 'self.dense', 'rate': '(0.4)', 'training': '(self.mode == tf.estimator.ModeKeys.TRAIN)', 'name': '"""dropout"""'}), "(inputs=self.dense, rate=0.4, training=self.mode == tf.\n estimator.ModeKeys.TRAIN, name='dropout')\n", (3823, 3924), True, 'import tensorflow as tf\n'), ((4084, 4150), 'tensorflow.layers.dense', 'tf.layers.dense', ([], {'inputs': 'self.dropout', 'units': '(3)', 'name': '"""output_layer"""'}), "(inputs=self.dropout, units=3, name='output_layer')\n", (4099, 4150), True, 'import tensorflow as tf\n'), ((4249, 4280), 'tensorflow.name_scope', 'tf.name_scope', (['"""update_weights"""'], {}), "('update_weights')\n", (4262, 4280), True, 'import tensorflow as tf\n'), ((4484, 4510), 'tensorflow.name_scope', 'tf.name_scope', 
(['"""meta_loss"""'], {}), "('meta_loss')\n", (4497, 4510), True, 'import tensorflow as tf\n'), ((1752, 1774), 'tensorflow.get_default_graph', 'tf.get_default_graph', ([], {}), '()\n', (1772, 1774), True, 'import tensorflow as tf\n'), ((4706, 4758), 'tensorflow.matmul', 'tf.matmul', (['self.data', 'self.weights'], {'transpose_b': '(True)'}), '(self.data, self.weights, transpose_b=True)\n', (4715, 4758), True, 'import tensorflow as tf\n'), ((4806, 4860), 'tensorflow.matmul', 'tf.matmul', (['self.targets', 'self.logits'], {'transpose_a': '(True)'}), '(self.targets, self.logits, transpose_a=True)\n', (4815, 4860), True, 'import tensorflow as tf\n'), ((8401, 8431), 'numpy.random.normal', 'np.random.normal', (['(0)', '(1)'], {'size': '(3)'}), '(0, 1, size=3)\n', (8417, 8431), True, 'import numpy as np\n'), ((6493, 6515), 'numpy.ones', 'np.ones', (['data.shape[0]'], {}), '(data.shape[0])\n', (6500, 6515), True, 'import numpy as np\n'), ((5833, 5850), 'numpy.array', 'np.array', (['targets'], {}), '(targets)\n', (5841, 5850), True, 'import numpy as np\n'), ((7665, 7682), 'numpy.array', 'np.array', (['targets'], {}), '(targets)\n', (7673, 7682), True, 'import numpy as np\n'), ((5432, 5448), 'numpy.array', 'np.array', (['action'], {}), '(action)\n', (5440, 5448), True, 'import numpy as np\n'), ((5519, 5535), 'numpy.array', 'np.array', (['reward'], {}), '(reward)\n', (5527, 5535), True, 'import numpy as np\n'), ((5605, 5619), 'numpy.array', 'np.array', (['grad'], {}), '(grad)\n', (5613, 5619), True, 'import numpy as np\n'), ((5695, 5715), 'numpy.array', 'np.array', (['action[-1]'], {}), '(action[-1])\n', (5703, 5715), True, 'import numpy as np\n'), ((7314, 7330), 'numpy.array', 'np.array', (['action'], {}), '(action)\n', (7322, 7330), True, 'import numpy as np\n'), ((7391, 7407), 'numpy.array', 'np.array', (['reward'], {}), '(reward)\n', (7399, 7407), True, 'import numpy as np\n'), ((7467, 7481), 'numpy.array', 'np.array', (['grad'], {}), '(grad)\n', (7475, 7481), True, 
'import numpy as np\n'), ((7547, 7567), 'numpy.array', 'np.array', (['action[-1]'], {}), '(action[-1])\n', (7555, 7567), True, 'import numpy as np\n')] |
from __future__ import annotations
import math
from typing import List, Tuple
import numpy as np
class last_touch:
    """Tracks the most recent ball touch reported by the game."""

    def __init__(self):
        # Sensible defaults until the first packet arrives.
        self.location = Vector()
        self.normal = Vector()
        self.time = -1
        self.car = None

    def update(self, packet: GameTickPacket):
        """Copy the latest-touch record straight out of a fresh packet."""
        latest = packet.game_ball.latest_touch
        self.location = latest.hit_location
        self.normal = latest.hit_normal
        self.time = latest.time_seconds
class ball_object:
    """Lightweight mirror of the game ball's physics state."""

    def __init__(self):
        # Keep a reference to the vector class itself (private implementation detail).
        self._vec = Vector
        self.location = self._vec()
        self.velocity = self._vec()
        self.last_touch = last_touch()

    def update(self, packet: GameTickPacket):
        """Refresh location, velocity and latest-touch info from a packet."""
        physics = packet.game_ball.physics
        self.location = self._vec.from_vector(physics.location)
        self.velocity = self._vec.from_vector(physics.velocity)
        self.last_touch.update(packet)
class Matrix3:
    """Orientation matrix built from pitch/yaw/roll Euler angles.

    Holds three direction Vectors for a car:
    Matrix3[0] — the "forward" direction,
    Matrix3[1] — the "left" direction,
    Matrix3[2] — the "up" direction.
    """

    def __init__(self, pitch=0, yaw=0, roll=0):
        cos_p, sin_p = math.cos(pitch), math.sin(pitch)
        cos_y, sin_y = math.cos(yaw), math.sin(yaw)
        cos_r, sin_r = math.cos(roll), math.sin(roll)

        # One direction vector per axis: forward, left and up.
        forward = Vector(cos_p * cos_y, cos_p * sin_y, sin_p)
        left = Vector(cos_y * sin_p * sin_r - cos_r * sin_y,
                      sin_y * sin_p * sin_r + cos_r * cos_y,
                      -cos_p * sin_r)
        up = Vector(-cos_r * cos_y * sin_p - sin_r * sin_y,
                    -cos_r * sin_y * sin_p + sin_r * cos_y,
                    cos_p * cos_r)

        self.data = (forward, left, up)
        self.forward, self.right, self.up = self.data

    def __getitem__(self, key):
        return self.data[key]

    def __str__(self):
        return f"[{self.forward}\n {self.right}\n {self.up}]"

    @staticmethod
    def from_rotator(rotator) -> Matrix3:
        """Alternate constructor from a game rotator (pitch/yaw/roll)."""
        return Matrix3(rotator.pitch, rotator.yaw, rotator.roll)

    def dot(self, vector):
        """Project `vector` onto this matrix's local axes."""
        return Vector(self.forward.dot(vector), self.right.dot(vector), self.up.dot(vector))

    def det(self):
        """Determinant of the 3x3 matrix (rule-of-Sarrus expansion)."""
        return self[0][0] * self[1][1] * self[2][2] + self[0][1] * self[1][2] * self[2][0] + \
               self[0][2] * self[1][0] * self[2][1] - self[0][0] * self[1][2] * self[2][1] - \
               self[0][1] * self[1][0] * self[2][2] - self[0][2] * self[1][1] * self[2][0]
# Vector supports 1D, 2D and 3D Vectors, as well as calculations between them
# Arithmetic with 1D and 2D lists/tuples aren't supported - just set the remaining values to 0 manually
# With this new setup, Vector is much faster because it's just a wrapper for numpy
class Vector:
    """A 3-component vector backed by a private numpy array.

    Supports elementwise arithmetic with other Vectors, numpy arrays and
    scalars. Treat an instance like a 3-element list (indexing, len, iteration
    over components via indices).
    """

    def __init__(self, x: float = 0, y: float = 0, z: float = 0):
        # this is a private property - this is so all other things treat this class like a list, and so should you!
        self._np = np.array([x, y, z])

    def __getitem__(self, index):
        return self._np[index].item()

    def __setitem__(self, index, value):
        self._np[index] = value

    @property
    def x(self):
        return self._np[0].item()

    @x.setter
    def x(self, value):
        self._np[0] = value

    @property
    def y(self):
        return self._np[1].item()

    @y.setter
    def y(self, value):
        self._np[1] = value

    @property
    def z(self):
        return self._np[2].item()

    @z.setter
    def z(self, value):
        self._np[2] = value

    # self == value
    def __eq__(self, value):
        # Comparing against a scalar compares the vector's magnitude instead.
        if isinstance(value, (float, int)):
            return self.magnitude() == value

        if hasattr(value, "_np"):
            value = value._np
        return (self._np == value).all()

    # len(self)
    def __len__(self):
        return 3  # this is a 3 dimensional vector, so we return 3

    # str(self)
    def __str__(self):
        # Vector's can be printed to console
        return f"[{self.x} {self.y} {self.z}]"

    # repr(self)
    def __repr__(self):
        return f"Vector(x={self.x}, y={self.y}, z={self.z})"

    # -self
    def __neg__(self):
        return Vector(*(self._np * -1))

    # self + value
    def __add__(self, value):
        if hasattr(value, "_np"):
            value = value._np
        return Vector(*(self._np+value))
    __radd__ = __add__

    # self - value
    def __sub__(self, value):
        if hasattr(value, "_np"):
            value = value._np
        return Vector(*(self._np-value))

    def __rsub__(self, value):
        return -self + value

    # self * value
    def __mul__(self, value):
        if hasattr(value, "_np"):
            value = value._np
        return Vector(*(self._np*value))
    __rmul__ = __mul__

    # self / value
    def __truediv__(self, value):
        if hasattr(value, "_np"):
            value = value._np
        return Vector(*(self._np/value))

    def __rtruediv__(self, value):
        return self * (1 / value)

    # round(self)
    def __round__(self, decimals=0) -> Vector:
        """Return a Vector with every component rounded to `decimals` places."""
        return Vector(*np.around(self._np, decimals=decimals))

    @staticmethod
    def from_vector(vec) -> Vector:
        """Build a Vector from any object exposing x, y and z attributes."""
        return Vector(vec.x, vec.y, vec.z)

    def magnitude(self) -> float:
        """Return the Euclidean length of the vector."""
        return np.linalg.norm(self._np).item()

    def dot(self, value: Vector) -> float:
        """Return the dot product of the two vectors."""
        if hasattr(value, "_np"):
            value = value._np
        return np.dot(self._np, value).item()

    def cross(self, value: Vector) -> Vector:
        """Return the cross product of the two vectors."""
        if hasattr(value, "_np"):
            value = value._np
        return Vector(*np.cross(self._np, value))

    def copy(self) -> Vector:
        """Return an independent copy of this vector."""
        return Vector(*self._np)

    def normalize(self, return_magnitude=False) -> Vector | Tuple[Vector, float]:
        """Return a Vector with the same direction but a length of 1.

        Pass return_magnitude=True to also receive this vector's original
        length (saves a second norm computation). A zero vector normalizes to
        another zero vector (with magnitude 0 when requested).
        """
        magnitude = self.magnitude()
        if magnitude != 0:
            norm_vec = Vector(*(self._np / magnitude))
            if return_magnitude:
                return norm_vec, magnitude
            return norm_vec
        if return_magnitude:
            return Vector(), 0
        return Vector()

    def flatten(self) -> Vector:
        """Return a copy with z (Vector[2]) zeroed out, making the vector 2D."""
        return Vector(self._np[0], self._np[1])

    def angle2D(self, value: Vector) -> float:
        """Return the 2D (x/y-plane) angle between the two vectors in radians."""
        return self.flatten().angle(value.flatten())

    def angle(self, value: Vector) -> float:
        """Return the angle between the two vectors in radians."""
        # The dot product is clamped to [-1, 1] to guard acos against
        # floating-point drift.
        return math.acos(max(min(np.dot(self.normalize()._np, value.normalize()._np).item(), 1), -1))

    def rotate(self, angle: float) -> Vector:
        """Rotate this vector by `angle` radians around the z axis (x/y only)."""
        return Vector((math.cos(angle)*self.x) - (math.sin(angle)*self.y), (math.sin(angle)*self.x) + (math.cos(angle)*self.y), self.z)

    def clamp2D(self, start: Vector, end: Vector) -> Vector:
        """Clamp this vector's direction between `start` and `end` (x/y only).

        Similar to integer clamping: forces Start < Vector < End in terms of
        clockwise rotation, returning the nearer bound when outside.
        """
        s = self.normalize()._np
        right = np.dot(s, np.cross(end._np, (0, 0, -1))) < 0
        left = np.dot(s, np.cross(start._np, (0, 0, -1))) > 0

        if (right and left) if np.dot(end._np, np.cross(start._np, (0, 0, -1))) > 0 else (right or left):
            return self

        if np.dot(start._np, s) < np.dot(end._np, s):
            return end
        return start

    def clamp(self, start: Vector, end: Vector) -> Vector:
        """Extend clamp2D by also clamping the vector's z between the bounds."""
        # NOTE: clamp2D returns one of the original Vector objects (self, start
        # or end), so the z assignments below mutate that object in place.
        s = self.clamp2D(start, end)
        start_z = min(start.z, end.z)
        end_z = max(start.z, end.z)
        if s.z < start_z:
            s.z = start_z
        elif s.z > end_z:
            s.z = end_z
        return s

    def dist(self, value: Vector) -> float:
        """Return the distance between the two vectors."""
        if hasattr(value, "_np"):
            value = value._np
        return np.linalg.norm(self._np - value).item()

    def flat_dist(self, value: Vector) -> float:
        """Return the distance between the two vectors on the 2D (x/y) plane."""
        return value.flatten().dist(self.flatten())

    def cap(self, low: float, high: float) -> Vector:
        """Return a Vector with every component clamped between low and high."""
        return Vector(*(max(min(item, high), low) for item in self._np))

    def midpoint(self, value: Vector) -> Vector:
        """Return the midpoint of the two vectors."""
        if hasattr(value, "_np"):
            value = value._np
        return Vector(*((self._np + value) / 2))

    def scale(self, value: float) -> Vector:
        """Return a vector with the same direction but `value` as its magnitude."""
        return self.normalize() * value
| [
"numpy.cross",
"math.cos",
"numpy.array",
"numpy.dot",
"numpy.around",
"numpy.linalg.norm",
"math.sin"
] | [((1354, 1369), 'math.cos', 'math.cos', (['pitch'], {}), '(pitch)\n', (1362, 1369), False, 'import math\n'), ((1383, 1398), 'math.sin', 'math.sin', (['pitch'], {}), '(pitch)\n', (1391, 1398), False, 'import math\n'), ((1412, 1425), 'math.cos', 'math.cos', (['yaw'], {}), '(yaw)\n', (1420, 1425), False, 'import math\n'), ((1439, 1452), 'math.sin', 'math.sin', (['yaw'], {}), '(yaw)\n', (1447, 1452), False, 'import math\n'), ((1466, 1480), 'math.cos', 'math.cos', (['roll'], {}), '(roll)\n', (1474, 1480), False, 'import math\n'), ((1494, 1508), 'math.sin', 'math.sin', (['roll'], {}), '(roll)\n', (1502, 1508), False, 'import math\n'), ((3025, 3044), 'numpy.array', 'np.array', (['[x, y, z]'], {}), '([x, y, z])\n', (3033, 3044), True, 'import numpy as np\n'), ((8021, 8041), 'numpy.dot', 'np.dot', (['start._np', 's'], {}), '(start._np, s)\n', (8027, 8041), True, 'import numpy as np\n'), ((8044, 8062), 'numpy.dot', 'np.dot', (['end._np', 's'], {}), '(end._np, s)\n', (8050, 8062), True, 'import numpy as np\n'), ((5198, 5236), 'numpy.around', 'np.around', (['self._np'], {'decimals': 'decimals'}), '(self._np, decimals=decimals)\n', (5207, 5236), True, 'import numpy as np\n'), ((5429, 5453), 'numpy.linalg.norm', 'np.linalg.norm', (['self._np'], {}), '(self._np)\n', (5443, 5453), True, 'import numpy as np\n'), ((5633, 5656), 'numpy.dot', 'np.dot', (['self._np', 'value'], {}), '(self._np, value)\n', (5639, 5656), True, 'import numpy as np\n'), ((5849, 5874), 'numpy.cross', 'np.cross', (['self._np', 'value'], {}), '(self._np, value)\n', (5857, 5874), True, 'import numpy as np\n'), ((7783, 7812), 'numpy.cross', 'np.cross', (['end._np', '(0, 0, -1)'], {}), '(end._np, (0, 0, -1))\n', (7791, 7812), True, 'import numpy as np\n'), ((7843, 7874), 'numpy.cross', 'np.cross', (['start._np', '(0, 0, -1)'], {}), '(start._np, (0, 0, -1))\n', (7851, 7874), True, 'import numpy as np\n'), ((8625, 8657), 'numpy.linalg.norm', 'np.linalg.norm', (['(self._np - value)'], {}), '(self._np - 
value)\n', (8639, 8657), True, 'import numpy as np\n'), ((7301, 7316), 'math.cos', 'math.cos', (['angle'], {}), '(angle)\n', (7309, 7316), False, 'import math\n'), ((7328, 7343), 'math.sin', 'math.sin', (['angle'], {}), '(angle)\n', (7336, 7343), False, 'import math\n'), ((7354, 7369), 'math.sin', 'math.sin', (['angle'], {}), '(angle)\n', (7362, 7369), False, 'import math\n'), ((7381, 7396), 'math.cos', 'math.cos', (['angle'], {}), '(angle)\n', (7389, 7396), False, 'import math\n'), ((7927, 7958), 'numpy.cross', 'np.cross', (['start._np', '(0, 0, -1)'], {}), '(start._np, (0, 0, -1))\n', (7935, 7958), True, 'import numpy as np\n')] |
import logging
import math
from copy import copy
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.init import _calculate_fan_in_and_fan_out
def extract_top_level_dict(current_dict):
    """
    Group a flat parameter dictionary into a nested graph keyed by top-level layer name.

    :param current_dict: Flat dict mapping dotted parameter names (e.g.
        "layer_dict.conv_0.weight") to values. The wrapper prefixes
        "layer_dict.", "block_dict." and "module-" are stripped first.
    :return: A dict mapping each top-level name to either the value itself
        (when the name has no sub-path) or a dict of {sub_path: value} pairs.
    """
    output_dict = dict()
    for key in current_dict.keys():
        # str.replace removes every occurrence at once, so one call per prefix
        # suffices (the original duplicated the "layer_dict." call needlessly).
        name = key.replace("layer_dict.", "")
        name = name.replace("block_dict.", "")
        name = name.replace("module-", "")
        top_level = name.split(".")[0]
        sub_level = ".".join(name.split(".")[1:])

        if top_level not in output_dict:
            if sub_level == "":
                # Leaf parameter with no sub-path: store the value directly.
                output_dict[top_level] = current_dict[key]
            else:
                output_dict[top_level] = {sub_level: current_dict[key]}
        else:
            # Merge into the existing sub-dict without mutating it in place.
            new_item = {key: value for key, value in output_dict[top_level].items()}
            new_item[sub_level] = current_dict[key]
            output_dict[top_level] = new_item

    return output_dict
def extract_params_and_check_for_missing_keys(current_dict, layer_dict):
    """Build the top-level params dict and back-fill None for layers without params.

    A None entry signals the layer to fall back to its internally stored weights.
    """
    params_dict = extract_top_level_dict(current_dict=current_dict)
    for layer_name in layer_dict.keys():
        params_dict.setdefault(layer_name, None)
    return params_dict
class MetaConv1dLayer(nn.Module):
    """1D convolution that can run with externally supplied weights.

    Mirrors a standard Conv1d, except forward() optionally accepts a parameter
    dictionary whose tensors replace the stored ones — useful for inner-loop
    optimisation in meta-learning.
    """

    def __init__(self, in_channels, out_channels, kernel_size, stride, padding, use_bias, groups=1, dilation_rate=1):
        """
        :param in_channels: Number of input channels.
        :param out_channels: Number of output filters.
        :param kernel_size: Convolution kernel size.
        :param stride: Convolution stride.
        :param padding: Convolution padding.
        :param use_bias: Whether to add a bias term.
        :param groups: Number of convolution groups.
        :param dilation_rate: Convolution dilation.
        """
        super(MetaConv1dLayer, self).__init__()
        self.stride = int(stride)
        self.padding = int(padding)
        self.dilation_rate = int(dilation_rate)
        self.use_bias = use_bias
        self.groups = groups
        # Xavier-initialised filter bank of shape (out, in, k).
        self.weight = nn.Parameter(torch.empty(out_channels, in_channels, kernel_size))
        nn.init.xavier_uniform_(self.weight)
        if self.use_bias:
            self.bias = nn.Parameter(torch.zeros(out_channels))

    def forward(self, x, params=None):
        """Apply the convolution, preferring tensors from `params` when given.

        :param x: Input batch of shape (b, c, l).
        :param params: Optional dict with 'weight' (and 'bias' when use_bias
            is True); when given these replace the internal parameters.
        :return: The convolved output.
        """
        if params is None:
            weight = self.weight
            bias = self.bias if self.use_bias else None
        else:
            params = extract_top_level_dict(current_dict=params)
            weight = params["weight"]
            bias = params["bias"] if self.use_bias else None

        return F.conv1d(input=x, weight=weight, bias=bias, stride=self.stride,
                        padding=self.padding, dilation=self.dilation_rate, groups=self.groups)
class MetaConv2dLayer(nn.Module):
    """2D convolution that can run with externally supplied weights.

    Mirrors a standard Conv2d, except forward() optionally accepts a parameter
    dictionary whose tensors replace the stored ones — useful for inner-loop
    optimisation in meta-learning.
    """

    def __init__(self, in_channels, out_channels, kernel_size, stride, padding, use_bias, groups=1, dilation_rate=1):
        """
        :param in_channels: Number of input channels.
        :param out_channels: Number of output filters.
        :param kernel_size: Convolution kernel size.
        :param stride: Convolution stride.
        :param padding: Convolution padding.
        :param use_bias: Whether to add a bias term.
        :param groups: Number of convolution groups.
        :param dilation_rate: Convolution dilation.
        """
        super(MetaConv2dLayer, self).__init__()
        self.stride = stride
        self.padding = int(padding)
        self.dilation_rate = int(dilation_rate)
        self.use_bias = use_bias
        self.groups = groups
        # Xavier-initialised filter bank of shape (out, in, k, k).
        self.weight = nn.Parameter(torch.empty(out_channels, in_channels, kernel_size, kernel_size),
                                   requires_grad=True)
        nn.init.xavier_uniform_(self.weight)
        if self.use_bias:
            self.bias = nn.Parameter(torch.zeros(out_channels), requires_grad=True)

    def forward(self, x, params=None):
        """Apply the convolution, preferring tensors from `params` when given.

        :param x: Input batch of shape (b, c, h, w).
        :param params: Optional dict with 'weight' (and 'bias' when use_bias
            is True); when given these replace the internal parameters.
        :return: The convolved output.
        """
        if params is None:
            weight = self.weight
            bias = self.bias if self.use_bias else None
        else:
            params = extract_top_level_dict(current_dict=params)
            weight = params["weight"]
            bias = params["bias"] if self.use_bias else None

        return F.conv2d(input=x, weight=weight, bias=bias, stride=self.stride,
                        padding=self.padding, dilation=self.dilation_rate, groups=self.groups)
class MetaLinearLayer(nn.Module):
    """Linear layer that can run with externally supplied weights.

    Behaves like a standard linear layer (Wx + b), except forward() optionally
    accepts a parameter dictionary whose tensors are used instead of the stored
    ones — useful for inner-loop optimisation in meta-learning.
    """

    def __init__(self, input_shape, num_filters, use_bias):
        """
        :param input_shape: The shape of the input data, in the form (b, f).
            Only the feature dimension (index 1) is used to size the weights.
        :param num_filters: Number of output features.
        :param use_bias: Whether to add a bias term.
        """
        super(MetaLinearLayer, self).__init__()
        self.input_shape = input_shape
        b, c = input_shape[:2]
        self.use_bias = use_bias
        self.weights = nn.Parameter(torch.empty(num_filters, c))
        nn.init.xavier_uniform_(self.weights)
        # BUGFIX: the previous call passed the tensor as a %-style argument to a
        # message with no placeholder ("debug message"), which breaks record
        # formatting whenever DEBUG logging is enabled. Use lazy %-formatting.
        logging.debug("MetaLinearLayer initialised with weight shape %s", self.weights.shape)
        if self.use_bias:
            self.bias = nn.Parameter(torch.zeros(num_filters))

    def forward(self, x, params=None):
        """
        Apply the linear function (Wx + b).

        :param x: Input data batch, in the form (b, f).
        :param params: Optional dict containing 'weights' (and 'bias' when
            use_bias is True); when given these replace the internal parameters.
        :return: The result of the linear function.
        """
        if params is not None:
            params = extract_top_level_dict(current_dict=params)
            if self.use_bias:
                (weight, bias) = params["weights"], params["bias"]
            else:
                (weight) = params["weights"]
                bias = None
        else:
            if self.use_bias:
                weight, bias = self.weights, self.bias
            else:
                weight = self.weights
                bias = None
        return F.linear(input=x, weight=weight, bias=bias)

    def reset_parameters(self):
        """Re-initialise the weights with a fresh Xavier-uniform sample and zero the bias.

        Implemented by zeroing the weights in place and adding a uniform sample
        in [-a, a], where a is the Xavier-uniform bound for this weight shape.
        """
        self.weights.data = self.weights.data * 0.
        fan_in, fan_out = _calculate_fan_in_and_fan_out(self.weights)
        std = 1. * math.sqrt(2.0 / (fan_in + fan_out))
        a = math.sqrt(3.0) * std  # Calculate uniform bounds from standard deviation
        a_array = torch.ones(self.weights.shape) * a
        # (A discarded no-op `a_array.to(device)` call was removed here; the
        # sampled tensor itself is moved to the weights' device below.)
        self.weights.data = self.weights.data + torch.distributions.Uniform(low=-a_array, high=a_array).rsample().to(
            self.weights.device)
        if self.use_bias:
            self.bias.data = self.bias.data * 0.
class MetaBatchNormLayer(nn.Module):
    # Batch norm for meta-learning: optionally keeps one set of running stats
    # and one gamma/beta pair per inner-loop step, and can back up / restore
    # its running statistics around evaluation passes.
    def __init__(self, num_features, num_support_set_steps, num_target_set_steps,
                 eps=1e-5, momentum=0.1, affine=True, track_running_stats=True,
                 use_per_step_bn_statistics=False, learnable_bn_gamma=True, learnable_bn_beta=True):
        """
        A MetaBatchNorm layer. Applies the same functionality of a standard BatchNorm layer, with the added
        functionality of being able to store per-step running stats and per-step beta and gamma for
        meta-learning inner loops.

        :param num_features: Number of feature channels to normalise.
        :param num_support_set_steps: Number of support-set inner-loop steps (sizes the per-step tables).
        :param num_target_set_steps: Number of target-set inner-loop steps (sizes the per-step tables).
        :param eps: Numerical-stability constant added to the variance.
        :param momentum: Momentum value stored for running-stat updates.
        :param affine: Stored flag; the affine weight/bias tensors below are created regardless.
        :param track_running_stats: Stored flag for running-stat tracking.
        :param use_per_step_bn_statistics: If True, keep one row of running stats / gamma / beta
            per inner-loop step (num_support_set_steps + num_target_set_steps + 1 rows in total).
        :param learnable_bn_gamma: Whether gamma (weight) receives gradients.
        :param learnable_bn_beta: Whether beta (bias) receives gradients.
        """
        super(MetaBatchNormLayer, self).__init__()
        self.num_features = num_features
        self.eps = eps

        self.affine = affine
        self.track_running_stats = track_running_stats
        self.num_features = num_features  # NOTE(review): duplicate assignment kept as-is
        self.use_per_step_bn_statistics = use_per_step_bn_statistics
        self.learnable_gamma = learnable_bn_gamma
        self.learnable_beta = learnable_bn_beta

        if use_per_step_bn_statistics:
            # One row per inner-loop step: support steps + target steps + 1.
            self.running_mean = nn.Parameter(
                torch.zeros(num_support_set_steps + num_target_set_steps + 1, num_features),
                requires_grad=False)
            self.running_var = nn.Parameter(
                torch.ones(num_support_set_steps + num_target_set_steps + 1, num_features),
                requires_grad=False)
            self.bias = nn.Parameter(
                torch.zeros(num_support_set_steps + num_target_set_steps + 1, num_features),
                requires_grad=self.learnable_beta)
            self.weight = nn.Parameter(
                torch.ones(num_support_set_steps + num_target_set_steps + 1, num_features),
                requires_grad=self.learnable_gamma)
        else:
            self.running_mean = nn.Parameter(torch.zeros(num_features), requires_grad=False)
            # NOTE(review): running_var is zero-initialised here, unlike standard
            # batch norm (ones) and unlike the per-step branch above. Harmless
            # while forward() always uses batch statistics (training=True below),
            # but confirm before relying on these running stats.
            self.running_var = nn.Parameter(torch.zeros(num_features), requires_grad=False)
            self.bias = nn.Parameter(torch.zeros(num_features),
                                     requires_grad=self.learnable_beta)
            self.weight = nn.Parameter(torch.ones(num_features),
                                       requires_grad=self.learnable_gamma)

        # Backup buffers used to restore running stats after evaluation passes.
        self.backup_running_mean = torch.zeros(self.running_mean.shape)
        self.backup_running_var = torch.ones(self.running_var.shape)

        self.momentum = momentum

    def forward(self, input, num_step, training=False, backup_running_statistics=False):
        """
        Forward propagates by applying a batch norm function.

        :param input: Input data batch; size can be any shape batch_norm accepts.
        :param num_step: The current inner loop step being taken. Indexes the per-step
            running stats and per-step gamma/beta when use_per_step_bn_statistics is True.
        :param training: Accepted for interface compatibility; NOTE(review): batch_norm
            below is always called with training=True, so batch statistics are always
            used regardless of this flag — confirm that is intended.
        :param backup_running_statistics: Whether to back up the running statistics
            (only meaningful with per-step statistics). Used at evaluation time so the
            collected validation stats can be thrown away afterwards.
        :return: The result of the batch norm operation.
        """
        if self.use_per_step_bn_statistics:
            # Select the statistics and affine params for this inner-loop step.
            running_mean = self.running_mean[num_step]
            running_var = self.running_var[num_step]
            weight, bias = self.weight[num_step], self.bias[num_step]
        else:
            running_mean = self.running_mean
            running_var = self.running_var
            weight, bias = self.weight, self.bias

        if backup_running_statistics and self.use_per_step_bn_statistics:
            # Snapshot the stats so restore_backup_stats() can roll them back.
            self.backup_running_mean.data = copy(self.running_mean.data)
            self.backup_running_var.data = copy(self.running_var.data)

        momentum = self.momentum

        output = F.batch_norm(input, running_mean, running_var, weight, bias,
                              training=True, momentum=momentum, eps=self.eps)

        return output

    def restore_backup_stats(self):
        """
        Resets batch statistics to their backup values which are collected after each forward pass.
        """
        if self.use_per_step_bn_statistics:
            self.running_mean = nn.Parameter(self.backup_running_mean, requires_grad=False)
            self.running_var = nn.Parameter(self.backup_running_var, requires_grad=False)
            # Move the freshly created parameters to the layer's device.
            self.to(self.weight.device)

    def extra_repr(self):
        return '{num_features}, eps={eps}, momentum={momentum}, affine={affine}, ' \
               'track_running_stats={track_running_stats}'.format(**self.__dict__)
class MetaConvNormLayerLeakyReLU(nn.Module):
    """Composite Conv2d -> (optional) BatchNorm -> LeakyReLU meta-layer.

    All sub-layers accept externally supplied parameters at forward time,
    enabling inner-loop optimisation in the meta-learning setting.
    """

    def __init__(self, input_shape, num_filters, kernel_size, stride, padding, use_bias, per_step_bn_statistics,
                 num_support_set_steps, num_target_set_steps,
                 use_normalization=True, groups=1):
        """
        :param input_shape: The input batch shape (b, c, h, w), used to size the sub-layers.
        :param num_filters: Number of filters for the convolutional layer.
        :param kernel_size: Kernel size of the convolutional layer.
        :param stride: Stride of the convolutional layer.
        :param padding: Padding of the convolutional layer.
        :param use_bias: Whether the convolutional layer utilizes a bias.
        :param per_step_bn_statistics: Whether batch norm keeps separate stats per inner-loop step.
        :param num_support_set_steps: Number of support-set inner-loop steps (for per-step BN).
        :param num_target_set_steps: Number of target-set inner-loop steps (for per-step BN).
        :param use_normalization: Whether to apply batch normalization after the conv.
        :param groups: Number of convolution groups.
        """
        super(MetaConvNormLayerLeakyReLU, self).__init__()
        self.input_shape = input_shape
        self.use_normalization = use_normalization
        self.use_per_step_bn_statistics = per_step_bn_statistics
        self.num_filters = num_filters
        self.kernel_size = kernel_size
        self.num_support_set_steps = num_support_set_steps
        self.num_target_set_steps = num_target_set_steps
        self.stride = stride
        self.groups = groups
        self.padding = padding
        self.use_bias = use_bias
        self.layer_dict = nn.ModuleDict()
        self.build_block()

    def build_block(self):
        # Trace a zero tensor through the block to infer sub-layer shapes.
        x = torch.zeros(self.input_shape)

        out = x

        self.conv = MetaConv2dLayer(in_channels=out.shape[1], out_channels=self.num_filters,
                                    kernel_size=self.kernel_size,
                                    stride=self.stride, padding=self.padding, use_bias=self.use_bias,
                                    groups=self.groups)

        out = self.conv(out)

        if type(out) == tuple:
            out, _ = out

        if self.use_normalization:
            self.norm_layer = MetaBatchNormLayer(num_features=out.shape[1], track_running_stats=True,
                                                 use_per_step_bn_statistics=self.use_per_step_bn_statistics,
                                                 num_support_set_steps=self.num_support_set_steps,
                                                 num_target_set_steps=self.num_target_set_steps)
            out = self.norm_layer.forward(out, num_step=0)

        out = F.leaky_relu(out)

        print(out.shape)

    def forward(self, x, num_step, params=None, training=False, backup_running_statistics=False):
        """
        Forward propagate through conv -> (batch norm) -> leaky ReLU.

        :param x: Input data batch.
        :param num_step: The current inner loop step; indexes per-step batch norm statistics.
        :param params: Optional dict whose 'conv' entry supplies external conv weights.
        :param training: Whether this is currently the training or evaluation phase.
        :param backup_running_statistics: Whether to back up batch norm running statistics
            (used at evaluation time so collected validation stats can be discarded).
        :return: The activated block output.
        """
        conv_params = None

        if params is not None:
            params = {key: value for key, value in params.items()}
            params = extract_top_level_dict(current_dict=params)
            conv_params = params['conv']

        out = x

        out = self.conv(out, params=conv_params)

        if type(out) == tuple:
            out, _ = out

        if self.use_normalization:
            out = self.norm_layer.forward(out, num_step=num_step,
                                          training=training,
                                          backup_running_statistics=backup_running_statistics)

        out = F.leaky_relu(out)

        return out

    def restore_backup_stats(self):
        """
        Restore stored statistics from the backup, replacing the current ones.
        """
        # BUGFIX: previously checked the non-existent attribute `self.normalization`
        # (the __init__ attribute is `use_normalization`), which raised
        # AttributeError whenever this method was called.
        if self.use_normalization:
            self.norm_layer.restore_backup_stats()
class VGGActivationNormNetwork(nn.Module):
    def __init__(self, input_shape, num_output_classes, use_channel_wise_attention,
                 num_stages, num_filters, num_support_set_steps, num_target_set_steps):
        """
        Builds a multilayer convolutional network (conv-norm-leakyReLU stages followed by
        one or more linear heads). Supports passing external parameters at inference time,
        enabling inner loop optimization readily.

        :param input_shape: The input image batch shape (b, c, h, w).
        :param num_output_classes: The number of output classes; a list builds one linear
            head per entry, a scalar builds a single 'linear' head.
        :param use_channel_wise_attention: Stored flag; not referenced elsewhere in this class.
        :param num_stages: Number of conv-norm-activation stages (each followed by 2x2 max-pool).
        :param num_filters: Number of filters per convolutional stage.
        :param num_support_set_steps: Number of support-set inner-loop steps (for per-step BN).
        :param num_target_set_steps: Number of target-set inner-loop steps (for per-step BN).
        """
        super(VGGActivationNormNetwork, self).__init__()
        self.total_layers = 0
        self.upscale_shapes = []
        self.num_filters = num_filters
        self.num_stages = num_stages
        self.input_shape = input_shape
        self.use_channel_wise_attention = use_channel_wise_attention
        self.num_output_classes = num_output_classes
        self.num_support_set_steps = num_support_set_steps
        self.num_target_set_steps = num_target_set_steps
        self.build_network()

    def build_network(self):
        """
        Builds the network before inference is required, by creating a dummy input with the
        same shape as self.input_shape and passing it through each layer as it is created,
        so input shapes are computed dynamically.
        """
        x = torch.zeros(self.input_shape)
        out = x

        self.layer_dict = nn.ModuleDict()

        for i in range(self.num_stages):
            # Each stage: conv + per-step batch norm + leaky ReLU, then 2x2 max-pool.
            self.layer_dict['conv_{}'.format(i)] = MetaConvNormLayerLeakyReLU(input_shape=out.shape,
                                                                              num_filters=self.num_filters,
                                                                              kernel_size=3, stride=1,
                                                                              padding=1,
                                                                              use_bias=True,
                                                                              groups=1, per_step_bn_statistics=True,
                                                                              num_support_set_steps=self.num_support_set_steps,
                                                                              num_target_set_steps=self.num_target_set_steps)
            out = self.layer_dict['conv_{}'.format(i)](out, training=True, num_step=0)

            out = F.max_pool2d(input=out, kernel_size=2, stride=2, padding=0)

        # Flatten the spatial map before the linear head(s).
        out = out.view((out.shape[0], -1))

        if type(self.num_output_classes) == list:
            # One independent linear head per requested class count.
            for idx, num_output_classes in enumerate(self.num_output_classes):
                self.layer_dict['linear_{}'.format(idx)] = MetaLinearLayer(input_shape=out.shape,
                                                                           num_filters=num_output_classes,
                                                                           use_bias=True)
                pred = self.layer_dict['linear_{}'.format(idx)](out)
        else:
            self.layer_dict['linear'] = MetaLinearLayer(input_shape=out.shape,
                                                        num_filters=self.num_output_classes, use_bias=True)
            out = self.layer_dict['linear'](out)

        print("VGGNetwork build", out.shape)

    def forward(self, x, num_step, dropout_training=None, params=None, training=False,
                backup_running_statistics=False, return_features=False):
        """
        Forward propagates through the network. If any params are passed then they are used
        instead of stored params.

        :param x: Input image batch.
        :param num_step: The current inner loop step number (indexes per-step BN statistics).
        :param dropout_training: Accepted but not used by this implementation.
        :param params: If None, internal parameters are used. Otherwise a dict keyed by
            layer names whose entries are routed to the matching sub-layer.
        :param training: Whether this is training (True) or eval time.
        :param backup_running_statistics: Whether to back up the BN running statistics so
            they can be reset after an eval loop.
        :param return_features: If True, also return the pre-flatten conv feature map.
        :return: Logits of shape (b, num_output_classes), or a list of logits when
            num_output_classes is a list; optionally paired with the feature map.
        """
        param_dict = dict()

        if params is not None:
            # NOTE(review): takes element [0] of each param value — presumably params
            # arrive with a leading (per-task or per-step) dimension; confirm with caller.
            params = {key: value[0] for key, value in params.items()}
            param_dict = extract_top_level_dict(current_dict=params)

        # Ensure every sub-layer has an entry (None => use internal weights).
        for name, param in list(self.layer_dict.named_parameters()) + list(self.layer_dict.items()):
            path_bits = name.split(".")
            layer_name = path_bits[0]
            if layer_name not in param_dict:
                param_dict[layer_name] = None

        out = x

        for i in range(self.num_stages):
            out = self.layer_dict['conv_{}'.format(i)](out, params=param_dict['conv_{}'.format(i)], training=training,
                                                       backup_running_statistics=backup_running_statistics,
                                                       num_step=num_step)
            out = F.max_pool2d(input=out, kernel_size=(2, 2), stride=2, padding=0)

        # Keep the conv feature map around in case the caller wants it.
        features = out
        out = out.view(out.size(0), -1)

        if type(self.num_output_classes) == list:
            pred_list = []
            for idx, num_output_classes in enumerate(self.num_output_classes):
                cur_pred = self.layer_dict['linear_{}'.format(idx)](out, params=param_dict['linear_{}'.format(idx)])
                pred_list.append(cur_pred)
            out = pred_list
        else:
            out = self.layer_dict['linear'](out, params=param_dict['linear'])

        if return_features:
            return out, features
        else:
            return out

    def restore_backup_stats(self):
        """
        Reset stored batch statistics from the stored backup.
        """
        for name, module in self.named_modules():
            if type(module) == MetaBatchNormLayer:
                module.restore_backup_stats()

    def zero_grad(self, params=None):
        # Zero gradients either on the module's own parameters or on an
        # externally supplied params dict.
        # NOTE(review): gradients are only zeroed when torch.sum(param.grad) > 0,
        # and the gradient tensor is printed first — this looks like leftover
        # debugging; confirm intent before relying on this behaviour.
        if params is None:
            for param in self.parameters():
                if param.requires_grad == True:
                    if param.grad is not None:
                        if torch.sum(param.grad) > 0:
                            print(param.grad)
                            param.grad.zero_()
        else:
            for name, param in params.items():
                if param.requires_grad == True:
                    if param.grad is not None:
                        if torch.sum(param.grad) > 0:
                            print(param.grad)
                            param.grad.zero_()
                            params[name].grad = None
class FCCActivationNormNetwork(nn.Module):
    def __init__(self, im_shape, num_output_classes, args, device, use_bn, num_stages=None, use_bias=True,
                 meta_classifier=True):
        """
        Builds a fully connected network with optional per-step batch norm. It also provides functionality for
        passing external parameters to be used at inference time. Enables inner loop optimization readily.
        :param im_shape: The input image batch shape.
        :param num_output_classes: The number of output classes of the network.
        :param args: A named tuple containing the system's hyperparameters.
        :param device: The device to run this on.
        :param use_bn: Whether to apply a MetaBatchNormLayer after each hidden linear layer.
        :param num_stages: Number of hidden linear layers.
        :param use_bias: Whether the output (prediction) layer uses a bias term.
        :param meta_classifier: A flag indicating whether the system's meta-learning (inner-loop) functionalities
        should be enabled.
        """
        super(FCCActivationNormNetwork, self).__init__()
        self.device = device
        self.args = args
        self.input_shape = list(im_shape)
        self.num_output_classes = num_output_classes
        self.meta_classifier = meta_classifier
        self.num_stages = num_stages
        self.use_bias = use_bias
        self.use_bn = use_bn
        self.build_network()

    def build_network(self):
        """
        Builds the network before inference is required by creating some dummy inputs with the same input as the
        self.im_shape tuple. Then passes that through the network and dynamically computes input shapes and
        sets output shapes for each layer.
        """
        x = torch.zeros(self.input_shape)
        out = x
        out = out.view(out.size(0), -1)
        self.layer_dict = nn.ModuleDict()
        for i in range(self.num_stages):
            # Hidden width is fixed at 40 units, matching the original architecture.
            self.layer_dict['fcc_{}'.format(i)] = MetaLinearLayer(input_shape=out.shape, num_filters=40, use_bias=False)
            out = self.layer_dict['fcc_{}'.format(i)].forward(out)
            if self.use_bn:
                self.layer_dict['fcc_bn_{}'.format(i)] = MetaBatchNormLayer(num_features=out.shape[1], args=self.args,
                                                                         use_per_step_bn_statistics=True)
                out = self.layer_dict['fcc_bn_{}'.format(i)].forward(out, num_step=0)
            out = F.leaky_relu(out)
        out = out.view(out.shape[0], -1)
        self.layer_dict['preds_linear'] = MetaLinearLayer(input_shape=(out.shape[0], np.prod(out.shape[1:])),
                                                        num_filters=self.num_output_classes, use_bias=self.use_bias)
        out = self.layer_dict['preds_linear'](out)
        print("FCCActivationNormNetwork build", out.shape)

    def forward(self, x, num_step, params=None, training=False,
                backup_running_statistics=False, return_features=False):
        """
        Forward propagates through the network. If any params are passed then they are used instead of stored params.
        :param x: Input image batch.
        :param num_step: The current inner loop step number
        :param params: If params are None then internal parameters are used. If params are a dictionary with keys the
        same as the layer names then they will be used instead.
        :param training: Whether this is training (True) or eval time.
        :param backup_running_statistics: Whether to backup the running statistics in their backup store. Which is
        then used to reset the stats back to a previous state (usually after an eval loop, when we want to throw away stored statistics)
        :param return_features: If True, also return the pre-logit features alongside the logits.
        :return: Logits of shape b, num_output_classes.
        """
        param_dict = dict()
        if params is not None:
            params = {key: value[0] for key, value in params.items()}
            param_dict = extract_top_level_dict(current_dict=params)

        # Ensure every layer has an entry in param_dict (None means "use internal params").
        for name, param in list(self.layer_dict.named_parameters()) + list(self.layer_dict.items()):
            path_bits = name.split(".")
            layer_name = path_bits[0]
            if layer_name not in param_dict:
                param_dict[layer_name] = None

        out = x
        out = out.view(out.size(0), -1)

        for i in range(self.num_stages):
            out = self.layer_dict['fcc_{}'.format(i)](out, params=param_dict['fcc_{}'.format(i)])
            if self.use_bn:
                # Batch norm relies on its internally stored per-step statistics,
                # so no external params are forwarded to it.
                out = self.layer_dict['fcc_bn_{}'.format(i)].forward(out, num_step=num_step,
                                                                     params=None, training=training,
                                                                     backup_running_statistics=backup_running_statistics)
            out = F.leaky_relu(out)

        features = out
        out = out.view(out.size(0), -1)
        out = self.layer_dict['preds_linear'](out, param_dict['preds_linear'])

        if return_features:
            return out, features
        else:
            return out

    def reset_parameters(self):
        """Re-initialize the weights of every MetaLinearLayer in the network."""
        for name, module in self.named_modules():
            if type(module) == MetaLinearLayer:
                module.reset_parameters()

    def restore_backup_stats(self):
        """
        Reset stored batch statistics from the stored backup.
        """
        for name, module in self.named_modules():
            if type(module) == MetaBatchNormLayer:
                module.restore_backup_stats()

    def zero_grad(self, params=None):
        """
        Zero the gradients of either this module's own parameters or an externally
        supplied parameter dictionary.

        :param params: Optional dict mapping parameter names to parameters. When given,
            those parameters are zeroed (and their ``.grad`` reference dropped) instead
            of ``self.parameters()``.
        """
        if params is None:
            for param in self.parameters():
                if param.requires_grad and param.grad is not None:
                    param.grad.zero_()
        else:
            for name, param in params.items():
                if param.requires_grad and param.grad is not None:
                    param.grad.zero_()
                    # Drop the reference so a fresh grad tensor is allocated next backward.
                    params[name].grad = None
class SqueezeExciteLayer(nn.ModuleDict):
    def __init__(self, input_shape, num_filters, num_layers, num_support_set_steps, num_target_set_steps):
        """
        A squeeze-and-excite style channel-attention layer built from MetaLinearLayers.
        :param input_shape: Shape of the (b, c, h, w) input batch used to infer sub-layer sizes.
        :param num_filters: Width of the hidden attention layers.
        :param num_layers: Total attention depth; num_layers - 1 hidden layers are built.
        :param num_support_set_steps: Number of support-set inner-loop steps (stored for interface parity).
        :param num_target_set_steps: Number of target-set inner-loop steps (stored for interface parity).
        """
        super(SqueezeExciteLayer, self).__init__()
        self.input_shape = input_shape
        self.num_filters = num_filters
        self.num_layers = num_layers
        self.num_support_set_steps = num_support_set_steps
        self.num_target_set_steps = num_target_set_steps
        self.build_block()

    def build_block(self):
        """Builds the attention sub-network by tracing a dummy input through it."""
        self.layer_dict = nn.ModuleDict()
        x_dummy = torch.zeros(self.input_shape)
        out = x_dummy
        # Global average pool to a per-channel descriptor. Squeeze only the two
        # spatial dims so a batch of size 1 keeps its batch dimension (a plain
        # .squeeze() would collapse it as well).
        out = F.avg_pool2d(out, out.shape[-1]).squeeze(-1).squeeze(-1)
        for i in range(self.num_layers - 1):
            self.layer_dict['attention_network_hidden_{}'.format(i)] = MetaLinearLayer(input_shape=out.shape,
                                                                                        use_bias=True,
                                                                                        num_filters=self.num_filters)
            out = self.layer_dict['attention_network_hidden_{}'.format(i)].forward(out, params=None)
            self.layer_dict['LeakyReLU_{}'.format(i)] = nn.LeakyReLU()
            out = self.layer_dict['LeakyReLU_{}'.format(i)].forward(out)
        self.layer_dict['attention_network_output_layer'] = MetaLinearLayer(input_shape=out.shape, use_bias=True,
                                                                            num_filters=x_dummy.shape[1])
        channel_wise_attention_regions = self.layer_dict[
            'attention_network_output_layer'].forward(
            out, params=None)
        channel_wise_attention_regions = F.sigmoid(channel_wise_attention_regions)
        out = x_dummy * channel_wise_attention_regions.unsqueeze(2).unsqueeze(2)
        print('Built', type(self), 'with output', out.shape, self)

    def forward(self, x, num_step=0, params=None):
        """
        Computes channel-wise attention weights and rescales x with them.
        :param x: Input batch of shape (b, c, h, w).
        :param num_step: Current inner-loop step (unused here; kept for interface parity).
        :param params: Optional external parameter dict keyed by layer name.
        :return: x rescaled channel-wise by sigmoid attention weights.
        """
        param_dict = dict()
        if params is not None:
            params = {key: value for key, value in params.items()}
            param_dict = extract_top_level_dict(current_dict=params)

        # Ensure every layer has an entry (None means "use internal params").
        for name, param in list(self.layer_dict.named_parameters()) + list(self.layer_dict.items()):
            path_bits = name.split(".")
            layer_name = path_bits[0]
            if layer_name not in param_dict:
                param_dict[layer_name] = None

        out = x
        # Squeeze only the spatial dims (see build_block) to stay safe at batch size 1.
        out = F.avg_pool2d(out, out.shape[-1]).squeeze(-1).squeeze(-1)
        for i in range(self.num_layers - 1):
            out = self.layer_dict[
                'attention_network_hidden_{}'.format(i)].forward(
                out, params=param_dict['attention_network_hidden_{}'.format(i)])
            out = self.layer_dict['LeakyReLU_{}'.format(i)].forward(out)

        channel_wise_attention_regions = self.layer_dict[
            'attention_network_output_layer'].forward(
            out, params=param_dict['attention_network_output_layer'])
        channel_wise_attention_regions = F.sigmoid(channel_wise_attention_regions)
        out = x * channel_wise_attention_regions.unsqueeze(2).unsqueeze(2)
        return out
class VGGActivationNormNetworkWithAttention(nn.Module):
    def __init__(self, input_shape, num_output_classes, use_channel_wise_attention,
                 num_stages, num_filters, num_support_set_steps, num_target_set_steps, num_blocks_per_stage):
        """
        Builds a multilayer convolutional network with optional squeeze-excite channel attention. It also
        provides functionality for passing external parameters to be used at inference time. Enables inner
        loop optimization readily.
        :param input_shape: The input image batch shape.
        :param num_output_classes: The number of output classes of the network.
        :param use_channel_wise_attention: Whether to apply a SqueezeExciteLayer before each conv block.
        :param num_stages: Number of downsampling stages.
        :param num_filters: Number of filters per conv layer.
        :param num_support_set_steps: Number of support-set inner-loop steps (forwarded to sub-layers).
        :param num_target_set_steps: Number of target-set inner-loop steps (forwarded to sub-layers).
        :param num_blocks_per_stage: Number of conv blocks within each stage.
        """
        super(VGGActivationNormNetworkWithAttention, self).__init__()
        self.total_layers = 0
        self.upscale_shapes = []
        self.num_filters = num_filters
        self.num_stages = num_stages
        self.input_shape = input_shape
        self.use_channel_wise_attention = use_channel_wise_attention
        self.num_output_classes = num_output_classes
        self.num_blocks_per_stage = num_blocks_per_stage
        self.num_support_set_steps = num_support_set_steps
        self.num_target_set_steps = num_target_set_steps
        self.build_network()

    def build_network(self):
        """
        Builds the network before inference is required by creating some dummy inputs with the same input as the
        self.im_shape tuple. Then passes that through the network and dynamically computes input shapes and
        sets output shapes for each layer.
        """
        x = torch.zeros(self.input_shape)
        out = x
        self.layer_dict = nn.ModuleDict()
        for i in range(self.num_stages):
            for j in range(self.num_blocks_per_stage):
                if self.use_channel_wise_attention:
                    self.layer_dict['attention_layer_{}_{}'.format(i, j)] = SqueezeExciteLayer(input_shape=out.shape,
                                                                                               num_filters=0,
                                                                                               num_layers=0,
                                                                                               num_support_set_steps=self.num_support_set_steps,
                                                                                               num_target_set_steps=self.num_target_set_steps)
                    out = self.layer_dict['attention_layer_{}_{}'.format(i, j)].forward(out)
                self.layer_dict['conv_{}_{}'.format(i, j)] = MetaConvNormLayerLeakyReLU(input_shape=out.shape,
                                                                                        num_filters=self.num_filters,
                                                                                        kernel_size=3, stride=1,
                                                                                        padding=1,
                                                                                        use_bias=True,
                                                                                        groups=1,
                                                                                        per_step_bn_statistics=True,
                                                                                        num_support_set_steps=self.num_support_set_steps,
                                                                                        num_target_set_steps=self.num_target_set_steps)
                out = self.layer_dict['conv_{}_{}'.format(i, j)](out, training=True, num_step=0)
            # Halve spatial resolution after each stage.
            out = F.max_pool2d(input=out, kernel_size=(2, 2), stride=2, padding=0)
        if self.use_channel_wise_attention:
            self.layer_dict['attention_pre_logit_layer'] = SqueezeExciteLayer(input_shape=out.shape,
                                                                              num_filters=0,
                                                                              num_layers=0,
                                                                              num_support_set_steps=self.num_support_set_steps,
                                                                              num_target_set_steps=self.num_target_set_steps)
            out = self.layer_dict['attention_pre_logit_layer'].forward(out)
        # Squeeze only the two spatial dims so a batch of size 1 keeps its batch
        # dimension (a plain .squeeze() would collapse it as well).
        features_avg = F.avg_pool2d(out, out.shape[-1]).squeeze(-1).squeeze(-1)
        out = features_avg
        self.layer_dict['linear'] = MetaLinearLayer(input_shape=out.shape,
                                                    num_filters=self.num_output_classes, use_bias=True)
        out = self.layer_dict['linear'](out)
        print("VGGNetwork build", out.shape)

    def forward(self, x, num_step, dropout_training=None, params=None, training=False,
                backup_running_statistics=False, return_features=False):
        """
        Forward propagates through the network. If any params are passed then they are used instead of stored params.
        :param x: Input image batch.
        :param num_step: The current inner loop step number
        :param dropout_training: Unused; kept for interface compatibility.
        :param params: If params are None then internal parameters are used. If params are a dictionary with keys the
        same as the layer names then they will be used instead.
        :param training: Whether this is training (True) or eval time.
        :param backup_running_statistics: Whether to backup the running statistics in their backup store. Which is
        then used to reset the stats back to a previous state (usually after an eval loop, when we want to throw away stored statistics)
        :param return_features: If True, also return the pre-pooling feature maps alongside the logits.
        :return: Logits of shape b, num_output_classes.
        """
        param_dict = dict()
        if params is not None:
            params = {key: value[0] for key, value in params.items()}
            param_dict = extract_top_level_dict(current_dict=params)

        # Ensure every layer has an entry (None means "use internal params").
        for name, param in list(self.layer_dict.named_parameters()) + list(self.layer_dict.items()):
            path_bits = name.split(".")
            layer_name = path_bits[0]
            if layer_name not in param_dict:
                param_dict[layer_name] = None

        out = x
        for i in range(self.num_stages):
            for j in range(self.num_blocks_per_stage):
                if self.use_channel_wise_attention:
                    out = self.layer_dict['attention_layer_{}_{}'.format(i, j)].forward(out, num_step=num_step,
                                                                                        params=param_dict[
                                                                                            'attention_layer_{}_{}'.format(
                                                                                                i, j)])
                out = self.layer_dict['conv_{}_{}'.format(i, j)](out, training=True, num_step=num_step,
                                                                 params=param_dict['conv_{}_{}'.format(i, j)])
            out = F.max_pool2d(input=out, kernel_size=(2, 2), stride=2, padding=0)
        if self.use_channel_wise_attention:
            out = self.layer_dict['attention_pre_logit_layer'].forward(out, params=param_dict[
                'attention_pre_logit_layer'])
        features = out
        # Same batch-size-safe spatial squeeze as in build_network.
        features_avg = F.avg_pool2d(out, out.shape[-1]).squeeze(-1).squeeze(-1)
        out = features_avg
        out = self.layer_dict['linear'](out, param_dict['linear'])
        if return_features:
            return out, features
        else:
            return out

    def restore_backup_stats(self):
        """
        Reset stored batch statistics from the stored backup.
        """
        for name, module in self.named_modules():
            if type(module) == MetaBatchNormLayer:
                module.restore_backup_stats()

    def zero_grad(self, params=None):
        """
        Zero the gradients of either this module's own parameters or an externally
        supplied parameter dictionary.

        :param params: Optional dict mapping parameter names to parameters. When given,
            those parameters are zeroed (and their ``.grad`` reference dropped) instead
            of ``self.parameters()``.
        """
        if params is None:
            for param in self.parameters():
                if param.requires_grad and param.grad is not None:
                    param.grad.zero_()
        else:
            for name, param in params.items():
                if param.requires_grad and param.grad is not None:
                    param.grad.zero_()
                    # Drop the reference so a fresh grad tensor is allocated next backward.
                    params[name].grad = None
class MetaBatchRelationalModule(nn.Module):
    def __init__(self, input_shape, use_coordinates=True, num_support_set_steps=0, num_target_set_steps=0,
                 output_units=32):
        """
        A relation-network module that scores all pairs of spatial locations (optionally tagged
        with their flat coordinate index) and aggregates them into a fixed-size embedding.
        :param input_shape: Shape of the input batch, either (b, c, h, w) or (b, length, c).
        :param use_coordinates: Whether to append a location-index coordinate to each feature vector.
        :param num_support_set_steps: Number of support-set inner-loop steps (stored for interface parity).
        :param num_target_set_steps: Number of target-set inner-loop steps (stored for interface parity).
        :param output_units: Dimensionality of the module's output embedding.
        """
        super(MetaBatchRelationalModule, self).__init__()
        self.input_shape = input_shape
        self.layer_dict = nn.ModuleDict()
        self.first_time = True
        self.use_coordinates = use_coordinates
        self.num_target_set_steps = num_target_set_steps
        self.num_support_set_steps = num_support_set_steps
        self.output_units = output_units
        self.build_block()

    def build_block(self):
        """Builds the g (pairwise) and f (post-processing) networks by tracing a dummy input."""
        out_img = torch.zeros(self.input_shape)
        # "g" stage: turn a (b, c, h, w) input into a (b, length, c) sequence of locations.
        if len(out_img.shape) > 3:
            b, c, h, w = out_img.shape
            if h > 5:
                # Cap the spatial grid at 5x5 so the number of pairs stays manageable.
                out_img = F.adaptive_avg_pool2d(out_img, output_size=5)
            b, c, h, w = out_img.shape
            out_img = out_img.view(b, c, h * w)
            out_img = out_img.permute([0, 2, 1])  # h*w, c
        b, length, c = out_img.shape
        if self.use_coordinates:
            # One scalar coordinate (the flat location index) appended per position.
            self.coord_tensor = []
            for i in range(length):
                self.coord_tensor.append(torch.Tensor(np.array([i])))
            self.coord_tensor = torch.stack(self.coord_tensor, dim=0).unsqueeze(0)
            if self.coord_tensor.shape[0] != out_img.shape[0]:
                self.coord_tensor = self.coord_tensor[0].unsqueeze(0).repeat([out_img.shape[0], 1, 1])
            out_img = torch.cat([out_img, self.coord_tensor], dim=2)
        x_i = torch.unsqueeze(out_img, 1)  # (1xh*wxc)
        x_i = x_i.repeat(1, length, 1, 1)  # (h*wxh*wxc)
        x_j = torch.unsqueeze(out_img, 2)  # (h*wx1xc)
        x_j = x_j.repeat(1, 1, length, 1)  # (h*wxh*wxc)
        # concatenate all pairs together
        per_location_feature = torch.cat([x_i, x_j], 3)  # (h*wxh*wx2*c)
        out = per_location_feature.view(
            per_location_feature.shape[0] * per_location_feature.shape[1] * per_location_feature.shape[2],
            per_location_feature.shape[3])
        for idx_layer in range(2):
            self.layer_dict['g_fcc_{}'.format(idx_layer)] = MetaLinearLayer(input_shape=out.shape, num_filters=64,
                                                                            use_bias=True)
            out = self.layer_dict['g_fcc_{}'.format(idx_layer)].forward(out)
            self.layer_dict['LeakyReLU_{}'.format(idx_layer)] = nn.LeakyReLU()
            out = self.layer_dict['LeakyReLU_{}'.format(idx_layer)].forward(out)
        # Reshape back to (b, length, length, features) and sum over both pair axes.
        out = out.view(per_location_feature.shape[0], per_location_feature.shape[1], per_location_feature.shape[2], -1)
        out = out.sum(1).sum(1)
        # "f" stage: post-process the aggregated pair features.
        self.layer_dict['post_processing_layer'] = MetaLinearLayer(input_shape=out.shape, num_filters=64, use_bias=True)
        out = self.layer_dict['post_processing_layer'].forward(out)
        self.layer_dict['LeakyReLU_post_processing'] = nn.LeakyReLU()
        out = self.layer_dict['LeakyReLU_post_processing'].forward(out)
        self.layer_dict['output_layer'] = MetaLinearLayer(input_shape=out.shape, num_filters=self.output_units,
                                                          use_bias=True)
        out = self.layer_dict['output_layer'].forward(out)
        self.layer_dict['LeakyReLU_output'] = nn.LeakyReLU()
        out = self.layer_dict['LeakyReLU_output'].forward(out)
        print('Block built with output volume shape', out.shape)

    def forward(self, x_img, num_step, params=None):
        """
        Applies the relational module to x_img, optionally using externally supplied parameters.
        :param x_img: Input batch of shape (b, c, h, w) or (b, length, c).
        :param num_step: Current inner-loop step (kept for interface parity).
        :param params: Optional external parameter dict keyed by layer name.
        :return: Embedding of shape (b, output_units).
        """
        param_dict = dict()
        if params is not None:
            params = {key: value for key, value in params.items()}
            param_dict = extract_top_level_dict(current_dict=params)

        # Ensure every layer has an entry (None means "use internal params").
        for name, param in list(self.layer_dict.named_parameters()) + list(self.layer_dict.items()):
            path_bits = name.split(".")
            layer_name = path_bits[0]
            if layer_name not in param_dict:
                param_dict[layer_name] = None

        out_img = x_img
        # "g" stage: turn a (b, c, h, w) input into a (b, length, c) sequence of locations.
        if len(out_img.shape) > 3:
            b, c, h, w = out_img.shape
            if h > 5:
                out_img = F.adaptive_avg_pool2d(out_img, output_size=5)
            b, c, h, w = out_img.shape
            out_img = out_img.view(b, c, h * w)
            out_img = out_img.permute([0, 2, 1])  # h*w, c
        b, length, c = out_img.shape
        if self.use_coordinates:
            if self.coord_tensor.shape[0] != out_img.shape[0]:
                self.coord_tensor = self.coord_tensor[0].unsqueeze(0).repeat([out_img.shape[0], 1, 1])
            out_img = torch.cat([out_img, self.coord_tensor.to(x_img.device)], dim=2)
        x_i = torch.unsqueeze(out_img, 1)  # (1xh*wxc)
        x_i = x_i.repeat(1, length, 1, 1)  # (h*wxh*wxc)
        x_j = torch.unsqueeze(out_img, 2)  # (h*wx1xc)
        x_j = x_j.repeat(1, 1, length, 1)  # (h*wxh*wxc)
        # concatenate all pairs together
        per_location_feature = torch.cat([x_i, x_j], 3)  # (h*wxh*wx2*c)
        out = per_location_feature.view(
            per_location_feature.shape[0] * per_location_feature.shape[1] * per_location_feature.shape[2],
            per_location_feature.shape[3])
        for idx_layer in range(2):
            out = self.layer_dict['g_fcc_{}'.format(idx_layer)].forward(out,
                                                                        params=param_dict['g_fcc_{}'.format(idx_layer)])
            out = self.layer_dict['LeakyReLU_{}'.format(idx_layer)].forward(out)
        # Reshape back to (b, length, length, features) and sum over both pair axes.
        out = out.view(per_location_feature.shape[0], per_location_feature.shape[1], per_location_feature.shape[2], -1)
        out = out.sum(1).sum(1)
        # "f" stage: post-process the aggregated pair features.
        out = self.layer_dict['post_processing_layer'].forward(out, params=param_dict['post_processing_layer'])
        out = self.layer_dict['LeakyReLU_post_processing'].forward(out)
        out = self.layer_dict['output_layer'].forward(out, params=param_dict['output_layer'])
        out = self.layer_dict['LeakyReLU_output'].forward(out)
        return out
| [
"torch.nn.functional.conv2d",
"numpy.prod",
"logging.debug",
"torch.nn.functional.conv1d",
"math.sqrt",
"torch.nn.functional.sigmoid",
"numpy.array",
"torch.sum",
"copy.copy",
"torch.nn.functional.linear",
"torch.nn.functional.adaptive_avg_pool2d",
"torch.nn.init.xavier_uniform_",
"torch.uns... | [((2964, 3000), 'torch.nn.init.xavier_uniform_', 'nn.init.xavier_uniform_', (['self.weight'], {}), '(self.weight)\n', (2987, 3000), True, 'import torch.nn as nn\n'), ((4068, 4207), 'torch.nn.functional.conv1d', 'F.conv1d', ([], {'input': 'x', 'weight': 'weight', 'bias': 'bias', 'stride': 'self.stride', 'padding': 'self.padding', 'dilation': 'self.dilation_rate', 'groups': 'self.groups'}), '(input=x, weight=weight, bias=bias, stride=self.stride, padding=\n self.padding, dilation=self.dilation_rate, groups=self.groups)\n', (4076, 4207), True, 'import torch.nn.functional as F\n'), ((5484, 5520), 'torch.nn.init.xavier_uniform_', 'nn.init.xavier_uniform_', (['self.weight'], {}), '(self.weight)\n', (5507, 5520), True, 'import torch.nn as nn\n'), ((6659, 6798), 'torch.nn.functional.conv2d', 'F.conv2d', ([], {'input': 'x', 'weight': 'weight', 'bias': 'bias', 'stride': 'self.stride', 'padding': 'self.padding', 'dilation': 'self.dilation_rate', 'groups': 'self.groups'}), '(input=x, weight=weight, bias=bias, stride=self.stride, padding=\n self.padding, dilation=self.dilation_rate, groups=self.groups)\n', (6667, 6798), True, 'import torch.nn.functional as F\n'), ((7746, 7783), 'torch.nn.init.xavier_uniform_', 'nn.init.xavier_uniform_', (['self.weights'], {}), '(self.weights)\n', (7769, 7783), True, 'import torch.nn as nn\n'), ((7793, 7837), 'logging.debug', 'logging.debug', (['"""debug message"""', 'self.weights'], {}), "('debug message', self.weights)\n", (7806, 7837), False, 'import logging\n'), ((9031, 9074), 'torch.nn.functional.linear', 'F.linear', ([], {'input': 'x', 'weight': 'weight', 'bias': 'bias'}), '(input=x, weight=weight, bias=bias)\n', (9039, 9074), True, 'import torch.nn.functional as F\n'), ((9263, 9306), 'torch.nn.init._calculate_fan_in_and_fan_out', '_calculate_fan_in_and_fan_out', (['self.weights'], {}), '(self.weights)\n', (9292, 9306), False, 'from torch.nn.init import _calculate_fan_in_and_fan_out\n'), ((12265, 12301), 'torch.zeros', 
'torch.zeros', (['self.running_mean.shape'], {}), '(self.running_mean.shape)\n', (12276, 12301), False, 'import torch\n'), ((12336, 12370), 'torch.ones', 'torch.ones', (['self.running_var.shape'], {}), '(self.running_var.shape)\n', (12346, 12370), False, 'import torch\n'), ((14145, 14257), 'torch.nn.functional.batch_norm', 'F.batch_norm', (['input', 'running_mean', 'running_var', 'weight', 'bias'], {'training': '(True)', 'momentum': 'momentum', 'eps': 'self.eps'}), '(input, running_mean, running_var, weight, bias, training=True,\n momentum=momentum, eps=self.eps)\n', (14157, 14257), True, 'import torch.nn.functional as F\n'), ((16675, 16690), 'torch.nn.ModuleDict', 'nn.ModuleDict', ([], {}), '()\n', (16688, 16690), True, 'import torch.nn as nn\n'), ((16759, 16788), 'torch.zeros', 'torch.zeros', (['self.input_shape'], {}), '(self.input_shape)\n', (16770, 16788), False, 'import torch\n'), ((17758, 17775), 'torch.nn.functional.leaky_relu', 'F.leaky_relu', (['out'], {}), '(out)\n', (17770, 17775), True, 'import torch.nn.functional as F\n'), ((19610, 19627), 'torch.nn.functional.leaky_relu', 'F.leaky_relu', (['out'], {}), '(out)\n', (19622, 19627), True, 'import torch.nn.functional as F\n'), ((21537, 21566), 'torch.zeros', 'torch.zeros', (['self.input_shape'], {}), '(self.input_shape)\n', (21548, 21566), False, 'import torch\n'), ((21609, 21624), 'torch.nn.ModuleDict', 'nn.ModuleDict', ([], {}), '()\n', (21622, 21624), True, 'import torch.nn as nn\n'), ((28656, 28685), 'torch.zeros', 'torch.zeros', (['self.input_shape'], {}), '(self.input_shape)\n', (28667, 28685), False, 'import torch\n'), ((28768, 28783), 'torch.nn.ModuleDict', 'nn.ModuleDict', ([], {}), '()\n', (28781, 28783), True, 'import torch.nn as nn\n'), ((33707, 33722), 'torch.nn.ModuleDict', 'nn.ModuleDict', ([], {}), '()\n', (33720, 33722), True, 'import torch.nn as nn\n'), ((33741, 33770), 'torch.zeros', 'torch.zeros', (['self.input_shape'], {}), '(self.input_shape)\n', (33752, 33770), False, 'import 
torch\n'), ((34878, 34919), 'torch.nn.functional.sigmoid', 'F.sigmoid', (['channel_wise_attention_regions'], {}), '(channel_wise_attention_regions)\n', (34887, 34919), True, 'import torch.nn.functional as F\n'), ((36251, 36292), 'torch.nn.functional.sigmoid', 'F.sigmoid', (['channel_wise_attention_regions'], {}), '(channel_wise_attention_regions)\n', (36260, 36292), True, 'import torch.nn.functional as F\n'), ((38161, 38190), 'torch.zeros', 'torch.zeros', (['self.input_shape'], {}), '(self.input_shape)\n', (38172, 38190), False, 'import torch\n'), ((38233, 38248), 'torch.nn.ModuleDict', 'nn.ModuleDict', ([], {}), '()\n', (38246, 38248), True, 'import torch.nn as nn\n'), ((45782, 45797), 'torch.nn.ModuleDict', 'nn.ModuleDict', ([], {}), '()\n', (45795, 45797), True, 'import torch.nn as nn\n'), ((46106, 46135), 'torch.zeros', 'torch.zeros', (['self.input_shape'], {}), '(self.input_shape)\n', (46117, 46135), False, 'import torch\n'), ((47106, 47133), 'torch.unsqueeze', 'torch.unsqueeze', (['out_img', '(1)'], {}), '(out_img, 1)\n', (47121, 47133), False, 'import torch\n'), ((47218, 47245), 'torch.unsqueeze', 'torch.unsqueeze', (['out_img', '(2)'], {}), '(out_img, 2)\n', (47233, 47245), False, 'import torch\n'), ((47383, 47407), 'torch.cat', 'torch.cat', (['[x_i, x_j]', '(3)'], {}), '([x_i, x_j], 3)\n', (47392, 47407), False, 'import torch\n'), ((48664, 48678), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', ([], {}), '()\n', (48676, 48678), True, 'import torch.nn as nn\n'), ((49041, 49055), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', ([], {}), '()\n', (49053, 49055), True, 'import torch.nn as nn\n'), ((50617, 50644), 'torch.unsqueeze', 'torch.unsqueeze', (['out_img', '(1)'], {}), '(out_img, 1)\n', (50632, 50644), False, 'import torch\n'), ((50729, 50756), 'torch.unsqueeze', 'torch.unsqueeze', (['out_img', '(2)'], {}), '(out_img, 2)\n', (50744, 50756), False, 'import torch\n'), ((50894, 50918), 'torch.cat', 'torch.cat', (['[x_i, x_j]', '(3)'], {}), '([x_i, x_j], 3)\n', (50903, 50918), 
False, 'import torch\n'), ((2904, 2954), 'torch.empty', 'torch.empty', (['num_filters', 'in_channels', 'kernel_size'], {}), '(num_filters, in_channels, kernel_size)\n', (2915, 2954), False, 'import torch\n'), ((5391, 5454), 'torch.empty', 'torch.empty', (['num_filters', 'in_channels', 'kernel_size', 'kernel_size'], {}), '(num_filters, in_channels, kernel_size, kernel_size)\n', (5402, 5454), False, 'import torch\n'), ((7709, 7736), 'torch.empty', 'torch.empty', (['num_filters', 'c'], {}), '(num_filters, c)\n', (7720, 7736), False, 'import torch\n'), ((9326, 9361), 'math.sqrt', 'math.sqrt', (['(2.0 / (fan_in + fan_out))'], {}), '(2.0 / (fan_in + fan_out))\n', (9335, 9361), False, 'import math\n'), ((9374, 9388), 'math.sqrt', 'math.sqrt', (['(3.0)'], {}), '(3.0)\n', (9383, 9388), False, 'import math\n'), ((9465, 9495), 'torch.ones', 'torch.ones', (['self.weights.shape'], {}), '(self.weights.shape)\n', (9475, 9495), False, 'import torch\n'), ((13939, 13967), 'copy.copy', 'copy', (['self.running_mean.data'], {}), '(self.running_mean.data)\n', (13943, 13967), False, 'from copy import copy\n'), ((14011, 14038), 'copy.copy', 'copy', (['self.running_var.data'], {}), '(self.running_var.data)\n', (14015, 14038), False, 'from copy import copy\n'), ((14544, 14603), 'torch.nn.Parameter', 'nn.Parameter', (['self.backup_running_mean'], {'requires_grad': '(False)'}), '(self.backup_running_mean, requires_grad=False)\n', (14556, 14603), True, 'import torch.nn as nn\n'), ((14635, 14693), 'torch.nn.Parameter', 'nn.Parameter', (['self.backup_running_var'], {'requires_grad': '(False)'}), '(self.backup_running_var, requires_grad=False)\n', (14647, 14693), True, 'import torch.nn as nn\n'), ((22639, 22698), 'torch.nn.functional.max_pool2d', 'F.max_pool2d', ([], {'input': 'out', 'kernel_size': '(2)', 'stride': '(2)', 'padding': '(0)'}), '(input=out, kernel_size=2, stride=2, padding=0)\n', (22651, 22698), True, 'import torch.nn.functional as F\n'), ((25489, 25553), 
'torch.nn.functional.max_pool2d', 'F.max_pool2d', ([], {'input': 'out', 'kernel_size': '(2, 2)', 'stride': '(2)', 'padding': '(0)'}), '(input=out, kernel_size=(2, 2), stride=2, padding=0)\n', (25501, 25553), True, 'import torch.nn.functional as F\n'), ((29374, 29391), 'torch.nn.functional.leaky_relu', 'F.leaky_relu', (['out'], {}), '(out)\n', (29386, 29391), True, 'import torch.nn.functional as F\n'), ((31740, 31757), 'torch.nn.functional.leaky_relu', 'F.leaky_relu', (['out'], {}), '(out)\n', (31752, 31757), True, 'import torch.nn.functional as F\n'), ((34383, 34397), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', ([], {}), '()\n', (34395, 34397), True, 'import torch.nn as nn\n'), ((40268, 40332), 'torch.nn.functional.max_pool2d', 'F.max_pool2d', ([], {'input': 'out', 'kernel_size': '(2, 2)', 'stride': '(2)', 'padding': '(0)'}), '(input=out, kernel_size=(2, 2), stride=2, padding=0)\n', (40280, 40332), True, 'import torch.nn.functional as F\n'), ((43785, 43849), 'torch.nn.functional.max_pool2d', 'F.max_pool2d', ([], {'input': 'out', 'kernel_size': '(2, 2)', 'stride': '(2)', 'padding': '(0)'}), '(input=out, kernel_size=(2, 2), stride=2, padding=0)\n', (43797, 43849), True, 'import torch.nn.functional as F\n'), ((47044, 47090), 'torch.cat', 'torch.cat', (['[out_img, self.coord_tensor]'], {'dim': '(2)'}), '([out_img, self.coord_tensor], dim=2)\n', (47053, 47090), False, 'import torch\n'), ((48065, 48079), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', ([], {}), '()\n', (48077, 48079), True, 'import torch.nn as nn\n'), ((3065, 3089), 'torch.zeros', 'torch.zeros', (['num_filters'], {}), '(num_filters)\n', (3076, 3089), False, 'import torch\n'), ((5585, 5609), 'torch.zeros', 'torch.zeros', (['num_filters'], {}), '(num_filters)\n', (5596, 5609), False, 'import torch\n'), ((7901, 7925), 'torch.zeros', 'torch.zeros', (['num_filters'], {}), '(num_filters)\n', (7912, 7925), False, 'import torch\n'), ((11100, 11175), 'torch.zeros', 'torch.zeros', (['(num_support_set_steps + num_target_set_steps 
+ 1)', 'num_features'], {}), '(num_support_set_steps + num_target_set_steps + 1, num_features)\n', (11111, 11175), False, 'import torch\n'), ((11275, 11349), 'torch.ones', 'torch.ones', (['(num_support_set_steps + num_target_set_steps + 1)', 'num_features'], {}), '(num_support_set_steps + num_target_set_steps + 1, num_features)\n', (11285, 11349), False, 'import torch\n'), ((11442, 11517), 'torch.zeros', 'torch.zeros', (['(num_support_set_steps + num_target_set_steps + 1)', 'num_features'], {}), '(num_support_set_steps + num_target_set_steps + 1, num_features)\n', (11453, 11517), False, 'import torch\n'), ((11626, 11700), 'torch.ones', 'torch.ones', (['(num_support_set_steps + num_target_set_steps + 1)', 'num_features'], {}), '(num_support_set_steps + num_target_set_steps + 1, num_features)\n', (11636, 11700), False, 'import torch\n'), ((11813, 11838), 'torch.zeros', 'torch.zeros', (['num_features'], {}), '(num_features)\n', (11824, 11838), False, 'import torch\n'), ((11905, 11930), 'torch.zeros', 'torch.zeros', (['num_features'], {}), '(num_features)\n', (11916, 11930), False, 'import torch\n'), ((11990, 12015), 'torch.zeros', 'torch.zeros', (['num_features'], {}), '(num_features)\n', (12001, 12015), False, 'import torch\n'), ((12128, 12152), 'torch.ones', 'torch.ones', (['num_features'], {}), '(num_features)\n', (12138, 12152), False, 'import torch\n'), ((33807, 33839), 'torch.nn.functional.avg_pool2d', 'F.avg_pool2d', (['out', 'out.shape[-1]'], {}), '(out, out.shape[-1])\n', (33819, 33839), True, 'import torch.nn.functional as F\n'), ((35620, 35652), 'torch.nn.functional.avg_pool2d', 'F.avg_pool2d', (['out', 'out.shape[-1]'], {}), '(out, out.shape[-1])\n', (35632, 35652), True, 'import torch.nn.functional as F\n'), ((41018, 41050), 'torch.nn.functional.avg_pool2d', 'F.avg_pool2d', (['out', 'out.shape[-1]'], {}), '(out, out.shape[-1])\n', (41030, 41050), True, 'import torch.nn.functional as F\n'), ((44082, 44114), 'torch.nn.functional.avg_pool2d', 'F.avg_pool2d', 
(['out', 'out.shape[-1]'], {}), '(out, out.shape[-1])\n', (44094, 44114), True, 'import torch.nn.functional as F\n'), ((46274, 46319), 'torch.nn.functional.adaptive_avg_pool2d', 'F.adaptive_avg_pool2d', (['out_img'], {'output_size': '(5)'}), '(out_img, output_size=5)\n', (46295, 46319), True, 'import torch.nn.functional as F\n'), ((50014, 50059), 'torch.nn.functional.adaptive_avg_pool2d', 'F.adaptive_avg_pool2d', (['out_img'], {'output_size': '(5)'}), '(out_img, output_size=5)\n', (50035, 50059), True, 'import torch.nn.functional as F\n'), ((29520, 29542), 'numpy.prod', 'np.prod', (['out.shape[1:]'], {}), '(out.shape[1:])\n', (29527, 29542), True, 'import numpy as np\n'), ((46803, 46840), 'torch.stack', 'torch.stack', (['self.coord_tensor'], {'dim': '(0)'}), '(self.coord_tensor, dim=0)\n', (46814, 46840), False, 'import torch\n'), ((46754, 46767), 'numpy.array', 'np.array', (['[i]'], {}), '([i])\n', (46762, 46767), True, 'import numpy as np\n'), ((9588, 9643), 'torch.distributions.Uniform', 'torch.distributions.Uniform', ([], {'low': '(-a_array)', 'high': 'a_array'}), '(low=-a_array, high=a_array)\n', (9615, 9643), False, 'import torch\n'), ((26658, 26679), 'torch.sum', 'torch.sum', (['param.grad'], {}), '(param.grad)\n', (26667, 26679), False, 'import torch\n'), ((26961, 26982), 'torch.sum', 'torch.sum', (['param.grad'], {}), '(param.grad)\n', (26970, 26982), False, 'import torch\n'), ((32718, 32739), 'torch.sum', 'torch.sum', (['param.grad'], {}), '(param.grad)\n', (32727, 32739), False, 'import torch\n'), ((33021, 33042), 'torch.sum', 'torch.sum', (['param.grad'], {}), '(param.grad)\n', (33030, 33042), False, 'import torch\n'), ((44994, 45015), 'torch.sum', 'torch.sum', (['param.grad'], {}), '(param.grad)\n', (45003, 45015), False, 'import torch\n'), ((45297, 45318), 'torch.sum', 'torch.sum', (['param.grad'], {}), '(param.grad)\n', (45306, 45318), False, 'import torch\n')] |
# Try to import PyOpenGL; fall back to a no-op stub so the rest of the library
# can still be imported (and log a warning) when rendering is unavailable.
try:
    import OpenGL.GL as gl
except Exception:  # narrow from bare `except:` so SystemExit/KeyboardInterrupt propagate
    from galry import log_warn
    log_warn(("PyOpenGL is not available and Galry won't be"
            " able to render plots."))

    class _gl(object):
        """Stub that silently absorbs every OpenGL call."""
        def mock(*args, **kwargs):
            return None
        def __getattr__(self, name):
            # Any attribute access resolves to the no-op `mock` callable.
            return self.mock
    gl = _gl()
from collections import OrderedDict
import numpy as np
import sys
from galry import enforce_dtype, DataNormalizer, log_info, log_debug, \
log_warn, RefVar
__all__ = ['GLVersion', 'GLRenderer']
# GLVersion class
# ---------------
class GLVersion(object):
    """Helpers to query the OpenGL and GLSL versions of the client renderer."""
    @staticmethod
    def get_renderer_info():
        """Return information about the client renderer.
        
        Returns a dictionary with the keys `renderer_name`, `opengl_version`
        and `glsl_version`.
        """
        info = {
            'renderer_name': gl.glGetString(gl.GL_RENDERER),
            'opengl_version': gl.glGetString(gl.GL_VERSION),
            'glsl_version': gl.glGetString(gl.GL_SHADING_LANGUAGE_VERSION),
        }
        return info
    
    @staticmethod
    def version_header():
        """Return the GLSL `#version` header suited to the OpenGL version."""
        opengl_version = GLVersion.get_renderer_info()['opengl_version']
        if opengl_version[0:3] < '2.1':
            return '#version 110\n'
        return '#version 120\n'
    
    @staticmethod
    def precision_header():
        """Return the float precision header, required when GLSL >= 1.3."""
        glsl_version = GLVersion.get_renderer_info()['glsl_version']
        return 'precision mediump float;' if glsl_version >= '1.3' else ''
# Low-level OpenGL functions to initialize/load variables
# -------------------------------------------------------
class Attribute(object):
    """Low-level OpenGL operations on attribute (vertex) buffer objects."""
    @staticmethod
    def create():
        """Create a new buffer object and return its identifier."""
        return gl.glGenBuffers(1)
    
    @staticmethod
    def get_gltype(index=False):
        """Return the GL buffer target: the element array buffer when
        `index` is True, the regular array buffer otherwise."""
        if index:
            return gl.GL_ELEMENT_ARRAY_BUFFER
        return gl.GL_ARRAY_BUFFER
    
    @staticmethod
    def bind(buffer, location=None, index=False):
        """Bind a buffer, and enable the attribute array at `location`
        when a valid location is given."""
        gl.glBindBuffer(Attribute.get_gltype(index), buffer)
        if location >= 0:
            gl.glEnableVertexAttribArray(location)
    
    @staticmethod
    def set_attribute(location, ndim):
        """Specify the layout (float components) of the attribute before
        rendering."""
        gl.glVertexAttribPointer(location, ndim, gl.GL_FLOAT, gl.GL_FALSE, 0, None)
    
    @staticmethod
    def convert_data(data, index=False):
        """Coerce data to int32 for index buffers, float32 otherwise."""
        if index:
            return np.array(data, np.int32)
        return enforce_dtype(data, np.float32)
    
    @staticmethod
    def load(data, index=False):
        """Upload `data` into the currently bound buffer for the first
        time."""
        converted = Attribute.convert_data(data, index=index)
        gl.glBufferData(Attribute.get_gltype(index), converted, gl.GL_DYNAMIC_DRAW)
    
    @staticmethod
    def update(data, onset=0, index=False):
        """Upload `data` into the currently bound buffer, starting at the
        element offset `onset`."""
        gltype = Attribute.get_gltype(index)
        data = Attribute.convert_data(data, index=index)
        # translate the element offset into a byte offset
        if data.ndim == 1:
            ndim = 1
        elif data.ndim == 2:
            ndim = data.shape[1]
        onset *= ndim * data.itemsize
        gl.glBufferSubData(gltype, int(onset), data)
    
    @staticmethod
    def delete(*buffers):
        """Delete the given buffer objects, if any."""
        if buffers:
            gl.glDeleteBuffers(len(buffers), buffers)
class Uniform(object):
    """Low-level OpenGL operations on uniform variables."""
    # suffixes used to build glUniform* function names:
    # glUniform[Matrix]D[f/i][v]
    float_suffix = {True: 'f', False: 'i'}
    array_suffix = {True: 'v', False: ''}
    
    @staticmethod
    def convert_data(data):
        """Normalize `data` to 32-bit scalar types, recursing into lists
        and tuples."""
        if isinstance(data, np.ndarray):
            data = enforce_dtype(data, np.float32)
        if type(data) == np.float64:
            data = np.float32(data)
        if type(data) == np.int64:
            data = np.int32(data)
        if type(data) == list:
            data = map(Uniform.convert_data, data)
        if type(data) == tuple:
            data = tuple(map(Uniform.convert_data, data))
        return data
    
    @staticmethod
    def load_scalar(location, data):
        """Load a scalar uniform (int or float)."""
        data = Uniform.convert_data(data)
        is_float = type(data) in (float, np.float32)
        funname = 'glUniform1%s' % Uniform.float_suffix[is_float]
        getattr(gl, funname)(location, data)
    
    @staticmethod
    def load_vector(location, data):
        """Load a vector uniform with 2, 3 or 4 components."""
        if not len(data):
            return
        data = Uniform.convert_data(data)
        is_float = type(data[0]) in (float, np.float32)
        funname = 'glUniform%d%s' % (len(data), Uniform.float_suffix[is_float])
        getattr(gl, funname)(location, *data)
    
    @staticmethod
    def load_array(location, data):
        """Load an array of scalars or vectors (a 2D array)."""
        data = Uniform.convert_data(data)
        is_float = (data.dtype == np.float32)
        size, ndim = data.shape
        funname = 'glUniform%d%sv' % (ndim, Uniform.float_suffix[is_float])
        getattr(gl, funname)(location, size, data)
    
    @staticmethod
    def load_matrix(location, data):
        """Load a (possibly non-square) matrix uniform."""
        data = Uniform.convert_data(data)
        is_float = (data.dtype == np.float32)
        n, m = data.shape
        # TODO: arrays of matrices?
        if n == m:
            funname = 'glUniformMatrix%d%sv' % (n, Uniform.float_suffix[is_float])
        else:
            funname = 'glUniformMatrix%dx%d%sv' % (n, m, Uniform.float_suffix[is_float])
        getattr(gl, funname)(location, 1, False, data)
class Texture(object):
    """Contains OpenGL functions related to textures."""
    @staticmethod
    def create(ndim=2, mipmap=False, minfilter=None, magfilter=None):
        """Create a texture with the specified number of dimensions.
        Arguments:
        * ndim: number of dimensions of the texture (1 or 2).
        * mipmap: whether to generate mipmaps; falls back to NEAREST
          filtering when glGenerateMipmap is unavailable.
        * minfilter, magfilter: GL filter names without the GL_ prefix
          ('NEAREST' by default).
        Returns:
        * buffer: the texture identifier.
        """
        buffer = gl.glGenTextures(1)
        # byte-aligned rows, needed for textures of arbitrary width
        gl.glPixelStorei(gl.GL_UNPACK_ALIGNMENT, 1)
        Texture.bind(buffer, ndim)
        textype = getattr(gl, "GL_TEXTURE_%dD" % ndim)
        gl.glTexParameteri(textype, gl.GL_TEXTURE_WRAP_S, gl.GL_CLAMP)
        gl.glTexParameteri(textype, gl.GL_TEXTURE_WRAP_T, gl.GL_CLAMP)
        if mipmap:
            if hasattr(gl, 'glGenerateMipmap'):
                gl.glGenerateMipmap(textype)
            else:
                # mipmaps unsupported by the driver: degrade to NEAREST
                minfilter = 'NEAREST'
                magfilter = 'NEAREST'
        if minfilter is None:
            minfilter = 'NEAREST'
        if magfilter is None:
            magfilter = 'NEAREST'
        minfilter = getattr(gl, 'GL_' + minfilter)
        magfilter = getattr(gl, 'GL_' + magfilter)
        gl.glTexParameteri(textype, gl.GL_TEXTURE_MIN_FILTER, minfilter)
        gl.glTexParameteri(textype, gl.GL_TEXTURE_MAG_FILTER, magfilter)
        return buffer
        
    @staticmethod
    def bind(buffer, ndim):
        """Bind a texture buffer to the 1D or 2D texture target."""
        textype = getattr(gl, "GL_TEXTURE_%dD" % ndim)
        gl.glBindTexture(textype, buffer)
        
    @staticmethod
    def get_info(data):
        """Return information about texture data.
        Arguments:
        * data: texture array of shape (height, width, ncomponents).
        Returns:
        * ndim: 1 for single-row textures, 2 otherwise.
        * ncomponents: the number of color components.
        * component_type: the matching GL pixel format constant.
        """
        # find shape, ndim, ncomponents
        shape = data.shape
        # NOTE(review): shape[0] == 0 would leave `ndim` undefined
        # (NameError) — presumably empty textures never reach this point.
        if shape[0] == 1:
            ndim = 1
        elif shape[0] > 1:
            ndim = 2
        # ndim = 2
        ncomponents = shape[2]
        # ncomponents==1 ==> GL_R, 3 ==> GL_RGB, 4 ==> GL_RGBA
        component_type = getattr(gl, ["GL_INTENSITY8", None, "GL_RGB", "GL_RGBA"] \
            [ncomponents - 1])
        return ndim, ncomponents, component_type
        
    @staticmethod
    def convert_data(data):
        """Convert data into an array of uint8 in [0, 255]."""
        if data.dtype == np.float32 or data.dtype == np.float64:
            # floats are assumed to lie in [0, 1]
            return np.array(255 * data, dtype=np.uint8)
        elif data.dtype == np.uint8:
            return data
        else:
            raise ValueError("The texture is in an unsupported format.")
    
    @staticmethod
    def copy(fbo, tex_src, tex_dst, width, height):
        """Copy the contents of one texture into another through a FBO."""
        # bind the FBO
        gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, fbo)
        # attach the source texture to the fbo
        gl.glFramebufferTexture2D(gl.GL_FRAMEBUFFER, gl.GL_COLOR_ATTACHMENT0,
            gl.GL_TEXTURE_2D, tex_src, 0)
        # bind the destination texture
        gl.glBindTexture(gl.GL_TEXTURE_2D, tex_dst)
        # copy from framebuffer (here, the FBO!) to the bound texture
        gl.glCopyTexSubImage2D(gl.GL_TEXTURE_2D, 0, 0, 0, 0, 0, width, height)
        # unbind the FBO
        gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, 0)
        
        # # ncomponents==1 ==> GL_R, 3 ==> GL_RGB, 4 ==> GL_RGBA
        # component_type = getattr(gl, ["GL_INTENSITY8", None, "GL_RGB", "GL_RGBA"] \
            # [ncomponents - 1])
        # gl.glCopyTexImage2D(gl.GL_TEXTURE_2D,
            # 0,  # level
            # component_type, 
            # 0, 0,  # x, y offsets
            # 0, 0,  # x, y
            # w, h, # width, height
            # 0  # border
            # )
    
    # @staticmethod
    # def read_buffer(index=0):
        # gl.glReadBuffer(getattr(gl, 'GL_COLOR_ATTACHMENT%d' % index))
    
    # @staticmethod
    # def draw_buffer():
        # gl.glDrawBuffer(gl.GL_FRONT)
        
    @staticmethod
    def load(data):
        """Load texture data in a bound texture buffer."""
        # convert data in a array of uint8 in [0, 255]
        data = Texture.convert_data(data)
        shape = data.shape
        # get texture info
        ndim, ncomponents, component_type = Texture.get_info(data)
        textype = getattr(gl, "GL_TEXTURE_%dD" % ndim)
        # print ndim, shape, data.shape
        # load data in the buffer
        if ndim == 1:
            gl.glTexImage1D(textype, 0, component_type, shape[1], 0, component_type,
                gl.GL_UNSIGNED_BYTE, data)
        elif ndim == 2:
            # width, height == shape[1], shape[0]: Thanks to the Confusion Club
            gl.glTexImage2D(textype, 0, component_type, shape[1], shape[0], 0,
                component_type, gl.GL_UNSIGNED_BYTE, data)
        
    @staticmethod
    def update(data):
        """Update an already-loaded texture in-place."""
        # convert data in a array of uint8 in [0, 255]
        data = Texture.convert_data(data)
        shape = data.shape
        # get texture info
        ndim, ncomponents, component_type = Texture.get_info(data)
        textype = getattr(gl, "GL_TEXTURE_%dD" % ndim)
        # update buffer
        if ndim == 1:
            gl.glTexSubImage1D(textype, 0, 0, shape[1],
                component_type, gl.GL_UNSIGNED_BYTE, data)
        elif ndim == 2:
            gl.glTexSubImage2D(textype, 0, 0, 0, shape[1], shape[0],
                component_type, gl.GL_UNSIGNED_BYTE, data)

    @staticmethod
    def delete(*buffers):
        """Delete texture buffers."""
        gl.glDeleteTextures(buffers)
class FrameBuffer(object):
    """Low-level OpenGL operations on framebuffer objects (FBO)."""
    @staticmethod
    def create():
        """Create a FBO, or return None when FBOs are unsupported."""
        if hasattr(gl, 'glGenFramebuffers') and gl.glGenFramebuffers:
            return gl.glGenFramebuffers(1)
        return None
    
    @staticmethod
    def bind(buffer=None):
        """Bind a FBO (the default framebuffer when `buffer` is None)."""
        target = 0 if buffer is None else buffer
        gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, target)
    
    @staticmethod
    def bind_texture(texture, i=0):
        """Attach a texture to color attachment `i` of the bound FBO."""
        attachment = getattr(gl, 'GL_COLOR_ATTACHMENT%d' % i)
        gl.glFramebufferTexture2D(gl.GL_FRAMEBUFFER,
                                  attachment,
                                  gl.GL_TEXTURE_2D, texture, 0)
    
    @staticmethod
    def draw_buffers(n):
        """Declare the first `n` color attachments as draw buffers."""
        attachments = [getattr(gl, 'GL_COLOR_ATTACHMENT%d' % i)
                       for i in xrange(n)]
        gl.glDrawBuffers(attachments)
    
    @staticmethod
    def unbind():
        """Unbind any FBO, restoring the default framebuffer."""
        gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, 0)
# Shader manager
# --------------
class ShaderManager(object):
    """Handle vertex and fragment shaders.
    
    Compiles both shaders, links them into a GL program, and exposes
    attribute/uniform location lookup plus activation and cleanup.
    TODO: integrate in the renderer the shader code creation module.
    """
    
    # Initialization methods
    # ----------------------
    def __init__(self, vertex_shader, fragment_shader):
        """Compile shaders and create a program.
        Arguments:
        * vertex_shader: the vertex shader source code, as a string.
        * fragment_shader: the fragment shader source code, as a string.
        """
        # add headers
        vertex_shader = GLVersion.version_header() + vertex_shader
        fragment_shader = GLVersion.version_header() + fragment_shader
        # set shader source
        self.vertex_shader = vertex_shader
        self.fragment_shader = fragment_shader
        # compile shaders
        self.compile()
        # create program
        self.program = self.create_program()

    def compile_shader(self, source, shader_type):
        """Compile a shader (vertex or fragment shader).
        
        Arguments:
        * source: the shader source code as a string.
        * shader_type: either gl.GL_VERTEX_SHADER or gl.GL_FRAGMENT_SHADER.
        Returns:
        * shader: the compiled shader identifier.
        Raises a RuntimeError including the driver info log and the source
        when compilation fails.
        """
        # compile shader
        shader = gl.glCreateShader(shader_type)
        gl.glShaderSource(shader, source)
        gl.glCompileShader(shader)
        result = gl.glGetShaderiv(shader, gl.GL_COMPILE_STATUS)
        infolog = gl.glGetShaderInfoLog(shader)
        if infolog:
            infolog = "\n" + infolog.strip()
        # check compilation error
        if not(result) and infolog:
            msg = "Compilation error for %s." % str(shader_type)
            if infolog is not None:
                msg += infolog
            msg += source
            raise RuntimeError(msg)
        else:
            log_debug("Compilation succeeded for %s.%s" % (str(shader_type), infolog))
        return shader
        
    def compile(self):
        """Compile the vertex and fragment shaders."""
        # print self.vertex_shader
        # print self.fragment_shader
        self.vs = self.compile_shader(self.vertex_shader, gl.GL_VERTEX_SHADER)
        self.fs = self.compile_shader(self.fragment_shader, gl.GL_FRAGMENT_SHADER)
        
    def create_program(self):
        """Create a shader program, attach both shaders and link.
        Raises a RuntimeError with the program info log on link failure.
        """
        program = gl.glCreateProgram()
        gl.glAttachShader(program, self.vs)
        gl.glAttachShader(program, self.fs)
        gl.glLinkProgram(program)
        result = gl.glGetProgramiv(program, gl.GL_LINK_STATUS)
        # check linking error
        if not(result):
            msg = "Shader program linking error:"
            info = gl.glGetProgramInfoLog(program)
            if info:
                msg += info
            raise RuntimeError(msg)
        self.program = program
        return program
        
    def get_attribute_location(self, name):
        """Return the location of an attribute after the shaders have compiled."""
        return gl.glGetAttribLocation(self.program, name)
  
    def get_uniform_location(self, name):
        """Return the location of a uniform after the shaders have compiled."""
        return gl.glGetUniformLocation(self.program, name)
  
    # Activation methods
    # ------------------
    def activate_shaders(self):
        """Activate shaders for the rest of the rendering call."""
        # try:
        gl.glUseProgram(self.program)
            # return True
        # except Exception as e:
            # log_info("Error while activating the shaders: " + e.message)
            # return False
        
    def deactivate_shaders(self):
        """Deactivate shaders for the rest of the rendering call."""
        # try:
        gl.glUseProgram(0)
            # return True
        # except Exception as e:
            # log_info("Error while activating the shaders: " + e.message)
            # return True
        
    # Cleanup methods
    # ---------------
    def detach_shaders(self):
        """Detach shaders from the program."""
        if gl.glIsProgram(self.program):
            gl.glDetachShader(self.program, self.vs)
            gl.glDetachShader(self.program, self.fs)
            
    def delete_shaders(self):
        """Delete the vertex and fragment shaders."""
        if gl.glIsProgram(self.program):
            gl.glDeleteShader(self.vs)
            gl.glDeleteShader(self.fs)

    def delete_program(self):
        """Delete the shader program."""
        if gl.glIsProgram(self.program):
            gl.glDeleteProgram(self.program)
        
    def cleanup(self):
        """Clean up all shaders: detach, delete shaders, delete program."""
        self.detach_shaders()
        self.delete_shaders()
        self.delete_program()
# Slicing classes
# ---------------
# Maximum number of vertices stored in a single vertex buffer object;
# attributes larger than this are split into several VBOs by the Slicer
# (some GPUs/drivers limit the usable VBO size).
MAX_VBO_SIZE = 65000
class Slicer(object):
    """Handle attribute slicing, necessary because of the size
    of buffer objects which is limited on some GPUs."""
    @staticmethod
    def _get_slices(size, maxsize=None):
        """Return a list of slices for a given dataset size.
        Arguments:
        * size: the size of the dataset, i.e. the number of points.
        * maxsize: maximum number of points per slice (MAX_VBO_SIZE by
          default); a non-positive value yields no slices.
        Returns:
        * slices: a list of pairs `(position, slice_size)` where `position`
        is the position of this slice in the original buffer, and
        `slice_size` the slice size.
        """
        if maxsize is None:
            maxsize = MAX_VBO_SIZE
        if maxsize > 0:
            nslices = int(np.ceil(size / float(maxsize)))
        else:
            nslices = 0
        # NOTE(review): slices hold up to maxsize+1 points, so consecutive
        # slices overlap by one vertex — presumably so primitives spanning
        # a slice boundary are not broken; confirm.
        return [(i*maxsize, min(maxsize+1, size-i*maxsize)) for i in xrange(nslices)]
        
    @staticmethod
    def _slice_bounds(bounds, position, slice_size, regular=False):
        """Slice data bounds in a *single* slice according to the VBOs slicing.
        
        Arguments:
        * bounds: the bounds as specified by the user in `create_dataset`.
        * position: the position of the current slice.
        * slice_size: the size of the current slice.
        * regular: False, or the constant spacing between bounds when the
          bounds are regularly spaced (enables a faster code path).
        
        Returns:
        * bounds_sliced: the bounds for the current slice. It is a list an
        1D array of integer indices.
        """
        # first bound index after the sliced VBO: nothing to paint
        if bounds[0] >= position + slice_size:
            bounds_sliced = None
        # last bound index before the sliced VBO: nothing to paint
        elif bounds[-1] < position:
            bounds_sliced = None
        # the current sliced VBO intersects the bounds: something to paint
        else:
            bounds_sliced = bounds
            if not regular:
                # get the bounds that fall within the sliced VBO
                ind = (bounds_sliced>=position) & (bounds_sliced<position + slice_size)
                bounds_sliced = bounds_sliced[ind]
            # HACK: more efficient algorithm when the bounds are regularly
            # spaced
            else:
                d = float(regular)
                p = position
                b0 = bounds_sliced[0]
                b1 = bounds_sliced[-1]
                s = slice_size
                # index range of the bounds that fall inside [p, p+s)
                i0 = max(0, int(np.ceil((p-b0)/d)))
                i1 = max(0, int(np.floor((p+s-b0)/d)))
                bounds_sliced = bounds_sliced[i0:i1+1].copy()
                # whether the first/last original bound is inside the slice
                ind = ((b0 >= p) and (b0 < p+s), (b1 >= p) and (b1 < p+s))
                """
                bounds_sliced = [b0 + d*i]
                (p-b0)/d <= i0 < (p+s-b0)/d
                i0 = ceil((p-b0)/d), i1 = floor((p+s-b0)/d)
                ind = (bs[0] >= p & < p+s, bs[-1])
                """
            # remove the onset (first index of the sliced VBO)
            bounds_sliced -= position
            # handle the case when the slice cuts between two bounds
            if not ind[0]:
                bounds_sliced = np.hstack((0, bounds_sliced))
            if not ind[-1]:
                bounds_sliced = np.hstack((bounds_sliced, slice_size))
        return enforce_dtype(bounds_sliced, np.int32)
        
    def set_size(self, size, doslice=True):
        """Update the total size of the buffer, and update
        the slice information accordingly."""
        # deactivate slicing by using a maxsize number larger than the 
        # actual size
        if not doslice:
            maxsize = 2 * size
        else:
            maxsize = None
        self.size = size
        # if not hasattr(self, 'bounds'):
            # self.bounds = np.array([0, size], dtype=np.int32)
        # compute the data slicing with respect to bounds (specified in the
        # template) and to the maximum size of a VBO.
        self.slices = self._get_slices(self.size, maxsize)
        # print self.size, maxsize
        # print self.slices
        self.slice_count = len(self.slices)
    
    def set_bounds(self, bounds=None):
        """Update the bound size, and update the slice information
        accordingly."""
        if bounds is None:
            bounds = np.array([0, self.size], dtype=np.int32)
        self.bounds = bounds
        # is regular?
        d = np.diff(bounds)
        r = False
        if len(d) > 0:
            dm, dM = d.min(), d.max()
            if dm == dM:
                # all bounds are evenly spaced by dm: use the fast path
                r = dm
                # log_info("Regular bounds")
        self.subdata_bounds = [self._slice_bounds(self.bounds, pos, size, r) \
            for pos, size in self.slices]
class SlicedAttribute(object):
    """Encapsulate methods for slicing an attribute and handling several
    buffer objects for a single attribute."""
    def __init__(self, slicer, location, buffers=None):
        """Create or adopt the sub-buffers for one attribute.
        Arguments:
        * slicer: the Slicer providing the (position, size) of each slice.
        * location: the attribute location in the shader program.
        * buffers: existing buffers to share (for reference variables), or
          None to create new ones.
        """
        self.slicer = slicer
        self.location = location
        if buffers is None:
            # create the sliced buffers
            self.create()
        else:
            log_debug("Creating sliced attribute with existing buffers " +
                str(buffers))
            # or use existing buffers
            self.load_buffers(buffers)
        
    def create(self):
        """Create the sliced buffers."""
        self.buffers = [Attribute.create() for _ in self.slicer.slices]
    
    def load_buffers(self, buffers):
        """Load existing buffers instead of creating new ones."""
        self.buffers = buffers
        
    def delete_buffers(self):
        """Delete all sub-buffers."""
        # for buffer in self.buffers:
        Attribute.delete(*self.buffers)
    
    def load(self, data):
        """Load data on all sliced buffers."""
        for buffer, (pos, size) in zip(self.buffers, self.slicer.slices):
            # WARNING: putting self.location instead of None ==> SEGFAULT on Linux with Nvidia drivers
            Attribute.bind(buffer, None)
            Attribute.load(data[pos:pos + size,...])

    def bind(self, slice=None):
        """Bind the sub-buffer of the given slice (the first by default)."""
        if slice is None:
            slice = 0
        Attribute.bind(self.buffers[slice], self.location)
        
    def update(self, data, mask=None):
        """Update data on all sliced buffers.
        Arguments:
        * data: the full data array for the attribute.
        * mask: optional boolean array with one entry per point; only
          sub-buffers containing at least one True entry are re-uploaded.
        """
        # NOTE: the slicer needs to be updated if the size of the data changes
        # default mask
        if mask is None:
            mask = np.ones(self.slicer.size, dtype=np.bool)
        # is the current subVBO within the given [onset, offset]?
        within = False
        # update VBOs
        for buffer, (pos, size) in zip(self.buffers, self.slicer.slices):
            subdata = data[pos:pos + size,...]
            submask = mask[pos:pos + size]
            # if there is at least one True in the slice mask (submask)
            if submask.any():
                # this sub-buffer contains updated indices
                subonset = submask.argmax()
                suboffset = len(submask) - 1 - submask[::-1].argmax()
                Attribute.bind(buffer, self.location)
                Attribute.update(subdata[subonset:suboffset + 1,...], subonset)
# Painter class
# -------------
class Painter(object):
    """Provides low-level methods for calling OpenGL rendering commands."""
    @staticmethod
    def draw_arrays(primtype, offset, size):
        """Render one contiguous array of primitives."""
        gl.glDrawArrays(primtype, offset, size)
        
    @staticmethod
    def draw_multi_arrays(primtype, bounds):
        """Render consecutive primitive arrays delimited by `bounds`."""
        first = bounds[:-1]
        count = np.diff(bounds)
        gl.glMultiDrawArrays(primtype, first, count, len(bounds) - 1)
        
    @staticmethod
    def draw_indexed_arrays(primtype, size):
        """Render primitives through the currently bound index buffer."""
        gl.glDrawElements(primtype, size, gl.GL_UNSIGNED_INT, None)
# Visual renderer
# ---------------
class GLVisualRenderer(object):
"""Handle rendering of one visual"""
    def __init__(self, renderer, visual):
        """Initialize the visual renderer, create the slicer, initialize
        all variables and the shaders.
        Arguments:
        * renderer: the master GLRenderer, giving access to the scene and
          to the other visual renderers (used by reference variables).
        * visual: the visual dictionary describing variables and shaders.
        """
        # register the master renderer (to access to other visual renderers)
        # and register the scene dictionary
        self.renderer = renderer
        self.scene = renderer.scene
        # register the visual dictionary
        self.visual = visual
        self.framebuffer = visual.get('framebuffer', None)
        # self.beforeclear = visual.get('beforeclear', None)
        # options
        self.options = visual.get('options', {})
        # hold all data changes until the next rendering pass happens
        self.data_updating = {}
        self.textures_to_copy = []
        # set the primitive type from its name
        self.set_primitive_type(self.visual['primitive_type'])
        # indexed mode? set in initialize_variables
        self.use_index = None
        # whether to use slicing? always True except when indexing should not
        # be used, but slicing neither
        self.use_slice = True
        # self.previous_size = None
        # set the slicer
        self.slicer = Slicer()
        # used when slicing needs to be deactivated (like for indexed arrays)
        self.noslicer = Slicer()
        # get size and bounds
        size = self.visual['size']
        bounds = np.array(self.visual.get('bounds', [0, size]), np.int32)
        # self.update_size(size, bounds)
        self.slicer.set_size(size)
        self.slicer.set_bounds(bounds)
        self.noslicer.set_size(size, doslice=False)
        self.noslicer.set_bounds(bounds)
        # compile and link the shaders
        self.shader_manager = ShaderManager(self.visual['vertex_shader'],
                                            self.visual['fragment_shader'])
        # DEBUG
        # log_info(self.shader_manager.vertex_shader)
        # log_info(self.shader_manager.fragment_shader)
        # initialize all variables
        # self.initialize_normalizers()
        self.initialize_variables()
        self.initialize_fbocopy()
        self.load_variables()
def set_primitive_type(self, primtype):
"""Set the primitive type from its name (without the GL_ prefix)."""
self.primitive_type = getattr(gl, "GL_%s" % primtype.upper())
def getarg(self, name):
"""Get a visual parameter."""
return self.visual.get(name, None)
# Variable methods
# ----------------
def get_visuals(self):
"""Return all visuals defined in the scene."""
return self.scene['visuals']
def get_visual(self, name):
"""Return a visual dictionary from its name."""
visuals = [v for v in self.get_visuals() if v.get('name', '') == name]
if not visuals:
return None
return visuals[0]
def get_variables(self, shader_type=None):
"""Return all variables defined in the visual."""
if not shader_type:
return self.visual.get('variables', [])
else:
return [var for var in self.get_variables() \
if var['shader_type'] == shader_type]
def get_variable(self, name, visual=None):
"""Return a variable by its name, and for any given visual which
is specified by its name."""
# get the variables list
if visual is None:
variables = self.get_variables()
else:
variables = self.get_visual(visual)['variables']
variables = [v for v in variables if v.get('name', '') == name]
if not variables:
return None
return variables[0]
def resolve_reference(self, refvar):
"""Resolve a reference variable: return its true value (a Numpy array).
"""
return self.get_variable(refvar.variable, visual=refvar.visual)
# Initialization methods
# ----------------------
def initialize_fbocopy(self):
"""Create a FBO used when copying textures."""
self.fbocopy = FrameBuffer.create()
    def initialize_variables(self):
        """Initialize all variables, after the shaders have compiled.
        Also decides whether indexed rendering is used, which disables
        slicing, and loads the uniforms once through set_data.
        """
        # find out whether indexing is used or not, because in this case
        # the slicing needs to be deactivated
        if self.get_variables('index'):
            # deactivate slicing
            self.slicer = self.noslicer
            log_debug("deactivating slicing because there's an indexed buffer")
            self.use_index = True
        else:
            self.use_index = False
        # initialize all variables
        for var in self.get_variables():
            shader_type = var['shader_type']
            # skip varying
            if shader_type == 'varying':
                continue
            name = var['name']
            # call initialize_***(name) to initialize that variable
            getattr(self, 'initialize_%s' % shader_type)(name)
        # special case for uniforms: need to load them the first time
        uniforms = self.get_variables('uniform')
        self.set_data(**dict([(v['name'], v.get('data', None)) for v in uniforms]))
    def initialize_attribute(self, name):
        """Initialize an attribute: get the shader location, create the
        sliced buffers, and load the data.
        Reference attributes (RefVar data) share the target variable's
        buffers instead of creating new ones.
        """
        # retrieve the location of that attribute in the shader
        location = self.shader_manager.get_attribute_location(name)
        variable = self.get_variable(name)
        variable['location'] = location
        # deal with reference attributes: share the same buffers between 
        # several different visuals
        if isinstance(variable.get('data', None), RefVar):
            # HACK: if the targeted attribute is indexed, we should
            # deactivate slicing here
            if self.renderer.visual_renderers[variable['data'].visual].use_index:
                log_debug("deactivating slicing")
                self.slicer = self.noslicer
            # use the existing buffers from the target variable
            target = self.resolve_reference(variable['data'])
            variable['sliced_attribute'] = SlicedAttribute(self.slicer, location,
                buffers=target['sliced_attribute'].buffers)
        else:
            # initialize the sliced buffers
            variable['sliced_attribute'] = SlicedAttribute(self.slicer, location)
def initialize_index(self, name):
variable = self.get_variable(name)
variable['buffer'] = Attribute.create()
    def initialize_texture(self, name):
        """Initialize a texture: create its buffer, or share the target's
        buffer for reference variables, and store the sampler location."""
        variable = self.get_variable(name)
        # handle reference variable to texture
        if isinstance(variable.get('data', None), RefVar):
            target = self.resolve_reference(variable['data'])
            variable['buffer'] = target['buffer']
            variable['location'] = target['location']
        else:
            variable['buffer'] = Texture.create(variable['ndim'],
                mipmap=variable.get('mipmap', None),
                minfilter=variable.get('minfilter', None),
                magfilter=variable.get('magfilter', None),
                )
            # NEW
            # get the location of the sampler uniform
            location = self.shader_manager.get_uniform_location(name)
            variable['location'] = location
    def initialize_framebuffer(self, name):
        """Initialize a framebuffer variable: create the FBO and attach
        every texture it lists as a color attachment."""
        variable = self.get_variable(name)
        variable['buffer'] = FrameBuffer.create()
        # bind the frame buffer
        FrameBuffer.bind(variable['buffer'])
        # variable['texture'] is a list of texture names in the current visual
        if isinstance(variable['texture'], basestring):
            variable['texture'] = [variable['texture']]
        # draw as many buffers as there are textures in that frame buffer
        FrameBuffer.draw_buffers(len(variable['texture']))
        for i, texname in enumerate(variable['texture']):
            # get the texture variable: 
            texture = self.get_variable(texname)
            # link the texture to the frame buffer
            FrameBuffer.bind_texture(texture['buffer'], i)
        # unbind the frame buffer
        FrameBuffer.unbind()
def initialize_uniform(self, name):
"""Initialize an uniform: get the location after the shaders have
been compiled."""
location = self.shader_manager.get_uniform_location(name)
variable = self.get_variable(name)
variable['location'] = location
def initialize_compound(self, name):
pass
# Normalization methods
# ---------------------
# def initialize_normalizers(self):
# self.normalizers = {}
# Loading methods
# ---------------
def load_variables(self):
"""Load data for all variables at initialization."""
for var in self.get_variables():
shader_type = var['shader_type']
# skip uniforms
if shader_type == 'uniform' or shader_type == 'varying' or shader_type == 'framebuffer':
continue
# call load_***(name) to load that variable
getattr(self, 'load_%s' % shader_type)(var['name'])
    def load_attribute(self, name, data=None):
        """Load data for an attribute variable.
        When `data` is None, the data stored in the variable dictionary
        is used. Reference attributes are skipped: their buffers belong
        to the target variable.
        """
        variable = self.get_variable(name)
        if variable['sliced_attribute'].location < 0:
            log_debug(("Variable '%s' could not be loaded, probably because "
                       "it is not used in the shaders") % name)
            return
        olddata = variable.get('data', None)
        if isinstance(olddata, RefVar):
            log_debug("Skipping loading data for attribute '%s' since it "
                "references a target variable." % name)
            return
        if data is None:
            data = olddata
        if data is not None:
            # normalization
            # if name in self.options.get('normalizers', {}):
                # viewbox = self.options['normalizers'][name]
                # if viewbox:
                    # self.normalizers[name] = DataNormalizer(data)
                    # # normalize data with the specified viewbox, None by default
                    # # meaning that the natural bounds of the data are used.
                    # data = self.normalizers[name].normalize(viewbox)
            variable['sliced_attribute'].load(data)
def load_index(self, name, data=None):
"""Load data for an index variable."""
variable = self.get_variable(name)
if data is None:
data = variable.get('data', None)
if data is not None:
self.indexsize = len(data)
Attribute.bind(variable['buffer'], index=True)
Attribute.load(data, index=True)
    def load_texture(self, name, data=None):
        """Load data for a texture variable.
        Reference textures are skipped: their buffer belongs to the target
        variable. Also flags the sampler locations for re-binding.
        """
        variable = self.get_variable(name)
        
        if variable['buffer'] < 0:
            log_debug(("Variable '%s' could not be loaded, probably because "
                       "it is not used in the shaders") % name)
            return
        
        if data is None:
            data = variable.get('data', None)
            
        # NEW: update sampler location
        self.update_samplers = True
        
        if isinstance(data, RefVar):
            log_debug("Skipping loading data for texture '%s' since it "
                "references a target variable." % name)
            return
            
        if data is not None:
            Texture.bind(variable['buffer'], variable['ndim'])
            Texture.load(data)
    def load_uniform(self, name, data=None):
        """Load data for an uniform variable.
        Dispatches to the proper Uniform loader depending on `ndim` (an
        int for scalars/vectors, a tuple for matrices) and `size` (set for
        arrays of scalars/vectors).
        """
        variable = self.get_variable(name)
        location = variable['location']
        if location < 0:
            log_debug(("Variable '%s' could not be loaded, probably because "
                "it is not used in the shaders") % name)
            return
        if data is None:
            data = variable.get('data', None)
        if data is not None:
            ndim = variable['ndim']
            size = variable.get('size', None)
            # one value
            if not size:
                # scalar or vector
                if type(ndim) == int or type(ndim) == long:
                    if ndim == 1:
                        Uniform.load_scalar(location, data)
                    else:
                        Uniform.load_vector(location, data)
                # matrix 
                elif type(ndim) == tuple:
                    Uniform.load_matrix(location, data)
            # array
            else:
                # scalar or vector
                if type(ndim) == int or type(ndim) == long:
                    Uniform.load_array(location, data)
def load_compound(self, name, data=None):
pass
# Updating methods
# ----------------
def update_variable(self, name, data, **kwargs):
"""Update data of a variable."""
variable = self.get_variable(name)
if variable is None:
log_debug("Variable '%s' was not found, unable to update it." % name)
else:
shader_type = variable['shader_type']
# skip compound, which is handled in set_data
if shader_type == 'compound' or shader_type == 'varying' or shader_type == 'framebuffer':
pass
else:
getattr(self, 'update_%s' % shader_type)(name, data, **kwargs)
def update_attribute(self, name, data):#, bounds=None):
"""Update data for an attribute variable."""
variable = self.get_variable(name)
if variable['sliced_attribute'].location < 0:
log_debug(("Variable '%s' could not be updated, probably because "
"it is not used in the shaders") % name)
return
# handle reference variable
olddata = variable.get('data', None)
if isinstance(olddata, RefVar):
raise ValueError("Unable to load data for a reference " +
"attribute. Use the target variable directly.""")
variable['data'] = data
att = variable['sliced_attribute']
if olddata is None:
oldshape = 0
else:
oldshape = olddata.shape
# print name, oldshape, data.shape
# handle size changing
if data.shape[0] != oldshape[0]:
log_debug(("Creating new buffers for variable %s, old size=%s,"
"new size=%d") % (name, oldshape[0], data.shape[0]))
# update the size only when not using index arrays
if self.use_index:
newsize = self.slicer.size
else:
newsize = data.shape[0]
# update the slicer size and bounds
self.slicer.set_size(newsize, doslice=not(self.use_index))
# HACK: update the bounds only if there are no bounds basically
# (ie. 2 bounds only), otherwise we assume the bounds have been
# changed explicitely
if len(self.slicer.bounds) == 2:
self.slicer.set_bounds()
# delete old buffers
att.delete_buffers()
# create new buffers
att.create()
# load data
att.load(data)
# forget previous size
# self.previous_size = None
else:
# update data
att.update(data)
def update_index(self, name, data):
"""Update data for a index variable."""
variable = self.get_variable(name)
prevsize = len(variable['data'])
variable['data'] = data
newsize = len(data)
# handle size changing
if newsize != prevsize:
# update the total size (in slicer)
# self.slicer.set_size(newsize, doslice=False)
self.indexsize = newsize
# delete old buffers
Attribute.delete(variable['buffer'])
# create new buffer
variable['buffer'] = Attribute.create()
# load data
Attribute.bind(variable['buffer'], variable['ndim'], index=True)
Attribute.load(data, index=True)
else:
# update data
Attribute.bind(variable['buffer'], variable['ndim'], index=True)
Attribute.update(data, index=True)
def update_texture(self, name, data):
"""Update data for a texture variable."""
variable = self.get_variable(name)
if variable['buffer'] < 0:
log_debug(("Variable '%s' could not be loaded, probably because "
"it is not used in the shaders") % name)
return
prevshape = variable['data'].shape
variable['data'] = data
# handle size changing
if data.shape != prevshape:
# delete old buffers
# Texture.delete(variable['buffer'])
variable['ndim'], variable['ncomponents'], _ = Texture.get_info(data)
# create new buffer
# variable['buffer'] = Texture.create(variable['ndim'],
# mipmap=variable.get('mipmap', None),
# minfilter=variable.get('minfilter', None),
# magfilter=variable.get('magfilter', None),)
# load data
Texture.bind(variable['buffer'], variable['ndim'])
Texture.load(data)
else:
# update data
Texture.bind(variable['buffer'], variable['ndim'])
Texture.update(data)
def update_uniform(self, name, data):
"""Update data for an uniform variable."""
variable = self.get_variable(name)
variable['data'] = data
# the uniform interface is the same for load/update
self.load_uniform(name, data)
special_keywords = ['visible',
'size',
'bounds',
'primitive_type',
'constrain_ratio',
'constrain_navigation',
]
def set_data(self, **kwargs):
"""Load data for the specified visual. Uploading does not happen here
but in `update_all_variables` instead, since this needs to happen
after shader program binding in the paint method.
Arguments:
* **kwargs: the data to update as name:value pairs. name can be
any field of the visual, plus one of the following keywords:
* visible: whether this visual should be visible,
* size: the size of the visual,
* primitive_type: the GL primitive type,
* constrain_ratio: whether to constrain the ratio of the visual,
* constrain_navigation: whether to constrain the navigation,
"""
# handle compound variables
kwargs2 = kwargs.copy()
for name, data in kwargs2.iteritems():
variable = self.get_variable(name)
if variable is None:
# log_info("variable '%s' unknown" % name)
continue
if variable is not None and variable['shader_type'] == 'compound':
fun = variable['fun']
kwargs.pop(name)
# HACK: if the target variable in the compound is a special
# keyword, we update it in kwargs, otherwise we update the
# data in self.data_updating
# print name, fun(data)
# if name in self.special_keywords:
# kwargs.update(**fun(data))
# else:
# self.data_updating.update(**fun(data))
kwargs.update(**fun(data))
# remove non-visible variables
if not variable.get('visible', True):
kwargs.pop(name)
# handle visual visibility
visible = kwargs.pop('visible', None)
if visible is not None:
self.visual['visible'] = visible
# handle size keyword
size = kwargs.pop('size', None)
# print size
if size is not None:
self.slicer.set_size(size)
# handle bounds keyword
bounds = kwargs.pop('bounds', None)
if bounds is not None:
self.slicer.set_bounds(bounds)
# handle primitive type special keyword
primitive_type = kwargs.pop('primitive_type', None)
if primitive_type is not None:
self.visual['primitive_type'] = primitive_type
self.set_primitive_type(primitive_type)
# handle constrain_ratio keyword
constrain_ratio = kwargs.pop('constrain_ratio', None)
if constrain_ratio is not None:
self.visual['constrain_ratio'] = constrain_ratio
# handle constrain_navigation keyword
constrain_navigation = kwargs.pop('constrain_navigation', None)
if constrain_navigation is not None:
self.visual['constrain_navigation'] = constrain_navigation
# flag the other variables as to be updated
self.data_updating.update(**kwargs)
def copy_texture(self, tex1, tex2):
self.textures_to_copy.append((tex1, tex2))
def update_all_variables(self):
"""Upload all new data that needs to be updated."""
# # current size, that may change following variable updating
# if not self.previous_size:
# self.previous_size = self.slicer.size
# go through all data changes
for name, data in self.data_updating.iteritems():
if data is not None:
# log_info("Updating variable '%s'" % name)
self.update_variable(name, data)
else:
log_debug("Data for variable '%s' is None" % name)
# reset the data updating dictionary
self.data_updating.clear()
def copy_all_textures(self):
# copy textures
for tex1, tex2 in self.textures_to_copy:
# tex1 = self.get_variable(tex1)
tex1 = self.resolve_reference(tex1)
tex2 = self.get_variable(tex2)
# tex2 = self.resolve_reference(tex2)
# # Texture.read_buffer()
# Texture.bind(tex2['buffer'], tex2['ndim'])
# copy(fbo, tex_src, tex_dst, width, height)
Texture.copy(self.fbocopy, tex1['buffer'], tex2['buffer'],
tex1['shape'][0], tex1['shape'][1])
self.textures_to_copy = []
# Binding methods
# ---------------
def bind_attributes(self, slice=None):
"""Bind all attributes of the visual for the given slice.
This method is used during rendering."""
# find all visual variables with shader type 'attribute'
attributes = self.get_variables('attribute')
# for each attribute, bind the sub buffer corresponding to the given
# slice
for variable in attributes:
loc = variable['location']
if loc < 0:
log_debug(("Unable to bind attribute '%s', probably because "
"it is not used in the shaders.") % variable['name'])
continue
variable['sliced_attribute'].bind(slice)
Attribute.set_attribute(loc, variable['ndim'])
def bind_indices(self):
indices = self.get_variables('index')
for variable in indices:
Attribute.bind(variable['buffer'], index=True)
def bind_textures(self):
"""Bind all textures of the visual.
This method is used during rendering."""
textures = self.get_variables('texture')
for i, variable in enumerate(textures):
buffer = variable.get('buffer', None)
if buffer is not None:
# HACK: we update the sampler values here
if self.update_samplers and not isinstance(variable['data'], RefVar):
Uniform.load_scalar(variable['location'], i)
# NEW
gl.glActiveTexture(getattr(gl, 'GL_TEXTURE%d' % i))
Texture.bind(buffer, variable['ndim'])
else:
log_debug("Texture '%s' was not properly initialized." % \
variable['name'])
# deactivate all textures if there are not textures
if not textures:
Texture.bind(0, 1)
Texture.bind(0, 2)
# no need to update the samplers after the first execution of this
# method
self.update_samplers = False
# Paint methods
# -------------
def paint(self):
"""Paint the visual slice by slice."""
# do not display non-visible visuals
if not self.visual.get('visible', True):
return
# activate the shaders
try:
self.shader_manager.activate_shaders()
# if the shaders could not be successfully activated, stop the
# rendering immediately
except Exception as e:
log_info("Error while activating the shaders: " + str(e))
return
# update all variables
self.update_all_variables()
# bind all texturex for that slice
self.bind_textures()
# paint using indices
if self.use_index:
self.bind_attributes()
self.bind_indices()
Painter.draw_indexed_arrays(self.primitive_type, self.indexsize)
# or paint without
elif self.use_slice:
# draw all sliced buffers
for slice in xrange(len(self.slicer.slices)):
# get slice bounds
slice_bounds = self.slicer.subdata_bounds[slice]
# print slice, slice_bounds
# bind all attributes for that slice
self.bind_attributes(slice)
# call the appropriate OpenGL rendering command
# if len(self.slicer.bounds) <= 2:
# print "slice bounds", slice_bounds
if len(slice_bounds) <= 2:
Painter.draw_arrays(self.primitive_type, slice_bounds[0],
slice_bounds[1] - slice_bounds[0])
else:
Painter.draw_multi_arrays(self.primitive_type, slice_bounds)
self.copy_all_textures()
# deactivate the shaders
self.shader_manager.deactivate_shaders()
# Cleanup methods
# ---------------
def cleanup_attribute(self, name):
"""Cleanup a sliced attribute (all sub-buffers)."""
variable = self.get_variable(name)
variable['sliced_attribute'].delete_buffers()
def cleanup_texture(self, name):
"""Cleanup a texture."""
variable = self.get_variable(name)
Texture.delete(variable['buffer'])
def cleanup(self):
"""Clean up all variables."""
log_debug("Cleaning up all variables.")
for variable in self.get_variables():
shader_type = variable['shader_type']
if shader_type in ('attribute', 'texture'):
getattr(self, 'cleanup_%s' % shader_type)(variable['name'])
# clean up shaders
self.shader_manager.cleanup()
# Scene renderer
# --------------
class GLRenderer(object):
    """OpenGL renderer for a Scene.
    This class takes a Scene object (dictionary) as an input, and
    renders the scene. It provides methods to update the data in real-time.
    """
    # Initialization
    # --------------
    def __init__(self, scene):
        """Initialize the renderer using the information on the scene.
        Arguments:
          * scene: a Scene dictionary with a `visuals` field containing
            the list of visuals.
        """
        self.scene = scene
        # (x, y) scaling factors of the viewport; recomputed in `resize`
        # when the ratio is constrained
        self.viewport = (1., 1.)
        # name -> GLVisualRenderer mapping, populated in `initialize`
        self.visual_renderers = {}
    def set_renderer_options(self):
        """Set the global OpenGL options declared in
        `scene['renderer_options']` (antialiasing, sprites, transparency,
        3D depth testing, background color)."""
        options = self.scene.get('renderer_options', {})
        # use vertex buffer object
        gl.glEnableClientState(gl.GL_VERTEX_ARRAY)
        # used for multisampling (antialiasing)
        if options.get('antialiasing', None):
            gl.glEnable(gl.GL_MULTISAMPLE)
        # used for sprites (point size is then set in the vertex shader)
        if options.get('sprites', True):
            gl.glEnable(gl.GL_VERTEX_PROGRAM_POINT_SIZE)
            gl.glEnable(gl.GL_POINT_SPRITE)
        # enable transparency
        if options.get('transparency', True):
            gl.glEnable(gl.GL_BLEND)
            blendfunc = options.get('transparency_blendfunc',
                ('SRC_ALPHA', 'ONE_MINUS_SRC_ALPHA')
                # ('ONE_MINUS_DST_ALPHA', 'ONE')
                )
            # resolve the GL constants from their names
            blendfunc = [getattr(gl, 'GL_' + x) for x in blendfunc]
            gl.glBlendFunc(*blendfunc)
        # enable depth buffer, necessary for 3D rendering
        if options.get('activate3D', None):
            gl.glEnable(gl.GL_DEPTH_TEST)
            gl.glDepthMask(gl.GL_TRUE)
            gl.glDepthFunc(gl.GL_LEQUAL)
            gl.glDepthRange(0.0, 1.0)
            # TODO: always enable??
            gl.glClearDepth(1.0)
        # Paint the background with the specified color (black by default)
        background = options.get('background', (0, 0, 0, 0))
        gl.glClearColor(*background)
    def get_renderer_option(self, name):
        """Return a single renderer option, or None if it is unset."""
        return self.scene.get('renderer_options', {}).get(name, None)
    # Visual methods
    # --------------
    def get_visuals(self):
        """Return all visuals defined in the scene."""
        return self.scene.get('visuals', [])
    def get_visual(self, name):
        """Return a visual by its name.
        Raises ValueError if no visual with that name exists."""
        visuals = [v for v in self.get_visuals() if v.get('name', '') == name]
        if not visuals:
            raise ValueError("The visual %s has not been found" % name)
        return visuals[0]
    # Data methods
    # ------------
    def set_data(self, name, **kwargs):
        """Load data for the specified visual. Uploading does not happen here
        but in `update_all_variables` instead, since this needs to happen
        after shader program binding in the paint method.
        Arguments:
          * visual: the name of the visual as a string, or a visual dict.
          * **kwargs: the data to update as name:value pairs. name can be
            any field of the visual, plus one of the following keywords:
              * size: the size of the visual,
              * primitive_type: the GL primitive type,
              * constrain_ratio: whether to constrain the ratio of the visual,
              * constrain_navigation: whether to constrain the navigation,
        """
        # call set_data on the given visual renderer; unknown names are
        # silently ignored (the renderer may not be initialized yet)
        if name in self.visual_renderers:
            self.visual_renderers[name].set_data(**kwargs)
    def copy_texture(self, name, tex1, tex2):
        """Schedule a texture copy (tex1 -> tex2) in the visual `name`."""
        self.visual_renderers[name].copy_texture(tex1, tex2)
    # Rendering methods
    # -----------------
    def initialize(self):
        """Initialize the renderer."""
        # print the renderer information
        for key, value in GLVersion.get_renderer_info().iteritems():
            if key is not None and value is not None:
                log_debug(key + ": " + value)
        # initialize the renderer options using the options set in the Scene
        self.set_renderer_options()
        # create the VisualRenderer objects; an ordered dict keeps the
        # painting order identical to the declaration order
        self.visual_renderers = OrderedDict()
        for visual in self.get_visuals():
            name = visual['name']
            self.visual_renderers[name] = GLVisualRenderer(self, visual)
        # detect FBO: collect the framebuffer objects declared by visuals
        self.fbos = []
        for name, vr in self.visual_renderers.iteritems():
            fbos = vr.get_variables('framebuffer')
            if fbos:
                self.fbos.extend([fbo['buffer'] for fbo in fbos])
    def clear(self):
        """Clear the scene."""
        # clear the color buffer (and the depth buffer if 3D is activated)
        if self.scene.get('renderer_options', {}).get('activate3D', None):
            gl.glClear(gl.GL_COLOR_BUFFER_BIT | gl.GL_DEPTH_BUFFER_BIT)
        else:
            gl.glClear(gl.GL_COLOR_BUFFER_BIT)
    def paint(self):
        """Paint the scene."""
        # non-FBO rendering: simply paint every visual in order
        if not self.fbos:
            self.clear()
            for name, visual_renderer in self.visual_renderers.iteritems():
                visual_renderer.paint()
        # render each FBO separately, then non-FBO ("screen") visuals
        else:
            for fbo in self.fbos:
                FrameBuffer.bind(fbo)
                # fbo index
                ifbo = self.fbos.index(fbo)
                # clear
                self.clear()
                # paint all visual renderers attached to this FBO
                for name, visual_renderer in self.visual_renderers.iteritems():
                    if visual_renderer.framebuffer == ifbo:
                        visual_renderer.paint()
            # finally, paint the screen
            FrameBuffer.unbind()
            # render screen (non-FBO) visuals
            self.clear()
            for name, visual_renderer in self.visual_renderers.iteritems():
                if visual_renderer.framebuffer == 'screen':
                    visual_renderer.paint()
    def resize(self, width, height):
        """Resize the canvas and make appropriate changes to the scene."""
        # paint within the whole window
        gl.glViewport(0, 0, width, height)
        # compute the constrained viewport
        x = y = 1.0
        if self.get_renderer_option('constrain_ratio'):
            if height > 0:
                # aw: aspect ratio of the window, ar: requested ratio
                aw = float(width) / height
                ar = self.get_renderer_option('constrain_ratio')
                if ar is True:
                    ar = 1.
                # stretch the viewport along the axis with extra room so
                # that the scene keeps the requested aspect ratio
                if ar < aw:
                    x, y = aw / ar, 1.
                else:
                    x, y = 1., ar / aw
        self.viewport = x, y
        width = float(width)
        height = float(height)
        # update the viewport and window size for all visuals
        for visual in self.get_visuals():
            self.set_data(visual['name'],
                    viewport=self.viewport,
                    window_size=(width, height))
    # Cleanup methods
    # ---------------
    def cleanup(self):
        """Clean up all allocated OpenGL objects."""
        for name, renderer in self.visual_renderers.iteritems():
            renderer.cleanup()
| [
"OpenGL.GL.glGetProgramiv",
"numpy.hstack",
"OpenGL.GL.glDeleteProgram",
"OpenGL.GL.glGetString",
"numpy.int32",
"numpy.array",
"OpenGL.GL.glCreateShader",
"OpenGL.GL.glAttachShader",
"OpenGL.GL.glEnableClientState",
"OpenGL.GL.glDepthRange",
"OpenGL.GL.glTexImage2D",
"OpenGL.GL.glViewport",
... | [((75, 153), 'galry.log_warn', 'log_warn', (['"""PyOpenGL is not available and Galry won\'t be able to render plots."""'], {}), '("PyOpenGL is not available and Galry won\'t be able to render plots.")\n', (83, 153), False, 'from galry import log_warn\n'), ((2014, 2032), 'OpenGL.GL.glGenBuffers', 'gl.glGenBuffers', (['(1)'], {}), '(1)\n', (2029, 2032), True, 'import OpenGL.GL as gl\n'), ((2399, 2430), 'OpenGL.GL.glBindBuffer', 'gl.glBindBuffer', (['gltype', 'buffer'], {}), '(gltype, buffer)\n', (2414, 2430), True, 'import OpenGL.GL as gl\n'), ((2648, 2723), 'OpenGL.GL.glVertexAttribPointer', 'gl.glVertexAttribPointer', (['location', 'ndim', 'gl.GL_FLOAT', 'gl.GL_FALSE', '(0)', 'None'], {}), '(location, ndim, gl.GL_FLOAT, gl.GL_FALSE, 0, None)\n', (2672, 2723), True, 'import OpenGL.GL as gl\n'), ((3260, 3309), 'OpenGL.GL.glBufferData', 'gl.glBufferData', (['gltype', 'data', 'gl.GL_DYNAMIC_DRAW'], {}), '(gltype, data, gl.GL_DYNAMIC_DRAW)\n', (3275, 3309), True, 'import OpenGL.GL as gl\n'), ((6316, 6335), 'OpenGL.GL.glGenTextures', 'gl.glGenTextures', (['(1)'], {}), '(1)\n', (6332, 6335), True, 'import OpenGL.GL as gl\n'), ((6344, 6387), 'OpenGL.GL.glPixelStorei', 'gl.glPixelStorei', (['gl.GL_UNPACK_ALIGNMENT', '(1)'], {}), '(gl.GL_UNPACK_ALIGNMENT, 1)\n', (6360, 6387), True, 'import OpenGL.GL as gl\n'), ((6486, 6548), 'OpenGL.GL.glTexParameteri', 'gl.glTexParameteri', (['textype', 'gl.GL_TEXTURE_WRAP_S', 'gl.GL_CLAMP'], {}), '(textype, gl.GL_TEXTURE_WRAP_S, gl.GL_CLAMP)\n', (6504, 6548), True, 'import OpenGL.GL as gl\n'), ((6557, 6619), 'OpenGL.GL.glTexParameteri', 'gl.glTexParameteri', (['textype', 'gl.GL_TEXTURE_WRAP_T', 'gl.GL_CLAMP'], {}), '(textype, gl.GL_TEXTURE_WRAP_T, gl.GL_CLAMP)\n', (6575, 6619), True, 'import OpenGL.GL as gl\n'), ((7112, 7176), 'OpenGL.GL.glTexParameteri', 'gl.glTexParameteri', (['textype', 'gl.GL_TEXTURE_MIN_FILTER', 'minfilter'], {}), '(textype, gl.GL_TEXTURE_MIN_FILTER, minfilter)\n', (7130, 7176), True, 'import OpenGL.GL as 
gl\n'), ((7185, 7249), 'OpenGL.GL.glTexParameteri', 'gl.glTexParameteri', (['textype', 'gl.GL_TEXTURE_MAG_FILTER', 'magfilter'], {}), '(textype, gl.GL_TEXTURE_MAG_FILTER, magfilter)\n', (7203, 7249), True, 'import OpenGL.GL as gl\n'), ((7436, 7469), 'OpenGL.GL.glBindTexture', 'gl.glBindTexture', (['textype', 'buffer'], {}), '(textype, buffer)\n', (7452, 7469), True, 'import OpenGL.GL as gl\n'), ((8540, 8584), 'OpenGL.GL.glBindFramebuffer', 'gl.glBindFramebuffer', (['gl.GL_FRAMEBUFFER', 'fbo'], {}), '(gl.GL_FRAMEBUFFER, fbo)\n', (8560, 8584), True, 'import OpenGL.GL as gl\n'), ((8644, 8748), 'OpenGL.GL.glFramebufferTexture2D', 'gl.glFramebufferTexture2D', (['gl.GL_FRAMEBUFFER', 'gl.GL_COLOR_ATTACHMENT0', 'gl.GL_TEXTURE_2D', 'tex_src', '(0)'], {}), '(gl.GL_FRAMEBUFFER, gl.GL_COLOR_ATTACHMENT0, gl.\n GL_TEXTURE_2D, tex_src, 0)\n', (8669, 8748), True, 'import OpenGL.GL as gl\n'), ((8827, 8870), 'OpenGL.GL.glBindTexture', 'gl.glBindTexture', (['gl.GL_TEXTURE_2D', 'tex_dst'], {}), '(gl.GL_TEXTURE_2D, tex_dst)\n', (8843, 8870), True, 'import OpenGL.GL as gl\n'), ((8953, 9023), 'OpenGL.GL.glCopyTexSubImage2D', 'gl.glCopyTexSubImage2D', (['gl.GL_TEXTURE_2D', '(0)', '(0)', '(0)', '(0)', '(0)', 'width', 'height'], {}), '(gl.GL_TEXTURE_2D, 0, 0, 0, 0, 0, width, height)\n', (8975, 9023), True, 'import OpenGL.GL as gl\n'), ((9061, 9103), 'OpenGL.GL.glBindFramebuffer', 'gl.glBindFramebuffer', (['gl.GL_FRAMEBUFFER', '(0)'], {}), '(gl.GL_FRAMEBUFFER, 0)\n', (9081, 9103), True, 'import OpenGL.GL as gl\n'), ((11471, 11499), 'OpenGL.GL.glDeleteTextures', 'gl.glDeleteTextures', (['buffers'], {}), '(buffers)\n', (11490, 11499), True, 'import OpenGL.GL as gl\n'), ((11960, 12007), 'OpenGL.GL.glBindFramebuffer', 'gl.glBindFramebuffer', (['gl.GL_FRAMEBUFFER', 'buffer'], {}), '(gl.GL_FRAMEBUFFER, buffer)\n', (11980, 12007), True, 'import OpenGL.GL as gl\n'), ((12437, 12479), 'OpenGL.GL.glBindFramebuffer', 'gl.glBindFramebuffer', (['gl.GL_FRAMEBUFFER', '(0)'], {}), '(gl.GL_FRAMEBUFFER, 0)\n', 
(12457, 12479), True, 'import OpenGL.GL as gl\n'), ((13585, 13615), 'OpenGL.GL.glCreateShader', 'gl.glCreateShader', (['shader_type'], {}), '(shader_type)\n', (13602, 13615), True, 'import OpenGL.GL as gl\n'), ((13624, 13657), 'OpenGL.GL.glShaderSource', 'gl.glShaderSource', (['shader', 'source'], {}), '(shader, source)\n', (13641, 13657), True, 'import OpenGL.GL as gl\n'), ((13666, 13692), 'OpenGL.GL.glCompileShader', 'gl.glCompileShader', (['shader'], {}), '(shader)\n', (13684, 13692), True, 'import OpenGL.GL as gl\n'), ((13719, 13765), 'OpenGL.GL.glGetShaderiv', 'gl.glGetShaderiv', (['shader', 'gl.GL_COMPILE_STATUS'], {}), '(shader, gl.GL_COMPILE_STATUS)\n', (13735, 13765), True, 'import OpenGL.GL as gl\n'), ((13784, 13813), 'OpenGL.GL.glGetShaderInfoLog', 'gl.glGetShaderInfoLog', (['shader'], {}), '(shader)\n', (13805, 13813), True, 'import OpenGL.GL as gl\n'), ((14680, 14700), 'OpenGL.GL.glCreateProgram', 'gl.glCreateProgram', ([], {}), '()\n', (14698, 14700), True, 'import OpenGL.GL as gl\n'), ((14709, 14744), 'OpenGL.GL.glAttachShader', 'gl.glAttachShader', (['program', 'self.vs'], {}), '(program, self.vs)\n', (14726, 14744), True, 'import OpenGL.GL as gl\n'), ((14753, 14788), 'OpenGL.GL.glAttachShader', 'gl.glAttachShader', (['program', 'self.fs'], {}), '(program, self.fs)\n', (14770, 14788), True, 'import OpenGL.GL as gl\n'), ((14797, 14822), 'OpenGL.GL.glLinkProgram', 'gl.glLinkProgram', (['program'], {}), '(program)\n', (14813, 14822), True, 'import OpenGL.GL as gl\n'), ((14841, 14886), 'OpenGL.GL.glGetProgramiv', 'gl.glGetProgramiv', (['program', 'gl.GL_LINK_STATUS'], {}), '(program, gl.GL_LINK_STATUS)\n', (14858, 14886), True, 'import OpenGL.GL as gl\n'), ((15345, 15387), 'OpenGL.GL.glGetAttribLocation', 'gl.glGetAttribLocation', (['self.program', 'name'], {}), '(self.program, name)\n', (15367, 15387), True, 'import OpenGL.GL as gl\n'), ((15528, 15571), 'OpenGL.GL.glGetUniformLocation', 'gl.glGetUniformLocation', (['self.program', 'name'], {}), 
'(self.program, name)\n', (15551, 15571), True, 'import OpenGL.GL as gl\n'), ((15750, 15779), 'OpenGL.GL.glUseProgram', 'gl.glUseProgram', (['self.program'], {}), '(self.program)\n', (15765, 15779), True, 'import OpenGL.GL as gl\n'), ((16076, 16094), 'OpenGL.GL.glUseProgram', 'gl.glUseProgram', (['(0)'], {}), '(0)\n', (16091, 16094), True, 'import OpenGL.GL as gl\n'), ((16405, 16433), 'OpenGL.GL.glIsProgram', 'gl.glIsProgram', (['self.program'], {}), '(self.program)\n', (16419, 16433), True, 'import OpenGL.GL as gl\n'), ((16649, 16677), 'OpenGL.GL.glIsProgram', 'gl.glIsProgram', (['self.program'], {}), '(self.program)\n', (16663, 16677), True, 'import OpenGL.GL as gl\n'), ((16840, 16868), 'OpenGL.GL.glIsProgram', 'gl.glIsProgram', (['self.program'], {}), '(self.program)\n', (16854, 16868), True, 'import OpenGL.GL as gl\n'), ((20383, 20421), 'galry.enforce_dtype', 'enforce_dtype', (['bounds_sliced', 'np.int32'], {}), '(bounds_sliced, np.int32)\n', (20396, 20421), False, 'from galry import enforce_dtype, DataNormalizer, log_info, log_debug, log_warn, RefVar\n'), ((21492, 21507), 'numpy.diff', 'np.diff', (['bounds'], {}), '(bounds)\n', (21499, 21507), True, 'import numpy as np\n'), ((24533, 24572), 'OpenGL.GL.glDrawArrays', 'gl.glDrawArrays', (['primtype', 'offset', 'size'], {}), '(primtype, offset, size)\n', (24548, 24572), True, 'import OpenGL.GL as gl\n'), ((24740, 24755), 'numpy.diff', 'np.diff', (['bounds'], {}), '(bounds)\n', (24747, 24755), True, 'import numpy as np\n'), ((24800, 24855), 'OpenGL.GL.glMultiDrawArrays', 'gl.glMultiDrawArrays', (['primtype', 'first', 'count', 'primcount'], {}), '(primtype, first, count, primcount)\n', (24820, 24855), True, 'import OpenGL.GL as gl\n'), ((24936, 24995), 'OpenGL.GL.glDrawElements', 'gl.glDrawElements', (['primtype', 'size', 'gl.GL_UNSIGNED_INT', 'None'], {}), '(primtype, size, gl.GL_UNSIGNED_INT, None)\n', (24953, 24995), True, 'import OpenGL.GL as gl\n'), ((52681, 52720), 'galry.log_debug', 'log_debug', 
(['"""Cleaning up all variables."""'], {}), "('Cleaning up all variables.')\n", (52690, 52720), False, 'from galry import enforce_dtype, DataNormalizer, log_info, log_debug, log_warn, RefVar\n'), ((53869, 53911), 'OpenGL.GL.glEnableClientState', 'gl.glEnableClientState', (['gl.GL_VERTEX_ARRAY'], {}), '(gl.GL_VERTEX_ARRAY)\n', (53891, 53911), True, 'import OpenGL.GL as gl\n'), ((55136, 55164), 'OpenGL.GL.glClearColor', 'gl.glClearColor', (['*background'], {}), '(*background)\n', (55151, 55164), True, 'import OpenGL.GL as gl\n'), ((57384, 57397), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (57395, 57397), False, 'from collections import OrderedDict\n'), ((59604, 59638), 'OpenGL.GL.glViewport', 'gl.glViewport', (['(0)', '(0)', 'width', 'height'], {}), '(0, 0, width, height)\n', (59617, 59638), True, 'import OpenGL.GL as gl\n'), ((1100, 1130), 'OpenGL.GL.glGetString', 'gl.glGetString', (['gl.GL_RENDERER'], {}), '(gl.GL_RENDERER)\n', (1114, 1130), True, 'import OpenGL.GL as gl\n'), ((1162, 1191), 'OpenGL.GL.glGetString', 'gl.glGetString', (['gl.GL_VERSION'], {}), '(gl.GL_VERSION)\n', (1176, 1191), True, 'import OpenGL.GL as gl\n'), ((1221, 1267), 'OpenGL.GL.glGetString', 'gl.glGetString', (['gl.GL_SHADING_LANGUAGE_VERSION'], {}), '(gl.GL_SHADING_LANGUAGE_VERSION)\n', (1235, 1267), True, 'import OpenGL.GL as gl\n'), ((2469, 2507), 'OpenGL.GL.glEnableVertexAttribArray', 'gl.glEnableVertexAttribArray', (['location'], {}), '(location)\n', (2497, 2507), True, 'import OpenGL.GL as gl\n'), ((2889, 2920), 'galry.enforce_dtype', 'enforce_dtype', (['data', 'np.float32'], {}), '(data, np.float32)\n', (2902, 2920), False, 'from galry import enforce_dtype, DataNormalizer, log_info, log_debug, log_warn, RefVar\n'), ((2954, 2978), 'numpy.array', 'np.array', (['data', 'np.int32'], {}), '(data, np.int32)\n', (2962, 2978), True, 'import numpy as np\n'), ((4260, 4291), 'galry.enforce_dtype', 'enforce_dtype', (['data', 'np.float32'], {}), '(data, np.float32)\n', (4273, 
4291), False, 'from galry import enforce_dtype, DataNormalizer, log_info, log_debug, log_warn, RefVar\n'), ((4348, 4364), 'numpy.float32', 'np.float32', (['data'], {}), '(data)\n', (4358, 4364), True, 'import numpy as np\n'), ((4419, 4433), 'numpy.int32', 'np.int32', (['data'], {}), '(data)\n', (4427, 4433), True, 'import numpy as np\n'), ((8236, 8272), 'numpy.array', 'np.array', (['(255 * data)'], {'dtype': 'np.uint8'}), '(255 * data, dtype=np.uint8)\n', (8244, 8272), True, 'import numpy as np\n'), ((10301, 10405), 'OpenGL.GL.glTexImage1D', 'gl.glTexImage1D', (['textype', '(0)', 'component_type', 'shape[1]', '(0)', 'component_type', 'gl.GL_UNSIGNED_BYTE', 'data'], {}), '(textype, 0, component_type, shape[1], 0, component_type, gl\n .GL_UNSIGNED_BYTE, data)\n', (10316, 10405), True, 'import OpenGL.GL as gl\n'), ((11095, 11186), 'OpenGL.GL.glTexSubImage1D', 'gl.glTexSubImage1D', (['textype', '(0)', '(0)', 'shape[1]', 'component_type', 'gl.GL_UNSIGNED_BYTE', 'data'], {}), '(textype, 0, 0, shape[1], component_type, gl.\n GL_UNSIGNED_BYTE, data)\n', (11113, 11186), True, 'import OpenGL.GL as gl\n'), ((11736, 11759), 'OpenGL.GL.glGenFramebuffers', 'gl.glGenFramebuffers', (['(1)'], {}), '(1)\n', (11756, 11759), True, 'import OpenGL.GL as gl\n'), ((15010, 15041), 'OpenGL.GL.glGetProgramInfoLog', 'gl.glGetProgramInfoLog', (['program'], {}), '(program)\n', (15032, 15041), True, 'import OpenGL.GL as gl\n'), ((16447, 16487), 'OpenGL.GL.glDetachShader', 'gl.glDetachShader', (['self.program', 'self.vs'], {}), '(self.program, self.vs)\n', (16464, 16487), True, 'import OpenGL.GL as gl\n'), ((16500, 16540), 'OpenGL.GL.glDetachShader', 'gl.glDetachShader', (['self.program', 'self.fs'], {}), '(self.program, self.fs)\n', (16517, 16540), True, 'import OpenGL.GL as gl\n'), ((16691, 16717), 'OpenGL.GL.glDeleteShader', 'gl.glDeleteShader', (['self.vs'], {}), '(self.vs)\n', (16708, 16717), True, 'import OpenGL.GL as gl\n'), ((16730, 16756), 'OpenGL.GL.glDeleteShader', 'gl.glDeleteShader', 
(['self.fs'], {}), '(self.fs)\n', (16747, 16756), True, 'import OpenGL.GL as gl\n'), ((16882, 16914), 'OpenGL.GL.glDeleteProgram', 'gl.glDeleteProgram', (['self.program'], {}), '(self.program)\n', (16900, 16914), True, 'import OpenGL.GL as gl\n'), ((21379, 21419), 'numpy.array', 'np.array', (['[0, self.size]'], {'dtype': 'np.int32'}), '([0, self.size], dtype=np.int32)\n', (21387, 21419), True, 'import numpy as np\n'), ((23550, 23590), 'numpy.ones', 'np.ones', (['self.slicer.size'], {'dtype': 'np.bool'}), '(self.slicer.size, dtype=np.bool)\n', (23557, 23590), True, 'import numpy as np\n'), ((29671, 29738), 'galry.log_debug', 'log_debug', (['"""deactivating slicing because there\'s an indexed buffer"""'], {}), '("deactivating slicing because there\'s an indexed buffer")\n', (29680, 29738), False, 'from galry import enforce_dtype, DataNormalizer, log_info, log_debug, log_warn, RefVar\n'), ((34772, 34883), 'galry.log_debug', 'log_debug', (['("Variable \'%s\' could not be loaded, probably because it is not used in the shaders"\n % name)'], {}), '(\n "Variable \'%s\' could not be loaded, probably because it is not used in the shaders"\n % name)\n', (34781, 34883), False, 'from galry import enforce_dtype, DataNormalizer, log_info, log_debug, log_warn, RefVar\n'), ((35017, 35126), 'galry.log_debug', 'log_debug', (['("Skipping loading data for attribute \'%s\' since it references a target variable."\n % name)'], {}), '(\n "Skipping loading data for attribute \'%s\' since it references a target variable."\n % name)\n', (35026, 35126), False, 'from galry import enforce_dtype, DataNormalizer, log_info, log_debug, log_warn, RefVar\n'), ((36356, 36467), 'galry.log_debug', 'log_debug', (['("Variable \'%s\' could not be loaded, probably because it is not used in the shaders"\n % name)'], {}), '(\n "Variable \'%s\' could not be loaded, probably because it is not used in the shaders"\n % name)\n', (36365, 36467), False, 'from galry import enforce_dtype, DataNormalizer, log_info, 
log_debug, log_warn, RefVar\n'), ((36730, 36837), 'galry.log_debug', 'log_debug', (['("Skipping loading data for texture \'%s\' since it references a target variable."\n % name)'], {}), '(\n "Skipping loading data for texture \'%s\' since it references a target variable."\n % name)\n', (36739, 36837), False, 'from galry import enforce_dtype, DataNormalizer, log_info, log_debug, log_warn, RefVar\n'), ((37238, 37349), 'galry.log_debug', 'log_debug', (['("Variable \'%s\' could not be loaded, probably because it is not used in the shaders"\n % name)'], {}), '(\n "Variable \'%s\' could not be loaded, probably because it is not used in the shaders"\n % name)\n', (37247, 37349), False, 'from galry import enforce_dtype, DataNormalizer, log_info, log_debug, log_warn, RefVar\n'), ((38535, 38604), 'galry.log_debug', 'log_debug', (['("Variable \'%s\' was not found, unable to update it." % name)'], {}), '("Variable \'%s\' was not found, unable to update it." % name)\n', (38544, 38604), False, 'from galry import enforce_dtype, DataNormalizer, log_info, log_debug, log_warn, RefVar\n'), ((39183, 39295), 'galry.log_debug', 'log_debug', (['("Variable \'%s\' could not be updated, probably because it is not used in the shaders"\n % name)'], {}), '(\n "Variable \'%s\' could not be updated, probably because it is not used in the shaders"\n % name)\n', (39192, 39295), False, 'from galry import enforce_dtype, DataNormalizer, log_info, log_debug, log_warn, RefVar\n'), ((39931, 40046), 'galry.log_debug', 'log_debug', (["('Creating new buffers for variable %s, old size=%s,new size=%d' % (name,\n oldshape[0], data.shape[0]))"], {}), "('Creating new buffers for variable %s, old size=%s,new size=%d' %\n (name, oldshape[0], data.shape[0]))\n", (39940, 40046), False, 'from galry import enforce_dtype, DataNormalizer, log_info, log_debug, log_warn, RefVar\n'), ((42135, 42246), 'galry.log_debug', 'log_debug', (['("Variable \'%s\' could not be loaded, probably because it is not used in the shaders"\n 
% name)'], {}), '(\n "Variable \'%s\' could not be loaded, probably because it is not used in the shaders"\n % name)\n', (42144, 42246), False, 'from galry import enforce_dtype, DataNormalizer, log_info, log_debug, log_warn, RefVar\n'), ((54019, 54049), 'OpenGL.GL.glEnable', 'gl.glEnable', (['gl.GL_MULTISAMPLE'], {}), '(gl.GL_MULTISAMPLE)\n', (54030, 54049), True, 'import OpenGL.GL as gl\n'), ((54143, 54187), 'OpenGL.GL.glEnable', 'gl.glEnable', (['gl.GL_VERTEX_PROGRAM_POINT_SIZE'], {}), '(gl.GL_VERTEX_PROGRAM_POINT_SIZE)\n', (54154, 54187), True, 'import OpenGL.GL as gl\n'), ((54200, 54231), 'OpenGL.GL.glEnable', 'gl.glEnable', (['gl.GL_POINT_SPRITE'], {}), '(gl.GL_POINT_SPRITE)\n', (54211, 54231), True, 'import OpenGL.GL as gl\n'), ((54329, 54353), 'OpenGL.GL.glEnable', 'gl.glEnable', (['gl.GL_BLEND'], {}), '(gl.GL_BLEND)\n', (54340, 54353), True, 'import OpenGL.GL as gl\n'), ((54616, 54642), 'OpenGL.GL.glBlendFunc', 'gl.glBlendFunc', (['*blendfunc'], {}), '(*blendfunc)\n', (54630, 54642), True, 'import OpenGL.GL as gl\n'), ((54770, 54799), 'OpenGL.GL.glEnable', 'gl.glEnable', (['gl.GL_DEPTH_TEST'], {}), '(gl.GL_DEPTH_TEST)\n', (54781, 54799), True, 'import OpenGL.GL as gl\n'), ((54812, 54838), 'OpenGL.GL.glDepthMask', 'gl.glDepthMask', (['gl.GL_TRUE'], {}), '(gl.GL_TRUE)\n', (54826, 54838), True, 'import OpenGL.GL as gl\n'), ((54851, 54879), 'OpenGL.GL.glDepthFunc', 'gl.glDepthFunc', (['gl.GL_LEQUAL'], {}), '(gl.GL_LEQUAL)\n', (54865, 54879), True, 'import OpenGL.GL as gl\n'), ((54892, 54917), 'OpenGL.GL.glDepthRange', 'gl.glDepthRange', (['(0.0)', '(1.0)'], {}), '(0.0, 1.0)\n', (54907, 54917), True, 'import OpenGL.GL as gl\n'), ((54966, 54986), 'OpenGL.GL.glClearDepth', 'gl.glClearDepth', (['(1.0)'], {}), '(1.0)\n', (54981, 54986), True, 'import OpenGL.GL as gl\n'), ((58018, 58077), 'OpenGL.GL.glClear', 'gl.glClear', (['(gl.GL_COLOR_BUFFER_BIT | gl.GL_DEPTH_BUFFER_BIT)'], {}), '(gl.GL_COLOR_BUFFER_BIT | gl.GL_DEPTH_BUFFER_BIT)\n', (58028, 58077), True, 'import 
OpenGL.GL as gl\n'), ((58104, 58138), 'OpenGL.GL.glClear', 'gl.glClear', (['gl.GL_COLOR_BUFFER_BIT'], {}), '(gl.GL_COLOR_BUFFER_BIT)\n', (58114, 58138), True, 'import OpenGL.GL as gl\n'), ((6712, 6740), 'OpenGL.GL.glGenerateMipmap', 'gl.glGenerateMipmap', (['textype'], {}), '(textype)\n', (6731, 6740), True, 'import OpenGL.GL as gl\n'), ((10545, 10658), 'OpenGL.GL.glTexImage2D', 'gl.glTexImage2D', (['textype', '(0)', 'component_type', 'shape[1]', 'shape[0]', '(0)', 'component_type', 'gl.GL_UNSIGNED_BYTE', 'data'], {}), '(textype, 0, component_type, shape[1], shape[0], 0,\n component_type, gl.GL_UNSIGNED_BYTE, data)\n', (10560, 10658), True, 'import OpenGL.GL as gl\n'), ((11249, 11353), 'OpenGL.GL.glTexSubImage2D', 'gl.glTexSubImage2D', (['textype', '(0)', '(0)', '(0)', 'shape[1]', 'shape[0]', 'component_type', 'gl.GL_UNSIGNED_BYTE', 'data'], {}), '(textype, 0, 0, 0, shape[1], shape[0], component_type, gl\n .GL_UNSIGNED_BYTE, data)\n', (11267, 11353), True, 'import OpenGL.GL as gl\n'), ((31167, 31200), 'galry.log_debug', 'log_debug', (['"""deactivating slicing"""'], {}), "('deactivating slicing')\n", (31176, 31200), False, 'from galry import enforce_dtype, DataNormalizer, log_info, log_debug, log_warn, RefVar\n'), ((47424, 47474), 'galry.log_debug', 'log_debug', (['("Data for variable \'%s\' is None" % name)'], {}), '("Data for variable \'%s\' is None" % name)\n', (47433, 47474), False, 'from galry import enforce_dtype, DataNormalizer, log_info, log_debug, log_warn, RefVar\n'), ((48709, 48829), 'galry.log_debug', 'log_debug', (['("Unable to bind attribute \'%s\', probably because it is not used in the shaders."\n % variable[\'name\'])'], {}), '(\n "Unable to bind attribute \'%s\', probably because it is not used in the shaders."\n % variable[\'name\'])\n', (48718, 48829), False, 'from galry import enforce_dtype, DataNormalizer, log_info, log_debug, log_warn, RefVar\n'), ((49922, 49996), 'galry.log_debug', 'log_debug', (['("Texture \'%s\' was not properly 
initialized." % variable[\'name\'])'], {}), '("Texture \'%s\' was not properly initialized." % variable[\'name\'])\n', (49931, 49996), False, 'from galry import enforce_dtype, DataNormalizer, log_info, log_debug, log_warn, RefVar\n'), ((57165, 57194), 'galry.log_debug', 'log_debug', (["(key + ': ' + value)"], {}), "(key + ': ' + value)\n", (57174, 57194), False, 'from galry import enforce_dtype, DataNormalizer, log_info, log_debug, log_warn, RefVar\n'), ((20239, 20268), 'numpy.hstack', 'np.hstack', (['(0, bounds_sliced)'], {}), '((0, bounds_sliced))\n', (20248, 20268), True, 'import numpy as np\n'), ((20329, 20367), 'numpy.hstack', 'np.hstack', (['(bounds_sliced, slice_size)'], {}), '((bounds_sliced, slice_size))\n', (20338, 20367), True, 'import numpy as np\n'), ((19547, 19568), 'numpy.ceil', 'np.ceil', (['((p - b0) / d)'], {}), '((p - b0) / d)\n', (19554, 19568), True, 'import numpy as np\n'), ((19599, 19625), 'numpy.floor', 'np.floor', (['((p + s - b0) / d)'], {}), '((p + s - b0) / d)\n', (19607, 19625), True, 'import numpy as np\n')] |
# MIT License
#
# Copyright (C) The Adversarial Robustness Toolbox (ART) Authors 2020
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the
# Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
This module implements the Projected Gradient Descent attack `ProjectedGradientDescent` as an iterative method in which,
after each iteration, the perturbation is projected on an lp-ball of specified radius (in addition to clipping the
values of the adversarial sample so that it lies in the permitted data range). This is the attack proposed by Madry et
al. for adversarial training.
| Paper link: https://arxiv.org/abs/1706.06083
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
from typing import Optional, Union
import numpy as np
from art.estimators.classification.pytorch import PyTorchClassifier
from art.estimators.classification.tensorflow import TensorFlowV2Classifier
from art.estimators.estimator import BaseEstimator, LossGradientsMixin
from art.attacks.attack import EvasionAttack
from art.attacks.evasion.projected_gradient_descent.projected_gradient_descent_numpy import (
ProjectedGradientDescentNumpy,
)
from art.attacks.evasion.projected_gradient_descent.projected_gradient_descent_pytorch import (
ProjectedGradientDescentPyTorch,
)
from art.attacks.evasion.projected_gradient_descent.projected_gradient_descent_tensorflow_v2 import (
ProjectedGradientDescentTensorFlowV2,
)
logger = logging.getLogger(__name__)
class ProjectedGradientDescent(EvasionAttack):
    """
    The Projected Gradient Descent attack is an iterative method in which, after each iteration, the perturbation is
    projected on an lp-ball of specified radius (in addition to clipping the values of the adversarial sample so that it
    lies in the permitted data range). This is the attack proposed by Madry et al. for adversarial training.

    | Paper link: https://arxiv.org/abs/1706.06083
    """

    attack_params = EvasionAttack.attack_params + [
        "norm",
        "eps",
        "eps_step",
        "targeted",
        "num_random_init",
        "batch_size",
        "minimal",
        "max_iter",
        "random_eps",
    ]

    _estimator_requirements = (BaseEstimator, LossGradientsMixin)

    def __init__(
        self,
        estimator,
        norm: int = np.inf,
        eps: float = 0.3,
        eps_step: float = 0.1,
        max_iter: int = 100,
        targeted: bool = False,
        num_random_init: int = 0,
        batch_size: int = 32,
        random_eps: bool = False,
    ):
        """
        Create a :class:`.ProjectedGradientDescent` instance.

        :param estimator: A trained estimator.
        :param norm: The norm of the adversarial perturbation supporting np.inf, 1 or 2.
        :param eps: Maximum perturbation that the attacker can introduce.
        :param eps_step: Attack step size (input variation) at each iteration.
        :param random_eps: When True, epsilon is drawn randomly from truncated normal distribution. The literature
                           suggests this for FGSM based training to generalize across different epsilons. eps_step
                           is modified to preserve the ratio of eps / eps_step. The effectiveness of this
                           method with PGD is untested (https://arxiv.org/pdf/1611.01236.pdf).
        :param max_iter: The maximum number of iterations.
        :param targeted: Indicates whether the attack is targeted (True) or untargeted (False).
        :param num_random_init: Number of random initialisations within the epsilon ball. For num_random_init=0 starting
                                at the original input.
        :param batch_size: Size of the batch on which adversarial samples are generated.
        """
        super().__init__(estimator=estimator)

        self.norm = norm
        self.eps = eps
        self.eps_step = eps_step
        self.max_iter = max_iter
        self.targeted = targeted
        self.num_random_init = num_random_init
        self.batch_size = batch_size
        self.random_eps = random_eps
        ProjectedGradientDescent._check_params(self)

        # Framework-specific implementations can only be used when the estimator applies
        # no preprocessing and no preprocessing/postprocessing defences.
        no_preprocessing = self.estimator.preprocessing is None or (
            np.all(self.estimator.preprocessing[0] == 0) and np.all(self.estimator.preprocessing[1] == 1)
        )
        no_defences = not self.estimator.preprocessing_defences and not self.estimator.postprocessing_defences

        self._attack: Union[
            ProjectedGradientDescentPyTorch, ProjectedGradientDescentTensorFlowV2, ProjectedGradientDescentNumpy
        ]
        if isinstance(self.estimator, PyTorchClassifier) and no_preprocessing and no_defences:
            self._attack = ProjectedGradientDescentPyTorch(
                estimator=estimator,
                norm=norm,
                eps=eps,
                eps_step=eps_step,
                max_iter=max_iter,
                targeted=targeted,
                num_random_init=num_random_init,
                batch_size=batch_size,
                random_eps=random_eps,
            )

        elif isinstance(self.estimator, TensorFlowV2Classifier) and no_preprocessing and no_defences:
            self._attack = ProjectedGradientDescentTensorFlowV2(
                estimator=estimator,
                norm=norm,
                eps=eps,
                eps_step=eps_step,
                max_iter=max_iter,
                targeted=targeted,
                num_random_init=num_random_init,
                batch_size=batch_size,
                random_eps=random_eps,
            )

        else:
            # Generic NumPy implementation works with any estimator.
            self._attack = ProjectedGradientDescentNumpy(
                estimator=estimator,
                norm=norm,
                eps=eps,
                eps_step=eps_step,
                max_iter=max_iter,
                targeted=targeted,
                num_random_init=num_random_init,
                batch_size=batch_size,
                random_eps=random_eps,
            )

    def generate(self, x: np.ndarray, y: Optional[np.ndarray] = None, **kwargs) -> np.ndarray:
        """
        Generate adversarial samples and return them in an array.

        :param x: An array with the original inputs.
        :param y: Target values (class labels) one-hot-encoded of shape `(nb_samples, nb_classes)` or indices of shape
                  (nb_samples,). Only provide this parameter if you'd like to use true labels when crafting adversarial
                  samples. Otherwise, model predictions are used as labels to avoid the "label leaking" effect
                  (explained in this paper: https://arxiv.org/abs/1611.01236). Default is `None`.
        :return: An array holding the adversarial examples.
        """
        logger.info("Creating adversarial samples.")
        return self._attack.generate(x=x, y=y, **kwargs)

    def set_params(self, **kwargs) -> None:
        # Delegate parameter updates to the concrete framework-specific implementation.
        self._attack.set_params(**kwargs)

    def _check_params(self) -> None:
        # Check if order of the norm is acceptable given current implementation
        if self.norm not in [np.inf, int(1), int(2)]:
            raise ValueError("Norm order must be either `np.inf`, 1, or 2.")

        if self.eps <= 0:
            raise ValueError("The perturbation size `eps` has to be positive.")

        if self.eps_step <= 0:
            raise ValueError("The perturbation step-size `eps_step` has to be positive.")

        if not isinstance(self.targeted, bool):
            raise ValueError("The flag `targeted` has to be of type bool.")

        # NOTE: `np.int` was removed in NumPy 1.24; accept builtin and NumPy integers.
        if not isinstance(self.num_random_init, (int, np.integer)):
            raise TypeError("The number of random initialisations has to be of type integer")

        if self.num_random_init < 0:
            raise ValueError("The number of random initialisations `random_init` has to be greater than or equal to 0.")

        if self.batch_size <= 0:
            raise ValueError("The batch size `batch_size` has to be positive.")

        if self.eps_step > self.eps:
            raise ValueError("The iteration step `eps_step` has to be smaller than the total attack `eps`.")

        if self.max_iter <= 0:
            raise ValueError("The number of iterations `max_iter` has to be a positive integer.")
| [
"logging.getLogger",
"art.attacks.evasion.projected_gradient_descent.projected_gradient_descent_numpy.ProjectedGradientDescentNumpy",
"art.attacks.evasion.projected_gradient_descent.projected_gradient_descent_pytorch.ProjectedGradientDescentPyTorch",
"art.attacks.evasion.projected_gradient_descent.projected_g... | [((2413, 2440), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (2430, 2440), False, 'import logging\n'), ((5713, 5934), 'art.attacks.evasion.projected_gradient_descent.projected_gradient_descent_pytorch.ProjectedGradientDescentPyTorch', 'ProjectedGradientDescentPyTorch', ([], {'estimator': 'estimator', 'norm': 'norm', 'eps': 'eps', 'eps_step': 'eps_step', 'max_iter': 'max_iter', 'targeted': 'targeted', 'num_random_init': 'num_random_init', 'batch_size': 'batch_size', 'random_eps': 'random_eps'}), '(estimator=estimator, norm=norm, eps=eps,\n eps_step=eps_step, max_iter=max_iter, targeted=targeted,\n num_random_init=num_random_init, batch_size=batch_size, random_eps=\n random_eps)\n', (5744, 5934), False, 'from art.attacks.evasion.projected_gradient_descent.projected_gradient_descent_pytorch import ProjectedGradientDescentPyTorch\n'), ((5223, 5267), 'numpy.all', 'np.all', (['(self.estimator.preprocessing[0] == 0)'], {}), '(self.estimator.preprocessing[0] == 0)\n', (5229, 5267), True, 'import numpy as np\n'), ((5272, 5316), 'numpy.all', 'np.all', (['(self.estimator.preprocessing[1] == 1)'], {}), '(self.estimator.preprocessing[1] == 1)\n', (5278, 5316), True, 'import numpy as np\n'), ((6211, 6438), 'art.attacks.evasion.projected_gradient_descent.projected_gradient_descent_tensorflow_v2.ProjectedGradientDescentTensorFlowV2', 'ProjectedGradientDescentTensorFlowV2', ([], {'estimator': 'estimator', 'norm': 'norm', 'eps': 'eps', 'eps_step': 'eps_step', 'max_iter': 'max_iter', 'targeted': 'targeted', 'num_random_init': 'num_random_init', 'batch_size': 'batch_size', 'random_eps': 'random_eps'}), '(estimator=estimator, norm=norm, eps=\n eps, eps_step=eps_step, max_iter=max_iter, targeted=targeted,\n num_random_init=num_random_init, batch_size=batch_size, random_eps=\n random_eps)\n', (6247, 6438), False, 'from 
art.attacks.evasion.projected_gradient_descent.projected_gradient_descent_tensorflow_v2 import ProjectedGradientDescentTensorFlowV2\n'), ((6626, 6845), 'art.attacks.evasion.projected_gradient_descent.projected_gradient_descent_numpy.ProjectedGradientDescentNumpy', 'ProjectedGradientDescentNumpy', ([], {'estimator': 'estimator', 'norm': 'norm', 'eps': 'eps', 'eps_step': 'eps_step', 'max_iter': 'max_iter', 'targeted': 'targeted', 'num_random_init': 'num_random_init', 'batch_size': 'batch_size', 'random_eps': 'random_eps'}), '(estimator=estimator, norm=norm, eps=eps,\n eps_step=eps_step, max_iter=max_iter, targeted=targeted,\n num_random_init=num_random_init, batch_size=batch_size, random_eps=\n random_eps)\n', (6655, 6845), False, 'from art.attacks.evasion.projected_gradient_descent.projected_gradient_descent_numpy import ProjectedGradientDescentNumpy\n')] |
import matplotlib.pyplot as plt
import os.path
import sys
import logging
import numpy as np
from matplotlib.colors import hsv_to_rgb
from math import ceil
from msemu.cmd import get_parser
from msemu.ila import IlaData
from msemu.verilog import VerilogPackage
from msemu.resources import ResourceCSV, ResourceAllocation, Utilization
def main(fmts=('png', 'pdf', 'eps')):
    """Plot the ROM size (kb) required to store each UI's step-response segment.

    Reads segment-ROM utilization from the resource CSV and filter geometry
    from the generated Verilog package, prints BRAM usage histograms, and
    saves the resulting figure in every requested format.

    :param fmts: output figure formats (default: png, pdf and eps)
    """
    logging.basicConfig(stream=sys.stderr, level=logging.DEBUG)

    parser = get_parser()
    args = parser.parse_args()

    # histogram: how many segment ROMs use each BRAM amount
    r = ResourceCSV(os.path.join(args.data_dir, 'resource_utilization_short.csv'))
    segment_roms = [Utilization(match[1]) for match in r.get_matches(r'segment_rom_i')]
    bram_dict = {}
    for rom in segment_roms:
        if rom.bram not in bram_dict:
            bram_dict[rom.bram] = 0
        bram_dict[rom.bram] += 1
    print('BRAM Dict:', bram_dict)

    # read filter geometry parameters from the generated Verilog package
    pack = VerilogPackage.from_file(os.path.join(args.build_dir, 'filter_package.sv'))
    n_ui = pack.get('NUM_UI').value
    rx_setting_width = pack.get('RX_SETTING_WIDTH').value
    filter_addr_widths = pack.get('FILTER_ADDR_WIDTHS')
    filter_offset_widths = pack.get('FILTER_OFFSET_WIDTHS')
    filter_slope_widths = pack.get('FILTER_SLOPE_WIDTHS')

    # capacity of half a BRAM (18 Kib) expressed in decimal kb
    half_bram_kb = (1 << 10) * 18 / 1e3
    bits_kb = np.zeros(n_ui)
    half_bram_util = {}
    for k in range(n_ui):
        n_cols = filter_offset_widths.value[k] + filter_slope_widths.value[k]
        n_rows = 1 << (rx_setting_width + filter_addr_widths.value[k])
        bits_kb[k] = n_rows * n_cols / 1.0e3
        n_half_bram = int(ceil(bits_kb[k]/half_bram_kb))
        if n_half_bram not in half_bram_util:
            half_bram_util[n_half_bram] = 0
        half_bram_util[n_half_bram] += 1
    print(half_bram_util)
    max_half_brams = max(half_bram_util.keys())

    plt.plot(bits_kb)
    for k in range(1, max_half_brams + 1):
        # dashed guide line plus label at each half-BRAM capacity boundary
        # (removed an unused per-line `hue` computation that divided by zero
        # whenever max_half_brams == 1)
        plt.plot([0, n_ui-1], [k*half_bram_kb, k*half_bram_kb], '--', color='0.5')
        plt.text(0, (k+0.1)*half_bram_kb, '{:0.1f} BRAM'.format(k/2))
    plt.xlabel('Tap #')
    plt.ylabel('Required ROM Size (kb)')
    plt.ylim([-half_bram_kb*0.2, (max_half_brams+0.3)*half_bram_kb])
    plt.title('Bits Requirement for Step Response Storage')

    plot_name = os.path.join(args.fig_dir, 'rom_bits_postprocess')
    for fmt in fmts:
        plt.savefig(plot_name + '.' + fmt, bbox_inches='tight')
    plt.show()

if __name__=='__main__':
    main()
| [
"logging.basicConfig",
"matplotlib.pyplot.title",
"matplotlib.pyplot.savefig",
"math.ceil",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"msemu.resources.Utilization",
"numpy.zeros",
"msemu.cmd.get_parser",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.show"... | [((376, 435), 'logging.basicConfig', 'logging.basicConfig', ([], {'stream': 'sys.stderr', 'level': 'logging.DEBUG'}), '(stream=sys.stderr, level=logging.DEBUG)\n', (395, 435), False, 'import logging\n'), ((450, 462), 'msemu.cmd.get_parser', 'get_parser', ([], {}), '()\n', (460, 462), False, 'from msemu.cmd import get_parser\n'), ((1271, 1285), 'numpy.zeros', 'np.zeros', (['n_ui'], {}), '(n_ui)\n', (1279, 1285), True, 'import numpy as np\n'), ((1801, 1818), 'matplotlib.pyplot.plot', 'plt.plot', (['bits_kb'], {}), '(bits_kb)\n', (1809, 1818), True, 'import matplotlib.pyplot as plt\n'), ((2064, 2083), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Tap #"""'], {}), "('Tap #')\n", (2074, 2083), True, 'import matplotlib.pyplot as plt\n'), ((2088, 2124), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Required ROM Size (kb)"""'], {}), "('Required ROM Size (kb)')\n", (2098, 2124), True, 'import matplotlib.pyplot as plt\n'), ((2129, 2199), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[-half_bram_kb * 0.2, (max_half_brams + 0.3) * half_bram_kb]'], {}), '([-half_bram_kb * 0.2, (max_half_brams + 0.3) * half_bram_kb])\n', (2137, 2199), True, 'import matplotlib.pyplot as plt\n'), ((2198, 2253), 'matplotlib.pyplot.title', 'plt.title', (['"""Bits Requirement for Step Response Storage"""'], {}), "('Bits Requirement for Step Response Storage')\n", (2207, 2253), True, 'import matplotlib.pyplot as plt\n'), ((2412, 2422), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2420, 2422), True, 'import matplotlib.pyplot as plt\n'), ((598, 619), 'msemu.resources.Utilization', 'Utilization', (['match[1]'], {}), '(match[1])\n', (609, 619), False, 'from msemu.resources import ResourceCSV, ResourceAllocation, Utilization\n'), ((1914, 1999), 'matplotlib.pyplot.plot', 'plt.plot', (['[0, n_ui - 1]', '[k * half_bram_kb, k * half_bram_kb]', '"""--"""'], {'color': '"""0.5"""'}), "([0, n_ui - 1], [k * half_bram_kb, k * half_bram_kb], '--', color='0.5'\n )\n", (1922, 
1999), True, 'import matplotlib.pyplot as plt\n'), ((2351, 2406), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(plot_name + '.' + fmt)"], {'bbox_inches': '"""tight"""'}), "(plot_name + '.' + fmt, bbox_inches='tight')\n", (2362, 2406), True, 'import matplotlib.pyplot as plt\n'), ((1558, 1589), 'math.ceil', 'ceil', (['(bits_kb[k] / half_bram_kb)'], {}), '(bits_kb[k] / half_bram_kb)\n', (1562, 1589), False, 'from math import ceil\n')] |
from typing import Optional, Iterable
import numpy as np
from htm_rl.common.sdr_encoders import SdrConcatenator
from htm_rl.envs.biogwlab.env_shape_params import EnvShapeParams
from htm_rl.envs.biogwlab.module import Entity, EntityType
from htm_rl.envs.biogwlab.view_clipper import ViewClipper, ViewClip
class Renderer:
    """Builds the agent's observation (a concatenated binary SDR) and RGB
    debug images from the renderable entities of the grid-world environment."""

    shape: EnvShapeParams
    view_clipper: Optional[ViewClipper]
    channels_concatenator: Optional[SdrConcatenator]

    def __init__(self, shape_xy, view_rectangle=None):
        self.shape = EnvShapeParams(shape_xy, view_rectangle)
        self.view_clipper = self.shape.view_clipper
        # delayed initialization on the first render call
        self.channels_concatenator = None

    def render(self, position, view_direction, entities: Iterable[Entity]):
        """Render all entities with rendering enabled into one concatenated SDR."""
        view_clip = self.make_view_clip(position, view_direction)

        layers_with_sdr_size = []
        for entity in entities:
            if not entity.rendering:
                continue
            layer_with_sdr_size = entity.render(view_clip)
            if isinstance(layer_with_sdr_size, list):
                # an entity may render several layers at once
                layers_with_sdr_size.extend(layer_with_sdr_size)
            elif layer_with_sdr_size[1]:
                # keep only layers with a non-zero SDR size
                layers_with_sdr_size.append(layer_with_sdr_size)

        assert layers_with_sdr_size, 'Rendering output is empty'

        layers, sdr_sizes = zip(*layers_with_sdr_size)
        if self.channels_concatenator is None:
            # lazily sized from the first rendered frame
            self.channels_concatenator = SdrConcatenator(list(sdr_sizes))

        observation = self.channels_concatenator.concatenate(*layers)
        return observation

    def render_rgb(
            self, position, view_direction,
            entities: dict[EntityType, list[Entity]],
            show_outer_walls: bool
    ):
        """Render the full map (and, if a view clipper is set, the agent's
        egocentric view) as RGB images for visualization."""
        # fill with magenta to catch non-colored cells
        default_filler = np.array([255, 3, 209])
        # NOTE: np.int/np.bool aliases were removed in NumPy 1.24 — use builtins
        img_map = np.empty(self.shape.full_shape + (3, ), dtype=int)
        img_map[:] = default_filler

        areas = entities[EntityType.Area]
        # areas: light blue
        area_color, area_dc = [117, 198, 230], [-12, -15, -6]
        self._draw_entities(img_map, areas, area_color, area_dc)

        obstacles = entities[EntityType.Obstacle]
        # obstacles: dark blue
        obstacle_color, obstacle_dc = [70, 40, 100], [-7, -4, -10]
        self._draw_entities(img_map, obstacles, obstacle_color, obstacle_dc)

        food = entities[EntityType.Consumable]
        # consumables: salad green
        food_color, food_dc = [112, 212, 17], [-4, -10, 4]
        self._draw_entities(img_map, food, food_color, food_dc)

        agent = entities[EntityType.Agent]
        # agent: yellow
        agent_color, agent_dc = [255, 255, 0], [0, 0, 0]
        self._draw_entities(img_map, agent, agent_color, agent_dc)

        view_clip = self.make_view_clip(position, view_direction)
        if view_clip is None:
            return img_map

        img_obs = np.empty(self.view_clipper.view_shape + (3, ), dtype=int)
        abs_indices = np.divmod(view_clip.abs_indices, img_map.shape[1])
        view_indices = np.divmod(view_clip.view_indices, img_obs.shape[1])

        # fill with `out-of-map` obstacles: black
        img_obs[:] = np.array([0, 0, 0])
        img_obs[view_indices] = img_map[abs_indices].copy()
        img_obs = np.flip(img_obs, axis=[0, 1])   # from ij to xy

        # `grey`-out view area
        img_map[abs_indices] += (.5 * (255 - img_map[abs_indices])).astype(int)

        if not show_outer_walls:
            # cut outer walls, keeping only "inner" env part
            img_map = self.shape.get_inner_area(img_map)
        return [img_map, img_obs]

    @property
    def output_sdr_size(self) -> int:
        # size of the concatenated observation SDR
        return self.channels_concatenator.output_sdr_size

    def make_view_clip(self, position, view_direction):
        """Return the view clip for the current pose, or None without a clipper."""
        if self.view_clipper is None:
            return None

        return self.view_clipper.clip(position, view_direction)

    @staticmethod
    def _draw_entities(
            img: np.ndarray, entities: list[Entity], color: list[int],
            delta_color: list[int]
    ):
        """Paint each entity's mask into `img`, shifting the color by
        `delta_color` after every entity so instances are distinguishable."""
        mask = np.empty(img.shape[:2], dtype=bool)
        # color: RGB, i.e. 3-elem array
        color, delta_color = np.array(color), np.array(delta_color)
        for entity in entities:
            mask.fill(0)
            entity.append_mask(mask)
            img[mask] = color
            color += delta_color
def render_mask(mask: np.ndarray, view_clip: 'ViewClip') -> tuple[np.ndarray, int]:
    """Encode a boolean occupancy mask as (active flat indices, total size).

    With no view clip, the whole mask is encoded. Otherwise the mask is
    reprojected into the view window described by the clip; cells outside
    the map default to 0.
    """
    if view_clip is None:
        return np.flatnonzero(mask), mask.size

    # NOTE: np.int alias was removed in NumPy 1.24 — use builtin int dtype
    clipped_mask = np.zeros(view_clip.shape, dtype=int).flatten()
    clipped_mask[view_clip.view_indices] = mask.flatten()[view_clip.abs_indices]
    return np.flatnonzero(clipped_mask), clipped_mask.size
| [
"numpy.flip",
"numpy.divmod",
"numpy.flatnonzero",
"numpy.array",
"numpy.zeros",
"numpy.empty",
"htm_rl.envs.biogwlab.env_shape_params.EnvShapeParams"
] | [((521, 561), 'htm_rl.envs.biogwlab.env_shape_params.EnvShapeParams', 'EnvShapeParams', (['shape_xy', 'view_rectangle'], {}), '(shape_xy, view_rectangle)\n', (535, 561), False, 'from htm_rl.envs.biogwlab.env_shape_params import EnvShapeParams\n'), ((1853, 1876), 'numpy.array', 'np.array', (['[255, 3, 209]'], {}), '([255, 3, 209])\n', (1861, 1876), True, 'import numpy as np\n'), ((1896, 1948), 'numpy.empty', 'np.empty', (['(self.shape.full_shape + (3,))'], {'dtype': 'np.int'}), '(self.shape.full_shape + (3,), dtype=np.int)\n', (1904, 1948), True, 'import numpy as np\n'), ((2951, 3010), 'numpy.empty', 'np.empty', (['(self.view_clipper.view_shape + (3,))'], {'dtype': 'np.int'}), '(self.view_clipper.view_shape + (3,), dtype=np.int)\n', (2959, 3010), True, 'import numpy as np\n'), ((3034, 3084), 'numpy.divmod', 'np.divmod', (['view_clip.abs_indices', 'img_map.shape[1]'], {}), '(view_clip.abs_indices, img_map.shape[1])\n', (3043, 3084), True, 'import numpy as np\n'), ((3108, 3159), 'numpy.divmod', 'np.divmod', (['view_clip.view_indices', 'img_obs.shape[1]'], {}), '(view_clip.view_indices, img_obs.shape[1])\n', (3117, 3159), True, 'import numpy as np\n'), ((3232, 3251), 'numpy.array', 'np.array', (['[0, 0, 0]'], {}), '([0, 0, 0])\n', (3240, 3251), True, 'import numpy as np\n'), ((3330, 3359), 'numpy.flip', 'np.flip', (['img_obs'], {'axis': '[0, 1]'}), '(img_obs, axis=[0, 1])\n', (3337, 3359), True, 'import numpy as np\n'), ((4146, 4184), 'numpy.empty', 'np.empty', (['img.shape[:2]'], {'dtype': 'np.bool'}), '(img.shape[:2], dtype=np.bool)\n', (4154, 4184), True, 'import numpy as np\n'), ((4769, 4797), 'numpy.flatnonzero', 'np.flatnonzero', (['clipped_mask'], {}), '(clipped_mask)\n', (4783, 4797), True, 'import numpy as np\n'), ((4254, 4269), 'numpy.array', 'np.array', (['color'], {}), '(color)\n', (4262, 4269), True, 'import numpy as np\n'), ((4271, 4292), 'numpy.array', 'np.array', (['delta_color'], {}), '(delta_color)\n', (4279, 4292), True, 'import numpy as np\n'), 
((4575, 4595), 'numpy.flatnonzero', 'np.flatnonzero', (['mask'], {}), '(mask)\n', (4589, 4595), True, 'import numpy as np\n'), ((4627, 4666), 'numpy.zeros', 'np.zeros', (['view_clip.shape'], {'dtype': 'np.int'}), '(view_clip.shape, dtype=np.int)\n', (4635, 4666), True, 'import numpy as np\n')] |
# License - for Non-Commercial Research and Educational Use Only
#
# Copyright (c) 2019, Idiap research institute
#
# All rights reserved.
#
# Run, copy, study, change, improve and redistribute source and binary forms, with or without modification, are permitted for non-commercial research and educational use only provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# For permission to use for commercial purposes, please contact Idiap's Technology Transfer Office at <EMAIL>
##
# <NAME>, Idiap
# <EMAIL>
#
# In this file, we have functions to improve the readability of the notebook
# associated with the method.
#
import os
import numpy as np
import tifffile as tiff
from colour_demosaicing import demosaicing_CFA_Bayer_bilinear
import pyqtgraph as pg
from pyqtgraph.Qt import QtGui
# This is a handler to QtGui app, in order to select ROI by hand
global app
app = QtGui.QApplication([])
def stretch_contrast(image, min_val=0.0, max_val=1.0):
    """Linearly rescale the grey levels of an image into [min_val, max_val].

    Fixes over the previous version: the input array is no longer modified
    in place (and integer arrays are accepted), the lower bound is mapped
    correctly for nonzero `min_val`, and constant images no longer divide
    by zero.

    :param image: array of grey levels (any shape)
    :param min_val: lower bound of the output range
    :param max_val: upper bound of the output range
    :return: float array of the same shape spanning [min_val, max_val]
    """
    image = np.asarray(image, dtype=float)  # copy; never mutate the caller's data
    curr_min = np.min(image)
    curr_max = np.max(image)
    span = curr_max - curr_min
    if span == 0:
        # constant image: no contrast to stretch, clamp to the lower bound
        return np.full_like(image, min_val)
    return (image - curr_min) * ((max_val - min_val) / span) + min_val
def select_roi(image, title):
    """Let the user pick a rectangular region of interest on *image*.

    Opens a pyqtgraph window titled 'Select ROI <title>' showing the image
    with a draggable/resizable rectangle, blocks until the Qt event loop
    exits, and returns the ROI object.
    """
    global app  # Qt application instance created once at module load
    n_rows, n_cols = image.shape[0], image.shape[1]
    # window is at least 800x800, otherwise image size plus a margin
    disp_shape = (max(800, n_rows + 100), max(800, n_cols + 100))
    window = pg.GraphicsWindow(size=disp_shape, border=True)
    window.setWindowTitle('Select ROI ' + title)
    layout = window.addLayout(row=0, col=0)
    viewbox = layout.addViewBox(row=0, col=0)
    image_item = pg.ImageItem(image)
    viewbox.addItem(image_item)
    # overlay an adjustable rectangle, initially one third of the image size
    roi = pg.RectROI([20, 20], [n_rows / 3., n_cols / 3.], pen=(0, 19))
    viewbox.addItem(roi)
    QtGui.QApplication.instance().exec_()
    return roi
def apply_matrix(x, A):
    """Left-multiply vector *x* by matrix *A*.

    Small helper used to vectorize computations with numpy, which speeds
    them up compared to explicit Python loops.
    """
    transformed = np.dot(A, x)
    return transformed
def tikhonov2(N):
    """Build the N x N second-order Tikhonov regularization matrix.

    This is the circulant matrix generated by the discrete Laplacian
    stencil (2, -1, 0, ..., 0, -1).
    """
    stencil = np.zeros(N)
    stencil[0], stencil[1], stencil[-1] = 2, -1, -1
    return circular_matrix(stencil)
def circular_matrix(h):
    """Build the N x N circulant matrix generated by vector *h*.

    Row i of the result is *h* rolled right by i positions.

    Fix: the previous implementation used `np.array(map(...))`, which on
    Python 3 wraps the map iterator in a 0-d object array instead of
    building the matrix.
    """
    h = np.asarray(h).ravel()
    N = h.size
    return np.array([np.roll(h, shift) for shift in range(N)])
def write_tiff_stack(ims, direct="result_stack_"):
    """Write a numpy volume as a stack of tiff files in a fresh folder.

    Watch out, time is expected to be the first dimension.
    The target folder name is `direct` followed by the first integer
    suffix (0, 1, 2, ...) that does not already exist on disk.
    """
    suffix = 0
    directory_res = direct + "%d" % suffix
    while os.path.exists(directory_res):
        suffix += 1
        directory_res = direct + "%d" % suffix
    os.makedirs(directory_res)
    for frame in range(ims.shape[0]):
        tiff.imsave(directory_res + "/im_%d.tif" % frame, ims[frame])
| [
"os.path.exists",
"pyqtgraph.Qt.QtGui.QApplication.instance",
"os.makedirs",
"pyqtgraph.ImageItem",
"numpy.max",
"pyqtgraph.Qt.QtGui.QApplication",
"numpy.dot",
"numpy.zeros",
"numpy.min",
"tifffile.imsave",
"pyqtgraph.GraphicsWindow",
"pyqtgraph.RectROI",
"numpy.arange"
] | [((1974, 1996), 'pyqtgraph.Qt.QtGui.QApplication', 'QtGui.QApplication', (['[]'], {}), '([])\n', (1992, 1996), False, 'from pyqtgraph.Qt import QtGui\n'), ((2117, 2130), 'numpy.min', 'np.min', (['image'], {}), '(image)\n', (2123, 2130), True, 'import numpy as np\n'), ((2240, 2253), 'numpy.max', 'np.max', (['image'], {}), '(image)\n', (2246, 2253), True, 'import numpy as np\n'), ((2616, 2663), 'pyqtgraph.GraphicsWindow', 'pg.GraphicsWindow', ([], {'size': 'disp_shape', 'border': '(True)'}), '(size=disp_shape, border=True)\n', (2633, 2663), True, 'import pyqtgraph as pg\n'), ((2791, 2810), 'pyqtgraph.ImageItem', 'pg.ImageItem', (['image'], {}), '(image)\n', (2803, 2810), True, 'import pyqtgraph as pg\n'), ((2865, 2918), 'pyqtgraph.RectROI', 'pg.RectROI', (['[20, 20]', '[X / 3.0, Y / 3.0]'], {'pen': '(0, 19)'}), '([20, 20], [X / 3.0, Y / 3.0], pen=(0, 19))\n', (2875, 2918), True, 'import pyqtgraph as pg\n'), ((3112, 3124), 'numpy.dot', 'np.dot', (['A', 'x'], {}), '(A, x)\n', (3118, 3124), True, 'import numpy as np\n'), ((3216, 3227), 'numpy.zeros', 'np.zeros', (['N'], {}), '(N)\n', (3224, 3227), True, 'import numpy as np\n'), ((3414, 3429), 'numpy.max', 'np.max', (['h.shape'], {}), '(h.shape)\n', (3420, 3429), True, 'import numpy as np\n'), ((3438, 3454), 'numpy.zeros', 'np.zeros', (['(N, N)'], {}), '((N, N))\n', (3446, 3454), True, 'import numpy as np\n'), ((3810, 3839), 'os.path.exists', 'os.path.exists', (['directory_res'], {}), '(directory_res)\n', (3824, 3839), False, 'import os\n'), ((3898, 3924), 'os.makedirs', 'os.makedirs', (['directory_res'], {}), '(directory_res)\n', (3909, 3924), False, 'import os\n'), ((3956, 4009), 'tifffile.imsave', 'tiff.imsave', (["(directory_res + '/im_%d.tif' % i)", 'ims[i]'], {}), "(directory_res + '/im_%d.tif' % i, ims[i])\n", (3967, 4009), True, 'import tifffile as tiff\n'), ((2942, 2971), 'pyqtgraph.Qt.QtGui.QApplication.instance', 'QtGui.QApplication.instance', ([], {}), '()\n', (2969, 2971), False, 'from pyqtgraph.Qt 
import QtGui\n'), ((3516, 3528), 'numpy.arange', 'np.arange', (['N'], {}), '(N)\n', (3525, 3528), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
Created on Mon Jun 17 16:39:49 2019
@Title: FrontierLab exchange program - Metropolis-Hastings source code (for Bayesian Logistic Regression)
@Author: <NAME>
"""
import numpy as np
import copy
import time
from scipy.stats import norm
def expit(z):
    """Numerically stable logistic sigmoid: 1 / (1 + exp(-z)).

    The previous form exp(z) / (1 + exp(z)) overflows for large positive z
    and yields inf/inf = NaN. Evaluating through exp(-|z|) keeps the
    exponent non-positive, so the result is always finite in [0, 1].
    """
    t = np.exp(-np.abs(z))
    return np.where(z >= 0, 1.0 / (1.0 + t), t / (1.0 + t))
class MH:
    """Random-walk Metropolis-Hastings sampler for a 2-parameter Bayesian
    logistic regression (intercept beta[0] and slope beta[1])."""

    def __init__(self, X, Y, b_prior_sd):
        """
        :param X: 1-D array of covariate values
        :param Y: 1-D array of binary outcomes (1 = success, 0 = failure)
        :param b_prior_sd: std. dev. of the zero-mean Gaussian prior on both betas
        """
        self.X = X
        self.Y = Y
        # chain of stored beta samples (a list while sampling, an ndarray afterwards)
        self.all_samples = []
        self.b_prior_sd = b_prior_sd
        # split covariates by outcome once, so log_post avoids re-indexing
        self.success = X[np.where(Y == 1)]
        self.failure = X[np.where(Y == 0)]

        # burnin time is computer time
        # not BPS clock
        self.burnin_time = 0
        self.burnin_sample = 0

        # random initial chain state; p counts accepted proposals
        self.beta = np.random.normal(0,1,2)
        self.p = 0

    def log_post(self,beta):
        """Log posterior of `beta` given the data, up to an additive constant."""
        success_prob = expit(beta[0] + beta[1] * self.success)
        failure_prob = expit(beta[0] + beta[1] * self.failure)
        # Bernoulli log-likelihood split over positive and negative outcomes
        log_success = np.sum(np.log(success_prob))
        log_failure = np.sum(np.log(1-failure_prob))
        # independent N(0, b_prior_sd^2) priors on intercept and slope
        log_prior = np.sum(np.log(norm.pdf(beta, 0, self.b_prior_sd)))
        return log_success + log_failure + log_prior

    def sampler(self, can_sd, burninIters, iterations, store_skip, verbose):
        """Run the random-walk MH chain and store thinned samples.

        :param can_sd: std. dev. of the Gaussian proposal for each coordinate
        :param burninIters: iteration index at which burn-in is recorded as done
        :param iterations: total number of MH iterations
        :param store_skip: store one sample every `store_skip` iterations (thinning)
        :param verbose: print progress every `verbose` iterations
        """
        self.all_samples.append(self.beta)
        cur_lp = self.log_post(self.beta)
        for i in range(1,int(iterations),1):
            # propose a candidate by perturbing each coordinate independently
            can_beta = copy.deepcopy(self.beta)
            for j in range(2):
                can_beta[j] = np.random.normal(self.beta[j], can_sd,1)
            can_lp = self.log_post(can_beta)
            # Metropolis acceptance ratio (symmetric proposal, so no Hastings correction)
            R = np.exp(can_lp - cur_lp)
            U = np.random.uniform(0,1,1)
            if U<R:
                self.beta = can_beta
                cur_lp = can_lp
                self.p += 1
            if(i % store_skip == 0):
                self.all_samples.append(copy.deepcopy(self.beta))
            if(i % verbose == 0):
                print('Current process: ' + str(i))
            if(i == burninIters):
                # record sample index and wall-clock time at the end of burn-in
                self.burnin_sample = copy.deepcopy(i)
                self.burnin_time = time.time()
        # freeze the chain into an ndarray (sampler is intended to run once)
        self.all_samples = np.array(self.all_samples)
| [
"numpy.random.normal",
"numpy.where",
"numpy.log",
"numpy.exp",
"numpy.array",
"numpy.random.uniform",
"scipy.stats.norm.pdf",
"copy.deepcopy",
"time.time"
] | [((289, 298), 'numpy.exp', 'np.exp', (['z'], {}), '(z)\n', (295, 298), True, 'import numpy as np\n'), ((713, 738), 'numpy.random.normal', 'np.random.normal', (['(0)', '(1)', '(2)'], {}), '(0, 1, 2)\n', (729, 738), True, 'import numpy as np\n'), ((2174, 2200), 'numpy.array', 'np.array', (['self.all_samples'], {}), '(self.all_samples)\n', (2182, 2200), True, 'import numpy as np\n'), ((306, 315), 'numpy.exp', 'np.exp', (['z'], {}), '(z)\n', (312, 315), True, 'import numpy as np\n'), ((500, 516), 'numpy.where', 'np.where', (['(Y == 1)'], {}), '(Y == 1)\n', (508, 516), True, 'import numpy as np\n'), ((543, 559), 'numpy.where', 'np.where', (['(Y == 0)'], {}), '(Y == 0)\n', (551, 559), True, 'import numpy as np\n'), ((949, 969), 'numpy.log', 'np.log', (['success_prob'], {}), '(success_prob)\n', (955, 969), True, 'import numpy as np\n'), ((1000, 1024), 'numpy.log', 'np.log', (['(1 - failure_prob)'], {}), '(1 - failure_prob)\n', (1006, 1024), True, 'import numpy as np\n'), ((1401, 1425), 'copy.deepcopy', 'copy.deepcopy', (['self.beta'], {}), '(self.beta)\n', (1414, 1425), False, 'import copy\n'), ((1589, 1612), 'numpy.exp', 'np.exp', (['(can_lp - cur_lp)'], {}), '(can_lp - cur_lp)\n', (1595, 1612), True, 'import numpy as np\n'), ((1629, 1655), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(1)', '(1)'], {}), '(0, 1, 1)\n', (1646, 1655), True, 'import numpy as np\n'), ((1058, 1092), 'scipy.stats.norm.pdf', 'norm.pdf', (['beta', '(0)', 'self.b_prior_sd'], {}), '(beta, 0, self.b_prior_sd)\n', (1066, 1092), False, 'from scipy.stats import norm\n'), ((1487, 1528), 'numpy.random.normal', 'np.random.normal', (['self.beta[j]', 'can_sd', '(1)'], {}), '(self.beta[j], can_sd, 1)\n', (1503, 1528), True, 'import numpy as np\n'), ((2070, 2086), 'copy.deepcopy', 'copy.deepcopy', (['i'], {}), '(i)\n', (2083, 2086), False, 'import copy\n'), ((2122, 2133), 'time.time', 'time.time', ([], {}), '()\n', (2131, 2133), False, 'import time\n'), ((1870, 1894), 'copy.deepcopy', 
'copy.deepcopy', (['self.beta'], {}), '(self.beta)\n', (1883, 1894), False, 'import copy\n')] |
import json
import numpy as np
class Turn:
    """A single dialogue turn: the user utterance, its annotations, and
    the preceding system context."""

    def __init__(self, turn_id, transcript, turn_label, belief_state, system_acts, system_transcript, asr=None, num=None):
        self.id = turn_id
        self.transcript = transcript
        self.turn_label = turn_label
        self.belief_state = belief_state
        self.system_acts = system_acts
        self.system_transcript = system_transcript
        # Fall back to fresh containers so instances never share state.
        self.asr = asr if asr else []
        self.num = num if num else {}

    def to_dict(self):
        """Serialize to a plain dict; 'asr' is deliberately omitted."""
        serialized = {'turn_id': self.id}
        serialized['transcript'] = self.transcript
        serialized['turn_label'] = self.turn_label
        serialized['belief_state'] = self.belief_state
        serialized['system_acts'] = self.system_acts
        serialized['system_transcript'] = self.system_transcript
        serialized['num'] = self.num
        return serialized

    @classmethod
    def from_dict(cls, d):
        """Inverse of :meth:`to_dict` (keys map onto constructor args)."""
        return cls(**d)
class Dialogue:
    """An ordered sequence of Turn objects identified by a dialogue id."""

    def __init__(self, dialogue_id, turns):
        self.id = dialogue_id
        self.turns = turns

    def __len__(self):
        # A dialogue's length is its number of turns.
        return len(self.turns)

    def to_dict(self):
        """Serialize the dialogue and all of its turns to a plain dict."""
        return {
            'dialogue_id': self.id,
            'turns': [turn.to_dict() for turn in self.turns],
        }

    @classmethod
    def from_dict(cls, d):
        """Rebuild a Dialogue (and its Turns) from :meth:`to_dict` output."""
        turns = [Turn.from_dict(turn) for turn in d['turns']]
        return cls(d['dialogue_id'], turns)
class Dataset:
    """A collection of dialogues with dialogue-state-tracking evaluation."""

    def __init__(self, dialogues):
        self.dialogues = dialogues

    def __len__(self):
        return len(self.dialogues)

    def iter_turns(self):
        """Yield every turn of every dialogue, in dataset order.

        Predictions passed to :meth:`evaluate_preds` must be aligned with
        this flattened order.
        """
        for d in self.dialogues:
            for t in d.turns:
                yield t

    def to_dict(self):
        return {'dialogues': [d.to_dict() for d in self.dialogues]}

    @classmethod
    def from_dict(cls, d):
        return cls([Dialogue.from_dict(dd) for dd in d['dialogues']])

    def evaluate_preds(self, preds):
        """Score flat per-turn predictions against the gold annotations.

        ``preds`` is a flat sequence of (slot, value) pairs per turn, in
        :meth:`iter_turns` order.  Returns turn-level inform/request
        accuracy and joint-goal accuracy, each as a fraction in [0, 1].
        """
        request = []
        inform = []
        joint_goal = []
        # Normalize alternative spellings so gold and predictions compare.
        fix = {'centre': 'center', 'areas': 'area', 'phone number': 'number'}
        i = 0
        for d in self.dialogues:
            # Predicted belief state accumulates across turns of a dialogue.
            pred_state = {}
            for t in d.turns:
                # Split the turn labels into request vs. inform slots.
                gold_request = set([(s, v) for s, v in t.turn_label if s == 'request'])
                gold_inform = set([(s, v) for s, v in t.turn_label if s != 'request'])
                pred_request = set([(s, v) for s, v in preds[i] if s == 'request'])
                pred_inform = set([(s, v) for s, v in preds[i] if s != 'request'])
                request.append(gold_request == pred_request)
                inform.append(gold_inform == pred_inform)
                gold_recovered = set()
                pred_recovered = set()
                # New inform predictions overwrite earlier slot values.
                for s, v in pred_inform:
                    pred_state[s] = v
                # Recover the gold belief state, skipping request acts.
                for b in t.belief_state:
                    for s, v in b['slots']:
                        if b['act'] != 'request':
                            gold_recovered.add((b['act'], fix.get(s.strip(), s.strip()), fix.get(v.strip(), v.strip())))
                for s, v in pred_state.items():
                    pred_recovered.add(('inform', s, v))
                # Joint goal: the full accumulated state must match exactly.
                joint_goal.append(gold_recovered == pred_recovered)
                i += 1
        # np.mean over booleans yields the accuracy fraction.
        return {'turn_inform': np.mean(inform), 'turn_request': np.mean(request), 'joint_goal': np.mean(joint_goal)}
class Ontology:
    """Slot and value inventory describing the dialogue domain."""

    def __init__(self, slots=None, values=None, num=None):
        # Supply fresh containers when arguments are missing or falsy.
        self.slots = slots if slots else []
        self.values = values if values else {}
        self.num = num if num else {}

    def to_dict(self):
        """Serialize to a plain dict."""
        return dict(slots=self.slots, values=self.values, num=self.num)

    @classmethod
    def from_dict(cls, d):
        """Rebuild an Ontology from :meth:`to_dict` output."""
        return cls(**d)
| [
"numpy.mean"
] | [((3147, 3162), 'numpy.mean', 'np.mean', (['inform'], {}), '(inform)\n', (3154, 3162), True, 'import numpy as np\n'), ((3180, 3196), 'numpy.mean', 'np.mean', (['request'], {}), '(request)\n', (3187, 3196), True, 'import numpy as np\n'), ((3212, 3231), 'numpy.mean', 'np.mean', (['joint_goal'], {}), '(joint_goal)\n', (3219, 3231), True, 'import numpy as np\n')] |
import argparse
import contextlib
import math
import os
import random
import shutil
import uuid
from pathlib import Path
from typing import Tuple
import cv2
import imageio
import joblib
import numpy as np
from matplotlib import pyplot as plt
from numba import njit, prange
from tqdm import tqdm
# Parameters (module-level configuration for the correction run)
brightness = 25.0  # target mean intensity, as a percentage (0-100) of full scale
contrast = 7.0  # target intensity std-dev, as a percentage (0-100) of full scale
max_space_gb = 400.0  # cap on scratch disk space used by the on-disk memmap
scale_factor = 1.0  # downscale factor applied before computing statistics
use_random_sample = True  # sample images randomly (vs. evenly strided)
src_bit = 8  # bit depth of the source images
# Joblib and tqdm solution to progressbars
@contextlib.contextmanager
def tqdm_joblib(tqdm_object):
    """Context manager to patch joblib to report into tqdm progress bar given as argument"""
    # Subclass joblib's batch-completion callback so each completed batch
    # advances the tqdm bar by its batch size.
    class TqdmBatchCompletionCallback(joblib.parallel.BatchCompletionCallBack):
        def __init__(self, *args, **kwargs):
            super().__init__(*args, **kwargs)
        def __call__(self, *args, **kwargs):
            tqdm_object.update(n=self.batch_size)
            return super().__call__(*args, **kwargs)
    # Monkey-patch the module-global callback; restored in the finally
    # block so the patch never outlives the context, even on error.
    old_batch_callback = joblib.parallel.BatchCompletionCallBack
    joblib.parallel.BatchCompletionCallBack = TqdmBatchCompletionCallback
    try:
        yield tqdm_object
    finally:
        joblib.parallel.BatchCompletionCallBack = old_batch_callback
        tqdm_object.close()
def pixel_stat(img, img_mean, img_std, target_mean, target_std):
    """Re-standardize ``img`` to the requested mean/std (given in percent).

    The image is z-scored with the supplied statistics, rescaled to the
    target statistics (each expressed over 100), and clipped to [0, 1].
    """
    # Convert percentage targets to the unit intensity scale.
    wanted_mean = target_mean / 100.0
    wanted_std = target_std / 100.0
    normalized = (img - img_mean) / img_std
    return np.clip(normalized * wanted_std + wanted_mean, 0, 1)
@njit(parallel=True)
def mean_std_array(data: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
    """Per-pixel mean and (population) std over axis 0 of an (n, a, b) stack.

    Uses Welford's single-pass algorithm so the whole stack is never
    reduced in memory at once; the two outer loops are numba-parallel.
    """
    [n, a, b] = data.shape
    mean_array = np.zeros((a, b), dtype=np.float32)
    std_array = np.zeros((a, b), dtype=np.float32)
    # Welford's online algorithm
    # https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance
    for i in prange(a):
        for j in prange(b):
            mean = 0.0
            mean_sq = 0.0  # running sum of squared deviations (M2)
            for k in range(n):
                count = k + 1
                new_value = data[k, i, j]
                delta = new_value - mean
                mean += delta / count
                delta2 = new_value - mean
                mean_sq += delta * delta2
            mean_array[i, j] = mean
            # Divides by n (not n-1): population standard deviation.
            std_array[i, j] = math.sqrt(mean_sq / n)
    return mean_array, std_array
def memmap_loader(
    image_list, memmap_handle, idx, new_width, new_height, src_bit=8
):
    """Load image ``idx``, normalize it to [0, 1], resize, and store it.

    Parameters
    ----------
    image_list : sequence of paths readable by ``imageio.imread``
    memmap_handle : writable array-like; slot ``idx`` receives the image
    idx : index of the image to load and of the destination slot
    new_width, new_height : target size for ``cv2.resize``
    src_bit : bit depth of the source image (default 8)
    """
    np_im = imageio.imread(image_list[idx]).astype(np.float32)
    # Scale integer pixel values down to the unit interval.
    np_im *= 2 ** (-src_bit)
    # BUG FIX: the third positional argument of cv2.resize is ``dst``, not
    # the interpolation flag, so INTER_CUBIC was never being applied.
    # Pass the flag by keyword instead.
    im2 = cv2.resize(
        np_im, (new_width, new_height), interpolation=cv2.INTER_CUBIC
    )
    memmap_handle[idx, ...] = im2
def correct_image(
    image_raw_mean,
    image_raw_std,
    brightness,
    contrast,
    image_name,
    output_folder,
    src_bit=8,
):
    """Brightness/contrast-correct one image and write it to ``output_folder``.

    Each channel is re-standardized with ``pixel_stat`` against the
    population statistics ``image_raw_mean`` / ``image_raw_std`` so the
    output has the requested ``brightness`` (mean) and ``contrast`` (std),
    both expressed as percentages of full scale.  The result is written
    under the same file name inside ``output_folder``.
    """
    image = imageio.imread(image_name).astype(np.float32)
    image *= 2 ** (-src_bit)  # normalize to [0, 1]
    # NOTE(review): this unpack assumes a 3-D (h, w, c) array even though
    # the loop below has a grayscale branch -- confirm inputs are 3-D.
    (image_height, image_width, image_channels) = image.shape
    output_image = np.empty((image_height, image_width, image_channels))
    intensities = None
    for i in range(image_channels):
        if image_channels == 3:
            intensities = image[:, :, i]
        else:
            intensities = image[:, :]
        intensities = pixel_stat(
            intensities,
            image_raw_mean[i],
            image_raw_std[i],
            brightness,
            contrast,
        )
        if image_channels == 3:
            output_image[:, :, i] = intensities
        else:
            output_image[:, :] = intensities
    # apply scaling to 8 bit and format image to uint8
    filename = Path(output_folder) / image_name.name
    output_image *= 2 ** (src_bit)
    # BUG FIX: clip before the uint8 cast.  Pixels at full intensity scale
    # to exactly 2**src_bit (e.g. 256), which wraps around to 0 in uint8
    # and turned the brightest pixels black.
    output_image = np.clip(output_image, 0, 2 ** src_bit - 1)
    output_image = output_image.astype(np.uint8)
    imageio.imwrite(filename, output_image)
# ---------------------------------------------------------------------------
# Command-line driver: compute per-pixel population statistics over a
# (possibly subsampled) set of images, then color-correct every image.
# ---------------------------------------------------------------------------
parser = argparse.ArgumentParser()
parser.add_argument("path", help="Path to images.")
parser.add_argument("extension", help="extension of images (e.g. jpg, png)")
parser.add_argument(
    "output_folder", help="Output folder to write processed images"
)
args = parser.parse_args()
output_folder = Path(args.output_folder)
if not output_folder.exists():
    output_folder.mkdir(parents=True, exist_ok=True)
image_folder = Path(args.path)
image_list = [p for p in image_folder.glob("*." + args.extension)]
print("Found", len(image_list), "images")
# Probe the first image for the dataset's dimensions; all images are
# assumed to share the same size and channel count -- TODO confirm.
tmp_image = imageio.imread(image_list[0])
(orig_image_height, orig_image_width, image_channels) = tmp_image.shape
print(
    "Images are",
    orig_image_width,
    "x",
    orig_image_height,
    "with",
    image_channels,
    "channels",
)
image_height = int(scale_factor * float(orig_image_height))
image_width = int(scale_factor * float(orig_image_width))
print("Scaling to", image_width, "x", image_height)
# Per-channel statistics, at the original and at the working resolution.
image_raw_mean = np.empty(
    (image_channels, orig_image_height, orig_image_width)
)
image_raw_std = np.empty((image_channels, orig_image_height, orig_image_width))
image_raw_mean_scaled = np.empty((image_channels, image_height, image_width))
image_raw_std_scaled = np.empty((image_channels, image_height, image_width))
# Subsample the image list so the scratch memmap fits in max_space_gb:
total, used, free = shutil.disk_usage("/")
free = free // (2 ** 30)  # bytes -> GiB
print("Free disk space: ", free, "Gb")
if free < max_space_gb:
    print(
        "Free disk space is below",
        max_space_gb,
        "Gb. you might have problems.",
    )
# How many 8-byte pixels fit into the allowed scratch space.
num_images_to_compute_mean = int(
    max_space_gb
    / (image_height * image_width * image_channels * 8 / (1024 ** 3))
)
print(
    "In a max space of",
    max_space_gb,
    "Gb we can fit",
    num_images_to_compute_mean,
    "images.",
)
image_list_sampled = []
if num_images_to_compute_mean >= len(image_list):
    image_list_sampled = image_list
else:
    if not use_random_sample:
        # Evenly strided subsample across the whole list.
        increment = int(len(image_list) / num_images_to_compute_mean) + 1
        i = 0
        while i < len(image_list):
            image_list_sampled.append(image_list[i])
            i += increment
    else:
        image_list_sampled = random.sample(
            image_list, num_images_to_compute_mean
        )
# On-disk scratch buffer holding every sampled image (deleted at the end).
filename_map = "memmap_" + str(uuid.uuid4()) + ".map"
list_shape = [
    len(image_list_sampled),
    image_height,
    image_width,
    image_channels,
]
size = 1
for i in list_shape:
    size *= i
print(
    "Creating memmap of",
    size * 8 / (1024 ** 3),
    "Gb on the filesystem. Do not worry, it will be deleted later.",
)
image_memmap = np.memmap(
    filename=filename_map, mode="w+", shape=tuple(list_shape), dtype=np.float32
)
# Load the sampled images in parallel, reporting progress through tqdm.
with tqdm_joblib(
    tqdm(desc="Loading images to memmap", total=len(image_list_sampled))
) as progress_bar:
    joblib.Parallel(n_jobs=-2, verbose=0)(
        joblib.delayed(memmap_loader)(
            image_list_sampled, image_memmap, idx, image_width, image_height
        )
        for idx in range(len(image_list_sampled))
    )
print("Computing global mean and std...")
for i in range(image_channels):
    print("Working on channel", i)
    image_memmap_per_channel = None
    if image_channels == 1:
        image_memmap_per_channel = image_memmap
    else:
        image_memmap_per_channel = image_memmap[:, :, :, i]
    raw_image_mean, raw_image_std = mean_std_array(image_memmap_per_channel)
    image_raw_mean_scaled[i] = raw_image_mean
    image_raw_std_scaled[i] = raw_image_std
# Upscale the (channel, h, w) statistics back to the original resolution.
# NOTE(review): cv2.resize's third positional argument is ``dst``, not the
# interpolation flag, and ``dsize`` is (width, height) order -- this call
# may not do what was intended; confirm before relying on it.
image_raw_mean_scaled_t = image_raw_mean_scaled.transpose(1, 2, 0)
image_raw_std_scaled_t = image_raw_std_scaled.transpose(1, 2, 0)
image_raw_mean = cv2.resize(
    image_raw_mean_scaled_t,
    (orig_image_height, orig_image_width),
    cv2.INTER_CUBIC,
).transpose(2, 1, 0)
image_raw_std = cv2.resize(
    image_raw_std_scaled_t,
    (orig_image_height, orig_image_width),
    cv2.INTER_CUBIC,
).transpose(2, 1, 0)
# Diagnostic plots.  NOTE(review): ``i`` is the leftover loop index here,
# so only the last channel is actually plotted.
image_raw_mean_fig = image_raw_mean.transpose(1, 2, 0)
image_raw_std_fig = image_raw_std.transpose(1, 2, 0)
fig = plt.figure()
if len(image_raw_mean_fig.shape) == 3:
    plt.imshow(image_raw_mean_fig[:, :, i])
else:
    plt.imshow(image_raw_mean_fig[:, :])
plt.colorbar()
plt.title("Mean " + str(i))
plt.savefig("image_raw_mean_" + str(i) + ".png", dpi=600)
plt.close(fig)
fig = plt.figure()
if len(image_raw_std_fig.shape) == 3:
    plt.imshow(image_raw_std_fig[:, :, i])
else:
    plt.imshow(image_raw_std_fig[:, :])
plt.colorbar()
plt.title("Std " + str(i))
plt.savefig("image_raw_std_" + str(i) + ".png", dpi=600)
plt.close(fig)
# Persist the statistics, then release and delete the scratch memmap.
np.save("image_raw_mean.np", image_raw_mean)
np.save("image_raw_std.np", image_raw_std)
image_memmap._mmap.close()
del image_memmap
os.remove(filename_map)
print("Done computing parameters. Correcting now...")
# image_raw_mean = np.load("image_raw_mean.np.npy")
# image_raw_std = np.load("image_raw_std.np.npy")
# Correct every image in the folder (not just the sample) in parallel.
with tqdm_joblib(
    tqdm(desc="Correcting images", total=len(image_list))
) as progress_bar:
    joblib.Parallel(n_jobs=-2, verbose=0)(
        joblib.delayed(correct_image)(
            image_raw_mean,
            image_raw_std,
            brightness,
            contrast,
            image_list[idx],
            output_folder,
            src_bit,
        )
        for idx in range(0, len(image_list))
    )
| [
"numpy.clip",
"math.sqrt",
"numba.prange",
"numpy.save",
"os.remove",
"matplotlib.pyplot.imshow",
"argparse.ArgumentParser",
"pathlib.Path",
"matplotlib.pyplot.close",
"shutil.disk_usage",
"numpy.empty",
"random.sample",
"numba.njit",
"uuid.uuid4",
"imageio.imread",
"cv2.resize",
"im... | [((1622, 1641), 'numba.njit', 'njit', ([], {'parallel': '(True)'}), '(parallel=True)\n', (1626, 1641), False, 'from numba import njit, prange\n'), ((3842, 3867), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (3865, 3867), False, 'import argparse\n'), ((4132, 4156), 'pathlib.Path', 'Path', (['args.output_folder'], {}), '(args.output_folder)\n', (4136, 4156), False, 'from pathlib import Path\n'), ((4257, 4272), 'pathlib.Path', 'Path', (['args.path'], {}), '(args.path)\n', (4261, 4272), False, 'from pathlib import Path\n'), ((4395, 4424), 'imageio.imread', 'imageio.imread', (['image_list[0]'], {}), '(image_list[0])\n', (4409, 4424), False, 'import imageio\n'), ((4816, 4879), 'numpy.empty', 'np.empty', (['(image_channels, orig_image_height, orig_image_width)'], {}), '((image_channels, orig_image_height, orig_image_width))\n', (4824, 4879), True, 'import numpy as np\n'), ((4902, 4965), 'numpy.empty', 'np.empty', (['(image_channels, orig_image_height, orig_image_width)'], {}), '((image_channels, orig_image_height, orig_image_width))\n', (4910, 4965), True, 'import numpy as np\n'), ((4991, 5044), 'numpy.empty', 'np.empty', (['(image_channels, image_height, image_width)'], {}), '((image_channels, image_height, image_width))\n', (4999, 5044), True, 'import numpy as np\n'), ((5068, 5121), 'numpy.empty', 'np.empty', (['(image_channels, image_height, image_width)'], {}), '((image_channels, image_height, image_width))\n', (5076, 5121), True, 'import numpy as np\n'), ((5166, 5188), 'shutil.disk_usage', 'shutil.disk_usage', (['"""/"""'], {}), "('/')\n", (5183, 5188), False, 'import shutil\n'), ((8523, 8567), 'numpy.save', 'np.save', (['"""image_raw_mean.np"""', 'image_raw_mean'], {}), "('image_raw_mean.np', image_raw_mean)\n", (8530, 8567), True, 'import numpy as np\n'), ((8568, 8610), 'numpy.save', 'np.save', (['"""image_raw_std.np"""', 'image_raw_std'], {}), "('image_raw_std.np', image_raw_std)\n", (8575, 8610), True, 'import numpy as np\n'), 
((8656, 8679), 'os.remove', 'os.remove', (['filename_map'], {}), '(filename_map)\n', (8665, 8679), False, 'import os\n'), ((1585, 1603), 'numpy.clip', 'np.clip', (['ret', '(0)', '(1)'], {}), '(ret, 0, 1)\n', (1592, 1603), True, 'import numpy as np\n'), ((1758, 1792), 'numpy.zeros', 'np.zeros', (['(a, b)'], {'dtype': 'np.float32'}), '((a, b), dtype=np.float32)\n', (1766, 1792), True, 'import numpy as np\n'), ((1809, 1843), 'numpy.zeros', 'np.zeros', (['(a, b)'], {'dtype': 'np.float32'}), '((a, b), dtype=np.float32)\n', (1817, 1843), True, 'import numpy as np\n'), ((1963, 1972), 'numba.prange', 'prange', (['a'], {}), '(a)\n', (1969, 1972), False, 'from numba import njit, prange\n'), ((2634, 2693), 'cv2.resize', 'cv2.resize', (['np_im', '(new_width, new_height)', 'cv2.INTER_CUBIC'], {}), '(np_im, (new_width, new_height), cv2.INTER_CUBIC)\n', (2644, 2693), False, 'import cv2\n'), ((3040, 3093), 'numpy.empty', 'np.empty', (['(image_height, image_width, image_channels)'], {}), '((image_height, image_width, image_channels))\n', (3048, 3093), True, 'import numpy as np\n'), ((3791, 3830), 'imageio.imwrite', 'imageio.imwrite', (['filename', 'output_image'], {}), '(filename, output_image)\n', (3806, 3830), False, 'import imageio\n'), ((7934, 7946), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (7944, 7946), True, 'from matplotlib import pyplot as plt\n'), ((8097, 8111), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (8109, 8111), True, 'from matplotlib import pyplot as plt\n'), ((8210, 8224), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (8219, 8224), True, 'from matplotlib import pyplot as plt\n'), ((8236, 8248), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (8246, 8248), True, 'from matplotlib import pyplot as plt\n'), ((8396, 8410), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (8408, 8410), True, 'from matplotlib import pyplot as plt\n'), ((8507, 8521), 'matplotlib.pyplot.close', 
'plt.close', (['fig'], {}), '(fig)\n', (8516, 8521), True, 'from matplotlib import pyplot as plt\n'), ((1991, 2000), 'numba.prange', 'prange', (['b'], {}), '(b)\n', (1997, 2000), False, 'from numba import njit, prange\n'), ((3665, 3684), 'pathlib.Path', 'Path', (['output_folder'], {}), '(output_folder)\n', (3669, 3684), False, 'from pathlib import Path\n'), ((6028, 6081), 'random.sample', 'random.sample', (['image_list', 'num_images_to_compute_mean'], {}), '(image_list, num_images_to_compute_mean)\n', (6041, 6081), False, 'import random\n'), ((6660, 6697), 'joblib.Parallel', 'joblib.Parallel', ([], {'n_jobs': '(-2)', 'verbose': '(0)'}), '(n_jobs=-2, verbose=0)\n', (6675, 6697), False, 'import joblib\n'), ((7998, 8037), 'matplotlib.pyplot.imshow', 'plt.imshow', (['image_raw_mean_fig[:, :, i]'], {}), '(image_raw_mean_fig[:, :, i])\n', (8008, 8037), True, 'from matplotlib import pyplot as plt\n'), ((8056, 8092), 'matplotlib.pyplot.imshow', 'plt.imshow', (['image_raw_mean_fig[:, :]'], {}), '(image_raw_mean_fig[:, :])\n', (8066, 8092), True, 'from matplotlib import pyplot as plt\n'), ((8299, 8337), 'matplotlib.pyplot.imshow', 'plt.imshow', (['image_raw_std_fig[:, :, i]'], {}), '(image_raw_std_fig[:, :, i])\n', (8309, 8337), True, 'from matplotlib import pyplot as plt\n'), ((8356, 8391), 'matplotlib.pyplot.imshow', 'plt.imshow', (['image_raw_std_fig[:, :]'], {}), '(image_raw_std_fig[:, :])\n', (8366, 8391), True, 'from matplotlib import pyplot as plt\n'), ((8938, 8975), 'joblib.Parallel', 'joblib.Parallel', ([], {'n_jobs': '(-2)', 'verbose': '(0)'}), '(n_jobs=-2, verbose=0)\n', (8953, 8975), False, 'import joblib\n'), ((2383, 2405), 'math.sqrt', 'math.sqrt', (['(mean_sq / n)'], {}), '(mean_sq / n)\n', (2392, 2405), False, 'import math\n'), ((2544, 2575), 'imageio.imread', 'imageio.imread', (['image_list[idx]'], {}), '(image_list[idx])\n', (2558, 2575), False, 'import imageio\n'), ((2883, 2909), 'imageio.imread', 'imageio.imread', (['image_name'], {}), '(image_name)\n', 
(2897, 2909), False, 'import imageio\n'), ((6136, 6148), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (6146, 6148), False, 'import uuid\n'), ((7503, 7598), 'cv2.resize', 'cv2.resize', (['image_raw_mean_scaled_t', '(orig_image_height, orig_image_width)', 'cv2.INTER_CUBIC'], {}), '(image_raw_mean_scaled_t, (orig_image_height, orig_image_width),\n cv2.INTER_CUBIC)\n', (7513, 7598), False, 'import cv2\n'), ((7665, 7759), 'cv2.resize', 'cv2.resize', (['image_raw_std_scaled_t', '(orig_image_height, orig_image_width)', 'cv2.INTER_CUBIC'], {}), '(image_raw_std_scaled_t, (orig_image_height, orig_image_width),\n cv2.INTER_CUBIC)\n', (7675, 7759), False, 'import cv2\n'), ((6707, 6736), 'joblib.delayed', 'joblib.delayed', (['memmap_loader'], {}), '(memmap_loader)\n', (6721, 6736), False, 'import joblib\n'), ((8985, 9014), 'joblib.delayed', 'joblib.delayed', (['correct_image'], {}), '(correct_image)\n', (8999, 9014), False, 'import joblib\n')] |
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import pandas as pd
# In[8]:
# Load the raw articles; ISO-8859-1 avoids decode errors on non-UTF-8 bytes.
dataset = pd.read_csv("/home/karan/dataset.csv",encoding="ISO-8859-1")
# In[20]:
import nltk
import string
import re
# Fetch the NLTK stopword corpus (no-op if already present locally).
nltk.download('stopwords')
# In[22]:
#REMOVING STOPWORDS AND CONVERTING IN LOWERCASE
#REMOVING STOPWORDS AND CONVERTING IN LOWERCASE
def remove_stopwords(row):
    """Lower-case the row's 'Summary' field and drop English stopwords.

    The stopword set (NLTK English stopwords plus corpus-specific tokens)
    is built once and cached on the function, instead of being rebuilt
    from the corpus for every row handed in by ``DataFrame.apply``.
    """
    stopwords = getattr(remove_stopwords, '_stopwords', None)
    if stopwords is None:
        stopwords = set(nltk.corpus.stopwords.words('english') + ['reuter', '\x03'])
        remove_stopwords._stopwords = stopwords
    article = row['Summary'].lower()
    articles = [word for word in article.split() if word not in stopwords]
    return " ".join(articles)
# In[23]:
#REMOVE PUNCUATIONS
#REMOVE PUNCTUATIONS
def remove_puc(row):
    """Replace punctuation and digit runs in 'Summary_custom' with spaces.

    Returns the cleaned string; the input row is not modified.
    """
    article = row['Summary_custom']
    # REPLACE PUNCTUATIONS WITH SPACE.
    # BUG FIX: in the original pattern, the unescaped "$-:" formed a
    # character range (codes 36-58) that silently also matched % ' ( ) * +
    # and the digits.  Placing "-" last makes it a literal hyphen.
    articles = re.sub(r"[,.;@#?!&$:/-]+", ' ', article)
    # REPLACE NUMBERS WITH SPACE
    articles = re.sub(r'\d+', ' ', articles)
    return articles
# In[24]:
#STEMMING
stemmer = nltk.stem.PorterStemmer()
def stemming(row):
    """Porter-stem every whitespace token in the row's 'Summary_custom'."""
    tokens = row['Summary_custom'].split()
    # Uses the module-level ``stemmer`` (nltk PorterStemmer) instance.
    return " ".join(stemmer.stem(token) for token in tokens)
# In[25]:
# Clean the text: drop stopwords, then strip punctuation/digits.
dataset['Summary_custom'] = dataset.apply(remove_stopwords,axis=1)
# dataset['Summary_custom'] = dataset.apply(stemming,axis=1)
dataset['Summary_custom'] = dataset.apply(remove_puc,axis=1)
# In[28]:
# TRY FITTING MODEL WITH TFIDF FIRST, THEN GO ON WITH MORE COMPLEX ALGOS.
# If we don't provide a vocabulary, the vectorizer builds one from the column.
# NOTE(review): a bare ``import sklearn`` may not load the
# ``feature_extraction.text`` submodule on all versions -- confirm.
import sklearn
counter = sklearn.feature_extraction.text.CountVectorizer()
bag_of_words = counter.fit_transform(dataset['Summary_custom'])
tf_counter = sklearn.feature_extraction.text.TfidfVectorizer()
tfidf = tf_counter.fit_transform(dataset['Summary_custom'])
# In[30]:
# Vocabulary learned by the count vectorizer (aligned with tfidf columns).
vocab = counter.get_feature_names()
# In[33]:
import numpy as np
from gensim.models import Word2Vec
# Train word vectors on the vocabulary list itself (a single "sentence").
model = Word2Vec([vocab], min_count=1)
matrix = []
for word in vocab:
    # NOTE(review): ``model[word]`` is the pre-gensim-4 API; newer
    # versions require ``model.wv[word]`` -- confirm the installed version.
    matrix.append(model[word])
wordvec_matrix = np.array(matrix)
# In[34]:
wordvec_matrix.shape
# In[35]:
# Document vectors: tfidf-weighted sum of the word vectors.
docvec_mat = tfidf.dot(wordvec_matrix)
# In[36]:
docvec_mat.shape
# In[ ]:
| [
"nltk.corpus.stopwords.words",
"pandas.read_csv",
"nltk.download",
"sklearn.feature_extraction.text.CountVectorizer",
"nltk.stem.PorterStemmer",
"gensim.models.Word2Vec",
"sklearn.feature_extraction.text.TfidfVectorizer",
"numpy.array",
"re.sub"
] | [((93, 154), 'pandas.read_csv', 'pd.read_csv', (['"""/home/karan/dataset.csv"""'], {'encoding': '"""ISO-8859-1"""'}), "('/home/karan/dataset.csv', encoding='ISO-8859-1')\n", (104, 154), True, 'import pandas as pd\n'), ((205, 231), 'nltk.download', 'nltk.download', (['"""stopwords"""'], {}), "('stopwords')\n", (218, 231), False, 'import nltk\n'), ((881, 906), 'nltk.stem.PorterStemmer', 'nltk.stem.PorterStemmer', ([], {}), '()\n', (904, 906), False, 'import nltk\n'), ((1454, 1503), 'sklearn.feature_extraction.text.CountVectorizer', 'sklearn.feature_extraction.text.CountVectorizer', ([], {}), '()\n', (1501, 1503), False, 'import sklearn\n'), ((1581, 1630), 'sklearn.feature_extraction.text.TfidfVectorizer', 'sklearn.feature_extraction.text.TfidfVectorizer', ([], {}), '()\n', (1628, 1630), False, 'import sklearn\n'), ((1817, 1847), 'gensim.models.Word2Vec', 'Word2Vec', (['[vocab]'], {'min_count': '(1)'}), '([vocab], min_count=1)\n', (1825, 1847), False, 'from gensim.models import Word2Vec\n'), ((1927, 1943), 'numpy.array', 'np.array', (['matrix'], {}), '(matrix)\n', (1935, 1943), True, 'import numpy as np\n'), ((709, 748), 're.sub', 're.sub', (['"""[,.;@#?!&$-:/]+"""', '""" """', 'article'], {}), "('[,.;@#?!&$-:/]+', ' ', article)\n", (715, 748), False, 'import re\n'), ((797, 826), 're.sub', 're.sub', (['"""\\\\d+"""', '""" """', 'articles'], {}), "('\\\\d+', ' ', articles)\n", (803, 826), False, 'import re\n'), ((400, 438), 'nltk.corpus.stopwords.words', 'nltk.corpus.stopwords.words', (['"""english"""'], {}), "('english')\n", (427, 438), False, 'import nltk\n')] |
#!/usr/bin/env python
from __future__ import division
from numpy import mean, shape, argsort, sort, sum as nsum, delete
from scipy.stats import ttest_1samp
from time import strftime, strptime, struct_time
__author__ = "<NAME>"
__copyright__ = "Copyright 2013, The American Gut Project"
__credits__ = ["<NAME>"]
__license__ = "BSD"
__version__ = "unversioned"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
def calculate_abundance(sample, taxa, sum_min=0.95):
    """Return the most abundant taxa covering at least ``sum_min`` of a sample.

    INPUTS:
        sample -- one-dimensional numpy array or list of taxonomic
            frequencies for a single sample
        taxa -- one-dimensional numpy array or list of greengenes ids,
            aligned with ``sample``
        sum_min -- minimum cumulative fraction of the sample that the
            returned taxa must represent (between 0 and 1)

    OUTPUTS:
        abundant -- list of [greengenes id, frequency] pairs, ordered from
            most to least abundant, truncated once the cumulative
            frequency exceeds ``sum_min``."""
    if len(sample) != len(taxa):
        raise ValueError('The number of enteries in samples (%i) and taxa (%i)'
                         ' must be equal.' % (len(sample), len(taxa)))
    # Ranks from most to least abundant.
    descending = argsort(sample)[::-1]
    running_total = 0
    abundant = []
    for rank in descending:
        frequency = sample[rank]
        running_total = running_total + frequency
        abundant.append([taxa[rank], round(frequency, 6)])
        # Stop once the threshold has been crossed (current taxon included).
        if running_total > sum_min:
            break
    return abundant
def calculate_tax_rank_1(sample, population, taxa, critical_value=0.05):
    """Performs a case 1 t-test on common samples
    INPUTS:
        sample -- a one dimensional numpy array containing the taxonomic
                    frequency values for a single sample
        population -- a numpy array containing taxonomic frequency
                    values. Samples are columns, taxa are rows.
        taxa -- an array of greengenes ids associated the sample and
                    population frequencies
        critical_value -- the alpha for use in the t-test
    OUTPUTS:
        high -- a list of lists with greengenes strings, sample frequency,
                    average population frequency, the ratio of values, and the
                    p-value
        low -- a list of lists with greengenes strings, sample frequency,
                    average population frequency, the ratio of values, and the
                    p-value"""
    (num_taxa, num_samples) = shape(population)
    if num_taxa != len(taxa):
        raise ValueError('The number of entries in samples and taxa must'
                         ' be equal.')
    # Identifies taxa that are significantly enriched or depleted in the
    # population
    high = []
    low = []
    # Counts, per taxon, in how many population samples it appears at all.
    population_count = nsum(population > 0, axis=1)
    pop_watch = [(idx, count) for (idx, count) in enumerate(population_count)]
    # Iterate highest index first so deletions do not shift pending indices.
    pop_watch = reversed(pop_watch)
    for (idx, count) in pop_watch:
        # Removes any taxon absent from every population sample.
        if count == 0:
            population = delete(population, idx, 0)
            sample = delete(sample, idx)
            taxa = delete(taxa, idx)
    # Ratio of the sample frequency to the population mean, per taxon.
    population_mean = mean(population, 1)
    ratio = sample/population_mean
    # Performs a one-sample t-test of each taxon's population row against
    # the corresponding sample frequency (axis=1).
    t_stat = []
    p_stat = []
    # Could potentially use qiime functions
    (t_stat, p_stat) = ttest_1samp(population, sample, 1)
    # Bonferroni correction.  NOTE(review): this multiplies by the
    # pre-filter num_taxa, not the post-deletion count -- confirm intended.
    p_stat = p_stat*num_taxa
    # Determines list position based on the smallest p values.
    p_order = argsort(p_stat)
    # Keep only taxa below the alpha; split into enriched (ratio > 1)
    # versus depleted.
    for index in p_order:
        if p_stat[index] >= critical_value:
            continue
        list_value = [taxa[index],
                      round(sample[index], 6),
                      round(population_mean[index], 6),
                      round(ratio[index], 0),
                      p_stat[index]]
        if ratio[index] > 1:
            high.append(list_value)
        else:
            low.append(list_value)
    return high, low
def convert_taxa(rough_taxa, formatting_keys='%1.2f', hundredx=False):
    """Formats lists of numbers for table generation
    INPUTS:
        rough_taxa -- a list of lists with a descriptor string followed by
                    a list of corresponding values
        formatting_keys -- a string describing the way the value should be
                    formatting using string formats. For example, %1.2f, %2d,
                    %i. A value of 'SKIP' will ignore that value and remove it
                    from the output list.  May be a single string applied to
                    every value or a list with one entry per value.
        hundredx -- a bool (or list of bools, one per value) indicating
                    whether the value should be multiplied by 100 before
                    formatting.
    OUTPUTS:
        formatted_taxa -- a list of string with formatting for the final table.
    """
    # Checks the rough_taxa argument is sane
    if not isinstance(rough_taxa, list):
        raise TypeError('rough_taxa must have be a list of at least one '
                        'lists.\nrough_taxa is a %s.' % rough_taxa.__class__)
    elif len(rough_taxa) == 0:
        raise ValueError('rough taxa must have be a list of at least one '
                         'lists.\nrough_taxa does not have any elements.')
    elif not isinstance(rough_taxa[0], list):
        raise TypeError('rough taxa must have be a list of at least one '
                        'lists.\nThe first element in rough taxa is a %s.'
                        % rough_taxa[0].__class__)
    num_ent = len(rough_taxa[0])
    for entry in rough_taxa:
        if not isinstance(entry, list):
            raise TypeError('rough_taxa must be a list of lists')
        if not len(entry) == num_ent:
            raise ValueError('list size is inconsistant')
    num_rough = num_ent-1
    if isinstance(formatting_keys, list):
        num_keys = len(formatting_keys)
    else:
        num_keys = 1
    if isinstance(hundredx, list):
        num_hund = len(hundredx)
    else:
        num_hund = 1
    if not isinstance(formatting_keys, (list, str)):
        raise TypeError('formatting_keys must be a list or string.')
    if not num_rough == num_keys and isinstance(formatting_keys, list):
        raise ValueError('The number of elements in rough_taxa (%i) and the '
                         'number of elements in formatting_keys (%i) must be '
                         'equal.' % (num_rough, num_keys))
    elif not isinstance(hundredx, (list, bool)):
        raise TypeError('hundredx must be a list or bool.')
    elif not num_rough == num_hund and isinstance(hundredx, list):
        raise ValueError('The number of elements in rough_taxa(%i) and the '
                         'number of elements in hundredx(%i) must be equal.'
                         % (num_rough, num_hund))
    # Converts formatting keys and hundredx to lists
    if isinstance(formatting_keys, str):
        formatting_keys = [formatting_keys]*num_rough
    if isinstance(hundredx, bool):
        hundredx = [hundredx]*num_rough
    # Creates the formatted list.
    # BUG FIX: the previous implementation popped the descriptor off each
    # sub-list, destructively mutating the caller's rough_taxa.  Slicing
    # leaves the input untouched while producing identical output.
    formatted_taxa = []
    for element in rough_taxa:
        taxon = element[0]
        new_element = [taxon]
        for idx, item in enumerate(element[1:]):
            if formatting_keys[idx] == 'SKIP':
                continue
            if hundredx[idx]:
                item = item * 100
            new_element.append(formatting_keys[idx] % item)
        formatted_taxa.append(new_element)
    return formatted_taxa
def convert_taxa_to_list(raw_taxa, tax_format, render_mode, comma=False,
                         color='red'):
    """Render greengenes taxonomy strings as a LaTeX or plain-text list.

    INPUTS:
        raw_taxa -- list of greengenes taxonomy strings to format
        tax_format -- list of per-taxon format flags ('BOLD', 'COLOR' or
            'REG'), upper-cased before being handed to
            clean_greengenes_string
        render_mode -- 'LATEX' for LaTeX itemize markup, anything else
            for plain text
        comma -- if True, return a single comma-separated line instead of
            an itemized list
        color -- LaTeX color name used for 'COLOR' entries
            DEFAULT: 'red'

    OUTPUT:
        format_list -- the formatted string."""
    # Wrapper strings for the list as a whole and for each item.
    if render_mode == "LATEX":
        prelist, antelist = '\\begin{itemize}', '\n\\end{itemize}'
        preitem, anteitem = '\n\\item ', ''
    else:
        prelist, antelist = '', ''
        preitem, anteitem = '\n o ', ''
    # Clean every taxonomy string up front; both output modes use these.
    cleaned = [clean_greengenes_string(taxon,
                                       render_mode=render_mode,
                                       format=tax_format[idx].upper(),
                                       unclassified=True,
                                       color=color)
               for idx, taxon in enumerate(raw_taxa)]
    if comma:
        # One comma-separated line.
        return ', '.join(cleaned)
    # Itemized list: wrap each entry, then the whole block.
    pieces = [prelist]
    for entry in cleaned:
        pieces.append('%s%s%s' % (preitem, entry, anteitem))
    pieces.append(antelist)
    return ''.join(pieces)
def clean_greengenes_string(greengenes_string, render_mode, format=None,
unclassified=False, color='red'):
"""Distills a greengenes string to its highest taxonomic resolution
INPUTS:
greengenes_string -- a greengenes string describing taxonomy
render_mode -- a string ("LATEX", "HTML" or "RAW") which describes
the way the table will be formatted. LATEX or HTML gives a
string containing formatting code.
format -- a string with a formatting keys to be used with the data.
'BOLD' indicates that bold text should be used. 'COLOR'
indicates the entry should be colored using the value given
by color. A value of None indicates no special formatting.
DEFUALT:None
unclassified -- a binary value indicating whether or not the designator
'Unclassified' should be added to entries above the family
level
DEFUALT: False
color -- a string describing the latex color to use to render colored
text. Valid colors can be found at
http://en.wikibooks.org/wiki/LaTeX/Colors#Predefined_colors
OUTPUTS:
cleaned_taxon -- a formatted string describing taxonomic information"""
# Preallocates level descriptors
TAX_DES = ['Kingdom', 'Phylum', 'Class', 'Order', 'Family', 'Genus',
'Species']
# Sets up text formatting strings
if render_mode == "LATEX":
italic_before = '\\textit{'
italic_after = '}'
bold_before = '\\textbf{'
bold_after = '}'
color_before = '\\textcolor{%s}{' % color
color_after = '}'
else:
italic_before = ''
italic_after = ''
bold_before = '*'
bold_after = '*'
color_before = '*'
color_after = '*'
if unclassified:
classified = 'Unclassified '
else:
classified = ''
greengenes_string = greengenes_string.replace(' ', '')
# Splits the taxonomy at the ; and removes the designation header.
split_tax = [field.strip().split('__', 1)[1] for field in
greengenes_string.split(';')]
# Identifies the highest level of resolution at which taxonomy is defined
for id_, level in enumerate(split_tax):
if level != '':
no_levels = id_
# Sets up taxonomy string
if no_levels < 5:
cleaned_taxon = '%s%s %s' % (classified, TAX_DES[no_levels],
split_tax[no_levels])
elif no_levels == 5:
cleaned_taxon = '%s %s%s%s' % (TAX_DES[no_levels], italic_before,
split_tax[no_levels], italic_after)
elif no_levels == 6:
cleaned_taxon = '%s%s %s%s' % (italic_before, split_tax[no_levels-1],
split_tax[no_levels], italic_after)
else:
cleaned_taxon = '%sKingdom %s' % (classified, split_tax)
if '[' in cleaned_taxon:
cleaned_taxon = 'cont. %s' % cleaned_taxon.replace('[', ''
).replace(']', '')
cleaned_taxon = cleaned_taxon.replace('_', '-')
# Bolds taxon if necessary
if format == 'BOLD':
cleaned_taxon = ''.join([bold_before, cleaned_taxon, bold_after])
elif format == 'COLOR':
cleaned_taxon = ''.join([color_before, cleaned_taxon, color_after])
return cleaned_taxon
def build_latex_macro(data, categories, format=None):
"""Generates a LaTeX macro for use in a template
INPUTS:
data -- a list or list of lists where the inner list contains the same
set of information, corresponding to categories and format.
categories -- a list of strings describing the data to be used in the
macro (i.e. Name, Sample, etc.)
format -- a list of anonymous or function names to be used to format
the data. The final data must be converted to a string. If a
format of None is provided, all entries will be converted to
strings.
DEFAULT: None
OUTPUTS:
A LaTeX macro with the definitions provided in categories
"""
# Preallocates an indexing variable
ALPHABET = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L',
'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X',
'Y', 'Z']
# Preforms a sanity check on the data
if isinstance(data[0], list):
num_entries = len(data[0])
mode = 'multi'
else:
num_entries = len(data)
mode = 'single'
num_cats = len(categories)
num_forms = len(format)
if not num_entries == num_cats:
raise ValueError('There must be a category for every entry.')
elif not num_entries == num_forms and format is not None:
raise ValueError('There must be a format for every entry.')
# Sets up formatting if necessary
if format is None:
format = [lambda x: '%r' % x]*num_entries
macro = []
# Combines the data into a mapping index
for idx, entry in enumerate(data):
if mode == 'single':
fun = format[idx]
macro.append('\\def\\%s{%s}' % (categories[idx], fun(entry)))
if mode == 'multi' and len(entry[0]) == 0:
for cat in categories:
macro.append('\\def\\%s%s{}' % (cat, ALPHABET[idx]))
macro.append('')
elif mode == 'multi':
for id_, cat in enumerate(categories):
fun = format[id_]
element = entry[id_]
macro.append('\\def\\%s%s{%s}' % (cat, ALPHABET[idx],
fun(element)))
macro.append('')
# Inserts line breaks
macro = '\n'.join(macro)
return macro
def format_date(mapping, date_field=None, d_form_in=None, time_field=None,
t_form_in=None, format_out='%b %d, %Y'):
"""Formats the date information from a mapping dictionary
INPUTS:
mapping -- a 2D dictionary where a sample ID is keyed to an mappingdata
dictionary giving values associated with each sample
date_filed -- the name of the category holding date information
format_in -- a string describing how the temporal information is
encoded. See the time module for string formatting
(http://docs.python.org/2/library/time.html#time.struct_time)
format_out -- the way the output string should be formatted. Use the
format keys from the time module.
OUTPUT:
date -- a string giving the date information
"""
# Performs a sanity check on the data
if date_field is None and time_field is None:
raise ValueError('A date or time field must be supplied. '
'Neither is available.')
if date_field is not None and date_field not in mapping:
raise ValueError('The date_field must be in the mapping data.')
if d_form_in is None and date_field is not None:
raise ValueError('A date format must be supplied with a date field.')
if time_field is not None and time_field not in mapping:
raise ValueError('The time_field must be in the mapping data.')
if t_form_in is None and time_field is not None:
raise ValueError('A time format must be supplied with a time field.')
# Gets the date information
if date_field is not None:
date = strptime(mapping[date_field], d_form_in)
if time_field is not None:
time = strptime(mapping[time_field], t_form_in)
# Gets the date and time into a single structure
if date_field is not None and time_field is not None:
tmp = struct_time((date.tm_year, date.tm_mon, date.tm_mday,
time.tm_hour, time.tm_min, time.tm_sec,
date.tm_wday, date.tm_yday, date.tm_isdst))
elif date_field is None:
tmp = time
elif time_field is None:
tmp = date
# Formats the output
date = strftime(format_out, tmp)
return date
| [
"numpy.mean",
"time.strptime",
"numpy.delete",
"numpy.sort",
"time.strftime",
"numpy.argsort",
"numpy.sum",
"scipy.stats.ttest_1samp",
"numpy.shape",
"time.struct_time"
] | [((3147, 3164), 'numpy.shape', 'shape', (['population'], {}), '(population)\n', (3152, 3164), False, 'from numpy import mean, shape, argsort, sort, sum as nsum, delete\n'), ((3497, 3525), 'numpy.sum', 'nsum', (['(population > 0)'], {'axis': '(1)'}), '(population > 0, axis=1)\n', (3501, 3525), True, 'from numpy import mean, shape, argsort, sort, sum as nsum, delete\n'), ((3931, 3950), 'numpy.mean', 'mean', (['population', '(1)'], {}), '(population, 1)\n', (3935, 3950), False, 'from numpy import mean, shape, argsort, sort, sum as nsum, delete\n'), ((4153, 4187), 'scipy.stats.ttest_1samp', 'ttest_1samp', (['population', 'sample', '(1)'], {}), '(population, sample, 1)\n', (4164, 4187), False, 'from scipy.stats import ttest_1samp\n'), ((4351, 4366), 'numpy.argsort', 'argsort', (['p_stat'], {}), '(p_stat)\n', (4358, 4366), False, 'from numpy import mean, shape, argsort, sort, sum as nsum, delete\n'), ((19203, 19228), 'time.strftime', 'strftime', (['format_out', 'tmp'], {}), '(format_out, tmp)\n', (19211, 19228), False, 'from time import strftime, strptime, struct_time\n'), ((1536, 1548), 'numpy.sort', 'sort', (['sample'], {}), '(sample)\n', (1540, 1548), False, 'from numpy import mean, shape, argsort, sort, sum as nsum, delete\n'), ((1580, 1595), 'numpy.argsort', 'argsort', (['sample'], {}), '(sample)\n', (1587, 1595), False, 'from numpy import mean, shape, argsort, sort, sum as nsum, delete\n'), ((18624, 18664), 'time.strptime', 'strptime', (['mapping[date_field]', 'd_form_in'], {}), '(mapping[date_field], d_form_in)\n', (18632, 18664), False, 'from time import strftime, strptime, struct_time\n'), ((18711, 18751), 'time.strptime', 'strptime', (['mapping[time_field]', 't_form_in'], {}), '(mapping[time_field], t_form_in)\n', (18719, 18751), False, 'from time import strftime, strptime, struct_time\n'), ((18878, 19020), 'time.struct_time', 'struct_time', (['(date.tm_year, date.tm_mon, date.tm_mday, time.tm_hour, time.tm_min, time.\n tm_sec, date.tm_wday, date.tm_yday, 
date.tm_isdst)'], {}), '((date.tm_year, date.tm_mon, date.tm_mday, time.tm_hour, time.\n tm_min, time.tm_sec, date.tm_wday, date.tm_yday, date.tm_isdst))\n', (18889, 19020), False, 'from time import strftime, strptime, struct_time\n'), ((3776, 3802), 'numpy.delete', 'delete', (['population', 'idx', '(0)'], {}), '(population, idx, 0)\n', (3782, 3802), False, 'from numpy import mean, shape, argsort, sort, sum as nsum, delete\n'), ((3824, 3843), 'numpy.delete', 'delete', (['sample', 'idx'], {}), '(sample, idx)\n', (3830, 3843), False, 'from numpy import mean, shape, argsort, sort, sum as nsum, delete\n'), ((3863, 3880), 'numpy.delete', 'delete', (['taxa', 'idx'], {}), '(taxa, idx)\n', (3869, 3880), False, 'from numpy import mean, shape, argsort, sort, sum as nsum, delete\n')] |
import math
import numpy as np
def absolute(column):
i = 0
column= convert_num_col(column)
result = list()
while i < len(column):
result.insert(i+1,abs(column[i]))
i+=1
return result
def cbrt(column):
i = 0
column= convert_num_col(column)
result = list()
while i < len(column):
result.insert(i+1,round(column[i]**(1/3.),2))
i+=1
return result
def ceil(column):
i = 0
column= convert_num_col(column)
result = list()
while i < len(column):
result.insert(i+1,math.ceil(column[i]))
i+=1
return result
def ceiling(column):
return ceil(column)
def degrees(column):
i = 0
column= convert_num_col(column)
result = list()
while i < len(column):
result.insert(i+1,math.degrees(column[i]))
i+=1
return result
def div(column1,column2):
return div_columns(column1,column2)
def exp(column):
i = 0
column= convert_num_col(column)
result = list()
while i < len(column):
result.insert(i+1,math.exp(column[i]))
i+=1
return result
def factorial(column):
i = 0
column= convert_num_col(column)
result = list()
while i < len(column):
result.insert(i+1,math.factorial(column[i]))
i+=1
return result
def floor(column):
i = 0
column= convert_num_col(column)
result = list()
while i < len(column):
result.insert(i+1,math.floor(column[i]))
i+=1
return result
def gcd(column1, column2):
i = 0
column= convert_num_col(column)
result = list()
if (len(column1)==len(column2)):
while i < len(column1):
result.insert(i+1,math.gcd(column1[i],column2[i]))
i+=1
return result
def lcm(column1, column2):
i = 0
column= convert_num_col(column)
result = list()
if (len(column1)==len(column2)):
while i < len(column1):
result.insert(i+1,abs(column1[i]*column2[i]) // math.gcd(column1[i],column2[i]))
i+=1
return result
def ln(column):
i = 0
column= convert_num_col(column)
result = list()
while i < len(column):
result.insert(i+1,math.log(column[i]))
i+=1
return result
def log(column):
return log10(column)
def log10(column):
return log(column,10)
def log(column,base):
i = 0
column= convert_num_col(column)
result = list()
while i < len(column):
result.insert(i+1,math.log(column[i],base))
i+=1
return result
def mod(column1, column2):
return mod_columns(column1,column2)
def pi():
return math.pi
def pow(column1, column2):
i = 0
column= convert_num_col(column)
result = list()
if (len(column1)==len(column2)):
while i < len(column1):
result.insert(i+1,math.pow(column1[i],column2[i]))
i+=1
return result
def radians(column):
i = 0
column= convert_num_col(column)
result = list()
while i < len(column):
result.insert(i+1,math.radians(column[i]))
i+=1
return result
def random():
return random()
def sign(column):
return np.sign(column)
def round(column):
i = 0
column= convert_num_col(column)
result = list()
while i < len(column):
result.insert(i+1,round(column[i]))
i+=1
return result
def sqrt(column):
i = 0
column= convert_num_col(column)
result = list()
while i < len(column):
result.insert(i+1,math.sqrt(column[i]))
i+=1
return result
def with_bucket(expresion, rango_izq, rango_der, number_buckets):
if(rango_izq!=rango_der):
if(rango_izq==expresion):
return 0
if(rango_der==expresion):
return number_buckets+1
incremento = (rango_der - rango_izq)/number_buckets
valor = ((expresion-rango_izq)/incremento)+1
if(valor>number_buckets ):
return number_buckets+1
elif(valor<0):
return 0
else:
return int(valor)
def truncate_col(column,decimals=0):
i = 0
column= convert_num_col(column)
result = list()
while i < len(column):
result.insert(i+1,truncate(column[i],decimals))
i+=1
return result
def truncate(number, decimals=0):
if decimals == 0:
return math.trunc(number)
factor = 10.0 ** decimals
return math.trunc(number * factor) / factor
def sum_columns(column1, column2):
i = 0
column1= convert_num_col(column1)
column2= convert_num_col(column2)
result = list()
if (len(column1)==len(column2)):
while i < len(column1):
result.insert(i+1,column1[i]+column2[i])
i+=1
return result
def rest_columns(column1, column2):
i = 0
column1= convert_num_col(column1)
column2= convert_num_col(column2)
result = list()
if (len(column1)==len(column2)):
while i < len(column1):
result.insert(i+1,column1[i]-column2[i])
i+=1
return result
def mult_columns(column1, column2):
i = 0
column1= convert_num_col(column1)
column2= convert_num_col(column2)
result = list()
if (len(column1)==len(column2)):
while i < len(column1):
result.insert(i+1,column1[i]*column2[i])
i+=1
return result
def div_columns(column1, column2):
i = 0
column1= convert_num_col(column1)
column2= convert_num_col(column2)
result = list()
if (len(column1)==len(column2)):
while i < len(column1):
result.insert(i+1,column1[i]/column2[i])
i+=1
return result
def mod_columns(column1, column2):
i = 0
column1= convert_num_col(column1)
column2= convert_num_col(column2)
result = list()
if (len(column1)==len(column2)):
while i < len(column1):
result.insert(i+1,column1[i]%column2[i])
i+=1
return result
def convert_num_col(num):
if not isinstance(num,int):
return num
if isinstance(num,int):
result = [num]
return result
def exp_columns(column1, column2):
i = 0
column1= convert_num_col(column1)
column2= convert_num_col(column2)
result = list()
if (len(column1)==len(column2)):
while i < len(column1):
result.insert(i+1,column1[i]**column2[i])
i+=1
return result | [
"math.ceil",
"math.floor",
"math.factorial",
"math.gcd",
"math.pow",
"math.degrees",
"math.sqrt",
"math.log",
"math.radians",
"math.trunc",
"numpy.sign",
"math.exp"
] | [((3256, 3271), 'numpy.sign', 'np.sign', (['column'], {}), '(column)\n', (3263, 3271), True, 'import numpy as np\n'), ((4454, 4472), 'math.trunc', 'math.trunc', (['number'], {}), '(number)\n', (4464, 4472), False, 'import math\n'), ((4515, 4542), 'math.trunc', 'math.trunc', (['(number * factor)'], {}), '(number * factor)\n', (4525, 4542), False, 'import math\n'), ((573, 593), 'math.ceil', 'math.ceil', (['column[i]'], {}), '(column[i])\n', (582, 593), False, 'import math\n'), ((819, 842), 'math.degrees', 'math.degrees', (['column[i]'], {}), '(column[i])\n', (831, 842), False, 'import math\n'), ((1084, 1103), 'math.exp', 'math.exp', (['column[i]'], {}), '(column[i])\n', (1092, 1103), False, 'import math\n'), ((1285, 1310), 'math.factorial', 'math.factorial', (['column[i]'], {}), '(column[i])\n', (1299, 1310), False, 'import math\n'), ((1488, 1509), 'math.floor', 'math.floor', (['column[i]'], {}), '(column[i])\n', (1498, 1509), False, 'import math\n'), ((2264, 2283), 'math.log', 'math.log', (['column[i]'], {}), '(column[i])\n', (2272, 2283), False, 'import math\n'), ((2561, 2586), 'math.log', 'math.log', (['column[i]', 'base'], {}), '(column[i], base)\n', (2569, 2586), False, 'import math\n'), ((3130, 3153), 'math.radians', 'math.radians', (['column[i]'], {}), '(column[i])\n', (3142, 3153), False, 'import math\n'), ((3605, 3625), 'math.sqrt', 'math.sqrt', (['column[i]'], {}), '(column[i])\n', (3614, 3625), False, 'import math\n'), ((1745, 1777), 'math.gcd', 'math.gcd', (['column1[i]', 'column2[i]'], {}), '(column1[i], column2[i])\n', (1753, 1777), False, 'import math\n'), ((2915, 2947), 'math.pow', 'math.pow', (['column1[i]', 'column2[i]'], {}), '(column1[i], column2[i])\n', (2923, 2947), False, 'import math\n'), ((2050, 2082), 'math.gcd', 'math.gcd', (['column1[i]', 'column2[i]'], {}), '(column1[i], column2[i])\n', (2058, 2082), False, 'import math\n')] |
"""Misc functions."""
# Completely based on ClearGrasp utils:
# https://github.com/Shreeyak/cleargrasp/
import cv2
import numpy as np
def _normalize_depth_img(depth_img, dtype=np.uint8, min_depth=0.0,
max_depth=1.0):
"""Convert a floating point depth image to uint8 or uint16 image.
The depth image is first scaled to (0.0, max_depth) and then scaled and
converted to given datatype.
Args:
depth_img (numpy.float32): Depth image, value is depth in meters
dtype (numpy.dtype, optional): Defaults to np.uint16. Output data type.
Must be np.uint8 or np.uint16
max_depth (float, optional): The max depth to be considered in the
input depth image. The min depth is considered to be 0.0.
Raises:
ValueError: If wrong dtype is given
Returns:
numpy.ndarray: Depth image scaled to given dtype
"""
if dtype != np.uint16 and dtype != np.uint8:
msg = 'Unsupported dtype {}. Must be one of ("np.uint8", "np.uint16")'
raise ValueError(msg.format(dtype))
# Clip depth image to given range
depth_img = np.ma.masked_array(depth_img, mask=(depth_img == 0.0))
depth_img = np.ma.clip(depth_img, min_depth, max_depth)
# Get min/max value of given datatype
type_info = np.iinfo(dtype)
max_val = type_info.max
# Scale the depth image to given datatype range
depth_img = ((depth_img - min_depth) / (max_depth - min_depth)) * max_val
depth_img = depth_img.astype(dtype)
# Convert back to normal numpy array from masked numpy array
depth_img = np.ma.filled(depth_img, fill_value=0)
return depth_img
def depth2rgb(depth_img, min_depth=0.0, max_depth=1.5,
color_mode=cv2.COLORMAP_JET, reverse_scale=False,
dynamic_scaling=False):
"""Generate RGB representation of a depth image.
To do so, the depth image has to be normalized by specifying a min and max
depth to be considered. Holes in the depth image (0.0) appear black in
color.
Args:
depth_img (numpy.ndarray): Depth image, values in meters.
Shape=(H, W), dtype=np.float32
min_depth (float): Min depth to be considered
max_depth (float): Max depth to be considered
color_mode (int): Integer or cv2 object representing which coloring
scheme to us. Please consult
https://docs.opencv.org/master/d3/d50/group__imgproc__colormap.html
Each mode is mapped to an int. Eg: cv2.COLORMAP_AUTUMN = 0.
This mapping changes from version to version.
reverse_scale (bool): Whether to make the largest values the smallest
to reverse the color mapping
dynamic_scaling (bool): If true, the depth image will be colored
according to the min/max depth value within the
image, rather that the passed arguments.
Returns:
numpy.ndarray: RGB representation of depth image. Shape=(H,W,3)
"""
# Map depth image to Color Map
if dynamic_scaling:
dis = _normalize_depth_img(depth_img, dtype=np.uint8,
min_depth=max(
depth_img[depth_img > 0].min(),
min_depth),
max_depth=min(depth_img.max(), max_depth))
# Added a small epsilon so that min depth does not show up as black
# due to invalid pixels
else:
# depth image scaled
dis = _normalize_depth_img(depth_img, dtype=np.uint8,
min_depth=min_depth, max_depth=max_depth)
if reverse_scale is True:
dis = np.ma.masked_array(dis, mask=(dis == 0.0))
dis = 255 - dis
dis = np.ma.filled(dis, fill_value=0)
depth_img_mapped = cv2.applyColorMap(dis, color_mode)
depth_img_mapped = cv2.cvtColor(depth_img_mapped, cv2.COLOR_BGR2RGB)
# Make holes in input depth black:
depth_img_mapped[dis == 0, :] = 0
return depth_img_mapped
| [
"cv2.applyColorMap",
"numpy.iinfo",
"numpy.ma.filled",
"numpy.ma.clip",
"cv2.cvtColor",
"numpy.ma.masked_array"
] | [((1139, 1191), 'numpy.ma.masked_array', 'np.ma.masked_array', (['depth_img'], {'mask': '(depth_img == 0.0)'}), '(depth_img, mask=depth_img == 0.0)\n', (1157, 1191), True, 'import numpy as np\n'), ((1210, 1253), 'numpy.ma.clip', 'np.ma.clip', (['depth_img', 'min_depth', 'max_depth'], {}), '(depth_img, min_depth, max_depth)\n', (1220, 1253), True, 'import numpy as np\n'), ((1313, 1328), 'numpy.iinfo', 'np.iinfo', (['dtype'], {}), '(dtype)\n', (1321, 1328), True, 'import numpy as np\n'), ((1610, 1647), 'numpy.ma.filled', 'np.ma.filled', (['depth_img'], {'fill_value': '(0)'}), '(depth_img, fill_value=0)\n', (1622, 1647), True, 'import numpy as np\n'), ((3840, 3874), 'cv2.applyColorMap', 'cv2.applyColorMap', (['dis', 'color_mode'], {}), '(dis, color_mode)\n', (3857, 3874), False, 'import cv2\n'), ((3898, 3947), 'cv2.cvtColor', 'cv2.cvtColor', (['depth_img_mapped', 'cv2.COLOR_BGR2RGB'], {}), '(depth_img_mapped, cv2.COLOR_BGR2RGB)\n', (3910, 3947), False, 'import cv2\n'), ((3703, 3743), 'numpy.ma.masked_array', 'np.ma.masked_array', (['dis'], {'mask': '(dis == 0.0)'}), '(dis, mask=dis == 0.0)\n', (3721, 3743), True, 'import numpy as np\n'), ((3784, 3815), 'numpy.ma.filled', 'np.ma.filled', (['dis'], {'fill_value': '(0)'}), '(dis, fill_value=0)\n', (3796, 3815), True, 'import numpy as np\n')] |
from __future__ import unicode_literals
import codecs
import numpy
import os
import transaction
from base64 import b64decode
from hashlib import sha1
import itertools
from pyramid_addons.helpers import (http_created, http_gone, http_ok)
from pyramid_addons.validation import (EmailAddress, Enum, List, Or, String,
RegexString, TextNumber,
WhiteSpaceString, validate, SOURCE_GET,
SOURCE_MATCHDICT as MATCHDICT)
from pyramid.httpexceptions import (HTTPBadRequest, HTTPConflict, HTTPError,
HTTPFound, HTTPNotFound, HTTPOk,
HTTPRedirection, HTTPSeeOther)
from pyramid.response import FileResponse, Response
from pyramid.security import forget, remember
from pyramid.settings import asbool
from pyramid.view import (forbidden_view_config, notfound_view_config,
view_config)
import re
from sqlalchemy.exc import IntegrityError
import yaml
from zipfile import ZipFile
from .diff_render import HTMLDiff
from .exceptions import GroupWithException, InvalidId, SubmitException
from .helpers import (
AccessibleDBThing, DBThing as AnyDBThing, DummyTemplateAttr,
EditableDBThing, TestableStatus, TextDate, ViewableDBThing, UmailAddress,
add_user, clone, fetch_request_ids, file_verifier_verification,
prepare_renderable, prev_next_submission, prev_next_group,
project_file_create, project_file_delete, send_email,
test_case_verification, zip_response,zip_response_adv)
from .models import (BuildFile, Class, ExecutionFile, File, FileVerifier,
Group, GroupRequest, PasswordReset, Project, Session,
Submission, SubmissionToFile, TestCase, Testable, User,
UserToGroup)
# Hack for old pickle files
# TODO: Migrate this data to not use pickle
# Old pickle data references this project by its former name 'nudibranch';
# alias those module paths to the current 'submit' package so the pickles
# can still be loaded.
import sys
import submit
sys.modules['nudibranch'] = submit
sys.modules['nudibranch.diff_unit'] = submit.diff_unit
sys.modules['nudibranch.models'] = submit.models
# A few reoccuring validators
# Where a test case's output is read from.
OUTPUT_SOURCE = Enum('output_source', 'stdout', 'stderr', 'file')
# How a test case's output is compared/rendered.
OUTPUT_TYPE = Enum('output_type', 'diff', 'image', 'text')
# A sha1 hex digest is exactly 40 characters; the value comes from the URL
# match dictionary.
SHA1_VALIDATOR = String('sha1sum', min_length=40, max_length=40,
                        source=MATCHDICT)
# A UUID string is exactly 36 characters; the value comes from the URL
# match dictionary.
UUID_VALIDATOR = String('token', min_length=36, max_length=36,
                        source=MATCHDICT)
# We need a specific view config for each of HTTPError, HTTPOk, and
# HTTPRedirection as HTTPException will not work as a context. Because python
# has explicit decorators for forbidden and notfound (and we use them) we must
# also use those decorators here.
@forbidden_view_config(xhr=True, renderer='json')
@notfound_view_config(xhr=True, renderer='json')
@view_config(context=HTTPError, xhr=True, renderer='json')
@view_config(context=HTTPOk, xhr=True, renderer='json')
@view_config(context=HTTPRedirection, xhr=True, renderer='json')
def json_exception(context, request):
    """Always return json content in the body of Exceptions to xhr requests."""
    # Mirror the exception's status code on the real response, then emit a
    # JSON body containing the status line text and any attached messages.
    request.response.status = context.code
    return {'error': context._status, 'messages': context.message}
# Prevent PredicateMismatch exception
@view_config(context=HTTPError)
@view_config(context=HTTPOk)
@view_config(context=HTTPRedirection)
def normal_exception(context, request):
    """Just return the normal context"""
    # Non-xhr requests get the HTTP exception object itself as the response.
    return context
@forbidden_view_config()
def forbidden_view(context, request):
    """Handle 403s: redirect anonymous users to login, else serve the 403."""
    if not request.user:
        # No user is logged in: flash a warning and send them to the login
        # (session) page, remembering where they were headed.
        request.session.flash('You must be logged in to do that.', 'warnings')
        login_url = request.route_path('session',
                                       _query={'next': request.path})
        return HTTPSeeOther(login_url)
    # An authenticated user genuinely lacks permission; serve the 403 as-is.
    return context
@notfound_view_config()
def not_found(request):
    """Serve a plain-text 404 response for unmatched routes."""
    response = Response('Not Found', status='404 Not Found')
    return response
@view_config(route_name='robots', request_method='GET', http_cache=86400)
def robots(request):
    """Serve a robots.txt that disallows all crawling (cached for a day)."""
    body = 'User-agent: *\nDisallow: /\n'
    return Response(body=body, content_type=str('text/plain'))
@view_config(route_name='build_file', request_method='PUT',
             permission='authenticated', renderer='json')
@validate(file_=ViewableDBThing('file_id', File),
          filename=String('filename', min_length=1),
          project=EditableDBThing('project_id', Project))
def build_file_create(request, file_, filename, project):
    """Attach an already-uploaded File to `project` as a named build file."""
    # The shared helper handles creation, conflict detection and redirect.
    return project_file_create(request, file_, filename, project, BuildFile)
@view_config(route_name='build_file_item', request_method='DELETE',
             permission='authenticated', renderer='json')
@validate(build_file=EditableDBThing('build_file_id', BuildFile,
                                     source=MATCHDICT))
def build_file_delete(request, build_file):
    """Remove a build file from its project."""
    return project_file_delete(request, build_file)
@view_config(route_name='class.admins', renderer='json', request_method='PUT')
@validate(class_=EditableDBThing('class_id', Class, source=MATCHDICT),
          user=AnyDBThing('email', User, fetch_by='username',
                         validator=EmailAddress('email')))
def class_admins_add(request, class_, user):
    """Grant an existing user admin rights on a class.

    Responds 409 when the user is already an admin or the database rejects
    the association.
    """
    if user in class_.admins:
        raise HTTPConflict('That user is already an admin for the class.')
    user.admin_for.append(class_)
    try:
        # Flush now so an integrity failure surfaces as a 409 here rather
        # than at transaction commit time.
        Session.flush()
    except IntegrityError:
        raise HTTPConflict('The user could not be added.')
    request.session.flash('Added {} as an admin to the class.'.format(user),
                          'successes')
    return http_ok(request, redir_location=request.url)
@view_config(route_name='class.admins', request_method='GET',
             permission='authenticated',
             renderer='templates/forms/class_admins.pt')
@validate(class_=EditableDBThing('class_id', Class, source=MATCHDICT))
def class_admins_view(request, class_):
    """Render the form for managing a class's admins."""
    return {'class_': class_}
@view_config(route_name='class', request_method='PUT', permission='admin',
             renderer='json')
@validate(name=String('name', min_length=3))
def class_create(request, name):
    """Create a new class (site-admin only); 409 when the name exists."""
    class_ = Class(name=name)
    Session.add(class_)
    try:
        # Flush so a duplicate name is reported immediately as a 409.
        Session.flush()
    except IntegrityError:
        raise HTTPConflict('Class \'{0}\' already exists'.format(name))
    request.session.flash('Created class {}'.format(name), 'successes')
    return http_created(request,
                        redir_location=request.route_path('class_new'))
@view_config(route_name='class_new', request_method='GET',
             renderer='templates/forms/class_create.pt', permission='admin')
def class_edit(request):
    """Render the class-creation form with a sorted list of all classes."""
    all_classes = Class.query_by().all()
    return {'classes': sorted(all_classes)}
@view_config(route_name='class_item', request_method='JOIN',
             permission='authenticated', renderer='json')
@validate(class_=AnyDBThing('class_id', Class, source=MATCHDICT))
def class_join(request, class_):
    """Enroll the requesting user in `class_`."""
    if class_.is_locked:
        # Locked classes do not accept new members.
        raise HTTPBadRequest('Invalid class')
    request.user.classes.append(class_)
    request.session.flash('You have joined {}'.format(class_.name),
                          'successes')
    url = request.route_path('user_item', username=request.user.username)
    return http_created(request, redir_location=url)
@view_config(route_name='class_item', request_method='GET',
             renderer='templates/class_view.pt', permission='authenticated')
@validate(class_=AnyDBThing('class_id', Class, source=MATCHDICT))
def class_view(request, class_):
    """Render the class page; admins additionally see recent submissions."""
    class_admin = class_.is_admin(request.user)
    recent_subs = None
    if class_admin:
        project_ids = [x.id for x in class_.projects]
        if project_ids:
            # Sixteen most recent submissions across all of the class's
            # projects, newest first.
            recent_subs = (Submission.query_by()
                           .filter(Submission.project_id.in_(project_ids))
                           .order_by(Submission.created_at.desc()).limit(16)
                           .all())
    return {'class_': class_, 'class_admin': class_admin,
            'recent_subs': recent_subs}
@view_config(route_name='execution_file', request_method='PUT',
             permission='authenticated', renderer='json')
@validate(file_=ViewableDBThing('file_id', File),
          filename=String('filename', min_length=1),
          project=EditableDBThing('project_id', Project))
def execution_file_create(request, file_, filename, project):
    """Attach an already-uploaded File to `project` as an execution file."""
    # Same flow as build_file_create, but produces an ExecutionFile row.
    return project_file_create(request, file_, filename, project,
                               ExecutionFile)
@view_config(route_name='execution_file_item', request_method='DELETE',
             permission='authenticated', renderer='json')
@validate(execution_file=EditableDBThing('execution_file_id', ExecutionFile,
                                         source=MATCHDICT))
def execution_file_delete(request, execution_file):
    """Remove an execution file from its project."""
    return project_file_delete(request, execution_file)
@view_config(route_name='file_item', request_method='PUT', renderer='json',
             permission='authenticated')
@validate(b64data=WhiteSpaceString('b64data'), sha1sum=SHA1_VALIDATOR)
def file_create(request, b64data, sha1sum):
    """Store an uploaded base64-encoded file after checking its sha1 digest.

    Returns the id of the (possibly pre-existing) File record.
    """
    contents = b64decode(b64data.encode('ascii'))
    # Reject the upload when the client-supplied digest does not match the
    # actual content.
    computed_sha1 = sha1(contents).hexdigest()
    if computed_sha1 != sha1sum:
        raise HTTPBadRequest(
            'sha1sum does not match expected: {0}'.format(computed_sha1))
    # Fetch the existing File row, or create one (writing the data to disk).
    file_ = File.fetch_or_create(
        contents, request.registry.settings['file_directory'], sha1sum=sha1sum)
    # Record that this user uploaded/owns the file.
    request.user.files.add(file_)
    return {'file_id': file_.id}
@view_config(route_name='file_item', request_method='INFO',
             permission='authenticated', renderer='json')
@validate(file_=ViewableDBThing('sha1sum', File, fetch_by='sha1',
                                validator=SHA1_VALIDATOR, source=MATCHDICT))
def file_item_info(request, file_):
    """Report the file's id and whether the requesting user owns it."""
    owns_file = file_ in request.user.files
    return {'file_id': file_.id, 'owns_file': owns_file}
@view_config(route_name='file_item', request_method='GET',
             permission='authenticated', renderer='templates/file_view.pt')
@validate(file_=ViewableDBThing('sha1sum', File, fetch_by='sha1',
                                validator=SHA1_VALIDATOR, source=MATCHDICT),
          filename=String('filename', min_length=1, source=MATCHDICT),
          raw=TextNumber('raw', min_value=0, max_value=1,
                         optional=True, source=SOURCE_GET))
def file_item_view(request, file_, filename, raw):
    """Display a stored file, either raw (download) or rendered in a page.

    :param file_: the File database object, fetched by sha1 from the URL
    :param filename: the name to display/serve the file under
    :param raw: when truthy, serve the raw file bytes instead of HTML
    """
    source = File.file_path(request.registry.settings['file_directory'],
                            file_.sha1)
    if raw:
        return FileResponse(source, request)
    # Read via a context manager so the handle is closed deterministically
    # (the original `codecs.open(...).read()` left closing to the GC).
    try:
        with codecs.open(source, encoding='utf-8') as fp:
            contents = fp.read()
    except UnicodeDecodeError as exc:
        # Binary/undecodable files are shown with an explanatory message.
        contents = 'File contents could not be displayed: {}'.format(exc)
    return {'contents': contents,
            'filename': filename,
            'url': request.route_path('file_item', sha1sum=file_.sha1,
                                      filename=filename, _query={'raw': '1'})}
@view_config(route_name='file_verifier', request_method='PUT',
             permission='authenticated', renderer='json')
@validate(copy_to_execution=TextNumber('copy_to_execution', min_value=0,
                                       max_value=1, optional=True),
          filename=String('filename', min_length=1),
          min_size=TextNumber('min_size', min_value=0),
          max_size=TextNumber('max_size', min_value=0, optional=True),
          min_lines=TextNumber('min_lines', min_value=0),
          max_lines=TextNumber('max_lines', min_value=0, optional=True),
          optional=TextNumber('optional', min_value=0, max_value=1,
                              optional=True),
          project=EditableDBThing('project_id', Project),
          warning_regex=RegexString('warning_regex', optional=True))
@file_verifier_verification
def file_verifier_create(request, copy_to_execution, filename, min_size,
                         max_size, min_lines, max_lines, optional, project,
                         warning_regex):
    """Add an expected-file constraint (FileVerifier) to a project.

    Responds 400 on a build-file name conflict and 409 on a duplicate
    filename for the project.
    """
    # Check for build-file conflict
    if not optional and BuildFile.fetch_by(project=project, filename=filename):
        msg = ('A build file already exists with that name. '
               'Provide a different name, or mark as optional.')
        raise HTTPBadRequest(msg)
    filev = FileVerifier(copy_to_execution=bool(copy_to_execution),
                         filename=filename, min_size=min_size,
                         max_size=max_size, min_lines=min_lines,
                         max_lines=max_lines, optional=bool(optional),
                         project=project, warning_regex=warning_regex)
    Session.add(filev)
    try:
        # Flush so a duplicate filename is reported immediately as a 409.
        Session.flush()
    except IntegrityError:
        raise HTTPConflict('That filename already exists for the project')
    request.session.flash('Added expected file: {}'.format(filename),
                          'successes')
    redir_location = request.route_path('project_edit', project_id=project.id)
    return http_created(request, redir_location=redir_location)
@view_config(route_name='file_verifier_item', request_method='DELETE',
             permission='authenticated', renderer='json')
@validate(file_verifier=EditableDBThing('file_verifier_id', FileVerifier,
                                        source=MATCHDICT))
def file_verifier_delete(request, file_verifier):
    """Delete an expected file via the shared project-file helper."""
    return project_file_delete(request, file_verifier)
@view_config(route_name='file_verifier_item', request_method='POST',
             permission='authenticated', renderer='json')
@validate(copy_to_execution=TextNumber('copy_to_execution', min_value=0,
                                       max_value=1, optional=True),
          file_verifier=EditableDBThing('file_verifier_id', FileVerifier,
                                        source=MATCHDICT),
          filename=String('filename', min_length=1),
          min_size=TextNumber('min_size', min_value=0),
          max_size=TextNumber('max_size', min_value=0, optional=True),
          min_lines=TextNumber('min_lines', min_value=0),
          max_lines=TextNumber('max_lines', min_value=0, optional=True),
          optional=TextNumber('optional', min_value=0, max_value=1,
                              optional=True),
          warning_regex=RegexString('warning_regex', optional=True))
@file_verifier_verification
def file_verifier_update(request, copy_to_execution, file_verifier, filename,
                         min_size, max_size, min_lines, max_lines, optional,
                         warning_regex):
    """Update an expected file's settings, then redirect to project edit."""
    # Check for build-file conflict
    if not optional and BuildFile.fetch_by(project=file_verifier.project,
                                           filename=filename):
        msg = ('A build file already exists with that name. '
               'Provide a different name, or mark as optional.')
        raise HTTPBadRequest(msg)
    # update() is treated as falsy when no attribute actually changed
    # (see the 'Nothing to change' message below).
    if not file_verifier.update(copy_to_execution=bool(copy_to_execution),
                                filename=filename, min_size=min_size,
                                max_size=max_size, min_lines=min_lines,
                                max_lines=max_lines, optional=bool(optional),
                                warning_regex=warning_regex):
        return http_ok(request, message='Nothing to change')
    try:
        Session.flush()
    except IntegrityError:
        # Filenames are unique per project.
        raise HTTPConflict('That filename already exists for the project')
    request.session.flash('Updated expected file: {}'.format(filename),
                          'successes')
    redir_location = request.route_path('project_edit',
                                        project_id=file_verifier.project.id)
    return http_ok(request, redir_location=redir_location)
@view_config(route_name='home', request_method='GET')
def home(request):
    """Redirect to the user's page when logged in, else to the login page."""
    user = request.user
    destination = (request.route_path('user_item', username=user.username)
                   if user else request.route_path('session'))
    raise HTTPFound(location=destination)
@view_config(route_name='password_reset', request_method='PUT',
             renderer='json')
@validate(username=EmailAddress('email'))
def password_reset_create(request, username):
    """Email a one-time password-reset link to *username* (an email address).

    Raises HTTPConflict when the account is unknown, is the admin account,
    or a reset email was already sent.
    """
    # The built-in admin account may never be reset through this path.
    if username == 'admin':
        raise HTTPConflict('Hahaha, nice try!')
    user = User.fetch_by(username=username)
    if not user:
        raise HTTPConflict('Invalid email')
    # generate() appears to return a falsy value when a reset already exists
    # for the user (hence failure_message below) -- TODO confirm vs. model.
    password_reset = PasswordReset.generate(user)
    failure_message = 'You were already sent a password reset email.'
    if password_reset:
        Session.add(password_reset)
        try:
            Session.flush()
        except IntegrityError:
            raise HTTPConflict(failure_message)
        site_name = request.registry.settings['site_name']
        reset_url = request.route_url('password_reset_item',
                                      token=password_reset.get_token())
        body = ('Visit the following link to reset your password:\n\n{0}'
                .format(reset_url))
        send_email(request, recipients=user.username, body=body,
                   subject='{0} password reset email'.format(site_name))
        return http_ok(request,
                       message='A password reset link will be emailed to you.')
    else:
        raise HTTPConflict(failure_message)
@view_config(route_name='password_reset', request_method='GET',
             renderer='templates/forms/password_reset.pt')
def password_reset_edit(request):
    """Render the password-reset request form (no template variables)."""
    return {}
@view_config(route_name='password_reset_item', request_method='GET',
             renderer='templates/forms/password_reset_item.pt')
@validate(reset=AnyDBThing('token', PasswordReset, fetch_by='reset_token',
                           validator=UUID_VALIDATOR, source=MATCHDICT))
def password_reset_edit_item(request, reset):
    """Render the reset form seeded with the request's token."""
    token = reset.get_token()
    return {'token': token}
@view_config(route_name='password_reset_item', renderer='json',
             request_method='PUT')
@validate(username=EmailAddress('email'),
          password=WhiteSpaceString('password', min_length=6),
          reset=AnyDBThing('token', PasswordReset, fetch_by='reset_token',
                           validator=UUID_VALIDATOR, source=MATCHDICT))
def password_reset_item(request, username, password, reset):
    """Apply a password reset after checking the token belongs to *username*."""
    user = reset.user
    if user.username != username:
        raise HTTPConflict('The reset token and username '
                           'combination is not valid.')
    user.password = password
    # The token is single-use: remove it once the password is changed.
    Session.delete(reset)
    Session.flush()
    request.session.flash('Your password has been updated!', 'successes')
    login_url = request.route_path('session', _query={'username': username})
    return http_ok(request, redir_location=login_url)
@view_config(route_name='project', request_method='CLONE',
             permission='authenticated', renderer='json')
@validate(class_=EditableDBThing('class_id', Class),
          name=String('name', min_length=2),
          src_project=ViewableDBThing('project_id', Project))
def project_clone(request, class_, name, src_project):
    """Copy *src_project* (files, testables, test cases) into *class_*.

    The clone is created under *name* with status 'notready'.
    """
    # Additional check as we can clone projects whose classes are locked,
    # but we cannot clone projects that are locked
    if src_project.status not in (u'notready', u'ready'):
        raise HTTPConflict('Cannot clone a project with status: {}'
                           .format(src_project.status))
    # Build a copy of the project settings
    update = {'class_': class_, 'status': 'notready', 'name': name}
    project = clone(src_project, ('class_id',), update)
    Session.autoflush = False  # Don't flush while testing for changes
    # Copy project "files" keeping a mapping between src and dst objects
    mapping = {'build_files': {}, 'execution_files': {}, 'file_verifiers': {}}
    for attr in mapping:
        for item in getattr(src_project, attr):
            new = clone(item, ('project_id',))
            getattr(project, attr).append(new)
            # Remember src -> dst so testables can point at the new copies.
            mapping[attr][item] = new
    # Copy project testables
    for src_testable in src_project.testables:
        testable = clone(src_testable, ('project_id',))
        project.testables.append(testable)
        # Set testable "files" with the appropriate "new" file
        for attr, file_mapping in mapping.items():
            getattr(testable, attr).extend(file_mapping[x] for x
                                           in getattr(src_testable, attr))
        # Copy test cases
        testable.test_cases = [clone(x, ('testable_id',))
                               for x in src_testable.test_cases]
    Session.add(project)
    try:
        Session.flush()
    except IntegrityError:
        # Project names are unique per class.
        raise HTTPConflict('The name `{0}` already exists for the class.'
                           .format(name))
    request.session.flash('Cloned {} {} as {}'.format(src_project.class_.name,
                                                      src_project.name,
                                                      name),
                          'successes')
    redir_location = request.route_path('project_edit', project_id=project.id)
    return http_created(request, redir_location=redir_location)
@view_config(route_name='project', request_method='PUT',
             permission='authenticated', renderer='json')
@validate(name=String('name', min_length=2),
          class_=EditableDBThing('class_id', Class),
          makefile=ViewableDBThing('makefile_id', File, optional=True))
def project_create(request, name, class_, makefile):
    """Create a new project in the class and redirect to its edit page."""
    project = Project(name=name, class_=class_, makefile=makefile)
    Session.add(project)
    try:
        # Flush so the project gets an id (needed for the redirect below).
        Session.flush()
    except IntegrityError:
        raise HTTPConflict('That project name already exists for the class')
    request.session.flash('Project added!', 'successes')
    return http_created(request, redir_location=request.route_path(
        'project_edit', project_id=project.id))
@view_config(route_name='project_item_download', request_method='GET',
             permission='authenticated')
@validate(project=ViewableDBThing('project_id', Project, source=MATCHDICT))
def project_download(request, project):
    """Bundle every group's most recent submission files into one zip."""
    base = request.registry.settings['file_directory']

    def disk_path(file_):
        # Map a File record to its location in the backing file store.
        return File.file_path(base, file_.sha1)

    entries = []
    for sub in project.recent_submissions():
        # One folder per group, e.g. "alice-bob_42".
        users = sub.group.users_str.replace(' ', '_').replace(',', '-')
        folder = '{0}_{1}'.format(users, sub.id)
        for filename, file_ in sub.file_mapping().items():
            entries.append((os.path.join(project.name, folder, filename),
                            disk_path(file_)))
    return zip_response(request, project.name + '.zip', entries)
@view_config(route_name='project_edit',
             renderer='templates/forms/project_edit.pt',
             request_method='GET', permission='authenticated')
@validate(project=ViewableDBThing('project_id', Project, source=MATCHDICT))
def project_edit(request, project):
    """Render the project edit form; the form posts to the summary view."""
    summary_url = request.route_path('project_item_summary',
                                     class_id=project.class_.id,
                                     project_id=project.id)
    return {'project': project, 'action': summary_url}
def full_fname(fname, project):
    """Return *fname* prefixed with the project's name as a path."""
    return '{0}/{1}'.format(project.name, fname)
@view_config(route_name='project_export',
             request_method='GET', permission='authenticated',
             renderer='json')
@validate(project=ViewableDBThing('project_id', Project, source=MATCHDICT))
def project_export(request, project):
    """Export a project (settings, files, testables, test cases) as a zip.

    The archive contains a README, a project.yml describing the expected
    files, the Makefile plus build/execution files, and one .yml per
    testable; it can be re-imported via the import view.
    """
    response = []
    base_path = request.registry.settings['file_directory']
    response.append(("text", full_fname("README.txt", project), """
Project %s
This is a full copy of the testables and test cases in this project.
It may be imported again using the import feature""" % project.name))
    def make_big_string(text, filename):
        # Emit *text* as its own archive entry and return the YAML
        # reference ({"File": name}) that points at it.
        response.append(("text", full_fname(filename, project), text))
        return {
            "File": filename
        }
    project_yml_dict = {}
    project_yml_dict["Name"] = project.name
    project_yml_dict["ExpectedFiles"] = {
        expected_file.filename : {
            "CopyToExecution": expected_file.copy_to_execution,
            "MinSize": expected_file.min_size,
            "MaxSize": expected_file.max_size,
            "MinLines": expected_file.min_lines,
            "MaxLines": expected_file.max_lines,
            "Optional": expected_file.optional,
            "WarningRegex": expected_file.warning_regex,
        } for expected_file in project.file_verifiers
    }
    response.append(("text", full_fname("project.yml", project), yaml.safe_dump(project_yml_dict, default_flow_style=False)))
    if project.makefile is not None:
        response.append(("file", full_fname("Makefile", project), File.file_path(base_path,project.makefile.sha1)))
    for buildfile in project.build_files:
        response.append(("file", full_fname("build_files/" + buildfile.filename, project), File.file_path(base_path,buildfile.file.sha1)))
    for execution in project.execution_files:
        response.append(("file", full_fname("execution_files/" + execution.filename, project), File.file_path(base_path,execution.file.sha1)))
    for testable in project.testables:
        # create a dictionary that will represent the testable
        testable_dict = {}
        testable_dict["BuildFiles"] = [file.filename for file in testable.build_files]
        testable_dict["ExecutionFiles"] = [file.filename for file in testable.execution_files]
        testable_dict["ExpectedFiles"] = [file.filename for file in testable.file_verifiers]
        testable_dict["MakeTarget"] = testable.make_target
        testable_dict["Executable"] = testable.executable
        testable_dict["IsHidden"] = testable.is_hidden
        testable_dict["TestCases"] = {}
        for test_case in testable.test_cases:
            # this is the basepath where we will write out long text objects if necessary
            testcase_basepath = ("testables/%s/%s" % (testable.name, test_case.name))
            # create a dict to hold the information for the test case!
            tc_dict = {}
            tc_dict["HideExpected"] = test_case.hide_expected
            tc_dict["Points"] = test_case.points
            tc_dict["Command"] = test_case.args
            # PEP 8 (E711): use `is not None` for None comparisons, matching
            # the `project.makefile is not None` check above.
            if test_case.stdin is not None:
                with open(File.file_path(base_path,test_case.stdin.sha1), 'r') as fin:
                    tc_dict["Input"] = make_big_string(fin.read(), testcase_basepath + ".stdin")
            if test_case.expected is not None:
                with open(File.file_path(base_path,test_case.expected.sha1), 'r') as fout:
                    tc_dict["Output"] = make_big_string(fout.read(), testcase_basepath + "." + test_case.source)
                    tc_dict["Output"]["Source"] = test_case.source
                    if (tc_dict["Output"]["Source"] == "file"):
                        tc_dict["Output"]["Source"] = test_case.output_filename
            testable_dict["TestCases"][test_case.name] = tc_dict
        response.append((
            "text",
            full_fname("testables/%s/%s.yml" % (testable.name,testable.name), project),
            yaml.safe_dump(testable_dict, default_flow_style=False)
        ))
    return zip_response_adv(request, project.name + ".zip", response)
@view_config(route_name='project_import', request_method='POST',
             permission='authenticated', renderer='json')
@validate(project=EditableDBThing('project_id', Project, source=MATCHDICT))
# @validate(file=ViewableDBThing('makefile_id', File, optional=False),
#           project=EditableDBThing('project_id', Project, source=MATCHDICT))
def project_import(request, project):
    """Replace a project's contents from an uploaded export zip.

    Reads project.yml plus Makefile, build/execution files and testable
    .yml descriptions out of the archive (the format produced by
    project_export) and rebuilds the project's records from them.
    """
    import_filename = request.POST['file'].filename
    import_file = request.POST['file'].file
    # create a file in the backing filesystem for each file in the zip archive!
    base_path = request.registry.settings['file_directory']
    with ZipFile(import_file,"r") as myzip:
        # upload every file we were given to the backing store... this may not acutally be the best approach
        submit_files = {path.strip("/") : File.fetch_or_create(myzip.read(path), base_path) for path in myzip.namelist()}
        #return myzip.namelist()
        # NOTE(review): dict.iteritems() is Python 2 only; this line would
        # raise AttributeError under Python 3 (use .items()).
        file_list = sorted([path for path,v in submit_files.iteritems()])
        # we now clear out all of the old testables
        # TODO: back these up to a temporary location before reomoving them incase of encountering errors!
        # alternatively only allow imports on empty projects ?
        #project.testables[:] = []
        class Filesystem(object):
            # Read-only directory-style view over the zip's name list.
            #paths need to have trailing slashes
            def __init__(self, file):
                self._file = file
                self._files = file.namelist()
            def listdir(self, path):
                # Direct children of *path*: entries with no further '/'
                # (files) or exactly one trailing '/' (subdirectories).
                pathlen = len(path)
                files = [fname[pathlen:] for fname in self._files if fname.startswith(path)]
                files = [fname for fname in files if '/' not in fname or fname.index('/') == len(fname)-1]
                return files
            def findroot(self,path=""):
                # Descend through single-folder wrappers (skipping __MACOSX)
                # until a directory containing project.yml is found.
                folders = [folder for folder in self.listdir(path) if folder[-1:] == '/' and '__MAC' not in folder]
                print(folders)
                if ("project.yml" not in self.listdir(path)) and (len(folders) == 1):
                    return self.findroot(folders[0])
                elif "project.yml" in self.listdir(path):
                    return path
                else:
                    raise SubmitException("Failed to find project.yml in root")
        def build_file_tree(dirlist, fullpath=""):
            # Convert a flat list of zip paths into a nested dict tree.
            # NOTE(review): appears unused within this function -- confirm.
            dirs = filter(lambda x: "/" in x, dirlist)
            files = filter(lambda x: "/" not in x, dirlist)
            return dict({
                dirname : build_file_tree([ x[x.index("/")+1:] for x in dirlist ], fullpath=fullpath + "/" + dirname)
                for dirname, dirlist in itertools.groupby(dirs, lambda x: x[0 : x.index("/")])
            }, **{
                file : fullpath + "/" + file
                for file in files
            })
        #need to refactor out submit files
        #creating for makefile
        def get_or_create_file(input, rootdir = "/"):
            # Resolve a YAML file reference: a raw string becomes a new File
            # record; {"File": name} looks up an archive member relative to
            # *rootdir*; {"Text": ...} stores the value directly.
            t = type(input)
            if t == str:
                return File.fetch_or_create(input, base_path)
            if t == dict:
                if "File" in input:
                    fpath = (os.path.join(rootdir, str(input["File"]))).strip("/")
                    if fpath not in submit_files:
                        raise SubmitException("File %s not found in project.zip" % fpath)
                    else:
                        return submit_files[fpath]
                elif "Text" in input:
                    return File.fetch_or_create(input, base_path)
            raise SubmitException("Failed to load a file from the key")
        #creating for expected files, testables, and buildfiles
        #the root_dir should contain the yml, testables, makefile, execution files, and build files
        try:
            fs = Filesystem(myzip)
            root_dir = fs.listdir("")
            try:
                root_dir = fs.findroot()
            except SubmitException as error:
                raise SubmitException("Encountered excpetion: " + str(error) + " while finding project.yml")
            print("Testables", fs.listdir(os.path.join(root_dir, "testables")))
            if len(fs.listdir(os.path.join(root_dir, "testables"))) == 0:
                request.session.flash("Nonfatal exception 'no testables defined' was encountered. Continuing", 'errors')
            project_yml = yaml.safe_load(myzip.read(root_dir + "project.yml").decode('utf-8'))
            #"importing" expected files
            try:
                if "ExpectedFiles" in project_yml:
                    project.expected_files = []
                    for file in project_yml["ExpectedFiles"]:
                        expect_file = FileVerifier(
                            filename = file,
                            copy_to_execution = project_yml["ExpectedFiles"][file]["CopyToExecution"],
                            min_size = project_yml["ExpectedFiles"][file]["MinSize"],
                            max_size = project_yml["ExpectedFiles"][file]["MaxSize"],
                            min_lines = project_yml["ExpectedFiles"][file]["MinLines"],
                            max_lines = project_yml["ExpectedFiles"][file]["MaxLines"],
                            optional = project_yml["ExpectedFiles"][file]["Optional"],
                            project_id = project.id,
                            warning_regex = project_yml["ExpectedFiles"][file]["WarningRegex"]
                        )
                        Session.add(expect_file)
                        project.expected_files.append(expect_file)
                else:
                    print("file: %s is empty" % file)
            except SubmitException as error:
                raise SubmitException("Encountered exception: " + str(error) + " while processing Expected FIles")
            #importing makefile
            try:
                if "Makefile" in project_yml:
                    project.makefile = get_or_create_file(project_yml["Makefile"], rootdir=root_dir)
            except SubmitException as error:
                raise SubmitException("Encountered exception: " + str(error) + " while processing Makefile")
            #import execution files
            try:
                execution_dir = os.path.join(root_dir,"execution_files/")
                project.execution_files = []
                for file in fs.listdir(execution_dir):
                    if file:
                        file_obj = File.fetch_or_create(myzip.read(os.path.join(execution_dir, file)), base_path)
                        exec_file = ExecutionFile(
                            project = project,
                            file = file_obj,
                            filename = file
                        )
                        Session.add(exec_file)
                        project.execution_files.append(exec_file)
                    else:
                        print("file: %s is empty" % file)
            # NOTE(review): bare except also swallows KeyboardInterrupt /
            # SystemExit; consider `except Exception`.
            except:
                raise SubmitException("Encountered exception while adding execution files")
            #importing build files
            build_dir = os.path.join(root_dir,"build_files/")
            project.build_files = []
            for file in fs.listdir(build_dir):
                if file:
                    print("appending %s" % file)
                    file_obj = File.fetch_or_create(myzip.read(os.path.join(build_dir, file)), base_path)
                    build_file = BuildFile(
                        project = project,
                        file = file_obj,
                        filename = file
                    )
                    Session.add(build_file)
                    project.build_files.append(build_file)
                else:
                    print("file: %s is empty" % file)
            #importing testables
            try:
                testables_dir = os.path.join(root_dir,"testables/")
                project.testables = []
                for testable_folder in fs.listdir(testables_dir):
                    if "/" in testable_folder:
                        print(testable_folder)
                        testable_yml = yaml.safe_load(myzip.read(os.path.join(testables_dir,testable_folder) + ("%s.yml" % testable_folder.strip("/"))).decode('utf-8'))
                        testable_file = Testable(
                            #build_files = testable_yml["BuildFiles"],
                            executable = testable_yml["Executable"],
                            #execution_files = testable_yml["ExecutionFiles"],
                            #file_verifiers = testable_yml["ExpectedFiles"],
                            is_hidden = testable_yml["IsHidden"],
                            make_target = testable_yml["MakeTarget"],
                            name = testable_folder.strip("/"),
                            project_id = project.id
                        )
                        #print(testable_yml["BuildFiles"])
                        testable_file.test_cases = []
                        if "TestCases" in testable_yml:
                            for test_case_name in testable_yml["TestCases"]:
                                test_cases = TestCase(
                                    args = testable_yml["TestCases"][test_case_name]["Args"],
                                    expected = get_or_create_file(testable_yml["TestCases"][test_case_name]["STDOUT"], rootdir=testable_folder),
                                    hide_expected = testable_yml["TestCases"][test_case_name]["HideExpected"],
                                    name = test_case_name,
                                    points = testable_yml["TestCases"][test_case_name]["Points"],
                                    stdin = get_or_create_file(testable_yml["TestCases"][test_case_name]["STDIN"], rootdir=testable_folder)
                                )
                                Session.add(test_cases)
                                testable_file.test_cases.append(test_cases)
                        testable_file.build_files = []
                        if "BuildFiles" in testable_yml:
                            for files in testable_yml["BuildFiles"]:
                                build_file = BuildFile.fetch_by(project=project, filename=files)
                                Session.add(build_file)
                                testable_file.build_files.append(build_file)
                        testable_file.execution_files = []
                        if "ExecutionFiles" in testable_yml:
                            for files in testable_yml["ExecutionFiles"]:
                                execution_file = ExecutionFile.fetch_by(project=project, filename=files)
                                Session.add(execution_file)
                                testable_file.execution_files.append(execution_file)
                        testable_file.file_verifiers = []
                        if "ExpectedFiles" in testable_yml:
                            for files in testable_yml["ExpectedFiles"]:
                                expected_file = FileVerifier.fetch_by(project=project, filename=files)
                                Session.add(expected_file)
                                testable_file.file_verifiers.append(expected_file)
                        Session.add(testable_file)
                        project.testables.append(testable_file)
            except SubmitException as error:
                raise SubmitException("Encountered exception while adding testables")
            #return project_yml
        except SubmitException as error:
            request.session.flash("Error: " + str(error), 'errors')
    try:
        Session.flush()
    except IntegrityError:
        # NOTE(review): user-facing message contains typos ('fluch',
        # 'reccomending') and an unprofessional joke -- consider rewording.
        raise HTTPConflict('Session could not fluch, reccomending stool softeners')
    redir_location = request.route_path('project_edit', project_id=project.id)
    return http_ok(request, redir_location=redir_location)
#expected files are instances of file verifiers
# def testable_create(request, name, is_hidden, make_target, executable,
# build_file_ids, execution_file_ids, file_verifier_ids,
# project):
# if make_target and not project.makefile:
# msg = 'make_target cannot be specified without a make file'
# raise HTTPBadRequest(msg)
# try:
# # Verify the ids actually exist and are associated with the project
# build_files = fetch_request_ids(build_file_ids, BuildFile,
# 'build_file_id',
# project.build_files)
# execution_files = fetch_request_ids(execution_file_ids, ExecutionFile,
# 'execution_file_id')
# file_verifiers = fetch_request_ids(file_verifier_ids, FileVerifier,
# 'file_verifier_id',
# project.file_verifiers)
# except InvalidId as exc:
# raise HTTPBadRequest('Invalid {0}'.format(exc.message))
# testable = Testable(name=name, is_hidden=bool(is_hidden),
# make_target=make_target,
# executable=executable, project=project,
# build_files=build_files,
# execution_files=execution_files,
# file_verifiers=file_verifiers)
# redir_location = request.route_path('project_edit', project_id=project.id)
# Session.add(testable)
# try:
# Session.flush()
# except IntegrityError:
# raise HTTPConflict('That name already exists for the project')
# return http_created(request, redir_location=redir_location,
# testable_id=testable.id)
# @view_config(route_name='project_item_summary', request_method='POST',
# permission='authenticated', renderer='json')
# @validate(name=String('name', min_length=2),
# makefile=ViewableDBThing('makefile_id', File, optional=True),
# is_ready=TextNumber('is_ready', min_value=0, max_value=1,
# optional=True),
# deadline=TextDate('deadline', optional=True),
# delay_minutes=TextNumber('delay_minutes', min_value=1),
# group_max=TextNumber('group_max', min_value=1),
# project=EditableDBThing('project_id', Project, source=MATCHDICT))
# def project_update(request, name, makefile, is_ready, deadline, delay_minutes,
# group_max, project):
# # Fix timezone if it doesn't exist
# if project.deadline and deadline and not deadline.tzinfo:
# deadline = deadline.replace(tzinfo=project.deadline.tzinfo)
# if not project.update(name=name, makefile=makefile, deadline=deadline,
# delay_minutes=delay_minutes,
# group_max=group_max,
# status=u'ready' if bool(is_ready) else u'notready'):
# return http_ok(request, message='Nothing to change')
# try:
# Session.flush()
# except IntegrityError:
# raise HTTPConflict('That project name already exists for the class')
# request.session.flash('Project updated', 'successes')
# redir_location = request.route_path('project_edit', pro
@view_config(route_name='project_group', request_method='JOIN',
             permission='authenticated', renderer='json')
@validate(project=EditableDBThing('project_id', Project, source=MATCHDICT),
          users=List('user_ids', ViewableDBThing('', User), min_elements=2,
                     max_elements=2))
def project_group_admin_join(request, project, users):
    """Admin action: force the two selected users into one group.

    Uses bypass_limit=True so the class's group-size limit does not apply.
    """
    try:
        group = users[0].group_with(users[1], project, bypass_limit=True)
    except GroupWithException as exc:
        # group_with refused the pairing; surface its message to the admin.
        request.session.flash(exc.args[0], 'errors')
        group = None
    try:
        Session.flush()
    except IntegrityError:
        raise HTTPConflict('Could not join the users at this time.')
    # Computed once here; the original recomputed the identical path twice.
    redir_location = request.route_path('group_admin', project_id=project.id)
    if not group:
        return http_gone(request, redir_location=redir_location)
    request.session.flash('Made group: {}'.format(group.users_str),
                          'successes')
    return http_ok(request, redir_location=redir_location)
@view_config(route_name='group_admin', request_method='GET',
             renderer='templates/forms/group_admin.pt',
             permission='authenticated')
@validate(project=EditableDBThing('project_id', Project, source=MATCHDICT))
def project_group_admin_view(request, project):
    """Build the admin grouping form: existing groups plus lone students."""
    ungrouped = set(project.class_.users)
    choices = []
    for group in project.groups:
        ungrouped -= set(group.users)
        # Represent each group by its first member's id.
        choices.append((group.users_str, group.group_assocs[0].user.id))
    choices.extend((student.name, student.id) for student in ungrouped)
    return {'project': project, 'selectable': choices}
@view_config(route_name='project_group_item', renderer='json',
             request_method='PUT')
@validate(project=AccessibleDBThing('project_id', Project, source=MATCHDICT),
          group_request=EditableDBThing('group_request_id', GroupRequest,
                                        source=MATCHDICT))
def project_group_request_confirm(request, project, group_request):
    """Accept a pending group request and join the requesting student."""
    failed = False
    try:
        request.user.group_with(group_request.from_user, project)
    except GroupWithException as exc:
        # Joining was refused; report the reason but still consume the
        # request below.
        request.session.flash(exc.args[0], 'errors')
        failed = True
    Session.delete(group_request)
    try:
        Session.flush()
    except IntegrityError:
        raise HTTPConflict('Could not join the group at this time.')
    url = request.route_url('project_group', project_id=project.id)
    if failed:
        return http_gone(request, redir_location=url)
    request.session.flash('Joined group with {}'
                          .format(group_request.from_user), 'successes')
    return http_ok(request, redir_location=url)
@view_config(route_name='project_group', renderer='json', request_method='PUT')
@validate(project=AccessibleDBThing('project_id', Project, source=MATCHDICT),
          username=EmailAddress('email'))
def project_group_request_create(request, project, username):
    """Ask another student (by email) to form a submission group.

    Validates that both students can still join groups for the project and
    are not already grouped together, then records a GroupRequest and
    emails the invitee a confirmation link.
    """
    if not request.user.can_join_group(project):
        raise HTTPConflict('You cannot expand your group for this project.')
    user = User.fetch_by(username=username)
    # The invitee must exist and be enrolled in the same class.
    if not user or project.class_ not in user.classes:
        raise HTTPConflict('Invalid email.')
    if not user.can_join_group(project):
        raise HTTPConflict('That user cannot join your group.')
    self_assoc = request.user.fetch_group_assoc(project)
    user_assoc = user.fetch_group_assoc(project)
    # Reject self-invites and invites to an existing groupmate.
    if request.user == user or \
            self_assoc == user_assoc and self_assoc is not None:
        raise HTTPConflict('You are already in a group with that student.')
    Session.add(GroupRequest(from_user=request.user, project=project,
                             to_user=user))
    try:
        Session.flush()
    except IntegrityError:
        raise HTTPConflict('Could not create your group request.')
    site_name = request.registry.settings['site_name']
    url = request.route_url('project_group', project_id=project.id)
    body = ('Your fellow {} student, {}, has requested you join their '
            'group for "{}". Please visit the following link to confirm or '
            'deny the request:\n\n{}'
            .format(project.class_.name, request.user, project.name, url))
    send_email(request, recipients=user.username, body=body,
               subject='{}: {} "{}" Group Request'
               .format(site_name, project.class_.name, project.name))
    request.session.flash('Request to {} sent via email.'.format(user),
                          'successes')
    return http_ok(request, redir_location=request.url)
@view_config(route_name='project_group_item', renderer='json',
             request_method='DELETE')
@validate(project=AccessibleDBThing('project_id', Project, source=MATCHDICT),
          group_request=AccessibleDBThing('group_request_id', GroupRequest,
                                          source=MATCHDICT))
def project_group_request_delete(request, project, group_request):
    """Cancel a group request: the sender revokes it, the recipient denies it."""
    is_sender = request.user == group_request.from_user
    if is_sender:
        msg = 'Revoked request to {}.'.format(group_request.to_user)
    else:
        msg = 'Denied request from {}.'.format(group_request.from_user)
    Session.delete(group_request)
    request.session.flash(msg, 'successes')
    return http_ok(request, redir_location=request.route_url(
        'project_group', project_id=project.id))
@view_config(route_name='project_group', request_method='GET',
             renderer='templates/forms/project_group.pt',
             permission='authenticated')
@validate(project=AccessibleDBThing('project_id', Project, source=MATCHDICT))
def project_group_view(request, project):
    """Render the group-management page for the current user."""
    user = request.user
    assoc = user.fetch_group_assoc(project)
    if assoc:
        members = assoc.group.users_str
    else:
        # Not in a group yet: show only the user themselves.
        members = user.name
    incoming = GroupRequest.query_by(project=project, to_user=user)
    outgoing = GroupRequest.query_by(from_user=user, project=project).first()
    return {'project': project, 'members': members,
            'can_join': user.can_join_group(project),
            'pending': incoming.all(), 'requested': outgoing}
@view_config(route_name='project_info', request_method='GET',
             permission='authenticated', renderer='json')
@validate(project=EditableDBThing('project_id', Project, source=MATCHDICT))
def project_info(request, project):
    """Serialize the project's testables and their test cases to JSON."""
    def _test_case_info(test_case):
        # sha1 digests of the stdin/expected files, when present.
        stdin = test_case.stdin.sha1 if test_case.stdin else None
        expected = test_case.expected.sha1 if test_case.expected else None
        return {'id': test_case.id, 'args': test_case.args,
                'source': test_case.source,
                'stdin': stdin, 'expected': expected,
                'output_type': test_case.output_type,
                'output_filename': test_case.output_filename}

    testables = {}
    for testable in project.testables:
        testables[testable.name] = {
            'id': testable.id,
            'test_cases': {tc.name: _test_case_info(tc)
                           for tc in testable.test_cases}}
    return {'id': project.id, 'name': project.name, 'testables': testables}
@view_config(route_name='project_new', request_method='GET',
             renderer='templates/forms/project_new.pt',
             permission='authenticated')
@validate(class_=EditableDBThing('class_id', Class, source=MATCHDICT))
def project_new(request, class_):
    """Render the new-project form, offering the admin's projects to clone."""
    placeholder = DummyTemplateAttr(None)
    placeholder.class_ = class_
    # Any project in a class the user administers can serve as a clone source.
    clone_projects = [proj for klass in sorted(request.user.admin_for)
                      for proj in klass.projects]
    return {'project': placeholder, 'clone_projects': clone_projects}
@view_config(route_name='project_edit', renderer='json',
             request_method='PUT', permission='authenticated')
@validate(project=EditableDBThing('project_id', Project, source=MATCHDICT))
def project_requeue(request, project):
    """Re-enqueue each group's most recent submission for grading."""
    total = 0
    for submission in project.recent_submissions():
        request.queue(submission_id=submission.id, _priority=2)
        total += 1
    if not total:
        return http_ok(request, message='There are no submissions to requeue.')
    request.session.flash('Requeued the most recent submissions ({0} items).'
                          .format(total), 'successes')
    return http_ok(request, redir_location=request.url)
@view_config(route_name='project_scores', request_method='GET',
             permission='authenticated')
@validate(project=EditableDBThing('project_id', Project, source=MATCHDICT))
def project_scores(request, project):
    """Produce a CSV of per-student scores for the project's best submissions."""
    lines = ['Name, Email, Group ID, Score (On Time), Score']
    _, best_ontime, best = project.process_submissions()
    for group, (sub, points) in best.items():
        # Blank on-time score when the group has no on-time submission.
        on_time = best_ontime.get(group, (None, ''))[1]
        for member in group.users:
            lines.append('{}, {}, {}, {}, {}'
                         .format(member.name, member.username, group.id,
                                 points, on_time))
    disposition = 'attachment; filename="{0}.csv"'.format(project.name)
    return Response(body='\n'.join(lines), content_type=str('text/csv'),
                    content_disposition=disposition)
@view_config(route_name='submission_item_gen', renderer='json',
             request_method='PUT', permission='authenticated')
@validate(submission=EditableDBThing('submission_id', Submission,
                                     source=MATCHDICT))
def project_test_case_generate(request, submission):
    """Lock the project and regenerate expected outputs from *submission*.

    The submission must be verified with no pending or failing test
    groups; on success the project (and all its testables) is locked and a
    background job is queued to rebuild the expected outputs.
    """
    project = submission.project
    if project.status == u'locked':
        raise HTTPConflict('The project is already locked.')
    # Verify the submission is okay to use
    if not submission.verification_results:
        raise HTTPConflict('The submission has not been verified.')
    if submission.testables_pending():
        raise HTTPConflict('The submission has pending test groups.')
    # Look for testables with issues
    by_testable = {x.testable: x for x in submission.testable_results}
    for testable in submission.project.testables:
        if TestableStatus(testable, by_testable.get(testable),
                          submission.verification_results.errors).issue:
            raise HTTPConflict('The submission contains failing test groups.')
    # Mark the project and its testables as locked
    project.status = u'locked'
    for testable in project.testables:
        testable.is_locked = True
    # Saved attributes
    # Capture the ids now: the ORM objects cannot be used after commit().
    submission_id = submission.id
    project_id = project.id
    try:
        transaction.commit()  # Need to commit before queuing the job.
    except IntegrityError:
        transaction.abort()
        raise
    # Schedule a task to generate the expected outputs
    request.queue(submission_id=submission_id, update_project=True,
                  _priority=0)
    request.session.flash('Rebuilding the project\'s expected outputs.',
                          'successes')
    redir_location = request.route_url('project_edit', project_id=project_id)
    return http_ok(request, redir_location=redir_location)
@view_config(route_name='project_item_summary', request_method='POST',
             permission='authenticated', renderer='json')
@validate(name=String('name', min_length=2),
          makefile=ViewableDBThing('makefile_id', File, optional=True),
          is_ready=TextNumber('is_ready', min_value=0, max_value=1,
                              optional=True),
          deadline=TextDate('deadline', optional=True),
          delay_minutes=TextNumber('delay_minutes', min_value=1),
          group_max=TextNumber('group_max', min_value=1),
          project=EditableDBThing('project_id', Project, source=MATCHDICT))
def project_update(request, name, makefile, is_ready, deadline, delay_minutes,
                   group_max, project):
    """Update a project's settings; respond "nothing to change" when a no-op."""
    # Fix timezone if it doesn't exist
    # The form submits a naive datetime; reuse the stored deadline's tzinfo
    # so the value compares correctly against the existing deadline.
    if project.deadline and deadline and not deadline.tzinfo:
        deadline = deadline.replace(tzinfo=project.deadline.tzinfo)
    if not project.update(name=name, makefile=makefile, deadline=deadline,
                          delay_minutes=delay_minutes,
                          group_max=group_max,
                          status=u'ready' if bool(is_ready) else u'notready'):
        return http_ok(request, message='Nothing to change')
    try:
        Session.flush()
    except IntegrityError:
        # Project names are unique within a class.
        raise HTTPConflict('That project name already exists for the class')
    request.session.flash('Project updated', 'successes')
    redir_location = request.route_path('project_edit', project_id=project.id)
    return http_ok(request, redir_location=redir_location)
@view_config(route_name='project_item_detailed',
             request_method=('GET', 'HEAD'),
             renderer='templates/project_view_detailed.pt',
             permission='authenticated')
@validate(project=AccessibleDBThing('project_id', Project, source=MATCHDICT),
          group=ViewableDBThing('group_id', Group, source=MATCHDICT))
def project_view_detailed(request, project, group):
    """Show a group's submissions for a project, newest first."""
    submissions = Submission.query_by(project=project, group=group)
    if not submissions:
        raise HTTPNotFound()
    admin = project.can_view(request.user)
    # Only admins get previous/next navigation between groups.
    prev_group = next_group = None
    if admin:
        prev_group, next_group = prev_next_group(project, group)
    ordered = sorted(submissions, key=lambda s: s.created_at, reverse=True)
    return {'project': project,
            'project_admin': admin,
            'is_member': request.user in group.users,
            'users_str': group.users_str,
            'can_edit': admin,
            'prev_group': prev_group,
            'next_group': next_group,
            'submissions': ordered}
@view_config(route_name='project_item_detailed_user',
             renderer='templates/project_view_detailed.pt',
             request_method=('GET', 'HEAD'),
             permission='authenticated')
@validate(project=AccessibleDBThing('project_id', Project, source=MATCHDICT),
          user=ViewableDBThing('username', User, fetch_by='username',
                          validator=String('username'), source=MATCHDICT))
def project_view_detailed_user(request, project, user):
    """Redirect to the user's group page, or show an empty detail view."""
    assoc = user.fetch_group_assoc(project)
    if assoc:
        # The user belongs to a group: send the browser to the group view.
        raise HTTPFound(location=request.route_path(
            'project_item_detailed', project_id=project.id,
            group_id=assoc.group_id))
    # No group yet: render the detail template with no submissions.
    return {'project': project,
            'project_admin': False,
            'is_member': request.user == user,
            'users_str': user.name,
            'can_edit': False,
            'prev_group': None,
            'next_group': None,
            'submissions': []}
@view_config(route_name='project_item_summary', request_method=('GET', 'HEAD'),
             renderer='templates/project_view_summary.pt',
             permission='authenticated')
@validate(project=ViewableDBThing('project_id', Project, source=MATCHDICT))
def project_view_summary(request, project):
    """Render the project summary page: score statistics and per-group subs."""
    # Compute student stats
    by_group, best_ontime, best = project.process_submissions()
    possible = project.points_possible(include_hidden=True)
    if best:
        best_scores = numpy.array([x[1] for x in best.values()])
        # Clamp scores at the maximum so the histogram range covers them
        # (scores can apparently exceed `possible`, e.g. extra credit --
        # TODO confirm).
        normed = [min(x[1], possible) for x in best.values()]
        max_score = max(best_scores)
        mean = numpy.mean(best_scores)
        median = numpy.median(best_scores)
        # Grade-style bin edges at 0/60/70/80/90/100 percent of possible.
        bins = [x * possible for x in [0, 0, .6, .7, .8, .9, 1, 1]]
        # Presumably keeps the second edge strictly above the first so
        # numpy.histogram gets monotonically increasing bins -- verify.
        bins[1] = min(1, bins[2])
        hist, _ = numpy.histogram(normed, range=(0, possible), bins=bins)
    else:
        hist = max_score = mean = median = None
    # Find most recent for each group
    submissions = {}
    group_truncated = set()
    for group in project.groups:
        if group in by_group:
            # The three newest submissions, newest first.
            newest = by_group[group][:-4:-1]
            if group in best:
                best[group][0]._is_best = True
                if best[group][0] not in newest:
                    newest.append(best[group][0])
            if group in best_ontime:
                best_ontime[group][0]._is_best = True
                if best_ontime[group][0] not in newest:
                    newest.append(best_ontime[group][0])
            # Record that not every submission is shown for this group.
            if len(newest) < len(by_group[group]):
                group_truncated.add(group)
            submissions[group] = newest
        else:
            submissions[group] = []
    # The 16 most recent submissions
    recent_submissions = (Submission.query_by(project=project)
                          .order_by(Submission.created_at.desc())
                          .limit(16).all())
    return {'group_truncated': group_truncated,
            'hist': hist,
            'max': max_score,
            'mean': mean,
            'median': median,
            'num_groups': len(best),
            'project': project,
            'recent_submissions': recent_submissions,
            'submissions': sorted(submissions.items())}
@view_config(route_name='session', request_method='PUT', renderer='json')
@validate(username=Or('email', EmailAddress(''), String('')),
          password=WhiteSpaceString('password', min_length=6),
          next_path=String('next', optional=True))
def session_create(request, username, password, next_path):
    """Authenticate the user and establish a session cookie."""
    settings = request.registry.settings
    dev_mode = asbool(settings.get('development_mode', False))
    user = User.login(username, password, development_mode=dev_mode)
    if not user:
        raise HTTPConflict('Invalid login')
    headers = remember(request, user.id)
    request.session.flash('Welcome {}!'.format(user.name), 'successes')
    # Honor an explicit "next" target, else land on the user's own page.
    destination = next_path or request.route_path('user_item',
                                                  username=user.username)
    return http_created(request, headers=headers, redir_location=destination)
@view_config(route_name='session', request_method='DELETE', renderer='json',
             permission='authenticated')
def session_destroy(request):
    """Log the user out and redirect to the home page."""
    home = request.route_path('home')
    return http_gone(request, headers=forget(request), redir_location=home)
@view_config(route_name='session', request_method='GET',
             renderer='templates/forms/login.pt')
@validate(username=String('username', optional=True, source=SOURCE_GET),
          next_path=String('next', optional=True, source=SOURCE_GET))
def session_edit(request, username, next_path):
    """Render the login form, defaulting the post-login target to home."""
    if not next_path:
        next_path = request.route_url('home')
    return {'next': next_path, 'username': username}
@view_config(route_name='submission', renderer='json', request_method='PUT',
             permission='authenticated')
@validate(project=AccessibleDBThing('project_id', Project),
          file_ids=List('file_ids', TextNumber('', min_value=0),
                        min_elements=1),
          filenames=List('filenames', String('', min_length=1),
                         min_elements=1))
def submission_create(request, project, file_ids, filenames):
    """Create a submission from previously uploaded files and queue grading.

    ``file_ids`` and ``filenames`` are parallel lists mapping uploaded
    file ids to the filenames expected by the project's file verifiers.
    """
    # Additional input verification
    filename_set = set(filenames)
    if len(filename_set) != len(filenames):
        raise HTTPBadRequest('A filename cannot be provided more than once')
    elif len(file_ids) != len(filenames):
        msg = 'Number of file_ids must match number of filenames'
        raise HTTPBadRequest(msg)
    # Verify there are no extra files
    extra = filename_set - set(x.filename for x in project.file_verifiers)
    if extra:
        raise HTTPBadRequest('Invalid files: {}'.format(', '.join(extra)))
    # Verify user permission on files
    # Every referenced file must already belong to the submitting user.
    msgs = []
    user_files = {x.id: x for x in request.user.files}
    files = set()
    for i, file_id in enumerate(file_ids):
        if file_id in user_files:
            files.add(user_files[file_id])
        else:
            msgs.append('Invalid file "{0}"'.format(filenames[i]))
    if msgs:
        raise HTTPBadRequest(msgs)
    submission = request.user.make_submission(project)
    # Grant the files' permissions to the other members of the group
    for user in submission.group.users:
        if user == request.user:
            continue
        user.files.update(files)
    # Associate the files with the submissions by their submission name
    assoc = []
    for file_id, filename in zip(file_ids, filenames):
        assoc.append(SubmissionToFile(file_id=file_id, filename=filename))
    submission.files.extend(assoc)
    Session.add(submission)
    Session.add_all(assoc)
    Session.flush()
    # Capture the id now; the object detaches after the commit below.
    submission_id = submission.id
    # We must commit the transaction before queueing the job.
    transaction.commit()
    request.queue(submission_id=submission_id)
    # Redirect to submission result page
    redir_location = request.route_path('submission_item',
                                        submission_id=submission_id)
    return http_created(request, redir_location=redir_location)
@view_config(route_name='submission_new', request_method='GET',
             renderer='templates/forms/submission_new.pt',
             permission='authenticated')
@validate(project=AccessibleDBThing('project_id', Project, source=MATCHDICT))
def submission_new(request, project):
    """Render the submission instructions page for a project."""
    settings = request.registry.settings
    return {'project': project, 'submit_path': settings['submit_path']}
@view_config(route_name='submission_item', renderer='json',
             request_method='PUT', permission='authenticated')
@validate(submission=EditableDBThing('submission_id', Submission,
                                     source=MATCHDICT))
def submission_requeue(request, submission):
    """Requeue a single submission for grading at top priority."""
    # Priority 0 is the highest -- admin-triggered regrades jump the queue.
    request.queue(submission_id=submission.id, _priority=0)
    request.session.flash('Requeued the submission', 'successes')
    return http_ok(request, redir_location=request.url)
@view_config(route_name='submission_item', request_method='GET',
             renderer='templates/submission_view.pt',
             permission='authenticated')
@validate(submission=ViewableDBThing('submission_id', Submission,
                                     source=MATCHDICT),
          as_user=TextNumber('as_user', min_value=0, max_value=1,
                             optional=True, source=SOURCE_GET))
def submission_view(request, submission, as_user):
    """Render a submission's results page.

    Admins see everything (including hidden testables); with
    ``as_user=1`` an admin previews the student view. Students may see a
    delay page instead of results until the results-release delay passes.
    """
    actual_admin = submission.project.can_edit(request.user)
    # as_user=1 lets an admin preview what the student sees.
    submission_admin = not bool(as_user) and actual_admin
    if not submission_admin:  # Only check delay for user view
        delay = submission.get_delay(
            update=request.user in submission.group.users)
        if delay:
            # Swap in the "results delayed" template and return early.
            request.override_renderer = 'templates/submission_delay.pt'
            files = {x.filename: x.file for x in submission.files}
            prev_sub, next_sub = prev_next_submission(submission)
            return {'delay': '{0:.1f} minutes'.format(delay),
                    'files': files,
                    'next_sub': next_sub,
                    'prev_sub': prev_sub,
                    'submission': submission,
                    'submission_admin': actual_admin}
    points_possible = submission.project.points_possible(
        include_hidden=submission_admin)
    if submission_admin:
        # Admins see full diffs with no reveal limit.
        diff_renderer = HTMLDiff(num_reveal_limit=None,
                                 points_possible=points_possible)
    else:
        diff_renderer = HTMLDiff(points_possible=points_possible)
    for tcr in submission.test_case_results:
        # Hidden testables' results are only rendered for admins.
        if submission_admin or not tcr.test_case.testable.is_hidden:
            diff_renderer.add_renderable(prepare_renderable(request, tcr,
                                                            submission_admin))
    if submission.verification_results:
        mapping = submission.file_mapping()
        # Split the submitted files into recognized vs. extra files.
        extra_files = {x: mapping[x] for x in
                       submission.verification_results.extra_filenames}
        files = {x.filename: x.file for x in submission.files
                 if x.filename not in extra_files}
        warnings = submission.verification_results.warnings
        pending = submission.testables_pending(prune=not submission_admin)
        # Build all testables' statuses
        # Testables that failed verification do not have a TestableResult
        by_testable = {x.testable: x for x in submission.testable_results}
        testable_issues = []
        # Add testables which have issues (verification or build)
        for testable in (set(submission.project.testables) - pending):
            if submission_admin or not testable.is_hidden:
                ts = TestableStatus(testable, by_testable.get(testable),
                                    submission.verification_results.errors)
                if ts.issue:
                    testable_issues.append(ts)
    else:
        extra_files = files = pending = warnings = None
        testable_issues = []
    if submission.testables_succeeded():
        # Decode utf-8 and ignore errors until the data is diffed in unicode.
        diff_table = diff_renderer.make_whole_file().decode('utf-8', 'ignore')
    else:
        diff_table = None
    # Do this after we've potentially updated the session
    prev_sub, next_sub = prev_next_submission(submission)
    if submission_admin:
        prev_group, next_group = prev_next_group(submission.project,
                                                submission.group)
    else:
        prev_group = next_group = None
    return {'diff_table': diff_table,
            'extra_files': extra_files,
            'files': files,
            'next_sub': next_sub,
            'next_group': next_group,
            'pending': pending,
            'prev_sub': prev_sub,
            'prev_group': prev_group,
            'submission': submission,
            'submission_admin': submission_admin,
            'testable_issues': testable_issues,
            'warnings': warnings}
@view_config(route_name='test_case', request_method='PUT',
             permission='authenticated', renderer='json')
@validate(name=String('name', min_length=1),
          args=String('args', min_length=1),
          expected=ViewableDBThing('expected_id', File, optional=True),
          hide_expected=TextNumber('hide_expected', min_value=0, max_value=1,
                                   optional=True),
          output_filename=String('output_filename', min_length=1,
                                 optional=True),
          output_source=OUTPUT_SOURCE, output_type=OUTPUT_TYPE,
          points=TextNumber('points'),
          stdin=ViewableDBThing('stdin_id', File, optional=True),
          testable=EditableDBThing('testable_id', Testable))
@test_case_verification
def test_case_create(request, name, args, expected, hide_expected,
                     output_filename, output_source, output_type, points,
                     stdin, testable):
    """Create a new test case under ``testable`` and redirect to project edit."""
    new_case = TestCase(name=name, args=args, expected=expected,
                        hide_expected=bool(hide_expected),
                        output_filename=output_filename,
                        output_type=output_type, points=points,
                        source=output_source, stdin=stdin, testable=testable)
    Session.add(new_case)
    try:
        Session.flush()
    except IntegrityError:
        # Test case names are unique within a testable.
        raise HTTPConflict('That name already exists for the testable')
    destination = request.route_path('project_edit',
                                     project_id=testable.project.id)
    return http_created(request, redir_location=destination)
@view_config(route_name='test_case_item', request_method='DELETE',
             permission='authenticated', renderer='json')
@validate(test_case=EditableDBThing('test_case_id', TestCase,
                                    source=MATCHDICT))
def test_case_delete(request, test_case):
    """Delete a test case and refresh its testable's point total."""
    # Capture the parent before the ORM delete severs the relationship.
    parent = test_case.testable
    destination = request.route_path('project_edit',
                                     project_id=parent.project.id)
    request.session.flash('Deleted TestCase {0}.'.format(test_case.name),
                          'successes')
    Session.delete(test_case)
    # The testable's points no longer include the deleted case.
    parent.update_points()
    return http_ok(request, redir_location=destination)
@view_config(route_name='test_case_item', request_method='POST',
             permission='authenticated', renderer='json')
@validate(name=String('name', min_length=1),
          args=String('args', min_length=1),
          expected=ViewableDBThing('expected_id', File, optional=True),
          hide_expected=TextNumber('hide_expected', min_value=0, max_value=1,
                                   optional=True),
          output_filename=String('output_filename', min_length=1,
                                 optional=True),
          output_source=OUTPUT_SOURCE, output_type=OUTPUT_TYPE,
          points=TextNumber('points'),
          stdin=ViewableDBThing('stdin_id', File, optional=True),
          test_case=EditableDBThing('test_case_id', TestCase,
                                    source=MATCHDICT))
@test_case_verification
def test_case_update(request, name, args, expected, hide_expected,
                     output_filename, output_source, output_type, points,
                     stdin, test_case):
    """Update a test case's settings; respond "nothing to change" on a no-op."""
    if not test_case.update(name=name, args=args, expected=expected,
                            hide_expected=bool(hide_expected),
                            output_filename=output_filename,
                            output_type=output_type, points=points,
                            source=output_source, stdin=stdin):
        return http_ok(request, message='Nothing to change')
    try:
        Session.flush()
    except IntegrityError:
        # Test case names are unique within a testable.
        raise HTTPConflict('That name already exists for the testable')
    # Update the testable point score
    test_case.testable.update_points()
    request.session.flash('Updated TestCase {0}.'.format(test_case.name),
                          'successes')
    redir_location = request.route_path(
        'project_edit', project_id=test_case.testable.project.id)
    return http_ok(request, redir_location=redir_location)
@view_config(route_name='testable', request_method='PUT',
             permission='authenticated', renderer='json')
@validate(name=String('name', min_length=1),
          is_hidden=TextNumber('is_hidden', min_value=0, max_value=1,
                               optional=True),
          make_target=String('make_target', min_length=1, optional=True),
          executable=String('executable', min_length=1),
          build_file_ids=List('build_file_ids', TextNumber('', min_value=0),
                              optional=True),
          execution_file_ids=List('execution_file_ids',
                                  TextNumber('', min_value=0), optional=True),
          file_verifier_ids=List('file_verifier_ids',
                                 TextNumber('', min_value=0), optional=True),
          project=EditableDBThing('project_id', Project))
def testable_create(request, name, is_hidden, make_target, executable,
                    build_file_ids, execution_file_ids, file_verifier_ids,
                    project):
    """Create a testable (test group) for a project from submitted ids."""
    if make_target and not project.makefile:
        msg = 'make_target cannot be specified without a make file'
        raise HTTPBadRequest(msg)
    try:
        # Verify the ids actually exist and are associated with the project
        build_files = fetch_request_ids(build_file_ids, BuildFile,
                                          'build_file_id',
                                          project.build_files)
        execution_files = fetch_request_ids(execution_file_ids, ExecutionFile,
                                            'execution_file_id')
        file_verifiers = fetch_request_ids(file_verifier_ids, FileVerifier,
                                           'file_verifier_id',
                                           project.file_verifiers)
    except InvalidId as exc:
        # Report which kind of id failed to resolve.
        raise HTTPBadRequest('Invalid {0}'.format(exc.message))
    testable = Testable(name=name, is_hidden=bool(is_hidden),
                        make_target=make_target,
                        executable=executable, project=project,
                        build_files=build_files,
                        execution_files=execution_files,
                        file_verifiers=file_verifiers)
    redir_location = request.route_path('project_edit', project_id=project.id)
    Session.add(testable)
    try:
        Session.flush()
    except IntegrityError:
        # Testable names are unique within a project.
        raise HTTPConflict('That name already exists for the project')
    return http_created(request, redir_location=redir_location,
                        testable_id=testable.id)
@view_config(route_name='testable_item', request_method='POST',
             permission='authenticated', renderer='json')
@validate(name=String('name', min_length=1),
          is_hidden=TextNumber('is_hidden', min_value=0, max_value=1,
                               optional=True),
          make_target=String('make_target', min_length=1, optional=True),
          executable=String('executable', min_length=1),
          build_file_ids=List('build_file_ids', TextNumber('', min_value=0),
                              optional=True),
          execution_file_ids=List('execution_file_ids',
                                  TextNumber('', min_value=0), optional=True),
          file_verifier_ids=List('file_verifier_ids',
                                 TextNumber('', min_value=0), optional=True),
          testable=EditableDBThing('testable_id', Testable, source=MATCHDICT))
def testable_edit(request, name, is_hidden, make_target, executable,
                  build_file_ids, execution_file_ids, file_verifier_ids,
                  testable):
    """Update a testable's settings; respond "nothing to change" on a no-op."""
    if make_target and not testable.project.makefile:
        msg = 'make_target cannot be specified without a make file'
        raise HTTPBadRequest(msg)
    try:
        # Verify the ids actually exist and are associated with the project
        build_files = fetch_request_ids(build_file_ids, BuildFile,
                                          'build_file_id',
                                          testable.project.build_files)
        execution_files = fetch_request_ids(execution_file_ids, ExecutionFile,
                                            'execution_file_id')
        file_verifiers = fetch_request_ids(file_verifier_ids, FileVerifier,
                                           'file_verifier_id',
                                           testable.project.file_verifiers)
    except InvalidId as exc:
        raise HTTPBadRequest('Invalid {0}'.format(exc.message))
    Session.autoflush = False  # Don't flush while testing for changes
    if not testable.update(_ignore_order=True, is_hidden=bool(is_hidden),
                           name=name, make_target=make_target,
                           executable=executable,
                           build_files=build_files,
                           execution_files=execution_files,
                           file_verifiers=file_verifiers):
        return http_ok(request, message='Nothing to change')
    try:
        Session.flush()
    except IntegrityError:
        # Testable names are unique within a project.
        raise HTTPConflict('That name already exists for the project')
    request.session.flash('Updated Testable {0}.'.format(testable.name),
                          'successes')
    redir_location = request.route_path('project_edit',
                                        project_id=testable.project.id)
    return http_ok(request, redir_location=redir_location)
@view_config(route_name='testable_item', request_method='DELETE',
             permission='authenticated', renderer='json')
@validate(testable=EditableDBThing('testable_id', Testable, source=MATCHDICT))
def testable_delete(request, testable):
    """Delete a testable and return to the project edit page."""
    destination = request.route_path('project_edit',
                                     project_id=testable.project.id)
    request.session.flash('Deleted Testable {0}.'.format(testable.name),
                          'successes')
    Session.delete(testable)
    return http_ok(request, redir_location=destination)
@view_config(route_name='user', request_method='PUT', renderer='json')
@validate(identity=UmailAddress('email', min_length=16, max_length=64),
          verification=String('verification'))
def user_create(request, identity, verification):
    """Create an account from a validated umail identity tuple."""
    # The umail validator yields a (username, display name) pair.
    username, name = identity
    return add_user(request, name, username, verification)
@view_config(route_name='user', request_method='ADMINPUT', renderer='json',
             permission='admin')
@validate(name=String('name', min_length=5),
          username=String('email', min_length=6, max_length=64),
          verification=String('verification'))
def user_create_special(request, name, username, verification):
    """Admin-only: create an account for an arbitrary e-mail address."""
    redirect = request.route_path('user_new_special')
    return add_user(request, name, username, verification, redirect)
@view_config(route_name='user_join', request_method='GET',
             permission='authenticated',
             renderer='templates/forms/class_join_list.pt')
def user_join(request):
    """List the unlocked classes the user has not yet joined."""
    open_classes = frozenset(Class.query_by(is_locked=False).all())
    joinable = open_classes - frozenset(request.user.classes)
    return {'classes': sorted(joinable)}
@view_config(route_name='user_new', request_method='GET',
             renderer='templates/forms/user_create.pt')
def user_edit(request):
    """Render the account-creation form; the template needs no data."""
    return {}
@view_config(route_name='user_new_special', request_method='GET',
             renderer='templates/forms/user_create_special.pt',
             permission='admin')
def user_edit_special(request):
    """Render the admin-only account-creation form; no template data needed."""
    return {}
@view_config(route_name='user_item', request_method='GET',
             renderer='templates/user_view.pt', permission='authenticated')
@validate(user=ViewableDBThing('username', User, fetch_by='username',
                               validator=String('username'), source=MATCHDICT))
def user_view(request, user):
    """Render a user's page: their recent submissions and, for class
    admins, recent submissions to the classes they administer."""
    # Ids of every group the user belongs to (across all projects).
    user_groups = [x.group_id for x in Session.query(UserToGroup)
                   .filter(UserToGroup.user == user).all()]
    admin_subs = user_subs = None
    if user_groups:
        # The user's 10 most recent submissions, newest first.
        user_subs = (Submission.query_by()
                     .filter(Submission.group_id.in_(user_groups))
                     .order_by(Submission.created_at.desc()).limit(10).all())
    admin_classes = user.classes_can_admin()
    if admin_classes:
        class_ids = [x.id for x in admin_classes]
        # All projects belonging to the administered classes.
        class_projs = [x.id for x in Project.query_by()
                       .filter(Project.class_id.in_(class_ids))
                       .all()]
        if class_projs:
            # The 10 most recent submissions to those projects.
            admin_subs = (Submission.query_by()
                          .filter(Submission.project_id.in_(class_projs))
                          .order_by(Submission.created_at.desc()).limit(10)
                          .all())
    return {'name': user.name,
            'user_subs': user_subs,
            'classes_taking': sorted(user.classes),
            'admin_subs': admin_subs,
            'admin_classes': admin_classes}
@view_config(route_name='zipfile_download', request_method='GET',
             permission='authenticated')
@validate(submission=ViewableDBThing('submission_id', Submission,
                                     source=MATCHDICT))
def zipfile_download(request, submission):
    """Serve a submission's files (plus the project Makefile) as a zip."""
    file_dir = request.registry.settings['file_directory']

    def disk_path(item):
        # Stored files are addressed on disk by their sha1 digest.
        return File.file_path(file_dir, item.sha1)

    # Build a directory name from the group members and submission id.
    users = submission.group.users_str.replace(' ', '_').replace(',', '-')
    base_path = '{0}_{1}'.format(users, submission.id)
    # include makefile and student submitted files
    entries = [(os.path.join(base_path, 'Makefile'),
                disk_path(submission.project.makefile))]
    for filename, file_ in submission.file_mapping().items():
        entries.append((os.path.join(base_path, filename), disk_path(file_)))
    return zip_response(request, base_path + '.zip', entries)
| [
"pyramid.view.forbidden_view_config",
"pyramid.httpexceptions.HTTPBadRequest",
"pyramid.httpexceptions.HTTPNotFound",
"zipfile.ZipFile",
"pyramid.view.notfound_view_config",
"pyramid_addons.validation.TextNumber",
"pyramid.httpexceptions.HTTPConflict",
"hashlib.sha1",
"numpy.mean",
"numpy.histogra... | [((2140, 2189), 'pyramid_addons.validation.Enum', 'Enum', (['"""output_source"""', '"""stdout"""', '"""stderr"""', '"""file"""'], {}), "('output_source', 'stdout', 'stderr', 'file')\n", (2144, 2189), False, 'from pyramid_addons.validation import EmailAddress, Enum, List, Or, String, RegexString, TextNumber, WhiteSpaceString, validate, SOURCE_GET, SOURCE_MATCHDICT as MATCHDICT\n'), ((2204, 2248), 'pyramid_addons.validation.Enum', 'Enum', (['"""output_type"""', '"""diff"""', '"""image"""', '"""text"""'], {}), "('output_type', 'diff', 'image', 'text')\n", (2208, 2248), False, 'from pyramid_addons.validation import EmailAddress, Enum, List, Or, String, RegexString, TextNumber, WhiteSpaceString, validate, SOURCE_GET, SOURCE_MATCHDICT as MATCHDICT\n'), ((2266, 2331), 'pyramid_addons.validation.String', 'String', (['"""sha1sum"""'], {'min_length': '(40)', 'max_length': '(40)', 'source': 'MATCHDICT'}), "('sha1sum', min_length=40, max_length=40, source=MATCHDICT)\n", (2272, 2331), False, 'from pyramid_addons.validation import EmailAddress, Enum, List, Or, String, RegexString, TextNumber, WhiteSpaceString, validate, SOURCE_GET, SOURCE_MATCHDICT as MATCHDICT\n'), ((2373, 2436), 'pyramid_addons.validation.String', 'String', (['"""token"""'], {'min_length': '(36)', 'max_length': '(36)', 'source': 'MATCHDICT'}), "('token', min_length=36, max_length=36, source=MATCHDICT)\n", (2379, 2436), False, 'from pyramid_addons.validation import EmailAddress, Enum, List, Or, String, RegexString, TextNumber, WhiteSpaceString, validate, SOURCE_GET, SOURCE_MATCHDICT as MATCHDICT\n'), ((2723, 2771), 'pyramid.view.forbidden_view_config', 'forbidden_view_config', ([], {'xhr': '(True)', 'renderer': '"""json"""'}), "(xhr=True, renderer='json')\n", (2744, 2771), False, 'from pyramid.view import forbidden_view_config, notfound_view_config, view_config\n'), ((2773, 2820), 'pyramid.view.notfound_view_config', 'notfound_view_config', ([], {'xhr': '(True)', 'renderer': '"""json"""'}), 
"(xhr=True, renderer='json')\n", (2793, 2820), False, 'from pyramid.view import forbidden_view_config, notfound_view_config, view_config\n'), ((2822, 2879), 'pyramid.view.view_config', 'view_config', ([], {'context': 'HTTPError', 'xhr': '(True)', 'renderer': '"""json"""'}), "(context=HTTPError, xhr=True, renderer='json')\n", (2833, 2879), False, 'from pyramid.view import forbidden_view_config, notfound_view_config, view_config\n'), ((2881, 2935), 'pyramid.view.view_config', 'view_config', ([], {'context': 'HTTPOk', 'xhr': '(True)', 'renderer': '"""json"""'}), "(context=HTTPOk, xhr=True, renderer='json')\n", (2892, 2935), False, 'from pyramid.view import forbidden_view_config, notfound_view_config, view_config\n'), ((2937, 3000), 'pyramid.view.view_config', 'view_config', ([], {'context': 'HTTPRedirection', 'xhr': '(True)', 'renderer': '"""json"""'}), "(context=HTTPRedirection, xhr=True, renderer='json')\n", (2948, 3000), False, 'from pyramid.view import forbidden_view_config, notfound_view_config, view_config\n'), ((3270, 3300), 'pyramid.view.view_config', 'view_config', ([], {'context': 'HTTPError'}), '(context=HTTPError)\n', (3281, 3300), False, 'from pyramid.view import forbidden_view_config, notfound_view_config, view_config\n'), ((3302, 3329), 'pyramid.view.view_config', 'view_config', ([], {'context': 'HTTPOk'}), '(context=HTTPOk)\n', (3313, 3329), False, 'from pyramid.view import forbidden_view_config, notfound_view_config, view_config\n'), ((3331, 3367), 'pyramid.view.view_config', 'view_config', ([], {'context': 'HTTPRedirection'}), '(context=HTTPRedirection)\n', (3342, 3367), False, 'from pyramid.view import forbidden_view_config, notfound_view_config, view_config\n'), ((3471, 3494), 'pyramid.view.forbidden_view_config', 'forbidden_view_config', ([], {}), '()\n', (3492, 3494), False, 'from pyramid.view import forbidden_view_config, notfound_view_config, view_config\n'), ((3784, 3806), 'pyramid.view.notfound_view_config', 'notfound_view_config', ([], {}), 
'()\n', (3804, 3806), False, 'from pyramid.view import forbidden_view_config, notfound_view_config, view_config\n'), ((3891, 3963), 'pyramid.view.view_config', 'view_config', ([], {'route_name': '"""robots"""', 'request_method': '"""GET"""', 'http_cache': '(86400)'}), "(route_name='robots', request_method='GET', http_cache=86400)\n", (3902, 3963), False, 'from pyramid.view import forbidden_view_config, notfound_view_config, view_config\n'), ((4097, 4205), 'pyramid.view.view_config', 'view_config', ([], {'route_name': '"""build_file"""', 'request_method': '"""PUT"""', 'permission': '"""authenticated"""', 'renderer': '"""json"""'}), "(route_name='build_file', request_method='PUT', permission=\n 'authenticated', renderer='json')\n", (4108, 4205), False, 'from pyramid.view import forbidden_view_config, notfound_view_config, view_config\n'), ((4513, 4628), 'pyramid.view.view_config', 'view_config', ([], {'route_name': '"""build_file_item"""', 'request_method': '"""DELETE"""', 'permission': '"""authenticated"""', 'renderer': '"""json"""'}), "(route_name='build_file_item', request_method='DELETE',\n permission='authenticated', renderer='json')\n", (4524, 4628), False, 'from pyramid.view import forbidden_view_config, notfound_view_config, view_config\n'), ((4858, 4935), 'pyramid.view.view_config', 'view_config', ([], {'route_name': '"""class.admins"""', 'renderer': '"""json"""', 'request_method': '"""PUT"""'}), "(route_name='class.admins', renderer='json', request_method='PUT')\n", (4869, 4935), False, 'from pyramid.view import forbidden_view_config, notfound_view_config, view_config\n'), ((5607, 5744), 'pyramid.view.view_config', 'view_config', ([], {'route_name': '"""class.admins"""', 'request_method': '"""GET"""', 'permission': '"""authenticated"""', 'renderer': '"""templates/forms/class_admins.pt"""'}), "(route_name='class.admins', request_method='GET', permission=\n 'authenticated', renderer='templates/forms/class_admins.pt')\n", (5618, 5744), False, 'from 
pyramid.view import forbidden_view_config, notfound_view_config, view_config\n'), ((5910, 6004), 'pyramid.view.view_config', 'view_config', ([], {'route_name': '"""class"""', 'request_method': '"""PUT"""', 'permission': '"""admin"""', 'renderer': '"""json"""'}), "(route_name='class', request_method='PUT', permission='admin',\n renderer='json')\n", (5921, 6004), False, 'from pyramid.view import forbidden_view_config, notfound_view_config, view_config\n'), ((6458, 6584), 'pyramid.view.view_config', 'view_config', ([], {'route_name': '"""class_new"""', 'request_method': '"""GET"""', 'renderer': '"""templates/forms/class_create.pt"""', 'permission': '"""admin"""'}), "(route_name='class_new', request_method='GET', renderer=\n 'templates/forms/class_create.pt', permission='admin')\n", (6469, 6584), False, 'from pyramid.view import forbidden_view_config, notfound_view_config, view_config\n'), ((6676, 6785), 'pyramid.view.view_config', 'view_config', ([], {'route_name': '"""class_item"""', 'request_method': '"""JOIN"""', 'permission': '"""authenticated"""', 'renderer': '"""json"""'}), "(route_name='class_item', request_method='JOIN', permission=\n 'authenticated', renderer='json')\n", (6687, 6785), False, 'from pyramid.view import forbidden_view_config, notfound_view_config, view_config\n'), ((7241, 7368), 'pyramid.view.view_config', 'view_config', ([], {'route_name': '"""class_item"""', 'request_method': '"""GET"""', 'renderer': '"""templates/class_view.pt"""', 'permission': '"""authenticated"""'}), "(route_name='class_item', request_method='GET', renderer=\n 'templates/class_view.pt', permission='authenticated')\n", (7252, 7368), False, 'from pyramid.view import forbidden_view_config, notfound_view_config, view_config\n'), ((7982, 8094), 'pyramid.view.view_config', 'view_config', ([], {'route_name': '"""execution_file"""', 'request_method': '"""PUT"""', 'permission': '"""authenticated"""', 'renderer': '"""json"""'}), "(route_name='execution_file', request_method='PUT', 
permission=\n 'authenticated', renderer='json')\n", (7993, 8094), False, 'from pyramid.view import forbidden_view_config, notfound_view_config, view_config\n'), ((8441, 8560), 'pyramid.view.view_config', 'view_config', ([], {'route_name': '"""execution_file_item"""', 'request_method': '"""DELETE"""', 'permission': '"""authenticated"""', 'renderer': '"""json"""'}), "(route_name='execution_file_item', request_method='DELETE',\n permission='authenticated', renderer='json')\n", (8452, 8560), False, 'from pyramid.view import forbidden_view_config, notfound_view_config, view_config\n'), ((8818, 8924), 'pyramid.view.view_config', 'view_config', ([], {'route_name': '"""file_item"""', 'request_method': '"""PUT"""', 'renderer': '"""json"""', 'permission': '"""authenticated"""'}), "(route_name='file_item', request_method='PUT', renderer='json',\n permission='authenticated')\n", (8829, 8924), False, 'from pyramid.view import forbidden_view_config, notfound_view_config, view_config\n'), ((9593, 9701), 'pyramid.view.view_config', 'view_config', ([], {'route_name': '"""file_item"""', 'request_method': '"""INFO"""', 'permission': '"""authenticated"""', 'renderer': '"""json"""'}), "(route_name='file_item', request_method='INFO', permission=\n 'authenticated', renderer='json')\n", (9604, 9701), False, 'from pyramid.view import forbidden_view_config, notfound_view_config, view_config\n'), ((9967, 10092), 'pyramid.view.view_config', 'view_config', ([], {'route_name': '"""file_item"""', 'request_method': '"""GET"""', 'permission': '"""authenticated"""', 'renderer': '"""templates/file_view.pt"""'}), "(route_name='file_item', request_method='GET', permission=\n 'authenticated', renderer='templates/file_view.pt')\n", (9978, 10092), False, 'from pyramid.view import forbidden_view_config, notfound_view_config, view_config\n'), ((11060, 11171), 'pyramid.view.view_config', 'view_config', ([], {'route_name': '"""file_verifier"""', 'request_method': '"""PUT"""', 'permission': 
'"""authenticated"""', 'renderer': '"""json"""'}), "(route_name='file_verifier', request_method='PUT', permission=\n 'authenticated', renderer='json')\n", (11071, 11171), False, 'from pyramid.view import forbidden_view_config, notfound_view_config, view_config\n'), ((13119, 13237), 'pyramid.view.view_config', 'view_config', ([], {'route_name': '"""file_verifier_item"""', 'request_method': '"""DELETE"""', 'permission': '"""authenticated"""', 'renderer': '"""json"""'}), "(route_name='file_verifier_item', request_method='DELETE',\n permission='authenticated', renderer='json')\n", (13130, 13237), False, 'from pyramid.view import forbidden_view_config, notfound_view_config, view_config\n'), ((13488, 13604), 'pyramid.view.view_config', 'view_config', ([], {'route_name': '"""file_verifier_item"""', 'request_method': '"""POST"""', 'permission': '"""authenticated"""', 'renderer': '"""json"""'}), "(route_name='file_verifier_item', request_method='POST',\n permission='authenticated', renderer='json')\n", (13499, 13604), False, 'from pyramid.view import forbidden_view_config, notfound_view_config, view_config\n'), ((15800, 15852), 'pyramid.view.view_config', 'view_config', ([], {'route_name': '"""home"""', 'request_method': '"""GET"""'}), "(route_name='home', request_method='GET')\n", (15811, 15852), False, 'from pyramid.view import forbidden_view_config, notfound_view_config, view_config\n'), ((16062, 16141), 'pyramid.view.view_config', 'view_config', ([], {'route_name': '"""password_reset"""', 'request_method': '"""PUT"""', 'renderer': '"""json"""'}), "(route_name='password_reset', request_method='PUT', renderer='json')\n", (16073, 16141), False, 'from pyramid.view import forbidden_view_config, notfound_view_config, view_config\n'), ((17332, 17445), 'pyramid.view.view_config', 'view_config', ([], {'route_name': '"""password_reset"""', 'request_method': '"""GET"""', 'renderer': '"""templates/forms/password_reset.pt"""'}), "(route_name='password_reset', request_method='GET', 
renderer=\n 'templates/forms/password_reset.pt')\n", (17343, 17445), False, 'from pyramid.view import forbidden_view_config, notfound_view_config, view_config\n'), ((17505, 17627), 'pyramid.view.view_config', 'view_config', ([], {'route_name': '"""password_reset_item"""', 'request_method': '"""GET"""', 'renderer': '"""templates/forms/password_reset_item.pt"""'}), "(route_name='password_reset_item', request_method='GET',\n renderer='templates/forms/password_reset_item.pt')\n", (17516, 17627), False, 'from pyramid.view import forbidden_view_config, notfound_view_config, view_config\n'), ((17873, 17961), 'pyramid.view.view_config', 'view_config', ([], {'route_name': '"""password_reset_item"""', 'renderer': '"""json"""', 'request_method': '"""PUT"""'}), "(route_name='password_reset_item', renderer='json',\n request_method='PUT')\n", (17884, 17961), False, 'from pyramid.view import forbidden_view_config, notfound_view_config, view_config\n'), ((18778, 18885), 'pyramid.view.view_config', 'view_config', ([], {'route_name': '"""project"""', 'request_method': '"""CLONE"""', 'permission': '"""authenticated"""', 'renderer': '"""json"""'}), "(route_name='project', request_method='CLONE', permission=\n 'authenticated', renderer='json')\n", (18789, 18885), False, 'from pyramid.view import forbidden_view_config, notfound_view_config, view_config\n'), ((21190, 21295), 'pyramid.view.view_config', 'view_config', ([], {'route_name': '"""project"""', 'request_method': '"""PUT"""', 'permission': '"""authenticated"""', 'renderer': '"""json"""'}), "(route_name='project', request_method='PUT', permission=\n 'authenticated', renderer='json')\n", (21201, 21295), False, 'from pyramid.view import forbidden_view_config, notfound_view_config, view_config\n'), ((21959, 22060), 'pyramid.view.view_config', 'view_config', ([], {'route_name': '"""project_item_download"""', 'request_method': '"""GET"""', 'permission': '"""authenticated"""'}), "(route_name='project_item_download', 
request_method='GET',\n permission='authenticated')\n", (21970, 22060), False, 'from pyramid.view import forbidden_view_config, notfound_view_config, view_config\n'), ((22759, 22901), 'pyramid.view.view_config', 'view_config', ([], {'route_name': '"""project_edit"""', 'renderer': '"""templates/forms/project_edit.pt"""', 'request_method': '"""GET"""', 'permission': '"""authenticated"""'}), "(route_name='project_edit', renderer=\n 'templates/forms/project_edit.pt', request_method='GET', permission=\n 'authenticated')\n", (22770, 22901), False, 'from pyramid.view import forbidden_view_config, notfound_view_config, view_config\n'), ((23324, 23436), 'pyramid.view.view_config', 'view_config', ([], {'route_name': '"""project_export"""', 'request_method': '"""GET"""', 'permission': '"""authenticated"""', 'renderer': '"""json"""'}), "(route_name='project_export', request_method='GET', permission=\n 'authenticated', renderer='json')\n", (23335, 23436), False, 'from pyramid.view import forbidden_view_config, notfound_view_config, view_config\n'), ((27422, 27535), 'pyramid.view.view_config', 'view_config', ([], {'route_name': '"""project_import"""', 'request_method': '"""POST"""', 'permission': '"""authenticated"""', 'renderer': '"""json"""'}), "(route_name='project_import', request_method='POST', permission=\n 'authenticated', renderer='json')\n", (27433, 27535), False, 'from pyramid.view import forbidden_view_config, notfound_view_config, view_config\n'), ((43064, 43176), 'pyramid.view.view_config', 'view_config', ([], {'route_name': '"""project_group"""', 'request_method': '"""JOIN"""', 'permission': '"""authenticated"""', 'renderer': '"""json"""'}), "(route_name='project_group', request_method='JOIN', permission=\n 'authenticated', renderer='json')\n", (43075, 43176), False, 'from pyramid.view import forbidden_view_config, notfound_view_config, view_config\n'), ((44162, 44297), 'pyramid.view.view_config', 'view_config', ([], {'route_name': '"""group_admin"""', 
'request_method': '"""GET"""', 'renderer': '"""templates/forms/group_admin.pt"""', 'permission': '"""authenticated"""'}), "(route_name='group_admin', request_method='GET', renderer=\n 'templates/forms/group_admin.pt', permission='authenticated')\n", (44173, 44297), False, 'from pyramid.view import forbidden_view_config, notfound_view_config, view_config\n'), ((44777, 44864), 'pyramid.view.view_config', 'view_config', ([], {'route_name': '"""project_group_item"""', 'renderer': '"""json"""', 'request_method': '"""PUT"""'}), "(route_name='project_group_item', renderer='json',\n request_method='PUT')\n", (44788, 44864), False, 'from pyramid.view import forbidden_view_config, notfound_view_config, view_config\n'), ((45837, 45915), 'pyramid.view.view_config', 'view_config', ([], {'route_name': '"""project_group"""', 'renderer': '"""json"""', 'request_method': '"""PUT"""'}), "(route_name='project_group', renderer='json', request_method='PUT')\n", (45848, 45915), False, 'from pyramid.view import forbidden_view_config, notfound_view_config, view_config\n'), ((47733, 47823), 'pyramid.view.view_config', 'view_config', ([], {'route_name': '"""project_group_item"""', 'renderer': '"""json"""', 'request_method': '"""DELETE"""'}), "(route_name='project_group_item', renderer='json',\n request_method='DELETE')\n", (47744, 47823), False, 'from pyramid.view import forbidden_view_config, notfound_view_config, view_config\n'), ((48511, 48650), 'pyramid.view.view_config', 'view_config', ([], {'route_name': '"""project_group"""', 'request_method': '"""GET"""', 'renderer': '"""templates/forms/project_group.pt"""', 'permission': '"""authenticated"""'}), "(route_name='project_group', request_method='GET', renderer=\n 'templates/forms/project_group.pt', permission='authenticated')\n", (48522, 48650), False, 'from pyramid.view import forbidden_view_config, notfound_view_config, view_config\n'), ((49303, 49413), 'pyramid.view.view_config', 'view_config', ([], {'route_name': 
'"""project_info"""', 'request_method': '"""GET"""', 'permission': '"""authenticated"""', 'renderer': '"""json"""'}), "(route_name='project_info', request_method='GET', permission=\n 'authenticated', renderer='json')\n", (49314, 49413), False, 'from pyramid.view import forbidden_view_config, notfound_view_config, view_config\n'), ((50338, 50473), 'pyramid.view.view_config', 'view_config', ([], {'route_name': '"""project_new"""', 'request_method': '"""GET"""', 'renderer': '"""templates/forms/project_new.pt"""', 'permission': '"""authenticated"""'}), "(route_name='project_new', request_method='GET', renderer=\n 'templates/forms/project_new.pt', permission='authenticated')\n", (50349, 50473), False, 'from pyramid.view import forbidden_view_config, notfound_view_config, view_config\n'), ((50872, 50982), 'pyramid.view.view_config', 'view_config', ([], {'route_name': '"""project_edit"""', 'renderer': '"""json"""', 'request_method': '"""PUT"""', 'permission': '"""authenticated"""'}), "(route_name='project_edit', renderer='json', request_method=\n 'PUT', permission='authenticated')\n", (50883, 50982), False, 'from pyramid.view import forbidden_view_config, notfound_view_config, view_config\n'), ((51554, 51649), 'pyramid.view.view_config', 'view_config', ([], {'route_name': '"""project_scores"""', 'request_method': '"""GET"""', 'permission': '"""authenticated"""'}), "(route_name='project_scores', request_method='GET', permission=\n 'authenticated')\n", (51565, 51649), False, 'from pyramid.view import forbidden_view_config, notfound_view_config, view_config\n'), ((52404, 52520), 'pyramid.view.view_config', 'view_config', ([], {'route_name': '"""submission_item_gen"""', 'renderer': '"""json"""', 'request_method': '"""PUT"""', 'permission': '"""authenticated"""'}), "(route_name='submission_item_gen', renderer='json',\n request_method='PUT', permission='authenticated')\n", (52415, 52520), False, 'from pyramid.view import forbidden_view_config, notfound_view_config, 
view_config\n'), ((54270, 54388), 'pyramid.view.view_config', 'view_config', ([], {'route_name': '"""project_item_summary"""', 'request_method': '"""POST"""', 'permission': '"""authenticated"""', 'renderer': '"""json"""'}), "(route_name='project_item_summary', request_method='POST',\n permission='authenticated', renderer='json')\n", (54281, 54388), False, 'from pyramid.view import forbidden_view_config, notfound_view_config, view_config\n'), ((55826, 55989), 'pyramid.view.view_config', 'view_config', ([], {'route_name': '"""project_item_detailed"""', 'request_method': "('GET', 'HEAD')", 'renderer': '"""templates/project_view_detailed.pt"""', 'permission': '"""authenticated"""'}), "(route_name='project_item_detailed', request_method=('GET',\n 'HEAD'), renderer='templates/project_view_detailed.pt', permission=\n 'authenticated')\n", (55837, 55989), False, 'from pyramid.view import forbidden_view_config, notfound_view_config, view_config\n'), ((56978, 57146), 'pyramid.view.view_config', 'view_config', ([], {'route_name': '"""project_item_detailed_user"""', 'renderer': '"""templates/project_view_detailed.pt"""', 'request_method': "('GET', 'HEAD')", 'permission': '"""authenticated"""'}), "(route_name='project_item_detailed_user', renderer=\n 'templates/project_view_detailed.pt', request_method=('GET', 'HEAD'),\n permission='authenticated')\n", (56989, 57146), False, 'from pyramid.view import forbidden_view_config, notfound_view_config, view_config\n'), ((58027, 58188), 'pyramid.view.view_config', 'view_config', ([], {'route_name': '"""project_item_summary"""', 'request_method': "('GET', 'HEAD')", 'renderer': '"""templates/project_view_summary.pt"""', 'permission': '"""authenticated"""'}), "(route_name='project_item_summary', request_method=('GET',\n 'HEAD'), renderer='templates/project_view_summary.pt', permission=\n 'authenticated')\n", (58038, 58188), False, 'from pyramid.view import forbidden_view_config, notfound_view_config, view_config\n'), ((60284, 60356), 
'pyramid.view.view_config', 'view_config', ([], {'route_name': '"""session"""', 'request_method': '"""PUT"""', 'renderer': '"""json"""'}), "(route_name='session', request_method='PUT', renderer='json')\n", (60295, 60356), False, 'from pyramid.view import forbidden_view_config, notfound_view_config, view_config\n'), ((61144, 61251), 'pyramid.view.view_config', 'view_config', ([], {'route_name': '"""session"""', 'request_method': '"""DELETE"""', 'renderer': '"""json"""', 'permission': '"""authenticated"""'}), "(route_name='session', request_method='DELETE', renderer='json',\n permission='authenticated')\n", (61155, 61251), False, 'from pyramid.view import forbidden_view_config, notfound_view_config, view_config\n'), ((61435, 61532), 'pyramid.view.view_config', 'view_config', ([], {'route_name': '"""session"""', 'request_method': '"""GET"""', 'renderer': '"""templates/forms/login.pt"""'}), "(route_name='session', request_method='GET', renderer=\n 'templates/forms/login.pt')\n", (61446, 61532), False, 'from pyramid.view import forbidden_view_config, notfound_view_config, view_config\n'), ((61843, 61950), 'pyramid.view.view_config', 'view_config', ([], {'route_name': '"""submission"""', 'renderer': '"""json"""', 'request_method': '"""PUT"""', 'permission': '"""authenticated"""'}), "(route_name='submission', renderer='json', request_method='PUT',\n permission='authenticated')\n", (61854, 61950), False, 'from pyramid.view import forbidden_view_config, notfound_view_config, view_config\n'), ((64189, 64330), 'pyramid.view.view_config', 'view_config', ([], {'route_name': '"""submission_new"""', 'request_method': '"""GET"""', 'renderer': '"""templates/forms/submission_new.pt"""', 'permission': '"""authenticated"""'}), "(route_name='submission_new', request_method='GET', renderer=\n 'templates/forms/submission_new.pt', permission='authenticated')\n", (64200, 64330), False, 'from pyramid.view import forbidden_view_config, notfound_view_config, view_config\n'), ((64572, 64685), 
'pyramid.view.view_config', 'view_config', ([], {'route_name': '"""submission_item"""', 'renderer': '"""json"""', 'request_method': '"""PUT"""', 'permission': '"""authenticated"""'}), "(route_name='submission_item', renderer='json', request_method=\n 'PUT', permission='authenticated')\n", (64583, 64685), False, 'from pyramid.view import forbidden_view_config, notfound_view_config, view_config\n'), ((65046, 65183), 'pyramid.view.view_config', 'view_config', ([], {'route_name': '"""submission_item"""', 'request_method': '"""GET"""', 'renderer': '"""templates/submission_view.pt"""', 'permission': '"""authenticated"""'}), "(route_name='submission_item', request_method='GET', renderer=\n 'templates/submission_view.pt', permission='authenticated')\n", (65057, 65183), False, 'from pyramid.view import forbidden_view_config, notfound_view_config, view_config\n'), ((69086, 69193), 'pyramid.view.view_config', 'view_config', ([], {'route_name': '"""test_case"""', 'request_method': '"""PUT"""', 'permission': '"""authenticated"""', 'renderer': '"""json"""'}), "(route_name='test_case', request_method='PUT', permission=\n 'authenticated', renderer='json')\n", (69097, 69193), False, 'from pyramid.view import forbidden_view_config, notfound_view_config, view_config\n'), ((70724, 70838), 'pyramid.view.view_config', 'view_config', ([], {'route_name': '"""test_case_item"""', 'request_method': '"""DELETE"""', 'permission': '"""authenticated"""', 'renderer': '"""json"""'}), "(route_name='test_case_item', request_method='DELETE',\n permission='authenticated', renderer='json')\n", (70735, 70838), False, 'from pyramid.view import forbidden_view_config, notfound_view_config, view_config\n'), ((71420, 71533), 'pyramid.view.view_config', 'view_config', ([], {'route_name': '"""test_case_item"""', 'request_method': '"""POST"""', 'permission': '"""authenticated"""', 'renderer': '"""json"""'}), "(route_name='test_case_item', request_method='POST', permission=\n 'authenticated', 
renderer='json')\n", (71431, 71533), False, 'from pyramid.view import forbidden_view_config, notfound_view_config, view_config\n'), ((73316, 73422), 'pyramid.view.view_config', 'view_config', ([], {'route_name': '"""testable"""', 'request_method': '"""PUT"""', 'permission': '"""authenticated"""', 'renderer': '"""json"""'}), "(route_name='testable', request_method='PUT', permission=\n 'authenticated', renderer='json')\n", (73327, 73422), False, 'from pyramid.view import forbidden_view_config, notfound_view_config, view_config\n'), ((75899, 76011), 'pyramid.view.view_config', 'view_config', ([], {'route_name': '"""testable_item"""', 'request_method': '"""POST"""', 'permission': '"""authenticated"""', 'renderer': '"""json"""'}), "(route_name='testable_item', request_method='POST', permission=\n 'authenticated', renderer='json')\n", (75910, 76011), False, 'from pyramid.view import forbidden_view_config, notfound_view_config, view_config\n'), ((78765, 78879), 'pyramid.view.view_config', 'view_config', ([], {'route_name': '"""testable_item"""', 'request_method': '"""DELETE"""', 'permission': '"""authenticated"""', 'renderer': '"""json"""'}), "(route_name='testable_item', request_method='DELETE', permission\n ='authenticated', renderer='json')\n", (78776, 78879), False, 'from pyramid.view import forbidden_view_config, notfound_view_config, view_config\n'), ((79338, 79407), 'pyramid.view.view_config', 'view_config', ([], {'route_name': '"""user"""', 'request_method': '"""PUT"""', 'renderer': '"""json"""'}), "(route_name='user', request_method='PUT', renderer='json')\n", (79349, 79407), False, 'from pyramid.view import forbidden_view_config, notfound_view_config, view_config\n'), ((79669, 79767), 'pyramid.view.view_config', 'view_config', ([], {'route_name': '"""user"""', 'request_method': '"""ADMINPUT"""', 'renderer': '"""json"""', 'permission': '"""admin"""'}), "(route_name='user', request_method='ADMINPUT', renderer='json',\n permission='admin')\n", (79680, 79767), 
False, 'from pyramid.view import forbidden_view_config, notfound_view_config, view_config\n'), ((80120, 80257), 'pyramid.view.view_config', 'view_config', ([], {'route_name': '"""user_join"""', 'request_method': '"""GET"""', 'permission': '"""authenticated"""', 'renderer': '"""templates/forms/class_join_list.pt"""'}), "(route_name='user_join', request_method='GET', permission=\n 'authenticated', renderer='templates/forms/class_join_list.pt')\n", (80131, 80257), False, 'from pyramid.view import forbidden_view_config, notfound_view_config, view_config\n'), ((80584, 80688), 'pyramid.view.view_config', 'view_config', ([], {'route_name': '"""user_new"""', 'request_method': '"""GET"""', 'renderer': '"""templates/forms/user_create.pt"""'}), "(route_name='user_new', request_method='GET', renderer=\n 'templates/forms/user_create.pt')\n", (80595, 80688), False, 'from pyramid.view import forbidden_view_config, notfound_view_config, view_config\n'), ((80738, 80878), 'pyramid.view.view_config', 'view_config', ([], {'route_name': '"""user_new_special"""', 'request_method': '"""GET"""', 'renderer': '"""templates/forms/user_create_special.pt"""', 'permission': '"""admin"""'}), "(route_name='user_new_special', request_method='GET', renderer=\n 'templates/forms/user_create_special.pt', permission='admin')\n", (80749, 80878), False, 'from pyramid.view import forbidden_view_config, notfound_view_config, view_config\n'), ((80949, 81074), 'pyramid.view.view_config', 'view_config', ([], {'route_name': '"""user_item"""', 'request_method': '"""GET"""', 'renderer': '"""templates/user_view.pt"""', 'permission': '"""authenticated"""'}), "(route_name='user_item', request_method='GET', renderer=\n 'templates/user_view.pt', permission='authenticated')\n", (80960, 81074), False, 'from pyramid.view import forbidden_view_config, notfound_view_config, view_config\n'), ((82359, 82456), 'pyramid.view.view_config', 'view_config', ([], {'route_name': '"""zipfile_download"""', 'request_method': 
'"""GET"""', 'permission': '"""authenticated"""'}), "(route_name='zipfile_download', request_method='GET', permission\n ='authenticated')\n", (82370, 82456), False, 'from pyramid.view import forbidden_view_config, notfound_view_config, view_config\n'), ((3842, 3887), 'pyramid.response.Response', 'Response', (['"""Not Found"""'], {'status': '"""404 Not Found"""'}), "('Not Found', status='404 Not Found')\n", (3850, 3887), False, 'from pyramid.response import FileResponse, Response\n'), ((5559, 5603), 'pyramid_addons.helpers.http_ok', 'http_ok', (['request'], {'redir_location': 'request.url'}), '(request, redir_location=request.url)\n', (5566, 5603), False, 'from pyramid_addons.helpers import http_created, http_gone, http_ok\n'), ((7196, 7237), 'pyramid_addons.helpers.http_created', 'http_created', (['request'], {'redir_location': 'url'}), '(request, redir_location=url)\n', (7208, 7237), False, 'from pyramid_addons.helpers import http_created, http_gone, http_ok\n'), ((13063, 13115), 'pyramid_addons.helpers.http_created', 'http_created', (['request'], {'redir_location': 'redir_location'}), '(request, redir_location=redir_location)\n', (13075, 13115), False, 'from pyramid_addons.helpers import http_created, http_gone, http_ok\n'), ((15749, 15796), 'pyramid_addons.helpers.http_ok', 'http_ok', (['request'], {'redir_location': 'redir_location'}), '(request, redir_location=redir_location)\n', (15756, 15796), False, 'from pyramid_addons.helpers import http_created, http_gone, http_ok\n'), ((16035, 16058), 'pyramid.httpexceptions.HTTPFound', 'HTTPFound', ([], {'location': 'url'}), '(location=url)\n', (16044, 16058), False, 'from pyramid.httpexceptions import HTTPBadRequest, HTTPConflict, HTTPError, HTTPFound, HTTPNotFound, HTTPOk, HTTPRedirection, HTTPSeeOther\n'), ((18727, 18774), 'pyramid_addons.helpers.http_ok', 'http_ok', (['request'], {'redir_location': 'redir_location'}), '(request, redir_location=redir_location)\n', (18734, 18774), False, 'from pyramid_addons.helpers 
import http_created, http_gone, http_ok\n'), ((21134, 21186), 'pyramid_addons.helpers.http_created', 'http_created', (['request'], {'redir_location': 'redir_location'}), '(request, redir_location=redir_location)\n', (21146, 21186), False, 'from pyramid_addons.helpers import http_created, http_gone, http_ok\n'), ((21903, 21955), 'pyramid_addons.helpers.http_created', 'http_created', (['request'], {'redir_location': 'redir_location'}), '(request, redir_location=redir_location)\n', (21915, 21955), False, 'from pyramid_addons.helpers import http_created, http_gone, http_ok\n'), ((44111, 44158), 'pyramid_addons.helpers.http_ok', 'http_ok', (['request'], {'redir_location': 'redir_location'}), '(request, redir_location=redir_location)\n', (44118, 44158), False, 'from pyramid_addons.helpers import http_created, http_gone, http_ok\n'), ((45797, 45833), 'pyramid_addons.helpers.http_ok', 'http_ok', (['request'], {'redir_location': 'url'}), '(request, redir_location=url)\n', (45804, 45833), False, 'from pyramid_addons.helpers import http_created, http_gone, http_ok\n'), ((47685, 47729), 'pyramid_addons.helpers.http_ok', 'http_ok', (['request'], {'redir_location': 'request.url'}), '(request, redir_location=request.url)\n', (47692, 47729), False, 'from pyramid_addons.helpers import http_created, http_gone, http_ok\n'), ((48471, 48507), 'pyramid_addons.helpers.http_ok', 'http_ok', (['request'], {'redir_location': 'url'}), '(request, redir_location=url)\n', (48478, 48507), False, 'from pyramid_addons.helpers import http_created, http_gone, http_ok\n'), ((51506, 51550), 'pyramid_addons.helpers.http_ok', 'http_ok', (['request'], {'redir_location': 'request.url'}), '(request, redir_location=request.url)\n', (51513, 51550), False, 'from pyramid_addons.helpers import http_created, http_gone, http_ok\n'), ((54219, 54266), 'pyramid_addons.helpers.http_ok', 'http_ok', (['request'], {'redir_location': 'redir_location'}), '(request, redir_location=redir_location)\n', (54226, 54266), False, 
'from pyramid_addons.helpers import http_created, http_gone, http_ok\n'), ((55775, 55822), 'pyramid_addons.helpers.http_ok', 'http_ok', (['request'], {'redir_location': 'redir_location'}), '(request, redir_location=redir_location)\n', (55782, 55822), False, 'from pyramid_addons.helpers import http_created, http_gone, http_ok\n'), ((60893, 60919), 'pyramid.security.remember', 'remember', (['request', 'user.id'], {}), '(request, user.id)\n', (60901, 60919), False, 'from pyramid.security import forget, remember\n'), ((61082, 61140), 'pyramid_addons.helpers.http_created', 'http_created', (['request'], {'headers': 'headers', 'redir_location': 'url'}), '(request, headers=headers, redir_location=url)\n', (61094, 61140), False, 'from pyramid_addons.helpers import http_created, http_gone, http_ok\n'), ((61305, 61320), 'pyramid.security.forget', 'forget', (['request'], {}), '(request)\n', (61311, 61320), False, 'from pyramid.security import forget, remember\n'), ((63885, 63905), 'transaction.commit', 'transaction.commit', ([], {}), '()\n', (63903, 63905), False, 'import transaction\n'), ((64133, 64185), 'pyramid_addons.helpers.http_created', 'http_created', (['request'], {'redir_location': 'redir_location'}), '(request, redir_location=redir_location)\n', (64145, 64185), False, 'from pyramid_addons.helpers import http_created, http_gone, http_ok\n'), ((64998, 65042), 'pyramid_addons.helpers.http_ok', 'http_ok', (['request'], {'redir_location': 'request.url'}), '(request, redir_location=request.url)\n', (65005, 65042), False, 'from pyramid_addons.helpers import http_created, http_gone, http_ok\n'), ((70668, 70720), 'pyramid_addons.helpers.http_created', 'http_created', (['request'], {'redir_location': 'redir_location'}), '(request, redir_location=redir_location)\n', (70680, 70720), False, 'from pyramid_addons.helpers import http_created, http_gone, http_ok\n'), ((71369, 71416), 'pyramid_addons.helpers.http_ok', 'http_ok', (['request'], {'redir_location': 'redir_location'}), 
'(request, redir_location=redir_location)\n', (71376, 71416), False, 'from pyramid_addons.helpers import http_created, http_gone, http_ok\n'), ((73265, 73312), 'pyramid_addons.helpers.http_ok', 'http_ok', (['request'], {'redir_location': 'redir_location'}), '(request, redir_location=redir_location)\n', (73272, 73312), False, 'from pyramid_addons.helpers import http_created, http_gone, http_ok\n'), ((75794, 75871), 'pyramid_addons.helpers.http_created', 'http_created', (['request'], {'redir_location': 'redir_location', 'testable_id': 'testable.id'}), '(request, redir_location=redir_location, testable_id=testable.id)\n', (75806, 75871), False, 'from pyramid_addons.helpers import http_created, http_gone, http_ok\n'), ((78714, 78761), 'pyramid_addons.helpers.http_ok', 'http_ok', (['request'], {'redir_location': 'redir_location'}), '(request, redir_location=redir_location)\n', (78721, 78761), False, 'from pyramid_addons.helpers import http_created, http_gone, http_ok\n'), ((79287, 79334), 'pyramid_addons.helpers.http_ok', 'http_ok', (['request'], {'redir_location': 'redir_location'}), '(request, redir_location=redir_location)\n', (79294, 79334), False, 'from pyramid_addons.helpers import http_created, http_gone, http_ok\n'), ((4283, 4315), 'pyramid_addons.validation.String', 'String', (['"""filename"""'], {'min_length': '(1)'}), "('filename', min_length=1)\n", (4289, 4315), False, 'from pyramid_addons.validation import EmailAddress, Enum, List, Or, String, RegexString, TextNumber, WhiteSpaceString, validate, SOURCE_GET, SOURCE_MATCHDICT as MATCHDICT\n'), ((5218, 5278), 'pyramid.httpexceptions.HTTPConflict', 'HTTPConflict', (['"""That user is already an admin for the class."""'], {}), "('That user is already an admin for the class.')\n", (5230, 5278), False, 'from pyramid.httpexceptions import HTTPBadRequest, HTTPConflict, HTTPError, HTTPFound, HTTPNotFound, HTTPOk, HTTPRedirection, HTTPSeeOther\n'), ((6029, 6057), 'pyramid_addons.validation.String', 'String', 
(['"""name"""'], {'min_length': '(3)'}), "('name', min_length=3)\n", (6035, 6057), False, 'from pyramid_addons.validation import EmailAddress, Enum, List, Or, String, RegexString, TextNumber, WhiteSpaceString, validate, SOURCE_GET, SOURCE_MATCHDICT as MATCHDICT\n'), ((6932, 6963), 'pyramid.httpexceptions.HTTPBadRequest', 'HTTPBadRequest', (['"""Invalid class"""'], {}), "('Invalid class')\n", (6946, 6963), False, 'from pyramid.httpexceptions import HTTPBadRequest, HTTPConflict, HTTPError, HTTPFound, HTTPNotFound, HTTPOk, HTTPRedirection, HTTPSeeOther\n'), ((8172, 8204), 'pyramid_addons.validation.String', 'String', (['"""filename"""'], {'min_length': '(1)'}), "('filename', min_length=1)\n", (8178, 8204), False, 'from pyramid_addons.validation import EmailAddress, Enum, List, Or, String, RegexString, TextNumber, WhiteSpaceString, validate, SOURCE_GET, SOURCE_MATCHDICT as MATCHDICT\n'), ((9290, 9309), 'pyramid.httpexceptions.HTTPBadRequest', 'HTTPBadRequest', (['msg'], {}), '(msg)\n', (9304, 9309), False, 'from pyramid.httpexceptions import HTTPBadRequest, HTTPConflict, HTTPError, HTTPFound, HTTPNotFound, HTTPOk, HTTPRedirection, HTTPSeeOther\n'), ((8952, 8979), 'pyramid_addons.validation.WhiteSpaceString', 'WhiteSpaceString', (['"""b64data"""'], {}), "('b64data')\n", (8968, 8979), False, 'from pyramid_addons.validation import EmailAddress, Enum, List, Or, String, RegexString, TextNumber, WhiteSpaceString, validate, SOURCE_GET, SOURCE_MATCHDICT as MATCHDICT\n'), ((10624, 10653), 'pyramid.response.FileResponse', 'FileResponse', (['source', 'request'], {}), '(source, request)\n', (10636, 10653), False, 'from pyramid.response import FileResponse, Response\n'), ((10263, 10313), 'pyramid_addons.validation.String', 'String', (['"""filename"""'], {'min_length': '(1)', 'source': 'MATCHDICT'}), "('filename', min_length=1, source=MATCHDICT)\n", (10269, 10313), False, 'from pyramid_addons.validation import EmailAddress, Enum, List, Or, String, RegexString, TextNumber, 
WhiteSpaceString, validate, SOURCE_GET, SOURCE_MATCHDICT as MATCHDICT\n'), ((10329, 10406), 'pyramid_addons.validation.TextNumber', 'TextNumber', (['"""raw"""'], {'min_value': '(0)', 'max_value': '(1)', 'optional': '(True)', 'source': 'SOURCE_GET'}), "('raw', min_value=0, max_value=1, optional=True, source=SOURCE_GET)\n", (10339, 10406), False, 'from pyramid_addons.validation import EmailAddress, Enum, List, Or, String, RegexString, TextNumber, WhiteSpaceString, validate, SOURCE_GET, SOURCE_MATCHDICT as MATCHDICT\n'), ((12348, 12367), 'pyramid.httpexceptions.HTTPBadRequest', 'HTTPBadRequest', (['msg'], {}), '(msg)\n', (12362, 12367), False, 'from pyramid.httpexceptions import HTTPBadRequest, HTTPConflict, HTTPError, HTTPFound, HTTPNotFound, HTTPOk, HTTPRedirection, HTTPSeeOther\n'), ((11208, 11280), 'pyramid_addons.validation.TextNumber', 'TextNumber', (['"""copy_to_execution"""'], {'min_value': '(0)', 'max_value': '(1)', 'optional': '(True)'}), "('copy_to_execution', min_value=0, max_value=1, optional=True)\n", (11218, 11280), False, 'from pyramid_addons.validation import EmailAddress, Enum, List, Or, String, RegexString, TextNumber, WhiteSpaceString, validate, SOURCE_GET, SOURCE_MATCHDICT as MATCHDICT\n'), ((11340, 11372), 'pyramid_addons.validation.String', 'String', (['"""filename"""'], {'min_length': '(1)'}), "('filename', min_length=1)\n", (11346, 11372), False, 'from pyramid_addons.validation import EmailAddress, Enum, List, Or, String, RegexString, TextNumber, WhiteSpaceString, validate, SOURCE_GET, SOURCE_MATCHDICT as MATCHDICT\n'), ((11393, 11428), 'pyramid_addons.validation.TextNumber', 'TextNumber', (['"""min_size"""'], {'min_value': '(0)'}), "('min_size', min_value=0)\n", (11403, 11428), False, 'from pyramid_addons.validation import EmailAddress, Enum, List, Or, String, RegexString, TextNumber, WhiteSpaceString, validate, SOURCE_GET, SOURCE_MATCHDICT as MATCHDICT\n'), ((11449, 11499), 'pyramid_addons.validation.TextNumber', 'TextNumber', 
(['"""max_size"""'], {'min_value': '(0)', 'optional': '(True)'}), "('max_size', min_value=0, optional=True)\n", (11459, 11499), False, 'from pyramid_addons.validation import EmailAddress, Enum, List, Or, String, RegexString, TextNumber, WhiteSpaceString, validate, SOURCE_GET, SOURCE_MATCHDICT as MATCHDICT\n'), ((11521, 11557), 'pyramid_addons.validation.TextNumber', 'TextNumber', (['"""min_lines"""'], {'min_value': '(0)'}), "('min_lines', min_value=0)\n", (11531, 11557), False, 'from pyramid_addons.validation import EmailAddress, Enum, List, Or, String, RegexString, TextNumber, WhiteSpaceString, validate, SOURCE_GET, SOURCE_MATCHDICT as MATCHDICT\n'), ((11579, 11630), 'pyramid_addons.validation.TextNumber', 'TextNumber', (['"""max_lines"""'], {'min_value': '(0)', 'optional': '(True)'}), "('max_lines', min_value=0, optional=True)\n", (11589, 11630), False, 'from pyramid_addons.validation import EmailAddress, Enum, List, Or, String, RegexString, TextNumber, WhiteSpaceString, validate, SOURCE_GET, SOURCE_MATCHDICT as MATCHDICT\n'), ((11651, 11714), 'pyramid_addons.validation.TextNumber', 'TextNumber', (['"""optional"""'], {'min_value': '(0)', 'max_value': '(1)', 'optional': '(True)'}), "('optional', min_value=0, max_value=1, optional=True)\n", (11661, 11714), False, 'from pyramid_addons.validation import EmailAddress, Enum, List, Or, String, RegexString, TextNumber, WhiteSpaceString, validate, SOURCE_GET, SOURCE_MATCHDICT as MATCHDICT\n'), ((11828, 11871), 'pyramid_addons.validation.RegexString', 'RegexString', (['"""warning_regex"""'], {'optional': '(True)'}), "('warning_regex', optional=True)\n", (11839, 11871), False, 'from pyramid_addons.validation import EmailAddress, Enum, List, Or, String, RegexString, TextNumber, WhiteSpaceString, validate, SOURCE_GET, SOURCE_MATCHDICT as MATCHDICT\n'), ((14920, 14939), 'pyramid.httpexceptions.HTTPBadRequest', 'HTTPBadRequest', (['msg'], {}), '(msg)\n', (14934, 14939), False, 'from pyramid.httpexceptions import HTTPBadRequest, 
HTTPConflict, HTTPError, HTTPFound, HTTPNotFound, HTTPOk, HTTPRedirection, HTTPSeeOther\n'), ((15312, 15357), 'pyramid_addons.helpers.http_ok', 'http_ok', (['request'], {'message': '"""Nothing to change"""'}), "(request, message='Nothing to change')\n", (15319, 15357), False, 'from pyramid_addons.helpers import http_created, http_gone, http_ok\n'), ((13642, 13714), 'pyramid_addons.validation.TextNumber', 'TextNumber', (['"""copy_to_execution"""'], {'min_value': '(0)', 'max_value': '(1)', 'optional': '(True)'}), "('copy_to_execution', min_value=0, max_value=1, optional=True)\n", (13652, 13714), False, 'from pyramid_addons.validation import EmailAddress, Enum, List, Or, String, RegexString, TextNumber, WhiteSpaceString, validate, SOURCE_GET, SOURCE_MATCHDICT as MATCHDICT\n'), ((13907, 13939), 'pyramid_addons.validation.String', 'String', (['"""filename"""'], {'min_length': '(1)'}), "('filename', min_length=1)\n", (13913, 13939), False, 'from pyramid_addons.validation import EmailAddress, Enum, List, Or, String, RegexString, TextNumber, WhiteSpaceString, validate, SOURCE_GET, SOURCE_MATCHDICT as MATCHDICT\n'), ((13960, 13995), 'pyramid_addons.validation.TextNumber', 'TextNumber', (['"""min_size"""'], {'min_value': '(0)'}), "('min_size', min_value=0)\n", (13970, 13995), False, 'from pyramid_addons.validation import EmailAddress, Enum, List, Or, String, RegexString, TextNumber, WhiteSpaceString, validate, SOURCE_GET, SOURCE_MATCHDICT as MATCHDICT\n'), ((14016, 14066), 'pyramid_addons.validation.TextNumber', 'TextNumber', (['"""max_size"""'], {'min_value': '(0)', 'optional': '(True)'}), "('max_size', min_value=0, optional=True)\n", (14026, 14066), False, 'from pyramid_addons.validation import EmailAddress, Enum, List, Or, String, RegexString, TextNumber, WhiteSpaceString, validate, SOURCE_GET, SOURCE_MATCHDICT as MATCHDICT\n'), ((14088, 14124), 'pyramid_addons.validation.TextNumber', 'TextNumber', (['"""min_lines"""'], {'min_value': '(0)'}), "('min_lines', 
min_value=0)\n", (14098, 14124), False, 'from pyramid_addons.validation import EmailAddress, Enum, List, Or, String, RegexString, TextNumber, WhiteSpaceString, validate, SOURCE_GET, SOURCE_MATCHDICT as MATCHDICT\n'), ((14146, 14197), 'pyramid_addons.validation.TextNumber', 'TextNumber', (['"""max_lines"""'], {'min_value': '(0)', 'optional': '(True)'}), "('max_lines', min_value=0, optional=True)\n", (14156, 14197), False, 'from pyramid_addons.validation import EmailAddress, Enum, List, Or, String, RegexString, TextNumber, WhiteSpaceString, validate, SOURCE_GET, SOURCE_MATCHDICT as MATCHDICT\n'), ((14218, 14281), 'pyramid_addons.validation.TextNumber', 'TextNumber', (['"""optional"""'], {'min_value': '(0)', 'max_value': '(1)', 'optional': '(True)'}), "('optional', min_value=0, max_value=1, optional=True)\n", (14228, 14281), False, 'from pyramid_addons.validation import EmailAddress, Enum, List, Or, String, RegexString, TextNumber, WhiteSpaceString, validate, SOURCE_GET, SOURCE_MATCHDICT as MATCHDICT\n'), ((14337, 14380), 'pyramid_addons.validation.RegexString', 'RegexString', (['"""warning_regex"""'], {'optional': '(True)'}), "('warning_regex', optional=True)\n", (14348, 14380), False, 'from pyramid_addons.validation import EmailAddress, Enum, List, Or, String, RegexString, TextNumber, WhiteSpaceString, validate, SOURCE_GET, SOURCE_MATCHDICT as MATCHDICT\n'), ((16285, 16318), 'pyramid.httpexceptions.HTTPConflict', 'HTTPConflict', (['"""Hahaha, nice try!"""'], {}), "('Hahaha, nice try!')\n", (16297, 16318), False, 'from pyramid.httpexceptions import HTTPBadRequest, HTTPConflict, HTTPError, HTTPFound, HTTPNotFound, HTTPOk, HTTPRedirection, HTTPSeeOther\n'), ((16394, 16423), 'pyramid.httpexceptions.HTTPConflict', 'HTTPConflict', (['"""Invalid email"""'], {}), "('Invalid email')\n", (16406, 16423), False, 'from pyramid.httpexceptions import HTTPBadRequest, HTTPConflict, HTTPError, HTTPFound, HTTPNotFound, HTTPOk, HTTPRedirection, HTTPSeeOther\n'), ((17178, 17251), 
'pyramid_addons.helpers.http_ok', 'http_ok', (['request'], {'message': '"""A password reset link will be emailed to you."""'}), "(request, message='A password reset link will be emailed to you.')\n", (17185, 17251), False, 'from pyramid_addons.helpers import http_created, http_gone, http_ok\n'), ((17299, 17328), 'pyramid.httpexceptions.HTTPConflict', 'HTTPConflict', (['failure_message'], {}), '(failure_message)\n', (17311, 17328), False, 'from pyramid.httpexceptions import HTTPBadRequest, HTTPConflict, HTTPError, HTTPFound, HTTPNotFound, HTTPOk, HTTPRedirection, HTTPSeeOther\n'), ((16174, 16195), 'pyramid_addons.validation.EmailAddress', 'EmailAddress', (['"""email"""'], {}), "('email')\n", (16186, 16195), False, 'from pyramid_addons.validation import EmailAddress, Enum, List, Or, String, RegexString, TextNumber, WhiteSpaceString, validate, SOURCE_GET, SOURCE_MATCHDICT as MATCHDICT\n'), ((18338, 18408), 'pyramid.httpexceptions.HTTPConflict', 'HTTPConflict', (['"""The reset token and username combination is not valid."""'], {}), "('The reset token and username combination is not valid.')\n", (18350, 18408), False, 'from pyramid.httpexceptions import HTTPBadRequest, HTTPConflict, HTTPError, HTTPFound, HTTPNotFound, HTTPOk, HTTPRedirection, HTTPSeeOther\n'), ((17990, 18011), 'pyramid_addons.validation.EmailAddress', 'EmailAddress', (['"""email"""'], {}), "('email')\n", (18002, 18011), False, 'from pyramid_addons.validation import EmailAddress, Enum, List, Or, String, RegexString, TextNumber, WhiteSpaceString, validate, SOURCE_GET, SOURCE_MATCHDICT as MATCHDICT\n'), ((18032, 18074), 'pyramid_addons.validation.WhiteSpaceString', 'WhiteSpaceString', (['"""password"""'], {'min_length': '(6)'}), "('password', min_length=6)\n", (18048, 18074), False, 'from pyramid_addons.validation import EmailAddress, Enum, List, Or, String, RegexString, TextNumber, WhiteSpaceString, validate, SOURCE_GET, SOURCE_MATCHDICT as MATCHDICT\n'), ((18962, 18990), 
'pyramid_addons.validation.String', 'String', (['"""name"""'], {'min_length': '(2)'}), "('name', min_length=2)\n", (18968, 18990), False, 'from pyramid_addons.validation import EmailAddress, Enum, List, Or, String, RegexString, TextNumber, WhiteSpaceString, validate, SOURCE_GET, SOURCE_MATCHDICT as MATCHDICT\n'), ((21319, 21347), 'pyramid_addons.validation.String', 'String', (['"""name"""'], {'min_length': '(2)'}), "('name', min_length=2)\n", (21325, 21347), False, 'from pyramid_addons.validation import EmailAddress, Enum, List, Or, String, RegexString, TextNumber, WhiteSpaceString, validate, SOURCE_GET, SOURCE_MATCHDICT as MATCHDICT\n'), ((28055, 28080), 'zipfile.ZipFile', 'ZipFile', (['import_file', '"""r"""'], {}), "(import_file, 'r')\n", (28062, 28080), False, 'from zipfile import ZipFile\n'), ((39614, 39661), 'pyramid_addons.helpers.http_ok', 'http_ok', (['request'], {'redir_location': 'redir_location'}), '(request, redir_location=redir_location)\n', (39621, 39661), False, 'from pyramid_addons.helpers import http_created, http_gone, http_ok\n'), ((43865, 43914), 'pyramid_addons.helpers.http_gone', 'http_gone', (['request'], {'redir_location': 'redir_location'}), '(request, redir_location=redir_location)\n', (43874, 43914), False, 'from pyramid_addons.helpers import http_created, http_gone, http_ok\n'), ((45625, 45663), 'pyramid_addons.helpers.http_gone', 'http_gone', (['request'], {'redir_location': 'url'}), '(request, redir_location=url)\n', (45634, 45663), False, 'from pyramid_addons.helpers import http_created, http_gone, http_ok\n'), ((46161, 46223), 'pyramid.httpexceptions.HTTPConflict', 'HTTPConflict', (['"""You cannot expand your group for this project."""'], {}), "('You cannot expand your group for this project.')\n", (46173, 46223), False, 'from pyramid.httpexceptions import HTTPBadRequest, HTTPConflict, HTTPError, HTTPFound, HTTPNotFound, HTTPOk, HTTPRedirection, HTTPSeeOther\n'), ((46337, 46367), 'pyramid.httpexceptions.HTTPConflict', 
'HTTPConflict', (['"""Invalid email."""'], {}), "('Invalid email.')\n", (46349, 46367), False, 'from pyramid.httpexceptions import HTTPBadRequest, HTTPConflict, HTTPError, HTTPFound, HTTPNotFound, HTTPOk, HTTPRedirection, HTTPSeeOther\n'), ((46423, 46472), 'pyramid.httpexceptions.HTTPConflict', 'HTTPConflict', (['"""That user cannot join your group."""'], {}), "('That user cannot join your group.')\n", (46435, 46472), False, 'from pyramid.httpexceptions import HTTPBadRequest, HTTPConflict, HTTPError, HTTPFound, HTTPNotFound, HTTPOk, HTTPRedirection, HTTPSeeOther\n'), ((46691, 46752), 'pyramid.httpexceptions.HTTPConflict', 'HTTPConflict', (['"""You are already in a group with that student."""'], {}), "('You are already in a group with that student.')\n", (46703, 46752), False, 'from pyramid.httpexceptions import HTTPBadRequest, HTTPConflict, HTTPError, HTTPFound, HTTPNotFound, HTTPOk, HTTPRedirection, HTTPSeeOther\n'), ((46013, 46034), 'pyramid_addons.validation.EmailAddress', 'EmailAddress', (['"""email"""'], {}), "('email')\n", (46025, 46034), False, 'from pyramid_addons.validation import EmailAddress, Enum, List, Or, String, RegexString, TextNumber, WhiteSpaceString, validate, SOURCE_GET, SOURCE_MATCHDICT as MATCHDICT\n'), ((51297, 51361), 'pyramid_addons.helpers.http_ok', 'http_ok', (['request'], {'message': '"""There are no submissions to requeue."""'}), "(request, message='There are no submissions to requeue.')\n", (51304, 51361), False, 'from pyramid_addons.helpers import http_created, http_gone, http_ok\n'), ((52788, 52834), 'pyramid.httpexceptions.HTTPConflict', 'HTTPConflict', (['"""The project is already locked."""'], {}), "('The project is already locked.')\n", (52800, 52834), False, 'from pyramid.httpexceptions import HTTPBadRequest, HTTPConflict, HTTPError, HTTPFound, HTTPNotFound, HTTPOk, HTTPRedirection, HTTPSeeOther\n'), ((52936, 52989), 'pyramid.httpexceptions.HTTPConflict', 'HTTPConflict', (['"""The submission has not been verified."""'], {}), 
"('The submission has not been verified.')\n", (52948, 52989), False, 'from pyramid.httpexceptions import HTTPBadRequest, HTTPConflict, HTTPError, HTTPFound, HTTPNotFound, HTTPOk, HTTPRedirection, HTTPSeeOther\n'), ((53043, 53098), 'pyramid.httpexceptions.HTTPConflict', 'HTTPConflict', (['"""The submission has pending test groups."""'], {}), "('The submission has pending test groups.')\n", (53055, 53098), False, 'from pyramid.httpexceptions import HTTPBadRequest, HTTPConflict, HTTPError, HTTPFound, HTTPNotFound, HTTPOk, HTTPRedirection, HTTPSeeOther\n'), ((53732, 53752), 'transaction.commit', 'transaction.commit', ([], {}), '()\n', (53750, 53752), False, 'import transaction\n'), ((55444, 55489), 'pyramid_addons.helpers.http_ok', 'http_ok', (['request'], {'message': '"""Nothing to change"""'}), "(request, message='Nothing to change')\n", (55451, 55489), False, 'from pyramid_addons.helpers import http_created, http_gone, http_ok\n'), ((54413, 54441), 'pyramid_addons.validation.String', 'String', (['"""name"""'], {'min_length': '(2)'}), "('name', min_length=2)\n", (54419, 54441), False, 'from pyramid_addons.validation import EmailAddress, Enum, List, Or, String, RegexString, TextNumber, WhiteSpaceString, validate, SOURCE_GET, SOURCE_MATCHDICT as MATCHDICT\n'), ((54534, 54597), 'pyramid_addons.validation.TextNumber', 'TextNumber', (['"""is_ready"""'], {'min_value': '(0)', 'max_value': '(1)', 'optional': '(True)'}), "('is_ready', min_value=0, max_value=1, optional=True)\n", (54544, 54597), False, 'from pyramid_addons.validation import EmailAddress, Enum, List, Or, String, RegexString, TextNumber, WhiteSpaceString, validate, SOURCE_GET, SOURCE_MATCHDICT as MATCHDICT\n'), ((54709, 54749), 'pyramid_addons.validation.TextNumber', 'TextNumber', (['"""delay_minutes"""'], {'min_value': '(1)'}), "('delay_minutes', min_value=1)\n", (54719, 54749), False, 'from pyramid_addons.validation import EmailAddress, Enum, List, Or, String, RegexString, TextNumber, WhiteSpaceString, 
validate, SOURCE_GET, SOURCE_MATCHDICT as MATCHDICT\n'), ((54771, 54807), 'pyramid_addons.validation.TextNumber', 'TextNumber', (['"""group_max"""'], {'min_value': '(1)'}), "('group_max', min_value=1)\n", (54781, 54807), False, 'from pyramid_addons.validation import EmailAddress, Enum, List, Or, String, RegexString, TextNumber, WhiteSpaceString, validate, SOURCE_GET, SOURCE_MATCHDICT as MATCHDICT\n'), ((56326, 56340), 'pyramid.httpexceptions.HTTPNotFound', 'HTTPNotFound', ([], {}), '()\n', (56338, 56340), False, 'from pyramid.httpexceptions import HTTPBadRequest, HTTPConflict, HTTPError, HTTPFound, HTTPNotFound, HTTPOk, HTTPRedirection, HTTPSeeOther\n'), ((57723, 57746), 'pyramid.httpexceptions.HTTPFound', 'HTTPFound', ([], {'location': 'url'}), '(location=url)\n', (57732, 57746), False, 'from pyramid.httpexceptions import HTTPBadRequest, HTTPConflict, HTTPError, HTTPFound, HTTPNotFound, HTTPOk, HTTPRedirection, HTTPSeeOther\n'), ((58670, 58693), 'numpy.mean', 'numpy.mean', (['best_scores'], {}), '(best_scores)\n', (58680, 58693), False, 'import numpy\n'), ((58711, 58736), 'numpy.median', 'numpy.median', (['best_scores'], {}), '(best_scores)\n', (58723, 58736), False, 'import numpy\n'), ((58857, 58912), 'numpy.histogram', 'numpy.histogram', (['normed'], {'range': '(0, possible)', 'bins': 'bins'}), '(normed, range=(0, possible), bins=bins)\n', (58872, 58912), False, 'import numpy\n'), ((60849, 60878), 'pyramid.httpexceptions.HTTPConflict', 'HTTPConflict', (['"""Invalid login"""'], {}), "('Invalid login')\n", (60861, 60878), False, 'from pyramid.httpexceptions import HTTPBadRequest, HTTPConflict, HTTPError, HTTPFound, HTTPNotFound, HTTPOk, HTTPRedirection, HTTPSeeOther\n'), ((60438, 60480), 'pyramid_addons.validation.WhiteSpaceString', 'WhiteSpaceString', (['"""password"""'], {'min_length': '(6)'}), "('password', min_length=6)\n", (60454, 60480), False, 'from pyramid_addons.validation import EmailAddress, Enum, List, Or, String, RegexString, TextNumber, 
WhiteSpaceString, validate, SOURCE_GET, SOURCE_MATCHDICT as MATCHDICT\n'), ((60502, 60531), 'pyramid_addons.validation.String', 'String', (['"""next"""'], {'optional': '(True)'}), "('next', optional=True)\n", (60508, 60531), False, 'from pyramid_addons.validation import EmailAddress, Enum, List, Or, String, RegexString, TextNumber, WhiteSpaceString, validate, SOURCE_GET, SOURCE_MATCHDICT as MATCHDICT\n'), ((61560, 61612), 'pyramid_addons.validation.String', 'String', (['"""username"""'], {'optional': '(True)', 'source': 'SOURCE_GET'}), "('username', optional=True, source=SOURCE_GET)\n", (61566, 61612), False, 'from pyramid_addons.validation import EmailAddress, Enum, List, Or, String, RegexString, TextNumber, WhiteSpaceString, validate, SOURCE_GET, SOURCE_MATCHDICT as MATCHDICT\n'), ((61634, 61682), 'pyramid_addons.validation.String', 'String', (['"""next"""'], {'optional': '(True)', 'source': 'SOURCE_GET'}), "('next', optional=True, source=SOURCE_GET)\n", (61640, 61682), False, 'from pyramid_addons.validation import EmailAddress, Enum, List, Or, String, RegexString, TextNumber, WhiteSpaceString, validate, SOURCE_GET, SOURCE_MATCHDICT as MATCHDICT\n'), ((62422, 62484), 'pyramid.httpexceptions.HTTPBadRequest', 'HTTPBadRequest', (['"""A filename cannot be provided more than once"""'], {}), "('A filename cannot be provided more than once')\n", (62436, 62484), False, 'from pyramid.httpexceptions import HTTPBadRequest, HTTPConflict, HTTPError, HTTPFound, HTTPNotFound, HTTPOk, HTTPRedirection, HTTPSeeOther\n'), ((63183, 63203), 'pyramid.httpexceptions.HTTPBadRequest', 'HTTPBadRequest', (['msgs'], {}), '(msgs)\n', (63197, 63203), False, 'from pyramid.httpexceptions import HTTPBadRequest, HTTPConflict, HTTPError, HTTPFound, HTTPNotFound, HTTPOk, HTTPRedirection, HTTPSeeOther\n'), ((65345, 65431), 'pyramid_addons.validation.TextNumber', 'TextNumber', (['"""as_user"""'], {'min_value': '(0)', 'max_value': '(1)', 'optional': '(True)', 'source': 'SOURCE_GET'}), "('as_user', 
min_value=0, max_value=1, optional=True, source=\n SOURCE_GET)\n", (65355, 65431), False, 'from pyramid_addons.validation import EmailAddress, Enum, List, Or, String, RegexString, TextNumber, WhiteSpaceString, validate, SOURCE_GET, SOURCE_MATCHDICT as MATCHDICT\n'), ((69217, 69245), 'pyramid_addons.validation.String', 'String', (['"""name"""'], {'min_length': '(1)'}), "('name', min_length=1)\n", (69223, 69245), False, 'from pyramid_addons.validation import EmailAddress, Enum, List, Or, String, RegexString, TextNumber, WhiteSpaceString, validate, SOURCE_GET, SOURCE_MATCHDICT as MATCHDICT\n'), ((69262, 69290), 'pyramid_addons.validation.String', 'String', (['"""args"""'], {'min_length': '(1)'}), "('args', min_length=1)\n", (69268, 69290), False, 'from pyramid_addons.validation import EmailAddress, Enum, List, Or, String, RegexString, TextNumber, WhiteSpaceString, validate, SOURCE_GET, SOURCE_MATCHDICT as MATCHDICT\n'), ((69388, 69456), 'pyramid_addons.validation.TextNumber', 'TextNumber', (['"""hide_expected"""'], {'min_value': '(0)', 'max_value': '(1)', 'optional': '(True)'}), "('hide_expected', min_value=0, max_value=1, optional=True)\n", (69398, 69456), False, 'from pyramid_addons.validation import EmailAddress, Enum, List, Or, String, RegexString, TextNumber, WhiteSpaceString, validate, SOURCE_GET, SOURCE_MATCHDICT as MATCHDICT\n'), ((69519, 69573), 'pyramid_addons.validation.String', 'String', (['"""output_filename"""'], {'min_length': '(1)', 'optional': '(True)'}), "('output_filename', min_length=1, optional=True)\n", (69525, 69573), False, 'from pyramid_addons.validation import EmailAddress, Enum, List, Or, String, RegexString, TextNumber, WhiteSpaceString, validate, SOURCE_GET, SOURCE_MATCHDICT as MATCHDICT\n'), ((69689, 69709), 'pyramid_addons.validation.TextNumber', 'TextNumber', (['"""points"""'], {}), "('points')\n", (69699, 69709), False, 'from pyramid_addons.validation import EmailAddress, Enum, List, Or, String, RegexString, TextNumber, 
WhiteSpaceString, validate, SOURCE_GET, SOURCE_MATCHDICT as MATCHDICT\n'), ((72779, 72824), 'pyramid_addons.helpers.http_ok', 'http_ok', (['request'], {'message': '"""Nothing to change"""'}), "(request, message='Nothing to change')\n", (72786, 72824), False, 'from pyramid_addons.helpers import http_created, http_gone, http_ok\n'), ((71557, 71585), 'pyramid_addons.validation.String', 'String', (['"""name"""'], {'min_length': '(1)'}), "('name', min_length=1)\n", (71563, 71585), False, 'from pyramid_addons.validation import EmailAddress, Enum, List, Or, String, RegexString, TextNumber, WhiteSpaceString, validate, SOURCE_GET, SOURCE_MATCHDICT as MATCHDICT\n'), ((71602, 71630), 'pyramid_addons.validation.String', 'String', (['"""args"""'], {'min_length': '(1)'}), "('args', min_length=1)\n", (71608, 71630), False, 'from pyramid_addons.validation import EmailAddress, Enum, List, Or, String, RegexString, TextNumber, WhiteSpaceString, validate, SOURCE_GET, SOURCE_MATCHDICT as MATCHDICT\n'), ((71728, 71796), 'pyramid_addons.validation.TextNumber', 'TextNumber', (['"""hide_expected"""'], {'min_value': '(0)', 'max_value': '(1)', 'optional': '(True)'}), "('hide_expected', min_value=0, max_value=1, optional=True)\n", (71738, 71796), False, 'from pyramid_addons.validation import EmailAddress, Enum, List, Or, String, RegexString, TextNumber, WhiteSpaceString, validate, SOURCE_GET, SOURCE_MATCHDICT as MATCHDICT\n'), ((71859, 71913), 'pyramid_addons.validation.String', 'String', (['"""output_filename"""'], {'min_length': '(1)', 'optional': '(True)'}), "('output_filename', min_length=1, optional=True)\n", (71865, 71913), False, 'from pyramid_addons.validation import EmailAddress, Enum, List, Or, String, RegexString, TextNumber, WhiteSpaceString, validate, SOURCE_GET, SOURCE_MATCHDICT as MATCHDICT\n'), ((72029, 72049), 'pyramid_addons.validation.TextNumber', 'TextNumber', (['"""points"""'], {}), "('points')\n", (72039, 72049), False, 'from pyramid_addons.validation import 
EmailAddress, Enum, List, Or, String, RegexString, TextNumber, WhiteSpaceString, validate, SOURCE_GET, SOURCE_MATCHDICT as MATCHDICT\n'), ((74476, 74495), 'pyramid.httpexceptions.HTTPBadRequest', 'HTTPBadRequest', (['msg'], {}), '(msg)\n', (74490, 74495), False, 'from pyramid.httpexceptions import HTTPBadRequest, HTTPConflict, HTTPError, HTTPFound, HTTPNotFound, HTTPOk, HTTPRedirection, HTTPSeeOther\n'), ((73446, 73474), 'pyramid_addons.validation.String', 'String', (['"""name"""'], {'min_length': '(1)'}), "('name', min_length=1)\n", (73452, 73474), False, 'from pyramid_addons.validation import EmailAddress, Enum, List, Or, String, RegexString, TextNumber, WhiteSpaceString, validate, SOURCE_GET, SOURCE_MATCHDICT as MATCHDICT\n'), ((73496, 73560), 'pyramid_addons.validation.TextNumber', 'TextNumber', (['"""is_hidden"""'], {'min_value': '(0)', 'max_value': '(1)', 'optional': '(True)'}), "('is_hidden', min_value=0, max_value=1, optional=True)\n", (73506, 73560), False, 'from pyramid_addons.validation import EmailAddress, Enum, List, Or, String, RegexString, TextNumber, WhiteSpaceString, validate, SOURCE_GET, SOURCE_MATCHDICT as MATCHDICT\n'), ((73615, 73665), 'pyramid_addons.validation.String', 'String', (['"""make_target"""'], {'min_length': '(1)', 'optional': '(True)'}), "('make_target', min_length=1, optional=True)\n", (73621, 73665), False, 'from pyramid_addons.validation import EmailAddress, Enum, List, Or, String, RegexString, TextNumber, WhiteSpaceString, validate, SOURCE_GET, SOURCE_MATCHDICT as MATCHDICT\n'), ((73688, 73722), 'pyramid_addons.validation.String', 'String', (['"""executable"""'], {'min_length': '(1)'}), "('executable', min_length=1)\n", (73694, 73722), False, 'from pyramid_addons.validation import EmailAddress, Enum, List, Or, String, RegexString, TextNumber, WhiteSpaceString, validate, SOURCE_GET, SOURCE_MATCHDICT as MATCHDICT\n'), ((77089, 77108), 'pyramid.httpexceptions.HTTPBadRequest', 'HTTPBadRequest', (['msg'], {}), '(msg)\n', (77103, 
77108), False, 'from pyramid.httpexceptions import HTTPBadRequest, HTTPConflict, HTTPError, HTTPFound, HTTPNotFound, HTTPOk, HTTPRedirection, HTTPSeeOther\n'), ((78286, 78331), 'pyramid_addons.helpers.http_ok', 'http_ok', (['request'], {'message': '"""Nothing to change"""'}), "(request, message='Nothing to change')\n", (78293, 78331), False, 'from pyramid_addons.helpers import http_created, http_gone, http_ok\n'), ((76035, 76063), 'pyramid_addons.validation.String', 'String', (['"""name"""'], {'min_length': '(1)'}), "('name', min_length=1)\n", (76041, 76063), False, 'from pyramid_addons.validation import EmailAddress, Enum, List, Or, String, RegexString, TextNumber, WhiteSpaceString, validate, SOURCE_GET, SOURCE_MATCHDICT as MATCHDICT\n'), ((76085, 76149), 'pyramid_addons.validation.TextNumber', 'TextNumber', (['"""is_hidden"""'], {'min_value': '(0)', 'max_value': '(1)', 'optional': '(True)'}), "('is_hidden', min_value=0, max_value=1, optional=True)\n", (76095, 76149), False, 'from pyramid_addons.validation import EmailAddress, Enum, List, Or, String, RegexString, TextNumber, WhiteSpaceString, validate, SOURCE_GET, SOURCE_MATCHDICT as MATCHDICT\n'), ((76204, 76254), 'pyramid_addons.validation.String', 'String', (['"""make_target"""'], {'min_length': '(1)', 'optional': '(True)'}), "('make_target', min_length=1, optional=True)\n", (76210, 76254), False, 'from pyramid_addons.validation import EmailAddress, Enum, List, Or, String, RegexString, TextNumber, WhiteSpaceString, validate, SOURCE_GET, SOURCE_MATCHDICT as MATCHDICT\n'), ((76277, 76311), 'pyramid_addons.validation.String', 'String', (['"""executable"""'], {'min_length': '(1)'}), "('executable', min_length=1)\n", (76283, 76311), False, 'from pyramid_addons.validation import EmailAddress, Enum, List, Or, String, RegexString, TextNumber, WhiteSpaceString, validate, SOURCE_GET, SOURCE_MATCHDICT as MATCHDICT\n'), ((79503, 79525), 'pyramid_addons.validation.String', 'String', (['"""verification"""'], {}), 
"('verification')\n", (79509, 79525), False, 'from pyramid_addons.validation import EmailAddress, Enum, List, Or, String, RegexString, TextNumber, WhiteSpaceString, validate, SOURCE_GET, SOURCE_MATCHDICT as MATCHDICT\n'), ((79792, 79820), 'pyramid_addons.validation.String', 'String', (['"""name"""'], {'min_length': '(5)'}), "('name', min_length=5)\n", (79798, 79820), False, 'from pyramid_addons.validation import EmailAddress, Enum, List, Or, String, RegexString, TextNumber, WhiteSpaceString, validate, SOURCE_GET, SOURCE_MATCHDICT as MATCHDICT\n'), ((79841, 79885), 'pyramid_addons.validation.String', 'String', (['"""email"""'], {'min_length': '(6)', 'max_length': '(64)'}), "('email', min_length=6, max_length=64)\n", (79847, 79885), False, 'from pyramid_addons.validation import EmailAddress, Enum, List, Or, String, RegexString, TextNumber, WhiteSpaceString, validate, SOURCE_GET, SOURCE_MATCHDICT as MATCHDICT\n'), ((79910, 79932), 'pyramid_addons.validation.String', 'String', (['"""verification"""'], {}), "('verification')\n", (79916, 79932), False, 'from pyramid_addons.validation import EmailAddress, Enum, List, Or, String, RegexString, TextNumber, WhiteSpaceString, validate, SOURCE_GET, SOURCE_MATCHDICT as MATCHDICT\n'), ((5387, 5431), 'pyramid.httpexceptions.HTTPConflict', 'HTTPConflict', (['"""The user could not be added."""'], {}), "('The user could not be added.')\n", (5399, 5431), False, 'from pyramid.httpexceptions import HTTPBadRequest, HTTPConflict, HTTPError, HTTPFound, HTTPNotFound, HTTPOk, HTTPRedirection, HTTPSeeOther\n'), ((9145, 9155), 'hashlib.sha1', 'sha1', (['data'], {}), '(data)\n', (9149, 9155), False, 'from hashlib import sha1\n'), ((12803, 12863), 'pyramid.httpexceptions.HTTPConflict', 'HTTPConflict', (['"""That filename already exists for the project"""'], {}), "('That filename already exists for the project')\n", (12815, 12863), False, 'from pyramid.httpexceptions import HTTPBadRequest, HTTPConflict, HTTPError, HTTPFound, HTTPNotFound, HTTPOk, 
HTTPRedirection, HTTPSeeOther\n'), ((15433, 15493), 'pyramid.httpexceptions.HTTPConflict', 'HTTPConflict', (['"""That filename already exists for the project"""'], {}), "('That filename already exists for the project')\n", (15445, 15493), False, 'from pyramid.httpexceptions import HTTPBadRequest, HTTPConflict, HTTPError, HTTPFound, HTTPNotFound, HTTPOk, HTTPRedirection, HTTPSeeOther\n'), ((21693, 21755), 'pyramid.httpexceptions.HTTPConflict', 'HTTPConflict', (['"""That project name already exists for the class"""'], {}), "('That project name already exists for the class')\n", (21705, 21755), False, 'from pyramid.httpexceptions import HTTPBadRequest, HTTPConflict, HTTPError, HTTPFound, HTTPNotFound, HTTPOk, HTTPRedirection, HTTPSeeOther\n'), ((24676, 24734), 'yaml.safe_dump', 'yaml.safe_dump', (['project_yml_dict'], {'default_flow_style': '(False)'}), '(project_yml_dict, default_flow_style=False)\n', (24690, 24734), False, 'import yaml\n'), ((34643, 34681), 'os.path.join', 'os.path.join', (['root_dir', '"""build_files/"""'], {}), "(root_dir, 'build_files/')\n", (34655, 34681), False, 'import os\n'), ((43699, 43753), 'pyramid.httpexceptions.HTTPConflict', 'HTTPConflict', (['"""Could not join the users at this time."""'], {}), "('Could not join the users at this time.')\n", (43711, 43753), False, 'from pyramid.httpexceptions import HTTPBadRequest, HTTPConflict, HTTPError, HTTPFound, HTTPNotFound, HTTPOk, HTTPRedirection, HTTPSeeOther\n'), ((45472, 45526), 'pyramid.httpexceptions.HTTPConflict', 'HTTPConflict', (['"""Could not join the group at this time."""'], {}), "('Could not join the group at this time.')\n", (45484, 45526), False, 'from pyramid.httpexceptions import HTTPBadRequest, HTTPConflict, HTTPError, HTTPFound, HTTPNotFound, HTTPOk, HTTPRedirection, HTTPSeeOther\n'), ((46942, 46994), 'pyramid.httpexceptions.HTTPConflict', 'HTTPConflict', (['"""Could not create your group request."""'], {}), "('Could not create your group request.')\n", (46954, 46994), False, 
'from pyramid.httpexceptions import HTTPBadRequest, HTTPConflict, HTTPError, HTTPFound, HTTPNotFound, HTTPOk, HTTPRedirection, HTTPSeeOther\n'), ((53411, 53471), 'pyramid.httpexceptions.HTTPConflict', 'HTTPConflict', (['"""The submission contains failing test groups."""'], {}), "('The submission contains failing test groups.')\n", (53423, 53471), False, 'from pyramid.httpexceptions import HTTPBadRequest, HTTPConflict, HTTPError, HTTPFound, HTTPNotFound, HTTPOk, HTTPRedirection, HTTPSeeOther\n'), ((53830, 53849), 'transaction.abort', 'transaction.abort', ([], {}), '()\n', (53847, 53849), False, 'import transaction\n'), ((55564, 55626), 'pyramid.httpexceptions.HTTPConflict', 'HTTPConflict', (['"""That project name already exists for the class"""'], {}), "('That project name already exists for the class')\n", (55576, 55626), False, 'from pyramid.httpexceptions import HTTPBadRequest, HTTPConflict, HTTPError, HTTPFound, HTTPNotFound, HTTPOk, HTTPRedirection, HTTPSeeOther\n'), ((60388, 60404), 'pyramid_addons.validation.EmailAddress', 'EmailAddress', (['""""""'], {}), "('')\n", (60400, 60404), False, 'from pyramid_addons.validation import EmailAddress, Enum, List, Or, String, RegexString, TextNumber, WhiteSpaceString, validate, SOURCE_GET, SOURCE_MATCHDICT as MATCHDICT\n'), ((60406, 60416), 'pyramid_addons.validation.String', 'String', (['""""""'], {}), "('')\n", (60412, 60416), False, 'from pyramid_addons.validation import EmailAddress, Enum, List, Or, String, RegexString, TextNumber, WhiteSpaceString, validate, SOURCE_GET, SOURCE_MATCHDICT as MATCHDICT\n'), ((62607, 62626), 'pyramid.httpexceptions.HTTPBadRequest', 'HTTPBadRequest', (['msg'], {}), '(msg)\n', (62621, 62626), False, 'from pyramid.httpexceptions import HTTPBadRequest, HTTPConflict, HTTPError, HTTPFound, HTTPNotFound, HTTPOk, HTTPRedirection, HTTPSeeOther\n'), ((62056, 62083), 'pyramid_addons.validation.TextNumber', 'TextNumber', (['""""""'], {'min_value': '(0)'}), "('', min_value=0)\n", (62066, 62083), 
False, 'from pyramid_addons.validation import EmailAddress, Enum, List, Or, String, RegexString, TextNumber, WhiteSpaceString, validate, SOURCE_GET, SOURCE_MATCHDICT as MATCHDICT\n'), ((62164, 62188), 'pyramid_addons.validation.String', 'String', (['""""""'], {'min_length': '(1)'}), "('', min_length=1)\n", (62170, 62188), False, 'from pyramid_addons.validation import EmailAddress, Enum, List, Or, String, RegexString, TextNumber, WhiteSpaceString, validate, SOURCE_GET, SOURCE_MATCHDICT as MATCHDICT\n'), ((70471, 70528), 'pyramid.httpexceptions.HTTPConflict', 'HTTPConflict', (['"""That name already exists for the testable"""'], {}), "('That name already exists for the testable')\n", (70483, 70528), False, 'from pyramid.httpexceptions import HTTPBadRequest, HTTPConflict, HTTPError, HTTPFound, HTTPNotFound, HTTPOk, HTTPRedirection, HTTPSeeOther\n'), ((72899, 72956), 'pyramid.httpexceptions.HTTPConflict', 'HTTPConflict', (['"""That name already exists for the testable"""'], {}), "('That name already exists for the testable')\n", (72911, 72956), False, 'from pyramid.httpexceptions import HTTPBadRequest, HTTPConflict, HTTPError, HTTPFound, HTTPNotFound, HTTPOk, HTTPRedirection, HTTPSeeOther\n'), ((75726, 75782), 'pyramid.httpexceptions.HTTPConflict', 'HTTPConflict', (['"""That name already exists for the project"""'], {}), "('That name already exists for the project')\n", (75738, 75782), False, 'from pyramid.httpexceptions import HTTPBadRequest, HTTPConflict, HTTPError, HTTPFound, HTTPNotFound, HTTPOk, HTTPRedirection, HTTPSeeOther\n'), ((73772, 73799), 'pyramid_addons.validation.TextNumber', 'TextNumber', (['""""""'], {'min_value': '(0)'}), "('', min_value=0)\n", (73782, 73799), False, 'from pyramid_addons.validation import EmailAddress, Enum, List, Or, String, RegexString, TextNumber, WhiteSpaceString, validate, SOURCE_GET, SOURCE_MATCHDICT as MATCHDICT\n'), ((73937, 73964), 'pyramid_addons.validation.TextNumber', 'TextNumber', (['""""""'], {'min_value': '(0)'}), "('', 
min_value=0)\n", (73947, 73964), False, 'from pyramid_addons.validation import EmailAddress, Enum, List, Or, String, RegexString, TextNumber, WhiteSpaceString, validate, SOURCE_GET, SOURCE_MATCHDICT as MATCHDICT\n'), ((74069, 74096), 'pyramid_addons.validation.TextNumber', 'TextNumber', (['""""""'], {'min_value': '(0)'}), "('', min_value=0)\n", (74079, 74096), False, 'from pyramid_addons.validation import EmailAddress, Enum, List, Or, String, RegexString, TextNumber, WhiteSpaceString, validate, SOURCE_GET, SOURCE_MATCHDICT as MATCHDICT\n'), ((78406, 78462), 'pyramid.httpexceptions.HTTPConflict', 'HTTPConflict', (['"""That name already exists for the project"""'], {}), "('That name already exists for the project')\n", (78418, 78462), False, 'from pyramid.httpexceptions import HTTPBadRequest, HTTPConflict, HTTPError, HTTPFound, HTTPNotFound, HTTPOk, HTTPRedirection, HTTPSeeOther\n'), ((76361, 76388), 'pyramid_addons.validation.TextNumber', 'TextNumber', (['""""""'], {'min_value': '(0)'}), "('', min_value=0)\n", (76371, 76388), False, 'from pyramid_addons.validation import EmailAddress, Enum, List, Or, String, RegexString, TextNumber, WhiteSpaceString, validate, SOURCE_GET, SOURCE_MATCHDICT as MATCHDICT\n'), ((76526, 76553), 'pyramid_addons.validation.TextNumber', 'TextNumber', (['""""""'], {'min_value': '(0)'}), "('', min_value=0)\n", (76536, 76553), False, 'from pyramid_addons.validation import EmailAddress, Enum, List, Or, String, RegexString, TextNumber, WhiteSpaceString, validate, SOURCE_GET, SOURCE_MATCHDICT as MATCHDICT\n'), ((76658, 76685), 'pyramid_addons.validation.TextNumber', 'TextNumber', (['""""""'], {'min_value': '(0)'}), "('', min_value=0)\n", (76668, 76685), False, 'from pyramid_addons.validation import EmailAddress, Enum, List, Or, String, RegexString, TextNumber, WhiteSpaceString, validate, SOURCE_GET, SOURCE_MATCHDICT as MATCHDICT\n'), ((82968, 83003), 'os.path.join', 'os.path.join', (['base_path', '"""Makefile"""'], {}), "(base_path, 
'Makefile')\n", (82980, 83003), False, 'import os\n'), ((5105, 5126), 'pyramid_addons.validation.EmailAddress', 'EmailAddress', (['"""email"""'], {}), "('email')\n", (5117, 5126), False, 'from pyramid_addons.validation import EmailAddress, Enum, List, Or, String, RegexString, TextNumber, WhiteSpaceString, validate, SOURCE_GET, SOURCE_MATCHDICT as MATCHDICT\n'), ((10682, 10719), 'codecs.open', 'codecs.open', (['source'], {'encoding': '"""utf-8"""'}), "(source, encoding='utf-8')\n", (10693, 10719), False, 'import codecs\n'), ((16693, 16722), 'pyramid.httpexceptions.HTTPConflict', 'HTTPConflict', (['failure_message'], {}), '(failure_message)\n', (16705, 16722), False, 'from pyramid.httpexceptions import HTTPBadRequest, HTTPConflict, HTTPError, HTTPFound, HTTPNotFound, HTTPOk, HTTPRedirection, HTTPSeeOther\n'), ((27282, 27337), 'yaml.safe_dump', 'yaml.safe_dump', (['testable_dict'], {'default_flow_style': '(False)'}), '(testable_dict, default_flow_style=False)\n', (27296, 27337), False, 'import yaml\n'), ((33762, 33804), 'os.path.join', 'os.path.join', (['root_dir', '"""execution_files/"""'], {}), "(root_dir, 'execution_files/')\n", (33774, 33804), False, 'import os\n'), ((35410, 35446), 'os.path.join', 'os.path.join', (['root_dir', '"""testables/"""'], {}), "(root_dir, 'testables/')\n", (35422, 35446), False, 'import os\n'), ((39432, 39501), 'pyramid.httpexceptions.HTTPConflict', 'HTTPConflict', (['"""Session could not fluch, reccomending stool softeners"""'], {}), "('Session could not fluch, reccomending stool softeners')\n", (39444, 39501), False, 'from pyramid.httpexceptions import HTTPBadRequest, HTTPConflict, HTTPError, HTTPFound, HTTPNotFound, HTTPOk, HTTPRedirection, HTTPSeeOther\n'), ((57366, 57384), 'pyramid_addons.validation.String', 'String', (['"""username"""'], {}), "('username')\n", (57372, 57384), False, 'from pyramid_addons.validation import EmailAddress, Enum, List, Or, String, RegexString, TextNumber, WhiteSpaceString, validate, SOURCE_GET, 
SOURCE_MATCHDICT as MATCHDICT\n'), ((81194, 81212), 'pyramid_addons.validation.String', 'String', (['"""username"""'], {}), "('username')\n", (81200, 81212), False, 'from pyramid_addons.validation import EmailAddress, Enum, List, Or, String, RegexString, TextNumber, WhiteSpaceString, validate, SOURCE_GET, SOURCE_MATCHDICT as MATCHDICT\n'), ((83144, 83177), 'os.path.join', 'os.path.join', (['base_path', 'filename'], {}), '(base_path, filename)\n', (83156, 83177), False, 'import os\n'), ((22599, 22646), 'os.path.join', 'os.path.join', (['project.name', 'user_path', 'filename'], {}), '(project.name, user_path, filename)\n', (22611, 22646), False, 'import os\n'), ((31560, 31595), 'os.path.join', 'os.path.join', (['root_dir', '"""testables"""'], {}), "(root_dir, 'testables')\n", (31572, 31595), False, 'import os\n'), ((31628, 31663), 'os.path.join', 'os.path.join', (['root_dir', '"""testables"""'], {}), "(root_dir, 'testables')\n", (31640, 31663), False, 'import os\n'), ((34902, 34931), 'os.path.join', 'os.path.join', (['build_dir', 'file'], {}), '(build_dir, file)\n', (34914, 34931), False, 'import os\n'), ((34000, 34033), 'os.path.join', 'os.path.join', (['execution_dir', 'file'], {}), '(execution_dir, file)\n', (34012, 34033), False, 'import os\n'), ((35728, 35772), 'os.path.join', 'os.path.join', (['testables_dir', 'testable_folder'], {}), '(testables_dir, testable_folder)\n', (35740, 35772), False, 'import os\n')] |
import numpy as np
import pytest
from cgn import Parameter
from cgn.regop import MatrixOperator, DiagonalOperator
@pytest.fixture
def x_parameter():
n = 12
beta = 42
mean = np.arange(n)
regop = DiagonalOperator(dim=n, s=np.arange(1, n+1)**2)
x = Parameter(start=np.zeros(n), name="x")
x.beta = beta
x.mean = mean
x.regop = regop
return x
@pytest.fixture
def y_parameter():
n = 3
beta = 0.
y = Parameter(start=np.zeros(n), name="y")
y.beta = beta
y.lb = np.zeros(3)
return y
@pytest.fixture
def z_parameter():
n = 1
beta = 12
mean = np.ones(n)
rmat = np.ones((n, n))
z = Parameter(start=np.zeros(n), name="z")
z.beta = beta
z.mean = mean
z.regop = MatrixOperator(rmat)
return z
@pytest.fixture
def u_parameter():
n = 4
r = np.eye(n)[:2, :]
regop = MatrixOperator(mat=r)
u = Parameter(start=np.zeros(n), name="u")
u.regop = regop
u.beta = 1.
return u | [
"numpy.eye",
"numpy.ones",
"cgn.regop.MatrixOperator",
"numpy.zeros",
"numpy.arange"
] | [((189, 201), 'numpy.arange', 'np.arange', (['n'], {}), '(n)\n', (198, 201), True, 'import numpy as np\n'), ((515, 526), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (523, 526), True, 'import numpy as np\n'), ((612, 622), 'numpy.ones', 'np.ones', (['n'], {}), '(n)\n', (619, 622), True, 'import numpy as np\n'), ((634, 649), 'numpy.ones', 'np.ones', (['(n, n)'], {}), '((n, n))\n', (641, 649), True, 'import numpy as np\n'), ((747, 767), 'cgn.regop.MatrixOperator', 'MatrixOperator', (['rmat'], {}), '(rmat)\n', (761, 767), False, 'from cgn.regop import MatrixOperator, DiagonalOperator\n'), ((865, 886), 'cgn.regop.MatrixOperator', 'MatrixOperator', ([], {'mat': 'r'}), '(mat=r)\n', (879, 886), False, 'from cgn.regop import MatrixOperator, DiagonalOperator\n'), ((836, 845), 'numpy.eye', 'np.eye', (['n'], {}), '(n)\n', (842, 845), True, 'import numpy as np\n'), ((286, 297), 'numpy.zeros', 'np.zeros', (['n'], {}), '(n)\n', (294, 297), True, 'import numpy as np\n'), ((463, 474), 'numpy.zeros', 'np.zeros', (['n'], {}), '(n)\n', (471, 474), True, 'import numpy as np\n'), ((674, 685), 'numpy.zeros', 'np.zeros', (['n'], {}), '(n)\n', (682, 685), True, 'import numpy as np\n'), ((911, 922), 'numpy.zeros', 'np.zeros', (['n'], {}), '(n)\n', (919, 922), True, 'import numpy as np\n'), ((240, 259), 'numpy.arange', 'np.arange', (['(1)', '(n + 1)'], {}), '(1, n + 1)\n', (249, 259), True, 'import numpy as np\n')] |
from keras.preprocessing.image import ImageDataGenerator
import numpy as np
from datetime import datetime
from keras.callbacks import ModelCheckpoint
from auxiliary.data_functions import *
from keras.optimizers import *
from work.stem_classifier.dl_classifier import class_model
# from tqdm import tqdm # this causes problems with kers progress bar in jupyter!!!
import logging
from auxiliary import decorators
logger = logging.getLogger(__name__)
logger_decorator = decorators.Logger_decorator(logger)
MODES_DICT = {'grayscale': 1, 'rgb': 3} # translate for image dimensions
COLOR_TO_OPENCV = {'grayscale': 0, 'rgb': 1}
OPTIMIZER_DICT = {'Adam': Adam, 'adagrad': adagrad}
"""THIS IS EXPERIMENTAL"""
class ClarifruitClassifier:
@logger_decorator.debug_dec
def __init__(self, train_path, weights_file_name='model_weights.hdf5',
data_gen_args=None, callbacks=None,
optimizer=None, optimizer_params=None, loss=None, metrics=None, pretrained_weights=None,
target_size=(256, 256), color_mode='rgb',
batch_size=10, epochs=5, steps_per_epoch=10,
valdiation_split=0.2, validation_steps=10,
train_time=None):
logger.debug(" <- __init__")
self.train_path = train_path
self.weights_file_name = weights_file_name
self.data_gen_args = data_gen_args
self.target_size = tuple(target_size)
self.color_mode = color_mode
self.input_size = (*target_size, MODES_DICT[color_mode])
self.batch_size = batch_size
self.epochs = epochs
self.steps_per_epoch = steps_per_epoch
self.validation_steps = validation_steps
self.model = None
self.optimizer = OPTIMIZER_DICT[optimizer](**optimizer_params)
self.loss = loss
self.metrics = metrics
self.pretrained_weights = pretrained_weights
self.train_generator = None
self.val_generator = None
self.model_checkpoint = None
self.callbacks = callbacks
self.train_time = train_time
self.save_to_dir = None
self.validation_split = valdiation_split
self.seed = 1
self.get_class_model()
logger.debug(" -> __init__")
@staticmethod
def custom_generator(batch_size, src_path, aug_dict, save_prefix,
color_mode="grayscale",
save_to_dir=None,
target_size=(256, 256),
seed=1):
"""
Create a datagen generator
:param batch_size: the batch size of each step
:param src_path: the path to the data
:param aug_dict: a dictionary with the data augmentation parameters of the images
:param save_prefix: if output images are saved, this is the prefix in the file names
:param color_mode: how to load the images, options are "grayscale", "rgb", default is "grayscale"
:param save_to_dir: path to save output images, if None nothing is saved, default is None
:param target_size: pixel size of output images,default is (256,256)
:param seed: random seed used in image generation, default is 1
:return: a flow_from_dictionary keras datagenerator
"""
logger.debug(f" <- custom_generator, src:\n{src_path}")
datagen = ImageDataGenerator(**aug_dict)
gen = datagen.flow_from_directory(
src_path,
class_mode=None,
color_mode=color_mode,
target_size=target_size,
batch_size=batch_size,
save_to_dir=save_to_dir,
save_prefix=save_prefix,
seed=seed)
logger.debug(f"-> custom_generator, src:\n{src_path}")
return gen
@staticmethod
def train_val_generators(batch_size, src_path, aug_dict, save_prefix,
color_mode="grayscale",
save_to_dir=None,
target_size=(256, 256),
validation_split=0.2,
seed=1):
"""
Create a datagen generator with train validation split
:param batch_size: the batch size of each step
:param src_path: the path to the data
:param folder: the name of the folder in the data_path
:param aug_dict: a dictionary with the data augmentation parameters of the images
:param save_prefix: if output images are saved, this is the prefix in the file names
:param color_mode: how to load the images, options are "grayscale", "rgb", default is "grayscale"
:param save_to_dir: path to save output images, if None nothing is saved, default is None
:param target_size: pixel size of output images,default is (256,256)
:param validation_split: size of the validation data set, default is 0.2
:param seed: random seed used in image generation, default is 1
:return: a flow_from_dictionary keras datagenerator
"""
logger.debug(f" <- train_val_generators src:\n{src_path}")
datagen = ImageDataGenerator(**aug_dict, validation_split=validation_split)
train_gen = datagen.flow_from_directory(
src_path,
class_mode='categorical',
color_mode=color_mode,
target_size=target_size,
batch_size=batch_size,
save_to_dir=save_to_dir,
save_prefix=save_prefix,
seed=seed,
subset='training')
val_gen = datagen.flow_from_directory(
src_path,
class_mode='categorical',
color_mode=color_mode,
target_size=target_size,
batch_size=batch_size,
save_to_dir=save_to_dir,
save_prefix=save_prefix,
seed=seed,
subset='validation')
logger.debug(f" -> train_val_generators src:\n{src_path}")
return train_gen, val_gen
def test_generator(self, test_path):
"""
create a generator which yield appropriate images be be used with the model's predict
method, i.e reshapes the images and loads them in the appropriate color mode
:param test_path:
:return: img- an image in an apropriate dimentions for the unet model predict method
img_entry- the result of the os.scandir method, and object with the source image name and path
orig_shape- the original shape of the source image, to be used for reshaping the prediction back to
the source image size
"""
logger.debug(" <-test_generator")
for label_entry in os.scandir(test_path):
for img_entry in os.scandir(label_entry.path):
img = cv2.imread(img_entry.path, COLOR_TO_OPENCV[self.color_mode])
if img.shape[-1] == 3:
orig_shape = img.shape[-2::-1]
else:
orig_shape = img.shape[::-1]
img = img / 255
img = cv2.resize(img, self.target_size)
if self.color_mode == "grayscale":
img = np.reshape(img, img.shape + (1,))
img = np.reshape(img, (1,) + img.shape)
yield img, img_entry, orig_shape,label_entry.name
def prediction_generator(self, test_path):
"""
a method to yield predictions from the test path
:param test_path: a path containing the test images
:return: img_entry- the result of the os.scandir method, and object with the source image name and path
pred_raw_resised- a mask image, the prediction for the image
"""
logger.info(f" generating prediction on files from {test_path}")
logger.debug(" <- prediction_generator")
test_gen = self.test_generator(test_path)
for img, img_entry, orig_shape in test_gen:
pred_raw = self.model.predict(img, batch_size=1)[0]
pred_raw_resized = cv2.resize(pred_raw, orig_shape)
yield img_entry, pred_raw_resized
"""
test_gen = self.custom_generator(batch_size=self.batch_size,
src_path=test_path,
aug_dict=self.data_gen_args,
save_prefix='test',
color_mode="rgb",
save_to_dir=None,
target_size=self.target_size,
seed=self.seed)
preds"""
def prediction(self, test_path, dest_path):
"""
a method to get predictions from a trained model of images in the test_path variable, and save the results to the
path specified in the dest_path variable
:param dest_path: the destination path to save he prediction results
:param test_path: the path where the test data resides
:return:
"""
pred_dict = ['A','B','C','D']
logger.info(f"prediction on files from {test_path}")
logger.debug(" <- prediction")
if self.train_time is None:
self.train_time = datetime.now().strftime('%Y-%m-%d_%H-%M-%S')
save_path = create_path(dest_path, self.train_time)
save_path = create_path(save_path, 'class_pred')
logger.info(f"saving predictions to {save_path}")
# saving the src_path of the current files
with open(os.path.join(save_path, "src_path.txt"), 'w') as f:
f.write(test_path)
preds = []
test_gen = self.test_generator(test_path)
for img, img_entry, orig_shape,label in test_gen:
pred_vec= self.model.predict(img, batch_size=1)
pred_int=np.argmax(pred_vec)
pred = int(pred_dict[pred_int])
preds.append(pred)
curr_save_path = create_path(save_path,pred)
_ = shutil.copy(img_entry.path,curr_save_path)
logger.debug(" -> prediction")
return save_path
def get_model(self):
"""
load a unet model for the current instance
:return:
"""
logger.debug(" <- get_unet_model")
self.model = class_model(optimizer=self.optimizer,
loss=self.loss,
metrics=self.metrics,
pretrained_weights=self.pretrained_weights,
input_size=self.input_size)
logger.debug(" -> get_unet_model")
def get_train_val_generators(self):
"""
a method to create train and validation data generators for the current instance
:return:
"""
logger.debug(f" <- clarifruit_train_val_generators")
self.train_generator, self.val_generator = self.train_val_generators(batch_size=self.batch_size,
src_path=self.train_path,
aug_dict=self.data_gen_args,
color_mode=self.color_mode,
save_prefix='aug',
save_to_dir=self.save_to_dir,
target_size=self.target_size,
seed=self.seed,
validation_split=self.validation_split)
def fit_model(self):
""" fit a unet model for the current instance"""
logger.debug(" <- fit_unet")
self.get_train_val_generators()
# x,y= next(self.train_generator)
self.model.fit_generator(
self.train_generator,
steps_per_epoch=self.steps_per_epoch,
validation_data=self.val_generator,
validation_steps=self.validation_steps,
epochs=self.epochs,
callbacks=self.callbacks,
verbose=1)
logger.debug(" -> fit_unet")
def get_class_model(self):
"""
load a unet model for the current instance
:return:
"""
logger.debug(" <- get_unet_model")
self.model = class_model(optimizer=self.optimizer,
loss=self.loss,
metrics=self.metrics,
pretrained_weights=self.pretrained_weights,
input_size=self.input_size)
logger.debug(" -> get_unet_model")
def save_model(self, dest_path, params_dict=None):
logger.debug(" <- save_model")
if params_dict is not None:
curr_folder = self.set_model_checkpint(dest_path=dest_path)
else:
curr_folder = self.get_curr_folder(dest_path=dest_path)
save_dict = params_dict.copy()
if 'callbacks' in save_dict: # callbacks are not hashable, cant save to json
save_dict.pop('callbacks')
save_json(save_dict, "model_params.json", curr_folder)
logger.debug(" -> save_model")
def get_curr_folder(self, dest_path):
self.train_time = datetime.now().strftime('%Y-%m-%d_%H-%M-%S')
curr_folder = create_path(dest_path, self.train_time)
return curr_folder
def set_model_checkpint(self, dest_path):
"""
set the model checkpoint keras callbacks method for the current training session,
where the model weights will be saved in folder assigned for the current session
:param dest_path: the destination folder where the specific session will be saved to
:return: the save folder for the current training session
"""
logger.debug(" <- set_model_checkpoint")
curr_folder = self.get_curr_folder(dest_path=dest_path)
out_model_path = os.path.join(curr_folder, self.weights_file_name)
model_checkpoint = [ModelCheckpoint(out_model_path, monitor='loss',
verbose=1, save_best_only=True)]
if self.callbacks is None:
self.callbacks = model_checkpoint
else:
self.callbacks = model_checkpoint + self.callbacks
logger.debug(" -> set_model_checkpoint")
return curr_folder
def train_model(self, params_dict, dest_path=None):
"""
train the unet model for current instance and save the results if possible
:param params_dict: the parameters used to define the current instance
:param dest_path: optional destination path to save the model
:return:
"""
logger.debug(f" <- train_model")
if dest_path is not None:
self.save_model(dest_path=dest_path, params_dict=params_dict)
self.fit_model()
logger.debug(" -> train_model")
@staticmethod
def load_model(src_path):
"""
load a pretrained model located in the src_path
:param src_path: the path containing the pretrained model
:return: the parameters of the model to be used later on
"""
params_dict = {}
pretrained_weights = {}
files = os.scandir(src_path)
for file_entry in files:
file_name_segments = file_entry.name.rsplit('.', 1)
file_name = file_name_segments[0]
file_extention = file_name_segments[-1]
if file_entry.name == 'model_params.json':
params_dict = load_json(file_entry.path)
elif file_extention == 'hdf5':
pretrained_weights = file_entry.path
params_dict['pretrained_weights'] = pretrained_weights
params_dict['train_time'] = os.path.basename(src_path)
return params_dict
| [
"logging.getLogger",
"work.stem_classifier.dl_classifier.class_model",
"numpy.reshape",
"keras.callbacks.ModelCheckpoint",
"numpy.argmax",
"keras.preprocessing.image.ImageDataGenerator",
"datetime.datetime.now",
"auxiliary.decorators.Logger_decorator"
] | [((423, 450), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (440, 450), False, 'import logging\n'), ((470, 505), 'auxiliary.decorators.Logger_decorator', 'decorators.Logger_decorator', (['logger'], {}), '(logger)\n', (497, 505), False, 'from auxiliary import decorators\n'), ((3365, 3395), 'keras.preprocessing.image.ImageDataGenerator', 'ImageDataGenerator', ([], {}), '(**aug_dict)\n', (3383, 3395), False, 'from keras.preprocessing.image import ImageDataGenerator\n'), ((5127, 5192), 'keras.preprocessing.image.ImageDataGenerator', 'ImageDataGenerator', ([], {'validation_split': 'validation_split'}), '(**aug_dict, validation_split=validation_split)\n', (5145, 5192), False, 'from keras.preprocessing.image import ImageDataGenerator\n'), ((10308, 10459), 'work.stem_classifier.dl_classifier.class_model', 'class_model', ([], {'optimizer': 'self.optimizer', 'loss': 'self.loss', 'metrics': 'self.metrics', 'pretrained_weights': 'self.pretrained_weights', 'input_size': 'self.input_size'}), '(optimizer=self.optimizer, loss=self.loss, metrics=self.metrics,\n pretrained_weights=self.pretrained_weights, input_size=self.input_size)\n', (10319, 10459), False, 'from work.stem_classifier.dl_classifier import class_model\n'), ((12542, 12693), 'work.stem_classifier.dl_classifier.class_model', 'class_model', ([], {'optimizer': 'self.optimizer', 'loss': 'self.loss', 'metrics': 'self.metrics', 'pretrained_weights': 'self.pretrained_weights', 'input_size': 'self.input_size'}), '(optimizer=self.optimizer, loss=self.loss, metrics=self.metrics,\n pretrained_weights=self.pretrained_weights, input_size=self.input_size)\n', (12553, 12693), False, 'from work.stem_classifier.dl_classifier import class_model\n'), ((9850, 9869), 'numpy.argmax', 'np.argmax', (['pred_vec'], {}), '(pred_vec)\n', (9859, 9869), True, 'import numpy as np\n'), ((14246, 14325), 'keras.callbacks.ModelCheckpoint', 'ModelCheckpoint', (['out_model_path'], {'monitor': '"""loss"""', 'verbose': 
'(1)', 'save_best_only': '(True)'}), "(out_model_path, monitor='loss', verbose=1, save_best_only=True)\n", (14261, 14325), False, 'from keras.callbacks import ModelCheckpoint\n'), ((7247, 7280), 'numpy.reshape', 'np.reshape', (['img', '((1,) + img.shape)'], {}), '(img, (1,) + img.shape)\n', (7257, 7280), True, 'import numpy as np\n'), ((13487, 13501), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (13499, 13501), False, 'from datetime import datetime\n'), ((7191, 7224), 'numpy.reshape', 'np.reshape', (['img', '(img.shape + (1,))'], {}), '(img, img.shape + (1,))\n', (7201, 7224), True, 'import numpy as np\n'), ((9269, 9283), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (9281, 9283), False, 'from datetime import datetime\n')] |
# -*- coding: utf-8 -*-
"""
Created on Thu Dec 12 10:21:22 2019
@author: GNOS
"""
import numpy as np
def inv_dist_weight(distances, b):
    """Inverse distance weight
    Parameters
    ----------
    distances : numpy.array of floats
        Distances to point of interest
    b : float
        The parameter of the inverse distance weight. The higher, the
        higher the influence of closeby stations.
    Returns
    -------
    lambdas : numpy.array of floats
        The lambda parameters of the stations (normalised so they sum to 1)
    """
    # Classic Shepard (inverse-distance) weights: w_i = d_i^-b, scaled by
    # the sum of all weights so the result is a proper weighting scheme.
    # NOTE(review): a distance of exactly 0 divides by zero here -- callers
    # must guarantee strictly positive distances.
    lambdas = 1/distances**b / np.sum(1/distances**b)
    return lambdas | [
"numpy.sum"
] | [((580, 606), 'numpy.sum', 'np.sum', (['(1 / distances ** b)'], {}), '(1 / distances ** b)\n', (586, 606), True, 'import numpy as np\n')] |
import numpy as np
import os
import warnings
# import xml.etree.ElementTree as ET
import glob
import chainer
from chainercv.datasets.voc import voc_utils
from chainercv.utils import read_image
class PTI01BboxDataset(chainer.dataset.DatasetMixin):
    """Bounding-box dataset for the PTI01 pedestrian database.

    Images are discovered recursively under ``imagespath``; the ground
    truth for ``foo.jpg`` is the parallel ``foo.txt`` under
    ``labelspath``.  Every annotated object is labelled as the VOC
    ``person`` class.
    """

    def __init__(self, imagespath, labelspath, limit=None):
        # Optional cap on the number of examples exposed by __len__.
        self.limit = None if not limit else int(limit)
        self.imagespath = imagespath
        self.labelspath = labelspath
        if not self.imagespath or not self.labelspath:
            raise Exception('missing pti database paths')
        self.file_names = glob.glob(os.path.join(self.imagespath, '**/*.jpg'), recursive=True)
        # Sort so example indices are stable across runs/platforms.
        self.file_names.sort()

    def __len__(self):
        if self.limit is None:
            return len(self.file_names)
        return self.limit

    def get_example(self, i):
        """Return the ``i``-th example as ``(img, bbox, label)``.

        img   : image array from chainercv's ``read_image`` (color=True)
        bbox  : float32 array of shape (R, 4) in (ymin, xmin, ymax, xmax)
        label : int32 array of shape (R,), all entries the VOC 'person' id
        """
        image_ = self.file_names[i]
        bbox = []
        label = []
        ground_truth_file_path = image_.replace('.jpg', '.txt').replace(self.imagespath, self.labelspath)
        with open(ground_truth_file_path, 'r') as ground_truth_file:
            for index, line in enumerate(ground_truth_file):
                if index == 0:
                    continue  # skip the first line (presumably a header/object count)
                gt = list(map(int, line.split()))  # [min_x, min_y, max_x, max_y]
                # ChainerCV expects ('ymin', 'xmin', 'ymax', 'xmax').
                bbox.append([gt[1], gt[0], gt[3], gt[2]])
                label.append(voc_utils.voc_bbox_label_names.index('person'))
        if len(bbox) > 0:
            bbox = np.stack(bbox).astype(np.float32)
            label = np.stack(label).astype(np.int32)
        else:
            # BUG FIX: an image without annotations must still yield a
            # (0, 4) bbox array (the detection-dataset convention), not a
            # flat (0,) array; np.zeros also avoids np.ndarray's
            # uninitialised-memory constructor.
            bbox = np.zeros((0, 4), dtype=np.float32)
            label = np.zeros((0,), dtype=np.int32)
        img = read_image(image_, color=True)
        return img, bbox, label
| [
"os.path.join",
"numpy.stack",
"numpy.ndarray",
"chainercv.utils.read_image",
"chainercv.datasets.voc.voc_utils.voc_bbox_label_names.index"
] | [((1821, 1851), 'chainercv.utils.read_image', 'read_image', (['image_'], {'color': '(True)'}), '(image_, color=True)\n', (1831, 1851), False, 'from chainercv.utils import read_image\n'), ((595, 636), 'os.path.join', 'os.path.join', (['self.imagespath', '"""**/*.jpg"""'], {}), "(self.imagespath, '**/*.jpg')\n", (607, 636), False, 'import os\n'), ((1709, 1746), 'numpy.ndarray', 'np.ndarray', ([], {'shape': '(0)', 'dtype': 'np.float32'}), '(shape=0, dtype=np.float32)\n', (1719, 1746), True, 'import numpy as np\n'), ((1769, 1804), 'numpy.ndarray', 'np.ndarray', ([], {'shape': '(0)', 'dtype': 'np.int32'}), '(shape=0, dtype=np.int32)\n', (1779, 1804), True, 'import numpy as np\n'), ((1467, 1513), 'chainercv.datasets.voc.voc_utils.voc_bbox_label_names.index', 'voc_utils.voc_bbox_label_names.index', (['"""person"""'], {}), "('person')\n", (1503, 1513), False, 'from chainercv.datasets.voc import voc_utils\n'), ((1589, 1603), 'numpy.stack', 'np.stack', (['bbox'], {}), '(bbox)\n', (1597, 1603), True, 'import numpy as np\n'), ((1643, 1658), 'numpy.stack', 'np.stack', (['label'], {}), '(label)\n', (1651, 1658), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
import numpy as np
import kaldi
# Round-trip check: write two float matrices into a Kaldi archive, then
# read them back both sequentially and via random access.
wspecifier = 'ark,scp:/tmp/feats.ark,/tmp/feats.scp'

with kaldi.MatrixWriter(wspecifier) as writer:
    mat = np.arange(6).reshape(2, 3).astype(np.float32)
    writer.Write(key='foo', value=mat)

    diag = kaldi.FloatMatrix(2, 2)
    diag[0, 0] = 10
    diag[1, 1] = 20
    writer.Write('bar', diag)

rspecifier = 'scp:/tmp/feats.scp'

# Ground truth for both keys, captured once.
expected = {'foo': mat, 'bar': diag.numpy()}

with kaldi.SequentialMatrixReader(rspecifier) as reader:
    for key, value in reader:
        assert key in expected
        np.testing.assert_array_equal(value.numpy(), expected[key])

with kaldi.RandomAccessMatrixReader(rspecifier) as reader:
    assert 'foo' in reader
    assert 'bar' in reader
    np.testing.assert_array_equal(reader['foo'].numpy(), mat)
    np.testing.assert_array_equal(reader['bar'].numpy(), diag.numpy())
| [
"kaldi.FloatMatrix",
"numpy.arange",
"kaldi.MatrixWriter",
"kaldi.SequentialMatrixReader",
"kaldi.RandomAccessMatrixReader"
] | [((116, 146), 'kaldi.MatrixWriter', 'kaldi.MatrixWriter', (['wspecifier'], {}), '(wspecifier)\n', (134, 146), False, 'import kaldi\n'), ((258, 281), 'kaldi.FloatMatrix', 'kaldi.FloatMatrix', (['(2)', '(2)'], {}), '(2, 2)\n', (275, 281), False, 'import kaldi\n'), ((383, 423), 'kaldi.SequentialMatrixReader', 'kaldi.SequentialMatrixReader', (['rspecifier'], {}), '(rspecifier)\n', (411, 423), False, 'import kaldi\n'), ((675, 717), 'kaldi.RandomAccessMatrixReader', 'kaldi.RandomAccessMatrixReader', (['rspecifier'], {}), '(rspecifier)\n', (705, 717), False, 'import kaldi\n'), ((166, 178), 'numpy.arange', 'np.arange', (['(6)'], {}), '(6)\n', (175, 178), True, 'import numpy as np\n')] |
import csv
import numpy as np
from typing import Dict, List
from PyQt5.QtGui import QImage, QColor
import src.core.config as config
def parse(path: str, num_classes: int) -> Dict[int, List[np.ndarray]]:
    """Read a label-first CSV of flattened, 28-column images.

    Each row is ``label, pixel0, pixel1, ...``; the pixel values are
    reshaped to ``(-1, 28)`` and appended to the list for that label.

    Args:
        path: Path to the CSV file.
        num_classes: Number of distinct labels (keys ``0..num_classes-1``).

    Returns:
        Mapping from label to the list of parsed float32 matrices.
    """
    with open(path, newline='\n') as csv_file:
        data_set = prepare_data_set_dict(num_classes)
        data_reader = csv.reader(csv_file, delimiter=',')
        # enumerate(..., start=1) replaces the original hand-rolled counter.
        for i, row in enumerate(data_reader, start=1):
            set_label = int(row[0])
            np_set = np.asarray(row[1:], dtype=np.float32)
            np_set = np.reshape(np_set, (-1, 28))
            data_set[set_label].append(np_set)
            if i % 1000 == 0:
                print('csv string parsed: ', i)  # progress every 1000 rows
        return data_set
def prepare_data_set_dict(num_classes: int) -> Dict[int, list]:
    """Return a dict mapping every label in [0, num_classes) to a fresh empty list."""
    # Dict comprehension replaces the original manual loop; each value is a
    # distinct list object, so appends to one label never leak into another.
    return {label: [] for label in range(num_classes)}
def dump_image(matrix: np.ndarray, number: int) -> None:
    """Save a 2-D grayscale matrix as ``dump/img<number>.png``.

    ``matrix[row][col]`` maps to image pixel ``(x=col, y=row)``.
    """
    # Generalized: take the image size from the matrix instead of a
    # hard-coded 28x28 (width = number of columns, height = number of rows).
    image = QImage(matrix.shape[1], matrix.shape[0], QImage.Format_RGB32)
    for row in range(0, matrix.shape[0]):
        for col in range(0, matrix.shape[1]):
            grayscale = matrix[row][col]
            # BUG FIX: QImage.setPixelColor takes (x, y) = (column, row);
            # the original passed (row, col) and saved a transposed image.
            # NOTE(review): QColor expects ints in 0-255 -- confirm the
            # matrix is not 0..1-normalised before calling this.
            image.setPixelColor(col, row, QColor(grayscale, grayscale, grayscale))
    image.save('dump/img' + str(number) + '.png')
| [
"numpy.reshape",
"PyQt5.QtGui.QColor",
"numpy.asarray",
"PyQt5.QtGui.QImage",
"csv.reader"
] | [((975, 1010), 'PyQt5.QtGui.QImage', 'QImage', (['(28)', '(28)', 'QImage.Format_RGB32'], {}), '(28, 28, QImage.Format_RGB32)\n', (981, 1010), False, 'from PyQt5.QtGui import QImage, QColor\n'), ((329, 364), 'csv.reader', 'csv.reader', (['csv_file'], {'delimiter': '""","""'}), "(csv_file, delimiter=',')\n", (339, 364), False, 'import csv\n'), ((469, 506), 'numpy.asarray', 'np.asarray', (['row[1:]'], {'dtype': 'np.float32'}), '(row[1:], dtype=np.float32)\n', (479, 506), True, 'import numpy as np\n'), ((528, 556), 'numpy.reshape', 'np.reshape', (['np_set', '(-1, 28)'], {}), '(np_set, (-1, 28))\n', (538, 556), True, 'import numpy as np\n'), ((1170, 1209), 'PyQt5.QtGui.QColor', 'QColor', (['grayscale', 'grayscale', 'grayscale'], {}), '(grayscale, grayscale, grayscale)\n', (1176, 1209), False, 'from PyQt5.QtGui import QImage, QColor\n')] |
# Get Python six functionality:
from __future__ import absolute_import, division, print_function, unicode_literals
import keras.layers
import keras.models
import numpy as np
import pytest
import innvestigate.tools.perturbate
import innvestigate.utils as iutils
###############################################################################
###############################################################################
###############################################################################
###############################################################################
###############################################################################
###############################################################################
@pytest.mark.fast
@pytest.mark.precommit
def test_fast__PerturbationAnalysis():
    """End-to-end check of PerturbationAnalysis on a tiny linear model.

    Two 4x4 single-channel samples are pushed through a bias-free
    Dense(1) layer with fixed weights 0..15, so the model output is an
    exact dot product; the perturbation scores are compared against
    precomputed constants.
    """
    # Some test data -- honour the backend's NCHW/NHWC convention.
    if keras.backend.image_data_format() == "channels_first":
        input_shape = (2, 1, 4, 4)
    else:
        input_shape = (2, 4, 4, 1)
    x = np.arange(2 * 4 * 4).reshape(input_shape)
    generator = iutils.BatchSequence([x, np.zeros(x.shape[0])], batch_size=x.shape[0])
    # Simple model: Flatten followed by a single linear unit, no bias.
    model = keras.models.Sequential(
        [
            keras.layers.Flatten(input_shape=x.shape[1:]),
            keras.layers.Dense(1, use_bias=False),
        ]
    )
    weights = np.arange(4 * 4 * 1).reshape((4 * 4, 1))
    model.layers[-1].set_weights([weights])
    model.compile(loss="mean_squared_error", optimizer="sgd")
    # Exact dot products: sum(i^2, i=0..15)=1240 and 1240+16*120=3160.
    expected_output = np.array([[1240.0], [3160.0]])
    assert np.all(np.isclose(model.predict(x), expected_output))
    # Analyzer
    analyzer = innvestigate.create_analyzer("gradient", model, postprocess="abs")
    # Run perturbation analysis: zero out 2x2 regions, one region per step.
    perturbation = innvestigate.tools.perturbate.Perturbation(
        "zeros", region_shape=(2, 2), in_place=False
    )
    perturbation_analysis = innvestigate.tools.perturbate.PerturbationAnalysis(
        analyzer,
        model,
        generator,
        perturbation,
        recompute_analysis=False,
        steps=3,
        regions_per_step=1,
        verbose=False,
    )
    scores = perturbation_analysis.compute_perturbation_analysis()
    # Precomputed score sequence for this fixed model/input combination.
    expected_scores = np.array([5761600.0, 1654564.0, 182672.0, 21284.0])
    assert np.all(np.isclose(scores, expected_scores))
@pytest.mark.fast
@pytest.mark.precommit
def test_fast__Perturbation():
    """Unit-check Perturbation's region aggregation, ordering and masking.

    A 4x4 analysis map is built with one constant value per quadrant
    (0, 1, 2, 3), so the aggregated 2x2 region values, their ordering and
    the resulting perturbation masks are all known exactly.
    """
    if keras.backend.image_data_format() == "channels_first":
        input_shape = (1, 1, 4, 4)
    else:
        input_shape = (1, 4, 4, 1)
    x = np.arange(1 * 4 * 4).reshape(input_shape)
    perturbation = innvestigate.tools.perturbate.Perturbation(
        "zeros", region_shape=(2, 2), in_place=False
    )
    # Quadrant map: top-left 0, top-right 1, bottom-left 2, bottom-right 3.
    analysis = np.zeros((4, 4))
    analysis[:2, 2:] = 1
    analysis[2:, :2] = 2
    analysis[2:, 2:] = 3
    analysis = analysis.reshape(input_shape)
    # Move the channel axis to position 1: reduce_function below reduces
    # over axis=1, i.e. it expects NCHW-ordered data.
    if keras.backend.image_data_format() == "channels_last":
        x = np.moveaxis(x, 3, 1)
        analysis = np.moveaxis(analysis, 3, 1)
    analysis = perturbation.reduce_function(analysis, axis=1, keepdims=True)
    aggregated_regions = perturbation.aggregate_regions(analysis)
    # Each 2x2 region aggregates to its quadrant's constant value.
    assert np.all(
        np.isclose(aggregated_regions[0, 0, :, :], np.array([[0, 1], [2, 3]]))
    )
    ranks = perturbation.compute_region_ordering(aggregated_regions)
    # Rank 0 is the highest-valued region (bottom-right), rank 3 the lowest.
    assert np.all(np.isclose(ranks[0, 0, :, :], np.array([[3, 2], [1, 0]])))
    # Masking 1 region selects only the top-ranked one ...
    perturbation_mask_regions = perturbation.compute_perturbation_mask(ranks, 1)
    assert np.all(perturbation_mask_regions == np.array([[0, 0], [0, 1]]))
    # ... masking all 4 selects everything ...
    perturbation_mask_regions = perturbation.compute_perturbation_mask(ranks, 4)
    assert np.all(perturbation_mask_regions == np.array([[1, 1], [1, 1]]))
    # ... and masking 0 selects nothing.
    perturbation_mask_regions = perturbation.compute_perturbation_mask(ranks, 0)
    assert np.all(perturbation_mask_regions == np.array([[0, 0], [0, 0]]))
| [
"numpy.isclose",
"numpy.array",
"numpy.zeros",
"numpy.moveaxis",
"numpy.arange"
] | [((1506, 1536), 'numpy.array', 'np.array', (['[[1240.0], [3160.0]]'], {}), '([[1240.0], [3160.0]])\n', (1514, 1536), True, 'import numpy as np\n'), ((2209, 2260), 'numpy.array', 'np.array', (['[5761600.0, 1654564.0, 182672.0, 21284.0]'], {}), '([5761600.0, 1654564.0, 182672.0, 21284.0])\n', (2217, 2260), True, 'import numpy as np\n'), ((2721, 2737), 'numpy.zeros', 'np.zeros', (['(4, 4)'], {}), '((4, 4))\n', (2729, 2737), True, 'import numpy as np\n'), ((2279, 2314), 'numpy.isclose', 'np.isclose', (['scores', 'expected_scores'], {}), '(scores, expected_scores)\n', (2289, 2314), True, 'import numpy as np\n'), ((2932, 2952), 'numpy.moveaxis', 'np.moveaxis', (['x', '(3)', '(1)'], {}), '(x, 3, 1)\n', (2943, 2952), True, 'import numpy as np\n'), ((2972, 2999), 'numpy.moveaxis', 'np.moveaxis', (['analysis', '(3)', '(1)'], {}), '(analysis, 3, 1)\n', (2983, 2999), True, 'import numpy as np\n'), ((999, 1019), 'numpy.arange', 'np.arange', (['(2 * 4 * 4)'], {}), '(2 * 4 * 4)\n', (1008, 1019), True, 'import numpy as np\n'), ((1082, 1102), 'numpy.zeros', 'np.zeros', (['x.shape[0]'], {}), '(x.shape[0])\n', (1090, 1102), True, 'import numpy as np\n'), ((1336, 1356), 'numpy.arange', 'np.arange', (['(4 * 4 * 1)'], {}), '(4 * 4 * 1)\n', (1345, 1356), True, 'import numpy as np\n'), ((2540, 2560), 'numpy.arange', 'np.arange', (['(1 * 4 * 4)'], {}), '(1 * 4 * 4)\n', (2549, 2560), True, 'import numpy as np\n'), ((3215, 3241), 'numpy.array', 'np.array', (['[[0, 1], [2, 3]]'], {}), '([[0, 1], [2, 3]])\n', (3223, 3241), True, 'import numpy as np\n'), ((3367, 3393), 'numpy.array', 'np.array', (['[[3, 2], [1, 0]]'], {}), '([[3, 2], [1, 0]])\n', (3375, 3393), True, 'import numpy as np\n'), ((3525, 3551), 'numpy.array', 'np.array', (['[[0, 0], [0, 1]]'], {}), '([[0, 0], [0, 1]])\n', (3533, 3551), True, 'import numpy as np\n'), ((3682, 3708), 'numpy.array', 'np.array', (['[[1, 1], [1, 1]]'], {}), '([[1, 1], [1, 1]])\n', (3690, 3708), True, 'import numpy as np\n'), ((3839, 3865), 
'numpy.array', 'np.array', (['[[0, 0], [0, 0]]'], {}), '([[0, 0], [0, 0]])\n', (3847, 3865), True, 'import numpy as np\n')] |
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from collections import deque
import random
import time
import sys
from Models.Networks.SimpleNetwork import Network
from utils.tool import read_config, read_seed
# Seed NumPy's RNG from the project-wide seed file for reproducible runs.
np.random.seed(read_seed())
# All hyper-parameters come from the DQN YAML config.
config = read_config("./Configs/configDQN.yml")
N_ACTIONS = config["n_actions"]  # size of the discrete action space
GAMMA = config["gamma"]  # discount factor used in the TD target (Agent.train_nn)
EPSILON_DECAY = config["epsilon_decay"]  # multiplicative per-step epsilon decay
EPSILON_MIN = config["epsilon_min"]  # floor for the exploration rate
BATCH_SIZE = config["batch_size"]  # minibatch size sampled from the replay buffer
MIN_REPLAY_SIZE = config["min_replay_size"]  # buffer warm-up size before training starts
TARGET_UPDATE = config["target_update"]  # training steps between target-network syncs
SAMPLING_TIME = config["sampling_time"]  # not used in this module -- presumably the env step period
NAME = config["model_name"]  # file stem used by Agent.save_model
# Deep Q Network agent
class Agent:
    """DQN agent: epsilon-greedy policy, replay buffer, target network.

    The online network ``Q`` is trained on minibatches sampled from the
    replay buffer; ``target_Q`` supplies the bootstrap targets and is
    re-synced every TARGET_UPDATE training steps.
    """

    def __init__(self):
        # Replay buffer of (obs, action, clipped_reward, next_obs, done).
        self.replay = deque(maxlen=config["size_buffer"])
        self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
        self.Q = Network(config).to(self.device)
        self.target_Q = Network(config).to(self.device)
        # Start with the target network synchronized to the online one.
        self.target_Q.load_state_dict(self.Q.state_dict())
        # Agent state.
        self.epsilon = config["epsilon"]
        self.target_update_counter = 0
        self.loss = None

    def save_model(self):
        """Persist the online network's weights to ./ModelSaved/<NAME>.pth."""
        torch.save(self.Q.state_dict(), "./ModelSaved/" + NAME + '.pth')
        print("Model saved")

    def add_transition(self, obs, action, reward, next_obs, done):
        """Append one transition to the replay buffer (reward clipped to [-1, 1])."""
        self.replay.append((obs, action, self.reward_clipping(reward), next_obs, done))

    def reward_clipping(self, reward):
        """Clip a reward into [-1, 1], the standard DQN reward range."""
        if reward > 1:
            reward = 1
        elif reward < -1:
            reward = -1
        return reward

    def choose_action(self, obs):
        """Return an action chosen epsilon-greedily.

        obs is a pair (image, nav_state); both parts are fed to the
        network with a leading batch dimension of 1.
        """
        if np.random.uniform() < self.epsilon:
            action = np.random.choice(N_ACTIONS)
        else:
            q_values = self.Q(
                torch.tensor(obs[0], device=self.device, dtype=torch.float).unsqueeze(0),
                torch.tensor(obs[1], device=self.device, dtype=torch.float).unsqueeze(0))
            action = torch.argmax(q_values).item()
        return action

    def train_nn(self):
        """Run one DQN training step on a random minibatch.

        No-op until the replay buffer holds MIN_REPLAY_SIZE transitions.
        """
        if len(self.replay) < MIN_REPLAY_SIZE:
            return
        # Sample BATCH_SIZE transition indices (with replacement).
        idx = np.random.choice(len(self.replay), BATCH_SIZE, replace=True)
        # Index the deque directly: np.array() over tuples holding arrays of
        # differing shapes builds a ragged array, which recent NumPy rejects.
        mini_batch = [self.replay[j] for j in idx]
        # Split the transitions into per-field tensors.
        current_states_img = torch.tensor([transition[0][0] for transition in mini_batch], device=self.device, dtype=torch.float)
        current_states_nav = torch.tensor([transition[0][1] for transition in mini_batch], device=self.device, dtype=torch.float)
        actions = torch.tensor([transition[1] for transition in mini_batch], device=self.device, dtype=torch.long)
        rewards = torch.tensor([transition[2] for transition in mini_batch], device=self.device, dtype=torch.float)
        new_current_states_img = torch.tensor([transition[3][0] for transition in mini_batch], device=self.device, dtype=torch.float)
        new_current_states_nav = torch.tensor([transition[3][1] for transition in mini_batch], device=self.device, dtype=torch.float)
        not_done = torch.tensor([not transition[4] for transition in mini_batch], device=self.device, dtype=torch.bool)
        # Bootstrap target from the target network; detach so no gradients
        # flow into (or accumulate on) target_Q.
        next_state_values = self.target_Q(new_current_states_img, new_current_states_nav).max(1)[0].detach()
        values = rewards + GAMMA * next_state_values * not_done
        # Q values predicted by the online network for the taken actions.
        target_values = self.Q(current_states_img, current_states_nav)
        target_values = target_values.gather(dim=1, index=actions.unsqueeze(-1)).squeeze(-1)
        # Gradient descent step on the TD error.
        loss_t = self.Q.loss_function(values, target_values)
        self.loss = loss_t
        self.Q.optimizer.zero_grad()
        loss_t.backward()
        for param in self.Q.parameters():
            # BUG FIX: the original called clamp(), which is out-of-place and
            # discards its result -- clamp_() actually clips the gradients.
            param.grad.data.clamp_(-1, 1)
        self.Q.optimizer.step()
        self.update_target()
        self.update_epsilon()

    def update_target(self):
        """Sync target_Q from Q once every TARGET_UPDATE training steps."""
        self.target_update_counter += 1
        if self.target_update_counter > TARGET_UPDATE:
            self.target_Q.load_state_dict(self.Q.state_dict())
            self.target_update_counter = 0

    def update_epsilon(self):
        """Multiplicatively decay epsilon, never dropping below EPSILON_MIN."""
        self.epsilon *= EPSILON_DECAY
        self.epsilon = max(self.epsilon, EPSILON_MIN)
| [
"collections.deque",
"utils.tool.read_config",
"numpy.random.choice",
"torch.argmax",
"torch.tensor",
"utils.tool.read_seed",
"numpy.array",
"torch.cuda.is_available",
"numpy.random.uniform",
"Models.Networks.SimpleNetwork.Network"
] | [((285, 323), 'utils.tool.read_config', 'read_config', (['"""./Configs/configDQN.yml"""'], {}), "('./Configs/configDQN.yml')\n", (296, 323), False, 'from utils.tool import read_config, read_seed\n'), ((262, 273), 'utils.tool.read_seed', 'read_seed', ([], {}), '()\n', (271, 273), False, 'from utils.tool import read_config, read_seed\n'), ((799, 834), 'collections.deque', 'deque', ([], {'maxlen': "config['size_buffer']"}), "(maxlen=config['size_buffer'])\n", (804, 834), False, 'from collections import deque\n'), ((2300, 2405), 'torch.tensor', 'torch.tensor', (['[transition[0][0] for transition in mini_batch]'], {'device': 'self.device', 'dtype': 'torch.float'}), '([transition[0][0] for transition in mini_batch], device=self.\n device, dtype=torch.float)\n', (2312, 2405), False, 'import torch\n'), ((2424, 2529), 'torch.tensor', 'torch.tensor', (['[transition[0][1] for transition in mini_batch]'], {'device': 'self.device', 'dtype': 'torch.float'}), '([transition[0][1] for transition in mini_batch], device=self.\n device, dtype=torch.float)\n', (2436, 2529), False, 'import torch\n'), ((2541, 2642), 'torch.tensor', 'torch.tensor', (['[transition[1] for transition in mini_batch]'], {'device': 'self.device', 'dtype': 'torch.long'}), '([transition[1] for transition in mini_batch], device=self.\n device, dtype=torch.long)\n', (2553, 2642), False, 'import torch\n'), ((2650, 2752), 'torch.tensor', 'torch.tensor', (['[transition[2] for transition in mini_batch]'], {'device': 'self.device', 'dtype': 'torch.float'}), '([transition[2] for transition in mini_batch], device=self.\n device, dtype=torch.float)\n', (2662, 2752), False, 'import torch\n'), ((2776, 2881), 'torch.tensor', 'torch.tensor', (['[transition[3][0] for transition in mini_batch]'], {'device': 'self.device', 'dtype': 'torch.float'}), '([transition[3][0] for transition in mini_batch], device=self.\n device, dtype=torch.float)\n', (2788, 2881), False, 'import torch\n'), ((2904, 3009), 'torch.tensor', 
'torch.tensor', (['[transition[3][1] for transition in mini_batch]'], {'device': 'self.device', 'dtype': 'torch.float'}), '([transition[3][1] for transition in mini_batch], device=self.\n device, dtype=torch.float)\n', (2916, 3009), False, 'import torch\n'), ((3019, 3126), 'torch.tensor', 'torch.tensor', (['[(not transition[4]) for transition in mini_batch]'], {'device': 'self.device', 'dtype': 'torch.bool'}), '([(not transition[4]) for transition in mini_batch], device=\n self.device, dtype=torch.bool)\n', (3031, 3126), False, 'import torch\n'), ((1693, 1712), 'numpy.random.uniform', 'np.random.uniform', ([], {}), '()\n', (1710, 1712), True, 'import numpy as np\n'), ((1741, 1768), 'numpy.random.choice', 'np.random.choice', (['N_ACTIONS'], {}), '(N_ACTIONS)\n', (1757, 1768), True, 'import numpy as np\n'), ((2200, 2221), 'numpy.array', 'np.array', (['self.replay'], {}), '(self.replay)\n', (2208, 2221), True, 'import numpy as np\n'), ((878, 903), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (901, 903), False, 'import torch\n'), ((928, 943), 'Models.Networks.SimpleNetwork.Network', 'Network', (['config'], {}), '(config)\n', (935, 943), False, 'from Models.Networks.SimpleNetwork import Network\n'), ((978, 993), 'Models.Networks.SimpleNetwork.Network', 'Network', (['config'], {}), '(config)\n', (985, 993), False, 'from Models.Networks.SimpleNetwork import Network\n'), ((1958, 1973), 'torch.argmax', 'torch.argmax', (['y'], {}), '(y)\n', (1970, 1973), False, 'import torch\n'), ((1791, 1850), 'torch.tensor', 'torch.tensor', (['obs[0]'], {'device': 'self.device', 'dtype': 'torch.float'}), '(obs[0], device=self.device, dtype=torch.float)\n', (1803, 1850), False, 'import torch\n'), ((1871, 1930), 'torch.tensor', 'torch.tensor', (['obs[1]'], {'device': 'self.device', 'dtype': 'torch.float'}), '(obs[1], device=self.device, dtype=torch.float)\n', (1883, 1930), False, 'import torch\n')] |
import numpy as np
from tensorflow.examples.tutorials.mnist import input_data
# Load MNIST (integer labels, not one-hot) and dump the raw test split
# to .npy files for later reuse.
mnist = input_data.read_data_sets('MNIST_data', one_hot=False)
xs = mnist.test.images  # test-set images as returned by the TF MNIST reader
ys = mnist.test.labels  # integer class labels for the test set
np.save('orig_images.npy', xs)
np.save('orig_labels.npy', ys)
| [
"tensorflow.examples.tutorials.mnist.input_data.read_data_sets",
"numpy.save"
] | [((87, 141), 'tensorflow.examples.tutorials.mnist.input_data.read_data_sets', 'input_data.read_data_sets', (['"""MNIST_data"""'], {'one_hot': '(False)'}), "('MNIST_data', one_hot=False)\n", (112, 141), False, 'from tensorflow.examples.tutorials.mnist import input_data\n'), ((189, 219), 'numpy.save', 'np.save', (['"""orig_images.npy"""', 'xs'], {}), "('orig_images.npy', xs)\n", (196, 219), True, 'import numpy as np\n'), ((220, 250), 'numpy.save', 'np.save', (['"""orig_labels.npy"""', 'ys'], {}), "('orig_labels.npy', ys)\n", (227, 250), True, 'import numpy as np\n')] |
#Create various plots of chem evo models against data
import numpy as np
import pandas as pd
import math
from astropy.io import fits
from astropy.table import Table
import matplotlib.pyplot as plt
from matplotlib.colors import PowerNorm
import matplotlib.colors as colors
import sys
# Make the local chemevo package importable and load the model output.
sys.path.append('./scripts/')
from chemevo import *
# Alternative model files kept for quick switching:
#fl = chem_evo_data('./search.hdf5')
#fl = chem_evo_data('./multioutput.hdf5')
fl = chem_evo_data('./output.hdf5')
#astroNN VAC file from APOGEE DR16
data_file_1 = '/data/ktfm2/apogee_data/apogee_astroNN_DR16.fits'
# memmap=True avoids loading the whole FITS file into memory at once.
hdu_list_1 = fits.open(data_file_1, memmap=True)
# The catalogue table lives in the first FITS extension (HDU 1).
apogee_data = Table(hdu_list_1[1].data)
def betw(x, l, u):
    """Element-wise test for l < x < u (works on scalars and numpy arrays)."""
    return (l < x) & (x < u)
def outs(x, l, u):
    """Element-wise test for x strictly outside [l, u] (x < l or x > u)."""
    return (l > x) | (x > u)
# Quality-cut mask for the radial-migration sample: finite rl/age/abundance
# columns, positive ages, giants only (LOGG < 3.5), |GALZ| between 1 and 5 kpc,
# [Fe/H] error below 0.2 dex, and guiding radius rl in the solar annulus
# (7.6-8.6 kpc).
Remove_nans = (~pd.isna(apogee_data['rl']))&(~pd.isna(apogee_data['age_lowess_correct']))&(apogee_data['age_lowess_correct']>0.0)&(~pd.isna(apogee_data['FE_H']))&(~pd.isna(apogee_data['MG_H']))&(~pd.isna(apogee_data['LOGG']))&(~pd.isna(apogee_data['FE_H_ERR']))&(apogee_data['LOGG']<3.5)&(outs(apogee_data['GALZ'],-1.0,1.0))&(betw(apogee_data['GALZ'],-5.0,5.0))&(apogee_data['FE_H_ERR']<0.2)&(betw(apogee_data['rl'],7.6,8.6))
#=====================================================================================================================
# Radial migration: perturb each star's guiding radius by a Gaussian whose
# width grows with the square root of age (4 kpc at the 13.7 Gyr maximum).
rl_random = np.random.normal(0.0, 4.0*((apogee_data['age_lowess_correct'][Remove_nans]/13.7)**0.5))
rl_scatter = apogee_data['rl'][Remove_nans]+rl_random
#====================================================================================================================
# Paint model [Fe/H] and [Mg/H] at the scattered radii; the model time
# coordinate is lookback-inverted (13.7 - age).
rl_scatter_model_fe = fl.paint(rl_scatter,(13.7-apogee_data['age_lowess_correct'][Remove_nans]),['Fe'])
rl_scatter_model_mg = fl.paint(rl_scatter,(13.7-apogee_data['age_lowess_correct'][Remove_nans]),['Mg'])
#=======================================================================================================================
# Scatter plot: observed (blue) vs painted model (red), no observational scatter.
plt.scatter(apogee_data['FE_H'][Remove_nans],apogee_data['MG_H'][Remove_nans]-apogee_data['FE_H'][Remove_nans],s=2.0,color='b')
plt.scatter(rl_scatter_model_fe['Fe_H'],rl_scatter_model_mg['Mg_H']-rl_scatter_model_fe['Fe_H'],s=2.0,color='r')
plt.title('Radial Migration Effects')
plt.xlabel('[Fe/H]')
plt.ylabel('[Mg/Fe]')
plt.show()
#=======================================================================================================================
# Observational scatter: draw Gaussian noise per star from the catalogued
# [Fe/H] and [Mg/H] measurement errors.
model_fe_uncert = np.random.normal(0.0, apogee_data['FE_H_ERR'][Remove_nans])
model_mg_uncert = np.random.normal(0.0, apogee_data['MG_H_ERR'][Remove_nans])
# Painted abundances with the observational scatter added.
model_fe_random = rl_scatter_model_fe['Fe_H'] + model_fe_uncert
model_mg_random = rl_scatter_model_mg['Mg_H'] + model_mg_uncert
#==============================================================================================================
# Scatter applied to both Fe and Mg.
plt.scatter(apogee_data['FE_H'][Remove_nans],apogee_data['MG_H'][Remove_nans]-apogee_data['FE_H'][Remove_nans],s=2.0, color='b')
plt.scatter(model_fe_random,model_mg_random-model_fe_random,s=2.0,color='r')
plt.title('Scatter both Fe and Mg')
plt.xlabel('[Fe/H]')
plt.ylabel('[Mg/Fe]')
plt.show()
#=============================================================================================================
# Scatter applied to Fe only (Fe enters both axes, so both are affected).
plt.scatter(apogee_data['FE_H'][Remove_nans],apogee_data['MG_H'][Remove_nans]-apogee_data['FE_H'][Remove_nans],s=2.0, color='b')
plt.scatter(model_fe_random,rl_scatter_model_mg['Mg_H']-model_fe_random,s=2.0,color='r')
plt.title('Scatter only Fe, but in both axes')
plt.xlabel('[Fe/H]')
plt.ylabel('[Mg/Fe]')
plt.show()
#==============================================================================================================
# Scatter applied to Mg only (Mg enters only the y-axis quantity).
plt.scatter(apogee_data['FE_H'][Remove_nans],apogee_data['MG_H'][Remove_nans]-apogee_data['FE_H'][Remove_nans],s=2.0, color='b')
plt.scatter(rl_scatter_model_fe['Fe_H'],model_mg_random-rl_scatter_model_fe['Fe_H'],s=2.0,color='r')
plt.title('Scatter only Mg, only affects y-axis')
plt.xlabel('[Fe/H]')
plt.ylabel('[Mg/Fe]')
plt.show()
#==============================================================================================================
# Side-by-side observed-vs-model density plots, one per scatter variant.
def _density_compare(model_fe, model_mg, scatter_desc):
    """Plot the observed APOGEE [Fe/H]-[Mg/Fe] density next to the model's.

    model_fe, model_mg : painted [Fe/H] and [Mg/H] arrays (optionally with
        observational scatter already added); the y-axis shows
        model_mg - model_fe.
    scatter_desc : short description of the scatter applied, appended to
        the figure suptitle.
    """
    f, a = plt.subplots(1, 2, figsize=[15., 3.], sharex=True, sharey=True)
    plt.sca(a[0])
    plt.hist2d(apogee_data['FE_H'][Remove_nans],
               apogee_data['MG_H'][Remove_nans] - apogee_data['FE_H'][Remove_nans],
               bins=50, range=[[-1.5, 0.6], [-0.1, 0.4]],
               norm=colors.LogNorm(), cmap=plt.cm.Spectral_r)
    plt.xlabel('[Fe/H]')
    # BUG FIX: the plotted quantity is MG_H - FE_H, i.e. [Mg/Fe]; the
    # original labelled it '[Mg/H]' (the scatter plots above already use
    # '[Mg/Fe]' for the same quantity).
    plt.ylabel('[Mg/Fe]')
    plt.title('Density plot of data')
    plt.sca(a[1])
    plt.hist2d(model_fe, model_mg - model_fe,
               bins=50, range=[[-1.5, 0.6], [-0.1, 0.4]],
               norm=colors.LogNorm(), cmap=plt.cm.Spectral_r)
    plt.xlabel('[Fe/H]')
    plt.title('Density plot of painted data')
    f.suptitle('Comparison between observed and model predictions, ' + scatter_desc)
    plt.show()

# Radial migration only, then with observational scatter on Fe, both, and Mg.
_density_compare(rl_scatter_model_fe['Fe_H'], rl_scatter_model_mg['Mg_H'], 'w/ RM, wo/Scatter')
_density_compare(model_fe_random, rl_scatter_model_mg['Mg_H'], 'w/ RM, w/Fe Scatter')
_density_compare(model_fe_random, model_mg_random, 'w/ RM, w/Both Scatter')
_density_compare(rl_scatter_model_fe['Fe_H'], model_mg_random, 'w/ RM, w/Mg Scatter')
| [
"numpy.random.normal",
"astropy.table.Table",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.sca",
"matplotlib.pyplot.scatter",
"astropy.io.fits.open",
"matplotlib.colors.LogNorm",
"matplotlib.pyplot.title",
"pandas.isna",
"sys.path.append",
"matplotlib.pyplot.subpl... | [((283, 312), 'sys.path.append', 'sys.path.append', (['"""./scripts/"""'], {}), "('./scripts/')\n", (298, 312), False, 'import sys\n'), ((567, 602), 'astropy.io.fits.open', 'fits.open', (['data_file_1'], {'memmap': '(True)'}), '(data_file_1, memmap=True)\n', (576, 602), False, 'from astropy.io import fits\n'), ((617, 642), 'astropy.table.Table', 'Table', (['hdu_list_1[1].data'], {}), '(hdu_list_1[1].data)\n', (622, 642), False, 'from astropy.table import Table\n'), ((1357, 1452), 'numpy.random.normal', 'np.random.normal', (['(0.0)', "(4.0 * (apogee_data['age_lowess_correct'][Remove_nans] / 13.7) ** 0.5)"], {}), "(0.0, 4.0 * (apogee_data['age_lowess_correct'][Remove_nans] /\n 13.7) ** 0.5)\n", (1373, 1452), True, 'import numpy as np\n'), ((2029, 2166), 'matplotlib.pyplot.scatter', 'plt.scatter', (["apogee_data['FE_H'][Remove_nans]", "(apogee_data['MG_H'][Remove_nans] - apogee_data['FE_H'][Remove_nans])"], {'s': '(2.0)', 'color': '"""b"""'}), "(apogee_data['FE_H'][Remove_nans], apogee_data['MG_H'][\n Remove_nans] - apogee_data['FE_H'][Remove_nans], s=2.0, color='b')\n", (2040, 2166), True, 'import matplotlib.pyplot as plt\n'), ((2157, 2278), 'matplotlib.pyplot.scatter', 'plt.scatter', (["rl_scatter_model_fe['Fe_H']", "(rl_scatter_model_mg['Mg_H'] - rl_scatter_model_fe['Fe_H'])"], {'s': '(2.0)', 'color': '"""r"""'}), "(rl_scatter_model_fe['Fe_H'], rl_scatter_model_mg['Mg_H'] -\n rl_scatter_model_fe['Fe_H'], s=2.0, color='r')\n", (2168, 2278), True, 'import matplotlib.pyplot as plt\n'), ((2270, 2307), 'matplotlib.pyplot.title', 'plt.title', (['"""Radial Migration Effects"""'], {}), "('Radial Migration Effects')\n", (2279, 2307), True, 'import matplotlib.pyplot as plt\n'), ((2308, 2328), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""[Fe/H]"""'], {}), "('[Fe/H]')\n", (2318, 2328), True, 'import matplotlib.pyplot as plt\n'), ((2329, 2350), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""[Mg/Fe]"""'], {}), "('[Mg/Fe]')\n", (2339, 2350), 
True, 'import matplotlib.pyplot as plt\n'), ((2351, 2361), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2359, 2361), True, 'import matplotlib.pyplot as plt\n'), ((2540, 2599), 'numpy.random.normal', 'np.random.normal', (['(0.0)', "apogee_data['FE_H_ERR'][Remove_nans]"], {}), "(0.0, apogee_data['FE_H_ERR'][Remove_nans])\n", (2556, 2599), True, 'import numpy as np\n'), ((2618, 2677), 'numpy.random.normal', 'np.random.normal', (['(0.0)', "apogee_data['MG_H_ERR'][Remove_nans]"], {}), "(0.0, apogee_data['MG_H_ERR'][Remove_nans])\n", (2634, 2677), True, 'import numpy as np\n'), ((2974, 3111), 'matplotlib.pyplot.scatter', 'plt.scatter', (["apogee_data['FE_H'][Remove_nans]", "(apogee_data['MG_H'][Remove_nans] - apogee_data['FE_H'][Remove_nans])"], {'s': '(2.0)', 'color': '"""b"""'}), "(apogee_data['FE_H'][Remove_nans], apogee_data['MG_H'][\n Remove_nans] - apogee_data['FE_H'][Remove_nans], s=2.0, color='b')\n", (2985, 3111), True, 'import matplotlib.pyplot as plt\n'), ((3103, 3188), 'matplotlib.pyplot.scatter', 'plt.scatter', (['model_fe_random', '(model_mg_random - model_fe_random)'], {'s': '(2.0)', 'color': '"""r"""'}), "(model_fe_random, model_mg_random - model_fe_random, s=2.0,\n color='r')\n", (3114, 3188), True, 'import matplotlib.pyplot as plt\n'), ((3180, 3215), 'matplotlib.pyplot.title', 'plt.title', (['"""Scatter both Fe and Mg"""'], {}), "('Scatter both Fe and Mg')\n", (3189, 3215), True, 'import matplotlib.pyplot as plt\n'), ((3216, 3236), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""[Fe/H]"""'], {}), "('[Fe/H]')\n", (3226, 3236), True, 'import matplotlib.pyplot as plt\n'), ((3237, 3258), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""[Mg/Fe]"""'], {}), "('[Mg/Fe]')\n", (3247, 3258), True, 'import matplotlib.pyplot as plt\n'), ((3259, 3269), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3267, 3269), True, 'import matplotlib.pyplot as plt\n'), ((3400, 3537), 'matplotlib.pyplot.scatter', 'plt.scatter', 
(["apogee_data['FE_H'][Remove_nans]", "(apogee_data['MG_H'][Remove_nans] - apogee_data['FE_H'][Remove_nans])"], {'s': '(2.0)', 'color': '"""b"""'}), "(apogee_data['FE_H'][Remove_nans], apogee_data['MG_H'][\n Remove_nans] - apogee_data['FE_H'][Remove_nans], s=2.0, color='b')\n", (3411, 3537), True, 'import matplotlib.pyplot as plt\n'), ((3529, 3626), 'matplotlib.pyplot.scatter', 'plt.scatter', (['model_fe_random', "(rl_scatter_model_mg['Mg_H'] - model_fe_random)"], {'s': '(2.0)', 'color': '"""r"""'}), "(model_fe_random, rl_scatter_model_mg['Mg_H'] - model_fe_random,\n s=2.0, color='r')\n", (3540, 3626), True, 'import matplotlib.pyplot as plt\n'), ((3618, 3664), 'matplotlib.pyplot.title', 'plt.title', (['"""Scatter only Fe, but in both axes"""'], {}), "('Scatter only Fe, but in both axes')\n", (3627, 3664), True, 'import matplotlib.pyplot as plt\n'), ((3665, 3685), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""[Fe/H]"""'], {}), "('[Fe/H]')\n", (3675, 3685), True, 'import matplotlib.pyplot as plt\n'), ((3686, 3707), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""[Mg/Fe]"""'], {}), "('[Mg/Fe]')\n", (3696, 3707), True, 'import matplotlib.pyplot as plt\n'), ((3708, 3718), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3716, 3718), True, 'import matplotlib.pyplot as plt\n'), ((3853, 3990), 'matplotlib.pyplot.scatter', 'plt.scatter', (["apogee_data['FE_H'][Remove_nans]", "(apogee_data['MG_H'][Remove_nans] - apogee_data['FE_H'][Remove_nans])"], {'s': '(2.0)', 'color': '"""b"""'}), "(apogee_data['FE_H'][Remove_nans], apogee_data['MG_H'][\n Remove_nans] - apogee_data['FE_H'][Remove_nans], s=2.0, color='b')\n", (3864, 3990), True, 'import matplotlib.pyplot as plt\n'), ((3982, 4091), 'matplotlib.pyplot.scatter', 'plt.scatter', (["rl_scatter_model_fe['Fe_H']", "(model_mg_random - rl_scatter_model_fe['Fe_H'])"], {'s': '(2.0)', 'color': '"""r"""'}), "(rl_scatter_model_fe['Fe_H'], model_mg_random -\n rl_scatter_model_fe['Fe_H'], s=2.0, color='r')\n", (3993, 4091), 
True, 'import matplotlib.pyplot as plt\n'), ((4083, 4132), 'matplotlib.pyplot.title', 'plt.title', (['"""Scatter only Mg, only affects y-axis"""'], {}), "('Scatter only Mg, only affects y-axis')\n", (4092, 4132), True, 'import matplotlib.pyplot as plt\n'), ((4133, 4153), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""[Fe/H]"""'], {}), "('[Fe/H]')\n", (4143, 4153), True, 'import matplotlib.pyplot as plt\n'), ((4154, 4175), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""[Mg/Fe]"""'], {}), "('[Mg/Fe]')\n", (4164, 4175), True, 'import matplotlib.pyplot as plt\n'), ((4176, 4186), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4184, 4186), True, 'import matplotlib.pyplot as plt\n'), ((4357, 4422), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(2)'], {'figsize': '[15.0, 3.0]', 'sharex': '(True)', 'sharey': '(True)'}), '(1, 2, figsize=[15.0, 3.0], sharex=True, sharey=True)\n', (4369, 4422), True, 'import matplotlib.pyplot as plt\n'), ((4416, 4429), 'matplotlib.pyplot.sca', 'plt.sca', (['a[0]'], {}), '(a[0])\n', (4423, 4429), True, 'import matplotlib.pyplot as plt\n'), ((4625, 4645), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""[Fe/H]"""'], {}), "('[Fe/H]')\n", (4635, 4645), True, 'import matplotlib.pyplot as plt\n'), ((4646, 4666), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""[Mg/H]"""'], {}), "('[Mg/H]')\n", (4656, 4666), True, 'import matplotlib.pyplot as plt\n'), ((4667, 4700), 'matplotlib.pyplot.title', 'plt.title', (['"""Density plot of data"""'], {}), "('Density plot of data')\n", (4676, 4700), True, 'import matplotlib.pyplot as plt\n'), ((4701, 4714), 'matplotlib.pyplot.sca', 'plt.sca', (['a[1]'], {}), '(a[1])\n', (4708, 4714), True, 'import matplotlib.pyplot as plt\n'), ((4895, 4915), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""[Fe/H]"""'], {}), "('[Fe/H]')\n", (4905, 4915), True, 'import matplotlib.pyplot as plt\n'), ((4916, 4957), 'matplotlib.pyplot.title', 'plt.title', (['"""Density plot of painted data"""'], {}), "('Density plot 
of painted data')\n", (4925, 4957), True, 'import matplotlib.pyplot as plt\n'), ((5041, 5051), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5049, 5051), True, 'import matplotlib.pyplot as plt\n'), ((5226, 5291), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(2)'], {'figsize': '[15.0, 3.0]', 'sharex': '(True)', 'sharey': '(True)'}), '(1, 2, figsize=[15.0, 3.0], sharex=True, sharey=True)\n', (5238, 5291), True, 'import matplotlib.pyplot as plt\n'), ((5285, 5298), 'matplotlib.pyplot.sca', 'plt.sca', (['a[0]'], {}), '(a[0])\n', (5292, 5298), True, 'import matplotlib.pyplot as plt\n'), ((5494, 5514), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""[Fe/H]"""'], {}), "('[Fe/H]')\n", (5504, 5514), True, 'import matplotlib.pyplot as plt\n'), ((5515, 5535), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""[Mg/H]"""'], {}), "('[Mg/H]')\n", (5525, 5535), True, 'import matplotlib.pyplot as plt\n'), ((5536, 5569), 'matplotlib.pyplot.title', 'plt.title', (['"""Density plot of data"""'], {}), "('Density plot of data')\n", (5545, 5569), True, 'import matplotlib.pyplot as plt\n'), ((5570, 5583), 'matplotlib.pyplot.sca', 'plt.sca', (['a[1]'], {}), '(a[1])\n', (5577, 5583), True, 'import matplotlib.pyplot as plt\n'), ((5740, 5760), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""[Fe/H]"""'], {}), "('[Fe/H]')\n", (5750, 5760), True, 'import matplotlib.pyplot as plt\n'), ((5761, 5802), 'matplotlib.pyplot.title', 'plt.title', (['"""Density plot of painted data"""'], {}), "('Density plot of painted data')\n", (5770, 5802), True, 'import matplotlib.pyplot as plt\n'), ((5888, 5898), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5896, 5898), True, 'import matplotlib.pyplot as plt\n'), ((6075, 6140), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(2)'], {'figsize': '[15.0, 3.0]', 'sharex': '(True)', 'sharey': '(True)'}), '(1, 2, figsize=[15.0, 3.0], sharex=True, sharey=True)\n', (6087, 6140), True, 'import matplotlib.pyplot as plt\n'), ((6134, 6147), 
'matplotlib.pyplot.sca', 'plt.sca', (['a[0]'], {}), '(a[0])\n', (6141, 6147), True, 'import matplotlib.pyplot as plt\n'), ((6343, 6363), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""[Fe/H]"""'], {}), "('[Fe/H]')\n", (6353, 6363), True, 'import matplotlib.pyplot as plt\n'), ((6364, 6384), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""[Mg/H]"""'], {}), "('[Mg/H]')\n", (6374, 6384), True, 'import matplotlib.pyplot as plt\n'), ((6385, 6418), 'matplotlib.pyplot.title', 'plt.title', (['"""Density plot of data"""'], {}), "('Density plot of data')\n", (6394, 6418), True, 'import matplotlib.pyplot as plt\n'), ((6419, 6432), 'matplotlib.pyplot.sca', 'plt.sca', (['a[1]'], {}), '(a[1])\n', (6426, 6432), True, 'import matplotlib.pyplot as plt\n'), ((6577, 6597), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""[Fe/H]"""'], {}), "('[Fe/H]')\n", (6587, 6597), True, 'import matplotlib.pyplot as plt\n'), ((6598, 6639), 'matplotlib.pyplot.title', 'plt.title', (['"""Density plot of painted data"""'], {}), "('Density plot of painted data')\n", (6607, 6639), True, 'import matplotlib.pyplot as plt\n'), ((6727, 6737), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6735, 6737), True, 'import matplotlib.pyplot as plt\n'), ((6911, 6976), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(2)'], {'figsize': '[15.0, 3.0]', 'sharex': '(True)', 'sharey': '(True)'}), '(1, 2, figsize=[15.0, 3.0], sharex=True, sharey=True)\n', (6923, 6976), True, 'import matplotlib.pyplot as plt\n'), ((6970, 6983), 'matplotlib.pyplot.sca', 'plt.sca', (['a[0]'], {}), '(a[0])\n', (6977, 6983), True, 'import matplotlib.pyplot as plt\n'), ((7179, 7199), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""[Fe/H]"""'], {}), "('[Fe/H]')\n", (7189, 7199), True, 'import matplotlib.pyplot as plt\n'), ((7200, 7220), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""[Mg/H]"""'], {}), "('[Mg/H]')\n", (7210, 7220), True, 'import matplotlib.pyplot as plt\n'), ((7221, 7254), 'matplotlib.pyplot.title', 'plt.title', 
(['"""Density plot of data"""'], {}), "('Density plot of data')\n", (7230, 7254), True, 'import matplotlib.pyplot as plt\n'), ((7255, 7268), 'matplotlib.pyplot.sca', 'plt.sca', (['a[1]'], {}), '(a[1])\n', (7262, 7268), True, 'import matplotlib.pyplot as plt\n'), ((7437, 7457), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""[Fe/H]"""'], {}), "('[Fe/H]')\n", (7447, 7457), True, 'import matplotlib.pyplot as plt\n'), ((7458, 7499), 'matplotlib.pyplot.title', 'plt.title', (['"""Density plot of painted data"""'], {}), "('Density plot of painted data')\n", (7467, 7499), True, 'import matplotlib.pyplot as plt\n'), ((7585, 7595), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7593, 7595), True, 'import matplotlib.pyplot as plt\n'), ((4583, 4599), 'matplotlib.colors.LogNorm', 'colors.LogNorm', ([], {}), '()\n', (4597, 4599), True, 'import matplotlib.colors as colors\n'), ((4853, 4869), 'matplotlib.colors.LogNorm', 'colors.LogNorm', ([], {}), '()\n', (4867, 4869), True, 'import matplotlib.colors as colors\n'), ((5452, 5468), 'matplotlib.colors.LogNorm', 'colors.LogNorm', ([], {}), '()\n', (5466, 5468), True, 'import matplotlib.colors as colors\n'), ((5698, 5714), 'matplotlib.colors.LogNorm', 'colors.LogNorm', ([], {}), '()\n', (5712, 5714), True, 'import matplotlib.colors as colors\n'), ((6301, 6317), 'matplotlib.colors.LogNorm', 'colors.LogNorm', ([], {}), '()\n', (6315, 6317), True, 'import matplotlib.colors as colors\n'), ((6535, 6551), 'matplotlib.colors.LogNorm', 'colors.LogNorm', ([], {}), '()\n', (6549, 6551), True, 'import matplotlib.colors as colors\n'), ((7137, 7153), 'matplotlib.colors.LogNorm', 'colors.LogNorm', ([], {}), '()\n', (7151, 7153), True, 'import matplotlib.colors as colors\n'), ((7395, 7411), 'matplotlib.colors.LogNorm', 'colors.LogNorm', ([], {}), '()\n', (7409, 7411), True, 'import matplotlib.colors as colors\n'), ((980, 1012), 'pandas.isna', 'pd.isna', (["apogee_data['FE_H_ERR']"], {}), "(apogee_data['FE_H_ERR'])\n", (987, 1012), True, 
'import pandas as pd\n'), ((948, 976), 'pandas.isna', 'pd.isna', (["apogee_data['LOGG']"], {}), "(apogee_data['LOGG'])\n", (955, 976), True, 'import pandas as pd\n'), ((916, 944), 'pandas.isna', 'pd.isna', (["apogee_data['MG_H']"], {}), "(apogee_data['MG_H'])\n", (923, 944), True, 'import pandas as pd\n'), ((884, 912), 'pandas.isna', 'pd.isna', (["apogee_data['FE_H']"], {}), "(apogee_data['FE_H'])\n", (891, 912), True, 'import pandas as pd\n'), ((768, 794), 'pandas.isna', 'pd.isna', (["apogee_data['rl']"], {}), "(apogee_data['rl'])\n", (775, 794), True, 'import pandas as pd\n'), ((798, 840), 'pandas.isna', 'pd.isna', (["apogee_data['age_lowess_correct']"], {}), "(apogee_data['age_lowess_correct'])\n", (805, 840), True, 'import pandas as pd\n')] |
import numpy as np
from scipy.stats import rankdata
import scipy
from typing import Tuple
def llr_to_p(llr, prior=0.5):
    """
    Map a log-likelihood ratio log(p(x|a)/p(x|~a)) to the posterior
    probability p(a|x) under the prior p(a).

    With the default prior of 0.5 this reduces to the plain logistic
    (sigmoid) function, i.e. an unbiased conversion.
    """
    odds_factor = 1 / prior - 1
    return 1 / (1 + np.exp(-llr) * odds_factor)
def p_to_llr(p, prior=0.5):
    """
    Inverse of llr_to_p: turn a posterior probability p(a|x) back into a
    log-likelihood ratio log(p(x|a)/p(x|~a)) under the prior p(a).
    """
    inverse_odds = prior * (1 - p) / (p * (1 - prior))
    return -np.log(inverse_odds)
def llr_to_uncertainty(llr, method="linear"):
    """
    Convert log-likelihood ratios to an uncertainty score.

    :param llr: log-likelihood ratio(s), scalar or numpy array
    :param method: uncertainty measure; only "linear" is implemented:
        0.5 - |0.5 - p|, which is maximal (0.5) at llr == 0 and
        approaches 0 for confident calls
    :return: uncertainty value(s) in [0, 0.5]
    :raises ValueError: if method is unknown (previously the function
        silently returned None)
    """
    if method == "linear":
        p = llr_to_p(llr)
        return 0.5 - np.abs(0.5 - p)
    raise ValueError(f"Unknown uncertainty method: {method!r}")
def fdr_from_pvals(p_vals: np.ndarray) -> np.ndarray:
    """
    Adjust p-values for multiple testing with the Benjamini-Hochberg
    procedure (false discovery rate).

    :param p_vals: numpy array of raw p-values
    :return: numpy array of BH-adjusted p-values, capped at 1
    """
    n_tests = len(p_vals)
    ranks = rankdata(p_vals)
    adjusted = p_vals * n_tests / ranks
    # BH-adjusted values can exceed 1; clip them back into [0, 1]
    return np.minimum(adjusted, 1)
def bs_from_llrs(llrs: np.ndarray, thres: float = 1, min_reads: int = 1) -> float:
    """
    Compute a methylation beta score (fraction of methylated calls) from
    per-read log-likelihood ratios.

    :param llrs: array of log-likelihood ratios
    :param thres: reads with |llr| <= thres are considered uncertain and
        excluded (default: 1.0)
    :param min_reads: return np.nan if fewer than this many reads remain
        after filtering (default: 1)
    :return: fraction of remaining reads with positive llr, or np.nan
    """
    confident = llrs[np.abs(llrs) > thres]
    n_confident = len(confident)
    if n_confident < min_reads:
        return np.nan
    n_methylated = np.count_nonzero(confident > 0)
    return n_methylated / n_confident
def __ensure_numpy(x) -> np.ndarray:
    """Coerce *x* to a numpy array (returned unchanged if it already is one)."""
    if isinstance(x, np.ndarray):
        return x
    return np.array(x)
def nangmean(x: np.ndarray) -> float:
    """Return the geometric mean of *x*, skipping NaN entries."""
    arr = __ensure_numpy(x)
    finite = arr[~np.isnan(arr)]
    return scipy.stats.gmean(finite)
def maxabs(x: np.ndarray) -> float:
    """
    Return the element of *x* with the largest absolute value, keeping
    its original sign.

    Note: the original placed this text after the first statement, where
    it was a dead string expression rather than a docstring.
    """
    arr = __ensure_numpy(arr := x) if False else __ensure_numpy(x)
    # argmax over the flattened |arr|, unraveled back to an index tuple
    # so that multi-dimensional arrays are handled as well
    flat_pos = np.argmax(np.abs(arr))
    return arr[np.unravel_index(flat_pos, arr.shape)]
def compute_differential_methylation(
    llrs_a: np.ndarray, llrs_b: np.ndarray
) -> Tuple:
    """
    Test for differential methylation between two groups of reads.

    Both inputs are (reads x sites) matrices of log-likelihood ratios in
    which 0 encodes "no call / no coverage".

    :param llrs_a: LLR matrix of group a (e.g. haplotype 1)
    :param llrs_b: LLR matrix of group b (e.g. haplotype 2)
    :return: tuple (unpaired p-value from a Mann-Whitney U test on the
        pooled non-zero LLRs, paired p-value from a t-test on per-site
        mean LLRs), or (None, None) if the unpaired test failed
    """
    # Paired test: compare per-site mean LLRs between the two groups.
    # Zero entries mean "no call", so mask them as NaN first.
    a_nan = llrs_a.copy()
    a_nan[a_nan == 0] = np.nan
    b_nan = llrs_b.copy()
    b_nan[b_nan == 0] = np.nan
    # Filtering sites for which both haplotypes have at least one read,
    # in order to avoid all-NaN-slice warnings from np.nanmean below
    good_sites = ((~np.isnan(a_nan)).sum(axis=0) > 0) & (
        (~np.isnan(b_nan)).sum(axis=0) > 0
    )
    a_nan = a_nan[:, good_sites]
    b_nan = b_nan[:, good_sites]
    pp = scipy.stats.ttest_rel(np.nanmean(a_nan, axis=0), np.nanmean(b_nan, axis=0))
    # Unpaired test on all called (non-zero) LLRs, pooled across sites
    up = scipy.stats.mannwhitneyu(llrs_a[llrs_a != 0], llrs_b[llrs_b != 0])
    if np.isnan(up[0]):
        # Workaround: the unpaired test yields (nan, nan) when it cannot
        # be computed — report failure as (None, None)
        return None, None
    return up[1], pp[1]
"numpy.abs",
"scipy.stats.gmean",
"scipy.stats.rankdata",
"numpy.log",
"numpy.exp",
"numpy.array",
"numpy.nanmean",
"numpy.isnan",
"scipy.stats.mannwhitneyu"
] | [((957, 973), 'scipy.stats.rankdata', 'rankdata', (['p_vals'], {}), '(p_vals)\n', (965, 973), False, 'from scipy.stats import rankdata\n'), ((1999, 2019), 'scipy.stats.gmean', 'scipy.stats.gmean', (['x'], {}), '(x)\n', (2016, 2019), False, 'import scipy\n'), ((2824, 2890), 'scipy.stats.mannwhitneyu', 'scipy.stats.mannwhitneyu', (['llrs_a[llrs_a != 0]', 'llrs_b[llrs_b != 0]'], {}), '(llrs_a[llrs_a != 0], llrs_b[llrs_b != 0])\n', (2848, 2890), False, 'import scipy\n'), ((2899, 2914), 'numpy.isnan', 'np.isnan', (['up[0]'], {}), '(up[0])\n', (2907, 2914), True, 'import numpy as np\n'), ((524, 567), 'numpy.log', 'np.log', (['(prior * (1 - p) / (p * (1 - prior)))'], {}), '(prior * (1 - p) / (p * (1 - prior)))\n', (530, 567), True, 'import numpy as np\n'), ((1818, 1829), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (1826, 1829), True, 'import numpy as np\n'), ((2740, 2765), 'numpy.nanmean', 'np.nanmean', (['a_nan'], {'axis': '(0)'}), '(a_nan, axis=0)\n', (2750, 2765), True, 'import numpy as np\n'), ((2767, 2792), 'numpy.nanmean', 'np.nanmean', (['b_nan'], {'axis': '(0)'}), '(b_nan, axis=0)\n', (2777, 2792), True, 'import numpy as np\n'), ((690, 705), 'numpy.abs', 'np.abs', (['(0.5 - p)'], {}), '(0.5 - p)\n', (696, 705), True, 'import numpy as np\n'), ((1601, 1613), 'numpy.abs', 'np.abs', (['llrs'], {}), '(llrs)\n', (1607, 1613), True, 'import numpy as np\n'), ((1975, 1986), 'numpy.isnan', 'np.isnan', (['x'], {}), '(x)\n', (1983, 1986), True, 'import numpy as np\n'), ((316, 328), 'numpy.exp', 'np.exp', (['(-llr)'], {}), '(-llr)\n', (322, 328), True, 'import numpy as np\n'), ((2180, 2189), 'numpy.abs', 'np.abs', (['x'], {}), '(x)\n', (2186, 2189), True, 'import numpy as np\n'), ((2555, 2570), 'numpy.isnan', 'np.isnan', (['a_nan'], {}), '(a_nan)\n', (2563, 2570), True, 'import numpy as np\n'), ((2603, 2618), 'numpy.isnan', 'np.isnan', (['b_nan'], {}), '(b_nan)\n', (2611, 2618), True, 'import numpy as np\n')] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.