code stringlengths 31 1.05M | apis list | extract_api stringlengths 97 1.91M |
|---|---|---|
#!/usr/bin/env python
# coding: utf-8
# Streamlit app: estimates the probability that an ICU patient will need to be
# re-intubated if extubated now, and explains the prediction with LIME.
import streamlit as st
# To make things easier later, we're also importing numpy and pandas for
# working with sample data.
import numpy as np
import pandas as pd
import joblib
import lime
import lime.lime_tabular

# Load the fitted preprocessing pipeline and classifier from disk.
# NOTE(review): the original file had bare `st.cache()` calls here; st.cache
# is a decorator factory and was never applied to anything, so those calls
# were no-ops and have been removed.
preprocessor = joblib.load('reintubate_preprocessor_strip.sav')
clf = joblib.load("reintubate_model_strip.sav")
# Pull the numeric scaler's statistics so scaled LIME thresholds can be
# mapped back into human-readable units further down.
scaler_object = preprocessor.named_transformers_['num']
mean = scaler_object['scaler'].mean_
var = scaler_object['scaler'].var_

# Column order the preprocessor was fitted with.
df_columns=['time_on_vent', 'anchor_age', 'heartrate', 'weight', 'hco3', 'pulseox',
       'creatinine', 'bun', 'height', 'tidalvolume', 'temp', 're_intub_class',
       'gender', 'tidal_weight']

st.set_option('deprecation.showfileUploaderEncoding', False)

st.title('Extu-Mate: Helping ICU doctors decide when to extubate')
st.sidebar.subheader('Enter patient info:')
# Log transforms below mirror the training-time feature engineering
# (presumably -- confirm against the training pipeline).
time_on_vent = st.sidebar.number_input(label = 'How long has the patient already been on the ventilator? (hours):',value=91)
time_on_vent = np.log(time_on_vent)
anchor_age = st.sidebar.number_input(label = 'Patient age (years):', value = 62)
anchor_age = np.log(anchor_age)
gender = st.sidebar.radio(label = 'Patient gender:', options = ['M', 'F'])
weight = st.sidebar.number_input(label = 'Patient weight (lb):', value = 182)
height = st.sidebar.number_input(label = 'Patient height (inches):',value = 67)
st.sidebar.subheader("Enter patient's most recent vital signs:")
pulseox = st.sidebar.number_input(label = 'Oxygen saturation (%):', value = 99)
pulseox = np.log(pulseox)
heartrate = st.sidebar.number_input(label = 'Heart rate (bpm):', value = 86)
tidalvolume = st.sidebar.number_input(label = 'Tidal volume (mL):', value = 200)
temp = st.sidebar.number_input(label = 'Temperature (Celcius):', value = 37.06)
st.sidebar.subheader("Enter patient's most recent lab values:")
hco3 = st.sidebar.number_input(label = 'HCO3 (mEq/L):', value = 25.15)
creatinine = st.sidebar.number_input(label = 'Creatinine (mg/dL):', value = 1.24)
creatinine = np.log(creatinine+1)
bun = st.sidebar.number_input(label = 'Blood urea nitrogen (mg/dL):',value = 10)
bun = np.log(bun+1)
tidal_weight = tidalvolume/weight
re_intub_class = 0  # placeholder label column; dropped again before prediction

# Assemble a single-row frame in the training column order, drop the label
# column, and run it through the fitted preprocessor.
test_data = np.array([[time_on_vent, anchor_age, heartrate, weight, hco3, pulseox, creatinine, bun,
                       height, tidalvolume, temp,
                       re_intub_class, gender, tidal_weight]])
df = pd.DataFrame(data = test_data, columns=df_columns)
df.drop('re_intub_class',axis=1,inplace=True)
df_scaled = preprocessor.transform(df)
sample_df = df_scaled.copy()
sample_test = df_scaled.flatten().reshape(1,-1)
clf.predict(sample_df)  # NOTE(review): result unused; predict_proba below drives the UI
# FIX: np.int was deprecated in NumPy 1.20 and removed in 1.24; builtin int
# behaves identically here.
prediction_percent = int(clf.predict_proba(sample_test)[0][1]*100)
sentence = 'If you take your patient off the ventilator now, there is a '+ str(prediction_percent)+'% chance that they will need to be reintubated'
st.header(sentence)

# Build a LIME explainer over the scaled training data so the prediction
# can be attributed to individual features.
X_train = pd.read_feather("strip_train_data")
X_scaled = preprocessor.transform(X_train)
categs= preprocessor.named_transformers_['cat']['onehot']
onehot_features = categs.get_feature_names()
numeric_features = preprocessor.transformers[0][2]
feature_names = np.concatenate((numeric_features.tolist(),onehot_features))
feature_names_polished = ['Ventilation time', 'Age of patient','Heart rate', 'Weight', 'HCO3 levels',
       'O2 saturation','Creatinine levels', 'Blood urea nitrogen levels', 'Height', 'Tidal Volume', 'Temperature',
       'Tidal volume normalized to weight', 'Gender']
# Lookup tables: raw feature name -> polished label / scaler mean / scaler var.
zip_iterator = zip(feature_names, feature_names_polished)
feature_dict = dict(zip_iterator)
zip_mean= zip(feature_names, mean)
mean_dict = dict(zip_mean)
zip_var= zip(feature_names, var)
var_dict = dict(zip_var)
explainer = lime.lime_tabular.LimeTabularExplainer(X_scaled,
                                                   feature_names=feature_names,
                                                   verbose=True,
                                                   mode='classification',
                                                   discretize_continuous=True,
                                                   random_state = 101)
explog = explainer.explain_instance(sample_test[0,:], clf.predict_proba, num_samples = 100, num_features=5)
feature_list = explog.as_list()
num_top_feats = len(feature_list)
# Tokens used when translating LIME condition strings back to clinical units.
spiller_words = ['<','=','>','=>','>=','<=','=<']
changeable_features = ['heartrate', 'hco3', 'creatinine', 'bun',
       'tidalvolume', 'temp', 'pulseox']
units_ref = ['(bpm)','(mEq/L)','(mg/dL)','(mg/dL)','(mL)','(Celcius)','(%)']
zip_units= zip(changeable_features, units_ref)
units_dict = dict(zip_units)
st.subheader("This prediction is driven by feature values in the ranges below:")
# Translate each LIME condition (expressed on standardized features) back into
# original clinical units and display it.
# FIX: in the original, the final "{:.0f}" formatting lines sat at the same
# indent as the one-line `else: print(...)` clause, so hco3/temp thresholds
# were appended twice (once as "{:.2f}", then again as "{:.0f}"). Each token
# now contributes exactly one value. Debug print()s removed.
for feature_desc, _weight in feature_list:
    tokens = feature_desc.split(' ')
    for feature in tokens:
        if feature not in changeable_features:
            continue
        pieces = []
        for token in tokens:
            if token in spiller_words:
                # Comparison operators pass through unchanged.
                pieces.append(token)
            elif token in changeable_features:
                # Swap the raw feature name for its polished display label.
                pieces.append(feature_dict[feature])
            elif token not in feature_names:
                # Numeric threshold: undo standardization; the log-transformed
                # features also undo the log via exp(.) - 1.
                unscaled = (float(token)*np.sqrt(var_dict[feature])) + mean_dict[feature]
                if (feature=='bun')|(feature=='creatinine')|(feature=='pulseox'):
                    pieces.append("{:.2f}".format(np.exp(unscaled)-1))
                elif (feature=='hco3')|(feature=='temp'):
                    pieces.append("{:.2f}".format(unscaled))
                else:
                    pieces.append("{:.0f}".format(unscaled))
            # Any other token (a non-changeable feature name) is skipped,
            # matching the original behavior.
        x = ' '.join(pieces) + ' ' + units_dict[feature] + ' '
        st.write(x)
# In[6]:
# =============================================================================
# csv_file = st.file_uploader(
# label="Upload a csv file containing your patient's data.", type=["csv"], encoding="utf-8"
# )
#
# if csv_file is not None:
# df = pd.read_csv(csv_file)
# x = df[mask]
# x_scaled = scaler.transform(x)
# sample_df = df.copy()
# sample_df[mask] = x_scaled.flatten()
# sample_test = sample_df.drop(labels=['re_intub_class'],axis=1).values
# logmodel.predict(sample_test)
# prediction_percent = logmodel.predict_proba(sample_test)[0,0]
# st.write('There is a ', prediction_percent,
# '% likelihood that extubation will be successful')
# #st.dataframe(df)
# =============================================================================
# In[ ]:
# In[ ]:
# In[ ]:
# In[ ]:
# In[ ]:
# In[ ]:
# In[ ]:
# In[ ]:
# In[ ]:
| [
"pandas.DataFrame",
"streamlit.subheader",
"streamlit.sidebar.number_input",
"streamlit.sidebar.subheader",
"streamlit.set_option",
"streamlit.cache",
"numpy.log",
"streamlit.header",
"pandas.read_feather",
"streamlit.title",
"streamlit.write",
"numpy.array",
"lime.lime_tabular.LimeTabularEx... | [((276, 286), 'streamlit.cache', 'st.cache', ([], {}), '()\n', (284, 286), True, 'import streamlit as st\n'), ((302, 350), 'joblib.load', 'joblib.load', (['"""reintubate_preprocessor_strip.sav"""'], {}), "('reintubate_preprocessor_strip.sav')\n", (313, 350), False, 'import joblib\n'), ((357, 398), 'joblib.load', 'joblib.load', (['"""reintubate_model_strip.sav"""'], {}), "('reintubate_model_strip.sav')\n", (368, 398), False, 'import joblib\n'), ((937, 997), 'streamlit.set_option', 'st.set_option', (['"""deprecation.showfileUploaderEncoding"""', '(False)'], {}), "('deprecation.showfileUploaderEncoding', False)\n", (950, 997), True, 'import streamlit as st\n'), ((1011, 1077), 'streamlit.title', 'st.title', (['"""Extu-Mate: Helping ICU doctors decide when to extubate"""'], {}), "('Extu-Mate: Helping ICU doctors decide when to extubate')\n", (1019, 1077), True, 'import streamlit as st\n'), ((1079, 1122), 'streamlit.sidebar.subheader', 'st.sidebar.subheader', (['"""Enter patient info:"""'], {}), "('Enter patient info:')\n", (1099, 1122), True, 'import streamlit as st\n'), ((1138, 1255), 'streamlit.sidebar.number_input', 'st.sidebar.number_input', ([], {'label': '"""How long has the patient already been on the ventilator? (hours):"""', 'value': '(91)'}), "(label=\n 'How long has the patient already been on the ventilator? 
(hours):',\n value=91)\n", (1161, 1255), True, 'import streamlit as st\n'), ((1263, 1283), 'numpy.log', 'np.log', (['time_on_vent'], {}), '(time_on_vent)\n', (1269, 1283), True, 'import numpy as np\n'), ((1298, 1361), 'streamlit.sidebar.number_input', 'st.sidebar.number_input', ([], {'label': '"""Patient age (years):"""', 'value': '(62)'}), "(label='Patient age (years):', value=62)\n", (1321, 1361), True, 'import streamlit as st\n'), ((1379, 1397), 'numpy.log', 'np.log', (['anchor_age'], {}), '(anchor_age)\n', (1385, 1397), True, 'import numpy as np\n'), ((1408, 1469), 'streamlit.sidebar.radio', 'st.sidebar.radio', ([], {'label': '"""Patient gender:"""', 'options': "['M', 'F']"}), "(label='Patient gender:', options=['M', 'F'])\n", (1424, 1469), True, 'import streamlit as st\n'), ((1484, 1548), 'streamlit.sidebar.number_input', 'st.sidebar.number_input', ([], {'label': '"""Patient weight (lb):"""', 'value': '(182)'}), "(label='Patient weight (lb):', value=182)\n", (1507, 1548), True, 'import streamlit as st\n'), ((1562, 1629), 'streamlit.sidebar.number_input', 'st.sidebar.number_input', ([], {'label': '"""Patient height (inches):"""', 'value': '(67)'}), "(label='Patient height (inches):', value=67)\n", (1585, 1629), True, 'import streamlit as st\n'), ((1634, 1698), 'streamlit.sidebar.subheader', 'st.sidebar.subheader', (['"""Enter patient\'s most recent vital signs:"""'], {}), '("Enter patient\'s most recent vital signs:")\n', (1654, 1698), True, 'import streamlit as st\n'), ((1709, 1774), 'streamlit.sidebar.number_input', 'st.sidebar.number_input', ([], {'label': '"""Oxygen saturation (%):"""', 'value': '(99)'}), "(label='Oxygen saturation (%):', value=99)\n", (1732, 1774), True, 'import streamlit as st\n'), ((1789, 1804), 'numpy.log', 'np.log', (['pulseox'], {}), '(pulseox)\n', (1795, 1804), True, 'import numpy as np\n'), ((1818, 1878), 'streamlit.sidebar.number_input', 'st.sidebar.number_input', ([], {'label': '"""Heart rate (bpm):"""', 'value': '(86)'}), 
"(label='Heart rate (bpm):', value=86)\n", (1841, 1878), True, 'import streamlit as st\n'), ((1897, 1959), 'streamlit.sidebar.number_input', 'st.sidebar.number_input', ([], {'label': '"""Tidal volume (mL):"""', 'value': '(200)'}), "(label='Tidal volume (mL):', value=200)\n", (1920, 1959), True, 'import streamlit as st\n'), ((1971, 2039), 'streamlit.sidebar.number_input', 'st.sidebar.number_input', ([], {'label': '"""Temperature (Celcius):"""', 'value': '(37.06)'}), "(label='Temperature (Celcius):', value=37.06)\n", (1994, 2039), True, 'import streamlit as st\n'), ((2045, 2108), 'streamlit.sidebar.subheader', 'st.sidebar.subheader', (['"""Enter patient\'s most recent lab values:"""'], {}), '("Enter patient\'s most recent lab values:")\n', (2065, 2108), True, 'import streamlit as st\n'), ((2116, 2175), 'streamlit.sidebar.number_input', 'st.sidebar.number_input', ([], {'label': '"""HCO3 (mEq/L):"""', 'value': '(25.15)'}), "(label='HCO3 (mEq/L):', value=25.15)\n", (2139, 2175), True, 'import streamlit as st\n'), ((2193, 2257), 'streamlit.sidebar.number_input', 'st.sidebar.number_input', ([], {'label': '"""Creatinine (mg/dL):"""', 'value': '(1.24)'}), "(label='Creatinine (mg/dL):', value=1.24)\n", (2216, 2257), True, 'import streamlit as st\n'), ((2275, 2297), 'numpy.log', 'np.log', (['(creatinine + 1)'], {}), '(creatinine + 1)\n', (2281, 2297), True, 'import numpy as np\n'), ((2303, 2374), 'streamlit.sidebar.number_input', 'st.sidebar.number_input', ([], {'label': '"""Blood urea nitrogen (mg/dL):"""', 'value': '(10)'}), "(label='Blood urea nitrogen (mg/dL):', value=10)\n", (2326, 2374), True, 'import streamlit as st\n'), ((2384, 2399), 'numpy.log', 'np.log', (['(bun + 1)'], {}), '(bun + 1)\n', (2390, 2399), True, 'import numpy as np\n'), ((2489, 2499), 'streamlit.cache', 'st.cache', ([], {}), '()\n', (2497, 2499), True, 'import streamlit as st\n'), ((2512, 2674), 'numpy.array', 'np.array', (['[[time_on_vent, anchor_age, heartrate, weight, hco3, pulseox, creatinine,\n 
bun, height, tidalvolume, temp, re_intub_class, gender, tidal_weight]]'], {}), '([[time_on_vent, anchor_age, heartrate, weight, hco3, pulseox,\n creatinine, bun, height, tidalvolume, temp, re_intub_class, gender,\n tidal_weight]])\n', (2520, 2674), True, 'import numpy as np\n'), ((2689, 2737), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'test_data', 'columns': 'df_columns'}), '(data=test_data, columns=df_columns)\n', (2701, 2737), True, 'import pandas as pd\n'), ((3285, 3304), 'streamlit.header', 'st.header', (['sentence'], {}), '(sentence)\n', (3294, 3304), True, 'import streamlit as st\n'), ((3317, 3352), 'pandas.read_feather', 'pd.read_feather', (['"""strip_train_data"""'], {}), "('strip_train_data')\n", (3332, 3352), True, 'import pandas as pd\n'), ((4127, 4296), 'lime.lime_tabular.LimeTabularExplainer', 'lime.lime_tabular.LimeTabularExplainer', (['X_scaled'], {'feature_names': 'feature_names', 'verbose': '(True)', 'mode': '"""classification"""', 'discretize_continuous': '(True)', 'random_state': '(101)'}), "(X_scaled, feature_names=\n feature_names, verbose=True, mode='classification',\n discretize_continuous=True, random_state=101)\n", (4165, 4296), False, 'import lime\n'), ((4585, 4595), 'streamlit.cache', 'st.cache', ([], {}), '()\n', (4593, 4595), True, 'import streamlit as st\n'), ((6365, 6450), 'streamlit.subheader', 'st.subheader', (['"""This prediction is driven by feature values in the ranges below:"""'], {}), "('This prediction is driven by feature values in the ranges below:'\n )\n", (6377, 6450), True, 'import streamlit as st\n'), ((6462, 6486), 'numpy.arange', 'np.arange', (['num_top_feats'], {}), '(num_top_feats)\n', (6471, 6486), True, 'import numpy as np\n'), ((8960, 8971), 'streamlit.write', 'st.write', (['x'], {}), '(x)\n', (8968, 8971), True, 'import streamlit as st\n'), ((8643, 8669), 'numpy.sqrt', 'np.sqrt', (['var_dict[feature]'], {}), '(var_dict[feature])\n', (8650, 8669), True, 'import numpy as np\n'), ((8269, 8295), 'numpy.sqrt', 
'np.sqrt', (['var_dict[feature]'], {}), '(var_dict[feature])\n', (8276, 8295), True, 'import numpy as np\n'), ((8757, 8783), 'numpy.sqrt', 'np.sqrt', (['var_dict[feature]'], {}), '(var_dict[feature])\n', (8764, 8783), True, 'import numpy as np\n'), ((8166, 8192), 'numpy.sqrt', 'np.sqrt', (['var_dict[feature]'], {}), '(var_dict[feature])\n', (8173, 8192), True, 'import numpy as np\n'), ((8387, 8413), 'numpy.sqrt', 'np.sqrt', (['var_dict[feature]'], {}), '(var_dict[feature])\n', (8394, 8413), True, 'import numpy as np\n'), ((8544, 8570), 'numpy.sqrt', 'np.sqrt', (['var_dict[feature]'], {}), '(var_dict[feature])\n', (8551, 8570), True, 'import numpy as np\n'), ((7541, 7567), 'numpy.sqrt', 'np.sqrt', (['var_dict[feature]'], {}), '(var_dict[feature])\n', (7548, 7567), True, 'import numpy as np\n'), ((7666, 7692), 'numpy.sqrt', 'np.sqrt', (['var_dict[feature]'], {}), '(var_dict[feature])\n', (7673, 7692), True, 'import numpy as np\n')] |
'''
ARTI is a dataset created by <NAME> (@liuliu66)
'''
import numpy as np
from math import pi ,sin, cos
import itertools
from matplotlib import pyplot as plt
def get_3d_bbox(scale, shift = 0):
    """
    Return the 8 corners of an axis-aligned 3D box centered at `shift`.

    Input:
        scale: [3] or scalar -- full edge lengths of the box
        shift: [3] or scalar -- offset added to every corner
    Return
        bbox_3d: [3, N] corner coordinates (N = 8)
    """
    # Normalize a scalar scale to per-axis half-extents.
    if hasattr(scale, "__iter__"):
        half_x, half_y, half_z = scale[0] / 2, scale[1] / 2, scale[2] / 2
    else:
        half_x = half_y = half_z = scale / 2
    # Enumerate sign combinations in the fixed corner order the other
    # helpers (pts_inside_box) rely on: y varies slowest, then x, then z.
    corners = [[sx * half_x, sy * half_y, sz * half_z]
               for sy in (1.0, -1.0)
               for sx in (1.0, -1.0)
               for sz in (1.0, -1.0)]
    bbox_3d = np.array(corners) + shift
    return bbox_3d.transpose()
def pts_inside_box(pts, bbox):
    """Boolean mask [N, 1]: which rows of pts [N, 3] lie inside bbox [8, 3].

    The box is described by its 8 corners in get_3d_bbox order; three
    vectors out of corner 4 span the box volume.
    """
    spanning = [bbox[5, :] - bbox[4, :],
                bbox[7, :] - bbox[4, :],
                bbox[0, :] - bbox[4, :]]
    rel = pts - np.reshape(bbox[4, :], (1, 3))
    # A point is inside iff its projection on every spanning vector falls
    # strictly between 0 and that vector's squared length.
    mask = None
    for edge in spanning:
        proj = np.matmul(rel, edge.reshape((3, 1)))
        within = np.logical_and(proj > 0, proj < np.dot(edge, edge))
        mask = within if mask is None else np.logical_and(mask, within)
    return mask
def iou_3d(bbox1, bbox2, nres=50):
    """Approximate 3D IoU of two boxes by point-counting on a regular grid.

    bbox1, bbox2: [8, 3] corner arrays; nres: grid resolution per axis.
    Returns 1 when the union is empty (degenerate boxes).
    """
    both = np.concatenate((bbox1, bbox2), 0)
    lo = np.min(both, 0)
    hi = np.max(both, 0)
    # Regular nres^3 lattice covering the joint bounding volume.
    axes = [np.linspace(lo[k], hi[k], nres) for k in range(3)]
    pts = np.array(list(itertools.product(*axes)))
    in_first = pts_inside_box(pts, bbox1)
    in_second = pts_inside_box(pts, bbox2)
    intersect = np.sum(np.logical_and(in_first, in_second))
    union = np.sum(np.logical_or(in_first, in_second))
    if union == 0:
        return 1
    return intersect / float(union)
def transform_coordinates_3d(coordinates, RT):
    """
    Apply a 4x4 homogeneous transform to a set of 3D points.

    Input:
        coordinates: [3, N] (an [N, 3] array is transposed automatically)
        RT: [4, 4]
    Return
        new_coordinates: [3, N]
    """
    # Accept [N, 3] input by flipping it into the expected [3, N] layout.
    if coordinates.shape[0] != 3 and coordinates.shape[1] == 3:
        coordinates = coordinates.transpose()
    ones_row = np.ones((1, coordinates.shape[1]), dtype=np.float32)
    homogeneous = np.vstack([coordinates, ones_row])
    transformed = RT @ homogeneous
    # Dehomogenize: divide xyz by the w row.
    return transformed[:3, :] / transformed[3, :]
def calculate_2d_projections(coordinates_3d, intrinsics):
    """
    Project camera-space 3D points to integer pixel coordinates.

    Input:
        coordinates_3d: [3, N]
        intrinsics: [3, 3] camera matrix
    Return
        projected_coordinates: [N, 2] (int32)
    """
    homogeneous = intrinsics @ coordinates_3d
    # Perspective divide by depth, then flip to one row per point.
    pixels = (homogeneous[:2, :] / homogeneous[2, :]).transpose()
    return np.array(pixels, dtype=np.int32)
def compute_RT_distances(RT_1, RT_2):
    '''
    Score the agreement of two homogeneous transforms.

    :param RT_1: [4, 4]. homogeneous affine transformation
    :param RT_2: [4, 4]. homogeneous affine transformation
    :return: 10 - theta - shift when both the rotation gap (degrees) and the
             translation gap (centimeters) are under 5; otherwise -1.
             Also returns -1 when either input is None.
    '''
    if RT_1 is None or RT_2 is None:
        return -1
    # Both bottom rows should be [0, 0, 0, 1]; warn (but continue) if not.
    try:
        assert np.array_equal(RT_1[3, :], RT_2[3, :])
        assert np.array_equal(RT_1[3, :], np.array([0, 0, 0, 1]))
    except AssertionError:
        print(RT_1[3, :], RT_2[3, :])
    # Strip any uniform scale from the rotation blocks before comparing.
    rot_a = RT_1[:3, :3] / np.cbrt(np.linalg.det(RT_1[:3, :3]))
    rot_b = RT_2[:3, :3] / np.cbrt(np.linalg.det(RT_2[:3, :3]))
    relative = rot_a @ rot_b.transpose()
    theta = np.arccos((np.trace(relative) - 1) / 2) * 180 / np.pi
    shift = np.linalg.norm(RT_1[:3, 3] - RT_2[:3, 3]) * 100
    if theta < 5 and shift < 5:
        return 10 - theta - shift
    return -1
def axis_diff_degree(v1, v2):
    """Acute angle in degrees between two 3D axis directions (sign-agnostic)."""
    a = v1.reshape(-1)
    b = v2.reshape(-1)
    cos_val = np.sum(a * b) / (np.linalg.norm(a) * np.linalg.norm(b))
    angle = np.arccos(cos_val) * 180 / np.pi
    # An axis and its negation describe the same line, so fold to [0, 90].
    return min(angle, 180 - angle)
def rot_diff_degree(rot1, rot2):
    """Angular difference between two rotation matrices, in degrees."""
    difference_rad = rot_diff_rad(rot1, rot2)
    return difference_rad / np.pi * 180
def rot_diff_rad(rot1, rot2):
    """Geodesic distance in radians between two rotation matrices, mod 2*pi.

    NOTE: numerical noise can push the trace slightly past 3, making the
    arccos argument exceed 1 and yield NaN; callers should be aware.
    """
    cos_val = (np.trace(np.matmul(rot1, rot2.T)) - 1) / 2
    return np.arccos(cos_val) % (2 * np.pi)
def rotate_points_with_rotvec(points, rot_vecs):
    """Rotate points by given rotation vectors via Rodrigues' formula.

    points, rot_vecs: [N, 3]; row i of points is rotated by row i of rot_vecs
    (direction = axis, magnitude = angle in radians).
    """
    angles = np.linalg.norm(rot_vecs, axis=1)[:, np.newaxis]
    # A zero-length rotation vector divides by zero; nan_to_num maps the
    # resulting NaNs back to a zero axis, which yields the identity rotation.
    with np.errstate(invalid='ignore'):
        axes = np.nan_to_num(rot_vecs / angles)
    axial = np.sum(points * axes, axis=1)[:, np.newaxis]
    cos_a = np.cos(angles)
    sin_a = np.sin(angles)
    return cos_a * points + sin_a * np.cross(axes, points) + axial * (1 - cos_a) * axes
def dist_between_3d_lines(p1, e1, p2, e2):
    """Shortest distance between two 3D lines, each given as point + direction.

    p1, p2: points on the lines; e1, e2: direction vectors (non-parallel).
    """
    p1 = p1.reshape(-1)
    p2 = p2.reshape(-1)
    dir_a = e1.reshape(-1)
    dir_b = e2.reshape(-1)
    # Project the offset between the lines onto their common normal.
    normal = np.cross(dir_a, dir_b)
    offset = np.sum(normal * (p1 - p2))
    return np.abs(offset / np.linalg.norm(normal))
def project3d(pcloud_target, projMat, height=512, width=512):
    """Project homogeneous 3D points to pixel coordinates.

    pcloud_target: [N, 4] homogeneous points; projMat: [4, 4] projection matrix.
    Returns (u, v) int16 arrays in OpenCV image convention (v grows downward).

    FIX: `height`/`width` were accepted but ignored -- 512 and 1/256 were
    hard-coded. They are now honored; the defaults reproduce the old output
    exactly ((x + 1) / (1/256) == (x + 1) * (512 / 2)).
    """
    projected = np.dot(pcloud_target, projMat.T)
    # Perspective divide to normalized device coordinates in [-1, 1].
    ndc = projected / projected[:, 3:4]
    # Map NDC [-1, 1] to the pixel ranges [0, width] and [0, height].
    img_coord = np.empty_like(ndc[:, 0:2])
    img_coord[:, 0] = (ndc[:, 0] + 1) * (width / 2)
    img_coord[:, 1] = (ndc[:, 1] + 1) * (height / 2)
    print('transformed image coordinates:\n', img_coord.shape)
    u = img_coord[:, 0].astype(np.int16)
    v = img_coord[:, 1].astype(np.int16)
    v = height - v  # flip the vertical axis: NDC y points up, image y down
    print('u0, v0:\n', u[0], v[0])
    return u, v  # x, y in cv coords
def point_3d_offset_joint(joint, point):
    """
    Offset of each point relative to a joint line.

    joint: [position, direction] pair, each [x, y, z] (direction need not be unit)
    point: N * 3
    Returns the (negated) perpendicular component of each point's offset from
    the line, shape [N, 3].
    """
    if len(joint) == 2:
        anchor = np.array(joint[0])
        pts = np.array(point)
        axis = np.array(joint[1]).reshape(1, 3)
        rel = pts - anchor
        # Axis-parallel component of rel minus rel itself leaves (minus) the
        # perpendicular part.
        PP = np.dot(rel, axis.T) * axis / np.linalg.norm(axis)**2 - rel
    return PP
def rotate_pts(source, target):
    '''
    Least-squares rotation aligning source [N x 3] onto target [N x 3]
    (Kabsch algorithm via SVD of the cross-covariance matrix).
    '''
    src_centered = source - np.mean(source, 0, keepdims=True)
    tgt_centered = target - np.mean(target, 0, keepdims=True)
    cov = np.matmul(tgt_centered.T, src_centered)
    U, D, Vh = np.linalg.svd(cov, full_matrices=True)
    # Guard against reflections: flip the last singular direction if the
    # determinant product is negative.
    if (np.linalg.det(U) * np.linalg.det(Vh)) < 0.0:
        D[-1] = -D[-1]
        U[:, -1] = -U[:, -1]
    return np.matmul(U, Vh)
def transform_pts(source, target):
    """Recover the (rotation, scale, translation) mapping source onto target.

    source, target: [N x 3] corresponding point sets.
    """
    # Center both clouds so rotation and scale decouple from translation.
    src_centered = source - np.mean(source, 0, keepdims=True)
    tgt_centered = target - np.mean(target, 0, keepdims=True)
    rotation = rotate_pts(src_centered, tgt_centered)
    scale = scale_pts(src_centered, tgt_centered)
    # Translation is whatever residual remains after rotating and scaling.
    translation = np.mean(target.T - scale * np.matmul(rotation, source.T), 1)
    return rotation, scale, translation
def scale_pts(source, target):
    '''
    Least-squares scale factor between source [N x 3] and target [N x 3],
    estimated from all pairwise point distances.
    '''
    n_src = source.shape[0]
    diff_s = source.reshape(n_src, 1, 3) - source.reshape(1, n_src, 3)
    dists_s = np.sqrt(np.sum(diff_s**2, 2)).reshape(-1)
    n_tgt = target.shape[0]
    diff_t = target.reshape(n_tgt, 1, 3) - target.reshape(1, n_tgt, 3)
    dists_t = np.sqrt(np.sum(diff_t**2, 2)).reshape(-1)
    # Ratio of target to source distances; epsilon avoids 0/0 on
    # degenerate clouds.
    return np.dot(dists_s, dists_t) / (np.dot(dists_s, dists_s) + 1e-6)
def compute_3d_rotation_axis(pts_0, pts_1, rt, orientation=None, line_pts=None, methods='H-L', item='eyeglasses', viz=False):
    """
    Estimate per-joint rotation axes and rotation angles between two poses.

    pts_0: per-part points in NOCS space of canonical status (scaled)
    pts_1: per-part points in camera space retrieved from depth image
    rt: per-part rotation + translation, each 4 * 4
    methods: 'H-L' selects the least-squares axis fit (estimate_joint_HL)
    Returns (axis_list, angle_list); each axis is a dict with 'orient' and
    'position'. `orientation`, `line_pts` and `viz` are currently unused.
    """
    num_parts = len(rt)
    print('we have {} parts'.format(num_parts))
    chained_pts = [None] * num_parts
    # Lift part-0 points to homogeneous coordinates and move them into
    # camera space with the base part's transform.
    chained_pts[0] = np.dot( np.concatenate([ pts_0[0], np.ones((pts_0[0].shape[0], 1)) ], axis=1), rt[0].T )
    axis_list = []
    angle_list= []
    if item == 'eyeglasses':
        for j in range(1, num_parts):
            # NOTE(review): child parts are also transformed with rt[0] (the
            # base transform), not rt[j] -- confirm this is intended.
            chained_pts[j] = np.dot(np.concatenate([ pts_0[j], np.ones((pts_0[j].shape[0], 1)) ], axis=1), rt[0].T)
            if methods == 'H-L':
                # NOTE(review): shape[1] is the homogeneous column count (4),
                # so only row indices 0..3 are ever sampled -- shape[0]
                # (the number of points) may be what was intended.
                RandIdx = np.random.randint(chained_pts[j].shape[1], size=5)
                orient, position= estimate_joint_HL(chained_pts[j][RandIdx, 0:3], pts_1[j][RandIdx, 0:3])
                joint_axis = {}
                joint_axis['orient'] = orient
                joint_axis['position'] = position
                # Perpendicular offsets of the sampled points from the fitted
                # joint line, before and after the articulation.
                source_offset_arr= point_3d_offset_joint([position.reshape(1, 3), orient], chained_pts[j][RandIdx, 0:3])
                rotated_offset_arr= point_3d_offset_joint([position.reshape(1, 3), orient.reshape(1, 3)], pts_1[j][RandIdx, 0:3])
                angle = []
                for m in range(RandIdx.shape[0]):
                    modulus_0 = np.linalg.norm(source_offset_arr[m, :])
                    modulus_1 = np.linalg.norm(rotated_offset_arr[m, :])
                    # Angle between the two offset vectors = rotation of this
                    # point pair about the joint axis.
                    cos_angle = np.dot(source_offset_arr[m, :].reshape(1, 3), rotated_offset_arr[m, :].reshape(3, 1))/(modulus_0 * modulus_1)
                    angle_per_pair = np.arccos(cos_angle)
                    angle.append(angle_per_pair)
                print('angle per pair from multiple pairs: {}', angle)
                # NOTE(review): both the mean angle and the raw per-pair list
                # are appended, so angle_list interleaves scalars and lists.
                angle_list.append(sum(angle)/len(angle))
                axis_list.append(joint_axis)
                angle_list.append(angle)
    return axis_list, angle_list
def point_rotate_about_axis(pts, anchor, unitvec, theta):
    """Rotate pts [N, 3] by angle theta about the line through `anchor`
    with unit direction `unitvec`, using the closed-form axis-angle formula.
    """
    a, b, c = anchor.reshape(3)
    u, v, w = unitvec.reshape(3)
    x, y, z = pts[:, 0], pts[:, 1], pts[:, 2]
    cos_t = cos(theta)
    sin_t = sin(theta)
    # Projection of each point onto the axis direction.
    axial = u*x + v*y + w*z
    rotated_pts = np.zeros_like(pts)
    rotated_pts[:, 0] = (a*(v**2 + w**2) - u*(b*v + c*w - axial)) * (1 - cos_t) + x * cos_t + (-c*v + b*w - w*y + v*z) * sin_t
    rotated_pts[:, 1] = (b*(u**2 + w**2) - v*(a*u + c*w - axial)) * (1 - cos_t) + y * cos_t + (c*u - a*w + w*x - u*z) * sin_t
    rotated_pts[:, 2] = (c*(u**2 + v**2) - w*(a*u + b*v - axial)) * (1 - cos_t) + z * cos_t + (-b*u + a*v - v*x + u*y) * sin_t
    return rotated_pts
def estimate_joint_HL(source_pts, rotated_pts):
    """
    Fit a rotation-joint axis from point correspondences before/after motion.

    source_pts, rotated_pts: [N, 3] matched point sets.
    Returns (orient [3, 1], position [3, 1]): the axis direction and a
    least-squares point on the axis.
    """
    # estimate offsets
    # Each displacement is perpendicular to the rotation axis, and the axis
    # passes through the plane of each chord's midpoint.
    delta_P = rotated_pts - source_pts
    assert delta_P.shape[1] == 3, 'points coordinates dimension is wrong, current is {}'.format(delta_P.shape)
    mid_pts = (source_pts + rotated_pts)/2
    CC = np.zeros((3, 3), dtype=np.float32)
    BB = np.zeros((delta_P.shape[0], 1), dtype=np.float32)
    for j in range(0, delta_P.shape[0]):
        # Accumulate the displacement scatter matrix and the projection of
        # each midpoint onto its displacement.
        CC += np.dot(delta_P[j, :].reshape(3, 1), delta_P[j, :].reshape(1, 3))
        BB[j] = np.dot(delta_P[j, :].reshape(1, 3), mid_pts[j, :].reshape((3, 1)))
    w, v = np.linalg.eig(CC)
    print('eigen vectors are: \n', v)
    print('eigne values are: \n', w)
    # The axis direction is the eigenvector with the smallest eigenvalue:
    # displacements have (ideally) no component along the axis.
    orient = v[:, np.argmin(np.squeeze(w))].reshape(3, 1)
    # we already decouple the orient & position
    # Least-squares point on the axis from the midpoint constraints.
    mat_1 = np.linalg.pinv( np.dot(delta_P.T, delta_P) )
    position = np.dot( np.dot(mat_1, delta_P.T), BB)
    print('orient has shape {}, position has shape {}'.format(orient.shape, position.shape))
    return orient, position
if __name__ == '__main__':
    #>>>>>>>>> 3D IOU computation
    from scipy.spatial.transform import Rotation
    # Cube corners (edge length 2) in the corner order pts_inside_box expects.
    bbox1 = np.array([[-1, 1, 1], [1, 1, 1], [1, -1, 1], [-1, -1, 1], [-1, 1, -1], [1, 1, -1], [1, -1, -1], [-1, -1, -1]])
    print('bbox1.shape: ', bbox1.shape)
    # NOTE(review): Rotation.as_dcm() was renamed as_matrix() in SciPy 1.4 and
    # removed in 1.6 -- this demo only runs on older SciPy versions.
    rotmatrix = Rotation.from_rotvec(np.pi/4 * np.array([np.sqrt(3)/3, np.sqrt(3)/3, np.sqrt(3)/3])).as_dcm()
    bbox2 = np.matmul(bbox1, rotmatrix.T)
    bbox3 = bbox1 + np.array([[1, 0, 0]])
    rotmatrix2 = Rotation.from_rotvec(np.pi/4 * np.array([0, 0, 1])).as_dcm()
    bbox4 = np.matmul(bbox1, rotmatrix2.T)
    bbox5 = bbox1 + np.array([[2, 0, 0]])
    # Sanity checks: identical boxes -> 1; increasingly offset/rotated -> less.
    print(iou_3d(bbox1, bbox1))
    print(iou_3d(bbox1, bbox2))
    print(iou_3d(bbox1, bbox3))
    print(iou_3d(bbox1, bbox4))
    print(iou_3d(bbox1, bbox5))
    #>>>>>>>>> test for joint parameters fitting
    # Rotate sample points half a turn about the axis through (0,0,0) and
    # (1,1,1), then recover the joint from the correspondences.
    source_pts = np.array([[5, 1, 5], [0, 0, 1], [0.5,0.5,0.5], [2, 0, 1], [3, 3, 5]])
    p1 = np.array([0,0,0])
    p2 = np.array([1,1,1])
    unitvec = (p2 - p1) / np.linalg.norm(p2 - p1)
    anchor = p1
    rotated_pts = point_rotate_about_axis(source_pts, anchor, unitvec, pi)
    joint_axis, position = estimate_joint_HL(source_pts, rotated_pts)
    print(joint_axis, position)
    # Visualize both point sets and the ground-truth joint line.
    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')
    ax.scatter(source_pts[:, 0], source_pts[:, 1], source_pts[:, 2], c='r',
               marker='o', label='source pts')
    ax.scatter(rotated_pts[:, 0], rotated_pts[:, 1], rotated_pts[:, 2], c='b',
               marker='o', label='rotated pts')
    linepts = unitvec * np.mgrid[-5:5:2j][:, np.newaxis] + np.array(p1).reshape(1, 3)
    ax.plot3D(*linepts.T, linewidth=5, c='green')
    ax.legend(loc='lower left')
    plt.show()
| [
"numpy.trace",
"numpy.sum",
"numpy.abs",
"numpy.nan_to_num",
"numpy.ones",
"matplotlib.pyplot.figure",
"numpy.sin",
"numpy.linalg.svd",
"numpy.linalg.norm",
"numpy.mean",
"numpy.random.randint",
"numpy.zeros_like",
"numpy.linalg.eig",
"numpy.reshape",
"numpy.linspace",
"itertools.produ... | [((2074, 2109), 'numpy.linspace', 'np.linspace', (['bmin[0]', 'bmax[0]', 'nres'], {}), '(bmin[0], bmax[0], nres)\n', (2085, 2109), True, 'import numpy as np\n'), ((2119, 2154), 'numpy.linspace', 'np.linspace', (['bmin[1]', 'bmax[1]', 'nres'], {}), '(bmin[1], bmax[1], nres)\n', (2130, 2154), True, 'import numpy as np\n'), ((2164, 2199), 'numpy.linspace', 'np.linspace', (['bmin[2]', 'bmax[2]', 'nres'], {}), '(bmin[2], bmax[2], nres)\n', (2175, 2199), True, 'import numpy as np\n'), ((3451, 3498), 'numpy.array', 'np.array', (['projected_coordinates'], {'dtype': 'np.int32'}), '(projected_coordinates, dtype=np.int32)\n', (3459, 3498), True, 'import numpy as np\n'), ((5236, 5249), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (5242, 5249), True, 'import numpy as np\n'), ((5266, 5279), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (5272, 5279), True, 'import numpy as np\n'), ((5529, 5545), 'numpy.cross', 'np.cross', (['e1', 'e2'], {}), '(e1, e2)\n', (5537, 5545), True, 'import numpy as np\n'), ((5560, 5589), 'numpy.sum', 'np.sum', (['(orth_vect * (p1 - p2))'], {}), '(orth_vect * (p1 - p2))\n', (5566, 5589), True, 'import numpy as np\n'), ((5649, 5661), 'numpy.abs', 'np.abs', (['dist'], {}), '(dist)\n', (5655, 5661), True, 'import numpy as np\n'), ((5748, 5780), 'numpy.dot', 'np.dot', (['pcloud_target', 'projMat.T'], {}), '(pcloud_target, projMat.T)\n', (5754, 5780), True, 'import numpy as np\n'), ((6755, 6782), 'numpy.matmul', 'np.matmul', (['target.T', 'source'], {}), '(target.T, source)\n', (6764, 6782), True, 'import numpy as np\n'), ((6798, 6834), 'numpy.linalg.svd', 'np.linalg.svd', (['M'], {'full_matrices': '(True)'}), '(M, full_matrices=True)\n', (6811, 6834), True, 'import numpy as np\n'), ((6958, 6974), 'numpy.matmul', 'np.matmul', (['U', 'Vh'], {}), '(U, Vh)\n', (6967, 6974), True, 'import numpy as np\n'), ((10639, 10657), 'numpy.zeros_like', 'np.zeros_like', (['pts'], {}), '(pts)\n', (10652, 10657), True, 'import numpy as 
np\n'), ((11063, 11097), 'numpy.zeros', 'np.zeros', (['(3, 3)'], {'dtype': 'np.float32'}), '((3, 3), dtype=np.float32)\n', (11071, 11097), True, 'import numpy as np\n'), ((11112, 11161), 'numpy.zeros', 'np.zeros', (['(delta_P.shape[0], 1)'], {'dtype': 'np.float32'}), '((delta_P.shape[0], 1), dtype=np.float32)\n', (11120, 11161), True, 'import numpy as np\n'), ((11378, 11395), 'numpy.linalg.eig', 'np.linalg.eig', (['CC'], {}), '(CC)\n', (11391, 11395), True, 'import numpy as np\n'), ((11935, 12050), 'numpy.array', 'np.array', (['[[-1, 1, 1], [1, 1, 1], [1, -1, 1], [-1, -1, 1], [-1, 1, -1], [1, 1, -1], [\n 1, -1, -1], [-1, -1, -1]]'], {}), '([[-1, 1, 1], [1, 1, 1], [1, -1, 1], [-1, -1, 1], [-1, 1, -1], [1, \n 1, -1], [1, -1, -1], [-1, -1, -1]])\n', (11943, 12050), True, 'import numpy as np\n'), ((12208, 12237), 'numpy.matmul', 'np.matmul', (['bbox1', 'rotmatrix.T'], {}), '(bbox1, rotmatrix.T)\n', (12217, 12237), True, 'import numpy as np\n'), ((12370, 12400), 'numpy.matmul', 'np.matmul', (['bbox1', 'rotmatrix2.T'], {}), '(bbox1, rotmatrix2.T)\n', (12379, 12400), True, 'import numpy as np\n'), ((12670, 12741), 'numpy.array', 'np.array', (['[[5, 1, 5], [0, 0, 1], [0.5, 0.5, 0.5], [2, 0, 1], [3, 3, 5]]'], {}), '([[5, 1, 5], [0, 0, 1], [0.5, 0.5, 0.5], [2, 0, 1], [3, 3, 5]])\n', (12678, 12741), True, 'import numpy as np\n'), ((12749, 12768), 'numpy.array', 'np.array', (['[0, 0, 0]'], {}), '([0, 0, 0])\n', (12757, 12768), True, 'import numpy as np\n'), ((12776, 12795), 'numpy.array', 'np.array', (['[1, 1, 1]'], {}), '([1, 1, 1])\n', (12784, 12795), True, 'import numpy as np\n'), ((13048, 13060), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (13058, 13060), True, 'from matplotlib import pyplot as plt\n'), ((13530, 13540), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (13538, 13540), True, 'from matplotlib import pyplot as plt\n'), ((1556, 1586), 'numpy.reshape', 'np.reshape', (['bbox[4, :]', '(1, 3)'], {}), '(bbox[4, :], (1, 3))\n', (1566, 1586), 
True, 'import numpy as np\n'), ((1889, 1911), 'numpy.logical_and', 'np.logical_and', (['p1', 'p2'], {}), '(p1, p2)\n', (1903, 1911), True, 'import numpy as np\n'), ((1971, 2004), 'numpy.concatenate', 'np.concatenate', (['(bbox1, bbox2)', '(0)'], {}), '((bbox1, bbox2), 0)\n', (1985, 2004), True, 'import numpy as np\n'), ((2027, 2060), 'numpy.concatenate', 'np.concatenate', (['(bbox1, bbox2)', '(0)'], {}), '((bbox1, bbox2), 0)\n', (2041, 2060), True, 'import numpy as np\n'), ((2364, 2392), 'numpy.logical_and', 'np.logical_and', (['flag1', 'flag2'], {}), '(flag1, flag2)\n', (2378, 2392), True, 'import numpy as np\n'), ((2413, 2440), 'numpy.logical_or', 'np.logical_or', (['flag1', 'flag2'], {}), '(flag1, flag2)\n', (2426, 2440), True, 'import numpy as np\n'), ((3880, 3918), 'numpy.array_equal', 'np.array_equal', (['RT_1[3, :]', 'RT_2[3, :]'], {}), '(RT_1[3, :], RT_2[3, :])\n', (3894, 3918), True, 'import numpy as np\n'), ((4308, 4331), 'numpy.linalg.norm', 'np.linalg.norm', (['(T1 - T2)'], {}), '(T1 - T2)\n', (4322, 4331), True, 'import numpy as np\n'), ((5022, 5054), 'numpy.linalg.norm', 'np.linalg.norm', (['rot_vecs'], {'axis': '(1)'}), '(rot_vecs, axis=1)\n', (5036, 5054), True, 'import numpy as np\n'), ((5079, 5108), 'numpy.errstate', 'np.errstate', ([], {'invalid': '"""ignore"""'}), "(invalid='ignore')\n", (5090, 5108), True, 'import numpy as np\n'), ((5151, 5167), 'numpy.nan_to_num', 'np.nan_to_num', (['v'], {}), '(v)\n', (5164, 5167), True, 'import numpy as np\n'), ((5178, 5204), 'numpy.sum', 'np.sum', (['(points * v)'], {'axis': '(1)'}), '(points * v, axis=1)\n', (5184, 5204), True, 'import numpy as np\n'), ((5611, 5636), 'numpy.linalg.norm', 'np.linalg.norm', (['orth_vect'], {}), '(orth_vect)\n', (5625, 5636), True, 'import numpy as np\n'), ((6325, 6343), 'numpy.array', 'np.array', (['joint[0]'], {}), '(joint[0])\n', (6333, 6343), True, 'import numpy as np\n'), ((6357, 6372), 'numpy.array', 'np.array', (['point'], {}), '(point)\n', (6365, 6372), True, 'import 
numpy as np\n'), ((6657, 6690), 'numpy.mean', 'np.mean', (['source', '(0)'], {'keepdims': '(True)'}), '(source, 0, keepdims=True)\n', (6664, 6690), True, 'import numpy as np\n'), ((6713, 6746), 'numpy.mean', 'np.mean', (['target', '(0)'], {'keepdims': '(True)'}), '(target, 0, keepdims=True)\n', (6720, 6746), True, 'import numpy as np\n'), ((7136, 7169), 'numpy.mean', 'np.mean', (['source', '(0)'], {'keepdims': '(True)'}), '(source, 0, keepdims=True)\n', (7143, 7169), True, 'import numpy as np\n'), ((7201, 7234), 'numpy.mean', 'np.mean', (['target', '(0)'], {'keepdims': '(True)'}), '(target, 0, keepdims=True)\n', (7208, 7234), True, 'import numpy as np\n'), ((7915, 7927), 'numpy.dot', 'np.dot', (['A', 'b'], {}), '(A, b)\n', (7921, 7927), True, 'import numpy as np\n'), ((11606, 11632), 'numpy.dot', 'np.dot', (['delta_P.T', 'delta_P'], {}), '(delta_P.T, delta_P)\n', (11612, 11632), True, 'import numpy as np\n'), ((11659, 11683), 'numpy.dot', 'np.dot', (['mat_1', 'delta_P.T'], {}), '(mat_1, delta_P.T)\n', (11665, 11683), True, 'import numpy as np\n'), ((12258, 12279), 'numpy.array', 'np.array', (['[[1, 0, 0]]'], {}), '([[1, 0, 0]])\n', (12266, 12279), True, 'import numpy as np\n'), ((12421, 12442), 'numpy.array', 'np.array', (['[[2, 0, 0]]'], {}), '([[2, 0, 0]])\n', (12429, 12442), True, 'import numpy as np\n'), ((12820, 12843), 'numpy.linalg.norm', 'np.linalg.norm', (['(p2 - p1)'], {}), '(p2 - p1)\n', (12834, 12843), True, 'import numpy as np\n'), ((370, 773), 'numpy.array', 'np.array', (['[[scale[0] / 2, +scale[1] / 2, scale[2] / 2], [scale[0] / 2, +scale[1] / 2,\n -scale[2] / 2], [-scale[0] / 2, +scale[1] / 2, scale[2] / 2], [-scale[0\n ] / 2, +scale[1] / 2, -scale[2] / 2], [+scale[0] / 2, -scale[1] / 2, \n scale[2] / 2], [+scale[0] / 2, -scale[1] / 2, -scale[2] / 2], [-scale[0\n ] / 2, -scale[1] / 2, scale[2] / 2], [-scale[0] / 2, -scale[1] / 2, -\n scale[2] / 2]]'], {}), '([[scale[0] / 2, +scale[1] / 2, scale[2] / 2], [scale[0] / 2, +\n scale[1] / 2, -scale[2] / 
2], [-scale[0] / 2, +scale[1] / 2, scale[2] /\n 2], [-scale[0] / 2, +scale[1] / 2, -scale[2] / 2], [+scale[0] / 2, -\n scale[1] / 2, scale[2] / 2], [+scale[0] / 2, -scale[1] / 2, -scale[2] /\n 2], [-scale[0] / 2, -scale[1] / 2, scale[2] / 2], [-scale[0] / 2, -\n scale[1] / 2, -scale[2] / 2]])\n', (378, 773), True, 'import numpy as np\n'), ((913, 1241), 'numpy.array', 'np.array', (['[[scale / 2, +scale / 2, scale / 2], [scale / 2, +scale / 2, -scale / 2], [\n -scale / 2, +scale / 2, scale / 2], [-scale / 2, +scale / 2, -scale / 2\n ], [+scale / 2, -scale / 2, scale / 2], [+scale / 2, -scale / 2, -scale /\n 2], [-scale / 2, -scale / 2, scale / 2], [-scale / 2, -scale / 2, -\n scale / 2]]'], {}), '([[scale / 2, +scale / 2, scale / 2], [scale / 2, +scale / 2, -\n scale / 2], [-scale / 2, +scale / 2, scale / 2], [-scale / 2, +scale / \n 2, -scale / 2], [+scale / 2, -scale / 2, scale / 2], [+scale / 2, -\n scale / 2, -scale / 2], [-scale / 2, -scale / 2, scale / 2], [-scale / \n 2, -scale / 2, -scale / 2]])\n', (921, 1241), True, 'import numpy as np\n'), ((1749, 1763), 'numpy.dot', 'np.dot', (['u1', 'u1'], {}), '(u1, u1)\n', (1755, 1763), True, 'import numpy as np\n'), ((1798, 1812), 'numpy.dot', 'np.dot', (['u2', 'u2'], {}), '(u2, u2)\n', (1804, 1812), True, 'import numpy as np\n'), ((1847, 1861), 'numpy.dot', 'np.dot', (['u3', 'u3'], {}), '(u3, u3)\n', (1853, 1861), True, 'import numpy as np\n'), ((2840, 2892), 'numpy.ones', 'np.ones', (['(1, coordinates.shape[1])'], {'dtype': 'np.float32'}), '((1, coordinates.shape[1]), dtype=np.float32)\n', (2847, 2892), True, 'import numpy as np\n'), ((3961, 3983), 'numpy.array', 'np.array', (['[0, 0, 0, 1]'], {}), '([0, 0, 0, 1])\n', (3969, 3983), True, 'import numpy as np\n'), ((4081, 4108), 'numpy.linalg.det', 'np.linalg.det', (['RT_1[:3, :3]'], {}), '(RT_1[:3, :3])\n', (4094, 4108), True, 'import numpy as np\n'), ((4162, 4189), 'numpy.linalg.det', 'np.linalg.det', (['RT_2[:3, :3]'], {}), '(RT_2[:3, :3])\n', (4175, 4189), True, 
'import numpy as np\n'), ((6844, 6860), 'numpy.linalg.det', 'np.linalg.det', (['U'], {}), '(U)\n', (6857, 6860), True, 'import numpy as np\n'), ((6863, 6880), 'numpy.linalg.det', 'np.linalg.det', (['Vh'], {}), '(Vh)\n', (6876, 6880), True, 'import numpy as np\n'), ((7931, 7943), 'numpy.dot', 'np.dot', (['A', 'A'], {}), '(A, A)\n', (7937, 7943), True, 'import numpy as np\n'), ((10349, 10359), 'math.sin', 'sin', (['theta'], {}), '(theta)\n', (10352, 10359), False, 'from math import pi, sin, cos\n'), ((10479, 10489), 'math.sin', 'sin', (['theta'], {}), '(theta)\n', (10482, 10489), False, 'from math import pi, sin, cos\n'), ((10610, 10620), 'math.sin', 'sin', (['theta'], {}), '(theta)\n', (10613, 10620), False, 'from math import pi, sin, cos\n'), ((2231, 2260), 'itertools.product', 'itertools.product', (['xs', 'ys', 'zs'], {}), '(xs, ys, zs)\n', (2248, 2260), False, 'import itertools\n'), ((5325, 5344), 'numpy.cross', 'np.cross', (['v', 'points'], {}), '(v, points)\n', (5333, 5344), True, 'import numpy as np\n'), ((6386, 6404), 'numpy.array', 'np.array', (['joint[1]'], {}), '(joint[1])\n', (6394, 6404), True, 'import numpy as np\n'), ((7420, 7449), 'numpy.matmul', 'np.matmul', (['rotation', 'source.T'], {}), '(rotation, source.T)\n', (7429, 7449), True, 'import numpy as np\n'), ((7725, 7748), 'numpy.sum', 'np.sum', (['(pdist_s ** 2)', '(2)'], {}), '(pdist_s ** 2, 2)\n', (7731, 7748), True, 'import numpy as np\n'), ((7868, 7891), 'numpy.sum', 'np.sum', (['(pdist_t ** 2)', '(2)'], {}), '(pdist_t ** 2, 2)\n', (7874, 7891), True, 'import numpy as np\n'), ((8440, 8471), 'numpy.ones', 'np.ones', (['(pts_0[0].shape[0], 1)'], {}), '((pts_0[0].shape[0], 1))\n', (8447, 8471), True, 'import numpy as np\n'), ((8775, 8825), 'numpy.random.randint', 'np.random.randint', (['chained_pts[j].shape[1]'], {'size': '(5)'}), '(chained_pts[j].shape[1], size=5)\n', (8792, 8825), True, 'import numpy as np\n'), ((10309, 10319), 'math.cos', 'cos', (['theta'], {}), '(theta)\n', (10312, 10319), 
False, 'from math import pi, sin, cos\n'), ((10440, 10450), 'math.cos', 'cos', (['theta'], {}), '(theta)\n', (10443, 10450), False, 'from math import pi, sin, cos\n'), ((10570, 10580), 'math.cos', 'cos', (['theta'], {}), '(theta)\n', (10573, 10580), False, 'from math import pi, sin, cos\n'), ((13417, 13429), 'numpy.array', 'np.array', (['p1'], {}), '(p1)\n', (13425, 13429), True, 'import numpy as np\n'), ((4559, 4574), 'numpy.sum', 'np.sum', (['(v1 * v2)'], {}), '(v1 * v2)\n', (4565, 4574), True, 'import numpy as np\n'), ((6452, 6468), 'numpy.dot', 'np.dot', (['P0P', 'l.T'], {}), '(P0P, l.T)\n', (6458, 6468), True, 'import numpy as np\n'), ((6475, 6492), 'numpy.linalg.norm', 'np.linalg.norm', (['l'], {}), '(l)\n', (6489, 6492), True, 'import numpy as np\n'), ((9422, 9461), 'numpy.linalg.norm', 'np.linalg.norm', (['source_offset_arr[m, :]'], {}), '(source_offset_arr[m, :])\n', (9436, 9461), True, 'import numpy as np\n'), ((9494, 9534), 'numpy.linalg.norm', 'np.linalg.norm', (['rotated_offset_arr[m, :]'], {}), '(rotated_offset_arr[m, :])\n', (9508, 9534), True, 'import numpy as np\n'), ((9714, 9734), 'numpy.arccos', 'np.arccos', (['cos_angle'], {}), '(cos_angle)\n', (9723, 9734), True, 'import numpy as np\n'), ((10291, 10301), 'math.cos', 'cos', (['theta'], {}), '(theta)\n', (10294, 10301), False, 'from math import pi, sin, cos\n'), ((10422, 10432), 'math.cos', 'cos', (['theta'], {}), '(theta)\n', (10425, 10432), False, 'from math import pi, sin, cos\n'), ((10552, 10562), 'math.cos', 'cos', (['theta'], {}), '(theta)\n', (10555, 10562), False, 'from math import pi, sin, cos\n'), ((12328, 12347), 'numpy.array', 'np.array', (['[0, 0, 1]'], {}), '([0, 0, 1])\n', (12336, 12347), True, 'import numpy as np\n'), ((4264, 4275), 'numpy.trace', 'np.trace', (['R'], {}), '(R)\n', (4272, 4275), True, 'import numpy as np\n'), ((4574, 4592), 'numpy.linalg.norm', 'np.linalg.norm', (['v1'], {}), '(v1)\n', (4588, 4592), True, 'import numpy as np\n'), ((4595, 4613), 'numpy.linalg.norm', 
'np.linalg.norm', (['v2'], {}), '(v2)\n', (4609, 4613), True, 'import numpy as np\n'), ((4813, 4836), 'numpy.matmul', 'np.matmul', (['rot1', 'rot2.T'], {}), '(rot1, rot2.T)\n', (4822, 4836), True, 'import numpy as np\n'), ((8662, 8693), 'numpy.ones', 'np.ones', (['(pts_0[j].shape[0], 1)'], {}), '((pts_0[j].shape[0], 1))\n', (8669, 8693), True, 'import numpy as np\n'), ((11499, 11512), 'numpy.squeeze', 'np.squeeze', (['w'], {}), '(w)\n', (11509, 11512), True, 'import numpy as np\n'), ((12143, 12153), 'numpy.sqrt', 'np.sqrt', (['(3)'], {}), '(3)\n', (12150, 12153), True, 'import numpy as np\n'), ((12157, 12167), 'numpy.sqrt', 'np.sqrt', (['(3)'], {}), '(3)\n', (12164, 12167), True, 'import numpy as np\n'), ((12171, 12181), 'numpy.sqrt', 'np.sqrt', (['(3)'], {}), '(3)\n', (12178, 12181), True, 'import numpy as np\n')] |
import matplotlib.pyplot as plt
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
import networkx as nx
import numpy as np
import sklearn.metrics as metrics
import torch
import torch.nn as nn
from torch.autograd import Variable
from tensorboardX import SummaryWriter
import argparse
import os
import pickle
import random
import shutil
import time
from argparse import ArgumentParser
import itertools
from dataloader import *
from encoders import *
from sklearn.svm import SVC
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import roc_auc_score
from sklearn.metrics import classification_report
def make_args(arg_list=None):
    """Build the experiment's command-line interface and parse arguments.

    Args:
        arg_list: optional list of argument strings to parse (useful for
            testing or programmatic invocation). When None (the default),
            arguments are read from ``sys.argv`` exactly as before, so the
            existing top-level call ``args = make_args()`` is unaffected.

    Returns:
        argparse.Namespace with all experiment options. Note that the
        ``set_defaults`` call at the bottom overrides several per-argument
        defaults (task, model, permute, ...), so those are the effective
        defaults.
    """
    parser = ArgumentParser()
    # Task / model / dataset selection.
    parser.add_argument('--task', dest='task', default='link', type=str,
                        help='link; node; linknew')
    parser.add_argument('--model', dest='model', default='gcn', type=str,
                        help='deepbourgain; bourgain; gcn; gcnbourgain; node_feature; hybrid; pgcn; gat')
    parser.add_argument('--dist_only', dest='dist_only', action='store_true',
                        help='whether dist_only')
    parser.add_argument('--dataset', dest='dataset', default='grid', type=str,
                        help='grid; caveman; barabasi, cora, citeseer, pubmed')
    parser.add_argument('--loss', dest='loss', default='l2', type=str,
                        help='l2; cross_entropy')
    parser.add_argument('--gpu', dest='gpu', action='store_true',
                        help='whether use gpu')
    # dataset
    parser.add_argument('--remove_link_ratio', dest='remove_link_ratio', default=0.5, type=float)
    parser.add_argument('--graph_test_ratio', dest='graph_test_ratio', default=0.2, type=float)
    # --permute / --permute_no share the same dest, so the flag given last wins.
    parser.add_argument('--permute', dest='permute', action='store_true',
                        help='whether permute subsets')
    parser.add_argument('--permute_no', dest='permute', action='store_false',
                        help='whether permute subsets')
    # parser.add_argument('--approximate', dest='approximate', action='store_true',
    #                     help='whether approximate dists')
    # parser.add_argument('--approximate_no', dest='approximate', action='store_false',
    #                     help='whether approximate dists')
    parser.add_argument('--approximate', dest='approximate', default=-1, type=int)
    parser.add_argument('--batch_size', dest='batch_size', default=32, type=int)
    parser.add_argument('--num_workers', dest='num_workers', default=0, type=int)
    # Architecture hyper-parameters.
    parser.add_argument('--num_layers', dest='num_layers', default=1, type=int)
    parser.add_argument('--output_dim', dest='output_dim', default=16, type=int)
    parser.add_argument('--hidden_dim', dest='hidden_dim', default=16, type=int)
    parser.add_argument('--normalize_dist', dest='normalize_dist', action='store_true',
                        help='whether normalize_dist')
    parser.add_argument('--normalize_adj', dest='normalize_adj', action='store_true',
                        help='whether normalize_adj')
    # Optimization hyper-parameters.
    parser.add_argument('--lr', dest='lr', default=1e-3, type=float)
    parser.add_argument('--num_epochs', dest='num_epochs', default=10, type=int)
    parser.add_argument('--num_repeats', dest='num_repeats', default=10, type=int)
    parser.add_argument('--clip', dest='clip', default=2.0, type=float)
    # Effective experiment defaults; these override the values declared above.
    parser.set_defaults(gpu=False, task='linknew', model='pgcn', dataset='grid',
                        permute=True, approximate=-1, dist_only=False, normalize_adj=False)
    args = parser.parse_args(arg_list)
    return args
# Parse CLI options once at import time; the whole script below is driven by `args`.
args = make_args()
print(args)
# Fixed seed so link-removal / sampling is reproducible across runs.
np.random.seed(123)
# Link prediction is treated as binary classification on raw logits.
loss_func = nn.BCEWithLogitsLoss()
#### new prediction
# data
# Multi-graph sampler used by the 'linknew' task (defined in dataloader).
dataset_sampler = graphs_dataset_loader(name=args.dataset, remove_link_ratio=args.remove_link_ratio,
                                      graph_test_ratio=args.graph_test_ratio, permute=args.permute,
                                      approximate=args.approximate,
                                      normalize_adj=args.normalize_adj)
if args.task == 'linknew':
    # Multi-graph link prediction: the whole train/test cycle is repeated
    # args.num_repeats times and mean/std of the per-run test AUC is printed.
    auc = []
    for _ in range(args.num_repeats):
        # model
        if args.model == 'deepbourgain':
            model = DeepBourgain(input_dim=dataset_sampler.node_feature.shape[2], output_dim=args.output_dim,
                                 head_num=16, hidden_dim=16, has_out_act=False)
        elif args.model == 'deepbourgainapp':
            model = DeepBourgain(input_dim=dataset_sampler.node_feature.shape[2], output_dim=args.output_dim,
                                 head_num=16, hidden_dim=16, has_out_act=False)
        elif args.model == 'bourgain':
            subset_dists, _ = dataset_sampler.recompute_feature()
            # model = MLP(input_dim=subset_dists.shape[1], hidden_dim=16, output_dim=16)
            model = MLP(input_dim=1, hidden_dim=args.hidden_dim, output_dim=1, act=nn.ReLU())
        elif args.model == 'gcn':
            # model = GraphConv(input_dim=dataset_sampler.graphs_feature[0].shape[1],
            #                        hidden_dim=16, output_dim=16)
            model = GCN(input_dim=dataset_sampler.graphs_feature[0].shape[1],
                        hidden_dim=args.hidden_dim, output_dim=args.output_dim, num_layers=args.num_layers)
        elif args.model == 'gat':
            # GAT here is the GCN encoder with its attention flag switched on.
            model = GCN(input_dim=dataset_sampler.graphs_feature[0].shape[1],
                        hidden_dim=args.hidden_dim, output_dim=args.output_dim, num_layers=args.num_layers, att=True)
            # model = model = GAT(nfeat=dataset_sampler.graphs_feature[0].shape[1],
            #             nhid=args.hidden_dim,
            #             nclass=0,
            #             dropout=0,
            #             nheads=8,
            #             alpha=0.2)
        elif args.model == 'mpnn':
            model = GCN(input_dim=dataset_sampler.graphs_feature[0].shape[1],
                        hidden_dim=args.hidden_dim, output_dim=args.output_dim, num_layers=args.num_layers, mpnn=True)
        elif args.model == 'graphsage':
            model = GCN(input_dim=dataset_sampler.graphs_feature[0].shape[1],
                        hidden_dim=args.hidden_dim, output_dim=args.output_dim, num_layers=args.num_layers, graphsage=True)
        elif args.model == 'pgcn':
            # model = PositionGraphConv(input_dim=dataset_sampler.graphs_feature[0].shape[1],
            #                  hidden_dim=args.hidden_dim, output_dim=args.output_dim, dist_only=args.dist_only)
            model = PGNN(input_dim=dataset_sampler.graphs_feature[0].shape[1],
                         hidden_dim=args.hidden_dim, output_dim=args.output_dim, num_layers=args.num_layers, dist_only=args.dist_only)
        # Only parameters with requires_grad are optimized (frozen parts skipped).
        optimizer = torch.optim.Adam(filter(lambda p: p.requires_grad, model.parameters()), lr=args.lr)
        out_act = nn.Sigmoid()
        softmax = nn.Softmax(dim=1)
        # train
        for epoch in range(args.num_epochs):
            # NOTE(review): this training `while True` shows no break on a
            # dataset_sampler.done_train-style flag (the test loop below does
            # break on done_test) — confirm the termination condition was not
            # lost; as written the loop would not advance to the next epoch.
            while True:
                correct = 0
                total = 0
                model.zero_grad()
                # if epoch==5:
                #     for param_group in optimizer.param_groups:
                #         param_group['lr'] /= 10
                batch = dataset_sampler.get_batch_train()
                adj = Variable(torch.from_numpy(batch[0]).float(), requires_grad=False)
                feature = Variable(torch.from_numpy(batch[1]).float(), requires_grad=False)
                dist = Variable(torch.from_numpy(batch[2]).float(), requires_grad=False)
                label = Variable(torch.from_numpy(batch[3]).float(), requires_grad=False)
                mask = Variable(torch.from_numpy(batch[4]).float(), requires_grad=False)
                dist_max = Variable(torch.from_numpy(dataset_sampler.dist_max).float(), requires_grad=False)
                dist_argmax = dataset_sampler.dist_argmax
                if args.model == 'deepbourgain':
                    # NOTE(review): node_feature/subset_dists/subset_features
                    # are not defined in this branch of the script — this path
                    # would raise NameError; verify against the 'link' task code.
                    pred = model(node_feature, subset_dists, subset_features)
                elif args.model == 'deepbourgainapp':
                    pred = model(node_feature, torch.Tensor(dataset_sampler.adj_count))
                elif args.model == 'bourgain':
                    # pred = model(subset_dists[:,:,0])
                    pred = torch.squeeze(model(subset_dists))
                elif args.model == 'gcn' or args.model =='gat' or args.model =='mpnn'or args.model =='graphsage':
                    pred = model(feature, adj)
                elif args.model == 'pgcn':
                    pred = model(feature, dist, dist_max, dist_argmax)
                # pdb.set_trace()
                # Score each masked node pair by the dot product of its two
                # node embeddings (batched as (n,1,d) x (n,d,1) matmuls).
                mask_id = torch.nonzero(mask)
                nodes_first = torch.index_select(pred, 0, mask_id[:, 0].long())
                nodes_second = torch.index_select(pred, 0, mask_id[:, 1].long())
                nodes_first = nodes_first.view(nodes_first.shape[0], 1, nodes_first.shape[1])
                nodes_second = nodes_second.view(nodes_second.shape[0], nodes_second.shape[1], 1)
                pred = torch.matmul(nodes_first, nodes_second).squeeze()
                # pred = pred*model.w - model.b
                label = torch.masked_select(label, mask.byte())
                # if args.loss == 'l2':
                #     loss = torch.mean((adj_pred - adj) ** 2 * mask) # todo cross entropy
                loss = loss_func(pred, label)
                # pdb.set_trace()
                # dist_only means the distance feature is evaluated untrained.
                if not args.dist_only:
                    loss.backward()
                    # nn.utils.clip_grad_norm(model.parameters(), args.clip)
                    optimizer.step()
                pred_binary = torch.where(out_act(pred) > 0.5, torch.Tensor([1]), torch.Tensor([0]))
                correct += np.sum(pred_binary.data.numpy() == label.data.numpy())
                total += pred.shape[0]
                auc_train = roc_auc_score(label.flatten().data.numpy(), out_act(pred).flatten().data.numpy())
                acc_train = correct / total
                # print(classification_report(adj.flatten().data.numpy(), adj_pred_binary.flatten().data.numpy()))
                # pdb.set_trace()
                print('epoch', epoch, 'loss', loss.data, 'acc_train', acc_train, 'auc_train', auc_train)
                # print('epoch', epoch, 'loss', loss.data, 'acc_train', acc_train)
            # Accumulate labels/predictions across all test batches so the
            # final AUC is computed over the whole test set at once.
            label_all = []
            pred_all = []
            while True:
                # val
                correct = 0
                total = 0
                batch = dataset_sampler.get_batch_test()
                adj = Variable(torch.from_numpy(batch[0]).float(), requires_grad=False)
                feature = Variable(torch.from_numpy(batch[1]).float(), requires_grad=False)
                dist = Variable(torch.from_numpy(batch[2]).float(), requires_grad=False)
                label = Variable(torch.from_numpy(batch[3]).float(), requires_grad=False)
                mask = Variable(torch.from_numpy(batch[4]).float(), requires_grad=False)
                dist_max = Variable(torch.from_numpy(dataset_sampler.dist_max).float(), requires_grad=False)
                dist_argmax = dataset_sampler.dist_argmax
                if args.model == 'deepbourgain':
                    pred = model(node_feature, subset_dists, subset_features)
                elif args.model == 'deepbourgainapp':
                    # adj_lists_unique = Variable(torch.Tensor(dataset_sampler.adj_lists_unique).long(), requires_grad=False)
                    # adj_lists_count = Variable(torch.Tensor(dataset_sampler.adj_lists_count).float(), requires_grad=False)
                    pred = model(node_feature, torch.Tensor(dataset_sampler.adj_count))
                elif args.model == 'bourgain':
                    # pred = model(subset_dists[:,:,0])
                    # pred = torch.sum(model(subset_dists), dim = 1)
                    # chunk_size = subset_dists.shape[1] // len(models)
                    # preds = []
                    # for i in range(len(models)):
                    #     pred = models[i](subset_dists[:, i * chunk_size:(i + 1) * chunk_size, :])
                    #     preds.append(torch.sum(pred, dim=1))
                    # pred = torch.cat(preds, dim=1)
                    # pdb.set_trace()
                    # pred = F.normalize(pred, p=2, dim=-1)
                    pred = torch.squeeze(model(subset_dists))
                elif args.model == 'gcn' or args.model == 'gat' or args.model == 'mpnn' or args.model =='graphsage':
                    pred = model(feature, adj)
                elif args.model == 'pgcn':
                    pred = model(feature, dist, dist_max, dist_argmax)
                # pdb.set_trace()
                # Same pairwise dot-product scoring as in training.
                mask_id = torch.nonzero(mask)
                nodes_first = torch.index_select(pred, 0, mask_id[:, 0])
                nodes_second = torch.index_select(pred, 0, mask_id[:, 1])
                nodes_first = nodes_first.view(nodes_first.shape[0], 1, nodes_first.shape[1])
                nodes_second = nodes_second.view(nodes_second.shape[0], nodes_second.shape[1], 1)
                pred = torch.matmul(nodes_first, nodes_second).squeeze()
                # pdb.set_trace()
                # pred = pred*model.w - model.b
                label = torch.masked_select(label, mask.byte())
                # adj_pred = pred @ pred.permute(1, 0)
                # adj_pred = out_act(adj_pred)
                # evaluate
                pred_binary = torch.where(out_act(pred) > 0.5, torch.Tensor([1]), torch.Tensor([0]))
                correct += np.sum(pred_binary.data.numpy() == label.data.numpy())
                total += pred.shape[0]
                # pdb.set_trace()
                label_all.append(label.flatten().data.numpy())
                pred_all.append(out_act(pred).flatten().data.numpy())
                auc_test = roc_auc_score(label.flatten().data.numpy(), out_act(pred).flatten().data.numpy())
                acc_test = correct / total
                if dataset_sampler.done_test:
                    break
            # print('epoch', epoch, 'loss', loss.data,
            #       'acc_test', acc_test, 'auc_test', auc_test)
            # AUC over the concatenated test set for this epoch.
            auc_test_all = roc_auc_score(np.concatenate(label_all,axis=0), np.concatenate(pred_all,axis=0))
            print('------auc all-----', auc_test_all)
            auc.append(auc_test_all)
    # Summary over all repeats (and epochs): mean and std of the test AUC.
    auc = np.array(auc)
    print('-----------------Final-------------------')
    print(args)
    print(np.mean(auc))
    print(np.std(auc))
#### link prediction
if args.task == 'link':
    # Single-graph link prediction: full-batch training on one fixed
    # train/test split produced by graph_dataset_link_prediction.
    # data loader
    if 'app' in args.model:
        dataset_sampler = graph_dataset_link_prediction(name=args.dataset, permute=args.permute, approximate=True)
    else:
        dataset_sampler = graph_dataset_link_prediction(name=args.dataset, permute=args.permute)
    # model
    if args.model == 'deepbourgain':
        model = DeepBourgain(input_dim=dataset_sampler.node_feature.shape[2], output_dim=args.output_dim,
                             head_num=16, hidden_dim=16, has_out_act=False)
    elif args.model == 'deepbourgainapp':
        model = DeepBourgain(input_dim=dataset_sampler.node_feature.shape[2], output_dim=args.output_dim,
                             head_num=16, hidden_dim=16, has_out_act=False)
    elif args.model == 'gcn':
        model = GraphConv(input_dim=dataset_sampler.node_feature.shape[2], output_dim=args.output_dim,
                          normalize_embedding = True)
    elif args.model == 'bourgain':
        subset_dists, _ = dataset_sampler.recompute_feature()
        # model = MLP(input_dim=subset_dists.shape[1], hidden_dim=16, output_dim=16)
        model = MLP(input_dim=1, hidden_dim=16, output_dim=1, act=nn.ReLU())
        # model = nn.Linear(1,1)
        # models = [MLP(input_dim=1, hidden_dim=16, output_dim=16) for i in range(dataset_sampler.subset_types)]
    if args.gpu:
        model = model.cuda()
    # if args.model == 'bourgain':
    #     optimizer = torch.optim.Adam(list(itertools.chain(*[list(model.parameters()) for model in models])), lr=args.lr)
    # else:
    optimizer = torch.optim.Adam(filter(lambda p: p.requires_grad, model.parameters()), lr=args.lr)
    out_act = nn.Sigmoid()
    softmax = nn.Softmax(dim=1)
    # train
    for epoch in range(args.num_epochs):
        # if epoch==5:
        #     for param_group in optimizer.param_groups:
        #         param_group['lr'] /= 10
        # train
        correct = 0
        total = 0
        # if args.model == 'bourgain':
        #     for model in models:
        #         model.zero_grad()
        # else:
        model.zero_grad()
        batch = dataset_sampler.get_fullbatch_train()
        node_feature = Variable(torch.from_numpy(batch[0]).float(), requires_grad=False)
        adj = Variable(torch.from_numpy(batch[1]).float(), requires_grad=False)
        subset_dists = Variable(torch.from_numpy(batch[2]).float(), requires_grad=False)
        if args.normalize_dist:
            # subset_dists = softmax(subset_dists)
            # Map distances into (0, 1]: nearer anchors get larger weights.
            subset_dists = 1/(subset_dists+1)
        subset_features = Variable(torch.from_numpy(batch[3]).float(), requires_grad=False)
        mask = Variable(torch.from_numpy(batch[4]).long(), requires_grad=False)
        if args.model == 'deepbourgain':
            pred = model(node_feature, subset_dists, subset_features)
        elif args.model == 'deepbourgainapp':
            pred = model(node_feature, torch.Tensor(dataset_sampler.adj_count))
        elif args.model == 'gcn':
            pred = model(node_feature, adj)
        elif args.model == 'bourgain':
            # pred = model(subset_dists[:,:,0])
            pred = torch.squeeze(model(subset_dists))
            # chunk_size = subset_dists.shape[1]//len(models)
            # preds = []
            # for i in range(len(models)):
            #     pred = models[i](subset_dists[:,i*chunk_size:(i+1)*chunk_size,:])
            #     preds.append(torch.sum(pred, dim = 1))
            # pred = torch.cat(preds,dim=1)
            # pdb.set_trace()
            # pred = F.normalize(pred, p=2, dim=-1)
        # pdb.set_trace()
        # Score each masked node pair by the dot product of its embeddings.
        mask_id = torch.nonzero(mask)
        nodes_first = torch.index_select(pred, 0, mask_id[:,0].long())
        nodes_second = torch.index_select(pred, 0, mask_id[:,1].long())
        nodes_first = nodes_first.view(nodes_first.shape[0], 1, nodes_first.shape[1])
        nodes_second = nodes_second.view(nodes_second.shape[0], nodes_second.shape[1], 1)
        pred = torch.matmul(nodes_first, nodes_second).squeeze()
        label = torch.masked_select(adj, mask.byte())
        # if args.loss == 'l2':
        #     loss = torch.mean((adj_pred - adj) ** 2 * mask) # todo cross entropy
        loss = loss_func(pred, label)
        # pdb.set_trace()
        loss.backward()
        # nn.utils.clip_grad_norm(model.parameters(), args.clip)
        optimizer.step()
        # evaluate
        # adj_masked = torch.masked_select(adj, mask.byte())
        # adj_pred_masked = torch.masked_select(adj_pred, mask.byte())
        pred_binary = torch.where(out_act(pred) > 0.5, torch.Tensor([1]), torch.Tensor([0]))
        correct += np.sum(pred_binary.data.numpy() == label.data.numpy())
        total += pred.shape[0]
        auc_train = roc_auc_score(label.flatten().data.numpy(), pred.flatten().data.numpy())
        acc_train = correct/total
        # print(classification_report(adj.flatten().data.numpy(), adj_pred_binary.flatten().data.numpy()))
        # pdb.set_trace()
        # val
        correct = 0
        total = 0
        batch = dataset_sampler.get_fullbatch_test()
        node_feature = Variable(torch.from_numpy(batch[0]).float(), requires_grad=False)
        adj = Variable(torch.from_numpy(batch[1]).float(), requires_grad=False)
        subset_dists = Variable(torch.from_numpy(batch[2]).float(), requires_grad=False)
        if args.normalize_dist:
            # subset_dists = softmax(subset_dists)
            subset_dists = 1/(subset_dists+1)
        subset_features = Variable(torch.from_numpy(batch[3]).float(), requires_grad=False)
        mask = Variable(torch.from_numpy(batch[4]).long(), requires_grad=False)
        # adj_test holds the held-out edges used as test labels.
        adj_test = Variable(torch.from_numpy(batch[5]).float(), requires_grad=False)
        if args.model == 'deepbourgain':
            pred = model(node_feature, subset_dists, subset_features)
        elif args.model == 'deepbourgainapp':
            # adj_lists_unique = Variable(torch.Tensor(dataset_sampler.adj_lists_unique).long(), requires_grad=False)
            # adj_lists_count = Variable(torch.Tensor(dataset_sampler.adj_lists_count).float(), requires_grad=False)
            pred = model(node_feature, torch.Tensor(dataset_sampler.adj_count))
        elif args.model == 'gcn':
            pred = model(node_feature, adj)
        elif args.model == 'bourgain':
            # pred = model(subset_dists[:,:,0])
            # pred = torch.sum(model(subset_dists), dim = 1)
            # chunk_size = subset_dists.shape[1] // len(models)
            # preds = []
            # for i in range(len(models)):
            #     pred = models[i](subset_dists[:, i * chunk_size:(i + 1) * chunk_size, :])
            #     preds.append(torch.sum(pred, dim=1))
            # pred = torch.cat(preds, dim=1)
            # pdb.set_trace()
            # pred = F.normalize(pred, p=2, dim=-1)
            pred = torch.squeeze(model(subset_dists))
        mask_id = torch.nonzero(mask)
        nodes_first = torch.index_select(pred, 0, mask_id[:, 0])
        nodes_second = torch.index_select(pred, 0, mask_id[:, 1])
        nodes_first = nodes_first.view(nodes_first.shape[0], 1, nodes_first.shape[1])
        nodes_second = nodes_second.view(nodes_second.shape[0], nodes_second.shape[1], 1)
        pred = torch.matmul(nodes_first, nodes_second).squeeze()
        label = torch.masked_select(adj_test, mask.byte())
        # adj_pred = pred @ pred.permute(1, 0)
        # adj_pred = out_act(adj_pred)
        # evaluate
        pred_binary = torch.where(out_act(pred) > 0.5, torch.Tensor([1]), torch.Tensor([0]))
        correct += np.sum(pred_binary.data.numpy() == label.data.numpy())
        total += pred.shape[0]
        auc_test = roc_auc_score(label.flatten().data.numpy(), pred.flatten().data.numpy())
        acc_test = correct / total
        # pdb.set_trace()
        print('epoch', epoch, 'loss', loss.data, 'acc_train', acc_train, 'auc_train', auc_train,
              'acc_test', acc_test, 'auc_test', auc_test)
        # time.sleep(3)
        # if epoch==20:
        #     pdb.set_trace()
    # pdb.set_trace()
######## node classification
elif args.task == 'node':
    # Semi-supervised node classification with full-batch training and a
    # train/val split taken from the sampler's idx_train/idx_val.
    # data loader
    dataset_sampler = graph_dataset_node_classification(name=args.dataset, permute=args.permute)
    # model
    if args.model == 'deepbourgain':
        model = DeepBourgain(input_dim=dataset_sampler.node_feature.shape[2],
                             output_dim=dataset_sampler.num_class, head_num=16, hidden_dim=16, has_out_act=False)
    elif args.model == 'gcn':
        model = GraphConv(input_dim=dataset_sampler.node_feature.shape[2], output_dim=dataset_sampler.num_class,
                          normalize_embedding=True)
    elif args.model == 'gcnbourgain':
        # model = GraphConv_bourgain(input_dim=dataset_sampler.node_feature.shape[2], output_dim=dataset_sampler.num_class,
        #                   normalize_embedding=True, concat_bourgain=True)
        model = GCN_bourgain(input_dim=dataset_sampler.node_feature.shape[2], output_dim=dataset_sampler.num_class,
                             hidden_dim=args.hidden_dim, num_layers=2, concat=True, concat_bourgain=False)
    elif args.model == 'bourgain':
        subset_dists, _ = dataset_sampler.recompute_feature()
        model = MLP(input_dim=subset_dists.shape[1], hidden_dim=64, output_dim=dataset_sampler.num_class)
    elif args.model == 'node_feature':
        model = MLP(input_dim=dataset_sampler.node_feature.shape[2], hidden_dim=64, output_dim=dataset_sampler.num_class)
    elif args.model == 'hybrid':
        # hybrid = distance-based branch + raw-feature branch, fused by an MLP.
        model_1 = DeepBourgain(input_dim=dataset_sampler.node_feature.shape[2],
                               output_dim=16, head_num=16, hidden_dim=16)
        model_2 = MLP(input_dim=dataset_sampler.node_feature.shape[2], hidden_dim=64, output_dim=16)
        model_combine = MLP(input_dim=16+16, hidden_dim=32, output_dim=dataset_sampler.num_class)
    if args.gpu:
        model = model.cuda()
    # optimizer = torch.optim.Adam(filter(lambda p: p.requires_grad, model.parameters()), lr=args.lr)
    if args.model == 'hybrid':
        # All three sub-models are optimized jointly.
        optimizer = torch.optim.Adam(list(model_1.parameters())+list(model_2.parameters())+list(model_combine.parameters()),
                                     lr=args.lr)
    else:
        optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)
    out_act = nn.Sigmoid()
    # train
    for epoch in range(args.num_epochs):
        # train
        correct = 0
        total = 0
        if args.model == 'hybrid':
            model_1.zero_grad()
            model_2.zero_grad()
            model_combine.zero_grad()
        else:
            model.zero_grad()
        # GCN-style models consume the full graph and are masked by indices;
        # the others receive a pre-split training batch.
        if 'gcn' in args.model:
            batch = dataset_sampler.get_fullbatch()
        else:
            batch = dataset_sampler.get_fullbatch_train()
        node_feature = Variable(torch.from_numpy(batch[0]).float(), requires_grad=False)
        adj = Variable(torch.from_numpy(batch[1]).float(), requires_grad=False)
        node_label = Variable(torch.from_numpy(batch[2]).long(), requires_grad=False)
        subset_dists = Variable(torch.from_numpy(batch[3]).float(), requires_grad=False)
        subset_features = Variable(torch.from_numpy(batch[4]).float(), requires_grad=False)
        subset_ids = Variable(torch.from_numpy(batch[5]).long(), requires_grad=False)
        if args.model == 'deepbourgain':
            pred = model(node_feature, subset_dists, subset_features)
        elif args.model == 'gcn':
            pred = model(node_feature, adj)[dataset_sampler.idx_train]
            node_label = node_label[dataset_sampler.idx_train]
        elif args.model == 'gcnbourgain':
            pred = model(node_feature, adj, subset_dists, subset_ids)[dataset_sampler.idx_train]
            node_label = node_label[dataset_sampler.idx_train]
        elif args.model == 'bourgain':
            pred = model(subset_dists[:,:,0])
        elif args.model == 'node_feature':
            pred = model(node_feature[:,0,:])
        elif args.model == 'hybrid':
            pred_1 = model_1(node_feature, subset_dists, subset_features)
            pred_2 = model_2(node_feature[:,0,:])
            pred = model_combine(torch.cat((pred_1,pred_2),dim=-1))
        # NOTE(review): `F` is not imported at the top of this file —
        # presumably provided via a star import (e.g. encoders); verify.
        # `size_average` is deprecated in modern torch in favor of `reduction`.
        loss = F.cross_entropy(pred, node_label, size_average=True)
        loss.backward()
        # nn.utils.clip_grad_norm(model.parameters(), args.clip)
        optimizer.step()
        # evaluate
        pred_max = torch.argmax(pred.data, dim=-1).numpy()
        correct += np.sum(node_label.data.numpy() == pred_max)
        total += len(node_label)
        train_acc = correct/total
        # val
        correct = 0
        total = 0
        if 'gcn' in args.model:
            batch = dataset_sampler.get_fullbatch()
        else:
            batch = dataset_sampler.get_fullbatch_val()
        node_feature = Variable(torch.from_numpy(batch[0]).float(), requires_grad=False)
        adj = Variable(torch.from_numpy(batch[1]).float(), requires_grad=False)
        node_label = Variable(torch.from_numpy(batch[2]).long(), requires_grad=False)
        subset_dists = Variable(torch.from_numpy(batch[3]).float(), requires_grad=False)
        subset_features = Variable(torch.from_numpy(batch[4]).float(), requires_grad=False)
        subset_ids = Variable(torch.from_numpy(batch[5]).long(), requires_grad=False)
        if args.model == 'deepbourgain':
            pred = model(node_feature, subset_dists, subset_features)
        elif args.model == 'gcn':
            pred = model(node_feature, adj)[dataset_sampler.idx_val]
            node_label = node_label[dataset_sampler.idx_val]
        elif args.model == 'gcnbourgain':
            pred = model(node_feature, adj, subset_dists, subset_ids)[dataset_sampler.idx_val]
            node_label = node_label[dataset_sampler.idx_val]
        elif args.model == 'bourgain':
            pred = model(subset_dists[:,:,0])
        elif args.model == 'node_feature':
            pred = model(node_feature[:,0,:])
        elif args.model == 'hybrid':
            pred_1 = model_1(node_feature, subset_dists, subset_features)
            pred_2 = model_2(node_feature[:,0,:])
            pred = model_combine(torch.cat((pred_1,pred_2),dim=-1))
        # evaluate
        pred_max = torch.argmax(pred.data, dim=-1).numpy()
        correct += np.sum(node_label.data.numpy() == pred_max)
        total += len(node_label)
        val_acc = correct/total
        # if epoch % 20 == 19:
        #     pdb.set_trace()
        print('epoch', epoch, 'loss', loss.data, 'train accuracy', train_acc, 'val accuracy', val_acc)
        # time.sleep(3) # lower consumption
def view_data():
    """Visualize the training graph and a 2-D TSNE embedding of its
    per-node distance features; both figures are written under fig/.
    """
    sampler = graph_dataset_link_prediction(name=args.dataset, test_ratio=0.02)
    G_raw = sampler.G_train_raw
    G = sampler.G_train
    print(G_raw.number_of_nodes(), G.number_of_nodes())
    # Recompute the anchor-distance features on the training graph.
    subset_dists, _ = sampler.recompute_feature(G)
    subset_dists = np.squeeze(subset_dists)
    # Project each node's distance vector down to 2-D for plotting.
    node_emb = TSNE(n_components=2, n_iter=1000).fit_transform(subset_dists)
    print(node_emb.shape)
    suffix = str(subset_dists.shape[1]) + '.png'
    # Figure 1: the raw graph drawn with a spectral layout.
    plt.figure()
    plt.rcParams.update({'font.size': 4})
    nx.draw_networkx(G_raw, pos=nx.spectral_layout(G_raw), with_labels=True,
                     node_size=1.5, width=0.3, font_size=4)
    plt.savefig('fig/graph_' + args.dataset + suffix, dpi=300)
    plt.close()
    # Figure 2: the TSNE embedding, each point annotated with its node id.
    fig, ax = plt.subplots()
    plt.rcParams.update({'font.size': 4})
    plt.scatter(node_emb[:, 0], node_emb[:, 1], s=1.5)
    for idx, label in enumerate(G_raw.nodes()):
        ax.annotate(label, (node_emb[idx, 0], node_emb[idx, 1]))
    plt.savefig('fig/emb_' + args.dataset + suffix, dpi=300)
    plt.close()
# view_data()
# quit()
#
# node_feature, node_label, subset_dists, subset_features, num_class, idx_train, idx_val, idx_test = prepare_data()
# node_feature_train = node_feature[idx_train]
# node_feature_val = node_feature[idx_val]
# node_feature_test = node_feature[idx_test]
# node_label_train = node_label[idx_train]
# node_label_val = node_label[idx_val]
# node_label_test = node_label[idx_test]
# subset_dists_train = subset_dists[idx_train]
# subset_dists_val = subset_dists[idx_val]
# subset_dists_test = subset_dists[idx_test]
# subset_features_train = subset_features[idx_train]
# subset_features_val = subset_features[idx_val]
# subset_features_test = subset_features[idx_test]
#
#
# # svm on naive
# clf = SVC(gamma='auto')
# clf.fit(subset_dists_train[:,:,0], node_label_train)
# pred = clf.predict(subset_dists_test[:,:,0])
# correct = np.sum(node_label_test==pred)
# accuracy = correct/(len(node_label_test))
# print('svm accuracy', accuracy)
#
# clf = RandomForestClassifier(n_estimators=100, max_depth=3,random_state=0)
# clf.fit(subset_dists_train[:,:,0], node_label_train)
# pred = clf.predict(subset_dists_test[:,:,0])
# correct = np.sum(node_label_test==pred)
# accuracy = correct/(len(node_label_test))
# print('random forest accuracy', accuracy)
# node_feature_train = Variable(torch.from_numpy(node_feature_train).float(), requires_grad=False)
# node_feature_val = Variable(torch.from_numpy(node_feature_val).float(), requires_grad=False)
# node_feature_test = Variable(torch.from_numpy(node_feature_test).float(), requires_grad=False)
# node_label_train = Variable(torch.from_numpy(node_label_train).long(), requires_grad=False)
# node_label_val = Variable(torch.from_numpy(node_label_val).long(), requires_grad=False)
# node_label_test = Variable(torch.from_numpy(node_label_test).long(), requires_grad=False)
# subset_dists_train = Variable(torch.from_numpy(subset_dists_train).float(), requires_grad=False)
# subset_dists_val = Variable(torch.from_numpy(subset_dists_val).float(), requires_grad=False)
# subset_dists_test = Variable(torch.from_numpy(subset_dists_test).float(), requires_grad=False)
# subset_features_train = Variable(torch.from_numpy(subset_features_train).float(), requires_grad=False)
# subset_features_val = Variable(torch.from_numpy(subset_features_val).float(), requires_grad=False)
# subset_features_test = Variable(torch.from_numpy(subset_features_test).float(), requires_grad=False)
# # node_feature = Variable(torch.randn(128, 1, 128))
# # subset_dists = Variable(torch.randn(128, 64, 1))
# # subset_features = Variable(torch.randn(128, 64, 128))
# # pred = model(node_feature, subset_dists, subset_features)
#
#
# ######## node classification
#
# elif args.task == 'node':
#
# dataset_sampler_train = graph_dataset_node_classification(type='train')
# dataset_sampler_val = graph_dataset_node_classification(type='val')
# dataset_sampler_test = graph_dataset_node_classification(type='test')
# dataset_loader_train = torch.utils.data.DataLoader(
# dataset_sampler_train,
# batch_size=args.batch_size,
# shuffle=False,
# num_workers=args.num_workers)
# dataset_loader_val = torch.utils.data.DataLoader(
# dataset_sampler_val,
# batch_size=args.batch_size,
# shuffle=False,
# num_workers=args.num_workers)
# dataset_loader_test = torch.utils.data.DataLoader(
# dataset_sampler_test,
# batch_size=args.batch_size,
# shuffle=False,
# num_workers=args.num_workers)
#
# # model
# model = DeepBourgain(input_dim=dataset_sampler_train.node_feature.shape[2], output_dim=dataset_sampler_train.num_class,
# head_num=16, hidden_dim=16)
# optimizer = torch.optim.Adam(filter(lambda p: p.requires_grad, model.parameters()), lr=args.lr)
#
# # train
# for epoch in range(args.num_epochs):
# # train
# correct = 0
# total = 0
# for i, batch in enumerate(dataset_loader_train):
# model.train()
# model.zero_grad()
# node_feature = Variable(batch[0].float(), requires_grad=False)
# node_label = Variable(batch[1].long(), requires_grad=False)
# subset_dists = Variable(batch[2].float(), requires_grad=False)
# subset_features = Variable(batch[3].float(), requires_grad=False)
# pred = model(node_feature, subset_dists, subset_features)
# loss = F.cross_entropy(pred, node_label, size_average=True)
# loss.backward()
# # nn.utils.clip_grad_norm(model.parameters(), args.clip)
# optimizer.step()
#
# # evaluate
# pred_max = torch.argmax(pred.data, dim=-1).numpy()
# correct += np.sum(node_label.data.numpy() == pred_max)
# total += len(node_label)
# train_acc = correct / total
# dataset_sampler_train.recompute_feature()
# dataset_loader_train = torch.utils.data.DataLoader(
# dataset_sampler_train,
# batch_size=args.batch_size,
# shuffle=False,
# num_workers=args.num_workers)
#
# # val
# correct = 0
# total = 0
# for i, batch in enumerate(dataset_loader_val):
# node_feature = Variable(batch[0].float(), requires_grad=False)
# node_label = Variable(batch[1].long(), requires_grad=False)
# subset_dists = Variable(batch[2].float(), requires_grad=False)
# subset_features = Variable(batch[3].float(), requires_grad=False)
# pred = model(node_feature, subset_dists, subset_features)
#
# # evaluate
# pred_max = torch.argmax(pred.data, dim=-1).numpy()
# correct += np.sum(node_label.data.numpy() == pred_max)
# total += len(node_label)
# val_acc = correct / total
# dataset_sampler_val.recompute_feature()
# dataset_loader_val = torch.utils.data.DataLoader(
# dataset_sampler_val,
# batch_size=args.batch_size,
# shuffle=False,
# num_workers=args.num_workers)
#
# print('epoch', epoch, 'loss', loss.data, 'train accuracy', train_acc, 'val accuracy', val_acc)
# time.sleep(3)
# # if epoch==5:
# # pdb.set_trace() | [
"numpy.random.seed",
"argparse.ArgumentParser",
"torch.argmax",
"torch.cat",
"matplotlib.pyplot.figure",
"numpy.mean",
"torch.nn.Softmax",
"numpy.std",
"matplotlib.pyplot.close",
"matplotlib.pyplot.rcParams.update",
"torch.Tensor",
"torch.matmul",
"matplotlib.pyplot.subplots",
"networkx.sp... | [((3627, 3646), 'numpy.random.seed', 'np.random.seed', (['(123)'], {}), '(123)\n', (3641, 3646), True, 'import numpy as np\n'), ((3660, 3682), 'torch.nn.BCEWithLogitsLoss', 'nn.BCEWithLogitsLoss', ([], {}), '()\n', (3680, 3682), True, 'import torch.nn as nn\n'), ((683, 699), 'argparse.ArgumentParser', 'ArgumentParser', ([], {}), '()\n', (697, 699), False, 'from argparse import ArgumentParser\n'), ((14123, 14136), 'numpy.array', 'np.array', (['auc'], {}), '(auc)\n', (14131, 14136), True, 'import numpy as np\n'), ((15958, 15970), 'torch.nn.Sigmoid', 'nn.Sigmoid', ([], {}), '()\n', (15968, 15970), True, 'import torch.nn as nn\n'), ((15985, 16002), 'torch.nn.Softmax', 'nn.Softmax', ([], {'dim': '(1)'}), '(dim=1)\n', (15995, 16002), True, 'import torch.nn as nn\n'), ((29464, 29488), 'numpy.squeeze', 'np.squeeze', (['subset_dists'], {}), '(subset_dists)\n', (29474, 29488), True, 'import numpy as np\n'), ((29700, 29712), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (29710, 29712), True, 'import matplotlib.pyplot as plt\n'), ((29717, 29754), 'matplotlib.pyplot.rcParams.update', 'plt.rcParams.update', (["{'font.size': 4}"], {}), "({'font.size': 4})\n", (29736, 29754), True, 'import matplotlib.pyplot as plt\n'), ((30096, 30107), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (30105, 30107), True, 'import matplotlib.pyplot as plt\n'), ((30262, 30276), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (30274, 30276), True, 'import matplotlib.pyplot as plt\n'), ((30281, 30318), 'matplotlib.pyplot.rcParams.update', 'plt.rcParams.update', (["{'font.size': 4}"], {}), "({'font.size': 4})\n", (30300, 30318), True, 'import matplotlib.pyplot as plt\n'), ((30323, 30373), 'matplotlib.pyplot.scatter', 'plt.scatter', (['node_emb[:, 0]', 'node_emb[:, 1]'], {'s': '(1.5)'}), '(node_emb[:, 0], node_emb[:, 1], s=1.5)\n', (30334, 30373), True, 'import matplotlib.pyplot as plt\n'), ((30698, 30709), 'matplotlib.pyplot.close', 'plt.close', 
([], {}), '()\n', (30707, 30709), True, 'import matplotlib.pyplot as plt\n'), ((6822, 6834), 'torch.nn.Sigmoid', 'nn.Sigmoid', ([], {}), '()\n', (6832, 6834), True, 'import torch.nn as nn\n'), ((6853, 6870), 'torch.nn.Softmax', 'nn.Softmax', ([], {'dim': '(1)'}), '(dim=1)\n', (6863, 6870), True, 'import torch.nn as nn\n'), ((14218, 14230), 'numpy.mean', 'np.mean', (['auc'], {}), '(auc)\n', (14225, 14230), True, 'import numpy as np\n'), ((14242, 14253), 'numpy.std', 'np.std', (['auc'], {}), '(auc)\n', (14248, 14253), True, 'import numpy as np\n'), ((17899, 17918), 'torch.nonzero', 'torch.nonzero', (['mask'], {}), '(mask)\n', (17912, 17918), False, 'import torch\n'), ((21196, 21215), 'torch.nonzero', 'torch.nonzero', (['mask'], {}), '(mask)\n', (21209, 21215), False, 'import torch\n'), ((21238, 21280), 'torch.index_select', 'torch.index_select', (['pred', '(0)', 'mask_id[:, 0]'], {}), '(pred, 0, mask_id[:, 0])\n', (21256, 21280), False, 'import torch\n'), ((21304, 21346), 'torch.index_select', 'torch.index_select', (['pred', '(0)', 'mask_id[:, 1]'], {}), '(pred, 0, mask_id[:, 1])\n', (21322, 21346), False, 'import torch\n'), ((24648, 24660), 'torch.nn.Sigmoid', 'nn.Sigmoid', ([], {}), '()\n', (24658, 24660), True, 'import torch.nn as nn\n'), ((12562, 12581), 'torch.nonzero', 'torch.nonzero', (['mask'], {}), '(mask)\n', (12575, 12581), False, 'import torch\n'), ((12608, 12650), 'torch.index_select', 'torch.index_select', (['pred', '(0)', 'mask_id[:, 0]'], {}), '(pred, 0, mask_id[:, 0])\n', (12626, 12650), False, 'import torch\n'), ((12678, 12720), 'torch.index_select', 'torch.index_select', (['pred', '(0)', 'mask_id[:, 1]'], {}), '(pred, 0, mask_id[:, 1])\n', (12696, 12720), False, 'import torch\n'), ((13961, 13994), 'numpy.concatenate', 'np.concatenate', (['label_all'], {'axis': '(0)'}), '(label_all, axis=0)\n', (13975, 13994), True, 'import numpy as np\n'), ((13995, 14027), 'numpy.concatenate', 'np.concatenate', (['pred_all'], {'axis': '(0)'}), '(pred_all, 
axis=0)\n', (14009, 14027), True, 'import numpy as np\n'), ((18859, 18876), 'torch.Tensor', 'torch.Tensor', (['[1]'], {}), '([1])\n', (18871, 18876), False, 'import torch\n'), ((18878, 18895), 'torch.Tensor', 'torch.Tensor', (['[0]'], {}), '([0])\n', (18890, 18895), False, 'import torch\n'), ((21810, 21827), 'torch.Tensor', 'torch.Tensor', (['[1]'], {}), '([1])\n', (21822, 21827), False, 'import torch\n'), ((21829, 21846), 'torch.Tensor', 'torch.Tensor', (['[0]'], {}), '([0])\n', (21841, 21846), False, 'import torch\n'), ((29895, 29920), 'networkx.spectral_layout', 'nx.spectral_layout', (['G_raw'], {}), '(G_raw)\n', (29913, 29920), True, 'import networkx as nx\n'), ((8644, 8663), 'torch.nonzero', 'torch.nonzero', (['mask'], {}), '(mask)\n', (8657, 8663), False, 'import torch\n'), ((13288, 13305), 'torch.Tensor', 'torch.Tensor', (['[1]'], {}), '([1])\n', (13300, 13305), False, 'import torch\n'), ((13307, 13324), 'torch.Tensor', 'torch.Tensor', (['[0]'], {}), '([0])\n', (13319, 13324), False, 'import torch\n'), ((18253, 18292), 'torch.matmul', 'torch.matmul', (['nodes_first', 'nodes_second'], {}), '(nodes_first, nodes_second)\n', (18265, 18292), False, 'import torch\n'), ((21538, 21577), 'torch.matmul', 'torch.matmul', (['nodes_first', 'nodes_second'], {}), '(nodes_first, nodes_second)\n', (21550, 21577), False, 'import torch\n'), ((9669, 9686), 'torch.Tensor', 'torch.Tensor', (['[1]'], {}), '([1])\n', (9681, 9686), False, 'import torch\n'), ((9688, 9705), 'torch.Tensor', 'torch.Tensor', (['[0]'], {}), '([0])\n', (9700, 9705), False, 'import torch\n'), ((12924, 12963), 'torch.matmul', 'torch.matmul', (['nodes_first', 'nodes_second'], {}), '(nodes_first, nodes_second)\n', (12936, 12963), False, 'import torch\n'), ((16474, 16500), 'torch.from_numpy', 'torch.from_numpy', (['batch[0]'], {}), '(batch[0])\n', (16490, 16500), False, 'import torch\n'), ((16554, 16580), 'torch.from_numpy', 'torch.from_numpy', (['batch[1]'], {}), '(batch[1])\n', (16570, 16580), False, 'import 
torch\n'), ((16643, 16669), 'torch.from_numpy', 'torch.from_numpy', (['batch[2]'], {}), '(batch[2])\n', (16659, 16669), False, 'import torch\n'), ((16864, 16890), 'torch.from_numpy', 'torch.from_numpy', (['batch[3]'], {}), '(batch[3])\n', (16880, 16890), False, 'import torch\n'), ((16945, 16971), 'torch.from_numpy', 'torch.from_numpy', (['batch[4]'], {}), '(batch[4])\n', (16961, 16971), False, 'import torch\n'), ((17197, 17236), 'torch.Tensor', 'torch.Tensor', (['dataset_sampler.adj_count'], {}), '(dataset_sampler.adj_count)\n', (17209, 17236), False, 'import torch\n'), ((19403, 19429), 'torch.from_numpy', 'torch.from_numpy', (['batch[0]'], {}), '(batch[0])\n', (19419, 19429), False, 'import torch\n'), ((19483, 19509), 'torch.from_numpy', 'torch.from_numpy', (['batch[1]'], {}), '(batch[1])\n', (19499, 19509), False, 'import torch\n'), ((19572, 19598), 'torch.from_numpy', 'torch.from_numpy', (['batch[2]'], {}), '(batch[2])\n', (19588, 19598), False, 'import torch\n'), ((19793, 19819), 'torch.from_numpy', 'torch.from_numpy', (['batch[3]'], {}), '(batch[3])\n', (19809, 19819), False, 'import torch\n'), ((19874, 19900), 'torch.from_numpy', 'torch.from_numpy', (['batch[4]'], {}), '(batch[4])\n', (19890, 19900), False, 'import torch\n'), ((19958, 19984), 'torch.from_numpy', 'torch.from_numpy', (['batch[5]'], {}), '(batch[5])\n', (19974, 19984), False, 'import torch\n'), ((20448, 20487), 'torch.Tensor', 'torch.Tensor', (['dataset_sampler.adj_count'], {}), '(dataset_sampler.adj_count)\n', (20460, 20487), False, 'import torch\n'), ((26742, 26773), 'torch.argmax', 'torch.argmax', (['pred.data'], {'dim': '(-1)'}), '(pred.data, dim=-1)\n', (26754, 26773), False, 'import torch\n'), ((28563, 28594), 'torch.argmax', 'torch.argmax', (['pred.data'], {'dim': '(-1)'}), '(pred.data, dim=-1)\n', (28575, 28594), False, 'import torch\n'), ((9040, 9079), 'torch.matmul', 'torch.matmul', (['nodes_first', 'nodes_second'], {}), '(nodes_first, nodes_second)\n', (9052, 9079), False, 'import 
torch\n'), ((10530, 10556), 'torch.from_numpy', 'torch.from_numpy', (['batch[0]'], {}), '(batch[0])\n', (10546, 10556), False, 'import torch\n'), ((10618, 10644), 'torch.from_numpy', 'torch.from_numpy', (['batch[1]'], {}), '(batch[1])\n', (10634, 10644), False, 'import torch\n'), ((10703, 10729), 'torch.from_numpy', 'torch.from_numpy', (['batch[2]'], {}), '(batch[2])\n', (10719, 10729), False, 'import torch\n'), ((10789, 10815), 'torch.from_numpy', 'torch.from_numpy', (['batch[3]'], {}), '(batch[3])\n', (10805, 10815), False, 'import torch\n'), ((10874, 10900), 'torch.from_numpy', 'torch.from_numpy', (['batch[4]'], {}), '(batch[4])\n', (10890, 10900), False, 'import torch\n'), ((10963, 11005), 'torch.from_numpy', 'torch.from_numpy', (['dataset_sampler.dist_max'], {}), '(dataset_sampler.dist_max)\n', (10979, 11005), False, 'import torch\n'), ((11547, 11586), 'torch.Tensor', 'torch.Tensor', (['dataset_sampler.adj_count'], {}), '(dataset_sampler.adj_count)\n', (11559, 11586), False, 'import torch\n'), ((25143, 25169), 'torch.from_numpy', 'torch.from_numpy', (['batch[0]'], {}), '(batch[0])\n', (25159, 25169), False, 'import torch\n'), ((25223, 25249), 'torch.from_numpy', 'torch.from_numpy', (['batch[1]'], {}), '(batch[1])\n', (25239, 25249), False, 'import torch\n'), ((25310, 25336), 'torch.from_numpy', 'torch.from_numpy', (['batch[2]'], {}), '(batch[2])\n', (25326, 25336), False, 'import torch\n'), ((25398, 25424), 'torch.from_numpy', 'torch.from_numpy', (['batch[3]'], {}), '(batch[3])\n', (25414, 25424), False, 'import torch\n'), ((25490, 25516), 'torch.from_numpy', 'torch.from_numpy', (['batch[4]'], {}), '(batch[4])\n', (25506, 25516), False, 'import torch\n'), ((25577, 25603), 'torch.from_numpy', 'torch.from_numpy', (['batch[5]'], {}), '(batch[5])\n', (25593, 25603), False, 'import torch\n'), ((27156, 27182), 'torch.from_numpy', 'torch.from_numpy', (['batch[0]'], {}), '(batch[0])\n', (27172, 27182), False, 'import torch\n'), ((27236, 27262), 'torch.from_numpy', 
'torch.from_numpy', (['batch[1]'], {}), '(batch[1])\n', (27252, 27262), False, 'import torch\n'), ((27323, 27349), 'torch.from_numpy', 'torch.from_numpy', (['batch[2]'], {}), '(batch[2])\n', (27339, 27349), False, 'import torch\n'), ((27411, 27437), 'torch.from_numpy', 'torch.from_numpy', (['batch[3]'], {}), '(batch[3])\n', (27427, 27437), False, 'import torch\n'), ((27503, 27529), 'torch.from_numpy', 'torch.from_numpy', (['batch[4]'], {}), '(batch[4])\n', (27519, 27529), False, 'import torch\n'), ((27590, 27616), 'torch.from_numpy', 'torch.from_numpy', (['batch[5]'], {}), '(batch[5])\n', (27606, 27616), False, 'import torch\n'), ((4901, 4910), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (4908, 4910), True, 'import torch.nn as nn\n'), ((7283, 7309), 'torch.from_numpy', 'torch.from_numpy', (['batch[0]'], {}), '(batch[0])\n', (7299, 7309), False, 'import torch\n'), ((7375, 7401), 'torch.from_numpy', 'torch.from_numpy', (['batch[1]'], {}), '(batch[1])\n', (7391, 7401), False, 'import torch\n'), ((7464, 7490), 'torch.from_numpy', 'torch.from_numpy', (['batch[2]'], {}), '(batch[2])\n', (7480, 7490), False, 'import torch\n'), ((7554, 7580), 'torch.from_numpy', 'torch.from_numpy', (['batch[3]'], {}), '(batch[3])\n', (7570, 7580), False, 'import torch\n'), ((7643, 7669), 'torch.from_numpy', 'torch.from_numpy', (['batch[4]'], {}), '(batch[4])\n', (7659, 7669), False, 'import torch\n'), ((7736, 7778), 'torch.from_numpy', 'torch.from_numpy', (['dataset_sampler.dist_max'], {}), '(dataset_sampler.dist_max)\n', (7752, 7778), False, 'import torch\n'), ((8097, 8136), 'torch.Tensor', 'torch.Tensor', (['dataset_sampler.adj_count'], {}), '(dataset_sampler.adj_count)\n', (8109, 8136), False, 'import torch\n'), ((15467, 15476), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (15474, 15476), True, 'import torch.nn as nn\n'), ((26484, 26519), 'torch.cat', 'torch.cat', (['(pred_1, pred_2)'], {'dim': '(-1)'}), '((pred_1, pred_2), dim=-1)\n', (26493, 26519), False, 'import torch\n'), 
((28489, 28524), 'torch.cat', 'torch.cat', (['(pred_1, pred_2)'], {'dim': '(-1)'}), '((pred_1, pred_2), dim=-1)\n', (28498, 28524), False, 'import torch\n')] |
# -*- coding: utf-8 -*-
"""
Sandwich demo
=============
Sandwich demo based on code from http://nbviewer.ipython.org/6576096
"""
######################################################################
# .. note::
#
# In order to show the charts of the examples you need a graphical
#    ``matplotlib`` backend installed. For instance, use ``pip install pyqt5``
# to get Qt graphical interface or use your favorite one.
import numpy as np
from matplotlib import pyplot as plt
from sklearn.metrics import pairwise_distances
from sklearn.neighbors import NearestNeighbors
from metric_learn import (LMNN, ITML_Supervised, LSML_Supervised,
SDML_Supervised)
def sandwich_demo():
  """Plot the sandwich data in input space and in the space learned by
  each supervised metric learner, one subplot per learner."""
  x, y = sandwich_data()
  knn = nearest_neighbors(x, k=2)

  # Top row spans the full width: raw input space with its 2-NN graph.
  ax = plt.subplot(3, 1, 1)
  plot_sandwich_data(x, y, ax)
  plot_neighborhood_graph(x, knn, y, ax)
  ax.set_title('input space')
  ax.set_aspect('equal')
  ax.set_xticks([])
  ax.set_yticks([])

  learners = [
      LMNN(),
      ITML_Supervised(num_constraints=200),
      SDML_Supervised(num_constraints=200, balance_param=0.001),
      LSML_Supervised(num_constraints=200),
  ]

  # Remaining grid cells (3..6): the space learned by each algorithm.
  for cell, learner in enumerate(learners, start=3):
    learner.fit(x, y)
    tx = learner.transform(x)
    tx_knn = nearest_neighbors(tx, k=2)
    ax = plt.subplot(3, 2, cell)
    plot_sandwich_data(tx, y, axis=ax)
    plot_neighborhood_graph(tx, tx_knn, y, axis=ax)
    ax.set_title(learner.__class__.__name__)
    ax.set_xticks([])
    ax.set_yticks([])
  plt.show()
# TODO: use this somewhere
def visualize_class_separation(X, labels):
  """Show side-by-side pairwise-distance heatmaps of X and of the labels,
  with rows sorted by label so class structure appears as blocks."""
  _, (ax1, ax2) = plt.subplots(ncols=2)
  order = np.argsort(labels)
  dist_X = pairwise_distances(X[order])
  dist_y = pairwise_distances(labels[order, None])
  ax1.imshow(dist_X, interpolation='nearest')
  ax2.imshow(dist_y, interpolation='nearest')
def nearest_neighbors(X, k=5):
  """Return, for each row of X, the indices of its k nearest rows
  (the point itself included) as an (n_samples, k) integer array."""
  # NearestNeighbors.fit returns the fitted estimator, so calls chain.
  return NearestNeighbors(n_neighbors=k).fit(X).kneighbors(
      X, return_distance=False)
def sandwich_data():
  """Build the 'sandwich' toy set: 6 classes x 9 points laid out on
  parallel horizontal layers, jittered with Gaussian noise.

  Returns:
      (data, labels): a (54, 2) float array of points and a (54,)
      integer label vector (class i occupies 9 consecutive entries).
  """
  num_classes = 6   # number of layers / distinct classes
  num_points = 9    # points per layer
  dist = 0.7        # vertical spacing between layers

  data = np.zeros((num_classes, num_points, 2), dtype=float)
  labels = np.zeros((num_classes, num_points), dtype=int)

  # x positions are shared by every layer; y position is per class.
  x_centers = np.arange(num_points, dtype=float) - num_points / 2
  y_centers = dist * (np.arange(num_classes, dtype=float) - num_classes / 2)
  for row, cy in enumerate(y_centers):
    for col, cx in enumerate(x_centers):
      # Same RNG draw order as before: x jitter first, then y jitter.
      data[row, col, 0] = np.random.normal(cx, 0.1)
      data[row, col, 1] = np.random.normal(cy, 0.1)
    labels[row, :] = row
  return data.reshape((-1, 2)), labels.ravel()
def plot_sandwich_data(x, y, axis=plt, colors='rbgmky'):
  """Scatter-plot x as hollow circles, one edge color per class in y."""
  for idx, cls in enumerate(np.unique(y)):
    members = x[y == cls]
    axis.scatter(*members.T, s=50, facecolors='none', edgecolors=colors[idx])
def plot_neighborhood_graph(x, nn, y, axis=plt, colors='rbgmky'):
  """Draw a segment from each point to its nearest neighbor (column 1 of
  the kneighbors index array), colored by the point's class label."""
  for idx, point in enumerate(x):
    neighbor = x[nn[idx, 1]]
    axis.plot((point[0], neighbor[0]), (point[1], neighbor[1]), colors[y[idx]])
if __name__ == '__main__':
  sandwich_demo()  # render the demo figures when executed as a script
| [
"matplotlib.pyplot.subplot",
"metric_learn.LSML_Supervised",
"matplotlib.pyplot.show",
"sklearn.metrics.pairwise_distances",
"numpy.zeros",
"metric_learn.SDML_Supervised",
"metric_learn.LMNN",
"numpy.argsort",
"metric_learn.ITML_Supervised",
"sklearn.neighbors.NearestNeighbors",
"numpy.arange",
... | [((778, 798), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(1)', '(1)'], {}), '(3, 1, 1)\n', (789, 798), True, 'from matplotlib import pyplot as plt\n'), ((1514, 1524), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1522, 1524), True, 'from matplotlib import pyplot as plt\n'), ((1615, 1636), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'ncols': '(2)'}), '(ncols=2)\n', (1627, 1636), True, 'from matplotlib import pyplot as plt\n'), ((1653, 1671), 'numpy.argsort', 'np.argsort', (['labels'], {}), '(labels)\n', (1663, 1671), True, 'import numpy as np\n'), ((1885, 1916), 'sklearn.neighbors.NearestNeighbors', 'NearestNeighbors', ([], {'n_neighbors': 'k'}), '(n_neighbors=k)\n', (1901, 1916), False, 'from sklearn.neighbors import NearestNeighbors\n'), ((2192, 2243), 'numpy.zeros', 'np.zeros', (['(num_classes, num_points, 2)'], {'dtype': 'float'}), '((num_classes, num_points, 2), dtype=float)\n', (2200, 2243), True, 'import numpy as np\n'), ((2255, 2301), 'numpy.zeros', 'np.zeros', (['(num_classes, num_points)'], {'dtype': 'int'}), '((num_classes, num_points), dtype=int)\n', (2263, 2301), True, 'import numpy as np\n'), ((1009, 1015), 'metric_learn.LMNN', 'LMNN', ([], {}), '()\n', (1013, 1015), False, 'from metric_learn import LMNN, ITML_Supervised, LSML_Supervised, SDML_Supervised\n'), ((1023, 1059), 'metric_learn.ITML_Supervised', 'ITML_Supervised', ([], {'num_constraints': '(200)'}), '(num_constraints=200)\n', (1038, 1059), False, 'from metric_learn import LMNN, ITML_Supervised, LSML_Supervised, SDML_Supervised\n'), ((1067, 1124), 'metric_learn.SDML_Supervised', 'SDML_Supervised', ([], {'num_constraints': '(200)', 'balance_param': '(0.001)'}), '(num_constraints=200, balance_param=0.001)\n', (1082, 1124), False, 'from metric_learn import LMNN, ITML_Supervised, LSML_Supervised, SDML_Supervised\n'), ((1132, 1168), 'metric_learn.LSML_Supervised', 'LSML_Supervised', ([], {'num_constraints': '(200)'}), '(num_constraints=200)\n', (1147, 1168), 
False, 'from metric_learn import LMNN, ITML_Supervised, LSML_Supervised, SDML_Supervised\n'), ((1311, 1336), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(2)', 'ax_num'], {}), '(3, 2, ax_num)\n', (1322, 1336), True, 'from matplotlib import pyplot as plt\n'), ((1685, 1719), 'sklearn.metrics.pairwise_distances', 'pairwise_distances', (['X[label_order]'], {}), '(X[label_order])\n', (1703, 1719), False, 'from sklearn.metrics import pairwise_distances\n'), ((1759, 1804), 'sklearn.metrics.pairwise_distances', 'pairwise_distances', (['labels[label_order, None]'], {}), '(labels[label_order, None])\n', (1777, 1804), False, 'from sklearn.metrics import pairwise_distances\n'), ((2317, 2351), 'numpy.arange', 'np.arange', (['num_points'], {'dtype': 'float'}), '(num_points, dtype=float)\n', (2326, 2351), True, 'import numpy as np\n'), ((2773, 2785), 'numpy.unique', 'np.unique', (['y'], {}), '(y)\n', (2782, 2785), True, 'import numpy as np\n'), ((2391, 2426), 'numpy.arange', 'np.arange', (['num_classes'], {'dtype': 'float'}), '(num_classes, dtype=float)\n', (2400, 2426), True, 'import numpy as np\n'), ((2544, 2569), 'numpy.random.normal', 'np.random.normal', (['xc', '(0.1)'], {}), '(xc, 0.1)\n', (2560, 2569), True, 'import numpy as np\n'), ((2592, 2617), 'numpy.random.normal', 'np.random.normal', (['yc', '(0.1)'], {}), '(yc, 0.1)\n', (2608, 2617), True, 'import numpy as np\n')] |
import functools
import operator
from math import pi
import advbench.lib.manifool.functions.helpers.general as g
import numpy as np
import torch
from torch.autograd import Variable
def manitest(input_image, net, mode, maxIter=50000, lim = None, hs = None, cuda_on = True,
             stop_when_found=None, verbose=True):
    """Measure how invariant classifier `net` is to the transformation
    group `mode` around `input_image`.

    Runs a Dijkstra-like fast-marching search over a discretized
    transformation group: nodes are transformation-parameter vectors on a
    grid with step sizes `hs`, edge costs come from a per-node image-space
    metric W = J J^T (J is the transformation Jacobian), and the search
    expands nodes in order of increasing geodesic distance until the
    network's prediction on the transformed image changes.

    Args:
        input_image: input tensor; transformed via `g.para2tfm` and
            flattened to n elements when building the metric.
        net: classifier; called on a batch of one image, label taken by
            argmax over dim 1.
        mode: group name ('rotation', 'translation', 'rotation+scaling',
            'rotation+translation', 'scaling+translation', 'similarity').
        maxIter: maximum number of node expansions.
        lim: (dimension_group, 2) per-parameter [min, max] bounds;
            defaults to unbounded.
        hs: per-parameter grid steps; defaults from `group_chars`.
        cuda_on: move net and tensors to GPU.
        stop_when_found: optional tensor of parameter coordinates; when
            given, the search records distances to these points (in
            `point_dists`) instead of stopping at a misclassification.
        verbose: print progress every 100 expansions.

    Returns:
        (manitest_score, manitest_image, fooling_tfm, dist, coords,
         input_label, out_label, point_dists, k)
    """

    def list_index(a_list, inds):
        # Fancy-index a plain Python list.
        return [a_list[i] for i in inds]

    def group_chars(mode):
        # Default discretization step for each parameter of the group.
        if mode == 'rotation':
            hs = torch.Tensor([pi/20])
        elif mode == 'translation':
            hs = torch.Tensor([0.25,0.25])
        elif mode == 'rotation+scaling':
            hs = torch.Tensor([pi/20, 0.1])
        elif mode == 'rotation+translation':
            hs = torch.Tensor([pi/20,0.25,0.25])
        elif mode == 'scaling+translation':
            hs = torch.Tensor([0.1,0.25,0.25])
        elif mode == 'similarity':
            hs = torch.Tensor([pi/20,0.5,0.5,0.1])
        else:
            raise NameError('Wrong mode name entered')

        if cuda_on:
            # NOTE(review): Tensor.cuda() is not in-place and the result is
            # discarded here, so hs stays on CPU -- confirm this is intended.
            hs.cuda()

        return hs

    def gen_simplices(cur_vec, cur_dim):
        # Recursively enumerate the simplices of visited neighbours around
        # the current node. Relies on `i` (node being expanded) and `n_vec`
        # (its neighbour id list) from the enclosing search loop.
        nonlocal num_simpl
        nonlocal simpls

        if cur_dim == dimension_group + 1:
            if not cur_vec:
                return
            simpls.append(cur_vec)
            num_simpl = num_simpl + 1
            return

        if (n_vec[2*cur_dim - 2] == i or n_vec[2*cur_dim-1] == i):
            cur_vec = cur_vec + [i]
            gen_simplices(cur_vec, cur_dim + 1)
        else:
            gen_simplices(cur_vec, cur_dim + 1)

            if (n_vec[2*cur_dim - 2] != -1):
                cur_vec_l = cur_vec + [n_vec[2*cur_dim - 2]]
                gen_simplices(cur_vec_l,cur_dim+1)

            if (n_vec[2*cur_dim - 1] != -1):
                cur_vec_r = cur_vec + [n_vec[2*cur_dim - 1]]
                gen_simplices(cur_vec_r,cur_dim+1)

    def check_oob(coord):
        # Return 1 when coord lies within the per-parameter bounds `lim`
        # (with 1e-8 tolerance), 0 otherwise.
        inside = 1
        for u in range(len(coord)):
            if (coord[u] > lim[u,1]+1e-8 or coord[u] < lim[u,0] - 1e-8):
                inside = 0
                break

        return inside

    def get_and_create_neighbours(cur_node):
        # Lazily materialize the 2*dimension_group grid neighbours of
        # cur_node (one +hs and one -hs step per parameter), reusing any
        # existing node whose coordinates match within 1e-6.
        nonlocal id_max, coords, dist, visited, neighbours, W, ims

        #1)Generate coordinates of neighbouring nodes
        for l in range(dimension_group):
            #Generate coordinates
            coordsNeighbour1 = coords[cur_node].clone()
            coordsNeighbour1[l] += hs[l]
            coordsNeighbour2 = coords[cur_node].clone()
            coordsNeighbour2[l] -= hs[l]

            if check_oob(coordsNeighbour1):
                #Can we find a similar coordinate?
                dists = (torch.stack(coords,0)
                         - coordsNeighbour1.repeat(len(coords), 1)).abs().sum(dim=1)
                II1 = (dists < 1e-6).nonzero()
                if not II1.size():
                    id_max += 1
                    #create node: i) coords, ii)visited, iii)distance
                    coords.append(coordsNeighbour1)
                    dist.append(np.inf)
                    visited.append(0)

                    #Assign the NodeID to IDNeighbours
                    neighbours.append([-1]*2*dimension_group)
                    neighbours[cur_node][2*l] = id_max

                    #Do the reverse
                    neighbours[id_max][2*l+1] = cur_node

                    W.append(None)
                    ims.append([])
                else:
                    #Node already exists
                    neighbours[cur_node][2*l] = II1[0,0]

                    #Do the reverse
                    neighbours[II1[0,0]][2*l+1] = cur_node

            if check_oob(coordsNeighbour2):
                #Can we find a similar coordinate?
                dists = (torch.stack(coords,0)
                         - coordsNeighbour2.repeat(len(coords), 1)).abs().sum(dim=1)
                II2 = (dists < 1e-6).nonzero()
                if not II2.size():
                    id_max += 1
                    #create node: i) coords, ii)visited, iii)distance
                    coords.append(coordsNeighbour2)
                    dist.append(np.inf)
                    visited.append(0)

                    #Assign the NodeID to IDNeighbours
                    neighbours.append([-1]*2*dimension_group)
                    neighbours[cur_node][2*l+1] = id_max

                    #Do the reverse
                    neighbours[id_max][2*l] = cur_node

                    W.append(None)
                    ims.append([])
                else:
                    #Node already exists
                    neighbours[cur_node][2*l+1] = II2[0,0]

                    #Do the reverse
                    neighbours[II2[0,0]][2*l] = cur_node

    def generate_metric(cur_node):
        # Build and cache the node's local metric W = J J^T, where J is the
        # Jacobian of the transformed image w.r.t. the group parameters;
        # also caches the transformed image itself in `ims`.
        nonlocal ims, W

        tau = coords[cur_node]
        tfm = g.para2tfm(tau,mode,1)
        I = tfm(input_image)
        ims[cur_node] = I
        J = g.jacobian(input_image, I, tfm, mode, 1)
        J_n = J.resize_(J.size()[0],n)
        curW = J_n.mm(J_n.transpose(0,1))

        W[cur_node] = curW

    def evaluate_classifier(cur_node):
        # Classify the node's transformed image; on a label change, record
        # the normalized score, fooling image/transform and new label, and
        # return True to stop the search.
        nonlocal manitest_score, manitest_image, fooling_tfm, out_label

        x = Variable(ims[cur_node].unsqueeze(0))
        output = net(x)
        _, k_I = torch.max(output.data,1)
        pred_label = k_I[0]

        if pred_label != input_label:
            manitest_score = dist[cur_node]/input_image.norm()
            manitest_image = ims[cur_node]
            fooling_tfm = g.para2tfm(coords[cur_node],mode,1)
            out_label = pred_label
            return True

        return False

    ###

    # Identity element of the transformation group (search origin).
    e = g.init_param(mode)

    if cuda_on:
        net.cuda()
        input_image = input_image.cuda()
        e = e.cuda()

    dimension_group = e.size()[0]
    # n = number of pixels/elements of the input image (for flattening J).
    n = functools.reduce(operator.mul, input_image.size(), 1)

    stop_flag = False
    point_dists = None
    if stop_when_found is not None:
        stop_flag = True
        num_stopping_points = stop_when_found.size()[0]
        point_dists = torch.Tensor(num_stopping_points)
        remaining_points = num_stopping_points

    if hs is None:
        hs = group_chars(mode)

    if lim is None:
        # Unbounded parameter search by default.
        lim = np.zeros((dimension_group,2))
        lim[:,0] = -np.inf
        lim[:,1] = np.inf

    # Per-node search state, indexed by node id (0 = identity transform).
    dist = [0.0];
    visited =[0];
    coords = [e];
    ims = [input_image]
    W = [None]
    id_max = 0;
    neighbours = [[-1]*2*dimension_group];

    #Generate input label
    x = Variable(input_image.unsqueeze(0))
    output = net(x)
    _, k_I = torch.max(output.data,1)
    input_label = k_I[0]

    #Output Variables (defaults returned if no fooling node is reached)
    manitest_score = np.inf
    manitest_image = input_image.clone()
    fooling_tfm = e
    out_label = input_label

    for k in range(maxIter):

        if k%100== 0 and verbose:
            print('>> k = {}'.format(k))

        # Pick the unvisited node with the smallest tentative distance
        # (Dijkstra's extract-min over a dense array).
        tmp_vec = np.array(dist[0:id_max+1])  # copy so visited nodes can be masked
        tmp_vec[np.asarray(visited) == 1] = np.inf
        i = np.argmin(tmp_vec)
        visited[i] = 1

        #evaluate the classifier and check if it is fooled
        if stop_flag:
            # Distance-recording mode: check whether node i matches one of
            # the requested stopping coordinates.
            dists = torch.norm(coords[i].repeat(num_stopping_points,1)-stop_when_found,2,1)
            if dists.min()<1e-6:
                _, ind = torch.min(dists,0)
                point_dists[ind[0,0]] = dist[i]
                remaining_points -= 1
                if remaining_points == 0:
                    break
        elif evaluate_classifier(i):
            break

        get_and_create_neighbours(i);

        # Relax the tentative distance of each unvisited neighbour j by
        # solving a local quadratic (eikonal-style update) on each simplex
        # of visited nodes around j.
        for j in neighbours[i]:
            if j == -1:
                continue

            #Consider unknown neighbours only
            if visited[j]:
                continue

            #Look at the neighbours of j (vector of size 2*dimension_group)
            n_vec = neighbours[j]

            num_simpl = 1
            simpls = []
            gen_simplices([],1)

            if W[j] is None:
                generate_metric(j)

            for j_ in range(num_simpl-1):
                X = torch.stack(list_index(coords,simpls[j_])) - coords[j].repeat(len(simpls[j_]),1)
                if cuda_on:
                    v = torch.cuda.FloatTensor(list_index(dist,simpls[j_])).unsqueeze(1)
                    one_vector = torch.ones(v.size()).cuda()
                else:
                    v = torch.FloatTensor(list_index(dist,simpls[j_])).unsqueeze(1)
                    one_vector = torch.ones(v.size())

                M_prime = (X.mm(W[j]).mm(X.transpose(0,1)))

                # torch.gesv solves M_prime @ sol = rhs; a singular simplex
                # metric is treated as an infinite-cost update.
                try:
                    invM_prime_v, _ = torch.gesv(v,M_prime)
                except:
                    invM_prime_v = v*np.inf

                try:
                    invM_prime_1, _ = torch.gesv(one_vector,M_prime)
                except:
                    invM_prime_1 = one_vector*np.inf

                invM_prime_v.transpose_(0,1)
                # one_vector.squeeze_()
                # v.squeeze_()

                #Solve second order equation
                # dz^2 * one_vector' * invM_prime * one_vector
                # - 2 * dz * one_vector' * invM_prime * v + v' * invM_prime * v - 1
                Delta = (invM_prime_v.sum())**2 - invM_prime_1.sum()*(invM_prime_v.mm(v) - 1 )
                Delta = Delta[0,0]

                if Delta >= 0:
                    #Compute solution
                    x_c = (invM_prime_v.sum()+np.sqrt(Delta))/invM_prime_1.sum()

                    #Test that it is not on the border of the simplex
                    te, _ = torch.gesv(x_c - v,M_prime)
                    if te.min() > 0:
                        dist[j] = min(dist[j], x_c)

    return manitest_score, manitest_image, fooling_tfm, dist, coords, input_label, out_label, point_dists, k
| [
"advbench.lib.manifool.functions.helpers.general.para2tfm",
"torch.stack",
"advbench.lib.manifool.functions.helpers.general.init_param",
"numpy.asarray",
"numpy.zeros",
"numpy.argmin",
"torch.Tensor",
"torch.max",
"numpy.array",
"advbench.lib.manifool.functions.helpers.general.jacobian",
"torch.... | [((5625, 5643), 'advbench.lib.manifool.functions.helpers.general.init_param', 'g.init_param', (['mode'], {}), '(mode)\n', (5637, 5643), True, 'import advbench.lib.manifool.functions.helpers.general as g\n'), ((6531, 6556), 'torch.max', 'torch.max', (['output.data', '(1)'], {}), '(output.data, 1)\n', (6540, 6556), False, 'import torch\n'), ((4826, 4850), 'advbench.lib.manifool.functions.helpers.general.para2tfm', 'g.para2tfm', (['tau', 'mode', '(1)'], {}), '(tau, mode, 1)\n', (4836, 4850), True, 'import advbench.lib.manifool.functions.helpers.general as g\n'), ((4916, 4956), 'advbench.lib.manifool.functions.helpers.general.jacobian', 'g.jacobian', (['input_image', 'I', 'tfm', 'mode', '(1)'], {}), '(input_image, I, tfm, mode, 1)\n', (4926, 4956), True, 'import advbench.lib.manifool.functions.helpers.general as g\n'), ((5270, 5295), 'torch.max', 'torch.max', (['output.data', '(1)'], {}), '(output.data, 1)\n', (5279, 5295), False, 'import torch\n'), ((6024, 6057), 'torch.Tensor', 'torch.Tensor', (['num_stopping_points'], {}), '(num_stopping_points)\n', (6036, 6057), False, 'import torch\n'), ((6191, 6221), 'numpy.zeros', 'np.zeros', (['(dimension_group, 2)'], {}), '((dimension_group, 2))\n', (6199, 6221), True, 'import numpy as np\n'), ((6846, 6874), 'numpy.array', 'np.array', (['dist[0:id_max + 1]'], {}), '(dist[0:id_max + 1])\n', (6854, 6874), True, 'import numpy as np\n'), ((6950, 6968), 'numpy.argmin', 'np.argmin', (['tmp_vec'], {}), '(tmp_vec)\n', (6959, 6968), True, 'import numpy as np\n'), ((478, 501), 'torch.Tensor', 'torch.Tensor', (['[pi / 20]'], {}), '([pi / 20])\n', (490, 501), False, 'import torch\n'), ((5494, 5531), 'advbench.lib.manifool.functions.helpers.general.para2tfm', 'g.para2tfm', (['coords[cur_node]', 'mode', '(1)'], {}), '(coords[cur_node], mode, 1)\n', (5504, 5531), True, 'import advbench.lib.manifool.functions.helpers.general as g\n'), ((553, 579), 'torch.Tensor', 'torch.Tensor', (['[0.25, 0.25]'], {}), '([0.25, 0.25])\n', (565, 
579), False, 'import torch\n'), ((6903, 6922), 'numpy.asarray', 'np.asarray', (['visited'], {}), '(visited)\n', (6913, 6922), True, 'import numpy as np\n'), ((7224, 7243), 'torch.min', 'torch.min', (['dists', '(0)'], {}), '(dists, 0)\n', (7233, 7243), False, 'import torch\n'), ((637, 665), 'torch.Tensor', 'torch.Tensor', (['[pi / 20, 0.1]'], {}), '([pi / 20, 0.1])\n', (649, 665), False, 'import torch\n'), ((8539, 8561), 'torch.gesv', 'torch.gesv', (['v', 'M_prime'], {}), '(v, M_prime)\n', (8549, 8561), False, 'import torch\n'), ((8688, 8719), 'torch.gesv', 'torch.gesv', (['one_vector', 'M_prime'], {}), '(one_vector, M_prime)\n', (8698, 8719), False, 'import torch\n'), ((9486, 9514), 'torch.gesv', 'torch.gesv', (['(x_c - v)', 'M_prime'], {}), '(x_c - v, M_prime)\n', (9496, 9514), False, 'import torch\n'), ((726, 761), 'torch.Tensor', 'torch.Tensor', (['[pi / 20, 0.25, 0.25]'], {}), '([pi / 20, 0.25, 0.25])\n', (738, 761), False, 'import torch\n'), ((819, 850), 'torch.Tensor', 'torch.Tensor', (['[0.1, 0.25, 0.25]'], {}), '([0.1, 0.25, 0.25])\n', (831, 850), False, 'import torch\n'), ((9351, 9365), 'numpy.sqrt', 'np.sqrt', (['Delta'], {}), '(Delta)\n', (9358, 9365), True, 'import numpy as np\n'), ((901, 939), 'torch.Tensor', 'torch.Tensor', (['[pi / 20, 0.5, 0.5, 0.1]'], {}), '([pi / 20, 0.5, 0.5, 0.1])\n', (913, 939), False, 'import torch\n'), ((2667, 2689), 'torch.stack', 'torch.stack', (['coords', '(0)'], {}), '(coords, 0)\n', (2678, 2689), False, 'import torch\n'), ((3754, 3776), 'torch.stack', 'torch.stack', (['coords', '(0)'], {}), '(coords, 0)\n', (3765, 3776), False, 'import torch\n')] |
import argparse
import tensorflow as tf
import numpy as np
import time
from shared_functions import make_matmul, measure_tf2_gpu
# tf.config.run_functions_eagerly(False)
#
# config = tf.compat.v1.ConfigProto()
# config.gpu_options.allow_growth = True
# session = tf.compat.v1.Session(config=config)
def attention(input, heads):
    """One multi-head self-attention layer built from dense matmuls.

    Parameters
    ----------
    input : tf.Tensor
        2-D activation tensor of shape (batch, d_model); d_model must be
        divisible by `heads`.
    heads : int
        Number of attention heads.

    Returns
    -------
    tf.Tensor of shape (batch, d_model).
    """
    batch = input.shape[0]
    d_model = input.shape[1]
    d_k = d_model // heads
    assert d_model % heads == 0
    q = make_matmul(input, d_model)
    k = make_matmul(input, d_model)
    v = make_matmul(input, d_model)
    # reshape query, key, value to (batch, heads, d_k); the previous
    # version hard-coded (64, 16, 64) and left d_k unused.
    q = tf.reshape(q, shape=(batch, heads, d_k))
    k = tf.reshape(k, shape=(batch, heads, d_k))
    v = tf.reshape(v, shape=(batch, heads, d_k))
    # transpose q, k, v to (heads, batch, d_k) for batched matmul
    q = tf.transpose(a=q, perm=(1, 0, 2))
    k = tf.transpose(a=k, perm=(1, 0, 2))
    v = tf.transpose(a=v, perm=(1, 0, 2))
    logits = tf.matmul(q, k)
    output = tf.matmul(logits, v)
    # transpose the output back and fold the heads into the feature axis
    output = tf.transpose(a=output, perm=(1, 0, 2))
    output = tf.reshape(output, shape=(batch, d_model))
    # a final linear layer with a 4*d_model hidden expansion
    output = make_matmul(tf.nn.relu(make_matmul(input, 4 * d_model)), d_model)
    return output
def bert_tf2_model(input):
    """Apply eight stacked sixteen-head attention layers to *input*."""
    activations = input
    for _ in range(8):
        activations = attention(activations, 16)
    return activations
# @tf.function(jit_compile=False)
@tf.function(experimental_compile=False)
def bert_tf2(input):
    """Graph-mode (non-XLA) forward pass of the BERT-like benchmark model."""
    return bert_tf2_model(input)
# @tf.function(jit_compile=True)
@tf.function(experimental_compile=True)
def bert_tf2_xla(input):
    """XLA-compiled forward pass of the BERT-like benchmark model."""
    return bert_tf2_model(input)
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument("-hw", "--hw", help="target hardware")
    parser.add_argument("-bs", "--batch-size", default=1, type=int, help="batch size")
    args = parser.parse_args()
    args.network = 'bert'
    # Fixed-size random batch used for both timing runs.
    inputs = np.random.uniform(-1, 1, size=(64, 1024)).astype("float32")
    # Time the plain graph-mode variant, then the XLA-compiled one.
    measure_tf2_gpu(bert_tf2, inputs, 'TF', args)
    measure_tf2_gpu(bert_tf2_xla, inputs, 'TF-XLA', args)
| [
"numpy.random.uniform",
"shared_functions.measure_tf2_gpu",
"argparse.ArgumentParser",
"tensorflow.reshape",
"tensorflow.transpose",
"tensorflow.matmul",
"shared_functions.make_matmul",
"tensorflow.function"
] | [((1302, 1341), 'tensorflow.function', 'tf.function', ([], {'experimental_compile': '(False)'}), '(experimental_compile=False)\n', (1313, 1341), True, 'import tensorflow as tf\n'), ((1431, 1469), 'tensorflow.function', 'tf.function', ([], {'experimental_compile': '(True)'}), '(experimental_compile=True)\n', (1442, 1469), True, 'import tensorflow as tf\n'), ((426, 453), 'shared_functions.make_matmul', 'make_matmul', (['input', 'd_model'], {}), '(input, d_model)\n', (437, 453), False, 'from shared_functions import make_matmul, measure_tf2_gpu\n'), ((462, 489), 'shared_functions.make_matmul', 'make_matmul', (['input', 'd_model'], {}), '(input, d_model)\n', (473, 489), False, 'from shared_functions import make_matmul, measure_tf2_gpu\n'), ((498, 525), 'shared_functions.make_matmul', 'make_matmul', (['input', 'd_model'], {}), '(input, d_model)\n', (509, 525), False, 'from shared_functions import make_matmul, measure_tf2_gpu\n'), ((566, 599), 'tensorflow.reshape', 'tf.reshape', (['q'], {'shape': '(64, 16, 64)'}), '(q, shape=(64, 16, 64))\n', (576, 599), True, 'import tensorflow as tf\n'), ((606, 639), 'tensorflow.reshape', 'tf.reshape', (['k'], {'shape': '(64, 16, 64)'}), '(k, shape=(64, 16, 64))\n', (616, 639), True, 'import tensorflow as tf\n'), ((646, 679), 'tensorflow.reshape', 'tf.reshape', (['v'], {'shape': '(64, 16, 64)'}), '(v, shape=(64, 16, 64))\n', (656, 679), True, 'import tensorflow as tf\n'), ((729, 762), 'tensorflow.transpose', 'tf.transpose', ([], {'a': 'q', 'perm': '(1, 0, 2)'}), '(a=q, perm=(1, 0, 2))\n', (741, 762), True, 'import tensorflow as tf\n'), ((769, 802), 'tensorflow.transpose', 'tf.transpose', ([], {'a': 'k', 'perm': '(1, 0, 2)'}), '(a=k, perm=(1, 0, 2))\n', (781, 802), True, 'import tensorflow as tf\n'), ((809, 842), 'tensorflow.transpose', 'tf.transpose', ([], {'a': 'v', 'perm': '(1, 0, 2)'}), '(a=v, perm=(1, 0, 2))\n', (821, 842), True, 'import tensorflow as tf\n'), ((855, 870), 'tensorflow.matmul', 'tf.matmul', (['q', 'k'], {}), '(q, 
k)\n', (864, 870), True, 'import tensorflow as tf\n'), ((884, 904), 'tensorflow.matmul', 'tf.matmul', (['logits', 'v'], {}), '(logits, v)\n', (893, 904), True, 'import tensorflow as tf\n'), ((950, 988), 'tensorflow.transpose', 'tf.transpose', ([], {'a': 'output', 'perm': '(1, 0, 2)'}), '(a=output, perm=(1, 0, 2))\n', (962, 988), True, 'import tensorflow as tf\n'), ((1000, 1036), 'tensorflow.reshape', 'tf.reshape', (['output'], {'shape': '(64, 1024)'}), '(output, shape=(64, 1024))\n', (1010, 1036), True, 'import tensorflow as tf\n'), ((1569, 1594), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (1592, 1594), False, 'import argparse\n'), ((1934, 1986), 'shared_functions.measure_tf2_gpu', 'measure_tf2_gpu', (['bert_tf2', 'inputs', 'method_name', 'args'], {}), '(bert_tf2, inputs, method_name, args)\n', (1949, 1986), False, 'from shared_functions import make_matmul, measure_tf2_gpu\n'), ((2019, 2075), 'shared_functions.measure_tf2_gpu', 'measure_tf2_gpu', (['bert_tf2_xla', 'inputs', 'method_name', 'args'], {}), '(bert_tf2_xla, inputs, method_name, args)\n', (2034, 2075), False, 'from shared_functions import make_matmul, measure_tf2_gpu\n'), ((1100, 1131), 'shared_functions.make_matmul', 'make_matmul', (['input', '(4 * d_model)'], {}), '(input, 4 * d_model)\n', (1111, 1131), False, 'from shared_functions import make_matmul, measure_tf2_gpu\n'), ((1845, 1887), 'numpy.random.uniform', 'np.random.uniform', (['(-1)', '(1)'], {'size': 'input_shape'}), '(-1, 1, size=input_shape)\n', (1862, 1887), True, 'import numpy as np\n')] |
"""
These are the basic black box tests for the doNd functions.
"""
from qdev_wrappers.dataset.doNd import do0d, do1d, do2d
from typing import Tuple, List, Optional
from qcodes.instrument.parameter import Parameter
from qcodes import config, new_experiment, load_by_id
from qcodes.utils import validators
import pytest
import numpy as np
config.user.mainfolder = "output" # set output folder for doNd's
# Register a fresh experiment so all test runs go into one container.
new_experiment("doNd-tests", sample_name="no sample")
@pytest.fixture()
def _param():
    """A gettable parameter that always reads 1."""
    return Parameter('simple_parameter', set_cmd=None, get_cmd=lambda: 1)
@pytest.fixture()
def _paramComplex():
    """A gettable parameter that always reads the complex value 1 + 1j."""
    return Parameter('simple_complex_parameter', set_cmd=None,
                     get_cmd=lambda: 1 + 1j,
                     vals=validators.ComplexNumbers())
@pytest.fixture()
def _param_set():
    """A settable parameter with no hardware backing."""
    return Parameter('simple_setter_parameter', set_cmd=None, get_cmd=None)
def _param_func(_p):
    """
    A private utility function.
    """
    # Wrap *_p* in a new parameter whose reading is twice the original's.
    return Parameter('modified_parameter', set_cmd=None,
                     get_cmd=lambda: _p.get() * 2)
@pytest.fixture()
def _param_callable(_param):
    # Fixture wrapping the base parameter via _param_func.
    return _param_func(_param)
def test_param_callable(_param_callable):
    """The wrapped parameter reads twice the base parameter's value."""
    _param_modified = _param_callable
    assert _param_modified.get() == 2
@pytest.mark.parametrize('period, plot', [(None, True), (None, False),
                                          (1, True), (1, False)])
def test_do0d_with_real_parameter(_param, period, plot):
    """Smoke test: do0d runs with a real-valued parameter for every
    write_period/do_plot combination."""
    do0d(_param, write_period=period, do_plot=plot)
@pytest.mark.parametrize('period, plot', [(None, True), (None, False),
                                          (1, True), (1, False)])
def test_do0d_with_complex_parameter(_paramComplex, period, plot):
    """Smoke test: do0d runs with a complex-valued parameter for every
    write_period/do_plot combination."""
    do0d(_paramComplex, write_period=period, do_plot=plot)
@pytest.mark.parametrize('period, plot', [(None, True), (None, False),
                                          (1, True), (1, False)])
def test_do0d_with_a_callable(_param_callable, period, plot):
    """Smoke test: do0d runs with a wrapped (callable-backed) parameter
    for every write_period/do_plot combination."""
    do0d(_param_callable, write_period=period, do_plot=plot)
@pytest.mark.parametrize('period, plot', [(None, True), (None, False),
                                          (1, True), (1, False)])
def test_do0d_with_multiparameters(_param, _paramComplex, period, plot):
    """Smoke test: do0d runs with a real and a complex parameter together
    for every write_period/do_plot combination."""
    do0d(_param, _paramComplex, write_period=period, do_plot=plot)
@pytest.mark.parametrize('period, plot', [(None, True), (None, False),
                                          (1, True), (1, False)])
def test_do0d_with_parameter_and_a_callable(_paramComplex, _param_callable,
                                            period, plot):
    """Smoke test: do0d runs with a complex parameter plus a wrapped
    (callable-backed) parameter for every write_period/do_plot combo."""
    do0d(_param_callable, _paramComplex, write_period=period, do_plot=plot)
def test_do0d_output_type_real_parameter(_param):
    """do0d returns the integer run id as its first element."""
    data = do0d(_param)
    # isinstance is the idiomatic type check (PEP 8 / flake8 E721),
    # unlike the exact `type(...) == int` comparison used before.
    assert isinstance(data[0], int)
def test_do0d_output_type_complex_parameter(_paramComplex):
    """do0d returns the integer run id for a complex parameter too."""
    dataComplex = do0d(_paramComplex)
    # isinstance is the idiomatic type check (PEP 8 / flake8 E721).
    assert isinstance(dataComplex[0], int)
def test_do0d_output_type_callable(_param_callable):
    """do0d returns the integer run id for a wrapped parameter too."""
    dataFunc = do0d(_param_callable)
    # isinstance is the idiomatic type check (PEP 8 / flake8 E721).
    assert isinstance(dataFunc[0], int)
def test_do0d_output_data(_param):
    """do0d registers the parameter and stores its measured value."""
    exp = do0d(_param)
    data = load_by_id(exp[0])
    assert data.parameters == _param.name
    # The stored value must equal the parameter's current reading.
    assert data.get_parameter_data(_param.name)[_param.name][_param.name] == _param.get()
@pytest.mark.parametrize('delay', [0, 0.1, 1])
def test_do1d_with_real_parameter(_param_set, _param, delay):
    """Smoke test: do1d sweeps a setter while measuring a real parameter."""
    start = 0
    stop = 1
    num_points = 1
    do1d(_param_set, start, stop, num_points, delay, _param)
@pytest.mark.parametrize('delay', [0, 0.1, 1])
def test_do1d_with_complex_parameter(_param_set, _paramComplex, delay):
    """Smoke test: do1d sweeps a setter while measuring a complex parameter."""
    start = 0
    stop = 1
    num_points = 1
    do1d(_param_set, start, stop, num_points, delay, _paramComplex)
@pytest.mark.parametrize('delay', [0, 0.1, 1])
def test_do1d_with_multiparameter(_param_set, _param, _paramComplex, delay):
    """Smoke test: do1d measures a real and a complex parameter together."""
    start = 0
    stop = 1
    num_points = 1
    do1d(_param_set, start, stop, num_points, delay, _param, _paramComplex)
@pytest.mark.parametrize('delay', [0, 0.1, 1])
def test_do1d_output_type_real_parameter(_param_set, _param, delay):
    """do1d returns the integer run id as its first element."""
    start = 0
    stop = 1
    num_points = 1
    data = do1d(_param_set, start, stop, num_points, delay, _param)
    # isinstance is the idiomatic type check (PEP 8 / flake8 E721).
    assert isinstance(data[0], int)
def test_do1d_output_data(_param, _param_set):
    """do1d stores the full sweep: setpoints and measured values."""
    start = 0
    stop = 1
    num_points = 5
    delay = 0
    exp = do1d(_param_set, start, stop, num_points, delay, _param)
    data = load_by_id(exp[0])
    assert data.parameters == f'{_param_set.name},{_param.name}'
    # The measured parameter always reads 1 -> a vector of ones.
    assert np.allclose(data.get_parameter_data(_param.name)[_param.name][_param.name], np.ones(5))
    # Setpoints are linearly spaced between start and stop.
    assert np.allclose(data.get_parameter_data(_param_set.name)[_param_set.name][_param_set.name], np.array([0, 0.25, 0.5, 0.75, 1]))
@pytest.mark.parametrize('sweep, columns', [(False, False), (False, True),
                                            (True, False), (True, True)])
def test_do2d(_param, _paramComplex, _param_set, sweep, columns):
    """Smoke test: do2d runs for every set_before_sweep/flush_columns combo."""
    # Outer axis: 0 -> 1, one point; inner axis: 0.1 -> 1.1, two points.
    outer = (_param_set, 0, 1, 1, 0)
    inner = (_param_set, 0.1, 1.1, 2, 0.01)
    do2d(*outer, *inner, _param, _paramComplex,
         set_before_sweep=sweep, flush_columns=columns)
def test_do2d_output_type(_param, _paramComplex, _param_set):
    """do2d returns the integer run id as its first element."""
    start_p1 = 0
    stop_p1 = 0.5
    num_points_p1 = 1
    delay_p1 = 0
    start_p2 = 0.1
    stop_p2 = 0.75
    num_points_p2 = 2
    delay_p2 = 0.025
    data = do2d(_param_set, start_p1, stop_p1, num_points_p1, delay_p1,
                _param_set, start_p2, stop_p2, num_points_p2, delay_p2,
                _param, _paramComplex)
    # isinstance is the idiomatic type check (PEP 8 / flake8 E721).
    assert isinstance(data[0], int)
def test_do2d_output_data(_param, _paramComplex, _param_set):
    """do2d stores the full 5x5 grid for both measured parameters."""
    # Outer axis: 0 -> 0.5 in 5 points; inner axis: 0.5 -> 1 in 5 points.
    exp = do2d(_param_set, 0, 0.5, 5, 0,
               _param_set, 0.5, 1, 5, 0.0,
               _param, _paramComplex)
    data = load_by_id(exp[0])
    assert data.parameters == f'{_param_set.name},{_param.name},{_paramComplex.name}'
    # The real parameter always reads 1 -> 25 ones.
    measured = data.get_parameter_data(_param.name)[_param.name][_param.name]
    assert np.allclose(measured, np.ones(25))
    # The complex parameter always reads 1 + 1j.
    measured_c = data.get_parameter_data(_paramComplex.name)[_paramComplex.name][_paramComplex.name]
    assert np.allclose(measured_c, np.array([(1+1j)] * 25))
    # Recorded setpoints of the shared setter parameter.
    setpoints = data.get_parameter_data(_param_set.name)[_param_set.name][_param_set.name]
    expected = np.array([0.5, 0.5, 0.625, 0.625,
                         0.75, 0.75, 0.875, 0.875,
                         1, 1] * 5)
    assert np.allclose(setpoints, expected)
| [
"qdev_wrappers.dataset.doNd.do2d",
"pytest.fixture",
"numpy.ones",
"qcodes.instrument.parameter.Parameter",
"qcodes.utils.validators.ComplexNumbers",
"qdev_wrappers.dataset.doNd.do0d",
"numpy.array",
"qcodes.load_by_id",
"pytest.mark.parametrize",
"qcodes.new_experiment",
"qdev_wrappers.dataset.... | [((405, 458), 'qcodes.new_experiment', 'new_experiment', (['"""doNd-tests"""'], {'sample_name': '"""no sample"""'}), "('doNd-tests', sample_name='no sample')\n", (419, 458), False, 'from qcodes import config, new_experiment, load_by_id\n'), ((462, 478), 'pytest.fixture', 'pytest.fixture', ([], {}), '()\n', (476, 478), False, 'import pytest\n'), ((617, 633), 'pytest.fixture', 'pytest.fixture', ([], {}), '()\n', (631, 633), False, 'import pytest\n'), ((843, 859), 'pytest.fixture', 'pytest.fixture', ([], {}), '()\n', (857, 859), False, 'import pytest\n'), ((1243, 1259), 'pytest.fixture', 'pytest.fixture', ([], {}), '()\n', (1257, 1259), False, 'import pytest\n'), ((1443, 1541), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""period, plot"""', '[(None, True), (None, False), (1, True), (1, False)]'], {}), "('period, plot', [(None, True), (None, False), (1, \n True), (1, False)])\n", (1466, 1541), False, 'import pytest\n'), ((1674, 1772), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""period, plot"""', '[(None, True), (None, False), (1, True), (1, False)]'], {}), "('period, plot', [(None, True), (None, False), (1, \n True), (1, False)])\n", (1697, 1772), False, 'import pytest\n'), ((1922, 2020), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""period, plot"""', '[(None, True), (None, False), (1, True), (1, False)]'], {}), "('period, plot', [(None, True), (None, False), (1, \n True), (1, False)])\n", (1945, 2020), False, 'import pytest\n'), ((2167, 2265), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""period, plot"""', '[(None, True), (None, False), (1, True), (1, False)]'], {}), "('period, plot', [(None, True), (None, False), (1, \n True), (1, False)])\n", (2190, 2265), False, 'import pytest\n'), ((2429, 2527), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""period, plot"""', '[(None, True), (None, False), (1, True), (1, False)]'], {}), "('period, plot', [(None, True), (None, 
False), (1, \n True), (1, False)])\n", (2452, 2527), False, 'import pytest\n'), ((3359, 3404), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""delay"""', '[0, 0.1, 1]'], {}), "('delay', [0, 0.1, 1])\n", (3382, 3404), False, 'import pytest\n'), ((3578, 3623), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""delay"""', '[0, 0.1, 1]'], {}), "('delay', [0, 0.1, 1])\n", (3601, 3623), False, 'import pytest\n'), ((3815, 3860), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""delay"""', '[0, 0.1, 1]'], {}), "('delay', [0, 0.1, 1])\n", (3838, 3860), False, 'import pytest\n'), ((4065, 4110), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""delay"""', '[0, 0.1, 1]'], {}), "('delay', [0, 0.1, 1])\n", (4088, 4110), False, 'import pytest\n'), ((4838, 4946), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""sweep, columns"""', '[(False, False), (False, True), (True, False), (True, True)]'], {}), "('sweep, columns', [(False, False), (False, True), (\n True, False), (True, True)])\n", (4861, 4946), False, 'import pytest\n'), ((502, 565), 'qcodes.instrument.parameter.Parameter', 'Parameter', (['"""simple_parameter"""'], {'set_cmd': 'None', 'get_cmd': '(lambda : 1)'}), "('simple_parameter', set_cmd=None, get_cmd=lambda : 1)\n", (511, 565), False, 'from qcodes.instrument.parameter import Parameter\n'), ((886, 950), 'qcodes.instrument.parameter.Parameter', 'Parameter', (['"""simple_setter_parameter"""'], {'set_cmd': 'None', 'get_cmd': 'None'}), "('simple_setter_parameter', set_cmd=None, get_cmd=None)\n", (895, 950), False, 'from qcodes.instrument.parameter import Parameter\n'), ((1623, 1670), 'qdev_wrappers.dataset.doNd.do0d', 'do0d', (['_param'], {'write_period': 'period', 'do_plot': 'plot'}), '(_param, write_period=period, do_plot=plot)\n', (1627, 1670), False, 'from qdev_wrappers.dataset.doNd import do0d, do1d, do2d\n'), ((1864, 1918), 'qdev_wrappers.dataset.doNd.do0d', 'do0d', (['_paramComplex'], {'write_period': 'period', 
'do_plot': 'plot'}), '(_paramComplex, write_period=period, do_plot=plot)\n', (1868, 1918), False, 'from qdev_wrappers.dataset.doNd import do0d, do1d, do2d\n'), ((2107, 2163), 'qdev_wrappers.dataset.doNd.do0d', 'do0d', (['_param_callable'], {'write_period': 'period', 'do_plot': 'plot'}), '(_param_callable, write_period=period, do_plot=plot)\n', (2111, 2163), False, 'from qdev_wrappers.dataset.doNd import do0d, do1d, do2d\n'), ((2363, 2425), 'qdev_wrappers.dataset.doNd.do0d', 'do0d', (['_param', '_paramComplex'], {'write_period': 'period', 'do_plot': 'plot'}), '(_param, _paramComplex, write_period=period, do_plot=plot)\n', (2367, 2425), False, 'from qdev_wrappers.dataset.doNd import do0d, do1d, do2d\n'), ((2687, 2758), 'qdev_wrappers.dataset.doNd.do0d', 'do0d', (['_param_callable', '_paramComplex'], {'write_period': 'period', 'do_plot': 'plot'}), '(_param_callable, _paramComplex, write_period=period, do_plot=plot)\n', (2691, 2758), False, 'from qdev_wrappers.dataset.doNd import do0d, do1d, do2d\n'), ((2822, 2834), 'qdev_wrappers.dataset.doNd.do0d', 'do0d', (['_param'], {}), '(_param)\n', (2826, 2834), False, 'from qdev_wrappers.dataset.doNd import do0d, do1d, do2d\n'), ((2947, 2966), 'qdev_wrappers.dataset.doNd.do0d', 'do0d', (['_paramComplex'], {}), '(_paramComplex)\n', (2951, 2966), False, 'from qdev_wrappers.dataset.doNd import do0d, do1d, do2d\n'), ((3076, 3097), 'qdev_wrappers.dataset.doNd.do0d', 'do0d', (['_param_callable'], {}), '(_param_callable)\n', (3080, 3097), False, 'from qdev_wrappers.dataset.doNd import do0d, do1d, do2d\n'), ((3181, 3193), 'qdev_wrappers.dataset.doNd.do0d', 'do0d', (['_param'], {}), '(_param)\n', (3185, 3193), False, 'from qdev_wrappers.dataset.doNd import do0d, do1d, do2d\n'), ((3205, 3223), 'qcodes.load_by_id', 'load_by_id', (['exp[0]'], {}), '(exp[0])\n', (3215, 3223), False, 'from qcodes import config, new_experiment, load_by_id\n'), ((3519, 3575), 'qdev_wrappers.dataset.doNd.do1d', 'do1d', (['_param_set', 'start', 'stop', 
'num_points', 'delay', '_param'], {}), '(_param_set, start, stop, num_points, delay, _param)\n', (3523, 3575), False, 'from qdev_wrappers.dataset.doNd import do0d, do1d, do2d\n'), ((3748, 3811), 'qdev_wrappers.dataset.doNd.do1d', 'do1d', (['_param_set', 'start', 'stop', 'num_points', 'delay', '_paramComplex'], {}), '(_param_set, start, stop, num_points, delay, _paramComplex)\n', (3752, 3811), False, 'from qdev_wrappers.dataset.doNd import do0d, do1d, do2d\n'), ((3990, 4061), 'qdev_wrappers.dataset.doNd.do1d', 'do1d', (['_param_set', 'start', 'stop', 'num_points', 'delay', '_param', '_paramComplex'], {}), '(_param_set, start, stop, num_points, delay, _param, _paramComplex)\n', (3994, 4061), False, 'from qdev_wrappers.dataset.doNd import do0d, do1d, do2d\n'), ((4239, 4295), 'qdev_wrappers.dataset.doNd.do1d', 'do1d', (['_param_set', 'start', 'stop', 'num_points', 'delay', '_param'], {}), '(_param_set, start, stop, num_points, delay, _param)\n', (4243, 4295), False, 'from qdev_wrappers.dataset.doNd import do0d, do1d, do2d\n'), ((4449, 4505), 'qdev_wrappers.dataset.doNd.do1d', 'do1d', (['_param_set', 'start', 'stop', 'num_points', 'delay', '_param'], {}), '(_param_set, start, stop, num_points, delay, _param)\n', (4453, 4505), False, 'from qdev_wrappers.dataset.doNd import do0d, do1d, do2d\n'), ((4517, 4535), 'qcodes.load_by_id', 'load_by_id', (['exp[0]'], {}), '(exp[0])\n', (4527, 4535), False, 'from qcodes import config, new_experiment, load_by_id\n'), ((5191, 5385), 'qdev_wrappers.dataset.doNd.do2d', 'do2d', (['_param_set', 'start_p1', 'stop_p1', 'num_points_p1', 'delay_p1', '_param_set', 'start_p2', 'stop_p2', 'num_points_p2', 'delay_p2', '_param', '_paramComplex'], {'set_before_sweep': 'sweep', 'flush_columns': 'columns'}), '(_param_set, start_p1, stop_p1, num_points_p1, delay_p1, _param_set,\n start_p2, stop_p2, num_points_p2, delay_p2, _param, _paramComplex,\n set_before_sweep=sweep, flush_columns=columns)\n', (5195, 5385), False, 'from qdev_wrappers.dataset.doNd 
import do0d, do1d, do2d\n'), ((5629, 5772), 'qdev_wrappers.dataset.doNd.do2d', 'do2d', (['_param_set', 'start_p1', 'stop_p1', 'num_points_p1', 'delay_p1', '_param_set', 'start_p2', 'stop_p2', 'num_points_p2', 'delay_p2', '_param', '_paramComplex'], {}), '(_param_set, start_p1, stop_p1, num_points_p1, delay_p1, _param_set,\n start_p2, stop_p2, num_points_p2, delay_p2, _param, _paramComplex)\n', (5633, 5772), False, 'from qdev_wrappers.dataset.doNd import do0d, do1d, do2d\n'), ((6063, 6206), 'qdev_wrappers.dataset.doNd.do2d', 'do2d', (['_param_set', 'start_p1', 'stop_p1', 'num_points_p1', 'delay_p1', '_param_set', 'start_p2', 'stop_p2', 'num_points_p2', 'delay_p2', '_param', '_paramComplex'], {}), '(_param_set, start_p1, stop_p1, num_points_p1, delay_p1, _param_set,\n start_p2, stop_p2, num_points_p2, delay_p2, _param, _paramComplex)\n', (6067, 6206), False, 'from qdev_wrappers.dataset.doNd import do0d, do1d, do2d\n'), ((6249, 6267), 'qcodes.load_by_id', 'load_by_id', (['exp[0]'], {}), '(exp[0])\n', (6259, 6267), False, 'from qcodes import config, new_experiment, load_by_id\n'), ((4689, 4699), 'numpy.ones', 'np.ones', (['(5)'], {}), '(5)\n', (4696, 4699), True, 'import numpy as np\n'), ((4800, 4833), 'numpy.array', 'np.array', (['[0, 0.25, 0.5, 0.75, 1]'], {}), '([0, 0.25, 0.5, 0.75, 1])\n', (4808, 4833), True, 'import numpy as np\n'), ((6442, 6453), 'numpy.ones', 'np.ones', (['(25)'], {}), '(25)\n', (6449, 6453), True, 'import numpy as np\n'), ((6563, 6588), 'numpy.array', 'np.array', (['([1 + 1.0j] * 25)'], {}), '([1 + 1.0j] * 25)\n', (6571, 6588), True, 'import numpy as np\n'), ((6687, 6757), 'numpy.array', 'np.array', (['([0.5, 0.5, 0.625, 0.625, 0.75, 0.75, 0.875, 0.875, 1, 1] * 5)'], {}), '([0.5, 0.5, 0.625, 0.625, 0.75, 0.75, 0.875, 0.875, 1, 1] * 5)\n', (6695, 6757), True, 'import numpy as np\n'), ((798, 825), 'qcodes.utils.validators.ComplexNumbers', 'validators.ComplexNumbers', ([], {}), '()\n', (823, 825), False, 'from qcodes.utils import validators\n')] |
# Compatibility Python 2/3
from __future__ import division, print_function, absolute_import
# ----------------------------------------------------------------------------------------------------------------------
from dotmap import DotMap
import numpy as np
import h5py
import sys
sys.path.insert(0, '/home/manu/ros_ws/src/Research/manu_sawyer/src/tensorflow_model_is_gripping')
import aolib.img as ig
import RGB2video as RGB2video
def convert_to_video(path, list_filenames):
    """Convert Kinect/GelSight image streams stored in HDF5 files to videos.

    Parameters
    ----------
    path : str
        Directory containing the HDF5 recordings (with trailing slash).
    list_filenames : str or list of str
        One file name or a list of file names relative to `path`.
    """
    if isinstance(list_filenames, basestring):
        list_filenames = [list_filenames]  # a lone string becomes a one-item list
    # Loop variable renamed from `file`, which shadowed the Python 2
    # builtin `file` type.
    for fname in list_filenames:
        namefile = path + fname
        data = DotMap()
        data.n_steps = []
        data.frequency = []
        data.kinect.A.image = []
        data.gelsight.A.image = []
        data.gelsight.B.image = []
        print('Opening file: %s' % namefile)
        f = h5py.File(namefile, "r")
        data.n_steps = f['n_steps'].value
        print('Number of steps: %d' % data.n_steps)
        data.frequency = int(f['frequency'].value)
        print('FPS: %d' % data.frequency)
        # Decompress each stored frame back into an image array.
        data.kinect.A.image = map(ig.uncompress, f['/color_image_KinectA'].value)
        print("color_image_KinectA done")
        data.gelsight.A.image = map(ig.uncompress, f['/GelSightA_image'].value)
        print("GelSightA_image done")
        data.gelsight.B.image = map(ig.uncompress, f['/GelSightB_image'].value)
        print("GelSightB_image done")
        # Convert to np arrays
        kinect_A = np.asarray(data.kinect.A.image)
        print('kinect.A To array done')
        gelsight_A = np.asarray(data.gelsight.A.image)
        print('gelsight.A To array done')
        gelsight_B = np.asarray(data.gelsight.B.image)
        print('gelsight.B To array done')
        print(kinect_A.shape)
        print(gelsight_A.shape)
        print(gelsight_B.shape)
        # Write one video per stream at the recording's frame rate.
        print(RGB2video.RGB2video(data=kinect_A, nameFile=fname + '_kinect_A', framerate=data.frequency))
        print(RGB2video.RGB2video(data=gelsight_A, nameFile=fname + '_gelsight_A', framerate=data.frequency))
        print(RGB2video.RGB2video(data=gelsight_B, nameFile=fname + '_gelsight_B', framerate=data.frequency))
if __name__ == '__main__':
    path = '/home/manu/ros_ws/src/Research/manu_sawyer/src/video_conv/'
    import os
    # Collect every HDF5 recording in the directory, in sorted order.
    # A sorted generator expression replaces the manual append loop, and
    # `f` avoids shadowing the Python 2 builtin `file`.
    list_filenames = sorted(f for f in os.listdir(path) if f.endswith(".hdf5"))
    convert_to_video(path=path, list_filenames=list_filenames)
| [
"RGB2video.RGB2video",
"h5py.File",
"numpy.asarray",
"sys.path.insert",
"dotmap.DotMap",
"os.listdir"
] | [((283, 389), 'sys.path.insert', 'sys.path.insert', (['(0)', '"""/home/manu/ros_ws/src/Research/manu_sawyer/src/tensorflow_model_is_gripping"""'], {}), "(0,\n '/home/manu/ros_ws/src/Research/manu_sawyer/src/tensorflow_model_is_gripping'\n )\n", (298, 389), False, 'import sys\n'), ((2379, 2395), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (2389, 2395), False, 'import os\n'), ((689, 697), 'dotmap.DotMap', 'DotMap', ([], {}), '()\n', (695, 697), False, 'from dotmap import DotMap\n'), ((913, 937), 'h5py.File', 'h5py.File', (['namefile', '"""r"""'], {}), "(namefile, 'r')\n", (922, 937), False, 'import h5py\n'), ((1539, 1570), 'numpy.asarray', 'np.asarray', (['data.kinect.A.image'], {}), '(data.kinect.A.image)\n', (1549, 1570), True, 'import numpy as np\n'), ((1632, 1665), 'numpy.asarray', 'np.asarray', (['data.gelsight.A.image'], {}), '(data.gelsight.A.image)\n', (1642, 1665), True, 'import numpy as np\n'), ((1729, 1762), 'numpy.asarray', 'np.asarray', (['data.gelsight.B.image'], {}), '(data.gelsight.B.image)\n', (1739, 1762), True, 'import numpy as np\n'), ((1915, 2009), 'RGB2video.RGB2video', 'RGB2video.RGB2video', ([], {'data': 'kinect_A', 'nameFile': "(file + '_kinect_A')", 'framerate': 'data.frequency'}), "(data=kinect_A, nameFile=file + '_kinect_A', framerate=\n data.frequency)\n", (1934, 2009), True, 'import RGB2video as RGB2video\n'), ((2020, 2117), 'RGB2video.RGB2video', 'RGB2video.RGB2video', ([], {'data': 'gelsight_A', 'nameFile': "(file + '_gelsight_A')", 'framerate': 'data.frequency'}), "(data=gelsight_A, nameFile=file + '_gelsight_A',\n framerate=data.frequency)\n", (2039, 2117), True, 'import RGB2video as RGB2video\n'), ((2129, 2226), 'RGB2video.RGB2video', 'RGB2video.RGB2video', ([], {'data': 'gelsight_B', 'nameFile': "(file + '_gelsight_B')", 'framerate': 'data.frequency'}), "(data=gelsight_B, nameFile=file + '_gelsight_B',\n framerate=data.frequency)\n", (2148, 2226), True, 'import RGB2video as RGB2video\n')] |
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
import numpy as np
import chumpy as ch
import scipy.sparse as sp
from chumpy.utils import col
class sp_dot(ch.Ch):
    """Chumpy op: sparse matrix `a` (fixed) times differentiable term `b`."""
    terms = 'a',
    dterms = 'b',
    def compute_r(self):
        # Forward value: plain sparse @ dense product.
        return self.a.dot(self.b.r)
    def compute(self):
        # To stay consistent with numpy, we must upgrade 1D arrays to 2D
        ar = sp.csr_matrix((self.a.data, self.a.indices, self.a.indptr),
                           shape=(max(np.sum(self.a.shape[:-1]), 1), self.a.shape[-1]))
        br = col(self.b.r) if len(self.b.r.shape) < 2 else self.b.r.reshape((self.b.r.shape[0], -1))
        if br.ndim <= 1:
            return ar
        elif br.ndim <= 2:
            # Jacobian of vec(a @ B) w.r.t. vec(B) is kron(a, I) — presumably
            # matching chumpy's flattening convention; TODO confirm.
            return sp.kron(ar, sp.eye(br.shape[1], br.shape[1]))
        else:
            raise NotImplementedError
    def compute_dr_wrt(self, wrt):
        # Derivative is only defined w.r.t. the dense term `b`.
        if wrt is self.b:
            return self.compute() | [
"chumpy.utils.col",
"numpy.sum",
"scipy.sparse.eye"
] | [((536, 549), 'chumpy.utils.col', 'col', (['self.b.r'], {}), '(self.b.r)\n', (539, 549), False, 'from chumpy.utils import col\n'), ((730, 762), 'scipy.sparse.eye', 'sp.eye', (['br.shape[1]', 'br.shape[1]'], {}), '(br.shape[1], br.shape[1])\n', (736, 762), True, 'import scipy.sparse as sp\n'), ((473, 498), 'numpy.sum', 'np.sum', (['self.a.shape[:-1]'], {}), '(self.a.shape[:-1])\n', (479, 498), True, 'import numpy as np\n')] |
"""Process SSURGO soil database to create site-specific soil files."""
import numpy as np
import pandas as pd
from collections import Counter
from itertools import compress
def bin_depth(df_soils):
    """
    Bin soil into 5 depth categories.

    Each row's ``depth`` (lower bound exclusive, upper bound inclusive)
    is mapped onto one of five bins and the bin label is written into a
    new ``depth_category`` column.  Rows whose depth falls outside every
    bin keep NaN.  The frame is modified in place and also returned.

    Parameters
    ----------
    df_soils : pd.DataFrame
        Must contain a numeric ``depth`` column.

    Returns
    -------
    pd.DataFrame
        The same frame with a ``depth_category`` column added.
    """
    # (exclusive lower bound, inclusive upper bound, category label)
    bins = [(-1, 0, 0),
            (0, 50, 50),
            (50, 100, 100),
            (100, 150, 150),
            (150, 250, 200)]
    df_soils['depth_category'] = np.nan
    for lower, upper, label in bins:
        in_bin = (df_soils.depth > lower) & (df_soils.depth <= upper)
        df_soils.loc[in_bin, 'depth_category'] = label
    return df_soils
def merge_texture(df_soils, df_textures):
    """
    Merge texture info (from R) into main soil file.

    ``df_textures`` is treated as a row-aligned one-hot indicator frame
    whose columns are texture names: for every row, the first column
    equal to 1 gives that row's texture; rows with no such column are
    labelled 'ambiguous'.  The labels are attached to a copy of
    ``df_soils`` as a new ``texture`` column.

    Parameters
    ----------
    df_soils : pd.DataFrame
    df_textures : pd.DataFrame

    Returns
    -------
    pd.DataFrame
        Copy of ``df_soils`` with a ``texture`` column appended.
    """
    def _label(row):
        # First indicator column set to 1, or 'ambiguous' when none is.
        hits = row[row == 1].index
        return hits[0] if len(hits) else 'ambiguous'

    labels = [_label(df_textures.iloc[i]) for i in range(df_textures.shape[0])]
    result = df_soils.copy()
    result['texture'] = labels
    return result
def texture_profile(df_soils):
    """
    Assign mean texture profile for soil categories.

    Rows are grouped by (texture, depth_category) and averaged; only the
    water-retention columns are kept, with the percentage columns
    (``OM``, ``th33``, ``th1500``) rescaled to fractions.

    Texture codes:
    - Cl: clay
    - SiCl: silty clay
    - SaCl: sandy clay
    - ClLo: clay loam
    - SiClLo: silty clay loam
    - SaClLo: sandy clay loam
    - Lo: loam
    - SiLo: silty loam
    - SaLo: sandy loam
    - Si: silt
    - LoSa: loamy sand
    - Sa: sand

    Parameters
    ----------
    df_soils : pd.DataFrame

    Returns
    -------
    pd.DataFrame
        Indexed by (texture, depth_category).
    """
    keep = ['sand', 'silt', 'clay', 'OM', 'dbthirdbar', 'th33', 'th1500']
    profile = df_soils.groupby(['texture', 'depth_category']).mean()[keep]
    # Convert percentage columns to fractions.
    for column in ('OM', 'th33', 'th1500'):
        profile[column] = profile[column] / 100
    return profile
def texture_prevalence(df_soils, depth1, depth2, sort_column='cokey'):
    """
    Order soil texture based on texture prevalence.

    Rows belonging to either of the two depth categories are counted per
    texture, and the texture names are returned ordered from most to
    least frequent (frequency taken from ``sort_column`` of the count
    frame).

    Parameters
    ----------
    df_soils : pd.DataFrame
    depth1 : int
        First soil depth category to include.
        0.0, 50.0, 100.0, 150.0, 200.0
    depth2 : int
        Second soil depth category to include.
        0.0, 50.0, 100.0, 150.0, 200.0
    sort_column : str
        Column of the per-texture count frame used for ordering.

    Returns
    -------
    pd.DataFrame
        Single 'texture' column, most prevalent texture first.
    """
    mask = f'(depth_category == {depth1}) | (depth_category == {depth2})'
    counts = df_soils.query(mask).groupby('texture').count()
    ordered = counts.sort_values(by=sort_column, axis=0, ascending=False)
    return pd.DataFrame(ordered.index)
def assign_texture(df_soils, df_sites, depth1, depth2, n_nearbysites):
    """
    Assign soil texture for each simulation site.

    For every site, the ``n_nearbysites`` nearest soil records (plain
    Euclidean distance in lat/lon degrees) within the two requested
    depth categories are collected and the most common texture among
    them is assigned.  Ties between equally common textures are broken
    by overall texture prevalence (see ``texture_prevalence``).

    Parameters
    ----------
    df_soils : pd.DataFrame
    df_sites : pd.DataFrame
        Must have ``site``, ``lat`` and ``lon`` columns.
    depth1 : int
        First soil depth category to include.
        0.0, 50.0, 100.0, 150.0, 200.0
    depth2 : int
        Second soil depth category to include.
        0.0, 50.0, 100.0, 150.0, 200.0
    n_nearbysites : int
        Number of nearest soil records to vote over.
    Returns
    -------
    list_texture : list
        List of textures for all simulation sites, in df_sites.site order.
    """
    sites = df_sites.site
    df_soils_depth = df_soils.query(
        f'(depth_category == {depth1}) | (depth_category == {depth2})')
    list_texture = []
    # Global prevalence ordering, used only to break ties below.
    df_texture_ordered = texture_prevalence(df_soils, depth1, depth2)
    for site in sites:
        # NOTE(review): float() on a one-row Series is deprecated in recent
        # pandas; this works only while each site value is unique — confirm.
        lat = float(df_sites[df_sites.site == site].lat)
        lon = float(df_sites[df_sites.site == site].lon)
        # calculate Euclidean distance
        dist = list(enumerate(np.sqrt((lat - df_soils_depth.lat)**2 + (
            lon - (df_soils_depth.lon))**2)))
        df_dist = pd.DataFrame(dist, columns=['rownum', 'distance'])
        # select the nearest n soil sites (positional rows of df_soils_depth)
        rows = list(df_dist.nsmallest(n_nearbysites, 'distance').rownum)
        # summarize the textures for those sites and order by prevalence
        textures = Counter(df_soils_depth.iloc[rows].texture).most_common()
        # select out only texture counts
        texture_counts = [item[1] for item in textures]
        if len(textures) == 1:
            # when there's only one texture type
            texture = textures[0][0]
            list_texture.append(texture)
        elif len(textures) > 1 and texture_counts[0] != texture_counts[1]:
            # when there's more than one texture type
            # but only one dominant type
            texture = textures[0][0]
            list_texture.append(texture)
        else:
            # when there's more than one texture type
            # and there's a tie between dominant texture types
            maxcount = max(texture_counts)
            # create list with True/False booleans to filter out tied textures
            textures_select = [
                textures[item][1] == maxcount for item in np.arange(
                    len(texture_counts))]
            # filter out tied textures
            textures_selected = list(compress(textures, textures_select))
            textures_selected = [
                textures_selected[item][0] for item in np.arange(
                    len(textures_selected))]
            # identify prevalence rank of each tied texture
            texture_prevalance = [df_texture_ordered[
                df_texture_ordered.texture == textures_selected[item]].
                index.values[0] for item in np.arange(len(textures_selected))]
            # assign final texture based on df_texture_ordered (lowest rank wins)
            texture = df_texture_ordered[
                df_texture_ordered.index == min(
                    texture_prevalance)].texture.values[0]
            list_texture.append(texture)
    return list_texture
| [
"pandas.DataFrame",
"numpy.arange",
"itertools.compress",
"collections.Counter",
"numpy.sqrt"
] | [((985, 1016), 'numpy.arange', 'np.arange', (['df_textures.shape[0]'], {}), '(df_textures.shape[0])\n', (994, 1016), True, 'import numpy as np\n'), ((4090, 4140), 'pandas.DataFrame', 'pd.DataFrame', (['dist'], {'columns': "['rownum', 'distance']"}), "(dist, columns=['rownum', 'distance'])\n", (4102, 4140), True, 'import pandas as pd\n'), ((3984, 4058), 'numpy.sqrt', 'np.sqrt', (['((lat - df_soils_depth.lat) ** 2 + (lon - df_soils_depth.lon) ** 2)'], {}), '((lat - df_soils_depth.lat) ** 2 + (lon - df_soils_depth.lon) ** 2)\n', (3991, 4058), True, 'import numpy as np\n'), ((4348, 4390), 'collections.Counter', 'Counter', (['df_soils_depth.iloc[rows].texture'], {}), '(df_soils_depth.iloc[rows].texture)\n', (4355, 4390), False, 'from collections import Counter\n'), ((5383, 5418), 'itertools.compress', 'compress', (['textures', 'textures_select'], {}), '(textures, textures_select)\n', (5391, 5418), False, 'from itertools import compress\n')] |
import theano
import numpy
import os
from theano import tensor as T
from collections import OrderedDict
class model(object):
    """Elman-style recurrent network for sentence classification (Theano).

    A sentence is fed as a matrix of word indexes (one row per word, one
    column per context-window position).  Each window is embedded, passed
    through a simple recurrent layer, and a softmax over classes is read
    from the last word's output.
    """
    def __init__(self, nh, nc, ne, de, cs):
        '''
        nh :: dimension of the hidden layer
        nc :: number of classes
        ne :: number of word embeddings in the vocabulary
        de :: dimension of the word embeddings
        cs :: word window context size
        '''
        # parameters of the model (uniform init in [-0.2, 0.2], zero biases)
        self.emb = theano.shared(0.2 * numpy.random.uniform(-1.0, 1.0,\
                   (ne+1, de)).astype(theano.config.floatX)) # add one for PADDING at the end
        self.Wx = theano.shared(0.2 * numpy.random.uniform(-1.0, 1.0,\
                   (de * cs, nh)).astype(theano.config.floatX))
        self.Wh = theano.shared(0.2 * numpy.random.uniform(-1.0, 1.0,\
                   (nh, nh)).astype(theano.config.floatX))
        self.W = theano.shared(0.2 * numpy.random.uniform(-1.0, 1.0,\
                   (nh, nc)).astype(theano.config.floatX))
        self.bh = theano.shared(numpy.zeros(nh, dtype=theano.config.floatX))
        self.b = theano.shared(numpy.zeros(nc, dtype=theano.config.floatX))
        self.h0 = theano.shared(numpy.zeros(nh, dtype=theano.config.floatX))
        # bundle (order of params and names must match for save()/load())
        self.params = [ self.emb, self.Wx, self.Wh, self.W, self.bh, self.b, self.h0 ]
        self.names = ['embeddings', 'Wx', 'Wh', 'W', 'bh', 'b', 'h0']
        idxs = T.imatrix() # as many columns as context window size/lines as words in the sentence
        x = self.emb[idxs].reshape((idxs.shape[0], de*cs))
        y = T.iscalar('y') # label
        def recurrence(x_t, h_tm1):
            # One RNN step: new hidden state and per-class distribution.
            h_t = T.nnet.sigmoid(T.dot(x_t, self.Wx) + T.dot(h_tm1, self.Wh) + self.bh)
            s_t = T.nnet.softmax(T.dot(h_t, self.W) + self.b)
            return [h_t, s_t]
        [h, s], _ = theano.scan(fn=recurrence, \
            sequences=x, outputs_info=[self.h0, None], \
            n_steps=x.shape[0])
        p_y_given_x_lastword = s[-1,0,:]
        p_y_given_x_sentence = s[:,0,:]
        y_pred = T.argmax(p_y_given_x_sentence, axis=1)
        # cost and gradients and learning rate
        lr = T.scalar('lr')
        # negative log-likelihood of the label under the last word's output
        nll = -T.log(p_y_given_x_lastword)[y]
        gradients = T.grad( nll, self.params )
        # plain SGD update for every parameter
        updates = OrderedDict(( p, p-lr*g ) for p, g in zip( self.params , gradients))
        # theano functions
        self.classify = theano.function(inputs=[idxs], outputs=y_pred)
        self.pcost = theano.function( inputs = [idxs, y], outputs = nll)
        self.train = theano.function( inputs = [idxs, y, lr],
                                      outputs = nll,
                                      updates = updates )
        # re-project each embedding row onto the unit sphere (L2 normalisation)
        self.normalize = theano.function( inputs = [],
                         updates = {self.emb:\
                         self.emb/T.sqrt((self.emb**2).sum(axis=1)).dimshuffle(0,'x')})
    def save(self, folder):
        # Persist every parameter as <folder>/<name>.npy.
        for param, name in zip(self.params, self.names):
            numpy.save(os.path.join(folder, name + '.npy'), param.get_value())
    def load(self, folder):
        # Restore parameters previously written by save().
        for param, name in zip(self.params, self.names):
            param.set_value(numpy.load(os.path.join(folder, name + '.npy')))
| [
"numpy.random.uniform",
"theano.tensor.log",
"theano.tensor.iscalar",
"os.path.join",
"theano.function",
"theano.tensor.dot",
"numpy.zeros",
"theano.tensor.imatrix",
"theano.scan",
"theano.tensor.grad",
"theano.tensor.argmax",
"theano.tensor.scalar"
] | [((1444, 1455), 'theano.tensor.imatrix', 'T.imatrix', ([], {}), '()\n', (1453, 1455), True, 'from theano import tensor as T\n'), ((1602, 1616), 'theano.tensor.iscalar', 'T.iscalar', (['"""y"""'], {}), "('y')\n", (1611, 1616), True, 'from theano import tensor as T\n'), ((1863, 1956), 'theano.scan', 'theano.scan', ([], {'fn': 'recurrence', 'sequences': 'x', 'outputs_info': '[self.h0, None]', 'n_steps': 'x.shape[0]'}), '(fn=recurrence, sequences=x, outputs_info=[self.h0, None],\n n_steps=x.shape[0])\n', (1874, 1956), False, 'import theano\n'), ((2080, 2118), 'theano.tensor.argmax', 'T.argmax', (['p_y_given_x_sentence'], {'axis': '(1)'}), '(p_y_given_x_sentence, axis=1)\n', (2088, 2118), True, 'from theano import tensor as T\n'), ((2180, 2194), 'theano.tensor.scalar', 'T.scalar', (['"""lr"""'], {}), "('lr')\n", (2188, 2194), True, 'from theano import tensor as T\n'), ((2261, 2285), 'theano.tensor.grad', 'T.grad', (['nll', 'self.params'], {}), '(nll, self.params)\n', (2267, 2285), True, 'from theano import tensor as T\n'), ((2435, 2481), 'theano.function', 'theano.function', ([], {'inputs': '[idxs]', 'outputs': 'y_pred'}), '(inputs=[idxs], outputs=y_pred)\n', (2450, 2481), False, 'import theano\n'), ((2504, 2550), 'theano.function', 'theano.function', ([], {'inputs': '[idxs, y]', 'outputs': 'nll'}), '(inputs=[idxs, y], outputs=nll)\n', (2519, 2550), False, 'import theano\n'), ((2579, 2646), 'theano.function', 'theano.function', ([], {'inputs': '[idxs, y, lr]', 'outputs': 'nll', 'updates': 'updates'}), '(inputs=[idxs, y, lr], outputs=nll, updates=updates)\n', (2594, 2646), False, 'import theano\n'), ((1052, 1095), 'numpy.zeros', 'numpy.zeros', (['nh'], {'dtype': 'theano.config.floatX'}), '(nh, dtype=theano.config.floatX)\n', (1063, 1095), False, 'import numpy\n'), ((1130, 1173), 'numpy.zeros', 'numpy.zeros', (['nc'], {'dtype': 'theano.config.floatX'}), '(nc, dtype=theano.config.floatX)\n', (1141, 1173), False, 'import numpy\n'), ((1208, 1251), 'numpy.zeros', 
'numpy.zeros', (['nh'], {'dtype': 'theano.config.floatX'}), '(nh, dtype=theano.config.floatX)\n', (1219, 1251), False, 'import numpy\n'), ((2210, 2237), 'theano.tensor.log', 'T.log', (['p_y_given_x_lastword'], {}), '(p_y_given_x_lastword)\n', (2215, 2237), True, 'from theano import tensor as T\n'), ((3035, 3070), 'os.path.join', 'os.path.join', (['folder', "(name + '.npy')"], {}), "(folder, name + '.npy')\n", (3047, 3070), False, 'import os\n'), ((1783, 1801), 'theano.tensor.dot', 'T.dot', (['h_t', 'self.W'], {}), '(h_t, self.W)\n', (1788, 1801), True, 'from theano import tensor as T\n'), ((3219, 3254), 'os.path.join', 'os.path.join', (['folder', "(name + '.npy')"], {}), "(folder, name + '.npy')\n", (3231, 3254), False, 'import os\n'), ((494, 539), 'numpy.random.uniform', 'numpy.random.uniform', (['(-1.0)', '(1.0)', '(ne + 1, de)'], {}), '(-1.0, 1.0, (ne + 1, de))\n', (514, 539), False, 'import numpy\n'), ((660, 706), 'numpy.random.uniform', 'numpy.random.uniform', (['(-1.0)', '(1.0)', '(de * cs, nh)'], {}), '(-1.0, 1.0, (de * cs, nh))\n', (680, 706), False, 'import numpy\n'), ((796, 837), 'numpy.random.uniform', 'numpy.random.uniform', (['(-1.0)', '(1.0)', '(nh, nh)'], {}), '(-1.0, 1.0, (nh, nh))\n', (816, 837), False, 'import numpy\n'), ((927, 968), 'numpy.random.uniform', 'numpy.random.uniform', (['(-1.0)', '(1.0)', '(nh, nc)'], {}), '(-1.0, 1.0, (nh, nc))\n', (947, 968), False, 'import numpy\n'), ((1695, 1714), 'theano.tensor.dot', 'T.dot', (['x_t', 'self.Wx'], {}), '(x_t, self.Wx)\n', (1700, 1714), True, 'from theano import tensor as T\n'), ((1717, 1738), 'theano.tensor.dot', 'T.dot', (['h_tm1', 'self.Wh'], {}), '(h_tm1, self.Wh)\n', (1722, 1738), True, 'from theano import tensor as T\n')] |
import io
import numpy as np
import six
from typing import Dict, List, Optional, Union
class WordEmbedder:
    """ splits one or more texts into words,
    maps words to word indexes and
    maps word indexes to word embedding vectors.
    Although the task of embedding words could also be accomplished by using an embedding layer of a Keras neural
    network model instead of this class, integrating the embedding into each model as a layer does not exploit the
    potential to share the embedding lookup table between multiple models in order to save main memory when keeping
    around a larger number of models at the same time.
    """
    def __init__(self,
                 embedding_file_path: str,
                 embedding_limit: Optional[int] = None,
                 embedding_sequence_length: int = 3000
                 ):
        """
        :param embedding_file_path:
            absolute file system path to a non-compressed embedding data file from
            https://fasttext.cc/docs/en/crawl-vectors.html
            in text format (file name suffix '.vec')
        :param embedding_limit:
            the maximum number of word embeddings to read and use from embedding_path, None means 'no limit'
        :param embedding_sequence_length:
            the fixed number of word vectors to return for a given input text,
            if the input text has more words, excess words will be truncated at the end,
            if the input text has fewer words, zero vectors will be padded at the end
        """
        self.__embedding_sequence_length: int = embedding_sequence_length
        self.__word_2_index_dict, self.__embedding_matrix, almost_only_lower_case_words = \
            self.__create_word_index_dict_and_embedding_matrix(embedding_file_path, embedding_limit)
        # If the loaded vocabulary is (almost) entirely lower case, lower-case
        # all input texts too so lookups still hit.
        self.__convert_texts_to_lower_case = almost_only_lower_case_words
        if self.__convert_texts_to_lower_case:
            print("will convert all input texts to lower case before looking up their word embeddings")
        self.__embedding_dim: int = self.__embedding_matrix.shape[1]
        # Reused for the padding columns added around each embedded text.
        self.__zero_embedding_vector = np.zeros(shape=self.__embedding_dim, dtype='float32')
    def embedding_dim(self) -> int:
        """
        :return: the number of dimensions of the embedding vectors, e. g. 300, depends on the embedding file used
        """
        return self.__embedding_dim
    def embed_texts(self, texts: List[str]):
        """
        returns embeddings of one or more texts.
        :param texts: a list of texts to tokenize and embed
        :return: result is a (3, len(texts), embedding_sequence_length, embedding_dim) float32 numpy.ndarray (tensor).
            result[0] contains all embedded texts.
            result[1] contains all embedded left contexts, i. e. embedded texts shifted by one word to the right.
            result[2] contains all embedded right contexts, i. e. embedded words shifted by one word to the left.
        """
        padded_token_vectors = self.tokenize_texts(texts)
        embedded_texts = self.embed_token_vectors(padded_token_vectors)
        return embedded_texts
    def tokenize_texts(self, texts: List[str]):
        """
        returns fixed sized token/word index sequences of one or more texts
        :param texts: the texts to tokenize
        :return: numpy.ndarray of numpy.ndarrays of ints, plug these into embed_token_vectors to get embeddings
        """
        token_vectors = self.__texts_to_word_index_lists(texts, lower=self.__convert_texts_to_lower_case)
        padded_token_vectors = self.__pad_sequences(
            token_vectors, maxlen=self.__embedding_sequence_length, padding='post', truncating='post', value=0)
        return padded_token_vectors
    def embed_token_vector(self, token_vector):
        """
        returns embedding of a single token vector. A token is an int index into the embedding matrix.
        :param token_vector: numpy.ndarray of tokens
        :return: result is a (3, 1, token_vector.shape[0], embedding_dim) float32 tensor.
            result[0] contains the embedded text.
            result[1] contains the embedded left context, i. e. embedded tokens shifted by one token to the right.
            result[2] contains the embedded right context, i. e. embedded tokens shifted by one token to the left.
        """
        token_vectors = np.asarray([token_vector])
        embedded_token_vectors = self.embed_token_vectors(token_vectors)
        return embedded_token_vectors
    def embed_token_vectors(self, token_vectors):
        """
        returns embeddings of token vectors. A token is an int index into the embedding matrix.
        :param token_vectors: numpy.ndarray of numpy.ndarrays of tokens
        :return: result is a (3, token_vectors.shape[0], token_vectors.shape[1], embedding_dim) float32 tensor.
            result[0] contains all embedded texts.
            result[1] contains all embedded left contexts, i. e. embedded tokens shifted by one token to the right.
            result[2] contains all embedded right contexts, i. e. embedded tokens shifted by one token to the left.
        """
        num_words: int = token_vectors.shape[1] # e.g. 3000
        # Allocate one extra zero vector on each side of every text so that
        # the left/right context results below are plain shifted slices
        # (views) of the very same buffer — no extra copies needed.
        embedding_tensor = np.empty(
            shape=(token_vectors.shape[0], num_words + 2, self.__embedding_dim), dtype='float32')
        for i, token_vector in enumerate(token_vectors):
            embedding_tensor[i, 0] = self.__zero_embedding_vector
            embedding_tensor[i, 1:num_words + 1] = self.__embedding_matrix.take(token_vector, axis=0)
            embedding_tensor[i, num_words + 1] = self.__zero_embedding_vector
        # slice 3 views from the batch_embedding_tensor
        embedded_tensor_with_contexts = [
            embedding_tensor[:, 1:-1], # all embedded texts
            embedding_tensor[:, 0:-2], # all embedded left contexts
            embedding_tensor[:, 2:] # all embedded right contexts
        ]
        return embedded_tensor_with_contexts
    def __create_word_index_dict_and_embedding_matrix(
            self, embedding_file_path: str, embedding_limit: Optional[int] = None):
        """
        loads the embedding data into a dictionary mapping words to array indices (word indexes)
        and a numpy array holding a word vector numpy array per index (so-called embedding matrix)
        :param embedding_file_path: the path to the embedding file
        :param embedding_limit: the maximum number of embeddings to read from the embedding file
        :return: the word to array index dictionary,
            the embedding matrix numpy array,
            whether the dictionary contains (almost) only lower case words
        """
        number_of_non_lower_case_words: int = 0
        embedding_file = io.open(embedding_file_path, 'r', encoding='utf-8', newline='\n', errors='strict')
        # First line of a .vec file holds "<vocab size> <vector dimension>".
        number_of_embeddings, embedding_dim = map(int, embedding_file.readline().split())
        if embedding_limit is not None:
            number_of_embeddings: int = min(number_of_embeddings, embedding_limit)
        print(f"loading up to {number_of_embeddings} word embeddings...")
        word_2_index_dict: Dict[str, int] = {}
        embedding_matrix = np.empty((number_of_embeddings, embedding_dim), dtype='float32')
        embedding_matrix[0] = np.zeros(shape=embedding_dim, dtype='float32')  # 0 -> zero vector
        # index 0 is reserved as padding, so real words start at index 1
        next_word_index: int = 1
        for line in embedding_file:
            if next_word_index >= number_of_embeddings: # reached the limit or end of file
                break
            tokens = line.rstrip().split(' ')
            if len(tokens) != embedding_dim + 1:
                print(f"WARN: skipped unexpected line in embedding file: {line}")
                continue # line does not have the expected number of tokens
            word = tokens[0]
            if word is not None and len(word) > 0:
                word_sequence = self.__text_to_word_list(word)
                if len(word_sequence) == 1:
                    # word from embedding is a single word with respect to our own word splitting method
                    if word_sequence[0].lower() != word_sequence[0]:
                        number_of_non_lower_case_words += 1
                    word_2_index_dict[word_sequence[0]] = next_word_index
                    embedding_matrix[next_word_index] = np.asarray(tokens[1:], dtype='float32')
                    next_word_index += 1
                else:
                    #print(f"skipping line with compound word {word} because it splits into {word_sequence}")
                    pass
        embedding_file.close()
        print(f"loaded {next_word_index - 1} word embeddings")
        non_lower_case_percentage: float = 100 * number_of_non_lower_case_words / max(next_word_index, 1)
        if non_lower_case_percentage < 5:
            almost_only_lower_case_words = True
            print("loaded less than 5% non-lower-case words from embedding file")
        else:
            almost_only_lower_case_words = False
            print(f"loaded {non_lower_case_percentage:.2f}% non-lower case words from embedding file")
        return word_2_index_dict, embedding_matrix, almost_only_lower_case_words
    def __texts_to_word_index_lists(self, texts: List[str], lower: bool = False) -> List[List[int]]:
        """
        transforms each text in texts to a list of word indexes (integers).
        Only words available in the word_2_index dictionary will be taken into account.
        :param texts: a list of texts (strings)
        :return: a list of lists of word indexes (ints)
        """
        return list(self.__texts_to_word_index_lists_generator(texts, lower=lower))
    def __texts_to_word_index_lists_generator(self, texts: List[str], lower: bool = False) -> List[List[int]]:
        """
        transforms each text in texts to a list of word indexes (integers).
        Only words available in the word_2_index dictionary will be taken into account.
        :param texts: a list of texts (strings)
        :return: yields individual lists of word indexes (ints)
        """
        for text in texts:
            word_sequence = self.__text_to_word_list(text, lower=lower)
            word_index_sequence = []
            for word in word_sequence:
                word_index = self.__word_2_index_dict.get(word)
                if word_index is not None:
                    word_index_sequence.append(word_index)
            yield word_index_sequence
    @staticmethod
    def __text_to_word_list(text: str,
                            filters: Union[List[str], str] = '!"#$%&()*+,-./:;<=>?@[\\]^_`{|}~\t\n\r',
                            lower: bool = False,
                            split: str = ' ') -> List[str]:
        """
        splits a text into a list of words (also known as 'tokens').
        # Arguments
        :param text: Input text (string).
        :param filters: list (or concatenation) of characters to filter out, such as punctuation
        :param lower: Whether to convert the input to lowercase.
        :param split: Separator for word splitting.
        :return: a list of words.
        """
        if lower:
            text = text.lower()
        # Replace every filtered character with the split character, then split.
        translate_dict = dict((c, split) for c in filters)
        translate_map = str.maketrans(translate_dict)
        text = text.translate(translate_map)
        word_sequence = text.split(split)
        return [word for word in word_sequence if word]  # without empty words
    @staticmethod
    def __pad_sequences(sequences, maxlen=None, dtype='int32',
                        padding='pre', truncating='pre', value=0.):
        """Pads sequences to the same length.
        This function is a copy from Keras 2.3/keras_preprocessing/sequence.py
        in order to get rid of the full Keras dependency.
        This function transforms a list of
        `num_samples` sequences (lists of integers)
        into a 2D Numpy array of shape `(num_samples, num_timesteps)`.
        `num_timesteps` is either the `maxlen` argument if provided,
        or the length of the longest sequence otherwise.
        Sequences that are shorter than `num_timesteps`
        are padded with `value` at the beginning or the end
        if padding='post.
        Sequences longer than `num_timesteps` are truncated
        so that they fit the desired length.
        The position where padding or truncation happens is determined by
        the arguments `padding` and `truncating`, respectively.
        Pre-padding is the default.
        # Arguments
            sequences: List of lists, where each element is a sequence.
            maxlen: Int, maximum length of all sequences.
            dtype: Type of the output sequences.
                To pad sequences with variable length strings, you can use `object`.
            padding: String, 'pre' or 'post':
                pad either before or after each sequence.
            truncating: String, 'pre' or 'post':
                remove values from sequences larger than
                `maxlen`, either at the beginning or at the end of the sequences.
            value: Float or String, padding value.
        # Returns
            x: Numpy array with shape `(len(sequences), maxlen)`
        # Raises
            ValueError: In case of invalid values for `truncating` or `padding`,
                or in case of invalid shape for a `sequences` entry.
        """
        if not hasattr(sequences, '__len__'):
            raise ValueError('`sequences` must be iterable.')
        num_samples = len(sequences)
        lengths = []
        sample_shape = ()
        flag = True
        # take the sample shape from the first non empty sequence
        # checking for consistency in the main loop below.
        for x in sequences:
            try:
                lengths.append(len(x))
                if flag and len(x):
                    sample_shape = np.asarray(x).shape[1:]
                    flag = False
            except TypeError:
                raise ValueError('`sequences` must be a list of iterables. '
                                 'Found non-iterable: ' + str(x))
        if maxlen is None:
            maxlen = np.max(lengths)
        is_dtype_str = np.issubdtype(dtype, np.str_) or np.issubdtype(dtype, np.unicode_)
        if isinstance(value, six.string_types) and dtype != object and not is_dtype_str:
            raise ValueError("`dtype` {} is not compatible with `value`'s type: {}\n"
                             "You should set `dtype=object` for variable length strings."
                             .format(dtype, type(value)))
        x = np.full((num_samples, maxlen) + sample_shape, value, dtype=dtype)
        for idx, s in enumerate(sequences):
            if not len(s):
                continue # empty list/array was found
            if truncating == 'pre':
                trunc = s[-maxlen:]
            elif truncating == 'post':
                trunc = s[:maxlen]
            else:
                raise ValueError('Truncating type "%s" '
                                 'not understood' % truncating)
            # check `trunc` has expected shape
            trunc = np.asarray(trunc, dtype=dtype)
            if trunc.shape[1:] != sample_shape:
                raise ValueError('Shape of sample %s of sequence at position %s '
                                 'is different from expected shape %s' %
                                 (trunc.shape[1:], idx, sample_shape))
            if padding == 'post':
                x[idx, :len(trunc)] = trunc
            elif padding == 'pre':
                x[idx, -len(trunc):] = trunc
            else:
                raise ValueError('Padding type "%s" not understood' % padding)
        return x
| [
"numpy.full",
"numpy.empty",
"numpy.asarray",
"numpy.zeros",
"numpy.max",
"io.open",
"numpy.issubdtype"
] | [((2159, 2212), 'numpy.zeros', 'np.zeros', ([], {'shape': 'self.__embedding_dim', 'dtype': '"""float32"""'}), "(shape=self.__embedding_dim, dtype='float32')\n", (2167, 2212), True, 'import numpy as np\n'), ((4429, 4455), 'numpy.asarray', 'np.asarray', (['[token_vector]'], {}), '([token_vector])\n', (4439, 4455), True, 'import numpy as np\n'), ((5310, 5409), 'numpy.empty', 'np.empty', ([], {'shape': '(token_vectors.shape[0], num_words + 2, self.__embedding_dim)', 'dtype': '"""float32"""'}), "(shape=(token_vectors.shape[0], num_words + 2, self.__embedding_dim\n ), dtype='float32')\n", (5318, 5409), True, 'import numpy as np\n'), ((6858, 6945), 'io.open', 'io.open', (['embedding_file_path', '"""r"""'], {'encoding': '"""utf-8"""', 'newline': '"""\n"""', 'errors': '"""strict"""'}), "(embedding_file_path, 'r', encoding='utf-8', newline='\\n', errors=\n 'strict')\n", (6865, 6945), False, 'import io\n'), ((7302, 7366), 'numpy.empty', 'np.empty', (['(number_of_embeddings, embedding_dim)'], {'dtype': '"""float32"""'}), "((number_of_embeddings, embedding_dim), dtype='float32')\n", (7310, 7366), True, 'import numpy as np\n'), ((7397, 7443), 'numpy.zeros', 'np.zeros', ([], {'shape': 'embedding_dim', 'dtype': '"""float32"""'}), "(shape=embedding_dim, dtype='float32')\n", (7405, 7443), True, 'import numpy as np\n'), ((14730, 14795), 'numpy.full', 'np.full', (['((num_samples, maxlen) + sample_shape)', 'value'], {'dtype': 'dtype'}), '((num_samples, maxlen) + sample_shape, value, dtype=dtype)\n', (14737, 14795), True, 'import numpy as np\n'), ((14287, 14302), 'numpy.max', 'np.max', (['lengths'], {}), '(lengths)\n', (14293, 14302), True, 'import numpy as np\n'), ((14327, 14356), 'numpy.issubdtype', 'np.issubdtype', (['dtype', 'np.str_'], {}), '(dtype, np.str_)\n', (14340, 14356), True, 'import numpy as np\n'), ((14360, 14393), 'numpy.issubdtype', 'np.issubdtype', (['dtype', 'np.unicode_'], {}), '(dtype, np.unicode_)\n', (14373, 14393), True, 'import numpy as np\n'), ((15275, 
15305), 'numpy.asarray', 'np.asarray', (['trunc'], {'dtype': 'dtype'}), '(trunc, dtype=dtype)\n', (15285, 15305), True, 'import numpy as np\n'), ((8452, 8491), 'numpy.asarray', 'np.asarray', (['tokens[1:]'], {'dtype': '"""float32"""'}), "(tokens[1:], dtype='float32')\n", (8462, 8491), True, 'import numpy as np\n'), ((14008, 14021), 'numpy.asarray', 'np.asarray', (['x'], {}), '(x)\n', (14018, 14021), True, 'import numpy as np\n')] |
import numpy as np
from PIL import Image, ImageEnhance
def prepare_image(path, width, brightness):
    """
    Load an image and return it as a grayscale numpy array of the given width.

    The image is converted to 8-bit grayscale ('L'), its brightness is scaled
    by ``brightness`` (1.0 = unchanged, <1 darker, >1 brighter) and it is
    resized to ``width`` pixels while preserving the aspect ratio.

    Parameters
    ----------
    path : str
        Path to the image file.
    width : int
        Target width in pixels; the height is derived from the aspect ratio.
    brightness : float
        Brightness multiplier passed to ``ImageEnhance.Brightness``.

    Returns
    -------
    numpy.ndarray
        2-D grayscale array of shape (height, width).
    """
    # Use a context manager so the underlying file handle is closed promptly
    # instead of being leaked until garbage collection (Image.open is lazy
    # and keeps the file open).  convert() loads the pixel data, so the
    # resulting image stays valid after the file is closed.
    with Image.open(path) as img:
        gray = img.convert('L')
    bright = ImageEnhance.Brightness(gray).enhance(brightness)
    w, h = bright.size
    height = int(h * (width / w))
    return np.array(bright.resize((width, height)))
| [
"PIL.ImageEnhance.Brightness",
"numpy.array",
"PIL.Image.open"
] | [((157, 185), 'PIL.ImageEnhance.Brightness', 'ImageEnhance.Brightness', (['img'], {}), '(img)\n', (180, 185), False, 'from PIL import Image, ImageEnhance\n'), ((349, 366), 'numpy.array', 'np.array', (['new_img'], {}), '(new_img)\n', (357, 366), True, 'import numpy as np\n'), ((111, 127), 'PIL.Image.open', 'Image.open', (['path'], {}), '(path)\n', (121, 127), False, 'from PIL import Image, ImageEnhance\n')] |
import numpy as np
import sys
from gym import Env, spaces
from io import StringIO
import numpy
import random
import matplotlib.pyplot as plt
from matplotlib.colors import LinearSegmentedColormap as cmap
# Default knobs forwarded by MazeEnv to build_maze, which scales them to the
# maze size (complexity -> number of wall-growing steps, density -> islands).
COMPLEXITY = 1.
DENSITY = 1.
def build_maze(width=81, height=51, complexity=.75, density=.75, seed=42):
# local random number generator using the seed specified
rng = random.Random(seed)
# Only odd shapes
shape = ((height // 2) * 2 + 1, (width // 2) * 2 + 1)
# Adjust complexity and density relative to maze size
complexity = int(complexity * (5 * (shape[0] + shape[1]))) # number of components
density = int(density * ((shape[0] // 2) * (shape[1] // 2))) # size of components
# Build actual maze
Z = numpy.zeros(shape, dtype=int)
# Fill borders
Z[0, :] = Z[-1, :] = 1
Z[:, 0] = Z[:, -1] = 1
# Make islands
for i in range(density):
x, y = rng.randint(0, shape[1] // 2) * 2, rng.randint(0, shape[0] // 2) * 2 # pick a random position
Z[y, x] = 1
for j in range(complexity):
neighbours = []
if x > 1:
neighbours.append((y, x - 2))
if x < shape[1] - 2:
neighbours.append((y, x + 2))
if y > 1:
neighbours.append((y - 2, x))
if y < shape[0] - 2:
neighbours.append((y + 2, x))
if len(neighbours):
y_,x_ = neighbours[rng.randint(0, len(neighbours) - 1)]
if Z[y_, x_] == 0:
Z[y_, x_] = 1
Z[y_ + (y - y_) // 2, x_ + (x - x_) // 2] = 1
x, y = x_, y_
# MAKE SURE THE STARTING POINT AND THE ENDING POINT ARE FREE
Z[1, 1] = Z[-2, -2] = 0
return Z
# Action codes: indices into the ACTIONS displacement table below.
UP = 0
RIGHT = 1
DOWN = 2
LEFT = 3
# ACTIONS = [
#     np.array([-1, 0]), # UP
#     np.array([0, 1]), # RIGHT
#     np.array([1, 0]), # DOWN
#     np.array([0, -1]), # LEFT
# ]
# (row, col) displacement applied by each action code above.
ACTIONS = [
    (-1, 0), # UP
    (0, 1), # RIGHT
    (1, 0), # DOWN
    (0, -1), # LEFT
]
# RGBA colours for MazeEnv's plot rendering; list positions match the cell
# codes written into the rendered maze (0 floor, 1 wall, 2 start, 3 target,
# 4 agent).
cmaplist = [
    (1., 1., 1., 1.), # white floor
    (0., 0., 0., 1.), # black walls
    (0., 0., 1., 1.), # blue start
    (1., 0., 0., 1.), # red target
    (0., 1., 0., 1.), # green agent
]
colormap = cmap.from_list('maze', cmaplist, len(cmaplist))
class MazeEnv(Env):
    """Discrete grid-maze environment with a gym interface.

    States are (row, col) tuples on a maze generated by ``build_maze``;
    the agent starts at (1, 1) and must reach the opposite corner.
    Reward is -1 per step and 0 on reaching the goal, which also ends
    the episode.  Bumping into a wall leaves the agent in place.
    """
    metadata = {'render.modes': ['human', 'ansi']}
    def __init__(self, rows=10, cols=10, maze_seed=42, complexity=COMPLEXITY, density=DENSITY):
        super(MazeEnv, self).__init__()
        # Cells alternate wall/corridor, hence the odd 2*s+1 sizing.
        shape = [2*s+1 for s in [rows, cols]]
        self._maze_seed = maze_seed
        self.complexity = complexity
        self.density = density
        self.shape = shape
        self.nS = np.prod(shape)
        self.nA = 4
        self.ACTIONS = ACTIONS
        # BUGFIX: build_maze takes (width, height), i.e. (columns, rows).
        # Passing shape[0]/shape[1] positionally transposed the maze for
        # non-square sizes and made self.end index out of bounds whenever
        # rows > cols.  Pass them by keyword in the correct roles.
        self.maze = build_maze(width=self.shape[1],
                               height=self.shape[0],
                               complexity=self.complexity,
                               density=self.density,
                               seed=self._maze_seed)
        self.s = None
        self.lastaction = None
        self._rendered_maze = None
        self.action_space = spaces.Discrete(self.nA)
        self.observation_space = spaces.Discrete(self.nS)
        self.start = (1, 1)
        self.end = (shape[0]-2, shape[1]-2)
        # self.seed(seed)
        self.reset()
    # def seed(self, seed=None):
    #     if seed is not None:
    #         self._rndseed = seed
    #     return self._rndseed
    def get_params(self):
        """Return (rows, cols, maze_seed, complexity, density) of this maze."""
        return (self.shape[0]//2,
                self.shape[1]//2,
                self._maze_seed,
                float(self.complexity),
                float(self.density))
    def get_name(self):
        """Return a unique string identifier built from the maze parameters."""
        return "Maze_({},{},{},{},{})".format(*self.get_params())
    def reset(self):
        """Put the agent back on the start cell and return the initial state."""
        assert self.maze[self.start] == 0
        assert self.maze[self.end] == 0
        self.s = self.start
        self.lastaction = None
        # Drop any open rendering window from the previous episode.
        if self._rendered_maze is not None:
            plt.close(self._rendered_maze[0])
            self._rendered_maze = None
        return self.s
    def step(self, a):
        """Apply action ``a`` and return (state, reward, done, info).

        Moving into a wall keeps the agent in place; reward is -1 per
        step and 0 once the goal is reached (also when stepping while
        already on the goal).
        """
        self.lastaction = a
        if self.s == self.end:
            return self.s, 0, True, {}
        dy, dx = self.ACTIONS[a]
        y, x = self.s
        x += dx
        y += dy
        s = y, x
        # Only move onto free cells (0); walls (1) block the move.
        if self.maze[s] == 0:
            self.s = s
        d = self.s == self.end
        r = 0 if d else -1
        return self.s, r, d, {}
    def render(self, mode='plot', close=False):
        """Draw the maze with matplotlib ('plot') or as text ('ansi'/stdout).

        NOTE(review): the default mode 'plot' is not listed in
        metadata['render.modes'], and in 'ansi' mode the StringIO buffer
        is written to but never returned — confirm both are intended.
        """
        if close:
            return
        if mode == "plot":
            # Overlay start/target/agent markers on a copy of the maze and
            # draw it with the module-level colormap.
            maze = self.maze.copy()
            maze[self.start] = 2
            maze[self.end] = 3
            maze[self.s] = 4
            if self._rendered_maze is None:
                self._rendered_maze = plt.subplots(1, figsize=(10, 5))
            self._rendered_maze[1].cla()
            self._rendered_maze[1].imshow(maze, cmap=colormap, interpolation='nearest')
            self._rendered_maze[1].set_xticks([])
            self._rendered_maze[1].set_yticks([])
            self._rendered_maze[0].canvas.draw_idle()
            self._rendered_maze[0].show()
            plt.pause(0.01)
        else:
            outfile = StringIO() if mode == 'ansi' else sys.stdout
            for y in range(self.maze.shape[0]):
                for x in range(self.maze.shape[1]):
                    s = y, x
                    if self.s == s:
                        output = " x "
                    elif s == self.start:
                        output = " S "
                    elif s == self.end:
                        output = " T "
                    elif self.maze[s] == 1:
                        output = " # "
                    else:
                        output = " "
                    if x == 0:
                        output = output.lstrip()
                    if x == self.shape[1] - 1:
                        output = output.rstrip()
                    outfile.write(output)
                    if x == self.shape[1] - 1:
                        outfile.write("\n")
| [
"io.StringIO",
"random.Random",
"matplotlib.pyplot.close",
"numpy.zeros",
"gym.spaces.Discrete",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.pause",
"numpy.prod"
] | [((390, 409), 'random.Random', 'random.Random', (['seed'], {}), '(seed)\n', (403, 409), False, 'import random\n'), ((757, 786), 'numpy.zeros', 'numpy.zeros', (['shape'], {'dtype': 'int'}), '(shape, dtype=int)\n', (768, 786), False, 'import numpy\n'), ((2763, 2777), 'numpy.prod', 'np.prod', (['shape'], {}), '(shape)\n', (2770, 2777), True, 'import numpy as np\n'), ((3213, 3237), 'gym.spaces.Discrete', 'spaces.Discrete', (['self.nA'], {}), '(self.nA)\n', (3228, 3237), False, 'from gym import Env, spaces\n'), ((3271, 3295), 'gym.spaces.Discrete', 'spaces.Discrete', (['self.nS'], {}), '(self.nS)\n', (3286, 3295), False, 'from gym import Env, spaces\n'), ((4394, 4427), 'matplotlib.pyplot.close', 'plt.close', (['self._rendered_maze[0]'], {}), '(self._rendered_maze[0])\n', (4403, 4427), True, 'import matplotlib.pyplot as plt\n'), ((5724, 5739), 'matplotlib.pyplot.pause', 'plt.pause', (['(0.01)'], {}), '(0.01)\n', (5733, 5739), True, 'import matplotlib.pyplot as plt\n'), ((5353, 5385), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)'], {'figsize': '(10, 5)'}), '(1, figsize=(10, 5))\n', (5365, 5385), True, 'import matplotlib.pyplot as plt\n'), ((5777, 5787), 'io.StringIO', 'StringIO', ([], {}), '()\n', (5785, 5787), False, 'from io import StringIO\n')] |
from gluonts_forecasts.training_session import TrainingSession
from dku_constants import METRICS_DATASET
from datetime import datetime
import pandas as pd
import numpy as np
class TestCrossValidation:
    """End-to-end tests of TrainingSession's rolling-window cross-validation."""
    def setup_class(self):
        """Build a toy dataset: two timeseries of 8 six-hourly steps each."""
        self.df = pd.DataFrame(
            {
                "date": [
                    "2020-01-12 00:00:00",
                    "2020-01-12 06:00:00",
                    "2020-01-12 12:00:00",
                    "2020-01-12 18:00:00",
                    "2020-01-13 00:00:00",
                    "2020-01-13 06:00:00",
                    "2020-01-13 12:00:00",
                    "2020-01-13 18:00:00",
                    "2020-01-12 00:00:00",
                    "2020-01-12 06:00:00",
                    "2020-01-12 12:00:00",
                    "2020-01-12 18:00:00",
                    "2020-01-13 00:00:00",
                    "2020-01-13 06:00:00",
                    "2020-01-13 12:00:00",
                    "2020-01-13 18:00:00",
                ],
                "target": [1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8],
                "item": [1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2],
            }
        )
        # Parse the dates and strip any timezone information.
        self.df["date"] = pd.to_datetime(self.df["date"]).dt.tz_localize(tz=None)
        self.models_parameters = {
            "trivial_identity": {"activated": True, "method": "trivial_identity", "kwargs": {"num_samples": 100}},
        }
        self.session_name = datetime.utcnow().isoformat() + "Z"
        self.target_length = 8  # timesteps per timeseries in the toy dataset
    def setup_method(self):
        """Create and fully train a cross-validated session before every test."""
        self.training_session = TrainingSession(
            target_columns_names=["target"],
            time_column_name="date",
            frequency="6H",
            epoch=2,
            models_parameters=self.models_parameters,
            prediction_length=2,
            training_df=self.df,
            make_forecasts=True,
            external_features_columns_names=None,
            timeseries_identifiers_names=["item"],
            batch_size=32,
            user_num_batches_per_epoch=-1,
            timeseries_cross_validation=True,
            rolling_windows_number=3,
            cutoff_period=1,
        )
        self.training_session.init(self.session_name)
        self.training_session.create_gluon_list_datasets()
        self.training_session.instantiate_models()
        self.training_session.train_evaluate_models()
    def test_cut_lengths_train_test_pairs(self):
        """Each of the 3 rolling windows cuts a (train, test) tail of decreasing length."""
        expected_cut_lengths_train_test_pairs = [(4, 2), (3, 1), (2, 0)]
        assert (
            self.training_session.rolling_windows_cut_lengths_train_test_pairs == expected_cut_lengths_train_test_pairs
        )
    def test_gluon_list_datasets_by_cut_length(self):
        """Cutting N steps must leave target_length - N points in each dataset."""
        for cut_length, gluon_list_dataset in self.training_session.gluon_list_datasets_by_cut_length.items():
            assert len(gluon_list_dataset.list_data[0].get("target")) == self.target_length - cut_length
    def test_metrics_df(self):
        """Check the shape and aggregation semantics of the evaluation metrics."""
        metrics_df = self.training_session.get_evaluation_metrics_to_display()
        # metrics has 9 rows = 1 model * 1 overall aggregated rows + 1 model * 2 timeseries * 1 rolling windows aggregated row
        # + 1 model * 2 timeseries * 3 rolling windows
        assert metrics_df.shape == (9, 15)
        assert len(metrics_df[metrics_df[METRICS_DATASET.TARGET_COLUMN] == METRICS_DATASET.AGGREGATED_ROW].index) == 1
        rolling_windows_metrics = metrics_df[
            metrics_df[METRICS_DATASET.ROLLING_WINDOWS] != METRICS_DATASET.AGGREGATED_ROW
        ]
        assert np.array_equal(rolling_windows_metrics[METRICS_DATASET.ROLLING_WINDOWS], [0, 0, 1, 1, 2, 2])
        for identifier in [1, 2]:
            identifier_df = metrics_df[metrics_df["item"] == identifier]
            # Per-identifier aggregated MAPE equals the mean of the per-window MAPEs.
            aggregation = identifier_df[
                identifier_df[METRICS_DATASET.ROLLING_WINDOWS] == METRICS_DATASET.AGGREGATED_ROW
            ]["mape"].iloc[0]
            average = identifier_df[identifier_df[METRICS_DATASET.ROLLING_WINDOWS] != METRICS_DATASET.AGGREGATED_ROW][
                "mape"
            ].mean()
            assert round(aggregation, 4) == round(average, 4)
| [
"pandas.DataFrame",
"gluonts_forecasts.training_session.TrainingSession",
"datetime.datetime.utcnow",
"pandas.to_datetime",
"numpy.array_equal"
] | [((248, 786), 'pandas.DataFrame', 'pd.DataFrame', (["{'date': ['2020-01-12 00:00:00', '2020-01-12 06:00:00',\n '2020-01-12 12:00:00', '2020-01-12 18:00:00', '2020-01-13 00:00:00',\n '2020-01-13 06:00:00', '2020-01-13 12:00:00', '2020-01-13 18:00:00',\n '2020-01-12 00:00:00', '2020-01-12 06:00:00', '2020-01-12 12:00:00',\n '2020-01-12 18:00:00', '2020-01-13 00:00:00', '2020-01-13 06:00:00',\n '2020-01-13 12:00:00', '2020-01-13 18:00:00'], 'target': [1, 2, 3, 4, 5,\n 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8], 'item': [1, 1, 1, 1, 1, 1, 1, 1, 2, 2,\n 2, 2, 2, 2, 2, 2]}"], {}), "({'date': ['2020-01-12 00:00:00', '2020-01-12 06:00:00',\n '2020-01-12 12:00:00', '2020-01-12 18:00:00', '2020-01-13 00:00:00',\n '2020-01-13 06:00:00', '2020-01-13 12:00:00', '2020-01-13 18:00:00',\n '2020-01-12 00:00:00', '2020-01-12 06:00:00', '2020-01-12 12:00:00',\n '2020-01-12 18:00:00', '2020-01-13 00:00:00', '2020-01-13 06:00:00',\n '2020-01-13 12:00:00', '2020-01-13 18:00:00'], 'target': [1, 2, 3, 4, 5,\n 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8], 'item': [1, 1, 1, 1, 1, 1, 1, 1, 2, 2,\n 2, 2, 2, 2, 2, 2]})\n", (260, 786), True, 'import pandas as pd\n'), ((1581, 2010), 'gluonts_forecasts.training_session.TrainingSession', 'TrainingSession', ([], {'target_columns_names': "['target']", 'time_column_name': '"""date"""', 'frequency': '"""6H"""', 'epoch': '(2)', 'models_parameters': 'self.models_parameters', 'prediction_length': '(2)', 'training_df': 'self.df', 'make_forecasts': '(True)', 'external_features_columns_names': 'None', 'timeseries_identifiers_names': "['item']", 'batch_size': '(32)', 'user_num_batches_per_epoch': '(-1)', 'timeseries_cross_validation': '(True)', 'rolling_windows_number': '(3)', 'cutoff_period': '(1)'}), "(target_columns_names=['target'], time_column_name='date',\n frequency='6H', epoch=2, models_parameters=self.models_parameters,\n prediction_length=2, training_df=self.df, make_forecasts=True,\n external_features_columns_names=None, timeseries_identifiers_names=[\n 'item'], 
batch_size=32, user_num_batches_per_epoch=-1,\n timeseries_cross_validation=True, rolling_windows_number=3, cutoff_period=1\n )\n", (1596, 2010), False, 'from gluonts_forecasts.training_session import TrainingSession\n'), ((3552, 3648), 'numpy.array_equal', 'np.array_equal', (['rolling_windows_metrics[METRICS_DATASET.ROLLING_WINDOWS]', '[0, 0, 1, 1, 2, 2]'], {}), '(rolling_windows_metrics[METRICS_DATASET.ROLLING_WINDOWS], [0,\n 0, 1, 1, 2, 2])\n', (3566, 3648), True, 'import numpy as np\n'), ((1209, 1240), 'pandas.to_datetime', 'pd.to_datetime', (["self.df['date']"], {}), "(self.df['date'])\n", (1223, 1240), True, 'import pandas as pd\n'), ((1453, 1470), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (1468, 1470), False, 'from datetime import datetime\n')] |
""" $lic$
Copyright (c) 2016-2021, <NAME>
This program is free software: you can redistribute it and/or modify it under
the terms of the Modified BSD-3 License as published by the Open Source
Initiative.
This program is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
PARTICULAR PURPOSE. See the BSD-3 License for more details.
You should have received a copy of the Modified BSD-3 License along with this
program. If not, see <https://opensource.org/licenses/BSD-3-Clause>.
"""
from collections import OrderedDict
import functools
import os
import shutil
import sys
import unittest
import warnings
import numpy as np
import matplotlib
import matplotlib.font_manager as mlpfm
import matplotlib.pyplot as plt
import matplotlib.testing
import matplotlib.testing.compare as mplcmp
import matplotlib.ticker
import matplotlib.units
from matplotlib.testing.decorators import _image_directories
import pytest
import easypyplot.util
def remove_ticks_and_titles(figure):
    ''' Strip the suptitle, per-axes titles, and all tick labels from figure. '''
    figure.suptitle('')
    blank = matplotlib.ticker.NullFormatter()
    for axes in figure.get_axes():
        axes.set_title('')
        axis_objs = [axes.xaxis, axes.yaxis]
        # 3D axes additionally have a z axis; plain 2D axes do not.
        zaxis = getattr(axes, 'zaxis', None)
        if zaxis is not None:
            axis_objs.append(zaxis)
        for axis in axis_objs:
            axis.set_major_formatter(blank)
            axis.set_minor_formatter(blank)
def skip_if_without_fonts(fonts):
    ''' Skip the test unless at least one of the given fonts is installed. '''
    __tracebackhide__ = True  # pylint: disable=unused-variable
    for entry in fonts:
        # Use a unicode font name on Python 2; on Python 3 the `unicode`
        # builtin is gone (NameError) and the name is already a str.
        try:
            font_name = unicode(entry)
        except NameError:
            font_name = entry
        found = matplotlib.font_manager.findfont(font_name)
        props = matplotlib.font_manager.FontProperties(fname=found)
        if props.get_name() == font_name:
            # Found a matching font.
            return
    raise unittest.SkipTest('Skip because fonts {} is not in this system.'
                            .format(fonts))
def skip_if_without_tex():
    ''' Skip the test if the system does not have TeX. '''
    __tracebackhide__ = True  # pylint: disable=unused-variable
    with warnings.catch_warnings():
        # Escalate warnings to errors so checkdep_usetex's warnings are catchable.
        warnings.filterwarnings('error')
        try:
            tex_ok = matplotlib.checkdep_usetex(True)
        except UserWarning as warn:
            # The warning about using TeX with the Agg backend is benign.
            if 'Agg' in str(warn):
                return
            tex_ok = False
        except Warning:
            # Warnings were only escalated for the check above; any other
            # warning means the TeX check did not succeed.
            tex_ok = False
        if tex_ok:
            return
    raise unittest.SkipTest('Skip because Tex is not in this system.')
def sin_plot(axes, phi=0, fmt='', remove_text=True):
    ''' Plot one phase-shifted sine curve into the given axes. '''
    xs = np.linspace(0, 2 * np.pi, 500)
    ys = np.sin(xs + phi)
    plot_args = (xs, ys, fmt) if fmt else (xs, ys)
    axes.plot(*plot_args)
    if remove_text:
        remove_ticks_and_titles(axes.get_figure())
        return
    # Keeping text: make sure the configured fonts actually exist,
    # otherwise the image comparison would fail spuriously.
    families = matplotlib.rcParams['font.family']
    # `basestring` exists on Python 2 only; fall back to `str` on Python 3.
    try:
        string_types = basestring
    except NameError:
        string_types = str
    if isinstance(families, string_types):
        families = [families]
    fonts = sum([matplotlib.rcParams['font.{}'.format(f)]
                 for f in families], [])
    skip_if_without_fonts(fonts)
def setup():
    ''' Set up matplotlib for image-comparison tests.

    Selects the classic test style matching the installed matplotlib
    version and works around the Times-bold font-matching bug for
    2.0 <= version < 3.2.  Returns (original_units_registry,
    original_settings) so ``teardown`` can restore the global state.
    '''
    plt.close('all')
    # Snapshot global state that the tests (and this setup) will mutate.
    original_units_registry = matplotlib.units.registry.copy()
    original_settings = matplotlib.rcParams.copy()
    ver = easypyplot.util.matplotlib_version_tuple()
    # Setup routine introduced from 1.5.
    if ver >= (1, 5):
        matplotlib.testing.setup()
    # Style name has changed over matplotlib versions.
    # See changes from <matplotlib repo>/lib/matplotlib/testing/conftest.py:mpl_test_settings()
    if ver >= (3, 2):
        matplotlib.style.use(['classic', '_classic_test_patch'])
    elif ver >= (2, 0):
        matplotlib.style.use('_classic_test')
    elif ver >= (1, 5):
        matplotlib.style.use('classic')
    # Times bold bug.
    # https://stackoverflow.com/questions/33955900
    # https://github.com/matplotlib/matplotlib/issues/5574
    #
    # The proposed solution simply removes `'roman'` from `weight_dict`, which
    # may cause `KeyError` when `weight_dict['roman']` is used in other places.
    #
    # Instead we order `'roman'` after `'bold'`, so when matching weights,
    # "Times New Roman Bold" will first match `'bold'` and get its weight.
    #
    # Also we need to update the shortcut module method `findfont`. It may
    # already be imported in other modules, so instead of update its value, we
    # also need to update the font lists of the previous font manager, so that
    # those already imported can also use the new font lists.
    #
    # This bug was fixed since v3.2, through https://github.com/matplotlib/matplotlib/pull/14483.
    if (3, 2) > ver >= (2, 0):
        # Order `'bold'` before `'roman'` in the weight list.
        roman_weight = mlpfm.weight_dict.pop('roman', None)
        mlpfm.weight_dict = OrderedDict(mlpfm.weight_dict)
        if roman_weight is not None:
            mlpfm.weight_dict['roman'] = roman_weight
        # Rebuild font manager and update existing font lists.
        fm = mlpfm.fontManager
        # _rebuild() was removed since v3.4, in commit a0065c30ae7abbaa4eef8394c97f74d93da3f2b0.
        mlpfm._rebuild()  # pylint: disable=protected-access
        mlpfm.findfont = mlpfm.fontManager.findfont
        # Point the old manager's lists at the rebuilt ones so modules that
        # already hold a reference to it see the reordered fonts too.
        fm.ttflist = mlpfm.fontManager.ttflist
        fm.afmlist = mlpfm.fontManager.afmlist
    return original_units_registry, original_settings
def teardown(origs):
    ''' Restore the matplotlib state captured by setup(). '''
    units_backup, rcparams_backup = origs
    plt.close('all')
    # Put rcParams and the units registry back to their pre-test contents.
    matplotlib.rcParams.clear()
    matplotlib.rcParams.update(rcparams_backup)
    matplotlib.units.registry.clear()
    matplotlib.units.registry.update(units_backup)
    warnings.resetwarnings()
class _ImageComparisonBase(unittest.TestCase):
    '''
    Base TestCase class used to replace original test function.
    We use a different class for each individual test function, and use
    different tests to compare different image extensions. Thus, the setup and
    teardown methods are class-level fixtures, and the original test function
    to plot the figure is also executed in this class-level setup method (after
    derived).
    '''
    @classmethod
    def setUpClass(cls):
        # Capture global matplotlib state and locate the image directories.
        cls.origs = setup()
        cls.baseline_dir, cls.result_dir = cls._image_directories()
    @classmethod
    def tearDownClass(cls):
        teardown(cls.origs)
    @staticmethod
    def mark_extension(extension):
        ''' Skip the test if `extension` cannot be compared on this system. '''
        __tracebackhide__ = True  # pylint: disable=unused-variable
        if extension not in mplcmp.comparable_formats():
            raise unittest.SkipTest('Cannot compare {} files in this '
                                    'system'.format(extension))
        if extension == 'svg' and easypyplot.util.matplotlib_version_tuple() < (2, 1):
            raise unittest.SkipTest('Cannot compare svg files due to incompatible '
                                    'Inkscape command line interface change of '
                                    '--export-png.')
    def compare(self, baseline_images, actual_suffix, extension, tol):
        ''' Compare actual images with baseline images within tolerance `tol`. '''
        __tracebackhide__ = True  # pylint: disable=unused-variable
        cls = self.__class__
        for baseline in baseline_images:
            actual_fname = os.path.join(
                cls.result_dir, baseline + actual_suffix + '.' + extension)
            self.assertTrue(os.path.exists(actual_fname),
                            'Image does not exist: {}'.format(actual_fname))
            expected_fname = self._copy_baseline(baseline, extension)
            self.assertTrue(os.path.exists(expected_fname),
                            'Image does not exist: {}'.format(expected_fname))
            err = mplcmp.compare_images(expected_fname, actual_fname, tol)
            self.assertFalse(err, 'Images are not close\n{}'.format(err))
    @classmethod
    def _image_directories(cls):
        # Subclasses must supply (baseline_dir, result_dir).
        raise NotImplementedError('{}: _image_directories'
                                  .format(cls.__name__))
    def _copy_baseline(self, baseline, extension):
        ''' Copy baseline image with given extension to result directory. '''
        __tracebackhide__ = True  # pylint: disable=unused-variable
        cls = self.__class__
        base_ext = baseline + '.' + extension
        # Original baseline file.
        baseline_fname = os.path.join(cls.baseline_dir, base_ext)
        if extension == 'eps' and not os.path.exists(baseline_fname):
            # Fall back to a pdf baseline by swapping the file extension.
            # BUGFIX: was `baseline_fname[:len('eps')] + 'pdf'`, which kept
            # only the first three characters of the whole path; strip the
            # trailing 'eps' instead.
            baseline_fname = baseline_fname[:-len('eps')] + 'pdf'
        # Copied expected file.
        expected_fname = mplcmp.make_test_filename(os.path.join(
            cls.result_dir, os.path.basename(baseline_fname)), 'expected')
        self.assertTrue(os.path.exists(baseline_fname),
                        'Do not have baseline image {0} '
                        'because this file does not exist: {1}'
                        .format(expected_fname, baseline_fname))
        shutil.copyfile(baseline_fname, expected_fname)
        return expected_fname
def image_comparison(baseline_images, extensions=None, tol=0,
                     remove_text=True, savefig_kwargs=None,
                     saved_as=None, save_suffix=''):
    '''
    Compare images generated by the test with those specified in
    `baseline_images`.
    Derived from matplotlib, lib/matplotlib/testing/decorators.py.
    Add `saved_as` option, which means that the test function has already saved
    the images to the locations, if not empty.
    Add `save_suffix` option, which is added to the baseline name before the
    extension to be used as the actual file name, so that multiple tests can
    share the same baseline image and still be stored as different files.
    '''
    __tracebackhide__ = True  # pylint: disable=unused-variable
    if not extensions:
        extensions = ['pdf', 'png', 'svg']
    if not savefig_kwargs:
        savefig_kwargs = {}
    if saved_as and len(baseline_images) != len(saved_as):
        raise ValueError('image_comparison: `saved_as` should have the same '
                         'length as `baseline_images` if not empty.')
    def decorator(func):
        ''' Decorator: replace `func` with a TestCase class comparing its images. '''
        __tracebackhide__ = True  # pylint: disable=unused-variable
        class ImageComparisonTest(_ImageComparisonBase):
            ''' TestCase class to compare image. '''
            @classmethod
            def _image_directories(cls):
                return _image_directories(func)
            @classmethod
            def setUpClass(cls):
                # Run the wrapped test function once per class to produce
                # the figures all per-extension tests then compare.
                super(ImageComparisonTest, cls).setUpClass()
                func()
            __doc__ = func.__doc__  # __doc__ must be assigned at define time.
        # __name__ and __module__ are assigned after definition.
        ImageComparisonTest.__name__ = func.__name__
        ImageComparisonTest.__module__ = func.__module__
        def test(self, extension):
            ''' Common method to compare an image with extension. '''
            self.mark_extension(extension)
            # Save figures.
            kwargs = savefig_kwargs.copy()
            if extension == 'pdf':
                # Blank out PDF metadata so output is deterministic.
                kwargs.setdefault('metadata',
                                  {'Creator': None,
                                   'Producer': None,
                                   'CreationDate': None})
            result_dir = self.__class__.result_dir
            if len(plt.get_fignums()) != len(baseline_images):
                raise ValueError('image_comparison: `baseline_images` should '
                                 'have the same length as the number of '
                                 'figures generated')
            for idx, baseline in enumerate(baseline_images):
                fignum = plt.get_fignums()[idx]
                figure = plt.figure(fignum)
                actual_fname = os.path.join(
                    result_dir, baseline + save_suffix + '.' + extension)
                if saved_as:
                    # Just copy local saved file to result directory.
                    saved_fname = saved_as[idx]
                    if not saved_fname.endswith('.' + extension):
                        saved_fname += '.' + extension
                    shutil.move(saved_fname, actual_fname)
                else:
                    if remove_text:
                        remove_ticks_and_titles(figure)
                    figure.savefig(actual_fname, **kwargs)
            # Decide the extra tolerance.
            # NOTE(review): this reuses `figure` from the loop above, i.e. it
            # inspects only the LAST figure -- confirm this is intentional.
            extra_tol = 0.5
            def aggr_ticklabel(ticklabels):
                ''' Aggregate all ticklabels as a string. '''
                return ''.join(str(tl) for tl in ticklabels)
            for ax in figure.get_axes():
                xticklabels = aggr_ticklabel(ax.get_xticklabels()
                                             + ax.get_xticklabels(minor=True))
                yticklabels = aggr_ticklabel(ax.get_yticklabels()
                                             + ax.get_yticklabels(minor=True))
                if easypyplot.util.matplotlib_version_tuple() < (2, 0) \
                        and yticklabels:
                    # yaxis tick vertical alignment, fixed in 2.0.
                    # See https://github.com/matplotlib/matplotlib/pull/6200
                    extra_tol += 256 * min(0.15, 0.01 * len(yticklabels))
                elif extension == 'png':
                    # PNG backend is not accurate. Weird warning from libpng.
                    extra_tol += 256 * min(0.1, 0.005 * len(xticklabels
                                                            + yticklabels))
            # Compare images.
            self.compare(baseline_images, save_suffix, extension, tol + extra_tol)
        # Dynamically add test methods for each image extension.
        for extension in extensions:
            # Bind `extension` as a default arg to avoid late-binding closure.
            ext_tst_func = lambda self, ext=extension: test(self, ext)
            ext_tst_name = 'test_{}'.format(extension)
            ext_tst_func.__name__ = ext_tst_name
            ext_tst_func.__doc__ = ' Compare images for {}. '.format(extension)
            setattr(ImageComparisonTest, ext_tst_name, ext_tst_func)
        return ImageComparisonTest
    return decorator
# Fix unittest.TestCase.assertRaisesRegex before Python 3.2: only the
# (later deprecated) assertRaisesRegexp spelling exists there, so alias it.
if sys.version_info < (3, 2):
    unittest.TestCase.assertRaisesRegex = unittest.TestCase.assertRaisesRegexp
| [
"matplotlib.font_manager.weight_dict.pop",
"matplotlib.style.use",
"matplotlib.units.registry.copy",
"matplotlib.pyplot.figure",
"numpy.sin",
"os.path.join",
"matplotlib.units.registry.clear",
"matplotlib.pyplot.close",
"matplotlib.rcParams.update",
"matplotlib.font_manager._rebuild",
"os.path.e... | [((1159, 1192), 'matplotlib.ticker.NullFormatter', 'matplotlib.ticker.NullFormatter', ([], {}), '()\n', (1190, 1192), False, 'import matplotlib\n'), ((2959, 3019), 'unittest.SkipTest', 'unittest.SkipTest', (['"""Skip because Tex is not in this system."""'], {}), "('Skip because Tex is not in this system.')\n", (2976, 3019), False, 'import unittest\n'), ((3128, 3158), 'numpy.linspace', 'np.linspace', (['(0)', '(2 * np.pi)', '(500)'], {}), '(0, 2 * np.pi, 500)\n', (3139, 3158), True, 'import numpy as np\n'), ((3167, 3182), 'numpy.sin', 'np.sin', (['(x + phi)'], {}), '(x + phi)\n', (3173, 3182), True, 'import numpy as np\n'), ((3782, 3798), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (3791, 3798), True, 'import matplotlib.pyplot as plt\n'), ((3829, 3861), 'matplotlib.units.registry.copy', 'matplotlib.units.registry.copy', ([], {}), '()\n', (3859, 3861), False, 'import matplotlib\n'), ((3886, 3912), 'matplotlib.rcParams.copy', 'matplotlib.rcParams.copy', ([], {}), '()\n', (3910, 3912), False, 'import matplotlib\n'), ((6099, 6115), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (6108, 6115), True, 'import matplotlib.pyplot as plt\n'), ((6175, 6202), 'matplotlib.rcParams.clear', 'matplotlib.rcParams.clear', ([], {}), '()\n', (6200, 6202), False, 'import matplotlib\n'), ((6207, 6252), 'matplotlib.rcParams.update', 'matplotlib.rcParams.update', (['original_settings'], {}), '(original_settings)\n', (6233, 6252), False, 'import matplotlib\n'), ((6257, 6290), 'matplotlib.units.registry.clear', 'matplotlib.units.registry.clear', ([], {}), '()\n', (6288, 6290), False, 'import matplotlib\n'), ((6295, 6352), 'matplotlib.units.registry.update', 'matplotlib.units.registry.update', (['original_units_registry'], {}), '(original_units_registry)\n', (6327, 6352), False, 'import matplotlib\n'), ((6357, 6381), 'warnings.resetwarnings', 'warnings.resetwarnings', ([], {}), '()\n', (6379, 6381), False, 'import 
warnings\n'), ((2463, 2488), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {}), '()\n', (2486, 2488), False, 'import warnings\n'), ((2498, 2530), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""error"""'], {}), "('error')\n", (2521, 2530), False, 'import warnings\n'), ((4037, 4063), 'matplotlib.testing.setup', 'matplotlib.testing.setup', ([], {}), '()\n', (4061, 4063), False, 'import matplotlib\n'), ((4245, 4301), 'matplotlib.style.use', 'matplotlib.style.use', (["['classic', '_classic_test_patch']"], {}), "(['classic', '_classic_test_patch'])\n", (4265, 4301), False, 'import matplotlib\n'), ((5410, 5446), 'matplotlib.font_manager.weight_dict.pop', 'mlpfm.weight_dict.pop', (['"""roman"""', 'None'], {}), "('roman', None)\n", (5431, 5446), True, 'import matplotlib.font_manager as mlpfm\n'), ((5475, 5505), 'collections.OrderedDict', 'OrderedDict', (['mlpfm.weight_dict'], {}), '(mlpfm.weight_dict)\n', (5486, 5505), False, 'from collections import OrderedDict\n'), ((5796, 5812), 'matplotlib.font_manager._rebuild', 'mlpfm._rebuild', ([], {}), '()\n', (5810, 5812), True, 'import matplotlib.font_manager as mlpfm\n'), ((9108, 9148), 'os.path.join', 'os.path.join', (['cls.baseline_dir', 'base_ext'], {}), '(cls.baseline_dir, base_ext)\n', (9120, 9148), False, 'import os\n'), ((9709, 9756), 'shutil.copyfile', 'shutil.copyfile', (['baseline_fname', 'expected_fname'], {}), '(baseline_fname, expected_fname)\n', (9724, 9756), False, 'import shutil\n'), ((2559, 2591), 'matplotlib.checkdep_usetex', 'matplotlib.checkdep_usetex', (['(True)'], {}), '(True)\n', (2585, 2591), False, 'import matplotlib\n'), ((4334, 4371), 'matplotlib.style.use', 'matplotlib.style.use', (['"""_classic_test"""'], {}), "('_classic_test')\n", (4354, 4371), False, 'import matplotlib\n'), ((7252, 7279), 'matplotlib.testing.compare.comparable_formats', 'mplcmp.comparable_formats', ([], {}), '()\n', (7277, 7279), True, 'import matplotlib.testing.compare as mplcmp\n'), ((7522, 7653), 
'unittest.SkipTest', 'unittest.SkipTest', (['"""Cannot compare svg files due to incompatible Inkscape command line interface change of --export-png."""'], {}), "(\n 'Cannot compare svg files due to incompatible Inkscape command line interface change of --export-png.'\n )\n", (7539, 7653), False, 'import unittest\n'), ((8021, 8093), 'os.path.join', 'os.path.join', (['cls.result_dir', "(baseline + actual_suffix + '.' + extension)"], {}), "(cls.result_dir, baseline + actual_suffix + '.' + extension)\n", (8033, 8093), False, 'import os\n'), ((8475, 8531), 'matplotlib.testing.compare.compare_images', 'mplcmp.compare_images', (['expected_fname', 'actual_fname', 'tol'], {}), '(expected_fname, actual_fname, tol)\n', (8496, 8531), True, 'import matplotlib.testing.compare as mplcmp\n'), ((9482, 9512), 'os.path.exists', 'os.path.exists', (['baseline_fname'], {}), '(baseline_fname)\n', (9496, 9512), False, 'import os\n'), ((2042, 2080), 'matplotlib.font_manager.findfont', 'matplotlib.font_manager.findfont', (['name'], {}), '(name)\n', (2074, 2080), False, 'import matplotlib\n'), ((4404, 4435), 'matplotlib.style.use', 'matplotlib.style.use', (['"""classic"""'], {}), "('classic')\n", (4424, 4435), False, 'import matplotlib\n'), ((8139, 8167), 'os.path.exists', 'os.path.exists', (['actual_fname'], {}), '(actual_fname)\n', (8153, 8167), False, 'import os\n'), ((8345, 8375), 'os.path.exists', 'os.path.exists', (['expected_fname'], {}), '(expected_fname)\n', (8359, 8375), False, 'import os\n'), ((9187, 9217), 'os.path.exists', 'os.path.exists', (['baseline_fname'], {}), '(baseline_fname)\n', (9201, 9217), False, 'import os\n'), ((9410, 9442), 'os.path.basename', 'os.path.basename', (['baseline_fname'], {}), '(baseline_fname)\n', (9426, 9442), False, 'import os\n'), ((11210, 11234), 'matplotlib.testing.decorators._image_directories', '_image_directories', (['func'], {}), '(func)\n', (11228, 11234), False, 'from matplotlib.testing.decorators import _image_directories\n'), ((12557, 
12575), 'matplotlib.pyplot.figure', 'plt.figure', (['fignum'], {}), '(fignum)\n', (12567, 12575), True, 'import matplotlib.pyplot as plt\n'), ((12608, 12674), 'os.path.join', 'os.path.join', (['result_dir', "(baseline + save_suffix + '.' + extension)"], {}), "(result_dir, baseline + save_suffix + '.' + extension)\n", (12620, 12674), False, 'import os\n'), ((12171, 12188), 'matplotlib.pyplot.get_fignums', 'plt.get_fignums', ([], {}), '()\n', (12186, 12188), True, 'import matplotlib.pyplot as plt\n'), ((12509, 12526), 'matplotlib.pyplot.get_fignums', 'plt.get_fignums', ([], {}), '()\n', (12524, 12526), True, 'import matplotlib.pyplot as plt\n'), ((12985, 13023), 'shutil.move', 'shutil.move', (['saved_fname', 'actual_fname'], {}), '(saved_fname, actual_fname)\n', (12996, 13023), False, 'import shutil\n')] |
# Adapted from http://docs.scipy.org/doc/scipy-0.15.1/reference/generated/scipy.signal.correlate2d.html
import matplotlib.pyplot as plt
import numpy as np
from scipy import signal
from scipy import misc

# Zero-mean grayscale test image: subtract the global mean, sum the channels.
raw = misc.face()
face = (raw - raw.mean()).sum(-1)
template = np.copy(face[700:800, 310:380])  # right eye
template -= template.mean()
noisyface = face + np.random.randn(*face.shape) * 50  # add noise
corr = signal.correlate2d(noisyface, template, boundary='symm', mode='same')
y, x = np.unravel_index(np.argmax(corr), corr.shape)  # find the match

fig, ((ax_orig, ax_template), (ax_noisy, ax_corr)) = plt.subplots(2, 2)
panels = (
    (ax_orig, face, 'Original'),
    (ax_template, template, 'Template'),
    (ax_noisy, noisyface, 'Noisy'),
    (ax_corr, corr, 'Cross-correlation'),
)
for axis, image, title in panels:
    axis.imshow(image, cmap='gray')
    axis.set_title(title)
    axis.set_axis_off()
# Mark the matched position on the original and noisy images.
ax_orig.plot(x, y, 'ro')
ax_noisy.plot(x, y, 'ro')
fig.show()
| [
"numpy.copy",
"numpy.argmax",
"numpy.random.randn",
"scipy.signal.correlate2d",
"scipy.misc.face",
"matplotlib.pyplot.subplots"
] | [((277, 308), 'numpy.copy', 'np.copy', (['face[700:800, 310:380]'], {}), '(face[700:800, 310:380])\n', (284, 308), True, 'import numpy as np\n'), ((421, 490), 'scipy.signal.correlate2d', 'signal.correlate2d', (['noisyface', 'template'], {'boundary': '"""symm"""', 'mode': '"""same"""'}), "(noisyface, template, boundary='symm', mode='same')\n", (439, 490), False, 'from scipy import signal\n'), ((616, 634), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)', '(2)'], {}), '(2, 2)\n', (628, 634), True, 'import matplotlib.pyplot as plt\n'), ((213, 224), 'scipy.misc.face', 'misc.face', ([], {}), '()\n', (222, 224), False, 'from scipy import misc\n'), ((515, 530), 'numpy.argmax', 'np.argmax', (['corr'], {}), '(corr)\n', (524, 530), True, 'import numpy as np\n'), ((368, 396), 'numpy.random.randn', 'np.random.randn', (['*face.shape'], {}), '(*face.shape)\n', (383, 396), True, 'import numpy as np\n'), ((227, 238), 'scipy.misc.face', 'misc.face', ([], {}), '()\n', (236, 238), False, 'from scipy import misc\n')] |
import time
import sqlite3
import pandas as pd
import numpy as np
import scipy as sp
from scipy import stats
import matplotlib.mlab as mlab
import matplotlib.pyplot as plt
'''
from sklearn.datasets import make_classification
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import (RandomTreesEmbedding, RandomForestClassifier, GradientBoostingClassifier)
from sklearn.preprocessing import OneHotEncoder
from sklearn.model_selection import train_test_split
from sklearn.metrics import roc_curve
from sklearn.pipeline import make_pipeline
'''
# Module-level state: the training DataFrames and the test DataFrame are
# populated elsewhere before the functions below run.
traindataframes = []
testDataFrame = []
# Number of leading rows of each price series to use.
# NOTE(review): `max` shadows the `max` builtin for this whole module;
# renaming it would require touching every reader, so only flagging it here.
max = 20
#def predict_next_day():
    #return 0
def denormalize_features(features, source_frames=None, row_limit=None):
    """Convert normalized price features back to absolute closing prices.

    The normalization used elsewhere in this module is
    ``closing / opening - 1`` with ``opening = Current_price - Today_price``
    per row; this function inverts it: ``closing = (feature + 1) * opening``.

    Parameters
    ----------
    features : sequence of sequences
        ``features[i][j]`` is the normalized value for row ``j`` of the
        ``i``-th source frame.
    source_frames : list of pandas.DataFrame, optional
        Frames with 'Current_price' (float closing price) and 'Today_price'
        (string change such as '+1.23'/'-0.45') columns.  Defaults to the
        module-level ``traindataframes``.
    row_limit : int, optional
        Number of leading rows to use per frame.  Defaults to the
        module-level ``max``.

    Returns
    -------
    pandas.DataFrame
        One column per source frame, rows are denormalized closing prices.
    """
    if source_frames is None:
        source_frames = traindataframes
    if row_limit is None:
        row_limit = max  # module-level row cap (shadows the builtin)
    frames = []
    for i, frame in enumerate(source_frames):
        price_col = frame['Current_price'][0:row_limit]
        # Strip the explicit '+' sign and convert to float in one vectorized
        # pass, instead of the previous per-value Series.replace() loops
        # (which rescanned the whole Series for every element).
        change_col = (frame['Today_price'][0:row_limit]
                      .astype(str)
                      .str.replace('+', '', regex=False)
                      .astype(float))
        denormalized = []
        for j, closing in enumerate(price_col):
            opening = closing - change_col[j]  # price before today's change
            denormalized.append((features[i][j] + 1) * opening)
        frames.append(denormalized)
    return pd.DataFrame(frames).transpose()
def gradient_descent():
    """Fit a linear model by batch gradient descent and plot the result.

    Uses the module-level ``traindataframes`` as features (one column of
    normalized daily returns per training frame) and ``testDataFrame[0]``
    as the target series.  Prices are normalized as
    (closing / opening) - 1 before fitting, then denormalized for the
    final plot.  Prints cost/R diagnostics and shows two figures:
    actual vs. predicted prices, and the cost history.

    Side effects: printing, matplotlib windows.  Returns nothing.
    """
    global traindataframes
    global testDataFrame
    prediction_frame = testDataFrame[0]
    prediction_frame = prediction_frame['Current_price']
    prediction_frame = prediction_frame[0:max]
    #this takes the closing price and the initial price Current_price = initial today price is the change is price
    #that day. So we take closing minus today change to get initial
    #we then normalize this data
    frames = []
    for i in traindataframes:
        normalized_features = []
        price_col = i['Current_price']
        price_col = price_col[0:max]
        change_col = i['Today_price']
        change_col = change_col[0:max]
        #parse data and remove + sign
        for j in change_col:
            if '+' in j:
                t = j.replace('+','')
                change_col = change_col.replace(str(j),t)
        for j in change_col:
            change_col = change_col.replace(str(j),float(j))
        #part that gets the initial and closing
        for j,element in enumerate(price_col):
            initial_price = element - change_col[j]
            closing_price = element
            normalized = ((closing_price/initial_price)-1)
            normalized_features.append(normalized)
        frames.append(normalized_features)
    #get features
    features = pd.DataFrame(frames).transpose()
    features_array = np.array(features)
    #same as above we ar normalizing the values
    frames = []
    normalized_features = []
    prediction_frame_change = testDataFrame[0]
    prediction_frame_change = prediction_frame_change['Today_price']
    prediction_frame_change = prediction_frame_change[0:max]
    #parse data and remove + sign
    for i in prediction_frame_change:
        if '+' in i:
            t = i.replace('+','')
            prediction_frame_change = prediction_frame_change.replace(str(i),t)
    for i in prediction_frame_change:
        prediction_frame_change = prediction_frame_change.replace(str(i),float(i))
    #part that gets the initial and closing
    for i,element in enumerate(prediction_frame):
        initial_price = element - prediction_frame_change[i]
        closing_price = element
        normalized = ((closing_price/initial_price)-1)
        '''
        print('closing_price: ' + str(closing_price))
        #print('todays_change: ' + str())
        print('normalized_price: ' + str(normalized))
        denormalized = (normalized + 1) * initial_price
        print('denormalized_price: ' + str(denormalized))
        '''
        normalized_features.append(normalized)
    for i in normalized_features:
        frames.append(i)
    #get values
    values_array = np.array(frames)
    m = len(values_array)
    alpha = 0.01            # learning rate
    num_iterations = 2000000
    theta_descent = np.zeros(len(features.columns))
    cost_history = []
    #actual gradient descent part
    for i in range(num_iterations):
        predicted_value = np.dot(features_array, theta_descent)
        # standard batch update: theta += alpha/m * X^T (y - X theta)
        theta_descent = theta_descent + alpha/m * np.dot(values_array - predicted_value, features_array)
        sum_of_square_errors = np.square(np.dot(features_array, theta_descent) - values_array).sum()
        cost = sum_of_square_errors / (2 * m)
        cost_history.append(cost)
        #this causes lag
        if(i % 1000 == 0):
            print('Epoch: ' + str(i/1000) + ' : ' + 'Cost: ' + str(cost_history[i]))
    #all output and debugging
    cost_history = pd.Series(cost_history)
    predictions = np.dot(features_array, theta_descent).transpose()
    print('============================================')
    print('Cost History: ', cost_history)
    print('Theta Descent: ',theta_descent)
    print('Alpha: ', alpha)
    print('Iterations: ',num_iterations)
    # coefficient of determination R^2 (guarded against a zero denominator)
    data_predictions = np.sum((values_array - predictions)**2)
    mean = np.mean(values_array)
    sq_mean = np.sum((values_array - mean)**2)
    if(sq_mean == 0):
        sq_mean = sq_mean + 0.0000001
    r = 1 - data_predictions / sq_mean
    print('R: ', r)
    #denormalize data
    features = denormalize_features(features)
    #features = np.array(features)
    #features = features[0]
    predictions = np.dot(features, theta_descent)
    print('Predictions: ',predictions)
    print('============================================')
    # last training column = "previous day" baseline for the plot
    day_before = features.transpose()
    day_before = day_before[-1:]
    day_before = day_before.transpose()
    fig, ax = plt.subplots()
    ax.plot(prediction_frame,'o',markersize = 1, color = 'green', label = 'Actual Price')
    ax.plot(predictions,'o',markersize = 1, color = 'blue', label = 'Predicted Price')
    ax.plot(day_before,'o',markersize = 1, color = 'red', label = 'Price Previously')
    plt.legend()
    fig2, ax2 = plt.subplots()
    ax2.plot(cost_history,'o',markersize = 1, color = 'blue')
    plt.show()
def get_Data():
    """Load every table of GE_Data.db into the module-level frames.

    All tables except the last become training DataFrames
    (``traindataframes``); the last table becomes the test DataFrame
    (``testDataFrame``).  Rows are ordered by the ``Id`` column.
    """
    global traindataframes
    global testDataFrame
    con = sqlite3.connect("GE_Data.db")
    cur = con.cursor()
    result = cur.execute("select name from sqlite_master where type = 'table'")
    tables = [row[0] for row in result.fetchall()]
    # every table but the last is training data
    for name in tables[:-1]:
        q = "select * from " + name + " ORDER BY Id"
        traindataframes.append(pd.read_sql(q, con))
    # the final table is held out for prediction
    for name in tables[-1:]:
        q = "select * from " + name + " ORDER BY Id"
        testDataFrame.append(pd.read_sql(q, con))
    cur.close()
    con.close()
# Script entry: load the SQLite tables, then fit and plot the model.
get_Data()
gradient_descent()
#predict()
| [
"pandas.DataFrame",
"numpy.sum",
"matplotlib.pyplot.show",
"matplotlib.pyplot.legend",
"numpy.mean",
"numpy.array",
"pandas.Series",
"sqlite3.connect",
"pandas.read_sql",
"numpy.dot",
"matplotlib.pyplot.subplots"
] | [((2700, 2718), 'numpy.array', 'np.array', (['features'], {}), '(features)\n', (2708, 2718), True, 'import numpy as np\n'), ((3853, 3869), 'numpy.array', 'np.array', (['frames'], {}), '(frames)\n', (3861, 3869), True, 'import numpy as np\n'), ((4556, 4579), 'pandas.Series', 'pd.Series', (['cost_history'], {}), '(cost_history)\n', (4565, 4579), True, 'import pandas as pd\n'), ((4864, 4905), 'numpy.sum', 'np.sum', (['((values_array - predictions) ** 2)'], {}), '((values_array - predictions) ** 2)\n', (4870, 4905), True, 'import numpy as np\n'), ((4912, 4933), 'numpy.mean', 'np.mean', (['values_array'], {}), '(values_array)\n', (4919, 4933), True, 'import numpy as np\n'), ((4945, 4979), 'numpy.sum', 'np.sum', (['((values_array - mean) ** 2)'], {}), '((values_array - mean) ** 2)\n', (4951, 4979), True, 'import numpy as np\n'), ((5223, 5254), 'numpy.dot', 'np.dot', (['features', 'theta_descent'], {}), '(features, theta_descent)\n', (5229, 5254), True, 'import numpy as np\n'), ((5464, 5478), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (5476, 5478), True, 'import matplotlib.pyplot as plt\n'), ((5734, 5746), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (5744, 5746), True, 'import matplotlib.pyplot as plt\n'), ((5760, 5774), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (5772, 5774), True, 'import matplotlib.pyplot as plt\n'), ((5835, 5845), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5843, 5845), True, 'import matplotlib.pyplot as plt\n'), ((5958, 5987), 'sqlite3.connect', 'sqlite3.connect', (['"""GE_Data.db"""'], {}), "('GE_Data.db')\n", (5973, 5987), False, 'import sqlite3\n'), ((4093, 4130), 'numpy.dot', 'np.dot', (['features_array', 'theta_descent'], {}), '(features_array, theta_descent)\n', (4099, 4130), True, 'import numpy as np\n'), ((1448, 1468), 'pandas.DataFrame', 'pd.DataFrame', (['frames'], {}), '(frames)\n', (1460, 1468), True, 'import pandas as pd\n'), ((2649, 2669), 'pandas.DataFrame', 
'pd.DataFrame', (['frames'], {}), '(frames)\n', (2661, 2669), True, 'import pandas as pd\n'), ((4597, 4634), 'numpy.dot', 'np.dot', (['features_array', 'theta_descent'], {}), '(features_array, theta_descent)\n', (4603, 4634), True, 'import numpy as np\n'), ((6344, 6363), 'pandas.read_sql', 'pd.read_sql', (['q', 'con'], {}), '(q, con)\n', (6355, 6363), True, 'import pandas as pd\n'), ((6533, 6552), 'pandas.read_sql', 'pd.read_sql', (['q', 'con'], {}), '(q, con)\n', (6544, 6552), True, 'import pandas as pd\n'), ((4175, 4229), 'numpy.dot', 'np.dot', (['(values_array - predicted_value)', 'features_array'], {}), '(values_array - predicted_value, features_array)\n', (4181, 4229), True, 'import numpy as np\n'), ((4265, 4302), 'numpy.dot', 'np.dot', (['features_array', 'theta_descent'], {}), '(features_array, theta_descent)\n', (4271, 4302), True, 'import numpy as np\n')] |
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from Eir.DTMC.spatialModel.HubModel import Hub
from Eir.DTMC.spatialModel.simul_details import Simul_Details
from Eir.utility import Person, dist, randEvent
class HubSEIR(Hub):
    """
    Object that represents the Hub Model with compartments S, E, I, and R. In this model, E is assumed to not be
    able to spread the virus.
    Parameters
    ----------
    S0: int
        Initial amount of susceptibles at the start of the simulation.
    E0: int
        Initial amount of exposed at the start of the simulation.
    I0: int
        Initial amount of infected at the start of the simulation.
    R0: int
        Initial amount of recovered at the start of the simulation.
    pss: float
        The probability that the randomly generated person at the start of the simulation is a super spreader.
    rho: float
        Rho is the probability of someone moving from E to I compartment. Rho is in [0, 1].
    gamma: float
        The probability of someone going from I to R.
    rstart: float
        The spreading radius of a normal spreader.
    days: int
        The number of days being simulated.
    w0: float optional
        The probability of a susceptible getting infected if the distance between the infectious person and susceptible is 0. Default is 1.0.
    hubConstant: float optional
        The scale by which the spreading radius of a super spreader increases. Default is sqrt(6).
    alpha: float optional
        Constant used in the infect probability generator. Default is 2.0.
    Attributes
    ----------
    S: ndarray
        A numpy array that stores the number of people in the susceptible state on each given day of the simulation.
    E: ndarray
        A numpy array that stores the number of people in the exposed state on each given day of the simulation.
    I: ndarray
        A numpy array that stores the number of people in the infected state on each given day of the simulation.
    R: ndarray
        A numpy array that stores the number of people in the recovered state on each given day of the simulation.
    popsize: int
        The total size of the population in the simulation. Given by S0 + E0 + I0 + R0.
    Scollect: list
        Used to keep track of the states each Person object is in. If the copy of a Person object has
        isIncluded == True, then the person is SUSCEPTIBLE. Has a total of popsize Person objects,
        with numbers [0, popsize).
    Ecollect: list
        Used to keep track of the states each Person object is in. If the copy of a Person object has
        isIncluded == True, then the person is EXPOSED. Has a total of popsize Person objects,
        with numbers [0, popsize).
    Icollect: list
        Used to keep track of the states each Person object is in. If the copy of a Person object has
        isIncluded == True, then the person is INFECTED. Has a total of popsize Person objects,
        with numbers [0, popsize).
    Rcollect: list
        Used to keep track of the states each Person object is in. If the copy of a Person object has
        isIncluded == True, then the person is RECOVERED. Has a total of popsize Person objects,
        with numbers [0, popsize).
    details: Simul_Details
        An object that can be returned to give a more in-depth look into the simulation. With this object,
        one can see transmission chains, state changes, the movement history of each individual, the state
        history of each person, and more.
    """
    def __init__(self, S0: int, E0: int, I0: int, R0: int, pss: float, rho: float,
    gamma: float, side: float, rstart:float, days: int, w0=1.0, hubConstant=6**0.5, alpha=2.0):
        #error checking
        self.intCheck([S0, E0, I0, R0, days])
        self.floatCheck([pss, rho, gamma, side, rstart, w0, alpha, hubConstant])
        self.negValCheck([S0, E0, I0, R0, pss, rho, gamma, side, rstart, days, w0, hubConstant, alpha])
        self.probValCheck([pss, rho, gamma, w0])
        # NOTE: super is initialized without E0; popsize is corrected below.
        super(HubSEIR, self).__init__(popsize=S0+I0+R0, pss=pss, rstart=rstart, alpha=alpha, side=side, S0=S0, I0=I0,
        days=days, w0=w0,hubConstant=hubConstant)
        # adjust the popsize
        self.popsize += E0
        # locations in the plane
        self.locx, self.locy = np.random.random(self.popsize)*self.side, np.random.random(self.popsize)*self.side
        # probability of going from I to R
        self.gamma = gamma
        # initialize the probability of leaving E
        self.rho = rho
        # make the initial R class variable
        self.R0 = R0
        # create the R collect datastructure
        self.Rcollect = []
        # create the E collect datastructure
        self.Ecollect = []
        self.E0 = E0
        # create numpy arrays to store number of people in each compartment
        self.E = np.zeros(days+1)
        self.E[0] = E0
        self.R = np.zeros(days+1)
        # put the initial removed values into the array
        self.R[0] = R0
        # create a Simul_Details object
        self.details = Simul_Details(days=days, popsize=self.popsize, static=True)
        for i in range(self.popsize):
            # event is whether person is a super spreader
            event = randEvent(self.pss)
            # susceptible version
            p1 = Person(self.locx[i], self.locy[i], event)
            # exposed version
            p2 = Person(self.locx[i], self.locy[i], event)
            # infectious version
            p3 = Person(self.locx[i], self.locy[i], event)
            # removed version
            p4 = Person(self.locx[i], self.locy[i], event)
            # depending on the number, say that the person is in S, I, R. Add that state to the Simul_Details object
            if i < S0:
                p1.isIncluded = True
                self.details.addStateChange(i, "S", 0)
            elif i < S0 + I0:
                p3.isIncluded = True
                self.details.addStateChange(i, "I", 0)
            elif i < S0 + E0 + I0:
                p2.isIncluded=True
                self.details.addStateChange(i, "E", 0)
            else:
                p4.isIncluded = True
                self.details.addStateChange(i, "R", 0)
            # add the locations to the Simul_Details object
            self.details.addLocation(0, (self.locx[i], self.locy[i]))
            # append the Person objects to the collections
            self.Scollect.append(p1)
            self.Ecollect.append(p2)
            self.Icollect.append(p3)
            self.Rcollect.append(p4)
    # run state changes from S to E
    def _StoE(self, day: int):
        """
        Deals with the transfers from S compartment to E compartment.
        Parameters
        ----------
        day: int
            feed in the current day the state transfer is taking place on.
        Return
        ------
        set:
            returns the set containing the indices of those that whose self.Ecollect[index].isIncluded must be set to True
        """
        # set that keeps track of the indices of people that changed states
        transfers = set()
        for count, inf in enumerate(self.Icollect):
            if not inf.isIncluded:
                continue
            for count2, sus in enumerate(self.Scollect):
                #print("Susceptible Person ", count2)
                if not sus.isIncluded:
                    continue
                # generate the probability of infection
                prob = self._infect(inf, sus)
                # generate a random event based on the P(infection)
                event = randEvent(prob)
                # if an infection doesn't occur
                if not event:
                    continue
                # remove the person from the susceptible state
                self.Scollect[count2].isIncluded = False
                self.details.addTransmission(day, count, count2)
                # put the person in the transfer set to be made an exposed person
                transfers.add(count2)
        return transfers
    # run state changes from E to I
    def _EtoI(self):
        """
        Deals with transferring those from E compartment to I compartment.
        Return
        ------
        set:
            the indices of people who will be transferred from E compartment to I compartment
        """
        # set that keeps track of the indices of people that changed states
        transfers = set()
        for count, per in enumerate(self.Ecollect):
            if not per.isIncluded:
                continue
            event = randEvent(self.rho)
            if not event:
                continue
            self.Ecollect[count].isIncluded = False
            transfers.add(count)
        return transfers
    def _ItoR(self):
        # set that keeps track of the indices of people that changed states
        """
        Deals with transferring those from I compartment to R compartment.
        Return
        ------
        set:
            the indices of people who will be transferred from I compartment to R compartment
        """
        transfers = set()
        for count, inf in enumerate(self.Icollect):
            if not inf.isIncluded:
                continue
            event = randEvent(self.gamma)
            if not event:
                continue
            self.Icollect[count].isIncluded = False
            transfers.add(count)
        return transfers
    # run the simulation using
    def run(self, getDetails=True):
        for i in range(1, self.days + 1):
            #print("Day: ", i)
            # run the transfers from different compartments
            transferSE = self._StoE(i)
            transferEI = self._EtoI()
            transferIR = self._ItoR()
            # go after and change the indices in the collection data structure thing
            for index in transferSE:
                self.Ecollect[index].isIncluded = True
                self.details.addStateChange(index, "E", i)
            for index in transferEI:
                self.Icollect[index].isIncluded = True
                self.details.addStateChange(index, "I", i)
            for index in transferIR:
                self.Rcollect[index].isIncluded = True
                self.details.addStateChange(index, "R", i)
            # change the number of people in each state on the day i by adjusting the previous day's count
            self.S[i] = self.S[i - 1] - len(transferSE)
            self.E[i] = self.E[i-1] +len(transferSE) - len(transferEI)
            self.I[i] = self.I[i - 1] + len(transferEI) - len(transferIR)
            self.R[i] = self.R[i-1] + len(transferIR)
        if getDetails:
            return self.details
    def plot(self):
        """
        Plots all variables on subplots
        Return
        -------
        pyplot.Figure:
            return a fig object that will contain the graphs
        """
        t = np.linspace(0, self.days, self.days + 1)
        fig, (ax1, ax2, ax3, ax4) = plt.subplots(nrows=4, sharex='all')
        ax1.plot(t, self.S, label="Susceptible", color='r')
        ax1.set_ylabel("Number of Susceptible People")
        ax1.set_title("Hub SEIR Simulation")
        ax3.plot(t, self.I, label="Active Cases", color='b')
        ax3.set_ylabel("Active Cases")
        ax2.plot(t, self.E, label="Exposed", color='c')
        ax2.set_ylabel("# of Exposed")
        ax4.plot(t, self.R, label="Recovered", color='m')
        ax4.set_xlabel("Days")
        ax4.set_ylabel('Number of Recovered')
        ax1.legend()
        ax2.legend()
        ax3.legend()
        ax4.legend()
        plt.show()
        return fig
    # convert the arrays to dataframe
    def toDataFrame(self):
        """
        Converts the arrays to a pandas DataFrame.
        Return
        ------
        pd.DataFrame:
            a dataframe containing the people in S, E, I, and R compartments per day.
        """
        # create the linspaced numpy array
        t = np.linspace(0, self.days, self.days + 1)
        # create a 2D array with the days and susceptible and infected arrays
        # do it over axis one so that it creates columns days, susceptible, infected
        arr = np.stack([t, self.S, self.E, self.I, self.R], axis=1)
        df = pd.DataFrame(arr, columns=["Days", "Susceptible", "Exposed", "Infected", "Recovered"])
        return df
| [
"numpy.stack",
"pandas.DataFrame",
"Eir.DTMC.spatialModel.simul_details.Simul_Details",
"matplotlib.pyplot.show",
"Eir.utility.Person",
"numpy.zeros",
"numpy.random.random",
"numpy.linspace",
"Eir.utility.randEvent",
"matplotlib.pyplot.subplots"
] | [((5114, 5132), 'numpy.zeros', 'np.zeros', (['(days + 1)'], {}), '(days + 1)\n', (5122, 5132), True, 'import numpy as np\n'), ((5173, 5191), 'numpy.zeros', 'np.zeros', (['(days + 1)'], {}), '(days + 1)\n', (5181, 5191), True, 'import numpy as np\n'), ((5336, 5395), 'Eir.DTMC.spatialModel.simul_details.Simul_Details', 'Simul_Details', ([], {'days': 'days', 'popsize': 'self.popsize', 'static': '(True)'}), '(days=days, popsize=self.popsize, static=True)\n', (5349, 5395), False, 'from Eir.DTMC.spatialModel.simul_details import Simul_Details\n'), ((11392, 11432), 'numpy.linspace', 'np.linspace', (['(0)', 'self.days', '(self.days + 1)'], {}), '(0, self.days, self.days + 1)\n', (11403, 11432), True, 'import numpy as np\n'), ((11470, 11505), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': '(4)', 'sharex': '"""all"""'}), "(nrows=4, sharex='all')\n", (11482, 11505), True, 'import matplotlib.pyplot as plt\n'), ((12103, 12113), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (12111, 12113), True, 'import matplotlib.pyplot as plt\n'), ((12488, 12528), 'numpy.linspace', 'np.linspace', (['(0)', 'self.days', '(self.days + 1)'], {}), '(0, self.days, self.days + 1)\n', (12499, 12528), True, 'import numpy as np\n'), ((12709, 12762), 'numpy.stack', 'np.stack', (['[t, self.S, self.E, self.I, self.R]'], {'axis': '(1)'}), '([t, self.S, self.E, self.I, self.R], axis=1)\n', (12717, 12762), True, 'import numpy as np\n'), ((12777, 12867), 'pandas.DataFrame', 'pd.DataFrame', (['arr'], {'columns': "['Days', 'Susceptible', 'Exposed', 'Infected', 'Recovered']"}), "(arr, columns=['Days', 'Susceptible', 'Exposed', 'Infected',\n 'Recovered'])\n", (12789, 12867), True, 'import pandas as pd\n'), ((5515, 5534), 'Eir.utility.randEvent', 'randEvent', (['self.pss'], {}), '(self.pss)\n', (5524, 5534), False, 'from Eir.utility import Person, dist, randEvent\n'), ((5588, 5629), 'Eir.utility.Person', 'Person', (['self.locx[i]', 'self.locy[i]', 'event'], {}), '(self.locx[i], 
self.locy[i], event)\n', (5594, 5629), False, 'from Eir.utility import Person, dist, randEvent\n'), ((5679, 5720), 'Eir.utility.Person', 'Person', (['self.locx[i]', 'self.locy[i]', 'event'], {}), '(self.locx[i], self.locy[i], event)\n', (5685, 5720), False, 'from Eir.utility import Person, dist, randEvent\n'), ((5773, 5814), 'Eir.utility.Person', 'Person', (['self.locx[i]', 'self.locy[i]', 'event'], {}), '(self.locx[i], self.locy[i], event)\n', (5779, 5814), False, 'from Eir.utility import Person, dist, randEvent\n'), ((5864, 5905), 'Eir.utility.Person', 'Person', (['self.locx[i]', 'self.locy[i]', 'event'], {}), '(self.locx[i], self.locy[i], event)\n', (5870, 5905), False, 'from Eir.utility import Person, dist, randEvent\n'), ((8972, 8991), 'Eir.utility.randEvent', 'randEvent', (['self.rho'], {}), '(self.rho)\n', (8981, 8991), False, 'from Eir.utility import Person, dist, randEvent\n'), ((9673, 9694), 'Eir.utility.randEvent', 'randEvent', (['self.gamma'], {}), '(self.gamma)\n', (9682, 9694), False, 'from Eir.utility import Person, dist, randEvent\n'), ((4552, 4582), 'numpy.random.random', 'np.random.random', (['self.popsize'], {}), '(self.popsize)\n', (4568, 4582), True, 'import numpy as np\n'), ((4594, 4624), 'numpy.random.random', 'np.random.random', (['self.popsize'], {}), '(self.popsize)\n', (4610, 4624), True, 'import numpy as np\n'), ((7964, 7979), 'Eir.utility.randEvent', 'randEvent', (['prob'], {}), '(prob)\n', (7973, 7979), False, 'from Eir.utility import Person, dist, randEvent\n')] |
import re
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from mpl_toolkits.axes_grid1 import make_axes_locatable
from nilearn.plotting import cm
from scipy.stats.mstats import zscore
from scipy.stats import percentileofscore
import seaborn as sns
from src.data_cleaning import clean_confound, mad, select_mad_percentile
from src.utils import unflatten
def rank_labels(pd_ser):
    '''
    Rank behaviour variables in descending order, turning sparsed (zero)
    entries into NaN so they sort to the end.
    Returns a column vector of the ranked values and the matching labels.
    '''
    ranked = pd_ser.replace(to_replace=0, value=np.nan).sort_values(ascending=False)
    behav_labels = list(ranked.index)
    ranked_values = ranked.values
    # column vector sized to the label count, filled row-by-row
    v_ranked_flat = np.zeros((len(behav_labels), 1))
    v_ranked_flat.flat[:ranked_values.shape[0]] = ranked_values
    return v_ranked_flat, behav_labels
def plot_heatmap(ax, mat, x_labels, y_labels, cb_max, cmap=plt.cm.RdBu_r):
    '''
    Draw a single generic heatmap on an existing axis.
    ax: the axis of the figure
    mat: 2-d matrix
    x_labels, y_labels: lists of labels
    cb_max: maximum value of the color bar (color scale is symmetric)
    '''
    image = ax.matshow(mat, vmin=-cb_max, vmax=cb_max, cmap=cmap)
    n_rows, n_cols = mat.shape
    ax.set_xticks(np.arange(n_cols))
    ax.set_yticks(np.arange(n_rows))
    ax.set_xticklabels(x_labels, rotation='vertical')
    ax.set_yticklabels(y_labels)
    return image
def single_heatmap(mat, x_labels, y_labels, cb_label):
    '''
    Draw one heatmap in its own figure with an attached color bar.
    The color scale is symmetric around zero.
    '''
    limit = np.max(np.abs(mat))
    fig = plt.figure()
    ax = fig.add_subplot(111)
    hm = ax.matshow(mat, vmin=-limit, vmax=limit, cmap=plt.cm.RdBu_r)
    # carve out a side axis for the color bar
    divider = make_axes_locatable(ax)
    cax = divider.append_axes("right", size="5%", pad=1)
    colorbar = fig.colorbar(hm, cax=cax)
    colorbar.set_label(cb_label)
    n_rows, n_cols = mat.shape
    ax.set_xticks(np.arange(n_cols))
    ax.set_yticks(np.arange(n_rows))
    ax.set_xticklabels(x_labels)
    ax.set_yticklabels(y_labels)
    return fig
def plot_SCCA_FC_MWQ(FC_ws, behav_ws, region_labels, behav_labels, cb_max, cmap=plt.cm.RdBu_r):
    '''
    Plot one SCCA component: the functional-connectivity weight matrix
    side by side with the behaviour (MWQ) weight vector, sharing one
    symmetric color scale and a single color bar.
    '''
    plt.close('all')
    fig = plt.figure(figsize=(15, 4))
    ax_brain = fig.add_subplot(111)
    brain = plot_heatmap(ax_brain, FC_ws, region_labels, region_labels, cb_max, cmap)
    # dashed diagonal over the connectivity matrix
    n_regions = len(region_labels)
    ax_brain.plot([-0.5, n_regions - 0.5], [-0.5, n_regions - 0.5], ls='--', c='.3')
    divider = make_axes_locatable(ax_brain)
    ax_behav = divider.append_axes("right", size="1%", pad=8)
    plot_heatmap(ax_behav, behav_ws, [' '], behav_labels, cb_max, cmap)
    cax = make_axes_locatable(ax_behav).append_axes("right", size="50%", pad=0.25)
    fig.colorbar(brain, cax=cax)
    return fig
def map_labels(data, lables):
    '''Wrap *data* in a DataFrame whose row index is *lables*.'''
    return pd.DataFrame(data, index=lables)
def show_results(u, v, u_labels, v_labels, rank_v=True, sparse=True):
    '''
    Plot the SCCA decomposition heatmaps, one figure per component.
    u must be from a functional connectivity data set (flattened matrices,
    one column per component).
    v must be from a data set that can be expressed in a single vector.

    Parameters
    ----------
    u, v : 2-d arrays, columns are components
    u_labels, v_labels : labels for the FC regions / behaviour variables
    rank_v : bool
        If True, sort each behaviour component in descending order.
    sparse : bool
        If True, drop NaN (sparsed) behaviour entries before plotting.

    Returns
    -------
    list of matplotlib figures, one per component.
    '''
    df_v = map_labels(v, v_labels)
    n_component = v.shape[1]
    # one shared symmetric color scale across both panels and all components
    u_max = np.max(np.abs(u))
    v_max = np.max(np.abs(v))
    cb_max = np.max((u_max, v_max))
    figs = []
    for i in range(n_component):
        # reconstruct the correlation matrix
        ui = unflatten(u[:, i])
        if rank_v:
            vi, cur_v_labels = rank_labels(df_v.iloc[:, i])
        else:
            # BUG FIX: was v[:, i - 1:i], which is empty for i == 0 and the
            # previous component for i >= 1; select column i as a 2-d array
            # (the input of the plot function must be an array).
            vi = v[:, i:i + 1]
            cur_v_labels = v_labels
        if sparse:
            # drop NaN (sparsed-out) behaviour entries and their labels
            idx = np.isnan(vi).reshape((vi.shape[0]))
            vi = vi[~idx]
            vi = vi.reshape((vi.shape[0], 1))
            cur_v_labels = np.array(cur_v_labels)[~idx]
        cur_fig = plot_SCCA_FC_MWQ(ui, vi, u_labels, cur_v_labels, cb_max=cb_max, cmap=plt.cm.RdBu_r)
        # save for later
        figs.append(cur_fig)
    return figs
from matplotlib.backends.backend_pdf import PdfPages
def write_pdf(fname, figures):
    '''
    Write a list of figures into one multi-page pdf at *fname*.
    '''
    document = PdfPages(fname)
    for figure in figures:
        figure.savefig(document, format='pdf', dpi=150, bbox_inches='tight')
    document.close()
def write_png(fname, figures):
    '''
    Save each figure to its own png file; *fname* is a format string
    that receives the 1-based figure number.
    '''
    for idx, figure in enumerate(figures, start=1):
        figure.savefig(fname.format(idx), dpi=150, bbox_inches='tight')
def set_text_size(size):
    '''
    Set the size of all text in the figures.
    The font is always sans-serif (Arial). You only need this.
    '''
    font_settings = {'family': 'sans-serif',
                     'sans-serif': 'Arial',
                     'size': size}
    matplotlib.rc('font', **font_settings)
class plot_mad_results(object):
    '''
    Dimension reduction on the functional connectivity using
    median absolute deviation (MAD), with plotting helpers.
    '''
    def __init__(self, fc_flatten):
        """
        Parameters
        ----------
        fc_flatten : 2-d array
            Flattened functional connectivity, subjects x edges.
        """
        self.X = fc_flatten
        # per-edge median absolute deviation across subjects
        self.X_mad = mad(fc_flatten)
        # per-edge mean across subjects
        self.X_mean = fc_flatten.mean(axis=0)
        # percentile thresholds used by the plots below
        self.pr = [50, 75, 90, 95]
    def plot_mad_distroubtion(self, fn):
        # NOTE(review): method name has a typo ("distroubtion") kept for
        # backward compatibility with existing callers.
        # Plot edges sorted by descending MAD, shading each percentile band,
        # and save to reports/figures/<fn>.png.
        x_lim = self.X.shape[1]
        y_lim = self.X_mad.max()
        sort_idx = np.argsort(-self.X_mad)
        X_sort = self.X_mad[sort_idx]
        fig, ax = plt.subplots(figsize=(6,4))
        ax.fill_between(range(0,x_lim), 0, X_sort)
        x_ticks = [x_lim]
        for i in self.pr:
            # shade the edges above the i-th MAD percentile
            ax.fill_between(range(0,x_lim), 0, X_sort,
                           where= X_sort > np.percentile(self.X_mad, i))
            t = np.percentile(range(0, x_lim), 100 - i)
            x_ticks.append(int(t))
        x_ticks.append(1)
        x_ticks = sorted(x_ticks)
        plt.xlim((1, x_lim))
        plt.ylim((0, y_lim))
        plt.xticks(x_ticks, rotation=90)
        # hand-placed percentile annotations (positions tuned by eye)
        ax.annotate('95%', xy=(np.percentile(range(0,x_lim), 2), 0.23))
        ax.annotate('90%', xy=(np.percentile(range(0,x_lim), 7), 0.18))
        ax.annotate('75%', xy=(np.percentile(range(0,x_lim), 15), 0.16))
        ax.annotate('50%', xy=(np.percentile(range(0,x_lim), 35), 0.14))
        plt.xlabel('Functional connectivity edges')
        plt.ylabel('Median absolute deviation')
        plt.savefig('reports/figures/{}.png'.format(fn), dpi=300, bbox_inches='tight', transparent=True)
        plt.show()
        plt.close()
    def plot_mad_reduction(self, label_file, fn):
        '''
        Plot the mean FC matrix alongside MAD-thresholded versions
        (50/75/90 percentiles), each sorted by Yeo 7 networks.
        Requires a label file generated from PyROICluster, e.g.
        'references/scorr05_2level_names_100.csv'.
        '''
        # "Full matix" plus one reduced matrix per percentile (sic: typo
        # kept in the dict keys because they are the plotted titles)
        X_filtered = {'Full matix' : unflatten(self.X_mean)}
        for i in self.pr[:-1]:
            x_f, _ = select_mad_percentile(self.X_mean, self.X_mad, i)
            X_filtered['{}% matix'.format(i)] = unflatten(x_f)
        fig, axarr= plt.subplots(2, 2, figsize=(10, 10))
        for i, (title, mat) in enumerate(X_filtered.items()):
            # map the flat panel index onto the 2x2 grid
            loc_x, loc_y = np.unravel_index(i, (2,2))
            ax_cur = axarr[loc_x, loc_y]
            ax_cur.set_title(title)
            mat, ticks, ticklabels = sort_by_yeo7(mat, label_file)
            sns.heatmap(mat, center=0,
                        square=True, annot=False,
                        cmap='cold_hot', ax=ax_cur)
            # white separators between network blocks
            ax_cur.hlines(ticks[1:], *ax_cur.get_xlim(), color='w', lw=0.5)
            ax_cur.vlines(ticks[1:], *ax_cur.get_ylim(), color='w', lw=0.5)
            ax_cur.set_xticks(ticks)
            ax_cur.set_xticklabels(ticklabels)
            ax_cur.set_yticks(ticks)
            ax_cur.set_yticklabels(ticklabels)
        plt.savefig('reports/figures/{}.png'.format(fn), dpi=300, transparent=True)
        plt.show()
        plt.close()
def sort_by_yeo7(mat, label_file):
    '''
    Sort a Craddock-atlas connectivity matrix by Yeo-Krienen 7 networks.

    Requires the label file in .csv generated from PyROICluster
    ('references/scorr05_2level_names_100.csv') with a
    'Yeo-Krienen 7 networks' column; the number of rows must match n.

    Parameters
    ----------
    mat : n x n array
        The matrix to be sorted.
    label_file : str
        Path to the PyROICluster label csv.

    Returns
    -------
    (sorted n x n array, tick positions, tick labels)
    '''
    def get_primary(x):
        # First named network in the label string; skip a leading 'None'.
        list_name = re.findall("[a-zA-Z]+", x)
        if len(list_name) == 1:
            prim = list_name[0]
        elif list_name[0] == 'None':
            prim = list_name[1]
        else:
            prim = list_name[0]
        return prim
    def get_ticks(label_names_yeo7):
        # Tick at the first (1-based) row of each new network group.
        ticks = [1]
        ticklabels = ['Default']
        for i in range(label_names_yeo7.shape[0]):
            cur = label_names_yeo7.iloc[i, 3]
            if ticklabels[-1] != cur:
                ticks.append(i + 1)
                ticklabels.append(cur)
        return ticks
    label_names = pd.read_csv(label_file)
    label_names['Yeo7'] = label_names['Yeo-Krienen 7 networks'].apply(get_primary)
    label_names = label_names.sort_values('Yeo7')
    # reset_index keeps the original row positions in the 'index' column
    label_names_yeo7 = label_names.iloc[:, [0, 1, -1]].reset_index()
    tmp = pd.DataFrame(mat,
                 index=range(1, label_names.shape[0] + 1),
                 columns=range(1, label_names.shape[0] + 1))
    # BUG FIX: reindex by the *original* positions (shifted to tmp's 1-based
    # labels), not by the post-reset sequential index, which produced a NaN
    # row/column and no actual reordering.
    idx = (label_names_yeo7['index'] + 1).tolist()
    reorder = tmp.reindex(index=idx, columns=idx)
    ticks = get_ticks(label_names_yeo7)
    ticklabels = ['DMN', 'DAN', 'FPN', 'LIM', 'None', 'S-M', 'VAN', 'VIS']
    return reorder.values, ticks, ticklabels
| [
"matplotlib.backends.backend_pdf.PdfPages",
"matplotlib.rc",
"numpy.abs",
"seaborn.heatmap",
"pandas.read_csv",
"numpy.isnan",
"numpy.argsort",
"matplotlib.pyplot.figure",
"numpy.arange",
"src.utils.unflatten",
"pandas.DataFrame",
"matplotlib.pyplot.close",
"numpy.max",
"re.findall",
"ma... | [((1579, 1591), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1589, 1591), True, 'import matplotlib.pyplot as plt\n'), ((1708, 1731), 'mpl_toolkits.axes_grid1.make_axes_locatable', 'make_axes_locatable', (['ax'], {}), '(ax)\n', (1727, 1731), False, 'from mpl_toolkits.axes_grid1 import make_axes_locatable\n'), ((2191, 2207), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (2200, 2207), True, 'import matplotlib.pyplot as plt\n'), ((2219, 2246), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(15, 4)'}), '(figsize=(15, 4))\n', (2229, 2246), True, 'import matplotlib.pyplot as plt\n'), ((2496, 2519), 'mpl_toolkits.axes_grid1.make_axes_locatable', 'make_axes_locatable', (['ax'], {}), '(ax)\n', (2515, 2519), False, 'from mpl_toolkits.axes_grid1 import make_axes_locatable\n'), ((2666, 2690), 'mpl_toolkits.axes_grid1.make_axes_locatable', 'make_axes_locatable', (['ax2'], {}), '(ax2)\n', (2685, 2690), False, 'from mpl_toolkits.axes_grid1 import make_axes_locatable\n'), ((2842, 2874), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {'index': 'lables'}), '(data, index=lables)\n', (2854, 2874), True, 'import pandas as pd\n'), ((3324, 3346), 'numpy.max', 'np.max', (['(u_max, v_max)'], {}), '((u_max, v_max))\n', (3330, 3346), True, 'import numpy as np\n'), ((4214, 4229), 'matplotlib.backends.backend_pdf.PdfPages', 'PdfPages', (['fname'], {}), '(fname)\n', (4222, 4229), False, 'from matplotlib.backends.backend_pdf import PdfPages\n'), ((4782, 4811), 'matplotlib.rc', 'matplotlib.rc', (['"""font"""'], {}), "('font', **font)\n", (4795, 4811), False, 'import matplotlib\n'), ((8733, 8756), 'pandas.read_csv', 'pd.read_csv', (['label_file'], {}), '(label_file)\n', (8744, 8756), True, 'import pandas as pd\n'), ((1263, 1286), 'numpy.arange', 'np.arange', (['mat.shape[1]'], {}), '(mat.shape[1])\n', (1272, 1286), True, 'import numpy as np\n'), ((1306, 1329), 'numpy.arange', 'np.arange', (['mat.shape[0]'], {}), '(mat.shape[0])\n', 
(1315, 1329), True, 'import numpy as np\n'), ((1555, 1566), 'numpy.abs', 'np.abs', (['mat'], {}), '(mat)\n', (1561, 1566), True, 'import numpy as np\n'), ((1870, 1893), 'numpy.arange', 'np.arange', (['mat.shape[1]'], {}), '(mat.shape[1])\n', (1879, 1893), True, 'import numpy as np\n'), ((1913, 1936), 'numpy.arange', 'np.arange', (['mat.shape[0]'], {}), '(mat.shape[0])\n', (1922, 1936), True, 'import numpy as np\n'), ((3270, 3279), 'numpy.abs', 'np.abs', (['u'], {}), '(u)\n', (3276, 3279), True, 'import numpy as np\n'), ((3300, 3309), 'numpy.abs', 'np.abs', (['v'], {}), '(v)\n', (3306, 3309), True, 'import numpy as np\n'), ((3450, 3468), 'src.utils.unflatten', 'unflatten', (['u[:, i]'], {}), '(u[:, i])\n', (3459, 3468), False, 'from src.utils import unflatten\n'), ((5041, 5056), 'src.data_cleaning.mad', 'mad', (['fc_flatten'], {}), '(fc_flatten)\n', (5044, 5056), False, 'from src.data_cleaning import clean_confound, mad, select_mad_percentile\n'), ((5267, 5290), 'numpy.argsort', 'np.argsort', (['(-self.X_mad)'], {}), '(-self.X_mad)\n', (5277, 5290), True, 'import numpy as np\n'), ((5349, 5377), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(6, 4)'}), '(figsize=(6, 4))\n', (5361, 5377), True, 'import matplotlib.pyplot as plt\n'), ((5774, 5794), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(1, x_lim)'], {}), '((1, x_lim))\n', (5782, 5794), True, 'import matplotlib.pyplot as plt\n'), ((5803, 5823), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0, y_lim)'], {}), '((0, y_lim))\n', (5811, 5823), True, 'import matplotlib.pyplot as plt\n'), ((5832, 5864), 'matplotlib.pyplot.xticks', 'plt.xticks', (['x_ticks'], {'rotation': '(90)'}), '(x_ticks, rotation=90)\n', (5842, 5864), True, 'import matplotlib.pyplot as plt\n'), ((6165, 6208), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Functional connectivity edges"""'], {}), "('Functional connectivity edges')\n", (6175, 6208), True, 'import matplotlib.pyplot as plt\n'), ((6217, 6256), 'matplotlib.pyplot.ylabel', 
'plt.ylabel', (['"""Median absolute deviation"""'], {}), "('Median absolute deviation')\n", (6227, 6256), True, 'import matplotlib.pyplot as plt\n'), ((6370, 6380), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6378, 6380), True, 'import matplotlib.pyplot as plt\n'), ((6389, 6400), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (6398, 6400), True, 'import matplotlib.pyplot as plt\n'), ((6825, 6861), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)', '(2)'], {'figsize': '(10, 10)'}), '(2, 2, figsize=(10, 10))\n', (6837, 6861), True, 'import matplotlib.pyplot as plt\n'), ((7681, 7691), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7689, 7691), True, 'import matplotlib.pyplot as plt\n'), ((7700, 7711), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (7709, 7711), True, 'import matplotlib.pyplot as plt\n'), ((8161, 8187), 're.findall', 're.findall', (['"""[a-zA-Z]+"""', 'x'], {}), "('[a-zA-Z]+', x)\n", (8171, 8187), False, 'import re\n'), ((6615, 6637), 'src.utils.unflatten', 'unflatten', (['self.X_mean'], {}), '(self.X_mean)\n', (6624, 6637), False, 'from src.utils import unflatten\n'), ((6691, 6740), 'src.data_cleaning.select_mad_percentile', 'select_mad_percentile', (['self.X_mean', 'self.X_mad', 'i'], {}), '(self.X_mean, self.X_mad, i)\n', (6712, 6740), False, 'from src.data_cleaning import clean_confound, mad, select_mad_percentile\n'), ((6789, 6803), 'src.utils.unflatten', 'unflatten', (['x_f'], {}), '(x_f)\n', (6798, 6803), False, 'from src.utils import unflatten\n'), ((6952, 6979), 'numpy.unravel_index', 'np.unravel_index', (['i', '(2, 2)'], {}), '(i, (2, 2))\n', (6968, 6979), True, 'import numpy as np\n'), ((7136, 7221), 'seaborn.heatmap', 'sns.heatmap', (['mat'], {'center': '(0)', 'square': '(True)', 'annot': '(False)', 'cmap': '"""cold_hot"""', 'ax': 'ax_cur'}), "(mat, center=0, square=True, annot=False, cmap='cold_hot', ax=ax_cur\n )\n", (7147, 7221), True, 'import seaborn as sns\n'), ((3856, 3878), 
'numpy.array', 'np.array', (['cur_v_labels'], {}), '(cur_v_labels)\n', (3864, 3878), True, 'import numpy as np\n'), ((3721, 3733), 'numpy.isnan', 'np.isnan', (['vi'], {}), '(vi)\n', (3729, 3733), True, 'import numpy as np\n'), ((5582, 5610), 'numpy.percentile', 'np.percentile', (['self.X_mad', 'i'], {}), '(self.X_mad, i)\n', (5595, 5610), True, 'import numpy as np\n')] |
import os
from IPython.display import clear_output
import time
import datetime
import numpy as np
import pickle
# Printer:
def print_average_score(total_scores, ratio=10):
    """Print a per-segment score summary of a training run.

    Splits `total_scores` into `ratio` equally sized segments and prints, for
    each segment, the cumulative episode index followed by the segment's
    scores summed after a fixed 1/1000 scaling (kept for compatibility with
    existing logs). `len(total_scores)` must be divisible by `ratio`.
    """
    scores = np.array(total_scores)
    segment_length = len(total_scores) // ratio  # episodes per printed row
    print('\n********Average score per %d episodes********\n' % segment_length)
    for segment_index, segment in enumerate(np.split(scores, ratio), start=1):
        print(segment_index * segment_length, ": ", str(sum(segment / 1000)))
def print_training_progress(i, ep_score, scores_history, window=100, trailing=True, eps=None, ep_start_time=None):
    """Print one episode's progress line, plus a running-average line.

    Args:
        i: zero-based episode index.
        ep_score: score of the episode that just finished.
        scores_history: sequence of all episode scores so far.
        window: number of trailing episodes to average over.
        trailing: if True, print the average every episode; otherwise only
            every `window`-th episode.
        eps: optional exploration epsilon to include in the printout.
        ep_start_time: optional datetime; when given, elapsed runtime is
            printed without sub-second precision.

    Returns:
        Mean of the last `window` scores once at least `window` episodes have
        completed, otherwise None.
    """
    runtime_part = ''
    if ep_start_time is not None:
        elapsed = str(datetime.datetime.now() - ep_start_time).split('.')[0]
        runtime_part = '; runtime: %s' % elapsed
    # '%.2f' stays safe when ep_score is a float NaN (unlike '%d').
    print('Episode: %d ;' % (i + 1), 'score: %.2f' % ep_score, runtime_part)
    epsilon_part = '' if eps is None else 'epsilon %.3f' % eps
    completed = i + 1
    if completed < window:
        return None
    avg_score = np.mean(scores_history[-window:])
    if trailing:  # report after every episode
        print('trailing %d episodes ;' % window,
              'average score %.3f ;' % avg_score,
              epsilon_part)
    elif completed % window == 0:  # report once per window
        print('episodes: %d - %d ;' % (completed + 1 - window, completed),
              'average score %.3f ;' % avg_score,
              epsilon_part)
    return avg_score
def print_policy(Q, policy):
    """Print every state's chosen action alongside its Q-value.

    Args:
        Q: mapping keyed by (state, action) pairs to values.
        policy: mapping from state to the action chosen in that state.
    """
    print('\n', 'Policy', '\n')
    for state, action in policy.items():
        print('s', state, 'a', action, ' - ', '%.3f' % Q[state, action])
def keras_print_model_info(keras_model):
    """Print the total and per-layer trainable/non-trainable parameter counts.

    Args:
        keras_model: a Keras model exposing count_params() and .layers.
    """
    per_layer_params = [layer.count_params() for layer in keras_model.layers]
    total_params = keras_model.count_params()
    print('Model info - total params: %d ; layers params: %s' % (
        total_params, per_layer_params
    ))
# Calculator:
# Epsilon decay schedule identifiers:
EPS_DEC_LINEAR = 0
EPS_DEC_EXPONENTIAL = 1
EPS_DEC_EXPONENTIAL_TIME_RELATED = 2
# EPS_DEC_QUADRATIC = 4
def decrement_eps(eps_current, eps_min, eps_dec, eps_dec_type, eps_max=None, t=None):
    """Return the next exploration epsilon under the selected decay schedule.

    Linear subtracts `eps_dec`; exponential multiplies by it; both are clamped
    from below at `eps_min`. The time-related exponential schedule (which
    requires `eps_max` and `t`) is an absolute formula of `t` and is returned
    without clamping.
    """
    if (eps_dec_type == EPS_DEC_EXPONENTIAL_TIME_RELATED
            and eps_max is not None and t is not None):
        return eps_min + (eps_max - eps_min) * np.exp(-eps_dec * t)  # t == i
    if eps_dec_type == EPS_DEC_EXPONENTIAL:
        candidate = eps_current * eps_dec  # e.g. eps_dec = 0.996
    else:  # EPS_DEC_LINEAR (and any unrecognized type) falls back to linear
        candidate = eps_current - eps_dec
    return max(candidate, eps_min)
def calculate_standardized_returns_of_consecutive_episodes(memory_r, memory_terminal, GAMMA):
    """Compute discounted returns over a buffer of concatenated episodes.

    Walks the reward buffer backwards, resetting the running return to 0 at
    every terminal flag so returns never leak across episode boundaries, then
    standardizes the whole buffer to N(0, 1) via `standardize`.

    Args:
        memory_r: sequence of per-step rewards.
        memory_terminal: parallel sequence of episode-end flags.
        GAMMA: discount factor.

    Returns:
        float32 array of standardized discounted returns, same length as
        `memory_r`.
    """
    returns = np.zeros_like(memory_r, dtype=np.float32)
    running_return = 0
    for step in reversed(range(len(memory_r))):
        if memory_terminal[step]:
            running_return = 0
        running_return = GAMMA * running_return + memory_r[step]
        returns[step] = running_return
    return standardize(returns)
# Tester:
def run_method(custom_env, episodes, choose_action):
    """Run `episodes` evaluation episodes on `custom_env` using `choose_action`.

    Args:
        custom_env: environment wrapper exposing `.env` (a gym-style env),
            `get_state()`, and the evaluation-tuple helper methods used below.
        episodes: number of episodes to run.
        choose_action: callable mapping a state to an action.

    Returns:
        (total_scores, total_accumulated_scores): per-episode scores and the
        running cumulative score across episodes, both numpy arrays of length
        `episodes`.
    """
    env = custom_env.env
    print('\n', 'Test Started', '\n')
    start_time = datetime.datetime.now()
    total_scores = np.zeros(episodes)
    total_accumulated_scores = np.zeros(episodes)
    accumulated_score = 0
    # NOTE(review): `eval` shadows the builtin; kept as-is (doc-only change).
    eval = custom_env.get_evaluation_tuple()
    for i in range(episodes):
        done = False
        ep_steps = 0
        ep_score = 0
        observation = env.reset()
        s = custom_env.get_state(observation)
        while not done:
            a = choose_action(s)
            observation_, reward, done, info = env.step(a)
            # Let the wrapper accumulate task-specific evaluation statistics.
            eval = custom_env.update_evaluation_tuple(i + 1, reward, done, eval)
            ep_steps += 1
            ep_score += reward
            accumulated_score += reward
            s_ = custom_env.get_state(observation_)
            observation, s = observation_, s_
        total_scores[i] = ep_score
        total_accumulated_scores[i] = accumulated_score
        print_training_progress(i, ep_score, total_scores)
    # Runtime is printed without sub-second precision (split on '.').
    print('\n', 'Test Ended ~~~ Episodes: %d ~~~ Runtime: %s' %
          (episodes, str(datetime.datetime.now() - start_time).split('.')[0]), '\n')
    custom_env.analyze_evaluation_tuple(eval, episodes)
    return total_scores, total_accumulated_scores
# Watcher:
def watch_method(custom_env, episodes, choose_action, is_toy_text=False):
    """Render `episodes` episodes of a policy for human viewing.

    Args:
        custom_env: environment wrapper exposing `.env` and `get_state()`.
        episodes: number of episodes to render.
        choose_action: callable mapping a state to an action.
        is_toy_text: if True, assumes a text-rendered (toy-text) env and adds
            sleeps plus notebook-output clearing between frames.
    """
    env = custom_env.env
    for i in range(episodes):
        done = False
        ep_steps = 0
        ep_score = 0
        observation = env.reset()
        s = custom_env.get_state(observation)
        if is_toy_text:
            print('\n*****EPISODE ', i + 1, '*****\n')
            time.sleep(1)
            clear_output(wait=True)
        env.render()
        if is_toy_text:
            time.sleep(0.3)
        while not done:
            a = choose_action(s)
            observation_, reward, done, info = env.step(a)
            ep_steps += 1
            ep_score += reward
            s_ = custom_env.get_state(observation_)
            observation, s = observation_, s_
            # Clear the previous text frame before drawing the next one.
            if is_toy_text:
                clear_output(wait=True)
            env.render()
            if is_toy_text:
                time.sleep(0.3)
        print('Episode Score:', ep_score)
        if is_toy_text:
            time.sleep(3)
            clear_output(wait=True)
    env.close()
# SaverLoader:
def pickle_load(file_name, directory=''):
    """Load and return a pickled object from `directory + file_name + '.pkl'`.

    SECURITY NOTE: pickle deserialization can execute arbitrary code; only
    load files produced by trusted sources (e.g. our own `pickle_save`).
    """
    path = directory + file_name + '.pkl'
    with open(path, 'rb') as in_file:  # rb = read binary
        return pickle.load(in_file)  # e.g. [X_train, y_train]
def pickle_save(var, file_name, directory=''):
    """Serialize `var` to `directory + file_name + '.pkl'`."""
    path = directory + file_name + '.pkl'
    with open(path, 'wb') as out_file:  # wb = write binary
        pickle.dump(var, out_file)  # e.g. [X_train, y_train]
# General:
def rescale(np_array, max_value=1, min_value=-1, x_max=255, x_min=0):
    """Linearly map data from [x_min, x_max] onto [min_value, max_value].

    Default: rescaling pixel values [0,255] to a scale of: [-1,1].
    """
    fraction = (np_array - x_min) / (x_max - x_min)
    return fraction * (max_value - min_value) + min_value
def normalize(np_array, x_max=255, x_min=0):
    """Linearly map data from [x_min, x_max] onto [0, 1].

    Default: normalizing pixel values [0,255].
    """
    span = x_max - x_min
    return (np_array - x_min) / span
def standardize(np_array):
    """Standardize data to zero mean and unit variance, i.e. N(0, 1).

    A zero standard deviation (constant input) is replaced by 1 so the
    division never fails; the result is then all zeros.
    """
    center = np.mean(np_array)
    spread = np.std(np_array)
    return (np_array - center) / (spread if spread != 0 else 1)
def query_env(env):
    """Print a human-readable summary of a gym environment's spec.

    Spec attributes that older gym versions exposed but newer releases removed
    (`max_episode_seconds`, `timestep_limit`, `trials`) are read with getattr
    so the summary degrades gracefully instead of raising AttributeError —
    consistent with how `_local_only` and `_kwargs` were already guarded.
    """
    spec = env.spec
    print(
        'Environment Id -', spec.id, '\n',  # id (str): The official environment ID
        # nondeterministic (bool): Whether this environment is non-deterministic even after seeding:
        'Non-Deterministic -', spec.nondeterministic, '\n',
        'Observation Space -', env.observation_space, '\n',
        'Action Space -', env.action_space, '\n',
        'Max Episode Seconds -', getattr(spec, 'max_episode_seconds', 'not defined'), '\n',
        # max_episode_steps (Optional[int]): The maximum number of steps that an episode can consist of
        'Max Episode Steps -', getattr(spec, 'max_episode_steps', 'not defined'), '\n',
        'Reward Range -', env.reward_range, '\n',
        # reward_threshold (Optional[int]): The reward threshold before the task is considered solved
        'Reward Threshold -', getattr(spec, 'reward_threshold', 'not defined'), '\n',
        'TimeStep Limit -', getattr(spec, 'timestep_limit', 'not defined'), '\n',
        'Trials -', getattr(spec, 'trials', 'not defined'), '\n',
        'Local Only -', getattr(spec, '_local_only', 'not defined'), '\n',
        'kwargs -', getattr(spec, '_kwargs', '')  # kwargs (dict): The kwargs to pass to the environment class
    )
def make_sure_dir_exists(path_dir):
    """Create `path_dir` (including parents) if it does not already exist.

    Uses `exist_ok=True` so a directory created concurrently between an
    existence check and the mkdir (the TOCTOU race in the previous
    `exists()` + `makedirs()` version) cannot raise FileExistsError.
    """
    os.makedirs(path_dir, exist_ok=True)
| [
"pickle.dump",
"numpy.zeros_like",
"os.makedirs",
"numpy.std",
"numpy.zeros",
"os.path.exists",
"time.sleep",
"numpy.mean",
"numpy.array",
"pickle.load",
"numpy.exp",
"IPython.display.clear_output",
"datetime.datetime.now"
] | [((3212, 3253), 'numpy.zeros_like', 'np.zeros_like', (['memory_r'], {'dtype': 'np.float32'}), '(memory_r, dtype=np.float32)\n', (3225, 3253), True, 'import numpy as np\n'), ((3635, 3658), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (3656, 3658), False, 'import datetime\n'), ((3678, 3696), 'numpy.zeros', 'np.zeros', (['episodes'], {}), '(episodes)\n', (3686, 3696), True, 'import numpy as np\n'), ((3728, 3746), 'numpy.zeros', 'np.zeros', (['episodes'], {}), '(episodes)\n', (3736, 3746), True, 'import numpy as np\n'), ((6984, 7001), 'numpy.mean', 'np.mean', (['np_array'], {}), '(np_array)\n', (6991, 7001), True, 'import numpy as np\n'), ((7012, 7028), 'numpy.std', 'np.std', (['np_array'], {}), '(np_array)\n', (7018, 7028), True, 'import numpy as np\n'), ((292, 314), 'numpy.array', 'np.array', (['total_scores'], {}), '(total_scores)\n', (300, 314), True, 'import numpy as np\n'), ((1170, 1203), 'numpy.mean', 'np.mean', (['scores_history[-window:]'], {}), '(scores_history[-window:])\n', (1177, 1203), True, 'import numpy as np\n'), ((6028, 6045), 'pickle.load', 'pickle.load', (['file'], {}), '(file)\n', (6039, 6045), False, 'import pickle\n'), ((6240, 6262), 'pickle.dump', 'pickle.dump', (['var', 'file'], {}), '(var, file)\n', (6251, 6262), False, 'import pickle\n'), ((8301, 8325), 'os.path.exists', 'os.path.exists', (['path_dir'], {}), '(path_dir)\n', (8315, 8325), False, 'import os\n'), ((8335, 8356), 'os.makedirs', 'os.makedirs', (['path_dir'], {}), '(path_dir)\n', (8346, 8356), False, 'import os\n'), ((5167, 5180), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (5177, 5180), False, 'import time\n'), ((5193, 5216), 'IPython.display.clear_output', 'clear_output', ([], {'wait': '(True)'}), '(wait=True)\n', (5205, 5216), False, 'from IPython.display import clear_output\n'), ((5274, 5289), 'time.sleep', 'time.sleep', (['(0.3)'], {}), '(0.3)\n', (5284, 5289), False, 'import time\n'), ((5795, 5808), 'time.sleep', 'time.sleep', (['(3)'], {}), 
'(3)\n', (5805, 5808), False, 'import time\n'), ((5821, 5844), 'IPython.display.clear_output', 'clear_output', ([], {'wait': '(True)'}), '(wait=True)\n', (5833, 5844), False, 'from IPython.display import clear_output\n'), ((5607, 5630), 'IPython.display.clear_output', 'clear_output', ([], {'wait': '(True)'}), '(wait=True)\n', (5619, 5630), False, 'from IPython.display import clear_output\n'), ((5700, 5715), 'time.sleep', 'time.sleep', (['(0.3)'], {}), '(0.3)\n', (5710, 5715), False, 'import time\n'), ((2938, 2958), 'numpy.exp', 'np.exp', (['(-eps_dec * t)'], {}), '(-eps_dec * t)\n', (2944, 2958), True, 'import numpy as np\n'), ((816, 839), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (837, 839), False, 'import datetime\n'), ((4622, 4645), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (4643, 4645), False, 'import datetime\n')] |
#!/usr/bin/python
# -*- coding:utf8 -*-
import numpy as np
import math
import Control_Exp1001 as CE
import os
import json
from Control_Exp1001.demo.thickener_noise_chinese.thickener_chinese import Thickener
from Control_Exp1001.common.replay.replay_buffer import ReplayBuffer
from Control_Exp1001.common.action_noise.no_exploration import No_Exploration
from Control_Exp1001.demo.thickener_noise_chinese.controllers.value_iterate import VI
from Control_Exp1001.demo.thickener_noise_chinese.controllers.hdp import HDP
import torch
import random
from Control_Exp1001.common.penaltys.quadratic import Quadratic
import matplotlib.pyplot as plt
from Control_Exp1001.demo.thickener_noise_chinese.common.one_round_exp import OneRoundExp
from Control_Exp1001.demo.thickener_noise_chinese.common.one_round_evaluation import OneRoundEvaluation
# Quadratic penalty weights (commented-out lines preserve previously tried values).
penalty_para = {
    #"weight_matrix": [0, 0.002],
    "weight_matrix": [0, 0.004],
    "S": [0.0001, 0.0008],
    #"S": [0.0003, 0.0024],
    #"S": [0.0000, 0.000],
}
# Thickener simulation parameters (passed to Thickener as **kwargs in run_vi).
thickner_para = {
    "dt":1,
    "noise_in": False,
    "noise_p": 0.002,
    "noise_type": 2,
    'time_length': 20,# each thickener simulation step covers 20 seconds
}
from Control_Exp1001.demo.thickener_noise_chinese.common import exp_name
exp_name.set_exp_name('VI_Replay')
EXP_NAME = exp_name.get_exp_name()
# Directory where experiment images are written; created on first run.
img_path = os.path.join('../images',EXP_NAME)
if not os.path.exists(img_path):
    os.mkdir(img_path)
def new_vi(capacity=2, batch_size=2):
    """Build a VI (value-iteration) controller with a pre-trained model.

    Args:
        capacity: replay-buffer capacity.
        batch_size: sample size drawn from the buffer per update.

    Returns:
        A VI controller whose neural system-identification model has been
        trained before it is returned.
    """
    capacity = capacity  # no-op self-assignment, kept as-is (doc-only change)
    predict_round=3000
    u_optim='adam'
    gamma=0.6
    replay_vi = ReplayBuffer(capacity=capacity)
    # Separate noisy environment used only while training the controller.
    env_VI = Thickener(
        noise_p=0.03,
        noise_in=True,
    )
    exploration = No_Exploration()
    print('make new vi controller')
    vi = VI(
        replay_buffer = replay_vi,
        u_bounds = env_VI.u_bounds,
        #exploration = None,
        exploration = exploration,
        env=env_VI,
        predict_training_rounds=predict_round,
        gamma=gamma,
        batch_size = batch_size,
        predict_batch_size=32,
        model_nn_error_limit = 0.0008,
        critic_nn_error_limit = 0.001,
        actor_nn_error_limit = 0.001,
        actor_nn_lr = 0.005,
        critic_nn_lr = 0.01,
        model_nn_lr = 0.01,
        indice_y = None,
        indice_y_star = None,
        indice_c=None,
        hidden_model = 10,
        hidden_critic = 14,
        hidden_actor = 14,
        predict_epoch= 30,
        Nc=1000,
        u_optim=u_optim,
        img_path=EXP_NAME
    )
    env_VI.reset()
    # Pre-train the neural system-identification model before control starts.
    vi.train_identification_model()
    #vi.test_predict_model(test_rounds=100)
    return vi
def run_vi(rounds=1000, seed=None, name='VI', capacity=2, batch_size=2,
           predict_round=3000, u_optim='adam'):
    """Run one VI (value-iteration) control experiment on the thickener env.

    Args:
        rounds: number of control steps for the experiment.
        seed: RNG seed shared by torch/numpy/random. The previous default,
            `seed=random.randint(...)` in the signature, was evaluated once at
            import time, so every call without an explicit seed reused the
            same value; `None` now draws a fresh seed per call.
        name: label used when reporting/plotting the run.
        capacity: replay-buffer capacity forwarded to `new_vi`.
        batch_size: replay sample size forwarded to `new_vi`.
        predict_round: accepted for interface compatibility (NOTE: not
            forwarded to `new_vi`, which uses its own fixed value).
        u_optim: accepted for interface compatibility (not forwarded).

    Returns:
        The experiment result object produced by OneRoundExp.run().
    """
    if seed is None:
        seed = random.randint(0, 1000000)
    print('seed :', seed)
    torch.manual_seed(seed)
    np.random.seed(seed)
    random.seed(seed)
    vi = new_vi(capacity=capacity, batch_size=batch_size)
    penalty = Quadratic(**penalty_para)
    thickner_para['random_seed'] = seed  # NOTE: mutates the module-level dict
    env_vi = Thickener(
        penalty_calculator=penalty,
        **thickner_para,
    )
    res1 = OneRoundExp(controller=vi, env=env_vi, max_step=rounds, exp_name=name).run()
    # Report the average number of action-iteration steps per control round.
    print(name, ':', vi.u_iter_times * 1.0 / rounds)
    return res1
if __name__ == '__main__':
    # NOTE(review): `round` shadows the builtin; kept as-is (doc-only change).
    round = 1600
    predict_round=800
    res_list = []
    # A random seed is drawn but immediately overridden with a fixed value so
    # both runs below are reproducible and share identical conditions.
    rand_seed = np.random.randint(0,10000000)
    rand_seed = 9726164
    # Run 1: no experience replay (buffer capacity and batch size of 1).
    res_list.append(
        run_vi(rounds=round,seed=rand_seed, name='HCNVI-无经验回放', predict_round=predict_round, capacity=1, batch_size=1))
    # Run 2: experience replay with buffer capacity and batch size of 2.
    res_list.append(
        run_vi(rounds=round,seed=rand_seed, name='HCNVI-经验回放数量为2', predict_round=predict_round, capacity=2, batch_size=2))
    eval_res = OneRoundEvaluation(res_list=res_list)
    eval_res.plot_all()
| [
"os.mkdir",
"Control_Exp1001.demo.thickener_noise_chinese.controllers.value_iterate.VI",
"numpy.random.seed",
"random.randint",
"Control_Exp1001.demo.thickener_noise_chinese.thickener_chinese.Thickener",
"torch.manual_seed",
"Control_Exp1001.demo.thickener_noise_chinese.common.one_round_exp.OneRoundExp"... | [((1211, 1245), 'Control_Exp1001.demo.thickener_noise_chinese.common.exp_name.set_exp_name', 'exp_name.set_exp_name', (['"""VI_Replay"""'], {}), "('VI_Replay')\n", (1232, 1245), False, 'from Control_Exp1001.demo.thickener_noise_chinese.common import exp_name\n'), ((1257, 1280), 'Control_Exp1001.demo.thickener_noise_chinese.common.exp_name.get_exp_name', 'exp_name.get_exp_name', ([], {}), '()\n', (1278, 1280), False, 'from Control_Exp1001.demo.thickener_noise_chinese.common import exp_name\n'), ((1292, 1327), 'os.path.join', 'os.path.join', (['"""../images"""', 'EXP_NAME'], {}), "('../images', EXP_NAME)\n", (1304, 1327), False, 'import os\n'), ((1334, 1358), 'os.path.exists', 'os.path.exists', (['img_path'], {}), '(img_path)\n', (1348, 1358), False, 'import os\n'), ((1364, 1382), 'os.mkdir', 'os.mkdir', (['img_path'], {}), '(img_path)\n', (1372, 1382), False, 'import os\n'), ((1519, 1550), 'Control_Exp1001.common.replay.replay_buffer.ReplayBuffer', 'ReplayBuffer', ([], {'capacity': 'capacity'}), '(capacity=capacity)\n', (1531, 1550), False, 'from Control_Exp1001.common.replay.replay_buffer import ReplayBuffer\n'), ((1564, 1602), 'Control_Exp1001.demo.thickener_noise_chinese.thickener_chinese.Thickener', 'Thickener', ([], {'noise_p': '(0.03)', 'noise_in': '(True)'}), '(noise_p=0.03, noise_in=True)\n', (1573, 1602), False, 'from Control_Exp1001.demo.thickener_noise_chinese.thickener_chinese import Thickener\n'), ((1644, 1660), 'Control_Exp1001.common.action_noise.no_exploration.No_Exploration', 'No_Exploration', ([], {}), '()\n', (1658, 1660), False, 'from Control_Exp1001.common.action_noise.no_exploration import No_Exploration\n'), ((1707, 2232), 'Control_Exp1001.demo.thickener_noise_chinese.controllers.value_iterate.VI', 'VI', ([], {'replay_buffer': 'replay_vi', 'u_bounds': 'env_VI.u_bounds', 'exploration': 'exploration', 'env': 'env_VI', 'predict_training_rounds': 'predict_round', 
'gamma': 'gamma', 'batch_size': 'batch_size', 'predict_batch_size': '(32)', 'model_nn_error_limit': '(0.0008)', 'critic_nn_error_limit': '(0.001)', 'actor_nn_error_limit': '(0.001)', 'actor_nn_lr': '(0.005)', 'critic_nn_lr': '(0.01)', 'model_nn_lr': '(0.01)', 'indice_y': 'None', 'indice_y_star': 'None', 'indice_c': 'None', 'hidden_model': '(10)', 'hidden_critic': '(14)', 'hidden_actor': '(14)', 'predict_epoch': '(30)', 'Nc': '(1000)', 'u_optim': 'u_optim', 'img_path': 'EXP_NAME'}), '(replay_buffer=replay_vi, u_bounds=env_VI.u_bounds, exploration=\n exploration, env=env_VI, predict_training_rounds=predict_round, gamma=\n gamma, batch_size=batch_size, predict_batch_size=32,\n model_nn_error_limit=0.0008, critic_nn_error_limit=0.001,\n actor_nn_error_limit=0.001, actor_nn_lr=0.005, critic_nn_lr=0.01,\n model_nn_lr=0.01, indice_y=None, indice_y_star=None, indice_c=None,\n hidden_model=10, hidden_critic=14, hidden_actor=14, predict_epoch=30,\n Nc=1000, u_optim=u_optim, img_path=EXP_NAME)\n', (1709, 2232), False, 'from Control_Exp1001.demo.thickener_noise_chinese.controllers.value_iterate import VI\n'), ((2607, 2633), 'random.randint', 'random.randint', (['(0)', '(1000000)'], {}), '(0, 1000000)\n', (2621, 2633), False, 'import random\n'), ((2746, 2769), 'torch.manual_seed', 'torch.manual_seed', (['seed'], {}), '(seed)\n', (2763, 2769), False, 'import torch\n'), ((2774, 2794), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (2788, 2794), True, 'import numpy as np\n'), ((2799, 2816), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (2810, 2816), False, 'import random\n'), ((2889, 2914), 'Control_Exp1001.common.penaltys.quadratic.Quadratic', 'Quadratic', ([], {}), '(**penalty_para)\n', (2898, 2914), False, 'from Control_Exp1001.common.penaltys.quadratic import Quadratic\n'), ((2966, 3020), 'Control_Exp1001.demo.thickener_noise_chinese.thickener_chinese.Thickener', 'Thickener', ([], {'penalty_calculator': 'penalty'}), '(penalty_calculator=penalty, 
**thickner_para)\n', (2975, 3020), False, 'from Control_Exp1001.demo.thickener_noise_chinese.thickener_chinese import Thickener\n'), ((3298, 3328), 'numpy.random.randint', 'np.random.randint', (['(0)', '(10000000)'], {}), '(0, 10000000)\n', (3315, 3328), True, 'import numpy as np\n'), ((3652, 3689), 'Control_Exp1001.demo.thickener_noise_chinese.common.one_round_evaluation.OneRoundEvaluation', 'OneRoundEvaluation', ([], {'res_list': 'res_list'}), '(res_list=res_list)\n', (3670, 3689), False, 'from Control_Exp1001.demo.thickener_noise_chinese.common.one_round_evaluation import OneRoundEvaluation\n'), ((3056, 3126), 'Control_Exp1001.demo.thickener_noise_chinese.common.one_round_exp.OneRoundExp', 'OneRoundExp', ([], {'controller': 'vi', 'env': 'env_vi', 'max_step': 'rounds', 'exp_name': 'name'}), '(controller=vi, env=env_vi, max_step=rounds, exp_name=name)\n', (3067, 3126), False, 'from Control_Exp1001.demo.thickener_noise_chinese.common.one_round_exp import OneRoundExp\n')] |
import numpy as np
from gym.spaces import Box
from metaworld.envs.asset_path_utils import full_v2_path_for
from metaworld.envs.mujoco.sawyer_xyz.sawyer_xyz_env import (
SawyerXYZEnv,
_assert_task_is_set,
)
from pyquaternion import Quaternion
from metaworld.envs.mujoco.utils.rotation import euler2quat
class SawyerHammerEnvV2(SawyerXYZEnv):
    """Sawyer manipulation task: pick up a hammer and drive a nail into a box."""
    def __init__(self):
        # Minimum lift height above the hammer's resting position (used by
        # reset_model to derive self.heightTarget).
        liftThresh = 0.12
        hand_low = (-0.5, 0.40, 0.05)
        hand_high = (0.5, 1, 0.5)
        # Randomization bounds for the hammer's initial position.
        obj_low = (-0.1, 0.4, 0.0)
        obj_high = (0.1, 0.5, 0.0)
        # Goal bounds span a tiny box, so the goal is effectively fixed.
        goal_low = (0.2399, 0.7399, 0.109)
        goal_high = (0.2401, 0.7401, 0.111)
        super().__init__(
            self.model_name,
            hand_low=hand_low,
            hand_high=hand_high,
        )
        self.init_config = {
            "hammer_init_pos": np.array([0.07, 0.4, 0.2]),
            "hand_init_pos": np.array([0, 0.4, 0.2]),
        }
        self.goal = self.init_config["hammer_init_pos"]
        self.hammer_init_pos = self.init_config["hammer_init_pos"]
        self.hand_init_pos = self.init_config["hand_init_pos"]
        self.liftThresh = liftThresh
        self.max_path_length = 200
        self._random_reset_space = Box(np.array(obj_low), np.array(obj_high))
        self.goal_space = Box(np.array(goal_low), np.array(goal_high))
        # Filled in by reset_model(); distances used for reward shaping.
        self.max_nail_dist = None
        self.max_hammer_dist = None
        self.maxHammerDist = 0.2
    @property
    def model_name(self):
        """Path to the MuJoCo XML model for this task."""
        return full_v2_path_for("sawyer_xyz/sawyer_hammer.xml")
    @_assert_task_is_set
    def step(self, action):
        """Advance one control step.

        Always returns done=False (episodes are fixed-length); `info` carries
        the reward components and a success flag (nail within 5 cm of its
        target depth along y).
        """
        ob = super().step(action)
        reward, _, reachDist, pickRew, _, _, screwDist = self.compute_reward(action, ob)
        self.curr_path_length += 1
        info = {
            "reachDist": reachDist,
            "pickRew": pickRew,
            "epRew": reward,
            "goalDist": screwDist,
            "success": float(screwDist <= 0.05),
        }
        return ob, reward, False, info
    def _get_pos_objects(self):
        """Return hammer and nail body positions concatenated into one array."""
        return np.hstack(
            (self.get_body_com("hammer").copy(), self.get_body_com("nail_link").copy())
        )
    def _set_hammer_xyz(self, pos):
        """Teleport the hammer to `pos` and zero its velocity.

        NOTE(review): assumes qpos[9:12] / qvel[9:15] address the hammer's
        free joint in this model — confirm against the XML if the model
        changes.
        """
        qpos = self.data.qpos.flat.copy()
        qvel = self.data.qvel.flat.copy()
        qpos[9:12] = pos.copy()
        qvel[9:15] = 0
        self.set_state(qpos, qvel)
    def reset_model(self):
        """Reset hand, box/nail, and (optionally randomized) hammer position.

        Also caches the heights and maximum distances that compute_reward and
        future reward shaping rely on.
        """
        self._reset_hand()
        # Set position of box & nail (these are not randomized)
        self.sim.model.body_pos[self.model.body_name2id("box")] = np.array(
            [-0.24, 0.85, 0.0]
        )
        # Update _target_pos
        self._target_pos = self._get_site_pos("goal")
        # Randomize hammer position
        self.hammer_init_pos = (
            self._get_state_rand_vec()
            if self.random_init
            else self.init_config["hammer_init_pos"]
        )
        self._set_hammer_xyz(self.hammer_init_pos)
        # Update heights (for use in reward function)
        self.hammerHeight = self.get_body_com("hammer").copy()[2]
        self.heightTarget = self.hammerHeight + self.liftThresh
        # Update distances (for use in reward function)
        nail_init_pos = self._get_site_pos("nailHead")
        self.max_nail_dist = (self._target_pos - nail_init_pos)[1]
        self.max_hammer_dist = np.linalg.norm(
            np.array(
                [self.hammer_init_pos[0], self.hammer_init_pos[1], self.heightTarget]
            )
            - nail_init_pos
            + self.heightTarget
            + np.abs(self.max_nail_dist)
        )
        # close gripper at initial state
        # self.do_simulation([1, -1], self.frame_skip)
        return self._get_obs()
    def _reset_hand(self):
        """Reset the hand and clear the pick-completed flag."""
        super()._reset_hand()
        self.pickCompleted = False
    def compute_reward(self, actions, obs):
        """Compute the shaped reward for the current simulator state.

        Reward = drop penalty (-10 if the hammer falls below the nail head by
        more than 3 cm) + hammer-head-to-nail proximity term + a heavily
        weighted (x100) nail-progress term.

        Returns:
            A 7-element list kept for interface compatibility with step():
            [reward, 0, reachDist, 0, 0, hammerDist, screwDist].
        """
        hammerPos = obs[3:6]
        hammerHeadPos = self.data.get_geom_xpos("hammer_head").copy()
        hammerHandlePos = self.data.get_geom_xpos("hammer_handle").copy()
        objPos = self.data.site_xpos[self.model.site_name2id("nailHead")]
        rightFinger, leftFinger = (
            self._get_site_pos("rightEndEffector"),
            self._get_site_pos("leftEndEffector"),
        )
        fingerCOM = (rightFinger + leftFinger) / 2
        heightTarget = self.heightTarget
        hammerDist = np.linalg.norm(objPos - hammerHeadPos)
        # Remaining nail travel along y toward the target depth.
        screwDist = np.abs(objPos[1] - self._target_pos[1])
        reachDist = np.linalg.norm(hammerHandlePos - fingerCOM)
        rewards = 0
        # penalty for dropping the hammer
        drop_thresh = 0.03
        # import pdb; pdb.set_trace()
        if hammerPos[2] < objPos[2] - drop_thresh:
            rewards -= 10
        hammer_nail_dist_reward = 1 - np.tanh(hammerDist)
        rewards += hammer_nail_dist_reward
        nail_strike_reward = 1 - np.tanh(screwDist)
        nail_striike_weight = 100
        rewards += nail_striike_weight * nail_strike_reward
        return [rewards, 0, reachDist, 0, 0, hammerDist, screwDist]
| [
"numpy.abs",
"numpy.tanh",
"numpy.linalg.norm",
"numpy.array",
"metaworld.envs.asset_path_utils.full_v2_path_for"
] | [((1485, 1533), 'metaworld.envs.asset_path_utils.full_v2_path_for', 'full_v2_path_for', (['"""sawyer_xyz/sawyer_hammer.xml"""'], {}), "('sawyer_xyz/sawyer_hammer.xml')\n", (1501, 1533), False, 'from metaworld.envs.asset_path_utils import full_v2_path_for\n'), ((2549, 2577), 'numpy.array', 'np.array', (['[-0.24, 0.85, 0.0]'], {}), '([-0.24, 0.85, 0.0])\n', (2557, 2577), True, 'import numpy as np\n'), ((4361, 4399), 'numpy.linalg.norm', 'np.linalg.norm', (['(objPos - hammerHeadPos)'], {}), '(objPos - hammerHeadPos)\n', (4375, 4399), True, 'import numpy as np\n'), ((4420, 4459), 'numpy.abs', 'np.abs', (['(objPos[1] - self._target_pos[1])'], {}), '(objPos[1] - self._target_pos[1])\n', (4426, 4459), True, 'import numpy as np\n'), ((4480, 4523), 'numpy.linalg.norm', 'np.linalg.norm', (['(hammerHandlePos - fingerCOM)'], {}), '(hammerHandlePos - fingerCOM)\n', (4494, 4523), True, 'import numpy as np\n'), ((824, 850), 'numpy.array', 'np.array', (['[0.07, 0.4, 0.2]'], {}), '([0.07, 0.4, 0.2])\n', (832, 850), True, 'import numpy as np\n'), ((881, 904), 'numpy.array', 'np.array', (['[0, 0.4, 0.2]'], {}), '([0, 0.4, 0.2])\n', (889, 904), True, 'import numpy as np\n'), ((1215, 1232), 'numpy.array', 'np.array', (['obj_low'], {}), '(obj_low)\n', (1223, 1232), True, 'import numpy as np\n'), ((1234, 1252), 'numpy.array', 'np.array', (['obj_high'], {}), '(obj_high)\n', (1242, 1252), True, 'import numpy as np\n'), ((1284, 1302), 'numpy.array', 'np.array', (['goal_low'], {}), '(goal_low)\n', (1292, 1302), True, 'import numpy as np\n'), ((1304, 1323), 'numpy.array', 'np.array', (['goal_high'], {}), '(goal_high)\n', (1312, 1323), True, 'import numpy as np\n'), ((4770, 4789), 'numpy.tanh', 'np.tanh', (['hammerDist'], {}), '(hammerDist)\n', (4777, 4789), True, 'import numpy as np\n'), ((4867, 4885), 'numpy.tanh', 'np.tanh', (['screwDist'], {}), '(screwDist)\n', (4874, 4885), True, 'import numpy as np\n'), ((3545, 3571), 'numpy.abs', 'np.abs', (['self.max_nail_dist'], {}), 
'(self.max_nail_dist)\n', (3551, 3571), True, 'import numpy as np\n'), ((3361, 3440), 'numpy.array', 'np.array', (['[self.hammer_init_pos[0], self.hammer_init_pos[1], self.heightTarget]'], {}), '([self.hammer_init_pos[0], self.hammer_init_pos[1], self.heightTarget])\n', (3369, 3440), True, 'import numpy as np\n')] |
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implements several tensorflow graphs and capsulate them as Graph."""
from __future__ import division
import collections
import os
import time
import numpy as np
import tensorflow.compat.v1 as tf
from meta_reward_learning.semantic_parsing.nsm import data_utils
from meta_reward_learning.semantic_parsing.nsm import score_utils
from meta_reward_learning.semantic_parsing.nsm import tf_utils
from tensorflow.contrib import graph_editor as contrib_graph_editor
from tensorflow.contrib import rnn as contrib_rnn
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
# Name -> RNN cell class, selected by config string.
RNN_CELL_DICT = dict(
    rnn=contrib_rnn.RNNCell,
    lstm=contrib_rnn.BasicLSTMCell,
    layernorm_lstm=contrib_rnn.LayerNormBasicLSTMCell,
    gru=contrib_rnn.GRUCell)
# Name -> optimizer class, selected by config string.
OPTIMIZER_DICT = dict(
    sgd=tf.train.GradientDescentOptimizer,
    adam=tf.train.AdamOptimizer,
    momentum=tf.train.MomentumOptimizer,
    adagrad=tf.train.AdagradOptimizer,
    rmsprop=tf.train.RMSPropOptimizer)
# Name -> activation function, selected by config string.
ACTIVATION_DICT = dict(relu=tf.nn.relu, sigmoid=tf.nn.sigmoid, tanh=tf.nn.tanh)
# Graph replace fn
graph_replace = contrib_graph_editor.graph_replace
# Bind a variable length tensor with its sequence_length.
SeqTensor = collections.namedtuple('SeqTensor', ['tensor', 'sequence_length'])
def with_graph_variable_scope(func):
  """Decorator that runs `func` inside the owner's graph, container and scope.

  Expects `func` to be a method whose first positional argument is an object
  carrying `_graph` (a tf.Graph) and `vs` (a variable scope). Each call enters
  a per-process container named 'worker<pid>' so variables created by
  parallel worker processes do not collide.
  """
  def func_wrapper(*args, **kwargs):
    self = args[0]
    with self._graph.as_default():
      pid = os.getpid()
      container_name = 'worker{}'.format(pid)
      # print(container_name)
      with self._graph.container(container_name):
        with tf.variable_scope(self.vs):
          return func(*args, **kwargs)
  return func_wrapper
# Hack to create a control dependency for a op when it's already created
def first_before_second_noop(op_first, op_second):
  """Return an op equivalent to `op_second` that only runs after `op_first`.

  A control dependency cannot be attached to an op after it has been created,
  so `op_second` is grouped with a fresh no-op created *inside* the dependency
  context; running the returned group therefore waits on `op_first` first.
  """
  with tf.control_dependencies([op_first]):
    op_second = tf.group(op_second, tf.no_op())
  return op_second
class Graph(object):
"""A TensorFlow graph with simpler interface to interact with it.
The neural network architecture (basically all the
tensorflow code) should live within this class. A new
architecture (for example, Seq2seq) should implement a new
subclass (Seq2seqGraph).
"""
def __init__(self, name):
self.node_dict = {'summaries': []}
self._graph = tf.Graph()
self.vs_name = name
self.meta_learn = False
self.use_gpu = False
with tf.variable_scope(name) as vs:
self.vs = vs
@property
def graph(self):
return self._graph
@with_graph_variable_scope
def launch(self,
init_model_path='',
trainable_only=True,
ckpt_from_another=False,
init_score_path=None):
"""Launch and initialize the graph."""
if self.use_gpu:
n_gpu = 1
else:
n_gpu = 0
session_config = tf.ConfigProto(
device_count={'GPU': n_gpu},
allow_soft_placement=True, # False,
log_device_placement=False,
)
if n_gpu:
session_config.gpu_options.allow_growth = True
tf.logging.info('number of gpu used {}'.format(n_gpu))
self.session = tf.Session(graph=self._graph, config=session_config)
self.saver = tf.train.Saver(tf.global_variables())
init = tf.global_variables_initializer()
self.session.run(init)
def check_vars(name):
name_list = name.split('/')
for x in ['score_fn', 'Training']:
if x in name_list:
return False
return True
if trainable_only:
variables_to_restore = tf.trainable_variables()
variables_to_restore = [
v for v in variables_to_restore if check_vars(v.name)
]
saver = tf.train.Saver(variables_to_restore)
elif ckpt_from_another:
# TODO(rishabhagarwal): Hack for loading a model trained on cloud machine.
variables_to_restore = tf.global_variables()
variables_to_restore = [
v for v in variables_to_restore
if (check_vars(v.name) and v != self.node_dict['global_step'])
]
saver = tf.train.Saver(variables_to_restore)
else:
saver = self.saver
if init_model_path:
saver.restore(self.session, init_model_path)
if init_score_path:
score_variables = [
v for v in tf.global_variables() if 'score_fn' in v.name.split('/')
]
score_saver = tf.train.Saver(score_variables)
score_saver.restore(self.session, init_score_path)
self._graph.finalize()
return self.session
def restore(self, model_path):
self.saver.restore(self.session, model_path)
def save(self, model_path, global_step):
return self.saver.save(self.session, model_path, global_step)
def run(self, fetch_list, feed_dict, writer=None):
"""Main interface to interact with the tensorflow graph.
Args:
fetch_list: a list of names (strings) indicating the name of result
operations.
feed_dict: a dictionary with the names of the nodes as keys and the
corresponding values that are fed as values.
writer: a tensorflow summary writer
Returns:
outputs: a dictionary with the names in the fetch_list as
keys, and the outputs from the executing graph as values.
"""
fetch_dict = dict([(name, self.node_dict[name])
for name in fetch_list
if name in self.node_dict])
if writer is not None:
fetch_dict['summaries'] = self.node_dict['summaries']
fetch_dict['global_step'] = self.node_dict['global_step']
outputs = self.session.run(fetch_dict, map_dict(self.node_dict, feed_dict))
if (writer is not None) and self._plot_summaries:
writer.add_summary(outputs['summaries'], outputs['global_step'])
writer.flush()
return outputs
  @with_graph_variable_scope
  def add_train(self,
                aux_loss_list=None,
                optimizer='adam',
                learning_rate=0.01,
                max_grad_norm=5.0,
                decay_after_n_steps=1000,
                decay_every_n_steps=1000,
                lr_decay_factor=1.0,
                debug=False,
                l2_coeff=0.0,
                adam_beta1=0.9,
                meta_lr=1e-3,
                momentum=0.9,
                plot_summaries=True,
                name='Training'):
    """Construct part of the graph that controls training (SGD optimization).

    Builds the total loss (main + weighted auxiliary losses + L2), clipped
    gradients, the optimizer update op, the optional meta-learning update for
    the score function, and tensorboard summaries; registers the resulting
    ops in self.node_dict.

    NOTE(review): decay_after_n_steps, decay_every_n_steps and
    lr_decay_factor are accepted but never read in this body — confirm
    whether LR decay was intentionally removed.
    """
    self.node_dict['max_batch_size'] = tf.placeholder(tf.int32, None)
    self._plot_summaries = plot_summaries
    with tf.variable_scope(name):
      global_step = tf.Variable(0, trainable=False, dtype=tf.int32)
      self.node_dict['global_step'] = global_step
      # Evaluation-only graphs need the global step but no optimizer ops.
      if self.eval_graph:
        return
      all_summaries = []
      batch_size = tf.cast(self.node_dict['max_batch_size'], dtype=tf.float32)
      # No need to divide by batch size since the scores are already normalized
      loss = self.node_dict['loss'] # / batch_size
      if self.meta_learn:
        # Also plot the un-meta-weighted loss for comparison on tensorboard.
        loss_original = self.node_dict['loss_nometa']
        all_summaries.append(
            tf.summary.scalar(self.vs_name + '/' + 'loss_orig', loss_original))
      all_summaries.append(tf.summary.scalar(self.vs_name + '/' + 'loss', loss))
      total_loss = loss
      if aux_loss_list is not None:
        for loss_name, w in aux_loss_list:
          if w: # Consider non-zero coefficients which can be negative too
            aux_loss = self.node_dict[loss_name]
            if loss_name == 'ent_reg':
              aux_loss *= -1 # Since we want to maximize the entropy
            aux_loss *= w / batch_size
            total_loss += aux_loss
            aux_loss_summary = tf.summary.scalar(self.vs_name + '/' + loss_name,
                                                 aux_loss)
            all_summaries.append(aux_loss_summary)
      if debug:
        # tf.Print passes total_loss through unchanged but logs the listed
        # tensors every time the loss is evaluated.
        total_loss = tf.Print(
            total_loss, [self.node_dict['sequence_loss']],
            message='seq_loss:',
            summarize=10000)
        total_loss = tf.Print(
            total_loss, [self.node_dict['weights']],
            message='weights:',
            summarize=10000)
        total_loss = tf.Print(
            total_loss, [self.node_dict['targets'].tensor],
            message='targets:',
            summarize=10000)
        total_loss = tf.Print(
            total_loss, [self.node_dict['probs'].tensor],
            message='probs:',
            summarize=10000)
        total_loss = tf.Print(
            total_loss, [self.node_dict['logits'].tensor],
            message='logits:',
            summarize=10000)
        if self.meta_learn:
          total_loss = tf.Print(
              total_loss, [self.node_dict['scores']],
              message='scores:',
              summarize=10000)
      total_loss_summary = tf.summary.scalar(self.vs_name + '/' + 'total_loss',
                                             total_loss)
      all_summaries.append(total_loss_summary)
      # The non-negativity constraint keeps the learning rate valid if it is
      # ever assigned through update_lr.
      lr = tf.Variable(
          float(learning_rate),
          trainable=False,
          name='learning_rate',
          constraint=tf.keras.constraints.non_neg())
      new_lr = tf.placeholder(dtype=tf.float32, shape=(), name='new_lr')
      update_lr = lr.assign(new_lr)
      # NOTE(review): update_meta_lr reuses the same new_lr placeholder, so
      # feeding new_lr drives whichever assign op is fetched.
      meta_lr = tf.Variable(float(meta_lr), trainable=False)
      update_meta_lr = meta_lr.assign(new_lr)
      lr_summary = tf.summary.scalar(self.vs_name + '/' + 'learning_rate', lr)
      all_summaries.append(lr_summary)
      meta_hparams = []
      all_params = tf.get_collection(
          tf.GraphKeys.TRAINABLE_VARIABLES, scope=self.vs_name)
      # Score-function variables are optimized by the meta optimizer only.
      score_fn_vars = [v for v in all_params if 'score_fn' in v.name.split('/')]
      meta_vars = score_fn_vars + meta_hparams
      params = [v for v in all_params if v not in meta_vars]
      n_params = 0
      tf.logging.info('trainable parameters:')
      for tv in params:
        n_tv_params = np.product(tv.get_shape().as_list())
        n_params += n_tv_params
        tf.logging.info('{}: {}'.format(tv.name, n_tv_params))
        # L2-regularize only weight/kernel matrices (not biases).
        if 'weights' in tv.name or 'kernel' in tv.name:
          total_loss += tf.reduce_sum(tf.nn.l2_loss(tv)) * l2_coeff
      tf.logging.info(
          'total number of trainable parameters {}'.format(n_params))
      tf.logging.info('Calculate gradients wrt model params...')
      scores = self.node_dict['scores']
      log_scores = self.node_dict['log_scores']
      score_node = log_scores if self._use_log_scores else scores
      # stop_gradients blocks backprop through the scores here; the score
      # function is trained separately via the meta gradients below.
      gradients = tf.gradients(total_loss, params, stop_gradients=[score_node])
      clipped_gradients, grad_norm = tf.clip_by_global_norm(
          gradients, max_grad_norm)
      if optimizer == 'adam':
        tf.logging.info('adam beta1: {}'.format(adam_beta1))
        opt = OPTIMIZER_DICT[optimizer](lr, beta1=adam_beta1)
      elif optimizer == 'momentum':
        tf.logging.info('Using momentum optimizer')
        opt = OPTIMIZER_DICT[optimizer](lr, momentum=momentum)
      else:
        opt = OPTIMIZER_DICT[optimizer](lr)
      # Create the update op for theta (model parameters)
      update = opt.apply_gradients(
          zip(clipped_gradients, params), global_step=global_step)
      if self.meta_learn:
        t1 = time.time()
        if optimizer == 'momentum':
          # Fold the optimizer's accumulated momentum into the effective
          # gradient used for the one-step lookahead below.
          accum = [opt.get_slot(p, 'momentum') for p in params]
          grads = [
              momentum * acc + g for (acc, g) in zip(accum, clipped_gradients)
          ]
        else:
          grads = clipped_gradients
        # Create the meta training loss
        # One virtual SGD step: theta' = theta - lr * grad; the validation
        # loss is evaluated at theta' via graph_replace.
        updated_params = [p - lr * g for p, g in zip(params, grads)]
        replaced_params = dict(zip([p.value() for p in params], updated_params))
        self.create_val_meta_loss(replaced_params)
        val_meta_loss = self.node_dict['val_meta_loss']
        tf.logging.info('Creating meta optimizer...')
        meta_opt = tf.train.AdamOptimizer(learning_rate=meta_lr)
        # Calculate the partial gradients wrt scores only for the meta
        # validation loss
        # Used score_node because tensorflow can't handle indirect dependency
        # structure for calculating gradients
        # Example: y = x + 1; z = x + 2; tf.gradients(y, z) --> returns None
        score_grads = tf.gradients(val_meta_loss, score_node)
        clipped_score_grads, score_grad_norm = tf.clip_by_global_norm(
            score_grads, max_grad_norm)
        # Optimize only the score function variables using the meta optimizer
        # Chain rule: d(val_loss)/d(score_fn_vars) via the clipped score grads.
        meta_gradients = tf.gradients([score_node],
                                      score_fn_vars,
                                      grad_ys=clipped_score_grads)
        meta_clipped_gradients, meta_grad_norm = tf.clip_by_global_norm(
            meta_gradients, max_grad_norm)
        if meta_hparams:
          meta_hparams_grad = tf.gradients(val_meta_loss, meta_hparams)
          meta_clipped_gradients += meta_hparams_grad
        update_score_fn = meta_opt.apply_gradients(
            zip(meta_clipped_gradients, meta_vars))
        self.node_dict.update(meta_train=update_score_fn)
        # Add the control dependency so that score fn is updated first before
        # updating the model parameters, doesn't work due to tf restrictions
        # update = first_before_second_noop(update_score_fn, update)
        t2 = time.time()
        tf.logging.info('Time taken for meta learning setup {}'.format(t2 - t1))
      grad_norm_summary = tf.summary.scalar(self.vs_name + '/' + 'grad_norm',
                                            grad_norm)
      all_summaries.append(grad_norm_summary)
      # Summaries for meta learning related stuff
      if self.meta_learn:
        val_loss_summary = tf.summary.scalar(
            'val_loss', self.node_dict['val_loss'], family='meta_train')
        meta_grad_norm_summary = tf.summary.scalar(
            'meta_grad_norm', meta_grad_norm, family='meta_train')
        score_grad_norm_summary = tf.summary.scalar(
            'score_grad_norm', score_grad_norm, family='meta_train')
        scores_summary = tf.summary.histogram(
            'scores', scores, family='meta_train')
        all_summaries.extend([
            val_loss_summary, meta_grad_norm_summary, score_grad_norm_summary,
            scores_summary
        ])
        # Code for logging the feature weights for the linear softmax case
        if self.score_fn.score_model == 'linear':
          weight_summaries = []
          for v in score_fn_vars:
            tensor_name = v.name.split('/')[-1]
            if 'weights' in tensor_name:
              weight_summaries += [
                  tf.summary.scalar(
                      'w{}'.format(i), v[i], family='linear_score_fn')
                  for i in range(self.score_fn.num_features)
              ]
            elif 'alpha' in tensor_name:
              weight_summaries.append(
                  tf.summary.scalar('alpha', v, family='linear_score_fn'))
            elif 'bias' in tensor_name:
              weight_summaries.append(
                  tf.summary.scalar('bias', v[0], family='linear_score_fn'))
          all_summaries.extend(weight_summaries)
      if debug:
        _, clipped_grad_norm = tf.clip_by_global_norm(clipped_gradients,
                                                      max_grad_norm)
        clipped_grad_norm_summary = tf.summary.scalar(
            self.vs_name + '/' + 'clipped_grad_norm', clipped_grad_norm)
        n_summary = tf.summary.scalar(self.vs_name + '/' + 'n',
                                      self.node_dict['n'])
        seq_loss_summary = tf.summary.histogram(self.vs_name + '/' + 'seq_loss',
                                                self.node_dict['sequence_loss'])
        weights_summary = tf.summary.histogram(self.vs_name + '/' + 'weights',
                                               self.node_dict['weights'])
        all_summaries += [
            clipped_grad_norm_summary, n_summary, seq_loss_summary,
            weights_summary
        ]
        if self.meta_learn:
          total_loss = tf.Print(
              total_loss, [score_grads],
              message='score_grads:',
              summarize=10000)
      batch_size_summary = tf.summary.scalar(self.vs_name + '/' + 'batch_size',
                                             self.node_dict['batch_size'])
      all_summaries.append(batch_size_summary)
      if 'ent_reg' in self.node_dict:
        # NOTE(review): 'polic_entropy' looks like a typo of 'policy_entropy';
        # kept as-is since dashboards may already reference this tag.
        ent_reg_summary = tf.summary.scalar(
            self.vs_name + '/' + 'polic_entropy',
            (self.node_dict['ent_reg'] / tf.cast(self.node_dict['n'],
                                                 tf.float32)))
        ent_reg_ppl_summary = tf.summary.scalar(
            self.vs_name + '/' + 'policy_entropy_ppl',
            tf.exp(self.node_dict['ent_reg'] / tf.cast(self.node_dict['n'],
                                                       tf.float32)))
        all_summaries.append(ent_reg_summary)
        all_summaries.append(ent_reg_ppl_summary)
      if self._plot_summaries:
        for s in self.node_dict['summaries']:
          all_summaries.append(s)
        merged = tf.summary.merge(inputs=all_summaries)
      else:
        # Summaries disabled: provide a harmless op so run() can still fetch.
        merged = tf.no_op(name='no_summary_op')
      self.node_dict.update(
          train=update,
          global_step=global_step,
          summaries=merged,
          update_lr=update_lr,
          update_meta_lr=update_meta_lr,
          new_lr=new_lr)
@property
def final_state(self):
return 'final_state'
@property
def outputs(self):
return 'outputs'
@property
def initial_state(self):
return 'initial_state'
@property
def en_outputs(self):
return 'en_outputs'
@property
def n_examples(self):
return 'n_examples'
@property
def prediction_probs(self):
return 'probs'
@property
def samples(self):
return 'samples'
@property
def predictions(self):
return 'predictions'
@property
def en_initial_state(self):
return 'en_initial_state'
def add_outputs(self, output_type, output_config):
'Create part of the graph that compute final outputs from the RNN output.'
if output_type == 'softmax':
self.add_softmax_outputs(**output_config)
# self.add_val_softmax_outputs(**output_config)
elif output_type == 'regression':
self.add_regression_outputs(**output_config)
else:
raise NotImplementedError(
'Output type {} not supported!'.format(output_type))
  @with_graph_variable_scope
  def add_softmax_outputs(self,
                          output_vocab_size=None,
                          use_logits=None,
                          sampling_strategy='probs',
                          name='Softmax'):
    """Add softmax layer on top of RNN outputs.

    Builds logits/probs/predictions/samples from the RNN outputs, entropy
    regularizers, and (for training graphs) the weighted sequence
    cross-entropy loss for the chosen sampling_strategy. Registers all
    resulting nodes in self.node_dict.
    """
    # maxlen is the (static) padded sequence length of the RNN outputs.
    maxlen = self.node_dict['outputs'].tensor.shape.as_list()[1]
    with tf.variable_scope(name):
      seq_targets = create_seq_inputs(
          shape=tf.TensorShape([None, maxlen]), dtype=tf.int32)
      if self.meta_learn:
        self.node_dict['val_targets'] = create_seq_inputs(
            shape=tf.TensorShape([None, maxlen]),
            dtype=tf.int32,
            name='val_targets')
      if use_logits:
        # Feeding logits instead of outputs (thus no linear transformation needed).
        logits, probs, predictions, samples, temperature = create_softmax_from_logits(
            self.node_dict['outputs'].tensor)
      else:
        logits, probs, predictions, samples, temperature = create_softmax(
            self.node_dict['outputs'].tensor,
            output_vocab_size=output_vocab_size)
      sequence_length = self.node_dict['outputs'].sequence_length
      # From openai baselines to avoid numerical issue.
      # Numerically stable entropy: subtract the max logit before exp.
      a0 = logits - tf.reduce_max(logits, axis=-1, keepdims=True)
      ea0 = tf.exp(a0)
      z0 = tf.reduce_sum(ea0, axis=-1, keepdims=True)
      p0 = ea0 / z0
      clipped_entropy = p0 * (tf.log(z0) - a0)
      # Mask out padded positions so they contribute no entropy.
      seq_entropy = (
          tf.reduce_sum(clipped_entropy, axis=-1) * tf.sequence_mask(
              sequence_length, maxlen=maxlen, dtype=tf.float32))
      policy_entropy = tf.reduce_sum(
          tf.reduce_sum(clipped_entropy, axis=-1) * tf.sequence_mask(
              sequence_length, maxlen=maxlen, dtype=tf.float32))
      seq_logits, seq_probs, seq_predictions, seq_samples = [
          SeqTensor(x, sequence_length)
          for x in (logits, probs, predictions, samples)
      ]
      # Compute the probs
      sequence_probs, sequence_logprobs, step_logprobs = create_probs(
          seq_logits.tensor, seq_targets.tensor, sequence_length)
      sequence_neg_logprobs = -1 * sequence_logprobs
      if not self.eval_graph:
        # Compute sequence cross entropy loss.
        with tf.name_scope('cross_entropy_loss'):
          weights = tf.placeholder(name='weights', shape=[None], dtype=tf.float32)
          baselines = tf.placeholder(
              name='baselines', shape=[None], dtype=tf.float32)
          # Code for using score_fn as true reward
          batch_size = self.node_dict['batch_size']
          with tf.variable_scope('score_fn', reuse=tf.AUTO_REUSE):
            scores, log_scores = self.score_fn.get_scores(n=batch_size)
          self.node_dict.update(scores=scores, log_scores=log_scores)
          # Gradients flow through log_scores for every strategy except plain
          # 'probs' with a non-linear-softmax score function.
          self._use_log_scores = (sampling_strategy !=
                                  'probs') or self.score_fn.is_linear_softmax
          if sampling_strategy != 'probs':
            weights_to_use = weights
            if sampling_strategy == 'reward':
              # Sampling according to the reward distribution
              unweighted_loss = tf_utils.dice(log_scores) * sequence_neg_logprobs
            elif sampling_strategy == 'probs_and_reward':
              # Sampling according to the distribution induced by product of
              # rewards and probs
              unweighted_loss = -tf_utils.dice(log_scores + sequence_logprobs)
            elif sampling_strategy == 'st_estimator':
              # Straight-through estimator: forward value 1.0, backward grads
              # through the weighted score term.
              weights_to_use = tf_utils.st_estimator(
                  1.0, weights * tf_utils.dice(log_scores))
              unweighted_loss = sequence_neg_logprobs
            elif sampling_strategy == 'urex':
              # The first half of the batch corresponds to sampling using the
              # scores while the second half of the batch is sampled using the
              # policy model
              loss_score_sampling = tf_utils.dice(
                  log_scores) * sequence_neg_logprobs
              loss_model_sampling = -tf_utils.dice(sequence_logprobs) * log_scores
              batch_mask = tf.sequence_mask(
                  lengths=batch_size // 2,
                  maxlen=batch_size,
                  dtype=tf.float32,
                  name='batch_mask')
              unweighted_loss = batch_mask * loss_score_sampling + \
                  (1.0 - batch_mask) * loss_model_sampling
          else:
            # dirac_delta = lambda x: tf.cond(
            #     tf.equal(x, 0.0), lambda: 1.0, lambda: 0.0)
            # scores_sum = tf.reduce_sum(scores)
            # scores_normalization = scores_sum + dirac_delta(scores_sum)
            # scores_to_use = scores / tf.stop_gradient(scores_normalization)
            if self.score_fn.is_linear_softmax:
              scores_to_use = log_scores
            else:
              scores_to_use = scores
            weights_to_use = weights * scores_to_use
            unweighted_loss = -tf_utils.dice(sequence_logprobs)
          sequence_loss = weights_to_use * unweighted_loss
          # if sampling_strategy == 'probs':
          #   xent_loss = tf.reduce_mean(sequence_loss)
          # else:
          xent_loss = tf.reduce_mean(sequence_loss)
          self.node_dict.update(
              sequence_loss=sequence_loss,
              loss=xent_loss,
              weights=weights,
              baselines=baselines)
          if self.meta_learn:
            # Create this loss to be used for creating val loss via
            # `graph_replace`, also used for plotting on tensorboard
            xent_loss_nometa = tf.reduce_mean(
                weights * sequence_neg_logprobs, name='loss_nometa')
            val_weights = tf.placeholder(
                name='val_weights', shape=[None], dtype=tf.float32)
            self.node_dict.update(
                val_weights=val_weights, loss_nometa=xent_loss_nometa)
    # Add new nodes to the node_dict.
    self.node_dict.update(
        targets=seq_targets,
        temperature=temperature,
        ent_reg=policy_entropy,
        seq_entropy=seq_entropy,
        probs=seq_probs,
        sequence_probs=sequence_probs,
        sequence_logprobs=sequence_logprobs,
        step_logprobs=step_logprobs,
        samples=seq_samples,
        predictions=seq_predictions,
        logits=seq_logits)
  def create_val_meta_loss(self, replaced_params):
    """Run graph replace to create the meta learning loss.

    Rewires a copy of the training loss so that (a) every training input
    placeholder is replaced by its 'val_*' counterpart and (b) model
    parameters are replaced by their one-SGD-step lookahead values
    (replaced_params), yielding the meta validation loss.
    """
    replacement_tuples = []
    for key in [
        'targets', 'inputs', 'en_inputs', 'en_input_features',
        'output_features', 'n_constants', 'constant_spans',
        'constant_value_embeddings', 'context', 'batch_size', 'weights'
    ]:
      if key not in self.node_dict:
        continue
      val_key = 'val_{}'.format(key)
      x, y = self.node_dict[key], self.node_dict[val_key]
      # x/y may be plain tensors, SeqTensor-style namedtuples, or namedtuples
      # whose .tensor is itself a tuple — pair up leaves accordingly.
      # (assumes tuple-valued entries expose .tensor/.sequence_length — TODO
      # confirm against SeqTensor definition)
      if isinstance(x, tuple):
        if isinstance(x.tensor, tuple):
          replacement_tuples += zip(x.tensor, y.tensor)
          replacement_tuples += [(x.sequence_length, y.sequence_length)]
        else:
          replacement_tuples += zip(x, y)
      else:
        replacement_tuples += [(x, y)]
    replacement_ts = dict(replacement_tuples)
    # Fix the dropout values to be zero, for deterministic validation loss
    dropout_placeholders = ['rnn_dropout', 'en_rnn_dropout', 'en_input_dropout']
    zero_tensor = tf.constant(0.0)
    replacement_ts.update(
        {self.node_dict[pc]: zero_tensor for pc in dropout_placeholders})
    with tf.name_scope('validation'):
      tf.logging.info('Running graph replace for creating val loss...')
      val_loss = graph_replace(self.node_dict['loss_nometa'], replacement_ts)
      tf.logging.info('Running graph replace for meta val loss...')
      val_meta_loss = graph_replace(val_loss, replaced_params)
      self.node_dict.update(val_loss=val_loss, val_meta_loss=val_meta_loss)
class SeqGraph(Graph):
  """TensorFlow graph for a plain RNN sequence model."""

  def __init__(self, graph_config, name='seq_graph'):
    super(SeqGraph, self).__init__(name)
    self.add_seq(**graph_config['core_config'])
    self.add_outputs(graph_config['output_type'], graph_config['output_config'])
    self.add_train(**graph_config['train_config'])

  @with_graph_variable_scope
  def add_seq(self,
              input_shape,
              input_vocab_size=None,
              hidden_size=128,
              n_layers=2,
              cell_type='lstm',
              bidirectional=False,
              dropout=0.0,
              use_embeddings=True,
              embedding_size=64,
              name='Sequence'):
    """Create the RNN core and register its tensors in node_dict."""
    with tf.variable_scope(name):
      batch_size = tf.placeholder(dtype=tf.int32, shape=(), name='batch_size')
      embeddings = None
      if use_embeddings:
        embeddings = tf.get_variable(
            'embeddings',
            shape=(input_vocab_size, embedding_size),
            initializer=tf.truncated_normal_initializer(mean=0.0, stddev=0.1))
      (inputs_seq, init_state, outputs_seq, last_state, inp_dropout,
       cell_dropout, _) = create_seq_graph(
           input_shape,
           batch_size=batch_size,
           hidden_size=hidden_size,
           n_layers=n_layers,
           cell_type=cell_type,
           bidirectional=bidirectional,
           embeddings=embeddings)
      # Total number of real (unpadded) steps in the batch.
      n = tf.reduce_sum(inputs_seq.sequence_length)
      self.node_dict.update(
          inputs=inputs_seq,
          rnn_dropout=cell_dropout,
          input_dropout=inp_dropout,
          embeddings=embeddings,
          batch_size=batch_size,
          final_state=last_state,
          outputs=outputs_seq,
          n=n,
          initial_state=init_state)
class Seq2seqGraph(Graph):
  """TensorFlow graph for a seq2seq model.

  A basic seq2seq model with attention, supporting the usual knobs:
  number of layers, bidirectional encoder, attention sizes, etc.
  """

  def __init__(self, graph_config, name='seq2seq_graph'):
    super(Seq2seqGraph, self).__init__(name)
    self.add_seq2seq(**graph_config['core_config'])
    self.add_outputs(graph_config['output_type'], graph_config['output_config'])
    self.add_train(**graph_config['train_config'])

  @with_graph_variable_scope
  def add_seq2seq(self,
                  en_input_shape,
                  input_shape,
                  use_attn=True,
                  attn_size=128,
                  attn_vec_size=128,
                  en_input_vocab_size=None,
                  input_vocab_size=None,
                  en_hidden_size=128,
                  en_n_layers=2,
                  hidden_size=128,
                  n_layers=2,
                  cell_type='lstm',
                  en_bidirectional=False,
                  en_use_embeddings=True,
                  use_embeddings=True,
                  en_embedding_size=64,
                  embedding_size=64,
                  name='Seq2seq'):
    """Build the encoder and decoder RNNs and register their nodes."""
    with tf.variable_scope(name):
      batch_size = tf.placeholder(dtype=tf.int32, shape=[], name='batch_size')
      # Encoder.
      with tf.variable_scope('Encoder'):
        en_embeddings = None
        if en_use_embeddings:
          en_embeddings = tf.get_variable(
              'embeddings',
              shape=(en_input_vocab_size, en_embedding_size),
              initializer=tf.truncated_normal_initializer(mean=0.0, stddev=0.1))
        (en_seq_inputs, en_initial_state, en_seq_outputs, en_final_state,
         en_input_dropout, en_rnn_dropout, _) = create_seq_graph(
             en_input_shape,
             batch_size=batch_size,
             hidden_size=en_hidden_size,
             n_layers=en_n_layers,
             cell_type=cell_type,
             bidirectional=en_bidirectional,
             embeddings=en_embeddings,
             output_proj_size=en_hidden_size)
      # The decoder attends over the encoder outputs when attention is on.
      attn_inputs = en_seq_outputs if use_attn else None
      if en_bidirectional:
        # A bidirectional encoder yields a pair of states; seed the decoder
        # with the first one, matching the original behavior.
        en_final_state = en_final_state[0]
      # Decoder.
      with tf.variable_scope('Decoder'):
        de_embeddings = None
        if use_embeddings:
          de_embeddings = tf.get_variable(
              'embeddings',
              shape=(input_vocab_size, embedding_size),
              initializer=tf.truncated_normal_initializer(mean=0.0, stddev=0.1))
        (de_seq_inputs, de_initial_state, de_seq_outputs, de_final_state,
         de_input_dropout, de_rnn_dropout, _) = create_seq_graph(
             input_shape,
             batch_size=batch_size,
             hidden_size=hidden_size,
             n_layers=n_layers,
             cell_type=cell_type,
             bidirectional=False,
             embeddings=de_embeddings,
             attn_size=attn_size,
             attn_vec_size=attn_vec_size,
             attn_inputs=attn_inputs,
             initial_state=en_final_state)
      # Count number of decoding steps (unpadded) in the batch.
      n = tf.reduce_sum(de_seq_inputs.sequence_length)
      self.node_dict.update(
          en_inputs=en_seq_inputs,
          en_rnn_dropout=en_rnn_dropout,
          en_input_dropout=en_input_dropout,
          en_outputs=en_seq_outputs,
          en_initial_state=en_initial_state,
          en_final_state=en_final_state,
          inputs=de_seq_inputs,
          rnn_dropout=de_rnn_dropout,
          input_dropout=de_input_dropout,
          outputs=de_seq_outputs,
          batch_size=batch_size,
          final_state=de_final_state,
          initial_state=de_initial_state,
          n=n,
          encoded_context=en_seq_outputs,
          context=en_seq_inputs,
          en_embeddings=en_embeddings,
          embeddings=de_embeddings)
      if use_attn:
        self.node_dict['attn_inputs'] = attn_inputs
class MemorySeq2seqGraph(Graph):
def __init__(self, graph_config, name='memory_seq2seq_graph'):
super(MemorySeq2seqGraph, self).__init__(name)
self.use_gpu = graph_config['use_gpu']
self.meta_learn = graph_config['meta_learn']
self.eval_graph = not graph_config['train_config']
if self.use_gpu:
os.environ['CUDA_VISIBLE_DEVICES'] = graph_config['gpu_id']
else:
os.environ['CUDA_VISIBLE_DEVICES'] = ''
dict_to_pass = graph_config['core_config'].copy()
self.score_model = graph_config['score_fn_config'].pop('score_model', None)
dict_to_pass.update(graph_config['score_fn_config'])
self.add_memory_seq2seq(**dict_to_pass)
self.add_outputs(graph_config['output_type'], graph_config['output_config'])
# For evaluation, train_config would be set to {}
self.add_train(**graph_config['train_config'])
self.config = graph_config
@with_graph_variable_scope
def add_memory_seq2seq(self,
max_n_valid_indices=None,
n_mem=None,
n_builtin=None,
use_attn=True,
attn_size=128,
attn_vec_size=128,
en_input_vocab_size=None,
input_vocab_size=None,
en_hidden_size=128,
en_n_layers=2,
hidden_size=128,
n_layers=2,
cell_type='lstm',
en_bidirectional=False,
en_use_embeddings=True,
en_embedding_size=4,
value_embedding_size=128,
en_pretrained_vocab_size=None,
en_pretrained_embedding_size=-1,
tie_en_embeddings=True,
add_lm_loss=False,
n_en_input_features=1,
n_de_output_features=1,
en_attn_on_constants=False,
max_programs=None,
num_envs=None,
maxlen=25,
en_maxlen=75,
num_features=11,
score_norm_fn=None,
name='MemorySeq2seq',
**kwargs):
"""Create seq2seq with key variable memory.
Seq2seq with key variable memory is used for semantic
parsing (generating programs from natural language
instructions/questions).
A MemorySeq2seq Model uses a memory cell in decoder.
There are 3 types of tokens in a program:
1) constants that are provided at the before the program
is generated (added before decoding, different for
different examples); 2) variables that saves the results
from executing past expressions (added during decoding,
different for different examples); 3) language
primitives such as built-in functions and reserved
tokens (for example, "(", ")"). (the same for different
examples).
There are two kinds of constants: 1) constants from the
question, whose representation is from the span the
annotated constants; 2) constants from the context,
whose representation is from the constant value
embeddings, for example, table columns.
So the decoder vocab is organized as
[primitives, constants, variables].
For a constant, its embedding is computed as sum of two
parts: 1) embedding of the span (from encoder) on which
the constant is annotated with, for example the span
"barack obama" in "who is barack obama's wife" or the
span "one" in "what is one plus one"; 2) embedding of
the constant, for example, the embedding of the entity
Obama or the embedding of the number one.
For a variable, its embedding is the decoder RNN output
at the step where the variable is created.
For a primitive, its embedding is initialized randomly
and tuned by SGD.
Inspired by the code asistance (such as autocompletion)
in modern IDE, we also apply semantic and syntax
constraint on the decoder vocabulary so that at each
step, only some of the tokens are valid. So the decoder
has a dynamic vocabulary that is changing through
different steps.
"""
if not self.eval_graph:
# Code for score fn
args_to_pass = dict(
num_envs=num_envs,
num_features=num_features,
max_programs=max_programs,
score_temperature=kwargs['score_temperature'],
score_norm_fn=score_norm_fn)
self.score_fn = score_utils.ScoreFunction(
self.score_model, trainable=self.meta_learn, **args_to_pass)
self.node_dict.update(self.score_fn.score_dict)
input_shape = tf_utils.MemoryInputTuple(
tf.TensorShape([None, maxlen]), tf.TensorShape([None, maxlen]),
tf.TensorShape([None, maxlen, max_n_valid_indices]))
input_dtype = tf_utils.MemoryInputTuple(tf.int32, tf.int32, tf.int32)
en_input_shape = tf.TensorShape([None, en_maxlen])
constant_span_shape = tf.TensorShape([None, n_mem, 2])
constant_value_embedding_shape = tf.TensorShape(
[None, n_mem, value_embedding_size])
builtin_de_embeddings_shape = tf.TensorShape([n_builtin, hidden_size])
with tf.variable_scope('ConstantInput'):
# constant_span_embedding encodes the information
# from the span where the constant is referred to,
# for example the span "obama" in "who is the wife
# of obama".
# constant_value_embedding encodes the information
# from the value of the constant, for example, the
# embedding of the entity Obama.
# constant_span: (B, n_mem, 2)
constant_spans_placeholder = tf.placeholder(tf.int32, constant_span_shape)
constant_spans = constant_spans_placeholder
n_constants_placeholder = tf.placeholder(tf.int32, [None, 1])
n_constants = tf.squeeze(n_constants_placeholder, [-1])
# constant_spans: (B, n_mem, 1)
# 0.0 if the span is [-1, -1], else 1.0.
constant_span_masks = tf.cast(
tf.greater(tf.reduce_sum(constant_spans, axis=2), 0), tf.float32)
constant_span_masks = tf.expand_dims(constant_span_masks, -1)
# constant_spans: (B, n_mem, 2, 1)
constant_spans = tf.maximum(constant_spans, 0)
constant_spans = tf.expand_dims(constant_spans, axis=-1)
if constant_value_embedding_shape is not None:
constant_value_embeddings_placeholder = tf.placeholder(
tf.float32, shape=constant_value_embedding_shape)
constant_value_embeddings = constant_value_embeddings_placeholder
constant_value_embeddings = tf.layers.dense(
constant_value_embeddings, hidden_size, use_bias=True)
constant_value_masks = tf.squeeze(1 - constant_span_masks, [-1])
if n_en_input_features > 0:
en_input_features_shape = tf.TensorShape(
[None, en_maxlen, n_en_input_features])
else:
en_input_features_shape = None
with tf.variable_scope(name):
batch_size = tf.placeholder(dtype=tf.int32, shape=(), name='batch_size')
with tf.variable_scope('Encoder'):
if en_use_embeddings:
if en_pretrained_embedding_size < 0:
en_embeddings = tf.get_variable(
'embeddings',
shape=(en_input_vocab_size, en_embedding_size),
initializer=tf.truncated_normal_initializer(
mean=0.0, stddev=0.1))
else:
en_embeddings = tf.get_variable(
'embeddings',
shape=(en_input_vocab_size - en_pretrained_vocab_size,
en_embedding_size),
initializer=tf.truncated_normal_initializer(
mean=0.0, stddev=0.1))
en_pretrained_embeddings = tf.get_variable(
'pretrained_embeddings',
shape=(en_pretrained_vocab_size, en_pretrained_embedding_size),
trainable=False,
initializer=tf.zeros_initializer())
en_pretrained_embeddings_placeholder = tf.placeholder(
tf.float32,
[en_pretrained_vocab_size, en_pretrained_embedding_size])
en_pretrained_embeddings_init = en_pretrained_embeddings.assign(
en_pretrained_embeddings_placeholder)
en_pretrained_embeddings = tf.layers.dense(
inputs=en_pretrained_embeddings,
units=en_embedding_size,
use_bias=True)
en_embeddings = tf.concat(
values=[en_embeddings, en_pretrained_embeddings], axis=0)
else:
en_embeddings = None
if en_attn_on_constants:
tf.logging.info('Using attention in encoder!!!')
(en_seq_inputs, en_initial_state, en_seq_outputs, en_final_state,
en_input_dropout, en_rnn_dropout, en_rnn_outputs) = create_seq_graph(
en_input_shape,
batch_size=batch_size,
hidden_size=en_hidden_size,
n_layers=en_n_layers,
cell_type=cell_type,
bidirectional=en_bidirectional,
embeddings=en_embeddings,
output_proj_size=en_hidden_size,
input_features_shape=en_input_features_shape,
attn_inputs=constant_value_embeddings,
attn_masks=constant_value_masks,
attn_size=attn_size,
attn_vec_size=attn_vec_size)
else:
(en_seq_inputs, en_initial_state, en_seq_outputs, en_final_state,
en_input_dropout, en_rnn_dropout, en_rnn_outputs) = create_seq_graph(
en_input_shape,
batch_size=batch_size,
hidden_size=en_hidden_size,
n_layers=en_n_layers,
cell_type=cell_type,
bidirectional=en_bidirectional,
embeddings=en_embeddings,
output_proj_size=en_hidden_size,
input_features_shape=en_input_features_shape)
if n_en_input_features > 0:
en_seq_input_features = SeqTensor(en_seq_inputs.tensor[1],
tf.placeholder(tf.int32, [None]))
en_seq_inputs = SeqTensor(en_seq_inputs.tensor[0],
en_seq_inputs.sequence_length)
if add_lm_loss:
sequence_length = tf.maximum(en_seq_inputs.sequence_length - 1, 0)
en_n = tf.cast(tf.reduce_sum(sequence_length), tf.float32)
mask = tf.sequence_mask(sequence_length, dtype=tf.float32)
if en_bidirectional:
en_fw_outputs = en_rnn_outputs[0]
en_bw_outputs = en_rnn_outputs[1]
if tie_en_embeddings:
en_fw_logits = tf_utils.tensormul(en_fw_outputs[:, :-1, :],
tf.transpose(en_embeddings))
en_bw_logits = tf_utils.tensormul(en_bw_outputs[:, 1:, :],
tf.transpose(en_embeddings))
else:
# Use 0 to n-2 to compute logits.
en_fw_logits = tf.layers.dense(
en_fw_outputs[:, :-1, :], en_input_vocab_size, use_bias=True)
en_bw_logits = tf.layers.dense(
en_bw_outputs[:, 1:, :], en_input_vocab_size, use_bias=True)
# Use 1 to n-1 as targets.
en_fw_lm_loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
labels=en_seq_inputs.tensor[:, 1:], logits=en_fw_logits) * mask
en_bw_lm_loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
labels=en_seq_inputs.tensor[:, :-1], logits=en_bw_logits) * mask
en_lm_loss = tf.reduce_sum(en_fw_lm_loss + en_bw_lm_loss) / en_n
else:
en_fw_outputs = en_rnn_outputs
if tie_en_embeddings:
en_fw_logits = tf_utils.tensormul(en_fw_outputs[:, :-1, :],
tf.transpose(en_embeddings))
else:
en_fw_logits = tf.layers.dense(
en_fw_outputs[:, :-1, :], en_input_vocab_size, use_bias=True)
en_fw_lm_loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
labels=en_seq_inputs.tensor[:, 1:], logits=en_fw_logits) * mask
en_lm_step_loss = en_fw_lm_loss
en_lm_loss = tf.reduce_sum(en_lm_step_loss) / en_n
if use_attn:
attn_inputs = en_seq_outputs.tensor
attn_masks = tf.sequence_mask(
en_seq_outputs.sequence_length, maxlen=en_maxlen, dtype=tf.float32)
else:
attn_inputs = None
attn_masks = None
with tf.variable_scope('ConstantEncoder'):
batch_ind = tf.range(batch_size)
# batch_ind: (B, 1, 1, 1)
for i in range(3):
batch_ind = tf.expand_dims(batch_ind, axis=-1)
# batch_ind: (B, n_mem, 2, 1)
batch_ind = tf.tile(batch_ind, [1, n_mem, 2, 1])
# constant_span: (B, n_mem, 2, 2)
constant_spans = tf.concat([batch_ind, constant_spans], axis=-1)
# constant_span_embedding: (B, n_mem, 2, en_output_size)
constant_span_embeddings = tf.gather_nd(en_seq_outputs.tensor,
constant_spans)
# constant_embedding: (B, n_mem, en_output_size)
constant_embeddings = tf.reduce_mean(constant_span_embeddings, axis=2)
constant_embeddings = constant_embeddings * constant_span_masks
if constant_value_embedding_shape is not None:
constant_embeddings = constant_embeddings + constant_value_embeddings
# mask out the bad constants.
# constant mask: (B, n_mem)
constant_masks = tf.sequence_mask(
n_constants, maxlen=n_mem, dtype=tf.float32)
# constant mask: (B, n_mem, 1)
constant_masks = tf.expand_dims(constant_masks, -1)
constant_masks = tf.tile(constant_masks, [1, 1, hidden_size])
# constant_embeddings: (B, n_mem, hidden_size)
constant_embeddings = constant_embeddings * constant_masks
# builtin_de_embeddings: (n_builtin, embed_size)
builtin_de_embeddings = tf.get_variable(
'builtin_de_embeddings',
builtin_de_embeddings_shape,
initializer=tf.truncated_normal_initializer(mean=0.0, stddev=0.1))
# builtin_de_embeddings: (1, n_builtin, embed_size)
builtin_de_embeddings = tf.expand_dims(builtin_de_embeddings, axis=0)
# builtin_de_embeddings: (B, n_builtin, embed_size)
builtin_de_embeddings = tf.tile(builtin_de_embeddings,
[batch_size] + [1] * 2)
# initial_memory: (B, n_builtin + n_mem, embed_size)
initial_memory = tf.concat([builtin_de_embeddings, constant_embeddings],
axis=1)
# concatenate static and constant embeddings to form
# new memory to create initial states.
if en_bidirectional:
initial_state = en_final_state[0]
else:
initial_state = en_final_state
with tf.variable_scope('Decoder'):
initial_state = tf_utils.MemoryStateTuple(initial_memory, initial_state)
seq_inputs = create_seq_inputs(shape=input_shape, dtype=input_dtype)
inputs = seq_inputs.tensor
sequence_length = seq_inputs.sequence_length
rnn_dropout = tf.placeholder_with_default(
0.0, shape=None, name='rnn_dropout')
# Create multilayer attention cell then wrap with memory cell.
cell = multilayer_dropout_cell(
cell_fn=RNN_CELL_DICT[cell_type],
hidden_size=hidden_size,
n_layers=n_layers,
dropout=rnn_dropout)
if attn_inputs is not None:
cell = tf_utils.SeqAttentionCellWrapper(
cell,
attn_inputs=attn_inputs,
attn_size=attn_size,
attn_vec_size=attn_vec_size,
output_size=hidden_size,
attn_masks=attn_masks)
mem_size = builtin_de_embeddings_shape[0] + constant_span_shape[1]
embed_size = hidden_size
use_attn_scores = (self.score_model == 'attn') and (not self.eval_graph)
cell = tf_utils.MemoryWrapper(
cell,
mem_size,
embed_size,
max_n_valid_indices,
use_score_wrapper=use_attn_scores,
activation=score_norm_fn)
flat_inputs = data_utils.flatten(inputs)
flat_inputs = [tf.expand_dims(in_, -1) for in_ in flat_inputs[:2]
] + flat_inputs[2:]
flat_inputs_unstacked = [tf.unstack(x, axis=1) for x in flat_inputs]
inputs = [
tf_utils.MemoryInputTuple(
read_ind=x[0], write_ind=x[1], valid_indices=x[2])
for x in zip(*flat_inputs_unstacked)
]
cell_outputs, final_state = tf.nn.static_rnn(
cell,
inputs,
sequence_length=sequence_length,
initial_state=initial_state,
dtype=tf.float32)
if use_attn_scores:
outputs = [x[0] for x in cell_outputs]
scores_per_timestep = [x[1] for x in cell_outputs]
self.score_fn.create_attn_based_scores(
scores_per_timestep, sequence_length)
else:
outputs = cell_outputs
outputs = tf.stack(outputs, axis=1)
if n_de_output_features > 0:
de_seq_output_features = create_seq_inputs(
shape=tf.TensorShape(
[None, maxlen, max_n_valid_indices, n_de_output_features]),
dtype=tf.int32,
name='de_output_features')
output_feature_weights = tf.get_variable(
'de_output_feature_weights',
shape=tf.TensorShape([n_de_output_features, 1]),
initializer=tf.zeros_initializer())
outputs = outputs + tf.squeeze(
tf_utils.tensormul(
tf.cast(de_seq_output_features.tensor, tf.float32),
output_feature_weights),
axis=-1)
seq_outputs = SeqTensor(outputs, sequence_length)
n = tf.reduce_sum(seq_inputs.sequence_length)
self.node_dict.update(
en_inputs=en_seq_inputs,
en_rnn_dropout=en_rnn_dropout,
en_input_dropout=en_input_dropout,
en_outputs=en_seq_outputs,
en_initial_state=en_initial_state,
en_final_state=en_final_state,
inputs=seq_inputs,
constant_spans=constant_spans_placeholder,
constant_embeddings=constant_embeddings,
constant_masks=constant_masks,
n_constants=n_constants_placeholder,
rnn_dropout=rnn_dropout,
outputs=seq_outputs,
batch_size=batch_size,
final_state=final_state,
initial_state=initial_state,
n=n,
encoded_context=en_seq_outputs,
context=en_seq_inputs,
en_embeddings=en_embeddings)
if en_pretrained_embedding_size > 0:
self.node_dict[
'en_pretrained_embeddings'] = en_pretrained_embeddings_placeholder
self.node_dict[
'en_pretrained_embeddings_init'] = en_pretrained_embeddings_init
if constant_value_embedding_shape is not None:
self.node_dict[
'constant_value_embeddings'] = constant_value_embeddings_placeholder
if add_lm_loss:
self.node_dict['en_lm_loss'] = en_lm_loss
# self.node_dict['en_lm_step_loss'] = en_lm_step_loss
if use_attn:
self.node_dict['attn_inputs'] = attn_inputs
if n_en_input_features > 0:
self.node_dict['en_input_features'] = en_seq_input_features
self.node_dict['summaries'].append(
tf.summary.scalar(self.vs_name + '/' + 'en_input_features_sum',
tf.reduce_sum(en_seq_input_features.tensor)))
if n_de_output_features > 0:
self.node_dict['output_features'] = de_seq_output_features
self.node_dict['output_feature_weights'] = output_feature_weights
self.node_dict['summaries'].append(
tf.summary.scalar(self.vs_name + '/' + 'output_feature_weights_0',
output_feature_weights[0][0]))
self.node_dict['summaries'].append(
tf.summary.scalar(self.vs_name + '/' + 'output_features_sum',
tf.reduce_sum(de_seq_output_features.tensor)))
if self.meta_learn:
val_en_seq_inputs = create_seq_inputs(
en_input_shape, en_seq_inputs.tensor.dtype, name='val_en_inputs')
val_seq_inputs = create_seq_inputs(
shape=input_shape, dtype=input_dtype, name='val_inputs')
self.node_dict.update(
val_inputs=val_seq_inputs,
val_en_inputs=val_en_seq_inputs,
val_context=val_en_seq_inputs)
if n_en_input_features:
self.node_dict['val_en_input_features'] = create_seq_inputs(
en_input_features_shape,
en_seq_input_features.tensor.dtype,
name='val_en_input_features')
if n_de_output_features:
self.node_dict['val_output_features'] = create_seq_inputs(
shape=de_seq_output_features.tensor.shape,
dtype=de_seq_output_features.tensor.dtype,
name='val_output_features')
with tf.name_scope('val_constants'):
for key in [
'batch_size', 'n_constants', 'constant_spans',
'constant_value_embeddings'
]:
val_key = 'val_{}'.format(key)
self.node_dict[val_key] = create_placeholder_copy(self.node_dict[key])
class MonitorGraph(object):
  """A standalone TensorFlow graph for monitoring values during training.

  Each monitored value becomes a placeholder plus a scalar summary, so the
  values can be fed in from Python and inspected through TensorBoard,
  independently of the main training graph.
  """

  def __init__(self):
    self.node_dict = {}
    self._graph = tf.Graph()

  def launch(self):
    """Finalize the graph: merge all summaries and start a session."""
    with self._graph.as_default():
      self.merged = tf.summary.merge_all()
      init_op = tf.global_variables_initializer()
    self.session = tf.Session(graph=self._graph)
    self.session.run(init_op)

  def add_scalar_monitor(self, name, dtype):
    """Register a scalar named `name` and attach a summary op to it."""
    with self._graph.as_default():
      node = tf.placeholder_with_default(
          input=tf.zeros(shape=(), dtype=dtype), shape=(), name=name)
      tf.summary.scalar(name, node)
    self.node_dict[name] = node

  def generate_summary(self, feed_dict):
    """Evaluate the merged summary with `feed_dict` values fed in.

    `feed_dict` maps monitor names (as passed to `add_scalar_monitor`)
    to Python values; names are translated to placeholders via node_dict.
    """
    feeds = map_dict(self.node_dict, feed_dict)
    return self.session.run(self.merged, feeds)
# Utility functions for creating TensorFlow graphs.
# FNN
def create_multilayer_fnn(inputs, dropout, hidden_sizes, activation='relu'):
  """Stack dense layers on `inputs`, each preceded by dropout.

  Args:
    inputs: input tensor to the first layer.
    dropout: dropout probability (keep prob is 1 - dropout).
    hidden_sizes: iterable of layer widths, one dense layer per entry.
    activation: key into ACTIVATION_DICT selecting the nonlinearity.

  Returns:
    The output tensor of the last layer.
  """
  hidden = inputs
  for n_units in hidden_sizes:
    hidden = tf.nn.dropout(hidden, 1 - dropout)
    hidden = tf.layers.dense(
        inputs=hidden, units=n_units, activation=ACTIVATION_DICT[activation])
  return hidden
# Loss
def create_seq_mse_loss(outputs, targets, weights, sequence_length):
  """Weighted, length-masked sum-of-squared-errors loss over sequences.

  Positions past each example's `sequence_length` are masked out.
  """
  valid_mask = tf.sequence_mask(sequence_length, dtype=tf.float32)
  squared_err = tf.squared_difference(outputs, targets)
  return tf.reduce_sum(squared_err * weights * valid_mask)
def create_probs(logits, targets, sequence_length, use_sparse=False):
  """Create graph nodes for step and sequence probabilities.

  Returns:
    (sequence_probs, sequence_logprobs, step_logprobs) where step log-probs
    are zeroed past each example's sequence_length.
  """
  maxlen = tf.shape(targets)[1]
  mask = tf.sequence_mask(sequence_length, maxlen=maxlen, dtype=tf.float32)
  if use_sparse:
    step_neg_logprobs = tf.nn.sparse_softmax_cross_entropy_with_logits(
        labels=targets, logits=logits)
  else:
    # The sparse cross entropy has no second-order derivative, which is
    # needed for the meta gradient, so use the dense (one-hot) version.
    vocab_depth = logits.shape.as_list()[-1]
    step_neg_logprobs = tf.nn.softmax_cross_entropy_with_logits_v2(
        labels=tf.one_hot(targets, depth=vocab_depth), logits=logits)
  step_logprobs = -1 * step_neg_logprobs * mask
  sequence_logprobs = tf.reduce_sum(step_logprobs, axis=1)
  sequence_probs = tf.exp(sequence_logprobs)
  return sequence_probs, sequence_logprobs, step_logprobs
def create_softmax(inputs,
                   softmax_w=None,
                   output_vocab_size=None,
                   use_bias=False,
                   name='Softmax_layer'):
  """Create nodes for linear transformation of inputs/softmax computation.

  Args:
    inputs: tensor to project to vocabulary logits.
    softmax_w: optional projection matrix; if None, a dense layer of size
      `output_vocab_size` is created instead.
    output_vocab_size: vocabulary size (used only when softmax_w is None,
      or to size the bias when use_bias is True).
    use_bias: whether to add a bias term to the logits.
    name: name scope for the created ops.

  Returns:
    The tuple produced by `create_softmax_from_logits`:
    (logits, probs, predictions, samples, temperature).
  """
  with tf.name_scope(name):
    if softmax_w is None:
      logits = tf.layers.dense(
          inputs=inputs, units=output_vocab_size, use_bias=use_bias)
    else:
      logits = tf_utils.tensormul(inputs, softmax_w)
      if use_bias:
        # Bug fix: np.zeros requires a numpy dtype; passing the TF DType
        # tf.float32 raises TypeError. Use np.float32 instead.
        softmax_b = tf.Variable(
            initial_value=np.zeros((1, output_vocab_size), dtype=np.float32),
            name='softmax_bias')
        logits += softmax_b
  return create_softmax_from_logits(logits)
def create_softmax_from_logits(logits):
  """Create nodes for softmax computation from logits.

  Adds a `temperature` placeholder (default 1.0) that divides the logits
  before softmax/sampling.

  Returns:
    (logits, probs, predictions, samples, temperature).
  """
  temperature = tf.placeholder_with_default(1.0, shape=(), name='temperature')
  scaled_logits = logits / temperature
  shape = tf.shape(scaled_logits)
  # Flatten to 2-D because tf.multinomial only samples from rank-2 logits.
  flat_logits = tf.reshape(scaled_logits, [-1, shape[-1]])
  samples = tf.reshape(tf.multinomial(flat_logits, 1), shape[:-1])
  probs = tf.nn.softmax(scaled_logits)
  predictions = tf.argmax(probs, axis=2)
  return scaled_logits, probs, predictions, samples, temperature
# Embedding
def embed_inputs(inputs, embeddings, name='Embedding_layer'):
  """Look up embedding vectors for the integer ids in `inputs`."""
  with tf.name_scope(name):
    return tf.nn.embedding_lookup(embeddings, inputs)
# RNN
def create_rnn(cell,
               initial_state,
               inputs,
               sequence_length,
               hidden_size,
               bidirectional,
               cell_bw=None,
               name='RNN'):
  """Unroll a static (optionally bidirectional) RNN over `inputs`.

  Args:
    cell: forward RNN cell.
    initial_state: initial state, or a (fw, bw) pair when bidirectional.
    inputs: list of per-timestep input tensors.
    sequence_length: int tensor of valid lengths per example.
    hidden_size: cell hidden size (kept for interface compatibility).
    bidirectional: whether to also run a backward cell.
    cell_bw: backward cell (required when bidirectional).
    name: name scope for the created ops.

  Returns:
    (outputs, final_state) with outputs stacked on the time axis (axis=1).
  """
  with tf.name_scope(name):
    if bidirectional:
      # Note: a bidirectional RNN cannot be used for decoding.
      fw_state = initial_state[0]
      bw_state = initial_state[1]
      step_outputs, fw_final, bw_final = tf.nn.static_bidirectional_rnn(
          cell,
          cell_bw,
          inputs,
          sequence_length=sequence_length,
          initial_state_fw=fw_state,
          initial_state_bw=bw_state,
          dtype=tf.float32)
      final_state = (fw_final, bw_final)
    else:
      step_outputs, final_state = tf.nn.static_rnn(
          cell,
          inputs,
          sequence_length=sequence_length,
          initial_state=initial_state,
          dtype=tf.float32)
    outputs = tf.stack(step_outputs, axis=1)
  return outputs, final_state
# RNN Cell
def multilayer_dropout_cell(cell_fn,
                            hidden_size,
                            n_layers,
                            dropout,
                            use_skip_connection=True):
  """Create multilayer RNN cell with dropout.

  Args:
    cell_fn: callable mapping a hidden size to a raw RNN cell.
    hidden_size: number of units in each layer.
    n_layers: number of stacked layers.
    dropout: dropout probability applied to each layer's output
      (output_keep_prob is 1.0 - dropout).
    use_skip_connection: wrap every layer above the first in a
      residual (skip) connection.

  Returns:
    A `MultiRNNCell` stacking the configured layers.
  """
  cells = []
  # `range` replaces the py2-only `xrange`; iteration behavior is identical.
  for layer_ind in range(n_layers):
    cell = cell_fn(hidden_size)
    if layer_ind > 0 and use_skip_connection:
      cell = tf.nn.rnn_cell.ResidualWrapper(cell)
    cell = contrib_rnn.DropoutWrapper(cell, output_keep_prob=1.0 - dropout)
    cells.append(cell)
  return contrib_rnn.MultiRNNCell(cells)
# Input placeholders.
def create_seq_inputs(shape, dtype=tf.float32, name='inputs'):
  """Create placeholders for a (possibly nested) batch of sequences.

  When `shape` is a tuple, `dtype` must be a parallel tuple and one
  placeholder is created per leaf, then repacked into the same structure.
  Also creates an int32 `sequence_length` placeholder of shape [None].

  Returns:
    A SeqTensor(inputs, sequence_length).
  """
  with tf.name_scope(name):
    if isinstance(shape, tuple):
      flat_shapes = data_utils.flatten(shape)
      assert isinstance(dtype, tuple)
      flat_dtypes = data_utils.flatten(dtype)
      flat_placeholders = [
          tf.placeholder(dt, sh, name='inputs')
          for dt, sh in zip(flat_dtypes, flat_shapes)
      ]
      inputs = data_utils.pack_sequence_as(shape, flat_placeholders)
    else:
      inputs = tf.placeholder(dtype, shape)
    sequence_length = tf.placeholder(tf.int32, [None], name='sequence_length')
  return SeqTensor(inputs, sequence_length)
def create_tuple_placeholders_with_default(inputs, extra_dims, shape):
  """Recursively build placeholders-with-defaults mirroring a nested shape.

  Leaves (int shapes) become `tf.placeholder_with_default` nodes over the
  corresponding `inputs` leaf with `extra_dims` prepended; tuple shapes are
  rebuilt with the same container type (plain tuple or namedtuple such as
  LSTMStateTuple).
  """
  if isinstance(shape, int):
    return tf.placeholder_with_default(inputs, list(extra_dims) + [shape])
  children = [
      create_tuple_placeholders_with_default(sub_inputs, extra_dims, sub_shape)
      for sub_inputs, sub_shape in zip(inputs, shape)
  ]
  container = type(shape)
  if container == tuple:
    return container(children)
  return container(*children)
def create_placeholder_copy(p):
  """Create a fresh placeholder with the same dtype and shape as `p`."""
  return tf.placeholder(dtype=p.dtype, shape=p.shape)
def create_tuple_placeholders(dtype, extra_dims, shape):
  """Recursively build placeholders mirroring a nested shape description.

  Leaves (int shapes) become placeholders of `dtype` with `extra_dims`
  prepended; tuple shapes are rebuilt with the same container type
  (plain tuple or namedtuple such as LSTMStateTuple).
  """
  if isinstance(shape, int):
    return tf.placeholder(dtype, list(extra_dims) + [shape])
  children = [
      create_tuple_placeholders(dtype, extra_dims, sub_shape)
      for sub_shape in shape
  ]
  container = type(shape)
  if container == tuple:
    return container(children)
  return container(*children)
# Sequence models.
def create_seq_graph(
    input_shape,
    batch_size=None,
    # input_vocab_size=None,
    attn_inputs=None,
    attn_size=128,
    attn_vec_size=128,
    # output_size=128,
    input_size=None,
    hidden_size=128,
    n_layers=2,
    cell_type='lstm',
    bidirectional=False,
    initial_state=None,
    embeddings=None,
    output_proj_size=None,
    input_features_shape=None,
    attn_masks=None,
    inputs_name='inputs'):
  """Build a sequence (RNN) graph: inputs, embedding, cell stack and unroll.

  Args:
    input_shape: placeholder shape for the sequence inputs.
    batch_size: batch size used to create a zero state when no
      `initial_state` is given.
    attn_inputs: optional tensor to attend over; enables attention wrapping.
    attn_size: attention size for the attention cell wrapper.
    attn_vec_size: attention vector size for the attention cell wrapper.
    input_size: unused here; kept for interface compatibility.
    hidden_size: RNN cell hidden size per layer.
    n_layers: number of stacked RNN layers.
    cell_type: key into RNN_CELL_DICT selecting the cell class.
    bidirectional: whether to also run a backward cell.
    initial_state: optional initial state; a zero state is built if None.
    embeddings: optional embedding matrix; if given, inputs are int32 ids
      that get embedded, otherwise inputs are float32 vectors.
    output_proj_size: if not None, project outputs with a dense layer.
    input_features_shape: if not None, extra int32 input features are
      created and concatenated onto the embedded inputs.
    attn_masks: masks for the attention cell wrapper.
    inputs_name: name for the input placeholders.

  Returns:
    Tuple (seq_inputs, initial_state, seq_outputs, final_state,
    input_dropout, rnn_dropout, rnn_outputs).
  """
  # Create inputs.
  # Inputs are token ids (int32) only when an embedding matrix is supplied.
  seq_inputs = create_seq_inputs(
      shape=input_shape,
      dtype=tf.int32 if embeddings is not None else tf.float32,
      name=inputs_name)
  rnn_dropout = tf.placeholder_with_default(0.0, shape=None, name='rnn_dropout')
  # Create embedding layer.
  if embeddings is not None:
    embedded_inputs = embed_inputs(seq_inputs.tensor, embeddings=embeddings)
  else:
    embedded_inputs = seq_inputs.tensor
  input_dropout = tf.placeholder_with_default(
      0.0, shape=None, name='input_dropout')
  embedded_inputs = tf.nn.dropout(embedded_inputs, 1 - input_dropout)
  # If we include features in inputs, then add them here.
  if input_features_shape is not None:
    seq_input_features = create_seq_inputs(
        shape=input_features_shape, dtype=tf.int32)
    # Features are concatenated onto the embedded inputs along the last axis.
    embedded_inputs = tf.concat(
        [embedded_inputs,
         tf.cast(seq_input_features.tensor, tf.float32)],
        axis=-1)
    # seq_inputs now carries both the raw inputs and the feature tensor.
    seq_inputs = SeqTensor((seq_inputs.tensor, seq_input_features.tensor),
                           seq_inputs.sequence_length)
  else:
    seq_input_features = None
  embedded_seq_inputs = SeqTensor(embedded_inputs, seq_inputs.sequence_length)
  # Create RNN cell
  cell = multilayer_dropout_cell(RNN_CELL_DICT[cell_type], hidden_size,
                                 n_layers, rnn_dropout)
  if bidirectional:
    cell_bw = multilayer_dropout_cell(RNN_CELL_DICT[cell_type], hidden_size,
                                      n_layers, rnn_dropout)
  else:
    cell_bw = None
  # Add attention.
  if attn_inputs is not None:
    cell = tf_utils.SeqAttentionCellWrapper(
        cell,
        attn_inputs=attn_inputs,
        attn_size=attn_size,
        attn_vec_size=attn_vec_size,
        output_size=hidden_size,
        attn_masks=attn_masks)
    if bidirectional:
      cell_bw = tf_utils.SeqAttentionCellWrapper(
          cell_bw,
          attn_inputs=attn_inputs,
          attn_size=attn_size,
          attn_vec_size=attn_vec_size,
          output_size=hidden_size,
          attn_masks=attn_masks)
  if initial_state is None:
    # Create zero state.
    zero_state = cell.zero_state(batch_size, tf.float32)
    if bidirectional:
      zero_state_bw = cell_bw.zero_state(batch_size, tf.float32)
      zero_state = (zero_state, zero_state_bw)
    initial_state = zero_state
  # Static unrolling needs a list of per-timestep tensors.
  inputs = tf.unstack(embedded_seq_inputs.tensor, axis=1)
  # inputs = embedded_seq_inputs.tensor
  # Create RNN.
  outputs, final_state = create_rnn(
      cell,
      initial_state,
      inputs,
      embedded_seq_inputs.sequence_length,
      hidden_size=hidden_size,
      bidirectional=bidirectional,
      cell_bw=cell_bw)
  rnn_outputs = outputs
  if bidirectional:
    # Comment this if using static api
    # outputs = tf.concat(outputs, axis=2)
    # NOTE(review): hidden_size is doubled to reflect concatenated fw/bw
    # outputs; the concat itself appears to happen inside the static
    # bidirectional RNN — confirm against create_rnn.
    hidden_size *= 2
  # Whether to add linear transformation to outputs.
  if output_proj_size is not None:
    outputs = tf.layers.dense(
        inputs=outputs, units=output_proj_size, use_bias=True)
  seq_outputs = SeqTensor(
      outputs,
      tf.placeholder_with_default(seq_inputs.sequence_length, shape=[None]))
  return (seq_inputs, initial_state, seq_outputs, final_state, input_dropout,
          rnn_dropout, rnn_outputs)
# General utility functions.
def map_dict(dict_1, main_dict):
  """Re-key `main_dict` through the translation table `dict_1`.

  For every key `k` of `main_dict` that also appears in `dict_1`, the
  result maps `dict_1[k]` to `main_dict[k]`.  Keys of `main_dict` that
  are absent from `dict_1` are dropped.  If several keys translate to
  the same new key, the one iterated last wins (same as the original
  loop-based implementation).

  Args:
    dict_1: Mapping from original keys to replacement keys.
    main_dict: Mapping whose entries should be re-keyed.

  Returns:
    A new dict with translated keys; inputs are not modified.
  """
  # items() avoids the second main_dict[k] lookup per key that the old
  # keys()-based loop performed.
  return {dict_1[k]: v for k, v in main_dict.items() if k in dict_1}
| [
"tensorflow.compat.v1.zeros",
"tensorflow.compat.v1.keras.constraints.non_neg",
"tensorflow.compat.v1.Print",
"tensorflow.compat.v1.log",
"tensorflow.compat.v1.transpose",
"tensorflow.compat.v1.summary.histogram",
"tensorflow.compat.v1.global_variables_initializer",
"tensorflow.compat.v1.nn.softmax_cr... | [((1779, 1845), 'collections.namedtuple', 'collections.namedtuple', (['"""SeqTensor"""', "['tensor', 'sequence_length']"], {}), "('SeqTensor', ['tensor', 'sequence_length'])\n", (1801, 1845), False, 'import collections\n'), ((55462, 55513), 'tensorflow.compat.v1.sequence_mask', 'tf.sequence_mask', (['sequence_length'], {'dtype': 'tf.float32'}), '(sequence_length, dtype=tf.float32)\n', (55478, 55513), True, 'import tensorflow.compat.v1 as tf\n'), ((56344, 56380), 'tensorflow.compat.v1.reduce_sum', 'tf.reduce_sum', (['step_logprobs'], {'axis': '(1)'}), '(step_logprobs, axis=1)\n', (56357, 56380), True, 'import tensorflow.compat.v1 as tf\n'), ((56400, 56425), 'tensorflow.compat.v1.exp', 'tf.exp', (['sequence_logprobs'], {}), '(sequence_logprobs)\n', (56406, 56425), True, 'import tensorflow.compat.v1 as tf\n'), ((57365, 57427), 'tensorflow.compat.v1.placeholder_with_default', 'tf.placeholder_with_default', (['(1.0)'], {'shape': '()', 'name': '"""temperature"""'}), "(1.0, shape=(), name='temperature')\n", (57392, 57427), True, 'import tensorflow.compat.v1 as tf\n'), ((57478, 57494), 'tensorflow.compat.v1.shape', 'tf.shape', (['logits'], {}), '(logits)\n', (57486, 57494), True, 'import tensorflow.compat.v1 as tf\n'), ((57541, 57577), 'tensorflow.compat.v1.reshape', 'tf.reshape', (['logits', '[-1, logits_dim]'], {}), '(logits, [-1, logits_dim])\n', (57551, 57577), True, 'import tensorflow.compat.v1 as tf\n'), ((57590, 57618), 'tensorflow.compat.v1.multinomial', 'tf.multinomial', (['logits_2d', '(1)'], {}), '(logits_2d, 1)\n', (57604, 57618), True, 'import tensorflow.compat.v1 as tf\n'), ((57631, 57669), 'tensorflow.compat.v1.reshape', 'tf.reshape', (['samples', 'logits_shape[:-1]'], {}), '(samples, logits_shape[:-1])\n', (57641, 57669), True, 'import tensorflow.compat.v1 as tf\n'), ((57681, 57702), 'tensorflow.compat.v1.nn.softmax', 'tf.nn.softmax', (['logits'], {}), '(logits)\n', (57694, 57702), True, 'import tensorflow.compat.v1 
as tf\n'), ((57719, 57743), 'tensorflow.compat.v1.argmax', 'tf.argmax', (['probs'], {'axis': '(2)'}), '(probs, axis=2)\n', (57728, 57743), True, 'import tensorflow.compat.v1 as tf\n'), ((59710, 59741), 'tensorflow.contrib.rnn.MultiRNNCell', 'contrib_rnn.MultiRNNCell', (['cells'], {}), '(cells)\n', (59734, 59741), True, 'from tensorflow.contrib import rnn as contrib_rnn\n'), ((60948, 60992), 'tensorflow.compat.v1.placeholder', 'tf.placeholder', ([], {'dtype': 'p.dtype', 'shape': 'p.shape'}), '(dtype=p.dtype, shape=p.shape)\n', (60962, 60992), True, 'import tensorflow.compat.v1 as tf\n'), ((62091, 62155), 'tensorflow.compat.v1.placeholder_with_default', 'tf.placeholder_with_default', (['(0.0)'], {'shape': 'None', 'name': '"""rnn_dropout"""'}), "(0.0, shape=None, name='rnn_dropout')\n", (62118, 62155), True, 'import tensorflow.compat.v1 as tf\n'), ((62358, 62424), 'tensorflow.compat.v1.placeholder_with_default', 'tf.placeholder_with_default', (['(0.0)'], {'shape': 'None', 'name': '"""input_dropout"""'}), "(0.0, shape=None, name='input_dropout')\n", (62385, 62424), True, 'import tensorflow.compat.v1 as tf\n'), ((62453, 62502), 'tensorflow.compat.v1.nn.dropout', 'tf.nn.dropout', (['embedded_inputs', '(1 - input_dropout)'], {}), '(embedded_inputs, 1 - input_dropout)\n', (62466, 62502), True, 'import tensorflow.compat.v1 as tf\n'), ((64240, 64286), 'tensorflow.compat.v1.unstack', 'tf.unstack', (['embedded_seq_inputs.tensor'], {'axis': '(1)'}), '(embedded_seq_inputs.tensor, axis=1)\n', (64250, 64286), True, 'import tensorflow.compat.v1 as tf\n'), ((2363, 2398), 'tensorflow.compat.v1.control_dependencies', 'tf.control_dependencies', (['[op_first]'], {}), '([op_first])\n', (2386, 2398), True, 'import tensorflow.compat.v1 as tf\n'), ((2848, 2858), 'tensorflow.compat.v1.Graph', 'tf.Graph', ([], {}), '()\n', (2856, 2858), True, 'import tensorflow.compat.v1 as tf\n'), ((3367, 3469), 'tensorflow.compat.v1.ConfigProto', 'tf.ConfigProto', ([], {'device_count': "{'GPU': n_gpu}", 
'allow_soft_placement': '(True)', 'log_device_placement': '(False)'}), "(device_count={'GPU': n_gpu}, allow_soft_placement=True,\n log_device_placement=False)\n", (3381, 3469), True, 'import tensorflow.compat.v1 as tf\n'), ((3653, 3705), 'tensorflow.compat.v1.Session', 'tf.Session', ([], {'graph': 'self._graph', 'config': 'session_config'}), '(graph=self._graph, config=session_config)\n', (3663, 3705), True, 'import tensorflow.compat.v1 as tf\n'), ((3772, 3805), 'tensorflow.compat.v1.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (3803, 3805), True, 'import tensorflow.compat.v1 as tf\n'), ((6948, 6978), 'tensorflow.compat.v1.placeholder', 'tf.placeholder', (['tf.int32', 'None'], {}), '(tf.int32, None)\n', (6962, 6978), True, 'import tensorflow.compat.v1 as tf\n'), ((26103, 26119), 'tensorflow.compat.v1.constant', 'tf.constant', (['(0.0)'], {}), '(0.0)\n', (26114, 26119), True, 'import tensorflow.compat.v1 as tf\n'), ((37423, 37478), 'meta_reward_learning.semantic_parsing.nsm.tf_utils.MemoryInputTuple', 'tf_utils.MemoryInputTuple', (['tf.int32', 'tf.int32', 'tf.int32'], {}), '(tf.int32, tf.int32, tf.int32)\n', (37448, 37478), False, 'from meta_reward_learning.semantic_parsing.nsm import tf_utils\n'), ((37501, 37534), 'tensorflow.compat.v1.TensorShape', 'tf.TensorShape', (['[None, en_maxlen]'], {}), '([None, en_maxlen])\n', (37515, 37534), True, 'import tensorflow.compat.v1 as tf\n'), ((37562, 37594), 'tensorflow.compat.v1.TensorShape', 'tf.TensorShape', (['[None, n_mem, 2]'], {}), '([None, n_mem, 2])\n', (37576, 37594), True, 'import tensorflow.compat.v1 as tf\n'), ((37632, 37683), 'tensorflow.compat.v1.TensorShape', 'tf.TensorShape', (['[None, n_mem, value_embedding_size]'], {}), '([None, n_mem, value_embedding_size])\n', (37646, 37683), True, 'import tensorflow.compat.v1 as tf\n'), ((37727, 37767), 'tensorflow.compat.v1.TensorShape', 'tf.TensorShape', (['[n_builtin, hidden_size]'], {}), '([n_builtin, hidden_size])\n', (37741, 
37767), True, 'import tensorflow.compat.v1 as tf\n'), ((54320, 54330), 'tensorflow.compat.v1.Graph', 'tf.Graph', ([], {}), '()\n', (54328, 54330), True, 'import tensorflow.compat.v1 as tf\n'), ((54496, 54525), 'tensorflow.compat.v1.Session', 'tf.Session', ([], {'graph': 'self._graph'}), '(graph=self._graph)\n', (54506, 54525), True, 'import tensorflow.compat.v1 as tf\n'), ((55239, 55268), 'tensorflow.compat.v1.nn.dropout', 'tf.nn.dropout', (['x', '(1 - dropout)'], {}), '(x, 1 - dropout)\n', (55252, 55268), True, 'import tensorflow.compat.v1 as tf\n'), ((55277, 55354), 'tensorflow.compat.v1.layers.dense', 'tf.layers.dense', ([], {'inputs': 'x', 'units': 'size', 'activation': 'ACTIVATION_DICT[activation]'}), '(inputs=x, units=size, activation=ACTIVATION_DICT[activation])\n', (55292, 55354), True, 'import tensorflow.compat.v1 as tf\n'), ((55883, 55960), 'tensorflow.compat.v1.nn.sparse_softmax_cross_entropy_with_logits', 'tf.nn.sparse_softmax_cross_entropy_with_logits', ([], {'labels': 'targets', 'logits': 'logits'}), '(labels=targets, logits=logits)\n', (55929, 55960), True, 'import tensorflow.compat.v1 as tf\n'), ((56183, 56269), 'tensorflow.compat.v1.nn.softmax_cross_entropy_with_logits_v2', 'tf.nn.softmax_cross_entropy_with_logits_v2', ([], {'labels': 'one_hot_targets', 'logits': 'logits'}), '(labels=one_hot_targets, logits=\n logits)\n', (56225, 56269), True, 'import tensorflow.compat.v1 as tf\n'), ((56753, 56772), 'tensorflow.compat.v1.name_scope', 'tf.name_scope', (['name'], {}), '(name)\n', (56766, 56772), True, 'import tensorflow.compat.v1 as tf\n'), ((57886, 57905), 'tensorflow.compat.v1.name_scope', 'tf.name_scope', (['name'], {}), '(name)\n', (57899, 57905), True, 'import tensorflow.compat.v1 as tf\n'), ((57929, 57971), 'tensorflow.compat.v1.nn.embedding_lookup', 'tf.nn.embedding_lookup', (['embeddings', 'inputs'], {}), '(embeddings, inputs)\n', (57951, 57971), True, 'import tensorflow.compat.v1 as tf\n'), ((58235, 58254), 'tensorflow.compat.v1.name_scope', 
'tf.name_scope', (['name'], {}), '(name)\n', (58248, 58254), True, 'import tensorflow.compat.v1 as tf\n'), ((59010, 59035), 'tensorflow.compat.v1.stack', 'tf.stack', (['outputs'], {'axis': '(1)'}), '(outputs, axis=1)\n', (59018, 59035), True, 'import tensorflow.compat.v1 as tf\n'), ((59509, 59573), 'tensorflow.contrib.rnn.DropoutWrapper', 'contrib_rnn.DropoutWrapper', (['cell'], {'output_keep_prob': '(1.0 - dropout)'}), '(cell, output_keep_prob=1.0 - dropout)\n', (59535, 59573), True, 'from tensorflow.contrib import rnn as contrib_rnn\n'), ((59856, 59875), 'tensorflow.compat.v1.name_scope', 'tf.name_scope', (['name'], {}), '(name)\n', (59869, 59875), True, 'import tensorflow.compat.v1 as tf\n'), ((60319, 60375), 'tensorflow.compat.v1.placeholder', 'tf.placeholder', (['tf.int32', '[None]'], {'name': '"""sequence_length"""'}), "(tf.int32, [None], name='sequence_length')\n", (60333, 60375), True, 'import tensorflow.compat.v1 as tf\n'), ((63475, 63645), 'meta_reward_learning.semantic_parsing.nsm.tf_utils.SeqAttentionCellWrapper', 'tf_utils.SeqAttentionCellWrapper', (['cell'], {'attn_inputs': 'attn_inputs', 'attn_size': 'attn_size', 'attn_vec_size': 'attn_vec_size', 'output_size': 'hidden_size', 'attn_masks': 'attn_masks'}), '(cell, attn_inputs=attn_inputs, attn_size=\n attn_size, attn_vec_size=attn_vec_size, output_size=hidden_size,\n attn_masks=attn_masks)\n', (63507, 63645), False, 'from meta_reward_learning.semantic_parsing.nsm import tf_utils\n'), ((64811, 64881), 'tensorflow.compat.v1.layers.dense', 'tf.layers.dense', ([], {'inputs': 'outputs', 'units': 'output_proj_size', 'use_bias': '(True)'}), '(inputs=outputs, units=output_proj_size, use_bias=True)\n', (64826, 64881), True, 'import tensorflow.compat.v1 as tf\n'), ((64940, 65009), 'tensorflow.compat.v1.placeholder_with_default', 'tf.placeholder_with_default', (['seq_inputs.sequence_length'], {'shape': '[None]'}), '(seq_inputs.sequence_length, shape=[None])\n', (64967, 65009), True, 'import tensorflow.compat.v1 
as tf\n'), ((1989, 2000), 'os.getpid', 'os.getpid', ([], {}), '()\n', (1998, 2000), False, 'import os\n'), ((2436, 2446), 'tensorflow.compat.v1.no_op', 'tf.no_op', ([], {}), '()\n', (2444, 2446), True, 'import tensorflow.compat.v1 as tf\n'), ((2945, 2968), 'tensorflow.compat.v1.variable_scope', 'tf.variable_scope', (['name'], {}), '(name)\n', (2962, 2968), True, 'import tensorflow.compat.v1 as tf\n'), ((3738, 3759), 'tensorflow.compat.v1.global_variables', 'tf.global_variables', ([], {}), '()\n', (3757, 3759), True, 'import tensorflow.compat.v1 as tf\n'), ((4056, 4080), 'tensorflow.compat.v1.trainable_variables', 'tf.trainable_variables', ([], {}), '()\n', (4078, 4080), True, 'import tensorflow.compat.v1 as tf\n'), ((4198, 4234), 'tensorflow.compat.v1.train.Saver', 'tf.train.Saver', (['variables_to_restore'], {}), '(variables_to_restore)\n', (4212, 4234), True, 'import tensorflow.compat.v1 as tf\n'), ((4868, 4899), 'tensorflow.compat.v1.train.Saver', 'tf.train.Saver', (['score_variables'], {}), '(score_variables)\n', (4882, 4899), True, 'import tensorflow.compat.v1 as tf\n'), ((7030, 7053), 'tensorflow.compat.v1.variable_scope', 'tf.variable_scope', (['name'], {}), '(name)\n', (7047, 7053), True, 'import tensorflow.compat.v1 as tf\n'), ((7075, 7122), 'tensorflow.compat.v1.Variable', 'tf.Variable', (['(0)'], {'trainable': '(False)', 'dtype': 'tf.int32'}), '(0, trainable=False, dtype=tf.int32)\n', (7086, 7122), True, 'import tensorflow.compat.v1 as tf\n'), ((7258, 7317), 'tensorflow.compat.v1.cast', 'tf.cast', (["self.node_dict['max_batch_size']"], {'dtype': 'tf.float32'}), "(self.node_dict['max_batch_size'], dtype=tf.float32)\n", (7265, 7317), True, 'import tensorflow.compat.v1 as tf\n'), ((9300, 9364), 'tensorflow.compat.v1.summary.scalar', 'tf.summary.scalar', (["(self.vs_name + '/' + 'total_loss')", 'total_loss'], {}), "(self.vs_name + '/' + 'total_loss', total_loss)\n", (9317, 9364), True, 'import tensorflow.compat.v1 as tf\n'), ((9640, 9697), 
'tensorflow.compat.v1.placeholder', 'tf.placeholder', ([], {'dtype': 'tf.float32', 'shape': '()', 'name': '"""new_lr"""'}), "(dtype=tf.float32, shape=(), name='new_lr')\n", (9654, 9697), True, 'import tensorflow.compat.v1 as tf\n'), ((9862, 9921), 'tensorflow.compat.v1.summary.scalar', 'tf.summary.scalar', (["(self.vs_name + '/' + 'learning_rate')", 'lr'], {}), "(self.vs_name + '/' + 'learning_rate', lr)\n", (9879, 9921), True, 'import tensorflow.compat.v1 as tf\n'), ((10005, 10076), 'tensorflow.compat.v1.get_collection', 'tf.get_collection', (['tf.GraphKeys.TRAINABLE_VARIABLES'], {'scope': 'self.vs_name'}), '(tf.GraphKeys.TRAINABLE_VARIABLES, scope=self.vs_name)\n', (10022, 10076), True, 'import tensorflow.compat.v1 as tf\n'), ((10303, 10343), 'tensorflow.compat.v1.logging.info', 'tf.logging.info', (['"""trainable parameters:"""'], {}), "('trainable parameters:')\n", (10318, 10343), True, 'import tensorflow.compat.v1 as tf\n'), ((10746, 10804), 'tensorflow.compat.v1.logging.info', 'tf.logging.info', (['"""Calculate gradients wrt model params..."""'], {}), "('Calculate gradients wrt model params...')\n", (10761, 10804), True, 'import tensorflow.compat.v1 as tf\n'), ((10977, 11038), 'tensorflow.compat.v1.gradients', 'tf.gradients', (['total_loss', 'params'], {'stop_gradients': '[score_node]'}), '(total_loss, params, stop_gradients=[score_node])\n', (10989, 11038), True, 'import tensorflow.compat.v1 as tf\n'), ((11076, 11124), 'tensorflow.compat.v1.clip_by_global_norm', 'tf.clip_by_global_norm', (['gradients', 'max_grad_norm'], {}), '(gradients, max_grad_norm)\n', (11098, 11124), True, 'import tensorflow.compat.v1 as tf\n'), ((13894, 13956), 'tensorflow.compat.v1.summary.scalar', 'tf.summary.scalar', (["(self.vs_name + '/' + 'grad_norm')", 'grad_norm'], {}), "(self.vs_name + '/' + 'grad_norm', grad_norm)\n", (13911, 13956), True, 'import tensorflow.compat.v1 as tf\n'), ((16648, 16735), 'tensorflow.compat.v1.summary.scalar', 'tf.summary.scalar', (["(self.vs_name + '/' 
+ 'batch_size')", "self.node_dict['batch_size']"], {}), "(self.vs_name + '/' + 'batch_size', self.node_dict[\n 'batch_size'])\n", (16665, 16735), True, 'import tensorflow.compat.v1 as tf\n'), ((17556, 17594), 'tensorflow.compat.v1.summary.merge', 'tf.summary.merge', ([], {'inputs': 'all_summaries'}), '(inputs=all_summaries)\n', (17572, 17594), True, 'import tensorflow.compat.v1 as tf\n'), ((17620, 17650), 'tensorflow.compat.v1.no_op', 'tf.no_op', ([], {'name': '"""no_summary_op"""'}), "(name='no_summary_op')\n", (17628, 17650), True, 'import tensorflow.compat.v1 as tf\n'), ((19251, 19274), 'tensorflow.compat.v1.variable_scope', 'tf.variable_scope', (['name'], {}), '(name)\n', (19268, 19274), True, 'import tensorflow.compat.v1 as tf\n'), ((20196, 20206), 'tensorflow.compat.v1.exp', 'tf.exp', (['a0'], {}), '(a0)\n', (20202, 20206), True, 'import tensorflow.compat.v1 as tf\n'), ((20218, 20260), 'tensorflow.compat.v1.reduce_sum', 'tf.reduce_sum', (['ea0'], {'axis': '(-1)', 'keepdims': '(True)'}), '(ea0, axis=-1, keepdims=True)\n', (20231, 20260), True, 'import tensorflow.compat.v1 as tf\n'), ((26231, 26258), 'tensorflow.compat.v1.name_scope', 'tf.name_scope', (['"""validation"""'], {}), "('validation')\n", (26244, 26258), True, 'import tensorflow.compat.v1 as tf\n'), ((26266, 26331), 'tensorflow.compat.v1.logging.info', 'tf.logging.info', (['"""Running graph replace for creating val loss..."""'], {}), "('Running graph replace for creating val loss...')\n", (26281, 26331), True, 'import tensorflow.compat.v1 as tf\n'), ((26416, 26477), 'tensorflow.compat.v1.logging.info', 'tf.logging.info', (['"""Running graph replace for meta val loss..."""'], {}), "('Running graph replace for meta val loss...')\n", (26431, 26477), True, 'import tensorflow.compat.v1 as tf\n'), ((27335, 27358), 'tensorflow.compat.v1.variable_scope', 'tf.variable_scope', (['name'], {}), '(name)\n', (27352, 27358), True, 'import tensorflow.compat.v1 as tf\n'), ((27379, 27438), 
'tensorflow.compat.v1.placeholder', 'tf.placeholder', ([], {'dtype': 'tf.int32', 'shape': '()', 'name': '"""batch_size"""'}), "(dtype=tf.int32, shape=(), name='batch_size')\n", (27393, 27438), True, 'import tensorflow.compat.v1 as tf\n'), ((28058, 28099), 'tensorflow.compat.v1.reduce_sum', 'tf.reduce_sum', (['seq_inputs.sequence_length'], {}), '(seq_inputs.sequence_length)\n', (28071, 28099), True, 'import tensorflow.compat.v1 as tf\n'), ((29688, 29711), 'tensorflow.compat.v1.variable_scope', 'tf.variable_scope', (['name'], {}), '(name)\n', (29705, 29711), True, 'import tensorflow.compat.v1 as tf\n'), ((29732, 29791), 'tensorflow.compat.v1.placeholder', 'tf.placeholder', ([], {'dtype': 'tf.int32', 'shape': '[]', 'name': '"""batch_size"""'}), "(dtype=tf.int32, shape=[], name='batch_size')\n", (29746, 29791), True, 'import tensorflow.compat.v1 as tf\n'), ((31656, 31697), 'tensorflow.compat.v1.reduce_sum', 'tf.reduce_sum', (['seq_inputs.sequence_length'], {}), '(seq_inputs.sequence_length)\n', (31669, 31697), True, 'import tensorflow.compat.v1 as tf\n'), ((37073, 37164), 'meta_reward_learning.semantic_parsing.nsm.score_utils.ScoreFunction', 'score_utils.ScoreFunction', (['self.score_model'], {'trainable': 'self.meta_learn'}), '(self.score_model, trainable=self.meta_learn, **\n args_to_pass)\n', (37098, 37164), False, 'from meta_reward_learning.semantic_parsing.nsm import score_utils\n'), ((37279, 37309), 'tensorflow.compat.v1.TensorShape', 'tf.TensorShape', (['[None, maxlen]'], {}), '([None, maxlen])\n', (37293, 37309), True, 'import tensorflow.compat.v1 as tf\n'), ((37311, 37341), 'tensorflow.compat.v1.TensorShape', 'tf.TensorShape', (['[None, maxlen]'], {}), '([None, maxlen])\n', (37325, 37341), True, 'import tensorflow.compat.v1 as tf\n'), ((37351, 37402), 'tensorflow.compat.v1.TensorShape', 'tf.TensorShape', (['[None, maxlen, max_n_valid_indices]'], {}), '([None, maxlen, max_n_valid_indices])\n', (37365, 37402), True, 'import tensorflow.compat.v1 as tf\n'), 
((37778, 37812), 'tensorflow.compat.v1.variable_scope', 'tf.variable_scope', (['"""ConstantInput"""'], {}), "('ConstantInput')\n", (37795, 37812), True, 'import tensorflow.compat.v1 as tf\n'), ((38230, 38275), 'tensorflow.compat.v1.placeholder', 'tf.placeholder', (['tf.int32', 'constant_span_shape'], {}), '(tf.int32, constant_span_shape)\n', (38244, 38275), True, 'import tensorflow.compat.v1 as tf\n'), ((38358, 38393), 'tensorflow.compat.v1.placeholder', 'tf.placeholder', (['tf.int32', '[None, 1]'], {}), '(tf.int32, [None, 1])\n', (38372, 38393), True, 'import tensorflow.compat.v1 as tf\n'), ((38414, 38455), 'tensorflow.compat.v1.squeeze', 'tf.squeeze', (['n_constants_placeholder', '[-1]'], {}), '(n_constants_placeholder, [-1])\n', (38424, 38455), True, 'import tensorflow.compat.v1 as tf\n'), ((38683, 38722), 'tensorflow.compat.v1.expand_dims', 'tf.expand_dims', (['constant_span_masks', '(-1)'], {}), '(constant_span_masks, -1)\n', (38697, 38722), True, 'import tensorflow.compat.v1 as tf\n'), ((38788, 38817), 'tensorflow.compat.v1.maximum', 'tf.maximum', (['constant_spans', '(0)'], {}), '(constant_spans, 0)\n', (38798, 38817), True, 'import tensorflow.compat.v1 as tf\n'), ((38841, 38880), 'tensorflow.compat.v1.expand_dims', 'tf.expand_dims', (['constant_spans'], {'axis': '(-1)'}), '(constant_spans, axis=-1)\n', (38855, 38880), True, 'import tensorflow.compat.v1 as tf\n'), ((39393, 39447), 'tensorflow.compat.v1.TensorShape', 'tf.TensorShape', (['[None, en_maxlen, n_en_input_features]'], {}), '([None, en_maxlen, n_en_input_features])\n', (39407, 39447), True, 'import tensorflow.compat.v1 as tf\n'), ((39516, 39539), 'tensorflow.compat.v1.variable_scope', 'tf.variable_scope', (['name'], {}), '(name)\n', (39533, 39539), True, 'import tensorflow.compat.v1 as tf\n'), ((39560, 39619), 'tensorflow.compat.v1.placeholder', 'tf.placeholder', ([], {'dtype': 'tf.int32', 'shape': '()', 'name': '"""batch_size"""'}), "(dtype=tf.int32, shape=(), name='batch_size')\n", (39574, 39619), 
True, 'import tensorflow.compat.v1 as tf\n'), ((50680, 50721), 'tensorflow.compat.v1.reduce_sum', 'tf.reduce_sum', (['seq_inputs.sequence_length'], {}), '(seq_inputs.sequence_length)\n', (50693, 50721), True, 'import tensorflow.compat.v1 as tf\n'), ((54407, 54429), 'tensorflow.compat.v1.summary.merge_all', 'tf.summary.merge_all', ([], {}), '()\n', (54427, 54429), True, 'import tensorflow.compat.v1 as tf\n'), ((54443, 54476), 'tensorflow.compat.v1.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (54474, 54476), True, 'import tensorflow.compat.v1 as tf\n'), ((54810, 54836), 'tensorflow.compat.v1.summary.scalar', 'tf.summary.scalar', (['name', 'x'], {}), '(name, x)\n', (54827, 54836), True, 'import tensorflow.compat.v1 as tf\n'), ((56863, 56937), 'tensorflow.compat.v1.layers.dense', 'tf.layers.dense', ([], {'inputs': 'inputs', 'units': 'output_vocab_size', 'use_bias': 'use_bias'}), '(inputs=inputs, units=output_vocab_size, use_bias=use_bias)\n', (56878, 56937), True, 'import tensorflow.compat.v1 as tf\n'), ((56974, 57011), 'meta_reward_learning.semantic_parsing.nsm.tf_utils.tensormul', 'tf_utils.tensormul', (['inputs', 'softmax_w'], {}), '(inputs, softmax_w)\n', (56992, 57011), False, 'from meta_reward_learning.semantic_parsing.nsm import tf_utils\n'), ((58496, 58680), 'tensorflow.compat.v1.nn.static_bidirectional_rnn', 'tf.nn.static_bidirectional_rnn', (['cell', 'cell_bw', 'inputs'], {'sequence_length': 'sequence_length', 'initial_state_fw': 'initial_state_fw', 'initial_state_bw': 'initial_state_bw', 'dtype': 'tf.float32'}), '(cell, cell_bw, inputs, sequence_length=\n sequence_length, initial_state_fw=initial_state_fw, initial_state_bw=\n initial_state_bw, dtype=tf.float32)\n', (58526, 58680), True, 'import tensorflow.compat.v1 as tf\n'), ((58834, 58948), 'tensorflow.compat.v1.nn.static_rnn', 'tf.nn.static_rnn', (['cell', 'inputs'], {'sequence_length': 'sequence_length', 'initial_state': 'initial_state', 'dtype': 'tf.float32'}), 
'(cell, inputs, sequence_length=sequence_length,\n initial_state=initial_state, dtype=tf.float32)\n', (58850, 58948), True, 'import tensorflow.compat.v1 as tf\n'), ((59461, 59497), 'tensorflow.compat.v1.nn.rnn_cell.ResidualWrapper', 'tf.nn.rnn_cell.ResidualWrapper', (['cell'], {}), '(cell)\n', (59491, 59497), True, 'import tensorflow.compat.v1 as tf\n'), ((59935, 59960), 'meta_reward_learning.semantic_parsing.nsm.data_utils.flatten', 'data_utils.flatten', (['shape'], {}), '(shape)\n', (59953, 59960), False, 'from meta_reward_learning.semantic_parsing.nsm import data_utils\n'), ((60018, 60043), 'meta_reward_learning.semantic_parsing.nsm.data_utils.flatten', 'data_utils.flatten', (['dtype'], {}), '(dtype)\n', (60036, 60043), False, 'from meta_reward_learning.semantic_parsing.nsm import data_utils\n'), ((60195, 60242), 'meta_reward_learning.semantic_parsing.nsm.data_utils.pack_sequence_as', 'data_utils.pack_sequence_as', (['shape', 'flat_inputs'], {}), '(shape, flat_inputs)\n', (60222, 60242), False, 'from meta_reward_learning.semantic_parsing.nsm import data_utils\n'), ((60268, 60296), 'tensorflow.compat.v1.placeholder', 'tf.placeholder', (['dtype', 'shape'], {}), '(dtype, shape)\n', (60282, 60296), True, 'import tensorflow.compat.v1 as tf\n'), ((63724, 63897), 'meta_reward_learning.semantic_parsing.nsm.tf_utils.SeqAttentionCellWrapper', 'tf_utils.SeqAttentionCellWrapper', (['cell_bw'], {'attn_inputs': 'attn_inputs', 'attn_size': 'attn_size', 'attn_vec_size': 'attn_vec_size', 'output_size': 'hidden_size', 'attn_masks': 'attn_masks'}), '(cell_bw, attn_inputs=attn_inputs,\n attn_size=attn_size, attn_vec_size=attn_vec_size, output_size=\n hidden_size, attn_masks=attn_masks)\n', (63756, 63897), False, 'from meta_reward_learning.semantic_parsing.nsm import tf_utils\n'), ((4373, 4394), 'tensorflow.compat.v1.global_variables', 'tf.global_variables', ([], {}), '()\n', (4392, 4394), True, 'import tensorflow.compat.v1 as tf\n'), ((4563, 4599), 
'tensorflow.compat.v1.train.Saver', 'tf.train.Saver', (['variables_to_restore'], {}), '(variables_to_restore)\n', (4577, 4599), True, 'import tensorflow.compat.v1 as tf\n'), ((7667, 7719), 'tensorflow.compat.v1.summary.scalar', 'tf.summary.scalar', (["(self.vs_name + '/' + 'loss')", 'loss'], {}), "(self.vs_name + '/' + 'loss', loss)\n", (7684, 7719), True, 'import tensorflow.compat.v1 as tf\n'), ((8362, 8459), 'tensorflow.compat.v1.Print', 'tf.Print', (['total_loss', "[self.node_dict['sequence_loss']]"], {'message': '"""seq_loss:"""', 'summarize': '(10000)'}), "(total_loss, [self.node_dict['sequence_loss']], message='seq_loss:',\n summarize=10000)\n", (8370, 8459), True, 'import tensorflow.compat.v1 as tf\n'), ((8515, 8605), 'tensorflow.compat.v1.Print', 'tf.Print', (['total_loss', "[self.node_dict['weights']]"], {'message': '"""weights:"""', 'summarize': '(10000)'}), "(total_loss, [self.node_dict['weights']], message='weights:',\n summarize=10000)\n", (8523, 8605), True, 'import tensorflow.compat.v1 as tf\n'), ((8661, 8758), 'tensorflow.compat.v1.Print', 'tf.Print', (['total_loss', "[self.node_dict['targets'].tensor]"], {'message': '"""targets:"""', 'summarize': '(10000)'}), "(total_loss, [self.node_dict['targets'].tensor], message='targets:',\n summarize=10000)\n", (8669, 8758), True, 'import tensorflow.compat.v1 as tf\n'), ((8814, 8907), 'tensorflow.compat.v1.Print', 'tf.Print', (['total_loss', "[self.node_dict['probs'].tensor]"], {'message': '"""probs:"""', 'summarize': '(10000)'}), "(total_loss, [self.node_dict['probs'].tensor], message='probs:',\n summarize=10000)\n", (8822, 8907), True, 'import tensorflow.compat.v1 as tf\n'), ((8963, 9058), 'tensorflow.compat.v1.Print', 'tf.Print', (['total_loss', "[self.node_dict['logits'].tensor]"], {'message': '"""logits:"""', 'summarize': '(10000)'}), "(total_loss, [self.node_dict['logits'].tensor], message='logits:',\n summarize=10000)\n", (8971, 9058), True, 'import tensorflow.compat.v1 as tf\n'), ((11698, 11709), 
'time.time', 'time.time', ([], {}), '()\n', (11707, 11709), False, 'import time\n'), ((12277, 12322), 'tensorflow.compat.v1.logging.info', 'tf.logging.info', (['"""Creating meta optimizer..."""'], {}), "('Creating meta optimizer...')\n", (12292, 12322), True, 'import tensorflow.compat.v1 as tf\n'), ((12342, 12387), 'tensorflow.compat.v1.train.AdamOptimizer', 'tf.train.AdamOptimizer', ([], {'learning_rate': 'meta_lr'}), '(learning_rate=meta_lr)\n', (12364, 12387), True, 'import tensorflow.compat.v1 as tf\n'), ((12708, 12747), 'tensorflow.compat.v1.gradients', 'tf.gradients', (['val_meta_loss', 'score_node'], {}), '(val_meta_loss, score_node)\n', (12720, 12747), True, 'import tensorflow.compat.v1 as tf\n'), ((12795, 12845), 'tensorflow.compat.v1.clip_by_global_norm', 'tf.clip_by_global_norm', (['score_grads', 'max_grad_norm'], {}), '(score_grads, max_grad_norm)\n', (12817, 12845), True, 'import tensorflow.compat.v1 as tf\n'), ((12962, 13032), 'tensorflow.compat.v1.gradients', 'tf.gradients', (['[score_node]', 'score_fn_vars'], {'grad_ys': 'clipped_score_grads'}), '([score_node], score_fn_vars, grad_ys=clipped_score_grads)\n', (12974, 13032), True, 'import tensorflow.compat.v1 as tf\n'), ((13158, 13211), 'tensorflow.compat.v1.clip_by_global_norm', 'tf.clip_by_global_norm', (['meta_gradients', 'max_grad_norm'], {}), '(meta_gradients, max_grad_norm)\n', (13180, 13211), True, 'import tensorflow.compat.v1 as tf\n'), ((13775, 13786), 'time.time', 'time.time', ([], {}), '()\n', (13784, 13786), False, 'import time\n'), ((14151, 14229), 'tensorflow.compat.v1.summary.scalar', 'tf.summary.scalar', (['"""val_loss"""', "self.node_dict['val_loss']"], {'family': '"""meta_train"""'}), "('val_loss', self.node_dict['val_loss'], family='meta_train')\n", (14168, 14229), True, 'import tensorflow.compat.v1 as tf\n'), ((14276, 14348), 'tensorflow.compat.v1.summary.scalar', 'tf.summary.scalar', (['"""meta_grad_norm"""', 'meta_grad_norm'], {'family': '"""meta_train"""'}), "('meta_grad_norm', 
meta_grad_norm, family='meta_train')\n", (14293, 14348), True, 'import tensorflow.compat.v1 as tf\n'), ((14396, 14470), 'tensorflow.compat.v1.summary.scalar', 'tf.summary.scalar', (['"""score_grad_norm"""', 'score_grad_norm'], {'family': '"""meta_train"""'}), "('score_grad_norm', score_grad_norm, family='meta_train')\n", (14413, 14470), True, 'import tensorflow.compat.v1 as tf\n'), ((14509, 14568), 'tensorflow.compat.v1.summary.histogram', 'tf.summary.histogram', (['"""scores"""', 'scores'], {'family': '"""meta_train"""'}), "('scores', scores, family='meta_train')\n", (14529, 14568), True, 'import tensorflow.compat.v1 as tf\n'), ((15639, 15695), 'tensorflow.compat.v1.clip_by_global_norm', 'tf.clip_by_global_norm', (['clipped_gradients', 'max_grad_norm'], {}), '(clipped_gradients, max_grad_norm)\n', (15661, 15695), True, 'import tensorflow.compat.v1 as tf\n'), ((15786, 15864), 'tensorflow.compat.v1.summary.scalar', 'tf.summary.scalar', (["(self.vs_name + '/' + 'clipped_grad_norm')", 'clipped_grad_norm'], {}), "(self.vs_name + '/' + 'clipped_grad_norm', clipped_grad_norm)\n", (15803, 15864), True, 'import tensorflow.compat.v1 as tf\n'), ((15898, 15962), 'tensorflow.compat.v1.summary.scalar', 'tf.summary.scalar', (["(self.vs_name + '/' + 'n')", "self.node_dict['n']"], {}), "(self.vs_name + '/' + 'n', self.node_dict['n'])\n", (15915, 15962), True, 'import tensorflow.compat.v1 as tf\n'), ((16028, 16119), 'tensorflow.compat.v1.summary.histogram', 'tf.summary.histogram', (["(self.vs_name + '/' + 'seq_loss')", "self.node_dict['sequence_loss']"], {}), "(self.vs_name + '/' + 'seq_loss', self.node_dict[\n 'sequence_loss'])\n", (16048, 16119), True, 'import tensorflow.compat.v1 as tf\n'), ((16189, 16268), 'tensorflow.compat.v1.summary.histogram', 'tf.summary.histogram', (["(self.vs_name + '/' + 'weights')", "self.node_dict['weights']"], {}), "(self.vs_name + '/' + 'weights', self.node_dict['weights'])\n", (16209, 16268), True, 'import tensorflow.compat.v1 as tf\n'), ((20138, 
20183), 'tensorflow.compat.v1.reduce_max', 'tf.reduce_max', (['logits'], {'axis': '(-1)', 'keepdims': '(True)'}), '(logits, axis=-1, keepdims=True)\n', (20151, 20183), True, 'import tensorflow.compat.v1 as tf\n'), ((20362, 20401), 'tensorflow.compat.v1.reduce_sum', 'tf.reduce_sum', (['clipped_entropy'], {'axis': '(-1)'}), '(clipped_entropy, axis=-1)\n', (20375, 20401), True, 'import tensorflow.compat.v1 as tf\n'), ((20404, 20470), 'tensorflow.compat.v1.sequence_mask', 'tf.sequence_mask', (['sequence_length'], {'maxlen': 'maxlen', 'dtype': 'tf.float32'}), '(sequence_length, maxlen=maxlen, dtype=tf.float32)\n', (20420, 20470), True, 'import tensorflow.compat.v1 as tf\n'), ((21130, 21165), 'tensorflow.compat.v1.name_scope', 'tf.name_scope', (['"""cross_entropy_loss"""'], {}), "('cross_entropy_loss')\n", (21143, 21165), True, 'import tensorflow.compat.v1 as tf\n'), ((21185, 21247), 'tensorflow.compat.v1.placeholder', 'tf.placeholder', ([], {'name': '"""weights"""', 'shape': '[None]', 'dtype': 'tf.float32'}), "(name='weights', shape=[None], dtype=tf.float32)\n", (21199, 21247), True, 'import tensorflow.compat.v1 as tf\n'), ((21268, 21332), 'tensorflow.compat.v1.placeholder', 'tf.placeholder', ([], {'name': '"""baselines"""', 'shape': '[None]', 'dtype': 'tf.float32'}), "(name='baselines', shape=[None], dtype=tf.float32)\n", (21282, 21332), True, 'import tensorflow.compat.v1 as tf\n'), ((23962, 23991), 'tensorflow.compat.v1.reduce_mean', 'tf.reduce_mean', (['sequence_loss'], {}), '(sequence_loss)\n', (23976, 23991), True, 'import tensorflow.compat.v1 as tf\n'), ((29828, 29856), 'tensorflow.compat.v1.variable_scope', 'tf.variable_scope', (['"""Encoder"""'], {}), "('Encoder')\n", (29845, 29856), True, 'import tensorflow.compat.v1 as tf\n'), ((30795, 30823), 'tensorflow.compat.v1.variable_scope', 'tf.variable_scope', (['"""Decoder"""'], {}), "('Decoder')\n", (30812, 30823), True, 'import tensorflow.compat.v1 as tf\n'), ((38983, 39047), 'tensorflow.compat.v1.placeholder', 
'tf.placeholder', (['tf.float32'], {'shape': 'constant_value_embedding_shape'}), '(tf.float32, shape=constant_value_embedding_shape)\n', (38997, 39047), True, 'import tensorflow.compat.v1 as tf\n'), ((39171, 39241), 'tensorflow.compat.v1.layers.dense', 'tf.layers.dense', (['constant_value_embeddings', 'hidden_size'], {'use_bias': '(True)'}), '(constant_value_embeddings, hidden_size, use_bias=True)\n', (39186, 39241), True, 'import tensorflow.compat.v1 as tf\n'), ((39286, 39327), 'tensorflow.compat.v1.squeeze', 'tf.squeeze', (['(1 - constant_span_masks)', '[-1]'], {}), '(1 - constant_span_masks, [-1])\n', (39296, 39327), True, 'import tensorflow.compat.v1 as tf\n'), ((39631, 39659), 'tensorflow.compat.v1.variable_scope', 'tf.variable_scope', (['"""Encoder"""'], {}), "('Encoder')\n", (39648, 39659), True, 'import tensorflow.compat.v1 as tf\n'), ((42915, 42963), 'tensorflow.compat.v1.maximum', 'tf.maximum', (['(en_seq_inputs.sequence_length - 1)', '(0)'], {}), '(en_seq_inputs.sequence_length - 1, 0)\n', (42925, 42963), True, 'import tensorflow.compat.v1 as tf\n'), ((43046, 43097), 'tensorflow.compat.v1.sequence_mask', 'tf.sequence_mask', (['sequence_length'], {'dtype': 'tf.float32'}), '(sequence_length, dtype=tf.float32)\n', (43062, 43097), True, 'import tensorflow.compat.v1 as tf\n'), ((44977, 45066), 'tensorflow.compat.v1.sequence_mask', 'tf.sequence_mask', (['en_seq_outputs.sequence_length'], {'maxlen': 'en_maxlen', 'dtype': 'tf.float32'}), '(en_seq_outputs.sequence_length, maxlen=en_maxlen, dtype=tf\n .float32)\n', (44993, 45066), True, 'import tensorflow.compat.v1 as tf\n'), ((45152, 45188), 'tensorflow.compat.v1.variable_scope', 'tf.variable_scope', (['"""ConstantEncoder"""'], {}), "('ConstantEncoder')\n", (45169, 45188), True, 'import tensorflow.compat.v1 as tf\n'), ((45210, 45230), 'tensorflow.compat.v1.range', 'tf.range', (['batch_size'], {}), '(batch_size)\n', (45218, 45230), True, 'import tensorflow.compat.v1 as tf\n'), ((45409, 45445), 
'tensorflow.compat.v1.tile', 'tf.tile', (['batch_ind', '[1, n_mem, 2, 1]'], {}), '(batch_ind, [1, n_mem, 2, 1])\n', (45416, 45445), True, 'import tensorflow.compat.v1 as tf\n'), ((45514, 45561), 'tensorflow.compat.v1.concat', 'tf.concat', (['[batch_ind, constant_spans]'], {'axis': '(-1)'}), '([batch_ind, constant_spans], axis=-1)\n', (45523, 45561), True, 'import tensorflow.compat.v1 as tf\n'), ((45663, 45714), 'tensorflow.compat.v1.gather_nd', 'tf.gather_nd', (['en_seq_outputs.tensor', 'constant_spans'], {}), '(en_seq_outputs.tensor, constant_spans)\n', (45675, 45714), True, 'import tensorflow.compat.v1 as tf\n'), ((45851, 45899), 'tensorflow.compat.v1.reduce_mean', 'tf.reduce_mean', (['constant_span_embeddings'], {'axis': '(2)'}), '(constant_span_embeddings, axis=2)\n', (45865, 45899), True, 'import tensorflow.compat.v1 as tf\n'), ((46208, 46269), 'tensorflow.compat.v1.sequence_mask', 'tf.sequence_mask', (['n_constants'], {'maxlen': 'n_mem', 'dtype': 'tf.float32'}), '(n_constants, maxlen=n_mem, dtype=tf.float32)\n', (46224, 46269), True, 'import tensorflow.compat.v1 as tf\n'), ((46347, 46381), 'tensorflow.compat.v1.expand_dims', 'tf.expand_dims', (['constant_masks', '(-1)'], {}), '(constant_masks, -1)\n', (46361, 46381), True, 'import tensorflow.compat.v1 as tf\n'), ((46407, 46451), 'tensorflow.compat.v1.tile', 'tf.tile', (['constant_masks', '[1, 1, hidden_size]'], {}), '(constant_masks, [1, 1, hidden_size])\n', (46414, 46451), True, 'import tensorflow.compat.v1 as tf\n'), ((46930, 46975), 'tensorflow.compat.v1.expand_dims', 'tf.expand_dims', (['builtin_de_embeddings'], {'axis': '(0)'}), '(builtin_de_embeddings, axis=0)\n', (46944, 46975), True, 'import tensorflow.compat.v1 as tf\n'), ((47068, 47122), 'tensorflow.compat.v1.tile', 'tf.tile', (['builtin_de_embeddings', '([batch_size] + [1] * 2)'], {}), '(builtin_de_embeddings, [batch_size] + [1] * 2)\n', (47075, 47122), True, 'import tensorflow.compat.v1 as tf\n'), ((47250, 47313), 'tensorflow.compat.v1.concat', 
'tf.concat', (['[builtin_de_embeddings, constant_embeddings]'], {'axis': '(1)'}), '([builtin_de_embeddings, constant_embeddings], axis=1)\n', (47259, 47313), True, 'import tensorflow.compat.v1 as tf\n'), ((47598, 47626), 'tensorflow.compat.v1.variable_scope', 'tf.variable_scope', (['"""Decoder"""'], {}), "('Decoder')\n", (47615, 47626), True, 'import tensorflow.compat.v1 as tf\n'), ((47652, 47708), 'meta_reward_learning.semantic_parsing.nsm.tf_utils.MemoryStateTuple', 'tf_utils.MemoryStateTuple', (['initial_memory', 'initial_state'], {}), '(initial_memory, initial_state)\n', (47677, 47708), False, 'from meta_reward_learning.semantic_parsing.nsm import tf_utils\n'), ((47898, 47962), 'tensorflow.compat.v1.placeholder_with_default', 'tf.placeholder_with_default', (['(0.0)'], {'shape': 'None', 'name': '"""rnn_dropout"""'}), "(0.0, shape=None, name='rnn_dropout')\n", (47925, 47962), True, 'import tensorflow.compat.v1 as tf\n'), ((48741, 48877), 'meta_reward_learning.semantic_parsing.nsm.tf_utils.MemoryWrapper', 'tf_utils.MemoryWrapper', (['cell', 'mem_size', 'embed_size', 'max_n_valid_indices'], {'use_score_wrapper': 'use_attn_scores', 'activation': 'score_norm_fn'}), '(cell, mem_size, embed_size, max_n_valid_indices,\n use_score_wrapper=use_attn_scores, activation=score_norm_fn)\n', (48763, 48877), False, 'from meta_reward_learning.semantic_parsing.nsm import tf_utils\n'), ((48970, 48996), 'meta_reward_learning.semantic_parsing.nsm.data_utils.flatten', 'data_utils.flatten', (['inputs'], {}), '(inputs)\n', (48988, 48996), False, 'from meta_reward_learning.semantic_parsing.nsm import data_utils\n'), ((49410, 49524), 'tensorflow.compat.v1.nn.static_rnn', 'tf.nn.static_rnn', (['cell', 'inputs'], {'sequence_length': 'sequence_length', 'initial_state': 'initial_state', 'dtype': 'tf.float32'}), '(cell, inputs, sequence_length=sequence_length,\n initial_state=initial_state, dtype=tf.float32)\n', (49426, 49524), True, 'import tensorflow.compat.v1 as tf\n'), ((49887, 49912), 
'tensorflow.compat.v1.stack', 'tf.stack', (['outputs'], {'axis': '(1)'}), '(outputs, axis=1)\n', (49895, 49912), True, 'import tensorflow.compat.v1 as tf\n'), ((52577, 52677), 'tensorflow.compat.v1.summary.scalar', 'tf.summary.scalar', (["(self.vs_name + '/' + 'output_feature_weights_0')", 'output_feature_weights[0][0]'], {}), "(self.vs_name + '/' + 'output_feature_weights_0',\n output_feature_weights[0][0])\n", (52594, 52677), True, 'import tensorflow.compat.v1 as tf\n'), ((53783, 53813), 'tensorflow.compat.v1.name_scope', 'tf.name_scope', (['"""val_constants"""'], {}), "('val_constants')\n", (53796, 53813), True, 'import tensorflow.compat.v1 as tf\n'), ((55537, 55576), 'tensorflow.compat.v1.squared_difference', 'tf.squared_difference', (['outputs', 'targets'], {}), '(outputs, targets)\n', (55558, 55576), True, 'import tensorflow.compat.v1 as tf\n'), ((55802, 55819), 'tensorflow.compat.v1.shape', 'tf.shape', (['targets'], {}), '(targets)\n', (55810, 55819), True, 'import tensorflow.compat.v1 as tf\n'), ((60076, 60113), 'tensorflow.compat.v1.placeholder', 'tf.placeholder', (['dt', 'sh'], {'name': '"""inputs"""'}), "(dt, sh, name='inputs')\n", (60090, 60113), True, 'import tensorflow.compat.v1 as tf\n'), ((62765, 62811), 'tensorflow.compat.v1.cast', 'tf.cast', (['seq_input_features.tensor', 'tf.float32'], {}), '(seq_input_features.tensor, tf.float32)\n', (62772, 62811), True, 'import tensorflow.compat.v1 as tf\n'), ((2140, 2166), 'tensorflow.compat.v1.variable_scope', 'tf.variable_scope', (['self.vs'], {}), '(self.vs)\n', (2157, 2166), True, 'import tensorflow.compat.v1 as tf\n'), ((4783, 4804), 'tensorflow.compat.v1.global_variables', 'tf.global_variables', ([], {}), '()\n', (4802, 4804), True, 'import tensorflow.compat.v1 as tf\n'), ((7572, 7638), 'tensorflow.compat.v1.summary.scalar', 'tf.summary.scalar', (["(self.vs_name + '/' + 'loss_orig')", 'loss_original'], {}), "(self.vs_name + '/' + 'loss_orig', loss_original)\n", (7589, 7638), True, 'import 
tensorflow.compat.v1 as tf\n'), ((9144, 9232), 'tensorflow.compat.v1.Print', 'tf.Print', (['total_loss', "[self.node_dict['scores']]"], {'message': '"""scores:"""', 'summarize': '(10000)'}), "(total_loss, [self.node_dict['scores']], message='scores:',\n summarize=10000)\n", (9152, 9232), True, 'import tensorflow.compat.v1 as tf\n'), ((9593, 9623), 'tensorflow.compat.v1.keras.constraints.non_neg', 'tf.keras.constraints.non_neg', ([], {}), '()\n', (9621, 9623), True, 'import tensorflow.compat.v1 as tf\n'), ((11333, 11376), 'tensorflow.compat.v1.logging.info', 'tf.logging.info', (['"""Using momentum optimizer"""'], {}), "('Using momentum optimizer')\n", (11348, 11376), True, 'import tensorflow.compat.v1 as tf\n'), ((13280, 13321), 'tensorflow.compat.v1.gradients', 'tf.gradients', (['val_meta_loss', 'meta_hparams'], {}), '(val_meta_loss, meta_hparams)\n', (13292, 13321), True, 'import tensorflow.compat.v1 as tf\n'), ((16500, 16576), 'tensorflow.compat.v1.Print', 'tf.Print', (['total_loss', '[score_grads]'], {'message': '"""score_grads:"""', 'summarize': '(10000)'}), "(total_loss, [score_grads], message='score_grads:', summarize=10000)\n", (16508, 16576), True, 'import tensorflow.compat.v1 as tf\n'), ((19331, 19361), 'tensorflow.compat.v1.TensorShape', 'tf.TensorShape', (['[None, maxlen]'], {}), '([None, maxlen])\n', (19345, 19361), True, 'import tensorflow.compat.v1 as tf\n'), ((20312, 20322), 'tensorflow.compat.v1.log', 'tf.log', (['z0'], {}), '(z0)\n', (20318, 20322), True, 'import tensorflow.compat.v1 as tf\n'), ((20536, 20575), 'tensorflow.compat.v1.reduce_sum', 'tf.reduce_sum', (['clipped_entropy'], {'axis': '(-1)'}), '(clipped_entropy, axis=-1)\n', (20549, 20575), True, 'import tensorflow.compat.v1 as tf\n'), ((20578, 20644), 'tensorflow.compat.v1.sequence_mask', 'tf.sequence_mask', (['sequence_length'], {'maxlen': 'maxlen', 'dtype': 'tf.float32'}), '(sequence_length, maxlen=maxlen, dtype=tf.float32)\n', (20594, 20644), True, 'import tensorflow.compat.v1 as 
tf\n'), ((21458, 21508), 'tensorflow.compat.v1.variable_scope', 'tf.variable_scope', (['"""score_fn"""'], {'reuse': 'tf.AUTO_REUSE'}), "('score_fn', reuse=tf.AUTO_REUSE)\n", (21475, 21508), True, 'import tensorflow.compat.v1 as tf\n'), ((24344, 24411), 'tensorflow.compat.v1.reduce_mean', 'tf.reduce_mean', (['(weights * sequence_neg_logprobs)'], {'name': '"""loss_nometa"""'}), "(weights * sequence_neg_logprobs, name='loss_nometa')\n", (24358, 24411), True, 'import tensorflow.compat.v1 as tf\n'), ((24451, 24517), 'tensorflow.compat.v1.placeholder', 'tf.placeholder', ([], {'name': '"""val_weights"""', 'shape': '[None]', 'dtype': 'tf.float32'}), "(name='val_weights', shape=[None], dtype=tf.float32)\n", (24465, 24517), True, 'import tensorflow.compat.v1 as tf\n'), ((38600, 38637), 'tensorflow.compat.v1.reduce_sum', 'tf.reduce_sum', (['constant_spans'], {'axis': '(2)'}), '(constant_spans, axis=2)\n', (38613, 38637), True, 'import tensorflow.compat.v1 as tf\n'), ((41231, 41279), 'tensorflow.compat.v1.logging.info', 'tf.logging.info', (['"""Using attention in encoder!!!"""'], {}), "('Using attention in encoder!!!')\n", (41246, 41279), True, 'import tensorflow.compat.v1 as tf\n'), ((42987, 43017), 'tensorflow.compat.v1.reduce_sum', 'tf.reduce_sum', (['sequence_length'], {}), '(sequence_length)\n', (43000, 43017), True, 'import tensorflow.compat.v1 as tf\n'), ((45315, 45349), 'tensorflow.compat.v1.expand_dims', 'tf.expand_dims', (['batch_ind'], {'axis': '(-1)'}), '(batch_ind, axis=-1)\n', (45329, 45349), True, 'import tensorflow.compat.v1 as tf\n'), ((48289, 48459), 'meta_reward_learning.semantic_parsing.nsm.tf_utils.SeqAttentionCellWrapper', 'tf_utils.SeqAttentionCellWrapper', (['cell'], {'attn_inputs': 'attn_inputs', 'attn_size': 'attn_size', 'attn_vec_size': 'attn_vec_size', 'output_size': 'hidden_size', 'attn_masks': 'attn_masks'}), '(cell, attn_inputs=attn_inputs, attn_size=\n attn_size, attn_vec_size=attn_vec_size, output_size=hidden_size,\n attn_masks=attn_masks)\n', 
(48321, 48459), False, 'from meta_reward_learning.semantic_parsing.nsm import tf_utils\n'), ((49146, 49167), 'tensorflow.compat.v1.unstack', 'tf.unstack', (['x'], {'axis': '(1)'}), '(x, axis=1)\n', (49156, 49167), True, 'import tensorflow.compat.v1 as tf\n'), ((49221, 49297), 'meta_reward_learning.semantic_parsing.nsm.tf_utils.MemoryInputTuple', 'tf_utils.MemoryInputTuple', ([], {'read_ind': 'x[0]', 'write_ind': 'x[1]', 'valid_indices': 'x[2]'}), '(read_ind=x[0], write_ind=x[1], valid_indices=x[2])\n', (49246, 49297), False, 'from meta_reward_learning.semantic_parsing.nsm import tf_utils\n'), ((52308, 52351), 'tensorflow.compat.v1.reduce_sum', 'tf.reduce_sum', (['en_seq_input_features.tensor'], {}), '(en_seq_input_features.tensor)\n', (52321, 52351), True, 'import tensorflow.compat.v1 as tf\n'), ((52845, 52889), 'tensorflow.compat.v1.reduce_sum', 'tf.reduce_sum', (['de_seq_output_features.tensor'], {}), '(de_seq_output_features.tensor)\n', (52858, 52889), True, 'import tensorflow.compat.v1 as tf\n'), ((54689, 54720), 'tensorflow.compat.v1.zeros', 'tf.zeros', ([], {'shape': '()', 'dtype': 'dtype'}), '(shape=(), dtype=dtype)\n', (54697, 54720), True, 'import tensorflow.compat.v1 as tf\n'), ((8164, 8223), 'tensorflow.compat.v1.summary.scalar', 'tf.summary.scalar', (["(self.vs_name + '/' + loss_name)", 'aux_loss'], {}), "(self.vs_name + '/' + loss_name, aux_loss)\n", (8181, 8223), True, 'import tensorflow.compat.v1 as tf\n'), ((16998, 17038), 'tensorflow.compat.v1.cast', 'tf.cast', (["self.node_dict['n']", 'tf.float32'], {}), "(self.node_dict['n'], tf.float32)\n", (17005, 17038), True, 'import tensorflow.compat.v1 as tf\n'), ((19482, 19512), 'tensorflow.compat.v1.TensorShape', 'tf.TensorShape', (['[None, maxlen]'], {}), '([None, maxlen])\n', (19496, 19512), True, 'import tensorflow.compat.v1 as tf\n'), ((23739, 23771), 'meta_reward_learning.semantic_parsing.nsm.tf_utils.dice', 'tf_utils.dice', (['sequence_logprobs'], {}), '(sequence_logprobs)\n', (23752, 23771), False, 
'from meta_reward_learning.semantic_parsing.nsm import tf_utils\n'), ((27606, 27659), 'tensorflow.compat.v1.truncated_normal_initializer', 'tf.truncated_normal_initializer', ([], {'mean': '(0.0)', 'stddev': '(0.1)'}), '(mean=0.0, stddev=0.1)\n', (27637, 27659), True, 'import tensorflow.compat.v1 as tf\n'), ((40603, 40691), 'tensorflow.compat.v1.placeholder', 'tf.placeholder', (['tf.float32', '[en_pretrained_vocab_size, en_pretrained_embedding_size]'], {}), '(tf.float32, [en_pretrained_vocab_size,\n en_pretrained_embedding_size])\n', (40617, 40691), True, 'import tensorflow.compat.v1 as tf\n'), ((40891, 40983), 'tensorflow.compat.v1.layers.dense', 'tf.layers.dense', ([], {'inputs': 'en_pretrained_embeddings', 'units': 'en_embedding_size', 'use_bias': '(True)'}), '(inputs=en_pretrained_embeddings, units=en_embedding_size,\n use_bias=True)\n', (40906, 40983), True, 'import tensorflow.compat.v1 as tf\n'), ((41057, 41124), 'tensorflow.compat.v1.concat', 'tf.concat', ([], {'values': '[en_embeddings, en_pretrained_embeddings]', 'axis': '(0)'}), '(values=[en_embeddings, en_pretrained_embeddings], axis=0)\n', (41066, 41124), True, 'import tensorflow.compat.v1 as tf\n'), ((42704, 42736), 'tensorflow.compat.v1.placeholder', 'tf.placeholder', (['tf.int32', '[None]'], {}), '(tf.int32, [None])\n', (42718, 42736), True, 'import tensorflow.compat.v1 as tf\n'), ((43630, 43707), 'tensorflow.compat.v1.layers.dense', 'tf.layers.dense', (['en_fw_outputs[:, :-1, :]', 'en_input_vocab_size'], {'use_bias': '(True)'}), '(en_fw_outputs[:, :-1, :], en_input_vocab_size, use_bias=True)\n', (43645, 43707), True, 'import tensorflow.compat.v1 as tf\n'), ((43752, 43828), 'tensorflow.compat.v1.layers.dense', 'tf.layers.dense', (['en_bw_outputs[:, 1:, :]', 'en_input_vocab_size'], {'use_bias': '(True)'}), '(en_bw_outputs[:, 1:, :], en_input_vocab_size, use_bias=True)\n', (43767, 43828), True, 'import tensorflow.compat.v1 as tf\n'), ((43910, 44018), 
'tensorflow.compat.v1.nn.sparse_softmax_cross_entropy_with_logits', 'tf.nn.sparse_softmax_cross_entropy_with_logits', ([], {'labels': 'en_seq_inputs.tensor[:, 1:]', 'logits': 'en_fw_logits'}), '(labels=en_seq_inputs.tensor[\n :, 1:], logits=en_fw_logits)\n', (43956, 44018), True, 'import tensorflow.compat.v1 as tf\n'), ((44062, 44171), 'tensorflow.compat.v1.nn.sparse_softmax_cross_entropy_with_logits', 'tf.nn.sparse_softmax_cross_entropy_with_logits', ([], {'labels': 'en_seq_inputs.tensor[:, :-1]', 'logits': 'en_bw_logits'}), '(labels=en_seq_inputs.tensor[\n :, :-1], logits=en_bw_logits)\n', (44108, 44171), True, 'import tensorflow.compat.v1 as tf\n'), ((44213, 44257), 'tensorflow.compat.v1.reduce_sum', 'tf.reduce_sum', (['(en_fw_lm_loss + en_bw_lm_loss)'], {}), '(en_fw_lm_loss + en_bw_lm_loss)\n', (44226, 44257), True, 'import tensorflow.compat.v1 as tf\n'), ((44542, 44619), 'tensorflow.compat.v1.layers.dense', 'tf.layers.dense', (['en_fw_outputs[:, :-1, :]', 'en_input_vocab_size'], {'use_bias': '(True)'}), '(en_fw_outputs[:, :-1, :], en_input_vocab_size, use_bias=True)\n', (44557, 44619), True, 'import tensorflow.compat.v1 as tf\n'), ((44663, 44771), 'tensorflow.compat.v1.nn.sparse_softmax_cross_entropy_with_logits', 'tf.nn.sparse_softmax_cross_entropy_with_logits', ([], {'labels': 'en_seq_inputs.tensor[:, 1:]', 'logits': 'en_fw_logits'}), '(labels=en_seq_inputs.tensor[\n :, 1:], logits=en_fw_logits)\n', (44709, 44771), True, 'import tensorflow.compat.v1 as tf\n'), ((44854, 44884), 'tensorflow.compat.v1.reduce_sum', 'tf.reduce_sum', (['en_lm_step_loss'], {}), '(en_lm_step_loss)\n', (44867, 44884), True, 'import tensorflow.compat.v1 as tf\n'), ((46783, 46836), 'tensorflow.compat.v1.truncated_normal_initializer', 'tf.truncated_normal_initializer', ([], {'mean': '(0.0)', 'stddev': '(0.1)'}), '(mean=0.0, stddev=0.1)\n', (46814, 46836), True, 'import tensorflow.compat.v1 as tf\n'), ((49020, 49043), 'tensorflow.compat.v1.expand_dims', 'tf.expand_dims', (['in_', 
'(-1)'], {}), '(in_, -1)\n', (49034, 49043), True, 'import tensorflow.compat.v1 as tf\n'), ((57090, 57140), 'numpy.zeros', 'np.zeros', (['(1, output_vocab_size)'], {'dtype': 'tf.float32'}), '((1, output_vocab_size), dtype=tf.float32)\n', (57098, 57140), True, 'import numpy as np\n'), ((10616, 10633), 'tensorflow.compat.v1.nn.l2_loss', 'tf.nn.l2_loss', (['tv'], {}), '(tv)\n', (10629, 10633), True, 'import tensorflow.compat.v1 as tf\n'), ((17241, 17281), 'tensorflow.compat.v1.cast', 'tf.cast', (["self.node_dict['n']", 'tf.float32'], {}), "(self.node_dict['n'], tf.float32)\n", (17248, 17281), True, 'import tensorflow.compat.v1 as tf\n'), ((21987, 22012), 'meta_reward_learning.semantic_parsing.nsm.tf_utils.dice', 'tf_utils.dice', (['log_scores'], {}), '(log_scores)\n', (22000, 22012), False, 'from meta_reward_learning.semantic_parsing.nsm import tf_utils\n'), ((30047, 30100), 'tensorflow.compat.v1.truncated_normal_initializer', 'tf.truncated_normal_initializer', ([], {'mean': '(0.0)', 'stddev': '(0.1)'}), '(mean=0.0, stddev=0.1)\n', (30078, 30100), True, 'import tensorflow.compat.v1 as tf\n'), ((31002, 31055), 'tensorflow.compat.v1.truncated_normal_initializer', 'tf.truncated_normal_initializer', ([], {'mean': '(0.0)', 'stddev': '(0.1)'}), '(mean=0.0, stddev=0.1)\n', (31033, 31055), True, 'import tensorflow.compat.v1 as tf\n'), ((43366, 43393), 'tensorflow.compat.v1.transpose', 'tf.transpose', (['en_embeddings'], {}), '(en_embeddings)\n', (43378, 43393), True, 'import tensorflow.compat.v1 as tf\n'), ((43512, 43539), 'tensorflow.compat.v1.transpose', 'tf.transpose', (['en_embeddings'], {}), '(en_embeddings)\n', (43524, 43539), True, 'import tensorflow.compat.v1 as tf\n'), ((44470, 44497), 'tensorflow.compat.v1.transpose', 'tf.transpose', (['en_embeddings'], {}), '(en_embeddings)\n', (44482, 44497), True, 'import tensorflow.compat.v1 as tf\n'), ((50025, 50098), 'tensorflow.compat.v1.TensorShape', 'tf.TensorShape', (['[None, maxlen, max_n_valid_indices, 
n_de_output_features]'], {}), '([None, maxlen, max_n_valid_indices, n_de_output_features])\n', (50039, 50098), True, 'import tensorflow.compat.v1 as tf\n'), ((50305, 50346), 'tensorflow.compat.v1.TensorShape', 'tf.TensorShape', (['[n_de_output_features, 1]'], {}), '([n_de_output_features, 1])\n', (50319, 50346), True, 'import tensorflow.compat.v1 as tf\n'), ((50374, 50396), 'tensorflow.compat.v1.zeros_initializer', 'tf.zeros_initializer', ([], {}), '()\n', (50394, 50396), True, 'import tensorflow.compat.v1 as tf\n'), ((22231, 22276), 'meta_reward_learning.semantic_parsing.nsm.tf_utils.dice', 'tf_utils.dice', (['(log_scores + sequence_logprobs)'], {}), '(log_scores + sequence_logprobs)\n', (22244, 22276), False, 'from meta_reward_learning.semantic_parsing.nsm import tf_utils\n'), ((39905, 39958), 'tensorflow.compat.v1.truncated_normal_initializer', 'tf.truncated_normal_initializer', ([], {'mean': '(0.0)', 'stddev': '(0.1)'}), '(mean=0.0, stddev=0.1)\n', (39936, 39958), True, 'import tensorflow.compat.v1 as tf\n'), ((40214, 40267), 'tensorflow.compat.v1.truncated_normal_initializer', 'tf.truncated_normal_initializer', ([], {'mean': '(0.0)', 'stddev': '(0.1)'}), '(mean=0.0, stddev=0.1)\n', (40245, 40267), True, 'import tensorflow.compat.v1 as tf\n'), ((40528, 40550), 'tensorflow.compat.v1.zeros_initializer', 'tf.zeros_initializer', ([], {}), '()\n', (40548, 40550), True, 'import tensorflow.compat.v1 as tf\n'), ((50492, 50542), 'tensorflow.compat.v1.cast', 'tf.cast', (['de_seq_output_features.tensor', 'tf.float32'], {}), '(de_seq_output_features.tensor, tf.float32)\n', (50499, 50542), True, 'import tensorflow.compat.v1 as tf\n'), ((15329, 15384), 'tensorflow.compat.v1.summary.scalar', 'tf.summary.scalar', (['"""alpha"""', 'v'], {'family': '"""linear_score_fn"""'}), "('alpha', v, family='linear_score_fn')\n", (15346, 15384), True, 'import tensorflow.compat.v1 as tf\n'), ((22922, 23024), 'tensorflow.compat.v1.sequence_mask', 'tf.sequence_mask', ([], {'lengths': 
'(batch_size // 2)', 'maxlen': 'batch_size', 'dtype': 'tf.float32', 'name': '"""batch_mask"""'}), "(lengths=batch_size // 2, maxlen=batch_size, dtype=tf.\n float32, name='batch_mask')\n", (22938, 23024), True, 'import tensorflow.compat.v1 as tf\n'), ((15483, 15540), 'tensorflow.compat.v1.summary.scalar', 'tf.summary.scalar', (['"""bias"""', 'v[0]'], {'family': '"""linear_score_fn"""'}), "('bias', v[0], family='linear_score_fn')\n", (15500, 15540), True, 'import tensorflow.compat.v1 as tf\n'), ((22412, 22437), 'meta_reward_learning.semantic_parsing.nsm.tf_utils.dice', 'tf_utils.dice', (['log_scores'], {}), '(log_scores)\n', (22425, 22437), False, 'from meta_reward_learning.semantic_parsing.nsm import tf_utils\n'), ((22749, 22774), 'meta_reward_learning.semantic_parsing.nsm.tf_utils.dice', 'tf_utils.dice', (['log_scores'], {}), '(log_scores)\n', (22762, 22774), False, 'from meta_reward_learning.semantic_parsing.nsm import tf_utils\n'), ((22851, 22883), 'meta_reward_learning.semantic_parsing.nsm.tf_utils.dice', 'tf_utils.dice', (['sequence_logprobs'], {}), '(sequence_logprobs)\n', (22864, 22883), False, 'from meta_reward_learning.semantic_parsing.nsm import tf_utils\n')] |
# Lint as: python3
# Copyright 2019 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for dataset."""
import socket
import threading
import time
from absl.testing import parameterized
import numpy as np
from reverb import client
from reverb import dataset as reverb_dataset
from reverb import errors
from reverb import item_selectors
from reverb import rate_limiters
from reverb import replay_sample
from reverb import server as reverb_server
import tensorflow.compat.v1 as tf
import tree
from tensorflow.python.framework import tensor_spec # pylint:disable=g-direct-tensorflow-import
def make_server():
  """Builds the in-process Reverb server used by every test in this file.

  Tables:
    'dist': prioritized sampling, FIFO removal, no signature.
    'dist_queue': FIFO sampling and removal, each item sampled at most once.
    'signatured': like 'dist' plus a `tf.TensorSpec` signature.
    'bounded_spec_signatured': like 'signatured' but with a bounded spec.

  Returns:
    A `reverb_server.Server` bound to an automatically selected port.
  """
  tables = []
  tables.append(
      reverb_server.Table(
          'dist',
          sampler=item_selectors.Prioritized(priority_exponent=1),
          remover=item_selectors.Fifo(),
          max_size=1000000,
          rate_limiter=rate_limiters.MinSize(1)))
  tables.append(
      reverb_server.Table(
          'dist_queue',
          sampler=item_selectors.Fifo(),
          remover=item_selectors.Fifo(),
          max_size=1000000,
          max_times_sampled=1,
          rate_limiter=rate_limiters.MinSize(1)))
  tables.append(
      reverb_server.Table(
          'signatured',
          sampler=item_selectors.Prioritized(priority_exponent=1),
          remover=item_selectors.Fifo(),
          max_size=1000000,
          rate_limiter=rate_limiters.MinSize(1),
          signature=tf.TensorSpec(dtype=tf.float32, shape=(None, None))))
  tables.append(
      reverb_server.Table(
          'bounded_spec_signatured',
          sampler=item_selectors.Prioritized(priority_exponent=1),
          remover=item_selectors.Fifo(),
          max_size=1000000,
          rate_limiter=rate_limiters.MinSize(1),
          # Currently only the `shape` and `dtype` of the bounded spec
          # is considered during signature check.
          # TODO(b/158033101): Check the boundaries as well.
          signature=tensor_spec.BoundedTensorSpec(
              dtype=tf.float32,
              shape=(None, None),
              minimum=(0.0, 0.0),
              maximum=(10.0, 10.))))
  return reverb_server.Server(tables=tables, port=None)
class LocalReplayDatasetTest(tf.test.TestCase, parameterized.TestCase):
USE_LOCALHOST = True
@classmethod
def setUpClass(cls):
super().setUpClass()
cls._server = make_server()
if cls.USE_LOCALHOST:
connect_to = 'localhost'
else:
connect_to = 'dns:///{}'.format(socket.gethostname())
cls._client = client.Client(f'{connect_to}:{cls._server.port}')
def setUp(self):
super().setUp()
self._num_prev_samples = {
table: self._get_total_num_samples(table)
for table in ('dist', 'dist_queue', 'signatured',
'bounded_spec_signatured')
}
def tearDown(self):
super().tearDown()
self._client.reset('dist')
self._client.reset('dist_queue')
self._client.reset('signatured')
self._client.reset('bounded_spec_signatured')
  @classmethod
  def tearDownClass(cls):
    """Stops the shared server started in `setUpClass`."""
    super().tearDownClass()
    cls._server.stop()
def _populate_replay(self,
sequence_length=100,
max_time_steps=None,
max_items=1000):
max_time_steps = max_time_steps or sequence_length
num_items = 0
with self._client.writer(max_time_steps) as writer:
for i in range(1000):
writer.append([np.zeros((3, 3), dtype=np.float32)])
if i % min(5, sequence_length) == 0 and i >= sequence_length:
writer.create_item(
table='dist', num_timesteps=sequence_length, priority=1)
writer.create_item(
table='dist_queue', num_timesteps=sequence_length, priority=1)
writer.create_item(
table='signatured', num_timesteps=sequence_length, priority=1)
writer.create_item(
table='bounded_spec_signatured',
num_timesteps=sequence_length,
priority=1)
num_items += 1
if num_items >= max_items:
break
def _sample_from(self, dataset, num_samples):
iterator = dataset.make_initializable_iterator()
dataset_item = iterator.get_next()
self.evaluate(iterator.initializer)
return [self.evaluate(dataset_item) for _ in range(num_samples)]
def _get_total_num_samples(self, table: str) -> int:
table_info = self._client.server_info()[table]
return table_info.rate_limiter_info.sample_stats.completed
def _get_num_samples(self, table: str) -> int:
"""Gets the number of samples since the start of the test."""
return self._get_total_num_samples(table) - self._num_prev_samples[table]
@parameterized.named_parameters(
{
'testcase_name': 'default_values',
},
{
'testcase_name': 'num_workers_per_iterator_is_0',
'num_workers_per_iterator': 0,
'want_error': ValueError,
},
{
'testcase_name': 'num_workers_per_iterator_is_1',
'num_workers_per_iterator': 1,
},
{
'testcase_name': 'num_workers_per_iterator_is_minus_1',
'num_workers_per_iterator': -1,
},
{
'testcase_name': 'num_workers_per_iterator_is_minus_2',
'num_workers_per_iterator': -2,
'want_error': ValueError,
},
{
'testcase_name': 'max_samples_per_stream_is_0',
'max_samples_per_stream': 0,
'want_error': ValueError,
},
{
'testcase_name': 'max_samples_per_stream_is_1',
'max_samples_per_stream': 1,
},
{
'testcase_name': 'max_samples_per_stream_is_minus_1',
'max_samples_per_stream': -1,
},
{
'testcase_name': 'max_samples_per_stream_is_minus_2',
'num_workers_per_iterator': -2,
'want_error': ValueError,
},
{
'testcase_name': 'max_in_flight_samples_per_worker_is_0',
'max_in_flight_samples_per_worker': 0,
'want_error': ValueError,
},
{
'testcase_name': 'max_in_flight_samples_per_worker_is_1',
'max_in_flight_samples_per_worker': 1,
},
{
'testcase_name': 'max_in_flight_samples_per_worker_is_minus_1',
'max_in_flight_samples_per_worker': -1,
'want_error': ValueError,
},
)
def test_sampler_parameter_validation(self, **kwargs):
dtypes = (tf.float32,)
shapes = (tf.TensorShape([3, 3]),)
if 'max_in_flight_samples_per_worker' not in kwargs:
kwargs['max_in_flight_samples_per_worker'] = 100
if 'want_error' in kwargs:
error = kwargs.pop('want_error')
with self.assertRaises(error):
reverb_dataset.ReplayDataset(self._client.server_address, 'dist',
dtypes, shapes, **kwargs)
else:
reverb_dataset.ReplayDataset(self._client.server_address, 'dist', dtypes,
shapes, **kwargs)
def test_iterate(self):
self._populate_replay()
dataset = reverb_dataset.ReplayDataset(
tf.constant(self._client.server_address),
table=tf.constant('dist'),
dtypes=(tf.float32,),
shapes=(tf.TensorShape([3, 3]),),
max_in_flight_samples_per_worker=100)
got = self._sample_from(dataset, 10)
for sample in got:
self.assertIsInstance(sample, replay_sample.ReplaySample)
# A single sample is returned so the key should be a scalar int64.
self.assertIsInstance(sample.info.key, np.uint64)
np.testing.assert_array_equal(sample.data[0],
np.zeros((3, 3), dtype=np.float32))
  def test_distribution_strategy(self):
    """Reads ReplayDatasets through MirroredStrategy across 4 virtual CPUs.

    Builds one two-item dataset per replica, runs two strategy iterations
    (8 items total), and checks each per-replica value carries 4 probability
    components.
    """
    self._populate_replay()

    # Split the single physical CPU into 4 virtual devices (idempotent:
    # only configure if no virtual devices exist yet).
    physical_devices = tf.config.list_physical_devices('CPU')
    configs = tf.config.experimental.get_virtual_device_configuration(
        physical_devices[0])
    if configs is None:
      virtual_devices = [tf.config.experimental.VirtualDeviceConfiguration()
                         for _ in range(4)]
      tf.config.experimental.set_virtual_device_configuration(
          physical_devices[0], virtual_devices)

    strategy = tf.distribute.MirroredStrategy(['/cpu:%d' % i for i in range(4)])

    def reverb_dataset_fn(i):
      # One ReplayDataset per replica index; each yields only 2 samples.
      tf.print('Creating dataset for replica; index:', i)
      return reverb_dataset.ReplayDataset(
          self._client.server_address,
          table=tf.constant('dist'),
          dtypes=(tf.float32,),
          shapes=(tf.TensorShape([3, 3]),),
          max_in_flight_samples_per_worker=100).take(2)

    def dataset_fn(_):
      return tf.data.Dataset.range(4).flat_map(reverb_dataset_fn).take(2 * 4)

    ds = strategy.experimental_distribute_datasets_from_function(dataset_fn)

    def check_probabilities(_, v):
      probability = v.info.probability
      self.assertLen(probability.values, 4)
      # Don't use any math ops since tensor values seem to contain
      # unaligned tensors on some systems; but tf.print doesn't check alignment.
      #
      # This seems to be caused by a compatibility issue where DistStrat isn't
      # well tested when eager mode is disabled. So instead of treating this
      # as a true TF bug, we just work around it. We can remove this hack and
      # convert it to e.g. tf.assert_greater type check if/when we enable eager
      # execution for these tests.
      tf.print('Probability values:', probability.values)

    def get_next_value(v):
      return tf.distribute.get_replica_context().merge_call(
          check_probabilities, args=(v,))

    @tf.function
    def run_strategy(ds_):
      i = tf.constant(0)
      for v in ds_:
        strategy.run(get_next_value, args=(v,))
        i += 1
      return i

    rs = run_strategy(ds)

    # Each iteration contains 4 items - one from each replica. We take 8 items
    # total, so there should be 2 iterations.
    self.assertEqual(2, self.evaluate(rs))
def test_timeout_invalid_arguments(self):
with self.assertRaisesRegex(ValueError, r'must be an integer >= -1'):
reverb_dataset.ReplayDataset(
self._client.server_address,
table='dist',
dtypes=(tf.float32,),
shapes=(tf.TensorShape([3, 3]),),
rate_limiter_timeout_ms=-2,
max_in_flight_samples_per_worker=100)
def test_timeout(self):
  """Sampling an empty queue fails after roughly rate_limiter_timeout_ms."""

  def _make_queue_dataset(timeout_ms):
    # All three datasets are identical except for the rate limiter timeout.
    return reverb_dataset.ReplayDataset(
        self._client.server_address,
        table='dist_queue',
        dtypes=(tf.float32,),
        shapes=(tf.TensorShape([3, 3]),),
        rate_limiter_timeout_ms=timeout_ms,
        max_in_flight_samples_per_worker=100)

  dataset_0s = _make_queue_dataset(50)  # Slightly above exactly 0.
  dataset_1s = _make_queue_dataset(1000)
  dataset_2s = _make_queue_dataset(2000)

  # Each timeout should produce OutOfRangeError within the expected window.
  for dataset, min_secs, max_secs in ((dataset_0s, 0, 5),
                                      (dataset_1s, 1, 10),
                                      (dataset_2s, 2, 10)):
    started_at = time.time()
    with self.assertRaisesWithPredicateMatch(tf.errors.OutOfRangeError,
                                             r'End of sequence'):
      self._sample_from(dataset, 1)
    elapsed = time.time() - started_at
    self.assertGreaterEqual(elapsed, min_secs)
    self.assertLess(elapsed, max_secs)

  # If we insert some data, and the rate limiter doesn't force any waiting,
  # then we can ask for a timeout of 0s and still get data back.
  iterator = dataset_0s.make_initializable_iterator()
  dataset_0s_item = iterator.get_next()
  self.evaluate(iterator.initializer)
  for _ in range(3):
    self._populate_replay(max_items=2)
    # Pull two items
    for _ in range(2):
      self.evaluate(dataset_0s_item)
    # Wait for the time it would take a broken sampler to time out
    # on next iteration.
    time.sleep(0.5)
@parameterized.parameters(['signatured'], ['bounded_spec_signatured'])
def test_inconsistent_signature_size(self, table_name):
  """Requesting more tensors than the table signature declares fails."""
  self._populate_replay()
  # One (dtype, shape) pair more than the signature provides.
  dataset = reverb_dataset.ReplayDataset(
      self._client.server_address,
      table=table_name,
      dtypes=(tf.float32, tf.float64),
      shapes=(tf.TensorShape([3, 3]), tf.TensorShape([])),
      max_in_flight_samples_per_worker=100)
  with self.assertRaisesWithPredicateMatch(
      tf.errors.InvalidArgumentError,
      r'Inconsistent number of tensors requested from table \'{}\'. '
      r'Requested 6 tensors, but table signature shows 5 tensors.'.format(
          table_name)):
    self._sample_from(dataset, 10)
@parameterized.parameters(['signatured'], ['bounded_spec_signatured'])
def test_incompatible_signature_dtype(self, table_name):
  """Requesting a dtype that conflicts with the table signature fails."""
  self._populate_replay()
  # Signature stores float tensors; request int64 instead.
  dataset = reverb_dataset.ReplayDataset(
      self._client.server_address,
      table=table_name,
      dtypes=(tf.int64,),
      shapes=(tf.TensorShape([3, 3]),),
      max_in_flight_samples_per_worker=100)
  with self.assertRaisesWithPredicateMatch(
      tf.errors.InvalidArgumentError,
      r'Requested incompatible tensor at flattened index 4 from table '
      r'\'{}\'. Requested \(dtype, shape\): \(int64, \[3,3\]\). '
      r'Signature \(dtype, shape\): \(float, \[\?,\?\]\)'.format(table_name)):
    self._sample_from(dataset, 10)
  # Same dtype mismatch must also be reported when emitting full sequences
  # instead of individual timesteps.
  dataset_emit_sequences = reverb_dataset.ReplayDataset(
      self._client.server_address,
      table=table_name,
      dtypes=(tf.int64,),
      shapes=(tf.TensorShape([None, 3, 3]),),
      emit_timesteps=False,
      max_in_flight_samples_per_worker=100)
  with self.assertRaisesWithPredicateMatch(
      tf.errors.InvalidArgumentError,
      r'Requested incompatible tensor at flattened index 4 from table '
      r'\'{}\'. Requested \(dtype, shape\): \(int64, \[3,3\]\). '
      r'Signature \(dtype, shape\): \(float, \[\?,\?\]\)'.format(table_name)):
    self._sample_from(dataset_emit_sequences, 10)
@parameterized.parameters(['signatured'], ['bounded_spec_signatured'])
def test_incompatible_signature_shape(self, table_name):
  """Requesting a shape that conflicts with the table signature fails."""
  self._populate_replay()
  # Signature stores rank-2 tensors; request rank-1 shape [3] instead.
  dataset = reverb_dataset.ReplayDataset(
      self._client.server_address,
      table=table_name,
      dtypes=(tf.float32,),
      shapes=(tf.TensorShape([3]),),
      max_in_flight_samples_per_worker=100)
  with self.assertRaisesWithPredicateMatch(
      tf.errors.InvalidArgumentError,
      r'Requested incompatible tensor at flattened index 4 from table '
      r'\'{}\'. Requested \(dtype, shape\): \(float, \[3\]\). '
      r'Signature \(dtype, shape\): \(float, \[\?,\?\]\)'.format(table_name)):
    self._sample_from(dataset, 10)
  # Same shape mismatch must also be reported when emitting full sequences.
  dataset_emit_sequences = reverb_dataset.ReplayDataset(
      self._client.server_address,
      table=table_name,
      dtypes=(tf.float32,),
      shapes=(tf.TensorShape([None, 3]),),
      emit_timesteps=False,
      max_in_flight_samples_per_worker=100)
  with self.assertRaisesWithPredicateMatch(
      tf.errors.InvalidArgumentError,
      r'Requested incompatible tensor at flattened index 4 from table '
      r'\'{}\'. Requested \(dtype, shape\): \(float, \[3\]\). '
      r'Signature \(dtype, shape\): \(float, \[\?,\?\]\)'.format(table_name)):
    self._sample_from(dataset_emit_sequences, 10)
@parameterized.parameters([1], [3], [10])
def test_incompatible_shape_when_using_sequence_length(self, sequence_length):
  """A leading shape dim that contradicts sequence_length is rejected."""
  # Deliberately off by one relative to the declared sequence_length.
  mismatched_leading_dim = sequence_length + 1
  with self.assertRaises(ValueError):
    reverb_dataset.ReplayDataset(
        self._client.server_address,
        table='dist',
        dtypes=(tf.float32,),
        shapes=(tf.TensorShape([mismatched_leading_dim, 3, 3]),),
        emit_timesteps=False,
        sequence_length=sequence_length,
        max_in_flight_samples_per_worker=100)
def test_incompatible_dataset_shapes_and_types_without_signature(self):
  """Without a table signature, mismatches surface when samples arrive."""
  self._populate_replay()
  # Replay holds [3, 3] float tensors; declare scalars and expect a
  # runtime error on sampling (there is no signature to fail against early).
  ds_wrong_shape = reverb_dataset.ReplayDataset(
      self._client.server_address,
      table='dist',
      dtypes=(tf.float32,),
      shapes=(tf.TensorShape([]),),
      max_in_flight_samples_per_worker=100)
  with self.assertRaisesRegex(
      tf.errors.InvalidArgumentError,
      r'Specification has \(dtype, shape\): \(float, \[\]\). '
      r'Tensor has \(dtype, shape\): \(float, \[3,3\]\).'):
    self._sample_from(ds_wrong_shape, 1)
  # Same failure mode when emitting full sequences instead of timesteps.
  ds_full_sequences_wrong_shape = reverb_dataset.ReplayDataset(
      self._client.server_address,
      table='dist',
      dtypes=(tf.float32,),
      shapes=(tf.TensorShape([None]),),
      emit_timesteps=False,
      max_in_flight_samples_per_worker=100)
  with self.assertRaisesRegex(
      tf.errors.InvalidArgumentError,
      r'Specification has \(dtype, shape\): \(float, \[\]\). '
      r'Tensor has \(dtype, shape\): \(float, \[3,3\]\).'):
    self._sample_from(ds_full_sequences_wrong_shape, 1)
@parameterized.parameters(
    ('dist', 1, 1),
    ('dist', 1, 3),
    ('dist', 3, 3),
    ('dist', 3, 5),
    ('dist', 10, 10),
    ('dist', 10, 11),
    ('signatured', 1, 1),
    ('signatured', 3, 3),
    ('signatured', 3, 5),
    ('signatured', 10, 10),
    ('bounded_spec_signatured', 1, 1),
    ('bounded_spec_signatured', 3, 3),
    ('bounded_spec_signatured', 3, 5),
    ('bounded_spec_signatured', 10, 10),
)
def test_iterate_with_sequence_length(self, table_name, sequence_length,
                                      max_time_steps):
  """Samples emitted as sequences carry a [sequence_length, ...] shape."""
  # Also ensure we get sequence_length-shaped outputs when
  # writers' max_time_steps != sequence_length.
  self._populate_replay(sequence_length, max_time_steps=max_time_steps)
  dataset = reverb_dataset.ReplayDataset(
      self._client.server_address,
      table=table_name,
      dtypes=(tf.float32,),
      shapes=(tf.TensorShape([sequence_length, 3, 3]),),
      emit_timesteps=False,
      sequence_length=sequence_length,
      max_in_flight_samples_per_worker=100)
  got = self._sample_from(dataset, 10)
  for sample in got:
    self.assertIsInstance(sample, replay_sample.ReplaySample)
    # The keys and data should be batched up by the sequence length.
    self.assertEqual(sample.info.key.shape, (sequence_length,))
    np.testing.assert_array_equal(
        sample.data[0], np.zeros((sequence_length, 3, 3), dtype=np.float32))
@parameterized.parameters(
    ('dist', 1),
    ('dist', 3),
    ('dist', 10),
    ('signatured', 1),
    ('signatured', 3),
    ('signatured', 10),
    ('bounded_spec_signatured', 1),
    ('bounded_spec_signatured', 3),
    ('bounded_spec_signatured', 10),
)
def test_iterate_with_unknown_sequence_length(self, table_name,
                                              sequence_length):
  """With sequence_length=None the time dim is statically unknown but
  samples still come back with the actual sequence length."""
  self._populate_replay(sequence_length)
  dataset = reverb_dataset.ReplayDataset(
      self._client.server_address,
      table=table_name,
      dtypes=(tf.float32,),
      shapes=(tf.TensorShape([None, 3, 3]),),
      emit_timesteps=False,
      sequence_length=None,
      max_in_flight_samples_per_worker=100)
  # Check the shape of the items. The leading (time) dimension must be
  # statically unknown. Fixed: the original passed a stray `None` as the
  # `msg` argument of assertIsNone, which read as if it were an expected
  # value; behavior is unchanged since `msg` defaults to None.
  iterator = dataset.make_initializable_iterator()
  dataset_item = iterator.get_next()
  self.assertIsNone(dataset_item.info.key.shape.as_list()[0])
  self.assertIsNone(dataset_item.data[0].shape.as_list()[0])
  # Verify that once evaluated, the samples has the expected length.
  got = self._sample_from(dataset, 10)
  for sample in got:
    self.assertIsInstance(sample, replay_sample.ReplaySample)
    # The keys and data should be batched up by the sequence length.
    self.assertEqual(sample.info.key.shape, (sequence_length,))
    np.testing.assert_array_equal(
        sample.data[0], np.zeros((sequence_length, 3, 3), dtype=np.float32))
@parameterized.parameters(
    ('dist', 1, 2),
    ('dist', 2, 1),
    ('signatured', 1, 2),
    ('signatured', 2, 1),
    ('bounded_spec_signatured', 1, 2),
    ('bounded_spec_signatured', 2, 1),
)
def test_checks_sequence_length_when_timesteps_emitted(
    self, table_name, actual_sequence_length, provided_sequence_length):
  """A wrong sequence_length is detected even in emit_timesteps mode."""
  self._populate_replay(actual_sequence_length)
  # provided_sequence_length deliberately differs from the actual length.
  dataset = reverb_dataset.ReplayDataset(
      self._client.server_address,
      table=table_name,
      dtypes=(tf.float32,),
      shapes=(tf.TensorShape([provided_sequence_length, 3, 3]),),
      emit_timesteps=True,
      sequence_length=provided_sequence_length,
      max_in_flight_samples_per_worker=100)
  with self.assertRaises(tf.errors.InvalidArgumentError):
    self._sample_from(dataset, 10)
@parameterized.named_parameters(
    dict(testcase_name='TableDist', table_name='dist'),
    dict(testcase_name='TableSignatured', table_name='signatured'),
    dict(
        testcase_name='TableBoundedSpecSignatured',
        table_name='bounded_spec_signatured'))
def test_iterate_batched(self, table_name):
  """dataset.batch() groups both the keys and the data of sampled items."""
  self._populate_replay()
  dataset = reverb_dataset.ReplayDataset(
      self._client.server_address,
      table=table_name,
      dtypes=(tf.float32,),
      shapes=(tf.TensorShape([3, 3]),),
      max_in_flight_samples_per_worker=100)
  # Second positional arg is drop_remainder: every batch has exactly 2.
  dataset = dataset.batch(2, True)
  got = self._sample_from(dataset, 10)
  for sample in got:
    self.assertIsInstance(sample, replay_sample.ReplaySample)
    # The keys should be batched up like the data.
    self.assertEqual(sample.info.key.shape, (2,))
    np.testing.assert_array_equal(sample.data[0],
                                  np.zeros((2, 3, 3), dtype=np.float32))
def test_iterate_nested_and_batched(self):
  """Nested (dict/list) items survive sampling and batching intact."""
  with self._client.writer(100) as writer:
    for i in range(1000):
      writer.append({
          'observation': {
              'data': np.zeros((3, 3), dtype=np.float32),
              'extras': [
                  np.int64(10),
                  np.ones([1], dtype=np.int32),
              ],
          },
          'reward': np.zeros((10, 10), dtype=np.float32),
      })
      if i % 5 == 0 and i >= 100:
        writer.create_item(
            table='dist', num_timesteps=100, priority=1)
  dataset = reverb_dataset.ReplayDataset(
      self._client.server_address,
      table='dist',
      dtypes=(((tf.float32), (tf.int64, tf.int32)), tf.float32),
      shapes=((tf.TensorShape([3, 3]), (tf.TensorShape(None),
                                        tf.TensorShape([1]))),
              tf.TensorShape([10, 10])),
      max_in_flight_samples_per_worker=100)
  dataset = dataset.batch(3)
  # `structure` is only consumed by tree.unflatten_as below, which reads the
  # nesting structure, never the TensorSpec leaf values.
  structure = {
      'observation': {
          'data':
              tf.TensorSpec([3, 3], tf.float32),
          'extras': [
              tf.TensorSpec([], tf.int64),
              tf.TensorSpec([1], tf.int32),
          ],
      },
      # Fixed: previously declared as tf.TensorSpec([], tf.int64), which
      # contradicted the float32 [10, 10] reward written above. Behavior is
      # unchanged because only the nesting structure is used.
      'reward': tf.TensorSpec([10, 10], tf.float32),
  }
  got = self._sample_from(dataset, 10)
  self.assertLen(got, 10)
  for sample in got:
    self.assertIsInstance(sample, replay_sample.ReplaySample)
    transition = tree.unflatten_as(structure, tree.flatten(sample.data))
    # Batch dimension of 3 is prepended to every leaf.
    np.testing.assert_array_equal(transition['observation']['data'],
                                  np.zeros([3, 3, 3], dtype=np.float32))
    np.testing.assert_array_equal(transition['observation']['extras'][0],
                                  np.ones([3], dtype=np.int64) * 10)
    np.testing.assert_array_equal(transition['observation']['extras'][1],
                                  np.ones([3, 1], dtype=np.int32))
    np.testing.assert_array_equal(transition['reward'],
                                  np.zeros([3, 10, 10], dtype=np.float32))
def test_multiple_iterators(self):
  """Several iterators over one dataset each draw full samples."""
  with self._client.writer(100) as writer:
    # Write a single 10-step trajectory of 81x81 frames valued 0..9.
    for i in range(10):
      writer.append([np.ones((81, 81), dtype=np.float32) * i])
    writer.create_item(table='dist', num_timesteps=10, priority=1)
  trajectory_length = 5
  batch_size = 3
  dataset = reverb_dataset.ReplayDataset(
      self._client.server_address,
      table='dist',
      dtypes=(tf.float32,),
      shapes=(tf.TensorShape([81, 81]),),
      max_in_flight_samples_per_worker=100)
  dataset = dataset.batch(trajectory_length)
  # One independent initializable iterator per "replica".
  iterators = [
      dataset.make_initializable_iterator() for _ in range(batch_size)
  ]
  items = tf.stack(
      [tf.squeeze(iterator.get_next().data) for iterator in iterators])
  with self.session() as session:
    session.run([iterator.initializer for iterator in iterators])
    got = session.run(items)
    self.assertEqual(got.shape, (batch_size, trajectory_length, 81, 81))
    # Every iterator yields the same written trajectory prefix.
    want = np.array(
        [[np.ones([81, 81]) * i for i in range(trajectory_length)]] *
        batch_size)
    np.testing.assert_array_equal(got, want)
def test_iterate_over_blobs(self):
  """Directly-inserted items round-trip through sampling with metadata."""
  for _ in range(10):
    self._client.insert((np.ones([3, 3], dtype=np.int32)), {'dist': 1})
  dataset = reverb_dataset.ReplayDataset(
      self._client.server_address,
      table='dist',
      dtypes=(tf.int32,),
      shapes=(tf.TensorShape([3, 3]),),
      max_in_flight_samples_per_worker=100)
  got = self._sample_from(dataset, 20)
  self.assertLen(got, 20)
  for sample in got:
    self.assertIsInstance(sample, replay_sample.ReplaySample)
    # Sample metadata carries a uint64 key and a float64 probability.
    self.assertIsInstance(sample.info.key, np.uint64)
    self.assertIsInstance(sample.info.probability, np.float64)
    np.testing.assert_array_equal(sample.data[0],
                                  np.ones((3, 3), dtype=np.int32))
@parameterized.parameters(1, 3, 7)
def test_respects_max_in_flight_samples_per_worker(
    self, max_in_flight_samples_per_worker):
  """Sampling fetches in increments of max_in_flight_samples_per_worker."""
  if not self.USE_LOCALHOST:
    self.skipTest('TODO(b/190761815): test broken in Nonlocal case')
  for _ in range(10):
    self._client.insert((np.ones([3, 3], dtype=np.int32)), {'dist': 1})
  dataset = reverb_dataset.ReplayDataset(
      self._client.server_address,
      table='dist',
      dtypes=(tf.int32,),
      shapes=(tf.TensorShape([3, 3]),),
      max_in_flight_samples_per_worker=max_in_flight_samples_per_worker)
  iterator = dataset.make_initializable_iterator()
  dataset_item = iterator.get_next()
  self.evaluate(iterator.initializer)
  for _ in range(100):
    self.evaluate(dataset_item)
  # Check that the buffer is incremented by steps of
  # max_in_flight_samples_per_worker.
  self.assertEqual(
      self._get_num_samples('dist') % max_in_flight_samples_per_worker, 0)
def test_iterate_over_batched_blobs(self):
  """Batching directly-inserted items also batches their keys."""
  for _ in range(10):
    self._client.insert((np.ones([3, 3], dtype=np.int32)), {'dist': 1})
  dataset = reverb_dataset.ReplayDataset(
      self._client.server_address,
      table='dist',
      dtypes=(tf.int32,),
      shapes=(tf.TensorShape([3, 3]),),
      max_in_flight_samples_per_worker=100)
  dataset = dataset.batch(5)
  got = self._sample_from(dataset, 20)
  self.assertLen(got, 20)
  for sample in got:
    self.assertIsInstance(sample, replay_sample.ReplaySample)
    # Keys are batched up like the data.
    self.assertEqual(sample.info.key.shape, (5,))
    np.testing.assert_array_equal(sample.data[0],
                                  np.ones((5, 3, 3), dtype=np.int32))
def test_converts_spec_lists_into_tuples(self):
  """List-valued dtypes/shapes specs come back as tuples in samples."""
  for _ in range(10):
    # Mixed nesting: tuples and lists at several levels.
    data = [
        (np.ones([1, 1], dtype=np.int32),),
        [
            np.ones([3, 3], dtype=np.int8),
            (np.ones([2, 2], dtype=np.float64),)
        ],
    ]
    self._client.insert(data, {'dist': 1})
  dataset = reverb_dataset.ReplayDataset(
      self._client.server_address,
      table='dist',
      dtypes=[
          (tf.int32,),
          [
              tf.int8,
              (tf.float64,),
          ],
      ],
      shapes=[
          (tf.TensorShape([1, 1]),),
          [
              tf.TensorShape([3, 3]),
              (tf.TensorShape([2, 2]),),
          ],
      ],
      max_in_flight_samples_per_worker=100)
  got = self._sample_from(dataset, 10)
  for sample in got:
    self.assertIsInstance(sample, replay_sample.ReplaySample)
    self.assertIsInstance(sample.info.key, np.uint64)
    # Lists in the spec have been normalized to tuples in the output.
    tree.assert_same_structure(sample.data, (
        (None,),
        (
            None,
            (None,),
        ),
    ))
def test_session_is_closed_while_op_pending(self):
  """Closing the session from another thread cancels a pending get_next()."""
  dataset = reverb_dataset.ReplayDataset(
      self._client.server_address,
      table='dist',
      dtypes=tf.float32,
      shapes=tf.TensorShape([]),
      max_in_flight_samples_per_worker=100)
  iterator = dataset.make_initializable_iterator()
  item = iterator.get_next()

  def _session_closer(sess, wait_time_secs):
    # Returns a thunk that closes `sess` after `wait_time_secs` seconds.
    def _fn():
      time.sleep(wait_time_secs)
      sess.close()
    return _fn

  with self.session() as sess:
    sess.run(iterator.initializer)
    # Close the session 3 seconds from now, while sess.run(item) is blocked.
    thread = threading.Thread(target=_session_closer(sess, 3))
    thread.start()
    with self.assertRaises(tf.errors.CancelledError):
      sess.run(item)
class NonlocalReplayDatasetTest(LocalReplayDatasetTest):
  """Re-runs the whole LocalReplayDatasetTest suite over a non-localhost
  connection to the server."""

  USE_LOCALHOST = False
class FromTableSignatureTest(tf.test.TestCase):
  """Tests for ReplayDataset.from_table_signature."""

  def test_table_not_found(self):
    """An unknown table name raises and lists the available tables."""
    server = reverb_server.Server([
        reverb_server.Table.queue('table_a', 10),
        reverb_server.Table.queue('table_c', 10),
        reverb_server.Table.queue('table_b', 10),
    ])
    address = f'localhost:{server.port}'
    # Tables were created as a, c, b; the error lists them as a, b, c.
    with self.assertRaisesWithPredicateMatch(
        ValueError,
        f'Server at {address} does not contain any table named not_found. '
        f'Found: table_a, table_b, table_c.'):
      reverb_dataset.ReplayDataset.from_table_signature(
          address, 'not_found', 100)

  def test_server_not_found(self):
    """An unreachable server raises DeadlineExceededError after the timeout."""
    with self.assertRaises(errors.DeadlineExceededError):
      reverb_dataset.ReplayDataset.from_table_signature(
          'localhost:1234', 'not_found', 100, get_signature_timeout_secs=1)

  def test_table_does_not_have_signature(self):
    """A table created without a signature is rejected with ValueError."""
    server = make_server()
    address = f'localhost:{server.port}'
    with self.assertRaisesWithPredicateMatch(
        ValueError, f'Table dist at {address} does not have a signature.'):
      reverb_dataset.ReplayDataset.from_table_signature(
          address, 'dist', 100)

  def test_sets_dtypes_from_signature(self):
    """The dataset's element spec data matches the table signature exactly."""
    signature = {
        'a': {
            'b': tf.TensorSpec([3, 3], tf.float32),
            'c': tf.TensorSpec([], tf.int64),
        },
        'x': tf.TensorSpec([None], tf.uint64),
    }
    server = reverb_server.Server(
        [reverb_server.Table.queue('queue', 10, signature=signature)])
    dataset = reverb_dataset.ReplayDataset.from_table_signature(
        f'localhost:{server.port}', 'queue', 100)
    self.assertDictEqual(dataset.element_spec.data, signature)

  def test_sets_dtypes_from_bounded_spec_signature(self):
    """BoundedTensorSpec signatures become plain TensorSpecs in the spec."""
    bounded_spec_signature = {
        'a': {
            'b': tensor_spec.BoundedTensorSpec([3, 3], tf.float32, 0, 3),
            'c': tensor_spec.BoundedTensorSpec([], tf.int64, 0, 5),
        },
    }
    server = reverb_server.Server([
        reverb_server.Table.queue(
            'queue', 10, signature=bounded_spec_signature)
    ])
    dataset = reverb_dataset.ReplayDataset.from_table_signature(
        f'localhost:{server.port}', 'queue', 100)
    # The bounds are dropped; only dtype and shape survive.
    self.assertDictEqual(
        dataset.element_spec.data, {
            'a': {
                'b': tf.TensorSpec([3, 3], tf.float32),
                'c': tf.TensorSpec([], tf.int64),
            },
        })

  def test_combines_sequence_length_with_signature_if_not_emit_timestamps(self):
    """sequence_length is prepended to each signature spec's shape."""
    server = reverb_server.Server([
        reverb_server.Table.queue(
            'queue',
            10,
            signature={
                'a': {
                    'b': tf.TensorSpec([3, 3], tf.float32),
                    'c': tf.TensorSpec([], tf.int64),
                },
            })
    ])
    dataset = reverb_dataset.ReplayDataset.from_table_signature(
        f'localhost:{server.port}',
        'queue',
        100,
        emit_timesteps=False,
        sequence_length=5)
    # [3, 3] -> [5, 3, 3] and [] -> [5].
    self.assertDictEqual(
        dataset.element_spec.data, {
            'a': {
                'b': tf.TensorSpec([5, 3, 3], tf.float32),
                'c': tf.TensorSpec([5], tf.int64),
            },
        })
if __name__ == '__main__':
  # These tests rely on TF1-style graph mode throughout (initializable
  # iterators, explicit sessions), so disable eager execution before running.
  tf.disable_eager_execution()
  tf.test.main()
| [
"reverb.item_selectors.Fifo",
"numpy.ones",
"tensorflow.compat.v1.disable_eager_execution",
"tensorflow.compat.v1.print",
"tensorflow.python.framework.tensor_spec.BoundedTensorSpec",
"tensorflow.compat.v1.constant",
"tensorflow.compat.v1.TensorShape",
"tensorflow.compat.v1.test.main",
"socket.gethos... | [((5295, 6587), 'absl.testing.parameterized.named_parameters', 'parameterized.named_parameters', (["{'testcase_name': 'default_values'}", "{'testcase_name': 'num_workers_per_iterator_is_0',\n 'num_workers_per_iterator': 0, 'want_error': ValueError}", "{'testcase_name': 'num_workers_per_iterator_is_1',\n 'num_workers_per_iterator': 1}", "{'testcase_name': 'num_workers_per_iterator_is_minus_1',\n 'num_workers_per_iterator': -1}", "{'testcase_name': 'num_workers_per_iterator_is_minus_2',\n 'num_workers_per_iterator': -2, 'want_error': ValueError}", "{'testcase_name': 'max_samples_per_stream_is_0', 'max_samples_per_stream': \n 0, 'want_error': ValueError}", "{'testcase_name': 'max_samples_per_stream_is_1', 'max_samples_per_stream': 1}", "{'testcase_name': 'max_samples_per_stream_is_minus_1',\n 'max_samples_per_stream': -1}", "{'testcase_name': 'max_samples_per_stream_is_minus_2',\n 'num_workers_per_iterator': -2, 'want_error': ValueError}", "{'testcase_name': 'max_in_flight_samples_per_worker_is_0',\n 'max_in_flight_samples_per_worker': 0, 'want_error': ValueError}", "{'testcase_name': 'max_in_flight_samples_per_worker_is_1',\n 'max_in_flight_samples_per_worker': 1}", "{'testcase_name': 'max_in_flight_samples_per_worker_is_minus_1',\n 'max_in_flight_samples_per_worker': -1, 'want_error': ValueError}"], {}), "({'testcase_name': 'default_values'}, {\n 'testcase_name': 'num_workers_per_iterator_is_0',\n 'num_workers_per_iterator': 0, 'want_error': ValueError}, {\n 'testcase_name': 'num_workers_per_iterator_is_1',\n 'num_workers_per_iterator': 1}, {'testcase_name':\n 'num_workers_per_iterator_is_minus_1', 'num_workers_per_iterator': -1},\n {'testcase_name': 'num_workers_per_iterator_is_minus_2',\n 'num_workers_per_iterator': -2, 'want_error': ValueError}, {\n 'testcase_name': 'max_samples_per_stream_is_0',\n 'max_samples_per_stream': 0, 'want_error': ValueError}, {\n 'testcase_name': 'max_samples_per_stream_is_1',\n 'max_samples_per_stream': 1}, 
{'testcase_name':\n 'max_samples_per_stream_is_minus_1', 'max_samples_per_stream': -1}, {\n 'testcase_name': 'max_samples_per_stream_is_minus_2',\n 'num_workers_per_iterator': -2, 'want_error': ValueError}, {\n 'testcase_name': 'max_in_flight_samples_per_worker_is_0',\n 'max_in_flight_samples_per_worker': 0, 'want_error': ValueError}, {\n 'testcase_name': 'max_in_flight_samples_per_worker_is_1',\n 'max_in_flight_samples_per_worker': 1}, {'testcase_name':\n 'max_in_flight_samples_per_worker_is_minus_1',\n 'max_in_flight_samples_per_worker': -1, 'want_error': ValueError})\n", (5325, 6587), False, 'from absl.testing import parameterized\n'), ((13318, 13387), 'absl.testing.parameterized.parameters', 'parameterized.parameters', (["['signatured']", "['bounded_spec_signatured']"], {}), "(['signatured'], ['bounded_spec_signatured'])\n", (13342, 13387), False, 'from absl.testing import parameterized\n'), ((14033, 14102), 'absl.testing.parameterized.parameters', 'parameterized.parameters', (["['signatured']", "['bounded_spec_signatured']"], {}), "(['signatured'], ['bounded_spec_signatured'])\n", (14057, 14102), False, 'from absl.testing import parameterized\n'), ((15403, 15472), 'absl.testing.parameterized.parameters', 'parameterized.parameters', (["['signatured']", "['bounded_spec_signatured']"], {}), "(['signatured'], ['bounded_spec_signatured'])\n", (15427, 15472), False, 'from absl.testing import parameterized\n'), ((16768, 16808), 'absl.testing.parameterized.parameters', 'parameterized.parameters', (['[1]', '[3]', '[10]'], {}), '([1], [3], [10])\n', (16792, 16808), False, 'from absl.testing import parameterized\n'), ((18358, 18733), 'absl.testing.parameterized.parameters', 'parameterized.parameters', (["('dist', 1, 1)", "('dist', 1, 3)", "('dist', 3, 3)", "('dist', 3, 5)", "('dist', 10, 10)", "('dist', 10, 11)", "('signatured', 1, 1)", "('signatured', 3, 3)", "('signatured', 3, 5)", "('signatured', 10, 10)", "('bounded_spec_signatured', 1, 1)", 
"('bounded_spec_signatured', 3, 3)", "('bounded_spec_signatured', 3, 5)", "('bounded_spec_signatured', 10, 10)"], {}), "(('dist', 1, 1), ('dist', 1, 3), ('dist', 3, 3), (\n 'dist', 3, 5), ('dist', 10, 10), ('dist', 10, 11), ('signatured', 1, 1),\n ('signatured', 3, 3), ('signatured', 3, 5), ('signatured', 10, 10), (\n 'bounded_spec_signatured', 1, 1), ('bounded_spec_signatured', 3, 3), (\n 'bounded_spec_signatured', 3, 5), ('bounded_spec_signatured', 10, 10))\n", (18382, 18733), False, 'from absl.testing import parameterized\n'), ((19822, 20056), 'absl.testing.parameterized.parameters', 'parameterized.parameters', (["('dist', 1)", "('dist', 3)", "('dist', 10)", "('signatured', 1)", "('signatured', 3)", "('signatured', 10)", "('bounded_spec_signatured', 1)", "('bounded_spec_signatured', 3)", "('bounded_spec_signatured', 10)"], {}), "(('dist', 1), ('dist', 3), ('dist', 10), (\n 'signatured', 1), ('signatured', 3), ('signatured', 10), (\n 'bounded_spec_signatured', 1), ('bounded_spec_signatured', 3), (\n 'bounded_spec_signatured', 10))\n", (19846, 20056), False, 'from absl.testing import parameterized\n'), ((21294, 21474), 'absl.testing.parameterized.parameters', 'parameterized.parameters', (["('dist', 1, 2)", "('dist', 2, 1)", "('signatured', 1, 2)", "('signatured', 2, 1)", "('bounded_spec_signatured', 1, 2)", "('bounded_spec_signatured', 2, 1)"], {}), "(('dist', 1, 2), ('dist', 2, 1), ('signatured', 1, \n 2), ('signatured', 2, 1), ('bounded_spec_signatured', 1, 2), (\n 'bounded_spec_signatured', 2, 1))\n", (21318, 21474), False, 'from absl.testing import parameterized\n'), ((27095, 27128), 'absl.testing.parameterized.parameters', 'parameterized.parameters', (['(1)', '(3)', '(7)'], {}), '(1, 3, 7)\n', (27119, 27128), False, 'from absl.testing import parameterized\n'), ((34022, 34050), 'tensorflow.compat.v1.disable_eager_execution', 'tf.disable_eager_execution', ([], {}), '()\n', (34048, 34050), True, 'import tensorflow.compat.v1 as tf\n'), ((34053, 34067), 
'tensorflow.compat.v1.test.main', 'tf.test.main', ([], {}), '()\n', (34065, 34067), True, 'import tensorflow.compat.v1 as tf\n'), ((3111, 3160), 'reverb.client.Client', 'client.Client', (['f"""{connect_to}:{cls._server.port}"""'], {}), "(f'{connect_to}:{cls._server.port}')\n", (3124, 3160), False, 'from reverb import client\n'), ((8377, 8415), 'tensorflow.compat.v1.config.list_physical_devices', 'tf.config.list_physical_devices', (['"""CPU"""'], {}), "('CPU')\n", (8408, 8415), True, 'import tensorflow.compat.v1 as tf\n'), ((8431, 8507), 'tensorflow.compat.v1.config.experimental.get_virtual_device_configuration', 'tf.config.experimental.get_virtual_device_configuration', (['physical_devices[0]'], {}), '(physical_devices[0])\n', (8486, 8507), True, 'import tensorflow.compat.v1 as tf\n'), ((11813, 11824), 'time.time', 'time.time', ([], {}), '()\n', (11822, 11824), False, 'import time\n'), ((12134, 12145), 'time.time', 'time.time', ([], {}), '()\n', (12143, 12145), False, 'import time\n'), ((12456, 12467), 'time.time', 'time.time', ([], {}), '()\n', (12465, 12467), False, 'import time\n'), ((32296, 32391), 'reverb.dataset.ReplayDataset.from_table_signature', 'reverb_dataset.ReplayDataset.from_table_signature', (['f"""localhost:{server.port}"""', '"""queue"""', '(100)'], {}), "(f'localhost:{server.port}',\n 'queue', 100)\n", (32345, 32391), True, 'from reverb import dataset as reverb_dataset\n'), ((32877, 32972), 'reverb.dataset.ReplayDataset.from_table_signature', 'reverb_dataset.ReplayDataset.from_table_signature', (['f"""localhost:{server.port}"""', '"""queue"""', '(100)'], {}), "(f'localhost:{server.port}',\n 'queue', 100)\n", (32926, 32972), True, 'from reverb import dataset as reverb_dataset\n'), ((33599, 33735), 'reverb.dataset.ReplayDataset.from_table_signature', 'reverb_dataset.ReplayDataset.from_table_signature', (['f"""localhost:{server.port}"""', '"""queue"""', '(100)'], {'emit_timesteps': '(False)', 'sequence_length': '(5)'}), 
"(f'localhost:{server.port}',\n 'queue', 100, emit_timesteps=False, sequence_length=5)\n", (33648, 33735), True, 'from reverb import dataset as reverb_dataset\n'), ((7074, 7096), 'tensorflow.compat.v1.TensorShape', 'tf.TensorShape', (['[3, 3]'], {}), '([3, 3])\n', (7088, 7096), True, 'import tensorflow.compat.v1 as tf\n'), ((7473, 7568), 'reverb.dataset.ReplayDataset', 'reverb_dataset.ReplayDataset', (['self._client.server_address', '"""dist"""', 'dtypes', 'shapes'], {}), "(self._client.server_address, 'dist', dtypes,\n shapes, **kwargs)\n", (7501, 7568), True, 'from reverb import dataset as reverb_dataset\n'), ((7708, 7748), 'tensorflow.compat.v1.constant', 'tf.constant', (['self._client.server_address'], {}), '(self._client.server_address)\n', (7719, 7748), True, 'import tensorflow.compat.v1 as tf\n'), ((8668, 8765), 'tensorflow.compat.v1.config.experimental.set_virtual_device_configuration', 'tf.config.experimental.set_virtual_device_configuration', (['physical_devices[0]', 'virtual_devices'], {}), '(physical_devices[0],\n virtual_devices)\n', (8723, 8765), True, 'import tensorflow.compat.v1 as tf\n'), ((8892, 8943), 'tensorflow.compat.v1.print', 'tf.print', (['"""Creating dataset for replica; index:"""', 'i'], {}), "('Creating dataset for replica; index:', i)\n", (8900, 8943), True, 'import tensorflow.compat.v1 as tf\n'), ((10007, 10058), 'tensorflow.compat.v1.print', 'tf.print', (['"""Probability values:"""', 'probability.values'], {}), "('Probability values:', probability.values)\n", (10015, 10058), True, 'import tensorflow.compat.v1 as tf\n'), ((10245, 10259), 'tensorflow.compat.v1.constant', 'tf.constant', (['(0)'], {}), '(0)\n', (10256, 10259), True, 'import tensorflow.compat.v1 as tf\n'), ((12017, 12028), 'time.time', 'time.time', ([], {}), '()\n', (12026, 12028), False, 'import time\n'), ((12338, 12349), 'time.time', 'time.time', ([], {}), '()\n', (12347, 12349), False, 'import time\n'), ((12660, 12671), 'time.time', 'time.time', ([], {}), '()\n', 
(12669, 12671), False, 'import time\n'), ((13298, 13313), 'time.sleep', 'time.sleep', (['(0.5)'], {}), '(0.5)\n', (13308, 13313), False, 'import time\n'), ((24365, 24392), 'tensorflow.compat.v1.TensorSpec', 'tf.TensorSpec', (['[]', 'tf.int64'], {}), '([], tf.int64)\n', (24378, 24392), True, 'import tensorflow.compat.v1 as tf\n'), ((26295, 26335), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['got', 'want'], {}), '(got, want)\n', (26324, 26335), True, 'import numpy as np\n'), ((29775, 29842), 'tree.assert_same_structure', 'tree.assert_same_structure', (['sample.data', '((None,), (None, (None,)))'], {}), '(sample.data, ((None,), (None, (None,))))\n', (29801, 29842), False, 'import tree\n'), ((31290, 31366), 'reverb.dataset.ReplayDataset.from_table_signature', 'reverb_dataset.ReplayDataset.from_table_signature', (['address', '"""not_found"""', '(100)'], {}), "(address, 'not_found', 100)\n", (31339, 31366), True, 'from reverb import dataset as reverb_dataset\n'), ((31478, 31597), 'reverb.dataset.ReplayDataset.from_table_signature', 'reverb_dataset.ReplayDataset.from_table_signature', (['"""localhost:1234"""', '"""not_found"""', '(100)'], {'get_signature_timeout_secs': '(1)'}), "('localhost:1234',\n 'not_found', 100, get_signature_timeout_secs=1)\n", (31527, 31597), True, 'from reverb import dataset as reverb_dataset\n'), ((31850, 31921), 'reverb.dataset.ReplayDataset.from_table_signature', 'reverb_dataset.ReplayDataset.from_table_signature', (['address', '"""dist"""', '(100)'], {}), "(address, 'dist', 100)\n", (31899, 31921), True, 'from reverb import dataset as reverb_dataset\n'), ((32134, 32166), 'tensorflow.compat.v1.TensorSpec', 'tf.TensorSpec', (['[None]', 'tf.uint64'], {}), '([None], tf.uint64)\n', (32147, 32166), True, 'import tensorflow.compat.v1 as tf\n'), ((3071, 3091), 'socket.gethostname', 'socket.gethostname', ([], {}), '()\n', (3089, 3091), False, 'import socket\n'), ((7328, 7423), 'reverb.dataset.ReplayDataset', 
'reverb_dataset.ReplayDataset', (['self._client.server_address', '"""dist"""', 'dtypes', 'shapes'], {}), "(self._client.server_address, 'dist', dtypes,\n shapes, **kwargs)\n", (7356, 7423), True, 'from reverb import dataset as reverb_dataset\n'), ((7764, 7783), 'tensorflow.compat.v1.constant', 'tf.constant', (['"""dist"""'], {}), "('dist')\n", (7775, 7783), True, 'import tensorflow.compat.v1 as tf\n'), ((8248, 8282), 'numpy.zeros', 'np.zeros', (['(3, 3)'], {'dtype': 'np.float32'}), '((3, 3), dtype=np.float32)\n', (8256, 8282), True, 'import numpy as np\n'), ((8566, 8617), 'tensorflow.compat.v1.config.experimental.VirtualDeviceConfiguration', 'tf.config.experimental.VirtualDeviceConfiguration', ([], {}), '()\n', (8615, 8617), True, 'import tensorflow.compat.v1 as tf\n'), ((19765, 19816), 'numpy.zeros', 'np.zeros', (['(sequence_length, 3, 3)'], {'dtype': 'np.float32'}), '((sequence_length, 3, 3), dtype=np.float32)\n', (19773, 19816), True, 'import numpy as np\n'), ((21237, 21288), 'numpy.zeros', 'np.zeros', (['(sequence_length, 3, 3)'], {'dtype': 'np.float32'}), '((sequence_length, 3, 3), dtype=np.float32)\n', (21245, 21288), True, 'import numpy as np\n'), ((23058, 23095), 'numpy.zeros', 'np.zeros', (['(2, 3, 3)'], {'dtype': 'np.float32'}), '((2, 3, 3), dtype=np.float32)\n', (23066, 23095), True, 'import numpy as np\n'), ((24171, 24204), 'tensorflow.compat.v1.TensorSpec', 'tf.TensorSpec', (['[3, 3]', 'tf.float32'], {}), '([3, 3], tf.float32)\n', (24184, 24204), True, 'import tensorflow.compat.v1 as tf\n'), ((24606, 24631), 'tree.flatten', 'tree.flatten', (['sample.data'], {}), '(sample.data)\n', (24618, 24631), False, 'import tree\n'), ((24740, 24777), 'numpy.zeros', 'np.zeros', (['[3, 3, 3]'], {'dtype': 'np.float32'}), '([3, 3, 3], dtype=np.float32)\n', (24748, 24777), True, 'import numpy as np\n'), ((25038, 25069), 'numpy.ones', 'np.ones', (['[3, 1]'], {'dtype': 'np.int32'}), '([3, 1], dtype=np.int32)\n', (25045, 25069), True, 'import numpy as np\n'), ((25165, 
25204), 'numpy.zeros', 'np.zeros', (['[3, 10, 10]'], {'dtype': 'np.float32'}), '([3, 10, 10], dtype=np.float32)\n', (25173, 25204), True, 'import numpy as np\n'), ((26425, 26456), 'numpy.ones', 'np.ones', (['[3, 3]'], {'dtype': 'np.int32'}), '([3, 3], dtype=np.int32)\n', (26432, 26456), True, 'import numpy as np\n'), ((27058, 27089), 'numpy.ones', 'np.ones', (['(3, 3)'], {'dtype': 'np.int32'}), '((3, 3), dtype=np.int32)\n', (27065, 27089), True, 'import numpy as np\n'), ((27384, 27415), 'numpy.ones', 'np.ones', (['[3, 3]'], {'dtype': 'np.int32'}), '([3, 3], dtype=np.int32)\n', (27391, 27415), True, 'import numpy as np\n'), ((28173, 28204), 'numpy.ones', 'np.ones', (['[3, 3]'], {'dtype': 'np.int32'}), '([3, 3], dtype=np.int32)\n', (28180, 28204), True, 'import numpy as np\n'), ((28769, 28803), 'numpy.ones', 'np.ones', (['(5, 3, 3)'], {'dtype': 'np.int32'}), '((5, 3, 3), dtype=np.int32)\n', (28776, 28803), True, 'import numpy as np\n'), ((30112, 30130), 'tensorflow.compat.v1.TensorShape', 'tf.TensorShape', (['[]'], {}), '([])\n', (30126, 30130), True, 'import tensorflow.compat.v1 as tf\n'), ((30336, 30362), 'time.sleep', 'time.sleep', (['wait_time_secs'], {}), '(wait_time_secs)\n', (30346, 30362), False, 'import time\n'), ((30904, 30944), 'reverb.server.Table.queue', 'reverb_server.Table.queue', (['"""table_a"""', '(10)'], {}), "('table_a', 10)\n", (30929, 30944), True, 'from reverb import server as reverb_server\n'), ((30954, 30994), 'reverb.server.Table.queue', 'reverb_server.Table.queue', (['"""table_c"""', '(10)'], {}), "('table_c', 10)\n", (30979, 30994), True, 'from reverb import server as reverb_server\n'), ((31004, 31044), 'reverb.server.Table.queue', 'reverb_server.Table.queue', (['"""table_b"""', '(10)'], {}), "('table_b', 10)\n", (31029, 31044), True, 'from reverb import server as reverb_server\n'), ((32029, 32062), 'tensorflow.compat.v1.TensorSpec', 'tf.TensorSpec', (['[3, 3]', 'tf.float32'], {}), '([3, 3], tf.float32)\n', (32042, 32062), True, 'import 
tensorflow.compat.v1 as tf\n'), ((32081, 32108), 'tensorflow.compat.v1.TensorSpec', 'tf.TensorSpec', (['[]', 'tf.int64'], {}), '([], tf.int64)\n', (32094, 32108), True, 'import tensorflow.compat.v1 as tf\n'), ((32219, 32278), 'reverb.server.Table.queue', 'reverb_server.Table.queue', (['"""queue"""', '(10)'], {'signature': 'signature'}), "('queue', 10, signature=signature)\n", (32244, 32278), True, 'from reverb import server as reverb_server\n'), ((32582, 32637), 'tensorflow.python.framework.tensor_spec.BoundedTensorSpec', 'tensor_spec.BoundedTensorSpec', (['[3, 3]', 'tf.float32', '(0)', '(3)'], {}), '([3, 3], tf.float32, 0, 3)\n', (32611, 32637), False, 'from tensorflow.python.framework import tensor_spec\n'), ((32656, 32705), 'tensorflow.python.framework.tensor_spec.BoundedTensorSpec', 'tensor_spec.BoundedTensorSpec', (['[]', 'tf.int64', '(0)', '(5)'], {}), '([], tf.int64, 0, 5)\n', (32685, 32705), False, 'from tensorflow.python.framework import tensor_spec\n'), ((32769, 32841), 'reverb.server.Table.queue', 'reverb_server.Table.queue', (['"""queue"""', '(10)'], {'signature': 'bounded_spec_signature'}), "('queue', 10, signature=bounded_spec_signature)\n", (32794, 32841), True, 'from reverb import server as reverb_server\n'), ((7831, 7853), 'tensorflow.compat.v1.TensorShape', 'tf.TensorShape', (['[3, 3]'], {}), '([3, 3])\n', (7845, 7853), True, 'import tensorflow.compat.v1 as tf\n'), ((10100, 10135), 'tensorflow.compat.v1.distribute.get_replica_context', 'tf.distribute.get_replica_context', ([], {}), '()\n', (10133, 10135), True, 'import tensorflow.compat.v1 as tf\n'), ((11120, 11142), 'tensorflow.compat.v1.TensorShape', 'tf.TensorShape', (['[3, 3]'], {}), '([3, 3])\n', (11134, 11142), True, 'import tensorflow.compat.v1 as tf\n'), ((11416, 11438), 'tensorflow.compat.v1.TensorShape', 'tf.TensorShape', (['[3, 3]'], {}), '([3, 3])\n', (11430, 11438), True, 'import tensorflow.compat.v1 as tf\n'), ((11685, 11707), 'tensorflow.compat.v1.TensorShape', 'tf.TensorShape', 
(['[3, 3]'], {}), '([3, 3])\n', (11699, 11707), True, 'import tensorflow.compat.v1 as tf\n'), ((13639, 13661), 'tensorflow.compat.v1.TensorShape', 'tf.TensorShape', (['[3, 3]'], {}), '([3, 3])\n', (13653, 13661), True, 'import tensorflow.compat.v1 as tf\n'), ((13663, 13681), 'tensorflow.compat.v1.TensorShape', 'tf.TensorShape', (['[]'], {}), '([])\n', (13677, 13681), True, 'import tensorflow.compat.v1 as tf\n'), ((14341, 14363), 'tensorflow.compat.v1.TensorShape', 'tf.TensorShape', (['[3, 3]'], {}), '([3, 3])\n', (14355, 14363), True, 'import tensorflow.compat.v1 as tf\n'), ((14928, 14956), 'tensorflow.compat.v1.TensorShape', 'tf.TensorShape', (['[None, 3, 3]'], {}), '([None, 3, 3])\n', (14942, 14956), True, 'import tensorflow.compat.v1 as tf\n'), ((15714, 15733), 'tensorflow.compat.v1.TensorShape', 'tf.TensorShape', (['[3]'], {}), '([3])\n', (15728, 15733), True, 'import tensorflow.compat.v1 as tf\n'), ((16298, 16323), 'tensorflow.compat.v1.TensorShape', 'tf.TensorShape', (['[None, 3]'], {}), '([None, 3])\n', (16312, 16323), True, 'import tensorflow.compat.v1 as tf\n'), ((17508, 17526), 'tensorflow.compat.v1.TensorShape', 'tf.TensorShape', (['[]'], {}), '([])\n', (17522, 17526), True, 'import tensorflow.compat.v1 as tf\n'), ((17992, 18014), 'tensorflow.compat.v1.TensorShape', 'tf.TensorShape', (['[None]'], {}), '([None])\n', (18006, 18014), True, 'import tensorflow.compat.v1 as tf\n'), ((19275, 19314), 'tensorflow.compat.v1.TensorShape', 'tf.TensorShape', (['[sequence_length, 3, 3]'], {}), '([sequence_length, 3, 3])\n', (19289, 19314), True, 'import tensorflow.compat.v1 as tf\n'), ((20430, 20458), 'tensorflow.compat.v1.TensorShape', 'tf.TensorShape', (['[None, 3, 3]'], {}), '([None, 3, 3])\n', (20444, 20458), True, 'import tensorflow.compat.v1 as tf\n'), ((21843, 21891), 'tensorflow.compat.v1.TensorShape', 'tf.TensorShape', (['[provided_sequence_length, 3, 3]'], {}), '([provided_sequence_length, 3, 3])\n', (21857, 21891), True, 'import tensorflow.compat.v1 as 
tf\n'), ((22625, 22647), 'tensorflow.compat.v1.TensorShape', 'tf.TensorShape', (['[3, 3]'], {}), '([3, 3])\n', (22639, 22647), True, 'import tensorflow.compat.v1 as tf\n'), ((23987, 24011), 'tensorflow.compat.v1.TensorShape', 'tf.TensorShape', (['[10, 10]'], {}), '([10, 10])\n', (24001, 24011), True, 'import tensorflow.compat.v1 as tf\n'), ((24246, 24273), 'tensorflow.compat.v1.TensorSpec', 'tf.TensorSpec', (['[]', 'tf.int64'], {}), '([], tf.int64)\n', (24259, 24273), True, 'import tensorflow.compat.v1 as tf\n'), ((24291, 24319), 'tensorflow.compat.v1.TensorSpec', 'tf.TensorSpec', (['[1]', 'tf.int32'], {}), '([1], tf.int32)\n', (24304, 24319), True, 'import tensorflow.compat.v1 as tf\n'), ((24891, 24919), 'numpy.ones', 'np.ones', (['[3]'], {'dtype': 'np.int64'}), '([3], dtype=np.int64)\n', (24898, 24919), True, 'import numpy as np\n'), ((25645, 25669), 'tensorflow.compat.v1.TensorShape', 'tf.TensorShape', (['[81, 81]'], {}), '([81, 81])\n', (25659, 25669), True, 'import tensorflow.compat.v1 as tf\n'), ((26620, 26642), 'tensorflow.compat.v1.TensorShape', 'tf.TensorShape', (['[3, 3]'], {}), '([3, 3])\n', (26634, 26642), True, 'import tensorflow.compat.v1 as tf\n'), ((27579, 27601), 'tensorflow.compat.v1.TensorShape', 'tf.TensorShape', (['[3, 3]'], {}), '([3, 3])\n', (27593, 27601), True, 'import tensorflow.compat.v1 as tf\n'), ((28368, 28390), 'tensorflow.compat.v1.TensorShape', 'tf.TensorShape', (['[3, 3]'], {}), '([3, 3])\n', (28382, 28390), True, 'import tensorflow.compat.v1 as tf\n'), ((28906, 28937), 'numpy.ones', 'np.ones', (['[1, 1]'], {'dtype': 'np.int32'}), '([1, 1], dtype=np.int32)\n', (28913, 28937), True, 'import numpy as np\n'), ((28967, 28997), 'numpy.ones', 'np.ones', (['[3, 3]'], {'dtype': 'np.int8'}), '([3, 3], dtype=np.int8)\n', (28974, 28997), True, 'import numpy as np\n'), ((33081, 33114), 'tensorflow.compat.v1.TensorSpec', 'tf.TensorSpec', (['[3, 3]', 'tf.float32'], {}), '([3, 3], tf.float32)\n', (33094, 33114), True, 'import tensorflow.compat.v1 
as tf\n'), ((33137, 33164), 'tensorflow.compat.v1.TensorSpec', 'tf.TensorSpec', (['[]', 'tf.int64'], {}), '([], tf.int64)\n', (33150, 33164), True, 'import tensorflow.compat.v1 as tf\n'), ((33876, 33912), 'tensorflow.compat.v1.TensorSpec', 'tf.TensorSpec', (['[5, 3, 3]', 'tf.float32'], {}), '([5, 3, 3], tf.float32)\n', (33889, 33912), True, 'import tensorflow.compat.v1 as tf\n'), ((33935, 33963), 'tensorflow.compat.v1.TensorSpec', 'tf.TensorSpec', (['[5]', 'tf.int64'], {}), '([5], tf.int64)\n', (33948, 33963), True, 'import tensorflow.compat.v1 as tf\n'), ((1268, 1315), 'reverb.item_selectors.Prioritized', 'item_selectors.Prioritized', ([], {'priority_exponent': '(1)'}), '(priority_exponent=1)\n', (1294, 1315), False, 'from reverb import item_selectors\n'), ((1339, 1360), 'reverb.item_selectors.Fifo', 'item_selectors.Fifo', ([], {}), '()\n', (1358, 1360), False, 'from reverb import item_selectors\n'), ((1421, 1445), 'reverb.rate_limiters.MinSize', 'rate_limiters.MinSize', (['(1)'], {}), '(1)\n', (1442, 1445), False, 'from reverb import rate_limiters\n'), ((1529, 1550), 'reverb.item_selectors.Fifo', 'item_selectors.Fifo', ([], {}), '()\n', (1548, 1550), False, 'from reverb import item_selectors\n'), ((1574, 1595), 'reverb.item_selectors.Fifo', 'item_selectors.Fifo', ([], {}), '()\n', (1593, 1595), False, 'from reverb import item_selectors\n'), ((1691, 1715), 'reverb.rate_limiters.MinSize', 'rate_limiters.MinSize', (['(1)'], {}), '(1)\n', (1712, 1715), False, 'from reverb import rate_limiters\n'), ((1799, 1846), 'reverb.item_selectors.Prioritized', 'item_selectors.Prioritized', ([], {'priority_exponent': '(1)'}), '(priority_exponent=1)\n', (1825, 1846), False, 'from reverb import item_selectors\n'), ((1870, 1891), 'reverb.item_selectors.Fifo', 'item_selectors.Fifo', ([], {}), '()\n', (1889, 1891), False, 'from reverb import item_selectors\n'), ((1952, 1976), 'reverb.rate_limiters.MinSize', 'rate_limiters.MinSize', (['(1)'], {}), '(1)\n', (1973, 1976), False, 'from 
reverb import rate_limiters\n'), ((2002, 2053), 'tensorflow.compat.v1.TensorSpec', 'tf.TensorSpec', ([], {'dtype': 'tf.float32', 'shape': '(None, None)'}), '(dtype=tf.float32, shape=(None, None))\n', (2015, 2053), True, 'import tensorflow.compat.v1 as tf\n'), ((2150, 2197), 'reverb.item_selectors.Prioritized', 'item_selectors.Prioritized', ([], {'priority_exponent': '(1)'}), '(priority_exponent=1)\n', (2176, 2197), False, 'from reverb import item_selectors\n'), ((2221, 2242), 'reverb.item_selectors.Fifo', 'item_selectors.Fifo', ([], {}), '()\n', (2240, 2242), False, 'from reverb import item_selectors\n'), ((2303, 2327), 'reverb.rate_limiters.MinSize', 'rate_limiters.MinSize', (['(1)'], {}), '(1)\n', (2324, 2327), False, 'from reverb import rate_limiters\n'), ((2547, 2661), 'tensorflow.python.framework.tensor_spec.BoundedTensorSpec', 'tensor_spec.BoundedTensorSpec', ([], {'dtype': 'tf.float32', 'shape': '(None, None)', 'minimum': '(0.0, 0.0)', 'maximum': '(10.0, 10.0)'}), '(dtype=tf.float32, shape=(None, None), minimum\n =(0.0, 0.0), maximum=(10.0, 10.0))\n', (2576, 2661), False, 'from tensorflow.python.framework import tensor_spec\n'), ((4027, 4061), 'numpy.zeros', 'np.zeros', (['(3, 3)'], {'dtype': 'np.float32'}), '((3, 3), dtype=np.float32)\n', (4035, 4061), True, 'import numpy as np\n'), ((10823, 10845), 'tensorflow.compat.v1.TensorShape', 'tf.TensorShape', (['[3, 3]'], {}), '([3, 3])\n', (10837, 10845), True, 'import tensorflow.compat.v1 as tf\n'), ((17079, 17122), 'tensorflow.compat.v1.TensorShape', 'tf.TensorShape', (['[sequence_length + 1, 3, 3]'], {}), '([sequence_length + 1, 3, 3])\n', (17093, 17122), True, 'import tensorflow.compat.v1 as tf\n'), ((23497, 23533), 'numpy.zeros', 'np.zeros', (['(10, 10)'], {'dtype': 'np.float32'}), '((10, 10), dtype=np.float32)\n', (23505, 23533), True, 'import numpy as np\n'), ((23859, 23881), 'tensorflow.compat.v1.TensorShape', 'tf.TensorShape', (['[3, 3]'], {}), '([3, 3])\n', (23873, 23881), True, 'import 
tensorflow.compat.v1 as tf\n'), ((29014, 29047), 'numpy.ones', 'np.ones', (['[2, 2]'], {'dtype': 'np.float64'}), '([2, 2], dtype=np.float64)\n', (29021, 29047), True, 'import numpy as np\n'), ((29388, 29410), 'tensorflow.compat.v1.TensorShape', 'tf.TensorShape', (['[1, 1]'], {}), '([1, 1])\n', (29402, 29410), True, 'import tensorflow.compat.v1 as tf\n'), ((29444, 29466), 'tensorflow.compat.v1.TensorShape', 'tf.TensorShape', (['[3, 3]'], {}), '([3, 3])\n', (29458, 29466), True, 'import tensorflow.compat.v1 as tf\n'), ((9042, 9061), 'tensorflow.compat.v1.constant', 'tf.constant', (['"""dist"""'], {}), "('dist')\n", (9053, 9061), True, 'import tensorflow.compat.v1 as tf\n'), ((9232, 9256), 'tensorflow.compat.v1.data.Dataset.range', 'tf.data.Dataset.range', (['(4)'], {}), '(4)\n', (9253, 9256), True, 'import tensorflow.compat.v1 as tf\n'), ((23293, 23327), 'numpy.zeros', 'np.zeros', (['(3, 3)'], {'dtype': 'np.float32'}), '((3, 3), dtype=np.float32)\n', (23301, 23327), True, 'import numpy as np\n'), ((23884, 23904), 'tensorflow.compat.v1.TensorShape', 'tf.TensorShape', (['None'], {}), '(None)\n', (23898, 23904), True, 'import tensorflow.compat.v1 as tf\n'), ((23948, 23967), 'tensorflow.compat.v1.TensorShape', 'tf.TensorShape', (['[1]'], {}), '([1])\n', (23962, 23967), True, 'import tensorflow.compat.v1 as tf\n'), ((25338, 25373), 'numpy.ones', 'np.ones', (['(81, 81)'], {'dtype': 'np.float32'}), '((81, 81), dtype=np.float32)\n', (25345, 25373), True, 'import numpy as np\n'), ((29485, 29507), 'tensorflow.compat.v1.TensorShape', 'tf.TensorShape', (['[2, 2]'], {}), '([2, 2])\n', (29499, 29507), True, 'import tensorflow.compat.v1 as tf\n'), ((9113, 9135), 'tensorflow.compat.v1.TensorShape', 'tf.TensorShape', (['[3, 3]'], {}), '([3, 3])\n', (9127, 9135), True, 'import tensorflow.compat.v1 as tf\n'), ((23377, 23389), 'numpy.int64', 'np.int64', (['(10)'], {}), '(10)\n', (23385, 23389), True, 'import numpy as np\n'), ((23411, 23439), 'numpy.ones', 'np.ones', (['[1]'], {'dtype': 
'np.int32'}), '([1], dtype=np.int32)\n', (23418, 23439), True, 'import numpy as np\n'), ((26207, 26224), 'numpy.ones', 'np.ones', (['[81, 81]'], {}), '([81, 81])\n', (26214, 26224), True, 'import numpy as np\n'), ((33454, 33487), 'tensorflow.compat.v1.TensorSpec', 'tf.TensorSpec', (['[3, 3]', 'tf.float32'], {}), '([3, 3], tf.float32)\n', (33467, 33487), True, 'import tensorflow.compat.v1 as tf\n'), ((33514, 33541), 'tensorflow.compat.v1.TensorSpec', 'tf.TensorSpec', (['[]', 'tf.int64'], {}), '([], tf.int64)\n', (33527, 33541), True, 'import tensorflow.compat.v1 as tf\n')] |
import rqalpha
from rqalpha.api import *
import numpy as np
from strategy.RL.DoubleDQN import config
from algorithm.RL.DoubleDQN import Algorithm
from base.env.trader import ActionCode
from sklearn.preprocessing import StandardScaler
# Write any initialization logic in this method. The context object is
# passed between all methods of your strategy.
def init(context):
    """Set up shared strategy state on the rqalpha ``context``.

    Registers the traded stock universe, prepares the rolling bar buffers,
    loads the pre-trained Double-DQN agent and subscribes to bar data.
    """
    base = config.get('base')
    # Plain codes feed the RL agent; the ".XSHG"-suffixed ones feed rqalpha.
    context.codes_sort = ["600036", "601328", "601998", "601398"]
    context.codes = ["600036.XSHG", "601328.XSHG", "601398.XSHG", "601998.XSHG"]
    context.has_save_data = False
    context.account_amount = config.get('base').get('accounts').get('stock')
    context.bar_list_origin = []
    context.bar_list = []
    context.scale = StandardScaler()
    context.algorithm = Algorithm.generator(context.codes_sort, base.get('start_date'), base.get('end_date'))
    context.algorithm.restore()
    subscribe(context.codes)
# before_trading is called once per day, before trading starts.
def before_trading(context):
    """rqalpha hook invoked once before each trading day; no-op for this strategy."""
def _get_portfolio_state(context):
portfolio = [context.portfolio.unit_net_value / context.account_amount,
context.portfolio.market_value / context.account_amount]
for code in context.codes:
if not context.portfolio.accounts.get('STOCK').positions.get(code):
portfolio.append(0.0)
continue
portfolio.append(context.portfolio.accounts.get('STOCK').positions.get(code).quantity / context.account_amount)
return portfolio
def process_data(context, bar_dict):
    """Assemble and scale the current bar into an RL state vector.

    Collects OHLCV values for every subscribed code, standardizes them with
    the context scaler, inserts the portfolio state just before the last
    feature, appends both raw and scaled rows to the rolling buffers and
    returns the newest scaled row (shape (1, n_features)).
    """
    raw = []
    for code in context.codes:
        bar = bar_dict[code]
        raw.extend([bar.open, bar.high, bar.low, bar.close, bar.volume])
    fitted = context.scale.fit(np.array([raw]).T)
    scaled = fitted.transform([raw])[0]
    # Splice the portfolio features in just before the final OHLCV value.
    scaled = np.insert(scaled, -1, _get_portfolio_state(context)).reshape((1, -1))
    context.bar_list_origin.append(raw)
    context.bar_list.append(scaled)
    return context.bar_list[-1]
# Triggered by data updates for the securities you subscribed to, e.g.
# daily/minute historical bar slices or real-time slices.
def handle_bar(context, bar_dict):
    """rqalpha hook fired on every bar.

    Builds the state vector, queries the Double-DQN agent for a
    (code, action) decision and translates it into an order.
    """
    state = process_data(context, bar_dict)
    if state is None:
        return
    stock_code, action, _ = context.algorithm.predict(state)
    order_book_id = stock_code + '.XSHG'
    if action == ActionCode.Buy.value:
        # Allocate 20% of the portfolio to this stock.
        order_percent(order_book_id, 0.2)
        print('Buy Signal:', order_book_id)
    elif action == ActionCode.Sell.value:
        # Liquidate the position entirely.
        order_percent(order_book_id, 0)
        print('Sell Signal:', order_book_id)
# after_trading is called once per day, after trading ends.
def after_trading(context):
    """rqalpha hook invoked once after each trading day; no-op for this strategy."""
# Launch the rqalpha backtest, wiring in the lifecycle hooks defined above;
# `config` comes from strategy.RL.DoubleDQN.
result = rqalpha.run_func(
    init=init,
    before_trading=before_trading,
    handle_bar=handle_bar,
    after_trading=after_trading,
    config=config,
)
print(result)
| [
"rqalpha.run_func",
"numpy.array",
"strategy.RL.DoubleDQN.config.get"
] | [((2715, 2845), 'rqalpha.run_func', 'rqalpha.run_func', ([], {'init': 'init', 'before_trading': 'before_trading', 'handle_bar': 'handle_bar', 'after_trading': 'after_trading', 'config': 'config'}), '(init=init, before_trading=before_trading, handle_bar=\n handle_bar, after_trading=after_trading, config=config)\n', (2731, 2845), False, 'import rqalpha\n'), ((636, 654), 'strategy.RL.DoubleDQN.config.get', 'config.get', (['"""base"""'], {}), "('base')\n", (646, 654), False, 'from strategy.RL.DoubleDQN import config\n'), ((1761, 1777), 'numpy.array', 'np.array', (['[data]'], {}), '([data])\n', (1769, 1777), True, 'import numpy as np\n'), ((576, 594), 'strategy.RL.DoubleDQN.config.get', 'config.get', (['"""base"""'], {}), "('base')\n", (586, 594), False, 'from strategy.RL.DoubleDQN import config\n')] |
from collections import defaultdict
from unittest.case import TestCase
from numpy.random import permutation
from numpy.random.mtrand import RandomState
from pandas import Series
from survey.questions import RankedChoiceQuestion
class TestRankedChoiceQuestion(TestCase):
    """Unit tests for RankedChoiceQuestion distribution tables."""

    def setUp(self) -> None:
        # Use a seeded generator so the sampled rankings are reproducible.
        # (Previously RandomState(0) was created but discarded, so the
        # permutations silently came from the unseeded global RNG.)
        rng = RandomState(0)
        self.choices = [f'Option {choice}' for choice in range(1, 11)]
        # 100 random full rankings of the 10 options.
        perms = [rng.permutation(self.choices) for _ in range(100)]
        # expected_ranks[(option, rank)] = how often `option` appeared at `rank`.
        self.expected_ranks = defaultdict(int)
        for perm in perms:
            for rank, option in enumerate(perm):
                self.expected_ranks[(option, rank + 1)] += 1
        # Each survey response encodes one ranking, options joined by newlines.
        data = Series(['\n'.join(perm) for perm in perms])
        self.question = RankedChoiceQuestion(
            name='ranked_choice_question',
            text='Ranked Choice Question',
            categories=self.choices,
            data=data
        )

    def test_distribution_table__no_significance(self):
        # The table must reproduce the raw (option, rank) counts exactly.
        table = self.question.distribution_table()
        for option in self.choices:
            for rank in range(1, 11):
                self.assertEqual(
                    self.expected_ranks[option, rank],
                    table.loc[option, rank]
                )

    def test_distribution__significance(self):
        # Counts must be unchanged when the significance column is requested,
        # and that column must match significance__one_vs_any().
        table = self.question.distribution_table(significance=True)
        for option in self.choices:
            for rank in range(1, 11):
                self.assertEqual(
                    self.expected_ranks[option, rank],
                    table.loc[option, rank]
                )
        significance = self.question.significance__one_vs_any()
        self.assertTrue(significance.equals(table['Significance']))
| [
"collections.defaultdict",
"numpy.random.mtrand.RandomState",
"survey.questions.RankedChoiceQuestion",
"numpy.random.permutation"
] | [((312, 326), 'numpy.random.mtrand.RandomState', 'RandomState', (['(0)'], {}), '(0)\n', (323, 326), False, 'from numpy.random.mtrand import RandomState\n'), ((492, 508), 'collections.defaultdict', 'defaultdict', (['int'], {}), '(int)\n', (503, 508), False, 'from collections import defaultdict\n'), ((729, 852), 'survey.questions.RankedChoiceQuestion', 'RankedChoiceQuestion', ([], {'name': '"""ranked_choice_question"""', 'text': '"""Ranked Choice Question"""', 'categories': 'self.choices', 'data': 'data'}), "(name='ranked_choice_question', text=\n 'Ranked Choice Question', categories=self.choices, data=data)\n", (749, 852), False, 'from survey.questions import RankedChoiceQuestion\n'), ((415, 440), 'numpy.random.permutation', 'permutation', (['self.choices'], {}), '(self.choices)\n', (426, 440), False, 'from numpy.random import permutation\n')] |
""" Shared utilities for models.py and test_run.py.
"""
import os
import random
import numpy as np
import pickle
import torch
from torch.autograd import Variable
__author__ = "<NAME>"
def bool_ext(rbool):
  """ Parse a "True"/"False" string into the corresponding bool.

  Exists because bool("False") is truthy: any non-empty string is True,
  so command-line boolean flags must be parsed explicitly.

  Parameters
  ----------
  rbool: str
    must be exactly "True" or "False"

  Raises
  ------
  ValueError
    for any other string.
  """
  if rbool == "True":
    return True
  if rbool == "False":
    return False
  raise ValueError("Not a valid boolean string")
def load_dataset(input_dir="data", deg_shuffle=False):
  """ Load dataset, modify, and shuffle.

  Reads `input_dir`/dataset.pkl, shifts cancer-type and SGA indices by +1
  (0 is reserved for padding), pads the per-tumor SGA lists to equal
  length and deterministically shuffles the samples with seed 2019.

  Parameters
  ----------
  input_dir: str
    input directory of dataset
  deg_shuffle: bool
    whether to shuffle DEG columns per tumor (uses the global `random`
    state, so this branch is not deterministic unless seeded by caller)

  Returns
  -------
  dataset: dict
    dict of lists, including SGAs, cancer types, DEGs, patient barcodes
  """
  # load dataset; use a context manager so the file handle is always closed
  # (the previous version left the open() result dangling)
  with open(os.path.join(input_dir, "dataset.pkl"), "rb") as handle:
    data = pickle.load(handle)
  can_r = data["can"]   # cancer type index of tumors: list of int
  sga_r = data["sga"]   # SGA index of tumors: list of list
  deg = data["deg"]     # DEG binary matrix of tumors: 2D array of 0/1
  tmr = data["tmr"]     # barcodes of tumors: list of str
  # shift the index of cancer type by +1, 0 is for padding
  can = np.asarray([[x + 1] for x in can_r], dtype=int)
  # shift the index of SGAs by +1, 0 is for padding; rows are zero-padded
  # to the length of the longest SGA list
  num_max_sga = max([len(s) for s in sga_r])
  sga = np.zeros((len(sga_r), num_max_sga), dtype=int)
  for idx, line in enumerate(sga_r):
    line = [s + 1 for s in line]
    sga[idx, 0:len(line)] = line
  # optionally shuffle DEG columns independently per tumor (baseline check)
  if deg_shuffle:
    rng = list(range(deg.shape[1]))
    for idx in range(deg.shape[0]):
      random.shuffle(rng)
      deg[idx] = deg[idx][rng]
  # shuffle whole dataset with a fixed seed so splits are reproducible
  rng = list(range(len(can)))
  random.Random(2019).shuffle(rng)
  can = can[rng]
  sga = sga[rng]
  deg = deg[rng]
  tmr = [tmr[idx] for idx in rng]
  dataset = {"can": can, "sga": sga, "deg": deg, "tmr": tmr}
  return dataset
def split_dataset(dataset, ratio=0.66):
  """ Split the dataset according to the ratio of training/test sets.

  Parameters
  ----------
  dataset: dict
    dict of lists, including SGAs, cancer types, DEGs, patient barcodes
  ratio: float
    size(train_set)/size(train_set+test_set)

  Returns
  -------
  train_set, test_set: dict
  """
  n_train = int(len(dataset["can"]) * ratio)
  keys = ("sga", "can", "deg", "tmr")
  train_set = {key: dataset[key][:n_train] for key in keys}
  test_set = {key: dataset[key][n_train:] for key in keys}
  return train_set, test_set
def wrap_dataset(sga, can, deg, tmr):
  """ Wrap default numpy or list data into PyTorch variables.

  SGA and cancer-type indices become LongTensors (embedding lookups),
  DEGs become a FloatTensor; barcodes are passed through untouched.
  """
  return {
      "sga": Variable(torch.LongTensor(sga)),
      "can": Variable(torch.LongTensor(can)),
      "deg": Variable(torch.FloatTensor(deg)),
      "tmr": tmr,
  }
def get_minibatch(dataset, index, batch_size, batch_type="train"):
  """ Get a mini-batch dataset for training or test.

  Parameters
  ----------
  dataset: dict
    dict of lists, including SGAs, cancer types, DEGs, patient barcodes
  index: int
    starting index of current mini-batch
  batch_size: int
  batch_type: str
    batch strategy is slightly different for training and test
    "train": will return to beginning of the queue when `index` out of range
    "test": will not return to beginning of the queue when `index` out of range

  Returns
  -------
  batch_dataset: dict
    a mini-batch of the input `dataset`.

  Raises
  ------
  ValueError
    if `batch_type` is neither "train" nor "test".
  """
  sga = dataset["sga"]
  can = dataset["can"]
  deg = dataset["deg"]
  tmr = dataset["tmr"]
  if batch_type == "train":
    # wrap around to the beginning of the queue when running past the end
    batch_sga = [sga[idx % len(sga)] for idx in range(index, index + batch_size)]
    batch_can = [can[idx % len(can)] for idx in range(index, index + batch_size)]
    batch_deg = [deg[idx % len(deg)] for idx in range(index, index + batch_size)]
    batch_tmr = [tmr[idx % len(tmr)] for idx in range(index, index + batch_size)]
  elif batch_type == "test":
    # plain slice; the final batch may be shorter than batch_size
    batch_sga = sga[index:index + batch_size]
    batch_can = can[index:index + batch_size]
    batch_deg = deg[index:index + batch_size]
    batch_tmr = tmr[index:index + batch_size]
  else:
    # previously an unknown batch_type fell through to a NameError below
    raise ValueError("batch_type must be 'train' or 'test', got %r" % batch_type)
  batch_dataset = wrap_dataset(
      batch_sga,
      batch_can,
      batch_deg,
      batch_tmr)
  return batch_dataset
def evaluate(labels, preds, epsilon=1e-4):
  """ Calculate performance metrics given ground truths and prediction results.

  Parameters
  ----------
  labels: matrix of 0/1
    ground truth labels
  preds: matrix of float in [0,1]
    predicted labels (rounded to 0/1 before scoring)
  epsilon: float
    a small Laplacian smoothing term to avoid a zero denominator in F1

  Returns
  -------
  precision: float
  recall: float
  f1score: float
  accuracy: float
  """
  y_true = np.asarray(labels).reshape(-1)
  y_pred = np.around(np.asarray(preds)).reshape(-1)
  accuracy = np.mean(y_true == y_pred)
  # true positives: both label and rounded prediction are 1
  true_pos = np.dot(y_true, y_pred)
  precision = 1.0 * true_pos / y_pred.sum()
  recall = 1.0 * true_pos / y_true.sum()
  f1score = 2 * precision * recall / (precision + recall + epsilon)
  return precision, recall, f1score, accuracy
| [
"torch.LongTensor",
"random.shuffle",
"numpy.asarray",
"random.Random",
"torch.FloatTensor",
"numpy.around",
"numpy.mean",
"numpy.reshape",
"numpy.dot",
"os.path.join"
] | [((1212, 1259), 'numpy.asarray', 'np.asarray', (['[[x + 1] for x in can_r]'], {'dtype': 'int'}), '([[x + 1] for x in can_r], dtype=int)\n', (1222, 1259), True, 'import numpy as np\n'), ((5021, 5043), 'numpy.reshape', 'np.reshape', (['labels', '(-1)'], {}), '(labels, -1)\n', (5031, 5043), True, 'import numpy as np\n'), ((5104, 5138), 'numpy.mean', 'np.mean', (['(flat_labels == flat_preds)'], {}), '(flat_labels == flat_preds)\n', (5111, 5138), True, 'import numpy as np\n'), ((5152, 5183), 'numpy.dot', 'np.dot', (['flat_labels', 'flat_preds'], {}), '(flat_labels, flat_preds)\n', (5158, 5183), True, 'import numpy as np\n'), ((5069, 5085), 'numpy.around', 'np.around', (['preds'], {}), '(preds)\n', (5078, 5085), True, 'import numpy as np\n'), ((848, 886), 'os.path.join', 'os.path.join', (['input_dir', '"""dataset.pkl"""'], {}), "(input_dir, 'dataset.pkl')\n", (860, 886), False, 'import os\n'), ((1627, 1646), 'random.shuffle', 'random.shuffle', (['rng'], {}), '(rng)\n', (1641, 1646), False, 'import random\n'), ((1737, 1756), 'random.Random', 'random.Random', (['(2019)'], {}), '(2019)\n', (1750, 1756), False, 'import random\n'), ((2967, 2988), 'torch.LongTensor', 'torch.LongTensor', (['sga'], {}), '(sga)\n', (2983, 2988), False, 'import torch\n'), ((3020, 3041), 'torch.LongTensor', 'torch.LongTensor', (['can'], {}), '(can)\n', (3036, 3041), False, 'import torch\n'), ((3073, 3095), 'torch.FloatTensor', 'torch.FloatTensor', (['deg'], {}), '(deg)\n', (3090, 3095), False, 'import torch\n')] |
"""
Copyright 2021 <NAME>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
contact__ = "<EMAIL>"
import pandas as pd
import numpy as np
from impetuous.quantification import group_significance
from impetuous.convert import *
def pathway_frame_from_file(filename,
                            delimiter='\t', item_sep=','):
    """Parse a GMT-style group definition file into a DataFrame.

    Each line is expected to hold an identifier, a description and then
    the member analytes, separated by `delimiter`. Returns a DataFrame
    indexed by identifier with 'description' and 'analytes' columns (the
    analytes joined by `item_sep`), or None for an empty file.
    """
    pdf = None
    with open(filename, 'r') as handle:
        for line in handle:
            fields = line.replace('\n', '').split(delimiter)
            row = pd.Series([fields[1], item_sep.join(fields[2:])],
                            name=fields[0], index=['description', 'analytes'])
            # pd.concat silently drops the leading None on the first pass
            pdf = pd.concat([pdf, pd.DataFrame(row).T])
    return (pdf)
def create_dag_representation_df(pathway_file='../data/GROUPDEFINITIONS.gmt',
                                 pcfile='../data/PCLIST.txt'):
    """Build the combined DAG description DataFrame and its tree.

    Combines the ancestor information derived from the parent-child list
    file with the group definitions from the pathway file; whitespace is
    stripped from both indices before joining so the rows align.
    Returns (dag_df, tree).
    """
    tree, ancestors, _descendants = parent_child_to_dag(pcfile)
    pdf = make_pathway_ancestor_data_frame(ancestors)
    pdf_ = pathway_frame_from_file(pathway_file)
    pdf.index = [v.replace(' ', '') for v in pdf.index.values]
    pdf_.index = [v.replace(' ', '') for v in pdf_.index.values]
    dag_df = pd.concat([pdf.T, pdf_.T]).T
    return (dag_df, tree)
def HierarchalEnrichment(
        analyte_df, dag_df, dag_level_label='DAG,l',
        ancestors_id_label='aid', id_name=None, threshold=0.05,
        p_label='C(Status),p', analyte_name_label='analytes',
        item_delimiter=',', alexa_elim=False, alternative='two-sided'):
    """Deprecated misspelled alias for HierarchicalEnrichment.

    Kept for backwards compatibility; forwards all arguments unchanged.
    """
    return HierarchicalEnrichment(
        analyte_df=analyte_df,
        dag_df=dag_df,
        dag_level_label=dag_level_label,
        ancestors_id_label=ancestors_id_label,
        id_name=id_name,
        threshold=threshold,
        p_label=p_label,
        analyte_name_label=analyte_name_label,
        item_delimiter=item_delimiter,
        alexa_elim=alexa_elim,
        alternative=alternative)
def HierarchicalEnrichment (
            analyte_df , dag_df , dag_level_label = 'DAG,l' ,
            ancestors_id_label = 'aid' , id_name = None , threshold = 0.05 ,
            p_label = 'C(Status),p', analyte_name_label = 'analytes' ,
            item_delimiter = ',' , alexa_elim=False , alternative = 'two-sided'
        ) :
    """Run a DAG-aware group enrichment test, bottom-up over the hierarchy.

    Walks the DAG from the deepest level toward the root. For every node
    its analyte set is tested for enrichment (via group_significance) of
    the analytes that are significant in `analyte_df` at `p_label` <
    `threshold`. Analytes already tested in a node are marked on all of
    its ancestors so they are excluded from the ancestors' groups
    (elim-style correction). Returns `dag_df` with two added columns:
    'Hierarchical,p' and 'Included analytes,ids'.

    NOTE(review): `id_name` and the locals `nidx`/`str_analytes` are
    currently unused; `dag_df` is modified in place before dropna().
    """
    #
    # NEEDS AN ANALYTE SIGNIFICANCE FRAME:
    #     INCLUDING P VALUES OF ANALYTES
    # DAG GRAPH DESCRIPTION FRAME:
    #     INCLUDING NODE ID, NODE ANALYTES FIELD (SEPERATED BY ITEM DELIMITER)
    #     INCLUDING ANCESTORS FIELD (SEPERATED BY ITEM DELIMITER)
    #     DAG LEVEL OF EACH NODE
    tolerance = threshold
    df = dag_df ; dag_depth = np.max( df[dag_level_label].values )
    AllAnalytes = set( analyte_df.index.values ) ; nidx = len( AllAnalytes )
    SigAnalytes = set( analyte_df.iloc[ (analyte_df.loc[:,p_label].values < tolerance), : ].index.values )
    # if every analyte is "significant" the enrichment test is degenerate
    if len( AllAnalytes ) == len( SigAnalytes ) :
        print ( 'THIS STATISTICAL TEST WILL BE NONSENSE' )
        print ( 'TRY A DIFFERENT THRESHOLD' )
    marked_analytes = {} ; used_analytes = {} ; node_sig = {}
    # iterate from the deepest DAG level up to (but excluding) the root
    for d in range( dag_depth, 0, -1 ) :
        # ROOT IS NOT INCLUDED
        filter_ = df [ dag_level_label ] == d
        nodes = df.iloc[ [i for i in np.where(filter_)[ 0 ]] ,:].index.values
        for node in nodes :
            # skip nodes with no analyte annotation
            if 'nan' in str(df.loc[node,analyte_name_label]).lower() :
                continue
            analytes_ = df.loc[node,analyte_name_label].replace('\n','').replace(' ','').split(item_delimiter)
            try :
                # keep rows present in analyte_df with at least half non-NaN values
                group = analyte_df.loc[[a for a in analytes_ if a in AllAnalytes] ].dropna( axis=0, how='any', thresh=analyte_df.shape[1]/2 ).drop_duplicates()
            except KeyError as e :
                continue
            # drop analytes that descendants of this node already consumed
            if node in marked_analytes :
                unused_group = group.loc[ list( set(group.index.values)-marked_analytes[node] ) ]
                group = unused_group
            L_ = len( group ) ; str_analytes=','.join(group.index.values)
            if L_ > 0 :
                used_analytes[node] = ','.join( group.index.values )
                pv,odds = group_significance( group , AllAnalytes=AllAnalytes, SigAnalytes=SigAnalytes , tolerance = threshold , alternative=alternative )
                node_sig[node] = pv ; marked_ = set( group.index.values )
                ancestors = df.loc[node,ancestors_id_label].replace('\n','').replace(' ','').split(item_delimiter)
                # with alexa_elim, only significant nodes mark their ancestors
                if alexa_elim and pv > threshold : # USE ALEXAS ELIM ALGORITHM : IS NOT DEFAULT
                    continue
                # propagate this node's tested analytes to all ancestors
                for u in ancestors :
                    if u in marked_analytes :
                        us = marked_analytes[u]
                        marked_analytes[u] = us | marked_
                    else :
                        marked_analytes[u] = marked_
    # untested nodes get p=1 and an empty analyte list
    df['Hierarchical,p'] = [ node_sig[idx] if idx in node_sig else 1. for idx in df.index.values ]
    df['Included analytes,ids'] = [ used_analytes[idx] if idx in used_analytes else '' for idx in df.index.values ]
    df = df.dropna()
    return ( df )
def calculate_hierarchy_matrix ( data_frame = None ,
                                 distance_matrix = None ,
                                 bVerbose = False,
                                 coarse_grain_structure = 0 ) :
    """Cluster a point set at every pairwise-distance cutoff to build a hierarchy matrix.

    Supply exactly one of:
        data_frame      : pandas DataFrame of absolute coordinates (one row per particle)
        distance_matrix : precomputed square distance matrix
    coarse_grain_structure > 0 keeps only every n:th unique distance as a clustering
    cutoff, bounding the number of hierarchy levels.

    Returns the tuple ( hierarchy_matrix , level_distance_lookup ) where
    hierarchy_matrix has one row per level ('level0', 'level1', ...) and one column
    per particle holding that particle's cluster id on the level, and
    level_distance_lookup maps each level name to
    [ level index , cutoff distance , mean cluster content ].
    NOTE: cluster ids are not comparable across levels (see the verbose printout).
    """
    info__ = """ This is the saiga/pelican/panda you are looking for RICEARD"""
    # print (info__ )
    from impetuous.clustering import connectivity , absolute_coordinates_to_distance_matrix
    import operator
    # exactly one of the two inputs must be supplied (xor check)
    if not operator.xor( data_frame is None , distance_matrix is None ) :
        print ( "ONLY SUPPLY A SINGE DATA FRAME OR A DISTANCE MATRIX" )
        print ( "calculate_hierarchy_matrix FAILED" )
        print ( "DATA MATRICES NEEDS TO BE SPECIFIED WITH \" distance_matrix = ... \" " )
        exit(1)
    if not data_frame is None :
        if not 'pandas' in str(type(data_frame)) :
            print ( "ONLY SUPPLY A SINGE DATA FRAME WITH ABSOLUTE COORDINATES" )
            print ( "DATA MATRICES NEEDS TO BE SPECIFIED WITH \" distance_matrix = ... \" " )
            print ( "calculate_hierarchy_matrix FAILED" )
            exit ( 1 )
        if bVerbose :
            print ( data_frame )
        # absolute coordinates -> all-pairs distance matrix
        distance_matrix = absolute_coordinates_to_distance_matrix(data_frame.values)
    nmt_ = np.shape(distance_matrix)
    if nmt_[0] != nmt_[1] : # SANITY CHECK
        # NOTE(review): this branch only warns -- execution continues with a
        # non-square matrix and will likely fail further down
        print ( "PLEASE SUPPLY A PROPER SQUARE DISTANCE MATRIX" )
        print ( "DATA MATRICES NEEDS TO BE SPECIFIED WITH \" distance_matrix = ... \" " )
        print ( "from scipy.spatial.distance import squareform , pdist\nabsolute_coordinates_to_distance_matrix = lambda Q:squareform(pdist(Q))" )
    if not distance_matrix is None :
        if bVerbose :
            print ( distance_matrix )
    if bVerbose :
        print ( "EMPLOYING SORTING HAT" )
    # every unique pairwise distance is a candidate clustering cutoff (one level each)
    uco_v = sorted(list(set(distance_matrix.reshape(-1))))
    if coarse_grain_structure>0 :
        if bVerbose :
            nuco = len(uco_v)
            print ( "WILL COARSE GRAIN THE HIERARCHY STRUCTE INTO" )
            print ( "AT MAX:", np.ceil(nuco/coarse_grain_structure), " LEVELS" )
            print ( "TECH: NTOT >", nuco,",\t dN >", coarse_grain_structure )
        # keep every n:th cutoff only
        uco_v = uco_v[::coarse_grain_structure]
    level_distance_lookup = {}
    if bVerbose :
        print ( "DOING CONNECTIVITY CLUSTERING" )
    hsers = []
    for icut in range(len(uco_v)) :
        cutoff = uco_v[icut]
        # clustercontacts : clusterid , particleid relation
        # clustercontent : clusterid to number of particles in range
        #from clustering import connectivity # LOCAL TESTING
        clustercontent , clustercontacts = connectivity ( distance_matrix , cutoff ,
                                                          bVerbose = bVerbose )
        #
        # internal ordering is a range so this does not need to be a dict
        pid2clusterid = clustercontacts[:,0]
        level_distance_lookup['level'+str(icut)] = [ icut , cutoff , np.mean(clustercontent) ]
        hser = pd.Series(pid2clusterid,name='level'+str(icut),index=range(len(distance_matrix)))
        hsers.append(hser)
        if bVerbose :
            print ( 100.0*icut/len(uco_v) ," % DONE ")
        if len(set(hser.values)) == 1 :
            # everything merged into one cluster -> the root level is reached, stop
            break
    if not data_frame is None :
        if 'pandas' in str(type(data_frame)):
            names = data_frame.index.values
    else :
        names = [ str(i) for i in range(len(distance_matrix)) ]
    res_df = pd.DataFrame ( hsers )
    res_df .columns = names
    hierarchy_matrix = res_df
    if bVerbose:
        print ()
        print ("TAKE NOTE THAT THE CLUSTER INDEXING BETWEEN LEVELS MIGHT NOT BE THE SAME")
        print ("YOU HAVE TO USE THE CLUSTER ID NUMBERS ACROSS A LEVEL TO DEDUCE WHICH PARTICLES")
        print ("BELONG TOGETHER IN A CLUSTER. THAT IS: I.E. CLUSTER 0 ON LEVEL 12 MIGHT NOT CORRESPOND")
        print ("TO CLUSTER 0 ON LEVEL 13. THE CALCULATED HIERARCHY MATRIX IS: ")
        print ( hierarchy_matrix )
        print ("AND THESE ARE THE DISTANCES THE HIERARCHY LEVELS CORRESPOND TO:" )
        print ( level_distance_lookup )
    return ( hierarchy_matrix , level_distance_lookup )
def parent_child_matrix_relationships ( hierarchy_matrix ,
                                        bVerbose = False ,
                                        bRemoveRedundant = True ,
                                        separators = ['_','-'],
                                        iLegacy = 1 ) :
    """Derive parent/child cluster relations between adjacent hierarchy levels.

    hierarchy_matrix : pandas DataFrame as returned by calculate_hierarchy_matrix,
        ordered from the leaves (row 0) towards the root (last row); index and
        column names must be unique.
    bRemoveRedundant : collapse chains of levels where the same parent/child
        membership repeats, keeping only the first and last occurrence.
    separators       : two tokens used to compose the row names
        '<parent level>_<parent cluster>-<child level>_<child cluster>'.
    iLegacy          : 1 -> return only the relation DataFrame,
                       otherwise ( relation DataFrame , hierarchy_matrix ).

    Returns a DataFrame with columns 'Parent level label',
    'Parent level cluster index', 'Child level label', 'Child level cluster index'.
    """
    s_ = separators
    M = hierarchy_matrix
    ns,ms = np.shape(M)
    if not 'pandas' in str(type(M)):
        print ( "hierarchy_matrix MUST BE A PANDAS DATA FRAME" )
    if not ( len(set(M.index.values)) == ns and len(set(M.columns.values)) == ms ):
        print( "USE UNIQUE COLUMN AND INDEX NAMES")
        exit(1)
    if bVerbose :
        print ( "THE hierarchy_matrix MUST BE ORDERED FROM THE LEAVES TO THE ROOT")
        print ( "LOWER INDICES THUS CORRESPONDS TO CHILDREN OF HIGHER ONES")
    n,c,levels = len(M),M.columns.values,M.index.values
    ancestor_offspring_relationships = []
    pc_df = None
    lookup = {}
    # walk level pairs (I = parent level, J = I-1 = child level) from root towards leaves
    for i in range(n)[::-1][:-1]:
        I = i
        J = i-1
        # group the particle (column) names by their cluster id on each level
        parents = M.T.groupby(M.iloc[I,:].values).apply(lambda x:x.index)
        children = M.T.groupby(M.iloc[J,:].values).apply(lambda x:x.index)
        parents_level_name = levels[I]
        children_level_name = levels[J]
        if bVerbose:
            print ( i )
            print ( parents.values , parents.index )
            print ( children.values , children.index )
        for p__,pidx in zip( parents.values , parents.index ):
            for c__,cidx in zip( children.values , children.index ):
                pcrel = []
                p_ = set(p__)
                c_ = set(c__)
                # a child cluster belongs to a parent when they overlap and the
                # child's members are fully contained in the parent
                if len ( p_ & c_ ) > 0 and len( c_-p_) == 0 :
                    if bRemoveRedundant:
                        # key on the exact membership; repeats of the same
                        # parent+child content are recorded but not re-emitted
                        ps = '.'.join(p__)
                        cs = '.'.join(c__)
                        pcs= ps+'+'+cs
                        if not pcs in lookup:
                            lookup[pcs] = [(parents_level_name,pidx,I,children_level_name,cidx,J)]
                        else :
                            lookup[pcs] .append((parents_level_name,pidx,I,children_level_name,cidx,J))
                            continue
                    pcser = pd.Series( [ parents_level_name , pidx ,
                                         children_level_name , cidx ] ,
                                       index = ['Parent level label','Parent level cluster index',
                                                'Child level label','Child level cluster index'] ,
                                       name = str(I)+s_[0]+str(pidx)+s_[1]+str(J)+s_[0]+str(cidx) )
                    pcrel .append ( pd.DataFrame(pcser) )
                if len ( pcrel ) > 0 :
                    if pc_df is None :
                        pc_df = pd.DataFrame(pcser)
                    else:
                        pc_df = pd.concat([pc_df.T,pd.DataFrame(pcser).T]).T
                    ancestor_offspring_relationships.append( pcrel )
    pc_df = pc_df.T
    if bRemoveRedundant:
        # collapse each redundant chain: keep the first parent but point it at
        # the child from the chain's last occurrence, then rename the row
        idx_rename = {}
        for item in lookup.items():
            if len(item[1])>1 :
                orig = str(item[1][0][2]) + s_[0] + str(item[1][0][ 1]) + s_[1] + \
                       str(item[1][0][-1]) + s_[0] + str(item[1][0][-2])
                new = str(item[1][0][2]) + s_[0] + str(item[1][0][ 1]) + s_[1] + \
                      str(item[1][-1][-1]) + s_[0] + str(item[1][-1][-2])
                if bVerbose :
                    print ( item )
                    print ( str(item[1][0][2]) + s_[0] + str(item[1][0][ 1]) )
                    print ( str(item[1][0][-1]) + s_[0] + str(item[1][0][-2]) )
                    print ( str(item[1][-1][-1]) + s_[0] + str(item[1][-1][-2]) )
                    print ( orig , new )
                    print ( pc_df.loc[orig,:])
                pc_df.loc[orig,'Child level label'] = item[1][-1][-3]
                pc_df.loc[orig,'Child level cluster index'] = item[1][-1][-2]
                idx_rename[orig] = new
        pc_df = pc_df.rename(index=idx_rename)
    if iLegacy == 1 :
        return ( pc_df )
    else :
        return ( pc_df , hierarchy_matrix )
def create_cpgmt_lookup ( pcdf , hierarchy_matrix , separators = ['_','-'] ):
    """Build a child/parent/parts lookup for gmt export.

    pcdf             : parent/child DataFrame from parent_child_matrix_relationships
                       (row names '<parent>-<child>' composed with `separators`).
    hierarchy_matrix : matrix from calculate_hierarchy_matrix; its columns are
                       the particle names collected as the 'parts'.
    Returns a dict with a 'content' key listing the three parallel lists stored
    under 'children' (child level names), 'descriptions' (parent level + cluster)
    and 'parts' (particle names of each child cluster) -- the exact structure
    consumed by write_cpgmt.
    """
    s_ = separators
    M = hierarchy_matrix
    # unique parent halves of the '<parent>-<child>' row names
    all_parents = list(set([v.split(s_[1])[0] for v in pcdf.index.values]))
    lookup = {'content':['children','descriptions','parts']}
    children , descriptions , parts = [] , [] , []
    for parent in all_parents:
        # all relations whose parent half matches this parent
        selected = pcdf.loc[ [idx for idx in pcdf.index.values if parent == idx.split(s_[1])[0]],:]
        for i_ in selected.index :
            # .values[1] -> the child entry of the label/index column pairs
            a_level = pcdf.loc[ i_ , [ c for c in pcdf.columns if 'label' in c ] ] .values[1]
            a_cluster = pcdf.loc[ i_ , [ c for c in pcdf.columns if 'index' in c ] ] .values[1]
            # particles belonging to the child cluster on its level
            collected_parts = M.columns .values[ M.loc[a_level].values == a_cluster ]
            # .values[0] -> the parent entry; description is '<parent level>_<parent cluster>'
            p_ = pcdf.loc[ i_ , [ c for c in pcdf.columns if 'label' in c ] ] .values[0] + s_[0] + \
                 str(pcdf.loc[ i_ , [ c for c in pcdf.columns if 'index' in c ] ] .values[0])
            children .append ( 'level'+i_.split(s_[1])[-1] )
            descriptions.append ( p_ )
            parts .append ( collected_parts )
    lookup['children'] = children
    lookup['parts'] = parts
    lookup['descriptions'] = descriptions
    return ( lookup )
def write_cpgmt ( lookup ,
                  filename = 'childparentparts.gmt',
                  bVerbose = False ) :
    """Serialize a child/parent/parts lookup (see create_cpgmt_lookup) to a gmt file.

    The lookup must carry a 'content' key naming, in order, the three list
    valued entries to zip together; each zipped triple (child, description,
    parts) becomes one tab separated line of `filename`.  Lookups without a
    'content' key are silently ignored.
    """
    if bVerbose :
        print ( """
If you want to check specific level clusters
using traditional methods such as GSEA or
perhaps try my hierarchical enrichment or
awesome righteuous-fa method ...
irregardless you'll need to create a gmt file """ )
    if not 'content' in lookup :
        return
    ordered_columns = [ lookup[ key ] for key in lookup['content'] ]
    rows = [ '\t'.join( [ child , description , '\t'.join( part_list ) ] )
             for child , description , part_list in zip( *ordered_columns ) ]
    with open( filename , 'w' ) as handle :
        print ( '\n'.join( rows ) , file = handle )
#
# TOOLS LOCAL
def ordered_remove ( str,delete ):
    """Return `str` with every token in `delete` stripped out, applied in order."""
    cleaned = str
    for token in delete :
        cleaned = cleaned.replace( token , '' )
    return ( cleaned )
def error ( criterion,message ) :
    """Print `message` and terminate with exit code 1 when `criterion` is truthy."""
    if not criterion :
        return
    print ( message )
    exit ( 1 )
def build_pclist_word_hierarchy ( filename = None , # 'new_compartment_genes.gmt',
                                  ledger = None ,
                                  delete = ['\n'] , group_id_prefix = None,
                                  analyte_prefix = 'ENSG', root_name = 'COMP0000000000',
                                  bReturnList = False , bUseGroupPrefix = False ,
                                  bSingleChild = False , bSingleDescent = True ) :
    """Deduce the parent->child hierarchy latently present in grouped analyte sets.

    Exactly one of `filename` (a .gmt file: id<TAB>description<TAB>analyte...)
    or `ledger` (dict: group id -> ( description , iterable of analyte ids ))
    must be supplied.  A group counts as a descendant of another when its
    analyte set is a subset of the other's; a synthetic root named `root_name`
    containing every analyte is always added.

    delete          : tokens stripped from every file line before splitting
                      (read-only; never mutated).
    group_id_prefix : when a string, filter ledger entries on the group id
                      instead of on the analyte content (`analyte_prefix`).
    bSingleChild / bSingleDescent : keep only the closest parent per child
                      (the default, since bSingleDescent defaults to True).

    Returns the parent/child edge list [ [parent_id, child_id], ... ] or, when
    bReturnList is True, [ PClist , D_i ] where D_i maps each group id to
    ( description , analyte set , set size ).

    Fixes vs the previous revision: removed a leftover debug print of every
    ledger item, removed dead locals, and renamed locals that shadowed the
    `ledger` parameter and the `input` builtin.  No behavioral change beyond
    the silenced debug output.
    """
    bSingleChild = bSingleChild or bSingleDescent # USER SHOULD SET THE bSingleDescent OPTION
    bUseFile = not filename is None
    import operator
    # exactly one of the two input sources must be supplied
    error ( not operator.xor ( filename is None , ledger is None ), "YOU MUST SUPPLY A GMT FILE XOR A DICTIONARY" )
    if bUseFile :
        error ( not '.gmt' in filename , 'MUST HAVE A VALID GMT FILE' )
    S_M = set()   # union of every analyte seen
    D_i = dict()  # group id -> ( description , analyte set , set size )
    bUseGroupPrefix = not group_id_prefix is None
    if bUseGroupPrefix :
        bUseGroupPrefix = 'str' in str(type(group_id_prefix)).lower()
    check_prefix = analyte_prefix
    if bUseGroupPrefix :
        check_prefix = group_id_prefix
    if bUseFile :
        with open ( filename,'r' ) as fhandle :
            for line in fhandle :
                lsp = ordered_remove(line,delete).split('\t')
                if not check_prefix in line :
                    continue
                S_i = set(lsp[2:])
                D_i [ lsp[0] ] = tuple( (lsp[1] , S_i , len(S_i)) )
                S_M = S_M | S_i
    else :
        for item in ledger.items() :
            # filter on the group id or on the analyte content depending on mode
            if bUseGroupPrefix :
                if not check_prefix in item[0]:
                    continue
            else :
                if not check_prefix in ''.join(item[1][1]):
                    continue
            S_i = set( item[1][1] )
            D_i [ item[0] ] = tuple( (item[1][0] , S_i , len(S_i)) )
            S_M = S_M | S_i
    isDecendant  = lambda sj,sk : len(sj-sk)==0  # sj is contained in sk
    relative_idx = lambda sj,sk : len(sk-sj)     # how many words sk has beyond sj
    all_potential_parents = [ [root_name,S_M] , *[ [ d[0],d[1][1]] for d in D_i.items() ] ]
    PClist = []   # every accepted parent -> child edge
    CPlist = {}   # child id -> [ closest parent , child , distance ]
    for parent_id,parent_words in all_potential_parents:
        # bucket all descendants of this parent by their distance to it
        lookup = {}
        for d in D_i .items() :
            if isDecendant ( d[1][1] , parent_words ) :
                Nij = relative_idx ( d[1][1] , parent_words )
                if Nij in lookup :
                    lookup[Nij] .append(d[0])
                else :
                    lookup[Nij] = [d[0]]
        ordered_levels = sorted ( lookup.items() )  # renamed: previously shadowed the `ledger` parameter
        for ie_ in range( len( ordered_levels ) ) :
            l1 = ordered_levels[ ie_ ][0]
            for potential_child in ordered_levels[ie_][1]:
                pchild_words = D_i[ potential_child ][1]
                bIsChild = True
                if potential_child == parent_id :
                    bIsChild = False
                    break
                # compare against every descendant at the same or smaller distance
                check = [ je_ for je_ in range( ie_ + 1 )] [::-1]
                if len(check) > 0 :
                    for je_ in check :
                        l2 = ordered_levels[ je_ ][0]
                        for relative in ordered_levels[je_][1] :
                            if D_i[relative][0] == D_i[potential_child][0] :
                                continue
                            relative_words = D_i[relative][1]
                            # reject the child only when an identical relative sits strictly closer
                            bIsChild = len(relative_words^pchild_words)>0 or (len(relative_words^pchild_words)==0 and l2==l1 )
                            if not bIsChild :
                                break
                if bIsChild :
                    if potential_child in CPlist :
                        # keep only the closest parent seen so far for this child
                        if CPlist[potential_child][-1]>relative_idx(pchild_words,parent_words):
                            CPlist[potential_child] = [parent_id , potential_child , relative_idx(pchild_words,parent_words) ]
                    else :
                        CPlist[potential_child] = [parent_id , potential_child , relative_idx(pchild_words,parent_words) ]
                    PClist .append ( [parent_id , potential_child ] )
    # register the synthetic root with the full analyte content
    D_i[root_name] = tuple( ('full cell',S_M,len(S_M)) )
    if bSingleChild:
        PClist = [ (v[0],v[1]) for k,v in CPlist.items() ]
    if bReturnList :
        return ( [PClist,D_i] )
    else :
        return ( PClist )
if __name__ == '__main__' :
    # Self tests / usage demos -- flip the `if` guards below to enable them.
    if False :
        #
        bVerbose = False
        if bVerbose:
            print ( "For creating pc and gmt files" )
            print ( "note that the description includes the parent as a reference" )
            print ( "to create a pc file you should reorder i.e. using" )
            print ( "$ cat childparentparts.gmt | awk '{print $2,$1}'" )
        #
        # full pipeline: coordinates -> hierarchy -> parent/child -> gmt file
        pdf = pd.read_csv( '../data/genes.tsv' , '\t' , index_col=0 )
        M , L = calculate_hierarchy_matrix ( pdf )
        cpgl = create_cpgmt_lookup( parent_child_matrix_relationships ( M ) , separators = ['_','-'] )
        write_cpgmt ( cpgl )
    if False :
        print ( "hierarchy matrix test" )
        # synthetic data: 30 points in 3D with noise R on top of a stepped offset P
        # that creates well separated clusters
        R = np.random.rand(90).reshape(30,3)
        P = np.zeros(90).reshape(30,3)
        P [ 1:10 ,: ] += 1
        P [ 0:5 ,: ] += 2
        P [ 20: ,: ] += 3
        P [ 15:20,: ] += 2
        P [ 25: ,: ] += 2
        pdf = pd.DataFrame( P + R ,
                            index = ['pid'+str(i) for i in range(30)] ,
                            columns = ['x','y','z'] )
        M,L = calculate_hierarchy_matrix ( pdf )
        print ( M )
        parent_child_matrix_relationships ( M )
        from impetuous.visualisation import *
        # X/W: cutoff distances ; Z: mean cluster content ; Y: cluster count per level
        X,Y,W,Z = [],[],[],[]
        for item in L.items():
            X.append(item[1][1])
            W.append(item[1][1])
            Z.append(item[1][2])
            Y.append(len(set(M.loc[item[0]].values)))
        from bokeh.plotting import show
        #
        # SHOW THE COORDINATION AND SEGREGATION FUNCTIONS
        # BOTH ARE WELL KNOWN FROM STATISTICAL PHYSICS
        #
        show ( plotter( [X,W] , [Y,Z] ,
                        [nice_colors[0],nice_colors[2]] ,
                        legends = ['segregation','coordination'],
                        axis_labels = ['distance','Number']) )
    if True :
        # https://github.com/richardtjornhammar/impetuous/blob/728bef88f1bba64a051603b807e06231269c7dbb/new_compartment_genes.gmt
        # derive and print the word hierarchy of a compartment gmt file
        PClist,D_i = build_pclist_word_hierarchy ( filename = 'new_compartment_genes.gmt', delete = ['\n'],
                                                   group_id_prefix = 'COMP', analyte_prefix = 'ENSG', root_name = 'COMP0000000000', bReturnList=True )
        for pc in PClist :
            print ( '\t'.join( pc ) )
            show_leftward_dependance = lambda s1,s2:[len(s1-s2),len(s1),len(s2)]
            print ( D_i[pc[0]][0], D_i[pc[1]][0] )
            print ( show_leftward_dependance( D_i[pc[0]][1],D_i[pc[1]][1]) )
| [
"pandas.DataFrame",
"numpy.ceil",
"impetuous.clustering.absolute_coordinates_to_distance_matrix",
"impetuous.clustering.connectivity",
"pandas.read_csv",
"numpy.zeros",
"numpy.shape",
"impetuous.quantification.group_significance",
"numpy.max",
"numpy.mean",
"numpy.where",
"numpy.random.rand",
... | [((3260, 3294), 'numpy.max', 'np.max', (['df[dag_level_label].values'], {}), '(df[dag_level_label].values)\n', (3266, 3294), True, 'import numpy as np\n'), ((6940, 6965), 'numpy.shape', 'np.shape', (['distance_matrix'], {}), '(distance_matrix)\n', (6948, 6965), True, 'import numpy as np\n'), ((9128, 9147), 'pandas.DataFrame', 'pd.DataFrame', (['hsers'], {}), '(hsers)\n', (9140, 9147), True, 'import pandas as pd\n'), ((10196, 10207), 'numpy.shape', 'np.shape', (['M'], {}), '(M)\n', (10204, 10207), True, 'import numpy as np\n'), ((1752, 1778), 'pandas.concat', 'pd.concat', (['[pdf.T, pdf_.T]'], {}), '([pdf.T, pdf_.T])\n', (1761, 1778), True, 'import pandas as pd\n'), ((6154, 6211), 'operator.xor', 'operator.xor', (['(data_frame is None)', '(distance_matrix is None)'], {}), '(data_frame is None, distance_matrix is None)\n', (6166, 6211), False, 'import operator\n'), ((6869, 6927), 'impetuous.clustering.absolute_coordinates_to_distance_matrix', 'absolute_coordinates_to_distance_matrix', (['data_frame.values'], {}), '(data_frame.values)\n', (6908, 6927), False, 'from impetuous.clustering import connectivity, absolute_coordinates_to_distance_matrix\n'), ((8312, 8368), 'impetuous.clustering.connectivity', 'connectivity', (['distance_matrix', 'cutoff'], {'bVerbose': 'bVerbose'}), '(distance_matrix, cutoff, bVerbose=bVerbose)\n', (8324, 8368), False, 'from impetuous.clustering import connectivity, absolute_coordinates_to_distance_matrix\n'), ((21013, 21064), 'pandas.read_csv', 'pd.read_csv', (['"""../data/genes.tsv"""', '"""\t"""'], {'index_col': '(0)'}), "('../data/genes.tsv', '\\t', index_col=0)\n", (21024, 21064), True, 'import pandas as pd\n'), ((8632, 8655), 'numpy.mean', 'np.mean', (['clustercontent'], {}), '(clustercontent)\n', (8639, 8655), True, 'import numpy as np\n'), ((16768, 16814), 'operator.xor', 'operator.xor', (['(filename is None)', '(ledger is None)'], {}), '(filename is None, ledger is None)\n', (16780, 16814), False, 'import operator\n'), ((4736, 
4861), 'impetuous.quantification.group_significance', 'group_significance', (['group'], {'AllAnalytes': 'AllAnalytes', 'SigAnalytes': 'SigAnalytes', 'tolerance': 'threshold', 'alternative': 'alternative'}), '(group, AllAnalytes=AllAnalytes, SigAnalytes=SigAnalytes,\n tolerance=threshold, alternative=alternative)\n', (4754, 4861), False, 'from impetuous.quantification import group_significance\n'), ((7717, 7755), 'numpy.ceil', 'np.ceil', (['(nuco / coarse_grain_structure)'], {}), '(nuco / coarse_grain_structure)\n', (7724, 7755), True, 'import numpy as np\n'), ((21324, 21342), 'numpy.random.rand', 'np.random.rand', (['(90)'], {}), '(90)\n', (21338, 21342), True, 'import numpy as np\n'), ((21369, 21381), 'numpy.zeros', 'np.zeros', (['(90)'], {}), '(90)\n', (21377, 21381), True, 'import numpy as np\n'), ((1196, 1212), 'pandas.DataFrame', 'pd.DataFrame', (['ps'], {}), '(ps)\n', (1208, 1212), True, 'import pandas as pd\n'), ((12413, 12432), 'pandas.DataFrame', 'pd.DataFrame', (['pcser'], {}), '(pcser)\n', (12425, 12432), True, 'import pandas as pd\n'), ((12557, 12576), 'pandas.DataFrame', 'pd.DataFrame', (['pcser'], {}), '(pcser)\n', (12569, 12576), True, 'import pandas as pd\n'), ((3853, 3870), 'numpy.where', 'np.where', (['filter_'], {}), '(filter_)\n', (3861, 3870), True, 'import numpy as np\n'), ((12662, 12681), 'pandas.DataFrame', 'pd.DataFrame', (['pcser'], {}), '(pcser)\n', (12674, 12681), True, 'import pandas as pd\n')] |
#Ref: <NAME>
"""
This code normalizes staining appearance of H&E stained images.
It also separates the hematoxylin and eosin stains in to different images.

Workflow based on the following papers:
A method for normalizing histology slides for quantitative analysis.
M. Macenko et al., ISBI 2009
http://wwwx.cs.unc.edu/~mn/sites/default/files/macenko2009.pdf
Efficient nucleus detector in histopathology images. J.P. Vink et al., J Microscopy, 2013

Original MATLAB code:
https://github.com/mitkovetta/staining-normalization/blob/master/normalizeStaining.m

Other useful references:
https://www.ncbi.nlm.nih.gov/pmc/articles/PMC5226799/
https://journals.plos.org/plosone/article?id=10.1371/journal.pone.0169875

PROPOSED WORKFLOW:

Input: RGB image
Step 1: Convert RGB to OD
Step 2: Remove data with OD intensity less than β
Step 3: Calculate singular value decomposition (SVD) on the OD tuples
Step 4: Create plane from the SVD directions corresponding to the
two largest singular values
Step 5: Project data onto the plane, and normalize to unit length
Step 6: Calculate angle of each point wrt the first SVD direction
Step 7: Find robust extremes (αth and (100−α)th percentiles) of the angle
Step 8: Convert extreme values back to OD space
Output: Optimal Stain Vectors
"""

import numpy as np
import cv2
from matplotlib import pyplot as plt

############### INPUT RGB IMAGE #######################
#Using opencv to read images may be more robust compared to using skimage
#but need to remember to convert BGR to RGB.
#Also, convert to float later on and normalize to between 0 and 1.
#Image downloaded from:
#https://pbs.twimg.com/media/C1MkrgQWQAASbdz.jpg
img=cv2.imread('images/HnE_Image.jpg', 1)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)

Io = 240 # Transmitted light intensity, Normalizing factor for image intensities
alpha = 1  #As recommend in the paper. tolerance for the pseudo-min and pseudo-max (default: 1)
beta = 0.15 #As recommended in the paper. OD threshold for transparent pixels (default: 0.15)

######## Step 1: Convert RGB to OD ###################
## reference H&E OD matrix.
#Can be updated if you know the best values for your image.
#Otherwise use the following default values.
#Read the above referenced papers on this topic.
HERef = np.array([[0.5626, 0.2159],
                  [0.7201, 0.8012],
                  [0.4062, 0.5581]])
### reference maximum stain concentrations for H&E
maxCRef = np.array([1.9705, 1.0308])

# extract the height, width and num of channels of image
h, w, c = img.shape

# reshape image to multiple rows and 3 columns.
#Num of rows depends on the image size (wxh)
img = img.reshape((-1,3))

# calculate optical density
# OD = −log10(I)
#OD = -np.log10(img+0.004)  #Use this when reading images with skimage
#Adding 0.004 just to avoid log of zero.

# FIX: np.float was removed in NumPy 1.24 (deprecated since 1.20); np.float64
# keeps the same C-double dtype the original alias resolved to.
OD = -np.log10((img.astype(np.float64)+1)/Io) #Use this for opencv imread
#Add 1 in case any pixels in the image have a value of 0 (log 0 is indeterminate)

"""
from mpl_toolkits.mplot3d import Axes3D
fig = plt.figure(figsize=(16, 8))
ax1 = fig.add_subplot(121, projection='3d')
ax1.scatter(img[:,0],img[:,1],img[:,2])
ax2 = fig.add_subplot(122, projection='3d')
ax2.scatter(OD[:,0],OD[:,1],OD[:,2])
plt.show()
"""

############ Step 2: Remove data with OD intensity less than β ############
# remove transparent pixels (clear region with no tissue)
ODhat = OD[~np.any(OD < beta, axis=1)] #Returns an array where OD values are above beta
#Check by printing ODhat.min()

############# Step 3: Calculate SVD on the OD tuples ######################
#Estimate covariance matrix of ODhat (transposed)
# and then compute eigen values & eigenvectors.
eigvals, eigvecs = np.linalg.eigh(np.cov(ODhat.T))

######## Step 4: Create plane from the SVD directions with two largest values ######
#project on the plane spanned by the eigenvectors corresponding to the two
# largest eigenvalues
That = ODhat.dot(eigvecs[:,1:3]) #Dot product

############### Step 5: Project data onto the plane, and normalize to unit length ###########
############## Step 6: Calculate angle of each point wrt the first SVD direction ########
#find the min and max vectors and project back to OD space
phi = np.arctan2(That[:,1],That[:,0])

minPhi = np.percentile(phi, alpha)
maxPhi = np.percentile(phi, 100-alpha)

vMin = eigvecs[:,1:3].dot(np.array([(np.cos(minPhi), np.sin(minPhi))]).T)
vMax = eigvecs[:,1:3].dot(np.array([(np.cos(maxPhi), np.sin(maxPhi))]).T)

# a heuristic to make the vector corresponding to hematoxylin first and the
# one corresponding to eosin second
if vMin[0] > vMax[0]:
    HE = np.array((vMin[:,0], vMax[:,0])).T
else:
    HE = np.array((vMax[:,0], vMin[:,0])).T

# rows correspond to channels (RGB), columns to OD values
Y = np.reshape(OD, (-1, 3)).T

# determine concentrations of the individual stains
C = np.linalg.lstsq(HE,Y, rcond=None)[0]

# normalize stain concentrations
maxC = np.array([np.percentile(C[0,:], 99), np.percentile(C[1,:],99)])
tmp = np.divide(maxC,maxCRef)
C2 = np.divide(C,tmp[:, np.newaxis])

###### Step 8: Convert extreme values back to OD space
# recreate the normalized image using reference mixing matrix
Inorm = np.multiply(Io, np.exp(-HERef.dot(C2)))
Inorm[Inorm>255] = 254
Inorm = np.reshape(Inorm.T, (h, w, 3)).astype(np.uint8)

# Separating H and E components
H = np.multiply(Io, np.exp(np.expand_dims(-HERef[:,0], axis=1).dot(np.expand_dims(C2[0,:], axis=0))))
H[H>255] = 254
H = np.reshape(H.T, (h, w, 3)).astype(np.uint8)

E = np.multiply(Io, np.exp(np.expand_dims(-HERef[:,1], axis=1).dot(np.expand_dims(C2[1,:], axis=0))))
E[E>255] = 254
E = np.reshape(E.T, (h, w, 3)).astype(np.uint8)

plt.imsave("images/HnE_normalized.jpg", Inorm)
plt.imsave("images/HnE_separated_H.jpg", H)
plt.imsave("images/HnE_separated_E.jpg", E)
| [
"numpy.divide",
"numpy.arctan2",
"numpy.linalg.lstsq",
"cv2.cvtColor",
"numpy.expand_dims",
"numpy.percentile",
"cv2.imread",
"numpy.any",
"numpy.sin",
"numpy.array",
"matplotlib.pyplot.imsave",
"numpy.reshape",
"numpy.cos",
"numpy.cov"
] | [((1697, 1734), 'cv2.imread', 'cv2.imread', (['"""images/HnE_Image.jpg"""', '(1)'], {}), "('images/HnE_Image.jpg', 1)\n", (1707, 1734), False, 'import cv2\n'), ((1741, 1777), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2RGB'], {}), '(img, cv2.COLOR_BGR2RGB)\n', (1753, 1777), False, 'import cv2\n'), ((2300, 2364), 'numpy.array', 'np.array', (['[[0.5626, 0.2159], [0.7201, 0.8012], [0.4062, 0.5581]]'], {}), '([[0.5626, 0.2159], [0.7201, 0.8012], [0.4062, 0.5581]])\n', (2308, 2364), True, 'import numpy as np\n'), ((2462, 2488), 'numpy.array', 'np.array', (['[1.9705, 1.0308]'], {}), '([1.9705, 1.0308])\n', (2470, 2488), True, 'import numpy as np\n'), ((4230, 4264), 'numpy.arctan2', 'np.arctan2', (['That[:, 1]', 'That[:, 0]'], {}), '(That[:, 1], That[:, 0])\n', (4240, 4264), True, 'import numpy as np\n'), ((4272, 4297), 'numpy.percentile', 'np.percentile', (['phi', 'alpha'], {}), '(phi, alpha)\n', (4285, 4297), True, 'import numpy as np\n'), ((4307, 4338), 'numpy.percentile', 'np.percentile', (['phi', '(100 - alpha)'], {}), '(phi, 100 - alpha)\n', (4320, 4338), True, 'import numpy as np\n'), ((5021, 5045), 'numpy.divide', 'np.divide', (['maxC', 'maxCRef'], {}), '(maxC, maxCRef)\n', (5030, 5045), True, 'import numpy as np\n'), ((5050, 5082), 'numpy.divide', 'np.divide', (['C', 'tmp[:, np.newaxis]'], {}), '(C, tmp[:, np.newaxis])\n', (5059, 5082), True, 'import numpy as np\n'), ((5697, 5743), 'matplotlib.pyplot.imsave', 'plt.imsave', (['"""images/HnE_normalized.jpg"""', 'Inorm'], {}), "('images/HnE_normalized.jpg', Inorm)\n", (5707, 5743), True, 'from matplotlib import pyplot as plt\n'), ((5744, 5787), 'matplotlib.pyplot.imsave', 'plt.imsave', (['"""images/HnE_separated_H.jpg"""', 'H'], {}), "('images/HnE_separated_H.jpg', H)\n", (5754, 5787), True, 'from matplotlib import pyplot as plt\n'), ((5788, 5831), 'matplotlib.pyplot.imsave', 'plt.imsave', (['"""images/HnE_separated_E.jpg"""', 'E'], {}), "('images/HnE_separated_E.jpg', E)\n", (5798, 5831), True, 
'from matplotlib import pyplot as plt\n'), ((3728, 3743), 'numpy.cov', 'np.cov', (['ODhat.T'], {}), '(ODhat.T)\n', (3734, 3743), True, 'import numpy as np\n'), ((4790, 4813), 'numpy.reshape', 'np.reshape', (['OD', '(-1, 3)'], {}), '(OD, (-1, 3))\n', (4800, 4813), True, 'import numpy as np\n'), ((4873, 4907), 'numpy.linalg.lstsq', 'np.linalg.lstsq', (['HE', 'Y'], {'rcond': 'None'}), '(HE, Y, rcond=None)\n', (4888, 4907), True, 'import numpy as np\n'), ((3412, 3437), 'numpy.any', 'np.any', (['(OD < beta)'], {'axis': '(1)'}), '(OD < beta, axis=1)\n', (3418, 3437), True, 'import numpy as np\n'), ((4636, 4670), 'numpy.array', 'np.array', (['(vMin[:, 0], vMax[:, 0])'], {}), '((vMin[:, 0], vMax[:, 0]))\n', (4644, 4670), True, 'import numpy as np\n'), ((4691, 4725), 'numpy.array', 'np.array', (['(vMax[:, 0], vMin[:, 0])'], {}), '((vMax[:, 0], vMin[:, 0]))\n', (4699, 4725), True, 'import numpy as np\n'), ((4961, 4987), 'numpy.percentile', 'np.percentile', (['C[0, :]', '(99)'], {}), '(C[0, :], 99)\n', (4974, 4987), True, 'import numpy as np\n'), ((4988, 5014), 'numpy.percentile', 'np.percentile', (['C[1, :]', '(99)'], {}), '(C[1, :], 99)\n', (5001, 5014), True, 'import numpy as np\n'), ((5281, 5311), 'numpy.reshape', 'np.reshape', (['Inorm.T', '(h, w, 3)'], {}), '(Inorm.T, (h, w, 3))\n', (5291, 5311), True, 'import numpy as np\n'), ((5486, 5512), 'numpy.reshape', 'np.reshape', (['H.T', '(h, w, 3)'], {}), '(H.T, (h, w, 3))\n', (5496, 5512), True, 'import numpy as np\n'), ((5652, 5678), 'numpy.reshape', 'np.reshape', (['E.T', '(h, w, 3)'], {}), '(E.T, (h, w, 3))\n', (5662, 5678), True, 'import numpy as np\n'), ((5432, 5464), 'numpy.expand_dims', 'np.expand_dims', (['C2[0, :]'], {'axis': '(0)'}), '(C2[0, :], axis=0)\n', (5446, 5464), True, 'import numpy as np\n'), ((5598, 5630), 'numpy.expand_dims', 'np.expand_dims', (['C2[1, :]'], {'axis': '(0)'}), '(C2[1, :], axis=0)\n', (5612, 5630), True, 'import numpy as np\n'), ((5392, 5428), 'numpy.expand_dims', 'np.expand_dims', 
(['(-HERef[:, 0])'], {'axis': '(1)'}), '(-HERef[:, 0], axis=1)\n', (5406, 5428), True, 'import numpy as np\n'), ((5558, 5594), 'numpy.expand_dims', 'np.expand_dims', (['(-HERef[:, 1])'], {'axis': '(1)'}), '(-HERef[:, 1], axis=1)\n', (5572, 5594), True, 'import numpy as np\n'), ((4375, 4389), 'numpy.cos', 'np.cos', (['minPhi'], {}), '(minPhi)\n', (4381, 4389), True, 'import numpy as np\n'), ((4391, 4405), 'numpy.sin', 'np.sin', (['minPhi'], {}), '(minPhi)\n', (4397, 4405), True, 'import numpy as np\n'), ((4449, 4463), 'numpy.cos', 'np.cos', (['maxPhi'], {}), '(maxPhi)\n', (4455, 4463), True, 'import numpy as np\n'), ((4465, 4479), 'numpy.sin', 'np.sin', (['maxPhi'], {}), '(maxPhi)\n', (4471, 4479), True, 'import numpy as np\n')] |
from flask import Flask
# Serve static assets from the local "myCSS" directory.
app = Flask(__name__,static_folder="myCSS") #,template_folder="/content/COVID-Brain-Tumour-Project/project folder")
import numpy as np
from keras.preprocessing import image
from keras.models import load_model
# FIX: `redirect` was imported twice on this line; the duplicate is removed.
from flask import redirect, url_for, request, render_template, Response, jsonify
from werkzeug.utils import secure_filename
from gevent.pywsgi import WSGIServer
import os
import sys
import shutil
from flask_cors import CORS, cross_origin
import tensorflow as tf
from uuid import uuid4
# Allow cross-origin requests on every route.
cors = CORS(app, resources={r"/*": {"origins": "*"}})
# NOTE(review): `y` is never read in the visible code -- kept for safety.
y=[]
@app.route('/', methods=['GET', 'POST'])
#@cross_origin()
def index():
    """Landing page: accept a chest X-ray upload (POST) and report COVID-19 risk.

    GET renders covidPage.html with the placeholder result "wait"; POST saves
    the uploaded file under a sanitized name, classifies it with the COVID
    model and re-renders the page with the prediction string via `resultt`.

    Fixes vs the previous revision: the Keras model is loaded from disk once
    and cached on the function object instead of on every request, and the
    dead `dict` local (which shadowed the builtin and was never read) is gone.
    """
    prediction = "wait"  # placeholder shown before any upload
    if request.method == "POST":
        f = request.files['file']
        # Persist the upload under a sanitized filename in the working directory.
        image1 = secure_filename(f.filename)
        f.save(image1)
        # Load the model once and cache it -- re-reading the .h5 on every
        # request is needlessly slow.
        if not hasattr(index, "_covid_model"):
            print('model loading ...')
            index._covid_model = load_model('Covid_model.h5', compile=False)
            print('model loading done.')
        covid_model = index._covid_model
        # Preprocess to the 224x224 RGB tensor the network expects
        # (assumes the model was trained on raw 0-255 pixels -- TODO confirm).
        test_image = image.load_img(image1, target_size=(224, 224))
        test_image = image.img_to_array(test_image)
        test_image = np.expand_dims(test_image, axis=0)
        results = covid_model.predict(test_image)
        # Class index 0 == COVID positive, anything else == healthy.
        if np.argmax(results, axis=1) == 0:
            prediction = 'High risk of COVID-19'
        else:
            prediction = 'Patient is Healthy'
        print(prediction)
        return render_template('/covidPage.html', resultt=prediction)
    else:
        return render_template('/covidPage.html', resultt=prediction)
@app.route('/covidPage.html', methods=['GET', 'POST'])
#@cross_origin()
def predict():
    """COVID page route: identical flow to index() but mounted on /covidPage.html.

    GET renders covidPage.html with the placeholder result "wait"; POST saves
    the upload, classifies it with the COVID model and re-renders the page.

    Fixes vs the previous revision: the Keras model is loaded once and cached
    on the function object instead of on every request, and the dead `dict`
    local (shadowing the builtin, never read) is removed.
    """
    prediction = "wait"  # placeholder shown before any upload
    if request.method == "POST":
        f = request.files['file']
        # Persist the upload under a sanitized filename in the working directory.
        image1 = secure_filename(f.filename)
        f.save(image1)
        # Load the model once and cache it -- re-reading the .h5 on every
        # request is needlessly slow.
        if not hasattr(predict, "_covid_model"):
            print('model loading ...')
            predict._covid_model = load_model('Covid_model.h5', compile=False)
            print('model loading done.')
        covid_model = predict._covid_model
        # Preprocess to the 224x224 RGB tensor the network expects
        # (assumes the model was trained on raw 0-255 pixels -- TODO confirm).
        test_image = image.load_img(image1, target_size=(224, 224))
        test_image = image.img_to_array(test_image)
        test_image = np.expand_dims(test_image, axis=0)
        results = covid_model.predict(test_image)
        # Class index 0 == COVID positive, anything else == healthy.
        if np.argmax(results, axis=1) == 0:
            prediction = 'High risk of COVID-19'
        else:
            prediction = 'Patient is Healthy'
        print(prediction)
        return render_template('/covidPage.html', resultt=prediction)
    else:
        return render_template('/covidPage.html', resultt=prediction)
@app.route('/brainTumourPage.html', methods=['GET', 'POST'])
#@cross_origin()
def predict2():
    """Brain-tumour page: accept an MRI upload (POST) and report tumour risk.

    GET renders brainTumourPage.html with the placeholder result "wait"; POST
    saves the upload, classifies it with the brain model and re-renders the
    page with the prediction string via `resultt`.

    Fixes vs the previous revision: the Keras model is loaded once and cached
    on the function object instead of on every request, and the dead `dict`
    local (shadowing the builtin, never read) is removed.
    NOTE(review): the POST branch renders 'brainTumourPage.html' while the GET
    branch renders '/brainTumourPage.html' -- inconsistent but preserved as-is.
    """
    prediction = "wait"  # placeholder shown before any upload
    if request.method == "POST":
        f = request.files['file']
        # Persist the upload under a sanitized filename in the working directory.
        image1 = secure_filename(f.filename)
        f.save(image1)
        # Load the model once and cache it -- re-reading the .h5 on every
        # request is needlessly slow.
        if not hasattr(predict2, "_brain_model"):
            print('model loading ...')
            predict2._brain_model = load_model('Brain_model.h5', compile=False)
            print('model loading done.')
        brain_model = predict2._brain_model
        # Preprocess to the 224x224 RGB tensor the network expects
        # (assumes the model was trained on raw 0-255 pixels -- TODO confirm).
        test_image = image.load_img(image1, target_size=(224, 224))
        test_image = image.img_to_array(test_image)
        test_image = np.expand_dims(test_image, axis=0)
        results = brain_model.predict(test_image)
        # Class index 0 == tumour present, anything else == healthy.
        if np.argmax(results, axis=1) == 0:
            prediction = 'High risk of brainTumour'
        else:
            prediction = 'Patient is Healthy'
        print(prediction)
        return render_template('brainTumourPage.html', resultt=prediction)
    else:
        return render_template('/brainTumourPage.html', resultt=prediction)
if __name__=="__main__":
    # Run the Flask development server (a gevent WSGIServer alternative was
    # considered here but is not used).
    app.run()
| [
"keras.models.load_model",
"numpy.argmax",
"flask_cors.CORS",
"flask.Flask",
"numpy.expand_dims",
"werkzeug.utils.secure_filename",
"keras.preprocessing.image.img_to_array",
"keras.preprocessing.image.load_img",
"flask.render_template"
] | [((30, 68), 'flask.Flask', 'Flask', (['__name__'], {'static_folder': '"""myCSS"""'}), "(__name__, static_folder='myCSS')\n", (35, 68), False, 'from flask import Flask\n'), ((536, 581), 'flask_cors.CORS', 'CORS', (['app'], {'resources': "{'/*': {'origins': '*'}}"}), "(app, resources={'/*': {'origins': '*'}})\n", (540, 581), False, 'from flask_cors import CORS, cross_origin\n'), ((828, 855), 'werkzeug.utils.secure_filename', 'secure_filename', (['f.filename'], {}), '(f.filename)\n', (843, 855), False, 'from werkzeug.utils import secure_filename\n'), ((1028, 1071), 'keras.models.load_model', 'load_model', (['"""Covid_model.h5"""'], {'compile': '(False)'}), "('Covid_model.h5', compile=False)\n", (1038, 1071), False, 'from keras.models import load_model\n'), ((1197, 1243), 'keras.preprocessing.image.load_img', 'image.load_img', (['image1'], {'target_size': '(224, 224)'}), '(image1, target_size=(224, 224))\n', (1211, 1243), False, 'from keras.preprocessing import image\n'), ((1259, 1289), 'keras.preprocessing.image.img_to_array', 'image.img_to_array', (['test_image'], {}), '(test_image)\n', (1277, 1289), False, 'from keras.preprocessing import image\n'), ((1307, 1341), 'numpy.expand_dims', 'np.expand_dims', (['test_image'], {'axis': '(0)'}), '(test_image, axis=0)\n', (1321, 1341), True, 'import numpy as np\n'), ((1882, 1936), 'flask.render_template', 'render_template', (['"""/covidPage.html"""'], {'resultt': 'prediction'}), "('/covidPage.html', resultt=prediction)\n", (1897, 1936), False, 'from flask import redirect, url_for, request, render_template, Response, jsonify, redirect\n'), ((1980, 2034), 'flask.render_template', 'render_template', (['"""/covidPage.html"""'], {'resultt': 'prediction'}), "('/covidPage.html', resultt=prediction)\n", (1995, 2034), False, 'from flask import redirect, url_for, request, render_template, Response, jsonify, redirect\n'), ((2291, 2318), 'werkzeug.utils.secure_filename', 'secure_filename', (['f.filename'], {}), '(f.filename)\n', 
(2306, 2318), False, 'from werkzeug.utils import secure_filename\n'), ((2491, 2534), 'keras.models.load_model', 'load_model', (['"""Covid_model.h5"""'], {'compile': '(False)'}), "('Covid_model.h5', compile=False)\n", (2501, 2534), False, 'from keras.models import load_model\n'), ((2660, 2706), 'keras.preprocessing.image.load_img', 'image.load_img', (['image1'], {'target_size': '(224, 224)'}), '(image1, target_size=(224, 224))\n', (2674, 2706), False, 'from keras.preprocessing import image\n'), ((2722, 2752), 'keras.preprocessing.image.img_to_array', 'image.img_to_array', (['test_image'], {}), '(test_image)\n', (2740, 2752), False, 'from keras.preprocessing import image\n'), ((2770, 2804), 'numpy.expand_dims', 'np.expand_dims', (['test_image'], {'axis': '(0)'}), '(test_image, axis=0)\n', (2784, 2804), True, 'import numpy as np\n'), ((3345, 3399), 'flask.render_template', 'render_template', (['"""/covidPage.html"""'], {'resultt': 'prediction'}), "('/covidPage.html', resultt=prediction)\n", (3360, 3399), False, 'from flask import redirect, url_for, request, render_template, Response, jsonify, redirect\n'), ((3443, 3497), 'flask.render_template', 'render_template', (['"""/covidPage.html"""'], {'resultt': 'prediction'}), "('/covidPage.html', resultt=prediction)\n", (3458, 3497), False, 'from flask import redirect, url_for, request, render_template, Response, jsonify, redirect\n'), ((3759, 3786), 'werkzeug.utils.secure_filename', 'secure_filename', (['f.filename'], {}), '(f.filename)\n', (3774, 3786), False, 'from werkzeug.utils import secure_filename\n'), ((3959, 4002), 'keras.models.load_model', 'load_model', (['"""Brain_model.h5"""'], {'compile': '(False)'}), "('Brain_model.h5', compile=False)\n", (3969, 4002), False, 'from keras.models import load_model\n'), ((4128, 4174), 'keras.preprocessing.image.load_img', 'image.load_img', (['image1'], {'target_size': '(224, 224)'}), '(image1, target_size=(224, 224))\n', (4142, 4174), False, 'from keras.preprocessing import 
image\n'), ((4190, 4220), 'keras.preprocessing.image.img_to_array', 'image.img_to_array', (['test_image'], {}), '(test_image)\n', (4208, 4220), False, 'from keras.preprocessing import image\n'), ((4238, 4272), 'numpy.expand_dims', 'np.expand_dims', (['test_image'], {'axis': '(0)'}), '(test_image, axis=0)\n', (4252, 4272), True, 'import numpy as np\n'), ((4816, 4875), 'flask.render_template', 'render_template', (['"""brainTumourPage.html"""'], {'resultt': 'prediction'}), "('brainTumourPage.html', resultt=prediction)\n", (4831, 4875), False, 'from flask import redirect, url_for, request, render_template, Response, jsonify, redirect\n'), ((4919, 4979), 'flask.render_template', 'render_template', (['"""/brainTumourPage.html"""'], {'resultt': 'prediction'}), "('/brainTumourPage.html', resultt=prediction)\n", (4934, 4979), False, 'from flask import redirect, url_for, request, render_template, Response, jsonify, redirect\n'), ((1482, 1508), 'numpy.argmax', 'np.argmax', (['results'], {'axis': '(1)'}), '(results, axis=1)\n', (1491, 1508), True, 'import numpy as np\n'), ((2945, 2971), 'numpy.argmax', 'np.argmax', (['results'], {'axis': '(1)'}), '(results, axis=1)\n', (2954, 2971), True, 'import numpy as np\n'), ((4413, 4439), 'numpy.argmax', 'np.argmax', (['results'], {'axis': '(1)'}), '(results, axis=1)\n', (4422, 4439), True, 'import numpy as np\n')] |
# Copyright (C) 2018-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import unittest
import numpy as np
from extensions.front.broadcast_with_range import ExpandRangeConstant
from mo.utils.ir_engine.compare_graphs import compare_graphs
from mo.utils.unittest.graph import build_graph, result, regular_op_with_shaped_data, valued_const_with_data, connect, \
regular_op_with_empty_data, connect_data
class TestRangeBroadcast(unittest.TestCase):
    """Unit test for the ExpandRangeConstant front transformation."""

    def test_broadcast_with_range_positive_test(self):
        """ExpandRangeConstant should rewrite a Broadcast whose value is a
        constant range into a Gather/Range/Unsqueeze subgraph that rebuilds
        the range dynamically from the target shape.
        """
        # Input graph: a constant 1x384 value broadcast to a 2-element shape.
        graph = build_graph({
            **regular_op_with_shaped_data('shape', [2], {'type': 'Parameter'}),
            **valued_const_with_data('value', np.arange(0, 384).reshape((1, 384))),
            **regular_op_with_empty_data('bc', {'type': 'Broadcast'}),
            **result(),
        }, [
            *connect('value', '0:bc'),
            *connect('shape', '1:bc'),
            *connect('bc', 'output'),
        ], nodes_with_edges_only=True)
        ExpandRangeConstant().find_and_replace_pattern(graph)

        # Reference graph: Range(start, Gather(shape, -1), delta) unsqueezed
        # back to the original rank, then broadcast to the same shape input.
        graph_ref = build_graph({
            **regular_op_with_shaped_data('shape', [2], {'type': 'Parameter'}),
            # start
            **valued_const_with_data('start', np.array(0)),
            # limit
            **valued_const_with_data('minus_one', np.array(-1)),
            **valued_const_with_data('zero', np.array(0)),
            **regular_op_with_empty_data('range_dim', {'type': 'Gather'}),
            # delta
            **valued_const_with_data('delta', np.array(1)),
            **regular_op_with_empty_data('range', {'type': 'Range'}),
            # keep dims
            **valued_const_with_data('axes', np.array([0])),
            **regular_op_with_empty_data('keep_shape', {'type': 'Unsqueeze'}),
            **regular_op_with_empty_data('bc', {'type': 'Broadcast'}),
            **result(),
        }, [
            *connect('start', '0:range'),
            *connect('shape', '0:range_dim'),
            *connect('minus_one', '1:range_dim'),
            *connect('zero', '2:range_dim'),
            *connect('range_dim', '1:range'),
            *connect('delta', '2:range'),
            *connect('range', '0:keep_shape'),
            *connect('axes', '1:keep_shape'),
            *connect('keep_shape', '0:bc'),
            *connect_data('shape', '1:bc'),
            *connect('bc', 'output'),
        ], nodes_with_edges_only=True)

        # Graphs must match structurally and in op attributes from 'output'.
        (flag, resp) = compare_graphs(graph, graph_ref, 'output', check_op_attrs=True)
        self.assertTrue(flag, resp)
| [
"mo.utils.ir_engine.compare_graphs.compare_graphs",
"mo.utils.unittest.graph.regular_op_with_shaped_data",
"mo.utils.unittest.graph.connect",
"numpy.array",
"numpy.arange",
"mo.utils.unittest.graph.result",
"mo.utils.unittest.graph.connect_data",
"mo.utils.unittest.graph.regular_op_with_empty_data",
... | [((2430, 2493), 'mo.utils.ir_engine.compare_graphs.compare_graphs', 'compare_graphs', (['graph', 'graph_ref', '"""output"""'], {'check_op_attrs': '(True)'}), "(graph, graph_ref, 'output', check_op_attrs=True)\n", (2444, 2493), False, 'from mo.utils.ir_engine.compare_graphs import compare_graphs\n'), ((563, 627), 'mo.utils.unittest.graph.regular_op_with_shaped_data', 'regular_op_with_shaped_data', (['"""shape"""', '[2]', "{'type': 'Parameter'}"], {}), "('shape', [2], {'type': 'Parameter'})\n", (590, 627), False, 'from mo.utils.unittest.graph import build_graph, result, regular_op_with_shaped_data, valued_const_with_data, connect, regular_op_with_empty_data, connect_data\n'), ((727, 782), 'mo.utils.unittest.graph.regular_op_with_empty_data', 'regular_op_with_empty_data', (['"""bc"""', "{'type': 'Broadcast'}"], {}), "('bc', {'type': 'Broadcast'})\n", (753, 782), False, 'from mo.utils.unittest.graph import build_graph, result, regular_op_with_shaped_data, valued_const_with_data, connect, regular_op_with_empty_data, connect_data\n'), ((798, 806), 'mo.utils.unittest.graph.result', 'result', ([], {}), '()\n', (804, 806), False, 'from mo.utils.unittest.graph import build_graph, result, regular_op_with_shaped_data, valued_const_with_data, connect, regular_op_with_empty_data, connect_data\n'), ((984, 1005), 'extensions.front.broadcast_with_range.ExpandRangeConstant', 'ExpandRangeConstant', ([], {}), '()\n', (1003, 1005), False, 'from extensions.front.broadcast_with_range import ExpandRangeConstant\n'), ((1087, 1151), 'mo.utils.unittest.graph.regular_op_with_shaped_data', 'regular_op_with_shaped_data', (['"""shape"""', '[2]', "{'type': 'Parameter'}"], {}), "('shape', [2], {'type': 'Parameter'})\n", (1114, 1151), False, 'from mo.utils.unittest.graph import build_graph, result, regular_op_with_shaped_data, valued_const_with_data, connect, regular_op_with_empty_data, connect_data\n'), ((1392, 1451), 'mo.utils.unittest.graph.regular_op_with_empty_data', 
'regular_op_with_empty_data', (['"""range_dim"""', "{'type': 'Gather'}"], {}), "('range_dim', {'type': 'Gather'})\n", (1418, 1451), False, 'from mo.utils.unittest.graph import build_graph, result, regular_op_with_shaped_data, valued_const_with_data, connect, regular_op_with_empty_data, connect_data\n'), ((1547, 1601), 'mo.utils.unittest.graph.regular_op_with_empty_data', 'regular_op_with_empty_data', (['"""range"""', "{'type': 'Range'}"], {}), "('range', {'type': 'Range'})\n", (1573, 1601), False, 'from mo.utils.unittest.graph import build_graph, result, regular_op_with_shaped_data, valued_const_with_data, connect, regular_op_with_empty_data, connect_data\n'), ((1703, 1766), 'mo.utils.unittest.graph.regular_op_with_empty_data', 'regular_op_with_empty_data', (['"""keep_shape"""', "{'type': 'Unsqueeze'}"], {}), "('keep_shape', {'type': 'Unsqueeze'})\n", (1729, 1766), False, 'from mo.utils.unittest.graph import build_graph, result, regular_op_with_shaped_data, valued_const_with_data, connect, regular_op_with_empty_data, connect_data\n'), ((1783, 1838), 'mo.utils.unittest.graph.regular_op_with_empty_data', 'regular_op_with_empty_data', (['"""bc"""', "{'type': 'Broadcast'}"], {}), "('bc', {'type': 'Broadcast'})\n", (1809, 1838), False, 'from mo.utils.unittest.graph import build_graph, result, regular_op_with_shaped_data, valued_const_with_data, connect, regular_op_with_empty_data, connect_data\n'), ((1854, 1862), 'mo.utils.unittest.graph.result', 'result', ([], {}), '()\n', (1860, 1862), False, 'from mo.utils.unittest.graph import build_graph, result, regular_op_with_shaped_data, valued_const_with_data, connect, regular_op_with_empty_data, connect_data\n'), ((834, 858), 'mo.utils.unittest.graph.connect', 'connect', (['"""value"""', '"""0:bc"""'], {}), "('value', '0:bc')\n", (841, 858), False, 'from mo.utils.unittest.graph import build_graph, result, regular_op_with_shaped_data, valued_const_with_data, connect, regular_op_with_empty_data, connect_data\n'), ((873, 897), 
'mo.utils.unittest.graph.connect', 'connect', (['"""shape"""', '"""1:bc"""'], {}), "('shape', '1:bc')\n", (880, 897), False, 'from mo.utils.unittest.graph import build_graph, result, regular_op_with_shaped_data, valued_const_with_data, connect, regular_op_with_empty_data, connect_data\n'), ((912, 935), 'mo.utils.unittest.graph.connect', 'connect', (['"""bc"""', '"""output"""'], {}), "('bc', 'output')\n", (919, 935), False, 'from mo.utils.unittest.graph import build_graph, result, regular_op_with_shaped_data, valued_const_with_data, connect, regular_op_with_empty_data, connect_data\n'), ((1220, 1231), 'numpy.array', 'np.array', (['(0)'], {}), '(0)\n', (1228, 1231), True, 'import numpy as np\n'), ((1304, 1316), 'numpy.array', 'np.array', (['(-1)'], {}), '(-1)\n', (1312, 1316), True, 'import numpy as np\n'), ((1364, 1375), 'numpy.array', 'np.array', (['(0)'], {}), '(0)\n', (1372, 1375), True, 'import numpy as np\n'), ((1519, 1530), 'numpy.array', 'np.array', (['(1)'], {}), '(1)\n', (1527, 1530), True, 'import numpy as np\n'), ((1673, 1686), 'numpy.array', 'np.array', (['[0]'], {}), '([0])\n', (1681, 1686), True, 'import numpy as np\n'), ((1890, 1917), 'mo.utils.unittest.graph.connect', 'connect', (['"""start"""', '"""0:range"""'], {}), "('start', '0:range')\n", (1897, 1917), False, 'from mo.utils.unittest.graph import build_graph, result, regular_op_with_shaped_data, valued_const_with_data, connect, regular_op_with_empty_data, connect_data\n'), ((1932, 1963), 'mo.utils.unittest.graph.connect', 'connect', (['"""shape"""', '"""0:range_dim"""'], {}), "('shape', '0:range_dim')\n", (1939, 1963), False, 'from mo.utils.unittest.graph import build_graph, result, regular_op_with_shaped_data, valued_const_with_data, connect, regular_op_with_empty_data, connect_data\n'), ((1978, 2013), 'mo.utils.unittest.graph.connect', 'connect', (['"""minus_one"""', '"""1:range_dim"""'], {}), "('minus_one', '1:range_dim')\n", (1985, 2013), False, 'from mo.utils.unittest.graph import 
build_graph, result, regular_op_with_shaped_data, valued_const_with_data, connect, regular_op_with_empty_data, connect_data\n'), ((2028, 2058), 'mo.utils.unittest.graph.connect', 'connect', (['"""zero"""', '"""2:range_dim"""'], {}), "('zero', '2:range_dim')\n", (2035, 2058), False, 'from mo.utils.unittest.graph import build_graph, result, regular_op_with_shaped_data, valued_const_with_data, connect, regular_op_with_empty_data, connect_data\n'), ((2073, 2104), 'mo.utils.unittest.graph.connect', 'connect', (['"""range_dim"""', '"""1:range"""'], {}), "('range_dim', '1:range')\n", (2080, 2104), False, 'from mo.utils.unittest.graph import build_graph, result, regular_op_with_shaped_data, valued_const_with_data, connect, regular_op_with_empty_data, connect_data\n'), ((2119, 2146), 'mo.utils.unittest.graph.connect', 'connect', (['"""delta"""', '"""2:range"""'], {}), "('delta', '2:range')\n", (2126, 2146), False, 'from mo.utils.unittest.graph import build_graph, result, regular_op_with_shaped_data, valued_const_with_data, connect, regular_op_with_empty_data, connect_data\n'), ((2161, 2193), 'mo.utils.unittest.graph.connect', 'connect', (['"""range"""', '"""0:keep_shape"""'], {}), "('range', '0:keep_shape')\n", (2168, 2193), False, 'from mo.utils.unittest.graph import build_graph, result, regular_op_with_shaped_data, valued_const_with_data, connect, regular_op_with_empty_data, connect_data\n'), ((2208, 2239), 'mo.utils.unittest.graph.connect', 'connect', (['"""axes"""', '"""1:keep_shape"""'], {}), "('axes', '1:keep_shape')\n", (2215, 2239), False, 'from mo.utils.unittest.graph import build_graph, result, regular_op_with_shaped_data, valued_const_with_data, connect, regular_op_with_empty_data, connect_data\n'), ((2254, 2283), 'mo.utils.unittest.graph.connect', 'connect', (['"""keep_shape"""', '"""0:bc"""'], {}), "('keep_shape', '0:bc')\n", (2261, 2283), False, 'from mo.utils.unittest.graph import build_graph, result, regular_op_with_shaped_data, valued_const_with_data, 
connect, regular_op_with_empty_data, connect_data\n'), ((2298, 2327), 'mo.utils.unittest.graph.connect_data', 'connect_data', (['"""shape"""', '"""1:bc"""'], {}), "('shape', '1:bc')\n", (2310, 2327), False, 'from mo.utils.unittest.graph import build_graph, result, regular_op_with_shaped_data, valued_const_with_data, connect, regular_op_with_empty_data, connect_data\n'), ((2342, 2365), 'mo.utils.unittest.graph.connect', 'connect', (['"""bc"""', '"""output"""'], {}), "('bc', 'output')\n", (2349, 2365), False, 'from mo.utils.unittest.graph import build_graph, result, regular_op_with_shaped_data, valued_const_with_data, connect, regular_op_with_empty_data, connect_data\n'), ((675, 692), 'numpy.arange', 'np.arange', (['(0)', '(384)'], {}), '(0, 384)\n', (684, 692), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
Use this file for your answers.
This file should been in the root of the repository
(do not move it or change the file name)
"""
import numpy as np
from numpy.linalg import inv
def lml(alpha, beta, Phi, Y):
    """
    4 marks

    Log marginal likelihood of a Bayesian linear model whose marginal
    covariance is K = alpha * Phi Phi^T + beta * I_N.

    :param alpha: float, prior precision scale on the weights
    :param beta: float, observation noise scale
    :param Phi: array of shape (N, M), design matrix
    :param Y: array of shape (N, 1), targets
    :return: the log marginal likelihood, a scalar
    """
    N = Phi.shape[0]
    K = alpha * Phi @ Phi.T + beta * np.identity(N)
    # slogdet avoids the overflow/underflow of log(det(K)) for large N.
    _, logdet = np.linalg.slogdet(K)
    # Solve K x = Y rather than forming an explicit inverse (faster, stabler).
    quad = (Y.T @ np.linalg.solve(K, Y)).item()
    return -0.5 * (N * np.log(2 * np.pi) + logdet + quad)
def grad_lml(alpha, beta, Phi, Y):
    """
    8 marks (4 for each component)

    Gradient of the log marginal likelihood w.r.t. (alpha, beta), where the
    marginal covariance is K = alpha * Phi Phi^T + beta * I_N and
    dL/dtheta = tr(common^T dK/dtheta) with
    common = 0.5 * K^-1 (Y Y^T - K) K^-1.

    :param alpha: float
    :param beta: float
    :param Phi: array of shape (N, M)
    :param Y: array of shape (N, 1)
    :return: array of shape (2,). The components of this array are the gradients
    (d_lml_d_alpha, d_lml_d_beta), the gradients of lml with respect to alpha and beta respectively.
    """
    N = Phi.shape[0]
    gram = Phi @ Phi.T  # Phi Phi^T: reused for K and as dK/dalpha
    K = alpha * gram + beta * np.identity(N)
    K_inv = np.linalg.inv(K)  # invert once (original inverted K three times)
    common = 0.5 * K_inv @ (Y @ Y.T - K) @ K_inv
    d_alpha = np.trace(common.T @ gram)  # dK/dalpha = Phi Phi^T
    d_beta = np.trace(common.T)         # dK/dbeta = I
    return np.array([d_alpha, d_beta])
# Phi = np.array([[1,2],[1,3]])
#a
# Y = np.array([[-0.75426779],[-0.5480492]])
#
# print(lml(2.0, 4.0, Phi, Y)) | [
"numpy.trace",
"numpy.log",
"numpy.identity",
"numpy.array",
"numpy.linalg.inv",
"numpy.dot",
"numpy.linalg.det"
] | [((1487, 1505), 'numpy.trace', 'np.trace', (['common.T'], {}), '(common.T)\n', (1495, 1505), True, 'import numpy as np\n'), ((1518, 1545), 'numpy.array', 'np.array', (['[d_alpha, d_bete]'], {}), '([d_alpha, d_bete])\n', (1526, 1545), True, 'import numpy as np\n'), ((530, 547), 'numpy.log', 'np.log', (['(2 * np.pi)'], {}), '(2 * np.pi)\n', (536, 547), True, 'import numpy as np\n'), ((630, 644), 'numpy.identity', 'np.identity', (['N'], {}), '(N)\n', (641, 644), True, 'import numpy as np\n'), ((670, 705), 'numpy.linalg.det', 'np.linalg.det', (['(wholePhi + wholeBeta)'], {}), '(wholePhi + wholeBeta)\n', (683, 705), True, 'import numpy as np\n'), ((1285, 1303), 'numpy.dot', 'np.dot', (['alpha', 'Phi'], {}), '(alpha, Phi)\n', (1291, 1303), True, 'import numpy as np\n'), ((1327, 1341), 'numpy.identity', 'np.identity', (['N'], {}), '(N)\n', (1338, 1341), True, 'import numpy as np\n'), ((1405, 1411), 'numpy.linalg.inv', 'inv', (['X'], {}), '(X)\n', (1408, 1411), False, 'from numpy.linalg import inv\n'), ((1453, 1471), 'numpy.dot', 'np.dot', (['Phi', 'Phi.T'], {}), '(Phi, Phi.T)\n', (1459, 1471), True, 'import numpy as np\n'), ((586, 600), 'numpy.identity', 'np.identity', (['M'], {}), '(M)\n', (597, 600), True, 'import numpy as np\n'), ((745, 770), 'numpy.linalg.inv', 'inv', (['(wholePhi + wholeBeta)'], {}), '(wholePhi + wholeBeta)\n', (748, 770), False, 'from numpy.linalg import inv\n'), ((1376, 1382), 'numpy.linalg.inv', 'inv', (['X'], {}), '(X)\n', (1379, 1382), False, 'from numpy.linalg import inv\n'), ((1384, 1398), 'numpy.dot', 'np.dot', (['Y', 'Y.T'], {}), '(Y, Y.T)\n', (1390, 1398), True, 'import numpy as np\n')] |
import numpy as np
import h5py
def group_name(norm_fn, residual_fn):
    """Build the HDF5 group name for a normalisation / residual combination.

    An empty norm_fn defaults to 'zero_one'; a residual_fn of 'none' or ''
    yields a group named after the normalisation function alone.

    Raises ValueError for unrecognised function names.
    """
    valid_norms = {'normal_dist', 'percent_change', 'zero_one', ''}
    valid_residuals = {'exp_residual', 'gdp_residual', 'linear_residual', 'none', ''}
    if norm_fn not in valid_norms:
        raise ValueError('Invalid norm function')
    if residual_fn not in valid_residuals:
        raise ValueError('Invalid residual function')
    norm = norm_fn or 'zero_one'
    residual = '' if residual_fn == 'none' else residual_fn
    return '{}_{}'.format(norm, residual) if residual else norm
def get_hdf5(aggregate, sample, path='/centurion/FREDcast/'):
    """Return the path of the HDF5 file for the requested data variant.

    aggregate selects the RNN-ready file over the plain split file, and
    sample selects the small sampled variant.
    """
    base = 'rnn_data' if aggregate else 'split_data'
    suffix = '_sample' if sample else ''
    return '{}{}{}.hdf5'.format(path, base, suffix)
def y_index(indicator):
    """Map an indicator name to its column index in the target arrays.

    Raises ValueError when the indicator name is not recognised.
    """
    indicators = ('gdp', 'cpi', 'payroll', 'unemployment')
    for position, name in enumerate(indicators):
        if name == indicator:
            return position
    raise ValueError('%s not a valid indicator' % indicator)
def get_data(data_tuple, dsets=False,
             norm_fn='', residual_fn='', aggregate=False, sample=True,
             indicator='gdp'):
    """Load train/test splits for one indicator from the HDF5 store.

    :param data_tuple: optional (indicator, norm_fn, residual_fn, aggregate,
        sample) tuple; when given it overrides the individual keyword args.
    :param dsets: when True return the live h5py dataset objects; otherwise
        return in-memory copies.
    :return: (x_train, x_test, y_train, y_test)
    :raises ValueError: if any split contains NaN or the indicator is unknown.
    """
    if data_tuple is not None:
        indicator, norm_fn, residual_fn, aggregate, sample = data_tuple[:5]
    # NOTE(review): the file handle is intentionally left open when
    # dsets=True, since the returned h5py datasets need it to stay usable.
    hdf5 = h5py.File(get_hdf5(aggregate, sample), 'r')
    grp = hdf5[group_name(norm_fn, residual_fn)]
    x_train, x_test, y_train, y_test = grp['train_x'], grp['test_x'], grp['train_y'], grp['test_y']
    # Fail fast on incomplete data, naming the offending split in the error.
    for name, arr in zip(('train_x', 'test_x', 'train_y', 'test_y'),
                         (x_train, x_test, y_train, y_test)):
        check_complete(arr, name)
    # Keep only the target column for the requested indicator.
    y_train = y_train[:, y_index(indicator)]
    y_test = y_test[:, y_index(indicator)]
    if dsets:  # Return the h5py dset objects
        return x_train, x_test, y_train, y_test
    else:  # Return the actual data
        return x_train[:], x_test[:], y_train[:], y_test[:]
def check_complete(array, name=''):
    """Raise ValueError if *array* contains any NaN entries.

    *name* identifies the offending array in the error message.
    """
    # min() propagates NaN, so a single reduction detects any NaN cheaply.
    minimum = np.min(array)
    if not np.isnan(minimum):
        return
    raise ValueError('%s contains nan' % name)
"numpy.min"
] | [((1968, 1981), 'numpy.min', 'np.min', (['array'], {}), '(array)\n', (1974, 1981), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
Created on Tue Apr 27 20:40:39 2021
@author:AlanMWatson
Napari plugin for reading imaris files as a multiresolution series.
NOTE: Currently "File/Preferences/Render Images Asynchronously" must be turned on for this plugin to work
*** Issues remain with indexing and the shape of returned arrays.
1) It is unclear if there is an issue with how I am implementing slicing in the ims module
2) Different expectations from napari on the state of the data that is returned between the Image and Chunk_loader methods in ims module
** It appears that napari is only requesting 2D (YX) chunks from the loader during 2D rendering
which limits the utility of the async chunk_loader.
*Future implementation of caching in RAM and persistently on disk is planned via ims module - currently disabled
RAM Cache may be redundant to napari cache unless we can implement 3D chunk caching
Disk cache may allow for loaded chunks to be stored to SSD for rapid future retrieval
with options to maintain this cache persistently across sessions.
"""
import os
import numpy as np
import dask.array as da
from imaris_ims_file_reader.ims import ims
from napari_plugin_engine import napari_hook_implementation
"""
Is this a bug in napari or specific to this reader?
path = "...\napari-env\Lib\site-packages\napari\layers\image\image.py"
Line 619-620
should read:
indices[d] = slice(
int(self.corner_pixels[0, d]),
int(self.corner_pixels[1, d] + 1),
1,
)
start/stop values of the slice must be coerced to int otherwise an error
is thrown when switching from 3D to 2D view
### NOTE: This may no longer be a problem
"""
def ims_reader(path,resLevel='max', colorsIndependant=False, preCache=False):
    """Read an Imaris (.ims) file and return napari layer-data tuples.

    Each resolution level is wrapped in a lazy dask array; the result is a
    (data, metadata) tuple (or a list of them when colorsIndependant=True)
    in the shape napari's reader-plugin contract expects.

    :param path: path to the .ims file
    :param resLevel: 'max' to expose every resolution level, or an int to
        cut the pyramid off at that level (0 = highest resolution)
    :param colorsIndependant: when True, return one layer tuple per channel
        instead of a single multichannel layer
    :param preCache: currently unused; reserved for eager cache warm-up
    """
    # Example datasets used during development:
    # path = r"Z:\testData\bitplaneConverter.ims" ## Dataset for testing
    #print('I AM IN THE READER')
    # path = r"Z:\toTest\bil\download.brainimagelibrary.org\2b\da\2bdaf9e66a246844\mouseID_405429-182725\CH1_0.35_100um\ch1_0.35_100um.ims"
    # path = r"Z:\testData\2D\1time_1color_composite_z500_c488.ims"
    imsClass = ims(path)

    # Pick display contrast limits from the full range of the on-disk dtype.
    if imsClass.dtype==np.dtype('uint16'):
        contrastLimits = [0, 65535]
    elif imsClass.dtype==np.dtype('uint8'):
        contrastLimits = [0, 255]
    elif imsClass.dtype==np.dtype('float'):
        contrastLimits = [0,1]

    ## Enable async loading of tiles
    os.environ["NAPARI_ASYNC"] = "1"
    # os.environ['NAPARI_OCTREE'] = "1"

    # cache = Cache(10e9)  # Leverage two gigabytes of memory
    # cache.register()  # Turn cache on globally

    ## Build a default display name for each channel
    channelNames = []
    for cc in range(imsClass.Channels):
        channelNames.append('Channel {}'.format(cc))
    if len(channelNames) == 1:
        channelNames = channelNames[0]

    # Open one locked ims handle per resolution level (lazy; nothing is read yet).
    data = []
    for rr in range(imsClass.ResolutionLevels):
        print('Loading resolution level {}'.format(rr))
        data.append(ims(path,ResolutionLevelLock=rr,cache_location=imsClass.cache_location))

    # Wrap each level in a dask array so napari can pull chunks on demand.
    chunks = True
    for idx,_ in enumerate(data):
        data[idx] = da.from_array(data[idx],
                                  chunks=data[idx].chunks if chunks == True else (1,1,data[idx].shape[-3],data[idx].shape[-2],data[idx].shape[-1]),
                                  fancy=False
                                  )
    print(data)

    # Base metadata that apply to all scenarios
    meta = {
        "contrast_limits": contrastLimits,
        "name": channelNames,
        "metadata": {'fileName':imsClass.filePathComplete,
                     'resolutionLevels':imsClass.ResolutionLevels
                     }
        }

    # Reslice to remove dangling single dimensions, this may not be necessary anymore
    # Count how many leading axes are singleton so they can be sliced away.
    inwardSlice = 0
    for ii in range(len(imsClass.shape)):
        if imsClass.shape[ii] == 1:
            inwardSlice += 1
        else:
            break

    # Drop the counted leading singleton axes and place the channel axis
    # accordingly (it sits right after any surviving time axis).
    if inwardSlice == 0:
        for idx,_ in enumerate(data):
            meta['channel_axis'] = 1
    elif inwardSlice == 1:
        for idx,_ in enumerate(data):
            data[idx] = data[idx][0]
        meta['channel_axis'] = 0
    elif inwardSlice == 2:
        for idx,_ in enumerate(data):
            data[idx] = data[idx][0,0]
        meta['channel_axis'] = None
    elif inwardSlice == 3:
        for idx,_ in enumerate(data):
            data[idx] = data[idx][0,0,0]
        meta['channel_axis'] = None
    elif inwardSlice == 4:
        for idx,_ in enumerate(data):
            data[idx] = data[idx][0,0,0,0]
        meta['channel_axis'] = None

    # Remove single color dims, this may not be necessary
    if len(data) >= 1 and data[0].ndim >= 4 and data[0].shape[-4] == 1:
        for idx,_ in enumerate(data):
            data[idx] = data[idx][...,0,:,:,:]
        meta['channel_axis'] = None

    # # Remove single Z dims, this may not be necessary, may cause scale issues
    # if len(data) >= 3 and data[0].shape[-3] == 1:
    #     for idx,_ in enumerate(data):
    #         data[idx] = data[idx][...,0,:,:]

    ## Possibility of implementing rapid caching of some data (lower resolution levels?) prior to visualization.
    ## done by calling a simple calculation over the whole dask array da.min()?
    # if preCache == True:
    #     for idx,dd in enumerate(reversed(data)):
    #         print('Caching resolution level {}'.format(len(data)-idx-1))
    #         for ii in range(imsClass.Channels):
    #             dd[0,ii].min().compute()
    #         if idx == 2:
    #             break

    # Option to cut off lower resolutions to improve 3D rendering
    # May provide a widget that can implement this after the dataset is loaded
    if isinstance(resLevel,int) and resLevel+1 > len(data):
        raise ValueError('Selected resolution level is too high: Options are between 0 and {}'.format(imsClass.ResolutionLevels-1))
    data = data if resLevel=='max' else data[:resLevel+1]
    print(data)

    # Set multiscale based on whether multiple resolutions are present
    meta["multiscale"] = True if len(data) > 1 else False

    ## Extract Voxel Spacing
    scale = imsClass.resolution
    scale = scale[-2::] if len(data[0].shape) == 2 else scale #Reduces scale to 2dim when a single color 2D dataset
    scale = [tuple(scale)]*imsClass.Channels
    meta["scale"] = scale if len(scale) > 1 else scale[0]

    # When channels are to be rendered as independent layers, split the
    # channel axis out of each resolution level and clone per-channel metadata.
    if colorsIndependant and 'channel_axis' in meta and meta['channel_axis'] is not None:
        channelAxis = meta['channel_axis']
        channelData = []
        for cc in range(data[0].shape[channelAxis]):
            singleChannel = []
            for dd in data:
                if channelAxis == 0:
                    singleChannel.append(dd[cc])
                elif channelAxis == 1:
                    singleChannel.append(dd[:,cc])
            channelData.append(singleChannel)
        del(meta['channel_axis'])
        metaData = []
        for cc in range(data[0].shape[channelAxis]):
            singleChannel = {
                'contrast_limits':meta['contrast_limits'],
                'multiscale':meta['multiscale'],
                'metadata':meta['metadata'],
                'scale':meta['scale'][cc],
                'name':meta['name'][cc]
                }
            metaData.append(singleChannel)
        finalOutput = []
        for dd,mm in zip(channelData,metaData):
            if len(dd) > 1:
                finalOutput.append(
                    (dd,mm)
                    )
            else:
                finalOutput.append(
                    (dd[0],mm)
                    )
        return finalOutput
    else:
        # Single layer: unwrap the pyramid list when only one level remains.
        return [(data if len(data) > 1 else data[0],meta)]
@napari_hook_implementation
def napari_get_reader(path):
    """Napari reader hook: claim paths whose extension is '.ims'."""
    if not isinstance(path, str):
        return None
    _, extension = os.path.splitext(path)
    if extension.lower() == '.ims':
        return ims_reader
    return None
| [
"dask.array.from_array",
"imaris_ims_file_reader.ims.ims",
"numpy.dtype",
"os.path.splitext"
] | [((2137, 2146), 'imaris_ims_file_reader.ims.ims', 'ims', (['path'], {}), '(path)\n', (2140, 2146), False, 'from imaris_ims_file_reader.ims import ims\n'), ((2178, 2196), 'numpy.dtype', 'np.dtype', (['"""uint16"""'], {}), "('uint16')\n", (2186, 2196), True, 'import numpy as np\n'), ((3173, 3336), 'dask.array.from_array', 'da.from_array', (['data[idx]'], {'chunks': '(data[idx].chunks if chunks == True else (1, 1, data[idx].shape[-3], data[\n idx].shape[-2], data[idx].shape[-1]))', 'fancy': '(False)'}), '(data[idx], chunks=data[idx].chunks if chunks == True else (1,\n 1, data[idx].shape[-3], data[idx].shape[-2], data[idx].shape[-1]),\n fancy=False)\n', (3186, 3336), True, 'import dask.array as da\n'), ((2259, 2276), 'numpy.dtype', 'np.dtype', (['"""uint8"""'], {}), "('uint8')\n", (2267, 2276), True, 'import numpy as np\n'), ((3014, 3087), 'imaris_ims_file_reader.ims.ims', 'ims', (['path'], {'ResolutionLevelLock': 'rr', 'cache_location': 'imsClass.cache_location'}), '(path, ResolutionLevelLock=rr, cache_location=imsClass.cache_location)\n', (3017, 3087), False, 'from imaris_ims_file_reader.ims import ims\n'), ((2337, 2354), 'numpy.dtype', 'np.dtype', (['"""float"""'], {}), "('float')\n", (2345, 2354), True, 'import numpy as np\n'), ((7978, 8000), 'os.path.splitext', 'os.path.splitext', (['path'], {}), '(path)\n', (7994, 8000), False, 'import os\n')] |
import pandas as pd
import numpy as np
import os
from IPython.display import display
# Widen pandas display limits so full result frames print in the notebook.
pd.set_option('display.max_rows', 1200)
pd.set_option('display.max_columns', 20)

# Movie dataset: one row per film (budget, revenue, runtime, genres, cast, ...).
data = pd.read_csv('data.csv')

#1 - film with the largest budget
display(data[data.budget == data.budget.max()])
#answer - 4

#2 - longest film
display(data[data.runtime == data.runtime.max()])
#answer - 2

#3 - shortest film
display(data[data.runtime == data.runtime.min()])
#answer - 3

#4 - mean runtime
# display(data.runtime.mean())
#answer - 2

#5 - median runtime
display(data.runtime.median())
#answer - 1

#6 - most profitable film (profit = revenue - budget, reused below)
data['profit'] = data['revenue'] - data['budget']
display(data[data['profit'] == data['profit'].max()])
#answer - 5

#7 - least profitable film
display(data[data['profit'] == data['profit'].min()])
#answer - 2

#8 - number of profitable films
display(data[data['profit'] > 0].count())
display(len(data[data['profit'] > 0]))
#answer - 1

#9 - most profitable film of 2008
d2008 = data[data['release_year'] == 2008]
display(d2008[d2008['profit'] == d2008['profit'].max()])
#answer - 4

#10 - biggest loss among 2012-2014 releases
sliced = data.query('2011 < release_year < 2015')
display(sliced[sliced['profit'] == sliced['profit'].min()])
#answer - 5

#11
def get_genres(df, g):
    """Return the rows of *df* whose pipe-separated genres field mentions *g*."""
    mask = df.genres.str.contains(g)
    return df[mask]
# Collect the set of distinct genre names (genres is a pipe-separated string).
all_genres = []
unique_genre_sets = data.genres.unique()
for lst in unique_genre_sets:
    genre_names = lst.split('|')
    for g in genre_names:
        if g not in all_genres:
            all_genres.append(g)
#####
# TODO - make a kind of sorted array when I got time
#####

# Film counts for the five candidate genres.
display(len(get_genres(data, 'Action')))
display(len(get_genres(data, 'Adventure')))
display(len(get_genres(data, 'Drama')))
display(len(get_genres(data, 'Comedy')))
display(len(get_genres(data, 'Thriller')))
#answer - 3

#12 - same genres, restricted to profitable films
display(len(get_genres(data, 'Action').query('profit > 0')))
display(len(get_genres(data, 'Adventure').query('profit > 0')))
display(len(get_genres(data, 'Drama').query('profit > 0')))
display(len(get_genres(data, 'Comedy').query('profit > 0')))
display(len(get_genres(data, 'Thriller').query('profit > 0')))
#answer - 3

#13 - this one looks better than previous two
display(data.query('director in ["<NAME>", "<NAME>", "<NAME>", "<NAME>", "<NAME>"]').director.value_counts())
#answer - 3

#14 - same directors, profitable films only
display(data.query('director in ["<NAME>", "<NAME>", "<NAME>", "<NAME>", "<NAME>"] & profit > 0').director.value_counts())
#answer - 2

#15 - total profit per candidate director
pivot = data.loc[data['director'].isin(["<NAME>", "<NAME>", "<NAME>", "<NAME>", "<NAME>"])].pivot_table(values=['profit'],
                                                                                                                               index=['director'],
                                                                                                                               aggfunc='sum',
                                                                                                                               fill_value=0)
display(pivot)
#answer - 5

#16 - total profit over all films featuring each candidate actor
display(data.query('cast.str.contains("<NAME>")').profit.sum())
display(data.query('cast.str.contains("<NAME>")').profit.sum())
display(data.query('cast.str.contains("<NAME>")').profit.sum())
display(data.query('cast.str.contains("<NAME>")').profit.sum())
display(data.query('cast.str.contains("<NAME>")').profit.sum())
#answer - 1

#17
def get_profit_by_actor(df, actor):
    """Return the total ``profit`` of the movies in *df* whose cast includes *actor*.

    Uses plain boolean indexing instead of building a ``DataFrame.query``
    string by concatenation, so actor names containing quotes or regex
    metacharacters (e.g. ``O'Brien``) no longer break the lookup, and
    missing cast values are treated as non-matches.
    """
    mask = df['cast'].str.contains(actor, regex=False, na=False)
    return df.loc[mask, 'profit'].sum()
# Per-actor 2012 profit comparison (one-row frame for side-by-side display).
d2012 = data[data['release_year'] == 2012]
df = pd.DataFrame({'<NAME>': [get_profit_by_actor(d2012, '<NAME>')],
                   '<NAME>': [get_profit_by_actor(d2012, '<NAME>')],
                   '<NAME>': [get_profit_by_actor(d2012, '<NAME>')],
                   '<NAME>': [get_profit_by_actor(d2012, '<NAME>')],
                   '<NAME>': [get_profit_by_actor(d2012, '<NAME>')]})
display(df)
#answer - 3
#18
# High-budget movies = budget above the dataset mean.
avg = data.budget.mean()
high_budget = data[data.budget > avg]
display(len(high_budget.query('cast.str.contains("<NAME>")')))
display(len(high_budget.query('cast.str.contains("<NAME>")')))
display(len(high_budget.query('cast.str.contains("<NAME>")')))
display(len(high_budget.query('cast.str.contains("<NAME>")')))
display(len(high_budget.query('cast.str.contains("<NAME>")')))
#answer - 3
#19
cage = data.query('cast.str.contains("<NAME>")')
display(len(get_genres(cage, 'Drama')))
display(len(get_genres(cage, 'Action')))
display(len(get_genres(cage, 'Thriller')))
display(len(get_genres(cage, 'Adventure')))
display(len(get_genres(cage, 'Crime')))
#answer - 2
#20
display(len(data.query('production_companies.str.contains("Universal")')))
display(len(data.query('production_companies.str.contains("Paramount Pictures")')))
display(len(data.query('production_companies.str.contains("Columbia Pictures")')))
display(len(data.query('production_companies.str.contains("<NAME>")')))
display(len(data.query('production_companies.str.contains("Twentieth Century Fox Film Corporation")')))
#answer - 1
#21
d2015 = data[data['release_year'] == 2015]
display(len(d2015.query('production_companies.str.contains("Universal Pictures")')))
display(len(d2015.query('production_companies.str.contains("Paramount Pictures")')))
display(len(d2015.query('production_companies.str.contains("Columbia Pictures")')))
display(len(d2015.query('production_companies.str.contains("<NAME>")')))
display(len(d2015.query('production_companies.str.contains("Twentieth Century Fox Film Corporation")')))
#answer - 4
#22
def named_field_entries_count(df, field, s):
    """Return ``"<s> <count>"`` — how many rows of *df* contain *s* in column *field*.

    Boolean indexing replaces the concatenated ``query`` string of the
    original, so values containing quotes or regex metacharacters are
    handled safely; NaN cells count as non-matches.
    """
    mask = df[field].str.contains(s, regex=False, na=False)
    return s + ' ' + str(int(mask.sum()))
def named_field_entries_sum(df, field, s, inner):
    """Return ``"<s> <total>"`` — the sum of column *inner* over the rows of *df*
    whose column *field* contains *s*.

    Boolean indexing replaces the concatenated ``query`` string of the
    original, so values containing quotes or regex metacharacters are
    handled safely; NaN cells count as non-matches.
    """
    mask = df[field].str.contains(s, regex=False, na=False)
    return s + ' ' + str(df.loc[mask, inner].sum())
# Q22: total comedy profit per production company.
comedies = get_genres(data, 'Comedy')
print(named_field_entries_sum(comedies, 'production_companies', '<NAME>', 'profit'))
print(named_field_entries_sum(comedies, 'production_companies', 'Universal', 'profit'))
print(named_field_entries_sum(comedies, 'production_companies', 'Columbia Pictures', 'profit'))
print(named_field_entries_sum(comedies, 'production_companies', 'Paramount Pictures', 'profit'))
print(named_field_entries_sum(comedies, 'production_companies', '<NAME>', 'profit'))
#answer - 2
#23
def field_entries_count(df, field, s):
    """Count the rows of *df* whose string column *field* contains the literal substring *s*.

    Boolean indexing replaces the concatenated ``query`` string of the
    original, so values containing quotes or regex metacharacters are
    handled safely; NaN cells count as non-matches.
    """
    mask = df[field].str.contains(s, regex=False, na=False)
    return int(mask.sum())
def field_entries_sum(df, field, s, inner):
    """Sum column *inner* over the rows of *df* whose column *field* contains
    the literal substring *s*.

    Boolean indexing replaces the concatenated ``query`` string of the
    original, so values containing quotes or regex metacharacters are
    handled safely; NaN cells count as non-matches.
    """
    mask = df[field].str.contains(s, regex=False, na=False)
    return df.loc[mask, inner].sum()
# Q23: 2012 profit per production company, one-row frame for display.
df = pd.DataFrame({'Universal': [field_entries_sum(d2012, 'production_companies', 'Universal', 'profit')],
                   '<NAME>': [field_entries_sum(d2012, 'production_companies', '<NAME>', 'profit')],
                   'Columbia Pictures': [field_entries_sum(d2012, 'production_companies', 'Columbia Pictures', 'profit')],
                   'Paramount Pictures': [field_entries_sum(d2012, 'production_companies', 'Paramount Pictures', 'profit')],
                   'Lucasfilm': [field_entries_sum(d2012, 'production_companies', 'Lucasfilm', 'profit')]
                  })
display(df)
#answer - 3
#24
paramounts = data.query('production_companies.str.contains("Paramount Pictures")')
display(paramounts[paramounts.profit == paramounts.profit.min()])
#answer - 1
#25
# Total profit per selected release year.
pivot = data.loc[data['release_year'].isin([2002, 2008, 2012, 2014, 2015])].pivot_table(values=['profit'],
                    index=['release_year'],
                    aggfunc='sum',
                    fill_value=0)
display(pivot)
#answer - 5
#26
# Same pivot, restricted to one studio.
warners = data.query('production_companies.str.contains("<NAME>")')
pivot = warners.loc[warners['release_year'].isin([2002, 2008, 2012, 2014, 2015])].pivot_table(values=['profit'],
                    index=['release_year'],
                    aggfunc='sum',
                    fill_value=0)
display(pivot)
#answer - 1
#27
def by_month(df, index):
    """Return the rows of *df* whose ``release_date`` starts with *index* (e.g. ``'12/'``).

    ``str.startswith`` replaces the original ``str.find(prefix, 0, len(prefix)) == 0``
    idiom — same result, clearer intent; missing dates are treated as
    non-matches instead of producing a NaN mask.
    """
    return df[df.release_date.str.startswith(index, na=False)]
# Month-of-release slices (release_date is 'M/D/YYYY', so '1/' only matches
# January — '12/...' does not start with '1/').
jan = by_month(data, '1/')
jun = by_month(data, '6/')
dec = by_month(data, '12/')
sep = by_month(data, '9/')
may = by_month(data, '5/')
display(len(jan), len(jun), len(dec), len(sep), len(may))
#answer - 4
#28
jul = by_month(data, '7/')
aug = by_month(data, '8/')
display(len(jun) + len(jul) + len(aug))
#answer - 2
#29
# Winter releases (Jan, Feb, Dec) per director.
display(len(data[(data['director'] == "<NAME>") & ((data.release_date.str.find('1/', 0, len('1/')) == 0) | (data.release_date.str.find('2/', 0, len('2/')) == 0) | (data.release_date.str.find('12/', 0, len('12/')) == 0))]))
display(len(data[(data['director'] == "<NAME>") & ((data.release_date.str.find('1/', 0, len('1/')) == 0) | (data.release_date.str.find('2/', 0, len('2/')) == 0) | (data.release_date.str.find('12/', 0, len('12/')) == 0))]))
display(len(data[(data['director'] == "<NAME>") & ((data.release_date.str.find('1/', 0, len('1/')) == 0) | (data.release_date.str.find('2/', 0, len('2/')) == 0) | (data.release_date.str.find('12/', 0, len('12/')) == 0))]))
display(len(data[(data['director'] == "<NAME>") & ((data.release_date.str.find('1/', 0, len('1/')) == 0) | (data.release_date.str.find('2/', 0, len('2/')) == 0) | (data.release_date.str.find('12/', 0, len('12/')) == 0))]))
display(len(data[(data['director'] == "<NAME>") & ((data.release_date.str.find('1/', 0, len('1/')) == 0) | (data.release_date.str.find('2/', 0, len('2/')) == 0) | (data.release_date.str.find('12/', 0, len('12/')) == 0))]))
#answer - 5
#30 Think more!!!
# Per-year profit per month; 'mv' picks the per-row maximum.
# NOTE(review): np.array(lst) mixes the year into the same float array as the
# sums, and df.max(axis=1) includes the 'y' column — verify this is intended.
years = data.release_year.unique()
lst = []
for y in years:
    lst.append([y, jan.query('release_year == ' + str(y)).profit.sum(), jun.query('release_year == ' + str(y)).profit.sum(), dec.query('release_year == ' + str(y)).profit.sum(), sep.query('release_year == ' + str(y)).profit.sum(), may.query('release_year == ' + str(y)).profit.sum()])
df = pd.DataFrame(np.array(lst), columns = ['y', 'jan', 'jun', 'dec', 'sep', 'may'])
df['mv'] = df.max(axis = 1)
#answer - 2
#31
# Mean title length in characters per studio.
display(data.query('production_companies.str.contains("Universal")').original_title.str.len().mean())
display(data.query('production_companies.str.contains("<NAME>")').original_title.str.len().mean())
display(data.query('production_companies.str.contains("<NAME>son Company, The")').original_title.str.len().mean())
display(data.query('production_companies.str.contains("Paramount Pictures")').original_title.str.len().mean())
display(data.query('production_companies.str.contains("Four By Two Productions")').original_title.str.len().mean())
#32
# Mean title length in words per studio.
display(data.query('production_companies.str.contains("Universal")').original_title.str.split().map(lambda a: len(a)).mean())
display(data.query('production_companies.str.contains("<NAME>")').original_title.str.split().map(lambda a: len(a)).mean())
display(data.query('production_companies.str.contains("<NAME> Company, The")').original_title.str.split().map(lambda a: len(a)).mean())
display(data.query('production_companies.str.contains("Paramount Pictures")').original_title.str.split().map(lambda a: len(a)).mean())
display(data.query('production_companies.str.contains("Four By Two Productions")').original_title.str.split().map(lambda a: len(a)).mean())
#answer - 5
#33
# Count distinct (case-folded) words across all titles.
# NOTE(review): `s.lower() not in words` on a list is O(n) per word; a set
# would make this loop linear overall.
names = data.original_title.values.tolist();
words = []
for w in names:
    arr = w.split(' ')
    for s in arr:
        if s.lower() not in words:
            words.append(s.lower())
print(len(words))
#answer - 3
#34
# Top 1% of movies by vote average.
topsorted = data.sort_values('vote_average', ascending = False)
display(topsorted.head(int(0.01 * len(topsorted))))
#answer - 1
#35
display(len(data.query('cast.str.contains("<NAME>") & cast.str.contains("<NAME>")')))
display(len(data.query('cast.str.contains("<NAME>") & cast.str.contains("<NAME>")')))
display(len(data.query('cast.str.contains("<NAME>") & cast.str.contains("<NAME>")')))
display(len(data.query('cast.str.contains("<NAME>") & cast.str.contains("<NAME>")')))
display(len(data.query('cast.str.contains("<NAME>") & cast.str.contains("<NAME>")')))
#answer - 5
#36
# Share of profitable movies per director.
display(len(data.query('director == "<NAME>" & profit > 0')) / len(data.query('director == "<NAME>"')))
display(len(data.query('director == "<NAME>" & profit > 0')) / len(data.query('director == "<NAME>"')))
display(len(data.query('director == "<NAME>" & profit > 0')) / len(data.query('director == "<NAME>"')))
display(len(data.query('director == "<NAME>" & profit > 0')) / len(data.query('director == "<NAME>"')))
display(len(data.query('director == "<NAME>" & profit > 0')) / len(data.query('director == "Cl<NAME>"')))
#answer - 1 (The code produced two valid answers and I guessed the wrong one; it turned out a film can have several directors — moral: inspect the data more carefully)
| [
"pandas.read_csv",
"pandas.set_option",
"numpy.array",
"IPython.display.display"
] | [((86, 125), 'pandas.set_option', 'pd.set_option', (['"""display.max_rows"""', '(1200)'], {}), "('display.max_rows', 1200)\n", (99, 125), True, 'import pandas as pd\n'), ((126, 166), 'pandas.set_option', 'pd.set_option', (['"""display.max_columns"""', '(20)'], {}), "('display.max_columns', 20)\n", (139, 166), True, 'import pandas as pd\n'), ((175, 198), 'pandas.read_csv', 'pd.read_csv', (['"""data.csv"""'], {}), "('data.csv')\n", (186, 198), True, 'import pandas as pd\n'), ((2374, 2388), 'IPython.display.display', 'display', (['pivot'], {}), '(pivot)\n', (2381, 2388), False, 'from IPython.display import display\n'), ((3165, 3176), 'IPython.display.display', 'display', (['df'], {}), '(df)\n', (3172, 3176), False, 'from IPython.display import display\n'), ((6256, 6267), 'IPython.display.display', 'display', (['df'], {}), '(df)\n', (6263, 6267), False, 'from IPython.display import display\n'), ((6611, 6625), 'IPython.display.display', 'display', (['pivot'], {}), '(pivot)\n', (6618, 6625), False, 'from IPython.display import display\n'), ((6877, 6891), 'IPython.display.display', 'display', (['pivot'], {}), '(pivot)\n', (6884, 6891), False, 'from IPython.display import display\n'), ((8828, 8841), 'numpy.array', 'np.array', (['lst'], {}), '(lst)\n', (8836, 8841), True, 'import numpy as np\n')] |
import argparse
import os
from pathlib import Path
import librosa
import numpy as np
import tqdm
import ruamel.yaml
from preprocessing.text import Pipeline
from utils.audio import Audio
# TTS dataset preprocessing script: cleans/phonemizes the transcript metadata,
# splits it into train/test metafiles, and writes one mel-spectrogram .npy
# file per wav.
parser = argparse.ArgumentParser()
parser.add_argument('--config', dest='CONFIG', type=str, required=True)
parser.add_argument('--dont_cache_phonemes', dest='CACHE_PHON', action='store_false')
parser.add_argument('--njobs', dest='NJOBS', type=int, default=16)
parser.add_argument('--col_sep', dest='COLUMN_SEP', type=str, default='|')
parser.add_argument('--recompute_phon', dest='RECOMPUTE_PHON', action='store_true')
args = parser.parse_args()
for arg in vars(args):
    print('{}: {}'.format(arg, getattr(args, arg)))
# Load paths and settings from <CONFIG>/data_config.yaml.
yaml = ruamel.yaml.YAML()
with open(str(Path(args.CONFIG) / 'data_config.yaml'), 'rb') as conf_yaml:
    config = yaml.load(conf_yaml)
args.DATA_DIR = config['data_directory']
args.META_FILE = os.path.join(args.DATA_DIR, config['metadata_filename'])
args.WAV_DIR = os.path.join(args.DATA_DIR, config['wav_subdir_name'])
args.TARGET_DIR = config['train_data_directory']
if args.TARGET_DIR is None:
    args.TARGET_DIR = args.DATA_DIR
mel_dir = os.path.join(args.TARGET_DIR, 'mels')
if not os.path.exists(mel_dir):
    os.makedirs(mel_dir)
phon_path = os.path.join(args.TARGET_DIR, 'phonemes.npy')
text_proc = Pipeline.default_training_pipeline(config['phoneme_language'], add_start_end=True)
# Phonemization is expensive, so the (filename, text, phonemes) array is
# cached to phonemes.npy unless --recompute_phon is given.
if os.path.exists(phon_path) and not args.RECOMPUTE_PHON:
    print('Using cached phonemes.')
    audio_data = np.load(phon_path)
else:
    print('\nLoading and cleaning text')
    audio_data = []
    with open(args.META_FILE, 'r', encoding='utf-8') as f:
        for l in f.readlines():
            l_split = l.split(args.COLUMN_SEP)
            filename, text = l_split[0], l_split[-1]
            if filename.endswith('.wav'):
                filename = filename.split('.')[0]
            text = text_proc.cleaner(text)
            audio_data.append((filename, text))
    audio_data = np.array(audio_data)
    print('\nPhonemizing')
    texts = audio_data[:, 1]
    batch_size = 250 # batch phonemization to avoid memory issues.
    phonemes = []
    for i in tqdm.tqdm(range(0, len(audio_data), batch_size)):
        batch = texts[i: i + batch_size]
        batch = text_proc.phonemizer(batch, njobs=args.NJOBS)
        phonemes.extend(batch)
    # Append the phoneme column: rows become (filename, text, phonemes).
    audio_data = np.concatenate([np.array(audio_data), np.expand_dims(phonemes, axis=1)], axis=1)
    if args.CACHE_PHON:
        np.save(phon_path, audio_data, allow_pickle=True)
print('\nBuilding dataset and writing files')
# Fixed seed keeps the train/test split reproducible across runs.
np.random.seed(42)
np.random.shuffle(audio_data)
test_metafile = os.path.join(args.TARGET_DIR, 'test_metafile.txt')
train_metafile = os.path.join(args.TARGET_DIR, 'train_metafile.txt')
# NOTE(review): the train slice `[config['n_test']:-1]` silently drops the
# last shuffled sample — verify whether `[config['n_test']:]` was intended.
test_lines = [''.join([filename, '|', text, '|', phon, '\n']) for filename, text, phon in
              audio_data[:config['n_test']]]
train_lines = [''.join([filename, '|', text, '|', phon, '\n']) for filename, text, phon in
               audio_data[config['n_test']:-1]]
with open(test_metafile, 'w+', encoding='utf-8') as test_f:
    test_f.writelines(test_lines)
with open(train_metafile, 'w+', encoding='utf-8') as train_f:
    train_f.writelines(train_lines)
# Compute and store one mel spectrogram (transposed: time-major) per wav.
audio = Audio(config)
for i in tqdm.tqdm(range(len(audio_data))):
    filename, _, _ = audio_data[i]
    wav_path = os.path.join(args.WAV_DIR, filename + '.wav')
    y, sr = librosa.load(wav_path, sr=config['sampling_rate'])
    mel = audio.mel_spectrogram(y)
    mel_path = os.path.join(mel_dir, filename)
    np.save(mel_path, mel.T)
print('\nDone')
| [
"numpy.load",
"numpy.save",
"numpy.random.seed",
"argparse.ArgumentParser",
"os.makedirs",
"preprocessing.text.Pipeline.default_training_pipeline",
"os.path.exists",
"numpy.expand_dims",
"pathlib.Path",
"utils.audio.Audio",
"numpy.array",
"librosa.load",
"os.path.join",
"numpy.random.shuff... | [((198, 223), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (221, 223), False, 'import argparse\n'), ((903, 959), 'os.path.join', 'os.path.join', (['args.DATA_DIR', "config['metadata_filename']"], {}), "(args.DATA_DIR, config['metadata_filename'])\n", (915, 959), False, 'import os\n'), ((975, 1029), 'os.path.join', 'os.path.join', (['args.DATA_DIR', "config['wav_subdir_name']"], {}), "(args.DATA_DIR, config['wav_subdir_name'])\n", (987, 1029), False, 'import os\n'), ((1154, 1191), 'os.path.join', 'os.path.join', (['args.TARGET_DIR', '"""mels"""'], {}), "(args.TARGET_DIR, 'mels')\n", (1166, 1191), False, 'import os\n'), ((1262, 1307), 'os.path.join', 'os.path.join', (['args.TARGET_DIR', '"""phonemes.npy"""'], {}), "(args.TARGET_DIR, 'phonemes.npy')\n", (1274, 1307), False, 'import os\n'), ((1320, 1406), 'preprocessing.text.Pipeline.default_training_pipeline', 'Pipeline.default_training_pipeline', (["config['phoneme_language']"], {'add_start_end': '(True)'}), "(config['phoneme_language'],\n add_start_end=True)\n", (1354, 1406), False, 'from preprocessing.text import Pipeline\n'), ((2588, 2606), 'numpy.random.seed', 'np.random.seed', (['(42)'], {}), '(42)\n', (2602, 2606), True, 'import numpy as np\n'), ((2607, 2636), 'numpy.random.shuffle', 'np.random.shuffle', (['audio_data'], {}), '(audio_data)\n', (2624, 2636), True, 'import numpy as np\n'), ((2653, 2703), 'os.path.join', 'os.path.join', (['args.TARGET_DIR', '"""test_metafile.txt"""'], {}), "(args.TARGET_DIR, 'test_metafile.txt')\n", (2665, 2703), False, 'import os\n'), ((2721, 2772), 'os.path.join', 'os.path.join', (['args.TARGET_DIR', '"""train_metafile.txt"""'], {}), "(args.TARGET_DIR, 'train_metafile.txt')\n", (2733, 2772), False, 'import os\n'), ((3250, 3263), 'utils.audio.Audio', 'Audio', (['config'], {}), '(config)\n', (3255, 3263), False, 'from utils.audio import Audio\n'), ((1199, 1222), 'os.path.exists', 'os.path.exists', (['mel_dir'], {}), 
'(mel_dir)\n', (1213, 1222), False, 'import os\n'), ((1228, 1248), 'os.makedirs', 'os.makedirs', (['mel_dir'], {}), '(mel_dir)\n', (1239, 1248), False, 'import os\n'), ((1406, 1431), 'os.path.exists', 'os.path.exists', (['phon_path'], {}), '(phon_path)\n', (1420, 1431), False, 'import os\n'), ((1514, 1532), 'numpy.load', 'np.load', (['phon_path'], {}), '(phon_path)\n', (1521, 1532), True, 'import numpy as np\n'), ((1996, 2016), 'numpy.array', 'np.array', (['audio_data'], {}), '(audio_data)\n', (2004, 2016), True, 'import numpy as np\n'), ((3358, 3403), 'os.path.join', 'os.path.join', (['args.WAV_DIR', "(filename + '.wav')"], {}), "(args.WAV_DIR, filename + '.wav')\n", (3370, 3403), False, 'import os\n'), ((3416, 3466), 'librosa.load', 'librosa.load', (['wav_path'], {'sr': "config['sampling_rate']"}), "(wav_path, sr=config['sampling_rate'])\n", (3428, 3466), False, 'import librosa\n'), ((3517, 3548), 'os.path.join', 'os.path.join', (['mel_dir', 'filename'], {}), '(mel_dir, filename)\n', (3529, 3548), False, 'import os\n'), ((3553, 3577), 'numpy.save', 'np.save', (['mel_path', 'mel.T'], {}), '(mel_path, mel.T)\n', (3560, 3577), True, 'import numpy as np\n'), ((2491, 2540), 'numpy.save', 'np.save', (['phon_path', 'audio_data'], {'allow_pickle': '(True)'}), '(phon_path, audio_data, allow_pickle=True)\n', (2498, 2540), True, 'import numpy as np\n'), ((2394, 2414), 'numpy.array', 'np.array', (['audio_data'], {}), '(audio_data)\n', (2402, 2414), True, 'import numpy as np\n'), ((2416, 2448), 'numpy.expand_dims', 'np.expand_dims', (['phonemes'], {'axis': '(1)'}), '(phonemes, axis=1)\n', (2430, 2448), True, 'import numpy as np\n'), ((750, 767), 'pathlib.Path', 'Path', (['args.CONFIG'], {}), '(args.CONFIG)\n', (754, 767), False, 'from pathlib import Path\n')] |
import numpy as np
from pyticle.particle import Particle
class SwarmOptimization:
    # Particle Swarm Optimization with elitism and elimination of weak
    # particles.  Minimizes `cost_func` over a box [low_bound, high_bound]^var_size.
    def __init__(
        self,
        cost_func: object,
        particle_num: int,
        omega_start: float,
        omega_end: float,
        coef: list,
        low_bound: float,
        high_bound: float,
        boundary_strategy: str,
        var_size: int,
        max_iter_num: int,
        elite_rate: float,
    ):
        """
        implements the Particle Swarm Intelligence class
        :param cost_func: the cost function to minimize
        :param particle_num: the number of particles
        :param omega_start: the starting value of Omega (linear schedule)
        :param omega_end: the ending value of Omega (linear schedule)
        :param coef: PSO coefficients (C1 and C2) as a list
        :param low_bound: the lower bound of variables in the optimization problem
        :param high_bound: the higher bound of variables in the optimization problem
        :param boundary_strategy: The strategy of handling particles outside of the boundary ("random", "clipping")
        :param var_size: the problem's dimension
        :param max_iter_num: the maximum number of iterations
        :param elite_rate: The elite rate in PSO
        """
        # params
        self.cost_func = cost_func
        self.particle_num = particle_num
        # One inertia weight per iteration, linearly interpolated.
        self.omega_schedule = np.linspace(
            omega_start, omega_end, max_iter_num, endpoint=True
        )
        self.coef = coef
        self.low_bound = low_bound
        self.high_bound = high_bound
        self.boundary_strategy = boundary_strategy
        self.var_size = var_size
        self.max_inter_num = max_iter_num
        self.elite_rate = elite_rate
        self.elimination_rate = 1 - elite_rate
        self.global_best_position = None
        self.global_best_fitness = np.inf
        self.particles = [
            Particle(
                low_bound=self.low_bound,
                high_bound=self.high_bound,
                var_size=self.var_size,
            )
            for _ in range(self.particle_num)
        ]
        # fitness and global best
        # Evaluate the initial swarm once so the global best is defined
        # before the first optimization step.
        for i in range(self.particle_num):
            self.particles[i].fitness = self.cost_func(self.particles[i].position)
            if self.particles[i].fitness < self.global_best_fitness:
                self.global_best_position = self.particles[i].position
                self.global_best_fitness = self.particles[i].fitness
    def particle_positions(self):
        """
        returns the position of all particles as a numpy array
        """
        # Each element is wrapped in a list so concatenate stacks rows.
        pos_list = [[i.position] for i in self.particles]
        return np.concatenate(pos_list, axis=0)
    def optimize(self):
        """
        Optimizes the cost function
        """
        # vars
        iter_num = 0
        self.fitness_max_hist = []
        self.fitness_mean_hist = []
        self.fitness_min_hist = []
        self.particle_positions_hist = []
        self.global_best_position_hist = []
        # the main loop
        while iter_num < self.max_inter_num:
            for particle_index in range(self.particle_num):
                self.update_particle(particle_index, iter_num)
            # store the history of optimization
            self.fitness_mean_hist.append(self.get_mean_fitness())
            self.fitness_min_hist.append(self.get_min_fitness())
            self.fitness_max_hist.append(self.get_max_fitness())
            self.particle_positions_hist.append(self.particle_positions())
            self.global_best_position_hist.append(self.global_best_position)
            iter_num = iter_num + 1
        return self.global_best_fitness, self.global_best_position
    def update_particle(self, particle_index: int, iter_num: int):
        """
        updates the velocity and position of a single particle
        :param particle_index: the index of the particle to update
        :param iter_num: the iteration number (for scheduling)
        """
        # no update for the global best
        # NOTE(review): this compares a fitness *difference* against a fitness
        # *quantile* — verify this elitism criterion is intentional.
        elite_limit = np.quantile(self.get_particles_fitness(), self.elite_rate)
        distance_to_global_best = np.abs(
            self.particles[particle_index].fitness - self.global_best_fitness
        )
        if distance_to_global_best > elite_limit:
            self.update_particle_vel(particle_index, iter_num)
            self.update_particle_position(particle_index)
    def update_particle_vel(self, particle_index: int, iter_num: int):
        """
        updates the velocity of one particle
        :param particle_index: the index of the particle to update
        :param iter_num: the iteration number (for scheduling)
        """
        r = np.random.uniform(low=0.0, high=1.0, size=2)
        omega = self.omega_schedule[iter_num]
        # NOTE(review): canonical PSO uses (best_position - position) and
        # (global_best_position - position) in the attraction terms; here the
        # positions are used directly — confirm this variant is intentional.
        vel = (
            omega * self.particles[particle_index].velocity
            + self.coef[0] * r[0] * self.particles[particle_index].best_position
            + self.coef[1] * r[1] * self.global_best_position
        )
        self.particles[particle_index].velocity = vel
    def update_particle_position(self, particle_index: int):
        """
        updates the position of one particle
        :param particle_index: the index of the particle to update
        """
        # choose the best direction to move
        # Move along +velocity or -velocity, whichever leaves fewer
        # coordinates outside the [low_bound, high_bound] box.
        p1 = (
            self.particles[particle_index].position
            + self.particles[particle_index].velocity
        )
        p2 = (
            self.particles[particle_index].position
            - self.particles[particle_index].velocity
        )
        if self.get_out_range_dim_num(p1) < self.get_out_range_dim_num(p2):
            self.particles[particle_index].position = p1
        else:
            self.particles[particle_index].position = p2
        # check for out of range positions
        for dim in range(self.var_size):
            dim_value = self.particles[particle_index].position[dim]
            # pass the dimension if it's in the right range
            if self.low_bound <= dim_value <= self.high_bound:
                continue
            if self.boundary_strategy == "random":
                # Resample the offending coordinate uniformly inside the box.
                dim_new_value = np.random.uniform(
                    low=self.low_bound, high=self.high_bound, size=1
                )
            elif self.boundary_strategy == "clipping":
                # Clamp the coordinate to the nearest bound.
                if dim_value < self.low_bound:
                    dim_new_value = self.low_bound
                if dim_value > self.high_bound:
                    dim_new_value = self.high_bound
            else:
                raise Exception(
                    f"{self.boundary_strategy} is a unknown boundary strategy"
                )
            self.particles[particle_index].position[dim] = dim_new_value
        # reset weak particles
        # Particles in the worst (1 - elite_rate) quantile are re-seeded near
        # the midpoint of their own best and the global best, plus noise.
        if self.particles[particle_index].fitness > np.quantile(
            self.get_particles_fitness(), self.elimination_rate
        ):
            mean_point = (self.high_bound - self.low_bound) / 2
            self.particles[particle_index].position = 0.5 * (
                self.particles[particle_index].best_position
                + self.global_best_position
                + np.random.normal(0.0, np.sqrt(mean_point), size=self.var_size)
            )
        self.particles[particle_index].fitness = self.cost_func(
            self.particles[particle_index].position
        )
        # update particle best
        if (
            self.particles[particle_index].fitness
            < self.particles[particle_index].best_fitness
        ):
            self.particles[particle_index].best_position = self.particles[
                particle_index
            ].position
            self.particles[particle_index].best_fitness = self.particles[
                particle_index
            ].fitness
        # update global best
        if self.particles[particle_index].fitness < self.global_best_fitness:
            self.global_best_position = self.particles[particle_index].position
            self.global_best_fitness = self.particles[particle_index].fitness
    def get_particles_fitness(self):
        """
        returns the fitness of all particles as a list
        """
        return [i.fitness for i in self.particles]
    def get_mean_fitness(self):
        """
        returns the mean of fitness of all particles
        """
        return np.mean(self.get_particles_fitness())
    def get_median_fitness(self):
        """
        returns the median of fitness of all particles
        """
        return np.median(self.get_particles_fitness())
    def get_max_fitness(self):
        """
        returns the max of fitness of all particles
        """
        return np.max(self.get_particles_fitness())
    def get_min_fitness(self):
        """
        returns the min of fitness of all particles
        """
        return np.min(self.get_particles_fitness())
    def get_out_range_dim_num(self, position):
        """
        counts the number of dimensions of a position that are out of range
        :param position: the particle's position
        """
        return np.sum(
            [
                0 if self.low_bound <= position[dim] <= self.high_bound else 1
                for dim in range(self.var_size)
            ]
        )
| [
"numpy.random.uniform",
"numpy.abs",
"numpy.linspace",
"pyticle.particle.Particle",
"numpy.concatenate",
"numpy.sqrt"
] | [((1376, 1440), 'numpy.linspace', 'np.linspace', (['omega_start', 'omega_end', 'max_iter_num'], {'endpoint': '(True)'}), '(omega_start, omega_end, max_iter_num, endpoint=True)\n', (1387, 1440), True, 'import numpy as np\n'), ((2664, 2696), 'numpy.concatenate', 'np.concatenate', (['pos_list'], {'axis': '(0)'}), '(pos_list, axis=0)\n', (2678, 2696), True, 'import numpy as np\n'), ((4148, 4221), 'numpy.abs', 'np.abs', (['(self.particles[particle_index].fitness - self.global_best_fitness)'], {}), '(self.particles[particle_index].fitness - self.global_best_fitness)\n', (4154, 4221), True, 'import numpy as np\n'), ((4700, 4744), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(0.0)', 'high': '(1.0)', 'size': '(2)'}), '(low=0.0, high=1.0, size=2)\n', (4717, 4744), True, 'import numpy as np\n'), ((1892, 1983), 'pyticle.particle.Particle', 'Particle', ([], {'low_bound': 'self.low_bound', 'high_bound': 'self.high_bound', 'var_size': 'self.var_size'}), '(low_bound=self.low_bound, high_bound=self.high_bound, var_size=\n self.var_size)\n', (1900, 1983), False, 'from pyticle.particle import Particle\n'), ((6173, 6240), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': 'self.low_bound', 'high': 'self.high_bound', 'size': '(1)'}), '(low=self.low_bound, high=self.high_bound, size=1)\n', (6190, 6240), True, 'import numpy as np\n'), ((7197, 7216), 'numpy.sqrt', 'np.sqrt', (['mean_point'], {}), '(mean_point)\n', (7204, 7216), True, 'import numpy as np\n')] |
from __future__ import print_function
from pandas import DataFrame
from .magrittr import import_methods
# Inject DataFrame methods (head, set_index, stack, ...) into this module's
# globals so they can be used pipe-style via the `>>` operator.
import_methods(obj=DataFrame(), namespace=globals(), strict=True)
if __name__=='__main__':
    import numpy as np
    df = DataFrame(np.arange(9).reshape((3,3)), columns=list('abc'))
    print('>>> df')
    print(df)
    print(">>> df >> head(2)")
    print(df >> head(2))
    print(">>> df >> set_index('c')")
    print(df >> set_index('c'))
    print(">>> df >> set_index('c') >> stack()")
    print(df >> set_index('c') >> stack())
    print("\nDataFrame and Series share a lot of methods and by default we "
          "check the types as this can catch errors early and give more "
          " informative error messages.\n")
    # strict=True type-checks the piped object, so piping a Series into a
    # DataFrame-bound method raises.
    print(">>> df >> set_index('c') >> stack() >> head(3)")
    try:
        print(df >> set_index('c') >> stack() >> head(3))
    except Exception as ex:
        print(ex)
    print("\nHowever this can be turned off and we can use duck-typing "
          "instead.\n")
    print(">>> import_methods(obj=DataFrame(), namespace=globals(), "
          "strict=False)")
    # Re-import without type checking: the same pipeline now succeeds.
    import_methods(obj=DataFrame(), namespace=globals(), strict=False)
    print(">>> df >> set_index('c') >> stack() >> head(3)")
    print(df >> set_index('c') >> stack() >> head(3))
| [
"pandas.DataFrame",
"numpy.arange"
] | [((127, 138), 'pandas.DataFrame', 'DataFrame', ([], {}), '()\n', (136, 138), False, 'from pandas import DataFrame\n'), ((1130, 1141), 'pandas.DataFrame', 'DataFrame', ([], {}), '()\n', (1139, 1141), False, 'from pandas import DataFrame\n'), ((243, 255), 'numpy.arange', 'np.arange', (['(9)'], {}), '(9)\n', (252, 255), True, 'import numpy as np\n')] |
import numpy as np
from random import expovariate, choice
from math import comb
# Monte-Carlo comparison of Prism voter-chain confirmation vs. Bitcoin
# k-deep confirmation, for several adversary hash powers (beta).
hashpowers = [0.1, 0.2, 0.3, 0.4]
voter_depth_k = [1, 2, 3, 4]
num_voter_chains = [10, 20, 30, 40]
success_prob = {}
for beta in hashpowers:
    nsamples = 100000
    nblocks = 200
    # Simulate block mining times
    # Inter-block times are exponential with rate = hash power share.
    # NOTE(review): range(1, ...) leaves row 0 and column 0 all-zero, so one
    # degenerate sample is included in the count/nsamples denominator —
    # verify whether range(nsamples)/range(nblocks) was intended.
    adv_wt = np.zeros((nsamples, nblocks), dtype=np.float64)
    honest_wt = np.zeros((nsamples, nblocks), dtype=np.float64)
    for i in range(1, nsamples):
        for j in range(1, nblocks):
            adv_wt[i, j] = expovariate(beta)
            honest_wt[i, j] = expovariate(1 - beta)
    adv_mtime = np.cumsum(adv_wt, axis=1)
    honest_mtime = np.cumsum(honest_wt, axis=1)
    success_prob[beta] = {}
    # success_prob[beta][k]: probability the adversary chain ever overtakes
    # the honest chain after depth k (attack success at confirmation depth k).
    for k in range(1, 100):
        simlen = max(2*k, k+100)
        count = 0.0
        for i in range(nsamples):
            if np.any(adv_mtime[i, k+1:simlen] < honest_mtime[i, k+1:simlen]):
                count += 1.0
        p = (count/nsamples)
        if np.isclose(p, 0.0):
            break
        else:
            success_prob[beta][k] = p
for beta in hashpowers:
    for prism_k in voter_depth_k:
        # p is rev prob for a single chain
        p = success_prob[beta][prism_k]
        for m in num_voter_chains:
            # calculate epsilon overall rev prob guaranteed by prism
            # Reversal needs a majority of the m voter chains to revert.
            epsilon = 0
            for i in range((m // 2) + 1, m + 1):
                epsilon += (comb(m, i) * pow(p, i) * pow((1-p), (m - i)))
            # find k for which bitcoin can guarantee epsilon rev probability
            bitcoin_k = None
            for k in success_prob[beta]:
                if success_prob[beta][k] <= epsilon:
                    bitcoin_k = k
                    break
            if bitcoin_k is None:
                print('Could not find bitcoin_k for beta: %0.1f, prism_k: %d, p: %s, voter chains: %d, rev_prob: %s'
                      % (beta, prism_k, p, m, epsilon))
            else:
                print('beta: %0.1f, prism_k: %d, p: %s, voter chains: %d, rev_prob: %s, bitcoin_k: %d'
                      % (beta, prism_k, p, m, epsilon, bitcoin_k))
| [
"random.expovariate",
"math.comb",
"numpy.zeros",
"numpy.any",
"numpy.cumsum",
"numpy.isclose"
] | [((314, 361), 'numpy.zeros', 'np.zeros', (['(nsamples, nblocks)'], {'dtype': 'np.float64'}), '((nsamples, nblocks), dtype=np.float64)\n', (322, 361), True, 'import numpy as np\n'), ((378, 425), 'numpy.zeros', 'np.zeros', (['(nsamples, nblocks)'], {'dtype': 'np.float64'}), '((nsamples, nblocks), dtype=np.float64)\n', (386, 425), True, 'import numpy as np\n'), ((608, 633), 'numpy.cumsum', 'np.cumsum', (['adv_wt'], {'axis': '(1)'}), '(adv_wt, axis=1)\n', (617, 633), True, 'import numpy as np\n'), ((653, 681), 'numpy.cumsum', 'np.cumsum', (['honest_wt'], {'axis': '(1)'}), '(honest_wt, axis=1)\n', (662, 681), True, 'import numpy as np\n'), ((975, 993), 'numpy.isclose', 'np.isclose', (['p', '(0.0)'], {}), '(p, 0.0)\n', (985, 993), True, 'import numpy as np\n'), ((522, 539), 'random.expovariate', 'expovariate', (['beta'], {}), '(beta)\n', (533, 539), False, 'from random import expovariate, choice\n'), ((570, 591), 'random.expovariate', 'expovariate', (['(1 - beta)'], {}), '(1 - beta)\n', (581, 591), False, 'from random import expovariate, choice\n'), ((841, 907), 'numpy.any', 'np.any', (['(adv_mtime[i, k + 1:simlen] < honest_mtime[i, k + 1:simlen])'], {}), '(adv_mtime[i, k + 1:simlen] < honest_mtime[i, k + 1:simlen])\n', (847, 907), True, 'import numpy as np\n'), ((1424, 1434), 'math.comb', 'comb', (['m', 'i'], {}), '(m, i)\n', (1428, 1434), False, 'from math import comb\n')] |
#!/usr/bin/env python
"""
Calculates the Lambertian, BRDF corrected and BRDF + Terrain corrected
----------------------------------------------------------------------
reflectance
-----------
"""
from __future__ import absolute_import, print_function
import numpy
import h5py
from wagl.constants import DatasetName, GroupName, BrdfDirectionalParameters
from wagl.constants import AtmosphericCoefficients as AC
from wagl.constants import ArdProducts as AP
from wagl.data import as_array
from wagl.hdf5 import H5CompressionFilter, attach_image_attributes
from wagl.hdf5 import create_external_link, find
from wagl.metadata import create_ard_yaml
from wagl.__surface_reflectance import reflectance
NO_DATA_VALUE = -999
def _calculate_reflectance(acquisition, acquisitions, interpolation_fname,
                           satellite_solar_angles_fname, slope_aspect_fname,
                           relative_slope_fname, incident_angles_fname,
                           exiting_angles_fname, shadow_masks_fname,
                           ancillary_fname, rori, out_fname, compression,
                           filter_opts, normalized_solar_zenith):
    """
    File-based entry point for the internal NBAR workflow: opens every
    input HDF5 file, resolves the expected root group inside each, then
    delegates to `calculate_reflectance` and writes the ARD metadata
    document into the output file.
    """
    with h5py.File(interpolation_fname, 'r') as interp_fid,\
            h5py.File(satellite_solar_angles_fname, 'r') as sat_sol_fid,\
            h5py.File(slope_aspect_fname, 'r') as slp_asp_fid,\
            h5py.File(relative_slope_fname, 'r') as rel_slp_fid,\
            h5py.File(incident_angles_fname, 'r') as inc_fid,\
            h5py.File(exiting_angles_fname, 'r') as exi_fid,\
            h5py.File(shadow_masks_fname, 'r') as shadow_fid,\
            h5py.File(ancillary_fname, 'r') as anc_fid,\
            h5py.File(out_fname, 'w') as out_fid:
        # resolve the root group within each opened file
        interp_grp = interp_fid[GroupName.INTERP_GROUP.value]
        sat_sol_grp = sat_sol_fid[GroupName.SAT_SOL_GROUP.value]
        slp_asp_grp = slp_asp_fid[GroupName.SLP_ASP_GROUP.value]
        rel_slp_grp = rel_slp_fid[GroupName.REL_SLP_GROUP.value]
        incident_grp = inc_fid[GroupName.INCIDENT_GROUP.value]
        exiting_grp = exi_fid[GroupName.EXITING_GROUP.value]
        shadow_grp = shadow_fid[GroupName.SHADOW_GROUP.value]
        ancillary_grp = anc_fid[GroupName.ANCILLARY_GROUP.value]

        calculate_reflectance(acquisition, interp_grp, sat_sol_grp,
                              slp_asp_grp, rel_slp_grp, incident_grp,
                              exiting_grp, shadow_grp, ancillary_grp,
                              rori, out_fid, compression, filter_opts,
                              normalized_solar_zenith)

        create_ard_yaml(acquisitions, ancillary_grp, out_fid,
                        normalized_solar_zenith)
def calculate_reflectance(acquisition, interpolation_group,
                          satellite_solar_group, slope_aspect_group,
                          relative_slope_group, incident_angles_group,
                          exiting_angles_group, shadow_masks_group,
                          ancillary_group, rori, out_group=None,
                          compression=H5CompressionFilter.LZF,
                          filter_opts=None, normalized_solar_zenith=45.0, esun=None):
    """
    Calculates Lambertian, BRDF corrected and BRDF + terrain
    illumination corrected surface reflectance.

    :param acquisition:
        An instance of an acquisition object.

    :param interpolation_group:
        The root HDF5 `Group` that contains the interpolated
        atmospheric coefficients.
        The dataset pathnames are given by:

        * DatasetName.INTERPOLATION_FMT

    :param satellite_solar_group:
        The root HDF5 `Group` that contains the solar zenith and
        solar azimuth datasets specified by the pathnames given by:

        * DatasetName.SOLAR_ZENITH
        * DatasetName.SOLAR_AZIMUTH
        * DatasetName.SATELLITE_VIEW
        * DatasetName.SATELLITE_AZIMUTH
        * DatasetName.RELATIVE_AZIMUTH

    :param slope_aspect_group:
        The root HDF5 `Group` that contains the slope and aspect
        datasets specified by the pathnames given by:

        * DatasetName.SLOPE
        * DatasetName.ASPECT

    :param relative_slope_group:
        The root HDF5 `Group` that contains the relative slope dataset
        specified by the pathname given by:

        * DatasetName.RELATIVE_SLOPE

    :param incident_angles_group:
        The root HDF5 `Group` that contains the incident
        angle dataset specified by the pathname given by:

        * DatasetName.INCIDENT

    :param exiting_angles_group:
        The root HDF5 `Group` that contains the exiting
        angle dataset specified by the pathname given by:

        * DatasetName.EXITING

    :param shadow_masks_group:
        The root HDF5 `Group` that contains the combined shadow
        masks; self shadow, cast shadow (solar),
        cast shadow (satellite), dataset specified by the pathname
        given by:

        * DatasetName.COMBINED_SHADOW

    :param ancillary_group:
        The root HDF5 `Group` that contains the Isotropic (iso),
        RossThick (vol), and LiSparseR (geo) BRDF scalar parameters.
        The dataset pathnames are given by:

        * DatasetName.BRDF_FMT

    :param rori:
        Threshold for terrain correction. Fuqin to document.

    :param out_group:
        If set to None (default) then the results will be returned
        as an in-memory hdf5 file, i.e. the `core` driver. Otherwise,
        a writeable HDF5 `Group` object.

        The dataset names will be given by the format string detailed
        by:

        * DatasetName.REFLECTANCE_FMT

        The reflectance products are:

        * lambertian
        * nbar (BRDF corrected reflectance)
        * nbart (BRDF + terrain illumination corrected reflectance)

    :param compression:
        The compression filter to use.
        Default is H5CompressionFilter.LZF

    :param filter_opts:
        A dict of key value pairs available to the given configuration
        instance of H5CompressionFilter. For example
        H5CompressionFilter.LZF has the keywords *chunks* and *shuffle*
        available.
        Default is None, which will use the default settings for the
        chosen H5CompressionFilter instance.

    :param normalized_solar_zenith:
        A float value type to normalize reflectance to a particular angle.

    :param esun:
        A float value type. A solar irradiance normal to atmosphere
        in unit of W/sq cm/sr/nm.

    :return:
        An opened `h5py.File` object, that is either in-memory using the
        `core` driver, or on disk.
    """
    geobox = acquisition.gridded_geo_box()
    bn = acquisition.band_name

    # Interpolated atmospheric coefficient datasets for this band.
    # These are h5py dataset handles; pixel data is read lazily,
    # tile by tile, in the loop further below.
    dname_fmt = DatasetName.INTERPOLATION_FMT.value
    fv_dataset = interpolation_group[dname_fmt.format(coefficient=AC.FV.value,
                                                      band_name=bn)]
    fs_dataset = interpolation_group[dname_fmt.format(coefficient=AC.FS.value,
                                                      band_name=bn)]
    b_dataset = interpolation_group[dname_fmt.format(coefficient=AC.B.value,
                                                     band_name=bn)]
    s_dataset = interpolation_group[dname_fmt.format(coefficient=AC.S.value,
                                                     band_name=bn)]
    a_dataset = interpolation_group[dname_fmt.format(coefficient=AC.A.value,
                                                     band_name=bn)]
    dir_dataset = interpolation_group[dname_fmt.format(coefficient=AC.DIR.value,
                                                       band_name=bn)]
    dif_dataset = interpolation_group[dname_fmt.format(coefficient=AC.DIF.value,
                                                       band_name=bn)]
    ts_dataset = interpolation_group[dname_fmt.format(coefficient=AC.TS.value,
                                                      band_name=bn)]
    # satellite/solar geometry datasets
    solar_zenith_dset = satellite_solar_group[DatasetName.SOLAR_ZENITH.value]
    solar_azimuth_dset = satellite_solar_group[DatasetName.SOLAR_AZIMUTH.value]
    satellite_v_dset = satellite_solar_group[DatasetName.SATELLITE_VIEW.value]
    relative_a_dset = satellite_solar_group[DatasetName.RELATIVE_AZIMUTH.value]
    # terrain and shadow datasets
    slope_dataset = slope_aspect_group[DatasetName.SLOPE.value]
    aspect_dataset = slope_aspect_group[DatasetName.ASPECT.value]
    relative_s_dset = relative_slope_group[DatasetName.RELATIVE_SLOPE.value]
    incident_angle_dataset = incident_angles_group[DatasetName.INCIDENT.value]
    exiting_angle_dataset = exiting_angles_group[DatasetName.EXITING.value]
    shadow_dataset = shadow_masks_group[DatasetName.COMBINED_SHADOW.value]

    # Scalar BRDF parameters for this band (read fully into memory via [()])
    dname_fmt = DatasetName.BRDF_FMT.value
    dname = dname_fmt.format(band_name=bn, parameter=BrdfDirectionalParameters.ALPHA_1.value)
    brdf_alpha1 = ancillary_group[dname][()]

    dname = dname_fmt.format(band_name=bn, parameter=BrdfDirectionalParameters.ALPHA_2.value)
    brdf_alpha2 = ancillary_group[dname][()]

    # Initialise the output file
    # with no out_group supplied, fall back to an in-memory HDF5 file
    # (core driver, not backed to disk)
    if out_group is None:
        fid = h5py.File('surface-reflectance.h5', driver='core',
                        backing_store=False)
    else:
        fid = out_group

    if GroupName.STANDARD_GROUP.value not in fid:
        fid.create_group(GroupName.STANDARD_GROUP.value)

    # copy before mutating so the caller's dict is left untouched
    if filter_opts is None:
        filter_opts = {}
    else:
        filter_opts = filter_opts.copy()
    filter_opts['chunks'] = acquisition.tile_size

    kwargs = compression.config(**filter_opts).dataset_compression_kwargs()
    grp = fid[GroupName.STANDARD_GROUP.value]
    kwargs['shape'] = (acquisition.lines, acquisition.samples)
    kwargs['fillvalue'] = NO_DATA_VALUE
    # outputs are reflectance scaled by 10000, stored as int16
    kwargs['dtype'] = 'int16'

    # create the datasets
    dname_fmt = DatasetName.REFLECTANCE_FMT.value
    dname = dname_fmt.format(product=AP.LAMBERTIAN.value, band_name=bn)
    lmbrt_dset = grp.create_dataset(dname, **kwargs)

    dname = dname_fmt.format(product=AP.NBAR.value, band_name=bn)
    nbar_dset = grp.create_dataset(dname, **kwargs)

    dname = dname_fmt.format(product=AP.NBART.value, band_name=bn)
    nbart_dset = grp.create_dataset(dname, **kwargs)

    # attach some attributes to the image datasets
    attrs = {'crs_wkt': geobox.crs.ExportToWkt(),
             'geotransform': geobox.transform.to_gdal(),
             'no_data_value': kwargs['fillvalue'],
             'rori_threshold_setting': rori,
             'platform_id': acquisition.platform_id,
             'sensor_id': acquisition.sensor_id,
             'band_id': acquisition.band_id,
             'band_name': bn,
             'alias': acquisition.alias}

    desc = "Contains the lambertian reflectance data scaled by 10000."
    attrs['description'] = desc
    attach_image_attributes(lmbrt_dset, attrs)

    desc = "Contains the brdf corrected reflectance data scaled by 10000."
    attrs['description'] = desc
    attach_image_attributes(nbar_dset, attrs)

    desc = ("Contains the brdf and terrain corrected reflectance data scaled "
            "by 10000.")
    attrs['description'] = desc
    attach_image_attributes(nbart_dset, attrs)

    # process by tile
    for tile in acquisition.tiles():
        # tile indices
        idx = (slice(tile[0][0], tile[0][1]), slice(tile[1][0], tile[1][1]))

        # define some static arguments
        acq_args = {'window': tile,
                    'out_no_data': NO_DATA_VALUE,
                    'esun': esun}
        f32_args = {'dtype': numpy.float32, 'transpose': True}

        # Read the data corresponding to the current tile for all dataset
        # Convert the datatype if required and transpose
        band_data = as_array(acquisition.radiance_data(**acq_args), **f32_args)

        shadow = as_array(shadow_dataset[idx], numpy.int8, transpose=True)
        solar_zenith = as_array(solar_zenith_dset[idx], **f32_args)
        solar_azimuth = as_array(solar_azimuth_dset[idx], **f32_args)
        satellite_view = as_array(satellite_v_dset[idx], **f32_args)
        relative_angle = as_array(relative_a_dset[idx], **f32_args)
        slope = as_array(slope_dataset[idx], **f32_args)
        aspect = as_array(aspect_dataset[idx], **f32_args)
        incident_angle = as_array(incident_angle_dataset[idx], **f32_args)
        exiting_angle = as_array(exiting_angle_dataset[idx], **f32_args)
        relative_slope = as_array(relative_s_dset[idx], **f32_args)
        a_mod = as_array(a_dataset[idx], **f32_args)
        b_mod = as_array(b_dataset[idx], **f32_args)
        s_mod = as_array(s_dataset[idx], **f32_args)
        fs = as_array(fs_dataset[idx], **f32_args)
        fv = as_array(fv_dataset[idx], **f32_args)
        ts = as_array(ts_dataset[idx], **f32_args)
        direct = as_array(dir_dataset[idx], **f32_args)
        diffuse = as_array(dif_dataset[idx], **f32_args)

        # Allocate the output arrays
        xsize, ysize = band_data.shape  # band_data has been transposed
        ref_lm = numpy.zeros((ysize, xsize), dtype='int16')
        ref_brdf = numpy.zeros((ysize, xsize), dtype='int16')
        ref_terrain = numpy.zeros((ysize, xsize), dtype='int16')

        # Allocate the work arrays (single row of data)
        ref_lm_work = numpy.zeros(xsize, dtype='float32')
        ref_brdf_work = numpy.zeros(xsize, dtype='float32')
        ref_terrain_work = numpy.zeros(xsize, dtype='float32')

        # Run terrain correction
        # NOTE(review): the positional argument order must match the
        # `reflectance` extension's signature exactly; do not reorder.
        reflectance(xsize, ysize, rori, brdf_alpha1, brdf_alpha2,
                    acquisition.reflectance_adjustment, kwargs['fillvalue'],
                    band_data, shadow, solar_zenith, solar_azimuth,
                    satellite_view, relative_angle, slope, aspect,
                    incident_angle, exiting_angle, relative_slope, a_mod,
                    b_mod, s_mod, fs, fv, ts, direct, diffuse, ref_lm_work,
                    ref_brdf_work, ref_terrain_work, ref_lm.transpose(),
                    ref_brdf.transpose(), ref_terrain.transpose(), normalized_solar_zenith)

        # Write the current tile to disk
        lmbrt_dset[idx] = ref_lm
        nbar_dset[idx] = ref_brdf
        nbart_dset[idx] = ref_terrain

    # close any still opened files, arrays etc associated with the acquisition
    acquisition.close()

    if out_group is None:
        return fid
def link_standard_data(input_fnames, out_fname):
    # TODO: incorporate linking for multi-granule and multi-group
    #       datasets
    """
    Links the individual reflectance and surface temperature
    results into a single file for easier access.

    :param input_fnames:
        Iterable of HDF5 filenames whose IMAGE datasets will be
        externally linked into the output file.

    :param out_fname:
        The output HDF5 filename; created/appended to as needed.
    """
    for fname in input_fnames:
        # external-link every IMAGE-class dataset into the output file
        with h5py.File(fname, 'r') as fid:
            dataset_names = find(fid, dataset_class='IMAGE')

        for dname in dataset_names:
            create_external_link(fname, dname, out_fname, dname)

        # metadata: copy the yaml documents if present and not yet copied.
        # NOTE: the output must be opened in append mode explicitly;
        # h5py >= 3.0 defaults to read-only, which would break the copy.
        with h5py.File(fname, 'r') as fid, h5py.File(out_fname, 'a') as out_fid:
            for yaml_dname in (DatasetName.NBAR_YAML.value,
                               DatasetName.SBT_YAML.value):
                if yaml_dname in fid and yaml_dname not in out_fid:
                    fid.copy(yaml_dname, out_fid, name=yaml_dname)
| [
"wagl.data.as_array",
"wagl.hdf5.find",
"h5py.File",
"wagl.hdf5.attach_image_attributes",
"numpy.zeros",
"wagl.metadata.create_ard_yaml",
"wagl.hdf5.create_external_link"
] | [((10579, 10621), 'wagl.hdf5.attach_image_attributes', 'attach_image_attributes', (['lmbrt_dset', 'attrs'], {}), '(lmbrt_dset, attrs)\n', (10602, 10621), False, 'from wagl.hdf5 import H5CompressionFilter, attach_image_attributes\n'), ((10734, 10775), 'wagl.hdf5.attach_image_attributes', 'attach_image_attributes', (['nbar_dset', 'attrs'], {}), '(nbar_dset, attrs)\n', (10757, 10775), False, 'from wagl.hdf5 import H5CompressionFilter, attach_image_attributes\n'), ((10917, 10959), 'wagl.hdf5.attach_image_attributes', 'attach_image_attributes', (['nbart_dset', 'attrs'], {}), '(nbart_dset, attrs)\n', (10940, 10959), False, 'from wagl.hdf5 import H5CompressionFilter, attach_image_attributes\n'), ((1275, 1310), 'h5py.File', 'h5py.File', (['interpolation_fname', '"""r"""'], {}), "(interpolation_fname, 'r')\n", (1284, 1310), False, 'import h5py\n'), ((1335, 1379), 'h5py.File', 'h5py.File', (['satellite_solar_angles_fname', '"""r"""'], {}), "(satellite_solar_angles_fname, 'r')\n", (1344, 1379), False, 'import h5py\n'), ((1405, 1439), 'h5py.File', 'h5py.File', (['slope_aspect_fname', '"""r"""'], {}), "(slope_aspect_fname, 'r')\n", (1414, 1439), False, 'import h5py\n'), ((1465, 1501), 'h5py.File', 'h5py.File', (['relative_slope_fname', '"""r"""'], {}), "(relative_slope_fname, 'r')\n", (1474, 1501), False, 'import h5py\n'), ((1527, 1564), 'h5py.File', 'h5py.File', (['incident_angles_fname', '"""r"""'], {}), "(incident_angles_fname, 'r')\n", (1536, 1564), False, 'import h5py\n'), ((1586, 1622), 'h5py.File', 'h5py.File', (['exiting_angles_fname', '"""r"""'], {}), "(exiting_angles_fname, 'r')\n", (1595, 1622), False, 'import h5py\n'), ((1644, 1678), 'h5py.File', 'h5py.File', (['shadow_masks_fname', '"""r"""'], {}), "(shadow_masks_fname, 'r')\n", (1653, 1678), False, 'import h5py\n'), ((1703, 1734), 'h5py.File', 'h5py.File', (['ancillary_fname', '"""r"""'], {}), "(ancillary_fname, 'r')\n", (1712, 1734), False, 'import h5py\n'), ((1756, 1781), 'h5py.File', 'h5py.File', 
(['out_fname', '"""w"""'], {}), "(out_fname, 'w')\n", (1765, 1781), False, 'import h5py\n'), ((2464, 2529), 'wagl.metadata.create_ard_yaml', 'create_ard_yaml', (['acquisitions', 'grp8', 'fid', 'normalized_solar_zenith'], {}), '(acquisitions, grp8, fid, normalized_solar_zenith)\n', (2479, 2529), False, 'from wagl.metadata import create_ard_yaml\n'), ((8907, 8978), 'h5py.File', 'h5py.File', (['"""surface-reflectance.h5"""'], {'driver': '"""core"""', 'backing_store': '(False)'}), "('surface-reflectance.h5', driver='core', backing_store=False)\n", (8916, 8978), False, 'import h5py\n'), ((11581, 11638), 'wagl.data.as_array', 'as_array', (['shadow_dataset[idx]', 'numpy.int8'], {'transpose': '(True)'}), '(shadow_dataset[idx], numpy.int8, transpose=True)\n', (11589, 11638), False, 'from wagl.data import as_array\n'), ((11662, 11706), 'wagl.data.as_array', 'as_array', (['solar_zenith_dset[idx]'], {}), '(solar_zenith_dset[idx], **f32_args)\n', (11670, 11706), False, 'from wagl.data import as_array\n'), ((11731, 11776), 'wagl.data.as_array', 'as_array', (['solar_azimuth_dset[idx]'], {}), '(solar_azimuth_dset[idx], **f32_args)\n', (11739, 11776), False, 'from wagl.data import as_array\n'), ((11802, 11845), 'wagl.data.as_array', 'as_array', (['satellite_v_dset[idx]'], {}), '(satellite_v_dset[idx], **f32_args)\n', (11810, 11845), False, 'from wagl.data import as_array\n'), ((11871, 11913), 'wagl.data.as_array', 'as_array', (['relative_a_dset[idx]'], {}), '(relative_a_dset[idx], **f32_args)\n', (11879, 11913), False, 'from wagl.data import as_array\n'), ((11930, 11970), 'wagl.data.as_array', 'as_array', (['slope_dataset[idx]'], {}), '(slope_dataset[idx], **f32_args)\n', (11938, 11970), False, 'from wagl.data import as_array\n'), ((11988, 12029), 'wagl.data.as_array', 'as_array', (['aspect_dataset[idx]'], {}), '(aspect_dataset[idx], **f32_args)\n', (11996, 12029), False, 'from wagl.data import as_array\n'), ((12055, 12104), 'wagl.data.as_array', 'as_array', 
(['incident_angle_dataset[idx]'], {}), '(incident_angle_dataset[idx], **f32_args)\n', (12063, 12104), False, 'from wagl.data import as_array\n'), ((12129, 12177), 'wagl.data.as_array', 'as_array', (['exiting_angle_dataset[idx]'], {}), '(exiting_angle_dataset[idx], **f32_args)\n', (12137, 12177), False, 'from wagl.data import as_array\n'), ((12203, 12245), 'wagl.data.as_array', 'as_array', (['relative_s_dset[idx]'], {}), '(relative_s_dset[idx], **f32_args)\n', (12211, 12245), False, 'from wagl.data import as_array\n'), ((12262, 12298), 'wagl.data.as_array', 'as_array', (['a_dataset[idx]'], {}), '(a_dataset[idx], **f32_args)\n', (12270, 12298), False, 'from wagl.data import as_array\n'), ((12315, 12351), 'wagl.data.as_array', 'as_array', (['b_dataset[idx]'], {}), '(b_dataset[idx], **f32_args)\n', (12323, 12351), False, 'from wagl.data import as_array\n'), ((12368, 12404), 'wagl.data.as_array', 'as_array', (['s_dataset[idx]'], {}), '(s_dataset[idx], **f32_args)\n', (12376, 12404), False, 'from wagl.data import as_array\n'), ((12418, 12455), 'wagl.data.as_array', 'as_array', (['fs_dataset[idx]'], {}), '(fs_dataset[idx], **f32_args)\n', (12426, 12455), False, 'from wagl.data import as_array\n'), ((12469, 12506), 'wagl.data.as_array', 'as_array', (['fv_dataset[idx]'], {}), '(fv_dataset[idx], **f32_args)\n', (12477, 12506), False, 'from wagl.data import as_array\n'), ((12520, 12557), 'wagl.data.as_array', 'as_array', (['ts_dataset[idx]'], {}), '(ts_dataset[idx], **f32_args)\n', (12528, 12557), False, 'from wagl.data import as_array\n'), ((12575, 12613), 'wagl.data.as_array', 'as_array', (['dir_dataset[idx]'], {}), '(dir_dataset[idx], **f32_args)\n', (12583, 12613), False, 'from wagl.data import as_array\n'), ((12632, 12670), 'wagl.data.as_array', 'as_array', (['dif_dataset[idx]'], {}), '(dif_dataset[idx], **f32_args)\n', (12640, 12670), False, 'from wagl.data import as_array\n'), ((12797, 12839), 'numpy.zeros', 'numpy.zeros', (['(ysize, xsize)'], {'dtype': 
'"""int16"""'}), "((ysize, xsize), dtype='int16')\n", (12808, 12839), False, 'import numpy\n'), ((12859, 12901), 'numpy.zeros', 'numpy.zeros', (['(ysize, xsize)'], {'dtype': '"""int16"""'}), "((ysize, xsize), dtype='int16')\n", (12870, 12901), False, 'import numpy\n'), ((12924, 12966), 'numpy.zeros', 'numpy.zeros', (['(ysize, xsize)'], {'dtype': '"""int16"""'}), "((ysize, xsize), dtype='int16')\n", (12935, 12966), False, 'import numpy\n'), ((13046, 13081), 'numpy.zeros', 'numpy.zeros', (['xsize'], {'dtype': '"""float32"""'}), "(xsize, dtype='float32')\n", (13057, 13081), False, 'import numpy\n'), ((13106, 13141), 'numpy.zeros', 'numpy.zeros', (['xsize'], {'dtype': '"""float32"""'}), "(xsize, dtype='float32')\n", (13117, 13141), False, 'import numpy\n'), ((13169, 13204), 'numpy.zeros', 'numpy.zeros', (['xsize'], {'dtype': '"""float32"""'}), "(xsize, dtype='float32')\n", (13180, 13204), False, 'import numpy\n'), ((14439, 14460), 'h5py.File', 'h5py.File', (['fname', '"""r"""'], {}), "(fname, 'r')\n", (14448, 14460), False, 'import h5py\n'), ((14497, 14529), 'wagl.hdf5.find', 'find', (['fid'], {'dataset_class': '"""IMAGE"""'}), "(fid, dataset_class='IMAGE')\n", (14501, 14529), False, 'from wagl.hdf5 import create_external_link, find\n'), ((14579, 14631), 'wagl.hdf5.create_external_link', 'create_external_link', (['fname', 'dname', 'out_fname', 'dname'], {}), '(fname, dname, out_fname, dname)\n', (14599, 14631), False, 'from wagl.hdf5 import create_external_link, find\n'), ((14665, 14686), 'h5py.File', 'h5py.File', (['fname', '"""r"""'], {}), "(fname, 'r')\n", (14674, 14686), False, 'import h5py\n'), ((14712, 14732), 'h5py.File', 'h5py.File', (['out_fname'], {}), '(out_fname)\n', (14721, 14732), False, 'import h5py\n')] |
""" Tests geometry routines
"""
from builtins import range
import random
import itertools
import pytest
import numpy as np
import moldesign as mdt
from moldesign import units as u
from . import helpers
registered_types = {}
__PYTEST_MARK__ = 'internal' # mark all tests in this module with this label (see ./conftest.py)
# TODO: automated method testing based on its metadata - i.e. test to make sure parameters are
# honored, test that it calcultes what it says it does, test that properties have the right
# units and array shapes, etc.
# step for numerical derivative testing
def typedfixture(*types, **kwargs):
    """Decorator that tags a pytest fixture with one or more arbitrary type labels.

    Each label is recorded in ``registered_types``; the labels later determine
    which tests are run against the fixture's value.
    """
    def wrap(func):
        for type_label in types:
            registered_types.setdefault(type_label, []).append(func.__name__)
        return pytest.fixture(**kwargs)(func)
    return wrap
def _make_mol_with_n_hydrogens(n):
    """Build a molecule of *n* hydrogen atoms (no bonds, default positions)."""
    hydrogens = [mdt.Atom('H') for _ in range(n)]
    return mdt.Molecule(hydrogens)
def _apply_random_offsets(mol, idim):
    """Rigidly shift all atoms along axis *idim* by one random offset in [-50, 50) angstrom."""
    shift = 100.0 * (random.random() - 0.5) * u.angstrom
    mol.positions[:, idim] += shift
@typedfixture('atomcontainer', scope='function')
def three_particle_right_angle():
    """Three hydrogens forming a 90-degree angle, rigidly shifted by a random translation."""
    molecule = _make_mol_with_n_hydrogens(3)
    molecule.atoms[0].x = 1.0 * u.angstrom
    molecule.atoms[2].y = 1.0 * u.angstrom

    for axis in range(3):
        _apply_random_offsets(molecule, axis)

    return molecule
@typedfixture('atomcontainer', scope='function')
def four_particle_45_twist():
    """Four bonded hydrogens whose 0-1-2-3 dihedral is 45 degrees, randomly translated."""
    molecule = _make_mol_with_n_hydrogens(4)
    molecule.positions = u.nm*[[0.1, 0.0, -0.5],
                               [0.0, 0.0, -0.5],
                               [0.0, 0.0, 0.5],
                               [0.2, -0.2, 0.5]]

    for axis in range(3):
        _apply_random_offsets(molecule, axis)

    for first in range(3):
        molecule.atoms[first].bond_to(molecule.atoms[first + 1], 1)

    return molecule
########################
# Dihedrals #
########################
def test_dihedral_measure(four_particle_45_twist):
    """The 0-1-2-3 dihedral of the twisted fixture measures 45 degrees."""
    molecule = four_particle_45_twist
    measured = mdt.dihedral(*molecule.atoms).value_in(u.degrees)
    np.testing.assert_almost_equal(measured, 45.0, decimal=8)
def test_dihedral_two_atom_selection(four_particle_45_twist):
    """The two central atoms select the same dihedral; a non-dihedral pair raises."""
    molecule = four_particle_45_twist
    measured = mdt.dihedral(*molecule.atoms[1:3]).value_in(u.degrees)
    np.testing.assert_almost_equal(measured, 45.0, decimal=8)

    # atoms 0 and 1 are not the central bond of any dihedral
    with pytest.raises(ValueError):
        mdt.dihedral(molecule.atoms[0], molecule.atoms[1])
def test_set_dihedral(four_particle_45_twist):
    """Setting the dihedral with four explicit atoms updates the measured angle."""
    molecule = four_particle_45_twist
    a0, a1, a2, a3 = molecule.atoms
    mdt.set_dihedral(a0, a1, a2, a3, 10.0 * u.degrees)
    measured = mdt.dihedral(*molecule.atoms).value_in(u.degrees)
    np.testing.assert_almost_equal(measured, 10.0, decimal=8)
@pytest.mark.screening
def test_set_dihedral_sign_convention(four_particle_45_twist):
    """A negative target angle wraps around to the equivalent positive reading."""
    molecule = four_particle_45_twist
    a0, a1, a2, a3 = molecule.atoms
    mdt.set_dihedral(a0, a1, a2, a3, -23.0 * u.degrees)
    measured = mdt.dihedral(*molecule.atoms).value_in(u.degrees)
    np.testing.assert_almost_equal(measured, 337.0, decimal=8)
def test_set_dihedral_two_atom_selection(four_particle_45_twist):
    """set_dihedral accepts just the central bond atoms; a non-dihedral pair raises."""
    molecule = four_particle_45_twist
    mdt.set_dihedral(molecule.atoms[1], molecule.atoms[2], 10.0 * u.degrees)
    measured = mdt.dihedral(*molecule.atoms).value_in(u.degrees)
    np.testing.assert_almost_equal(measured, 10.0, decimal=8)

    # atoms 0 and 1 do not define a dihedral's central bond
    with pytest.raises(ValueError):
        mdt.set_dihedral(molecule.atoms[0], molecule.atoms[1], 5.0 * u.degrees)
def test_set_dihedral_bond_no_adjust(four_particle_45_twist):
    """set_dihedral also accepts a Bond object, here with adjustmol disabled."""
    molecule = four_particle_45_twist
    central_bond = mdt.Bond(molecule.atoms[1], molecule.atoms[2])
    mdt.set_dihedral(central_bond, 10.0 * u.degrees, adjustmol=False)
    measured = mdt.dihedral(*molecule.atoms).value_in(u.degrees)
    np.testing.assert_almost_equal(measured, 10.0, decimal=8)
def test_dihedral_sign_convention(four_particle_45_twist):
    """Twisting the last atom the other way yields the complementary 315-degree reading."""
    molecule = four_particle_45_twist
    molecule.atoms[-1].y += 0.4 * u.nm
    measured = mdt.dihedral(*molecule.atoms).value_in(u.degrees)
    np.testing.assert_almost_equal(measured, 315.0, decimal=8)
# TODO: test behavior at discontinuities (180, -180)
@pytest.mark.screening
def test_dihedral_gradient(four_particle_45_twist):
    """Analytic dihedral gradient matches a numerical finite-difference gradient."""
    molecule = four_particle_45_twist
    monitor = mdt.DihedralMonitor(*molecule.atoms)
    analytic = monitor.gradient()
    numerical = helpers.num_grad(molecule, lambda: monitor.value)
    np.testing.assert_allclose(analytic.defunits_value(),
                               numerical.defunits_value(),
                               atol=5.0 * helpers.DEFSTEP.defunits_value())
def test_dihedral_gradient_sign_convention(four_particle_45_twist):
    """Gradient check on the oppositely-twisted (315-degree) geometry."""
    molecule = four_particle_45_twist
    molecule.atoms[-1].y += 0.4 * u.nm
    monitor = mdt.DihedralMonitor(*molecule.atoms)
    analytic = monitor.gradient()
    numerical = helpers.num_grad(molecule, lambda: monitor.value)
    np.testing.assert_allclose(analytic, numerical,
                               atol=5.0 * helpers.DEFSTEP.defunits_value())
########################
# Angles #
########################
def test_angle_measure(three_particle_right_angle):
    """The 0-1-2 angle of the right-angle fixture measures 90 degrees."""
    molecule = three_particle_right_angle
    measured = mdt.angle(*molecule.atoms).value_in(u.degrees)
    np.testing.assert_almost_equal(measured, 90.0, decimal=8)
@pytest.mark.screening
def test_angle_gradient(three_particle_right_angle):
    """Analytic angle gradient matches a numerical finite-difference gradient."""
    molecule = three_particle_right_angle
    monitor = mdt.AngleMonitor(*molecule.atoms)
    assert abs(monitor.value.value_in(u.degrees) - 90.0) <= 1.0e-8
    analytic = monitor.gradient()
    numerical = helpers.num_grad(molecule, lambda: monitor.value)
    np.testing.assert_allclose(analytic.defunits_value(),
                               numerical.defunits_value(),
                               atol=5.0 * helpers.DEFSTEP.defunits_value())
def test_set_angle_with_monitor(three_particle_right_angle):
    """Assigning to an AngleMonitor's value moves the atoms to realize that angle."""
    molecule = three_particle_right_angle
    monitor = mdt.AngleMonitor(*molecule.atoms)
    monitor.value = 45 * u.degrees
    assert abs(mdt.angle(*molecule.atoms) - (45 * u.degrees)) < 0.1 * u.degrees
def test_set_angle_noadjust(four_particle_45_twist):
    """With adjustmol=False only the three angle atoms move; atom 3 stays put."""
    molecule = four_particle_45_twist
    assert mdt.angle(*molecule.atoms[:3]) == 90.0 * u.degrees

    target = 45 * u.degrees
    before = molecule.positions.copy()
    mdt.set_angle(molecule.atoms[0], molecule.atoms[1], molecule.atoms[2],
                  target, adjustmol=False)

    assert abs(mdt.angle(*molecule.atoms[:3]) - target) < 0.1 * u.degrees
    assert (molecule.positions[-1] == before[-1]).all()
########################
# Distances #
########################
def test_distance_array(three_particle_right_angle):
    """Pairwise distance matrix of the right triangle: unit legs, sqrt(2) hypotenuse."""
    molecule = three_particle_right_angle
    root2 = np.sqrt(2)
    expected = u.angstrom*[[0.0, 1.0, root2],
                           [1.0, 0.0, 1.0],
                           [root2, 1.0, 0.0]]
    np.testing.assert_allclose(molecule.calc_distance_array(),
                               expected,
                               atol=1e-8)
def test_set_distance_and_adjust(four_particle_45_twist):
    """Doubling a bond distance via the monitor drags the attached end atoms along."""
    molecule = four_particle_45_twist
    before = molecule.positions.copy()
    monitor = mdt.DistanceMonitor(molecule.atoms[1], molecule.atoms[2])
    original_length = monitor.value
    monitor.value *= 2.0

    moved = (np.sqrt(((before[0] - molecule.positions[0])**2).sum()) +
             np.sqrt(((before[-1] - molecule.positions[-1])**2).sum()))

    assert abs(mdt.distance(molecule.atoms[1], molecule.atoms[2])
               - 2.0 * original_length) <= 1e-9 * u.angstrom
    assert abs(moved - original_length) < 1.0e-9 * u.angstrom
def test_set_distance_noadjust(four_particle_45_twist):
    """With adjustmol=False only the two distance atoms move; the end atoms stay fixed."""
    molecule = four_particle_45_twist
    before = molecule.positions.copy()
    original_length = mdt.distance(molecule.atoms[1], molecule.atoms[2])
    mdt.set_distance(molecule.atoms[1], molecule.atoms[2],
                     2.0 * original_length, adjustmol=False)

    assert abs(mdt.distance(molecule.atoms[1], molecule.atoms[2])
               - 2.0 * original_length) <= 1e-9 * u.angstrom
    assert (before[0] == molecule.positions[0]).all()
    assert (before[-1] == molecule.positions[-1]).all()
@pytest.mark.parametrize('objkey', registered_types['atomcontainer'])
def test_atomic_distance_measures_are_consistent(objkey, request):
    """atom.distance, mdt.distance and calc_distance_array agree for every pair."""
    molecule = request.getfixturevalue(objkey)
    dist_matrix = molecule.calc_distance_array()
    for i, j in itertools.product(range(3), range(3)):
        atom_i, atom_j = molecule.atoms[i], molecule.atoms[j]
        assert atom_i.distance(atom_j) == dist_matrix[i, j]
        assert mdt.distance(atom_i, atom_j) == dist_matrix[i, j]
        squared = np.sum((atom_i.position - atom_j.position)**2)
        np.testing.assert_almost_equal(squared.defunits_value(),
                                       (dist_matrix[i, j]**2).defunits_value(),
                                       decimal=10)
def test_center_of_mass(four_particle_45_twist):
    """Center of mass is correct for equal masses and after reassigning masses."""
    molecule = four_particle_45_twist
    molecule.positions = u.nm*[[0.1, 0.0, -0.5],
                               [0.0, 0.0, -0.5],
                               [0.0, 0.0, 0.5],
                               [0.2, -0.2, 0.5]]

    # equal masses: COM is the arithmetic mean (positions converted nm -> angstrom)
    expected_angstroms = np.array([0.1 + 0.2, -0.2, 0.0]) * 10.0 / 4.0
    np.testing.assert_almost_equal(molecule.center_of_mass.defunits_value(),
                                   expected_angstroms)

    # unequal masses: COM becomes the mass-weighted mean
    for iatom, mass in enumerate([5.0, 10.0, 5.0, 10.0]):
        molecule.atoms[iatom].mass = mass * u.ureg.kilograms
    expected_angstroms = np.array([0.1 + 0.4, -0.4, 0.0]) * 10.0 / 6.0
    np.testing.assert_almost_equal(molecule.center_of_mass.defunits_value(),
                                   expected_angstroms)
def test_set_center_of_mass(four_particle_45_twist):
    """Assigning molecule.com translates the molecule to the requested center of mass."""
    molecule = four_particle_45_twist

    def assert_com_at(expected):
        np.testing.assert_almost_equal(molecule.com.value_in(u.angstrom),
                                       expected.value_in(u.angstrom))

    # reset COM to the origin
    molecule.com = [0, 0, 0] * u.angstrom
    assert_com_at([0, 0, 0] * u.angstrom)

    # setting it to its current position is a no-op
    molecule.com = [0, 0, 0] * u.angstrom
    assert_com_at([0, 0, 0] * u.angstrom)

    # move COM elsewhere
    molecule.com = [10, 0, -10] * u.angstrom
    assert_com_at([10, 0, -10] * u.angstrom)
def test_distance_gradient(three_particle_right_angle):
    """Analytic distance gradient matches a numerical finite-difference gradient."""
    molecule = three_particle_right_angle
    monitor = mdt.DistanceMonitor(*molecule.atoms[:2])
    assert monitor.value == molecule.atoms[0].distance(molecule.atoms[1])
    analytic = monitor.gradient()
    numerical = helpers.num_grad(molecule, lambda: monitor.value)
    np.testing.assert_allclose(analytic.defunits_value(),
                               numerical.defunits_value(),
                               atol=5.0 * helpers.DEFSTEP.defunits_value())
#########################
# Utilities #
#########################
def test_sub_angles():
    """sub_angles returns the signed angular difference, wrapped to the principal branch."""
    from moldesign.geom import sub_angles
    cases = [
        (np.pi*u.radian, np.pi/2.0*u.radian, np.pi/2.0 * u.radian),
        (360*u.degrees, 179*u.degrees, -179*u.degrees),
        (360*u.degrees, 3*u.degrees, -3*u.degrees),
        (720*u.degrees, -360*u.degrees, 0*u.degrees),
        (180*u.degrees, 270*u.degrees, -90*u.degrees),
        (270*u.degrees, 0*u.degrees, -90*u.degrees),
    ]
    for first, second, expected in cases:
        np.testing.assert_allclose(sub_angles(first, second), expected)
| [
"moldesign.set_distance",
"numpy.sum",
"moldesign.distance",
"pytest.mark.parametrize",
"builtins.range",
"moldesign.DihedralMonitor",
"pytest.raises",
"numpy.testing.assert_allclose",
"moldesign.set_dihedral",
"pytest.fixture",
"random.random",
"moldesign.set_angle",
"moldesign.AngleMonitor... | [((8556, 8624), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""objkey"""', "registered_types['atomcontainer']"], {}), "('objkey', registered_types['atomcontainer'])\n", (8579, 8624), False, 'import pytest\n'), ((1433, 1441), 'builtins.range', 'range', (['(3)'], {}), '(3)\n', (1438, 1441), False, 'from builtins import range\n'), ((1813, 1821), 'builtins.range', 'range', (['(3)'], {}), '(3)\n', (1818, 1821), False, 'from builtins import range\n'), ((1882, 1890), 'builtins.range', 'range', (['(3)'], {}), '(3)\n', (1887, 1890), False, 'from builtins import range\n'), ((2790, 2885), 'moldesign.set_dihedral', 'mdt.set_dihedral', (['mol.atoms[0]', 'mol.atoms[1]', 'mol.atoms[2]', 'mol.atoms[3]', '(10.0 * u.degrees)'], {}), '(mol.atoms[0], mol.atoms[1], mol.atoms[2], mol.atoms[3], \n 10.0 * u.degrees)\n', (2806, 2885), True, 'import moldesign as mdt\n'), ((3174, 3270), 'moldesign.set_dihedral', 'mdt.set_dihedral', (['mol.atoms[0]', 'mol.atoms[1]', 'mol.atoms[2]', 'mol.atoms[3]', '(-23.0 * u.degrees)'], {}), '(mol.atoms[0], mol.atoms[1], mol.atoms[2], mol.atoms[3], -\n 23.0 * u.degrees)\n', (3190, 3270), True, 'import moldesign as mdt\n'), ((3540, 3602), 'moldesign.set_dihedral', 'mdt.set_dihedral', (['mol.atoms[1]', 'mol.atoms[2]', '(10.0 * u.degrees)'], {}), '(mol.atoms[1], mol.atoms[2], 10.0 * u.degrees)\n', (3556, 3602), True, 'import moldesign as mdt\n'), ((4042, 4078), 'moldesign.Bond', 'mdt.Bond', (['mol.atoms[1]', 'mol.atoms[2]'], {}), '(mol.atoms[1], mol.atoms[2])\n', (4050, 4078), True, 'import moldesign as mdt\n'), ((4083, 4140), 'moldesign.set_dihedral', 'mdt.set_dihedral', (['bond', '(10.0 * u.degrees)'], {'adjustmol': '(False)'}), '(bond, 10.0 * u.degrees, adjustmol=False)\n', (4099, 4140), True, 'import moldesign as mdt\n'), ((4781, 4812), 'moldesign.DihedralMonitor', 'mdt.DihedralMonitor', (['*mol.atoms'], {}), '(*mol.atoms)\n', (4800, 4812), True, 'import moldesign as mdt\n'), ((5242, 5273), 
'moldesign.DihedralMonitor', 'mdt.DihedralMonitor', (['*mol.atoms'], {}), '(*mol.atoms)\n', (5261, 5273), True, 'import moldesign as mdt\n'), ((5978, 6006), 'moldesign.AngleMonitor', 'mdt.AngleMonitor', (['*mol.atoms'], {}), '(*mol.atoms)\n', (5994, 6006), True, 'import moldesign as mdt\n'), ((6458, 6486), 'moldesign.AngleMonitor', 'mdt.AngleMonitor', (['*mol.atoms'], {}), '(*mol.atoms)\n', (6474, 6486), True, 'import moldesign as mdt\n'), ((6805, 6884), 'moldesign.set_angle', 'mdt.set_angle', (['mol.atoms[0]', 'mol.atoms[1]', 'mol.atoms[2]', 'final'], {'adjustmol': '(False)'}), '(mol.atoms[0], mol.atoms[1], mol.atoms[2], final, adjustmol=False)\n', (6818, 6884), True, 'import moldesign as mdt\n'), ((7415, 7493), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['distance_array', 'desired_distance_array'], {'atol': '(1e-08)'}), '(distance_array, desired_distance_array, atol=1e-08)\n', (7441, 7493), True, 'import numpy as np\n'), ((7698, 7745), 'moldesign.DistanceMonitor', 'mdt.DistanceMonitor', (['mol.atoms[1]', 'mol.atoms[2]'], {}), '(mol.atoms[1], mol.atoms[2])\n', (7717, 7745), True, 'import moldesign as mdt\n'), ((8243, 8283), 'moldesign.distance', 'mdt.distance', (['mol.atoms[1]', 'mol.atoms[2]'], {}), '(mol.atoms[1], mol.atoms[2])\n', (8255, 8283), True, 'import moldesign as mdt\n'), ((8288, 8364), 'moldesign.set_distance', 'mdt.set_distance', (['mol.atoms[1]', 'mol.atoms[2]', '(2.0 * olddist)'], {'adjustmol': '(False)'}), '(mol.atoms[1], mol.atoms[2], 2.0 * olddist, adjustmol=False)\n', (8304, 8364), True, 'import moldesign as mdt\n'), ((10998, 11033), 'moldesign.DistanceMonitor', 'mdt.DistanceMonitor', (['*mol.atoms[:2]'], {}), '(*mol.atoms[:2])\n', (11017, 11033), True, 'import moldesign as mdt\n'), ((2572, 2597), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (2585, 2597), False, 'import pytest\n'), ((2663, 2703), 'moldesign.dihedral', 'mdt.dihedral', (['mol.atoms[0]', 'mol.atoms[1]'], {}), '(mol.atoms[0], 
mol.atoms[1])\n', (2675, 2703), True, 'import moldesign as mdt\n'), ((3781, 3806), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (3794, 3806), False, 'import pytest\n'), ((3872, 3933), 'moldesign.set_dihedral', 'mdt.set_dihedral', (['mol.atoms[0]', 'mol.atoms[1]', '(5.0 * u.degrees)'], {}), '(mol.atoms[0], mol.atoms[1], 5.0 * u.degrees)\n', (3888, 3933), True, 'import moldesign as mdt\n'), ((6693, 6718), 'moldesign.angle', 'mdt.angle', (['*mol.atoms[:3]'], {}), '(*mol.atoms[:3])\n', (6702, 6718), True, 'import moldesign as mdt\n'), ((8817, 8825), 'builtins.range', 'range', (['(3)'], {}), '(3)\n', (8822, 8825), False, 'from builtins import range\n'), ((8827, 8835), 'builtins.range', 'range', (['(3)'], {}), '(3)\n', (8832, 8835), False, 'from builtins import range\n'), ((11551, 11603), 'moldesign.geom.sub_angles', 'sub_angles', (['(np.pi * u.radian)', '(np.pi / 2.0 * u.radian)'], {}), '(np.pi * u.radian, np.pi / 2.0 * u.radian)\n', (11561, 11603), False, 'from moldesign.geom import sub_angles\n'), ((11684, 11728), 'moldesign.geom.sub_angles', 'sub_angles', (['(360 * u.degrees)', '(179 * u.degrees)'], {}), '(360 * u.degrees, 179 * u.degrees)\n', (11694, 11728), False, 'from moldesign.geom import sub_angles\n'), ((11805, 11847), 'moldesign.geom.sub_angles', 'sub_angles', (['(360 * u.degrees)', '(3 * u.degrees)'], {}), '(360 * u.degrees, 3 * u.degrees)\n', (11815, 11847), False, 'from moldesign.geom import sub_angles\n'), ((11922, 11967), 'moldesign.geom.sub_angles', 'sub_angles', (['(720 * u.degrees)', '(-360 * u.degrees)'], {}), '(720 * u.degrees, -360 * u.degrees)\n', (11932, 11967), False, 'from moldesign.geom import sub_angles\n'), ((12041, 12085), 'moldesign.geom.sub_angles', 'sub_angles', (['(180 * u.degrees)', '(270 * u.degrees)'], {}), '(180 * u.degrees, 270 * u.degrees)\n', (12051, 12085), False, 'from moldesign.geom import sub_angles\n'), ((12161, 12203), 'moldesign.geom.sub_angles', 'sub_angles', (['(270 * u.degrees)', '(0 * 
u.degrees)'], {}), '(270 * u.degrees, 0 * u.degrees)\n', (12171, 12203), False, 'from moldesign.geom import sub_angles\n'), ((950, 974), 'pytest.fixture', 'pytest.fixture', ([], {}), '(**kwargs)\n', (964, 974), False, 'import pytest\n'), ((1071, 1084), 'moldesign.Atom', 'mdt.Atom', (['"""H"""'], {}), "('H')\n", (1079, 1084), True, 'import moldesign as mdt\n'), ((8952, 8972), 'moldesign.distance', 'mdt.distance', (['ai', 'aj'], {}), '(ai, aj)\n', (8964, 8972), True, 'import moldesign as mdt\n'), ((9516, 9548), 'numpy.array', 'np.array', (['[0.1 + 0.2, -0.2, 0.0]'], {}), '([0.1 + 0.2, -0.2, 0.0])\n', (9524, 9548), True, 'import numpy as np\n'), ((9910, 9942), 'numpy.array', 'np.array', (['[0.1 + 0.4, -0.4, 0.0]'], {}), '([0.1 + 0.4, -0.4, 0.0])\n', (9918, 9942), True, 'import numpy as np\n'), ((1094, 1102), 'builtins.range', 'range', (['n'], {}), '(n)\n', (1099, 1102), False, 'from builtins import range\n'), ((1176, 1191), 'random.random', 'random.random', ([], {}), '()\n', (1189, 1191), False, 'import random\n'), ((2159, 2183), 'moldesign.dihedral', 'mdt.dihedral', (['*mol.atoms'], {}), '(*mol.atoms)\n', (2171, 2183), True, 'import moldesign as mdt\n'), ((2424, 2453), 'moldesign.dihedral', 'mdt.dihedral', (['*mol.atoms[1:3]'], {}), '(*mol.atoms[1:3])\n', (2436, 2453), True, 'import moldesign as mdt\n'), ((2916, 2940), 'moldesign.dihedral', 'mdt.dihedral', (['*mol.atoms'], {}), '(*mol.atoms)\n', (2928, 2940), True, 'import moldesign as mdt\n'), ((3301, 3325), 'moldesign.dihedral', 'mdt.dihedral', (['*mol.atoms'], {}), '(*mol.atoms)\n', (3313, 3325), True, 'import moldesign as mdt\n'), ((3638, 3662), 'moldesign.dihedral', 'mdt.dihedral', (['*mol.atoms'], {}), '(*mol.atoms)\n', (3650, 3662), True, 'import moldesign as mdt\n'), ((4176, 4200), 'moldesign.dihedral', 'mdt.dihedral', (['*mol.atoms'], {}), '(*mol.atoms)\n', (4188, 4200), True, 'import moldesign as mdt\n'), ((4472, 4496), 'moldesign.dihedral', 'mdt.dihedral', (['*mol.atoms'], {}), '(*mol.atoms)\n', (4484, 
4496), True, 'import moldesign as mdt\n'), ((5722, 5743), 'moldesign.angle', 'mdt.angle', (['*mol.atoms'], {}), '(*mol.atoms)\n', (5731, 5743), True, 'import moldesign as mdt\n'), ((6534, 6555), 'moldesign.angle', 'mdt.angle', (['*mol.atoms'], {}), '(*mol.atoms)\n', (6543, 6555), True, 'import moldesign as mdt\n'), ((6901, 6926), 'moldesign.angle', 'mdt.angle', (['*mol.atoms[:3]'], {}), '(*mol.atoms[:3])\n', (6910, 6926), True, 'import moldesign as mdt\n'), ((7227, 7237), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (7234, 7237), True, 'import numpy as np\n'), ((7340, 7350), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (7347, 7350), True, 'import numpy as np\n'), ((7965, 8005), 'moldesign.distance', 'mdt.distance', (['mol.atoms[1]', 'mol.atoms[2]'], {}), '(mol.atoms[1], mol.atoms[2])\n', (7977, 8005), True, 'import moldesign as mdt\n'), ((8381, 8421), 'moldesign.distance', 'mdt.distance', (['mol.atoms[1]', 'mol.atoms[2]'], {}), '(mol.atoms[1], mol.atoms[2])\n', (8393, 8421), True, 'import moldesign as mdt\n'), ((9036, 9076), 'numpy.sum', 'np.sum', (['((ai.position - aj.position) ** 2)'], {}), '((ai.position - aj.position) ** 2)\n', (9042, 9076), True, 'import numpy as np\n')] |
import cv2
import numpy as np
import torch
import os
import time
from .opts import opts
from .tracker_trt import FairTracker
from .fairmot.utils.transformation import *
from .fairmot.tracking_utils import visualization as vis
from .fairmot.tracking_utils.log import logger
from .test_utils import write_results
"""
This is the kernel of system and do the tracking task.
It can accept two input formats. One is an image sequence directory, the other is a video file.
For the image sequence directory, it should have a structure similar to the following:
    - DIRECTORY "img1": the image set of all image files named like "000397"
- INI FILE "seqinfo.ini": the file has some information about sequence, for instance:
[Sequence]
name=MOT17-01-FRCNN
imDir=img1
frameRate=30
seqLength=450
imWidth=1920
imHeight=1080
imExt=.jpg
And for the video file, it should be a file type which can be loaded by cv2.videocapture.
"""
class TrackingKernel:
    """Core multi-object tracking kernel built around a FairMOT tracker.

    Accepts either an image-sequence directory or a video file as input
    (see the module docstring for the expected sequence layout).
    """
    def __init__(self, enableFP16):
        # Whether the underlying inference engine should run in FP16 mode;
        # forwarded to FairTracker in init_kernel().
        self.enableFP16 = enableFP16
def init_kernel(self, frame_rate, image_size, target_size, opt):
""" Create kernel only. """
self.image_size = image_size
self.target_size = target_size
# tracker_init
# setup the MOT_Tracker
self.tracker = FairTracker(opt, frame_rate, self.enableFP16)
def pre_processing(self, img0):
img = np.array(img0)
# Padded resize
img, _, _, _ = letterbox(img, height=self.target_size[0], width=self.target_size[1])
# Normalize RGB
img = img[:, :, ::-1].transpose(2, 0, 1)
img = np.ascontiguousarray(img, dtype=np.float32)
img /= 255.0
return img
def call_once(self, img0, dense_region=None, dense_region0=None):
""" Kernel Executor in a single step. Current only support for video file. """
st = time.time()
#### PRE PROCESSING ####
img = self.pre_processing(img0)
if dense_region is not None:
img = img[:, dense_region[1]:dense_region[3], dense_region[0]:dense_region[2]]
blob = np.expand_dims(img, axis=0)
#### UPDATE TRACKER ####
#### ********* ####
ret_trks = []
# blob = torch.from_numpy(img).cuda().unsqueeze(0)
#### TRACKING ####
trks = self.tracker.update(blob, self.image_size, dense_region0)
et = time.time()
return trks, et-st
def infer_preprocessing(self, img):
img = img[:, :, ::-1].transpose(2, 0, 1)
img = np.ascontiguousarray(img, dtype=np.float32)
img /= 255.0
return img
    def infer(self, img):
        """ Kernel Executor in a single step. Current only support for video file. """
        # Detection-only pass: runs the network on one frame and returns the
        # raw detections without track association.
        #### PRE PROCESSING ####
        st = time.time()
        print(img.shape)  # debug output; consider removing for production
        img = self.infer_preprocessing(img)
        #### UPDATE TRACKER ####
        #### ********* ####
        ret_trks = []  # NOTE(review): unused local, kept as-is
        blob = np.expand_dims(img, axis=0)  # add the batch dimension
        #### TRACKING ####
        dets = self.tracker.infer(blob, self.image_size)
        for det in dets:
            print(det.tlwh)  # debug: presumably (top-left x, y, width, height) -- confirm
        et = time.time()
return dets, et-st | [
"numpy.ascontiguousarray",
"numpy.array",
"numpy.expand_dims",
"time.time"
] | [((1440, 1454), 'numpy.array', 'np.array', (['img0'], {}), '(img0)\n', (1448, 1454), True, 'import numpy as np\n'), ((1659, 1702), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['img'], {'dtype': 'np.float32'}), '(img, dtype=np.float32)\n', (1679, 1702), True, 'import numpy as np\n'), ((1918, 1929), 'time.time', 'time.time', ([], {}), '()\n', (1927, 1929), False, 'import time\n'), ((2146, 2173), 'numpy.expand_dims', 'np.expand_dims', (['img'], {'axis': '(0)'}), '(img, axis=0)\n', (2160, 2173), True, 'import numpy as np\n'), ((2430, 2441), 'time.time', 'time.time', ([], {}), '()\n', (2439, 2441), False, 'import time\n'), ((2573, 2616), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['img'], {'dtype': 'np.float32'}), '(img, dtype=np.float32)\n', (2593, 2616), True, 'import numpy as np\n'), ((2817, 2828), 'time.time', 'time.time', ([], {}), '()\n', (2826, 2828), False, 'import time\n'), ((2996, 3023), 'numpy.expand_dims', 'np.expand_dims', (['img'], {'axis': '(0)'}), '(img, axis=0)\n', (3010, 3023), True, 'import numpy as np\n'), ((3175, 3186), 'time.time', 'time.time', ([], {}), '()\n', (3184, 3186), False, 'import time\n')] |
# -*- coding: utf-8 -*-
"""
Methods for loading and saving fiber_bundles objects into .dat and .h5 files
"""
import os
import numpy as np
import h5py
from .. import objects
def load(file_name, group_name='/'):
    """
    Load fiber bundle configurations from a text file or an hdf5 file.

    Parameters
    ----------
    file_name : string
        path to file (.dat, .txt or .h5)
    group_name : string, optional
        path inside the hdf5-file to the fiber bundle group

    Returns
    -------
    res : list(list(fiber)), fibers are (n,4)-arrays with (x,y,z,radii) for each fiber point
    """
    ext = os.path.splitext(file_name)[1]
    if ext in ('.dat', '.txt'):
        with open(file_name, 'r') as f:
            return load_dat(f)
    if ext == '.h5':
        with h5py.File(file_name, 'r') as h5f:
            return load_h5(h5f[group_name])
    raise TypeError(ext + ' is not implemented yet')
def load_dat(file):
    """
    Load fiber bundle configurations from an already opened text file.

    The format is whitespace-separated "x y z radius" lines; one blank line
    terminates a fiber, two consecutive blank lines terminate a fiber bundle.

    Parameters
    ----------
    file : file object
        opened file: with open(file_name, 'r') as file:

    Returns
    -------
    res : list(list(fiber)), fibers are (n,4)-arrays with (x,y,z,radii) for each fiber point
    """
    fiber = []
    fiber_bundles = [[]]
    empty_lines = 0
    for line in file:
        if line.strip():
            empty_lines = 0
            numbers = list(map(float, line.split()))
            # Only (x, y, z, radius) are kept; extra columns are ignored.
            fiber.append(numbers[0:4])
        else:  # empty line
            empty_lines += 1
            if empty_lines == 1:  # end of fiber
                if fiber:
                    fiber_bundles[-1].append(np.array(fiber, float))
                    fiber = []
            elif empty_lines == 2:  # end of fiber_bundle
                if fiber_bundles[-1]:
                    fiber_bundles.append([])
    if fiber:  # save the last fiber if the file does not end with a blank line
        # Explicit float dtype for consistency with the loop above.
        fiber_bundles[-1].append(np.array(fiber, float))
    return fiber_bundles
def load_h5(h5f):
    """
    Load fiber bundle configurations from an hdf5 group.

    Parameters
    ----------
    h5f : hdf5 class
        h5-file or group object; bundle and fiber names are integer strings

    Returns
    -------
    res : list(list(fiber)), fibers are (n,4)-arrays with (x,y,z,radii) for each fiber point
    """
    fiber_bundles = []
    # Keys are stored as strings of integers; sort numerically to restore order.
    for fb_idx in sorted(int(k) for k in h5f.keys()):
        group = h5f[str(fb_idx)]
        fibers = [group[str(f_idx)][:].astype(float)
                  for f_idx in sorted(int(k) for k in group.keys())]
        fiber_bundles.append(fibers)
    return fiber_bundles
def save(file_name, fiber_bundles, group_name='/', mode='w-'):
    """
    Save fiber bundle configurations to a text file or an hdf5 file.

    Parameters
    ----------
    file_name : string
        path to file (.dat, .txt or .h5)
    fiber_bundles : list( list( (n,4)-array_like ) )
    group_name : string, optional
        path inside hdf5-file for fiber bundles
    mode : string, optional
        file mode of open() or h5py.File()
    """
    ext = os.path.splitext(file_name)[1]
    if ext in ('.dat', '.txt'):
        # h5py's exclusive-create flag 'w-' maps to open()'s 'x'.
        file_mode = 'x' if mode == 'w-' else mode
        with open(file_name, file_mode) as file:
            save_dat(file, fiber_bundles)
        return
    if ext == '.h5':
        with h5py.File(file_name, mode) as h5f:
            # Write into the root or into a freshly created sub-group.
            target = h5f if not group_name or group_name == '/' \
                else h5f.create_group(group_name)
            save_h5(target, fiber_bundles)
        return
    raise TypeError(ext + ' is not implemented yet')
def save_dat(file, fiber_bundles):
    """
    Save fiber bundle configurations to an already opened text file.

    Fibers are written as "x y z radius" lines, separated by one blank line;
    fiber bundles are separated by an additional blank line.

    Parameters
    ----------
    file : file object
        opened file: with open(file_name, 'w-') as file:
    fiber_bundles : list( list( (n,4)-array_like ) )

    Raises
    ------
    TypeError
        If a fiber is not a two dimensional (n,4) array.
    """
    if not fiber_bundles:
        return
    fiber_bundles = objects.fiber_bundles.Cast(fiber_bundles)
    for fb, fiber_bundle in enumerate(fiber_bundles):
        for fiber in fiber_bundle:
            # Check the dimensionality first: reading fiber.shape[1] on a
            # 1-d array would raise IndexError instead of the intended TypeError.
            if len(fiber.shape) != 2 or fiber.shape[1] != 4:
                raise TypeError('Wrong shape:', fiber.shape)
            for line in fiber:
                file.write(
                    str(line[0]) + " " + str(line[1]) + " " + str(line[2]) +
                    " " + str(line[3]) + "\n")
            file.write("\n")
        if fb != len(fiber_bundles) - 1:
            file.write("\n")
def save_h5(h5f, fiber_bundles):
    """
    Save fiber bundle configurations inside an hdf5 file.

    Parameters
    ----------
    h5f : hdf5 class
        h5-file or group object
    fiber_bundles : list( list( (n,4)-array_like ) )
    """
    if not fiber_bundles:
        return
    fiber_bundles = objects.fiber_bundles.Cast(fiber_bundles)
    # One sub-group per bundle and one dataset per fiber, both named by their
    # position so load_h5 can restore the original ordering.
    for bundle_idx, bundle in enumerate(fiber_bundles):
        group = h5f.create_group(str(bundle_idx))
        for fiber_idx, fiber in enumerate(bundle):
            group[str(fiber_idx)] = fiber[:, :]
| [
"h5py.File",
"numpy.array",
"os.path.splitext"
] | [((587, 614), 'os.path.splitext', 'os.path.splitext', (['file_name'], {}), '(file_name)\n', (603, 614), False, 'import os\n'), ((3078, 3105), 'os.path.splitext', 'os.path.splitext', (['file_name'], {}), '(file_name)\n', (3094, 3105), False, 'import os\n'), ((1946, 1961), 'numpy.array', 'np.array', (['fiber'], {}), '(fiber)\n', (1954, 1961), True, 'import numpy as np\n'), ((764, 789), 'h5py.File', 'h5py.File', (['file_name', '"""r"""'], {}), "(file_name, 'r')\n", (773, 789), False, 'import h5py\n'), ((3306, 3332), 'h5py.File', 'h5py.File', (['file_name', 'mode'], {}), '(file_name, mode)\n', (3315, 3332), False, 'import h5py\n'), ((1684, 1706), 'numpy.array', 'np.array', (['fiber', 'float'], {}), '(fiber, float)\n', (1692, 1706), True, 'import numpy as np\n')] |
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""st for line_search."""
import pytest
import numpy as onp
from scipy.optimize.linesearch import line_search_wolfe2 as osp_line_search
import mindspore.numpy as mnp
from mindspore import context
from mindspore.scipy.optimize.line_search import line_search as msp_line_search
from .utils import match_array
# Run every test in this module in MindSpore graph (static compilation) mode.
context.set_context(mode=context.GRAPH_MODE)
def _scalar_func_1(np):
def f(x):
return -x - x ** 3 + x ** 4
def fprime(x):
return -1 - 3 * x ** 2 + 4 * x ** 3
return f, fprime
def _scalar_func_2(np):
def f(x):
return np.exp(-4 * x) + x ** 2
def fprime(x):
return -4 * np.exp(-4 * x) + 2 * x
return f, fprime
def _scalar_func_3(np):
def f(x):
return -np.sin(10 * x)
def fprime(x):
return -10 * np.cos(10 * x)
return f, fprime
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
@pytest.mark.parametrize('maxiter, func, x, p', [(10, _scalar_func_1, 0., 1.),
                                                 (10, _scalar_func_2, 0., 1.),
                                                 (10, _scalar_func_3, 0., 1.)])
def test_scalar_search(maxiter, func, x, p):
    """
    Feature: ALL TO ALL
    Description: test cases for 1-d function
    Expectation: the result match scipy
    """
    # Reference result from SciPy's Wolfe line search.
    scipy_f, scipy_fp = func(onp)
    scipy_res = osp_line_search(scipy_f, scipy_fp, onp.array(x), onp.array(p),
                                maxiter=maxiter)
    # Result from the MindSpore implementation under test.
    ms_f, _ = func(mnp)
    ms_res = msp_line_search(ms_f, mnp.array(x), mnp.array(p), maxiter=maxiter)
    # a_k is the accepted step length, f_k the objective value at that point.
    match_array(ms_res.a_k, scipy_res[0], error=5)
    match_array(ms_res.f_k, scipy_res[3], error=5)
def _line_func_1(np, *args):
def f(x):
return np.dot(x, x)
def fprime(x):
return 2 * x
return f, fprime
def _line_func_2(np, *args):
def f(x):
A = args[0]
return np.dot(x, np.dot(A, x)) + 1
def fprime(x):
A = args[0]
return np.dot(A + A.T, x)
return f, fprime
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
@pytest.mark.parametrize('maxiter, func, x, p',
                         [(10, _line_func_1, [1.13689136, 0.09772497, 0.58295368, -0.39944903, 0.37005589],
                           [-1.30652685, 1.65813068, -0.11816405, -0.6801782, 0.66638308]),
                          (10, _line_func_1, [-0.52118931, -1.84306955, -0.477974, -0.47965581, 0.6203583],
                           [0.69845715, 0.00377089, 0.93184837, 0.33996498, -0.01568211]),
                          (10, _line_func_2, [0.15634897, 1.23029068, 1.20237985, -0.38732682, -0.30230275],
                           [-1.04855297, -1.42001794, -1.70627019, 1.9507754, -0.50965218]),
                          (10, _line_func_2, [0.42833187, 0.06651722, 0.3024719, -0.63432209, -0.36274117],
                           [-0.67246045, -0.35955316, -0.81314628, -1.7262826, 0.17742614])])
def test_line_search(maxiter, func, x, p):
    """
    Feature: ALL TO ALL
    Description: test cases for n-d function
    Expectation: the result match scipy
    """
    # Fixed 5x5 matrix used as the quadratic form for _line_func_2
    # (presumably sampled once from a seeded RNG and hard-coded -- confirm).
    A = [[1.76405235, 0.40015721, 0.97873798, 2.2408932, 1.86755799],
         [-0.97727788, 0.95008842, -0.15135721, -0.10321885, 0.4105985],
         [0.14404357, 1.45427351, 0.76103773, 0.12167502, 0.44386323],
         [0.33367433, 1.49407907, -0.20515826, 0.3130677, -0.85409574],
         [-2.55298982, 0.6536186, 0.8644362, -0.74216502, 2.26975462]]
    # Reference: SciPy's Wolfe line search on the NumPy version of the function.
    osp_x, osp_p, osp_A = onp.array(x), onp.array(p), onp.array(A)
    osp_f, osp_fp = func(onp, osp_A)
    osp_res = osp_line_search(osp_f, osp_fp, osp_x, osp_p, maxiter=maxiter)
    # Implementation under test: the MindSpore line search.
    msp_x, msp_p, msp_A = mnp.array(x), mnp.array(p), mnp.array(A)
    msp_f, _ = func(mnp, msp_A)
    msp_res = msp_line_search(msp_f, msp_x, msp_p, maxiter=maxiter)
    # a_k: accepted step length, f_k: objective value at the accepted point.
    match_array(msp_res.a_k, osp_res[0], error=5)
    match_array(msp_res.f_k, osp_res[3], error=5)
| [
"mindspore.context.set_context",
"scipy.optimize.linesearch.line_search_wolfe2",
"numpy.array",
"mindspore.numpy.array",
"pytest.mark.parametrize",
"mindspore.scipy.optimize.line_search.line_search"
] | [((979, 1023), 'mindspore.context.set_context', 'context.set_context', ([], {'mode': 'context.GRAPH_MODE'}), '(mode=context.GRAPH_MODE)\n', (998, 1023), False, 'from mindspore import context\n'), ((1616, 1765), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""maxiter, func, x, p"""', '[(10, _scalar_func_1, 0.0, 1.0), (10, _scalar_func_2, 0.0, 1.0), (10,\n _scalar_func_3, 0.0, 1.0)]'], {}), "('maxiter, func, x, p', [(10, _scalar_func_1, 0.0, \n 1.0), (10, _scalar_func_2, 0.0, 1.0), (10, _scalar_func_3, 0.0, 1.0)])\n", (1639, 1765), False, 'import pytest\n'), ((2873, 3555), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""maxiter, func, x, p"""', '[(10, _line_func_1, [1.13689136, 0.09772497, 0.58295368, -0.39944903, \n 0.37005589], [-1.30652685, 1.65813068, -0.11816405, -0.6801782, \n 0.66638308]), (10, _line_func_1, [-0.52118931, -1.84306955, -0.477974, \n -0.47965581, 0.6203583], [0.69845715, 0.00377089, 0.93184837, \n 0.33996498, -0.01568211]), (10, _line_func_2, [0.15634897, 1.23029068, \n 1.20237985, -0.38732682, -0.30230275], [-1.04855297, -1.42001794, -\n 1.70627019, 1.9507754, -0.50965218]), (10, _line_func_2, [0.42833187, \n 0.06651722, 0.3024719, -0.63432209, -0.36274117], [-0.67246045, -\n 0.35955316, -0.81314628, -1.7262826, 0.17742614])]'], {}), "('maxiter, func, x, p', [(10, _line_func_1, [\n 1.13689136, 0.09772497, 0.58295368, -0.39944903, 0.37005589], [-\n 1.30652685, 1.65813068, -0.11816405, -0.6801782, 0.66638308]), (10,\n _line_func_1, [-0.52118931, -1.84306955, -0.477974, -0.47965581, \n 0.6203583], [0.69845715, 0.00377089, 0.93184837, 0.33996498, -\n 0.01568211]), (10, _line_func_2, [0.15634897, 1.23029068, 1.20237985, -\n 0.38732682, -0.30230275], [-1.04855297, -1.42001794, -1.70627019, \n 1.9507754, -0.50965218]), (10, _line_func_2, [0.42833187, 0.06651722, \n 0.3024719, -0.63432209, -0.36274117], [-0.67246045, -0.35955316, -\n 0.81314628, -1.7262826, 0.17742614])])\n", (2896, 3555), False, 'import pytest\n'), ((2113, 
2174), 'scipy.optimize.linesearch.line_search_wolfe2', 'osp_line_search', (['osp_f', 'osp_fp', 'osp_x', 'osp_p'], {'maxiter': 'maxiter'}), '(osp_f, osp_fp, osp_x, osp_p, maxiter=maxiter)\n', (2128, 2174), True, 'from scipy.optimize.linesearch import line_search_wolfe2 as osp_line_search\n'), ((2261, 2314), 'mindspore.scipy.optimize.line_search.line_search', 'msp_line_search', (['msp_f', 'msp_x', 'msp_p'], {'maxiter': 'maxiter'}), '(msp_f, msp_x, msp_p, maxiter=maxiter)\n', (2276, 2314), True, 'from mindspore.scipy.optimize.line_search import line_search as msp_line_search\n'), ((4367, 4428), 'scipy.optimize.linesearch.line_search_wolfe2', 'osp_line_search', (['osp_f', 'osp_fp', 'osp_x', 'osp_p'], {'maxiter': 'maxiter'}), '(osp_f, osp_fp, osp_x, osp_p, maxiter=maxiter)\n', (4382, 4428), True, 'from scipy.optimize.linesearch import line_search_wolfe2 as osp_line_search\n'), ((4543, 4596), 'mindspore.scipy.optimize.line_search.line_search', 'msp_line_search', (['msp_f', 'msp_x', 'msp_p'], {'maxiter': 'maxiter'}), '(msp_f, msp_x, msp_p, maxiter=maxiter)\n', (4558, 4596), True, 'from mindspore.scipy.optimize.line_search import line_search as msp_line_search\n'), ((2072, 2084), 'numpy.array', 'onp.array', (['x'], {}), '(x)\n', (2081, 2084), True, 'import numpy as onp\n'), ((2086, 2098), 'numpy.array', 'onp.array', (['p'], {}), '(p)\n', (2095, 2098), True, 'import numpy as onp\n'), ((2220, 2232), 'mindspore.numpy.array', 'mnp.array', (['x'], {}), '(x)\n', (2229, 2232), True, 'import mindspore.numpy as mnp\n'), ((2234, 2246), 'mindspore.numpy.array', 'mnp.array', (['p'], {}), '(p)\n', (2243, 2246), True, 'import mindspore.numpy as mnp\n'), ((4275, 4287), 'numpy.array', 'onp.array', (['x'], {}), '(x)\n', (4284, 4287), True, 'import numpy as onp\n'), ((4289, 4301), 'numpy.array', 'onp.array', (['p'], {}), '(p)\n', (4298, 4301), True, 'import numpy as onp\n'), ((4303, 4315), 'numpy.array', 'onp.array', (['A'], {}), '(A)\n', (4312, 4315), True, 'import numpy as onp\n'), 
((4456, 4468), 'mindspore.numpy.array', 'mnp.array', (['x'], {}), '(x)\n', (4465, 4468), True, 'import mindspore.numpy as mnp\n'), ((4470, 4482), 'mindspore.numpy.array', 'mnp.array', (['p'], {}), '(p)\n', (4479, 4482), True, 'import mindspore.numpy as mnp\n'), ((4484, 4496), 'mindspore.numpy.array', 'mnp.array', (['A'], {}), '(A)\n', (4493, 4496), True, 'import mindspore.numpy as mnp\n')] |
"""
Computes the reflectance curves in figures 7 and 8
"""
import os
import imageio
import numpy as np
import matplotlib.pyplot as plt
from utils.uhi import UHIData
def fig_reflectance(sample_dir, mask_dir, model_path=None, median_centered=True):
    """
    Create reflectance figures from samples and masks contained in the given directories

    :param sample_dir: The path to the directory containing the UHI files (A.h5, B.h5.. etc)
    :param mask_dir: The path to the directory containing the masks (A-1.png, B-2.png.. etc)
    :param model_path: [Optional] Path to a gaussian process model (.h5) to use instead of
        the embedded calibration data
    :param median_centered: If true, each spectrum is centered around the average median
    :return: None (figures are displayed with plt.show())
    """
    # Collect mask files named "<sample>-<mask>.png"
    mask_files = sorted(x for x in os.listdir(mask_dir)
                        if x.endswith('.png') and '-' in x)
    # Group mask file names by the UHI sample id they belong to
    samples = {}
    for fname in mask_files:
        fbase = os.path.splitext(fname)[0]
        s_id, m_id = fbase.split('-')  # exactly one dash expected
        samples.setdefault(s_id, []).append(fname)
    # Iterate over samples and masks
    for s_id, masks in samples.items():
        # Read uhi file and apply the standard corrections
        uhi = UHIData()
        uhi.read_hdf5(os.path.join(sample_dir, s_id + '.h5'))
        uhi.subtract_ambient()
        uhi.correct_gain()
        d = uhi.px.shape[2]  # number of spectral bands
        # Broadcast the wavelength axis over the full image cube
        wl = np.broadcast_to(uhi.wl[np.newaxis, np.newaxis, :], shape=uhi.px.shape)
        # Calculate reflectance (optionally with an external regression model)
        uhi.calc_refl(model_path)
        # Plot reflectance curves for each mask of this sample
        for mask in masks:
            print(f'Plotting {os.path.splitext(mask)[0]} ...')
            im_mask = imageio.imread(os.path.join(mask_dir, mask))
            if len(im_mask.shape) > 2:
                im_mask = im_mask[:, :, 0]  # keep a single channel of RGB(A) masks
            # Convert to boolean
            im_mask = im_mask > 127
            # Extract spectra of the masked pixels as (n_pixels, n_bands)
            s_refl = uhi.refl[im_mask, :].reshape((-1, d))
            s_wl = wl[im_mask, :].reshape((-1, d))
            s_refl_median = np.median(s_refl, axis=0)
            if median_centered:
                # Remove each pixel's mean offset from the median spectrum
                s_refl -= np.mean(s_refl - s_refl_median[np.newaxis, :], axis=1)[:, np.newaxis]
            # Plot reflectance curve for mask
            plt.figure()
            plt.title(f'Sample {os.path.splitext(mask)[0]}')
            plt.scatter(s_wl.ravel(), s_refl.ravel(), c='k', s=0.05)
            plt.scatter(uhi.wl, s_refl_median, c='r', s=1.0)
            # Clip the y-range using the central bands (band edges are noisier)
            plt.ylim([np.min(s_refl[:, 30:-20]), np.max(s_refl[:, 30:-20])])
            plt.xlim([uhi.wl.min(), uhi.wl.max()])
            plt.ylabel('Reflectance [%]')
            plt.xlabel('Wavelength [nm]')
            plt.show()
if __name__ == '__main__':
    # Plot reflectance curves from masked areas and calibration data embedded in the UHI-files
    fig_reflectance(sample_dir='data' + os.path.sep + 'samples',
                    mask_dir='data' + os.path.sep + 'masks',
                    model_path=None)
    # Alternative (disabled): plot reflectance curves using an external
    # regression model instead of the embedded calibration data.
    # The triple-quoted string below is a no-op expression statement.
    """
    fig_reflectance(sample_dir='data' + os.path.sep + 'samples',
                    mask_dir='data' + os.path.sep + 'masks',
                    model_path=os.path.join('data', 'model', 'gpflow_tiltplate_model'))
    """
| [
"matplotlib.pyplot.show",
"numpy.median",
"utils.uhi.UHIData",
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.figure",
"numpy.mean",
"numpy.min",
"os.path.splitext",
"numpy.max",
"matplotlib.pyplot.ylabel",
"numpy.broadcast_to",
"matplotlib.pyplot.xlabel",
"os.path.join",
"os.listdir"
] | [((2789, 2799), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2797, 2799), True, 'import matplotlib.pyplot as plt\n'), ((1003, 1026), 'os.path.splitext', 'os.path.splitext', (['fname'], {}), '(fname)\n', (1019, 1026), False, 'import os\n'), ((1313, 1322), 'utils.uhi.UHIData', 'UHIData', ([], {}), '()\n', (1320, 1322), False, 'from utils.uhi import UHIData\n'), ((1487, 1557), 'numpy.broadcast_to', 'np.broadcast_to', (['uhi.wl[np.newaxis, np.newaxis, :]'], {'shape': 'uhi.px.shape'}), '(uhi.wl[np.newaxis, np.newaxis, :], shape=uhi.px.shape)\n', (1502, 1557), True, 'import numpy as np\n'), ((809, 829), 'os.listdir', 'os.listdir', (['mask_dir'], {}), '(mask_dir)\n', (819, 829), False, 'import os\n'), ((1345, 1383), 'os.path.join', 'os.path.join', (['sample_dir', "(s_id + '.h5')"], {}), "(sample_dir, s_id + '.h5')\n", (1357, 1383), False, 'import os\n'), ((2152, 2177), 'numpy.median', 'np.median', (['s_refl'], {'axis': '(0)'}), '(s_refl, axis=0)\n', (2161, 2177), True, 'import numpy as np\n'), ((2368, 2380), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (2378, 2380), True, 'import matplotlib.pyplot as plt\n'), ((2523, 2571), 'matplotlib.pyplot.scatter', 'plt.scatter', (['uhi.wl', 's_refl_median'], {'c': '"""r"""', 's': '(1.0)'}), "(uhi.wl, s_refl_median, c='r', s=1.0)\n", (2534, 2571), True, 'import matplotlib.pyplot as plt\n'), ((2712, 2741), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Reflectance [%]"""'], {}), "('Reflectance [%]')\n", (2722, 2741), True, 'import matplotlib.pyplot as plt\n'), ((2754, 2783), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Wavelength [nm]"""'], {}), "('Wavelength [nm]')\n", (2764, 2783), True, 'import matplotlib.pyplot as plt\n'), ((1797, 1825), 'os.path.join', 'os.path.join', (['mask_dir', 'mask'], {}), '(mask_dir, mask)\n', (1809, 1825), False, 'import os\n'), ((2237, 2291), 'numpy.mean', 'np.mean', (['(s_refl - s_refl_median[np.newaxis, :])'], {'axis': '(1)'}), '(s_refl - s_refl_median[np.newaxis, :], 
axis=1)\n', (2244, 2291), True, 'import numpy as np\n'), ((2594, 2619), 'numpy.min', 'np.min', (['s_refl[:, 30:-20]'], {}), '(s_refl[:, 30:-20])\n', (2600, 2619), True, 'import numpy as np\n'), ((2621, 2646), 'numpy.max', 'np.max', (['s_refl[:, 30:-20]'], {}), '(s_refl[:, 30:-20])\n', (2627, 2646), True, 'import numpy as np\n'), ((1727, 1749), 'os.path.splitext', 'os.path.splitext', (['mask'], {}), '(mask)\n', (1743, 1749), False, 'import os\n'), ((2413, 2435), 'os.path.splitext', 'os.path.splitext', (['mask'], {}), '(mask)\n', (2429, 2435), False, 'import os\n')] |
#!/usr/bin/python3
"""Training and Validation On Segmentation Task."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
import math
import random
import shutil
import argparse
import importlib
import data_utils
import numpy as np
import pointfly as pf
import tensorflow as tf
from datetime import datetime
from tqdm import tqdm
import pdb
def main():
    """Generate point clouds with annealed Langevin dynamics on a trained
    score network and save them to ./generated_point_clouds.npy.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--load_ckpt', '-l', help='Path to a check point file for load')
    parser.add_argument('--model', '-m', help='Model to use', required=True)
    parser.add_argument('--setting', '-x', help='Setting to use', required=True)
    # NOTE(review): no default is set, so omitting --grid_size makes
    # grid_size ** 2 below fail -- confirm intended behavior.
    parser.add_argument('--grid_size', help='Batch size (default defined in setting)', type=int)
    parser.add_argument('--sample_size', default=512, type=int)
    args = parser.parse_args()

    # Dynamically import the model definition and its setting module.
    model = importlib.import_module(args.model)
    setting_path = os.path.join(os.path.dirname(__file__), args.model)
    sys.path.append(setting_path)
    setting = importlib.import_module(args.setting)

    ######################################################################
    # Placeholders
    is_training = tf.placeholder(tf.bool, name='is_training')
    pts_fts = tf.placeholder(tf.float32, shape=(None, args.sample_size, setting.data_dim), name='pts_fts')
    labels = tf.placeholder(tf.int32, shape=(None,), name='labels')
    sigma = tf.placeholder(tf.float32, shape=(None,), name='sigma')
    ######################################################################
    features_augmented = None
    points_augmented = pts_fts
    # The network predicts the score (gradient of the log-density) per point.
    net = model.Net(points_augmented, features_augmented, labels, is_training, setting)
    scores = net.logits

    init_op = tf.group(tf.global_variables_initializer(), tf.local_variables_initializer())
    saver = tf.train.Saver(max_to_keep=None)

    # Geometric noise schedule from sigma=0.3 down to sigma=0.01 (10 levels).
    sigmas = np.exp(np.linspace(np.log(0.3), np.log(0.01), 10))
    label_range = np.arange(10)  # noise-level index used as conditioning label
    n_steps_each = 100               # Langevin steps per noise level
    step_lr = 0.00001                # base step size

    with tf.Session() as sess:
        sess.run(init_op)
        # Load the model
        if args.load_ckpt is not None:
            saver.restore(sess, tf.train.latest_checkpoint(args.load_ckpt))
            print('{}-Checkpoint loaded from {}!'.format(datetime.now(), args.load_ckpt))
        grid_size = args.grid_size
        results = []
        # Start from uniform noise in the unit cube.
        x_mod = np.random.rand(grid_size ** 2, args.sample_size, 3)
        for i in tqdm(range(sigmas.shape[0])):
            label_used = np.ones(grid_size ** 2) * label_range[i]
            sigma_used = sigmas[label_used.astype(int)]
            # Step size is annealed with the squared noise ratio.
            step_size = step_lr * (sigmas[i] / sigmas[-1]) ** 2
            for s in range(n_steps_each):
                results.append(np.expand_dims(x_mod, 0))
                noise = np.random.randn(*x_mod.shape) * np.sqrt(step_size * 2)
                grad = sess.run([scores],
                                feed_dict={
                                    pts_fts: x_mod,
                                    is_training: False,
                                    labels: label_used,
                                    sigma: sigma_used,
                                })
                grad = grad[0]
                # Langevin update: x <- x + eps * score + sqrt(2 * eps) * z
                x_mod = x_mod + step_size * grad + noise
        results = np.concatenate(results, axis=0)
        np.save('./generated_point_clouds.npy', results)
if __name__ == '__main__':
    # Script entry point: parse CLI arguments and run the generation loop.
    main()
| [
"argparse.ArgumentParser",
"tensorflow.reshape",
"numpy.ones",
"tensorflow.local_variables_initializer",
"tensorflow.train.latest_checkpoint",
"numpy.arange",
"sys.path.append",
"numpy.random.randn",
"os.path.dirname",
"tensorflow.placeholder",
"datetime.datetime.now",
"numpy.save",
"importl... | [((448, 473), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (471, 473), False, 'import argparse\n'), ((1098, 1133), 'importlib.import_module', 'importlib.import_module', (['args.model'], {}), '(args.model)\n', (1121, 1133), False, 'import importlib\n'), ((1209, 1238), 'sys.path.append', 'sys.path.append', (['setting_path'], {}), '(setting_path)\n', (1224, 1238), False, 'import sys\n'), ((1253, 1290), 'importlib.import_module', 'importlib.import_module', (['args.setting'], {}), '(args.setting)\n', (1276, 1290), False, 'import importlib\n'), ((1404, 1447), 'tensorflow.placeholder', 'tf.placeholder', (['tf.bool'], {'name': '"""is_training"""'}), "(tf.bool, name='is_training')\n", (1418, 1447), True, 'import tensorflow as tf\n'), ((1462, 1558), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '(None, args.sample_size, setting.data_dim)', 'name': '"""pts_fts"""'}), "(tf.float32, shape=(None, args.sample_size, setting.data_dim),\n name='pts_fts')\n", (1476, 1558), True, 'import tensorflow as tf\n'), ((1568, 1622), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32'], {'shape': '(None,)', 'name': '"""labels"""'}), "(tf.int32, shape=(None,), name='labels')\n", (1582, 1622), True, 'import tensorflow as tf\n'), ((1635, 1690), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '(None,)', 'name': '"""sigma"""'}), "(tf.float32, shape=(None,), name='sigma')\n", (1649, 1690), True, 'import tensorflow as tf\n'), ((1851, 1886), 'tensorflow.reshape', 'tf.reshape', (['sigma'], {'shape': '(-1, 1, 1)'}), '(sigma, shape=(-1, 1, 1))\n', (1861, 1886), True, 'import tensorflow as tf\n'), ((2150, 2182), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {'max_to_keep': 'None'}), '(max_to_keep=None)\n', (2164, 2182), True, 'import tensorflow as tf\n'), ((2267, 2280), 'numpy.arange', 'np.arange', (['(10)'], {}), '(10)\n', (2276, 2280), True, 'import numpy as np\n'), ((1166, 1191), 'os.path.dirname', 
'os.path.dirname', (['__file__'], {}), '(__file__)\n', (1181, 1191), False, 'import os\n'), ((2068, 2101), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (2099, 2101), True, 'import tensorflow as tf\n'), ((2103, 2135), 'tensorflow.local_variables_initializer', 'tf.local_variables_initializer', ([], {}), '()\n', (2133, 2135), True, 'import tensorflow as tf\n'), ((2333, 2345), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (2343, 2345), True, 'import tensorflow as tf\n'), ((2689, 2740), 'numpy.random.rand', 'np.random.rand', (['(grid_size ** 2)', 'args.sample_size', '(3)'], {}), '(grid_size ** 2, args.sample_size, 3)\n', (2703, 2740), True, 'import numpy as np\n'), ((3578, 3609), 'numpy.concatenate', 'np.concatenate', (['results'], {'axis': '(0)'}), '(results, axis=0)\n', (3592, 3609), True, 'import numpy as np\n'), ((3618, 3666), 'numpy.save', 'np.save', (['"""./generated_point_clouds.npy"""', 'results'], {}), "('./generated_point_clouds.npy', results)\n", (3625, 3666), True, 'import numpy as np\n'), ((2217, 2228), 'numpy.log', 'np.log', (['(0.3)'], {}), '(0.3)\n', (2223, 2228), True, 'import numpy as np\n'), ((2230, 2242), 'numpy.log', 'np.log', (['(0.01)'], {}), '(0.01)\n', (2236, 2242), True, 'import numpy as np\n'), ((2477, 2519), 'tensorflow.train.latest_checkpoint', 'tf.train.latest_checkpoint', (['args.load_ckpt'], {}), '(args.load_ckpt)\n', (2503, 2519), True, 'import tensorflow as tf\n'), ((2813, 2836), 'numpy.ones', 'np.ones', (['(grid_size ** 2)'], {}), '(grid_size ** 2)\n', (2820, 2836), True, 'import numpy as np\n'), ((2578, 2592), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (2590, 2592), False, 'from datetime import datetime\n'), ((3047, 3071), 'numpy.expand_dims', 'np.expand_dims', (['x_mod', '(0)'], {}), '(x_mod, 0)\n', (3061, 3071), True, 'import numpy as np\n'), ((3096, 3125), 'numpy.random.randn', 'np.random.randn', (['*x_mod.shape'], {}), '(*x_mod.shape)\n', (3111, 3125), 
True, 'import numpy as np\n'), ((3128, 3150), 'numpy.sqrt', 'np.sqrt', (['(step_size * 2)'], {}), '(step_size * 2)\n', (3135, 3150), True, 'import numpy as np\n')] |
"""
Compute cross-spectral density (CSD) matrices
"""
from __future__ import print_function
import warnings
import argparse
import numpy as np
import mne
from mne.time_frequency import csd_morlet
from config import fname, n_jobs, csd_tmin, csd_tmax, freq_bands, conditions
# Be verbose
mne.set_log_level('INFO')
# Handle command line arguments
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('subject', metavar='sub###', help='The subject to process')
args = parser.parse_args()
subject = args.subject
print('Processing subject:', subject)
# Read the epochs
print('Reading epochs...')
epochs = mne.read_epochs(fname.epo(subject=subject))
# Suppress warning about wavelet length.
warnings.simplefilter('ignore')
# Load the report to add figures to
report = mne.open_report(fname.report(subject=subject))
# Individual frequencies to estimate the CSD for
fmin = freq_bands[0][0]
fmax = freq_bands[-1][1]
frequencies = np.arange(fmin, fmax + 1, 2)
# Compute CSD matrices for each frequency and each condition.
for condition in conditions:
print('Condition:', condition)
# Remove the mean during the time interval for which we compute the CSD
epochs_baselined = epochs[condition].apply_baseline((csd_tmin, csd_tmax))
# Compute CSD for the desired time interval
csd = csd_morlet(epochs_baselined, frequencies=frequencies, tmin=csd_tmin,
tmax=csd_tmax, decim=20, n_jobs=n_jobs, verbose=True)
# Save the CSD matrices
csd.save(fname.csd(condition=condition, subject=subject))
report.add_figs_to_section(csd.plot(show=False),
['CSD for %s' % condition],
section='Sensor-level', replace=True)
# Also compute the CSD for the baseline period (use all epochs for this,
# regardless of condition). This way, we can compare the change in power caused
# by the presentation of the stimulus.
epochs = epochs.apply_baseline((-0.2, 0)) # Make sure data is zero-mean
csd_baseline = csd_morlet(epochs, frequencies=frequencies, tmin=-0.2, tmax=0,
decim=20, n_jobs=n_jobs, verbose=True)
csd_baseline.save(fname.csd(condition='baseline', subject=subject))
report.add_figs_to_section(csd_baseline.plot(show=False), ['CSD for baseline'],
section='Sensor-level', replace=True)
# Save the report as HDF5 for later loading (in many other script, the report
# is loaded with a context manager, which takes care of this automatically, but
# here we have to do it manually).
report.save(fname.report(subject=subject), overwrite=True)
# Render the report as HTML as well
report.save(fname.report_html(subject=subject), overwrite=True,
open_browser=False)
| [
"config.fname.csd",
"config.fname.epo",
"warnings.simplefilter",
"argparse.ArgumentParser",
"mne.set_log_level",
"config.fname.report_html",
"mne.time_frequency.csd_morlet",
"numpy.arange",
"config.fname.report"
] | [((289, 314), 'mne.set_log_level', 'mne.set_log_level', (['"""INFO"""'], {}), "('INFO')\n", (306, 314), False, 'import mne\n'), ((357, 401), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '__doc__'}), '(description=__doc__)\n', (380, 401), False, 'import argparse\n'), ((711, 742), 'warnings.simplefilter', 'warnings.simplefilter', (['"""ignore"""'], {}), "('ignore')\n", (732, 742), False, 'import warnings\n'), ((949, 977), 'numpy.arange', 'np.arange', (['fmin', '(fmax + 1)', '(2)'], {}), '(fmin, fmax + 1, 2)\n', (958, 977), True, 'import numpy as np\n'), ((2015, 2120), 'mne.time_frequency.csd_morlet', 'csd_morlet', (['epochs'], {'frequencies': 'frequencies', 'tmin': '(-0.2)', 'tmax': '(0)', 'decim': '(20)', 'n_jobs': 'n_jobs', 'verbose': '(True)'}), '(epochs, frequencies=frequencies, tmin=-0.2, tmax=0, decim=20,\n n_jobs=n_jobs, verbose=True)\n', (2025, 2120), False, 'from mne.time_frequency import csd_morlet\n'), ((641, 667), 'config.fname.epo', 'fname.epo', ([], {'subject': 'subject'}), '(subject=subject)\n', (650, 667), False, 'from config import fname, n_jobs, csd_tmin, csd_tmax, freq_bands, conditions\n'), ((805, 834), 'config.fname.report', 'fname.report', ([], {'subject': 'subject'}), '(subject=subject)\n', (817, 834), False, 'from config import fname, n_jobs, csd_tmin, csd_tmax, freq_bands, conditions\n'), ((1318, 1445), 'mne.time_frequency.csd_morlet', 'csd_morlet', (['epochs_baselined'], {'frequencies': 'frequencies', 'tmin': 'csd_tmin', 'tmax': 'csd_tmax', 'decim': '(20)', 'n_jobs': 'n_jobs', 'verbose': '(True)'}), '(epochs_baselined, frequencies=frequencies, tmin=csd_tmin, tmax=\n csd_tmax, decim=20, n_jobs=n_jobs, verbose=True)\n', (1328, 1445), False, 'from mne.time_frequency import csd_morlet\n'), ((2161, 2209), 'config.fname.csd', 'fname.csd', ([], {'condition': '"""baseline"""', 'subject': 'subject'}), "(condition='baseline', subject=subject)\n", (2170, 2209), False, 'from config import fname, n_jobs, csd_tmin, 
csd_tmax, freq_bands, conditions\n'), ((2562, 2591), 'config.fname.report', 'fname.report', ([], {'subject': 'subject'}), '(subject=subject)\n', (2574, 2591), False, 'from config import fname, n_jobs, csd_tmin, csd_tmax, freq_bands, conditions\n'), ((2658, 2692), 'config.fname.report_html', 'fname.report_html', ([], {'subject': 'subject'}), '(subject=subject)\n', (2675, 2692), False, 'from config import fname, n_jobs, csd_tmin, csd_tmax, freq_bands, conditions\n'), ((1504, 1551), 'config.fname.csd', 'fname.csd', ([], {'condition': 'condition', 'subject': 'subject'}), '(condition=condition, subject=subject)\n', (1513, 1551), False, 'from config import fname, n_jobs, csd_tmin, csd_tmax, freq_bands, conditions\n')] |
import sys
import os
import numpy as np
import scipy.io
import scipy.sparse
import numba
import random
import multiprocessing as mp
import subprocess
import cytoolz as toolz
import collections
from itertools import chain
import regex as re
import yaml
import logging
import time
import gzip
import pandas as pd
from functools import partial
from typing import NamedTuple
from pysam import AlignmentFile
from .util import compute_edit_distance, read_gene_map_from_gtf
from .fastq_io import read_fastq
from .barcode import ErrorBarcodeHash, ErrorBarcodeHashConstraint
from .estimate_cell_barcode import get_cell_whitelist
# Configure root logging once at import time: timestamped, level-tagged messages.
logging.basicConfig(
    format='%(asctime)s: %(levelname)s: %(message)s',
    level=logging.INFO,
)
# Module-level logger shared by every function in this file.
logger = logging.getLogger(__name__)
def format_fastq(*fastq, config, method, fastq_out, cb_count,
                 num_thread=4, max_num_cell=1000000):
    """
    Merging fastq reads by putting the cell barcodes and UMI sequences
    to the headers of the cDNA reads
    :param config: the config file
    :param method: the library preparation protocol, e.g., can be one of
        10X, Drop-seq, InDrop, Seq-Well, CEL-seq2, sci-RNA-seq, SPLiT-seq,
        you can add protocol to the configure file easily
        by specifying the read structures.
        A template configuration file is provided in scumi/config.yaml
    :param fastq: input fastq files
    :param fastq_out: the output fastq file
    :param cb_count: an output file containing the # reads for each cell barcode
    :param num_thread: int
        the number of cpu cores to use
    :param max_num_cell: int
        the maximum number of cells
    """
    # Load the protocol section of the YAML config.
    with open(config, 'r') as stream:
        config_dict = yaml.safe_load(stream)
    config_dict = config_dict[method]
    num_read = config_dict['num_read']
    num_fastq = len(fastq)
    # The number of fastq files must match the protocol's read structure.
    if num_fastq != num_read:
        logger.error(f'Error: the number of input fastq files {num_fastq} is different '
                     f'from the number of fastq files {num_read} detected in the config file')
        sys.exit(-1)
    # Build one regex template (and barcode filter) per read in the structure.
    read_regex_str, barcode_filter, read_regex_str_qual = \
        zip(*[_extract_input_read_template('read' + str(i), config_dict)
              for i in range(1, num_read + 1)])
    barcode_filter_dict = dict()
    for d in barcode_filter:
        barcode_filter_dict.update(d)
    # Header template describing where CB/UMI tags go in the output read names.
    read_template = _infer_read_template(read_regex_str)
    # select
    read_regex_list = [re.compile(z) for z in read_regex_str_qual]
    # Worker function with everything bound except (chunk, out-file, count-file).
    format_read = partial(_format_read, read_regex_list=read_regex_list,
                          read_template=read_template.read_template,
                          cb_tag=read_template.cb_tag,
                          ub_len=read_template.ub_len,
                          barcode_filter_dict=barcode_filter_dict)
    chunk_size = 8000
    # Iterate all input fastq files in lockstep, in chunks of chunk_size reads.
    fastq_reader = [read_fastq(fastq_i) for fastq_i in fastq]
    chunks = toolz.partition_all(chunk_size, zip(*fastq_reader))
    num_cpu = mp.cpu_count()
    num_thread = num_thread if num_cpu > num_thread else num_cpu
    seq_chunk_obj = toolz.partition_all(num_thread, chunks)
    # One intermediate gzip fastq and one barcode-count csv per worker;
    # truncate/create them up front (the workers append).
    fastq_out_all = [fastq_out + str(x) + '.gz' for x in range(num_thread)]
    [gzip.open(x, 'wb').close() for x in fastq_out_all]
    cb_count_all = [cb_count + str(x) + '.csv' for x in range(num_thread)]
    [open(x, 'wt').close() for x in cb_count_all]
    fastq_info = collections.defaultdict(collections.Counter)
    iteration = 0
    results = []
    time_start = time.time()
    pool = mp.Pool(num_thread)
    for fastq_chunk in seq_chunk_obj:
        res = pool.starmap_async(format_read, zip(fastq_chunk, fastq_out_all, cb_count_all))
        results.append(res)
        # Bound the number of outstanding async batches to 10 per worker.
        if len(results) == num_thread * 10:
            results[0].wait()
        # Drain completed batches in submission order.
        while results and results[0].ready():
            iteration += 1
            if not (iteration % 10):
                logger.info(f'Processed {iteration * chunk_size * num_thread:,d} reads!')
            res = results.pop(0)
            chunk_info = res.get()
            _update_fastq_info(fastq_info, chunk_info)
    pool.close()
    pool.join()
    # Collect whatever results were still pending when the pool finished.
    for res in results:
        chunk_info = res.get()
        _update_fastq_info(fastq_info, chunk_info)
    with open('.fastq_count.tsv', 'w') as f:
        for k, v in fastq_info['read'].most_common():
            f.write(f'{k}\t{v}\n')
    # Concatenate the per-worker gzip parts into the final output file.
    # NOTE(review): built with shell=True from user-supplied paths — confirm inputs are trusted.
    cmd_cat_fastq = ' '.join(['cat'] + fastq_out_all + ['>'] + [fastq_out])
    try:
        subprocess.check_output(cmd_cat_fastq, shell=True)
        [os.remove(fastq_file) for fastq_file in fastq_out_all]
    except subprocess.CalledProcessError:
        logger.info(f'Errors in concatenate fastq files')
        sys.exit(-1)
    except OSError:
        logger.info(f'Errors in deleting fastq files')
        sys.exit(-1)
    time_used = time.time() - time_start
    logger.info(f'Formatting fastq done, taking {time_used/3600.0:.3f} hours')
    if not cb_count:
        cb_count = fastq_out + '.cb_count'
    # Merge the per-worker barcode-count tables, summing counts per barcode and
    # trimming to at most 2 * max_num_cell rows while merging to bound memory.
    df = _count_cell_barcode_umi(cb_count_all[0])
    for cb_file in cb_count_all[1:]:
        df1 = _count_cell_barcode_umi(cb_file)
        df = pd.concat([df, df1], axis=0)
        df = df.groupby(df.index).sum()
        if df.shape[0] > max_num_cell * 2:
            df = df.sort_values(by=df.columns[0], ascending=False)
            df = df.iloc[:max_num_cell, :]
    try:
        [os.remove(cb_file) for cb_file in cb_count_all]
    except OSError:
        logger.info(f'Errors in deleting cell barcode files')
        sys.exit(-1)
    df = df.sort_values(by=df.columns[0], ascending=False)
    if df.shape[0] > 0:
        df.columns = [str(x) for x in range(df.shape[1])]
        df.index.name = 'cb'
        column_name = list(df.columns.values)
        column_name[0] = 'cb_count'
        df.columns = column_name
        df.to_csv(cb_count, sep='\t')
def _update_fastq_info(fastq_info, chunk_info):
for fastq_count in chunk_info:
fastq_info['read'].update(read_pass=fastq_count[0],
read_pass_barcode=fastq_count[1],
read_pass_polyt=fastq_count[2],
read_total=fastq_count[3])
def _count_cell_barcode_umi(cb_file, chunk_size=10 ** 7):
    """Stream a tab-separated per-barcode count table and return it summed by barcode.

    The file is read in chunks of *chunk_size* rows (first column is the index)
    and the running total is re-aggregated after every chunk, so memory stays
    bounded even for very large files.
    """
    reader = pd.read_csv(cb_file, header=None, iterator=True,
                         sep='\t', index_col=0)
    totals = reader.get_chunk(chunk_size)
    totals = totals.groupby(totals.index).sum()
    while True:
        try:
            piece = reader.get_chunk(chunk_size)
        except StopIteration:
            break
        totals = pd.concat([totals, piece], axis=0)
        totals = totals.groupby(totals.index).sum()
    logger.info('Read cell barcode counts done.')
    return totals
def _extract_barcode_pos(barcode_dict, config):
barcode_reg = []
pos_all = []
barcode_filter = dict()
for barcode_and_pos in barcode_dict:
barcode, pos = barcode_and_pos
pos_all.append(pos)
barcode_reg.append('(?P<' + barcode + '>.{' +
str(pos[1] - pos[0] + 1) + '})')
try:
value = config[barcode + '_value']
barcode_filter.update({barcode: ErrorBarcodeHash(value, 1)})
except KeyError:
pass
return barcode_reg, pos_all, barcode_filter
def _extract_input_read_template(read, config):
    """Build the fastq-record regex for one read of the library structure.

    Returns (read_template, filter_dict, read_template_qual): a 4-line fastq
    regex without quality groups, the polyT/barcode filters, and the same regex
    with ``_qual`` named groups mirroring each barcode group.
    """
    # Default (barcode-read) patterns: name/plus/qual lines are captured anonymously.
    read_name = '(@.*)\\n'
    read_plus = '(\\+.*)\\n'
    read_qual = '(.*)\\n'
    filter_dict = dict()
    seq = [(key, value) for key, value in config[read].items()
           if key.startswith('cDNA')]
    # A cDNA read only needs name/seq/qual captured; no barcode parsing.
    if seq:
        read_name = '@(?P<name>.*)\\n'
        read_seq = '(?P<seq>.*)\\n'
        read_qual = '(?P<qual>.*)\\n'
        read_template = read_name + read_seq + read_plus + read_qual
        return read_template, filter_dict, read_template
    # Collect the configured barcode segments of this read.
    cell_barcode = [(key, value) for key, value in config[read].items()
                    if key.startswith('CB') and not key.endswith('value')]
    umi = [(key, value) for key, value in config[read].items()
           if key.startswith('UMI')]
    poly_t = [(key, value) for key, value in config[read].items()
              if key.startswith('polyT')]
    cb_reg, cb_pos, cb_filter = _extract_barcode_pos(cell_barcode, config[read])
    filter_dict.update(cb_filter)
    umi_reg, umi_pos, _ = _extract_barcode_pos(umi, config[read])
    # UMI groups are renamed UB to match the downstream header convention.
    umi_reg = [z.replace('UMI', 'UB') for z in umi_reg]
    pt_reg, pt_pos, _ = _extract_barcode_pos(poly_t, config[read])
    # Sort all segments by their start coordinate along the read.
    read_pos_start = [z[0] for z in cb_pos]
    read_pos_start += [z[0] for z in umi_pos]
    read_pos_start += [z[0] for z in pt_pos]
    read_pos_end = [z[1] for z in cb_pos]
    read_pos_end += [z[1] for z in umi_pos]
    read_pos_end += [z[1] for z in pt_pos]
    idx = sorted(range(len(read_pos_start)),
                 key=lambda k: read_pos_start[k])
    barcode_tag = cb_reg + umi_reg + pt_reg
    read_pos_start = [read_pos_start[i] for i in idx]
    read_pos_end = [read_pos_end[i] for i in idx]
    barcode_tag = [barcode_tag[i] for i in idx]
    # Gaps between consecutive segments become anonymous [ACGTN]{n} spacers.
    idx_skip = [read_pos_start[i+1] - read_pos_end[i] - 1
                for i in range(0, len(read_pos_start)-1)]
    barcode_skip = ['[ACGTN]{' + str(i) + '}' for i in idx_skip]
    read_seq = barcode_tag[0]
    for i in range(len(read_pos_start)-1):
        if idx_skip[i] == 0:
            read_seq += barcode_tag[i+1]
        else:
            read_seq += barcode_skip[i]
            read_seq += barcode_tag[i+1]
    filter_dict.update(_filter_ploy_t(read_seq))
    if read_pos_start[0] > 1:
        # NOTE(review): this *overwrites* the barcode regex built above instead of
        # prefixing it when the first segment does not start at base 1 — confirm intended.
        read_seq = '[ACGTN]{' + str(read_pos_start[0]-1) + '}'
    read_seq += '[ACGTN]*'
    read_seq = read_seq + '\\n'
    read_template = read_name + read_seq + read_plus + read_qual
    # Quality-line variant: mirror each named group as <name>_qual matching any chars.
    read_qual = re.sub('>', r'_qual>', read_seq)
    read_qual = re.sub('\[ACGTN\]', '.', read_qual)
    read_template_qual = read_name + read_seq + read_plus + read_qual
    return read_template, filter_dict, read_template_qual
def _filter_ploy_t(read_seq):
match = re.findall('\?P<polyT>\.{[0-9]+}', read_seq)
poly_t_count = [int(re.findall(r'\d+', z)[0]) for z in match]
poly_t_filter = {'polyT': ErrorBarcodeHash('T' * z, 1) for z in poly_t_count}
return poly_t_filter
def _replace_poly_t(read_seq):
match = re.findall('\?P<polyT>\.{[0-9]+}', read_seq)
poly_t_count = [int(re.findall(r'\d+', z)[0]) for z in match]
poly_t = ['(' + 'T'*z + ')' + '{s<=1}' for z in poly_t_count]
for z in range(len(match)):
read_seq = read_seq.replace(match[z], poly_t[z])
return read_seq
def _infer_read_template(reg_list):
class ReadInfo(NamedTuple):
cb: bool
cb_tag: list
cb_len: list
ub: bool
ub_tag: list
ub_len: list
read_template: str
cb = ub = False
cb_tag = ub_tag = []
cb_len = ub_len = []
read_template = '@'
reg = ''.join(k for k in reg_list)
if 'CB' in reg:
logger.info('Cell barcode in configure file')
cb = True
cb_seq_template = _accumulate_barcode('CB', reg)
cb_template = ':CB_' + cb_seq_template[1]
read_template += cb_template
cb_tag = cb_seq_template[0]
cb_len = cb_seq_template[2]
if 'UB' in reg:
logger.info('UMI in config file')
ub = True
ub_seq_template = _accumulate_barcode('UB', reg)
ub_template = ':UB_' + ub_seq_template[1]
read_template += ub_template
ub_tag = ub_seq_template[0]
ub_len = ub_seq_template[2]
read_template += ':{name}'
read_template += '\n{seq}\n+\n{qual}\n'
return ReadInfo(cb=cb, cb_tag=cb_tag, cb_len=cb_len,
ub=ub, ub_tag=ub_tag, ub_len=ub_len,
read_template=read_template)
def _accumulate_barcode(barcode, seq):
barcode_num = [sub_str[0] for sub_str in
seq.split('?P<' + re.escape(barcode))][1:]
status = '>' in barcode_num
barcode_num = ['0' if x == '>' else x for x in barcode_num]
barcode_num = sorted(barcode_num, key=int)
if status:
barcode_num[0] = ''
barcode_seq = [barcode + num for num in barcode_num]
barcode_template = ['{' + tag + '}' for tag in barcode_seq]
barcode_template = '-'.join(barcode_template)
str_split = 'P<' + barcode + '[0-9]*>.{'
barcode_len = [sub_str for sub_str in re.split(str_split, seq)][1:]
barcode_len = [int(re.findall(r'(\d+)', barcode_i)[0])
for barcode_i in barcode_len]
return barcode_seq, barcode_template, barcode_len
def _format_read(chunk, fastq_file, cb_count_file, read_regex_list,
                 read_template, cb_tag, ub_len, barcode_filter_dict):
    """Worker: parse one chunk of multi-read fastq tuples, move barcodes into headers.

    Appends formatted reads (gzip) to *fastq_file* and per-barcode UMI T-counts
    (tsv) to *cb_count_file*.  Returns the 4-tuple
    (num_read_pass, num_read_barcode, num_read_polyt, num_read) consumed by
    ``_update_fastq_info``.
    """
    reads = []
    num_read = len(chunk)
    num_read_pass = num_read_barcode = num_read_polyt = 0
    num_regex = len(read_regex_list)
    # Per-barcode vector: position 0 counts reads, positions 1.. count 'T's at
    # each UMI base (used later to spot degenerate poly-T UMIs).
    barcode_counter = collections.defaultdict(
        partial(np.zeros, shape=(ub_len[0] + 1), dtype=np.uint32))
    ignore_read = False
    for read_i in chunk:
        # Match every read of the tuple against its structure regex; a single
        # failure drops the whole read tuple.
        read_dict_list = []
        for i, regex_i in enumerate(read_regex_list):
            read_match = regex_i.match(read_i[i])
            if not read_match:
                ignore_read = True
                break
            read_dict_list.append(read_match.groupdict())
        if ignore_read:
            ignore_read = False
            continue
        # Merge all reads' named groups into a single dict.
        read1_dict = read_dict_list[0]
        if num_regex > 1:
            for regex_id in range(1, num_regex):
                read1_dict.update(read_dict_list[regex_id])
        # Error-correct each cell barcode through its whitelist hash when one
        # is configured; a falsy (uncorrectable) segment drops the read.
        cb = [barcode_filter_dict[tag][read1_dict[tag]]
              if tag in barcode_filter_dict.keys() else read1_dict[tag]
              for tag in cb_tag]
        if all(cb):
            cb = '-'.join(cb)
            num_read_barcode += 1
        else:
            ignore_read = True
        ub = read1_dict['UB']
        # Optional poly-T check (only when the structure defines a polyT group).
        try:
            poly_t = read1_dict['polyT']
            if not barcode_filter_dict['polyT'][poly_t]:
                ignore_read = True
            else:
                num_read_polyt += 1
        except KeyError:
            pass
        if ignore_read:
            ignore_read = False
            continue
        num_read_pass += 1
        if len(read1_dict['seq']) >= 1:
            read1_dict = read_template.format_map(read1_dict)
            reads.append(read1_dict)
            # First slot always increments ('T' prefix); the rest flag UMI 'T's.
            barcode_counter[cb] += [x == 'T' for x in 'T' + ub]
    # Append this chunk's formatted reads to the worker's gzip shard.
    with gzip.open(fastq_file, 'ab') as fastq_hd:
        for read in reads:
            fastq_hd.write(bytes(read, 'utf8'))
    df = pd.DataFrame.from_dict(barcode_counter, orient='index')
    if df.shape[0] > 0:
        df = df.sort_values(by=df.columns[0], ascending=False)
        df.index.name = 'cb'
        column_name = list(df.columns.values)
        column_name[0] = 'cb_count'
        df.columns = column_name
        df.to_csv(cb_count_file, sep='\t', mode='a', header=False)
    return num_read_pass, num_read_barcode, num_read_polyt, num_read
def _construct_barcode_regex(bam):
    """Inspect the first alignment of *bam* and build a read-name barcode parser.

    Returns (compiled_regex, first_cell_barcode, read_mode) where read_mode is
    'r' for SAM and 'rb' for BAM.  Exits when the read names carry neither a
    CB_ nor a UB_ tag.
    """
    read_mode = 'r' if bam.endswith('.sam') else 'rb'
    alignment_file = AlignmentFile(bam, mode=read_mode)
    first_alignment = next(alignment_file)
    alignment_file.close()
    qname = first_alignment.qname
    # Which barcode tags did format_fastq embed in the read names?
    present = {tag for tag in ['CB_', 'UB_'] if tag in qname}
    pattern = '.*'
    if 'CB_' in present:
        pattern += ':CB_(?P<CB>[A-Z\-]+)'
    if 'UB_' in present:
        pattern += ':UB_(?P<UB>[A-Z\-]+)'
    if pattern == '.*':
        logger.error('Error: no cell barcodes and UMIs.')
        sys.exit(-1)
    pattern += ':*'
    barcode_parser = re.compile(pattern)
    match = barcode_parser.match(qname)
    cb = _extract_tag(match, 'CB')
    return barcode_parser, cb, read_mode
def _extract_tag(match, tag):
try:
tag = match.group(tag)
except IndexError:
tag = None
return tag
def count_feature(*cb, bam, molecular_info_h5, gtf, cb_count, feature_tag='XT:Z',
                  expect_cell=False, force_cell=False, all_cell=False,
                  depth_threshold=1, cell_barcode_whitelist=None):
    """
    Count the number of reads/UMIs mapped to each gene
    :param bam: the input sam/bam file
    :param molecular_info_h5: output the molecular info
    :param cb: the input cell barcode files, can be empty or None
    :param cell_barcode_whitelist: a file contain the selected cell barcodes
    :param gtf: a GTF file
    :param cb_count: a file containing the number of reads mapped to each cell barcode,
           output from format_fastq
    :param feature_tag: the tag representing genes in the input bam file
    :param depth_threshold: only considering UMIs that have at least
           depth_threshold reads support
    :param expect_cell: the expected number of cells in the bam file
    :param force_cell: force to return the number of cells set by expect_cell
    :param all_cell: keep all cell barcodes - can be very slow
    """
    # Learn the CB/UB read-name layout from the first alignment.
    barcode_parser, first_cb, read_mode = _construct_barcode_regex(bam)
    num_cb = len(first_cb.split('-'))
    num_cb_file = len(cb)
    # Pad or trim the barcode-file list to match the barcodes seen in the bam.
    if 0 == num_cb_file:
        cb = [None] * num_cb
    elif num_cb != num_cb_file:
        logger.error(f'Error: the number of input cell barcodes files {num_cb_file} '
                     f'is different from the number of cell barcodes {num_cb} '
                     f'detected in the bam file')
        if num_cb > num_cb_file:
            cb = cb + [None] * (num_cb - num_cb_file)
        else:
            cb = cb[:num_cb]
    # TODO: no cell barcodes detected
    correct_cb_fun, cb_list, cb_remove = _construct_cb_filter(
        cb_count, cb, expect_cell, force_cell, all_cell, cell_barcode_whitelist)
    gene_map_dict = read_gene_map_from_gtf(gtf)
    logger.info('Counting molecular info')
    time_start_count = time.time()
    sam_file = AlignmentFile(bam, mode=read_mode)
    _count_feature_partial = partial(_count_feature,
                                     gene_map_dict=gene_map_dict,
                                     barcode_parser=barcode_parser,
                                     correct_cb_fun=correct_cb_fun,
                                     sam_file=sam_file,
                                     feature_tag=feature_tag)
    track = sam_file.fetch(until_eof=True)
    # Single pass over all alignments.
    map_info, read_in_cell, molecular_info = _count_feature_partial(track)
    time_count = time.time() - time_start_count
    logger.info(f'Counting molecular info done - {time_count/3600.0:.3f} hours, '
                f'{int(3600.0 * map_info["num_alignment"]/time_count):,d} '
                f'alignments/hour\n')
    # TODO: still output results
    if len(molecular_info) == 0:
        logger.error('Error: no reads mapped to features.')
        sys.exit(-1)
    name = ['cell',
            'gene',
            'umi',
            'depth',
            ]
    logger.info('Converting to a dataframe')
    convert_time = time.time()
    # molecular_info maps (cell, gene, umi) -> read depth; flatten to a table.
    molecular_info = pd.Series(molecular_info).reset_index()
    molecular_info.columns = name
    for col in name[:3]:
        molecular_info.loc[:, col] = molecular_info[col].astype('category')
    convert_time = time.time() - convert_time
    logger.info(f'Converting to a dataframe done, '
                f'taking {convert_time/60.0:.3f} minutes\n')
    molecular_info.columns = name
    # Keep only candidate cells; drop explicitly-blacklisted barcodes.
    if num_cb > 1 and cb_list:
        molecular_info = molecular_info.loc[molecular_info['cell'].isin(cb_list), :]
    if cb_remove:
        molecular_info = molecular_info.loc[~molecular_info['cell'].isin(cb_remove), :]
    molecular_info = molecular_info.loc[molecular_info['depth'] >= 0.95, :]
    # Round depths to the nearest integer for compact storage.
    molecular_info['depth'] = \
        np.floor(molecular_info['depth'].values + 0.5).astype('uint32')
    molecular_info = molecular_info.sort_values(name[:3])
    molecular_info = molecular_info.reset_index(drop=True)
    map_info = pd.Series(map_info)
    read_in_cell = pd.DataFrame.from_dict(read_in_cell, orient='index')
    logger.info('Writing molecular info')
    write_time = time.time()
    feature = gene_map_dict.values()
    feature = pd.Series(index=set(feature))
    feature = feature.sort_index()
    # Persist everything to one HDF5 store for the matrix-conversion step.
    with pd.HDFStore(molecular_info_h5, mode='w') as hf:
        hf.put('molecular_info', molecular_info, format='table', data_columns=True)
        hf.put('map_info', map_info)
        hf.put('feature', feature)
        hf.put('read_in_cell', read_in_cell)
    del molecular_info
    write_time = time.time() - write_time
    logger.info(f'Writings molecular info done, '
                f'taking {write_time/60.0:.3f} minutes\n')
    _convert_count_to_matrix(molecular_info_h5, molecular_info_h5,
                             depth_threshold=depth_threshold)
def _count_feature(track, gene_map_dict, barcode_parser,
                   correct_cb_fun, sam_file, feature_tag='XT:Z'):
    """Accumulate (cell, gene, UMI) read depths over a stream of alignments.

    Alignments for the same read are assumed to be adjacent in *track*
    (qname-grouped); per-read state is finalized when the qname changes.
    Returns (map_info counters, reads-per-cell Counter,
    {(cb, gene, umi): depth} defaultdict).
    """
    search_undetermined = re.compile('N').search
    # Per-read rolling state, reset whenever a new qname appears.
    read_name = None
    feature_tag_value_pre = None
    filt_multiple_gene_barcode = False
    count_read = False
    cb_i = feature_tag_value = ub_i = None
    num_aln_read = 0
    pass_filter = False
    map_info = collections.defaultdict(int)
    read_in_cell = collections.Counter()
    molecular_info = collections.defaultdict(int)
    for aln in track:
        # Progress log every 10M alignments.
        if map_info['num_alignment'] and not map_info['num_alignment'] % 10000000:
            logger.info(f'Parsed {map_info["num_alignment"]:,d} alignments.')
            logger.info(f'{map_info["num_unique_read"]:,d} unique reads, '
                        f'{map_info["num_count_read"]:,d} reads kept.')
            logger.info(f'{map_info["num_unmapped_read"]:,d} unmapped reads were filtered.')
            logger.info(f'{map_info["num_barcode_with_na"]:,d} reads '
                        f'were filtered for including NA in barcodes.\n')
        num_aln_read_pre = num_aln_read
        filter_read_unmapped = False
        filter_read_na = False
        filter_read_barcode = False
        map_info['num_alignment'] += 1
        num_aln_read = aln.get_tag('NH')  # number of reported alignments for this read
        new_read = aln.qname != read_name
        if new_read:
            read_name = aln.qname
            # Commit the previous read's record, if it survived all filters.
            if count_read:
                map_info['num_count_read'] += 1
                record_tuple = (cb_i, feature_tag_value, ub_i)
                molecular_info[record_tuple] += 1
            elif pass_filter and (num_aln_read_pre > 1):
                map_info['num_barcode_with_na'] += 1
            pass_filter = False
            count_read = False
            filt_multiple_gene_barcode = True
            feature_tag_value_pre = None
            map_info['num_unique_read'] += 1
            if num_aln_read == 0:
                map_info['num_unmapped_read'] += 1
                filter_read_unmapped = True
            # check cb
            match = barcode_parser.match(aln.qname)
            cb_i = _extract_tag(match, 'CB')
            cb_i_list = cb_i.split('-')
            num_na_in_cb = _count_not_specified(cb_i_list)
            # Too many N bases in any segment (or overall) disqualifies the barcode.
            if any(num_na_in_cb > 1) or sum(num_na_in_cb) > len(num_na_in_cb):
                filter_read_na = True
            cb_i = correct_cb_fun(cb_i_list)
            if cb_i:
                read_in_cell[cb_i] += 1
            elif not aln.is_unmapped:
                map_info['num_barcode_with_na'] += 1
                filter_read_barcode = True
            if filter_read_unmapped or filter_read_na or filter_read_barcode:
                count_read = False
                continue
            ub_i = _extract_tag(match, 'UB')
            # Drop UMIs containing N or made of a single repeated base.
            if ub_i and search_undetermined(ub_i):
                map_info['num_barcode_with_na'] += 1
                continue
            if ub_i and ub_i == len(ub_i) * ub_i[0]:
                map_info['num_barcode_with_na'] += 1
                continue
            # Resolve the gene: prefer the feature tag, fall back to the reference name.
            try:
                feature_tag_value = aln.get_tag(feature_tag)
            except KeyError:
                feature_tag_value = sam_file.getrname(aln.reference_id)
                if aln.get_tag('XS:Z') == 'Unassigned_Ambiguity':
                    map_info['num_barcode_with_na'] += 1
                    continue
            pass_filter = True
            filt_multiple_gene_barcode = False
            try:
                feature_tag_value = gene_map_dict[feature_tag_value]
            except KeyError:
                if num_aln_read == 1:
                    map_info['num_barcode_with_na'] += 1
                    continue
            feature_tag_value_pre = feature_tag_value
            count_read = True
        else:
            # Secondary alignment of the current read.
            if filt_multiple_gene_barcode:
                continue
            try:
                feature_tag_value = aln.get_tag(feature_tag)
            except KeyError:
                feature_tag_value = sam_file.getrname(aln.reference_id)
                if aln.get_tag('XS:Z') == 'Unassigned_Ambiguity':
                    filt_multiple_gene_barcode = True
                    count_read = False
                    continue
            try:
                feature_tag_value = gene_map_dict[feature_tag_value]
            except KeyError:
                feature_tag_value = feature_tag_value_pre
                continue
            # A read whose alignments hit different genes is discarded.
            if feature_tag_value_pre and feature_tag_value_pre != feature_tag_value:
                filt_multiple_gene_barcode = True
                count_read = False
                continue
            feature_tag_value_pre = feature_tag_value
            count_read = True  # with valid feature_tag_value
    # Flush the final read after the loop ends.
    if count_read:
        map_info['num_count_read'] += 1
        record_tuple = (cb_i, feature_tag_value, ub_i)
        molecular_info[record_tuple] += 1
    return map_info, read_in_cell, molecular_info
def _construct_cb_filter(cb_count, cb, expect_cell, force_cell,
                         all_cell, cell_barcode_whitelist):
    """Build the cell-barcode correction callable and the candidate barcode lists.

    Returns (correct_cb_fun, cb_list, cb_remove): a function mapping a split
    barcode list to a corrected barcode string (or falsy), the candidate
    whitelist, and barcodes slated for removal.
    """
    cb_list = []
    cb_remove = []
    if all_cell:
        # Keep every barcode; only error-correct against the provided files.
        correct_cb_fun = _filter_tag_fun(cb, max_distance=1, correct=True)
    else:
        if cell_barcode_whitelist:
            # User-supplied whitelist file, one barcode per line.
            with open(cell_barcode_whitelist, 'r') as file_handle:
                cb_list = [line.strip('\n') for line in file_handle]
        else:
            # Infer candidate cells from the barcode count distribution.
            cb_list, cb_remove = _get_candidate_barcode(cb_count, cb,
                                                        expect_cell=expect_cell,
                                                        force_cell=force_cell)
        num_cell = len(cb_list)
        logger.info(f'Detected {num_cell:,d} candidate cell barcodes.')
        if num_cell <= 0:
            sys.exit(-1)
        # Per-segment unique barcode values across all candidates.
        cb_list_split = [cb.split('-') for cb in cb_list]
        cb_df = pd.DataFrame(cb_list_split)
        cb_list_split = [''] * len(cb_df.columns)
        for cb in cb_df:
            cb_list_split[cb] = cb_df[cb].unique()
        if len(cb_df.columns) > 1:
            # Multi-segment barcodes: constrain correction to observed combinations.
            barcode_hash = _create_barcode_hash(cb_list)
            cb_hash = [ErrorBarcodeHashConstraint(cb, barcode_hash[idx])
                       for idx, cb in enumerate(cb_list_split)]
            correct_cb_fun = partial(_filter_tag_multi, tag_hash=cb_hash, correct=True)
        else:
            cb_hash = [ErrorBarcodeHash(cb) for cb in cb_list_split]
            correct_cb_fun = partial(_filter_tag, tag_hash=cb_hash, correct=True)
    return correct_cb_fun, cb_list, cb_remove
def _count_not_specified(barcode):
if not barcode:
return np.array([0])
na_count = [barcode_i.count('N') for barcode_i in barcode]
return np.array(na_count)
def _get_candidate_barcode(cb_count_file, cb_file,
                           plot_prefix='.',
                           expect_cell=False,
                           force_cell=False):
    """Select candidate cell barcodes from the per-barcode read counts.

    Filters degenerate (all-N / all-G) first segments, runs knee-point cell
    calling, refines/merges barcodes, writes the surviving set to
    '._detected_cb.tsv', and returns (cb_list, cb_remove).
    """
    cb = pd.read_csv(cb_count_file, sep='\t')
    cb_name = cb['cb'].str.split('-', expand=True)
    cb_len = [len(z) for z in cb_name.iloc[0, :]]
    num_cb = len(cb_len)
    idx = False
    # Drop rows whose first segment is all-N or all-G (sequencing artifacts).
    # NOTE(review): the loop always tests column 0 while sizing the filter by
    # segment cb_idx — looks like it was meant to test column cb_idx; confirm.
    for cb_idx in range(num_cb):
        filt_cb = [cb_char * cb_len[cb_idx] for cb_char in ['N', 'G']]
        idx = cb_name.iloc[:, 0].isin(filt_cb) | idx
    cb = cb.loc[~idx, :]
    cb = cb.reset_index(drop=True)
    cb_count = dict(zip(cb.cb, cb.cb_count))
    # Knee-point detection over the count distribution.
    candidate_cb_whitelist = get_cell_whitelist(cb_count,
                                               plot_prefix=plot_prefix,
                                               expect_cell=expect_cell,
                                               force_cell=force_cell)
    candidate_cb_whitelist_refine = \
        _refine_whitelist(list(candidate_cb_whitelist), cb_file)
    # Merging is disabled whenever explicit barcode files were supplied.
    merge_barcode = [x != 'None' and x for x in cb_file]
    if any(merge_barcode):
        merge_barcode = False
    cb_list, cb_remove = _merge_del_barcode(candidate_cb_whitelist_refine,
                                            barcode_count=cb, min_distance=1,
                                            merge_barcode=merge_barcode)
    # Persist the detected barcodes (minus removals) for inspection.
    with open('._detected_cb.tsv', 'wt') as file_handle:
        for cb_whitelist in np.setdiff1d(cb_list, cb_remove):
            file_handle.write(f'{cb_whitelist}\n')
    return cb_list, cb_remove
def _refine_whitelist(cb_whitelist, cb_file=None, max_na_per_cb=1):
    """Filter and error-correct candidate barcodes against optional barcode files.

    Drops barcodes with more than *max_na_per_cb* N bases in any segment (or
    more Ns than segments overall) and, when barcode files were supplied, maps
    each remaining barcode through the whitelist hashes.  Returns a dict of
    original barcode -> set of corrected barcode strings.
    """
    cb_hash = []
    if cb_file is not None:
        cb_file = list(cb_file)
        # The CLI passes 'None' as a string for absent files; normalize to None.
        cb_file = [None if cb_i == 'None' else cb_i for cb_i in cb_file]
        cb_hash = _construct_hash(cb_whitelist, cb_file)
    num_cb = len(cb_whitelist[0].split('-'))
    cb_whitelist_corrected = collections.defaultdict(set)
    for cell_barcode in list(cb_whitelist):
        cell_barcode_list = cell_barcode.split('-')
        na_count = _count_not_specified(cell_barcode_list)
        if any(na_count > max_na_per_cb) or sum(na_count) > num_cb:
            continue
        cb = cell_barcode
        if any(cb_hash):
            cb = _correct_cell_barcode(cell_barcode_list, cb_hash)
            # Uncorrectable barcode ([None]) is skipped entirely.
            if any(cb):
                cb = '-'.join(cb)
            else:
                continue
        cb_whitelist_corrected[cell_barcode].add(cb)
    return cb_whitelist_corrected
def _construct_hash(cb_whitelist, tag_file):
    """Build one ErrorBarcodeHash per barcode segment from its whitelist file.

    Segments without a file keep an empty-string placeholder.  Very large
    whitelists (> 5000 entries) are first shrunk to only the entries that the
    observed candidate barcodes actually map to.
    """
    num_tag = len(tag_file)
    tag_hash = [''] * num_tag
    # Add comments
    for i in range(num_tag):
        if tag_file[i]:
            with open(tag_file[i], 'r') as file_handle:
                tag_i = [line.rstrip('\n') for line in file_handle]
            if len(tag_i) > 5000:
                # Restrict the big whitelist to barcodes observed in the data.
                cell_barcode_list = []
                for cell_barcode in list(cb_whitelist):
                    cell_barcode_list.append(cell_barcode.split('-')[i])
                white_list_map = \
                    _generate_barcode_whitelist_map(cell_barcode_list, tag_i, 1)
                tag_i = [list(v)[0] for k, v in white_list_map.items()]
                tag_i = list(set(tag_i))
            tag_hash[i] = ErrorBarcodeHash(tag_i, edit_distance=1)
    return tag_hash
def _correct_cell_barcode(cell_barcode, cb_hash):
num_cb = len(cb_hash)
cb_corrected = cell_barcode
for i in range(num_cb):
if cb_hash[i]:
candidate_cb = cb_hash[i][cell_barcode[i]]
if candidate_cb:
cb_corrected[i] = candidate_cb
else:
return [None]
return cb_corrected
def _generate_barcode_whitelist_map(barcode, whitelist, min_distance=1):
    """Map observed barcodes onto a whitelist in parallel.

    :param barcode: iterable of observed barcode strings
    :param whitelist: iterable of reference barcodes
    :param min_distance: maximum edit distance allowed for a match
    :return: defaultdict mapping each matchable observed barcode to the set of
        whitelist barcodes it resolves to
    """
    barcode_to_whitelist = collections.defaultdict(set)
    whitelist = set([str(x).encode('utf-8') for x in whitelist])
    _partial_map_single_barcode_to_whitelist = \
        partial(_map_single_barcode_to_whitelist, whitelist=whitelist,
                min_distance=min_distance)
    # Use the pool as a context manager so worker processes are always
    # terminated (the original created a Pool and never closed/joined it).
    with mp.Pool(mp.cpu_count()) as pool:
        corrected_barcode = pool.map(_partial_map_single_barcode_to_whitelist, barcode)
    for idx, barcode_i in enumerate(corrected_barcode):
        if barcode_i is not None:
            barcode_to_whitelist[barcode[idx]].add(barcode_i)
    return barcode_to_whitelist
def _map_single_barcode_to_whitelist(barcode, whitelist, min_distance=1):
match = None
barcode_in_bytes = str(barcode).encode('utf-8')
for white_barcode in whitelist:
if barcode_in_bytes in whitelist:
match = barcode
break
if compute_edit_distance(barcode_in_bytes, white_barcode) <= min_distance:
if match is not None:
logging.info(f'Warning: barcode {str(barcode)} can be '
f'mapped to more than one candidate barcodes')
match = None
break
else:
match = white_barcode.decode('utf-8')
return match
def _merge_del_barcode(barcode_dict, barcode_count, min_distance=1, merge_barcode=False):
    """Split counted cell barcodes into a whitelist and removable entries.

    :param barcode_dict: maps an observed barcode to its corrected
        candidate(s); an empty container means uncorrectable
    :param barcode_count: data frame with a 'cb' barcode column, a
        'cb_count' total column and per-position count columns
    :param min_distance: edit distance used when correcting barcodes
    :param merge_barcode: if True (and at most 100000 clean barcodes),
        additionally merge near-duplicate clean barcodes
    :return: (barcode_whitelist, whitelist_remove) where whitelist_remove
        lists whitelist entries shadowed by a corrected barcode
    """
    # Keep only the counted barcodes that have a correction entry.
    barcode_list = list(barcode_dict.keys())
    idx = barcode_count.cb.isin(barcode_list)
    barcode_count_filt = barcode_count.loc[idx, :]
    barcode_corr = [barcode_dict[x] for x in barcode_count_filt.cb]
    # Drop barcodes whose correction set is empty, then flatten the rest
    # and re-aggregate counts under the corrected barcode.
    idx = [len(x) > 0 for x in barcode_corr]
    barcode_count_filt = barcode_count_filt.iloc[idx, :]
    barcode_corr = list(chain(*barcode_corr))
    barcode_count_filt.cb = barcode_corr
    barcode_count_filt = barcode_count_filt.groupby('cb').sum()
    # Fraction of each barcode's total count carried by each count column.
    umi_len = barcode_count_filt.shape[1]
    barcode_count_filt_ratio = barcode_count_filt.iloc[:, 1:umi_len].div(
        barcode_count_filt.cb_count, axis=0)
    # Flag columns with an extreme ratio (> 0.80 or < 0.005); count_indel
    # is the number of flagged columns per barcode: 0 flagged -> clean
    # whitelist candidate, exactly 1 flagged -> correctable.
    idx = barcode_count_filt_ratio.gt(0.80, axis=0)
    idx = idx | barcode_count_filt_ratio.lt(0.005, axis=0)
    count_indel = idx.sum(axis=1)
    if sum(count_indel == 0) <= 100000 and merge_barcode:
        barcode_whitelist = \
            _merge_corrected_barcode(barcode_count_filt.loc[count_indel == 0, :])
    else:
        barcode_whitelist = barcode_count_filt_ratio.index[count_indel == 0].tolist()
    barcode_correctable = barcode_count_filt_ratio.index[count_indel == 1].tolist()
    whitelist_remove = []
    if len(barcode_correctable) > 0:
        barcode_corrected = _correct_del_barcode(
            barcode_count_filt.loc[barcode_correctable, :], min_distance)
        barcode_corrected_list = list(barcode_corrected.keys())
        # Replace the last base with 'N' and mark whitelist entries within
        # one edit of a corrected barcode for removal.
        barcode_corrected_list_mut = [x[:-1]+'N' for x in barcode_corrected_list]
        whitelist_dist = [_find_neighbour_barcode(x, barcode_corrected_list_mut, 1)
                          for x in barcode_whitelist]
        whitelist_remove = [barcode_whitelist[k] for k, v in enumerate(whitelist_dist)
                            if len(v[0]) > 0]
        barcode_whitelist.extend(barcode_corrected_list)
    return barcode_whitelist, whitelist_remove
# N**2 complexity
def _merge_corrected_barcode(barcode_count):
    """Greedily keep high-coverage barcodes and absorb near-duplicates.

    Barcodes are visited in decreasing cb_count order. A barcode is kept
    when it has no already-kept neighbour within edit distance 1, or when
    its single neighbour covers less than 10x its own count.
    """
    ranked = barcode_count.sort_values('cb_count', ascending=False)
    names = ranked.index.astype(str)
    coverage = dict(zip(names, ranked.cb_count))
    kept = collections.deque()
    kept.append(names[0])
    if len(names) <= 1:
        return kept
    for candidate in names[1:]:
        neighbours = _find_neighbour_barcode(candidate, kept)
        num_hits = len(neighbours[0])
        if num_hits == 0:
            kept.append(candidate)
        elif num_hits == 1:
            anchor = kept[neighbours[0][0]]
            # Keep the candidate only if it is not dwarfed by its anchor.
            if coverage[candidate] > coverage[anchor] / 10.0:
                kept.append(candidate)
    return kept
def _find_neighbour_barcode(barcode, barcode_list, min_distance=1):
    """Return (as np.where output) the indices of barcode_list entries whose
    edit distance to `barcode` is at most `min_distance`."""
    distances = np.array(
        [compute_edit_distance(barcode.encode('utf-8'), other.encode('utf-8'))
         for other in barcode_list])
    return np.where(distances <= min_distance)
def _correct_del_barcode(barcode_count, min_distance=1):
    """Group barcodes under their highest-coverage neighbour.

    Repeatedly takes the remaining barcode with the largest cb_count,
    assigns it (and every remaining barcode within `min_distance` edits)
    to its own whitelist entry, and removes the grouped barcodes.
    """
    ranked = barcode_count.sort_values('cb_count', ascending=False)
    remaining = np.asarray(ranked.index.tolist())
    whitelist = collections.defaultdict(set)
    while len(remaining):
        anchor = remaining[0]
        whitelist[anchor].add(anchor)
        remaining = remaining[1:]
        hits = _find_neighbour_barcode(anchor, remaining, min_distance)
        if len(hits[0]):
            whitelist[anchor].update(list(remaining[hits]))
            remaining = np.delete(remaining, hits)
    return whitelist
def _create_barcode_hash(barcode):
barcode_split = [barcode_i.split('-') for barcode_i in barcode]
barcode_df = pd.DataFrame(barcode_split, barcode)
num_barcode = len(barcode_split[0])
barcode_split_uniq = [''] * len(barcode_df.columns)
for barcode_i in barcode_df:
barcode_split_uniq[barcode_i] = barcode_df[barcode_i].unique()
barcode_hash = [collections.defaultdict(list) for _ in range(num_barcode)]
for i in range(num_barcode):
barcode_i = barcode_split_uniq[i]
for barcode_ii in barcode_i:
idx = barcode_df[i] == barcode_ii
barcode_hash[i][barcode_ii] = set(barcode_df.index[idx].tolist())
return barcode_hash
def convert_count_to_matrix(molecular_info, out_prefix, depth_threshold):
    """Public wrapper: collapse UMIs in the molecular-info store and write
    the read/transcript count matrices under `out_prefix`."""
    _convert_count_to_matrix(molecular_info, out_prefix, depth_threshold)
def _convert_count_to_matrix(molecular_info, out_prefix, depth_threshold):
    """Load a molecular-info HDF5 store, collapse UMIs and write matrices.

    :param molecular_info: path to an HDF5 store with 'feature' and
        'molecular_info' keys
    :param out_prefix: prefix for the output matrix files
    :param depth_threshold: minimum per-UMI read depth for transcript counts
    """
    feature = pd.read_hdf(molecular_info, key='feature')
    molecular_info = pd.read_hdf(molecular_info, key='molecular_info')
    logger.info('Collapsing UMIs')
    write_time = time.time()
    molecular_info = _collapse_umi(molecular_info)
    write_time = time.time() - write_time
    logger.info(f'Collapsing UMIs done, taking {write_time/60.0:.3f} minutes')
    # Prepend one fake row per gene (depth just above the threshold) so
    # every gene appears in the output; the fake counts are subtracted
    # again inside _transform_write_sparse_matrix.
    df = _generate_fake_count(molecular_info.iloc[0, :],
                              feature, depth=depth_threshold+1)
    molecular_info = pd.concat([df, molecular_info], ignore_index=True)
    num_gene = len(feature)
    _transform_write_sparse_matrix(molecular_info, num_gene,
                                   sum_type='umi', out_prefix=out_prefix,
                                   depth_threshold=depth_threshold)
    _transform_write_sparse_matrix(molecular_info, num_gene,
                                   sum_type='transcript', out_prefix=out_prefix,
                                   depth_threshold=depth_threshold)
def _transform_write_sparse_matrix(molecular_info, num_gene,
                                   sum_type, out_prefix, depth_threshold):
    """Aggregate molecular_info into a cell x gene sparse matrix and write it.

    :param molecular_info: data frame with cell/gene/umi/depth columns whose
        first num_gene rows are the fake padding added upstream
    :param num_gene: number of genes (one padding row each to subtract out)
    :param sum_type: 'umi' -> sum read depth per (cell, gene) and write
        '<prefix>_read'; anything else -> count UMIs passing the depth
        filter and write '<prefix>_depth_<t>_transcript'
    :param out_prefix: prefix for the .mtx/.tsv output files
    :param depth_threshold: minimum depth for a UMI to count as a transcript
    """
    logger.info('Converting to sparse matrix')
    convert_time = time.time()
    query_filter = f'depth >= {depth_threshold}'
    if 'umi' == sum_type:
        base_name = out_prefix + '_read'
        count_collapsed = molecular_info.groupby(
            ['cell', 'gene'])
        count_collapsed = count_collapsed['depth'].sum()
        # Remove the fake depth (depth_threshold + 1) added per gene
        # upstream, then round to the nearest integer.
        count_collapsed[:num_gene] -= (depth_threshold + 1)
        count_collapsed += 0.5
        count_collapsed = count_collapsed.astype(int)
    else:
        base_name = out_prefix + '_depth_' + str(depth_threshold) + '_transcript'
        count_collapsed = molecular_info.query(query_filter).groupby(
            ['cell', 'gene'])
        count_collapsed = count_collapsed['umi'].size()
        # Remove the single fake UMI added per gene upstream.
        count_collapsed[:num_gene] -= 1
    del molecular_info
    count, count_row_name, count_column_name = _convert_to_coo(count_collapsed)
    del count_collapsed
    convert_time = time.time() - convert_time
    logger.info(f'Converting to a sparse matrix done, '
                f'taking {convert_time/60.0:.3f} minutes')
    logger.info('Output results')
    write_time = time.time()
    pd.Series(count_row_name).to_csv(base_name + '_gene.tsv',
                                     index=False, header=False)
    pd.Series(count_column_name).to_csv(base_name + '_barcode.tsv',
                                        index=False, header=False)
    # The original had two byte-identical write branches per sum_type;
    # a single write is equivalent.
    with open(base_name + '.mtx', 'w+b') as out_handle:
        scipy.io.mmwrite(out_handle, count)
    write_time = time.time() - write_time
    logger.info(f'Writing final results done, '
                f'taking {write_time/60.0:.3f} minutes')
def _map_barcode_to_whitelist(barcode, whitelist, min_distance=1):
whitelist = set([str(x).encode('utf-8') for x in whitelist])
iter_i = 0
for barcode_i in barcode:
match = barcode_i
barcode_in_bytes = str(barcode_i).encode('utf-8')
for white_barcode in whitelist:
if barcode_in_bytes in whitelist:
break
if compute_edit_distance(barcode_in_bytes, white_barcode) <= min_distance:
match = white_barcode.decode('utf-8')
break
barcode[iter_i] = match
iter_i += 1
return barcode
def _collapse_barcode_edit(barcode, value, min_distance=1):
id_srt = value.argsort()[::-1]
barcode = barcode[id_srt]
value = value[id_srt]
max_barcode = value[0]
threshold = max_barcode * 0.1
if threshold < 2:
threshold = 2
elif threshold > 5:
threshold = 5
id_whitelist = value > threshold
whitelist_candidate = barcode[id_whitelist]
noise_candidate = barcode[~id_whitelist]
if len(noise_candidate) > 0 and len(whitelist_candidate) > 0:
corrected_noise = _map_barcode_to_whitelist(noise_candidate,
whitelist_candidate,
min_distance)
barcode[~id_whitelist] = corrected_noise
return barcode, value
def _collapse_umi(x, min_distance=1):
    """Collapse similar UMIs within each (cell, gene) group of x.

    :param x: data frame with 'cell', 'gene', 'umi' and 'depth' columns;
        assumes rows of one (cell, gene) group are contiguous and the
        index is a default RangeIndex (the duplicated() trick below uses
        index labels as positions) -- NOTE(review): confirm upstream order.
    :param min_distance: maximum edit distance when merging UMIs
    :return: x re-aggregated by (cell, gene, umi) with summed depth
    """
    # Index labels of the first row of each (cell, gene) group.
    id_start = x.duplicated(['cell', 'gene'])
    id_start = id_start[id_start == False].index.tolist()
    id_end = id_start[1:]
    id_end.append(x.shape[0])
    value = x['depth'].values
    umi = x['umi'].values.astype('str')
    for gene in np.arange(len(id_end)):
        id_gene = np.arange(id_start[gene], id_end[gene])
        if len(id_gene) <= 1:
            continue
        umi_gene = umi[id_gene]
        value_gene = value[id_gene]
        # Rewrite low-coverage UMIs onto their high-coverage neighbours.
        umi_gene, _ = _collapse_barcode_edit(umi_gene, value_gene, min_distance)
        umi[id_gene] = umi_gene
    x['umi'] = pd.Categorical(umi)
    x = x.groupby(['cell', 'gene', 'umi'], observed=True)['depth'].sum()
    x = x.reset_index(drop=False)
    return x
def _convert_to_coo(data_series):
data_sp = data_series.astype('Sparse')
data_sp, row_name, column_name = data_sp.sparse.to_coo(
column_levels=['cell'],
row_levels=['gene']
)
data_sp.eliminate_zeros()
data_sp = data_sp.astype(int)
coo_tuple = collections.namedtuple('coo_tuple', ['x', 'row_name', 'column_name'])
return coo_tuple(data_sp, row_name, column_name)
def _generate_fake_count(row_of_df, feature, depth=1.5):
index_name = row_of_df.index
df = pd.DataFrame(columns=index_name)
df['gene'] = feature.index.values
df['umi'] = 'N' * len(row_of_df['umi'])
df['depth'] = depth
for index_name_other in list(set(index_name) - {'gene', 'umi', 'depth'}):
df[index_name_other] = row_of_df[index_name_other]
return df
def down_sample(molecular_info, total_read=None, total_cell=None, mean_read=None,
                out_prefix='.', depth_threshold=1, seed=0):
    """
    Down-sampling the molecular_info such that each library has the same number of reads
    :param molecular_info: path to the HDF5 store holding the molecular info
    :param total_read: the total number of reads for these libraries
        (defaults to this library's 'num_unique_read' from map_info)
    :param total_cell: the total number of cells (capped at the number of
        cells observed in 'read_in_cell')
    :param mean_read: the expected number of reads per cell after
        down-sampling; a single value or a tuple of target values
    :param out_prefix: the prefix of the output matrices
    :param depth_threshold: the coverage threshold to consider
    :param seed: seed used for random sampling (incremented per target)
    """
    feature = pd.read_hdf(molecular_info, key='feature')
    output_molecular_info = pd.read_hdf(molecular_info, key='molecular_info')
    # Categorical dtype for the first three columns keeps memory down.
    for col in output_molecular_info.columns[:3]:
        output_molecular_info.loc[:, col] = output_molecular_info[col].astype('category')
    output_molecular_info = output_molecular_info.loc[
        output_molecular_info['depth'] >= 1, :]
    output_molecular_info.reset_index(drop=True, inplace=True)
    map_info = pd.read_hdf(molecular_info, key='map_info')
    if total_read is None:
        total_read = map_info['num_unique_read']
    else:
        # NOTE(review): max() keeps total_read at least the observed
        # unique-read count -- confirm this is intended (not min()).
        total_read = max(total_read, map_info['num_unique_read'])
    read_in_cell = pd.read_hdf(molecular_info, key='read_in_cell')
    if total_cell is None:
        total_cell = read_in_cell.shape[0]
    else:
        total_cell = min(total_cell, read_in_cell.shape[0])
    if mean_read is None:
        mean_read = (10000, )
    elif not isinstance(mean_read, tuple):
        mean_read = (mean_read, )
    # Preserve the original depths so each mean_read target starts fresh
    # (_down_sample overwrites the 'depth' column in place).
    cell_vec = output_molecular_info['depth'].copy()
    for mean_read_i in mean_read:
        random.seed(seed)
        seed += 1
        _down_sample(output_molecular_info, feature, total_read, total_cell,
                     mean_read_i, out_prefix, depth_threshold=depth_threshold)
        output_molecular_info['depth'] = cell_vec
def _down_sample(molecular_info, feature, total_read, total_cell, mean_read,
                 out_prefix, depth_threshold=1):
    """Randomly keep ~mean_read * total_cell reads, collapse UMIs and write
    count matrices; no-op when more reads are requested than exist.

    NOTE: mutates molecular_info['depth'] in place (the caller restores it).
    """
    expect_read = mean_read * total_cell
    if expect_read > total_read:
        return ()
    cell_vec = molecular_info['depth']
    value = cell_vec.tolist()
    # Expand each molecule into its individual reads via cumulative offsets:
    # reads of molecule i occupy [id_start[i], id_end[i]).
    id_end = np.cumsum(value)
    id_start = np.append(0, id_end)
    id_start = id_start[:-1]
    # Scale the sample size by this library's share of total_read.
    read_num_subsample = (id_end[-1] / total_read * 1.0) * expect_read
    read_num_subsample = int(read_num_subsample + 0.5)
    id_keep = sorted(random.sample(range(id_end[-1]), read_num_subsample))
    expanded_count = np.zeros(id_end[-1], dtype=np.int32)
    expanded_count[id_keep] = 1
    # Re-aggregate the kept reads back into per-molecule depths.
    value = _add_umis(expanded_count, id_start, id_end)
    molecular_info['depth'] = value
    output_molecular_info = molecular_info.loc[molecular_info['depth'] >= 1, :].copy()
    output_molecular_info.reset_index(drop=True, inplace=True)
    logger.info('Collapsing UMIs')
    write_time = time.time()
    output_molecular_info = _collapse_umi(output_molecular_info)
    write_time = time.time() - write_time
    logger.info(f'Collapsing UMIs done, taking {write_time / 60.0:.3f} minutes')
    out_prefix = out_prefix + '_sample_' + str(mean_read)
    _calculate_cell_gene_matrix(output_molecular_info, feature,
                                out_prefix=out_prefix,
                                depth_threshold=depth_threshold)
def down_sample_cell(molecular_info, expect_read=None, out_prefix='', depth_threshold=1):
    """
    Down-sampling the molecular_info such that each cell has the same number of reads
    :param molecular_info: path to the HDF5 store holding the molecular info
    :param expect_read: target number of reads per cell; a single value or
        a tuple of target values
    :param out_prefix: the prefix of the output matrices
    :param depth_threshold: the coverage threshold to consider
    """
    feature = pd.read_hdf(molecular_info, key='feature')
    output_molecular_info = pd.read_hdf(molecular_info, key='molecular_info')
    # Categorical dtype for the first three columns keeps memory down.
    for col in output_molecular_info.columns[:3]:
        output_molecular_info.loc[:, col] = output_molecular_info[col].astype('category')
    output_molecular_info = output_molecular_info.loc[
        output_molecular_info['depth'] >= 1, :]
    # Sort on every column except the last so rows sharing the leading
    # columns (cell/gene) end up adjacent for the per-cell slicing below.
    name = output_molecular_info.columns.tolist()
    name = name[:-1]
    output_molecular_info = output_molecular_info.sort_values(name) # already sorted?
    read_in_cell = pd.read_hdf(molecular_info, key='read_in_cell')
    if expect_read is None:
        expect_read = (10000, )
    elif not isinstance(expect_read, tuple):
        expect_read = (expect_read, )
    for num_read in expect_read:
        _down_sample_cell(output_molecular_info, feature, read_in_cell, num_read,
                          out_prefix, depth_threshold=depth_threshold)
def _down_sample_cell(molecular_info, feature, read_in_cell, expect_read,
                      out_prefix, depth_threshold=1):
    """Down-sample every kept cell to ~expect_read reads, then collapse
    UMIs and write count matrices.

    :param molecular_info: data frame with cell/gene/umi/depth columns,
        sorted so each cell's rows are contiguous
    :param feature: gene annotation frame (index = gene names)
    :param read_in_cell: frame mapping each cell to its read count (col 0)
    :param expect_read: target number of reads per cell
    :param out_prefix: prefix for output files ('.sample_<n>' is appended)
    :param depth_threshold: minimum UMI depth for transcript counting
    """
    # Cells with fewer than expect_read reads cannot be down-sampled.
    read_in_cell = read_in_cell[read_in_cell[0] >= expect_read]
    num_cell = read_in_cell.shape[0]
    if num_cell == 0:
        logger.error(f'All cells have less than {expect_read:,d} reads, aborting.')
        sys.exit(-1)
    cell = set(read_in_cell.index.tolist())
    output_molecular_info = molecular_info.loc[
        molecular_info['cell'].isin(cell)].copy()
    # Update the values of output_molecular_info
    output_molecular_info.reset_index(drop=True, inplace=True)
    cell_start = output_molecular_info.drop_duplicates('cell')
    cell = cell_start['cell'].tolist()
    cell_start = cell_start.index.tolist()
    cell_end = cell_start[1:]
    cell_end.append(output_molecular_info.shape[0])
    read_in_cell = read_in_cell.to_dict('dict')[0]
    # .as_matrix() was removed in pandas 1.0; take an explicit copy and
    # write the result back below instead of mutating through a view.
    umi_count = output_molecular_info['depth'].to_numpy(copy=True)
    umi_count = umi_count.reshape([umi_count.shape[0], 1])
    random.seed(0)
    for idx_i, cell_i in enumerate(cell):
        logger.info(f'Subsample cell {idx_i:,d}')
        id_case, id_end = _find_pos(cell_start[idx_i], cell_end[idx_i], umi_count)
        id_start = np.append(0, id_end)
        id_start = id_start[:-1]
        # Number of reads to keep, proportional to this cell's share.
        num_umi = int(expect_read * id_end[-1] / read_in_cell[cell_i] + 0.5)
        if id_end[-1] < num_umi:
            continue
        id_keep = sorted(random.sample(range(id_end[-1]), num_umi))
        expanded_count = np.zeros(id_end[-1], dtype=np.int32)
        expanded_count[id_keep] = 1
        value = _add_umis(expanded_count, id_start, id_end)
        umi_count[id_case] = value
    # Persist the down-sampled depths explicitly: the old view-based
    # mutation via .as_matrix() is version-dependent and silently breaks
    # under pandas copy-on-write.
    output_molecular_info['depth'] = umi_count.ravel()
    logger.info('Collapsing UMIs')
    write_time = time.time()
    output_molecular_info = _collapse_umi(output_molecular_info)
    write_time = time.time() - write_time
    logger.info(f'Collapsing UMIs done, taking {write_time/60.0:.3f} minutes')
    out_prefix = out_prefix + '.sample_' + str(expect_read)
    _calculate_cell_gene_matrix(output_molecular_info, feature, out_prefix=out_prefix,
                                depth_threshold=depth_threshold)
@numba.jit(nopython=True)
def _add_umis(x, id_start, id_end):
    """Sum x over each window [id_start[k], id_end[k]) into a column vector."""
    out = np.zeros((len(id_start), 1), dtype=np.uint32)
    for k in range(len(id_start)):
        out[k] = np.sum(x[id_start[k]:id_end[k]])
    return out
@numba.jit(nopython=True)
def _find_pos(cell_start_index, cell_end_index, umi_count):
    """Return (row positions of one cell, cumulative depth over those rows)."""
    rows = np.arange(cell_start_index, cell_end_index)
    depth_cumsum = np.cumsum(umi_count[rows])
    return rows, depth_cumsum
def _calculate_cell_gene_matrix(molecular_info, feature, out_prefix, depth_threshold):
    """Pad molecular_info with one fake row per gene (so every gene appears
    in the output) and write both transcript and read count matrices."""
    padding = _generate_fake_count(molecular_info.iloc[0, :],
                                  feature, depth=depth_threshold+1)
    padded = pd.concat([padding, molecular_info], ignore_index=True)
    num_gene = len(feature)
    for matrix_type in ('transcript', 'umi'):
        _transform_write_sparse_matrix(padded, num_gene,
                                       sum_type=matrix_type,
                                       out_prefix=out_prefix,
                                       depth_threshold=depth_threshold)
def _filter_tag_fun(tag_file, max_distance, correct=True):
    """Build a partial of _filter_tag with one ErrorBarcodeHash per tag file.

    Tag files given as None (or the string 'None') produce a None hash,
    which _filter_tag skips.
    """
    if tag_file is None:
        tag_file = [None]
    else:
        tag_file = [None if entry == 'None' else entry
                    for entry in list(tag_file)]
    tag_hash = [None] * len(tag_file)
    for pos, path in enumerate(tag_file):
        if not path:
            continue
        with open(path, 'r') as handle:
            sequences = {line.rstrip('\n') for line in handle}
        tag_hash[pos] = ErrorBarcodeHash(sequences, max_distance)
    return partial(_filter_tag, tag_hash=tag_hash, correct=correct)
def _filter_tag(tag, tag_hash, correct=True):
tag_corrected = list(tag)
for i, tag_i in enumerate(tag):
if not tag_hash[i]:
continue
tag_corrected[i] = tag_hash[i][tag_i]
if not tag_corrected[i]:
return None
if correct:
return '-'.join(tag_corrected)
else:
return '-'.join(tag)
def _filter_tag_multi(tag, tag_hash, correct=True):
tag_corrected = list(tag)
tag_full = '-'.join(tag)
for i, tag_i in enumerate(tag):
if not tag_hash[i]:
continue
tag_corrected[i] = tag_hash[i][tag_i, tag_full]
if not tag_corrected[i]:
return None
if correct:
return '-'.join(tag_corrected)
else:
return '-'.join(tag)
def annotate_bam(bam, gtf, featureCounts='featureCounts',
                 annotate_multi_mapping=True, annotate_intron=False,
                 strand=1, num_thread=4):
    """Annotate a BAM file with gene features by shelling out to featureCounts.

    :param bam: path to the input BAM file
    :param gtf: path to the GTF annotation file
    :param featureCounts: featureCounts executable to invoke
    :param annotate_multi_mapping: also count multi-mapping reads (-M)
    :param annotate_intron: run a second transcript-level pass over the
        first pass's output BAM
    :param strand: strandness passed to featureCounts -s
    :param num_thread: number of featureCounts threads (-T)
    Exits the process (sys.exit(-1)) when featureCounts fails or is missing.
    """
    import subprocess
    logger.info('Running featureCounts to annotate alignments.')
    start_time = time.time()
    # NOTE(review): commands run with shell=True, so paths containing
    # shell metacharacters are interpreted by the shell.
    cmd_feature_count = ' '.join([featureCounts,
                                  '-g gene_id', '-t exon', '-R BAM',
                                  '-T', str(num_thread),
                                  '-F GTF', '-a', gtf, '-s', str(strand),
                                  ])
    if annotate_multi_mapping:
        cmd_feature_count = ' '.join([cmd_feature_count, '-M'])
    try:
        cmd = ' '.join([cmd_feature_count, '-o', bam + '.annotation', bam])
        print(cmd, '\n')
        subprocess.check_output(cmd, shell=True)
    except subprocess.CalledProcessError:
        logger.info(f'Running featureCount errors')
        sys.exit(-1)
    except OSError:
        logger.info(f'featureCounts not found or cannot run!'
                    f' Please specify which featureCounts to use.')
        sys.exit(-1)
    if annotate_intron:
        # Second pass: count at transcript level over the BAM emitted by
        # the first featureCounts run.
        cmd_feature_count = re.sub('-t exon', '-t transcript', cmd_feature_count)
        cmd = ' '.join([cmd_feature_count, '-o',
                        bam + '.annotation.intron',
                        bam + '.featureCounts.bam'])
        try:
            subprocess.check_output(cmd, shell=True)
            # featureCounts appends '.featureCounts.bam' to its input name;
            # rename the double-suffixed output back to the single suffix.
            cmd = ' '.join(['mv', bam + '.featureCounts.bam' + '.featureCounts.bam',
                            bam + '.featureCounts.bam'])
            subprocess.check_output(cmd, shell=True)
        except subprocess.CalledProcessError:
            logger.info(f'Running featureCount errors')
            sys.exit(-1)
        except OSError:
            logger.info(f'featureCounts not found or cannot run! '
                        f'Please specify which featureCounts to use.')
            sys.exit(-1)
    feature_count_time = time.time() - start_time
    logger.info(f'Annotating features done successfully, '
                f'taking {feature_count_time/60.0:.3f} minutes')
| [
"os.remove",
"pandas.HDFStore",
"numpy.sum",
"pandas.read_csv",
"numpy.floor",
"collections.defaultdict",
"numpy.arange",
"yaml.safe_load",
"collections.deque",
"multiprocessing.cpu_count",
"pandas.DataFrame",
"pandas.read_hdf",
"regex.compile",
"cytoolz.partition_all",
"numpy.cumsum",
... | [((622, 716), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO', 'format': '"""%(asctime)s: %(levelname)s: %(message)s"""'}), "(level=logging.INFO, format=\n '%(asctime)s: %(levelname)s: %(message)s')\n", (641, 716), False, 'import logging\n'), ((730, 757), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (747, 757), False, 'import logging\n'), ((50358, 50382), 'numba.jit', 'numba.jit', ([], {'nopython': '(True)'}), '(nopython=True)\n', (50367, 50382), False, 'import numba\n'), ((50573, 50597), 'numba.jit', 'numba.jit', ([], {'nopython': '(True)'}), '(nopython=True)\n', (50582, 50597), False, 'import numba\n'), ((2527, 2733), 'functools.partial', 'partial', (['_format_read'], {'read_regex_list': 'read_regex_list', 'read_template': 'read_template.read_template', 'cb_tag': 'read_template.cb_tag', 'ub_len': 'read_template.ub_len', 'barcode_filter_dict': 'barcode_filter_dict'}), '(_format_read, read_regex_list=read_regex_list, read_template=\n read_template.read_template, cb_tag=read_template.cb_tag, ub_len=\n read_template.ub_len, barcode_filter_dict=barcode_filter_dict)\n', (2534, 2733), False, 'from functools import partial\n'), ((2993, 3007), 'multiprocessing.cpu_count', 'mp.cpu_count', ([], {}), '()\n', (3005, 3007), True, 'import multiprocessing as mp\n'), ((3093, 3132), 'cytoolz.partition_all', 'toolz.partition_all', (['num_thread', 'chunks'], {}), '(num_thread, chunks)\n', (3112, 3132), True, 'import cytoolz as toolz\n'), ((3410, 3454), 'collections.defaultdict', 'collections.defaultdict', (['collections.Counter'], {}), '(collections.Counter)\n', (3433, 3454), False, 'import collections\n'), ((3508, 3519), 'time.time', 'time.time', ([], {}), '()\n', (3517, 3519), False, 'import time\n'), ((3531, 3550), 'multiprocessing.Pool', 'mp.Pool', (['num_thread'], {}), '(num_thread)\n', (3538, 3550), True, 'import multiprocessing as mp\n'), ((6278, 6349), 'pandas.read_csv', 'pd.read_csv', (['cb_file'], 
{'header': 'None', 'iterator': '(True)', 'sep': '"""\t"""', 'index_col': '(0)'}), "(cb_file, header=None, iterator=True, sep='\\t', index_col=0)\n", (6289, 6349), True, 'import pandas as pd\n'), ((9827, 9858), 'regex.sub', 're.sub', (['""">"""', '"""_qual>"""', 'read_seq'], {}), "('>', '_qual>', read_seq)\n", (9833, 9858), True, 'import regex as re\n'), ((9876, 9913), 'regex.sub', 're.sub', (['"""\\\\[ACGTN\\\\]"""', '"""."""', 'read_qual'], {}), "('\\\\[ACGTN\\\\]', '.', read_qual)\n", (9882, 9913), True, 'import regex as re\n'), ((10085, 10131), 'regex.findall', 're.findall', (['"""\\\\?P<polyT>\\\\.{[0-9]+}"""', 'read_seq'], {}), "('\\\\?P<polyT>\\\\.{[0-9]+}', read_seq)\n", (10095, 10131), True, 'import regex as re\n'), ((10350, 10396), 'regex.findall', 're.findall', (['"""\\\\?P<polyT>\\\\.{[0-9]+}"""', 'read_seq'], {}), "('\\\\?P<polyT>\\\\.{[0-9]+}', read_seq)\n", (10360, 10396), True, 'import regex as re\n'), ((14601, 14656), 'pandas.DataFrame.from_dict', 'pd.DataFrame.from_dict', (['barcode_counter'], {'orient': '"""index"""'}), "(barcode_counter, orient='index')\n", (14623, 14656), True, 'import pandas as pd\n'), ((15132, 15166), 'pysam.AlignmentFile', 'AlignmentFile', (['bam'], {'mode': 'read_mode'}), '(bam, mode=read_mode)\n', (15145, 15166), False, 'from pysam import AlignmentFile\n'), ((15698, 15724), 'regex.compile', 're.compile', (['barcode_parser'], {}), '(barcode_parser)\n', (15708, 15724), True, 'import regex as re\n'), ((17934, 17945), 'time.time', 'time.time', ([], {}), '()\n', (17943, 17945), False, 'import time\n'), ((17962, 17996), 'pysam.AlignmentFile', 'AlignmentFile', (['bam'], {'mode': 'read_mode'}), '(bam, mode=read_mode)\n', (17975, 17996), False, 'from pysam import AlignmentFile\n'), ((18026, 18193), 'functools.partial', 'partial', (['_count_feature'], {'gene_map_dict': 'gene_map_dict', 'barcode_parser': 'barcode_parser', 'correct_cb_fun': 'correct_cb_fun', 'sam_file': 'sam_file', 'feature_tag': 'feature_tag'}), '(_count_feature, 
gene_map_dict=gene_map_dict, barcode_parser=\n barcode_parser, correct_cb_fun=correct_cb_fun, sam_file=sam_file,\n feature_tag=feature_tag)\n', (18033, 18193), False, 'from functools import partial\n'), ((19042, 19053), 'time.time', 'time.time', ([], {}), '()\n', (19051, 19053), False, 'import time\n'), ((19984, 20003), 'pandas.Series', 'pd.Series', (['map_info'], {}), '(map_info)\n', (19993, 20003), True, 'import pandas as pd\n'), ((20023, 20075), 'pandas.DataFrame.from_dict', 'pd.DataFrame.from_dict', (['read_in_cell'], {'orient': '"""index"""'}), "(read_in_cell, orient='index')\n", (20045, 20075), True, 'import pandas as pd\n'), ((20136, 20147), 'time.time', 'time.time', ([], {}), '()\n', (20145, 20147), False, 'import time\n'), ((21224, 21252), 'collections.defaultdict', 'collections.defaultdict', (['int'], {}), '(int)\n', (21247, 21252), False, 'import collections\n'), ((21272, 21293), 'collections.Counter', 'collections.Counter', ([], {}), '()\n', (21291, 21293), False, 'import collections\n'), ((21315, 21343), 'collections.defaultdict', 'collections.defaultdict', (['int'], {}), '(int)\n', (21338, 21343), False, 'import collections\n'), ((27546, 27564), 'numpy.array', 'np.array', (['na_count'], {}), '(na_count)\n', (27554, 27564), True, 'import numpy as np\n'), ((27763, 27799), 'pandas.read_csv', 'pd.read_csv', (['cb_count_file'], {'sep': '"""\t"""'}), "(cb_count_file, sep='\\t')\n", (27774, 27799), True, 'import pandas as pd\n'), ((29482, 29510), 'collections.defaultdict', 'collections.defaultdict', (['set'], {}), '(set)\n', (29505, 29510), False, 'import collections\n'), ((31356, 31384), 'collections.defaultdict', 'collections.defaultdict', (['set'], {}), '(set)\n', (31379, 31384), False, 'import collections\n'), ((31466, 31480), 'multiprocessing.cpu_count', 'mp.cpu_count', ([], {}), '()\n', (31478, 31480), True, 'import multiprocessing as mp\n'), ((31492, 31508), 'multiprocessing.Pool', 'mp.Pool', (['num_cpu'], {}), '(num_cpu)\n', (31499, 31508), True, 
'import multiprocessing as mp\n'), ((31567, 31661), 'functools.partial', 'partial', (['_map_single_barcode_to_whitelist'], {'whitelist': 'whitelist', 'min_distance': 'min_distance'}), '(_map_single_barcode_to_whitelist, whitelist=whitelist, min_distance\n =min_distance)\n', (31574, 31661), False, 'from functools import partial\n'), ((34827, 34846), 'collections.deque', 'collections.deque', ([], {}), '()\n', (34844, 34846), False, 'import collections\n'), ((35794, 35829), 'numpy.where', 'np.where', (['(edit_dist <= min_distance)'], {}), '(edit_dist <= min_distance)\n', (35802, 35829), True, 'import numpy as np\n'), ((36069, 36097), 'collections.defaultdict', 'collections.defaultdict', (['set'], {}), '(set)\n', (36092, 36097), False, 'import collections\n'), ((36663, 36699), 'pandas.DataFrame', 'pd.DataFrame', (['barcode_split', 'barcode'], {}), '(barcode_split, barcode)\n', (36675, 36699), True, 'import pandas as pd\n'), ((37483, 37525), 'pandas.read_hdf', 'pd.read_hdf', (['molecular_info'], {'key': '"""feature"""'}), "(molecular_info, key='feature')\n", (37494, 37525), True, 'import pandas as pd\n'), ((37547, 37596), 'pandas.read_hdf', 'pd.read_hdf', (['molecular_info'], {'key': '"""molecular_info"""'}), "(molecular_info, key='molecular_info')\n", (37558, 37596), True, 'import pandas as pd\n'), ((37650, 37661), 'time.time', 'time.time', ([], {}), '()\n', (37659, 37661), False, 'import time\n'), ((37977, 38027), 'pandas.concat', 'pd.concat', (['[df, molecular_info]'], {'ignore_index': '(True)'}), '([df, molecular_info], ignore_index=True)\n', (37986, 38027), True, 'import pandas as pd\n'), ((38675, 38686), 'time.time', 'time.time', ([], {}), '()\n', (38684, 38686), False, 'import time\n'), ((39717, 39728), 'time.time', 'time.time', ([], {}), '()\n', (39726, 39728), False, 'import time\n'), ((42412, 42431), 'pandas.Categorical', 'pd.Categorical', (['umi'], {}), '(umi)\n', (42426, 42431), True, 'import pandas as pd\n'), ((42844, 42913), 'collections.namedtuple', 
'collections.namedtuple', (['"""coo_tuple"""', "['x', 'row_name', 'column_name']"], {}), "('coo_tuple', ['x', 'row_name', 'column_name'])\n", (42866, 42913), False, 'import collections\n'), ((43069, 43101), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': 'index_name'}), '(columns=index_name)\n', (43081, 43101), True, 'import pandas as pd\n'), ((44064, 44106), 'pandas.read_hdf', 'pd.read_hdf', (['molecular_info'], {'key': '"""feature"""'}), "(molecular_info, key='feature')\n", (44075, 44106), True, 'import pandas as pd\n'), ((44135, 44184), 'pandas.read_hdf', 'pd.read_hdf', (['molecular_info'], {'key': '"""molecular_info"""'}), "(molecular_info, key='molecular_info')\n", (44146, 44184), True, 'import pandas as pd\n'), ((44528, 44571), 'pandas.read_hdf', 'pd.read_hdf', (['molecular_info'], {'key': '"""map_info"""'}), "(molecular_info, key='map_info')\n", (44539, 44571), True, 'import pandas as pd\n'), ((44744, 44791), 'pandas.read_hdf', 'pd.read_hdf', (['molecular_info'], {'key': '"""read_in_cell"""'}), "(molecular_info, key='read_in_cell')\n", (44755, 44791), True, 'import pandas as pd\n'), ((45708, 45724), 'numpy.cumsum', 'np.cumsum', (['value'], {}), '(value)\n', (45717, 45724), True, 'import numpy as np\n'), ((45740, 45760), 'numpy.append', 'np.append', (['(0)', 'id_end'], {}), '(0, id_end)\n', (45749, 45760), True, 'import numpy as np\n'), ((46014, 46050), 'numpy.zeros', 'np.zeros', (['id_end[-1]'], {'dtype': 'np.int32'}), '(id_end[-1], dtype=np.int32)\n', (46022, 46050), True, 'import numpy as np\n'), ((46379, 46390), 'time.time', 'time.time', ([], {}), '()\n', (46388, 46390), False, 'import time\n'), ((47269, 47311), 'pandas.read_hdf', 'pd.read_hdf', (['molecular_info'], {'key': '"""feature"""'}), "(molecular_info, key='feature')\n", (47280, 47311), True, 'import pandas as pd\n'), ((47340, 47389), 'pandas.read_hdf', 'pd.read_hdf', (['molecular_info'], {'key': '"""molecular_info"""'}), "(molecular_info, key='molecular_info')\n", (47351, 47389), True, 
'import pandas as pd\n'), ((47833, 47880), 'pandas.read_hdf', 'pd.read_hdf', (['molecular_info'], {'key': '"""read_in_cell"""'}), "(molecular_info, key='read_in_cell')\n", (47844, 47880), True, 'import pandas as pd\n'), ((49231, 49245), 'random.seed', 'random.seed', (['(0)'], {}), '(0)\n', (49242, 49245), False, 'import random\n'), ((49944, 49955), 'time.time', 'time.time', ([], {}), '()\n', (49953, 49955), False, 'import time\n'), ((50672, 50715), 'numpy.arange', 'np.arange', (['cell_start_index', 'cell_end_index'], {}), '(cell_start_index, cell_end_index)\n', (50681, 50715), True, 'import numpy as np\n'), ((50761, 50777), 'numpy.cumsum', 'np.cumsum', (['value'], {}), '(value)\n', (50770, 50777), True, 'import numpy as np\n'), ((51037, 51087), 'pandas.concat', 'pd.concat', (['[df, molecular_info]'], {'ignore_index': '(True)'}), '([df, molecular_info], ignore_index=True)\n', (51046, 51087), True, 'import pandas as pd\n'), ((52092, 52148), 'functools.partial', 'partial', (['_filter_tag'], {'tag_hash': 'tag_hash', 'correct': 'correct'}), '(_filter_tag, tag_hash=tag_hash, correct=correct)\n', (52099, 52148), False, 'from functools import partial\n'), ((53223, 53234), 'time.time', 'time.time', ([], {}), '()\n', (53232, 53234), False, 'import time\n'), ((1723, 1745), 'yaml.safe_load', 'yaml.safe_load', (['stream'], {}), '(stream)\n', (1737, 1745), False, 'import yaml\n'), ((2074, 2086), 'sys.exit', 'sys.exit', (['(-1)'], {}), '(-1)\n', (2082, 2086), False, 'import sys\n'), ((2465, 2478), 'regex.compile', 're.compile', (['z'], {}), '(z)\n', (2475, 2478), True, 'import regex as re\n'), ((4479, 4529), 'subprocess.check_output', 'subprocess.check_output', (['cmd_cat_fastq'], {'shell': '(True)'}), '(cmd_cat_fastq, shell=True)\n', (4502, 4529), False, 'import subprocess\n'), ((4828, 4839), 'time.time', 'time.time', ([], {}), '()\n', (4837, 4839), False, 'import time\n'), ((5144, 5172), 'pandas.concat', 'pd.concat', (['[df, df1]'], {'axis': '(0)'}), '([df, df1], axis=0)\n', 
(5153, 5172), True, 'import pandas as pd\n'), ((12955, 13010), 'functools.partial', 'partial', (['np.zeros'], {'shape': '(ub_len[0] + 1)', 'dtype': 'np.uint32'}), '(np.zeros, shape=ub_len[0] + 1, dtype=np.uint32)\n', (12962, 13010), False, 'from functools import partial\n'), ((14475, 14502), 'gzip.open', 'gzip.open', (['fastq_file', '"""ab"""'], {}), "(fastq_file, 'ab')\n", (14484, 14502), False, 'import gzip\n'), ((15636, 15648), 'sys.exit', 'sys.exit', (['(-1)'], {}), '(-1)\n', (15644, 15648), False, 'import sys\n'), ((18507, 18518), 'time.time', 'time.time', ([], {}), '()\n', (18516, 18518), False, 'import time\n'), ((18869, 18881), 'sys.exit', 'sys.exit', (['(-1)'], {}), '(-1)\n', (18877, 18881), False, 'import sys\n'), ((19271, 19282), 'time.time', 'time.time', ([], {}), '()\n', (19280, 19282), False, 'import time\n'), ((20275, 20315), 'pandas.HDFStore', 'pd.HDFStore', (['molecular_info_h5'], {'mode': '"""w"""'}), "(molecular_info_h5, mode='w')\n", (20286, 20315), True, 'import pandas as pd\n'), ((20565, 20576), 'time.time', 'time.time', ([], {}), '()\n', (20574, 20576), False, 'import time\n'), ((20980, 20995), 'regex.compile', 're.compile', (['"""N"""'], {}), "('N')\n", (20990, 20995), True, 'import regex as re\n'), ((26699, 26726), 'pandas.DataFrame', 'pd.DataFrame', (['cb_list_split'], {}), '(cb_list_split)\n', (26711, 26726), True, 'import pandas as pd\n'), ((27456, 27469), 'numpy.array', 'np.array', (['[0]'], {}), '([0])\n', (27464, 27469), True, 'import numpy as np\n'), ((29014, 29046), 'numpy.setdiff1d', 'np.setdiff1d', (['cb_list', 'cb_remove'], {}), '(cb_list, cb_remove)\n', (29026, 29046), True, 'import numpy as np\n'), ((33053, 33073), 'itertools.chain', 'chain', (['*barcode_corr'], {}), '(*barcode_corr)\n', (33058, 33073), False, 'from itertools import chain\n'), ((36922, 36951), 'collections.defaultdict', 'collections.defaultdict', (['list'], {}), '(list)\n', (36945, 36951), False, 'import collections\n'), ((37730, 37741), 'time.time', 
'time.time', ([], {}), '()\n', (37739, 37741), False, 'import time\n'), ((39523, 39534), 'time.time', 'time.time', ([], {}), '()\n', (39532, 39534), False, 'import time\n'), ((40261, 40272), 'time.time', 'time.time', ([], {}), '()\n', (40270, 40272), False, 'import time\n'), ((42121, 42160), 'numpy.arange', 'np.arange', (['id_start[gene]', 'id_end[gene]'], {}), '(id_start[gene], id_end[gene])\n', (42130, 42160), True, 'import numpy as np\n'), ((45162, 45179), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (45173, 45179), False, 'import random\n'), ((46473, 46484), 'time.time', 'time.time', ([], {}), '()\n', (46482, 46484), False, 'import time\n'), ((48558, 48570), 'sys.exit', 'sys.exit', (['(-1)'], {}), '(-1)\n', (48566, 48570), False, 'import sys\n'), ((49442, 49462), 'numpy.append', 'np.append', (['(0)', 'id_end'], {}), '(0, id_end)\n', (49451, 49462), True, 'import numpy as np\n'), ((49722, 49758), 'numpy.zeros', 'np.zeros', (['id_end[-1]'], {'dtype': 'np.int32'}), '(id_end[-1], dtype=np.int32)\n', (49730, 49758), True, 'import numpy as np\n'), ((50038, 50049), 'time.time', 'time.time', ([], {}), '()\n', (50047, 50049), False, 'import time\n'), ((50523, 50555), 'numpy.sum', 'np.sum', (['x[id_start[i]:id_end[i]]'], {}), '(x[id_start[i]:id_end[i]])\n', (50529, 50555), True, 'import numpy as np\n'), ((53738, 53778), 'subprocess.check_output', 'subprocess.check_output', (['cmd'], {'shell': '(True)'}), '(cmd, shell=True)\n', (53761, 53778), False, 'import subprocess\n'), ((54118, 54171), 'regex.sub', 're.sub', (['"""-t exon"""', '"""-t transcript"""', 'cmd_feature_count'], {}), "('-t exon', '-t transcript', cmd_feature_count)\n", (54124, 54171), True, 'import regex as re\n'), ((54929, 54940), 'time.time', 'time.time', ([], {}), '()\n', (54938, 54940), False, 'import time\n'), ((4539, 4560), 'os.remove', 'os.remove', (['fastq_file'], {}), '(fastq_file)\n', (4548, 4560), False, 'import os\n'), ((4702, 4714), 'sys.exit', 'sys.exit', (['(-1)'], {}), '(-1)\n', 
(4710, 4714), False, 'import sys\n'), ((4798, 4810), 'sys.exit', 'sys.exit', (['(-1)'], {}), '(-1)\n', (4806, 4810), False, 'import sys\n'), ((5385, 5403), 'os.remove', 'os.remove', (['cb_file'], {}), '(cb_file)\n', (5394, 5403), False, 'import os\n'), ((5523, 5535), 'sys.exit', 'sys.exit', (['(-1)'], {}), '(-1)\n', (5531, 5535), False, 'import sys\n'), ((6595, 6629), 'pandas.concat', 'pd.concat', (['[chunks, chunk]'], {'axis': '(0)'}), '([chunks, chunk], axis=0)\n', (6604, 6629), True, 'import pandas as pd\n'), ((19075, 19100), 'pandas.Series', 'pd.Series', (['molecular_info'], {}), '(molecular_info)\n', (19084, 19100), True, 'import pandas as pd\n'), ((19786, 19832), 'numpy.floor', 'np.floor', (["(molecular_info['depth'].values + 0.5)"], {}), "(molecular_info['depth'].values + 0.5)\n", (19794, 19832), True, 'import numpy as np\n'), ((26611, 26623), 'sys.exit', 'sys.exit', (['(-1)'], {}), '(-1)\n', (26619, 26623), False, 'import sys\n'), ((27113, 27171), 'functools.partial', 'partial', (['_filter_tag_multi'], {'tag_hash': 'cb_hash', 'correct': '(True)'}), '(_filter_tag_multi, tag_hash=cb_hash, correct=True)\n', (27120, 27171), False, 'from functools import partial\n'), ((27284, 27336), 'functools.partial', 'partial', (['_filter_tag'], {'tag_hash': 'cb_hash', 'correct': '(True)'}), '(_filter_tag, tag_hash=cb_hash, correct=True)\n', (27291, 27336), False, 'from functools import partial\n'), ((36482, 36509), 'numpy.delete', 'np.delete', (['barcode_all', 'idx'], {}), '(barcode_all, idx)\n', (36491, 36509), True, 'import numpy as np\n'), ((39733, 39758), 'pandas.Series', 'pd.Series', (['count_row_name'], {}), '(count_row_name)\n', (39742, 39758), True, 'import pandas as pd\n'), ((39859, 39887), 'pandas.Series', 'pd.Series', (['count_column_name'], {}), '(count_column_name)\n', (39868, 39887), True, 'import pandas as pd\n'), ((53881, 53893), 'sys.exit', 'sys.exit', (['(-1)'], {}), '(-1)\n', (53889, 53893), False, 'import sys\n'), ((54052, 54064), 'sys.exit', 'sys.exit', 
(['(-1)'], {}), '(-1)\n', (54060, 54064), False, 'import sys\n'), ((54351, 54391), 'subprocess.check_output', 'subprocess.check_output', (['cmd'], {'shell': '(True)'}), '(cmd, shell=True)\n', (54374, 54391), False, 'import subprocess\n'), ((54547, 54587), 'subprocess.check_output', 'subprocess.check_output', (['cmd'], {'shell': '(True)'}), '(cmd, shell=True)\n', (54570, 54587), False, 'import subprocess\n'), ((3215, 3233), 'gzip.open', 'gzip.open', (['x', '"""wb"""'], {}), "(x, 'wb')\n", (3224, 3233), False, 'import gzip\n'), ((10154, 10175), 'regex.findall', 're.findall', (['"""\\\\d+"""', 'z'], {}), "('\\\\d+', z)\n", (10164, 10175), True, 'import regex as re\n'), ((10420, 10441), 'regex.findall', 're.findall', (['"""\\\\d+"""', 'z'], {}), "('\\\\d+', z)\n", (10430, 10441), True, 'import regex as re\n'), ((12430, 12454), 'regex.split', 're.split', (['str_split', 'seq'], {}), '(str_split, seq)\n', (12438, 12454), True, 'import regex as re\n'), ((12483, 12514), 'regex.findall', 're.findall', (['"""(\\\\d+)"""', 'barcode_i'], {}), "('(\\\\d+)', barcode_i)\n", (12493, 12514), True, 'import regex as re\n'), ((54702, 54714), 'sys.exit', 'sys.exit', (['(-1)'], {}), '(-1)\n', (54710, 54714), False, 'import sys\n'), ((54890, 54902), 'sys.exit', 'sys.exit', (['(-1)'], {}), '(-1)\n', (54898, 54902), False, 'import sys\n'), ((11957, 11975), 'regex.escape', 're.escape', (['barcode'], {}), '(barcode)\n', (11966, 11975), True, 'import regex as re\n')] |
import pymysql
import pandas as pd
import numpy as np
# MySQL connection settings for the PTTData database used by the
# loaders below.
HOST = '172.16.17.32'
USER = 'guest'
# NOTE(review): placeholder left by credential scrubbing -- supply a real
# password before use.
PASSWORD = '<PASSWORD>'
DATABASE = 'PTTData'
def LoadDataList():
    """Return the names of all tables in the PTTData database.

    Returns
    -------
    numpy.ndarray
        1-D array of table names, as returned by ``show tables``.
        NOTE: if the query fails the helper returns '' and
        ``np.concatenate`` raises -- a reachable database is assumed.
    """
    def execute_sql2(sql):
        # Run *sql* and fetch every row; return '' on any error.
        conn = ( pymysql.connect(host = HOST,
                        port = 3306,
                        user = USER,
                        password = PASSWORD,
                        database = DATABASE,
                        charset="utf8") )
        cursor = conn.cursor()
        try:
            cursor.execute(sql)
            data = cursor.fetchall()
            conn.close()
            return data
        except Exception:
            # Narrowed from a bare `except:` so KeyboardInterrupt /
            # SystemExit are no longer swallowed.
            conn.close()
            return ''
    tem = execute_sql2('show tables')
    # Rows arrive as 1-tuples; flatten them into a single array.
    value = np.concatenate(tem, axis=0)
    return value
def LoadData(table, select, date):
    """Load one or more columns of *table* where `date` >= *date*.

    Parameters
    ----------
    table : str
        Table name to query.
    select : str or list of str
        A single column name, or a list of column names.
    date : str
        Inclusive lower bound compared against the table's `date` column.

    Returns
    -------
    pandas.DataFrame
        One column per requested name. Returns None implicitly if
        *select* is neither a str nor a list.
    """
    def execute_sql2(sql):
        # Run *sql* and fetch every row; return '' on any error.
        conn = ( pymysql.connect(host = HOST,
                        port = 3306,
                        user = USER,
                        password = PASSWORD,
                        database = DATABASE,
                        charset="utf8") )
        cursor = conn.cursor()
        try:
            cursor.execute(sql)
            data = cursor.fetchall()
            conn.close()
            return data
        except Exception:
            # Narrowed from a bare `except:` so KeyboardInterrupt /
            # SystemExit are no longer swallowed.
            conn.close()
            return ''
    def load(table ,date, select):
        # NOTE(review): values are interpolated directly into the SQL
        # string -- only use with trusted table/column/date inputs.
        sql = "select `{}` from {} WHERE `date` >= '{}'".format(select,table,date)
        tem = execute_sql2(sql)
        data = pd.DataFrame(list(tem))
        if len(data) > 0:
            data.columns = [select]
        return data
    def load_multi(table ,date, select_list):
        # Fetch each column independently and assemble a single frame.
        data = pd.DataFrame()
        for select in select_list:
            value = load(table ,date, select)
            data[select] = value
        return data
    #-----------------------------------------------
    if isinstance(select,str):
        return load(table ,date, select)
    elif isinstance(select,list):
        return load_multi(table ,date, select)
| [
"pandas.DataFrame",
"pymysql.connect",
"numpy.concatenate"
] | [((767, 794), 'numpy.concatenate', 'np.concatenate', (['tem'], {'axis': '(0)'}), '(tem, axis=0)\n', (781, 794), True, 'import numpy as np\n'), ((203, 309), 'pymysql.connect', 'pymysql.connect', ([], {'host': 'HOST', 'port': '(3306)', 'user': 'USER', 'password': 'PASSWORD', 'database': 'DATABASE', 'charset': '"""utf8"""'}), "(host=HOST, port=3306, user=USER, password=PASSWORD,\n database=DATABASE, charset='utf8')\n", (218, 309), False, 'import pymysql\n'), ((901, 1007), 'pymysql.connect', 'pymysql.connect', ([], {'host': 'HOST', 'port': '(3306)', 'user': 'USER', 'password': 'PASSWORD', 'database': 'DATABASE', 'charset': '"""utf8"""'}), "(host=HOST, port=3306, user=USER, password=PASSWORD,\n database=DATABASE, charset='utf8')\n", (916, 1007), False, 'import pymysql\n'), ((1748, 1762), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (1760, 1762), True, 'import pandas as pd\n')] |
#!/bin/python
# -*- coding: utf-8 -*-
#import ImFEATbox
#from PIL import Image
import Image
import numpy as np
import ImFEATbox
from ImFEATbox.__helperCommands import rgb2grayscale
import csv
import matplotlib.pyplot as plt
#print(ImFEATbox.getFeatureNames())
# load test image
with open('testimg.csv', 'r') as csvfile:
    # np.float was deprecated in NumPy 1.20 and removed in 1.24; the
    # builtin float is the exact equivalent of the old alias.
    I = np.array(list(csv.reader(csvfile, delimiter=','))).astype(float)
#I = rgb2grayscale(I)
out_python = ImFEATbox.GlobalFeatures.Intensity.gradient.cFeatures(I)
###############################################################################
# * Matlab Code for CSV feature extract:
#	csvwrite('matlab-out.csv', Out)
###############################################################################
matlabBin = "/usr/local/MATLAB/R2016b/bin/matlab"
gitPath = "/home/heiko/git"
mFile = gitPath + "/ImFEATbox/features_python/ImFEATbox/GlobalFeatures/Intensity/GradientF.m"
matlabParameter = "-nojvm -nodisplay -nosplash -r \"try, run('" + mFile + "'), , catch, exit, end, exit\""
# .. now read matlab csv:
with open('matlab-out.csv', 'r') as csvfile:
    out_matlab = np.array(list(csv.reader(csvfile, delimiter=','))).astype(float).ravel()
# now compare matlab and python output:
if len(out_python) != len(out_matlab):
    print("Problem: not same # of features: python: " + str(len(out_python)) + ", matlab: " + str(len(out_matlab)))
    print("quit")
    quit()
print("shape matlab: " + str(np.shape(out_matlab)) + ", shape python: " + str(np.shape(out_python)))
# we see matlab as reference code
diff = out_matlab - out_python
maxval_matlab = np.max(out_matlab)
maxval_python = np.max(out_python)
minval_matlab = np.min(out_matlab)
minval_python = np.min(out_python)
valrange_matlab = maxval_matlab - minval_matlab
valrange_python = maxval_python - minval_python
#print(np.abs(diff))
# max value differs 5% of value range
if abs(maxval_matlab - maxval_python) > 0.05 * valrange_matlab:
    print("Problem: maximum differs > 5 percent of value range")
# corresponding indices of (nearly) matching / diverging features,
# thresholded as a fraction of the Matlab value range
diffindex01 = np.where(np.abs(diff) < 0.001*valrange_matlab)[0]
diffindex1 = np.where(np.abs(diff) > 0.01*valrange_matlab)[0]
diffindex5 = np.where(np.abs(diff) > 0.05*valrange_matlab)[0]
diffindex10 = np.where(np.abs(diff) > 0.1*valrange_matlab)[0]
diffindex = np.where(np.abs(diff) > 0)[0]
diffpercentage = (diff/valrange_matlab)*100
maxdiffpercentage = np.max(np.abs(diffpercentage))
print("Result:")
print("\t* " + str(len(diffindex01)) + " values match < 0.1 percent")
print("\t* maximum difference: " + str(maxdiffpercentage) + " percent")
print("\t* " + str(len(diffindex1)) + " values differ > 1 percent")
print("\t* " + str(len(diffindex5)) + " values differ > 5 percent")
print("\t* " + str(len(diffindex10)) + " values differ > 10 percent")
#print(diffindex)
#for i in diffindex[0]:
#    print("i=" + str(i) + " : " + str(dif[i]))
#plt.imshow(I, cmap='gray')
# Bar chart of the per-feature difference (percent of Matlab range)
plt.bar(range(1,len(diff)+1), diffpercentage)
plt.show()
| [
"numpy.abs",
"matplotlib.pyplot.show",
"csv.reader",
"ImFEATbox.GlobalFeatures.Intensity.gradient.cFeatures",
"numpy.shape",
"numpy.max",
"numpy.min"
] | [((436, 492), 'ImFEATbox.GlobalFeatures.Intensity.gradient.cFeatures', 'ImFEATbox.GlobalFeatures.Intensity.gradient.cFeatures', (['I'], {}), '(I)\n', (489, 492), False, 'import ImFEATbox\n'), ((1594, 1612), 'numpy.max', 'np.max', (['out_matlab'], {}), '(out_matlab)\n', (1600, 1612), True, 'import numpy as np\n'), ((1629, 1647), 'numpy.max', 'np.max', (['out_python'], {}), '(out_python)\n', (1635, 1647), True, 'import numpy as np\n'), ((1664, 1682), 'numpy.min', 'np.min', (['out_matlab'], {}), '(out_matlab)\n', (1670, 1682), True, 'import numpy as np\n'), ((1699, 1717), 'numpy.min', 'np.min', (['out_python'], {}), '(out_python)\n', (1705, 1717), True, 'import numpy as np\n'), ((2955, 2965), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2963, 2965), True, 'import matplotlib.pyplot as plt\n'), ((2397, 2419), 'numpy.abs', 'np.abs', (['diffpercentage'], {}), '(diffpercentage)\n', (2403, 2419), True, 'import numpy as np\n'), ((1489, 1509), 'numpy.shape', 'np.shape', (['out_python'], {}), '(out_python)\n', (1497, 1509), True, 'import numpy as np\n'), ((2055, 2067), 'numpy.abs', 'np.abs', (['diff'], {}), '(diff)\n', (2061, 2067), True, 'import numpy as np\n'), ((2118, 2130), 'numpy.abs', 'np.abs', (['diff'], {}), '(diff)\n', (2124, 2130), True, 'import numpy as np\n'), ((2180, 2192), 'numpy.abs', 'np.abs', (['diff'], {}), '(diff)\n', (2186, 2192), True, 'import numpy as np\n'), ((2243, 2255), 'numpy.abs', 'np.abs', (['diff'], {}), '(diff)\n', (2249, 2255), True, 'import numpy as np\n'), ((2303, 2315), 'numpy.abs', 'np.abs', (['diff'], {}), '(diff)\n', (2309, 2315), True, 'import numpy as np\n'), ((345, 379), 'csv.reader', 'csv.reader', (['csvfile'], {'delimiter': '""","""'}), "(csvfile, delimiter=',')\n", (355, 379), False, 'import csv\n'), ((1440, 1460), 'numpy.shape', 'np.shape', (['out_matlab'], {}), '(out_matlab)\n', (1448, 1460), True, 'import numpy as np\n'), ((1121, 1155), 'csv.reader', 'csv.reader', (['csvfile'], {'delimiter': '""","""'}), "(csvfile, 
delimiter=',')\n", (1131, 1155), False, 'import csv\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat May 16 14:53:58 2020
@author: ggleizer
"""
import graph_tool.all as gt
import numpy as np
import scipy.sparse.linalg as sla
import scipy.sparse as sparse
import scipy.linalg as la
from scipy.sparse.linalg.eigen.arpack import ArpackNoConvergence
class TrafficAutomaton:
    r"""
    Weighted automaton (with outputs) representing a traffic model.

    Creates a graph-tool graph based on input regions, which are encoded
    as sequences of numbers (tuples), and builds edges following the domino
    rule. That is, s --> d iff all_but_last(d) is a prefix of
    all_but_first(s). For a region r, its output is r[0] and the weight of
    an edge is r[0] for every r --> r\' in the edge set.

    This class is used to perform operations that are faster or more
    convenient to use a graph object.

    Parameters
    ----------
    data : set of tuples, or dict
        Either the abstraction state space X (regions), or a transition
        map {(region, weight): iterable of successor regions}.
    strat : dict, optional
        Strategy mapping outputs to weights, used when weights are
        derived from the regions.

    Attributes
    ----------
    regions : list of tuples
        The abstraction state space X.
    y : gt.VertexPropertyMap
        Corresponding outputs.
    G : gt.Graph
        The graph-tool graph object representing the automaton. G
        has the edge property weight and the vertex property y.

    Methods
    -------
    all_behavioral_cycles :
        Iterator on all cycles of the graph (not suitable for large/dense
        graphs).
    generate_components :
        Generate strongly connected components of the graph.
    minimum_average_cycle :
        Compute the minimum average cycle using Karp\'s algorithm.
    plot :
        Plot the automaton as a graph, with outputs as node labels.
    """
    def __init__(self, data, strat=None):
        self.has_multiple_actions = False
        self.strat = strat
        if type(data) is dict:
            Ei, w = self._init_transition(data)
        else:
            Ei = self._init_regions(data)
            w = None
        # Build graph
        self.G = gt.Graph()
        self.G.add_edge_list(Ei)
        # Add output and weight properties to the graph
        self.fill_props(w)
        # Initialize components and condensation graph variables
        self.comp = None
        self.CG = None
        # Initialize component value list, in case values are later computed
        self.min_v = None
        self.max_v = None
        self.min_vs = None
        self.max_vs = None

    def _init_regions(self, regions):
        """Build the edge list from regions using the domino rule."""
        # Use a list (instead of set) of regions for indexing
        self.regions = [r for r in regions]
        # Build the list of edges using the domino rule
        Ei = [(i,j) for j,d in enumerate(regions)
              for i,s in enumerate(regions) if d[:len(s)-1] == s[1:len(d)+1]]
        return Ei

    def _init_transition(self, transition):
        """Build edge list and weights from an explicit transition map."""
        regions = set(x[0] for x in transition)
        # More transition entries than regions => several actions per region
        self.has_multiple_actions = len(transition) > len(regions)
        # Use a list (instead of set) of regions for indexing
        self.regions = [r for r in regions]
        Ei = [(self.regions.index(x[0]),self.regions.index(y))
              for x,post in transition.items() for y in post]
        w = [x[1] for x,post in transition.items() for _ in post]
        return Ei, w

    def all_behavioral_cycles(self):
        """Yield every cycle of the graph as a list of outputs."""
        for c in gt.all_circuits(self.G):
            new_c = [self.regions[x][0] for x in c]
            yield new_c

    def fill_props(self, w):
        """Attach the 'weight' edge property and 'output' vertex property.

        When *w* is None the weights are re-derived from the regions
        (optionally through the strategy map), mirroring the edge order
        produced by the domino rule.
        """
        # Create weights
        if w is None:
            if self.strat:
                w = [self.strat[s[0]] for d in self.regions
                     for s in self.regions
                     if d[:len(s)-1] == s[1:len(d)+1]]
            else:
                w = [s[0] for d in self.regions for s in self.regions
                     if d[:len(s)-1] == s[1:len(d)+1]]
        weights = self.G.new_edge_property('short', w)
        self.G.edge_properties['weight'] = weights
        self.w = weights
        # Create outputs for plotting
        try:
            y = self.G.new_vertex_property('int', [x[0] for x in self.regions])
        except TypeError:
            # Regions may be tuples of tuples; take the first scalar.
            y = self.G.new_vertex_property('int',
                                           [x[0][0] for x in self.regions])
        self.G.vertex_properties['output'] = y
        self.y = y

    def plot(self):
        """Draw the automaton; edge weights are shown only when relevant."""
        if self.has_multiple_actions:
            gt.graph_draw(self.G, vertex_text=self.y, edge_text=self.w)
        else:
            gt.graph_draw(self.G, vertex_text=self.y)

    def plot_condensation_graph(self):
        """Draw the condensation (SCC) graph with per-SCC values."""
        if self.CG is None:
            self.generate_condensation_graph()
        gt.graph_draw(self.CG, vertex_text=self.CG.vp.value)

    def generate_components(self):
        """Label strongly connected components; returns (comp, hist, attr)."""
        comp, hist, attr = gt.label_components(self.G, attractors=True)
        self.comp = comp
        return comp, hist, attr

    def generate_condensation_graph(self, max_val=True):
        """Build the condensation (SCC) graph and remap per-SCC values.

        Parameters
        ----------
        max_val : bool
            When both min and max values are available, attach the
            maximum ones to the condensation graph.
        """
        if self.comp is None:
            self.generate_components()
        self.CG, prop = gt.condensation_graph(self.G, self.comp)[0:2]
        self.comp_map = prop
        # Remap per-SCC values (stored in component order) to the vertex
        # order of the condensation graph. `v` defaults to None: the
        # original if/elif chain raised NameError when both values were
        # set with max_val=False, and TypeError when neither was set.
        v = None
        if self.max_v is not None and self.min_v is not None and max_val:
            self.max_v = self.max_v[prop.a]
            v = self.max_v
        elif self.max_v is None and self.min_v is not None:
            self.min_v = self.min_v[prop.a]
            v = self.min_v
        elif self.min_v is None and self.max_v is not None:
            self.max_v = self.max_v[prop.a]
            v = self.max_v
        if v is not None:
            values = self.CG.new_vertex_property('float', v)
            self.CG.vertex_properties['value'] = values

    def generate_max_avg_cycle_per_state(self):
        """Compute, per state, the maximum average cycle value reachable."""
        if self.max_v is None:
            self.maximum_average_cycle()
        if self.CG is None:
            self.generate_condensation_graph()
        # Generate maximum average cycle per SCC
        w_comp = max_reachable_node_dag(self.CG, self.max_v)
        # Value of state... first sort back to the order of components
        w_comp = np.array([x[1] for x in sorted(zip(self.comp_map.a, w_comp.a))])
        vs_array = w_comp[self.comp.a]
        self.max_vs = vs_array

    def generate_min_avg_cycle_per_state(self):
        """Compute, per state, the minimum average cycle value reachable."""
        if self.min_v is None:
            self.minimum_average_cycle()
        if self.CG is None:
            self.generate_condensation_graph()
        # Generate minimum average cycle per SCC: minimize by maximizing
        # the negated values.
        w = -self.min_v
        w_comp = max_reachable_node_dag(self.CG, w)
        # Value of state... first sort back to the order of components
        w_comp = np.array([x[1] for x in sorted(zip(self.comp_map.a, w_comp.a))])
        vs_array = w_comp[self.comp.a]
        self.min_vs = -vs_array

    def maximum_average_cycle(self):
        """Compute a maximum average cycle by negating edge weights."""
        min_v = self.min_v
        # Negate the weights in place, run Karp, then restore them.
        self.G.ep.weight.a = -self.G.ep.weight.a
        v, b, c, i = self.minimum_average_cycle()
        self.G.ep.weight.a = -self.G.ep.weight.a
        self.max_v = -self.min_v
        # Recover min value if it was computed
        self.min_v = min_v
        try:  # single value
            v = -v
        except TypeError:  # list of values
            v = [-vv for vv in v]
        return v, b, c, i

    def minimum_average_cycle(self):
        """Compute a minimum average cycle of the traffic model.

        Use Karp's algorithm to compute the minimum average cycle
        of our traffic (weighted) automaton.

        Returns
        -------
        val : float
            The LimAvg value of the minimum average cycle
        behavioral_cycle : tuple
            The minimum average cycle, in terms of outputs
        cycle_regions : list
            The minimum average cycle, as a list of regions (states)
        is_simple_cycle: bool
            Whether the cycle is the only cycle in its SCC.
        """
        # This must be done in each strongly connected component
        comp = self.generate_components()[0]
        # Store the component list
        components = set(comp.a)
        if self.strat:
            val = max(max(r[0]) for r in self.regions)+1  # kbar + 1
        else:
            val = max(max(r) for r in self.regions)+1  # kbar + 1
        # Save maximal weight of the whole graph
        w_max = max(self.G.ep.weight.a)
        val_list = []
        full_val_list = []
        cycle_list = []
        cycle_region_list = []
        is_simple_cycle_list = []
        for component in sorted(components):
            # Filter for this component
            prop = self.G.new_vertex_property('bool',
                                              comp.a == component)
            self.G.set_vertex_filter(prop)  # ...
            if self.G.num_edges() > 0:  # not an isolated node
                new_val, new_cycle = karp(self.G)
                new_is_simple_cycle = all(self.G.get_out_degrees(
                    self.G.get_vertices()) == 1)
                # Unfilter
                self.G.set_vertex_filter(None)
                # Get cycle data
                new_cycle_regions = [self.regions[int(x)] for x in new_cycle]
                new_behavioral_cycle = tuple(r[0] for r in new_cycle_regions)
                # Get SCC data
                SCC_regions = {self.regions[int(x)]
                               for x in self.G.iter_vertices() if prop[x]}
                val_list.append(new_val)
                full_val_list.append(new_val)
                cycle_list.append(new_behavioral_cycle)
                cycle_region_list.append(SCC_regions)
                is_simple_cycle_list.append(new_is_simple_cycle)
                if new_val < val:
                    cycle = new_cycle
                    val = new_val
                    cycle_regions = new_cycle_regions
                    behavioral_cycle = new_behavioral_cycle
                    is_simple_cycle = new_is_simple_cycle
            else:  # Unfilter and move on
                self.G.set_vertex_filter(None)
                # Default value for acyclic components should be larger than
                # the global maximal weight (inf would also work)
                full_val_list.append(w_max + 1)
        # NOTE(review): if every SCC is a single edgeless node, `cycle`
        # (and `is_simple_cycle`) are never bound and the lines below
        # raise NameError -- confirm callers guarantee at least one cycle.
        cycle_regions = [self.regions[int(x)] for x in cycle]
        behavioral_cycle = tuple(r[0] for r in cycle_regions)
        self.min_v = np.array(full_val_list)
        return val, behavioral_cycle, cycle_regions, is_simple_cycle
def karp(G: gt.Graph):
    """An implementation of Karp's algorithm for minimum average cycle.

    The algorithm assumes the graph is strongly connected, but does not
    check this for speed reasons.

    From a table of the k-shortest paths from an arbitrary node (here, 0)
    the minimum value is
        val = min_v max_k (dp[v,n] - dp[v,k])/(n-k)

    Parameters
    ----------
    G : gt.Graph
        A strongly connected graph with the <int or float> edge
        property "weight".

    Returns
    -------
    val : float
        The minimum average (np.inf if graph has no cycles)
    cycle : list, or None
        List of vertex indices that compose the minimum cycle. None if
        the graph has no cycles
    """
    # Table of shortest k-paths
    n = G.num_vertices()
    # Use the sparse-matrix version of shortest k paths (7-10x faster
    # than iterating over edge objects).
    w = G.ep['weight']  # (fixed the accidental duplicate `w = w = ...`)
    W = gt.adjacency(G, w)
    dp, bp = shortest_k_paths_sparse(W)
    # val: min_v max_k (dp[v,n] - dp[v,k])/(n-k)
    val = np.inf
    v_min = None  # Minimizer
    # This loop is needed to store the vertex associated with the minimizer
    # for recovering the cycle.
    for v in range(n):
        val_v = max((dp[v,n] - dp[v,k])/(n-k) for k in range(n)
                    if not np.isinf(dp[v,k]))
        if val_v < val:
            v_min = v
            val = val_v
    # Now recover the minimizer cycle (Chatuverdi and McConnell, 2017)
    if v_min is not None:
        # A loop to detect a cycle in O(n) as suggested in the paper,
        # walking backwards from the minimizer vertex.
        v = v_min
        walk = [v]
        marked = set((v,))
        for k in range(n,-1,-1):
            v = bp[v,k]
            walk.insert(0,v)
            if v in marked:
                break
            marked.add(v)
        cycle = walk[0:walk.index(v,1)]
        # Make list of vertex pointers instead of indices
        v_dict = {i:v for i,v in enumerate(G.vertices())}
        cycle = [v_dict[i] for i in cycle]
    else:
        cycle = None
    return val, cycle
def shortest_k_paths(G: gt.Graph):
    """Tabulate the weights of the shortest k-paths starting at vertex 0.

    Building block of Karp's minimum average cycle algorithm. Uses the
    dynamic programming recursion

        dp[k][v] = min_(u,v in E)(dp[k-1][u] + weight(u,v))

    Parameters
    ----------
    G : gt.Graph
        A strongly connected graph with the <int or float> edge
        property "weight".

    Returns
    -------
    dp : np.array
        Element (i,j) contains the minimum weight of the j-path
        from 0 to node i.
    bp : np.array
        Element (i,j) contains the index of the (j-1)-th vertex
        of the minimum j-path from 0 to i (backpointer).
    """
    n = G.num_vertices()
    # Every path weight starts at infinity except the empty path 0-->0.
    dp = np.full((n, n + 1), np.inf)
    dp[0, 0] = 0
    # Backpointer table; -1 marks "no path found yet".
    bp = np.full((n, n + 1), -1, dtype='int')
    index_of = {vert: i for i, vert in enumerate(G.vertices())}
    for k in range(1, n + 1):
        # Relax every edge once per path length.
        for edge in G.edges():
            src = index_of[edge.source()]
            dst = index_of[edge.target()]
            candidate = dp[src, k - 1] + G.ep.weight[edge]
            if candidate < dp[dst, k]:
                dp[dst, k] = candidate
                bp[dst, k] = src
    return dp, bp
def shortest_k_paths_sparse(W: sparse.csr_matrix):
    """Tabulate the weights of the shortest k-paths starting at vertex 0.

    Sparse-matrix variant used by Karp's minimum average cycle
    algorithm. Row v of *W* is expected to hold the weights of the
    in-edges of v, so the recursion

        dp[k][v] = min_(u,v in E)(dp[k-1][u] + weight(u,v))

    becomes a vectorized minimum over each CSR row.

    Parameters
    ----------
    W : sparse.csr_matrix
        The weighted adjacency matrix of a strongly connected graph.

    Returns
    -------
    dp : np.array
        Element (i,j) contains the minimum weight of the j-path
        from 0 to node i.
    bp : np.array
        Element (i,j) contains the index of the (j-1)-th vertex
        of the minimum j-path from 0 to i (backpointer).
    """
    n = W.shape[0]
    # Every path weight starts at infinity except the empty path 0-->0.
    dp = np.full((n, n + 1), np.inf)
    dp[0, 0] = 0
    # Backpointer table; -1 marks "no path found yet".
    bp = np.full((n, n + 1), -1, dtype='int')
    data, indices, indptr = W.data, W.indices, W.indptr
    for k in range(1, n + 1):
        for v in range(n):
            # CSR slice: predecessors of v and the matching edge weights.
            lo, hi = indptr[v], indptr[v + 1]
            preds = indices[lo:hi]
            costs = dp[preds, k - 1] + data[lo:hi]
            best = np.argmin(costs)
            dp[v, k] = costs[best]
            bp[v, k] = preds[best]
    return dp, bp
def max_reachable_node_dag(G, w):
    """Compute the value of the maximum reachable node for any node.

    It assumes we have a directed acyclic graph G, and w is the list of
    vertex weights. Returns a list of weights accordingly."""
    # DFS visitor that propagates the maximum weight backwards along the
    # edges as the search examines them.
    class Visitor(gt.DFSVisitor):
        def __init__(self, w):
            # Per-vertex running maxima, seeded with the input weights.
            self.w = G.new_vertex_property('float', w)
            self.s = 0
        def discover_vertex(self, u):
            # Remember the most recently discovered vertex.
            self.s = u
        def examine_edge(self, e):
            # Pull the target's (possibly already maximized) value into
            # the source.
            self.w[e.source()] = max(self.w[e.source()], self.w[e.target()])
        def finish_vertex(self, u):
            # NOTE(review): relies on graph-tool's DFS callback ordering;
            # `self.s` holds the last discovered vertex when u finishes --
            # confirm this matches the intended propagation.
            self.w[u] = max(self.w[u], self.w[self.s])
    visitor = Visitor(w.copy())
    gt.dfs_search(G, visitor=visitor)
    return visitor.w
| [
"graph_tool.all.all_circuits",
"graph_tool.all.Graph",
"graph_tool.all.dfs_search",
"graph_tool.all.condensation_graph",
"graph_tool.all.label_components",
"graph_tool.all.graph_draw",
"numpy.zeros",
"numpy.ones",
"numpy.argmin",
"numpy.isinf",
"graph_tool.all.adjacency",
"numpy.array"
] | [((11263, 11281), 'graph_tool.all.adjacency', 'gt.adjacency', (['G', 'w'], {}), '(G, w)\n', (11275, 11281), True, 'import graph_tool.all as gt\n'), ((13488, 13508), 'numpy.zeros', 'np.zeros', (['(n, n + 1)'], {}), '((n, n + 1))\n', (13496, 13508), True, 'import numpy as np\n'), ((14806, 14826), 'numpy.zeros', 'np.zeros', (['(n, n + 1)'], {}), '((n, n + 1))\n', (14814, 14826), True, 'import numpy as np\n'), ((16120, 16153), 'graph_tool.all.dfs_search', 'gt.dfs_search', (['G'], {'visitor': 'visitor'}), '(G, visitor=visitor)\n', (16133, 16153), True, 'import graph_tool.all as gt\n'), ((1993, 2003), 'graph_tool.all.Graph', 'gt.Graph', ([], {}), '()\n', (2001, 2003), True, 'import graph_tool.all as gt\n'), ((3301, 3324), 'graph_tool.all.all_circuits', 'gt.all_circuits', (['self.G'], {}), '(self.G)\n', (3316, 3324), True, 'import graph_tool.all as gt\n'), ((4638, 4690), 'graph_tool.all.graph_draw', 'gt.graph_draw', (['self.CG'], {'vertex_text': 'self.CG.vp.value'}), '(self.CG, vertex_text=self.CG.vp.value)\n', (4651, 4690), True, 'import graph_tool.all as gt\n'), ((4803, 4847), 'graph_tool.all.label_components', 'gt.label_components', (['self.G'], {'attractors': '(True)'}), '(self.G, attractors=True)\n', (4822, 4847), True, 'import graph_tool.all as gt\n'), ((10263, 10286), 'numpy.array', 'np.array', (['full_val_list'], {}), '(full_val_list)\n', (10271, 10286), True, 'import numpy as np\n'), ((13601, 13633), 'numpy.ones', 'np.ones', (['(n, n + 1)'], {'dtype': '"""int"""'}), "((n, n + 1), dtype='int')\n", (13608, 13633), True, 'import numpy as np\n'), ((14919, 14951), 'numpy.ones', 'np.ones', (['(n, n + 1)'], {'dtype': '"""int"""'}), "((n, n + 1), dtype='int')\n", (14926, 14951), True, 'import numpy as np\n'), ((4387, 4446), 'graph_tool.all.graph_draw', 'gt.graph_draw', (['self.G'], {'vertex_text': 'self.y', 'edge_text': 'self.w'}), '(self.G, vertex_text=self.y, edge_text=self.w)\n', (4400, 4446), True, 'import graph_tool.all as gt\n'), ((4473, 4514), 
'graph_tool.all.graph_draw', 'gt.graph_draw', (['self.G'], {'vertex_text': 'self.y'}), '(self.G, vertex_text=self.y)\n', (4486, 4514), True, 'import graph_tool.all as gt\n'), ((5056, 5096), 'graph_tool.all.condensation_graph', 'gt.condensation_graph', (['self.G', 'self.comp'], {}), '(self.G, self.comp)\n', (5077, 5096), True, 'import graph_tool.all as gt\n'), ((15303, 15329), 'numpy.argmin', 'np.argmin', (['cost_candidates'], {}), '(cost_candidates)\n', (15312, 15329), True, 'import numpy as np\n'), ((11689, 11707), 'numpy.isinf', 'np.isinf', (['dp[v, k]'], {}), '(dp[v, k])\n', (11697, 11707), True, 'import numpy as np\n')] |
import os
import datetime
import json
import tensorflow as tf
import tensorflow.contrib.slim as slim
import numpy as np
import scipy
class Namespace(dict):
    """A dict subclass that exposes its items as attributes.

    Warning: Namespace instances do not have direct access to the
    dict methods.

    Taken from: http://code.activestate.com/recipes/577887-a-simple-namespace-class/
    """
    def __init__(self, obj=()):
        # The original default was the mutable literal ``{}``; an empty
        # tuple is a safe, immutable equivalent (dict(()) == {}).
        super().__init__(obj)

    def __dir__(self):
        return tuple(self)

    def __repr__(self):
        return "%s(%s)" % (type(self).__name__, super().__repr__())

    def __getattribute__(self, name):
        # Items shadow real attributes: every lookup goes through the
        # mapping, which is why dict methods are unreachable on instances.
        try:
            return self[name]
        except KeyError:
            msg = "'%s' object has no attribute '%s'"
            raise AttributeError(msg % (type(self).__name__, name))

    def __setattr__(self, name, value):
        self[name] = value

    def __delattr__(self, name):
        del self[name]

    #------------------------
    # "copy constructors"

    @classmethod
    def from_object(cls, obj, names=None):
        """Build a Namespace from (a subset of) *obj*'s attributes."""
        if names is None:
            names = dir(obj)
        ns = {name:getattr(obj, name) for name in names}
        return cls(ns)

    @classmethod
    def from_mapping(cls, ns, names=None):
        """Build a Namespace from (a subset of) a mapping."""
        if names:
            ns = {name:ns[name] for name in names}
        return cls(ns)

    @classmethod
    def from_sequence(cls, seq, names=None):
        """Build a Namespace from a sequence of (name, value) pairs."""
        if names:
            seq = {name:val for name, val in seq if name in names}
        return cls(seq)

    #------------------------
    # static methods

    @staticmethod
    def hasattr(ns, name):
        """True iff *ns* has a real attribute *name* (items do not count)."""
        try:
            object.__getattribute__(ns, name)
        except AttributeError:
            return False
        return True

    @staticmethod
    def getattr(ns, name):
        """Real attribute access, bypassing the item-based lookup."""
        return object.__getattribute__(ns, name)

    @staticmethod
    def setattr(ns, name, value):
        """Real attribute assignment, bypassing the item-based mapping."""
        return object.__setattr__(ns, name, value)

    @staticmethod
    def delattr(ns, name):
        """Real attribute deletion, bypassing the item-based mapping."""
        return object.__delattr__(ns, name)
def checkfolder(log_dir):
    """Create *log_dir* (including parents) if missing, and return it.

    Uses ``exist_ok=True`` so a directory created concurrently between
    an existence check and the creation cannot raise (the original
    check-then-create pattern was racy).
    """
    os.makedirs(log_dir, exist_ok=True)
    return log_dir
def show_all_variables():
    """Print a per-variable size breakdown of all trainable TF variables."""
    slim.model_analyzer.analyze_vars(tf.trainable_variables(), print_info=True)
def show_message(msg_str, lvl=0):
    """Print *msg_str* prefixed with the current timestamp.

    lvl 0 prints a single line; lvl 1 additionally wraps the line in
    horizontal rules; any other level prints nothing.
    """
    if lvl not in (0, 1):
        return
    if lvl == 1:
        print('______________________________________________________________')
    print(datetime.datetime.now(), '-', msg_str)
    if lvl == 1:
        print('--------------------------------------------------------------')
def save_dict(dict, path, filename):
    """Serialize *dict* as JSON to ``path/filename``.

    NOTE: the first parameter shadows the builtin ``dict``; the name is
    kept for backward compatibility with keyword callers.
    """
    fullpath = os.path.join(path, filename)
    # 'w' truncates and writes; the read capability of the original 'w+'
    # mode was never used.
    with open(fullpath, 'w') as fp:
        json.dump(dict, fp)
def save_model_configuration(args, path):
    """Persist the argparse namespace *args* as configuration.json under *path*."""
    save_dict(vars(args), path, 'configuration.json')
def load_model_configuration(path):
    """Load configuration.json from *path* and wrap it in a Namespace."""
    with open(os.path.join(path, 'configuration.json'), 'r') as fp:
        return Namespace(json.load(fp))
def save_image_local(image, path, infostr):
    """Save *image* as a timestamped PNG under *path*.

    The filename is ``<YYYYmmdd_THHMMSS>_<infostr>.png``. Singleton
    dimensions are squeezed away, and pixel values are mapped from the
    [-1, 1] range (cmin/cmax below).
    """
    datestr = datetime.datetime.now().strftime('%Y%m%d_T%H%M%S')
    filename = datestr + '_' + infostr + '.png'
    path = os.path.join(path,filename)
    image = np.squeeze(image)
    # NOTE(review): scipy.misc.toimage was removed in SciPy 1.2 -- this
    # call only works with an old SciPy (plus Pillow) installed.
    scipy.misc.toimage(image, cmin=-1, cmax=1).save(path)
def save_image_local_batch(images,path,infostr):
    """Save every image of the batch via save_image_local, suffixing its index."""
    for idx in range(images.shape[0]):
        save_image_local(images[idx,:,:,:], path, infostr + '_' +str(idx))
| [
"scipy.misc.toimage",
"json.dump",
"json.load",
"os.makedirs",
"tensorflow.trainable_variables",
"os.path.exists",
"datetime.datetime.now",
"tensorflow.contrib.slim.model_analyzer.analyze_vars",
"numpy.squeeze",
"os.path.join"
] | [((2218, 2242), 'tensorflow.trainable_variables', 'tf.trainable_variables', ([], {}), '()\n', (2240, 2242), True, 'import tensorflow as tf\n'), ((2247, 2308), 'tensorflow.contrib.slim.model_analyzer.analyze_vars', 'slim.model_analyzer.analyze_vars', (['model_vars'], {'print_info': '(True)'}), '(model_vars, print_info=True)\n', (2279, 2308), True, 'import tensorflow.contrib.slim as slim\n'), ((2723, 2751), 'os.path.join', 'os.path.join', (['path', 'filename'], {}), '(path, filename)\n', (2735, 2751), False, 'import os\n'), ((3014, 3054), 'os.path.join', 'os.path.join', (['path', '"""configuration.json"""'], {}), "(path, 'configuration.json')\n", (3026, 3054), False, 'import os\n'), ((3321, 3349), 'os.path.join', 'os.path.join', (['path', 'filename'], {}), '(path, filename)\n', (3333, 3349), False, 'import os\n'), ((3361, 3378), 'numpy.squeeze', 'np.squeeze', (['image'], {}), '(image)\n', (3371, 3378), True, 'import numpy as np\n'), ((2101, 2124), 'os.path.exists', 'os.path.exists', (['log_dir'], {}), '(log_dir)\n', (2115, 2124), False, 'import os\n'), ((2134, 2154), 'os.makedirs', 'os.makedirs', (['log_dir'], {}), '(log_dir)\n', (2145, 2154), False, 'import os\n'), ((2797, 2816), 'json.dump', 'json.dump', (['dict', 'fp'], {}), '(dict, fp)\n', (2806, 2816), False, 'import json\n'), ((3106, 3119), 'json.load', 'json.load', (['fp'], {}), '(fp)\n', (3115, 3119), False, 'import json\n'), ((2376, 2399), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (2397, 2399), False, 'import datetime\n'), ((3211, 3234), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (3232, 3234), False, 'import datetime\n'), ((3383, 3425), 'scipy.misc.toimage', 'scipy.misc.toimage', (['image'], {'cmin': '(-1)', 'cmax': '(1)'}), '(image, cmin=-1, cmax=1)\n', (3401, 3425), False, 'import scipy\n'), ((2528, 2551), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (2549, 2551), False, 'import datetime\n')] |
import os
import os.path
import tempfile
import shutil
import numpy as np
import yt
from yt.testing import \
assert_equal
from yt.utilities.lib.api import add_rgba_points_to_image
def setup():
    """Flag the yt configuration so the library knows tests are running."""
    from yt.config import ytcfg as _cfg
    _cfg["yt", "__withintesting"] = "True"
def test_splat():
    """Splat random RGBA points onto an image, write it as PNG, and check
    that write_bitmap leaves the input buffer untouched."""
    # Perform all I/O inside a throwaway directory instead of the yt main dir.
    work_dir = tempfile.mkdtemp()
    start_dir = os.getcwd()
    os.chdir(work_dir)

    rng = np.random.RandomState(0x4d3d3d3)
    grid_size = 16
    n_points = int(1e2)
    canvas = np.zeros([grid_size, grid_size, 4])

    px = rng.random_sample(n_points)
    py = rng.random_sample(n_points)
    cmap = yt.visualization.color_maps.mcm.RdBu
    colors = cmap(rng.random_sample(n_points))
    add_rgba_points_to_image(canvas, px, py, colors)

    snapshot = canvas.copy()
    out_name = 'tmp.png'
    yt.write_bitmap(canvas, out_name)
    assert_equal(os.path.exists(out_name), True)
    os.remove(out_name)
    # write_bitmap must not mutate the image it was given.
    assert_equal(snapshot, canvas)

    os.chdir(start_dir)
    # clean up
    shutil.rmtree(work_dir)
| [
"os.remove",
"yt.utilities.lib.api.add_rgba_points_to_image",
"os.getcwd",
"numpy.zeros",
"yt.testing.assert_equal",
"numpy.random.RandomState",
"os.path.exists",
"tempfile.mkdtemp",
"shutil.rmtree",
"os.chdir",
"yt.write_bitmap"
] | [((394, 412), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {}), '()\n', (410, 412), False, 'import tempfile\n'), ((426, 437), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (435, 437), False, 'import os\n'), ((442, 458), 'os.chdir', 'os.chdir', (['tmpdir'], {}), '(tmpdir)\n', (450, 458), False, 'import os\n'), ((471, 502), 'numpy.random.RandomState', 'np.random.RandomState', (['(80991187)'], {}), '(80991187)\n', (492, 502), True, 'import numpy as np\n'), ((546, 565), 'numpy.zeros', 'np.zeros', (['[N, N, 4]'], {}), '([N, N, 4])\n', (554, 565), True, 'import numpy as np\n'), ((717, 760), 'yt.utilities.lib.api.add_rgba_points_to_image', 'add_rgba_points_to_image', (['image', 'xs', 'ys', 'cs'], {}), '(image, xs, ys, cs)\n', (741, 760), False, 'from yt.utilities.lib.api import add_rgba_points_to_image\n'), ((816, 842), 'yt.write_bitmap', 'yt.write_bitmap', (['image', 'fn'], {}), '(image, fn)\n', (831, 842), False, 'import yt\n'), ((890, 903), 'os.remove', 'os.remove', (['fn'], {}), '(fn)\n', (899, 903), False, 'import os\n'), ((908, 940), 'yt.testing.assert_equal', 'assert_equal', (['before_hash', 'image'], {}), '(before_hash, image)\n', (920, 940), False, 'from yt.testing import assert_equal\n'), ((946, 962), 'os.chdir', 'os.chdir', (['curdir'], {}), '(curdir)\n', (954, 962), False, 'import os\n'), ((982, 1003), 'shutil.rmtree', 'shutil.rmtree', (['tmpdir'], {}), '(tmpdir)\n', (995, 1003), False, 'import shutil\n'), ((860, 878), 'os.path.exists', 'os.path.exists', (['fn'], {}), '(fn)\n', (874, 878), False, 'import os\n')] |
from typing import List, Union
from pathlib import Path
import warnings
import numpy as np
from probeinterface import Probe, ProbeGroup, write_probeinterface, read_probeinterface
from .base import BaseExtractor, BaseSegment
from .core_tools import write_binary_recording, write_memory_recording
from warnings import warn
class BaseRecording(BaseExtractor):
    """
    Abstract class representing a multichannel timeseries (or block of raw ephys traces).

    Internally handles a list of BaseRecordingSegment; most accessors take an
    optional ``segment_index``.
    """
    _main_annotations = ['is_filtered']
    _main_properties = ['group', 'location', 'gain_to_uV', 'offset_to_uV']
    _main_features = []  # recording do not handle features

    def __init__(self, sampling_frequency: float, channel_ids: List, dtype):
        BaseExtractor.__init__(self, channel_ids)
        self.is_dumpable = True
        self._sampling_frequency = sampling_frequency
        self._dtype = np.dtype(dtype)
        self._recording_segments: List[BaseRecordingSegment] = []
        # initialize main annotation and properties
        self.annotate(is_filtered=False)

    def __repr__(self):
        clsname = self.__class__.__name__
        nseg = self.get_num_segments()
        nchan = self.get_num_channels()
        sf_khz = self.get_sampling_frequency() / 1000.
        duration = self.get_total_duration()
        txt = f'{clsname}: {nchan} channels - {nseg} segments - {sf_khz:0.1f}kHz - {duration:0.3f}s'
        if 'file_paths' in self._kwargs:
            txt += '\n file_paths: {}'.format(self._kwargs['file_paths'])
        if 'file_path' in self._kwargs:
            txt += '\n file_path: {}'.format(self._kwargs['file_path'])
        return txt

    def get_num_segments(self):
        """Return the number of segments in this recording."""
        return len(self._recording_segments)

    def add_recording_segment(self, recording_segment):
        """Append a BaseRecordingSegment and link it back to this extractor."""
        # todo: check channel count and sampling frequency
        self._recording_segments.append(recording_segment)
        recording_segment.set_parent_extractor(self)

    def get_sampling_frequency(self):
        """Return the sampling frequency in Hz."""
        return self._sampling_frequency

    @property
    def channel_ids(self):
        return self._main_ids

    def get_channel_ids(self):
        return self._main_ids

    def get_num_channels(self):
        return len(self.get_channel_ids())

    def get_dtype(self):
        return self._dtype

    def get_num_samples(self, segment_index=None):
        """Return the number of samples in the given segment."""
        segment_index = self._check_segment_index(segment_index)
        return self._recording_segments[segment_index].get_num_samples()

    get_num_frames = get_num_samples

    def get_total_samples(self):
        """Return the number of samples summed over all segments."""
        s = 0
        for segment_index in range(self.get_num_segments()):
            s += self.get_num_samples(segment_index)
        return s

    def get_total_duration(self):
        """Return the total duration of all segments in seconds."""
        duration = self.get_total_samples() / self.get_sampling_frequency()
        return duration

    def get_traces(self,
                   segment_index: Union[int, None] = None,
                   start_frame: Union[int, None] = None,
                   end_frame: Union[int, None] = None,
                   channel_ids: Union[List, None] = None,
                   order: Union[str, None] = None,
                   return_scaled=False,
                   ):
        """
        Return traces (num_samples x num_channels) for one segment.

        If return_scaled is True the raw traces are converted to float32 uV
        using the 'gain_to_uV' and 'offset_to_uV' properties.
        """
        segment_index = self._check_segment_index(segment_index)
        channel_indices = self.ids_to_indices(channel_ids, prefer_slice=True)
        rs = self._recording_segments[segment_index]
        traces = rs.get_traces(start_frame=start_frame, end_frame=end_frame, channel_indices=channel_indices)
        if order is not None:
            assert order in ["C", "F"]
            traces = np.asanyarray(traces, order=order)
        if return_scaled:
            if not self.has_scaled_traces():
                raise ValueError('This recording do not support return_scaled=True (need gain_to_uV and offset_'
                                 'to_uV properties)')
            else:
                gains = self.get_property('gain_to_uV')
                offsets = self.get_property('offset_to_uV')
                gains = gains[channel_indices].astype('float32')
                offsets = offsets[channel_indices].astype('float32')
                traces = traces.astype('float32') * gains + offsets
        return traces

    def has_scaled_traces(self):
        """Return True if both 'gain_to_uV' and 'offset_to_uV' properties are set."""
        if self.get_property('gain_to_uV') is None or self.get_property('offset_to_uV') is None:
            return False
        else:
            return True

    def is_filtered(self):
        # the is_filtered is handle with annotation
        return self._annotations.get('is_filtered', False)

    def get_times(self, segment_index=None):
        """
        Get time vector for a recording segment.

        If the segment has a time_vector, then it is returned. Otherwise
        a time_vector is constructed on the fly with sampling frequency.
        If t_start is defined and the time vector is constructed on the fly,
        the first time will be t_start. Otherwise it will start from 0.
        """
        segment_index = self._check_segment_index(segment_index)
        rs = self._recording_segments[segment_index]
        times = rs.get_times()
        return times

    def has_time_vector(self, segment_index=None):
        """
        Check if the segment of the recording has a time vector.
        """
        segment_index = self._check_segment_index(segment_index)
        rs = self._recording_segments[segment_index]
        d = rs.get_times_kwargs()
        return d['time_vector'] is not None

    def set_times(self, times, segment_index=None, with_warning=True):
        """
        Set times for a recording segment.
        """
        segment_index = self._check_segment_index(segment_index)
        rs = self._recording_segments[segment_index]

        assert times.ndim == 1, 'Time must have ndim=1'
        assert rs.get_num_samples() == times.shape[0], 'times have wrong shape'

        rs.t_start = None
        rs.time_vector = times.astype('float64')

        if with_warning:
            # Fixed garbled message (was "propagated to across preprocessing"
            # run into "Use use this carefully!").
            warnings.warn('Setting times with Recording.set_times() is not recommended because '
                          'times are not always propagated across preprocessing. '
                          'Use this carefully!')

    _job_keys = ['n_jobs', 'total_memory', 'chunk_size', 'chunk_memory', 'progress_bar', 'verbose']

    def _save(self, format='binary', **save_kwargs):
        """
        This function replaces the old CacheRecordingExtractor, but enables more engines
        for caching a results. At the moment only 'binary' with memmap is supported.
        We plan to add other engines, such as zarr and NWB.
        """

        # handle t_starts
        t_starts = []
        has_time_vectors = []
        for segment_index, rs in enumerate(self._recording_segments):
            d = rs.get_times_kwargs()
            t_starts.append(d['t_start'])
            has_time_vectors.append(d['time_vector'] is not None)

        if all(t_start is None for t_start in t_starts):
            t_starts = None

        if format == 'binary':
            # TODO save properties as npz!!!!!
            folder = save_kwargs['folder']
            file_paths = [folder / f'traces_cached_seg{i}.raw' for i in range(self.get_num_segments())]
            dtype = save_kwargs.get('dtype', None)
            if dtype is None:
                dtype = self.get_dtype()

            job_kwargs = {k: save_kwargs[k] for k in self._job_keys if k in save_kwargs}
            write_binary_recording(self, file_paths=file_paths, dtype=dtype, **job_kwargs)

            from .binaryrecordingextractor import BinaryRecordingExtractor
            cached = BinaryRecordingExtractor(file_paths=file_paths, sampling_frequency=self.get_sampling_frequency(),
                                              num_chan=self.get_num_channels(), dtype=dtype,
                                              t_starts=t_starts, channel_ids=self.get_channel_ids(), time_axis=0,
                                              file_offset=0, gain_to_uV=self.get_channel_gains(),
                                              offset_to_uV=self.get_channel_offsets())

        elif format == 'memory':
            job_kwargs = {k: save_kwargs[k] for k in self._job_keys if k in save_kwargs}
            traces_list = write_memory_recording(self, dtype=None, **job_kwargs)
            from .numpyextractors import NumpyRecording

            cached = NumpyRecording(traces_list, self.get_sampling_frequency(), t_starts=t_starts,
                                    channel_ids=self.channel_ids)

        elif format == 'zarr':
            # TODO implement a format based on zarr
            raise NotImplementedError

        elif format == 'nwb':
            # TODO implement a format based on zarr
            raise NotImplementedError

        else:
            raise ValueError(f'format {format} not supported')

        if self.get_property('contact_vector') is not None:
            probegroup = self.get_probegroup()
            cached.set_probegroup(probegroup)

        for segment_index, rs in enumerate(self._recording_segments):
            d = rs.get_times_kwargs()
            time_vector = d['time_vector']
            if time_vector is not None:
                cached._recording_segments[segment_index].time_vector = time_vector

        return cached

    def _extra_metadata_from_folder(self, folder):
        # load probe
        folder = Path(folder)
        if (folder / 'probe.json').is_file():
            probegroup = read_probeinterface(folder / 'probe.json')
            self.set_probegroup(probegroup, in_place=True)

        # load time vector if any
        for segment_index, rs in enumerate(self._recording_segments):
            time_file = folder / f'times_cached_seg{segment_index}.npy'
            if time_file.is_file():
                time_vector = np.load(time_file)
                rs.time_vector = time_vector

    def _extra_metadata_to_folder(self, folder):
        # save probe
        if self.get_property('contact_vector') is not None:
            probegroup = self.get_probegroup()
            write_probeinterface(folder / 'probe.json', probegroup)

        # save time vector if any
        for segment_index, rs in enumerate(self._recording_segments):
            d = rs.get_times_kwargs()
            time_vector = d['time_vector']
            if time_vector is not None:
                np.save(folder / f'times_cached_seg{segment_index}.npy', time_vector)

    def set_probe(self, probe, group_mode='by_probe', in_place=False):
        """
        Wrapper on top on set_probes when there one unique probe.
        """
        assert isinstance(probe, Probe), 'must give Probe'
        probegroup = ProbeGroup()
        probegroup.add_probe(probe)
        return self.set_probes(probegroup, group_mode=group_mode, in_place=in_place)

    def set_probegroup(self, probegroup, group_mode='by_probe', in_place=False):
        return self.set_probes(probegroup, group_mode=group_mode, in_place=in_place)

    def set_probes(self, probe_or_probegroup, group_mode='by_probe', in_place=False):
        """
        Attach a Probe to a recording.
        For this Probe.device_channel_indices is used to link contacts to recording channels.
        If some contacts of the Probe are not connected (device_channel_indices=-1)
        then the recording is "sliced" and only connected channel are kept.

        The probe order is not kept. Channel ids are re-ordered to match the channel_ids of the recording.

        Parameters
        ----------
        probe_or_probegroup: Probe, list of Probe, or ProbeGroup
            The probe(s) to be attached to the recording
        group_mode: str
            'by_probe' or 'by_shank'. Adds grouping property to the recording based on the probes ('by_probe')
            or shanks ('by_shanks')
        in_place: bool
            False by default.
            Useful internally when extractor do self.set_probegroup(probe)

        Returns
        -------
        sub_recording: BaseRecording
            A view of the recording (ChannelSliceRecording or clone or itself)
        """
        from spikeinterface import ChannelSliceRecording

        assert group_mode in ('by_probe', 'by_shank'), "'group_mode' can be 'by_probe' or 'by_shank'"

        # handle several input possibilities
        if isinstance(probe_or_probegroup, Probe):
            probegroup = ProbeGroup()
            probegroup.add_probe(probe_or_probegroup)
        elif isinstance(probe_or_probegroup, ProbeGroup):
            probegroup = probe_or_probegroup
        elif isinstance(probe_or_probegroup, list):
            assert all([isinstance(e, Probe) for e in probe_or_probegroup])
            probegroup = ProbeGroup()
            for probe in probe_or_probegroup:
                probegroup.add_probe(probe)
        else:
            raise ValueError('must give Probe or ProbeGroup or list of Probe')

        # handle not connected channels
        assert all(probe.device_channel_indices is not None for probe in probegroup.probes), \
            'Probe must have device_channel_indices'

        # this is a vector with complex fileds (dataframe like) that handle all contact attr
        arr = probegroup.to_numpy(complete=True)

        # keep only connected contact ( != -1)
        keep = arr['device_channel_indices'] >= 0
        if np.any(~keep):
            warn('The given probes have unconnected contacts: they are removed')

        arr = arr[keep]
        inds = arr['device_channel_indices']
        order = np.argsort(inds)
        inds = inds[order]
        # check
        if np.max(inds) >= self.get_num_channels():
            raise ValueError('The given Probe have "device_channel_indices" that do not match channel count')
        new_channel_ids = self.get_channel_ids()[inds]
        arr = arr[order]
        arr['device_channel_indices'] = np.arange(arr.size, dtype='int64')

        # create recording : channel slice or clone or self
        if in_place:
            if not np.array_equal(new_channel_ids, self.get_channel_ids()):
                # fixed typo in message (was 'set_proce(inplace=True)')
                raise Exception('set_probes(in_place=True) must have all channel indices')
            sub_recording = self
        else:
            if np.array_equal(new_channel_ids, self.get_channel_ids()):
                sub_recording = self.clone()
            else:
                sub_recording = ChannelSliceRecording(self, new_channel_ids)

        # create a vector that handle all contacts in property
        sub_recording.set_property('contact_vector', arr, ids=None)

        # planar_contour is saved in annotations
        for probe_index, probe in enumerate(probegroup.probes):
            contour = probe.probe_planar_contour
            if contour is not None:
                sub_recording.set_annotation(f'probe_{probe_index}_planar_contour', contour, overwrite=True)

        # duplicate positions to "locations" property
        ndim = probegroup.ndim
        locations = np.zeros((arr.size, ndim), dtype='float64')
        for i, dim in enumerate(['x', 'y', 'z'][:ndim]):
            locations[:, i] = arr[dim]
        sub_recording.set_property('location', locations, ids=None)

        # handle groups
        groups = np.zeros(arr.size, dtype='int64')
        if group_mode == 'by_probe':
            for group, probe_index in enumerate(np.unique(arr['probe_index'])):
                mask = arr['probe_index'] == probe_index
                groups[mask] = group
        elif group_mode == 'by_shank':
            assert all(probe.shank_ids is not None for probe in probegroup.probes), \
                'shank_ids is None in probe, you cannot group by shank'
            for group, a in enumerate(np.unique(arr[['probe_index', 'shank_ids']])):
                mask = (arr['probe_index'] == a['probe_index']) & (arr['shank_ids'] == a['shank_ids'])
                groups[mask] = group
        sub_recording.set_property('group', groups, ids=None)

        return sub_recording

    def get_probe(self):
        """Return the unique attached Probe (raise if several)."""
        probes = self.get_probes()
        assert len(probes) == 1, 'there are several probe use .get_probes() or get_probegroup()'
        return probes[0]

    def get_probes(self):
        """Return the list of attached Probe objects."""
        probegroup = self.get_probegroup()
        return probegroup.probes

    def get_probegroup(self):
        """Return the attached ProbeGroup; build a dummy one from 'location' if needed."""
        arr = self.get_property('contact_vector')
        if arr is None:
            positions = self.get_property('location')
            if positions is None:
                raise ValueError('There is not Probe attached to recording. use set_probe(...)')
            else:
                warn('There is no Probe attached to this recording. Creating a dummy one with contact positions')
                ndim = positions.shape[1]
                probe = Probe(ndim=ndim)
                probe.set_contacts(positions=positions, shapes='circle', shape_params={'radius': 5})
                probe.set_device_channel_indices(np.arange(self.get_num_channels(), dtype='int64'))
                #  probe.create_auto_shape()
                probegroup = ProbeGroup()
                probegroup.add_probe(probe)
        else:
            probegroup = ProbeGroup.from_numpy(arr)
            for probe_index, probe in enumerate(probegroup.probes):
                contour = self.get_annotation(f'probe_{probe_index}_planar_contour')
                if contour is not None:
                    probe.set_planar_contour(contour)
        return probegroup

    def set_dummy_probe_from_locations(self, locations, shape="circle", shape_params={"radius": 1}):
        """Build and attach a minimal Probe from raw contact locations."""
        probe = Probe()
        probe.set_contacts(locations, shapes=shape, shape_params=shape_params)
        probe.set_device_channel_indices(np.arange(self.get_num_channels()))
        self.set_probe(probe, in_place=True)

    def set_channel_locations(self, locations, channel_ids=None):
        if self.get_property('contact_vector') is not None:
            raise ValueError('set_channel_locations(..) destroy the probe description, prefer set_probes(..)')
        self.set_property('location', locations, ids=channel_ids)

    def get_channel_locations(self, channel_ids=None, locations_2d=True):
        # NOTE(review): locations_2d is currently unused in this implementation.
        if channel_ids is None:
            channel_ids = self.get_channel_ids()
        channel_indices = self.ids_to_indices(channel_ids)
        if self.get_property('contact_vector') is not None:
            probe = self.get_probe()
            return probe.contact_positions[channel_indices]
        else:
            location = self.get_property('location')
            if location is None:
                raise Exception('there is no channel location')
            location = np.asarray(location)[channel_indices]
            return location

    def clear_channel_locations(self, channel_ids=None):
        """Reset channel locations to NaN for the given (or all) channels."""
        if channel_ids is None:
            # bug fix: was self.get_num_channel() (method does not exist)
            n = self.get_num_channels()
        else:
            n = len(channel_ids)
        locations = np.zeros((n, 2)) * np.nan
        self.set_property('location', locations, ids=channel_ids)

    def set_channel_groups(self, groups, channel_ids=None):
        if 'probes' in self._annotations:
            warn('set_channel_groups(..) destroys the probe description. Using set_probe(...) is preferable')
            self._annotations.pop('probes')
        self.set_property('group', groups, ids=channel_ids)

    def get_channel_groups(self, channel_ids=None):
        groups = self.get_property('group', ids=channel_ids)
        return groups

    def clear_channel_groups(self, channel_ids=None):
        """Reset the 'group' property to 0 for the given (or all) channels."""
        if channel_ids is None:
            n = self.get_num_channels()
        else:
            n = len(channel_ids)
        groups = np.zeros(n, dtype='int64')
        self.set_property('group', groups, ids=channel_ids)

    def set_channel_gains(self, gains, channel_ids=None):
        """Set per-channel 'gain_to_uV'; a scalar is broadcast to all channels."""
        if np.isscalar(gains):
            gains = [gains] * self.get_num_channels()
        self.set_property('gain_to_uV', gains, ids=channel_ids)

    def get_channel_gains(self, channel_ids=None):
        return self.get_property('gain_to_uV', ids=channel_ids)

    def set_channel_offsets(self, offsets, channel_ids=None):
        """Set per-channel 'offset_to_uV'; a scalar is broadcast to all channels."""
        if np.isscalar(offsets):
            offsets = [offsets] * self.get_num_channels()
        self.set_property('offset_to_uV', offsets, ids=channel_ids)

    def get_channel_offsets(self, channel_ids=None):
        return self.get_property('offset_to_uV', ids=channel_ids)

    def get_channel_property(self, channel_id, key):
        """Return the value of property `key` for a single channel."""
        values = self.get_property(key)
        v = values[self.id_to_index(channel_id)]
        return v

    def channel_slice(self, channel_ids, renamed_channel_ids=None):
        """Return a view of the recording keeping only the given channels."""
        from spikeinterface import ChannelSliceRecording
        sub_recording = ChannelSliceRecording(self, channel_ids, renamed_channel_ids=renamed_channel_ids)
        return sub_recording

    def frame_slice(self, start_frame, end_frame):
        """Return a view of the recording restricted to [start_frame, end_frame)."""
        from spikeinterface import FrameSliceRecording
        sub_recording = FrameSliceRecording(self, start_frame=start_frame, end_frame=end_frame)
        return sub_recording

    def split_by(self, property='group', outputs='dict'):
        """Split the recording into sub-recordings, one per unique property value."""
        assert outputs in ('list', 'dict')
        from .channelslicerecording import ChannelSliceRecording
        values = self.get_property(property)
        if values is None:
            raise ValueError(f'property {property} is not set')

        if outputs == 'list':
            recordings = []
        elif outputs == 'dict':
            recordings = {}
        for value in np.unique(values):
            inds, = np.nonzero(values == value)
            new_channel_ids = self.get_channel_ids()[inds]
            subrec = ChannelSliceRecording(self, new_channel_ids)
            if outputs == 'list':
                recordings.append(subrec)
            elif outputs == 'dict':
                recordings[value] = subrec
        return recordings
class BaseRecordingSegment(BaseSegment):
    """
    Abstract class representing a multichannel timeseries, or block of raw ephys traces
    """

    def __init__(self, sampling_frequency=None, t_start=None, time_vector=None):
        # Exactly one of sampling_frequency / time_vector must be given;
        # they are mutually exclusive ways of defining the time base.
        if sampling_frequency is None:
            assert time_vector is not None, "Pass either 'sampling_frequency' or 'time_vector'"
            assert time_vector.ndim == 1, "time_vector should be a 1D array"

        if time_vector is None:
            assert sampling_frequency is not None, "Pass either 'sampling_frequency' or 'time_vector'"

        self.sampling_frequency = sampling_frequency
        self.t_start = t_start
        self.time_vector = time_vector

        BaseSegment.__init__(self)

    def get_times(self):
        """Return the time vector, building one from the sampling rate if needed."""
        if self.time_vector is not None:
            return self.time_vector
        times = np.arange(self.get_num_samples(), dtype='float64')
        times /= self.sampling_frequency
        if self.t_start is not None:
            times += self.t_start
        return times

    def get_times_kwargs(self):
        """Return the time-base parameters as a dict (useful for other internal RecordingSegment)."""
        return dict(sampling_frequency=self.sampling_frequency, t_start=self.t_start,
                    time_vector=self.time_vector)

    def sample_index_to_time(self, sample_ind):
        """
        Transform sample index into time in seconds
        """
        if self.time_vector is not None:
            return self.time_vector[sample_ind]
        seconds = sample_ind / self.sampling_frequency
        if self.t_start is not None:
            seconds += self.t_start
        return seconds

    def time_to_sample_index(self, time_s):
        """
        Transform time in seconds into sample index
        """
        if self.time_vector is not None:
            # rightmost index whose time is <= time_s
            idx = np.searchsorted(self.time_vector, time_s, side='right') - 1
        elif self.t_start is None:
            idx = time_s * self.sampling_frequency
        else:
            idx = (time_s - self.t_start) * self.sampling_frequency
        return int(idx)

    def get_num_samples(self) -> int:
        """Returns the number of samples in this signal segment

        Returns:
            SampleIndex: Number of samples in the signal segment
        """
        # must be implemented in subclass
        raise NotImplementedError

    def get_traces(self,
                   start_frame: Union[int, None] = None,
                   end_frame: Union[int, None] = None,
                   channel_indices: Union[List, None] = None,
                   ) -> np.ndarray:
        """
        Return the raw traces, optionally for a subset of samples and/or channels

        Parameters
        ----------
        start_frame: (Union[int, None], optional)
            start sample index, or zero if None. Defaults to None.
        end_frame: (Union[int, None], optional)
            end_sample, or number of samples if None. Defaults to None.
        channel_indices: (Union[List, None], optional)
            Indices of channels to return, or all channels if None. Defaults to None.

        Returns
        -------
        traces: np.ndarray
            Array of traces, num_samples x num_channels
        """
        # must be implemented in subclass
        raise NotImplementedError
| [
"numpy.load",
"spikeinterface.FrameSliceRecording",
"numpy.argsort",
"pathlib.Path",
"numpy.arange",
"numpy.unique",
"numpy.max",
"probeinterface.write_probeinterface",
"spikeinterface.ChannelSliceRecording",
"numpy.save",
"probeinterface.ProbeGroup.from_numpy",
"probeinterface.Probe",
"prob... | [((937, 952), 'numpy.dtype', 'np.dtype', (['dtype'], {}), '(dtype)\n', (945, 952), True, 'import numpy as np\n'), ((9459, 9471), 'pathlib.Path', 'Path', (['folder'], {}), '(folder)\n', (9463, 9471), False, 'from pathlib import Path\n'), ((10752, 10764), 'probeinterface.ProbeGroup', 'ProbeGroup', ([], {}), '()\n', (10762, 10764), False, 'from probeinterface import Probe, ProbeGroup, write_probeinterface, read_probeinterface\n'), ((13418, 13431), 'numpy.any', 'np.any', (['(~keep)'], {}), '(~keep)\n', (13424, 13431), True, 'import numpy as np\n'), ((13600, 13616), 'numpy.argsort', 'np.argsort', (['inds'], {}), '(inds)\n', (13610, 13616), True, 'import numpy as np\n'), ((13942, 13976), 'numpy.arange', 'np.arange', (['arr.size'], {'dtype': '"""int64"""'}), "(arr.size, dtype='int64')\n", (13951, 13976), True, 'import numpy as np\n'), ((15029, 15072), 'numpy.zeros', 'np.zeros', (['(arr.size, ndim)'], {'dtype': '"""float64"""'}), "((arr.size, ndim), dtype='float64')\n", (15037, 15072), True, 'import numpy as np\n'), ((15279, 15312), 'numpy.zeros', 'np.zeros', (['arr.size'], {'dtype': '"""int64"""'}), "(arr.size, dtype='int64')\n", (15287, 15312), True, 'import numpy as np\n'), ((17618, 17625), 'probeinterface.Probe', 'Probe', ([], {}), '()\n', (17623, 17625), False, 'from probeinterface import Probe, ProbeGroup, write_probeinterface, read_probeinterface\n'), ((19688, 19714), 'numpy.zeros', 'np.zeros', (['n'], {'dtype': '"""int64"""'}), "(n, dtype='int64')\n", (19696, 19714), True, 'import numpy as np\n'), ((19845, 19863), 'numpy.isscalar', 'np.isscalar', (['gains'], {}), '(gains)\n', (19856, 19863), True, 'import numpy as np\n'), ((20173, 20193), 'numpy.isscalar', 'np.isscalar', (['offsets'], {}), '(offsets)\n', (20184, 20193), True, 'import numpy as np\n'), ((20751, 20837), 'spikeinterface.ChannelSliceRecording', 'ChannelSliceRecording', (['self', 'channel_ids'], {'renamed_channel_ids': 'renamed_channel_ids'}), '(self, channel_ids, renamed_channel_ids=\n 
renamed_channel_ids)\n', (20772, 20837), False, 'from spikeinterface import ChannelSliceRecording\n'), ((20993, 21064), 'spikeinterface.FrameSliceRecording', 'FrameSliceRecording', (['self'], {'start_frame': 'start_frame', 'end_frame': 'end_frame'}), '(self, start_frame=start_frame, end_frame=end_frame)\n', (21012, 21064), False, 'from spikeinterface import FrameSliceRecording\n'), ((21537, 21554), 'numpy.unique', 'np.unique', (['values'], {}), '(values)\n', (21546, 21554), True, 'import numpy as np\n'), ((3663, 3697), 'numpy.asanyarray', 'np.asanyarray', (['traces'], {'order': 'order'}), '(traces, order=order)\n', (3676, 3697), True, 'import numpy as np\n'), ((6057, 6230), 'warnings.warn', 'warnings.warn', (['"""Setting times with Recording.set_times() is not recommended because times are not always propagated to across preprocessingUse use this carefully!"""'], {}), "(\n 'Setting times with Recording.set_times() is not recommended because times are not always propagated to across preprocessingUse use this carefully!'\n )\n", (6070, 6230), False, 'import warnings\n'), ((9543, 9585), 'probeinterface.read_probeinterface', 'read_probeinterface', (["(folder / 'probe.json')"], {}), "(folder / 'probe.json')\n", (9562, 9585), False, 'from probeinterface import Probe, ProbeGroup, write_probeinterface, read_probeinterface\n'), ((10142, 10197), 'probeinterface.write_probeinterface', 'write_probeinterface', (["(folder / 'probe.json')", 'probegroup'], {}), "(folder / 'probe.json', probegroup)\n", (10162, 10197), False, 'from probeinterface import Probe, ProbeGroup, write_probeinterface, read_probeinterface\n'), ((12458, 12470), 'probeinterface.ProbeGroup', 'ProbeGroup', ([], {}), '()\n', (12468, 12470), False, 'from probeinterface import Probe, ProbeGroup, write_probeinterface, read_probeinterface\n'), ((13445, 13513), 'warnings.warn', 'warn', (['"""The given probes have unconnected contacts: they are removed"""'], {}), "('The given probes have unconnected contacts: they are 
removed')\n", (13449, 13513), False, 'from warnings import warn\n'), ((13671, 13683), 'numpy.max', 'np.max', (['inds'], {}), '(inds)\n', (13677, 13683), True, 'import numpy as np\n'), ((17200, 17226), 'probeinterface.ProbeGroup.from_numpy', 'ProbeGroup.from_numpy', (['arr'], {}), '(arr)\n', (17221, 17226), False, 'from probeinterface import Probe, ProbeGroup, write_probeinterface, read_probeinterface\n'), ((18952, 18968), 'numpy.zeros', 'np.zeros', (['(n, 2)'], {}), '((n, 2))\n', (18960, 18968), True, 'import numpy as np\n'), ((19159, 19266), 'warnings.warn', 'warn', (['"""set_channel_groups(..) destroys the probe description. Using set_probe(...) is preferable"""'], {}), "(\n 'set_channel_groups(..) destroys the probe description. Using set_probe(...) is preferable'\n )\n", (19163, 19266), False, 'from warnings import warn\n'), ((21576, 21603), 'numpy.nonzero', 'np.nonzero', (['(values == value)'], {}), '(values == value)\n', (21586, 21603), True, 'import numpy as np\n'), ((21684, 21728), 'spikeinterface.ChannelSliceRecording', 'ChannelSliceRecording', (['self', 'new_channel_ids'], {}), '(self, new_channel_ids)\n', (21705, 21728), False, 'from spikeinterface import ChannelSliceRecording\n'), ((9888, 9906), 'numpy.load', 'np.load', (['time_file'], {}), '(time_file)\n', (9895, 9906), True, 'import numpy as np\n'), ((10440, 10509), 'numpy.save', 'np.save', (["(folder / f'times_cached_seg{segment_index}.npy')", 'time_vector'], {}), "(folder / f'times_cached_seg{segment_index}.npy', time_vector)\n", (10447, 10509), True, 'import numpy as np\n'), ((14438, 14482), 'spikeinterface.ChannelSliceRecording', 'ChannelSliceRecording', (['self', 'new_channel_ids'], {}), '(self, new_channel_ids)\n', (14459, 14482), False, 'from spikeinterface import ChannelSliceRecording\n'), ((15398, 15427), 'numpy.unique', 'np.unique', (["arr['probe_index']"], {}), "(arr['probe_index'])\n", (15407, 15427), True, 'import numpy as np\n'), ((16648, 16755), 'warnings.warn', 'warn', (['"""There is 
no Probe attached to this recording. Creating a dummy one with contact positions"""'], {}), "(\n 'There is no Probe attached to this recording. Creating a dummy one with contact positions'\n )\n", (16652, 16755), False, 'from warnings import warn\n'), ((16812, 16828), 'probeinterface.Probe', 'Probe', ([], {'ndim': 'ndim'}), '(ndim=ndim)\n', (16817, 16828), False, 'from probeinterface import Probe, ProbeGroup, write_probeinterface, read_probeinterface\n'), ((17104, 17116), 'probeinterface.ProbeGroup', 'ProbeGroup', ([], {}), '()\n', (17114, 17116), False, 'from probeinterface import Probe, ProbeGroup, write_probeinterface, read_probeinterface\n'), ((18690, 18710), 'numpy.asarray', 'np.asarray', (['location'], {}), '(location)\n', (18700, 18710), True, 'import numpy as np\n'), ((24092, 24147), 'numpy.searchsorted', 'np.searchsorted', (['self.time_vector', 'time_s'], {'side': '"""right"""'}), "(self.time_vector, time_s, side='right')\n", (24107, 24147), True, 'import numpy as np\n'), ((12781, 12793), 'probeinterface.ProbeGroup', 'ProbeGroup', ([], {}), '()\n', (12791, 12793), False, 'from probeinterface import Probe, ProbeGroup, write_probeinterface, read_probeinterface\n'), ((15759, 15803), 'numpy.unique', 'np.unique', (["arr[['probe_index', 'shank_ids']]"], {}), "(arr[['probe_index', 'shank_ids']])\n", (15768, 15803), True, 'import numpy as np\n')] |
from __future__ import division
import numpy as np
import pandas as pd
import plotly.graph_objs as go
from plotly.offline import plot
from cea.plots.variable_naming import LOGO, COLOR
def thermal_storage_activation_curve(data_frame, analysis_fields_charging, analysis_fields_discharging,
                                     analysis_fields_status, title, output_path):
    """Build the storage charging/discharging activation plot, save it to
    output_path, and return its plotly data/layout dict."""
    # graph traces (bars + status lines)
    traces = calc_graph(analysis_fields_charging, analysis_fields_discharging,
                        analysis_fields_status, data_frame)

    # monthly summary table appended as an extra trace
    traces.append(calc_table(analysis_fields_charging, analysis_fields_discharging,
                             analysis_fields_status, data_frame))

    # time-range selector buttons displayed above the x axis
    range_buttons = [
        dict(count=1, label='1d', step='day', stepmode='backward'),
        dict(count=1, label='1w', step='week', stepmode='backward'),
        dict(count=1, label='1m', step='month', stepmode='backward'),
        dict(count=6, label='6m', step='month', stepmode='backward'),
        dict(count=1, label='1y', step='year', stepmode='backward'),
        dict(step='all'),
    ]
    layout = dict(images=LOGO, title=title, barmode='relative',
                  yaxis=dict(title='Power charged/discharged [kW]', domain=[0.45, 1.0]),
                  xaxis=dict(rangeselector=dict(buttons=list(range_buttons)),
                             rangeslider=dict(), type='date'))

    figure = go.Figure(data=traces, layout=layout)
    plot(figure, auto_open=False, filename=output_path)

    return {'data': traces, 'layout': layout}
def calc_graph(analysis_fields_charging, analysis_fields_discharging, analysis_fields_status, data_frame):
    """Build the plotly traces: charging bars up, discharging bars down, storage state as a line."""
    data = (data_frame / 1000).round(2)  # to kW
    graph = []
    # charging power is drawn as positive bars
    for field in analysis_fields_charging:
        graph.append(go.Bar(x=data.index, y=data[field].values, name=field,
                            marker=dict(color=COLOR[field])))
    # discharging power is drawn as negative bars
    for field in analysis_fields_discharging:
        graph.append(go.Bar(x=data.index, y=-data[field].values, name=field,
                            marker=dict(color=COLOR[field])))
    # storage status overlaid as a thin line
    for field in analysis_fields_status:
        graph.append(go.Scatter(x=data.index, y=data[field], name=field,
                                line=dict(color=COLOR[field], width=1)))
    return graph
def calc_table(analysis_fields_charging, analysis_fields_discharging, analysis_fields_status, data_frame):
    """
    draws table of monthly energy balance

    :param data_frame: hourly storage data frame in Wh (index parseable as datetime)
    :return: plotly Table trace with one row per month plus a final 'YEAR' row
    """
    # translate results into monthly totals (Wh -> MWh)
    data_frame.index = pd.to_datetime(data_frame.index)
    data_frame_month = (data_frame.resample("M").sum() / 1000000).round(2)  # to MW
    data_frame_month["month"] = data_frame_month.index.strftime("%B")
    # create table arrays: every column carries the 12 monthly rows + 1 annual row
    name_month = np.append(data_frame_month['month'].values, ['YEAR'])
    status = np.append(data_frame_month[analysis_fields_status].sum(axis=1), ['-'])
    total_heat = np.append(data_frame_month[analysis_fields_charging].sum(axis=1),
                           data_frame_month[analysis_fields_charging].sum(axis=1).sum(axis=0))
    total_cool = np.append(data_frame_month[analysis_fields_discharging].sum(axis=1),
                           data_frame_month[analysis_fields_discharging].sum(axis=1).sum(axis=0))
    # BUGFIX: total_heat/total_cool already carry the annual total as their last
    # element, so the balance column is simply their difference.  The previous
    # code appended the sum over all 13 entries (i.e. twice the annual balance),
    # producing a 14th row with no matching month label.
    balance = (total_heat - total_cool).round(2)
    # draw table
    table = go.Table(domain=dict(x=[0, 1], y=[0.0, 0.2]),
                 header=dict(values=['Month', 'Total in [MWh]', 'Total out [MWh]', 'Balance [MWh]',
                                     'Status Storage [MWh]']),
                 cells=dict(values=[name_month, total_heat, total_cool, balance, status]))
    return table
| [
"plotly.graph_objs.Figure",
"numpy.append",
"pandas.to_datetime",
"plotly.offline.plot"
] | [((1538, 1581), 'plotly.graph_objs.Figure', 'go.Figure', ([], {'data': 'traces_graph', 'layout': 'layout'}), '(data=traces_graph, layout=layout)\n', (1547, 1581), True, 'import plotly.graph_objs as go\n'), ((1586, 1634), 'plotly.offline.plot', 'plot', (['fig'], {'auto_open': '(False)', 'filename': 'output_path'}), '(fig, auto_open=False, filename=output_path)\n', (1590, 1634), False, 'from plotly.offline import plot\n'), ((2885, 2917), 'pandas.to_datetime', 'pd.to_datetime', (['data_frame.index'], {}), '(data_frame.index)\n', (2899, 2917), True, 'import pandas as pd\n'), ((3116, 3169), 'numpy.append', 'np.append', (["data_frame_month['month'].values", "['YEAR']"], {}), "(data_frame_month['month'].values, ['YEAR'])\n", (3125, 3169), True, 'import numpy as np\n')] |
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import collections
import itertools
import logging
import os
import random
import time
import h5py
import yaml
import distutils.util
from functools import partial
from concurrent.futures import ThreadPoolExecutor
import numpy as np
import paddle
import paddle.distributed as dist
from paddle.io import DataLoader, Dataset
from paddlenlp.data import Stack, Tuple, Pad
from paddlenlp.utils.tools import TimeCostAverage
from paddlenlp.transformers import BertForPretraining, BertModel, BertPretrainingCriterion
from paddlenlp.transformers import ErnieForPretraining, ErnieModel, ErniePretrainingCriterion
from paddlenlp.transformers import BertTokenizer, ErnieTokenizer
from paddlenlp.transformers import LinearDecayWithWarmup
# Module-level logging setup shared by the whole pretraining script.
FORMAT = '%(asctime)s-%(levelname)s: %(message)s'
logging.basicConfig(level=logging.INFO, format=FORMAT)
logger = logging.getLogger(__name__)
# Maps each supported --model_type value to its
# (base model, pretraining model, pretraining loss, tokenizer) classes.
MODEL_CLASSES = {
    "bert":
    (BertModel, BertForPretraining, BertPretrainingCriterion, BertTokenizer),
    "ernie":
    (ErnieModel, ErnieForPretraining, ErniePretrainingCriterion, ErnieTokenizer)
}
def parse_args():
    """Define and parse the command-line interface of the pretraining script.

    Returns:
        argparse.Namespace with all training options.
    """
    arg_parser = argparse.ArgumentParser()

    # all shortcut names understood by --model_name_or_path
    shortcut_names = sum([
        list(classes[-1].pretrained_init_configuration.keys())
        for classes in MODEL_CLASSES.values()
    ], [])

    # model selection
    arg_parser.add_argument(
        "--model_type", default=None, type=str, required=True,
        help="Model type selected in the list: " + ", ".join(MODEL_CLASSES.keys()), )
    arg_parser.add_argument(
        "--model_name_or_path", default=None, type=str, required=True,
        help="Path to pre-trained model or shortcut name selected in the list: "
        + ", ".join(shortcut_names), )

    # data locations
    arg_parser.add_argument(
        "--input_dir", default=None, type=str, required=True,
        help="The input directory where the data will be read from.", )
    arg_parser.add_argument(
        "--output_dir", default=None, type=str, required=True,
        help="The output directory where the model predictions and checkpoints will be written.",
    )

    # optimization hyper-parameters
    arg_parser.add_argument(
        "--max_predictions_per_seq", default=80, type=int,
        help="The maximum total of masked tokens in input sequence")
    arg_parser.add_argument(
        "--batch_size", default=8, type=int,
        help="Batch size per GPU/CPU for training.", )
    arg_parser.add_argument(
        "--learning_rate", default=5e-5, type=float,
        help="The initial learning rate for Adam.")
    arg_parser.add_argument(
        "--weight_decay", default=0.0, type=float,
        help="Weight decay if we apply some.")
    arg_parser.add_argument(
        "--adam_epsilon", default=1e-8, type=float,
        help="Epsilon for Adam optimizer.")
    arg_parser.add_argument(
        "--max_grad_norm", default=1.0, type=float, help="Max gradient norm.")
    arg_parser.add_argument(
        "--num_train_epochs", default=3, type=int,
        help="Total number of training epochs to perform.", )
    arg_parser.add_argument(
        "--max_steps", default=-1, type=int,
        help="If > 0: set total number of training steps to perform. Override num_train_epochs.",
    )
    arg_parser.add_argument(
        "--warmup_steps", default=0, type=int,
        help="Linear warmup over warmup_steps.")

    # logging / checkpointing cadence
    arg_parser.add_argument(
        "--logging_steps", type=int, default=500,
        help="Log every X updates steps.")
    arg_parser.add_argument(
        "--save_steps", type=int, default=500,
        help="Save checkpoint every X updates steps.")
    arg_parser.add_argument(
        "--seed", type=int, default=42, help="random seed for initialization")

    # runtime environment
    arg_parser.add_argument(
        "--device", type=str, default="gpu", choices=["cpu", "gpu", "xpu"],
        help="Device for selecting for the training.")
    arg_parser.add_argument(
        "--use_amp", type=distutils.util.strtobool, default=False,
        help="Enable mixed precision training.")
    arg_parser.add_argument(
        "--scale_loss", type=float, default=2**15,
        help="The value of scale_loss for fp16.")
    arg_parser.add_argument(
        "--to_static", type=distutils.util.strtobool, default=False,
        help="Enable training under @to_static.")

    return arg_parser.parse_args()
def set_seed(args):
    """Seed the python, numpy and paddle RNGs, offset by the distributed rank.

    The rank offset keeps workers from drawing identical random streams.
    """
    rank_seed = args.seed + paddle.distributed.get_rank()
    random.seed(rank_seed)
    np.random.seed(rank_seed)
    paddle.seed(rank_seed)
class WorkerInitObj(object):
    """Picklable DataLoader worker initializer.

    Seeds the global ``random`` and ``numpy`` RNGs with ``seed + worker id``
    so every worker process draws a distinct, reproducible stream.
    """

    def __init__(self, seed):
        self.seed = seed

    def __call__(self, id):
        worker_seed = self.seed + id
        random.seed(worker_seed)
        np.random.seed(seed=worker_seed)
def create_pretraining_dataset(input_file, max_pred_length, shared_list, args,
                               worker_init):
    """Build a shuffled DataLoader over one pre-sharded HDF5 pretraining file.

    Returns a ``(train_data_loader, input_file)`` tuple so the prefetching
    caller can track which shard the loader belongs to.
    """
    train_data = PretrainingDataset(
        input_file=input_file, max_pred_length=max_pred_length)
    # files have been sharded, no need to dispatch again
    train_batch_sampler = paddle.io.BatchSampler(
        train_data, batch_size=args.batch_size, shuffle=True)
    # DataLoader cannot be pickled because of its place.
    # If it can be pickled, use global function instead of lambda and use
    # ProcessPoolExecutor instead of ThreadPoolExecutor to prefetch.
    def _collate_data(data, stack_fn=Stack()):
        # Collate per-sample feature lists into batch arrays.  The masked-LM
        # fields (indices 3 and 4) are flattened across the batch rather than
        # stacked, so downstream gather can address them with 1-D offsets.
        num_fields = len(data[0])
        out = [None] * num_fields
        # input_ids, segment_ids, input_mask, masked_lm_positions,
        # masked_lm_labels, next_sentence_labels, mask_token_num
        for i in (0, 1, 2, 5):
            out[i] = stack_fn([x[i] for x in data])
        batch_size, seq_length = out[0].shape
        size = num_mask = sum(len(x[3]) for x in data)
        # Padding for divisibility by 8 for fp16 or int8 usage
        if size % 8 != 0:
            size += 8 - (size % 8)
        # masked_lm_positions
        # Organize as a 1D tensor for gather or use gather_nd
        out[3] = np.full(size, 0, dtype=np.int32)
        # masked_lm_labels
        out[4] = np.full([size, 1], -1, dtype=np.int64)
        mask_token_num = 0
        for i, x in enumerate(data):
            for j, pos in enumerate(x[3]):
                # flatten (sample, position) into a single batch-wide offset
                out[3][mask_token_num] = i * seq_length + pos
                out[4][mask_token_num] = x[4][j]
                mask_token_num += 1
        # mask_token_num
        out.append(np.asarray([mask_token_num], dtype=np.float32))
        return out
    train_data_loader = DataLoader(
        dataset=train_data,
        batch_sampler=train_batch_sampler,
        collate_fn=_collate_data,
        num_workers=0,
        worker_init_fn=worker_init,
        return_list=True)
    return train_data_loader, input_file
def create_input_specs():
    """Static-graph input specs for @to_static decoration.

    Returns the model-input list [input_ids, segment_ids, position_ids,
    input_mask, masked_lm_positions]; position_ids is None because it is
    not fed explicitly.
    """
    spec = paddle.static.InputSpec
    return [
        spec(name="input_ids", shape=[-1, -1], dtype="int64"),
        spec(name="segment_ids", shape=[-1, -1], dtype="int64"),
        None,  # position_ids
        spec(name="input_mask", shape=[-1, 1, 1, -1], dtype="float32"),
        spec(name="masked_lm_positions", shape=[-1], dtype="int32"),
    ]
class PretrainingDataset(Dataset):
    """Map-style dataset over one HDF5 shard of BERT pretraining features."""

    def __init__(self, input_file, max_pred_length):
        # max_pred_length: number of masked-LM slots stored per sample
        self.input_file = input_file
        self.max_pred_length = max_pred_length
        f = h5py.File(input_file, "r")
        keys = [
            'input_ids', 'input_mask', 'segment_ids', 'masked_lm_positions',
            'masked_lm_ids', 'next_sentence_labels'
        ]
        # materialize all arrays in memory so the file handle can be closed
        self.inputs = [np.asarray(f[key][:]) for key in keys]
        f.close()
    def __len__(self):
        'Denotes the total number of samples'
        return len(self.inputs[0])
    def __getitem__(self, index):
        """Return one sample's feature list, with masked-LM padding trimmed."""
        [
            input_ids, input_mask, segment_ids, masked_lm_positions,
            masked_lm_ids, next_sentence_labels
        ] = [
            input[index].astype(np.int64)
            if indice < 5 else np.asarray(input[index].astype(np.int64))
            for indice, input in enumerate(self.inputs)
        ]
        # TODO: whether to use reversed mask by changing 1s and 0s to be
        # consistent with nv bert
        # convert the 0/1 padding mask into a large negative additive bias
        input_mask = (1 - np.reshape(
            input_mask.astype(np.float32), [1, 1, input_mask.shape[0]])) * -1e9
        index = self.max_pred_length
        # store number of masked tokens in index
        # outputs of torch.nonzero diff with that of numpy.nonzero by zip
        # position 0 marks padding: the first zero entry gives the number of
        # real masked tokens in this sample
        padded_mask_indices = (masked_lm_positions == 0).nonzero()[0]
        if len(padded_mask_indices) != 0:
            index = padded_mask_indices[0].item()
            mask_token_num = index
        else:
            index = self.max_pred_length
            mask_token_num = self.max_pred_length
        # masked_lm_labels = np.full(input_ids.shape, -1, dtype=np.int64)
        # masked_lm_labels[masked_lm_positions[:index]] = masked_lm_ids[:index]
        masked_lm_labels = masked_lm_ids[:index]
        masked_lm_positions = masked_lm_positions[:index]
        # softmax_with_cross_entropy enforce last dim size equal 1
        masked_lm_labels = np.expand_dims(masked_lm_labels, axis=-1)
        next_sentence_labels = np.expand_dims(next_sentence_labels, axis=-1)
        return [
            input_ids, segment_ids, input_mask, masked_lm_positions,
            masked_lm_labels, next_sentence_labels
        ]
def do_train(args):
    """Run the full pretraining loop: build model, optimizer, per-shard data
    loaders (prefetched on a background thread), train, log and checkpoint."""
    paddle.set_device(args.device)
    if paddle.distributed.get_world_size() > 1:
        paddle.distributed.init_parallel_env()
    set_seed(args)
    worker_init = WorkerInitObj(args.seed + paddle.distributed.get_rank())
    args.model_type = args.model_type.lower()
    base_class, model_class, criterion_class, tokenizer_class = MODEL_CLASSES[
        args.model_type]
    tokenizer = tokenizer_class.from_pretrained(args.model_name_or_path)
    pretrained_models_list = list(
        model_class.pretrained_init_configuration.keys())
    # shortcut names build a randomly initialized model from config; anything
    # else is treated as a path/checkpoint and loaded
    if args.model_name_or_path in pretrained_models_list:
        model = model_class(
            base_class(**model_class.pretrained_init_configuration[
                args.model_name_or_path]))
    else:
        model = model_class.from_pretrained(args.model_name_or_path)
    criterion = criterion_class(
        getattr(model, model_class.base_model_prefix).config["vocab_size"])
    # decorate @to_static for benchmark, skip it by default.
    if args.to_static:
        specs = create_input_specs()
        model = paddle.jit.to_static(model, input_spec=specs)
        logger.info("Successfully to apply @to_static with specs: {}".format(
            specs))
    if paddle.distributed.get_world_size() > 1:
        model = paddle.DataParallel(model)
    # If use default last_epoch, lr of the first iteration is 0.
    # Use `last_epoch = 0` to be consistent with nv bert.
    # NOTE(review): `train_data_loader` is not defined yet at this point — it is
    # first created inside the epoch loop below.  If max_steps <= 0 this line
    # raises NameError; the script effectively requires --max_steps > 0.
    num_training_steps = args.max_steps if args.max_steps > 0 else len(
        train_data_loader) * args.num_train_epochs
    lr_scheduler = LinearDecayWithWarmup(
        args.learning_rate, num_training_steps, args.warmup_steps, last_epoch=0)
    # Generate parameter names needed to perform weight decay.
    # All bias and LayerNorm parameters are excluded.
    decay_params = [
        p.name for n, p in model.named_parameters()
        if not any(nd in n for nd in ["bias", "norm"])
    ]
    optimizer = paddle.optimizer.AdamW(
        learning_rate=lr_scheduler,
        epsilon=args.adam_epsilon,
        parameters=model.parameters(),
        weight_decay=args.weight_decay,
        apply_decay_param_fun=lambda x: x in decay_params)
    if args.use_amp:
        scaler = paddle.amp.GradScaler(init_loss_scaling=args.scale_loss)
    # single-thread pool used to prefetch the next shard's DataLoader
    pool = ThreadPoolExecutor(1)
    global_step = 0
    tic_train = time.time()
    for epoch in range(args.num_train_epochs):
        # all "train" files in input_dir, shuffled deterministically per epoch
        files = [
            os.path.join(args.input_dir, f) for f in os.listdir(args.input_dir)
            if os.path.isfile(os.path.join(args.input_dir, f)) and "train" in f
        ]
        files.sort()
        num_files = len(files)
        random.Random(args.seed + epoch).shuffle(files)
        f_start_id = 0
        shared_file_list = {}
        # pick this rank's shard; when there are more ranks than files the
        # remainder rotates assignments so ranks do not all read one file
        if paddle.distributed.get_world_size() > num_files:
            remainder = paddle.distributed.get_world_size() % num_files
            data_file = files[(
                f_start_id * paddle.distributed.get_world_size() +
                paddle.distributed.get_rank() + remainder * f_start_id) %
                              num_files]
        else:
            data_file = files[(f_start_id * paddle.distributed.get_world_size()
                               + paddle.distributed.get_rank()) % num_files]
        previous_file = data_file
        train_data_loader, _ = create_pretraining_dataset(
            data_file, args.max_predictions_per_seq, shared_file_list, args,
            worker_init)
        # TODO(guosheng): better way to process single file
        single_file = True if f_start_id + 1 == len(files) else False
        for f_id in range(f_start_id, len(files)):
            if not single_file and f_id == f_start_id:
                continue
            if paddle.distributed.get_world_size() > num_files:
                data_file = files[(
                    f_id * paddle.distributed.get_world_size() +
                    paddle.distributed.get_rank() + remainder * f_id) %
                                  num_files]
            else:
                data_file = files[(f_id * paddle.distributed.get_world_size() +
                                   paddle.distributed.get_rank()) % num_files]
            previous_file = data_file
            # prefetch the next shard's loader while training on the current one
            dataset_future = pool.submit(create_pretraining_dataset, data_file,
                                         args.max_predictions_per_seq,
                                         shared_file_list, args, worker_init)
            train_cost_avg = TimeCostAverage()
            reader_cost_avg = TimeCostAverage()
            total_samples = 0
            batch_start = time.time()
            for step, batch in enumerate(train_data_loader):
                train_reader_cost = time.time() - batch_start
                reader_cost_avg.record(train_reader_cost)
                global_step += 1
                (input_ids, segment_ids, input_mask, masked_lm_positions,
                 masked_lm_labels, next_sentence_labels,
                 masked_lm_scale) = batch
                with paddle.amp.auto_cast(
                        args.use_amp,
                        custom_white_list=["layer_norm", "softmax", "gelu"]):
                    prediction_scores, seq_relationship_score = model(
                        input_ids=input_ids,
                        token_type_ids=segment_ids,
                        attention_mask=input_mask,
                        masked_positions=masked_lm_positions)
                    loss = criterion(prediction_scores, seq_relationship_score,
                                     masked_lm_labels, next_sentence_labels,
                                     masked_lm_scale)
                if args.use_amp:
                    scaler.scale(loss).backward()
                    scaler.minimize(optimizer, loss)
                else:
                    loss.backward()
                    optimizer.step()
                lr_scheduler.step()
                optimizer.clear_grad()
                total_samples += args.batch_size
                train_run_cost = time.time() - batch_start
                train_cost_avg.record(train_run_cost)
                if global_step % args.logging_steps == 0:
                    # only rank 0 logs; averages are reset after each report
                    if paddle.distributed.get_rank() == 0:
                        logger.info(
                            "global step: %d, epoch: %d, batch: %d, loss: %f, "
                            "avg_reader_cost: %.5f sec, avg_batch_cost: %.5f sec, avg_samples: %.5f, ips: %.5f sequences/sec"
                            % (global_step, epoch, step, loss,
                               reader_cost_avg.get_average(),
                               train_cost_avg.get_average(), total_samples /
                               args.logging_steps, total_samples / (
                                   args.logging_steps *
                                   train_cost_avg.get_average())))
                        total_samples = 0
                        train_cost_avg.reset()
                        reader_cost_avg.reset()
                if global_step % args.save_steps == 0:
                    # only rank 0 checkpoints model, tokenizer and optimizer state
                    if paddle.distributed.get_rank() == 0:
                        output_dir = os.path.join(args.output_dir,
                                                  "model_%d" % global_step)
                        if not os.path.exists(output_dir):
                            os.makedirs(output_dir)
                        # need better way to get inner model of DataParallel
                        model_to_save = model._layers if isinstance(
                            model, paddle.DataParallel) else model
                        model_to_save.save_pretrained(output_dir)
                        tokenizer.save_pretrained(output_dir)
                        paddle.save(
                            optimizer.state_dict(),
                            os.path.join(output_dir, "model_state.pdopt"))
                if global_step >= args.max_steps:
                    del train_data_loader
                    return
                batch_start = time.time()
            # swap in the prefetched loader for the next shard
            del train_data_loader
            train_data_loader, data_file = dataset_future.result(timeout=None)
if __name__ == "__main__":
    # CLI entry point: parse arguments, echo the effective config, then train.
    args = parse_args()
    print(args)
    do_train(args)
| [
"paddle.jit.to_static",
"numpy.random.seed",
"argparse.ArgumentParser",
"paddle.amp.GradScaler",
"os.path.join",
"paddlenlp.data.Stack",
"numpy.full",
"random.Random",
"os.path.exists",
"random.seed",
"paddle.DataParallel",
"paddle.set_device",
"concurrent.futures.ThreadPoolExecutor",
"pad... | [((1405, 1459), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO', 'format': 'FORMAT'}), '(level=logging.INFO, format=FORMAT)\n', (1424, 1459), False, 'import logging\n'), ((1469, 1496), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1486, 1496), False, 'import logging\n'), ((1735, 1760), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (1758, 1760), False, 'import argparse\n'), ((5848, 5924), 'paddle.io.BatchSampler', 'paddle.io.BatchSampler', (['train_data'], {'batch_size': 'args.batch_size', 'shuffle': '(True)'}), '(train_data, batch_size=args.batch_size, shuffle=True)\n', (5870, 5924), False, 'import paddle\n'), ((7305, 7465), 'paddle.io.DataLoader', 'DataLoader', ([], {'dataset': 'train_data', 'batch_sampler': 'train_batch_sampler', 'collate_fn': '_collate_data', 'num_workers': '(0)', 'worker_init_fn': 'worker_init', 'return_list': '(True)'}), '(dataset=train_data, batch_sampler=train_batch_sampler,\n collate_fn=_collate_data, num_workers=0, worker_init_fn=worker_init,\n return_list=True)\n', (7315, 7465), False, 'from paddle.io import DataLoader, Dataset\n'), ((7592, 7664), 'paddle.static.InputSpec', 'paddle.static.InputSpec', ([], {'name': '"""input_ids"""', 'shape': '[-1, -1]', 'dtype': '"""int64"""'}), "(name='input_ids', shape=[-1, -1], dtype='int64')\n", (7615, 7664), False, 'import paddle\n'), ((7692, 7766), 'paddle.static.InputSpec', 'paddle.static.InputSpec', ([], {'name': '"""segment_ids"""', 'shape': '[-1, -1]', 'dtype': '"""int64"""'}), "(name='segment_ids', shape=[-1, -1], dtype='int64')\n", (7715, 7766), False, 'import paddle\n'), ((7817, 7903), 'paddle.static.InputSpec', 'paddle.static.InputSpec', ([], {'name': '"""input_mask"""', 'shape': '[-1, 1, 1, -1]', 'dtype': '"""float32"""'}), "(name='input_mask', shape=[-1, 1, 1, -1], dtype=\n 'float32')\n", (7840, 7903), False, 'import paddle\n'), ((7934, 8012), 'paddle.static.InputSpec', 
'paddle.static.InputSpec', ([], {'name': '"""masked_lm_positions"""', 'shape': '[-1]', 'dtype': '"""int32"""'}), "(name='masked_lm_positions', shape=[-1], dtype='int32')\n", (7957, 8012), False, 'import paddle\n'), ((10368, 10398), 'paddle.set_device', 'paddle.set_device', (['args.device'], {}), '(args.device)\n', (10385, 10398), False, 'import paddle\n'), ((11934, 12033), 'paddlenlp.transformers.LinearDecayWithWarmup', 'LinearDecayWithWarmup', (['args.learning_rate', 'num_training_steps', 'args.warmup_steps'], {'last_epoch': '(0)'}), '(args.learning_rate, num_training_steps, args.\n warmup_steps, last_epoch=0)\n', (11955, 12033), False, 'from paddlenlp.transformers import LinearDecayWithWarmup\n'), ((12646, 12667), 'concurrent.futures.ThreadPoolExecutor', 'ThreadPoolExecutor', (['(1)'], {}), '(1)\n', (12664, 12667), False, 'from concurrent.futures import ThreadPoolExecutor\n'), ((12704, 12715), 'time.time', 'time.time', ([], {}), '()\n', (12713, 12715), False, 'import time\n'), ((5466, 5501), 'numpy.random.seed', 'np.random.seed', ([], {'seed': '(self.seed + id)'}), '(seed=self.seed + id)\n', (5480, 5501), True, 'import numpy as np\n'), ((5510, 5537), 'random.seed', 'random.seed', (['(self.seed + id)'], {}), '(self.seed + id)\n', (5521, 5537), False, 'import random\n'), ((6172, 6179), 'paddlenlp.data.Stack', 'Stack', ([], {}), '()\n', (6177, 6179), False, 'from paddlenlp.data import Stack, Tuple, Pad\n'), ((6799, 6831), 'numpy.full', 'np.full', (['size', '(0)'], {'dtype': 'np.int32'}), '(size, 0, dtype=np.int32)\n', (6806, 6831), True, 'import numpy as np\n'), ((6876, 6914), 'numpy.full', 'np.full', (['[size, 1]', '(-1)'], {'dtype': 'np.int64'}), '([size, 1], -1, dtype=np.int64)\n', (6883, 6914), True, 'import numpy as np\n'), ((8305, 8331), 'h5py.File', 'h5py.File', (['input_file', '"""r"""'], {}), "(input_file, 'r')\n", (8314, 8331), False, 'import h5py\n'), ((10075, 10116), 'numpy.expand_dims', 'np.expand_dims', (['masked_lm_labels'], {'axis': '(-1)'}), 
'(masked_lm_labels, axis=-1)\n', (10089, 10116), True, 'import numpy as np\n'), ((10148, 10193), 'numpy.expand_dims', 'np.expand_dims', (['next_sentence_labels'], {'axis': '(-1)'}), '(next_sentence_labels, axis=-1)\n', (10162, 10193), True, 'import numpy as np\n'), ((10406, 10441), 'paddle.distributed.get_world_size', 'paddle.distributed.get_world_size', ([], {}), '()\n', (10439, 10441), False, 'import paddle\n'), ((10455, 10493), 'paddle.distributed.init_parallel_env', 'paddle.distributed.init_parallel_env', ([], {}), '()\n', (10491, 10493), False, 'import paddle\n'), ((11431, 11476), 'paddle.jit.to_static', 'paddle.jit.to_static', (['model'], {'input_spec': 'specs'}), '(model, input_spec=specs)\n', (11451, 11476), False, 'import paddle\n'), ((11583, 11618), 'paddle.distributed.get_world_size', 'paddle.distributed.get_world_size', ([], {}), '()\n', (11616, 11618), False, 'import paddle\n'), ((11640, 11666), 'paddle.DataParallel', 'paddle.DataParallel', (['model'], {}), '(model)\n', (11659, 11666), False, 'import paddle\n'), ((12577, 12633), 'paddle.amp.GradScaler', 'paddle.amp.GradScaler', ([], {'init_loss_scaling': 'args.scale_loss'}), '(init_loss_scaling=args.scale_loss)\n', (12598, 12633), False, 'import paddle\n'), ((5191, 5220), 'paddle.distributed.get_rank', 'paddle.distributed.get_rank', ([], {}), '()\n', (5218, 5220), False, 'import paddle\n'), ((5253, 5282), 'paddle.distributed.get_rank', 'paddle.distributed.get_rank', ([], {}), '()\n', (5280, 5282), False, 'import paddle\n'), ((5312, 5341), 'paddle.distributed.get_rank', 'paddle.distributed.get_rank', ([], {}), '()\n', (5339, 5341), False, 'import paddle\n'), ((7213, 7259), 'numpy.asarray', 'np.asarray', (['[mask_token_num]'], {'dtype': 'np.float32'}), '([mask_token_num], dtype=np.float32)\n', (7223, 7259), True, 'import numpy as np\n'), ((8511, 8532), 'numpy.asarray', 'np.asarray', (['f[key][:]'], {}), '(f[key][:])\n', (8521, 8532), True, 'import numpy as np\n'), ((10558, 10587), 
'paddle.distributed.get_rank', 'paddle.distributed.get_rank', ([], {}), '()\n', (10585, 10587), False, 'import paddle\n'), ((12793, 12824), 'os.path.join', 'os.path.join', (['args.input_dir', 'f'], {}), '(args.input_dir, f)\n', (12805, 12824), False, 'import os\n'), ((13125, 13160), 'paddle.distributed.get_world_size', 'paddle.distributed.get_world_size', ([], {}), '()\n', (13158, 13160), False, 'import paddle\n'), ((14847, 14864), 'paddlenlp.utils.tools.TimeCostAverage', 'TimeCostAverage', ([], {}), '()\n', (14862, 14864), False, 'from paddlenlp.utils.tools import TimeCostAverage\n'), ((14895, 14912), 'paddlenlp.utils.tools.TimeCostAverage', 'TimeCostAverage', ([], {}), '()\n', (14910, 14912), False, 'from paddlenlp.utils.tools import TimeCostAverage\n'), ((14969, 14980), 'time.time', 'time.time', ([], {}), '()\n', (14978, 14980), False, 'import time\n'), ((12834, 12860), 'os.listdir', 'os.listdir', (['args.input_dir'], {}), '(args.input_dir)\n', (12844, 12860), False, 'import os\n'), ((13011, 13043), 'random.Random', 'random.Random', (['(args.seed + epoch)'], {}), '(args.seed + epoch)\n', (13024, 13043), False, 'import random\n'), ((13198, 13233), 'paddle.distributed.get_world_size', 'paddle.distributed.get_world_size', ([], {}), '()\n', (13231, 13233), False, 'import paddle\n'), ((14106, 14141), 'paddle.distributed.get_world_size', 'paddle.distributed.get_world_size', ([], {}), '()\n', (14139, 14141), False, 'import paddle\n'), ((18388, 18399), 'time.time', 'time.time', ([], {}), '()\n', (18397, 18399), False, 'import time\n'), ((15078, 15089), 'time.time', 'time.time', ([], {}), '()\n', (15087, 15089), False, 'import time\n'), ((15389, 15480), 'paddle.amp.auto_cast', 'paddle.amp.auto_cast', (['args.use_amp'], {'custom_white_list': "['layer_norm', 'softmax', 'gelu']"}), "(args.use_amp, custom_white_list=['layer_norm',\n 'softmax', 'gelu'])\n", (15409, 15480), False, 'import paddle\n'), ((16407, 16418), 'time.time', 'time.time', ([], {}), '()\n', (16416, 16418), 
False, 'import time\n'), ((12891, 12922), 'os.path.join', 'os.path.join', (['args.input_dir', 'f'], {}), '(args.input_dir, f)\n', (12903, 12922), False, 'import os\n'), ((13587, 13616), 'paddle.distributed.get_rank', 'paddle.distributed.get_rank', ([], {}), '()\n', (13614, 13616), False, 'import paddle\n'), ((16568, 16597), 'paddle.distributed.get_rank', 'paddle.distributed.get_rank', ([], {}), '()\n', (16595, 16597), False, 'import paddle\n'), ((17444, 17473), 'paddle.distributed.get_rank', 'paddle.distributed.get_rank', ([], {}), '()\n', (17471, 17473), False, 'import paddle\n'), ((17517, 17572), 'os.path.join', 'os.path.join', (['args.output_dir', "('model_%d' % global_step)"], {}), "(args.output_dir, 'model_%d' % global_step)\n", (17529, 17572), False, 'import os\n'), ((13361, 13390), 'paddle.distributed.get_rank', 'paddle.distributed.get_rank', ([], {}), '()\n', (13388, 13390), False, 'import paddle\n'), ((13518, 13553), 'paddle.distributed.get_world_size', 'paddle.distributed.get_world_size', ([], {}), '()\n', (13551, 13553), False, 'import paddle\n'), ((14506, 14535), 'paddle.distributed.get_rank', 'paddle.distributed.get_rank', ([], {}), '()\n', (14533, 14535), False, 'import paddle\n'), ((17654, 17680), 'os.path.exists', 'os.path.exists', (['output_dir'], {}), '(output_dir)\n', (17668, 17680), False, 'import os\n'), ((17710, 17733), 'os.makedirs', 'os.makedirs', (['output_dir'], {}), '(output_dir)\n', (17721, 17733), False, 'import os\n'), ((18192, 18237), 'os.path.join', 'os.path.join', (['output_dir', '"""model_state.pdopt"""'], {}), "(output_dir, 'model_state.pdopt')\n", (18204, 18237), False, 'import os\n'), ((13307, 13342), 'paddle.distributed.get_world_size', 'paddle.distributed.get_world_size', ([], {}), '()\n', (13340, 13342), False, 'import paddle\n'), ((14276, 14305), 'paddle.distributed.get_rank', 'paddle.distributed.get_rank', ([], {}), '()\n', (14303, 14305), False, 'import paddle\n'), ((14433, 14468), 'paddle.distributed.get_world_size', 
'paddle.distributed.get_world_size', ([], {}), '()\n', (14466, 14468), False, 'import paddle\n'), ((14218, 14253), 'paddle.distributed.get_world_size', 'paddle.distributed.get_world_size', ([], {}), '()\n', (14251, 14253), False, 'import paddle\n')] |
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
import os
import sys
import pytest
import six
from plotille import Canvas
try:
import numpy as np
have_numpy = True
except ImportError:
have_numpy = False
try:
from PIL import Image
have_pillow = True
except ImportError:
have_pillow = False
@pytest.mark.skipif(not have_numpy, reason='No numpy installed.')
def test_transform():
    """Canvas maps [0, 1] onto integer dot coordinates: 2 dots/char in x, 4 in y."""
    cvs = Canvas(40, 20)

    # corners and midpoint
    assert cvs._transform_x(0) == 0
    assert cvs._transform_y(0) == 0
    assert cvs._transform_x(1) == 40 * 2
    assert cvs._transform_y(1) == 20 * 4
    assert cvs._transform_x(0.5) == 40
    assert cvs._transform_y(0.5) == 20 * 2

    # random in-range values stay in range and come back as ints
    for v in np.random.random(100):
        tx = cvs._transform_x(v)
        ty = cvs._transform_y(v)
        assert 0 <= tx <= 40 * 2 and isinstance(tx, int)
        assert 0 <= ty <= 20 * 4 and isinstance(ty, int)

    # values outside [0, 1] extrapolate linearly
    assert cvs._transform_x(-0.5) == -40
    assert cvs._transform_y(-0.5) == -20 * 2
    assert cvs._transform_x(1.5) == 40 * 2 + 40
    assert cvs._transform_y(1.5) == 20 * 4 + 20 * 2
def test_invalids():
    """Constructor rejects non-positive sizes, float sizes and inverted ranges."""
    invalid_calls = (
        ((0, 0), {}),                        # zero size
        ((1.0, 1.0), {}),                    # non-int size
        ((1, 1), dict(xmin=1, xmax=0)),      # inverted x range
        ((1, 1), dict(ymin=1, ymax=0)),      # inverted y range
    )
    for pos, kw in invalid_calls:
        with pytest.raises(AssertionError):
            Canvas(*pos, **kw)
def test_str():
    """str() and repr() both expose the full canvas geometry."""
    expected = 'Canvas(width=40, height=20, xmin=0, ymin=0, xmax=1, ymax=1)'
    cvs = Canvas(40, 20)
    assert six.text_type(cvs) == expected
    assert repr(cvs) == expected
def test_set():
    """Each of the 8 dots of a braille cell maps to one bit; unsetting clears it."""
    cvs = Canvas(1, 1)
    # (x, y) dot coordinate -> expected single-dot braille character
    dot_chars = [
        ((0, 0), '⡀'), ((0, 1), '⠄'), ((0, 2), '⠂'), ((0, 3), '⠁'),
        ((1, 0), '⢀'), ((1, 1), '⠠'), ((1, 2), '⠐'), ((1, 3), '⠈'),
    ]
    for (x, y), char in dot_chars:
        cvs._set(x, y)
        assert six.text_type(cvs._canvas[0][0]) == char
        cvs._set(x, y, False)
        assert six.text_type(cvs._canvas[0][0]) == '⠀'
def test_fill_char():
    """fill_char sets all 8 dots of the target cell; unsetting blanks it again."""
    cvs = Canvas(1, 1)
    cvs.fill_char(0.5, 0.5)
    assert six.text_type(cvs._canvas[0][0]) == '⣿'
    cvs.fill_char(0.5, 0.5, False)
    assert six.text_type(cvs._canvas[0][0]) == '⠀'
@pytest.mark.parametrize('color', [None, 'red'])
def test_point(color, tty):
    """point() draws one dot, optionally wrapped in ANSI color escapes."""
    cvs = Canvas(1, 1)
    cvs.point(0, 0, color=color)
    prefix, postfix = ('\x1b[31m', '\x1b[0m') if color else ('', '')
    assert six.text_type(cvs._canvas[0][0]) == prefix + '⡀' + postfix
    cvs.point(0, 0, set_=False, color=color)
    assert six.text_type(cvs._canvas[0][0]) == '⠀'
@pytest.mark.parametrize('color', [None, 'red'])
def test_set_text(color, tty):
    """text() writes one character per cell, optionally colored."""
    cvs = Canvas(2, 1)
    cvs.text(0, 0, 'Hi', color=color)
    prefix, postfix = ('\x1b[31m', '\x1b[0m') if color else ('', '')
    assert six.text_type(cvs._canvas[0][0]) == prefix + 'H' + postfix
    assert six.text_type(cvs._canvas[0][1]) == prefix + 'i' + postfix
    cvs.text(0, 0, 'Hi', False, color=color)
    assert six.text_type(cvs._canvas[0][0]) == '⠀'
    assert six.text_type(cvs._canvas[0][1]) == '⠀'
def test_set_text_keep_dots():
    """Un-setting text restores underlying dots instead of blanking the cell."""
    cvs = Canvas(2, 1)
    cvs.fill_char(0, 0)
    assert six.text_type(cvs._canvas[0][0]) == '⣿'
    # text overwrites the dot cell
    cvs.text(0, 0, 'Hi')
    assert six.text_type(cvs._canvas[0][0]) == 'H'
    assert six.text_type(cvs._canvas[0][1]) == 'i'
    # removing the text brings the dots back
    cvs.fill_char(0.5, 0.5, False)
    cvs.text(0, 0, 'Hi', False)
    assert six.text_type(cvs._canvas[0][0]) == '⣿'
    assert six.text_type(cvs._canvas[0][1]) == '⠀'
def test_set_text_to_long():
    """Text longer than the canvas width is clipped to the available cells."""
    cvs = Canvas(2, 1)
    cvs.text(0, 0, 'Hello World')
    assert (six.text_type(cvs._canvas[0][0]),
            six.text_type(cvs._canvas[0][1])) == ('H', 'e')
    cvs.fill_char(0.5, 0.5, False)
    cvs.text(0, 0, 'Hello World', False)
    assert (six.text_type(cvs._canvas[0][0]),
            six.text_type(cvs._canvas[0][1])) == ('⠀', '⠀')
@pytest.mark.parametrize('empty', ['', None])
def test_set_text_empty(empty):
    """Setting or unsetting empty text leaves the canvas cells blank."""
    canvas = Canvas(2, 1)
    canvas.text(0, 0, empty)
    for column in (0, 1):
        assert six.text_type(canvas._canvas[0][column]) == '⠀'
    canvas.fill_char(0.5, 0.5, False)
    canvas.text(0, 0, empty, False)
    for column in (0, 1):
        assert six.text_type(canvas._canvas[0][column]) == '⠀'
@pytest.mark.parametrize('other_color', [None, 'blue'])
def test_unset_keep_color_text(tty, other_color):
    """Removing colored text keeps the original color on the blanked cells."""
    canvas = Canvas(2, 1)
    canvas.text(0, 0, 'Hi', color='red')
    red, reset = '\x1b[31m', '\x1b[0m'
    assert six.text_type(canvas._canvas[0][0]) == '{}H{}'.format(red, reset)
    assert six.text_type(canvas._canvas[0][1]) == '{}i{}'.format(red, reset)
    # The color passed while unsetting is ignored - red is preserved.
    canvas.text(0, 0, 'Hi', False, color=other_color)
    assert six.text_type(canvas._canvas[0][0]) == '{}⠀{}'.format(red, reset)
    assert six.text_type(canvas._canvas[0][1]) == '{}⠀{}'.format(red, reset)
@pytest.mark.parametrize('other_color', [None, 'blue'])
def test_unset_keep_color_dots(tty, other_color):
    """Unsetting a colored dot keeps the original color on the blank cell."""
    canvas = Canvas(1, 1)
    canvas.point(0, 0, color='red')
    red, reset = '\x1b[31m', '\x1b[0m'
    assert six.text_type(canvas._canvas[0][0]) == '{}⡀{}'.format(red, reset)
    # The color given at unset time is ignored - red is preserved.
    canvas.point(0, 0, set_=False, color=other_color)
    assert six.text_type(canvas._canvas[0][0]) == '{}⠀{}'.format(red, reset)
@pytest.mark.skipif(not have_pillow, reason='No pillow installed.')
def test_braille_image(cleandoc):
    # Render an 80x80 grayscale image on a 40x20 canvas (each braille cell
    # covers 2x4 pixels) and compare the plot against a golden dump.
    img = Image.open('imgs/ich.jpg')
    img = img.convert('L')
    img = img.resize((80, 80))
    cvs = Canvas(40, 20)
    cvs.braille_image(img.getdata())
    # Two golden strings: the rendered output differs slightly between
    # Python 2 and Python 3 (presumably due to resize rounding - see the
    # version check below).
    expected_27 = """
    ⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⡿⠛⠛⠛⠙⢿⠿⢿⡿⢿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿
    ⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⠛⠋⠉⠁⠀⠀⠀⠀⠀⠀⠀⠀⠈⣻⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿
    ⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⡿⠁⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⣠⣾⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿
    ⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⠟⠁⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠛⠁⠀⠀⠀⠹⢻⣿⣿⣿⣿⣿⣿⣿
    ⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⠇⠀⠀⠀⠀⠀⠀⢀⠀⠀⠀⠀⣀⣴⣶⣾⣶⣷⣶⣶⣶⣿⣿⣿⣿⣿⣿⣿⣿
    ⣿⣿⣿⣿⣿⣿⣿⣿⣿⡿⠉⠀⠀⠀⠀⣠⣿⣿⣾⣿⣿⣷⣼⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿
    ⣿⣿⣿⣿⣿⣿⣿⣿⣿⣏⡀⠀⠀⠀⢠⣿⢿⣿⣿⣿⣿⣿⡿⠿⠿⠿⠿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⠙
    ⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⡇⠀⠀⠀⡴⣡⣶⣦⣉⣿⣿⡟⠋⢀⣤⠄⠀⡀⠉⠉⠙⠻⣿⣿⣿⣿⣿⣿⣿⣧
    ⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⠧⠀⠀⢠⣿⢃⡔⠚⣤⢌⣿⣷⣤⣿⠇⡀⠁⠀⠐⠀⠀⡀⠙⣿⣿⣿⣿⣿⣿⣿
    ⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⡂⣄⢀⣿⣿⣿⣿⣿⣿⣿⣿⣿⠸⡦⠀⠁⠀⠀⣀⠀⠀⠁⠀⢹⣿⣿⣿⣿⣿⣿
    ⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣾⣧⠸⣿⣿⣿⣿⣿⣿⣿⣿⣿⡀⠈⠃⠈⠠⣤⡄⠤⠀⠀⠀⠈⣻⣿⣿⣿⣿⣿
    ⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣽⠆⠹⢿⣿⣿⣿⣇⢈⠉⢉⡑⣄⡀⠀⠀⠈⠀⠀⠀⠀⠀⠀⠰⣾⣿⣿⣿⣿
    ⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⡆⠀⠸⣿⠿⠉⢀⣨⣟⡁⠉⠃⠀⠀⠀⠀⠀⠀⠀⠀⢀⠀⠀⠈⠻⣿⣿⣿
    ⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⡇⠀⠀⡏⢲⣚⣛⡛⠻⠿⣶⣀⠃⠀⠐⠁⠀⠀⠀⠀⠀⠀⠀⠀⠀⠹⣿⣿
    ⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⡇⠀⠀⠀⡻⣿⣿⣿⣿⣿⡷⣿⠰⠁⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠠⠘⣿
    ⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣏⠀⠃⠀⠀⢻⣿⣿⣿⠟⠁⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠘
    ⣿⣿⣿⣿⣿⣿⣿⠿⠛⣻⣿⣿⡟⠀⠀⠀⠠⣀⠈⠉⠁⡠⠂⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀
    ⣿⣿⡿⠿⠛⠉⠀⠀⠀⠻⣿⣿⣳⡄⠀⠀⠀⠙⠿⠻⠟⠁⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀
    ⠉⠀⠀⠀⠀⠀⠀⠀⠀⠀⠙⠻⣿⣿⡀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀
    ⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠉⠓⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀"""
    expected_3 = """
    ⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⡿⠛⠛⠛⠙⠿⠿⣿⡿⢿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿
    ⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⠛⠋⠉⠀⠀⠀⠀⠀⠀⠀⠀⠀⠘⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿
    ⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⡿⠁⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⣠⣾⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿
    ⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⠟⠁⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠉⠉⠀⠀⠀⠙⢻⣿⣿⣿⣿⣿⣿⣿
    ⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⠇⠀⠀⠀⠀⠀⠀⢀⠀⠀⠀⠀⣀⣴⣶⣶⣶⣷⣶⣶⣶⣿⣿⣿⣿⣿⣿⣿⣿
    ⣿⣿⣿⣿⣿⣿⣿⣿⣿⡿⠉⠀⠀⠀⠀⣠⣿⣿⣿⣿⣿⣷⣼⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿
    ⣿⣿⣿⣿⣿⣿⣿⣿⣿⣇⠀⠀⠀⠀⢀⣿⢿⣿⣿⣿⣿⣿⣿⠿⠿⠿⠿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⠛
    ⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⡇⠀⠀⠀⡘⣡⣴⣤⣙⣿⣿⡟⠋⢀⣠⠄⠀⠀⠉⠉⠙⠻⣿⣿⣿⣿⣿⣿⣿⣇
    ⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⠧⠀⠀⢠⣿⢁⡔⠒⣦⣍⣿⣷⣤⣿⠃⡀⠀⠀⠐⠀⠀⠀⠙⣿⣿⣿⣿⣿⣿⣿
    ⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⢂⣄⢀⣿⣿⣿⣿⣿⣿⣿⣿⣟⠙⣿⠀⠁⠀⠀⡀⠀⠀⠁⠀⢹⣿⣿⣿⣿⣿⣿
    ⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣾⣧⠘⣿⣿⣿⣿⣿⣿⣿⣿⣿⡀⠈⠀⠈⠠⣤⡄⠤⠀⠀⠀⠈⣻⣿⣿⣿⣿⣿
    ⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⠄⠹⢿⣿⣿⣿⣇⢈⠙⢉⡁⣠⡀⠀⠀⠈⠀⠀⠀⠀⠀⠀⠰⣾⣿⣿⣿⣿
    ⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⡆⠀⠘⣿⠿⠋⢀⣀⣛⡁⠉⠃⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠈⠹⣿⣿⣿
    ⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⡇⠀⠀⡟⢰⣞⣛⡛⠛⠿⢶⣄⠂⠀⠀⠁⠀⠀⠀⠀⠀⠀⠀⠀⠀⠹⣿⣿
    ⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⡇⠀⠀⠀⠻⣿⣿⣿⣿⣿⣿⡿⠘⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠘⣿
    ⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣇⠀⠀⠀⠀⢻⣿⣿⣿⠟⠁⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠘
    ⣿⣿⣿⣿⣿⣿⣿⠿⠛⣹⣿⣿⡏⠀⠀⠀⠠⣀⡀⠉⠁⣠⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀
    ⣿⣿⡿⠿⠛⠉⠀⠀⠀⠻⣿⣿⣿⡄⠀⠀⠀⠙⠿⠿⠟⠉⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀
    ⠉⠀⠀⠀⠀⠀⠀⠀⠀⠀⠈⠻⣿⣷⡀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀
    ⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠉⠓⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀"""
    if sys.version_info[0] == 2:
        assert cleandoc(expected_27) == cvs.plot()
    else:
        assert cleandoc(expected_3) == cvs.plot()
    # Erasing the same image must leave a fully blank canvas.
    cvs.braille_image(img.getdata(), set_=False)
    # empty canvas
    assert os.linesep.join(['⠀' * 40] * 20) == cvs.plot()
@pytest.mark.skipif(not have_pillow, reason='No pillow installed.')
@pytest.mark.parametrize('threshold', range(20, 255, 10))
def test_braille_image_thresholds(threshold):
    """Every threshold draws something; erasing with the same threshold empties the canvas."""
    img = Image.open('imgs/ich.jpg')
    img = img.convert('L')
    img = img.resize((80, 80))
    cvs = Canvas(40, 20)
    blank = os.linesep.join(['⠀' * 40] * 20)
    cvs.braille_image(img.getdata(), threshold=threshold)
    assert cvs.plot() != blank
    cvs.braille_image(img.getdata(), threshold=threshold, set_=False)
    # empty canvas
    assert cvs.plot() == blank
@pytest.mark.skipif(not have_pillow, reason='No pillow installed.')
def test_braille_image_inverse(cleandoc):
    # Same rendering as test_braille_image but with inverse=True, so dots
    # are set where pixels are dark instead of bright.
    img = Image.open('imgs/ich.jpg')
    img = img.convert('L')
    img = img.resize((80, 80))
    cvs = Canvas(40, 20)
    cvs.braille_image(img.getdata(), inverse=True)
    # Golden outputs differ slightly between Python 2 and Python 3,
    # hence the two expected strings and the version check below.
    expected_27 = """
    ⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⢀⣤⣤⣤⣦⡀⣀⡀⢀⡀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀
    ⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⣤⣴⣶⣾⣿⣿⣿⣿⣿⣿⣿⣿⣷⠄⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀
    ⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⢀⣾⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⠟⠁⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀
    ⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⣠⣾⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣤⣾⣿⣿⣿⣆⡄⠀⠀⠀⠀⠀⠀⠀
    ⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⣸⣿⣿⣿⣿⣿⣿⡿⣿⣿⣿⣿⠿⠋⠉⠁⠉⠈⠉⠉⠉⠀⠀⠀⠀⠀⠀⠀⠀
    ⠀⠀⠀⠀⠀⠀⠀⠀⠀⢀⣶⣿⣿⣿⣿⠟⠀⠀⠁⠀⠀⠈⠃⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀
    ⠀⠀⠀⠀⠀⠀⠀⠀⠀⠰⢿⣿⣿⣿⡟⠀⡀⠀⠀⠀⠀⠀⢀⣀⣀⣀⣀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⣦
    ⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⢸⣿⣿⣿⢋⠞⠉⠙⠶⠀⠀⢠⣴⡿⠛⣻⣿⢿⣶⣶⣦⣄⠀⠀⠀⠀⠀⠀⠀⠘
    ⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⣘⣿⣿⡟⠀⡼⢫⣥⠛⡳⠀⠈⠛⠀⣸⢿⣾⣿⣯⣿⣿⢿⣦⠀⠀⠀⠀⠀⠀⠀
    ⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⢽⠻⡿⠀⠀⠀⠀⠀⠀⠀⠀⠀⣇⢙⣿⣾⣿⣿⠿⣿⣿⣾⣿⡆⠀⠀⠀⠀⠀⠀
    ⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠁⠘⣇⠀⠀⠀⠀⠀⠀⠀⠀⠀⢿⣷⣼⣷⣟⠛⢻⣛⣿⣿⣿⣷⠄⠀⠀⠀⠀⠀
    ⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠂⣹⣆⡀⠀⠀⠀⠸⡷⣶⡶⢮⠻⢿⣿⣿⣷⣿⣿⣿⣿⣿⣿⣏⠁⠀⠀⠀⠀
    ⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⢹⣿⣇⠀⣀⣶⡿⠗⠠⢾⣶⣼⣿⣿⣿⣿⣿⣿⣿⣿⡿⣿⣿⣷⣄⠀⠀⠀
    ⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⢸⣿⣿⢰⡍⠥⠤⢤⣄⣀⠉⠿⣼⣿⣯⣾⣿⣿⣿⣿⣿⣿⣿⣿⣿⣆⠀⠀
    ⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⢸⣿⣿⣿⢄⠀⠀⠀⠀⠀⢈⠀⣏⣾⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣟⣧⠀
    ⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠰⣿⣼⣿⣿⡄⠀⠀⠀⣠⣾⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣧
    ⠀⠀⠀⠀⠀⠀⠀⣀⣤⠄⠀⠀⢠⣿⣿⣿⣟⠿⣷⣶⣾⢟⣽⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿
    ⠀⠀⢀⣀⣤⣶⣿⣿⣿⣄⠀⠀⠌⢻⣿⣿⣿⣦⣀⣄⣠⣾⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿
    ⣶⣿⣿⣿⣿⣿⣿⣿⣿⣿⣦⣄⠀⠀⢿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿
    ⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣶⣬⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿"""
    expected_3 = """
    ⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⢀⣤⣤⣤⣦⣀⣀⠀⢀⡀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀
    ⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⣤⣴⣶⣿⣿⣿⣿⣿⣿⣿⣿⣿⣧⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀
    ⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⢀⣾⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⠟⠁⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀
    ⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⣠⣾⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣶⣶⣿⣿⣿⣦⡄⠀⠀⠀⠀⠀⠀⠀
    ⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⣸⣿⣿⣿⣿⣿⣿⡿⣿⣿⣿⣿⠿⠋⠉⠉⠉⠈⠉⠉⠉⠀⠀⠀⠀⠀⠀⠀⠀
    ⠀⠀⠀⠀⠀⠀⠀⠀⠀⢀⣶⣿⣿⣿⣿⠟⠀⠀⠀⠀⠀⠈⠃⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀
    ⠀⠀⠀⠀⠀⠀⠀⠀⠀⠸⣿⣿⣿⣿⡿⠀⡀⠀⠀⠀⠀⠀⠀⣀⣀⣀⣀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⣤
    ⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⢸⣿⣿⣿⢧⠞⠋⠛⠦⠀⠀⢠⣴⡿⠟⣻⣿⣿⣶⣶⣦⣄⠀⠀⠀⠀⠀⠀⠀⠸
    ⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⣘⣿⣿⡟⠀⡾⢫⣭⠙⠲⠀⠈⠛⠀⣼⢿⣿⣿⣯⣿⣿⣿⣦⠀⠀⠀⠀⠀⠀⠀
    ⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⡽⠻⡿⠀⠀⠀⠀⠀⠀⠀⠀⠠⣦⠀⣿⣾⣿⣿⢿⣿⣿⣾⣿⡆⠀⠀⠀⠀⠀⠀
    ⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠁⠘⣧⠀⠀⠀⠀⠀⠀⠀⠀⠀⢿⣷⣿⣷⣟⠛⢻⣛⣿⣿⣿⣷⠄⠀⠀⠀⠀⠀
    ⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⣻⣆⡀⠀⠀⠀⠸⡷⣦⡶⢾⠟⢿⣿⣿⣷⣿⣿⣿⣿⣿⣿⣏⠁⠀⠀⠀⠀
    ⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⢹⣿⣧⠀⣀⣴⡿⠿⠤⢾⣶⣼⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣷⣆⠀⠀⠀
    ⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⢸⣿⣿⢠⡏⠡⠤⢤⣤⣀⡉⠻⣽⣿⣿⣾⣿⣿⣿⣿⣿⣿⣿⣿⣿⣆⠀⠀
    ⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⢸⣿⣿⣿⣄⠀⠀⠀⠀⠀⠀⢀⣧⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣧⠀
    ⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠸⣿⣿⣿⣿⡄⠀⠀⠀⣠⣾⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣧
    ⠀⠀⠀⠀⠀⠀⠀⣀⣤⠆⠀⠀⢰⣿⣿⣿⣟⠿⢿⣶⣾⠟⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿
    ⠀⠀⢀⣀⣤⣶⣿⣿⣿⣄⠀⠀⠀⢻⣿⣿⣿⣦⣀⣀⣠⣶⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿
    ⣶⣿⣿⣿⣿⣿⣿⣿⣿⣿⣷⣄⠀⠈⢿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿
    ⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣶⣬⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿"""
    if sys.version_info[0] == 2:
        assert cleandoc(expected_27) == cvs.plot()
    else:
        assert cleandoc(expected_3) == cvs.plot()
    # Erasing the same (inverse) image must leave a fully blank canvas.
    cvs.braille_image(img.getdata(), inverse=True, set_=False)
    # empty canvas
    assert os.linesep.join(['⠀' * 40] * 20) == cvs.plot()
@pytest.mark.skipif(not have_pillow, reason='No pillow installed.')
@pytest.mark.parametrize('threshold', range(20, 255, 10))
def test_braille_image_inverse_thresholds(threshold):
    """Inverse rendering works for all thresholds and can be fully erased."""
    img = Image.open('imgs/ich.jpg')
    img = img.convert('L')
    img = img.resize((80, 80))
    cvs = Canvas(40, 20)
    blank = os.linesep.join(['⠀' * 40] * 20)
    cvs.braille_image(img.getdata(), threshold=threshold, inverse=True)
    assert cvs.plot() != blank
    cvs.braille_image(img.getdata(), threshold=threshold, inverse=True, set_=False)
    # empty canvas
    assert cvs.plot() == blank
@pytest.mark.skipif(not have_pillow, reason='No pillow installed.')
@pytest.mark.parametrize('r', [0, 50, 100, 123, 255])
@pytest.mark.parametrize('g', [0, 50, 100, 123, 255])
@pytest.mark.parametrize('b', [0, 50, 100, 123, 255])
def test_image_one_px(tty, r, g, b):
    """A single RGB pixel becomes one blank cell with that background color."""
    cvs = Canvas(1, 1, mode='rgb')
    cvs.image([(r, g, b)])
    expected = '\x1b[48;2;{};{};{}m⠀\x1b[0m'.format(r, g, b)
    assert cvs.plot() == expected
    cvs.image([(r, g, b)], set_=False)
    # empty canvas
    assert cvs.plot() == '⠀'
@pytest.mark.skipif(not have_pillow, reason='No pillow installed.')
def test_image_rgb(tty):
    """Drawing an RGB image marks the canvas; erasing it yields an empty plot."""
    img = Image.open('imgs/ich.jpg')
    img = img.convert('RGB')
    img = img.resize((40, 40))
    cvs = Canvas(40, 40, mode='rgb')
    blank = os.linesep.join(['⠀' * 40] * 40)
    cvs.image(img.getdata())
    assert cvs.plot() != blank
    cvs.image(img.getdata(), set_=False)
    # empty canvas
    assert cvs.plot() == blank
@pytest.mark.skipif(not have_pillow, reason='No pillow installed.')
def test_image_byte(tty):
    """Byte-mode image drawing marks the canvas; erasing empties it again."""
    img = Image.open('imgs/ich.jpg')
    img = img.convert('RGB')
    img = img.resize((40, 40))
    cvs = Canvas(40, 40, mode='byte')
    blank = os.linesep.join(['⠀' * 40] * 40)
    cvs.image(img.getdata())
    assert cvs.plot() != blank
    cvs.image(img.getdata(), set_=False)
    # empty canvas
    assert cvs.plot() == blank
| [
"plotille.Canvas",
"six.text_type",
"PIL.Image.open",
"pytest.raises",
"pytest.mark.skipif",
"numpy.random.random",
"os.linesep.join",
"pytest.mark.parametrize"
] | [((380, 444), 'pytest.mark.skipif', 'pytest.mark.skipif', (['(not have_numpy)'], {'reason': '"""No numpy installed."""'}), "(not have_numpy, reason='No numpy installed.')\n", (398, 444), False, 'import pytest\n'), ((3019, 3066), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""color"""', "[None, 'red']"], {}), "('color', [None, 'red'])\n", (3042, 3066), False, 'import pytest\n'), ((3424, 3471), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""color"""', "[None, 'red']"], {}), "('color', [None, 'red'])\n", (3447, 3471), False, 'import pytest\n'), ((4725, 4769), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""empty"""', "['', None]"], {}), "('empty', ['', None])\n", (4748, 4769), False, 'import pytest\n'), ((5111, 5165), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""other_color"""', "[None, 'blue']"], {}), "('other_color', [None, 'blue'])\n", (5134, 5165), False, 'import pytest\n'), ((5683, 5737), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""other_color"""', "[None, 'blue']"], {}), "('other_color', [None, 'blue'])\n", (5706, 5737), False, 'import pytest\n'), ((6096, 6162), 'pytest.mark.skipif', 'pytest.mark.skipif', (['(not have_pillow)'], {'reason': '"""No pillow installed."""'}), "(not have_pillow, reason='No pillow installed.')\n", (6114, 6162), False, 'import pytest\n'), ((8480, 8546), 'pytest.mark.skipif', 'pytest.mark.skipif', (['(not have_pillow)'], {'reason': '"""No pillow installed."""'}), "(not have_pillow, reason='No pillow installed.')\n", (8498, 8546), False, 'import pytest\n'), ((9077, 9143), 'pytest.mark.skipif', 'pytest.mark.skipif', (['(not have_pillow)'], {'reason': '"""No pillow installed."""'}), "(not have_pillow, reason='No pillow installed.')\n", (9095, 9143), False, 'import pytest\n'), ((11497, 11563), 'pytest.mark.skipif', 'pytest.mark.skipif', (['(not have_pillow)'], {'reason': '"""No pillow installed."""'}), "(not have_pillow, reason='No pillow installed.')\n", (11515, 
11563), False, 'import pytest\n'), ((12092, 12158), 'pytest.mark.skipif', 'pytest.mark.skipif', (['(not have_pillow)'], {'reason': '"""No pillow installed."""'}), "(not have_pillow, reason='No pillow installed.')\n", (12110, 12158), False, 'import pytest\n'), ((12160, 12212), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""r"""', '[0, 50, 100, 123, 255]'], {}), "('r', [0, 50, 100, 123, 255])\n", (12183, 12212), False, 'import pytest\n'), ((12214, 12266), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""g"""', '[0, 50, 100, 123, 255]'], {}), "('g', [0, 50, 100, 123, 255])\n", (12237, 12266), False, 'import pytest\n'), ((12268, 12320), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""b"""', '[0, 50, 100, 123, 255]'], {}), "('b', [0, 50, 100, 123, 255])\n", (12291, 12320), False, 'import pytest\n'), ((12583, 12649), 'pytest.mark.skipif', 'pytest.mark.skipif', (['(not have_pillow)'], {'reason': '"""No pillow installed."""'}), "(not have_pillow, reason='No pillow installed.')\n", (12601, 12649), False, 'import pytest\n'), ((13058, 13124), 'pytest.mark.skipif', 'pytest.mark.skipif', (['(not have_pillow)'], {'reason': '"""No pillow installed."""'}), "(not have_pillow, reason='No pillow installed.')\n", (13076, 13124), False, 'import pytest\n'), ((475, 489), 'plotille.Canvas', 'Canvas', (['(40)', '(20)'], {}), '(40, 20)\n', (481, 489), False, 'from plotille import Canvas\n'), ((731, 752), 'numpy.random.random', 'np.random.random', (['(100)'], {}), '(100)\n', (747, 752), True, 'import numpy as np\n'), ((1463, 1477), 'plotille.Canvas', 'Canvas', (['(40)', '(20)'], {}), '(40, 20)\n', (1469, 1477), False, 'from plotille import Canvas\n'), ((1681, 1693), 'plotille.Canvas', 'Canvas', (['(1)', '(1)'], {}), '(1, 1)\n', (1687, 1693), False, 'from plotille import Canvas\n'), ((2845, 2857), 'plotille.Canvas', 'Canvas', (['(1)', '(1)'], {}), '(1, 1)\n', (2851, 2857), False, 'from plotille import Canvas\n'), ((3103, 3115), 'plotille.Canvas', 'Canvas', 
(['(1)', '(1)'], {}), '(1, 1)\n', (3109, 3115), False, 'from plotille import Canvas\n'), ((3511, 3523), 'plotille.Canvas', 'Canvas', (['(2)', '(1)'], {}), '(2, 1)\n', (3517, 3523), False, 'from plotille import Canvas\n'), ((4001, 4013), 'plotille.Canvas', 'Canvas', (['(2)', '(1)'], {}), '(2, 1)\n', (4007, 4013), False, 'from plotille import Canvas\n'), ((4408, 4420), 'plotille.Canvas', 'Canvas', (['(2)', '(1)'], {}), '(2, 1)\n', (4414, 4420), False, 'from plotille import Canvas\n'), ((4810, 4822), 'plotille.Canvas', 'Canvas', (['(2)', '(1)'], {}), '(2, 1)\n', (4816, 4822), False, 'from plotille import Canvas\n'), ((5224, 5236), 'plotille.Canvas', 'Canvas', (['(2)', '(1)'], {}), '(2, 1)\n', (5230, 5236), False, 'from plotille import Canvas\n'), ((5796, 5808), 'plotille.Canvas', 'Canvas', (['(1)', '(1)'], {}), '(1, 1)\n', (5802, 5808), False, 'from plotille import Canvas\n'), ((6207, 6233), 'PIL.Image.open', 'Image.open', (['"""imgs/ich.jpg"""'], {}), "('imgs/ich.jpg')\n", (6217, 6233), False, 'from PIL import Image\n'), ((6302, 6316), 'plotille.Canvas', 'Canvas', (['(40)', '(20)'], {}), '(40, 20)\n', (6308, 6316), False, 'from plotille import Canvas\n'), ((8661, 8687), 'PIL.Image.open', 'Image.open', (['"""imgs/ich.jpg"""'], {}), "('imgs/ich.jpg')\n", (8671, 8687), False, 'from PIL import Image\n'), ((8756, 8770), 'plotille.Canvas', 'Canvas', (['(40)', '(20)'], {}), '(40, 20)\n', (8762, 8770), False, 'from plotille import Canvas\n'), ((9196, 9222), 'PIL.Image.open', 'Image.open', (['"""imgs/ich.jpg"""'], {}), "('imgs/ich.jpg')\n", (9206, 9222), False, 'from PIL import Image\n'), ((9291, 9305), 'plotille.Canvas', 'Canvas', (['(40)', '(20)'], {}), '(40, 20)\n', (9297, 9305), False, 'from plotille import Canvas\n'), ((11686, 11712), 'PIL.Image.open', 'Image.open', (['"""imgs/ich.jpg"""'], {}), "('imgs/ich.jpg')\n", (11696, 11712), False, 'from PIL import Image\n'), ((11781, 11795), 'plotille.Canvas', 'Canvas', (['(40)', '(20)'], {}), '(40, 20)\n', (11787, 11795), 
False, 'from plotille import Canvas\n'), ((12368, 12392), 'plotille.Canvas', 'Canvas', (['(1)', '(1)'], {'mode': '"""rgb"""'}), "(1, 1, mode='rgb')\n", (12374, 12392), False, 'from plotille import Canvas\n'), ((12685, 12711), 'PIL.Image.open', 'Image.open', (['"""imgs/ich.jpg"""'], {}), "('imgs/ich.jpg')\n", (12695, 12711), False, 'from PIL import Image\n'), ((12782, 12808), 'plotille.Canvas', 'Canvas', (['(40)', '(40)'], {'mode': '"""rgb"""'}), "(40, 40, mode='rgb')\n", (12788, 12808), False, 'from plotille import Canvas\n'), ((13161, 13187), 'PIL.Image.open', 'Image.open', (['"""imgs/ich.jpg"""'], {}), "('imgs/ich.jpg')\n", (13171, 13187), False, 'from PIL import Image\n'), ((13258, 13285), 'plotille.Canvas', 'Canvas', (['(40)', '(40)'], {'mode': '"""byte"""'}), "(40, 40, mode='byte')\n", (13264, 13285), False, 'from plotille import Canvas\n'), ((1163, 1192), 'pytest.raises', 'pytest.raises', (['AssertionError'], {}), '(AssertionError)\n', (1176, 1192), False, 'import pytest\n'), ((1202, 1214), 'plotille.Canvas', 'Canvas', (['(0)', '(0)'], {}), '(0, 0)\n', (1208, 1214), False, 'from plotille import Canvas\n'), ((1225, 1254), 'pytest.raises', 'pytest.raises', (['AssertionError'], {}), '(AssertionError)\n', (1238, 1254), False, 'import pytest\n'), ((1264, 1280), 'plotille.Canvas', 'Canvas', (['(1.0)', '(1.0)'], {}), '(1.0, 1.0)\n', (1270, 1280), False, 'from plotille import Canvas\n'), ((1291, 1320), 'pytest.raises', 'pytest.raises', (['AssertionError'], {}), '(AssertionError)\n', (1304, 1320), False, 'import pytest\n'), ((1330, 1358), 'plotille.Canvas', 'Canvas', (['(1)', '(1)'], {'xmin': '(1)', 'xmax': '(0)'}), '(1, 1, xmin=1, xmax=0)\n', (1336, 1358), False, 'from plotille import Canvas\n'), ((1369, 1398), 'pytest.raises', 'pytest.raises', (['AssertionError'], {}), '(AssertionError)\n', (1382, 1398), False, 'import pytest\n'), ((1408, 1436), 'plotille.Canvas', 'Canvas', (['(1)', '(1)'], {'ymin': '(1)', 'ymax': '(0)'}), '(1, 1, ymin=1, ymax=0)\n', (1414, 1436), 
False, 'from plotille import Canvas\n'), ((1554, 1570), 'six.text_type', 'six.text_type', (['c'], {}), '(c)\n', (1567, 1570), False, 'import six\n'), ((1731, 1761), 'six.text_type', 'six.text_type', (['c._canvas[0][0]'], {}), '(c._canvas[0][0])\n', (1744, 1761), False, 'import six\n'), ((1804, 1834), 'six.text_type', 'six.text_type', (['c._canvas[0][0]'], {}), '(c._canvas[0][0])\n', (1817, 1834), False, 'import six\n'), ((1871, 1901), 'six.text_type', 'six.text_type', (['c._canvas[0][0]'], {}), '(c._canvas[0][0])\n', (1884, 1901), False, 'import six\n'), ((1944, 1974), 'six.text_type', 'six.text_type', (['c._canvas[0][0]'], {}), '(c._canvas[0][0])\n', (1957, 1974), False, 'import six\n'), ((2011, 2041), 'six.text_type', 'six.text_type', (['c._canvas[0][0]'], {}), '(c._canvas[0][0])\n', (2024, 2041), False, 'import six\n'), ((2084, 2114), 'six.text_type', 'six.text_type', (['c._canvas[0][0]'], {}), '(c._canvas[0][0])\n', (2097, 2114), False, 'import six\n'), ((2151, 2181), 'six.text_type', 'six.text_type', (['c._canvas[0][0]'], {}), '(c._canvas[0][0])\n', (2164, 2181), False, 'import six\n'), ((2224, 2254), 'six.text_type', 'six.text_type', (['c._canvas[0][0]'], {}), '(c._canvas[0][0])\n', (2237, 2254), False, 'import six\n'), ((2291, 2321), 'six.text_type', 'six.text_type', (['c._canvas[0][0]'], {}), '(c._canvas[0][0])\n', (2304, 2321), False, 'import six\n'), ((2364, 2394), 'six.text_type', 'six.text_type', (['c._canvas[0][0]'], {}), '(c._canvas[0][0])\n', (2377, 2394), False, 'import six\n'), ((2431, 2461), 'six.text_type', 'six.text_type', (['c._canvas[0][0]'], {}), '(c._canvas[0][0])\n', (2444, 2461), False, 'import six\n'), ((2504, 2534), 'six.text_type', 'six.text_type', (['c._canvas[0][0]'], {}), '(c._canvas[0][0])\n', (2517, 2534), False, 'import six\n'), ((2571, 2601), 'six.text_type', 'six.text_type', (['c._canvas[0][0]'], {}), '(c._canvas[0][0])\n', (2584, 2601), False, 'import six\n'), ((2644, 2674), 'six.text_type', 'six.text_type', 
(['c._canvas[0][0]'], {}), '(c._canvas[0][0])\n', (2657, 2674), False, 'import six\n'), ((2711, 2741), 'six.text_type', 'six.text_type', (['c._canvas[0][0]'], {}), '(c._canvas[0][0])\n', (2724, 2741), False, 'import six\n'), ((2784, 2814), 'six.text_type', 'six.text_type', (['c._canvas[0][0]'], {}), '(c._canvas[0][0])\n', (2797, 2814), False, 'import six\n'), ((2905, 2935), 'six.text_type', 'six.text_type', (['c._canvas[0][0]'], {}), '(c._canvas[0][0])\n', (2918, 2935), False, 'import six\n'), ((2987, 3017), 'six.text_type', 'six.text_type', (['c._canvas[0][0]'], {}), '(c._canvas[0][0])\n', (3000, 3017), False, 'import six\n'), ((3300, 3330), 'six.text_type', 'six.text_type', (['c._canvas[0][0]'], {}), '(c._canvas[0][0])\n', (3313, 3330), False, 'import six\n'), ((3392, 3422), 'six.text_type', 'six.text_type', (['c._canvas[0][0]'], {}), '(c._canvas[0][0])\n', (3405, 3422), False, 'import six\n'), ((3711, 3741), 'six.text_type', 'six.text_type', (['c._canvas[0][0]'], {}), '(c._canvas[0][0])\n', (3724, 3741), False, 'import six\n'), ((3788, 3818), 'six.text_type', 'six.text_type', (['c._canvas[0][1]'], {}), '(c._canvas[0][1])\n', (3801, 3818), False, 'import six\n'), ((3882, 3912), 'six.text_type', 'six.text_type', (['c._canvas[0][0]'], {}), '(c._canvas[0][0])\n', (3895, 3912), False, 'import six\n'), ((3931, 3961), 'six.text_type', 'six.text_type', (['c._canvas[0][1]'], {}), '(c._canvas[0][1])\n', (3944, 3961), False, 'import six\n'), ((4057, 4087), 'six.text_type', 'six.text_type', (['c._canvas[0][0]'], {}), '(c._canvas[0][0])\n', (4070, 4087), False, 'import six\n'), ((4128, 4158), 'six.text_type', 'six.text_type', (['c._canvas[0][0]'], {}), '(c._canvas[0][0])\n', (4141, 4158), False, 'import six\n'), ((4177, 4207), 'six.text_type', 'six.text_type', (['c._canvas[0][1]'], {}), '(c._canvas[0][1])\n', (4190, 4207), False, 'import six\n'), ((4291, 4321), 'six.text_type', 'six.text_type', (['c._canvas[0][0]'], {}), '(c._canvas[0][0])\n', (4304, 4321), False, 'import 
six\n'), ((4340, 4370), 'six.text_type', 'six.text_type', (['c._canvas[0][1]'], {}), '(c._canvas[0][1])\n', (4353, 4370), False, 'import six\n'), ((4472, 4502), 'six.text_type', 'six.text_type', (['c._canvas[0][0]'], {}), '(c._canvas[0][0])\n', (4485, 4502), False, 'import six\n'), ((4521, 4551), 'six.text_type', 'six.text_type', (['c._canvas[0][1]'], {}), '(c._canvas[0][1])\n', (4534, 4551), False, 'import six\n'), ((4644, 4674), 'six.text_type', 'six.text_type', (['c._canvas[0][0]'], {}), '(c._canvas[0][0])\n', (4657, 4674), False, 'import six\n'), ((4693, 4723), 'six.text_type', 'six.text_type', (['c._canvas[0][1]'], {}), '(c._canvas[0][1])\n', (4706, 4723), False, 'import six\n'), ((4868, 4898), 'six.text_type', 'six.text_type', (['c._canvas[0][0]'], {}), '(c._canvas[0][0])\n', (4881, 4898), False, 'import six\n'), ((4917, 4947), 'six.text_type', 'six.text_type', (['c._canvas[0][1]'], {}), '(c._canvas[0][1])\n', (4930, 4947), False, 'import six\n'), ((5030, 5060), 'six.text_type', 'six.text_type', (['c._canvas[0][0]'], {}), '(c._canvas[0][0])\n', (5043, 5060), False, 'import six\n'), ((5079, 5109), 'six.text_type', 'six.text_type', (['c._canvas[0][1]'], {}), '(c._canvas[0][1])\n', (5092, 5109), False, 'import six\n'), ((5369, 5399), 'six.text_type', 'six.text_type', (['c._canvas[0][0]'], {}), '(c._canvas[0][0])\n', (5382, 5399), False, 'import six\n'), ((5446, 5476), 'six.text_type', 'six.text_type', (['c._canvas[0][1]'], {}), '(c._canvas[0][1])\n', (5459, 5476), False, 'import six\n'), ((5574, 5604), 'six.text_type', 'six.text_type', (['c._canvas[0][0]'], {}), '(c._canvas[0][0])\n', (5587, 5604), False, 'import six\n'), ((5651, 5681), 'six.text_type', 'six.text_type', (['c._canvas[0][1]'], {}), '(c._canvas[0][1])\n', (5664, 5681), False, 'import six\n'), ((5938, 5968), 'six.text_type', 'six.text_type', (['c._canvas[0][0]'], {}), '(c._canvas[0][0])\n', (5951, 5968), False, 'import six\n'), ((6064, 6094), 'six.text_type', 'six.text_type', (['c._canvas[0][0]'], 
{}), '(c._canvas[0][0])\n', (6077, 6094), False, 'import six\n'), ((8430, 8462), 'os.linesep.join', 'os.linesep.join', (["(['⠀' * 40] * 20)"], {}), "(['⠀' * 40] * 20)\n", (8445, 8462), False, 'import os\n'), ((8841, 8873), 'os.linesep.join', 'os.linesep.join', (["(['⠀' * 40] * 20)"], {}), "(['⠀' * 40] * 20)\n", (8856, 8873), False, 'import os\n'), ((9027, 9059), 'os.linesep.join', 'os.linesep.join', (["(['⠀' * 40] * 20)"], {}), "(['⠀' * 40] * 20)\n", (9042, 9059), False, 'import os\n'), ((11447, 11479), 'os.linesep.join', 'os.linesep.join', (["(['⠀' * 40] * 20)"], {}), "(['⠀' * 40] * 20)\n", (11462, 11479), False, 'import os\n'), ((11880, 11912), 'os.linesep.join', 'os.linesep.join', (["(['⠀' * 40] * 20)"], {}), "(['⠀' * 40] * 20)\n", (11895, 11912), False, 'import os\n'), ((12042, 12074), 'os.linesep.join', 'os.linesep.join', (["(['⠀' * 40] * 20)"], {}), "(['⠀' * 40] * 20)\n", (12057, 12074), False, 'import os\n'), ((12850, 12882), 'os.linesep.join', 'os.linesep.join', (["(['⠀' * 40] * 40)"], {}), "(['⠀' * 40] * 40)\n", (12865, 12882), False, 'import os\n'), ((13008, 13040), 'os.linesep.join', 'os.linesep.join', (["(['⠀' * 40] * 40)"], {}), "(['⠀' * 40] * 40)\n", (13023, 13040), False, 'import os\n'), ((13327, 13359), 'os.linesep.join', 'os.linesep.join', (["(['⠀' * 40] * 40)"], {}), "(['⠀' * 40] * 40)\n", (13342, 13359), False, 'import os\n'), ((13485, 13517), 'os.linesep.join', 'os.linesep.join', (["(['⠀' * 40] * 40)"], {}), "(['⠀' * 40] * 40)\n", (13500, 13517), False, 'import os\n')] |
# Learning to rank with the Galerkin method
from sys import argv
import numpy as np
import numpy.linalg
import scipy.linalg
import time
import sys
import pprCommon
# Hyper-parameters of the rank-learning optimization.
defaultParam = np.array([0.34, 0.33, 1.0, 0.33, 0.75, 0.25, 1.0])  # prior parameter values
LearnLambda = 1000.0  # weight of the quadratic regularizer pulling param towards defaultParam
LearnRate = 1e-4  # gradient-descent step size
LossB = 0.2  # margin of the pairwise hinge-style loss

# NOTE(review): Python 2 script (print-statement syntax throughout).
if len(argv) != 5:
    print >> sys.stderr, 'Usage %s [CSC-Prefix] [U.npy] [NumPaper] [TrainRank.txt]' % (argv[0])
    sys.exit(1)
cscPrefix = argv[1]  # prefix of the per-relation sparse matrices '<prefix><i>.bin'
basisFname = argv[2]  # NumPy file holding the basis U (n x dim)
numPaper = int(argv[3])  # first numPaper rows of b get the nonzero mass
trainRankFname = argv[4]  # file with the training ranking

# Load the basis
U = np.load(basisFname).astype(float)
# Add the following line due to a performance issue with early version of NumPy, see
# http://mail.scipy.org/pipermail/scipy-user/2014-May/035694.html
U = U.flatten().reshape(U.shape)
(n, dim) = U.shape
# NOTE(review): U'U is taken to be the identity, i.e. U is assumed to have
# orthonormal columns - confirm against how U was produced.
UtU = np.eye(dim)
# Load P_i and prepare for U'*P_i*U
Pi = []
UtPiU = []
for i in range(7):
    fname = '%s%d.bin' % (cscPrefix, i)
    _Pi = pprCommon.ReadBinCscMat(fname)
    Pi.append(_Pi)
    tic = time.time()
    _UtPiU = _Pi.T.dot(U).T.dot(U)
    toc = time.time()
    UtPiU.append(_UtPiU)
    print >> sys.stdout, "PROFILE: UtPU[%d] formed, %.1f sec elapsed." % (i, toc - tic)
    sys.stdout.flush()
# Get b
b = np.zeros(n)
b[:numPaper] = 0.15  # presumably the restart mass (0.15 = 1 - 0.85) - confirm
Utb = U.T.dot(b)  # b projected onto the basis
# Read train rank
trainRank = pprCommon.ReadTrainRank(trainRankFname)
trainSubset = list(set(trainRank))  # unique node indices in the training ranking
# submap maps a global node index to its position in trainSubset (-1 if absent).
submap = np.zeros(n, dtype = 'int')
submap[:] = -1
for (idx, val) in enumerate(trainSubset):
    submap[val] = idx
# Gradient descent on the parameter vector, starting from the prior values.
param = np.array(defaultParam)
for optIter in range(20):
    tic = time.time()
    # Assemble U'(I - 0.85 * sum_i param_i P_i)U and factorize it once;
    # both the solve and all gradient solves reuse this LU factorization.
    UtPU = np.zeros(UtU.shape)
    for i in range(len(param)):
        UtPU += param[i] * UtPiU[i]
    UtMU = UtU - 0.85 * UtPU
    luUtMU = scipy.linalg.lu_factor(UtMU)
    y = scipy.linalg.lu_solve(luUtMU, Utb)
    # dy/dparam_i, obtained by differentiating (UtMU) y = Utb w.r.t. param_i.
    grady = []
    for i in range(len(param)):
        rhs = 0.85 * (UtPiU[i].dot(y))
        gradyi = scipy.linalg.lu_solve(luUtMU, rhs)
        grady.append(gradyi)
    # Reconstruct x[TrainSubset]
    xtrain = U[trainSubset, :].dot(y)
    gradxtrain = []
    for i in range(len(param)):
        gradxtraini = U[trainSubset, :].dot(grady[i])
        gradxtrain.append(gradxtraini)
    # Compute the gradient and update parameter
    gradParam = 2.0 * LearnLambda * (param - defaultParam)  # Gradient of regularizer
    # Pairwise loss over all ordered pairs: a pair contributes whenever the
    # later-ranked node u scores within LossB of the earlier-ranked node v.
    for j1 in range(len(trainRank)):
        for j2 in range(j1 + 1, len(trainRank)):
            v = trainRank[j1]
            u = trainRank[j2]
            uu = submap[u]
            vv = submap[v]
            gradFactor = max(0.0, xtrain[uu] - xtrain[vv] + LossB)
            for i in range(len(param)):
                gradParam[i] += gradFactor * (gradxtrain[i][uu] - gradxtrain[i][vv])
    pprCommon.ProjectGradient(gradParam)  # project the gradient (constraint handling lives in pprCommon)
    param -= LearnRate * gradParam
    toc = time.time()
    print >> sys.stdout, "PROFILE: Optimization iteration %d finished, %.4f sec elapsed." % (optIter+1, (toc-tic))
    print >> sys.stdout, "INFO: Param = ", ' '.join([str(x) for x in param])
    sys.stdout.flush()
| [
"pprCommon.ReadTrainRank",
"numpy.load",
"numpy.zeros",
"pprCommon.ReadBinCscMat",
"time.time",
"numpy.array",
"sys.stdout.flush",
"pprCommon.ProjectGradient",
"numpy.eye",
"sys.exit"
] | [((183, 233), 'numpy.array', 'np.array', (['[0.34, 0.33, 1.0, 0.33, 0.75, 0.25, 1.0]'], {}), '([0.34, 0.33, 1.0, 0.33, 0.75, 0.25, 1.0])\n', (191, 233), True, 'import numpy as np\n'), ((781, 792), 'numpy.eye', 'np.eye', (['dim'], {}), '(dim)\n', (787, 792), True, 'import numpy as np\n'), ((1212, 1223), 'numpy.zeros', 'np.zeros', (['n'], {}), '(n)\n', (1220, 1223), True, 'import numpy as np\n'), ((1294, 1333), 'pprCommon.ReadTrainRank', 'pprCommon.ReadTrainRank', (['trainRankFname'], {}), '(trainRankFname)\n', (1317, 1333), False, 'import pprCommon\n'), ((1378, 1402), 'numpy.zeros', 'np.zeros', (['n'], {'dtype': '"""int"""'}), "(n, dtype='int')\n", (1386, 1402), True, 'import numpy as np\n'), ((1493, 1515), 'numpy.array', 'np.array', (['defaultParam'], {}), '(defaultParam)\n', (1501, 1515), True, 'import numpy as np\n'), ((405, 416), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (413, 416), False, 'import sys\n'), ((928, 958), 'pprCommon.ReadBinCscMat', 'pprCommon.ReadBinCscMat', (['fname'], {}), '(fname)\n', (951, 958), False, 'import pprCommon\n'), ((988, 999), 'time.time', 'time.time', ([], {}), '()\n', (997, 999), False, 'import time\n'), ((1045, 1056), 'time.time', 'time.time', ([], {}), '()\n', (1054, 1056), False, 'import time\n'), ((1178, 1196), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (1194, 1196), False, 'import sys\n'), ((1555, 1566), 'time.time', 'time.time', ([], {}), '()\n', (1564, 1566), False, 'import time\n'), ((1579, 1598), 'numpy.zeros', 'np.zeros', (['UtU.shape'], {}), '(UtU.shape)\n', (1587, 1598), True, 'import numpy as np\n'), ((2712, 2748), 'pprCommon.ProjectGradient', 'pprCommon.ProjectGradient', (['gradParam'], {}), '(gradParam)\n', (2737, 2748), False, 'import pprCommon\n'), ((2795, 2806), 'time.time', 'time.time', ([], {}), '()\n', (2804, 2806), False, 'import time\n'), ((3005, 3023), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (3021, 3023), False, 'import sys\n'), ((530, 549), 'numpy.load', 'np.load', 
(['basisFname'], {}), '(basisFname)\n', (537, 549), True, 'import numpy as np\n')] |
"""
Class and script for fitting microlensing model using MulensModel.
All the settings are read from a YAML file.
"""
import sys
import time
from os import path, sep
import tempfile
import shutil
import warnings
from multiprocessing import Pool
import math
import numpy as np
from scipy.interpolate import interp1d
from matplotlib import pyplot as plt
from matplotlib import gridspec, rc, rcParams, rcParamsDefault
from matplotlib.backends.backend_pdf import PdfPages
import os
# Limit native (OpenMP/BLAS) threading to one thread per process -
# presumably to avoid oversubscription when fitting with multiprocessing
# Pool workers; confirm intent.
os.environ["OMP_NUM_THREADS"] = "1"

# Optional third-party dependencies: record which ones failed to import
# instead of crashing, so the fitting code can report exactly what is
# missing for the requested mode.
import_failed = set()
try:
    import yaml
except Exception:
    import_failed.add("yaml")
try:
    import emcee
except Exception:
    import_failed.add("emcee")
try:
    import corner
except Exception:
    import_failed.add("corner")
try:
    from pymultinest.run import run as mn_run
    from pymultinest.analyse import Analyzer
except Exception:
    import_failed.add("pymultinest")

# MulensModel is a hard requirement - abort immediately if it is absent.
try:
    import MulensModel as mm
except Exception:
    raise ImportError('\nYou have to install MulensModel first!\n')

__version__ = '0.28.1dev'
class UlensModelFit(object):
"""
Class for fitting microlensing model using *MulensModel* package.
Parameters :
photometry_files: *list* or *str*
List of datasets. It can be either a *str* (then just gives
a name of one file that is read) or a *list*. For *list* each
element is either a *str* (then just gives the name of the file)
or a *dict*, which allows more options to be passed, e.g.,
.. code-block:: python
[{'file_name': 'data_1.dat', 'phot_fmt': 'mag'}]
or
.. code-block:: python
[{'file_name': 'data_1.dat'}, 'data_2.dat']
Currently, keyword ``'add_2450000'`` is turned on by default.
starting_parameters: *dict*
Starting values of the parameters.
It also indicates the EMCEE fitting mode.
Keys of this *dict* are microlensing parameters recognized by
*MulensModel* and values are *str*. First word indicates
the distribution (allowed: ``gauss``, ``uniform``, and
``log-uniform``) and is followed by its parameters.
For ``uniform`` and ``log-uniform`` these parameters are lower
and upper limit.
For ``gauss`` these parameters are mean and sigma of
the distribution. For example:
.. code-block:: python
{
't_0': 'gauss 2455703. 0.01',
'u_0': 'uniform 0.001 1.',
't_E': 'gauss 20. 5.'
}
prior_limits: *dict*
Upper and lower limits of parameters.
It also indicates the pyMultiNest fitting mode.
Keys are MulensModel parameters and values are lists of two floats
each (alternatively a string giving 2 floats can be provided - see
example below). Currently, no informative priors are allowed for
pyMultiNest fitting. Example input:
.. code-block:: python
{
't_0': [2455379.4, 2455379.76]
'u_0': [0.46, 0.65]
't_E': "16. 19.6"
}
model: *dict*
Additional settings for *MulensModel.Model*. Accepted keys:
``'coords'`` - event coordinates,
``'methods'`` - methods used for magnification calculation,
``'methods source 1'`` - methods used for magnification calculation
for the first source in binary source models,
``'methods source 2'`` - methods used for magnification calculation
for the second source in binary source models,
``'default method'`` - default magnification calculation method,
``'limb darkening u'`` - specifies a *dict* that gives limb
darkening coefficients in "u" convention, e.g.,
{'I': 0.4, 'V': 0.5}; note that for plotting the best model we use
the LD coefficient same as for the first dataset,
``'parameters'`` and ``'values'`` - used to plot specific model.
fixed_parameters: *dict*
Provide parameters that will be kept fixed during the fitting
process. This option is often used to set parameter reference
epoch, e.g., ``{'t_0_par': 2456789.}``.
min_values: *dict*
Minimum values of parameters that define the prior, e.g.,
``{'t_E': 0.}``. Note that the these are only limits of a prior.
Functional form of priors can be defines in ``fit_constraints``.
It works only for EMCEE fitting.
max_values: *dict*
Maximum values of parameters that define the prior, e.g.,
``{'u_0': 1.}``.
It works only for EMCEE fitting.
fitting_parameters: *dict*
Parameters of the fit function. They depend on the method used -
we discuss EMCEE and pyMultiNest below.
First - EMCEE. The required parameter is ``n_steps``.
You can also specify ``n_burn`` and ``n_walkers``. The ``n_burn``
controls the length of burn-in. If not provided, it is assumed to
be ``0.25*n_steps``. The ``n_walkers`` gives number of parallel
walkers to be run. If not provided, it is assumed four times
the number of parameters to be fitted.
Other options are described below.
The ``progress`` option (*bool* type value; default is *False*)
controls if a progress bar is shown.
It is possible to export posterior to a .npy file. Just provide
the file name as ``posterior file`` parameter. You can read this
file using ``numpy.load()``. You will get an array with a shape of
(``n_walkers``, ``n_steps-n_burn``, ``n_parameters``). You can
additionally add option ``posterior file fluxes`` for which
allowed values are ``all`` and *None* (``null`` in yaml file).
The value ``all`` means that additionally all source and blending
fluxes will be saved (``n_parameters`` increases by two times the
number of datasets).
Second - pyMultiNest. There are no required parameters, but a few
can be provided. Currently accepted ones are:
``basename`` (*str*) - common part of the output files produced by
MultiNest. If you don't provide it, then the output would be
saved to temporary files and deleted at the end.
``multimodal`` (*bool*) - do you want multiple modes in
the prosterior to be detected and reported separately?
``n_live_points`` (*int*) - number of live points, default value
is 400.
``sampling efficiency`` (*float*) - requested sampling efficiency.
MultiNest documentation suggests 0.8 (default value) for parameter
estimatrion and 0.3 for evidence evalutation.
``evidence tolerance`` (*float*) - requested tolerance of ln(Z)
calculation; default is 0.5 and should work well in most cases.
fit_constraints: *dict*
Constraints on model other than minimal and maximal values.
Currently accepted keys:
``'no_negative_blending_flux'`` - reject models with negative
blending flux if *True*
``'negative_blending_flux_sigma_mag'`` - impose a prior that
disfavours models with negative blending flux using gaussian prior
for negative values; the value provided should be on the order of
*20.*
``'prior'`` - specifies the priors for quantities. It's also
a *dict*. Possible key-value pairs:
``'t_E': 'Mroz et al. 2017'`` - efficiency-corrected t_E
distribution from that paper with two modifications: 1) it is
constant for t_E < 1d, 2) it follows Mao & Paczynski (1996)
analytical approximation (i.e., slope of -3) for t_E longer
than probed by Mroz et al. (2017; i.e., 316 d). Note that
Mroz et al. (2020) studied Galactic bulge.
``'t_E': 'Mroz et al. 2020'`` - similar to above but for
Mroz et al. (2020), where Galactic disc outside bulge region
was studied. Approximate slopes of 3 and -3 from
Mao & Paczynski (1996) are used for t_E shorter and longer,
respectively, than probed by Mroz et al. (2020).
``'pi_E_N': gauss mean sigma`` (same for ``'pi_E_E'``) -
specify gaussian prior for parallax components. Parameters
*mean* and *sigma* are floats.
References:
Mao & Paczynski 1996 -
https://ui.adsabs.harvard.edu/abs/1996ApJ...473...57M/abstract
Mroz et al. 2017 -
https://ui.adsabs.harvard.edu/abs/2017Natur.548..183M/abstract
Mroz et al. 2020 -
https://ui.adsabs.harvard.edu/abs/2020ApJS..249...16M/abstract
plots: *dict*
Parameters of the plots to be made after the fit. Currently
allowed keys are ``triangle``, ``trace`` (only EMCEE fitting),
and ``best model``.
The values are also dicts and currently accepted keys are:
1) for ``best model``:
``'file'``, ``'time range'``, ``'magnitude range'``, ``'legend'``,
and ``'rcParams'``,
2) for ``triangle`` and ``trace``:
``'file'`` and ``'shift t_0'`` (*bool*, *True* is default)
e.g.:
.. code-block:: python
{
'triangle':
'file': 'my_fit_triangle.png'
'trace':
'file': 'my_fit_trace_plot.png'
'shift t_0': False
'best model':
'file': 'my_fit_best.png'
'time range': 2456000. 2456300.
'magnitude range': 15.123 13.012
'legend':
'ncol': 2
'loc': 'lower center'
'rcParams':
'font.size': 15
}
Note that 'rcParams' allows setting many matplotlib parameters.
other_output: *dict*
Parameters for other output. Currently, the only allowed value is
``'models': {'file name': NAME_OF_FILE}`` where NAME_OF_FILE is
a *str* that gives a path to text file to which we will print all
models and their chi^2. If ``NAME_OF_FILE`` is ``"-"``, then
the models will be printed to standard output.
"""
    def __init__(
            self, photometry_files,
            starting_parameters=None, prior_limits=None, model=None,
            fixed_parameters=None,
            min_values=None, max_values=None, fitting_parameters=None,
            fit_constraints=None, plots=None, other_output=None
            ):
        """
        Store user settings and run the initial consistency checks.
        Parameters are described in detail in the class docstring.
        """
        self._check_MM_version()
        self._photometry_files = photometry_files
        self._starting_parameters = starting_parameters
        self._prior_limits = prior_limits
        self._model_parameters = model
        self._fixed_parameters = fixed_parameters
        self._min_values = min_values
        self._max_values = max_values
        self._fitting_parameters = fitting_parameters
        self._fit_constraints = fit_constraints
        self._plots = plots
        self._other_output = other_output
        # Decide whether settings indicate fitting or only plotting;
        # for fitting also deduce the method (EMCEE vs MultiNest).
        self._which_task()
        self._set_default_parameters()
        if self._task == 'fit':
            self._guess_fitting_method()
            self._set_fit_parameters_unsorted()
        self._check_imports()
def _check_MM_version(self):
"""
Check if MulensModel is new enough
"""
if int(mm.__version__.split('.')[0]) < 2:
raise RuntimeError(
"ulens_model_fit.py requires MulensModel in version "
"at least 2.0, but you are using " + mm.__version__)
    def _which_task(self):
        """
        Check if input parameters indicate run_fit() or plot_best_model() will
        be run. Sets self._task to 'fit' or 'plot'.
        """
        # Either starting_parameters (EMCEE) or prior_limits (MultiNest)
        # indicates a fit.
        if self._starting_parameters is not None:
            fit = True
        elif self._prior_limits is not None:
            fit = True
        else:
            fit = False
        # model['parameters'] together with model['values'] indicates
        # plotting of one specific model.
        plot = False
        if self._model_parameters is not None:
            keys = set(self._model_parameters.keys())
            check = keys.intersection({'parameters', 'values'})
            if len(check) == 1:
                raise ValueError(
                    'You have to specify either both or none of ' +
                    'model["parameters"] and model["values"].')
            if len(check) == 2:
                plot = True
        if plot and fit:
            raise ValueError(
                'Too many parameters specified!\nThe starting_parameters ' +
                'indicate you want to fit, but model["parameters"] and ' +
                'model["values"] indicate you want to plot. Please decide')
        if not plot and not fit:
            # Only fixed_parameters given - that fully defines a model,
            # so we can still plot it.
            if self._fixed_parameters is None:
                raise ValueError(
                    'Missing input information. Please specify parameters ' +
                    'to be plotted (model["parameters"] and ' +
                    'model["values"]) or starting_parameters to be fit.')
            else:
                plot = True
        if fit:
            self._task = 'fit'
        elif plot:
            self._task = 'plot'
            self._check_unnecessary_settings_plot()
        else:
            raise ValueError('internal error')
def _check_unnecessary_settings_plot(self):
"""
Make sure that there arent' too many parameters specified for:
self._task == 'plot'
"""
keys = ['_starting_parameters', '_min_values', '_max_values',
'_fitting_parameters', '_prior_limits']
for key in keys:
if getattr(self, key) is not None:
raise ValueError(
'In plotting mode you should not provide in __init__: ' +
key[1:])
if self._plots is not None:
if "triangle" in self._plots:
raise ValueError(
'You cannot provide plots["triangle"] if you '
"don't fit")
if "trace" in self._plots:
raise ValueError(
'You cannot provide plots["trace"] if you' "don't fit")
def _set_default_parameters(self):
"""
set some default parameters
"""
if self._task == 'fit':
self._flat_priors = True # Are priors only 0 or 1?
self._return_fluxes = True
self._best_model_ln_prob = -np.inf
self._flux_names = None
self._shift_t_0 = True
elif self._task == 'plot':
pass
else:
raise ValueError('internal error - task ' + str(self._task))
self._print_model = False
def _guess_fitting_method(self):
"""
guess what is the fitting method based on parameters provided
"""
method = None
if self._starting_parameters is not None:
method = "EMCEE"
if self._prior_limits is not None:
if method is not None:
raise ValueError(
"Both starting_parameters and prior_limits were defined "
"which makes impossible to choose the fitting method. "
"These settings indicate EMCEE and pyMultiNest "
"rescpectively, and cannot be both set.")
method = "MultiNest"
if method is None:
raise ValueError(
"No fitting method chosen. You can chose either 'EMCEE' or "
"'pyMultiNest' and you do it by providing "
"starting_parameters or prior_limits, respectively.")
self._fit_method = method
def _set_fit_parameters_unsorted(self):
"""
Find what are the fitted parameters. It will be sorted later.
"""
if self._fit_method == "EMCEE":
unsorted_keys = self._starting_parameters.keys()
elif self._fit_method == "MultiNest":
unsorted_keys = self._prior_limits.keys()
else:
raise ValueError('unexpected method error')
self._fit_parameters_unsorted = list(unsorted_keys)
self._n_fit_parameters = len(self._fit_parameters_unsorted)
    def _check_imports(self):
        """
        Check if all the optional packages required by current settings
        were successfully imported; raise ImportError otherwise.
        """
        required_packages = set()
        # Needed packages depend on the fitting method and requested plots.
        if self._task == 'fit':
            if self._fit_method == 'EMCEE':
                required_packages.add('emcee')
            elif self._fit_method == "MultiNest":
                required_packages.add('pymultinest')
        if self._plots is not None and 'triangle' in self._plots:
            required_packages.add('corner')
        # import_failed is a module-level set filled at import time.
        failed = import_failed.intersection(required_packages)
        if len(failed) > 0:
            message = (
                'Some of the required packages could not be imported:\n' +
                " ".join(failed))
            if "corner" in failed:
                message += (
                    "\nFor corner package it's enough that you run:\nwget " +
                    "https://raw.githubusercontent.com/dfm/corner.py/" +
                    "v2.0.0/corner/corner.py")
            raise ImportError(message)
    def run_fit(self):
        """
        Run the fit, print the output, and make the plots.
        This function does not accept any parameters. All the settings
        are passed via __init__().
        """
        if self._task != "fit":
            raise ValueError('wrong settings to run .run_fit()')
        # First, validate and parse all user settings.
        self._check_plots_parameters()
        self._check_model_parameters()
        self._check_other_fit_parameters()
        self._parse_other_output_parameters()
        self._get_datasets()
        self._get_parameters_ordered()
        self._get_parameters_latex()
        self._parse_fitting_parameters()
        self._set_prior_limits()
        self._parse_fit_constraints()
        if self._fit_method == "EMCEE":
            self._parse_starting_parameters()
        self._check_fixed_parameters()
        # Then build model/event, run the sampler, and produce outputs.
        self._make_model_and_event()
        if self._fit_method == "EMCEE":
            self._generate_random_parameters()
        self._setup_fit()
        self._run_fit()
        self._finish_fit()
        self._parse_results()
        self._make_plots()
    def plot_best_model(self):
        """
        Plot the best model.
        The parameters names and their values are taken from __init__()
        keyword ``model``, which is a *dict* and has this information in
        ``model['parameters']`` and ``model['values']``.
        """
        if self._task != "plot":
            raise ValueError('wrong settings to run .plot_best_model()')
        # Validate settings, read data, build the model, and plot it.
        self._check_plots_parameters()
        self._check_model_parameters()
        self._get_datasets()
        self._check_fixed_parameters()
        self._make_model_and_event()
        self._make_plots()
def _check_plots_parameters(self):
"""
Check if parameters of plots make sense
"""
allowed_keys = set(['best model', 'triangle', 'trace'])
if self._plots is None:
self._plots = dict()
return
unknown = set(self._plots.keys()) - allowed_keys
if len(unknown) > 0:
raise ValueError(
'Unknown plot types: {:}\n'.format(unknown) +
'Accepted plot types are: ' + ", ".join(allowed_keys))
for (key, value) in self._plots.items():
if value is None:
self._plots[key] = dict()
if 'best model' in self._plots:
self._check_plots_parameters_best_model()
if 'triangle' in self._plots:
self._check_plots_parameters_triangle()
if 'trace' in self._plots:
self._check_plots_parameters_trace()
names = {key: value['file'] for (key, value) in self._plots.items()}
done = {}
for (plot_type, name) in names.items():
if name is None:
continue
if name in done:
raise ValueError(
"Names of output plot files cannot repeat. They repeat "
"for: {:} and {:}".format(done[name], plot_type))
done[name] = plot_type
    def _check_plots_parameters_best_model(self):
        """
        Check if parameters of the best-model plot make sense;
        parse 'time range' and 'magnitude range' strings into float pairs.
        """
        allowed = set(['file', 'time range', 'magnitude range', 'legend',
                       'rcParams', 'second Y scale'])
        unknown = set(self._plots['best model'].keys()) - allowed
        if len(unknown) > 0:
            raise ValueError(
                'Unknown settings for "best model": {:}'.format(unknown))
        # 'time range' is a single string with two floats: "begin end".
        if 'time range' in self._plots['best model']:
            text = self._plots['best model']['time range'].split()
            if len(text) != 2:
                raise ValueError(
                    "'time range' for 'best model' should specify 2 " +
                    "values (begin and end); got: " +
                    str(self._plots['best model']['time range']))
            t_0 = float(text[0])
            t_1 = float(text[1])
            if t_1 < t_0:
                raise ValueError(
                    "Incorrect 'time range' for 'best model':\n" +
                    text[0] + " " + text[1])
            self._plots['best model']['time range'] = [t_0, t_1]
        # 'magnitude range' expects the fainter (larger) value first,
        # because magnitude axes are inverted.
        if 'magnitude range' in self._plots['best model']:
            text = self._plots['best model']['magnitude range'].split()
            if len(text) != 2:
                raise ValueError(
                    "'magnitude range' for 'best model' should specify 2 " +
                    "values (begin and end); got: " +
                    str(self._plots['best model']['magnitude range']))
            mag_0 = float(text[0])
            mag_1 = float(text[1])
            if mag_1 > mag_0:
                raise ValueError(
                    "Incorrect 'magnitude range' for 'best model':\n" +
                    text[0] + " " + text[1])
            self._plots['best model']['magnitude range'] = [mag_0, mag_1]
        # These settings are passed on as dicts (e.g., to matplotlib).
        for key in ['legend', 'rcParams', 'second Y scale']:
            if key in self._plots['best model']:
                if not isinstance(self._plots['best model'][key], dict):
                    msg = ('The value of {:} (in best model setttings)'
                           'must be a dictionary, but you provided {:}')
                    args = [key, type(self._plots['best model'][key])]
                    raise TypeError(msg.format(*args))
        if 'second Y scale' in self._plots['best model']:
            self._check_plots_parameters_best_model_Y_scale()
def _check_plots_parameters_best_model_Y_scale(self):
"""
Check if parameters for second Y scale make sense.
This function assumes that the second Y scale will be plotted.
"""
settings = self._plots['best model']['second Y scale']
allowed = set(['color', 'label', 'labels', 'magnifications'])
unknown = set(settings.keys()) - allowed
if len(unknown) > 0:
raise ValueError(
'Unknown settings for "second Y scale" in '
'"best model": {:}'.format(unknown))
if not isinstance(settings['magnifications'], list):
raise TypeError(
'"best model" -> "second Y scale" -> "magnifications" has to '
'be a list, not ' + str(type(settings['magnifications'])))
for value in settings['magnifications']:
if not isinstance(value, (int, float)):
raise TypeError(
'Wrong value in magnifications: ' + str(value))
if 'labels' not in settings:
settings['labels'] = [
str(x) for x in settings['magnifications']]
else:
if not isinstance(settings['labels'], list):
raise TypeError(
'"best model" -> "second Y scale" -> "labels" has to be '
'a list, not ' + str(type(settings['labels'])))
if len(settings['labels']) != len(settings['magnifications']):
raise ValueError(
'In "best model" -> "second Y scale", labels and '
'magnifications must be lists of the same length')
def _check_plots_parameters_triangle(self):
"""
Check if parameters of triangle plot make sense
"""
allowed = set(['file', 'shift t_0'])
unknown = set(self._plots['triangle'].keys()) - allowed
if len(unknown) > 0:
raise ValueError(
'Unknown settings for "triangle": {:}'.format(unknown))
self._parse_plots_parameter_shift_t_0(self._plots['triangle'])
def _parse_plots_parameter_shift_t_0(self, settings):
"""
Check if 'shift t_0' is provided and parse it
"""
if 'shift t_0' not in settings:
return
value = settings['shift t_0']
if not isinstance(value, bool):
raise TypeError(
'For triangle and trace plots, the value of "shift t_0" key '
'must be of bool type; you provided: ' + str(type(value)))
self._shift_t_0 = value
def _check_plots_parameters_trace(self):
"""
Check if parameters of trace plot make sense
"""
allowed = set(['file', 'shift t_0'])
unknown = set(self._plots['trace'].keys()) - allowed
if len(unknown) > 0:
raise ValueError(
'Unknown settings for "trace": {:}'.format(unknown))
if self._fit_method == "MultiNest":
raise ValueError(
'Trace plot cannot be requested for MultiNest fit')
self._parse_plots_parameter_shift_t_0(self._plots['trace'])
    def _check_model_parameters(self):
        """
        Check parameters of the MulensModel.Model provided by the user
        directly. Parses 'methods*' strings and 'parameters'/'values'
        in place.
        """
        if self._model_parameters is None:
            self._model_parameters = dict()
        allowed = {'coords', 'default method', 'methods',
                   'methods source 1', 'methods source 2',
                   'parameters', 'values', 'limb darkening u'}
        keys = set(self._model_parameters.keys())
        not_allowed = keys - allowed
        if len(not_allowed) > 0:
            raise ValueError(
                'model keyword is a dict with keys not allowed: ' +
                str(not_allowed))
        # Turn "epoch method epoch ..." strings into parsed lists.
        for key in {'methods', 'methods source 1', 'methods source 2'}:
            if key in self._model_parameters:
                self._model_parameters[key] = self._parse_methods(
                    self._model_parameters[key])
        check = keys.intersection({'parameters', 'values'})
        if len(check) == 1:
            raise ValueError("If you specify 'parameters' and 'values' for " +
                             "'model', then both have to be defined")
        if len(check) == 2:
            self._model_parameters['parameters'] = (
                self._model_parameters['parameters'].split())
            self._model_parameters['values'] = [
                float(x) for x in self._model_parameters['values'].split()]
        # Collect every parameter name used anywhere; parallax parameters
        # require event coordinates to be set.
        all_parameters = []
        if self._fixed_parameters is not None:
            all_parameters += list(self._fixed_parameters.keys())
        if 'parameters' in keys:
            all_parameters += self._model_parameters['parameters']
        if self._task == 'fit':
            all_parameters += self._fit_parameters_unsorted
        if 'pi_E_E' in all_parameters or 'pi_E_N' in all_parameters:
            if 'coords' not in self._model_parameters:
                raise ValueError("Parallax model requires model['coords'].")
def _check_other_fit_parameters(self):
"""
Check if there aren't any other inconsistenties between settings
"""
if self._fit_method == "MultiNest":
if self._min_values is not None or self._max_values is not None:
raise ValueError("In MultiNest fitting you cannot set "
"min_values or max_values")
def _parse_methods(self, methods):
"""
check if odd elements are floats and parse them
"""
if isinstance(methods, str):
_enumerate = enumerate(methods.split())
elif isinstance(methods, list):
_enumerate = enumerate(methods)
else:
raise TypeError(
'Wrong type of settings specifying methods used to calculate '
'magnification ("list" or "str" expected): ' +
str(type(methods)))
try:
out = [float(x) if i % 2 == 0 else x for (i, x) in _enumerate]
except ValueError:
raise ValueError(
"Error in parsing floats in methods:\n" + methods)
if len(out) < 3 or len(out) % 2 != 1:
raise ValueError(
"Error in parsing methods:\n" + methods)
return out
def _parse_other_output_parameters(self):
"""
parse information on other output
"""
if self._other_output is None:
return
for (key, value) in self._other_output.items():
if key == 'models':
if not isinstance(value, dict):
raise ValueError('models value should also be *dict*, ' +
'got ' + str(type(value)))
for (key2, value2) in value.items():
if key2 == 'file name':
self._print_model = True
self._print_model_i = 0
if value2 == '-':
self._print_model_file = sys.stdout
else:
try:
self._print_model_file = open(value2, 'w')
except Exception:
raise ValueError(
'Error while opening file ' + str(value2))
else:
raise KeyError("Unrecognized key: " + str(key) +
"\nExpected keys: 'file name'.")
else:
raise ValueError('Unrecognized key: ' + str(key) + "\n" +
"Expected keys: models")
    def _get_datasets(self):
        """
        Construct a list of MulensModel.MulensData objects from
        self._photometry_files and store it in self._datasets.
        """
        # Note: 2450000 is always added to the input epochs.
        kwargs = {'add_2450000': True}
        if isinstance(self._photometry_files, str):
            self._photometry_files = [self._photometry_files]
        elif not isinstance(self._photometry_files, list):
            raise TypeError(
                'photometry_files should be a list or a str, but you '
                'provided ' + str(type(self._photometry_files)))
        # Each entry may be a plain file name or a dict of MulensData kwargs.
        files = [f if isinstance(f, dict) else {'file_name': f}
                 for f in self._photometry_files]
        self._datasets = []
        for file_ in files:
            try:
                dataset = mm.MulensData(**{**kwargs, **file_})
            except FileNotFoundError:
                raise FileNotFoundError(
                    'Provided file path does not exist: ' +
                    str(file_['file_name']))
            except Exception:
                print('Something went wrong while reading file ' +
                      str(file_['file_name']), file=sys.stderr)
                raise
            self._datasets.append(dataset)
def _get_parameters_ordered(self):
"""
Order input parameters in some logical way.
This is useful to make sure the order of printed parameters
is always the same.
"""
all_fit_parameters_str = (
't_0 u_0 t_0_1 u_0_1 t_0_2 u_0_2 t_E t_eff rho rho_1 rho_2 ' +
't_star t_star_1 t_star_2 pi_E_N pi_E_E s q alpha ds_dt ' +
'dalpha_dt x_caustic_in x_caustic_out t_caustic_in t_caustic_out')
all_fit_parameters = all_fit_parameters_str.split()
unknown = set(self._fit_parameters_unsorted) - set(all_fit_parameters)
if len(unknown) > 0:
raise ValueError('Unknown parameters: {:}'.format(unknown))
indexes = [all_fit_parameters.index(p)
for p in self._fit_parameters_unsorted]
self._fit_parameters = [all_fit_parameters[i] for i in indexes]
def _get_parameters_latex(self):
"""
change self._fit_parameters into latex parameters
"""
conversion = dict(
t_0='t_0', u_0='u_0',
t_0_1='t_{0,1}', u_0_1='u_{0,1}',
t_0_2='t_{0,2}', u_0_2='u_{0,2}', t_E='t_{\\rm E}',
t_eff='t_{\\rm eff}', rho='\\rho', rho_1='\\rho_1',
rho_2='\\rho_2', t_star='t_{\\star}', t_star_1='t_{\\star,1}',
t_star_2='t_{\\star,2}', pi_E_N='\\pi_{{\\rm E},N}',
pi_E_E='\\pi_{{\\rm E},E}', s='s', q='q', alpha='\\alpha',
ds_dt='ds/dt', dalpha_dt='d\\alpha/dt',
x_caustic_in='x_{\\rm caustic,in}',
x_caustic_out='x_{\\rm caustic,out}',
t_caustic_in='t_{\\rm caustic,in}',
t_caustic_out='t_{\\rm caustic,out}')
if self._shift_t_0:
for key in ['t_0', 't_0_1', 't_0_2']:
conversion[key] = '\\Delta ' + conversion[key]
self._fit_parameters_latex = [
('$' + conversion[key] + '$') for key in self._fit_parameters]
def _parse_fitting_parameters(self):
"""
run some checks on self._fitting_parameters to make sure that
the fit can be run
"""
if self._fit_method == 'EMCEE':
self._parse_fitting_parameters_EMCEE()
self._get_n_walkers()
elif self._fit_method == 'MultiNest':
self._parse_fitting_parameters_MultiNest()
else:
raise ValueError('internal inconsistency')
    def _parse_fitting_parameters_EMCEE(self):
        """
        Make sure EMCEE fitting parameters are properly defined.
        Also builds self._kwargs_EMCEE and posterior-file settings.
        """
        settings = self._fitting_parameters
        ints_required = ['n_steps']
        required = ints_required
        bools = ['progress']
        ints = ['n_walkers', 'n_burn']
        strings = ['posterior file', 'posterior file fluxes']
        allowed = bools + ints + strings
        self._check_required_and_allowed_parameters(required, allowed)
        self._check_parameters_types(settings, bools=bools,
                                     ints=ints+ints_required, strings=strings)
        self._kwargs_EMCEE = {'initial_state': None,  # It will be set later.
                              'nsteps': self._fitting_parameters['n_steps'],
                              'progress': False}
        if 'progress' in settings:
            self._kwargs_EMCEE['progress'] = settings['progress']
        # Default burn-in is 25% of the total number of steps.
        if 'n_burn' in settings:
            if settings['n_burn'] >= settings['n_steps']:
                raise ValueError('You cannot set n_burn >= n_steps.')
        else:
            settings['n_burn'] = int(0.25*self._fitting_parameters['n_steps'])
        if 'posterior file' not in settings:
            self._posterior_file_name = None
            if 'posterior file fluxes' in settings:
                raise ValueError('You cannot set "posterior file fluxes" ' +
                                 'without setting "posterior file"')
        else:
            name = settings['posterior file']
            if name[-4:] != '.npy':
                raise ValueError('"posterior file" must end in ".npy", ' +
                                 'got: ' + name)
            if path.exists(name):
                if path.isfile(name):
                    msg = "Exisiting file " + name + " will be overwritten"
                    warnings.warn(msg)
                else:
                    raise ValueError("The path provided for posterior (" +
                                     name + ") exsists and is a directory")
            # Name is stored without the '.npy' suffix (np.save adds it).
            self._posterior_file_name = name[:-4]
            self._posterior_file_fluxes = None
            if 'posterior file fluxes' in settings:
                fluxes_allowed = ['all', None]
                if settings['posterior file fluxes'] not in fluxes_allowed:
                    raise ValueError('Unrecognized "posterior file fluxes": ' +
                                     settings['posterior file fluxes'])
                self._posterior_file_fluxes = settings['posterior file fluxes']
def _check_required_and_allowed_parameters(self, required, allowed):
"""
Check if required parameters are provided and there aren't parameters
that shouldn't be defined.
"""
settings = self._fitting_parameters
if settings is None:
settings = dict()
full = required + allowed
for required_ in required:
if required_ not in settings:
raise ValueError('EMCEE method requires fitting parameter: ' +
required_)
if len(set(settings.keys()) - set(full)) > 0:
raise ValueError('Unexpected fitting parameters: ' +
str(set(settings.keys()) - set(full)))
def _check_parameters_types(self, settings, bools=None,
ints=None, floats=None, strings=None):
"""
Check if the settings have right type.
For floats we accept ints as well.
"""
if bools is None:
bools = []
if ints is None:
ints = []
if floats is None:
floats = []
if strings is None:
strings = []
fmt = "For key {:} the expected type is {:}, but got {:}"
for (key, value) in settings.items():
if key in bools:
if not isinstance(value, bool):
raise TypeError(
fmt.format(key, "bool", str(type(value))))
elif key in ints:
if not isinstance(value, int):
raise TypeError(
fmt.format(key, "int", str(type(value))))
elif key in floats:
if not isinstance(value, (float, int)):
raise TypeError(
fmt.format(key, "float", str(type(value))))
elif key in strings:
if not isinstance(value, str):
raise TypeError(
fmt.format(key, "string", str(type(value))))
else:
raise ValueError(
"internal bug - no type for key " + key + " specified")
def _get_n_walkers(self):
"""
Guess how many walkers (and hence starting values) there will be.
EMCEE fitting only.
"""
if self._fit_method != 'EMCEE':
raise ValueError('internal bug')
if 'n_walkers' in self._fitting_parameters:
self._n_walkers = self._fitting_parameters['n_walkers']
else:
self._n_walkers = 4 * len(self._starting_parameters)
    def _parse_fitting_parameters_MultiNest(self):
        """
        Make sure MultiNest fitting parameters are properly defined and
        translate them into keyword arguments for pymultinest.run().
        """
        self._kwargs_MultiNest = dict()
        settings = self._fitting_parameters
        if settings is None:
            settings = dict()
        required = []
        bools = ['multimodal']
        ints = ['n_live_points']
        strings = ['basename']
        floats = ['sampling efficiency', 'evidence tolerance']
        allowed = strings + bools + ints + floats
        self._check_required_and_allowed_parameters(required, allowed)
        self._check_parameters_types(settings, bools, ints, floats, strings)
        self._kwargs_MultiNest['multimodal'] = False
        # Map user-facing keys to pymultinest.run() keyword names.
        keys = {"basename": "outputfiles_basename",
                "sampling efficiency": "sampling_efficiency",
                "evidence tolerance": "evidence_tolerance"}
        same_keys = ["multimodal", "n_live_points"]
        keys = {**keys, **{key: key for key in same_keys}}
        self._set_dict_safetly(self._kwargs_MultiNest, settings, keys)
        # INS and multimodal mode are mutually exclusive in MultiNest.
        self._kwargs_MultiNest['importance_nested_sampling'] = (
            not self._kwargs_MultiNest['multimodal'])
        self._MN_temporary_files = False
        if 'basename' not in settings:
            # No basename: write to a temporary directory that is
            # removed after the fit.
            print("No base for MultiNest output provided.")
            self._kwargs_MultiNest['outputfiles_basename'] = (
                tempfile.mkdtemp('_MM_ex16_pyMN') + sep)
            self._MN_temporary_files = True
        self._check_output_files_MultiNest()
def _set_dict_safetly(self, target, source, keys_mapping):
"""
For each key in keys_mapping (*dict*) check if it is in
source (*dict*). If it is, then set
target[keys_mapping[key]] to source[key].
"""
for (key_in, key_out) in keys_mapping.items():
if key_in in source:
target[key_out] = source[key_in]
def _check_output_files_MultiNest(self):
"""
Check if output files exist and warn about overwrtting them.
If they directory doesn't exist then raise error.
"""
root = self._kwargs_MultiNest['outputfiles_basename']
if len(path.dirname(root)) != 0 and not path.isdir(path.dirname(root)):
msg = 'directory for output files does not exist; root path: '
raise ValueError(msg + root)
if path.isdir(root):
root += sep
check = (
'resume.dat phys_live.points live.points phys_live-birth.txt '
'ev.dat dead-birth.txt .txt stats.dat post_equal_weights.dat'
'post_separate_strict.dat post_separate.dat summary.txt').split()
if self._kwargs_MultiNest['importance_nested_sampling']:
check += ['IS.points', 'IS.ptprob', 'IS.iterinfo']
root = self._kwargs_MultiNest['outputfiles_basename']
if path.isdir(root):
root += sep
existing = []
for check_ in check:
file_name = root + check_
if path.isfile(file_name):
existing.append(file_name)
if len(existing) > 0:
message = "\n\n Exisiting files will be overwritten "
message += "(unless you kill this process)!!!\n"
warnings.warn(message + str(existing) + "\n")
def _set_prior_limits(self):
"""
Set minimum and maximum values of the prior space
"""
if self._fit_method == 'EMCEE':
self._set_prior_limits_EMCEE()
elif self._fit_method == 'MultiNest':
self._set_prior_limits_MultiNest()
else:
raise ValueError('internal bug')
def _set_prior_limits_EMCEE(self):
"""
Parse min and max values of parameters so that they're properly
indexed.
"""
if self._min_values is None:
self._min_values = []
if self._max_values is None:
self._max_values = []
for key in self._min_values:
if key in self._max_values:
if self._min_values[key] >= self._max_values[key]:
fmt = (
"This doesn't make sense: for {:} the lower limit " +
"is larger than the upper limit: {:} vs {:}")
raise ValueError(fmt.format(
key, self._min_values[key], self._max_values[key]))
self._min_values_indexed = self._parse_min_max_values_single(
self._min_values)
self._max_values_indexed = self._parse_min_max_values_single(
self._max_values)
def _parse_min_max_values_single(self, limits):
"""
change dict that has str as key to index as key
"""
out = dict()
if len(limits) == 0:
return out
for (key, value) in limits.items():
if key not in self._fit_parameters:
raise ValueError(
'Key provided in limits: {:}\n'.format(key) +
'is not one of the parameters for fitting: ' +
'{:}'.format(self._fit_parameters))
index = self._fit_parameters.index(key)
out[index] = value
return out
def _set_prior_limits_MultiNest(self):
"""
Set prior limits and transformation constants (from unit cube to
MM parameters).
"""
min_values = []
max_values = []
for parameter in self._fit_parameters:
if parameter not in self._prior_limits:
raise ValueError("interal issue")
values = self._prior_limits[parameter]
if isinstance(values, str):
values = values.split()
if not isinstance(values, list) or len(values) != 2:
raise ValueError(
"prior_limits for " + parameter + " could not be "
"processed: " + str(self._prior_limits[parameter]))
try:
values = [float(v) for v in values]
except Exception:
raise ValueError(
"couldn't get floats for prior_limits of " + parameter +
": " + str(self._prior_limits[parameter]))
if values[0] >= values[1]:
raise ValueError(
"This won't work - wrong order of limits for " +
parameter + ": " + str(values))
min_values.append(values[0])
max_values.append(values[1])
self._min_values = np.array(min_values)
self._max_values = np.array(max_values)
self._range_values = self._max_values - self._min_values
def _parse_fit_constraints(self):
"""
Parse the fitting constraints that are not simple limits on parameters
"""
if self._fit_method == 'MultiNest':
if self._fit_constraints is not None:
raise NotImplementedError(
"Currently no fit_constraints are implemented for "
"MultiNest fit. Please contact <NAME> with "
"a specific request.")
self._prior_t_E = None
self._priors = None
if self._fit_constraints is None:
self._fit_constraints = {"no_negative_blending_flux": False}
return
if isinstance(self._fit_constraints, list):
raise TypeError(
"In version 0.5.0 we've changed type of 'fit_constraints' " +
"from list to dict. Please correct you input and re-run " +
"the code. Most probably what you need is:\n" +
"fit_constraints = {'no_negative_blending_flux': True}")
allowed_keys_flux = {
"no_negative_blending_flux", "negative_blending_flux_sigma_mag"}
allowed_keys = {*allowed_keys_flux, "prior"}
used_keys = set(self._fit_constraints.keys())
if len(used_keys - allowed_keys) > 0:
raise ValueError(
'unrecognized constraint: {:}'.format(forbidden))
if len(used_keys.intersection(allowed_keys_flux)) == 2:
raise ValueError(
'you cannot specify both no_negative_blending_flux and ' +
'negative_blending_flux_sigma_mag')
if "no_negative_blending_flux" not in self._fit_constraints:
self._fit_constraints["no_negative_blending_flux"] = False
key = "negative_blending_flux_sigma_mag"
if key in used_keys:
self._fit_constraints[key] = mm.Utils.get_flux_from_mag(
self._fit_constraints[key])
if 'prior' in self._fit_constraints:
self._parse_fit_constraints_prior()
def _parse_fit_constraints_prior(self):
"""
Check if priors in fit constraint are correctly defined.
"""
priors = dict()
for (key, value) in self._fit_constraints['prior'].items():
if key == 't_E':
if value == "Mroz et al. 2017":
self._prior_t_E = 'Mroz+17'
elif value == "Mroz et al. 2020":
self._prior_t_E = 'Mroz+20'
else:
raise ValueError("Unrecognized t_E prior: " + value)
self._read_prior_t_E_data()
elif key in ['pi_E_E', 'pi_E_N']:
words = value.split()
if len(words) != 3 or words[0] != 'gauss':
msg = "Something went wrong in parsing prior for "
msg += "{:}: {:}"
raise ValueError(msg.format(key, value))
try:
settings = [words[0], float(words[1]), float(words[2])]
except Exception:
raise ValueError('error in parsing: ' + words[1] + " " +
words[2])
if settings[2] < 0.:
raise ValueError('sigma cannot be negative: ' + words[2])
priors[key] = settings
else:
raise KeyError(
"Unrecognized key in fit_constraints/prior: " + key)
self._flat_priors = False
if len(priors) > 0:
self._priors = priors
    def _read_prior_t_E_data(self):
        """
        read data that specify t_E prior and parse them appropriately

        Fills self._prior_t_E_data with keys 'x_min', 'x_max', 'y_min',
        'y_max', and 'function', where x = log10(t_E) and the function
        interpolates ln() of the tabulated y values.
        """
        self._prior_t_E_data = dict()
        if self._prior_t_E == 'Mroz+17':
            # Tabulated values for the Mroz+17 t_E prior:
            # x = log10(t_E), y = values whose natural log is interpolated.
            x = np.array([
                -0.93, -0.79, -0.65, -0.51, -0.37, -0.23, -0.09, 0.05, 0.19,
                0.33, 0.47, 0.61, 0.75, 0.89, 1.03, 1.17, 1.31, 1.45, 1.59,
                1.73, 1.87, 2.01, 2.15, 2.29, 2.43])
            y = np.array([
                299.40, 245.60, 358.50, 116.96, 0.00, 47.78, 85.10, 90.50,
                315.37, 501.77, 898.26, 1559.68, 2381.46, 2849.11, 3405.00,
                3431.30, 3611.76, 3038.06, 2170.67, 1680.38, 814.70, 444.06,
                254.89, 114.19, 52.14])
            dx = x[1] - x[0]
            x_min = 0.
            x_max = x[-1] + 0.5 * dx
            # The mask also drops the low-x points (including the y = 0.00
            # entry, for which np.log() would be -inf).
            mask = (x > x_min-dx)  # We need one more point for extrapolation.
            function = interp1d(x[mask], np.log(y[mask]),
                                kind='cubic', fill_value="extrapolate")
        elif self._prior_t_E == 'Mroz+20':
            # XXX - TO DO:
            # - documentation
            # - smooth the input data from M+20 and note that
            x = np.array([
                0.74, 0.88, 1.01, 1.15, 1.28, 1.42, 1.55, 1.69,
                1.82, 1.96, 2.09, 2.23, 2.36, 2.50, 2.63])
            y = np.array([
                82.04, 94.98, 167.76, 507.81, 402.08, 681.61, 1157.51,
                1132.80, 668.12, 412.20, 236.14, 335.34, 74.88, 52.64, 97.78])
            # Note: here the half-bin width is used and the limits extend
            # half a bin beyond the first/last tabulated points.
            dx = (x[1] - x[0]) / 2.
            x_min = x[0] - dx
            x_max = x[-1] + dx
            function = interp1d(x, np.log(y),
                                kind='cubic', fill_value="extrapolate")
        else:
            raise ValueError('unexpected internal error')
        self._prior_t_E_data['x_min'] = x_min
        self._prior_t_E_data['x_max'] = x_max
        self._prior_t_E_data['y_min'] = function(x_min)
        self._prior_t_E_data['y_max'] = function(x_max)
        self._prior_t_E_data['function'] = function
def _parse_starting_parameters(self):
"""
replace self._starting_parameters with dict that has values
[*str*, *float*, ...]
and make basic checks
"""
accepted_types = ['gauss', 'uniform', 'log-uniform']
out = dict()
for (key, value) in self._starting_parameters.items():
words = value.split()
if words[0] not in accepted_types:
raise ValueError(
'starting parameter: ' + words[0] + ' is not recognized.' +
'Allowed parameters: ' + str(accepted_types))
if len(words) != 3:
raise ValueError('Expected 3 parameters, got: ' + str(words))
floats = []
for word in words[1:]:
try:
floats.append(float(word))
except Exception:
raise ValueError('Expected float, got {:}'.format(word))
if words[0] == 'gauss':
if floats[1] < 0.:
raise ValueError(
'Sigma cannot be negative, got: ' + str(floats[1]))
if words[0] in ['uniform', 'log-uniform']:
if floats[1] < floats[0]:
raise ValueError(
'For uniform distribution, the second parameters ' +
'has to be larger than the first one.\n Got ' +
'{:} {:}'.format(floats[0], floats[1]))
out[key] = [words[0]] + floats
self._starting_parameters = out
def _check_fixed_parameters(self):
"""
Check if fixed_parameters make sense
"""
if self._fixed_parameters is None:
return
all_parameters = (
't_0 u_0 t_0_1 u_0_1 t_0_2 u_0_2 t_E t_eff rho rho_1 rho_2 ' +
't_star t_star_1 t_star_2 pi_E_N pi_E_E s q alpha ds_dt ' +
'dalpha_dt x_caustic_in x_caustic_out ' +
't_caustic_in t_caustic_out ' +
't_0_par t_0_kep')
all_parameters = set(all_parameters.split())
fixed = set(self._fixed_parameters.keys())
unknown = fixed - all_parameters
if len(unknown) > 0:
raise ValueError('Unknown fixed parameters: {:}'.format(unknown))
if self._task == 'plot':
return
repeated = set(self._fit_parameters).intersection(fixed)
if len(repeated) > 0:
raise ValueError(
'Some parameters are both fitted and fixed: ' +
'{:}'.format(repeated))
    def _make_model_and_event(self):
        """
        Set internal MulensModel instances: Model and Event

        Builds self._model from example parameters, one extra model per
        dataset that has an ephemerides file (self._models_satellite),
        applies limb-darkening and magnification-method settings, and
        finally constructs self._event.
        """
        parameters = self._get_example_parameters()
        kwargs = dict()
        if 'coords' in self._model_parameters:
            kwargs['coords'] = self._model_parameters['coords']
        try:
            self._model = mm.Model(parameters, **kwargs)
        except Exception:
            # Report the parameters that broke the Model, then re-raise.
            print("Initializer of MulensModel.Model failed.")
            print("Parameters passed: {:}".format(parameters))
            raise
        self._models_satellite = []
        for dataset in self._datasets:
            if dataset.ephemerides_file is None:
                continue
            model = mm.Model(
                parameters, ephemerides_file=dataset.ephemerides_file,
                **kwargs)
            self._models_satellite.append(model)
        key = 'limb darkening u'
        for model in [self._model] + self._models_satellite:
            if key in self._model_parameters:
                for (band, u_value) in self._model_parameters[key].items():
                    model.set_limb_coeff_u(band, u_value)
            if 'default method' in self._model_parameters:
                model.set_default_magnification_method(
                    self._model_parameters['default method'])
            if 'methods' in self._model_parameters:
                model.set_magnification_methods(
                    self._model_parameters['methods'])
            # NOTE(review): the per-source methods below are applied to
            # self._model (repeatedly, once per loop iteration), not to the
            # satellite models - confirm whether that is intended.
            if 'methods source 1' in self._model_parameters:
                self._model.set_magnification_methods(
                    self._model_parameters['methods source 1'], 1)
            if 'methods source 2' in self._model_parameters:
                self._model.set_magnification_methods(
                    self._model_parameters['methods source 2'], 2)
        self._event = mm.Event(self._datasets, self._model)
        self._event.sum_function = 'numpy.sum'
        self._set_n_fluxes()
    def _set_n_fluxes(self):
        """
        find out how many flux parameters there are

        Sets self._n_fluxes_per_dataset (source fluxes + 1 for blending)
        and self._n_fluxes (total over all datasets). Requires a chi2
        evaluation first, so that self._event.fits is populated.
        """
        try:
            self._event.get_chi2()
        except ValueError:
            if 'x_caustic_in' in self._model.parameters.parameters:
                # Cassan08 parameterization: nudge x_caustic_in away from
                # x_caustic_out and retry (the example values can make
                # get_chi2() raise ValueError).
                self._model.parameters.x_caustic_in = (
                    self._model.parameters.x_caustic_out + 0.01)
                self._event.get_chi2()
            else:
                raise
        n = 0
        for (i, dataset) in enumerate(self._datasets):
            k = len(self._event.fits[i].source_fluxes) + 1
            # Plus 1 is for blending.
            if i == 0:
                self._n_fluxes_per_dataset = k
            elif k != self._n_fluxes_per_dataset:
                # All datasets must have the same number of flux parameters.
                raise ValueError(
                    'Strange internal error with number of source fluxes: ' +
                    "{:} {:} {:}".format(i, k, self._n_fluxes_per_dataset))
            n += k
        self._n_fluxes = n
    def _get_example_parameters(self):
        """
        Generate parameters *dict* according to provided starting and fixed
        parameters.

        For 'plot' task the parameters come directly from the input file;
        for 'fit' task they are example draws used only to construct
        mm.Model.
        """
        if self._task == 'plot':
            parameters = dict(zip(
                self._model_parameters['parameters'],
                self._model_parameters['values']))
            # XXX this is some kind of a hack:
            self._best_model_theta = []
            self._fit_parameters = []
        elif self._task == 'fit':
            if self._fit_method == 'EMCEE':
                parameters = self._get_example_parameters_EMCEE()
            elif self._fit_method == 'MultiNest':
                # Use the center of each prior box as the example value.
                means = 0.5 * (self._max_values + self._min_values)
                parameters = dict(zip(self._fit_parameters, means))
                if "x_caustic_in" in self._fit_parameters:
                    index = self._fit_parameters.index("x_caustic_in")
                    # NOTE(review): randn() is a Gaussian draw, so this
                    # value can fall outside [min, max]; possibly rand()
                    # (uniform in [0, 1)) was intended - confirm.
                    parameters["x_caustic_in"] = (
                        self._min_values[index] +
                        np.random.randn(1)[0] * self._range_values[index])
            else:
                raise ValueError('internal value')
        else:
            raise ValueError('internal value')
        if self._fixed_parameters is not None:
            for (key, value) in self._fixed_parameters.items():
                parameters[key] = value
        return parameters
def _get_example_parameters_EMCEE(self):
"""
get sample values of parameters for EMCEE - only to make mm.Model
"""
parameters = dict()
for (key, value) in self._starting_parameters.items():
# We treat Cassan08 case differently so that
# x_caustic_in is different than x_caustic_out.
if key == "x_caustic_in":
if value[0] == 'gauss':
parameters[key] = (
value[1] + value[2] * np.random.randn(1)[0])
elif value[0] in ['uniform', 'log-uniform']:
parameters[key] = 0.25 * value[1] + 0.75 * value[2]
else:
raise ValueError('internal error: ' + value[0])
else:
if value[0] == 'gauss':
parameters[key] = value[1]
elif value[0] in ['uniform', 'log-uniform']:
parameters[key] = (value[1] + value[2]) / 2.
else:
raise ValueError('internal error: ' + value[0])
return parameters
def _generate_random_parameters(self):
"""
Generate a number of starting parameters values.
It is checked if parameters are within the prior.
"""
max_iteration = 20 * self._n_walkers
if self._fit_constraints["no_negative_blending_flux"]:
max_iteration *= 5
starting = []
for parameter in self._fit_parameters:
settings = self._starting_parameters[parameter]
values = self._get_samples_from_distribution(
max_iteration, settings)
starting.append(values)
starting = np.array(starting).T.tolist()
self._check_generated_random_parameters(starting)
def _get_samples_from_distribution(self, n, settings):
"""
Get n samples from a given distribution (settings[0]).
The meaning and number of settings[1:] depends on particular
distribution.
"""
if settings[0] == 'gauss':
values = settings[2] * np.random.randn(n) + settings[1]
elif settings[0] == 'uniform':
values = np.random.uniform(
low=settings[1], high=settings[2], size=n)
elif settings[0] == 'log-uniform':
beg = math.log(settings[1])
end = math.log(settings[2])
values = np.exp(np.random.uniform(beg, end, n))
else:
raise ValueError('Unrecognized keyword: ' + settings[0])
return values
    def _check_generated_random_parameters(self, starting):
        """
        Check if the set of points provided has at least self._n_walkers
        points inside the prior.

        Accepted points are stored as the EMCEE initial state; raises
        ValueError when too few points have finite ln(prob).
        """
        out = []
        for point in starting:
            ln_prob = self._ln_prob(point)
            if self._return_fluxes:
                # _ln_prob() returns (ln_prob, fluxes) in this case.
                ln_prob = ln_prob[0]
            if ln_prob > -np.inf:
                out.append(point)
            # Stop as soon as enough valid points were collected.
            if len(out) == self._n_walkers:
                break
        if len(out) < self._n_walkers:
            raise ValueError(
                "Couldn't generate required starting points in a prior. "
                "Most probably you have to correct at least one of: "
                "starting_parameters, min_values, max_values, or "
                "fit_constraints.\nGot " + str(len(out)) + " walkers in "
                "the prior, but required " + str(self._n_walkers) + ".\n"
                "If you think the code should work with your settings, "
                "then please contact <NAME>.")
        self._kwargs_EMCEE['initial_state'] = out
    def _ln_prob(self, theta):
        """
        Log probability of the model - combines _ln_prior(), _ln_like(),
        and constraints which include fluxes.
        NOTE: we're using np.log(), i.e., natural logarithms.
        """
        ln_prior = self._ln_prior(theta)
        if not np.isfinite(ln_prior):
            return self._return_ln_prob(-np.inf)
        ln_like = self._ln_like(theta)
        if not np.isfinite(ln_like):
            return self._return_ln_prob(-np.inf)
        ln_prob = ln_prior + ln_like
        # Fluxes are read after _ln_like() evaluated chi2 (which sets
        # self._event.fits), then checked against flux-based priors.
        fluxes = self._get_fluxes()
        ln_prior_flux = self._run_flux_checks_ln_prior(fluxes)
        if not np.isfinite(ln_prior_flux):
            return self._return_ln_prob(-np.inf)
        ln_prob += ln_prior_flux
        self._update_best_model_EMCEE(ln_prob, theta, fluxes)
        return self._return_ln_prob(ln_prob, fluxes)
def _return_ln_prob(self, value, fluxes=None):
"""
used to parse output of _ln_prob() in order to make that function
shorter
"""
if value == -np.inf:
if self._return_fluxes:
return (value, [0.] * self._n_fluxes)
else:
return value
else:
if self._return_fluxes:
if fluxes is None:
raise ValueError('Unexpected error!')
return (value, fluxes)
else:
return value
def _set_model_parameters(self, theta):
"""
Set microlensing parameters of self._model
Note that if only plotting functions are called,
then self._fit_parameters and theta are empty.
"""
for (parameter, value) in zip(self._fit_parameters, theta):
setattr(self._model.parameters, parameter, value)
    def _ln_prior(self, theta):
        """
        Check if fitting parameters are within the prior.
        Constraints from self._fit_constraints:
        - on blending flux are NOT applied here,
          but in _run_flux_checks_ln_prior(),
        - on t_E are applied here.
        NOTE: we're using np.log(), i.e., natural logarithms.
        """
        inside = 0.
        outside = -np.inf
        # Hard box limits: any violation rejects the point outright.
        for (index, limit) in self._min_values_indexed.items():
            if theta[index] < limit:
                return outside
        for (index, limit) in self._max_values_indexed.items():
            if theta[index] > limit:
                return outside
        ln_prior = inside
        if self._prior_t_E is not None:
            self._set_model_parameters(theta)
            ln_prior += self._ln_prior_t_E()
        if self._priors is not None:
            self._set_model_parameters(theta)
            for (parameter, prior_settings) in self._priors.items():
                if parameter in ['pi_E_N', 'pi_E_E']:
                    # Other parameters can be added here. XXX
                    value = self._model.parameters.parameters[parameter]
                    ln_prior += self._get_ln_prior_for_1_parameter(
                        value, prior_settings)
                else:
                    raise ValueError('prior not handled: ' + parameter)
        return ln_prior
def _get_ln_prior_for_1_parameter(self, value, settings):
"""
Calculate ln(prior) for given value and settings
"""
if settings[0] == 'gauss':
sigma = settings[2]
diff = value - settings[1]
return -0.5*(diff/sigma)**2 - math.log(math.sqrt(2*np.pi)*sigma)
else:
raise ValueError('Case not handelded yet: ' + settings[0])
    def _ln_prior_t_E(self):
        """
        Get log prior for t_E of current model. This function is executed
        if there is t_E prior.

        Works in x = log10(t_E); inside the tabulated range the
        interpolating function is used, outside it analytic extrapolation.
        """
        if self._prior_t_E not in ['Mroz+17', 'Mroz+20']:
            raise ValueError('unexpected internal error: ' + self._prior_t_E)
        try:
            x = math.log10(self._model.parameters.t_E)
        except ValueError:
            # In the Cassan08 parameterization t_E can raise ValueError;
            # treat such points as having zero prior probability.
            if 'x_caustic_in' in self._model.parameters.parameters:
                return -np.inf
            else:
                raise
        if x > self._prior_t_E_data['x_max']:
            # Above the tabulated range: slope -3*ln(10) per unit of
            # log10(t_E), i.e., prior proportional to t_E^-3.
            dy = -3. * math.log(10) * (x - self._prior_t_E_data['x_max'])
            return self._prior_t_E_data['y_max'] + dy
        elif x > self._prior_t_E_data['x_min']:
            return self._prior_t_E_data['function'](x)
        else:
            # Below the tabulated range: constant for Mroz+17; for
            # Mroz+20 an additional t_E^+3 falloff is applied.
            out = self._prior_t_E_data['y_min'] + 0.
            if self._prior_t_E == 'Mroz+20':
                out += 3. * math.log(10) * (x - self._prior_t_E_data['x_min'])
            return out
def _ln_like(self, theta):
"""
likelihood function
"""
self._set_model_parameters(theta)
chi2 = self._event.get_chi2()
if self._print_model:
self._print_current_model(theta, chi2)
return -0.5 * chi2
def _print_current_model(self, theta, chi2):
"""
print the chi2 and parameters for model provided
"""
out = "{:.4f} {:}".format(chi2, " ".join([repr(x) for x in theta]))
flush = False
cond_1 = self._print_model_i <= 1000 and self._print_model_i % 10 == 0
cond_2 = self._print_model_i > 1000 and self._print_model_i % 100 == 0
if self._print_model_i < 100 or cond_1 or cond_2:
flush = True
print(out, file=self._print_model_file, flush=flush)
self._print_model_i += 1
def _get_fluxes(self):
"""
Extract all fluxes and return them in a list.
"""
fluxes = []
for (i, dataset) in enumerate(self._datasets):
fluxes += self._event.fits[i].source_fluxes.tolist()
fluxes.append(self._event.fits[i].blend_flux)
return fluxes
def _run_flux_checks_ln_prior(self, fluxes):
"""
Run the checks on fluxes - are they in the prior?
"""
inside = 0.
outside = -np.inf
if self._fit_constraints["no_negative_blending_flux"]:
blend_index = self._n_fluxes_per_dataset - 1
if fluxes[blend_index] < 0.:
return outside
key = "negative_blending_flux_sigma_mag"
if key in self._fit_constraints:
blend_index = self._n_fluxes_per_dataset - 1
if fluxes[blend_index] < 0.:
sigma = self._fit_constraints[key]
inside += -0.5 * (fluxes[blend_index] / sigma)**2
return inside
def _update_best_model_EMCEE(self, ln_prob, theta, fluxes):
"""
Check if the current model is the best one and save information.
"""
if ln_prob < self._best_model_ln_prob:
return
self._best_model_ln_prob = ln_prob
self._best_model_theta = theta
self._best_model_fluxes = fluxes
def _setup_fit(self):
"""
Setup what is needed for fitting after MulensModel.Event is set up.
"""
if self._fit_method == 'EMCEE':
self._setup_fit_EMCEE()
elif self._fit_method == 'MultiNest':
self._setup_fit_MultiNest()
else:
raise ValueError('internal bug')
    def _setup_fit_EMCEE(self):
        """
        Setup EMCEE fit

        Starts a multiprocessing pool (stored on the class, so that
        _finish_fit() can shut it down later) and creates the sampler.
        """
        UlensModelFit._pool = Pool()
        self._sampler = emcee.EnsembleSampler(
            self._n_walkers, self._n_fit_parameters, self._ln_prob,
            pool=UlensModelFit._pool)
def _setup_fit_MultiNest(self):
"""
Prepare MultiNest fit
"""
self._kwargs_MultiNest['Prior'] = self._transform_unit_cube
self._kwargs_MultiNest['LogLikelihood'] = self._ln_like_MN
self._kwargs_MultiNest['resume'] = False
self._kwargs_MultiNest['n_dims'] = self._n_fit_parameters
self._kwargs_MultiNest['n_params'] = self._kwargs_MultiNest['n_dims']
if self._return_fluxes:
self._kwargs_MultiNest['n_params'] += self._n_fluxes
    def _transform_unit_cube(self, cube, n_dims, n_params):
        """
        Transform MulitNest unit cube to microlensing parameters.
        Based on SafePrior() in
        https://github.com/JohannesBuchner/PyMultiNest/blob/master/
        pymultinest/solve.py
        NOTE: We call self._ln_like() here (and remember the result)
        because in MultiNest you can add fluxes only in "prior" function,
        not in likelihood function.
        """
        # Linear map from the [0, 1] unit cube to [min, max] per parameter.
        cube_out = self._min_values + cube[:n_dims] * self._range_values
        for i in range(n_dims):
            cube[i] = cube_out[i]
        # Evaluate and cache the likelihood now; _ln_like_MN() reuses it.
        self._last_ln_like = self._ln_like(cube_out)
        self._last_theta = cube_out
        if self._return_fluxes:
            fluxes = self._get_fluxes()
            # Extra cube entries (beyond n_dims) report derived fluxes.
            for i in range(n_dims, n_params):
                cube[i] = fluxes[i-n_dims]
            self._last_fluxes = fluxes
def _ln_like_MN(self, theta, n_dim, n_params, lnew):
"""
Calculate likelihood and save if its best model.
This is used for MultiNest fitting.
"""
for i in range(n_dim):
if self._last_theta[i] != theta[i]:
msg = "internal bug:\n{:}\n{:}\n{:}"
raise ValueError(msg.format(i, self._last_theta[i], theta[i]))
ln_like = self._last_ln_like
if ln_like > self._best_model_ln_prob:
self._best_model_ln_prob = ln_like
self._best_model_theta = np.array(theta[:n_dim])
if self._return_fluxes:
self._best_model_fluxes = self._last_fluxes
ln_max = -1.e300
if not np.isfinite(ln_like) or ln_like < ln_max:
if not np.isfinite(ln_like):
msg = "problematic likelihood: {:}\nfor parameters: {:}"
warnings.warn(msg.format(ln_like, theta))
ln_like = ln_max
return ln_like
def _run_fit(self):
"""
Call the method that does the fit.
"""
if self._fit_method == 'EMCEE':
self._run_fit_EMCEE()
elif self._fit_method == 'MultiNest':
self._run_fit_MultiNest()
else:
raise ValueError('internal bug')
def _run_fit_EMCEE(self):
"""
Run EMCEE
"""
tStart = time.time()
self._sampler.run_mcmc(**self._kwargs_EMCEE)
tEnd = time.time()
serial_time = tEnd - tStart
print("Multiprocessing took {0:.1f} seconds".format(serial_time))
    def _run_fit_MultiNest(self):
        """
        Run MultiNest fit

        All settings (including the Prior and LogLikelihood callbacks)
        were collected in self._kwargs_MultiNest beforehand.
        """
        mn_run(**self._kwargs_MultiNest)
    def _finish_fit(self):
        """
        Make the things that are necessary after the fit is done.
        Currently it's just closing the file with all models and
        reads the output files (only for MultiNest).
        """
        if self._print_model:
            if self._print_model_file is not sys.stdout:
                self._print_model_file.close()
                self._print_model = False
        if self._fit_method == 'EMCEE':
            # Shut down the multiprocessing pool started in
            # _setup_fit_EMCEE().
            UlensModelFit._pool.close()
            UlensModelFit._pool.join()
            UlensModelFit._pool.terminate()
        elif self._fit_method == 'MultiNest':
            # Read MultiNest output files via pymultinest's Analyzer.
            base = self._kwargs_MultiNest['outputfiles_basename']
            self._analyzer = Analyzer(n_params=self._n_fit_parameters,
                                      outputfiles_basename=base)
            self._analyzer_data = self._analyzer.get_data()
            if self._kwargs_MultiNest['multimodal']:
                self._read_multimode_posterior_MultiNest()
                self._read_multimode_best_models_MultiNest()
            if self._MN_temporary_files:
                # Output files were written to a temporary directory.
                shutil.rmtree(base, ignore_errors=True)
    def _read_multimode_posterior_MultiNest(self):
        """
        We read data from MultiNest output file [root]post_separate.dat.
        It has 2 empty lines, then info on a mode, and this repeats
        N_modes times. We read it twice - first to get all the data and
        then to find how many samples there are in each mode.
        """
        data = np.loadtxt(self._analyzer.post_file)
        n_samples = []
        skip = False
        with open(self._analyzer.post_file) as in_data:
            for line in in_data.readlines():
                if skip:
                    # Second of the two blank lines separating modes.
                    skip = False
                    continue
                if len(line) == 1:
                    # A bare "\n" starts a new mode section.
                    skip = True
                    n_samples.append(0)
                else:
                    n_samples[-1] += 1
        # Sanity check: per-mode counts must sum to the number of rows.
        if data.shape[0] != sum(n_samples):
            raise ValueError('Error in file ' + self._analyzer.post_file +
                             str(data.shape) + " vs. " + str(n_samples))
        self._MN_samples_modes_all = data
        self._MN_modes_indexes = n_samples
        self._n_modes = len(n_samples)
    def _read_multimode_best_models_MultiNest(self):
        """
        read and process information about best models
        (i.e., highest likelihood, not maximum a posteriori) for each mode

        mode['maximum'] carries the fitted parameters first; zip() inside
        _set_model_parameters() ignores any extra (flux) entries.
        """
        out = dict()
        for mode in self._analyzer.get_mode_stats()['modes']:
            out[mode['index']] = {"parameters": mode['maximum']}
            self._set_model_parameters(mode['maximum'])
            out[mode['index']]["chi2"] = self._event.get_chi2()
            # The above line is not best performence, but easy solution.
        self._best_models_for_modes_MN = out
def _parse_results(self):
"""
Call the function that prints and saves results
"""
if self._fit_method == "EMCEE":
self._parse_results_EMCEE()
if self._posterior_file_name is not None:
self._save_posterior_EMCEE()
elif self._fit_method == "MultiNest":
self._parse_results_MultiNest()
else:
raise ValueError('internal bug')
    def _parse_results_EMCEE(self):
        """
        Print and save results from EMCEE fitting.
        This version works with EMCEE version 2.X and 3.0.
        """
        # Sampler diagnostics first: acceptance and autocorrelation.
        accept_rate = np.mean(self._sampler.acceptance_fraction)
        print("Mean acceptance fraction: {0:.3f}".format(accept_rate))
        autocorr_times = self._sampler.get_autocorr_time(
            quiet=True, discard=self._fitting_parameters['n_burn'])
        autocorr_time = np.mean(autocorr_times)
        print("Mean autocorrelation time: {0:.1f} steps".format(autocorr_time))
        self._extract_posterior_samples_EMCEE()
        print("Fitted parameters:")
        self._print_results(self._samples_flat)
        self._shift_t_0_in_samples()
        if self._return_fluxes:
            print("Fitted fluxes (source and blending):")
            blob_samples = self._get_fluxes_to_print_EMCEE()
            self._print_results(blob_samples, names='fluxes')
        self._print_best_model()
def _extract_posterior_samples_EMCEE(self):
"""
set self._samples_flat and self._samples
"""
n_burn = self._fitting_parameters['n_burn']
self._samples = self._sampler.chain[:, n_burn:, :]
n_fit = self._n_fit_parameters
self._samples_flat = self._samples.copy().reshape((-1, n_fit))
if 'trace' not in self._plots:
self._samples = None
    def _print_results(self, data, names="parameters", mode=None):
        """
        calculate and print median values and +- 1 sigma for given parameters

        *names* selects the labels ("parameters" or "fluxes"); *mode*
        selects per-mode weights for multimodal MultiNest results.
        """
        if names == "parameters":
            ids = self._fit_parameters
        elif names == "fluxes":
            if self._flux_names is None:
                # Cache the generated flux labels.
                self._flux_names = self._get_fluxes_names_to_print()
            ids = self._flux_names
        else:
            raise ValueError('internal bug')
        if self._fit_method == "EMCEE":
            results = self._get_weighted_percentile(data)
        elif self._fit_method == "MultiNest":
            # MultiNest samples carry weights; pick per-mode ones if asked.
            if mode is None:
                weights = self._samples_flat_weights
            else:
                weights = self._samples_modes_flat_weights[mode]
            results = self._get_weighted_percentile(data, weights=weights)
        else:
            raise ValueError("internal bug")
        for (parameter, results_) in zip(ids, results):
            format_ = "{:} : {:.5f} +{:.5f} -{:.5f}"
            if parameter == 'q':
                # 'q' is printed with more digits.
                format_ = "{:} : {:.7f} +{:.7f} -{:.7f}"
            print(format_.format(parameter, *results_))
def _get_fluxes_names_to_print(self):
"""
get strings to be used as names of parameters to be printed
"""
if self._n_fluxes_per_dataset == 2:
s_or_b = ['s', 'b']
elif self._n_fluxes_per_dataset == 3:
s_or_b = ['s1', 's2', 'b']
else:
raise ValueError(
'Internal error: ' + str(self._n_fluxes_per_dataset))
n = self._n_fluxes_per_dataset
flux_names = ['flux_{:}_{:}'.format(s_or_b[i % n], i // n+1)
for i in range(self._n_fluxes)]
return flux_names
def _get_weighted_percentile(
self, data, fractions=[0.158655, 0.5, 0.841345], weights=None):
"""
Calculate weighted percentile of the data. Data can be weighted or not.
"""
if weights is None:
kwargs = dict()
if data.shape[0] > data.shape[1]:
kwargs['axis'] = 0
results = np.percentile(data, 100.*np.array(fractions), **kwargs)
else:
results = []
for i in range(data.shape[1]):
data_ = data[:, i]
indexes = np.argsort(data_)
weights_sorted = weights[indexes]
weights_cumulative = np.cumsum(weights_sorted)
weighted_quantiles = weights_cumulative - 0.5 * weights_sorted
weighted_quantiles /= weights_cumulative[-1]
results.append(
np.interp(fractions, weighted_quantiles, data_[indexes]))
results = np.array(results).T
out = []
for i in range(results.shape[1]):
median = results[1, i]
out.append([median, results[2, i]-median, median-results[0, i]])
return out
    def _shift_t_0_in_samples(self):
        """
        shift the values of t_0, t_0_1, and t_0_2:

        Subtracts int(mean) of each t_0-like column so that printed and
        plotted values are small offsets rather than full epochs.
        """
        if not self._shift_t_0:
            return
        for name in ['t_0', 't_0_1', 't_0_2']:
            if name in self._fit_parameters:
                index = self._fit_parameters.index(name)
                values = self._samples_flat[:, index]
                mean = np.mean(values)
                try:
                    self._samples_flat[:, index] -= int(mean)
                    if 'trace' in self._plots:
                        self._samples[:, :, index] -= int(mean)
                except TypeError:
                    # NOTE(review): unclear which operation raises
                    # TypeError here (the warning text suggests an
                    # extremely wide posterior); the fallback redoes the
                    # shift with plain assignment - confirm the intent.
                    fmt = ("Warning: extremely wide range of posterior {:}: "
                           "from {:} to {:}")
                    warnings.warn(
                        fmt.format(name, np.min(values), np.max(values)))
                    self._samples_flat[:, index] = values - int(mean)
                    if 'trace' in self._plots:
                        self._samples[:, :, index] = (
                            self._samples[:, :, index] - int(mean))
def _get_fluxes_to_print_EMCEE(self):
"""
prepare values to be printed for EMCEE fitting
"""
try:
blobs = np.array(self._sampler.blobs)
except Exception as exception:
raise ValueError('There was some issue with blobs:\n' +
str(exception))
blob_sampler = np.transpose(blobs, axes=(1, 0, 2))
blob_samples = blob_sampler[:, self._fitting_parameters['n_burn']:, :]
blob_samples = blob_samples.reshape((-1, self._n_fluxes))
return blob_samples
    def _print_best_model(self):
        """
        print best model found
        """
        print("Best model:")
        if self._flat_priors:
            # Assumes ln_prob == -chi2/2 when all priors are flat.
            print("chi2 : {:.4f}".format(-2. * self._best_model_ln_prob))
        else:
            # With non-flat priors ln_prob includes prior terms, so chi2
            # is recomputed for the best parameters.
            self._ln_like(self._best_model_theta)
            print("chi2 : {:.4f}".format(self._event.get_chi2()))
        print(*self._fit_parameters)
        print(*list(self._best_model_theta))
        if self._return_fluxes:
            print("Fluxes:")
            print(*list(self._best_model_fluxes))
def _save_posterior_EMCEE(self):
"""
save 3D cube with posterior to a numpy array
"""
n_burn = self._fitting_parameters.get('n_burn', 0)
samples = self._sampler.chain[:, n_burn:, :]
if self._posterior_file_fluxes == 'all':
blobs = np.array(self._sampler.blobs)
blobs = np.transpose(blobs, axes=(1, 0, 2))[:, n_burn:, :]
samples = np.dstack((samples, blobs))
np.save(self._posterior_file_name, samples)
def _parse_results_MultiNest(self):
"""
Parse results of MultiNest fitting
"""
self._extract_posterior_samples_MultiNest()
if self._return_fluxes:
flux_names = self._get_fluxes_names_to_print()
if self._kwargs_MultiNest['multimodal']:
self._parse_results_MultiNest_multimodal()
else:
print("Fitted parameters:")
self._print_results(self._samples_flat)
if self._return_fluxes:
print("Fitted fluxes (source and blending):")
flux_samples = self._get_fluxes_to_print_MultiNest()
self._print_results(flux_samples, names='fluxes')
self._shift_t_0_in_samples()
self._print_best_model()
    def _parse_results_MultiNest_multimodal(self):
        """
        Print parameters and fluxes for each mode separately
        """
        print("Number of modes found:", self._n_modes)
        if self._return_fluxes:
            print("Fitted parameters and fluxes (source and blending) "
                  "plus best model info:")
        else:
            print("Fitted parameters:")
        self._set_mode_probabilities()
        for i_mode in range(self._n_modes):
            err = self._mode_probabilities_error[i_mode]
            # Round the probability to 2 significant digits of its error.
            accuracy = self._get_accuracy(err)
            fmt = ("  MODE {:} probability: {:." + str(accuracy) +
                   "f} +- {:." + str(accuracy) + "f}")
            print(fmt.format(i_mode+1, self._mode_probabilities[i_mode], err))
            self._print_results(self._samples_modes_flat[i_mode], mode=i_mode)
            if self._return_fluxes:
                self._print_results(
                    self._samples_modes_flat_fluxes[i_mode], names="fluxes")
            mode = self._best_models_for_modes_MN[i_mode]
            print("{:.4f}".format(mode['chi2']))
            # Fitted parameters first, then the remaining (flux) entries.
            print(*mode['parameters'][:self._n_fit_parameters])
            print(*mode['parameters'][self._n_fit_parameters:])
        print(" END OF MODES")
def _set_mode_probabilities(self):
"""
Calculate probabilities of each mode and its uncertainty
"""
modes = self._analyzer.get_mode_stats()['modes']
key_lnZ = 'local log-evidence'
modes_lnZ = np.array([mode[key_lnZ] for mode in modes])
shift = (np.max(modes_lnZ) + np.min(modes_lnZ)) / 2.
# We subtract shift for numerical stability.
modes_lnZ -= shift
modes_Z = np.exp(modes_lnZ)
self._mode_probabilities = modes_Z / np.sum(modes_Z)
relative_error = [mode[key_lnZ + ' error'] for mode in modes]
# Error in np.sum(modes_Z) is ignored.
self._mode_probabilities_error = (
relative_error * self._mode_probabilities)
def _get_accuracy(self, uncertainty):
"""
Return int which says how to round given value to 2 significant digits
"""
return 2-int(np.log10(uncertainty))
def _extract_posterior_samples_MultiNest(self):
"""
set self._samples_flat and self._samples_flat_weights for MultiNest
"""
index = 2 + self._n_fit_parameters
self._samples_flat = self._analyzer_data[:, 2:index]
self._samples_flat_weights = self._analyzer_data[:, 0]
if self._kwargs_MultiNest['multimodal']:
self._samples_modes_flat = []
self._samples_modes_flat_weights = []
self._samples_modes_flat_fluxes = []
n_begin = 0
for n in self._MN_modes_indexes:
n_end = n_begin + n
weights = self._MN_samples_modes_all[n_begin:n_end, 0]
samples = self._MN_samples_modes_all[n_begin:n_end, 2:index]
fluxes_ = self._MN_samples_modes_all[n_begin:n_end, index:]
self._samples_modes_flat.append(samples)
self._samples_modes_flat_weights.append(weights)
self._samples_modes_flat_fluxes.append(fluxes_)
n_begin = n_end
def _get_fluxes_to_print_MultiNest(self):
"""
prepare values to be printed for EMCEE fitting
"""
index = 2 + self._n_fit_parameters
data = self._analyzer_data[:, index:]
return data
def _make_plots(self):
"""
make plots after fitting: best model, triangle plot, trace plot
"""
if 'triangle' in self._plots:
self._triangle_plot()
if 'trace' in self._plots:
self._trace_plot()
if 'best model' in self._plots:
self._best_model_plot()
def _triangle_plot(self):
"""
Make a triangle plot
"""
self._reset_rcParams()
n_bins = 40
kwargs = {
'bins': n_bins, 'labels': self._fit_parameters_latex,
'show_titles': True, 'quantiles': [0.15866, 0.5, 0.84134],
'verbose': False, 'top_ticks': False}
figure = corner.corner(self._samples_flat, **kwargs)
self._save_figure(self._plots['triangle'].get('file'), figure=figure)
    def _reset_rcParams(self):
        """
        Reset matplotlib rcParams to their defaults.

        Called before each plot so that settings changed for one plot
        (e.g., font size) do not leak into the next one.
        """
        rcParams.update(rcParamsDefault)
def _save_figure(self, file_name, figure=None, dpi=None):
"""
Save figure or display it
"""
if file_name is None:
plt.show()
# XXX - does this work?
# elif file_name[-4:].upper() == ".PDF":
# pdf = PdfPages(file_name)
# if figure is None:
# figure = plt.gcf()
# pdf.savefig(figure)
else:
caller = plt
if figure is not None:
caller = figure
kwargs = dict()
if dpi is not None:
kwargs = {'dpi': dpi}
caller.savefig(file_name, **kwargs)
plt.close()
    def _trace_plot(self):
        """
        Make a trace plot: one panel per fit parameter showing parameter
        values vs. step number, with all panels sharing the X axis.
        """
        self._reset_rcParams()
        alpha = 0.5
        plt.rcParams['font.size'] = 12
        plt.rcParams['axes.linewidth'] = 1.4
        figure_size = (7.5, 10.5)
        margins = {'left': 0.13, 'right': 0.97, 'top': 0.98, 'bottom': 0.05}
        # One row per fit parameter; hspace=0 makes the panels touch.
        grid = gridspec.GridSpec(self._n_fit_parameters, 1, hspace=0)
        plt.figure(figsize=figure_size)
        plt.subplots_adjust(**margins)
        # Indexing below implies self._samples axes are:
        # [chain/walker, step, parameter] -- axis 1 gives the X values.
        x_vector = np.arange(self._samples.shape[1])
        for (i, latex_name) in enumerate(self._fit_parameters_latex):
            if i == 0:
                plt.subplot(grid[i])
                ax0 = plt.gca()
            else:
                # Share the X axis with the first panel.
                plt.gcf().add_subplot(grid[i], sharex=ax0)
            plt.ylabel(latex_name)
            # One line per chain/walker.
            for j in range(self._samples.shape[0]):
                plt.plot(x_vector, self._samples[j, :, i], alpha=alpha)
            plt.xlim(0, self._samples.shape[1])
            plt.gca().tick_params(axis='both', which='both', direction='in',
                                  top=True, right=True)
            # Hide X tick labels on all but the bottom panel.
            if i != self._n_fit_parameters - 1:
                plt.setp(plt.gca().get_xticklabels(), visible=False)
            # Restart the color cycle so every panel uses the same colors.
            plt.gca().set_prop_cycle(None)
        plt.xlabel('step count')
        self._save_figure(self._plots['trace'].get('file'))
    def _best_model_plot(self):
        """
        Plot the best model and residuals as two vertically-stacked
        panels sharing the time axis, then save or show the figure.
        """
        dpi = 300
        self._ln_like(self._best_model_theta)  # Sets all parameters to
        # the best model.
        self._reset_rcParams()
        # Let the user override rcParams via plot settings.
        if 'rcParams' in self._plots['best model']:
            for (key, value) in self._plots['best model']['rcParams'].items():
                rcParams[key] = value
        kwargs_all = self._get_kwargs_for_best_model_plot()
        (kwargs_grid, kwargs_model, kwargs, xlim, t_1, t_2) = kwargs_all[:6]
        (kwargs_axes_1, kwargs_axes_2) = kwargs_all[6:]
        (ylim, ylim_residuals) = self._get_ylim_for_best_model_plot(t_1, t_2)
        grid = gridspec.GridSpec(**kwargs_grid)
        # Upper panel: data and model light curves.
        axes = plt.subplot(grid[0])
        self._event.plot_data(**kwargs)
        fluxes = self._event.get_ref_fluxes()
        self._plot_models_for_best_model_plot(fluxes, kwargs_model)
        self._plot_legend_for_best_model_plot()
        plt.xlim(*xlim)
        # ylim is None when no data fall in the plotted time range.
        if ylim is not None:
            plt.ylim(*ylim)
        axes.tick_params(**kwargs_axes_1)
        if "second Y scale" in self._plots['best model']:
            self._mark_second_Y_axis_in_best_plot()
        # Lower panel: residuals.
        axes = plt.subplot(grid[1])
        self._event.plot_residuals(**kwargs)
        plt.xlim(*xlim)
        plt.ylim(*ylim_residuals)
        axes.tick_params(**kwargs_axes_2)
        self._save_figure(self._plots['best model'].get('file'), dpi=dpi)
def _get_kwargs_for_best_model_plot(self):
"""
prepare kwargs/settings for best plot model
"""
plot_size_ratio = 5
hspace = 0
tau = 1.5
remove_245 = True
default_model = {'color': 'black', 'linewidth': 1.0, 'zorder': np.inf}
kwargs_grid = {
'nrows': 2, 'ncols': 1, 'height_ratios': [plot_size_ratio, 1],
'hspace': hspace}
kwargs = {'subtract_2450000': remove_245}
(t_1, t_2) = self._get_time_limits_for_plot(tau)
kwargs_model = {
't_start': t_1, 't_stop': t_2, **default_model, **kwargs}
if self._model.n_sources != 1:
kwargs_model['source_flux_ratio'] = self._datasets[0]
if self._datasets[0].bandpass is not None:
key = 'limb darkening u'
if self._datasets[0].bandpass in self._model_parameters[key]:
u = self._model_parameters[key][self._datasets[0].bandpass]
kwargs_model['gamma'] = mm.Utils.u_to_gamma(u)
if kwargs['subtract_2450000']:
xlim = [t_1-2450000., t_2-2450000.]
else:
xlim = [t_1, t_2]
kwargs_axes_1 = dict(
axis='both', direction='in', bottom=True, top=True, left=True,
right=True, labelbottom=False)
kwargs_axes_2 = {**kwargs_axes_1, 'labelbottom': True}
return (kwargs_grid, kwargs_model, kwargs, xlim, t_1, t_2,
kwargs_axes_1, kwargs_axes_2)
def _get_time_limits_for_plot(self, tau):
"""
find limits for the best model plot
"""
if 'time range' in self._plots['best model']:
t_1 = self._plots['best model']['time range'][0]
t_2 = self._plots['best model']['time range'][1]
return (t_1, t_2)
if self._model.n_sources == 1:
t_1 = self._model.parameters.t_0 - tau * self._model.parameters.t_E
t_2 = self._model.parameters.t_0 + tau * self._model.parameters.t_E
elif self._model.n_sources == 2:
t_1 = self._model.parameters.t_0_1
t_2 = self._model.parameters.t_0_2
if t_1 > t_2:
(t_1, t_2) = (t_2, t_1)
t_1 -= tau * self._model.parameters.t_E
t_2 += tau * self._model.parameters.t_E
else:
raise ValueError('internal issue: ' + str(self._model.n_sources))
return (t_1, t_2)
    def _get_ylim_for_best_model_plot(self, t_1, t_2):
        """
        Find Y axis ranges for plots of data and their residuals.
        Use t_1 and t_2 to limit the data considered.

        Returns (ylim, ylim_residuals); ylim is None when no data points
        fall inside [t_1, t_2], and is replaced by the user-provided
        'magnitude range' when one is given in the plot settings.
        """
        padding = 0.05
        # Running extrema: y_1/y_2 for magnitudes, y_3/y_4 for residuals.
        y_1 = y_3 = np.inf
        y_2 = y_4 = -np.inf
        (f_source_0, f_blend_0) = self._event.get_ref_fluxes()
        for (i, data) in enumerate(self._datasets):
            mask = (data.time >= t_1) & (data.time <= t_2)
            if np.sum(mask) == 0:
                continue
            # Scale each dataset to the reference fluxes so that all
            # datasets share a single magnitude scale.
            (flux, flux_err) = self._event.fits[i].scale_fluxes(
                f_source_0, f_blend_0)
            (y_value, y_err) = mm.Utils.get_mag_and_err_from_flux(
                flux, flux_err)
            # Drop NaN magnitudes and negative uncertainties.
            mask &= np.logical_not(np.isnan(y_value) | (y_err < 0.))
            y_1 = min(y_1, np.min((y_value - y_err)[mask]))
            y_2 = max(y_2, np.max((y_value + y_err)[mask]))
            (residuals, err_mag) = self._event.fits[i].get_residuals(
                phot_fmt='scaled', source_flux=f_source_0,
                blend_flux=f_blend_0)
            # NOTE(review): mask_ filters non-finite residuals but not
            # non-finite err_mag -- confirm that is intended.
            mask_ = np.isfinite(residuals[mask])
            y_3 = min(y_3, np.min((residuals - err_mag)[mask][mask_]))
            y_4 = max(y_4, np.max((residuals + err_mag)[mask][mask_]))
        if y_1 == np.inf:  # There are no data points in the plot.
            return (None, [0.1, -0.1])
        dy = padding * (y_2 - y_1)
        dres = padding * (y_4 - y_3)
        # Limits are inverted because magnitudes increase downwards.
        ylim = [y_2 + dy, y_1 - dy]
        ylim_r = [y_4 + dres, y_3 - dres]
        # Block below is the same in MulensModel.Model.plot_residuals() in
        # its version 1.15.6.
        ylim_r_max = np.max(np.abs(ylim_r))
        if ylim_r_max > 1.:
            ylim_r_max = 0.5
        ylim_residuals = [ylim_r_max, -ylim_r_max]
        if 'magnitude range' in self._plots['best model']:
            ylim = self._plots['best model']['magnitude range']
        return (ylim, ylim_residuals)
def _plot_models_for_best_model_plot(self, fluxes, kwargs_model):
"""
Plot best models: first ground-based (if needed, hence loop),
then satellite ones (if needed).
"""
for dataset in self._datasets:
if dataset.ephemerides_file is None:
self._model.plot_lc(
source_flux=fluxes[0], blend_flux=fluxes[1],
**kwargs_model)
break
for model in self._models_satellite:
model.parameters.parameters = {**self._model.parameters.parameters}
model.plot_lc(source_flux=fluxes[0], blend_flux=fluxes[1],
**kwargs_model)
def _plot_legend_for_best_model_plot(self):
"""
advanced call to plt.legend()
"""
if len(self._datasets) > 1 or 'legend' in self._plots['best model']:
if 'legend' not in self._plots['best model']:
plt.legend()
else:
try:
plt.legend(**self._plots['best model']['legend'])
except Exception:
print("\npyplot.legend() failed with kwargs:")
print(self._plots['best model']['legend'], "\n")
raise
    def _mark_second_Y_axis_in_best_plot(self):
        """
        Mark the second (right-hand side) scale for Y axis in
        the best model plot: ticks at user-requested magnification
        values, labeled with user-provided labels.
        """
        settings = self._plots['best model']["second Y scale"]
        magnifications = settings['magnifications']
        color = settings.get("color", "red")
        label = settings.get("label", "magnification")
        labels = settings['labels']
        # Translate the current Y (magnitude) limits into flux limits.
        ylim = plt.ylim()
        flux_min = mm.Utils.get_flux_from_mag(ylim[0])
        flux_max = mm.Utils.get_flux_from_mag(ylim[1])
        (source_flux, blend_flux) = self._event.get_ref_fluxes()
        if self._model.n_sources == 1:
            total_source_flux = source_flux
        else:
            total_source_flux = sum(source_flux)
        # Flux corresponding to each requested magnification.
        flux = total_source_flux * magnifications + blend_flux
        # Negative fluxes cannot be converted to magnitudes -- drop them.
        if np.any(flux < 0.):
            mask = (flux > 0.)
            flux = flux[mask]
            labels = [l for (l, m) in zip(labels, mask) if m]
            msg = ("\n\n{:} label/s on the second Y scale will not be shown "
                   "because they correspond to negative flux which cannot "
                   "be translated to magnitudes.")
            warnings.warn(msg.format(np.sum(np.logical_not(mask))))
        # Magnification range covered by the plotted magnitude range.
        A_min = (flux_min - blend_flux) / total_source_flux
        A_max = (flux_max - blend_flux) / total_source_flux
        if (np.min(magnifications) < A_min or np.max(magnifications) > A_max or
                np.any(flux < 0.)):
            msg = ("Provided magnifications for the second (i.e., right-hand "
                   "side) Y-axis scale are from {:} to {:},\nbut the range "
                   "of plotted magnifications is from {:} to {:}, hence, "
                   "the second scale is not plotted")
            # A_min/A_max are indexed with [0] -- presumably arrays of
            # length 1 (fluxes from get_ref_fluxes()); TODO confirm.
            args = [min(magnifications), max(magnifications),
                    A_min[0], A_max[0]]
            warnings.warn(msg.format(*args))
            return
        ticks = mm.Utils.get_mag_from_flux(flux)
        # Twin axis shares X but carries its own right-hand Y scale.
        ax2 = plt.gca().twinx()
        ax2.set_ylabel(label).set_color(color)
        ax2.spines['right'].set_color(color)
        ax2.set_ylim(ylim[0], ylim[1])
        ax2.tick_params(axis='y', colors=color)
        plt.yticks(ticks, labels, color=color)
if __name__ == '__main__':
    # Command-line entry point: the single argument is a YAML file with
    # all fit settings; it is parsed and passed to UlensModelFit.
    if len(sys.argv) != 2:
        raise ValueError('Exactly one argument needed - YAML file')
    if 'yaml' in import_failed:
        raise ImportError('module "yaml" could not be imported :(')
    with open(sys.argv[1], 'r') as in_data:
        all_settings = yaml.safe_load(in_data)
    fit = UlensModelFit(**all_settings)
    fit.run_fit()
| [
"numpy.logical_not",
"MulensModel.Model",
"numpy.sum",
"numpy.abs",
"pymultinest.analyse.Analyzer",
"numpy.isnan",
"numpy.argsort",
"matplotlib.pyplot.figure",
"numpy.mean",
"numpy.arange",
"numpy.exp",
"yaml.safe_load",
"os.path.isfile",
"MulensModel.Utils.get_mag_and_err_from_flux",
"m... | [((42163, 42179), 'os.path.isdir', 'path.isdir', (['root'], {}), '(root)\n', (42173, 42179), False, 'from os import path, sep\n'), ((42653, 42669), 'os.path.isdir', 'path.isdir', (['root'], {}), '(root)\n', (42663, 42669), False, 'from os import path, sep\n'), ((46312, 46332), 'numpy.array', 'np.array', (['min_values'], {}), '(min_values)\n', (46320, 46332), True, 'import numpy as np\n'), ((46360, 46380), 'numpy.array', 'np.array', (['max_values'], {}), '(max_values)\n', (46368, 46380), True, 'import numpy as np\n'), ((56531, 56568), 'MulensModel.Event', 'mm.Event', (['self._datasets', 'self._model'], {}), '(self._datasets, self._model)\n', (56539, 56568), True, 'import MulensModel as mm\n'), ((70006, 70012), 'multiprocessing.Pool', 'Pool', ([], {}), '()\n', (70010, 70012), False, 'from multiprocessing import Pool\n'), ((70037, 70145), 'emcee.EnsembleSampler', 'emcee.EnsembleSampler', (['self._n_walkers', 'self._n_fit_parameters', 'self._ln_prob'], {'pool': 'UlensModelFit._pool'}), '(self._n_walkers, self._n_fit_parameters, self.\n _ln_prob, pool=UlensModelFit._pool)\n', (70058, 70145), False, 'import emcee\n'), ((72965, 72976), 'time.time', 'time.time', ([], {}), '()\n', (72974, 72976), False, 'import time\n'), ((73045, 73056), 'time.time', 'time.time', ([], {}), '()\n', (73054, 73056), False, 'import time\n'), ((73260, 73292), 'pymultinest.run.run', 'mn_run', ([], {}), '(**self._kwargs_MultiNest)\n', (73266, 73292), True, 'from pymultinest.run import run as mn_run\n'), ((74812, 74848), 'numpy.loadtxt', 'np.loadtxt', (['self._analyzer.post_file'], {}), '(self._analyzer.post_file)\n', (74822, 74848), True, 'import numpy as np\n'), ((76796, 76838), 'numpy.mean', 'np.mean', (['self._sampler.acceptance_fraction'], {}), '(self._sampler.acceptance_fraction)\n', (76803, 76838), True, 'import numpy as np\n'), ((77060, 77083), 'numpy.mean', 'np.mean', (['autocorr_times'], {}), '(autocorr_times)\n', (77067, 77083), True, 'import numpy as np\n'), ((82432, 82467), 
'numpy.transpose', 'np.transpose', (['blobs'], {'axes': '(1, 0, 2)'}), '(blobs, axes=(1, 0, 2))\n', (82444, 82467), True, 'import numpy as np\n'), ((83642, 83685), 'numpy.save', 'np.save', (['self._posterior_file_name', 'samples'], {}), '(self._posterior_file_name, samples)\n', (83649, 83685), True, 'import numpy as np\n'), ((85976, 86019), 'numpy.array', 'np.array', (['[mode[key_lnZ] for mode in modes]'], {}), '([mode[key_lnZ] for mode in modes])\n', (85984, 86019), True, 'import numpy as np\n'), ((86179, 86196), 'numpy.exp', 'np.exp', (['modes_lnZ'], {}), '(modes_lnZ)\n', (86185, 86196), True, 'import numpy as np\n'), ((88657, 88700), 'corner.corner', 'corner.corner', (['self._samples_flat'], {}), '(self._samples_flat, **kwargs)\n', (88670, 88700), False, 'import corner\n'), ((88896, 88928), 'matplotlib.rcParams.update', 'rcParams.update', (['rcParamsDefault'], {}), '(rcParamsDefault)\n', (88911, 88928), False, 'from matplotlib import gridspec, rc, rcParams, rcParamsDefault\n'), ((89584, 89595), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (89593, 89595), True, 'from matplotlib import pyplot as plt\n'), ((89937, 89991), 'matplotlib.gridspec.GridSpec', 'gridspec.GridSpec', (['self._n_fit_parameters', '(1)'], {'hspace': '(0)'}), '(self._n_fit_parameters, 1, hspace=0)\n', (89954, 89991), False, 'from matplotlib import gridspec, rc, rcParams, rcParamsDefault\n'), ((90001, 90032), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': 'figure_size'}), '(figsize=figure_size)\n', (90011, 90032), True, 'from matplotlib import pyplot as plt\n'), ((90041, 90071), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {}), '(**margins)\n', (90060, 90071), True, 'from matplotlib import pyplot as plt\n'), ((90091, 90124), 'numpy.arange', 'np.arange', (['self._samples.shape[1]'], {}), '(self._samples.shape[1])\n', (90100, 90124), True, 'import numpy as np\n'), ((90873, 90897), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""step count"""'], {}), 
"('step count')\n", (90883, 90897), True, 'from matplotlib import pyplot as plt\n'), ((91660, 91692), 'matplotlib.gridspec.GridSpec', 'gridspec.GridSpec', ([], {}), '(**kwargs_grid)\n', (91677, 91692), False, 'from matplotlib import gridspec, rc, rcParams, rcParamsDefault\n'), ((91709, 91729), 'matplotlib.pyplot.subplot', 'plt.subplot', (['grid[0]'], {}), '(grid[0])\n', (91720, 91729), True, 'from matplotlib import pyplot as plt\n'), ((91942, 91957), 'matplotlib.pyplot.xlim', 'plt.xlim', (['*xlim'], {}), '(*xlim)\n', (91950, 91957), True, 'from matplotlib import pyplot as plt\n'), ((92183, 92203), 'matplotlib.pyplot.subplot', 'plt.subplot', (['grid[1]'], {}), '(grid[1])\n', (92194, 92203), True, 'from matplotlib import pyplot as plt\n'), ((92257, 92272), 'matplotlib.pyplot.xlim', 'plt.xlim', (['*xlim'], {}), '(*xlim)\n', (92265, 92272), True, 'from matplotlib import pyplot as plt\n'), ((92281, 92306), 'matplotlib.pyplot.ylim', 'plt.ylim', (['*ylim_residuals'], {}), '(*ylim_residuals)\n', (92289, 92306), True, 'from matplotlib import pyplot as plt\n'), ((98508, 98518), 'matplotlib.pyplot.ylim', 'plt.ylim', ([], {}), '()\n', (98516, 98518), True, 'from matplotlib import pyplot as plt\n'), ((98538, 98573), 'MulensModel.Utils.get_flux_from_mag', 'mm.Utils.get_flux_from_mag', (['ylim[0]'], {}), '(ylim[0])\n', (98564, 98573), True, 'import MulensModel as mm\n'), ((98593, 98628), 'MulensModel.Utils.get_flux_from_mag', 'mm.Utils.get_flux_from_mag', (['ylim[1]'], {}), '(ylim[1])\n', (98619, 98628), True, 'import MulensModel as mm\n'), ((98915, 98933), 'numpy.any', 'np.any', (['(flux < 0.0)'], {}), '(flux < 0.0)\n', (98921, 98933), True, 'import numpy as np\n'), ((100035, 100067), 'MulensModel.Utils.get_mag_from_flux', 'mm.Utils.get_mag_from_flux', (['flux'], {}), '(flux)\n', (100061, 100067), True, 'import MulensModel as mm\n'), ((100288, 100326), 'matplotlib.pyplot.yticks', 'plt.yticks', (['ticks', 'labels'], {'color': 'color'}), '(ticks, labels, color=color)\n', (100298, 
100326), True, 'from matplotlib import pyplot as plt\n'), ((100641, 100661), 'yaml.safe_load', 'yaml.safe_load', (['data'], {}), '(data)\n', (100655, 100661), False, 'import yaml\n'), ((36332, 36349), 'os.path.exists', 'path.exists', (['name'], {}), '(name)\n', (36343, 36349), False, 'from os import path, sep\n'), ((42800, 42822), 'os.path.isfile', 'path.isfile', (['file_name'], {}), '(file_name)\n', (42811, 42822), False, 'from os import path, sep\n'), ((48311, 48365), 'MulensModel.Utils.get_flux_from_mag', 'mm.Utils.get_flux_from_mag', (['self._fit_constraints[key]'], {}), '(self._fit_constraints[key])\n', (48337, 48365), True, 'import MulensModel as mm\n'), ((50238, 50413), 'numpy.array', 'np.array', (['[-0.93, -0.79, -0.65, -0.51, -0.37, -0.23, -0.09, 0.05, 0.19, 0.33, 0.47, \n 0.61, 0.75, 0.89, 1.03, 1.17, 1.31, 1.45, 1.59, 1.73, 1.87, 2.01, 2.15,\n 2.29, 2.43]'], {}), '([-0.93, -0.79, -0.65, -0.51, -0.37, -0.23, -0.09, 0.05, 0.19, 0.33,\n 0.47, 0.61, 0.75, 0.89, 1.03, 1.17, 1.31, 1.45, 1.59, 1.73, 1.87, 2.01,\n 2.15, 2.29, 2.43])\n', (50246, 50413), True, 'import numpy as np\n'), ((50471, 50685), 'numpy.array', 'np.array', (['[299.4, 245.6, 358.5, 116.96, 0.0, 47.78, 85.1, 90.5, 315.37, 501.77, \n 898.26, 1559.68, 2381.46, 2849.11, 3405.0, 3431.3, 3611.76, 3038.06, \n 2170.67, 1680.38, 814.7, 444.06, 254.89, 114.19, 52.14]'], {}), '([299.4, 245.6, 358.5, 116.96, 0.0, 47.78, 85.1, 90.5, 315.37, \n 501.77, 898.26, 1559.68, 2381.46, 2849.11, 3405.0, 3431.3, 3611.76, \n 3038.06, 2170.67, 1680.38, 814.7, 444.06, 254.89, 114.19, 52.14])\n', (50479, 50685), True, 'import numpy as np\n'), ((55010, 55040), 'MulensModel.Model', 'mm.Model', (['parameters'], {}), '(parameters, **kwargs)\n', (55018, 55040), True, 'import MulensModel as mm\n'), ((55379, 55452), 'MulensModel.Model', 'mm.Model', (['parameters'], {'ephemerides_file': 'dataset.ephemerides_file'}), '(parameters, ephemerides_file=dataset.ephemerides_file, **kwargs)\n', (55387, 55452), True, 'import MulensModel 
as mm\n'), ((62982, 63003), 'numpy.isfinite', 'np.isfinite', (['ln_prior'], {}), '(ln_prior)\n', (62993, 63003), True, 'import numpy as np\n'), ((63109, 63129), 'numpy.isfinite', 'np.isfinite', (['ln_like'], {}), '(ln_like)\n', (63120, 63129), True, 'import numpy as np\n'), ((63334, 63360), 'numpy.isfinite', 'np.isfinite', (['ln_prior_flux'], {}), '(ln_prior_flux)\n', (63345, 63360), True, 'import numpy as np\n'), ((66629, 66667), 'math.log10', 'math.log10', (['self._model.parameters.t_E'], {}), '(self._model.parameters.t_E)\n', (66639, 66667), False, 'import math\n'), ((72138, 72161), 'numpy.array', 'np.array', (['theta[:n_dim]'], {}), '(theta[:n_dim])\n', (72146, 72161), True, 'import numpy as np\n'), ((82227, 82256), 'numpy.array', 'np.array', (['self._sampler.blobs'], {}), '(self._sampler.blobs)\n', (82235, 82256), True, 'import numpy as np\n'), ((83483, 83512), 'numpy.array', 'np.array', (['self._sampler.blobs'], {}), '(self._sampler.blobs)\n', (83491, 83512), True, 'import numpy as np\n'), ((83606, 83633), 'numpy.dstack', 'np.dstack', (['(samples, blobs)'], {}), '((samples, blobs))\n', (83615, 83633), True, 'import numpy as np\n'), ((86242, 86257), 'numpy.sum', 'np.sum', (['modes_Z'], {}), '(modes_Z)\n', (86248, 86257), True, 'import numpy as np\n'), ((89092, 89102), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (89100, 89102), True, 'from matplotlib import pyplot as plt\n'), ((90377, 90399), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['latex_name'], {}), '(latex_name)\n', (90387, 90399), True, 'from matplotlib import pyplot as plt\n'), ((90536, 90571), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(0)', 'self._samples.shape[1]'], {}), '(0, self._samples.shape[1])\n', (90544, 90571), True, 'from matplotlib import pyplot as plt\n'), ((91999, 92014), 'matplotlib.pyplot.ylim', 'plt.ylim', (['*ylim'], {}), '(*ylim)\n', (92007, 92014), True, 'from matplotlib import pyplot as plt\n'), ((95510, 95560), 'MulensModel.Utils.get_mag_and_err_from_flux', 
'mm.Utils.get_mag_and_err_from_flux', (['flux', 'flux_err'], {}), '(flux, flux_err)\n', (95544, 95560), True, 'import MulensModel as mm\n'), ((95955, 95983), 'numpy.isfinite', 'np.isfinite', (['residuals[mask]'], {}), '(residuals[mask])\n', (95966, 95983), True, 'import numpy as np\n'), ((96518, 96532), 'numpy.abs', 'np.abs', (['ylim_r'], {}), '(ylim_r)\n', (96524, 96532), True, 'import numpy as np\n'), ((99547, 99565), 'numpy.any', 'np.any', (['(flux < 0.0)'], {}), '(flux < 0.0)\n', (99553, 99565), True, 'import numpy as np\n'), ((31743, 31779), 'MulensModel.MulensData', 'mm.MulensData', ([], {}), '(**{**kwargs, **file_})\n', (31756, 31779), True, 'import MulensModel as mm\n'), ((36370, 36387), 'os.path.isfile', 'path.isfile', (['name'], {}), '(name)\n', (36381, 36387), False, 'from os import path, sep\n'), ((41183, 41216), 'tempfile.mkdtemp', 'tempfile.mkdtemp', (['"""_MM_ex16_pyMN"""'], {}), "('_MM_ex16_pyMN')\n", (41199, 41216), False, 'import tempfile\n'), ((50959, 50974), 'numpy.log', 'np.log', (['y[mask]'], {}), '(y[mask])\n', (50965, 50974), True, 'import numpy as np\n'), ((51226, 51329), 'numpy.array', 'np.array', (['[0.74, 0.88, 1.01, 1.15, 1.28, 1.42, 1.55, 1.69, 1.82, 1.96, 2.09, 2.23, \n 2.36, 2.5, 2.63]'], {}), '([0.74, 0.88, 1.01, 1.15, 1.28, 1.42, 1.55, 1.69, 1.82, 1.96, 2.09,\n 2.23, 2.36, 2.5, 2.63])\n', (51234, 51329), True, 'import numpy as np\n'), ((51376, 51506), 'numpy.array', 'np.array', (['[82.04, 94.98, 167.76, 507.81, 402.08, 681.61, 1157.51, 1132.8, 668.12, \n 412.2, 236.14, 335.34, 74.88, 52.64, 97.78]'], {}), '([82.04, 94.98, 167.76, 507.81, 402.08, 681.61, 1157.51, 1132.8, \n 668.12, 412.2, 236.14, 335.34, 74.88, 52.64, 97.78])\n', (51384, 51506), True, 'import numpy as np\n'), ((61226, 61286), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': 'settings[1]', 'high': 'settings[2]', 'size': 'n'}), '(low=settings[1], high=settings[2], size=n)\n', (61243, 61286), True, 'import numpy as np\n'), ((72299, 72319), 'numpy.isfinite', 
'np.isfinite', (['ln_like'], {}), '(ln_like)\n', (72310, 72319), True, 'import numpy as np\n'), ((72360, 72380), 'numpy.isfinite', 'np.isfinite', (['ln_like'], {}), '(ln_like)\n', (72371, 72380), True, 'import numpy as np\n'), ((74010, 74078), 'pymultinest.analyse.Analyzer', 'Analyzer', ([], {'n_params': 'self._n_fit_parameters', 'outputfiles_basename': 'base'}), '(n_params=self._n_fit_parameters, outputfiles_basename=base)\n', (74018, 74078), False, 'from pymultinest.analyse import Analyzer\n'), ((80349, 80366), 'numpy.argsort', 'np.argsort', (['data_'], {}), '(data_)\n', (80359, 80366), True, 'import numpy as np\n'), ((80454, 80479), 'numpy.cumsum', 'np.cumsum', (['weights_sorted'], {}), '(weights_sorted)\n', (80463, 80479), True, 'import numpy as np\n'), ((80752, 80769), 'numpy.array', 'np.array', (['results'], {}), '(results)\n', (80760, 80769), True, 'import numpy as np\n'), ((81355, 81370), 'numpy.mean', 'np.mean', (['values'], {}), '(values)\n', (81362, 81370), True, 'import numpy as np\n'), ((83533, 83568), 'numpy.transpose', 'np.transpose', (['blobs'], {'axes': '(1, 0, 2)'}), '(blobs, axes=(1, 0, 2))\n', (83545, 83568), True, 'import numpy as np\n'), ((86037, 86054), 'numpy.max', 'np.max', (['modes_lnZ'], {}), '(modes_lnZ)\n', (86043, 86054), True, 'import numpy as np\n'), ((86057, 86074), 'numpy.min', 'np.min', (['modes_lnZ'], {}), '(modes_lnZ)\n', (86063, 86074), True, 'import numpy as np\n'), ((86640, 86661), 'numpy.log10', 'np.log10', (['uncertainty'], {}), '(uncertainty)\n', (86648, 86661), True, 'import numpy as np\n'), ((90235, 90255), 'matplotlib.pyplot.subplot', 'plt.subplot', (['grid[i]'], {}), '(grid[i])\n', (90246, 90255), True, 'from matplotlib import pyplot as plt\n'), ((90278, 90287), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (90285, 90287), True, 'from matplotlib import pyplot as plt\n'), ((90468, 90523), 'matplotlib.pyplot.plot', 'plt.plot', (['x_vector', 'self._samples[j, :, i]'], {'alpha': 'alpha'}), '(x_vector, 
self._samples[j, :, i], alpha=alpha)\n', (90476, 90523), True, 'from matplotlib import pyplot as plt\n'), ((93435, 93457), 'MulensModel.Utils.u_to_gamma', 'mm.Utils.u_to_gamma', (['u'], {}), '(u)\n', (93454, 93457), True, 'import MulensModel as mm\n'), ((95330, 95342), 'numpy.sum', 'np.sum', (['mask'], {}), '(mask)\n', (95336, 95342), True, 'import numpy as np\n'), ((95674, 95705), 'numpy.min', 'np.min', (['(y_value - y_err)[mask]'], {}), '((y_value - y_err)[mask])\n', (95680, 95705), True, 'import numpy as np\n'), ((95734, 95765), 'numpy.max', 'np.max', (['(y_value + y_err)[mask]'], {}), '((y_value + y_err)[mask])\n', (95740, 95765), True, 'import numpy as np\n'), ((96011, 96053), 'numpy.min', 'np.min', (['(residuals - err_mag)[mask][mask_]'], {}), '((residuals - err_mag)[mask][mask_])\n', (96017, 96053), True, 'import numpy as np\n'), ((96082, 96124), 'numpy.max', 'np.max', (['(residuals + err_mag)[mask][mask_]'], {}), '((residuals + err_mag)[mask][mask_])\n', (96088, 96124), True, 'import numpy as np\n'), ((97760, 97772), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (97770, 97772), True, 'from matplotlib import pyplot as plt\n'), ((99463, 99485), 'numpy.min', 'np.min', (['magnifications'], {}), '(magnifications)\n', (99469, 99485), True, 'import numpy as np\n'), ((99497, 99519), 'numpy.max', 'np.max', (['magnifications'], {}), '(magnifications)\n', (99503, 99519), True, 'import numpy as np\n'), ((100083, 100092), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (100090, 100092), True, 'from matplotlib import pyplot as plt\n'), ((12053, 12078), 'MulensModel.__version__.split', 'mm.__version__.split', (['"""."""'], {}), "('.')\n", (12073, 12078), True, 'import MulensModel as mm\n'), ((36485, 36503), 'warnings.warn', 'warnings.warn', (['msg'], {}), '(msg)\n', (36498, 36503), False, 'import warnings\n'), ((41971, 41989), 'os.path.dirname', 'path.dirname', (['root'], {}), '(root)\n', (41983, 41989), False, 'from os import path, sep\n'), ((42015, 
42033), 'os.path.dirname', 'path.dirname', (['root'], {}), '(root)\n', (42027, 42033), False, 'from os import path, sep\n'), ((51669, 51678), 'numpy.log', 'np.log', (['y'], {}), '(y)\n', (51675, 51678), True, 'import numpy as np\n'), ((60736, 60754), 'numpy.array', 'np.array', (['starting'], {}), '(starting)\n', (60744, 60754), True, 'import numpy as np\n'), ((61133, 61151), 'numpy.random.randn', 'np.random.randn', (['n'], {}), '(n)\n', (61148, 61151), True, 'import numpy as np\n'), ((61365, 61386), 'math.log', 'math.log', (['settings[1]'], {}), '(settings[1])\n', (61373, 61386), False, 'import math\n'), ((61405, 61426), 'math.log', 'math.log', (['settings[2]'], {}), '(settings[2])\n', (61413, 61426), False, 'import math\n'), ((66904, 66916), 'math.log', 'math.log', (['(10)'], {}), '(10)\n', (66912, 66916), False, 'import math\n'), ((74409, 74448), 'shutil.rmtree', 'shutil.rmtree', (['base'], {'ignore_errors': '(True)'}), '(base, ignore_errors=True)\n', (74422, 74448), False, 'import shutil\n'), ((80175, 80194), 'numpy.array', 'np.array', (['fractions'], {}), '(fractions)\n', (80183, 80194), True, 'import numpy as np\n'), ((80672, 80728), 'numpy.interp', 'np.interp', (['fractions', 'weighted_quantiles', 'data_[indexes]'], {}), '(fractions, weighted_quantiles, data_[indexes])\n', (80681, 80728), True, 'import numpy as np\n'), ((90584, 90593), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (90591, 90593), True, 'from matplotlib import pyplot as plt\n'), ((90834, 90843), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (90841, 90843), True, 'from matplotlib import pyplot as plt\n'), ((95613, 95630), 'numpy.isnan', 'np.isnan', (['y_value'], {}), '(y_value)\n', (95621, 95630), True, 'import numpy as np\n'), ((97832, 97881), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), "(**self._plots['best model']['legend'])\n", (97842, 97881), True, 'from matplotlib import pyplot as plt\n'), ((61455, 61485), 'numpy.random.uniform', 'np.random.uniform', (['beg', 
'end', 'n'], {}), '(beg, end, n)\n', (61472, 61485), True, 'import numpy as np\n'), ((66193, 66213), 'math.sqrt', 'math.sqrt', (['(2 * np.pi)'], {}), '(2 * np.pi)\n', (66202, 66213), False, 'import math\n'), ((90322, 90331), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (90329, 90331), True, 'from matplotlib import pyplot as plt\n'), ((99306, 99326), 'numpy.logical_not', 'np.logical_not', (['mask'], {}), '(mask)\n', (99320, 99326), True, 'import numpy as np\n'), ((67252, 67264), 'math.log', 'math.log', (['(10)'], {}), '(10)\n', (67260, 67264), False, 'import math\n'), ((90778, 90787), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (90785, 90787), True, 'from matplotlib import pyplot as plt\n'), ((59536, 59554), 'numpy.random.randn', 'np.random.randn', (['(1)'], {}), '(1)\n', (59551, 59554), True, 'import numpy as np\n'), ((81799, 81813), 'numpy.min', 'np.min', (['values'], {}), '(values)\n', (81805, 81813), True, 'import numpy as np\n'), ((81815, 81829), 'numpy.max', 'np.max', (['values'], {}), '(values)\n', (81821, 81829), True, 'import numpy as np\n'), ((58660, 58678), 'numpy.random.randn', 'np.random.randn', (['(1)'], {}), '(1)\n', (58675, 58678), True, 'import numpy as np\n')] |
# encoding=utf8
import logging
import multiprocessing
import threading
import numpy as np
from numpy.random import default_rng
from niapy.util.array import objects_to_array
# Module-level logger used by utilities in this file.
logging.basicConfig()
logger = logging.getLogger('niapy.util.utility')
logger.setLevel('INFO')
# Public names exported by this module.
__all__ = [
    'Algorithm',
    'Individual',
    'default_individual_init',
    'default_numpy_init'
]
def default_numpy_init(task, population_size, rng, **_kwargs):
    r"""Initialize a random population stored as a plain `numpy.ndarray`.

    Args:
        task (Task): Optimization task.
        population_size (int): Number of individuals in population.
        rng (numpy.random.Generator): Random number generator.

    Returns:
        Tuple[numpy.ndarray, numpy.ndarray[float]]:
            1. New population with shape `(population_size, task.dimension)`.
            2. New population function/fitness values.

    """
    shape = (population_size, task.dimension)
    # Sample uniformly within the task's box constraints.
    population = rng.uniform(task.lower, task.upper, shape)
    fitness = np.apply_along_axis(task.eval, 1, population)
    return population, fitness
def default_individual_init(task, population_size, rng, individual_type=None, **_kwargs):
    r"""Initialize `population_size` individuals of type `individual_type`.

    Args:
        task (Task): Optimization task.
        population_size (int): Number of individuals in population.
        rng (numpy.random.Generator): Random number generator.
        individual_type (Optional[Individual]): Class of individual in population.

    Returns:
        Tuple[numpy.ndarray[Individual], numpy.ndarray[float]]:
            1. Initialized individuals.
            2. Initialized individuals function/fitness values.

    """
    # e=True makes each individual evaluate itself on construction,
    # so the fitness values (.f) are available immediately.
    individuals = [individual_type(task=task, rng=rng, e=True)
                   for _ in range(population_size)]
    population = objects_to_array(individuals)
    return population, np.asarray([member.f for member in population])
class Algorithm:
r"""Class for implementing algorithms.
Date:
2018
Author
<NAME>
License:
MIT
Attributes:
Name (List[str]): List of names for algorithm.
rng (numpy.random.Generator): Random generator.
population_size (int): Population size.
initialization_function (Callable[[int, Task, numpy.random.Generator, Dict[str, Any]], Tuple[numpy.ndarray, numpy.ndarray[float]]]):
Population initialization function.
individual_type (Optional[Type[Individual]]): Type of individuals used in population, default value is None for Numpy arrays.
"""
Name = ['Algorithm', 'AAA']
def __init__(self, population_size=50, initialization_function=default_numpy_init, individual_type=None,
seed=None, *args, **kwargs):
r"""Initialize algorithm and create name for an algorithm.
Args:
population_size (Optional[int]): Population size.
initialization_function (Optional[Callable[[int, Task, numpy.random.Generator, Dict[str, Any]], Tuple[numpy.ndarray, numpy.ndarray[float]]]]):
Population initialization function.
individual_type (Optional[Type[Individual]]): Individual type used in population, default is Numpy array.
seed (Optional[int]): Starting seed for random generator.
See Also:
* :func:`niapy.algorithms.Algorithm.set_parameters`
"""
self.population_size = population_size
self.initialization_function = initialization_function
self.individual_type = individual_type
self.rng = default_rng(seed)
self.exception = None
@staticmethod
def info():
r"""Get algorithm information.
Returns:
str: Bit item.
"""
return '''Basic algorithm. No implementation!!!'''
def set_parameters(self, population_size=50, initialization_function=default_numpy_init, individual_type=None,
*args, **kwargs):
r"""Set the parameters/arguments of the algorithm.
Args:
population_size (Optional[int]): Population size.
initialization_function (Optional[Callable[[int, Task, numpy.random.Generator, Dict[str, Any]], Tuple[numpy.ndarray, numpy.ndarray[float]]]]):
Population initialization function.
individual_type (Optional[Type[Individual]]): Individual type used in population, default is Numpy array.
See Also:
* :func:`niapy.algorithms.default_numpy_init`
* :func:`niapy.algorithms.default_individual_init`
"""
self.population_size = population_size
self.initialization_function = initialization_function
self.individual_type = individual_type
def get_parameters(self):
r"""Get parameters of the algorithm.
Returns:
Dict[str, Any]:
* Parameter name (str): Represents a parameter name
* Value of parameter (Any): Represents the value of the parameter
"""
return {
'population_size': self.population_size,
'initialization_function': self.initialization_function,
'individual_type': self.individual_type
}
def random(self, size=None):
r"""Get random distribution of shape size in range from 0 to 1.
Args:
size (Union[None, int, Iterable[int]]): Shape of returned random distribution.
Returns:
Union[numpy.ndarray[float], float]: Random number or numbers :math:`\in [0, 1]`.
"""
return self.rng.random(size)
def uniform(self, low, high, size=None):
r"""Get uniform random distribution of shape size in range from "low" to "high".
Args:
low (Union[float, Iterable[float]]): Lower bound.
high (Union[float, Iterable[float]]): Upper bound.
size (Union[None, int, Iterable[int]]): Shape of returned uniform random distribution.
Returns:
Union[numpy.ndarray[float], float]: Array of numbers :math:`\in [\mathit{Lower}, \mathit{Upper}]`.
"""
return self.rng.uniform(low, high, size)
def normal(self, loc, scale, size=None):
r"""Get normal random distribution of shape size with mean "loc" and standard deviation "scale".
Args:
loc (float): Mean of the normal random distribution.
scale (float): Standard deviation of the normal random distribution.
size (Union[int, Iterable[int]]): Shape of returned normal random distribution.
Returns:
Union[numpy.ndarray[float], float]: Array of numbers.
"""
return self.rng.normal(loc, scale, size)
def standard_normal(self, size=None):
r"""Get standard normal distribution of shape size.
Args:
size (Union[int, Iterable[int]]): Shape of returned standard normal distribution.
Returns:
Union[numpy.ndarray[float], float]: Random generated numbers or one random generated number :math:`\in [0, 1]`.
"""
return self.rng.standard_normal(size)
def integers(self, low, high=None, size=None, skip=None):
r"""Get discrete uniform (integer) random distribution of D shape in range from "low" to "high".
Args:
low (Union[int, Iterable[int]]): Lower integer bound.
If high = None low is 0 and this value is used as high
high (Union[int, Iterable[int]]): One above upper integer bound.
size (Union[None, int, Iterable[int]]): shape of returned discrete uniform random distribution.
skip (Union[None, int, Iterable[int], numpy.ndarray[int]]): numbers to skip.
Returns:
Union[int, numpy.ndarray[int]]: Random generated integer number.
"""
r = self.rng.integers(low, high, size)
return r if skip is None or r not in skip else self.integers(low, high, size, skip)
@staticmethod
def get_best(population, population_fitness, best_x=None, best_fitness=np.inf):
r"""Get the best individual for population.
Args:
population (numpy.ndarray): Current population.
population_fitness (numpy.ndarray): Current populations fitness/function values of aligned individuals.
best_x (Optional[numpy.ndarray]): Best individual.
best_fitness (float): Fitness value of best individual.
Returns:
Tuple[numpy.ndarray, float]:
1. Coordinates of best solution.
2. beset fitness/function value.
"""
ib = np.argmin(population_fitness)
if isinstance(population_fitness, (float, int)) and best_fitness >= population_fitness:
best_x, best_fitness = population, population_fitness
elif isinstance(population_fitness, (np.ndarray, list)) and best_fitness >= population_fitness[ib]:
best_x, best_fitness = population[ib], population_fitness[ib]
return (best_x.x.copy() if isinstance(best_x, Individual) else best_x.copy()), best_fitness
def init_population(self, task):
r"""Initialize starting population of optimization algorithm.
Args:
task (Task): Optimization task.
Returns:
Tuple[numpy.ndarray, numpy.ndarray, Dict[str, Any]]:
1. New population.
2. New population fitness values.
3. Additional arguments.
See Also:
* :func:`niapy.algorithms.Algorithm.set_parameters`
"""
pop, fpop = self.initialization_function(task=task, population_size=self.population_size, rng=self.rng,
individual_type=self.individual_type)
return pop, fpop, {}
def run_iteration(self, task, population, population_fitness, best_x, best_fitness, **params):
r"""Core functionality of algorithm.
This function is called on every algorithm iteration.
Args:
task (Task): Optimization task.
population (numpy.ndarray): Current population coordinates.
population_fitness (numpy.ndarray): Current population fitness value.
best_x (numpy.ndarray): Current generation best individuals coordinates.
best_fitness (float): current generation best individuals fitness value.
**params (Dict[str, Any]): Additional arguments for algorithms.
Returns:
Tuple[numpy.ndarray, numpy.ndarray, numpy.ndarray, float, Dict[str, Any]]:
1. New populations coordinates.
2. New populations fitness values.
3. New global best position/solution
4. New global best fitness/objective value
5. Additional arguments of the algorithm.
See Also:
* :func:`niapy.algorithms.Algorithm.iteration_generator`
"""
return population, population_fitness, best_x, best_fitness, params
def iteration_generator(self, task):
r"""Run the algorithm for a single iteration and return the best solution.
Args:
task (Task): Task with bounds and objective function for optimization.
Returns:
Generator[Tuple[numpy.ndarray, float], None, None]: Generator getting new/old optimal global values.
Yields:
Tuple[numpy.ndarray, float]:
1. New population best individuals coordinates.
2. Fitness value of the best solution.
See Also:
* :func:`niapy.algorithms.Algorithm.init_population`
* :func:`niapy.algorithms.Algorithm.run_iteration`
"""
pop, fpop, params = self.init_population(task)
xb, fxb = self.get_best(pop, fpop)
if task.stopping_condition():
yield xb, fxb
while True:
pop, fpop, xb, fxb, params = self.run_iteration(task, pop, fpop, xb, fxb, **params)
yield xb, fxb
def run_task(self, task):
r"""Start the optimization.
Args:
task (Task): Task with bounds and objective function for optimization.
Returns:
Tuple[numpy.ndarray, float]:
1. Best individuals components found in optimization process.
2. Best fitness value found in optimization process.
See Also:
* :func:`niapy.algorithms.Algorithm.iteration_generator`
"""
algo, xb, fxb = self.iteration_generator(task), None, np.inf
while not task.stopping_condition():
xb, fxb = next(algo)
task.next_iter()
return xb, fxb
def run(self, task):
r"""Start the optimization.
Args:
task (Task): Optimization task.
Returns:
Tuple[numpy.ndarray, float]:
1. Best individuals components found in optimization process.
2. Best fitness value found in optimization process.
See Also:
* :func:`niapy.algorithms.Algorithm.run_task`
"""
try:
r = self.run_task(task)
return r[0], r[1] * task.optimization_type.value
except BaseException as e:
if threading.current_thread() == threading.main_thread() and multiprocessing.current_process().name == 'MainProcess':
raise e
self.exception = e
return None, None
def bad_run(self):
r"""Check if some exceptions where thrown when the algorithm was running.
Returns:
bool: True if some error where detected at runtime of the algorithm, otherwise False
"""
return self.exception is not None
class Individual:
r"""Class that represents one solution in population of solutions.
Date:
2018
Author:
<NAME>
License:
MIT
Attributes:
x (numpy.ndarray): Coordinates of individual.
f (float): Function/fitness value of individual.
"""
def __init__(self, x=None, task=None, e=True, rng=None, **kwargs):
r"""Initialize new individual.
Parameters:
task (Optional[Task]): Optimization task.
rand (Optional[numpy.random.Generator]): Random generator.
x (Optional[numpy.ndarray]): Individuals components.
e (Optional[bool]): True to evaluate the individual on initialization. Default value is True.
"""
self.f = task.optimization_type.value * np.inf if task is not None else np.inf
if x is not None:
self.x = x if isinstance(x, np.ndarray) else np.asarray(x)
elif task is not None:
self.generate_solution(task, default_rng(rng))
if e and task is not None:
self.evaluate(task, rng)
def generate_solution(self, task, rng):
r"""Generate new solution.
Generate new solution for this individual and set it to ``self.x``.
This method uses ``rng`` for getting random numbers.
For generating random components ``rng`` and ``task`` is used.
Args:
task (Task): Optimization task.
rng (numpy.random.Generator): Random numbers generator object.
"""
self.x = rng.uniform(task.lower, task.upper, task.dimension)
def evaluate(self, task, rng=None):
r"""Evaluate the solution.
Evaluate solution ``this.x`` with the help of task.
Task is used for repairing the solution and then evaluating it.
Args:
task (Task): Objective function object.
rng (Optional[numpy.random.Generator]): Random generator.
See Also:
* :func:`niapy.task.Task.repair`
"""
self.x = task.repair(self.x, rng=rng)
self.f = task.eval(self.x)
def copy(self):
r"""Return a copy of self.
Method returns copy of ``this`` object so it is safe for editing.
Returns:
Individual: Copy of self.
"""
return Individual(x=self.x.copy(), e=False, f=self.f)
def __eq__(self, other):
r"""Compare the individuals for equalities.
Args:
other (Union[Any, numpy.ndarray]): Object that we want to compare this object to.
Returns:
bool: `True` if equal or `False` if no equal.
"""
if isinstance(other, np.ndarray):
for e in other:
if self == e:
return True
return False
return np.array_equal(self.x, other.x) and self.f == other.f
def __str__(self):
r"""Print the individual with the solution and objective value.
Returns:
str: String representation of self.
"""
return '%s -> %s' % (self.x, self.f)
def __getitem__(self, i):
r"""Get the value of i-th component of the solution.
Args:
i (int): Position of the solution component.
Returns:
Any: Value of ith component.
"""
return self.x[i]
def __setitem__(self, i, v):
r"""Set the value of i-th component of the solution to v value.
Args:
i (int): Position of the solution component.
v (Any): Value to set to i-th component.
"""
self.x[i] = v
def __len__(self):
r"""Get the length of the solution or the number of components.
Returns:
int: Number of components.
"""
return len(self.x)
| [
"multiprocessing.current_process",
"logging.basicConfig",
"numpy.asarray",
"numpy.argmin",
"numpy.random.default_rng",
"numpy.apply_along_axis",
"numpy.array_equal",
"threading.main_thread",
"threading.current_thread",
"logging.getLogger"
] | [((176, 197), 'logging.basicConfig', 'logging.basicConfig', ([], {}), '()\n', (195, 197), False, 'import logging\n'), ((207, 246), 'logging.getLogger', 'logging.getLogger', (['"""niapy.util.utility"""'], {}), "('niapy.util.utility')\n", (224, 246), False, 'import logging\n'), ((1044, 1082), 'numpy.apply_along_axis', 'np.apply_along_axis', (['task.eval', '(1)', 'pop'], {}), '(task.eval, 1, pop)\n', (1063, 1082), True, 'import numpy as np\n'), ((1849, 1879), 'numpy.asarray', 'np.asarray', (['[x.f for x in pop]'], {}), '([x.f for x in pop])\n', (1859, 1879), True, 'import numpy as np\n'), ((3527, 3544), 'numpy.random.default_rng', 'default_rng', (['seed'], {}), '(seed)\n', (3538, 3544), False, 'from numpy.random import default_rng\n'), ((8582, 8611), 'numpy.argmin', 'np.argmin', (['population_fitness'], {}), '(population_fitness)\n', (8591, 8611), True, 'import numpy as np\n'), ((16509, 16540), 'numpy.array_equal', 'np.array_equal', (['self.x', 'other.x'], {}), '(self.x, other.x)\n', (16523, 16540), True, 'import numpy as np\n'), ((14610, 14623), 'numpy.asarray', 'np.asarray', (['x'], {}), '(x)\n', (14620, 14623), True, 'import numpy as np\n'), ((14696, 14712), 'numpy.random.default_rng', 'default_rng', (['rng'], {}), '(rng)\n', (14707, 14712), False, 'from numpy.random import default_rng\n'), ((13217, 13243), 'threading.current_thread', 'threading.current_thread', ([], {}), '()\n', (13241, 13243), False, 'import threading\n'), ((13247, 13270), 'threading.main_thread', 'threading.main_thread', ([], {}), '()\n', (13268, 13270), False, 'import threading\n'), ((13275, 13308), 'multiprocessing.current_process', 'multiprocessing.current_process', ([], {}), '()\n', (13306, 13308), False, 'import multiprocessing\n')] |
"""Functions for calculating properties of prisms.
Common definitions:
* lamb: wavelength in m
* omega: frequency in rad/s
* n: refractive index
* nlamb: refractive index function of wavelength
* theta_1: incident angle w.r.t. first face normal, increasing away from apex.
* thetap_1: internal refracted angle w.r.t first face normal
* theta_2: incident angle w.r.t. second face normal, increasing away from apex.
* thetap_2: internal refracted angle w.r.t second face normal
* alpha: apex full angle
The angles are defined so that for alpha=0, theta_2=-theta_1. The deflection angle is therefore theta_1+theta_2-alpha
away from the normal.
"""
import numpy as np
from scipy.misc import derivative
def refract(n,theta_1,alpha,return_internals=False):
"""Calculate refraction angle of prism
All angles are in radians. See `optics.prism_pair' for complete definitions.
Args:
return_internals: whether to return the internal angles
Returns:
Returns theta_2 if return_internals is False, (thetap_1,thetap_2,theta_2) otherwise
"""
# All angles w.r.t. associated normal
thetap_1=np.arcsin(np.sin(theta_1)/n) # Internal angle on first face
thetap_2=alpha-thetap_1 # Internal angle on second face
theta_2=np.arcsin(n*np.sin(thetap_2)) # External angle on second face
if return_internals:
return thetap_1,thetap_2,theta_2
else:
return theta_2
def dtheta_2_dn(alpha,thetap_1,theta_2):
"""Calculate derivative of output angle w.r.t refractive index."""
return np.sin(alpha)/(np.cos(thetap_1)*np.cos(theta_2))
def angular_dispersion(alpha,theta_1,**kwargs):
"""Calculate angular dispersion (derivative of angle w.r.t wavelength).
Args:
kwargs can be either nlamb and lamb, or n and dn_dlamb
Returns:
dtheta_2_dlamb, the derivative of the output angle w.r.t wavelength
"""
if 'nlamb' in kwargs:
nlamb=kwargs['nlamb']
lamb=kwargs['lamb']
n=nlamb(lamb)
dn_dlamb=derivative(nlamb,lamb,lamb/100)
else:
n=kwargs['n']
dn_dlamb=kwargs['dn_dlamb']
thetap_1,thetap_2,theta_2=refract(n,theta_1,alpha,return_internals=True)
return dtheta_2_dn(alpha,thetap_1,theta_2)*dn_dlamb
def beam_expansion(alpha,n,theta_1):
thetap_1,thetap_2,theta_2=refract(n,theta_1,alpha,True)
factor=np.cos(theta_2)/np.cos(thetap_2)*np.cos(thetap_1)/np.cos(theta_1)
return factor,thetap_1,thetap_2,theta_2
def minimum_deviation_incident_angle(alpha,n):
return np.arcsin(np.sin(alpha/2)*n) | [
"numpy.sin",
"numpy.cos",
"scipy.misc.derivative"
] | [((1557, 1570), 'numpy.sin', 'np.sin', (['alpha'], {}), '(alpha)\n', (1563, 1570), True, 'import numpy as np\n'), ((2043, 2078), 'scipy.misc.derivative', 'derivative', (['nlamb', 'lamb', '(lamb / 100)'], {}), '(nlamb, lamb, lamb / 100)\n', (2053, 2078), False, 'from scipy.misc import derivative\n'), ((2435, 2450), 'numpy.cos', 'np.cos', (['theta_1'], {}), '(theta_1)\n', (2441, 2450), True, 'import numpy as np\n'), ((1150, 1165), 'numpy.sin', 'np.sin', (['theta_1'], {}), '(theta_1)\n', (1156, 1165), True, 'import numpy as np\n'), ((1284, 1300), 'numpy.sin', 'np.sin', (['thetap_2'], {}), '(thetap_2)\n', (1290, 1300), True, 'import numpy as np\n'), ((1572, 1588), 'numpy.cos', 'np.cos', (['thetap_1'], {}), '(thetap_1)\n', (1578, 1588), True, 'import numpy as np\n'), ((1589, 1604), 'numpy.cos', 'np.cos', (['theta_2'], {}), '(theta_2)\n', (1595, 1604), True, 'import numpy as np\n'), ((2418, 2434), 'numpy.cos', 'np.cos', (['thetap_1'], {}), '(thetap_1)\n', (2424, 2434), True, 'import numpy as np\n'), ((2564, 2581), 'numpy.sin', 'np.sin', (['(alpha / 2)'], {}), '(alpha / 2)\n', (2570, 2581), True, 'import numpy as np\n'), ((2385, 2400), 'numpy.cos', 'np.cos', (['theta_2'], {}), '(theta_2)\n', (2391, 2400), True, 'import numpy as np\n'), ((2401, 2417), 'numpy.cos', 'np.cos', (['thetap_2'], {}), '(thetap_2)\n', (2407, 2417), True, 'import numpy as np\n')] |
import os
import json
import random
import codecs
import logging
import numpy as np
from sklearn.externals import joblib
from typing import List
from google.cloud import storage, bigquery
def download_from_gcs(bucket_dir_name: str, file_name: str):
GCS_BUCKET_NAME = "recsys2020-challenge-wantedly"
PROJECT_ID = "wantedly-individual-naomichi"
client = storage.Client(project=PROJECT_ID)
bucket = client.get_bucket(GCS_BUCKET_NAME)
blob = storage.Blob(
os.path.join(bucket_dir_name, file_name),
bucket
)
content = blob.download_as_string()
print(f"Downloading {file_name} from {blob.path}")
return content
def upload_to_gcs(bucket_dir_name: str, files: List[str]):
GCS_BUCKET_NAME = "recsys2020-challenge-wantedly"
PROJECT_ID = "wantedly-individual-naomichi"
client = storage.Client(project=PROJECT_ID)
bucket = client.get_bucket(GCS_BUCKET_NAME)
for filename in files:
basename = os.path.basename(filename)
blob = storage.Blob(os.path.join(bucket_dir_name, basename), bucket)
print(f"Uploading {basename} to {blob.path}")
blob.upload_from_filename(str(filename))
def seed_everything(seed: int=71, gpu_mode: bool=False):
random.seed(seed)
os.environ["PYTHONHASHSEED"] = str(seed)
np.random.seed(seed)
if gpu_mode:
import tensorflow as tf
import torch
tf.random.set_seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.backends.cudnn.deterministic = True
class MyEncoder(json.JSONEncoder):
""" encode numpy objects
https://wtnvenga.hatenablog.com/entry/2018/05/27/113848
"""
def default(self, obj):
if isinstance(obj, np.integer):
return int(obj)
elif isinstance(obj, np.floating):
return float(obj)
elif isinstance(obj, np.ndarray):
return obj.tolist()
else:
return super(MyEncoder, self).default(obj)
def json_dump(dict_object, save_path) -> None:
f = codecs.open(save_path, 'w', 'utf-8')
json.dump(dict_object, f, indent=4, cls=MyEncoder, ensure_ascii=False)
class Pkl(object):
""" https://github.com/ghmagazine/kagglebook/blob/master/ch04-model-interface/code/util.py
"""
@classmethod
def dump(cls, value, path):
os.makedirs(os.path.dirname(path), exist_ok=True)
joblib.dump(value, path, compress=True)
@classmethod
def load(cls, path):
return joblib.load(path)
def get_logger(module_name=None, save_path=None):
logger = logging.getLogger(module_name)
formatter = logging.Formatter('%(asctime)s [%(name)s] [%(levelname)s] %(message)s')
logger.setLevel(logging.DEBUG)
if save_path is None:
handler = logging.StreamHandler()
handler.setFormatter(formatter)
logger.addHandler(handler)
else:
handler = logging.FileHandler(save_path)
handler.setFormatter(formatter)
logger.addHandler(handler)
return logger
| [
"tensorflow.random.set_seed",
"json.dump",
"sklearn.externals.joblib.dump",
"numpy.random.seed",
"codecs.open",
"logging.FileHandler",
"os.path.basename",
"torch.manual_seed",
"os.path.dirname",
"logging.StreamHandler",
"torch.cuda.manual_seed",
"logging.Formatter",
"google.cloud.storage.Cli... | [((366, 400), 'google.cloud.storage.Client', 'storage.Client', ([], {'project': 'PROJECT_ID'}), '(project=PROJECT_ID)\n', (380, 400), False, 'from google.cloud import storage, bigquery\n'), ((837, 871), 'google.cloud.storage.Client', 'storage.Client', ([], {'project': 'PROJECT_ID'}), '(project=PROJECT_ID)\n', (851, 871), False, 'from google.cloud import storage, bigquery\n'), ((1237, 1254), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (1248, 1254), False, 'import random\n'), ((1304, 1324), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (1318, 1324), True, 'import numpy as np\n'), ((2051, 2087), 'codecs.open', 'codecs.open', (['save_path', '"""w"""', '"""utf-8"""'], {}), "(save_path, 'w', 'utf-8')\n", (2062, 2087), False, 'import codecs\n'), ((2092, 2162), 'json.dump', 'json.dump', (['dict_object', 'f'], {'indent': '(4)', 'cls': 'MyEncoder', 'ensure_ascii': '(False)'}), '(dict_object, f, indent=4, cls=MyEncoder, ensure_ascii=False)\n', (2101, 2162), False, 'import json\n'), ((2583, 2613), 'logging.getLogger', 'logging.getLogger', (['module_name'], {}), '(module_name)\n', (2600, 2613), False, 'import logging\n'), ((2630, 2701), 'logging.Formatter', 'logging.Formatter', (['"""%(asctime)s [%(name)s] [%(levelname)s] %(message)s"""'], {}), "('%(asctime)s [%(name)s] [%(levelname)s] %(message)s')\n", (2647, 2701), False, 'import logging\n'), ((483, 523), 'os.path.join', 'os.path.join', (['bucket_dir_name', 'file_name'], {}), '(bucket_dir_name, file_name)\n', (495, 523), False, 'import os\n'), ((967, 993), 'os.path.basename', 'os.path.basename', (['filename'], {}), '(filename)\n', (983, 993), False, 'import os\n'), ((1404, 1428), 'tensorflow.random.set_seed', 'tf.random.set_seed', (['seed'], {}), '(seed)\n', (1422, 1428), True, 'import tensorflow as tf\n'), ((1437, 1460), 'torch.manual_seed', 'torch.manual_seed', (['seed'], {}), '(seed)\n', (1454, 1460), False, 'import torch\n'), ((1469, 1497), 
'torch.cuda.manual_seed', 'torch.cuda.manual_seed', (['seed'], {}), '(seed)\n', (1491, 1497), False, 'import torch\n'), ((2402, 2441), 'sklearn.externals.joblib.dump', 'joblib.dump', (['value', 'path'], {'compress': '(True)'}), '(value, path, compress=True)\n', (2413, 2441), False, 'from sklearn.externals import joblib\n'), ((2500, 2517), 'sklearn.externals.joblib.load', 'joblib.load', (['path'], {}), '(path)\n', (2511, 2517), False, 'from sklearn.externals import joblib\n'), ((2782, 2805), 'logging.StreamHandler', 'logging.StreamHandler', ([], {}), '()\n', (2803, 2805), False, 'import logging\n'), ((2909, 2939), 'logging.FileHandler', 'logging.FileHandler', (['save_path'], {}), '(save_path)\n', (2928, 2939), False, 'import logging\n'), ((1022, 1061), 'os.path.join', 'os.path.join', (['bucket_dir_name', 'basename'], {}), '(bucket_dir_name, basename)\n', (1034, 1061), False, 'import os\n'), ((2356, 2377), 'os.path.dirname', 'os.path.dirname', (['path'], {}), '(path)\n', (2371, 2377), False, 'import os\n')] |
"""
example cmdline:
python test/optimizer/mo/benchmark_mo_gpflowopt_lightgbm.py --datasets spambase --n 200 --rep 1 --start_id 0
"""
import os
import sys
import time
from functools import partial
import numpy as np
import argparse
import pickle as pkl
import gpflow
import gpflowopt
sys.path.insert(0, os.getcwd())
from test.test_utils import timeit, seeds
from test.test_utils import check_datasets, load_data
from mo_benchmark_function import LightGBM
from sklearn.model_selection import train_test_split
from sklearn.metrics import balanced_accuracy_score, f1_score, accuracy_score
from litebo.utils.multi_objective import Hypervolume
parser = argparse.ArgumentParser()
parser.add_argument('--n', type=int, default=200)
parser.add_argument('--rep', type=int, default=1)
parser.add_argument('--start_id', type=int, default=0)
parser.add_argument('--datasets', type=str) # todo one dataset only
args = parser.parse_args()
max_runs = args.n
rep = args.rep
start_id = args.start_id
mth = 'gpflowopt-hvpoi'
dataset_str = args.datasets
dataset_list = dataset_str.split(',')
data_dir = './test/data/'
check_datasets(dataset_list, data_dir)
dataset = dataset_list[0] # todo one dataset only
# set problem
from mo_benchmark_function import get_setup_lightgbm
setup = get_setup_lightgbm(dataset)
# multi_objective_func = setup['multi_objective_func']
cs = setup['cs']
# run_nsgaii = setup['run_nsgaii']
problem_str = setup['problem_str']
num_inputs = setup['num_inputs']
num_objs = setup['num_objs']
referencePoint = setup['referencePoint']
real_hv = setup['real_hv']
time_limit_per_trial = 2 * setup['time_limit']
def multi_objective_func(config, x, y):
config = np.atleast_2d(config)
assert config.shape == (1, num_inputs)
config = config.flatten()
param = config.tolist()
param[0] = int(param[0]) * 50 # n_estimators *= 50
param[2] = int(param[2]) # num_leaves
param[3] = int(param[3]) # min_child_samples
print('config =', param)
"""
Caution:
from functools import partial
multi_objective_func = partial(multi_objective_func, x=x, y=y)
"""
start_time = time.time()
model = LightGBM(*param)
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2, stratify=y, random_state=1)
model.fit(x_train, y_train)
y_pred = model.predict(x_test)
time_taken = time.time() - start_time
acc = -balanced_accuracy_score(y_test, y_pred) # minimize
y1 = acc
y2 = time_taken
res = [y1, y2]
if any(res[i] > referencePoint[i] for i in range(len(referencePoint))):
print('[ERROR]=== objective evaluate error! objs =', res, 'referencePoint =', referencePoint)
res = [ref - 1e-5 for ref in referencePoint]
print('objs =', res)
return np.array(res).reshape(1, num_objs)
_x, _y = load_data(dataset, data_dir)
multi_objective_func = partial(multi_objective_func, x=_x, y=_y)
# Setup input domain # Caution: param order!!!
# n_estimators *= 50
domain = gpflowopt.domain.ContinuousParameter("n_estimators", 2, 20) + \
gpflowopt.domain.ContinuousParameter("learning_rate", 1e-3, 0.3) + \
gpflowopt.domain.ContinuousParameter("num_leaves", 31, 2047) + \
gpflowopt.domain.ContinuousParameter("min_child_samples", 5, 30) + \
gpflowopt.domain.ContinuousParameter("subsample", 0.7, 1) + \
gpflowopt.domain.ContinuousParameter("colsample_bytree", 0.7, 1)
# Initial evaluations
init_num = 10
assert max_runs > init_num
# X_init = gpflowopt.design.RandomDesign(init_num, domain).generate()
X_init = gpflowopt.design.LatinHyperCube(init_num, domain).generate()
Y_init = np.vstack([multi_objective_func(X_init[i, :]) for i in range(init_num)])
with timeit('%s all' % (mth,)):
for run_i in range(start_id, start_id + rep):
seed = seeds[run_i]
np.random.seed(seed)
with timeit('%s %d %d' % (mth, run_i, seed)):
# One model for each objective
objective_models = [
gpflow.gpr.GPR(X_init.copy(), Y_init[:, [i]].copy(), gpflow.kernels.Matern52(2, ARD=True))
for i in range(Y_init.shape[1])]
for model in objective_models:
model.likelihood.variance = 0.01
hvpoi = gpflowopt.acquisition.HVProbabilityOfImprovement(objective_models)
# First setup the optimization strategy for the acquisition function
# Combining MC step followed by L-BFGS-B
acquisition_opt = gpflowopt.optim.StagedOptimizer([gpflowopt.optim.MCOptimizer(domain, 1000),
gpflowopt.optim.SciPyOptimizer(domain)])
# Then run the BayesianOptimizer for (max_runs-init_num) iterations
optimizer = gpflowopt.BayesianOptimizer(domain, hvpoi, optimizer=acquisition_opt, verbose=True)
result = optimizer.optimize(multi_objective_func, n_iter=max_runs - init_num)
pf = optimizer.acquisition.pareto.front.value
# pf, dom = gpflowopt.pareto.non_dominated_sort(hvpoi.data[1])
# print(hvpoi.data[1])
# Save result
data = optimizer.acquisition.data # data=(X, Y)
hv_diffs = []
for i in range(data[1].shape[0]):
# hv = gpflowopt.pareto.Pareto(data[1][:i+1]).hypervolume(referencePoint) # ref_point problem
hv = Hypervolume(referencePoint).compute(data[1][:i + 1])
hv_diff = real_hv - hv
hv_diffs.append(hv_diff)
print(seed, mth, 'pareto num:', pf.shape[0])
print(seed, 'real hv =', real_hv)
print(seed, 'hv_diffs:', hv_diffs)
timestamp = time.strftime('%Y-%m-%d-%H-%M-%S', time.localtime(time.time()))
dir_path = 'logs/mo_benchmark_%s_%d/%s/' % (problem_str, max_runs, mth)
file = 'benchmark_%s_%04d_%s.pkl' % (mth, seed, timestamp)
if not os.path.exists(dir_path):
os.makedirs(dir_path)
with open(os.path.join(dir_path, file), 'wb') as f:
save_item = (hv_diffs, pf, data)
pkl.dump(save_item, f)
print(dir_path, file, 'saved!')
| [
"gpflowopt.design.LatinHyperCube",
"pickle.dump",
"numpy.random.seed",
"argparse.ArgumentParser",
"sklearn.model_selection.train_test_split",
"test.test_utils.timeit",
"os.path.join",
"test.test_utils.load_data",
"numpy.atleast_2d",
"mo_benchmark_function.LightGBM",
"os.path.exists",
"mo_bench... | [((654, 679), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (677, 679), False, 'import argparse\n'), ((1107, 1145), 'test.test_utils.check_datasets', 'check_datasets', (['dataset_list', 'data_dir'], {}), '(dataset_list, data_dir)\n', (1121, 1145), False, 'from test.test_utils import check_datasets, load_data\n'), ((1274, 1301), 'mo_benchmark_function.get_setup_lightgbm', 'get_setup_lightgbm', (['dataset'], {}), '(dataset)\n', (1292, 1301), False, 'from mo_benchmark_function import get_setup_lightgbm\n'), ((2834, 2862), 'test.test_utils.load_data', 'load_data', (['dataset', 'data_dir'], {}), '(dataset, data_dir)\n', (2843, 2862), False, 'from test.test_utils import check_datasets, load_data\n'), ((2886, 2927), 'functools.partial', 'partial', (['multi_objective_func'], {'x': '_x', 'y': '_y'}), '(multi_objective_func, x=_x, y=_y)\n', (2893, 2927), False, 'from functools import partial\n'), ((308, 319), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (317, 319), False, 'import os\n'), ((1676, 1697), 'numpy.atleast_2d', 'np.atleast_2d', (['config'], {}), '(config)\n', (1689, 1697), True, 'import numpy as np\n'), ((2145, 2156), 'time.time', 'time.time', ([], {}), '()\n', (2154, 2156), False, 'import time\n'), ((2170, 2186), 'mo_benchmark_function.LightGBM', 'LightGBM', (['*param'], {}), '(*param)\n', (2178, 2186), False, 'from mo_benchmark_function import LightGBM\n'), ((2226, 2291), 'sklearn.model_selection.train_test_split', 'train_test_split', (['x', 'y'], {'test_size': '(0.2)', 'stratify': 'y', 'random_state': '(1)'}), '(x, y, test_size=0.2, stratify=y, random_state=1)\n', (2242, 2291), False, 'from sklearn.model_selection import train_test_split\n'), ((3384, 3448), 'gpflowopt.domain.ContinuousParameter', 'gpflowopt.domain.ContinuousParameter', (['"""colsample_bytree"""', '(0.7)', '(1)'], {}), "('colsample_bytree', 0.7, 1)\n", (3420, 3448), False, 'import gpflowopt\n'), ((3741, 3766), 'test.test_utils.timeit', 'timeit', (["('%s 
all' % (mth,))"], {}), "('%s all' % (mth,))\n", (3747, 3766), False, 'from test.test_utils import timeit, seeds\n'), ((2377, 2388), 'time.time', 'time.time', ([], {}), '()\n', (2386, 2388), False, 'import time\n'), ((2413, 2452), 'sklearn.metrics.balanced_accuracy_score', 'balanced_accuracy_score', (['y_test', 'y_pred'], {}), '(y_test, y_pred)\n', (2436, 2452), False, 'from sklearn.metrics import balanced_accuracy_score, f1_score, accuracy_score\n'), ((3313, 3370), 'gpflowopt.domain.ContinuousParameter', 'gpflowopt.domain.ContinuousParameter', (['"""subsample"""', '(0.7)', '(1)'], {}), "('subsample', 0.7, 1)\n", (3349, 3370), False, 'import gpflowopt\n'), ((3591, 3640), 'gpflowopt.design.LatinHyperCube', 'gpflowopt.design.LatinHyperCube', (['init_num', 'domain'], {}), '(init_num, domain)\n', (3622, 3640), False, 'import gpflowopt\n'), ((3854, 3874), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (3868, 3874), True, 'import numpy as np\n'), ((2788, 2801), 'numpy.array', 'np.array', (['res'], {}), '(res)\n', (2796, 2801), True, 'import numpy as np\n'), ((3235, 3299), 'gpflowopt.domain.ContinuousParameter', 'gpflowopt.domain.ContinuousParameter', (['"""min_child_samples"""', '(5)', '(30)'], {}), "('min_child_samples', 5, 30)\n", (3271, 3299), False, 'import gpflowopt\n'), ((3888, 3927), 'test.test_utils.timeit', 'timeit', (["('%s %d %d' % (mth, run_i, seed))"], {}), "('%s %d %d' % (mth, run_i, seed))\n", (3894, 3927), False, 'from test.test_utils import timeit, seeds\n'), ((4274, 4340), 'gpflowopt.acquisition.HVProbabilityOfImprovement', 'gpflowopt.acquisition.HVProbabilityOfImprovement', (['objective_models'], {}), '(objective_models)\n', (4322, 4340), False, 'import gpflowopt\n'), ((4790, 4877), 'gpflowopt.BayesianOptimizer', 'gpflowopt.BayesianOptimizer', (['domain', 'hvpoi'], {'optimizer': 'acquisition_opt', 'verbose': '(True)'}), '(domain, hvpoi, optimizer=acquisition_opt,\n verbose=True)\n', (4817, 4877), False, 'import gpflowopt\n'), ((3161, 
3221), 'gpflowopt.domain.ContinuousParameter', 'gpflowopt.domain.ContinuousParameter', (['"""num_leaves"""', '(31)', '(2047)'], {}), "('num_leaves', 31, 2047)\n", (3197, 3221), False, 'import gpflowopt\n'), ((5973, 5997), 'os.path.exists', 'os.path.exists', (['dir_path'], {}), '(dir_path)\n', (5987, 5997), False, 'import os\n'), ((6015, 6036), 'os.makedirs', 'os.makedirs', (['dir_path'], {}), '(dir_path)\n', (6026, 6036), False, 'import os\n'), ((6166, 6188), 'pickle.dump', 'pkl.dump', (['save_item', 'f'], {}), '(save_item, f)\n', (6174, 6188), True, 'import pickle as pkl\n'), ((3010, 3069), 'gpflowopt.domain.ContinuousParameter', 'gpflowopt.domain.ContinuousParameter', (['"""n_estimators"""', '(2)', '(20)'], {}), "('n_estimators', 2, 20)\n", (3046, 3069), False, 'import gpflowopt\n'), ((3083, 3148), 'gpflowopt.domain.ContinuousParameter', 'gpflowopt.domain.ContinuousParameter', (['"""learning_rate"""', '(0.001)', '(0.3)'], {}), "('learning_rate', 0.001, 0.3)\n", (3119, 3148), False, 'import gpflowopt\n'), ((4074, 4110), 'gpflow.kernels.Matern52', 'gpflow.kernels.Matern52', (['(2)'], {'ARD': '(True)'}), '(2, ARD=True)\n', (4097, 4110), False, 'import gpflow\n'), ((4538, 4579), 'gpflowopt.optim.MCOptimizer', 'gpflowopt.optim.MCOptimizer', (['domain', '(1000)'], {}), '(domain, 1000)\n', (4565, 4579), False, 'import gpflowopt\n'), ((4644, 4682), 'gpflowopt.optim.SciPyOptimizer', 'gpflowopt.optim.SciPyOptimizer', (['domain'], {}), '(domain)\n', (4674, 4682), False, 'import gpflowopt\n'), ((5785, 5796), 'time.time', 'time.time', ([], {}), '()\n', (5794, 5796), False, 'import time\n'), ((6059, 6087), 'os.path.join', 'os.path.join', (['dir_path', 'file'], {}), '(dir_path, file)\n', (6071, 6087), False, 'import os\n'), ((5427, 5454), 'litebo.utils.multi_objective.Hypervolume', 'Hypervolume', (['referencePoint'], {}), '(referencePoint)\n', (5438, 5454), False, 'from litebo.utils.multi_objective import Hypervolume\n')] |
from datetime import date
from numpy import shape
from matplotlib.dates import MonthLocator, DateFormatter
class YearPlotter:
    """Plot 365-day time series against calendar dates (Jan 1 - Dec 31).

    The x axis is tick-labelled with month abbreviations at three-month
    intervals. The calendar year used for the date axis is arbitrary (a
    non-leap year), since only month/day placement matters.
    """

    # Number of samples expected per series: one value per day of a non-leap year.
    DAYS_PER_YEAR = 365

    def __init__(self):
        # Ordinals for Jan 1 - Dec 31 of year 2 CE (a non-leap year, so
        # exactly 365 days); only the month/day positions are used.
        start = self.DAYS_PER_YEAR + 1
        self.dates = [date.fromordinal(i)
                      for i in range(start, start + self.DAYS_PER_YEAR)]
        self.monthsFmt = DateFormatter("%b")
        # Tick at the first day of every third month.
        self.months = MonthLocator(range(1, 13), bymonthday=1, interval=3)

    def plot(self, T, fig, ax, label='', labels=None, title=None):
        """Plot T (shape (365,) or (365, k)) on ax with a month-formatted x axis.

        Parameters
        ----------
        T : array-like
            First dimension must be 365 (one value per day). A 2-D array
            is plotted as one line per column.
        fig, ax
            Matplotlib Figure and Axes to draw on.
        label : str
            Legend label used when T is 1-D.
        labels : sequence of str, optional
            Per-column legend labels when T is 2-D; defaults to column indices.
        title : str, optional
            Title to set on the Axes.

        Raises
        ------
        ValueError
            If the first dimension of T is not 365.
        """
        shp = shape(T)
        if shp[0] != self.DAYS_PER_YEAR:
            raise ValueError("First dimension of T should be 365. Shape(T)=" + str(shp))
        if len(shp) == 1:
            ax.plot(self.dates, T, label=label)
        else:
            if labels is None:
                labels = [str(i) for i in range(shp[1])]
            for i in range(shp[1]):
                ax.plot(self.dates, T[:, i], label=labels[i])
        ax.xaxis.set_major_locator(self.months)
        ax.xaxis.set_major_formatter(self.monthsFmt)
        if title is not None:
            ax.set_title(title)
        # rotate and align the tick labels so they look better
        fig.autofmt_xdate()
        ax.grid()
        ax.legend()
| [
"numpy.shape",
"matplotlib.dates.DateFormatter",
"datetime.date.fromordinal"
] | [((270, 289), 'matplotlib.dates.DateFormatter', 'DateFormatter', (['"""%b"""'], {}), "('%b')\n", (283, 289), False, 'from matplotlib.dates import MonthLocator, DateFormatter\n'), ((519, 527), 'numpy.shape', 'shape', (['T'], {}), '(T)\n', (524, 527), False, 'from numpy import shape\n'), ((192, 211), 'datetime.date.fromordinal', 'date.fromordinal', (['i'], {}), '(i)\n', (208, 211), False, 'from datetime import date\n'), ((635, 643), 'numpy.shape', 'shape', (['T'], {}), '(T)\n', (640, 643), False, 'from numpy import shape\n')] |
# Princeton University licenses this file to You under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License. You may obtain a copy of the License at:
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and limitations under the License.
# ********************************************** Component ************************************************************
"""
Contents
--------
* `Component_Overview`
* `Component_Creation`
* `Component_Deferred_Init`
* `Component_Structure`
* `Component_Structural_Attributes`
* `Variable <Component_Variable>`
* `Function <Component_Function>`
* `Value <Component_Value>`
* `Log <Component_Log>`
* `Name <Component_Name>`
* `Preferences <Component_Prefs>`
* `User_Modifiable_Parameters`
COMMENT:
* `Methods <Component_Methods>`
COMMENT
* `Component_Execution`
* `Component_Execution_Initialization`
* `Component_Execution_Termination`
* `Component_Execution_Count_and_Time`
* `Component_Class_Reference`
.. _Component_Overview:
Overview
--------
Component is the base class for all of the objects used to create `Compositions <Composition>` in PsyNeuLink.
It defines a common set of attributes possessed, and methods used by all Component objects.
.. _Component_Creation:
Creating a Component
--------------------
A Component is never created by calling the constructor for the Component base class. However, its ``__init__()``
method is always called when a Component subclass is instantiated; that, in turn, calls a standard set of methods
(listed `below <Component_Methods>`) as part of the initialization procedure. Every Component has a core set of
`configurable parameters <Parameters>` that can be specified in the arguments of the constructor, as well
as additional parameters and attributes that may be specific to particular Components, many of which can be modified
by the user, and some of which provide useful information about the Component (see `User_Modifiable_Parameters`
and `Informational Attributes` below).
.. _Component_Deferred_Init:
*Deferred Initialization*
~~~~~~~~~~~~~~~~~~~~~~~~~
If information necessary to complete initialization is not specified in the constructor (e.g, the **owner** for a
`Port <Port_Base.owner>`, or the **sender** or **receiver** for a `Projection <Projection_Structure>`), then its
full initialization is deferred until its the information is available (e.g., the `Port <Port>` is assigned to a
`Mechanism <Mechanism>`, or a `Projection <Projection>` is assigned its `sender <Projection_Base.sender>` and `receiver
<Projection_Base.receiver>`). This allows Components to be created before all of the information they require is
available (e.g., at the beginning of a script). However, for the Component to be operational, its initialization must
be completed by a call to it `deferred_init` method. This is usually done automatically when the Component is
assigned to another Component to which it belongs (e.g., assigning a Port to a Mechanism) or to a Composition (e.g.,
a Projection to the `pathway <Process.pathway>` of a `Process`), as appropriate.
.. _Component_Structure:
Component Structure
-------------------
.. _Component_Structural_Attributes:
*Core Structural Attributes*
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Every Component has the following set of core structural attributes that can be specified in its constructor using the
arguments listed below. These attributes are not meant to be changed by the user once the component is constructed,
with the one exception of `prefs <Component_Prefs>`.
.. _Component_Type:
* **componentType** - specifies the type of Component.
.. _Component_Variable:
* **variable** - used as the input to its `function <Component_Function>`. Specification of the **default_variable**
argument in the constructor for a Component determines both its format (e.g., whether its value is numeric, its
dimensionality and shape if it is an array, etc.) as well as its `default value <Component.defaults>` (the value
used when the Component is executed and no input is provided), and takes precedence over the specification of `size
<Component_Size>`.
.. technical_note::
Internally, the attribute **variable** is not directly used as input to functions, to allow for parallelization.
The attribute is maintained as a way for the user to monitor variable along the execution chain.
During parallelization however, the attribute may not accurately represent the most current value of variable
being used, due to asynchrony inherent to parallelization.
.. _Component_Size:
* **size** - the dimension of the `variable <Component.variable>` attribute. The **size** argument of the
constructor for a Component can be used as a convenient method for specifying the `variable <Component_Variable>`,
attribute in which case it will be assigned as an array of zeros of the specified size. For example,
setting **size** = 3 is equivalent to setting **variable** = [0, 0, 0] and setting **size** = [4, 3] is equivalent
to setting **variable** = [[0, 0, 0, 0], [0, 0, 0]].
.. note::
The size attribute serves a role similar to
`shape <https://numpy.org/doc/stable/reference/generated/numpy.shape.html> in Numpy`_, with the difference that
size permits the specification of `ragged arrays <https://en.wikipedia.org/wiki/Jagged_array>`_ -- that is, ones
that have elements of varying lengths, such as [[1,2],[3,4,5]].
.. _Component_Function:
* **function** - determines the computation that a Component carries out. It is always a PsyNeuLink `Function
<Function>` object (itself also a PsyNeuLink Component).
.. note::
The `function <Component.function>` of a Component can be assigned either a `Function` object or any other
callable object in python. If the latter is assigned, it is "wrapped" in a `UserDefinedFunction`.
All Components have a default `function <Component.function>` (with a default set of parameters), that is used if it
is not otherwise specified. The `function <Component.function>` can be specified in the
**function** argument of the constructor for the Component, using one of the following:
* **class** - this must be a subclass of `Function <Function>`, as in the following example::
my_component = SomeComponent(function=SomeFunction)
This will create a default instance of the specified subclass, using default values for its parameters.
* **Function** - this can be either an existing `Function <Function>` object or the constructor for one, as in the
following examples::
my_component = SomeComponent(function=SomeFunction)
or
some_function = SomeFunction(some_param=1)
my_component = SomeComponent(some_function)
The specified Function will be used as a template to create a new Function object that is assigned to the
`function` attribute of the Component.
.. note::
In the current implementation of PsyNeuLink, if a `Function <Function>` object (or the constructor for one) is
used to specify the `function <Component.function>` attribute of a Component, the Function object specified (or
created) will only *itself* be assigned to the Component if it does not already belong to another Component.
Otherwise, it is copied, and the copy is assigned to the Component.
This is so that `Functions <Function>` can be used as templates for
more than one Component, without being assigned simultaneously to multiple Components.
A `function <Component.function>` can also be specified in an entry of a
`parameter specification dictionary <ParameterPort_Specification>` assigned to the
**params** argument of the constructor for the Component, with the keyword *FUNCTION* as its key, and one of the
specifications above as its value, as in the following example::
my_component = SomeComponent(params={FUNCTION:SomeFunction(some_param=1)})
.. _Component_Value:
* **value** - the `value <Component.value>` attribute contains the result (return value) of the Component's
`function <Component.function>` after the function is called.
..
.. _Component_Log:
* **log** - the `log <Component.log>` attribute contains the Component's `Log`, that can be used to record its
`value <Component.value>`, as well as that of Components that belong to it, during initialization, validation,
execution and learning. It also has four convenience methods -- `loggable_items <Log.loggable_items>`, `set_log_conditions
<Log.set_log_conditions>`, `log_values <Log.log_values>` and `logged_items <Log.logged_items>` -- that provide access to the
corresponding methods of its Log, used to identify, configure and track items for logging.
..
.. _Component_Name:
* **name** - the `name <Component.name>` attribute contains the name assigned to the Component when it was created.
If it was not specified, a default is assigned by the `registry <Registry>` for subclass (see `Registry_Naming` for
conventions used in assigning default names and handling of duplicate names).
..
.. _Component_Prefs:
* **prefs** - the `prefs <Component.prefs>` attribute contains the `PreferenceSet` assigned to the Component when
it was created. If it was not specified, a default is assigned using `classPreferences` defined in
COMMENT:
THIS SEEMS TO BE INCORRECT:
``__init__.py``
COMMENT
`BasePreferences`
Each individual preference is accessible as an attribute of the Component, the name of which is the name of the
preference (see `Preferences` for details).
.. _User_Modifiable_Parameters:
*Parameters*
~~~~~~~~~~~~
.. _Component_Parameters:
A Component defines its `parameters <Parameters>` in its *parameters* attribute, which contains a collection of
`Parameter` objects, each of which stores a Parameter's values, `default values <Component.defaults>`, and various
`properties <Parameter_Attributes_Table>` of the parameter.
* `Parameters <Component.Parameters>` - a `Parameters class <Parameters>` defining parameters and their default values
that are used for all Components, unless overridden.
All of the parameters listed in the *parameters* class can be modified by the user (as described above). Some
can also be modified by `ControlSignals <ControlSignal>` when a `Composition executes <Composition_Execution>`.
In general, only parameters that take numerical values and/or do not affect the structure, mode of operation,
or format of the values associated with a Component can be subject to modulation. For example, for a
`TransferMechanism`, `clip <TransferMechanism.clip>`, `initial_value <TransferMechanism.initial_value>`,
`integrator_mode <TransferMechanism.integrator_mode>`, `input_ports <Mechanism_Base.input_ports>`,
`output_ports`, and `function <Mechanism_Base.function>`, are all listed in parameters, and are user-modifiable,
but are not subject to modulation; whereas `noise <TransferMechanism.noise>` and `integration_rate
<TransferMechanism.integration_rate>` can all be subject to modulation. Parameters that are subject to modulation
have the `modulable <Parameter.modulable>` attribute set to True and are associated with a `ParameterPort` to which
the ControlSignals can project (by way of a `ControlProjection`).
COMMENT:
FIX: ADD DISCUSSION ABOUT HOW TO ASSIGN DEFAULTS HERE 5/8/20
COMMENT
.. _Component_Function_Params:
* **initial_shared_parameters** - the `initial_shared_parameters <Component.function>` attribute contains a
dictionary of any parameters for the Component's functions or attributes, to be used to
instantiate the corresponding object. Each entry is the name of a parameter, and its value is the value of that parameter.
The parameters for a function can be specified when the Component is created in one of the following ways:
* in an argument of the **Component's constructor** -- if all of the allowable functions for a Component's
  `function <Component.function>` share some or all of their parameters in common, the shared parameters may appear
as arguments in the constructor of the Component itself, which can be used to set their values.
* in an entry of a `parameter specification dictionary <ParameterPort_Specification>` assigned to the
**params** argument of the constructor for the Component. The entry must use the keyword
FUNCTION_PARAMS as its key, and its value must be a dictionary containing the parameters and their values.
The key for each entry in the FUNCTION_PARAMS dictionary must be the name of a parameter, and its value the
parameter's value, as in the example below::
my_component = SomeComponent(function=SomeFunction
params={FUNCTION_PARAMS:{SOME_PARAM=1, SOME_OTHER_PARAM=2}})
The parameters of functions for some Components may allow other forms of specification (see
`ParameterPort_Specification` for details concerning different ways in which the value of a
parameter can be specified).
COMMENT:
FIX: STATEMENT ABOVE ABOUT MODIFYING EXECUTION COUNT VIOLATES THIS DEFINITION, AS PROBABLY DO OTHER ATTRIBUTES
* parameters are things that govern the operation of the Mechanism (including its function) and/or can be
modified/modulated
* attributes include parameters, but also read-only attributes that reflect but do not determine the operation
(e.g., EXECUTION_COUNT)
COMMENT
.. _Component_Stateful_Parameters:
* **stateful_parameters** - a list containing all of the Component's `stateful parameters <Parameter_Statefulness>`.
COMMENT:
DESCRIPTION HERE
COMMENT
COMMENT:
.. _Component_Methods:
*Component Methods*
~~~~~~~~~~~~~~~~~~~
FOR DEVELOPERS:
There are two sets of methods that belong to every Component: one set that is called when it is initialized; and
another set that can be called to perform various operations common to all Components. Each of these is described
briefly below. All of these methods can be overridden by subclasses to implement customized operations, however
it is strongly recommended that the method be called on super() at some point, so that the standard operations are
carried out. Whether customization operations should be performed before or after the call to super is discussed in
the descriptions below where relevant.
.. _Component_Initialization_Methods:
Initialization Methods
^^^^^^^^^^^^^^^^^^^^^^
These methods can be overridden by the subclass to customize the initialization process, but should always call the
corresponding method of the Component base class (using ``super``) to insure full initialization. There are two
categories of initializion methods: validation and instantiation.
.. _Component_Validation_Methods:
* **Validation methods** perform a strictly *syntactic* check, to determine if a value being validated conforms
to the format expected for it by the Component (i.e., the type of the value and, if it is iterable, the type its
elements and/or its length). The value itself is not checked in any other way (e.g., whether it equals a particular
  value or falls in a specified range). If the validation fails, an exception is raised. Validation methods never
  change the actual value of an attribute, but they may change its format (e.g., from a list to an ndarray) to
comply with requirements of the Component.
* `_validate_variable <Component._validate_variable>` validates the value provided to the keyword:`variable`
argument in the constructor for the Component. If it is overridden, customized validation should generally
performed *prior* to the call to super(), to allow final processing by the Component base class.
* `_validate_params <Component._validate_params>` validates the value of any parameters specified in the
constructor for the Component (whether they are made directly in the argument for a parameter, or in a
`parameter specification dictionary <ParameterPort_Specification>`. If it is overridden by a subclass,
customized validation should generally be performed *after* the call to super().
* **Instantiation methods** create, assign, and/or perform *semantic* checks on the values of Component attributes.
Semantic checks may include value and/or range checks, as well as checks of formatting and/or value
compatibility with other attributes of the Component and/or the attributes of other Components (for example, the
  _instantiate_function method checks that the input of the Component's `function <Component.function>` is compatible
with its `variable <Component.variable>`).
* `_handle_size <Component._handle_size>` converts the `variable <Component.variable>` and `size <Component.size>`
arguments to the correct dimensions (for `Mechanism <Mechanism>`, this is a 2D array and 1D
array, respectively). If **variable** is not passed as an argument, this method attempts to infer `variable
<Component.variable>` from the **size** argument, and vice versa if the **size** argument is missing.
The _handle_size method then checks that the **size** and **variable** arguments are compatible.
* `_instantiate_defaults <Component._instantiate_defaults>` first calls the validation methods, and then
assigns the default values for all of the attributes of the instance of the Component being created.
_instantiate_attributes_before_function
_instantiate_function
_instantiate_attributes_after_function
.. _Component_Callable_Methods:
Callable Methods
^^^^^^^^^^^^^^^^
initialize
COMMENT
.. _Component_Assign_Params:
* **reset_params** - reset the value of all parameters to a set of default values as specified in its **mode**
argument, using a value of `ResetMode <Component_ResetMode>`.
.. _Component_Execution:
Execution
---------
A Component is executed when its `execute <Component.execute>` method is called, which in turn calls its `function
<Component_Function>`.
.. _Component_Lazy_Updating:
*Lazy Updating*
~~~~~~~~~~~~~~~
In general, only `Compositions <Composition>` are executed from the command line (i.e., from the console or in a
script). `Mechanisms <Mechanism>` can also be executed, although this is usually just for the purposes of demonstration
or debugging, and `Functions <Function>` can only be executed if they are standalone (that is, they do not belong to
another Component). All other Components are executed only a Component that depends on them to do so. This can be
one to which a Components belongs (such as the Mechanism to which a `Port` belongs) or that otherwise requires it to
execute (for example, a updating a `Port` requires its `afferent Projections <Port_Projections>` to `execute
<Port_Execution>`). This is referred to as "lazy updating", since it means that most Components don't execute unless
and until they are required to do so. While this reduces unnecessary computation, it can sometimes be confusing. For
example, when `learning <Composition_Learning>` occurs in a Composition, the modification to the `matrix
<MappingProjection.matrix>` parameter of a `MappingProjection` that occurs on a given `TRIAL <TimeScale.TRIAL>`
does not actually appear in its `value <ParameterPort>` until the next `TRIAL <TimeScale.TRIAL>`, since it requires
that the ParameterPort for the `matrix <MappingProjection.matrix>` be executed, which does not occur until the next
time the MappingProjection is executed (i.e., in the next `TRIAL <TimeScale.TRIAL>`). Therefore, in tracking the
`value <Component.value>` of Components during execution, it is important to carefully consider the state of
execution of the Components to which they belong or on which they depend for execution.
The following attributes and methods control and provide information about the execution of a Component:
.. _Component_Execution_Initialization:
*Initialization*
~~~~~~~~~~~~~~~~
.. _Component_Reset_Stateful_Function_When:
* **reset_stateful_function_when** -- a `Condition` that determines when the Component's `reset <Component.reset>`
method is called. The `reset <Component.reset>` method and `reset_stateful_function_when
<Component.reset_stateful_function_when>` attribute only exist for Mechanisms that have `stateful
<Parameter.stateful>` `Parameters`, or that have a `function <Mechanism_Base.function>` with `stateful
<Parameter.stateful>` Parameters. When the `reset <Component.reset>` method is called, this is done without any
arguments, so that the relevant `initializer <IntegratorFunction.initializer>` attributes (or their equivalents
-- initialization attributes vary among functions) are used for reinitialization.
COMMENT:
WHAT ABOUT initializer ATTRIBUTE FOR NON-INTEGRATOR FUNCTIONS, AND FOR STATEFUL PARAMETERS ON MECHANISMS?
WHY IS THIS ATTRIBUTE ON COMPONENT RATHER THAN MECHANISM?
COMMENT
.. note::
`Mechanisms` <Mechanism>` are the only type of Component that reset when the `reset_stateful_function_when
<Component.reset_stateful_function_when>` `Condition` is satisfied. Other Component types do not reset,
although `Composition` has a `reset <Composition.reset>` method that can be used to reset all of its eligible
Mechanisms (see `Composition_Reset`)
.. _Component_Execution_Termination:
*Termination*
~~~~~~~~~~~~~
.. _Component_Is_Finished:
* **is_finished()** -- method that determines whether execution of the Component is complete for a `TRIAL
<TimeScale.TRIAL>`; it is only used if `execute_until_finished <Component_Execute_Until_Finished>` is True.
.. _Component_Execute_Until_Finished:
* **execute_until_finished** -- determines whether the Component executes until its `is_finished` method returns True.
If it is False, then the Component executes only once per call to its `execute <Component.execute>` method,
irrespective of its `is_finished` method; if it is True then, depending on how its class implements and handles its
`is_finished` method, the Component may execute more than once per call to its `execute <Component.execute>` method.
.. _Component_Num_Executions_Before_Finished:
* **num_executions_before_finished** -- contains the number of times the Component has executed prior to finishing
(and since it last finished); depending upon the class, these may all be within a single call to the Component's
`execute <Component.execute>` method, or extend over several calls. It is set to 0 each time `is_finished` evaluates
to True. Note that this is distinct from the `execution_count <Component_Execution_Count>` and `num_executions
<Component_Num_Executions>` attributes.
.. _Component_Max_Executions_Before_Finished:
* **max_executions_before_finished** -- determines the maximum number of executions allowed before finishing
(i.e., the maximum allowable value of `num_executions_before_finished <Component.num_executions_before_finished>`).
If it is exceeded, a warning message is generated. Note that this only pertains to `num_executions_before_finished
<Component_Num_Executions_Before_Finished>`, and not its `execution_count <Component_Execution_Count>`, which can be
unlimited.
.. _Component_Execution_Count_and_Time:
*Count and Time*
~~~~~~~~~~~~~~~~
.. _Component_Execution_Count:
* **execution_count** -- maintains a record of the number of times a Component has executed since it was constructed,
*excluding* executions carried out during initialization and validation, but including all others whether they are
of the Component on its own are as part of a `Composition`, and irrespective of the `context <Context>` in which
they are occur. The value can be changed "manually" or programmatically by assigning an integer
value directly to the attribute. Note that this is the distinct from the `num_executions <Component_Num_Executions>`
and `num_executions_before_finished <Component_Num_Executions_Before_Finished>` attributes.
.. _Component_Num_Executions:
* **num_executions** -- maintains a record, in a `Time` object, of the number of times a Component has executed in a
particular `context <Context>` and at different `TimeScales <TimeScale>`. The value cannot be changed. Note that this
is the distinct from the `execution_count <Component_Execution_Count>` and `num_executions_before_finished
<Component_Num_Executions_Before_Finished>` attributes.
.. _Component_Current_Execution_Time:
* **current_execution_time** -- maintains the `Time` of the last execution of the Component in the context of the
`Composition`'s current `scheduler <Composition.scheduler`, and is stored as a `time
<Context.time>` tuple of values indicating the `TimeScale.TRIAL`, `TimeScale.PASS`, and `TimeScale.TIME_STEP` of the
last execution.
.. _Component_Class_Reference:
Class Reference
---------------
COMMENT:
This module defines the Component abstract class
It also contains:
- arg_name definitions for primary Component categories:
Process
Mechanism
types:
DDM
[PDP]
Projection
types:
MappingProjection
ControlProjection
LearningProjection
Function
COMMENT
"""
import base64
import collections
import copy
import functools
import inspect
import itertools
import logging
import numbers
import types
import warnings
from abc import ABCMeta
from collections.abc import Iterable
from enum import Enum, IntEnum
import dill
import graph_scheduler
import numpy as np
from psyneulink.core import llvm as pnlvm
from psyneulink.core.globals.context import \
Context, ContextError, ContextFlags, INITIALIZATION_STATUS_FLAGS, _get_time, handle_external_context
from psyneulink.core.globals.json import JSONDumpable
from psyneulink.core.globals.keywords import \
CONTEXT, CONTROL_PROJECTION, DEFERRED_INITIALIZATION, EXECUTE_UNTIL_FINISHED, \
FUNCTION, FUNCTION_PARAMS, INIT_FULL_EXECUTE_METHOD, INPUT_PORTS, \
LEARNING, LEARNING_PROJECTION, MATRIX, MAX_EXECUTIONS_BEFORE_FINISHED, \
MODEL_SPEC_ID_PSYNEULINK, MODEL_SPEC_ID_GENERIC, MODEL_SPEC_ID_TYPE, MODEL_SPEC_ID_PARAMETER_SOURCE, \
MODEL_SPEC_ID_PARAMETER_VALUE, MODEL_SPEC_ID_INPUT_PORTS, MODEL_SPEC_ID_OUTPUT_PORTS, \
MODULATORY_SPEC_KEYWORDS, NAME, OUTPUT_PORTS, OWNER, PARAMS, PREFS_ARG, \
RESET_STATEFUL_FUNCTION_WHEN, VALUE, VARIABLE
from psyneulink.core.globals.log import LogCondition
from psyneulink.core.globals.parameters import \
Defaults, SharedParameter, Parameter, ParameterAlias, ParameterError, ParametersBase, copy_parameter_value
from psyneulink.core.globals.preferences.basepreferenceset import BasePreferenceSet, VERBOSE_PREF
from psyneulink.core.globals.preferences.preferenceset import \
PreferenceLevel, PreferenceSet, _assign_prefs
from psyneulink.core.globals.registry import register_category
from psyneulink.core.globals.sampleiterator import SampleIterator
from psyneulink.core.globals.utilities import \
ContentAddressableList, convert_all_elements_to_np_array, convert_to_np_array, get_deepcopy_with_shared, \
is_instance_or_subclass, is_matrix, iscompatible, kwCompatibilityLength, prune_unused_args, \
get_all_explicit_arguments, call_with_pruned_args, safe_equals, safe_len
from psyneulink.core.scheduling.condition import Never
from psyneulink.core.scheduling.time import Time, TimeScale
__all__ = [
'Component', 'COMPONENT_BASE_CLASS', 'component_keywords', 'ComponentError', 'ComponentLog',
'DefaultsFlexibility', 'DeferredInitRegistry', 'parameter_keywords', 'ResetMode',
]
# Module-level logger, named after this module.
logger = logging.getLogger(__name__)

# Keywords accepted as constructor arguments by every Component.
component_keywords = {NAME, VARIABLE, VALUE, FUNCTION, FUNCTION_PARAMS, PARAMS, PREFS_ARG, CONTEXT}

# Registry of Components whose initialization has been deferred
# (see `Component_Deferred_Init` in the module docstring).
DeferredInitRegistry = {}
class ResetMode(Enum):
    """
    .. _Component_ResetMode:

    ResetModes used for **reset_params**:

    .. _CURRENT_TO_INSTANCE_DEFAULTS:

    *CURRENT_TO_INSTANCE_DEFAULTS*
      • resets all current values to instance default values.

    .. _INSTANCE_TO_CLASS:

    *INSTANCE_TO_CLASS*
      • resets all instance default values to class default values.

    .. _ALL_TO_CLASS_DEFAULTS:

    *ALL_TO_CLASS_DEFAULTS*
      • resets all current values and instance default values to \
      class default values
    """
    CURRENT_TO_INSTANCE_DEFAULTS = 0  # restore current values from instance defaults
    INSTANCE_TO_CLASS = 1             # restore instance defaults from class defaults
    ALL_TO_CLASS_DEFAULTS = 2         # restore both current values and instance defaults
class DefaultsFlexibility(Enum):
    """
    Denotes how rigid an assignment to a default is. That is, how much it can be modified, if at all,
    to suit the purpose of a method/owner/etc.

    e.g. when assigning a Function to a Mechanism:

        ``pnl.TransferMechanism(default_variable=[0, 0], function=pnl.Linear())``

        the Linear function is assigned a default variable ([0]) based on its ClassDefault,
        which conflicts with the default variable specified by its future owner ([0, 0]). Since
        the default for Linear was not explicitly stated, we allow the TransferMechanism to
        reassign the Linear's default variable as needed (`FLEXIBLE`)

    Attributes
    ----------

    FLEXIBLE
        can be modified in any way.

    RIGID
        cannot be modified in any way.

    INCREASE_DIMENSION
        can be wrapped in a single extra dimension.

    """
    FLEXIBLE = 0            # default may be freely reshaped by an owner
    RIGID = 1               # default was explicitly specified; never alter it
    INCREASE_DIMENSION = 2  # default may only be wrapped in one extra dimension
# Names of keywords associated with Parameters; populated elsewhere
# (e.g., by Function subclasses registering their parameter keywords).
parameter_keywords = set()

# suppress_validation_preference_set = BasePreferenceSet(prefs = {
#     PARAM_VALIDATION_PREF: PreferenceEntry(False,PreferenceLevel.INSTANCE),
#     VERBOSE_PREF: PreferenceEntry(False,PreferenceLevel.INSTANCE),
#     REPORT_OUTPUT_PREF: PreferenceEntry(True,PreferenceLevel.INSTANCE)})
class ComponentLog(IntEnum):
    """Logging levels for Components.

    NOTE(review): ``ALL`` is assigned 0, which makes it an *alias* of
    ``NONE`` under IntEnum aliasing rules (``ComponentLog.ALL is
    ComponentLog.NONE``). This looks unintended — confirm the intended
    value before relying on ``ALL``.
    """
    NONE = 0
    ALL = 0
    DEFAULTS = NONE
class ComponentError(Exception):
    """Exception raised for errors involving a PsyNeuLink Component.

    If *component* is supplied, the message is prefixed with the
    component's name and, when available, the name of its owner.
    """

    def __init__(self, message, component=None):
        prefix = None
        try:
            prefix = component.name
            if component.owner is not None:
                prefix = f'{prefix} (owned by {component.owner.name})'
        except AttributeError:
            # component is None or unnamed (prefix stays None), or the
            # owner/owner.name is missing (prefix keeps the bare name)
            pass
        if prefix is not None:
            message = f'{prefix}: {message}'
        super().__init__(message)
def _get_parametervalue_attr(param):
return f'_{param.name}'
def make_parameter_property(param):
    """Build a class-level property exposing *param* via dot notation.

    The getter returns the ParameterValue attribute for modulable
    parameters, and the parameter's current value otherwise. The setter
    warns (modulable parameters should be set through ``.base``) and
    delegates to ``Parameter.set``, re-raising read-only failures with a
    pointer to ``override=True``.
    """
    def getter(self):
        resolved = getattr(self.parameters, param.name)
        if not resolved.modulable:
            return resolved._get(self.most_recent_context)
        return getattr(self, _get_parametervalue_attr(resolved))

    def setter(self, value):
        resolved = getattr(self.parameters, param.name)
        if resolved.modulable:
            warnings.warn(
                'Setting parameter values directly using dot notation'
                ' may be removed in a future release. It is replaced with,'
                f' for example, <object>.{param.name}.base = {value}',
                FutureWarning,
            )
        try:
            # NOTE: uses resolved.name (not param.name) so aliases resolve
            # to their source parameter
            getattr(self.parameters, resolved.name).set(value, self.most_recent_context)
        except ParameterError as e:
            if 'Pass override=True to force set.' in str(e):
                raise ParameterError(
                    f"Parameter '{resolved.name}' is read-only. Set at your own risk."
                    f' Use .parameters.{resolved.name}.set with override=True to force set.'
                ) from None

    return property(fget=getter, fset=setter)
def _has_initializers_setter(value, owning_component=None, context=None):
"""
Assign has_initializers status to Component and any of its owners up the hierarchy.
"""
if value:
# only update owner's attribute if setting to True, because there may be
# other children that have initializers
try:
owning_component.owner.parameters.has_initializers._set(value, context)
except AttributeError:
# no owner
pass
return value
# ***************************************** COMPONENT CLASS ********************************************************
class ComponentsMeta(ABCMeta):
    """Metaclass for Component classes.

    On creation of each Component class, builds its class-level ``defaults``
    and ``parameters`` objects and exposes each Parameter as a property on
    the class (via make_parameter_property).
    """
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # class-level defaults object for this class
        self.defaults = Defaults(owner=self)
        try:
            # inherit Parameters from the immediate superclass, if any
            parent = self.__mro__[1].parameters
        except AttributeError:
            parent = None
        self.parameters = self.Parameters(owner=self, parent=parent)

        for param in self.parameters:
            if not hasattr(self, param.name):
                # expose the Parameter as a property unless the class already
                # defines an attribute of the same name
                setattr(self, param.name, make_parameter_property(param))

            try:
                if param.default_value.owner is None:
                    # default value exposes an unset ``owner``; claim it for
                    # this Parameter
                    param.default_value.owner = param
            except AttributeError:
                # default value has no ``owner`` attribute; nothing to do
                pass

    # consider removing this for explicitness
    # but can be useful for simplicity
    @property
    def class_defaults(self):
        return self.defaults
class Component(JSONDumpable, metaclass=ComponentsMeta):
"""
Component( \
default_variable=None, \
size=None, \
params=None, \
name=None, \
prefs=None, \
context=None \
)
Base class for Component.
The arguments below are ones that can be used in the constructor for any Component subclass.
.. note::
Component is an abstract class and should *never* be instantiated by a direct call to its constructor.
It should be instantiated using the constructor for a subclass.
COMMENT:
FOR API DOCUMENTATION:
The Component itself can be called without any arguments (in which case it uses its instance defaults) or
one or more variables (as defined by the subclass) followed by an optional params dictionary
The variable(s) can be a function reference, in which case the function is called to resolve the value;
however: it must be "wrapped" as an item in a list, so that it is not called before being passed
it must of course return a variable of the type expected for the variable
The size argument is an int or array of ints, which specify the size of variable and set variable to be array(s)
of zeros.
The default variableList is a list of default values, one for each of the variables defined in the child class
The params argument is a dictionary; the key for each entry is the parameter name, associated with its value.
+ Component subclasses can define the param FUNCTION:<method or Function class>
The instance defaults can be assigned at initialization or using the _instantiate_defaults class method;
- if instance defaults are not assigned on initialization, the corresponding class defaults are assigned
Each Component child class must initialize itself by calling super(childComponentName).__init__()
with a default value for its variable, and optionally an instance default paramList.
A subclass MUST either:
- implement a <class>.function method OR
- specify a default Function
- this is checked in Component._instantiate_function()
- if params[FUNCTION] is NOT specified, it is assigned to self.function (so that it can be referenced)
        - if params[FUNCTION] IS specified, it assigns its value to self.function (superseding existing value):
self.function is aliased to it (in Component._instantiate_function):
if FUNCTION is found on initialization:
if it is a reference to an instantiated function, self.function is pointed to it
if it is a class reference to a function:
it is instantiated using self.defaults.variable and FUNCTION_PARAMS (if they are there too)
this works, since _validate_params is always called after _validate_variable
so self.defaults.variable can be used to initialize function
to the method referenced by self.defaults.function
if self.function is found, an exception is raised
NOTES:
* In the current implementation, validation is:
- top-level only (items in lists, tuples and dictionaries are not checked, nor are nested items)
- for type only (it is oblivious to content)
            - forgiving (e.g., no distinction is made among numerical types)
        * However, more restrictive validation (e.g., recursive, range checking, etc.) can be achieved
by overriding the class _validate_variable and _validate_params methods
COMMENT
Arguments
---------
default_variable : scalar, list or array : default [[0]]
specifies template for the input to the Component's `function <Component.function>`.
size : int, list or np.ndarray of ints : default None
specifies default_variable as array(s) of zeros if **default_variable** is not passed as an argument;
if **default_variable** is specified, it takes precedence over the specification of **size** (see
        `size <Component_Size>` for additional details).
COMMENT:
param_defaults : : default None,
COMMENT
params : Dict[param keyword: param value] : default None
a `parameter dictionary <ParameterPort_Specification>` that can be used to specify the parameters for
the Component and/or a custom function and its parameters. Values specified for parameters in the dictionary
override any assigned to those parameters in arguments of the constructor.
name : str : for default see `name <Component_Name>`
a string used for the name of the Component; default is assigned by relevant `Registry` for Component
(see `Registry_Naming` for conventions used for default and duplicate names).
prefs : PreferenceSet or specification dict : default Component.classPreferences
specifies the `PreferenceSet` for the Component (see `prefs <Component_Base.prefs>` for details).
context : Context : default None
specifies `context <Context>` in which Component is being initialized or executed.
Attributes
----------
variable : 2d np.array
see `variable <Component_Variable>`
size : int or array of ints
see `size <Component_Size>`
function : Function, function or method
see `function <Component_Function>`
value : 2d np.array
see `value <Component_Value>`
log : Log
see `log <Component_Log>`
execution_count : int
see `execution_count <Component_Execution_Count>`
num_executions : Time
see `num_executions <_Component_Num_Executions>`
current_execution_time : tuple(`Time.RUN`, `Time.TRIAL`, `Time.PASS`, `Time.TIME_STEP`)
see `current_execution_time <Component_Current_Execution_Time>`
execute_until_finished : bool
see `execute_until_finished <Component_Execute_Until_Finished>`
num_executions_before_finished : int
see `num_executions_before_finished <Component_Num_Executions_Before_Finished>`
max_executions_before_finished : bool
see `max_executions_before_finished <Component_Max_Executions_Before_Finished>`
stateful_parameters : list
see `stateful_parameters <Component_Stateful_Parameters>`
reset_stateful_function_when : `Condition`
see `reset_stateful_function_when <Component_reset_stateful_function_when>`
name : str
see `name <Component_Name>`
prefs : PreferenceSet
see `prefs <Component_Prefs>`
parameters : Parameters
see `parameters <Component_Parameters>` and `Parameters` for additional information.
defaults : Defaults
an object that provides access to the default values of a `Component's` `parameters`;
see `parameter defaults <Parameter_Defaults>` for additional information.
initialization_status : field of flags attribute
indicates the state of initialization of the Component;
one and only one of the following flags is always set:
* `DEFERRED_INIT <ContextFlags.DEFERRED_INIT>`
* `INITIALIZING <ContextFlags.INITIALIZING>`
* `VALIDATING <ContextFlags.VALIDATING>`
* `INITIALIZED <ContextFlags.INITIALIZED>`
* `RESET <ContextFlags.RESET>`
* `UNINITIALIZED <ContextFlags.UNINITALIZED>`
COMMENT:
FIX: THESE USED TO BE IN CONSTRUCTORS FOR ALL SUBCLASSES. INTEGRATE WITH ABOVE
params : Dict[param keyword: param value] : default None
a `parameter dictionary <ParameterPort_Specification>` that can be used to specify the parameters for
the InputPort or its function, and/or a custom function and its parameters. Values specified for parameters in
the dictionary override any assigned to those parameters in arguments of the constructor.
name : str : default see `name <InputPort.name>`
specifies the name of the InputPort; see InputPort `name <InputPort.name>` for details.
prefs : PreferenceSet or specification dict : default Port.classPreferences
specifies the `PreferenceSet` for the InputPort; see `prefs <InputPort.prefs>` for details.
COMMENT
"""
#CLASS ATTRIBUTES
className = "COMPONENT"
suffix = " " + className
# IMPLEMENTATION NOTE: *** CHECK THAT THIS DOES NOT CAUSE ANY CHANGES AT SUBORDNIATE LEVELS TO PROPOGATE EVERYWHERE
componentCategory = None
componentType = None
standard_constructor_args = [RESET_STATEFUL_FUNCTION_WHEN, EXECUTE_UNTIL_FINISHED, MAX_EXECUTIONS_BEFORE_FINISHED]
# helper attributes for JSON model spec
_model_spec_id_parameters = 'parameters'
_model_spec_generic_type_name = NotImplemented
"""
string describing this class's generic type in universal model
specification,
if it exists and is different than the class name
"""
_model_spec_class_name_is_generic = False
"""
True if the class name is the class's generic type in universal model specification,
False otherwise
"""
_specified_variable_shape_flexibility = DefaultsFlexibility.RIGID
"""
The `DefaultsFlexibility` ._variable_shape_flexibility takes on
when variable shape was manually specified
"""
    class Parameters(ParametersBase):
        """
            The `Parameters` that are associated with all `Components`

            Attributes
            ----------

                variable
                    see `variable <Component_Variable>`

                    :default value: numpy.array([0])
                    :type: ``numpy.ndarray``
                    :read only: True

                value
                    see `value <Component_Value>`

                    :default value: numpy.array([0])
                    :type: ``numpy.ndarray``
                    :read only: True

                execute_until_finished
                    see `execute_until_finished <Component_Execute_Until_Finished>`

                    :default value: True
                    :type: ``bool``

                execution_count
                    see `execution_count <Component_Execution_Count>`

                    :default value: 0
                    :type: ``int``
                    :read only: True

                num_executions
                    see `num_executions <_Component_Num_Executions>`

                    :default value:
                    :type: ``Time``
                    :read only: True

                has_initializers
                    see `has_initializers <Component.has_initializers>`

                    :default value: False
                    :type: ``bool``

                is_finished_flag
                    internal parameter used by some Component types to track previous status of is_finished() method,
                    or to set the status reported by the is_finished (see `is_finished <Component_Is_Finished>`

                    :default value: True
                    :type: ``bool``

                max_executions_before_finished
                    see `max_executions_before_finished <Component_Max_Executions_Before_Finished>`

                    :default value: 1000
                    :type: ``int``

                num_executions_before_finished
                    see `num_executions_before_finished <Component_Num_Executions_Before_Finished>`

                    :default value: 0
                    :type: ``int``
                    :read only: True
        """
        variable = Parameter(np.array([0]), read_only=True, pnl_internal=True, constructor_argument='default_variable')
        value = Parameter(np.array([0]), read_only=True, pnl_internal=True)
        has_initializers = Parameter(False, setter=_has_initializers_setter, pnl_internal=True)
        # execution_count is not stateful because it is a global counter;
        # for context-specific counts should use schedulers which store this info
        execution_count = Parameter(0,
                                    read_only=True,
                                    loggable=False,
                                    stateful=False,
                                    fallback_default=True,
                                    pnl_internal=True)
        is_finished_flag = Parameter(True, loggable=False, stateful=True)
        execute_until_finished = True
        num_executions = Parameter(Time(), read_only=True, modulable=False, loggable=False)
        num_executions_before_finished = Parameter(0, read_only=True, modulable=False)
        max_executions_before_finished = Parameter(1000, modulable=False)

        def _parse_variable(self, variable):
            # Normalize variable to a numpy array; fall back to element-wise
            # conversion when the input is ragged (non-rectangular).
            if variable is None:
                return variable

            try:
                return convert_to_np_array(variable)
            except ValueError:
                return convert_all_elements_to_np_array(variable)

        def _validate_variable(self, variable):
            # No base-class constraint on variable; subclasses override.
            return None

        def _parse_modulable(self, param_name, param_value):
            from psyneulink.core.components.mechanisms.modulatory.modulatorymechanism import ModulatoryMechanism_Base
            from psyneulink.core.components.ports.modulatorysignals import ModulatorySignal
            from psyneulink.core.components.projections.modulatory.modulatoryprojection import ModulatoryProjection_Base

            # assume 2-tuple with class/instance as second item is a proper
            # modulatory spec, can possibly add in a flag on acceptable
            # classes in the future
            if (
                isinstance(param_value, tuple)
                and len(param_value) == 2
                and (
                    is_instance_or_subclass(param_value[1], Component)
                    or (
                        isinstance(param_value[1], str)
                        and param_value[1] in MODULATORY_SPEC_KEYWORDS
                    )
                )
            ):
                value = param_value[0]
            elif (
                is_instance_or_subclass(
                    param_value,
                    (ModulatoryMechanism_Base, ModulatorySignal, ModulatoryProjection_Base)
                )
                or (
                    isinstance(param_value, str)
                    and param_value in MODULATORY_SPEC_KEYWORDS
                )
            ):
                # a bare modulatory spec: keep the parameter's default value
                value = getattr(self, param_name).default_value
            else:
                value = param_value

            if isinstance(value, list):
                value = np.asarray(value)

            return value
initMethod = INIT_FULL_EXECUTE_METHOD
classPreferenceLevel = PreferenceLevel.COMPOSITION
# Any preferences specified below will override those specified in COMPOSITION_DEFAULT_PREFERENCES
# Note: only need to specify setting; level will be assigned to COMPOSITION automatically
# classPreferences = {
# PREFERENCE_SET_NAME: 'ComponentCustomClassPreferences',
# PREFERENCE_KEYWORD<pref>: <setting>...}
exclude_from_parameter_ports = [INPUT_PORTS, OUTPUT_PORTS]
# IMPLEMENTATION NOTE: This is needed so that the Port class can be used with ContentAddressableList,
# which requires that the attribute used for addressing is on the class;
# it is also declared as a property, so that any assignments are validated to be strings,
# insuring that assignment by one instance will not affect the value of others.
name = None
_deepcopy_shared_keys = frozenset([
'_init_args',
])
    def __init__(self,
                 default_variable,
                 param_defaults,
                 size=NotImplemented,  # 7/5/17 CW: this is a hack to check whether the user has passed in a size arg
                 function=None,
                 name=None,
                 reset_stateful_function_when=None,
                 prefs=None,
                 **kwargs):
        """Assign default preferences; enforce required params; validate and instantiate params and execute method

        Initialization arguments:
        - default_variable (anything): establishes type for the variable, used for validation
        - size (int or list/array of ints): if specified, establishes variable if variable was not already specified
        - params_default (dict): assigned as default
        Note: if parameter_validation is off, validation is suppressed (for efficiency) (Component class default = on)
        """
        context = Context(
            source=ContextFlags.CONSTRUCTOR,
            execution_phase=ContextFlags.IDLE,
            execution_id=None,
        )

        if reset_stateful_function_when is not None:
            self.reset_stateful_function_when = reset_stateful_function_when
        else:
            # default: never reset the stateful function
            self.reset_stateful_function_when = Never()

        try:
            function_params = copy.copy(param_defaults[FUNCTION_PARAMS])
        except (KeyError, TypeError):
            # no param_defaults, or no FUNCTION_PARAMS entry in them
            function_params = {}

        # if function is string, assume any unknown kwargs are for the
        # corresponding UDF expression
        if isinstance(function, (types.FunctionType, str)):
            function_params = {
                **kwargs,
                **function_params
            }
        else:
            self._handle_illegal_kwargs(**kwargs)

        # allow override of standard arguments with arguments specified in
        # params (here, param_defaults) argument
        # (if there are duplicates, later lines override previous)
        parameter_values = {
            **{
                'function': function,
                'variable': default_variable
            },
            **kwargs,
            **(param_defaults if param_defaults is not None else {}),
        }

        self._initialize_parameters(
            context=context,
            **parameter_values
        )

        # resolve default_variable from the explicit argument and/or size
        var = call_with_pruned_args(
            self._handle_default_variable,
            default_variable=default_variable,
            size=size,
            **parameter_values
        )
        if var is None:
            default_variable = self.defaults.variable
        else:
            default_variable = var
            self.defaults.variable = copy.deepcopy(default_variable)
            self.parameters.variable._user_specified = True

        # ASSIGN PREFS
        _assign_prefs(self, prefs, BasePreferenceSet)

        # VALIDATE VARIABLE AND PARAMS, AND ASSIGN DEFAULTS

        # TODO: the below overrides setting default values to None context,
        # at least in stateless parameters. Possibly more. Below should be
        # removed eventually

        # Validate the set passed in
        self._instantiate_defaults(variable=default_variable,
                                   request_set=parameter_values,  # requested set
                                   assign_missing=True,  # assign missing params from classPreferences to instanceDefaults
                                   target_set=self.defaults.values(),  # destination set to which params are being assigned
                                   default_set=self.class_defaults.values(),  # source set from which missing params are assigned
                                   context=context,
                                   )

        # record constructor-time values for SharedParameters so their
        # targets can be initialized later
        self.initial_shared_parameters = collections.defaultdict(dict)

        for param_name, param in self.parameters.values(show_all=True).items():
            if (
                isinstance(param, SharedParameter)
                and not isinstance(param.source, ParameterAlias)
            ):
                try:
                    if parameter_values[param_name] is not None:
                        isp_val = parameter_values[param_name]
                    else:
                        isp_val = copy.deepcopy(param.default_value)
                except KeyError:
                    isp_val = copy.deepcopy(param.default_value)

                if isp_val is not None:
                    self.initial_shared_parameters[param.attribute_name][param.shared_parameter_name] = isp_val

        # we must know the final variable shape before setting up parameter
        # Functions or they will mismatch
        self._instantiate_parameter_classes(context)

        # self.componentName = self.componentType
        try:
            self.componentName = self.componentName or self.componentType
        except AttributeError:
            self.componentName = self.componentType

        # ENFORCE REGISTRY
        if self.__class__.__bases__[0].__bases__[0].__bases__[0].__name__ == 'ShellClass':
            try:
                self.__class__.__bases__[0].registry
            except AttributeError:
                raise ComponentError("{0} is a category class and so must implement a registry".
                                     format(self.__class__.__bases__[0].__name__))

        # ASSIGN LOG
        from psyneulink.core.globals.log import Log
        self.log = Log(owner=self)
        # Used by run to store return value of execute
        self.results = []

        if function is None:
            if (
                param_defaults is not None
                and FUNCTION in param_defaults
                and param_defaults[FUNCTION] is not None
            ):
                function = param_defaults[FUNCTION]
            else:
                try:
                    function = self.class_defaults.function
                except AttributeError:
                    # assume function is a method on self
                    pass

        self._runtime_params_reset = {}

        # KDM 11/12/19: this exists to deal with currently unknown attribute
        # setting - if not set these will be included in logs as COMMAND_LINE
        # settings. Remove this eventually
        self.most_recent_context = context

        # INSTANTIATE ATTRIBUTES BEFORE FUNCTION
        # Stub for methods that need to be executed before instantiating function
        # (e.g., _instantiate_sender and _instantiate_receiver in Projection)
        # Allow _instantiate_attributes_before_function of subclass
        # to modify/replace function arg provided in constructor (e.g. TransferWithCosts)
        function = self._instantiate_attributes_before_function(function=function, context=context) or function

        # INSTANTIATE FUNCTION
        #    - assign initial function parameter values from ParameterPorts,
        #    - assign function's output to self.defaults.value (based on call of self.execute)
        self._instantiate_function(function=function, function_params=function_params, context=context)

        # FIX TIME 3/18/21
        # NOTE(review): leftover debugging scaffold — the condition and
        # ``assert True`` have no effect; candidate for removal.
        if '(RESULT) to (OUTPUT_CIM_TransferMechanism-1_RESULT)' in self.name:
            assert True
        self._instantiate_value(context=context)

        # INSTANTIATE ATTRIBUTES AFTER FUNCTION
        # Stub for methods that need to be executed after instantiating function
        # (e.g., instantiate_output_port in Mechanism)
        self._instantiate_attributes_after_function(context=context)

        self._validate(context=context)

        self.initialization_status = ContextFlags.INITIALIZED

        self._update_parameter_components(context)
def __repr__(self):
return '({0} {1})'.format(type(self).__name__, self.name)
#return '{1}'.format(type(self).__name__, self.name)
    def __lt__(self, other):
        """Order Components lexicographically by their name."""
        return self.name < other.name
def __deepcopy__(self, memo):
if 'no_shared' in memo and memo['no_shared']:
shared_types = tuple()
else:
shared_types = (Component, ComponentsMeta)
fun = get_deepcopy_with_shared(
self._deepcopy_shared_keys,
shared_types
)
newone = fun(self, memo)
if newone.parameters is not newone.class_parameters:
# may be in DEFERRED INIT, so parameters/defaults belongs to class
newone.parameters._owner = newone
newone.defaults._owner = newone
# by copying, this instance is no longer "inherent" to a single
# 'import psyneulink' call
newone._is_pnl_inherent = False
return newone
# ------------------------------------------------------------------------------------------------------------------
# Compilation support
# ------------------------------------------------------------------------------------------------------------------
def _get_compilation_state(self):
# FIXME: MAGIC LIST, Use stateful tag for this
whitelist = {"previous_time", "previous_value", "previous_v",
"previous_w", "random_state", "is_finished_flag",
"num_executions_before_finished", "num_executions",
"execution_count", "value", "input_ports", "output_ports"}
blacklist = { # References to other components
"objective_mechanism", "agent_rep", "projections"}
# Only mechanisms use "value" state
if not hasattr(self, 'ports'):
blacklist.add("value")
def _is_compilation_state(p):
#FIXME: This should use defaults instead of 'p.get'
return p.name not in blacklist and \
not isinstance(p, (ParameterAlias, SharedParameter)) and \
(p.name in whitelist or isinstance(p.get(), Component))
return filter(_is_compilation_state, self.parameters)
def _get_state_ids(self):
return [sp.name for sp in self._get_compilation_state()]
@property
def llvm_state_ids(self):
ids = getattr(self, "_state_ids", None)
if ids is None:
ids = self._get_state_ids()
setattr(self, "_state_ids", ids)
return ids
    def _get_state_initializer(self, context):
        """Build the nested tuple used to initialize this Component's
        compiled state structure for *context*.
        """
        def _convert(p):
            x = p.get(context)
            if isinstance(x, np.random.RandomState):
                # Skip first element of random state (id string)
                val = pnlvm._tupleize((*x.get_state()[1:], x.used_seed[0]))
            elif isinstance(x, Time):
                # one entry per TimeScale attribute of the Time object
                val = tuple(getattr(x, graph_scheduler.time._time_scale_to_attr_str(t)) for t in TimeScale)
            elif isinstance(x, Component):
                # nested Components contribute their own initializer directly
                return x._get_state_initializer(context)
            elif isinstance(x, ContentAddressableList):
                return tuple(p._get_state_initializer(context) for p in x)
            else:
                val = pnlvm._tupleize(x)

            # replicate once per history slot the Parameter keeps
            return tuple(val for _ in range(p.history_min_length + 1))

        return tuple(map(_convert, self._get_compilation_state()))
    def _get_compilation_params(self):
        """Return an iterator over the Parameters included in this
        Component's compiled parameter structure.

        FIXME: MAGIC LIST, detect used parameters automatically
        """
        blacklist = {# Stateful parameters
                     "previous_time", "previous_value", "previous_v",
                     "previous_w", "random_state", "is_finished_flag",
                     "num_executions_before_finished", "num_executions",
                     "variable", "value", "saved_values", "saved_samples",
                     # Invalid types
                     "input_port_variables", "results", "simulation_results",
                     "monitor_for_control", "state_feature_values", "simulation_ids",
                     "input_labels_dict", "output_labels_dict",
                     "modulated_mechanisms", "grid", "control_signal_params",
                     "activation_derivative_fct", "input_specification",
                     # Reference to other components
                     "objective_mechanism", "agent_rep", "projections",
                     # Shape mismatch
                     "auto", "hetero", "cost", "costs", "combined_costs",
                     "control_signal",
                     # autodiff specific types
                     "pytorch_representation", "optimizer"}

        # Mechanisms need a few extra entries:
        #  * matrix -- is never used directly, and is flattened elsewhere
        #  * integration rate -- shape mismatch with param port input
        if hasattr(self, 'ports'):
            blacklist.update(["matrix", "integration_rate"])

        def _is_compilation_param(p):
            if p.name not in blacklist and not isinstance(p, (ParameterAlias, SharedParameter)):
                # FIXME: this should use defaults
                val = p.get()
                # Check if the value type is valid for compilation
                return not isinstance(val, (str, ComponentsMeta,
                                            type(max),
                                            type(_is_compilation_param),
                                            type(self._get_compilation_params)))
            return False

        return filter(_is_compilation_param, self.parameters)
def _get_param_ids(self):
return [p.name for p in self._get_compilation_params()]
@property
def llvm_param_ids(self):
ids = getattr(self, "_param_ids", None)
if ids is None:
ids = self._get_param_ids()
setattr(self, "_param_ids", ids)
return ids
def _is_param_modulated(self, p):
try:
if p in self.owner.parameter_ports:
return True
except AttributeError:
pass
try:
if p in self.parameter_ports:
return True
except AttributeError:
pass
try:
modulated_params = (
getattr(self.parameters, p.sender.modulation).source
for p in self.owner.mod_afferents)
if p in modulated_params:
return True
except AttributeError:
pass
return False
    def _get_param_initializer(self, context):
        """Build the nested tuple used to initialize this Component's
        compiled parameter structure for *context*.
        """
        def _convert(x):
            if isinstance(x, Enum):
                return x.value
            elif isinstance(x, SampleIterator):
                if isinstance(x.generator, list):
                    return tuple(v for v in x.generator)
                else:
                    return (x.start, x.step, x.num)
            elif isinstance(x, Component):
                return x._get_param_initializer(context)

            try:
                # This can't use tupleize and needs to recurse to handle
                # 'search_space' list of SampleIterators
                return tuple(_convert(i) for i in x)
            except TypeError:
                # scalar (non-iterable); None becomes an empty tuple
                return x if x is not None else tuple()

        def _get_values(p):
            param = p.get(context)
            # Modulated parameters change shape to array
            if np.ndim(param) == 0 and self._is_param_modulated(p):
                return (param,)
            elif p.name == 'num_estimates':
                return 0 if param is None else param
            elif p.name == 'matrix':  # Flatten matrix
                # NOTE(review): np.asfarray was removed in NumPy 2.0; consider
                # np.asarray(param, dtype=float) — confirm dtype expectations.
                return tuple(np.asfarray(param).flatten())
            return _convert(param)

        return tuple(map(_get_values, self._get_compilation_params()))
    def _gen_llvm_function_reset(self, ctx, builder, *_, tags):
        """Default LLVM 'reset' codegen: a no-op that returns the builder unchanged."""
        assert "reset" in tags
        return builder
    def _gen_llvm_function(self, *, ctx:pnlvm.LLVMBuilderContext,
                                    extra_args=[], tags:frozenset):
        """Generate the LLVM function for this Component.

        The function takes four standard pointer arguments (params, state,
        input, output) followed by any *extra_args*. 'reset'-tagged variants
        are built by _gen_llvm_function_reset; everything else by
        _gen_llvm_function_body.

        NOTE(review): ``extra_args=[]`` is a mutable default; it is never
        mutated here (only concatenated), so it is harmless, but a tuple
        default would be safer.
        """
        args = [ctx.get_param_struct_type(self).as_pointer(),
                ctx.get_state_struct_type(self).as_pointer(),
                ctx.get_input_struct_type(self).as_pointer(),
                ctx.get_output_struct_type(self).as_pointer()]
        builder = ctx.create_llvm_function(args + extra_args, self, tags=tags)

        params, state, arg_in, arg_out = builder.function.args[:len(args)]
        if len(extra_args) == 0:
            # mark the standard pointer args as non-aliasing
            for p in params, state, arg_in, arg_out:
                p.attributes.add('noalias')

        if "reset" in tags:
            builder = self._gen_llvm_function_reset(ctx, builder, params, state,
                                                    arg_in, arg_out, tags=tags)
        else:
            builder = self._gen_llvm_function_body(ctx, builder, params, state,
                                                   arg_in, arg_out, tags=tags)
        builder.ret_void()
        return builder.function
# ------------------------------------------------------------------------------------------------------------------
# Handlers
# ------------------------------------------------------------------------------------------------------------------
def _handle_default_variable(self, default_variable=None, size=None):
"""
Finds whether default_variable can be determined using **default_variable** and **size**
arguments.
Returns
-------
a default variable if possible
None otherwise
"""
default_variable = self._parse_arg_variable(default_variable)
if default_variable is None:
default_variable = self._handle_size(size, default_variable)
if default_variable is None or default_variable is NotImplemented:
return None
else:
self._variable_shape_flexibility = self._specified_variable_shape_flexibility
else:
self._variable_shape_flexibility = self._specified_variable_shape_flexibility
return convert_to_np_array(default_variable, dimension=1)
# ELIMINATE SYSTEM
# IMPLEMENTATION NOTE: (7/7/17 CW) Due to System and Process being initialized with size at the moment (which will
# be removed later), I’m keeping _handle_size in Component.py. I’ll move the bulk of the function to Mechanism
# through an override, when Composition is done. For now, only Port.py overwrites _handle_size().
def _handle_size(self, size, variable):
"""If variable is None, _handle_size tries to infer variable based on the **size** argument to the
__init__() function. This method is overwritten in subclasses like Mechanism and Port.
If self is a Mechanism, it converts variable to a 2D array, (for a Mechanism, variable[i] represents
the input from the i-th InputPort). If self is a Port, variable is a 1D array and size is a length-1 1D
array. It performs some validations on size and variable as well. This function is overridden in Port.py.
If size is NotImplemented (usually in the case of Projections/Functions), then this function passes without
doing anything. Be aware that if size is NotImplemented, then variable is never cast to a particular shape.
"""
if size is not NotImplemented:
self._variable_shape_flexibility = self._specified_variable_shape_flexibility
# region Fill in and infer variable and size if they aren't specified in args
# if variable is None and size is None:
# variable = self.class_defaults.variable
# 6/30/17 now handled in the individual subclasses' __init__() methods because each subclass has different
# expected behavior when variable is None and size is None.
def checkAndCastInt(x):
if not isinstance(x, numbers.Number):
raise ComponentError("An element ({}) in size is not a number.".format(x))
if x < 1:
raise ComponentError("An element ({}) in size is not a positive number.".format(x))
try:
int_x = int(x)
except:
raise ComponentError(
"Failed to convert an element ({}) in size argument for {} {} to an integer. size "
"should be a number, or iterable of numbers, which are integers or "
"can be converted to integers.".format(x, type(self), self.name))
if int_x != x:
if hasattr(self, 'prefs') and hasattr(self.prefs, VERBOSE_PREF) and self.prefs.verbosePref:
warnings.warn("When an element ({}) in the size argument was cast to "
"integer, its value changed to {}.".format(x, int_x))
return int_x
if variable is not None:
variable = np.array(variable)
if variable.dtype == object:
# CAVEAT: assuming here that object dtype implies there are list objects (i.e. array with
# different sized arrays/lists inside like [[0, 1], [2, 3, 4]]), even though putting a None
# value in the array will give object dtype. This case doesn't really make sense in our
# context though, so ignoring this case in the interest of quickly fixing 3D variable behavior
variable = np.atleast_1d(variable)
else:
variable = np.atleast_2d(variable)
variable = convert_all_elements_to_np_array(variable)
try:
if size is not None:
size = np.atleast_1d(size)
if len(np.shape(size)) > 1: # number of dimensions of size > 1
if hasattr(self, 'prefs') and hasattr(self.prefs, VERBOSE_PREF) and self.prefs.verbosePref:
warnings.warn(
"size had more than one dimension (size had {} dimensions), so only the first "
"element of its highest-numbered axis will be used".format(len(np.shape(size))))
while len(np.shape(size)) > 1: # reduce the dimensions of size
size = size[0]
except:
raise ComponentError("Failed to convert size (of type {}) to a 1D array.".format(type(size)))
if size is not None:
size = np.array(list(map(checkAndCastInt, size))) # convert all elements of size to int
# implementation note: for good coding practices, perhaps add setting to enable easy change of the default
# value of variable (though it's an unlikely use case), which is an array of zeros at the moment
if variable is None and size is not None:
try:
variable = []
for s in size:
variable.append(np.zeros(s))
variable = convert_to_np_array(variable)
# TODO: fix bare except
except:
raise ComponentError("variable (possibly default_variable) was not specified, but PsyNeuLink "
"was unable to infer variable from the size argument, {}. size should be"
" an integer or an array or list of integers. Either size or "
"variable must be specified.".format(size))
# the two regions below (creating size if it's None and/or expanding it) are probably obsolete (7/7/17 CW)
if size is None and variable is not None:
size = []
try:
for input_vector in variable:
size.append(len(input_vector))
size = np.array(size)
except:
raise ComponentError(
"{}: size was not specified, and unable to infer it from the variable argument ({}) "
"-- it can be an array, list, a 2D array, a list of arrays, array of lists, etc. ".
format(self.name, variable))
# endregion
if size is not None and variable is not None:
if len(size) == 1 and len(variable) > 1:
new_size = np.empty(len(variable))
new_size.fill(size[0])
size = new_size
# the two lines below were used when size was a param and are likely obsolete (7/7/17 CW)
# param_defaults['size'] = size # 7/5/17 potentially buggy? Not sure (CW)
# self.user_params_for_instantiation['size'] = None # 7/5/17 VERY HACKY: See Changyan's Notes on this.
# Both variable and size are specified
if variable is not None and size is not None:
# If they conflict, give warning
if len(size) != len(variable):
if hasattr(self, 'prefs') and hasattr(self.prefs, VERBOSE_PREF) and self.prefs.verbosePref:
warnings.warn("The size arg of {} conflicts with the length "
"of its variable arg ({}) at element {}: variable takes precedence".
format(self.name, size, variable))
else:
for i in range(len(size)):
if size[i] != len(variable[i]):
if hasattr(self, 'prefs') and hasattr(self.prefs, VERBOSE_PREF) and self.prefs.verbosePref:
warnings.warn("The size arg of {} ({}) conflicts with the length "
"of its variable arg ({}) at element {}: variable takes precedence".
format(self.name, size[i], variable[i], i))
return variable
def _handle_illegal_kwargs(self, **kwargs):
    """Raise ComponentError for any constructor keyword this Component does not recognize.

    A keyword is legal if it is one of the standard constructor arguments,
    a declared Parameter name, or an explicit argument of any __init__ in
    the class's MRO.
    """
    # collect every name this constructor is allowed to receive
    legal_args = set(self.standard_constructor_args)
    legal_args.update(self.parameters.names(show_all=True))
    legal_args.update(get_all_explicit_arguments(self.__class__, '__init__'))

    unrecognized = [arg for arg in kwargs if arg not in legal_args]
    if unrecognized:
        plural = 's' if len(unrecognized) > 1 else ''
        raise ComponentError(
            f"Unrecognized argument{plural} in constructor for {self.name} "
            f"(type: {self.__class__.__name__}): {repr(', '.join(unrecognized))}"
        )
# breaking self convention here because when storing the args,
# "self" is often among them. To avoid needing to preprocess to
# avoid argument duplication, use "self_" in this method signature
def _store_deferred_init_args(self_, **kwargs):
    """Cache constructor arguments for later use by _deferred_init.

    Stores in self._init_args every captured kwarg that is either an explicit
    __init__ argument anywhere in the class's MRO or a declared VAR_KEYWORD
    parameter name, then flattens any nested 'kwargs' entry into the top level.
    """
    self = self_

    # 'self' is usually among the captured args; remove it so it is not
    # passed twice when __init__ is re-invoked during deferred init
    try:
        del kwargs['self']
    except KeyError:
        pass

    # add unspecified kwargs
    # names of any **kwargs-style parameters declared by this __init__
    kwargs_names = [
        k
        for k, v in inspect.signature(self.__init__).parameters.items()
        if v.kind is inspect.Parameter.VAR_KEYWORD
    ]

    self._init_args = {
        k: v
        for k, v in kwargs.items()
        if (
            k in get_all_explicit_arguments(self.__class__, '__init__')
            or k in kwargs_names
        )
    }

    # flatten: merge the captured 'kwargs' dict (if any) into the top level
    # so _deferred_init can pass a single flat mapping to __init__
    try:
        self._init_args.update(self._init_args['kwargs'])
        del self._init_args['kwargs']
    except KeyError:
        pass
def _deferred_init(self, **kwargs):
    """Use in subclasses that require deferred initialization

    Completes initialization using the arguments cached by
    _store_deferred_init_args, merged with any **kwargs supplied now.
    No-op unless initialization_status is ContextFlags.DEFERRED_INIT.
    """
    if self.initialization_status == ContextFlags.DEFERRED_INIT:

        # Flag that object is now being initialized
        # (usually in _instantiate_function)
        self.initialization_status = ContextFlags.INITIALIZING

        # merge any args supplied at this point with those cached at construction
        self._init_args.update(kwargs)

        # Complete initialization
        # MODIFIED 10/27/18 OLD:
        super(self.__class__,self).__init__(**self._init_args)
        # MODIFIED 10/27/18 NEW:  FOLLOWING IS NEEDED TO HANDLE FUNCTION DEFERRED INIT (JDC)
        # try:
        #     super(self.__class__,self).__init__(**self._init_args)
        # except:
        #     self.__init__(**self._init_args)
        # MODIFIED 10/27/18 END

        # If name was assigned, "[DEFERRED INITIALIZATION]" was appended to it, so remove it
        if DEFERRED_INITIALIZATION in self.name:
            self.name = self.name.replace("[" + DEFERRED_INITIALIZATION + "]", "")
        # Otherwise, allow class to replace std default name with class-specific one if it has a method for doing so
        else:
            self._assign_default_name()

        # cached args are no longer needed once initialization has completed
        del self._init_args
def _assign_deferred_init_name(self, name):
    """Tag *name* as deferred-initialization and register this Component
    in DeferredInitRegistry (creating a registry entry if needed).
    """
    if name:
        name = "{} [{}]".format(name, DEFERRED_INITIALIZATION)
    else:
        name = "{} {}".format(DEFERRED_INITIALIZATION, self.__class__.__name__)

    # Register with ProjectionRegistry or create one
    register_category(
        entry=self,
        base_class=Component,
        name=name,
        registry=DeferredInitRegistry,
    )
def _assign_default_name(self, **kwargs):
return
def _set_parameter_value(self, param, val, context=None):
    """Set Parameter *param* (given by name) to *val* for *context*.

    After setting, re-execute any associated ParameterPort — on this
    Component if it owns parameter_ports, otherwise on self.owner — so
    the port's value reflects the new parameter setting.
    """
    param = getattr(self.parameters, param)
    param._set(val, context)
    if hasattr(self, "parameter_ports"):
        if param in self.parameter_ports:
            new_port_value = self.parameter_ports[param].execute(context=context)
            self.parameter_ports[param].parameters.value._set(new_port_value, context)
    elif hasattr(self, "owner"):
        if hasattr(self.owner, "parameter_ports"):
            # skip Components, assume they are to be run to provide the
            # value instead of given as a variable to a parameter port
            if param in self.owner.parameter_ports:
                try:
                    if any([isinstance(v, Component) for v in val]):
                        return
                except TypeError:
                    # val is not iterable; check the value itself
                    if isinstance(val, Component):
                        return

                new_port_value = self.owner.parameter_ports[param].execute(context=context)
                self.owner.parameter_ports[param].parameters.value._set(new_port_value, context)
def _check_args(self, variable=None, params=None, context=None, target_set=None):
    """Validate variable and params, instantiate variable (if necessary) and assign any runtime params.

    Called by functions to validate variable and params
    Validation can be suppressed by turning parameter_validation attribute off
    target_set is a params dictionary to which params should be assigned;

    Does the following:
    - instantiate variable (if missing or callable)
    - validate variable if PARAM_VALIDATION is set
    - resets leftover runtime params back to original values (only if execute method was called directly)
    - sets runtime params
    - validate params if PARAM_VALIDATION is set

    :param variable: (anything but a dict) - variable to validate
    :param params: (dict) - params to validate
    :target_set: (dict) - set to which params should be assigned
    :return: the validated (and possibly defaulted) variable
    """
    # VARIABLE ------------------------------------------------------------

    # If function is called without any arguments, get default for variable
    if variable is None:
        try:
            # assigned by the Function class init when initializing
            variable = self.defaults.variable
        except AttributeError:
            variable = self.class_defaults.variable

    # If the variable is a function, call it
    if callable(variable):
        variable = variable()

    # Validate variable if parameter_validation is set and the function was called with a variable
    if self.prefs.paramValidationPref and variable is not None:
        variable = self._validate_variable(variable, context=context)

    # PARAMS ------------------------------------------------------------

    # If params have been passed, treat as runtime params
    self._validate_and_assign_runtime_params(params, context)

    self.parameters.variable._set(variable, context=context)
    return variable
def _validate_and_assign_runtime_params(self, runtime_params, context):
    """Validate runtime_params, cache for reset, and assign values

    Check that all params belong either to Component or its function (raise error if any are found that don't)
    Cache params to reset in _runtime_params_reset
    """
    # # MODIFIED 5/8/20 OLD:
    # # reset any runtime params that were leftover from a direct call to .execute (atypical)
    # if context.execution_id in self._runtime_params_reset:
    #     for key in self._runtime_params_reset[context.execution_id]:
    #         self._set_parameter_value(key, self._runtime_params_reset[context.execution_id][key], context)
    # self._runtime_params_reset[context.execution_id] = {}
    # MODIFIED 5/8/20 END

    from psyneulink.core.components.functions.function import is_function_type, FunctionError

    def generate_error(param_name):
        # Build an owner chain for the message, then raise the error type
        # appropriate to whether self is a Function or another Component.
        owner_name = ""
        if hasattr(self, OWNER) and self.owner:
            owner_name = f" of {self.owner.name}"
            if hasattr(self.owner, OWNER) and self.owner.owner:
                owner_name = f"{owner_name} of {self.owner.owner.name}"
        err_msg=f"Invalid specification in runtime_params arg for {self.name}{owner_name}: '{param_name}'."
        if is_function_type(self):
            raise FunctionError(err_msg)
        else:
            raise ComponentError(err_msg)

    if isinstance(runtime_params, dict):
        for param_name in runtime_params:
            if not isinstance(param_name, str):
                generate_error(param_name)
            elif param_name in self.parameters:
                # structural params may not be overridden at runtime
                if param_name in {FUNCTION, INPUT_PORTS, OUTPUT_PORTS}:
                    generate_error(param_name)
                # cache the pre-override value so it can be restored later
                if context.execution_id not in self._runtime_params_reset:
                    self._runtime_params_reset[context.execution_id] = {}
                self._runtime_params_reset[context.execution_id][param_name] = getattr(self.parameters,
                                                                                       param_name)._get(context)
                self._set_parameter_value(param_name, runtime_params[param_name], context)
            # Any remaining params should either belong to the Component's function
            # or, if the Component is a Function, to it or its owner
            elif ( # If Component is not a function, and its function doesn't have the parameter or
                    (not is_function_type(self) and param_name not in self.function.parameters)
                    # the Component is a standalone function:
                    or (is_function_type(self) and not self.owner)):
                generate_error(param_name)

    elif runtime_params:    # not None
        raise ComponentError(f"Invalid specification of runtime parameters for {self.name}: {runtime_params}.")
@handle_external_context()
def _instantiate_defaults(self,
                          variable=None,
                          request_set=None,
                          assign_missing=True,
                          target_set=None,
                          default_set=None,
                          context=None
                          ):
    """Validate variable and/or param defaults in requested set and assign values to params in target set

    Variable can be any type other than a dictionary (reserved for use as params)
    request_set must contain a dict of params to be assigned to target_set
    If assign_missing option is set, then any params defined for the class
        but not included in the requested set are assigned values from the default_set;
        if request_set is None, then all values in the target_set are assigned from the default_set
    Class defaults can not be passed as target_set
        IMPLEMENTATION NOTE:  for now, treating class defaults as hard coded;
                             could be changed in the future simply by commenting out code below
    If not context:  instantiates function and any ports specified in request set
                     (if they have changed from the previous value(s))

    :param variable: (anything but a dict (variable) - value to assign as defaults.variable
    :param request_set: (dict) - params to be assigned
    :param assign_missing: (bool) - controls whether missing params are set to default_set values (default: False)
    :param target_set: (dict) - param set to which assignments should be made
    :param default_set: (dict) - values used for params missing from request_set (only if assign_missing is True)
    :return:
    """
    # Make sure all args are legal
    if variable is not None:
        if isinstance(variable,dict):
            raise ComponentError("Dictionary passed as variable; probably trying to use param set as 1st argument")
    if request_set:
        if not isinstance(request_set, dict):
            raise ComponentError("requested parameter set must be a dictionary")
    if target_set:
        if not isinstance(target_set, dict):
            raise ComponentError("target parameter set must be a dictionary")
    if default_set:
        if not isinstance(default_set, dict):
            raise ComponentError("default parameter set must be a dictionary")

    # FIX: 6/3/19 [JDC] SHOULD DEAL WITH THIS AND SHAPE BELOW
    # # GET VARIABLE FROM PARAM DICT IF SPECIFIED
    # #    (give precedence to that over variable arg specification)
    # if VARIABLE in request_set and request_set[VARIABLE] is not None:
    #     variable = request_set[VARIABLE]

    # ASSIGN SHAPE TO VARIABLE if specified
    if hasattr(self, 'shape') and self.shape is not None:
        # IMPLEMENTATION NOTE 6/23/17 (CW): this test is currently unused by all components. To confirm this, we
        # may add an exception here (raise ComponentError("Oops this is actually used")), then run all tests.
        # thus, we should consider deleting this validation

        # Both variable and shape are specified
        if variable is not None:
            # If they conflict, raise exception, otherwise use variable (it specifies both shape and content)
            if self.shape != np.array(variable).shape:
                raise ComponentError(
                    "The shape arg of {} ({}) conflicts with the shape of its variable arg ({})".
                    format(self.name, self.shape, np.array(variable).shape))
        # Variable is not specified, so set to array of zeros with specified shape
        else:
            variable = np.zeros(self.shape)

    # VALIDATE VARIABLE

    if context.source is not ContextFlags.COMMAND_LINE:
        # if variable has been passed then validate and, if OK, assign as self.defaults.variable
        variable = self._validate_variable(variable, context=context)

    # If no params were passed, then done
    if request_set is None and target_set is None and default_set is None:
        return

    # VALIDATE PARAMS

    # if request_set has been passed or created then validate and, if OK, assign params to target_set
    if request_set:
        try:
            self._validate_params(variable=variable,
                                  request_set=request_set,
                                  target_set=target_set,
                                  context=context)
        # variable not implemented by Mechanism subclass, so validate without it
        except TypeError:
            self._validate_params(request_set=request_set,
                                  target_set=target_set,
                                  context=context)
def _initialize_parameters(self, context=None, **param_defaults):
    """Create this Component's Parameters instance and assign initial values.

    Merges class defaults with the constructor-supplied **param_defaults,
    resolves ParameterAlias entries, parses/validates each non-alias,
    non-shared Parameter in dependency order, and creates ParameterValue
    accessors for all stateful Parameters.
    """
    from psyneulink.core.components.shellclasses import (
        Composition_Base, Function, Mechanism, Port, Process_Base,
        Projection, System_Base
    )

    # excludes Function
    # types whose values are shared (referenced, not deep-copied) when
    # copying default values below
    shared_types = (
        Mechanism,
        Port,
        Projection,
        System_Base,
        Process_Base,
        Composition_Base,
        ComponentsMeta,
        types.MethodType,
        types.ModuleType,
        functools.partial,
    )

    alias_names = {p.name for p in self.class_parameters if isinstance(p, ParameterAlias)}

    self.parameters = self.Parameters(owner=self, parent=self.class_parameters)

    # assign defaults based on pass in params and class defaults
    defaults = {
        k: v for (k, v) in self.class_defaults.values(show_all=True).items()
        if k not in alias_names
    }

    if param_defaults is not None:
        # Exclude any function_params from the items to set on this Component
        # because these should just be pointers to the parameters of the same
        # name on this Component's function
        # Exclude any pass parameters whose value is None (assume this means "use the normal default")
        d = {
            k: v for (k, v) in param_defaults.items()
            if (
                (
                    k not in defaults
                    and k not in alias_names
                )
                or v is not None
            )
        }
        for p in d:
            try:
                parameter_obj = getattr(self.parameters, p)
            except AttributeError:
                # p in param_defaults does not correspond to a Parameter
                continue

            if d[p] is not None:
                parameter_obj._user_specified = True

            if parameter_obj.structural:
                parameter_obj.spec = d[p]

            if parameter_obj.modulable:
                # later, validate this
                try:
                    modulable_param_parser = self.parameters._get_prefixed_method(
                        parse=True,
                        modulable=True
                    )
                    parsed = modulable_param_parser(p, d[p])

                    if parsed is not d[p]:
                        # we have a modulable param spec
                        parameter_obj.spec = d[p]
                        d[p] = parsed
                        param_defaults[p] = parsed
                except AttributeError:
                    pass

        defaults.update(d)

    # copy defaults so instances do not share mutable default objects
    for k in defaults:
        defaults[k] = copy_parameter_value(
            defaults[k],
            shared_types=shared_types
        )

    self.defaults = Defaults(owner=self, **defaults)

    def _is_user_specified(parameter):
        return (
            parameter.name in param_defaults
            and param_defaults[parameter.name] is not None
        )

    # resolve aliases: conflicting alias/source values are an error;
    # otherwise propagate the alias's value to its source
    for p in filter(lambda x: isinstance(x, ParameterAlias), self.parameters):
        if _is_user_specified(p):
            if _is_user_specified(p.source):
                if param_defaults[p.name] is not param_defaults[p.source.name]:
                    raise ComponentError(
                        f"Multiple values ({p.name}: {param_defaults[p.name]}"
                        f"\t{p.source.name}: {param_defaults[p.source.name]}) "
                        f"assigned to identical Parameters. {p.name} is an alias "
                        f"of {p.source.name}",
                        component=self,
                    )
            else:
                param_defaults[p.source.name] = param_defaults[p.name]

    for p in filter(lambda x: not isinstance(x, (ParameterAlias, SharedParameter)), self.parameters._in_dependency_order):
        # copy spec so it is not overwritten later
        # TODO: check if this is necessary
        p.spec = copy_parameter_value(p.spec)

        # set default to None context to ensure it exists
        if p.getter is None and p._get(context) is None:
            if p._user_specified:
                val = param_defaults[p.name]
                if isinstance(val, Function):
                    if val.owner is not None:
                        val = copy.deepcopy(val)
            else:
                val = copy_parameter_value(
                    p.default_value,
                    shared_types=shared_types
                )

            if isinstance(val, Function):
                val.owner = self

            val = p._parse(val)
            p._validate(val)
            p._set(val, context=context, skip_history=True, override=True)

        if isinstance(p.default_value, Function):
            p.default_value.owner = p

    # expose stateful parameters via ParameterValue accessor attributes
    for p in self.parameters:
        if p.stateful:
            setattr(self, _get_parametervalue_attr(p), ParameterValue(self, p))
def _get_parsed_variable(self, parameter, variable=NotImplemented, context=None):
    """Return *variable* parsed for *parameter*'s Function.

    Uses this Component's _parse_<parameter.name>_variable method when one
    exists; otherwise returns the (copied default) variable unchanged.
    *parameter* may be a Parameter object or its string name.
    """
    if variable is NotImplemented:
        variable = copy.deepcopy(self.defaults.variable)

    try:
        parameter = getattr(self.parameters, parameter)
    except TypeError:
        # parameter was already a Parameter object, not a string name
        pass

    # NOTE: an AttributeError raised *inside* the parse call is also caught
    # below and treated as "no parsing method" — be careful when editing
    try:
        parse_variable_method = getattr(
            self,
            f'_parse_{parameter.name}_variable'
        )
        return copy.deepcopy(
            call_with_pruned_args(parse_variable_method, variable, context=context)
        )
    except AttributeError:
        # no parsing method, assume same shape as owner
        return variable
def _instantiate_parameter_classes(self, context=None):
    """
    An optional method that will take any Parameter values in
    **context** that are classes/types, and instantiate them.
    """
    from psyneulink.core.components.shellclasses import Function

    # (this originally occurred in _validate_params)
    for p in self.parameters._in_dependency_order:
        if p.getter is None:
            val = p._get(context)
            if (
                p.name != FUNCTION
                and is_instance_or_subclass(val, Function)
                and not p.reference
                and not isinstance(p, SharedParameter)
            ):
                function_default_variable = self._get_parsed_variable(p, context=context)

                if (
                    inspect.isclass(val)
                    and issubclass(val, Function)
                ):
                    # instantiate class val with all relevant shared parameters
                    # some shared parameters may not be arguments (e.g.
                    # transfer_fct additive_param when function is Identity)
                    # NOTE: this may cause an issue if there is an
                    # incompatibility between a shared parameter and
                    # the default variable, by forcing variable to
                    # be _user_specified, where instead the variable
                    # would be coerced to match
                    val = call_with_pruned_args(
                        val,
                        default_variable=function_default_variable,
                        **self.initial_shared_parameters[p.name]
                    )
                    val.owner = self
                    p._set(val, context)

                    # propagate _user_specified flags to the new instance's params
                    for sub_param_name in itertools.chain(self.initial_shared_parameters[p.name], ['variable']):
                        try:
                            sub_param = getattr(val.parameters, sub_param_name)
                        except AttributeError:
                            # TransferWithCosts has SharedParameters
                            # referencing transfer_fct's
                            # additive_param or
                            # multiplicative_param, but Identity
                            # does not have them
                            continue

                        try:
                            orig_param_name = [x.name for x in self.parameters if isinstance(x, SharedParameter) and x.source is sub_param][0]
                        except IndexError:
                            orig_param_name = sub_param_name
                        sub_param._user_specified = getattr(self.parameters, orig_param_name)._user_specified

                elif isinstance(val, Function):
                    # already an instance: reconcile its default variable
                    # shape with this Component's, honoring the Function's
                    # declared shape flexibility
                    incompatible = False
                    if function_default_variable.shape != val.defaults.variable.shape:
                        incompatible = True
                        if val._variable_shape_flexibility is DefaultsFlexibility.INCREASE_DIMENSION:
                            increased_dim = np.asarray([val.defaults.variable])
                            if increased_dim.shape == function_default_variable.shape:
                                function_default_variable = increased_dim
                                incompatible = False
                        elif val._variable_shape_flexibility is DefaultsFlexibility.FLEXIBLE:
                            incompatible = False

                    if incompatible:
                        def _create_justified_line(k, v, error_line_len=110):
                            return f'{k}: {v.rjust(error_line_len - len(k))}'

                        raise ParameterError(
                            f'Variable shape incompatibility between {self} and its {p.name} Parameter'
                            + _create_justified_line(
                                f'\n{self}.variable',
                                f'{function_default_variable} (numpy.array shape: {np.asarray(function_default_variable).shape})'
                            )
                            + _create_justified_line(
                                f'\n{self}.{p.name}.variable',
                                f'{val.defaults.variable} (numpy.array shape: {np.asarray(val.defaults.variable).shape})'
                            )
                        )

                    val._update_default_variable(
                        function_default_variable,
                        context
                    )

                    if isinstance(p.default_value, Function):
                        p.default_value._update_default_variable(
                            function_default_variable,
                            context
                        )

    self._override_unspecified_shared_parameters(context)
def _override_unspecified_shared_parameters(self, context):
    """Push this Component's SharedParameter defaults onto the owning objects.

    For each SharedParameter (whose source is not an alias), if the shared
    object's own parameter was not user-specified, override it with this
    Component's default; if both were user-specified and disagree, warn that
    the shared object's value wins.
    """
    for param in self.parameters._in_dependency_order:
        if (
            isinstance(param, SharedParameter)
            and not isinstance(param.source, ParameterAlias)
        ):
            try:
                obj = getattr(self.parameters, param.attribute_name)
                shared_objs = [obj.default_value, obj._get(context)]
            except AttributeError:
                # attribute_name refers to a plain attribute, not a Parameter
                obj = getattr(self, param.attribute_name)
                shared_objs = [obj]

            for c in shared_objs:
                if isinstance(c, Component):
                    try:
                        shared_obj_param = getattr(c.parameters, param.shared_parameter_name)
                    except AttributeError:
                        continue

                    if not shared_obj_param._user_specified:
                        if (
                            param.primary
                            and param.default_value is not None
                        ):
                            shared_obj_param.default_value = copy.deepcopy(param.default_value)
                            shared_obj_param._set(copy.deepcopy(param.default_value), context)
                            shared_obj_param._user_specified = param._user_specified
                    elif (
                        param._user_specified
                        and not safe_equals(param.default_value, shared_obj_param.default_value)
                        # only show warning one time, for the non-default value if possible
                        and c is shared_objs[-1]
                    ):
                        try:
                            isp_arg = self.initial_shared_parameters[param.attribute_name][param.shared_parameter_name]
                            # TODO: handle passed component but copied?
                            throw_warning = (
                                # arg passed directly into shared_obj, no parsing
                                not safe_equals(shared_obj_param._get(context), isp_arg)
                                # arg passed but was parsed
                                and not safe_equals(shared_obj_param.spec, isp_arg)
                            )
                        except KeyError:
                            throw_warning = True

                        if throw_warning:
                            warnings.warn(
                                f'Specification of the "{param.name}" parameter ({param.default_value})'
                                f' for {self} conflicts with specification of its shared parameter'
                                f' "{shared_obj_param.name}" ({shared_obj_param.default_value}) for its'
                                f' {param.attribute_name} ({param.source._owner._owner}). The value'
                                f' specified on {param.source._owner._owner} will be used.'
                            )
@handle_external_context()
def reset_params(self, mode=ResetMode.INSTANCE_TO_CLASS, context=None):
    """Reset current and/or instance defaults

    If called with:
        - CURRENT_TO_INSTANCE_DEFAULTS all current param settings are set to instance defaults
        - INSTANCE_TO_CLASS all instance defaults are set to class defaults
        - ALL_TO_CLASS_DEFAULTS all current and instance param settings are set to class defaults

    :param mode: (ResetMode) - determines which params are reset
    :return none:
    """
    if not isinstance(mode, ResetMode):
        warnings.warn("No ResetMode specified for reset_params; CURRENT_TO_INSTANCE_DEFAULTS will be used")
        # BUGFIX: previously the warning promised this fallback but `mode`
        # was left unchanged, so none of the branches below matched and
        # nothing was reset; actually apply the advertised default.
        mode = ResetMode.CURRENT_TO_INSTANCE_DEFAULTS

    for param in self.parameters:
        if mode == ResetMode.CURRENT_TO_INSTANCE_DEFAULTS:
            param._set(
                copy_parameter_value(param.default_value),
                context=context,
                skip_history=True,
                skip_log=True,
            )
        elif mode == ResetMode.INSTANCE_TO_CLASS:
            param.reset()
        elif mode == ResetMode.ALL_TO_CLASS_DEFAULTS:
            param.reset()
            param._set(
                copy_parameter_value(param.default_value),
                context=context,
                skip_history=True,
                skip_log=True,
            )
def _initialize_from_context(self, context, base_context=Context(execution_id=None), override=True, visited=None):
    """Initialize stateful Parameter values for *context* from *base_context*,
    recursing through all dependent components first.

    NOTE(review): the default base_context is a single Context instance
    created once at function-definition time and shared across calls —
    presumably intentional since it appears to be only read; confirm.
    """
    if context.execution_id is base_context.execution_id:
        return

    if visited is None:
        visited = set()

    # initialize dependents first, guarding against revisiting shared components
    for comp in self._dependent_components:
        if comp not in visited:
            visited.add(comp)
            comp._initialize_from_context(context, base_context, override, visited=visited)

    non_alias_params = [p for p in self.stateful_parameters if not isinstance(p, (ParameterAlias, SharedParameter))]
    for param in non_alias_params:
        if param.setter is None:
            param._initialize_from_context(context, base_context, override)

    # attempt to initialize any params with setters (some params with setters may depend on the
    # initialization of other params)
    # this pushes the problem down one level so that if there are two such that they depend on each other,
    # it will still fail. in this case, it is best to resolve the problem in the setter with a default
    # initialization value
    for param in non_alias_params:
        if param.setter is not None:
            param._initialize_from_context(context, base_context, override)
def _delete_contexts(self, *contexts, check_simulation_storage=False, visited=None):
    """Delete stored values for *contexts* from this Component's stateful
    Parameters, then recurse into all dependent components.

    When check_simulation_storage is True, parameters flagged to retain
    old simulation data are left untouched.
    """
    if visited is None:
        visited = set()

    # depth-first over dependents, guarding against revisiting shared components
    for dependent in self._dependent_components:
        if dependent in visited:
            continue
        visited.add(dependent)
        dependent._delete_contexts(
            *contexts,
            check_simulation_storage=check_simulation_storage,
            visited=visited,
        )

    for param in self.stateful_parameters:
        if check_simulation_storage and param.retain_old_simulation_data:
            continue
        for ctx in contexts:
            param.delete(ctx)
def _set_all_parameter_properties_recursively(self, visited=None, **kwargs):
    """Set each property given in **kwargs on every Parameter of this
    Component, then do the same for all dependent components.

    Currently used for disabling history; could also drive logging setup.
    """
    if visited is None:
        visited = set()

    for name in self.parameters.names():
        param = getattr(self.parameters, name)
        for prop, value in kwargs.items():
            try:
                setattr(param, prop, value)
            except ParameterError as err:
                logger.warning(str(err) + ' Parameter has not been modified.')

    for dependent in self._dependent_components:
        if dependent in visited:
            continue
        visited.add(dependent)
        dependent._set_all_parameter_properties_recursively(visited=visited, **kwargs)
def _set_multiple_parameter_values(self, context, **kwargs):
    """
    Unnecessary, but can simplify multiple parameter assignments at once

    For every kwarg k, v pair, will attempt to set self.parameters.<k> to v for context
    """
    for name, value in kwargs.items():
        param = getattr(self.parameters, name)
        param._set(value, context)
# ------------------------------------------------------------------------------------------------------------------
# Parsing methods
# ------------------------------------------------------------------------------------------------------------------
# ---------------------------------------------------------
# Argument parsers
# ---------------------------------------------------------
def _parse_arg_generic(self, arg_val):
"""
Argument parser for any argument that does not have a specialized parser
"""
return arg_val
def _parse_arg_variable(self, variable):
    """
    Transforms **variable** into a form that Components expect. Used to allow
    users to pass input in convenient forms, like a single float when a list
    for input ports is expected

    Returns
    -------
    The transformed **input**
    """
    if variable is None:
        return None

    # promote bare scalars (e.g. a single float) to a 1d array so downstream
    # code can treat variable uniformly as a sequence
    if isinstance(variable, (list, np.ndarray)):
        parsed = variable
    else:
        parsed = np.atleast_1d(variable)

    return convert_all_elements_to_np_array(parsed)
# ---------------------------------------------------------
# Misc parsers
# ---------------------------------------------------------
def _parse_function_variable(self, variable, context=None):
"""
Parses the **variable** passed in to a Component into a function_variable that can be used with the
Function associated with this Component
"""
return variable
# ------------------------------------------------------------------------------------------------------------------
# Validation methods
# ------------------------------------------------------------------------------------------------------------------
def _validate(self, context=None):
    """Run end-of-__init__ validation for this Component.

    Intended to eventually collect all validation methods; at present it
    only checks the Component's value.
    """
    # NOTE (4/18/18 kmantel): a fuller version would walk the Parameters
    # structure and auto-dispatch to any `_validate_<param name>` method
    # with the corresponding attribute as its single argument, allowing
    # condensed and modular validation; that refactor is out of scope here.
    self._validate_value()
def _validate_variable(self, variable, context=None):
    """Validate variable and return validated variable

    Convert self.class_defaults.variable specification and variable (if specified) to list of 1D np.ndarrays:

    VARIABLE SPECIFICATION:                                        ENCODING:
    Simple value variable:                                         0 -> [array([0])]
    Single state array (vector) variable:                         [0, 1] -> [array([0, 1])]
    Multiple port variables, each with a single value variable:   [[0], [0]] -> [array[0], array[0]]

    Perform top-level type validation of variable against the self.class_defaults.variable;
        if the type is OK, the value is returned (which should be used by the function)
    This can be overridden by a subclass to perform more detailed checking (e.g., range, recursive, etc.)
    It is called only if the parameter_validation attribute is `True` (which it is by default)

    IMPLEMENTATION NOTES:
       * future versions should add hierarchical/recursive content (e.g., range) checking
       * add request/target pattern?? (as per _validate_params) and return validated variable?

    :param variable: (anything other than a dictionary) - variable to be validated:
    :param context: (str)
    :return variable: validated variable
    """
    # a class is never a legal variable value
    if inspect.isclass(variable):
        raise ComponentError(f"Assignment of class ({variable.__name__}) "
                             f"as a variable (for {self.name}) is not allowed.")

    # If variable is not specified, then:
    #    - assign to (??now np-converted version of) self.class_defaults.variable
    #    - mark as not having been specified
    #    - return
    if variable is None:
        try:
            return self.defaults.variable
        except AttributeError:
            # instance defaults not yet assigned; fall back on class defaults
            return self.class_defaults.variable

    # Otherwise, do some checking on variable before converting to np.ndarray

    # If variable is callable (function or object reference), call it and assign return to value to variable
    # Note: check for list is necessary since function references must be passed wrapped in a list so that they are
    #       not called before being passed
    if isinstance(variable, list) and callable(variable[0]):
        variable = variable[0]()
    # NOTE (7/24/17 CW): the above two lines of code can be commented out without causing any current tests to fail
    # So we should either write tests for this piece of code, or remove it.

    # Convert variable to np.ndarray
    # Note: this insures that variable will be AT LEAST 1D;  however, can also be higher:
    #       e.g., given a list specification of [[0],[0]], it will return a 2D np.array
    variable = convert_to_np_array(variable, 1)

    return variable
def _validate_params(self, request_set, target_set=None, context=None):
"""Validate params and assign validated values to targets,
This performs top-level type validation of params
This can be overridden by a subclass to perform more detailed checking (e.g., range, recursive, etc.)
It is called only if the parameter_validation attribute is `True` (which it is by default)
IMPLEMENTATION NOTES:
* future versions should add recursive and content (e.g., range) checking
* should method return validated param set?
:param dict (request_set) - set of params to be validated:
:param dict (target_set) - repository of params that have been validated:
:return none:
"""
for param_name, param_value in request_set.items():
# setattr(self, "_"+param_name, param_value)
# Check that param is in self.defaults (if not, it is assumed to be invalid for this object)
if param_name not in self.defaults.names(show_all=True):
continue
# The default value of the param is None: suppress type checking
# IMPLEMENTATION NOTE: this can be used for params with multiple possible types,
# until type lists are implemented (see below)
if getattr(self.defaults, param_name) is None or getattr(self.defaults, param_name) is NotImplemented:
if self.prefs.verbosePref:
warnings.warn(f"{param_name} is specified as None for {self.name} which suppresses type checking.")
if target_set is not None:
target_set[param_name] = param_value
continue
# If the value in self.defaults is a type, check if param value is an instance of it
if inspect.isclass(getattr(self.defaults, param_name)):
if isinstance(param_value, getattr(self.defaults, param_name)):
target_set[param_name] = param_value
continue
# If the value is a Function class, allow any instance of Function class
from psyneulink.core.components.functions.function import Function_Base
if issubclass(getattr(self.defaults, param_name), Function_Base):
# if isinstance(param_value, (function_type, Function_Base)): <- would allow function of any kind
if isinstance(param_value, Function_Base):
target_set[param_name] = param_value
continue
# If the value in self.defaults is an object, check if param value is the corresponding class
# This occurs if the item specified by the param has not yet been implemented (e.g., a function)
if inspect.isclass(param_value):
if isinstance(getattr(self.defaults, param_name), param_value):
continue
# If the value is a projection, projection class, or a keyword for one, for anything other than
# the FUNCTION param (which is not allowed to be specified as a projection)
# then simply assign value (implication of not specifying it explicitly);
# this also allows it to pass the test below and function execution to occur for initialization;
from psyneulink.core.components.shellclasses import Projection
if (((isinstance(param_value, str) and
param_value in {CONTROL_PROJECTION, LEARNING_PROJECTION, LEARNING}) or
isinstance(param_value, Projection) or # These should be just ControlProjection or LearningProjection
inspect.isclass(param_value) and issubclass(param_value,(Projection)))
and not param_name == FUNCTION):
param_value = getattr(self.defaults, param_name)
# If self is a Function and param is a class ref for function, instantiate it as the function
from psyneulink.core.components.functions.function import Function_Base
if (isinstance(self, Function_Base) and
inspect.isclass(param_value) and
inspect.isclass(getattr(self.defaults, param_name))
and issubclass(param_value, getattr(self.defaults, param_name))):
# Assign instance to target and move on
# (compatiblity check no longer needed and can't handle function)
target_set[param_name] = param_value()
continue
# Check if param value is of same type as one with the same name in defaults
# don't worry about length
if iscompatible(param_value, getattr(self.defaults, param_name), **{kwCompatibilityLength:0}):
if isinstance(param_value, dict):
# If assign_default_FUNCTION_PARAMS is False, it means that function's class is
# compatible but different from the one in defaults;
# therefore, FUNCTION_PARAMS will not match defaults;
# instead, check that functionParams are compatible with the function's default params
if param_name == FUNCTION_PARAMS:
if not self.assign_default_FUNCTION_PARAMS:
# Get function:
try:
function = request_set[FUNCTION]
except KeyError:
# If no function is specified, self.assign_default_FUNCTION_PARAMS should be True
# (see _instantiate_defaults above)
raise ComponentError("PROGRAM ERROR: No function params for {} so should be able to "
"validate {}".format(self.name, FUNCTION_PARAMS))
else:
for entry_name, entry_value in param_value.items():
try:
getattr(function.defaults, entry_name)
except KeyError:
raise ComponentError("{0} is not a valid entry in {1} for {2} ".
format(entry_name, param_name, self.name))
# add [entry_name] entry to [param_name] dict
else:
try:
target_set[param_name][entry_name] = entry_value
# [param_name] dict not yet created, so create it
except KeyError:
target_set[param_name] = {}
target_set[param_name][entry_name] = entry_value
# target_set None
except TypeError:
pass
else:
# if param_name != FUNCTION_PARAMS:
# assert True
for entry_name, entry_value in param_value.items():
# Make sure [entry_name] is in self.defaults
try:
getattr(self.defaults, param_name)[entry_name]
except KeyError:
raise ComponentError("{0} is not a valid entry in {1} for {2} ".
format(entry_name, param_name, self.name))
# TBI: (see above)
# if not iscompatible(entry_value,
# getattr(self.defaults, param_name)[entry_name],
# **{kwCompatibilityLength:0}):
# raise ComponentError("{0} ({1}) in {2} of {3} must be a {4}".
# format(entry_name, entry_value, param_name, self.name,
# type(getattr(self.defaults, param_name)[entry_name]).__name__))
else:
# add [entry_name] entry to [param_name] dict
try:
target_set[param_name][entry_name] = entry_value
# [param_name] dict not yet created, so create it
except KeyError:
target_set[param_name] = {}
target_set[param_name][entry_name] = entry_value
# target_set None
except TypeError:
pass
elif target_set is not None:
# Copy any iterables so that deletions can be made to assignments belonging to the instance
if not isinstance(param_value, Iterable) or isinstance(param_value, str):
target_set[param_name] = param_value
else:
# hack for validation until it's streamlined
# parse modulable parameter values
if getattr(self.parameters, param_name).modulable:
try:
target_set[param_name] = param_value.copy()
except AttributeError:
try:
modulable_param_parser = self.parameters._get_prefixed_method(
parse=True,
modulable=True
)
param_value = modulable_param_parser(param_name, param_value)
target_set[param_name] = param_value
except AttributeError:
target_set[param_name] = param_value.copy()
else:
target_set[param_name] = copy.copy(param_value)
# If param is a function_type (or it has a function attribute that is one), allow any other function_type
elif callable(param_value):
target_set[param_name] = param_value
elif hasattr(param_value, FUNCTION) and callable(param_value.function):
target_set[param_name] = param_value
# It has already passed as the name of a valid param, so let it pass;
# value should be validated in subclass _validate_params override
elif isinstance(param_name, str):
# FIX: 10/3/17 - THIS IS A HACK; IT SHOULD BE HANDLED EITHER
# FIX: MORE GENERICALLY OR LOCALLY (E.G., IN OVERRIDE OF _validate_params)
if param_name == 'matrix':
if is_matrix(getattr(self.defaults, param_name)):
# FIX: ?? ASSIGN VALUE HERE, OR SIMPLY ALLOW AND ASSUME IT WILL BE PARSED ELSEWHERE
# param_value = getattr(self.defaults, param_name)
# target_set[param_name] = param_value
target_set[param_name] = param_value
else:
raise ComponentError("Value of {} param for {} ({}) must be a valid matrix specification".
format(param_name, self.name, param_value))
target_set[param_name] = param_value
# Parameter is not a valid type
else:
if type(getattr(self.defaults, param_name)) is type:
type_name = 'the name of a subclass of ' + getattr(self.defaults, param_name).__base__.__name__
raise ComponentError("Value of {} param for {} ({}) is not compatible with {}".
format(param_name, self.name, param_value, type_name))
def _get_param_value_for_modulatory_spec(self, param_name, param_value):
    """Resolve a modulatory keyword spec to the parameter's default value.

    If *param_value* names a modulatory spec (a string, Component instance, or
    class whose name appears in MODULATORY_SPEC_KEYWORDS), return the default
    value for *param_name* from self.defaults when that default is not None;
    otherwise return *param_value* unchanged.
    """
    from psyneulink.core.globals.keywords import MODULATORY_SPEC_KEYWORDS

    # Derive the name to compare against the modulatory spec keywords
    if isinstance(param_value, str):
        spec_name = param_value
    elif isinstance(param_value, Component):
        spec_name = param_value.__class__.__name__
    elif isinstance(param_value, type):
        spec_name = param_value.__name__
    else:
        raise ComponentError("PROGRAM ERROR: got {} instead of string, Component, or Class".format(param_value))

    # Not a modulatory spec — pass the value through untouched
    if spec_name not in MODULATORY_SPEC_KEYWORDS:
        return param_value

    try:
        default_value = getattr(self.defaults, param_name)
    except:
        raise ComponentError("PROGRAM ERROR: Could not get default value for {} of {} (to replace spec as {})".
                             format(param_name, self.name, param_value))
    # Only substitute the default when one is actually defined
    return default_value if default_value is not None else param_value
def _get_param_value_from_tuple(self, param_spec):
    """Returns param value (first item) of a (value, projection) tuple;
    """
    from psyneulink.core.components.mechanisms.modulatory.modulatorymechanism import ModulatoryMechanism_Base
    from psyneulink.core.components.projections.modulatory.modulatoryprojection import ModulatoryProjection_Base
    from psyneulink.core.components.ports.modulatorysignals.modulatorysignal import ModulatorySignal

    ALLOWABLE_TUPLE_SPEC_KEYWORDS = MODULATORY_SPEC_KEYWORDS
    ALLOWABLE_TUPLE_SPEC_CLASSES = (ModulatoryProjection_Base, ModulatorySignal, ModulatoryMechanism_Base)

    # If the 2nd item is a CONTROL or LEARNING SPEC, return the first item as the value
    if isinstance(param_spec, tuple) and len(param_spec) == 2:
        second_item = param_spec[1]
        # containers are never a modulatory spec (and would break the `in` test below)
        is_modulatory_spec = (
            not isinstance(second_item, (dict, list, np.ndarray))
            and (second_item in ALLOWABLE_TUPLE_SPEC_KEYWORDS
                 or isinstance(second_item, ALLOWABLE_TUPLE_SPEC_CLASSES)
                 or (inspect.isclass(second_item)
                     and issubclass(second_item, ALLOWABLE_TUPLE_SPEC_CLASSES)))
        )
        if is_modulatory_spec:
            return param_spec[0]

    # Otherwise, just return the tuple
    return param_spec
def _validate_function(self, function, context=None):
    """Check that either params[FUNCTION] and/or self.execute are implemented

    # FROM _validate_params:
    # It also checks FUNCTION:
    #     if it is specified and is a type reference (rather than an instance),
    #     it instantiates the reference (using FUNCTION_PARAMS if present)
    #     and puts a reference to the instance in target_set[FUNCTION]
    #
    This checks for an execute method in function
    If a specification is not present or valid:
        - it checks self.execute and, if present, kwExecute is assigned to it
        - if self.execute is not present or valid, an exception is raised
    When completed, there is guaranteed to be a valid method in self.function and/or self.execute;
        otherwise, an exception is raised

    Notes:
        * no new assignments (to FUNCTION or self.execute) are made here, except:
          * if FUNCTION is missing, it is assigned to self.execute (if it is present)
        * no instantiations are done here;
        * any assignment(s) to and/or instantiation(s) of self.execute and/or params[FUNCTION]
            is/are carried out in _instantiate_function

    :return:
    """
    from psyneulink.core.components.shellclasses import Function

    # FUNCTION is not specified, so try to assign self.function to it
    if function is None:
        try:
            function = self.function
        except AttributeError:
            # self.function is also missing, so raise exception
            raise ComponentError("{0} must either implement a function method or specify one in {0}.Parameters".
                                 format(self.__class__.__name__))

    # self.function is None
    # IMPLEMENTATION NOTE: This is a coding error; self.function should NEVER be assigned None
    if function is None:
        raise ComponentError("PROGRAM ERROR: either {0} must be specified or {1}.function must be implemented for {2}".
                             format(FUNCTION,self.__class__.__name__, self.name))
    # self.function is OK (a plain function, bound method, or Function instance/class), so record and return
    elif (
        isinstance(function, types.FunctionType)
        or isinstance(function, types.MethodType)
        or is_instance_or_subclass(function, Function)
    ):
        self.parameters.function._set(function, context)
        return
    # self.function is NOT OK, so raise exception
    else:
        raise ComponentError("{0} not specified and {1}.function is not a Function object or class "
                             "or valid method in {2}".
                             format(FUNCTION, self.__class__.__name__, self.name))
def _validate_value(self):
    """Hook for subclasses to validate self.value; base Component performs no checks."""
    pass
def _instantiate_attributes_before_function(self, function=None, context=None):
    """Hook called before the Component's function is instantiated; base implementation does nothing."""
    pass
def _instantiate_function(self, function, function_params=None, context=None):
    """Instantiate function defined in <subclass>.function or <subclass>.function

    Instantiate params[FUNCTION] if present, and assign it to self.function

    If params[FUNCTION] is present and valid,
        it is assigned as the function's execute method, overriding any direct implementation of self.function

    If FUNCTION IS in params:
        - if it is a Function object, it is simply assigned to self.function;
        - if it is a Function class reference:
            it is instantiated using self.defaults.variable and, if present, params[FUNCTION_PARAMS]
    If FUNCTION IS NOT in params:
        - if self.function IS implemented, it is assigned to params[FUNCTION]
        - if self.function IS NOT implemented: program error (should have been caught in _validate_function)

    Upon successful completion:
        - self._function === self.function
        - self.execute should always return the output of self.function in the first item of its output array;
          this is done by Function.execute; any subclass override should do the same, so that...
        - value is value[0] returned by self.execute
    """
    from psyneulink.core.components.functions.userdefinedfunction import UserDefinedFunction
    from psyneulink.core.components.shellclasses import Function

    # Parse the owner's default variable into the shape the function will receive
    function_variable = copy.deepcopy(
        self._parse_function_variable(
            self.defaults.variable,
            context
        )
    )

    # Specification is the function of a (non-instantiated?) Function class
    # KDM 11/12/18: parse an instance of a Function's .function method to itself
    # (not sure how worth it this is, but it existed in Scripts/Examples/Reinforcement-Learning REV)
    # purposely not attempting to parse a class Function.function
    # JDC 3/6/19: ?what about parameter ports for its parameters (see python function problem below)?
    if isinstance(function, types.MethodType):
        try:
            if isinstance(function.__self__, Function):
                function = function.__self__
        except AttributeError:
            pass

    # Specification is a standard python function (or a string), so wrap as a UserDefinedFunction
    # Note: parameter_ports for function's parameters will be created in _instantiate_attributes_after_function
    if isinstance(function, (types.FunctionType, str)):
        function = UserDefinedFunction(
            default_variable=function_variable,
            custom_function=function,
            owner=self,
            context=context,
            **function_params,
        )

    # Specification is an already implemented Function instance
    elif isinstance(function, Function):
        # Check that the Function's variable shape is compatible with the owner's
        if function_variable.shape != function.defaults.variable.shape:
            owner_str = ''
            if hasattr(self, 'owner') and self.owner is not None:
                owner_str = f' of {repr(self.owner.name)}'
            if function._variable_shape_flexibility is DefaultsFlexibility.RIGID:
                raise ComponentError(f'Variable format ({function.defaults.variable}) of {function.name} '
                                     f'is not compatible with the variable format ({function_variable}) '
                                     f'of {repr(self.name)}{owner_str} to which it is being assigned.')
                                     # f'Make sure variable for {function.name} is 2d.')
            elif function._variable_shape_flexibility is DefaultsFlexibility.INCREASE_DIMENSION:
                function_increased_dim = np.asarray([function.defaults.variable])
                if function_variable.shape != function_increased_dim.shape:
                    raise ComponentError(f'Variable format ({function.defaults.variable}) of {function.name} '
                                         f'is not compatible with the variable format ({function_variable})'
                                         f' of {repr(self.name)}{owner_str} to which it is being assigned.')
                                         # f'Make sure variable for {function.name} is 2d.')

        # class default functions should always be copied, otherwise anything this component
        # does with its function will propagate to anything else that wants to use
        # the default
        if function.owner is self:
            try:
                if function._is_pnl_inherent:
                    # This will most often occur if a Function instance is
                    # provided as a default argument in a constructor. These
                    # should instead be added as default values for the
                    # corresponding Parameter.
                    # Adding the function as a default constructor argument
                    # will lead to incorrect setting of
                    # Parameter._user_specified
                    warnings.warn(
                        f'{function} is generated once during import of'
                        ' psyneulink, and is now being reused. Please report'
                        ' this, including the script you were using, to the'
                        ' psyneulink developers at'
                        ' <EMAIL> or'
                        ' https://github.com/PrincetonUniversity/PsyNeuLink/issues'
                    )
                    function = copy.deepcopy(function)
            except AttributeError:
                pass
        elif function.owner is not None:
            # Function belongs to another Component: take a private copy
            function = copy.deepcopy(function)

        # set owner first because needed for is_initializing calls
        function.owner = self
        function._update_default_variable(function_variable, context)

    # Specification is Function class
    # Note: parameter_ports for function's parameters will be created in _instantiate_attributes_after_function
    elif inspect.isclass(function) and issubclass(function, Function):
        kwargs_to_instantiate = {}
        if function_params is not None:
            kwargs_to_instantiate.update(**function_params)
            # default_variable should not be in any function_params but sometimes it is
            kwargs_to_remove = ['default_variable']

            for arg in kwargs_to_remove:
                try:
                    del kwargs_to_instantiate[arg]
                except KeyError:
                    pass

            try:
                kwargs_to_instantiate.update(self.initial_shared_parameters[FUNCTION])
            except KeyError:
                pass
            # matrix is determined from ParameterPort based on string value in function_params
            # update it here if needed
            if MATRIX in kwargs_to_instantiate:
                try:
                    kwargs_to_instantiate[MATRIX] = self.parameter_ports[MATRIX].defaults.value
                except (AttributeError, KeyError, TypeError):
                    pass

        # Drop any kwargs the Function's constructor does not accept, then instantiate
        _, kwargs = prune_unused_args(function.__init__, args=[], kwargs=kwargs_to_instantiate)
        function = function(default_variable=function_variable, owner=self, **kwargs)

    else:
        raise ComponentError(f'Unsupported function type: {type(function)}, function={function}.')

    self.parameters.function._set(function, context)

    # KAM added 6/14/18 for functions that do not pass their has_initializers status up to their owner via property
    # FIX: need comprehensive solution for has_initializers; need to determine whether ports affect mechanism's
    # has_initializers status
    if self.function.parameters.has_initializers._get(context):
        self.parameters.has_initializers._set(True, context)

    self._parse_param_port_sources()
def _instantiate_attributes_after_function(self, context=None):
    """Create mod_*/get_mod_* class properties for each ParameterPort (and any aliases of its source)."""
    if hasattr(self, "_parameter_ports"):
        # Parameters that alias or share another parameter; their names should also get properties
        shared_params = [p for p in self.parameters if isinstance(p, (ParameterAlias, SharedParameter))]
        sources = [p.source for p in shared_params]

        for param_port in self._parameter_ports:
            property_names = {param_port.name}
            try:
                # If this port's source is shared/aliased, expose the alias name too
                alias_index = sources.index(param_port.source)
                property_names.add(shared_params[alias_index].name)
            except ValueError:
                # no alias for this port's source
                pass

            for property_name in property_names:
                # NOTE: these are installed on the CLASS, so they are shared by all instances
                setattr(self.__class__, "mod_" + property_name, make_property_mod(property_name, param_port.name))
                setattr(self.__class__, "get_mod_" + property_name, make_stateful_getter_mod(property_name, param_port.name))
def _instantiate_value(self, context=None):
    """Assign the Component's initial value by executing it once with its default variable.

    - call self.execute to get value, since the value of a Component is defined as what is returned by its
      execute method, not its function
    Falls back through the keyword names 'variable' → 'input' → no argument, since subclasses'
    execute signatures differ; only the specific "unexpected keyword" TypeError triggers each fallback.
    """
    default_variable = copy.deepcopy(self.defaults.variable)
    try:
        value = self.execute(variable=default_variable, context=context)
    except TypeError as e:
        # don't hide other TypeErrors
        if "execute() got an unexpected keyword argument 'variable'" != str(e):
            raise
        try:
            value = self.execute(input=default_variable, context=context)
        except TypeError as e:
            if "execute() got an unexpected keyword argument 'input'" != str(e):
                raise
            value = self.execute(context=context)
    if value is None:
        raise ComponentError(f"PROGRAM ERROR: Execute method for {self.name} must return a value.")

    self.parameters.value._set(value, context=context, skip_history=True)
    try:
        # Could be mutable, so assign copy
        self.defaults.value = value.copy()
    except AttributeError:
        # Immutable, so just assign value
        self.defaults.value = value
def _update_default_variable(self, new_default_variable, context=None):
    """Replace defaults.variable, revalidate params, recompute value, and propagate to owned Functions."""
    from psyneulink.core.components.shellclasses import Function

    self.defaults.variable = copy.deepcopy(new_default_variable)

    # exclude value from validation because it isn't updated until
    # _instantiate_value is called
    call_with_pruned_args(
        self._validate_params,
        variable=new_default_variable,
        request_set={
            k: v.default_value
            for k, v in self.parameters.values(True).items()
            if k not in {'value'} and not isinstance(v, ParameterAlias)
        },
        target_set={},
        context=context
    )
    self._instantiate_value(context)

    # Propagate the new variable to any Function-valued parameters (in dependency order)
    for p in self.parameters._in_dependency_order:
        val = p._get(context)
        if (
            not p.reference
            and isinstance(val, Function)
            and not isinstance(p, SharedParameter)
        ):
            function_default_variable = self._get_parsed_variable(p, context=context)

            try:
                val._update_default_variable(function_default_variable, context)
                if isinstance(p.default_value, Component):
                    p.default_value._update_default_variable(function_default_variable, context)
            except (AttributeError, TypeError):
                # best-effort: some Functions/defaults cannot accept the new variable
                pass

            # TODO: is it necessary to call _validate_value here?
def initialize(self, context=None):
    """Not supported on the base Component; subclasses that support reinitialization override this."""
    raise ComponentError(f"{self.__class__.__name__} class does not support initialize() method")
def _check_for_composition(self, context=None):
    """Allow Component to check whether it or its attributes are suitable for inclusion in a Composition

    Called by Composition.add_node.
    """
    # Base Component imposes no restrictions; subclasses override to raise when unsuitable.
    pass
@handle_external_context(fallback_most_recent=True)
def reset(self, *args, context=None, **kwargs):
    """
    If the component's execute method involves execution of an `IntegratorFunction` Function, this method
    effectively begins the function's accumulation over again at the specified value, and may update related
    values on the component, depending on the component type.  Otherwise, it simply reassigns the Component's
    value based on its default_variable.
    """
    from psyneulink.core.components.functions.stateful.integratorfunctions import IntegratorFunction
    if isinstance(self.function, IntegratorFunction):
        # Delegate to the stateful function's own reset, then record its value as this Component's value
        new_value = self.function.reset(*args, **kwargs, context=context)
        self.parameters.value.set(np.atleast_2d(new_value), context, override=True)
    else:
        # Only stateful (integrator-based) Components can be reset
        raise ComponentError(f"Resetting {self.name} is not allowed because this Component is not stateful. "
                             "(It does not have an accumulator to reset).")
@handle_external_context()
def execute(self, variable=None, context=None, runtime_params=None):
    """Executes Component's `function <Component_Function>`. See Component-specific execute method for details.

    Resolves a missing context from the owner (preferred) or this Component's most recent context,
    initializes from context on COMMAND_LINE calls, then delegates to _execute and records the value.
    """
    if context is None:
        try:
            context = self.owner.most_recent_context
        except AttributeError:
            # no owner — fall back to this Component's own most recent context
            context = self.most_recent_context

    if context.source is ContextFlags.COMMAND_LINE:
        self._initialize_from_context(context, override=False)

    value = self._execute(variable=variable, context=context, runtime_params=runtime_params)
    self.parameters.value._set(value, context=context)

    return value
def _execute(self, variable=None, context=None, runtime_params=None, **kwargs):
    """Core execution: update counters/time, then either take VALUE from runtime_params or call self.function."""
    from psyneulink.core.components.functions.function import Function

    self.parameters.variable._set(variable, context=context)

    # Only count "real" executions (not validation or initialization passes)
    if self.initialization_status & ~(ContextFlags.VALIDATING | ContextFlags.INITIALIZING):
        self._increment_execution_count()

        # Functions don't have Logs or maintain time
        if not isinstance(self, Function):
            self._update_current_execution_time(context=context)
            self._increment_num_executions(
                context,
                [TimeScale.TIME_STEP, TimeScale.PASS, TimeScale.TRIAL, TimeScale.RUN]
            )

    value = None

    # GET VALUE if specified in runtime_params
    if runtime_params and VALUE in runtime_params:
        # Get value and then pop from runtime_param, as no need to restore to previous value
        value = np.atleast_1d(runtime_params.pop(VALUE))
        # Eliminate any other params (including ones for function),
        # since they will not be assigned and therefore should not be restored to previous value below
        # (doing so would restore them to the previous previous value)
        runtime_params = {}

    # CALL FUNCTION if value is not specified
    if value is None:
        # IMPLEMENTATION NOTE:  **kwargs is included to accommodate required arguments
        #                       that are specific to particular class of Functions
        #                       (e.g., error_matrix for LearningMechanism and controller for EVCControlMechanism)
        function_variable = self._parse_function_variable(variable, context=context)
        # IMPLEMENTATION NOTE: Need to pass full runtime_params (and not just function's params) since
        #                      Mechanisms with secondary functions (e.g., IntegratorMechanism) seem them
        value = self.function(variable=function_variable, context=context, params=runtime_params, **kwargs)
        try:
            self.function.parameters.value._set(value, context)
        except AttributeError:
            # function may be a plain callable without a parameters attribute
            pass

    self.most_recent_context = context
    self._reset_runtime_parameters(context)

    return value
def is_finished(self, context=None):
    """
    Set by a Component to signal completion of its `execution <Component_Execution>` in a `TRIAL
    <TimeScale.TRIAL>`; used by `Component-based Conditions <Conditions_Component_Based>` to predicate the
    execution of one or more other Components on a Component.
    """
    finished_flag = self.parameters.is_finished_flag
    return finished_flag._get(context)
def _parse_param_port_sources(self):
    """Resolve callable ParameterPort sources into actual Parameters and rekey the port mapping."""
    if hasattr(self, '_parameter_ports'):
        for param_port in self._parameter_ports:
            try:
                # a callable source is a deferred spec: call it with self to get the real Parameter
                orig_source = param_port.source
                param_port.source = param_port.source(self)
                # rekey the mapping from the deferred spec to the resolved Parameter
                del self.parameter_ports.parameter_mapping[orig_source]
                self.parameter_ports.parameter_mapping[param_port.source] = param_port
            except TypeError:
                # source was not callable — already resolved
                pass
            param_port.source._port = param_port
def _get_current_parameter_value(self, parameter, context=None):
    """Return the (possibly modulated) current value of *parameter*, preferring its ParameterPort value.

    *parameter* may be a Parameter object or its string name. Falls back to the Parameter's
    own value when no corresponding ParameterPort can be found.
    """
    from psyneulink.core.components.ports.parameterport import ParameterPortError

    if parameter == "variable" or parameter == self.parameters.variable:
        raise ComponentError(
            f"The method '_get_current_parameter_value' is intended for retrieving the current "
            f"value of a modulable parameter; 'variable' is not a modulable parameter. If looking "
            f"for {self.name}'s default variable, try '{self.name}.defaults.variable'."
        )

    try:
        parameter = getattr(self.parameters, parameter)
    # just fail now if string and no corresponding parameter (AttributeError)
    except TypeError:
        # parameter was already a Parameter object, not a string
        pass

    parameter_port_list = None
    try:
        # parameter is SharedParameter and ultimately points to
        # something with a corresponding ParameterPort
        parameter_port_list = parameter.final_source._owner._owner.parameter_ports
    except AttributeError:
        # prefer parameter ports from self over owner
        try:
            parameter_port_list = self._parameter_ports
        except AttributeError:
            try:
                parameter_port_list = self.owner._parameter_ports
            except AttributeError:
                pass

    if parameter_port_list is not None:
        try:
            return parameter_port_list[parameter].parameters.value._get(context)
        # *parameter* string or Parameter didn't correspond to a parameter port
        except TypeError:
            pass
        except ParameterPortError as e:
            if 'Multiple ParameterPorts' in str(e):
                raise

    # no ParameterPort found — use the Parameter's own value
    return parameter._get(context)
def _reset_runtime_parameters(self, context):
if context.execution_id in self._runtime_params_reset:
for key in self._runtime_params_reset[context.execution_id]:
self._set_parameter_value(
key,
self._runtime_params_reset[context.execution_id][key],
context
)
self._runtime_params_reset[context.execution_id] = {}
def _try_execute_param(self, param, var, context=None):
    """Evaluate *param* (a value, callable, or Component, possibly nested) element-wise over *var*'s shape.

    Used e.g. for noise parameters: each scalar position of *var* receives either the
    corresponding element of *param* or the result of calling it.
    """
    def fill_recursively(arr, value, indices=()):
        # At a scalar position: call value if it is callable (with context if accepted), else use it as-is
        if arr.ndim == 0:
            try:
                value = value(context=context)
            except TypeError:
                try:
                    value = value()
                except TypeError:
                    # not callable — use the raw value
                    pass
            return value

        # Warn when value has more items than this level of arr can hold
        try:
            len_value = len(value)
            len_arr = safe_len(arr)

            if len_value > len_arr:
                if len_arr == len_value - 1:
                    ignored_items_str = f'Item {len_value - 1}'
                else:
                    ignored_items_str = f'The items {len_arr} to {len_value - 1}'

                warnings.warn(
                    f'The length of {value} is greater than that of {arr}.'
                    f'{ignored_items_str} will be ignored for index {indices}'
                )
        except TypeError:
            # if noise value is not an iterable, ignore shape warnings
            pass

        for i, _ in enumerate(arr):
            new_indices = indices + (i,)  # for error reporting
            try:
                arr[i] = fill_recursively(arr[i], value[i], new_indices)
            except (IndexError, TypeError):
                # value is not indexable at i — broadcast the whole value down this branch
                arr[i] = fill_recursively(arr[i], value, new_indices)

        return arr

    var = convert_all_elements_to_np_array(copy.deepcopy(var), cast_from=int, cast_to=float)

    # ignore zero-length variables (e.g. empty Buffer)
    if var.shape == (0, ):
        return var

    # handle simple wrapping of a Component (e.g. from ParameterPort in
    # case of set after Component instantiation)
    if (
        (isinstance(param, list) and len(param) == 1)
        or (isinstance(param, np.ndarray) and param.shape == (1,))
    ):
        if isinstance(param[0], Component):
            param = param[0]

    # Currently most noise functions do not return noise in the same
    # shape as their variable:
    if isinstance(param, Component):
        try:
            if param.defaults.value.shape == var.shape:
                return param(context=context)
        except AttributeError:
            pass

    # special case where var is shaped same as param, but with extra dims
    # assign param elements to deepest dim of var (ex: param [1, 2, 3], var [[0, 0, 0]])
    try:
        if param.shape != var.shape:
            if param.shape == np.squeeze(var).shape:
                param = param.reshape(var.shape)
    except AttributeError:
        # param has no shape — not an ndarray
        pass

    try:
        # try to broadcast param to variable if param is numeric and regular
        param_arr = np.asarray(param)
        if param_arr.dtype != object:
            return np.broadcast_to(param_arr, var.shape)
    except ValueError:
        # param not directly compatible with variable, continue elementwise
        pass

    fill_recursively(var, param)
    return var
def _increment_execution_count(self, count=1):
self.parameters.execution_count.set(self.execution_count + count, override=True)
return self.execution_count
def _increment_num_executions(self, context, time_scales:(list, TimeScale), count=1):
    """Increment the num_executions counter for each of the given TimeScales by *count*.

    Returns the updated num_executions Time object (set with override to bypass modulation).
    """
    # get relevant Time object:
    time_scales = list(time_scales)
    # BUG FIX: the original asserted a list comprehension, which is truthy whenever
    # time_scales is non-empty, so invalid entries were never caught; use all() instead.
    assert all(isinstance(i, TimeScale) for i in time_scales), \
        'non-TimeScale value provided in time_scales argument of _increment_num_executions'
    curr_num_execs = self.parameters.num_executions._get(context)
    for time_scale in time_scales:
        new_val = curr_num_execs._get_by_time_scale(time_scale) + count
        curr_num_execs._set_by_time_scale(time_scale, new_val)
    self.parameters.num_executions.set(curr_num_execs, override=True)
    return curr_num_execs
@property
def current_execution_time(self):
    """Return the cached execution time, computing it from the most recent context if absent.

    BUG FIX: the original fell through after (re)computing the cache on the
    AttributeError path and implicitly returned None; now the freshly cached
    value is returned.
    """
    try:
        return self._current_execution_time
    except AttributeError:
        # not yet cached — compute from the most recent context, then return it
        self._update_current_execution_time(self.most_recent_context.string)
        return self._current_execution_time
def get_current_execution_time(self, context=None):
    """Return the Time from *context*'s scheduler clock, or the cached execution time when no context is given.

    Returns None when the context chain lacks a composition/scheduler.
    """
    if context is None:
        return self.current_execution_time
    try:
        scheduler = context.composition.scheduler
        return scheduler.get_clock(context).time
    except AttributeError:
        return None
# MODIFIED 9/22/19 END
def _get_current_execution_time(self, context):
    """Return the current Time for this Component via the module-level _get_time helper."""
    return _get_time(self, context=context)
def _update_current_execution_time(self, context):
    """Cache the current execution time on this Component."""
    self._current_execution_time = self._get_current_execution_time(context=context)
def _change_function(self, to_function):
    """Hook for swapping a Component's function; base implementation does nothing."""
    pass
@property
def name(self):
    """The Component's name; a placeholder is synthesized if none has been assigned yet."""
    fallback = 'unnamed {0}'.format(self.__class__)
    return getattr(self, '_name', fallback)

@name.setter
def name(self, value):
    if isinstance(value, str):
        self._name = value
        return
    raise ComponentError(f"Name assigned to {self.__class__.__name__} ({value}) must be a string constant.")
@property
def size(self):
    """Return a 1d array with the length of each item of defaults.variable, or None if unavailable."""
    try:
        rows = np.atleast_2d(self.defaults.variable)
    except AttributeError:
        # no defaults.variable to measure
        return None
    # idiomatic comprehension replaces the original manual index-loop-and-append
    return np.array([len(row) for row in rows])
@property
def prefs(self):
    """This Component's PreferenceSet; owner is refreshed on every access."""
    # Whenever pref is accessed, use current owner as context (for level checking)
    self._prefs.owner = self
    return self._prefs

@prefs.setter
def prefs(self, pref_set):
    if (isinstance(pref_set, PreferenceSet)):
        # IMPLEMENTATION NOTE:
        # - Complements dynamic assignment of owner in getter (above)
        # - Needed where prefs are assigned before they've been gotten (e.g., in PreferenceSet.__init__()
        # - owner needs to be assigned for call to get_pref_setting_for_level below
        # MODIFIED 6/1/16
        try:
            pref_set.owner = self
        except:
            # deliberate best-effort: owner assignment may fail during early construction
            # MODIFIED 6/1/16 END
            pass
        self._prefs = pref_set
        if self.prefs.verbosePref:
            warnings.warn('PreferenceSet {0} assigned to {1}'.format(pref_set.name, self.name))
        # Make sure that every pref attrib in PreferenceSet is OK
        for pref_name, pref_entry in self.prefs.__dict__.items():
            if '_pref' in pref_name:
                value, err_msg = self.prefs.get_pref_setting_for_level(pref_name, pref_entry.level)
                if err_msg and self.prefs.verbosePref:
                    warnings.warn(err_msg)
                # FIX: VALUE RETURNED SHOULD BE OK, SO ASSIGN IT INSTEAD OF ONE IN pref_set??
                # FIX: LEVEL SHOULD BE LOWER THAN REQUESTED;  REPLACE RAISE WITH WARNING TO THIS EFFECT
    else:
        raise ComponentError("Attempt to assign non-PreferenceSet {0} to {0}.prefs".
                             format(pref_set, self.name))
@property
def verbosePref(self):
    """Delegate to this Component's PreferenceSet."""
    return getattr(self.prefs, 'verbosePref')

@verbosePref.setter
def verbosePref(self, setting):
    setattr(self.prefs, 'verbosePref', setting)
@property
def paramValidationPref(self):
    """Delegate to this Component's PreferenceSet."""
    return getattr(self.prefs, 'paramValidationPref')

@paramValidationPref.setter
def paramValidationPref(self, setting):
    setattr(self.prefs, 'paramValidationPref', setting)
@property
def reportOutputPref(self):
    """Map plain booleans onto ReportOutput values; identity checks (`is`) are deliberate
    so e.g. 0/1 or other truthy values pass through unchanged."""
    from psyneulink.core.compositions.report import ReportOutput
    pref = self.prefs.reportOutputPref
    if pref is True:
        return ReportOutput.TERSE
    if pref is False:
        return ReportOutput.OFF
    return pref

@reportOutputPref.setter
def reportOutputPref(self, setting):
    from psyneulink.core.compositions.report import ReportOutput
    if setting is True:
        setting = ReportOutput.TERSE
    elif setting is False:
        setting = ReportOutput.OFF
    self.prefs.reportOutputPref = setting
@property
def logPref(self):
    """Delegate to this Component's PreferenceSet."""
    return getattr(self.prefs, 'logPref')

@logPref.setter
def logPref(self, setting):
    setattr(self.prefs, 'logPref', setting)
@property
def runtimeParamModulationPref(self):
    """Delegate to this Component's PreferenceSet."""
    return getattr(self.prefs, 'runtimeParamModulationPref')

@runtimeParamModulationPref.setter
def runtimeParamModulationPref(self, setting):
    setattr(self.prefs, 'runtimeParamModulationPref', setting)
@property
def initialization_status(self):
    """Current initialization status flag; lazily defaults to INITIALIZING on first access."""
    try:
        return self._initialization_status
    except AttributeError:
        self._initialization_status = ContextFlags.INITIALIZING
    return self._initialization_status

@initialization_status.setter
def initialization_status(self, flag):
    """Check that a flag is one and only one status flag
    """
    if flag in INITIALIZATION_STATUS_FLAGS:
        self._initialization_status = flag
    elif not flag:
        # falsy flag → treat as UNINITIALIZED
        self._initialization_status = ContextFlags.UNINITIALIZED
    elif not (flag & ContextFlags.INITIALIZATION_MASK):
        # flag has no bits in the initialization mask — not a status flag at all
        raise ContextError("Attempt to assign a flag ({}) to initialization_status "
                           "that is not an initialization status flag".
                           format(str(flag)))
    else:
        # flag overlaps the mask but is not a single recognized status flag
        raise ContextError("Attempt to assign more than one flag ({}) to initialization_status".
                           format(str(flag)))
@property
def is_initializing(self):
    """True if this Component or its owner is currently in the INITIALIZING state."""
    try:
        owner_initializing = self.owner.initialization_status == ContextFlags.INITIALIZING
    except AttributeError:
        # no owner — only this Component's own status matters
        owner_initializing = False

    return self.initialization_status == ContextFlags.INITIALIZING or owner_initializing
@property
def log(self):
    """The Component's Log; raises a helpful error if initialization was deferred."""
    try:
        return self._log
    except AttributeError:
        if self.initialization_status == ContextFlags.DEFERRED_INIT:
            raise ComponentError("Initialization of {} is deferred; try assigning {} after it is complete "
                                 "or appropriately configuring a Composition to which it belongs".
                                 format(self.name, 'log'))
        else:
            raise AttributeError

@log.setter
def log(self, log):
    self._log = log
@property
def loggable_items(self):
    """Dictionary of items that can be logged in the Component's `log <Component.log>` and their current `ContextFlags`.
    This is a convenience method that calls the `loggable_items <Log.loggable_items>` property of the Component's
    `log <Component.log>`.
    """
    return self.log.loggable_items
def set_log_conditions(self, items, log_condition=LogCondition.EXECUTION):
    """
    set_log_conditions(          \
        items                    \
        log_condition=EXECUTION  \
    )

    Specifies items to be logged; these must be `loggable_items <Component.loggable_items>` of the Component's
    `log <Component.log>`. This is a convenience method that calls the `set_log_conditions <Log.set_log_conditions>`
    method of the Component's `log <Component.log>`.
    """
    self.log.set_log_conditions(items=items, log_condition=log_condition)
def set_delivery_conditions(self, items, delivery_condition=LogCondition.EXECUTION):
    """
    set_delivery_conditions(          \
        items                         \
        delivery_condition=EXECUTION  \
    )

    Specifies items to be delivered to external application via gRPC; these must be `loggable_items
    <Component.loggable_items>` of the Component's `log <Component.log>`. This is a convenience method that calls
    the `_set_delivery_conditions <Log._set_delivery_conditions>` method of the Component's `log <Component.log>`.
    """
    self.log._set_delivery_conditions(items=items, delivery_condition=delivery_condition)
def log_values(self, entries):
    """
    log_values(   \
        entries   \
    )

    Specifies items to be logged; these must be `loggable_items <Component.loggable_items>` of the Component's
    `log <Component.log>`. This is a convenience method that calls the `log_values <Log.log_values>` method
    of the Component's `log <Component.log>`.
    """
    self.log.log_values(entries)
def _propagate_most_recent_context(self, context=None, visited=None):
    """Assign `most_recent_context` on this Component and, recursively, on every
    dependent component; *visited* guards against revisiting shared objects."""
    if visited is None:
        visited = {self}
    if context is None:
        context = self.most_recent_context
    self.most_recent_context = context

    # TODO: avoid duplicating objects in _dependent_components
    # throughout psyneulink or at least condense these methods
    for dependent in self._dependent_components:
        if dependent in visited:
            continue
        visited.add(dependent)
        dependent._propagate_most_recent_context(context, visited)
@property
def _dict_summary(self):
    """Return a nested dict describing this Component — name, parameters,
    functions, and type — suitable for serialization to a model specification.

    NOTE(review): Port values are flattened to '<owner>.<port-section>.<name>'
    identifier strings; plain Python functions are serialized via dill and
    base64-encoded.
    """
    from psyneulink.core.compositions.composition import Composition
    from psyneulink.core.components.ports.port import Port
    from psyneulink.core.components.ports.outputport import OutputPort
    from psyneulink.core.components.ports.parameterport import ParameterPortError
    from psyneulink.core.components.functions.nonstateful.transferfunctions import LinearMatrix

    def parse_parameter_value(value):
        # Recursively convert a parameter value into a serializable form.
        if isinstance(value, (list, tuple)):
            new_item = []
            for item in value:
                new_item.append(parse_parameter_value(item))
            try:
                value = type(value)(new_item)
            except TypeError:
                value = type(value)(*new_item)
        elif isinstance(value, dict):
            value = {
                parse_parameter_value(k): parse_parameter_value(v)
                for k, v in value.items()
            }
        elif isinstance(value, Composition):
            value = value.name
        elif isinstance(value, Port):
            if isinstance(value, OutputPort):
                state_port_name = MODEL_SPEC_ID_OUTPUT_PORTS
            else:
                state_port_name = MODEL_SPEC_ID_INPUT_PORTS
            # assume we will use the identifier on reconstitution
            value = '{0}.{1}.{2}'.format(
                value.owner.name,
                state_port_name,
                value.name
            )
        elif isinstance(value, Component):
            # could potentially create duplicates when it should
            # create a reference to an already existent Component like
            # with Compositions, but in a vacuum the full specification
            # is necessary.
            # in fact this would happen unless the parser specifically
            # handles it like ours does
            value = value._dict_summary
        elif isinstance(value, (types.FunctionType)):
            value = base64.encodebytes(dill.dumps(value)).decode('utf-8')

        return value

    # attributes (and their values) included in top-level dict
    basic_attributes = ['name']

    # attributes that aren't Parameters but are psyneulink-specific
    # and are stored in the PNL parameters section
    implicit_parameter_attributes = ['node_ordering', 'required_node_roles']

    parameters_dict = {}
    pnl_specific_parameters = {}
    deferred_init_values = {}

    if self.initialization_status is ContextFlags.DEFERRED_INIT:
        deferred_init_values = copy.copy(self._init_args)
        try:
            deferred_init_values.update(deferred_init_values['params'])
        except (KeyError, TypeError):
            pass

        # .parameters still refers to class parameters during deferred init
        assert self.parameters._owner is not self

    for p in self.parameters:
        if (
            p.name not in self._model_spec_parameter_blacklist
            and not isinstance(p, ParameterAlias)
        ):
            if self.initialization_status is ContextFlags.DEFERRED_INIT:
                try:
                    val = deferred_init_values[p.name]
                except KeyError:
                    # class default
                    val = p.default_value
            else:
                # special handling because LinearMatrix default values
                # can be PNL-specific keywords. In future, generalize
                # this workaround
                if (
                    isinstance(self, LinearMatrix)
                    and p.name == 'matrix'
                ):
                    val = self.parameters.matrix.values[None]
                elif p.spec is not None:
                    val = p.spec
                else:
                    val = p.default_value

            val = parse_parameter_value(val)

            try:
                matching_parameter_port = self.owner.parameter_ports[p.name]
                if matching_parameter_port.source._owner._owner is self:
                    val = {
                        MODEL_SPEC_ID_PARAMETER_SOURCE: '{0}.{1}.{2}'.format(
                            self.owner.name,
                            MODEL_SPEC_ID_INPUT_PORTS,
                            p.name
                        ),
                        MODEL_SPEC_ID_PARAMETER_VALUE: val,
                        MODEL_SPEC_ID_TYPE: type(val)
                    }
            # ContentAddressableList uses TypeError when key not found
            except (AttributeError, TypeError, ParameterPortError):
                pass

            # split parameters designated as PsyNeuLink-specific and
            # parameters that are universal
            if p.pnl_internal:
                pnl_specific_parameters[p.name] = val
            else:
                parameters_dict[p.name] = val

    for attr in implicit_parameter_attributes:
        try:
            pnl_specific_parameters[attr] = getattr(self, attr)
        except AttributeError:
            pass

    if len(pnl_specific_parameters) > 0:
        parameters_dict[MODEL_SPEC_ID_PSYNEULINK] = pnl_specific_parameters

    function_dict = {}
    try:
        if isinstance(self.function, Component):
            function_dict['functions'] = [self.function._dict_summary]
    except AttributeError:
        pass

    type_dict = {}

    if self._model_spec_class_name_is_generic:
        type_dict[MODEL_SPEC_ID_GENERIC] = self.__class__.__name__
    else:
        if self._model_spec_generic_type_name is not NotImplemented:
            type_dict[MODEL_SPEC_ID_GENERIC] = self._model_spec_generic_type_name
        else:
            type_dict[MODEL_SPEC_ID_GENERIC] = None

        type_dict[MODEL_SPEC_ID_PSYNEULINK] = self.__class__.__name__

    return {
        **{attr: getattr(self, attr) for attr in basic_attributes},
        **{self._model_spec_id_parameters: parameters_dict},
        **function_dict,
        **{MODEL_SPEC_ID_TYPE: type_dict}
    }
@property
def logged_items(self):
    """Dictionary of all items that have entries in the log, and their currently assigned `ContextFlags`\\s
    This is a convenience method that calls the `logged_items <Log.logged_items>` property of the Component's
    `log <Component.log>`.
    """
    return self.log.logged_items
@property
def _loggable_parameters(self):
    """Names of this Component's user-facing, loggable Parameters."""
    return [param.name for param in self.parameters if param.loggable and param.user]
@property
def _variable_shape_flexibility(self):
    """How flexibly this Component's default variable shape may be adjusted;
    lazily defaults to DefaultsFlexibility.FLEXIBLE on first access."""
    # NOTE: the double-underscore attribute is name-mangled to this class.
    try:
        return self.__variable_shape_flexibility
    except AttributeError:
        self.__variable_shape_flexibility = DefaultsFlexibility.FLEXIBLE
        return self.__variable_shape_flexibility

@_variable_shape_flexibility.setter
def _variable_shape_flexibility(self, value):
    self.__variable_shape_flexibility = value
@property
def class_parameters(self):
    """The Parameters object declared on this object's class (not the instance)."""
    return self.__class__.parameters
@property
def stateful_parameters(self):
    """
    A list of all of this object's `parameters <Parameters>` whose values
    may change during runtime
    """
    return [param for param in self.parameters if param.stateful]
@property
def stateful_attributes(self):
    """Names of Parameters that have an initializer assigned."""
    return [p.name for p in self.parameters if p.initializer is not None]
@property
def initializers(self):
    """Initializers corresponding, in order, to `stateful_attributes`."""
    return [getattr(self.parameters, p).initializer for p in self.stateful_attributes]
@property
def function_parameters(self):
    """
    The `parameters <Parameters>` object of this object's `function`,
    or None if there is no function or it has no parameters.
    """
    function = getattr(self, 'function', None)
    return getattr(function, 'parameters', None)
@property
def class_defaults(self):
    """
    Refers to the defaults of this object's class
    """
    return self.__class__.defaults
@property
def is_pnl_inherent(self):
    """Lazily-initialized flag; defaults to False on first access."""
    if not hasattr(self, '_is_pnl_inherent'):
        self._is_pnl_inherent = False
    return self._is_pnl_inherent
@property
def _parameter_components(self):
    """
    Returns a set of Components that are values of this object's
    Parameters
    """
    try:
        components = self.__parameter_components
    except AttributeError:
        # First access: create the (initially empty) cache.
        components = self.__parameter_components = set()
    return components
@handle_external_context()
def _update_parameter_components(self, context=None):
    """Collect every Component held as a Parameter value into `_parameter_components`."""
    # store all Components in Parameters to be used in
    # _dependent_components for _initialize_from_context
    for p in self.parameters:
        try:
            param_value = p._get(context)
            try:
                # unwrap a bound method to the object it is bound to
                param_value = param_value.__self__
            except AttributeError:
                pass
            if isinstance(param_value, Component) and param_value is not self:
                self._parameter_components.add(param_value)
        # ControlMechanism and GatingMechanism have Parameters that only
        # throw these errors
        except Exception as e:
            # cannot import the specific exceptions due to circularity
            if 'attribute is not implemented on' not in str(e):
                raise
@property
def _dependent_components(self):
    """
    Returns a set of Components that will be executed if this Component is executed
    """
    return list(self._parameter_components)
@property
def most_recent_context(self):
    """
    used to set a default behavior for attributes that correspond to parameters
    """
    if not hasattr(self, '_most_recent_context'):
        # First access: default to a COMMAND_LINE context with no execution_id.
        self._most_recent_context = Context(source=ContextFlags.COMMAND_LINE, execution_id=None)
    return self._most_recent_context

@most_recent_context.setter
def most_recent_context(self, value):
    self._most_recent_context = value
@property
def _model_spec_parameter_blacklist(self):
    """
    A set of Parameter names that should not be added to the generated
    constructor string
    """
    return {'function', 'value'}
# Module-level alias for the base class of all Components.
COMPONENT_BASE_CLASS = Component
def make_property_mod(param_name, parameter_port_name=None):
    """Build a read-only ``mod_<param>`` property exposing the value of the
    owner's ParameterPort named *parameter_port_name* (defaults to
    *param_name*).  Reading it emits a FutureWarning (deprecated access path);
    writing it raises ComponentError."""
    if parameter_port_name is None:
        parameter_port_name = param_name

    def _mod_getter(self):
        warnings.warn(
            f'Getting modulated parameter values with <object>.mod_<param_name>'
            ' may be removed in a future release. It is replaced with,'
            f' for example, <object>.{param_name}.modulated',
            FutureWarning
        )
        try:
            return self._parameter_ports[parameter_port_name].value
        except TypeError:
            raise ComponentError("{} does not have a '{}' ParameterPort."
                                 .format(self.name, param_name))

    def _mod_setter(self, value):
        raise ComponentError("Cannot set to {}'s mod_{} directly because it is computed by the ParameterPort."
                             .format(self.name, param_name))

    return property(_mod_getter).setter(_mod_setter)
def make_stateful_getter_mod(param_name, parameter_port_name=None):
    """Build a context-aware getter returning the stateful value of the owner's
    ParameterPort named *parameter_port_name* (defaults to *param_name*)."""
    port_name = param_name if parameter_port_name is None else parameter_port_name

    def getter(self, context=None):
        try:
            return self._parameter_ports[port_name].parameters.value.get(context)
        except TypeError:
            # The port collection signals a missing key with TypeError
            # (presumably ContentAddressableList — see note in _dict_summary).
            raise ComponentError("{} does not have a '{}' ParameterPort."
                                 .format(self.name, param_name))

    return getter
class ParameterValue:
    """Convenience accessor pairing a Parameter with its owner, exposing the
    unmodulated (`base`) and modulated (`modulated`) values for the owner's
    most recent context."""

    def __init__(self, owner, parameter):
        self._owner = owner
        self._parameter = parameter

    def __repr__(self):
        return f'{self._owner}:\n\t{self._parameter.name}.base: {self.base}\n\t{self._parameter.name}.modulated: {self.modulated}'

    @property
    def modulated(self):
        """Modulated value of the parameter, or None (with a warning) if the
        parameter is not currently modulated."""
        # The parameter counts as modulated when it appears among the
        # parameter_ports of its owner, or of its owner's owner.
        try:
            is_modulated = self._parameter in self._owner.parameter_ports
        except AttributeError:
            is_modulated = False
        if not is_modulated:
            try:
                is_modulated = self._parameter in self._owner.owner.parameter_ports
            except AttributeError:
                pass
        if not is_modulated:
            warnings.warn(f'{self._parameter.name} is not currently modulated.')
            return None
        return self._owner._get_current_parameter_value(
            self._parameter,
            self._owner.most_recent_context
        )

    @modulated.setter
    def modulated(self, value):
        raise ComponentError(
            f"Cannot set {self._owner.name}'s modulated {self._parameter.name}"
            ' value directly because it is computed by the ParameterPort.'
        )

    @property
    def base(self):
        """Unmodulated value of the parameter for the owner's most recent context."""
        return self._parameter.get(self._owner.most_recent_context)

    @base.setter
    def base(self, value):
        self._parameter.set(value, self._owner.most_recent_context)
| [
"psyneulink.core.components.functions.userdefinedfunction.UserDefinedFunction",
"psyneulink.core.globals.utilities.prune_unused_args",
"psyneulink.core.globals.utilities.convert_all_elements_to_np_array",
"psyneulink.core.globals.log.Log",
"psyneulink.core.components.functions.function.FunctionError",
"co... | [((28164, 28191), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (28181, 28191), False, 'import logging\n'), ((85013, 85038), 'psyneulink.core.globals.context.handle_external_context', 'handle_external_context', ([], {}), '()\n', (85036, 85038), False, 'from psyneulink.core.globals.context import Context, ContextError, ContextFlags, INITIALIZATION_STATUS_FLAGS, _get_time, handle_external_context\n'), ((104390, 104415), 'psyneulink.core.globals.context.handle_external_context', 'handle_external_context', ([], {}), '()\n', (104413, 104415), False, 'from psyneulink.core.globals.context import Context, ContextError, ContextFlags, INITIALIZATION_STATUS_FLAGS, _get_time, handle_external_context\n'), ((144911, 144961), 'psyneulink.core.globals.context.handle_external_context', 'handle_external_context', ([], {'fallback_most_recent': '(True)'}), '(fallback_most_recent=True)\n', (144934, 144961), False, 'from psyneulink.core.globals.context import Context, ContextError, ContextFlags, INITIALIZATION_STATUS_FLAGS, _get_time, handle_external_context\n'), ((145979, 146004), 'psyneulink.core.globals.context.handle_external_context', 'handle_external_context', ([], {}), '()\n', (146002, 146004), False, 'from psyneulink.core.globals.context import Context, ContextError, ContextFlags, INITIALIZATION_STATUS_FLAGS, _get_time, handle_external_context\n'), ((174690, 174715), 'psyneulink.core.globals.context.handle_external_context', 'handle_external_context', ([], {}), '()\n', (174713, 174715), False, 'from psyneulink.core.globals.context import Context, ContextError, ContextFlags, INITIALIZATION_STATUS_FLAGS, _get_time, handle_external_context\n'), ((32831, 32851), 'psyneulink.core.globals.parameters.Defaults', 'Defaults', ([], {'owner': 'self'}), '(owner=self)\n', (32839, 32851), False, 'from psyneulink.core.globals.parameters import Defaults, SharedParameter, Parameter, ParameterAlias, ParameterError, ParametersBase, copy_parameter_value\n'), 
((45551, 45619), 'psyneulink.core.globals.parameters.Parameter', 'Parameter', (['(False)'], {'setter': '_has_initializers_setter', 'pnl_internal': '(True)'}), '(False, setter=_has_initializers_setter, pnl_internal=True)\n', (45560, 45619), False, 'from psyneulink.core.globals.parameters import Defaults, SharedParameter, Parameter, ParameterAlias, ParameterError, ParametersBase, copy_parameter_value\n'), ((45805, 45911), 'psyneulink.core.globals.parameters.Parameter', 'Parameter', (['(0)'], {'read_only': '(True)', 'loggable': '(False)', 'stateful': '(False)', 'fallback_default': '(True)', 'pnl_internal': '(True)'}), '(0, read_only=True, loggable=False, stateful=False,\n fallback_default=True, pnl_internal=True)\n', (45814, 45911), False, 'from psyneulink.core.globals.parameters import Defaults, SharedParameter, Parameter, ParameterAlias, ParameterError, ParametersBase, copy_parameter_value\n'), ((46115, 46161), 'psyneulink.core.globals.parameters.Parameter', 'Parameter', (['(True)'], {'loggable': '(False)', 'stateful': '(True)'}), '(True, loggable=False, stateful=True)\n', (46124, 46161), False, 'from psyneulink.core.globals.parameters import Defaults, SharedParameter, Parameter, ParameterAlias, ParameterError, ParametersBase, copy_parameter_value\n'), ((46333, 46378), 'psyneulink.core.globals.parameters.Parameter', 'Parameter', (['(0)'], {'read_only': '(True)', 'modulable': '(False)'}), '(0, read_only=True, modulable=False)\n', (46342, 46378), False, 'from psyneulink.core.globals.parameters import Defaults, SharedParameter, Parameter, ParameterAlias, ParameterError, ParametersBase, copy_parameter_value\n'), ((46420, 46452), 'psyneulink.core.globals.parameters.Parameter', 'Parameter', (['(1000)'], {'modulable': '(False)'}), '(1000, modulable=False)\n', (46429, 46452), False, 'from psyneulink.core.globals.parameters import Defaults, SharedParameter, Parameter, ParameterAlias, ParameterError, ParametersBase, copy_parameter_value\n'), ((50383, 50481), 
'psyneulink.core.globals.context.Context', 'Context', ([], {'source': 'ContextFlags.CONSTRUCTOR', 'execution_phase': 'ContextFlags.IDLE', 'execution_id': 'None'}), '(source=ContextFlags.CONSTRUCTOR, execution_phase=ContextFlags.IDLE,\n execution_id=None)\n', (50390, 50481), False, 'from psyneulink.core.globals.context import Context, ContextError, ContextFlags, INITIALIZATION_STATUS_FLAGS, _get_time, handle_external_context\n'), ((51785, 51908), 'psyneulink.core.globals.utilities.call_with_pruned_args', 'call_with_pruned_args', (['self._handle_default_variable'], {'default_variable': 'default_variable', 'size': 'size'}), '(self._handle_default_variable, default_variable=\n default_variable, size=size, **parameter_values)\n', (51806, 51908), False, 'from psyneulink.core.globals.utilities import ContentAddressableList, convert_all_elements_to_np_array, convert_to_np_array, get_deepcopy_with_shared, is_instance_or_subclass, is_matrix, iscompatible, kwCompatibilityLength, prune_unused_args, get_all_explicit_arguments, call_with_pruned_args, safe_equals, safe_len\n'), ((52250, 52295), 'psyneulink.core.globals.preferences.preferenceset._assign_prefs', '_assign_prefs', (['self', 'prefs', 'BasePreferenceSet'], {}), '(self, prefs, BasePreferenceSet)\n', (52263, 52295), False, 'from psyneulink.core.globals.preferences.preferenceset import PreferenceLevel, PreferenceSet, _assign_prefs\n'), ((53125, 53154), 'collections.defaultdict', 'collections.defaultdict', (['dict'], {}), '(dict)\n', (53148, 53154), False, 'import collections\n'), ((54767, 54782), 'psyneulink.core.globals.log.Log', 'Log', ([], {'owner': 'self'}), '(owner=self)\n', (54770, 54782), False, 'from psyneulink.core.globals.log import Log\n'), ((57444, 57510), 'psyneulink.core.globals.utilities.get_deepcopy_with_shared', 'get_deepcopy_with_shared', (['self._deepcopy_shared_keys', 'shared_types'], {}), '(self._deepcopy_shared_keys, shared_types)\n', (57468, 57510), False, 'from psyneulink.core.globals.utilities 
import ContentAddressableList, convert_all_elements_to_np_array, convert_to_np_array, get_deepcopy_with_shared, is_instance_or_subclass, is_matrix, iscompatible, kwCompatibilityLength, prune_unused_args, get_all_explicit_arguments, call_with_pruned_args, safe_equals, safe_len\n'), ((67147, 67197), 'psyneulink.core.globals.utilities.convert_to_np_array', 'convert_to_np_array', (['default_variable'], {'dimension': '(1)'}), '(default_variable, dimension=1)\n', (67166, 67197), False, 'from psyneulink.core.globals.utilities import ContentAddressableList, convert_all_elements_to_np_array, convert_to_np_array, get_deepcopy_with_shared, is_instance_or_subclass, is_matrix, iscompatible, kwCompatibilityLength, prune_unused_args, get_all_explicit_arguments, call_with_pruned_args, safe_equals, safe_len\n'), ((78480, 78578), 'psyneulink.core.globals.registry.register_category', 'register_category', ([], {'entry': 'self', 'base_class': 'Component', 'name': 'name', 'registry': 'DeferredInitRegistry'}), '(entry=self, base_class=Component, name=name, registry=\n DeferredInitRegistry)\n', (78497, 78578), False, 'from psyneulink.core.globals.registry import register_category\n'), ((93011, 93043), 'psyneulink.core.globals.parameters.Defaults', 'Defaults', ([], {'owner': 'self'}), '(owner=self, **defaults)\n', (93019, 93043), False, 'from psyneulink.core.globals.parameters import Defaults, SharedParameter, Parameter, ParameterAlias, ParameterError, ParametersBase, copy_parameter_value\n'), ((105885, 105911), 'psyneulink.core.globals.context.Context', 'Context', ([], {'execution_id': 'None'}), '(execution_id=None)\n', (105892, 105911), False, 'from psyneulink.core.globals.context import Context, ContextError, ContextFlags, INITIALIZATION_STATUS_FLAGS, _get_time, handle_external_context\n'), ((110146, 110188), 'psyneulink.core.globals.utilities.convert_all_elements_to_np_array', 'convert_all_elements_to_np_array', (['variable'], {}), '(variable)\n', (110178, 110188), False, 'from 
psyneulink.core.globals.utilities import ContentAddressableList, convert_all_elements_to_np_array, convert_to_np_array, get_deepcopy_with_shared, is_instance_or_subclass, is_matrix, iscompatible, kwCompatibilityLength, prune_unused_args, get_all_explicit_arguments, call_with_pruned_args, safe_equals, safe_len\n'), ((113226, 113251), 'inspect.isclass', 'inspect.isclass', (['variable'], {}), '(variable)\n', (113241, 113251), False, 'import inspect\n'), ((114712, 114744), 'psyneulink.core.globals.utilities.convert_to_np_array', 'convert_to_np_array', (['variable', '(1)'], {}), '(variable, 1)\n', (114731, 114744), False, 'from psyneulink.core.globals.utilities import ContentAddressableList, convert_all_elements_to_np_array, convert_to_np_array, get_deepcopy_with_shared, is_instance_or_subclass, is_matrix, iscompatible, kwCompatibilityLength, prune_unused_args, get_all_explicit_arguments, call_with_pruned_args, safe_equals, safe_len\n'), ((141967, 142004), 'copy.deepcopy', 'copy.deepcopy', (['self.defaults.variable'], {}), '(self.defaults.variable)\n', (141980, 142004), False, 'import copy\n'), ((143184, 143219), 'copy.deepcopy', 'copy.deepcopy', (['new_default_variable'], {}), '(new_default_variable)\n', (143197, 143219), False, 'import copy\n'), ((156984, 157016), 'psyneulink.core.globals.context._get_time', '_get_time', (['self'], {'context': 'context'}), '(self, context=context)\n', (156993, 157016), False, 'from psyneulink.core.globals.context import Context, ContextError, ContextFlags, INITIALIZATION_STATUS_FLAGS, _get_time, handle_external_context\n'), ((157868, 157879), 'numpy.array', 'np.array', (['s'], {}), '(s)\n', (157876, 157879), True, 'import numpy as np\n'), ((176762, 176972), 'warnings.warn', 'warnings.warn', (['f"""Getting modulated parameter values with <object>.mod_<param_name> may be removed in a future release. 
It is replaced with, for example, <object>.{param_name}.modulated"""', 'FutureWarning'], {}), "(\n f'Getting modulated parameter values with <object>.mod_<param_name> may be removed in a future release. It is replaced with, for example, <object>.{param_name}.modulated'\n , FutureWarning)\n", (176775, 176972), False, 'import warnings\n'), ((31315, 31517), 'warnings.warn', 'warnings.warn', (['f"""Setting parameter values directly using dot notation may be removed in a future release. It is replaced with, for example, <object>.{param.name}.base = {value}"""', 'FutureWarning'], {}), "(\n f'Setting parameter values directly using dot notation may be removed in a future release. It is replaced with, for example, <object>.{param.name}.base = {value}'\n , FutureWarning)\n", (31328, 31517), False, 'import warnings\n'), ((45357, 45370), 'numpy.array', 'np.array', (['[0]'], {}), '([0])\n', (45365, 45370), True, 'import numpy as np\n'), ((45474, 45487), 'numpy.array', 'np.array', (['[0]'], {}), '([0])\n', (45482, 45487), True, 'import numpy as np\n'), ((46235, 46241), 'psyneulink.core.scheduling.time.Time', 'Time', ([], {}), '()\n', (46239, 46241), False, 'from psyneulink.core.scheduling.time import Time, TimeScale\n'), ((50718, 50725), 'psyneulink.core.scheduling.condition.Never', 'Never', ([], {}), '()\n', (50723, 50725), False, 'from psyneulink.core.scheduling.condition import Never\n'), ((50770, 50812), 'copy.copy', 'copy.copy', (['param_defaults[FUNCTION_PARAMS]'], {}), '(param_defaults[FUNCTION_PARAMS])\n', (50779, 50812), False, 'import copy\n'), ((52126, 52157), 'copy.deepcopy', 'copy.deepcopy', (['default_variable'], {}), '(default_variable)\n', (52139, 52157), False, 'import copy\n'), ((83315, 83337), 'psyneulink.core.components.functions.function.is_function_type', 'is_function_type', (['self'], {}), '(self)\n', (83331, 83337), False, 'from psyneulink.core.components.functions.function import is_function_type, FunctionError\n'), ((92879, 92939), 
'psyneulink.core.globals.parameters.copy_parameter_value', 'copy_parameter_value', (['defaults[k]'], {'shared_types': 'shared_types'}), '(defaults[k], shared_types=shared_types)\n', (92899, 92939), False, 'from psyneulink.core.globals.parameters import Defaults, SharedParameter, Parameter, ParameterAlias, ParameterError, ParametersBase, copy_parameter_value\n'), ((94259, 94287), 'psyneulink.core.globals.parameters.copy_parameter_value', 'copy_parameter_value', (['p.spec'], {}), '(p.spec)\n', (94279, 94287), False, 'from psyneulink.core.globals.parameters import Defaults, SharedParameter, Parameter, ParameterAlias, ParameterError, ParametersBase, copy_parameter_value\n'), ((95461, 95498), 'copy.deepcopy', 'copy.deepcopy', (['self.defaults.variable'], {}), '(self.defaults.variable)\n', (95474, 95498), False, 'import copy\n'), ((105009, 105118), 'warnings.warn', 'warnings.warn', (['"""No ResetMode specified for reset_params; CURRENT_TO_INSTANCE_DEFAULTS will be used"""'], {}), "(\n 'No ResetMode specified for reset_params; CURRENT_TO_INSTANCE_DEFAULTS will be used'\n )\n", (105022, 105118), False, 'import warnings\n'), ((110106, 110129), 'numpy.atleast_1d', 'np.atleast_1d', (['variable'], {}), '(variable)\n', (110119, 110129), True, 'import numpy as np\n'), ((117608, 117636), 'inspect.isclass', 'inspect.isclass', (['param_value'], {}), '(param_value)\n', (117623, 117636), False, 'import inspect\n'), ((135230, 135364), 'psyneulink.core.components.functions.userdefinedfunction.UserDefinedFunction', 'UserDefinedFunction', ([], {'default_variable': 'function_variable', 'custom_function': 'function', 'owner': 'self', 'context': 'context'}), '(default_variable=function_variable, custom_function=\n function, owner=self, context=context, **function_params)\n', (135249, 135364), False, 'from psyneulink.core.components.functions.userdefinedfunction import UserDefinedFunction\n'), ((153832, 153850), 'copy.deepcopy', 'copy.deepcopy', (['var'], {}), '(var)\n', (153845, 153850), 
False, 'import copy\n'), ((155196, 155213), 'numpy.asarray', 'np.asarray', (['param'], {}), '(param)\n', (155206, 155213), True, 'import numpy as np\n'), ((157695, 157732), 'numpy.atleast_2d', 'np.atleast_2d', (['self.defaults.variable'], {}), '(self.defaults.variable)\n', (157708, 157732), True, 'import numpy as np\n'), ((168409, 168435), 'copy.copy', 'copy.copy', (['self._init_args'], {}), '(self._init_args)\n', (168418, 168435), False, 'import copy\n'), ((178857, 178925), 'warnings.warn', 'warnings.warn', (['f"""{self._parameter.name} is not currently modulated."""'], {}), "(f'{self._parameter.name} is not currently modulated.')\n", (178870, 178925), False, 'import warnings\n'), ((46605, 46634), 'psyneulink.core.globals.utilities.convert_to_np_array', 'convert_to_np_array', (['variable'], {}), '(variable)\n', (46624, 46634), False, 'from psyneulink.core.globals.utilities import ContentAddressableList, convert_all_elements_to_np_array, convert_to_np_array, get_deepcopy_with_shared, is_instance_or_subclass, is_matrix, iscompatible, kwCompatibilityLength, prune_unused_args, get_all_explicit_arguments, call_with_pruned_args, safe_equals, safe_len\n'), ((48381, 48398), 'numpy.asarray', 'np.asarray', (['value'], {}), '(value)\n', (48391, 48398), True, 'import numpy as np\n'), ((70080, 70098), 'numpy.array', 'np.array', (['variable'], {}), '(variable)\n', (70088, 70098), True, 'import numpy as np\n'), ((70749, 70791), 'psyneulink.core.globals.utilities.convert_all_elements_to_np_array', 'convert_all_elements_to_np_array', (['variable'], {}), '(variable)\n', (70781, 70791), False, 'from psyneulink.core.globals.utilities import ContentAddressableList, convert_all_elements_to_np_array, convert_to_np_array, get_deepcopy_with_shared, is_instance_or_subclass, is_matrix, iscompatible, kwCompatibilityLength, prune_unused_args, get_all_explicit_arguments, call_with_pruned_args, safe_equals, safe_len\n'), ((83361, 83383), 
'psyneulink.core.components.functions.function.FunctionError', 'FunctionError', (['err_msg'], {}), '(err_msg)\n', (83374, 83383), False, 'from psyneulink.core.components.functions.function import is_function_type, FunctionError\n'), ((88873, 88893), 'numpy.zeros', 'np.zeros', (['self.shape'], {}), '(self.shape)\n', (88881, 88893), True, 'import numpy as np\n'), ((95813, 95884), 'psyneulink.core.globals.utilities.call_with_pruned_args', 'call_with_pruned_args', (['parse_variable_method', 'variable'], {'context': 'context'}), '(parse_variable_method, variable, context=context)\n', (95834, 95884), False, 'from psyneulink.core.globals.utilities import ContentAddressableList, convert_all_elements_to_np_array, convert_to_np_array, get_deepcopy_with_shared, is_instance_or_subclass, is_matrix, iscompatible, kwCompatibilityLength, prune_unused_args, get_all_explicit_arguments, call_with_pruned_args, safe_equals, safe_len\n'), ((118954, 118982), 'inspect.isclass', 'inspect.isclass', (['param_value'], {}), '(param_value)\n', (118969, 118982), False, 'import inspect\n'), ((132030, 132073), 'psyneulink.core.globals.utilities.is_instance_or_subclass', 'is_instance_or_subclass', (['function', 'Function'], {}), '(function, Function)\n', (132053, 132073), False, 'from psyneulink.core.globals.utilities import ContentAddressableList, convert_all_elements_to_np_array, convert_to_np_array, get_deepcopy_with_shared, is_instance_or_subclass, is_matrix, iscompatible, kwCompatibilityLength, prune_unused_args, get_all_explicit_arguments, call_with_pruned_args, safe_equals, safe_len\n'), ((145715, 145739), 'numpy.atleast_2d', 'np.atleast_2d', (['new_value'], {}), '(new_value)\n', (145728, 145739), True, 'import numpy as np\n'), ((152800, 152813), 'psyneulink.core.globals.utilities.safe_len', 'safe_len', (['arr'], {}), '(arr)\n', (152808, 152813), False, 'from psyneulink.core.globals.utilities import ContentAddressableList, convert_all_elements_to_np_array, convert_to_np_array, 
get_deepcopy_with_shared, is_instance_or_subclass, is_matrix, iscompatible, kwCompatibilityLength, prune_unused_args, get_all_explicit_arguments, call_with_pruned_args, safe_equals, safe_len\n'), ((155279, 155316), 'numpy.broadcast_to', 'np.broadcast_to', (['param_arr', 'var.shape'], {}), '(param_arr, var.shape)\n', (155294, 155316), True, 'import numpy as np\n'), ((176100, 176160), 'psyneulink.core.globals.context.Context', 'Context', ([], {'source': 'ContextFlags.COMMAND_LINE', 'execution_id': 'None'}), '(source=ContextFlags.COMMAND_LINE, execution_id=None)\n', (176107, 176160), False, 'from psyneulink.core.globals.context import Context, ContextError, ContextFlags, INITIALIZATION_STATUS_FLAGS, _get_time, handle_external_context\n'), ((31807, 31954), 'psyneulink.core.globals.parameters.ParameterError', 'ParameterError', (['f"""Parameter \'{p.name}\' is read-only. Set at your own risk. Use .parameters.{p.name}.set with override=True to force set."""'], {}), '(\n f"Parameter \'{p.name}\' is read-only. Set at your own risk. 
Use .parameters.{p.name}.set with override=True to force set."\n )\n', (31821, 31954), False, 'from psyneulink.core.globals.parameters import Defaults, SharedParameter, Parameter, ParameterAlias, ParameterError, ParametersBase, copy_parameter_value\n'), ((46689, 46731), 'psyneulink.core.globals.utilities.convert_all_elements_to_np_array', 'convert_all_elements_to_np_array', (['variable'], {}), '(variable)\n', (46721, 46731), False, 'from psyneulink.core.globals.utilities import ContentAddressableList, convert_all_elements_to_np_array, convert_to_np_array, get_deepcopy_with_shared, is_instance_or_subclass, is_matrix, iscompatible, kwCompatibilityLength, prune_unused_args, get_all_explicit_arguments, call_with_pruned_args, safe_equals, safe_len\n'), ((47531, 47581), 'psyneulink.core.globals.utilities.is_instance_or_subclass', 'is_instance_or_subclass', (['param_value[1]', 'Component'], {}), '(param_value[1], Component)\n', (47554, 47581), False, 'from psyneulink.core.globals.utilities import ContentAddressableList, convert_all_elements_to_np_array, convert_to_np_array, get_deepcopy_with_shared, is_instance_or_subclass, is_matrix, iscompatible, kwCompatibilityLength, prune_unused_args, get_all_explicit_arguments, call_with_pruned_args, safe_equals, safe_len\n'), ((47863, 47976), 'psyneulink.core.globals.utilities.is_instance_or_subclass', 'is_instance_or_subclass', (['param_value', '(ModulatoryMechanism_Base, ModulatorySignal, ModulatoryProjection_Base)'], {}), '(param_value, (ModulatoryMechanism_Base,\n ModulatorySignal, ModulatoryProjection_Base))\n', (47886, 47976), False, 'from psyneulink.core.globals.utilities import ContentAddressableList, convert_all_elements_to_np_array, convert_to_np_array, get_deepcopy_with_shared, is_instance_or_subclass, is_matrix, iscompatible, kwCompatibilityLength, prune_unused_args, get_all_explicit_arguments, call_with_pruned_args, safe_equals, safe_len\n'), ((64414, 64428), 'numpy.ndim', 'np.ndim', (['param'], {}), '(param)\n', 
(64421, 64428), True, 'import numpy as np\n'), ((70620, 70643), 'numpy.atleast_1d', 'np.atleast_1d', (['variable'], {}), '(variable)\n', (70633, 70643), True, 'import numpy as np\n'), ((70697, 70720), 'numpy.atleast_2d', 'np.atleast_2d', (['variable'], {}), '(variable)\n', (70710, 70720), True, 'import numpy as np\n'), ((70874, 70893), 'numpy.atleast_1d', 'np.atleast_1d', (['size'], {}), '(size)\n', (70887, 70893), True, 'import numpy as np\n'), ((72219, 72248), 'psyneulink.core.globals.utilities.convert_to_np_array', 'convert_to_np_array', (['variable'], {}), '(variable)\n', (72238, 72248), False, 'from psyneulink.core.globals.utilities import ContentAddressableList, convert_all_elements_to_np_array, convert_to_np_array, get_deepcopy_with_shared, is_instance_or_subclass, is_matrix, iscompatible, kwCompatibilityLength, prune_unused_args, get_all_explicit_arguments, call_with_pruned_args, safe_equals, safe_len\n'), ((73086, 73100), 'numpy.array', 'np.array', (['size'], {}), '(size)\n', (73094, 73100), True, 'import numpy as np\n'), ((94701, 94765), 'psyneulink.core.globals.parameters.copy_parameter_value', 'copy_parameter_value', (['p.default_value'], {'shared_types': 'shared_types'}), '(p.default_value, shared_types=shared_types)\n', (94721, 94765), False, 'from psyneulink.core.globals.parameters import Defaults, SharedParameter, Parameter, ParameterAlias, ParameterError, ParametersBase, copy_parameter_value\n'), ((96580, 96618), 'psyneulink.core.globals.utilities.is_instance_or_subclass', 'is_instance_or_subclass', (['val', 'Function'], {}), '(val, Function)\n', (96603, 96618), False, 'from psyneulink.core.globals.utilities import ContentAddressableList, convert_all_elements_to_np_array, convert_to_np_array, get_deepcopy_with_shared, is_instance_or_subclass, is_matrix, iscompatible, kwCompatibilityLength, prune_unused_args, get_all_explicit_arguments, call_with_pruned_args, safe_equals, safe_len\n'), ((105259, 105300), 
'psyneulink.core.globals.parameters.copy_parameter_value', 'copy_parameter_value', (['param.default_value'], {}), '(param.default_value)\n', (105279, 105300), False, 'from psyneulink.core.globals.parameters import Defaults, SharedParameter, Parameter, ParameterAlias, ParameterError, ParametersBase, copy_parameter_value\n'), ((116285, 116394), 'warnings.warn', 'warnings.warn', (['f"""{param_name} is specified as None for {self.name} which suppresses type checking."""'], {}), "(\n f'{param_name} is specified as None for {self.name} which suppresses type checking.'\n )\n", (116298, 116394), False, 'import warnings\n'), ((129421, 129451), 'inspect.isclass', 'inspect.isclass', (['param_spec[1]'], {}), '(param_spec[1])\n', (129436, 129451), False, 'import inspect\n'), ((138875, 138900), 'inspect.isclass', 'inspect.isclass', (['function'], {}), '(function)\n', (138890, 138900), False, 'import inspect\n'), ((140035, 140110), 'psyneulink.core.globals.utilities.prune_unused_args', 'prune_unused_args', (['function.__init__'], {'args': '[]', 'kwargs': 'kwargs_to_instantiate'}), '(function.__init__, args=[], kwargs=kwargs_to_instantiate)\n', (140052, 140110), False, 'from psyneulink.core.globals.utilities import ContentAddressableList, convert_all_elements_to_np_array, convert_to_np_array, get_deepcopy_with_shared, is_instance_or_subclass, is_matrix, iscompatible, kwCompatibilityLength, prune_unused_args, get_all_explicit_arguments, call_with_pruned_args, safe_equals, safe_len\n'), ((153105, 153240), 'warnings.warn', 'warnings.warn', (['f"""The length of {value} is greater than that of {arr}.{ignored_items_str} will be ignored for index {indices}"""'], {}), "(\n f'The length of {value} is greater than that of {arr}.{ignored_items_str} will be ignored for index {indices}'\n )\n", (153118, 153240), False, 'import warnings\n'), ((53593, 53627), 'copy.deepcopy', 'copy.deepcopy', (['param.default_value'], {}), '(param.default_value)\n', (53606, 53627), False, 'import copy\n'), 
((53691, 53725), 'copy.deepcopy', 'copy.deepcopy', (['param.default_value'], {}), '(param.default_value)\n', (53704, 53725), False, 'import copy\n'), ((76631, 76685), 'psyneulink.core.globals.utilities.get_all_explicit_arguments', 'get_all_explicit_arguments', (['self.__class__', '"""__init__"""'], {}), "(self.__class__, '__init__')\n", (76657, 76685), False, 'from psyneulink.core.globals.utilities import ContentAddressableList, convert_all_elements_to_np_array, convert_to_np_array, get_deepcopy_with_shared, is_instance_or_subclass, is_matrix, iscompatible, kwCompatibilityLength, prune_unused_args, get_all_explicit_arguments, call_with_pruned_args, safe_equals, safe_len\n'), ((88490, 88508), 'numpy.array', 'np.array', (['variable'], {}), '(variable)\n', (88498, 88508), True, 'import numpy as np\n'), ((96881, 96901), 'inspect.isclass', 'inspect.isclass', (['val'], {}), '(val)\n', (96896, 96901), False, 'import inspect\n'), ((97590, 97707), 'psyneulink.core.globals.utilities.call_with_pruned_args', 'call_with_pruned_args', (['val'], {'default_variable': 'function_default_variable'}), '(val, default_variable=function_default_variable, **\n self.initial_shared_parameters[p.name])\n', (97611, 97707), False, 'from psyneulink.core.globals.utilities import ContentAddressableList, convert_all_elements_to_np_array, convert_to_np_array, get_deepcopy_with_shared, is_instance_or_subclass, is_matrix, iscompatible, kwCompatibilityLength, prune_unused_args, get_all_explicit_arguments, call_with_pruned_args, safe_equals, safe_len\n'), ((97947, 98016), 'itertools.chain', 'itertools.chain', (['self.initial_shared_parameters[p.name]', "['variable']"], {}), "(self.initial_shared_parameters[p.name], ['variable'])\n", (97962, 98016), False, 'import itertools\n'), ((118506, 118534), 'inspect.isclass', 'inspect.isclass', (['param_value'], {}), '(param_value)\n', (118521, 118534), False, 'import inspect\n'), ((138499, 138522), 'copy.deepcopy', 'copy.deepcopy', (['function'], {}), 
'(function)\n', (138512, 138522), False, 'import copy\n'), ((154953, 154968), 'numpy.squeeze', 'np.squeeze', (['var'], {}), '(var)\n', (154963, 154968), True, 'import numpy as np\n'), ((159172, 159194), 'warnings.warn', 'warnings.warn', (['err_msg'], {}), '(err_msg)\n', (159185, 159194), False, 'import warnings\n'), ((60280, 60298), 'psyneulink.core.llvm._tupleize', 'pnlvm._tupleize', (['x'], {}), '(x)\n', (60295, 60298), True, 'from psyneulink.core import llvm as pnlvm\n'), ((70921, 70935), 'numpy.shape', 'np.shape', (['size'], {}), '(size)\n', (70929, 70935), True, 'import numpy as np\n'), ((72175, 72186), 'numpy.zeros', 'np.zeros', (['s'], {}), '(s)\n', (72183, 72186), True, 'import numpy as np\n'), ((75515, 75569), 'psyneulink.core.globals.utilities.get_all_explicit_arguments', 'get_all_explicit_arguments', (['self.__class__', '"""__init__"""'], {}), "(self.__class__, '__init__')\n", (75541, 75569), False, 'from psyneulink.core.globals.utilities import ContentAddressableList, convert_all_elements_to_np_array, convert_to_np_array, get_deepcopy_with_shared, is_instance_or_subclass, is_matrix, iscompatible, kwCompatibilityLength, prune_unused_args, get_all_explicit_arguments, call_with_pruned_args, safe_equals, safe_len\n'), ((76391, 76423), 'inspect.signature', 'inspect.signature', (['self.__init__'], {}), '(self.__init__)\n', (76408, 76423), False, 'import inspect\n'), ((94634, 94652), 'copy.deepcopy', 'copy.deepcopy', (['val'], {}), '(val)\n', (94647, 94652), False, 'import copy\n'), ((105651, 105692), 'psyneulink.core.globals.parameters.copy_parameter_value', 'copy_parameter_value', (['param.default_value'], {}), '(param.default_value)\n', (105671, 105692), False, 'from psyneulink.core.globals.parameters import Defaults, SharedParameter, Parameter, ParameterAlias, ParameterError, ParametersBase, copy_parameter_value\n'), ((136454, 136494), 'numpy.asarray', 'np.asarray', (['[function.defaults.variable]'], {}), '([function.defaults.variable])\n', (136464, 
136494), True, 'import numpy as np\n'), ((137837, 138104), 'warnings.warn', 'warnings.warn', (['f"""{function} is generated once during import of psyneulink, and is now being reused. Please report this, including the script you were using, to the psyneulink developers at <EMAIL> or https://github.com/PrincetonUniversity/PsyNeuLink/issues"""'], {}), "(\n f'{function} is generated once during import of psyneulink, and is now being reused. Please report this, including the script you were using, to the psyneulink developers at <EMAIL> or https://github.com/PrincetonUniversity/PsyNeuLink/issues'\n )\n", (137850, 138104), False, 'import warnings\n'), ((138339, 138362), 'copy.deepcopy', 'copy.deepcopy', (['function'], {}), '(function)\n', (138352, 138362), False, 'import copy\n'), ((59940, 59987), 'graph_scheduler.time._time_scale_to_attr_str', 'graph_scheduler.time._time_scale_to_attr_str', (['t'], {}), '(t)\n', (59984, 59987), False, 'import graph_scheduler\n'), ((71396, 71410), 'numpy.shape', 'np.shape', (['size'], {}), '(size)\n', (71404, 71410), True, 'import numpy as np\n'), ((84755, 84777), 'psyneulink.core.components.functions.function.is_function_type', 'is_function_type', (['self'], {}), '(self)\n', (84771, 84777), False, 'from psyneulink.core.components.functions.function import is_function_type, FunctionError\n'), ((88714, 88732), 'numpy.array', 'np.array', (['variable'], {}), '(variable)\n', (88722, 88732), True, 'import numpy as np\n'), ((102405, 102439), 'copy.deepcopy', 'copy.deepcopy', (['param.default_value'], {}), '(param.default_value)\n', (102418, 102439), False, 'import copy\n'), ((125321, 125343), 'copy.copy', 'copy.copy', (['param_value'], {}), '(param_value)\n', (125330, 125343), False, 'import copy\n'), ((64679, 64697), 'numpy.asfarray', 'np.asfarray', (['param'], {}), '(param)\n', (64690, 64697), True, 'import numpy as np\n'), ((84592, 84614), 'psyneulink.core.components.functions.function.is_function_type', 'is_function_type', (['self'], {}), 
'(self)\n', (84608, 84614), False, 'from psyneulink.core.components.functions.function import is_function_type, FunctionError\n'), ((99333, 99368), 'numpy.asarray', 'np.asarray', (['[val.defaults.variable]'], {}), '([val.defaults.variable])\n', (99343, 99368), True, 'import numpy as np\n'), ((102494, 102528), 'copy.deepcopy', 'copy.deepcopy', (['param.default_value'], {}), '(param.default_value)\n', (102507, 102528), False, 'import copy\n'), ((102745, 102809), 'psyneulink.core.globals.utilities.safe_equals', 'safe_equals', (['param.default_value', 'shared_obj_param.default_value'], {}), '(param.default_value, shared_obj_param.default_value)\n', (102756, 102809), False, 'from psyneulink.core.globals.utilities import ContentAddressableList, convert_all_elements_to_np_array, convert_to_np_array, get_deepcopy_with_shared, is_instance_or_subclass, is_matrix, iscompatible, kwCompatibilityLength, prune_unused_args, get_all_explicit_arguments, call_with_pruned_args, safe_equals, safe_len\n'), ((103811, 104162), 'warnings.warn', 'warnings.warn', (['f"""Specification of the "{param.name}" parameter ({param.default_value}) for {self} conflicts with specification of its shared parameter "{shared_obj_param.name}" ({shared_obj_param.default_value}) for its {param.attribute_name} ({param.source._owner._owner}). The value specified on {param.source._owner._owner} will be used."""'], {}), '(\n f\'Specification of the "{param.name}" parameter ({param.default_value}) for {self} conflicts with specification of its shared parameter "{shared_obj_param.name}" ({shared_obj_param.default_value}) for its {param.attribute_name} ({param.source._owner._owner}). 
The value specified on {param.source._owner._owner} will be used.\'\n )\n', (103824, 104162), False, 'import warnings\n'), ((71344, 71358), 'numpy.shape', 'np.shape', (['size'], {}), '(size)\n', (71352, 71358), True, 'import numpy as np\n'), ((103556, 103599), 'psyneulink.core.globals.utilities.safe_equals', 'safe_equals', (['shared_obj_param.spec', 'isp_arg'], {}), '(shared_obj_param.spec, isp_arg)\n', (103567, 103599), False, 'from psyneulink.core.globals.utilities import ContentAddressableList, convert_all_elements_to_np_array, convert_to_np_array, get_deepcopy_with_shared, is_instance_or_subclass, is_matrix, iscompatible, kwCompatibilityLength, prune_unused_args, get_all_explicit_arguments, call_with_pruned_args, safe_equals, safe_len\n'), ((167829, 167846), 'dill.dumps', 'dill.dumps', (['value'], {}), '(value)\n', (167839, 167846), False, 'import dill\n'), ((100610, 100643), 'numpy.asarray', 'np.asarray', (['val.defaults.variable'], {}), '(val.defaults.variable)\n', (100620, 100643), True, 'import numpy as np\n'), ((100318, 100355), 'numpy.asarray', 'np.asarray', (['function_default_variable'], {}), '(function_default_variable)\n', (100328, 100355), True, 'import numpy as np\n')] |
import numpy as np
import gensim
import string
import re
import collections
import logging
from matplotlib import pyplot as plt
from sklearn.manifold import TSNE
import time
ae_size = 250
#logging setup
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
#load 20 newsgroups dataset
print("loading dataset")
# dataset = fetch_20newsgroups(subset='all',remove=('headers', 'footers', 'quotes')).data
# dataset = ' '.join(dataset)
# dataset = unicodedata.normalize('NFKD', dataset).encode('ascii','ignore')
desired_file = open('./wilde_pictureofdoriangray.txt', 'r')
dataset = desired_file.read()
desired_file.close()
#convert dataset to list of sentences
print("converting dataset to list of sentences")
sentences = re.sub(r'-|\t|\n',' ',dataset)
# sentences = sentences.lower()
for meh in re.findall("([A-Z]+)", sentences):
sentences = sentences.replace(meh, meh.lower())
# sentences = sentences.replace(',"', '."') # replace the commas before quotation marks with periods.
sentences = re.sub(',"', '."', sentences)
sentences = re.sub('"', '', sentences)
sentences = re.sub('\.\.\.', '', sentences)
sentences = re.sub(',', ' COMMA', sentences) # add period token
sentences = re.sub('\.', ' PERIOD_TOKEN', sentences)
sentences = re.sub('\?', ' QUESTION_TOKEN', sentences)
sentences = re.sub('\!', ' EXCLAMATION_TOKEN', sentences)
sentences = re.split('_TOKEN', sentences)
sentences = [sentence.translate(string.punctuation).split() for sentence in sentences]
print(len(sentences)) # number of sentences
# 2D list to 1D list.
words = [j for i in sentences for j in i]
print(len(words))
print(len(list(set(words)))) # number of unique words
stop
#train word2vec
print("training word2vec")
a = time.time()
model = gensim.models.Word2Vec(sentences, min_count=5, size=ae_size, workers=4)
model.train(sentences, epochs=100, total_examples=len(sentences))
b = time.time()
print('Training time elapsed: {} s'.format(b-a))
## Create a modified text. ##
# sentences = [j for i in sentences for j in i]
words = list(model.wv.vocab)
ff = open('./wilde_pictureofdoriangray_tokenized.txt', 'w')
for index in range(len(sentences)):
sentence = sentences[index]
for word_index in range(len(sentence)):
word = sentence[word_index]
if word not in words:
sentence[word_index] = 'UNKNOWN'
ff.write(' '.join(sentence))
ff.write('\n')
ff.close()
print("loading dataset")
# dataset = fetch_20newsgroups(subset='all',remove=('headers', 'footers', 'quotes')).data
# dataset = ' '.join(dataset)
# dataset = unicodedata.normalize('NFKD', dataset).encode('ascii','ignore')
desired_file = open('./wilde_pictureofdoriangray_tokenized.txt', 'r')
dataset = desired_file.read()
desired_file.close()
#convert dataset to list of sentences
print("converting dataset to list of sentences")
sentences = re.sub(r'-|\t',' ',dataset)
sentences = sentences.split('\n')
empty = []
for sentence in sentences:
words = sentence.split()
if words == []:
continue
empty += [words]
sentences = empty
#train word2vec
print("training word2vec")
a = time.time()
# model = gensim.models.Word2Vec(sentences, min_count=5, size=ae_size, workers=4)
# model.train(sentences, epochs=100, total_examples=len(sentences))
b = time.time()
print('Training time elapsed: {} s'.format(b-a))
#get most common words
print("getting common words")
dataset = [item for sublist in sentences for item in sublist]
counts = collections.Counter(dataset).most_common(500)
#reduce embeddings to 2d using tsne
print("reducing embeddings to 2D")
embeddings = np.empty((500, ae_size))
for i in range(500):
embeddings[i,:] = model[counts[i][0]]
tsne = TSNE(perplexity=30, n_components=2, init='pca', n_iter=7500)
embeddings = tsne.fit_transform(embeddings)
#plot embeddings
print("plotting most common words")
fig, ax = plt.subplots(figsize=(30, 30))
for i in range(500):
ax.scatter(embeddings[i,0],embeddings[i,1])
ax.annotate(counts[i][0], (embeddings[i,0],embeddings[i,1]))
#save to disk
plt.savefig('w2v_visualization_1kiter_tokenized.png')
model.save('1kiter_w2v_model_tokenized.gensim')
# stop
# model = gensim.models.Word2Vec.load('./10kiter_w2v_model.gensim')
#
# limit = 25
# sentence_number = 50
#
# index2word = model.wv.index2word
# word2index = {}
# for i in range(len(index2word)):
# word2index[index2word[i]] = i
# Sentence Generation
# print(sentences)
# num_words = len(model.wv.vocab)
# max_words = 60
# print(num_words)
# for i in range(sentence_number):
# index = np.random.randint(num_words)
# string = [model.wv.index2word[index]]
# # while True:
# for j in range(max_words):
# maybe = model.predict_output_word(string, topn=limit)
# # print(maybe)
# # randomizer = np.random.randint(len(maybe))
# for j in range(limit):
# if maybe[j][0] in string:
# continue
# else:
# string += [maybe[j][0]]
# break
# if (maybe[j][0] == 'PERIOD' or maybe[j][0] == 'QUESTION' or maybe[j][0] == 'EXCLAMATION'):
# break
# if ('PERIOD' in string or 'QUESTION' in string or 'EXCLAMATION' in string):
# print(' '.join(string))
# break
# if j >= limit:
# print(' '.join(string))
# break
# print(' '.join(string))
| [
"re.split",
"logging.basicConfig",
"sklearn.manifold.TSNE",
"numpy.empty",
"gensim.models.Word2Vec",
"matplotlib.pyplot.subplots",
"time.time",
"re.findall",
"collections.Counter",
"re.sub",
"matplotlib.pyplot.savefig"
] | [((206, 301), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': '"""%(asctime)s : %(levelname)s : %(message)s"""', 'level': 'logging.INFO'}), "(format='%(asctime)s : %(levelname)s : %(message)s',\n level=logging.INFO)\n", (225, 301), False, 'import logging\n'), ((759, 792), 're.sub', 're.sub', (['"""-|\\\\t|\\\\n"""', '""" """', 'dataset'], {}), "('-|\\\\t|\\\\n', ' ', dataset)\n", (765, 792), False, 'import re\n'), ((833, 866), 're.findall', 're.findall', (['"""([A-Z]+)"""', 'sentences'], {}), "('([A-Z]+)', sentences)\n", (843, 866), False, 'import re\n'), ((1035, 1064), 're.sub', 're.sub', (['""",\\""""', '""".\\""""', 'sentences'], {}), '(\',"\', \'."\', sentences)\n', (1041, 1064), False, 'import re\n'), ((1077, 1103), 're.sub', 're.sub', (['"""\\""""', '""""""', 'sentences'], {}), '(\'"\', \'\', sentences)\n', (1083, 1103), False, 'import re\n'), ((1116, 1150), 're.sub', 're.sub', (['"""\\\\.\\\\.\\\\."""', '""""""', 'sentences'], {}), "('\\\\.\\\\.\\\\.', '', sentences)\n", (1122, 1150), False, 'import re\n'), ((1161, 1193), 're.sub', 're.sub', (['""","""', '""" COMMA"""', 'sentences'], {}), "(',', ' COMMA', sentences)\n", (1167, 1193), False, 'import re\n'), ((1225, 1266), 're.sub', 're.sub', (['"""\\\\."""', '""" PERIOD_TOKEN"""', 'sentences'], {}), "('\\\\.', ' PERIOD_TOKEN', sentences)\n", (1231, 1266), False, 'import re\n'), ((1278, 1321), 're.sub', 're.sub', (['"""\\\\?"""', '""" QUESTION_TOKEN"""', 'sentences'], {}), "('\\\\?', ' QUESTION_TOKEN', sentences)\n", (1284, 1321), False, 'import re\n'), ((1333, 1379), 're.sub', 're.sub', (['"""\\\\!"""', '""" EXCLAMATION_TOKEN"""', 'sentences'], {}), "('\\\\!', ' EXCLAMATION_TOKEN', sentences)\n", (1339, 1379), False, 'import re\n'), ((1391, 1420), 're.split', 're.split', (['"""_TOKEN"""', 'sentences'], {}), "('_TOKEN', sentences)\n", (1399, 1420), False, 'import re\n'), ((1743, 1754), 'time.time', 'time.time', ([], {}), '()\n', (1752, 1754), False, 'import time\n'), ((1763, 1834), 
'gensim.models.Word2Vec', 'gensim.models.Word2Vec', (['sentences'], {'min_count': '(5)', 'size': 'ae_size', 'workers': '(4)'}), '(sentences, min_count=5, size=ae_size, workers=4)\n', (1785, 1834), False, 'import gensim\n'), ((1905, 1916), 'time.time', 'time.time', ([], {}), '()\n', (1914, 1916), False, 'import time\n'), ((2863, 2892), 're.sub', 're.sub', (['"""-|\\\\t"""', '""" """', 'dataset'], {}), "('-|\\\\t', ' ', dataset)\n", (2869, 2892), False, 'import re\n'), ((3116, 3127), 'time.time', 'time.time', ([], {}), '()\n', (3125, 3127), False, 'import time\n'), ((3282, 3293), 'time.time', 'time.time', ([], {}), '()\n', (3291, 3293), False, 'import time\n'), ((3599, 3623), 'numpy.empty', 'np.empty', (['(500, ae_size)'], {}), '((500, ae_size))\n', (3607, 3623), True, 'import numpy as np\n'), ((3694, 3754), 'sklearn.manifold.TSNE', 'TSNE', ([], {'perplexity': '(30)', 'n_components': '(2)', 'init': '"""pca"""', 'n_iter': '(7500)'}), "(perplexity=30, n_components=2, init='pca', n_iter=7500)\n", (3698, 3754), False, 'from sklearn.manifold import TSNE\n'), ((3863, 3893), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(30, 30)'}), '(figsize=(30, 30))\n', (3875, 3893), True, 'from matplotlib import pyplot as plt\n'), ((4043, 4096), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""w2v_visualization_1kiter_tokenized.png"""'], {}), "('w2v_visualization_1kiter_tokenized.png')\n", (4054, 4096), True, 'from matplotlib import pyplot as plt\n'), ((3468, 3496), 'collections.Counter', 'collections.Counter', (['dataset'], {}), '(dataset)\n', (3487, 3496), False, 'import collections\n')] |
from collections import defaultdict
import numpy as np
import torch
from sklearn.metrics import confusion_matrix
from terminaltables import AsciiTable
from torchreid.utils import get_model_attr
def score_extraction(data_loader, model, use_gpu, labelmap=[], head_id=0):
with torch.no_grad():
out_scores, gt_labels = [], []
for _, data in enumerate(data_loader):
batch_images, batch_labels = data[0], data[1]
if use_gpu:
batch_images = batch_images.cuda()
if labelmap:
for i, label in enumerate(labelmap):
batch_labels[torch.where(batch_labels==i)] = label
out_scores.append(model(batch_images)[head_id])
gt_labels.append(batch_labels)
out_scores = torch.cat(out_scores, 0).data.cpu().numpy()
gt_labels = torch.cat(gt_labels, 0).data.cpu().numpy()
return out_scores, gt_labels
def score_extraction_from_ir(data_loader, model, labelmap=[]):
out_scores, gt_labels = [], []
for data in data_loader.dataset:
image, label = np.asarray(data[0]), data[1]
if labelmap:
label = labelmap[label]
scores = model.forward([image])[0]
out_scores.append(scores)
gt_labels.append(label)
out_scores = np.concatenate(out_scores, 0)
gt_labels = torch.cat(gt_labels, 0).data.cpu().numpy()
gt_labels = gt_labels.reshape(out_scores.shape[0], -1)
return out_scores, gt_labels
def mean_top_k_accuracy(scores, labels, k=1):
idx = np.argsort(-scores, axis=-1)[:, :k]
labels = np.array(labels)
matches = np.any(idx == labels.reshape([-1, 1]), axis=-1)
classes = np.unique(labels)
accuracy_values = []
for class_id in classes:
mask = labels == class_id
num_valid = np.sum(mask)
if num_valid == 0:
continue
accuracy_values.append(np.sum(matches[mask]) / float(num_valid))
return np.mean(accuracy_values) if len(accuracy_values) > 0 else 1.0
def mean_average_precision(scores, labels):
def _ap(in_recall, in_precision):
mrec = np.concatenate((np.zeros([1, in_recall.shape[1]], dtype=np.float32),
in_recall,
np.ones([1, in_recall.shape[1]], dtype=np.float32)))
mpre = np.concatenate((np.zeros([1, in_precision.shape[1]], dtype=np.float32),
in_precision,
np.zeros([1, in_precision.shape[1]], dtype=np.float32)))
for i in range(mpre.shape[0] - 1, 0, -1):
mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i])
all_ap = []
cond = mrec[1:] != mrec[:-1]
for k in range(cond.shape[1]):
i = np.where(cond[:, k])[0]
all_ap.append(np.sum((mrec[i + 1, k] - mrec[i, k]) * mpre[i + 1, k]))
return np.array(all_ap, dtype=np.float32)
one_hot_labels = np.zeros_like(scores, dtype=np.int32)
one_hot_labels[np.arange(len(labels)), labels] = 1
idx = np.argsort(-scores, axis=0)
sorted_labels = np.take_along_axis(one_hot_labels, idx, axis=0)
matched = sorted_labels == 1
tp = np.cumsum(matched, axis=0).astype(np.float32)
fp = np.cumsum(~matched, axis=0).astype(np.float32)
num_pos = np.sum(one_hot_labels, axis=0)
valid_mask = num_pos > 0
num_pos[~valid_mask] = 1
num_pos = num_pos.astype(np.float32)
recall = tp / num_pos.reshape([1, -1])
precision = tp / (tp + fp)
ap = _ap(recall, precision)
valid_ap = ap[valid_mask]
mean_ap = np.mean(ap) if len(valid_ap) > 0 else 1.0
return mean_ap
def norm_confusion_matrix(scores, labels):
pred = np.argmax(scores, axis=1)
cf = confusion_matrix(labels, pred).astype(float)
cls_cnt = np.sum(cf, axis=1, keepdims=True)
norm_cm = cf / cls_cnt
return norm_cm
def show_confusion_matrix(norm_cm):
header = ['class {}'.format(i) for i in range(norm_cm.shape[0])]
data_info = []
for line in norm_cm:
data_info.append(['{:.2f}'.format(1e2 * v) for v in line])
table_data = [header] + data_info
table = AsciiTable(table_data)
print('Confusion matrix:\n' + table.table)
def get_invalid(scores, gt_labels, data_info):
pred_labels = np.argmax(scores, axis=1)
matches = pred_labels != gt_labels
unmatched = defaultdict(list)
for i in range(len(matches)):
if matches[i]:
unmatched[gt_labels[i]].append((data_info[i], pred_labels[i]))
return unmatched
def evaluate_classification(dataloader, model, use_gpu, topk=(1,), labelmap=[]):
if get_model_attr(model, 'is_ie_model'):
scores, labels = score_extraction_from_ir(dataloader, model, labelmap)
else:
scores, labels = score_extraction(dataloader, model, use_gpu, labelmap)
m_ap = mean_average_precision(scores, labels)
cmc = []
for k in topk:
cmc.append(mean_top_k_accuracy(scores, labels, k=k))
norm_cm = norm_confusion_matrix(scores, labels)
return cmc, m_ap, norm_cm
def evaluate_multilabel_classification(dataloader, model, use_gpu):
def average_precision(output, target):
epsilon = 1e-8
# sort examples
indices = output.argsort()[::-1]
# Computes prec@i
total_count_ = np.cumsum(np.ones((len(output), 1)))
target_ = target[indices]
ind = target_ == 1
pos_count_ = np.cumsum(ind)
total = pos_count_[-1]
pos_count_[np.logical_not(ind)] = 0
pp = pos_count_ / total_count_
precision_at_i_ = np.sum(pp)
precision_at_i = precision_at_i_ / (total + epsilon)
return precision_at_i
def mAP(targs, preds, pos_thr=0.5):
"""Returns the model's average precision for each class
Return:
ap (FloatTensor): 1xK tensor, with avg precision for each class k
"""
if np.size(preds) == 0:
return 0
ap = np.zeros((preds.shape[1]))
# compute average precision for each class
for k in range(preds.shape[1]):
scores = preds[:, k]
targets = targs[:, k]
ap[k] = average_precision(scores, targets)
tp, fp, fn, tn = [], [], [], []
for k in range(preds.shape[0]):
scores = preds[k,:]
targets = targs[k,:]
pred = (scores > pos_thr).astype(np.int32)
tp.append(((pred + targets) == 2).sum())
fp.append(((pred - targets) == 1).sum())
fn.append(((pred - targets) == -1).sum())
tn.append(((pred + targets) == 0).sum())
p_c = [tp[i] / (tp[i] + fp[i]) if tp[i] > 0 else 0.0 for i in range(len(tp))]
r_c = [tp[i] / (tp[i] + fn[i]) if tp[i] > 0 else 0.0
for i in range(len(tp))]
f_c = [2 * p_c[i] * r_c[i] / (p_c[i] + r_c[i]) if tp[i] > 0 else 0.0
for i in range(len(tp))]
mean_p_c = sum(p_c) / len(p_c)
mean_r_c = sum(r_c) / len(r_c)
mean_f_c = sum(f_c) / len(f_c)
p_o = sum(tp) / (np.array(tp) + np.array(fp)).sum()
r_o = sum(tp) / (np.array(tp) + np.array(fn)).sum()
f_o = 2 * p_o * r_o / (p_o + r_o)
return ap.mean(), mean_p_c, mean_r_c, mean_f_c, p_o, r_o, f_o
if get_model_attr(model, 'is_ie_model'):
scores, labels = score_extraction_from_ir(dataloader, model)
else:
scores, labels = score_extraction(dataloader, model, use_gpu)
scores = 1. / (1 + np.exp(-scores))
mAP_score = mAP(labels, scores)
return mAP_score
| [
"numpy.sum",
"numpy.maximum",
"numpy.argmax",
"numpy.ones",
"torch.cat",
"numpy.argsort",
"collections.defaultdict",
"numpy.mean",
"numpy.exp",
"torch.no_grad",
"torchreid.utils.get_model_attr",
"numpy.unique",
"numpy.zeros_like",
"numpy.logical_not",
"numpy.cumsum",
"numpy.size",
"t... | [((1306, 1335), 'numpy.concatenate', 'np.concatenate', (['out_scores', '(0)'], {}), '(out_scores, 0)\n', (1320, 1335), True, 'import numpy as np\n'), ((1594, 1610), 'numpy.array', 'np.array', (['labels'], {}), '(labels)\n', (1602, 1610), True, 'import numpy as np\n'), ((1688, 1705), 'numpy.unique', 'np.unique', (['labels'], {}), '(labels)\n', (1697, 1705), True, 'import numpy as np\n'), ((2940, 2977), 'numpy.zeros_like', 'np.zeros_like', (['scores'], {'dtype': 'np.int32'}), '(scores, dtype=np.int32)\n', (2953, 2977), True, 'import numpy as np\n'), ((3044, 3071), 'numpy.argsort', 'np.argsort', (['(-scores)'], {'axis': '(0)'}), '(-scores, axis=0)\n', (3054, 3071), True, 'import numpy as np\n'), ((3092, 3139), 'numpy.take_along_axis', 'np.take_along_axis', (['one_hot_labels', 'idx'], {'axis': '(0)'}), '(one_hot_labels, idx, axis=0)\n', (3110, 3139), True, 'import numpy as np\n'), ((3301, 3331), 'numpy.sum', 'np.sum', (['one_hot_labels'], {'axis': '(0)'}), '(one_hot_labels, axis=0)\n', (3307, 3331), True, 'import numpy as np\n'), ((3701, 3726), 'numpy.argmax', 'np.argmax', (['scores'], {'axis': '(1)'}), '(scores, axis=1)\n', (3710, 3726), True, 'import numpy as np\n'), ((3796, 3829), 'numpy.sum', 'np.sum', (['cf'], {'axis': '(1)', 'keepdims': '(True)'}), '(cf, axis=1, keepdims=True)\n', (3802, 3829), True, 'import numpy as np\n'), ((4145, 4167), 'terminaltables.AsciiTable', 'AsciiTable', (['table_data'], {}), '(table_data)\n', (4155, 4167), False, 'from terminaltables import AsciiTable\n'), ((4282, 4307), 'numpy.argmax', 'np.argmax', (['scores'], {'axis': '(1)'}), '(scores, axis=1)\n', (4291, 4307), True, 'import numpy as np\n'), ((4364, 4381), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (4375, 4381), False, 'from collections import defaultdict\n'), ((4626, 4662), 'torchreid.utils.get_model_attr', 'get_model_attr', (['model', '"""is_ie_model"""'], {}), "(model, 'is_ie_model')\n", (4640, 4662), False, 'from torchreid.utils import 
get_model_attr\n'), ((7302, 7338), 'torchreid.utils.get_model_attr', 'get_model_attr', (['model', '"""is_ie_model"""'], {}), "(model, 'is_ie_model')\n", (7316, 7338), False, 'from torchreid.utils import get_model_attr\n'), ((281, 296), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (294, 296), False, 'import torch\n'), ((1545, 1573), 'numpy.argsort', 'np.argsort', (['(-scores)'], {'axis': '(-1)'}), '(-scores, axis=-1)\n', (1555, 1573), True, 'import numpy as np\n'), ((1815, 1827), 'numpy.sum', 'np.sum', (['mask'], {}), '(mask)\n', (1821, 1827), True, 'import numpy as np\n'), ((1962, 1986), 'numpy.mean', 'np.mean', (['accuracy_values'], {}), '(accuracy_values)\n', (1969, 1986), True, 'import numpy as np\n'), ((2883, 2917), 'numpy.array', 'np.array', (['all_ap'], {'dtype': 'np.float32'}), '(all_ap, dtype=np.float32)\n', (2891, 2917), True, 'import numpy as np\n'), ((3583, 3594), 'numpy.mean', 'np.mean', (['ap'], {}), '(ap)\n', (3590, 3594), True, 'import numpy as np\n'), ((5434, 5448), 'numpy.cumsum', 'np.cumsum', (['ind'], {}), '(ind)\n', (5443, 5448), True, 'import numpy as np\n'), ((5589, 5599), 'numpy.sum', 'np.sum', (['pp'], {}), '(pp)\n', (5595, 5599), True, 'import numpy as np\n'), ((5971, 5995), 'numpy.zeros', 'np.zeros', (['preds.shape[1]'], {}), '(preds.shape[1])\n', (5979, 5995), True, 'import numpy as np\n'), ((1093, 1112), 'numpy.asarray', 'np.asarray', (['data[0]'], {}), '(data[0])\n', (1103, 1112), True, 'import numpy as np\n'), ((2615, 2647), 'numpy.maximum', 'np.maximum', (['mpre[i - 1]', 'mpre[i]'], {}), '(mpre[i - 1], mpre[i])\n', (2625, 2647), True, 'import numpy as np\n'), ((3184, 3210), 'numpy.cumsum', 'np.cumsum', (['matched'], {'axis': '(0)'}), '(matched, axis=0)\n', (3193, 3210), True, 'import numpy as np\n'), ((3239, 3266), 'numpy.cumsum', 'np.cumsum', (['(~matched)'], {'axis': '(0)'}), '(~matched, axis=0)\n', (3248, 3266), True, 'import numpy as np\n'), ((3736, 3766), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['labels', 
'pred'], {}), '(labels, pred)\n', (3752, 3766), False, 'from sklearn.metrics import confusion_matrix\n'), ((5499, 5518), 'numpy.logical_not', 'np.logical_not', (['ind'], {}), '(ind)\n', (5513, 5518), True, 'import numpy as np\n'), ((5916, 5930), 'numpy.size', 'np.size', (['preds'], {}), '(preds)\n', (5923, 5930), True, 'import numpy as np\n'), ((7513, 7528), 'numpy.exp', 'np.exp', (['(-scores)'], {}), '(-scores)\n', (7519, 7528), True, 'import numpy as np\n'), ((1908, 1929), 'numpy.sum', 'np.sum', (['matches[mask]'], {}), '(matches[mask])\n', (1914, 1929), True, 'import numpy as np\n'), ((2139, 2190), 'numpy.zeros', 'np.zeros', (['[1, in_recall.shape[1]]'], {'dtype': 'np.float32'}), '([1, in_recall.shape[1]], dtype=np.float32)\n', (2147, 2190), True, 'import numpy as np\n'), ((2265, 2315), 'numpy.ones', 'np.ones', (['[1, in_recall.shape[1]]'], {'dtype': 'np.float32'}), '([1, in_recall.shape[1]], dtype=np.float32)\n', (2272, 2315), True, 'import numpy as np\n'), ((2349, 2403), 'numpy.zeros', 'np.zeros', (['[1, in_precision.shape[1]]'], {'dtype': 'np.float32'}), '([1, in_precision.shape[1]], dtype=np.float32)\n', (2357, 2403), True, 'import numpy as np\n'), ((2481, 2535), 'numpy.zeros', 'np.zeros', (['[1, in_precision.shape[1]]'], {'dtype': 'np.float32'}), '([1, in_precision.shape[1]], dtype=np.float32)\n', (2489, 2535), True, 'import numpy as np\n'), ((2761, 2781), 'numpy.where', 'np.where', (['cond[:, k]'], {}), '(cond[:, k])\n', (2769, 2781), True, 'import numpy as np\n'), ((2811, 2865), 'numpy.sum', 'np.sum', (['((mrec[i + 1, k] - mrec[i, k]) * mpre[i + 1, k])'], {}), '((mrec[i + 1, k] - mrec[i, k]) * mpre[i + 1, k])\n', (2817, 2865), True, 'import numpy as np\n'), ((629, 659), 'torch.where', 'torch.where', (['(batch_labels == i)'], {}), '(batch_labels == i)\n', (640, 659), False, 'import torch\n'), ((1352, 1375), 'torch.cat', 'torch.cat', (['gt_labels', '(0)'], {}), '(gt_labels, 0)\n', (1361, 1375), False, 'import torch\n'), ((7085, 7097), 'numpy.array', 
'np.array', (['tp'], {}), '(tp)\n', (7093, 7097), True, 'import numpy as np\n'), ((7100, 7112), 'numpy.array', 'np.array', (['fp'], {}), '(fp)\n', (7108, 7112), True, 'import numpy as np\n'), ((7145, 7157), 'numpy.array', 'np.array', (['tp'], {}), '(tp)\n', (7153, 7157), True, 'import numpy as np\n'), ((7160, 7172), 'numpy.array', 'np.array', (['fn'], {}), '(fn)\n', (7168, 7172), True, 'import numpy as np\n'), ((793, 817), 'torch.cat', 'torch.cat', (['out_scores', '(0)'], {}), '(out_scores, 0)\n', (802, 817), False, 'import torch\n'), ((857, 880), 'torch.cat', 'torch.cat', (['gt_labels', '(0)'], {}), '(gt_labels, 0)\n', (866, 880), False, 'import torch\n')] |
#
# 1. 1.9.2020 Managed to convert ODE models for economic extension to transition model ready for stochastic simulation, using separate birth death list
# See section on SC2UIR model. Not done for other two economic extensions yet
# 2. 1.9.2020 Implemented stochastic simulation (Tau-leap method) using PyGom inbuilt capabilities: for SCIR simulation only so far
# Neeed to use integer N>>1, not 1.0, for stochastic simulation. Calculates in a few minutes for N=10000, rescaled ICUfrac to 0.02 (x10). N=100000 didn't finish in 10m.
# # Model Definitions
# import required packages
import os
import csv
from sympy import symbols, init_printing
import numpy as np
import matplotlib
get_ipython().run_line_magic('matplotlib', 'inline')
import seaborn as sb
from matplotlib import pyplot as plt
import sympy
import itertools
import scipy
import datetime
import matplotlib.dates as mdates
from pygom import DeterministicOde, Transition, SimulateOde, TransitionType, SquareLoss
from scipy.optimize import minimize
import pickle as pk
import jsonpickle as jpk
from cycler import cycler
import datetime
import matplotlib.dates as mdates
from pandas.plotting import register_matplotlib_converters
register_matplotlib_converters()
import pwlf
import sys
#from IPython.core.display import display, HTML
#display(HTML("<style>.container { width:100% !important; }</style>"))
savefigs = False # whether to save specific figures for paper to .../figures directory
# This cell adds two methods to the DeterministicODE class of pygom
# dumpparams: stores params in a file './params/Model_Name.pk'
# loadparams: loads params from same file. returns None if any problem finding the file.
# e.g. will be accessed by SCIR.dumparams() or SCIR.loadparams()
# This stuff needs modules os, sys, pickle as pk.
def dumpparams(self, run_id=''):  # Have to add self since this will become a method
    """Pickle this model's parameter sets to './params/<name>.pk'.

    Saves a dict holding params, sbparams, cbparams, dbparams and
    initial_values.  When run_id is given it overrides
    dbparams['run_name'] and names the output file; otherwise the file
    name is built from the model name (plus '_<country>' when
    dbparams['country'] is non-empty).  Best-effort: failures are
    reported on stdout, never raised.
    """
    mname = self.modelname
    country = self.dbparams['country']
    rname = self.dbparams['run_name']
    dirnm = os.getcwd()
    if run_id != '':  # if run_id, turn it into run_name and use it for output filename
        if run_id != rname:
            print("warning: changing run_name from ", rname, 'to', run_id)
            self.dbparams['run_name'] = run_id
        pfile = os.path.join(dirnm, 'params', run_id + '.pk')
    else:  # construct default run_name from mname and country
        if country != '':
            stmp = mname + '_' + country
        else:
            stmp = mname
        pfile = os.path.join(dirnm, 'params', stmp + '.pk')
    try:
        all_params = {'params': self.params.copy(),  # copies guard against later mutation of the model dicts
                      'sbparams': self.sbparams.copy(),
                      'cbparams': self.cbparams.copy(),
                      'dbparams': self.dbparams.copy(),
                      'initial_values': self.initial_values  # NOTE(review): shared reference, not a copy — confirm intent
                      }
        with open(pfile, 'wb') as fp:
            pk.dump(all_params, fp)
        print('dumped params to', pfile)
    except Exception as e:  # was a bare except: report the cause instead of silently swallowing everything
        print('problem dumping params to ', pfile, ':', e)
def loadparams(self, run_id=''):  # Have to add self since this will become a method
    """Load pickled parameter sets from './params/<run_name>.pk' into the model.

    When run_id is given it overrides dbparams['run_name'] and selects
    the input file.  Returns True on success, None on any failure
    (missing/corrupt file or missing keys in the pickle).
    """
    rname = self.dbparams['run_name']
    dirnm = os.getcwd()
    if run_id == '':
        pfile = os.path.join(dirnm, 'params', rname + '.pk')
    else:
        if run_id != rname:
            print("warning: changing run_name from ", rname, 'to', run_id)
            self.dbparams['run_name'] = run_id
        pfile = os.path.join(dirnm, 'params', run_id + '.pk')
    try:
        with open(pfile, 'rb') as fp:
            all_params = pk.load(fp)
        print('loaded params from ', pfile, ':')
    except Exception as e:  # was a bare except: report the cause
        print("problem loading", pfile, ':', e)
        return None
    print('------------', all_params)
    # (removed an unused local 'nms' built from self.param_list — it was never read
    #  and would raise AttributeError on models lacking param_list)
    try:
        self.params = all_params['params'].copy()
        self.parameters = self.params.copy()
        self.sbparams = all_params['sbparams'].copy()
        self.cbparams = all_params['cbparams'].copy()
        self.dbparams = all_params['dbparams'].copy()
        self.initial_values = all_params['initial_values']  # NOTE(review): shared reference, not a deep copy
    except Exception as e:  # missing keys in an old/foreign pickle
        print('problem loading the params from ', pfile, ':', e)
        return None
    return True
# Monkey-patch the save/load helpers onto pygom's ODE class so every model
# instance gains .dumpparams()/.loadparams() as bound methods.
OdeClass = DeterministicOde().__class__
setattr(OdeClass,'dumpparams', dumpparams)
setattr(OdeClass,'loadparams', loadparams)
def Float(x):
    """Coerce *x* to float, returning NaN when conversion fails.

    Used when parsing CSV fields that may be empty or non-numeric.
    """
    try:
        rtn = float(x)
    except (TypeError, ValueError):  # was a bare except: don't swallow KeyboardInterrupt/SystemExit
        rtn = float('NaN')
    return rtn
def print_ode2(self):
    """
    Display the model ODE system symbolically (Jupyter-friendly).

    Builds a two-column sympy matrix: column 0 carries the d{state}/dt
    labels, column 1 the matching right-hand sides, so the system renders
    with real symbols rather than the words of the symbols.
    Adapted from the PyGOM built-in, with the subscript format error fixed.
    """
    rhs = self.get_ode_eqn()
    table = sympy.zeros(rhs.rows, 2)
    for row in range(rhs.shape[0]):
        label = 'd' + '{' + str(self._stateList[row]) + '}' + '/dt='
        table[row, 0] = sympy.symbols(label)
        table[row, 1] = rhs[row]
    return table
# Jupyter Specifics
from IPython.display import display, HTML
from ipywidgets.widgets import interact, interactive, IntSlider, FloatSlider, Layout, ToggleButton, ToggleButtons, fixed
display(HTML("<style>.container { width:100% !important; }</style>"))  # widen Jupyter cells to full window width
style = {'description_width': '100px'}  # ipywidgets label width for interactive controls
slider_layout = Layout(width='99%')  # make interactive sliders span the cell
# ## Caution Extensions to SIR Model
# ### SIR model
# #### Equations
#
# \begin{equation}
# \begin{split}
# \dot{S} &= -\beta I S\\
# \dot{I} &= \beta I S - \gamma I - \mu I\\
# \dot{R} & = \gamma I \\
# \dot{D} & = \mu I
# \end{split}
# \end{equation}
#
#
# #### Variables
# * $S$: Susceptible individuals
# * $I$: Infected individuals
# * $R$: individuals who have recovered from disease and are now immune
# * $D$: Dead individuals
# * $N=S+I+R+D$ Total population size (constant)
#
# #### Parameters
# * $\beta$ rate at which infected individuals contact susceptibles and infect them
# * $\gamma$ rate at which infected individuals recover from disease and become immune
# * $\mu$ death rate for infected individuals
# #### Implementation
# Using PyGOM, we will set up my simple SCIR model ODE system
# PyGOM – A Python Package for Simplifying Modelling with Systems of Ordinary Differential Equations https://arxiv.org/pdf/1803.06934.pdf
# In[8]:
# set up the symbolic SIR model, actually SIRD including deaths
def make_model(mod_name):
    """Construct the pygom model named *mod_name*.

    Returns a dict {'state': ..., 'param_list': ..., 'model': ...}; for an
    unrecognized name no branch matches and the function falls through,
    implicitly returning None.

    Naming convention: 'C' = cautioned-susceptible extension, 'E' = exposed
    compartment, 'I3' = three-stage infection (mild/severe/critical),
    'U' = economic extension.  Each model object is decorated with index
    attributes (ei, confirmed, recovered, deaths, I_1, and where applicable
    all_susceptibles, S_c) identifying compartment groups by state-vector
    position.
    """
    rtn = {}
    I_0 = 0.00003  # default initial infected fraction used for every model's ICs
    if mod_name == 'SIR':
        state = ['S', 'I', 'R', 'D']
        param_list = ['beta', 'gamma','mu','N']
        transition = [
            Transition(origin='S', destination='I', equation='beta*I*S',
                       transition_type=TransitionType.T),
            Transition(origin='I', destination='R', equation='gamma*I',
                       transition_type=TransitionType.T),
            Transition(origin='I', destination='D', equation='mu*I',
                       transition_type=TransitionType.T)
            ]
        model = DeterministicOde(state, param_list, transition=transition)
        model.modelname='SIR'
        model.ei=1
        model.confirmed=slice(1,4) # cases 1-3 i.e. I, R and D
        model.recovered=slice(2,3)
        model.deaths=slice(3,4)
        model.I_1 = 1
        x0 = [1.0-I_0, I_0, 0.0, 0.0]
        model.initial_values = (x0, 0) # 0 for t[0]
        rtn['state'] = state
        rtn['param_list'] = param_list
        rtn['model'] = model
        return rtn
    if mod_name == 'SCIR':
        state = ['S', 'I', 'R', 'D', 'S_c']
        param_list = ['beta', 'gamma', 'mu', 'c_0', 'c_1', 'c_2', 'N']
        transition = [
            Transition(origin='S', destination='I', equation='beta*I*S',
                       transition_type=TransitionType.T),
            Transition(origin='S', destination='S_c', equation='c_2*I*S',
                       transition_type=TransitionType.T),
            Transition(origin='S_c', destination='S', equation='c_1*S_c',
                       transition_type=TransitionType.T),
            Transition(origin='S_c', destination='I', equation='c_0*beta*I*S_c',
                       transition_type=TransitionType.T),
            Transition(origin='I', destination='R', equation='gamma*I',
                       transition_type=TransitionType.T),
            Transition(origin='I', destination='D', equation='mu*I',
                       transition_type=TransitionType.T)
            ]
        model = DeterministicOde(state, param_list, transition=transition)
        # module-level SimulateOde twin kept for stochastic (Tau-leap) runs
        global SCIR_modelS
        SCIR_modelS = SimulateOde(state, param_list , transition=transition)
        model.modelname='SCIR'
        model.ei=1
        model.confirmed=slice(1,4) # cases 1-3 i.e. I, R and D
        model.recovered=slice(2,3)
        model.deaths=slice(3,4)
        model.all_susceptibles=[0,4]
        model.S_c=4
        model.I_1 = 1
        x0_SCIR = [1.0-I_0, I_0, 0.0, 0.0, 0.0]
        model.initial_values = (x0_SCIR, 0)
        rtn['state'] = state
        rtn['param_list'] = param_list
        rtn['model'] = model
        return rtn
    if mod_name == 'SC2IR':
        state = ['S', 'I', 'R', 'D', 'I_c', 'S_c']
        param_list = ['beta', 'gamma', 'mu', 'c_0', 'c_1', 'c_2', 'N']
        transition = [
            Transition(origin='S', destination='I', equation='beta*(I+c_0*I_c)*S',
                       transition_type=TransitionType.T),
            Transition(origin='S', destination='S_c', equation='c_2*(I+I_c)*S',
                       transition_type=TransitionType.T),
            Transition(origin='S_c', destination='S', equation='c_1*S_c',
                       transition_type=TransitionType.T),
            Transition(origin='S_c', destination='I_c', equation='c_0*beta*(I+c_0*I_c)*S_c',
                       transition_type=TransitionType.T),
            Transition(origin='I', destination='R', equation='gamma*I',
                       transition_type=TransitionType.T),
            Transition(origin='I', destination='D', equation='mu*I',
                       transition_type=TransitionType.T),
            Transition(origin='I', destination='I_c', equation='c_2*(I+I_c)*I',
                       transition_type=TransitionType.T),
            Transition(origin='I_c', destination='R', equation='gamma*I_c',
                       transition_type=TransitionType.T),
            Transition(origin='I_c', destination='I', equation='c_1*I_c',
                       transition_type=TransitionType.T),
            Transition(origin='I_c', destination='D', equation='mu*I_c',
                       transition_type=TransitionType.T) #,
            ]
        model = DeterministicOde(state, param_list, transition=transition)
        model.modelname='SC2IR'
        model.ei=1
        model.confirmed=slice(1,5) # cases 1-3 i.e. I, R and D
        model.recovered=slice(2,3)
        model.deaths=slice(3,4)
        model.all_susceptibles=[0,5]
        model.S_c=5
        model.I_1 = 1
        x0_SC2IR = [1.0-I_0, I_0, 0.0, 0.0, 0.0, 0.0]
        model.initial_values = (x0_SC2IR, 0)
        rtn['state'] = state
        rtn['param_list'] = param_list
        rtn['model'] = model
        return rtn
    if mod_name == 'SEIR':
        state = ['S', 'E', 'I', 'R', 'D']
        param_list = ['beta', 'alpha', 'gamma', 'mu', 'N']
        transition = [
            Transition(origin='S', destination='E', equation='beta*I*S',
                       transition_type=TransitionType.T),
            Transition(origin='E', destination='I', equation='alpha*E',
                       transition_type=TransitionType.T),
            Transition(origin='I', destination='R', equation='gamma*I',
                       transition_type=TransitionType.T),
            Transition(origin='I', destination='D', equation='mu*I',
                       transition_type=TransitionType.T)
            ]
        model = DeterministicOde(state, param_list, transition=transition)
        model.modelname='SEIR'
        model.ei=slice(1,3) # cases 1,2 i.e. E and I
        model.confirmed=slice(2,5) # cases 2-4 i.e. I, R and D, not E
        model.recovered=slice(3,4)
        model.deaths=slice(4,5)
        model.I_1 = 2
        x0_SEIR = [1.0-I_0, 0.0, I_0, 0.0, 0.0]
        model.initial_values = (x0_SEIR, 0)
        rtn['state'] = state
        rtn['param_list'] = param_list
        rtn['model'] = model
        return rtn
    if mod_name == 'SCEIR':
        state = ['S', 'E', 'I', 'R', 'D', 'S_c']
        param_list = ['beta', 'alpha', 'gamma', 'mu', 'c_0', 'c_1', 'c_2', 'N']
        transition = [
            Transition(origin='S', destination='E', equation='beta*I*S',
                       transition_type=TransitionType.T),
            Transition(origin='S', destination='S_c', equation='c_2*I*S',
                       transition_type=TransitionType.T),
            Transition(origin='S_c', destination='S', equation='c_1*S_c',
                       transition_type=TransitionType.T),
            Transition(origin='S_c', destination='E', equation='c_0*beta*I*S_c',
                       transition_type=TransitionType.T),
            Transition(origin='E', destination='I', equation='alpha*E',
                       transition_type=TransitionType.T),
            Transition(origin='I', destination='R', equation='gamma*I',
                       transition_type=TransitionType.T),
            Transition(origin='I', destination='D', equation='mu*I',
                       transition_type=TransitionType.T)
            ]
        model = DeterministicOde(state, param_list, transition=transition)
        model.modelname='SCEIR'
        model.ei=slice(1,3) # cases 1,2 i.e. E,I
        model.confirmed=slice(2,5) # cases 2-4 i.e. I, R and D, not E
        model.recovered=slice(3,4)
        model.deaths=slice(4,5)
        model.all_susceptibles=[0,5]
        model.S_c=5
        model.I_1 = 2
        x0_SCEIR = [1.0-I_0, 0.0, I_0, 0.0, 0.0, 0.0]
        model.initial_values = (x0_SCEIR, 0)
        rtn['state'] = state
        rtn['param_list'] = param_list
        rtn['model'] = model
        return rtn
    if mod_name == 'SC3EIR':
        state = ['S', 'E', 'I', 'R', 'D', 'I_c', 'S_c', 'E_c']
        param_list = ['beta', 'alpha', 'gamma', 'mu', 'c_0', 'c_1', 'c_2', 'N']
        transition = [
            Transition(origin='S', destination='E', equation='beta*(I+c_0*I_c)*S',
                       transition_type=TransitionType.T),
            Transition(origin='S', destination='S_c', equation='c_2*(I+I_c)*S',
                       transition_type=TransitionType.T),
            Transition(origin='S_c', destination='S', equation='c_1*S_c',
                       transition_type=TransitionType.T),
            Transition(origin='S_c', destination='E_c', equation='c_0*beta*(I+c_0*I_c)*S_c',
                       transition_type=TransitionType.T),
            Transition(origin='E', destination='I', equation='alpha*E',
                       transition_type=TransitionType.T),
            Transition(origin='E', destination='E_c', equation='c_2*(I+I_c)*E',
                       transition_type=TransitionType.T),
            Transition(origin='E_c', destination='I_c', equation='alpha*E_c',
                       transition_type=TransitionType.T),
            Transition(origin='E_c', destination='E', equation='c_1*E_c',
                       transition_type=TransitionType.T),
            Transition(origin='I', destination='R', equation='gamma*I',
                       transition_type=TransitionType.T),
            Transition(origin='I', destination='I_c', equation='c_2*(I+I_c)*I',
                       transition_type=TransitionType.T),
            Transition(origin='I', destination='D', equation='mu*I',
                       transition_type=TransitionType.T),
            Transition(origin='I_c', destination='R', equation='gamma*I_c',
                       transition_type=TransitionType.T),
            Transition(origin='I_c', destination='I', equation='c_1*I_c',
                       transition_type=TransitionType.T),
            Transition(origin='I_c', destination='D', equation='mu*I_c',
                       transition_type=TransitionType.T)
            ]
        model = DeterministicOde(state, param_list, transition=transition)
        model.modelname='SC3EIR'
        model.ei=slice(1,3) # cases 1,2 i.e. E,I # note E_c and I_c not included
        model.confirmed=slice(2,6) # cases 2-5 i.e. I, R, D, and I_c, not E, E_c
        model.recovered=slice(3,4)
        model.deaths=slice(4,5)
        model.all_susceptibles=[0,6]
        model.S_c=6
        model.I_1 = 2
        x0_SC3EIR = [1.0-I_0, 0.0, I_0, 0.0, 0.0, 0.0, 0.0, 0.0]
        model.initial_values = (x0_SC3EIR, 0)
        rtn['state'] = state
        rtn['param_list'] = param_list
        rtn['model'] = model
        return rtn
    if mod_name == 'SEI3R':
        state = ['S', 'E', 'I_1', 'I_2','I_3','R','D']
        param_list = ['beta_1', 'beta_2','beta_3','alpha', 'gamma_1', 'gamma_2', 'gamma_3',
                      'p_1','p_2','mu','N']
        transition = [
            Transition(origin='S', destination='E', equation='(beta_1*I_1+beta_2*I_2+beta_3*I_3)*S',
                       transition_type=TransitionType.T),
            Transition(origin='E', destination='I_1', equation='alpha*E',
                       transition_type=TransitionType.T),
            Transition(origin='I_1', destination='R', equation='gamma_1*I_1',
                       transition_type=TransitionType.T),
            Transition(origin='I_2', destination='R', equation='gamma_2*I_2',
                       transition_type=TransitionType.T),
            Transition(origin='I_3', destination='R', equation='gamma_3*I_3',
                       transition_type=TransitionType.T),
            Transition(origin='I_1', destination='I_2', equation='p_1*I_1',
                       transition_type=TransitionType.T),
            Transition(origin='I_2', destination='I_3', equation='p_2*I_2',
                       transition_type=TransitionType.T),
            Transition(origin='I_3', destination='D', equation='mu*I_3',
                       transition_type=TransitionType.T)
            ]
        model = DeterministicOde(state, param_list, transition=transition)
        model.modelname='SEI3R'
        model.ei=slice(1,5)
        model.confirmed=slice(2,7) # cases 2-6 i.e. I1, I2, I3, R and D
        model.recovered=slice(5,6)
        model.deaths=slice(6,7)
        model.I_1 = 2
        x0_SEI3R = [1.0-I_0, 0.0, I_0, 0.0, 0.0, 0.0, 0.0]
        model.initial_values = (x0_SEI3R, 0)
        rtn['state'] = state
        rtn['param_list'] = param_list
        rtn['model'] = model
        return rtn
    if mod_name == 'SCEI3R':
        state = ['S', 'E', 'I_1', 'I_2','I_3','R','D','S_c']
        param_list = ['beta_1', 'beta_2','beta_3','alpha', 'gamma_1', 'gamma_2', 'gamma_3',
                      'p_1','p_2','mu','c_0','c_1','c_2','N']
        transition = [
            Transition(origin='S', destination='E', equation='(beta_1*I_1+beta_2*I_2+beta_3*I_3)*S',
                       transition_type=TransitionType.T),
            Transition(origin='S', destination='S_c', equation='c_2*I_3*S',
                       transition_type=TransitionType.T),
            Transition(origin='S_c', destination='S', equation='c_1*S_c',
                       transition_type=TransitionType.T),
            Transition(origin='S_c', destination='E', equation='c_0*(beta_1*I_1+beta_2*I_2+beta_3*I_3)*S_c',
                       transition_type=TransitionType.T),
            Transition(origin='E', destination='I_1', equation='alpha*E',
                       transition_type=TransitionType.T),
            Transition(origin='I_1', destination='R', equation='gamma_1*I_1',
                       transition_type=TransitionType.T),
            Transition(origin='I_2', destination='R', equation='gamma_2*I_2',
                       transition_type=TransitionType.T),
            Transition(origin='I_3', destination='R', equation='gamma_3*I_3',
                       transition_type=TransitionType.T),
            Transition(origin='I_1', destination='I_2', equation='p_1*I_1',
                       transition_type=TransitionType.T),
            Transition(origin='I_2', destination='I_3', equation='p_2*I_2',
                       transition_type=TransitionType.T),
            Transition(origin='I_3', destination='D', equation='mu*I_3',
                       transition_type=TransitionType.T)
            ]
        model = DeterministicOde(state, param_list, transition=transition)
        model.modelname='SCEI3R'
        model.ei=slice(1,5)
        model.confirmed=slice(2,7) # cases 2-6 i.e. I1, I2, I3, R and D
        model.recovered=slice(5,6)
        model.deaths=slice(6,7)
        model.all_susceptibles=[0,7]
        model.S_c=7
        model.I_1 = 2
        x0_SCEI3R = [1.0-I_0, 0.0, I_0, 0.0, 0.0, 0.0, 0.0, 0.0]
        model.initial_values = (x0_SCEI3R, 0)
        rtn['state'] = state
        rtn['param_list'] = param_list
        rtn['model'] = model
        return rtn
    if mod_name == 'SC3EI3R':
        state = ['S', 'E', 'I_1', 'I_2','I_3', 'R', 'D', 'I_c', 'S_c', 'E_c']
        param_list = ['beta_1', 'beta_2','beta_3','alpha', 'gamma_1', 'gamma_2', 'gamma_3',
                      'p_1','p_2','mu','c_0','c_1','c_2','N']
        transition = [
            Transition(origin='S', destination='E', equation='(beta_1*I_1+beta_2*I_2+beta_3*I_3+c_0*beta_1*I_c)*S',
                       transition_type=TransitionType.T),
            Transition(origin='S_c', destination='E_c', equation='c_0*(beta_1*I_1+beta_2*I_2+beta_3*I_3+c_0*beta_1*I_c)*S_c',
                       transition_type=TransitionType.T),
            Transition(origin='S', destination='S_c', equation='c_2*I_3*S',
                       transition_type=TransitionType.T),
            Transition(origin='S_c', destination='S', equation='c_1*S_c',
                       transition_type=TransitionType.T),
            Transition(origin='E', destination='I_1', equation='alpha*E',
                       transition_type=TransitionType.T),
            Transition(origin='E', destination='E_c', equation='c_2*I_3*E',
                       transition_type=TransitionType.T),
            Transition(origin='E_c', destination='I_c', equation='alpha*E_c',
                       transition_type=TransitionType.T),
            Transition(origin='E_c', destination='E', equation='c_1*E_c',
                       transition_type=TransitionType.T),
            Transition(origin='I_1', destination='R', equation='gamma_1*I_1',
                       transition_type=TransitionType.T),
            Transition(origin='I_1', destination='I_c', equation='c_2*I_3*I_1', # error corrected I_1, mistakenly was I_c
                       transition_type=TransitionType.T),
            Transition(origin='I_c', destination='R', equation='gamma_1*I_c',
                       transition_type=TransitionType.T),
            Transition(origin='I_c', destination='I_1', equation='c_1*I_c',
                       transition_type=TransitionType.T),
            Transition(origin='I_2', destination='R', equation='gamma_2*I_2',
                       transition_type=TransitionType.T),
            Transition(origin='I_3', destination='R', equation='gamma_3*I_3',
                       transition_type=TransitionType.T),
            Transition(origin='I_1', destination='I_2', equation='p_1*I_1',
                       transition_type=TransitionType.T),
            Transition(origin='I_c', destination='I_2', equation='p_1*I_c',
                       transition_type=TransitionType.T),
            Transition(origin='I_2', destination='I_3', equation='p_2*I_2',
                       transition_type=TransitionType.T),
            Transition(origin='I_3', destination='D', equation='mu*I_3',
                       transition_type=TransitionType.T)
            ]
        model = DeterministicOde(state, param_list, transition=transition)
        model.modelname='SC3EI3R'
        model.ei=slice(1,5) # 1,2,3,4 i.e. E,I_1,I_2,I_3 – not E_c and I_c
        model.confirmed=slice(2,8) # cases 2-7 i.e. I1, I2, I3, R, D and I_c
        model.recovered=slice(5,6)
        model.deaths=slice(6,7)
        model.all_susceptibles=[0,8]
        model.S_c=8
        model.I_1 = 2
        x0_SC3EI3R = [1.0-I_0, 0.0, I_0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
        model.initial_values = (x0_SC3EI3R, 0)
        rtn['state'] = state
        rtn['param_list'] = param_list
        rtn['model'] = model
        return rtn
    if mod_name == 'SC2UIR':
        state = ['S', 'I', 'R', 'D', 'I_c', 'S_c', 'S_u', 'W']
        param_list = ['beta', 'gamma', 'mu', 'c_0', 'c_1', 'c_2', 'k_u', 'k_1', 'k_w','kappa', 'N']
        transition = [
            Transition(origin='S', equation='-beta*(I+c_0*I_c)*S+c_1*S_c-c_2*(I+I_c)*S-k_u*(1-W)*S+k_1*S_u'),
            Transition(origin='S_c', equation='-c_0*beta*(I+c_0*I_c)*S_c-c_1*S_c+c_2*(I+I_c)*S-k_u*(1-W)*S_c'),
            Transition(origin='S_u', equation='-beta*(I+c_0*I_c)*S_u+k_u*(1-W)*(S+S_c)-k_1*S_u'),
            Transition(origin='I', equation='beta*(I+c_0*I_c)*S-gamma*I-mu*I+c_1*I_c-c_2*(I+I_c)*I'),
            Transition(origin='I_c', equation='c_0*beta*(I+c_0*I_c)*S_c-gamma*I_c-mu*I_c-c_1*I_c+c_2*(I+I_c)*I'),
            Transition(origin='R', equation='gamma*(I+I_c)'),
            Transition(origin='D', equation='mu*(I+I_c)'),
            Transition(origin='W', equation='k_w*W*(1-kappa*S_c-W)')
            ]
        model = DeterministicOde(state, param_list, ode=transition)
        model.modelname='SC2UIR'
        model.ei=1 # case 1 i.e. I # note I_c not included
        model.confirmed=slice(1,5) # cases 1-4 i.e. I, R, D, and I_c
        model.recovered=slice(2,3)
        model.deaths=slice(3,4)
        model.all_susceptibles=[0,5,6]
        model.S_c=5
        model.I_1 = 1
        x0_SC2UIR = [1.0-I_0, I_0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0]
        model.initial_values = (x0_SC2UIR, 0)
        rtn['state'] = state
        rtn['param_list'] = param_list
        rtn['model'] = model
        return rtn
    # NOTE(review): duplicate condition — this second 'SC2UIR' branch is
    # unreachable dead code because the branch above always returns first.
    # It holds the transition-based variant with a separate birth/death list
    # intended for stochastic simulation (see the header notes at the top of
    # the file); to activate it, remove or rename the ODE-based branch above.
    if mod_name == 'SC2UIR':
        state = ['S', 'I', 'R', 'D', 'I_c', 'S_c', 'S_u', 'W']
        param_list = ['beta', 'gamma', 'mu', 'c_0', 'c_1', 'c_2', 'k_u', 'k_1', 'k_w','kappa', 'N']
        transition = [
            Transition(origin='S', destination='I', equation='beta*(I+c_0*I_c)*S', transition_type=TransitionType.T),
            Transition(origin='S', destination='S_c', equation='c_2*(I+I_c)*S', transition_type=TransitionType.T),
            Transition(origin='S', destination='S_u', equation='k_u*(1-W)*S', transition_type=TransitionType.T),
            Transition(origin='S_c', destination='S', equation='c_1*S_c', transition_type=TransitionType.T),
            Transition(origin='S_c', destination='I_c', equation='c_0*beta*(I+c_0*I_c)*S_c', transition_type=TransitionType.T),
            Transition(origin='S_c', destination='S_u', equation='k_u*(1-W)*S_c', transition_type=TransitionType.T),
            Transition(origin='S_u', destination='S', equation='k_1*S_u', transition_type=TransitionType.T),
            Transition(origin='S_u', destination='I', equation='beta*(I+c_0*I_c)*S_u', transition_type=TransitionType.T),
            Transition(origin='I', destination='I_c', equation='c_2*(I+I_c)*I', transition_type=TransitionType.T),
            Transition(origin='I', destination='R', equation='gamma*I', transition_type=TransitionType.T),
            Transition(origin='I', destination='D', equation='mu*I', transition_type=TransitionType.T),
            Transition(origin='I_c', destination='I', equation='c_1*I_c', transition_type=TransitionType.T),
            Transition(origin='I_c', destination='R', equation='gamma*I_c', transition_type=TransitionType.T),
            Transition(origin='I_c', destination='D', equation='mu*I_c', transition_type=TransitionType.T),
            Transition(origin='W', destination='D', equation='0*W', transition_type=TransitionType.T)
            ]
        bdlist = [Transition(origin='W',equation='k_w*W*(1-kappa*S_c-W)', transition_type=TransitionType.B)
            ]
        model = DeterministicOde(state, param_list, transition=transition)
        model.birth_death_list = bdlist
        model.modelname='SC2UIR'
        model.ei=1 # case 1 i.e. I # note I_c not included
        model.confirmed=slice(1,5) # cases 1-4 i.e. I, R, D, and I_c
        model.recovered=slice(2,3)
        model.deaths=slice(3,4)
        model.all_susceptibles=[0,5,6]
        model.S_c=5
        model.I_1 = 1
        # NOTE(review): misnamed (copied from SC3UEIR) and holds 10 entries for
        # this model's 8 states — verify before reactivating this branch.
        x0_SC3UEIR = [1.0-I_0, 0.0, I_0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0]
        model.initial_values = (x0_SC3UEIR, 0)
        rtn['state'] = state
        rtn['param_list'] = param_list
        rtn['model'] = model
        return rtn
    if mod_name == 'SC3UEIR':
        state = ['S', 'E', 'I', 'R', 'D', 'I_c', 'S_c', 'E_c', 'S_u', 'W']
        param_list = ['beta', 'alpha', 'gamma', 'mu', 'c_0', 'c_1', 'c_2', 'k_u', 'k_1', 'k_w','kappa', 'N']
        transition = [
            Transition(origin='S', equation='-beta*(I+c_0*I_c)*S+c_1*S_c-c_2*(I+I_c)*S-k_u*(1-W)*S+k_1*S_u'),
            Transition(origin='S_c', equation='-c_0*beta*(I+c_0*I_c)*S_c-c_1*S_c+c_2*(I+I_c)*S-k_u*(1-W)*S_c'),
            Transition(origin='S_u', equation='-beta*(I+c_0*I_c)*S_u+k_u*(1-W)*(S+S_c)-k_1*S_u'),
            Transition(origin='E', equation='beta*(I+c_0*I_c)*(S+S_u)-alpha*E+c_1*E_c-c_2*(I+I_c)*E'),
            Transition(origin='E_c', equation='c_0*beta*(I+c_0*I_c)*S_c-alpha*E_c-c_1*E_c+c_2*(I+I_c)*E'),
            Transition(origin='I', equation='alpha*E-gamma*I-mu*I+c_1*I_c-c_2*(I+I_c)*I'),
            Transition(origin='I_c', equation='alpha*E_c-gamma*I_c-mu*I_c-c_1*I_c+c_2*(I+I_c)*I'),
            Transition(origin='R', equation='gamma*(I+I_c)'),
            Transition(origin='D', equation='mu*(I+I_c)'),
            Transition(origin='W', equation='k_w*W*(1-kappa*S_c-W)')
            ]
        model = DeterministicOde(state, param_list, ode=transition)
        model.modelname='SC3UEIR'
        model.ei=slice(1,3) # cases 1,2 i.e. E,I # note E_c and I_c not included
        model.confirmed=slice(2,6) # cases 2-5 i.e. I, R, D, and I_c, not E, E_c
        model.recovered=slice(3,4)
        model.deaths=slice(4,5)
        model.all_susceptibles=[0,6,8]
        model.S_c=6
        model.I_1 = 2
        x0_SC3UEIR = [1.0-I_0, 0.0, I_0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0]
        model.initial_values = (x0_SC3UEIR, 0)
        rtn['state'] = state
        rtn['param_list'] = param_list
        rtn['model'] = model
        return rtn
    if mod_name == 'SC3UEI3R':
        state = ['S', 'E', 'I_1', 'I_2', 'I_3', 'R', 'D', 'I_c', 'S_c', 'E_c', 'S_u', 'W'] # order important to allow correct plot groupings
        param_list = ['beta_1', 'beta_2', 'beta_3', 'p_1', 'p_2', 'alpha',
                      'gamma_1', 'gamma_2', 'gamma_3','mu', 'c_0', 'c_1', 'c_2', 'k_u', 'k_1', 'k_w', 'kappa', 'N'] # order also important
        transition = [
            Transition(origin='S', equation='-(beta_1*(I_1+c_0*I_c)+beta_2*I_2+beta_3*I_3)*S+c_1*S_c-c_2*(I_3)*S-k_u*(1-W)*S+k_1*S_u'),
            Transition(origin='S_c', equation='-c_0*(beta_1*(I_1+c_0*I_c)+beta_2*I_2+beta_3*I_3)*S_c-c_1*S_c+c_2*(I_3)*S-k_u*(1-W)*S_c'),
            Transition(origin='S_u', equation='-(beta_1*(I_1+c_0*I_c)+beta_2*I_2+beta_3*I_3)*S_u+k_u*(1-W)*(S+S_c)-k_1*S_u'),
            Transition(origin='W', equation='k_w*W*(1-kappa*S_c-W)'),
            Transition(origin='E', equation='beta_1*(I_1+c_0*I_c)*(S+S_u)-alpha*E-c_2*(I_3)*E+c_1*E_c'),
            Transition(origin='E_c', equation='c_0*beta_1*(I_1+c_0*I_c)*S_c-alpha*E_c+c_2*(I_3)*E-c_1*E_c'),
            Transition(origin='I_1', equation='alpha*E-gamma_1*I_1-p_1*I_1-c_2*(I_3)*I_1+c_1*I_c'),
            Transition(origin='I_c', equation='alpha*E_c-gamma_1*I_c-p_1*I_c+c_2*(I_3)*I_1-c_1*I_c'), # changed to I_c, prints better
            Transition(origin='I_2', equation='p_1*(I_1+I_c)-gamma_2*I_2-p_2*I_2'),
            Transition(origin='I_3', equation='p_2*I_2-gamma_3*I_3-mu*I_3'), # error corrected, this is equation for I_3 not I_2
            Transition(origin='R', equation='gamma_1*(I_1+I_c)+gamma_2*I_2+gamma_3*I_3'),
            Transition(origin='D', equation='mu*I_3')
            ]
        model = DeterministicOde(state, param_list, ode=transition)
        model.modelname='SC3UEI3R' # following needs to be adjusted for new models, NB add new species at end to preserve slice subsets
        model.ei=slice(1,5) # 1,2,3,4 i.e. E,I_1,I_2,I_3 – not E_c and I_c
        model.confirmed=slice(2,8) # cases 2-7 i.e. I1, I2, I3, R, D and I_c
        model.recovered=slice(5,6) # case 5 R
        model.deaths=slice(6,7) # case 6 D
        model.all_susceptibles=[0,8,10]
        model.S_c=8
        model.I_1 = 2
        x0_SC3UEI3R = [1.0-I_0, 0.0, I_0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0]
        model.initial_values = (x0_SC3UEI3R, 0)
        rtn['state'] = state
        rtn['param_list'] = param_list
        rtn['model'] = model
        return rtn
def param_copy(model):
    """Re-key model.parameters onto the key names of model.params (matched by
    position), print the result, and assign it back to model.parameters."""
    current = model.parameters
    target_keys = list(model.params.keys())
    source_keys = list(model.parameters.keys())
    remapped = {target_keys[i]: current[source_keys[i]]
                for i in range(len(target_keys))}
    print(remapped)
    model.parameters = remapped
def param_modify(model,param,value):
    """Like param_copy, but additionally override key *param* with *value*
    before printing and reassigning model.parameters."""
    current = model.parameters
    target_keys = list(model.params.keys())
    source_keys = list(model.parameters.keys())
    updated = {target_keys[i]: current[source_keys[i]]
               for i in range(len(target_keys))}
    updated[param] = value
    print(updated)
    model.parameters = updated
# param_modify(SCIR_model,'beta',0.721) # requires .params to be set (see below)
def vector2params_old(b,a,g,p,u,c,k,N,modelname,frac_critical=None):
    """this earlier version of the parameter translation routine is kept here for reference
    allows the construction of model specific parameters for different models from a single set
    based on SEI3R model with vector b,g,p as well as vector caution c and economics k
    later modified for better correspondence between SEIR and SEI3R and derivates

    frac_critical: fraction of infections that are critical, used to scale c_2
    for caution models without hospitalization.  Defaults to the module-level
    global FracCritical (legacy behaviour — the original read that global
    implicitly, a latent NameError when it was undefined)."""
    if 'I3' in modelname: # models with hospitalization
        params = {
            'beta_1' : b[1],
            'beta_2' : b[2],
            'beta_3' : b[3],
            'alpha' : a,
            'gamma_1': g[1],
            'gamma_2': g[2],
            'gamma_3': g[3],
            'p_1' : p[1],
            'p_2' : p[2],
            'mu' : u}
    elif 'E' in modelname:
        params = {
            'beta' : b[1], # see above for explanations
            'alpha' : a,
            'gamma': g[1]+g[2]*(p[1]/(g[2]+p[2]))+g[3]*(p[1]/(g[2]+p[2]))*(p[2]/(g[3]+u)),
            'mu' : u*(p[1]/(g[2]+p[2])*(p[2]/(g[3]+u)))}
    else:
        params = {
            'beta' : b[1], # see above for explanations
            'gamma': g[1]+g[2]*(p[1]/(g[2]+p[2]))+g[3]*(p[1]/(g[2]+p[2]))*(p[2]/(g[3]+u)),
            'mu' : u*(p[1]/(g[2]+p[2])*(p[2]/(g[3]+u)))}
    if 'C' in modelname: # models with caution
        params['c_0'] = c[0]
        params['c_1'] = c[1]
        if 'I3' in modelname: # models with hospitalization
            params['c_2'] = c[2]
        else:
            if frac_critical is None:
                frac_critical = FracCritical  # legacy fallback to module-level global
            params['c_2'] = c[2]*frac_critical
    if 'U' in modelname: # models with economic correction to caution
        params['k_u'] = k[0]
        params['k_1'] = k[1]
        params['k_w'] = k[2]
        params['kappa'] = k[3]
    params['N'] = N
    return params
def vector2params(b,a,g,p,u,c,k,N,FracCritical,modelname):
    """allows the construction of model specific parameters for different models from a single set
    based on SEI3R model with vector b,g,p as well as vector caution c and economics k"""
    if 'I3' in modelname: # models with hospitalization keep the full staged rates
        params = {'beta_1': b[1],
                  'beta_2': b[2],
                  'beta_3': b[3],
                  'alpha': a,
                  'gamma_1': g[1],
                  'gamma_2': g[2],
                  'gamma_3': g[3],
                  'p_1': p[1],
                  'p_2': p[2],
                  'mu': u}
    else:
        # collapse the three infection stages into effective single-stage rates
        sev = p[1]/(g[2]+p[2])      # relative occupancy of stage 2 per stage-1 case
        crit = p[2]/(g[3]+u)        # relative occupancy of stage 3 per stage-2 case
        irat = 1 + sev + sev*0 + crit  # total relative infected occupancy
        irat = 1 + sev + crit
        eff_gamma = (g[1] + g[2]*sev + g[3]*sev*crit)/irat
        eff_mu = u*(sev*crit/irat)
        if 'E' in modelname:
            params = {'beta': b[1], 'alpha': a, 'gamma': eff_gamma, 'mu': eff_mu}
        else:
            params = {'beta': b[1], 'gamma': eff_gamma, 'mu': eff_mu}
    if 'C' in modelname: # models with caution
        params['c_0'] = c[0]
        params['c_1'] = c[1]
        # without hospitalization, scale the ICU-occupancy trigger by FracCritical
        params['c_2'] = c[2] if 'I3' in modelname else c[2]*FracCritical
    if 'U' in modelname: # models with economic correction to caution
        params['k_u'] = k[0]
        params['k_1'] = k[1]
        params['k_w'] = k[2]
        params['kappa'] = k[3]
    params['N'] = N
    return params
def params2vector(params,modelname='SC3UEI3R'): # requires I3 in modelname
    """Inverse of vector2params for hospitalization ('I3') models: unpack a
    params dict back into the (b,a,g,p,u,c,k,N) vector form.

    Bug fix: the caution entries were read as c_1/c_2/c_3, but vector2params
    stores them under c_0/c_1/c_2 ('c_3' never exists), so round-tripping
    raised KeyError and shifted the caution vector by one.
    """
    b = [None,None,None,None]
    g = [None,None,None,None]
    p = [None,None,None]
    c = [None,None,None]
    k = [None,None,None,None]
    b[0]=0.0
    b[1]=params['beta_1']
    b[2]=params['beta_2']
    b[3]=params['beta_3']
    g[0]=0.0
    g[1]=params['gamma_1']
    g[2]=params['gamma_2']
    g[3]=params['gamma_3']
    p[0]=0.0
    p[1]=params['p_1']
    p[2]=params['p_2']
    a=params['alpha']
    u=params['mu']
    N=params['N']
    if 'C' in modelname: # models with caution
        c[0]=params['c_0']
        c[1]=params['c_1']
        c[2]=params['c_2']
    if 'U' in modelname: # models with economic correction to caution
        k[0] = params['k_u']
        k[1] = params['k_1']
        k[2] = params['k_w']
        k[3] = params['kappa']
    return (b,a,g,p,u,c,k,N)
def base2vectors(sbparams,cbparams,fbparams):
    """Convert the base-parameter dictionaries into the vector form
    (b,a,g,p,u,c,k,N,FracCritical,I0) consumed by vector2params."""
    frac_mild = sbparams['FracMild']
    frac_severe = sbparams['FracSevere']
    frac_critical = sbparams['FracCritical']
    incub = sbparams['IncubPeriod']
    dur_mild = sbparams['DurMildInf']
    dur_hosp = sbparams['DurHosp']
    time_icu_death = sbparams['TimeICUDeath']
    # detection fractions are read (so a missing key still fails loudly) but
    # do not enter the vector form
    _ = (fbparams['FracConfirmedDet'], fbparams['FracRecoveredDet'],
         fbparams['FracDeathsDet'])
    N = 1
    a = 1/incub                                  # rate out of the exposed class
    # only the mild class transmits (hospitalized cases don't)
    b = sbparams['Exposure']*np.array([0,1,0,0])/N
    u = (1/time_icu_death)*(sbparams['CFR']/frac_critical)  # ICU death rate
    g = np.zeros(4)
    p = np.zeros(3)
    g[3] = (1/time_icu_death)-u                  # ICU recovery rate
    p[2] = (1/dur_hosp)*(frac_critical/(frac_critical+frac_severe))
    g[2] = (1/dur_hosp)-p[2]
    g[1] = (1/dur_mild)*frac_mild
    p[1] = (1/dur_mild)-g[1]
    c = np.zeros(3)
    c[0] = cbparams['CautionFactor']
    c[1] = 1/cbparams['CautionRetention']
    # rate coefficient giving 1/day when I3 equals the denominator
    c[2] = 1/(N*sbparams['ICUFrac']*cbparams['CautionICUFrac'])
    k = np.zeros(4)
    econ_rate = 1/cbparams['EconomicRetention']
    k[0] = econ_rate   # assumes default rate is same as 1
    k[1] = econ_rate   # this is always correct
    k[2] = econ_rate   # assumes default rate is same as 1
    k[3] = cbparams['EconomicCostOfCaution']
    return (b,a,g,p,u,c,k,N,frac_critical,sbparams['I0'])
def base2params(sbparams, cbparams, fbparams, smodel):
    """Translate base parameter dictionaries straight into pygom parameters for *smodel*."""
    # base2vectors returns (..., I0) last; I0 is not needed by vector2params,
    # which instead takes the model name as its final argument.
    *rates, _unused_I0 = base2vectors(sbparams, cbparams, fbparams)
    return vector2params(*rates, smodel)
def base2ICs(I0, N, smodel, cmodels):
    """Build initial conditions for model *smodel*: all states zero except
    susceptibles N*(1-I0) in state 0 and initial infectives N*I0 at index
    model.I_1. Returns (x0, t0) with t0 taken from the model's existing
    initial_values."""
    model = cmodels[smodel]
    template, t0 = model.initial_values
    n_states = len(template)
    x0 = [0.] * n_states
    x0[0] = N * (1 - I0)
    if model.I_1 >= n_states:
        # out-of-range infectives index: report and leave infectives unset
        print('error, initial infectives location out of bounds', model.I_1, 'not <', n_states)
    else:
        x0[model.I_1] = N * I0
    return (x0, t0)
# Multimodel-consistent parameter sets, based on a standard baseline for SEI3RD.

# --- standard epidemiological parameters ---
Exposure = 0.25       # rate coefficient for exposure per individual in contact per day
IncubPeriod = 5       # incubation period, days
DurMildInf = 10       # duration of mild infections, days
FracMild = 0.8        # fraction of infections that are mild
FracSevere = 0.15     # fraction of infections that are severe
FracCritical = 0.05   # fraction of infections that are critical
CFR = 0.02            # case fatality rate (fraction of infections resulting in death)
TimeICUDeath = 7      # time from ICU admission to death, days
DurHosp = 11          # duration of hospitalization, days
ICUFrac = 0.001       # fraction of ICUs relative to population size N
I0 = 0.00003          # fraction of population initially infected
sbparams = dict(Exposure=Exposure, IncubPeriod=IncubPeriod, DurMildInf=DurMildInf,
                FracMild=FracMild, FracSevere=FracSevere, FracCritical=FracCritical,
                CFR=CFR, TimeICUDeath=TimeICUDeath, DurHosp=DurHosp,
                ICUFrac=ICUFrac, I0=I0)

# --- model extension to include caution ---
CautionFactor = 0.3      # fractional reduction of exposure rate for cautioned individuals
CautionRetention = 14.   # duration of cautionary state of susceptibles, days
CautionICUFrac = 0.25    # fraction of ICUs occupied leading to 90% of susceptibles in caution
EconomicRetention = CautionRetention  # duration of economic dominant state (here same as caution, typically longer)
EconomicCostOfCaution = 0.5           # cost to economy of individual exercising caution
cbparams = dict(CautionFactor=CautionFactor, CautionRetention=CautionRetention,
                CautionICUFrac=CautionICUFrac, EconomicRetention=EconomicRetention,
                EconomicCostOfCaution=EconomicCostOfCaution)

# --- model-fitting extension to allow for incomplete detection ---
FracConfirmedDet = 1.0               # fraction of confirmed individuals measured (plots with this parameter NYI)
FracRecoveredDet = FracConfirmedDet  # fraction of recovered individuals measured
FracDeathsDet = 1.0                  # fraction of deaths measured
fbparams = dict(FracConfirmedDet=FracConfirmedDet, FracRecoveredDet=FracRecoveredDet,
                FracDeathsDet=FracDeathsDet)
b, a, g, p, u, c, k, N, FracCritical, I0 = base2vectors(sbparams, cbparams, fbparams)
# Extra data-related params for defining a run, including possible fitting with sliders:
dbparams = {'run_name': '', 'country': '', 'data_src': 'owid'}
smodels = ['SIR', 'SCIR', 'SC2IR', 'SEIR', 'SCEIR', 'SC3EIR', 'SEI3R', 'SCEI3R',
           'SC3EI3R', 'SC2UIR', 'SC3UEIR', 'SC3UEI3R']
# Initialize all models: construct each pygom model, attach its parameters,
# initial conditions, and the base-parameter dictionaries used to derive them.
cmodels = {}
fullmodels = {}
for smodel in smodels:
    fullmodels[smodel] = make_model(smodel)
    model = fullmodels[smodel]['model']  # cmodels[smodel] and fullmodels[smodel]['model'] are the same object
    cmodels[smodel] = model
    params_in = vector2params(b, a, g, p, u, c, k, N, FracCritical, smodel)
    model.initial_values = base2ICs(I0, N, smodel, cmodels)
    model.parameters = params_in  # sets symbolic name parameters (was set twice on the same object before)
    model.params = params_in      # sets string params
    model.sbparams = sbparams
    model.cbparams = cbparams
    model.fbparams = fbparams
    dbparams['run_name'] = smodel  # default value when no country yet
    # Bug fix: store a per-model copy. Previously every model held a reference
    # to the same dict, so after the loop they all carried the last model's run_name.
    model.dbparams = dict(dbparams)
    # Expose e.g. SIR_model as a module-level name without exec (safer, analyzable).
    modelnm = smodel + '_model'
    globals()[modelnm] = model
| [
"pygom.DeterministicOde",
"pickle.dump",
"sympy.zeros",
"os.getcwd",
"pygom.SimulateOde",
"numpy.zeros",
"pandas.plotting.register_matplotlib_converters",
"pygom.Transition",
"pickle.load",
"numpy.array",
"ipywidgets.widgets.Layout",
"IPython.display.HTML"
] | [((1221, 1253), 'pandas.plotting.register_matplotlib_converters', 'register_matplotlib_converters', ([], {}), '()\n', (1251, 1253), False, 'from pandas.plotting import register_matplotlib_converters\n'), ((5369, 5388), 'ipywidgets.widgets.Layout', 'Layout', ([], {'width': '"""99%"""'}), "(width='99%')\n", (5375, 5388), False, 'from ipywidgets.widgets import interact, interactive, IntSlider, FloatSlider, Layout, ToggleButton, ToggleButtons, fixed\n'), ((2024, 2035), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (2033, 2035), False, 'import os\n'), ((3270, 3281), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (3279, 3281), False, 'import os\n'), ((4308, 4326), 'pygom.DeterministicOde', 'DeterministicOde', ([], {}), '()\n', (4324, 4326), False, 'from pygom import DeterministicOde, Transition, SimulateOde, TransitionType, SquareLoss\n'), ((4871, 4893), 'sympy.zeros', 'sympy.zeros', (['A.rows', '(2)'], {}), '(A.rows, 2)\n', (4882, 4893), False, 'import sympy\n'), ((5252, 5312), 'IPython.display.HTML', 'HTML', (['"""<style>.container { width:100% !important; }</style>"""'], {}), "('<style>.container { width:100% !important; }</style>')\n", (5256, 5312), False, 'from IPython.display import display, HTML\n'), ((39590, 39601), 'numpy.zeros', 'np.zeros', (['(4)'], {}), '(4)\n', (39598, 39601), True, 'import numpy as np\n'), ((39619, 39630), 'numpy.zeros', 'np.zeros', (['(4)'], {}), '(4)\n', (39627, 39630), True, 'import numpy as np\n'), ((39649, 39660), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (39657, 39660), True, 'import numpy as np\n'), ((39685, 39696), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (39693, 39696), True, 'import numpy as np\n'), ((39717, 39728), 'numpy.zeros', 'np.zeros', (['(4)'], {}), '(4)\n', (39725, 39728), True, 'import numpy as np\n'), ((7030, 7088), 'pygom.DeterministicOde', 'DeterministicOde', (['state', 'param_list'], {'transition': 'transition'}), '(state, param_list, transition=transition)\n', (7046, 7088), False, 'from pygom 
import DeterministicOde, Transition, SimulateOde, TransitionType, SquareLoss\n'), ((8490, 8548), 'pygom.DeterministicOde', 'DeterministicOde', (['state', 'param_list'], {'transition': 'transition'}), '(state, param_list, transition=transition)\n', (8506, 8548), False, 'from pygom import DeterministicOde, Transition, SimulateOde, TransitionType, SquareLoss\n'), ((8598, 8651), 'pygom.SimulateOde', 'SimulateOde', (['state', 'param_list'], {'transition': 'transition'}), '(state, param_list, transition=transition)\n', (8609, 8651), False, 'from pygom import DeterministicOde, Transition, SimulateOde, TransitionType, SquareLoss\n'), ((10686, 10744), 'pygom.DeterministicOde', 'DeterministicOde', (['state', 'param_list'], {'transition': 'transition'}), '(state, param_list, transition=transition)\n', (10702, 10744), False, 'from pygom import DeterministicOde, Transition, SimulateOde, TransitionType, SquareLoss\n'), ((11932, 11990), 'pygom.DeterministicOde', 'DeterministicOde', (['state', 'param_list'], {'transition': 'transition'}), '(state, param_list, transition=transition)\n', (11948, 11990), False, 'from pygom import DeterministicOde, Transition, SimulateOde, TransitionType, SquareLoss\n'), ((13581, 13639), 'pygom.DeterministicOde', 'DeterministicOde', (['state', 'param_list'], {'transition': 'transition'}), '(state, param_list, transition=transition)\n', (13597, 13639), False, 'from pygom import DeterministicOde, Transition, SimulateOde, TransitionType, SquareLoss\n'), ((16270, 16328), 'pygom.DeterministicOde', 'DeterministicOde', (['state', 'param_list'], {'transition': 'transition'}), '(state, param_list, transition=transition)\n', (16286, 16328), False, 'from pygom import DeterministicOde, Transition, SimulateOde, TransitionType, SquareLoss\n'), ((18277, 18335), 'pygom.DeterministicOde', 'DeterministicOde', (['state', 'param_list'], {'transition': 'transition'}), '(state, param_list, transition=transition)\n', (18293, 18335), False, 'from pygom import 
DeterministicOde, Transition, SimulateOde, TransitionType, SquareLoss\n'), ((20614, 20672), 'pygom.DeterministicOde', 'DeterministicOde', (['state', 'param_list'], {'transition': 'transition'}), '(state, param_list, transition=transition)\n', (20630, 20672), False, 'from pygom import DeterministicOde, Transition, SimulateOde, TransitionType, SquareLoss\n'), ((24058, 24116), 'pygom.DeterministicOde', 'DeterministicOde', (['state', 'param_list'], {'transition': 'transition'}), '(state, param_list, transition=transition)\n', (24074, 24116), False, 'from pygom import DeterministicOde, Transition, SimulateOde, TransitionType, SquareLoss\n'), ((25665, 25716), 'pygom.DeterministicOde', 'DeterministicOde', (['state', 'param_list'], {'ode': 'transition'}), '(state, param_list, ode=transition)\n', (25681, 25716), False, 'from pygom import DeterministicOde, Transition, SimulateOde, TransitionType, SquareLoss\n'), ((28347, 28405), 'pygom.DeterministicOde', 'DeterministicOde', (['state', 'param_list'], {'transition': 'transition'}), '(state, param_list, transition=transition)\n', (28363, 28405), False, 'from pygom import DeterministicOde, Transition, SimulateOde, TransitionType, SquareLoss\n'), ((30194, 30245), 'pygom.DeterministicOde', 'DeterministicOde', (['state', 'param_list'], {'ode': 'transition'}), '(state, param_list, ode=transition)\n', (30210, 30245), False, 'from pygom import DeterministicOde, Transition, SimulateOde, TransitionType, SquareLoss\n'), ((32555, 32606), 'pygom.DeterministicOde', 'DeterministicOde', (['state', 'param_list'], {'ode': 'transition'}), '(state, param_list, ode=transition)\n', (32571, 32606), False, 'from pygom import DeterministicOde, Transition, SimulateOde, TransitionType, SquareLoss\n'), ((3010, 3033), 'pickle.dump', 'pk.dump', (['all_params', 'fp'], {}), '(all_params, fp)\n', (3017, 3033), True, 'import pickle as pk\n'), ((3623, 3634), 'pickle.load', 'pk.load', (['fp'], {}), '(fp)\n', (3630, 3634), True, 'import pickle as pk\n'), ((6624, 
6722), 'pygom.Transition', 'Transition', ([], {'origin': '"""S"""', 'destination': '"""I"""', 'equation': '"""beta*I*S"""', 'transition_type': 'TransitionType.T'}), "(origin='S', destination='I', equation='beta*I*S',\n transition_type=TransitionType.T)\n", (6634, 6722), False, 'from pygom import DeterministicOde, Transition, SimulateOde, TransitionType, SquareLoss\n'), ((6755, 6853), 'pygom.Transition', 'Transition', ([], {'origin': '"""I"""', 'destination': '"""R"""', 'equation': '"""gamma*I"""', 'transition_type': 'TransitionType.T'}), "(origin='I', destination='R', equation='gamma*I', transition_type\n =TransitionType.T)\n", (6765, 6853), False, 'from pygom import DeterministicOde, Transition, SimulateOde, TransitionType, SquareLoss\n'), ((6885, 6980), 'pygom.Transition', 'Transition', ([], {'origin': '"""I"""', 'destination': '"""D"""', 'equation': '"""mu*I"""', 'transition_type': 'TransitionType.T'}), "(origin='I', destination='D', equation='mu*I', transition_type=\n TransitionType.T)\n", (6895, 6980), False, 'from pygom import DeterministicOde, Transition, SimulateOde, TransitionType, SquareLoss\n'), ((7677, 7775), 'pygom.Transition', 'Transition', ([], {'origin': '"""S"""', 'destination': '"""I"""', 'equation': '"""beta*I*S"""', 'transition_type': 'TransitionType.T'}), "(origin='S', destination='I', equation='beta*I*S',\n transition_type=TransitionType.T)\n", (7687, 7775), False, 'from pygom import DeterministicOde, Transition, SimulateOde, TransitionType, SquareLoss\n'), ((7808, 7907), 'pygom.Transition', 'Transition', ([], {'origin': '"""S"""', 'destination': '"""S_c"""', 'equation': '"""c_2*I*S"""', 'transition_type': 'TransitionType.T'}), "(origin='S', destination='S_c', equation='c_2*I*S',\n transition_type=TransitionType.T)\n", (7818, 7907), False, 'from pygom import DeterministicOde, Transition, SimulateOde, TransitionType, SquareLoss\n'), ((7940, 8039), 'pygom.Transition', 'Transition', ([], {'origin': '"""S_c"""', 'destination': '"""S"""', 
'equation': '"""c_1*S_c"""', 'transition_type': 'TransitionType.T'}), "(origin='S_c', destination='S', equation='c_1*S_c',\n transition_type=TransitionType.T)\n", (7950, 8039), False, 'from pygom import DeterministicOde, Transition, SimulateOde, TransitionType, SquareLoss\n'), ((8072, 8178), 'pygom.Transition', 'Transition', ([], {'origin': '"""S_c"""', 'destination': '"""I"""', 'equation': '"""c_0*beta*I*S_c"""', 'transition_type': 'TransitionType.T'}), "(origin='S_c', destination='I', equation='c_0*beta*I*S_c',\n transition_type=TransitionType.T)\n", (8082, 8178), False, 'from pygom import DeterministicOde, Transition, SimulateOde, TransitionType, SquareLoss\n'), ((8211, 8309), 'pygom.Transition', 'Transition', ([], {'origin': '"""I"""', 'destination': '"""R"""', 'equation': '"""gamma*I"""', 'transition_type': 'TransitionType.T'}), "(origin='I', destination='R', equation='gamma*I', transition_type\n =TransitionType.T)\n", (8221, 8309), False, 'from pygom import DeterministicOde, Transition, SimulateOde, TransitionType, SquareLoss\n'), ((8341, 8436), 'pygom.Transition', 'Transition', ([], {'origin': '"""I"""', 'destination': '"""D"""', 'equation': '"""mu*I"""', 'transition_type': 'TransitionType.T'}), "(origin='I', destination='D', equation='mu*I', transition_type=\n TransitionType.T)\n", (8351, 8436), False, 'from pygom import DeterministicOde, Transition, SimulateOde, TransitionType, SquareLoss\n'), ((9309, 9417), 'pygom.Transition', 'Transition', ([], {'origin': '"""S"""', 'destination': '"""I"""', 'equation': '"""beta*(I+c_0*I_c)*S"""', 'transition_type': 'TransitionType.T'}), "(origin='S', destination='I', equation='beta*(I+c_0*I_c)*S',\n transition_type=TransitionType.T)\n", (9319, 9417), False, 'from pygom import DeterministicOde, Transition, SimulateOde, TransitionType, SquareLoss\n'), ((9450, 9555), 'pygom.Transition', 'Transition', ([], {'origin': '"""S"""', 'destination': '"""S_c"""', 'equation': '"""c_2*(I+I_c)*S"""', 'transition_type': 
'TransitionType.T'}), "(origin='S', destination='S_c', equation='c_2*(I+I_c)*S',\n transition_type=TransitionType.T)\n", (9460, 9555), False, 'from pygom import DeterministicOde, Transition, SimulateOde, TransitionType, SquareLoss\n'), ((9588, 9687), 'pygom.Transition', 'Transition', ([], {'origin': '"""S_c"""', 'destination': '"""S"""', 'equation': '"""c_1*S_c"""', 'transition_type': 'TransitionType.T'}), "(origin='S_c', destination='S', equation='c_1*S_c',\n transition_type=TransitionType.T)\n", (9598, 9687), False, 'from pygom import DeterministicOde, Transition, SimulateOde, TransitionType, SquareLoss\n'), ((9720, 9839), 'pygom.Transition', 'Transition', ([], {'origin': '"""S_c"""', 'destination': '"""I_c"""', 'equation': '"""c_0*beta*(I+c_0*I_c)*S_c"""', 'transition_type': 'TransitionType.T'}), "(origin='S_c', destination='I_c', equation=\n 'c_0*beta*(I+c_0*I_c)*S_c', transition_type=TransitionType.T)\n", (9730, 9839), False, 'from pygom import DeterministicOde, Transition, SimulateOde, TransitionType, SquareLoss\n'), ((9871, 9969), 'pygom.Transition', 'Transition', ([], {'origin': '"""I"""', 'destination': '"""R"""', 'equation': '"""gamma*I"""', 'transition_type': 'TransitionType.T'}), "(origin='I', destination='R', equation='gamma*I', transition_type\n =TransitionType.T)\n", (9881, 9969), False, 'from pygom import DeterministicOde, Transition, SimulateOde, TransitionType, SquareLoss\n'), ((10001, 10096), 'pygom.Transition', 'Transition', ([], {'origin': '"""I"""', 'destination': '"""D"""', 'equation': '"""mu*I"""', 'transition_type': 'TransitionType.T'}), "(origin='I', destination='D', equation='mu*I', transition_type=\n TransitionType.T)\n", (10011, 10096), False, 'from pygom import DeterministicOde, Transition, SimulateOde, TransitionType, SquareLoss\n'), ((10128, 10233), 'pygom.Transition', 'Transition', ([], {'origin': '"""I"""', 'destination': '"""I_c"""', 'equation': '"""c_2*(I+I_c)*I"""', 'transition_type': 'TransitionType.T'}), "(origin='I', 
destination='I_c', equation='c_2*(I+I_c)*I',\n transition_type=TransitionType.T)\n", (10138, 10233), False, 'from pygom import DeterministicOde, Transition, SimulateOde, TransitionType, SquareLoss\n'), ((10266, 10367), 'pygom.Transition', 'Transition', ([], {'origin': '"""I_c"""', 'destination': '"""R"""', 'equation': '"""gamma*I_c"""', 'transition_type': 'TransitionType.T'}), "(origin='I_c', destination='R', equation='gamma*I_c',\n transition_type=TransitionType.T)\n", (10276, 10367), False, 'from pygom import DeterministicOde, Transition, SimulateOde, TransitionType, SquareLoss\n'), ((10400, 10499), 'pygom.Transition', 'Transition', ([], {'origin': '"""I_c"""', 'destination': '"""I"""', 'equation': '"""c_1*I_c"""', 'transition_type': 'TransitionType.T'}), "(origin='I_c', destination='I', equation='c_1*I_c',\n transition_type=TransitionType.T)\n", (10410, 10499), False, 'from pygom import DeterministicOde, Transition, SimulateOde, TransitionType, SquareLoss\n'), ((10532, 10630), 'pygom.Transition', 'Transition', ([], {'origin': '"""I_c"""', 'destination': '"""D"""', 'equation': '"""mu*I_c"""', 'transition_type': 'TransitionType.T'}), "(origin='I_c', destination='D', equation='mu*I_c',\n transition_type=TransitionType.T)\n", (10542, 10630), False, 'from pygom import DeterministicOde, Transition, SimulateOde, TransitionType, SquareLoss\n'), ((11392, 11490), 'pygom.Transition', 'Transition', ([], {'origin': '"""S"""', 'destination': '"""E"""', 'equation': '"""beta*I*S"""', 'transition_type': 'TransitionType.T'}), "(origin='S', destination='E', equation='beta*I*S',\n transition_type=TransitionType.T)\n", (11402, 11490), False, 'from pygom import DeterministicOde, Transition, SimulateOde, TransitionType, SquareLoss\n'), ((11523, 11621), 'pygom.Transition', 'Transition', ([], {'origin': '"""E"""', 'destination': '"""I"""', 'equation': '"""alpha*E"""', 'transition_type': 'TransitionType.T'}), "(origin='E', destination='I', equation='alpha*E', transition_type\n 
=TransitionType.T)\n", (11533, 11621), False, 'from pygom import DeterministicOde, Transition, SimulateOde, TransitionType, SquareLoss\n'), ((11653, 11751), 'pygom.Transition', 'Transition', ([], {'origin': '"""I"""', 'destination': '"""R"""', 'equation': '"""gamma*I"""', 'transition_type': 'TransitionType.T'}), "(origin='I', destination='R', equation='gamma*I', transition_type\n =TransitionType.T)\n", (11663, 11751), False, 'from pygom import DeterministicOde, Transition, SimulateOde, TransitionType, SquareLoss\n'), ((11783, 11878), 'pygom.Transition', 'Transition', ([], {'origin': '"""I"""', 'destination': '"""D"""', 'equation': '"""mu*I"""', 'transition_type': 'TransitionType.T'}), "(origin='I', destination='D', equation='mu*I', transition_type=\n TransitionType.T)\n", (11793, 11878), False, 'from pygom import DeterministicOde, Transition, SimulateOde, TransitionType, SquareLoss\n'), ((12638, 12736), 'pygom.Transition', 'Transition', ([], {'origin': '"""S"""', 'destination': '"""E"""', 'equation': '"""beta*I*S"""', 'transition_type': 'TransitionType.T'}), "(origin='S', destination='E', equation='beta*I*S',\n transition_type=TransitionType.T)\n", (12648, 12736), False, 'from pygom import DeterministicOde, Transition, SimulateOde, TransitionType, SquareLoss\n'), ((12769, 12868), 'pygom.Transition', 'Transition', ([], {'origin': '"""S"""', 'destination': '"""S_c"""', 'equation': '"""c_2*I*S"""', 'transition_type': 'TransitionType.T'}), "(origin='S', destination='S_c', equation='c_2*I*S',\n transition_type=TransitionType.T)\n", (12779, 12868), False, 'from pygom import DeterministicOde, Transition, SimulateOde, TransitionType, SquareLoss\n'), ((12901, 13000), 'pygom.Transition', 'Transition', ([], {'origin': '"""S_c"""', 'destination': '"""S"""', 'equation': '"""c_1*S_c"""', 'transition_type': 'TransitionType.T'}), "(origin='S_c', destination='S', equation='c_1*S_c',\n transition_type=TransitionType.T)\n", (12911, 13000), False, 'from pygom import DeterministicOde, 
Transition, SimulateOde, TransitionType, SquareLoss\n'), ((13033, 13139), 'pygom.Transition', 'Transition', ([], {'origin': '"""S_c"""', 'destination': '"""E"""', 'equation': '"""c_0*beta*I*S_c"""', 'transition_type': 'TransitionType.T'}), "(origin='S_c', destination='E', equation='c_0*beta*I*S_c',\n transition_type=TransitionType.T)\n", (13043, 13139), False, 'from pygom import DeterministicOde, Transition, SimulateOde, TransitionType, SquareLoss\n'), ((13172, 13270), 'pygom.Transition', 'Transition', ([], {'origin': '"""E"""', 'destination': '"""I"""', 'equation': '"""alpha*E"""', 'transition_type': 'TransitionType.T'}), "(origin='E', destination='I', equation='alpha*E', transition_type\n =TransitionType.T)\n", (13182, 13270), False, 'from pygom import DeterministicOde, Transition, SimulateOde, TransitionType, SquareLoss\n'), ((13302, 13400), 'pygom.Transition', 'Transition', ([], {'origin': '"""I"""', 'destination': '"""R"""', 'equation': '"""gamma*I"""', 'transition_type': 'TransitionType.T'}), "(origin='I', destination='R', equation='gamma*I', transition_type\n =TransitionType.T)\n", (13312, 13400), False, 'from pygom import DeterministicOde, Transition, SimulateOde, TransitionType, SquareLoss\n'), ((13432, 13527), 'pygom.Transition', 'Transition', ([], {'origin': '"""I"""', 'destination': '"""D"""', 'equation': '"""mu*I"""', 'transition_type': 'TransitionType.T'}), "(origin='I', destination='D', equation='mu*I', transition_type=\n TransitionType.T)\n", (13442, 13527), False, 'from pygom import DeterministicOde, Transition, SimulateOde, TransitionType, SquareLoss\n'), ((14362, 14470), 'pygom.Transition', 'Transition', ([], {'origin': '"""S"""', 'destination': '"""E"""', 'equation': '"""beta*(I+c_0*I_c)*S"""', 'transition_type': 'TransitionType.T'}), "(origin='S', destination='E', equation='beta*(I+c_0*I_c)*S',\n transition_type=TransitionType.T)\n", (14372, 14470), False, 'from pygom import DeterministicOde, Transition, SimulateOde, TransitionType, 
SquareLoss\n'), ((14503, 14608), 'pygom.Transition', 'Transition', ([], {'origin': '"""S"""', 'destination': '"""S_c"""', 'equation': '"""c_2*(I+I_c)*S"""', 'transition_type': 'TransitionType.T'}), "(origin='S', destination='S_c', equation='c_2*(I+I_c)*S',\n transition_type=TransitionType.T)\n", (14513, 14608), False, 'from pygom import DeterministicOde, Transition, SimulateOde, TransitionType, SquareLoss\n'), ((14641, 14740), 'pygom.Transition', 'Transition', ([], {'origin': '"""S_c"""', 'destination': '"""S"""', 'equation': '"""c_1*S_c"""', 'transition_type': 'TransitionType.T'}), "(origin='S_c', destination='S', equation='c_1*S_c',\n transition_type=TransitionType.T)\n", (14651, 14740), False, 'from pygom import DeterministicOde, Transition, SimulateOde, TransitionType, SquareLoss\n'), ((14773, 14892), 'pygom.Transition', 'Transition', ([], {'origin': '"""S_c"""', 'destination': '"""E_c"""', 'equation': '"""c_0*beta*(I+c_0*I_c)*S_c"""', 'transition_type': 'TransitionType.T'}), "(origin='S_c', destination='E_c', equation=\n 'c_0*beta*(I+c_0*I_c)*S_c', transition_type=TransitionType.T)\n", (14783, 14892), False, 'from pygom import DeterministicOde, Transition, SimulateOde, TransitionType, SquareLoss\n'), ((14924, 15022), 'pygom.Transition', 'Transition', ([], {'origin': '"""E"""', 'destination': '"""I"""', 'equation': '"""alpha*E"""', 'transition_type': 'TransitionType.T'}), "(origin='E', destination='I', equation='alpha*E', transition_type\n =TransitionType.T)\n", (14934, 15022), False, 'from pygom import DeterministicOde, Transition, SimulateOde, TransitionType, SquareLoss\n'), ((15054, 15159), 'pygom.Transition', 'Transition', ([], {'origin': '"""E"""', 'destination': '"""E_c"""', 'equation': '"""c_2*(I+I_c)*E"""', 'transition_type': 'TransitionType.T'}), "(origin='E', destination='E_c', equation='c_2*(I+I_c)*E',\n transition_type=TransitionType.T)\n", (15064, 15159), False, 'from pygom import DeterministicOde, Transition, SimulateOde, TransitionType, 
SquareLoss\n'), ((15192, 15295), 'pygom.Transition', 'Transition', ([], {'origin': '"""E_c"""', 'destination': '"""I_c"""', 'equation': '"""alpha*E_c"""', 'transition_type': 'TransitionType.T'}), "(origin='E_c', destination='I_c', equation='alpha*E_c',\n transition_type=TransitionType.T)\n", (15202, 15295), False, 'from pygom import DeterministicOde, Transition, SimulateOde, TransitionType, SquareLoss\n'), ((15328, 15427), 'pygom.Transition', 'Transition', ([], {'origin': '"""E_c"""', 'destination': '"""E"""', 'equation': '"""c_1*E_c"""', 'transition_type': 'TransitionType.T'}), "(origin='E_c', destination='E', equation='c_1*E_c',\n transition_type=TransitionType.T)\n", (15338, 15427), False, 'from pygom import DeterministicOde, Transition, SimulateOde, TransitionType, SquareLoss\n'), ((15460, 15558), 'pygom.Transition', 'Transition', ([], {'origin': '"""I"""', 'destination': '"""R"""', 'equation': '"""gamma*I"""', 'transition_type': 'TransitionType.T'}), "(origin='I', destination='R', equation='gamma*I', transition_type\n =TransitionType.T)\n", (15470, 15558), False, 'from pygom import DeterministicOde, Transition, SimulateOde, TransitionType, SquareLoss\n'), ((15590, 15695), 'pygom.Transition', 'Transition', ([], {'origin': '"""I"""', 'destination': '"""I_c"""', 'equation': '"""c_2*(I+I_c)*I"""', 'transition_type': 'TransitionType.T'}), "(origin='I', destination='I_c', equation='c_2*(I+I_c)*I',\n transition_type=TransitionType.T)\n", (15600, 15695), False, 'from pygom import DeterministicOde, Transition, SimulateOde, TransitionType, SquareLoss\n'), ((15728, 15823), 'pygom.Transition', 'Transition', ([], {'origin': '"""I"""', 'destination': '"""D"""', 'equation': '"""mu*I"""', 'transition_type': 'TransitionType.T'}), "(origin='I', destination='D', equation='mu*I', transition_type=\n TransitionType.T)\n", (15738, 15823), False, 'from pygom import DeterministicOde, Transition, SimulateOde, TransitionType, SquareLoss\n'), ((15855, 15956), 'pygom.Transition', 
'Transition', ([], {'origin': '"""I_c"""', 'destination': '"""R"""', 'equation': '"""gamma*I_c"""', 'transition_type': 'TransitionType.T'}), "(origin='I_c', destination='R', equation='gamma*I_c',\n transition_type=TransitionType.T)\n", (15865, 15956), False, 'from pygom import DeterministicOde, Transition, SimulateOde, TransitionType, SquareLoss\n'), ((15989, 16088), 'pygom.Transition', 'Transition', ([], {'origin': '"""I_c"""', 'destination': '"""I"""', 'equation': '"""c_1*I_c"""', 'transition_type': 'TransitionType.T'}), "(origin='I_c', destination='I', equation='c_1*I_c',\n transition_type=TransitionType.T)\n", (15999, 16088), False, 'from pygom import DeterministicOde, Transition, SimulateOde, TransitionType, SquareLoss\n'), ((16121, 16219), 'pygom.Transition', 'Transition', ([], {'origin': '"""I_c"""', 'destination': '"""D"""', 'equation': '"""mu*I_c"""', 'transition_type': 'TransitionType.T'}), "(origin='I_c', destination='D', equation='mu*I_c',\n transition_type=TransitionType.T)\n", (16131, 16219), False, 'from pygom import DeterministicOde, Transition, SimulateOde, TransitionType, SquareLoss\n'), ((17156, 17283), 'pygom.Transition', 'Transition', ([], {'origin': '"""S"""', 'destination': '"""E"""', 'equation': '"""(beta_1*I_1+beta_2*I_2+beta_3*I_3)*S"""', 'transition_type': 'TransitionType.T'}), "(origin='S', destination='E', equation=\n '(beta_1*I_1+beta_2*I_2+beta_3*I_3)*S', transition_type=TransitionType.T)\n", (17166, 17283), False, 'from pygom import DeterministicOde, Transition, SimulateOde, TransitionType, SquareLoss\n'), ((17315, 17414), 'pygom.Transition', 'Transition', ([], {'origin': '"""E"""', 'destination': '"""I_1"""', 'equation': '"""alpha*E"""', 'transition_type': 'TransitionType.T'}), "(origin='E', destination='I_1', equation='alpha*E',\n transition_type=TransitionType.T)\n", (17325, 17414), False, 'from pygom import DeterministicOde, Transition, SimulateOde, TransitionType, SquareLoss\n'), ((17447, 17550), 'pygom.Transition', 
'Transition', ([], {'origin': '"""I_1"""', 'destination': '"""R"""', 'equation': '"""gamma_1*I_1"""', 'transition_type': 'TransitionType.T'}), "(origin='I_1', destination='R', equation='gamma_1*I_1',\n transition_type=TransitionType.T)\n", (17457, 17550), False, 'from pygom import DeterministicOde, Transition, SimulateOde, TransitionType, SquareLoss\n'), ((17583, 17686), 'pygom.Transition', 'Transition', ([], {'origin': '"""I_2"""', 'destination': '"""R"""', 'equation': '"""gamma_2*I_2"""', 'transition_type': 'TransitionType.T'}), "(origin='I_2', destination='R', equation='gamma_2*I_2',\n transition_type=TransitionType.T)\n", (17593, 17686), False, 'from pygom import DeterministicOde, Transition, SimulateOde, TransitionType, SquareLoss\n'), ((17719, 17822), 'pygom.Transition', 'Transition', ([], {'origin': '"""I_3"""', 'destination': '"""R"""', 'equation': '"""gamma_3*I_3"""', 'transition_type': 'TransitionType.T'}), "(origin='I_3', destination='R', equation='gamma_3*I_3',\n transition_type=TransitionType.T)\n", (17729, 17822), False, 'from pygom import DeterministicOde, Transition, SimulateOde, TransitionType, SquareLoss\n'), ((17855, 17956), 'pygom.Transition', 'Transition', ([], {'origin': '"""I_1"""', 'destination': '"""I_2"""', 'equation': '"""p_1*I_1"""', 'transition_type': 'TransitionType.T'}), "(origin='I_1', destination='I_2', equation='p_1*I_1',\n transition_type=TransitionType.T)\n", (17865, 17956), False, 'from pygom import DeterministicOde, Transition, SimulateOde, TransitionType, SquareLoss\n'), ((17989, 18090), 'pygom.Transition', 'Transition', ([], {'origin': '"""I_2"""', 'destination': '"""I_3"""', 'equation': '"""p_2*I_2"""', 'transition_type': 'TransitionType.T'}), "(origin='I_2', destination='I_3', equation='p_2*I_2',\n transition_type=TransitionType.T)\n", (17999, 18090), False, 'from pygom import DeterministicOde, Transition, SimulateOde, TransitionType, SquareLoss\n'), ((18123, 18221), 'pygom.Transition', 'Transition', ([], {'origin': 
'"""I_3"""', 'destination': '"""D"""', 'equation': '"""mu*I_3"""', 'transition_type': 'TransitionType.T'}), "(origin='I_3', destination='D', equation='mu*I_3',\n transition_type=TransitionType.T)\n", (18133, 18221), False, 'from pygom import DeterministicOde, Transition, SimulateOde, TransitionType, SquareLoss\n'), ((19060, 19187), 'pygom.Transition', 'Transition', ([], {'origin': '"""S"""', 'destination': '"""E"""', 'equation': '"""(beta_1*I_1+beta_2*I_2+beta_3*I_3)*S"""', 'transition_type': 'TransitionType.T'}), "(origin='S', destination='E', equation=\n '(beta_1*I_1+beta_2*I_2+beta_3*I_3)*S', transition_type=TransitionType.T)\n", (19070, 19187), False, 'from pygom import DeterministicOde, Transition, SimulateOde, TransitionType, SquareLoss\n'), ((19219, 19320), 'pygom.Transition', 'Transition', ([], {'origin': '"""S"""', 'destination': '"""S_c"""', 'equation': '"""c_2*I_3*S"""', 'transition_type': 'TransitionType.T'}), "(origin='S', destination='S_c', equation='c_2*I_3*S',\n transition_type=TransitionType.T)\n", (19229, 19320), False, 'from pygom import DeterministicOde, Transition, SimulateOde, TransitionType, SquareLoss\n'), ((19353, 19452), 'pygom.Transition', 'Transition', ([], {'origin': '"""S_c"""', 'destination': '"""S"""', 'equation': '"""c_1*S_c"""', 'transition_type': 'TransitionType.T'}), "(origin='S_c', destination='S', equation='c_1*S_c',\n transition_type=TransitionType.T)\n", (19363, 19452), False, 'from pygom import DeterministicOde, Transition, SimulateOde, TransitionType, SquareLoss\n'), ((19485, 19625), 'pygom.Transition', 'Transition', ([], {'origin': '"""S_c"""', 'destination': '"""E"""', 'equation': '"""c_0*(beta_1*I_1+beta_2*I_2+beta_3*I_3)*S_c"""', 'transition_type': 'TransitionType.T'}), "(origin='S_c', destination='E', equation=\n 'c_0*(beta_1*I_1+beta_2*I_2+beta_3*I_3)*S_c', transition_type=\n TransitionType.T)\n", (19495, 19625), False, 'from pygom import DeterministicOde, Transition, SimulateOde, TransitionType, SquareLoss\n'), 
((19652, 19751), 'pygom.Transition', 'Transition', ([], {'origin': '"""E"""', 'destination': '"""I_1"""', 'equation': '"""alpha*E"""', 'transition_type': 'TransitionType.T'}), "(origin='E', destination='I_1', equation='alpha*E',\n transition_type=TransitionType.T)\n", (19662, 19751), False, 'from pygom import DeterministicOde, Transition, SimulateOde, TransitionType, SquareLoss\n'), ((19784, 19887), 'pygom.Transition', 'Transition', ([], {'origin': '"""I_1"""', 'destination': '"""R"""', 'equation': '"""gamma_1*I_1"""', 'transition_type': 'TransitionType.T'}), "(origin='I_1', destination='R', equation='gamma_1*I_1',\n transition_type=TransitionType.T)\n", (19794, 19887), False, 'from pygom import DeterministicOde, Transition, SimulateOde, TransitionType, SquareLoss\n'), ((19920, 20023), 'pygom.Transition', 'Transition', ([], {'origin': '"""I_2"""', 'destination': '"""R"""', 'equation': '"""gamma_2*I_2"""', 'transition_type': 'TransitionType.T'}), "(origin='I_2', destination='R', equation='gamma_2*I_2',\n transition_type=TransitionType.T)\n", (19930, 20023), False, 'from pygom import DeterministicOde, Transition, SimulateOde, TransitionType, SquareLoss\n'), ((20056, 20159), 'pygom.Transition', 'Transition', ([], {'origin': '"""I_3"""', 'destination': '"""R"""', 'equation': '"""gamma_3*I_3"""', 'transition_type': 'TransitionType.T'}), "(origin='I_3', destination='R', equation='gamma_3*I_3',\n transition_type=TransitionType.T)\n", (20066, 20159), False, 'from pygom import DeterministicOde, Transition, SimulateOde, TransitionType, SquareLoss\n'), ((20192, 20293), 'pygom.Transition', 'Transition', ([], {'origin': '"""I_1"""', 'destination': '"""I_2"""', 'equation': '"""p_1*I_1"""', 'transition_type': 'TransitionType.T'}), "(origin='I_1', destination='I_2', equation='p_1*I_1',\n transition_type=TransitionType.T)\n", (20202, 20293), False, 'from pygom import DeterministicOde, Transition, SimulateOde, TransitionType, SquareLoss\n'), ((20326, 20427), 'pygom.Transition', 
'Transition', ([], {'origin': '"""I_2"""', 'destination': '"""I_3"""', 'equation': '"""p_2*I_2"""', 'transition_type': 'TransitionType.T'}), "(origin='I_2', destination='I_3', equation='p_2*I_2',\n transition_type=TransitionType.T)\n", (20336, 20427), False, 'from pygom import DeterministicOde, Transition, SimulateOde, TransitionType, SquareLoss\n'), ((20460, 20558), 'pygom.Transition', 'Transition', ([], {'origin': '"""I_3"""', 'destination': '"""D"""', 'equation': '"""mu*I_3"""', 'transition_type': 'TransitionType.T'}), "(origin='I_3', destination='D', equation='mu*I_3',\n transition_type=TransitionType.T)\n", (20470, 20558), False, 'from pygom import DeterministicOde, Transition, SimulateOde, TransitionType, SquareLoss\n'), ((21480, 21627), 'pygom.Transition', 'Transition', ([], {'origin': '"""S"""', 'destination': '"""E"""', 'equation': '"""(beta_1*I_1+beta_2*I_2+beta_3*I_3+c_0*beta_1*I_c)*S"""', 'transition_type': 'TransitionType.T'}), "(origin='S', destination='E', equation=\n '(beta_1*I_1+beta_2*I_2+beta_3*I_3+c_0*beta_1*I_c)*S', transition_type=\n TransitionType.T)\n", (21490, 21627), False, 'from pygom import DeterministicOde, Transition, SimulateOde, TransitionType, SquareLoss\n'), ((21654, 21810), 'pygom.Transition', 'Transition', ([], {'origin': '"""S_c"""', 'destination': '"""E_c"""', 'equation': '"""c_0*(beta_1*I_1+beta_2*I_2+beta_3*I_3+c_0*beta_1*I_c)*S_c"""', 'transition_type': 'TransitionType.T'}), "(origin='S_c', destination='E_c', equation=\n 'c_0*(beta_1*I_1+beta_2*I_2+beta_3*I_3+c_0*beta_1*I_c)*S_c',\n transition_type=TransitionType.T)\n", (21664, 21810), False, 'from pygom import DeterministicOde, Transition, SimulateOde, TransitionType, SquareLoss\n'), ((21838, 21939), 'pygom.Transition', 'Transition', ([], {'origin': '"""S"""', 'destination': '"""S_c"""', 'equation': '"""c_2*I_3*S"""', 'transition_type': 'TransitionType.T'}), "(origin='S', destination='S_c', equation='c_2*I_3*S',\n transition_type=TransitionType.T)\n", (21848, 21939), False, 
'from pygom import DeterministicOde, Transition, SimulateOde, TransitionType, SquareLoss\n'), ((21972, 22071), 'pygom.Transition', 'Transition', ([], {'origin': '"""S_c"""', 'destination': '"""S"""', 'equation': '"""c_1*S_c"""', 'transition_type': 'TransitionType.T'}), "(origin='S_c', destination='S', equation='c_1*S_c',\n transition_type=TransitionType.T)\n", (21982, 22071), False, 'from pygom import DeterministicOde, Transition, SimulateOde, TransitionType, SquareLoss\n'), ((22104, 22203), 'pygom.Transition', 'Transition', ([], {'origin': '"""E"""', 'destination': '"""I_1"""', 'equation': '"""alpha*E"""', 'transition_type': 'TransitionType.T'}), "(origin='E', destination='I_1', equation='alpha*E',\n transition_type=TransitionType.T)\n", (22114, 22203), False, 'from pygom import DeterministicOde, Transition, SimulateOde, TransitionType, SquareLoss\n'), ((22236, 22337), 'pygom.Transition', 'Transition', ([], {'origin': '"""E"""', 'destination': '"""E_c"""', 'equation': '"""c_2*I_3*E"""', 'transition_type': 'TransitionType.T'}), "(origin='E', destination='E_c', equation='c_2*I_3*E',\n transition_type=TransitionType.T)\n", (22246, 22337), False, 'from pygom import DeterministicOde, Transition, SimulateOde, TransitionType, SquareLoss\n'), ((22370, 22473), 'pygom.Transition', 'Transition', ([], {'origin': '"""E_c"""', 'destination': '"""I_c"""', 'equation': '"""alpha*E_c"""', 'transition_type': 'TransitionType.T'}), "(origin='E_c', destination='I_c', equation='alpha*E_c',\n transition_type=TransitionType.T)\n", (22380, 22473), False, 'from pygom import DeterministicOde, Transition, SimulateOde, TransitionType, SquareLoss\n'), ((22506, 22605), 'pygom.Transition', 'Transition', ([], {'origin': '"""E_c"""', 'destination': '"""E"""', 'equation': '"""c_1*E_c"""', 'transition_type': 'TransitionType.T'}), "(origin='E_c', destination='E', equation='c_1*E_c',\n transition_type=TransitionType.T)\n", (22516, 22605), False, 'from pygom import DeterministicOde, Transition, 
SimulateOde, TransitionType, SquareLoss\n'), ((22638, 22741), 'pygom.Transition', 'Transition', ([], {'origin': '"""I_1"""', 'destination': '"""R"""', 'equation': '"""gamma_1*I_1"""', 'transition_type': 'TransitionType.T'}), "(origin='I_1', destination='R', equation='gamma_1*I_1',\n transition_type=TransitionType.T)\n", (22648, 22741), False, 'from pygom import DeterministicOde, Transition, SimulateOde, TransitionType, SquareLoss\n'), ((22774, 22879), 'pygom.Transition', 'Transition', ([], {'origin': '"""I_1"""', 'destination': '"""I_c"""', 'equation': '"""c_2*I_3*I_1"""', 'transition_type': 'TransitionType.T'}), "(origin='I_1', destination='I_c', equation='c_2*I_3*I_1',\n transition_type=TransitionType.T)\n", (22784, 22879), False, 'from pygom import DeterministicOde, Transition, SimulateOde, TransitionType, SquareLoss\n'), ((22960, 23063), 'pygom.Transition', 'Transition', ([], {'origin': '"""I_c"""', 'destination': '"""R"""', 'equation': '"""gamma_1*I_c"""', 'transition_type': 'TransitionType.T'}), "(origin='I_c', destination='R', equation='gamma_1*I_c',\n transition_type=TransitionType.T)\n", (22970, 23063), False, 'from pygom import DeterministicOde, Transition, SimulateOde, TransitionType, SquareLoss\n'), ((23096, 23197), 'pygom.Transition', 'Transition', ([], {'origin': '"""I_c"""', 'destination': '"""I_1"""', 'equation': '"""c_1*I_c"""', 'transition_type': 'TransitionType.T'}), "(origin='I_c', destination='I_1', equation='c_1*I_c',\n transition_type=TransitionType.T)\n", (23106, 23197), False, 'from pygom import DeterministicOde, Transition, SimulateOde, TransitionType, SquareLoss\n'), ((23234, 23337), 'pygom.Transition', 'Transition', ([], {'origin': '"""I_2"""', 'destination': '"""R"""', 'equation': '"""gamma_2*I_2"""', 'transition_type': 'TransitionType.T'}), "(origin='I_2', destination='R', equation='gamma_2*I_2',\n transition_type=TransitionType.T)\n", (23244, 23337), False, 'from pygom import DeterministicOde, Transition, SimulateOde, TransitionType, 
SquareLoss\n'), ((23370, 23473), 'pygom.Transition', 'Transition', ([], {'origin': '"""I_3"""', 'destination': '"""R"""', 'equation': '"""gamma_3*I_3"""', 'transition_type': 'TransitionType.T'}), "(origin='I_3', destination='R', equation='gamma_3*I_3',\n transition_type=TransitionType.T)\n", (23380, 23473), False, 'from pygom import DeterministicOde, Transition, SimulateOde, TransitionType, SquareLoss\n'), ((23506, 23607), 'pygom.Transition', 'Transition', ([], {'origin': '"""I_1"""', 'destination': '"""I_2"""', 'equation': '"""p_1*I_1"""', 'transition_type': 'TransitionType.T'}), "(origin='I_1', destination='I_2', equation='p_1*I_1',\n transition_type=TransitionType.T)\n", (23516, 23607), False, 'from pygom import DeterministicOde, Transition, SimulateOde, TransitionType, SquareLoss\n'), ((23640, 23741), 'pygom.Transition', 'Transition', ([], {'origin': '"""I_c"""', 'destination': '"""I_2"""', 'equation': '"""p_1*I_c"""', 'transition_type': 'TransitionType.T'}), "(origin='I_c', destination='I_2', equation='p_1*I_c',\n transition_type=TransitionType.T)\n", (23650, 23741), False, 'from pygom import DeterministicOde, Transition, SimulateOde, TransitionType, SquareLoss\n'), ((23774, 23875), 'pygom.Transition', 'Transition', ([], {'origin': '"""I_2"""', 'destination': '"""I_3"""', 'equation': '"""p_2*I_2"""', 'transition_type': 'TransitionType.T'}), "(origin='I_2', destination='I_3', equation='p_2*I_2',\n transition_type=TransitionType.T)\n", (23784, 23875), False, 'from pygom import DeterministicOde, Transition, SimulateOde, TransitionType, SquareLoss\n'), ((23908, 24006), 'pygom.Transition', 'Transition', ([], {'origin': '"""I_3"""', 'destination': '"""D"""', 'equation': '"""mu*I_3"""', 'transition_type': 'TransitionType.T'}), "(origin='I_3', destination='D', equation='mu*I_3',\n transition_type=TransitionType.T)\n", (23918, 24006), False, 'from pygom import DeterministicOde, Transition, SimulateOde, TransitionType, SquareLoss\n'), ((24920, 25021), 
'pygom.Transition', 'Transition', ([], {'origin': '"""S"""', 'equation': '"""-beta*(I+c_0*I_c)*S+c_1*S_c-c_2*(I+I_c)*S-k_u*(1-W)*S+k_1*S_u"""'}), "(origin='S', equation=\n '-beta*(I+c_0*I_c)*S+c_1*S_c-c_2*(I+I_c)*S-k_u*(1-W)*S+k_1*S_u')\n", (24930, 25021), False, 'from pygom import DeterministicOde, Transition, SimulateOde, TransitionType, SquareLoss\n'), ((25030, 25133), 'pygom.Transition', 'Transition', ([], {'origin': '"""S_c"""', 'equation': '"""-c_0*beta*(I+c_0*I_c)*S_c-c_1*S_c+c_2*(I+I_c)*S-k_u*(1-W)*S_c"""'}), "(origin='S_c', equation=\n '-c_0*beta*(I+c_0*I_c)*S_c-c_1*S_c+c_2*(I+I_c)*S-k_u*(1-W)*S_c')\n", (25040, 25133), False, 'from pygom import DeterministicOde, Transition, SimulateOde, TransitionType, SquareLoss\n'), ((25142, 25231), 'pygom.Transition', 'Transition', ([], {'origin': '"""S_u"""', 'equation': '"""-beta*(I+c_0*I_c)*S_u+k_u*(1-W)*(S+S_c)-k_1*S_u"""'}), "(origin='S_u', equation=\n '-beta*(I+c_0*I_c)*S_u+k_u*(1-W)*(S+S_c)-k_1*S_u')\n", (25152, 25231), False, 'from pygom import DeterministicOde, Transition, SimulateOde, TransitionType, SquareLoss\n'), ((25240, 25333), 'pygom.Transition', 'Transition', ([], {'origin': '"""I"""', 'equation': '"""beta*(I+c_0*I_c)*S-gamma*I-mu*I+c_1*I_c-c_2*(I+I_c)*I"""'}), "(origin='I', equation=\n 'beta*(I+c_0*I_c)*S-gamma*I-mu*I+c_1*I_c-c_2*(I+I_c)*I')\n", (25250, 25333), False, 'from pygom import DeterministicOde, Transition, SimulateOde, TransitionType, SquareLoss\n'), ((25342, 25447), 'pygom.Transition', 'Transition', ([], {'origin': '"""I_c"""', 'equation': '"""c_0*beta*(I+c_0*I_c)*S_c-gamma*I_c-mu*I_c-c_1*I_c+c_2*(I+I_c)*I"""'}), "(origin='I_c', equation=\n 'c_0*beta*(I+c_0*I_c)*S_c-gamma*I_c-mu*I_c-c_1*I_c+c_2*(I+I_c)*I')\n", (25352, 25447), False, 'from pygom import DeterministicOde, Transition, SimulateOde, TransitionType, SquareLoss\n'), ((25456, 25504), 'pygom.Transition', 'Transition', ([], {'origin': '"""R"""', 'equation': '"""gamma*(I+I_c)"""'}), "(origin='R', equation='gamma*(I+I_c)')\n", (25466, 
25504), False, 'from pygom import DeterministicOde, Transition, SimulateOde, TransitionType, SquareLoss\n'), ((25518, 25563), 'pygom.Transition', 'Transition', ([], {'origin': '"""D"""', 'equation': '"""mu*(I+I_c)"""'}), "(origin='D', equation='mu*(I+I_c)')\n", (25528, 25563), False, 'from pygom import DeterministicOde, Transition, SimulateOde, TransitionType, SquareLoss\n'), ((25577, 25633), 'pygom.Transition', 'Transition', ([], {'origin': '"""W"""', 'equation': '"""k_w*W*(1-kappa*S_c-W)"""'}), "(origin='W', equation='k_w*W*(1-kappa*S_c-W)')\n", (25587, 25633), False, 'from pygom import DeterministicOde, Transition, SimulateOde, TransitionType, SquareLoss\n'), ((26502, 26610), 'pygom.Transition', 'Transition', ([], {'origin': '"""S"""', 'destination': '"""I"""', 'equation': '"""beta*(I+c_0*I_c)*S"""', 'transition_type': 'TransitionType.T'}), "(origin='S', destination='I', equation='beta*(I+c_0*I_c)*S',\n transition_type=TransitionType.T)\n", (26512, 26610), False, 'from pygom import DeterministicOde, Transition, SimulateOde, TransitionType, SquareLoss\n'), ((26620, 26725), 'pygom.Transition', 'Transition', ([], {'origin': '"""S"""', 'destination': '"""S_c"""', 'equation': '"""c_2*(I+I_c)*S"""', 'transition_type': 'TransitionType.T'}), "(origin='S', destination='S_c', equation='c_2*(I+I_c)*S',\n transition_type=TransitionType.T)\n", (26630, 26725), False, 'from pygom import DeterministicOde, Transition, SimulateOde, TransitionType, SquareLoss\n'), ((26735, 26838), 'pygom.Transition', 'Transition', ([], {'origin': '"""S"""', 'destination': '"""S_u"""', 'equation': '"""k_u*(1-W)*S"""', 'transition_type': 'TransitionType.T'}), "(origin='S', destination='S_u', equation='k_u*(1-W)*S',\n transition_type=TransitionType.T)\n", (26745, 26838), False, 'from pygom import DeterministicOde, Transition, SimulateOde, TransitionType, SquareLoss\n'), ((26848, 26947), 'pygom.Transition', 'Transition', ([], {'origin': '"""S_c"""', 'destination': '"""S"""', 'equation': 
'"""c_1*S_c"""', 'transition_type': 'TransitionType.T'}), "(origin='S_c', destination='S', equation='c_1*S_c',\n transition_type=TransitionType.T)\n", (26858, 26947), False, 'from pygom import DeterministicOde, Transition, SimulateOde, TransitionType, SquareLoss\n'), ((26957, 27076), 'pygom.Transition', 'Transition', ([], {'origin': '"""S_c"""', 'destination': '"""I_c"""', 'equation': '"""c_0*beta*(I+c_0*I_c)*S_c"""', 'transition_type': 'TransitionType.T'}), "(origin='S_c', destination='I_c', equation=\n 'c_0*beta*(I+c_0*I_c)*S_c', transition_type=TransitionType.T)\n", (26967, 27076), False, 'from pygom import DeterministicOde, Transition, SimulateOde, TransitionType, SquareLoss\n'), ((27085, 27192), 'pygom.Transition', 'Transition', ([], {'origin': '"""S_c"""', 'destination': '"""S_u"""', 'equation': '"""k_u*(1-W)*S_c"""', 'transition_type': 'TransitionType.T'}), "(origin='S_c', destination='S_u', equation='k_u*(1-W)*S_c',\n transition_type=TransitionType.T)\n", (27095, 27192), False, 'from pygom import DeterministicOde, Transition, SimulateOde, TransitionType, SquareLoss\n'), ((27202, 27301), 'pygom.Transition', 'Transition', ([], {'origin': '"""S_u"""', 'destination': '"""S"""', 'equation': '"""k_1*S_u"""', 'transition_type': 'TransitionType.T'}), "(origin='S_u', destination='S', equation='k_1*S_u',\n transition_type=TransitionType.T)\n", (27212, 27301), False, 'from pygom import DeterministicOde, Transition, SimulateOde, TransitionType, SquareLoss\n'), ((27314, 27426), 'pygom.Transition', 'Transition', ([], {'origin': '"""S_u"""', 'destination': '"""I"""', 'equation': '"""beta*(I+c_0*I_c)*S_u"""', 'transition_type': 'TransitionType.T'}), "(origin='S_u', destination='I', equation='beta*(I+c_0*I_c)*S_u',\n transition_type=TransitionType.T)\n", (27324, 27426), False, 'from pygom import DeterministicOde, Transition, SimulateOde, TransitionType, SquareLoss\n'), ((27440, 27545), 'pygom.Transition', 'Transition', ([], {'origin': '"""I"""', 'destination': '"""I_c"""', 
'equation': '"""c_2*(I+I_c)*I"""', 'transition_type': 'TransitionType.T'}), "(origin='I', destination='I_c', equation='c_2*(I+I_c)*I',\n transition_type=TransitionType.T)\n", (27450, 27545), False, 'from pygom import DeterministicOde, Transition, SimulateOde, TransitionType, SquareLoss\n'), ((27559, 27657), 'pygom.Transition', 'Transition', ([], {'origin': '"""I"""', 'destination': '"""R"""', 'equation': '"""gamma*I"""', 'transition_type': 'TransitionType.T'}), "(origin='I', destination='R', equation='gamma*I', transition_type\n =TransitionType.T)\n", (27569, 27657), False, 'from pygom import DeterministicOde, Transition, SimulateOde, TransitionType, SquareLoss\n'), ((27667, 27762), 'pygom.Transition', 'Transition', ([], {'origin': '"""I"""', 'destination': '"""D"""', 'equation': '"""mu*I"""', 'transition_type': 'TransitionType.T'}), "(origin='I', destination='D', equation='mu*I', transition_type=\n TransitionType.T)\n", (27677, 27762), False, 'from pygom import DeterministicOde, Transition, SimulateOde, TransitionType, SquareLoss\n'), ((27772, 27871), 'pygom.Transition', 'Transition', ([], {'origin': '"""I_c"""', 'destination': '"""I"""', 'equation': '"""c_1*I_c"""', 'transition_type': 'TransitionType.T'}), "(origin='I_c', destination='I', equation='c_1*I_c',\n transition_type=TransitionType.T)\n", (27782, 27871), False, 'from pygom import DeterministicOde, Transition, SimulateOde, TransitionType, SquareLoss\n'), ((27881, 27982), 'pygom.Transition', 'Transition', ([], {'origin': '"""I_c"""', 'destination': '"""R"""', 'equation': '"""gamma*I_c"""', 'transition_type': 'TransitionType.T'}), "(origin='I_c', destination='R', equation='gamma*I_c',\n transition_type=TransitionType.T)\n", (27891, 27982), False, 'from pygom import DeterministicOde, Transition, SimulateOde, TransitionType, SquareLoss\n'), ((27993, 28091), 'pygom.Transition', 'Transition', ([], {'origin': '"""I_c"""', 'destination': '"""D"""', 'equation': '"""mu*I_c"""', 'transition_type': 
'TransitionType.T'}), "(origin='I_c', destination='D', equation='mu*I_c',\n transition_type=TransitionType.T)\n", (28003, 28091), False, 'from pygom import DeterministicOde, Transition, SimulateOde, TransitionType, SquareLoss\n'), ((28101, 28195), 'pygom.Transition', 'Transition', ([], {'origin': '"""W"""', 'destination': '"""D"""', 'equation': '"""0*W"""', 'transition_type': 'TransitionType.T'}), "(origin='W', destination='D', equation='0*W', transition_type=\n TransitionType.T)\n", (28111, 28195), False, 'from pygom import DeterministicOde, Transition, SimulateOde, TransitionType, SquareLoss\n'), ((28227, 28322), 'pygom.Transition', 'Transition', ([], {'origin': '"""W"""', 'equation': '"""k_w*W*(1-kappa*S_c-W)"""', 'transition_type': 'TransitionType.B'}), "(origin='W', equation='k_w*W*(1-kappa*S_c-W)', transition_type=\n TransitionType.B)\n", (28237, 28322), False, 'from pygom import DeterministicOde, Transition, SimulateOde, TransitionType, SquareLoss\n'), ((29265, 29366), 'pygom.Transition', 'Transition', ([], {'origin': '"""S"""', 'equation': '"""-beta*(I+c_0*I_c)*S+c_1*S_c-c_2*(I+I_c)*S-k_u*(1-W)*S+k_1*S_u"""'}), "(origin='S', equation=\n '-beta*(I+c_0*I_c)*S+c_1*S_c-c_2*(I+I_c)*S-k_u*(1-W)*S+k_1*S_u')\n", (29275, 29366), False, 'from pygom import DeterministicOde, Transition, SimulateOde, TransitionType, SquareLoss\n'), ((29375, 29478), 'pygom.Transition', 'Transition', ([], {'origin': '"""S_c"""', 'equation': '"""-c_0*beta*(I+c_0*I_c)*S_c-c_1*S_c+c_2*(I+I_c)*S-k_u*(1-W)*S_c"""'}), "(origin='S_c', equation=\n '-c_0*beta*(I+c_0*I_c)*S_c-c_1*S_c+c_2*(I+I_c)*S-k_u*(1-W)*S_c')\n", (29385, 29478), False, 'from pygom import DeterministicOde, Transition, SimulateOde, TransitionType, SquareLoss\n'), ((29487, 29576), 'pygom.Transition', 'Transition', ([], {'origin': '"""S_u"""', 'equation': '"""-beta*(I+c_0*I_c)*S_u+k_u*(1-W)*(S+S_c)-k_1*S_u"""'}), "(origin='S_u', equation=\n '-beta*(I+c_0*I_c)*S_u+k_u*(1-W)*(S+S_c)-k_1*S_u')\n", (29497, 29576), False, 'from pygom 
import DeterministicOde, Transition, SimulateOde, TransitionType, SquareLoss\n'), ((29585, 29679), 'pygom.Transition', 'Transition', ([], {'origin': '"""E"""', 'equation': '"""beta*(I+c_0*I_c)*(S+S_u)-alpha*E+c_1*E_c-c_2*(I+I_c)*E"""'}), "(origin='E', equation=\n 'beta*(I+c_0*I_c)*(S+S_u)-alpha*E+c_1*E_c-c_2*(I+I_c)*E')\n", (29595, 29679), False, 'from pygom import DeterministicOde, Transition, SimulateOde, TransitionType, SquareLoss\n'), ((29688, 29786), 'pygom.Transition', 'Transition', ([], {'origin': '"""E_c"""', 'equation': '"""c_0*beta*(I+c_0*I_c)*S_c-alpha*E_c-c_1*E_c+c_2*(I+I_c)*E"""'}), "(origin='E_c', equation=\n 'c_0*beta*(I+c_0*I_c)*S_c-alpha*E_c-c_1*E_c+c_2*(I+I_c)*E')\n", (29698, 29786), False, 'from pygom import DeterministicOde, Transition, SimulateOde, TransitionType, SquareLoss\n'), ((29795, 29872), 'pygom.Transition', 'Transition', ([], {'origin': '"""I"""', 'equation': '"""alpha*E-gamma*I-mu*I+c_1*I_c-c_2*(I+I_c)*I"""'}), "(origin='I', equation='alpha*E-gamma*I-mu*I+c_1*I_c-c_2*(I+I_c)*I')\n", (29805, 29872), False, 'from pygom import DeterministicOde, Transition, SimulateOde, TransitionType, SquareLoss\n'), ((29886, 29976), 'pygom.Transition', 'Transition', ([], {'origin': '"""I_c"""', 'equation': '"""alpha*E_c-gamma*I_c-mu*I_c-c_1*I_c+c_2*(I+I_c)*I"""'}), "(origin='I_c', equation=\n 'alpha*E_c-gamma*I_c-mu*I_c-c_1*I_c+c_2*(I+I_c)*I')\n", (29896, 29976), False, 'from pygom import DeterministicOde, Transition, SimulateOde, TransitionType, SquareLoss\n'), ((29985, 30033), 'pygom.Transition', 'Transition', ([], {'origin': '"""R"""', 'equation': '"""gamma*(I+I_c)"""'}), "(origin='R', equation='gamma*(I+I_c)')\n", (29995, 30033), False, 'from pygom import DeterministicOde, Transition, SimulateOde, TransitionType, SquareLoss\n'), ((30047, 30092), 'pygom.Transition', 'Transition', ([], {'origin': '"""D"""', 'equation': '"""mu*(I+I_c)"""'}), "(origin='D', equation='mu*(I+I_c)')\n", (30057, 30092), False, 'from pygom import DeterministicOde, Transition, 
SimulateOde, TransitionType, SquareLoss\n'), ((30106, 30162), 'pygom.Transition', 'Transition', ([], {'origin': '"""W"""', 'equation': '"""k_w*W*(1-kappa*S_c-W)"""'}), "(origin='W', equation='k_w*W*(1-kappa*S_c-W)')\n", (30116, 30162), False, 'from pygom import DeterministicOde, Transition, SimulateOde, TransitionType, SquareLoss\n'), ((31257, 31389), 'pygom.Transition', 'Transition', ([], {'origin': '"""S"""', 'equation': '"""-(beta_1*(I_1+c_0*I_c)+beta_2*I_2+beta_3*I_3)*S+c_1*S_c-c_2*(I_3)*S-k_u*(1-W)*S+k_1*S_u"""'}), "(origin='S', equation=\n '-(beta_1*(I_1+c_0*I_c)+beta_2*I_2+beta_3*I_3)*S+c_1*S_c-c_2*(I_3)*S-k_u*(1-W)*S+k_1*S_u'\n )\n", (31267, 31389), False, 'from pygom import DeterministicOde, Transition, SimulateOde, TransitionType, SquareLoss\n'), ((31393, 31527), 'pygom.Transition', 'Transition', ([], {'origin': '"""S_c"""', 'equation': '"""-c_0*(beta_1*(I_1+c_0*I_c)+beta_2*I_2+beta_3*I_3)*S_c-c_1*S_c+c_2*(I_3)*S-k_u*(1-W)*S_c"""'}), "(origin='S_c', equation=\n '-c_0*(beta_1*(I_1+c_0*I_c)+beta_2*I_2+beta_3*I_3)*S_c-c_1*S_c+c_2*(I_3)*S-k_u*(1-W)*S_c'\n )\n", (31403, 31527), False, 'from pygom import DeterministicOde, Transition, SimulateOde, TransitionType, SquareLoss\n'), ((31531, 31653), 'pygom.Transition', 'Transition', ([], {'origin': '"""S_u"""', 'equation': '"""-(beta_1*(I_1+c_0*I_c)+beta_2*I_2+beta_3*I_3)*S_u+k_u*(1-W)*(S+S_c)-k_1*S_u"""'}), "(origin='S_u', equation=\n '-(beta_1*(I_1+c_0*I_c)+beta_2*I_2+beta_3*I_3)*S_u+k_u*(1-W)*(S+S_c)-k_1*S_u'\n )\n", (31541, 31653), False, 'from pygom import DeterministicOde, Transition, SimulateOde, TransitionType, SquareLoss\n'), ((31657, 31713), 'pygom.Transition', 'Transition', ([], {'origin': '"""W"""', 'equation': '"""k_w*W*(1-kappa*S_c-W)"""'}), "(origin='W', equation='k_w*W*(1-kappa*S_c-W)')\n", (31667, 31713), False, 'from pygom import DeterministicOde, Transition, SimulateOde, TransitionType, SquareLoss\n'), ((31727, 31823), 'pygom.Transition', 'Transition', ([], {'origin': '"""E"""', 'equation': 
'"""beta_1*(I_1+c_0*I_c)*(S+S_u)-alpha*E-c_2*(I_3)*E+c_1*E_c"""'}), "(origin='E', equation=\n 'beta_1*(I_1+c_0*I_c)*(S+S_u)-alpha*E-c_2*(I_3)*E+c_1*E_c')\n", (31737, 31823), False, 'from pygom import DeterministicOde, Transition, SimulateOde, TransitionType, SquareLoss\n'), ((31832, 31932), 'pygom.Transition', 'Transition', ([], {'origin': '"""E_c"""', 'equation': '"""c_0*beta_1*(I_1+c_0*I_c)*S_c-alpha*E_c+c_2*(I_3)*E-c_1*E_c"""'}), "(origin='E_c', equation=\n 'c_0*beta_1*(I_1+c_0*I_c)*S_c-alpha*E_c+c_2*(I_3)*E-c_1*E_c')\n", (31842, 31932), False, 'from pygom import DeterministicOde, Transition, SimulateOde, TransitionType, SquareLoss\n'), ((31941, 32032), 'pygom.Transition', 'Transition', ([], {'origin': '"""I_1"""', 'equation': '"""alpha*E-gamma_1*I_1-p_1*I_1-c_2*(I_3)*I_1+c_1*I_c"""'}), "(origin='I_1', equation=\n 'alpha*E-gamma_1*I_1-p_1*I_1-c_2*(I_3)*I_1+c_1*I_c')\n", (31951, 32032), False, 'from pygom import DeterministicOde, Transition, SimulateOde, TransitionType, SquareLoss\n'), ((32041, 32134), 'pygom.Transition', 'Transition', ([], {'origin': '"""I_c"""', 'equation': '"""alpha*E_c-gamma_1*I_c-p_1*I_c+c_2*(I_3)*I_1-c_1*I_c"""'}), "(origin='I_c', equation=\n 'alpha*E_c-gamma_1*I_c-p_1*I_c+c_2*(I_3)*I_1-c_1*I_c')\n", (32051, 32134), False, 'from pygom import DeterministicOde, Transition, SimulateOde, TransitionType, SquareLoss\n'), ((32175, 32245), 'pygom.Transition', 'Transition', ([], {'origin': '"""I_2"""', 'equation': '"""p_1*(I_1+I_c)-gamma_2*I_2-p_2*I_2"""'}), "(origin='I_2', equation='p_1*(I_1+I_c)-gamma_2*I_2-p_2*I_2')\n", (32185, 32245), False, 'from pygom import DeterministicOde, Transition, SimulateOde, TransitionType, SquareLoss\n'), ((32259, 32322), 'pygom.Transition', 'Transition', ([], {'origin': '"""I_3"""', 'equation': '"""p_2*I_2-gamma_3*I_3-mu*I_3"""'}), "(origin='I_3', equation='p_2*I_2-gamma_3*I_3-mu*I_3')\n", (32269, 32322), False, 'from pygom import DeterministicOde, Transition, SimulateOde, TransitionType, SquareLoss\n'), ((32392, 
32468), 'pygom.Transition', 'Transition', ([], {'origin': '"""R"""', 'equation': '"""gamma_1*(I_1+I_c)+gamma_2*I_2+gamma_3*I_3"""'}), "(origin='R', equation='gamma_1*(I_1+I_c)+gamma_2*I_2+gamma_3*I_3')\n", (32402, 32468), False, 'from pygom import DeterministicOde, Transition, SimulateOde, TransitionType, SquareLoss\n'), ((32482, 32523), 'pygom.Transition', 'Transition', ([], {'origin': '"""D"""', 'equation': '"""mu*I_3"""'}), "(origin='D', equation='mu*I_3')\n", (32492, 32523), False, 'from pygom import DeterministicOde, Transition, SimulateOde, TransitionType, SquareLoss\n'), ((39853, 39875), 'numpy.array', 'np.array', (['[0, 1, 0, 0]'], {}), '([0, 1, 0, 0])\n', (39861, 39875), True, 'import numpy as np\n')] |
import os
import warnings
import numpy as np
from torch import nn
import torch
import math
import torch.optim as optim
from sklearn.exceptions import UndefinedMetricWarning
from sklearn.preprocessing import LabelEncoder
from sklearn.utils import compute_class_weight
from sklearn.metrics import accuracy_score
from sklearn.metrics import f1_score
from sklearn.metrics import recall_score
from sys import platform as sys_pf
if sys_pf == 'darwin':
import matplotlib
matplotlib.use("TkAgg")
import matplotlib.pyplot as plt
from torch.utils.data import DataLoader
from config import EMB_PATH
from dataloading import SentenceDataset
from models import BaselineDNN
from training import train_dataset, eval_dataset
from utils.load_datasets import load_MR, load_Semeval2017A
from utils.load_embeddings import load_word_vectors
def reject_outliers(data, m=2.):
    """Filter out elements of *data* that lie far from the median.

    Uses the median-absolute-deviation criterion: an element is kept when
    its absolute deviation from the median is smaller than ``m`` times the
    median of all such deviations.

    :param data: 1-D numpy array of values
    :param m: cut-off multiplier (larger keeps more points)
    :return: numpy array containing only the non-outlier elements
    """
    d = np.abs(data - np.median(data))
    mdev = np.median(d)
    # BUG FIX: when every element equals the median, mdev == 0 and the old
    # code evaluated data[0. < m], i.e. indexed with a *scalar* boolean,
    # which prepends a spurious axis instead of returning the data intact.
    if not mdev:
        return data
    return data[d / mdev < m]
def get_class_labels(y):
    """Return the sorted array of unique class labels appearing in *y*."""
    unique_labels = np.unique(y)
    return unique_labels
def get_class_weights(y):
    """
    Returns the normalized weights for each class
    based on the frequencies of the samples
    :param y: list of true labels (the labels must be hashable)
    :return: dictionary with the weight for each class
    """
    labels = np.unique(y)
    weights = compute_class_weight('balanced', labels, y)
    return dict(zip(labels, weights))
def class_weigths(targets, to_pytorch=False):
    """Compute balanced class weights for *targets*.

    When ``to_pytorch`` is True, returns a ``torch.FloatTensor`` of weights
    ordered by sorted class label (the order CrossEntropyLoss expects).

    NOTE(review): when ``to_pytorch`` is False this returns the class
    *labels*, not the weight mapping — that looks unintended, but the
    behavior is preserved here in case callers depend on it.
    """
    labels = get_class_labels(targets)
    if not to_pytorch:
        return labels
    weight_map = get_class_weights(targets)
    return torch.FloatTensor([weight_map[label] for label in sorted(labels)])
# silence f1/recall warnings raised for classes that receive no predictions
warnings.filterwarnings("ignore", category=UndefinedMetricWarning)

########################################################
# Configuration
########################################################
# Download the embeddings of your choice
# for example http://nlp.stanford.edu/data/glove.6B.zip
# 1 - point to the pretrained embeddings file (must be in /embeddings folder)
EMBEDDINGS = os.path.join(EMB_PATH, "glove.6B.50d.txt")
# 2 - set the correct dimensionality of the embeddings
EMB_DIM = 50
EMB_TRAINABLE = False
BATCH_SIZE = 50
EPOCHS = 50
DATASET = "Semeval2017A"  # options: "MR", "Semeval2017A"
# if your computer has a CUDA compatible gpu use it, otherwise use the cpu
DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")

########################################################
# Define PyTorch datasets and dataloaders
########################################################
# load word embeddings
print("loading word embeddings...")
word2idx, idx2word, embeddings = load_word_vectors(EMBEDDINGS, EMB_DIM)

# load the raw data
if DATASET == "Semeval2017A":
    X_train, y_train, X_test, y_test = load_Semeval2017A()
elif DATASET == "MR":
    X_train, y_train, X_test, y_test = load_MR()
else:
    raise ValueError("Invalid dataset")

# convert data labels from strings to integers
le = LabelEncoder()
print("###############EX1###################")
y_train = le.fit_transform(y_train)  # EX1
# BUG FIX: only *fit* the encoder on the training labels; calling
# fit_transform on the test labels re-learned the mapping and could
# silently remap classes if the two label sets differ.
y_test = le.transform(y_test)  # EX1
n_classes = le.classes_.size  # EX1 - LabelEncoder.classes_.size
y_train_temp = list(le.inverse_transform(y_train))
y_test_temp = list(le.inverse_transform(y_test))
# sanity check: show a few (string label, encoded label) pairs
for i in range(10):
    print(y_train_temp[i], y_train[i])

# Define our PyTorch-based Dataset
print("###############EX2###################")
# Initialize train_set's and test_set's average length to zero; it is
# updated below once the outlier-free average has been computed.
train_set = SentenceDataset(X_train, y_train, word2idx, 0)
test_set = SentenceDataset(X_test, y_test, word2idx, 0)
for i in range(10):
    print(train_set.data[i], train_set.labels[i])

print("###############EX3###################")
print("calculating average length with and witout outliers...")
#############################################################################
# TRAIN LENGTHS, AVERAGE TRAIN LENGTH, TRAIN WORD EMBEDDINGS
#############################################################################
train_lengths = [len(sentence) for sentence in train_set.data]
train_lengths_without_outliers_1 = list(reject_outliers(np.array(train_lengths)))
train_avg_length = int(np.mean(train_lengths))
train_avg_length_without_outliers_1 = int(np.mean(train_lengths_without_outliers_1))
# use the outlier-free average as the fixed sentence length for padding/truncation
train_set.avg_length = train_avg_length_without_outliers_1
print("Average length with outliers is: ", train_avg_length)
print("Average length without outliers is: ", train_avg_length_without_outliers_1)
print("printing 5 word embeddings in the original and the transformed form using average legngth...")
# each element is the dataset item tuple: (encoded sentence, label, ...)
train_word_embeddings = [train_set[index] for index in range(len(train_set))]
for i in range(5):
    print("WORD EMBEDDING ", i)
    print("sentence: ", X_train[i], ", target: ", y_train_temp[i])
    print("sentence's word embedding: ", train_word_embeddings[i][0], ", label: ", train_word_embeddings[i][1])
    print()

#############################################################################
# TEST LENGTHS, AVERAGE TEST LENGTH, TEST WORD EMBEDDINGS
#############################################################################
test_lengths = [len(sentence) for sentence in test_set.data]
test_lengths_without_outliers_1 = list(reject_outliers(np.array(test_lengths)))
test_avg_length = int(np.mean(test_lengths))
test_avg_length_without_outliers_1 = int(np.mean(test_lengths_without_outliers_1))
test_set.avg_length = test_avg_length_without_outliers_1

# EX4 - Define our PyTorch-based DataLoader
train_loader = DataLoader(dataset=train_set, batch_size=BATCH_SIZE, shuffle=True)
test_loader = DataLoader(dataset=test_set, batch_size=BATCH_SIZE, shuffle=False)

#############################################################################
# Model Definition (Model, Loss Function, Optimizer)
#############################################################################
model = BaselineDNN(output_size=n_classes,  # EX8
                    embeddings=embeddings,
                    trainable_emb=EMB_TRAINABLE)

# keep only the encoded sentences (drop the rest of each item tuple)
train_word_embeddings = [x[0] for x in train_word_embeddings]
# BUG FIX: "torch.torch.from_numpy" only worked through the accidental
# self-reference torch.torch; use the canonical torch.from_numpy.
train_word_embeddings_torched = torch.from_numpy(np.array(train_word_embeddings))
train_lengths_torched = torch.from_numpy(np.asarray(train_lengths))

# move the model weights to cpu or gpu
model.to(DEVICE)

# We optimize ONLY those parameters that are trainable (p.requires_grad==True)
criterion = nn.CrossEntropyLoss()  # EX8: multi-class classification loss
parameters = []  # EX8
for p in model.parameters():
    if p.requires_grad:
        parameters.append(p)
optimizer = optim.Adam(parameters,
                       lr=5e-4,
                       betas=(0.9, 0.999),
                       eps=1e-08,
                       weight_decay=1e-4)  # EX8

#############################################################################
# Training Pipeline
#############################################################################
losses_train = []
losses_test = []
# NOTE(review): prev_loss / e look like leftovers of an unimplemented
# early-stopping criterion; kept for compatibility but currently unused.
prev_loss = 100
e = 0.1
for epoch in range(EPOCHS):
    train_dataset(epoch, train_loader, model, criterion, optimizer)
    # evaluate the performance of the model, on both data sets
    train_loss, (y_train_gold, y_train_pred) = eval_dataset(train_loader, model, criterion)
    test_loss, (y_test_gold, y_test_pred) = eval_dataset(test_loader, model, criterion)
    losses_train.append(train_loss)
    losses_test.append(test_loss)
    prev_loss = test_loss

# report the metrics of the final epoch
print('F1_train: {}'.format(f1_score(y_train_gold, y_train_pred, average="macro")))
print('Accuracy_train: {}'.format(accuracy_score(y_train_gold, y_train_pred)))
print('Recall_train: {}'.format(recall_score(y_train_gold, y_train_pred, average="macro")))
print('F1_test: {}'.format(f1_score(y_test_gold, y_test_pred, average="macro")))
print('Accuracy_test: {}'.format(accuracy_score(y_test_gold, y_test_pred)))
print('Recall_test: {}'.format(recall_score(y_test_gold, y_test_pred, average="macro")))

# plot the learning curves for both splits
losses_train_arr = np.array(losses_train)
losses_test_arr = np.array(losses_test)
fig = plt.figure()
plt.plot(losses_train, label="train data")
plt.plot(losses_test, label="test data")
fig.suptitle('Loss - epochs for both train and test set', fontsize=20)
plt.xlabel('epochs', fontsize=18)
plt.ylabel('cummulative running loss', fontsize=16)
plt.legend()
plt.show()
"dataloading.SentenceDataset",
"models.BaselineDNN",
"sklearn.metrics.accuracy_score",
"matplotlib.pyplot.figure",
"numpy.mean",
"sklearn.metrics.f1_score",
"training.eval_dataset",
"os.path.join",
"numpy.unique",
"torch.utils.data.DataLoader",
"sklearn.preprocessing.LabelEncoder",
"utils.load... | [((1647, 1713), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {'category': 'UndefinedMetricWarning'}), "('ignore', category=UndefinedMetricWarning)\n", (1670, 1713), False, 'import warnings\n'), ((2036, 2078), 'os.path.join', 'os.path.join', (['EMB_PATH', '"""glove.6B.50d.txt"""'], {}), "(EMB_PATH, 'glove.6B.50d.txt')\n", (2048, 2078), False, 'import os\n'), ((2653, 2691), 'utils.load_embeddings.load_word_vectors', 'load_word_vectors', (['EMBEDDINGS', 'EMB_DIM'], {}), '(EMBEDDINGS, EMB_DIM)\n', (2670, 2691), False, 'from utils.load_embeddings import load_word_vectors\n'), ((2973, 2987), 'sklearn.preprocessing.LabelEncoder', 'LabelEncoder', ([], {}), '()\n', (2985, 2987), False, 'from sklearn.preprocessing import LabelEncoder\n'), ((3518, 3564), 'dataloading.SentenceDataset', 'SentenceDataset', (['X_train', 'y_train', 'word2idx', '(0)'], {}), '(X_train, y_train, word2idx, 0)\n', (3533, 3564), False, 'from dataloading import SentenceDataset\n'), ((3575, 3619), 'dataloading.SentenceDataset', 'SentenceDataset', (['X_test', 'y_test', 'word2idx', '(0)'], {}), '(X_test, y_test, word2idx, 0)\n', (3590, 3619), False, 'from dataloading import SentenceDataset\n'), ((5523, 5589), 'torch.utils.data.DataLoader', 'DataLoader', ([], {'dataset': 'train_set', 'batch_size': 'BATCH_SIZE', 'shuffle': '(True)'}), '(dataset=train_set, batch_size=BATCH_SIZE, shuffle=True)\n', (5533, 5589), False, 'from torch.utils.data import DataLoader\n'), ((5604, 5670), 'torch.utils.data.DataLoader', 'DataLoader', ([], {'dataset': 'test_set', 'batch_size': 'BATCH_SIZE', 'shuffle': '(False)'}), '(dataset=test_set, batch_size=BATCH_SIZE, shuffle=False)\n', (5614, 5670), False, 'from torch.utils.data import DataLoader\n'), ((5889, 5980), 'models.BaselineDNN', 'BaselineDNN', ([], {'output_size': 'n_classes', 'embeddings': 'embeddings', 'trainable_emb': 'EMB_TRAINABLE'}), '(output_size=n_classes, embeddings=embeddings, trainable_emb=\n EMB_TRAINABLE)\n', (5900, 
5980), False, 'from models import BaselineDNN\n'), ((6392, 6413), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (6411, 6413), False, 'from torch import nn\n'), ((6642, 6731), 'torch.optim.Adam', 'optim.Adam', (['parameters'], {'lr': '(0.0005)', 'betas': '(0.9, 0.999)', 'eps': '(1e-08)', 'weight_decay': '(0.0001)'}), '(parameters, lr=0.0005, betas=(0.9, 0.999), eps=1e-08,\n weight_decay=0.0001)\n', (6652, 6731), True, 'import torch.optim as optim\n'), ((8052, 8074), 'numpy.array', 'np.array', (['losses_train'], {}), '(losses_train)\n', (8060, 8074), True, 'import numpy as np\n'), ((8093, 8114), 'numpy.array', 'np.array', (['losses_test'], {}), '(losses_test)\n', (8101, 8114), True, 'import numpy as np\n'), ((8122, 8134), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (8132, 8134), True, 'import matplotlib.pyplot as plt\n'), ((8135, 8177), 'matplotlib.pyplot.plot', 'plt.plot', (['losses_train'], {'label': '"""train data"""'}), "(losses_train, label='train data')\n", (8143, 8177), True, 'import matplotlib.pyplot as plt\n'), ((8179, 8219), 'matplotlib.pyplot.plot', 'plt.plot', (['losses_test'], {'label': '"""test data"""'}), "(losses_test, label='test data')\n", (8187, 8219), True, 'import matplotlib.pyplot as plt\n'), ((8292, 8325), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""epochs"""'], {'fontsize': '(18)'}), "('epochs', fontsize=18)\n", (8302, 8325), True, 'import matplotlib.pyplot as plt\n'), ((8326, 8377), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""cummulative running loss"""'], {'fontsize': '(16)'}), "('cummulative running loss', fontsize=16)\n", (8336, 8377), True, 'import matplotlib.pyplot as plt\n'), ((8378, 8390), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (8388, 8390), True, 'import matplotlib.pyplot as plt\n'), ((8391, 8401), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (8399, 8401), True, 'import matplotlib.pyplot as plt\n'), ((473, 496), 'matplotlib.use', 'matplotlib.use', 
(['"""TkAgg"""'], {}), "('TkAgg')\n", (487, 496), False, 'import matplotlib\n'), ((916, 928), 'numpy.median', 'np.median', (['d'], {}), '(d)\n', (925, 928), True, 'import numpy as np\n'), ((1019, 1031), 'numpy.unique', 'np.unique', (['y'], {}), '(y)\n', (1028, 1031), True, 'import numpy as np\n'), ((2783, 2802), 'utils.load_datasets.load_Semeval2017A', 'load_Semeval2017A', ([], {}), '()\n', (2800, 2802), False, 'from utils.load_datasets import load_MR, load_Semeval2017A\n'), ((4190, 4212), 'numpy.mean', 'np.mean', (['train_lengths'], {}), '(train_lengths)\n', (4197, 4212), True, 'import numpy as np\n'), ((4256, 4297), 'numpy.mean', 'np.mean', (['train_lengths_without_outliers_1'], {}), '(train_lengths_without_outliers_1)\n', (4263, 4297), True, 'import numpy as np\n'), ((5300, 5321), 'numpy.mean', 'np.mean', (['test_lengths'], {}), '(test_lengths)\n', (5307, 5321), True, 'import numpy as np\n'), ((5364, 5404), 'numpy.mean', 'np.mean', (['test_lengths_without_outliers_1'], {}), '(test_lengths_without_outliers_1)\n', (5371, 5404), True, 'import numpy as np\n'), ((6142, 6173), 'numpy.array', 'np.array', (['train_word_embeddings'], {}), '(train_word_embeddings)\n', (6150, 6173), True, 'import numpy as np\n'), ((6216, 6241), 'numpy.asarray', 'np.asarray', (['train_lengths'], {}), '(train_lengths)\n', (6226, 6241), True, 'import numpy as np\n'), ((7097, 7160), 'training.train_dataset', 'train_dataset', (['epoch', 'train_loader', 'model', 'criterion', 'optimizer'], {}), '(epoch, train_loader, model, criterion, optimizer)\n', (7110, 7160), False, 'from training import train_dataset, eval_dataset\n'), ((7272, 7316), 'training.eval_dataset', 'eval_dataset', (['train_loader', 'model', 'criterion'], {}), '(train_loader, model, criterion)\n', (7284, 7316), False, 'from training import train_dataset, eval_dataset\n'), ((7361, 7404), 'training.eval_dataset', 'eval_dataset', (['test_loader', 'model', 'criterion'], {}), '(test_loader, model, criterion)\n', (7373, 7404), False, 
'from training import train_dataset, eval_dataset\n'), ((1337, 1349), 'numpy.unique', 'np.unique', (['y'], {}), '(y)\n', (1346, 1349), True, 'import numpy as np\n'), ((2365, 2390), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (2388, 2390), False, 'import torch\n'), ((2864, 2873), 'utils.load_datasets.load_MR', 'load_MR', ([], {}), '()\n', (2871, 2873), False, 'from utils.load_datasets import load_MR, load_Semeval2017A\n'), ((4141, 4164), 'numpy.array', 'np.array', (['train_lengths'], {}), '(train_lengths)\n', (4149, 4164), True, 'import numpy as np\n'), ((5253, 5275), 'numpy.array', 'np.array', (['test_lengths'], {}), '(test_lengths)\n', (5261, 5275), True, 'import numpy as np\n'), ((888, 903), 'numpy.median', 'np.median', (['data'], {}), '(data)\n', (897, 903), True, 'import numpy as np\n'), ((7539, 7592), 'sklearn.metrics.f1_score', 'f1_score', (['y_train_gold', 'y_train_pred'], {'average': '"""macro"""'}), "(y_train_gold, y_train_pred, average='macro')\n", (7547, 7592), False, 'from sklearn.metrics import f1_score\n'), ((7633, 7675), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['y_train_gold', 'y_train_pred'], {}), '(y_train_gold, y_train_pred)\n', (7647, 7675), False, 'from sklearn.metrics import accuracy_score\n'), ((7714, 7771), 'sklearn.metrics.recall_score', 'recall_score', (['y_train_gold', 'y_train_pred'], {'average': '"""macro"""'}), "(y_train_gold, y_train_pred, average='macro')\n", (7726, 7771), False, 'from sklearn.metrics import recall_score\n'), ((7805, 7856), 'sklearn.metrics.f1_score', 'f1_score', (['y_test_gold', 'y_test_pred'], {'average': '"""macro"""'}), "(y_test_gold, y_test_pred, average='macro')\n", (7813, 7856), False, 'from sklearn.metrics import f1_score\n'), ((7896, 7936), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['y_test_gold', 'y_test_pred'], {}), '(y_test_gold, y_test_pred)\n', (7910, 7936), False, 'from sklearn.metrics import accuracy_score\n'), ((7974, 8029), 
'sklearn.metrics.recall_score', 'recall_score', (['y_test_gold', 'y_test_pred'], {'average': '"""macro"""'}), "(y_test_gold, y_test_pred, average='macro')\n", (7986, 8029), False, 'from sklearn.metrics import recall_score\n'), ((1385, 1397), 'numpy.unique', 'np.unique', (['y'], {}), '(y)\n', (1394, 1397), True, 'import numpy as np\n')] |
import numpy as np
def scale_img(img: np.ndarray,
              new_min: int = 0,
              new_max: int = 1) -> np.ndarray:
    """
    Linearly rescale *img* so its NaN-ignoring minimum/maximum map onto
    [new_min, new_max].  Useful for visualization.

    A constant image (min == max) has no dynamic range to stretch, so it is
    simply clipped into the target range.

    Parameters
    ----------
    img : np.ndarray
    new_min : int
    new_max : int

    Returns
    -------
    np.ndarray:
        Array with the same shape as ``img``, scaled to [new_min, new_max].
    """
    lo, hi = np.nanmin(img), np.nanmax(img)
    if lo == hi:
        # Degenerate case: every (non-NaN) pixel has the same value.
        return np.clip(img, new_min, new_max)
    rescaled = (img - lo) / (hi - lo) * (new_max - new_min)
    return rescaled + new_min
| [
"numpy.nanmax",
"numpy.nanmin",
"numpy.clip"
] | [((491, 505), 'numpy.nanmin', 'np.nanmin', (['img'], {}), '(img)\n', (500, 505), True, 'import numpy as np\n'), ((518, 532), 'numpy.nanmax', 'np.nanmax', (['img'], {}), '(img)\n', (527, 532), True, 'import numpy as np\n'), ((647, 677), 'numpy.clip', 'np.clip', (['img', 'new_min', 'new_max'], {}), '(img, new_min, new_max)\n', (654, 677), True, 'import numpy as np\n')] |
"""Compatibility fixes for older version of python, numpy and scipy
If you add content to this file, please give the version of the package
at which the fixe is no longer needed.
"""
# Authors: <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# <NAME>
#
# License: BSD 3 clause
from functools import update_wrapper
import functools
import numpy as np
import scipy.sparse as sp
import scipy
import scipy.stats
from scipy.sparse.linalg import lsqr as sparse_lsqr # noqa
from .._config import config_context, get_config
from ..externals._packaging.version import parse as parse_version
# Parsed versions of the installed numpy / scipy; used for feature gating
# throughout this module.
np_version = parse_version(np.__version__)
sp_version = parse_version(scipy.__version__)
if sp_version >= parse_version('1.4'):
    from scipy.sparse.linalg import lobpcg
else:
    # Backport of lobpcg functionality from scipy 1.4.0, can be removed
    # once support for sp_version < parse_version('1.4') is dropped
    # mypy error: Name 'lobpcg' already defined (possibly by an import)
    from ..externals._lobpcg import lobpcg  # type: ignore # noqa
def _object_dtype_isnan(X):
    # NaN is the only value that compares unequal to itself, so ``X != X``
    # yields an elementwise NaN mask, including for object-dtype arrays
    # where np.isnan would raise.
    return X != X
# TODO: replace by copy=False, when only scipy > 1.1 is supported.
def _astype_copy_false(X):
    """Return the kwargs to forward to ``astype`` so it avoids a copy.

    scipy >= 1.1 accepts ``copy=False`` in sparse ``astype``; for older
    scipy versions the flag is only safe on dense inputs, so an empty
    dict is returned for sparse ``X`` there.
    """
    supports_copy_false = sp_version >= parse_version('1.1') or not sp.issparse(X)
    return {'copy': False} if supports_copy_false else {}
def _joblib_parallel_args(**kwargs):
    """Translate joblib.Parallel arguments for joblib 0.11 compatibility.

    joblib >= 0.12 understands the ``prefer`` and ``require`` keyword
    arguments natively, so they are passed through untouched.  For joblib
    0.11 both are mapped onto an explicit ``backend`` selection.

    Parameters
    ----------
    prefer : str in {'processes', 'threads'} or None
        Soft hint to choose the default backend if no specific backend
        was selected with the parallel_backend context manager.
    require : 'sharedmem' or None
        Hard condstraint to select the backend. If set to 'sharedmem',
        the selected backend will be single-host and thread-based even
        if the user asked for a non-thread based backend with
        parallel_backend.

    See joblib.Parallel documentation for more details
    """
    import joblib

    if parse_version(joblib.__version__) >= parse_version('0.12'):
        # Modern joblib: nothing to translate.
        return kwargs

    extra_args = set(kwargs.keys()).difference({'prefer', 'require'})
    if extra_args:
        raise NotImplementedError('unhandled arguments %s with joblib %s'
                                  % (list(extra_args), joblib.__version__))

    args = {}
    if 'prefer' in kwargs:
        prefer = kwargs['prefer']
        if prefer not in ['threads', 'processes', None]:
            raise ValueError('prefer=%s is not supported' % prefer)
        backend_for = {'threads': 'threading',
                       'processes': 'multiprocessing',
                       None: None}
        args['backend'] = backend_for[prefer]
    if 'require' in kwargs:
        require = kwargs['require']
        if require not in [None, 'sharedmem']:
            raise ValueError('require=%s is not supported' % require)
        if require == 'sharedmem':
            # Hard constraint wins over any preference mapped above.
            args['backend'] = 'threading'
    return args
class loguniform(scipy.stats.reciprocal):
    """A class supporting log-uniform random variables.

    Parameters
    ----------
    low : float
        The minimum value
    high : float
        The maximum value

    Methods
    -------
    rvs(self, size=None, random_state=None)
        Generate log-uniform random variables

    The most useful method for Scikit-learn usage is highlighted here.
    For a full list, see
    `scipy.stats.reciprocal
    <https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.reciprocal.html>`_.
    This list includes all functions of ``scipy.stats`` continuous
    distributions such as ``pdf``.

    Notes
    -----
    This class generates values between ``low`` and ``high`` or

        low <= loguniform(low, high).rvs() <= high

    The logarithmic probability density function (PDF) is uniform. When
    ``x`` is a uniformly distributed random variable between 0 and 1, ``10**x``
    are random variables that are equally likely to be returned.

    This class is an alias to ``scipy.stats.reciprocal``, which uses the
    reciprocal distribution:
    https://en.wikipedia.org/wiki/Reciprocal_distribution

    Examples
    --------

    >>> from sklearn.utils.fixes import loguniform
    >>> rv = loguniform(1e-3, 1e1)
    >>> rvs = rv.rvs(random_state=42, size=1000)
    >>> rvs.min()  # doctest: +SKIP
    0.0010435856341129003
    >>> rvs.max()  # doctest: +SKIP
    9.97403052786026
    """
    # NOTE(review): body intentionally empty -- this subclass only provides a
    # friendlier name for scipy.stats.reciprocal.  scipy appears to ship a
    # native scipy.stats.loguniform in newer releases; confirm before
    # replacing this alias.
def _take_along_axis(arr, indices, axis):
    """Implements a simplified version of np.take_along_axis if numpy
    version < 1.15"""
    if np_version >= parse_version('1.15'):
        # Native implementation available -- delegate.
        return np.take_along_axis(arr=arr, indices=indices, axis=axis)
    else:
        if axis is None:
            arr = arr.flatten()
        if not np.issubdtype(indices.dtype, np.intp):
            raise IndexError('`indices` must be an integer array')
        if arr.ndim != indices.ndim:
            raise ValueError(
                "`indices` and `arr` must have the same number of dimensions")
        # Marker shape used to expand each arange to indices.ndim dimensions.
        shape_ones = (1,) * indices.ndim
        # dest_dims has a None placeholder at position `axis`; every other
        # entry names the dimension an orthogonal arange will run along.
        dest_dims = (
            list(range(axis)) +
            [None] +
            list(range(axis+1, indices.ndim))
        )
        # build a fancy index, consisting of orthogonal aranges, with the
        # requested index inserted at the right location
        fancy_index = []
        for dim, n in zip(dest_dims, arr.shape):
            if dim is None:
                # The requested indices select along `axis` itself.
                fancy_index.append(indices)
            else:
                # Broadcastable arange: -1 at `dim`, singleton elsewhere.
                ind_shape = shape_ones[:dim] + (-1,) + shape_ones[dim+1:]
                fancy_index.append(np.arange(n).reshape(ind_shape))
        fancy_index = tuple(fancy_index)
        return arr[fancy_index]
# remove when https://github.com/joblib/joblib/issues/1071 is fixed
def delayed(function):
    """Decorator used to capture the arguments of a function."""
    @functools.wraps(function)
    def delayed_function(*args, **kwargs):
        # Defer execution: return the config-capturing wrapper together with
        # the call arguments so joblib can invoke it later.
        return _FuncWrapper(function), args, kwargs
    return delayed_function
class _FuncWrapper:
    """Load the global configuration before calling the function."""
    def __init__(self, function):
        self.function = function
        # Snapshot the configuration at wrapping time so the eventual call
        # (possibly in a joblib worker) sees the submitter's settings.
        self.config = get_config()
        update_wrapper(self, self.function)
    def __call__(self, *args, **kwargs):
        # Re-apply the captured configuration around the wrapped call.
        with config_context(**self.config):
            return self.function(*args, **kwargs)
def linspace(start, stop, num=50, endpoint=True, retstep=False, dtype=None,
             axis=0):
    """Implements a simplified linspace function as of numpy version >= 1.16.

    As of numpy 1.16, the arguments start and stop can be array-like and
    there is an optional argument `axis`.
    For simplicity, we only allow 1d array-like to be passed to start and stop.
    See: https://github.com/numpy/numpy/pull/12388 and numpy 1.16 release
    notes about start and stop arrays for linspace logspace and geomspace.

    Returns
    -------
    out : ndarray of shape (num, n_start) or (num,)
        The output array with `n_start=start.shape[0]` columns.
    """
    if np_version < parse_version('1.16'):
        start = np.asanyarray(start) * 1.0
        stop = np.asanyarray(stop) * 1.0
        dt = np.result_type(start, stop, float(num))
        if dtype is None:
            dtype = dt
        if start.ndim == 0 == stop.ndim:
            # Scalar endpoints: plain old linspace handles this everywhere.
            return np.linspace(start=start, stop=stop, num=num,
                               endpoint=endpoint, retstep=retstep, dtype=dtype)
        if start.ndim != 1 or stop.ndim != 1 or start.shape != stop.shape:
            raise ValueError("start and stop must be 1d array-like of same"
                             " shape.")
        n_start = start.shape[0]
        out = np.empty((num, n_start), dtype=dtype)
        # BUG FIX: this previously used ``dtype=np.float``; that alias of the
        # builtin ``float`` was deprecated in NumPy 1.20 and removed in 1.24,
        # so use ``float`` directly (identical behavior).
        step = np.empty(n_start, dtype=float)
        # Build one column per (start, stop) pair; retstep=True also records
        # the per-column step.
        for i in range(n_start):
            out[:, i], step[i] = np.linspace(start=start[i], stop=stop[i],
                                            num=num, endpoint=endpoint,
                                            retstep=True, dtype=dtype)
        if axis != 0:
            out = np.moveaxis(out, 0, axis)
        if retstep:
            return out, step
        else:
            return out
    else:
        return np.linspace(start=start, stop=stop, num=num, endpoint=endpoint,
                           retstep=retstep, dtype=dtype, axis=axis)
| [
"numpy.moveaxis",
"numpy.empty",
"scipy.sparse.issparse",
"numpy.asanyarray",
"functools.update_wrapper",
"numpy.arange",
"numpy.linspace",
"functools.wraps",
"numpy.take_along_axis",
"numpy.issubdtype"
] | [((6125, 6150), 'functools.wraps', 'functools.wraps', (['function'], {}), '(function)\n', (6140, 6150), False, 'import functools\n'), ((4886, 4941), 'numpy.take_along_axis', 'np.take_along_axis', ([], {'arr': 'arr', 'indices': 'indices', 'axis': 'axis'}), '(arr=arr, indices=indices, axis=axis)\n', (4904, 4941), True, 'import numpy as np\n'), ((6476, 6511), 'functools.update_wrapper', 'update_wrapper', (['self', 'self.function'], {}), '(self, self.function)\n', (6490, 6511), False, 'from functools import update_wrapper\n'), ((7973, 8010), 'numpy.empty', 'np.empty', (['(num, n_start)'], {'dtype': 'dtype'}), '((num, n_start), dtype=dtype)\n', (7981, 8010), True, 'import numpy as np\n'), ((8026, 8059), 'numpy.empty', 'np.empty', (['n_start'], {'dtype': 'np.float'}), '(n_start, dtype=np.float)\n', (8034, 8059), True, 'import numpy as np\n'), ((8491, 8600), 'numpy.linspace', 'np.linspace', ([], {'start': 'start', 'stop': 'stop', 'num': 'num', 'endpoint': 'endpoint', 'retstep': 'retstep', 'dtype': 'dtype', 'axis': 'axis'}), '(start=start, stop=stop, num=num, endpoint=endpoint, retstep=\n retstep, dtype=dtype, axis=axis)\n', (8502, 8600), True, 'import numpy as np\n'), ((1413, 1427), 'scipy.sparse.issparse', 'sp.issparse', (['X'], {}), '(X)\n', (1424, 1427), True, 'import scipy.sparse as sp\n'), ((5025, 5062), 'numpy.issubdtype', 'np.issubdtype', (['indices.dtype', 'np.intp'], {}), '(indices.dtype, np.intp)\n', (5038, 5062), True, 'import numpy as np\n'), ((7378, 7398), 'numpy.asanyarray', 'np.asanyarray', (['start'], {}), '(start)\n', (7391, 7398), True, 'import numpy as np\n'), ((7420, 7439), 'numpy.asanyarray', 'np.asanyarray', (['stop'], {}), '(stop)\n', (7433, 7439), True, 'import numpy as np\n'), ((7609, 7707), 'numpy.linspace', 'np.linspace', ([], {'start': 'start', 'stop': 'stop', 'num': 'num', 'endpoint': 'endpoint', 'retstep': 'retstep', 'dtype': 'dtype'}), '(start=start, stop=stop, num=num, endpoint=endpoint, retstep=\n retstep, dtype=dtype)\n', (7620, 
7707), True, 'import numpy as np\n'), ((8126, 8226), 'numpy.linspace', 'np.linspace', ([], {'start': 'start[i]', 'stop': 'stop[i]', 'num': 'num', 'endpoint': 'endpoint', 'retstep': '(True)', 'dtype': 'dtype'}), '(start=start[i], stop=stop[i], num=num, endpoint=endpoint,\n retstep=True, dtype=dtype)\n', (8137, 8226), True, 'import numpy as np\n'), ((8353, 8378), 'numpy.moveaxis', 'np.moveaxis', (['out', '(0)', 'axis'], {}), '(out, 0, axis)\n', (8364, 8378), True, 'import numpy as np\n'), ((5855, 5867), 'numpy.arange', 'np.arange', (['n'], {}), '(n)\n', (5864, 5867), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
Created on Sat Mar 24 00:16:07 2020
@author: tranl
"""
import time, sys, math
import numpy as np
import pandas as pd
from tqdm import tqdm
from binancepy import MarketData
from indicators import Bbands, average_true_range
from utility import timestr, print_
###TRADING RULES
# QUANTPRE: per-symbol decimal precision for order quantities -- used as the
# ``ndigits`` argument when rounding a quantity (see Signal.__init__).
QUANTPRE = { 'BTCUSDT': 3, 'ETHUSDT': 3, 'BCHUSDT': 2, 'XRPUSDT': 1, 'EOSUSDT': 1, 'LTCUSDT': 3, \
             'TRXUSDT': 0, 'ETCUSDT': 2, 'LINKUSDT': 2, 'XLMUSDT': 0, 'ADAUSDT': 0, 'XMRUSDT': 3, \
             'DASHUSDT': 3, 'ZECUSDT': 3, 'XTZUSDT': 1, 'BNBUSDT': 2, 'ATOMUSDT': 2, 'ONTUSDT': 1, \
             'IOTAUSDT': 1, 'BATUSDT': 1, 'VETUSDT': 0, 'NEOUSDT': 2, 'QTUMUSDT': 1, 'IOSTUSDT': 0 }
# PRICEPRE: per-symbol decimal precision for prices.  Not referenced in this
# chunk -- presumably used to round limit prices elsewhere; verify at callers.
PRICEPRE = { 'BTCUSDT': 2, 'ETHUSDT': 2, 'BCHUSDT': 2, 'XRPUSDT': 4, 'EOSUSDT': 3, 'LTCUSDT': 2, \
             'TRXUSDT': 5, 'ETCUSDT':3, 'LINKUSDT': 3 , 'XLMUSDT': 5, 'ADAUSDT': 5, 'XMRUSDT': 2, \
             'DASHUSDT': 2, 'ZECUSDT': 2, 'XTZUSDT': 3, 'BNBUSDT': 3, 'ATOMUSDT': 3, 'ONTUSDT': 4, \
             'IOTAUSDT': 4, 'BATUSDT': 4, 'VETUSDT': 6, 'NEOUSDT': 3, 'QTUMUSDT': 3, 'IOSTUSDT': 6 }
# SIDE: numeric sign of a trade direction (+1 long, -1 short); used to compute
# signed P&L in Signal.exit_triggers.
SIDE = {'BUY': 1.0, 'SELL': -1.0}
# Millisecond conversion constants.
min_in_ms = int(60*1000)
sec_in_ms = 1000
###%%%
class Portfolio:
    def __init__(self, client, tradeIns=None):
        '''
        Portfolio class: tracks the tradable instruments, per-order sizing
        and per-side locks of an account accessed through *client*.

        client   : exchange client exposing balance() / position_info()
        tradeIns : optional list of tradable symbols (copied defensively)
        '''
        # BUG FIX: the old signature used a mutable default ``tradeIns=[]``.
        # Even though it was copied, a mutable default is a well-known
        # pitfall; default to None and build a fresh list per instance.
        self.client = client
        self.tradeIns = list(tradeIns) if tradeIns is not None else []
        self.orderSize = 0
        self.equityDist = {'BUY': 0, 'SELL': 0}
        self.locks = {'BUY': [], 'SELL': []}

    def equity_distribution(self, longPct=0.5, shortPct=0.5, currency='USDT', orderPct=0.1):
        '''
        Return the number of additional buy/sell orders the current equity
        supports, as (long_order, short_order).

        longPct  : percentage of equity assigned for buying
        shortPct : percentage of equity assigned for selling
        currency : asset whose balance is treated as equity
        orderPct : percentage of equity for a single order

        Side effects: updates self.orderSize and self.equityDist.
        '''
        balance = self.client.balance()
        equity, available = 0, 0
        for b in balance:
            if b['asset'] == currency:
                equity, available = float(b['balance']), float(b['withdrawAvailable'])
                break
        long_equity = longPct*equity
        short_equity = shortPct*equity
        info = pd.DataFrame(self.client.position_info())
        short_info = info[info['positionAmt'].astype(float) < 0]
        long_info = info[info['positionAmt'].astype(float) > 0]
        # Notional exposure currently held on each side (amount @ entry price).
        short_position = abs(short_info['positionAmt'].astype(float) @ short_info['entryPrice'].astype(float))
        long_position = abs(long_info['positionAmt'].astype(float) @ long_info['entryPrice'].astype(float))
        self.orderSize = round(orderPct*equity, 2)
        long_order = int((long_equity - long_position)/self.orderSize)
        short_order = int((short_equity - short_position)/self.orderSize)
        self.equityDist = {'BUY': long_order, 'SELL': short_order}
        return long_order, short_order

    def position_locks(self, prelocks=None):
        '''
        Check open positions, lock the corresponding side per symbol, and
        drop symbols locked on BOTH sides from self.tradeIns.

        prelocks : optional pre-existing {'BUY': [...], 'SELL': [...]} locks
        Returns the (possibly reduced) list of tradable instruments.
        '''
        # BUG FIX: the old default ``prelocks={'BUY': [], 'SELL': []}`` was a
        # mutable default whose lists were appended to below, so locks leaked
        # across successive calls.  Build a fresh dict per call instead.
        if prelocks is None:
            prelocks = {'BUY': [], 'SELL': []}
        info = self.client.position_info()
        self.locks = prelocks
        for pos in info:
            amt = float(pos['positionAmt'])
            if amt < 0 and not pos['symbol'] in self.locks['SELL']:
                self.locks['SELL'].append(pos['symbol'])
            elif amt > 0 and not pos['symbol'] in self.locks['BUY']:
                self.locks['BUY'].append(pos['symbol'])
        # A symbol locked on both sides cannot be traded at all.
        drop_out = set(self.locks['SELL']).intersection(self.locks['BUY'])
        for s in drop_out:
            self.tradeIns.remove(s)
        return self.tradeIns
###%%%
class TradingModel:
    def __init__( self,
                  symbol: str,
                  testnet: bool,
                  modelType: str,
                  marketData,
                  pdObserve: int,
                  pdEstimate: int,
                  features: dict = None,
                  inputData = None,
                  orderSize = 1.0, #USDT
                  breath: float = 0.01/100):
        '''
        Trading model for a single instrument.

        symbol     : instrument symbol, e.g. 'BTCUSDT'
        testnet    : whether the exchange testnet is targeted
        modelType  : strategy identifier; only 'bollinger' is handled here
        marketData : market-data client (server_time/candles_data)
        pdObserve  : observation period; timeLimit is derived as 10x this
        pdEstimate : lookback window for Bbands / ATR estimation
        features   : NOTE(review) -- accepted but never stored or used here;
                     confirm whether it is dead or consumed elsewhere
        inputData  : optional pre-loaded candle DataFrame
        orderSize  : order size in USDT
        breath     : small price margin (fraction)
        '''
        self.symbol = symbol
        self.testnet = testnet
        self.modelType = modelType
        self.marketData = marketData
        self.pdObserve = pdObserve
        self.pdEstimate = pdEstimate
        self.inputData = inputData
        self.timeLimit = int(self.pdObserve*10)
        self.orderSize = orderSize
        self.breath = breath
        # Sides currently forbidden for new signals ('BUY' and/or 'SELL').
        self.signalLock = []
    def add_signal_lock(self, slock=None):
        '''
        Add a signal to lock positions i.e. abandon BUY/SELL the instrument
        '''
        if (slock is not None) and (not slock in self.signalLock):
            self.signalLock.append(slock)
    def remove_signal_lock(self, slock=None):
        '''
        Remove a signal from lock positions i.e. allows BUY/SELL the instrument
        '''
        if (slock is not None) and (slock in self.signalLock):
            self.signalLock.remove(slock)
    def build_initial_input(self, period=180):
        '''
        Download and store historical data (``period`` one-minute candles),
        appending only rows newer than what is already cached.
        '''
        if self.modelType=='bollinger':
            min_in_candle = 1
            num_klns = period
            t_server = self.marketData.server_time()['serverTime']
            # Fetch `period` minutes of 1m candles ending at server time.
            t_start = t_server - num_klns*min_in_candle*60*1000
            df = klns_to_df(self.marketData.candles_data(interval='1m', startTime=t_start, limit=num_klns), ['_t', '_o', '_h', '_l', '_c', '_v'])
            if self.inputData is None:
                self.inputData = df
            else:
                # Keep only candles strictly newer than the cached tail.
                df = df[df['_t'] > self.inputData['_t'].iloc[-1]]
                self.inputData = self.inputData.append(df, ignore_index=True)
        return self.inputData
    def get_last_signal(self, dataObserve=None):
        '''
        Process the latest data for a potential signal; returns a dict with
        side/positionSide/_t/_p/atr on a Bollinger-band cross, else None.
        '''
        if self.modelType=='bollinger':
            # Append observations newer than the cached history.
            _data = dataObserve[dataObserve['_t'] > self.inputData['_t'].iloc[-1]]
            _data = self.inputData.append(_data, ignore_index=True)
            _, bb_up, bb_down = Bbands(_data['_c'], window=self.pdEstimate, numsd=2.5)
            # up cross
            crit1 = _data['_c'].shift(1) < bb_up.shift(1)
            crit2 = _data['_c'] > bb_up
            up_cross = _data[crit1 & crit2]
            # down cross
            crit1 = _data['_c'].shift(1) > bb_down.shift(1)
            crit2 = _data['_c'] < bb_down
            dn_cross = _data[crit1 & crit2]
            # Mean-reversion convention: sell on an upper-band breakout,
            # buy on a lower-band breakout.
            _data['side'] = np.zeros(_data.shape[0])
            _data.loc[up_cross.index, 'side'] = -1.
            _data.loc[dn_cross.index, 'side'] = 1.
            _side = _data['side'].iloc[-1]
            atr, _ = average_true_range(_data.copy(), period=self.pdEstimate, alpha=0.3, highlow=False)
            # Only emit the signal if that side is not locked.
            if _side == 1. and not 'BUY' in self.signalLock:
                return {'side': 'BUY', 'positionSide': 'LONG', '_t': _data['_t'].iloc[-1], '_p': _data['_c'].iloc[-1], 'atr' : atr}
            elif _side == -1. and not 'SELL' in self.signalLock:
                return {'side': 'SELL', 'positionSide': 'SHORT', '_t': _data['_t'].iloc[-1], '_p': _data['_c'].iloc[-1], 'atr' : atr}
        return None
#%%%%
class Signal:
def __init__(self,
symbol: str,
side: str,
size: float,
orderType: str,
positionSide: str = 'BOTH',
price: float = None,
startTime: int = time.time()*1000,
expTime: float = (time.time()+60)*1000,
stopLoss: float = None,
takeProfit: float = None,
timeLimit: int = None, #minutes
timeInForce: float = None):
'''
Signal class to monitor price movements
To change currency pair -> symbol = 'ethusdt'
To change side -> side = 'BUY'/'SELL'
To change order size -> size = float (dollar amount)
To change order type -> orderType = 'MARKET'/'LIMIT'
To change price -> price = float (required for 'LIMIT' order type)
stopLoss, takeProfit -- dollar amount
To change time in force -> timeInForce = 'GTC'/'IOC'/'FOK' (reuired for 'LIMIT' order type)
'''
self.symbol = symbol
self.side = side #BUY, SELL
self.positionSide = positionSide #LONG, SHORT
self.orderType = orderType #LIMIT, MARKET, STOP, TAKE_PROFIT
# predefined vars
self.price = float(price)
if size < self.price*10**(-QUANTPRE[symbol]):
size = self.price*10**(-QUANTPRE[symbol])*1.01
self.size = float(size) #USDT
self.quantity = round(self.size/self.price, QUANTPRE[self.symbol])
self.startTime = int(startTime)
self.expTime = expTime
# 3 exit barriers
if stopLoss is not None: self.stopLoss = round(float(stopLoss), 4)
else: self.stopLoss = None
if takeProfit is not None: self.takeProfit = round(float(takeProfit), 4)
else: self.takeProfit = None
if timeLimit is not None: self.timeLimit = int(timeLimit*sec_in_ms) # miliseconds
else: self.timeLimit = None
self.timeInForce = timeInForce
self.status = 'WAITING' #'ORDERED' #'ACTIVE' #'CNT_ORDERED' #'CLOSED' # 'EXPIRED'
self.limitPrice, self.orderTime = None, None
self.excPrice, self.excTime = None, None
self.cntlimitPrice, self.cntTime, self.cntType = None, None, None
self.clsPrice, self.clsTime = None, None
self.orderId = None
self.cntorderId = None
self.pricePath = []
self.exitSign = None
'''
Function to check and set STATUS of the signals :
- WAITING
- ORDERED
- ACTIVE
- CNT_ORDERED
- CLOSED
- EXPIRED
'''
def is_waiting(self):
return bool(self.status == 'WAITING')
def set_waiting(self):
self.status = 'WAITING'
def is_ordered(self):
return bool(self.status == 'ORDERED')
def set_ordered(self, orderId, orderTime=None, limitPrice=None):
self.status = 'ORDERED'
self.orderId = int(orderId)
self.orderTime, self.limitPrice = orderTime, limitPrice
def is_active(self):
return bool(self.status == 'ACTIVE')
def set_active(self, excTime=time.time()*1000, excPrice=None, excQty: float = None):
self.excPrice = float(excPrice)
self.excTime = int(excTime)
self.quantity = round(float(excQty), QUANTPRE[self.symbol])
self.status = 'ACTIVE'
def is_cnt_ordered(self):
return bool(self.status == 'CNT_ORDERED')
def set_cnt_ordered(self, cntorderId, cntType=None, cntTime=None, cntlimitPrice=None):
self.status = 'CNT_ORDERED'
self.cntorderId = int(cntorderId)
self.cntType, self.cntTime, self.cntlimitPrice = cntType, cntTime, cntlimitPrice
def is_closed(self):
return bool(self.status == 'CLOSED')
def set_closed(self, clsTime=time.time()*1000, clsPrice=None):
self.clsTime = int(clsTime)
if clsPrice is not None: self.clsPrice = float(clsPrice)
else: self.clsPrice = None
self.status = 'CLOSED'
def is_expired(self):
return bool(self.status == 'EXPIRED')
def set_expired(self):
self.status = 'EXPIRED'
    def get_quantity(self):
        '''
        Return the (rounded) order quantity in base-asset units.
        '''
        return self.quantity
def counter_order(self):
'''
Return counter (close) order with same size but opposite side
'''
if self.side=='BUY': side = 'SELL'
else: side = 'BUY'
if self.positionSide == 'LONG': posSide = 'SHORT'
elif self.positionSide =='SHORT': posSide = 'LONG'
else: posSide = 'BOTH'
counter = {'side': side, 'positionSide': posSide, 'type': self.orderType, \
'amt': self.get_quantity(),'TIF': self.timeInForce}
return counter
def path_update(self, lastPrice, lastTime):
'''
Update last traded prices to pricePath
'''
self.pricePath.append({'timestamp': int(lastTime), 'price': float(lastPrice)})
    def get_price_path(self):
        '''
        Return the recorded price movements since entry as a DataFrame
        (one row per tick; columns 'timestamp' and 'price').
        '''
        return pd.DataFrame(self.pricePath)
def exit_triggers(self, lastTime=None, lastPrice=None, retrace=False):
'''
Return a exit signal upon 3 barrier triggers
'''
if not self.is_active() or len(self.pricePath)<=1:
return None, None
else:
exit_sign = None
if lastTime is None and lastPrice is None:
_t, _p = self.pricePath[-1]['timestamp'], self.pricePath[-1]['price']
pos = SIDE[self.side]*(_p - self.excPrice)
if self.takeProfit is not None and pos > self.takeProfit:
exit_sign = 'takeProfit'
if self.stopLoss is not None:
if retrace:
prices = pd.DataFrame(self.pricePath)
prices['pos'] = SIDE[self.side]*(prices['price'] - self.excPrice)
loss_idx = prices.idxmin(axis=0)['pos']
max_loss = prices.loc[loss_idx]['pos']
foundSL = (max_loss < -1.0*self.stopLoss) and (pos > -0.5*self.stopLoss)
else: foundSL = (pos < -1.0*self.stopLoss)
if foundSL: exit_sign = 'stopLoss'
if self.timeLimit is not None and _t - self.excTime >= self.timeLimit and pos > 0:
exit_sign = 'timeLimit'
self.exitSign = exit_sign
return exit_sign, pos
def __str__(self):
'''
Print out infomation of the signal
'''
s = 'Singal info: ' + self.symbol
gen_ = ' status:' + str(self.status) + ' side:' + str(self.side) + ' type:' + str(self.orderType) + ' quantity:' + str(self.get_quantity())
if self.is_waiting() or self.is_expired():
id_ = ' Id:None '
price_ = ' price:' + str(self.price) + ' time:' + timestr(self.startTime, end='s')
elif self.is_ordered():
id_ = ' Id:'+ str(self.orderId)
if self.orderType=='LIMIT':
price_ = ' price:' + str(self.limitPrice) + ' TIF:' + str(self.timeInForce) + ' time:' + timestr(self.startTime, end='s')
else: price_ = ' type:' + str(self.orderType) + ' time:' + timestr(self.orderTime, end='s')
elif self.is_active():
id_ = ' Id:'+ str(self.orderId)
if self.orderType=='LIMIT':
price_ = ' price:' + str(self.excPrice) + ' TIF:' + str(self.timeInForce) + ' time:' + timestr(self.excTime, end='s')
else: price_ = ' price:' + str(self.excPrice) + ' time:' + timestr(self.excTime, end='s')
elif self.is_cnt_ordered():
gen_ = ' status:' + str(self.status) + ' side:' + str(self.counter_order()['side']) + ' type:' + str(self.cntType) + ' quantity:' + str(self.get_quantity())
id_ = ' Id:'+ str(self.cntorderId)
if self.cntType=='LIMIT':
price_ = ' price:' + str(self.cntlimitPrice) + ' TIF:' + str(self.timeInForce) + ' time:' + timestr(self.cntTime, end='s')
else: price_ = ' type:' + str(self.cntType) + ' time:' + timestr(self.cntTime, end='s')
elif self.is_closed():
gen_ = ' status:' + str(self.status) + ' side:' + str(self.counter_order()['side']) + ' type:' + str(self.cntType) + ' quantity:' + str(self.get_quantity())
id_ = ' Id: ' + str(self.cntorderId)
price_ = ' price:' + str(self.clsPrice) + ' time:' + timestr(self.clsTime, end='s')
if self.stopLoss is None: sl_ = 'None'
else: sl_ = str(self.stopLoss)
if self.takeProfit is None: tp_ = 'None'
else: tp_ = str(self.takeProfit)
if self.timeLimit is None: tl_ = 'None'
else: tl_ = str(int(self.timeLimit/sec_in_ms))
exits_ = ' exits:[' + sl_ + ', ' + tp_ + ', ' + tl_ + ']'
s += id_ + gen_ + price_ + exits_
return s
###%%%
def klns_to_df(market_data, feats):
'''
Return a pd.DataFrame from candles data received from the exchange
'''
fts = list(str(f) for f in feats)
df_ = pd.DataFrame(market_data, columns = ['_t', '_o', '_h', '_l', '_c', '_v', 'close_time', 'quote_av', 'trades', 'tb_base_av', 'tb_quote_av', 'ignore'])
df_[['_o', '_h', '_l', '_c', '_v']] = df_[['_o', '_h', '_l', '_c', '_v']].astype(float)
return df_[fts]
| [
"pandas.DataFrame",
"indicators.Bbands",
"numpy.zeros",
"time.time",
"utility.timestr"
] | [((16587, 16737), 'pandas.DataFrame', 'pd.DataFrame', (['market_data'], {'columns': "['_t', '_o', '_h', '_l', '_c', '_v', 'close_time', 'quote_av', 'trades',\n 'tb_base_av', 'tb_quote_av', 'ignore']"}), "(market_data, columns=['_t', '_o', '_h', '_l', '_c', '_v',\n 'close_time', 'quote_av', 'trades', 'tb_base_av', 'tb_quote_av', 'ignore'])\n", (16599, 16737), True, 'import pandas as pd\n'), ((12594, 12622), 'pandas.DataFrame', 'pd.DataFrame', (['self.pricePath'], {}), '(self.pricePath)\n', (12606, 12622), True, 'import pandas as pd\n'), ((6163, 6217), 'indicators.Bbands', 'Bbands', (["_data['_c']"], {'window': 'self.pdEstimate', 'numsd': '(2.5)'}), "(_data['_c'], window=self.pdEstimate, numsd=2.5)\n", (6169, 6217), False, 'from indicators import Bbands, average_true_range\n'), ((6583, 6607), 'numpy.zeros', 'np.zeros', (['_data.shape[0]'], {}), '(_data.shape[0])\n', (6591, 6607), True, 'import numpy as np\n'), ((7558, 7569), 'time.time', 'time.time', ([], {}), '()\n', (7567, 7569), False, 'import time, sys, math\n'), ((10595, 10606), 'time.time', 'time.time', ([], {}), '()\n', (10604, 10606), False, 'import time, sys, math\n'), ((11287, 11298), 'time.time', 'time.time', ([], {}), '()\n', (11296, 11298), False, 'import time, sys, math\n'), ((7611, 7622), 'time.time', 'time.time', ([], {}), '()\n', (7620, 7622), False, 'import time, sys, math\n'), ((14387, 14419), 'utility.timestr', 'timestr', (['self.startTime'], {'end': '"""s"""'}), "(self.startTime, end='s')\n", (14394, 14419), False, 'from utility import timestr, print_\n'), ((13317, 13345), 'pandas.DataFrame', 'pd.DataFrame', (['self.pricePath'], {}), '(self.pricePath)\n', (13329, 13345), True, 'import pandas as pd\n'), ((14641, 14673), 'utility.timestr', 'timestr', (['self.startTime'], {'end': '"""s"""'}), "(self.startTime, end='s')\n", (14648, 14673), False, 'from utility import timestr, print_\n'), ((14745, 14777), 'utility.timestr', 'timestr', (['self.orderTime'], {'end': '"""s"""'}), "(self.orderTime, 
end='s')\n", (14752, 14777), False, 'from utility import timestr, print_\n'), ((14996, 15026), 'utility.timestr', 'timestr', (['self.excTime'], {'end': '"""s"""'}), "(self.excTime, end='s')\n", (15003, 15026), False, 'from utility import timestr, print_\n'), ((15098, 15128), 'utility.timestr', 'timestr', (['self.excTime'], {'end': '"""s"""'}), "(self.excTime, end='s')\n", (15105, 15128), False, 'from utility import timestr, print_\n'), ((15527, 15557), 'utility.timestr', 'timestr', (['self.cntTime'], {'end': '"""s"""'}), "(self.cntTime, end='s')\n", (15534, 15557), False, 'from utility import timestr, print_\n'), ((15627, 15657), 'utility.timestr', 'timestr', (['self.cntTime'], {'end': '"""s"""'}), "(self.cntTime, end='s')\n", (15634, 15657), False, 'from utility import timestr, print_\n'), ((15972, 16002), 'utility.timestr', 'timestr', (['self.clsTime'], {'end': '"""s"""'}), "(self.clsTime, end='s')\n", (15979, 16002), False, 'from utility import timestr, print_\n')] |
import numpy as np
from . import base
class ClassicMLP(object):
def __init__(self, num_inputs, num_hidden, num_outputs):
# initialize layers and activations
self.layer_hidden = base.BiasLayer(num_neurons=num_hidden,
num_inputs=num_inputs)
self.activ_hidden = base.SigmoidActivation()
self.layer_output = base.BiasLayer(num_neurons=num_outputs,
num_inputs=num_hidden)
self.activ_output = base.SigmoidActivation()
# set error function
self.error_func = base.sq_error
def evaluate(self, input_):
z_hidden = self.layer_hidden.feed_forward(input_)
y_hidden = self.activ_hidden.feed_forward(z_hidden)
z_output = self.layer_output.feed_forward(y_hidden)
y_output = self.activ_output.feed_forward(z_output)
return y_output
def get_weight_errors(self, input_, expected_output):
y_error_output = self.error_func(expected_output,
self.evaluate(input_))
# back propagate output layer
z_error_output = self.activ_output.back_propagate(y_error_output)
x_error_output, wcorr_output = self.layer_output.back_propagate(
z_error_output)
# back propagate hidden layer
z_error_hidden = self.activ_hidden.back_propagate(x_error_output)
x_error_hidden, wcorr_hidden = self.layer_hidden.back_propagate(
z_error_hidden)
return (wcorr_output, wcorr_hidden)
def train_online(self, input_, expected_output, learning_rate):
# get weight corrections by evaluation and back propagation
wcorr_output, wcorr_hidden = self.get_weight_errors(input_,
expected_output)
self.layer_output.correct_weights(acc_wcorr_output,
learning_rate=learning_rate)
self.layer_hidden.correct_weights(acc_wcorr_hidden,
learning_rate=learning_rate)
# return training error
return np.mean(np.abs(expected_output - self.evaluate(input_)))
def train_batch(self, inputs, expected_outputs, learning_rate):
acc_wcorr_output = np.zeros_like(self.layer_output.w)
acc_wcorr_hidden = np.zeros_like(self.layer_hidden.w)
for input_, expected_output in zip(inputs, expected_outputs):
# get weight errors by evaluation and back propagation
wcorr_output, wcorr_hidden = self.get_weight_errors(
input_, expected_output)
# accumulate weight errors
acc_wcorr_output += wcorr_output
acc_wcorr_hidden += wcorr_hidden
self.layer_output.correct_weights(acc_wcorr_output,
learning_rate=learning_rate)
self.layer_hidden.correct_weights(acc_wcorr_hidden,
learning_rate=learning_rate)
return np.mean(np.abs(expected_output - self.evaluate(input_)))
class SoftmaxMLP(ClassicMLP):
def __init__(self, num_inputs, num_hidden, num_outputs):
# initialize layers and activations
self.layer_hidden = base.BiasLayer(num_neurons=num_hidden,
num_inputs=num_inputs)
self.activ_hidden = base.SigmoidActivation()
self.layer_output = base.BiasLayer(num_neurons=num_outputs,
num_inputs=num_hidden)
self.activ_output = base.SoftmaxActivation()
# set error function
self.error_func = base.ce_softmax_error | [
"numpy.zeros_like"
] | [((2367, 2401), 'numpy.zeros_like', 'np.zeros_like', (['self.layer_output.w'], {}), '(self.layer_output.w)\n', (2380, 2401), True, 'import numpy as np\n'), ((2430, 2464), 'numpy.zeros_like', 'np.zeros_like', (['self.layer_hidden.w'], {}), '(self.layer_hidden.w)\n', (2443, 2464), True, 'import numpy as np\n')] |
'''
Copyright (c) Microsoft Corporation. All rights reserved.
Licensed under the MIT License.
'''
import os
import datetime
import argparse
import numpy as np
import pandas as pd
import torch
torch.backends.cudnn.deterministic = True
import utils
parser = argparse.ArgumentParser(description='X-ray embedding script')
parser.add_argument('--model', default='densenet',
choices=["histogram", "histogram-nozeros", "xrv", "covidnet", "densenet"],
help='Type of embedding to create'
)
parser.add_argument('--mask', default='unmasked',
choices=(
'unmasked',
'masked'
),
help='Choose between using unmasked or masked/equalized inputs'
)
parser.add_argument('--input', type=str, required=True, help='Either "covidx" to use the covidx dataset, or a path to a "metadata_preprocessed.csv" file')
parser.add_argument('--name', type=str, required=True, help='Name of the dataset, this is used as the first field in the output filename')
parser.add_argument('--output_dir', type=str, default="datasets/embeddings/", help='Path to an empty directory where outputs will be saved. This directory will be created if it does not exist.')
parser.add_argument('--overwrite', action='store_true', default=False, help='Ignore checking whether the output file already exists')
parser.add_argument('--gpu', type=int, default=0, help='ID of the GPU to run on.')
args = parser.parse_args()
def run_covidnet_model(sess, image_tensor, pred_tensor, images, global_max_pool=False, embedding_size=2048, batch_size=128):
num_samples = images.shape[0]
image_embeddings = np.zeros((num_samples, embedding_size), dtype=np.float32)
for i in range(0, num_samples, batch_size):
image_batch = images[i:i+batch_size]
out = sess.run(pred_tensor, feed_dict={image_tensor: image_batch})
if global_max_pool:
out = np.maximum(out, axis=(1,2))
else:
out = np.mean(out, axis=(1,2))
image_embeddings[i:i+batch_size] = out
return image_embeddings
def main():
print("Starting x-ray embedding script at %s" % (str(datetime.datetime.now())))
## Ensure files aren't deleted and output directory exists
output_fn = os.path.join(
args.output_dir,
f"{args.name}_{args.mask}_{args.model}.npy"
)
if os.path.exists(output_fn):
if args.overwrite:
print("WARNING: The output file already exists, but we are deleting that and moving on.")
else:
print("WARNING: The output file already exists and `--overwrite` was not specified, exiting...")
return
if os.path.exists(args.output_dir):
pass
else:
os.makedirs(args.output_dir, exist_ok=True)
## Load imagery
if args.input == "covidx":
if args.mask == "unmasked":
images = utils.get_raw_covidx_images(masked=False)
elif args.mask == "masked":
images = utils.get_raw_covidx_images(masked=True)
images = utils.transform_to_equalized(images)
else:
df = pd.read_csv(args.input)
if args.mask == "unmasked":
images = utils.get_images(df["unmasked_image_path"].values)
elif args.mask == "masked":
images = utils.get_images(df["masked_image_path"].values)
images = utils.transform_to_equalized(images)
## Embed imagery
if args.model == "covidnet":
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = "" if args.gpu is None else str(args.gpu)
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
import tensorflow as tf
sess = tf.Session()
tf.get_default_graph()
saver = tf.train.import_meta_graph(os.path.join("data/pretrained_models/COVIDNet-CXR_Large/", "model.meta"))
saver.restore(sess, os.path.join("data/pretrained_models/COVIDNet-CXR_Large/", "model-8485"))
graph = tf.get_default_graph()
image_tensor = graph.get_tensor_by_name("input_1:0")
pred_tensor = graph.get_tensor_by_name("post_relu/Relu:0")
images = utils.transform_to_covidnet(images)
embeddings = run_covidnet_model(
sess, image_tensor, pred_tensor, images, global_max_pool=False,
)
elif args.model == "xrv":
device = torch.device('cuda:%d' % (args.gpu) if torch.cuda.is_available() else 'cpu')
xrv_model = utils.get_xrv_model(device)
images = utils.transform_to_xrv(images)
embeddings = utils.run_densenet_model(
xrv_model, device, images, global_max_pool=False, embedding_size=1024, batch_size=64
)
elif args.model == "densenet":
device = torch.device('cuda:%d' % (args.gpu) if torch.cuda.is_available() else 'cpu')
densenet_model = utils.get_densenet121(device)
images = utils.transform_to_standardized(images)
embeddings = utils.run_densenet_model(
densenet_model, device, images, global_max_pool=False, embedding_size=1024, batch_size=64
)
elif args.model == "histogram":
embeddings = utils.get_histogram_intensities(images)
elif args.model == "histogram-nozeros":
embeddings = utils.get_histogram_intensities(images, True)
## Write output
np.save(output_fn, embeddings)
if __name__ == "__main__":
main()
| [
"numpy.maximum",
"argparse.ArgumentParser",
"pandas.read_csv",
"utils.get_histogram_intensities",
"numpy.mean",
"utils.get_raw_covidx_images",
"tensorflow.get_default_graph",
"os.path.join",
"os.path.exists",
"utils.run_densenet_model",
"utils.transform_to_standardized",
"datetime.datetime.now... | [((259, 320), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""X-ray embedding script"""'}), "(description='X-ray embedding script')\n", (282, 320), False, 'import argparse\n'), ((1589, 1646), 'numpy.zeros', 'np.zeros', (['(num_samples, embedding_size)'], {'dtype': 'np.float32'}), '((num_samples, embedding_size), dtype=np.float32)\n', (1597, 1646), True, 'import numpy as np\n'), ((2200, 2274), 'os.path.join', 'os.path.join', (['args.output_dir', 'f"""{args.name}_{args.mask}_{args.model}.npy"""'], {}), "(args.output_dir, f'{args.name}_{args.mask}_{args.model}.npy')\n", (2212, 2274), False, 'import os\n'), ((2304, 2329), 'os.path.exists', 'os.path.exists', (['output_fn'], {}), '(output_fn)\n', (2318, 2329), False, 'import os\n'), ((2610, 2641), 'os.path.exists', 'os.path.exists', (['args.output_dir'], {}), '(args.output_dir)\n', (2624, 2641), False, 'import os\n'), ((5264, 5294), 'numpy.save', 'np.save', (['output_fn', 'embeddings'], {}), '(output_fn, embeddings)\n', (5271, 5294), True, 'import numpy as np\n'), ((2674, 2717), 'os.makedirs', 'os.makedirs', (['args.output_dir'], {'exist_ok': '(True)'}), '(args.output_dir, exist_ok=True)\n', (2685, 2717), False, 'import os\n'), ((3049, 3072), 'pandas.read_csv', 'pd.read_csv', (['args.input'], {}), '(args.input)\n', (3060, 3072), True, 'import pandas as pd\n'), ((3639, 3651), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (3649, 3651), True, 'import tensorflow as tf\n'), ((3660, 3682), 'tensorflow.get_default_graph', 'tf.get_default_graph', ([], {}), '()\n', (3680, 3682), True, 'import tensorflow as tf\n'), ((3919, 3941), 'tensorflow.get_default_graph', 'tf.get_default_graph', ([], {}), '()\n', (3939, 3941), True, 'import tensorflow as tf\n'), ((4089, 4124), 'utils.transform_to_covidnet', 'utils.transform_to_covidnet', (['images'], {}), '(images)\n', (4116, 4124), False, 'import utils\n'), ((1862, 1890), 'numpy.maximum', 'np.maximum', (['out'], {'axis': 
'(1, 2)'}), '(out, axis=(1, 2))\n', (1872, 1890), True, 'import numpy as np\n'), ((1922, 1947), 'numpy.mean', 'np.mean', (['out'], {'axis': '(1, 2)'}), '(out, axis=(1, 2))\n', (1929, 1947), True, 'import numpy as np\n'), ((2828, 2869), 'utils.get_raw_covidx_images', 'utils.get_raw_covidx_images', ([], {'masked': '(False)'}), '(masked=False)\n', (2855, 2869), False, 'import utils\n'), ((3130, 3180), 'utils.get_images', 'utils.get_images', (["df['unmasked_image_path'].values"], {}), "(df['unmasked_image_path'].values)\n", (3146, 3180), False, 'import utils\n'), ((3726, 3798), 'os.path.join', 'os.path.join', (['"""data/pretrained_models/COVIDNet-CXR_Large/"""', '"""model.meta"""'], {}), "('data/pretrained_models/COVIDNet-CXR_Large/', 'model.meta')\n", (3738, 3798), False, 'import os\n'), ((3828, 3900), 'os.path.join', 'os.path.join', (['"""data/pretrained_models/COVIDNet-CXR_Large/"""', '"""model-8485"""'], {}), "('data/pretrained_models/COVIDNet-CXR_Large/', 'model-8485')\n", (3840, 3900), False, 'import os\n'), ((4397, 4424), 'utils.get_xrv_model', 'utils.get_xrv_model', (['device'], {}), '(device)\n', (4416, 4424), False, 'import utils\n'), ((4442, 4472), 'utils.transform_to_xrv', 'utils.transform_to_xrv', (['images'], {}), '(images)\n', (4464, 4472), False, 'import utils\n'), ((4494, 4608), 'utils.run_densenet_model', 'utils.run_densenet_model', (['xrv_model', 'device', 'images'], {'global_max_pool': '(False)', 'embedding_size': '(1024)', 'batch_size': '(64)'}), '(xrv_model, device, images, global_max_pool=False,\n embedding_size=1024, batch_size=64)\n', (4518, 4608), False, 'import utils\n'), ((2093, 2116), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (2114, 2116), False, 'import datetime\n'), ((2927, 2967), 'utils.get_raw_covidx_images', 'utils.get_raw_covidx_images', ([], {'masked': '(True)'}), '(masked=True)\n', (2954, 2967), False, 'import utils\n'), ((2989, 3025), 'utils.transform_to_equalized', 'utils.transform_to_equalized', 
(['images'], {}), '(images)\n', (3017, 3025), False, 'import utils\n'), ((3238, 3286), 'utils.get_images', 'utils.get_images', (["df['masked_image_path'].values"], {}), "(df['masked_image_path'].values)\n", (3254, 3286), False, 'import utils\n'), ((3308, 3344), 'utils.transform_to_equalized', 'utils.transform_to_equalized', (['images'], {}), '(images)\n', (3336, 3344), False, 'import utils\n'), ((4782, 4811), 'utils.get_densenet121', 'utils.get_densenet121', (['device'], {}), '(device)\n', (4803, 4811), False, 'import utils\n'), ((4829, 4868), 'utils.transform_to_standardized', 'utils.transform_to_standardized', (['images'], {}), '(images)\n', (4860, 4868), False, 'import utils\n'), ((4890, 5010), 'utils.run_densenet_model', 'utils.run_densenet_model', (['densenet_model', 'device', 'images'], {'global_max_pool': '(False)', 'embedding_size': '(1024)', 'batch_size': '(64)'}), '(densenet_model, device, images, global_max_pool=\n False, embedding_size=1024, batch_size=64)\n', (4914, 5010), False, 'import utils\n'), ((4339, 4364), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (4362, 4364), False, 'import torch\n'), ((5086, 5125), 'utils.get_histogram_intensities', 'utils.get_histogram_intensities', (['images'], {}), '(images)\n', (5117, 5125), False, 'import utils\n'), ((4719, 4744), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (4742, 4744), False, 'import torch\n'), ((5192, 5237), 'utils.get_histogram_intensities', 'utils.get_histogram_intensities', (['images', '(True)'], {}), '(images, True)\n', (5223, 5237), False, 'import utils\n')] |
from typing import Tuple, List, Dict, TextIO
import pickle
import os
import copy
import sqlite3
from multiprocessing import Pool
import numpy as np
from functools import partial
from mrnet.core.mol_entry import MoleculeEntry
from mrnet.utils.visualization import (
visualize_molecule_entry,
visualize_molecule_count_histogram,
generate_latex_header,
generate_latex_footer,
latex_emit_molecule,
latex_emit_reaction,
visualize_molecules,
)
from mrnet.stochastic.serialize import rate
get_metadata = """
SELECT * FROM metadata;
"""
def get_reaction(n: int):
    """Build the SQL query that fetches one reaction row from shard *n*.

    The returned statement takes a single positional parameter: the
    reaction_id to look up.  Columns returned are reactant_1, reactant_2,
    product_1, product_2 and dG.
    """
    select_clause = """
    SELECT reactant_1,
           reactant_2,
           product_1,
           product_2,
           dG
    FROM reactions_"""
    return select_clause + str(n) + " WHERE reaction_id = ?;"
def update_rate(shard: int):
    """Build the SQL statement that overwrites the rate of one reaction
    in shard *shard*.  Positional parameters: (new_rate, reaction_id)."""
    set_clause = """
    SET rate = ?
    WHERE reaction_id = ?;
    """
    return "UPDATE reactions_" + str(shard) + set_clause
def does_reaction_exist(n):
    """Build the SQL query that returns every reaction_id in shard *n*
    whose reaction_string matches parameter ?1 (used for duplicate checks)."""
    return f"SELECT reaction_id FROM reactions_{n} WHERE reaction_string = ?1;"
def get_reaction_string(n: int):
    """Build the SQL query that fetches the canonical reaction_string of
    one reaction (parameter ?1 is the reaction_id) from shard *n*."""
    select_clause = """
    SELECT reaction_string
    FROM reactions_"""
    return select_clause + str(n) + " WHERE reaction_id = ?1;"
def find_duplicate_reactions(
    db_path: str,
    shard_size: int,
    number_of_shards: int,
    number_of_reactions: int,
    shard: int,
):
    """Scan one shard of a reaction database for duplicated reactions.

    For every reaction in shard *shard*, its canonical reaction_string is
    looked up in every shard of the database.  When the string occurs more
    than once, the sorted list of all matching reaction_ids is recorded.
    Intended to run inside a multiprocessing worker (one shard per call),
    so the connection is opened locally and closed before returning.

    Args:
        db_path: path to the sqlite reaction-network database.
        shard_size: number of reactions stored per shard table.
        number_of_shards: total number of reactions_N tables.
        number_of_reactions: total number of reactions in the network.
        shard: index of the shard this call is responsible for.

    Returns:
        A list of sorted duplicate-index lists; a duplicated group appears
        once per member reaction that lives in this shard.
    """
    repeats = []
    con = sqlite3.connect(db_path)
    try:
        cur = con.cursor()
        get_reaction_string_sql = get_reaction_string(shard)
        # One lookup statement per shard: a duplicate may live anywhere.
        does_reaction_exist_sql = [
            does_reaction_exist(i) for i in range(number_of_shards)
        ]

        base_index = shard * shard_size
        # The last shard may be only partially filled.
        top_index = min(number_of_reactions, (shard + 1) * shard_size)

        for index in range(base_index, top_index):
            duplicate_indices = []
            reaction_string = list(cur.execute(get_reaction_string_sql, (index,)))[0][0]
            for sql in does_reaction_exist_sql:
                for row in cur.execute(sql, (reaction_string,)):
                    duplicate_indices.append(row[0])

            # Exactly one hit means the reaction only matched itself.
            if len(duplicate_indices) != 1:
                repeats.append(sorted(duplicate_indices))
    finally:
        # Fix: the original leaked the connection in every worker process.
        con.close()

    return repeats
class NetworkUpdater:
    """
    Manage the state required for updating a sharded reaction database:
    single-rate updates, full rate recomputation and duplicate detection.

    The instance holds an open sqlite connection for its whole lifetime;
    call close() (or use the instance as a context manager) when done.
    This could easily be a single function, but i anticipate that we will
    be adding more methods in the future.
    """

    def __init__(
        self, network_folder: str, number_of_threads=6  # used in duplicate checking
    ):
        self.network_folder = network_folder
        self.db_postfix = "/rn.sqlite"
        self.connection = sqlite3.connect(self.network_folder + self.db_postfix)
        self.number_of_threads = number_of_threads

        # metadata row: (num species, num reactions, shard size, num shards)
        cur = self.connection.cursor()
        md = list(cur.execute(get_metadata))[0]
        self.number_of_species = md[0]
        self.number_of_reactions = md[1]
        self.shard_size = md[2]
        self.number_of_shards = md[3]

        # pre-build the per-shard SQL statements once
        self.update_rates_sql = {}
        self.get_reactions_sql = {}
        for i in range(self.number_of_shards):
            self.update_rates_sql[i] = update_rate(i)
            self.get_reactions_sql[i] = get_reaction(i)

    def close(self):
        """Close the underlying sqlite connection."""
        self.connection.close()

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.close()
        return False

    def update_rates(self, pairs: List[Tuple[int, float]]):
        """Set the rate of each (reaction_index, rate) pair, then commit."""
        cur = self.connection.cursor()
        for (index, r) in pairs:
            shard = index // self.shard_size
            # int(...) guards against numpy integer indices, which sqlite
            # rejects (recompute_all_rates already did this cast).
            cur.execute(self.update_rates_sql[shard], (r, int(index)))
        self.connection.commit()

    def recompute_all_rates(
        self, temperature, constant_barrier, commit_frequency=10000
    ):
        """Recompute every reaction rate from its stored dG via rate(),
        committing every *commit_frequency* updates to bound memory use."""
        cur = self.connection.cursor()
        for index in range(self.number_of_reactions):
            shard = index // self.shard_size
            res = list(cur.execute(self.get_reactions_sql[shard], (int(index),)))[0]
            dG = res[4]
            new_rate = rate(dG, temperature, constant_barrier)
            cur.execute(self.update_rates_sql[shard], (new_rate, int(index)))
            if index % commit_frequency == 0:
                self.connection.commit()

        self.connection.commit()

    def find_duplicates(self):
        """Run find_duplicate_reactions over all shards in parallel.

        Returns:
            A set of sorted tuples of reaction indices that share the same
            canonical reaction string.
        """
        f = partial(
            find_duplicate_reactions,
            self.network_folder + self.db_postfix,
            self.shard_size,
            self.number_of_shards,
            self.number_of_reactions,
        )

        with Pool(self.number_of_threads) as p:
            repeats_unordered = p.map(f, range(self.number_of_shards))

        # deduplicate: each duplicate group was reported once per member
        repeated = set()
        for xs in repeats_unordered:
            for x in xs:
                repeated.add(tuple(sorted(x)))

        return repeated

    def set_duplicate_reaction_rates_to_zero(self):
        """Keep the first reaction of every duplicate group and zero the
        rates of the rest so they can never fire during simulation."""
        repeats = self.find_duplicates()
        update_list = []
        for xs in repeats:
            # xs is sorted; keep xs[0], zero the remaining duplicates
            for x in xs[1:]:
                update_list.append((x, 0.0))

        self.update_rates(update_list)
def collect_duplicate_pathways(pathways: List[List[int]]) -> Dict[frozenset, dict]:
    """Group pathways that consist of the same set of reactions.

    Two pathways are considered identical when they contain the same
    reaction indices (order ignored).  Each group keeps the first pathway
    seen plus a count of how often the group occurred.
    """
    grouped: Dict[frozenset, dict] = {}
    for pathway in pathways:
        signature = frozenset(pathway)
        entry = grouped.get(signature)
        if entry is None:
            grouped[signature] = {"pathway": pathway, "frequency": 1}
        else:
            entry["frequency"] += 1
    return grouped
def update_state(state, reaction):
    """Apply one reaction to *state* in place: decrement the count of every
    reactant species and increment the count of every product species."""
    for delta, role in ((-1, "reactants"), (1, "products")):
        for species_index in reaction[role]:
            state[species_index] += delta
class SimulationAnalyzer:
"""
A class to analyze the resutls of a set of MC runs
"""
def __init__(self, network_folder: str, mol_list: List[MoleculeEntry]):
initial_state_postfix = "/initial_state"
simulation_histories_postfix = "/simulation_histories"
database_postfix = "/rn.sqlite"
reports_postfix = "/reports"
self.connection = sqlite3.connect(network_folder + database_postfix)
cur = self.connection.cursor()
md = list(cur.execute(get_metadata))[0]
self.number_of_species = md[0]
self.number_of_reactions = md[1]
self.shard_size = md[2]
self.number_of_shards = md[3]
self.get_reactions_sql = {}
for i in range(self.number_of_shards):
self.get_reactions_sql[i] = get_reaction(i)
self.network_folder = network_folder
self.histories_folder = network_folder + simulation_histories_postfix
self.reports_folder = network_folder + reports_postfix
try:
os.mkdir(self.reports_folder)
except FileExistsError:
pass
with open(network_folder + initial_state_postfix, "r") as f:
initial_state_list = [int(c) for c in f.readlines()]
self.initial_state = np.array(initial_state_list, dtype=int)
self.mol_entries = {}
for entry in mol_list:
self.mol_entries[entry.parameters["ind"]] = entry
self.reaction_data: Dict[int, dict] = {}
self.reaction_pathways_dict: Dict[int, Dict[frozenset, dict]] = dict()
self.reaction_histories = list()
self.time_histories = list()
self.observed_reactions: Dict[int, int] = {}
histories_contents = sorted(os.listdir(self.histories_folder))
reaction_histories_contents = [
x for x in histories_contents if x.startswith("reactions")
]
time_histories_contents = [
x for x in histories_contents if x.startswith("times")
]
reaction_seeds = [x.split("_")[1] for x in reaction_histories_contents]
time_seeds = [x.split("_")[1] for x in reaction_histories_contents]
if reaction_seeds != time_seeds:
raise ValueError("Reactions and times not from same set of initial seeds!")
for filename in reaction_histories_contents:
reaction_history = list()
with open(self.histories_folder + "/" + filename) as f:
for line in f:
reaction_history.append(int(line.strip()))
self.reaction_histories.append(np.array(reaction_history))
for filename in time_histories_contents:
time_history = list()
with open(self.histories_folder + "/" + filename) as f:
for line in f:
time_history.append(float(line.strip()))
self.time_histories.append(np.array(time_history))
self.number_simulations = len(self.reaction_histories)
visualize_molecules(
self.reports_folder + "/molecule_diagrams", self.mol_entries
)
def index_to_reaction(self, reaction_index):
shard = reaction_index // self.shard_size
if reaction_index in self.reaction_data:
return self.reaction_data[reaction_index]
else:
print("fetching data for reaction", reaction_index)
cur = self.connection.cursor()
# reaction_index is type numpy.int64 which sqlite doesn't like.
res = list(
cur.execute(self.get_reactions_sql[shard], (int(reaction_index),))
)[0]
reaction = {}
reaction["reactants"] = [i for i in res[0:2] if i >= 0]
reaction["products"] = [i for i in res[2:4] if i >= 0]
reaction["dG"] = res[4]
self.reaction_data[reaction_index] = reaction
return reaction
def extract_species_consumption_info(
self, target_species_index: int
) -> Tuple[Dict[int, int], Dict[int, int], List[int]]:
"""
given a target molecule, return all the ways the molecule was
created, all the ways the molecule was consumed and the ending
frequencies of the molecule for each simulation.
"""
# if a reaction has the target species twice as a reactant or product
# it will be counted twice
producing_reactions = {}
consuming_reactions = {}
final_counts = []
for reaction_history in self.reaction_histories:
running_count = self.initial_state[target_species_index]
for reaction_index in reaction_history:
reaction = self.index_to_reaction(reaction_index)
for reactant_index in reaction["reactants"]:
if target_species_index == reactant_index:
running_count -= 1
if reaction_index not in consuming_reactions:
consuming_reactions[reaction_index] = 1
else:
consuming_reactions[reaction_index] += 1
for product_index in reaction["products"]:
if target_species_index == product_index:
running_count += 1
if reaction_index not in producing_reactions:
producing_reactions[reaction_index] = 1
else:
producing_reactions[reaction_index] += 1
final_counts.append(running_count)
return producing_reactions, consuming_reactions, final_counts
    def extract_reaction_pathways(self, target_species_index: int):
        """
        given a reaction history and a target molecule, find the
        first reaction which produced the target molecule (if any).
        Apply that reaction to the initial state to produce a partial
        state array. Missing reactants have negative values in the
        partial state array. Now loop through the reaction history
        to resolve the missing reactants.

        Populates self.reaction_pathways_dict[target_species_index] with
        the deduplicated pathways found across all simulation histories.
        """
        print("extracting pathways to", target_species_index)
        reaction_pathway_list = []
        for reaction_history_num, reaction_history in enumerate(
            self.reaction_histories
        ):
            # current approach is a hack. Sometimes it can fall into an infinite loop
            # if pathway gets too long, we assume that this has happened.
            infinite_loop = False
            print("scanning history", reaction_history_num, "for pathway")
            # -1 if target wasn't produced
            # index of reaction if target was produced
            reaction_producing_target_index = -1
            for reaction_index in reaction_history:
                reaction = self.index_to_reaction(reaction_index)
                if target_species_index in reaction["products"]:
                    reaction_producing_target_index = reaction_index
                    break
            if reaction_producing_target_index == -1:
                # this history never produced the target; nothing to extract
                continue
            else:
                pathway = [reaction_producing_target_index]
                partial_state = np.copy(self.initial_state)
                final_reaction = self.index_to_reaction(pathway[0])
                update_state(partial_state, final_reaction)
                # species with negative counts are reactants the pathway has
                # not yet explained; resolve each by finding a reaction in
                # the history that produces it
                negative_species = list(np.where(partial_state < 0)[0])
                while len(negative_species) != 0:
                    # length cap: assume the infinite-loop hack has fired
                    if len(pathway) > 1000:
                        infinite_loop = True
                        break
                    for species_index in negative_species:
                        for reaction_index in reaction_history:
                            reaction = self.index_to_reaction(reaction_index)
                            if species_index in reaction["products"]:
                                update_state(partial_state, reaction)
                                # prepend so the pathway stays in causal order
                                pathway.insert(0, reaction_index)
                                break
                    negative_species = list(np.where(partial_state < 0)[0])
                if not infinite_loop:
                    reaction_pathway_list.append(pathway)
        reaction_pathway_dict = collect_duplicate_pathways(reaction_pathway_list)
        self.reaction_pathways_dict[target_species_index] = reaction_pathway_dict
    def generate_consumption_report(self, mol_entry: MoleculeEntry):
        """Write a LaTeX consumption report for *mol_entry* into the
        reports folder, plus a PDF histogram of the molecule's final count
        across simulations.  Producing and consuming reactions are listed
        by decreasing occurrence frequency."""
        target_species_index = mol_entry.parameters["ind"]
        (
            producing_reactions,
            consuming_reactions,
            final_counts,
        ) = self.extract_species_consumption_info(target_species_index)
        # histogram PDF referenced by the LaTeX report via a relative path
        histogram_file = (
            self.reports_folder
            + "/final_count_histogram_"
            + str(target_species_index)
            + ".pdf"
        )
        visualize_molecule_count_histogram(final_counts, histogram_file)
        with open(
            self.reports_folder
            + "/consumption_report_"
            + str(target_species_index)
            + ".tex",
            "w",
        ) as f:
            generate_latex_header(f)
            f.write("consumption report for")
            latex_emit_molecule(f, target_species_index)
            f.write("\n\n")
            f.write("molecule frequency at end of simulations")
            f.write(
                "\\raisebox{-.5\\height}{"
                + "\\includegraphics[scale=0.5]{"
                + "./final_count_histogram_"
                + str(target_species_index)
                + ".pdf"
                + "}}\n\n"
            )
            # most frequent producing reactions first
            f.write("producing reactions:\n\n\n")
            for reaction_index, frequency in sorted(
                producing_reactions.items(), key=lambda item: -item[1]
            ):
                f.write(str(frequency) + " occurrences:\n")
                self.latex_emit_reaction(f, reaction_index)
            # most frequent consuming reactions first
            f.write("consuming reactions:\n\n\n")
            for reaction_index, frequency in sorted(
                consuming_reactions.items(), key=lambda item: -item[1]
            ):
                f.write(str(frequency) + " occurrences:\n")
                self.latex_emit_reaction(f, reaction_index)
            generate_latex_footer(f)
    def generate_pathway_report(self, mol_entry: MoleculeEntry, min_frequency: int):
        """Write a LaTeX report of the reaction pathways that produced
        *mol_entry*, most frequent first, skipping pathways whose frequency
        does not exceed *min_frequency*.  Pathways are extracted lazily on
        first request and cached in self.reaction_pathways_dict."""
        target_species_index = mol_entry.parameters["ind"]
        if target_species_index not in self.reaction_pathways_dict:
            self.extract_reaction_pathways(target_species_index)
        with open(
            self.reports_folder
            + "/pathway_report_"
            + str(target_species_index)
            + ".tex",
            "w",
        ) as f:
            pathways = self.reaction_pathways_dict[target_species_index]
            generate_latex_header(f)
            f.write("pathway report for\n\n")
            latex_emit_molecule(f, target_species_index)
            self.latex_emit_initial_state(f)
            f.write("\\newpage\n\n\n")
            for _, unique_pathway in sorted(
                pathways.items(), key=lambda item: -item[1]["frequency"]
            ):
                frequency = unique_pathway["frequency"]
                if frequency > min_frequency:
                    f.write(str(frequency) + " occurrences:\n")
                    for reaction_index in unique_pathway["pathway"]:
                        self.latex_emit_reaction(f, reaction_index)
                    f.write("\\newpage\n")
                else:
                    # pathways are sorted by descending frequency, so every
                    # remaining one is also below the cutoff
                    break
            generate_latex_footer(f)
def latex_emit_initial_state(self, f: TextIO):
f.write("\n\n initial state:\n\n\n")
for species_index in range(self.number_of_species):
num = self.initial_state[species_index]
if num > 0:
f.write(str(num) + " molecules of ")
latex_emit_molecule(f, species_index)
f.write("\n\n")
def latex_emit_reaction(self, f: TextIO, reaction_index: int):
reaction = self.index_to_reaction(reaction_index)
latex_emit_reaction(f, reaction, reaction_index)
def generate_simulation_history_report(self, history_num):
with open(
self.reports_folder
+ "/simulation_history_report_"
+ str(history_num)
+ ".tex",
"w",
) as f:
generate_latex_header(f)
f.write("simulation " + str(history_num))
f.write("\n\n\n")
for reaction_index in self.reaction_histories[history_num]:
f.write("\n\n\n")
self.latex_emit_reaction(f, reaction_index)
generate_latex_footer(f)
def generate_list_of_all_reactions_report(self):
with open(
self.reports_folder + "/list_of_all_reactions.tex",
"w",
) as f:
generate_latex_header(f)
for reaction_index in range(self.number_of_reactions):
f.write("\n\n\n")
self.latex_emit_reaction(f, reaction_index)
generate_latex_footer(f)
def generate_list_of_all_species_report(self):
with open(
self.reports_folder + "/list_of_all_species.tex",
"w",
) as f:
generate_latex_header(f)
for species_index in range(self.number_of_species):
f.write("\n\n\n")
latex_emit_molecule(f, species_index)
generate_latex_footer(f)
def compute_reaction_tally(self):
if len(self.observed_reactions) == 0:
for history in self.reaction_histories:
for reaction_index in history:
if reaction_index in self.observed_reactions:
self.observed_reactions[reaction_index] += 1
else:
self.observed_reactions[reaction_index] = 1
def frequently_occouring_reactions(self, number: int):
"""
return a list of the number most frequently occouring reactions
"""
self.compute_reaction_tally()
return list(
map(
lambda pair: pair[0],
sorted(self.observed_reactions.items(), key=lambda pair: -pair[1])[
0:number
],
)
)
def generate_reaction_tally_report(self, cutoff: int):
self.compute_reaction_tally()
with open(self.reports_folder + "/reaction_tally_report.tex", "w") as f:
generate_latex_header(f)
f.write("reaction tally report")
f.write("\n\n\n")
for (reaction_index, number) in sorted(
self.observed_reactions.items(), key=lambda pair: -pair[1]
):
if number > cutoff:
f.write(str(number) + " occourances of:")
self.latex_emit_reaction(f, reaction_index)
generate_latex_footer(f)
def generate_time_dep_profiles(self, frequency: int = 1):
"""
Generate plottable time-dependent profiles of species and rxns from raw KMC output, obtain final states.
:param frequency (int): The system state will be sampled after every n
reactions, where n is the frequency. Default is 1, meaning that each
step will be sampled.
:return dict containing species profiles, reaction profiles, and final states from each simulation.
{species_profiles: [ {mol_ind1: [n(t0), n(t1)...], mol_ind2: [...], ... }, {...}, ... ]
reaction_profiles: [ {rxn_ind1: [n(t0), n(t1)...], rxn_ind2: [...], ...}, {...}, ...]
final_states: [ {mol_ind1: n1, mol_ind2: ..., ...}, {...}, ...],
snapshot_times: [[t0, t1, ...], [...], ...]}
"""
species_profiles = list()
reaction_profiles = list()
snapshot_times = list()
final_states = list()
for n_sim in range(self.number_simulations):
sim_time_history = self.time_histories[n_sim]
sim_rxn_history = self.reaction_histories[n_sim]
state = copy.deepcopy(self.initial_state)
rxn_counts = dict()
snaps = [0.0]
sim_species_profile = dict()
sim_rxn_profile = dict()
for ii, mol_ind in enumerate(state):
sim_species_profile[ii] = [self.initial_state[ii]]
for index in range(self.number_of_reactions):
sim_rxn_profile[index] = [0]
rxn_counts[index] = 0
total_iterations = len(sim_rxn_history)
for iter in range(total_iterations):
rxn_ind = sim_rxn_history[iter]
t = sim_time_history[iter]
rxn_counts[rxn_ind] += 1
update_state(state, self.index_to_reaction(rxn_ind))
for i, v in enumerate(state):
if v < 0:
raise ValueError(
"State invalid: simulation {}, negative specie {}, time {}, step {}, reaction {}".format(
n_sim, i, t, iter, rxn_ind
)
)
if iter + 1 % frequency == 0:
snaps.append(t)
for i, v in enumerate(state):
sim_species_profile[i].append(v)
for rxn, count in rxn_counts.items():
sim_rxn_profile[rxn].append(count)
# Always add the final state
if sim_time_history[-1] not in snaps:
snaps.append(sim_time_history[-1])
for i, v in enumerate(state):
sim_species_profile[i].append(v)
for rxn, count in rxn_counts.items():
sim_rxn_profile[rxn].append(count)
species_profiles.append(sim_species_profile)
reaction_profiles.append(sim_rxn_profile)
final_states.append(state)
snapshot_times.append(snaps)
return {
"species_profiles": species_profiles,
"reaction_profiles": reaction_profiles,
"final_states": final_states,
"snapshot_times": snapshot_times,
}
def final_state_analysis(self, final_states):
"""
Gather statistical analysis of the final states of simulation.
Args:
final_states: list of dicts of final states, as generated in generate_time_dep_profiles()
:return: list of tuples containing statistical data for each species, sorted from highest to low avg occurrence
"""
# For each molecule, compile an array of its final amounts
state_arrays = dict()
for iter, final_state in enumerate(final_states):
for index, amt in enumerate(final_state):
# Store the amount, and convert key from mol_ind to entry_id
if index not in state_arrays:
state_arrays[index] = np.zeros(self.number_simulations)
state_arrays[index][iter] = amt
analyzed_states = dict() # will contain statistical results of final states
for mol_entry, state_array in state_arrays.items():
analyzed_states[mol_entry] = (np.mean(state_array), np.std(state_array))
# Sort from highest avg final amount to lowest
sorted_analyzed_states = sorted(
[(entry_id, data_tup) for entry_id, data_tup in analyzed_states.items()],
key=lambda x: x[1][0],
reverse=True,
)
return sorted_analyzed_states
def rank_reaction_counts(self):
"""
Given reaction histories, identify the most commonly occurring reactions, on average.
Can rank generally, or by reactions of a certain type.
Args:
Returns:
reaction_data: list of reactions and their avg, std of times fired. Sorted by the average times fired.
[(rxn1, (avg, std)), (rxn2, (avg, std)) ... ]
"""
reaction_data = dict() # keeping record of each iteration
# Loop to count all reactions fired
for n_sim in range(self.number_simulations):
rxns_fired = set(self.reaction_histories[n_sim])
for rxn_ind in rxns_fired:
if rxn_ind not in reaction_data:
reaction_data[rxn_ind] = list()
reaction_data[rxn_ind].append(
np.sum(self.reaction_histories[n_sim] == rxn_ind)
)
reaction_analysis = dict()
for rxn_ind, counts in reaction_data.items():
reaction_analysis[rxn_ind] = (
np.mean(np.array(counts)),
np.std(np.array(counts)),
)
# Sort reactions by the average amount fired
sorted_reaction_analysis = sorted(
[(i, c) for i, c in reaction_analysis.items()],
key=lambda x: x[1][0],
reverse=True,
)
return sorted_reaction_analysis
| [
"os.mkdir",
"numpy.sum",
"mrnet.utils.visualization.visualize_molecules",
"mrnet.stochastic.serialize.rate",
"numpy.mean",
"mrnet.utils.visualization.visualize_molecule_count_histogram",
"numpy.copy",
"numpy.std",
"mrnet.utils.visualization.generate_latex_footer",
"mrnet.utils.visualization.latex_... | [((1461, 1485), 'sqlite3.connect', 'sqlite3.connect', (['db_path'], {}), '(db_path)\n', (1476, 1485), False, 'import sqlite3\n'), ((2688, 2742), 'sqlite3.connect', 'sqlite3.connect', (['(self.network_folder + self.db_postfix)'], {}), '(self.network_folder + self.db_postfix)\n', (2703, 2742), False, 'import sqlite3\n'), ((4193, 4335), 'functools.partial', 'partial', (['find_duplicate_reactions', '(self.network_folder + self.db_postfix)', 'self.shard_size', 'self.number_of_shards', 'self.number_of_reactions'], {}), '(find_duplicate_reactions, self.network_folder + self.db_postfix,\n self.shard_size, self.number_of_shards, self.number_of_reactions)\n', (4200, 4335), False, 'from functools import partial\n'), ((6022, 6072), 'sqlite3.connect', 'sqlite3.connect', (['(network_folder + database_postfix)'], {}), '(network_folder + database_postfix)\n', (6037, 6072), False, 'import sqlite3\n'), ((8635, 8721), 'mrnet.utils.visualization.visualize_molecules', 'visualize_molecules', (["(self.reports_folder + '/molecule_diagrams')", 'self.mol_entries'], {}), "(self.reports_folder + '/molecule_diagrams', self.\n mol_entries)\n", (8654, 8721), False, 'from mrnet.utils.visualization import visualize_molecule_entry, visualize_molecule_count_histogram, generate_latex_header, generate_latex_footer, latex_emit_molecule, latex_emit_reaction, visualize_molecules\n'), ((14519, 14583), 'mrnet.utils.visualization.visualize_molecule_count_histogram', 'visualize_molecule_count_histogram', (['final_counts', 'histogram_file'], {}), '(final_counts, histogram_file)\n', (14553, 14583), False, 'from mrnet.utils.visualization import visualize_molecule_entry, visualize_molecule_count_histogram, generate_latex_header, generate_latex_footer, latex_emit_molecule, latex_emit_reaction, visualize_molecules\n'), ((17770, 17818), 'mrnet.utils.visualization.latex_emit_reaction', 'latex_emit_reaction', (['f', 'reaction', 'reaction_index'], {}), '(f, reaction, 
reaction_index)\n', (17789, 17818), False, 'from mrnet.utils.visualization import visualize_molecule_entry, visualize_molecule_count_histogram, generate_latex_header, generate_latex_footer, latex_emit_molecule, latex_emit_reaction, visualize_molecules\n'), ((3914, 3953), 'mrnet.stochastic.serialize.rate', 'rate', (['dG', 'temperature', 'constant_barrier'], {}), '(dG, temperature, constant_barrier)\n', (3918, 3953), False, 'from mrnet.stochastic.serialize import rate\n'), ((4417, 4445), 'multiprocessing.Pool', 'Pool', (['self.number_of_threads'], {}), '(self.number_of_threads)\n', (4421, 4445), False, 'from multiprocessing import Pool\n'), ((6663, 6692), 'os.mkdir', 'os.mkdir', (['self.reports_folder'], {}), '(self.reports_folder)\n', (6671, 6692), False, 'import os\n'), ((6910, 6949), 'numpy.array', 'np.array', (['initial_state_list'], {'dtype': 'int'}), '(initial_state_list, dtype=int)\n', (6918, 6949), True, 'import numpy as np\n'), ((7373, 7406), 'os.listdir', 'os.listdir', (['self.histories_folder'], {}), '(self.histories_folder)\n', (7383, 7406), False, 'import os\n'), ((14781, 14805), 'mrnet.utils.visualization.generate_latex_header', 'generate_latex_header', (['f'], {}), '(f)\n', (14802, 14805), False, 'from mrnet.utils.visualization import visualize_molecule_entry, visualize_molecule_count_histogram, generate_latex_header, generate_latex_footer, latex_emit_molecule, latex_emit_reaction, visualize_molecules\n'), ((14865, 14909), 'mrnet.utils.visualization.latex_emit_molecule', 'latex_emit_molecule', (['f', 'target_species_index'], {}), '(f, target_species_index)\n', (14884, 14909), False, 'from mrnet.utils.visualization import visualize_molecule_entry, visualize_molecule_count_histogram, generate_latex_header, generate_latex_footer, latex_emit_molecule, latex_emit_reaction, visualize_molecules\n'), ((15910, 15934), 'mrnet.utils.visualization.generate_latex_footer', 'generate_latex_footer', (['f'], {}), '(f)\n', (15931, 15934), False, 'from 
mrnet.utils.visualization import visualize_molecule_entry, visualize_molecule_count_histogram, generate_latex_header, generate_latex_footer, latex_emit_molecule, latex_emit_reaction, visualize_molecules\n'), ((16481, 16505), 'mrnet.utils.visualization.generate_latex_header', 'generate_latex_header', (['f'], {}), '(f)\n', (16502, 16505), False, 'from mrnet.utils.visualization import visualize_molecule_entry, visualize_molecule_count_histogram, generate_latex_header, generate_latex_footer, latex_emit_molecule, latex_emit_reaction, visualize_molecules\n'), ((16565, 16609), 'mrnet.utils.visualization.latex_emit_molecule', 'latex_emit_molecule', (['f', 'target_species_index'], {}), '(f, target_species_index)\n', (16584, 16609), False, 'from mrnet.utils.visualization import visualize_molecule_entry, visualize_molecule_count_histogram, generate_latex_header, generate_latex_footer, latex_emit_molecule, latex_emit_reaction, visualize_molecules\n'), ((17239, 17263), 'mrnet.utils.visualization.generate_latex_footer', 'generate_latex_footer', (['f'], {}), '(f)\n', (17260, 17263), False, 'from mrnet.utils.visualization import visualize_molecule_entry, visualize_molecule_count_histogram, generate_latex_header, generate_latex_footer, latex_emit_molecule, latex_emit_reaction, visualize_molecules\n'), ((18077, 18101), 'mrnet.utils.visualization.generate_latex_header', 'generate_latex_header', (['f'], {}), '(f)\n', (18098, 18101), False, 'from mrnet.utils.visualization import visualize_molecule_entry, visualize_molecule_count_histogram, generate_latex_header, generate_latex_footer, latex_emit_molecule, latex_emit_reaction, visualize_molecules\n'), ((18366, 18390), 'mrnet.utils.visualization.generate_latex_footer', 'generate_latex_footer', (['f'], {}), '(f)\n', (18387, 18390), False, 'from mrnet.utils.visualization import visualize_molecule_entry, visualize_molecule_count_histogram, generate_latex_header, generate_latex_footer, latex_emit_molecule, latex_emit_reaction, 
visualize_molecules\n'), ((18574, 18598), 'mrnet.utils.visualization.generate_latex_header', 'generate_latex_header', (['f'], {}), '(f)\n', (18595, 18598), False, 'from mrnet.utils.visualization import visualize_molecule_entry, visualize_molecule_count_histogram, generate_latex_header, generate_latex_footer, latex_emit_molecule, latex_emit_reaction, visualize_molecules\n'), ((18774, 18798), 'mrnet.utils.visualization.generate_latex_footer', 'generate_latex_footer', (['f'], {}), '(f)\n', (18795, 18798), False, 'from mrnet.utils.visualization import visualize_molecule_entry, visualize_molecule_count_histogram, generate_latex_header, generate_latex_footer, latex_emit_molecule, latex_emit_reaction, visualize_molecules\n'), ((18978, 19002), 'mrnet.utils.visualization.generate_latex_header', 'generate_latex_header', (['f'], {}), '(f)\n', (18999, 19002), False, 'from mrnet.utils.visualization import visualize_molecule_entry, visualize_molecule_count_histogram, generate_latex_header, generate_latex_footer, latex_emit_molecule, latex_emit_reaction, visualize_molecules\n'), ((19169, 19193), 'mrnet.utils.visualization.generate_latex_footer', 'generate_latex_footer', (['f'], {}), '(f)\n', (19190, 19193), False, 'from mrnet.utils.visualization import visualize_molecule_entry, visualize_molecule_count_histogram, generate_latex_header, generate_latex_footer, latex_emit_molecule, latex_emit_reaction, visualize_molecules\n'), ((20226, 20250), 'mrnet.utils.visualization.generate_latex_header', 'generate_latex_header', (['f'], {}), '(f)\n', (20247, 20250), False, 'from mrnet.utils.visualization import visualize_molecule_entry, visualize_molecule_count_histogram, generate_latex_header, generate_latex_footer, latex_emit_molecule, latex_emit_reaction, visualize_molecules\n'), ((20644, 20668), 'mrnet.utils.visualization.generate_latex_footer', 'generate_latex_footer', (['f'], {}), '(f)\n', (20665, 20668), False, 'from mrnet.utils.visualization import visualize_molecule_entry, 
visualize_molecule_count_histogram, generate_latex_header, generate_latex_footer, latex_emit_molecule, latex_emit_reaction, visualize_molecules\n'), ((21848, 21881), 'copy.deepcopy', 'copy.deepcopy', (['self.initial_state'], {}), '(self.initial_state)\n', (21861, 21881), False, 'import copy\n'), ((8227, 8253), 'numpy.array', 'np.array', (['reaction_history'], {}), '(reaction_history)\n', (8235, 8253), True, 'import numpy as np\n'), ((8539, 8561), 'numpy.array', 'np.array', (['time_history'], {}), '(time_history)\n', (8547, 8561), True, 'import numpy as np\n'), ((12852, 12879), 'numpy.copy', 'np.copy', (['self.initial_state'], {}), '(self.initial_state)\n', (12859, 12879), True, 'import numpy as np\n'), ((17566, 17603), 'mrnet.utils.visualization.latex_emit_molecule', 'latex_emit_molecule', (['f', 'species_index'], {}), '(f, species_index)\n', (17585, 17603), False, 'from mrnet.utils.visualization import visualize_molecule_entry, visualize_molecule_count_histogram, generate_latex_header, generate_latex_footer, latex_emit_molecule, latex_emit_reaction, visualize_molecules\n'), ((19118, 19155), 'mrnet.utils.visualization.latex_emit_molecule', 'latex_emit_molecule', (['f', 'species_index'], {}), '(f, species_index)\n', (19137, 19155), False, 'from mrnet.utils.visualization import visualize_molecule_entry, visualize_molecule_count_histogram, generate_latex_header, generate_latex_footer, latex_emit_molecule, latex_emit_reaction, visualize_molecules\n'), ((25026, 25046), 'numpy.mean', 'np.mean', (['state_array'], {}), '(state_array)\n', (25033, 25046), True, 'import numpy as np\n'), ((25048, 25067), 'numpy.std', 'np.std', (['state_array'], {}), '(state_array)\n', (25054, 25067), True, 'import numpy as np\n'), ((24757, 24790), 'numpy.zeros', 'np.zeros', (['self.number_simulations'], {}), '(self.number_simulations)\n', (24765, 24790), True, 'import numpy as np\n'), ((26218, 26267), 'numpy.sum', 'np.sum', (['(self.reaction_histories[n_sim] == rxn_ind)'], {}), 
'(self.reaction_histories[n_sim] == rxn_ind)\n', (26224, 26267), True, 'import numpy as np\n'), ((26443, 26459), 'numpy.array', 'np.array', (['counts'], {}), '(counts)\n', (26451, 26459), True, 'import numpy as np\n'), ((26485, 26501), 'numpy.array', 'np.array', (['counts'], {}), '(counts)\n', (26493, 26501), True, 'import numpy as np\n'), ((13049, 13076), 'numpy.where', 'np.where', (['(partial_state < 0)'], {}), '(partial_state < 0)\n', (13057, 13076), True, 'import numpy as np\n'), ((13741, 13768), 'numpy.where', 'np.where', (['(partial_state < 0)'], {}), '(partial_state < 0)\n', (13749, 13768), True, 'import numpy as np\n')] |
import numpy as np

# Global model assumptions shared by the rest of the model.
HORIZON = 20  # years, length of time the model covers.
year = np.arange(1,HORIZON+1)  # 1..HORIZON, an index for temporal calculations.
SAMPSIZE = 1000  # the number of iterations in the Monte Carlo simulation.
run = np.arange(1, SAMPSIZE+1)  # 1..SAMPSIZE, the iteration index.
TAXRATE = 38  # %, tax rate.
DISCOUNTRATE = 12  # %/year, used for discounted cash flow calculations.
DEPRPER = 7  # years, the depreciation schedule for the capital.
"numpy.arange"
] | [((109, 134), 'numpy.arange', 'np.arange', (['(1)', '(HORIZON + 1)'], {}), '(1, HORIZON + 1)\n', (118, 134), True, 'import numpy as np\n'), ((250, 276), 'numpy.arange', 'np.arange', (['(1)', '(SAMPSIZE + 1)'], {}), '(1, SAMPSIZE + 1)\n', (259, 276), True, 'import numpy as np\n')] |
"""This module contains auxiliary functions for RD predictions used in the main notebook."""
import json
import matplotlib as plt
import pandas as pd
import numpy as np
import statsmodels as sm
from auxiliary.auxiliary_predictions import *
from auxiliary.auxiliary_plots import *
from auxiliary.auxiliary_tables import *
def prepare_data(data):
    """
    Add the variables needed for the analysis to the dataset.

    Args:
        data (pd.DataFrame): raw dataset; must contain the columns
            nextGPA, nextCGPA, totcredits_year2 and loc_campus3.

    Returns:
        pd.DataFrame: the same dataframe with a constant, above-cutoff
        dummies, a masked credits column and the campus-specific cutoff.
    """
    # Add constant to data to use in regressions later.
    data.loc[:, "const"] = 1

    # Dummy for next-term GPA being above the cutoff.
    # (FIX: np.NaN was removed in NumPy 2.0; use np.nan.)
    data["nextGPA_above_cutoff"] = np.nan
    data.loc[data.nextGPA >= 0, "nextGPA_above_cutoff"] = 1
    data.loc[data.nextGPA < 0, "nextGPA_above_cutoff"] = 0

    # Dummy for next-term cumulative GPA being above the cutoff.
    data["nextCGPA_above_cutoff"] = np.nan
    data.loc[data.nextCGPA >= 0, "nextCGPA_above_cutoff"] = 1
    data.loc[data.nextCGPA < 0, "nextCGPA_above_cutoff"] = 0

    # Mask total credits for students whose next GPA is missing.
    # (Idiom fix: use isna() instead of `np.isnan(...) == True`.)
    data["total_credits_year2"] = data["totcredits_year2"]
    data.loc[data["nextGPA"].isna(), "total_credits_year2"] = np.nan

    # Campus-specific probation cutoff (campus 3 uses 1.6 instead of 1.5).
    data["cutoff"] = 1.5
    data.loc[data.loc_campus3 == 1, "cutoff"] = 1.6
    return data
def calculate_bin_frequency(data, bins):
    """
    Calculate the frequency of each bin value in a dataframe column.

    Args:
    ------
        data (pd.DataFrame): dataframe that contains the raw data.
        bins (str): name of the column whose value frequencies are counted.

    Returns:
    ---------
        bin_frequency (pd.DataFrame): columns 'bins' (sorted bin values),
        'freq' (occurrence counts) and a constant column 'const'.
    """
    counts = data[bins].value_counts()
    # Build the frame explicitly rather than reset_index()+rename(): the
    # column names produced by Series.value_counts().reset_index() changed
    # in pandas 2.0, which silently broke the old rename-based approach.
    bin_frequency = pd.DataFrame({"bins": counts.index, "freq": counts.to_numpy()})
    bin_frequency = bin_frequency.sort_values(by=["bins"])
    bin_frequency["const"] = 1
    return bin_frequency
def create_groups_dict(data, keys, columns):
    """
    Split a dataset into subsets selected by dummy columns.

    Args:
    ------
        data (pd.DataFrame): dataset that should be split into subsets.
        keys (list): dictionary keys, one per subset.
        columns (list): dummy columns in *data*; rows where the dummy
            equals 1 end up in the corresponding subset.

    Returns:
    ---------
        subsets (dict): mapping of each key to its subset of *data*.
    """
    subsets = {}
    for position, key in enumerate(keys):
        subsets[key] = data[data[columns[position]] == 1]
    return subsets
def create_predictions(data, outcome, regressors, bandwidth):
    """
    Estimate local linear regressions around the cutoff and predict *outcome*.

    For every evaluation point ('step') on a grid from -1.2 to 1.2, run an OLS
    regression of *outcome* on *regressors* using only observations whose
    distance from the cutoff lies within *bandwidth* of the step, with
    cluster-robust standard errors (clustered on 'clustervar'), and store the
    fitted value at the step itself.

    Args:
    ------
        data (pd.DataFrame): dataset with columns 'dist_from_cut',
            'clustervar', *outcome* and *regressors*.
        outcome (str): name of the dependent variable.
        regressors (list): regressor column names; the prediction assumes the
            order ['const', 'gpalscutoff', 'gpaXgpalscutoff', 'gpaXgpagrcutoff'].
        bandwidth (float): half-width of the estimation window around each step.

    Returns:
    ---------
        predictions_df (pd.DataFrame): one row per step with the regressor
        values at the step and the predicted outcome, rounded to 4 decimals.
    """
    steps = np.arange(-1.2, 1.25, 0.05)
    predictions_df = pd.DataFrame([])
    # Ensure there are no missings in the outcome variable.
    data = data.dropna(subset=[outcome])
    for step in steps:
        # Observations within the bandwidth window around the current step.
        df = data[(data.dist_from_cut >= (step - bandwidth)) &
                  (data.dist_from_cut <= (step + bandwidth))]
        model = sm.regression.linear_model.OLS(
            df[outcome], df[regressors], hasconst=True)
        result = model.fit(cov_type='cluster', cov_kwds={
                           'groups': df['clustervar']})
        # Fill in the regressor values at the evaluation point.
        predictions_df.loc[step, 'dist_from_cut'] = step
        if step < 0:
            predictions_df.loc[step, 'gpalscutoff'] = 1
        else:
            predictions_df.loc[step, 'gpalscutoff'] = 0
        predictions_df.loc[step, 'gpaXgpalscutoff'] = (
            predictions_df.loc[step, 'dist_from_cut']) * predictions_df.loc[step, 'gpalscutoff']
        predictions_df.loc[step, 'gpaXgpagrcutoff'] = (predictions_df.loc[
            step, 'dist_from_cut']) * (1 - predictions_df.loc[step, 'gpalscutoff'])
        predictions_df.loc[step, 'const'] = 1
        # Predict the outcome at the step from the local regression.
        predictions_df.loc[step, 'prediction'] = result.predict(exog=[[
            predictions_df.loc[step, 'const'],
            predictions_df.loc[step, 'gpalscutoff'],
            predictions_df.loc[step, 'gpaXgpalscutoff'],
            predictions_df.loc[step, 'gpaXgpagrcutoff']
        ]])
    # BUG FIX: DataFrame.round() returns a new frame; the original discarded
    # the result (the sibling create_fig3_predictions assigns it correctly).
    return predictions_df.round(4)
def create_bin_frequency_predictions(data, steps, bandwidth):
    """
    Predict bin frequencies from local linear regressions.

    For each evaluation point in *steps*, fit an OLS regression of 'freq' on a
    constant and 'bins' using only bins within *bandwidth* of the step, and
    store the fitted frequency at the step itself.

    Args:
    ------
        data (pd.DataFrame): frame with columns 'bins', 'freq' and 'const'
            (as produced by calculate_bin_frequency).
        steps (array-like): grid of evaluation points.
        bandwidth (float): half-width of the estimation window around each step.

    Returns:
    ---------
        predictions_df (pd.DataFrame): one row per step with 'bins', 'const'
        and the predicted frequency, rounded to 4 decimals.
    """
    predictions_df = pd.DataFrame([])
    for step in steps:
        # Bins within the bandwidth window around the current step.
        df = data[(data.bins >= (step - bandwidth)) &
                  (data.bins <= (step + bandwidth))]
        model = sm.regression.linear_model.OLS(
            df['freq'], df[['const', 'bins']], hasconst=True)
        result = model.fit()
        predictions_df.loc[step, 'bins'] = step
        predictions_df.loc[step, 'const'] = 1
        # Predict the frequency at the step from the local regression.
        predictions_df.loc[step, 'prediction'] = result.predict(
            exog=[[predictions_df.loc[step, 'const'],
                   predictions_df.loc[step, 'bins']]])
    # BUG FIX: DataFrame.round() returns a new frame; the original discarded
    # the result (the sibling create_fig3_predictions assigns it correctly).
    return predictions_df.round(4)
def create_fig3_predictions(groups_dict, regressors, bandwidth):
    """
    Compute predicted outcomes for figure 3.

    For every group in *groups_dict*, run local linear regressions of
    'left_school' on *regressors* at each evaluation point of a grid from
    -1.2 to 1.2 (cluster-robust standard errors on 'clustervar'), and store
    the fitted value at each point.

    Args:
        groups_dict (dict): group name -> pd.DataFrame with columns
            'dist_from_cut', 'left_school', 'clustervar' and *regressors*.
        regressors (list): regressor column names; the prediction assumes the
            order ['const', 'gpalscutoff', 'gpaXgpalscutoff', 'gpaXgpagrcutoff'].
        bandwidth (float): half-width of the estimation window around each step.

    Returns:
        dict: group name -> prediction dataframe (rounded to 4 decimals).
    """
    predictions_groups_dict = {}
    # Loop through groups:
    for group in groups_dict:
        steps = np.arange(-1.2, 1.25, 0.05)
        predictions_df = pd.DataFrame([])
        # Loop through bins or 'steps'.
        for step in steps:
            # Observations within the bandwidth window around the step.
            df = groups_dict[group][(groups_dict[group].dist_from_cut >= (step - bandwidth)) &
                                    (groups_dict[group].dist_from_cut <= (step + bandwidth))]
            # Run regression with all values in the range specified above.
            model = sm.regression.linear_model.OLS(
                df['left_school'], df[regressors], hasconst=True)
            result = model.fit(cov_type='cluster', cov_kwds={
                               'groups': df['clustervar']})
            # Fill in the regressor values at the evaluation point.
            predictions_df.loc[step, 'dist_from_cut'] = step
            if step < 0:
                predictions_df.loc[step, 'gpalscutoff'] = 1
            else:
                predictions_df.loc[step, 'gpalscutoff'] = 0
            predictions_df.loc[step, 'gpaXgpalscutoff'] = (
                predictions_df.loc[step, 'dist_from_cut']) * predictions_df.loc[step, 'gpalscutoff']
            predictions_df.loc[step, 'gpaXgpagrcutoff'] = (
                predictions_df.loc[step, 'dist_from_cut']) * (1 - predictions_df.loc[step, 'gpalscutoff'])
            predictions_df.loc[step, 'const'] = 1
            # Predict the outcome at the step from the local regression
            # and save the value in the prediction dataframe.
            predictions_df.loc[step, 'prediction'] = result.predict(exog=[[
                predictions_df.loc[step, 'const'],
                predictions_df.loc[step, 'gpalscutoff'],
                predictions_df.loc[step, 'gpaXgpalscutoff'],
                predictions_df.loc[step, 'gpaXgpagrcutoff']
            ]])
        predictions_df = predictions_df.round(4)
        # Save the predictions for all groups in a dictionary.
        predictions_groups_dict[group] = predictions_df
    return predictions_groups_dict
def bootstrap_predictions(n, data, outcome, regressors, bandwidth):
    """
    Bootstrap the RD predictions by resampling the data with replacement.

    Args:
    ------
        n (int): number of bootstrap replications.
        data (pd.DataFrame): dataset passed on to create_predictions.
        outcome (str): name of the dependent variable.
        regressors (list): regressor column names.
        bandwidth (float): bandwidth used for the local regressions.

    Returns:
    ---------
        bootstrap_pred (pd.DataFrame): one column per replication
        ('pred_0', 'pred_1', ...) holding that sample's predicted outcomes.
    """
    bootstrap_pred = pd.DataFrame({})
    for i in range(n):
        # Resample rows with replacement, keeping the original sample size.
        resampled = data.sample(n=len(data), replace=True)
        pred = create_predictions(
            data=resampled, outcome=outcome, regressors=regressors, bandwidth=bandwidth)
        bootstrap_pred['pred_' + str(i)] = pred.prediction
        # FIX: removed the dead statement `i = +1` that ended the original
        # loop body (it assigned +1 to i, immediately overwritten by the loop).
    return bootstrap_pred
def get_confidence_interval(data, lbound, ubound, index_var):
    """
    Compute row-wise percentile bounds from bootstrapped predictions.

    Args:
    ------
        data (pd.DataFrame): bootstrap results, one column per replication.
        lbound (float): lower percentile (e.g. 2.5).
        ubound (float): upper percentile (e.g. 97.5).
        index_var (str): name of the column that receives the index values.

    Returns:
    ---------
        confidence_interval (pd.DataFrame): 'lower_bound' and 'upper_bound'
        per row plus the index column *index_var*.
    """
    confidence_interval = pd.DataFrame({})
    for row_label in data.index:
        row_values = data.loc[row_label, :]
        confidence_interval.loc[row_label, "lower_bound"] = np.percentile(row_values, lbound)
        confidence_interval.loc[row_label, "upper_bound"] = np.percentile(row_values, ubound)
    confidence_interval[index_var] = confidence_interval.index
    return confidence_interval
def bandwidth_sensitivity_summary(
    data, outcome, groups_dict_keys, groups_dict_columns, regressors
):
    """
    Summarize how the RDD estimate reacts to the choice of bandwidth.

    For each bandwidth, the sample is restricted to observations within that
    distance from the cutoff and the RDD is re-estimated for every group.
    Estimates whose p-value is not below 0.1 are blanked out: the p-value is
    replaced by '.' and the probation coefficient by 'x'.

    Args:
    ------
        data (pd.DataFrame): full dataset with a 'dist_from_cut' column.
        outcome (str): name of the outcome variable.
        groups_dict_keys (list): keys of the subsets (see create_groups_dict).
        groups_dict_columns (list): dummy columns defining the subsets.
        regressors (list): regressors used in the RDD estimation.

    Returns:
    ---------
        summary (pd.DataFrame): indexed by (bandwidth, 'probation'/'p-value'),
        one column per group.
    """
    bandwidths = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1, 1.1, 1.2]
    arrays = [
        np.array([0.1, 0.1, 0.2, 0.2, 0.3, 0.3, 0.4, 0.4,
                  0.5, 0.5, 0.6, 0.6, 0.7, 0.7, 0.8, 0.8,
                  0.9, 0.9, 1, 1, 1.1, 1.1, 1.2, 1.2, ]
                 ),
        np.array(["probation", "p-value"] * 12),
    ]
    summary = pd.DataFrame(index=arrays, columns=groups_dict_keys)

    for val in bandwidths:
        sample = data[abs(data["dist_from_cut"]) < val]
        groups_dict = create_groups_dict(
            sample, groups_dict_keys, groups_dict_columns)
        table = estimate_RDD_multiple_datasets(
            groups_dict, groups_dict_keys, outcome, regressors
        )
        summary.loc[(val, "probation"), :] = table["GPA below cutoff (1)"]
        summary.loc[(val, "p-value"), :] = table["P-Value (1)"]
        for i in summary.columns:
            # Idiom fix: `not (... < 0.1)` instead of `(... < 0.1) == False`;
            # NaN p-values are still blanked out, exactly as before.
            if not (summary.loc[(val, "p-value"), i] < 0.1):
                summary.loc[(val, "p-value"), i] = "."
                summary.loc[(val, "probation"), i] = "x"
    return summary
def trim_data(groups_dict, trim_perc, case1, case2):
    """Create trimmed data for upper and lower bound analysis by trimming the top
    or bottom share of students from the control or treatment group.

    * For the lower bound use `case1 = True` and `case2 = False`.
    * For the upper bound use `case1 = False` and `case2 = True`.

    Args:
    --------
        groups_dict (dictionary): datasets to be trimmed, keyed by group name.
        trim_perc (pd.Series/pd.DataFrame/dict): per-group trim share; positive
            values trim the control group, negative values the treatment group,
            zero leaves the group untouched.
        case1 (bool): sort direction used when trimming the control group.
        case2 (bool): sort direction used when trimming the treatment group.

    Returns:
    ---------
        trimmed_dict (dictionary): dictionary holding the trimmed datasets.
    """
    trimmed_dict = {}
    for key in groups_dict.keys():
        data = groups_dict[key].copy()
        control = data[data.dist_from_cut >= 0].copy()
        treat = data[data.dist_from_cut < 0].copy()
        trimamount = float(trim_perc[key])
        if trimamount > 0:
            # Trim the control group: drop the trim share (scaled by the
            # number of students who left school) of students with the most
            # extreme nextGPA values.
            n = round(len(control[control.left_school == 1]) * trimamount)
            control.sort_values("nextGPA", inplace=True, ascending=case1)
            trimmed_students = control.iloc[0:n]
            trimmed_students_ids = list(trimmed_students.identifier)
            trimmed_control = control[
                ~control.identifier.isin(trimmed_students_ids)
            ]
            df = pd.concat([trimmed_control, treat], axis=0)
        elif trimamount < 0:
            # Negative trim amount: trim the treatment group instead of the
            # control group.
            trimamount = abs(trimamount)
            n = round(len(treat[treat.left_school == 1]) * trimamount)
            treat.sort_values("nextGPA", inplace=True, ascending=case2)
            trimmed_students = treat.iloc[0:n]
            trimmed_students_ids = list(trimmed_students.identifier)
            trimmed_treat = treat[~treat.identifier.isin(trimmed_students_ids)]
            df = pd.concat([trimmed_treat, control], axis=0)
        else:
            # BUG FIX: a trim amount of exactly 0 previously left `df` unset,
            # raising NameError (or silently reusing the previous group's
            # frame). Keep the group untrimmed instead.
            df = data
        trimmed_dict[key] = df
    return trimmed_dict
"pandas.DataFrame",
"numpy.isnan",
"numpy.percentile",
"statsmodels.regression.linear_model.OLS",
"numpy.arange",
"numpy.array",
"pandas.concat"
] | [((2753, 2780), 'numpy.arange', 'np.arange', (['(-1.2)', '(1.25)', '(0.05)'], {}), '(-1.2, 1.25, 0.05)\n', (2762, 2780), True, 'import numpy as np\n'), ((2802, 2818), 'pandas.DataFrame', 'pd.DataFrame', (['[]'], {}), '([])\n', (2814, 2818), True, 'import pandas as pd\n'), ((4655, 4671), 'pandas.DataFrame', 'pd.DataFrame', (['[]'], {}), '([])\n', (4667, 4671), True, 'import pandas as pd\n'), ((8101, 8117), 'pandas.DataFrame', 'pd.DataFrame', (['{}'], {}), '({})\n', (8113, 8117), True, 'import pandas as pd\n'), ((8604, 8620), 'pandas.DataFrame', 'pd.DataFrame', (['{}'], {}), '({})\n', (8616, 8620), True, 'import pandas as pd\n'), ((9627, 9679), 'pandas.DataFrame', 'pd.DataFrame', ([], {'index': 'arrays', 'columns': 'groups_dict_keys'}), '(index=arrays, columns=groups_dict_keys)\n', (9639, 9679), True, 'import pandas as pd\n'), ((3195, 3269), 'statsmodels.regression.linear_model.OLS', 'sm.regression.linear_model.OLS', (['df[outcome]', 'df[regressors]'], {'hasconst': '(True)'}), '(df[outcome], df[regressors], hasconst=True)\n', (3225, 3269), True, 'import statsmodels as sm\n'), ((4929, 5014), 'statsmodels.regression.linear_model.OLS', 'sm.regression.linear_model.OLS', (["df['freq']", "df[['const', 'bins']]"], {'hasconst': '(True)'}), "(df['freq'], df[['const', 'bins']], hasconst=True\n )\n", (4959, 5014), True, 'import statsmodels as sm\n'), ((5869, 5896), 'numpy.arange', 'np.arange', (['(-1.2)', '(1.25)', '(0.05)'], {}), '(-1.2, 1.25, 0.05)\n', (5878, 5896), True, 'import numpy as np\n'), ((5922, 5938), 'pandas.DataFrame', 'pd.DataFrame', (['[]'], {}), '([])\n', (5934, 5938), True, 'import pandas as pd\n'), ((8698, 8735), 'numpy.percentile', 'np.percentile', (['data.loc[i, :]', 'lbound'], {}), '(data.loc[i, :], lbound)\n', (8711, 8735), True, 'import numpy as np\n'), ((8855, 8892), 'numpy.percentile', 'np.percentile', (['data.loc[i, :]', 'ubound'], {}), '(data.loc[i, :], ubound)\n', (8868, 8892), True, 'import numpy as np\n'), ((9373, 9504), 'numpy.array', 
'np.array', (['[0.1, 0.1, 0.2, 0.2, 0.3, 0.3, 0.4, 0.4, 0.5, 0.5, 0.6, 0.6, 0.7, 0.7, 0.8,\n 0.8, 0.9, 0.9, 1, 1, 1.1, 1.1, 1.2, 1.2]'], {}), '([0.1, 0.1, 0.2, 0.2, 0.3, 0.3, 0.4, 0.4, 0.5, 0.5, 0.6, 0.6, 0.7, \n 0.7, 0.8, 0.8, 0.9, 0.9, 1, 1, 1.1, 1.1, 1.2, 1.2])\n', (9381, 9504), True, 'import numpy as np\n'), ((9565, 9604), 'numpy.array', 'np.array', (["(['probation', 'p-value'] * 12)"], {}), "(['probation', 'p-value'] * 12)\n", (9573, 9604), True, 'import numpy as np\n'), ((6347, 6432), 'statsmodels.regression.linear_model.OLS', 'sm.regression.linear_model.OLS', (["df['left_school']", 'df[regressors]'], {'hasconst': '(True)'}), "(df['left_school'], df[regressors], hasconst=True\n )\n", (6377, 6432), True, 'import statsmodels as sm\n'), ((12305, 12348), 'pandas.concat', 'pd.concat', (['[trimmed_control, treat]'], {'axis': '(0)'}), '([trimmed_control, treat], axis=0)\n', (12314, 12348), True, 'import pandas as pd\n'), ((1087, 1109), 'numpy.isnan', 'np.isnan', (['data.nextGPA'], {}), '(data.nextGPA)\n', (1095, 1109), True, 'import numpy as np\n'), ((12913, 12956), 'pandas.concat', 'pd.concat', (['[trimmed_treat, control]'], {'axis': '(0)'}), '([trimmed_treat, control], axis=0)\n', (12922, 12956), True, 'import pandas as pd\n')] |
import torch
import torch.nn as nn
import torch.nn.utils
import torch.nn.functional as F
from torch.autograd import Variable
import torch.nn.functional as F
import numpy as np
from torch.nn.init import xavier_normal_
from transformers import RobertaModel
import random
class RelationExtractor(nn.Module):
def __init__(self, embedding_dim, relation_dim, num_entities, pretrained_embeddings, device,
entdrop=0.0, reldrop=0.0, scoredrop=0.0, l3_reg=0.0, model='ComplEx', ls=0.0, do_batch_norm=True, freeze=True):
super(RelationExtractor, self).__init__()
self.device = device
self.model = model
self.freeze = freeze
self.label_smoothing = ls
self.l3_reg = l3_reg
self.do_batch_norm = do_batch_norm
if not self.do_batch_norm:
print('Not doing batch norm')
self.roberta_pretrained_weights = 'roberta-base'
self.roberta_model = RobertaModel.from_pretrained('/sdb/xmh/Projects/Pytorch/EmbedKGQA/roberta-base')
for param in self.roberta_model.parameters():
param.requires_grad = True
if self.model == 'DistMult':
multiplier = 1
self.getScores = self.DistMult
elif self.model == 'SimplE':
multiplier = 2
self.getScores = self.SimplE
elif self.model == 'ComplEx':
multiplier = 2
self.getScores = self.ComplEx
elif self.model == 'TuckER':
# W_torch = torch.from_numpy(np.load(w_matrix))
# self.W = nn.Parameter(
# torch.Tensor(W_torch),
# requires_grad = not self.freeze
# )
self.W = nn.Parameter(torch.tensor(np.random.uniform(-1, 1, (relation_dim, relation_dim, relation_dim)),
dtype=torch.float, device="cuda", requires_grad=True))
multiplier = 1
self.getScores = self.TuckER
elif self.model == 'RESCAL':
self.getScores = self.RESCAL
multiplier = 1
else:
print('Incorrect model specified:', self.model)
exit(0)
print('Model is', self.model)
self.hidden_dim = 768
self.relation_dim = relation_dim * multiplier
if self.model == 'RESCAL':
self.relation_dim = relation_dim * relation_dim
self.num_entities = num_entities
# self.loss = torch.nn.BCELoss(reduction='sum')
self.loss = self.kge_loss
# best: all dropout 0
self.rel_dropout = torch.nn.Dropout(reldrop)
self.ent_dropout = torch.nn.Dropout(entdrop)
self.score_dropout = torch.nn.Dropout(scoredrop)
self.fcnn_dropout = torch.nn.Dropout(0.1)
# self.pretrained_embeddings = pretrained_embeddings
# random.shuffle(pretrained_embeddings)
# print(pretrained_embeddings[0])
print('Frozen:', self.freeze)
self.embedding = nn.Embedding.from_pretrained(torch.stack(pretrained_embeddings, dim=0), freeze=self.freeze)
# self.embedding = nn.Embedding.from_pretrained(torch.FloatTensor(pretrained_embeddings), freeze=self.freeze)
print(self.embedding.weight.shape)
# self.embedding = nn.Embedding(self.num_entities, self.relation_dim)
# self.embedding.weight.requires_grad = False
# xavier_normal_(self.embedding.weight.data)
self.mid1 = 512
self.mid2 = 512
self.mid3 = 512
self.mid4 = 512
# self.lin1 = nn.Linear(self.hidden_dim, self.mid1)
# self.lin2 = nn.Linear(self.mid1, self.mid2)
# self.lin3 = nn.Linear(self.mid2, self.mid3)
# self.lin4 = nn.Linear(self.mid3, self.mid4)
# self.hidden2rel = nn.Linear(self.mid4, self.relation_dim)
self.hidden2rel = nn.Linear(self.hidden_dim, self.relation_dim)
self.hidden2rel_base = nn.Linear(self.mid2, self.relation_dim)
if self.model in ['DistMult', 'TuckER', 'RESCAL', 'SimplE']:
self.bn0 = torch.nn.BatchNorm1d(self.embedding.weight.size(1))
self.bn2 = torch.nn.BatchNorm1d(self.embedding.weight.size(1))
else:
self.bn0 = torch.nn.BatchNorm1d(multiplier)
self.bn2 = torch.nn.BatchNorm1d(multiplier)
self.logsoftmax = torch.nn.LogSoftmax(dim=-1)
self._klloss = torch.nn.KLDivLoss(reduction='sum')
def set_bn_eval(self):
self.bn0.eval()
self.bn2.eval()
def kge_loss(self, scores, targets):
# loss = torch.mean(scores*targets)
return self._klloss(
F.log_softmax(scores, dim=1), F.normalize(targets.float(), p=1, dim=1)
)
def applyNonLinear(self, outputs):
# outputs = self.fcnn_dropout(self.lin1(outputs))
# outputs = F.relu(outputs)
# outputs = self.fcnn_dropout(self.lin2(outputs))
# outputs = F.relu(outputs)
# outputs = self.lin3(outputs)
# outputs = F.relu(outputs)
# outputs = self.lin4(outputs)
# outputs = F.relu(outputs)
outputs = self.hidden2rel(outputs)
# outputs = self.hidden2rel_base(outputs)
return outputs
def TuckER(self, head, relation):
head = self.bn0(head)
head = self.ent_dropout(head)
x = head.view(-1, 1, head.size(1))
W_mat = torch.mm(relation, self.W.view(relation.size(1), -1))
W_mat = W_mat.view(-1, head.size(1), head.size(1))
W_mat = self.rel_dropout(W_mat)
x = torch.bmm(x, W_mat)
x = x.view(-1, head.size(1))
x = self.bn2(x)
x = self.score_dropout(x)
x = torch.mm(x, self.embedding.weight.transpose(1,0))
pred = torch.sigmoid(x)
return pred
def RESCAL(self, head, relation):
head = self.bn0(head)
head = self.ent_dropout(head)
ent_dim = head.size(1)
head = head.view(-1, 1, ent_dim)
relation = relation.view(-1, ent_dim, ent_dim)
relation = self.rel_dropout(relation)
x = torch.bmm(head, relation)
x = x.view(-1, ent_dim)
x = self.bn2(x)
x = self.score_dropout(x)
x = torch.mm(x, self.embedding.weight.transpose(1,0))
pred = torch.sigmoid(x)
return pred
def DistMult(self, head, relation):
head = self.bn0(head)
head = self.ent_dropout(head)
relation = self.rel_dropout(relation)
s = head * relation
s = self.bn2(s)
s = self.score_dropout(s)
ans = torch.mm(s, self.embedding.weight.transpose(1,0))
pred = torch.sigmoid(ans)
return pred
def SimplE(self, head, relation):
head = self.bn0(head)
head = self.ent_dropout(head)
relation = self.rel_dropout(relation)
s = head * relation
s_head, s_tail = torch.chunk(s, 2, dim=1)
s = torch.cat([s_tail, s_head], dim=1)
s = self.bn2(s)
s = self.score_dropout(s)
s = torch.mm(s, self.embedding.weight.transpose(1,0))
s = 0.5 * s
pred = torch.sigmoid(s)
return pred
def ComplEx(self, head, relation):
head = torch.stack(list(torch.chunk(head, 2, dim=1)), dim=1)
if self.do_batch_norm:
head = self.bn0(head)
head = self.ent_dropout(head)
relation = self.rel_dropout(relation)
head = head.permute(1, 0, 2)
re_head = head[0]
im_head = head[1]
re_relation, im_relation = torch.chunk(relation, 2, dim=1)
re_tail, im_tail = torch.chunk(self.embedding.weight, 2, dim =1)
re_score = re_head * re_relation - im_head * im_relation
im_score = re_head * im_relation + im_head * re_relation
score = torch.stack([re_score, im_score], dim=1)
if self.do_batch_norm:
score = self.bn2(score)
score = self.score_dropout(score)
score = score.permute(1, 0, 2)
re_score = score[0]
im_score = score[1]
score = torch.mm(re_score, re_tail.transpose(1,0)) + torch.mm(im_score, im_tail.transpose(1,0))
# pred = torch.sigmoid(score)
pred = score
return pred
def getQuestionEmbedding(self, question_tokenized, attention_mask):
roberta_last_hidden_states = self.roberta_model(question_tokenized, attention_mask=attention_mask)[0]
states = roberta_last_hidden_states.transpose(1,0)
cls_embedding = states[0]
question_embedding = cls_embedding
# question_embedding = torch.mean(roberta_last_hidden_states, dim=1)
return question_embedding
def forward(self, question_tokenized, attention_mask, p_head, p_tail):
question_embedding = self.getQuestionEmbedding(question_tokenized, attention_mask)
rel_embedding = self.applyNonLinear(question_embedding)
p_head = self.embedding(p_head)
pred = self.getScores(p_head, rel_embedding) # 通过ComplEx进行链接预测
actual = p_tail
if self.label_smoothing:
actual = ((1.0-self.label_smoothing)*actual) + (1.0/actual.size(1))
loss = self.loss(pred, actual)
if not self.freeze:
if self.l3_reg:
norm = torch.norm(self.embedding.weight, p=3, dim=-1)
loss = loss + self.l3_reg * torch.sum(norm)
return loss
def get_score_ranked(self, head, question_tokenized, attention_mask):
question_embedding = self.getQuestionEmbedding(question_tokenized.unsqueeze(0), attention_mask.unsqueeze(0))
rel_embedding = self.applyNonLinear(question_embedding)
head = self.embedding(head).unsqueeze(0)
scores = self.getScores(head, rel_embedding)
# top2 = torch.topk(scores, k=2, largest=True, sorted=True)
# return top2
return scores
| [
"torch.nn.Dropout",
"numpy.random.uniform",
"torch.bmm",
"torch.stack",
"torch.nn.LogSoftmax",
"torch.norm",
"torch.nn.KLDivLoss",
"torch.nn.BatchNorm1d",
"torch.cat",
"transformers.RobertaModel.from_pretrained",
"torch.sigmoid",
"torch.nn.functional.log_softmax",
"torch.nn.Linear",
"torch... | [((925, 1010), 'transformers.RobertaModel.from_pretrained', 'RobertaModel.from_pretrained', (['"""/sdb/xmh/Projects/Pytorch/EmbedKGQA/roberta-base"""'], {}), "('/sdb/xmh/Projects/Pytorch/EmbedKGQA/roberta-base'\n )\n", (953, 1010), False, 'from transformers import RobertaModel\n'), ((2551, 2576), 'torch.nn.Dropout', 'torch.nn.Dropout', (['reldrop'], {}), '(reldrop)\n', (2567, 2576), False, 'import torch\n'), ((2604, 2629), 'torch.nn.Dropout', 'torch.nn.Dropout', (['entdrop'], {}), '(entdrop)\n', (2620, 2629), False, 'import torch\n'), ((2659, 2686), 'torch.nn.Dropout', 'torch.nn.Dropout', (['scoredrop'], {}), '(scoredrop)\n', (2675, 2686), False, 'import torch\n'), ((2715, 2736), 'torch.nn.Dropout', 'torch.nn.Dropout', (['(0.1)'], {}), '(0.1)\n', (2731, 2736), False, 'import torch\n'), ((3804, 3849), 'torch.nn.Linear', 'nn.Linear', (['self.hidden_dim', 'self.relation_dim'], {}), '(self.hidden_dim, self.relation_dim)\n', (3813, 3849), True, 'import torch.nn as nn\n'), ((3881, 3920), 'torch.nn.Linear', 'nn.Linear', (['self.mid2', 'self.relation_dim'], {}), '(self.mid2, self.relation_dim)\n', (3890, 3920), True, 'import torch.nn as nn\n'), ((4296, 4323), 'torch.nn.LogSoftmax', 'torch.nn.LogSoftmax', ([], {'dim': '(-1)'}), '(dim=-1)\n', (4315, 4323), False, 'import torch\n'), ((4355, 4390), 'torch.nn.KLDivLoss', 'torch.nn.KLDivLoss', ([], {'reduction': '"""sum"""'}), "(reduction='sum')\n", (4373, 4390), False, 'import torch\n'), ((5501, 5520), 'torch.bmm', 'torch.bmm', (['x', 'W_mat'], {}), '(x, W_mat)\n', (5510, 5520), False, 'import torch\n'), ((5696, 5712), 'torch.sigmoid', 'torch.sigmoid', (['x'], {}), '(x)\n', (5709, 5712), False, 'import torch\n'), ((6025, 6050), 'torch.bmm', 'torch.bmm', (['head', 'relation'], {}), '(head, relation)\n', (6034, 6050), False, 'import torch\n'), ((6221, 6237), 'torch.sigmoid', 'torch.sigmoid', (['x'], {}), '(x)\n', (6234, 6237), False, 'import torch\n'), ((6578, 6596), 'torch.sigmoid', 'torch.sigmoid', (['ans'], {}), 
'(ans)\n', (6591, 6596), False, 'import torch\n'), ((6827, 6851), 'torch.chunk', 'torch.chunk', (['s', '(2)'], {'dim': '(1)'}), '(s, 2, dim=1)\n', (6838, 6851), False, 'import torch\n'), ((6864, 6898), 'torch.cat', 'torch.cat', (['[s_tail, s_head]'], {'dim': '(1)'}), '([s_tail, s_head], dim=1)\n', (6873, 6898), False, 'import torch\n'), ((7054, 7070), 'torch.sigmoid', 'torch.sigmoid', (['s'], {}), '(s)\n', (7067, 7070), False, 'import torch\n'), ((7477, 7508), 'torch.chunk', 'torch.chunk', (['relation', '(2)'], {'dim': '(1)'}), '(relation, 2, dim=1)\n', (7488, 7508), False, 'import torch\n'), ((7536, 7580), 'torch.chunk', 'torch.chunk', (['self.embedding.weight', '(2)'], {'dim': '(1)'}), '(self.embedding.weight, 2, dim=1)\n', (7547, 7580), False, 'import torch\n'), ((7730, 7770), 'torch.stack', 'torch.stack', (['[re_score, im_score]'], {'dim': '(1)'}), '([re_score, im_score], dim=1)\n', (7741, 7770), False, 'import torch\n'), ((2981, 3022), 'torch.stack', 'torch.stack', (['pretrained_embeddings'], {'dim': '(0)'}), '(pretrained_embeddings, dim=0)\n', (2992, 3022), False, 'import torch\n'), ((4178, 4210), 'torch.nn.BatchNorm1d', 'torch.nn.BatchNorm1d', (['multiplier'], {}), '(multiplier)\n', (4198, 4210), False, 'import torch\n'), ((4234, 4266), 'torch.nn.BatchNorm1d', 'torch.nn.BatchNorm1d', (['multiplier'], {}), '(multiplier)\n', (4254, 4266), False, 'import torch\n'), ((4594, 4622), 'torch.nn.functional.log_softmax', 'F.log_softmax', (['scores'], {'dim': '(1)'}), '(scores, dim=1)\n', (4607, 4622), True, 'import torch.nn.functional as F\n'), ((7165, 7192), 'torch.chunk', 'torch.chunk', (['head', '(2)'], {'dim': '(1)'}), '(head, 2, dim=1)\n', (7176, 7192), False, 'import torch\n'), ((9199, 9245), 'torch.norm', 'torch.norm', (['self.embedding.weight'], {'p': '(3)', 'dim': '(-1)'}), '(self.embedding.weight, p=3, dim=-1)\n', (9209, 9245), False, 'import torch\n'), ((9290, 9305), 'torch.sum', 'torch.sum', (['norm'], {}), '(norm)\n', (9299, 9305), False, 'import 
torch\n'), ((1707, 1775), 'numpy.random.uniform', 'np.random.uniform', (['(-1)', '(1)', '(relation_dim, relation_dim, relation_dim)'], {}), '(-1, 1, (relation_dim, relation_dim, relation_dim))\n', (1724, 1775), True, 'import numpy as np\n')] |
import numpy as np
from .base import AbstractABC
from .utils import get_random_other_index
class ABC(AbstractABC):
"""
Artificial Bee Colony (ABC) implementation as defined in [1].
[1] Karaboga, Dervis. An idea based on honey bee swarm for numerical optimization.
Vol. 200. Technical report-tr06, Erciyes university, engineering faculty,
computer engineering department, 2005.
"""
def __init__(
self,
population_size,
fitness_fn,
init_fn,
callback_fn=None,
scouting_threshold=None,
termination_threshold=1e-12,
enforce_bounds=None,
):
"""
Args:
population_size
Number of "worker bees" to use during the search. The algorithm will keep track
of this many solutions.
fitness_fn
Function to be minimized, with following signature:
```
Args:
x: solutions in flight, np.ndarray of shape (population_size, solution_dimension)
Returns:
Fitness evaluations: np.ndarray of shape (population_size,)
```
init_fn
Function returning the set of initial solutions, with signature:
```
Args:
population_size
Number of new solutions to initialize
Returns:
Initial set of solutions, np.ndarray of shape (population_size, solution_dimension)
```
callback_fn
User defined function called first after initialization, then at the end of each
generation. Intended for logging purpose.
Function ought to have the following signature:
```
Args:
abc: the parent ABC instance (i.e. self)
logger: a python Logger instance
```
scouting_threshold
Number of updates without improvement after which a solution is replaced by a new one.
Defaults to population_size * dimension.
termination_threshold
Stop if |best_fitness - worse_fitness| < termination_threshold
Defaults to 1e-12
enforce_bounds
2D list, the min and max for each dimension, e.g. [[-1, 1], [None, 2], [0, 1]].
Ensures the fitness function is never called with out of bounds values.
Defaults to not enforcing bounds.
"""
super().__init__(
population_size,
fitness_fn,
init_fn,
callback_fn,
scouting_threshold,
termination_threshold,
)
if enforce_bounds is not None and not isinstance(enforce_bounds, (np.ndarray, list)):
raise ValueError('Bounds must be a list or numpy array')
elif enforce_bounds is not None and np.ndim(enforce_bounds) != 2:
ndim = np.ndim(enforce_bounds)
raise ValueError(f'Bounds must be a 2D array but got an array of dim {ndim}')
self._enforce_bounds = enforce_bounds is not None
if self._enforce_bounds:
self._clip_min = np.array([m[0] for m in enforce_bounds])
self._clip_max = np.array([m[1] for m in enforce_bounds])
def update_solutions(self, solution_indices_to_update):
new_solutions = []
for idx in solution_indices_to_update:
solution = self.solutions[idx]
dim_i = np.random.randint(0, self.dimension)
sol_j = get_random_other_index(idx, self.population_size)
new_solution = np.copy(solution)
other_solution = self.solutions[sol_j]
eps = np.random.uniform(-1, 1)
new_solution[dim_i] += eps * (solution[dim_i] - other_solution[dim_i])
new_solutions.append(new_solution)
if self._enforce_bounds:
return np.clip(new_solutions, self._clip_min, self._clip_max)
else:
return np.stack(new_solutions)
| [
"numpy.stack",
"numpy.random.uniform",
"numpy.copy",
"numpy.ndim",
"numpy.clip",
"numpy.random.randint",
"numpy.array"
] | [((3158, 3198), 'numpy.array', 'np.array', (['[m[0] for m in enforce_bounds]'], {}), '([m[0] for m in enforce_bounds])\n', (3166, 3198), True, 'import numpy as np\n'), ((3228, 3268), 'numpy.array', 'np.array', (['[m[1] for m in enforce_bounds]'], {}), '([m[1] for m in enforce_bounds])\n', (3236, 3268), True, 'import numpy as np\n'), ((3468, 3504), 'numpy.random.randint', 'np.random.randint', (['(0)', 'self.dimension'], {}), '(0, self.dimension)\n', (3485, 3504), True, 'import numpy as np\n'), ((3603, 3620), 'numpy.copy', 'np.copy', (['solution'], {}), '(solution)\n', (3610, 3620), True, 'import numpy as np\n'), ((3691, 3715), 'numpy.random.uniform', 'np.random.uniform', (['(-1)', '(1)'], {}), '(-1, 1)\n', (3708, 3715), True, 'import numpy as np\n'), ((3900, 3954), 'numpy.clip', 'np.clip', (['new_solutions', 'self._clip_min', 'self._clip_max'], {}), '(new_solutions, self._clip_min, self._clip_max)\n', (3907, 3954), True, 'import numpy as np\n'), ((3988, 4011), 'numpy.stack', 'np.stack', (['new_solutions'], {}), '(new_solutions)\n', (3996, 4011), True, 'import numpy as np\n'), ((2923, 2946), 'numpy.ndim', 'np.ndim', (['enforce_bounds'], {}), '(enforce_bounds)\n', (2930, 2946), True, 'import numpy as np\n'), ((2874, 2897), 'numpy.ndim', 'np.ndim', (['enforce_bounds'], {}), '(enforce_bounds)\n', (2881, 2897), True, 'import numpy as np\n')] |
import numpy as np
from .model import Model
from ..util import add_intersect, sigmoid
class LogisticRegression(Model):
'''
used when dependent variable (y)is categorical
'''
#no regularization
def __init__(self, epochs=1000, alph=0.3):
super(LogisticRegression, self).__init__()
self.epochs=epochs
self.alph=alph #taxa de aprendizagem
self.theta=None #theta are randomly initialized values
def fit(self, dataset): #using x_train and y_train to train the model
X,y = dataset.getXy()
X = add_intersect(X) #vai pôr X e uma coluna de uns com o mesmo no de linhas lado a lado
self.X=X
self.y=y
self.train_gd(X,y)
self.is_fitted=True
def train_gd(self, X, y): #gradient descendent: update theta values until cost function reaches its minimum
n = X.shape[1] #no de colunas
self.history={}
self.theta=np.zeros(n)
for epoch in range(self.epochs):
z = np.dot(self.theta, X.T)
h = sigmoid(z) #predicted value
gradiente = np.dot(X.T, (h-y)) / y.size
self.theta -= self.alph * gradiente
self.history[epoch] = [self.theta[:], self.cost()]
def predict(self, X):
assert self.is_fitted, 'model must be fitted before predicting'
_x = np.hstack(([1],X))
z = np.dot(self.theta, _x)
h = sigmoid(z) #predicted value
if h <0.5: #threshold value
return 0
else:
return 1
def cost(self, X=None, y=None, theta=None): #dá a medida de quão longe o valor previsto está do output original
X=add_intersect(X) if X is not None else self.X
y=y if y is not None else self.y
theta=theta if theta is not None else self.theta
z = np.dot(self.theta, self.X.T)
y1 = sigmoid(z) #predicted value
return -(1/len(self.X)) * np.sum(self.y*np.log(y1) + (1-self.y)*np.log(1-y1)) #negative function is to maximize the probability by minimizing loss function.
#Decreasing the cost will increase the maximum likelihood
class LogisticRegressionReg(LogisticRegression):
#with L2 regularization, aka, Ridge Regression
#solves overfitting by penalizing the cost function
#it adds a penalty term in the cost function
#lambda is the regularization parameter, which controls the trade-off between fitting the training data well vs keeping the params small to avoid overfitting.
def __init__(self,epochs = 1000, alph=0.3,lambd = 1):
super(LogisticRegressionReg, self).__init__(epochs=epochs, alph=alph)
self.lambd = lambd
def fit(self, dataset): #using x_train and y_train to train the model
X,y = dataset.getXy()
X = add_intersect(X) #vai pôr X e uma coluna de uns com o mesmo no de linhas lado a lado
self.X=X
self.y=y
self.train_gd(X,y)
self.is_fitted=True
def train_gd(self, X, y):
m = X.shape[0] #no de linhas
n = X.shape[1] #no de colunas
self.history ={}
self.theta = np.zeros(n)
lambdas = np.full(m, self.lambd)
lambdas[0] = 0
for epoch in range(self.epochs):
z = np.dot(X, self.theta)
h = sigmoid(z) #predicted value
gradiente = np.dot(X.T, (h-y)) / y.size
gradiente[1:] = gradiente[1:] + (self.lambd/m) * self.theta[1:]
self.theta -= self.alph * gradiente
self.history[epoch] = [self.theta[:], self.cost()]
def predict(self, X):
assert self.is_fitted, 'model must be fitted before predicting'
_x = np.hstack(([1],X))
z = np.dot(self.theta, _x)
h = sigmoid(z) #predicted value
if h <0.5:
return 0
else:
return 1
def cost(self, X=None, y=None, theta=None):
# it adds a penalty term in the cost function
X=add_intersect(X) if X is not None else self.X
y=y if y is not None else self.y
theta=theta if theta is not None else self.theta
z = np.dot(self.theta, self.X.T)
y1 = sigmoid(z) #predicted value
penalizacao = np.dot(self.theta[1:],self.theta[1:]) * self.lambd / (2*len(self.X))
cost = -(1/len(self.X)) * np.sum(self.y*np.log(y1) + (1-self.y)*np.log(1-y1)) + penalizacao
return cost | [
"numpy.full",
"numpy.log",
"numpy.zeros",
"numpy.hstack",
"numpy.dot"
] | [((928, 939), 'numpy.zeros', 'np.zeros', (['n'], {}), '(n)\n', (936, 939), True, 'import numpy as np\n'), ((1340, 1359), 'numpy.hstack', 'np.hstack', (['([1], X)'], {}), '(([1], X))\n', (1349, 1359), True, 'import numpy as np\n'), ((1371, 1393), 'numpy.dot', 'np.dot', (['self.theta', '_x'], {}), '(self.theta, _x)\n', (1377, 1393), True, 'import numpy as np\n'), ((1810, 1838), 'numpy.dot', 'np.dot', (['self.theta', 'self.X.T'], {}), '(self.theta, self.X.T)\n', (1816, 1838), True, 'import numpy as np\n'), ((3164, 3175), 'numpy.zeros', 'np.zeros', (['n'], {}), '(n)\n', (3172, 3175), True, 'import numpy as np\n'), ((3194, 3216), 'numpy.full', 'np.full', (['m', 'self.lambd'], {}), '(m, self.lambd)\n', (3201, 3216), True, 'import numpy as np\n'), ((3714, 3733), 'numpy.hstack', 'np.hstack', (['([1], X)'], {}), '(([1], X))\n', (3723, 3733), True, 'import numpy as np\n'), ((3745, 3767), 'numpy.dot', 'np.dot', (['self.theta', '_x'], {}), '(self.theta, _x)\n', (3751, 3767), True, 'import numpy as np\n'), ((4153, 4181), 'numpy.dot', 'np.dot', (['self.theta', 'self.X.T'], {}), '(self.theta, self.X.T)\n', (4159, 4181), True, 'import numpy as np\n'), ((997, 1020), 'numpy.dot', 'np.dot', (['self.theta', 'X.T'], {}), '(self.theta, X.T)\n', (1003, 1020), True, 'import numpy as np\n'), ((3297, 3318), 'numpy.dot', 'np.dot', (['X', 'self.theta'], {}), '(X, self.theta)\n', (3303, 3318), True, 'import numpy as np\n'), ((1089, 1107), 'numpy.dot', 'np.dot', (['X.T', '(h - y)'], {}), '(X.T, h - y)\n', (1095, 1107), True, 'import numpy as np\n'), ((3387, 3405), 'numpy.dot', 'np.dot', (['X.T', '(h - y)'], {}), '(X.T, h - y)\n', (3393, 3405), True, 'import numpy as np\n'), ((4245, 4283), 'numpy.dot', 'np.dot', (['self.theta[1:]', 'self.theta[1:]'], {}), '(self.theta[1:], self.theta[1:])\n', (4251, 4283), True, 'import numpy as np\n'), ((1928, 1938), 'numpy.log', 'np.log', (['y1'], {}), '(y1)\n', (1934, 1938), True, 'import numpy as np\n'), ((1952, 1966), 'numpy.log', 'np.log', (['(1 - 
y1)'], {}), '(1 - y1)\n', (1958, 1966), True, 'import numpy as np\n'), ((4362, 4372), 'numpy.log', 'np.log', (['y1'], {}), '(y1)\n', (4368, 4372), True, 'import numpy as np\n'), ((4386, 4400), 'numpy.log', 'np.log', (['(1 - y1)'], {}), '(1 - y1)\n', (4392, 4400), True, 'import numpy as np\n')] |
import os
import time
import sys
import argparse
import logging
import numpy as np
import yaml
from attrdict import AttrDict
from pprint import pprint
import paddle
import paddle.distributed.fleet as fleet
import paddle.distributed as dist
from paddlenlp.transformers import TransformerModel, CrossEntropyCriterion
sys.path.append("../")
import reader
from util.record import AverageStatistical
FORMAT = '%(asctime)s-%(levelname)s: %(message)s'
logging.basicConfig(level=logging.INFO, format=FORMAT)
logger = logging.getLogger(__name__)
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument(
"--config",
default="../configs/transformer.big.yaml",
type=str,
help="Path of the config file. ")
args = parser.parse_args()
return args
def do_train(args):
paddle.enable_static()
if args.is_distributed:
fleet.init(is_collective=True)
gpu_id = int(os.getenv("FLAGS_selected_gpus", "0"))
places = paddle.CUDAPlace(
gpu_id) if args.use_gpu else paddle.static.cpu_places()
trainer_count = 1 if args.use_gpu else len(places)
else:
if args.use_gpu:
places = paddle.static.cuda_places()
else:
places = paddle.static.cpu_places()
paddle.set_device("cpu")
trainer_count = len(places)
# Set seed for CE
random_seed = eval(str(args.random_seed))
if random_seed is not None:
paddle.seed(random_seed)
# Define data loader
(train_loader), (eval_loader) = reader.create_data_loader(args, places)
train_program = paddle.static.Program()
startup_program = paddle.static.Program()
with paddle.static.program_guard(train_program, startup_program):
src_word = paddle.static.data(
name="src_word", shape=[None, None], dtype="int64")
trg_word = paddle.static.data(
name="trg_word", shape=[None, None], dtype="int64")
lbl_word = paddle.static.data(
name="lbl_word", shape=[None, None, 1], dtype="int64")
# Define model
transformer = TransformerModel(
src_vocab_size=args.src_vocab_size,
trg_vocab_size=args.trg_vocab_size,
max_length=args.max_length + 1,
n_layer=args.n_layer,
n_head=args.n_head,
d_model=args.d_model,
d_inner_hid=args.d_inner_hid,
dropout=args.dropout,
weight_sharing=args.weight_sharing,
bos_id=args.bos_idx,
eos_id=args.eos_idx)
# Define loss
criterion = CrossEntropyCriterion(args.label_smooth_eps, args.bos_idx)
logits = transformer(src_word=src_word, trg_word=trg_word)
sum_cost, avg_cost, token_num = criterion(logits, lbl_word)
scheduler = paddle.optimizer.lr.NoamDecay(
args.d_model, args.warmup_steps, args.learning_rate, last_epoch=0)
# Define optimizer
optimizer = paddle.optimizer.Adam(
learning_rate=scheduler,
beta1=args.beta1,
beta2=args.beta2,
epsilon=float(args.eps),
parameters=transformer.parameters())
if args.is_distributed:
build_strategy = paddle.static.BuildStrategy()
exec_strategy = paddle.static.ExecutionStrategy()
dist_strategy = fleet.DistributedStrategy()
dist_strategy.build_strategy = build_strategy
dist_strategy.execution_strategy = exec_strategy
dist_strategy.fuse_grad_size_in_MB = 16
if args.use_amp:
dist_strategy.amp = True
dist_strategy.amp_configs = {
'custom_white_list': ['softmax', 'layer_norm', 'gelu'],
'init_loss_scaling': args.scale_loss,
}
optimizer = fleet.distributed_optimizer(
optimizer, strategy=dist_strategy)
else:
if args.use_amp:
amp_list = paddle.static.amp.AutoMixedPrecisionLists(
custom_white_list=['softmax', 'layer_norm'],
custom_black_list=['lookup_table_v2'])
optimizer = paddle.static.amp.decorate(
optimizer,
amp_list,
init_loss_scaling=args.scale_loss,
use_dynamic_loss_scaling=True,
use_pure_fp16=args.use_pure_fp16)
optimizer.minimize(avg_cost)
if args.is_distributed:
exe = paddle.static.Executor(places)
else:
exe = paddle.static.Executor()
build_strategy = paddle.static.BuildStrategy()
exec_strategy = paddle.static.ExecutionStrategy()
compiled_train_program = paddle.static.CompiledProgram(
train_program).with_data_parallel(
loss_name=avg_cost.name,
build_strategy=build_strategy,
exec_strategy=exec_strategy)
exe.run(startup_program)
if not args.is_distributed and args.use_amp:
optimizer.amp_init(places[0])
# the best cross-entropy value with label smoothing
loss_normalizer = -(
(1. - args.label_smooth_eps) * np.log(
(1. - args.label_smooth_eps)) + args.label_smooth_eps *
np.log(args.label_smooth_eps / (args.trg_vocab_size - 1) + 1e-20))
step_idx = 0
# For benchmark
reader_cost_avg = AverageStatistical()
batch_cost_avg = AverageStatistical()
batch_ips_avg = AverageStatistical()
for pass_id in range(args.epoch):
batch_id = 0
batch_start = time.time()
pass_start_time = batch_start
for data in train_loader:
# NOTE: used for benchmark and use None as default.
if args.max_iter and step_idx == args.max_iter:
return
if trainer_count == 1:
data = [data]
train_reader_cost = time.time() - batch_start
if args.is_distributed:
outs = exe.run(train_program,
feed=[{
'src_word': data[i][0],
'trg_word': data[i][1],
'lbl_word': data[i][2],
} for i in range(trainer_count)],
fetch_list=[sum_cost.name, token_num.name])
else:
outs = exe.run(compiled_train_program,
feed=[{
'src_word': data[i][0],
'trg_word': data[i][1],
'lbl_word': data[i][2],
} for i in range(trainer_count)],
fetch_list=[sum_cost.name, token_num.name])
scheduler.step()
train_batch_cost = time.time() - batch_start
reader_cost_avg.record(train_reader_cost)
batch_cost_avg.record(train_batch_cost)
batch_ips_avg.record(train_batch_cost, np.asarray(outs[1]).sum())
if step_idx % args.print_step == 0:
sum_cost_val, token_num_val = np.array(outs[0]), np.array(outs[
1])
# Sum the cost from multi-devices
total_sum_cost = sum_cost_val.sum()
total_token_num = token_num_val.sum()
total_avg_cost = total_sum_cost / total_token_num
if step_idx == 0:
logging.info(
"step_idx: %d, epoch: %d, batch: %d, avg loss: %f, "
"normalized loss: %f, ppl: %f" %
(step_idx, pass_id, batch_id, total_avg_cost,
total_avg_cost - loss_normalizer,
np.exp([min(total_avg_cost, 100)])))
else:
train_avg_batch_cost = args.print_step / batch_cost_avg.get_total_time(
)
logging.info(
"step_idx: %d, epoch: %d, batch: %d, avg loss: %f, "
"normalized loss: %f, ppl: %f, avg_speed: %.2f step/s, "
"batch_cost: %.5f sec, reader_cost: %.5f sec, tokens: %d, "
"ips: %.5f words/sec" %
(step_idx, pass_id, batch_id, total_avg_cost,
total_avg_cost - loss_normalizer,
np.exp([min(total_avg_cost, 100)]),
train_avg_batch_cost, batch_cost_avg.get_average(),
reader_cost_avg.get_average(),
batch_ips_avg.get_total_cnt(),
batch_ips_avg.get_average_per_sec()))
reader_cost_avg.reset()
batch_cost_avg.reset()
batch_ips_avg.reset()
if step_idx % args.save_step == 0 and step_idx != 0:
if args.save_model and dist.get_rank() == 0:
model_path = os.path.join(
args.save_model, "step_" + str(step_idx), "transformer")
paddle.static.save(train_program, model_path)
batch_id += 1
step_idx += 1
batch_start = time.time()
if args.save_model and dist.get_rank() == 0:
model_path = os.path.join(args.save_model, "step_final", "transformer")
paddle.static.save(train_program, model_path)
paddle.disable_static()
if __name__ == "__main__":
ARGS = parse_args()
yaml_file = ARGS.config
with open(yaml_file, 'rt') as f:
args = AttrDict(yaml.safe_load(f))
pprint(args)
do_train(args)
| [
"argparse.ArgumentParser",
"paddle.enable_static",
"paddle.distributed.fleet.DistributedStrategy",
"paddle.static.program_guard",
"paddle.static.ExecutionStrategy",
"yaml.safe_load",
"pprint.pprint",
"paddle.static.BuildStrategy",
"paddlenlp.transformers.CrossEntropyCriterion",
"os.path.join",
"... | [((319, 341), 'sys.path.append', 'sys.path.append', (['"""../"""'], {}), "('../')\n", (334, 341), False, 'import sys\n'), ((450, 504), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO', 'format': 'FORMAT'}), '(level=logging.INFO, format=FORMAT)\n', (469, 504), False, 'import logging\n'), ((514, 541), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (531, 541), False, 'import logging\n'), ((575, 600), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (598, 600), False, 'import argparse\n'), ((830, 852), 'paddle.enable_static', 'paddle.enable_static', ([], {}), '()\n', (850, 852), False, 'import paddle\n'), ((1557, 1596), 'reader.create_data_loader', 'reader.create_data_loader', (['args', 'places'], {}), '(args, places)\n', (1582, 1596), False, 'import reader\n'), ((1618, 1641), 'paddle.static.Program', 'paddle.static.Program', ([], {}), '()\n', (1639, 1641), False, 'import paddle\n'), ((1664, 1687), 'paddle.static.Program', 'paddle.static.Program', ([], {}), '()\n', (1685, 1687), False, 'import paddle\n'), ((5424, 5444), 'util.record.AverageStatistical', 'AverageStatistical', ([], {}), '()\n', (5442, 5444), False, 'from util.record import AverageStatistical\n'), ((5466, 5486), 'util.record.AverageStatistical', 'AverageStatistical', ([], {}), '()\n', (5484, 5486), False, 'from util.record import AverageStatistical\n'), ((5507, 5527), 'util.record.AverageStatistical', 'AverageStatistical', ([], {}), '()\n', (5525, 5527), False, 'from util.record import AverageStatistical\n'), ((9492, 9515), 'paddle.disable_static', 'paddle.disable_static', ([], {}), '()\n', (9513, 9515), False, 'import paddle\n'), ((889, 919), 'paddle.distributed.fleet.init', 'fleet.init', ([], {'is_collective': '(True)'}), '(is_collective=True)\n', (899, 919), True, 'import paddle.distributed.fleet as fleet\n'), ((1470, 1494), 'paddle.seed', 'paddle.seed', (['random_seed'], {}), '(random_seed)\n', (1481, 1494), False, 
'import paddle\n'), ((1697, 1756), 'paddle.static.program_guard', 'paddle.static.program_guard', (['train_program', 'startup_program'], {}), '(train_program, startup_program)\n', (1724, 1756), False, 'import paddle\n'), ((1777, 1847), 'paddle.static.data', 'paddle.static.data', ([], {'name': '"""src_word"""', 'shape': '[None, None]', 'dtype': '"""int64"""'}), "(name='src_word', shape=[None, None], dtype='int64')\n", (1795, 1847), False, 'import paddle\n'), ((1880, 1950), 'paddle.static.data', 'paddle.static.data', ([], {'name': '"""trg_word"""', 'shape': '[None, None]', 'dtype': '"""int64"""'}), "(name='trg_word', shape=[None, None], dtype='int64')\n", (1898, 1950), False, 'import paddle\n'), ((1983, 2056), 'paddle.static.data', 'paddle.static.data', ([], {'name': '"""lbl_word"""', 'shape': '[None, None, 1]', 'dtype': '"""int64"""'}), "(name='lbl_word', shape=[None, None, 1], dtype='int64')\n", (2001, 2056), False, 'import paddle\n'), ((2116, 2448), 'paddlenlp.transformers.TransformerModel', 'TransformerModel', ([], {'src_vocab_size': 'args.src_vocab_size', 'trg_vocab_size': 'args.trg_vocab_size', 'max_length': '(args.max_length + 1)', 'n_layer': 'args.n_layer', 'n_head': 'args.n_head', 'd_model': 'args.d_model', 'd_inner_hid': 'args.d_inner_hid', 'dropout': 'args.dropout', 'weight_sharing': 'args.weight_sharing', 'bos_id': 'args.bos_idx', 'eos_id': 'args.eos_idx'}), '(src_vocab_size=args.src_vocab_size, trg_vocab_size=args.\n trg_vocab_size, max_length=args.max_length + 1, n_layer=args.n_layer,\n n_head=args.n_head, d_model=args.d_model, d_inner_hid=args.d_inner_hid,\n dropout=args.dropout, weight_sharing=args.weight_sharing, bos_id=args.\n bos_idx, eos_id=args.eos_idx)\n', (2132, 2448), False, 'from paddlenlp.transformers import TransformerModel, CrossEntropyCriterion\n'), ((2606, 2664), 'paddlenlp.transformers.CrossEntropyCriterion', 'CrossEntropyCriterion', (['args.label_smooth_eps', 'args.bos_idx'], {}), '(args.label_smooth_eps, args.bos_idx)\n', (2627, 2664), 
False, 'from paddlenlp.transformers import TransformerModel, CrossEntropyCriterion\n'), ((2823, 2924), 'paddle.optimizer.lr.NoamDecay', 'paddle.optimizer.lr.NoamDecay', (['args.d_model', 'args.warmup_steps', 'args.learning_rate'], {'last_epoch': '(0)'}), '(args.d_model, args.warmup_steps, args.\n learning_rate, last_epoch=0)\n', (2852, 2924), False, 'import paddle\n'), ((4536, 4566), 'paddle.static.Executor', 'paddle.static.Executor', (['places'], {}), '(places)\n', (4558, 4566), False, 'import paddle\n'), ((4591, 4615), 'paddle.static.Executor', 'paddle.static.Executor', ([], {}), '()\n', (4613, 4615), False, 'import paddle\n'), ((4641, 4670), 'paddle.static.BuildStrategy', 'paddle.static.BuildStrategy', ([], {}), '()\n', (4668, 4670), False, 'import paddle\n'), ((4695, 4728), 'paddle.static.ExecutionStrategy', 'paddle.static.ExecutionStrategy', ([], {}), '()\n', (4726, 4728), False, 'import paddle\n'), ((5610, 5621), 'time.time', 'time.time', ([], {}), '()\n', (5619, 5621), False, 'import time\n'), ((9374, 9432), 'os.path.join', 'os.path.join', (['args.save_model', '"""step_final"""', '"""transformer"""'], {}), "(args.save_model, 'step_final', 'transformer')\n", (9386, 9432), False, 'import os\n'), ((9441, 9486), 'paddle.static.save', 'paddle.static.save', (['train_program', 'model_path'], {}), '(train_program, model_path)\n', (9459, 9486), False, 'import paddle\n'), ((9685, 9697), 'pprint.pprint', 'pprint', (['args'], {}), '(args)\n', (9691, 9697), False, 'from pprint import pprint\n'), ((941, 978), 'os.getenv', 'os.getenv', (['"""FLAGS_selected_gpus"""', '"""0"""'], {}), "('FLAGS_selected_gpus', '0')\n", (950, 978), False, 'import os\n'), ((997, 1021), 'paddle.CUDAPlace', 'paddle.CUDAPlace', (['gpu_id'], {}), '(gpu_id)\n', (1013, 1021), False, 'import paddle\n'), ((1056, 1082), 'paddle.static.cpu_places', 'paddle.static.cpu_places', ([], {}), '()\n', (1080, 1082), False, 'import paddle\n'), ((1198, 1225), 'paddle.static.cuda_places', 
'paddle.static.cuda_places', ([], {}), '()\n', (1223, 1225), False, 'import paddle\n'), ((1261, 1287), 'paddle.static.cpu_places', 'paddle.static.cpu_places', ([], {}), '()\n', (1285, 1287), False, 'import paddle\n'), ((1300, 1324), 'paddle.set_device', 'paddle.set_device', (['"""cpu"""'], {}), "('cpu')\n", (1317, 1324), False, 'import paddle\n'), ((3249, 3278), 'paddle.static.BuildStrategy', 'paddle.static.BuildStrategy', ([], {}), '()\n', (3276, 3278), False, 'import paddle\n'), ((3307, 3340), 'paddle.static.ExecutionStrategy', 'paddle.static.ExecutionStrategy', ([], {}), '()\n', (3338, 3340), False, 'import paddle\n'), ((3369, 3396), 'paddle.distributed.fleet.DistributedStrategy', 'fleet.DistributedStrategy', ([], {}), '()\n', (3394, 3396), True, 'import paddle.distributed.fleet as fleet\n'), ((3862, 3924), 'paddle.distributed.fleet.distributed_optimizer', 'fleet.distributed_optimizer', (['optimizer'], {'strategy': 'dist_strategy'}), '(optimizer, strategy=dist_strategy)\n', (3889, 3924), True, 'import paddle.distributed.fleet as fleet\n'), ((9291, 9302), 'time.time', 'time.time', ([], {}), '()\n', (9300, 9302), False, 'import time\n'), ((9331, 9346), 'paddle.distributed.get_rank', 'dist.get_rank', ([], {}), '()\n', (9344, 9346), True, 'import paddle.distributed as dist\n'), ((9658, 9675), 'yaml.safe_load', 'yaml.safe_load', (['f'], {}), '(f)\n', (9672, 9675), False, 'import yaml\n'), ((4012, 4141), 'paddle.static.amp.AutoMixedPrecisionLists', 'paddle.static.amp.AutoMixedPrecisionLists', ([], {'custom_white_list': "['softmax', 'layer_norm']", 'custom_black_list': "['lookup_table_v2']"}), "(custom_white_list=['softmax',\n 'layer_norm'], custom_black_list=['lookup_table_v2'])\n", (4053, 4141), False, 'import paddle\n'), ((4207, 4364), 'paddle.static.amp.decorate', 'paddle.static.amp.decorate', (['optimizer', 'amp_list'], {'init_loss_scaling': 'args.scale_loss', 'use_dynamic_loss_scaling': '(True)', 'use_pure_fp16': 'args.use_pure_fp16'}), '(optimizer, amp_list, 
init_loss_scaling=args.\n scale_loss, use_dynamic_loss_scaling=True, use_pure_fp16=args.use_pure_fp16\n )\n', (4233, 4364), False, 'import paddle\n'), ((4763, 4807), 'paddle.static.CompiledProgram', 'paddle.static.CompiledProgram', (['train_program'], {}), '(train_program)\n', (4792, 4807), False, 'import paddle\n'), ((5212, 5247), 'numpy.log', 'np.log', (['(1.0 - args.label_smooth_eps)'], {}), '(1.0 - args.label_smooth_eps)\n', (5218, 5247), True, 'import numpy as np\n'), ((5296, 5361), 'numpy.log', 'np.log', (['(args.label_smooth_eps / (args.trg_vocab_size - 1) + 1e-20)'], {}), '(args.label_smooth_eps / (args.trg_vocab_size - 1) + 1e-20)\n', (5302, 5361), True, 'import numpy as np\n'), ((5938, 5949), 'time.time', 'time.time', ([], {}), '()\n', (5947, 5949), False, 'import time\n'), ((6893, 6904), 'time.time', 'time.time', ([], {}), '()\n', (6902, 6904), False, 'import time\n'), ((7198, 7215), 'numpy.array', 'np.array', (['outs[0]'], {}), '(outs[0])\n', (7206, 7215), True, 'import numpy as np\n'), ((7217, 7234), 'numpy.array', 'np.array', (['outs[1]'], {}), '(outs[1])\n', (7225, 7234), True, 'import numpy as np\n'), ((9166, 9211), 'paddle.static.save', 'paddle.static.save', (['train_program', 'model_path'], {}), '(train_program, model_path)\n', (9184, 9211), False, 'import paddle\n'), ((7076, 7095), 'numpy.asarray', 'np.asarray', (['outs[1]'], {}), '(outs[1])\n', (7086, 7095), True, 'import numpy as np\n'), ((8996, 9011), 'paddle.distributed.get_rank', 'dist.get_rank', ([], {}), '()\n', (9009, 9011), True, 'import paddle.distributed as dist\n')] |
import tensorflow as tf
import tensornets as nets
import cv2
import numpy as np
import time
inputs = tf.placeholder(tf.float32, [None, 416, 416, 3])
model = nets.YOLOv3COCO(inputs, nets.Darknet19)
#model = nets.YOLOv2(inputs, nets.Darknet19)
#frame=cv2.imread("D://pyworks//yolo//truck.jpg",1)
classes={'0':'person','1':'bicycle','2':'car','3':'bike','5':'bus','7':'truck','37':'sports ball'}
list_of_classes=[0,1,2,3,5,7,37]
with tf.Session() as sess:
sess.run(model.pretrained())
#"D://pyworks//yolo//videoplayback.mp4"
cap = cv2.VideoCapture("/home/linx3/Downloads/testing.mp4")
while(cap.isOpened()):
ret, frame = cap.read()
img=cv2.resize(frame,(416,416))
imge=np.array(img).reshape(-1,416,416,3)
start_time=time.time()
preds = sess.run(model.preds, {inputs: model.preprocess(imge)})
print("--- %s seconds ---" % (time.time() - start_time))
boxes = model.get_boxes(preds, imge.shape[1:3])
cv2.namedWindow('image',cv2.WINDOW_NORMAL)
cv2.resizeWindow('image', 700,700)
#print("--- %s seconds ---" % (time.time() - start_time))
boxes1=np.array(boxes)
for j in list_of_classes:
count =0
if str(j) in classes:
lab=classes[str(j)]
if len(boxes1) !=0:
for i in range(len(boxes1[j])):
box=boxes1[j][i]
if boxes1[j][i][4]>=.40:
count += 1
cv2.rectangle(img,(box[0],box[1]),(box[2],box[3]),(0,255,0),1)
cv2.putText(img, lab, (box[0],box[1]), cv2.FONT_HERSHEY_SIMPLEX, .5, (0, 0, 255), lineType=cv2.LINE_AA)
print(lab,": ",count)
cv2.imshow("image",img)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
cap.release()
cv2.destroyAllWindows()
| [
"cv2.putText",
"cv2.waitKey",
"tensorflow.Session",
"cv2.imshow",
"time.time",
"cv2.VideoCapture",
"tensorflow.placeholder",
"cv2.namedWindow",
"numpy.array",
"cv2.rectangle",
"cv2.resizeWindow",
"cv2.destroyAllWindows",
"tensornets.YOLOv3COCO",
"cv2.resize"
] | [((103, 150), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None, 416, 416, 3]'], {}), '(tf.float32, [None, 416, 416, 3])\n', (117, 150), True, 'import tensorflow as tf\n'), ((159, 198), 'tensornets.YOLOv3COCO', 'nets.YOLOv3COCO', (['inputs', 'nets.Darknet19'], {}), '(inputs, nets.Darknet19)\n', (174, 198), True, 'import tensornets as nets\n'), ((1853, 1876), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (1874, 1876), False, 'import cv2\n'), ((435, 447), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (445, 447), True, 'import tensorflow as tf\n'), ((540, 593), 'cv2.VideoCapture', 'cv2.VideoCapture', (['"""/home/linx3/Downloads/testing.mp4"""'], {}), "('/home/linx3/Downloads/testing.mp4')\n", (556, 593), False, 'import cv2\n'), ((666, 695), 'cv2.resize', 'cv2.resize', (['frame', '(416, 416)'], {}), '(frame, (416, 416))\n', (676, 695), False, 'import cv2\n'), ((762, 773), 'time.time', 'time.time', ([], {}), '()\n', (771, 773), False, 'import time\n'), ((976, 1019), 'cv2.namedWindow', 'cv2.namedWindow', (['"""image"""', 'cv2.WINDOW_NORMAL'], {}), "('image', cv2.WINDOW_NORMAL)\n", (991, 1019), False, 'import cv2\n'), ((1028, 1063), 'cv2.resizeWindow', 'cv2.resizeWindow', (['"""image"""', '(700)', '(700)'], {}), "('image', 700, 700)\n", (1044, 1063), False, 'import cv2\n'), ((1144, 1159), 'numpy.array', 'np.array', (['boxes'], {}), '(boxes)\n', (1152, 1159), True, 'import numpy as np\n'), ((1747, 1771), 'cv2.imshow', 'cv2.imshow', (['"""image"""', 'img'], {}), "('image', img)\n", (1757, 1771), False, 'import cv2\n'), ((707, 720), 'numpy.array', 'np.array', (['img'], {}), '(img)\n', (715, 720), True, 'import numpy as np\n'), ((1782, 1796), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (1793, 1796), False, 'import cv2\n'), ((885, 896), 'time.time', 'time.time', ([], {}), '()\n', (894, 896), False, 'import time\n'), ((1512, 1582), 'cv2.rectangle', 'cv2.rectangle', (['img', '(box[0], box[1])', '(box[2], box[3])', '(0, 
255, 0)', '(1)'], {}), '(img, (box[0], box[1]), (box[2], box[3]), (0, 255, 0), 1)\n', (1525, 1582), False, 'import cv2\n'), ((1599, 1709), 'cv2.putText', 'cv2.putText', (['img', 'lab', '(box[0], box[1])', 'cv2.FONT_HERSHEY_SIMPLEX', '(0.5)', '(0, 0, 255)'], {'lineType': 'cv2.LINE_AA'}), '(img, lab, (box[0], box[1]), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, \n 0, 255), lineType=cv2.LINE_AA)\n', (1610, 1709), False, 'import cv2\n')] |
from __future__ import print_function
import sys, pdb
sys.path.insert(0, '.')
import vgg, time
import tensorflow as tf, numpy as np, os
import stylenet
from argparse import ArgumentParser
from vgg import read_img, list_files
vgg_path = 'vgg19.mat'
def build_parser():
parser = ArgumentParser(description='Real-time style transfer')
parser.add_argument('--gpu', '-g', default=0, type=int,
help='GPU ID (negative value indicates CPU)')
parser.add_argument('--dataset', '-d', default='../../train2014s', type=str,
help='dataset directory path (according to the paper, use MSCOCO 80k images)')
parser.add_argument('--style_image', '-s', type=str, required=True,
help='style image path')
parser.add_argument('--batchsize', '-b', type=int, default=16,
help='batch size (default value is 1)')
parser.add_argument('--ckpt', '-c', default='ckpt', type=str,
help='the global step of checkpoint file desired to restore.')
parser.add_argument('--lambda_tv', '-l_tv', default=2e2, type=float,
help='weight of total variation regularization according to the paper to be set between 10e-4 and 10e-6.')
parser.add_argument('--lambda_feat', '-l_feat', default=7.5e0, type=float)
parser.add_argument('--lambda_style', '-l_style', default=1e2, type=float)
parser.add_argument('--epoch', '-e', default=2, type=int)
parser.add_argument('--lr', '-l', default=1e-3, type=float)
return parser
def main():
parser = build_parser()
options = parser.parse_args()
if options.gpu > -1:
device = '/gpu:{}'.format(options.gpu)
else:
device = '/cpu:0'
batchsize = options.batchsize
# content targets
content_targets = [os.path.join(options.dataset, fn) for fn in list_files(options.dataset)]
content_targets = content_targets[:-(len(content_targets) % batchsize)]
print('total training data size: ', len(content_targets))
batch_shape = (batchsize,224,224,3)
# style target
style_target = read_img(options.style_image)
style_shape = (1,) + style_target.shape
with tf.device(device), tf.Session() as sess:
# style target feature
# compute gram maxtrix of style target
if not os.path.isfile(vgg_path):
print ("Pretrained vgg net does not exsited " + vgg_path)
print ("Plese download pretrained vgg net from http://www.vlfeat.org/matconvnet/models/beta16/imagenet-vgg-verydeep-19.mat")
return ;
style_image = tf.placeholder(tf.float32, shape=style_shape, name='style_image')
vggstyletarget = vgg.net(vgg_path, vgg.preprocess(style_image))
style_vgg = vgg.get_style_vgg(vggstyletarget, style_image, np.array([style_target]))
# content target feature
content_vgg = {}
inputs = tf.placeholder(tf.float32, shape=batch_shape, name="inputs")
content_net = vgg.net(vgg_path, vgg.preprocess(inputs))
content_vgg['relu4_2'] = content_net['relu4_2']
# feature after transformation
outputs = stylenet.net(inputs/255.0)
vggoutputs = vgg.net(vgg_path, vgg.preprocess(outputs))
# compute feature loss
loss_f = options.lambda_feat * vgg.total_content_loss(vggoutputs, content_vgg, batchsize)
# compute style loss
loss_s = options.lambda_style * vgg.total_style_loss(vggoutputs, style_vgg, batchsize)
# total variation denoising
loss_tv = options.lambda_tv * vgg.total_variation_regularization(outputs, batchsize, batch_shape)
# total loss
loss = loss_f + loss_s + loss_tv
with tf.Session() as sess:
if not os.path.exists(options.ckpt):
os.makedirs(options.ckpt)
save_path = os.path.join(options.ckpt,'1.ckpt')
#training
train_step = tf.train.AdamOptimizer(options.lr).minimize(loss)
sess.run(tf.global_variables_initializer())
total_step = 0
for epoch in range(options.epoch):
print('epoch: ', epoch)
step = 0
while step * batchsize < len(content_targets):
time_start = time.time()
batch = np.zeros(batch_shape, dtype=np.float32)
for i, img in enumerate(content_targets[step * batchsize : (step + 1) * batchsize]):
batch[i] = read_img(img).astype(np.float32) # (224,224,3)
step += 1
total_step += 1
loss_, _= sess.run([loss, train_step,], feed_dict= {inputs:batch})
time_elapse = time.time() - time_start
should_save = total_step % 2000 == 0
if total_step % 1 == 0:
print('[step {}] elapse time: {} loss: {}'.format(total_step, time_elapse, loss_))
if should_save:
saver = tf.train.Saver()
res = saver.save(sess, save_path)
print('Save checkpoint')
if __name__ == '__main__':
main()
| [
"argparse.ArgumentParser",
"vgg.list_files",
"vgg.read_img",
"vgg.preprocess",
"vgg.total_variation_regularization",
"os.path.isfile",
"os.path.join",
"vgg.total_content_loss",
"os.path.exists",
"tensorflow.placeholder",
"stylenet.net",
"tensorflow.train.Saver",
"tensorflow.global_variables_... | [((54, 77), 'sys.path.insert', 'sys.path.insert', (['(0)', '"""."""'], {}), "(0, '.')\n", (69, 77), False, 'import sys, pdb\n'), ((287, 341), 'argparse.ArgumentParser', 'ArgumentParser', ([], {'description': '"""Real-time style transfer"""'}), "(description='Real-time style transfer')\n", (301, 341), False, 'from argparse import ArgumentParser\n'), ((2146, 2175), 'vgg.read_img', 'read_img', (['options.style_image'], {}), '(options.style_image)\n', (2154, 2175), False, 'from vgg import read_img, list_files\n'), ((1850, 1883), 'os.path.join', 'os.path.join', (['options.dataset', 'fn'], {}), '(options.dataset, fn)\n', (1862, 1883), False, 'import tensorflow as tf, numpy as np, os\n'), ((2230, 2247), 'tensorflow.device', 'tf.device', (['device'], {}), '(device)\n', (2239, 2247), True, 'import tensorflow as tf, numpy as np, os\n'), ((2249, 2261), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (2259, 2261), True, 'import tensorflow as tf, numpy as np, os\n'), ((2642, 2707), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': 'style_shape', 'name': '"""style_image"""'}), "(tf.float32, shape=style_shape, name='style_image')\n", (2656, 2707), True, 'import tensorflow as tf, numpy as np, os\n'), ((2958, 3018), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': 'batch_shape', 'name': '"""inputs"""'}), "(tf.float32, shape=batch_shape, name='inputs')\n", (2972, 3018), True, 'import tensorflow as tf, numpy as np, os\n'), ((3198, 3226), 'stylenet.net', 'stylenet.net', (['(inputs / 255.0)'], {}), '(inputs / 255.0)\n', (3210, 3226), False, 'import stylenet\n'), ((3801, 3813), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (3811, 3813), True, 'import tensorflow as tf, numpy as np, os\n'), ((3947, 3983), 'os.path.join', 'os.path.join', (['options.ckpt', '"""1.ckpt"""'], {}), "(options.ckpt, '1.ckpt')\n", (3959, 3983), False, 'import tensorflow as tf, numpy as np, os\n'), ((1894, 1921), 
'vgg.list_files', 'list_files', (['options.dataset'], {}), '(options.dataset)\n', (1904, 1921), False, 'from vgg import read_img, list_files\n'), ((2365, 2389), 'os.path.isfile', 'os.path.isfile', (['vgg_path'], {}), '(vgg_path)\n', (2379, 2389), False, 'import tensorflow as tf, numpy as np, os\n'), ((2751, 2778), 'vgg.preprocess', 'vgg.preprocess', (['style_image'], {}), '(style_image)\n', (2765, 2778), False, 'import vgg, time\n'), ((2847, 2871), 'numpy.array', 'np.array', (['[style_target]'], {}), '([style_target])\n', (2855, 2871), True, 'import tensorflow as tf, numpy as np, os\n'), ((3059, 3081), 'vgg.preprocess', 'vgg.preprocess', (['inputs'], {}), '(inputs)\n', (3073, 3081), False, 'import vgg, time\n'), ((3272, 3295), 'vgg.preprocess', 'vgg.preprocess', (['outputs'], {}), '(outputs)\n', (3286, 3295), False, 'import vgg, time\n'), ((3368, 3426), 'vgg.total_content_loss', 'vgg.total_content_loss', (['vggoutputs', 'content_vgg', 'batchsize'], {}), '(vggoutputs, content_vgg, batchsize)\n', (3390, 3426), False, 'import vgg, time\n'), ((3505, 3559), 'vgg.total_style_loss', 'vgg.total_style_loss', (['vggoutputs', 'style_vgg', 'batchsize'], {}), '(vggoutputs, style_vgg, batchsize)\n', (3525, 3559), False, 'import vgg, time\n'), ((3643, 3710), 'vgg.total_variation_regularization', 'vgg.total_variation_regularization', (['outputs', 'batchsize', 'batch_shape'], {}), '(outputs, batchsize, batch_shape)\n', (3677, 3710), False, 'import vgg, time\n'), ((3859, 3887), 'os.path.exists', 'os.path.exists', (['options.ckpt'], {}), '(options.ckpt)\n', (3873, 3887), False, 'import tensorflow as tf, numpy as np, os\n'), ((3901, 3926), 'os.makedirs', 'os.makedirs', (['options.ckpt'], {}), '(options.ckpt)\n', (3912, 3926), False, 'import tensorflow as tf, numpy as np, os\n'), ((4090, 4123), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (4121, 4123), True, 'import tensorflow as tf, numpy as np, os\n'), ((4023, 4057), 
'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', (['options.lr'], {}), '(options.lr)\n', (4045, 4057), True, 'import tensorflow as tf, numpy as np, os\n'), ((4349, 4360), 'time.time', 'time.time', ([], {}), '()\n', (4358, 4360), False, 'import vgg, time\n'), ((4402, 4441), 'numpy.zeros', 'np.zeros', (['batch_shape'], {'dtype': 'np.float32'}), '(batch_shape, dtype=np.float32)\n', (4410, 4441), True, 'import tensorflow as tf, numpy as np, os\n'), ((4822, 4833), 'time.time', 'time.time', ([], {}), '()\n', (4831, 4833), False, 'import vgg, time\n'), ((5193, 5209), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), '()\n', (5207, 5209), True, 'import tensorflow as tf, numpy as np, os\n'), ((4573, 4586), 'vgg.read_img', 'read_img', (['img'], {}), '(img)\n', (4581, 4586), False, 'from vgg import read_img, list_files\n')] |
"""
Collection of MXNet general functions, wrapped to fit Ivy syntax and signature.
"""
# global
import ivy
_round = round
import logging
import mxnet as _mx
import numpy as _np
import math as _math
from numbers import Number
from operator import mul as _mul
from functools import reduce as _reduce
import multiprocessing as _multiprocessing
# local
from ivy.functional.ivy import default_dtype
from ivy.functional.ivy.device import default_device
from ivy.functional.backends.mxnet.device import _callable_dev
from ivy.functional.backends.mxnet.general import unstack
from ivy.functional.backends.mxnet import _handle_flat_arrays_in_out, _mxnet_init_context,\
_scalar_or_flat_array_to_scalar, _handle_flat_arrays_in, _flat_array_to_1_dim_array, _1_dim_array_to_flat_array
#temporary imports
from ivy.functional.backends.mxnet.general import linspace
DTYPE_TO_STR = {_np.dtype('int8'): 'int8',
_np.dtype('int16'): 'int16',
_np.dtype('int32'): 'int32',
_np.dtype('int64'): 'int64',
_np.dtype('uint8'): 'uint8',
_np.dtype('uint16'): 'uint16',
_np.dtype('uint32'): 'uint32',
_np.dtype('uint64'): 'uint64',
'bfloat16': 'bfloat16',
_np.dtype('float16'): 'float16',
_np.dtype('float32'): 'float32',
_np.dtype('float64'): 'float64',
_np.dtype('bool'): 'bool',
_np.int8: 'int8',
_np.int16: 'int16',
_np.int32: 'int32',
_np.int64: 'int64',
_np.uint8: 'uint8',
_np.uint16: 'uint16',
_np.uint32: 'uint32',
_np.uint64: 'uint64',
_np.float16: 'float16',
_np.float32: 'float32',
_np.float64: 'float64',
_np.bool_: 'bool'}
DTYPE_FROM_STR = {'int8': _np.int8,
'int16': _np.int16,
'int32': _np.int32,
'int64': _np.int64,
'uint8': _np.uint8,
'uint16': _np.uint16,
'uint32': _np.uint32,
'uint64': _np.uint64,
'bfloat16': 'bfloat16',
'float16': _np.float16,
'float32': _np.float32,
'float64': _np.float64,
'bool': _np.bool_}
# API #
# ----#
def dtype_bits(dtype_in):
dtype_str = dtype_to_str(dtype_in)
if 'bool' in dtype_str:
return 1
return int(dtype_str.replace("<class 'numpy.", '').replace("'>", '').replace('uint', '').replace(
'int', '').replace('bfloat', '').replace('float', ''))
equal = lambda x1, x2: x1 == x2
equal.__name__ = 'equal'
shape = lambda x, as_tensor=False: _mx.nd.shape_array(x) if as_tensor else x.shape
shape.__name__ = 'shape'
get_num_dims = lambda x, as_tensor=False:\
_mx.nd.shape_array(_mx.nd.shape_array(x)).reshape([]) if as_tensor else len(x.shape)
minimum = lambda x, y: _mx.nd.array(_mx.nd.minimum(_scalar_or_flat_array_to_scalar(x), _scalar_or_flat_array_to_scalar(y)))
maximum = lambda x, y: _mx.nd.array(_mx.nd.maximum(_scalar_or_flat_array_to_scalar(x), _scalar_or_flat_array_to_scalar(y)))
@_handle_flat_arrays_in_out
def clip(x, x_min, x_max):
return _mx.nd.clip(_mx.nd.array(x), x_min, x_max)
# noinspection PyShadowingBuiltins
@_handle_flat_arrays_in_out
def abs(x):
return _mx.nd.abs(x)
argmin = lambda x, axis=0: _mx.nd.argmin(x, axis)
@_handle_flat_arrays_in_out
def cast(x, dtype):
return x.astype(dtype)
astype = cast
# noinspection PyUnresolvedReferences
def arange(stop, start=0, step=1, dtype=None, dev=None):
cont = _mxnet_init_context(default_device(dev))
stop = stop if isinstance(stop, Number) else stop.asscalar()
start = start if isinstance(start, Number) else start.asscalar()
step = step if isinstance(step, Number) else step.asscalar()
return _mx.nd.arange(start, stop, ctx=cont, step=step, dtype=dtype)
@_handle_flat_arrays_in_out
def concatenate(xs, axis=-1):
return _mx.nd.concat(*xs, dim=axis)
def stack(xs, axis=0):
if xs[0].shape == ():
return _mx.nd.reshape(_mx.nd.stack(*[_flat_array_to_1_dim_array(x) for x in xs], axis=axis), -1)
return _mx.nd.stack(*xs, axis=axis)
def transpose(x, axes=None):
if axes is None:
num_dims = len(x.shape)
axes = list(range(num_dims))
axes.reverse()
return _mx.nd.transpose(x, axes)
@_handle_flat_arrays_in_out
def where(condition, x1, x2):
x_shape = list(x1.shape)
condition_shape = list(condition.shape)
if x_shape == condition_shape:
res = _mx.nd.where(condition, x1, x2)
return res
tile_reps = [int(x / c) for x, c in zip(x_shape, condition_shape)]
tiled_condition = _mx.nd.tile(condition, tile_reps)
return _mx.nd.where(tiled_condition, x1, x2)
def indices_where(x):
x_shape = x.shape
x_flat = x.reshape((1, -1,))
flat_indices = x_flat.astype('int32').tostype('csr').indices
if flat_indices.shape == (0,):
res = flat_indices.reshape((0, len(x_shape)))
return res
res = _mx.nd.swapaxes(_mx.nd.unravel_index(flat_indices, x_shape), 0, 1)
return res
reshape = lambda x, new_shape: x.reshape(new_shape)
def broadcast_to(x, new_shape):
x_shape = list(x.shape)
num_x_dims = len(x_shape)
num_shape_dims = len(new_shape)
diff = num_shape_dims - num_x_dims
if diff == 0:
return _mx.nd.broadcast_to(x, new_shape)
x = _mx.nd.reshape(x, [1]*diff + x_shape)
return _mx.nd.broadcast_to(x, new_shape)
def squeeze(x, axis=None):
if x.shape == ():
if axis is None or axis == 0 or axis == -1:
return x
raise Exception('tried to squeeze a zero-dimensional input by axis {}'.format(axis))
res = _mx.nd.squeeze(x, axis)
if axis is None:
return _1_dim_array_to_flat_array(res)
return res
# noinspection PyShadowingNames
def zeros_like(x, dtype=None, dev=None):
if x.shape == ():
return _mx.nd.array(0., ctx=_mxnet_init_context(default_device(dev)))
mx_zeros = _mx.nd.zeros_like(x, ctx=_mxnet_init_context(default_device(dev)))
return mx_zeros if not dtype else mx_zeros.astype(dtype)
def full(shape, fill_value, dtype=None, device=None):
shape = ivy.shape_to_tuple(shape)
cont = _mxnet_init_context(default_device(device))
if len(shape) == 0 or 0 in shape:
return _1_dim_array_to_flat_array(
_mx.nd.full((1,), fill_value, cont, dtype_from_str(default_dtype(dtype, fill_value))))
return _mx.nd.full(shape, fill_value, cont, dtype_from_str(default_dtype(dtype, fill_value)))
# noinspection PyUnusedLocal
one_hot = lambda indices, depth, dev=None: _mx.nd.one_hot(indices, depth)
def cross(x1, x2):
a1 = x1[..., 0:1]
a2 = x1[..., 1:2]
a3 = x1[..., 2:3]
b1 = x2[..., 0:1]
b2 = x2[..., 1:2]
b3 = x2[..., 2:3]
res1 = a2*b3 - a3*b2
res2 = a3*b1 - a1*b3
res3 = a1*b2 - a2*b1
res = _mx.nd.concat(res1, res2, res3, dim=-1)
return res
def matmul(x1, x2):
expanded = False
x1_shape = list(x1.shape)
x2_shape = list(x2.shape)
if len(x1_shape) != 3:
num_x1_dims = len(x1_shape)
x1 = _mx.nd.reshape(x1, [1]*max(2-num_x1_dims, 0) + [-1] + x1_shape[-min(num_x1_dims, 2):])
expanded = True
if len(x2_shape) != 3:
num_x2_dims = len(x2_shape)
x2 = _mx.nd.reshape(x2, [1]*max(2-num_x2_dims, 0) + [-1] + x2_shape[-min(num_x2_dims, 2):])
expanded = True
x1_batch_size = x1.shape[0]
x2_batch_size = x2.shape[0]
if x1_batch_size > x2_batch_size:
x2 = _mx.nd.tile(x2, (int(x1_batch_size/x2_batch_size), 1, 1))
elif x2_batch_size > x1_batch_size:
x1 = _mx.nd.tile(x1, (int(x2_batch_size / x1_batch_size), 1, 1))
res = _mx.nd.batch_dot(x1, x2)
if expanded:
return _mx.nd.reshape(res, list(x1_shape[:-1]) + [res.shape[-1]])
return res
def identity(n, dtype='float32', batch_shape=None, dev=None):
mat = _mx.nd.eye(n, dtype=dtype).copyto(_mxnet_init_context(default_device(dev)))
if batch_shape is None:
return mat
else:
reshape_dims = [1]*len(batch_shape) + [n, n]
tile_dims = list(batch_shape) + [1, 1]
res = _mx.nd.tile(_mx.nd.reshape(mat, reshape_dims), tile_dims)
return res
def meshgrid(*xs, indexing='ij'):
# ToDo: implement this without reliance on NumPy backend
xs_np = [x.as_np_ndarray() for x in xs]
return tuple([item.as_nd_ndarray() for item in _mx.np.meshgrid(*xs_np, indexing=indexing)])
def linear_resample(x, num_samples, axis=-1):
x_shape = list(x.shape)
num_x_dims = len(x_shape)
axis = axis % num_x_dims
x_pre_shape = x_shape[0:axis]
x_pre_size = _reduce(_mul, x_pre_shape) if x_pre_shape else 1
num_pre_dims = len(x_pre_shape)
num_vals = x.shape[axis]
x_post_shape = x_shape[axis+1:]
x_post_size = _reduce(_mul, x_post_shape) if x_post_shape else 1
num_post_dims = len(x_post_shape)
xp = _mx.nd.reshape(_mx.nd.arange(num_vals*x_pre_size*x_post_size), x_shape)
x_coords = _mx.nd.arange(num_samples) * ((num_vals-1)/(num_samples-1)) * x_post_size
x_coords = _mx.nd.reshape(x_coords, [1]*num_pre_dims + [num_samples] + [1]*num_post_dims)
x_coords = _mx.nd.broadcast_to(x_coords, x_pre_shape + [num_samples] + x_post_shape)
slc = [slice(None)] * num_x_dims
slc[axis] = slice(0, 1, 1)
x_coords = x_coords + xp[tuple(slc)]
x = _mx.nd.reshape(x, (-1,))
xp = _mx.nd.reshape(xp, (-1,))
x_coords = _mx.nd.reshape(x_coords, (-1,))
ret = _mx.nd.array(_mx.np.interp(x_coords.asnumpy(), xp.asnumpy(), x.asnumpy()))
return _mx.nd.reshape(ret, x_pre_shape + [num_samples] + x_post_shape)
def dtype(x, as_str=False):
dt = x.dtype
if as_str:
return dtype_to_str(dt)
return x.dtype
def dtype_to_str(dtype_in):
if isinstance(dtype_in, str):
return dtype_in
return DTYPE_TO_STR[dtype_in]
def dtype_from_str(dtype_in):
if not isinstance(dtype_in, str):
return dtype_in
return DTYPE_FROM_STR[dtype_in]
# noinspection PyUnusedLocal
def compile(func, dynamic=True, example_inputs=None, static_argnums=None, static_argnames=None):
logging.warning('MXnet does not support compiling arbitrary functions, '
'consider writing a function using MXNet Symbolic backend instead for compiling.\n'
'Now returning the unmodified function.')
return func
current_framework_str = lambda: 'mxnet'
current_framework_str.__name__ = 'current_framework_str'
multiprocessing = lambda context=None: _multiprocessing if context is None else _multiprocessing.get_context(context)
| [
"mxnet.nd.tile",
"multiprocessing.get_context",
"mxnet.nd.concat",
"mxnet.np.meshgrid",
"mxnet.nd.transpose",
"mxnet.nd.batch_dot",
"mxnet.nd.shape_array",
"logging.warning",
"mxnet.nd.squeeze",
"mxnet.nd.eye",
"mxnet.nd.argmin",
"mxnet.nd.broadcast_to",
"ivy.functional.ivy.default_dtype",
... | [((876, 893), 'numpy.dtype', '_np.dtype', (['"""int8"""'], {}), "('int8')\n", (885, 893), True, 'import numpy as _np\n'), ((919, 937), 'numpy.dtype', '_np.dtype', (['"""int16"""'], {}), "('int16')\n", (928, 937), True, 'import numpy as _np\n'), ((964, 982), 'numpy.dtype', '_np.dtype', (['"""int32"""'], {}), "('int32')\n", (973, 982), True, 'import numpy as _np\n'), ((1009, 1027), 'numpy.dtype', '_np.dtype', (['"""int64"""'], {}), "('int64')\n", (1018, 1027), True, 'import numpy as _np\n'), ((1054, 1072), 'numpy.dtype', '_np.dtype', (['"""uint8"""'], {}), "('uint8')\n", (1063, 1072), True, 'import numpy as _np\n'), ((1099, 1118), 'numpy.dtype', '_np.dtype', (['"""uint16"""'], {}), "('uint16')\n", (1108, 1118), True, 'import numpy as _np\n'), ((1146, 1165), 'numpy.dtype', '_np.dtype', (['"""uint32"""'], {}), "('uint32')\n", (1155, 1165), True, 'import numpy as _np\n'), ((1193, 1212), 'numpy.dtype', '_np.dtype', (['"""uint64"""'], {}), "('uint64')\n", (1202, 1212), True, 'import numpy as _np\n'), ((1280, 1300), 'numpy.dtype', '_np.dtype', (['"""float16"""'], {}), "('float16')\n", (1289, 1300), True, 'import numpy as _np\n'), ((1329, 1349), 'numpy.dtype', '_np.dtype', (['"""float32"""'], {}), "('float32')\n", (1338, 1349), True, 'import numpy as _np\n'), ((1378, 1398), 'numpy.dtype', '_np.dtype', (['"""float64"""'], {}), "('float64')\n", (1387, 1398), True, 'import numpy as _np\n'), ((1427, 1444), 'numpy.dtype', '_np.dtype', (['"""bool"""'], {}), "('bool')\n", (1436, 1444), True, 'import numpy as _np\n'), ((3441, 3454), 'mxnet.nd.abs', '_mx.nd.abs', (['x'], {}), '(x)\n', (3451, 3454), True, 'import mxnet as _mx\n'), ((3483, 3505), 'mxnet.nd.argmin', '_mx.nd.argmin', (['x', 'axis'], {}), '(x, axis)\n', (3496, 3505), True, 'import mxnet as _mx\n'), ((3958, 4018), 'mxnet.nd.arange', '_mx.nd.arange', (['start', 'stop'], {'ctx': 'cont', 'step': 'step', 'dtype': 'dtype'}), '(start, stop, ctx=cont, step=step, dtype=dtype)\n', (3971, 4018), True, 'import mxnet as 
_mx\n'), ((4092, 4120), 'mxnet.nd.concat', '_mx.nd.concat', (['*xs'], {'dim': 'axis'}), '(*xs, dim=axis)\n', (4105, 4120), True, 'import mxnet as _mx\n'), ((4288, 4316), 'mxnet.nd.stack', '_mx.nd.stack', (['*xs'], {'axis': 'axis'}), '(*xs, axis=axis)\n', (4300, 4316), True, 'import mxnet as _mx\n'), ((4472, 4497), 'mxnet.nd.transpose', '_mx.nd.transpose', (['x', 'axes'], {}), '(x, axes)\n', (4488, 4497), True, 'import mxnet as _mx\n'), ((4824, 4857), 'mxnet.nd.tile', '_mx.nd.tile', (['condition', 'tile_reps'], {}), '(condition, tile_reps)\n', (4835, 4857), True, 'import mxnet as _mx\n'), ((4869, 4906), 'mxnet.nd.where', '_mx.nd.where', (['tiled_condition', 'x1', 'x2'], {}), '(tiled_condition, x1, x2)\n', (4881, 4906), True, 'import mxnet as _mx\n'), ((5547, 5586), 'mxnet.nd.reshape', '_mx.nd.reshape', (['x', '([1] * diff + x_shape)'], {}), '(x, [1] * diff + x_shape)\n', (5561, 5586), True, 'import mxnet as _mx\n'), ((5596, 5629), 'mxnet.nd.broadcast_to', '_mx.nd.broadcast_to', (['x', 'new_shape'], {}), '(x, new_shape)\n', (5615, 5629), True, 'import mxnet as _mx\n'), ((5857, 5880), 'mxnet.nd.squeeze', '_mx.nd.squeeze', (['x', 'axis'], {}), '(x, axis)\n', (5871, 5880), True, 'import mxnet as _mx\n'), ((6353, 6378), 'ivy.shape_to_tuple', 'ivy.shape_to_tuple', (['shape'], {}), '(shape)\n', (6371, 6378), False, 'import ivy\n'), ((6785, 6815), 'mxnet.nd.one_hot', '_mx.nd.one_hot', (['indices', 'depth'], {}), '(indices, depth)\n', (6799, 6815), True, 'import mxnet as _mx\n'), ((7054, 7093), 'mxnet.nd.concat', '_mx.nd.concat', (['res1', 'res2', 'res3'], {'dim': '(-1)'}), '(res1, res2, res3, dim=-1)\n', (7067, 7093), True, 'import mxnet as _mx\n'), ((7882, 7906), 'mxnet.nd.batch_dot', '_mx.nd.batch_dot', (['x1', 'x2'], {}), '(x1, x2)\n', (7898, 7906), True, 'import mxnet as _mx\n'), ((9279, 9365), 'mxnet.nd.reshape', '_mx.nd.reshape', (['x_coords', '([1] * num_pre_dims + [num_samples] + [1] * num_post_dims)'], {}), '(x_coords, [1] * num_pre_dims + [num_samples] + [1] *\n 
num_post_dims)\n', (9293, 9365), True, 'import mxnet as _mx\n'), ((9373, 9446), 'mxnet.nd.broadcast_to', '_mx.nd.broadcast_to', (['x_coords', '(x_pre_shape + [num_samples] + x_post_shape)'], {}), '(x_coords, x_pre_shape + [num_samples] + x_post_shape)\n', (9392, 9446), True, 'import mxnet as _mx\n'), ((9564, 9588), 'mxnet.nd.reshape', '_mx.nd.reshape', (['x', '(-1,)'], {}), '(x, (-1,))\n', (9578, 9588), True, 'import mxnet as _mx\n'), ((9598, 9623), 'mxnet.nd.reshape', '_mx.nd.reshape', (['xp', '(-1,)'], {}), '(xp, (-1,))\n', (9612, 9623), True, 'import mxnet as _mx\n'), ((9639, 9670), 'mxnet.nd.reshape', '_mx.nd.reshape', (['x_coords', '(-1,)'], {}), '(x_coords, (-1,))\n', (9653, 9670), True, 'import mxnet as _mx\n'), ((9767, 9830), 'mxnet.nd.reshape', '_mx.nd.reshape', (['ret', '(x_pre_shape + [num_samples] + x_post_shape)'], {}), '(ret, x_pre_shape + [num_samples] + x_post_shape)\n', (9781, 9830), True, 'import mxnet as _mx\n'), ((10328, 10533), 'logging.warning', 'logging.warning', (['"""MXnet does not support compiling arbitrary functions, consider writing a function using MXNet Symbolic backend instead for compiling.\nNow returning the unmodified function."""'], {}), '(\n """MXnet does not support compiling arbitrary functions, consider writing a function using MXNet Symbolic backend instead for compiling.\nNow returning the unmodified function."""\n )\n', (10343, 10533), False, 'import logging\n'), ((2789, 2810), 'mxnet.nd.shape_array', '_mx.nd.shape_array', (['x'], {}), '(x)\n', (2807, 2810), True, 'import mxnet as _mx\n'), ((3322, 3337), 'mxnet.nd.array', '_mx.nd.array', (['x'], {}), '(x)\n', (3334, 3337), True, 'import mxnet as _mx\n'), ((3727, 3746), 'ivy.functional.ivy.device.default_device', 'default_device', (['dev'], {}), '(dev)\n', (3741, 3746), False, 'from ivy.functional.ivy.device import default_device\n'), ((4680, 4711), 'mxnet.nd.where', '_mx.nd.where', (['condition', 'x1', 'x2'], {}), '(condition, x1, x2)\n', (4692, 4711), True, 'import mxnet 
as _mx\n'), ((5185, 5228), 'mxnet.nd.unravel_index', '_mx.nd.unravel_index', (['flat_indices', 'x_shape'], {}), '(flat_indices, x_shape)\n', (5205, 5228), True, 'import mxnet as _mx\n'), ((5505, 5538), 'mxnet.nd.broadcast_to', '_mx.nd.broadcast_to', (['x', 'new_shape'], {}), '(x, new_shape)\n', (5524, 5538), True, 'import mxnet as _mx\n'), ((5917, 5948), 'ivy.functional.backends.mxnet._1_dim_array_to_flat_array', '_1_dim_array_to_flat_array', (['res'], {}), '(res)\n', (5943, 5948), False, 'from ivy.functional.backends.mxnet import _handle_flat_arrays_in_out, _mxnet_init_context, _scalar_or_flat_array_to_scalar, _handle_flat_arrays_in, _flat_array_to_1_dim_array, _1_dim_array_to_flat_array\n'), ((6410, 6432), 'ivy.functional.ivy.device.default_device', 'default_device', (['device'], {}), '(device)\n', (6424, 6432), False, 'from ivy.functional.ivy.device import default_device\n'), ((8837, 8863), 'functools.reduce', '_reduce', (['_mul', 'x_pre_shape'], {}), '(_mul, x_pre_shape)\n', (8844, 8863), True, 'from functools import reduce as _reduce\n'), ((9005, 9032), 'functools.reduce', '_reduce', (['_mul', 'x_post_shape'], {}), '(_mul, x_post_shape)\n', (9012, 9032), True, 'from functools import reduce as _reduce\n'), ((9118, 9168), 'mxnet.nd.arange', '_mx.nd.arange', (['(num_vals * x_pre_size * x_post_size)'], {}), '(num_vals * x_pre_size * x_post_size)\n', (9131, 9168), True, 'import mxnet as _mx\n'), ((10762, 10799), 'multiprocessing.get_context', '_multiprocessing.get_context', (['context'], {}), '(context)\n', (10790, 10799), True, 'import multiprocessing as _multiprocessing\n'), ((3045, 3079), 'ivy.functional.backends.mxnet._scalar_or_flat_array_to_scalar', '_scalar_or_flat_array_to_scalar', (['x'], {}), '(x)\n', (3076, 3079), False, 'from ivy.functional.backends.mxnet import _handle_flat_arrays_in_out, _mxnet_init_context, _scalar_or_flat_array_to_scalar, _handle_flat_arrays_in, _flat_array_to_1_dim_array, _1_dim_array_to_flat_array\n'), ((3081, 3115), 
'ivy.functional.backends.mxnet._scalar_or_flat_array_to_scalar', '_scalar_or_flat_array_to_scalar', (['y'], {}), '(y)\n', (3112, 3115), False, 'from ivy.functional.backends.mxnet import _handle_flat_arrays_in_out, _mxnet_init_context, _scalar_or_flat_array_to_scalar, _handle_flat_arrays_in, _flat_array_to_1_dim_array, _1_dim_array_to_flat_array\n'), ((3169, 3203), 'ivy.functional.backends.mxnet._scalar_or_flat_array_to_scalar', '_scalar_or_flat_array_to_scalar', (['x'], {}), '(x)\n', (3200, 3203), False, 'from ivy.functional.backends.mxnet import _handle_flat_arrays_in_out, _mxnet_init_context, _scalar_or_flat_array_to_scalar, _handle_flat_arrays_in, _flat_array_to_1_dim_array, _1_dim_array_to_flat_array\n'), ((3205, 3239), 'ivy.functional.backends.mxnet._scalar_or_flat_array_to_scalar', '_scalar_or_flat_array_to_scalar', (['y'], {}), '(y)\n', (3236, 3239), False, 'from ivy.functional.backends.mxnet import _handle_flat_arrays_in_out, _mxnet_init_context, _scalar_or_flat_array_to_scalar, _handle_flat_arrays_in, _flat_array_to_1_dim_array, _1_dim_array_to_flat_array\n'), ((6677, 6709), 'ivy.functional.ivy.default_dtype', 'default_dtype', (['dtype', 'fill_value'], {}), '(dtype, fill_value)\n', (6690, 6709), False, 'from ivy.functional.ivy import default_dtype\n'), ((8089, 8115), 'mxnet.nd.eye', '_mx.nd.eye', (['n'], {'dtype': 'dtype'}), '(n, dtype=dtype)\n', (8099, 8115), True, 'import mxnet as _mx\n'), ((8143, 8162), 'ivy.functional.ivy.device.default_device', 'default_device', (['dev'], {}), '(dev)\n', (8157, 8162), False, 'from ivy.functional.ivy.device import default_device\n'), ((8348, 8381), 'mxnet.nd.reshape', '_mx.nd.reshape', (['mat', 'reshape_dims'], {}), '(mat, reshape_dims)\n', (8362, 8381), True, 'import mxnet as _mx\n'), ((9190, 9216), 'mxnet.nd.arange', '_mx.nd.arange', (['num_samples'], {}), '(num_samples)\n', (9203, 9216), True, 'import mxnet as _mx\n'), ((6202, 6221), 'ivy.functional.ivy.device.default_device', 'default_device', (['dev'], {}), 
'(dev)\n', (6216, 6221), False, 'from ivy.functional.ivy.device import default_device\n'), ((8605, 8647), 'mxnet.np.meshgrid', '_mx.np.meshgrid', (['*xs_np'], {'indexing': 'indexing'}), '(*xs_np, indexing=indexing)\n', (8620, 8647), True, 'import mxnet as _mx\n'), ((2928, 2949), 'mxnet.nd.shape_array', '_mx.nd.shape_array', (['x'], {}), '(x)\n', (2946, 2949), True, 'import mxnet as _mx\n'), ((6120, 6139), 'ivy.functional.ivy.device.default_device', 'default_device', (['dev'], {}), '(dev)\n', (6134, 6139), False, 'from ivy.functional.ivy.device import default_device\n'), ((6578, 6610), 'ivy.functional.ivy.default_dtype', 'default_dtype', (['dtype', 'fill_value'], {}), '(dtype, fill_value)\n', (6591, 6610), False, 'from ivy.functional.ivy import default_dtype\n'), ((4217, 4246), 'ivy.functional.backends.mxnet._flat_array_to_1_dim_array', '_flat_array_to_1_dim_array', (['x'], {}), '(x)\n', (4243, 4246), False, 'from ivy.functional.backends.mxnet import _handle_flat_arrays_in_out, _mxnet_init_context, _scalar_or_flat_array_to_scalar, _handle_flat_arrays_in, _flat_array_to_1_dim_array, _1_dim_array_to_flat_array\n')] |
import nltk
import numpy as np
import tensorflow as tf
from nltk.tokenize import sent_tokenize
from tensorflow.keras import backend as K
from transformers import BertTokenizer, TFBertModel
# Maximum number of sentences fed to the model for each document.
NB_OF_SENTS = 30
# Download the sentence tokeniser data used by nltk.sent_tokenize.
nltk.download("punkt")
# Shared BERT tokenizer and encoder, loaded once at import time.
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
bert = TFBertModel.from_pretrained("bert-base-uncased")
def get_model(lstm_cell_size=150):
    """Build and compile the attention-augmented LSTM classifier.

    The network takes a (NB_OF_SENTS, 768) matrix of per-sentence BERT
    embeddings, runs it through an LSTM, weights the per-step outputs with
    a softmax attention keyed on the final hidden state, concatenates the
    attended summary with that hidden state and predicts two softmax
    classes.

    Parameters
    ----------
    lstm_cell_size: int
        Number of LSTM units (also the size of the attention projection).

    Returns
    -------
    tf.keras.Model
        A compiled model (Adam optimizer, categorical cross-entropy).
    """
    layers = tf.keras.layers
    doc_input = layers.Input((NB_OF_SENTS, 768), name="input_shape")

    # LSTM over the sentence sequence; keep the per-step outputs and the
    # final hidden/cell states (the cell state is unused).
    seq_out, last_h, last_c = layers.LSTM(
        lstm_cell_size, return_sequences=True, return_state=True)(doc_input)

    # Score every time step against a projection of the final hidden state.
    query = layers.Dense(lstm_cell_size)(last_h)
    scores = layers.dot([seq_out, query], axes=-1)
    weights = layers.Activation('softmax', name='attention_vec')(scores)

    # Broadcast the (batch, time) weights over the feature axis and apply.
    weights = layers.RepeatVector(lstm_cell_size)(weights)
    weights = layers.Permute([2, 1])(weights)
    weighted = layers.multiply([seq_out, weights])
    summary = layers.Lambda(lambda xin: K.sum(xin, axis=1))(weighted)

    combined = layers.Concatenate()([summary, last_h])
    dropped = layers.Dropout(0.2)(combined)
    outputs = layers.Dense(2, activation='softmax')(dropped)

    model = tf.keras.Model(inputs=doc_input, outputs=outputs)
    model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=0.001),
                  loss='categorical_crossentropy', metrics=['acc'])
    return model
def get_embedding(text: str):
    """Encode a document as an array of per-sentence BERT embeddings.

    The text is lower-cased and split into sentences; the sentence list is
    truncated or padded with empty strings to exactly NB_OF_SENTS entries.
    Each sentence is tokenised, run through BERT, and its token embeddings
    are mean-pooled to a single 768-dimensional vector.

    Parameters
    ----------
    text: str
        The raw document text.

    Returns
    -------
    numpy.ndarray
        Array of shape (-1, NB_OF_SENTS, 768), one row per document.
    """
    text = text.lower()
    sents = sent_tokenize(text)

    # Keep at most NB_OF_SENTS sentences, then pad with empty strings so
    # the model always sees a fixed-size input.
    snt_good = sents[:NB_OF_SENTS]
    while len(snt_good) != NB_OF_SENTS:
        snt_good.append('')
    assert len(snt_good) == NB_OF_SENTS

    encoded_inputs = tokenizer(snt_good, padding=True, truncation=True,
                               return_tensors="tf", max_length=60)
    output = bert(encoded_inputs)
    # Mean-pool each sentence's token embeddings into one vector.
    y = tf.keras.layers.GlobalAveragePooling1D()(output[0])
    # Fix: use NB_OF_SENTS instead of the hard-coded 30 so the reshape
    # stays consistent if the sentence budget ever changes.
    y = np.reshape(y, (-1, NB_OF_SENTS, 768))
    return y
def load_model():
    """Build the classifier architecture and restore its trained weights.

    Returns
    -------
    tf.keras.Model
        The compiled model with weights loaded from ``model_weights/``.
    """
    restored = get_model()
    restored.load_weights('model_weights/')
    return restored
| [
"tensorflow.keras.layers.multiply",
"tensorflow.keras.layers.Dense",
"transformers.TFBertModel.from_pretrained",
"tensorflow.keras.layers.dot",
"nltk.download",
"tensorflow.keras.layers.Concatenate",
"tensorflow.keras.layers.Activation",
"tensorflow.keras.optimizers.Adam",
"tensorflow.keras.layers.I... | [((208, 230), 'nltk.download', 'nltk.download', (['"""punkt"""'], {}), "('punkt')\n", (221, 230), False, 'import nltk\n'), ((243, 293), 'transformers.BertTokenizer.from_pretrained', 'BertTokenizer.from_pretrained', (['"""bert-base-uncased"""'], {}), "('bert-base-uncased')\n", (272, 293), False, 'from transformers import BertTokenizer, TFBertModel\n'), ((301, 349), 'transformers.TFBertModel.from_pretrained', 'TFBertModel.from_pretrained', (['"""bert-base-uncased"""'], {}), "('bert-base-uncased')\n", (328, 349), False, 'from transformers import BertTokenizer, TFBertModel\n'), ((399, 460), 'tensorflow.keras.layers.Input', 'tf.keras.layers.Input', (['(NB_OF_SENTS, 768)'], {'name': '"""input_shape"""'}), "((NB_OF_SENTS, 768), name='input_shape')\n", (420, 460), True, 'import tensorflow as tf\n'), ((673, 723), 'tensorflow.keras.layers.dot', 'tf.keras.layers.dot', (['[lstm_later, linear]'], {'axes': '(-1)'}), '([lstm_later, linear], axes=-1)\n', (692, 723), True, 'import tensorflow as tf\n'), ((970, 1019), 'tensorflow.keras.layers.multiply', 'tf.keras.layers.multiply', (['[lstm_later, attention]'], {}), '([lstm_later, attention])\n', (994, 1019), True, 'import tensorflow as tf\n'), ((1368, 1417), 'tensorflow.keras.Model', 'tf.keras.Model', ([], {'inputs': 'in_id', 'outputs': 'predictions'}), '(inputs=in_id, outputs=predictions)\n', (1382, 1417), True, 'import tensorflow as tf\n'), ((1428, 1473), 'tensorflow.keras.optimizers.Adam', 'tf.keras.optimizers.Adam', ([], {'learning_rate': '(0.001)'}), '(learning_rate=0.001)\n', (1452, 1473), True, 'import tensorflow as tf\n'), ((1642, 1661), 'nltk.tokenize.sent_tokenize', 'sent_tokenize', (['text'], {}), '(text)\n', (1655, 1661), False, 'from nltk.tokenize import sent_tokenize\n'), ((2117, 2145), 'numpy.reshape', 'np.reshape', (['y', '(-1, 30, 768)'], {}), '(y, (-1, 30, 768))\n', (2127, 2145), True, 'import numpy as np\n'), ((500, 578), 'tensorflow.keras.layers.LSTM', 'tf.keras.layers.LSTM', 
(['lstm_cell_size'], {'return_sequences': '(True)', 'return_state': '(True)'}), '(lstm_cell_size, return_sequences=True, return_state=True)\n', (520, 578), True, 'import tensorflow as tf\n'), ((608, 645), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['lstm_cell_size'], {}), '(lstm_cell_size)\n', (629, 645), True, 'import tensorflow as tf\n'), ((742, 801), 'tensorflow.keras.layers.Activation', 'tf.keras.layers.Activation', (['"""softmax"""'], {'name': '"""attention_vec"""'}), "('softmax', name='attention_vec')\n", (768, 801), True, 'import tensorflow as tf\n'), ((829, 873), 'tensorflow.keras.layers.RepeatVector', 'tf.keras.layers.RepeatVector', (['lstm_cell_size'], {}), '(lstm_cell_size)\n', (857, 873), True, 'import tensorflow as tf\n'), ((901, 932), 'tensorflow.keras.layers.Permute', 'tf.keras.layers.Permute', (['[2, 1]'], {}), '([2, 1])\n', (924, 932), True, 'import tensorflow as tf\n'), ((1154, 1183), 'tensorflow.keras.layers.Concatenate', 'tf.keras.layers.Concatenate', ([], {}), '()\n', (1181, 1183), True, 'import tensorflow as tf\n'), ((1229, 1257), 'tensorflow.keras.layers.Dropout', 'tf.keras.layers.Dropout', (['(0.2)'], {}), '(0.2)\n', (1252, 1257), True, 'import tensorflow as tf\n'), ((1303, 1349), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(2)'], {'activation': '"""softmax"""'}), "(2, activation='softmax')\n", (1324, 1349), True, 'import tensorflow as tf\n'), ((2057, 2097), 'tensorflow.keras.layers.GlobalAveragePooling1D', 'tf.keras.layers.GlobalAveragePooling1D', ([], {}), '()\n', (2095, 2097), True, 'import tensorflow as tf\n'), ((1081, 1099), 'tensorflow.keras.backend.sum', 'K.sum', (['xin'], {'axis': '(1)'}), '(xin, axis=1)\n', (1086, 1099), True, 'from tensorflow.keras import backend as K\n')] |
"""
Sea Ice Diagnostics.
====================
Diagnostic to produce a series of images which are useful for evaluating
the behaviour of a sea ice model.
There are three kinds of plots shown here.
1. Sea ice extent map plots with a stereoscopic projection.
2. Map plots of each individual model's ice fraction.
3. Time series plots for the total ice extent.
All three kinds of plots are made for both Summer and Winter in both the
North and Southern hemisphere.
Note that this diagnostic assumes that the preprocessors do the bulk of the
hard work, and that the cube received by this diagnostic (via the settings.yml
and metadata.yml files) has no time component, a small number of depth layers,
and a latitude and longitude coordinates.
This diagnostic takes data from either North or South hemisphere, and
from either December-January-February or June-July-August. This diagnostic
requires the data to be 2D+time, and typically expects the data field to be
the sea ice cover.
An appropriate preprocessor would be::
preprocessors:
timeseries_NHW_ice_extent: # North Hemisphere Winter ice_extent
custom_order: true
extract_time:
start_year: 1960
start_month: 12
start_day: 1
end_year: 2005
end_month: 9
end_day: 31
extract_season:
season: DJF
extract_region:
start_longitude: -180.
end_longitude: 180.
start_latitude: 0.
end_latitude: 90.
Note that this recipe may not function on machines with no access to the
internet, as cartopy may try to download the shapefiles. The solution to
this issue is the put the relevant cartopy shapefiles on a disk visible to your
machine, then link that path to ESMValTool via the `auxiliary_data_dir`
variable. The cartopy masking files can be downloaded from::
https://www.naturalearthdata.com/downloads/
Here, cartopy uses the 1:10, physical coastlines and land files::
110m_coastline.dbf 110m_coastline.shp 110m_coastline.shx
110m_land.dbf 110m_land.shp 110m_land.shx
This tool is part of the ocean diagnostic tools package in the ESMValTool.
Author: <NAME> (PML)
<EMAIL>
"""
import logging
import os
import sys
from itertools import product
import cartopy
import iris
import iris.coord_categorisation
import iris.quickplot as qplt
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
from esmvaltool.diag_scripts.ocean import diagnostic_tools as diagtools
from esmvaltool.diag_scripts.shared import run_diagnostic
# This part sends debug statements to stdout.
# The named logger is used throughout this module; the stream handler is
# attached to the root logger, so records from all loggers reach stdout.
logger = logging.getLogger(os.path.basename(__file__))
logging.getLogger().addHandler(logging.StreamHandler(sys.stdout))
# Note that this recipe may not function on machines with no access to
# the internet, as cartopy may try to download geographic files.
def create_ice_cmap(threshold=0.15):
    """
    Create colour map with ocean blue below a threshold and white above.

    Parameters
    ----------
    threshold: float
        The threshold for the line between blue and white.

    Returns
    -------
    matplotlib.colors.LinearSegmentedColormap:
        The resulting colour map.
    """
    # NOTE(review): the threshold is divided by 100, so it is interpreted
    # on the same 0-100 (percent) scale as the plotted data; the 0.15
    # default looks like a fraction - confirm callers pass a percentage.
    cut = threshold / 100.
    # RGB components of the "ocean blue" used below the threshold.
    blue_r, blue_g, blue_b = 0.0313, 0.237, 0.456
    cmap_spec = {
        'red': ((0., blue_r, blue_r), (cut, blue_r, 1.), (1., 1., 1.)),
        'green': ((0., blue_g, blue_g), (cut, blue_g, 1.), (1., 1., 1.)),
        'blue': ((0., blue_b, blue_b), (cut, blue_b, 1.), (1., 1., 1.))
    }
    return matplotlib.colors.LinearSegmentedColormap('ice_cmap', cmap_spec)
def calculate_area_time_series(cube, plot_type, threshold):
    """
    Calculate the total ice area or ice extent at each time point.

    Requires a cube with two spatial dimensions (no depth coordinate).

    Parameters
    ----------
    cube: iris.cube.Cube
        Data Cube
    plot_type: str
        The type of plot: ice extent or ice area
    threshold: float
        The threshold for ice fraction (typically 15%)

    Returns
    -------
    numpy.array:
        An numpy array containing the time points.
    numpy.array:
        An numpy array containing the total ice extent or total ice area.
    """
    times = diagtools.cube_time_to_float(cube)
    kind = plot_type.lower()
    totals = []
    for index, time_point in enumerate(times):
        time_slice = cube[index]
        ice_cover = time_slice.data
        cell_areas = iris.analysis.cartography.area_weights(time_slice)
        if kind == 'ice extent':
            # Ice extent: total area of the cells above the threshold cover.
            ice_cover = np.ma.masked_where(ice_cover < threshold, ice_cover)
            total = np.ma.masked_where(ice_cover.mask, cell_areas.data).sum()
        if kind == 'ice area':
            # Ice area: fractional cover multiplied by the cell area.
            total = np.sum(ice_cover * cell_areas)
        logger.debug('Calculating time series area: %s, %s, %s,', index,
                     time_point, total)
        totals.append(total)
    return times, np.array(totals)
def make_ts_plots(
        cfg,
        metadata,
        filename,
):
    """
    Make an ice extent and ice area time series plot for an individual model.

    Parameters
    ----------
    cfg: dict
        the opened global config dictionary, passed by ESMValTool.
    metadata: dict
        The metadata dictionary for a specific model.
    filename: str
        The preprocessed model file.
    """
    # Load cube, convert units and reduce the data to seasonal means.
    cube = iris.load_cube(filename)
    iris.coord_categorisation.add_year(cube, 'time')
    cube = diagtools.bgc_units(cube, metadata['short_name'])
    cube = agregate_by_season(cube)
    # Is this data a multi-model dataset?
    multi_model = metadata['dataset'].find('MultiModel') > -1
    # Make a dict of cubes for each layer.
    cubes = diagtools.make_cube_layer_dict(cube)
    # Load image format extension.
    image_extention = diagtools.get_image_format(cfg)
    # Load threshold, pole, season.
    threshold = float(cfg['threshold'])
    pole = get_pole(cube)
    season = get_season(cube)
    # One figure per (plot type, depth layer) combination.
    for plot_type in ['Ice Extent', 'Ice Area']:
        for layer_index, (layer, cube_layer) in enumerate(cubes.items()):
            layer = str(layer)
            times, data = calculate_area_time_series(cube_layer, plot_type,
                                                     threshold)
            plt.plot(times, data)
            # Add title to plot; append the depth layer when there is one.
            title = ' '.join(
                [metadata['dataset'], pole, 'hemisphere', season, plot_type])
            if layer:
                title = ' '.join([
                    title, '(', layer,
                    str(cube_layer.coords('depth')[0].units), ')'
                ])
            plt.title(title)
            # y axis label:
            plt.ylabel(' '.join([plot_type, 'm^2']))
            # Determine image filename (spaces stripped from the suffix).
            suffix = '_'.join(['ts', metadata['preprocessor'], season, pole,
                               plot_type, str(layer_index)])\
                + image_extention
            suffix = suffix.replace(' ', '')
            if multi_model:
                # Multi-model datasets are named after the input file.
                path = diagtools.folder(
                    cfg['plot_dir']) + os.path.basename(filename)
                path = path.replace('.nc', suffix)
            else:
                path = diagtools.get_image_path(
                    cfg,
                    metadata,
                    suffix=suffix,
                )
            # Saving files:
            if cfg['write_plots']:
                logger.info('Saving plots to %s', path)
                plt.savefig(path)
            # Close the figure whether or not it was saved, so state from
            # this plot does not leak into the next one.
            plt.close()
def make_polar_map(
        cube,
        pole='North',
        cmap='Blues_r',
):
    """
    Make a polar stereoscopic map plot.

    The cube is the opened cube (two dimensional),
    pole is the polar region (North/South),
    cmap is the colourmap.

    Parameters
    ----------
    cube: iris.cube.Cube
        Data Cube
    pole: str
        The hemisphere
    cmap: str
        The string describing the matplotlib colourmap.

    Returns
    ----------
    matplotlib.pyplot.figure:
        The matplotlib figure where the map was drawn.
    """
    fig = plt.figure()
    fig.set_size_inches(7, 7)

    # ####
    # Set limits, based on https://nedbatchelder.com/blog/200806/pylint.html
    if pole not in ['North', 'South']:
        # NOTE(review): execution continues after this log call, so an
        # invalid pole leads to a NameError on ax1 below rather than a
        # clean failure.
        logger.fatal('make_polar_map: hemisphere not provided.')

    if pole == 'North':  # North Hemisphere
        ax1 = plt.subplot(111, projection=cartopy.crs.NorthPolarStereo())
        ax1.set_extent([-180, 180, 50, 90], cartopy.crs.PlateCarree())

    if pole == 'South':  # South Hemisphere
        ax1 = plt.subplot(111, projection=cartopy.crs.SouthPolarStereo())
        ax1.set_extent([-180, 180, -90, -50], cartopy.crs.PlateCarree())

    # 21 contour levels between 0 and 100 % cover.  The level count must
    # be an integer: numpy.linspace raises a TypeError for a float `num`.
    linrange = np.linspace(0., 100., 21)
    qplt.contourf(cube, linrange, cmap=cmap, linewidth=0, rasterized=True)
    plt.tight_layout()

    try:
        ax1.add_feature(
            cartopy.feature.LAND,
            zorder=10,
            facecolor=[0.8, 0.8, 0.8],
        )
    except ConnectionRefusedError:
        # Cartopy may try to download shapefiles; carry on without the
        # land feature when there is no network access.
        logger.error('Cartopy was unable add coastlines due to a '
                     'connection error.')
    ax1.gridlines(
        linewidth=0.5, color='black', zorder=20, alpha=0.5, linestyle='--')

    try:
        plt.gca().coastlines()
    except AttributeError:
        logger.warning('make_polar_map: Not able to add coastlines')
    return fig
def get_pole(cube):
    """
    Determine the hemisphere of a cube from its latitude coordinate.

    Parameters
    ----------
    cube: iris.cube.Cube
        Data Cube

    Returns
    ----------
    str:
        The hemisphere (North or South)
    """
    margin = 5.
    latitudes = cube.coord('latitude').points
    # All latitudes (to within the margin) below the equator -> South.
    if np.max(latitudes) < margin:
        return 'South'
    # All latitudes (to within the margin) above the equator -> North.
    if np.min(latitudes) > -margin:
        return 'North'
    logger.fatal('get_pole: Not able to determine hemisphere.')
    return False
def get_time_string(cube):
    """
    Return a climatological season string in the format: "year season".

    Parameters
    ----------
    cube: iris.cube.Cube
        Data Cube

    Returns
    ----------
    str:
        The year and climatological season as a single string.
    """
    clim_season = cube.coord('clim_season').points[0]
    first_year = cube.coord('year').points[0]
    return '{} {}'.format(int(first_year), clim_season.upper())
def get_year(cube):
    """
    Return the cube's year as a string.

    Parameters
    ----------
    cube: iris.cube.Cube
        Data Cube

    Returns
    ----------
    str:
        The year as a string
    """
    # coord('year').points is an array; take the first element explicitly.
    # int() on a whole array is deprecated in numpy and fails outright for
    # plain sequences - this also matches get_time_string's behaviour.
    year = cube.coord('year').points
    return str(int(year[0]))
def get_season(cube):
    """
    Return the cube's climatological season as an upper-case string.

    Parameters
    ----------
    cube: iris.cube.Cube
        Data Cube

    Returns
    ----------
    str:
        The climatological season as a string
    """
    return cube.coord('clim_season').points[0].upper()
def make_map_plots(
        cfg,
        metadata,
        filename,
):
    """
    Make a simple map plot for an individual model.

    Parameters
    ----------
    cfg: dict
        the opened global config dictionary, passed by ESMValTool.
    metadata: dict
        The metadata dictionary for a specific model.
    filename: str
        The preprocessed model file.
    """
    # Load cube, convert units and reduce the data to seasonal means.
    cube = iris.load_cube(filename)
    iris.coord_categorisation.add_year(cube, 'time')
    cube = diagtools.bgc_units(cube, metadata['short_name'])
    cube = agregate_by_season(cube)
    # Is this data a multi-model dataset?
    multi_model = metadata['dataset'].find('MultiModel') > -1
    # Make a dict of cubes for each layer.
    cubes = diagtools.make_cube_layer_dict(cube)
    # Load image format extension and threshold.
    image_extention = diagtools.get_image_format(cfg)
    threshold = float(cfg['threshold'])
    # One map per (plot type, time index, depth layer) combination;
    # plot_time 0/-1 selects the first and last seasonal mean.
    plot_types = ['Fractional cover', 'Ice Extent']
    plot_times = [0, -1]
    for plot_type, plot_time in product(plot_types, plot_times):
        for layer_index, (layer, cube_layer) in enumerate(cubes.items()):
            layer = str(layer)
            # Continuous colour map for cover; thresholded blue/white for
            # extent.
            if plot_type == 'Fractional cover':
                cmap = 'Blues_r'
            if plot_type == 'Ice Extent':
                cmap = create_ice_cmap(threshold)
            cube = cube_layer[plot_time]
            # use cube to determine which hemisphere, season and year.
            pole = get_pole(cube)
            time_str = get_time_string(cube)
            # Make the polar map.
            make_polar_map(cube, pole=pole, cmap=cmap)
            # Add title to plot; append the depth layer when there is one.
            title = ' '.join([metadata['dataset'], plot_type, time_str])
            if layer:
                title = ' '.join([
                    title, '(', layer,
                    str(cube_layer.coords('depth')[0].units), ')'
                ])
            plt.title(title)
            # Determine image filename (spaces stripped from the suffix):
            suffix = '_'.join(
                ['ortho_map', plot_type, time_str,
                 str(layer_index)])
            suffix = suffix.replace(' ', '') + image_extention
            if multi_model:
                # Multi-model datasets are named after the input file.
                path = diagtools.folder(cfg['plot_dir'])
                path = path + os.path.basename(filename)
                path = path.replace('.nc', suffix)
            else:
                path = diagtools.get_image_path(
                    cfg,
                    metadata,
                    suffix=suffix,
                )
            # Saving files:
            if cfg['write_plots']:
                logger.info('Saving plots to %s', path)
                plt.savefig(path)
            # Close the figure whether or not it was saved.
            plt.close()
def agregate_by_season(cube):
    """
    Aggregate the cube into seasonal means.

    Note that it is not currently possible to do this in the preprocessor,
    as the seasonal mean changes the cube units.

    Parameters
    ----------
    cube: iris.cube.Cube
        Data Cube

    Returns
    ----------
    iris.cube.Cube:
        Data Cube with the seasonal means
    """
    # Add the categorisation coordinates only if they are not present yet.
    required = (
        ('clim_season', iris.coord_categorisation.add_season),
        ('season_year', iris.coord_categorisation.add_season_year),
    )
    for coord_name, add_coord in required:
        if not cube.coords(coord_name):
            add_coord(cube, 'time', name=coord_name)
    return cube.aggregated_by(['clim_season', 'season_year'],
                              iris.analysis.MEAN)
def make_map_extent_plots(
        cfg,
        metadata,
        filename,
):
    """
    Make an extent map plot showing several times for an individual model.

    One figure is drawn per depth layer, with one contour (at the ice
    threshold) per time point, coloured from first to last year.

    Parameters
    ----------
    cfg: dict
        the opened global config dictionary, passed by ESMValTool.
    metadata: dict
        The metadata dictionary for a specific model.
    filename: str
        The preprocessed model file.
    """
    # Load cube, convert units and reduce the data to seasonal means.
    cube = iris.load_cube(filename)
    iris.coord_categorisation.add_year(cube, 'time')
    cube = diagtools.bgc_units(cube, metadata['short_name'])
    cube = agregate_by_season(cube)

    # Is this data a multi-model dataset?
    multi_model = metadata['dataset'].find('MultiModel') > -1

    # Make a dict of cubes for each layer.
    cubes = diagtools.make_cube_layer_dict(cube)

    # Load image format extension.
    image_extention = diagtools.get_image_format(cfg)

    # Load threshold, pole and season.
    threshold = float(cfg['threshold'])
    pole = get_pole(cube)
    season = get_season(cube)

    # Start making figure
    for layer_index, (layer, cube_layer) in enumerate(cubes.items()):

        fig = plt.figure()
        fig.set_size_inches(7, 7)

        if pole == 'North':  # North Hemisphere
            projection = cartopy.crs.NorthPolarStereo()
            ax1 = plt.subplot(111, projection=projection)
            ax1.set_extent([-180, 180, 50, 90], cartopy.crs.PlateCarree())

        if pole == 'South':  # South Hemisphere
            projection = cartopy.crs.SouthPolarStereo()
            ax1 = plt.subplot(111, projection=projection)
            ax1.set_extent([-180, 180, -90, -50], cartopy.crs.PlateCarree())

        try:
            ax1.add_feature(
                cartopy.feature.LAND, zorder=10, facecolor=[0.8, 0.8, 0.8])
        except ConnectionRefusedError:
            logger.error('Cartopy was unable add coastlines due to a '
                         'connection error.')
        ax1.gridlines(
            linewidth=0.5, color='black', zorder=20, alpha=0.5, linestyle='--')

        try:
            plt.gca().coastlines()
        except AttributeError:
            logger.warning('make_polar_map: Not able to add coastlines')

        # Bug fix: take the time points from the layer cube and keep the
        # per-time slice in its own variable.  Previously the outer `cube`
        # was re-bound to a 2D slice inside the loop below, so the time
        # coordinate lookup broke on the second and subsequent layers.
        times = np.array(cube_layer.coord('time').points.astype(float))
        layer = str(layer)
        plot_desc = {}
        for time_itr, time in enumerate(times):
            time_cube = cube_layer[time_itr]
            line_width = 1
            # Colour each year's contour along the jet colour map.
            color = plt.cm.jet(float(time_itr) / float(len(times)))
            label = get_year(time_cube)
            plot_desc[time] = {'label': label,
                               'c': [color, ],
                               'lw': [line_width, ],
                               'ls': ['-', ]}

            qplt.contour(time_cube,
                         [threshold, ],
                         colors=plot_desc[time]['c'],
                         linewidths=plot_desc[time]['lw'],
                         linestyles=plot_desc[time]['ls'],
                         rasterized=True)

        # Shrink the axes to make room for the legend on the right.
        legend_size = len(plot_desc) + 1
        ncols = int(legend_size / 25) + 1
        ax1.set_position([
            ax1.get_position().x0,
            ax1.get_position().y0,
            ax1.get_position().width * (1. - 0.1 * ncols),
            ax1.get_position().height
        ])

        fig.set_size_inches(7 + ncols * 1.2, 7)

        # Construct dummy plots so the legend shows one entry per year.
        for i in sorted(plot_desc):
            plt.plot(
                [],
                [],
                c=plot_desc[i]['c'][0],
                lw=plot_desc[i]['lw'][0],
                ls=plot_desc[i]['ls'][0],
                label=plot_desc[i]['label'],
            )

        legd = ax1.legend(
            loc='center left',
            ncol=ncols,
            prop={'size': 10},
            bbox_to_anchor=(1., 0.5))
        legd.draw_frame(False)
        legd.get_frame().set_alpha(0.)

        # Add title to plot; append the depth layer when there is one.
        title = ' '.join([
            metadata['dataset'],
        ])
        if layer:
            title = ' '.join([
                title, '(', layer,
                str(cube_layer.coords('depth')[0].units), ')'
            ])
        plt.title(title)

        # Determine image filename (spaces stripped from the suffix):
        suffix = '_'.join(['ortho_map', pole, season, str(layer_index)])
        suffix = suffix.replace(' ', '') + image_extention
        if multi_model:
            path = diagtools.folder(cfg['plot_dir'])
            path = path + os.path.basename(filename)
            path = path.replace('.nc', suffix)
        else:
            path = diagtools.get_image_path(
                cfg,
                metadata,
                suffix=suffix,
            )

        # Saving files:
        if cfg['write_plots']:
            logger.info('Saving plots to %s', path)
            plt.savefig(path)
        plt.close()
def main(cfg):
    """
    Load the config file and metadata, then pass them the plot making tools.

    Parameters
    ----------
    cfg: dict
        the opened global config dictionary, passed by ESMValTool.
    """
    # Point cartopy at locally stored shapefiles so it does not need to
    # download them.
    cartopy.config['data_dir'] = cfg['auxiliary_data_dir']

    for index, metadata_filename in enumerate(cfg['input_files']):
        logger.info('metadata filename:\t%s', metadata_filename)
        metadatas = diagtools.get_input_files(cfg, index=index)
        for filename in sorted(metadatas):
            logger.info('-----------------')
            logger.info('model filenames:\t%s', filename)
            file_metadata = metadatas[filename]
            # Extent maps, fraction maps and time series for each dataset.
            make_map_extent_plots(cfg, file_metadata, filename)
            make_map_plots(cfg, file_metadata, filename)
            make_ts_plots(cfg, file_metadata, filename)

    logger.info('Success')
if __name__ == '__main__':
    # run_diagnostic() reads the settings file ESMValTool passes on the
    # command line and yields the configuration dictionary.
    with run_diagnostic() as config:
        main(config)
| [
"matplotlib.pyplot.title",
"matplotlib.pyplot.savefig",
"matplotlib.colors.LinearSegmentedColormap",
"numpy.sum",
"esmvaltool.diag_scripts.ocean.diagnostic_tools.cube_time_to_float",
"matplotlib.pyplot.figure",
"iris.load_cube",
"iris.coord_categorisation.add_year",
"matplotlib.pyplot.gca",
"carto... | [((2619, 2645), 'os.path.basename', 'os.path.basename', (['__file__'], {}), '(__file__)\n', (2635, 2645), False, 'import os\n'), ((2678, 2711), 'logging.StreamHandler', 'logging.StreamHandler', (['sys.stdout'], {}), '(sys.stdout)\n', (2699, 2711), False, 'import logging\n'), ((3500, 3568), 'matplotlib.colors.LinearSegmentedColormap', 'matplotlib.colors.LinearSegmentedColormap', (['"""ice_cmap"""', 'ice_cmap_dict'], {}), "('ice_cmap', ice_cmap_dict)\n", (3541, 3568), False, 'import matplotlib\n'), ((4196, 4230), 'esmvaltool.diag_scripts.ocean.diagnostic_tools.cube_time_to_float', 'diagtools.cube_time_to_float', (['cube'], {}), '(cube)\n', (4224, 4230), True, 'from esmvaltool.diag_scripts.ocean import diagnostic_tools as diagtools\n'), ((4990, 5004), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (4998, 5004), True, 'import numpy as np\n'), ((5481, 5505), 'iris.load_cube', 'iris.load_cube', (['filename'], {}), '(filename)\n', (5495, 5505), False, 'import iris\n'), ((5510, 5558), 'iris.coord_categorisation.add_year', 'iris.coord_categorisation.add_year', (['cube', '"""time"""'], {}), "(cube, 'time')\n", (5544, 5558), False, 'import iris\n'), ((5570, 5619), 'esmvaltool.diag_scripts.ocean.diagnostic_tools.bgc_units', 'diagtools.bgc_units', (['cube', "metadata['short_name']"], {}), "(cube, metadata['short_name'])\n", (5589, 5619), True, 'from esmvaltool.diag_scripts.ocean import diagnostic_tools as diagtools\n'), ((5820, 5856), 'esmvaltool.diag_scripts.ocean.diagnostic_tools.make_cube_layer_dict', 'diagtools.make_cube_layer_dict', (['cube'], {}), '(cube)\n', (5850, 5856), True, 'from esmvaltool.diag_scripts.ocean import diagnostic_tools as diagtools\n'), ((5914, 5945), 'esmvaltool.diag_scripts.ocean.diagnostic_tools.get_image_format', 'diagtools.get_image_format', (['cfg'], {}), '(cfg)\n', (5940, 5945), True, 'from esmvaltool.diag_scripts.ocean import diagnostic_tools as diagtools\n'), ((8332, 8344), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), 
'()\n', (8342, 8344), True, 'import matplotlib.pyplot as plt\n'), ((8968, 8997), 'numpy.linspace', 'np.linspace', (['(0.0)', '(100.0)', '(21.0)'], {}), '(0.0, 100.0, 21.0)\n', (8979, 8997), True, 'import numpy as np\n'), ((8999, 9069), 'iris.quickplot.contourf', 'qplt.contourf', (['cube', 'linrange'], {'cmap': 'cmap', 'linewidth': '(0)', 'rasterized': '(True)'}), '(cube, linrange, cmap=cmap, linewidth=0, rasterized=True)\n', (9012, 9069), True, 'import iris.quickplot as qplt\n'), ((9074, 9092), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (9090, 9092), True, 'import matplotlib.pyplot as plt\n'), ((11596, 11620), 'iris.load_cube', 'iris.load_cube', (['filename'], {}), '(filename)\n', (11610, 11620), False, 'import iris\n'), ((11625, 11673), 'iris.coord_categorisation.add_year', 'iris.coord_categorisation.add_year', (['cube', '"""time"""'], {}), "(cube, 'time')\n", (11659, 11673), False, 'import iris\n'), ((11685, 11734), 'esmvaltool.diag_scripts.ocean.diagnostic_tools.bgc_units', 'diagtools.bgc_units', (['cube', "metadata['short_name']"], {}), "(cube, metadata['short_name'])\n", (11704, 11734), True, 'from esmvaltool.diag_scripts.ocean import diagnostic_tools as diagtools\n'), ((11935, 11971), 'esmvaltool.diag_scripts.ocean.diagnostic_tools.make_cube_layer_dict', 'diagtools.make_cube_layer_dict', (['cube'], {}), '(cube)\n', (11965, 11971), True, 'from esmvaltool.diag_scripts.ocean import diagnostic_tools as diagtools\n'), ((12044, 12075), 'esmvaltool.diag_scripts.ocean.diagnostic_tools.get_image_format', 'diagtools.get_image_format', (['cfg'], {}), '(cfg)\n', (12070, 12075), True, 'from esmvaltool.diag_scripts.ocean import diagnostic_tools as diagtools\n'), ((12260, 12291), 'itertools.product', 'product', (['plot_types', 'plot_times'], {}), '(plot_types, plot_times)\n', (12267, 12291), False, 'from itertools import product\n'), ((15147, 15171), 'iris.load_cube', 'iris.load_cube', (['filename'], {}), '(filename)\n', (15161, 15171), False, 
'import iris\n'), ((15176, 15224), 'iris.coord_categorisation.add_year', 'iris.coord_categorisation.add_year', (['cube', '"""time"""'], {}), "(cube, 'time')\n", (15210, 15224), False, 'import iris\n'), ((15236, 15285), 'esmvaltool.diag_scripts.ocean.diagnostic_tools.bgc_units', 'diagtools.bgc_units', (['cube', "metadata['short_name']"], {}), "(cube, metadata['short_name'])\n", (15255, 15285), True, 'from esmvaltool.diag_scripts.ocean import diagnostic_tools as diagtools\n'), ((15486, 15522), 'esmvaltool.diag_scripts.ocean.diagnostic_tools.make_cube_layer_dict', 'diagtools.make_cube_layer_dict', (['cube'], {}), '(cube)\n', (15516, 15522), True, 'from esmvaltool.diag_scripts.ocean import diagnostic_tools as diagtools\n'), ((15580, 15611), 'esmvaltool.diag_scripts.ocean.diagnostic_tools.get_image_format', 'diagtools.get_image_format', (['cfg'], {}), '(cfg)\n', (15606, 15611), True, 'from esmvaltool.diag_scripts.ocean import diagnostic_tools as diagtools\n'), ((2647, 2666), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (2664, 2666), False, 'import logging\n'), ((4329, 4383), 'iris.analysis.cartography.area_weights', 'iris.analysis.cartography.area_weights', (['cube[time_itr]'], {}), '(cube[time_itr])\n', (4367, 4383), False, 'import iris\n'), ((14369, 14439), 'iris.coord_categorisation.add_season', 'iris.coord_categorisation.add_season', (['cube', '"""time"""'], {'name': '"""clim_season"""'}), "(cube, 'time', name='clim_season')\n", (14405, 14439), False, 'import iris\n'), ((14487, 14562), 'iris.coord_categorisation.add_season_year', 'iris.coord_categorisation.add_season_year', (['cube', '"""time"""'], {'name': '"""season_year"""'}), "(cube, 'time', name='season_year')\n", (14528, 14562), False, 'import iris\n'), ((15859, 15871), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (15869, 15871), True, 'import matplotlib.pyplot as plt\n'), ((18899, 18915), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (18908, 18915), True, 
'import matplotlib.pyplot as plt\n'), ((19559, 19570), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (19568, 19570), True, 'import matplotlib.pyplot as plt\n'), ((20043, 20086), 'esmvaltool.diag_scripts.ocean.diagnostic_tools.get_input_files', 'diagtools.get_input_files', (['cfg'], {'index': 'index'}), '(cfg, index=index)\n', (20068, 20086), True, 'from esmvaltool.diag_scripts.ocean import diagnostic_tools as diagtools\n'), ((20734, 20750), 'esmvaltool.diag_scripts.shared.run_diagnostic', 'run_diagnostic', ([], {}), '()\n', (20748, 20750), False, 'from esmvaltool.diag_scripts.shared import run_diagnostic\n'), ((4519, 4567), 'numpy.ma.masked_where', 'np.ma.masked_where', (['(icedata < threshold)', 'icedata'], {}), '(icedata < threshold, icedata)\n', (4537, 4567), True, 'import numpy as np\n'), ((4756, 4778), 'numpy.sum', 'np.sum', (['(icedata * area)'], {}), '(icedata * area)\n', (4762, 4778), True, 'import numpy as np\n'), ((6424, 6445), 'matplotlib.pyplot.plot', 'plt.plot', (['times', 'data'], {}), '(times, data)\n', (6432, 6445), True, 'import matplotlib.pyplot as plt\n'), ((6780, 6796), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (6789, 6796), True, 'import matplotlib.pyplot as plt\n'), ((7671, 7682), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (7680, 7682), True, 'import matplotlib.pyplot as plt\n'), ((8733, 8758), 'cartopy.crs.PlateCarree', 'cartopy.crs.PlateCarree', ([], {}), '()\n', (8756, 8758), False, 'import cartopy\n'), ((8925, 8950), 'cartopy.crs.PlateCarree', 'cartopy.crs.PlateCarree', ([], {}), '()\n', (8948, 8950), False, 'import cartopy\n'), ((13154, 13170), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (13163, 13170), True, 'import matplotlib.pyplot as plt\n'), ((13928, 13939), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (13937, 13939), True, 'import matplotlib.pyplot as plt\n'), ((15980, 16010), 'cartopy.crs.NorthPolarStereo', 'cartopy.crs.NorthPolarStereo', 
([], {}), '()\n', (16008, 16010), False, 'import cartopy\n'), ((16029, 16068), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(111)'], {'projection': 'projection'}), '(111, projection=projection)\n', (16040, 16068), True, 'import matplotlib.pyplot as plt\n'), ((16218, 16248), 'cartopy.crs.SouthPolarStereo', 'cartopy.crs.SouthPolarStereo', ([], {}), '()\n', (16246, 16248), False, 'import cartopy\n'), ((16267, 16306), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(111)'], {'projection': 'projection'}), '(111, projection=projection)\n', (16278, 16306), True, 'import matplotlib.pyplot as plt\n'), ((17461, 17611), 'iris.quickplot.contour', 'qplt.contour', (['cube', '[threshold]'], {'colors': "plot_desc[time]['c']", 'linewidths': "plot_desc[time]['lw']", 'linestyles': "plot_desc[time]['ls']", 'rasterized': '(True)'}), "(cube, [threshold], colors=plot_desc[time]['c'], linewidths=\n plot_desc[time]['lw'], linestyles=plot_desc[time]['ls'], rasterized=True)\n", (17473, 17611), True, 'import iris.quickplot as qplt\n'), ((18175, 18301), 'matplotlib.pyplot.plot', 'plt.plot', (['[]', '[]'], {'c': "plot_desc[i]['c'][0]", 'lw': "plot_desc[i]['lw'][0]", 'ls': "plot_desc[i]['ls'][0]", 'label': "plot_desc[i]['label']"}), "([], [], c=plot_desc[i]['c'][0], lw=plot_desc[i]['lw'][0], ls=\n plot_desc[i]['ls'][0], label=plot_desc[i]['label'])\n", (18183, 18301), True, 'import matplotlib.pyplot as plt\n'), ((19128, 19161), 'esmvaltool.diag_scripts.ocean.diagnostic_tools.folder', 'diagtools.folder', (["cfg['plot_dir']"], {}), "(cfg['plot_dir'])\n", (19144, 19161), True, 'from esmvaltool.diag_scripts.ocean import diagnostic_tools as diagtools\n'), ((19295, 19349), 'esmvaltool.diag_scripts.ocean.diagnostic_tools.get_image_path', 'diagtools.get_image_path', (['cfg', 'metadata'], {'suffix': 'suffix'}), '(cfg, metadata, suffix=suffix)\n', (19319, 19349), True, 'from esmvaltool.diag_scripts.ocean import diagnostic_tools as diagtools\n'), ((19533, 19550), 'matplotlib.pyplot.savefig', 
'plt.savefig', (['path'], {}), '(path)\n', (19544, 19550), True, 'import matplotlib.pyplot as plt\n'), ((7370, 7424), 'esmvaltool.diag_scripts.ocean.diagnostic_tools.get_image_path', 'diagtools.get_image_path', (['cfg', 'metadata'], {'suffix': 'suffix'}), '(cfg, metadata, suffix=suffix)\n', (7394, 7424), True, 'from esmvaltool.diag_scripts.ocean import diagnostic_tools as diagtools\n'), ((7640, 7657), 'matplotlib.pyplot.savefig', 'plt.savefig', (['path'], {}), '(path)\n', (7651, 7657), True, 'import matplotlib.pyplot as plt\n'), ((8657, 8687), 'cartopy.crs.NorthPolarStereo', 'cartopy.crs.NorthPolarStereo', ([], {}), '()\n', (8685, 8687), False, 'import cartopy\n'), ((8847, 8877), 'cartopy.crs.SouthPolarStereo', 'cartopy.crs.SouthPolarStereo', ([], {}), '()\n', (8875, 8877), False, 'import cartopy\n'), ((9491, 9500), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (9498, 9500), True, 'import matplotlib.pyplot as plt\n'), ((13444, 13477), 'esmvaltool.diag_scripts.ocean.diagnostic_tools.folder', 'diagtools.folder', (["cfg['plot_dir']"], {}), "(cfg['plot_dir'])\n", (13460, 13477), True, 'from esmvaltool.diag_scripts.ocean import diagnostic_tools as diagtools\n'), ((13627, 13681), 'esmvaltool.diag_scripts.ocean.diagnostic_tools.get_image_path', 'diagtools.get_image_path', (['cfg', 'metadata'], {'suffix': 'suffix'}), '(cfg, metadata, suffix=suffix)\n', (13651, 13681), True, 'from esmvaltool.diag_scripts.ocean import diagnostic_tools as diagtools\n'), ((13897, 13914), 'matplotlib.pyplot.savefig', 'plt.savefig', (['path'], {}), '(path)\n', (13908, 13914), True, 'import matplotlib.pyplot as plt\n'), ((16117, 16142), 'cartopy.crs.PlateCarree', 'cartopy.crs.PlateCarree', ([], {}), '()\n', (16140, 16142), False, 'import cartopy\n'), ((16357, 16382), 'cartopy.crs.PlateCarree', 'cartopy.crs.PlateCarree', ([], {}), '()\n', (16380, 16382), False, 'import cartopy\n'), ((19188, 19214), 'os.path.basename', 'os.path.basename', (['filename'], {}), '(filename)\n', (19204, 19214), 
False, 'import os\n'), ((4593, 4636), 'numpy.ma.masked_where', 'np.ma.masked_where', (['icedata.mask', 'area.data'], {}), '(icedata.mask, area.data)\n', (4611, 4636), True, 'import numpy as np\n'), ((7194, 7227), 'esmvaltool.diag_scripts.ocean.diagnostic_tools.folder', 'diagtools.folder', (["cfg['plot_dir']"], {}), "(cfg['plot_dir'])\n", (7210, 7227), True, 'from esmvaltool.diag_scripts.ocean import diagnostic_tools as diagtools\n'), ((7251, 7277), 'os.path.basename', 'os.path.basename', (['filename'], {}), '(filename)\n', (7267, 7277), False, 'import os\n'), ((13508, 13534), 'os.path.basename', 'os.path.basename', (['filename'], {}), '(filename)\n', (13524, 13534), False, 'import os\n'), ((16789, 16798), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (16796, 16798), True, 'import matplotlib.pyplot as plt\n')] |
import numpy as np
try:
    def downsample_axis(myarr, factor, axis, estimator=np.nanmean, truncate=False):
        """
        Downsample an ND array by averaging over *factor* pixels along an axis.
        Crops right side if the shape is not a multiple of factor.

        This code is pure np and should be fast.

        Parameters
        ----------
        myarr : `~numpy.ndarray`
            The array to downsample
        factor : int
            The factor to downsample by
        axis : int
            The axis to downsample along
        estimator : function
            defaults to mean.  You can downsample by summing or
            something else if you want a different estimator
            (e.g., downsampling error: you want to sum & divide by sqrt(n))
        truncate : bool
            Whether to truncate the last chunk or average over a smaller number.
            e.g., if you downsample [1,2,3,4] by a factor of 3, you could get either
            [2] or [2,4] if truncate is True or False, respectively.

        Returns
        -------
        `~numpy.ndarray`
            The downsampled array.
        """
        # Normalize once; the original mixed int(factor) and the raw value.
        factor = int(factor)
        # size of the dimension of interest
        xs = myarr.shape[axis]
        if xs % factor != 0:
            if truncate:
                # Crop the trailing remainder so the axis is a multiple of factor.
                view = [slice(None)] * myarr.ndim
                view[axis] = slice(None, xs - (xs % factor))
                # NumPy requires a tuple (not a list) for multi-axis indexing.
                crarr = myarr[tuple(view)]
            else:
                # Pad with NaNs so the estimator (e.g. nanmean) ignores padding.
                newshape = list(myarr.shape)
                newshape[axis] = factor - xs % factor
                extension = np.full(newshape, np.nan)
                crarr = np.concatenate((myarr, extension), axis=axis)
        else:
            crarr = myarr

        def makeslice(startpoint, axis=axis, step=factor):
            # Build an index selecting every *step*-th element along *axis*,
            # starting at *startpoint*.
            view = [slice(None)] * myarr.ndim
            view[axis] = slice(startpoint, None, step)
            return tuple(view)

        # The extra braces here are crucial: We're adding an extra dimension so we
        # can average across it!
        stacked_array = np.concatenate([[crarr[makeslice(ii)]] for ii in range(factor)])
        dsarr = estimator(stacked_array, axis=0)
        return dsarr
except AttributeError:
    import warnings
    warnings.warn("Numpy doesn't have a nanmean attribute; a more recent version of numpy is required.")

    def downsample_axis(*args, **kwargs):
        raise AttributeError("This version of numpy doesn't possess a nanmean.")
def downsample_header(header, factor, axis):
    """
    Return a copy of a FITS header downsampled along one axis.

    *axis* follows the FITS convention (1-based), so the keywords that are
    updated are ``CRPIX<axis>`` and ``CDELT<axis>``.  The input header is
    not modified.
    """
    result = header.copy()
    cdelt_key = 'CDELT%i' % axis
    crpix_key = 'CRPIX%i' % axis
    inv = 1. / factor
    # Move the reference pixel to the center of the new, larger pixels.
    result[crpix_key] = (result[crpix_key] - 1) * inv + inv / 2. + 0.5
    # Each new pixel spans *factor* original pixels.
    result[cdelt_key] = result[cdelt_key] * factor
    return result
| [
"warnings.warn",
"numpy.concatenate",
"numpy.empty"
] | [((2242, 2352), 'warnings.warn', 'warnings.warn', (['"""Numpy doesn\'t have a nanmean attribute; a more recent version of numpy is required."""'], {}), '(\n "Numpy doesn\'t have a nanmean attribute; a more recent version of numpy is required."\n )\n', (2255, 2352), False, 'import warnings\n'), ((1559, 1604), 'numpy.concatenate', 'np.concatenate', (['(myarr, extension)'], {'axis': 'axis'}), '((myarr, extension), axis=axis)\n', (1573, 1604), True, 'import numpy as np\n'), ((1507, 1525), 'numpy.empty', 'np.empty', (['newshape'], {}), '(newshape)\n', (1515, 1525), True, 'import numpy as np\n')] |
import json
import numpy as np
import pandas as pd
from dypro.dynamic import NormalMeanVarChart, NormalMeanSChart, NormalMeanRChart
from dypro.config import Parameters, AdjConf, PlotConf
from dypro.create_csv import (
create_proposed_cpk,
created_proposed_yeild,
create_previous_cpk,
)
from dypro.dynamic.optimize import BrenthOptimizer
from dypro.plot import PlotGraph
from dypro._decorator import RunTime
# One control-chart instance per variability statistic (variance, S, R).
# CHART_NAME, FIGNAME and K2_DIR are aligned with this list positionally
# and are zipped together in main().
CHART_LIST = [NormalMeanVarChart(), NormalMeanSChart(), NormalMeanRChart()]
CHART_NAME = ["v", "s", "r"]
FIGNAME = [
    "$S^2$ control chart",
    "$S$ control chart",
    "$R$ control chart",
]
# Pre-computed k2 adjustment tables, one CSV per chart type (same order).
K2_DIR = ["csv/v_k2.csv", "csv/s_k2.csv", "csv/r_k2.csv"]
SUBGROUP_SIZE = [5, 10, 15, 20]
N = 5
FIGSIZE = (9, 6)  # matplotlib figure size in inches
RESULT_DIR = "proposed_vs_previous"  # output directory for the generated plots
@RunTime()
def main():
    """Generate proposed-vs-previous Cpk and ncppm comparison plots per chart."""
    # Read the run configuration from disk.
    with open("conf.json") as conf_file:
        conf = json.load(conf_file)

    # Process parameters and the grid of adjustment factors to evaluate.
    param = Parameters(mean=1.506, sigma=0.1398, USL=2.0, LSL=1.0)
    adj_conf = AdjConf(
        n=np.arange(2, conf["n_max"] + 1),
        k1=np.arange(0, conf["k1_max"] + conf["k1_num"], conf["k1_num"]),
    )

    for chart, k2_dir, chartname, figname in zip(
        CHART_LIST, K2_DIR, CHART_NAME, FIGNAME
    ):
        # Optimizer and pre-computed k2 table for this chart type.
        optimizer = BrenthOptimizer(chart, power=conf["power"])
        k2_df = pd.read_csv(k2_dir)
        plot_conf = PlotConf(k2_df=k2_df, figsize=FIGSIZE, dpi=conf["dpi"])

        # Tables needed for the proposed-vs-previous comparison.
        proposed_df = create_proposed_cpk(chart=chart, k2_df=k2_df, param=param)
        bothe_k1 = np.array(optimizer.get_mean_adjustment(adj_conf.n))
        var_adjustments = [optimizer.get_var_adjustment(n) for n in adj_conf.n]
        pearn_k2 = np.array(var_adjustments).flatten()

        plotter = PlotGraph(
            chart=chart,
            proposed_df=proposed_df,
            param=param,
            adj_conf=adj_conf,
            plot_conf=plot_conf,
            bothe_k1=bothe_k1,
            pearn_k2=pearn_k2,
            figname=figname,
        )
        plotter.cpk(save_path=f"{RESULT_DIR}/cpk_comparison_{chartname}.png", ci=False)
        plotter.cpk(
            save_path=f"{RESULT_DIR}/cpk(PI)_comparison_{chartname}.png", ci=True
        )
        plotter.cpk_ratio(save_path=f"{RESULT_DIR}/cpk_ratio_{chartname}.png")
        plotter.ncppm(save_path=f"{RESULT_DIR}/ncppm_comparsion_{chartname}.png")
        plotter.ncppm_ratio(
            save_path=f"{RESULT_DIR}/ncppm_ratio_comparsion_{chartname}.png"
        )


if __name__ == "__main__":
    main()
| [
"json.load",
"dypro._decorator.RunTime",
"pandas.read_csv",
"dypro.dynamic.optimize.BrenthOptimizer",
"dypro.dynamic.NormalMeanSChart",
"dypro.dynamic.NormalMeanRChart",
"dypro.create_csv.create_proposed_cpk",
"dypro.dynamic.NormalMeanVarChart",
"numpy.arange",
"dypro.plot.PlotGraph",
"dypro.con... | [((768, 777), 'dypro._decorator.RunTime', 'RunTime', ([], {}), '()\n', (775, 777), False, 'from dypro._decorator import RunTime\n'), ((434, 454), 'dypro.dynamic.NormalMeanVarChart', 'NormalMeanVarChart', ([], {}), '()\n', (452, 454), False, 'from dypro.dynamic import NormalMeanVarChart, NormalMeanSChart, NormalMeanRChart\n'), ((456, 474), 'dypro.dynamic.NormalMeanSChart', 'NormalMeanSChart', ([], {}), '()\n', (472, 474), False, 'from dypro.dynamic import NormalMeanVarChart, NormalMeanSChart, NormalMeanRChart\n'), ((476, 494), 'dypro.dynamic.NormalMeanRChart', 'NormalMeanRChart', ([], {}), '()\n', (492, 494), False, 'from dypro.dynamic import NormalMeanVarChart, NormalMeanSChart, NormalMeanRChart\n'), ((922, 976), 'dypro.config.Parameters', 'Parameters', ([], {'mean': '(1.506)', 'sigma': '(0.1398)', 'USL': '(2.0)', 'LSL': '(1.0)'}), '(mean=1.506, sigma=0.1398, USL=2.0, LSL=1.0)\n', (932, 976), False, 'from dypro.config import Parameters, AdjConf, PlotConf\n'), ((856, 868), 'json.load', 'json.load', (['f'], {}), '(f)\n', (865, 868), False, 'import json\n'), ((1271, 1314), 'dypro.dynamic.optimize.BrenthOptimizer', 'BrenthOptimizer', (['chart'], {'power': "conf['power']"}), "(chart, power=conf['power'])\n", (1286, 1314), False, 'from dypro.dynamic.optimize import BrenthOptimizer\n'), ((1356, 1375), 'pandas.read_csv', 'pd.read_csv', (['k2_dir'], {}), '(k2_dir)\n', (1367, 1375), True, 'import pandas as pd\n'), ((1425, 1480), 'dypro.config.PlotConf', 'PlotConf', ([], {'k2_df': 'k2_df', 'figsize': 'FIGSIZE', 'dpi': "conf['dpi']"}), "(k2_df=k2_df, figsize=FIGSIZE, dpi=conf['dpi'])\n", (1433, 1480), False, 'from dypro.config import Parameters, AdjConf, PlotConf\n'), ((1608, 1666), 'dypro.create_csv.create_proposed_cpk', 'create_proposed_cpk', ([], {'chart': 'chart', 'k2_df': 'k2_df', 'param': 'param'}), '(chart=chart, k2_df=k2_df, param=param)\n', (1627, 1666), False, 'from dypro.create_csv import create_proposed_cpk, created_proposed_yeild, 
create_previous_cpk\n'), ((1872, 2036), 'dypro.plot.PlotGraph', 'PlotGraph', ([], {'chart': 'chart', 'proposed_df': 'proposed_df', 'param': 'param', 'adj_conf': 'adj_conf', 'plot_conf': 'plot_conf', 'bothe_k1': 'bothe_k1', 'pearn_k2': 'pearn_k2', 'figname': 'figname'}), '(chart=chart, proposed_df=proposed_df, param=param, adj_conf=\n adj_conf, plot_conf=plot_conf, bothe_k1=bothe_k1, pearn_k2=pearn_k2,\n figname=figname)\n', (1881, 2036), False, 'from dypro.plot import PlotGraph\n'), ((1011, 1042), 'numpy.arange', 'np.arange', (['(2)', "(conf['n_max'] + 1)"], {}), "(2, conf['n_max'] + 1)\n", (1020, 1042), True, 'import numpy as np\n'), ((1055, 1116), 'numpy.arange', 'np.arange', (['(0)', "(conf['k1_max'] + conf['k1_num'])", "conf['k1_num']"], {}), "(0, conf['k1_max'] + conf['k1_num'], conf['k1_num'])\n", (1064, 1116), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
# Copyright 2018 The Texar Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Unit tests for embedding related operations.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import sys
import tempfile
import numpy as np
import tensorflow as tf
from texar.tf.data import embedding
Py3 = sys.version_info.major == 3  # pylint: disable=invalid-name
class EmbeddingTest(tf.test.TestCase):
    """Tests embedding related operations.
    """
    def test_load_glove(self):
        """Tests the load_glove function.

        Writes a tiny GloVe-format text file (one "word v1 v2 ..." line per
        entry, including a non-ASCII word) and checks that both vectors end
        up in the vocab rows given by ``vocab``.
        """
        word_vec_lines = ["word 1.2 3.4 5.6", "词 1. 3. 5."]
        # Text mode here; the Py2 branch below writes encoded bytes instead.
        glove_file = tempfile.NamedTemporaryFile(mode="w+")
        if Py3:
            glove_file.write('\n'.join(word_vec_lines))
        else:
            glove_file.write('\n'.join(word_vec_lines).encode("utf-8"))
        glove_file.flush()
        vocab = {"word": 0, "词": 1}
        # load_glove returns the word_vecs array with loaded rows filled in.
        word_vecs = np.zeros([2, 3])
        word_vecs = embedding.load_glove(glove_file.name, vocab, word_vecs)
        self.assertEqual(word_vecs.shape[0], 2)
        self.assertEqual(word_vecs.shape[1], 3)
        np.testing.assert_array_equal(word_vecs[0], [1.2, 3.4, 5.6])
        np.testing.assert_array_equal(word_vecs[1], [1., 3., 5.])
    def test_load_word2vec(self):
        """Tests the load_word2vec function.

        Builds a minimal word2vec binary file: a "vocab_size dim" header
        line, then each word followed by a space and its raw float32 bytes.
        """
        header = "2 3"
        words = ["word", "词"]
        vec = np.array([1.2, 3.4, 5.6], dtype='float32')
        # Default tempfile mode is binary ('w+b'), matching the byte writes.
        w2v_file = tempfile.NamedTemporaryFile()
        w2v_file.write(tf.compat.as_bytes(header + "\n"))
        for word in words:
            w2v_file.write(tf.compat.as_bytes(word + " "))
            # NOTE(review): ndarray.tostring() is deprecated in newer NumPy
            # in favor of tobytes() -- same output bytes.
            w2v_file.write(vec.tostring() + b'\n')
        w2v_file.flush()
        vocab = {"word": 0, "词": 1}
        word_vecs = np.zeros([2, 3])
        word_vecs = embedding.load_word2vec(w2v_file.name, vocab, word_vecs)
        self.assertEqual(word_vecs.shape[0], 2)
        self.assertEqual(word_vecs.shape[1], 3)
        # Both words were written with the same vector.
        np.testing.assert_array_equal(word_vecs[0], vec)
        np.testing.assert_array_equal(word_vecs[1], vec)
    def test_embedding(self):
        """Tests :class:`texar.tf.data.embedding.Embedding`.
        """
        vocab = {"word": 0, "词": 1}
        emb = embedding.Embedding(vocab)
        # One vector per vocab entry.
        self.assertEqual(len(emb.word_vecs), len(vocab))
if __name__ == "__main__":
    tf.test.main()
| [
"tensorflow.test.main",
"tempfile.NamedTemporaryFile",
"texar.tf.data.embedding.Embedding",
"numpy.testing.assert_array_equal",
"texar.tf.data.embedding.load_word2vec",
"numpy.zeros",
"texar.tf.data.embedding.load_glove",
"numpy.array",
"tensorflow.compat.as_bytes"
] | [((2970, 2984), 'tensorflow.test.main', 'tf.test.main', ([], {}), '()\n', (2982, 2984), True, 'import tensorflow as tf\n'), ((1261, 1299), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ([], {'mode': '"""w+"""'}), "(mode='w+')\n", (1288, 1299), False, 'import tempfile\n'), ((1541, 1557), 'numpy.zeros', 'np.zeros', (['[2, 3]'], {}), '([2, 3])\n', (1549, 1557), True, 'import numpy as np\n'), ((1579, 1634), 'texar.tf.data.embedding.load_glove', 'embedding.load_glove', (['glove_file.name', 'vocab', 'word_vecs'], {}), '(glove_file.name, vocab, word_vecs)\n', (1599, 1634), False, 'from texar.tf.data import embedding\n'), ((1740, 1800), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['word_vecs[0]', '[1.2, 3.4, 5.6]'], {}), '(word_vecs[0], [1.2, 3.4, 5.6])\n', (1769, 1800), True, 'import numpy as np\n'), ((1809, 1869), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['word_vecs[1]', '[1.0, 3.0, 5.0]'], {}), '(word_vecs[1], [1.0, 3.0, 5.0])\n', (1838, 1869), True, 'import numpy as np\n'), ((2026, 2068), 'numpy.array', 'np.array', (['[1.2, 3.4, 5.6]'], {'dtype': '"""float32"""'}), "([1.2, 3.4, 5.6], dtype='float32')\n", (2034, 2068), True, 'import numpy as np\n'), ((2088, 2117), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ([], {}), '()\n', (2115, 2117), False, 'import tempfile\n'), ((2394, 2410), 'numpy.zeros', 'np.zeros', (['[2, 3]'], {}), '([2, 3])\n', (2402, 2410), True, 'import numpy as np\n'), ((2432, 2488), 'texar.tf.data.embedding.load_word2vec', 'embedding.load_word2vec', (['w2v_file.name', 'vocab', 'word_vecs'], {}), '(w2v_file.name, vocab, word_vecs)\n', (2455, 2488), False, 'from texar.tf.data import embedding\n'), ((2594, 2642), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['word_vecs[0]', 'vec'], {}), '(word_vecs[0], vec)\n', (2623, 2642), True, 'import numpy as np\n'), ((2651, 2699), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', 
(['word_vecs[1]', 'vec'], {}), '(word_vecs[1], vec)\n', (2680, 2699), True, 'import numpy as np\n'), ((2854, 2880), 'texar.tf.data.embedding.Embedding', 'embedding.Embedding', (['vocab'], {}), '(vocab)\n', (2873, 2880), False, 'from texar.tf.data import embedding\n'), ((2141, 2174), 'tensorflow.compat.as_bytes', 'tf.compat.as_bytes', (["(header + '\\n')"], {}), "(header + '\\n')\n", (2159, 2174), True, 'import tensorflow as tf\n'), ((2230, 2260), 'tensorflow.compat.as_bytes', 'tf.compat.as_bytes', (["(word + ' ')"], {}), "(word + ' ')\n", (2248, 2260), True, 'import tensorflow as tf\n')] |
import numpy as np
import subprocess as sp
from threading import Thread
# Round-trip check: pipe int16 samples through `cat` and verify they come
# back unchanged.
n_samples = 44100
proc = sp.Popen(['cat'], stdin=sp.PIPE, stdout=sp.PIPE)
out_arr = np.ones(n_samples, dtype=np.int16)
def reader():
    # Drain the child's stdout on a separate thread so the pipe buffers
    # cannot fill up and deadlock against the write below.
    # NOTE(review): np.fromfile on a pipe may return fewer than n_samples
    # items on a short read -- confirm this holds on the target platform.
    in_arr = np.fromfile(proc.stdout, np.int16, n_samples)
    assert np.all(np.equal(in_arr, out_arr))
# Start the consumer before writing: tofile() pushes 2 * n_samples bytes,
# which can exceed the OS pipe buffer size.
reader_thread = Thread(target=reader)
reader_thread.start()
out_arr.tofile(proc.stdin)
| [
"threading.Thread",
"subprocess.Popen",
"numpy.fromfile",
"numpy.ones",
"numpy.equal"
] | [((99, 147), 'subprocess.Popen', 'sp.Popen', (["['cat']"], {'stdin': 'sp.PIPE', 'stdout': 'sp.PIPE'}), "(['cat'], stdin=sp.PIPE, stdout=sp.PIPE)\n", (107, 147), True, 'import subprocess as sp\n'), ((158, 192), 'numpy.ones', 'np.ones', (['n_samples'], {'dtype': 'np.int16'}), '(n_samples, dtype=np.int16)\n', (165, 192), True, 'import numpy as np\n'), ((329, 350), 'threading.Thread', 'Thread', ([], {'target': 'reader'}), '(target=reader)\n', (335, 350), False, 'from threading import Thread\n'), ((221, 266), 'numpy.fromfile', 'np.fromfile', (['proc.stdout', 'np.int16', 'n_samples'], {}), '(proc.stdout, np.int16, n_samples)\n', (232, 266), True, 'import numpy as np\n'), ((285, 310), 'numpy.equal', 'np.equal', (['in_arr', 'out_arr'], {}), '(in_arr, out_arr)\n', (293, 310), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Tests for the simulation subpackage
"""
#pylint: disable=import-outside-toplevel, no-self-use
import unittest
import numpy as np
from scipy.spatial.distance import squareform
import rsatoolbox
import rsatoolbox.model as model
class TestSimulation(unittest.TestCase):
    """Unit tests for the dataset-simulation helpers in rsatoolbox."""
    def test_make_design(self):
        import rsatoolbox.simulation.sim as sim
        # Test for make_design: presumably 4 conditions x 8 repetitions
        # yields 32 trials -- size check below.
        cond_vec, _ = sim.make_design(4, 8)
        self.assertEqual(cond_vec.size, 32)
    def test_make_signal(self):
        # Test make signal: with make_exact=True the simulated signal
        # should reproduce the requested second-moment matrix G exactly.
        import rsatoolbox.simulation.sim as sim
        M = model.ModelFixed("test", np.array([2, 2, 2, 1, 1, 1]))
        RDM = M.predict(None)
        D = squareform(RDM)
        # Double-centering converts the distance matrix into a
        # second-moment matrix G.
        H = rsatoolbox.util.matrix.centering(D.shape[0])
        G = -0.5 * (H @ D @ H)
        S = sim.make_signal(G, 40, make_exact=True)
        # Empirical second moment of the 40-channel signal must match G.
        Diff = S@S.T/40 - G
        self.assertTrue(np.all(np.abs(Diff) < 1e-7))
    def test_make_data(self):
        # Test for make_data
        import rsatoolbox.simulation.sim as sim
        cond_vec, _ = sim.make_design(4, 8)
        M = model.ModelFixed("test", np.array([2, 3, 4, 1, 1.1, 0.9]))
        D = sim.make_dataset(M, None, cond_vec, n_channel=40)
        # First dataset: 32 observations (see test_make_design), 40 channels.
        self.assertEqual(D[0].n_obs, 32)
        self.assertEqual(D[0].n_channel, 40)
if __name__ == '__main__':
    unittest.main()
| [
"unittest.main",
"rsatoolbox.simulation.sim.make_dataset",
"numpy.abs",
"rsatoolbox.util.matrix.centering",
"scipy.spatial.distance.squareform",
"rsatoolbox.simulation.sim.make_signal",
"numpy.array",
"rsatoolbox.simulation.sim.make_design"
] | [((1377, 1392), 'unittest.main', 'unittest.main', ([], {}), '()\n', (1390, 1392), False, 'import unittest\n'), ((453, 474), 'rsatoolbox.simulation.sim.make_design', 'sim.make_design', (['(4)', '(8)'], {}), '(4, 8)\n', (468, 474), True, 'import rsatoolbox.simulation.sim as sim\n'), ((736, 751), 'scipy.spatial.distance.squareform', 'squareform', (['RDM'], {}), '(RDM)\n', (746, 751), False, 'from scipy.spatial.distance import squareform\n'), ((764, 808), 'rsatoolbox.util.matrix.centering', 'rsatoolbox.util.matrix.centering', (['D.shape[0]'], {}), '(D.shape[0])\n', (796, 808), False, 'import rsatoolbox\n'), ((852, 891), 'rsatoolbox.simulation.sim.make_signal', 'sim.make_signal', (['G', '(40)'], {'make_exact': '(True)'}), '(G, 40, make_exact=True)\n', (867, 891), True, 'import rsatoolbox.simulation.sim as sim\n'), ((1103, 1124), 'rsatoolbox.simulation.sim.make_design', 'sim.make_design', (['(4)', '(8)'], {}), '(4, 8)\n', (1118, 1124), True, 'import rsatoolbox.simulation.sim as sim\n'), ((1208, 1257), 'rsatoolbox.simulation.sim.make_dataset', 'sim.make_dataset', (['M', 'None', 'cond_vec'], {'n_channel': '(40)'}), '(M, None, cond_vec, n_channel=40)\n', (1224, 1257), True, 'import rsatoolbox.simulation.sim as sim\n'), ((664, 692), 'numpy.array', 'np.array', (['[2, 2, 2, 1, 1, 1]'], {}), '([2, 2, 2, 1, 1, 1])\n', (672, 692), True, 'import numpy as np\n'), ((1162, 1194), 'numpy.array', 'np.array', (['[2, 3, 4, 1, 1.1, 0.9]'], {}), '([2, 3, 4, 1, 1.1, 0.9])\n', (1170, 1194), True, 'import numpy as np\n'), ((951, 963), 'numpy.abs', 'np.abs', (['Diff'], {}), '(Diff)\n', (957, 963), True, 'import numpy as np\n')] |
import cv2
import numpy as np
import matplotlib.pyplot as plt
def color_to_grayscale(color_array: np.ndarray) -> np.ndarray:
    """Convert a color image in OpenCV's BGR channel order to grayscale."""
    grayscale = cv2.cvtColor(color_array, cv2.COLOR_BGR2GRAY)
    return grayscale
def calcPSD(input_image, output_image, flag):
    """
    Compute, display and return the power spectral density of an image.

    Parameters
    ----------
    input_image : 2-D float32 array (grayscale image).
    output_image : unused; kept for backward compatibility.  The original
        implementation rebound this parameter locally, so the result never
        reached the caller -- this version returns it instead.
    flag : if truthy, return/show log(1 + PSD) instead of the raw PSD.

    Returns
    -------
    2-D float32 array holding the PSD (or its log).
    """
    # Two-channel (real, imaginary) image as required by cv2.dft.
    complex_image = np.zeros(shape=(input_image.shape[0], input_image.shape[1], 2), dtype=np.float32)
    complex_image[:, :, 0] = input_image
    cv2.dft(complex_image, dst=complex_image)
    # Zero the first column of the transform, as in the original code.
    complex_image[:, 0, :] = 0

    # PSD = |F|^2
    psd = cv2.magnitude(complex_image[:, :, 0], complex_image[:, :, 1])
    cv2.pow(psd, 2, dst=psd)

    if flag:
        # Log scale for display: log(1 + PSD).
        output_image = np.log(psd + 1)
    else:
        output_image = psd

    # Show the computed result (the original displayed a leftover debug
    # array from np.fft instead of the PSD).
    plt.imshow(output_image, cmap="gray")
    plt.show()
    return output_image
if __name__ == "__main__":
    # NOTE(review): cv2.imread returns None when the file is missing;
    # color_to_grayscale would then raise -- confirm the file is present.
    image = cv2.imread("test4_split0.png")
    # image = cv2.imread("test.png")
    image = color_to_grayscale(image)
    print("image type: ", image.dtype)
    # `& -2` clears the lowest bit, rounding each dimension down to even.
    new_size = image.shape[0] & -2, image.shape[1] & -2  # Even number of rows / columns
    # Copy the (possibly cropped) image into a float32 buffer for the DFT.
    new_image = np.zeros(shape=new_size, dtype=np.float32)
    new_image[:, :] = image[:new_size[0], :new_size[1]]
    calcPSD(new_image, new_size, 0)
    print("Success!")
| [
"cv2.magnitude",
"matplotlib.pyplot.show",
"numpy.log",
"cv2.cvtColor",
"numpy.power",
"matplotlib.pyplot.imshow",
"numpy.zeros",
"cv2.imread",
"cv2.dft",
"numpy.fft.rfft2",
"cv2.pow"
] | [((139, 184), 'cv2.cvtColor', 'cv2.cvtColor', (['color_array', 'cv2.COLOR_BGR2GRAY'], {}), '(color_array, cv2.COLOR_BGR2GRAY)\n', (151, 184), False, 'import cv2\n'), ((333, 354), 'numpy.power', 'np.power', (['X', '(2)'], {'out': 'X'}), '(X, 2, out=X)\n', (341, 354), True, 'import numpy as np\n'), ((499, 585), 'numpy.zeros', 'np.zeros', ([], {'shape': '(input_image.shape[0], input_image.shape[1], 2)', 'dtype': 'np.float32'}), '(shape=(input_image.shape[0], input_image.shape[1], 2), dtype=np.\n float32)\n', (507, 585), True, 'import numpy as np\n'), ((628, 669), 'cv2.dft', 'cv2.dft', (['complex_image'], {'dst': 'complex_image'}), '(complex_image, dst=complex_image)\n', (635, 669), False, 'import cv2\n'), ((722, 783), 'cv2.magnitude', 'cv2.magnitude', (['complex_image[:, :, 0]', 'complex_image[:, :, 1]'], {}), '(complex_image[:, :, 0], complex_image[:, :, 1])\n', (735, 783), False, 'import cv2\n'), ((788, 812), 'cv2.pow', 'cv2.pow', (['psd', '(2)'], {'dst': 'psd'}), '(psd, 2, dst=psd)\n', (795, 812), False, 'import cv2\n'), ((996, 1022), 'matplotlib.pyplot.imshow', 'plt.imshow', (['X'], {'cmap': '"""gray"""'}), "(X, cmap='gray')\n", (1006, 1022), True, 'import matplotlib.pyplot as plt\n'), ((1027, 1037), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1035, 1037), True, 'import matplotlib.pyplot as plt\n'), ((1080, 1110), 'cv2.imread', 'cv2.imread', (['"""test4_split0.png"""'], {}), "('test4_split0.png')\n", (1090, 1110), False, 'import cv2\n'), ((1334, 1376), 'numpy.zeros', 'np.zeros', ([], {'shape': 'new_size', 'dtype': 'np.float32'}), '(shape=new_size, dtype=np.float32)\n', (1342, 1376), True, 'import numpy as np\n'), ((298, 323), 'numpy.fft.rfft2', 'np.fft.rfft2', (['input_image'], {}), '(input_image)\n', (310, 323), True, 'import numpy as np\n'), ((874, 898), 'numpy.log', 'np.log', (['imlog'], {'out': 'imlog'}), '(imlog, out=imlog)\n', (880, 898), True, 'import numpy as np\n')] |
import glob
import os
from typing import Any, Dict, List, Tuple
import subprocess
import numpy as np
from rasterio.windows import Window
import srem
from constants import OLI_BAND_ID, REFLECTANCE_SCALING_FACTOR
def get_band_id(path: str) -> int:
    """Extract the numeric OLI band id from a Landsat band file path.

    The band name is the last underscore-separated token of the file stem,
    e.g. '.../LC08_..._B04.TIF' -> 'B04' -> OLI_BAND_ID['B04'].value.
    """
    file_stem = os.path.splitext(os.path.basename(path))[0]
    band_name = file_stem.split('_')[-1]
    return OLI_BAND_ID[band_name].value
def get_pixel_angle_files(angle_file: str,
                          band_id: int,
                          output_dir: str) -> Tuple[str, str]:
    """Generate per-pixel solar/sensor angle rasters with the ``l8_angles`` tool.

    Runs the external ``l8_angles`` executable inside ``output_dir`` and
    returns the paths of the produced solar and sensor angle images for
    the requested band.

    Args:
        angle_file: Path to the Landsat angle coefficient (ANG) file.
        band_id: OLI band number the angle rasters are generated for.
        output_dir: Directory the tool writes its ``*.img`` outputs into.

    Returns:
        Tuple of (solar_angle_file, sensor_angle_file) paths.

    Raises:
        subprocess.CalledProcessError: If ``l8_angles`` exits non-zero.
        IndexError: If the expected output files are not found.
    """
    cwd = os.getcwd()
    angle_file = os.path.abspath(angle_file)
    os.chdir(output_dir)
    # Restore the original working directory even if the external tool
    # fails or the expected outputs are missing; the original code left
    # the process stuck in output_dir on any exception.
    try:
        cmd = f'l8_angles {angle_file} BOTH 1 -b {band_id}'
        subprocess.run(cmd, shell=True, check=True, stdout=subprocess.DEVNULL)
        solar_angle_file = glob.glob(
            os.path.join(output_dir, f'*solar_B0{band_id}.img'))[0]
        sensor_angle_file = glob.glob(
            os.path.join(output_dir, f'*sensor_B0{band_id}.img'))[0]
    finally:
        os.chdir(cwd)
    return solar_angle_file, sensor_angle_file
def srem_worker(data: List[np.ndarray],
                window: Window,
                ij: int,
                global_args: Dict[Any, Any]) -> np.ndarray:
    """Worker computing scaled surface reflectance for one raster window.

    ``data`` holds five bands: TOA reflectance followed by the four angle
    rasters (stored as angle * 100). Nodata pixels (TOA == 0) stay 0 in
    the output; valid pixels are scaled and floored at 1.
    """
    nodata_mask = data[0] == 0
    sr = srem.srem(
        toa_reflectance=data[0],
        wavelength=global_args['wavelength'],
        solar_azimuth_angle_deg=data[1] / 100.,
        solar_zenith_angle_deg=data[2] / 100.,
        sensor_azimuth_angle_deg=data[3] / 100.,
        sensor_zenith_angle_deg=data[4] / 100.,
    )
    # Scale the reflectance and clamp the valid minimum to 1 so that 0 is
    # reserved exclusively for nodata pixels.
    scaled = sr * REFLECTANCE_SCALING_FACTOR
    scaled[scaled < 1] = 1
    scaled[nodata_mask] = 0
    scaled = scaled.astype(global_args['dtype'])
    # Add a leading band axis expected by the caller.
    return np.expand_dims(scaled, axis=0)
| [
"subprocess.run",
"os.path.abspath",
"os.path.basename",
"os.getcwd",
"numpy.expand_dims",
"srem.srem",
"os.path.join",
"os.chdir"
] | [((545, 556), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (554, 556), False, 'import os\n'), ((574, 601), 'os.path.abspath', 'os.path.abspath', (['angle_file'], {}), '(angle_file)\n', (589, 601), False, 'import os\n'), ((606, 626), 'os.chdir', 'os.chdir', (['output_dir'], {}), '(output_dir)\n', (614, 626), False, 'import os\n'), ((687, 757), 'subprocess.run', 'subprocess.run', (['cmd'], {'shell': '(True)', 'check': '(True)', 'stdout': 'subprocess.DEVNULL'}), '(cmd, shell=True, check=True, stdout=subprocess.DEVNULL)\n', (701, 757), False, 'import subprocess\n'), ((942, 955), 'os.chdir', 'os.chdir', (['cwd'], {}), '(cwd)\n', (950, 955), False, 'import os\n'), ((1221, 1469), 'srem.srem', 'srem.srem', ([], {'toa_reflectance': 'data[0]', 'wavelength': "global_args['wavelength']", 'solar_azimuth_angle_deg': '(data[1] / 100.0)', 'solar_zenith_angle_deg': '(data[2] / 100.0)', 'sensor_azimuth_angle_deg': '(data[3] / 100.0)', 'sensor_zenith_angle_deg': '(data[4] / 100.0)'}), "(toa_reflectance=data[0], wavelength=global_args['wavelength'],\n solar_azimuth_angle_deg=data[1] / 100.0, solar_zenith_angle_deg=data[2] /\n 100.0, sensor_azimuth_angle_deg=data[3] / 100.0,\n sensor_zenith_angle_deg=data[4] / 100.0)\n", (1230, 1469), False, 'import srem\n'), ((1835, 1868), 'numpy.expand_dims', 'np.expand_dims', (['scaled_sr'], {'axis': '(0)'}), '(scaled_sr, axis=0)\n', (1849, 1868), True, 'import numpy as np\n'), ((791, 842), 'os.path.join', 'os.path.join', (['output_dir', 'f"""*solar_B0{band_id}.img"""'], {}), "(output_dir, f'*solar_B0{band_id}.img')\n", (803, 842), False, 'import os\n'), ((881, 933), 'os.path.join', 'os.path.join', (['output_dir', 'f"""*sensor_B0{band_id}.img"""'], {}), "(output_dir, f'*sensor_B0{band_id}.img')\n", (893, 933), False, 'import os\n'), ((283, 305), 'os.path.basename', 'os.path.basename', (['path'], {}), '(path)\n', (299, 305), False, 'import os\n')] |
import itertools
import random
import hashlib
import yaml
from typing import Any, List, Optional, Union
import numpy as np
from .config.cfg import SweepConfig
from .run import SweepRun
from .params import HyperParameter, HyperParameterSet
def yaml_hash(value: Any) -> str:
    """MD5 hex digest of the canonical (flow-style, sorted-keys) YAML dump of *value*."""
    canonical = yaml.dump(value, default_flow_style=True, sort_keys=True)
    return hashlib.md5(canonical.encode("ascii")).hexdigest()
def grid_search_next_runs(
    runs: List[SweepRun],
    sweep_config: Union[dict, SweepConfig],
    validate: bool = False,
    n: int = 1,
    randomize_order: bool = False,
) -> List[Optional[SweepRun]]:
    """Suggest runs with Hyperparameters drawn from a grid.
    >>> suggestion = grid_search_next_runs([], {'method': 'grid', 'parameters': {'a': {'values': [1, 2, 3]}}})
    >>> assert suggestion[0].config['a']['value'] == 1
    Args:
        runs: The runs in the sweep.
        sweep_config: The sweep's config.
        randomize_order: Whether to randomize the order of the grid search.
        n: The number of runs to draw
        validate: Whether to validate `sweep_config` against the SweepConfig JSONschema.
           If true, will raise a Validation error if `sweep_config` does not conform to
           the schema. If false, will attempt to run the sweep with an unvalidated schema.
    Returns:
        The suggested runs.
    """
    # make sure the sweep config is valid
    if validate:
        sweep_config = SweepConfig(sweep_config)
    if sweep_config["method"] != "grid":
        raise ValueError("Invalid sweep configuration for grid_search_next_run.")
    if "parameters" not in sweep_config:
        raise ValueError('Grid search requires "parameters" section')
    params = HyperParameterSet.from_config(sweep_config["parameters"])
    # Check that all parameters are categorical or constant
    for p in params:
        if p.type not in [
            HyperParameter.CATEGORICAL,
            HyperParameter.CONSTANT,
            HyperParameter.INT_UNIFORM,
            HyperParameter.Q_UNIFORM,
        ]:
            raise ValueError(
                f"Parameter {p.name} is a disallowed type with grid search. Grid search requires all parameters "
                f"to be categorical, constant, int_uniform, or q_uniform. Specification of probabilities for "
                f"categorical parameters is disallowed in grid search"
            )
    # convert bounded int_uniform and q_uniform parameters to categorical parameters
    # so the whole grid can be enumerated as discrete value combinations
    for i, p in enumerate(params):
        if p.type == HyperParameter.INT_UNIFORM:
            params[i] = HyperParameter(
                p.name,
                {
                    "distribution": "categorical",
                    "values": [
                        val for val in range(p.config["min"], p.config["max"] + 1)
                    ],
                },
            )
        elif p.type == HyperParameter.Q_UNIFORM:
            params[i] = HyperParameter(
                p.name,
                {
                    "distribution": "categorical",
                    "values": np.arange(
                        p.config["min"], p.config["max"], p.config["q"]
                    ).tolist(),
                },
            )
    # we can only deal with discrete params in a grid search
    # (CONSTANT params are excluded here; they are re-added via params.to_config())
    discrete_params = HyperParameterSet(
        [p for p in params if p.type == HyperParameter.CATEGORICAL]
    )
    # build an iterator over all combinations of param values
    param_names = [p.name for p in discrete_params]
    param_values = [p.config["values"] for p in discrete_params]
    # values are compared via their YAML hash so unhashable values (lists,
    # dicts) can live in sets/tuples below
    param_hashes = [
        [yaml_hash(value) for value in p.config["values"]] for p in discrete_params
    ]
    # per-parameter reverse map: value hash -> original value
    value_hash_lookup = {
        name: dict(zip(hashes, vals))
        for name, vals, hashes in zip(param_names, param_values, param_hashes)
    }
    all_param_hashes = list(itertools.product(*param_hashes))
    if randomize_order:
        random.shuffle(all_param_hashes)
    # hash-tuples of value combinations already tried by existing runs
    param_hashes_seen = set(
        [
            tuple(
                yaml_hash(run.config[name]["value"])
                for name in param_names
                if name in run.config
            )
            for run in runs
        ]
    )
    # lazily yields untried combinations in grid (or shuffled) order
    hash_gen = (
        hash_val for hash_val in all_param_hashes if hash_val not in param_hashes_seen
    )
    retval: List[Optional[SweepRun]] = []
    for _ in range(n):
        # this is O(1)
        next_hash = next(hash_gen, None)
        # we have searched over the entire parameter space
        if next_hash is None:
            retval.append(None)
            return retval
        for param, hash_val in zip(discrete_params, next_hash):
            param.value = value_hash_lookup[param.name][hash_val]
        run = SweepRun(config=params.to_config())
        retval.append(run)
        # mark as seen so repeated draws within this call stay unique
        param_hashes_seen.add(next_hash)
    return retval
| [
"random.shuffle",
"yaml.dump",
"numpy.arange",
"itertools.product"
] | [((3870, 3902), 'itertools.product', 'itertools.product', (['*param_hashes'], {}), '(*param_hashes)\n', (3887, 3902), False, 'import itertools\n'), ((3936, 3968), 'random.shuffle', 'random.shuffle', (['all_param_hashes'], {}), '(all_param_hashes)\n', (3950, 3968), False, 'import random\n'), ((309, 366), 'yaml.dump', 'yaml.dump', (['value'], {'default_flow_style': '(True)', 'sort_keys': '(True)'}), '(value, default_flow_style=True, sort_keys=True)\n', (318, 366), False, 'import yaml\n'), ((3076, 3134), 'numpy.arange', 'np.arange', (["p.config['min']", "p.config['max']", "p.config['q']"], {}), "(p.config['min'], p.config['max'], p.config['q'])\n", (3085, 3134), True, 'import numpy as np\n')] |
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
import paddle
from deepspeech.modules.mask import make_non_pad_mask
from deepspeech.modules.mask import make_pad_mask
class TestU2Model(unittest.TestCase):
    """Tests for make_pad_mask / make_non_pad_mask over a small batch of lengths."""

    def setUp(self):
        # Run on CPU so the test does not require a GPU device.
        paddle.set_device('cpu')
        # Sequence lengths for a batch of 3; the longest sequence (5) sets
        # the mask width.
        self.lengths = paddle.to_tensor([5, 3, 2])
        # Expected non-pad mask: True marks valid (non-padded) positions.
        self.masks = np.array([
            [True, True, True, True, True],
            [True, True, True, False, False],
            [True, True, False, False, False],
        ])
        # Expected pad mask: elementwise negation of self.masks.
        self.pad_masks = np.array([
            [False, False, False, False, False],
            [False, False, False, True, True],
            [False, False, True, True, True],
        ])

    def test_make_non_pad_mask(self):
        # make_non_pad_mask must match the expectation and equal ~make_pad_mask.
        res = make_non_pad_mask(self.lengths)
        res2 = ~make_pad_mask(self.lengths)
        self.assertSequenceEqual(res.numpy().tolist(), self.masks.tolist())
        self.assertSequenceEqual(res.numpy().tolist(), res2.numpy().tolist())

    def test_make_pad_mask(self):
        # make_pad_mask must match the expectation and equal ~make_non_pad_mask.
        res = make_pad_mask(self.lengths)
        res1 = ~make_non_pad_mask(self.lengths)
        self.assertSequenceEqual(res.numpy().tolist(), self.pad_masks.tolist())
        self.assertSequenceEqual(res.numpy().tolist(), res1.tolist())
if __name__ == '__main__':
    # Allow running this test module directly as a script.
    unittest.main()
| [
"unittest.main",
"deepspeech.modules.mask.make_pad_mask",
"deepspeech.modules.mask.make_non_pad_mask",
"numpy.array",
"paddle.set_device",
"paddle.to_tensor"
] | [((1870, 1885), 'unittest.main', 'unittest.main', ([], {}), '()\n', (1883, 1885), False, 'import unittest\n'), ((834, 858), 'paddle.set_device', 'paddle.set_device', (['"""cpu"""'], {}), "('cpu')\n", (851, 858), False, 'import paddle\n'), ((882, 909), 'paddle.to_tensor', 'paddle.to_tensor', (['[5, 3, 2]'], {}), '([5, 3, 2])\n', (898, 909), False, 'import paddle\n'), ((931, 1046), 'numpy.array', 'np.array', (['[[True, True, True, True, True], [True, True, True, False, False], [True, \n True, False, False, False]]'], {}), '([[True, True, True, True, True], [True, True, True, False, False],\n [True, True, False, False, False]])\n', (939, 1046), True, 'import numpy as np\n'), ((1115, 1236), 'numpy.array', 'np.array', (['[[False, False, False, False, False], [False, False, False, True, True], [\n False, False, True, True, True]]'], {}), '([[False, False, False, False, False], [False, False, False, True, \n True], [False, False, True, True, True]])\n', (1123, 1236), True, 'import numpy as np\n'), ((1332, 1363), 'deepspeech.modules.mask.make_non_pad_mask', 'make_non_pad_mask', (['self.lengths'], {}), '(self.lengths)\n', (1349, 1363), False, 'from deepspeech.modules.mask import make_non_pad_mask\n'), ((1611, 1638), 'deepspeech.modules.mask.make_pad_mask', 'make_pad_mask', (['self.lengths'], {}), '(self.lengths)\n', (1624, 1638), False, 'from deepspeech.modules.mask import make_pad_mask\n'), ((1380, 1407), 'deepspeech.modules.mask.make_pad_mask', 'make_pad_mask', (['self.lengths'], {}), '(self.lengths)\n', (1393, 1407), False, 'from deepspeech.modules.mask import make_pad_mask\n'), ((1655, 1686), 'deepspeech.modules.mask.make_non_pad_mask', 'make_non_pad_mask', (['self.lengths'], {}), '(self.lengths)\n', (1672, 1686), False, 'from deepspeech.modules.mask import make_non_pad_mask\n')] |
import numpy as np
import unittest
import warnings
from context import lir
from lir.calibration import IsotonicCalibrator
from lir.util import Xn_to_Xy, Xy_to_Xn
import math
# Promote all warnings to errors so any unexpected warning fails the tests.
warnings.simplefilter("error")
def _cllr(lr0, lr1):
with np.errstate(divide='ignore'):
cllr0 = np.mean(np.log2(1 + lr0))
cllr1 = np.mean(np.log2(1 + 1/lr1))
return .5 * (cllr0 + cllr1)
def _pdf(X, mu, sigma):
return np.exp(-np.power(X - mu, 2) / (2*sigma*sigma)) / math.sqrt(2*math.pi*sigma*sigma)
class TestIsotonicRegression(unittest.TestCase):
    """Tests for IsotonicCalibrator likelihood-ratio calibration."""

    def test_lr_1(self):
        # Identical score distributions for both classes: every calibrated
        # LR must come out as exactly 1 (no evidence either way).
        score_class0 = np.arange(0, 1, .1)
        score_class1 = np.arange(0, 1, .1)
        X, y = Xn_to_Xy(score_class0, score_class1)
        irc = IsotonicCalibrator()
        lr0, lr1 = Xy_to_Xn(irc.fit_transform(X, y), y)
        self.assertEqual(score_class0.shape, lr0.shape)
        self.assertEqual(score_class1.shape, lr1.shape)
        np.testing.assert_almost_equal(lr0, [1.]*lr0.shape[0])
        np.testing.assert_almost_equal(lr1, [1.]*lr1.shape[0])

    def run_cllrmin(self, lr0, lr1, places=7):
        # Isotonic (PAV) calibration is Cllr-optimal, so recalibrating LRs
        # should reproduce the original Cllr (Cllr == Cllr_min).
        lr0 = np.array(lr0)
        lr1 = np.array(lr1)
        X, y = Xn_to_Xy(lr0, lr1)
        cllr = _cllr(lr0, lr1)
        irc = IsotonicCalibrator()
        # X / (X + 1) maps LRs to posterior-probability scores in [0, 1).
        lrmin0, lrmin1 = Xy_to_Xn(irc.fit_transform(X / (X + 1), y), y)
        cllrmin = _cllr(lrmin0, lrmin1)
        self.assertAlmostEqual(cllr, cllrmin, places=places)

    def test_cllrmin(self):
        self.run_cllrmin([1]*10, [1]*10)
        self.run_cllrmin([1], [1]*10)
        self.run_cllrmin([4, .25, .25, .25, .25, 1], [4, 4, 4, 4, .25, 1])
        #np.random.seed(0)
        # Normally distributed scores with known class-conditional densities,
        # so the true LRs can be computed analytically via _pdf.
        X0 = np.random.normal(loc=0, scale=1, size=(40000,))
        X1 = np.random.normal(loc=1, scale=1, size=(40000,))
        lr0 = _pdf(X0, 1, 1) / _pdf(X0, 0, 1)
        lr1 = _pdf(X1, 1, 1) / _pdf(X1, 0, 1)
        self.run_cllrmin(lr0, lr1, places=2)
        self.run_cllrmin(lr0, lr1[:30000], places=2)

    def test_lr_almost_1(self):
        # Class-1 scores shifted up by .05: interior LRs stay 1, but the
        # extremes calibrate to 0 and infinity respectively.
        score_class0 = np.arange(0, 1, .1)
        score_class1 = np.arange(.05, 1.05, .1)
        X, y = Xn_to_Xy(score_class0, score_class1)
        irc = IsotonicCalibrator()
        lr0, lr1 = Xy_to_Xn(irc.fit_transform(X, y), y)
        self.assertEqual(score_class0.shape, lr0.shape)
        self.assertEqual(score_class1.shape, lr1.shape)
        np.testing.assert_almost_equal(lr0, np.concatenate([[0], [1.]*(lr0.shape[0]-1)]))
        np.testing.assert_almost_equal(lr1, np.concatenate([[1.]*(lr1.shape[0]-1), [np.inf]]))
if __name__ == '__main__':
    # Allow running this test module directly as a script.
    unittest.main()
| [
"unittest.main",
"lir.calibration.IsotonicCalibrator",
"warnings.simplefilter",
"math.sqrt",
"numpy.testing.assert_almost_equal",
"numpy.log2",
"numpy.power",
"numpy.errstate",
"lir.util.Xn_to_Xy",
"numpy.arange",
"numpy.array",
"numpy.random.normal",
"numpy.concatenate"
] | [((178, 208), 'warnings.simplefilter', 'warnings.simplefilter', (['"""error"""'], {}), "('error')\n", (199, 208), False, 'import warnings\n'), ((2554, 2569), 'unittest.main', 'unittest.main', ([], {}), '()\n', (2567, 2569), False, 'import unittest\n'), ((241, 269), 'numpy.errstate', 'np.errstate', ([], {'divide': '"""ignore"""'}), "(divide='ignore')\n", (252, 269), True, 'import numpy as np\n'), ((479, 517), 'math.sqrt', 'math.sqrt', (['(2 * math.pi * sigma * sigma)'], {}), '(2 * math.pi * sigma * sigma)\n', (488, 517), False, 'import math\n'), ((611, 631), 'numpy.arange', 'np.arange', (['(0)', '(1)', '(0.1)'], {}), '(0, 1, 0.1)\n', (620, 631), True, 'import numpy as np\n'), ((654, 674), 'numpy.arange', 'np.arange', (['(0)', '(1)', '(0.1)'], {}), '(0, 1, 0.1)\n', (663, 674), True, 'import numpy as np\n'), ((689, 725), 'lir.util.Xn_to_Xy', 'Xn_to_Xy', (['score_class0', 'score_class1'], {}), '(score_class0, score_class1)\n', (697, 725), False, 'from lir.util import Xn_to_Xy, Xy_to_Xn\n'), ((740, 760), 'lir.calibration.IsotonicCalibrator', 'IsotonicCalibrator', ([], {}), '()\n', (758, 760), False, 'from lir.calibration import IsotonicCalibrator\n'), ((937, 994), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['lr0', '([1.0] * lr0.shape[0])'], {}), '(lr0, [1.0] * lr0.shape[0])\n', (967, 994), True, 'import numpy as np\n'), ((1000, 1057), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['lr1', '([1.0] * lr1.shape[0])'], {}), '(lr1, [1.0] * lr1.shape[0])\n', (1030, 1057), True, 'import numpy as np\n'), ((1117, 1130), 'numpy.array', 'np.array', (['lr0'], {}), '(lr0)\n', (1125, 1130), True, 'import numpy as np\n'), ((1145, 1158), 'numpy.array', 'np.array', (['lr1'], {}), '(lr1)\n', (1153, 1158), True, 'import numpy as np\n'), ((1174, 1192), 'lir.util.Xn_to_Xy', 'Xn_to_Xy', (['lr0', 'lr1'], {}), '(lr0, lr1)\n', (1182, 1192), False, 'from lir.util import Xn_to_Xy, Xy_to_Xn\n'), ((1239, 1259), 
'lir.calibration.IsotonicCalibrator', 'IsotonicCalibrator', ([], {}), '()\n', (1257, 1259), False, 'from lir.calibration import IsotonicCalibrator\n'), ((1658, 1705), 'numpy.random.normal', 'np.random.normal', ([], {'loc': '(0)', 'scale': '(1)', 'size': '(40000,)'}), '(loc=0, scale=1, size=(40000,))\n', (1674, 1705), True, 'import numpy as np\n'), ((1719, 1766), 'numpy.random.normal', 'np.random.normal', ([], {'loc': '(1)', 'scale': '(1)', 'size': '(40000,)'}), '(loc=1, scale=1, size=(40000,))\n', (1735, 1766), True, 'import numpy as np\n'), ((2013, 2033), 'numpy.arange', 'np.arange', (['(0)', '(1)', '(0.1)'], {}), '(0, 1, 0.1)\n', (2022, 2033), True, 'import numpy as np\n'), ((2056, 2082), 'numpy.arange', 'np.arange', (['(0.05)', '(1.05)', '(0.1)'], {}), '(0.05, 1.05, 0.1)\n', (2065, 2082), True, 'import numpy as np\n'), ((2096, 2132), 'lir.util.Xn_to_Xy', 'Xn_to_Xy', (['score_class0', 'score_class1'], {}), '(score_class0, score_class1)\n', (2104, 2132), False, 'from lir.util import Xn_to_Xy, Xy_to_Xn\n'), ((2147, 2167), 'lir.calibration.IsotonicCalibrator', 'IsotonicCalibrator', ([], {}), '()\n', (2165, 2167), False, 'from lir.calibration import IsotonicCalibrator\n'), ((295, 311), 'numpy.log2', 'np.log2', (['(1 + lr0)'], {}), '(1 + lr0)\n', (302, 311), True, 'import numpy as np\n'), ((337, 357), 'numpy.log2', 'np.log2', (['(1 + 1 / lr1)'], {}), '(1 + 1 / lr1)\n', (344, 357), True, 'import numpy as np\n'), ((2380, 2429), 'numpy.concatenate', 'np.concatenate', (['[[0], [1.0] * (lr0.shape[0] - 1)]'], {}), '([[0], [1.0] * (lr0.shape[0] - 1)])\n', (2394, 2429), True, 'import numpy as np\n'), ((2470, 2524), 'numpy.concatenate', 'np.concatenate', (['[[1.0] * (lr1.shape[0] - 1), [np.inf]]'], {}), '([[1.0] * (lr1.shape[0] - 1), [np.inf]])\n', (2484, 2524), True, 'import numpy as np\n'), ((438, 457), 'numpy.power', 'np.power', (['(X - mu)', '(2)'], {}), '(X - mu, 2)\n', (446, 457), True, 'import numpy as np\n')] |
import numpy as np
import random
# Part I
def get_order(n_samples):
    """Return the order in which to visit n_samples data points.

    If a file named '<n_samples>.txt' exists, the order is read from its
    first line (comma-separated indices). Otherwise a deterministic
    shuffle (seed 1) of range(n_samples) is returned.
    """
    order_file = str(n_samples) + '.txt'
    try:
        with open(order_file) as handle:
            first_line = handle.readline()
    except FileNotFoundError:
        random.seed(1)
        order = list(range(n_samples))
        random.shuffle(order)
        return order
    return [int(token) for token in first_line.split(',')]
def hinge_loss_single(feature_vector, label, theta, theta_0):
    """Hinge loss of one data point under a linear classifier.

    Args:
        :param feature_vector: Numpy array describing the data point.
        :param label: The correct classification (+1 or -1).
        :param theta: Numpy array describing the linear classifier.
        :param theta_0: Real-valued offset parameter.
    :return: max(0, 1 - label * (theta . x + theta_0)).
    """
    margin = (np.dot(theta, feature_vector) + theta_0) * label
    return max(0.0, 1 - margin)
def hinge_loss_full(feature_matrix, labels, theta, theta_0):
    """Average hinge loss over a dataset under a linear classifier.

    Args:
        :param feature_matrix: Numpy matrix; each row is one data point.
        :param labels: Numpy array; labels[k] is the label of row k.
        :param theta: Numpy array describing the linear classifier.
        :param theta_0: Real-valued offset parameter.
    :return: Mean hinge loss across all points.
    """
    total_loss = 0.0
    for k in range(len(feature_matrix)):
        margin = labels[k] * (np.dot(theta, feature_matrix[k]) + theta_0)
        total_loss += max(0.0, 1 - margin)
    return total_loss / len(labels)
def perceptron_single_step_update(feature_vector, label, current_theta, current_theta_0):
    """One perceptron update on a single data point.

    Updates (theta, theta_0) when the point is misclassified or lies on
    the boundary (margin <= 1e-7, a tolerance against float round-off);
    otherwise returns the parameters unchanged.

    Args:
        :param feature_vector: Numpy array describing the data point.
        :param label: The correct classification (+1 or -1).
        :param current_theta: Theta before this update.
        :param current_theta_0: Theta_0 before this update.
    :return: Tuple (updated_theta, updated_theta_0).
    """
    margin = label * (np.dot(current_theta, feature_vector) + current_theta_0)
    if margin <= 1e-7:
        updated_theta = current_theta + label * feature_vector
        updated_theta_0 = current_theta_0 + label
        return updated_theta, updated_theta_0
    return current_theta, current_theta_0
def perceptron(feature_matrix, labels, T):
    """Run the full perceptron algorithm for T passes over the data.

    Points are visited in the order given by get_order(n_samples) on
    every pass.

    Args:
        :param feature_matrix: Numpy matrix; each row is one data point.
        :param labels: Numpy array; labels[k] is the label of row k.
        :param T: Number of passes over the dataset.
    :return: Tuple (theta, theta_0) after T passes.
    """
    n_samples, n_features = feature_matrix.shape
    theta = np.zeros(n_features)
    theta_0 = 0.0
    for _ in range(T):
        for idx in get_order(n_samples):
            theta, theta_0 = perceptron_single_step_update(
                feature_matrix[idx], labels[idx], theta, theta_0)
    return theta, theta_0
def average_perceptron(feature_matrix, labels, T):
    """Run the average perceptron algorithm for T passes over the data.

    Identical to perceptron() except that the returned parameters are the
    average of (theta, theta_0) over every single update step, which is
    computed as a running sum divided by the number of steps.

    Args:
        :param feature_matrix: Numpy matrix; each row is one data point.
        :param labels: Numpy array; labels[k] is the label of row k.
        :param T: Number of passes over the dataset.
    :return: Tuple (average_theta, average_theta_0).
    """
    n_samples, n_features = feature_matrix.shape
    theta = np.zeros(n_features)
    theta_0 = 0.0
    theta_total = np.zeros(n_features)
    theta_0_total = 0.0
    for _ in range(T):
        for idx in get_order(n_samples):
            theta, theta_0 = perceptron_single_step_update(
                feature_matrix[idx], labels[idx], theta, theta_0)
            theta_total += theta
            theta_0_total += theta_0
    n_steps = n_samples * T
    return theta_total / n_steps, theta_0_total / n_steps
def pegasos_single_step_update(feature_vector, label, l, eta, current_theta, current_theta_0):
    """One Pegasos update on a single data point.

    Theta always decays by (1 - eta * l); if the point violates the
    margin (label * score <= 1), a gradient step toward it is also added.

    Args:
        :param feature_vector: Numpy array describing the data point.
        :param label: The correct classification (+1 or -1).
        :param l: Regularization parameter lambda.
        :param eta: Learning rate for this step.
        :param current_theta: Theta before this update.
        :param current_theta_0: Theta_0 before this update.
    :return: Tuple (updated_theta, updated_theta_0).
    """
    decay = 1 - (eta * l)
    margin = label * (np.dot(feature_vector, current_theta) + current_theta_0)
    if margin <= 1:
        new_theta = (decay * current_theta) + (eta * label * feature_vector)
        return new_theta, current_theta_0 + (eta * label)
    return decay * current_theta, current_theta_0
def pegasos(feature_matrix, labels, T, L):
    """Run the Pegasos algorithm for T passes over the data.

    The learning rate for the t-th update (1-indexed over all updates)
    is 1/sqrt(t). Points are visited in get_order(n_samples) order.

    Args:
        :param feature_matrix: Numpy matrix; each row is one data point.
        :param labels: Numpy array; labels[k] is the label of row k.
        :param T: Number of passes over the dataset.
        :param L: Regularization parameter lambda.
    :return: Tuple (theta, theta_0) after T passes.
    """
    n_samples, n_features = feature_matrix.shape
    theta = np.zeros(n_features)
    theta_0 = 0
    update_count = 0
    for _ in range(T):
        for idx in get_order(n_samples):
            update_count += 1
            step_size = 1.0 / np.sqrt(update_count)
            theta, theta_0 = pegasos_single_step_update(
                feature_matrix[idx], labels[idx], L, step_size, theta, theta_0)
    return theta, theta_0
def classify(feature_matrix, theta, theta_0):
    """Classify a set of data points with a linear classifier.

    Vectorized: all decision values are computed in one matrix-vector
    product instead of a Python loop over rows (same results, O(n)
    numpy work instead of n separate dot calls).

    Args:
        :param feature_matrix: A numpy matrix describing the given data.
            Each row represents a single data point.
        :param theta: A numpy array describing the linear classifier.
        :param theta_0: A real valued number representing the offset parameter.
    :return: A numpy array of 1s and -1s where the kth element is the
        predicted classification of the kth row of the feature matrix. A
        decision value strictly greater than zero is classified as +1;
        zero or below as -1 (matching the original loop's behavior).
    """
    decision_values = np.dot(feature_matrix, theta) + theta_0
    # Use float outputs (1.0 / -1.0) to match the original np.zeros-based
    # result array's dtype.
    return np.where(decision_values > 0, 1.0, -1.0)
def accuracy(predictions, targets):
    """Fraction of positions where predictions equal targets.

    Both arguments are length-N numpy arrays of predicted and true labels.
    """
    correct = predictions == targets
    return correct.mean()
def classifier_accuracy(
        classifier,
        train_feature_matrix,
        val_feature_matrix,
        train_labels,
        val_labels,
        **kwargs):
    """Train a linear classifier and report train/validation accuracy.

    Args:
        classifier - Function (feature_matrix, labels, **kwargs) ->
            (theta, theta_0) that trains the classifier.
        train_feature_matrix - Numpy matrix of training points (one per row).
        val_feature_matrix - Numpy matrix of validation points (one per row).
        train_labels - Numpy array of training labels.
        val_labels - Numpy array of validation labels.
        **kwargs - Extra named arguments forwarded to the classifier
            (e.g. T or L).

    Returns: Tuple (train_accuracy, validation_accuracy) of the trained
        classifier on the two datasets.
    """
    theta, theta_0 = classifier(train_feature_matrix, train_labels, **kwargs)
    train_preds = classify(train_feature_matrix, theta, theta_0)
    train_acc = accuracy(train_preds, train_labels)
    val_preds = classify(val_feature_matrix, theta, theta_0)
    val_acc = accuracy(val_preds, val_labels)
    return train_acc, val_acc
| [
"random.shuffle",
"numpy.zeros",
"random.seed",
"numpy.dot",
"numpy.sqrt"
] | [((4317, 4335), 'numpy.zeros', 'np.zeros', (['features'], {}), '(features)\n', (4325, 4335), True, 'import numpy as np\n'), ((5954, 5972), 'numpy.zeros', 'np.zeros', (['features'], {}), '(features)\n', (5962, 5972), True, 'import numpy as np\n'), ((5989, 6007), 'numpy.zeros', 'np.zeros', (['features'], {}), '(features)\n', (5997, 6007), True, 'import numpy as np\n'), ((8864, 8882), 'numpy.zeros', 'np.zeros', (['features'], {}), '(features)\n', (8872, 8882), True, 'import numpy as np\n'), ((10028, 10045), 'numpy.zeros', 'np.zeros', (['samples'], {}), '(samples)\n', (10036, 10045), True, 'import numpy as np\n'), ((956, 985), 'numpy.dot', 'np.dot', (['theta', 'feature_vector'], {}), '(theta, feature_vector)\n', (962, 985), True, 'import numpy as np\n'), ((252, 266), 'random.seed', 'random.seed', (['(1)'], {}), '(1)\n', (263, 266), False, 'import random\n'), ((316, 339), 'random.shuffle', 'random.shuffle', (['indices'], {}), '(indices)\n', (330, 339), False, 'import random\n'), ((10139, 10168), 'numpy.dot', 'np.dot', (['theta', 'feature_vector'], {}), '(theta, feature_vector)\n', (10145, 10168), True, 'import numpy as np\n'), ((2875, 2912), 'numpy.dot', 'np.dot', (['current_theta', 'feature_vector'], {}), '(current_theta, feature_vector)\n', (2881, 2912), True, 'import numpy as np\n'), ((7381, 7418), 'numpy.dot', 'np.dot', (['feature_vector', 'current_theta'], {}), '(feature_vector, current_theta)\n', (7387, 7418), True, 'import numpy as np\n'), ((9020, 9034), 'numpy.sqrt', 'np.sqrt', (['count'], {}), '(count)\n', (9027, 9034), True, 'import numpy as np\n')] |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tf.contrib.kfac.fisher_factors."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import numpy.random as npr
from tensorflow.contrib.kfac.python.ops import fisher_blocks as fb
from tensorflow.contrib.kfac.python.ops import fisher_factors as ff
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops as tf_ops
from tensorflow.python.framework import random_seed
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variables as tf_variables
from tensorflow.python.platform import test
def make_damping_func(damping):
  """Wraps a constant `damping` value as a zero-arg callable via fb._package_func."""
  return fb._package_func(lambda: damping, damping)
class FisherFactorTestingDummy(ff.FisherFactor):
  """Dummy class to test the non-abstract methods on ff.FisherFactor."""
  @property
  def _var_scope(self):
    # Fixed scope name; tests look up created variables under this prefix.
    return 'dummy/a_b_c'
  @property
  def _cov_shape(self):
    raise NotImplementedError
  @property
  def _num_sources(self):
    return 1
  @property
  def _dtype(self):
    return dtypes.float32
  def _compute_new_cov(self):
    raise NotImplementedError
  def instantiate_covariance(self):
    pass
  def make_inverse_update_ops(self):
    # Behavior under test: with nothing registered there are no update ops.
    return []
  def get_cov(self):
    # NOTE(review): this and the stubs below `return` the NotImplementedError
    # class instead of raising it. Callers would receive the class object
    # rather than an exception — presumably fine for a test dummy that never
    # invokes them, but confirm before reusing this pattern.
    return NotImplementedError
  def left_multiply(self, x, damping):
    return NotImplementedError
  def right_multiply(self, x, damping):
    return NotImplementedError
  def left_multiply_matpower(self, x, exp, damping):
    return NotImplementedError
  def right_multiply_matpower(self, x, exp, damping):
    return NotImplementedError
  def instantiate_inv_variables(self):
    return NotImplementedError
class InverseProvidingFactorTestingDummy(ff.InverseProvidingFactor):
  """Dummy class to test the non-abstract methods on ff.InverseProvidingFactor.
  """
  def __init__(self, shape):
    # The shape reported by _cov_shape; assigned before super().__init__ so the
    # base constructor can read the property.
    self._shape = shape
    super(InverseProvidingFactorTestingDummy, self).__init__()
  @property
  def _var_scope(self):
    # Fixed scope name; tests look up created variables under this prefix.
    return 'dummy/a_b_c'
  @property
  def _cov_shape(self):
    return self._shape
  @property
  def _num_sources(self):
    return 1
  @property
  def _dtype(self):
    return dtypes.float32
  def _compute_new_cov(self):
    raise NotImplementedError
  def instantiate_covariance(self):
    pass
class NumericalUtilsTest(test.TestCase):
  """Checks ff numerical helpers against NumPy reference implementations."""
  def testComputeCovAgainstNumpy(self):
    """ff.compute_cov default normalizer should equal x^T x / batch_size."""
    with tf_ops.Graph().as_default(), self.test_session() as sess:
      npr.seed(0)
      random_seed.set_random_seed(200)
      x = npr.randn(100, 3)
      cov = ff.compute_cov(array_ops.constant(x))
      np_cov = np.dot(x.T, x) / x.shape[0]
      self.assertAllClose(sess.run(cov), np_cov)
  def testComputeCovAgainstNumpyWithAlternativeNormalizer(self):
    """An explicit normalizer should replace the batch-size denominator."""
    with tf_ops.Graph().as_default(), self.test_session() as sess:
      npr.seed(0)
      random_seed.set_random_seed(200)
      normalizer = 10.
      x = npr.randn(100, 3)
      cov = ff.compute_cov(array_ops.constant(x), normalizer=normalizer)
      np_cov = np.dot(x.T, x) / normalizer
      self.assertAllClose(sess.run(cov), np_cov)
  def testAppendHomog(self):
    """ff.append_homog should append a column of ones (homogeneous coords)."""
    with tf_ops.Graph().as_default(), self.test_session() as sess:
      npr.seed(0)
      m, n = 3, 4
      a = npr.randn(m, n)
      a_homog = ff.append_homog(array_ops.constant(a))
      np_result = np.hstack([a, np.ones((m, 1))])
      self.assertAllClose(sess.run(a_homog), np_result)
class NameStringUtilFunctionTest(test.TestCase):
  """Tests the scope/name string helpers used to name factor variables."""
  def _make_tensor(self):
    """Builds a gradient tensor with a deterministic, known op name."""
    x = array_ops.placeholder(dtypes.float64, (3, 1))
    w = array_ops.constant(npr.RandomState(0).randn(3, 3))
    y = math_ops.matmul(w, x)
    g = gradients_impl.gradients(y, x)[0]
    return g
  def testScopeStringFromParamsSingleTensor(self):
    with tf_ops.Graph().as_default():
      g = self._make_tensor()
      scope_string = ff.scope_string_from_params(g)
      self.assertEqual('gradients_MatMul_grad_MatMul_1', scope_string)
  def testScopeStringFromParamsMultipleTensors(self):
    with tf_ops.Graph().as_default():
      x = array_ops.constant(1,)
      y = array_ops.constant(2,)
      scope_string = ff.scope_string_from_params((x, y))
      self.assertEqual('Const_Const_1', scope_string)
  def testScopeStringFromParamsMultipleTypes(self):
    """Mixed lists/strings/bools/ints/tensors all get folded into the name."""
    with tf_ops.Graph().as_default():
      x = array_ops.constant(1,)
      y = array_ops.constant(2,)
      scope_string = ff.scope_string_from_params([[1, 2, 3], 'foo', True, 4,
                                                   (x, y)])
      self.assertEqual('1-2-3_foo_True_4_Const__Const_1', scope_string)
  def testScopeStringFromParamsUnsupportedType(self):
    with tf_ops.Graph().as_default():
      x = array_ops.constant(1,)
      y = array_ops.constant(2,)
      unsupported = 1.2  # Floats are not supported.
      with self.assertRaises(ValueError):
        ff.scope_string_from_params([[1, 2, 3], 'foo', True, 4, (x, y),
                                     unsupported])
  def testScopeStringFromName(self):
    with tf_ops.Graph().as_default():
      g = self._make_tensor()
      scope_string = ff.scope_string_from_name(g)
      self.assertEqual('gradients_MatMul_grad_MatMul_1', scope_string)
  def testScalarOrTensorToString(self):
    """Scalars stringify via repr; tensors via their scope-derived name."""
    with tf_ops.Graph().as_default():
      self.assertEqual(ff.scalar_or_tensor_to_string(5.), repr(5.))
      g = self._make_tensor()
      scope_string = ff.scope_string_from_name(g)
      self.assertEqual(ff.scalar_or_tensor_to_string(g), scope_string)
class FisherFactorTest(test.TestCase):
  """Tests the non-abstract behavior of the ff.FisherFactor base class."""
  def testMakeInverseUpdateOps(self):
    """With no inverses registered, no inverse-update ops are produced."""
    with tf_ops.Graph().as_default():
      random_seed.set_random_seed(200)
      factor = FisherFactorTestingDummy()
      self.assertEqual(0, len(factor.make_inverse_update_ops()))
class InverseProvidingFactorTest(test.TestCase):
  """Tests inverse/matpower registration and update ops on the dummy factor."""
  def testRegisterDampedInverse(self):
    """Equal damping values share one inverse variable; distinct ones don't."""
    with tf_ops.Graph().as_default():
      random_seed.set_random_seed(200)
      shape = [2, 2]
      factor = InverseProvidingFactorTestingDummy(shape)
      factor_var_scope = 'dummy/a_b_c'
      damping_funcs = [make_damping_func(0.1),
                       make_damping_func(0.1),
                       make_damping_func(1e-5),
                       make_damping_func(1e-5)]
      for damping_func in damping_funcs:
        factor.register_inverse(damping_func)
      factor.instantiate_inv_variables()
      inv = factor.get_inverse(damping_funcs[0])
      self.assertEqual(inv, factor.get_inverse(damping_funcs[1]))
      self.assertNotEqual(inv, factor.get_inverse(damping_funcs[2]))
      self.assertEqual(factor.get_inverse(damping_funcs[2]),
                       factor.get_inverse(damping_funcs[3]))
      # Exactly two variables should have been created under the factor scope.
      factor_vars = tf_ops.get_collection(tf_ops.GraphKeys.GLOBAL_VARIABLES,
                                            factor_var_scope)
      self.assertEqual(set([inv, factor.get_inverse(damping_funcs[2])]),
                       set(factor_vars))
      self.assertEqual(shape, inv.get_shape())
  def testRegisterMatpower(self):
    """Each registered matrix power gets its own correctly-shaped variable."""
    with tf_ops.Graph().as_default():
      random_seed.set_random_seed(200)
      shape = [3, 3]
      factor = InverseProvidingFactorTestingDummy(shape)
      factor_var_scope = 'dummy/a_b_c'
      # TODO(b/74201126): Change to using the same func for both once
      # Topohash is in place.
      damping_func_1 = make_damping_func(0.5)
      damping_func_2 = make_damping_func(0.5)
      factor.register_matpower(-0.5, damping_func_1)
      factor.register_matpower(2, damping_func_2)
      factor.instantiate_inv_variables()
      factor_vars = tf_ops.get_collection(tf_ops.GraphKeys.GLOBAL_VARIABLES,
                                            factor_var_scope)
      matpower1 = factor.get_matpower(-0.5, damping_func_1)
      matpower2 = factor.get_matpower(2, damping_func_2)
      self.assertEqual(set([matpower1, matpower2]), set(factor_vars))
      self.assertEqual(shape, matpower1.get_shape())
      self.assertEqual(shape, matpower2.get_shape())
  def testMakeInverseUpdateOps(self):
    """With no inverses registered, no inverse-update ops are produced."""
    with tf_ops.Graph().as_default():
      random_seed.set_random_seed(200)
      factor = FisherFactorTestingDummy()
      self.assertEqual(0, len(factor.make_inverse_update_ops()))
  def testMakeInverseUpdateOpsManyInversesEigenDecomp(self):
    """Many registered inverses should share a single (eigendecomp) update op."""
    with tf_ops.Graph().as_default(), self.test_session() as sess:
      random_seed.set_random_seed(200)
      cov = np.array([[1., 2.], [3., 4.]])
      factor = InverseProvidingFactorTestingDummy(cov.shape)
      factor._cov = array_ops.constant(cov, dtype=dtypes.float32)
      damping_funcs = []
      for i in range(1, ff.EIGENVALUE_DECOMPOSITION_THRESHOLD + 1):
        damping_funcs.append(make_damping_func(1./i))
      for i in range(ff.EIGENVALUE_DECOMPOSITION_THRESHOLD):
        factor.register_inverse(damping_funcs[i])
      factor.instantiate_inv_variables()
      ops = factor.make_inverse_update_ops()
      self.assertEqual(1, len(ops))
      sess.run(tf_variables.global_variables_initializer())
      new_invs = []
      sess.run(ops)
      for i in range(ff.EIGENVALUE_DECOMPOSITION_THRESHOLD):
        # The inverse op will assign the damped inverse of cov to the inv var.
        new_invs.append(sess.run(factor.get_inverse(damping_funcs[i])))
      # We want to see that the new invs are all different from each other.
      for i in range(len(new_invs)):
        for j in range(i + 1, len(new_invs)):
          # Just check the first element.
          self.assertNotEqual(new_invs[i][0][0], new_invs[j][0][0])
  def testMakeInverseUpdateOpsMatPowerEigenDecomp(self):
    """Damped matrix power should match np.linalg.matrix_power(cov + d*I, exp)."""
    with tf_ops.Graph().as_default(), self.test_session() as sess:
      random_seed.set_random_seed(200)
      cov = np.array([[6., 2.], [2., 4.]])
      factor = InverseProvidingFactorTestingDummy(cov.shape)
      factor._cov = array_ops.constant(cov, dtype=dtypes.float32)
      exp = 2  # NOTE(mattjj): must be int to test with np.linalg.matrix_power
      damping = 0.5
      damping_func = make_damping_func(damping)
      factor.register_matpower(exp, damping_func)
      factor.instantiate_inv_variables()
      ops = factor.make_inverse_update_ops()
      self.assertEqual(1, len(ops))
      sess.run(tf_variables.global_variables_initializer())
      sess.run(ops[0])
      matpower = sess.run(factor.get_matpower(exp, damping_func))
      matpower_np = np.linalg.matrix_power(cov + np.eye(2) * damping, exp)
      self.assertAllClose(matpower, matpower_np)
  def testMakeInverseUpdateOpsNoEigenDecomp(self):
    """Single inverse below the eigendecomp threshold should equal np.linalg.inv."""
    with tf_ops.Graph().as_default(), self.test_session() as sess:
      random_seed.set_random_seed(200)
      cov = np.array([[5., 2.], [2., 4.]])  # NOTE(mattjj): must be symmetric
      factor = InverseProvidingFactorTestingDummy(cov.shape)
      factor._cov = array_ops.constant(cov, dtype=dtypes.float32)
      damping_func = make_damping_func(0)
      factor.register_inverse(damping_func)
      factor.instantiate_inv_variables()
      ops = factor.make_inverse_update_ops()
      self.assertEqual(1, len(ops))
      sess.run(tf_variables.global_variables_initializer())
      # The inverse op will assign the damped inverse of cov to the inv var.
      old_inv = sess.run(factor.get_inverse(damping_func))
      self.assertAllClose(
          sess.run(ff.inverse_initializer(cov.shape, dtypes.float32)), old_inv)
      sess.run(ops)
      new_inv = sess.run(factor.get_inverse(damping_func))
      self.assertAllClose(new_inv, np.linalg.inv(cov))
class FullFactorTest(test.TestCase):
  """Tests ff.FullFactor shape, dtype, and covariance-update semantics."""
  def testFullFactorInit(self):
    """A (2, 3) input tensor flattens to a 6x6 covariance."""
    with tf_ops.Graph().as_default():
      random_seed.set_random_seed(200)
      tensor = array_ops.ones((2, 3), name='a/b/c')
      factor = ff.FullFactor((tensor,), 32)
      factor.instantiate_cov_variables()
      self.assertEqual([6, 6], factor.get_cov().get_shape().as_list())
  def testFullFactorInitFloat64(self):
    """The covariance variable should inherit the input dtype (float64)."""
    with tf_ops.Graph().as_default():
      dtype = dtypes.float64_ref
      random_seed.set_random_seed(200)
      tensor = array_ops.ones((2, 3), dtype=dtype, name='a/b/c')
      factor = ff.FullFactor((tensor,), 32)
      factor.instantiate_cov_variables()
      cov = factor.get_cov()
      self.assertEqual(cov.dtype, dtype)
      self.assertEqual([6, 6], cov.get_shape().as_list())
  def testMakeCovarianceUpdateOp(self):
    """Checks one decayed (0.5) covariance update against known values."""
    with tf_ops.Graph().as_default(), self.test_session() as sess:
      random_seed.set_random_seed(200)
      tensor = array_ops.constant([1., 2.], name='a/b/c')
      factor = ff.FullFactor((tensor,), 2)
      factor.instantiate_cov_variables()
      sess.run(tf_variables.global_variables_initializer())
      new_cov = sess.run(factor.make_covariance_update_op(.5))
      self.assertAllClose([[0.75, 0.5], [0.5, 1.5]], new_cov)
class NaiveDiagonalFactorTest(test.TestCase):
  """Tests ff.NaiveDiagonalFactor shape, dtype, and covariance updates."""
  def testNaiveDiagonalFactorInit(self):
    """A (2, 3) input flattens to a 6x1 diagonal covariance vector."""
    with tf_ops.Graph().as_default():
      random_seed.set_random_seed(200)
      tensor = array_ops.ones((2, 3), name='a/b/c')
      factor = ff.NaiveDiagonalFactor((tensor,), 32)
      factor.instantiate_cov_variables()
      self.assertEqual([6, 1], factor.get_cov_var().get_shape().as_list())
  def testNaiveDiagonalFactorInitFloat64(self):
    """The covariance variable should inherit the input dtype (float64)."""
    with tf_ops.Graph().as_default():
      dtype = dtypes.float64_ref
      random_seed.set_random_seed(200)
      tensor = array_ops.ones((2, 3), dtype=dtype, name='a/b/c')
      factor = ff.NaiveDiagonalFactor((tensor,), 32)
      factor.instantiate_cov_variables()
      cov = factor.get_cov_var()
      self.assertEqual(cov.dtype, dtype)
      self.assertEqual([6, 1], cov.get_shape().as_list())
  def testMakeCovarianceUpdateOp(self):
    """Checks one decayed (0.5) diagonal covariance update against known values."""
    with tf_ops.Graph().as_default(), self.test_session() as sess:
      random_seed.set_random_seed(200)
      tensor = array_ops.constant([1., 2.], name='a/b/c')
      factor = ff.NaiveDiagonalFactor((tensor,), 2)
      factor.instantiate_cov_variables()
      sess.run(tf_variables.global_variables_initializer())
      new_cov = sess.run(factor.make_covariance_update_op(.5))
      self.assertAllClose([[0.75], [1.5]], new_cov)
class EmbeddingInputKroneckerFactorTest(test.TestCase):
  """Tests ff.EmbeddingInputKroneckerFactor over a small vocabulary."""
  def testInitialization(self):
    """The covariance should be a vector of length vocab_size."""
    with tf_ops.Graph().as_default():
      input_ids = array_ops.constant([[0], [1], [4]])
      vocab_size = 5
      factor = ff.EmbeddingInputKroneckerFactor(input_ids, vocab_size)
      factor.instantiate_cov_variables()
      cov = factor.get_cov_var()
      self.assertEqual(cov.shape.as_list(), [vocab_size])
  def testCovarianceUpdateOp(self):
    """Each looked-up id contributes 1/batch_size mass to its vocab slot."""
    with tf_ops.Graph().as_default():
      input_ids = array_ops.constant([[0], [1], [4]])
      vocab_size = 5
      factor = ff.EmbeddingInputKroneckerFactor(input_ids, vocab_size)
      factor.instantiate_cov_variables()
      cov_update_op = factor.make_covariance_update_op(0.0)
      with self.test_session() as sess:
        sess.run(tf_variables.global_variables_initializer())
        new_cov = sess.run(cov_update_op)
        self.assertAllClose(np.array([1., 1., 0., 0., 1.]) / 3., new_cov)
class ConvDiagonalFactorTest(test.TestCase):
  """Tests ff.ConvDiagonalFactor shape handling and covariance updates."""
  def setUp(self):
    # Shared NHWC convolution configuration used by all tests in this class.
    self.batch_size = 10
    self.height = self.width = 32
    self.in_channels = 3
    self.out_channels = 1
    self.kernel_height = self.kernel_width = 3
    self.strides = [1, 2, 2, 1]
    self.data_format = 'NHWC'
    self.padding = 'SAME'
    self.kernel_shape = [
        self.kernel_height, self.kernel_width, self.in_channels,
        self.out_channels
    ]
  def testInit(self):
    """The covariance rows cover the flattened kernel input dimensions."""
    with tf_ops.Graph().as_default():
      inputs = random_ops.random_uniform(
          [self.batch_size, self.height, self.width, self.in_channels])
      outputs_grads = [
          random_ops.random_uniform([
              self.batch_size, self.height // self.strides[1],
              self.width // self.strides[2], self.out_channels
          ]) for _ in range(3)
      ]
      factor = ff.ConvDiagonalFactor(
          inputs,
          outputs_grads,
          self.kernel_shape,
          self.strides,
          self.padding,
          data_format=self.data_format)
      factor.instantiate_cov_variables()
      # Ensure covariance matrix's shape makes sense.
      self.assertEqual([
          self.kernel_height * self.kernel_width * self.in_channels,
          self.out_channels
      ],
                       factor.get_cov_var().shape.as_list())
  def testMakeCovarianceUpdateOp(self):
    """With a single-patch input, the update is the squared outer product."""
    with tf_ops.Graph().as_default():
      # Construct all arguments such that convolution kernel is applied in
      # exactly one spatial location.
      inputs = np.random.randn(
          1,  # batch_size
          self.kernel_height,
          self.kernel_width,
          self.in_channels)  # in_channels
      outputs_grad = np.random.randn(
          1,  # batch_size
          1,  # output_height
          1,  # output_width
          self.out_channels)
      factor = ff.ConvDiagonalFactor(
          constant_op.constant(inputs), [constant_op.constant(outputs_grad)],
          self.kernel_shape,
          strides=[1, 1, 1, 1],
          padding='VALID')
      factor.instantiate_cov_variables()
      # Completely forget initial value on first update.
      cov_update_op = factor.make_covariance_update_op(0.0)
      # Ensure new covariance value is same as outer-product of inputs/outputs
      # vectorized, squared.
      with self.test_session() as sess:
        sess.run(tf_variables.global_variables_initializer())
        cov = sess.run(cov_update_op)
        expected_cov = np.outer(inputs.flatten(), outputs_grad.flatten())**2
        self.assertAllClose(expected_cov, cov)
  def testHasBias(self):
    """has_bias=True should add one extra row for the bias term."""
    with tf_ops.Graph().as_default():
      inputs = random_ops.random_uniform(
          [self.batch_size, self.height, self.width, self.in_channels])
      outputs_grads = [
          random_ops.random_uniform([
              self.batch_size, self.height // self.strides[1],
              self.width // self.strides[2], self.out_channels
          ]) for _ in range(3)
      ]
      factor = ff.ConvDiagonalFactor(
          inputs,
          outputs_grads,
          self.kernel_shape,
          self.strides,
          self.padding,
          data_format=self.data_format,
          has_bias=True)
      factor.instantiate_cov_variables()
      # Ensure shape accounts for bias.
      self.assertEqual([
          self.kernel_height * self.kernel_width * self.in_channels + 1,
          self.out_channels
      ],
                       factor.get_cov_var().shape.as_list())
      # Ensure update op doesn't crash.
      cov_update_op = factor.make_covariance_update_op(0.0)
      with self.test_session() as sess:
        sess.run(tf_variables.global_variables_initializer())
        sess.run(cov_update_op)
class FullyConnectedKroneckerFactorTest(test.TestCase):
  """Tests ff.FullyConnectedKroneckerFactor with and without a bias term."""
  def _testFullyConnectedKroneckerFactorInit(self,
                                             has_bias,
                                             final_shape,
                                             dtype=dtypes.float32_ref):
    """Shared helper: checks covariance dtype and final shape after init."""
    with tf_ops.Graph().as_default():
      random_seed.set_random_seed(200)
      tensor = array_ops.ones((2, 3), dtype=dtype, name='a/b/c')
      factor = ff.FullyConnectedKroneckerFactor((tensor,), has_bias=has_bias)
      factor.instantiate_cov_variables()
      cov = factor.get_cov()
      self.assertEqual(cov.dtype, dtype)
      self.assertEqual(final_shape, cov.get_shape().as_list())
  def testFullyConnectedKroneckerFactorInitNoBias(self):
    for dtype in (dtypes.float32_ref, dtypes.float64_ref):
      self._testFullyConnectedKroneckerFactorInit(False, [3, 3], dtype=dtype)
  def testFullyConnectedKroneckerFactorInitWithBias(self):
    # Bias adds one homogeneous dimension: 3 -> 4.
    for dtype in (dtypes.float32_ref, dtypes.float64_ref):
      self._testFullyConnectedKroneckerFactorInit(True, [4, 4], dtype=dtype)
  def testMakeCovarianceUpdateOpWithBias(self):
    """Checks one decayed (0.5) update with the homogeneous bias column."""
    with tf_ops.Graph().as_default(), self.test_session() as sess:
      random_seed.set_random_seed(200)
      tensor = array_ops.constant([[1., 2.], [3., 4.]], name='a/b/c')
      factor = ff.FullyConnectedKroneckerFactor((tensor,), has_bias=True)
      factor.instantiate_cov_variables()
      sess.run(tf_variables.global_variables_initializer())
      new_cov = sess.run(factor.make_covariance_update_op(.5))
      self.assertAllClose([[3, 3.5, 1], [3.5, 5.5, 1.5], [1, 1.5, 1]], new_cov)
  def testMakeCovarianceUpdateOpNoBias(self):
    """Checks one decayed (0.5) update without a bias column."""
    with tf_ops.Graph().as_default(), self.test_session() as sess:
      random_seed.set_random_seed(200)
      tensor = array_ops.constant([[1., 2.], [3., 4.]], name='a/b/c')
      factor = ff.FullyConnectedKroneckerFactor((tensor,))
      factor.instantiate_cov_variables()
      sess.run(tf_variables.global_variables_initializer())
      new_cov = sess.run(factor.make_covariance_update_op(.5))
      self.assertAllClose([[3, 3.5], [3.5, 5.5]], new_cov)
class ConvFactorTestCase(test.TestCase):
  """Base test case providing a rank assertion for convolution factors."""
  def assertMatrixRank(self, rank, matrix, atol=1e-5):
    """Asserts that `matrix` has exactly `rank` eigenvalues above `atol`."""
    assert rank <= matrix.shape[0], 'Rank cannot be larger than matrix size.'
    # Count eigenvalues that exceed the tolerance; that count is the rank.
    spectrum = np.linalg.eigvals(matrix)
    observed_rank = np.sum(spectrum > atol)
    failure_message = ('Found %d of %d expected non-zero eigenvalues: %s.' %
                       (observed_rank, rank, spectrum))
    self.assertEqual(rank, observed_rank, msg=failure_message)
class ConvInputKroneckerFactorTest(ConvFactorTestCase):
  """Tests ff.ConvInputKroneckerFactor across patch-extraction variants."""
  def test3DConvolution(self):
    """3-D conv with stride 2 over a width-3 cube; expects a rank-8 covariance."""
    with tf_ops.Graph().as_default():
      batch_size = 1
      width = 3
      in_channels = 3**3
      out_channels = 4
      factor = ff.ConvInputKroneckerFactor(
          inputs=random_ops.random_uniform(
              (batch_size, width, width, width, in_channels), seed=0),
          filter_shape=(width, width, width, in_channels, out_channels),
          padding='SAME',
          strides=(2, 2, 2),
          extract_patches_fn='extract_convolution_patches',
          has_bias=False)
      factor.instantiate_cov_variables()
      # Ensure shape of covariance matches input size of filter.
      input_size = in_channels * (width**3)
      self.assertEqual([input_size, input_size],
                       factor.get_cov_var().shape.as_list())
      # Ensure cov_update_op doesn't crash.
      with self.test_session() as sess:
        sess.run(tf_variables.global_variables_initializer())
        sess.run(factor.make_covariance_update_op(0.0))
        cov = sess.run(factor.get_cov_var())
      # Cov should be rank-8, as the filter will be applied at each corner of
      # the 4-D cube.
      self.assertMatrixRank(8, cov)
  def testPointwiseConv2d(self):
    """1x1 conv applied at each of the 9 spatial locations; rank-9 covariance."""
    with tf_ops.Graph().as_default():
      batch_size = 1
      width = 3
      in_channels = 3**2
      out_channels = 4
      factor = ff.ConvInputKroneckerFactor(
          inputs=random_ops.random_uniform(
              (batch_size, width, width, in_channels), seed=0),
          filter_shape=(1, 1, in_channels, out_channels),
          padding='SAME',
          strides=(1, 1, 1, 1),
          extract_patches_fn='extract_pointwise_conv2d_patches',
          has_bias=False)
      factor.instantiate_cov_variables()
      # Ensure shape of covariance matches input size of filter.
      self.assertEqual([in_channels, in_channels],
                       factor.get_cov_var().shape.as_list())
      # Ensure cov_update_op doesn't crash.
      with self.test_session() as sess:
        sess.run(tf_variables.global_variables_initializer())
        sess.run(factor.make_covariance_update_op(0.0))
        cov = sess.run(factor.get_cov_var())
      # Cov should be rank-9, as the filter will be applied at each location.
      self.assertMatrixRank(9, cov)
  def testStrides(self):
    """Height stride of 2 halves the patch count: 3 * 2 = 6 outer products."""
    with tf_ops.Graph().as_default():
      batch_size = 1
      width = 3
      in_channels = 3**2
      out_channels = 4
      factor = ff.ConvInputKroneckerFactor(
          inputs=random_ops.random_uniform(
              (batch_size, width, width, in_channels), seed=0),
          filter_shape=(1, 1, in_channels, out_channels),
          padding='SAME',
          strides=(1, 2, 1, 1),
          extract_patches_fn='extract_image_patches',
          has_bias=False)
      factor.instantiate_cov_variables()
      with self.test_session() as sess:
        sess.run(tf_variables.global_variables_initializer())
        sess.run(factor.make_covariance_update_op(0.0))
        cov = sess.run(factor.get_cov_var())
      # Cov should be the sum of 3 * 2 = 6 outer products.
      self.assertMatrixRank(6, cov)
  def testDilationRate(self):
    """Dilation equal to the width leaves only the filter center effective."""
    with tf_ops.Graph().as_default():
      batch_size = 1
      width = 3
      in_channels = 2
      out_channels = 4
      factor = ff.ConvInputKroneckerFactor(
          inputs=random_ops.random_uniform(
              (batch_size, width, width, in_channels), seed=0),
          filter_shape=(3, 3, in_channels, out_channels),
          padding='SAME',
          extract_patches_fn='extract_image_patches',
          strides=(1, 1, 1, 1),
          dilation_rate=(1, width, width, 1),
          has_bias=False)
      factor.instantiate_cov_variables()
      with self.test_session() as sess:
        sess.run(tf_variables.global_variables_initializer())
        sess.run(factor.make_covariance_update_op(0.0))
        cov = sess.run(factor.get_cov_var())
      # Cov should be rank = in_channels, as only the center of the filter
      # receives non-zero input for each input channel.
      self.assertMatrixRank(in_channels, cov)
  def testConvInputKroneckerFactorInitNoBias(self):
    """Covariance side length equals the flattened filter input size."""
    with tf_ops.Graph().as_default():
      tensor = array_ops.ones((64, 1, 2, 3), name='a/b/c')
      factor = ff.ConvInputKroneckerFactor(
          inputs=tensor,
          filter_shape=(1, 2, 3, 4),
          padding='SAME',
          has_bias=False)
      factor.instantiate_cov_variables()
      self.assertEqual([1 * 2 * 3, 1 * 2 * 3],
                       factor.get_cov().get_shape().as_list())
  def testConvInputKroneckerFactorInit(self):
    """has_bias=True adds one homogeneous dimension to the covariance."""
    with tf_ops.Graph().as_default():
      tensor = array_ops.ones((64, 1, 2, 3), name='a/b/c')
      factor = ff.ConvInputKroneckerFactor(
          tensor, filter_shape=(1, 2, 3, 4), padding='SAME', has_bias=True)
      factor.instantiate_cov_variables()
      self.assertEqual([1 * 2 * 3 + 1, 1 * 2 * 3 + 1],
                       factor.get_cov().get_shape().as_list())
  def testConvInputKroneckerFactorInitFloat64(self):
    """The covariance variable should inherit the float64 input dtype."""
    with tf_ops.Graph().as_default():
      dtype = dtypes.float64_ref
      tensor = array_ops.ones((64, 1, 2, 3), name='a/b/c', dtype=dtypes.float64)
      factor = ff.ConvInputKroneckerFactor(
          tensor, filter_shape=(1, 2, 3, 4), padding='SAME', has_bias=True)
      factor.instantiate_cov_variables()
      cov = factor.get_cov()
      self.assertEqual(cov.dtype, dtype)
      self.assertEqual([1 * 2 * 3 + 1, 1 * 2 * 3 + 1],
                       cov.get_shape().as_list())
  def testMakeCovarianceUpdateOpWithBias(self):
    """Checks the first (decay 0) update for a 1x1 patch with bias column."""
    with tf_ops.Graph().as_default(), self.test_session() as sess:
      input_shape = (2, 1, 1, 1)
      tensor = array_ops.constant(
          np.arange(1, 1 + np.prod(input_shape)).reshape(input_shape).astype(
              np.float32))
      factor = ff.ConvInputKroneckerFactor(
          tensor, filter_shape=(1, 1, 1, 1), padding='SAME', has_bias=True)
      factor.instantiate_cov_variables()
      sess.run(tf_variables.global_variables_initializer())
      new_cov = sess.run(factor.make_covariance_update_op(0.))
      self.assertAllClose(
          [
              [(1. + 4.) / 2., (1. + 2.) / 2.],  #
              [(1. + 2.) / 2., (1. + 1.) / 2.]
          ],  #
          new_cov)
  def testMakeCovarianceUpdateOpNoBias(self):
    """Checks the first (decay 0) update for a 1x1 patch without bias."""
    with tf_ops.Graph().as_default(), self.test_session() as sess:
      input_shape = (2, 1, 1, 1)
      tensor = array_ops.constant(
          np.arange(1, 1 + np.prod(input_shape)).reshape(input_shape).astype(
              np.float32))
      factor = ff.ConvInputKroneckerFactor(
          tensor, filter_shape=(1, 1, 1, 1), padding='SAME')
      factor.instantiate_cov_variables()
      sess.run(tf_variables.global_variables_initializer())
      new_cov = sess.run(factor.make_covariance_update_op(0.))
      self.assertAllClose([[(1. + 4.) / 2.]], new_cov)
class ConvOutputKroneckerFactorTest(ConvFactorTestCase):
  """Tests ff.ConvOutputKroneckerFactor shape, dtype, and updates."""
  def test3DConvolution(self):
    """3-D output grads: each spatial position contributes a rank-1 update."""
    with tf_ops.Graph().as_default():
      batch_size = 1
      width = 3
      out_channels = width**3
      factor = ff.ConvOutputKroneckerFactor(outputs_grads=[
          random_ops.random_uniform(
              (batch_size, width, width, width, out_channels), seed=0)
      ])
      factor.instantiate_cov_variables()
      with self.test_session() as sess:
        sess.run(tf_variables.global_variables_initializer())
        sess.run(factor.make_covariance_update_op(0.0))
        cov = sess.run(factor.get_cov())
      # Cov should be rank 3^3, as each spatial position donates a rank-1
      # update.
      self.assertMatrixRank(width**3, cov)
  def testConvOutputKroneckerFactorInit(self):
    """Covariance side length equals the output-channel count (5)."""
    with tf_ops.Graph().as_default():
      random_seed.set_random_seed(200)
      tensor = array_ops.ones((2, 3, 4, 5), name='a/b/c')
      factor = ff.ConvOutputKroneckerFactor((tensor,))
      factor.instantiate_cov_variables()
      self.assertEqual([5, 5], factor.get_cov().get_shape().as_list())
  def testConvOutputKroneckerFactorInitFloat64(self):
    """The covariance variable should inherit the float64 input dtype."""
    with tf_ops.Graph().as_default():
      dtype = dtypes.float64_ref
      random_seed.set_random_seed(200)
      tensor = array_ops.ones((2, 3, 4, 5), dtype=dtype, name='a/b/c')
      factor = ff.ConvOutputKroneckerFactor((tensor,))
      factor.instantiate_cov_variables()
      cov = factor.get_cov()
      self.assertEqual(cov.dtype, dtype)
      self.assertEqual([5, 5], cov.get_shape().as_list())
  def testMakeCovarianceUpdateOp(self):
    """Checks one decayed (0.5) update over a fixed 2x2x2x2 gradient tensor."""
    with tf_ops.Graph().as_default(), self.test_session() as sess:
      random_seed.set_random_seed(200)
      tensor = np.arange(1, 17).reshape(2, 2, 2, 2).astype(np.float32)
      factor = ff.ConvOutputKroneckerFactor((array_ops.constant(tensor),))
      factor.instantiate_cov_variables()
      sess.run(tf_variables.global_variables_initializer())
      new_cov = sess.run(factor.make_covariance_update_op(.5))
      self.assertAllClose([[43, 46.5], [46.5, 51.5]], new_cov)
class FullyConnectedMultiKFTest(test.TestCase):
  """Tests ff.FullyConnectedMultiKF shape, dtype, and covariance updates."""
  def testFullyConnectedMultiKFInit(self):
    """Without bias, the covariance side equals the input dimension (3)."""
    with tf_ops.Graph().as_default():
      random_seed.set_random_seed(200)
      tensor = array_ops.ones((2, 3), name='a/b/c')
      factor = ff.FullyConnectedMultiKF((tensor,), has_bias=False)
      factor.instantiate_cov_variables()
      self.assertEqual([3, 3], factor.get_cov().get_shape().as_list())
  def testFullyConnectedMultiKFInitFloat64(self):
    """The covariance variable should inherit the float64 input dtype."""
    with tf_ops.Graph().as_default():
      dtype = dtypes.float64_ref
      random_seed.set_random_seed(200)
      tensor = array_ops.ones((2, 3), dtype=dtype, name='a/b/c')
      factor = ff.FullyConnectedMultiKF((tensor,), has_bias=False)
      factor.instantiate_cov_variables()
      cov = factor.get_cov()
      self.assertEqual(cov.dtype, dtype)
      self.assertEqual([3, 3], cov.get_shape().as_list())
  def testMakeCovarianceUpdateOpWithBias(self):
    """Checks one decayed (0.5) update with the homogeneous bias column."""
    with tf_ops.Graph().as_default(), self.test_session() as sess:
      random_seed.set_random_seed(200)
      tensor = array_ops.constant([[1., 2.], [3., 4.]], name='a/b/c')
      factor = ff.FullyConnectedMultiKF((tensor,), has_bias=True)
      factor.instantiate_cov_variables()
      sess.run(tf_variables.global_variables_initializer())
      new_cov = sess.run(factor.make_covariance_update_op(.5))
      self.assertAllClose([[3, 3.5, 1], [3.5, 5.5, 1.5], [1, 1.5, 1]], new_cov)
  def testMakeCovarianceUpdateOpNoBias(self):
    """Checks one decayed (0.5) update without a bias column."""
    with tf_ops.Graph().as_default(), self.test_session() as sess:
      random_seed.set_random_seed(200)
      tensor = array_ops.constant([[1., 2.], [3., 4.]], name='a/b/c')
      factor = ff.FullyConnectedMultiKF((tensor,))
      factor.instantiate_cov_variables()
      sess.run(tf_variables.global_variables_initializer())
      new_cov = sess.run(factor.make_covariance_update_op(.5))
      self.assertAllClose([[3, 3.5], [3.5, 5.5]], new_cov)
# Run all test cases in this module under the TensorFlow test runner.
if __name__ == '__main__':
  test.main()
| [
"numpy.linalg.eigvals",
"tensorflow.python.ops.array_ops.constant",
"numpy.sum",
"numpy.random.seed",
"tensorflow.python.framework.random_seed.set_random_seed",
"numpy.ones",
"tensorflow.python.framework.constant_op.constant",
"numpy.arange",
"tensorflow.contrib.kfac.python.ops.fisher_factors.NaiveD... | [((1563, 1606), 'tensorflow.contrib.kfac.python.ops.fisher_blocks._package_func', 'fb._package_func', (['(lambda : damping)', 'damping'], {}), '(lambda : damping, damping)\n', (1579, 1606), True, 'from tensorflow.contrib.kfac.python.ops import fisher_blocks as fb\n'), ((33008, 33019), 'tensorflow.python.platform.test.main', 'test.main', ([], {}), '()\n', (33017, 33019), False, 'from tensorflow.python.platform import test\n'), ((4377, 4422), 'tensorflow.python.ops.array_ops.placeholder', 'array_ops.placeholder', (['dtypes.float64', '(3, 1)'], {}), '(dtypes.float64, (3, 1))\n', (4398, 4422), False, 'from tensorflow.python.ops import array_ops\n'), ((4490, 4511), 'tensorflow.python.ops.math_ops.matmul', 'math_ops.matmul', (['w', 'x'], {}), '(w, x)\n', (4505, 4511), False, 'from tensorflow.python.ops import math_ops\n'), ((21842, 21867), 'numpy.linalg.eigvals', 'np.linalg.eigvals', (['matrix'], {}), '(matrix)\n', (21859, 21867), True, 'import numpy as np\n'), ((21886, 21908), 'numpy.sum', 'np.sum', (['(eigvals > atol)'], {}), '(eigvals > atol)\n', (21892, 21908), True, 'import numpy as np\n'), ((3338, 3349), 'numpy.random.seed', 'npr.seed', (['(0)'], {}), '(0)\n', (3346, 3349), True, 'import numpy.random as npr\n'), ((3356, 3388), 'tensorflow.python.framework.random_seed.set_random_seed', 'random_seed.set_random_seed', (['(200)'], {}), '(200)\n', (3383, 3388), False, 'from tensorflow.python.framework import random_seed\n'), ((3400, 3417), 'numpy.random.randn', 'npr.randn', (['(100)', '(3)'], {}), '(100, 3)\n', (3409, 3417), True, 'import numpy.random as npr\n'), ((3700, 3711), 'numpy.random.seed', 'npr.seed', (['(0)'], {}), '(0)\n', (3708, 3711), True, 'import numpy.random as npr\n'), ((3718, 3750), 'tensorflow.python.framework.random_seed.set_random_seed', 'random_seed.set_random_seed', (['(200)'], {}), '(200)\n', (3745, 3750), False, 'from tensorflow.python.framework import random_seed\n'), ((3785, 3802), 
'numpy.random.randn', 'npr.randn', (['(100)', '(3)'], {}), '(100, 3)\n', (3794, 3802), True, 'import numpy.random as npr\n'), ((4072, 4083), 'numpy.random.seed', 'npr.seed', (['(0)'], {}), '(0)\n', (4080, 4083), True, 'import numpy.random as npr\n'), ((4113, 4128), 'numpy.random.randn', 'npr.randn', (['m', 'n'], {}), '(m, n)\n', (4122, 4128), True, 'import numpy.random as npr\n'), ((4520, 4550), 'tensorflow.python.ops.gradients_impl.gradients', 'gradients_impl.gradients', (['y', 'x'], {}), '(y, x)\n', (4544, 4550), False, 'from tensorflow.python.ops import gradients_impl\n'), ((4708, 4738), 'tensorflow.contrib.kfac.python.ops.fisher_factors.scope_string_from_params', 'ff.scope_string_from_params', (['g'], {}), '(g)\n', (4735, 4738), True, 'from tensorflow.contrib.kfac.python.ops import fisher_factors as ff\n'), ((4913, 4934), 'tensorflow.python.ops.array_ops.constant', 'array_ops.constant', (['(1)'], {}), '(1)\n', (4931, 4934), False, 'from tensorflow.python.ops import array_ops\n'), ((4946, 4967), 'tensorflow.python.ops.array_ops.constant', 'array_ops.constant', (['(2)'], {}), '(2)\n', (4964, 4967), False, 'from tensorflow.python.ops import array_ops\n'), ((4990, 5025), 'tensorflow.contrib.kfac.python.ops.fisher_factors.scope_string_from_params', 'ff.scope_string_from_params', (['(x, y)'], {}), '((x, y))\n', (5017, 5025), True, 'from tensorflow.contrib.kfac.python.ops import fisher_factors as ff\n'), ((5181, 5202), 'tensorflow.python.ops.array_ops.constant', 'array_ops.constant', (['(1)'], {}), '(1)\n', (5199, 5202), False, 'from tensorflow.python.ops import array_ops\n'), ((5214, 5235), 'tensorflow.python.ops.array_ops.constant', 'array_ops.constant', (['(2)'], {}), '(2)\n', (5232, 5235), False, 'from tensorflow.python.ops import array_ops\n'), ((5258, 5322), 'tensorflow.contrib.kfac.python.ops.fisher_factors.scope_string_from_params', 'ff.scope_string_from_params', (["[[1, 2, 3], 'foo', True, 4, (x, y)]"], {}), "([[1, 2, 3], 'foo', True, 4, (x, y)])\n", (5285, 
5322), True, 'from tensorflow.contrib.kfac.python.ops import fisher_factors as ff\n'), ((5548, 5569), 'tensorflow.python.ops.array_ops.constant', 'array_ops.constant', (['(1)'], {}), '(1)\n', (5566, 5569), False, 'from tensorflow.python.ops import array_ops\n'), ((5581, 5602), 'tensorflow.python.ops.array_ops.constant', 'array_ops.constant', (['(2)'], {}), '(2)\n', (5599, 5602), False, 'from tensorflow.python.ops import array_ops\n'), ((5949, 5977), 'tensorflow.contrib.kfac.python.ops.fisher_factors.scope_string_from_name', 'ff.scope_string_from_name', (['g'], {}), '(g)\n', (5974, 5977), True, 'from tensorflow.contrib.kfac.python.ops import fisher_factors as ff\n'), ((6248, 6276), 'tensorflow.contrib.kfac.python.ops.fisher_factors.scope_string_from_name', 'ff.scope_string_from_name', (['g'], {}), '(g)\n', (6273, 6276), True, 'from tensorflow.contrib.kfac.python.ops import fisher_factors as ff\n'), ((6472, 6504), 'tensorflow.python.framework.random_seed.set_random_seed', 'random_seed.set_random_seed', (['(200)'], {}), '(200)\n', (6499, 6504), False, 'from tensorflow.python.framework import random_seed\n'), ((6748, 6780), 'tensorflow.python.framework.random_seed.set_random_seed', 'random_seed.set_random_seed', (['(200)'], {}), '(200)\n', (6775, 6780), False, 'from tensorflow.python.framework import random_seed\n'), ((7545, 7619), 'tensorflow.python.framework.ops.get_collection', 'tf_ops.get_collection', (['tf_ops.GraphKeys.GLOBAL_VARIABLES', 'factor_var_scope'], {}), '(tf_ops.GraphKeys.GLOBAL_VARIABLES, factor_var_scope)\n', (7566, 7619), True, 'from tensorflow.python.framework import ops as tf_ops\n'), ((7902, 7934), 'tensorflow.python.framework.random_seed.set_random_seed', 'random_seed.set_random_seed', (['(200)'], {}), '(200)\n', (7929, 7934), False, 'from tensorflow.python.framework import random_seed\n'), ((8412, 8486), 'tensorflow.python.framework.ops.get_collection', 'tf_ops.get_collection', (['tf_ops.GraphKeys.GLOBAL_VARIABLES', 'factor_var_scope'], {}), 
'(tf_ops.GraphKeys.GLOBAL_VARIABLES, factor_var_scope)\n', (8433, 8486), True, 'from tensorflow.python.framework import ops as tf_ops\n'), ((8907, 8939), 'tensorflow.python.framework.random_seed.set_random_seed', 'random_seed.set_random_seed', (['(200)'], {}), '(200)\n', (8934, 8939), False, 'from tensorflow.python.framework import random_seed\n'), ((9183, 9215), 'tensorflow.python.framework.random_seed.set_random_seed', 'random_seed.set_random_seed', (['(200)'], {}), '(200)\n', (9210, 9215), False, 'from tensorflow.python.framework import random_seed\n'), ((9228, 9262), 'numpy.array', 'np.array', (['[[1.0, 2.0], [3.0, 4.0]]'], {}), '([[1.0, 2.0], [3.0, 4.0]])\n', (9236, 9262), True, 'import numpy as np\n'), ((9340, 9385), 'tensorflow.python.ops.array_ops.constant', 'array_ops.constant', (['cov'], {'dtype': 'dtypes.float32'}), '(cov, dtype=dtypes.float32)\n', (9358, 9385), False, 'from tensorflow.python.ops import array_ops\n'), ((10483, 10515), 'tensorflow.python.framework.random_seed.set_random_seed', 'random_seed.set_random_seed', (['(200)'], {}), '(200)\n', (10510, 10515), False, 'from tensorflow.python.framework import random_seed\n'), ((10528, 10562), 'numpy.array', 'np.array', (['[[6.0, 2.0], [2.0, 4.0]]'], {}), '([[6.0, 2.0], [2.0, 4.0]])\n', (10536, 10562), True, 'import numpy as np\n'), ((10640, 10685), 'tensorflow.python.ops.array_ops.constant', 'array_ops.constant', (['cov'], {'dtype': 'dtypes.float32'}), '(cov, dtype=dtypes.float32)\n', (10658, 10685), False, 'from tensorflow.python.ops import array_ops\n'), ((11405, 11437), 'tensorflow.python.framework.random_seed.set_random_seed', 'random_seed.set_random_seed', (['(200)'], {}), '(200)\n', (11432, 11437), False, 'from tensorflow.python.framework import random_seed\n'), ((11450, 11484), 'numpy.array', 'np.array', (['[[5.0, 2.0], [2.0, 4.0]]'], {}), '([[5.0, 2.0], [2.0, 4.0]])\n', (11458, 11484), True, 'import numpy as np\n'), ((11597, 11642), 'tensorflow.python.ops.array_ops.constant', 
'array_ops.constant', (['cov'], {'dtype': 'dtypes.float32'}), '(cov, dtype=dtypes.float32)\n', (11615, 11642), False, 'from tensorflow.python.ops import array_ops\n'), ((12408, 12440), 'tensorflow.python.framework.random_seed.set_random_seed', 'random_seed.set_random_seed', (['(200)'], {}), '(200)\n', (12435, 12440), False, 'from tensorflow.python.framework import random_seed\n'), ((12456, 12492), 'tensorflow.python.ops.array_ops.ones', 'array_ops.ones', (['(2, 3)'], {'name': '"""a/b/c"""'}), "((2, 3), name='a/b/c')\n", (12470, 12492), False, 'from tensorflow.python.ops import array_ops\n'), ((12508, 12536), 'tensorflow.contrib.kfac.python.ops.fisher_factors.FullFactor', 'ff.FullFactor', (['(tensor,)', '(32)'], {}), '((tensor,), 32)\n', (12521, 12536), True, 'from tensorflow.contrib.kfac.python.ops import fisher_factors as ff\n'), ((12766, 12798), 'tensorflow.python.framework.random_seed.set_random_seed', 'random_seed.set_random_seed', (['(200)'], {}), '(200)\n', (12793, 12798), False, 'from tensorflow.python.framework import random_seed\n'), ((12814, 12863), 'tensorflow.python.ops.array_ops.ones', 'array_ops.ones', (['(2, 3)'], {'dtype': 'dtype', 'name': '"""a/b/c"""'}), "((2, 3), dtype=dtype, name='a/b/c')\n", (12828, 12863), False, 'from tensorflow.python.ops import array_ops\n'), ((12879, 12907), 'tensorflow.contrib.kfac.python.ops.fisher_factors.FullFactor', 'ff.FullFactor', (['(tensor,)', '(32)'], {}), '((tensor,), 32)\n', (12892, 12907), True, 'from tensorflow.contrib.kfac.python.ops import fisher_factors as ff\n'), ((13191, 13223), 'tensorflow.python.framework.random_seed.set_random_seed', 'random_seed.set_random_seed', (['(200)'], {}), '(200)\n', (13218, 13223), False, 'from tensorflow.python.framework import random_seed\n'), ((13239, 13283), 'tensorflow.python.ops.array_ops.constant', 'array_ops.constant', (['[1.0, 2.0]'], {'name': '"""a/b/c"""'}), "([1.0, 2.0], name='a/b/c')\n", (13257, 13283), False, 'from tensorflow.python.ops import array_ops\n'), 
((13297, 13324), 'tensorflow.contrib.kfac.python.ops.fisher_factors.FullFactor', 'ff.FullFactor', (['(tensor,)', '(2)'], {}), '((tensor,), 2)\n', (13310, 13324), True, 'from tensorflow.contrib.kfac.python.ops import fisher_factors as ff\n'), ((13686, 13718), 'tensorflow.python.framework.random_seed.set_random_seed', 'random_seed.set_random_seed', (['(200)'], {}), '(200)\n', (13713, 13718), False, 'from tensorflow.python.framework import random_seed\n'), ((13734, 13770), 'tensorflow.python.ops.array_ops.ones', 'array_ops.ones', (['(2, 3)'], {'name': '"""a/b/c"""'}), "((2, 3), name='a/b/c')\n", (13748, 13770), False, 'from tensorflow.python.ops import array_ops\n'), ((13786, 13823), 'tensorflow.contrib.kfac.python.ops.fisher_factors.NaiveDiagonalFactor', 'ff.NaiveDiagonalFactor', (['(tensor,)', '(32)'], {}), '((tensor,), 32)\n', (13808, 13823), True, 'from tensorflow.contrib.kfac.python.ops import fisher_factors as ff\n'), ((14066, 14098), 'tensorflow.python.framework.random_seed.set_random_seed', 'random_seed.set_random_seed', (['(200)'], {}), '(200)\n', (14093, 14098), False, 'from tensorflow.python.framework import random_seed\n'), ((14114, 14163), 'tensorflow.python.ops.array_ops.ones', 'array_ops.ones', (['(2, 3)'], {'dtype': 'dtype', 'name': '"""a/b/c"""'}), "((2, 3), dtype=dtype, name='a/b/c')\n", (14128, 14163), False, 'from tensorflow.python.ops import array_ops\n'), ((14179, 14216), 'tensorflow.contrib.kfac.python.ops.fisher_factors.NaiveDiagonalFactor', 'ff.NaiveDiagonalFactor', (['(tensor,)', '(32)'], {}), '((tensor,), 32)\n', (14201, 14216), True, 'from tensorflow.contrib.kfac.python.ops import fisher_factors as ff\n'), ((14504, 14536), 'tensorflow.python.framework.random_seed.set_random_seed', 'random_seed.set_random_seed', (['(200)'], {}), '(200)\n', (14531, 14536), False, 'from tensorflow.python.framework import random_seed\n'), ((14552, 14596), 'tensorflow.python.ops.array_ops.constant', 'array_ops.constant', (['[1.0, 2.0]'], {'name': 
'"""a/b/c"""'}), "([1.0, 2.0], name='a/b/c')\n", (14570, 14596), False, 'from tensorflow.python.ops import array_ops\n'), ((14610, 14646), 'tensorflow.contrib.kfac.python.ops.fisher_factors.NaiveDiagonalFactor', 'ff.NaiveDiagonalFactor', (['(tensor,)', '(2)'], {}), '((tensor,), 2)\n', (14632, 14646), True, 'from tensorflow.contrib.kfac.python.ops import fisher_factors as ff\n'), ((15011, 15046), 'tensorflow.python.ops.array_ops.constant', 'array_ops.constant', (['[[0], [1], [4]]'], {}), '([[0], [1], [4]])\n', (15029, 15046), False, 'from tensorflow.python.ops import array_ops\n'), ((15083, 15138), 'tensorflow.contrib.kfac.python.ops.fisher_factors.EmbeddingInputKroneckerFactor', 'ff.EmbeddingInputKroneckerFactor', (['input_ids', 'vocab_size'], {}), '(input_ids, vocab_size)\n', (15115, 15138), True, 'from tensorflow.contrib.kfac.python.ops import fisher_factors as ff\n'), ((15364, 15399), 'tensorflow.python.ops.array_ops.constant', 'array_ops.constant', (['[[0], [1], [4]]'], {}), '([[0], [1], [4]])\n', (15382, 15399), False, 'from tensorflow.python.ops import array_ops\n'), ((15436, 15491), 'tensorflow.contrib.kfac.python.ops.fisher_factors.EmbeddingInputKroneckerFactor', 'ff.EmbeddingInputKroneckerFactor', (['input_ids', 'vocab_size'], {}), '(input_ids, vocab_size)\n', (15468, 15491), True, 'from tensorflow.contrib.kfac.python.ops import fisher_factors as ff\n'), ((16323, 16415), 'tensorflow.python.ops.random_ops.random_uniform', 'random_ops.random_uniform', (['[self.batch_size, self.height, self.width, self.in_channels]'], {}), '([self.batch_size, self.height, self.width, self.\n in_channels])\n', (16348, 16415), False, 'from tensorflow.python.ops import random_ops\n'), ((16665, 16791), 'tensorflow.contrib.kfac.python.ops.fisher_factors.ConvDiagonalFactor', 'ff.ConvDiagonalFactor', (['inputs', 'outputs_grads', 'self.kernel_shape', 'self.strides', 'self.padding'], {'data_format': 'self.data_format'}), '(inputs, outputs_grads, self.kernel_shape, self.\n strides, 
self.padding, data_format=self.data_format)\n', (16686, 16791), True, 'from tensorflow.contrib.kfac.python.ops import fisher_factors as ff\n'), ((17343, 17418), 'numpy.random.randn', 'np.random.randn', (['(1)', 'self.kernel_height', 'self.kernel_width', 'self.in_channels'], {}), '(1, self.kernel_height, self.kernel_width, self.in_channels)\n', (17358, 17418), True, 'import numpy as np\n'), ((17510, 17553), 'numpy.random.randn', 'np.random.randn', (['(1)', '(1)', '(1)', 'self.out_channels'], {}), '(1, 1, 1, self.out_channels)\n', (17525, 17553), True, 'import numpy as np\n'), ((18458, 18550), 'tensorflow.python.ops.random_ops.random_uniform', 'random_ops.random_uniform', (['[self.batch_size, self.height, self.width, self.in_channels]'], {}), '([self.batch_size, self.height, self.width, self.\n in_channels])\n', (18483, 18550), False, 'from tensorflow.python.ops import random_ops\n'), ((18800, 18941), 'tensorflow.contrib.kfac.python.ops.fisher_factors.ConvDiagonalFactor', 'ff.ConvDiagonalFactor', (['inputs', 'outputs_grads', 'self.kernel_shape', 'self.strides', 'self.padding'], {'data_format': 'self.data_format', 'has_bias': '(True)'}), '(inputs, outputs_grads, self.kernel_shape, self.\n strides, self.padding, data_format=self.data_format, has_bias=True)\n', (18821, 18941), True, 'from tensorflow.contrib.kfac.python.ops import fisher_factors as ff\n'), ((19860, 19892), 'tensorflow.python.framework.random_seed.set_random_seed', 'random_seed.set_random_seed', (['(200)'], {}), '(200)\n', (19887, 19892), False, 'from tensorflow.python.framework import random_seed\n'), ((19908, 19957), 'tensorflow.python.ops.array_ops.ones', 'array_ops.ones', (['(2, 3)'], {'dtype': 'dtype', 'name': '"""a/b/c"""'}), "((2, 3), dtype=dtype, name='a/b/c')\n", (19922, 19957), False, 'from tensorflow.python.ops import array_ops\n'), ((19973, 20035), 'tensorflow.contrib.kfac.python.ops.fisher_factors.FullyConnectedKroneckerFactor', 'ff.FullyConnectedKroneckerFactor', (['(tensor,)'], {'has_bias': 
'has_bias'}), '((tensor,), has_bias=has_bias)\n', (20005, 20035), True, 'from tensorflow.contrib.kfac.python.ops import fisher_factors as ff\n'), ((20723, 20755), 'tensorflow.python.framework.random_seed.set_random_seed', 'random_seed.set_random_seed', (['(200)'], {}), '(200)\n', (20750, 20755), False, 'from tensorflow.python.framework import random_seed\n'), ((20771, 20829), 'tensorflow.python.ops.array_ops.constant', 'array_ops.constant', (['[[1.0, 2.0], [3.0, 4.0]]'], {'name': '"""a/b/c"""'}), "([[1.0, 2.0], [3.0, 4.0]], name='a/b/c')\n", (20789, 20829), False, 'from tensorflow.python.ops import array_ops\n'), ((20841, 20899), 'tensorflow.contrib.kfac.python.ops.fisher_factors.FullyConnectedKroneckerFactor', 'ff.FullyConnectedKroneckerFactor', (['(tensor,)'], {'has_bias': '(True)'}), '((tensor,), has_bias=True)\n', (20873, 20899), True, 'from tensorflow.contrib.kfac.python.ops import fisher_factors as ff\n'), ((21265, 21297), 'tensorflow.python.framework.random_seed.set_random_seed', 'random_seed.set_random_seed', (['(200)'], {}), '(200)\n', (21292, 21297), False, 'from tensorflow.python.framework import random_seed\n'), ((21313, 21371), 'tensorflow.python.ops.array_ops.constant', 'array_ops.constant', (['[[1.0, 2.0], [3.0, 4.0]]'], {'name': '"""a/b/c"""'}), "([[1.0, 2.0], [3.0, 4.0]], name='a/b/c')\n", (21331, 21371), False, 'from tensorflow.python.ops import array_ops\n'), ((21383, 21426), 'tensorflow.contrib.kfac.python.ops.fisher_factors.FullyConnectedKroneckerFactor', 'ff.FullyConnectedKroneckerFactor', (['(tensor,)'], {}), '((tensor,))\n', (21415, 21426), True, 'from tensorflow.contrib.kfac.python.ops import fisher_factors as ff\n'), ((26323, 26366), 'tensorflow.python.ops.array_ops.ones', 'array_ops.ones', (['(64, 1, 2, 3)'], {'name': '"""a/b/c"""'}), "((64, 1, 2, 3), name='a/b/c')\n", (26337, 26366), False, 'from tensorflow.python.ops import array_ops\n'), ((26382, 26487), 'tensorflow.contrib.kfac.python.ops.fisher_factors.ConvInputKroneckerFactor', 
'ff.ConvInputKroneckerFactor', ([], {'inputs': 'tensor', 'filter_shape': '(1, 2, 3, 4)', 'padding': '"""SAME"""', 'has_bias': '(False)'}), "(inputs=tensor, filter_shape=(1, 2, 3, 4),\n padding='SAME', has_bias=False)\n", (26409, 26487), True, 'from tensorflow.contrib.kfac.python.ops import fisher_factors as ff\n'), ((26776, 26819), 'tensorflow.python.ops.array_ops.ones', 'array_ops.ones', (['(64, 1, 2, 3)'], {'name': '"""a/b/c"""'}), "((64, 1, 2, 3), name='a/b/c')\n", (26790, 26819), False, 'from tensorflow.python.ops import array_ops\n'), ((26835, 26933), 'tensorflow.contrib.kfac.python.ops.fisher_factors.ConvInputKroneckerFactor', 'ff.ConvInputKroneckerFactor', (['tensor'], {'filter_shape': '(1, 2, 3, 4)', 'padding': '"""SAME"""', 'has_bias': '(True)'}), "(tensor, filter_shape=(1, 2, 3, 4), padding=\n 'SAME', has_bias=True)\n", (26862, 26933), True, 'from tensorflow.contrib.kfac.python.ops import fisher_factors as ff\n'), ((27239, 27304), 'tensorflow.python.ops.array_ops.ones', 'array_ops.ones', (['(64, 1, 2, 3)'], {'name': '"""a/b/c"""', 'dtype': 'dtypes.float64'}), "((64, 1, 2, 3), name='a/b/c', dtype=dtypes.float64)\n", (27253, 27304), False, 'from tensorflow.python.ops import array_ops\n'), ((27320, 27418), 'tensorflow.contrib.kfac.python.ops.fisher_factors.ConvInputKroneckerFactor', 'ff.ConvInputKroneckerFactor', (['tensor'], {'filter_shape': '(1, 2, 3, 4)', 'padding': '"""SAME"""', 'has_bias': '(True)'}), "(tensor, filter_shape=(1, 2, 3, 4), padding=\n 'SAME', has_bias=True)\n", (27347, 27418), True, 'from tensorflow.contrib.kfac.python.ops import fisher_factors as ff\n'), ((27945, 28043), 'tensorflow.contrib.kfac.python.ops.fisher_factors.ConvInputKroneckerFactor', 'ff.ConvInputKroneckerFactor', (['tensor'], {'filter_shape': '(1, 1, 1, 1)', 'padding': '"""SAME"""', 'has_bias': '(True)'}), "(tensor, filter_shape=(1, 1, 1, 1), padding=\n 'SAME', has_bias=True)\n", (27972, 28043), True, 'from tensorflow.contrib.kfac.python.ops import fisher_factors as ff\n'), 
((28689, 28767), 'tensorflow.contrib.kfac.python.ops.fisher_factors.ConvInputKroneckerFactor', 'ff.ConvInputKroneckerFactor', (['tensor'], {'filter_shape': '(1, 1, 1, 1)', 'padding': '"""SAME"""'}), "(tensor, filter_shape=(1, 1, 1, 1), padding='SAME')\n", (28716, 28767), True, 'from tensorflow.contrib.kfac.python.ops import fisher_factors as ff\n'), ((29840, 29872), 'tensorflow.python.framework.random_seed.set_random_seed', 'random_seed.set_random_seed', (['(200)'], {}), '(200)\n', (29867, 29872), False, 'from tensorflow.python.framework import random_seed\n'), ((29888, 29930), 'tensorflow.python.ops.array_ops.ones', 'array_ops.ones', (['(2, 3, 4, 5)'], {'name': '"""a/b/c"""'}), "((2, 3, 4, 5), name='a/b/c')\n", (29902, 29930), False, 'from tensorflow.python.ops import array_ops\n'), ((29946, 29985), 'tensorflow.contrib.kfac.python.ops.fisher_factors.ConvOutputKroneckerFactor', 'ff.ConvOutputKroneckerFactor', (['(tensor,)'], {}), '((tensor,))\n', (29974, 29985), True, 'from tensorflow.contrib.kfac.python.ops import fisher_factors as ff\n'), ((30230, 30262), 'tensorflow.python.framework.random_seed.set_random_seed', 'random_seed.set_random_seed', (['(200)'], {}), '(200)\n', (30257, 30262), False, 'from tensorflow.python.framework import random_seed\n'), ((30278, 30333), 'tensorflow.python.ops.array_ops.ones', 'array_ops.ones', (['(2, 3, 4, 5)'], {'dtype': 'dtype', 'name': '"""a/b/c"""'}), "((2, 3, 4, 5), dtype=dtype, name='a/b/c')\n", (30292, 30333), False, 'from tensorflow.python.ops import array_ops\n'), ((30349, 30388), 'tensorflow.contrib.kfac.python.ops.fisher_factors.ConvOutputKroneckerFactor', 'ff.ConvOutputKroneckerFactor', (['(tensor,)'], {}), '((tensor,))\n', (30377, 30388), True, 'from tensorflow.contrib.kfac.python.ops import fisher_factors as ff\n'), ((30672, 30704), 'tensorflow.python.framework.random_seed.set_random_seed', 'random_seed.set_random_seed', (['(200)'], {}), '(200)\n', (30699, 30704), False, 'from tensorflow.python.framework import 
random_seed\n'), ((31217, 31249), 'tensorflow.python.framework.random_seed.set_random_seed', 'random_seed.set_random_seed', (['(200)'], {}), '(200)\n', (31244, 31249), False, 'from tensorflow.python.framework import random_seed\n'), ((31265, 31301), 'tensorflow.python.ops.array_ops.ones', 'array_ops.ones', (['(2, 3)'], {'name': '"""a/b/c"""'}), "((2, 3), name='a/b/c')\n", (31279, 31301), False, 'from tensorflow.python.ops import array_ops\n'), ((31317, 31368), 'tensorflow.contrib.kfac.python.ops.fisher_factors.FullyConnectedMultiKF', 'ff.FullyConnectedMultiKF', (['(tensor,)'], {'has_bias': '(False)'}), '((tensor,), has_bias=False)\n', (31341, 31368), True, 'from tensorflow.contrib.kfac.python.ops import fisher_factors as ff\n'), ((31609, 31641), 'tensorflow.python.framework.random_seed.set_random_seed', 'random_seed.set_random_seed', (['(200)'], {}), '(200)\n', (31636, 31641), False, 'from tensorflow.python.framework import random_seed\n'), ((31657, 31706), 'tensorflow.python.ops.array_ops.ones', 'array_ops.ones', (['(2, 3)'], {'dtype': 'dtype', 'name': '"""a/b/c"""'}), "((2, 3), dtype=dtype, name='a/b/c')\n", (31671, 31706), False, 'from tensorflow.python.ops import array_ops\n'), ((31722, 31773), 'tensorflow.contrib.kfac.python.ops.fisher_factors.FullyConnectedMultiKF', 'ff.FullyConnectedMultiKF', (['(tensor,)'], {'has_bias': '(False)'}), '((tensor,), has_bias=False)\n', (31746, 31773), True, 'from tensorflow.contrib.kfac.python.ops import fisher_factors as ff\n'), ((32065, 32097), 'tensorflow.python.framework.random_seed.set_random_seed', 'random_seed.set_random_seed', (['(200)'], {}), '(200)\n', (32092, 32097), False, 'from tensorflow.python.framework import random_seed\n'), ((32113, 32171), 'tensorflow.python.ops.array_ops.constant', 'array_ops.constant', (['[[1.0, 2.0], [3.0, 4.0]]'], {'name': '"""a/b/c"""'}), "([[1.0, 2.0], [3.0, 4.0]], name='a/b/c')\n", (32131, 32171), False, 'from tensorflow.python.ops import array_ops\n'), ((32183, 32233), 
'tensorflow.contrib.kfac.python.ops.fisher_factors.FullyConnectedMultiKF', 'ff.FullyConnectedMultiKF', (['(tensor,)'], {'has_bias': '(True)'}), '((tensor,), has_bias=True)\n', (32207, 32233), True, 'from tensorflow.contrib.kfac.python.ops import fisher_factors as ff\n'), ((32599, 32631), 'tensorflow.python.framework.random_seed.set_random_seed', 'random_seed.set_random_seed', (['(200)'], {}), '(200)\n', (32626, 32631), False, 'from tensorflow.python.framework import random_seed\n'), ((32647, 32705), 'tensorflow.python.ops.array_ops.constant', 'array_ops.constant', (['[[1.0, 2.0], [3.0, 4.0]]'], {'name': '"""a/b/c"""'}), "([[1.0, 2.0], [3.0, 4.0]], name='a/b/c')\n", (32665, 32705), False, 'from tensorflow.python.ops import array_ops\n'), ((32717, 32752), 'tensorflow.contrib.kfac.python.ops.fisher_factors.FullyConnectedMultiKF', 'ff.FullyConnectedMultiKF', (['(tensor,)'], {}), '((tensor,))\n', (32741, 32752), True, 'from tensorflow.contrib.kfac.python.ops import fisher_factors as ff\n'), ((3445, 3466), 'tensorflow.python.ops.array_ops.constant', 'array_ops.constant', (['x'], {}), '(x)\n', (3463, 3466), False, 'from tensorflow.python.ops import array_ops\n'), ((3483, 3497), 'numpy.dot', 'np.dot', (['x.T', 'x'], {}), '(x.T, x)\n', (3489, 3497), True, 'import numpy as np\n'), ((3830, 3851), 'tensorflow.python.ops.array_ops.constant', 'array_ops.constant', (['x'], {}), '(x)\n', (3848, 3851), False, 'from tensorflow.python.ops import array_ops\n'), ((3891, 3905), 'numpy.dot', 'np.dot', (['x.T', 'x'], {}), '(x.T, x)\n', (3897, 3905), True, 'import numpy as np\n'), ((4161, 4182), 'tensorflow.python.ops.array_ops.constant', 'array_ops.constant', (['a'], {}), '(a)\n', (4179, 4182), False, 'from tensorflow.python.ops import array_ops\n'), ((5707, 5784), 'tensorflow.contrib.kfac.python.ops.fisher_factors.scope_string_from_params', 'ff.scope_string_from_params', (["[[1, 2, 3], 'foo', True, 4, (x, y), unsupported]"], {}), "([[1, 2, 3], 'foo', True, 4, (x, y), unsupported])\n", 
(5734, 5784), True, 'from tensorflow.contrib.kfac.python.ops import fisher_factors as ff\n'), ((6151, 6185), 'tensorflow.contrib.kfac.python.ops.fisher_factors.scalar_or_tensor_to_string', 'ff.scalar_or_tensor_to_string', (['(5.0)'], {}), '(5.0)\n', (6180, 6185), True, 'from tensorflow.contrib.kfac.python.ops import fisher_factors as ff\n'), ((6300, 6332), 'tensorflow.contrib.kfac.python.ops.fisher_factors.scalar_or_tensor_to_string', 'ff.scalar_or_tensor_to_string', (['g'], {}), '(g)\n', (6329, 6332), True, 'from tensorflow.contrib.kfac.python.ops import fisher_factors as ff\n'), ((9785, 9828), 'tensorflow.python.ops.variables.global_variables_initializer', 'tf_variables.global_variables_initializer', ([], {}), '()\n', (9826, 9828), True, 'from tensorflow.python.ops import variables as tf_variables\n'), ((11022, 11065), 'tensorflow.python.ops.variables.global_variables_initializer', 'tf_variables.global_variables_initializer', ([], {}), '()\n', (11063, 11065), True, 'from tensorflow.python.ops import variables as tf_variables\n'), ((11869, 11912), 'tensorflow.python.ops.variables.global_variables_initializer', 'tf_variables.global_variables_initializer', ([], {}), '()\n', (11910, 11912), True, 'from tensorflow.python.ops import variables as tf_variables\n'), ((12272, 12290), 'numpy.linalg.inv', 'np.linalg.inv', (['cov'], {}), '(cov)\n', (12285, 12290), True, 'import numpy as np\n'), ((13382, 13425), 'tensorflow.python.ops.variables.global_variables_initializer', 'tf_variables.global_variables_initializer', ([], {}), '()\n', (13423, 13425), True, 'from tensorflow.python.ops import variables as tf_variables\n'), ((14704, 14747), 'tensorflow.python.ops.variables.global_variables_initializer', 'tf_variables.global_variables_initializer', ([], {}), '()\n', (14745, 14747), True, 'from tensorflow.python.ops import variables as tf_variables\n'), ((16456, 16586), 'tensorflow.python.ops.random_ops.random_uniform', 'random_ops.random_uniform', (['[self.batch_size, 
self.height // self.strides[1], self.width // self.\n strides[2], self.out_channels]'], {}), '([self.batch_size, self.height // self.strides[1],\n self.width // self.strides[2], self.out_channels])\n', (16481, 16586), False, 'from tensorflow.python.ops import random_ops\n'), ((17691, 17719), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['inputs'], {}), '(inputs)\n', (17711, 17719), False, 'from tensorflow.python.framework import constant_op\n'), ((18591, 18721), 'tensorflow.python.ops.random_ops.random_uniform', 'random_ops.random_uniform', (['[self.batch_size, self.height // self.strides[1], self.width // self.\n strides[2], self.out_channels]'], {}), '([self.batch_size, self.height // self.strides[1],\n self.width // self.strides[2], self.out_channels])\n', (18616, 18721), False, 'from tensorflow.python.ops import random_ops\n'), ((20957, 21000), 'tensorflow.python.ops.variables.global_variables_initializer', 'tf_variables.global_variables_initializer', ([], {}), '()\n', (20998, 21000), True, 'from tensorflow.python.ops import variables as tf_variables\n'), ((21484, 21527), 'tensorflow.python.ops.variables.global_variables_initializer', 'tf_variables.global_variables_initializer', ([], {}), '()\n', (21525, 21527), True, 'from tensorflow.python.ops import variables as tf_variables\n'), ((28107, 28150), 'tensorflow.python.ops.variables.global_variables_initializer', 'tf_variables.global_variables_initializer', ([], {}), '()\n', (28148, 28150), True, 'from tensorflow.python.ops import variables as tf_variables\n'), ((28836, 28879), 'tensorflow.python.ops.variables.global_variables_initializer', 'tf_variables.global_variables_initializer', ([], {}), '()\n', (28877, 28879), True, 'from tensorflow.python.ops import variables as tf_variables\n'), ((30908, 30951), 'tensorflow.python.ops.variables.global_variables_initializer', 'tf_variables.global_variables_initializer', ([], {}), '()\n', (30949, 30951), True, 'from tensorflow.python.ops 
import variables as tf_variables\n'), ((32291, 32334), 'tensorflow.python.ops.variables.global_variables_initializer', 'tf_variables.global_variables_initializer', ([], {}), '()\n', (32332, 32334), True, 'from tensorflow.python.ops import variables as tf_variables\n'), ((32810, 32853), 'tensorflow.python.ops.variables.global_variables_initializer', 'tf_variables.global_variables_initializer', ([], {}), '()\n', (32851, 32853), True, 'from tensorflow.python.ops import variables as tf_variables\n'), ((3274, 3288), 'tensorflow.python.framework.ops.Graph', 'tf_ops.Graph', ([], {}), '()\n', (3286, 3288), True, 'from tensorflow.python.framework import ops as tf_ops\n'), ((3636, 3650), 'tensorflow.python.framework.ops.Graph', 'tf_ops.Graph', ([], {}), '()\n', (3648, 3650), True, 'from tensorflow.python.framework import ops as tf_ops\n'), ((4008, 4022), 'tensorflow.python.framework.ops.Graph', 'tf_ops.Graph', ([], {}), '()\n', (4020, 4022), True, 'from tensorflow.python.framework import ops as tf_ops\n'), ((4216, 4231), 'numpy.ones', 'np.ones', (['(m, 1)'], {}), '((m, 1))\n', (4223, 4231), True, 'import numpy as np\n'), ((4450, 4468), 'numpy.random.RandomState', 'npr.RandomState', (['(0)'], {}), '(0)\n', (4465, 4468), True, 'import numpy.random as npr\n'), ((4628, 4642), 'tensorflow.python.framework.ops.Graph', 'tf_ops.Graph', ([], {}), '()\n', (4640, 4642), True, 'from tensorflow.python.framework import ops as tf_ops\n'), ((4874, 4888), 'tensorflow.python.framework.ops.Graph', 'tf_ops.Graph', ([], {}), '()\n', (4886, 4888), True, 'from tensorflow.python.framework import ops as tf_ops\n'), ((5142, 5156), 'tensorflow.python.framework.ops.Graph', 'tf_ops.Graph', ([], {}), '()\n', (5154, 5156), True, 'from tensorflow.python.framework import ops as tf_ops\n'), ((5509, 5523), 'tensorflow.python.framework.ops.Graph', 'tf_ops.Graph', ([], {}), '()\n', (5521, 5523), True, 'from tensorflow.python.framework import ops as tf_ops\n'), ((5869, 5883), 
'tensorflow.python.framework.ops.Graph', 'tf_ops.Graph', ([], {}), '()\n', (5881, 5883), True, 'from tensorflow.python.framework import ops as tf_ops\n'), ((6099, 6113), 'tensorflow.python.framework.ops.Graph', 'tf_ops.Graph', ([], {}), '()\n', (6111, 6113), True, 'from tensorflow.python.framework import ops as tf_ops\n'), ((6437, 6451), 'tensorflow.python.framework.ops.Graph', 'tf_ops.Graph', ([], {}), '()\n', (6449, 6451), True, 'from tensorflow.python.framework import ops as tf_ops\n'), ((6713, 6727), 'tensorflow.python.framework.ops.Graph', 'tf_ops.Graph', ([], {}), '()\n', (6725, 6727), True, 'from tensorflow.python.framework import ops as tf_ops\n'), ((7867, 7881), 'tensorflow.python.framework.ops.Graph', 'tf_ops.Graph', ([], {}), '()\n', (7879, 7881), True, 'from tensorflow.python.framework import ops as tf_ops\n'), ((8872, 8886), 'tensorflow.python.framework.ops.Graph', 'tf_ops.Graph', ([], {}), '()\n', (8884, 8886), True, 'from tensorflow.python.framework import ops as tf_ops\n'), ((9119, 9133), 'tensorflow.python.framework.ops.Graph', 'tf_ops.Graph', ([], {}), '()\n', (9131, 9133), True, 'from tensorflow.python.framework import ops as tf_ops\n'), ((10419, 10433), 'tensorflow.python.framework.ops.Graph', 'tf_ops.Graph', ([], {}), '()\n', (10431, 10433), True, 'from tensorflow.python.framework import ops as tf_ops\n'), ((11341, 11355), 'tensorflow.python.framework.ops.Graph', 'tf_ops.Graph', ([], {}), '()\n', (11353, 11355), True, 'from tensorflow.python.framework import ops as tf_ops\n'), ((12096, 12145), 'tensorflow.contrib.kfac.python.ops.fisher_factors.inverse_initializer', 'ff.inverse_initializer', (['cov.shape', 'dtypes.float32'], {}), '(cov.shape, dtypes.float32)\n', (12118, 12145), True, 'from tensorflow.contrib.kfac.python.ops import fisher_factors as ff\n'), ((12373, 12387), 'tensorflow.python.framework.ops.Graph', 'tf_ops.Graph', ([], {}), '()\n', (12385, 12387), True, 'from tensorflow.python.framework import ops as tf_ops\n'), ((12698, 12712), 
'tensorflow.python.framework.ops.Graph', 'tf_ops.Graph', ([], {}), '()\n', (12710, 12712), True, 'from tensorflow.python.framework import ops as tf_ops\n'), ((13127, 13141), 'tensorflow.python.framework.ops.Graph', 'tf_ops.Graph', ([], {}), '()\n', (13139, 13141), True, 'from tensorflow.python.framework import ops as tf_ops\n'), ((13651, 13665), 'tensorflow.python.framework.ops.Graph', 'tf_ops.Graph', ([], {}), '()\n', (13663, 13665), True, 'from tensorflow.python.framework import ops as tf_ops\n'), ((13998, 14012), 'tensorflow.python.framework.ops.Graph', 'tf_ops.Graph', ([], {}), '()\n', (14010, 14012), True, 'from tensorflow.python.framework import ops as tf_ops\n'), ((14440, 14454), 'tensorflow.python.framework.ops.Graph', 'tf_ops.Graph', ([], {}), '()\n', (14452, 14454), True, 'from tensorflow.python.framework import ops as tf_ops\n'), ((14964, 14978), 'tensorflow.python.framework.ops.Graph', 'tf_ops.Graph', ([], {}), '()\n', (14976, 14978), True, 'from tensorflow.python.framework import ops as tf_ops\n'), ((15317, 15331), 'tensorflow.python.framework.ops.Graph', 'tf_ops.Graph', ([], {}), '()\n', (15329, 15331), True, 'from tensorflow.python.framework import ops as tf_ops\n'), ((15651, 15694), 'tensorflow.python.ops.variables.global_variables_initializer', 'tf_variables.global_variables_initializer', ([], {}), '()\n', (15692, 15694), True, 'from tensorflow.python.ops import variables as tf_variables\n'), ((16279, 16293), 'tensorflow.python.framework.ops.Graph', 'tf_ops.Graph', ([], {}), '()\n', (16291, 16293), True, 'from tensorflow.python.framework import ops as tf_ops\n'), ((17186, 17200), 'tensorflow.python.framework.ops.Graph', 'tf_ops.Graph', ([], {}), '()\n', (17198, 17200), True, 'from tensorflow.python.framework import ops as tf_ops\n'), ((17722, 17756), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['outputs_grad'], {}), '(outputs_grad)\n', (17742, 17756), False, 'from tensorflow.python.framework import constant_op\n'), 
((18172, 18215), 'tensorflow.python.ops.variables.global_variables_initializer', 'tf_variables.global_variables_initializer', ([], {}), '()\n', (18213, 18215), True, 'from tensorflow.python.ops import variables as tf_variables\n'), ((18414, 18428), 'tensorflow.python.framework.ops.Graph', 'tf_ops.Graph', ([], {}), '()\n', (18426, 18428), True, 'from tensorflow.python.framework import ops as tf_ops\n'), ((19444, 19487), 'tensorflow.python.ops.variables.global_variables_initializer', 'tf_variables.global_variables_initializer', ([], {}), '()\n', (19485, 19487), True, 'from tensorflow.python.ops import variables as tf_variables\n'), ((19825, 19839), 'tensorflow.python.framework.ops.Graph', 'tf_ops.Graph', ([], {}), '()\n', (19837, 19839), True, 'from tensorflow.python.framework import ops as tf_ops\n'), ((20659, 20673), 'tensorflow.python.framework.ops.Graph', 'tf_ops.Graph', ([], {}), '()\n', (20671, 20673), True, 'from tensorflow.python.framework import ops as tf_ops\n'), ((21201, 21215), 'tensorflow.python.framework.ops.Graph', 'tf_ops.Graph', ([], {}), '()\n', (21213, 21215), True, 'from tensorflow.python.framework import ops as tf_ops\n'), ((22176, 22190), 'tensorflow.python.framework.ops.Graph', 'tf_ops.Graph', ([], {}), '()\n', (22188, 22190), True, 'from tensorflow.python.framework import ops as tf_ops\n'), ((22352, 22437), 'tensorflow.python.ops.random_ops.random_uniform', 'random_ops.random_uniform', (['(batch_size, width, width, width, in_channels)'], {'seed': '(0)'}), '((batch_size, width, width, width, in_channels),\n seed=0)\n', (22377, 22437), False, 'from tensorflow.python.ops import random_ops\n'), ((23027, 23070), 'tensorflow.python.ops.variables.global_variables_initializer', 'tf_variables.global_variables_initializer', ([], {}), '()\n', (23068, 23070), True, 'from tensorflow.python.ops import variables as tf_variables\n'), ((23353, 23367), 'tensorflow.python.framework.ops.Graph', 'tf_ops.Graph', ([], {}), '()\n', (23365, 23367), True, 'from 
tensorflow.python.framework import ops as tf_ops\n'), ((23529, 23603), 'tensorflow.python.ops.random_ops.random_uniform', 'random_ops.random_uniform', (['(batch_size, width, width, in_channels)'], {'seed': '(0)'}), '((batch_size, width, width, in_channels), seed=0)\n', (23554, 23603), False, 'from tensorflow.python.ops import random_ops\n'), ((24148, 24191), 'tensorflow.python.ops.variables.global_variables_initializer', 'tf_variables.global_variables_initializer', ([], {}), '()\n', (24189, 24191), True, 'from tensorflow.python.ops import variables as tf_variables\n'), ((24444, 24458), 'tensorflow.python.framework.ops.Graph', 'tf_ops.Graph', ([], {}), '()\n', (24456, 24458), True, 'from tensorflow.python.framework import ops as tf_ops\n'), ((24620, 24694), 'tensorflow.python.ops.random_ops.random_uniform', 'random_ops.random_uniform', (['(batch_size, width, width, in_channels)'], {'seed': '(0)'}), '((batch_size, width, width, in_channels), seed=0)\n', (24645, 24694), False, 'from tensorflow.python.ops import random_ops\n'), ((25006, 25049), 'tensorflow.python.ops.variables.global_variables_initializer', 'tf_variables.global_variables_initializer', ([], {}), '()\n', (25047, 25049), True, 'from tensorflow.python.ops import variables as tf_variables\n'), ((25288, 25302), 'tensorflow.python.framework.ops.Graph', 'tf_ops.Graph', ([], {}), '()\n', (25300, 25302), True, 'from tensorflow.python.framework import ops as tf_ops\n'), ((25461, 25535), 'tensorflow.python.ops.random_ops.random_uniform', 'random_ops.random_uniform', (['(batch_size, width, width, in_channels)'], {'seed': '(0)'}), '((batch_size, width, width, in_channels), seed=0)\n', (25486, 25535), False, 'from tensorflow.python.ops import random_ops\n'), ((25893, 25936), 'tensorflow.python.ops.variables.global_variables_initializer', 'tf_variables.global_variables_initializer', ([], {}), '()\n', (25934, 25936), True, 'from tensorflow.python.ops import variables as tf_variables\n'), ((26279, 26293), 
'tensorflow.python.framework.ops.Graph', 'tf_ops.Graph', ([], {}), '()\n', (26291, 26293), True, 'from tensorflow.python.framework import ops as tf_ops\n'), ((26732, 26746), 'tensorflow.python.framework.ops.Graph', 'tf_ops.Graph', ([], {}), '()\n', (26744, 26746), True, 'from tensorflow.python.framework import ops as tf_ops\n'), ((27162, 27176), 'tensorflow.python.framework.ops.Graph', 'tf_ops.Graph', ([], {}), '()\n', (27174, 27176), True, 'from tensorflow.python.framework import ops as tf_ops\n'), ((27699, 27713), 'tensorflow.python.framework.ops.Graph', 'tf_ops.Graph', ([], {}), '()\n', (27711, 27713), True, 'from tensorflow.python.framework import ops as tf_ops\n'), ((28443, 28457), 'tensorflow.python.framework.ops.Graph', 'tf_ops.Graph', ([], {}), '()\n', (28455, 28457), True, 'from tensorflow.python.framework import ops as tf_ops\n'), ((29099, 29113), 'tensorflow.python.framework.ops.Graph', 'tf_ops.Graph', ([], {}), '()\n', (29111, 29113), True, 'from tensorflow.python.framework import ops as tf_ops\n'), ((29472, 29515), 'tensorflow.python.ops.variables.global_variables_initializer', 'tf_variables.global_variables_initializer', ([], {}), '()\n', (29513, 29515), True, 'from tensorflow.python.ops import variables as tf_variables\n'), ((29805, 29819), 'tensorflow.python.framework.ops.Graph', 'tf_ops.Graph', ([], {}), '()\n', (29817, 29819), True, 'from tensorflow.python.framework import ops as tf_ops\n'), ((30162, 30176), 'tensorflow.python.framework.ops.Graph', 'tf_ops.Graph', ([], {}), '()\n', (30174, 30176), True, 'from tensorflow.python.framework import ops as tf_ops\n'), ((30608, 30622), 'tensorflow.python.framework.ops.Graph', 'tf_ops.Graph', ([], {}), '()\n', (30620, 30622), True, 'from tensorflow.python.framework import ops as tf_ops\n'), ((30821, 30847), 'tensorflow.python.ops.array_ops.constant', 'array_ops.constant', (['tensor'], {}), '(tensor)\n', (30839, 30847), False, 'from tensorflow.python.ops import array_ops\n'), ((31182, 31196), 
'tensorflow.python.framework.ops.Graph', 'tf_ops.Graph', ([], {}), '()\n', (31194, 31196), True, 'from tensorflow.python.framework import ops as tf_ops\n'), ((31541, 31555), 'tensorflow.python.framework.ops.Graph', 'tf_ops.Graph', ([], {}), '()\n', (31553, 31555), True, 'from tensorflow.python.framework import ops as tf_ops\n'), ((32001, 32015), 'tensorflow.python.framework.ops.Graph', 'tf_ops.Graph', ([], {}), '()\n', (32013, 32015), True, 'from tensorflow.python.framework import ops as tf_ops\n'), ((32535, 32549), 'tensorflow.python.framework.ops.Graph', 'tf_ops.Graph', ([], {}), '()\n', (32547, 32549), True, 'from tensorflow.python.framework import ops as tf_ops\n'), ((11205, 11214), 'numpy.eye', 'np.eye', (['(2)'], {}), '(2)\n', (11211, 11214), True, 'import numpy as np\n'), ((15766, 15801), 'numpy.array', 'np.array', (['[1.0, 1.0, 0.0, 0.0, 1.0]'], {}), '([1.0, 1.0, 0.0, 0.0, 1.0])\n', (15774, 15801), True, 'import numpy as np\n'), ((29266, 29352), 'tensorflow.python.ops.random_ops.random_uniform', 'random_ops.random_uniform', (['(batch_size, width, width, width, out_channels)'], {'seed': '(0)'}), '((batch_size, width, width, width, out_channels),\n seed=0)\n', (29291, 29352), False, 'from tensorflow.python.ops import random_ops\n'), ((30720, 30736), 'numpy.arange', 'np.arange', (['(1)', '(17)'], {}), '(1, 17)\n', (30729, 30736), True, 'import numpy as np\n'), ((27852, 27872), 'numpy.prod', 'np.prod', (['input_shape'], {}), '(input_shape)\n', (27859, 27872), True, 'import numpy as np\n'), ((28596, 28616), 'numpy.prod', 'np.prod', (['input_shape'], {}), '(input_shape)\n', (28603, 28616), True, 'import numpy as np\n')] |
import numpy as np
from matplotlib.colors import ListedColormap, LinearSegmentedColormap
import matplotlib.pyplot as plt
from matplotlib import cm
from mpl_toolkits.mplot3d import Axes3D
import pandas
import os
# Directory containing the rover's logged CSV files.
directory = r'/root/PycharmProjects/earth-rover/'

for filename in os.listdir(directory):
    print(filename)
    if filename.startswith("spectrometer") and filename.endswith(".csv"):
        # Fix: os.listdir() yields bare file names, so join with the
        # directory — the original read_csv(filename) only worked when the
        # current working directory happened to be `directory`.
        er_df_spec = pandas.read_csv(os.path.join(directory, filename))
        fig = plt.figure()

        # Each log row holds 2048 wavelength/intensity pairs in columns
        # 'field.wavelength0'..'field.wavelength2047' and
        # 'field.intensities0'..'field.intensities2047'.
        spec_wavelengths_vec = er_df_spec[
            ['field.wavelength%s' % i for i in range(2048)]]
        spec_intensities_vec = er_df_spec[
            ['field.intensities%s' % i for i in range(2048)]]

        # Build a 256-entry colormap: jet for the lower half of the
        # normalized range, gray for the upper half.
        top = cm.get_cmap('jet', 128)
        bottom = cm.get_cmap('gray', 128)
        newcolors = np.vstack((top(np.linspace(0, 1, 128)),
                               bottom(np.linspace(0, 1, 128))))
        newcmp = ListedColormap(newcolors, name='OrangeBlue')

        spec_wavelengths_vec_np = spec_wavelengths_vec.to_numpy()
        ax = fig.add_subplot(111, projection='3d')
        # Wavelength axis taken from the first sample; assumes every row
        # shares the same wavelength calibration — TODO confirm.
        x = spec_wavelengths_vec_np[0, :]
        y = range(spec_wavelengths_vec_np.shape[0])
        X, Y = np.meshgrid(x, y)
        # Surface colored by wavelength (X normalized to [0, 1]).
        ax.plot_surface(X, Y, spec_intensities_vec,
                        facecolors=newcmp((X - X.min()) / (X.max() - X.min())),
                        alpha=0.7, linewidth=0, antialiased=False,
                        shade=False)
        plt.xlabel('wavelength (nm)')
        plt.ylabel('sample number')
        ax.set_zlabel('irradiance (uncalibrated)')
        plt.show()

print('done!')
"numpy.meshgrid",
"matplotlib.pyplot.show",
"matplotlib.cm.get_cmap",
"pandas.read_csv",
"matplotlib.pyplot.figure",
"numpy.linspace",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.colors.ListedColormap",
"os.listdir"
] | [((318, 339), 'os.listdir', 'os.listdir', (['directory'], {}), '(directory)\n', (328, 339), False, 'import os\n'), ((481, 506), 'pandas.read_csv', 'pandas.read_csv', (['filename'], {}), '(filename)\n', (496, 506), False, 'import pandas\n'), ((521, 533), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (531, 533), True, 'import matplotlib.pyplot as plt\n'), ((803, 826), 'matplotlib.cm.get_cmap', 'cm.get_cmap', (['"""jet"""', '(128)'], {}), "('jet', 128)\n", (814, 826), False, 'from matplotlib import cm\n'), ((844, 868), 'matplotlib.cm.get_cmap', 'cm.get_cmap', (['"""gray"""', '(128)'], {}), "('gray', 128)\n", (855, 868), False, 'from matplotlib import cm\n'), ((1010, 1054), 'matplotlib.colors.ListedColormap', 'ListedColormap', (['newcolors'], {'name': '"""OrangeBlue"""'}), "(newcolors, name='OrangeBlue')\n", (1024, 1054), False, 'from matplotlib.colors import ListedColormap, LinearSegmentedColormap\n'), ((1316, 1333), 'numpy.meshgrid', 'np.meshgrid', (['x', 'y'], {}), '(x, y)\n', (1327, 1333), True, 'import numpy as np\n'), ((1506, 1535), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""wavelength (nm)"""'], {}), "('wavelength (nm)')\n", (1516, 1535), True, 'import matplotlib.pyplot as plt\n'), ((1544, 1571), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""sample number"""'], {}), "('sample number')\n", (1554, 1571), True, 'import matplotlib.pyplot as plt\n'), ((1632, 1642), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1640, 1642), True, 'import matplotlib.pyplot as plt\n'), ((904, 926), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(128)'], {}), '(0, 1, 128)\n', (915, 926), True, 'import numpy as np\n'), ((967, 989), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(128)'], {}), '(0, 1, 128)\n', (978, 989), True, 'import numpy as np\n')] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.