code stringlengths 31 1.05M | apis list | extract_api stringlengths 97 1.91M |
|---|---|---|
import plotly.graph_objs as go
import pandas as pd
import os
from ipywidgets import (Tab, SelectMultiple, Accordion, ToggleButton,
VBox, HBox, HTML, Image, Button, Text, Dropdown)
from ipywidgets import HBox, VBox, Image, Layout, HTML
import numpy as np
from scipy.spatial import ConvexHull
from sklearn.cluster import KMeans, DBSCAN, OPTICS
import json
class Figure1:
def __init__(self, csv_file_path, base_path='.',
inchi_key='<KEY>',
clustering=None):
self.organic_inchi_col = '_raw_organic_0_inchikey'
self.org_conc_col = '_rxn_molarity_organic'
self.inorg_conc_col = '_rxn_molarity_inorganic'
self.acid_conc_col = '_rxn_molarity_acid'
self.exp_name_col = 'name'
self.table_cols = ['_rxn_mixingtime_s', '_rxn_mixingtime2_s'
'_rxn_reactiontime_s', '_rxn_stirrate_rpm']
self.table_col_names = ['Mixing Time Stage 1 (s)', 'Mixing Time Stage 2 (s)',
'Reaction Time (s)', 'Reaction Stirrate (rpm)']
self.current_amine_inchi = inchi_key
self.base_path = base_path
self.clustering = clustering
self.full_perovskite_data = pd.read_csv(
csv_file_path, low_memory=False)
self.inchis = pd.read_csv('./data/inventory.csv')
self.inchi_dict = dict(zip(self.inchis['Chemical Name'],
self.inchis['InChI Key (ID)']))
self.chem_dict = dict(zip(self.inchis['InChI Key (ID)'],
self.inchis['Chemical Name']))
#self.state_spaces = pd.read_csv('./perovskitedata/state_spaces.csv')
self.ss_dict = json.load(open('./data/s_spaces.json', 'r'))
self.generate_plot(self.current_amine_inchi)
self.setup_widgets()
def generate_plot(self, inchi_key):
if inchi_key in self.ss_dict:
self.setup_hull(hull_points=self.ss_dict[inchi_key])
else:
self.setup_hull()
self.gen_amine_traces(inchi_key)
self.setup_plot(yaxis_label=self.chem_dict[inchi_key]+' (M)')
def setup_hull(self, hull_points=[[0., 0., 0.]]):
xp, yp, zp = zip(*hull_points)
self.hull_mesh = go.Mesh3d(x=xp,
y=yp,
z=zp,
color='green',
opacity=0.50,
alphahull=0)
def setup_success_hull(self, success_hull, success_points):
if success_hull:
xp, yp, zp = zip(*success_points[success_hull.vertices])
self.success_hull_plot = go.Mesh3d(x=xp,
y=yp,
z=zp,
color='red',
opacity=0.50,
alphahull=0)
else:
self.success_hull_plot = go.Mesh3d(x=[0],
y=[0],
z=[0],
color='red',
opacity=0.50,
alphahull=0)
def gen_amine_traces(self, inchi_key, amine_short_name='Me2NH2I'):
amine_data = self.full_perovskite_data.loc[
self.full_perovskite_data[self.organic_inchi_col]
== inchi_key]
success_hull = None
#print(f'Total points: {len(amine_data)}')
# print(self.ss_dict.keys())
if inchi_key in self.ss_dict:
xp, yp, zp = zip(*self.ss_dict[inchi_key])
else:
xp, yp, zp = [0], [0], [0]
self.max_inorg = max([amine_data[self.inorg_conc_col].max(), max(xp)])
self.max_org = max([amine_data[self.org_conc_col].max(), max(yp)])
self.max_acid = max([amine_data[self.acid_conc_col].max(), max(zp)])
# Splitting by crystal scores. Assuming crystal scores from 1-4
self.amine_crystal_dfs = []
for i in range(1, 5):
self.amine_crystal_dfs.append(
amine_data.loc[amine_data['_out_crystalscore'] == i])
# print(len(self.amine_crystal_dfs[3]))
self.amine_crystal_traces = []
self.trace_colors = ['rgba(65, 118, 244, 1.0)',
'rgba(92, 244, 65, 1.0)',
'rgba(244, 238, 66, 1.0)',
'rgba(244, 66, 66, 1.0)']
for i, df in enumerate(self.amine_crystal_dfs):
trace = go.Scatter3d(
x=df[self.inorg_conc_col],
y=df[self.org_conc_col],
z=df[self.acid_conc_col],
mode='markers',
name='Score {}'.format(i+1),
text=["""<b>Inorganic</b>: {:.3f} <br><b>{}</b>: {:.3f}<br><b>Solvent</b>: {:.3f}""".format(
row[self.inorg_conc_col],
self.chem_dict[row[self.organic_inchi_col]],
row[self.org_conc_col],
row[self.acid_conc_col])
for idx, row in df.iterrows()],
hoverinfo='text',
marker=dict(
size=4,
color=self.trace_colors[i],
line=dict(
width=0.2
),
opacity=1.0
)
)
self.amine_crystal_traces.append(trace)
if i == 3:
success_points = np.dstack((df[self.inorg_conc_col],
df[self.org_conc_col],
df[self.acid_conc_col]))[0]
if len(success_points) > 3:
success_hull = ConvexHull(success_points)
else:
success_hull = None
self.setup_success_hull(success_hull, success_points)
self.data = self.amine_crystal_traces
self.data += [self.success_hull_plot]
self.data += [self.hull_mesh]
# if self.hull_mesh:
def setup_plot(self, xaxis_label='Lead Iodide [PbI3] (M)',
yaxis_label='Dimethylammonium Iodide<br>[Me2NH2I] (M)',
zaxis_label='Formic Acid [FAH] (M)'):
self.layout = go.Layout(
scene=dict(
xaxis=dict(
title=xaxis_label,
tickmode='linear',
dtick=0.5,
range=[0, self.max_inorg],
),
yaxis=dict(
title=yaxis_label,
tickmode='linear',
dtick=0.5,
range=[0, self.max_org],
),
zaxis=dict(
title=zaxis_label,
tickmode='linear',
dtick=1.0,
range=[0, self.max_acid],
),
),
legend=go.layout.Legend(
x=0,
y=1,
traceorder="normal",
font=dict(
family="sans-serif",
size=12,
color="black"
),
bgcolor="LightSteelBlue",
bordercolor="Black",
borderwidth=2
),
width=975,
height=700,
margin=go.layout.Margin(
l=20,
r=20,
b=20,
t=20,
pad=2
),
)
try:
with self.fig.batch_update():
for i, trace in enumerate(self.data):
self.fig.data[i].x = trace.x
self.fig.data[i].y = trace.y
self.fig.data[i].z = trace.z
self.fig.data[i].text = trace.text
self.fig.layout.update(self.layout)
except:
self.fig = go.FigureWidget(data=self.data, layout=self.layout)
for trace in self.fig.data[:-2]:
trace.on_click(self.show_data_3d_callback)
def setup_widgets(self, image_folder='data/images'):
image_folder = self.base_path + '/' + image_folder
#self.image_list = os.listdir(image_folder)
self.image_list = json.load(open('./data/image_list.json', 'r'))
self.image_list = self.image_list['image_list']
# Data Filter Setup
reset_plot = Button(
description='Reset',
disabled=False,
tooltip='Reset the colors of the plot'
)
xy_check = Button(
description='Show X-Y axes',
disabled=False,
button_style='',
tooltip='Click to show X-Y axes'
)
show_success_hull = ToggleButton(
value=True,
description='Show success hull',
disabled=False,
button_style='',
tooltip='Toggle to show/hide success hull',
icon='check'
)
show_hull_check = ToggleButton(
value=True,
description='Show State Space',
disabled=False,
button_style='',
tooltip='Toggle to show/hide state space',
icon='check'
)
unique_inchis = self.full_perovskite_data[self.organic_inchi_col].unique(
)
self.select_amine = Dropdown(
options=[row['Chemical Name'] for
i, row in self.inchis.iterrows()
if row['InChI Key (ID)'] in unique_inchis],
description='Amine:',
disabled=False,
)
reset_plot.on_click(self.reset_plot_callback)
xy_check.on_click(self.set_xy_camera)
show_success_hull.observe(self.toggle_success_mesh, 'value')
show_hull_check.observe(self.toggle_mesh, 'value')
self.select_amine.observe(self.select_amine_callback, 'value')
# Experiment data tab setup
self.experiment_table = HTML()
self.experiment_table.value = "Please click on a point"
"to explore experiment details"
#self.image_data = {}
with open("{}/{}".format(image_folder, 'not_found.png'), "rb") as f:
b = f.read()
image_data = b
self.image_widget = Image(
value=image_data,
layout=Layout(height='400px', width='650px')
)
experiment_view_vbox = VBox(
[HBox([self.experiment_table, self.image_widget])])
plot_tabs = Tab([VBox([self.fig,
HBox([self.select_amine]),
HBox([xy_check, show_success_hull,
show_hull_check,
reset_plot])]),
])
plot_tabs.set_title(0, 'Chemical Space')
self.full_widget = VBox([plot_tabs, experiment_view_vbox])
self.full_widget.layout.align_items = 'center'
def select_amine_callback(self, state):
new_amine_name = state['new']
new_amine_inchi = self.inchi_dict[new_amine_name]
amine_data = self.full_perovskite_data[
self.full_perovskite_data[self.organic_inchi_col] ==
new_amine_inchi]
self.current_amine_inchi = new_amine_inchi
self.generate_plot(self.current_amine_inchi)
self.reset_plot_callback(None)
def get_plate_options(self):
plates = set()
for df in self.amine_crystal_dfs:
for i, row in df.iterrows():
name = str(row['name'])
plate_name = '_'.join(name.split('_')[: -1])
plates.add(plate_name)
plate_options = []
for i, plate in enumerate(plates):
plate_options.append(plate)
return plate_options
def generate_table(self, row, columns, column_names):
table_html = """ <table border="1" style="width:100%;">
<tbody>"""
for i, column in enumerate(columns):
if isinstance(row[column], str):
value = row[column].split('_')[-1]
else:
value = np.round(row[column], decimals=3)
table_html += """
<tr>
<td style="padding: 8px;">{}</td>
<td style="padding: 8px;">{}</td>
</tr>
""".format(column_names[i], value)
table_html += """
</tbody>
</table>
"""
return table_html
def toggle_mesh(self, state):
with self.fig.batch_update():
self.fig.data[-1].visible = state.new
def toggle_success_mesh(self, state):
with self.fig.batch_update():
self.fig.data[-2].visible = state.new
def set_xy_camera(self, state):
camera = dict(
up=dict(x=0, y=0, z=1),
center=dict(x=0, y=0, z=0),
eye=dict(x=0.0, y=0.0, z=2.5)
)
self.fig['layout'].update(
scene=dict(camera=camera),
)
def reset_plot_callback(self, b):
with self.fig.batch_update():
for i in range(len(self.fig.data[:4])):
self.fig.data[i].marker.color = self.trace_colors[i]
self.fig.data[i].marker.size = 4
def show_data_3d_callback(self, trace, point, selector):
if point.point_inds and point.trace_index < 4:
selected_experiment = self.amine_crystal_dfs[
point.trace_index].iloc[point.point_inds[0]]
with self.fig.batch_update():
for i in range(len(self.fig.data[: 4])):
color = self.trace_colors[i].split(',')
color[-1] = '0.5)'
color = ','.join(color)
if i == point.trace_index:
marker_colors = [color for x in range(len(trace['x']))]
marker_colors[point.point_inds[0]
] = self.trace_colors[i]
self.fig.data[i].marker.color = marker_colors
self.fig.data[i].marker.size = 6
else:
self.fig.data[i].marker.color = color
self.fig.data[i].marker.size = 4
self.populate_data(selected_experiment)
def populate_data(self, selected_experiment):
name = selected_experiment[self.exp_name_col]
img_filename = name+'_side.jpg'
image_folder = os.path.join('data', 'images')
if img_filename in self.image_list:
with open(os.path.join(image_folder, img_filename), "rb") as f:
b = f.read()
#self.image_data[img_filename] = b
self.image_widget.value = b
else:
with open(os.path.join(image_folder, 'not_found.png'), "rb") as f:
b = f.read()
self.image_widget.value = b
#self.image_widget.value = self.image_data['not_found.png']
columns = [self.exp_name_col, self.acid_conc_col,
self.inorg_conc_col, self.org_conc_col] + self.table_cols
column_names = ['Well ID', 'Formic Acid [FAH]', 'Lead Iodide [PbI2]',
'{}'.format(self.chem_dict[self.current_amine_inchi])] + self.table_col_names
prefix = '_'.join(name.split('_')[:-1])
self.selected_plate = prefix
self.experiment_table.value = '<p>Plate ID:<br> {}</p>'.format(
prefix) + self.generate_table(selected_experiment.reindex(columns),
columns, column_names)
@property
def plot(self):
return self.full_widget
| [
"ipywidgets.HBox",
"numpy.dstack",
"pandas.read_csv",
"ipywidgets.ToggleButton",
"ipywidgets.VBox",
"ipywidgets.Button",
"os.path.join",
"scipy.spatial.ConvexHull",
"ipywidgets.Layout",
"plotly.graph_objs.Mesh3d",
"plotly.graph_objs.FigureWidget",
"plotly.graph_objs.layout.Margin",
"numpy.ro... | [((1232, 1276), 'pandas.read_csv', 'pd.read_csv', (['csv_file_path'], {'low_memory': '(False)'}), '(csv_file_path, low_memory=False)\n', (1243, 1276), True, 'import pandas as pd\n'), ((1312, 1347), 'pandas.read_csv', 'pd.read_csv', (['"""./data/inventory.csv"""'], {}), "('./data/inventory.csv')\n", (1323, 1347), True, 'import pandas as pd\n'), ((2257, 2325), 'plotly.graph_objs.Mesh3d', 'go.Mesh3d', ([], {'x': 'xp', 'y': 'yp', 'z': 'zp', 'color': '"""green"""', 'opacity': '(0.5)', 'alphahull': '(0)'}), "(x=xp, y=yp, z=zp, color='green', opacity=0.5, alphahull=0)\n", (2266, 2325), True, 'import plotly.graph_objs as go\n'), ((8647, 8735), 'ipywidgets.Button', 'Button', ([], {'description': '"""Reset"""', 'disabled': '(False)', 'tooltip': '"""Reset the colors of the plot"""'}), "(description='Reset', disabled=False, tooltip=\n 'Reset the colors of the plot')\n", (8653, 8735), False, 'from ipywidgets import Tab, SelectMultiple, Accordion, ToggleButton, VBox, HBox, HTML, Image, Button, Text, Dropdown\n'), ((8797, 8903), 'ipywidgets.Button', 'Button', ([], {'description': '"""Show X-Y axes"""', 'disabled': '(False)', 'button_style': '""""""', 'tooltip': '"""Click to show X-Y axes"""'}), "(description='Show X-Y axes', disabled=False, button_style='',\n tooltip='Click to show X-Y axes')\n", (8803, 8903), False, 'from ipywidgets import Tab, SelectMultiple, Accordion, ToggleButton, VBox, HBox, HTML, Image, Button, Text, Dropdown\n'), ((8987, 9139), 'ipywidgets.ToggleButton', 'ToggleButton', ([], {'value': '(True)', 'description': '"""Show success hull"""', 'disabled': '(False)', 'button_style': '""""""', 'tooltip': '"""Toggle to show/hide success hull"""', 'icon': '"""check"""'}), "(value=True, description='Show success hull', disabled=False,\n button_style='', tooltip='Toggle to show/hide success hull', icon='check')\n", (8999, 9139), False, 'from ipywidgets import Tab, SelectMultiple, Accordion, ToggleButton, VBox, HBox, HTML, Image, Button, Text, 
Dropdown\n'), ((9245, 9395), 'ipywidgets.ToggleButton', 'ToggleButton', ([], {'value': '(True)', 'description': '"""Show State Space"""', 'disabled': '(False)', 'button_style': '""""""', 'tooltip': '"""Toggle to show/hide state space"""', 'icon': '"""check"""'}), "(value=True, description='Show State Space', disabled=False,\n button_style='', tooltip='Toggle to show/hide state space', icon='check')\n", (9257, 9395), False, 'from ipywidgets import Tab, SelectMultiple, Accordion, ToggleButton, VBox, HBox, HTML, Image, Button, Text, Dropdown\n'), ((10211, 10217), 'ipywidgets.HTML', 'HTML', ([], {}), '()\n', (10215, 10217), False, 'from ipywidgets import HBox, VBox, Image, Layout, HTML\n'), ((11096, 11135), 'ipywidgets.VBox', 'VBox', (['[plot_tabs, experiment_view_vbox]'], {}), '([plot_tabs, experiment_view_vbox])\n', (11100, 11135), False, 'from ipywidgets import HBox, VBox, Image, Layout, HTML\n'), ((14840, 14870), 'os.path.join', 'os.path.join', (['"""data"""', '"""images"""'], {}), "('data', 'images')\n", (14852, 14870), False, 'import os\n'), ((2698, 2764), 'plotly.graph_objs.Mesh3d', 'go.Mesh3d', ([], {'x': 'xp', 'y': 'yp', 'z': 'zp', 'color': '"""red"""', 'opacity': '(0.5)', 'alphahull': '(0)'}), "(x=xp, y=yp, z=zp, color='red', opacity=0.5, alphahull=0)\n", (2707, 2764), True, 'import plotly.graph_objs as go\n'), ((3052, 3121), 'plotly.graph_objs.Mesh3d', 'go.Mesh3d', ([], {'x': '[0]', 'y': '[0]', 'z': '[0]', 'color': '"""red"""', 'opacity': '(0.5)', 'alphahull': '(0)'}), "(x=[0], y=[0], z=[0], color='red', opacity=0.5, alphahull=0)\n", (3061, 3121), True, 'import plotly.graph_objs as go\n'), ((7587, 7634), 'plotly.graph_objs.layout.Margin', 'go.layout.Margin', ([], {'l': '(20)', 'r': '(20)', 'b': '(20)', 't': '(20)', 'pad': '(2)'}), '(l=20, r=20, b=20, t=20, pad=2)\n', (7603, 7634), True, 'import plotly.graph_objs as go\n'), ((8144, 8195), 'plotly.graph_objs.FigureWidget', 'go.FigureWidget', ([], {'data': 'self.data', 'layout': 'self.layout'}), 
'(data=self.data, layout=self.layout)\n', (8159, 8195), True, 'import plotly.graph_objs as go\n'), ((10568, 10605), 'ipywidgets.Layout', 'Layout', ([], {'height': '"""400px"""', 'width': '"""650px"""'}), "(height='400px', width='650px')\n", (10574, 10605), False, 'from ipywidgets import HBox, VBox, Image, Layout, HTML\n'), ((10667, 10715), 'ipywidgets.HBox', 'HBox', (['[self.experiment_table, self.image_widget]'], {}), '([self.experiment_table, self.image_widget])\n', (10671, 10715), False, 'from ipywidgets import HBox, VBox, Image, Layout, HTML\n'), ((12377, 12410), 'numpy.round', 'np.round', (['row[column]'], {'decimals': '(3)'}), '(row[column], decimals=3)\n', (12385, 12410), True, 'import numpy as np\n'), ((5680, 5768), 'numpy.dstack', 'np.dstack', (['(df[self.inorg_conc_col], df[self.org_conc_col], df[self.acid_conc_col])'], {}), '((df[self.inorg_conc_col], df[self.org_conc_col], df[self.\n acid_conc_col]))\n', (5689, 5768), True, 'import numpy as np\n'), ((5935, 5961), 'scipy.spatial.ConvexHull', 'ConvexHull', (['success_points'], {}), '(success_points)\n', (5945, 5961), False, 'from scipy.spatial import ConvexHull\n'), ((14937, 14977), 'os.path.join', 'os.path.join', (['image_folder', 'img_filename'], {}), '(image_folder, img_filename)\n', (14949, 14977), False, 'import os\n'), ((15151, 15194), 'os.path.join', 'os.path.join', (['image_folder', '"""not_found.png"""'], {}), "(image_folder, 'not_found.png')\n", (15163, 15194), False, 'import os\n'), ((10791, 10816), 'ipywidgets.HBox', 'HBox', (['[self.select_amine]'], {}), '([self.select_amine])\n', (10795, 10816), False, 'from ipywidgets import HBox, VBox, Image, Layout, HTML\n'), ((10849, 10913), 'ipywidgets.HBox', 'HBox', (['[xy_check, show_success_hull, show_hull_check, reset_plot]'], {}), '([xy_check, show_success_hull, show_hull_check, reset_plot])\n', (10853, 10913), False, 'from ipywidgets import HBox, VBox, Image, Layout, HTML\n')] |
'''
Source: https://www.kaggle.com/helmehelmuto/cnn-keras-and-innvestigate
Use as a test benchmark
'''
import numpy as np
import pandas as pd
# Merge the two Data set together
df = pd.read_csv('../input/pdb_data_no_dups.csv').merge(pd.read_csv('../input/pdb_data_seq.csv'), how='inner', on='structureId')
# Drop rows with missing labels
df = df[[type(c) == type('') for c in df.classification.values]]
df = df[[type(c) == type('') for c in df.sequence.values]]
# select proteins
df = df[df.macromoleculeType_x == 'Protein']
df.reset_index()
df.shape
import matplotlib.pyplot as plt
from collections import Counter
# count numbers of instances per class
cnt = Counter(df.classification)
# select only 10 most common classes!
top_classes = 10
# sort classes
sorted_classes = cnt.most_common()[:top_classes]
classes = [c[0] for c in sorted_classes]
counts = [c[1] for c in sorted_classes]
print("at least " + str(counts[-1]) + " instances per class")
# apply to dataframe
print(str(df.shape[0]) + " instances before")
df = df[[c in classes for c in df.classification]]
print(str(df.shape[0]) + " instances after")
seqs = df.sequence.values
lengths = [len(s) for s in seqs]
# visualize
fig, axarr = plt.subplots(1,2, figsize=(20,5))
axarr[0].bar(range(len(classes)), counts)
plt.sca(axarr[0])
plt.xticks(range(len(classes)), classes, rotation='vertical')
axarr[0].set_ylabel('frequency')
axarr[1].hist(lengths, bins=100, normed=False)
axarr[1].set_xlabel('sequence length')
axarr[1].set_ylabel('# sequences')
plt.show()
from sklearn.preprocessing import LabelBinarizer
# Transform labels to one-hot
lb = LabelBinarizer()
Y = lb.fit_transform(df.classification)
from keras.preprocessing import text, sequence
from keras.preprocessing.text import Tokenizer
from sklearn.model_selection import train_test_split
# maximum length of sequence, everything afterwards is discarded!
max_length = 256
#create and fit tokenizer
tokenizer = Tokenizer(char_level=True)
tokenizer.fit_on_texts(seqs)
#represent input data as word rank number sequences
X = tokenizer.texts_to_sequences(seqs)
X = sequence.pad_sequences(X, maxlen=max_length)
from keras.models import Sequential
from keras.layers import Dense, Conv1D, MaxPooling1D, Flatten
from keras.layers import LSTM
from keras.layers.embeddings import Embedding
embedding_dim = 8
# create the model
model = Sequential()
model.add(Embedding(len(tokenizer.word_index)+1, embedding_dim, input_length=max_length))
model.add(Conv1D(filters=64, kernel_size=6, padding='same', activation='relu'))
model.add(MaxPooling1D(pool_size=2))
model.add(Conv1D(filters=32, kernel_size=3, padding='same', activation='relu'))
model.add(MaxPooling1D(pool_size=2))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dense(top_classes, activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
print(model.summary())
X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=.2)
model.fit(X_train, y_train, validation_data=(X_test, y_test), epochs=15, batch_size=128)
from sklearn.metrics import confusion_matrix
from sklearn.metrics import accuracy_score
from sklearn.metrics import classification_report
import itertools
train_pred = model.predict(X_train)
test_pred = model.predict(X_test)
print("train-acc = " + str(accuracy_score(np.argmax(y_train, axis=1), np.argmax(train_pred, axis=1))))
print("test-acc = " + str(accuracy_score(np.argmax(y_test, axis=1), np.argmax(test_pred, axis=1))))
# Compute confusion matrix
cm = confusion_matrix(np.argmax(y_test, axis=1), np.argmax(test_pred, axis=1))
# Plot normalized confusion matrix
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
np.set_printoptions(precision=2)
plt.figure(figsize=(10,10))
plt.imshow(cm, interpolation='nearest', cmap=plt.cm.Blues)
plt.title('Confusion matrix')
plt.colorbar()
tick_marks = np.arange(len(lb.classes_))
plt.xticks(tick_marks, lb.classes_, rotation=90)
plt.yticks(tick_marks, lb.classes_)
#for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
# plt.text(j, i, format(cm[i, j], '.2f'), horizontalalignment="center", color="white" if cm[i, j] > cm.max() / 2. else "black")
plt.ylabel('True label')
plt.xlabel('Predicted label')
plt.show()
print(classification_report(np.argmax(y_test, axis=1), np.argmax(test_pred, axis=1), target_names=lb.classes_))
| [
"pandas.read_csv",
"matplotlib.pyplot.ylabel",
"keras.layers.Dense",
"keras.preprocessing.sequence.pad_sequences",
"matplotlib.pyplot.imshow",
"sklearn.preprocessing.LabelBinarizer",
"keras.layers.MaxPooling1D",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.yticks",
"keras.layers.Flatten",
"mat... | [((663, 689), 'collections.Counter', 'Counter', (['df.classification'], {}), '(df.classification)\n', (670, 689), False, 'from collections import Counter\n'), ((1202, 1237), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(2)'], {'figsize': '(20, 5)'}), '(1, 2, figsize=(20, 5))\n', (1214, 1237), True, 'import matplotlib.pyplot as plt\n'), ((1278, 1295), 'matplotlib.pyplot.sca', 'plt.sca', (['axarr[0]'], {}), '(axarr[0])\n', (1285, 1295), True, 'import matplotlib.pyplot as plt\n'), ((1513, 1523), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1521, 1523), True, 'import matplotlib.pyplot as plt\n'), ((1610, 1626), 'sklearn.preprocessing.LabelBinarizer', 'LabelBinarizer', ([], {}), '()\n', (1624, 1626), False, 'from sklearn.preprocessing import LabelBinarizer\n'), ((1938, 1964), 'keras.preprocessing.text.Tokenizer', 'Tokenizer', ([], {'char_level': '(True)'}), '(char_level=True)\n', (1947, 1964), False, 'from keras.preprocessing.text import Tokenizer\n'), ((2089, 2133), 'keras.preprocessing.sequence.pad_sequences', 'sequence.pad_sequences', (['X'], {'maxlen': 'max_length'}), '(X, maxlen=max_length)\n', (2111, 2133), False, 'from keras.preprocessing import text, sequence\n'), ((2356, 2368), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (2366, 2368), False, 'from keras.models import Sequential\n'), ((2953, 2990), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'Y'], {'test_size': '(0.2)'}), '(X, Y, test_size=0.2)\n', (2969, 2990), False, 'from sklearn.model_selection import train_test_split\n'), ((3709, 3741), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'precision': '(2)'}), '(precision=2)\n', (3728, 3741), True, 'import numpy as np\n'), ((3742, 3770), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 10)'}), '(figsize=(10, 10))\n', (3752, 3770), True, 'import matplotlib.pyplot as plt\n'), ((3770, 3828), 'matplotlib.pyplot.imshow', 'plt.imshow', (['cm'], {'interpolation': 
'"""nearest"""', 'cmap': 'plt.cm.Blues'}), "(cm, interpolation='nearest', cmap=plt.cm.Blues)\n", (3780, 3828), True, 'import matplotlib.pyplot as plt\n'), ((3829, 3858), 'matplotlib.pyplot.title', 'plt.title', (['"""Confusion matrix"""'], {}), "('Confusion matrix')\n", (3838, 3858), True, 'import matplotlib.pyplot as plt\n'), ((3859, 3873), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (3871, 3873), True, 'import matplotlib.pyplot as plt\n'), ((3915, 3963), 'matplotlib.pyplot.xticks', 'plt.xticks', (['tick_marks', 'lb.classes_'], {'rotation': '(90)'}), '(tick_marks, lb.classes_, rotation=90)\n', (3925, 3963), True, 'import matplotlib.pyplot as plt\n'), ((3964, 3999), 'matplotlib.pyplot.yticks', 'plt.yticks', (['tick_marks', 'lb.classes_'], {}), '(tick_marks, lb.classes_)\n', (3974, 3999), True, 'import matplotlib.pyplot as plt\n'), ((4203, 4227), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""True label"""'], {}), "('True label')\n", (4213, 4227), True, 'import matplotlib.pyplot as plt\n'), ((4228, 4257), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Predicted label"""'], {}), "('Predicted label')\n", (4238, 4257), True, 'import matplotlib.pyplot as plt\n'), ((4258, 4268), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4266, 4268), True, 'import matplotlib.pyplot as plt\n'), ((234, 274), 'pandas.read_csv', 'pd.read_csv', (['"""../input/pdb_data_seq.csv"""'], {}), "('../input/pdb_data_seq.csv')\n", (245, 274), True, 'import pandas as pd\n'), ((2469, 2537), 'keras.layers.Conv1D', 'Conv1D', ([], {'filters': '(64)', 'kernel_size': '(6)', 'padding': '"""same"""', 'activation': '"""relu"""'}), "(filters=64, kernel_size=6, padding='same', activation='relu')\n", (2475, 2537), False, 'from keras.layers import Dense, Conv1D, MaxPooling1D, Flatten\n'), ((2549, 2574), 'keras.layers.MaxPooling1D', 'MaxPooling1D', ([], {'pool_size': '(2)'}), '(pool_size=2)\n', (2561, 2574), False, 'from keras.layers import Dense, Conv1D, MaxPooling1D, Flatten\n'), 
((2586, 2654), 'keras.layers.Conv1D', 'Conv1D', ([], {'filters': '(32)', 'kernel_size': '(3)', 'padding': '"""same"""', 'activation': '"""relu"""'}), "(filters=32, kernel_size=3, padding='same', activation='relu')\n", (2592, 2654), False, 'from keras.layers import Dense, Conv1D, MaxPooling1D, Flatten\n'), ((2666, 2691), 'keras.layers.MaxPooling1D', 'MaxPooling1D', ([], {'pool_size': '(2)'}), '(pool_size=2)\n', (2678, 2691), False, 'from keras.layers import Dense, Conv1D, MaxPooling1D, Flatten\n'), ((2703, 2712), 'keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (2710, 2712), False, 'from keras.layers import Dense, Conv1D, MaxPooling1D, Flatten\n'), ((2724, 2753), 'keras.layers.Dense', 'Dense', (['(128)'], {'activation': '"""relu"""'}), "(128, activation='relu')\n", (2729, 2753), False, 'from keras.layers import Dense, Conv1D, MaxPooling1D, Flatten\n'), ((2765, 2805), 'keras.layers.Dense', 'Dense', (['top_classes'], {'activation': '"""softmax"""'}), "(top_classes, activation='softmax')\n", (2770, 2805), False, 'from keras.layers import Dense, Conv1D, MaxPooling1D, Flatten\n'), ((3560, 3585), 'numpy.argmax', 'np.argmax', (['y_test'], {'axis': '(1)'}), '(y_test, axis=1)\n', (3569, 3585), True, 'import numpy as np\n'), ((3587, 3615), 'numpy.argmax', 'np.argmax', (['test_pred'], {'axis': '(1)'}), '(test_pred, axis=1)\n', (3596, 3615), True, 'import numpy as np\n'), ((183, 227), 'pandas.read_csv', 'pd.read_csv', (['"""../input/pdb_data_no_dups.csv"""'], {}), "('../input/pdb_data_no_dups.csv')\n", (194, 227), True, 'import pandas as pd\n'), ((4298, 4323), 'numpy.argmax', 'np.argmax', (['y_test'], {'axis': '(1)'}), '(y_test, axis=1)\n', (4307, 4323), True, 'import numpy as np\n'), ((4325, 4353), 'numpy.argmax', 'np.argmax', (['test_pred'], {'axis': '(1)'}), '(test_pred, axis=1)\n', (4334, 4353), True, 'import numpy as np\n'), ((3349, 3375), 'numpy.argmax', 'np.argmax', (['y_train'], {'axis': '(1)'}), '(y_train, axis=1)\n', (3358, 3375), True, 'import numpy as np\n'), 
((3377, 3406), 'numpy.argmax', 'np.argmax', (['train_pred'], {'axis': '(1)'}), '(train_pred, axis=1)\n', (3386, 3406), True, 'import numpy as np\n'), ((3451, 3476), 'numpy.argmax', 'np.argmax', (['y_test'], {'axis': '(1)'}), '(y_test, axis=1)\n', (3460, 3476), True, 'import numpy as np\n'), ((3478, 3506), 'numpy.argmax', 'np.argmax', (['test_pred'], {'axis': '(1)'}), '(test_pred, axis=1)\n', (3487, 3506), True, 'import numpy as np\n')] |
# Copyright 2018, The TensorFlow Federated Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Library methods for working with centralized data used in simulation."""
import abc
import collections
from typing import Callable, Iterable, List, Optional
import numpy as np
import tensorflow as tf
from tensorflow_federated.python.common_libs import py_typecheck
from tensorflow_federated.python.core.api import computations
from tensorflow_federated.python.simulation.datasets import client_data
# TODO(b/186139255): Merge with client_data.py once ConcreteClientData is
# removed.
class SerializableClientData(client_data.ClientData, metaclass=abc.ABCMeta):
"""Object to hold a federated dataset with serializable dataset construction.
In contrast to `tff.simulation.datasets.ClientData`, this implementation
uses a serializable dataset constructor for each client. This enables all
sub-classes to access `SerializableClientData.dataset_computation`.
"""
@abc.abstractproperty
def serializable_dataset_fn(self):
"""A callable accepting a client ID and returning a `tf.data.Dataset`.
Note that this callable must be traceable by TF, as it will be used in the
context of a `tf.function`.
"""
pass
def create_tf_dataset_for_client(self, client_id: str) -> tf.data.Dataset:
"""Creates a new `tf.data.Dataset` containing the client training examples.
This function will create a dataset for a given client, given that
`client_id` is contained in the `client_ids` property of the `ClientData`.
Unlike `create_dataset`, this method need not be serializable.
Args:
client_id: The string client_id for the desired client.
Returns:
A `tf.data.Dataset` object.
"""
if client_id not in self.client_ids:
raise ValueError(
'ID [{i}] is not a client in this ClientData. See '
'property `client_ids` for the list of valid ids.'.format(
i=client_id))
return self.serializable_dataset_fn(client_id)
@property
def dataset_computation(self):
"""A `tff.Computation` accepting a client ID, returning a dataset.
Note: the `dataset_computation` property is intended as a TFF-specific
performance optimization for distributed execution.
"""
if (not hasattr(self, '_cached_dataset_computation')) or (
self._cached_dataset_computation is None):
@computations.tf_computation(tf.string)
def dataset_computation(client_id):
return self.serializable_dataset_fn(client_id)
self._cached_dataset_computation = dataset_computation
return self._cached_dataset_computation
def create_tf_dataset_from_all_clients(self,
seed: Optional[int] = None
) -> tf.data.Dataset:
"""Creates a new `tf.data.Dataset` containing _all_ client examples.
This function is intended for use training centralized, non-distributed
models (num_clients=1). This can be useful as a point of comparison
against federated models.
Currently, the implementation produces a dataset that contains
all examples from a single client in order, and so generally additional
shuffling should be performed.
Args:
seed: Optional, a seed to determine the order in which clients are
processed in the joined dataset. The seed can be any 32-bit unsigned
integer or an array of such integers.
Returns:
A `tf.data.Dataset` object.
"""
client_ids = self.client_ids.copy()
np.random.RandomState(seed=seed).shuffle(client_ids)
nested_dataset = tf.data.Dataset.from_tensor_slices(client_ids)
# We apply serializable_dataset_fn here to avoid loading all client datasets
# in memory, which is slow. Note that tf.data.Dataset.map implicitly wraps
# the input mapping in a tf.function.
example_dataset = nested_dataset.flat_map(self.serializable_dataset_fn)
return example_dataset
def preprocess(
self, preprocess_fn: Callable[[tf.data.Dataset], tf.data.Dataset]
) -> 'PreprocessSerializableClientData':
"""Applies `preprocess_fn` to each client's data."""
py_typecheck.check_callable(preprocess_fn)
return PreprocessSerializableClientData(self, preprocess_fn)
@classmethod
def from_clients_and_tf_fn(
cls,
client_ids: Iterable[str],
serializable_dataset_fn: Callable[[str], tf.data.Dataset],
) -> 'ConcreteClientData':
"""Constructs a `ClientData` based on the given function.
Args:
client_ids: A non-empty list of strings to use as input to
`create_dataset_fn`.
serializable_dataset_fn: A function that takes a client_id from the above
list, and returns a `tf.data.Dataset`. This function must be
serializable and usable within the context of a `tf.function` and
`tff.Computation`.
Returns:
A `ConcreteSerializableClientData` object.
"""
return ConcreteSerializableClientData(client_ids, serializable_dataset_fn)
class PreprocessSerializableClientData(SerializableClientData):
  """Applies a preprocessing function to every dataset it returns.
  This class delegates all other aspects of implementation to its underlying
  `SerializableClientData` object, simply wiring in its `preprocess_fn`
  where necessary. Note that this `preprocess_fn` must be serializable by
  TensorFlow.
  """
  def __init__(  # pylint: disable=super-init-not-called
      self, underlying_client_data: SerializableClientData,
      preprocess_fn: Callable[[tf.data.Dataset], tf.data.Dataset]):
    py_typecheck.check_type(underlying_client_data, SerializableClientData)
    py_typecheck.check_callable(preprocess_fn)
    self._underlying_client_data = underlying_client_data
    self._preprocess_fn = preprocess_fn
    # Materialize one preprocessed example dataset eagerly so that
    # `element_type_structure` is known at construction time.
    example_dataset = self._preprocess_fn(
        self._underlying_client_data.create_tf_dataset_for_client(
            next(iter(underlying_client_data.client_ids))))
    self._element_type_structure = example_dataset.element_spec
    self._dataset_computation = None
    # Closure composing the underlying serializable dataset fn with the
    # preprocessing fn; stored on the instance so the `serializable_dataset_fn`
    # property below can return it.
    def serializable_dataset_fn(client_id: str) -> tf.data.Dataset:
      return self._preprocess_fn(
          self._underlying_client_data.serializable_dataset_fn(client_id)) # pylint:disable=protected-access
    self._serializable_dataset_fn = serializable_dataset_fn
  @property
  def serializable_dataset_fn(self):
    # Returns the composed (preprocessing) dataset function.
    return self._serializable_dataset_fn
  @property
  def client_ids(self):
    # Delegates directly to the wrapped ClientData.
    return self._underlying_client_data.client_ids
  def create_tf_dataset_for_client(self, client_id: str) -> tf.data.Dataset:
    # Eager counterpart of `serializable_dataset_fn`: fetch then preprocess.
    return self._preprocess_fn(
        self._underlying_client_data.create_tf_dataset_for_client(client_id))
  @property
  def element_type_structure(self):
    # Element spec of the *preprocessed* datasets (computed in __init__).
    return self._element_type_structure
class ConcreteSerializableClientData(SerializableClientData):
  """A generic `SerializableClientData` object.
  This is a simple implementation of `SerializableClientData`, where datasets
  are specified as a function from `client_id` to a `tf.data.Dataset`, where
  this function must be serializable by a `tf.function`.
  """
  def __init__(  # pylint: disable=super-init-not-called
      self,
      client_ids: Iterable[str],
      serializable_dataset_fn: Callable[[str], tf.data.Dataset],
  ):
    """Creates a `SerializableClientData` from clients and a mapping function.
    Args:
      client_ids: A non-empty iterable of `string` objects, representing ids for
        each client.
      serializable_dataset_fn: A function that takes as input a `string`, and
        returns a `tf.data.Dataset`. This must be traceable by TF and TFF. That
        is, it must be compatible with both `tf.function` and `tff.Computation`
        wrappers.
    """
    py_typecheck.check_type(client_ids, collections.abc.Iterable)
    py_typecheck.check_callable(serializable_dataset_fn)
    if not client_ids:
      raise ValueError('At least one client_id is required.')
    self._client_ids = list(client_ids)
    self._serializable_dataset_fn = serializable_dataset_fn
    # Build one example dataset eagerly so the element spec is available
    # immediately via `element_type_structure`.
    example_dataset = serializable_dataset_fn(next(iter(client_ids)))
    self._element_type_structure = example_dataset.element_spec
  @property
  def client_ids(self) -> List[str]:
    return self._client_ids
  @property
  def serializable_dataset_fn(self):
    return self._serializable_dataset_fn
  @property
  def element_type_structure(self):
    return self._element_type_structure
| [
"tensorflow.data.Dataset.from_tensor_slices",
"tensorflow_federated.python.common_libs.py_typecheck.check_type",
"tensorflow_federated.python.common_libs.py_typecheck.check_callable",
"tensorflow_federated.python.core.api.computations.tf_computation",
"numpy.random.RandomState"
] | [((4134, 4180), 'tensorflow.data.Dataset.from_tensor_slices', 'tf.data.Dataset.from_tensor_slices', (['client_ids'], {}), '(client_ids)\n', (4168, 4180), True, 'import tensorflow as tf\n'), ((4681, 4723), 'tensorflow_federated.python.common_libs.py_typecheck.check_callable', 'py_typecheck.check_callable', (['preprocess_fn'], {}), '(preprocess_fn)\n', (4708, 4723), False, 'from tensorflow_federated.python.common_libs import py_typecheck\n'), ((6107, 6178), 'tensorflow_federated.python.common_libs.py_typecheck.check_type', 'py_typecheck.check_type', (['underlying_client_data', 'SerializableClientData'], {}), '(underlying_client_data, SerializableClientData)\n', (6130, 6178), False, 'from tensorflow_federated.python.common_libs import py_typecheck\n'), ((6183, 6225), 'tensorflow_federated.python.common_libs.py_typecheck.check_callable', 'py_typecheck.check_callable', (['preprocess_fn'], {}), '(preprocess_fn)\n', (6210, 6225), False, 'from tensorflow_federated.python.common_libs import py_typecheck\n'), ((8289, 8350), 'tensorflow_federated.python.common_libs.py_typecheck.check_type', 'py_typecheck.check_type', (['client_ids', 'collections.abc.Iterable'], {}), '(client_ids, collections.abc.Iterable)\n', (8312, 8350), False, 'from tensorflow_federated.python.common_libs import py_typecheck\n'), ((8355, 8407), 'tensorflow_federated.python.common_libs.py_typecheck.check_callable', 'py_typecheck.check_callable', (['serializable_dataset_fn'], {}), '(serializable_dataset_fn)\n', (8382, 8407), False, 'from tensorflow_federated.python.common_libs import py_typecheck\n'), ((2902, 2940), 'tensorflow_federated.python.core.api.computations.tf_computation', 'computations.tf_computation', (['tf.string'], {}), '(tf.string)\n', (2929, 2940), False, 'from tensorflow_federated.python.core.api import computations\n'), ((4060, 4092), 'numpy.random.RandomState', 'np.random.RandomState', ([], {'seed': 'seed'}), '(seed=seed)\n', (4081, 4092), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
Created on Fri Apr 22 09:51:41 2016
@author: mmorel
"""
import numpy as np
import cv2
from actionRecognize import ActionRecognize
def get3dPCA(vcube, dimens):
    """Compute PCA eigenvector bases for the three axis-aligned slicings
    (XY, XT, YT) of a video cube.

    Args:
        vcube: numpy array video cube. NOTE(review): the reshapes below
            assume dimens == (x, y, t) while the array itself appears to be
            indexed (t, y, x) — confirm against callers.
        dimens: (x, y, t) tuple of cube dimensions.

    Returns:
        Tuple (eigenXY, eigenXT, eigenYT) of PCA eigenvector matrices from
        cv2.PCACompute, one per slicing.
    """
    x,y,t = dimens
    # Flatten each slicing into a 2D (samples x features) matrix for PCA.
    sliceXY = np.swapaxes(vcube,1,2).reshape(t,y*x,order='F')#.swapaxes(0,1)
    #sliceXYn = sliceXY / sliceXY.max(axis=0)
    sliceXT = np.swapaxes(vcube,0,2).reshape(x,y*t)#.swapaxes(0,1)
    #sliceXTn = sliceXT / sliceXT.max(axis=0)
    sliceYT = np.swapaxes(vcube,0,1).reshape(y,x*t,order='F')#.swapaxes(0,1)
    #sliceYTn = sliceYT / sliceYT.max(axis=0)
    # cv2.PCACompute returns (mean, eigenvectors); the means are unused.
    mean1, eigenXY = cv2.PCACompute(sliceXY, None)
    mean2, eigenXT = cv2.PCACompute(sliceXT, None)
    mean3, eigenYT = cv2.PCACompute(sliceYT, None)
    #print (repr(eigenXY))
    return (eigenXY,eigenXT,eigenYT)
def getPrincipleAngle(eigVecs1, eigVecs2):
    """Return the largest singular value of eigVecs1 * eigVecs2^T.

    With orthonormal eigenvector rows this is the cosine of the smallest
    principal angle between the two subspaces.
    """
    # GEMM_2_T transposes the second operand: overlap = eigVecs1 @ eigVecs2.T
    overlap = cv2.gemm(eigVecs1, eigVecs2, 1, None, 1, flags=cv2.GEMM_2_T)
    singular_values, _, _ = cv2.SVDecomp(overlap)
    # SVDecomp returns singular values sorted descending; take the largest.
    return float(singular_values[0])
def getAveragePrincipleAngle(eigVec3d1, eigVec3d2):
    """Sum of absolute principal-angle cosines over the XY, XT and YT planes.

    NOTE: despite the name, the value is the *sum* over the three planes
    (the original "/ 3.0" averaging was commented out).
    """
    return sum(
        abs(getPrincipleAngle(basis_a, basis_b))
        for basis_a, basis_b in zip(eigVec3d1, eigVec3d2)
    )
def main(arg=None):
    """Smoke test: compare the PCA eigenbases of two small video cubes."""
    dimens = (2,3,4)
    frames = [
        [[1,2],[3,4],[5,6]],
        [[7,8],[9,10],[11,12]],
        [[13,14],[15,16],[17,18]],
        [[19,20],[21,22],[23,24]],
    ]
    vcube = np.array(frames, dtype=np.float32)
    # cube1 holds the same values as vcube; cube2 is a binary pattern cube.
    cube1 = np.array(frames, dtype=np.float32)
    cube2 = np.array(
        [[[1,1],[1,1],[1,0]],
         [[1,1],[1,0],[1,1]],
         [[0,1],[1,1],[1,1]],
         [[1,1],[0,1],[1,1]]],
        dtype=np.float32,
    )
    pca1 = get3dPCA(cube1, dimens)
    pca2 = get3dPCA(cube2, dimens)
    pa = getAveragePrincipleAngle(pca1, pca2)
    print(repr(pa))
    #actionRecognizer = ActionRecognize('actions/',(50,100,45))
if __name__ == '__main__':
    main()
"cv2.SVDecomp",
"numpy.swapaxes",
"numpy.array",
"cv2.gemm",
"cv2.PCACompute"
] | [((598, 627), 'cv2.PCACompute', 'cv2.PCACompute', (['sliceXY', 'None'], {}), '(sliceXY, None)\n', (612, 627), False, 'import cv2\n'), ((650, 679), 'cv2.PCACompute', 'cv2.PCACompute', (['sliceXT', 'None'], {}), '(sliceXT, None)\n', (664, 679), False, 'import cv2\n'), ((704, 733), 'cv2.PCACompute', 'cv2.PCACompute', (['sliceYT', 'None'], {}), '(sliceYT, None)\n', (718, 733), False, 'import cv2\n'), ((931, 991), 'cv2.gemm', 'cv2.gemm', (['eigVecs1', 'eigVecs2', '(1)', 'None', '(1)'], {'flags': 'cv2.GEMM_2_T'}), '(eigVecs1, eigVecs2, 1, None, 1, flags=cv2.GEMM_2_T)\n', (939, 991), False, 'import cv2\n'), ((1034, 1051), 'cv2.SVDecomp', 'cv2.SVDecomp', (['evm'], {}), '(evm)\n', (1046, 1051), False, 'import cv2\n'), ((1661, 1695), 'numpy.array', 'np.array', (['frames'], {'dtype': 'np.float32'}), '(frames, dtype=np.float32)\n', (1669, 1695), True, 'import numpy as np\n'), ((1707, 1858), 'numpy.array', 'np.array', (['[[[1, 2], [3, 4], [5, 6]], [[7, 8], [9, 10], [11, 12]], [[13, 14], [15, 16],\n [17, 18]], [[19, 20], [21, 22], [23, 24]]]'], {'dtype': 'np.float32'}), '([[[1, 2], [3, 4], [5, 6]], [[7, 8], [9, 10], [11, 12]], [[13, 14],\n [15, 16], [17, 18]], [[19, 20], [21, 22], [23, 24]]], dtype=np.float32)\n', (1715, 1858), True, 'import numpy as np\n'), ((1849, 1986), 'numpy.array', 'np.array', (['[[[1, 1], [1, 1], [1, 0]], [[1, 1], [1, 0], [1, 1]], [[0, 1], [1, 1], [1, 1\n ]], [[1, 1], [0, 1], [1, 1]]]'], {'dtype': 'np.float32'}), '([[[1, 1], [1, 1], [1, 0]], [[1, 1], [1, 0], [1, 1]], [[0, 1], [1, \n 1], [1, 1]], [[1, 1], [0, 1], [1, 1]]], dtype=np.float32)\n', (1857, 1986), True, 'import numpy as np\n'), ((222, 246), 'numpy.swapaxes', 'np.swapaxes', (['vcube', '(1)', '(2)'], {}), '(vcube, 1, 2)\n', (233, 246), True, 'import numpy as np\n'), ((349, 373), 'numpy.swapaxes', 'np.swapaxes', (['vcube', '(0)', '(2)'], {}), '(vcube, 0, 2)\n', (360, 373), True, 'import numpy as np\n'), ((465, 489), 'numpy.swapaxes', 'np.swapaxes', (['vcube', '(0)', '(1)'], {}), '(vcube, 0, 
1)\n', (476, 489), True, 'import numpy as np\n')] |
import torch as th
import torch.nn.functional as F
import numpy as np
import wandb
import argparse
import yaml
import os
import torch_scatter as th_s
from tqdm import tqdm
import copy
from dataset import BaseDataset, data_to_device
from model import Predictor
from misc_utils import np_temp_seed, th_temp_seed, booltype, DummyContext, DummyScaler
from plot_utils import *
from losses import get_loss_func, get_sim_func
def run_train_epoch(step,epoch,model,dl_d,run_d,use_wandb,optimizer,amp_context,scaler,stdout=True):
    """Run one training epoch over the primary train dataloader.

    Returns (step, epoch, {}) where `step` has been advanced by the number
    of batches processed. Gradients are scaled via `scaler` to support AMP.
    """
    # stuff related to device
    dev = th.device(run_d["device"])
    nb = run_d["non_blocking"]
    # set up loss func
    loss_func = get_loss_func(run_d["loss"])
    # train
    model.train()
    for b_idx, b in tqdm(enumerate(dl_d["primary"]["train"]),desc="> train",total=len(dl_d["primary"]["train"])):
        optimizer.zero_grad()
        b = data_to_device(b,dev,nb)
        with amp_context:
            b_pred = model(b)
            b_targ = b["spec"]
            b_loss = loss_func(b_pred,b_targ)
            # Both aggregations are computed; which one is backpropagated is
            # selected by run_d["batch_loss_agg"] below.
            b_mean_loss = th.mean(b_loss,dim=0)
            b_sum_loss = th.sum(b_loss,dim=0)
        if run_d["batch_loss_agg"] == "mean":
            scaler.scale(b_mean_loss).backward()
        else:
            assert run_d["batch_loss_agg"] == "sum"
            scaler.scale(b_sum_loss).backward()
        scaler.step(optimizer)
        scaler.update()
        step += 1
    # clear any remaining gradients before returning
    optimizer.zero_grad()
    return step, epoch, {}
def run_val(step,epoch,model,dl_d,run_d,use_wandb,amp_context,stdout=True):
    """Evaluate the model on the primary validation dataloader.

    Computes per-spectrum loss/similarity and, by grouping spectra that share
    a mol_id, per-molecule averages. Optionally logs metrics and similarity
    histograms to wandb. Returns (step, epoch, out_d) where out_d holds
    predictions, targets, ids, and the aggregate metrics.
    """
    # stuff related to device
    dev = th.device(run_d["device"])
    nb = run_d["non_blocking"]
    # set up loss func
    loss_func = get_loss_func(run_d["loss"])
    sim_func = get_sim_func(run_d["sim"])
    # validation
    model.eval()
    pred, targ, sim, loss, mol_id = [], [], [], [], []
    with th.no_grad():
        for b_idx, b in tqdm(enumerate(dl_d["primary"]["val"]),desc="> val",total=len(dl_d["primary"]["val"])):
            b = data_to_device(b,dev,nb)
            with amp_context:
                b_pred = model(b)
            b_targ = b["spec"]
            b_loss = loss_func(b_pred,b_targ)
            b_sim = sim_func(b_pred,b_targ)
            b_mol_id = b["mol_id"]
            pred.append(b_pred.detach().to("cpu",non_blocking=nb))
            targ.append(b_targ.detach().to("cpu",non_blocking=nb))
            loss.append(b_loss.detach().to("cpu",non_blocking=nb))
            sim.append(b_sim.detach().to("cpu",non_blocking=nb))
            mol_id.append(b_mol_id.detach().to("cpu",non_blocking=nb))
    pred = th.cat(pred,dim=0)
    targ = th.cat(targ,dim=0)
    spec_loss = th.cat(loss,dim=0)
    spec_sim = th.cat(sim,dim=0)
    mol_id = th.cat(mol_id,dim=0)
    # Average per-spectrum metrics over spectra sharing the same mol_id.
    un_mol_id, un_mol_idx = th.unique(mol_id,dim=0,return_inverse=True)
    mol_loss = th_s.scatter_mean(spec_loss,un_mol_idx,dim=0,dim_size=un_mol_id.shape[0])
    mol_sim = th_s.scatter_mean(spec_sim,un_mol_idx,dim=0,dim_size=un_mol_id.shape[0])
    spec_mean_loss = th.mean(spec_loss,dim=0)
    spec_mean_sim = th.mean(spec_sim,dim=0)
    mol_mean_loss = th.mean(mol_loss,dim=0)
    mol_mean_sim = th.mean(mol_sim,dim=0)
    out_d = {
        "pred": pred.float(),
        "targ": targ.float(),
        "mol_id": mol_id,
        "spec_sim": spec_sim.float(), # for tracking
        "spec_mean_loss": spec_mean_loss.float(),
        "mol_mean_loss": mol_mean_loss.float(),
        "spec_mean_sim": spec_mean_sim.float(),
        "mol_mean_sim": mol_mean_sim.float()
    }
    spec_sim_hist = plot_sim_hist(run_d["sim"],spec_sim.numpy())
    mol_sim_hist = plot_sim_hist(run_d["sim"],mol_sim.numpy())
    if stdout:
        print(f"> step {step}, epoch {epoch}: val, spec_mean_loss = {spec_mean_loss:.4}, mol_mean_loss = {mol_mean_loss:.4}")
    if use_wandb:
        log_dict = {
            "Epoch": epoch,
            "val_spec_loss": spec_mean_loss,
            "val_spec_sim": spec_mean_sim,
            "val_mol_loss": mol_mean_loss,
            "val_mol_sim": mol_mean_sim
        }
        if run_d["save_media"]:
            log_dict["val_spec_sim_hist"] = wandb.Image(spec_sim_hist)
            log_dict["val_mol_sim_hist"] = wandb.Image(mol_sim_hist)
        # commit=False: the epoch-level wandb.log in train() commits the row
        wandb.log(log_dict, commit=False)
    return step, epoch, out_d
def run_track(step,epoch,model,dl_d,run_d,use_wandb,ds,data_d,score_d,stdout=True):
    """Plot tracked validation spectra (random / best / worst by similarity).

    Selects the top-k and bottom-k validation spectra by `score_d["spec_sim"]`
    plus random samples, re-runs the model on them, and logs annotated
    spectrum plots to wandb. No-op unless save_media and num_track > 0.
    Returns (step, epoch, {}).
    """
    # stuff related to device
    dev = th.device(run_d["device"])
    nb = run_d["non_blocking"]
    # set up loss func
    loss_func = get_loss_func(run_d["loss"])
    sim_func = get_sim_func(run_d["sim"])
    # tracking
    if run_d["save_media"] and run_d["num_track"] > 0:
        model.to(dev)
        model.eval()
        # get top/bottom k similarity
        topk, argtopk = th.topk(score_d["spec_sim"],run_d["num_track"],largest=True)
        bottomk, argbottomk = th.topk(score_d["spec_sim"],run_d["num_track"],largest=False)
        topk_str = "[" + ",".join([f"{val:.2f}" for val in topk.tolist()]) + "]"
        bottomk_str = "[" + ",".join([f"{val:.2f}" for val in bottomk.tolist()]) + "]"
        if stdout:
            print(f"> tracking: topk = {topk_str} , bottomk = {bottomk_str}")
        val_idx = dl_d["primary"]["val"].dataset.indices
        track_dl_d = ds.get_track_dl(
            val_idx,
            num_rand_idx=run_d["num_track"],
            topk_idx=argtopk.numpy(),
            bottomk_idx=argbottomk.numpy()
        )
        for dl_type,dl in track_dl_d.items():
            for d_idx, d in enumerate(dl):
                # import pdb; pdb.set_trace()
                d = data_to_device(d,dev,nb)
                pred = model(d)
                targ = d["spec"]
                loss = loss_func(pred,targ)
                sim = sim_func(pred,targ)
                # batch size is 1 here, hence the [0] indexing
                smiles = d["smiles"][0]
                prec_mz_bin = d["prec_mz_bin"][0]
                if "cfm_mbs" in d:
                    cfm_mbs = d["cfm_mbs"][0]
                else:
                    cfm_mbs = None
                if "simple_mbs" in d:
                    simple_mbs = d["simple_mbs"][0]
                else:
                    simple_mbs = None
                targ = targ.cpu().detach().numpy()
                if run_d["pred_viz"]:
                    pred = pred.cpu().detach().numpy()
                else:
                    # suppress the prediction trace by plotting zeros
                    pred = np.zeros_like(targ)
                loss = loss.item()
                sim = sim.item()
                plot_data = plot_spec(
                    targ,pred,
                    data_d["mz_max"],
                    data_d["mz_bin_res"],
                    loss_type=run_d["loss"],
                    loss=loss,
                    sim_type=run_d["sim"],
                    sim=sim,
                    prec_mz_bin=prec_mz_bin,
                    smiles=smiles,
                    cfm_mbs=cfm_mbs,
                    simple_mbs=simple_mbs,
                    plot_title=run_d["track_plot_title"]
                )
                if use_wandb:
                    # NOTE(review): rstrip("") strips nothing — likely a
                    # leftover; confirm the intended suffix to strip.
                    dl_type = dl_type.rstrip("")
                    log_dict = {
                        "Epoch": epoch,
                        f"{dl_type}_{d_idx}": wandb.Image(plot_data)
                    }
                    wandb.log(log_dict, commit=False)
    return step, epoch, {}
def run_test(step,epoch,model,dl_d,run_d,use_wandb,amp_context,exclude=["train","val"],stdout=True):
    """Evaluate the model on all primary/secondary dataloaders not in `exclude`.

    For each split, computes per-spectrum and per-molecule loss/similarity
    and optionally logs metrics and histograms to wandb. No-op (empty out_d)
    unless run_d["do_test"]. Returns (step, epoch, out_d).

    NOTE(review): `exclude` uses a mutable default list; it is never mutated
    here so behavior is fine, but a tuple default would be safer.
    """
    # stuff related to device
    dev = th.device(run_d["device"])
    nb = run_d["non_blocking"]
    # set up loss func
    loss_func = get_loss_func(run_d["loss"])
    sim_func = get_sim_func(run_d["sim"])
    # test
    if run_d["do_test"]:
        model.to(dev)
        model.eval()
        out_d = {}
        for order in ["primary","secondary"]:
            out_d[order] = {}
            for dl_key,dl in dl_d[order].items():
                if dl_key in exclude:
                    continue
                pred, targ, sim, loss, mol_id = [], [], [], [], []
                with th.no_grad():
                    for b_idx, b in tqdm(enumerate(dl),desc=f"> {dl_key}",total=len(dl)):
                        b = data_to_device(b,dev,nb)
                        with amp_context:
                            b_pred = model(b)
                        b_targ = b["spec"]
                        b_loss = loss_func(b_pred,b_targ)
                        b_sim = sim_func(b_pred,b_targ)
                        b_mol_id = b["mol_id"]
                        pred.append(b_pred.detach().to("cpu",non_blocking=nb))
                        targ.append(b_targ.detach().to("cpu",non_blocking=nb))
                        loss.append(b_loss.detach().to("cpu",non_blocking=nb))
                        sim.append(b_sim.detach().to("cpu",non_blocking=nb))
                        mol_id.append(b_mol_id.detach().to("cpu",non_blocking=nb))
                pred = th.cat(pred,dim=0)
                targ = th.cat(targ,dim=0)
                spec_loss = th.cat(loss,dim=0)
                spec_sim = th.cat(sim,dim=0)
                mol_id = th.cat(mol_id,dim=0)
                # Aggregate per-spectrum metrics to per-molecule means.
                un_mol_id, un_mol_idx = th.unique(mol_id,dim=0,return_inverse=True)
                mol_loss = th_s.scatter_mean(spec_loss,un_mol_idx,dim=0,dim_size=un_mol_id.shape[0])
                mol_sim = th_s.scatter_mean(spec_sim,un_mol_idx,dim=0,dim_size=un_mol_id.shape[0])
                spec_mean_loss = th.mean(spec_loss,dim=0)
                spec_mean_sim = th.mean(spec_sim,dim=0)
                mol_mean_loss = th.mean(mol_loss,dim=0)
                mol_mean_sim = th.mean(mol_sim,dim=0)
                out_d[order][dl_key] = {
                    "pred": pred.float(),
                    "targ": targ.float(),
                    "mol_id": mol_id
                }
                spec_sim_hist = plot_sim_hist(run_d["sim"],spec_sim.numpy())
                mol_sim_hist = plot_sim_hist(run_d["sim"],mol_sim.numpy())
                if stdout:
                    print(f"> {dl_key}, spec_mean_loss = {spec_mean_loss:.4}, mol_mean_loss = {mol_mean_loss:.4}")
                if use_wandb:
                    log_dict = {
                        f"{dl_key}_spec_loss": spec_mean_loss,
                        f"{dl_key}_spec_sim": spec_mean_sim,
                        f"{dl_key}_mol_loss": mol_mean_loss,
                        f"{dl_key}_mol_sim": mol_mean_sim,
                    }
                    if run_d["save_media"]:
                        log_dict[f"{dl_key}_spec_sim_hist"] = wandb.Image(spec_sim_hist)
                        log_dict[f"{dl_key}_mol_sim_hist"] = wandb.Image(mol_sim_hist)
                    wandb.log(log_dict, commit=False)
    else:
        out_d = {}
    return step, epoch, out_d
def run_match_2(step,epoch,model,dl_d,run_d,use_wandb,amp_context,stdout=True):
    """Spectrum-library matching evaluation ("matching 2").

    For each query split, ranks each real query spectrum against a reference
    library of spectra (the query's own *predicted* spectra plus real spectra
    from the reference splits), restricted to candidates with the same
    precursor type and collision energy. Reports mean rank, normalized rank,
    and recall@{1,5,10}% per spectrum and per molecule. No-op unless
    run_d["do_matching_2"]. Requires cosine similarity.
    Returns (step, epoch, out_d).
    """
    # TODO: make this function more general!
    # stuff related to device
    dev = th.device(run_d["device"])
    nb = run_d["non_blocking"]
    # set up loss func
    sim_func = get_sim_func(run_d["sim"])
    # ranking below computes cosine sim via normalized dot products
    assert run_d["sim"] == "cos"
    # match
    if run_d["do_matching_2"]:
        model.to(dev)
        model.eval()
        out_d = {}
        # rename dl_d
        dl_d = dl_d.copy()
        dl_d["primary"]["train"] = dl_d["primary"]["train_2"]
        del dl_d["primary"]["train_2"]
        ds_d = {}
        for order in ["primary","secondary"]:
            ds_d[order] = {}
            for dl_key in dl_d[order].keys():
                ds_d[order][dl_key] = {
                    "real_spec": [],
                    "pred_spec": [],
                    "spec_ids": [],
                    "mol_ids": [],
                    "prec_types": [],
                    "col_energies": []
                }
        # each split is (query_ds, ref_dses): query ranked against the refs
        splits = [(("primary","test"),(("primary","val"),("primary","train")))]
        for dl_key in dl_d["secondary"].keys():
            query = ("secondary",dl_key)
            ref = (("primary","train"),("primary","val"),("primary","test"))
            splits.append((query,ref))
        # print(splits)
        base_ds = dl_d["primary"]["train"].dataset.dataset
        for order in ["primary","secondary"]:
            for dl_key in ds_d[order].keys():
                real_spec, pred_spec, spec_ids, mol_ids, prec_types, col_energies = [], [], [], [], [], []
                dl = dl_d[order][dl_key]
                with th.no_grad():
                    for b_idx, b in tqdm(enumerate(dl),desc=f"> collecting {order} {dl_key}",total=len(dl)):
                        b_targ = b["spec"]
                        b_spec_id = b["spectrum_id"]
                        b_mol_id = b["mol_id"]
                        b_prec_type = b["prec_type"]
                        b_col_energy = b["col_energy"]
                        real_spec.append(b_targ.cpu())
                        spec_ids.append(b_spec_id.cpu())
                        mol_ids.append(b_mol_id.cpu())
                        prec_types.extend(b_prec_type)
                        col_energies.extend(b_col_energy)
                        # predictions are only needed for query splits
                        if not (order == "primary" and dl_key in ["train","val"]):
                            b = data_to_device(b,dev,nb)
                            with amp_context:
                                b_pred = model(b)
                            pred_spec.append(b_pred.cpu())
                cur_d = ds_d[order][dl_key]
                cur_d["real_spec"] = th.cat(real_spec,dim=0)
                cur_d["spec_ids"] = th.cat(spec_ids,dim=0)
                cur_d["mol_ids"] = th.cat(mol_ids,dim=0)
                cur_d["prec_types"] = th.tensor([base_ds.prec_type_c2i[prec_type] for prec_type in prec_types])
                cur_d["col_energies"] = th.tensor(col_energies)
                if len(pred_spec) > 0:
                    cur_d["pred_spec"] = th.cat(pred_spec,dim=0)
        for split in splits:
            query_ds = split[0]
            ref_dses = split[1]
            r_spec, r_spec_ids, r_mol_ids, r_prec_types, r_col_energies = [], [], [], [], []
            q_spec, q_spec_ids, q_mol_ids, q_prec_types, q_col_energies = [], [], [], [], []
            # set up query
            q_order, q_ds_key = query_ds
            query_d = ds_d[q_order][q_ds_key]
            # the query split's *predicted* spectra join the reference library
            r_spec.append(query_d["pred_spec"])
            r_spec_ids.append(query_d["spec_ids"])
            r_mol_ids.append(query_d["mol_ids"])
            r_prec_types.append(query_d["prec_types"])
            r_col_energies.append(query_d["col_energies"])
            q_spec.append(query_d["real_spec"])
            q_spec_ids.append(query_d["spec_ids"])
            q_mol_ids.append(query_d["mol_ids"])
            q_prec_types.append(query_d["prec_types"])
            q_col_energies.append(query_d["col_energies"])
            # set up ref
            for ref_ds in ref_dses:
                r_order, r_ds_key = ref_ds
                ref_d = ds_d[r_order][r_ds_key]
                r_spec.append(ref_d["real_spec"])
                r_spec_ids.append(ref_d["spec_ids"])
                r_mol_ids.append(ref_d["mol_ids"])
                r_prec_types.append(ref_d["prec_types"])
                r_col_energies.append(ref_d["col_energies"])
            r_spec = th.cat(r_spec,dim=0)
            r_spec_ids = th.cat(r_spec_ids,dim=0)
            r_mol_ids = th.cat(r_mol_ids,dim=0)
            r_prec_types = th.cat(r_prec_types,dim=0)
            r_col_energies = th.cat(r_col_energies,dim=0)
            q_spec = th.cat(q_spec,dim=0)
            q_spec_ids = th.cat(q_spec_ids,dim=0)
            q_mol_ids = th.cat(q_mol_ids,dim=0)
            q_prec_types = th.cat(q_prec_types,dim=0)
            q_col_energies = th.cat(q_col_energies,dim=0)
            # set up dataloader
            q_ds = th.utils.data.TensorDataset(q_spec,q_spec_ids,q_prec_types,q_col_energies)
            q_dl = th.utils.data.DataLoader(
                q_ds,
                num_workers=0,
                batch_size=200,
                drop_last=False,
                shuffle=False,
                pin_memory=True
            )
            q_ranks, q_norm_ranks, q_mean_sims = [], [], []
            # send r stuff to device
            r_spec = F.normalize(r_spec,p=2,dim=1).to(dev,non_blocking=nb)
            r_spec_ids = r_spec_ids.to(dev,non_blocking=nb)
            r_prec_types = r_prec_types.to(dev,non_blocking=nb)
            r_col_energies = r_col_energies.to(dev,non_blocking=nb)
            # import pdb; pdb.set_trace()
            # r = b_r_spec.shape[0]
            # q = b_q_spec.shape[0]
            # b_r_spec = r_spec.repeat(q,1)
            # b_q_spec = b_q_spec.repeat(r,1)
            # b_q_sims = sim_func(b_q_spec,b_r_spec).reshape(q,r)
            for b_idx, b in tqdm(enumerate(q_dl),desc=f"> m2 {q_ds_key}",total=len(q_dl)):
                b_q_spec = b[0].to(dev,non_blocking=nb)
                b_q_spec_ids = b[1].to(dev,non_blocking=nb)
                b_q_prec_types = b[2].to(dev,non_blocking=nb)
                b_q_col_energies = b[3].to(dev,non_blocking=nb)
                # candidate mask: same precursor type AND same collision energy
                b_q_prec_types_mask = b_q_prec_types.unsqueeze(1)==r_prec_types.unsqueeze(0)
                b_q_col_energies_mask = th.isclose(b_q_col_energies.unsqueeze(1),r_col_energies.unsqueeze(0))
                b_q_mask = b_q_prec_types_mask&b_q_col_energies_mask
                # every query must have at least one candidate (its own prediction)
                assert th.all(th.any(b_q_mask,dim=1))
                b_q_spec = F.normalize(b_q_spec,p=2,dim=1)
                b_q_sims = th.matmul(b_q_spec,r_spec.T)
                # masked-out candidates get sim -1 so they sort last
                b_q_sims = b_q_mask.float()*b_q_sims+((~b_q_mask).float())*-1.
                # tiny random jitter breaks ties before sorting
                b_q_sort = th.argsort(-b_q_sims+0.00001*th.rand_like(b_q_sims),dim=1)
                b_q_match = (r_spec_ids[b_q_sort]==b_q_spec_ids.unsqueeze(1)).float()
                b_q_num_sims = th.sum(b_q_mask.float(),dim=1)
                # rank of the matching spectrum id (1-based)
                b_q_rank = (th.argmax(b_q_match,dim=1)+1).float()
                b_q_norm_rank = (b_q_rank-1.) / th.clamp(b_q_num_sims-1.,1.)
                b_q_sum_sims = th.sum(b_q_mask.float()*b_q_sims,dim=1)
                b_q_mean_sims = b_q_sum_sims / b_q_num_sims
                q_ranks.append(b_q_rank.cpu())
                q_norm_ranks.append(b_q_norm_rank.cpu())
                q_mean_sims.append(b_q_mean_sims.cpu())
            spec_rank = th.cat(q_ranks,dim=0)
            spec_norm_rank = th.cat(q_norm_ranks,dim=0)
            spec_mean_sim = th.cat(q_mean_sims,dim=0)
            assert th.all(spec_norm_rank<=1.) and th.all(spec_norm_rank>=0.), (th.min(spec_norm_rank),th.max(spec_norm_rank))
            assert th.all(spec_mean_sim<=1.) and th.all(spec_mean_sim>=0.), (th.min(spec_mean_sim),th.max(spec_mean_sim))
            # aggregate spectrum-level ranks to molecule-level means
            un_mol_ids, mol_idx = th.unique(q_mol_ids,return_inverse=True)
            mol_rank = th_s.scatter_mean(spec_rank,mol_idx,dim=0,dim_size=un_mol_ids.shape[0])
            mol_norm_rank = th_s.scatter_mean(spec_norm_rank,mol_idx,dim=0,dim_size=un_mol_ids.shape[0])
            mol_mean_sim = th_s.scatter_mean(spec_mean_sim,mol_idx,dim=0,dim_size=un_mol_ids.shape[0])
            spec_mean_rank = th.mean(spec_rank)
            spec_mean_norm_rank = th.mean(spec_norm_rank)
            spec_recall_at_1 = th.mean((spec_norm_rank<=0.01).float())
            spec_recall_at_5 = th.mean((spec_norm_rank<=0.05).float())
            spec_recall_at_10 = th.mean((spec_norm_rank<=0.10).float())
            mol_mean_rank = th.mean(mol_rank)
            mol_mean_norm_rank = th.mean(mol_norm_rank)
            mol_recall_at_1 = th.mean((mol_norm_rank<=0.01).float())
            mol_recall_at_5 = th.mean((mol_norm_rank<=0.05).float())
            mol_recall_at_10 = th.mean((mol_norm_rank<=0.10).float())
            if stdout:
                print(f"> by spectrum_id: mean_rank = {spec_mean_rank:.2f}, recall @1 = {spec_recall_at_1:.2f}, @5 = {spec_recall_at_5:.2f}, @10 = {spec_recall_at_10:.2f}")
                print(f"> by mol_id: mean_rank = {mol_mean_rank:.2f}, recall @1 = {mol_recall_at_1:.2f}, @5 = {mol_recall_at_5:.2f}, @10 = {mol_recall_at_10:.2f}")
            spec_cand_sim_mean_hist = plot_cand_sim_mean_hist(spec_mean_sim.numpy())
            mol_cand_sim_mean_hist = plot_cand_sim_mean_hist(mol_mean_sim.numpy())
            if use_wandb:
                log_dict = {
                    f"m2_{q_ds_key}_spec_rank": spec_mean_rank,
                    f"m2_{q_ds_key}_spec_norm_rank": spec_mean_norm_rank,
                    f"m2_{q_ds_key}_spec_top1%": spec_recall_at_1,
                    f"m2_{q_ds_key}_spec_top5%": spec_recall_at_5,
                    f"m2_{q_ds_key}_spec_top10%": spec_recall_at_10,
                    f"m2_{q_ds_key}_mol_rank": mol_mean_rank,
                    f"m2_{q_ds_key}_mol_norm_rank": mol_mean_norm_rank,
                    f"m2_{q_ds_key}_mol_top1%": mol_recall_at_1,
                    f"m2_{q_ds_key}_mol_top5%": mol_recall_at_5,
                    f"m2_{q_ds_key}_mol_top10%": mol_recall_at_10
                }
                if run_d["save_media"]:
                    log_dict[f"m2_{q_ds_key}_spec_cand_sim_mean_hist"] = wandb.Image(spec_cand_sim_mean_hist)
                    log_dict[f"m2_{q_ds_key}_mol_cand_sim_mean_hist"] = wandb.Image(mol_cand_sim_mean_hist)
                wandb.log(log_dict, commit=False)
    else:
        out_d = {}
    return step, epoch, out_d
def get_ds_model(data_d,model_d,run_d):
    """Construct the dataset and model from config dicts.

    The set of dataset feature types is derived from the configured embed
    types; model initialization is done under a fixed seed for
    reproducibility. Returns (ds, model) with the model moved to the run
    device.
    """
    with th_temp_seed(model_d["model_seed"]):
        # embedder type -> dataset feature type it consumes
        embed_to_dset = {
            "fp": "fp",
            "gat": "graph",
            "wln": "graph",
            "gin_pt": "graph",
            "cnn": "seq",
            "tf": "seq",
            "gf": "gf",
        }
        dset_types = set()
        for embed_type in model_d["embed_types"]:
            if embed_type not in embed_to_dset:
                raise ValueError(f"invalid embed_type {embed_type}")
            dset_types.add(embed_to_dset[embed_type])
        dset_types = list(dset_types)
        assert len(dset_types)>0, dset_types
        ds = BaseDataset(*dset_types,**data_d)
        dim_d = ds.get_data_dims()
        model = Predictor(dim_d,**model_d)
    dev = th.device(run_d["device"])
    model.to(dev)
    return ds, model
def train(data_d,model_d,run_d,use_wandb):
    """Full training driver: seeds, data, optimizer/scheduler, epoch loop.

    Trains with early stopping on the configured validation loss, optionally
    checkpointing/resuming via wandb run files, then runs the final test and
    matching evaluations with the best weights restored.
    """
    # set seeds
    th.manual_seed(run_d["train_seed"])
    np.random.seed(run_d["train_seed"]//2)
    # set parallel strategy
    if run_d["parallel_strategy"] == "fd":
        parallel_strategy = "file_descriptor"
    else:
        parallel_strategy = "file_system"
    th.multiprocessing.set_sharing_strategy(parallel_strategy)
    # set determinism (this seems to only affect CNN)
    if run_d["cuda_deterministic"]:
        os.environ["CUBLAS_WORKSPACE_CONFIG"] = ":4096:8"
    th.use_deterministic_algorithms(run_d["cuda_deterministic"])
    # load dataset, set up model
    ds, model = get_ds_model(data_d,model_d,run_d)
    # set up dataloader
    dl_d = ds.get_dataloaders(run_d)
    # set up optimizer
    if run_d["optimizer"] == "adam":
        optimizer = th.optim.Adam(model.parameters(),lr=run_d["learning_rate"],weight_decay=run_d["weight_decay"])
    elif run_d["optimizer"] == "adamw":
        optimizer = th.optim.AdamW(model.parameters(),lr=run_d["learning_rate"],weight_decay=run_d["weight_decay"])
    else:
        raise NotImplementedError
    # set up scheduler
    if run_d["scheduler"] == "step":
        scheduler = th.optim.lr_scheduler.StepLR(
            optimizer,
            run_d["scheduler_period"],
            gamma=run_d["scheduler_ratio"]
        )
    elif run_d["scheduler"] == "plateau":
        scheduler = th.optim.lr_scheduler.ReduceLROnPlateau(
            optimizer,
            mode="min",
            patience=run_d["scheduler_period"],
            factor=run_d["scheduler_ratio"]
        )
    else:
        raise NotImplementedError
    # set up amp stuff
    if run_d["amp"]:
        amp_context = th.cuda.amp.autocast()
        scaler = th.cuda.amp.GradScaler()
    else:
        # no-op stand-ins so the train loop is AMP-agnostic
        amp_context = DummyContext()
        scaler = DummyScaler()
    # basic stuff
    best_val_mean_loss = 1000000. # start with really big num
    best_val_mean_sim = 0.
    best_epoch = -1
    best_state_dict = copy.deepcopy(model.state_dict())
    early_stop_count = 0
    early_stop_thresh = run_d["early_stop_thresh"]
    step = 0
    epoch = 0
    dev = th.device(run_d["device"])
    # restore from previous run (if applicable)
    if use_wandb:
        mr_fp = os.path.join(wandb.run.dir,"chkpt.pkl")
        best_fp = os.path.join(wandb.run.dir,"best_chkpt.pkl")
        # temp file enables an atomic save via os.replace below
        temp_mr_fp = os.path.join(wandb.run.dir,"temp_chkpt.pkl")
        if os.path.isfile(mr_fp):
            print(">>> reloading model from most recent checkpoint")
            mr_d = th.load(mr_fp,map_location="cpu")
            model.load_state_dict(mr_d["mr_model_sd"])
            model.to(dev)
            best_state_dict = copy.deepcopy(model.state_dict())
            optimizer.load_state_dict(mr_d["optimizer_sd"])
            scheduler.load_state_dict(mr_d["scheduler_sd"])
            best_val_mean_loss = mr_d["best_val_mean_loss"]
            best_val_mean_sim = mr_d["best_val_mean_sim"]
            best_epoch = mr_d["best_epoch"]
            early_stop_count = mr_d["early_stop_count"]
            step = mr_d["step"]
            epoch = mr_d["epoch"]+1
        elif os.path.isfile(best_fp):
            print(">>> reloading model from best checkpoint")
            best_d = th.load(best_fp,map_location="cpu")
            model.load_state_dict(best_d["best_model_sd"])
            model.to(dev)
            best_state_dict = copy.deepcopy(model.state_dict())
            os.remove(best_fp)
        else:
            print(">>> no checkpoint detected")
            mr_d = {
                "mr_model_sd": model.state_dict(),
                "best_model_sd": best_state_dict,
                "optimizer_sd": optimizer.state_dict(),
                "scheduler_sd": scheduler.state_dict(),
                "best_val_mean_loss": best_val_mean_loss,
                "best_val_mean_sim": best_val_mean_sim,
                "best_epoch": best_epoch,
                "early_stop_count": early_stop_count,
                "step": step,
                "epoch": epoch-1
            }
            if run_d["save_state"]:
                th.save(mr_d,temp_mr_fp)
                os.replace(temp_mr_fp,mr_fp)
                wandb.save("chkpt.pkl")
    while epoch < run_d["num_epochs"]:
        print(f">>> start epoch {epoch}")
        # training, single epoch
        step, epoch, train_d = run_train_epoch(step,epoch,model,dl_d,run_d,use_wandb,optimizer,amp_context,scaler)
        # validation
        step, epoch, val_d = run_val(step,epoch,model,dl_d,run_d,use_wandb,amp_context)
        if run_d["scheduler"] == "step":
            scheduler.step()
        elif run_d["scheduler"] == "plateau":
            scheduler.step(val_d[f'{run_d["stop_key"]}_mean_loss'])
        # tracking
        step, epoch, track_d = run_track(step,epoch,model,dl_d,run_d,use_wandb,ds,data_d,val_d)
        # early stopping
        val_mean_loss = val_d[f'{run_d["stop_key"]}_mean_loss']
        val_mean_sim = val_d[f'{run_d["stop_key"]}_mean_sim']
        print(f"> val loss delta: {val_mean_loss-best_val_mean_loss}")
        if best_val_mean_loss < val_mean_loss:
            early_stop_count += 1
            print(f"> val loss DID NOT decrease, early stop count at {early_stop_count}/{early_stop_thresh}")
        else:
            best_val_mean_loss = val_mean_loss
            best_val_mean_sim = val_mean_sim
            best_epoch = epoch
            early_stop_count = 0
            # update state dicts
            # move to cpu before deepcopy so the snapshot lives on host memory
            model.to("cpu")
            best_state_dict = copy.deepcopy(model.state_dict())
            model.to(dev)
            print("> val loss DID decrease, early stop count reset")
        if early_stop_count == early_stop_thresh:
            print("> early stopping NOW")
            break
        if use_wandb:
            # save model
            mr_d = {
                "mr_model_sd": model.state_dict(),
                "best_model_sd": best_state_dict,
                "optimizer_sd": optimizer.state_dict(),
                "scheduler_sd": scheduler.state_dict(),
                "best_val_mean_loss": best_val_mean_loss,
                "best_val_mean_sim": best_val_mean_sim,
                "best_epoch": best_epoch,
                "early_stop_count": early_stop_count,
                "step": step,
                "epoch": epoch
            }
            if run_d["save_state"]:
                th.save(mr_d,temp_mr_fp)
                os.replace(temp_mr_fp,mr_fp)
                wandb.save("chkpt.pkl")
            # sync wandb
            wandb.log({"commit": epoch}, commit=True)
        epoch += 1
    # evaluate with the best weights restored
    model.load_state_dict(best_state_dict)
    step, epoch, test_d = run_test(step,epoch,model,dl_d,run_d,use_wandb,amp_context)
    model.load_state_dict(best_state_dict)
    step, epoch, match_2_d = run_match_2(step,epoch,model,dl_d,run_d,use_wandb,amp_context)
    if use_wandb:
        # final save, only include the best state (to reduce size of uploads)
        mr_d = {
            "best_model_sd": best_state_dict,
            "best_val_mean_loss": best_val_mean_loss,
            "best_val_mean_sim": best_val_mean_sim,
            "best_epoch": best_epoch
        }
        if run_d["save_state"]:
            # saving to temp first for atomic
            th.save(mr_d,temp_mr_fp)
            os.replace(temp_mr_fp,mr_fp)
            wandb.save("chkpt.pkl")
        # metrics
        log_dict = {
            "best_val_mean_loss": best_val_mean_loss,
            "best_val_mean_sim": best_val_mean_sim,
            "best_epoch": best_epoch
        }
        wandb.log(log_dict, commit=False)
        # sync wandb
        wandb.log({"commit": epoch}, commit=True)
    return
def load_config(template_fp,custom_fp,device_id):
    """
    Build the run configuration from a template YAML file, optionally
    overlaid with a custom YAML file, and optionally override the device.

    Parameters
    ----------
    template_fp : str
        Path to the template config file (must exist).
    custom_fp : str or None
        Optional path to a custom config file. Its ``account_name``,
        ``project_name`` and ``run_name`` replace the template's; every
        other top-level section is merged key-by-key into the template.
    device_id : int or None
        If None, keep the template's device; if negative, force CPU;
        otherwise use ``f"cuda:{device_id}"``.

    Returns
    -------
    tuple
        ``(account_name, project_name, run_name, data_d, model_d, run_d)``
    """
    assert os.path.isfile(template_fp), template_fp
    if custom_fp:
        assert os.path.isfile(custom_fp), custom_fp
    with open(template_fp,"r") as template_file:
        config_d = yaml.load(template_file, Loader=yaml.FullLoader)
    # overwrite parts of the config with the custom file
    if custom_fp:
        with open(custom_fp,"r") as custom_file:
            custom_d = yaml.load(custom_file, Loader=yaml.FullLoader)
        config_d["account_name"] = custom_d["account_name"]
        config_d["project_name"] = custom_d["project_name"]
        if custom_d["run_name"] is None:
            # default the run name to the custom file's basename (sans extension)
            config_d["run_name"] = os.path.splitext(os.path.basename(custom_fp))[0]
        else:
            config_d["run_name"] = custom_d["run_name"]
        # merge the remaining sections (data/model/run/...) key-by-key
        for k,v in custom_d.items():
            if k not in ["account_name","project_name","run_name"]:
                for k2,v2 in v.items():
                    config_d[k][k2] = v2
    account_name = config_d["account_name"]
    project_name = config_d["project_name"]
    run_name = config_d["run_name"]
    data_d = config_d["data"]
    model_d = config_d["model"]
    run_d = config_d["run"]
    # overwrite device if requested; compare against None explicitly so that
    # device_id == 0 (meaning "cuda:0") is honored — a bare truthiness check
    # would silently drop device 0
    if device_id is not None:
        if device_id < 0:
            run_d["device"] = "cpu"
        else:
            run_d["device"] = f"cuda:{device_id}"
    return account_name, project_name, run_name, data_d, model_d, run_d
def init_or_resume_wandb_run(
        account_name=None,
        project_name=None,
        run_name=None,
        data_d=None,
        model_d=None,
        run_d=None,
        wandb_meta_dp=None,
        job_id=None,
        job_id_dp=None,
        is_sweep=False,
        group_name=None):
    """
    Start a wandb run (or resume one after preemption) and train the model.

    If ``job_id`` is given, the run is preemptible: a small YAML file at
    ``{job_id_dp}/{job_id}.yml`` records the wandb ``run_id``, and on a
    restart the run is resumed and its ``chkpt.pkl`` restored from wandb.
    If ``run_d["load_id"]`` is set instead, a checkpoint from that other
    run is restored as ``best_chkpt.pkl`` before training. Preemption and
    loading are mutually exclusive. ``train`` is always called with
    ``use_wandb=True`` here. (Kept for compatibility with VV preemption.)
    """
    # imp_keys = ["learning_rate","ff_num_layers","ff_layer_type","dropout"]
    do_preempt = not (job_id is None)
    do_load = not (run_d["load_id"] is None)
    if is_sweep:
        # sweeps are only supported through the preemption path
        assert do_preempt
    # set up run_id: resume from the job file if it exists, otherwise start fresh
    if do_preempt:
        assert not (job_id_dp is None)
        assert not do_load
        job_id_fp = os.path.join(job_id_dp,f"{job_id}.yml")
        if os.path.isfile(job_id_fp):
            # job file exists -> this job was preempted before; resume its run_id
            print(f">>> resuming {job_id}")
            with open(job_id_fp,"r") as file:
                job_dict = yaml.safe_load(file)
            run_id = job_dict["run_id"]
            # run_dp = job_dict["run_dp"]
            # config is None so the resumed run keeps its stored config
            wandb_config = None
            # print("wandb_config",wandb_config)
        else:
            print(f">>> starting {job_id}")
            run_id = None
            # run_dp = None
            wandb_config = {**data_d,**model_d,**run_d}
            # print("wandb_config",[(k,wandb_config[k]) for k in imp_keys])
        resume = "allow"
    else:
        run_id = None
        resume = "never"
        wandb_config = {**data_d,**model_d,**run_d}
    # init run
    # if this is a sweep, some things passed into config will be overwritten
    run = wandb.init(project=project_name,name=run_name,id=run_id,config=wandb_config,resume=resume,dir=wandb_meta_dp,group=group_name)
    # update the local dicts from run.config so sweep/resume overrides take effect
    run_config_d = dict(run.config)
    # print("run_config_d",[(k,run_config_d[k]) for k in imp_keys])
    for d in [data_d,model_d,run_d]:
        for k in d.keys():
            d[k] = run_config_d[k]
    # copy files and update job file
    if do_preempt:
        if not run_id is None:
            # resuming: pull the checkpoint saved by the interrupted run
            # shutil.copy(os.path.join(run_dp,"chkpt.pkl"),os.path.join(wandb.run.dir,"chkpt.pkl"))
            wandb.restore("chkpt.pkl",root=run.dir,replace=True)
            assert os.path.isfile(os.path.join(run.dir,"chkpt.pkl"))
        # always (re)write the job file so a future preemption can resume this run
        with open(job_id_fp,"w") as file:
            yaml.dump(dict(run_id=run.id), file)
    if do_load:
        assert not do_preempt
        # restore a checkpoint from a different run as the "best" starting state
        load_run_dp = os.path.join(account_name,project_name,run_d["load_id"])
        wandb.restore("chkpt.pkl",run_path=load_run_dp,root=run.dir,replace=False)
        os.replace(os.path.join(run.dir,"chkpt.pkl"),os.path.join(run.dir,"best_chkpt.pkl"))
    # train model
    train(data_d,model_d,run_d,True)
    # cleanup
    if do_preempt:
        # remove the job_id file
        os.remove(job_id_fp)
        if is_sweep:
            # overwrite old chkpt.pkl (to reduce memory usage)
            th.save(dict(),os.path.join(run.dir,"chkpt.pkl"))
    run.finish()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("-t", "--template_fp", type=str, default="config/template.yml", help="path to template config file")
parser.add_argument("-w", "--use_wandb", type=booltype, default=False, help="whether to turn wandb on")
parser.add_argument("-d", "--device_id", type=int, required=False, help="device id (-1 for cpu)")
parser.add_argument("-c", "--custom_fp", type=str, required=False, help="path to custom config file")
parser.add_argument("-i", "--job_id", type=int, required=False, help="job_id for preemption")
parser.add_argument("-j", "--job_id_dp", type=str, default="job_id", help="directory where job_id files are stored")
parser.add_argument("-m", "--wandb_meta_dp", type=str, default=os.getcwd(), help="path to directory in which the wandb directory will exist")
parser.add_argument("-n", "--num_seeds", type=int, default=0)
flags = parser.parse_args()
use_wandb = flags.use_wandb
account_name, project_name, run_name, data_d, model_d, run_d = load_config(flags.template_fp,flags.custom_fp,flags.device_id)
if use_wandb:
if flags.num_seeds > 0:
assert not flags.job_id is None
assert flags.num_seeds > 1, flags.num_seeds
# check how many have been completed
job_id_fp = os.path.join(flags.job_id_dp,f"{flags.job_id}.yml")
if os.path.isfile(job_id_fp):
with open(job_id_fp,"r") as file:
num_complete = yaml.safe_load(file)["num_complete"]
else:
num_complete = 0
with open(job_id_fp,"w") as file:
yaml.dump(dict(num_complete=num_complete),file)
# run multiple
if run_d["train_seed"] is None:
meta_seed = 420420420
else:
meta_seed = run_d["train_seed"]
with np_temp_seed(meta_seed):
seed_range = np.arange(0,int(1e6))
model_seeds = np.random.choice(seed_range,replace=False,size=(flags.num_seeds,))
train_seeds = np.random.choice(seed_range,replace=False,size=(flags.num_seeds,))
group_name = f"{run_name}_rand"
for i in range(num_complete,flags.num_seeds):
model_d["model_seed"] = model_seeds[i]
run_d["train_seed"] = train_seeds[i]
run_d["cuda_deterministic"] = False
job_id_i = f"{flags.job_id}_{i}"
run_name_i = f"{run_name}_{i}"
init_or_resume_wandb_run(
account_name=account_name,
project_name=project_name,
run_name=run_name_i,
data_d=data_d,
model_d=model_d,
run_d=run_d,
wandb_meta_dp=flags.wandb_meta_dp,
job_id=job_id_i,
job_id_dp=flags.job_id_dp,
group_name=group_name
)
num_complete += 1
with open(job_id_fp,"w") as file:
yaml.dump(dict(num_complete=num_complete),file)
# cleanup
os.remove(job_id_fp)
else:
# just run one
init_or_resume_wandb_run(
account_name=account_name,
project_name=project_name,
run_name=run_name,
data_d=data_d,
model_d=model_d,
run_d=run_d,
wandb_meta_dp=flags.wandb_meta_dp,
job_id=flags.job_id,
job_id_dp=flags.job_id_dp
)
else:
train(data_d,model_d,run_d,use_wandb)
| [
"torch.any",
"wandb.log",
"torch.utils.data.DataLoader",
"torch.max",
"model.Predictor",
"wandb.init",
"dataset.data_to_device",
"yaml.load",
"torch.min",
"torch.sum",
"torch_scatter.scatter_mean",
"losses.get_loss_func",
"os.remove",
"torch.unique",
"torch.cuda.amp.GradScaler",
"argpa... | [((558, 584), 'torch.device', 'th.device', (["run_d['device']"], {}), "(run_d['device'])\n", (567, 584), True, 'import torch as th\n'), ((646, 674), 'losses.get_loss_func', 'get_loss_func', (["run_d['loss']"], {}), "(run_d['loss'])\n", (659, 674), False, 'from losses import get_loss_func, get_sim_func\n'), ((1425, 1451), 'torch.device', 'th.device', (["run_d['device']"], {}), "(run_d['device'])\n", (1434, 1451), True, 'import torch as th\n'), ((1513, 1541), 'losses.get_loss_func', 'get_loss_func', (["run_d['loss']"], {}), "(run_d['loss'])\n", (1526, 1541), False, 'from losses import get_loss_func, get_sim_func\n'), ((1554, 1580), 'losses.get_sim_func', 'get_sim_func', (["run_d['sim']"], {}), "(run_d['sim'])\n", (1566, 1580), False, 'from losses import get_loss_func, get_sim_func\n'), ((2285, 2304), 'torch.cat', 'th.cat', (['pred'], {'dim': '(0)'}), '(pred, dim=0)\n', (2291, 2304), True, 'import torch as th\n'), ((2312, 2331), 'torch.cat', 'th.cat', (['targ'], {'dim': '(0)'}), '(targ, dim=0)\n', (2318, 2331), True, 'import torch as th\n'), ((2344, 2363), 'torch.cat', 'th.cat', (['loss'], {'dim': '(0)'}), '(loss, dim=0)\n', (2350, 2363), True, 'import torch as th\n'), ((2375, 2393), 'torch.cat', 'th.cat', (['sim'], {'dim': '(0)'}), '(sim, dim=0)\n', (2381, 2393), True, 'import torch as th\n'), ((2403, 2424), 'torch.cat', 'th.cat', (['mol_id'], {'dim': '(0)'}), '(mol_id, dim=0)\n', (2409, 2424), True, 'import torch as th\n'), ((2449, 2494), 'torch.unique', 'th.unique', (['mol_id'], {'dim': '(0)', 'return_inverse': '(True)'}), '(mol_id, dim=0, return_inverse=True)\n', (2458, 2494), True, 'import torch as th\n'), ((2505, 2581), 'torch_scatter.scatter_mean', 'th_s.scatter_mean', (['spec_loss', 'un_mol_idx'], {'dim': '(0)', 'dim_size': 'un_mol_id.shape[0]'}), '(spec_loss, un_mol_idx, dim=0, dim_size=un_mol_id.shape[0])\n', (2522, 2581), True, 'import torch_scatter as th_s\n'), ((2590, 2665), 'torch_scatter.scatter_mean', 'th_s.scatter_mean', (['spec_sim', 
'un_mol_idx'], {'dim': '(0)', 'dim_size': 'un_mol_id.shape[0]'}), '(spec_sim, un_mol_idx, dim=0, dim_size=un_mol_id.shape[0])\n', (2607, 2665), True, 'import torch_scatter as th_s\n'), ((2681, 2706), 'torch.mean', 'th.mean', (['spec_loss'], {'dim': '(0)'}), '(spec_loss, dim=0)\n', (2688, 2706), True, 'import torch as th\n'), ((2723, 2747), 'torch.mean', 'th.mean', (['spec_sim'], {'dim': '(0)'}), '(spec_sim, dim=0)\n', (2730, 2747), True, 'import torch as th\n'), ((2764, 2788), 'torch.mean', 'th.mean', (['mol_loss'], {'dim': '(0)'}), '(mol_loss, dim=0)\n', (2771, 2788), True, 'import torch as th\n'), ((2804, 2827), 'torch.mean', 'th.mean', (['mol_sim'], {'dim': '(0)'}), '(mol_sim, dim=0)\n', (2811, 2827), True, 'import torch as th\n'), ((3897, 3923), 'torch.device', 'th.device', (["run_d['device']"], {}), "(run_d['device'])\n", (3906, 3923), True, 'import torch as th\n'), ((3985, 4013), 'losses.get_loss_func', 'get_loss_func', (["run_d['loss']"], {}), "(run_d['loss'])\n", (3998, 4013), False, 'from losses import get_loss_func, get_sim_func\n'), ((4026, 4052), 'losses.get_sim_func', 'get_sim_func', (["run_d['sim']"], {}), "(run_d['sim'])\n", (4038, 4052), False, 'from losses import get_loss_func, get_sim_func\n'), ((6142, 6168), 'torch.device', 'th.device', (["run_d['device']"], {}), "(run_d['device'])\n", (6151, 6168), True, 'import torch as th\n'), ((6230, 6258), 'losses.get_loss_func', 'get_loss_func', (["run_d['loss']"], {}), "(run_d['loss'])\n", (6243, 6258), False, 'from losses import get_loss_func, get_sim_func\n'), ((6271, 6297), 'losses.get_sim_func', 'get_sim_func', (["run_d['sim']"], {}), "(run_d['sim'])\n", (6283, 6297), False, 'from losses import get_loss_func, get_sim_func\n'), ((8762, 8788), 'torch.device', 'th.device', (["run_d['device']"], {}), "(run_d['device'])\n", (8771, 8788), True, 'import torch as th\n'), ((8849, 8875), 'losses.get_sim_func', 'get_sim_func', (["run_d['sim']"], {}), "(run_d['sim'])\n", (8861, 8875), False, 'from losses import 
get_loss_func, get_sim_func\n'), ((17768, 17794), 'torch.device', 'th.device', (["run_d['device']"], {}), "(run_d['device'])\n", (17777, 17794), True, 'import torch as th\n'), ((17887, 17922), 'torch.manual_seed', 'th.manual_seed', (["run_d['train_seed']"], {}), "(run_d['train_seed'])\n", (17901, 17922), True, 'import torch as th\n'), ((17924, 17964), 'numpy.random.seed', 'np.random.seed', (["(run_d['train_seed'] // 2)"], {}), "(run_d['train_seed'] // 2)\n", (17938, 17964), True, 'import numpy as np\n'), ((18113, 18171), 'torch.multiprocessing.set_sharing_strategy', 'th.multiprocessing.set_sharing_strategy', (['parallel_strategy'], {}), '(parallel_strategy)\n', (18152, 18171), True, 'import torch as th\n'), ((18310, 18370), 'torch.use_deterministic_algorithms', 'th.use_deterministic_algorithms', (["run_d['cuda_deterministic']"], {}), "(run_d['cuda_deterministic'])\n", (18341, 18370), True, 'import torch as th\n'), ((19713, 19739), 'torch.device', 'th.device', (["run_d['device']"], {}), "(run_d['device'])\n", (19722, 19739), True, 'import torch as th\n'), ((24281, 24308), 'os.path.isfile', 'os.path.isfile', (['template_fp'], {}), '(template_fp)\n', (24295, 24308), False, 'import os\n'), ((26740, 26876), 'wandb.init', 'wandb.init', ([], {'project': 'project_name', 'name': 'run_name', 'id': 'run_id', 'config': 'wandb_config', 'resume': 'resume', 'dir': 'wandb_meta_dp', 'group': 'group_name'}), '(project=project_name, name=run_name, id=run_id, config=\n wandb_config, resume=resume, dir=wandb_meta_dp, group=group_name)\n', (26750, 26876), False, 'import wandb\n'), ((27992, 28017), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (28015, 28017), False, 'import argparse\n'), ((840, 866), 'dataset.data_to_device', 'data_to_device', (['b', 'dev', 'nb'], {}), '(b, dev, nb)\n', (854, 866), False, 'from dataset import BaseDataset, data_to_device\n'), ((1667, 1679), 'torch.no_grad', 'th.no_grad', ([], {}), '()\n', (1677, 1679), True, 'import torch as 
th\n'), ((3716, 3749), 'wandb.log', 'wandb.log', (['log_dict'], {'commit': '(False)'}), '(log_dict, commit=False)\n', (3725, 3749), False, 'import wandb\n'), ((4198, 4260), 'torch.topk', 'th.topk', (["score_d['spec_sim']", "run_d['num_track']"], {'largest': '(True)'}), "(score_d['spec_sim'], run_d['num_track'], largest=True)\n", (4205, 4260), True, 'import torch as th\n'), ((4283, 4346), 'torch.topk', 'th.topk', (["score_d['spec_sim']", "run_d['num_track']"], {'largest': '(False)'}), "(score_d['spec_sim'], run_d['num_track'], largest=False)\n", (4290, 4346), True, 'import torch as th\n'), ((17145, 17180), 'misc_utils.th_temp_seed', 'th_temp_seed', (["model_d['model_seed']"], {}), "(model_d['model_seed'])\n", (17157, 17180), False, 'from misc_utils import np_temp_seed, th_temp_seed, booltype, DummyContext, DummyScaler\n'), ((17661, 17695), 'dataset.BaseDataset', 'BaseDataset', (['*dset_types'], {}), '(*dset_types, **data_d)\n', (17672, 17695), False, 'from dataset import BaseDataset, data_to_device\n'), ((17734, 17761), 'model.Predictor', 'Predictor', (['dim_d'], {}), '(dim_d, **model_d)\n', (17743, 17761), False, 'from model import Predictor\n'), ((18921, 19024), 'torch.optim.lr_scheduler.StepLR', 'th.optim.lr_scheduler.StepLR', (['optimizer', "run_d['scheduler_period']"], {'gamma': "run_d['scheduler_ratio']"}), "(optimizer, run_d['scheduler_period'], gamma=\n run_d['scheduler_ratio'])\n", (18949, 19024), True, 'import torch as th\n'), ((19324, 19346), 'torch.cuda.amp.autocast', 'th.cuda.amp.autocast', ([], {}), '()\n', (19344, 19346), True, 'import torch as th\n'), ((19358, 19382), 'torch.cuda.amp.GradScaler', 'th.cuda.amp.GradScaler', ([], {}), '()\n', (19380, 19382), True, 'import torch as th\n'), ((19406, 19420), 'misc_utils.DummyContext', 'DummyContext', ([], {}), '()\n', (19418, 19420), False, 'from misc_utils import np_temp_seed, th_temp_seed, booltype, DummyContext, DummyScaler\n'), ((19432, 19445), 'misc_utils.DummyScaler', 'DummyScaler', ([], {}), '()\n', 
(19443, 19445), False, 'from misc_utils import np_temp_seed, th_temp_seed, booltype, DummyContext, DummyScaler\n'), ((19811, 19851), 'os.path.join', 'os.path.join', (['wandb.run.dir', '"""chkpt.pkl"""'], {}), "(wandb.run.dir, 'chkpt.pkl')\n", (19823, 19851), False, 'import os\n'), ((19863, 19908), 'os.path.join', 'os.path.join', (['wandb.run.dir', '"""best_chkpt.pkl"""'], {}), "(wandb.run.dir, 'best_chkpt.pkl')\n", (19875, 19908), False, 'import os\n'), ((19923, 19968), 'os.path.join', 'os.path.join', (['wandb.run.dir', '"""temp_chkpt.pkl"""'], {}), "(wandb.run.dir, 'temp_chkpt.pkl')\n", (19935, 19968), False, 'import os\n'), ((19973, 19994), 'os.path.isfile', 'os.path.isfile', (['mr_fp'], {}), '(mr_fp)\n', (19987, 19994), False, 'import os\n'), ((24118, 24151), 'wandb.log', 'wandb.log', (['log_dict'], {'commit': '(False)'}), '(log_dict, commit=False)\n', (24127, 24151), False, 'import wandb\n'), ((24169, 24210), 'wandb.log', 'wandb.log', (["{'commit': epoch}"], {'commit': '(True)'}), "({'commit': epoch}, commit=True)\n", (24178, 24210), False, 'import wandb\n'), ((24346, 24371), 'os.path.isfile', 'os.path.isfile', (['custom_fp'], {}), '(custom_fp)\n', (24360, 24371), False, 'import os\n'), ((24443, 24491), 'yaml.load', 'yaml.load', (['template_file'], {'Loader': 'yaml.FullLoader'}), '(template_file, Loader=yaml.FullLoader)\n', (24452, 24491), False, 'import yaml\n'), ((26040, 26080), 'os.path.join', 'os.path.join', (['job_id_dp', 'f"""{job_id}.yml"""'], {}), "(job_id_dp, f'{job_id}.yml')\n", (26052, 26080), False, 'import os\n'), ((26085, 26110), 'os.path.isfile', 'os.path.isfile', (['job_id_fp'], {}), '(job_id_fp)\n', (26099, 26110), False, 'import os\n'), ((27473, 27531), 'os.path.join', 'os.path.join', (['account_name', 'project_name', "run_d['load_id']"], {}), "(account_name, project_name, run_d['load_id'])\n", (27485, 27531), False, 'import os\n'), ((27532, 27609), 'wandb.restore', 'wandb.restore', (['"""chkpt.pkl"""'], {'run_path': 'load_run_dp', 'root': 
'run.dir', 'replace': '(False)'}), "('chkpt.pkl', run_path=load_run_dp, root=run.dir, replace=False)\n", (27545, 27609), False, 'import wandb\n'), ((27799, 27819), 'os.remove', 'os.remove', (['job_id_fp'], {}), '(job_id_fp)\n', (27808, 27819), False, 'import os\n'), ((982, 1004), 'torch.mean', 'th.mean', (['b_loss'], {'dim': '(0)'}), '(b_loss, dim=0)\n', (989, 1004), True, 'import torch as th\n'), ((1020, 1041), 'torch.sum', 'th.sum', (['b_loss'], {'dim': '(0)'}), '(b_loss, dim=0)\n', (1026, 1041), True, 'import torch as th\n'), ((1794, 1820), 'dataset.data_to_device', 'data_to_device', (['b', 'dev', 'nb'], {}), '(b, dev, nb)\n', (1808, 1820), False, 'from dataset import BaseDataset, data_to_device\n'), ((3627, 3653), 'wandb.Image', 'wandb.Image', (['spec_sim_hist'], {}), '(spec_sim_hist)\n', (3638, 3653), False, 'import wandb\n'), ((3688, 3713), 'wandb.Image', 'wandb.Image', (['mol_sim_hist'], {}), '(mol_sim_hist)\n', (3699, 3713), False, 'import wandb\n'), ((12068, 12089), 'torch.cat', 'th.cat', (['r_spec'], {'dim': '(0)'}), '(r_spec, dim=0)\n', (12074, 12089), True, 'import torch as th\n'), ((12105, 12130), 'torch.cat', 'th.cat', (['r_spec_ids'], {'dim': '(0)'}), '(r_spec_ids, dim=0)\n', (12111, 12130), True, 'import torch as th\n'), ((12145, 12169), 'torch.cat', 'th.cat', (['r_mol_ids'], {'dim': '(0)'}), '(r_mol_ids, dim=0)\n', (12151, 12169), True, 'import torch as th\n'), ((12187, 12214), 'torch.cat', 'th.cat', (['r_prec_types'], {'dim': '(0)'}), '(r_prec_types, dim=0)\n', (12193, 12214), True, 'import torch as th\n'), ((12234, 12263), 'torch.cat', 'th.cat', (['r_col_energies'], {'dim': '(0)'}), '(r_col_energies, dim=0)\n', (12240, 12263), True, 'import torch as th\n'), ((12275, 12296), 'torch.cat', 'th.cat', (['q_spec'], {'dim': '(0)'}), '(q_spec, dim=0)\n', (12281, 12296), True, 'import torch as th\n'), ((12312, 12337), 'torch.cat', 'th.cat', (['q_spec_ids'], {'dim': '(0)'}), '(q_spec_ids, dim=0)\n', (12318, 12337), True, 'import torch as th\n'), ((12352, 
12376), 'torch.cat', 'th.cat', (['q_mol_ids'], {'dim': '(0)'}), '(q_mol_ids, dim=0)\n', (12358, 12376), True, 'import torch as th\n'), ((12394, 12421), 'torch.cat', 'th.cat', (['q_prec_types'], {'dim': '(0)'}), '(q_prec_types, dim=0)\n', (12400, 12421), True, 'import torch as th\n'), ((12441, 12470), 'torch.cat', 'th.cat', (['q_col_energies'], {'dim': '(0)'}), '(q_col_energies, dim=0)\n', (12447, 12470), True, 'import torch as th\n'), ((12503, 12580), 'torch.utils.data.TensorDataset', 'th.utils.data.TensorDataset', (['q_spec', 'q_spec_ids', 'q_prec_types', 'q_col_energies'], {}), '(q_spec, q_spec_ids, q_prec_types, q_col_energies)\n', (12530, 12580), True, 'import torch as th\n'), ((12588, 12703), 'torch.utils.data.DataLoader', 'th.utils.data.DataLoader', (['q_ds'], {'num_workers': '(0)', 'batch_size': '(200)', 'drop_last': '(False)', 'shuffle': '(False)', 'pin_memory': '(True)'}), '(q_ds, num_workers=0, batch_size=200, drop_last=\n False, shuffle=False, pin_memory=True)\n', (12612, 12703), True, 'import torch as th\n'), ((14529, 14551), 'torch.cat', 'th.cat', (['q_ranks'], {'dim': '(0)'}), '(q_ranks, dim=0)\n', (14535, 14551), True, 'import torch as th\n'), ((14571, 14598), 'torch.cat', 'th.cat', (['q_norm_ranks'], {'dim': '(0)'}), '(q_norm_ranks, dim=0)\n', (14577, 14598), True, 'import torch as th\n'), ((14617, 14643), 'torch.cat', 'th.cat', (['q_mean_sims'], {'dim': '(0)'}), '(q_mean_sims, dim=0)\n', (14623, 14643), True, 'import torch as th\n'), ((14898, 14939), 'torch.unique', 'th.unique', (['q_mol_ids'], {'return_inverse': '(True)'}), '(q_mol_ids, return_inverse=True)\n', (14907, 14939), True, 'import torch as th\n'), ((14953, 15027), 'torch_scatter.scatter_mean', 'th_s.scatter_mean', (['spec_rank', 'mol_idx'], {'dim': '(0)', 'dim_size': 'un_mol_ids.shape[0]'}), '(spec_rank, mol_idx, dim=0, dim_size=un_mol_ids.shape[0])\n', (14970, 15027), True, 'import torch_scatter as th_s\n'), ((15044, 15123), 'torch_scatter.scatter_mean', 'th_s.scatter_mean', 
(['spec_norm_rank', 'mol_idx'], {'dim': '(0)', 'dim_size': 'un_mol_ids.shape[0]'}), '(spec_norm_rank, mol_idx, dim=0, dim_size=un_mol_ids.shape[0])\n', (15061, 15123), True, 'import torch_scatter as th_s\n'), ((15139, 15217), 'torch_scatter.scatter_mean', 'th_s.scatter_mean', (['spec_mean_sim', 'mol_idx'], {'dim': '(0)', 'dim_size': 'un_mol_ids.shape[0]'}), '(spec_mean_sim, mol_idx, dim=0, dim_size=un_mol_ids.shape[0])\n', (15156, 15217), True, 'import torch_scatter as th_s\n'), ((15235, 15253), 'torch.mean', 'th.mean', (['spec_rank'], {}), '(spec_rank)\n', (15242, 15253), True, 'import torch as th\n'), ((15279, 15302), 'torch.mean', 'th.mean', (['spec_norm_rank'], {}), '(spec_norm_rank)\n', (15286, 15302), True, 'import torch as th\n'), ((15509, 15526), 'torch.mean', 'th.mean', (['mol_rank'], {}), '(mol_rank)\n', (15516, 15526), True, 'import torch as th\n'), ((15551, 15573), 'torch.mean', 'th.mean', (['mol_norm_rank'], {}), '(mol_norm_rank)\n', (15558, 15573), True, 'import torch as th\n'), ((19086, 19222), 'torch.optim.lr_scheduler.ReduceLROnPlateau', 'th.optim.lr_scheduler.ReduceLROnPlateau', (['optimizer'], {'mode': '"""min"""', 'patience': "run_d['scheduler_period']", 'factor': "run_d['scheduler_ratio']"}), "(optimizer, mode='min', patience=\n run_d['scheduler_period'], factor=run_d['scheduler_ratio'])\n", (19125, 19222), True, 'import torch as th\n'), ((20066, 20100), 'torch.load', 'th.load', (['mr_fp'], {'map_location': '"""cpu"""'}), "(mr_fp, map_location='cpu')\n", (20073, 20100), True, 'import torch as th\n'), ((20559, 20582), 'os.path.isfile', 'os.path.isfile', (['best_fp'], {}), '(best_fp)\n', (20573, 20582), False, 'import os\n'), ((23253, 23294), 'wandb.log', 'wandb.log', (["{'commit': epoch}"], {'commit': '(True)'}), "({'commit': epoch}, commit=True)\n", (23262, 23294), False, 'import wandb\n'), ((23885, 23910), 'torch.save', 'th.save', (['mr_d', 'temp_mr_fp'], {}), '(mr_d, temp_mr_fp)\n', (23892, 23910), True, 'import torch as th\n'), ((23913, 
23942), 'os.replace', 'os.replace', (['temp_mr_fp', 'mr_fp'], {}), '(temp_mr_fp, mr_fp)\n', (23923, 23942), False, 'import os\n'), ((23945, 23968), 'wandb.save', 'wandb.save', (['"""chkpt.pkl"""'], {}), "('chkpt.pkl')\n", (23955, 23968), False, 'import wandb\n'), ((24599, 24645), 'yaml.load', 'yaml.load', (['custom_file'], {'Loader': 'yaml.FullLoader'}), '(custom_file, Loader=yaml.FullLoader)\n', (24608, 24645), False, 'import yaml\n'), ((27231, 27285), 'wandb.restore', 'wandb.restore', (['"""chkpt.pkl"""'], {'root': 'run.dir', 'replace': '(True)'}), "('chkpt.pkl', root=run.dir, replace=True)\n", (27244, 27285), False, 'import wandb\n'), ((27620, 27654), 'os.path.join', 'os.path.join', (['run.dir', '"""chkpt.pkl"""'], {}), "(run.dir, 'chkpt.pkl')\n", (27632, 27654), False, 'import os\n'), ((27654, 27693), 'os.path.join', 'os.path.join', (['run.dir', '"""best_chkpt.pkl"""'], {}), "(run.dir, 'best_chkpt.pkl')\n", (27666, 27693), False, 'import os\n'), ((27904, 27938), 'os.path.join', 'os.path.join', (['run.dir', '"""chkpt.pkl"""'], {}), "(run.dir, 'chkpt.pkl')\n", (27916, 27938), False, 'import os\n'), ((28724, 28735), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (28733, 28735), False, 'import os\n'), ((29233, 29285), 'os.path.join', 'os.path.join', (['flags.job_id_dp', 'f"""{flags.job_id}.yml"""'], {}), "(flags.job_id_dp, f'{flags.job_id}.yml')\n", (29245, 29285), False, 'import os\n'), ((29291, 29316), 'os.path.isfile', 'os.path.isfile', (['job_id_fp'], {}), '(job_id_fp)\n', (29305, 29316), False, 'import os\n'), ((30616, 30636), 'os.remove', 'os.remove', (['job_id_fp'], {}), '(job_id_fp)\n', (30625, 30636), False, 'import os\n'), ((4897, 4923), 'dataset.data_to_device', 'data_to_device', (['d', 'dev', 'nb'], {}), '(d, dev, nb)\n', (4911, 4923), False, 'from dataset import BaseDataset, data_to_device\n'), ((7204, 7223), 'torch.cat', 'th.cat', (['pred'], {'dim': '(0)'}), '(pred, dim=0)\n', (7210, 7223), True, 'import torch as th\n'), ((7234, 7253), 'torch.cat', 
'th.cat', (['targ'], {'dim': '(0)'}), '(targ, dim=0)\n', (7240, 7253), True, 'import torch as th\n'), ((7269, 7288), 'torch.cat', 'th.cat', (['loss'], {'dim': '(0)'}), '(loss, dim=0)\n', (7275, 7288), True, 'import torch as th\n'), ((7303, 7321), 'torch.cat', 'th.cat', (['sim'], {'dim': '(0)'}), '(sim, dim=0)\n', (7309, 7321), True, 'import torch as th\n'), ((7334, 7355), 'torch.cat', 'th.cat', (['mol_id'], {'dim': '(0)'}), '(mol_id, dim=0)\n', (7340, 7355), True, 'import torch as th\n'), ((7383, 7428), 'torch.unique', 'th.unique', (['mol_id'], {'dim': '(0)', 'return_inverse': '(True)'}), '(mol_id, dim=0, return_inverse=True)\n', (7392, 7428), True, 'import torch as th\n'), ((7442, 7518), 'torch_scatter.scatter_mean', 'th_s.scatter_mean', (['spec_loss', 'un_mol_idx'], {'dim': '(0)', 'dim_size': 'un_mol_id.shape[0]'}), '(spec_loss, un_mol_idx, dim=0, dim_size=un_mol_id.shape[0])\n', (7459, 7518), True, 'import torch_scatter as th_s\n'), ((7530, 7605), 'torch_scatter.scatter_mean', 'th_s.scatter_mean', (['spec_sim', 'un_mol_idx'], {'dim': '(0)', 'dim_size': 'un_mol_id.shape[0]'}), '(spec_sim, un_mol_idx, dim=0, dim_size=un_mol_id.shape[0])\n', (7547, 7605), True, 'import torch_scatter as th_s\n'), ((7624, 7649), 'torch.mean', 'th.mean', (['spec_loss'], {'dim': '(0)'}), '(spec_loss, dim=0)\n', (7631, 7649), True, 'import torch as th\n'), ((7669, 7693), 'torch.mean', 'th.mean', (['spec_sim'], {'dim': '(0)'}), '(spec_sim, dim=0)\n', (7676, 7693), True, 'import torch as th\n'), ((7713, 7737), 'torch.mean', 'th.mean', (['mol_loss'], {'dim': '(0)'}), '(mol_loss, dim=0)\n', (7720, 7737), True, 'import torch as th\n'), ((7756, 7779), 'torch.mean', 'th.mean', (['mol_sim'], {'dim': '(0)'}), '(mol_sim, dim=0)\n', (7763, 7779), True, 'import torch as th\n'), ((10628, 10652), 'torch.cat', 'th.cat', (['real_spec'], {'dim': '(0)'}), '(real_spec, dim=0)\n', (10634, 10652), True, 'import torch as th\n'), ((10676, 10699), 'torch.cat', 'th.cat', (['spec_ids'], {'dim': '(0)'}), 
'(spec_ids, dim=0)\n', (10682, 10699), True, 'import torch as th\n'), ((10722, 10744), 'torch.cat', 'th.cat', (['mol_ids'], {'dim': '(0)'}), '(mol_ids, dim=0)\n', (10728, 10744), True, 'import torch as th\n'), ((10770, 10843), 'torch.tensor', 'th.tensor', (['[base_ds.prec_type_c2i[prec_type] for prec_type in prec_types]'], {}), '([base_ds.prec_type_c2i[prec_type] for prec_type in prec_types])\n', (10779, 10843), True, 'import torch as th\n'), ((10872, 10895), 'torch.tensor', 'th.tensor', (['col_energies'], {}), '(col_energies)\n', (10881, 10895), True, 'import torch as th\n'), ((13823, 13856), 'torch.nn.functional.normalize', 'F.normalize', (['b_q_spec'], {'p': '(2)', 'dim': '(1)'}), '(b_q_spec, p=2, dim=1)\n', (13834, 13856), True, 'import torch.nn.functional as F\n'), ((13870, 13899), 'torch.matmul', 'th.matmul', (['b_q_spec', 'r_spec.T'], {}), '(b_q_spec, r_spec.T)\n', (13879, 13899), True, 'import torch as th\n'), ((14653, 14682), 'torch.all', 'th.all', (['(spec_norm_rank <= 1.0)'], {}), '(spec_norm_rank <= 1.0)\n', (14659, 14682), True, 'import torch as th\n'), ((14684, 14713), 'torch.all', 'th.all', (['(spec_norm_rank >= 0.0)'], {}), '(spec_norm_rank >= 0.0)\n', (14690, 14713), True, 'import torch as th\n'), ((14713, 14735), 'torch.min', 'th.min', (['spec_norm_rank'], {}), '(spec_norm_rank)\n', (14719, 14735), True, 'import torch as th\n'), ((14736, 14758), 'torch.max', 'th.max', (['spec_norm_rank'], {}), '(spec_norm_rank)\n', (14742, 14758), True, 'import torch as th\n'), ((14770, 14798), 'torch.all', 'th.all', (['(spec_mean_sim <= 1.0)'], {}), '(spec_mean_sim <= 1.0)\n', (14776, 14798), True, 'import torch as th\n'), ((14800, 14828), 'torch.all', 'th.all', (['(spec_mean_sim >= 0.0)'], {}), '(spec_mean_sim >= 0.0)\n', (14806, 14828), True, 'import torch as th\n'), ((14828, 14849), 'torch.min', 'th.min', (['spec_mean_sim'], {}), '(spec_mean_sim)\n', (14834, 14849), True, 'import torch as th\n'), ((14850, 14871), 'torch.max', 'th.max', (['spec_mean_sim'], {}), 
'(spec_mean_sim)\n', (14856, 14871), True, 'import torch as th\n'), ((17013, 17046), 'wandb.log', 'wandb.log', (['log_dict'], {'commit': '(False)'}), '(log_dict, commit=False)\n', (17022, 17046), False, 'import wandb\n'), ((20649, 20685), 'torch.load', 'th.load', (['best_fp'], {'map_location': '"""cpu"""'}), "(best_fp, map_location='cpu')\n", (20656, 20685), True, 'import torch as th\n'), ((20810, 20828), 'os.remove', 'os.remove', (['best_fp'], {}), '(best_fp)\n', (20819, 20828), False, 'import os\n'), ((23148, 23173), 'torch.save', 'th.save', (['mr_d', 'temp_mr_fp'], {}), '(mr_d, temp_mr_fp)\n', (23155, 23173), True, 'import torch as th\n'), ((23177, 23206), 'os.replace', 'os.replace', (['temp_mr_fp', 'mr_fp'], {}), '(temp_mr_fp, mr_fp)\n', (23187, 23206), False, 'import os\n'), ((23210, 23233), 'wandb.save', 'wandb.save', (['"""chkpt.pkl"""'], {}), "('chkpt.pkl')\n", (23220, 23233), False, 'import wandb\n'), ((26199, 26219), 'yaml.safe_load', 'yaml.safe_load', (['file'], {}), '(file)\n', (26213, 26219), False, 'import yaml\n'), ((27309, 27343), 'os.path.join', 'os.path.join', (['run.dir', '"""chkpt.pkl"""'], {}), "(run.dir, 'chkpt.pkl')\n", (27321, 27343), False, 'import os\n'), ((29666, 29689), 'misc_utils.np_temp_seed', 'np_temp_seed', (['meta_seed'], {}), '(meta_seed)\n', (29678, 29689), False, 'from misc_utils import np_temp_seed, th_temp_seed, booltype, DummyContext, DummyScaler\n'), ((29748, 29816), 'numpy.random.choice', 'np.random.choice', (['seed_range'], {'replace': '(False)', 'size': '(flags.num_seeds,)'}), '(seed_range, replace=False, size=(flags.num_seeds,))\n', (29764, 29816), True, 'import numpy as np\n'), ((29833, 29901), 'numpy.random.choice', 'np.random.choice', (['seed_range'], {'replace': '(False)', 'size': '(flags.num_seeds,)'}), '(seed_range, replace=False, size=(flags.num_seeds,))\n', (29849, 29901), True, 'import numpy as np\n'), ((5398, 5417), 'numpy.zeros_like', 'np.zeros_like', (['targ'], {}), '(targ)\n', (5411, 5417), True, 'import 
numpy as np\n'), ((5947, 5980), 'wandb.log', 'wandb.log', (['log_dict'], {'commit': '(False)'}), '(log_dict, commit=False)\n', (5956, 5980), False, 'import wandb\n'), ((6578, 6590), 'torch.no_grad', 'th.no_grad', ([], {}), '()\n', (6588, 6590), True, 'import torch as th\n'), ((8523, 8556), 'wandb.log', 'wandb.log', (['log_dict'], {'commit': '(False)'}), '(log_dict, commit=False)\n', (8532, 8556), False, 'import wandb\n'), ((9922, 9934), 'torch.no_grad', 'th.no_grad', ([], {}), '()\n', (9932, 9934), True, 'import torch as th\n'), ((10949, 10973), 'torch.cat', 'th.cat', (['pred_spec'], {'dim': '(0)'}), '(pred_spec, dim=0)\n', (10955, 10973), True, 'import torch as th\n'), ((12819, 12850), 'torch.nn.functional.normalize', 'F.normalize', (['r_spec'], {'p': '(2)', 'dim': '(1)'}), '(r_spec, p=2, dim=1)\n', (12830, 12850), True, 'import torch.nn.functional as F\n'), ((13784, 13807), 'torch.any', 'th.any', (['b_q_mask'], {'dim': '(1)'}), '(b_q_mask, dim=1)\n', (13790, 13807), True, 'import torch as th\n'), ((14254, 14287), 'torch.clamp', 'th.clamp', (['(b_q_num_sims - 1.0)', '(1.0)'], {}), '(b_q_num_sims - 1.0, 1.0)\n', (14262, 14287), True, 'import torch as th\n'), ((16879, 16915), 'wandb.Image', 'wandb.Image', (['spec_cand_sim_mean_hist'], {}), '(spec_cand_sim_mean_hist)\n', (16890, 16915), False, 'import wandb\n'), ((16973, 17008), 'wandb.Image', 'wandb.Image', (['mol_cand_sim_mean_hist'], {}), '(mol_cand_sim_mean_hist)\n', (16984, 17008), False, 'import wandb\n'), ((21290, 21315), 'torch.save', 'th.save', (['mr_d', 'temp_mr_fp'], {}), '(mr_d, temp_mr_fp)\n', (21297, 21315), True, 'import torch as th\n'), ((21319, 21348), 'os.replace', 'os.replace', (['temp_mr_fp', 'mr_fp'], {}), '(temp_mr_fp, mr_fp)\n', (21329, 21348), False, 'import os\n'), ((21352, 21375), 'wandb.save', 'wandb.save', (['"""chkpt.pkl"""'], {}), "('chkpt.pkl')\n", (21362, 21375), False, 'import wandb\n'), ((24833, 24860), 'os.path.basename', 'os.path.basename', (['custom_fp'], {}), '(custom_fp)\n', 
(24849, 24860), False, 'import os\n'), ((5912, 5934), 'wandb.Image', 'wandb.Image', (['plot_data'], {}), '(plot_data)\n', (5923, 5934), False, 'import wandb\n'), ((6677, 6703), 'dataset.data_to_device', 'data_to_device', (['b', 'dev', 'nb'], {}), '(b, dev, nb)\n', (6691, 6703), False, 'from dataset import BaseDataset, data_to_device\n'), ((8422, 8448), 'wandb.Image', 'wandb.Image', (['spec_sim_hist'], {}), '(spec_sim_hist)\n', (8433, 8448), False, 'import wandb\n'), ((8492, 8517), 'wandb.Image', 'wandb.Image', (['mol_sim_hist'], {}), '(mol_sim_hist)\n', (8503, 8517), False, 'import wandb\n'), ((29376, 29396), 'yaml.safe_load', 'yaml.safe_load', (['file'], {}), '(file)\n', (29390, 29396), False, 'import yaml\n'), ((10457, 10483), 'dataset.data_to_device', 'data_to_device', (['b', 'dev', 'nb'], {}), '(b, dev, nb)\n', (10471, 10483), False, 'from dataset import BaseDataset, data_to_device\n'), ((14010, 14032), 'torch.rand_like', 'th.rand_like', (['b_q_sims'], {}), '(b_q_sims)\n', (14022, 14032), True, 'import torch as th\n'), ((14180, 14207), 'torch.argmax', 'th.argmax', (['b_q_match'], {'dim': '(1)'}), '(b_q_match, dim=1)\n', (14189, 14207), True, 'import torch as th\n')] |
# profile_1d.py
import numpy as np
from scipy import stats
from inspect import getsource
from ..load_sim import LoadSim
class Profile1D:
    """Mixin providing 1D binned profiles of simulation snapshot fields."""

    @LoadSim.Decorators.check_pickle
    def get_profile1d(self, num, fields_y, field_x='r', bins=None, statistic='mean',
                      prefix='profile1d', savdir=None, force_override=False):
        """
        Function to calculate 1D profile(s) and pickle using
        scipy.stats.binned_statistics

        Parameters
        ----------
        num : int
            vtk output number
        fields_y : (list of) str
            Fields to be profiled
        field_x : str
            Field for binning
        bins : int or sequence of scalars, optional
            If bins is an int, it defines the number of equal-width bins in the
            given range. If bins is a sequence, it defines the bin edges,
            including the rightmost edge, allowing for non-uniform bin widths.
            Values in x that are smaller than lowest bin edge are assigned to
            bin number 0, values beyond the highest bin are assigned to
            bins[-1]. If the bin edges are specified, the number of bins will
            be, (nx = len(bins)-1). The default value is np.linspace(x.min(),
            x.max(), 50)
        statistic : (list of) string or callable
            The statistic to compute (default is 'mean'). The following
            statistics are available:
            'mean' : compute the mean of values for points within each bin.
            Empty bins will be represented by NaN.
            'std' : compute the standard deviation within each bin. This is
            implicitly calculated with ddof=0.
            'median' : compute the median of values for points within each bin.
            Empty bins will be represented by NaN.
            'count' : compute the count of points within each bin. This is
            identical to an unweighted histogram. values array is not
            referenced.
            'sum' : compute the sum of values for points within each bin. This
            is identical to a weighted histogram.
            'min' : compute the minimum of values for points within each bin.
            Empty bins will be represented by NaN.
            'max' : compute the maximum of values for point within each bin.
            Empty bins will be represented by NaN.
            function : a user-defined function which takes a 1D array of values,
            and outputs a single numerical statistic. This function will be
            called on the values in each bin. Empty bins will be represented by
            function([]), or NaN if this returns an error.
        savdir : str, optional
            Directory to pickle results
        prefix : str
            Prefix for python pickle file
        force_override : bool
            Flag to force read of starpar_vtk file even when pickle exists

        Returns
        -------
        dict
            res[y][stat_name] holds the binned statistic for each y field;
            res[field_x]['bine'/'binc'] hold bin edges/centers;
            'time_code' and 'time' hold the snapshot time.
        """
        # Accept scalar or list input for both the fields and the statistics.
        fields_y = np.atleast_1d(fields_y)
        statistic = np.atleast_1d(statistic)
        ds = self.load_vtk(num)
        ddy = ds.get_field(fields_y)
        ddx = ds.get_field(field_x)
        x1d = ddx[field_x].data.flatten()
        # Default binning: 50 equal-width bins spanning the data range.
        if bins is None:
            bins = np.linspace(x1d.min(), x1d.max(), 50)
        res = dict()
        res[field_x] = dict()
        for y in fields_y:
            res[y] = dict()
        # Recover a readable name for anonymous lambdas from their source text
        # (e.g. "q25 = lambda x: ..." -> "q25"). NOTE(review): relies on the
        # lambda being defined in a "name = lambda ..." assignment that
        # getsource can retrieve -- confirm for interactively defined lambdas.
        get_lambda_name = lambda l: getsource(l).split('=')[0].strip()
        # Compute statistics
        for y in fields_y:
            y1d = ddy[y].data.flatten()
            for st in statistic:
                # Get name of statistic
                if callable(st):
                    if st.__name__ == "<lambda>":
                        name = get_lambda_name(st)
                    else:
                        name = st.__name__
                else:
                    name = st
                # NOTE: 'st' is rebound here to the result array; safe because
                # the loop variable is re-assigned on the next iteration.
                st, bine, _ = stats.binned_statistic(x1d, y1d, st, bins=bins)
                # Store result
                res[y][name] = st
        # bin edges
        # NOTE(review): 'bine' is only bound inside the loops above; empty
        # fields_y or statistic would raise NameError here -- presumably
        # callers always pass at least one of each.
        res[field_x]['bine'] = bine
        # bin centers
        res[field_x]['binc'] = 0.5*(bine[1:] + bine[:-1])
        # Time of the snapshot
        res['time_code'] = ds.domain['time']
        res['time'] = ds.domain['time']*self.u.Myr
        return res
| [
"scipy.stats.binned_statistic",
"inspect.getsource",
"numpy.atleast_1d"
] | [((2949, 2972), 'numpy.atleast_1d', 'np.atleast_1d', (['fields_y'], {}), '(fields_y)\n', (2962, 2972), True, 'import numpy as np\n'), ((2993, 3017), 'numpy.atleast_1d', 'np.atleast_1d', (['statistic'], {}), '(statistic)\n', (3006, 3017), True, 'import numpy as np\n'), ((3923, 3970), 'scipy.stats.binned_statistic', 'stats.binned_statistic', (['x1d', 'y1d', 'st'], {'bins': 'bins'}), '(x1d, y1d, st, bins=bins)\n', (3945, 3970), False, 'from scipy import stats\n'), ((3412, 3424), 'inspect.getsource', 'getsource', (['l'], {}), '(l)\n', (3421, 3424), False, 'from inspect import getsource\n')] |
import glob
import os.path
import pandas as pd
import argparse
import numpy as np
import scipy.stats
from scipy.stats import beta
#V0.1.2
# Bounds for beta-distribution shape parameters (separate shape2 bounds for
# cis- vs trans-QTL fits).
# NOTE(review): these constants are not referenced by any function visible in
# this module -- presumably imported by callers; confirm before removing.
BETA_SHAPE1_MIN = 0.1
BETA_SHAPE1_MAX = 10
BETA_SHAPE2_MIN_CIS = 1
BETA_SHAPE2_MIN_TRANS = 5
BETA_SHAPE2_MAX_CIS = 1000000
BETA_SHAPE2_MAX_TRANS = 100000000
def estimate_beta_function_paras(top_pvalues_perm):
    """Estimate beta-distribution shape parameters by the method of moments.

    Parameters
    ----------
    top_pvalues_perm : array_like
        Top permutation p-values, expected in (0, 1).

    Returns
    -------
    tuple of float
        ``(alpha, beta)`` shape parameters of the fitted beta distribution.
    """
    m = np.mean(top_pvalues_perm)
    v = np.var(top_pvalues_perm)
    # Method-of-moments estimators for Beta(alpha, beta).
    alpha_para = m * (m * (1 - m) / v - 1)
    beta_para = alpha_para * (1 / m - 1)
    return alpha_para, beta_para
def correction_function_fdr(pValue, top_pvalues_perm, nPerm):
    """Empirical FDR for one p-value against the permutation null.

    Parameters
    ----------
    pValue : float
        Observed p-value.
    top_pvalues_perm : array_like
        Pooled permutation p-values.
    nPerm : float
        Number of permutations per gene (normalizing constant).

    Returns
    -------
    float
        Fraction of permutation p-values below ``pValue`` per permutation,
        capped at 1.0.
    """
    n_smaller = int(np.count_nonzero(np.asarray(top_pvalues_perm) < pValue))
    fdrPval = n_smaller / nPerm
    # An empirical FDR cannot exceed 1.
    return min(fdrPval, 1.0)
def add_global_fdr_measures(QTL_Dir, OutputDir, relevantGenes, qtl_results_file="top_qtl_results_all.txt"):
    """Add beta-fit global p-values and empirical global FDR to top QTL results.

    Reads per-gene permutation p-value files (``Permutation.pValues.<gene>.txt``)
    from ``QTL_Dir``, fits a beta distribution to the pooled permutation
    p-values, then annotates the top-QTL result table with
    ``empirical_global_p_value`` (beta cdf) and ``emperical_global_fdr``
    (empirical permutation FDR), writing
    ``top_qtl_results_all_global_FDR_info.txt`` back into ``QTL_Dir``.

    NOTE(review): ``OutputDir`` is normalized but never used -- output goes to
    ``QTL_Dir``; confirm whether this is intentional.
    """
    # Strip trailing slashes so path concatenation below is consistent.
    if QTL_Dir[-1:] == "/" :
        QTL_Dir = QTL_Dir[:-1]
    if OutputDir[-1:] == "/" :
        OutputDir = OutputDir[:-1]
    if relevantGenes is not None :
        # Restrict processing to the genes listed (one per line, no header).
        genesToParse = pd.read_csv(relevantGenes, header=None)[0].values
        # String + numpy array broadcasts to one expected path per gene.
        toRead = set(QTL_Dir+"/Permutation.pValues."+genesToParse+".txt")
    permutationInformtionToProcess = (glob.glob(QTL_Dir+"/Permutation.pValues.*.txt"))
    if relevantGenes is not None :
        permutationInformtionToProcess = set(permutationInformtionToProcess).intersection(toRead)
    # Pool permutation p-values across all processed genes.
    pValueBuffer = []
    genesTested = 0
    for file in permutationInformtionToProcess :
        #print(file)
        pValueBuffer.extend(np.loadtxt(file))
        genesTested +=1
    # Average number of permutations per gene (may be fractional).
    nPerm = len(pValueBuffer)/genesTested
    pValueBuffer=np.float_(pValueBuffer)
    # Fit a beta distribution to the pooled null by the method of moments.
    alpha_para, beta_para = estimate_beta_function_paras(pValueBuffer)
    beta_dist_mm = scipy.stats.beta(alpha_para,beta_para)
    correction_function_beta = lambda x: beta_dist_mm.cdf(x)
    qtlResults = pd.read_table(QTL_Dir+"/"+qtl_results_file,sep='\t')
    if relevantGenes is not None :
        qtlResults = qtlResults.loc[qtlResults['feature_id'].isin(genesToParse)]
    qtlResults['empirical_global_p_value'] = correction_function_beta(qtlResults["p_value"])
    # Empirical permutation-based FDR per observed p-value.
    fdrBuffer = []
    for p in qtlResults["p_value"] :
        fdrBuffer.append(correction_function_fdr(p , pValueBuffer, nPerm))
    qtlResults['emperical_global_fdr'] = fdrBuffer
    qtlResults.to_csv(QTL_Dir+"/top_qtl_results_all_global_FDR_info.txt",sep='\t',index=False)
def parse_args():
    """Parse command-line arguments for the global QTL correction script.

    Returns
    -------
    argparse.Namespace
        Attributes: ``input_dir``, ``ouput_dir``, ``gene_selection``,
        ``qtl_filename``.

    Notes
    -----
    The historical ``--ouput_dir`` spelling is preserved for backward
    compatibility (it is the first long option, so the attribute remains
    ``args.ouput_dir``); the correctly spelled ``--output_dir`` is accepted
    as an alias.
    """
    parser = argparse.ArgumentParser(description='Run global gene and snp wide correction on QTLs.')
    parser.add_argument('--input_dir', '-id', required=True)
    # First long option fixes dest to 'ouput_dir', so existing callers and
    # the __main__ block keep working unchanged.
    parser.add_argument('--ouput_dir', '--output_dir', '-od', required=True)
    parser.add_argument('--gene_selection', '-gs', required=False, default=None)
    parser.add_argument('--qtl_filename', '-qf', required=False, default=None)
    args = parser.parse_args()
    return args
if __name__=='__main__':
    # Parse CLI options and dispatch to the global FDR-correction routine.
    args = parse_args()
    inputDir = args.input_dir
    outputDir = args.ouput_dir  # note: the CLI flag is spelled '--ouput_dir'
    relevantGenes = args.gene_selection
    qtlFileName = args.qtl_filename
    # Only forward the results filename when given, so the default filename
    # in add_global_fdr_measures applies otherwise.
    if qtlFileName is not None :
        add_global_fdr_measures(inputDir, outputDir, relevantGenes, qtlFileName)
    else :
        add_global_fdr_measures(inputDir, outputDir, relevantGenes)
| [
"numpy.mean",
"argparse.ArgumentParser",
"pandas.read_csv",
"numpy.where",
"numpy.var",
"pandas.read_table",
"numpy.loadtxt",
"numpy.float_",
"glob.glob"
] | [((360, 385), 'numpy.mean', 'np.mean', (['top_pvalues_perm'], {}), '(top_pvalues_perm)\n', (367, 385), True, 'import numpy as np\n'), ((401, 425), 'numpy.var', 'np.var', (['top_pvalues_perm'], {}), '(top_pvalues_perm)\n', (407, 425), True, 'import numpy as np\n'), ((1213, 1262), 'glob.glob', 'glob.glob', (["(QTL_Dir + '/Permutation.pValues.*.txt')"], {}), "(QTL_Dir + '/Permutation.pValues.*.txt')\n", (1222, 1262), False, 'import glob\n'), ((1646, 1669), 'numpy.float_', 'np.float_', (['pValueBuffer'], {}), '(pValueBuffer)\n', (1655, 1669), True, 'import numpy as np\n'), ((1882, 1939), 'pandas.read_table', 'pd.read_table', (["(QTL_Dir + '/' + qtl_results_file)"], {'sep': '"""\t"""'}), "(QTL_Dir + '/' + qtl_results_file, sep='\\t')\n", (1895, 1939), True, 'import pandas as pd\n'), ((2468, 2560), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Run global gene and snp wide correction on QTLs."""'}), "(description=\n 'Run global gene and snp wide correction on QTLs.')\n", (2491, 2560), False, 'import argparse\n'), ((1540, 1556), 'numpy.loadtxt', 'np.loadtxt', (['file'], {}), '(file)\n', (1550, 1556), True, 'import numpy as np\n'), ((643, 678), 'numpy.where', 'np.where', (['(top_pvalues_perm < pValue)'], {}), '(top_pvalues_perm < pValue)\n', (651, 678), True, 'import numpy as np\n'), ((1046, 1085), 'pandas.read_csv', 'pd.read_csv', (['relevantGenes'], {'header': 'None'}), '(relevantGenes, header=None)\n', (1057, 1085), True, 'import pandas as pd\n')] |
# We import the necessary packages
from functions_global import *
import numpy as np
import pandas as pd
from scipy.optimize import differential_evolution
# Cost function (rmse is provided by functions_global)
costfn = rmse

# Load the raw VNA data; it contains 4 measurements per vaccine
# configuration per time point.
dataRaw = pd.read_csv('../../data/VNA.csv')
timesData = dataRaw['days'].tolist()  # list of time points
nMeasurements = 4

# Build (time, titre) pairs for the non-adjuvanted (PBS) vaccine.
# The adjuvanted configurations (MF59/AS03/Diluvac) are intentionally
# not extracted in this base-case script.
PBS = []
MF59 = []
AS03 = []
Diluvac = []
X_data = []  # list of (repeated) time points, one entry per measurement
for i in range(len(timesData)):
    for j in range(1, nMeasurements + 1):
        X_data.append(timesData[i])
        PBS.append(dataRaw.T.iloc[j][i])
PBS = np.column_stack((X_data, PBS))

# Boundary values for the parameters to be estimated in the base case
#              gammaNA     gammaHA     mu          dmax
bounds_PBS = [(0.1, 2.5), (0.1, 7.5), (0.2, 1.0), (0.1, 0.3)]

nTotal = len(X_data)
# BUG FIX: use integer (floor) division -- plain '/' yields a float in
# Python 3 and range(nDays) below would raise a TypeError.
nDays = nTotal // nMeasurements
nRuns = 2500

# Indices of the datapoints grouped by day.
perDay = [np.arange(nMeasurements * i, nMeasurements * (i + 1)) for i in range(nDays)]


def oneRun():
    """Fit the model to one bootstrap sample (one random datapoint per day).

    Returns
    -------
    numpy.ndarray
        Best-fit parameters (gammaNA, gammaHA, mu, dmax).
    """
    # Select a random sample of 1 datapoint per day.
    idx_sample = [np.random.choice(x) for x in perDay]
    sample = PBS[idx_sample]
    args = (sample[:, 0], sample[:, 1])
    estimation = differential_evolution(costfn, bounds_PBS, args=args)
    return estimation.x


# Run the bootstrap and collect per-run parameter estimates.
gammaNA_list = []
gammaHA_list = []
mu_list = []
dmax_list = []
for run in range(nRuns):
    gammaNA, gammaHA, mu, dmax = oneRun()
    gammaNA_list.append(gammaNA)
    gammaHA_list.append(gammaHA)
    mu_list.append(mu)
    dmax_list.append(dmax)

best_fit_params = {'gammaNA': gammaNA_list, 'gammaHA': gammaHA_list,
                   'mu': mu_list, 'dmax': dmax_list}
best_fit_params = pd.DataFrame(best_fit_params)
best_fit_params.to_csv('../../params/bootstrap_base.csv', index=False)
| [
"scipy.optimize.differential_evolution",
"pandas.read_csv",
"numpy.random.choice",
"numpy.column_stack",
"pandas.DataFrame",
"numpy.arange"
] | [((321, 354), 'pandas.read_csv', 'pd.read_csv', (['"""../../data/VNA.csv"""'], {}), "('../../data/VNA.csv')\n", (332, 354), True, 'import pandas as pd\n'), ((1135, 1165), 'numpy.column_stack', 'np.column_stack', (['(X_data, PBS)'], {}), '((X_data, PBS))\n', (1150, 1165), True, 'import numpy as np\n'), ((2423, 2452), 'pandas.DataFrame', 'pd.DataFrame', (['best_fit_params'], {}), '(best_fit_params)\n', (2435, 2452), True, 'import pandas as pd\n'), ((1605, 1658), 'numpy.arange', 'np.arange', (['(nMeasurements * i)', '(nMeasurements * (i + 1))'], {}), '(nMeasurements * i, nMeasurements * (i + 1))\n', (1614, 1658), True, 'import numpy as np\n'), ((1883, 1936), 'scipy.optimize.differential_evolution', 'differential_evolution', (['costfn', 'bounds_PBS'], {'args': 'args'}), '(costfn, bounds_PBS, args=args)\n', (1905, 1936), False, 'from scipy.optimize import differential_evolution\n'), ((1761, 1780), 'numpy.random.choice', 'np.random.choice', (['x'], {}), '(x)\n', (1777, 1780), True, 'import numpy as np\n')] |
import numpy as np
def free_energy(weights, max_energy=None, zero_point_energy=1.0e-12,
                supermax_value=np.nan):
    """Transform normalized probability weights into free energies.

    The free energy is -ln(p). Values are shifted so the minimum equals
    ``zero_point_energy`` (keeping every energy strictly positive, since a
    zero free energy would correspond to probability 1), and, optionally,
    energies exceeding ``max_energy`` are masked with ``supermax_value``
    to account for missing observations or extreme outliers.

    Parameters
    ----------
    weights : arraylike of float
        Normalized probability weights.
    max_energy : float or None
        If given, free energies above this value are replaced with
        ``supermax_value``; if None, no masking is performed.
        (Default = None)
    zero_point_energy : float or None
        If given, energies are shifted so their minimum equals this value;
        if None, no shift is applied.
        (Default = 1.0e-12)
    supermax_value : numpy scalar
        Replacement for energies above ``max_energy``; typically NaN so the
        data is omitted downstream.
        (Default = np.nan)

    Returns
    -------
    arraylike
        Free energy for each weight.
    """
    # -ln(p), assuming weights are already normalized.
    energies = -np.log(weights)

    if zero_point_energy is not None:
        # Anchor the minimum at the zero-point energy.
        energies = energies - energies.min() + zero_point_energy

    if max_energy is not None:
        # Mask outliers beyond the energy cap.
        energies[energies > max_energy] = supermax_value

    return energies
| [
"numpy.where",
"numpy.log"
] | [((2162, 2177), 'numpy.log', 'np.log', (['weights'], {}), '(weights)\n', (2168, 2177), True, 'import numpy as np\n'), ((2585, 2621), 'numpy.where', 'np.where', (['(free_energies > max_energy)'], {}), '(free_energies > max_energy)\n', (2593, 2621), True, 'import numpy as np\n')] |
__author__ = 'misken'
import numpy as np
import scipy.stats as stats
import scipy.optimize
import math
def poissoninv(prob, mean):
    """
    Return the cumulative inverse (percent point function) of the Poisson
    distribution.

    Thin wrapper around ``scipy.stats.poisson.ppf``; useful for capacity
    planning approximations.

    Parameters
    ----------
    prob : float
        percentile desired
    mean : float
        mean of the Poisson distribution

    Returns
    -------
    float
        smallest value c such that P(X <= c) >= prob
    """
    return stats.poisson.ppf(prob, mean)
def erlangb_direct(load, c):
    """
    Return the probability of loss (blocking) in an M/G/c/c system,
    computed directly as the ratio of the Poisson pmf to the Poisson cdf
    at c.

    Parameters
    ----------
    load : float
        offered load in erlangs (arrival rate * mean service time)
    c : int
        number of servers

    Returns
    -------
    float
        probability an arrival finds all c servers busy
    """
    numerator = stats.poisson.pmf(c, load)
    denominator = stats.poisson.cdf(c, load)
    return numerator / denominator
def erlangb(load, c):
    """
    Return the probability of loss (blocking) in an M/G/c/c system via the
    standard Erlang-B recurrence on the reciprocal.

    Much faster than the direct Poisson pmf/cdf ratio.

    Parameters
    ----------
    load : float
        offered load in erlangs (arrival rate * mean service time)
    c : int
        number of servers

    Returns
    -------
    float
        probability an arrival finds all c servers busy
    """
    # Recurrence on 1/B(j, load): 1/B(j) = 1 + (j/load) * 1/B(j-1).
    inv_b = 1.0
    for servers in range(1, c + 1):
        inv_b = 1.0 + inv_b * servers / load
    return 1.0 / inv_b
def erlangc(load, c):
    """
    Return the probability of delay in an M/M/c/inf system, computed from
    Erlang-B via the standard B-to-C conversion.

    Parameters
    ----------
    load : float
        offered load in erlangs (arrival rate * mean service time)
    c : int
        number of servers

    Returns
    -------
    float
        probability all servers are busy (arriving customer must wait)
    """
    rho = load / float(c)
    blocking = erlangb(load, c)
    # Erlang C from Erlang B: C = 1 / (rho + (1 - rho)/B).
    return 1.0 / (rho + (1 - rho) * (1.0 / blocking))
def erlangcinv(prob, load):
    """
    Return the number of servers such that the probability of delay in an
    M/M/c/inf system is no more than the specified probability.

    Parameters
    ----------
    prob : float
        threshold delay probability
    load : float
        offered load in erlangs (arrival rate * mean service time)

    Returns
    -------
    c : float
        smallest server count (as returned by np.ceil) meeting the target
    """
    # Start at the minimum stable server count and step up until the
    # Erlang-C delay probability drops to the target.
    c = np.ceil(load)
    while erlangc(load, c) > prob:
        c += 1
    return c
def mmc_prob_n(n, arr_rate, svc_rate, c):
    """
    Return the probability of n customers in system in M/M/c/inf queue.

    Uses recursive approach from Tijms, H.C. (1994), "Stochastic Models: An
    Algorithmic Approach", Wiley (Section 4.5.1, p287).

    Parameters
    ----------
    n : int
        number of customers for which probability is desired
    arr_rate : float
        average arrival rate to queueing system
    svc_rate : float
        average service rate (each server). 1/svc_rate is mean service time.
    c : int
        number of servers

    Returns
    -------
    float
        probability n customers in system (in service plus in queue)
    """
    rho = arr_rate / (svc_rate * float(c))
    # Step 0: Initialization - pbar[0] = 1; the array is sized to also hold
    # state n when n >= c.
    pbar = np.ones(max(n + 1, c))
    # Step 1: unnormalized probabilities for states 1..c-1
    for j in range(1, c):
        pbar[j] = arr_rate * pbar[j - 1] / (j * svc_rate)
    # Step 2: normalizing constant.
    # BUG FIX: sum only the first c entries. Previously the whole array was
    # summed, so the untouched trailing ones inflated the normalizing
    # constant whenever n >= c (e.g. M/M/1 with n=2 returned 0.0625 instead
    # of 0.125).
    gamma = np.sum(pbar[:c]) + rho * pbar[c - 1] / (1 - rho)
    p = pbar / gamma
    # Step 3: geometric tail for states c..n
    for j in range(c, n + 1):
        p[j] = p[c - 1] * (rho ** (j - c + 1))
    return p[n]
def mmc_mean_qsize(arr_rate, svc_rate, c):
    """
    Return the mean queue size (Lq) in an M/M/c/inf queue.

    Parameters
    ----------
    arr_rate : float
        average arrival rate to queueing system
    svc_rate : float
        average service rate (each server). 1/svc_rate is mean service time.
    c : int
        number of servers

    Returns
    -------
    float
        mean number of customers in queue
    """
    rho = arr_rate / (svc_rate * float(c))
    # Lq = rho^2/(1-rho)^2 * P(c-1 in system)  (Tijms-style recursion form)
    delay_factor = rho ** 2 / (1 - rho) ** 2
    return delay_factor * mmc_prob_n(c - 1, arr_rate, svc_rate, c)
def mmc_mean_syssize(arr_rate, svc_rate, c):
    """
    Return the mean system size (L) in an M/M/c/inf queue.

    Parameters
    ----------
    arr_rate : float
        average arrival rate to queueing system
    svc_rate : float
        average service rate (each server). 1/svc_rate is mean service time.
    c : int
        number of servers

    Returns
    -------
    float
        mean number of customers in queue + service
    """
    load = arr_rate / svc_rate
    # CONSISTENCY: reuse mmc_mean_qsize instead of duplicating its formula
    # inline. L = Lq + offered load (mean number in service).
    return mmc_mean_qsize(arr_rate, svc_rate, c) + load
def mmc_mean_qwait(arr_rate, svc_rate, c):
    """
    Return the mean wait in queue (Wq) in an M/M/c/inf queue via Little's
    Law: Wq = Lq / lambda.

    Parameters
    ----------
    arr_rate : float
        average arrival rate to queueing system
    svc_rate : float
        average service rate (each server). 1/svc_rate is mean service time.
    c : int
        number of servers

    Returns
    -------
    float
        mean wait time in queue
    """
    lq = mmc_mean_qsize(arr_rate, svc_rate, c)
    return lq / arr_rate
def mmc_mean_systime(arr_rate, svc_rate, c):
    """
    Return the mean time in system (W = Wq + mean service time) in an
    M/M/c/inf queue.

    Parameters
    ----------
    arr_rate : float
        average arrival rate to queueing system
    svc_rate : float
        average service rate (each server). 1/svc_rate is mean service time.
    c : int
        number of servers

    Returns
    -------
    float
        mean time in system (wait in queue + service time)
    """
    mean_svc_time = 1 / svc_rate
    return mmc_mean_qwait(arr_rate, svc_rate, c) + mean_svc_time
def mmc_prob_wait_normal(arr_rate, svc_rate, c):
    """
    Return the approximate probability of waiting (i.e. Erlang C) in an
    M/M/c/inf queue using a normal approximation.

    Uses the normal approximation of Kolesar and Green, "Insights on Service
    System Design from a Normal Approximation to Erlang's Delay Formula",
    POM, V7, No3, Fall 1998, pp282-293:
    P(wait) ~ 1 - Phi((c - load - 0.5) / sqrt(load)).

    Parameters
    ----------
    arr_rate : float
        average arrival rate to queueing system
    svc_rate : float
        average service rate (each server). 1/svc_rate is mean service time.
    c : int
        number of servers

    Returns
    -------
    float
        approximate probability of delay in queue
    """
    load = arr_rate / svc_rate
    # BUG FIX: the whole z-score (c - load - 0.5) must be divided by
    # sqrt(load) BEFORE evaluating the normal cdf. Previously the division
    # was applied to the cdf value itself, which is not the Kolesar-Green
    # approximation and could even return values outside [0, 1].
    z = (c - load - 0.5) / np.sqrt(load)
    prob_wait = 1.0 - stats.norm.cdf(z)
    return prob_wait
def mgc_prob_wait_erlangc(arr_rate, svc_rate, c):
    """
    Return the approximate probability of waiting in an M/G/c/inf queue,
    using Erlang-C as the approximation.

    The Erlang-C formula P(W>0) for M/M/c is a well-known, good
    approximation for P(W>0) in M/G/c; see Tijms (1994) p296 or Whitt
    (1993), "Approximations for the GI/G/m queue", POM 2, 2.

    Parameters
    ----------
    arr_rate : float
        average arrival rate to queueing system
    svc_rate : float
        average service rate (each server). 1/svc_rate is mean service time.
    c : int
        number of servers

    Returns
    -------
    float
        approximate probability of delay in queue
    """
    offered_load = arr_rate / svc_rate
    return erlangc(offered_load, c)
def mm1_qwait_cdf(t, arr_rate, svc_rate):
    """
    Return P(Wq < t) in an M/M/1/inf queue:
    P(Wq < t) = 1 - rho * exp(-mu * (1 - rho) * t).

    Parameters
    ----------
    t : float
        wait time of interest
    arr_rate : float
        average arrival rate to queueing system
    svc_rate : float
        average service rate. 1/svc_rate is mean service time.

    Returns
    -------
    float
        probability wait time in queue is < t
    """
    rho = arr_rate / svc_rate
    tail = rho * np.exp(-svc_rate * (1 - rho) * t)
    return 1.0 - tail
def mmc_qwait_cdf(t, arr_rate, svc_rate, c):
    """
    Return P(Wq < t) in an M/M/c/inf queue.

    Parameters
    ----------
    t : float
        wait time of interest
    arr_rate : float
        average arrival rate to queueing system
    svc_rate : float
        average service rate (each server). 1/svc_rate is mean service time.
    c : int
        number of servers

    Returns
    -------
    float
        probability wait time in queue is < t
    """
    rho = arr_rate / (svc_rate * float(c))
    # P(Wq > 0) factor expressed via the state probability at c-1.
    wait_factor = (rho / (1 - rho)) * mmc_prob_n(c - 1, arr_rate, svc_rate, c)
    # Conditional wait is exponential with rate c*mu*(1 - rho).
    decay = np.exp(-c * svc_rate * (1 - rho) * t)
    return 1.0 - wait_factor * decay
def mmc_qwait_cdf_inv(t, prob, arr_rate, svc_rate):
    """
    Return the number of servers such that P(Wq < t) in an M/M/c/inf system
    is at least the specified probability.

    Parameters
    ----------
    t : float
        wait time threshold
    prob : float
        target service-level probability
    arr_rate : float
        average arrival rate to queueing system
    svc_rate : float
        average service rate (each server). 1/svc_rate is mean service time.

    Returns
    -------
    c : int
        number of servers
    """
    # Start at the minimum stable server count and step up until the
    # service level P(Wq < t) reaches the target.
    # NOTE(review): when the offered load is an exact integer the starting
    # c gives rho == 1, which the underlying cdf cannot evaluate -- same as
    # the original behavior; confirm callers avoid this edge.
    c = math.ceil(arr_rate / svc_rate)
    while mmc_qwait_cdf(t, arr_rate, svc_rate, c) < prob:
        c += 1
    return c
def mm1_qwait_pctile(p, arr_rate, svc_rate):
    """
    Return the p'th percentile of P(Wq < t) in an M/M/1/inf queue.

    Parameters
    ----------
    p : float
        percentile of interest
    arr_rate : float
        average arrival rate to queueing system
    svc_rate : float
        average service rate. 1/svc_rate is mean service time.

    Returns
    -------
    float
        t such that P(wait time in queue is < t) = p
    """
    # Newton's method, seeded with the mean service time.
    initial_guess = 1 / svc_rate
    return scipy.optimize.newton(_mm1_waitq_pctile_wrap, initial_guess,
                                 args=(p, arr_rate, svc_rate))
def _mm1_waitq_pctile_wrap(t, p, arr_rate, svc_rate):
    """Root function for mm1_qwait_pctile: zero when P(Wq < t) equals p."""
    cdf_value = mm1_qwait_cdf(t, arr_rate, svc_rate)
    return cdf_value - p
def mmc_qwait_pctile(p, arr_rate, svc_rate, c):
    """
    Return the p'th percentile of P(Wq < t) in an M/M/c/inf queue.

    Parameters
    ----------
    p : float
        percentile of interest
    arr_rate : float
        average arrival rate to queueing system
    svc_rate : float
        average service rate (each server). 1/svc_rate is mean service time.
    c : int
        number of servers

    Returns
    -------
    float
        t such that P(wait time in queue is < t) = p
    """
    # Seed Newton's method with the percentile from an M/M/1 queue having
    # the same total service capacity (c * svc_rate).
    initial_guess = mm1_qwait_pctile(p, arr_rate, c * svc_rate)
    return scipy.optimize.newton(_mmc_waitq_pctile_wrap, initial_guess,
                                 args=(p, arr_rate, svc_rate, c))
def _mmc_waitq_pctile_wrap(t, p, arr_rate, svc_rate, c):
    """Root function for mmc_qwait_pctile: zero when P(Wq < t) equals p."""
    cdf_value = mmc_qwait_cdf(t, arr_rate, svc_rate, c)
    return cdf_value - p
def mdc_mean_qwait_cosmetatos(arr_rate, svc_rate, c):
    """
    Return the approximate mean queue wait in an M/D/c/inf queue using the
    Cosmetatos approximation.

    See Cosmetatos, George P., "Approximate explicit formulae for the
    average queueing time in the processes (M/D/r) and (D/M/r)",
    Infor 13.3 (1975): 328-331.

    Parameters
    ----------
    arr_rate : float
        average arrival rate to queueing system
    svc_rate : float
        average service rate (each server). 1/svc_rate is mean service time.
    c : int
        number of servers

    Returns
    -------
    float
        approximate mean wait time in queue
    """
    rho = arr_rate / (svc_rate * float(c))
    # Multi-server correction factor applied to half the M/M/c wait.
    correction = (c - 1) * (np.sqrt(4 + 5 * c) - 2) / (16 * c)
    utilization_term = (1 - rho) / rho
    adjustment = 1 + correction * utilization_term
    return 0.5 * adjustment * mmc_mean_qwait(arr_rate, svc_rate, c)
def mdc_mean_qsize_cosmetatos(arr_rate, svc_rate, c):
    """
    Return the approximate mean queue size in an M/D/c/inf queue by applying
    Little's Law to the Cosmetatos wait approximation.

    Parameters
    ----------
    arr_rate : float
        average arrival rate to queueing system
    svc_rate : float
        average service rate (each server). 1/svc_rate is mean service time.
    c : int
        number of servers

    Returns
    -------
    float
        approximate mean number of customers in queue
    """
    wq = mdc_mean_qwait_cosmetatos(arr_rate, svc_rate, c)
    return wq * arr_rate
def mgc_mean_qwait_kimura(arr_rate, svc_rate, c, cv2_svc_time):
    """
    Return the approximate mean queue wait in an M/G/c/inf queue using the
    Kimura approximation.

    See Kimura, Toshikazu, "Approximations for multi-server queues: system
    interpolations", Queueing Systems 17.3-4 (1994): 347-382. Interpolates
    between the corresponding M/D/c and M/M/c systems.

    Parameters
    ----------
    arr_rate : float
        average arrival rate to queueing system
    svc_rate : float
        average service rate (each server). 1/svc_rate is mean service time.
    c : int
        number of servers
    cv2_svc_time : float
        squared coefficient of variation for service time distribution

    Returns
    -------
    float
        approximate mean wait time in queue
    """
    # Harmonic-style interpolation weighted by the squared CV of service.
    mmc_weight = 2.0 * cv2_svc_time / mmc_mean_qwait(arr_rate, svc_rate, c)
    mdc_weight = (1.0 - cv2_svc_time) / mdc_mean_qwait_cosmetatos(arr_rate, svc_rate, c)
    return (1.0 + cv2_svc_time) / (mmc_weight + mdc_weight)
def mgc_mean_qsize_kimura(arr_rate, svc_rate, c, cv2_svc_time):
    """
    Return the approximate mean queue size in an M/G/c/inf queue by applying
    Little's Law to the Kimura wait approximation.

    Parameters
    ----------
    arr_rate : float
        average arrival rate to queueing system
    svc_rate : float
        average service rate (each server). 1/svc_rate is mean service time.
    c : int
        number of servers
    cv2_svc_time : float
        squared coefficient of variation for service time distribution

    Returns
    -------
    float
        approximate mean number of customers in queue
    """
    wq = mgc_mean_qwait_kimura(arr_rate, svc_rate, c, cv2_svc_time)
    return wq * arr_rate
def mgc_qwait_cdf_whitt(t, arr_rate, svc_rate, c, cs2):
    """
    Return the approximate P(Wq <= t) in an M/G/c/inf queue using Whitt's
    GI/G/m approximation specialized to Poisson arrivals (ca2 = 1).

    Comparison with the van Hoorn and Tijms M/G/c-specific approximation
    suggests Whitt's is sufficiently accurate and avoids numerically
    integrating excess service time distributions.

    References: Whitt (1993), "Approximations for the GI/G/m queue",
    POM 2, 2: 114-161; van Hoorn et al. (1982), "Approximations for the
    waiting time distribution of the M/G/c queue", Perf. Eval. 2.1: 22-28.

    Parameters
    ----------
    t : float
        wait time of interest
    arr_rate : float
        average arrival rate to queueing system
    svc_rate : float
        average service rate (each server). 1/svc_rate is mean service time.
    c : int
        number of servers
    cs2 : float
        squared coefficient of variation for service time distribution

    Returns
    -------
    float
        ~ P(Wq <= t)
    """
    # Poisson arrivals => squared CV of interarrival times is 1.0.
    return ggm_qwait_cdf_whitt(t, arr_rate, svc_rate, c, 1.0, cs2)
def mgc_mean_qwait_bjorklund(arr_rate, svc_rate, c, cv2_svc_time):
    """
    Return the approximate mean queue wait in an M/G/c/inf queue using the
    Bjorklund and Elldin approximation (linear interpolation between the
    corresponding M/D/c and M/M/c systems).

    See Kimura, Toshikazu, "Approximations for multi-server queues: system
    interpolations", Queueing Systems 17.3-4 (1994): 347-382.

    Parameters
    ----------
    arr_rate : float
        average arrival rate to queueing system
    svc_rate : float
        average service rate (each server). 1/svc_rate is mean service time.
    c : int
        number of servers
    cv2_svc_time : float
        squared coefficient of variation for service time distribution

    Returns
    -------
    float
        approximate mean wait time in queue
    """
    mmc_part = cv2_svc_time * mmc_mean_qwait(arr_rate, svc_rate, c)
    mdc_part = (1.0 - cv2_svc_time) * mdc_mean_qwait_cosmetatos(arr_rate, svc_rate, c)
    return mmc_part + mdc_part
def mgc_mean_qsize_bjorklund(arr_rate, svc_rate, c, cv2_svc_time):
    """
    Return the approximate mean queue size in an M/G/c/inf queue by applying
    Little's Law to the Bjorklund and Elldin wait approximation.

    Parameters
    ----------
    arr_rate : float
        average arrival rate to queueing system
    svc_rate : float
        average service rate (each server). 1/svc_rate is mean service time.
    c : int
        number of servers
    cv2_svc_time : float
        squared coefficient of variation for service time distribution

    Returns
    -------
    float
        approximate mean number of customers in queue
    """
    wq = mgc_mean_qwait_bjorklund(arr_rate, svc_rate, c, cv2_svc_time)
    return wq * arr_rate
def mgc_qcondwait_pctile_firstorder_2moment(prob, arr_rate, svc_rate, c, cv2_svc_time):
    """
    Return an approximate conditional queue-wait percentile in an M/G/c/inf
    system, first-order two-moment approximation based on the M/M/c delay
    percentile (Tijms 1994, "Stochastic Models: An Algorithmic Approach",
    Ch. 4, p299-300).

    The percentile is conditional on Wq > 0 (i.e. on the customer waiting).
    This first-order approximation is OK for 0 <= cv2 <= 2 and
    prob > 1 - P(delay); Erlang-C is used for P(delay) as an approximation
    for the same quantity in M/G/c (justified in Tijms p296).

    Parameters
    ----------
    prob : float
        conditional percentile of interest
    arr_rate : float
        average arrival rate to queueing system
    svc_rate : float
        average service rate (each server). 1/svc_rate is mean service time.
    c : int
        number of servers
    cv2_svc_time : float
        squared coefficient of variation for service time distribution

    Returns
    -------
    float
        t such that P(Wq < t | Wq > 0) = prob
    """
    load = arr_rate / svc_rate
    # Map the conditional percentile onto the equivalent unconditional one
    # (Tijms p274).
    uncond_prob = 1.0 - (1.0 - prob) * erlangc(load, c)
    # Conditional percentile of the corresponding M/M/c system.
    mmc_pctile = mmc_qwait_pctile(uncond_prob, arr_rate, svc_rate, c)
    # First-order correction by the service-time squared CV.
    return 0.5 * (1.0 + cv2_svc_time) * mmc_pctile
def mgc_qcondwait_pctile_secondorder_2moment(prob, arr_rate, svc_rate, c, cv2_svc_time):
    """
    Approximate conditional queue-wait percentile in an M/G/c/inf system.

    Second-order approximation that interpolates between the corresponding
    M/M/c and M/D/c systems; see Tijms, H.C. (1994), "Stochastic Models:
    An Algorithmic Approach", Wiley, Chichester, Chapter 4, p299-300.
    The percentile is conditional on Wq > 0.

    Parameters
    ----------
    prob : float
        target conditional percentile
    arr_rate : float
        average arrival rate to queueing system
    svc_rate : float
        average service rate (each server); 1/svc_rate is mean service time
    c : int
        number of servers
    cv2_svc_time : float
        squared coefficient of variation for service time distribution

    Returns
    -------
    float
        t such that P(Wq < t | Wq > 0) = prob
    """
    offered_load = arr_rate / svc_rate
    # Convert the conditional percentile to an unconditional one (Tijms p274).
    uncond_prob = 1.0 - (1.0 - prob) * erlangc(offered_load, c)
    # Anchor points: conditional percentiles of the M/M/c and M/D/c systems.
    mmc_pctile = mmc_qwait_pctile(uncond_prob, arr_rate, svc_rate, c)
    mdc_pctile = mdc_waitq_pctile(uncond_prob, arr_rate, svc_rate, c)
    # Interpolate by the service-time scv.
    return (1.0 - cv2_svc_time) * mdc_pctile + cv2_svc_time * mmc_pctile
def mg1_mean_qsize(arr_rate, svc_rate, cv2_svc_time):
    """
    Return the mean queue size in an M/G/1/inf queue using the P-K formula.

    Pollaczek-Khinchine mean-value form: Lq = rho^2 * (1 + cs^2) / (2 * (1 - rho)).
    For cs^2 = 1 this reduces to the M/M/1 result Lq = rho^2 / (1 - rho).

    Parameters
    ----------
    arr_rate : float
        average arrival rate to queueing system
    svc_rate : float
        average service rate (each server). 1/svc_rate is mean service time.
    cv2_svc_time : float
        squared coefficient of variation for service time distribution

    Returns
    -------
    float
        mean number of customers in queue

    Raises
    ------
    ValueError
        if rho = arr_rate / svc_rate is >= 1 (unstable queue)
    """
    rho = arr_rate / svc_rate
    if rho >= 1.0:
        raise ValueError("rho must be less than 1.0")
    # BUGFIX: previous version computed arr_rate**2 * cs2 / (2*(1-rho)),
    # omitting the (1 + cs2) term and the 1/svc_rate**2 factor of the
    # P-K formula; it did not even reduce to the M/M/1 result for cs2 = 1.
    mean_qsize = (rho ** 2) * (1.0 + cv2_svc_time) / (2.0 * (1.0 - rho))
    return mean_qsize
def mg1_mean_qwait(arr_rate, svc_rate, cs2):
    """
    Return the mean queue wait in an M/G/1/inf queue.

    Obtained from the P-K mean queue size via Little's Law (Wq = Lq / lambda).
    See any decent queueing book.

    Parameters
    ----------
    arr_rate : float
        average arrival rate to queueing system
    svc_rate : float
        average service rate (each server); 1/svc_rate is mean service time
    cs2 : float
        squared coefficient of variation for service time distribution

    Returns
    -------
    float
        mean wait time in queue
    """
    qsize = mg1_mean_qsize(arr_rate, svc_rate, cs2)
    return qsize / arr_rate
def gamma_0(m, rho):
    """
    Intermediate correction term gamma_0 of Whitt's GI/G/m approximation.

    See p124 of Whitt (1993), immediately after Eq 2.16. Capped at 0.24.

    Parameters
    ----------
    m : int
        number of servers
    rho : float
        traffic intensity, lambda / (mu * m)

    Returns
    -------
    float
        min(0.24, (1-rho)(m-1)(sqrt(4+5m)-2) / (16 m rho))
    """
    cap = 0.24
    scaled = (1 - rho) * (m - 1) * (math.sqrt(4 + 5 * m) - 2) / (16 * m * rho)
    return min(cap, scaled)
def _ggm_mean_qwait_whitt_phi_1(m, rho):
    """
    Correction factor phi_1 = 1 + gamma_0 of Whitt's GI/G/m approximation.

    See p124 of Whitt (1993), immediately after Eq 2.16.

    Parameters
    ----------
    m : int
        number of servers
    rho : float
        traffic intensity, lambda / (mu * m)

    Returns
    -------
    float
    """
    return gamma_0(m, rho) + 1.0
def _ggm_mean_qwait_whitt_phi_2(m, rho):
    """
    Correction factor phi_2 = 1 - 4*gamma_0 of Whitt's GI/G/m approximation.

    See p124 of Whitt (1993), immediately after Eq 2.18.

    Parameters
    ----------
    m : int
        number of servers
    rho : float
        traffic intensity, lambda / (mu * m)

    Returns
    -------
    float
    """
    correction = 4.0 * gamma_0(m, rho)
    return 1.0 - correction
def _ggm_mean_qwait_whitt_phi_3(m, rho):
    """
    Correction factor phi_3 of Whitt's GI/G/m approximation.

    See p124 of Whitt (1993), immediately after Eq 2.20:
    phi_3 = phi_2 * exp(-2(1-rho)/(3 rho)).

    Parameters
    ----------
    m : int
        number of servers
    rho : float
        traffic intensity, lambda / (mu * m)

    Returns
    -------
    float
    """
    damping = math.exp(-2.0 * (1 - rho) / (3.0 * rho))
    return _ggm_mean_qwait_whitt_phi_2(m, rho) * damping
def _ggm_mean_qwait_whitt_phi_4(m, rho):
    """
    Correction factor phi_4 of Whitt's GI/G/m approximation.

    See p125 of Whitt (1993), Eq 2.21: phi_4 = min(1, (phi_1 + phi_3)/2).

    Parameters
    ----------
    m : int
        number of servers
    rho : float
        traffic intensity, lambda / (mu * m)

    Returns
    -------
    float
    """
    average = 0.5 * (_ggm_mean_qwait_whitt_phi_1(m, rho)
                     + _ggm_mean_qwait_whitt_phi_3(m, rho))
    return min(1.0, average)
def _ggm_mean_qwait_whitt_psi_0(c2, m, rho):
    """
    Correction factor psi_0 of Whitt's GI/G/m approximation.

    See p125 of Whitt (1993), Eq 2.22. Identity for c2 >= 1; otherwise
    phi_4 raised to the power 2*(1 - c2).

    Parameters
    ----------
    c2 : float
        common squared CV for both arrival and service processes
    m : int
        number of servers
    rho : float
        traffic intensity, lambda / (mu * m)

    Returns
    -------
    float
    """
    if c2 >= 1:
        return 1.0
    return _ggm_mean_qwait_whitt_phi_4(m, rho) ** (2 * (1 - c2))
def _ggm_mean_qwait_whitt_phi_0(rho, ca2, cs2, m):
    """
    Overall correction factor phi_0 of Whitt's GI/G/m mean-wait approximation.

    See p125 of Whitt (1993), Eq 2.25. Two cases depending on whether the
    arrival or the service process is the more variable one.

    Parameters
    ----------
    rho : float
        traffic intensity, lambda / (mu * m)
    ca2 : float
        squared CV for arrival process
    cs2 : float
        squared CV for service process
    m : int
        number of servers

    Returns
    -------
    float
    """
    # psi_0 is evaluated at the average scv in both cases.
    psi = _ggm_mean_qwait_whitt_psi_0((ca2 + cs2) / 2.0, m, rho)
    if ca2 >= cs2:
        term1 = _ggm_mean_qwait_whitt_phi_1(m, rho) * (4 * (ca2 - cs2) / (4 * ca2 - 3 * cs2))
        term2 = (cs2 / (4 * ca2 - 3 * cs2)) * psi
        return term1 + term2
    else:
        term1 = _ggm_mean_qwait_whitt_phi_3(m, rho) * ((cs2 - ca2) / (2 * ca2 + 2 * cs2))
        term2 = ((cs2 + 3 * ca2) / (2 * ca2 + 2 * cs2)) * psi
        # NOTE: removed leftover debug code (`check = term2 * term3 / term1`
        # plus a commented-out print); it contributed nothing to the result
        # and risked ZeroDivisionError if term1 were ever zero.
        return term1 + term2
def ggm_mean_qwait_whitt(arr_rate, svc_rate, m, ca2, cs2):
    """
    Approximate mean queue wait in a GI/G/m/inf queue (Whitt, 1993).

    See Whitt, W. "Approximations for the GI/G/m queue", Production and
    Operations Management 2, 2 (Spring 1993): 114-161 (Eq 2.24, p125).
    Interpolates, with corrections, between M/D/m, D/M/m and M/M/m systems.

    Parameters
    ----------
    arr_rate : float
        average arrival rate to queueing system
    svc_rate : float
        average service rate (each server); 1/svc_rate is mean service time
    m : int
        number of servers
    ca2 : float
        squared CV for inter-arrival time distribution
    cs2 : float
        squared CV for service time distribution

    Returns
    -------
    float
        mean wait time in queue

    Raises
    ------
    ValueError
        if rho = arr_rate / (m * svc_rate) is >= 1
    """
    rho = arr_rate / (svc_rate * float(m))
    if rho >= 1.0:
        raise ValueError("rho must be less than 1.0")
    # HACK: Eq 2.24 does not reproduce Whitt's Table 2 for D/M/m, but the
    # D/M/m-specific Eq 2.20 does, so that case is trapped and special-cased.
    if ca2 == 0 and cs2 == 1:
        return dmm_mean_qwait_whitt(arr_rate, svc_rate, m)
    correction = _ggm_mean_qwait_whitt_phi_0(rho, ca2, cs2, m)
    scv_blend = 0.5 * (ca2 + cs2)
    return correction * scv_blend * mmc_mean_qwait(arr_rate, svc_rate, m)
def ggm_prob_wait_whitt(arr_rate, svc_rate, m, ca2, cs2):
    """
    Approximate P(Wq > 0) in a GI/G/m/inf queue (Whitt, 1993).

    See Whitt, W. "Approximations for the GI/G/m queue", Production and
    Operations Management 2, 2 (Spring 1993): 114-161.

    Parameters
    ----------
    arr_rate : float
        average arrival rate to queueing system
    svc_rate : float
        average service rate (each server); 1/svc_rate is mean service time
    m : int
        number of servers
    ca2 : float
        squared CV for inter-arrival time distribution
    cs2 : float
        squared CV for service time distribution

    Returns
    -------
    float
        probability of delay, capped at 1
    """
    rho = arr_rate / (svc_rate * float(m))
    if ca2 == 1:
        # Poisson arrivals: Whitt relies on Erlang-C working well for M/G/m.
        return mgc_prob_wait_erlangc(arr_rate, svc_rate, m)
    raw_prob = _ggm_prob_wait_whitt_pi(m, rho, ca2, cs2)
    return min(raw_prob, 1)
def _ggm_prob_wait_whitt_z(ca2, cs2):
"""
Equation 3.8 on p139 of Whitt (1993). Used in approximation for P(Wq > 0) in GI/G/c/inf queue.
See Whitt, Ward. "Approximations for the GI/G/m queue"
Production and Operations Management 2, 2 (Spring 1993): 114-161.
Parameters
----------
ca2 : float
squared coefficient of variation for inter-arrival time distribution
cs2 : float
squared coefficient of variation for service time distribution
Returns
-------
float
approximation for intermediate term z (see Eq 3.6)
"""
z = (ca2 + cs2) / (1.0 + cs2)
return z
def _ggm_prob_wait_whitt_gamma(m, rho, z):
"""
Equation 3.5 on p136 of Whitt (1993). Used in approximation for P(Wq > 0) in GI/G/c/inf queue.
See Whitt, Ward. "Approximations for the GI/G/m queue"
Production and Operations Management 2, 2 (Spring 1993): 114-161.
Parameters
----------
m : int
number of servers
rho : float
traffic intensity; arr_rate / (svc_rate * m)
z : float
intermediate term approximated in Eq 3.8
Returns
-------
float
intermediate term gamma (see Eq 3.5)
"""
term1 = m - m * rho - 0.5
term2 = np.sqrt(m * rho * z)
gamma = term1 / term2
return gamma
def _ggm_prob_wait_whitt_pi_6(m, rho, z):
"""
Part of Equation 3.11 on p139 of Whitt (1993). Used in approximation for P(Wq > 0) in GI/G/c/inf queue.
See Whitt, Ward. "Approximations for the GI/G/m queue"
Production and Operations Management 2, 2 (Spring 1993): 114-161.
Parameters
----------
m : int
number of servers
rho : float
traffic intensity; arr_rate / (svc_rate * m)
z : float
intermediate term approximated in Eq 3.8
Returns
-------
float
intermediate term pi_6 (see Eq 3.11)
"""
pi_6 = 1.0 - stats.norm.cdf((m - m * rho - 0.5) / np.sqrt(m * rho * z))
return pi_6
def _ggm_prob_wait_whitt_pi_5(m, rho, ca2, cs2):
    """
    Intermediate term pi_5 of Whitt's P(Wq > 0) approximation.

    Part of Eq 3.11, p139 of Whitt (1993): Erlang-C scaled by a ratio of
    normal tail probabilities, capped at 1.

    Parameters
    ----------
    m : int
        number of servers
    rho : float
        traffic intensity; arr_rate / (svc_rate * m)
    ca2 : float
        squared CV for inter-arrival time distribution
    cs2 : float
        squared CV for service time distribution

    Returns
    -------
    float
        intermediate term pi_5
    """
    adj_slack = 2.0 * (1.0 - rho) * np.sqrt(m) / (1.0 + ca2)
    slack = (1.0 - rho) * np.sqrt(m)
    scaled = erlangc(rho * m, m) * (1.0 - stats.norm.cdf(adj_slack)) / (1.0 - stats.norm.cdf(slack))
    return min(1.0, scaled)
def _ggm_prob_wait_whitt_pi_4(m, rho, ca2, cs2):
    """
    Intermediate term pi_4 of Whitt's P(Wq > 0) approximation.

    Part of Eq 3.11, p139 of Whitt (1993): Erlang-C scaled by a ratio of
    normal tail probabilities, capped at 1.

    Parameters
    ----------
    m : int
        number of servers
    rho : float
        traffic intensity; arr_rate / (svc_rate * m)
    ca2 : float
        squared CV for inter-arrival time distribution
    cs2 : float
        squared CV for service time distribution

    Returns
    -------
    float
        intermediate term pi_4
    """
    adj_slack = (1.0 + cs2) * (1.0 - rho) * np.sqrt(m) / (ca2 + cs2)
    slack = (1.0 - rho) * np.sqrt(m)
    scaled = erlangc(rho * m, m) * (1.0 - stats.norm.cdf(adj_slack)) / (1.0 - stats.norm.cdf(slack))
    return min(1.0, scaled)
def _ggm_prob_wait_whitt_pi_1(m, rho, ca2, cs2):
    """
    Intermediate term pi_1 of Whitt's P(Wq > 0) approximation.

    Part of Eq 3.11, p139 of Whitt (1993): convex combination of pi_4 and
    pi_5 weighted by rho squared.

    Parameters
    ----------
    m : int
        number of servers
    rho : float
        traffic intensity; arr_rate / (svc_rate * m)
    ca2 : float
        squared CV for inter-arrival time distribution
    cs2 : float
        squared CV for service time distribution

    Returns
    -------
    float
        intermediate term pi_1
    """
    pi_4 = _ggm_prob_wait_whitt_pi_4(m, rho, ca2, cs2)
    pi_5 = _ggm_prob_wait_whitt_pi_5(m, rho, ca2, cs2)
    weight = rho ** 2
    return weight * pi_4 + (1.0 - weight) * pi_5
def _ggm_prob_wait_whitt_pi_2(m, rho, ca2, cs2):
    """
    Intermediate term pi_2 of Whitt's P(Wq > 0) approximation.

    Part of Eq 3.11, p139 of Whitt (1993): convex combination of pi_1 and
    pi_6 weighted by the arrival scv.

    Parameters
    ----------
    m : int
        number of servers
    rho : float
        traffic intensity; arr_rate / (svc_rate * m)
    ca2 : float
        squared CV for inter-arrival time distribution
    cs2 : float
        squared CV for service time distribution

    Returns
    -------
    float
        intermediate term pi_2
    """
    pi_1 = _ggm_prob_wait_whitt_pi_1(m, rho, ca2, cs2)
    z = _ggm_prob_wait_whitt_z(ca2, cs2)
    pi_6 = _ggm_prob_wait_whitt_pi_6(m, rho, z)
    return ca2 * pi_1 + (1.0 - ca2) * pi_6
def _ggm_prob_wait_whitt_pi_3(m, rho, ca2, cs2):
    """
    Intermediate term pi_3 of Whitt's P(Wq > 0) approximation.

    Part of Eq 3.11, p139 of Whitt (1993): interpolation between pi_2 and
    pi_1 with a weight driven by ca2 and gamma.

    Parameters
    ----------
    m : int
        number of servers
    rho : float
        traffic intensity; arr_rate / (svc_rate * m)
    ca2 : float
        squared CV for inter-arrival time distribution
    cs2 : float
        squared CV for service time distribution

    Returns
    -------
    float
        intermediate term pi_3
    """
    z = _ggm_prob_wait_whitt_z(ca2, cs2)
    gamma = _ggm_prob_wait_whitt_gamma(m, rho, z)
    pi_2 = _ggm_prob_wait_whitt_pi_2(m, rho, ca2, cs2)
    pi_1 = _ggm_prob_wait_whitt_pi_1(m, rho, ca2, cs2)
    weight = 2.0 * (1.0 - ca2) * (gamma - 0.5)
    return weight * pi_2 + (1.0 - weight) * pi_1
def _ggm_prob_wait_whitt_pi(m, rho, ca2, cs2):
    """
    Raw (uncapped) P(Wq > 0) approximation for a GI/G/m queue.

    Eq 3.10, p139 of Whitt (1993): selects one of the pi_1 / pi_2 / pi_3
    sub-approximations based on m, gamma and ca2.

    Parameters
    ----------
    m : int
        number of servers
    rho : float
        traffic intensity; arr_rate / (svc_rate * m)
    ca2 : float
        squared CV for inter-arrival time distribution
    cs2 : float
        squared CV for service time distribution

    Returns
    -------
    float
        raw delay probability (may exceed 1; caller caps it)
    """
    z = _ggm_prob_wait_whitt_z(ca2, cs2)
    gamma = _ggm_prob_wait_whitt_gamma(m, rho, z)
    if m <= 6 or gamma <= 0.5 or ca2 >= 1:
        return _ggm_prob_wait_whitt_pi_1(m, rho, ca2, cs2)
    if m >= 7 and gamma >= 1.0 and ca2 < 1:
        return _ggm_prob_wait_whitt_pi_2(m, rho, ca2, cs2)
    return _ggm_prob_wait_whitt_pi_3(m, rho, ca2, cs2)
def _ggm_prob_wait_whitt_whichpi(m, rho, ca2, cs2):
    """
    Report which pi sub-approximation Eq 3.10 of Whitt (1993) would select.

    Mirrors the case selection of `_ggm_prob_wait_whitt_pi`; used for
    debugging and validation of the approximation implementation.

    Parameters
    ----------
    m : int
        number of servers
    rho : float
        traffic intensity; arr_rate / (svc_rate * m)
    ca2 : float
        squared CV for inter-arrival time distribution
    cs2 : float
        squared CV for service time distribution

    Returns
    -------
    int
        the pi case used in the approximation (1, 2, or 3)
    """
    z = _ggm_prob_wait_whitt_z(ca2, cs2)
    gamma = _ggm_prob_wait_whitt_gamma(m, rho, z)
    if m <= 6 or gamma <= 0.5 or ca2 >= 1:
        return 1
    if m >= 7 and gamma >= 1.0 and ca2 < 1:
        return 2
    return 3
def _ggm_qcondwait_whitt_ds3(cs2):
"""
Return the approximate E(V^3)/(EV)^2 where V is a service time; based on either a hyperexponential
or Erlang distribution. Used in approximation of conditional wait time CDF (conditional on W>0).
Whitt refers to conditional wait as D in his paper:
See <NAME>. "Approximations for the GI/G/m queue"
Production and Operations Management 2, 2 (Spring 1993): 114-161.
This is Equation 4.3 on p146. Note that there is a typo in the original paper in which the first term
for Case 1 is shown as cubed, whereas it should be squared. This can be confirmed by seeing Eq 51 in
Whitt's paper on the QNA (Bell Systems Technical Journal, Nov 1983).
Parameters
----------
cs2 : float
squared coefficient of variation for service time distribution
Returns
-------
float
mean wait time in queue
"""
if cs2 >= 1:
ds3 = 3.0 * cs2 * (1.0 + cs2)
else:
ds3 = (2 * cs2 + 1.0) * (cs2 + 1.0)
return ds3
def ggm_qcondwait_whitt_cd2(rho, cs2):
    """
    Approximate squared CV of conditional wait (delay) in a G/G/m queue.

    Eq 4.2, p145 of Whitt (1993), "Approximations for the GI/G/m queue".

    Parameters
    ----------
    rho : float
        traffic intensity; arr_rate / (svc_rate * m)
    cs2 : float
        squared CV for service time distribution

    Returns
    -------
    float
        squared CV of the conditional queue delay
    """
    third_moment_ratio = _ggm_qcondwait_whitt_ds3(cs2)
    correction = 4 * (1.0 - rho) * third_moment_ratio / (3.0 * (cs2 + 1.0) ** 2)
    return (2 * rho - 1.0) + correction
def ggm_qwait_whitt_cw2(arr_rate, svc_rate, m, ca2, cs2):
    """
    Approximate squared CV of queue wait time in a G/G/m queue.

    See Whitt, W. "Approximations for the GI/G/m queue", Production and
    Operations Management 2, 2 (Spring 1993): 114-161.

    Parameters
    ----------
    arr_rate : float
        average arrival rate to queueing system
    svc_rate : float
        average service rate (each server); 1/svc_rate is mean service time
    m : int
        number of servers
    ca2 : float
        squared CV for inter-arrival time distribution
    cs2 : float
        squared CV for service time distribution

    Returns
    -------
    float
        squared CV of wait time in queue
    """
    rho = arr_rate / (svc_rate * float(m))
    pdelay = ggm_prob_wait_whitt(arr_rate, svc_rate, m, ca2, cs2)
    cond_scv = ggm_qcondwait_whitt_cd2(rho, cs2)
    return (cond_scv + 1 - pdelay) / pdelay
def ggm_qcondwait_whitt_ed(arr_rate, svc_rate, m, ca2, cs2):
    """
    Approximate mean conditional wait time (delay) in a G/G/m queue.

    Mean unconditional wait divided by the delay probability; see Whitt, W.
    "Approximations for the GI/G/m queue", POM 2, 2 (Spring 1993): 114-161.

    Parameters
    ----------
    arr_rate : float
        average arrival rate to queueing system
    svc_rate : float
        average service rate (each server); 1/svc_rate is mean service time
    m : int
        number of servers
    ca2 : float
        squared CV for inter-arrival time distribution
    cs2 : float
        squared CV for service time distribution

    Returns
    -------
    float
        mean conditional wait time in queue
    """
    pdelay = ggm_prob_wait_whitt(arr_rate, svc_rate, m, ca2, cs2)
    return ggm_mean_qwait_whitt(arr_rate, svc_rate, m, ca2, cs2) / pdelay
def ggm_qcondwait_whitt_vard(arr_rate, svc_rate, m, ca2, cs2):
    """
    Approximate variance of conditional wait time (delay) in a G/G/m queue.

    See Whitt, W. "Approximations for the GI/G/m queue", POM 2, 2
    (Spring 1993): 114-161.

    Parameters
    ----------
    arr_rate : float
        average arrival rate to queueing system
    svc_rate : float
        average service rate (each server); 1/svc_rate is mean service time
    m : int
        number of servers
    ca2 : float
        squared CV for inter-arrival time distribution
    cs2 : float
        squared CV for service time distribution

    Returns
    -------
    float
        variance of conditional wait time in queue
    """
    rho = arr_rate / (svc_rate * float(m))
    pdelay = ggm_prob_wait_whitt(arr_rate, svc_rate, m, ca2, cs2)
    cond_scv = ggm_qcondwait_whitt_cd2(rho, cs2)
    meanwait = ggm_mean_qwait_whitt(arr_rate, svc_rate, m, ca2, cs2)
    return (meanwait ** 2) * cond_scv / (pdelay ** 2)
def ggm_qcondwait_whitt_ed2(arr_rate, svc_rate, m, ca2, cs2):
    """
    Approximate second moment of conditional wait time in a G/G/m queue.

    E(D^2) = Var(D) + E(D)^2; see Whitt, W. "Approximations for the
    GI/G/m queue", POM 2, 2 (Spring 1993): 114-161.

    Parameters
    ----------
    arr_rate : float
        average arrival rate to queueing system
    svc_rate : float
        average service rate (each server); 1/svc_rate is mean service time
    m : int
        number of servers
    ca2 : float
        squared CV for inter-arrival time distribution
    cs2 : float
        squared CV for service time distribution

    Returns
    -------
    float
        second moment of conditional wait time in queue
    """
    pdelay = ggm_prob_wait_whitt(arr_rate, svc_rate, m, ca2, cs2)
    var_delay = ggm_qcondwait_whitt_vard(arr_rate, svc_rate, m, ca2, cs2)
    # Conditional mean delay = unconditional mean wait / P(delay).
    mean_delay = ggm_mean_qwait_whitt(arr_rate, svc_rate, m, ca2, cs2) / pdelay
    return var_delay + mean_delay ** 2
def ggm_qwait_whitt_varw(arr_rate, svc_rate, m, ca2, cs2):
    """
    Approximate variance of (unconditional) wait time in a G/G/m queue.

    Var(W) = E(W)^2 * cw2; see Whitt, W. "Approximations for the GI/G/m
    queue", POM 2, 2 (Spring 1993): 114-161.

    Parameters
    ----------
    arr_rate : float
        average arrival rate to queueing system
    svc_rate : float
        average service rate (each server); 1/svc_rate is mean service time
    m : int
        number of servers
    ca2 : float
        squared CV for inter-arrival time distribution
    cs2 : float
        squared CV for service time distribution

    Returns
    -------
    float
        variance of wait time in queue
    """
    scv_wait = ggm_qwait_whitt_cw2(arr_rate, svc_rate, m, ca2, cs2)
    meanwait = ggm_mean_qwait_whitt(arr_rate, svc_rate, m, ca2, cs2)
    return (meanwait ** 2) * scv_wait
def ggm_qwait_whitt_ew2(arr_rate, svc_rate, m, ca2, cs2):
    """
    Approximate second moment of wait time in a G/G/m queue.

    E(W^2) = Var(W) + E(W)^2; see Whitt, W. "Approximations for the
    GI/G/m queue", POM 2, 2 (Spring 1993): 114-161.

    Parameters
    ----------
    arr_rate : float
        average arrival rate to queueing system
    svc_rate : float
        average service rate (each server); 1/svc_rate is mean service time
    m : int
        number of servers
    ca2 : float
        squared CV for inter-arrival time distribution
    cs2 : float
        squared CV for service time distribution

    Returns
    -------
    float
        second moment of wait time in queue
    """
    var_wait = ggm_qwait_whitt_varw(arr_rate, svc_rate, m, ca2, cs2)
    mean_wait = ggm_mean_qwait_whitt(arr_rate, svc_rate, m, ca2, cs2)
    return var_wait + mean_wait ** 2
def ggm_mean_sojourn_whitt(arr_rate, svc_rate, m, ca2, cs2):
    """
    Approximate mean sojourn time (wait + service) in a G/G/m queue.

    See Whitt, W. "Approximations for the GI/G/m queue", POM 2, 2
    (Spring 1993): 114-161.

    Parameters
    ----------
    arr_rate : float
        average arrival rate to queueing system
    svc_rate : float
        average service rate (each server); 1/svc_rate is mean service time
    m : int
        number of servers
    ca2 : float
        squared CV for inter-arrival time distribution
    cs2 : float
        squared CV for service time distribution

    Returns
    -------
    float
        mean time in system (queue wait plus mean service time)
    """
    mean_wait = ggm_mean_qwait_whitt(arr_rate, svc_rate, m, ca2, cs2)
    return mean_wait + 1.0 / svc_rate
def ggm_sojourn_whitt_var(arr_rate, svc_rate, m, ca2, cs2):
    """
    Approximate variance of sojourn time (wait + service) in a G/G/m queue.

    Var(T) = Var(W) + Var(S), with Var(S) = cs2 * (1/svc_rate)^2; see
    Whitt, W. "Approximations for the GI/G/m queue", POM 2, 2
    (Spring 1993): 114-161.

    Parameters
    ----------
    arr_rate : float
        average arrival rate to queueing system
    svc_rate : float
        average service rate (each server); 1/svc_rate is mean service time
    m : int
        number of servers
    ca2 : float
        squared CV for inter-arrival time distribution
    cs2 : float
        squared CV for service time distribution

    Returns
    -------
    float
        variance of time in system
    """
    var_wait = ggm_qwait_whitt_varw(arr_rate, svc_rate, m, ca2, cs2)
    return var_wait + cs2 * (1.0 / svc_rate) ** 2
def ggm_sojourn_whitt_et2(arr_rate, svc_rate, m, ca2, cs2):
    """
    Approximate second moment of sojourn time (wait + service) in G/G/m.

    E(T^2) = Var(T) + E(T)^2; see Whitt, W. "Approximations for the
    GI/G/m queue", POM 2, 2 (Spring 1993): 114-161.

    Parameters
    ----------
    arr_rate : float
        average arrival rate to queueing system
    svc_rate : float
        average service rate (each server); 1/svc_rate is mean service time
    m : int
        number of servers
    ca2 : float
        squared CV for inter-arrival time distribution
    cs2 : float
        squared CV for service time distribution

    Returns
    -------
    float
        second moment of time in system
    """
    var_sojourn = ggm_sojourn_whitt_var(arr_rate, svc_rate, m, ca2, cs2)
    mean_sojourn = ggm_mean_sojourn_whitt(arr_rate, svc_rate, m, ca2, cs2)
    return var_sojourn + mean_sojourn ** 2
def ggm_sojourn_whitt_cv2(arr_rate, svc_rate, m, ca2, cs2):
    """
    Approximate squared CV of sojourn time (wait + service) in G/G/m.

    cv2(T) = Var(T) / E(T)^2; see Whitt, W. "Approximations for the
    GI/G/m queue", POM 2, 2 (Spring 1993): 114-161.

    Parameters
    ----------
    arr_rate : float
        average arrival rate to queueing system
    svc_rate : float
        average service rate (each server); 1/svc_rate is mean service time
    m : int
        number of servers
    ca2 : float
        squared CV for inter-arrival time distribution
    cs2 : float
        squared CV for service time distribution

    Returns
    -------
    float
        squared CV of time in system
    """
    var_sojourn = ggm_sojourn_whitt_var(arr_rate, svc_rate, m, ca2, cs2)
    mean_sojourn = ggm_mean_sojourn_whitt(arr_rate, svc_rate, m, ca2, cs2)
    return var_sojourn / mean_sojourn ** 2
def ggm_mean_qsize_whitt(arr_rate, svc_rate, m, ca2, cs2):
    """
    Approximate mean queue size in a GI/G/m/inf queue.

    Whitt's 1993 mean-wait approximation combined with Little's Law
    (Lq = lambda * Wq); see Whitt, W. "Approximations for the GI/G/m
    queue", POM 2, 2 (Spring 1993): 114-161.

    Parameters
    ----------
    arr_rate : float
        average arrival rate to queueing system
    svc_rate : float
        average service rate (each server); 1/svc_rate is mean service time
    m : int
        number of servers
    ca2 : float
        squared CV for inter-arrival time distribution
    cs2 : float
        squared CV for service time distribution

    Returns
    -------
    float
        mean number of customers in queue
    """
    mean_wait = ggm_mean_qwait_whitt(arr_rate, svc_rate, m, ca2, cs2)
    # Little's Law.
    return mean_wait * arr_rate
def ggm_mean_syssize_whitt(arr_rate, svc_rate, m, ca2, cs2):
    """
    Approximate mean system size in a GI/G/m/inf queue.

    Whitt's 1993 mean-sojourn approximation combined with Little's Law
    (L = lambda * T); see Whitt, W. "Approximations for the GI/G/m
    queue", POM 2, 2 (Spring 1993): 114-161.

    Parameters
    ----------
    arr_rate : float
        average arrival rate to queueing system
    svc_rate : float
        average service rate (each server); 1/svc_rate is mean service time
    m : int
        number of servers
    ca2 : float
        squared CV for inter-arrival time distribution
    cs2 : float
        squared CV for service time distribution

    Returns
    -------
    float
        mean number of customers in system
    """
    mean_sojourn = ggm_mean_sojourn_whitt(arr_rate, svc_rate, m, ca2, cs2)
    # Little's Law.
    return mean_sojourn * arr_rate
def dmm_mean_qwait_whitt(arr_rate, svc_rate, m, ca2=0.0, cs2=1.0):
    """
    Approximate mean queue wait in a D/M/m/inf queue (Whitt, 1993).

    Eq 2.20, p124 of Whitt, W. "Approximations for the GI/G/m queue",
    POM 2, 2 (Spring 1993): 114-161. Together with mdm_mean_qwait_whitt,
    this refines the Cosmetatos approximations.

    Parameters
    ----------
    arr_rate : float
        average arrival rate to queueing system
    svc_rate : float
        average service rate (each server); 1/svc_rate is mean service time
    m : int
        number of servers
    ca2 : float
        squared CV for inter-arrival time distribution (0 for D)
    cs2 : float
        squared CV for service time distribution (1 for M)

    Returns
    -------
    float
        mean wait time in queue
    """
    rho = arr_rate / (svc_rate * float(m))
    # Eq 2.20 on p124: phi_3 correction times the scv-blended M/M/m wait.
    scv_blend = 0.5 * (ca2 + cs2)
    return (_ggm_mean_qwait_whitt_phi_3(m, rho) * scv_blend
            * mmc_mean_qwait(arr_rate, svc_rate, m))
def mdm_mean_qwait_whitt(arr_rate, svc_rate, m, ca2=0.0, cs2=1.0):
    """
    Approximate mean queue wait in an M/D/m/inf queue (Whitt, 1993).

    Eq 2.16, p124 of Whitt, W. "Approximations for the GI/G/m queue",
    POM 2, 2 (Spring 1993): 114-161. Together with dmm_mean_qwait_whitt,
    this refines the Cosmetatos approximations.

    Parameters
    ----------
    arr_rate : float
        average arrival rate to queueing system
    svc_rate : float
        average service rate (each server); 1/svc_rate is mean service time
    m : int
        number of servers
    ca2 : float
        squared CV for inter-arrival time distribution (0 for D)
    cs2 : float
        squared CV for service time distribution (1 for M)

    Returns
    -------
    float
        mean wait time in queue
    """
    rho = arr_rate / (svc_rate * float(m))
    # Eq 2.16 on p124: phi_1 correction times the scv-blended M/M/m wait.
    scv_blend = 0.5 * (ca2 + cs2)
    return (_ggm_mean_qwait_whitt_phi_1(m, rho) * scv_blend
            * mmc_mean_qwait(arr_rate, svc_rate, m))
def fit_balanced_hyperexpon2(mean, cs2):
    """
    Fit a balanced two-phase hyperexponential (H2) to a mean and scv.

    Intended for scv > 1; see Whitt, W. "Approximations for the GI/G/m
    queue", POM 2, 2 (Spring 1993): 114-161.

    Parameters
    ----------
    mean : float
        desired mean of the distribution
    cs2 : float
        desired squared coefficient of variation

    Returns
    -------
    tuple of float
        (p1, rate1, rate2): branching probability of phase 1 and the two
        exponential rates
    """
    skew = np.sqrt((cs2 - 1) / (cs2 + 1))
    p1 = 0.5 * (1 + skew)
    p2 = 1 - p1
    return (p1, 2 * p1 / mean, 2 * p2 / mean)
def hyperexpon_cdf(x, probs, rates):
    """
    Return P(X < x) for a hyperexponential random variable X.

    X is a probabilistic mixture of exponentials: with probability
    probs[i], X ~ Exp(rates[i]).

    Parameters
    ----------
    x : float
        evaluation point
    probs : list of float
        branching probabilities for the hyperexponential
    rates : list of float
        exponential rates, one per branch

    Returns
    -------
    float
        P(X < x)
    """
    tail = 0.0
    for branch_p, branch_rate in zip(probs, rates):
        tail += branch_p * np.exp(-branch_rate * x)
    return 1.0 - tail
def ggm_qcondwait_cdf_whitt(t, arr_rate, svc_rate, c, ca2, cs2):
    """
    Approximate P(D <= t) where D = (Wq | Wq > 0) in a G/G/m queue.

    Whitt's two-moment approximation; see Section 4 of Whitt, W.
    "Approximations for the GI/G/m queue", POM 2, 2 (Spring 1993):
    114-161. The distributional form is chosen by the approximate scv of
    D: hyperexponential (> 1.01), exponential (~1), convolution of two
    exponentials (0.501..0.99), or Erlang-2 (otherwise).

    Parameters
    ----------
    t : float
        wait time of interest
    arr_rate : float
        average arrival rate to queueing system
    svc_rate : float
        average service rate (each server); 1/svc_rate is mean service time
    c : int
        number of servers
    ca2 : float
        squared CV for inter-arrival time distribution
    cs2 : float
        squared CV for service time distribution

    Returns
    -------
    float
        approximate P(D <= t)
    """
    rho = arr_rate / (svc_rate * float(c))
    pdelay = ggm_prob_wait_whitt(arr_rate, svc_rate, c, ca2, cs2)
    # Conditional mean delay.
    ed = ggm_mean_qwait_whitt(arr_rate, svc_rate, c, ca2, cs2) / pdelay
    cd2 = ggm_qcondwait_whitt_cd2(rho, cs2)
    if cd2 > 1.01:
        # Balanced H2 fit to (ed, cd2).
        p1, gamma1, gamma2 = fit_balanced_hyperexpon2(ed, cd2)
        return hyperexpon_cdf(t, [p1, 1.0 - p1], [gamma1, gamma2])
    if cd2 >= 0.99:
        # scv ~ 1: plain exponential.
        return stats.expon.cdf(t, scale=ed)
    if cd2 >= 0.501:
        # Convolution of two exponentials matched to mean and variance.
        vard = ggm_qcondwait_whitt_vard(arr_rate, svc_rate, c, ca2, cs2)
        gamma2 = 2.0 / (ed + np.sqrt(2 * vard - ed ** 2))
        gamma1 = 1.0 / (ed - 1.0 / gamma2)
        tail = (gamma1 * np.exp(-gamma2 * t) - gamma2 * np.exp(-gamma1 * t)) / (gamma1 - gamma2)
        return 1.0 - tail
    # Low scv: Erlang-2 approximation.
    gamma1 = 2.0 / ed
    return 1.0 - np.exp(-gamma1 * t) * (1.0 + gamma1 * t)
def ggm_qwait_cdf_whitt(t, arr_rate, svc_rate, c, ca2, cs2):
    """Approximate P(W <= t) in a G/G/m queue.

    Mixes Whitt's two-moment approximation of the conditional wait with
    the delay probability P(W > 0). See ggm_qcondwait_cdf_whitt and
    Section 4 of Whitt, Ward. "Approximations for the GI/G/m queue"
    Production and Operations Management 2, 2 (Spring 1993): 114-161.

    Parameters
    ----------
    t : float
        wait time of interest
    arr_rate : float
        average arrival rate to queueing system
    svc_rate : float
        average service rate (each server). 1/svc_rate is mean service time.
    c : int
        number of servers
    ca2 : float
        squared coefficient of variation for inter-arrival time distribution
    cs2 : float
        squared coefficient of variation for service time distribution

    Returns
    -------
    float
        ~ P(W <= t)
    """
    pdelay = ggm_prob_wait_whitt(arr_rate, svc_rate, c, ca2, cs2)
    cond_cdf = ggm_qcondwait_cdf_whitt(t, arr_rate, svc_rate, c, ca2, cs2)
    # With probability pdelay the customer waits (conditional CDF applies);
    # otherwise the wait is zero, which is <= t with certainty.
    return cond_cdf * pdelay + (1.0 - pdelay)
def ggm_qwait_pctile_whitt(p, arr_rate, svc_rate, c, ca2, cs2):
    """Approximate the p'th percentile of the queue wait in a G/G/c queue.

    Inverts Whitt's two-moment approximation of the wait-time CDF with a
    Newton root search.

    Parameters
    ----------
    p : float
        percentile of interest
    arr_rate : float
        average arrival rate to queueing system
    svc_rate : float
        average service rate (each server). 1/svc_rate is mean service time.
    c : int
        number of servers

    Returns
    -------
    float
        t such that P(wait time in queue is < t) = p
    """
    # Seed the root search with the percentile of an M/M/1 system of
    # equivalent total service capacity.
    initial_guess = mm1_qwait_pctile(p, arr_rate, c * svc_rate)
    return scipy.optimize.newton(
        _ggm_waitq_pctile_whitt_wrap, initial_guess,
        args=(p, arr_rate, svc_rate, c, ca2, cs2))
def _ggm_waitq_pctile_whitt_wrap(t, p, arr_rate, svc_rate, c, ca2, cs2):
    """Root function for the percentile search: CDF(t) - p."""
    cdf_at_t = ggm_qwait_cdf_whitt(t, arr_rate, svc_rate, c, ca2, cs2)
    return cdf_at_t - p
def _ggm_qsize_prob_gt_0_whitt_5_2(arr_rate, svc_rate, c, ca2, cs2):
    """Approximate P(Q > 0) in a G/G/m queue as rho * P(W > 0).

    Exact for M/M/m, with strong theoretical support for GI/M/m.
    Described by Whitt as "crude" but "a useful quick approximation".

    See Section 5, Equation 5.2, of Whitt, Ward. "Approximations for
    the GI/G/m queue" Production and Operations Management 2, 2
    (Spring 1993): 114-161.

    Parameters
    ----------
    arr_rate : float
        average arrival rate to queueing system
    svc_rate : float
        average service rate (each server). 1/svc_rate is mean service time.
    c : int
        number of servers
    ca2 : float
        squared coefficient of variation for inter-arrival time distribution
    cs2 : float
        squared coefficient of variation for service time distribution

    Returns
    -------
    float
        ~ P(Q > 0)
    """
    utilization = arr_rate / (svc_rate * float(c))
    delay_prob = ggm_prob_wait_whitt(arr_rate, svc_rate, c, ca2, cs2)
    return utilization * delay_prob
def _ggm_qsize_prob_gt_0_whitt_5_1(arr_rate, svc_rate, c, ca2, cs2):
"""
Return the approximate P(Q>0) in G/G/m queue using Whitt's approximation
which is based on an exact expression for P(Q>0) given the CDF's
of an interarrival time and a waiting time .
This approximation is exact for M/M/m and has strong theoretical
support for GI/M/m - see Equation 5.1. It is preferred to the cruder
approximation given in Equation 5.2 (see ggm_qsize_prob_gt_0_whitt_5_2).
See Section 5 of Whitt, Ward. "Approximations for the GI/G/m queue"
Production and Operations Management 2, 2 (Spring 1993): 114-161. In
particular, this is Equation 5.1.
Parameters
----------
arr_rate : float
average arrival rate to queueing system
svc_rate : float
average service rate (each server). 1/svc_rate is mean service time.
c : int
number of servers
cv2_svc_time : float
squared coefficient of variation for service time distribution
Returns
-------
float
~ P(Q > 0
"""
rho = arr_rate / (svc_rate * float(c))
pdelay = ggm_prob_wait_whitt(arr_rate, svc_rate, c, ca2, cs2)
# TODO - implement Equation 5.1 of Whitt (1995)
return 0
def ggm_qsize_whitt_cq2(arr_rate, svc_rate, m, ca2, cs2):
    """Approximate squared coefficient of variation of queue size in G/G/m.

    See Equation 5.6 of Whitt, Ward. "Approximations for the GI/G/m queue"
    Production and Operations Management 2, 2 (Spring 1993): 114-161.

    Parameters
    ----------
    arr_rate : float
        average arrival rate to queueing system
    svc_rate : float
        average service rate (each server). 1/svc_rate is mean service time.
    m : int
        number of servers
    ca2 : float
        squared coefficient of variation for inter-arrival time distribution
    cs2 : float
        squared coefficient of variation for service time distribution

    Returns
    -------
    float
        scv of number in queue
    """
    mean_qsize = ggm_mean_qsize_whitt(arr_rate, svc_rate, m, ca2, cs2)
    cw2 = ggm_qwait_whitt_cw2(arr_rate, svc_rate, m, ca2, cs2)
    # Equation 5.6: cq2 = 1/E[Q] + cw2.
    return 1 / mean_qsize + cw2
def hyper_erlang_moment(rates, stages, probs, moment):
    """Return the raw ``moment``-th moment of a hyper-Erlang distribution.

    Branch ``i`` is an Erlang with ``stages[i]`` phases, each phase of
    rate ``stages[i] * rates[i]``, and is selected with probability
    ``probs[i]``.

    Parameters
    ----------
    rates : sequence of float
        per-branch base rates
    stages : sequence of int
        number of Erlang phases per branch
    probs : sequence of float
        branch selection probabilities
    moment : int
        order of the raw moment to compute

    Returns
    -------
    float
        E[X**moment] for X ~ hyper-Erlang(rates, stages, probs)
    """
    total = 0
    for rate, k, p in zip(rates, stages, probs):
        # Raw moment of an Erlang(k, k*rate):
        # (k+moment-1)! / (k-1)! * (k*rate)**(-moment)
        total += (p * math.factorial(k + moment - 1)
                  * (1 / math.factorial(k - 1))
                  * (k * rate) ** (-moment))
    return total
| [
"numpy.ceil",
"scipy.stats.poisson.pmf",
"numpy.sqrt",
"math.ceil",
"math.factorial",
"scipy.stats.norm.cdf",
"math.sqrt",
"scipy.stats.poisson.ppf",
"scipy.stats.poisson.cdf",
"numpy.sum",
"numpy.exp",
"math.exp",
"scipy.stats.expon.cdf"
] | [((561, 590), 'scipy.stats.poisson.ppf', 'stats.poisson.ppf', (['prob', 'mean'], {}), '(prob, mean)\n', (578, 590), True, 'import scipy.stats as stats\n'), ((2592, 2605), 'numpy.ceil', 'np.ceil', (['load'], {}), '(load)\n', (2599, 2605), True, 'import numpy as np\n'), ((9931, 9961), 'math.ceil', 'math.ceil', (['(arr_rate / svc_rate)'], {}), '(arr_rate / svc_rate)\n', (9940, 9961), False, 'import math\n'), ((24643, 24683), 'math.exp', 'math.exp', (['(-2.0 * (1 - rho) / (3.0 * rho))'], {}), '(-2.0 * (1 - rho) / (3.0 * rho))\n', (24651, 24683), False, 'import math\n'), ((30426, 30446), 'numpy.sqrt', 'np.sqrt', (['(m * rho * z)'], {}), '(m * rho * z)\n', (30433, 30446), True, 'import numpy as np\n'), ((946, 972), 'scipy.stats.poisson.pmf', 'stats.poisson.pmf', (['c', 'load'], {}), '(c, load)\n', (963, 972), True, 'import scipy.stats as stats\n'), ((975, 1001), 'scipy.stats.poisson.cdf', 'stats.poisson.cdf', (['c', 'load'], {}), '(c, load)\n', (992, 1001), True, 'import scipy.stats as stats\n'), ((3808, 3820), 'numpy.sum', 'np.sum', (['pbar'], {}), '(pbar)\n', (3814, 3820), True, 'import numpy as np\n'), ((31952, 31962), 'numpy.sqrt', 'np.sqrt', (['m'], {}), '(m)\n', (31959, 31962), True, 'import numpy as np\n'), ((32899, 32909), 'numpy.sqrt', 'np.sqrt', (['m'], {}), '(m)\n', (32906, 32909), True, 'import numpy as np\n'), ((7163, 7193), 'scipy.stats.norm.cdf', 'stats.norm.cdf', (['(c - load - 0.5)'], {}), '(c - load - 0.5)\n', (7177, 7193), True, 'import scipy.stats as stats\n'), ((7196, 7209), 'numpy.sqrt', 'np.sqrt', (['load'], {}), '(load)\n', (7203, 7209), True, 'import numpy as np\n'), ((8615, 8628), 'numpy.exp', 'np.exp', (['term2'], {}), '(term2)\n', (8621, 8628), True, 'import numpy as np\n'), ((9332, 9345), 'numpy.exp', 'np.exp', (['term3'], {}), '(term3)\n', (9338, 9345), True, 'import numpy as np\n'), ((31901, 31911), 'numpy.sqrt', 'np.sqrt', (['m'], {}), '(m)\n', (31908, 31911), True, 'import numpy as np\n'), ((32037, 32058), 'scipy.stats.norm.cdf', 
'stats.norm.cdf', (['term2'], {}), '(term2)\n', (32051, 32058), True, 'import scipy.stats as stats\n'), ((32848, 32858), 'numpy.sqrt', 'np.sqrt', (['m'], {}), '(m)\n', (32855, 32858), True, 'import numpy as np\n'), ((32984, 33005), 'scipy.stats.norm.cdf', 'stats.norm.cdf', (['term2'], {}), '(term2)\n', (32998, 33005), True, 'import scipy.stats as stats\n'), ((54845, 54875), 'numpy.sqrt', 'np.sqrt', (['((cs2 - 1) / (cs2 + 1))'], {}), '((cs2 - 1) / (cs2 + 1))\n', (54852, 54875), True, 'import numpy as np\n'), ((56966, 56994), 'scipy.stats.expon.cdf', 'stats.expon.cdf', (['t'], {'scale': 'ed'}), '(t, scale=ed)\n', (56981, 56994), True, 'import scipy.stats as stats\n'), ((12610, 12628), 'numpy.sqrt', 'np.sqrt', (['(4 + 5 * c)'], {}), '(4 + 5 * c)\n', (12617, 12628), True, 'import numpy as np\n'), ((23798, 23818), 'math.sqrt', 'math.sqrt', (['(4 + 5 * m)'], {}), '(4 + 5 * m)\n', (23807, 23818), False, 'import math\n'), ((31126, 31146), 'numpy.sqrt', 'np.sqrt', (['(m * rho * z)'], {}), '(m * rho * z)\n', (31133, 31146), True, 'import numpy as np\n'), ((32005, 32026), 'scipy.stats.norm.cdf', 'stats.norm.cdf', (['term1'], {}), '(term1)\n', (32019, 32026), True, 'import scipy.stats as stats\n'), ((32952, 32973), 'scipy.stats.norm.cdf', 'stats.norm.cdf', (['term1'], {}), '(term1)\n', (32966, 32973), True, 'import scipy.stats as stats\n'), ((55421, 55435), 'numpy.exp', 'np.exp', (['(-r * x)'], {}), '(-r * x)\n', (55427, 55435), True, 'import numpy as np\n'), ((57492, 57511), 'numpy.exp', 'np.exp', (['(-gamma1 * t)'], {}), '(-gamma1 * t)\n', (57498, 57511), True, 'import numpy as np\n'), ((63060, 63102), 'math.factorial', 'math.factorial', (['(stages[i - 1] + moment - 1)'], {}), '(stages[i - 1] + moment - 1)\n', (63074, 63102), False, 'import math\n'), ((63110, 63143), 'math.factorial', 'math.factorial', (['(stages[i - 1] - 1)'], {}), '(stages[i - 1] - 1)\n', (63124, 63143), False, 'import math\n'), ((57184, 57211), 'numpy.sqrt', 'np.sqrt', (['(2 * vard - ed ** 2)'], {}), '(2 
* vard - ed ** 2)\n', (57191, 57211), True, 'import numpy as np\n'), ((57291, 57310), 'numpy.exp', 'np.exp', (['(-gamma2 * t)'], {}), '(-gamma2 * t)\n', (57297, 57310), True, 'import numpy as np\n'), ((57322, 57341), 'numpy.exp', 'np.exp', (['(-gamma1 * t)'], {}), '(-gamma1 * t)\n', (57328, 57341), True, 'import numpy as np\n')] |
import nrefocus
import numpy as np
import qpimage
from skimage.restoration import unwrap
from ._bhfield import simulate_sphere
def field2ap_corr(field):
    """Split a complex field into amplitude and offset-corrected phase.

    Phase unwrapping can leave a global multiple-of-2*pi offset; it is
    estimated from a 3-pixel frame around the image border (via the
    median) and removed.

    Parameters
    ----------
    field: 2d complex np.ndarray
        Complex input field

    Returns
    -------
    amp: 2d real np.ndarray
        Amplitude data
    pha: 2d real np.ndarray
        Phase data, corrected for 2PI offsets
    """
    pha = unwrap.unwrap_phase(np.angle(field), seed=47)
    # Collect the 3-pixel border of the image (left/right columns,
    # then top/bottom rows without the corners already taken).
    border = np.hstack((pha[:, :3].ravel(),
                        pha[:, -3:].ravel(),
                        pha[:3, 3:-3].ravel(),
                        pha[-3:, 3:-3].ravel()))
    offset = np.median(border)
    # Remove the nearest integer multiple of 2*pi.
    pha -= np.round(offset / (2 * np.pi)) * 2 * np.pi
    return np.abs(field), pha
def mie(radius=5e-6, sphere_index=1.339, medium_index=1.333,
        wavelength=550e-9, pixel_size=1e-7, grid_size=(80, 80),
        center=(39.5, 39.5), focus=0, arp=True):
    """Mie-simulated field behind a dielectric sphere

    Parameters
    ----------
    radius: float
        Radius of the sphere [m]
    sphere_index: float
        Refractive index of the sphere
    medium_index: float
        Refractive index of the surrounding medium
    wavelength: float
        Vacuum wavelength of the imaging light [m]
    pixel_size: float
        Pixel size [m]
    grid_size: tuple of floats
        Resulting image size in x and y [px]
    center: tuple of floats
        Center position in image coordinates [px]
    focus: float
        .. versionadded:: 0.5.0

        Axial focus position [m] measured from the center of the
        sphere in the direction of light propagation.
    arp: bool
        Use arbitrary precision (ARPREC) in BHFIELD computations

    Returns
    -------
    qpi: qpimage.QPImage
        Quantitative phase data set
    """
    # BHFIELD works in µm/nm, so convert all lengths up front.
    radius_um = radius * 1e6
    propd_um = radius_um  # propagate through the full sphere
    propd_lamd = radius / wavelength  # radius measured in wavelengths
    wave_nm = wavelength * 1e9
    # Qpsphere models define the sphere position via array indices that
    # run from (0, 0) to grid_size (without endpoint). The extent in µm
    # between the centers of the first and last pixel is
    # (grid_size - 1) * pixel_size.
    size_um = (np.array(grid_size) - 1) * pixel_size * 1e6
    # Using size_um here already accounts for the half-pixel offset.
    offset_um = np.array(center) * pixel_size * 1e6 - size_um / 2
    sim_args = {"radius_sphere_um": radius_um,
                "refractive_index_medium": medium_index,
                "refractive_index_sphere": sphere_index,
                "measurement_position_um": propd_um,
                "wavelength_nm": wave_nm,
                "size_simulation_um": size_um,
                "shape_grid": grid_size,
                "offset_x_um": offset_um[0],
                "offset_y_um": offset_um[1]}
    # Normalize by the plain propagation through the medium.
    background = np.exp(1j * 2 * np.pi * propd_lamd * medium_index)
    field = simulate_sphere(arp=arp, **sim_args) / background
    # Numerically refocus from behind the sphere to the requested plane.
    refocused = nrefocus.refocus(field,
                                 d=-((radius + focus) / pixel_size),
                                 nm=medium_index,
                                 res=wavelength / pixel_size)
    # Amplitude and 2PI-offset-corrected phase of the refocused field.
    amp, pha = field2ap_corr(refocused)
    meta_data = {"pixel size": pixel_size,
                 "wavelength": wavelength,
                 "medium index": medium_index,
                 "sim center": center,
                 "sim radius": radius,
                 "sim index": sphere_index,
                 "sim model": "mie",
                 }
    return qpimage.QPImage(data=(pha, amp),
                           which_data="phase,amplitude",
                           meta_data=meta_data)
| [
"numpy.abs",
"numpy.hstack",
"numpy.angle",
"numpy.exp",
"numpy.array",
"qpimage.QPImage",
"nrefocus.refocus",
"numpy.round"
] | [((851, 885), 'numpy.round', 'np.round', (['(pha_offset / (2 * np.pi))'], {}), '(pha_offset / (2 * np.pi))\n', (859, 885), True, 'import numpy as np\n'), ((931, 944), 'numpy.abs', 'np.abs', (['field'], {}), '(field)\n', (937, 944), True, 'import numpy as np\n'), ((3331, 3383), 'numpy.exp', 'np.exp', (['(1.0j * 2 * np.pi * propd_lamd * medium_index)'], {}), '(1.0j * 2 * np.pi * propd_lamd * medium_index)\n', (3337, 3383), True, 'import numpy as np\n'), ((3470, 3579), 'nrefocus.refocus', 'nrefocus.refocus', (['field'], {'d': '(-((radius + focus) / pixel_size))', 'nm': 'medium_index', 'res': '(wavelength / pixel_size)'}), '(field, d=-((radius + focus) / pixel_size), nm=medium_index,\n res=wavelength / pixel_size)\n', (3486, 3579), False, 'import nrefocus\n'), ((4070, 4158), 'qpimage.QPImage', 'qpimage.QPImage', ([], {'data': '(pha, amp)', 'which_data': '"""phase,amplitude"""', 'meta_data': 'meta_data'}), "(data=(pha, amp), which_data='phase,amplitude', meta_data=\n meta_data)\n", (4085, 4158), False, 'import qpimage\n'), ((567, 582), 'numpy.angle', 'np.angle', (['field'], {}), '(field)\n', (575, 582), True, 'import numpy as np\n'), ((817, 835), 'numpy.hstack', 'np.hstack', (['samples'], {}), '(samples)\n', (826, 835), True, 'import numpy as np\n'), ((2667, 2686), 'numpy.array', 'np.array', (['grid_size'], {}), '(grid_size)\n', (2675, 2686), True, 'import numpy as np\n'), ((2846, 2862), 'numpy.array', 'np.array', (['center'], {}), '(center)\n', (2854, 2862), True, 'import numpy as np\n')] |
import numpy as np
import cv2
from matplotlib import pyplot as plt

# Load the image and split it into its B, G, R channels.
img = cv2.imread('Cat.jpg')
b, g, r = cv2.split(img)

# Spatial frequency of the radial sine pattern:
# one full wave every PixelsPerWave pixels.
PixelsPerWave = 10
WaveSize = 2.0 * np.pi / PixelsPerWave

m, n = g.shape
# Image center. np.int was removed in NumPy 1.24; floor division gives
# the same result for positive sizes.
cm = m // 2
cn = n // 2

# Radial sine pattern centered on (cm, cn), vectorized with numpy
# instead of a per-pixel Python loop.
ii, jj = np.meshgrid(np.arange(m), np.arange(n), indexing='ij')
WaveMat = np.sin(np.sqrt((cm - ii) ** 2 + (jj - cn) ** 2) * WaveSize)

# Rescale from [-1, 1] to [0, 1] so it can act as a multiplicative mask.
ImMat = (WaveMat + 1.0) / 2.0

# Spatial-domain product: image * mask, per channel.
WaveB = np.float64(b) * ImMat
WaveG = np.float64(g) * ImMat
WaveR = np.float64(r) * ImMat
WaveImage = cv2.merge((np.uint8(WaveR), np.uint8(WaveG), np.uint8(WaveB)))

# Frequency-domain product: FFT each channel, multiply by the mask,
# transform back and take the magnitude.
# NOTE(review): np.fft.fft on a 2D array transforms along the last axis
# only, while np.fft.fftshift (no axes argument) shifts both axes;
# np.fft.fft2 may have been intended — confirm before changing.
BFreq = np.fft.fftshift(np.fft.fft(b)) * ImMat
GFreq = np.fft.fftshift(np.fft.fft(g)) * ImMat
RFreq = np.fft.fftshift(np.fft.fft(r)) * ImMat
WeveFreqB = np.absolute(np.fft.ifft(np.fft.ifftshift(BFreq)))
WeveFreqG = np.absolute(np.fft.ifft(np.fft.ifftshift(GFreq)))
WeveFreqR = np.absolute(np.fft.ifft(np.fft.ifftshift(RFreq)))
WaveFreqImage = cv2.merge((np.uint8(WeveFreqR), np.uint8(WeveFreqG), np.uint8(WeveFreqB)))

# 2x2 figure: original, mask, spatial product, frequency product.
plt.figure(1)
plt.subplot(222)
plt.title('2D Sin')
plt.imshow(np.uint8(ImMat * 255), cmap='Greys_r')
plt.subplot(221)
plt.title('Image')
plt.imshow(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
plt.subplot(223)
plt.title('Product Image*Sin')
plt.imshow(WaveImage)
plt.subplot(224)
plt.title('Product ImFrequency*Sin')
plt.imshow(WaveFreqImage)
plt.show()
| [
"matplotlib.pyplot.imshow",
"numpy.uint8",
"numpy.sqrt",
"matplotlib.pyplot.show",
"numpy.float64",
"numpy.fft.fft",
"matplotlib.pyplot.subplot",
"numpy.zeros",
"matplotlib.pyplot.figure",
"cv2.split",
"cv2.cvtColor",
"matplotlib.pyplot.title",
"numpy.int",
"numpy.fft.ifftshift",
"cv2.im... | [((78, 99), 'cv2.imread', 'cv2.imread', (['"""Cat.jpg"""'], {}), "('Cat.jpg')\n", (88, 99), False, 'import cv2\n'), ((109, 123), 'cv2.split', 'cv2.split', (['img'], {}), '(img)\n', (118, 123), False, 'import cv2\n'), ((207, 220), 'numpy.int', 'np.int', (['(m / 2)'], {}), '(m / 2)\n', (213, 220), True, 'import numpy as np\n'), ((225, 238), 'numpy.int', 'np.int', (['(n / 2)'], {}), '(n / 2)\n', (231, 238), True, 'import numpy as np\n'), ((250, 266), 'numpy.zeros', 'np.zeros', (['[m, n]'], {}), '([m, n])\n', (258, 266), True, 'import numpy as np\n'), ((1013, 1026), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {}), '(1)\n', (1023, 1026), True, 'from matplotlib import pyplot as plt\n'), ((1028, 1044), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(222)'], {}), '(222)\n', (1039, 1044), True, 'from matplotlib import pyplot as plt\n'), ((1046, 1065), 'matplotlib.pyplot.title', 'plt.title', (['"""2D Sin"""'], {}), "('2D Sin')\n", (1055, 1065), True, 'from matplotlib import pyplot as plt\n'), ((1116, 1132), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(221)'], {}), '(221)\n', (1127, 1132), True, 'from matplotlib import pyplot as plt\n'), ((1134, 1152), 'matplotlib.pyplot.title', 'plt.title', (['"""Image"""'], {}), "('Image')\n", (1143, 1152), True, 'from matplotlib import pyplot as plt\n'), ((1204, 1220), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(223)'], {}), '(223)\n', (1215, 1220), True, 'from matplotlib import pyplot as plt\n'), ((1222, 1252), 'matplotlib.pyplot.title', 'plt.title', (['"""Product Image*Sin"""'], {}), "('Product Image*Sin')\n", (1231, 1252), True, 'from matplotlib import pyplot as plt\n'), ((1254, 1275), 'matplotlib.pyplot.imshow', 'plt.imshow', (['WaveImage'], {}), '(WaveImage)\n', (1264, 1275), True, 'from matplotlib import pyplot as plt\n'), ((1277, 1293), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(224)'], {}), '(224)\n', (1288, 1293), True, 'from matplotlib import pyplot as plt\n'), ((1295, 1331), 
'matplotlib.pyplot.title', 'plt.title', (['"""Product ImFrequency*Sin"""'], {}), "('Product ImFrequency*Sin')\n", (1304, 1331), True, 'from matplotlib import pyplot as plt\n'), ((1333, 1358), 'matplotlib.pyplot.imshow', 'plt.imshow', (['WaveFreqImage'], {}), '(WaveFreqImage)\n', (1343, 1358), True, 'from matplotlib import pyplot as plt\n'), ((1360, 1370), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1368, 1370), True, 'from matplotlib import pyplot as plt\n'), ((429, 442), 'numpy.float64', 'np.float64', (['b'], {}), '(b)\n', (439, 442), True, 'import numpy as np\n'), ((458, 471), 'numpy.float64', 'np.float64', (['g'], {}), '(g)\n', (468, 471), True, 'import numpy as np\n'), ((487, 500), 'numpy.float64', 'np.float64', (['r'], {}), '(r)\n', (497, 500), True, 'import numpy as np\n'), ((1078, 1099), 'numpy.uint8', 'np.uint8', (['(ImMat * 255)'], {}), '(ImMat * 255)\n', (1086, 1099), True, 'import numpy as np\n'), ((1165, 1201), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2RGB'], {}), '(img, cv2.COLOR_BGR2RGB)\n', (1177, 1201), False, 'import cv2\n'), ((531, 546), 'numpy.uint8', 'np.uint8', (['WaveR'], {}), '(WaveR)\n', (539, 546), True, 'import numpy as np\n'), ((547, 562), 'numpy.uint8', 'np.uint8', (['WaveG'], {}), '(WaveG)\n', (555, 562), True, 'import numpy as np\n'), ((563, 578), 'numpy.uint8', 'np.uint8', (['WaveB'], {}), '(WaveB)\n', (571, 578), True, 'import numpy as np\n'), ((608, 621), 'numpy.fft.fft', 'np.fft.fft', (['b'], {}), '(b)\n', (618, 621), True, 'import numpy as np\n'), ((656, 669), 'numpy.fft.fft', 'np.fft.fft', (['g'], {}), '(g)\n', (666, 669), True, 'import numpy as np\n'), ((704, 717), 'numpy.fft.fft', 'np.fft.fft', (['r'], {}), '(r)\n', (714, 717), True, 'import numpy as np\n'), ((766, 789), 'numpy.fft.ifftshift', 'np.fft.ifftshift', (['BFreq'], {}), '(BFreq)\n', (782, 789), True, 'import numpy as np\n'), ((829, 852), 'numpy.fft.ifftshift', 'np.fft.ifftshift', (['GFreq'], {}), '(GFreq)\n', (845, 852), True, 'import numpy 
as np\n'), ((892, 915), 'numpy.fft.ifftshift', 'np.fft.ifftshift', (['RFreq'], {}), '(RFreq)\n', (908, 915), True, 'import numpy as np\n'), ((948, 967), 'numpy.uint8', 'np.uint8', (['WeveFreqR'], {}), '(WeveFreqR)\n', (956, 967), True, 'import numpy as np\n'), ((968, 987), 'numpy.uint8', 'np.uint8', (['WeveFreqG'], {}), '(WeveFreqG)\n', (976, 987), True, 'import numpy as np\n'), ((988, 1007), 'numpy.uint8', 'np.uint8', (['WeveFreqB'], {}), '(WeveFreqB)\n', (996, 1007), True, 'import numpy as np\n'), ((342, 380), 'numpy.sqrt', 'np.sqrt', (['((cm - i) ** 2 + (j - cn) ** 2)'], {}), '((cm - i) ** 2 + (j - cn) ** 2)\n', (349, 380), True, 'import numpy as np\n')] |
# %%
import torch
import torch.nn.functional as F
import math
import numpy as np
# %%
def scaled_dot_product_attention(Q, K, V, dk=4):
    """Compute scaled dot-product attention.

    Parameters
    ----------
    Q, K, V : torch.Tensor
        Query, key and value matrices; Q is (q, d), K and V are (k, d).
    dk : int
        Scaling dimension; scores are divided by sqrt(dk).

    Returns
    -------
    (torch.Tensor, torch.Tensor)
        The attention output (weights @ V) and the attention weights.
    """
    # Similarity scores, scaled by sqrt(dk) to stabilize the softmax.
    scores = torch.matmul(Q, K.T) / math.sqrt(dk)
    weights = F.softmax(scores, dim=-1)
    return torch.matmul(weights, V), weights
# %%
def print_attention(Q, K, V, n_digits=3):
    """Run scaled dot-product attention and print rounded results.

    Parameters
    ----------
    Q, K, V : torch.Tensor
        Query, key and value matrices.
    n_digits : int
        Number of decimals used when printing.
    """
    out, attn = scaled_dot_product_attention(Q, K, V)
    out = out.numpy()
    attn = attn.numpy()
    print('Attention weights are:')
    print(np.round(attn, n_digits))
    print()
    print('Output is:')
    print(np.around(out, n_digits))
# %%
# Keys: note that rows 3 and 4 are identical on purpose.
temp_k = torch.Tensor([[10, 0, 0],
                       [0, 10, 0],
                       [0, 0, 10],
                       [0, 0, 10]])  # (4, 3)

temp_v = torch.Tensor([[1, 0, 1],
                       [10, 0, 2],
                       [100, 5, 0],
                       [1000, 6, 0]])  # (4, 3)

# %%
# This `query` aligns with the second `key`,
# so the second `value` is returned.
temp_q = torch.Tensor([[0, 10, 0]])  # (1, 3)
print_attention(temp_q, temp_k, temp_v)

# %%
# This query aligns with a repeated key (third and fourth),
# so all associated values get averaged.
temp_q = torch.Tensor([[0, 0, 10]])  # (1, 3)
print_attention(temp_q, temp_k, temp_v)

# %%
# This query aligns equally with the first and second key,
# so their values get averaged.
temp_q = torch.Tensor([[10, 10, 0]])  # (1, 3)
print_attention(temp_q, temp_k, temp_v)

# %%
# A batch of all three queries at once.
temp_q = torch.Tensor([[0, 10, 0], [0, 0, 10], [10, 10, 0]])  # (3, 3)
print_attention(temp_q, temp_k, temp_v) | [
"math.sqrt",
"torch.Tensor",
"torch.matmul",
"numpy.around",
"torch.nn.functional.softmax",
"numpy.round"
] | [((831, 893), 'torch.Tensor', 'torch.Tensor', (['[[10, 0, 0], [0, 10, 0], [0, 0, 10], [0, 0, 10]]'], {}), '([[10, 0, 0], [0, 10, 0], [0, 0, 10], [0, 0, 10]])\n', (843, 893), False, 'import torch\n'), ((977, 1041), 'torch.Tensor', 'torch.Tensor', (['[[1, 0, 1], [10, 0, 2], [100, 5, 0], [1000, 6, 0]]'], {}), '([[1, 0, 1], [10, 0, 2], [100, 5, 0], [1000, 6, 0]])\n', (989, 1041), False, 'import torch\n'), ((1225, 1251), 'torch.Tensor', 'torch.Tensor', (['[[0, 10, 0]]'], {}), '([[0, 10, 0]])\n', (1237, 1251), False, 'import torch\n'), ((1425, 1451), 'torch.Tensor', 'torch.Tensor', (['[[0, 0, 10]]'], {}), '([[0, 0, 10]])\n', (1437, 1451), False, 'import torch\n'), ((1615, 1642), 'torch.Tensor', 'torch.Tensor', (['[[10, 10, 0]]'], {}), '([[10, 10, 0]])\n', (1627, 1642), False, 'import torch\n'), ((1712, 1763), 'torch.Tensor', 'torch.Tensor', (['[[0, 10, 0], [0, 0, 10], [10, 10, 0]]'], {}), '([[0, 10, 0], [0, 0, 10], [10, 10, 0]])\n', (1724, 1763), False, 'import torch\n'), ((175, 195), 'torch.matmul', 'torch.matmul', (['Q', 'K.T'], {}), '(Q, K.T)\n', (187, 195), False, 'import torch\n'), ((307, 339), 'torch.nn.functional.softmax', 'F.softmax', (['matmul_scaled'], {'dim': '(-1)'}), '(matmul_scaled, dim=-1)\n', (316, 339), True, 'import torch.nn.functional as F\n'), ((394, 428), 'torch.matmul', 'torch.matmul', (['attention_weights', 'V'], {}), '(attention_weights, V)\n', (406, 428), False, 'import torch\n'), ((262, 275), 'math.sqrt', 'math.sqrt', (['dk'], {}), '(dk)\n', (271, 275), False, 'import math\n'), ((700, 729), 'numpy.round', 'np.round', (['temp_attn', 'n_digits'], {}), '(temp_attn, n_digits)\n', (708, 729), True, 'import numpy as np\n'), ((782, 811), 'numpy.around', 'np.around', (['temp_out', 'n_digits'], {}), '(temp_out, n_digits)\n', (791, 811), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
@brief test log(time=10s)
"""
import unittest
import numpy
from sklearn.tree._criterion import MSE # pylint: disable=E0611
from sklearn.tree import DecisionTreeRegressor
from sklearn import datasets
from sklearn.model_selection import train_test_split
from pyquickhelper.pycode import ExtTestCase
from mlinsights.mlmodel.piecewise_tree_regression import PiecewiseTreeRegressor
from mlinsights.mlmodel._piecewise_tree_regression_common import ( # pylint: disable=E0611,E0401
_test_criterion_init, _test_criterion_node_impurity,
_test_criterion_node_impurity_children, _test_criterion_update,
_test_criterion_node_value, _test_criterion_proxy_impurity_improvement,
_test_criterion_impurity_improvement
)
from mlinsights.mlmodel.piecewise_tree_regression_criterion_linear import LinearRegressorCriterion # pylint: disable=E0611, E0401
class TestPiecewiseDecisionTreeExperimentLinear(ExtTestCase):
    """Unit tests for :class:`LinearRegressorCriterion`, a decision tree
    split criterion that fits a linear regression in every node, compared
    against scikit-learn's plain MSE criterion."""

    def test_criterions(self):
        """Compares MSE and LinearRegressorCriterion on several small
        datasets: node values must agree and the linear criterion must
        report a lower impurity than plain MSE."""
        X = numpy.array([[10., 12., 13.]]).T
        y = numpy.array([20., 22., 23.])
        c1 = MSE(1, X.shape[0])
        c2 = LinearRegressorCriterion(X)
        self.assertNotEmpty(c1)
        self.assertNotEmpty(c2)
        w = numpy.ones((y.shape[0],))
        self.assertEqual(w.sum(), X.shape[0])
        ind = numpy.arange(y.shape[0]).astype(numpy.int64)
        ys = y.astype(float).reshape((y.shape[0], 1))
        _test_criterion_init(c1, ys, w, 1., ind, 0, y.shape[0])
        _test_criterion_init(c2, ys, w, 1., ind, 0, y.shape[0])
        # https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/tree/_criterion.pyx#L886
        v1 = _test_criterion_node_value(c1)
        v2 = _test_criterion_node_value(c2)
        self.assertEqual(v1, v2)
        i1 = _test_criterion_node_impurity(c1)
        i2 = _test_criterion_node_impurity(c2)
        self.assertGreater(i1, i2)
        self.assertGreater(i2, 0)
        p1 = _test_criterion_proxy_impurity_improvement(c1)
        p2 = _test_criterion_proxy_impurity_improvement(c2)
        # NOTE(review): assertTrue's second argument is the failure
        # message, so only isnan(p1) is actually asserted here (and in
        # the similar calls below) — confirm whether two assertions
        # were intended. Left unchanged.
        self.assertTrue(numpy.isnan(p1), numpy.isnan(p2))
        X = numpy.array([[1., 2., 3.]]).T
        y = numpy.array([1., 2., 3.])
        c1 = MSE(1, X.shape[0])
        c2 = LinearRegressorCriterion(X)
        w = numpy.ones((y.shape[0],))
        ind = numpy.arange(y.shape[0]).astype(numpy.int64)
        ys = y.astype(float).reshape((y.shape[0], 1))
        _test_criterion_init(c1, ys, w, 1., ind, 0, y.shape[0])
        _test_criterion_init(c2, ys, w, 1., ind, 0, y.shape[0])
        i1 = _test_criterion_node_impurity(c1)
        i2 = _test_criterion_node_impurity(c2)
        self.assertGreater(i1, i2)
        v1 = _test_criterion_node_value(c1)
        v2 = _test_criterion_node_value(c2)
        self.assertEqual(v1, v2)
        p1 = _test_criterion_proxy_impurity_improvement(c1)
        p2 = _test_criterion_proxy_impurity_improvement(c2)
        self.assertTrue(numpy.isnan(p1), numpy.isnan(p2))
        X = numpy.array([[1., 2., 10., 11.]]).T
        y = numpy.array([0.9, 1.1, 1.9, 2.1])
        c1 = MSE(1, X.shape[0])
        c2 = LinearRegressorCriterion(X)
        w = numpy.ones((y.shape[0],))
        ind = numpy.arange(y.shape[0]).astype(numpy.int64)
        ys = y.astype(float).reshape((y.shape[0], 1))
        _test_criterion_init(c1, ys, w, 1., ind, 0, y.shape[0])
        _test_criterion_init(c2, ys, w, 1., ind, 0, y.shape[0])
        i1 = _test_criterion_node_impurity(c1)
        i2 = _test_criterion_node_impurity(c2)
        self.assertGreater(i1, i2)
        v1 = _test_criterion_node_value(c1)
        v2 = _test_criterion_node_value(c2)
        self.assertEqual(v1, v2)
        p1 = _test_criterion_proxy_impurity_improvement(c1)
        p2 = _test_criterion_proxy_impurity_improvement(c2)
        self.assertTrue(numpy.isnan(p1), numpy.isnan(p2))
        X = numpy.array([[1., 2., 10., 11.]]).T
        y = numpy.array([0.9, 1.1, 1.9, 2.1])
        c1 = MSE(1, X.shape[0])
        c2 = LinearRegressorCriterion(X)
        w = numpy.ones((y.shape[0],))
        # Start position 1 with a deliberately permuted sample order.
        ind = numpy.array([0, 3, 2, 1], dtype=ind.dtype)
        ys = y.astype(float).reshape((y.shape[0], 1))
        _test_criterion_init(c1, ys, w, 1., ind, 1, y.shape[0])
        _test_criterion_init(c2, ys, w, 1., ind, 1, y.shape[0])
        i1 = _test_criterion_node_impurity(c1)
        i2 = _test_criterion_node_impurity(c2)
        self.assertGreater(i1, i2)
        v1 = _test_criterion_node_value(c1)
        v2 = _test_criterion_node_value(c2)
        self.assertEqual(v1, v2)
        p1 = _test_criterion_proxy_impurity_improvement(c1)
        p2 = _test_criterion_proxy_impurity_improvement(c2)
        self.assertTrue(numpy.isnan(p1), numpy.isnan(p2))
        # Move the split position and compare the children impurities
        # of both criterions.
        for i in range(2, 4):
            _test_criterion_update(c1, i)
            _test_criterion_update(c2, i)
            left1, right1 = _test_criterion_node_impurity_children(c1)
            left2, right2 = _test_criterion_node_impurity_children(c2)
            self.assertGreater(left1, left2)
            self.assertGreater(right1, right2)
            v1 = _test_criterion_node_value(c1)
            v2 = _test_criterion_node_value(c2)
            self.assertEqual(v1, v2)
            try:
                # scikit-learn >= 0.24
                p1 = _test_criterion_impurity_improvement(
                    c1, 0., left1, right1)
                p2 = _test_criterion_impurity_improvement(
                    c2, 0., left2, right2)
            except TypeError:
                # scikit-learn < 0.23
                p1 = _test_criterion_impurity_improvement(c1, 0.)
                p2 = _test_criterion_impurity_improvement(c2, 0.)
            self.assertGreater(p1, p2 - 1.)
        # node_beta exposes the fitted linear coefficients of the node.
        dest = numpy.empty((2, ))
        c2.node_beta(dest)
        self.assertGreater(dest[0], 0)
        self.assertGreater(dest[1], 0)

    def test_criterions_check_value(self):
        """The fitted node coefficients must match the exact linear
        relation y = x + 10 of the data."""
        X = numpy.array([[10., 12., 13.]]).T
        y = numpy.array([[20., 22., 23.]]).T
        c2 = LinearRegressorCriterion.create(X, y)
        coef = numpy.empty((3, ))
        c2.node_beta(coef)
        self.assertEqual(coef[:2], numpy.array([1, 10]))

    def test_decision_tree_criterion(self):
        """A depth-1 tree built with the custom criterion must match the
        one built with the default criterion on a tiny dataset."""
        X = numpy.array([[1., 2., 10., 11.]]).T
        y = numpy.array([0.9, 1.1, 1.9, 2.1])
        clr1 = DecisionTreeRegressor(max_depth=1)
        clr1.fit(X, y)
        p1 = clr1.predict(X)
        crit = LinearRegressorCriterion(X)
        clr2 = DecisionTreeRegressor(criterion=crit, max_depth=1)
        clr2.fit(X, y)
        p2 = clr2.predict(X)
        self.assertEqual(p1, p2)
        self.assertEqual(clr1.tree_.node_count, clr2.tree_.node_count)

    def test_decision_tree_criterion_iris(self):
        """Smoke test on iris: predictions keep the expected shape."""
        iris = datasets.load_iris()
        X, y = iris.data, iris.target
        clr1 = DecisionTreeRegressor()
        clr1.fit(X, y)
        p1 = clr1.predict(X)
        clr2 = DecisionTreeRegressor(criterion=LinearRegressorCriterion(X))
        clr2.fit(X, y)
        p2 = clr2.predict(X)
        self.assertEqual(p1.shape, p2.shape)

    def test_decision_tree_criterion_iris_dtc(self):
        """PiecewiseTreeRegressor with criterion='mselin' on iris:
        checks the fitted attributes and per-leaf coefficients."""
        iris = datasets.load_iris()
        X, y = iris.data, iris.target
        clr1 = DecisionTreeRegressor()
        clr1.fit(X, y)
        p1 = clr1.predict(X)
        clr2 = PiecewiseTreeRegressor(criterion='mselin')
        clr2.fit(X, y)
        p2 = clr2.predict(X)
        self.assertEqual(p1.shape, p2.shape)
        self.assertTrue(hasattr(clr2, 'betas_'))
        self.assertTrue(hasattr(clr2, 'leaves_mapping_'))
        self.assertEqual(len(clr2.leaves_index_), clr2.tree_.n_leaves)
        self.assertEqual(len(clr2.leaves_mapping_), clr2.tree_.n_leaves)
        # One coefficient per feature plus an intercept, per leaf.
        self.assertEqual(clr2.betas_.shape[1], X.shape[1] + 1)
        self.assertEqual(clr2.betas_.shape[0], clr2.tree_.n_leaves)
        sc1 = clr1.score(X, y)
        sc2 = clr2.score(X, y)
        self.assertGreater(sc1, sc2)
        mp = clr2._mapping_train(X)  # pylint: disable=W0212
        self.assertIsInstance(mp, dict)
        self.assertGreater(len(mp), 2)

    def test_decision_tree_criterion_iris_dtc_traintest(self):
        """Same checks as above but with a train/test split."""
        iris = datasets.load_iris()
        X, y = iris.data, iris.target
        X_train, X_test, y_train, y_test = train_test_split(X, y)
        clr1 = DecisionTreeRegressor()
        clr1.fit(X_train, y_train)
        p1 = clr1.predict(X_train)
        clr2 = PiecewiseTreeRegressor(criterion='mselin')
        clr2.fit(X_train, y_train)
        p2 = clr2.predict(X_train)
        self.assertEqual(p1.shape, p2.shape)
        self.assertTrue(hasattr(clr2, 'betas_'))
        self.assertTrue(hasattr(clr2, 'leaves_mapping_'))
        self.assertEqual(len(clr2.leaves_index_), clr2.tree_.n_leaves)
        self.assertEqual(len(clr2.leaves_mapping_), clr2.tree_.n_leaves)
        self.assertEqual(clr2.betas_.shape[1], X.shape[1] + 1)
        self.assertEqual(clr2.betas_.shape[0], clr2.tree_.n_leaves)
        sc1 = clr1.score(X_test, y_test)
        sc2 = clr2.score(X_test, y_test)
        # NOTE(review): abs(...) >= 0 > -0.1 always holds, so this
        # assertion can never fail — presumably the scores were meant
        # to be compared for closeness. Left unchanged.
        self.assertGreater(abs(sc1 - sc2), -0.1)
if __name__ == "__main__":
unittest.main()
| [
"mlinsights.mlmodel._piecewise_tree_regression_common._test_criterion_impurity_improvement",
"sklearn.tree.DecisionTreeRegressor",
"mlinsights.mlmodel._piecewise_tree_regression_common._test_criterion_proxy_impurity_improvement",
"numpy.array",
"mlinsights.mlmodel.piecewise_tree_regression.PiecewiseTreeRegr... | [((9066, 9081), 'unittest.main', 'unittest.main', ([], {}), '()\n', (9079, 9081), False, 'import unittest\n'), ((1037, 1068), 'numpy.array', 'numpy.array', (['[20.0, 22.0, 23.0]'], {}), '([20.0, 22.0, 23.0])\n', (1048, 1068), False, 'import numpy\n'), ((1079, 1097), 'sklearn.tree._criterion.MSE', 'MSE', (['(1)', 'X.shape[0]'], {}), '(1, X.shape[0])\n', (1082, 1097), False, 'from sklearn.tree._criterion import MSE\n'), ((1111, 1138), 'mlinsights.mlmodel.piecewise_tree_regression_criterion_linear.LinearRegressorCriterion', 'LinearRegressorCriterion', (['X'], {}), '(X)\n', (1135, 1138), False, 'from mlinsights.mlmodel.piecewise_tree_regression_criterion_linear import LinearRegressorCriterion\n'), ((1215, 1240), 'numpy.ones', 'numpy.ones', (['(y.shape[0],)'], {}), '((y.shape[0],))\n', (1225, 1240), False, 'import numpy\n'), ((1408, 1464), 'mlinsights.mlmodel._piecewise_tree_regression_common._test_criterion_init', '_test_criterion_init', (['c1', 'ys', 'w', '(1.0)', 'ind', '(0)', 'y.shape[0]'], {}), '(c1, ys, w, 1.0, ind, 0, y.shape[0])\n', (1428, 1464), False, 'from mlinsights.mlmodel._piecewise_tree_regression_common import _test_criterion_init, _test_criterion_node_impurity, _test_criterion_node_impurity_children, _test_criterion_update, _test_criterion_node_value, _test_criterion_proxy_impurity_improvement, _test_criterion_impurity_improvement\n'), ((1472, 1528), 'mlinsights.mlmodel._piecewise_tree_regression_common._test_criterion_init', '_test_criterion_init', (['c2', 'ys', 'w', '(1.0)', 'ind', '(0)', 'y.shape[0]'], {}), '(c2, ys, w, 1.0, ind, 0, y.shape[0])\n', (1492, 1528), False, 'from mlinsights.mlmodel._piecewise_tree_regression_common import _test_criterion_init, _test_criterion_node_impurity, _test_criterion_node_impurity_children, _test_criterion_update, _test_criterion_node_value, _test_criterion_proxy_impurity_improvement, _test_criterion_impurity_improvement\n'), ((1641, 1671), 
'mlinsights.mlmodel._piecewise_tree_regression_common._test_criterion_node_value', '_test_criterion_node_value', (['c1'], {}), '(c1)\n', (1667, 1671), False, 'from mlinsights.mlmodel._piecewise_tree_regression_common import _test_criterion_init, _test_criterion_node_impurity, _test_criterion_node_impurity_children, _test_criterion_update, _test_criterion_node_value, _test_criterion_proxy_impurity_improvement, _test_criterion_impurity_improvement\n'), ((1685, 1715), 'mlinsights.mlmodel._piecewise_tree_regression_common._test_criterion_node_value', '_test_criterion_node_value', (['c2'], {}), '(c2)\n', (1711, 1715), False, 'from mlinsights.mlmodel._piecewise_tree_regression_common import _test_criterion_init, _test_criterion_node_impurity, _test_criterion_node_impurity_children, _test_criterion_update, _test_criterion_node_value, _test_criterion_proxy_impurity_improvement, _test_criterion_impurity_improvement\n'), ((1762, 1795), 'mlinsights.mlmodel._piecewise_tree_regression_common._test_criterion_node_impurity', '_test_criterion_node_impurity', (['c1'], {}), '(c1)\n', (1791, 1795), False, 'from mlinsights.mlmodel._piecewise_tree_regression_common import _test_criterion_init, _test_criterion_node_impurity, _test_criterion_node_impurity_children, _test_criterion_update, _test_criterion_node_value, _test_criterion_proxy_impurity_improvement, _test_criterion_impurity_improvement\n'), ((1809, 1842), 'mlinsights.mlmodel._piecewise_tree_regression_common._test_criterion_node_impurity', '_test_criterion_node_impurity', (['c2'], {}), '(c2)\n', (1838, 1842), False, 'from mlinsights.mlmodel._piecewise_tree_regression_common import _test_criterion_init, _test_criterion_node_impurity, _test_criterion_node_impurity_children, _test_criterion_update, _test_criterion_node_value, _test_criterion_proxy_impurity_improvement, _test_criterion_impurity_improvement\n'), ((1925, 1971), 'mlinsights.mlmodel._piecewise_tree_regression_common._test_criterion_proxy_impurity_improvement', 
'_test_criterion_proxy_impurity_improvement', (['c1'], {}), '(c1)\n', (1967, 1971), False, 'from mlinsights.mlmodel._piecewise_tree_regression_common import _test_criterion_init, _test_criterion_node_impurity, _test_criterion_node_impurity_children, _test_criterion_update, _test_criterion_node_value, _test_criterion_proxy_impurity_improvement, _test_criterion_impurity_improvement\n'), ((1985, 2031), 'mlinsights.mlmodel._piecewise_tree_regression_common._test_criterion_proxy_impurity_improvement', '_test_criterion_proxy_impurity_improvement', (['c2'], {}), '(c2)\n', (2027, 2031), False, 'from mlinsights.mlmodel._piecewise_tree_regression_common import _test_criterion_init, _test_criterion_node_impurity, _test_criterion_node_impurity_children, _test_criterion_update, _test_criterion_node_value, _test_criterion_proxy_impurity_improvement, _test_criterion_impurity_improvement\n'), ((2145, 2173), 'numpy.array', 'numpy.array', (['[1.0, 2.0, 3.0]'], {}), '([1.0, 2.0, 3.0])\n', (2156, 2173), False, 'import numpy\n'), ((2184, 2202), 'sklearn.tree._criterion.MSE', 'MSE', (['(1)', 'X.shape[0]'], {}), '(1, X.shape[0])\n', (2187, 2202), False, 'from sklearn.tree._criterion import MSE\n'), ((2216, 2243), 'mlinsights.mlmodel.piecewise_tree_regression_criterion_linear.LinearRegressorCriterion', 'LinearRegressorCriterion', (['X'], {}), '(X)\n', (2240, 2243), False, 'from mlinsights.mlmodel.piecewise_tree_regression_criterion_linear import LinearRegressorCriterion\n'), ((2256, 2281), 'numpy.ones', 'numpy.ones', (['(y.shape[0],)'], {}), '((y.shape[0],))\n', (2266, 2281), False, 'import numpy\n'), ((2403, 2459), 'mlinsights.mlmodel._piecewise_tree_regression_common._test_criterion_init', '_test_criterion_init', (['c1', 'ys', 'w', '(1.0)', 'ind', '(0)', 'y.shape[0]'], {}), '(c1, ys, w, 1.0, ind, 0, y.shape[0])\n', (2423, 2459), False, 'from mlinsights.mlmodel._piecewise_tree_regression_common import _test_criterion_init, _test_criterion_node_impurity, 
_test_criterion_node_impurity_children, _test_criterion_update, _test_criterion_node_value, _test_criterion_proxy_impurity_improvement, _test_criterion_impurity_improvement\n'), ((2467, 2523), 'mlinsights.mlmodel._piecewise_tree_regression_common._test_criterion_init', '_test_criterion_init', (['c2', 'ys', 'w', '(1.0)', 'ind', '(0)', 'y.shape[0]'], {}), '(c2, ys, w, 1.0, ind, 0, y.shape[0])\n', (2487, 2523), False, 'from mlinsights.mlmodel._piecewise_tree_regression_common import _test_criterion_init, _test_criterion_node_impurity, _test_criterion_node_impurity_children, _test_criterion_update, _test_criterion_node_value, _test_criterion_proxy_impurity_improvement, _test_criterion_impurity_improvement\n'), ((2536, 2569), 'mlinsights.mlmodel._piecewise_tree_regression_common._test_criterion_node_impurity', '_test_criterion_node_impurity', (['c1'], {}), '(c1)\n', (2565, 2569), False, 'from mlinsights.mlmodel._piecewise_tree_regression_common import _test_criterion_init, _test_criterion_node_impurity, _test_criterion_node_impurity_children, _test_criterion_update, _test_criterion_node_value, _test_criterion_proxy_impurity_improvement, _test_criterion_impurity_improvement\n'), ((2583, 2616), 'mlinsights.mlmodel._piecewise_tree_regression_common._test_criterion_node_impurity', '_test_criterion_node_impurity', (['c2'], {}), '(c2)\n', (2612, 2616), False, 'from mlinsights.mlmodel._piecewise_tree_regression_common import _test_criterion_init, _test_criterion_node_impurity, _test_criterion_node_impurity_children, _test_criterion_update, _test_criterion_node_value, _test_criterion_proxy_impurity_improvement, _test_criterion_impurity_improvement\n'), ((2665, 2695), 'mlinsights.mlmodel._piecewise_tree_regression_common._test_criterion_node_value', '_test_criterion_node_value', (['c1'], {}), '(c1)\n', (2691, 2695), False, 'from mlinsights.mlmodel._piecewise_tree_regression_common import _test_criterion_init, _test_criterion_node_impurity, _test_criterion_node_impurity_children, 
_test_criterion_update, _test_criterion_node_value, _test_criterion_proxy_impurity_improvement, _test_criterion_impurity_improvement\n'), ((2709, 2739), 'mlinsights.mlmodel._piecewise_tree_regression_common._test_criterion_node_value', '_test_criterion_node_value', (['c2'], {}), '(c2)\n', (2735, 2739), False, 'from mlinsights.mlmodel._piecewise_tree_regression_common import _test_criterion_init, _test_criterion_node_impurity, _test_criterion_node_impurity_children, _test_criterion_update, _test_criterion_node_value, _test_criterion_proxy_impurity_improvement, _test_criterion_impurity_improvement\n'), ((2786, 2832), 'mlinsights.mlmodel._piecewise_tree_regression_common._test_criterion_proxy_impurity_improvement', '_test_criterion_proxy_impurity_improvement', (['c1'], {}), '(c1)\n', (2828, 2832), False, 'from mlinsights.mlmodel._piecewise_tree_regression_common import _test_criterion_init, _test_criterion_node_impurity, _test_criterion_node_impurity_children, _test_criterion_update, _test_criterion_node_value, _test_criterion_proxy_impurity_improvement, _test_criterion_impurity_improvement\n'), ((2846, 2892), 'mlinsights.mlmodel._piecewise_tree_regression_common._test_criterion_proxy_impurity_improvement', '_test_criterion_proxy_impurity_improvement', (['c2'], {}), '(c2)\n', (2888, 2892), False, 'from mlinsights.mlmodel._piecewise_tree_regression_common import _test_criterion_init, _test_criterion_node_impurity, _test_criterion_node_impurity_children, _test_criterion_update, _test_criterion_node_value, _test_criterion_proxy_impurity_improvement, _test_criterion_impurity_improvement\n'), ((3012, 3045), 'numpy.array', 'numpy.array', (['[0.9, 1.1, 1.9, 2.1]'], {}), '([0.9, 1.1, 1.9, 2.1])\n', (3023, 3045), False, 'import numpy\n'), ((3059, 3077), 'sklearn.tree._criterion.MSE', 'MSE', (['(1)', 'X.shape[0]'], {}), '(1, X.shape[0])\n', (3062, 3077), False, 'from sklearn.tree._criterion import MSE\n'), ((3091, 3118), 
'mlinsights.mlmodel.piecewise_tree_regression_criterion_linear.LinearRegressorCriterion', 'LinearRegressorCriterion', (['X'], {}), '(X)\n', (3115, 3118), False, 'from mlinsights.mlmodel.piecewise_tree_regression_criterion_linear import LinearRegressorCriterion\n'), ((3131, 3156), 'numpy.ones', 'numpy.ones', (['(y.shape[0],)'], {}), '((y.shape[0],))\n', (3141, 3156), False, 'import numpy\n'), ((3278, 3334), 'mlinsights.mlmodel._piecewise_tree_regression_common._test_criterion_init', '_test_criterion_init', (['c1', 'ys', 'w', '(1.0)', 'ind', '(0)', 'y.shape[0]'], {}), '(c1, ys, w, 1.0, ind, 0, y.shape[0])\n', (3298, 3334), False, 'from mlinsights.mlmodel._piecewise_tree_regression_common import _test_criterion_init, _test_criterion_node_impurity, _test_criterion_node_impurity_children, _test_criterion_update, _test_criterion_node_value, _test_criterion_proxy_impurity_improvement, _test_criterion_impurity_improvement\n'), ((3342, 3398), 'mlinsights.mlmodel._piecewise_tree_regression_common._test_criterion_init', '_test_criterion_init', (['c2', 'ys', 'w', '(1.0)', 'ind', '(0)', 'y.shape[0]'], {}), '(c2, ys, w, 1.0, ind, 0, y.shape[0])\n', (3362, 3398), False, 'from mlinsights.mlmodel._piecewise_tree_regression_common import _test_criterion_init, _test_criterion_node_impurity, _test_criterion_node_impurity_children, _test_criterion_update, _test_criterion_node_value, _test_criterion_proxy_impurity_improvement, _test_criterion_impurity_improvement\n'), ((3411, 3444), 'mlinsights.mlmodel._piecewise_tree_regression_common._test_criterion_node_impurity', '_test_criterion_node_impurity', (['c1'], {}), '(c1)\n', (3440, 3444), False, 'from mlinsights.mlmodel._piecewise_tree_regression_common import _test_criterion_init, _test_criterion_node_impurity, _test_criterion_node_impurity_children, _test_criterion_update, _test_criterion_node_value, _test_criterion_proxy_impurity_improvement, _test_criterion_impurity_improvement\n'), ((3458, 3491), 
'mlinsights.mlmodel._piecewise_tree_regression_common._test_criterion_node_impurity', '_test_criterion_node_impurity', (['c2'], {}), '(c2)\n', (3487, 3491), False, 'from mlinsights.mlmodel._piecewise_tree_regression_common import _test_criterion_init, _test_criterion_node_impurity, _test_criterion_node_impurity_children, _test_criterion_update, _test_criterion_node_value, _test_criterion_proxy_impurity_improvement, _test_criterion_impurity_improvement\n'), ((3540, 3570), 'mlinsights.mlmodel._piecewise_tree_regression_common._test_criterion_node_value', '_test_criterion_node_value', (['c1'], {}), '(c1)\n', (3566, 3570), False, 'from mlinsights.mlmodel._piecewise_tree_regression_common import _test_criterion_init, _test_criterion_node_impurity, _test_criterion_node_impurity_children, _test_criterion_update, _test_criterion_node_value, _test_criterion_proxy_impurity_improvement, _test_criterion_impurity_improvement\n'), ((3584, 3614), 'mlinsights.mlmodel._piecewise_tree_regression_common._test_criterion_node_value', '_test_criterion_node_value', (['c2'], {}), '(c2)\n', (3610, 3614), False, 'from mlinsights.mlmodel._piecewise_tree_regression_common import _test_criterion_init, _test_criterion_node_impurity, _test_criterion_node_impurity_children, _test_criterion_update, _test_criterion_node_value, _test_criterion_proxy_impurity_improvement, _test_criterion_impurity_improvement\n'), ((3661, 3707), 'mlinsights.mlmodel._piecewise_tree_regression_common._test_criterion_proxy_impurity_improvement', '_test_criterion_proxy_impurity_improvement', (['c1'], {}), '(c1)\n', (3703, 3707), False, 'from mlinsights.mlmodel._piecewise_tree_regression_common import _test_criterion_init, _test_criterion_node_impurity, _test_criterion_node_impurity_children, _test_criterion_update, _test_criterion_node_value, _test_criterion_proxy_impurity_improvement, _test_criterion_impurity_improvement\n'), ((3721, 3767), 
'mlinsights.mlmodel._piecewise_tree_regression_common._test_criterion_proxy_impurity_improvement', '_test_criterion_proxy_impurity_improvement', (['c2'], {}), '(c2)\n', (3763, 3767), False, 'from mlinsights.mlmodel._piecewise_tree_regression_common import _test_criterion_init, _test_criterion_node_impurity, _test_criterion_node_impurity_children, _test_criterion_update, _test_criterion_node_value, _test_criterion_proxy_impurity_improvement, _test_criterion_impurity_improvement\n'), ((3887, 3920), 'numpy.array', 'numpy.array', (['[0.9, 1.1, 1.9, 2.1]'], {}), '([0.9, 1.1, 1.9, 2.1])\n', (3898, 3920), False, 'import numpy\n'), ((3934, 3952), 'sklearn.tree._criterion.MSE', 'MSE', (['(1)', 'X.shape[0]'], {}), '(1, X.shape[0])\n', (3937, 3952), False, 'from sklearn.tree._criterion import MSE\n'), ((3966, 3993), 'mlinsights.mlmodel.piecewise_tree_regression_criterion_linear.LinearRegressorCriterion', 'LinearRegressorCriterion', (['X'], {}), '(X)\n', (3990, 3993), False, 'from mlinsights.mlmodel.piecewise_tree_regression_criterion_linear import LinearRegressorCriterion\n'), ((4006, 4031), 'numpy.ones', 'numpy.ones', (['(y.shape[0],)'], {}), '((y.shape[0],))\n', (4016, 4031), False, 'import numpy\n'), ((4046, 4088), 'numpy.array', 'numpy.array', (['[0, 3, 2, 1]'], {'dtype': 'ind.dtype'}), '([0, 3, 2, 1], dtype=ind.dtype)\n', (4057, 4088), False, 'import numpy\n'), ((4151, 4207), 'mlinsights.mlmodel._piecewise_tree_regression_common._test_criterion_init', '_test_criterion_init', (['c1', 'ys', 'w', '(1.0)', 'ind', '(1)', 'y.shape[0]'], {}), '(c1, ys, w, 1.0, ind, 1, y.shape[0])\n', (4171, 4207), False, 'from mlinsights.mlmodel._piecewise_tree_regression_common import _test_criterion_init, _test_criterion_node_impurity, _test_criterion_node_impurity_children, _test_criterion_update, _test_criterion_node_value, _test_criterion_proxy_impurity_improvement, _test_criterion_impurity_improvement\n'), ((4215, 4271), 
'mlinsights.mlmodel._piecewise_tree_regression_common._test_criterion_init', '_test_criterion_init', (['c2', 'ys', 'w', '(1.0)', 'ind', '(1)', 'y.shape[0]'], {}), '(c2, ys, w, 1.0, ind, 1, y.shape[0])\n', (4235, 4271), False, 'from mlinsights.mlmodel._piecewise_tree_regression_common import _test_criterion_init, _test_criterion_node_impurity, _test_criterion_node_impurity_children, _test_criterion_update, _test_criterion_node_value, _test_criterion_proxy_impurity_improvement, _test_criterion_impurity_improvement\n'), ((4284, 4317), 'mlinsights.mlmodel._piecewise_tree_regression_common._test_criterion_node_impurity', '_test_criterion_node_impurity', (['c1'], {}), '(c1)\n', (4313, 4317), False, 'from mlinsights.mlmodel._piecewise_tree_regression_common import _test_criterion_init, _test_criterion_node_impurity, _test_criterion_node_impurity_children, _test_criterion_update, _test_criterion_node_value, _test_criterion_proxy_impurity_improvement, _test_criterion_impurity_improvement\n'), ((4331, 4364), 'mlinsights.mlmodel._piecewise_tree_regression_common._test_criterion_node_impurity', '_test_criterion_node_impurity', (['c2'], {}), '(c2)\n', (4360, 4364), False, 'from mlinsights.mlmodel._piecewise_tree_regression_common import _test_criterion_init, _test_criterion_node_impurity, _test_criterion_node_impurity_children, _test_criterion_update, _test_criterion_node_value, _test_criterion_proxy_impurity_improvement, _test_criterion_impurity_improvement\n'), ((4413, 4443), 'mlinsights.mlmodel._piecewise_tree_regression_common._test_criterion_node_value', '_test_criterion_node_value', (['c1'], {}), '(c1)\n', (4439, 4443), False, 'from mlinsights.mlmodel._piecewise_tree_regression_common import _test_criterion_init, _test_criterion_node_impurity, _test_criterion_node_impurity_children, _test_criterion_update, _test_criterion_node_value, _test_criterion_proxy_impurity_improvement, _test_criterion_impurity_improvement\n'), ((4457, 4487), 
'mlinsights.mlmodel._piecewise_tree_regression_common._test_criterion_node_value', '_test_criterion_node_value', (['c2'], {}), '(c2)\n', (4483, 4487), False, 'from mlinsights.mlmodel._piecewise_tree_regression_common import _test_criterion_init, _test_criterion_node_impurity, _test_criterion_node_impurity_children, _test_criterion_update, _test_criterion_node_value, _test_criterion_proxy_impurity_improvement, _test_criterion_impurity_improvement\n'), ((4534, 4580), 'mlinsights.mlmodel._piecewise_tree_regression_common._test_criterion_proxy_impurity_improvement', '_test_criterion_proxy_impurity_improvement', (['c1'], {}), '(c1)\n', (4576, 4580), False, 'from mlinsights.mlmodel._piecewise_tree_regression_common import _test_criterion_init, _test_criterion_node_impurity, _test_criterion_node_impurity_children, _test_criterion_update, _test_criterion_node_value, _test_criterion_proxy_impurity_improvement, _test_criterion_impurity_improvement\n'), ((4594, 4640), 'mlinsights.mlmodel._piecewise_tree_regression_common._test_criterion_proxy_impurity_improvement', '_test_criterion_proxy_impurity_improvement', (['c2'], {}), '(c2)\n', (4636, 4640), False, 'from mlinsights.mlmodel._piecewise_tree_regression_common import _test_criterion_init, _test_criterion_node_impurity, _test_criterion_node_impurity_children, _test_criterion_update, _test_criterion_node_value, _test_criterion_proxy_impurity_improvement, _test_criterion_impurity_improvement\n'), ((5988, 6025), 'mlinsights.mlmodel.piecewise_tree_regression_criterion_linear.LinearRegressorCriterion.create', 'LinearRegressorCriterion.create', (['X', 'y'], {}), '(X, y)\n', (6019, 6025), False, 'from mlinsights.mlmodel.piecewise_tree_regression_criterion_linear import LinearRegressorCriterion\n'), ((6041, 6058), 'numpy.empty', 'numpy.empty', (['(3,)'], {}), '((3,))\n', (6052, 6058), False, 'import numpy\n'), ((6249, 6282), 'numpy.array', 'numpy.array', (['[0.9, 1.1, 1.9, 2.1]'], {}), '([0.9, 1.1, 1.9, 2.1])\n', (6260, 6282), 
False, 'import numpy\n'), ((6298, 6332), 'sklearn.tree.DecisionTreeRegressor', 'DecisionTreeRegressor', ([], {'max_depth': '(1)'}), '(max_depth=1)\n', (6319, 6332), False, 'from sklearn.tree import DecisionTreeRegressor\n'), ((6401, 6428), 'mlinsights.mlmodel.piecewise_tree_regression_criterion_linear.LinearRegressorCriterion', 'LinearRegressorCriterion', (['X'], {}), '(X)\n', (6425, 6428), False, 'from mlinsights.mlmodel.piecewise_tree_regression_criterion_linear import LinearRegressorCriterion\n'), ((6444, 6494), 'sklearn.tree.DecisionTreeRegressor', 'DecisionTreeRegressor', ([], {'criterion': 'crit', 'max_depth': '(1)'}), '(criterion=crit, max_depth=1)\n', (6465, 6494), False, 'from sklearn.tree import DecisionTreeRegressor\n'), ((6716, 6736), 'sklearn.datasets.load_iris', 'datasets.load_iris', ([], {}), '()\n', (6734, 6736), False, 'from sklearn import datasets\n'), ((6790, 6813), 'sklearn.tree.DecisionTreeRegressor', 'DecisionTreeRegressor', ([], {}), '()\n', (6811, 6813), False, 'from sklearn.tree import DecisionTreeRegressor\n'), ((7108, 7128), 'sklearn.datasets.load_iris', 'datasets.load_iris', ([], {}), '()\n', (7126, 7128), False, 'from sklearn import datasets\n'), ((7182, 7205), 'sklearn.tree.DecisionTreeRegressor', 'DecisionTreeRegressor', ([], {}), '()\n', (7203, 7205), False, 'from sklearn.tree import DecisionTreeRegressor\n'), ((7273, 7315), 'mlinsights.mlmodel.piecewise_tree_regression.PiecewiseTreeRegressor', 'PiecewiseTreeRegressor', ([], {'criterion': '"""mselin"""'}), "(criterion='mselin')\n", (7295, 7315), False, 'from mlinsights.mlmodel.piecewise_tree_regression import PiecewiseTreeRegressor\n'), ((8113, 8133), 'sklearn.datasets.load_iris', 'datasets.load_iris', ([], {}), '()\n', (8131, 8133), False, 'from sklearn import datasets\n'), ((8215, 8237), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y'], {}), '(X, y)\n', (8231, 8237), False, 'from sklearn.model_selection import train_test_split\n'), ((8253, 8276), 
'sklearn.tree.DecisionTreeRegressor', 'DecisionTreeRegressor', ([], {}), '()\n', (8274, 8276), False, 'from sklearn.tree import DecisionTreeRegressor\n'), ((8362, 8404), 'mlinsights.mlmodel.piecewise_tree_regression.PiecewiseTreeRegressor', 'PiecewiseTreeRegressor', ([], {'criterion': '"""mselin"""'}), "(criterion='mselin')\n", (8384, 8404), False, 'from mlinsights.mlmodel.piecewise_tree_regression import PiecewiseTreeRegressor\n'), ((992, 1025), 'numpy.array', 'numpy.array', (['[[10.0, 12.0, 13.0]]'], {}), '([[10.0, 12.0, 13.0]])\n', (1003, 1025), False, 'import numpy\n'), ((2056, 2071), 'numpy.isnan', 'numpy.isnan', (['p1'], {}), '(p1)\n', (2067, 2071), False, 'import numpy\n'), ((2073, 2088), 'numpy.isnan', 'numpy.isnan', (['p2'], {}), '(p2)\n', (2084, 2088), False, 'import numpy\n'), ((2103, 2133), 'numpy.array', 'numpy.array', (['[[1.0, 2.0, 3.0]]'], {}), '([[1.0, 2.0, 3.0]])\n', (2114, 2133), False, 'import numpy\n'), ((2917, 2932), 'numpy.isnan', 'numpy.isnan', (['p1'], {}), '(p1)\n', (2928, 2932), False, 'import numpy\n'), ((2934, 2949), 'numpy.isnan', 'numpy.isnan', (['p2'], {}), '(p2)\n', (2945, 2949), False, 'import numpy\n'), ((2964, 3001), 'numpy.array', 'numpy.array', (['[[1.0, 2.0, 10.0, 11.0]]'], {}), '([[1.0, 2.0, 10.0, 11.0]])\n', (2975, 3001), False, 'import numpy\n'), ((3792, 3807), 'numpy.isnan', 'numpy.isnan', (['p1'], {}), '(p1)\n', (3803, 3807), False, 'import numpy\n'), ((3809, 3824), 'numpy.isnan', 'numpy.isnan', (['p2'], {}), '(p2)\n', (3820, 3824), False, 'import numpy\n'), ((3839, 3876), 'numpy.array', 'numpy.array', (['[[1.0, 2.0, 10.0, 11.0]]'], {}), '([[1.0, 2.0, 10.0, 11.0]])\n', (3850, 3876), False, 'import numpy\n'), ((4665, 4680), 'numpy.isnan', 'numpy.isnan', (['p1'], {}), '(p1)\n', (4676, 4680), False, 'import numpy\n'), ((4682, 4697), 'numpy.isnan', 'numpy.isnan', (['p2'], {}), '(p2)\n', (4693, 4697), False, 'import numpy\n'), ((4742, 4771), 'mlinsights.mlmodel._piecewise_tree_regression_common._test_criterion_update', 
'_test_criterion_update', (['c1', 'i'], {}), '(c1, i)\n', (4764, 4771), False, 'from mlinsights.mlmodel._piecewise_tree_regression_common import _test_criterion_init, _test_criterion_node_impurity, _test_criterion_node_impurity_children, _test_criterion_update, _test_criterion_node_value, _test_criterion_proxy_impurity_improvement, _test_criterion_impurity_improvement\n'), ((4784, 4813), 'mlinsights.mlmodel._piecewise_tree_regression_common._test_criterion_update', '_test_criterion_update', (['c2', 'i'], {}), '(c2, i)\n', (4806, 4813), False, 'from mlinsights.mlmodel._piecewise_tree_regression_common import _test_criterion_init, _test_criterion_node_impurity, _test_criterion_node_impurity_children, _test_criterion_update, _test_criterion_node_value, _test_criterion_proxy_impurity_improvement, _test_criterion_impurity_improvement\n'), ((4842, 4884), 'mlinsights.mlmodel._piecewise_tree_regression_common._test_criterion_node_impurity_children', '_test_criterion_node_impurity_children', (['c1'], {}), '(c1)\n', (4880, 4884), False, 'from mlinsights.mlmodel._piecewise_tree_regression_common import _test_criterion_init, _test_criterion_node_impurity, _test_criterion_node_impurity_children, _test_criterion_update, _test_criterion_node_value, _test_criterion_proxy_impurity_improvement, _test_criterion_impurity_improvement\n'), ((4913, 4955), 'mlinsights.mlmodel._piecewise_tree_regression_common._test_criterion_node_impurity_children', '_test_criterion_node_impurity_children', (['c2'], {}), '(c2)\n', (4951, 4955), False, 'from mlinsights.mlmodel._piecewise_tree_regression_common import _test_criterion_init, _test_criterion_node_impurity, _test_criterion_node_impurity_children, _test_criterion_update, _test_criterion_node_value, _test_criterion_proxy_impurity_improvement, _test_criterion_impurity_improvement\n'), ((5065, 5095), 'mlinsights.mlmodel._piecewise_tree_regression_common._test_criterion_node_value', '_test_criterion_node_value', (['c1'], {}), '(c1)\n', (5091, 5095), 
False, 'from mlinsights.mlmodel._piecewise_tree_regression_common import _test_criterion_init, _test_criterion_node_impurity, _test_criterion_node_impurity_children, _test_criterion_update, _test_criterion_node_value, _test_criterion_proxy_impurity_improvement, _test_criterion_impurity_improvement\n'), ((5113, 5143), 'mlinsights.mlmodel._piecewise_tree_regression_common._test_criterion_node_value', '_test_criterion_node_value', (['c2'], {}), '(c2)\n', (5139, 5143), False, 'from mlinsights.mlmodel._piecewise_tree_regression_common import _test_criterion_init, _test_criterion_node_impurity, _test_criterion_node_impurity_children, _test_criterion_update, _test_criterion_node_value, _test_criterion_proxy_impurity_improvement, _test_criterion_impurity_improvement\n'), ((5705, 5722), 'numpy.empty', 'numpy.empty', (['(2,)'], {}), '((2,))\n', (5716, 5722), False, 'import numpy\n'), ((5897, 5930), 'numpy.array', 'numpy.array', (['[[10.0, 12.0, 13.0]]'], {}), '([[10.0, 12.0, 13.0]])\n', (5908, 5930), False, 'import numpy\n'), ((5942, 5975), 'numpy.array', 'numpy.array', (['[[20.0, 22.0, 23.0]]'], {}), '([[20.0, 22.0, 23.0]])\n', (5953, 5975), False, 'import numpy\n'), ((6122, 6142), 'numpy.array', 'numpy.array', (['[1, 10]'], {}), '([1, 10])\n', (6133, 6142), False, 'import numpy\n'), ((6201, 6238), 'numpy.array', 'numpy.array', (['[[1.0, 2.0, 10.0, 11.0]]'], {}), '([[1.0, 2.0, 10.0, 11.0]])\n', (6212, 6238), False, 'import numpy\n'), ((1301, 1325), 'numpy.arange', 'numpy.arange', (['y.shape[0]'], {}), '(y.shape[0])\n', (1313, 1325), False, 'import numpy\n'), ((2296, 2320), 'numpy.arange', 'numpy.arange', (['y.shape[0]'], {}), '(y.shape[0])\n', (2308, 2320), False, 'import numpy\n'), ((3171, 3195), 'numpy.arange', 'numpy.arange', (['y.shape[0]'], {}), '(y.shape[0])\n', (3183, 3195), False, 'import numpy\n'), ((5258, 5318), 'mlinsights.mlmodel._piecewise_tree_regression_common._test_criterion_impurity_improvement', '_test_criterion_impurity_improvement', (['c1', '(0.0)', 
'left1', 'right1'], {}), '(c1, 0.0, left1, right1)\n', (5294, 5318), False, 'from mlinsights.mlmodel._piecewise_tree_regression_common import _test_criterion_init, _test_criterion_node_impurity, _test_criterion_node_impurity_children, _test_criterion_update, _test_criterion_node_value, _test_criterion_proxy_impurity_improvement, _test_criterion_impurity_improvement\n'), ((5360, 5420), 'mlinsights.mlmodel._piecewise_tree_regression_common._test_criterion_impurity_improvement', '_test_criterion_impurity_improvement', (['c2', '(0.0)', 'left2', 'right2'], {}), '(c2, 0.0, left2, right2)\n', (5396, 5420), False, 'from mlinsights.mlmodel._piecewise_tree_regression_common import _test_criterion_init, _test_criterion_node_impurity, _test_criterion_node_impurity_children, _test_criterion_update, _test_criterion_node_value, _test_criterion_proxy_impurity_improvement, _test_criterion_impurity_improvement\n'), ((6913, 6940), 'mlinsights.mlmodel.piecewise_tree_regression_criterion_linear.LinearRegressorCriterion', 'LinearRegressorCriterion', (['X'], {}), '(X)\n', (6937, 6940), False, 'from mlinsights.mlmodel.piecewise_tree_regression_criterion_linear import LinearRegressorCriterion\n'), ((5530, 5575), 'mlinsights.mlmodel._piecewise_tree_regression_common._test_criterion_impurity_improvement', '_test_criterion_impurity_improvement', (['c1', '(0.0)'], {}), '(c1, 0.0)\n', (5566, 5575), False, 'from mlinsights.mlmodel._piecewise_tree_regression_common import _test_criterion_init, _test_criterion_node_impurity, _test_criterion_node_impurity_children, _test_criterion_update, _test_criterion_node_value, _test_criterion_proxy_impurity_improvement, _test_criterion_impurity_improvement\n'), ((5596, 5641), 'mlinsights.mlmodel._piecewise_tree_regression_common._test_criterion_impurity_improvement', '_test_criterion_impurity_improvement', (['c2', '(0.0)'], {}), '(c2, 0.0)\n', (5632, 5641), False, 'from mlinsights.mlmodel._piecewise_tree_regression_common import _test_criterion_init, 
_test_criterion_node_impurity, _test_criterion_node_impurity_children, _test_criterion_update, _test_criterion_node_value, _test_criterion_proxy_impurity_improvement, _test_criterion_impurity_improvement\n')] |
"""
The MIT License (MIT)
Copyright (c) 2016 <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
This library is used and modified in General Artificial Intelligence for Game Playing by <NAME>, 2017.
See the original license above.
"""
import random
import numpy as np
import tensorflow as tf
from reinforcement.replay_buffer import ReplayBuffer
class NeuralQLearner(object):
    """Deep Q-Network agent (TensorFlow 1.x graph mode).

    Wires together: an online Q network for action scoring, a target network
    for bootstrapped value estimates (optionally Double-Q), a TD-loss training
    op with optional L2 regularization, an experience replay buffer, and a
    linearly annealed epsilon-greedy policy.
    """

    def __init__(self, session,
                 optimizer,
                 q_network,
                 state_dim,
                 num_actions,
                 batch_size=32,
                 init_exp=0.5,  # initial exploration prob
                 final_exp=0.1,  # final exploration prob
                 anneal_steps=10000,  # N steps for annealing exploration
                 replay_buffer_size=10000,
                 store_replay_every=5,  # how frequent to store experience
                 discount_factor=0.9,  # discount future rewards
                 target_update_frequency=100,
                 reg_param=0.01,  # regularization constants
                 double_q_learning=False,
                 summary_writer=None,
                 summary_every=100):
        # tensorflow machinery
        self.session = session
        self.optimizer = optimizer
        self.summary_writer = summary_writer
        # model components
        self.q_network = q_network
        self.replay_buffer = ReplayBuffer(buffer_size=replay_buffer_size)
        # Q learning parameters
        self.batch_size = batch_size
        self.state_dim = state_dim
        self.num_actions = num_actions
        self.exploration = init_exp
        self.init_exp = init_exp
        self.final_exp = final_exp
        self.anneal_steps = anneal_steps
        self.discount_factor = discount_factor
        self.double_q_learning = double_q_learning
        self.target_update_frequency = target_update_frequency
        # training parameters
        self.reg_param = reg_param
        # counters
        self.store_replay_every = store_replay_every
        self.store_experience_cnt = 0
        self.train_iteration = 0
        self.last_cost = 1
        # create and initialize variables
        self.create_variables()
        self.session.run(tf.global_variables_initializer())
        # make sure all variables are initialized
        self.session.run(tf.assert_variables_initialized())
        if self.summary_writer is not None:
            # graph was not available when journalist was created
            self.summary_writer.add_graph(self.session.graph)
            self.summary_every = summary_every
        self.saver = tf.train.Saver(tf.global_variables(), max_to_keep=None)
        self.sess = self.session

    def create_variables(self):
        """Build the whole TF graph: prediction, target values, loss, updates."""
        # compute action from a state: a* = argmax_a Q(s_t,a)
        with tf.name_scope("predict_actions"):
            # raw state representation
            self.states = tf.placeholder(tf.float32, (None, self.state_dim), name="states")
            self.is_training = tf.placeholder(tf.bool)
            # initialize Q network
            with tf.variable_scope("q_network"):
                self.q_outputs = self.q_network(self.states, self.is_training)
            # predict actions from Q network
            self.action_scores = tf.identity(self.q_outputs, name="action_scores")
            self.predicted_actions = tf.argmax(self.action_scores, dimension=1, name="predicted_actions")
        # estimate rewards using the next state: r(s_t,a_t) + argmax_a Q(s_{t+1}, a)
        with tf.name_scope("estimate_future_rewards"):
            self.next_states = tf.placeholder(tf.float32, (None, self.state_dim), name="next_states")
            self.next_state_mask = tf.placeholder(tf.float32, (None,), name="next_state_masks")
            if self.double_q_learning:
                # Double-Q: the online network *selects* the next action, the
                # target network *evaluates* it.
                with tf.variable_scope("q_network", reuse=True):
                    self.q_next_outputs = self.q_network(self.next_states)
                self.action_selection = tf.argmax(tf.stop_gradient(self.q_next_outputs), 1, name="action_selection")
                self.action_selection_mask = tf.one_hot(self.action_selection, self.num_actions, 1, 0)
                # use target network for action evaluation
                with tf.variable_scope("target_network"):
                    self.target_outputs = self.q_network(self.next_states) * tf.cast(self.action_selection_mask,
                                                                                     tf.float32)
                self.action_evaluation = tf.reduce_sum(self.target_outputs, reduction_indices=[1, ])
                self.target_values = self.action_evaluation * self.next_state_mask
            else:
                # initialize target network
                with tf.variable_scope("target_network"):
                    self.target_outputs = self.q_network(self.next_states, self.is_training)
                # compute future rewards; next_state_mask zeroes the bootstrap
                # term for terminal states
                self.next_action_scores = self.target_outputs
                self.target_values = tf.reduce_max(self.next_action_scores,
                                                   reduction_indices=[1, ]) * self.next_state_mask
            self.rewards = tf.placeholder(tf.float32, (None,), name="rewards")
            self.future_rewards = self.rewards + self.discount_factor * self.target_values
        # compute loss and gradients
        with tf.name_scope("compute_temporal_differences"):
            # compute temporal difference loss / Q-learning difference (in active learning)
            self.action_mask = tf.placeholder(tf.float32, (None, self.num_actions), name="action_mask")
            self.masked_action_scores = tf.reduce_sum(self.action_scores * self.action_mask, reduction_indices=[1, ])
            self.temp_diff = self.future_rewards - self.masked_action_scores
            self.td_loss = tf.reduce_mean(tf.square(self.temp_diff))
            if self.reg_param is None:  # BUG FIX: was `== None`; identity test is the correct idiom
                # Not using regularization loss
                self.loss = self.td_loss
            else:
                # L2 regularization loss
                q_network_variables = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope="q_network")
                self.reg_loss = self.reg_param * tf.reduce_sum(
                    [tf.reduce_sum(tf.square(x)) for x in q_network_variables])
                self.loss = self.td_loss + self.reg_loss
            self.train_op = self.optimizer.minimize(self.loss)
        # update target network with Q network
        with tf.name_scope("update_target_network"):
            self.target_network_update = []
            q_network_variables = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope="q_network")
            target_network_variables = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope="target_network")
            for v_source, v_target in zip(q_network_variables, target_network_variables):
                # update x' <- x
                update_op = v_target.assign(v_source)
                self.target_network_update.append(update_op)
            self.target_network_update = tf.group(*self.target_network_update)
        self.no_op = tf.no_op()

    def storeExperience(self, state, action, reward, next_state, done):
        """Store a transition, subsampled by store_replay_every (end states always kept).

        If a tweak is needed (e.g. skipping specific states or negative
        rewards) this is the place; in general use leave it unchanged.
        """
        if self.store_experience_cnt % self.store_replay_every == 0 or done:
            self.replay_buffer.add(state, action, reward, next_state, done)
        self.store_experience_cnt += 1

    def eGreedyAction(self, states, explore=True, is_training=True):
        """Return a random action with probability `exploration`, else argmax Q."""
        if explore and self.exploration > random.random():
            return random.randint(0, self.num_actions - 1)
        else:
            return self.session.run(self.predicted_actions, {self.states: states, self.is_training: is_training})[0]

    def annealExploration(self, stategy='linear'):
        """Linearly anneal exploration from init_exp to final_exp over anneal_steps.

        NOTE(review): the misspelled `stategy` parameter is unused; it is kept
        for backward compatibility with existing callers.
        """
        ratio = max((self.anneal_steps - self.train_iteration) / float(self.anneal_steps), 0)
        self.exploration = (self.init_exp - self.final_exp) * ratio + self.final_exp

    def updateModel(self):
        """Run one SGD step on a replay minibatch; periodically sync the target net."""
        # not enough experiences yet
        if self.replay_buffer.count() < self.batch_size:
            return
        # we want at least 1/10 of buffer full
        if self.replay_buffer.count() < self.replay_buffer.buffer_size / 10:
            return
        batch = self.replay_buffer.get_batch(self.batch_size)
        states = np.zeros((self.batch_size, self.state_dim))
        rewards = np.zeros((self.batch_size,))
        action_mask = np.zeros((self.batch_size, self.num_actions))
        next_states = np.zeros((self.batch_size, self.state_dim))
        next_state_mask = np.zeros((self.batch_size,))
        for k, (s0, a, r, s1, done) in enumerate(batch):
            states[k] = s0
            rewards[k] = r
            action_mask[k][a] = 1
            # terminal transitions keep a zero next-state mask (no bootstrap)
            if not done:
                next_states[k] = s1
                next_state_mask[k] = 1
        # perform one update of training
        cost, _ = self.session.run([
            self.loss,
            self.train_op,
        ], {
            self.states: states,
            self.next_states: next_states,
            self.next_state_mask: next_state_mask,
            self.action_mask: action_mask,
            self.rewards: rewards,
            self.is_training: True
        })
        self.last_cost = cost
        # update target network using Q-network
        if self.train_iteration % self.target_update_frequency == 0:
            self.session.run(self.target_network_update)
        self.annealExploration()
        self.train_iteration += 1

    def measure_summaries(self, i_episode, score, steps, negative_rewards_count):
        """Write per-episode scalars (score, exploration rate, steps, last loss)."""
        report_measures = ([tf.Summary.Value(tag='score', simple_value=score),
                            tf.Summary.Value(tag='exploration_rate', simple_value=self.exploration),
                            tf.Summary.Value(tag='number_of_steps', simple_value=steps),
                            tf.Summary.Value(tag='loss', simple_value=float(self.last_cost))])
        self.summary_writer.add_summary(tf.Summary(value=report_measures), i_episode)
| [
"tensorflow.reduce_sum",
"tensorflow.group",
"tensorflow.Summary.Value",
"tensorflow.cast",
"tensorflow.placeholder",
"tensorflow.square",
"random.randint",
"tensorflow.one_hot",
"reinforcement.replay_buffer.ReplayBuffer",
"tensorflow.variable_scope",
"tensorflow.global_variables",
"tensorflow... | [((2370, 2414), 'reinforcement.replay_buffer.ReplayBuffer', 'ReplayBuffer', ([], {'buffer_size': 'replay_buffer_size'}), '(buffer_size=replay_buffer_size)\n', (2382, 2414), False, 'from reinforcement.replay_buffer import ReplayBuffer\n'), ((8602, 8612), 'tensorflow.no_op', 'tf.no_op', ([], {}), '()\n', (8610, 8612), True, 'import tensorflow as tf\n'), ((10139, 10182), 'numpy.zeros', 'np.zeros', (['(self.batch_size, self.state_dim)'], {}), '((self.batch_size, self.state_dim))\n', (10147, 10182), True, 'import numpy as np\n'), ((10201, 10229), 'numpy.zeros', 'np.zeros', (['(self.batch_size,)'], {}), '((self.batch_size,))\n', (10209, 10229), True, 'import numpy as np\n'), ((10252, 10297), 'numpy.zeros', 'np.zeros', (['(self.batch_size, self.num_actions)'], {}), '((self.batch_size, self.num_actions))\n', (10260, 10297), True, 'import numpy as np\n'), ((10320, 10363), 'numpy.zeros', 'np.zeros', (['(self.batch_size, self.state_dim)'], {}), '((self.batch_size, self.state_dim))\n', (10328, 10363), True, 'import numpy as np\n'), ((10390, 10418), 'numpy.zeros', 'np.zeros', (['(self.batch_size,)'], {}), '((self.batch_size,))\n', (10398, 10418), True, 'import numpy as np\n'), ((3202, 3235), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (3233, 3235), True, 'import tensorflow as tf\n'), ((3313, 3346), 'tensorflow.assert_variables_initialized', 'tf.assert_variables_initialized', ([], {}), '()\n', (3344, 3346), True, 'import tensorflow as tf\n'), ((3605, 3626), 'tensorflow.global_variables', 'tf.global_variables', ([], {}), '()\n', (3624, 3626), True, 'import tensorflow as tf\n'), ((3787, 3819), 'tensorflow.name_scope', 'tf.name_scope', (['"""predict_actions"""'], {}), "('predict_actions')\n", (3800, 3819), True, 'import tensorflow as tf\n'), ((3886, 3951), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '(None, self.state_dim)'], {'name': '"""states"""'}), "(tf.float32, (None, self.state_dim), 
name='states')\n", (3900, 3951), True, 'import tensorflow as tf\n'), ((3983, 4006), 'tensorflow.placeholder', 'tf.placeholder', (['tf.bool'], {}), '(tf.bool)\n', (3997, 4006), True, 'import tensorflow as tf\n'), ((4249, 4298), 'tensorflow.identity', 'tf.identity', (['self.q_outputs'], {'name': '"""action_scores"""'}), "(self.q_outputs, name='action_scores')\n", (4260, 4298), True, 'import tensorflow as tf\n'), ((4408, 4476), 'tensorflow.argmax', 'tf.argmax', (['self.action_scores'], {'dimension': '(1)', 'name': '"""predicted_actions"""'}), "(self.action_scores, dimension=1, name='predicted_actions')\n", (4417, 4476), True, 'import tensorflow as tf\n'), ((4576, 4616), 'tensorflow.name_scope', 'tf.name_scope', (['"""estimate_future_rewards"""'], {}), "('estimate_future_rewards')\n", (4589, 4616), True, 'import tensorflow as tf\n'), ((4649, 4719), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '(None, self.state_dim)'], {'name': '"""next_states"""'}), "(tf.float32, (None, self.state_dim), name='next_states')\n", (4663, 4719), True, 'import tensorflow as tf\n'), ((4755, 4815), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '(None,)'], {'name': '"""next_state_masks"""'}), "(tf.float32, (None,), name='next_state_masks')\n", (4769, 4815), True, 'import tensorflow as tf\n'), ((6635, 6686), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '(None,)'], {'name': '"""rewards"""'}), "(tf.float32, (None,), name='rewards')\n", (6649, 6686), True, 'import tensorflow as tf\n'), ((6829, 6874), 'tensorflow.name_scope', 'tf.name_scope', (['"""compute_temporal_differences"""'], {}), "('compute_temporal_differences')\n", (6842, 6874), True, 'import tensorflow as tf\n'), ((6999, 7071), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '(None, self.num_actions)'], {'name': '"""action_mask"""'}), "(tf.float32, (None, self.num_actions), name='action_mask')\n", (7013, 7071), True, 'import tensorflow as tf\n'), ((7112, 7187), 
'tensorflow.reduce_sum', 'tf.reduce_sum', (['(self.action_scores * self.action_mask)'], {'reduction_indices': '[1]'}), '(self.action_scores * self.action_mask, reduction_indices=[1])\n', (7125, 7187), True, 'import tensorflow as tf\n'), ((7959, 7997), 'tensorflow.name_scope', 'tf.name_scope', (['"""update_target_network"""'], {}), "('update_target_network')\n", (7972, 7997), True, 'import tensorflow as tf\n'), ((8077, 8147), 'tensorflow.get_collection', 'tf.get_collection', (['tf.GraphKeys.TRAINABLE_VARIABLES'], {'scope': '"""q_network"""'}), "(tf.GraphKeys.TRAINABLE_VARIABLES, scope='q_network')\n", (8094, 8147), True, 'import tensorflow as tf\n'), ((8187, 8262), 'tensorflow.get_collection', 'tf.get_collection', (['tf.GraphKeys.TRAINABLE_VARIABLES'], {'scope': '"""target_network"""'}), "(tf.GraphKeys.TRAINABLE_VARIABLES, scope='target_network')\n", (8204, 8262), True, 'import tensorflow as tf\n'), ((8542, 8579), 'tensorflow.group', 'tf.group', (['*self.target_network_update'], {}), '(*self.target_network_update)\n', (8550, 8579), True, 'import tensorflow as tf\n'), ((9373, 9412), 'random.randint', 'random.randint', (['(0)', '(self.num_actions - 1)'], {}), '(0, self.num_actions - 1)\n', (9387, 9412), False, 'import random\n'), ((11478, 11527), 'tensorflow.Summary.Value', 'tf.Summary.Value', ([], {'tag': '"""score"""', 'simple_value': 'score'}), "(tag='score', simple_value=score)\n", (11494, 11527), True, 'import tensorflow as tf\n'), ((11557, 11628), 'tensorflow.Summary.Value', 'tf.Summary.Value', ([], {'tag': '"""exploration_rate"""', 'simple_value': 'self.exploration'}), "(tag='exploration_rate', simple_value=self.exploration)\n", (11573, 11628), True, 'import tensorflow as tf\n'), ((11658, 11717), 'tensorflow.Summary.Value', 'tf.Summary.Value', ([], {'tag': '"""number_of_steps"""', 'simple_value': 'steps'}), "(tag='number_of_steps', simple_value=steps)\n", (11674, 11717), True, 'import tensorflow as tf\n'), ((11854, 11887), 'tensorflow.Summary', 'tf.Summary', 
([], {'value': 'report_measures'}), '(value=report_measures)\n', (11864, 11887), True, 'import tensorflow as tf\n'), ((4060, 4090), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""q_network"""'], {}), "('q_network')\n", (4077, 4090), True, 'import tensorflow as tf\n'), ((5295, 5352), 'tensorflow.one_hot', 'tf.one_hot', (['self.action_selection', 'self.num_actions', '(1)', '(0)'], {}), '(self.action_selection, self.num_actions, 1, 0)\n', (5305, 5352), True, 'import tensorflow as tf\n'), ((5721, 5778), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['self.target_outputs'], {'reduction_indices': '[1]'}), '(self.target_outputs, reduction_indices=[1])\n', (5734, 5778), True, 'import tensorflow as tf\n'), ((7309, 7334), 'tensorflow.square', 'tf.square', (['self.temp_diff'], {}), '(self.temp_diff)\n', (7318, 7334), True, 'import tensorflow as tf\n'), ((7562, 7632), 'tensorflow.get_collection', 'tf.get_collection', (['tf.GraphKeys.TRAINABLE_VARIABLES'], {'scope': '"""q_network"""'}), "(tf.GraphKeys.TRAINABLE_VARIABLES, scope='q_network')\n", (7579, 7632), True, 'import tensorflow as tf\n'), ((9337, 9352), 'random.random', 'random.random', ([], {}), '()\n', (9350, 9352), False, 'import random\n'), ((4932, 4974), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""q_network"""'], {'reuse': '(True)'}), "('q_network', reuse=True)\n", (4949, 4974), True, 'import tensorflow as tf\n'), ((5101, 5138), 'tensorflow.stop_gradient', 'tf.stop_gradient', (['self.q_next_outputs'], {}), '(self.q_next_outputs)\n', (5117, 5138), True, 'import tensorflow as tf\n'), ((5433, 5468), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""target_network"""'], {}), "('target_network')\n", (5450, 5468), True, 'import tensorflow as tf\n'), ((6031, 6066), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""target_network"""'], {}), "('target_network')\n", (6048, 6066), True, 'import tensorflow as tf\n'), ((6383, 6444), 'tensorflow.reduce_max', 'tf.reduce_max', 
(['self.next_action_scores'], {'reduction_indices': '[1]'}), '(self.next_action_scores, reduction_indices=[1])\n', (6396, 6444), True, 'import tensorflow as tf\n'), ((5547, 5594), 'tensorflow.cast', 'tf.cast', (['self.action_selection_mask', 'tf.float32'], {}), '(self.action_selection_mask, tf.float32)\n', (5554, 5594), True, 'import tensorflow as tf\n'), ((7732, 7744), 'tensorflow.square', 'tf.square', (['x'], {}), '(x)\n', (7741, 7744), True, 'import tensorflow as tf\n')] |
import numpy as np
class Fourvector:
    """An energy-momentum four-vector.

    Parameters
    ----------
    E : float, default 0.0
        Energy component in MeV.
    p : array_like of length 3, default [0.0, 0.0, 0.0]
        Momentum vector [p(x), p(y), p(z)] in MeV/c.
    """

    def __init__(self, E=0.00, p=None):
        # Use None as the default to avoid the shared-mutable-default pitfall.
        if p is None:
            p = [0.00, 0.00, 0.00]
        # Validate before touching state (the original raised only after the
        # attributes had already been assigned).
        if len(p) > 3:
            raise Exception("Error: Four Vector parameter size")
        self.__E = E
        self.__p = np.array(p)  # convert to natural units by dividing by c??
        self._fourv = np.append(self.__E, self.__p)

    def __repr__(self):
        return "%s([E, P] =%r)" % ("Four Vector", self._fourv)

    def __str__(self):
        return "[%g, %r]" % (self.__E, self.__p)

    def fourvec(self):
        """Return the full four-vector [E, px, py, pz] as a numpy array."""
        return self._fourv

    def energy(self):
        """Return the energy component E."""
        return self.__E

    def momentum(self):
        """Return the momentum three-vector as a numpy array."""
        return self.__p

    def copy(self):
        """Return an independent copy of this four-vector instance."""
        return Fourvector(self.__E, self.__p)

    def __add__(self, other):
        return Fourvector(self.__E + other.__E, self.__p + other.__p)

    def __iadd__(self, other):
        self.__E += other.__E
        self.__p += other.__p
        # BUG FIX: keep the cached [E, p] array in sync after in-place update.
        self._fourv = np.append(self.__E, self.__p)
        return self

    def __sub__(self, other):
        return Fourvector(self.__E - other.__E, self.__p - other.__p)

    def __isub__(self, other):
        self.__E -= other.__E
        self.__p -= other.__p
        # BUG FIX: keep the cached [E, p] array in sync after in-place update.
        self._fourv = np.append(self.__E, self.__p)
        return self

    def inner(self, other):
        """Return the Minkowski inner product E1*E2 - p1.p2 of two four-vectors."""
        return (self.__E * other.__E) - np.dot(self.__p, other.__p)

    def magp_sq(self):
        """Return |p|^2, the squared magnitude of the momentum."""
        return np.dot(self.__p, self.__p)

    def magp(self):
        """Return |p|, the magnitude of the momentum."""
        return np.sqrt(np.dot(self.__p, self.__p))

    def boost(self, beta=0.0):
        """Return this four-vector boosted along z by velocity beta = v/c.

        Lorentz transform in the z-direction only.
        (Original open question kept: does it still apply for p if you use E
        instead of E/c?)
        """
        gamma = 1 / np.sqrt(1 - beta ** 2)
        return Fourvector(gamma * (self._fourv[0] - beta * self._fourv[3]),
                          [self._fourv[1], self._fourv[2],
                           gamma * (self._fourv[3] - beta * self._fourv[0])])
| [
"numpy.append",
"numpy.array",
"numpy.dot",
"numpy.sqrt"
] | [((472, 483), 'numpy.array', 'np.array', (['p'], {}), '(p)\n', (480, 483), True, 'import numpy as np\n'), ((551, 580), 'numpy.append', 'np.append', (['self.__E', 'self.__p'], {}), '(self.__E, self.__p)\n', (560, 580), True, 'import numpy as np\n'), ((2212, 2238), 'numpy.dot', 'np.dot', (['self.__p', 'self.__p'], {}), '(self.__p, self.__p)\n', (2218, 2238), True, 'import numpy as np\n'), ((2017, 2044), 'numpy.dot', 'np.dot', (['self.__p', 'other.__p'], {}), '(self.__p, other.__p)\n', (2023, 2044), True, 'import numpy as np\n'), ((2407, 2433), 'numpy.dot', 'np.dot', (['self.__p', 'self.__p'], {}), '(self.__p, self.__p)\n', (2413, 2433), True, 'import numpy as np\n'), ((2670, 2692), 'numpy.sqrt', 'np.sqrt', (['(1 - beta ** 2)'], {}), '(1 - beta ** 2)\n', (2677, 2692), True, 'import numpy as np\n')] |
import cv2
import numpy as np
def delta(line1, line2):
    """Estimate the vertical offset between two adjacent image columns.

    Trims ``line1`` by 0..4 pixels at both ends and slides the trimmed strip
    over ``line2`` (from the top, then end-aligned from the bottom), scoring
    each candidate position by the mean absolute pixel difference.

    Parameters
    ----------
    line1, line2 : ndarray of shape (x, 1, channels)
        Two image columns (e.g. slices of a cv2 BGR image).

    Returns
    -------
    int
        Best offset: positive = measured from the top, negative = measured
        from the bottom; 0 when the columns already align.
    """
    x = line1.shape[0]

    def _mean_abs_diff(a, b):
        # BUG FIX: cast to a signed type first -- with uint8 input (cv2
        # images) the subtraction would wrap around and corrupt the score.
        d = np.abs(a.astype(np.int64) - b.astype(np.int64))
        return np.sum(d) / np.size(d)

    best_score = 100000
    best_offset = 0
    for trim in range(5):
        strip = line1[trim:(x - trim), :, :]
        length = x - 2 * trim
        # Slide the strip down from the top of line2.
        for off in range(5):
            if off + length > x:
                break
            score = _mean_abs_diff(strip, line2[off:off + length, :, :])
            if score < best_score:
                best_score = score
                best_offset = off
        # Slide the strip up from the bottom of line2.
        for off in range(5):
            if x - off - length < 0:
                break
            score = _mean_abs_diff(strip, line2[x - off - length:x - off, :, :])
            if score < best_score:
                best_score = score
                best_offset = -off
    return best_offset
if __name__ == "__main__":
image = "webcams2.webmpulse1.png"
m1 = cv2.imread(image)
dlist = []
l1 = int(m1.shape[0] / 2)
for k in range(m1.shape[1]):
line1 = m1[:l1,k:k+1,:]
line2 = m1[:l1,k+1:k+2,:]
d1 = delta(line1, line2)
print(d1)
dlist.append(d1)
print(dlist)
m2 = np.zeros((50,len(dlist)+1,3),np.uint8)
t0 = 25
idx = 0
cv2.circle(m2,(idx,t0),3,(0,0,255),-1)
for dt in dlist:
if abs(dt) > 5 :
t0 = 25
continue
t0 = t0 + dt
if t0 > m2.shape[0] :
t0 = 25
continue
idx = idx + 1
cv2.circle(m2,(idx*3,t0),3,(0,0,255))
print(t0, end=" ")
cv2.imwrite("pulse_sin1.jpg", m2)
| [
"cv2.imwrite",
"numpy.abs",
"numpy.size",
"numpy.sum",
"cv2.circle",
"cv2.imread"
] | [((1421, 1438), 'cv2.imread', 'cv2.imread', (['image'], {}), '(image)\n', (1431, 1438), False, 'import cv2\n'), ((1753, 1798), 'cv2.circle', 'cv2.circle', (['m2', '(idx, t0)', '(3)', '(0, 0, 255)', '(-1)'], {}), '(m2, (idx, t0), 3, (0, 0, 255), -1)\n', (1763, 1798), False, 'import cv2\n'), ((2071, 2104), 'cv2.imwrite', 'cv2.imwrite', (['"""pulse_sin1.jpg"""', 'm2'], {}), "('pulse_sin1.jpg', m2)\n", (2082, 2104), False, 'import cv2\n'), ((2002, 2047), 'cv2.circle', 'cv2.circle', (['m2', '(idx * 3, t0)', '(3)', '(0, 0, 255)'], {}), '(m2, (idx * 3, t0), 3, (0, 0, 255))\n', (2012, 2047), False, 'import cv2\n'), ((574, 589), 'numpy.abs', 'np.abs', (['(d2 - d1)'], {}), '(d2 - d1)\n', (580, 589), True, 'import numpy as np\n'), ((1090, 1105), 'numpy.abs', 'np.abs', (['(d2 - d1)'], {}), '(d2 - d1)\n', (1096, 1105), True, 'import numpy as np\n'), ((633, 645), 'numpy.sum', 'np.sum', (['diff'], {}), '(diff)\n', (639, 645), True, 'import numpy as np\n'), ((646, 659), 'numpy.size', 'np.size', (['diff'], {}), '(diff)\n', (653, 659), True, 'import numpy as np\n'), ((1149, 1161), 'numpy.sum', 'np.sum', (['diff'], {}), '(diff)\n', (1155, 1161), True, 'import numpy as np\n'), ((1162, 1175), 'numpy.size', 'np.size', (['diff'], {}), '(diff)\n', (1169, 1175), True, 'import numpy as np\n')] |
import os
import glob
import re
import json
import random
import cv2
import numpy as np
class CaltechDataset:
    """Caltech Pedestrian detection dataset ('person' boxes only).

    On first construction the dataset parses ``annotations.json`` (set00
    only), keeps frames that have at least one 'person' box and an existing
    image file, shuffles, and splits 75/25 into train/test lists, which are
    cached as json files under ``root``. Later constructions reload the
    cached split.
    """

    def __init__(self, root, transform=None, target_transform=None, is_test=False, keep_difficult=False, label_file=None):
        # keep_difficult / label_file are accepted for API compatibility but unused here.
        self.root = root
        self.transform = transform
        self.target_transform = target_transform
        self.datalist = []
        self.train_datalist = []
        self.test_datalist = []
        self.train_list_name = "train_data_list.json"
        self.test_list_name = "test_data_list.json"
        self.class_names = ('BACKGROUND', 'person')
        self.is_test = is_test
        train_list_path = os.path.join(root, self.train_list_name)
        test_list_path = os.path.join(root, self.test_list_name)
        if os.path.exists(train_list_path) and os.path.exists(test_list_path):
            # Reuse the cached split. `with` blocks fix the original's leaked
            # file handles from json.load(open(...)).
            with open(train_list_path) as f:
                self.train_datalist = json.load(f)
            with open(test_list_path) as f:
                self.test_datalist = json.load(f)
        else:
            self._build_datalist(root)
            random.shuffle(self.datalist)
            part = int(len(self.datalist) * 0.75)
            self.train_datalist = self.datalist[0:part]
            self.test_datalist = self.datalist[part:]
            print("train_datalist length: {0}, test_datalist length: {1}".format(
                len(self.train_datalist), len(self.test_datalist)))
            with open(train_list_path, 'w') as f:
                json.dump(self.train_datalist, f)
            with open(test_list_path, 'w') as f:
                json.dump(self.test_datalist, f)
        print("dataset loaded!")

    def _build_datalist(self, root):
        """Parse annotations.json into self.datalist (set00 only, 'person' only)."""
        with open(os.path.join(root, "annotations.json")) as f:
            annotations = json.load(f)
        for set_num in annotations.keys():
            if set_num != "set00":
                continue
            for V_num in annotations[set_num].keys():
                for frame_num in annotations[set_num][V_num]['frames'].keys():
                    data = annotations[set_num][V_num]['frames'].get(frame_num)
                    image_name = set_num + "_" + V_num + "_" + frame_num + ".png"
                    image_path = os.path.join(root, 'images', set_num, image_name)
                    # Skip frames whose image is missing on disk.
                    if not os.path.exists(image_path):
                        continue
                    boxes = []
                    labels = []
                    for datum in data:
                        if datum['lbl'] != 'person':
                            continue
                        x, y, w, h = datum['pos']
                        # Convert [x, y, w, h] -> [x0, y0, x1, y1].
                        boxes.append([x, y, x + w, y + h])
                        labels.append(1)
                    if not boxes:
                        continue
                    self.datalist.append({"image_path": image_path, "boxes": boxes, "labels": labels})

    @staticmethod
    def _read_image_ids(image_sets_file):
        """Read one image id per line from a plain-text list file."""
        ids = []
        with open(image_sets_file) as f:
            for line in f:
                ids.append(line.rstrip())
        return ids

    def show_image_and_label(self, index):
        """Draw the boxes of a *training* sample and block on a cv2 window."""
        index = index % len(self.train_datalist)
        data = self.train_datalist[index]
        image = cv2.imread(data["image_path"])
        for box in data["boxes"]:
            x0, y0, x1, y1 = [int(v) for v in box]
            cv2.rectangle(image, (x0, y0), (x1, y1), (0, 0, 255), 1)
        cv2.imshow("image", image)
        cv2.waitKey(-1)

    def __len__(self):
        if self.is_test:
            return len(self.test_datalist)
        else:
            return len(self.train_datalist)

    def __getitem__(self, index):
        # BUG FIX: the branch was inverted (`if not self.is_test` selected the
        # *test* list); train mode now reads train data and vice versa,
        # consistent with __len__.
        if self.is_test:
            data = self.test_datalist[index % len(self.test_datalist)]
        else:
            data = self.train_datalist[index % len(self.train_datalist)]
        boxes = np.array(data["boxes"], dtype=np.float32)
        labels = np.array(data["labels"], dtype=np.int64)
        image = cv2.imread(data["image_path"])
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        if self.transform:
            image, boxes, labels = self.transform(image, boxes, labels)
        if self.target_transform:
            boxes, labels = self.target_transform(boxes, labels)
        return image, boxes, labels
if __name__ == '__main__':
    # Smoke test: build the dataset, fetch one sample, then display it.
    # (The original's leftover `a = []` truthiness debug check was removed.)
    dataset = CaltechDataset("caltech_data")
    dataset[334]
    dataset.show_image_and_label(334)
| [
"cv2.rectangle",
"os.path.exists",
"random.shuffle",
"os.path.join",
"cv2.imshow",
"numpy.array",
"cv2.cvtColor",
"cv2.waitKey",
"cv2.imread"
] | [((3675, 3697), 'cv2.imread', 'cv2.imread', (['image_path'], {}), '(image_path)\n', (3685, 3697), False, 'import cv2\n'), ((3852, 3878), 'cv2.imshow', 'cv2.imshow', (['"""image"""', 'image'], {}), "('image', image)\n", (3862, 3878), False, 'import cv2\n'), ((3887, 3902), 'cv2.waitKey', 'cv2.waitKey', (['(-1)'], {}), '(-1)\n', (3898, 3902), False, 'import cv2\n'), ((4383, 4424), 'numpy.array', 'np.array', (["data['boxes']"], {'dtype': 'np.float32'}), "(data['boxes'], dtype=np.float32)\n", (4391, 4424), True, 'import numpy as np\n'), ((4442, 4482), 'numpy.array', 'np.array', (["data['labels']"], {'dtype': 'np.int64'}), "(data['labels'], dtype=np.int64)\n", (4450, 4482), True, 'import numpy as np\n'), ((4499, 4521), 'cv2.imread', 'cv2.imread', (['image_path'], {}), '(image_path)\n', (4509, 4521), False, 'import cv2\n'), ((4538, 4576), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_BGR2RGB'], {}), '(image, cv2.COLOR_BGR2RGB)\n', (4550, 4576), False, 'import cv2\n'), ((2247, 2276), 'random.shuffle', 'random.shuffle', (['self.datalist'], {}), '(self.datalist)\n', (2261, 2276), False, 'import random\n'), ((3787, 3843), 'cv2.rectangle', 'cv2.rectangle', (['image', '(x0, y0)', '(x1, y1)', '(0, 0, 255)', '(1)'], {}), '(image, (x0, y0), (x1, y1), (0, 0, 255), 1)\n', (3800, 3843), False, 'import cv2\n'), ((651, 691), 'os.path.join', 'os.path.join', (['root', 'self.train_list_name'], {}), '(root, self.train_list_name)\n', (663, 691), False, 'import os\n'), ((712, 751), 'os.path.join', 'os.path.join', (['root', 'self.test_list_name'], {}), '(root, self.test_list_name)\n', (724, 751), False, 'import os\n'), ((803, 843), 'os.path.join', 'os.path.join', (['root', 'self.train_list_name'], {}), '(root, self.train_list_name)\n', (815, 843), False, 'import os\n'), ((894, 933), 'os.path.join', 'os.path.join', (['root', 'self.test_list_name'], {}), '(root, self.test_list_name)\n', (906, 933), False, 'import os\n'), ((991, 1029), 'os.path.join', 'os.path.join', (['root', 
'"""annotations.json"""'], {}), "(root, 'annotations.json')\n", (1003, 1029), False, 'import os\n'), ((2618, 2658), 'os.path.join', 'os.path.join', (['root', 'self.train_list_name'], {}), '(root, self.train_list_name)\n', (2630, 2658), False, 'import os\n'), ((2713, 2752), 'os.path.join', 'os.path.join', (['root', 'self.test_list_name'], {}), '(root, self.test_list_name)\n', (2725, 2752), False, 'import os\n'), ((1495, 1544), 'os.path.join', 'os.path.join', (['root', '"""images"""', 'set_num', 'image_name'], {}), "(root, 'images', set_num, image_name)\n", (1507, 1544), False, 'import os\n'), ((1576, 1602), 'os.path.exists', 'os.path.exists', (['image_path'], {}), '(image_path)\n', (1590, 1602), False, 'import os\n')] |
import pandas as pd
import numpy as np
import torch
import sys
from tqdm import tqdm
import mysql.connector
import io
import time
import ML
import numpy
import csv
import os
from tqdm import tqdm
class uci_loader():
    """Iterates over the daily UCI csv chunks stored under /mnt/prod_uci/check/."""

    def __init__(self):
        # (Removed the redundant function-local `import os`; os is imported
        # at module level.)
        self.path = "/mnt/prod_uci/check/"
        self.a = os.listdir(self.path)
        self.length = len(self.a)
        self.iter = 0
        self.uci_data = None

    def _next(self):
        """Load the next csv chunk, advance the cursor and return the frame."""
        print("loading", self.a[self.iter])
        # Reuse self.path instead of repeating the directory literal.
        self.uci_data = pd.read_csv(
            self.path + self.a[self.iter],
            usecols=['date', 'user_id', 'content_id', 'article_opened',
                     'article_scrolled_half', 'article_scrolled_end',
                     'articles_shared'])
        self.iter += 1
        return self.uci_data
def user_update():
    """Incrementally refresh embeddings for the users listed in the global ``u2``.

    Streams every UCI chunk via ``uci_loader``; for each chunk and each user
    it computes the score-weighted mean of the content vectors
    (``con_con_vec``) and merges it with the user's running embedding.

    Returns
    -------
    dict
        user_id -> (embedding, total_score, article_count)
    """
    user_content_vectors2 = {}
    load = uci_loader()
    for chunk_idx in range(load.length):
        uci_data = load._next()
        uci_data = uci_filtering(uci_data)
        for user_id in u2:
            user_id = int(user_id)
            rows = uci_data[uci_data['user_id'] == user_id]
            c_new = rows.content_id.values
            s_new = rows.Score1.values.astype(numpy.int64)
            # Renamed from `sum`, which shadowed the builtin.
            weighted = 0
            for i in range(len(s_new)):
                if c_new[i] in con_con_vec:
                    weighted = weighted + s_new[i] * con_con_vec[c_new[i]][0]
            s_total = np.sum(s_new)
            # BUG FIX: guard before dividing -- the original divided first,
            # emitting a NaN/warning, then overwrote the result with 0.
            if s_total == 0:
                emb_new = 0
            else:
                emb_new = weighted / s_total
            # Seed the merge from the global snapshot on the first chunk,
            # then keep folding into the local result.
            if chunk_idx == 0:
                emb_old, s_old, c_old = user_content_vectors[user_id]
            else:
                emb_old, s_old, c_old = user_content_vectors2[user_id]
            user_c = ((s_total * emb_new) + (s_old * emb_old)) / (s_total + s_old)
            c = c_old + len(c_new)
            sc = s_old + s_total
            user_content_vectors2[user_id] = (user_c, sc, c)
    return user_content_vectors2
def uci_filtering(uci, content_keys=None):
    """Keep known-content rows and attach a weighted engagement score.

    Parameters
    ----------
    uci : pandas.DataFrame
        Raw UCI rows with the event-count columns article_opened,
        article_scrolled_half, article_scrolled_end, articles_shared.
    content_keys : iterable, optional
        Content ids to keep. Defaults to the keys of the global
        ``con_con_vec`` (the original hard-coded behaviour).

    Returns
    -------
    pandas.DataFrame
        Rows restricted to known content, with a ``Score1`` column
        (opened*1 + half*2 + end*3 + shared*4 on binarized events) and
        zero-score rows removed.
    """
    if content_keys is None:
        content_keys = con_con_vec.keys()
    # .copy() avoids pandas' SettingWithCopy warning on the filtered view.
    uci = uci[uci['content_id'].isin(list(content_keys))].copy()
    uci = uci.dropna()
    # x/x maps any non-zero count to 1 and 0 to NaN; fillna(0) then turns
    # the NaNs back into 0 -- i.e. a binary "did this event happen" flag.
    for col in ['article_opened', 'article_scrolled_half',
                'article_scrolled_end', 'articles_shared']:
        uci[col] = uci[col] / uci[col]
    uci = uci.fillna(0)
    uci['Score1'] = (uci['article_opened'] * 1
                     + uci['article_scrolled_half'] * 2
                     + uci['article_scrolled_end'] * 3
                     + uci['articles_shared'] * 4)
    uci = uci[uci.Score1 != 0]
    return uci
#content-vectors loading and UCI preprocessing
# con_con_vec maps content_id -> container whose element [0] is the content
# embedding used below (loaded from a pickled .npy dict).
con_con_vec= np.load('/home/ubuntu/recSysDB/rawS3data/con_con_vec.npy',allow_pickle='TRUE').item()
print('Content-vectors loaded successfully')
# Historical UCI dump used to bootstrap the per-user embeddings.
uci= pd.read_csv("/home/ubuntu/recSysDB/rawS3data/uci_meta_5_6_20",usecols=['date','user_id','content_id','article_opened','article_scrolled_half','article_scrolled_end','articles_shared'])
uci=uci_filtering(uci)
# user_content_vectors: user_id -> (embedding, total_score, article_count)
user_content_vectors={}
##Training Loop
for user_id in tqdm(uci.user_id.unique()):
    user_id=int(user_id)
    c=uci[uci['user_id']==user_id].content_id.values
    s=uci[uci['user_id']==user_id].Score1.values.astype(numpy.int64)
    # NOTE: `sum` shadows the builtin; it accumulates the score-weighted
    # content embeddings for this user.
    sum=0
    for i in range(len(s)):
        if c[i] in con_con_vec.keys():
            sum=sum+(s[i]*(con_con_vec[c[i]][0]))
    # Normalise by the user's total score. uci_filtering drops Score1 == 0
    # rows, so np.sum(s) is non-zero for every user reached here.
    sum=sum/np.sum(s)
    user_content_vectors[user_id]=(sum,np.sum(s),len(c))
def adapt_array(array):
    """
    Serialise a numpy array into raw bytes suitable for a BLOB column.

    The array is written with ``numpy.save`` into an in-memory buffer
    whose full contents are then returned.
    """
    buffer = io.BytesIO()
    numpy.save(buffer, array)
    return buffer.getvalue()
def convert_array(blob):
    """
    Deserialise bytes produced by ``adapt_array`` back into a numpy array.
    """
    return numpy.load(io.BytesIO(blob))
# Connect to the MySQL store that holds the per-user embeddings.
main=ML.ML_DB()
mydb,cor=main.connect_to_db(host='credentials')
s='use story_sorted'
cor.execute(s)
# Users with few articles (< 24): their stored embedding is merged with the
# freshly computed one below.
s="select user_id,user_embedding_1,score_total,articles_no from user_embedding where articles_no < 24 "
cor.execute(s)
u1=cor.fetchall()
# Users with many articles (> 24): updated incrementally via user_update().
# NOTE(review): rows with articles_no == 24 match neither query -- confirm
# whether one of the two comparisons should be inclusive.
s="select user_id from user_embedding where articles_no > 24 "
cor.execute(s)
u2=cor.fetchall()
# Keep only the ids we actually computed a fresh embedding for.
u2=[u2[i][0] for i in range(len(u2)) if u2[i][0] in user_content_vectors.keys()]
#For Merging
for uid,emb_old,sc_old,c_old in tqdm([u1[i] for i in range(len(u1))]):
    if uid in user_content_vectors.keys():
        emb_nw,sc_new,c_new=user_content_vectors[uid]
        # Stored embedding arrives as a BLOB; decode, merge as a
        # score-weighted average, then re-serialise for storage.
        emb_old=convert_array(emb_old)
        user_c=adapt_array(((sc_new*emb_nw)+(sc_old*emb_old))/(sc_new+sc_old))
        c=c_old+c_new
        sc=sc_old+sc_new
        s="INSERT INTO user_embedding (user_id,user_embedding_1,articles_no,score_total) VALUES(%s,%s,%s,%s) ON DUPLICATE KEY UPDATE user_embedding_1=%s , articles_no=%s, score_total=%s"
        b=(uid, user_c, c, sc.item(), user_c, c, sc.item())
        cor.execute(s,b)
# straight update
user_content_vect2=user_update()
for uid,emb_u in tqdm(user_content_vect2.items()):
    # emb_u is (embedding, total_score, article_count).
    user_c=adapt_array(emb_u[0])
    sc=emb_u[1]
    c=emb_u[2]
    #Don't need to fetch as directly updating
    s="UPDATE user_embedding SET user_embedding_1=%s , articles_no=%s, score_total=%s WHERE user_id=%s AND articles_no > 24"
    b=(user_c, c, sc.item(), uid)
    cor.execute(s,b)
#i for new users in uci-file
# INSERT IGNORE: only users without an existing row are inserted here.
for uid,emb_u in tqdm(user_content_vectors.items()):
    user_c=adapt_array(emb_u[0])
    sc=emb_u[1]
    c=emb_u[2]
    s="INSERT IGNORE INTO user_embedding (user_id,user_embedding_1,articles_no,score_total) VALUES (%s,%s,%s,%s)"
    b=(uid, user_c, c, sc.item())
    cor.execute(s,b)
# Persist all of the writes above, then release the connection.
mydb.commit()
main.close_connection()
| [
"ML.ML_DB",
"os.listdir",
"pandas.read_csv",
"io.BytesIO",
"numpy.sum",
"numpy.load",
"numpy.save"
] | [((2709, 2909), 'pandas.read_csv', 'pd.read_csv', (['"""/home/ubuntu/recSysDB/rawS3data/uci_meta_5_6_20"""'], {'usecols': "['date', 'user_id', 'content_id', 'article_opened', 'article_scrolled_half',\n 'article_scrolled_end', 'articles_shared']"}), "('/home/ubuntu/recSysDB/rawS3data/uci_meta_5_6_20', usecols=[\n 'date', 'user_id', 'content_id', 'article_opened',\n 'article_scrolled_half', 'article_scrolled_end', 'articles_shared'])\n", (2720, 2909), True, 'import pandas as pd\n'), ((3837, 3847), 'ML.ML_DB', 'ML.ML_DB', ([], {}), '()\n', (3845, 3847), False, 'import ML\n'), ((3552, 3564), 'io.BytesIO', 'io.BytesIO', ([], {}), '()\n', (3562, 3564), False, 'import io\n'), ((3569, 3591), 'numpy.save', 'numpy.save', (['out', 'array'], {}), '(out, array)\n', (3579, 3591), False, 'import numpy\n'), ((3769, 3785), 'io.BytesIO', 'io.BytesIO', (['blob'], {}), '(blob)\n', (3779, 3785), False, 'import io\n'), ((3814, 3829), 'numpy.load', 'numpy.load', (['out'], {}), '(out)\n', (3824, 3829), False, 'import numpy\n'), ((315, 336), 'os.listdir', 'os.listdir', (['self.path'], {}), '(self.path)\n', (325, 336), False, 'import os\n'), ((502, 694), 'pandas.read_csv', 'pd.read_csv', (["('/mnt/prod_uci/check/' + self.a[self.iter])"], {'usecols': "['date', 'user_id', 'content_id', 'article_opened', 'article_scrolled_half',\n 'article_scrolled_end', 'articles_shared']"}), "('/mnt/prod_uci/check/' + self.a[self.iter], usecols=['date',\n 'user_id', 'content_id', 'article_opened', 'article_scrolled_half',\n 'article_scrolled_end', 'articles_shared'])\n", (513, 694), True, 'import pandas as pd\n'), ((2573, 2652), 'numpy.load', 'np.load', (['"""/home/ubuntu/recSysDB/rawS3data/con_con_vec.npy"""'], {'allow_pickle': '"""TRUE"""'}), "('/home/ubuntu/recSysDB/rawS3data/con_con_vec.npy', allow_pickle='TRUE')\n", (2580, 2652), True, 'import numpy as np\n'), ((3289, 3298), 'numpy.sum', 'np.sum', (['s'], {}), '(s)\n', (3295, 3298), True, 'import numpy as np\n'), ((3338, 3347), 'numpy.sum', 
'np.sum', (['s'], {}), '(s)\n', (3344, 3347), True, 'import numpy as np\n'), ((1383, 1396), 'numpy.sum', 'np.sum', (['s_new'], {}), '(s_new)\n', (1389, 1396), True, 'import numpy as np\n'), ((1412, 1425), 'numpy.sum', 'np.sum', (['s_new'], {}), '(s_new)\n', (1418, 1425), True, 'import numpy as np\n'), ((1795, 1808), 'numpy.sum', 'np.sum', (['s_new'], {}), '(s_new)\n', (1801, 1808), True, 'import numpy as np\n'), ((1721, 1734), 'numpy.sum', 'np.sum', (['s_new'], {}), '(s_new)\n', (1727, 1734), True, 'import numpy as np\n'), ((1680, 1693), 'numpy.sum', 'np.sum', (['s_new'], {}), '(s_new)\n', (1686, 1693), True, 'import numpy as np\n')] |
"""
gesture.py is the main file used to for live detection
"""
import tensorflow as tf
import numpy as np
import cv2
# Rolling prediction state: current and previous frame's predicted class.
c_frame = -1
p_frame = -1
# Setting threshold for number of frames to compare
threshold_frames = 50
# loading up the model
session = tf.Session()
# recreate the network graph,
saver = tf.train.import_meta_graph('../model/train_model.meta')
# restoring the weights
saver.restore(session, tf.train.latest_checkpoint('../model'))
# default graph
graph = tf.get_default_graph()
# Now, let's get hold of the operation that we can be processed to get the output.
# y is the tensor that is the prediction of the network.
# NOTE(review): the tensor names below contain a space ("y: 0"). TF splits on
# ':' and int(" 0") parses, so this likely resolves op "y", output 0, but the
# conventional spelling is "y:0" -- confirm against the saved graph.
y = graph.get_tensor_by_name("y: 0")
# feed the images to the input placeholders
X = graph.get_tensor_by_name("X: 0")
y_true = graph.get_tensor_by_name("y_true: 0")
# Dummy one-hot-shaped input (10 classes) required by the y_true feed.
y_test_imgs = np.zeros((1, 10))
# live detection
def live(frame, y_test_images):
    """
    Classify a single pre-processed webcam frame with the restored network.

    :param frame: image to classify (resized to 50x50 internally)
    :param y_test_images: dummy array fed to the ``y_true`` placeholder
    :return: network output probabilities as a numpy array
    """
    img_size = 50
    n_channels = 3
    image = frame
    # Debug window showing the exact image being classified.
    cv2.imshow('test', image)
    # preprocessing step: resize, then scale pixel values to [0, 1].
    # NOTE: the original passed `0, 0, cv2.INTER_LINEAR` positionally, which
    # binds 0 to the unused `dst` parameter; the keyword form is unambiguous
    # (fx/fy keep their 0 defaults).
    img = cv2.resize(image, (img_size, img_size), interpolation=cv2.INTER_LINEAR)
    images = np.array([img], dtype=np.uint8).astype('float32')
    images = np.multiply(images, 1.0 / 255.0)
    # The input to the network is of shape [None, img_size, img_size, n_channels].
    X_batch = images.reshape(1, img_size, img_size, n_channels)
    # Feed dict required to evaluate the prediction tensor y.
    feed_dict_testing = {X: X_batch, y_true: y_test_images}
    result = session.run(y, feed_dict=feed_dict_testing)
    return np.array(result)
# camera object (default device 0)
capture = cv2.VideoCapture(0)
# Request a 700x700 frame. Property id 3 is CAP_PROP_FRAME_WIDTH and 4 is
# CAP_PROP_FRAME_HEIGHT; the original set property 4 twice, leaving the
# width at its default.
capture.set(3, 700)
capture.set(4, 700)
# Main capture loop: classify the hand region each frame and report a gesture
# once the same class has been predicted `threshold_frames` times in a row.
counter = 0
i = 0
while i < 100000000:
    ret, frame = capture.read()
    # Region of interest where the hand is expected.
    cv2.rectangle(frame, (300, 300), (100, 100), (0, 255, 0), 0)
    crop_frame = frame[100:300, 100:300]
    # blur the images
    blur = cv2.GaussianBlur(crop_frame, (3, 3), 0)
    # convert to HSV colour space
    hsv = cv2.cvtColor(blur, cv2.COLOR_BGR2HSV)
    # create a binary image with where white will be skin colour and rest in black
    mask2 = cv2.inRange(hsv, np.array([2, 50, 50]), np.array([15, 255, 255]))
    median_blur = cv2.medianBlur(mask2, 5)
    # displaying frames
    cv2.imshow('main', frame)
    cv2.imshow('masked', median_blur)
    # Resize to the network's 50x50 input.
    # (The original assigned the resized image to a misspelled variable
    # `memedian_blur` and threw it away.)
    median_blur = cv2.resize(median_blur, (50, 50))
    # Replicate the single channel into 3 -> shape (50, 50, 3).
    # Equivalent to the original single np.stack + double np.rollaxis, which
    # also transposed the image; the duplicated second np.stack was a bug.
    median_blur = np.stack((median_blur.T,) * 3, axis=-1)
    # Rotate and flip so the orientation matches the training images.
    # NOTE(review): cv2.wrapAffine does not exist; warpAffine with the
    # rotation matrix and output size is the intended call -- verify the
    # resulting orientation against the training data.
    rotate_img = cv2.getRotationMatrix2D((25, 25), 270, 1)
    median_blur = cv2.warpAffine(median_blur, rotate_img, (50, 50))
    median_blur = np.fliplr(median_blur)
    # exponent to float
    np.set_printoptions(formatter={'float_kind': '{:f}'.format})
    answer = live(median_blur, y_test_imgs)
    # Index of the maximum probability = predicted class for this frame.
    c_frame = np.argmax(answer)
    if c_frame == p_frame:
        counter += 1          # original `counter = + 1` just reset it to 1
        p_frame = c_frame
        if counter == threshold_frames:
            print(answer)
            print("Answer: "+str(c_frame))
            counter = 0
            i = 0
    else:
        p_frame = c_frame
        counter = 0
    # close the output video by pressing 'ESC'
    k = cv2.waitKey(2) & 0xFF
    if k == 27:
        break
    i += 1                    # original `i = + 1` pinned i at 1
capture.release()
cv2.destroyAllWindows()
| [
"cv2.rectangle",
"numpy.rollaxis",
"cv2.imshow",
"numpy.array",
"cv2.destroyAllWindows",
"numpy.multiply",
"tensorflow.Session",
"cv2.medianBlur",
"numpy.stack",
"cv2.waitKey",
"tensorflow.get_default_graph",
"numpy.fliplr",
"tensorflow.train.import_meta_graph",
"cv2.cvtColor",
"cv2.getR... | [((253, 265), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (263, 265), True, 'import tensorflow as tf\n'), ((305, 360), 'tensorflow.train.import_meta_graph', 'tf.train.import_meta_graph', (['"""../model/train_model.meta"""'], {}), "('../model/train_model.meta')\n", (331, 360), True, 'import tensorflow as tf\n'), ((473, 495), 'tensorflow.get_default_graph', 'tf.get_default_graph', ([], {}), '()\n', (493, 495), True, 'import tensorflow as tf\n'), ((817, 834), 'numpy.zeros', 'np.zeros', (['(1, 10)'], {}), '((1, 10))\n', (825, 834), True, 'import numpy as np\n'), ((1799, 1818), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(0)'], {}), '(0)\n', (1815, 1818), False, 'import cv2\n'), ((3760, 3783), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (3781, 3783), False, 'import cv2\n'), ((409, 447), 'tensorflow.train.latest_checkpoint', 'tf.train.latest_checkpoint', (['"""../model"""'], {}), "('../model')\n", (435, 447), True, 'import tensorflow as tf\n'), ((1108, 1133), 'cv2.imshow', 'cv2.imshow', (['"""test"""', 'image'], {}), "('test', image)\n", (1118, 1133), False, 'import cv2\n'), ((1190, 1253), 'cv2.resize', 'cv2.resize', (['image', '(img_size, img_size)', '(0)', '(0)', 'cv2.INTER_LINEAR'], {}), '(image, (img_size, img_size), 0, 0, cv2.INTER_LINEAR)\n', (1200, 1253), False, 'import cv2\n'), ((1290, 1322), 'numpy.array', 'np.array', (['images'], {'dtype': 'np.uint8'}), '(images, dtype=np.uint8)\n', (1298, 1322), True, 'import numpy as np\n'), ((1374, 1406), 'numpy.multiply', 'np.multiply', (['images', '(1.0 / 255.0)'], {}), '(images, 1.0 / 255.0)\n', (1385, 1406), True, 'import numpy as np\n'), ((1754, 1770), 'numpy.array', 'np.array', (['result'], {}), '(result)\n', (1762, 1770), True, 'import numpy as np\n'), ((1945, 2005), 'cv2.rectangle', 'cv2.rectangle', (['frame', '(300, 300)', '(100, 100)', '(0, 255, 0)', '(0)'], {}), '(frame, (300, 300), (100, 100), (0, 255, 0), 0)\n', (1958, 2005), False, 'import cv2\n'), ((2080, 2119), 
'cv2.GaussianBlur', 'cv2.GaussianBlur', (['crop_frame', '(3, 3)', '(0)'], {}), '(crop_frame, (3, 3), 0)\n', (2096, 2119), False, 'import cv2\n'), ((2164, 2201), 'cv2.cvtColor', 'cv2.cvtColor', (['blur', 'cv2.COLOR_BGR2HSV'], {}), '(blur, cv2.COLOR_BGR2HSV)\n', (2176, 2201), False, 'import cv2\n'), ((2381, 2405), 'cv2.medianBlur', 'cv2.medianBlur', (['mask2', '(5)'], {}), '(mask2, 5)\n', (2395, 2405), False, 'import cv2\n'), ((2434, 2459), 'cv2.imshow', 'cv2.imshow', (['"""main"""', 'frame'], {}), "('main', frame)\n", (2444, 2459), False, 'import cv2\n'), ((2464, 2497), 'cv2.imshow', 'cv2.imshow', (['"""masked"""', 'median_blur'], {}), "('masked', median_blur)\n", (2474, 2497), False, 'import cv2\n'), ((2541, 2574), 'cv2.resize', 'cv2.resize', (['median_blur', '(50, 50)'], {}), '(median_blur, (50, 50))\n', (2551, 2574), False, 'import cv2\n'), ((2619, 2647), 'numpy.stack', 'np.stack', (['((median_blur,) * 3)'], {}), '((median_blur,) * 3)\n', (2627, 2647), True, 'import numpy as np\n'), ((2691, 2719), 'numpy.stack', 'np.stack', (['((median_blur,) * 3)'], {}), '((median_blur,) * 3)\n', (2699, 2719), True, 'import numpy as np\n'), ((2776, 2817), 'numpy.rollaxis', 'np.rollaxis', (['median_blur'], {'axis': '(1)', 'start': '(0)'}), '(median_blur, axis=1, start=0)\n', (2787, 2817), True, 'import numpy as np\n'), ((2836, 2877), 'numpy.rollaxis', 'np.rollaxis', (['median_blur'], {'axis': '(2)', 'start': '(0)'}), '(median_blur, axis=2, start=0)\n', (2847, 2877), True, 'import numpy as np\n'), ((2956, 2997), 'cv2.getRotationMatrix2D', 'cv2.getRotationMatrix2D', (['(25, 25)', '(270)', '(1)'], {}), '((25, 25), 270, 1)\n', (2979, 2997), False, 'import cv2\n'), ((3016, 3043), 'cv2.wrapAffine', 'cv2.wrapAffine', (['median_blur'], {}), '(median_blur)\n', (3030, 3043), False, 'import cv2\n'), ((3062, 3084), 'numpy.fliplr', 'np.fliplr', (['median_blur'], {}), '(median_blur)\n', (3071, 3084), True, 'import numpy as np\n'), ((3113, 3173), 'numpy.set_printoptions', 'np.set_printoptions', 
([], {'formatter': "{'float_kind': '{:f}'.format}"}), "(formatter={'float_kind': '{:f}'.format})\n", (3132, 3173), True, 'import numpy as np\n'), ((2314, 2335), 'numpy.array', 'np.array', (['[2, 50, 50]'], {}), '([2, 50, 50])\n', (2322, 2335), True, 'import numpy as np\n'), ((2337, 2361), 'numpy.array', 'np.array', (['[15, 255, 255]'], {}), '([15, 255, 255])\n', (2345, 2361), True, 'import numpy as np\n'), ((3677, 3691), 'cv2.waitKey', 'cv2.waitKey', (['(2)'], {}), '(2)\n', (3688, 3691), False, 'import cv2\n')] |
"""Application logic"""
# Import standard library
import random
import itertools
from typing import Tuple, List
# Import modules
import matplotlib as mpl
import matplotlib.cm as cm
import matplotlib.pyplot as plt
import numpy as np
import seagull as sg
import seagull.lifeforms as lf
from loguru import logger
from scipy.signal import convolve2d
logger.disable("seagull") # Do not print logs from Seagull
def generate_sprite(
    n_iters: int = 1,
    extinction: float = 0.125,
    survival: float = 0.375,
    size: int = 180,
    sprite_seed: int = None,
    color_seeds: List[int] = None,
):
    """Render a random pixel-art sprite as a matplotlib figure.

    A noisy 8x4 board is evolved with a customised Game-of-Life rule,
    mirrored horizontally into a symmetric silhouette, outlined, shaded
    with a gradient, and drawn using a randomly generated colormap.

    Parameters
    ----------
    n_iters : int
        Number of Game-of-Life iterations to run.
    extinction : float (0.0 to 1.0)
        Controls how many dead cells stay dead each iteration.
        Default is 0.125 (around 1 cell).
    survival : float (0.0 to 1.0)
        Controls how many live cells stay alive each iteration.
        Default is 0.375 (around 3 cells).
    size : int
        Size of the generated sprite in pixels (size x size).
    sprite_seed : int, optional
        Random seed for the sprite noise.
    color_seeds : list of int, optional
        Seeds, consumed three at a time, for the random colors.

    Returns
    -------
    matplotlib.Figure
    """
    logger.debug("Initializing board")
    board = sg.Board(size=(8, 4))

    logger.debug("Seeding the lifeform")
    if sprite_seed:
        np.random.seed(sprite_seed)
    board.add(lf.Custom(np.random.choice([0, 1], size=(8, 4))), loc=(0, 0))

    logger.debug("Running the simulation")
    sim = sg.Simulator(board)
    sim.run(
        _custom_rule,
        iters=n_iters,
        n_extinct=int(extinction * 8),
        n_survive=int(survival * 8),
    )
    final_state = sim.get_history()[-1]

    logger.debug("Adding outline, gradient, and colors")
    # Mirror for left/right symmetry, then pad with one background cell.
    silhouette = np.pad(
        np.hstack([final_state, np.fliplr(final_state)]),
        mode="constant", pad_width=1, constant_values=1,
    )
    outlined = _add_outline(silhouette)
    shaded = _combine(outlined, _get_gradient(outlined))

    logger.trace("Registering a colormap")
    if color_seeds:
        seed_triples = list(_group(3, color_seeds))[:3]
    else:
        seed_triples = [None] * 3
    colors = ["black", "#f2f2f2"] + [_color(t) for t in seed_triples]
    logger.trace(f"Colors to use: {colors}")
    cm.register_cmap(
        cmap=mpl.colors.LinearSegmentedColormap.from_list(
            "custom", colors
        ).reversed()
    )

    logger.debug("Preparing final image")
    fig, _ = plt.subplots(1, 1, figsize=(1, 1), dpi=size)
    canvas = fig.add_axes([0, 0, 1, 1], xticks=[], yticks=[], frameon=False)
    canvas.imshow(shaded, cmap="custom_r", interpolation="nearest")
    logger.debug("Successfully generated sprite!")
    return fig
def _custom_rule(
X: np.ndarray, n_extinct: int = 3, n_survive: int = 3
) -> np.ndarray:
"""Custom Conway's Rule"""
n = convolve2d(X, np.ones((3, 3)), mode="same", boundary="fill") - X
reproduction_rule = (X == 0) & (n <= n_extinct)
stasis_rule = (X == 1) & ((n == 2) | (n == n_survive))
return reproduction_rule | stasis_rule
def _color(seeds: Tuple[int, int, int] = None) -> str:
"""Returns a random hex code"""
hex_values = []
for i in range(3):
if seeds:
random.seed(seeds[i])
h = random.randint(0, 255)
hex_values.append(h)
return "#{:02X}{:02X}{:02X}".format(*hex_values)
def _add_outline(mat: np.ndarray) -> np.ndarray:
"""Create an outline given a sprite image
It traverses the matrix and looks for the body of the sprite, as
represented by 0 values. Once it founds one, it looks around its neighbors
and change all background values (represented as 1) into an outline.
Parameters
----------
mat : np.ndarray
The input sprite image
Returns
-------
np.ndarray
The sprite image with outline
"""
m = np.ones(mat.shape)
for idx, orig_val in np.ndenumerate(mat):
x, y = idx
neighbors = [(x, y + 1), (x + 1, y), (x, y - 1), (x - 1, y)]
if orig_val == 0:
m[idx] = 0 # Set the coordinate in the new matrix as 0
for n_coord in neighbors:
try:
m[n_coord] = 0.5 if mat[n_coord] == 1 else 0
except IndexError:
pass
m = np.pad(m, mode="constant", pad_width=1, constant_values=1)
# I need to switch some values so that I get the colors right.
# Need to make all 0.5 (outline) as 1, and all 1 (backround)
# as 0.5
m[m == 1] = np.inf
m[m == 0.5] = 1
m[m == np.inf] = 0.5
return m
def _get_gradient(
mat: np.ndarray, map_range: Tuple[float, float] = (0.2, 0.25)
) -> np.ndarray:
"""Get gradient of an outline sprite
We use gradient as a way to shade the body of the sprite. It is a crude
approach, but it works most of the time.
Parameters
----------
mat : np.ndarray
The input sprite with outline
map_range : tuple of floats
Map the gradients within a certain set of values. The default is
between 0.2 and 0.25 because those values look better in the color map.
Returns
-------
np.ndarray
The sprite with shading
"""
grad = np.gradient(mat)[0]
def _remap(new_range, matrix):
old_min, old_max = np.min(matrix), np.max(matrix)
new_min, new_max = new_range
old = old_max - old_min
new = new_max - new_min
return (((matrix - old_min) * new) / old) + new_min
sprite_with_gradient = _remap(map_range, grad)
return sprite_with_gradient
def _combine(mat_outline: np.ndarray, mat_gradient: np.ndarray) -> np.ndarray:
"""Combine the sprite with outline and the one with gradients
Parameters
----------
mat_outline: np.ndarray
The sprite with outline
mat_gradient: np.ndarray
The sprite with gradient
Returns
-------
np.ndarray
The final black-and-white sprite image before coloring
"""
mat_final = np.copy(mat_outline)
mask = mat_outline == 0
mat_final[mask] = mat_gradient[mask]
return mat_final
def _group(n, it):
args = [iter(it)] * n
return itertools.zip_longest(fillvalue=None, *args)
| [
"seagull.Simulator",
"seagull.lifeforms.Custom",
"loguru.logger.disable",
"numpy.gradient",
"loguru.logger.trace",
"numpy.ndenumerate",
"numpy.max",
"numpy.random.seed",
"numpy.min",
"random.randint",
"numpy.ones",
"numpy.random.choice",
"numpy.fliplr",
"itertools.zip_longest",
"matplotl... | [((349, 374), 'loguru.logger.disable', 'logger.disable', (['"""seagull"""'], {}), "('seagull')\n", (363, 374), False, 'from loguru import logger\n'), ((1387, 1421), 'loguru.logger.debug', 'logger.debug', (['"""Initializing board"""'], {}), "('Initializing board')\n", (1399, 1421), False, 'from loguru import logger\n'), ((1434, 1455), 'seagull.Board', 'sg.Board', ([], {'size': '(8, 4)'}), '(size=(8, 4))\n', (1442, 1455), True, 'import seagull as sg\n'), ((1461, 1497), 'loguru.logger.debug', 'logger.debug', (['"""Seeding the lifeform"""'], {}), "('Seeding the lifeform')\n", (1473, 1497), False, 'from loguru import logger\n'), ((1566, 1603), 'numpy.random.choice', 'np.random.choice', (['[0, 1]'], {'size': '(8, 4)'}), '([0, 1], size=(8, 4))\n', (1582, 1603), True, 'import numpy as np\n'), ((1620, 1636), 'seagull.lifeforms.Custom', 'lf.Custom', (['noise'], {}), '(noise)\n', (1629, 1636), True, 'import seagull.lifeforms as lf\n'), ((1679, 1717), 'loguru.logger.debug', 'logger.debug', (['"""Running the simulation"""'], {}), "('Running the simulation')\n", (1691, 1717), False, 'from loguru import logger\n'), ((1728, 1747), 'seagull.Simulator', 'sg.Simulator', (['board'], {}), '(board)\n', (1740, 1747), True, 'import seagull as sg\n'), ((1928, 1980), 'loguru.logger.debug', 'logger.debug', (['"""Adding outline, gradient, and colors"""'], {}), "('Adding outline, gradient, and colors')\n", (1940, 1980), False, 'from loguru import logger\n'), ((2046, 2109), 'numpy.pad', 'np.pad', (['sprite'], {'mode': '"""constant"""', 'pad_width': '(1)', 'constant_values': '(1)'}), "(sprite, mode='constant', pad_width=1, constant_values=1)\n", (2052, 2109), True, 'import numpy as np\n'), ((2285, 2323), 'loguru.logger.trace', 'logger.trace', (['"""Registering a colormap"""'], {}), "('Registering a colormap')\n", (2297, 2323), False, 'from loguru import logger\n'), ((2545, 2585), 'loguru.logger.trace', 'logger.trace', (['f"""Colors to use: {colors}"""'], {}), "(f'Colors to use: 
{colors}')\n", (2557, 2585), False, 'from loguru import logger\n'), ((2728, 2765), 'loguru.logger.debug', 'logger.debug', (['"""Preparing final image"""'], {}), "('Preparing final image')\n", (2740, 2765), False, 'from loguru import logger\n'), ((2781, 2825), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {'figsize': '(1, 1)', 'dpi': 'size'}), '(1, 1, figsize=(1, 1), dpi=size)\n', (2793, 2825), True, 'import matplotlib.pyplot as plt\n'), ((2975, 3021), 'loguru.logger.debug', 'logger.debug', (['"""Successfully generated sprite!"""'], {}), "('Successfully generated sprite!')\n", (2987, 3021), False, 'from loguru import logger\n'), ((4191, 4209), 'numpy.ones', 'np.ones', (['mat.shape'], {}), '(mat.shape)\n', (4198, 4209), True, 'import numpy as np\n'), ((4235, 4254), 'numpy.ndenumerate', 'np.ndenumerate', (['mat'], {}), '(mat)\n', (4249, 4254), True, 'import numpy as np\n'), ((4631, 4689), 'numpy.pad', 'np.pad', (['m'], {'mode': '"""constant"""', 'pad_width': '(1)', 'constant_values': '(1)'}), "(m, mode='constant', pad_width=1, constant_values=1)\n", (4637, 4689), True, 'import numpy as np\n'), ((6337, 6357), 'numpy.copy', 'np.copy', (['mat_outline'], {}), '(mat_outline)\n', (6344, 6357), True, 'import numpy as np\n'), ((6506, 6550), 'itertools.zip_longest', 'itertools.zip_longest', (['*args'], {'fillvalue': 'None'}), '(*args, fillvalue=None)\n', (6527, 6550), False, 'import itertools\n'), ((1526, 1553), 'numpy.random.seed', 'np.random.seed', (['sprite_seed'], {}), '(sprite_seed)\n', (1540, 1553), True, 'import numpy as np\n'), ((3590, 3612), 'random.randint', 'random.randint', (['(0)', '(255)'], {}), '(0, 255)\n', (3604, 3612), False, 'import random\n'), ((5551, 5567), 'numpy.gradient', 'np.gradient', (['mat'], {}), '(mat)\n', (5562, 5567), True, 'import numpy as np\n'), ((2013, 2030), 'numpy.fliplr', 'np.fliplr', (['fstate'], {}), '(fstate)\n', (2022, 2030), True, 'import numpy as np\n'), ((3185, 3200), 'numpy.ones', 'np.ones', (['(3, 3)'], {}), 
'((3, 3))\n', (3192, 3200), True, 'import numpy as np\n'), ((3556, 3577), 'random.seed', 'random.seed', (['seeds[i]'], {}), '(seeds[i])\n', (3567, 3577), False, 'import random\n'), ((5634, 5648), 'numpy.min', 'np.min', (['matrix'], {}), '(matrix)\n', (5640, 5648), True, 'import numpy as np\n'), ((5650, 5664), 'numpy.max', 'np.max', (['matrix'], {}), '(matrix)\n', (5656, 5664), True, 'import numpy as np\n'), ((2621, 2683), 'matplotlib.colors.LinearSegmentedColormap.from_list', 'mpl.colors.LinearSegmentedColormap.from_list', (['"""custom"""', 'colors'], {}), "('custom', colors)\n", (2665, 2683), True, 'import matplotlib as mpl\n')] |
import pytest
from unittestmock import UnitTestMock
import numpy as np
from cykhash import none_int64, none_int64_from_iter, Int64Set_from, Int64Set_from_buffer
from cykhash import none_int32, none_int32_from_iter, Int32Set_from, Int32Set_from_buffer
from cykhash import none_float64, none_float64_from_iter, Float64Set_from, Float64Set_from_buffer
from cykhash import none_float32, none_float32_from_iter, Float32Set_from, Float32Set_from_buffer
from cykhash import none_pyobject, none_pyobject_from_iter, PyObjectSet_from, PyObjectSet_from_buffer
# Dispatch tables keyed by dtype name: the cykhash "none" query (True iff no
# element of the query is contained in the set), its pure-iterator variant,
# the set factory, and the array.array type code used to build test buffers.
NONE={'int32': none_int32, 'int64': none_int64, 'float64' : none_float64, 'float32' : none_float32}
NONE_FROM_ITER={'int32': none_int32_from_iter, 'int64': none_int64_from_iter, 'float64' : none_float64_from_iter, 'float32' : none_float32_from_iter}
FROM_SET={'int32': Int32Set_from, 'int64': Int64Set_from, 'float64' : Float64Set_from, 'float32' : Float32Set_from, 'pyobject' : PyObjectSet_from}
BUFFER_SIZE = {'int32': 'i', 'int64': 'q', 'float64' : 'd', 'float32' : 'f'}
import array
@pytest.mark.parametrize(
    "value_type",
    ['int64', 'int32', 'float64', 'float32']
)
class TestNone(UnitTestMock):
    """``none_*`` must be True iff no query element is in the set."""

    def test_none_yes(self, value_type):
        haystack = FROM_SET[value_type]([2, 4, 6])
        query = array.array(BUFFER_SIZE[value_type], [1, 3, 5] * 6)
        self.assertEqual(NONE[value_type](query, haystack), True)

    def test_none_yes_from_iter(self, value_type):
        haystack = FROM_SET[value_type]([2, 4, 6])
        self.assertEqual(NONE_FROM_ITER[value_type]([1, 3, 5] * 6, haystack), True)

    def test_none_last_no(self, value_type):
        haystack = FROM_SET[value_type]([2, 4, 6])
        query = array.array(BUFFER_SIZE[value_type], [1] * 6 + [2])
        self.assertEqual(NONE[value_type](query, haystack), False)

    def test_none_last_no_from_iter(self, value_type):
        haystack = FROM_SET[value_type]([2, 4, 6])
        self.assertEqual(NONE_FROM_ITER[value_type]([1] * 6 + [2], haystack), False)

    def test_none_empty(self, value_type):
        haystack = FROM_SET[value_type]([])
        query = array.array(BUFFER_SIZE[value_type], [])
        self.assertEqual(NONE[value_type](query, haystack), True)

    def test_none_empty_from_iter(self, value_type):
        haystack = FROM_SET[value_type]([])
        self.assertEqual(NONE_FROM_ITER[value_type]([], haystack), True)

    def test_none_empty_set(self, value_type):
        haystack = FROM_SET[value_type]([])
        query = array.array(BUFFER_SIZE[value_type], [1])
        self.assertEqual(NONE[value_type](query, haystack), True)

    def test_none_empty_set_from_iter(self, value_type):
        haystack = FROM_SET[value_type]([])
        self.assertEqual(NONE_FROM_ITER[value_type]([1], haystack), True)

    def test_noniter_from_iter(self, value_type):
        # A non-iterable query must raise, not silently return.
        haystack = FROM_SET[value_type]([])
        with pytest.raises(TypeError) as context:
            NONE_FROM_ITER[value_type](1, haystack)
        self.assertTrue("object is not iterable" in str(context.value))

    def test_memview_none(self, value_type):
        # A None buffer counts as "no elements present".
        haystack = FROM_SET[value_type]([])
        self.assertEqual(NONE[value_type](None, haystack), True)

    def test_dbnone(self, value_type):
        # A None set likewise yields True.
        query = array.array(BUFFER_SIZE[value_type], [1])
        self.assertEqual(NONE[value_type](query, None), True)

    def test_dbnone_from_iter(self, value_type):
        self.assertEqual(NONE_FROM_ITER[value_type](1, None), True)
class TestNonePyObject(UnitTestMock):
    """PyObject-keyed variant of the ``none`` tests.

    Note: the original used ``dtype=np.object``, an alias deprecated in
    numpy 1.20 and removed in 1.24; the builtin ``object`` is the
    supported spelling and behaves identically.
    """
    def test_none_yes(self):
        s=PyObjectSet_from([2,4,666])
        a=np.array([1,3,333]*6, dtype=object)
        result=none_pyobject(a,s)
        self.assertEqual(result, True)
    def test_none_from_iter(self):
        s=PyObjectSet_from([2,4,666])
        a=[1,3,333]*6
        result=none_pyobject_from_iter(a,s)
        self.assertEqual(result, True)
    def test_none_last_no(self):
        s=PyObjectSet_from([2,4,666])
        a=np.array([1,3,333]*6+[2], dtype=object)
        result=none_pyobject(a,s)
        self.assertEqual(result, False)
    def test_none_last_no_from_iter(self):
        s=PyObjectSet_from([2,4,666])
        a=[1,3,333]*6+[2]
        result=none_pyobject_from_iter(a,s)
        self.assertEqual(result, False)
    def test_none_empty(self):
        s=PyObjectSet_from([])
        a=np.array([], dtype=object)
        result=none_pyobject(a,s)
        self.assertEqual(result, True)
    def test_none_empty_from_iter(self):
        s=PyObjectSet_from([])
        a=[]
        result=none_pyobject_from_iter(a,s)
        self.assertEqual(result, True)
    def test_none_empty_set(self):
        s=PyObjectSet_from([])
        a=np.array([1], dtype=object)
        result=none_pyobject(a,s)
        self.assertEqual(result, True)
    def test_none_empty_set_from_iter(self):
        s=PyObjectSet_from([])
        a=[1]
        result=none_pyobject_from_iter(a,s)
        self.assertEqual(result, True)
    def test_noniter_from_iter(self):
        # A non-iterable argument must raise, not silently return.
        s=PyObjectSet_from([])
        a=1
        with pytest.raises(TypeError) as context:
            none_pyobject_from_iter(a,s)
        self.assertTrue("object is not iterable" in str(context.value))
    def test_memview_none(self):
        s=PyObjectSet_from([])
        self.assertEqual(none_pyobject(None,s), True)
    def test_dbnone(self):
        a=np.array([1], dtype=object)
        self.assertEqual(none_pyobject(a,None), True)
    def test_dbnone_from_iter(self):
        a=1
        self.assertEqual(none_pyobject_from_iter(a,None), True)
| [
"array.array",
"cykhash.none_pyobject_from_iter",
"numpy.array",
"pytest.mark.parametrize",
"cykhash.PyObjectSet_from",
"pytest.raises",
"cykhash.none_pyobject"
] | [((1044, 1123), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""value_type"""', "['int64', 'int32', 'float64', 'float32']"], {}), "('value_type', ['int64', 'int32', 'float64', 'float32'])\n", (1067, 1123), False, 'import pytest\n'), ((1256, 1307), 'array.array', 'array.array', (['BUFFER_SIZE[value_type]', '([1, 3, 5] * 6)'], {}), '(BUFFER_SIZE[value_type], [1, 3, 5] * 6)\n', (1267, 1307), False, 'import array\n'), ((1674, 1725), 'array.array', 'array.array', (['BUFFER_SIZE[value_type]', '([1] * 6 + [2])'], {}), '(BUFFER_SIZE[value_type], [1] * 6 + [2])\n', (1685, 1725), False, 'import array\n'), ((2091, 2131), 'array.array', 'array.array', (['BUFFER_SIZE[value_type]', '[]'], {}), '(BUFFER_SIZE[value_type], [])\n', (2102, 2131), False, 'import array\n'), ((2488, 2529), 'array.array', 'array.array', (['BUFFER_SIZE[value_type]', '[1]'], {}), '(BUFFER_SIZE[value_type], [1])\n', (2499, 2529), False, 'import array\n'), ((3250, 3291), 'array.array', 'array.array', (['BUFFER_SIZE[value_type]', '[1]'], {}), '(BUFFER_SIZE[value_type], [1])\n', (3261, 3291), False, 'import array\n'), ((3557, 3586), 'cykhash.PyObjectSet_from', 'PyObjectSet_from', (['[2, 4, 666]'], {}), '([2, 4, 666])\n', (3573, 3586), False, 'from cykhash import none_pyobject, none_pyobject_from_iter, PyObjectSet_from, PyObjectSet_from_buffer\n'), ((3595, 3637), 'numpy.array', 'np.array', (['([1, 3, 333] * 6)'], {'dtype': 'np.object'}), '([1, 3, 333] * 6, dtype=np.object)\n', (3603, 3637), True, 'import numpy as np\n'), ((3649, 3668), 'cykhash.none_pyobject', 'none_pyobject', (['a', 's'], {}), '(a, s)\n', (3662, 3668), False, 'from cykhash import none_pyobject, none_pyobject_from_iter, PyObjectSet_from, PyObjectSet_from_buffer\n'), ((3753, 3782), 'cykhash.PyObjectSet_from', 'PyObjectSet_from', (['[2, 4, 666]'], {}), '([2, 4, 666])\n', (3769, 3782), False, 'from cykhash import none_pyobject, none_pyobject_from_iter, PyObjectSet_from, PyObjectSet_from_buffer\n'), ((3818, 3847), 
'cykhash.none_pyobject_from_iter', 'none_pyobject_from_iter', (['a', 's'], {}), '(a, s)\n', (3841, 3847), False, 'from cykhash import none_pyobject, none_pyobject_from_iter, PyObjectSet_from, PyObjectSet_from_buffer\n'), ((3930, 3959), 'cykhash.PyObjectSet_from', 'PyObjectSet_from', (['[2, 4, 666]'], {}), '([2, 4, 666])\n', (3946, 3959), False, 'from cykhash import none_pyobject, none_pyobject_from_iter, PyObjectSet_from, PyObjectSet_from_buffer\n'), ((3968, 4016), 'numpy.array', 'np.array', (['([1, 3, 333] * 6 + [2])'], {'dtype': 'np.object'}), '([1, 3, 333] * 6 + [2], dtype=np.object)\n', (3976, 4016), True, 'import numpy as np\n'), ((4026, 4045), 'cykhash.none_pyobject', 'none_pyobject', (['a', 's'], {}), '(a, s)\n', (4039, 4045), False, 'from cykhash import none_pyobject, none_pyobject_from_iter, PyObjectSet_from, PyObjectSet_from_buffer\n'), ((4139, 4168), 'cykhash.PyObjectSet_from', 'PyObjectSet_from', (['[2, 4, 666]'], {}), '([2, 4, 666])\n', (4155, 4168), False, 'from cykhash import none_pyobject, none_pyobject_from_iter, PyObjectSet_from, PyObjectSet_from_buffer\n'), ((4208, 4237), 'cykhash.none_pyobject_from_iter', 'none_pyobject_from_iter', (['a', 's'], {}), '(a, s)\n', (4231, 4237), False, 'from cykhash import none_pyobject, none_pyobject_from_iter, PyObjectSet_from, PyObjectSet_from_buffer\n'), ((4319, 4339), 'cykhash.PyObjectSet_from', 'PyObjectSet_from', (['[]'], {}), '([])\n', (4335, 4339), False, 'from cykhash import none_pyobject, none_pyobject_from_iter, PyObjectSet_from, PyObjectSet_from_buffer\n'), ((4350, 4379), 'numpy.array', 'np.array', (['[]'], {'dtype': 'np.object'}), '([], dtype=np.object)\n', (4358, 4379), True, 'import numpy as np\n'), ((4395, 4414), 'cykhash.none_pyobject', 'none_pyobject', (['a', 's'], {}), '(a, s)\n', (4408, 4414), False, 'from cykhash import none_pyobject, none_pyobject_from_iter, PyObjectSet_from, PyObjectSet_from_buffer\n'), ((4505, 4525), 'cykhash.PyObjectSet_from', 'PyObjectSet_from', (['[]'], {}), '([])\n', 
(4521, 4525), False, 'from cykhash import none_pyobject, none_pyobject_from_iter, PyObjectSet_from, PyObjectSet_from_buffer\n'), ((4554, 4583), 'cykhash.none_pyobject_from_iter', 'none_pyobject_from_iter', (['a', 's'], {}), '(a, s)\n', (4577, 4583), False, 'from cykhash import none_pyobject, none_pyobject_from_iter, PyObjectSet_from, PyObjectSet_from_buffer\n'), ((4668, 4688), 'cykhash.PyObjectSet_from', 'PyObjectSet_from', (['[]'], {}), '([])\n', (4684, 4688), False, 'from cykhash import none_pyobject, none_pyobject_from_iter, PyObjectSet_from, PyObjectSet_from_buffer\n'), ((4699, 4729), 'numpy.array', 'np.array', (['[1]'], {'dtype': 'np.object'}), '([1], dtype=np.object)\n', (4707, 4729), True, 'import numpy as np\n'), ((4745, 4764), 'cykhash.none_pyobject', 'none_pyobject', (['a', 's'], {}), '(a, s)\n', (4758, 4764), False, 'from cykhash import none_pyobject, none_pyobject_from_iter, PyObjectSet_from, PyObjectSet_from_buffer\n'), ((4859, 4879), 'cykhash.PyObjectSet_from', 'PyObjectSet_from', (['[]'], {}), '([])\n', (4875, 4879), False, 'from cykhash import none_pyobject, none_pyobject_from_iter, PyObjectSet_from, PyObjectSet_from_buffer\n'), ((4909, 4938), 'cykhash.none_pyobject_from_iter', 'none_pyobject_from_iter', (['a', 's'], {}), '(a, s)\n', (4932, 4938), False, 'from cykhash import none_pyobject, none_pyobject_from_iter, PyObjectSet_from, PyObjectSet_from_buffer\n'), ((5026, 5046), 'cykhash.PyObjectSet_from', 'PyObjectSet_from', (['[]'], {}), '([])\n', (5042, 5046), False, 'from cykhash import none_pyobject, none_pyobject_from_iter, PyObjectSet_from, PyObjectSet_from_buffer\n'), ((5266, 5286), 'cykhash.PyObjectSet_from', 'PyObjectSet_from', (['[]'], {}), '([])\n', (5282, 5286), False, 'from cykhash import none_pyobject, none_pyobject_from_iter, PyObjectSet_from, PyObjectSet_from_buffer\n'), ((5379, 5409), 'numpy.array', 'np.array', (['[1]'], {'dtype': 'np.object'}), '([1], dtype=np.object)\n', (5387, 5409), True, 'import numpy as np\n'), ((2909, 2933), 
'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (2922, 2933), False, 'import pytest\n'), ((5072, 5096), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (5085, 5096), False, 'import pytest\n'), ((5121, 5150), 'cykhash.none_pyobject_from_iter', 'none_pyobject_from_iter', (['a', 's'], {}), '(a, s)\n', (5144, 5150), False, 'from cykhash import none_pyobject, none_pyobject_from_iter, PyObjectSet_from, PyObjectSet_from_buffer\n'), ((5312, 5334), 'cykhash.none_pyobject', 'none_pyobject', (['None', 's'], {}), '(None, s)\n', (5325, 5334), False, 'from cykhash import none_pyobject, none_pyobject_from_iter, PyObjectSet_from, PyObjectSet_from_buffer\n'), ((5435, 5457), 'cykhash.none_pyobject', 'none_pyobject', (['a', 'None'], {}), '(a, None)\n', (5448, 5457), False, 'from cykhash import none_pyobject, none_pyobject_from_iter, PyObjectSet_from, PyObjectSet_from_buffer\n'), ((5539, 5571), 'cykhash.none_pyobject_from_iter', 'none_pyobject_from_iter', (['a', 'None'], {}), '(a, None)\n', (5562, 5571), False, 'from cykhash import none_pyobject, none_pyobject_from_iter, PyObjectSet_from, PyObjectSet_from_buffer\n')] |
# Author: bbrighttaer
# Project: masdcop
# Date: 5/13/2021
# Time: 4:34 AM
# File: env.py
from masdcop.agent import SingleVariableAgent
from masdcop.algo import PseudoTree, SyncBB
import numpy as np
class FourQueens:
def __init__(self, num_agents):
self._max_cost = 0
self.domain = [(i, j) for i in range(0, 4) for j in range(0, 4)]
self.agents = [SingleVariableAgent(i + 1, self.domain) for i in range(num_agents)]
self.pseudo_tree = PseudoTree(self.agents)
self.algo = SyncBB(self.check_constraints, self.pseudo_tree, self._max_cost)
self.grid = np.zeros((4, 4))
self.history = {}
def check_constraints(self, *args) -> bool:
# update grid and history
cleared = []
for var in args:
if var[0] in self.history and self.history[var[0]] not in cleared:
self.grid[self.history[var[0]]] = 0
cleared.append(self.history[var[0]])
self.grid[var[1]] += 1
self.history[var[0]] = var[1]
# check for violations
if (not np.sum(np.sum(self.grid, 1) > 1) == 0) or \
(not np.sum(np.sum(self.grid, 0) > 1) == 0) or \
not _diags_check(self.grid) or not _diags_check(np.fliplr(self.grid)):
return False
return True
def resolve(self, verbose=False):
self.algo.initiate(self.pseudo_tree.next())
while not self.algo.terminate:
agent = self.pseudo_tree.getCurrentAgent()
if agent:
self.algo.receive(agent)
self.algo.sendMessage(agent)
else:
break
def _diags_check(m) -> bool:
"""
Checks for diagonals constraint violation.
:return: bool
True if all constraints are satisfied else False
"""
for i in range(m.shape[0]):
if np.trace(m, i) > 1 or np.trace(m, -1) > 1:
return False
return True
| [
"numpy.trace",
"masdcop.agent.SingleVariableAgent",
"masdcop.algo.SyncBB",
"numpy.fliplr",
"masdcop.algo.PseudoTree",
"numpy.sum",
"numpy.zeros"
] | [((475, 498), 'masdcop.algo.PseudoTree', 'PseudoTree', (['self.agents'], {}), '(self.agents)\n', (485, 498), False, 'from masdcop.algo import PseudoTree, SyncBB\n'), ((519, 583), 'masdcop.algo.SyncBB', 'SyncBB', (['self.check_constraints', 'self.pseudo_tree', 'self._max_cost'], {}), '(self.check_constraints, self.pseudo_tree, self._max_cost)\n', (525, 583), False, 'from masdcop.algo import PseudoTree, SyncBB\n'), ((604, 620), 'numpy.zeros', 'np.zeros', (['(4, 4)'], {}), '((4, 4))\n', (612, 620), True, 'import numpy as np\n'), ((380, 419), 'masdcop.agent.SingleVariableAgent', 'SingleVariableAgent', (['(i + 1)', 'self.domain'], {}), '(i + 1, self.domain)\n', (399, 419), False, 'from masdcop.agent import SingleVariableAgent\n'), ((1872, 1886), 'numpy.trace', 'np.trace', (['m', 'i'], {}), '(m, i)\n', (1880, 1886), True, 'import numpy as np\n'), ((1894, 1909), 'numpy.trace', 'np.trace', (['m', '(-1)'], {}), '(m, -1)\n', (1902, 1909), True, 'import numpy as np\n'), ((1258, 1278), 'numpy.fliplr', 'np.fliplr', (['self.grid'], {}), '(self.grid)\n', (1267, 1278), True, 'import numpy as np\n'), ((1092, 1112), 'numpy.sum', 'np.sum', (['self.grid', '(1)'], {}), '(self.grid, 1)\n', (1098, 1112), True, 'import numpy as np\n'), ((1157, 1177), 'numpy.sum', 'np.sum', (['self.grid', '(0)'], {}), '(self.grid, 0)\n', (1163, 1177), True, 'import numpy as np\n')] |
import os
import csv
import time
import yaml
import shutil
import pickle
import argparse
import numpy as np
import tensorflow as tf
# import tensorflow.compat.v1 as tf
# tf.disable_v2_behavior()
import matplotlib.pyplot as plt
from sklearn.utils import shuffle
SCAN_RANGE = 1.5 * np.pi
SCAN_NUM = 720
def make_args():
parser = argparse.ArgumentParser()
parser.add_argument('-t', '--training_params_filename',
type=str,
default='train_scan_classifier.yaml',
help='Name of filename defining the learning params')
args = parser.parse_args()
config = yaml.load(open(args.training_params_filename))
for k, v in config.items():
args.__dict__[k] = v
args.lr = float(args.lr)
if args.centering_on_gp:
args.cropping = False
args.rslts_dir = os.path.join("..", "rslts", "{}".format(time.strftime("%Y-%m-%d-%H-%M-%S")))
os.makedirs(args.rslts_dir, exist_ok=True)
shutil.copyfile(args.training_params_filename, os.path.join(args.rslts_dir, args.training_params_filename))
args.Dy = len(args.used_context)
return args
def plot_scan(scan, gp, save_path, args):
plt.polar(np.linspace(-SCAN_RANGE / 2, SCAN_RANGE / 2, len(scan)), scan)
gp = np.concatenate([np.zeros((1, 2)), gp], axis=0)
plt.polar(np.arctan2(gp[:, 1], gp[:, 0]), np.linalg.norm(gp, axis=-1))
plt.gca().set_theta_zero_location("N")
plt.gca().set_ylim([0, args.clipping])
plt.savefig(save_path)
plt.close("all")
def preprocess_scan(scan, args):
i = 0
while i < len(scan):
if scan[i] != np.inf:
i += 1
continue
j = i + 1
while j < len(scan) and scan[j] == np.inf:
j += 1
if i == 0:
scan[i:j] = 0.05 * np.ones(j - i)
elif j == len(scan):
scan[i:j] = 0.05 * np.ones(j - i)
else:
scan[i:j] = np.linspace(scan[i - 1], scan[j], j - i + 1)[1:]
i = j
scan = scan.clip(0, args.clipping)
return scan
def preprocess_gp(gp):
gp_clip = []
node_idx = 1
cum_dist = 0
for pt, pt_next in zip(gp.T[:-1], gp.T[1:]):
cum_dist += np.linalg.norm(pt - pt_next)
if node_idx * 0.2 < cum_dist:
gp_clip.append(pt_next)
node_idx += 1
if node_idx == 11:
break
while node_idx < 11:
node_idx += 1
gp_clip.append(gp.T[-1])
gp_clip = np.array(gp_clip)
return gp_clip
def get_dataset(args, draw_data=False):
scan_train, gp_train, y_train = [], [], []
scan_test, gp_test, y_test = [], [], []
for y, fname in enumerate(args.used_context):
fname_ = os.path.join("../bag_files", fname + ".pkl")
if not os.path.exists(fname_):
continue
scans = []
gps = []
ys = []
with open(fname_, "rb") as f:
data = pickle.load(f, encoding='latin1')
for scan, gp in data:
scan = preprocess_scan(scan, args)
gp = preprocess_gp(gp)
scans.append(scan)
gps.append(gp)
ys.append(y)
if draw_data:
fig_dir = os.path.join("..", "data_plots", fname)
os.makedirs(fig_dir, exist_ok=True)
for i, (scan, gp) in enumerate(zip(scans, gps)):
plot_scan(scan, gp, os.path.join(fig_dir, str(i)), args)
# # use first and last 10 % as testing data
num_X = len(scans)
if not args.full_train:
# scan_test.extend(np.concatenate([scans[:num_X // 10], scans[-num_X // 10:]]))
# gp_test.extend(np.concatenate([gps[:num_X // 10], gps[-num_X // 10:]]))
# y_test.extend(ys[:num_X // 10] + ys[-num_X // 10:])
# scan_train.extend(scans[num_X // 10:-num_X // 10])
# gp_train.extend(gps[num_X // 10:-num_X // 10])
# y_train.extend(ys[num_X // 10:-num_X // 10])
scans, gps = shuffle(scans, gps)
scan_test.extend(scans[:num_X // 5])
gp_test.extend(gps[:num_X // 5])
y_test.extend(ys[:num_X // 5])
scan_train.extend(scans[num_X // 5:])
gp_train.extend(gps[num_X // 5:])
y_train.extend(ys[num_X // 5:])
else:
scan_train.extend(scans)
gp_train.extend(gps)
y_train.extend(ys)
# print stats
scans = np.array(scan_train + scan_test)
y = np.array(y_train + y_test)
scan_train = np.array(scan_train)
gp_train = np.array(gp_train)
y_train = np.array(y_train)
scan_test = np.array(scan_test)
gp_test = np.array(gp_test)
y_test = np.array(y_test)
print("Num of train", len(scan_train))
print("Num of test", len(scan_test))
if draw_data:
plt.figure()
plt.hist(scans.reshape((-1), 1), bins="auto")
plt.title("Laser scan distribution")
plt.ylim([0, 10000])
plt.savefig("../data_plots/scan_distribution")
plt.figure()
(unique, counts) = np.unique(y, return_counts=True)
plt.bar(unique, counts)
plt.title("label_distribution")
plt.savefig("../data_plots/label_distribution")
return [scan_train, gp_train], y_train, [scan_test, gp_test], y_test
class ScanClassifier(object):
def __init__(self, args):
self.Dx = args.Dx
self.Dy = args.Dy
self.used_context = args.used_context
self.use_EDL = args.use_EDL
self.dropout_rate = args.dropout_rate
self.scan_Dhs = args.scan_Dhs
self.gp_Dhs = args.gp_Dhs
self.use_conv1d = args.use_conv1d
self.kernel_sizes = args.kernel_sizes
self.filter_sizes = args.filter_sizes
self.strides = args.strides
self.lr = args.lr
self.epochs = args.epochs
self.batch_size = args.batch_size
self.full_train = args.full_train
self.rslts_dir = args.rslts_dir
self.use_weigth = args.use_weigth
self.clipping = args.clipping
self.centering_on_gp = args.centering_on_gp
self.cropping = args.cropping
self.theta_noise_scale = args.theta_noise_scale
self.noise = args.noise
self.noise_scale = args.noise_scale
self.flipping = args.flipping
self.translation = args.translation
self.translation_scale = args.translation_scale
self.mode_inited = False
def _init_model(self):
tf.keras.backend.set_learning_phase(1)
self.scan_ph = tf.placeholder(tf.float32, shape=(None, self.Dx))
self.gp_ph = tf.placeholder(tf.float32, shape=(None, 10, 2))
self.label_ph = tf.placeholder(tf.int32, shape=(None,))
self.annealing_step_ph = tf.placeholder(dtype=tf.int32)
self.dropout_rate_ph = tf.placeholder(dtype=tf.float32)
self.weight_ph = tf.placeholder(tf.float32, shape=(None,))
self.scan_encoder, self.gp_encoder, self.classify_layers = [], [], []
if self.use_conv1d:
for kernel_size, filter_size, stride in zip(self.kernel_sizes, self.filter_sizes, self.strides):
self.scan_encoder.append(tf.keras.layers.Conv1D(filter_size, kernel_size, strides=stride,
activation="relu"))
self.scan_encoder.append(tf.keras.layers.Flatten())
else:
for Dh in self.scan_Dhs:
self.scan_encoder.append(tf.keras.layers.Dense(Dh, activation="relu"))
self.gp_encoder.append(tf.keras.layers.Flatten())
for Dh in self.gp_Dhs:
self.gp_encoder.append(tf.keras.layers.Dense(Dh, activation="relu"))
self.classify_layers.append(tf.keras.layers.Dropout(rate=self.dropout_rate_ph))
self.classify_layers.append(tf.keras.layers.Dense(self.Dy))
scan_h = self.scan_ph
if self.use_conv1d:
scan_h = scan_h[..., tf.newaxis]
for layer in self.scan_encoder:
scan_h = layer(scan_h)
if self.gp_Dhs == [0]:
gp_h = tf.zeros_like(scan_h[:, :0])
else:
gp_h = self.gp_ph
for layer in self.gp_encoder:
gp_h = layer(gp_h)
h = tf.concat([scan_h, gp_h], axis=-1)
for layer in self.classify_layers:
if isinstance(layer, tf.keras.layers.Dropout):
h = layer(h, training=False)
else:
h = layer(h)
global_step_ = tf.Variable(initial_value=0, name='global_step', trainable=False)
if self.use_EDL:
self.evidence = tf.nn.softplus(h)
self.alpha = self.evidence + 1
self.uncertainty = self.Dy / tf.reduce_sum(self.alpha, axis=-1)
self.confidence = 1 - self.uncertainty
self.prob = self.alpha / tf.reduce_sum(self.alpha, axis=-1, keepdims=True)
self.pred = tf.argmax(self.alpha, axis=-1, output_type=tf.int32)
def KL(alpha, K):
beta = tf.constant(np.ones((1, K)), dtype=tf.float32)
S_alpha = tf.reduce_sum(alpha, axis=1, keepdims=True)
KL = tf.reduce_sum((alpha - beta) * (tf.digamma(alpha) - tf.digamma(S_alpha)), axis=1, keepdims=True) +\
tf.lgamma(S_alpha) - tf.reduce_sum(tf.lgamma(alpha), axis=1, keepdims=True) + \
tf.reduce_sum(tf.lgamma(beta), axis=1, keepdims=True) - \
tf.lgamma(tf.reduce_sum(beta, axis=1, keepdims=True))
return KL
def expected_cross_entropy(p, alpha, K, global_step, annealing_step):
if self.use_weigth:
p = p * self.weight_ph[:, tf.newaxis]
loglikelihood = tf.reduce_mean(
tf.reduce_sum(p * (tf.digamma(tf.reduce_sum(alpha, axis=1, keepdims=True)) - tf.digamma(alpha)), 1,
keepdims=True))
KL_reg = tf.minimum(1.0, tf.cast(global_step / annealing_step, tf.float32)) * KL(
(alpha - 1) * (1 - p) + 1, K)
return loglikelihood + KL_reg
label = tf.one_hot(self.label_ph, self.Dy)
loss = expected_cross_entropy(label, self.alpha, self.Dy, global_step_, self.annealing_step_ph)
self.loss = tf.reduce_mean(loss)
else:
logits = h
loss = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=self.label_ph, logits=logits)
if self.use_weigth:
loss = loss * self.weight_ph
self.loss = tf.reduce_mean(loss)
self.pred = tf.argmax(logits, axis=-1, output_type=tf.int32)
pred_correctness = tf.equal(self.pred, self.label_ph)
self.acc = tf.reduce_mean(tf.cast(pred_correctness, tf.float32))
optimizer = tf.train.AdamOptimizer(learning_rate=self.lr)
self.train_op = optimizer.minimize(self.loss, global_step=global_step_)
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
self.sess = tf.Session(config=config)
tf.keras.backend.set_session(self.sess)
self.sess.run(tf.global_variables_initializer())
# self.writer = tf.summary.FileWriter(self.rslts_dir)
def _save_model(self, epoch_num):
model_dir = os.path.join(self.rslts_dir, "models")
os.makedirs(model_dir, exist_ok=True)
params = {}
for i, module in enumerate([self.scan_encoder, self.gp_encoder, self.classify_layers]):
for j, layer in enumerate(module):
weights = layer.get_weights()
params["weights_{}_{}".format(i, j)] = weights
with open(os.path.join(model_dir, "model_{}.pickle".format(epoch_num)), "wb") as f:
pickle.dump(params, f, protocol=2)
def _load_model(self, fname):
if not self.mode_inited:
self._init_model()
self.mode_inited = True
with open(fname, "rb") as f:
params = pickle.load(f)
for i, module in enumerate([self.scan_encoder, self.gp_encoder, self.classify_layers]):
if module == self.gp_encoder and self.gp_Dhs == [0]:
continue
for j, layer in enumerate(module):
weights = params["weights_{}_{}".format(i, j)]
layer.set_weights(weights)
def _data_augment(self, scans, gps, training=False):
# scans.shape = (batch_size, D_scan)
# gps.shape = (batch_size, 10, 2)
scan_ori, gp_ori = scans.copy(), gps.copy()
scans = scans.copy()
scans = np.clip(scans, 0, self.clipping)
scans /= self.clipping
data_valid = np.full(len(scans), True)
if training:
batch_size = scans.shape[0]
if self.flipping:
is_flipped = np.random.rand(batch_size) < 0.5
scans[is_flipped] = np.flip(scans[is_flipped], axis=-1)
gps[is_flipped, :, 1] = -gps[is_flipped, :, 1]
if self.centering_on_gp:
theta = np.arctan2(gps[:, :, 1], gps[:, :, 0])
avg_theta = np.mean(theta[:, :5], axis=-1)
start_idxes = (avg_theta / SCAN_RANGE * SCAN_NUM).astype(int) + SCAN_NUM // 2 - self.Dx // 2
data_valid = np.logical_and(start_idxes >= 0, start_idxes + self.Dx < SCAN_NUM)
start_idxes[np.logical_not(data_valid)] = 0
scans = np.array([scans[i, idx:idx + self.Dx] for i, idx in enumerate(start_idxes)])
if self.cropping:
theta_noise = np.random.uniform(-self.theta_noise_scale, self.theta_noise_scale, size=batch_size)
theta_noise = theta_noise / 180 * np.pi
start_idxes = (theta_noise / SCAN_RANGE * SCAN_NUM).astype(int) + (SCAN_NUM - self.Dx) // 2
theta = np.arctan2(gps[:, :, 1], gps[:, :, 0])
r = np.linalg.norm(gps, axis=-1)
theta = theta - theta_noise[:, np.newaxis]
gps = np.stack([r * np.cos(theta), r * np.sin(theta)], axis=-1)
scans = np.array([scans[i, idx:idx + self.Dx] for i, idx in enumerate(start_idxes)])
if self.noise:
scale = self.noise_scale / self.clipping
scan_noise = np.random.uniform(-scale, scale, size=scans.shape)
gp_noise = np.random.uniform(-scale, scale, size=gps.shape)
scans, gps = scans + scan_noise, gps + gp_noise
else:
if self.centering_on_gp:
thetas = np.arctan2(gps[:, :, 1], gps[:, :, 0])[:, :5]
avg_thetas = np.zeros(thetas.shape[0])
for i, theta in enumerate(thetas):
if np.any(theta < -SCAN_RANGE / 2) and np.any(theta > SCAN_RANGE / 2):
theta[theta < 0] += 2 * np.pi
avg_theta = np.mean(theta)
if avg_theta > np.pi:
avg_theta -= 2 * np.pi
else:
avg_theta = np.mean(theta)
avg_thetas[i] = avg_theta
start_idxes = (avg_thetas / SCAN_RANGE * SCAN_NUM).astype(int) + SCAN_NUM // 2 - self.Dx // 2
new_scans = np.zeros((scans.shape[0], self.Dx))
for i, (idx, scan) in enumerate(zip(start_idxes, scans)):
if idx < 0 or idx + self.Dx >= len(scan):
data_valid[i] = False
new_scans[i] = np.zeros(self.Dx)
else:
new_scans[i] = scan[idx:idx + self.Dx]
scans = new_scans
if self.cropping:
i = (SCAN_NUM - self.Dx) // 2
scans = scans[:, i:i + self.Dx]
if not self.centering_on_gp:
thetas = np.arctan2(gps[:, :, 1], gps[:, :, 0])[:, :5]
avg_thetas = np.zeros(thetas.shape[0])
for i, theta in enumerate(thetas):
if np.any(theta < -SCAN_RANGE / 2) and np.any(theta > SCAN_RANGE / 2):
theta[theta < 0] += 2 * np.pi
avg_theta = np.mean(theta)
if avg_theta > np.pi:
avg_theta -= 2 * np.pi
else:
avg_theta = np.mean(theta)
avg_thetas[i] = avg_theta
data_valid = np.abs(avg_thetas) < np.pi / 3
# for x1, x2, x3, x4 in zip(scans, gps, scan_ori, gp_ori):
# plt.polar(np.linspace(-SCAN_RANGE / 2, SCAN_RANGE / 2, len(x3)), x3)
# x4 = np.concatenate([np.zeros((1, 2)), x4], axis=0)
# plt.polar(np.arctan2(x4[:, 1], x4[:, 0]), np.linalg.norm(x4, axis=-1))
#
# x1 *= self.clipping
# plt.polar(np.linspace(-SCAN_RANGE / 2, SCAN_RANGE / 2, len(x1)) * self.Dx / SCAN_NUM, x1)
# x2 = np.concatenate([np.zeros((1, 2)), x2], axis=0)
# plt.polar(np.arctan2(x2[:, 1], x2[:, 0]), np.linalg.norm(x2, axis=-1))
#
# plt.gca().set_theta_zero_location("N")
# plt.gca().set_ylim([0, self.clipping])
# plt.show()
return scans, gps, data_valid
def _translation(self, scans, gps):
def find_intersect(x1, y1, x2, y2, angle):
A1, B1, C1 = y1 - y2, x2 - x1, x2 * y1 - x1 * y2
A2, B2, C2 = np.tan(angle), -1, 0
x = (B2 * C1 - B1 * C2) / (A1 * B2 - A2 * B1)
y = (A1 * C2 - A2 * C1) / (A1 * B2 - A2 * B1)
return x, y
new_scans, new_gps = [], []
for scan, gp in zip(scans, gps):
trans_x, trans_y = np.random.uniform(low=-self.translation_scale, high=self.translation_scale, size=2)
# new scan
angles = np.linspace(-SCAN_RANGE / 2, SCAN_RANGE / 2, len(scan))
x = np.cos(angles) * scan
y = np.sin(angles) * scan
x -= trans_x
y -= trans_y
new_angles = np.arctan2(y, x)
new_scan = []
for angle_i in angles:
scan_len = []
for j in range(len(new_angles) - 1):
new_angle_j = new_angles[j]
new_angle_jp1 = new_angles[j + 1]
if (new_angle_j - angle_i) * (new_angle_jp1 - angle_i) > 0:
# no intersection
continue
x_j, y_j = x[j], y[j]
x_jp1, y_jp1 = x[j + 1], y[j + 1]
if (new_angle_j - angle_i) * (new_angle_jp1 - angle_i) < 0:
# exists intersection, find out where it is
if np.sqrt((x_j - x_jp1) ** 2 + (y_j - y_jp1) ** 2) < 0.5:
# two points are close, they are on the same object
inter_x, inter_y = find_intersect(x_j, y_j, x_jp1, y_jp1, angle_i)
else:
# two are far away from each other, on the different object
if (x_j ** 2 + y_j ** 2) > (x_jp1 ** 2 + y_jp1 ** 2):
if j > 0:
inter_x, inter_y = find_intersect(x_j, y_j, x[j - 1], y[j - 1], angle_i)
else:
inter_x, inter_y = x_j, y_j
else:
if j < len(new_angles) - 2:
inter_x, inter_y = find_intersect(x_jp1, y_jp1, x[j + 2], y[j + 2], angle_i)
else:
inter_x, inter_y = x_jp1, y_jp1
scan_len.append(np.sqrt(inter_x ** 2 + inter_y ** 2))
else:
if new_angle_j == angle_i:
scan_len.append(np.sqrt(x_j ** 2 + y_j ** 2))
else:
scan_len.append(np.sqrt(x_j ** 2 + y_j ** 2))
if len(scan_len):
new_scan.append(np.min(scan_len))
else:
# no intersection found
angle_diff = np.abs(new_angles - angle_i)
idx1, idx2 = np.argsort(angle_diff)[:2]
inter_x, inter_y = find_intersect(x[idx1], y[idx1], x[idx2], y[idx2], angle_i)
new_scan.append(np.sqrt(inter_x ** 2 + inter_y ** 2))
new_scans.append(new_scan)
# new gp
new_gp = gp - np.array([trans_x, trans_y])
new_gp[:4] += np.linspace(1, 0, 5, endpoint=False)[1:, np.newaxis] * np.array([trans_x, trans_y])
new_gps.append(new_gp)
return np.array(new_scans), np.array(new_gps)
def train(self, X_train, y_train, X_test, y_test):
if not self.mode_inited:
self._init_model()
self.mode_inited = True
scan_train, gp_train = X_train
scan_test, gp_test = X_test
if self.translation:
with open("translation_aug.pkl", "rb") as f:
d = pickle.load(f)
(scan_aug_train_full, gp_aug_train_full), y_aug_train_full = d['X_train'], d['y_train']
(scan_aug_test_full, gp_aug_test_full), y_aug_test_full = d['X_test'], d['y_test']
full_contexts = ["curve", "open_space", "U_turn", "narrow_entrance", "narrow_corridor",
"normal_1", "normal_2"]
scan_aug_train, gp_aug_train, y_aug_train = [], [], []
scan_aug_test, gp_aug_test, y_aug_test = [], [], []
for y, ctx in enumerate(self.used_context):
y_label = full_contexts.index(ctx)
scan_aug_train.extend(scan_aug_train_full[y_aug_train_full == y_label])
gp_aug_train.extend(gp_aug_train_full[y_aug_train_full == y_label])
y_aug_train.extend([y] * np.sum(y_aug_train_full == y_label))
scan_aug_test.extend(scan_aug_test_full[y_aug_test_full == y_label])
gp_aug_test.extend(gp_aug_test_full[y_aug_test_full == y_label])
y_aug_test.extend([y] * np.sum(y_aug_test_full == y_label))
scans, gps, ys = shuffle(np.concatenate([scan_aug_train, scan_aug_test]),
np.concatenate([gp_aug_train, gp_aug_test]),
np.concatenate([y_aug_train, y_aug_test]))
num_train = int(len(scans) * 0.8)
scan_aug_train, gp_aug_train, y_aug_train = scans[:num_train], gps[:num_train], ys[:num_train]
scan_aug_test, gp_aug_test, y_aug_test = scans[num_train:], gps[num_train:], ys[num_train:]
if self.full_train:
scan_train = np.concatenate([scan_train, scan_aug_train, scan_aug_test], axis=0)
gp_train = np.concatenate([gp_train, gp_aug_train, gp_aug_test], axis=0)
y_train = np.concatenate([y_train, y_aug_train, y_aug_test], axis=0)
else:
scan_train = np.concatenate([scan_train, scan_aug_train], axis=0)
gp_train = np.concatenate([gp_train, gp_aug_train], axis=0)
y_train = np.concatenate([y_train, y_aug_train], axis=0)
scan_test = np.concatenate([scan_test, scan_aug_test], axis=0)
gp_test = np.concatenate([gp_test, gp_aug_test], axis=0)
y_test = np.concatenate([y_test, y_aug_test], axis=0)
labels, counts = np.unique(y_train, return_counts=True)
weights = counts / counts.sum() * len(counts)
weights = dict(zip(*[labels, weights]))
sess = self.sess
self.writer.add_graph(sess.graph)
for i in range(self.epochs):
train_acc, train_loss, test_acc, test_loss = [], [], [], []
train_confs = []
scan_train, gp_train, y_train = shuffle(scan_train, gp_train, y_train)
for j in range(len(scan_train) // self.batch_size + 1):
batch_scan = scan_train[j:j + self.batch_size]
batch_gp = gp_train[j:j + self.batch_size]
batch_y = y_train[j:j + self.batch_size]
batch_w = np.array([weights[y] for y in batch_y])
batch_scan, batch_gp, batch_valid = self._data_augment(batch_scan, batch_gp, training=True)
batch_scan, batch_gp, batch_y, batch_w = \
batch_scan[batch_valid], batch_gp[batch_valid], batch_y[batch_valid], batch_w[batch_valid]
loss, acc, _ = sess.run([self.loss, self.acc, self.train_op],
feed_dict={self.scan_ph: batch_scan,
self.gp_ph: batch_gp,
self.label_ph: batch_y,
self.weight_ph: batch_w,
self.annealing_step_ph: 1 * (len(X_train) // self.batch_size + 1),
self.dropout_rate_ph: self.dropout_rate})
if self.use_EDL:
conf = sess.run(self.confidence,
feed_dict={self.scan_ph: batch_scan,
self.gp_ph: batch_gp,
self.label_ph: batch_y,
self.annealing_step_ph: 1 * (len(X_train) // self.batch_size + 1),
self.dropout_rate_ph: 0.0})
train_confs.extend(conf)
train_loss.extend([loss] * self.batch_size)
train_acc.extend([acc] * self.batch_size)
test_pred = []
test_confs = []
for j in range(0, len(scan_test), self.batch_size):
batch_scan = scan_test[j:j + self.batch_size]
batch_gp = gp_test[j:j + self.batch_size]
batch_y = y_test[j:j + self.batch_size]
batch_w = np.array([weights[y] for y in batch_y])
batch_scan, batch_gp, batch_valid = self._data_augment(batch_scan, batch_gp, training=False)
# batch_scan, batch_gp, batch_y = batch_scan[batch_valid], batch_gp[batch_valid], batch_y[batch_valid]
pred, loss, acc = sess.run([self.pred, self.loss, self.acc],
feed_dict={self.scan_ph: batch_scan,
self.gp_ph: batch_gp,
self.label_ph: batch_y,
self.weight_ph: batch_w,
self.annealing_step_ph: 2 ** 31 - 1,
self.dropout_rate_ph: 0.0})
if self.use_EDL:
conf = sess.run(self.confidence, feed_dict={self.scan_ph: batch_scan,
self.gp_ph: batch_gp,
self.label_ph: batch_y,
self.annealing_step_ph: 2 ** 31 - 1,
self.dropout_rate_ph: 0.0})
test_confs.extend(conf)
test_loss.extend([loss] * len(batch_scan))
test_acc.extend([acc] * len(batch_scan))
test_pred.extend(pred)
test_pred = np.array(test_pred)
test_confs = np.array(test_confs)
train_acc, train_loss = np.mean(train_acc), np.mean(train_loss)
test_acc, test_loss = np.mean(test_acc), np.mean(test_loss)
summary = tf.Summary(value=[tf.Summary.Value(tag="train/loss", simple_value=train_loss),
tf.Summary.Value(tag="train/acc", simple_value=train_acc),
tf.Summary.Value(tag="test/loss", simple_value=test_loss),
tf.Summary.Value(tag="test/acc", simple_value=test_acc)])
self.writer.add_summary(summary, i)
if self.use_EDL:
summary = tf.Summary(value=[tf.Summary.Value(tag="train/conf", simple_value=np.mean(train_confs)),
tf.Summary.Value(tag="test/conf", simple_value=np.mean(test_confs))])
self.writer.add_summary(summary, i)
summary_val = []
for j in range(self.Dy):
acc = np.mean(y_test[y_test == j] == test_pred[y_test == j])
summary_val.append(tf.Summary.Value(tag="test_acc/{}".format(j), simple_value=acc))
if self.use_EDL:
conf = np.mean(test_confs[y_test == j])
summary_val.append(tf.Summary.Value(tag="test_conf/{}".format(j), simple_value=conf))
self.writer.add_summary(tf.Summary(value=summary_val), i)
if (i + 1) % 10 == 0:
self._save_model(epoch_num=i + 1)
def predict(self, scan, gp):
in_batch = len(scan.shape) > 1
if not in_batch:
scan, gp = np.array([scan]), np.array([gp])
scan, gp, valid = self._data_augment(scan, gp, training=False)
if self.use_EDL:
pred, confidence = self.sess.run([self.pred, self.confidence], feed_dict={self.scan_ph: scan,
self.gp_ph: gp,
self.dropout_rate_ph: 0.0})
confidence[np.logical_not(valid)] = 0.0
if not in_batch:
pred, confidence = pred[0], confidence[0]
return pred, confidence
else:
pred = self.sess.run(self.pred, feed_dict={self.scan_ph: scan, self.gp_ph: gp, self.dropout_rate_ph: 0.0})
if not in_batch:
pred = pred[0]
return pred
def main():
args = make_args()
X_train, y_train, X_test, y_test = get_dataset(args, draw_data=False)
model = ScanClassifier(args)
model.train(X_train, y_train, X_test, y_test)
# model._load_model("../rslts/2020-10-14-15-21-06/models/model_500.pickle")
# print(np.unique(model.predict(X_test[0][y_test == 4], X_test[1][y_test == 4])[0], return_counts=True))
# print(np.unique(model.predict(X_train[0][y_train == 4], X_train[1][y_train == 4])[0], return_counts=True))
# print(np.unique(model.predict(X_test[0][y_test == 5], X_test[1][y_test == 5])[0], return_counts=True))
# print(np.unique(model.predict(X_train[0][y_train == 5], X_train[1][y_train == 5])[0], return_counts=True))
if __name__ == "__main__":
main()
| [
"numpy.clip",
"tensorflow.equal",
"numpy.sqrt",
"numpy.random.rand",
"tensorflow.reduce_sum",
"numpy.logical_not",
"tensorflow.nn.sparse_softmax_cross_entropy_with_logits",
"numpy.array",
"numpy.argsort",
"tensorflow.keras.layers.Dense",
"numpy.arctan2",
"tensorflow.nn.softplus",
"tensorflow... | [((357, 382), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (380, 382), False, 'import argparse\n'), ((982, 1024), 'os.makedirs', 'os.makedirs', (['args.rslts_dir'], {'exist_ok': '(True)'}), '(args.rslts_dir, exist_ok=True)\n', (993, 1024), False, 'import os\n'), ((1548, 1570), 'matplotlib.pyplot.savefig', 'plt.savefig', (['save_path'], {}), '(save_path)\n', (1559, 1570), True, 'import matplotlib.pyplot as plt\n'), ((1576, 1592), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (1585, 1592), True, 'import matplotlib.pyplot as plt\n'), ((2581, 2598), 'numpy.array', 'np.array', (['gp_clip'], {}), '(gp_clip)\n', (2589, 2598), True, 'import numpy as np\n'), ((4592, 4624), 'numpy.array', 'np.array', (['(scan_train + scan_test)'], {}), '(scan_train + scan_test)\n', (4600, 4624), True, 'import numpy as np\n'), ((4634, 4660), 'numpy.array', 'np.array', (['(y_train + y_test)'], {}), '(y_train + y_test)\n', (4642, 4660), True, 'import numpy as np\n'), ((4681, 4701), 'numpy.array', 'np.array', (['scan_train'], {}), '(scan_train)\n', (4689, 4701), True, 'import numpy as np\n'), ((4718, 4736), 'numpy.array', 'np.array', (['gp_train'], {}), '(gp_train)\n', (4726, 4736), True, 'import numpy as np\n'), ((4752, 4769), 'numpy.array', 'np.array', (['y_train'], {}), '(y_train)\n', (4760, 4769), True, 'import numpy as np\n'), ((4787, 4806), 'numpy.array', 'np.array', (['scan_test'], {}), '(scan_test)\n', (4795, 4806), True, 'import numpy as np\n'), ((4822, 4839), 'numpy.array', 'np.array', (['gp_test'], {}), '(gp_test)\n', (4830, 4839), True, 'import numpy as np\n'), ((4854, 4870), 'numpy.array', 'np.array', (['y_test'], {}), '(y_test)\n', (4862, 4870), True, 'import numpy as np\n'), ((1077, 1136), 'os.path.join', 'os.path.join', (['args.rslts_dir', 'args.training_params_filename'], {}), '(args.rslts_dir, args.training_params_filename)\n', (1089, 1136), False, 'import os\n'), ((1394, 1424), 'numpy.arctan2', 
'np.arctan2', (['gp[:, 1]', 'gp[:, 0]'], {}), '(gp[:, 1], gp[:, 0])\n', (1404, 1424), True, 'import numpy as np\n'), ((1426, 1453), 'numpy.linalg.norm', 'np.linalg.norm', (['gp'], {'axis': '(-1)'}), '(gp, axis=-1)\n', (1440, 1453), True, 'import numpy as np\n'), ((2296, 2324), 'numpy.linalg.norm', 'np.linalg.norm', (['(pt - pt_next)'], {}), '(pt - pt_next)\n', (2310, 2324), True, 'import numpy as np\n'), ((2828, 2872), 'os.path.join', 'os.path.join', (['"""../bag_files"""', "(fname + '.pkl')"], {}), "('../bag_files', fname + '.pkl')\n", (2840, 2872), False, 'import os\n'), ((4989, 5001), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (4999, 5001), True, 'import matplotlib.pyplot as plt\n'), ((5066, 5102), 'matplotlib.pyplot.title', 'plt.title', (['"""Laser scan distribution"""'], {}), "('Laser scan distribution')\n", (5075, 5102), True, 'import matplotlib.pyplot as plt\n'), ((5112, 5132), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[0, 10000]'], {}), '([0, 10000])\n', (5120, 5132), True, 'import matplotlib.pyplot as plt\n'), ((5142, 5188), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""../data_plots/scan_distribution"""'], {}), "('../data_plots/scan_distribution')\n", (5153, 5188), True, 'import matplotlib.pyplot as plt\n'), ((5200, 5212), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (5210, 5212), True, 'import matplotlib.pyplot as plt\n'), ((5241, 5273), 'numpy.unique', 'np.unique', (['y'], {'return_counts': '(True)'}), '(y, return_counts=True)\n', (5250, 5273), True, 'import numpy as np\n'), ((5283, 5306), 'matplotlib.pyplot.bar', 'plt.bar', (['unique', 'counts'], {}), '(unique, counts)\n', (5290, 5306), True, 'import matplotlib.pyplot as plt\n'), ((5316, 5347), 'matplotlib.pyplot.title', 'plt.title', (['"""label_distribution"""'], {}), "('label_distribution')\n", (5325, 5347), True, 'import matplotlib.pyplot as plt\n'), ((5357, 5404), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""../data_plots/label_distribution"""'], {}), 
"('../data_plots/label_distribution')\n", (5368, 5404), True, 'import matplotlib.pyplot as plt\n'), ((6700, 6738), 'tensorflow.keras.backend.set_learning_phase', 'tf.keras.backend.set_learning_phase', (['(1)'], {}), '(1)\n', (6735, 6738), True, 'import tensorflow as tf\n'), ((6763, 6812), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '(None, self.Dx)'}), '(tf.float32, shape=(None, self.Dx))\n', (6777, 6812), True, 'import tensorflow as tf\n'), ((6835, 6882), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '(None, 10, 2)'}), '(tf.float32, shape=(None, 10, 2))\n', (6849, 6882), True, 'import tensorflow as tf\n'), ((6908, 6947), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32'], {'shape': '(None,)'}), '(tf.int32, shape=(None,))\n', (6922, 6947), True, 'import tensorflow as tf\n'), ((6982, 7012), 'tensorflow.placeholder', 'tf.placeholder', ([], {'dtype': 'tf.int32'}), '(dtype=tf.int32)\n', (6996, 7012), True, 'import tensorflow as tf\n'), ((7045, 7077), 'tensorflow.placeholder', 'tf.placeholder', ([], {'dtype': 'tf.float32'}), '(dtype=tf.float32)\n', (7059, 7077), True, 'import tensorflow as tf\n'), ((7104, 7145), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '(None,)'}), '(tf.float32, shape=(None,))\n', (7118, 7145), True, 'import tensorflow as tf\n'), ((8520, 8554), 'tensorflow.concat', 'tf.concat', (['[scan_h, gp_h]'], {'axis': '(-1)'}), '([scan_h, gp_h], axis=-1)\n', (8529, 8554), True, 'import tensorflow as tf\n'), ((8780, 8845), 'tensorflow.Variable', 'tf.Variable', ([], {'initial_value': '(0)', 'name': '"""global_step"""', 'trainable': '(False)'}), "(initial_value=0, name='global_step', trainable=False)\n", (8791, 8845), True, 'import tensorflow as tf\n'), ((11027, 11061), 'tensorflow.equal', 'tf.equal', (['self.pred', 'self.label_ph'], {}), '(self.pred, self.label_ph)\n', (11035, 11061), True, 'import tensorflow as tf\n'), ((11159, 11204), 'tensorflow.train.AdamOptimizer', 
'tf.train.AdamOptimizer', ([], {'learning_rate': 'self.lr'}), '(learning_rate=self.lr)\n', (11181, 11204), True, 'import tensorflow as tf\n'), ((11306, 11322), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {}), '()\n', (11320, 11322), True, 'import tensorflow as tf\n'), ((11392, 11417), 'tensorflow.Session', 'tf.Session', ([], {'config': 'config'}), '(config=config)\n', (11402, 11417), True, 'import tensorflow as tf\n'), ((11427, 11466), 'tensorflow.keras.backend.set_session', 'tf.keras.backend.set_session', (['self.sess'], {}), '(self.sess)\n', (11455, 11466), True, 'import tensorflow as tf\n'), ((11652, 11690), 'os.path.join', 'os.path.join', (['self.rslts_dir', '"""models"""'], {}), "(self.rslts_dir, 'models')\n", (11664, 11690), False, 'import os\n'), ((11700, 11737), 'os.makedirs', 'os.makedirs', (['model_dir'], {'exist_ok': '(True)'}), '(model_dir, exist_ok=True)\n', (11711, 11737), False, 'import os\n'), ((12967, 12999), 'numpy.clip', 'np.clip', (['scans', '(0)', 'self.clipping'], {}), '(scans, 0, self.clipping)\n', (12974, 12999), True, 'import numpy as np\n'), ((24093, 24131), 'numpy.unique', 'np.unique', (['y_train'], {'return_counts': '(True)'}), '(y_train, return_counts=True)\n', (24102, 24131), True, 'import numpy as np\n'), ((940, 974), 'time.strftime', 'time.strftime', (['"""%Y-%m-%d-%H-%M-%S"""'], {}), "('%Y-%m-%d-%H-%M-%S')\n", (953, 974), False, 'import time\n'), ((1348, 1364), 'numpy.zeros', 'np.zeros', (['(1, 2)'], {}), '((1, 2))\n', (1356, 1364), True, 'import numpy as np\n'), ((1460, 1469), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (1467, 1469), True, 'import matplotlib.pyplot as plt\n'), ((1504, 1513), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (1511, 1513), True, 'import matplotlib.pyplot as plt\n'), ((2889, 2911), 'os.path.exists', 'os.path.exists', (['fname_'], {}), '(fname_)\n', (2903, 2911), False, 'import os\n'), ((3051, 3084), 'pickle.load', 'pickle.load', (['f'], {'encoding': '"""latin1"""'}), "(f, 
encoding='latin1')\n", (3062, 3084), False, 'import pickle\n'), ((3334, 3373), 'os.path.join', 'os.path.join', (['""".."""', '"""data_plots"""', 'fname'], {}), "('..', 'data_plots', fname)\n", (3346, 3373), False, 'import os\n'), ((3387, 3422), 'os.makedirs', 'os.makedirs', (['fig_dir'], {'exist_ok': '(True)'}), '(fig_dir, exist_ok=True)\n', (3398, 3422), False, 'import os\n'), ((4136, 4155), 'sklearn.utils.shuffle', 'shuffle', (['scans', 'gps'], {}), '(scans, gps)\n', (4143, 4155), False, 'from sklearn.utils import shuffle\n'), ((7799, 7824), 'tensorflow.keras.layers.Flatten', 'tf.keras.layers.Flatten', ([], {}), '()\n', (7822, 7824), True, 'import tensorflow as tf\n'), ((7979, 8029), 'tensorflow.keras.layers.Dropout', 'tf.keras.layers.Dropout', ([], {'rate': 'self.dropout_rate_ph'}), '(rate=self.dropout_rate_ph)\n', (8002, 8029), True, 'import tensorflow as tf\n'), ((8068, 8098), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['self.Dy'], {}), '(self.Dy)\n', (8089, 8098), True, 'import tensorflow as tf\n'), ((8339, 8367), 'tensorflow.zeros_like', 'tf.zeros_like', (['scan_h[:, :0]'], {}), '(scan_h[:, :0])\n', (8352, 8367), True, 'import tensorflow as tf\n'), ((8901, 8918), 'tensorflow.nn.softplus', 'tf.nn.softplus', (['h'], {}), '(h)\n', (8915, 8918), True, 'import tensorflow as tf\n'), ((9205, 9257), 'tensorflow.argmax', 'tf.argmax', (['self.alpha'], {'axis': '(-1)', 'output_type': 'tf.int32'}), '(self.alpha, axis=-1, output_type=tf.int32)\n', (9214, 9257), True, 'import tensorflow as tf\n'), ((10461, 10495), 'tensorflow.one_hot', 'tf.one_hot', (['self.label_ph', 'self.Dy'], {}), '(self.label_ph, self.Dy)\n', (10471, 10495), True, 'import tensorflow as tf\n'), ((10630, 10650), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['loss'], {}), '(loss)\n', (10644, 10650), True, 'import tensorflow as tf\n'), ((10714, 10802), 'tensorflow.nn.sparse_softmax_cross_entropy_with_logits', 'tf.nn.sparse_softmax_cross_entropy_with_logits', ([], {'labels': 
'self.label_ph', 'logits': 'logits'}), '(labels=self.label_ph, logits\n =logits)\n', (10760, 10802), True, 'import tensorflow as tf\n'), ((10902, 10922), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['loss'], {}), '(loss)\n', (10916, 10922), True, 'import tensorflow as tf\n'), ((10948, 10996), 'tensorflow.argmax', 'tf.argmax', (['logits'], {'axis': '(-1)', 'output_type': 'tf.int32'}), '(logits, axis=-1, output_type=tf.int32)\n', (10957, 10996), True, 'import tensorflow as tf\n'), ((11097, 11134), 'tensorflow.cast', 'tf.cast', (['pred_correctness', 'tf.float32'], {}), '(pred_correctness, tf.float32)\n', (11104, 11134), True, 'import tensorflow as tf\n'), ((11490, 11523), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (11521, 11523), True, 'import tensorflow as tf\n'), ((12123, 12157), 'pickle.dump', 'pickle.dump', (['params', 'f'], {'protocol': '(2)'}), '(params, f, protocol=2)\n', (12134, 12157), False, 'import pickle\n'), ((12358, 12372), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (12369, 12372), False, 'import pickle\n'), ((16349, 16374), 'numpy.zeros', 'np.zeros', (['thetas.shape[0]'], {}), '(thetas.shape[0])\n', (16357, 16374), True, 'import numpy as np\n'), ((18127, 18214), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(-self.translation_scale)', 'high': 'self.translation_scale', 'size': '(2)'}), '(low=-self.translation_scale, high=self.translation_scale,\n size=2)\n', (18144, 18214), True, 'import numpy as np\n'), ((18469, 18485), 'numpy.arctan2', 'np.arctan2', (['y', 'x'], {}), '(y, x)\n', (18479, 18485), True, 'import numpy as np\n'), ((21266, 21285), 'numpy.array', 'np.array', (['new_scans'], {}), '(new_scans)\n', (21274, 21285), True, 'import numpy as np\n'), ((21287, 21304), 'numpy.array', 'np.array', (['new_gps'], {}), '(new_gps)\n', (21295, 21304), True, 'import numpy as np\n'), ((24493, 24531), 'sklearn.utils.shuffle', 'shuffle', (['scan_train', 'gp_train', 'y_train'], {}), 
'(scan_train, gp_train, y_train)\n', (24500, 24531), False, 'from sklearn.utils import shuffle\n'), ((28287, 28306), 'numpy.array', 'np.array', (['test_pred'], {}), '(test_pred)\n', (28295, 28306), True, 'import numpy as np\n'), ((28333, 28353), 'numpy.array', 'np.array', (['test_confs'], {}), '(test_confs)\n', (28341, 28353), True, 'import numpy as np\n'), ((1888, 1902), 'numpy.ones', 'np.ones', (['(j - i)'], {}), '(j - i)\n', (1895, 1902), True, 'import numpy as np\n'), ((7597, 7622), 'tensorflow.keras.layers.Flatten', 'tf.keras.layers.Flatten', ([], {}), '()\n', (7620, 7622), True, 'import tensorflow as tf\n'), ((7894, 7938), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['Dh'], {'activation': '"""relu"""'}), "(Dh, activation='relu')\n", (7915, 7938), True, 'import tensorflow as tf\n'), ((9005, 9039), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['self.alpha'], {'axis': '(-1)'}), '(self.alpha, axis=-1)\n', (9018, 9039), True, 'import tensorflow as tf\n'), ((9130, 9179), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['self.alpha'], {'axis': '(-1)', 'keepdims': '(True)'}), '(self.alpha, axis=-1, keepdims=True)\n', (9143, 9179), True, 'import tensorflow as tf\n'), ((9389, 9432), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['alpha'], {'axis': '(1)', 'keepdims': '(True)'}), '(alpha, axis=1, keepdims=True)\n', (9402, 9432), True, 'import tensorflow as tf\n'), ((13276, 13311), 'numpy.flip', 'np.flip', (['scans[is_flipped]'], {'axis': '(-1)'}), '(scans[is_flipped], axis=-1)\n', (13283, 13311), True, 'import numpy as np\n'), ((13439, 13477), 'numpy.arctan2', 'np.arctan2', (['gps[:, :, 1]', 'gps[:, :, 0]'], {}), '(gps[:, :, 1], gps[:, :, 0])\n', (13449, 13477), True, 'import numpy as np\n'), ((13507, 13537), 'numpy.mean', 'np.mean', (['theta[:, :5]'], {'axis': '(-1)'}), '(theta[:, :5], axis=-1)\n', (13514, 13537), True, 'import numpy as np\n'), ((13678, 13744), 'numpy.logical_and', 'np.logical_and', (['(start_idxes >= 0)', '(start_idxes + self.Dx < SCAN_NUM)'], 
{}), '(start_idxes >= 0, start_idxes + self.Dx < SCAN_NUM)\n', (13692, 13744), True, 'import numpy as np\n'), ((13970, 14058), 'numpy.random.uniform', 'np.random.uniform', (['(-self.theta_noise_scale)', 'self.theta_noise_scale'], {'size': 'batch_size'}), '(-self.theta_noise_scale, self.theta_noise_scale, size=\n batch_size)\n', (13987, 14058), True, 'import numpy as np\n'), ((14245, 14283), 'numpy.arctan2', 'np.arctan2', (['gps[:, :, 1]', 'gps[:, :, 0]'], {}), '(gps[:, :, 1], gps[:, :, 0])\n', (14255, 14283), True, 'import numpy as np\n'), ((14305, 14333), 'numpy.linalg.norm', 'np.linalg.norm', (['gps'], {'axis': '(-1)'}), '(gps, axis=-1)\n', (14319, 14333), True, 'import numpy as np\n'), ((14693, 14743), 'numpy.random.uniform', 'np.random.uniform', (['(-scale)', 'scale'], {'size': 'scans.shape'}), '(-scale, scale, size=scans.shape)\n', (14710, 14743), True, 'import numpy as np\n'), ((14772, 14820), 'numpy.random.uniform', 'np.random.uniform', (['(-scale)', 'scale'], {'size': 'gps.shape'}), '(-scale, scale, size=gps.shape)\n', (14789, 14820), True, 'import numpy as np\n'), ((15041, 15066), 'numpy.zeros', 'np.zeros', (['thetas.shape[0]'], {}), '(thetas.shape[0])\n', (15049, 15066), True, 'import numpy as np\n'), ((15683, 15718), 'numpy.zeros', 'np.zeros', (['(scans.shape[0], self.Dx)'], {}), '((scans.shape[0], self.Dx))\n', (15691, 15718), True, 'import numpy as np\n'), ((16277, 16315), 'numpy.arctan2', 'np.arctan2', (['gps[:, :, 1]', 'gps[:, :, 0]'], {}), '(gps[:, :, 1], gps[:, :, 0])\n', (16287, 16315), True, 'import numpy as np\n'), ((16841, 16859), 'numpy.abs', 'np.abs', (['avg_thetas'], {}), '(avg_thetas)\n', (16847, 16859), True, 'import numpy as np\n'), ((17850, 17863), 'numpy.tan', 'np.tan', (['angle'], {}), '(angle)\n', (17856, 17863), True, 'import numpy as np\n'), ((18330, 18344), 'numpy.cos', 'np.cos', (['angles'], {}), '(angles)\n', (18336, 18344), True, 'import numpy as np\n'), ((18369, 18383), 'numpy.sin', 'np.sin', (['angles'], {}), '(angles)\n', 
(18375, 18383), True, 'import numpy as np\n'), ((21072, 21100), 'numpy.array', 'np.array', (['[trans_x, trans_y]'], {}), '([trans_x, trans_y])\n', (21080, 21100), True, 'import numpy as np\n'), ((21183, 21211), 'numpy.array', 'np.array', (['[trans_x, trans_y]'], {}), '([trans_x, trans_y])\n', (21191, 21211), True, 'import numpy as np\n'), ((21656, 21670), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (21667, 21670), False, 'import pickle\n'), ((22807, 22854), 'numpy.concatenate', 'np.concatenate', (['[scan_aug_train, scan_aug_test]'], {}), '([scan_aug_train, scan_aug_test])\n', (22821, 22854), True, 'import numpy as np\n'), ((22894, 22937), 'numpy.concatenate', 'np.concatenate', (['[gp_aug_train, gp_aug_test]'], {}), '([gp_aug_train, gp_aug_test])\n', (22908, 22937), True, 'import numpy as np\n'), ((22977, 23018), 'numpy.concatenate', 'np.concatenate', (['[y_aug_train, y_aug_test]'], {}), '([y_aug_train, y_aug_test])\n', (22991, 23018), True, 'import numpy as np\n'), ((23343, 23410), 'numpy.concatenate', 'np.concatenate', (['[scan_train, scan_aug_train, scan_aug_test]'], {'axis': '(0)'}), '([scan_train, scan_aug_train, scan_aug_test], axis=0)\n', (23357, 23410), True, 'import numpy as np\n'), ((23439, 23500), 'numpy.concatenate', 'np.concatenate', (['[gp_train, gp_aug_train, gp_aug_test]'], {'axis': '(0)'}), '([gp_train, gp_aug_train, gp_aug_test], axis=0)\n', (23453, 23500), True, 'import numpy as np\n'), ((23528, 23586), 'numpy.concatenate', 'np.concatenate', (['[y_train, y_aug_train, y_aug_test]'], {'axis': '(0)'}), '([y_train, y_aug_train, y_aug_test], axis=0)\n', (23542, 23586), True, 'import numpy as np\n'), ((23636, 23688), 'numpy.concatenate', 'np.concatenate', (['[scan_train, scan_aug_train]'], {'axis': '(0)'}), '([scan_train, scan_aug_train], axis=0)\n', (23650, 23688), True, 'import numpy as np\n'), ((23717, 23765), 'numpy.concatenate', 'np.concatenate', (['[gp_train, gp_aug_train]'], {'axis': '(0)'}), '([gp_train, gp_aug_train], axis=0)\n', 
(23731, 23765), True, 'import numpy as np\n'), ((23793, 23839), 'numpy.concatenate', 'np.concatenate', (['[y_train, y_aug_train]'], {'axis': '(0)'}), '([y_train, y_aug_train], axis=0)\n', (23807, 23839), True, 'import numpy as np\n'), ((23869, 23919), 'numpy.concatenate', 'np.concatenate', (['[scan_test, scan_aug_test]'], {'axis': '(0)'}), '([scan_test, scan_aug_test], axis=0)\n', (23883, 23919), True, 'import numpy as np\n'), ((23947, 23993), 'numpy.concatenate', 'np.concatenate', (['[gp_test, gp_aug_test]'], {'axis': '(0)'}), '([gp_test, gp_aug_test], axis=0)\n', (23961, 23993), True, 'import numpy as np\n'), ((24020, 24064), 'numpy.concatenate', 'np.concatenate', (['[y_test, y_aug_test]'], {'axis': '(0)'}), '([y_test, y_aug_test], axis=0)\n', (24034, 24064), True, 'import numpy as np\n'), ((24810, 24849), 'numpy.array', 'np.array', (['[weights[y] for y in batch_y]'], {}), '([weights[y] for y in batch_y])\n', (24818, 24849), True, 'import numpy as np\n'), ((26721, 26760), 'numpy.array', 'np.array', (['[weights[y] for y in batch_y]'], {}), '([weights[y] for y in batch_y])\n', (26729, 26760), True, 'import numpy as np\n'), ((28393, 28411), 'numpy.mean', 'np.mean', (['train_acc'], {}), '(train_acc)\n', (28400, 28411), True, 'import numpy as np\n'), ((28413, 28432), 'numpy.mean', 'np.mean', (['train_loss'], {}), '(train_loss)\n', (28420, 28432), True, 'import numpy as np\n'), ((28468, 28485), 'numpy.mean', 'np.mean', (['test_acc'], {}), '(test_acc)\n', (28475, 28485), True, 'import numpy as np\n'), ((28487, 28505), 'numpy.mean', 'np.mean', (['test_loss'], {}), '(test_loss)\n', (28494, 28505), True, 'import numpy as np\n'), ((29365, 29419), 'numpy.mean', 'np.mean', (['(y_test[y_test == j] == test_pred[y_test == j])'], {}), '(y_test[y_test == j] == test_pred[y_test == j])\n', (29372, 29419), True, 'import numpy as np\n'), ((29760, 29789), 'tensorflow.Summary', 'tf.Summary', ([], {'value': 'summary_val'}), '(value=summary_val)\n', (29770, 29789), True, 'import 
tensorflow as tf\n'), ((30008, 30024), 'numpy.array', 'np.array', (['[scan]'], {}), '([scan])\n', (30016, 30024), True, 'import numpy as np\n'), ((30026, 30040), 'numpy.array', 'np.array', (['[gp]'], {}), '([gp])\n', (30034, 30040), True, 'import numpy as np\n'), ((30488, 30509), 'numpy.logical_not', 'np.logical_not', (['valid'], {}), '(valid)\n', (30502, 30509), True, 'import numpy as np\n'), ((1965, 1979), 'numpy.ones', 'np.ones', (['(j - i)'], {}), '(j - i)\n', (1972, 1979), True, 'import numpy as np\n'), ((2020, 2064), 'numpy.linspace', 'np.linspace', (['scan[i - 1]', 'scan[j]', '(j - i + 1)'], {}), '(scan[i - 1], scan[j], j - i + 1)\n', (2031, 2064), True, 'import numpy as np\n'), ((7408, 7496), 'tensorflow.keras.layers.Conv1D', 'tf.keras.layers.Conv1D', (['filter_size', 'kernel_size'], {'strides': 'stride', 'activation': '"""relu"""'}), "(filter_size, kernel_size, strides=stride, activation\n ='relu')\n", (7430, 7496), True, 'import tensorflow as tf\n'), ((7719, 7763), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['Dh'], {'activation': '"""relu"""'}), "(Dh, activation='relu')\n", (7740, 7763), True, 'import tensorflow as tf\n'), ((9327, 9342), 'numpy.ones', 'np.ones', (['(1, K)'], {}), '((1, K))\n', (9334, 9342), True, 'import numpy as np\n'), ((13206, 13232), 'numpy.random.rand', 'np.random.rand', (['batch_size'], {}), '(batch_size)\n', (13220, 13232), True, 'import numpy as np\n'), ((13774, 13800), 'numpy.logical_not', 'np.logical_not', (['data_valid'], {}), '(data_valid)\n', (13788, 13800), True, 'import numpy as np\n'), ((14965, 15003), 'numpy.arctan2', 'np.arctan2', (['gps[:, :, 1]', 'gps[:, :, 0]'], {}), '(gps[:, :, 1], gps[:, :, 0])\n', (14975, 15003), True, 'import numpy as np\n'), ((16443, 16474), 'numpy.any', 'np.any', (['(theta < -SCAN_RANGE / 2)'], {}), '(theta < -SCAN_RANGE / 2)\n', (16449, 16474), True, 'import numpy as np\n'), ((16479, 16509), 'numpy.any', 'np.any', (['(theta > SCAN_RANGE / 2)'], {}), '(theta > SCAN_RANGE / 2)\n', 
(16485, 16509), True, 'import numpy as np\n'), ((16595, 16609), 'numpy.mean', 'np.mean', (['theta'], {}), '(theta)\n', (16602, 16609), True, 'import numpy as np\n'), ((16757, 16771), 'numpy.mean', 'np.mean', (['theta'], {}), '(theta)\n', (16764, 16771), True, 'import numpy as np\n'), ((20714, 20742), 'numpy.abs', 'np.abs', (['(new_angles - angle_i)'], {}), '(new_angles - angle_i)\n', (20720, 20742), True, 'import numpy as np\n'), ((21128, 21164), 'numpy.linspace', 'np.linspace', (['(1)', '(0)', '(5)'], {'endpoint': '(False)'}), '(1, 0, 5, endpoint=False)\n', (21139, 21164), True, 'import numpy as np\n'), ((29583, 29615), 'numpy.mean', 'np.mean', (['test_confs[y_test == j]'], {}), '(test_confs[y_test == j])\n', (29590, 29615), True, 'import numpy as np\n'), ((9768, 9810), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['beta'], {'axis': '(1)', 'keepdims': '(True)'}), '(beta, axis=1, keepdims=True)\n', (9781, 9810), True, 'import tensorflow as tf\n'), ((10283, 10332), 'tensorflow.cast', 'tf.cast', (['(global_step / annealing_step)', 'tf.float32'], {}), '(global_step / annealing_step, tf.float32)\n', (10290, 10332), True, 'import tensorflow as tf\n'), ((15143, 15174), 'numpy.any', 'np.any', (['(theta < -SCAN_RANGE / 2)'], {}), '(theta < -SCAN_RANGE / 2)\n', (15149, 15174), True, 'import numpy as np\n'), ((15179, 15209), 'numpy.any', 'np.any', (['(theta > SCAN_RANGE / 2)'], {}), '(theta > SCAN_RANGE / 2)\n', (15185, 15209), True, 'import numpy as np\n'), ((15303, 15317), 'numpy.mean', 'np.mean', (['theta'], {}), '(theta)\n', (15310, 15317), True, 'import numpy as np\n'), ((15481, 15495), 'numpy.mean', 'np.mean', (['theta'], {}), '(theta)\n', (15488, 15495), True, 'import numpy as np\n'), ((15944, 15961), 'numpy.zeros', 'np.zeros', (['self.Dx'], {}), '(self.Dx)\n', (15952, 15961), True, 'import numpy as np\n'), ((20594, 20610), 'numpy.min', 'np.min', (['scan_len'], {}), '(scan_len)\n', (20600, 20610), True, 'import numpy as np\n'), ((20777, 20799), 'numpy.argsort', 
'np.argsort', (['angle_diff'], {}), '(angle_diff)\n', (20787, 20799), True, 'import numpy as np\n'), ((20941, 20977), 'numpy.sqrt', 'np.sqrt', (['(inter_x ** 2 + inter_y ** 2)'], {}), '(inter_x ** 2 + inter_y ** 2)\n', (20948, 20977), True, 'import numpy as np\n'), ((22483, 22518), 'numpy.sum', 'np.sum', (['(y_aug_train_full == y_label)'], {}), '(y_aug_train_full == y_label)\n', (22489, 22518), True, 'import numpy as np\n'), ((22731, 22765), 'numpy.sum', 'np.sum', (['(y_aug_test_full == y_label)'], {}), '(y_aug_test_full == y_label)\n', (22737, 22765), True, 'import numpy as np\n'), ((28549, 28608), 'tensorflow.Summary.Value', 'tf.Summary.Value', ([], {'tag': '"""train/loss"""', 'simple_value': 'train_loss'}), "(tag='train/loss', simple_value=train_loss)\n", (28565, 28608), True, 'import tensorflow as tf\n'), ((28651, 28708), 'tensorflow.Summary.Value', 'tf.Summary.Value', ([], {'tag': '"""train/acc"""', 'simple_value': 'train_acc'}), "(tag='train/acc', simple_value=train_acc)\n", (28667, 28708), True, 'import tensorflow as tf\n'), ((28751, 28808), 'tensorflow.Summary.Value', 'tf.Summary.Value', ([], {'tag': '"""test/loss"""', 'simple_value': 'test_loss'}), "(tag='test/loss', simple_value=test_loss)\n", (28767, 28808), True, 'import tensorflow as tf\n'), ((28851, 28906), 'tensorflow.Summary.Value', 'tf.Summary.Value', ([], {'tag': '"""test/acc"""', 'simple_value': 'test_acc'}), "(tag='test/acc', simple_value=test_acc)\n", (28867, 28906), True, 'import tensorflow as tf\n'), ((9693, 9708), 'tensorflow.lgamma', 'tf.lgamma', (['beta'], {}), '(beta)\n', (9702, 9708), True, 'import tensorflow as tf\n'), ((14431, 14444), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (14437, 14444), True, 'import numpy as np\n'), ((14450, 14463), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (14456, 14463), True, 'import numpy as np\n'), ((19172, 19220), 'numpy.sqrt', 'np.sqrt', (['((x_j - x_jp1) ** 2 + (y_j - y_jp1) ** 2)'], {}), '((x_j - x_jp1) ** 2 + (y_j - y_jp1) ** 
2)\n', (19179, 19220), True, 'import numpy as np\n'), ((20224, 20260), 'numpy.sqrt', 'np.sqrt', (['(inter_x ** 2 + inter_y ** 2)'], {}), '(inter_x ** 2 + inter_y ** 2)\n', (20231, 20260), True, 'import numpy as np\n'), ((9578, 9596), 'tensorflow.lgamma', 'tf.lgamma', (['S_alpha'], {}), '(S_alpha)\n', (9587, 9596), True, 'import tensorflow as tf\n'), ((9613, 9629), 'tensorflow.lgamma', 'tf.lgamma', (['alpha'], {}), '(alpha)\n', (9622, 9629), True, 'import tensorflow as tf\n'), ((10167, 10184), 'tensorflow.digamma', 'tf.digamma', (['alpha'], {}), '(alpha)\n', (10177, 10184), True, 'import tensorflow as tf\n'), ((20386, 20414), 'numpy.sqrt', 'np.sqrt', (['(x_j ** 2 + y_j ** 2)'], {}), '(x_j ** 2 + y_j ** 2)\n', (20393, 20414), True, 'import numpy as np\n'), ((20492, 20520), 'numpy.sqrt', 'np.sqrt', (['(x_j ** 2 + y_j ** 2)'], {}), '(x_j ** 2 + y_j ** 2)\n', (20499, 20520), True, 'import numpy as np\n'), ((10120, 10163), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['alpha'], {'axis': '(1)', 'keepdims': '(True)'}), '(alpha, axis=1, keepdims=True)\n', (10133, 10163), True, 'import tensorflow as tf\n'), ((29081, 29101), 'numpy.mean', 'np.mean', (['train_confs'], {}), '(train_confs)\n', (29088, 29101), True, 'import numpy as np\n'), ((29196, 29215), 'numpy.mean', 'np.mean', (['test_confs'], {}), '(test_confs)\n', (29203, 29215), True, 'import numpy as np\n'), ((9489, 9506), 'tensorflow.digamma', 'tf.digamma', (['alpha'], {}), '(alpha)\n', (9499, 9506), True, 'import tensorflow as tf\n'), ((9509, 9528), 'tensorflow.digamma', 'tf.digamma', (['S_alpha'], {}), '(S_alpha)\n', (9519, 9528), True, 'import tensorflow as tf\n')] |
from sklearn.feature_extraction.text import TfidfVectorizer
from numpy import linalg as la
from gensim import corpora,models,similarities
from sklearn.cluster import KMeans
import json
import codecs
from sklearn.externals import joblib
from scipy.sparse import csr_matrix
import numpy as np
import nltk
from nltk.stem.snowball import SnowballStemmer
from scipy.cluster.hierarchy import ward, dendrogram,linkage,fcluster
from sklearn.metrics.pairwise import cosine_similarity
# Shared Snowball stemmer for English text.
stemmer = SnowballStemmer("english")
def tokenize_and_stem(text):
    """Lower-case *text*, split it on whitespace, and Snowball-stem each token.

    Returns the list of stems in token order.  (The original implementation
    built an unused ``filtered_tokens`` list and an intermediate ``tokens``
    list; both are removed here with no change in output.)
    """
    return [stemmer.stem(token) for token in text.lower().split()]
def assign_paper_for_interest(paper_list,interest_list,interest_paper):
    """Assign each paper in *paper_list* to one or more research interests.

    Mutates (and returns) *interest_paper*, a dict mapping
    interest -> list of paper texts.  Papers (or clusters) that share no
    stemmed token with any interest are appended under *every* interest
    in *interest_list* as a fallback.
    """
    tfidf_vectorizer = TfidfVectorizer(max_df=0.9, max_features=2000,\
                        min_df=0.05, stop_words='english',\
                        use_idf=True, tokenizer=tokenize_and_stem, ngram_range=(1,3))
    # Too few papers to cluster meaningfully: match each paper directly.
    if len(paper_list) <=2 :
        for p in paper_list:
            interest = return_interest(interest_list,p)
            if len(interest) > 0:
                for v in set(interest):
                    interest_paper.setdefault(v,[]).append(p)
            else:
                # No textual overlap: fall back to assigning all interests.
                for v in interest_list:
                    interest_paper.setdefault(v,[]).append(p)
        return interest_paper
    tfidf_matrix = tfidf_vectorizer.fit_transform(paper_list)
    tfidf_new = tfidf_matrix.toarray()
    if (tfidf_new.shape[0]) > 0:
        # Rank-16 truncated-SVD reconstruction (LSA-style denoising) before
        # computing pairwise cosine similarities.
        u,s,v = la.svd(tfidf_new,full_matrices=False)
        #print(u[:,:6].shape)
        #print(s[:6].shape)
        #print(v[:6,:].shape)
        tfidf_new = np.dot(u[:,:16],np.dot(np.diag(s[:16]),v[:16,:]))
        dist = cosine_similarity(tfidf_new)
    # NOTE(review): if the guard above were False, `dist` would be unbound
    # here; unreachable in practice because len(paper_list) > 2 at this point.
    result = kmeans_clustering(dist)
    i_paper =group_paper(result,paper_list)
    # Match each cluster's concatenated text against the interests and assign
    # every paper in the cluster to the matched interests (or to all of them
    # when nothing matches).
    for i,value in i_paper.items():
        interest = return_interest(interest_list,' '.join(value))
        if len(interest) > 0:
            for inst in interest:
                for p in value:
                    interest_paper.setdefault(inst,[]).append(p)
        else:
            for inst in interest_list:
                for p in value:
                    interest_paper.setdefault(inst,[]).append(p)
    return interest_paper
def ward_clustering(matrix):
    """Cluster the rows of *matrix* into at most 3 flat groups via Ward linkage.

    Returns one integer cluster label per observation, as produced by
    ``fcluster(..., 3, criterion='maxclust')``.
    """
    tree = ward(matrix)
    return fcluster(tree, 3, criterion='maxclust')
def group_paper_for_test(paper_list):
    """Cluster *paper_list* into up to 3 groups and return {label: [papers]}.

    Pipeline: TF-IDF -> rank-16 SVD reconstruction -> cosine similarity ->
    Ward hierarchical clustering.
    """
    vectorizer = TfidfVectorizer(max_df=0.9, max_features=2000,
                                 min_df=0.05, stop_words='english',
                                 use_idf=True, tokenizer=tokenize_and_stem,
                                 ngram_range=(1, 3))
    dense = vectorizer.fit_transform(paper_list).toarray()
    if dense.shape[0] > 0:
        # Rank-16 truncated-SVD reconstruction (LSA-style denoising).
        u, s, vt = la.svd(dense, full_matrices=False)
        dense = np.dot(u[:, :16], np.dot(np.diag(s[:16]), vt[:16, :]))
        sim = cosine_similarity(dense)
    labels = ward_clustering(sim)
    return group_paper(labels, paper_list)
def kmeans_clustering(tfidf_matrix):
    """Partition the rows of *tfidf_matrix* into at most 3 k-means clusters.

    Returns a plain list with one integer cluster label per row.
    """
    k = min(tfidf_matrix.shape[0], 3)
    model = KMeans(n_clusters=k)
    model.fit(tfidf_matrix)
    return model.labels_.tolist()
def main():
    """Demo driver: print the 17th author's papers and assign them to interests."""
    with codecs.open("../t_author_paper.json","r","utf-8") as fid:
        t_author_paper = json.load(fid)
    with codecs.open("../author_interest.json","r","utf-8") as fid:
        author_interest = json.load(fid)
    interest_paper = {}
    for idx, (author, paper_list) in enumerate(t_author_paper.items()):
        if idx != 16:
            continue
        print(author)
        print(author_interest[author])
        for i, p in enumerate(paper_list):
            print(str(i) + " : " + p)
        assign_paper_for_interest(paper_list, author_interest[author], interest_paper)
        break
def group_paper(label_list, paper_list):
    """Group papers by their cluster label.

    Parameters: *label_list* (one label per paper) and *paper_list*, in
    corresponding order.  Returns a dict mapping label -> list of papers,
    preserving the input order within each group.

    Improvement: iterate the two sequences in lockstep with ``zip`` instead
    of indexing through ``range(len(...))``.
    """
    class_paper = {}
    for label, paper in zip(label_list, paper_list):
        class_paper.setdefault(label, []).append(paper)
    return class_paper
def return_interest(interest_list, paper_text):
    """Return the interests whose words (raw or stemmed) occur in *paper_text*.

    An interest matches when any of its split words or their stems appears
    among the stemmed tokens of *paper_text*.  The result may repeat an
    interest once per shared token.
    """
    # Map each interest to its raw words followed by their stems.
    tokens_by_interest = {}
    for interest in interest_list:
        tokens_by_interest[interest] = interest.split() + tokenize_and_stem(interest)
    vocabulary = set()
    for words in tokens_by_interest.values():
        vocabulary.update(words)
    shared = set(tokenize_and_stem(paper_text)) & vocabulary
    matches = []
    for tok in shared:
        matches.extend(k for k, words in tokens_by_interest.items() if tok in words)
    return matches
#if __name__ == "__main__":
# main()
| [
"sklearn.cluster.KMeans",
"sklearn.metrics.pairwise.cosine_similarity",
"scipy.cluster.hierarchy.ward",
"numpy.diag",
"json.load",
"nltk.stem.snowball.SnowballStemmer",
"sklearn.feature_extraction.text.TfidfVectorizer",
"numpy.linalg.svd",
"codecs.open",
"scipy.cluster.hierarchy.fcluster"
] | [((485, 511), 'nltk.stem.snowball.SnowballStemmer', 'SnowballStemmer', (['"""english"""'], {}), "('english')\n", (500, 511), False, 'from nltk.stem.snowball import SnowballStemmer\n'), ((778, 927), 'sklearn.feature_extraction.text.TfidfVectorizer', 'TfidfVectorizer', ([], {'max_df': '(0.9)', 'max_features': '(2000)', 'min_df': '(0.05)', 'stop_words': '"""english"""', 'use_idf': '(True)', 'tokenizer': 'tokenize_and_stem', 'ngram_range': '(1, 3)'}), "(max_df=0.9, max_features=2000, min_df=0.05, stop_words=\n 'english', use_idf=True, tokenizer=tokenize_and_stem, ngram_range=(1, 3))\n", (793, 927), False, 'from sklearn.feature_extraction.text import TfidfVectorizer\n'), ((1734, 1762), 'sklearn.metrics.pairwise.cosine_similarity', 'cosine_similarity', (['tfidf_new'], {}), '(tfidf_new)\n', (1751, 1762), False, 'from sklearn.metrics.pairwise import cosine_similarity\n'), ((2334, 2346), 'scipy.cluster.hierarchy.ward', 'ward', (['matrix'], {}), '(matrix)\n', (2338, 2346), False, 'from scipy.cluster.hierarchy import ward, dendrogram, linkage, fcluster\n'), ((2360, 2409), 'scipy.cluster.hierarchy.fcluster', 'fcluster', (['linkage_matrix', '(3)'], {'criterion': '"""maxclust"""'}), "(linkage_matrix, 3, criterion='maxclust')\n", (2368, 2409), False, 'from scipy.cluster.hierarchy import ward, dendrogram, linkage, fcluster\n'), ((2511, 2660), 'sklearn.feature_extraction.text.TfidfVectorizer', 'TfidfVectorizer', ([], {'max_df': '(0.9)', 'max_features': '(2000)', 'min_df': '(0.05)', 'stop_words': '"""english"""', 'use_idf': '(True)', 'tokenizer': 'tokenize_and_stem', 'ngram_range': '(1, 3)'}), "(max_df=0.9, max_features=2000, min_df=0.05, stop_words=\n 'english', use_idf=True, tokenizer=tokenize_and_stem, ngram_range=(1, 3))\n", (2526, 2660), False, 'from sklearn.feature_extraction.text import TfidfVectorizer\n'), ((3065, 3093), 'sklearn.metrics.pairwise.cosine_similarity', 'cosine_similarity', (['tfidf_new'], {}), '(tfidf_new)\n', (3082, 3093), False, 'from 
sklearn.metrics.pairwise import cosine_similarity\n'), ((3287, 3318), 'sklearn.cluster.KMeans', 'KMeans', ([], {'n_clusters': 'num_clusters'}), '(n_clusters=num_clusters)\n', (3293, 3318), False, 'from sklearn.cluster import KMeans\n'), ((1526, 1564), 'numpy.linalg.svd', 'la.svd', (['tfidf_new'], {'full_matrices': '(False)'}), '(tfidf_new, full_matrices=False)\n', (1532, 1564), True, 'from numpy import linalg as la\n'), ((2856, 2894), 'numpy.linalg.svd', 'la.svd', (['tfidf_new'], {'full_matrices': '(False)'}), '(tfidf_new, full_matrices=False)\n', (2862, 2894), True, 'from numpy import linalg as la\n'), ((3444, 3495), 'codecs.open', 'codecs.open', (['"""../t_author_paper.json"""', '"""r"""', '"""utf-8"""'], {}), "('../t_author_paper.json', 'r', 'utf-8')\n", (3455, 3495), False, 'import codecs\n'), ((3527, 3541), 'json.load', 'json.load', (['fid'], {}), '(fid)\n', (3536, 3541), False, 'import json\n'), ((3552, 3604), 'codecs.open', 'codecs.open', (['"""../author_interest.json"""', '"""r"""', '"""utf-8"""'], {}), "('../author_interest.json', 'r', 'utf-8')\n", (3563, 3604), False, 'import codecs\n'), ((3637, 3651), 'json.load', 'json.load', (['fid'], {}), '(fid)\n', (3646, 3651), False, 'import json\n'), ((1695, 1710), 'numpy.diag', 'np.diag', (['s[:16]'], {}), '(s[:16])\n', (1702, 1710), True, 'import numpy as np\n'), ((3025, 3040), 'numpy.diag', 'np.diag', (['s[:16]'], {}), '(s[:16])\n', (3032, 3040), True, 'import numpy as np\n')] |
import numpy as np
from numpy.random import randint
from tqdm import tqdm
# Absorbing-barrier half-width for the random walk.
md = 100
def classify(x):
    """Classify position *x* against the barriers at +/- md.

    Returns -1 below -md, +1 above +md, and 0 inside the closed
    interval [-md, md].
    """
    if x < -md:
        return -1
    elif x > md:
        return 1
    else:
        return 0
def mutal(x, dep=10):
    """Exact expected exit value of the +/-1 walk from *x*, truncated at depth *dep*.

    Returns the barrier value (+/-1) immediately if *x* is already outside
    the barriers, and 0 once the depth budget is exhausted.
    """
    if dep <= 0:
        return 0
    state = classify(x)
    if state != 0:
        return state
    down = mutal(x - 1, dep - 1)
    up = mutal(x + 1, dep - 1)
    return (down + up) / 2
def mutatual(x):
    """Simulate one random walk from *x* until it exits [-md, md]; return +/-1."""
    pos = x
    label = classify(pos)
    while label == 0:
        pos += rnd()
        label = classify(pos)
    return label
def highstat(x):
    """Monte-Carlo estimate (1000 walks) of the expected absorption value at *x*."""
    samples = [mutatual(x) for _ in range(1000)]
    return np.mean(samples)
# Draw 50k uniform starting points in (-md, md), simulate one absorbing walk
# from each, and save the (start, outcome) pairs to data.npz.
x=2*md*np.random.random(size=(50000))-md
y=[]
for xx in tqdm(x):
    y.append(mutatual(xx))
np.savez_compressed("data",x=x,y=y)
exit()
# NOTE(review): everything below is unreachable — exit() above terminates the
# script.  Looks like an earlier experiment comparing the exact recursion
# (mutal) with the Monte-Carlo estimate (highstat) on a coarse grid.
x=np.arange(-md,1.0001*md,md/4)
#y=[mutal(zw) for zw in x]
yy=[highstat(zw) for zw in x]
#print(y)
print(yy)
#class 1:below -1, class 2: above 1
| [
"numpy.random.random",
"tqdm.tqdm",
"numpy.random.randint",
"numpy.savez_compressed",
"numpy.arange"
] | [((556, 563), 'tqdm.tqdm', 'tqdm', (['x'], {}), '(x)\n', (560, 563), False, 'from tqdm import tqdm\n'), ((593, 630), 'numpy.savez_compressed', 'np.savez_compressed', (['"""data"""'], {'x': 'x', 'y': 'y'}), "('data', x=x, y=y)\n", (612, 630), True, 'import numpy as np\n'), ((648, 683), 'numpy.arange', 'np.arange', (['(-md)', '(1.0001 * md)', '(md / 4)'], {}), '(-md, 1.0001 * md, md / 4)\n', (657, 683), True, 'import numpy as np\n'), ((506, 534), 'numpy.random.random', 'np.random.random', ([], {'size': '(50000)'}), '(size=50000)\n', (522, 534), True, 'import numpy as np\n'), ((108, 121), 'numpy.random.randint', 'randint', (['(0)', '(2)'], {}), '(0, 2)\n', (115, 121), False, 'from numpy.random import randint\n')] |
# Tools for working with GLM data. Mostly adapted from glmtools
import xarray as xr
import numpy as np
import pyproj as proj4
from datetime import timedelta
import warnings
from glmtools.io.lightning_ellipse import lightning_ellipse_rev
from lmatools.coordinateSystems import CoordinateSystem
from lmatools.grid.fixed import get_GOESR_coordsys
from .abi import get_abi_x_y
from .dataset import get_ds_bin_edges, get_ds_shape, get_ds_core_coords, get_datetime_from_coord
# equatorial and polar radii
this_ellps=0
ltg_ellps_re, ltg_ellps_rp = lightning_ellipse_rev[this_ellps]
# Functions from GLM notebook for parallax correction
def semiaxes_to_invflattening(semimajor, semiminor):
    """Return the inverse flattening 1/f = a / (a - b) of an ellipse.

    semimajor: equatorial semi-major axis a
    semiminor: polar semi-minor axis b
    """
    axis_difference = semimajor - semiminor
    return semimajor / axis_difference
class GeostationaryFixedGridSystemAltEllipse(CoordinateSystem):
    def __init__(self, subsat_lon=0.0, subsat_lat=0.0, sweep_axis='y',
                 sat_ecef_height=35785831.0,
                 semimajor_axis=None,
                 semiminor_axis=None,
                 datum='WGS84'):
        """
        Geostationary fixed-grid coordinate system referenced to an
        arbitrary ellipsoid whose shape is given by semimajor_axis
        (equatorial) and semiminor_axis (polar); the satellite height is
        with respect to that same ellipsoid.

        Fixed grid coordinates are in radians.
        """
        rf = semiaxes_to_invflattening(semimajor_axis, semiminor_axis)
        # print("Defining alt ellipse for Geostationary with rf=", rf)
        self.ECEFxyz = proj4.Proj(proj='geocent',
            a=semimajor_axis, rf=rf)
        self.fixedgrid = proj4.Proj(proj='geos', lon_0=subsat_lon,
            lat_0=subsat_lat, h=sat_ecef_height, x_0=0.0, y_0=0.0,
            units='m', sweep=sweep_axis,
            a=semimajor_axis, rf=rf)
        # Satellite height scales fixed-grid angles (radians) to metres.
        self.h=sat_ecef_height
    def toECEF(self, x, y, z):
        """Convert fixed-grid angles (radians) to ECEF coordinates.

        NOTE(review): proj4.transform is deprecated in pyproj>=2;
        consider migrating to pyproj.Transformer in a future change.
        """
        X, Y, Z = x*self.h, y*self.h, z*self.h
        return proj4.transform(self.fixedgrid, self.ECEFxyz, X, Y, Z)
    def fromECEF(self, x, y, z):
        """Convert ECEF coordinates back to fixed-grid angles (radians)."""
        X, Y, Z = proj4.transform(self.ECEFxyz, self.fixedgrid, x, y, z)
        return X/self.h, Y/self.h, Z/self.h
class GeographicSystemAltEllps(CoordinateSystem):
    """
    Coordinate system defined on the surface of the earth using latitude,
    longitude, and altitude, referenced by default to the WGS84 ellipse.

    Alternately, specify the ellipse shape using an ellipse known
    to pyproj, or [NOT IMPLEMENTED] specify r_equator and r_pole directly.
    """
    def __init__(self, ellipse='WGS84', datum='WGS84',
                 r_equator=None, r_pole=None):
        # If either radius is supplied, build the ellipsoid from the radii;
        # otherwise fall back to a named ellipse/datum known to pyproj.
        if (r_equator is not None) | (r_pole is not None):
            rf = semiaxes_to_invflattening(r_equator, r_pole)
            # print("Defining alt ellipse for Geographic with rf", rf)
            self.ERSlla = proj4.Proj(proj='latlong', #datum=datum,
                a=r_equator, rf=rf)
            self.ERSxyz = proj4.Proj(proj='geocent', #datum=datum,
                a=r_equator, rf=rf)
        else:
            # lat lon alt in some earth reference system
            self.ERSlla = proj4.Proj(proj='latlong', ellps=ellipse, datum=datum)
            self.ERSxyz = proj4.Proj(proj='geocent', ellps=ellipse, datum=datum)
    def toECEF(self, lon, lat, alt):
        """Convert lon/lat/alt to ECEF; preserves scalar vs. array result shape."""
        projectedData = np.array(proj4.transform(self.ERSlla, self.ERSxyz, lon, lat, alt ))
        if len(projectedData.shape) == 1:
            return projectedData[0], projectedData[1], projectedData[2]
        else:
            return projectedData[0,:], projectedData[1,:], projectedData[2,:]
    def fromECEF(self, x, y, z):
        """Convert ECEF x/y/z to lon/lat/alt; preserves scalar vs. array result shape."""
        projectedData = np.array(proj4.transform(self.ERSxyz, self.ERSlla, x, y, z ))
        if len(projectedData.shape) == 1:
            return projectedData[0], projectedData[1], projectedData[2]
        else:
            return projectedData[0,:], projectedData[1,:], projectedData[2,:]
def get_GOESR_coordsys_alt_ellps(sat_lon_nadir=-75.0):
    """Build (fixed-grid, lat/lon/alt) coordinate systems referenced to the
    GLM lightning ellipsoid for a GOES-R satellite at the given nadir longitude."""
    sweep = 'x'  # GOES-R sweeps along x; Meteosat uses 'y'
    ellipse_datum = 'WGS84'
    sat_height = 35786023.0
    geofixcs = GeostationaryFixedGridSystemAltEllipse(
        subsat_lon=sat_lon_nadir,
        semimajor_axis=ltg_ellps_re,
        semiminor_axis=ltg_ellps_rp,
        datum=ellipse_datum,
        sweep_axis=sweep,
        sat_ecef_height=sat_height)
    grs80lla = GeographicSystemAltEllps(
        r_equator=ltg_ellps_re, r_pole=ltg_ellps_rp, datum='WGS84')
    return geofixcs, grs80lla
def get_glm_parallax_offsets(lon, lat, goes_ds):
    """Return (lon, lat) offsets mapping GLM flash locations, navigated on the
    lightning ellipsoid, onto the GOES fixed-grid ground surface."""
    fixed_x, fixed_y = get_abi_x_y(lat, lon, goes_ds)
    fixed_z = np.zeros_like(fixed_x)
    nadir = goes_ds.goes_imager_projection.longitude_of_projection_origin
    _, grs80lla = get_GOESR_coordsys(nadir)
    geofix_ltg, _ = get_GOESR_coordsys_alt_ellps(nadir)
    # Fixed grid -> ECEF on the lightning ellipsoid -> geodetic lat/lon.
    ecef = geofix_ltg.toECEF(fixed_x, fixed_y, fixed_z)
    lon_ltg, lat_ltg, _ = grs80lla.fromECEF(*ecef)
    return lon_ltg - lon, lat_ltg - lat
def get_corrected_glm_x_y(glm_filename, goes_ds):
    """Parallax-corrected ABI fixed-grid (x, y) of the flashes in one GLM file.

    Returns a pair of empty arrays when the file holds no flashes or cannot
    be read (a warning is issued in the latter case).
    """
    empty = (np.array([]), np.array([]))
    try:
        print(glm_filename, end='\r')
        with xr.open_dataset(glm_filename) as glm_ds:
            lons = glm_ds.flash_lon.data
            lats = glm_ds.flash_lat.data
            if lats.size > 0 and lons.size > 0:
                lon_offset, lat_offset = get_glm_parallax_offsets(lons, lats, goes_ds)
                return get_abi_x_y(lats + lat_offset, lons + lon_offset, goes_ds)
            return empty
    except (OSError, RuntimeError) as e:
        warnings.warn(e.args[0])
        warnings.warn(f'Unable to process file {glm_filename}')
        return empty
def get_uncorrected_glm_x_y(glm_filename, goes_ds):
    """ABI fixed-grid (x, y) of the flashes in one GLM file, without any
    parallax correction.

    Returns a pair of empty arrays when the file holds no flashes or cannot
    be read (a warning is issued in the latter case).
    """
    empty = (np.array([]), np.array([]))
    try:
        print(glm_filename, end='\r')
        with xr.open_dataset(glm_filename) as glm_ds:
            lons = glm_ds.flash_lon.data
            lats = glm_ds.flash_lat.data
            if lats.size > 0 and lons.size > 0:
                return get_abi_x_y(lats, lons, goes_ds)
            return empty
    except (OSError, RuntimeError) as e:
        warnings.warn(e.args[0])
        warnings.warn(f'Unable to process file {glm_filename}')
        return empty
def get_corrected_glm_hist(glm_files, goes_ds, start_time, end_time):
    """2D flash-count histogram (y, x) on the ABI grid from all GLM files
    keyed strictly between start_time and end_time, with parallax correction."""
    x_bins, y_bins = get_ds_bin_edges(goes_ds, ('x', 'y'))
    keys = [k for k in glm_files if start_time < k < end_time]
    per_file = [get_corrected_glm_x_y(glm_files[k], goes_ds) for k in keys]
    xs, ys = zip(*per_file)
    glm_x = np.concatenate(xs)
    glm_y = np.concatenate(ys)
    counts, _, _ = np.histogram2d(glm_y, glm_x, bins=(y_bins[::-1], x_bins))
    return counts[::-1]
def get_uncorrected_glm_hist(glm_files, goes_ds, start_time, end_time):
    """2D flash-count histogram (y, x) on the ABI grid from all GLM files
    keyed strictly between start_time and end_time, without parallax correction."""
    x_bins, y_bins = get_ds_bin_edges(goes_ds, ('x', 'y'))
    keys = [k for k in glm_files if start_time < k < end_time]
    per_file = [get_uncorrected_glm_x_y(glm_files[k], goes_ds) for k in keys]
    xs, ys = zip(*per_file)
    glm_x = np.concatenate(xs)
    glm_y = np.concatenate(ys)
    counts, _, _ = np.histogram2d(glm_y, glm_x, bins=(y_bins[::-1], x_bins))
    return counts[::-1]
def regrid_glm(glm_files, goes_ds, corrected=False):
    """Accumulate GLM flash counts onto the (t, y, x) grid of a GOES dataset.

    glm_files: mapping of time keys -> GLM filenames
    goes_ds: GOES/ABI dataset supplying the target grid and time steps
    corrected: apply GLM parallax correction when True
    Returns an xarray.DataArray of counts with the GOES coordinates.
    """
    goes_dates = get_datetime_from_coord(goes_ds.t)
    goes_coords = get_ds_core_coords(goes_ds)
    goes_mapping = {k:goes_coords[k].size for k in goes_coords}
    glm_grid_shape = (goes_mapping['t'], goes_mapping['y'], goes_mapping['x'])
    # Fill with -1 for missing value
    glm_grid = np.full(glm_grid_shape, -1)
    for i in range(glm_grid_shape[0]):
        # print(i, end='\r')
        try:
            # Each GOES time step aggregates 5 minutes of GLM data
            # starting at the step's timestamp.
            if corrected:
                glm_grid[i] = get_corrected_glm_hist(glm_files, goes_ds,
                                                       goes_dates[i],
                                                       goes_dates[i]+timedelta(minutes=5))
            else:
                glm_grid[i] = get_uncorrected_glm_hist(glm_files, goes_ds,
                                                         goes_dates[i],
                                                         goes_dates[i]+timedelta(minutes=5))
        except (ValueError, IndexError) as e:
            # Best-effort: leave the -1 fill in place for this step.
            print('Error processing glm data at step %d' % i)
            print(e)
    glm_grid = xr.DataArray(glm_grid, goes_coords, ('t','y','x'))
    return glm_grid
| [
"numpy.histogram2d",
"lmatools.grid.fixed.get_GOESR_coordsys",
"pyproj.transform",
"datetime.timedelta",
"numpy.array",
"pyproj.Proj",
"xarray.DataArray",
"numpy.concatenate",
"numpy.full",
"warnings.warn",
"xarray.open_dataset",
"numpy.zeros_like"
] | [((4653, 4669), 'numpy.zeros_like', 'np.zeros_like', (['x'], {}), '(x)\n', (4666, 4669), True, 'import numpy as np\n'), ((4764, 4789), 'lmatools.grid.fixed.get_GOESR_coordsys', 'get_GOESR_coordsys', (['nadir'], {}), '(nadir)\n', (4782, 4789), False, 'from lmatools.grid.fixed import get_GOESR_coordsys\n'), ((7622, 7649), 'numpy.full', 'np.full', (['glm_grid_shape', '(-1)'], {}), '(glm_grid_shape, -1)\n', (7629, 7649), True, 'import numpy as np\n'), ((8343, 8395), 'xarray.DataArray', 'xr.DataArray', (['glm_grid', 'goes_coords', "('t', 'y', 'x')"], {}), "(glm_grid, goes_coords, ('t', 'y', 'x'))\n", (8355, 8395), True, 'import xarray as xr\n'), ((1527, 1578), 'pyproj.Proj', 'proj4.Proj', ([], {'proj': '"""geocent"""', 'a': 'semimajor_axis', 'rf': 'rf'}), "(proj='geocent', a=semimajor_axis, rf=rf)\n", (1537, 1578), True, 'import pyproj as proj4\n'), ((1616, 1776), 'pyproj.Proj', 'proj4.Proj', ([], {'proj': '"""geos"""', 'lon_0': 'subsat_lon', 'lat_0': 'subsat_lat', 'h': 'sat_ecef_height', 'x_0': '(0.0)', 'y_0': '(0.0)', 'units': '"""m"""', 'sweep': 'sweep_axis', 'a': 'semimajor_axis', 'rf': 'rf'}), "(proj='geos', lon_0=subsat_lon, lat_0=subsat_lat, h=\n sat_ecef_height, x_0=0.0, y_0=0.0, units='m', sweep=sweep_axis, a=\n semimajor_axis, rf=rf)\n", (1626, 1776), True, 'import pyproj as proj4\n'), ((1928, 1982), 'pyproj.transform', 'proj4.transform', (['self.fixedgrid', 'self.ECEFxyz', 'X', 'Y', 'Z'], {}), '(self.fixedgrid, self.ECEFxyz, X, Y, Z)\n', (1943, 1982), True, 'import pyproj as proj4\n'), ((2035, 2089), 'pyproj.transform', 'proj4.transform', (['self.ECEFxyz', 'self.fixedgrid', 'x', 'y', 'z'], {}), '(self.ECEFxyz, self.fixedgrid, x, y, z)\n', (2050, 2089), True, 'import pyproj as proj4\n'), ((6565, 6585), 'numpy.concatenate', 'np.concatenate', (['locs'], {}), '(locs)\n', (6579, 6585), True, 'import numpy as np\n'), ((6994, 7014), 'numpy.concatenate', 'np.concatenate', (['locs'], {}), '(locs)\n', (7008, 7014), True, 'import numpy as np\n'), ((2810, 2856), 
'pyproj.Proj', 'proj4.Proj', ([], {'proj': '"""latlong"""', 'a': 'r_equator', 'rf': 'rf'}), "(proj='latlong', a=r_equator, rf=rf)\n", (2820, 2856), True, 'import pyproj as proj4\n'), ((2934, 2980), 'pyproj.Proj', 'proj4.Proj', ([], {'proj': '"""geocent"""', 'a': 'r_equator', 'rf': 'rf'}), "(proj='geocent', a=r_equator, rf=rf)\n", (2944, 2980), True, 'import pyproj as proj4\n'), ((3129, 3183), 'pyproj.Proj', 'proj4.Proj', ([], {'proj': '"""latlong"""', 'ellps': 'ellipse', 'datum': 'datum'}), "(proj='latlong', ellps=ellipse, datum=datum)\n", (3139, 3183), True, 'import pyproj as proj4\n'), ((3210, 3264), 'pyproj.Proj', 'proj4.Proj', ([], {'proj': '"""geocent"""', 'ellps': 'ellipse', 'datum': 'datum'}), "(proj='geocent', ellps=ellipse, datum=datum)\n", (3220, 3264), True, 'import pyproj as proj4\n'), ((3335, 3391), 'pyproj.transform', 'proj4.transform', (['self.ERSlla', 'self.ERSxyz', 'lon', 'lat', 'alt'], {}), '(self.ERSlla, self.ERSxyz, lon, lat, alt)\n', (3350, 3391), True, 'import pyproj as proj4\n'), ((3667, 3717), 'pyproj.transform', 'proj4.transform', (['self.ERSxyz', 'self.ERSlla', 'x', 'y', 'z'], {}), '(self.ERSxyz, self.ERSlla, x, y, z)\n', (3682, 3717), True, 'import pyproj as proj4\n'), ((5074, 5103), 'xarray.open_dataset', 'xr.open_dataset', (['glm_filename'], {}), '(glm_filename)\n', (5089, 5103), True, 'import xarray as xr\n'), ((5615, 5639), 'warnings.warn', 'warnings.warn', (['e.args[0]'], {}), '(e.args[0])\n', (5628, 5639), False, 'import warnings\n'), ((5648, 5703), 'warnings.warn', 'warnings.warn', (['f"""Unable to process file {glm_filename}"""'], {}), "(f'Unable to process file {glm_filename}')\n", (5661, 5703), False, 'import warnings\n'), ((5875, 5904), 'xarray.open_dataset', 'xr.open_dataset', (['glm_filename'], {}), '(glm_filename)\n', (5890, 5904), True, 'import xarray as xr\n'), ((6269, 6293), 'warnings.warn', 'warnings.warn', (['e.args[0]'], {}), '(e.args[0])\n', (6282, 6293), False, 'import warnings\n'), ((6302, 6357), 'warnings.warn', 
'warnings.warn', (['f"""Unable to process file {glm_filename}"""'], {}), "(f'Unable to process file {glm_filename}')\n", (6315, 6357), False, 'import warnings\n'), ((6776, 6833), 'numpy.histogram2d', 'np.histogram2d', (['glm_y', 'glm_x'], {'bins': '(y_bins[::-1], x_bins)'}), '(glm_y, glm_x, bins=(y_bins[::-1], x_bins))\n', (6790, 6833), True, 'import numpy as np\n'), ((7207, 7264), 'numpy.histogram2d', 'np.histogram2d', (['glm_y', 'glm_x'], {'bins': '(y_bins[::-1], x_bins)'}), '(glm_y, glm_x, bins=(y_bins[::-1], x_bins))\n', (7221, 7264), True, 'import numpy as np\n'), ((5719, 5731), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (5727, 5731), True, 'import numpy as np\n'), ((5733, 5745), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (5741, 5745), True, 'import numpy as np\n'), ((6373, 6385), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (6381, 6385), True, 'import numpy as np\n'), ((6387, 6399), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (6395, 6399), True, 'import numpy as np\n'), ((5538, 5550), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (5546, 5550), True, 'import numpy as np\n'), ((5552, 5564), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (5560, 5564), True, 'import numpy as np\n'), ((6192, 6204), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (6200, 6204), True, 'import numpy as np\n'), ((6206, 6218), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (6214, 6218), True, 'import numpy as np\n'), ((7946, 7966), 'datetime.timedelta', 'timedelta', ([], {'minutes': '(5)'}), '(minutes=5)\n', (7955, 7966), False, 'from datetime import timedelta\n'), ((8176, 8196), 'datetime.timedelta', 'timedelta', ([], {'minutes': '(5)'}), '(minutes=5)\n', (8185, 8196), False, 'from datetime import timedelta\n')] |
# Classify a single image with an ImageNet-pretrained ResNet50.
from keras.applications.resnet50 import ResNet50
from keras.preprocessing import image
from keras.applications.resnet50 import preprocess_input, decode_predictions
import numpy as np
model = ResNet50(weights='imagenet')
img_path = 'Data/Jellyfish.jpg'
# Load and resize to ResNet50's expected 224x224 input.
img = image.load_img(img_path, target_size=(224, 224))
x = image.img_to_array(img)
# Add the batch dimension: (224, 224, 3) -> (1, 224, 224, 3).
x = np.expand_dims(x, axis=0)
x = preprocess_input(x)
preds = model.predict(x)
# decode the results into a list of tuples (class, description, probability)
print('Predicted:', decode_predictions(preds, top=3)[0]) | [
"keras.preprocessing.image.img_to_array",
"keras.applications.resnet50.decode_predictions",
"keras.applications.resnet50.preprocess_input",
"numpy.expand_dims",
"keras.applications.resnet50.ResNet50",
"keras.preprocessing.image.load_img"
] | [((197, 225), 'keras.applications.resnet50.ResNet50', 'ResNet50', ([], {'weights': '"""imagenet"""'}), "(weights='imagenet')\n", (205, 225), False, 'from keras.applications.resnet50 import ResNet50\n'), ((268, 316), 'keras.preprocessing.image.load_img', 'image.load_img', (['img_path'], {'target_size': '(224, 224)'}), '(img_path, target_size=(224, 224))\n', (282, 316), False, 'from keras.preprocessing import image\n'), ((322, 345), 'keras.preprocessing.image.img_to_array', 'image.img_to_array', (['img'], {}), '(img)\n', (340, 345), False, 'from keras.preprocessing import image\n'), ((351, 376), 'numpy.expand_dims', 'np.expand_dims', (['x'], {'axis': '(0)'}), '(x, axis=0)\n', (365, 376), True, 'import numpy as np\n'), ((382, 401), 'keras.applications.resnet50.preprocess_input', 'preprocess_input', (['x'], {}), '(x)\n', (398, 401), False, 'from keras.applications.resnet50 import preprocess_input, decode_predictions\n'), ((531, 563), 'keras.applications.resnet50.decode_predictions', 'decode_predictions', (['preds'], {'top': '(3)'}), '(preds, top=3)\n', (549, 563), False, 'from keras.applications.resnet50 import preprocess_input, decode_predictions\n')] |
import inspect
import numbers
import numpy as np
import scipy.odr as odr
import scipy.optimize as opt
from pycertainties import Val
def rows_to_list(string_data):
    """Split whitespace-delimited text into a list of token lists, one per line."""
    lines = string_data.strip().split("\n")
    return [line.split() for line in lines]
def rows_to_numpy(string_data, sep=","):
    """Parse delimited text into a float64 array, one row per input line.

    sep: column delimiter; any falsy value means split on whitespace.
    """
    rows = string_data.strip().split("\n")
    if sep:
        parsed = [row.split(sep) for row in rows]
    else:
        parsed = [row.split() for row in rows]
    return np.array(parsed, np.float64)
def cols_to_numpy(string_data, sep=None):
    """Parse delimited text and return the columns as rows (transposed array).

    Note: unlike rows_to_numpy, the delimiter defaults to whitespace here.
    """
    row_major = rows_to_numpy(string_data, sep)
    return row_major.transpose()
def pprint_matrix(matrix, column_lables=None, *, title=None, sep="|", width=20, fwidth=10):
    """
    A pretty printer for 2d arrays.

    matrix: The 2d array; raises ValueError for any other dimensionality
    column_lables: Labels for the columns while printing
        (parameter name keeps its historical spelling for compatibility)
    title: Optional title centred across the full table width
    sep: Column separator string
    width: Per-column field width in characters
    fwidth: Significant digits used for non-integer real values
    """
    if len(matrix.shape) != 2:
        raise ValueError
    rows, cols = matrix.shape
    if title:
        # Total width = cols fields plus (cols - 1) separator characters.
        spaces = width + (width + 1) * (cols - 1)
        title_string = f"{{:^{spaces}}}"
        print(title_string.format(title))
    if column_lables:
        string = sep.join([f"{{:^{width}}}"] * cols)
        print(string.format(*column_lables))
    for row in range(rows):
        # Non-integer reals get general float formatting; every other value
        # (ints, strings, ...) is simply centred as-is.
        def fmt_str(val):
            return (
                f"{{:^{width}.{fwidth}g}}"
                if isinstance(val, numbers.Real) and not isinstance(val, int)
                else f"{{:^{width}}}"
            )
        strings = [fmt_str(val) for val in matrix[row]]
        string = sep.join(strings)
        print(string.format(*matrix[row]))
def _remove_zeros(xs, ys, dxs, dys):
def filtered(values):
if values is None:
return values
return [
val for ind, val in enumerate(values) if (dxs is None or dxs[ind] != 0) and (dys is None or dys[ind] != 0)
]
return filtered(xs), filtered(ys), filtered(dxs), filtered(dys)
def _fit_func(func, xs, ys, dxs=None, dys=None, guesses=None):
    """Fit func(x, *params) to the data and return one Val per parameter.

    Without x-uncertainties, an ordinary least-squares fit is used; with
    x-uncertainties, orthogonal distance regression handles errors in both axes.
    """
    if dxs is None:
        optimal, covarience = opt.curve_fit(func, xs, ys, sigma=dys, p0=guesses, maxfev=3000)
    else:
        # ODR cannot weight points with zero uncertainty, so drop them first.
        xs, ys, dxs, dys = _remove_zeros(xs, ys, dxs, dys)
        data = odr.RealData(xs, ys, dxs, dys)
        # scipy.odr expects f(beta, x) rather than f(x, *params).
        new_func = lambda beta, x: func(x, *beta)
        # Number of fit parameters = func's arity minus the x argument.
        sig = inspect.signature(func)
        options = len(sig.parameters) - 1
        model = odr.Model(new_func)
        odr_obj = odr.ODR(data, model, beta0=[1 for _ in range(options)] if guesses is None else guesses)
        res = odr_obj.run()
        optimal, covarience = res.beta, res.cov_beta
    # 1-sigma uncertainties from the diagonal of the covariance matrix.
    stddev = np.sqrt(np.diag(covarience))
    return tuple(Val(value, uncertainty) for value, uncertainty in zip(optimal, stddev))
def fit_func(func, xs, ys, dxs=None, dys=None, limits=None, guesses=None):
    """Fit func to the data, optionally restricted to the mask/index `limits`.

    Returns one Val (value +/- uncertainty) per fitted parameter.
    """
    if limits is None:
        selected = (xs, ys, dxs, dys)
    else:
        # Apply the same selection to every non-None sequence.
        selected = tuple(
            seq[limits] if seq is not None else None
            for seq in (xs, ys, dxs, dys)
        )
    return _fit_func(func, *selected, guesses=guesses)
def r_squared(func, xs, ys, parameters):
    """Coefficient of determination R^2 of func(xs, *parameters) against ys."""
    predicted = func(xs, *parameters)
    ss_residual = np.sum((ys - predicted) ** 2)
    ss_total = np.sum((ys - np.mean(ys)) ** 2)
    return 1 - (ss_residual / ss_total)
| [
"scipy.optimize.curve_fit",
"numpy.mean",
"inspect.signature",
"numpy.diag",
"numpy.array",
"scipy.odr.RealData",
"numpy.sum",
"scipy.odr.Model",
"pycertainties.Val"
] | [((457, 483), 'numpy.array', 'np.array', (['data', 'np.float64'], {}), '(data, np.float64)\n', (465, 483), True, 'import numpy as np\n'), ((3122, 3144), 'numpy.sum', 'np.sum', (['(residuals ** 2)'], {}), '(residuals ** 2)\n', (3128, 3144), True, 'import numpy as np\n'), ((1997, 2060), 'scipy.optimize.curve_fit', 'opt.curve_fit', (['func', 'xs', 'ys'], {'sigma': 'dys', 'p0': 'guesses', 'maxfev': '(3000)'}), '(func, xs, ys, sigma=dys, p0=guesses, maxfev=3000)\n', (2010, 2060), True, 'import scipy.optimize as opt\n'), ((2145, 2175), 'scipy.odr.RealData', 'odr.RealData', (['xs', 'ys', 'dxs', 'dys'], {}), '(xs, ys, dxs, dys)\n', (2157, 2175), True, 'import scipy.odr as odr\n'), ((2240, 2263), 'inspect.signature', 'inspect.signature', (['func'], {}), '(func)\n', (2257, 2263), False, 'import inspect\n'), ((2322, 2341), 'scipy.odr.Model', 'odr.Model', (['new_func'], {}), '(new_func)\n', (2331, 2341), True, 'import scipy.odr as odr\n'), ((2551, 2570), 'numpy.diag', 'np.diag', (['covarience'], {}), '(covarience)\n', (2558, 2570), True, 'import numpy as np\n'), ((2589, 2612), 'pycertainties.Val', 'Val', (['value', 'uncertainty'], {}), '(value, uncertainty)\n', (2592, 2612), False, 'from pycertainties import Val\n'), ((3185, 3196), 'numpy.mean', 'np.mean', (['ys'], {}), '(ys)\n', (3192, 3196), True, 'import numpy as np\n')] |
#! /usr/bin/env python
'''
LICENSE
-------------------------------------------------------------------------------
Copyright (c) 2015 to 2018 <NAME>
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-------------------------------------------------------------------------------
Defines the ppxf_data class and provides functions for creating ppxf_data
objects from fits files or ascii files
'''
import numpy as np
import astropy.coordinates
import onedspec
'''
Class for the inputs and outputs of run_ppxf
'''
class ppxf_data():
    '''
    Container for the inputs and (later attached) outputs of run_ppxf.

    wavelengths : wavelengths of input spectrum
    fluxes      : flux of the input spectrum
    sigmas      : 1 sigma uncertainty on input flux; if None, defaults to
                  sqrt(median(fluxes)) for every pixel (Poisson-like)
    ident       : object identifier
    filename    : file that is the source of the input spectrum
    rv_prior    : prior on the radial velocity
    sigma_prior : prior on the velocity dispersion
    h3_prior    : prior on the h3 moment
    h4_prior    : prior on the h4 moment
    '''
    def __init__(self, wavelengths, fluxes, sigmas=None, ident='', filename='', rv_prior=0, sigma_prior=10, h3_prior=0, h4_prior=0):
        self.ident = ident
        self.filename = filename
        self.rv_prior = rv_prior
        self.sigma_prior = sigma_prior
        self.h3_prior = h3_prior
        self.h4_prior = h4_prior
        self.input_wavelengths = np.asarray(wavelengths)
        self.input_fluxes = np.asarray(fluxes)
        if sigmas is None:
            # Bug fix: use the converted array rather than the raw argument --
            # plain Python lists have no .size attribute.
            sigmas = np.ones(self.input_fluxes.size) * np.median(self.input_fluxes)**0.5
        self.input_sigmas = np.asarray(sigmas)

    def __str__(self):
        return '<ppxf_data> ' + self.ident + ' ' + self.filename
'''
Create a ppxf_data object from a fits file or two
fluxes_file : the input fluxes
sigmas_file : the uncertainties (optional)
ident : the object identifier
filename : file that is the source of the input spectrum. defaults to the value of fluxes_file
ivars
'''
def create_from_fits(fluxes_file, sigmas_file=None, ident='', filename=None, ivars=True, varis=False, rv_prior=0, sigma_prior=10, h3_prior=0, h4_prior=0):
    """Create a ppxf_data object from a fits file (or a flux file plus a
    separate uncertainty file).

    fluxes_file: the input fluxes
    sigmas_file: the uncertainties (optional; used only when fluxes_file
        does not carry its own error extension)
    ivars/varis: interpret the sigmas file as inverse variances / variances
    filename: source-file label; defaults to fluxes_file
    """
    # Try to read the uncertainties from the same file as the fluxes first.
    try:
        wavelengths, fluxes, sigmas = onedspec.load_with_errors(fluxes_file)
    except IndexError:
        wavelengths, fluxes = onedspec.load(fluxes_file)
        if sigmas_file is not None:  # was "!= None" (PEP 8 E711)
            wavelengths, sigmas = onedspec.load(sigmas_file)
            if ivars:
                # Guard against non-positive inverse variances before inverting.
                np.putmask(sigmas, sigmas <= 0, 1e-10)
                sigmas = sigmas**-0.5
            elif varis:
                sigmas = sigmas**0.5
        else:
            # No uncertainty information available: assume constant errors.
            sigmas = np.ones(fluxes.size) * np.median(fluxes)
    # Get the RA and Dec from the header, if present.
    try:
        raw_ra, raw_dec = onedspec.listheader(fluxes_file, ['RA', 'DEC'])
        coordinates = astropy.coordinates.SkyCoord(raw_ra, raw_dec, unit=(astropy.units.hourangle, astropy.units.deg))
        ra = coordinates.ra.deg
        dec = coordinates.dec.deg
    except KeyError:
        ra = None
        dec = None
    if filename is None:
        filename = fluxes_file
    datum = ppxf_data(wavelengths, fluxes, sigmas, ident=ident, filename=filename, rv_prior=rv_prior, sigma_prior=sigma_prior, h3_prior=h3_prior, h4_prior=h4_prior)
    datum.ra = ra
    datum.dec = dec
    return datum
'''
Create a ppxf_data object from an ascii file
This assumes that the first column is the wavelength, the second is the fluxes
and the optional thrid column is the uncertainties
'''
def create_from_ascii(ascii_file, ident='', filename=None, ivars=False, varis=False, rv_prior=0, sigma_prior=10, h3_prior=0, h4_prior=0):
    """Create a ppxf_data object from an ascii file.

    Column 1 is the wavelength, column 2 the fluxes, and the optional
    column 3 the uncertainties (interpreted as inverse variances when
    ivars is True, or variances when varis is True).
    Blank lines and lines starting with '#' are skipped.
    """
    wavelengths = []
    fluxes = []
    sigmas = []
    columns = 0
    # Bug fix: use a context manager so the file handle is closed
    # (open(...).readlines() leaked the handle).
    with open(ascii_file) as handle:
        lines = handle.readlines()
    for line in lines:
        line = line.strip()
        if line == '' or line[0] == '#':
            continue
        substrs = line.split()
        if not columns:
            # First data line fixes the expected column count.
            columns = len(substrs)
        if columns < 2:
            raise Exception('Too few columns')
        elif columns != len(substrs):
            raise Exception('Inconsistent number of columns')
        wavelengths.append(float(substrs[0]))
        fluxes.append(float(substrs[1]))
        if columns >= 3:
            sigmas.append(float(substrs[2]))
    wavelengths = np.array(wavelengths)
    fluxes = np.array(fluxes)
    if columns >= 3:
        sigmas = np.array(sigmas)
    else:
        # No uncertainty column: fall back to uniform unit errors.
        sigmas = np.ones(fluxes.size)
    if ivars:
        # Guard against non-positive inverse variances before inverting.
        np.putmask(sigmas, sigmas <= 0, 1e-10)
        sigmas = sigmas**-0.5
    elif varis:
        sigmas = sigmas**0.5
    if filename is None:  # was "== None" (PEP 8 E711)
        filename = ascii_file
    datum = ppxf_data(wavelengths, fluxes, sigmas, ident=ident, filename=filename, rv_prior=rv_prior, sigma_prior=sigma_prior, h3_prior=h3_prior, h4_prior=h4_prior)
    return datum
| [
"onedspec.load_with_errors",
"numpy.median",
"numpy.ones",
"onedspec.load",
"numpy.putmask",
"numpy.asarray",
"onedspec.listheader",
"numpy.array"
] | [((5650, 5671), 'numpy.array', 'np.array', (['wavelengths'], {}), '(wavelengths)\n', (5658, 5671), True, 'import numpy as np\n'), ((5685, 5701), 'numpy.array', 'np.array', (['fluxes'], {}), '(fluxes)\n', (5693, 5701), True, 'import numpy as np\n'), ((2581, 2604), 'numpy.asarray', 'np.asarray', (['wavelengths'], {}), '(wavelengths)\n', (2591, 2604), True, 'import numpy as np\n'), ((2633, 2651), 'numpy.asarray', 'np.asarray', (['fluxes'], {}), '(fluxes)\n', (2643, 2651), True, 'import numpy as np\n'), ((2774, 2792), 'numpy.asarray', 'np.asarray', (['sigmas'], {}), '(sigmas)\n', (2784, 2792), True, 'import numpy as np\n'), ((3450, 3488), 'onedspec.load_with_errors', 'onedspec.load_with_errors', (['fluxes_file'], {}), '(fluxes_file)\n', (3475, 3488), False, 'import onedspec\n'), ((4005, 4052), 'onedspec.listheader', 'onedspec.listheader', (['fluxes_file', "['RA', 'DEC']"], {}), "(fluxes_file, ['RA', 'DEC'])\n", (4024, 4052), False, 'import onedspec\n'), ((5740, 5756), 'numpy.array', 'np.array', (['sigmas'], {}), '(sigmas)\n', (5748, 5756), True, 'import numpy as np\n'), ((5784, 5804), 'numpy.ones', 'np.ones', (['fluxes.size'], {}), '(fluxes.size)\n', (5791, 5804), True, 'import numpy as np\n'), ((5828, 5866), 'numpy.putmask', 'np.putmask', (['sigmas', '(sigmas <= 0)', '(1e-10)'], {}), '(sigmas, sigmas <= 0, 1e-10)\n', (5838, 5866), True, 'import numpy as np\n'), ((3551, 3577), 'onedspec.load', 'onedspec.load', (['fluxes_file'], {}), '(fluxes_file)\n', (3564, 3577), False, 'import onedspec\n'), ((2700, 2720), 'numpy.ones', 'np.ones', (['fluxes.size'], {}), '(fluxes.size)\n', (2707, 2720), True, 'import numpy as np\n'), ((3649, 3675), 'onedspec.load', 'onedspec.load', (['sigmas_file'], {}), '(sigmas_file)\n', (3662, 3675), False, 'import onedspec\n'), ((2723, 2740), 'numpy.median', 'np.median', (['fluxes'], {}), '(fluxes)\n', (2732, 2740), True, 'import numpy as np\n'), ((3714, 3752), 'numpy.putmask', 'np.putmask', (['sigmas', '(sigmas <= 0)', '(1e-10)'], {}), 
'(sigmas, sigmas <= 0, 1e-10)\n', (3724, 3752), True, 'import numpy as np\n'), ((3892, 3912), 'numpy.ones', 'np.ones', (['fluxes.size'], {}), '(fluxes.size)\n', (3899, 3912), True, 'import numpy as np\n'), ((3915, 3932), 'numpy.median', 'np.median', (['fluxes'], {}), '(fluxes)\n', (3924, 3932), True, 'import numpy as np\n')] |
import sys
sys.path.append('..')
from mtevi.mtevi import *
from mtevi.utils import *
import numpy as np
import torch
import argparse
import os
import math
from BayesianDTI.utils import *
from torch.utils.data import Dataset, DataLoader
from BayesianDTI.datahelper import *
from BayesianDTI.model import *
from BayesianDTI.predictor import *
from scipy.stats import t
from BayesianDTI.utils import confidence_interval
import matplotlib.pyplot as plt
import matplotlib
matplotlib.style.use('ggplot')
# Command-line interface for the evidential DTI training run.
parser = argparse.ArgumentParser()
parser.add_argument("-f", "--fold_num", type=int,
                    help="Fold number. It must be one of the {0,1,2,3,4}.")
parser.add_argument("-e", "--epochs", type=int, default=200,
                    help="Number of epochs.")
parser.add_argument("-o", "--output",
                    help="The output directory.")
parser.add_argument("--type", default='None',
                    help="Davis or Kiba; dataset select.")
parser.add_argument("--model",
                    help="The trained baseline model. If given, keep train the model.")
parser.add_argument("--abl", action='store_true',
                    help="Use the vanilla MSE")
parser.add_argument("--evi", action='store_true',
                    help="Use the vanilla evidential network")
parser.add_argument("--reg", type=float, default=0.0001,
                    help="Coefficient of evidential regularization")
parser.add_argument("--l2", type=float, default=0.0001,
                    help="Coefficient of L2 regularization")
parser.add_argument("--cuda", type=int, default=0, help="cuda device number")
args = parser.parse_args()
# Bind the chosen CUDA device and normalise the dataset name.
torch.cuda.set_device(args.cuda)
args.type = args.type.lower()
dir = args.output
# Echo the full configuration for reproducibility of the run.
print("Arguments: ########################")
print('\n'.join(f'{k}={v}' for k, v in vars(args).items()))
print("###################################")
try:
    os.mkdir(args.output)
except FileExistsError:
    print("The output directory {} is already exist.".format(args.output))
#######################################################################
### Load data
FOLD_NUM = int(args.fold_num) # {0,1,2,3,4}
class DataSetting:
    """Minimal config object providing the data-setting fields read by DataSet."""
    def __init__(self):
        # Dataset directory selected by the --type CLI argument.
        self.dataset_path = 'data/{}/'.format(args.type)
        self.problem_type = '1'
        # Davis affinities are log-transformed; KIBA scores are used as-is.
        self.is_log = False if args.type == 'kiba' else True
# Build the fold split: `test_fold` is held out; of the five CV folds, all but
# fold FOLD_NUM are merged into train, and fold FOLD_NUM becomes validation.
data_setting = DataSetting()
dataset = DataSet(data_setting.dataset_path,
                 1000 if args.type == 'kiba' else 1200,
                 100 if args.type == 'kiba' else 85) ## KIBA (1000,100) DAVIS (1200, 85)
smiles, proteins, Y = dataset.parse_data(data_setting)
test_fold, train_folds = dataset.read_sets(data_setting)
# Only (drug, protein) cells of Y with an observed affinity are usable samples.
label_row_inds, label_col_inds = np.where(np.isnan(Y)==False)
test_drug_indices = label_row_inds[test_fold]
test_protein_indices = label_col_inds[test_fold]
train_fold_sum = []
for i in range(5):
    if i != FOLD_NUM:
        train_fold_sum += train_folds[i]
train_drug_indices = label_row_inds[train_fold_sum]
train_protein_indices = label_col_inds[train_fold_sum]
valid_drug_indices = label_row_inds[train_folds[FOLD_NUM]]
valid_protein_indices = label_col_inds[train_folds[FOLD_NUM]]
dti_dataset = DTIDataset(smiles, proteins, Y, train_drug_indices, train_protein_indices)
valid_dti_dataset = DTIDataset(smiles, proteins, Y, valid_drug_indices, valid_protein_indices)
test_dti_dataset = DTIDataset(smiles, proteins, Y, test_drug_indices, test_protein_indices)
dataloader = DataLoader(dti_dataset, batch_size=256, shuffle=True, collate_fn=collate_dataset)#, pin_memory=True)
valid_dataloader = DataLoader(valid_dti_dataset, batch_size=256, shuffle=True, collate_fn=collate_dataset)#, pin_memory=True)
test_dataloader = DataLoader(test_dti_dataset, batch_size=256, shuffle=True, collate_fn=collate_dataset)#, pin_memory=True)
##########################################################################
### Define models
device = 'cuda:{}'.format(args.cuda)
# The network outputs four tensors (gamma, nu, alpha, beta); presumably the
# parameters of an evidential (NIG) predictive distribution -- confirm in mtevi.
dti_model = EvidentialDeepDTA(dropout=True).to(device)
objective_fn = EvidentialnetMarginalLikelihood().to(device)
objective_mse = torch.nn.MSELoss()
regularizer = EvidenceRegularizer(factor=args.reg).to(device)
opt = torch.optim.Adam(dti_model.parameters(), lr=0.001, weight_decay=args.l2)
# Exponential decay: lr = 0.001 * 0.99 ** epoch.
scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer=opt, lr_lambda=lambda epoch: 0.99 ** epoch,
                                        last_epoch=-1,
                                        verbose=False)
# Running sums; reset at the end of every epoch.
total_valid_loss = 0.
total_valid_nll = 0.
total_nll = 0.
train_nll_history = []
valid_loss_history = []
valid_nll_history = []
##########################################################################
### Training
a_history = []
best_nll = 10000
for epoch in range(args.epochs):
    dti_model.train()
    for d, p, y in dataloader:
        y = y.unsqueeze(1).to(device)
        gamma, nu, alpha, beta = dti_model(d.to(device), p.to(device))
        opt.zero_grad()
        ###############################################################
        #### NLL training
        # Marginal-likelihood loss plus evidence regularizer; unless --evi is
        # set, an MSE term (plain with --abl, otherwise modified_mse) is added.
        loss = objective_fn(gamma, nu, alpha, beta, y)
        total_nll += loss.item()
        loss += regularizer(gamma, nu, alpha, beta, y)
        if not args.evi:
            if args.abl:
                mse = objective_mse(gamma, y)
            else:
                mse = modified_mse(gamma, nu, alpha, beta, y)
            loss += mse.mean()
        loss.backward()
        ###############################################################
        opt.step()
    scheduler.step()
    # Validation pass: track both the NLL and the MSE of the mean output gamma.
    dti_model.eval()
    for d_v, p_v, y_v in valid_dataloader:
        y_v = y_v.unsqueeze(1).to(device)
        gamma, nu, alpha, beta = dti_model(d_v.to(device), p_v.to(device))
        nll_v = objective_fn(gamma,
                           nu,
                           alpha,
                           beta,
                           y_v)
        valid_nll_history.append(nll_v.item())
        total_valid_nll += nll_v.item()
        nll_v = objective_mse(gamma, y_v).mean()
        valid_loss_history.append(nll_v.item())
        total_valid_loss += nll_v.item()
    train_nll = total_nll/len(dataloader)
    valid_nll = total_valid_nll/len(valid_dataloader)
    valid_loss = total_valid_loss/len(valid_dataloader)
    if math.isnan(valid_nll):
        break
    # Checkpoint the model with the best (lowest) validation NLL so far.
    if best_nll >= valid_nll:
        torch.save(dti_model, dir + '/dti_model_best.model')
        best_nll = valid_nll
    print("Epoch {}: Train NLL [{:.5f}] Val MSE [{:.5f}] Val NLL [{:.5f}]".format(
        epoch+1, train_nll, valid_loss, valid_nll))
    total_nll = 0.
    total_valid_loss = 0.
    total_valid_nll = 0.
##########################################################################
fig = plt.figure(figsize=(15,5))
plt.plot(valid_loss_history, label="MSE")
plt.plot(valid_nll_history, label="NLL")
plt.title("Validate loss")
plt.xlabel("Validate steps")
plt.legend(facecolor='white', edgecolor='black')
plt.tight_layout()
plt.savefig(dir + "/MultitaskLoss.png")
##########################################################################
### Evaluation
import torch.distributions.studentT as studentT
predictor = EviNetDTIPredictor()
# Reload the best checkpoint rather than evaluating the last-epoch weights.
eval_model = torch.load(dir + '/dti_model_best.model').to(device)
mu_t, std_t, mu_Y_t, freedom_t = predictor(test_dataloader, eval_model)
total_t = std_t['total']
epistemic_t = std_t['epistemic']
aleatoric_t = std_t['aleatoric']
# Entropy of a Student-t with `freedom_t` dof and scale `total_t` per sample.
predictive_entropy = studentT.StudentT(torch.from_numpy(freedom_t), scale=torch.from_numpy(total_t)).entropy()
##########################################################################
from BayesianDTI.utils import plot_predictions
plot_predictions(mu_Y_t, mu_t, total_t, title="Mean prediction test with total uncertainty",
                sample_num=freedom_t, savefig=dir + "/total_uncertainty.png")
plot_predictions(mu_Y_t, mu_t, aleatoric_t, title="Mean prediction test with aleatoric uncertainty",
                sample_num=None, savefig=dir + "/aleatoric_uncertainty.png")
plot_predictions(mu_Y_t, mu_t, epistemic_t, title="Mean prediction test with epistemic uncertainty",
                sample_num=None, savefig=dir + "/epistemic_uncertainty.png")
plot_predictions(mu_Y_t, mu_t, predictive_entropy, title="Mean prediction test with predictive entropy",
                sample_num=freedom_t, savefig=dir + "/total_uncertainty_colored.png", rep_conf='color')
##########################################################################
from BayesianDTI.utils import evaluate_model
import json
eval_results = evaluate_model(mu_Y_t, mu_t, total_t, sample_num=freedom_t)
eval_json = json.dumps(eval_results, indent=4)
print(eval_json)
with open(dir + '/eval_result_prior.json','w') as outfile:
    json.dump(eval_results, outfile)
| [
"torch.optim.lr_scheduler.LambdaLR",
"torch.from_numpy",
"torch.nn.MSELoss",
"matplotlib.style.use",
"BayesianDTI.utils.plot_predictions",
"sys.path.append",
"argparse.ArgumentParser",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"json.dumps",
"os.mkdir",
"BayesianDTI.utils.evaluate_m... | [((11, 32), 'sys.path.append', 'sys.path.append', (['""".."""'], {}), "('..')\n", (26, 32), False, 'import sys\n'), ((467, 497), 'matplotlib.style.use', 'matplotlib.style.use', (['"""ggplot"""'], {}), "('ggplot')\n", (487, 497), False, 'import matplotlib\n'), ((508, 533), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (531, 533), False, 'import argparse\n'), ((1639, 1671), 'torch.cuda.set_device', 'torch.cuda.set_device', (['args.cuda'], {}), '(args.cuda)\n', (1660, 1671), False, 'import torch\n'), ((3453, 3539), 'torch.utils.data.DataLoader', 'DataLoader', (['dti_dataset'], {'batch_size': '(256)', 'shuffle': '(True)', 'collate_fn': 'collate_dataset'}), '(dti_dataset, batch_size=256, shuffle=True, collate_fn=\n collate_dataset)\n', (3463, 3539), False, 'from torch.utils.data import Dataset, DataLoader\n'), ((3573, 3665), 'torch.utils.data.DataLoader', 'DataLoader', (['valid_dti_dataset'], {'batch_size': '(256)', 'shuffle': '(True)', 'collate_fn': 'collate_dataset'}), '(valid_dti_dataset, batch_size=256, shuffle=True, collate_fn=\n collate_dataset)\n', (3583, 3665), False, 'from torch.utils.data import Dataset, DataLoader\n'), ((3698, 3789), 'torch.utils.data.DataLoader', 'DataLoader', (['test_dti_dataset'], {'batch_size': '(256)', 'shuffle': '(True)', 'collate_fn': 'collate_dataset'}), '(test_dti_dataset, batch_size=256, shuffle=True, collate_fn=\n collate_dataset)\n', (3708, 3789), False, 'from torch.utils.data import Dataset, DataLoader\n'), ((4067, 4085), 'torch.nn.MSELoss', 'torch.nn.MSELoss', ([], {}), '()\n', (4083, 4085), False, 'import torch\n'), ((4240, 4362), 'torch.optim.lr_scheduler.LambdaLR', 'torch.optim.lr_scheduler.LambdaLR', ([], {'optimizer': 'opt', 'lr_lambda': '(lambda epoch: 0.99 ** epoch)', 'last_epoch': '(-1)', 'verbose': '(False)'}), '(optimizer=opt, lr_lambda=lambda epoch: \n 0.99 ** epoch, last_epoch=-1, verbose=False)\n', (4273, 4362), False, 'import torch\n'), ((6678, 6705), 
'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(15, 5)'}), '(figsize=(15, 5))\n', (6688, 6705), True, 'import matplotlib.pyplot as plt\n'), ((6705, 6746), 'matplotlib.pyplot.plot', 'plt.plot', (['valid_loss_history'], {'label': '"""MSE"""'}), "(valid_loss_history, label='MSE')\n", (6713, 6746), True, 'import matplotlib.pyplot as plt\n'), ((6747, 6787), 'matplotlib.pyplot.plot', 'plt.plot', (['valid_nll_history'], {'label': '"""NLL"""'}), "(valid_nll_history, label='NLL')\n", (6755, 6787), True, 'import matplotlib.pyplot as plt\n'), ((6788, 6814), 'matplotlib.pyplot.title', 'plt.title', (['"""Validate loss"""'], {}), "('Validate loss')\n", (6797, 6814), True, 'import matplotlib.pyplot as plt\n'), ((6815, 6843), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Validate steps"""'], {}), "('Validate steps')\n", (6825, 6843), True, 'import matplotlib.pyplot as plt\n'), ((6844, 6892), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'facecolor': '"""white"""', 'edgecolor': '"""black"""'}), "(facecolor='white', edgecolor='black')\n", (6854, 6892), True, 'import matplotlib.pyplot as plt\n'), ((6894, 6912), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (6910, 6912), True, 'import matplotlib.pyplot as plt\n'), ((6914, 6953), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(dir + '/MultitaskLoss.png')"], {}), "(dir + '/MultitaskLoss.png')\n", (6925, 6953), True, 'import matplotlib.pyplot as plt\n'), ((7594, 7757), 'BayesianDTI.utils.plot_predictions', 'plot_predictions', (['mu_Y_t', 'mu_t', 'total_t'], {'title': '"""Mean prediction test with total uncertainty"""', 'sample_num': 'freedom_t', 'savefig': "(dir + '/total_uncertainty.png')"}), "(mu_Y_t, mu_t, total_t, title=\n 'Mean prediction test with total uncertainty', sample_num=freedom_t,\n savefig=dir + '/total_uncertainty.png')\n", (7610, 7757), False, 'from BayesianDTI.utils import plot_predictions\n'), ((7766, 7936), 'BayesianDTI.utils.plot_predictions', 'plot_predictions', (['mu_Y_t', 
'mu_t', 'aleatoric_t'], {'title': '"""Mean prediction test with aleatoric uncertainty"""', 'sample_num': 'None', 'savefig': "(dir + '/aleatoric_uncertainty.png')"}), "(mu_Y_t, mu_t, aleatoric_t, title=\n 'Mean prediction test with aleatoric uncertainty', sample_num=None,\n savefig=dir + '/aleatoric_uncertainty.png')\n", (7782, 7936), False, 'from BayesianDTI.utils import plot_predictions\n'), ((7945, 8115), 'BayesianDTI.utils.plot_predictions', 'plot_predictions', (['mu_Y_t', 'mu_t', 'epistemic_t'], {'title': '"""Mean prediction test with epistemic uncertainty"""', 'sample_num': 'None', 'savefig': "(dir + '/epistemic_uncertainty.png')"}), "(mu_Y_t, mu_t, epistemic_t, title=\n 'Mean prediction test with epistemic uncertainty', sample_num=None,\n savefig=dir + '/epistemic_uncertainty.png')\n", (7961, 8115), False, 'from BayesianDTI.utils import plot_predictions\n'), ((8124, 8325), 'BayesianDTI.utils.plot_predictions', 'plot_predictions', (['mu_Y_t', 'mu_t', 'predictive_entropy'], {'title': '"""Mean prediction test with predictive entropy"""', 'sample_num': 'freedom_t', 'savefig': "(dir + '/total_uncertainty_colored.png')", 'rep_conf': '"""color"""'}), "(mu_Y_t, mu_t, predictive_entropy, title=\n 'Mean prediction test with predictive entropy', sample_num=freedom_t,\n savefig=dir + '/total_uncertainty_colored.png', rep_conf='color')\n", (8140, 8325), False, 'from BayesianDTI.utils import plot_predictions\n'), ((8482, 8541), 'BayesianDTI.utils.evaluate_model', 'evaluate_model', (['mu_Y_t', 'mu_t', 'total_t'], {'sample_num': 'freedom_t'}), '(mu_Y_t, mu_t, total_t, sample_num=freedom_t)\n', (8496, 8541), False, 'from BayesianDTI.utils import evaluate_model\n'), ((8554, 8588), 'json.dumps', 'json.dumps', (['eval_results'], {'indent': '(4)'}), '(eval_results, indent=4)\n', (8564, 8588), False, 'import json\n'), ((1880, 1901), 'os.mkdir', 'os.mkdir', (['args.output'], {}), '(args.output)\n', (1888, 1901), False, 'import os\n'), ((6235, 6256), 'math.isnan', 'math.isnan', 
(['valid_nll'], {}), '(valid_nll)\n', (6245, 6256), False, 'import math\n'), ((8669, 8701), 'json.dump', 'json.dump', (['eval_results', 'outfile'], {}), '(eval_results, outfile)\n', (8678, 8701), False, 'import json\n'), ((2714, 2725), 'numpy.isnan', 'np.isnan', (['Y'], {}), '(Y)\n', (2722, 2725), True, 'import numpy as np\n'), ((6310, 6362), 'torch.save', 'torch.save', (['dti_model', "(dir + '/dti_model_best.model')"], {}), "(dti_model, dir + '/dti_model_best.model')\n", (6320, 6362), False, 'import torch\n'), ((7138, 7179), 'torch.load', 'torch.load', (["(dir + '/dti_model_best.model')"], {}), "(dir + '/dti_model_best.model')\n", (7148, 7179), False, 'import torch\n'), ((7399, 7426), 'torch.from_numpy', 'torch.from_numpy', (['freedom_t'], {}), '(freedom_t)\n', (7415, 7426), False, 'import torch\n'), ((7434, 7459), 'torch.from_numpy', 'torch.from_numpy', (['total_t'], {}), '(total_t)\n', (7450, 7459), False, 'import torch\n')] |
import numpy as np
from PIL import Image
from habitat_sim.utils.common import d3_40_colors_rgb
from habitat.config import Config
from typing import Any, Dict, Iterator, List, Optional, Tuple, Type, Union
from habitat.core.dataset import Dataset, Episode, EpisodeIterator
import os
import magnum as mn
import habitat
import random
import matplotlib.pyplot as plt
class SpringEnv(habitat.Env):
    """A habitat.Env that enables physics and can spawn objects into the scene."""

    def __init__(
        self, config: Config, dataset: Optional[Dataset] = None, objects = []
    ) -> None:
        super(SpringEnv, self).__init__(config, dataset)
        self._sim.sim_config.sim_cfg.enable_physics = True
        # Each entry is (relative path under "objects", [x, y, z] position).
        for entry in objects:
            self.add_object(os.path.join("objects", entry[0]), entry[1])

    def add_object(self, object_path, position):
        """Load the object config under data/<object_path>, place it at
        `position`, and assign it a semantic id derived from its object id."""
        template_manager = self._sim.get_object_template_manager()
        print(os.getcwd())
        config_file = str(os.path.join("data", object_path))
        template_id = template_manager.load_object_configs(config_file)[0]
        object_id = self._sim.add_object(template_id)
        self._sim.set_translation(np.array(position), object_id)
        self._sim.set_object_semantic_id(object_id + 1, object_id)

    def reset(self):
        """Reset the episode and attach the agent state to the observations."""
        obs = super(SpringEnv, self).reset()
        obs["agent_state"] = self._sim.get_agent_state(0)
        return obs

    def step(self, action):
        """Step the environment and attach the agent state to the observations."""
        obs = super(SpringEnv, self).step(action)
        obs["agent_state"] = self._sim.get_agent_state(0)
        return obs

    def set_test_scene(self):
        """Populate a fixed demo layout: one curtain plus a grid of stands."""
        layout = [
            ("objects/curtain", [1.55, 1.5, 0.2]),
            ("objects/stand", [0.6, 0, -0.8]),
            ("objects/stand", [-0.6, 0, -0.8]),
            ("objects/stand", [-1.8, 0, -0.8]),
            ("objects/stand", [-4.5, 0, -0.8]),
            ("objects/stand", [0.6, 0, 0.8]),
            ("objects/stand", [-0.6, 0, 0.8]),
            ("objects/stand", [-1.8, 0, 0.8]),
            ("objects/stand", [-3.0, 0, 0.8]),
            ("objects/stand", [-4.5, 0, 0.8]),
            ("objects/stand", [-5.0, 0, 0]),
        ]
        for path, position in layout:
            self.add_object(path, position)
| [
"numpy.array",
"os.path.join",
"os.getcwd"
] | [((864, 875), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (873, 875), False, 'import os\n'), ((1390, 1408), 'numpy.array', 'np.array', (['position'], {}), '(position)\n', (1398, 1408), True, 'import numpy as np\n'), ((682, 716), 'os.path.join', 'os.path.join', (['"""objects"""', 'object[0]'], {}), "('objects', object[0])\n", (694, 716), False, 'import os\n'), ((957, 990), 'os.path.join', 'os.path.join', (['"""data"""', 'object_path'], {}), "('data', object_path)\n", (969, 990), False, 'import os\n')] |
import logging
from typing import Union
from UQpy.inference.information_criteria import AIC
from UQpy.inference.information_criteria.baseclass.InformationCriterion import InformationCriterion
from beartype import beartype
from UQpy.inference.MLE import MLE
import numpy as np
class InformationModelSelection:
    """Model selection over candidate models via information-theoretic criteria
    (:class:`.AIC`, :class:`.BIC`, :class:`.AICc`), built on maximum-likelihood
    estimation through :class:`.MLE`."""

    # Authors: <NAME>, <NAME>
    # Last Modified: 12/19 by <NAME>
    @beartype
    def __init__(
        self,
        parameter_estimators: list[MLE],
        # NOTE(review): AIC() is evaluated once at function-definition time, so
        # every instance built without an explicit criterion shares the same
        # AIC object -- fine only if the criterion is stateless; confirm.
        criterion: InformationCriterion = AIC(),
        n_optimizations: list[int] = None,
        initial_parameters: list[np.ndarray] = None
    ):
        """
        Perform model selection using information theoretic criteria.
        Supported criteria are :class:`.BIC`, :class:`.AIC` (default), :class:`.AICc`. This class leverages the
        :class:`.MLE` class for maximum likelihood estimation, thus inputs to :class:`.MLE` can also be provided to
        :class:`InformationModelSelection`, as lists of length equal to the number of models.
        :param parameter_estimators: A list containing a maximum-likelihood estimator (:class:`.MLE`) for each one of the
        models to be compared.
        :param criterion: Criterion to be used (:class:`.AIC`, :class:`.BIC`, :class:`.AICc)`. Default is :class:`.AIC`
        :param initial_parameters: Initial guess(es) for optimization, :class:`numpy.ndarray` of shape
        :code:`(nstarts, n_parameters)` or :code:`(n_parameters, )`, where :code:`nstarts` is the number of times the
        optimizer will be called. Alternatively, the user can provide input `n_optimizations` to randomly sample
        initial guess(es). The identified MLE is the one that yields the maximum log likelihood over all calls of the
        optimizer.
        """
        # One inference model per estimator; list order is preserved throughout.
        self.candidate_models = [mle.inference_model for mle in parameter_estimators]
        self.models_number = len(parameter_estimators)
        self.criterion: InformationCriterion = criterion
        self.logger = logging.getLogger(__name__)
        self.n_optimizations = n_optimizations
        self.initial_parameters= initial_parameters
        self.parameter_estimators: list = parameter_estimators
        """:class:`.MLE` results for each model (contains e.g. fitted parameters)"""
        # Initialize the outputs
        self.criterion_values: list = [None, ] * self.models_number
        """Value of the criterion for all models."""
        self.penalty_terms: list = [None, ] * self.models_number
        """Value of the penalty term for all models. Data fit term is then criterion_value - penalty_term."""
        self.probabilities: list = [None, ] * self.models_number
        r"""Value of the model probabilities, computed as
        .. math:: P(M_i|d) = \dfrac{\exp(-\Delta_i/2)}{\sum_i \exp(-\Delta_i/2)}
        where :math:`\Delta_i = criterion_i - min_i(criterion)`"""
        # Run the model selection procedure immediately when the caller supplied
        # either optimization counts or initial guesses.
        if (self.n_optimizations is not None) or (self.initial_parameters is not None):
            self.run(self.n_optimizations, self.initial_parameters)

    def run(self, n_optimizations: list[int], initial_parameters: list[np.ndarray]=None):
        """
        Run the model selection procedure, i.e. compute criterion value for all models.
        This function calls the :meth:`run` method of the :class:`.MLE` object for each model to compute the maximum
        log-likelihood, then computes the criterion value and probability for each model. If `data` are given when
        creating the :class:`.MLE` object, this method is called automatically when the object is created.
        :param n_optimizations: Number of iterations that the optimization is run, starting at random initial
        guesses. It is only used if `initial_parameters` is not provided. Default is :math:`1`.
        The random initial guesses are sampled uniformly between :math:`0` and :math:`1`, or uniformly between
        user-defined bounds if an input bounds is provided as a keyword argument to the `optimizer` input parameter.
        :param initial_parameters: Initial guess(es) for optimization, :class:`numpy.ndarray` of shape
        :code:`(nstarts, n_parameters)` or :code:`(n_parameters, )`, where :code:`nstarts` is the number of times the
        optimizer will be called. Alternatively, the user can provide input `n_optimizations` to randomly sample
        initial guess(es). The identified MLE is the one that yields the maximum log likelihood over all calls of the
        optimizer.
        """
        # Per-model inputs must align one-to-one with the estimators.
        if (n_optimizations is not None and (len(n_optimizations) != len(self.parameter_estimators))) or \
                (initial_parameters is not None and len(initial_parameters) != len(self.parameter_estimators)):
            raise ValueError("The length of n_optimizations and initial_parameters should be equal to the number of "
                             "parameter estimators")
        # Loop over all the models
        for i, parameter_estimator in enumerate(self.parameter_estimators):
            # First evaluate ML estimate for all models, do several iterations if demanded
            parameters = None
            if initial_parameters is not None:
                parameters = initial_parameters[i]
            optimizations = 0
            if n_optimizations is not None:
                optimizations = n_optimizations[i]
            parameter_estimator.run(n_optimizations=optimizations, initial_parameters=parameters)
            # Then minimize the criterion
            self.criterion_values[i], self.penalty_terms[i] = \
                self.criterion.minimize_criterion(data=parameter_estimator.data,
                                                  parameter_estimator=parameter_estimator,
                                                  return_penalty=True)
        # Compute probabilities from criterion values
        self.probabilities = self._compute_probabilities(self.criterion_values)

    def sort_models(self):
        """
        Sort models in descending order of model probability (increasing order of `criterion` value).
        This function sorts - in place - the attribute lists :py:attr:`.candidate_models`, :py:attr:`.ml_estimators`,
        :py:attr:`criterion_values`, :py:attr:`penalty_terms` and :py:attr:`probabilities` so that they are sorted from
        most probable to least probable model. It is a stand-alone function that is provided to help the user to easily
        visualize which model is the best.
        No inputs/outputs.
        """
        # Ascending criterion value == descending model probability.
        sort_idx = list(np.argsort(np.array(self.criterion_values)))
        self.candidate_models = [self.candidate_models[i] for i in sort_idx]
        self.parameter_estimators = [self.parameter_estimators[i] for i in sort_idx]
        self.criterion_values = [self.criterion_values[i] for i in sort_idx]
        self.penalty_terms = [self.penalty_terms[i] for i in sort_idx]
        self.probabilities = [self.probabilities[i] for i in sort_idx]

    @staticmethod
    def _compute_probabilities(criterion_values):
        # Softmax of -criterion/2, shifted by the minimum for numerical stability.
        delta = np.array(criterion_values) - min(criterion_values)
        prob = np.exp(-delta / 2)
        return prob / np.sum(prob)
| [
"logging.getLogger",
"UQpy.inference.information_criteria.AIC",
"numpy.exp",
"numpy.array",
"numpy.sum"
] | [((521, 526), 'UQpy.inference.information_criteria.AIC', 'AIC', ([], {}), '()\n', (524, 526), False, 'from UQpy.inference.information_criteria import AIC\n'), ((2025, 2052), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (2042, 2052), False, 'import logging\n'), ((7188, 7206), 'numpy.exp', 'np.exp', (['(-delta / 2)'], {}), '(-delta / 2)\n', (7194, 7206), True, 'import numpy as np\n'), ((7122, 7148), 'numpy.array', 'np.array', (['criterion_values'], {}), '(criterion_values)\n', (7130, 7148), True, 'import numpy as np\n'), ((7229, 7241), 'numpy.sum', 'np.sum', (['prob'], {}), '(prob)\n', (7235, 7241), True, 'import numpy as np\n'), ((6621, 6652), 'numpy.array', 'np.array', (['self.criterion_values'], {}), '(self.criterion_values)\n', (6629, 6652), True, 'import numpy as np\n')] |
"""
False Nearest Neighbors (FNN) for dimension (n).
==================================================
"""
def ts_recon(ts, dim, tau):
    """Construct the delay-coordinate (Takens) embedding of a time series.

    Args:
        ts (array): Time series, shape (n,) or (n, 1).
        dim (int): Embedding dimension.
        tau (int): Embedding delay in samples.

    Returns:
        numpy.ndarray: Embedded vectors of shape (n - (dim-1)*tau, dim); row i
        is [ts[i], ts[i+tau], ..., ts[i+(dim-1)*tau]].
    """
    # FIX: numpy was never imported at module level (only locally inside FNN_n
    # and the __main__ guard), so `np` was unresolved when this module is
    # imported and FNN_n is called. Import locally to keep the module's
    # import-free top level unchanged.
    import numpy as np

    # Generalization: also accept a flat 1-D series; (n, 1) input is unchanged.
    ts = np.asarray(ts).reshape(-1, 1)
    xlen = len(ts) - (dim - 1) * tau
    # Column of window start indices [0 .. xlen-1].
    starts = np.reshape(np.linspace(0, xlen - 1, xlen), (xlen, 1))
    # Row of per-coordinate delay offsets [0, tau, ..., (dim-1)*tau].
    delays = np.reshape(np.linspace(0, dim - 1, dim) * tau, (1, dim))
    # Broadcast-style index matrix: entry (i, j) = i + j*tau.
    ind_recon = (np.tile(starts, (1, dim)) + np.tile(delays, (xlen, 1))).astype(np.int64)
    return ts[ind_recon][:, :, 0]
def FNN_n(ts, tau, maxDim = 10, plotting = False, Rtol=15, Atol=2, threshold = 10):
    """Select the minimum embedding dimension with the False Nearest Neighbors
    (FNN) algorithm described by Kennel et al.

    Args:
        ts (array): Time series (1d).
        tau (int): Embedding delay.

    Kwargs:
        maxDim (int): maximum dimension in dimension search. Default is 10.
        plotting (bool): Plotting for user interpretation. Default is False.
        Rtol (float): Ratio tolerance. Default is 15.
        Atol (float): A tolerance. Default is 2.
        threshold (float): Tolerance threshold for percent of nearest neighbors. Default is 10.

    Returns:
        (array, int): Xfnn, the percent of false nearest neighbors per tested
        dimension, and n, the selected embedding dimension.
    """
    import numpy as np
    from scipy.spatial import KDTree

    # Shrink the search range when the series is too short for maxDim windows.
    if len(ts)-(maxDim-1)*tau < 20:
        maxDim=len(ts)-(maxDim-1)*tau-1

    ts = np.reshape(ts, (len(ts),1)) #ts is a column vector
    st_dev=np.std(ts) #standard deviation of the time series

    Xfnn=[]
    dim_array = []
    flag = False
    i = 0
    while flag == False:
        i = i+1
        dim=i
        tsrecon = ts_recon(ts, dim, tau)#delay reconstruction

        tree=KDTree(tsrecon)
        D,IDX=tree.query(tsrecon,k=2)

        #Calculate the false nearest neighbor ratio for each dimension
        if i>1:
            # Distance in dimension m+1 between the pairs that were nearest
            # neighbors in dimension m.
            D_mp1=np.sqrt(np.sum((np.square(tsrecon[ind_m,:]-tsrecon[ind,:])),axis=1))
            #Criteria 1 : increase in distance between neighbors is large
            num1 = np.heaviside(np.divide(abs(tsrecon[ind_m,-1]-tsrecon[ind,-1]),Dm)-Rtol,0.5)
            #Criteria 2 : nearest neighbor not necessarily close to y(n)
            num2= np.heaviside(Atol-D_mp1/st_dev,0.5)
            num=sum(np.multiply(num1,num2))
            den=sum(num2)
            Xfnn.append((num/den)*100)
            dim_array.append(dim-1)
            # FIX: terminate against the user-supplied `threshold` instead of a
            # hard-coded 10, so the keyword argument actually takes effect.
            if (num/den)*100 <= threshold or i == maxDim:
                flag = True

        # Save the index to D and k(n) in dimension m for comparison with the
        # same distance in m+1 dimension
        xlen2=len(ts)-dim*tau
        Dm=D[0:xlen2,-1]
        ind_m=IDX[0:xlen2,-1]
        ind=ind_m<=xlen2-1
        ind_m=ind_m[ind]
        Dm=Dm[ind]

    Xfnn = np.array(Xfnn)

    if plotting == True:
        import matplotlib.pyplot as plt
        TextSize = 14
        plt.figure(1)
        plt.plot(dim_array, Xfnn)
        plt.xlabel(r'Dimension $n$', size = TextSize)
        plt.ylabel('Percent FNN', size = TextSize)
        plt.xticks(size = TextSize)
        plt.yticks(size = TextSize)
        plt.ylim(0)
        plt.savefig('C:\\Users\\myersau3.EGR\\Desktop\\python_png\\FNN_fig.png', bbox_inches='tight',dpi = 400)
        plt.show()
    return Xfnn, dim-1
# In[ ]:
# Demo: estimate the embedding dimension of a pure sine wave.
if __name__ == '__main__':
    import numpy as np
    # fs samples per unit time over t in [0, 100] -> 1000 samples total.
    fs = 10
    t = np.linspace(0, 100, fs*100)
    ts = np.sin(t)
    tau=15 #embedding delay
    # Runs the FNN search and (with plotting=True) saves/shows the FNN plot.
    perc_FNN, n =FNN_n(ts, tau, plotting = True)
    print('FNN embedding Dimension: ',n)
| [
"matplotlib.pyplot.ylabel",
"scipy.spatial.KDTree",
"numpy.array",
"numpy.sin",
"numpy.multiply",
"numpy.reshape",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"numpy.heaviside",
"numpy.linspace",
"matplotlib.pyplot.yticks",
"matplotlib.pyplot.ylim",
"numpy.tile",
"matplotlib.pypl... | [((177, 207), 'numpy.linspace', 'np.linspace', (['(0)', '(xlen - 1)', 'xlen'], {}), '(0, xlen - 1, xlen)\n', (188, 207), True, 'import numpy as np\n'), ((211, 235), 'numpy.reshape', 'np.reshape', (['a', '(xlen, 1)'], {}), '(a, (xlen, 1))\n', (221, 235), True, 'import numpy as np\n'), ((292, 322), 'numpy.reshape', 'np.reshape', (['delayVec', '(1, dim)'], {}), '(delayVec, (1, dim))\n', (302, 322), True, 'import numpy as np\n'), ((334, 362), 'numpy.tile', 'np.tile', (['delayVec', '(xlen, 1)'], {}), '(delayVec, (xlen, 1))\n', (341, 362), True, 'import numpy as np\n'), ((369, 389), 'numpy.tile', 'np.tile', (['a', '(1, dim)'], {}), '(a, (1, dim))\n', (376, 389), True, 'import numpy as np\n'), ((1570, 1580), 'numpy.std', 'np.std', (['ts'], {}), '(ts)\n', (1576, 1580), True, 'import numpy as np\n'), ((2891, 2905), 'numpy.array', 'np.array', (['Xfnn'], {}), '(Xfnn)\n', (2899, 2905), True, 'import numpy as np\n'), ((3509, 3538), 'numpy.linspace', 'np.linspace', (['(0)', '(100)', '(fs * 100)'], {}), '(0, 100, fs * 100)\n', (3520, 3538), True, 'import numpy as np\n'), ((3547, 3556), 'numpy.sin', 'np.sin', (['t'], {}), '(t)\n', (3553, 3556), True, 'import numpy as np\n'), ((247, 275), 'numpy.linspace', 'np.linspace', (['(0)', '(dim - 1)', 'dim'], {}), '(0, dim - 1, dim)\n', (258, 275), True, 'import numpy as np\n'), ((403, 431), 'numpy.reshape', 'np.reshape', (['vec', '(xlen, dim)'], {}), '(vec, (xlen, dim))\n', (413, 431), True, 'import numpy as np\n'), ((1828, 1843), 'scipy.spatial.KDTree', 'KDTree', (['tsrecon'], {}), '(tsrecon)\n', (1834, 1843), False, 'from scipy.spatial import KDTree\n'), ((3010, 3023), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {}), '(1)\n', (3020, 3023), True, 'import matplotlib.pyplot as plt\n'), ((3033, 3058), 'matplotlib.pyplot.plot', 'plt.plot', (['dim_array', 'Xfnn'], {}), '(dim_array, Xfnn)\n', (3041, 3058), True, 'import matplotlib.pyplot as plt\n'), ((3067, 3109), 'matplotlib.pyplot.xlabel', 'plt.xlabel', 
(['"""Dimension $n$"""'], {'size': 'TextSize'}), "('Dimension $n$', size=TextSize)\n", (3077, 3109), True, 'import matplotlib.pyplot as plt\n'), ((3121, 3161), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Percent FNN"""'], {'size': 'TextSize'}), "('Percent FNN', size=TextSize)\n", (3131, 3161), True, 'import matplotlib.pyplot as plt\n'), ((3172, 3197), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'size': 'TextSize'}), '(size=TextSize)\n', (3182, 3197), True, 'import matplotlib.pyplot as plt\n'), ((3208, 3233), 'matplotlib.pyplot.yticks', 'plt.yticks', ([], {'size': 'TextSize'}), '(size=TextSize)\n', (3218, 3233), True, 'import matplotlib.pyplot as plt\n'), ((3244, 3255), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0)'], {}), '(0)\n', (3252, 3255), True, 'import matplotlib.pyplot as plt\n'), ((3264, 3370), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""C:\\\\Users\\\\myersau3.EGR\\\\Desktop\\\\python_png\\\\FNN_fig.png"""'], {'bbox_inches': '"""tight"""', 'dpi': '(400)'}), "('C:\\\\Users\\\\myersau3.EGR\\\\Desktop\\\\python_png\\\\FNN_fig.png',\n bbox_inches='tight', dpi=400)\n", (3275, 3370), True, 'import matplotlib.pyplot as plt\n'), ((3376, 3386), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3384, 3386), True, 'import matplotlib.pyplot as plt\n'), ((2325, 2365), 'numpy.heaviside', 'np.heaviside', (['(Atol - D_mp1 / st_dev)', '(0.5)'], {}), '(Atol - D_mp1 / st_dev, 0.5)\n', (2337, 2365), True, 'import numpy as np\n'), ((2381, 2404), 'numpy.multiply', 'np.multiply', (['num1', 'num2'], {}), '(num1, num2)\n', (2392, 2404), True, 'import numpy as np\n'), ((2012, 2058), 'numpy.square', 'np.square', (['(tsrecon[ind_m, :] - tsrecon[ind, :])'], {}), '(tsrecon[ind_m, :] - tsrecon[ind, :])\n', (2021, 2058), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
import concurrent.futures
# import glob
import itertools
import logging
import os
import re
import sys
import math
import matplotlib
import numpy as np
# import numpy.ma as ma
import pandas as pd
from scipy import stats
matplotlib.use("agg")
import matplotlib.pyplot as plt
def ks_test(a, b):
    """Two-sample Kolmogorov-Smirnov test between the flattened arrays.

    NaNs are dropped first; if either side ends up empty the test is
    undefined and [nan, nan] is returned instead.

    Returns: tuple (KS distance, p-value).
    A significant p-value means a and b belong to different distributions.
    """
    cleaned = [remove_nan(arr.flatten()) for arr in (a, b)]
    if any(len(values) == 0 for values in cleaned):
        return [np.nan] * 2
    return stats.ks_2samp(cleaned[0], cleaned[1])
def cos_sim(a, b):
    """Cosine similarity between the flattened reference (a) and query (b).

    Args:
        a, b: numpy arrays of equal total size (any shape).

    Returns:
        float: dot(a, b) / (||a|| * ||b||) over the flattened arrays.

    Raises:
        ValueError: if the flattened arrays differ in length.
    """
    a_flat = a.flatten()
    b_flat = b.flatten()
    # Guard clause: ValueError is more precise than the bare Exception raised
    # previously, and remains catchable by any caller handling Exception.
    if len(a_flat) != len(b_flat):
        raise ValueError("LengthError: Cos_Sim requires arrays to be of the same size.")
    return np.dot(a_flat, b_flat) / (
        np.linalg.norm(a_flat) * np.linalg.norm(b_flat)
    )
def positional_drift(base_array, query_array, cutoff=0.002):
    """Count positions that are stable in the base but drifted in the query.

    A position is "conserved" in the base when its absolute value is below
    *cutoff*. For those conserved positions, the query is inspected and the
    number of positions whose absolute query value exceeds *cutoff* is the
    "drift".

    Args:
        base_array: numpy diff array for the base sample.
        query_array: numpy diff array for the query sample (same shape).
        cutoff: absolute deviation threshold around zero.

    Returns:
        tuple (conserved_in_base, comparable_positions, drifted_positions).
        The last two are NaN when the base has no conserved positions.
    """
    assert base_array.shape == query_array.shape, "array size mismatch!"
    base_flat = base_array.flatten()
    query_flat = query_array.flatten()
    # Only positions with non-NaN base values can be assessed at all.
    valid_base = ~np.isnan(base_flat)
    base_values = base_flat[valid_base]
    within_cutoff = np.abs(base_values) < cutoff
    conserved_in_base = int(np.sum(within_cutoff))
    if conserved_in_base == 0:
        # Nothing conserved in the base -> nothing to compare in the query.
        return (conserved_in_base, np.nan, np.nan)
    query_values = query_flat[valid_base]
    valid_query = ~np.isnan(query_values)
    comparable = int(np.sum(valid_query))
    # Query values at positions that are conserved in the base and
    # observed (non-NaN) in the query.
    candidates = query_values[valid_query & within_cutoff]
    drifted = int(np.sum(np.abs(candidates) > cutoff))
    return (conserved_in_base, comparable, drifted)
def remove_nan(prob_array):
    """Return *prob_array* with every NaN entry dropped."""
    keep_mask = ~np.isnan(prob_array)
    return prob_array[keep_mask]
def summary_stats(d, cutoff=0.002):
    """Flatten *d*, drop NaNs, and return a dict of summary statistics.

    Args:
        d: an n-dim numpy array of per-position probability differences.
        cutoff: absolute-value threshold for the "Nucl_Within_Threshold" count.

    Returns:
        dict with keys Total_Length, Non_NA_Length, Non_Zero_Length,
        Extreme_Positions, Nucl_Within_Threshold, Min, Max, Mean, Std_Dev,
        Variance, Q10, Median, Q90, MAD, IQR, Skew, Kurtosis. When no non-NaN
        data points exist, the distribution statistics are NaN placeholders.
    """
    d_prime = remove_nan(d.flatten())
    d_prime_len = len(d_prime)
    if d_prime_len > 0:
        d_stats = {
            "Total_Length": len(d.flatten()),
            "Non_NA_Length": d_prime_len,
            "Non_Zero_Length": len(d_prime[d_prime != 0]),
            "Extreme_Positions": len(d_prime[abs(d_prime) == 1]),
            "Nucl_Within_Threshold": len(d_prime[abs(d_prime) < cutoff]),
            "Min": np.nanmin(d_prime),
            "Max": np.nanmax(d_prime),
            "Mean": np.nanmean(d_prime),
            "Std_Dev": np.nanstd(d_prime),
            "Variance": np.nanvar(d_prime),
            "Q10": np.nanquantile(d_prime, 0.1),
            "Median": np.nanmedian(d_prime),
            "Q90": np.nanquantile(d_prime, 0.9),
            # scipy.stats.median_absolute_deviation was deprecated in SciPy 1.5
            # and removed in 1.9; median_abs_deviation with scale="normal" is
            # the documented drop-in equivalent (it keeps the 1.4826
            # normal-consistency scaling the old function applied by default).
            "MAD": stats.median_abs_deviation(
                d_prime, nan_policy="omit", scale="normal"
            ),
            "IQR": stats.iqr(d_prime, nan_policy="omit"),
            "Skew": stats.skew(d_prime, nan_policy="omit"),
            "Kurtosis": stats.kurtosis(d_prime, nan_policy="omit"),
        }
    else:
        logging.info(
            "Not enough data points for reliable summary statistics. Adding placeholders ..."
        )
        nan_value = np.nan
        d_stats = {
            "Total_Length": len(d.flatten()),
            "Non_NA_Length": d_prime_len,
            "Non_Zero_Length": nan_value,
            "Extreme_Positions": nan_value,
            "Nucl_Within_Threshold": nan_value,
            "Min": nan_value,
            "Max": nan_value,
            "Mean": nan_value,
            "Std_Dev": nan_value,
            "Variance": nan_value,
            "Q10": nan_value,
            "Median": nan_value,
            "Q90": nan_value,
            "MAD": nan_value,
            "IQR": nan_value,
            "Skew": nan_value,
            "Kurtosis": nan_value,
        }
    summ = "; ".join([f"{k}:{d_stats[k]}" for k in sorted(d_stats.keys())])
    logging.info(f"Depth summary: {summ}")
    return d_stats
def summary_stats_select(d, cutoff=0.002):
    """Return a reduced set of summary statistics for a diff array.

    Like summary_stats, but computes only the count-based metrics plus
    skew and kurtosis. When the array holds no non-NaN values, the
    non-count metrics are NaN placeholders.
    """
    flat = d.flatten()
    clean = remove_nan(flat)
    n_clean = len(clean)
    if n_clean > 0:
        d_stats = {
            "Total_Length": len(flat),
            "Non_NA_Length": n_clean,
            "Non_Zero_Length": len(clean[clean != 0]),
            "Extreme_Positions": len(clean[abs(clean) == 1]),
            "Nucl_Within_Threshold": len(clean[abs(clean) < cutoff]),
            "Skew": stats.skew(clean, nan_policy="omit"),
            "Kurtosis": stats.kurtosis(clean, nan_policy="omit"),
        }
    else:
        logging.info(
            "Not enough data points for reliable summary statistics. Adding placeholders ..."
        )
        d_stats = dict.fromkeys(
            [
                "Non_Zero_Length",
                "Extreme_Positions",
                "Nucl_Within_Threshold",
                "Skew",
                "Kurtosis",
            ],
            np.nan,
        )
        d_stats["Total_Length"] = len(flat)
        d_stats["Non_NA_Length"] = n_clean
    summ = "; ".join([f"{k}:{d_stats[k]}" for k in sorted(d_stats.keys())])
    logging.info(f"Depth summary: {summ}")
    return d_stats
def compare_arrays_to_base(
    np_base_complete, query_array_path, selected_pos=None, cutoff=None
):
    """Compare one query diff array (loaded from disk) against a base array.

    Computes KS distance, cosine similarity, positional drift and summary
    statistics, optionally restricted to *selected_pos*.

    Returns:
        dict of comparison metrics keyed by metric name, including the
        query's name parsed from its file name.
    """
    query_name = os.path.basename(query_array_path).split(".")[1]
    np_query_complete = np.load(query_array_path)
    logging.info(f" ... {query_name}")
    if selected_pos is None:
        np_base, np_query = np_base_complete, np_query_complete
    else:
        np_base = np_base_complete[selected_pos]
        np_query = np_query_complete[selected_pos]
    ks_dist, ks_p_val = ks_test(np_base, np_query)
    cosine_sim = cos_sim(np_base, np_query)
    conserved_len, comparable_pos, drift = positional_drift(
        np_base, np_query, cutoff=cutoff
    )
    # Named query_stats to avoid shadowing the scipy `stats` module.
    query_stats = summary_stats(np_query, cutoff=cutoff)
    query_dict = {
        "query_name": query_name,
        "ks_dist": ks_dist,
        "ks_p_val": ks_p_val,
        "base_conserved_pos": conserved_len,
        "query_comparable_pos": comparable_pos,
        "query_conserved_pos_drift": drift,
        "cosine_similarity": cosine_sim,
    }
    query_dict.update(query_stats)
    return query_dict
def compare_all(
    query_array_paths, base_array_paths, selected_pos=None, cores=None, cutoff=None
):
    """Compare every query diff array against every base diff array.

    For each base array, all query arrays are compared in parallel via a
    process pool (see compare_arrays_to_base). Results arrive in completion
    order, not submission order.

    Args:
        query_array_paths: list of .npy paths to query diff arrays.
        base_array_paths: list of .npy paths to base diff arrays.
        selected_pos: optional boolean index restricting the comparison.
        cores: max worker processes (None means "all available").
        cutoff: absolute deviation threshold passed through to the metrics.

    Returns:
        list of dicts, one per (base, query) pair, each tagged with
        base_name and org_name parsed from the base file name.
    """
    df_dict_array = list()
    num_profiles = len(query_array_paths)
    logging.info(f"{num_profiles} profiles found.")
    # processed = dict()
    for outer_idx, base_array in enumerate(base_array_paths):
        # logging.info(f"[{outer_idx+1}/{num_profiles}] Starting with {base_array}")
        # File name convention: <org_name>.<base_name>.<...>.npy
        name_split = os.path.basename(base_array).split(".")
        org_name = name_split[0]
        base_name = name_split[1]
        np_base = np.load(base_array)
        # query_dict = {}
        logging.info(f"[{outer_idx+1}/{num_profiles}] Comparing {base_name} with ...")
        # Parallelize comparisons to base
        with concurrent.futures.ProcessPoolExecutor(max_workers=cores) as executor:
            future = [
                executor.submit(
                    compare_arrays_to_base, np_base, query_path, selected_pos, cutoff
                )
                for query_path in query_array_paths
            ]
            for f in concurrent.futures.as_completed(future):
                result = f.result()
                # logging.info(
                #     f"Saving results for {base_name} vs {result['query_name']}"
                # )
                result.update({"base_name": base_name, "org_name": org_name})
                df_dict_array.append(result)
    return df_dict_array
def plot_diff_hist(ax, diff_vector, title):
    """Draw a log-scaled histogram of *diff_vector* onto the axes *ax*.

    Bins span [-1, 1] in 0.01 steps; the axes are rasterized to keep the
    (potentially huge) multi-panel figure light.
    """
    logging.info(f"Generating histogram for {title} ...")
    bin_edges = np.arange(-1, 1.01, 0.01)
    ax.set_yscale("log")
    ax.set_title(title)
    ax.hist(diff_vector.flatten(), bins=bin_edges)
    ax.set_xlim([-1.15, 1.15])
    ax.set_rasterized(True)
def visualizing_differences(diff_vector, selected_pos, axs):
    """Plot the masked diff histogram for one sample and return its stats.

    The sample title is parsed from the file name (second dot-separated
    field, expected to look like '<base>_vs_<query>').
    """
    sample_title = os.path.basename(diff_vector).split(".")[1]
    loaded_diff = np.load(diff_vector)
    name_parts = sample_title.split("_vs_")
    base_name, query_name = name_parts[0], name_parts[1]
    masked_diff = loaded_diff[selected_pos]
    plot_diff_hist(axs, masked_diff, sample_title)
    stat_dict = summary_stats_select(masked_diff)
    stat_dict.update({"Base": base_name, "Query": query_name, "Sample": sample_title})
    return stat_dict
def within_limits(array, cutoff):
    """Load a .npy diff array and flag entries with |value| < cutoff.

    Returns an int array of 0/1. NaN entries always compare False under
    errstate suppression, so they yield 0.
    """
    logging.info(f"Loading {array} at {cutoff}")
    loaded_values = np.load(array)
    with np.errstate(invalid="ignore"):
        inside = np.abs(loaded_values) < cutoff
    return inside.astype(int)
def get_diff_stdev(array_paths, selected_idx):
    """Sum |diff| across all arrays at *selected_idx* and return the stdev.

    Each array is loaded from disk, its absolute values masked to the
    selected positions (NaN elsewhere), and the per-position sums over
    all arrays are reduced to a single nan-aware standard deviation.
    """
    per_array = [
        subset_on_indices(np.abs(np.load(path)), selected_idx) for path in array_paths
    ]
    summed_diff = np.nansum(np.array(per_array), axis=0)
    return np.nanstd(summed_diff)
def select_conserved_positions(arrays, method="majority", cutoff=0.002):
    """Select positions within *cutoff* across a list of diff prob arrays.

    Depending on *method*, a position is selected when it falls within the
    cutoff in:
      - "majority": more than half of the arrays,
      - "intersection": every array,
      - "union": at least one array.

    Returns:
        np.ndarray of booleans marking the selected positions.
    """
    num_arrays = len(arrays)
    logging.info(
        f"Received {num_arrays} arrays, with method {method} and cutoff {cutoff}"
    )
    # Per-position count of arrays whose value falls within +/- cutoff.
    agg_array = np.sum([within_limits(array, cutoff) for array in arrays], axis=0)
    # A count above num_arrays would mean the sum ran along the wrong axis.
    assert np.sum((agg_array > num_arrays).astype(int)) == 0, "Wrong axis!"
    selected_idx = []
    if method == "majority":
        half = (num_arrays - 1) / 2 if num_arrays % 2 == 1 else num_arrays / 2
        selected_idx = (agg_array > half).astype(bool)
    elif method == "intersection":
        selected_idx = (agg_array == num_arrays).astype(bool)
    elif method == "union":
        selected_idx = (agg_array > 0).astype(bool)
    selected_idx_num = np.sum(selected_idx)
    logging.info(
        f"Method: {method}, Shape:{selected_idx.shape}, Num_Positions:{selected_idx_num}"
    )
    return selected_idx
def get_conserved_positions(arr_paths, method, cutoff=0.002, cores=None):
    """Leave-one-out search for a robust set of conserved positions.

    Takes N diff arrays, builds every combination of N-1 of them, selects
    conserved positions for each combination in parallel, and keeps the
    best-scoring set (lowest standard error, see pick_best_set).

    Args:
        arr_paths (list): paths to .npy diff arrays.
        method (str): "majority", "intersection" or "union"
            (see select_conserved_positions).
        cutoff (float, optional): absolute deviation threshold. Defaults to 0.002.
        cores (int, optional): max worker processes. Defaults to None,
            meaning "all available".

    Returns:
        np.ndarray (boolean): the selected conserved-position mask.

    Raises:
        CoverageError: when no stable set of positions can be found.
    """
    num_arr = len(arr_paths)
    arr_combinations = list()
    logging.info(f"Found {num_arr} arrays")
    # based on all combinations of N-1 arrays
    index_combinations = list(itertools.combinations(range(num_arr), num_arr - 1))
    for idx_combo in index_combinations:
        arr_combinations.append([arr_paths[i] for i in idx_combo])
    logging.info(f"Created {len(arr_combinations)} combinations")
    # find conserved positions across N-1 arrays
    all_selected_indices = list()
    with concurrent.futures.ProcessPoolExecutor(max_workers=cores) as executor:
        future = [
            executor.submit(select_conserved_positions, arr_sub, method, cutoff)
            for arr_sub in arr_combinations
        ]
        for f in concurrent.futures.as_completed(future):
            selected_index = f.result()
            all_selected_indices.append(selected_index.astype(int))
    selected_idx, selected_idx_err = pick_best_set(all_selected_indices, arr_paths)
    if selected_idx is None:
        logging.info(
            "Could not find any stable positions in controls, possibly due to lack of breadth or depth of coverage. Exiting ..."
        )
        raise CoverageError(
            "NotEnoughCoverage",
            "Could not find any stable positions in controls, possibly due to lack of breadth or depth of coverage.",
        )
    selected_idx_num = np.sum(selected_idx)
    logging.info(
        f"[Leave One Out] Method: {method}, Shape:{selected_idx.shape}, Num_Positions:{selected_idx_num}, Standard Error={selected_idx_err}"
    )
    return selected_idx.astype("bool")
def pick_best_set(selected_idx_array, diff_paths):
    """Pick the index set with the lowest std err (std / sqrt(sel_pos_len));
    if no std err can be calculated, pick the longest set instead.

    Args:
        selected_idx_array (list): list of genome x 4 (nucleotides) numpy arrays
            with boolean values for selected positions
        diff_paths (list): paths to numpy diff arrays of same shape as each
            array in selected_idx_array

    Returns:
        tuple: (selected_idx, selected_idx_stderr); (None, None) when no set
        qualifies by either criterion.
    """
    # apply selected idx and calculate std err across all samples.
    all_std = [
        get_diff_stdev(diff_paths, selected_idx) for selected_idx in selected_idx_array
    ]
    all_lengths = [np.sum(a) for a in selected_idx_array]
    # by_std_err holds the std-err score per set; by_len is a fallback that
    # records the set length when the std dev is exactly zero.
    by_std_err = np.full_like(all_std, np.nan)
    by_len = np.zeros_like(all_std)
    for idx, l in enumerate(all_lengths):
        if (l > 0) and (all_std[idx] > 0):
            by_std_err[idx] = all_std[idx] / np.sqrt(l)
        elif (l > 0) and (all_std[idx] == 0):
            by_len[idx] = l
        elif l == 0:
            # Empty set: cannot be scored at all.
            continue
    best_pick_idx = np.nan
    # Prefer the std-err criterion; fall back to longest zero-std set.
    if not np.all(np.isnan(by_std_err)):
        best_pick_idx = np.nanargmin(by_std_err)
    elif any(by_len > 0):
        best_pick_idx = np.nanargmax(by_len)
    # logging.info(f"Best Pick Index: {best_pick_idx}")
    if np.isnan(best_pick_idx):
        return (None, None)
    else:
        selected_arr_stderr = by_std_err[best_pick_idx]
        selected_arr_length = all_lengths[best_pick_idx]
        logging.info(
            f"Selected indices have a length of {selected_arr_length} and stderr of {selected_arr_stderr}"
        )
        logging.info(
            f"Other Lengths were: {','.join([str(l) for i, l in enumerate(all_lengths) if i != best_pick_idx])}"
        )
        logging.info(
            f"Other StdErrs were: {','.join([str(se) for i, se in enumerate(by_std_err) if i != best_pick_idx])}"
        )
        return (selected_idx_array[best_pick_idx], selected_arr_stderr)
def read_list_file(list_file):
    """Read a text file of paths, one per line, and return them as a list."""
    with open(list_file) as handle:
        return [line.rstrip("\n") for line in handle]
def infer_week_and_source(sample_name):
    """Parse week and source identifiers out of a sample name.

    Recognized formats:
      - mouse samples 'W<week>M<mouse>' -> (week digits as str, 'M<mouse>')
      - patients 'Pat<n>'               -> (0, 'Pat<n>')
      - communities 'Com<n>'            -> (0, 'Com<n>')
    Anything else yields (nan, nan).
    """
    mouse_re = re.compile(r"W\d+M\d+")
    patient_re = re.compile(r"Pat\d+")
    community_re = re.compile(r"Com\d+")
    if mouse_re.match(sample_name):
        week = re.compile(r"^W\d+").search(sample_name).group().replace("W", "")
        source = re.compile(r"M\d+").search(sample_name).group()
        return week, source
    if patient_re.match(sample_name):
        return 0, patient_re.search(sample_name).group()
    if community_re.match(sample_name):
        return 0, community_re.search(sample_name).group()
    return np.nan, np.nan
def get_array_stats(a):
    """Return (nansum, nanmean, nanmedian, nanstd) of a flattened array.

    The mean/median/std are only computed when the nan-aware sum is
    positive; otherwise they stay NaN.
    """
    values = a.flatten()
    mean = median = std = np.nan
    total = np.nansum(values)
    if total > 0:
        mean = np.nanmean(values)
        median = np.nanmedian(values)
        std = np.nanstd(values)
    return total, mean, median, std
def subset_on_indices(array, bool_idx):
    """Return *array* with positions where *bool_idx* is False set to NaN."""
    nan_filler = np.full_like(array, np.nan)
    return np.where(bool_idx, array, nan_filler)
def selected_pos_depths(query, depth_profile, informative_pos):
    """Compute depth/coverage statistics at the informative positions.

    Args:
        query: sample name used to label the result row.
        depth_profile: path to a per-position depth CSV (see get_depth_vectors).
        informative_pos: genome x 4 boolean array of selected nucleotides.

    Returns:
        dict of coverage and depth statistics for the selected positions,
        the unselected nucleotides and the whole genome.
    """
    # Find all positions where at least one nucleotide has a value
    selected_pos_idx = np.sum(informative_pos, axis=1) > 0
    selected_pos_len = len(selected_pos_idx)
    total_selected_pos = np.sum(selected_pos_idx)
    logging.info(f"Calculating depths for informative positions from {query} ...")
    # data_frame = pd.read_csv(
    #     depth_profile,
    #     header=0,
    #     usecols=["A", "C", "G", "T", "total_depth"],
    #     dtype={"A": "int", "C": "int", "G": "int", "T": "int", "total_depth": "float",},
    # )
    nucl_depth, pos_depth = get_depth_vectors(depth_profile)
    # Nucleotides
    # n_depth_arr = subset_on_indices(nucl_depth, informative_pos)
    # logging.info(f"{nucl_depth.shape}, {informative_pos.shape}")
    (
        selected_nt_total,
        selected_nt_mean,
        selected_nt_median,
        selected_nt_std,
    ) = get_array_stats(nucl_depth[informative_pos])
    (
        unselected_nt_total,
        unselected_nt_mean,
        unselected_nt_median,
        unselected_nt_std,
    ) = get_array_stats(nucl_depth[~informative_pos])
    # Positions
    _, og_total_mean, og_total_median, og_total_std = get_array_stats(pos_depth)
    genome_len = len(pos_depth)
    # The depth profile and the informative-position mask must describe
    # the same genome.
    assert (
        selected_pos_len == genome_len
    ), f"Arrays do not have the same shape ({selected_pos_len} vs {genome_len}). Can't compare."
    non_zero_depth_pos = pos_depth > 0
    bases_covered = np.sum(non_zero_depth_pos)
    genome_cov = 100 * bases_covered / float(genome_len)
    selected_pos_depth = pos_depth[selected_pos_idx]
    _, sel_total_mean, sel_total_median, sel_total_std = get_array_stats(
        selected_pos_depth
    )
    # Selected positions that actually have read coverage in this sample.
    filled_selected_pos = selected_pos_depth > 0
    selected_positions_found = np.sum(filled_selected_pos)
    selected_positions_found_perc = np.nan
    if total_selected_pos > 0:
        selected_positions_found_perc = (
            100 * selected_positions_found / float(total_selected_pos)
        )
    # Of the positions that have nucl values (filled_selected_pos),
    # how many nucl values per position?
    nucl_per_pos = np.nan
    if selected_positions_found > 0:
        nucl_depth_binary_idx = nucl_depth > 0
        nucl_per_pos_array = np.sum(nucl_depth_binary_idx, axis=1)
        nucl_per_pos = np.sum(nucl_per_pos_array[selected_pos_idx]) / float(
            selected_positions_found
        )
    # Coefficient of variation of the depth at the selected positions.
    sel_coef_of_var = np.nan
    if sel_total_mean > 0:
        sel_coef_of_var = sel_total_std / sel_total_mean
    perc_genome_selected = 100 * selected_positions_found / float(genome_len)
    # logging.info(
    #     f"{genome_len}, {genome_cov}, {nucl_per_pos}, {total_selected_pos}, {selected_positions_cov}, "
    #     f"{selected_positions_cov_perc}, {perc_genome_selected}, {sel_coef_of_var}, "
    #     f"{sel_total_mean}, {sel_total_median}, {sel_total_std},"
    #     f"{og_total_mean}, {og_total_median}, {og_total_std}, "
    # )
    return {
        "Query": query,
        "S3Path": depth_profile,
        "Ref_Genome_Len": genome_len,
        "Ref_Genome_Cov": genome_cov,
        "Nucl_Per_Pos": nucl_per_pos,
        "Pos_Selected_Searched": total_selected_pos,
        "Pos_Selected_Found": selected_positions_found,
        "Pos_Perc_Selected_Found": selected_positions_found_perc,
        "Pos_Perc_Genome_Selected": perc_genome_selected,
        "Pos_Selected_Coef_of_Var": sel_coef_of_var,
        "Pos_Selected_Mean_Depth": sel_total_mean,
        "Pos_Selected_Median_Depth": sel_total_median,
        "Pos_Selected_Stdev_Depth": sel_total_std,
        "Pos_All_Genome_Mean_Depth": og_total_mean,
        "Pos_All_Genome_Median_Depth": og_total_median,
        "Pos_All_Genome_Stdev_Depth": og_total_std,
        "Nucl_Selected_CumSum_Depth": selected_nt_total,
        "Nucl_Selected_Mean_Depth": selected_nt_mean,
        "Nucl_Selected_Median_Depth": selected_nt_median,
        "Nucl_Selected_Stdev_Depth": selected_nt_std,
        "Nucl_Unselected_CumSum_Depth": unselected_nt_total,
        "Nucl_Unselected_Mean_Depth": unselected_nt_mean,
        "Nucl_Unselected_Median_Depth": unselected_nt_median,
        "Nucl_Unselected_Stdev_Depth": unselected_nt_std,
    }
def parallelize_selected_pos_raw_data(depth_paths_df, informative_pos, cores):
    """Run selected_pos_depths for every sample in parallel.

    Args:
        depth_paths_df: DataFrame with required columns 'Query' and 'S3Path'.
        informative_pos: genome x 4 boolean array of selected nucleotides.
        cores: max worker processes for the process pool.

    Returns:
        pandas DataFrame of per-sample depth statistics indexed by 'Query'.
    """
    assert (
        "Query" in depth_paths_df.columns
    ), "Missing required column in depth path file: 'Query'"
    assert (
        "S3Path" in depth_paths_df.columns
    ), "Missing required column in depth path file: 'S3Path'"
    list_of_results = list()
    with concurrent.futures.ProcessPoolExecutor(max_workers=cores) as executor:
        future = [
            executor.submit(
                selected_pos_depths, row.Query, row.S3Path, informative_pos,
            )
            for row in depth_paths_df.itertuples()
        ]
        # Results are collected in completion order.
        for f in concurrent.futures.as_completed(future):
            result = f.result()
            # logging.info(f"Saving results for {result['Query_name']}")
            list_of_results.append(result)
    return pd.DataFrame(list_of_results).set_index(["Query"])
def get_depth_vectors(depth_profile):
    """Read a per-position depth CSV into numpy arrays.

    Args:
        depth_profile: path to a CSV with columns A, C, G, T, total_depth.

    Returns:
        tuple of (genome_size x 4 int array of per-nucleotide depths,
        genome_size x 1 float array of total depths).
    """
    data_frame = pd.read_csv(
        depth_profile,
        header=0,
        usecols=["A", "C", "G", "T", "total_depth"],
        dtype={"A": "int", "C": "int", "G": "int", "T": "int", "total_depth": "float"},
    )
    # The previous implementation pre-allocated np.zeros arrays here and
    # immediately overwrote them with the to_numpy() results below;
    # those allocations were dead code and have been removed.
    nucl_depth_vector = data_frame[["A", "C", "G", "T"]].to_numpy()
    total_depth_vector = data_frame[["total_depth"]].to_numpy()
    return nucl_depth_vector, total_depth_vector
def get_extreme_pos_depth(diff_vector, depth_profile, informative_pos, values=None):
    """Collect read-support stats at positions whose |diff| equals each value.

    Args:
        diff_vector: path to a .npy diff array (name encodes '<base>_vs_<query>').
        depth_profile: path to the sample's depth CSV.
        informative_pos: genome x 4 boolean array of selected nucleotides.
        values: diff magnitudes to report on; defaults to [0, 1].

    Returns:
        dict of per-value read-support metrics, tagged with the query name.
    """
    if values is None:
        values = [0, 1]
    title = os.path.basename(diff_vector).split(".")[1]
    # base_name = title.split("_vs_")[0]
    query_name = title.split("_vs_")[1]
    logging.info(f"Calculating depth at extreme positions for {query_name}...")
    result = {"Query": query_name}
    for value in values:
        result.update(
            get_specific_diff_depth(diff_vector, depth_profile, informative_pos, value)
        )
    return result
def get_specific_diff_depth(diff_vector, depth_profile, informative_pos, value):
    """Compute read support at positions where |diff| equals *value*.

    Args:
        diff_vector: path to a .npy diff array.
        depth_profile: path to the sample's depth CSV (see get_depth_vectors).
        informative_pos: genome x 4 boolean array of selected nucleotides.
        value: diff magnitude of interest (e.g. 0 or 1).

    Returns:
        dict of position- and nucleotide-level read support totals, means
        and standard deviations keyed by the value; all NaN when no
        position matches.
    """
    (
        total_nucl_reads,
        mean_nucl_reads,
        sd_nucl_reads,
        total_reads,
        mean_reads,
        sd_reads,
    ) = [np.nan] * 6
    diff_vector_loaded = np.load(diff_vector)
    nucl_idx = abs(diff_vector_loaded) == value
    # Positions where at least one nucleotide hits the target value.
    value_idx = np.sum(nucl_idx, axis=1) > 0
    if np.sum(value_idx) == 0:
        # Nothing to report; return NaN placeholders.
        return {
            f"Pos_Read_Support_Total_at_{value}": total_reads,
            f"Pos_Read_Support_Mean_at_{value}": mean_reads,
            f"Pos_Read_Support_Stdev_at_{value}": sd_reads,
            f"Nucl_Read_Support_Total_at_{value}": total_nucl_reads,
            f"Nucl_Read_Support_Mean_at_{value}": mean_nucl_reads,
            f"Nucl_Read_Support_Stdev_at_{value}": sd_nucl_reads,
        }
    nucl_depth_vector, total_depth_vector = get_depth_vectors(depth_profile)
    # Selected Postions Nucleotide Depth
    selected_nucl_depth = nucl_depth_vector[nucl_idx & informative_pos]
    (total_nucl_reads, mean_nucl_reads, _, sd_nucl_reads) = get_array_stats(
        selected_nucl_depth
    )
    # Selected Postion Total Depth
    selected_pos_idx = np.sum(informative_pos, axis=1) > 0
    selected_pos_depth = total_depth_vector[value_idx & selected_pos_idx]
    # One read is counted once for each position, but may have been counted many
    # times over across multiple adjacent positions.
    # NOTE: the manual nansum/nanmean/nanstd block that used to sit here was
    # immediately overwritten by the get_array_stats call below (dead code),
    # so it was removed; get_array_stats performs the identical computation.
    (total_reads, mean_reads, _, sd_reads) = get_array_stats(selected_pos_depth)
    return {
        f"Pos_Read_Support_Total_at_{value}": total_reads,
        f"Pos_Read_Support_Mean_at_{value}": mean_reads,
        f"Pos_Read_Support_Stdev_at_{value}": sd_reads,
        f"Nucl_Read_Support_Total_at_{value}": total_nucl_reads,
        f"Nucl_Read_Support_Mean_at_{value}": mean_nucl_reads,
        f"Nucl_Read_Support_Stdev_at_{value}": sd_nucl_reads,
    }
def save_predictions(
    stats_df,
    read_support_df,
    output_file,
    selected_pos_depth,
    prefix,
    num_extereme_pos_for_invader=2,
):
    """Join stats, read support and depth tables, call predictions, save CSV.

    Joins the three per-sample tables on 'Query', derives Week/Source from
    the sample names, calls an Invader/Input prediction from the extreme
    position count, then adjusts Input calls using the missed-extreme-pos
    probability and read support. Writes the result to *output_file*.

    NOTE(review): this mutates *selected_pos_depth* in place (drops S3Path).
    """
    selected_pos_depth.drop(["S3Path"], inplace=True, axis=1)
    output_cols = (
        [
            "Organism",
            "Sample",
            "Source",
            "Week",
            "Extreme_Positions",
            "Prediction",
            "missed_2_EP",
        ]
        + list(selected_pos_depth.columns)
        + list(read_support_df.columns)
    )
    stats_df = stats_df.join(
        selected_pos_depth, on="Query", how="left", rsuffix="_other"
    )
    stats_df = stats_df.join(read_support_df, on="Query", how="left", rsuffix="_other")
    stats_df["Organism"] = prefix
    stats_df["Week"], stats_df["Source"] = zip(
        *stats_df["Query"].apply(infer_week_and_source)
    )
    # Initial call from the raw count of extreme positions.
    stats_df["Prediction"] = stats_df.apply(
        lambda row: get_extreme_prediction(
            row["Extreme_Positions"], num_extereme_pos_for_invader
        ),
        axis=1,
    )
    # Probability that the required extreme positions were simply not covered.
    stats_df["missed_2_EP"] = stats_df.apply(
        lambda row: invader_probability(
            row["Pos_Selected_Found"],
            row["Pos_Selected_Searched"],
            num_extereme_pos_for_invader,
        ),
        axis=1,
    )
    # Update Input Predictions based on missed_2_EP 5% tolerance and Nucl_Read_Support_Total_at_0 > 1000
    stats_df["Prediction"] = stats_df.apply(
        lambda row: adjust_predictions(
            row["Prediction"], row["missed_2_EP"], row["Nucl_Read_Support_Total_at_0"]
        ),
        axis=1,
    )
    stats_df.to_csv(output_file, index=False, columns=output_cols)
def get_extreme_prediction(num_extreme_positions, min_extreme_pos=2):
    """Classify a sample by its count of extreme positions.

    A NaN count yields 'Unclear'; a count at or above *min_extreme_pos*
    yields 'Invader'; anything below yields 'Input'.
    """
    if np.isnan(num_extreme_positions):
        return "Unclear"
    if num_extreme_positions >= min_extreme_pos:
        return "Invader"
    return "Input"
def adjust_predictions(
    current_pred,
    missed_2_EP,
    cum_read_supp_0,
    missed_EP_tolerance=0.05,
    min_cum_read_support=1000,
):
    """Post-process a prediction with coverage sanity checks.

    Invader calls are returned unchanged. An Input call is only kept when
    the probability of having missed the extreme positions is below
    *missed_EP_tolerance* AND cumulative read support at diff==0 reaches
    *min_cum_read_support*; otherwise the call becomes 'Unclear'.
    """
    normalized = current_pred.lower()
    if normalized == "invader":
        return current_pred
    input_is_trustworthy = (
        normalized == "input"
        and missed_2_EP < missed_EP_tolerance
        and cum_read_supp_0 >= min_cum_read_support
    )
    return "Input" if input_is_trustworthy else "Unclear"
# def prob_extreme_pos(exp_extreme_pos, fraction_min_depth):
# # ln p = f*S*ln (1-N/S) ~ -f*S*N/S = -f*N
# logP = -fraction_min_depth * exp_extreme_pos
# return logP
# def pos_frac_combinations(fraction_min_depth, possible_extreme_pos):
# # log(N!) = N * math.log(N) - N
# # C = S!/[(f*S)!((1-f)*S)!]
# log_factorial = lambda N: N * math.log(N) - N
# logC = log_factorial(possible_extreme_pos) - (
# log_factorial(fraction_min_depth * possible_extreme_pos)
# + log_factorial((1 - fraction_min_depth) * possible_extreme_pos)
# )
# return logC
# def missed_invader_probability(
# exp_extreme_pos, possible_extreme_pos, fraction_min_depth
# ):
# logP = prob_extreme_pos(exp_extreme_pos, fraction_min_depth)
# logC = pos_frac_combinations(fraction_min_depth, possible_extreme_pos)
# try:
# q = math.exp(logP + logC)
# except OverflowError:
# q = float("inf")
# return q
def invader_probability(obs_cons_pos, exp_cons_pos, missed_extrm_pos):
    """Probability that all of *missed_extrm_pos* extreme positions were missed.

    With S expected conserved positions and O observed (S - q missed due to
    low coverage), estimate the probability that the observed set avoided
    the N positions that could have been extreme:

        p = (S-N)!/S! * q!/(q-N)!

    evaluated in log-space with a Stirling approximation.

    Args:
        obs_cons_pos (int): observed conserved positions ("selected positions found").
        exp_cons_pos (int): expected conserved positions ("selected positions").
        missed_extrm_pos (int): number of missing extreme positions to assess.

    Returns:
        float: probability in [0, 1]; NaN on undefined input; 1 when fewer
        expected positions exist than missed extreme positions.
    """
    if (exp_cons_pos == 0) or np.isnan(exp_cons_pos):
        return np.nan
    if np.isnan(obs_cons_pos) or np.isnan(missed_extrm_pos):
        return np.nan
    if exp_cons_pos < missed_extrm_pos:
        return 1
    # q = S - O
    missed_conserved_pos = exp_cons_pos - obs_cons_pos

    def log_stirling(n):
        # Stirling approximation of log(n!); pinned to 1 for n <= 1.
        return n * math.log(n) - n if n > 1 else 1

    log_p = (
        log_stirling(exp_cons_pos - missed_extrm_pos)
        - log_stirling(exp_cons_pos)
        + log_stirling(missed_conserved_pos)
        - log_stirling(missed_conserved_pos - missed_extrm_pos)
    )
    try:
        prob = math.exp(log_p)
    except OverflowError:
        prob = float("inf")
    # The Stirling edge cases (e.g. invader_probability(2, 2, 2)) can push
    # the value above 1; clamp to a valid probability.
    return min(prob, 1)
class Error(Exception):
    """Base class for exceptions in this module."""
    pass
class CoverageError(Error):
    """Exception raised possibly due to lack of breadth or depth of coverage.

    Attributes:
        expression -- input expression in which the error occurred
        message -- explanation of the error
    """
    def __init__(self, expression, message):
        # Stored as plain attributes; str() of the instance comes from
        # Exception's default (empty args), so callers log the instance.
        self.expression = expression
        self.message = message
class InfoError(Error):
    """Exception raised because we could not find any informative positions
    from the control samples.

    Attributes:
        expression -- input expression in which the error occurred
        message -- explanation of the error
    """
    def __init__(self, expression, message):
        # Mirrors CoverageError's shape for consistent handling in run().
        self.expression = expression
        self.message = message
def run(
    output_folder_path,
    prefix,
    method,
    control_array_paths,
    query_array_paths,
    depth_profiles_df,
    cutoff=0.002,
    cores=10,
    invader_extreme_pos=2,
):
    """End-to-end pipeline: find conserved positions, compare, plot, predict.

    Steps: (1) leave-one-out conserved-position selection from controls,
    (2) per-sample depth stats at those positions, (3) parallel extreme
    position read-support, (4) per-sample histograms and summary stats,
    (5) Invader/Input predictions written to a CSV.

    Returns:
        str | None: path to the stats CSV, or None when coverage was
        insufficient (CoverageError). Skips all work if both outputs exist.
    """
    # NOTE(review): the assert message expression calls logging.error, which
    # returns None — on failure this raises AssertionError(None) after
    # logging; consider validating with an explicit raise instead.
    assert method.lower() in ["intersection", "union", "majority"], logging.error(
        f"Method '{method}' not recognized. Exiting"
    )
    comparison_df_filepath = f"{output_folder_path}/{prefix}.02d_stats.csv"
    hist_plot = f"{output_folder_path}/{prefix}.02d_hist_plot.png"
    # Idempotency: reuse previous outputs when both already exist.
    if os.path.exists(comparison_df_filepath) and os.path.exists(hist_plot):
        return comparison_df_filepath
    try:
        informative_pos = get_conserved_positions(
            control_array_paths, method=method, cutoff=cutoff, cores=cores
        )
    except CoverageError as nec:
        logging.info(nec)
        return
    # np.save(f"{prefix}.{method}.informative_pos.npy", informative_pos)
    pos_depth_df = parallelize_selected_pos_raw_data(
        depth_profiles_df, informative_pos, cores=cores
    )
    # NOTE(review): nothing inside this try raises InfoError, so the except
    # branch is dead code and the pipeline continues even with zero
    # informative positions (sys.exit is commented out) — confirm intent.
    try:
        if np.sum(informative_pos) == 0:
            logging.info(
                "Cannot find any informative positions from the control samples. Exiting."
            )
            # sys.exit(0)
    except InfoError as ie:
        logging.info(ie)
        return
    logging.info(
        f"Found {len(control_array_paths)} control profiles and {len(query_array_paths)} query profiles"
    )
    # Setup matplotlib figures
    all_array_paths = control_array_paths + query_array_paths
    # Parallelize extreme position read support (depth) calculations for each mouse
    read_support_list = list()
    with concurrent.futures.ProcessPoolExecutor(max_workers=cores) as executor:
        future = [
            executor.submit(
                get_extreme_pos_depth,
                diff_vector,
                depth_profiles_df["S3Path"][idx],
                informative_pos,
            )
            for idx, diff_vector in enumerate(all_array_paths)
        ]
        for f in concurrent.futures.as_completed(future):
            result = f.result()
            # logging.info(f"Saving results for {result['Query_name']}")
            read_support_list.append(result)
    extreme_read_support_df = pd.DataFrame(read_support_list).set_index(["Query"])
    num_comparisons = len(all_array_paths)
    logging.info(f"Making {num_comparisons} comparisons ...")
    # One histogram panel (10 inches tall) per compared sample.
    plot_height = int(num_comparisons * 10)
    plt.rcParams["figure.figsize"] = (45, plot_height)
    figure, hist_axs = plt.subplots(num_comparisons, 1, sharex=True, sharey=True)
    stats_df = pd.DataFrame(
        [
            visualizing_differences(diff_vector, informative_pos, hist_axs[idx])
            for idx, diff_vector in enumerate(all_array_paths)
        ]
    )
    save_predictions(
        stats_df,
        extreme_read_support_df,
        comparison_df_filepath,
        pos_depth_df,
        prefix,
        num_extereme_pos_for_invader=invader_extreme_pos,
    )
    logging.info(f"Saving histogram figure to disk as: {hist_plot}")
    figure.savefig(hist_plot, dpi=80, bbox_inches="tight")
    logging.info("All Done. Huzzah!")
    return comparison_df_filepath
# CLI entry point.
# Usage: script.py <prefix> <method> <control_list> <sample_list> <depth_table>
#   prefix        output folder name and organism label
#   method        "intersection" | "union" | "majority"
#   control_list  text file listing control diff-array paths (one per line)
#   sample_list   text file listing query diff-array paths (one per line)
#   depth_table   TSV of (Query, S3Path) rows pointing at depth CSVs
if __name__ == "__main__":
    logging.basicConfig(
        level=logging.INFO, format="%(asctime)s\t[%(levelname)s]:\t%(message)s",
    )
    prefix = sys.argv[1]
    method = sys.argv[2]
    # All files have samples in the same order
    control_profiles = sys.argv[3]
    sample_profiles = sys.argv[4]
    init_profiles = sys.argv[5]
    cutoff = 0.002
    cores = 10
    control_array_paths = read_list_file(control_profiles)
    query_array_paths = read_list_file(sample_profiles)
    depth_profiles_df = pd.read_table(
        init_profiles, header=None, names=["Query", "S3Path"]
    )
    output_folder_path = prefix
    run(
        output_folder_path,
        prefix,
        method,
        control_array_paths,
        query_array_paths,
        depth_profiles_df,
        cutoff=0.002,
        cores=10,
    )
| [
"numpy.nanargmax",
"numpy.sqrt",
"pandas.read_csv",
"re.compile",
"math.log",
"numpy.nanmean",
"numpy.linalg.norm",
"numpy.nanmin",
"math.exp",
"logging.info",
"logging.error",
"numpy.arange",
"os.path.exists",
"numpy.nanargmin",
"numpy.full_like",
"scipy.stats.kurtosis",
"numpy.dot"... | [((248, 269), 'matplotlib.use', 'matplotlib.use', (['"""agg"""'], {}), "('agg')\n", (262, 269), False, 'import matplotlib\n'), ((658, 690), 'scipy.stats.ks_2samp', 'stats.ks_2samp', (['a_prime', 'b_prime'], {}), '(a_prime, b_prime)\n', (672, 690), False, 'from scipy import stats\n'), ((5934, 5972), 'logging.info', 'logging.info', (['f"""Depth summary: {summ}"""'], {}), "(f'Depth summary: {summ}')\n", (5946, 5972), False, 'import logging\n'), ((7330, 7368), 'logging.info', 'logging.info', (['f"""Depth summary: {summ}"""'], {}), "(f'Depth summary: {summ}')\n", (7342, 7368), False, 'import logging\n'), ((7630, 7655), 'numpy.load', 'np.load', (['query_array_path'], {}), '(query_array_path)\n', (7637, 7655), True, 'import numpy as np\n'), ((7660, 7694), 'logging.info', 'logging.info', (['f""" ... {query_name}"""'], {}), "(f' ... {query_name}')\n", (7672, 7694), False, 'import logging\n'), ((8722, 8769), 'logging.info', 'logging.info', (['f"""{num_profiles} profiles found."""'], {}), "(f'{num_profiles} profiles found.')\n", (8734, 8769), False, 'import logging\n'), ((10006, 10059), 'logging.info', 'logging.info', (['f"""Generating histogram for {title} ..."""'], {}), "(f'Generating histogram for {title} ...')\n", (10018, 10059), False, 'import logging\n'), ((10372, 10392), 'numpy.load', 'np.load', (['diff_vector'], {}), '(diff_vector)\n', (10379, 10392), True, 'import numpy as np\n'), ((10811, 10855), 'logging.info', 'logging.info', (['f"""Loading {array} at {cutoff}"""'], {}), "(f'Loading {array} at {cutoff}')\n", (10823, 10855), False, 'import logging\n'), ((10869, 10883), 'numpy.load', 'np.load', (['array'], {}), '(array)\n', (10876, 10883), True, 'import numpy as np\n'), ((11288, 11322), 'numpy.nansum', 'np.nansum', (['cumulative_diff'], {'axis': '(0)'}), '(cumulative_diff, axis=0)\n', (11297, 11322), True, 'import numpy as np\n'), ((11334, 11353), 'numpy.nanstd', 'np.nanstd', (['cum_diff'], {}), '(cum_diff)\n', (11343, 11353), True, 'import numpy as 
np\n'), ((11839, 11931), 'logging.info', 'logging.info', (['f"""Received {num_arrays} arrays, with method {method} and cutoff {cutoff}"""'], {}), "(\n f'Received {num_arrays} arrays, with method {method} and cutoff {cutoff}')\n", (11851, 11931), False, 'import logging\n'), ((13236, 13256), 'numpy.sum', 'np.sum', (['selected_idx'], {}), '(selected_idx)\n', (13242, 13256), True, 'import numpy as np\n'), ((13261, 13366), 'logging.info', 'logging.info', (['f"""Method: {method}, Shape:{selected_idx.shape}, Num_Positions:{selected_idx_num}"""'], {}), "(\n f'Method: {method}, Shape:{selected_idx.shape}, Num_Positions:{selected_idx_num}'\n )\n", (13273, 13366), False, 'import logging\n'), ((14029, 14068), 'logging.info', 'logging.info', (['f"""Found {num_arr} arrays"""'], {}), "(f'Found {num_arr} arrays')\n", (14041, 14068), False, 'import logging\n'), ((15347, 15367), 'numpy.sum', 'np.sum', (['selected_idx'], {}), '(selected_idx)\n', (15353, 15367), True, 'import numpy as np\n'), ((15372, 15528), 'logging.info', 'logging.info', (['f"""[Leave One Out] Method: {method}, Shape:{selected_idx.shape}, Num_Positions:{selected_idx_num}, Standard Error={selected_idx_err}"""'], {}), "(\n f'[Leave One Out] Method: {method}, Shape:{selected_idx.shape}, Num_Positions:{selected_idx_num}, Standard Error={selected_idx_err}'\n )\n", (15384, 15528), False, 'import logging\n'), ((16326, 16355), 'numpy.full_like', 'np.full_like', (['all_std', 'np.nan'], {}), '(all_std, np.nan)\n', (16338, 16355), True, 'import numpy as np\n'), ((16369, 16391), 'numpy.zeros_like', 'np.zeros_like', (['all_std'], {}), '(all_std)\n', (16382, 16391), True, 'import numpy as np\n'), ((16902, 16925), 'numpy.isnan', 'np.isnan', (['best_pick_idx'], {}), '(best_pick_idx)\n', (16910, 16925), True, 'import numpy as np\n'), ((17858, 17883), 're.compile', 're.compile', (['source_format'], {}), '(source_format)\n', (17868, 17883), False, 'import re\n'), ((17924, 17950), 're.compile', 're.compile', (['patient_format'], {}), 
'(patient_format)\n', (17934, 17950), False, 'import re\n'), ((17988, 18011), 're.compile', 're.compile', (['comm_format'], {}), '(comm_format)\n', (17998, 18011), False, 'import re\n'), ((18589, 18605), 'numpy.nansum', 'np.nansum', (['array'], {}), '(array)\n', (18598, 18605), True, 'import numpy as np\n'), ((19160, 19184), 'numpy.sum', 'np.sum', (['selected_pos_idx'], {}), '(selected_pos_idx)\n', (19166, 19184), True, 'import numpy as np\n'), ((19189, 19267), 'logging.info', 'logging.info', (['f"""Calculating depths for informative positions from {query} ..."""'], {}), "(f'Calculating depths for informative positions from {query} ...')\n", (19201, 19267), False, 'import logging\n'), ((20395, 20421), 'numpy.sum', 'np.sum', (['non_zero_depth_pos'], {}), '(non_zero_depth_pos)\n', (20401, 20421), True, 'import numpy as np\n'), ((20721, 20748), 'numpy.sum', 'np.sum', (['filled_selected_pos'], {}), '(filled_selected_pos)\n', (20727, 20748), True, 'import numpy as np\n'), ((24114, 24283), 'pandas.read_csv', 'pd.read_csv', (['depth_profile'], {'header': '(0)', 'usecols': "['A', 'C', 'G', 'T', 'total_depth']", 'dtype': "{'A': 'int', 'C': 'int', 'G': 'int', 'T': 'int', 'total_depth': 'float'}"}), "(depth_profile, header=0, usecols=['A', 'C', 'G', 'T',\n 'total_depth'], dtype={'A': 'int', 'C': 'int', 'G': 'int', 'T': 'int',\n 'total_depth': 'float'})\n", (24125, 24283), True, 'import pandas as pd\n'), ((24378, 24404), 'numpy.zeros', 'np.zeros', (['(genome_size, 4)'], {}), '((genome_size, 4))\n', (24386, 24404), True, 'import numpy as np\n'), ((24430, 24451), 'numpy.zeros', 'np.zeros', (['genome_size'], {}), '(genome_size)\n', (24438, 24451), True, 'import numpy as np\n'), ((24931, 25006), 'logging.info', 'logging.info', (['f"""Calculating depth at extreme positions for {query_name}..."""'], {}), "(f'Calculating depth at extreme positions for {query_name}...')\n", (24943, 25006), False, 'import logging\n'), ((25524, 25544), 'numpy.load', 'np.load', (['diff_vector'], {}), 
'(diff_vector)\n', (25531, 25544), True, 'import numpy as np\n'), ((26779, 26808), 'numpy.nansum', 'np.nansum', (['selected_pos_depth'], {}), '(selected_pos_depth)\n', (26788, 26808), True, 'import numpy as np\n'), ((29170, 29201), 'numpy.isnan', 'np.isnan', (['num_extreme_positions'], {}), '(num_extreme_positions)\n', (29178, 29201), True, 'import numpy as np\n'), ((32077, 32099), 'numpy.isnan', 'np.isnan', (['obs_cons_pos'], {}), '(obs_cons_pos)\n', (32085, 32099), True, 'import numpy as np\n'), ((32131, 32157), 'numpy.isnan', 'np.isnan', (['missed_extrm_pos'], {}), '(missed_extrm_pos)\n', (32139, 32157), True, 'import numpy as np\n'), ((34204, 34263), 'logging.error', 'logging.error', (['f"""Method \'{method}\' not recognized. Exiting"""'], {}), '(f"Method \'{method}\' not recognized. Exiting")\n', (34217, 34263), False, 'import logging\n'), ((36270, 36327), 'logging.info', 'logging.info', (['f"""Making {num_comparisons} comparisons ..."""'], {}), "(f'Making {num_comparisons} comparisons ...')\n", (36282, 36327), False, 'import logging\n'), ((36451, 36509), 'matplotlib.pyplot.subplots', 'plt.subplots', (['num_comparisons', '(1)'], {'sharex': '(True)', 'sharey': '(True)'}), '(num_comparisons, 1, sharex=True, sharey=True)\n', (36463, 36509), True, 'import matplotlib.pyplot as plt\n'), ((36922, 36986), 'logging.info', 'logging.info', (['f"""Saving histogram figure to disk as: {hist_plot}"""'], {}), "(f'Saving histogram figure to disk as: {hist_plot}')\n", (36934, 36986), False, 'import logging\n'), ((37051, 37084), 'logging.info', 'logging.info', (['"""All Done. Huzzah!"""'], {}), "('All Done. 
Huzzah!')\n", (37063, 37084), False, 'import logging\n'), ((37152, 37249), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO', 'format': '"""%(asctime)s\t[%(levelname)s]:\t%(message)s"""'}), "(level=logging.INFO, format=\n '%(asctime)s\\t[%(levelname)s]:\\t%(message)s')\n", (37171, 37249), False, 'import logging\n'), ((37634, 37702), 'pandas.read_table', 'pd.read_table', (['init_profiles'], {'header': 'None', 'names': "['Query', 'S3Path']"}), "(init_profiles, header=None, names=['Query', 'S3Path'])\n", (37647, 37702), True, 'import pandas as pd\n'), ((2585, 2599), 'numpy.isnan', 'np.isnan', (['base'], {}), '(base)\n', (2593, 2599), True, 'import numpy as np\n'), ((5078, 5183), 'logging.info', 'logging.info', (['"""Not enough data points for reliable summary statistics. Adding placeholders ..."""'], {}), "(\n 'Not enough data points for reliable summary statistics. Adding placeholders ...'\n )\n", (5090, 5183), False, 'import logging\n'), ((6787, 6892), 'logging.info', 'logging.info', (['"""Not enough data points for reliable summary statistics. Adding placeholders ..."""'], {}), "(\n 'Not enough data points for reliable summary statistics. Adding placeholders ...'\n )\n", (6799, 6892), False, 'import logging\n'), ((9089, 9108), 'numpy.load', 'np.load', (['base_array'], {}), '(base_array)\n', (9096, 9108), True, 'import numpy as np\n'), ((9143, 9228), 'logging.info', 'logging.info', (['f"""[{outer_idx + 1}/{num_profiles}] Comparing {base_name} with ..."""'], {}), "(f'[{outer_idx + 1}/{num_profiles}] Comparing {base_name} with ...'\n )\n", (9155, 9228), False, 'import logging\n'), ((10893, 10922), 'numpy.errstate', 'np.errstate', ([], {'invalid': '"""ignore"""'}), "(invalid='ignore')\n", (10904, 10922), True, 'import numpy as np\n'), ((14980, 15120), 'logging.info', 'logging.info', (['"""Could not find any stable positions in controls, possibly due to lack of breadth or depth of coverage. 
Exiting ..."""'], {}), "(\n 'Could not find any stable positions in controls, possibly due to lack of breadth or depth of coverage. Exiting ...'\n )\n", (14992, 15120), False, 'import logging\n'), ((16270, 16279), 'numpy.sum', 'np.sum', (['a'], {}), '(a)\n', (16276, 16279), True, 'import numpy as np\n'), ((16742, 16766), 'numpy.nanargmin', 'np.nanargmin', (['by_std_err'], {}), '(by_std_err)\n', (16754, 16766), True, 'import numpy as np\n'), ((17086, 17204), 'logging.info', 'logging.info', (['f"""Selected indices have a length of {selected_arr_length} and stderr of {selected_arr_stderr}"""'], {}), "(\n f'Selected indices have a length of {selected_arr_length} and stderr of {selected_arr_stderr}'\n )\n", (17098, 17204), False, 'import logging\n'), ((18061, 18081), 're.compile', 're.compile', (['"""^W\\\\d+"""'], {}), "('^W\\\\d+')\n", (18071, 18081), False, 'import re\n'), ((18173, 18192), 're.compile', 're.compile', (['"""M\\\\d+"""'], {}), "('M\\\\d+')\n", (18183, 18192), False, 'import re\n'), ((18645, 18662), 'numpy.nanmean', 'np.nanmean', (['array'], {}), '(array)\n', (18655, 18662), True, 'import numpy as np\n'), ((18684, 18703), 'numpy.nanmedian', 'np.nanmedian', (['array'], {}), '(array)\n', (18696, 18703), True, 'import numpy as np\n'), ((18722, 18738), 'numpy.nanstd', 'np.nanstd', (['array'], {}), '(array)\n', (18731, 18738), True, 'import numpy as np\n'), ((18869, 18896), 'numpy.full_like', 'np.full_like', (['array', 'np.nan'], {}), '(array, np.nan)\n', (18881, 18896), True, 'import numpy as np\n'), ((19054, 19085), 'numpy.sum', 'np.sum', (['informative_pos'], {'axis': '(1)'}), '(informative_pos, axis=1)\n', (19060, 19085), True, 'import numpy as np\n'), ((21196, 21233), 'numpy.sum', 'np.sum', (['nucl_depth_binary_idx'], {'axis': '(1)'}), '(nucl_depth_binary_idx, axis=1)\n', (21202, 21233), True, 'import numpy as np\n'), ((25609, 25633), 'numpy.sum', 'np.sum', (['nucl_idx'], {'axis': '(1)'}), '(nucl_idx, axis=1)\n', (25615, 25633), True, 'import numpy as 
np\n'), ((25646, 25663), 'numpy.sum', 'np.sum', (['value_idx'], {}), '(value_idx)\n', (25652, 25663), True, 'import numpy as np\n'), ((26517, 26548), 'numpy.sum', 'np.sum', (['informative_pos'], {'axis': '(1)'}), '(informative_pos, axis=1)\n', (26523, 26548), True, 'import numpy as np\n'), ((26854, 26884), 'numpy.nanmean', 'np.nanmean', (['selected_pos_depth'], {}), '(selected_pos_depth)\n', (26864, 26884), True, 'import numpy as np\n'), ((26904, 26933), 'numpy.nanstd', 'np.nanstd', (['selected_pos_depth'], {}), '(selected_pos_depth)\n', (26913, 26933), True, 'import numpy as np\n'), ((32023, 32045), 'numpy.isnan', 'np.isnan', (['exp_cons_pos'], {}), '(exp_cons_pos)\n', (32031, 32045), True, 'import numpy as np\n'), ((32770, 32784), 'math.exp', 'math.exp', (['logP'], {}), '(logP)\n', (32778, 32784), False, 'import math\n'), ((34430, 34468), 'os.path.exists', 'os.path.exists', (['comparison_df_filepath'], {}), '(comparison_df_filepath)\n', (34444, 34468), False, 'import os\n'), ((34473, 34498), 'os.path.exists', 'os.path.exists', (['hist_plot'], {}), '(hist_plot)\n', (34487, 34498), False, 'import os\n'), ((881, 905), 'numpy.dot', 'np.dot', (['a_prime', 'b_prime'], {}), '(a_prime, b_prime)\n', (887, 905), True, 'import numpy as np\n'), ((3781, 3801), 'numpy.isnan', 'np.isnan', (['prob_array'], {}), '(prob_array)\n', (3789, 3801), True, 'import numpy as np\n'), ((4454, 4472), 'numpy.nanmin', 'np.nanmin', (['d_prime'], {}), '(d_prime)\n', (4463, 4472), True, 'import numpy as np\n'), ((4493, 4511), 'numpy.nanmax', 'np.nanmax', (['d_prime'], {}), '(d_prime)\n', (4502, 4511), True, 'import numpy as np\n'), ((4533, 4552), 'numpy.nanmean', 'np.nanmean', (['d_prime'], {}), '(d_prime)\n', (4543, 4552), True, 'import numpy as np\n'), ((4577, 4595), 'numpy.nanstd', 'np.nanstd', (['d_prime'], {}), '(d_prime)\n', (4586, 4595), True, 'import numpy as np\n'), ((4621, 4639), 'numpy.nanvar', 'np.nanvar', (['d_prime'], {}), '(d_prime)\n', (4630, 4639), True, 'import numpy as np\n'), 
((4660, 4688), 'numpy.nanquantile', 'np.nanquantile', (['d_prime', '(0.1)'], {}), '(d_prime, 0.1)\n', (4674, 4688), True, 'import numpy as np\n'), ((4712, 4733), 'numpy.nanmedian', 'np.nanmedian', (['d_prime'], {}), '(d_prime)\n', (4724, 4733), True, 'import numpy as np\n'), ((4754, 4782), 'numpy.nanquantile', 'np.nanquantile', (['d_prime', '(0.9)'], {}), '(d_prime, 0.9)\n', (4768, 4782), True, 'import numpy as np\n'), ((4803, 4862), 'scipy.stats.median_absolute_deviation', 'stats.median_absolute_deviation', (['d_prime'], {'nan_policy': '"""omit"""'}), "(d_prime, nan_policy='omit')\n", (4834, 4862), False, 'from scipy import stats\n'), ((4883, 4920), 'scipy.stats.iqr', 'stats.iqr', (['d_prime'], {'nan_policy': '"""omit"""'}), "(d_prime, nan_policy='omit')\n", (4892, 4920), False, 'from scipy import stats\n'), ((4942, 4980), 'scipy.stats.skew', 'stats.skew', (['d_prime'], {'nan_policy': '"""omit"""'}), "(d_prime, nan_policy='omit')\n", (4952, 4980), False, 'from scipy import stats\n'), ((5006, 5048), 'scipy.stats.kurtosis', 'stats.kurtosis', (['d_prime'], {'nan_policy': '"""omit"""'}), "(d_prime, nan_policy='omit')\n", (5020, 5048), False, 'from scipy import stats\n'), ((6651, 6689), 'scipy.stats.skew', 'stats.skew', (['d_prime'], {'nan_policy': '"""omit"""'}), "(d_prime, nan_policy='omit')\n", (6661, 6689), False, 'from scipy import stats\n'), ((6715, 6757), 'scipy.stats.kurtosis', 'stats.kurtosis', (['d_prime'], {'nan_policy': '"""omit"""'}), "(d_prime, nan_policy='omit')\n", (6729, 6757), False, 'from scipy import stats\n'), ((7529, 7563), 'os.path.basename', 'os.path.basename', (['query_array_path'], {}), '(query_array_path)\n', (7545, 7563), False, 'import os\n'), ((10149, 10174), 'numpy.arange', 'np.arange', (['(-1)', '(1.01)', '(0.01)'], {}), '(-1, 1.01, 0.01)\n', (10158, 10174), True, 'import numpy as np\n'), ((16695, 16715), 'numpy.isnan', 'np.isnan', (['by_std_err'], {}), '(by_std_err)\n', (16703, 16715), True, 'import numpy as np\n'), ((16817, 16837), 
'numpy.nanargmax', 'np.nanargmax', (['by_len'], {}), '(by_len)\n', (16829, 16837), True, 'import numpy as np\n'), ((21258, 21302), 'numpy.sum', 'np.sum', (['nucl_per_pos_array[selected_pos_idx]'], {}), '(nucl_per_pos_array[selected_pos_idx])\n', (21264, 21302), True, 'import numpy as np\n'), ((24006, 24035), 'pandas.DataFrame', 'pd.DataFrame', (['list_of_results'], {}), '(list_of_results)\n', (24018, 24035), True, 'import pandas as pd\n'), ((34725, 34742), 'logging.info', 'logging.info', (['nec'], {}), '(nec)\n', (34737, 34742), False, 'import logging\n'), ((34969, 34992), 'numpy.sum', 'np.sum', (['informative_pos'], {}), '(informative_pos)\n', (34975, 34992), True, 'import numpy as np\n'), ((35011, 35104), 'logging.info', 'logging.info', (['"""Cannot find any informative positions from the control samples. Exiting."""'], {}), "(\n 'Cannot find any informative positions from the control samples. Exiting.')\n", (35023, 35104), False, 'import logging\n'), ((35192, 35208), 'logging.info', 'logging.info', (['ie'], {}), '(ie)\n', (35204, 35208), False, 'import logging\n'), ((36169, 36200), 'pandas.DataFrame', 'pd.DataFrame', (['read_support_list'], {}), '(read_support_list)\n', (36181, 36200), True, 'import pandas as pd\n'), ((922, 945), 'numpy.linalg.norm', 'np.linalg.norm', (['a_prime'], {}), '(a_prime)\n', (936, 945), True, 'import numpy as np\n'), ((948, 971), 'numpy.linalg.norm', 'np.linalg.norm', (['b_prime'], {}), '(b_prime)\n', (962, 971), True, 'import numpy as np\n'), ((8963, 8991), 'os.path.basename', 'os.path.basename', (['base_array'], {}), '(base_array)\n', (8979, 8991), False, 'import os\n'), ((10310, 10339), 'os.path.basename', 'os.path.basename', (['diff_vector'], {}), '(diff_vector)\n', (10326, 10339), False, 'import os\n'), ((16522, 16532), 'numpy.sqrt', 'np.sqrt', (['l'], {}), '(l)\n', (16529, 16532), True, 'import numpy as np\n'), ((24801, 24830), 'os.path.basename', 'os.path.basename', (['diff_vector'], {}), '(diff_vector)\n', (24817, 24830), 
False, 'import os\n'), ((3232, 3255), 'numpy.isnan', 'np.isnan', (['query_no_nans'], {}), '(query_no_nans)\n', (3240, 3255), True, 'import numpy as np\n'), ((3434, 3457), 'numpy.isnan', 'np.isnan', (['query_no_nans'], {}), '(query_no_nans)\n', (3442, 3457), True, 'import numpy as np\n'), ((11210, 11224), 'numpy.load', 'np.load', (['array'], {}), '(array)\n', (11217, 11224), True, 'import numpy as np\n'), ((32494, 32505), 'math.log', 'math.log', (['N'], {}), '(N)\n', (32502, 32505), False, 'import math\n')] |
import matplotlib.pyplot as plt
import numpy as np
from sklearn import datasets, linear_model
from sklearn.metrics import mean_squared_error, r2_score
from sklearn import preprocessing
## input here
dataSetName = "breast-cancer"  # diabetes breast-cancer australian
numofEXP = 10
scenarioList = ["SynTR_OrgTE"]
methodList = ["PrivSyn"]
k_list = [25, 50, 75, 100]
eplison_list = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0]


def _mse_for_run(train_file, test_file):
    """Fit ordinary least squares on *train_file* and return MSE on *test_file*.

    Both files are comma-separated matrices whose first column is the
    regression target and whose remaining columns are the features.
    """
    dataset_train = np.loadtxt(train_file, delimiter=",")
    dataset_test = np.loadtxt(test_file, delimiter=",")
    # Column 0 is y, the rest is X.
    y_train, X_train = np.split(dataset_train, [1], axis=1)
    y_test, X_test = np.split(dataset_test, [1], axis=1)
    regr = linear_model.LinearRegression()
    regr.fit(X_train, y_train)
    y_pred = regr.predict(X_test)
    return mean_squared_error(y_test, y_pred)


for scenario in scenarioList:
    for method in methodList:
        # 'with' guarantees the result file is flushed and closed even on error
        # (the original handle was never closed).
        with open("result_" + dataSetName + "_" + str(scenario) + "_" + str(method), "w") as f:
            for clustersize in k_list:
                for eplison in eplison_list:
                    f.write(str(clustersize) + "," + str(eplison) + ",")
                    avgmse = 0
                    for exp in range(numofEXP):
                        path = "./ExpData/" + dataSetName + "/" + scenario + "/"
                        org_file = path + "NotSeed" + dataSetName + "_csv"
                        syn_file = (path + method + "/" + dataSetName + "_syn_"
                                    + str(clustersize) + "_" + str(eplison)
                                    + "_" + str(exp) + "_csv")
                        # OrgTR_SynTE: train on the original data, test on the
                        # synthetic data; every other scenario is the reverse.
                        if scenario == "OrgTR_SynTE":
                            mse = _mse_for_run(org_file, syn_file)
                        else:
                            mse = _mse_for_run(syn_file, org_file)
                        print(dataSetName + str(scenario) + "," + str(method) + "," + str(eplison) + "," + str(
                            clustersize) + "," + str(exp))
                        # The mean squared error
                        print("Mean squared error: %.2f" % mse)
                        avgmse = avgmse + mse
                    avgmse = avgmse / numofEXP
                    f.write(str(avgmse))
                    f.write("\n")
| [
"numpy.loadtxt",
"sklearn.linear_model.LinearRegression",
"numpy.split",
"sklearn.metrics.mean_squared_error"
] | [((966, 1032), 'numpy.loadtxt', 'np.loadtxt', (["(path + 'NotSeed' + dataSetName + '_csv')"], {'delimiter': '""","""'}), "(path + 'NotSeed' + dataSetName + '_csv', delimiter=',')\n", (976, 1032), True, 'import numpy as np\n'), ((1319, 1355), 'numpy.split', 'np.split', (['dataset_train', '[1]'], {'axis': '(1)'}), '(dataset_train, [1], axis=1)\n', (1327, 1355), True, 'import numpy as np\n'), ((1401, 1436), 'numpy.split', 'np.split', (['dataset_test', '[1]'], {'axis': '(1)'}), '(dataset_test, [1], axis=1)\n', (1409, 1436), True, 'import numpy as np\n'), ((1784, 1815), 'sklearn.linear_model.LinearRegression', 'linear_model.LinearRegression', ([], {}), '()\n', (1813, 1815), False, 'from sklearn import datasets, linear_model\n'), ((2849, 2915), 'numpy.loadtxt', 'np.loadtxt', (["(path + 'NotSeed' + dataSetName + '_csv')"], {'delimiter': '""","""'}), "(path + 'NotSeed' + dataSetName + '_csv', delimiter=',')\n", (2859, 2915), True, 'import numpy as np\n'), ((2963, 2999), 'numpy.split', 'np.split', (['dataset_train', '[1]'], {'axis': '(1)'}), '(dataset_train, [1], axis=1)\n', (2971, 2999), True, 'import numpy as np\n'), ((3045, 3080), 'numpy.split', 'np.split', (['dataset_test', '[1]'], {'axis': '(1)'}), '(dataset_test, [1], axis=1)\n', (3053, 3080), True, 'import numpy as np\n'), ((3428, 3459), 'sklearn.linear_model.LinearRegression', 'linear_model.LinearRegression', ([], {}), '()\n', (3457, 3459), False, 'from sklearn import datasets, linear_model\n'), ((2490, 2540), 'sklearn.metrics.mean_squared_error', 'mean_squared_error', (['dataset_y_test', 'dataset_y_pred'], {}), '(dataset_y_test, dataset_y_pred)\n', (2508, 2540), False, 'from sklearn.metrics import mean_squared_error, r2_score\n'), ((4133, 4183), 'sklearn.metrics.mean_squared_error', 'mean_squared_error', (['dataset_y_test', 'dataset_y_pred'], {}), '(dataset_y_test, dataset_y_pred)\n', (4151, 4183), False, 'from sklearn.metrics import mean_squared_error, r2_score\n'), ((2396, 2446), 
'sklearn.metrics.mean_squared_error', 'mean_squared_error', (['dataset_y_test', 'dataset_y_pred'], {}), '(dataset_y_test, dataset_y_pred)\n', (2414, 2446), False, 'from sklearn.metrics import mean_squared_error, r2_score\n'), ((4039, 4089), 'sklearn.metrics.mean_squared_error', 'mean_squared_error', (['dataset_y_test', 'dataset_y_pred'], {}), '(dataset_y_test, dataset_y_pred)\n', (4057, 4089), False, 'from sklearn.metrics import mean_squared_error, r2_score\n')] |
import os.path
import sys
import urllib.parse
import lxml.etree
import healpy as hp
import numpy as np
from mocpy import MOC, WCS
from math import log
import matplotlib
from matplotlib.path import Path
from matplotlib.patches import PathPatch
import astropy
from astropy.utils.data import download_file
from astropy import units as u
from astropy.time import Time, TimeDelta, TimezoneInfo
from astropy.coordinates import SkyCoord, EarthLocation, AltAz, Angle
from astropy.table import Table
from astropy.samp import SAMPIntegratedClient
from datetime import datetime
def moc_confidence_region(prob, id_object, percentage):
    """Build the MOC confidence region enclosing a given probability fraction.

    Using the Multi Order Coverage map (MOC) method, the smallest set of
    HEALPix pixels whose cumulative probability reaches *percentage* is
    serialized to a local FITS file and its sky area (in square degrees)
    is printed and returned.

    Parameters
    ----------
    prob : `numpy.ndarray`
        HEALPix all-sky localization probability map (one value per pixel).
    id_object : `str`
        Event identifier (e.g. the OBJECT keyword of the sky-map FITS
        header); the text after the last '/' and before the first '.' is
        used to build the output file name.
    percentage : `float`
        The decimal fraction of the localization probability to enclose
        (from 0 to 1).

    Returns
    -------
    moc_name : `str`
        Name of the local file in which the MOC is serialized in "fits"
        format: "moc_" + '%.1f' % percentage + "_" + skymap.
    area_sq2 : `float`
        The area of the MOC confidence region in square degrees.
    """
    npix = len(prob)
    nside = hp.npix2nside(npix)  # healpix resolution
    cumsum = np.sort(prob)[::-1].cumsum()  # sort and cumulative sum
    # finding the minimum credible region: the pixel count whose cumulative
    # probability is closest to the requested percentage
    how_many_ipixs, cut_percentage = min(enumerate(cumsum),
                                         key=lambda x: abs(x[1] - percentage))
    del cumsum
    indices = range(0, len(prob))
    prob_indices = np.c_[prob, indices]
    sort = prob_indices[prob_indices[:, 0].argsort()[::-1]]
    ipixs = sort[0:how_many_ipixs + 1, [1]].astype(int)
    # from pixel index to polar coordinates
    theta, phi = hp.pix2ang(nside, ipixs)
    # converting these to right ascension and declination in degrees
    ra = np.rad2deg(phi)
    dec = np.rad2deg(0.5 * np.pi - theta)
    # creating an astropy.table with RA[deg] and DEC[deg]
    contour_ipix = Table([ra, dec], names=('RA', 'DEC'), meta={'name': 'first table'})
    order = int(log(nside, 2))
    # moc from table
    moc = MOC.from_lonlat(contour_ipix['RA'].T * u.deg,
                          contour_ipix['DEC'].T * u.deg, order)
    # getting skymap name
    skymap = id_object.rsplit('/', 1)[-1].rsplit('.')[0]
    moc_name = "moc_" + '%.1f' % percentage + "_" + skymap
    # serialize the moc region in fits format
    moc.write(moc_name, format="fits")
    # square degrees in a whole sphere
    from math import pi
    square_degrees_sphere = (360.0 ** 2) / pi
    # re-read so sky_fraction reflects the serialized region
    moc = MOC.from_fits(moc_name)
    # printing sky area at the given percentage
    area_sq2 = round(moc.sky_fraction * square_degrees_sphere, 1)
    print('The ' + str(percentage * 100) + '% of ' + moc_name + ' is ', area_sq2, 'sq. deg.')
    return moc_name, area_sq2
import gcn
import healpy as hp
# Function to call every time a GCN is received.
# Run only for notices of type
# LVC_PRELIMINARY, LVC_INITIAL, or LVC_UPDATE.
@gcn.handlers.include_notice_types(
    gcn.notice_types.LVC_PRELIMINARY,
    gcn.notice_types.LVC_INITIAL,
    gcn.notice_types.LVC_UPDATE,
    gcn.notice_types.LVC_RETRACTION)
def process_gcn(payload, root):
    """Handle one LVC GCN/VOEvent notice.

    Responds only to 'test'-role CBC notices: dumps the notice parameters
    to stdout and, when a sky map is attached, reads it with healpy and
    derives the 90% MOC confidence region from the probability map.
    """
    # Respond only to 'test' events.
    # VERY IMPORTANT! To respond to real 'observation' events instead,
    # change this filter to: root.attrib['role'] != 'observation'.
    if root.attrib['role'] != 'test':
        return
    # Collect every <Param name="..." value="..."/> of the "What" section.
    params = {}
    for elem in root.iterfind('.//Param'):
        params[elem.attrib['name']] = elem.attrib['value']
    # Respond only to 'CBC' events. Change 'CBC' to 'Burst'
    # to respond to only unmodeled burst events.
    if params['Group'] != 'CBC':
        return
    # Print all parameters.
    for key, value in params.items():
        print(key, '=', value)
    # Nothing more to do unless a sky map is attached.
    if 'skymap_fits' not in params:
        return
    # Read the HEALPix sky map and the FITS header.
    prob, header = hp.read_map(params['skymap_fits'],
                               h=True, verbose=False)
    header = dict(header)
    # Print some values from the FITS header.
    print('Distance =', header['DISTMEAN'], '+/-', header['DISTSTD'])
    id_object = header['OBJECT']  # gracedb id
    # moc creation reading prob from healpix
    moc_confidence_region(prob, id_object, percentage=0.9)
# TODO (francesco): cross-match the MOC region with a galaxy catalogue —
# query galaxy
# filtering galaxy by distance
# save in astropy table
# francesco
# Listen for GCNs until the program is interrupted
# (killed or interrupted with control-C).
# NOTE: this is a blocking call — it runs the VOEvent listener loop forever.
gcn.listen(handler=process_gcn)
# offline testing: uncomment the lines below to replay a saved VOEvent
# payload through the handler instead of listening on the network.
#payload = open('MS181101ab-1-Preliminary.xml', 'rb').read()
#root = lxml.etree.fromstring(payload)
#process_gcn(payload, root)
| [
"healpy.read_map",
"gcn.listen",
"mocpy.MOC.from_fits",
"astropy.table.Table",
"mocpy.MOC.from_lonlat",
"numpy.sort",
"math.log",
"healpy.pix2ang",
"gcn.handlers.include_notice_types",
"healpy.npix2nside",
"numpy.rad2deg"
] | [((3785, 3954), 'gcn.handlers.include_notice_types', 'gcn.handlers.include_notice_types', (['gcn.notice_types.LVC_PRELIMINARY', 'gcn.notice_types.LVC_INITIAL', 'gcn.notice_types.LVC_UPDATE', 'gcn.notice_types.LVC_RETRACTION'], {}), '(gcn.notice_types.LVC_PRELIMINARY, gcn.\n notice_types.LVC_INITIAL, gcn.notice_types.LVC_UPDATE, gcn.notice_types\n .LVC_RETRACTION)\n', (3818, 3954), False, 'import gcn\n'), ((5412, 5443), 'gcn.listen', 'gcn.listen', ([], {'handler': 'process_gcn'}), '(handler=process_gcn)\n', (5422, 5443), False, 'import gcn\n'), ((1891, 1910), 'healpy.npix2nside', 'hp.npix2nside', (['npix'], {}), '(npix)\n', (1904, 1910), True, 'import healpy as hp\n'), ((2548, 2572), 'healpy.pix2ang', 'hp.pix2ang', (['nside', 'ipixs'], {}), '(nside, ipixs)\n', (2558, 2572), True, 'import healpy as hp\n'), ((2652, 2667), 'numpy.rad2deg', 'np.rad2deg', (['phi'], {}), '(phi)\n', (2662, 2667), True, 'import numpy as np\n'), ((2678, 2709), 'numpy.rad2deg', 'np.rad2deg', (['(0.5 * np.pi - theta)'], {}), '(0.5 * np.pi - theta)\n', (2688, 2709), True, 'import numpy as np\n'), ((2796, 2863), 'astropy.table.Table', 'Table', (['[ra, dec]'], {'names': "('RA', 'DEC')", 'meta': "{'name': 'first table'}"}), "([ra, dec], names=('RA', 'DEC'), meta={'name': 'first table'})\n", (2801, 2863), False, 'from astropy.table import Table\n'), ((2934, 3021), 'mocpy.MOC.from_lonlat', 'MOC.from_lonlat', (["(contour_ipix['RA'].T * u.deg)", "(contour_ipix['DEC'].T * u.deg)", 'order'], {}), "(contour_ipix['RA'].T * u.deg, contour_ipix['DEC'].T * u.deg,\n order)\n", (2949, 3021), False, 'from mocpy import MOC, WCS\n'), ((3389, 3412), 'mocpy.MOC.from_fits', 'MOC.from_fits', (['moc_name'], {}), '(moc_name)\n', (3402, 3412), False, 'from mocpy import MOC, WCS\n'), ((2883, 2896), 'math.log', 'log', (['nside', '(2)'], {}), '(nside, 2)\n', (2886, 2896), False, 'from math import log\n'), ((4811, 4868), 'healpy.read_map', 'hp.read_map', (["params['skymap_fits']"], {'h': '(True)', 'verbose': 
'(False)'}), "(params['skymap_fits'], h=True, verbose=False)\n", (4822, 4868), True, 'import healpy as hp\n'), ((1953, 1966), 'numpy.sort', 'np.sort', (['prob'], {}), '(prob)\n', (1960, 1966), True, 'import numpy as np\n')] |
from typing import Sequence, Union, Tuple
import math
import numpy as np
from numpy import ndarray
from numpy.lib.stride_tricks import as_strided, broadcast_to
__all__ = [
'isscalar',
'isvector',
'ismatrix',
'repeat',
'dilate_map_2d',
'compute_window_slide_size_2d',
'window_slide_2d',
'compute_padding_size_2d',
'apply_padding_2d',
'strip_padding_2d',
]
def isscalar(a: ndarray) -> bool:
    """Return True when *a* holds a single element (size 1 along every axis)."""
    if a.ndim == 0:
        return True
    return max(a.shape) == 1
def isvector(a: ndarray):
    """Return True when *a* is 1-D, or 2-D with a singleton row or column."""
    return min(a.shape) == 1 if a.ndim == 2 else a.ndim == 1
def ismatrix(a: ndarray):
    """Return True when *a* has exactly two dimensions."""
    return len(a.shape) == 2
def repeat(a: ndarray, axis: int, repeats: int, copy: bool = True) -> ndarray:
    """Repeat *a* along *axis*.

    Parameters
    ----------
    a : ndarray
        Input array.
    axis : int
        Axis along which to repeat.
    repeats : int
        Number of repetitions.
    copy : bool
        When True, materialize the result with ``np.repeat``. When False,
        return a read-only broadcast *view* instead of allocating; this is
        only possible when ``a.shape[axis] == 1`` (for a size-1 axis the
        two paths produce identical values).

    Returns
    -------
    ndarray
        Array with ``repeats`` entries along *axis* (times the original
        extent when copy=True).

    Raises
    ------
    ValueError
        If ``copy=False`` and ``a.shape[axis] != 1`` (previously this
        surfaced as an opaque broadcast error from numpy).
    """
    if copy:
        return np.repeat(a, repeats, axis)
    if a.shape[axis] != 1:
        # Broadcasting can only expand a size-1 axis; fail with a clear
        # message instead of an obscure error from broadcast_to.
        raise ValueError(
            'repeat with copy=False requires a.shape[axis] == 1, '
            'got shape %r along axis %d' % (a.shape, axis))
    new_shape = list(a.shape)
    new_shape[axis] = repeats
    return broadcast_to(a, new_shape)
def dilate_map_2d(
    xsize: int,
    ysize: int,
    xstride: int,
    ystride: int,
    dtype: Union[type, np.dtype]
) -> Tuple[ndarray, ndarray]:
    """Build an empty dilated 2-D map and the boolean mask of source cells.

    A (ysize, xsize) grid dilated by (ystride, xstride) occupies a
    (ystride*(ysize-1)+1, xstride*(xsize-1)+1) grid where the original
    values land every stride-th row/column.

    Returns
    -------
    (yx, yx_indices) : Tuple[ndarray, ndarray]
        ``yx`` is the zero-filled dilated map of the given *dtype*;
        ``yx_indices`` is a boolean mask, True at the positions that
        receive original values. Both have the dilated shape (my, mx).
    """
    ny, nx = ysize, xsize
    mx = xstride * (nx - 1) + 1
    x = (np.arange(0, mx, 1, dtype=np.int64) % xstride == 0)
    my = ystride * (ny - 1) + 1
    y = (np.arange(0, my, 1, dtype=np.int64) % ystride == 0)
    # BUG FIX: use matrix ('ij') indexing so the mask has shape (my, mx),
    # matching the returned map. The previous np.meshgrid(y, x) call used
    # the default 'xy' indexing and produced a transposed (mx, my) mask.
    yv, xv = np.meshgrid(y, x, indexing='ij')
    yx_indices = (yv & xv)
    yx = np.zeros((my, mx), dtype)
    return yx, yx_indices
def compute_window_slide_size_2d(
    input_size: Tuple[int, int],
    window_size: Union[int, Tuple[int, int]],
    strides: Union[int, Tuple[int, int]],
    mode: str
) -> Tuple[int, int]:
    """Return the (height, width) produced by sliding a 2-D window.

    *mode* (case-insensitive) selects the convolution-style output size:
    'valid' (no padding), 'same' (input size preserved) or 'full'
    (window allowed to hang off both edges).

    Raises ValueError for an unknown mode.
    """
    in_h, in_w = input_size[:2]
    if isinstance(window_size, int):
        win_h, win_w = window_size, window_size
    else:
        win_h, win_w = window_size
    if isinstance(strides, int):
        step_h, step_w = strides, strides
    else:
        step_h, step_w = strides
    mode = mode.lower()
    if mode == 'same':
        return in_h, in_w
    if mode == 'valid':
        return int((in_h - win_h) / step_h + 1), int((in_w - win_w) / step_w + 1)
    if mode == 'full':
        # The 'full' output is the 'valid' output of the zero-padded input.
        padded_h = 2 * (win_h - 1) + in_h
        padded_w = 2 * (win_w - 1) + in_w
        return int((padded_h - win_h) / step_h + 1), int((padded_w - win_w) / step_w + 1)
    raise ValueError('Convolution mode not found.')
def window_slide_2d(a: ndarray, size: Union[int, Sequence[int]], strides: Union[int, Sequence[int]] = (1, 1), readonly: bool = True) -> ndarray:
    """Return a strided view of all 2-D sliding windows over *a*.

    The result has shape (out_h, out_w, win_h, win_w, *a.shape[2:]) and
    shares memory with *a* (read-only unless readonly=False); no data is
    copied. Window placement follows 'valid' mode with the given strides.
    """
    in_h, in_w = a.shape[:2]  # input array 2-D size
    if isinstance(size, int):
        win_h, win_w = size, size
    else:
        win_h, win_w = size[:2]
    if isinstance(strides, int):
        step_h, step_w = strides, strides
    else:
        step_h, step_w = strides[:2]
    out_h, out_w = compute_window_slide_size_2d(
        (in_h, in_w), (win_h, win_w), (step_h, step_w), 'valid')
    assert out_h > 0 and out_w > 0, 'Output dimensions must be greater than zero.'
    view_shape = (out_h, out_w, win_h, win_w, *(a.shape[2:]))
    row_stride, col_stride = a.strides[0], a.strides[1]
    view_strides = (row_stride * step_h, col_stride * step_w, *a.strides)
    return as_strided(a, view_shape, view_strides, writeable=(not readonly))
def compute_padding_size_2d(
    size: Tuple[int, int],
    ksize: Union[int, Tuple[int, int]],
    strides: Union[int, Tuple[int, int]],
    mode: str
) -> Tuple[float, float]:
    """Per-side (y, x) padding that makes a 'valid' slide emulate `mode`.

    The returned sizes may be half-integers; callers floor ('same') or
    ceil ('full') them as appropriate.  Raises ValueError for unknown
    modes.
    """
    ysize, xsize = size
    if isinstance(ksize, int):
        yk = xk = ksize
    else:
        yk, xk = ksize[:2]
    if isinstance(strides, int):
        ys = xs = strides
    else:
        ys, xs = strides[:2]
    if mode == 'valid':
        return 0., 0.
    if mode == 'same':
        return (ys * (ysize - 1) - ysize + yk) * .5, (xs * (xsize - 1) - xsize + xk) * .5
    if mode == 'full':
        y_pad = (ys * (math.floor((ysize + yk - 2) / ys + 1) - 1) + yk - ysize) * .5
        x_pad = (xs * (math.floor((xsize + xk - 2) / xs + 1) - 1) + xk - xsize) * .5
        return y_pad, x_pad
    raise ValueError(
        'No such padding mode is supported. Available '
        'modes are: valid, same, full (case insensitive).')
def apply_padding_2d(
    a: ndarray,
    mode: str,
    ksize: Union[int, Tuple[int, int]] = None,
    strides: Union[int, Tuple[int, int]] = None,
    padding: Union[float, Tuple[float, float]] = None
) -> ndarray:
    """
    Apply 2-D padding.

    Padding size is calculated in respect to 'valid' window sliding mode.
    When `padding` is given it overrides the (ksize, strides) computation.

    Bug fix: a scalar `padding` is typed as float, but the original only
    recognised `int` scalars (``isinstance(padding, int)``) and crashed on
    e.g. ``padding=1.5`` by attempting ``padding[:2]``; both numeric scalar
    types are now accepted.  'valid' mode also returns early, which avoids
    allocating the unused buffer and makes ``apply_padding_2d(a, 'valid')``
    work without `ksize`/`strides`.
    """
    ysize, xsize = a.shape[:2]
    if mode == 'valid':
        # No padding needed; skip the buffer allocation entirely.
        return a
    if padding is None:
        y_pad_size, x_pad_size = compute_padding_size_2d((ysize, xsize), ksize, strides, mode)
    else:
        y_pad_size, x_pad_size = (padding, padding) if isinstance(padding, (int, float)) else padding[:2]
    padded = np.zeros((ysize + int(2 * y_pad_size), xsize + int(2 * x_pad_size), *a.shape[2:]), a.dtype)
    if mode == 'same':
        # Half-integer padding: floor keeps the array anchored top-left.
        y0 = math.floor(y_pad_size)
        x0 = math.floor(x_pad_size)
        padded[y0:y0 + ysize, x0:x0 + xsize, ...] = a
        return padded
    elif mode == 'full':
        y0 = math.ceil(y_pad_size)
        x0 = math.ceil(x_pad_size)
        padded[y0:y0 + ysize, x0:x0 + xsize, ...] = a
        return padded
    else:
        raise ValueError(
            'No such padding mode is supported. Available '
            'modes are: valid, same, full (case insensitive).')
def strip_padding_2d(a: ndarray, padding_size: Union[float, Tuple[float, float]], mode: str) -> ndarray:
    """
    Strip 2-D padding from padded array.

    Padding size is calculated in respect to 'valid' window sliding mode,
    mirroring `apply_padding_2d` (floor offsets for 'same', ceil for 'full').

    Bug fix: a scalar `padding_size` is typed as float, but the original
    only recognised `int` scalars (``isinstance(padding_size, int)``) and
    crashed on float scalars by attempting ``padding_size[:2]``; both
    numeric scalar types are now accepted.
    """
    ysize, xsize = a.shape[:2]
    if isinstance(padding_size, (int, float)):
        y_pad_size = x_pad_size = padding_size
    else:
        y_pad_size, x_pad_size = padding_size[:2]
    ystripedsize = int(-y_pad_size * 2 + ysize)
    xstripedsize = int(-x_pad_size * 2 + xsize)
    if mode == 'valid':
        return a
    elif mode == 'same':
        y0 = math.floor(y_pad_size)
        x0 = math.floor(x_pad_size)
        return a[y0:y0 + ystripedsize, x0:x0 + xstripedsize, ...]
    elif mode == 'full':
        y0 = math.ceil(y_pad_size)
        x0 = math.ceil(x_pad_size)
        return a[y0:y0 + ystripedsize, x0:x0 + xstripedsize, ...]
    else:
        raise ValueError(
            'No such padding mode is supported. Available '
            'modes are: valid, same, full (case insensitive).')
# def convolution2d(src: ndarray, filter: ndarray, xstride: int, ystride: int, mode: str = 'full'):
# if src.ndim != 3:
# raise ValueError('Source array must be 3D with shape (Height, Width, Depth).')
# elif filter.ndim != 2 and (filter.ndim == 3 and filter.shape[2] != src.shape[2]):
# raise ValueError('Filter array must be 2-D of shape (Height, Width) or 3-D with shape '
# '(Height, Width, Depth). In such case filter depth dimension must '
# 'match that of the source array.')
#
# mode = mode.lower()
# if mode not in {'full', 'same', 'valid'}:
# raise AssertionError('No such padding mode is available. Currently available '
# 'padding modes are: full | valid | same.')
#
# h, w, d = src.shape[:3]
# kernel_height, kernel_width = filter.shape[:2]
#
# if mode == 'same':
# x_padded_size = xstride * (w - 1) + kernel_width
# y_padded_size = ystride * (h - 1) + kernel_height
#
# x_pad_size = int((x_padded_size - w) * .5)
# y_pad_size = int((y_padded_size - h) * .5)
#
# pad = np.zeros((y_padded_size, x_padded_size, d), src.dtype)
# pad[y_pad_size:h + y_pad_size, x_pad_size:w + x_pad_size, :] = src
# src = pad
#
# conv_out = convolve2d(src[:, :, 0], (filter[:, :, 0] if filter.ndim > 2 else filter), mode='full')
# out = np.empty(list(conv_out.shape)+[d], conv_out.dtype)
# out[:, :, 0] = conv_out
#
# if d > 1:
# for i in range(1, d):
# conv_out = convolve2d(src[:, :, i], (filter[:, :, i] if filter.ndim > 2 else filter), mode='full')
# out[:, :, i] = conv_out
#
# if filter.ndim > 2:
# out = np.expand_dims(np.sum(out, 2), 2)
#
# if mode == 'full':
# out = out[::ystride, ::xstride, :]
# elif mode == 'valid' or mode == 'same':
# if np.prod(out.shape) != 1:
# out = out[
# kernel_height - 1:-kernel_height + 1:ystride,
# kernel_width - 1:-kernel_width + 1:xstride,
# ]
#
# return out
| [
"numpy.lib.stride_tricks.broadcast_to",
"numpy.repeat",
"math.ceil",
"math.floor",
"numpy.lib.stride_tricks.as_strided",
"numpy.zeros",
"numpy.meshgrid",
"numpy.arange"
] | [((912, 938), 'numpy.lib.stride_tricks.broadcast_to', 'broadcast_to', (['a', 'new_shape'], {}), '(a, new_shape)\n', (924, 938), False, 'from numpy.lib.stride_tricks import as_strided, broadcast_to\n'), ((1357, 1374), 'numpy.meshgrid', 'np.meshgrid', (['y', 'x'], {}), '(y, x)\n', (1368, 1374), True, 'import numpy as np\n'), ((1411, 1436), 'numpy.zeros', 'np.zeros', (['(my, mx)', 'dtype'], {}), '((my, mx), dtype)\n', (1419, 1436), True, 'import numpy as np\n'), ((3190, 3257), 'numpy.lib.stride_tricks.as_strided', 'as_strided', (['a', 'output_shape', 'output_strides'], {'writeable': '(not readonly)'}), '(a, output_shape, output_strides, writeable=not readonly)\n', (3200, 3257), False, 'from numpy.lib.stride_tricks import as_strided, broadcast_to\n'), ((788, 815), 'numpy.repeat', 'np.repeat', (['a', 'repeats', 'axis'], {}), '(a, repeats, axis)\n', (797, 815), True, 'import numpy as np\n'), ((1197, 1232), 'numpy.arange', 'np.arange', (['(0)', 'mx', '(1)'], {'dtype': 'np.int64'}), '(0, mx, 1, dtype=np.int64)\n', (1206, 1232), True, 'import numpy as np\n'), ((1291, 1326), 'numpy.arange', 'np.arange', (['(0)', 'my', '(1)'], {'dtype': 'np.int64'}), '(0, my, 1, dtype=np.int64)\n', (1300, 1326), True, 'import numpy as np\n'), ((5162, 5184), 'math.floor', 'math.floor', (['y_pad_size'], {}), '(y_pad_size)\n', (5172, 5184), False, 'import math\n'), ((5212, 5234), 'math.floor', 'math.floor', (['x_pad_size'], {}), '(x_pad_size)\n', (5222, 5234), False, 'import math\n'), ((6356, 6378), 'math.floor', 'math.floor', (['x_pad_size'], {}), '(x_pad_size)\n', (6366, 6378), False, 'import math\n'), ((6406, 6428), 'math.floor', 'math.floor', (['y_pad_size'], {}), '(y_pad_size)\n', (6416, 6428), False, 'import math\n'), ((5420, 5441), 'math.ceil', 'math.ceil', (['y_pad_size'], {}), '(y_pad_size)\n', (5429, 5441), False, 'import math\n'), ((5468, 5489), 'math.ceil', 'math.ceil', (['x_pad_size'], {}), '(x_pad_size)\n', (5477, 5489), False, 'import math\n'), ((6676, 6697), 'math.ceil', 
'math.ceil', (['x_pad_size'], {}), '(x_pad_size)\n', (6685, 6697), False, 'import math\n'), ((6724, 6745), 'math.ceil', 'math.ceil', (['y_pad_size'], {}), '(y_pad_size)\n', (6733, 6745), False, 'import math\n'), ((3967, 4013), 'math.floor', 'math.floor', (['((xsize + xksize - 2) / xstride + 1)'], {}), '((xsize + xksize - 2) / xstride + 1)\n', (3977, 4013), False, 'import math\n'), ((4075, 4121), 'math.floor', 'math.floor', (['((ysize + yksize - 2) / ystride + 1)'], {}), '((ysize + yksize - 2) / ystride + 1)\n', (4085, 4121), False, 'import math\n')] |
import numpy as np
import onnxruntime
import pandas as pd
import torch
from pathlib import PosixPath
from pickle import load
from sklearn.preprocessing import MinMaxScaler
from make_us_rich.pipelines.preprocessing import extract_features_from_dataset
from make_us_rich.pipelines.converting import to_numpy
class OnnxModel:
    """Wraps an exported ONNX model plus its fitted MinMaxScaler for inference."""

    def __init__(self, model_path: PosixPath, scaler_path: PosixPath):
        self.model_path = model_path
        self.scaler_path = scaler_path
        # Model files live in a directory named after the model.
        self.model_name = self.model_path.parent.parts[-1]
        self.model = onnxruntime.InferenceSession(str(model_path))
        self.scaler = self._load_scaler()
        self.descaler = self._create_descaler()

    def __repr__(self) -> str:
        return f"<OnnxModel: {self.model_name}>"

    def predict(self, sample: pd.DataFrame) -> float:
        """
        Predicts the close price based on the input sample.
        Parameters
        ----------
        sample: pd.DataFrame
            Input sample.
        Returns
        -------
        float
            Predicted close price.
        """
        X = self._preprocessing_sample(sample)
        inputs = {self.model.get_inputs()[0].name: to_numpy(X)}
        results = self.model.run(None, inputs)[0][0]
        return self._descaling_sample(results)

    def _create_descaler(self) -> MinMaxScaler:
        """
        Creates a descaler for the target column only, by copying the last
        component of the fitted scaler's parameters into a fresh scaler.
        Returns
        -------
        MinMaxScaler
        """
        descaler = MinMaxScaler()
        descaler.min_, descaler.scale_ = self.scaler.min_[-1], self.scaler.scale_[-1]
        return descaler

    # Bug fix: the original annotated this method as `-> None` although it
    # returns the descaled values; the annotation now matches the behaviour.
    def _descaling_sample(self, sample) -> np.ndarray:
        """
        Descales the sample back to the original price range.
        Parameters
        ----------
        sample: numpy.ndarray
            Sample to be descaled.
        Returns
        -------
        numpy.ndarray
            Descaled (flattened) sample.
        """
        values_2d = np.array(sample)[:, np.newaxis]
        return self.descaler.inverse_transform(values_2d).flatten()

    def _load_scaler(self) -> MinMaxScaler:
        """
        Loads the pickled scaler saved next to the model files.
        Returns
        -------
        MinMaxScaler
        """
        with open(self.scaler_path, "rb") as file:
            return load(file)

    def _preprocessing_sample(self, sample: pd.DataFrame) -> torch.tensor:
        """
        Preprocesses the input sample: feature extraction, scaling, and
        reshaping to a (1, T, F) tensor.
        Parameters
        ----------
        sample: pd.DataFrame
            Input sample.
        Returns
        -------
        torch.tensor
            Preprocessed sample.
        """
        data = extract_features_from_dataset(sample)
        scaled_data = pd.DataFrame(
            self.scaler.transform(data), index=data.index, columns=data.columns
        )
        return torch.Tensor(scaled_data.values).unsqueeze(0)
| [
"make_us_rich.pipelines.preprocessing.extract_features_from_dataset",
"pickle.load",
"torch.Tensor",
"make_us_rich.pipelines.converting.to_numpy",
"numpy.array",
"sklearn.preprocessing.MinMaxScaler"
] | [((1483, 1497), 'sklearn.preprocessing.MinMaxScaler', 'MinMaxScaler', ([], {}), '()\n', (1495, 1497), False, 'from sklearn.preprocessing import MinMaxScaler\n'), ((2626, 2663), 'make_us_rich.pipelines.preprocessing.extract_features_from_dataset', 'extract_features_from_dataset', (['sample'], {}), '(sample)\n', (2655, 2663), False, 'from make_us_rich.pipelines.preprocessing import extract_features_from_dataset\n'), ((1195, 1206), 'make_us_rich.pipelines.converting.to_numpy', 'to_numpy', (['X'], {}), '(X)\n', (1203, 1206), False, 'from make_us_rich.pipelines.converting import to_numpy\n'), ((1926, 1942), 'numpy.array', 'np.array', (['sample'], {}), '(sample)\n', (1934, 1942), True, 'import numpy as np\n'), ((2267, 2277), 'pickle.load', 'load', (['file'], {}), '(file)\n', (2271, 2277), False, 'from pickle import load\n'), ((2805, 2837), 'torch.Tensor', 'torch.Tensor', (['scaled_data.values'], {}), '(scaled_data.values)\n', (2817, 2837), False, 'import torch\n')] |
import cv2
import numpy as np
global prevgray
prevgray = cv2.cvtColor(cv2.VideoCapture(0).read()[1], cv2.COLOR_BGR2GRAY)
def draw_flow(img, flow, step=16):
    """Visualize a dense optical-flow field on a grayscale frame.

    Samples the flow on a regular grid (`step` pixels apart), draws a green
    line from each grid point to its displaced position, and marks each
    grid origin with a dot.

    Bug fix: after reshaping, each entry of `lines` is a (start, end) pair
    of points, so the old ``for (x1, y1) in lines`` unpacked two 2-vectors
    and ``cv2.circle`` raised on every iteration — silently swallowed by a
    bare ``except`` that printed 'err'.  We now unpack the pair correctly
    and draw the dot at the line's origin.
    """
    h, w = img.shape[:2]
    y, x = np.mgrid[step / 2:h:step, step / 2:w:step].reshape(2, -1).astype(int)
    fx, fy = flow[y, x].T
    lines = np.vstack([x, y, x + fx, y + fy]).T.reshape(-1, 2, 2)
    lines = np.int32(lines + 0.5)
    vis = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
    cv2.polylines(vis, lines, 0, (0, 255, 0))
    for (x1, y1), _end in lines:
        cv2.circle(vis, (int(x1), int(y1)), 1, (0, 255, 0), -1)
    return vis
def filtering(img):
    """Gaussian-smooth the frame (3x3 kernel) and stamp a caption on it."""
    smoothed = cv2.GaussianBlur(img, (3, 3), 0)
    cv2.putText(smoothed, 'Filtered Image', (10, 50),
                cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 0), 2, cv2.LINE_AA)
    return smoothed
def edges(img):
    """Run Canny edge detection and stamp a white caption on the edge map."""
    edge_map = cv2.Canny(img, 2500, 1500, apertureSize=5)
    cv2.putText(edge_map, 'Edge Detection', (10, 50),
                cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2, cv2.LINE_AA)
    return edge_map
def features(img):
    """Detect ORB keypoints, draw them (location only) on the frame, caption it."""
    detector = cv2.ORB_create()
    keypoints = detector.detect(img, None)
    keypoints, _descriptors = detector.compute(img, keypoints)
    # drawKeypoints writes into `img` (third argument) and returns it.
    annotated = cv2.drawKeypoints(img, keypoints, img, color=(0, 255, 0), flags=0)
    cv2.putText(img, 'Features', (10, 50),
                cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 0), 2, cv2.LINE_AA)
    return annotated
def optflow(img):
    """Compute dense Farneback optical flow against the previous frame.

    Updates the module-level `prevgray` with the current grayscale frame
    and returns the flow visualization from draw_flow().
    """
    global prevgray
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    flow = cv2.calcOpticalFlowFarneback(prevgray, gray, None,
                                        0.5, 3, 15, 3, 5, 1.2, 0)
    prevgray = gray
    cv2.putText(gray, 'Optical Flow', (10, 50),
                cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 0), 2, cv2.LINE_AA)
    return draw_flow(gray, flow)
if __name__ == '__main__':
    # Live webcam demo: cycle through 5 display modes with the arrow keys.
    cap = cv2.VideoCapture(0)
    mode = 0
    NUM_MODES = 5
    while True:
        _, frame = cap.read()
        if mode == 0:
            font = cv2.FONT_HERSHEY_SIMPLEX
            cv2.putText(frame, 'Input Image', (10, 50), font, 1, (0, 0, 0), 2, cv2.LINE_AA)
            cv2.imshow('Output', frame)
        elif mode == 1:
            cv2.imshow('Output', filtering(frame))
        elif mode == 2:
            cv2.imshow('Output', edges(frame))
        elif mode == 3:
            cv2.imshow('Output', features(frame))
        elif mode == 4:
            cv2.imshow('Output', optflow(frame))
        key = cv2.waitKey(1)
        if key == 27:          # Esc quits
            break
        elif key == 83:        # right arrow: next mode (wraps around)
            mode = (mode + 1) % NUM_MODES
        elif key == 81:        # left arrow: previous mode (wraps around)
            mode = (mode - 1) % NUM_MODES
    cv2.destroyAllWindows()
| [
"cv2.drawKeypoints",
"cv2.polylines",
"cv2.Canny",
"numpy.int32",
"cv2.putText",
"cv2.imshow",
"cv2.circle",
"cv2.ORB_create",
"cv2.destroyAllWindows",
"cv2.VideoCapture",
"cv2.cvtColor",
"numpy.vstack",
"cv2.calcOpticalFlowFarneback",
"cv2.GaussianBlur",
"cv2.waitKey"
] | [((369, 390), 'numpy.int32', 'np.int32', (['(lines + 0.5)'], {}), '(lines + 0.5)\n', (377, 390), True, 'import numpy as np\n'), ((401, 438), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_GRAY2BGR'], {}), '(img, cv2.COLOR_GRAY2BGR)\n', (413, 438), False, 'import cv2\n'), ((443, 484), 'cv2.polylines', 'cv2.polylines', (['vis', 'lines', '(0)', '(0, 255, 0)'], {}), '(vis, lines, 0, (0, 255, 0))\n', (456, 484), False, 'import cv2\n'), ((672, 704), 'cv2.GaussianBlur', 'cv2.GaussianBlur', (['img', '(3, 3)', '(0)'], {}), '(img, (3, 3), 0)\n', (688, 704), False, 'import cv2\n'), ((745, 830), 'cv2.putText', 'cv2.putText', (['img', '"""Filtered Image"""', '(10, 50)', 'font', '(1)', '(0, 0, 0)', '(2)', 'cv2.LINE_AA'], {}), "(img, 'Filtered Image', (10, 50), font, 1, (0, 0, 0), 2, cv2.LINE_AA\n )\n", (756, 830), False, 'import cv2\n'), ((869, 911), 'cv2.Canny', 'cv2.Canny', (['img', '(2500)', '(1500)'], {'apertureSize': '(5)'}), '(img, 2500, 1500, apertureSize=5)\n', (878, 911), False, 'import cv2\n'), ((952, 1042), 'cv2.putText', 'cv2.putText', (['img', '"""Edge Detection"""', '(10, 50)', 'font', '(1)', '(255, 255, 255)', '(2)', 'cv2.LINE_AA'], {}), "(img, 'Edge Detection', (10, 50), font, 1, (255, 255, 255), 2,\n cv2.LINE_AA)\n", (963, 1042), False, 'import cv2\n'), ((1085, 1101), 'cv2.ORB_create', 'cv2.ORB_create', ([], {}), '()\n', (1099, 1101), False, 'import cv2\n'), ((1240, 1299), 'cv2.drawKeypoints', 'cv2.drawKeypoints', (['img', 'kp', 'img'], {'color': '(0, 255, 0)', 'flags': '(0)'}), '(img, kp, img, color=(0, 255, 0), flags=0)\n', (1257, 1299), False, 'import cv2\n'), ((1340, 1414), 'cv2.putText', 'cv2.putText', (['img', '"""Features"""', '(10, 50)', 'font', '(1)', '(0, 0, 0)', '(2)', 'cv2.LINE_AA'], {}), "(img, 'Features', (10, 50), font, 1, (0, 0, 0), 2, cv2.LINE_AA)\n", (1351, 1414), False, 'import cv2\n'), ((1482, 1519), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2GRAY'], {}), '(img, cv2.COLOR_BGR2GRAY)\n', (1494, 1519), False, 'import 
cv2\n'), ((1531, 1607), 'cv2.calcOpticalFlowFarneback', 'cv2.calcOpticalFlowFarneback', (['prevgray', 'gray', 'None', '(0.5)', '(3)', '(15)', '(3)', '(5)', '(1.2)', '(0)'], {}), '(prevgray, gray, None, 0.5, 3, 15, 3, 5, 1.2, 0)\n', (1559, 1607), False, 'import cv2\n'), ((1668, 1747), 'cv2.putText', 'cv2.putText', (['gray', '"""Optical Flow"""', '(10, 50)', 'font', '(1)', '(0, 0, 0)', '(2)', 'cv2.LINE_AA'], {}), "(gray, 'Optical Flow', (10, 50), font, 1, (0, 0, 0), 2, cv2.LINE_AA)\n", (1679, 1747), False, 'import cv2\n'), ((1820, 1839), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(0)'], {}), '(0)\n', (1836, 1839), False, 'import cv2\n'), ((2707, 2730), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (2728, 2730), False, 'import cv2\n'), ((2385, 2399), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (2396, 2399), False, 'import cv2\n'), ((537, 582), 'cv2.circle', 'cv2.circle', (['vis', '(x1, y1)', '(1)', '(0, 255, 0)', '(-1)'], {}), '(vis, (x1, y1), 1, (0, 255, 0), -1)\n', (547, 582), False, 'import cv2\n'), ((1974, 2051), 'cv2.putText', 'cv2.putText', (['img', '"""Input Image"""', '(10, 50)', 'font', '(1)', '(0, 0, 0)', '(2)', 'cv2.LINE_AA'], {}), "(img, 'Input Image', (10, 50), font, 1, (0, 0, 0), 2, cv2.LINE_AA)\n", (1985, 2051), False, 'import cv2\n'), ((2064, 2089), 'cv2.imshow', 'cv2.imshow', (['"""Output"""', 'img'], {}), "('Output', img)\n", (2074, 2089), False, 'import cv2\n'), ((71, 90), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(0)'], {}), '(0)\n', (87, 90), False, 'import cv2\n'), ((303, 336), 'numpy.vstack', 'np.vstack', (['[x, y, x + fx, y + fy]'], {}), '([x, y, x + fx, y + fy])\n', (312, 336), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
import numpy as np
class Acid:
    """An acid characterized by its dissociation constants."""

    def __init__(self, pk=True, *args):
        """Store dissociation constants in ``self.K``.

        pk: when True (default), *args are pK values and are converted
            via K = 10 ** (-pK); otherwise *args are taken as K directly.
        """
        constants = np.array(args)
        if pk:
            constants = 10 ** -constants
        self.K = constants

    def delta(self, c_H):
        # Intentionally a stub, as in the original.
        pass
| [
"numpy.array"
] | [((114, 128), 'numpy.array', 'np.array', (['args'], {}), '(args)\n', (122, 128), True, 'import numpy as np\n')] |
import matplotlib.pyplot as plt
import numpy as np
from scipy.optimize import curve_fit
from astropy.io import ascii
from uncertainties import ufloat
import uncertainties.unumpy as unp
# Dispersion measurement on an LC ladder: compute the measured phase
# velocity from the recorded frequencies and compare it with theory.
n,w=np.genfromtxt("Messdaten/c_1.txt",unpack=True)
n=n+1  # shift mode index by one
theta=(n*np.pi)/14  # phase shift per cell (divisor 14 from the setup)
kreisfrequenz=w*2*np.pi  # angular frequency omega = 2*pi*f
phase=kreisfrequenz/theta  # measured phase velocity omega/theta
L=1.217*1/10**4  # inductance per cell -- presumably henries; confirm with lab notes
C=20.13*1/10**9  # capacitance per cell -- presumably farads; confirm with lab notes
def theorie(f):
    """Theoretical phase velocity of the LC ladder at frequency f (uses module-level L, C)."""
    return ((2*np.pi*f)/(np.arccos(1-((1/2)*L*C*(2*np.pi*f)**2))))
# Export the measurement table as LaTeX, then plot measurement vs. theory.
ascii.write([n, theta,w,w*2*np.pi,phase], 'Messdaten/tab_c.tex', format="latex",
            names=['n','theta','frequenz','kreis','Phase'])
plt.plot(w, phase, 'rx', label="Messwerte")
plt.plot(w, theorie(w), 'b-', label="Theoriekurve")
plt.ylabel(r'Phasengeschwindigkeit')
plt.xlabel(r'Frequenz')
plt.legend(loc='best')
plt.tight_layout()
plt.savefig('Bilder/Phasengeschwindigkeit.pdf')
| [
"matplotlib.pyplot.savefig",
"astropy.io.ascii.write",
"numpy.arccos",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.tight_layout",
"numpy.genfromtxt",
"matplotlib.pyplot.legend"
] | [((189, 236), 'numpy.genfromtxt', 'np.genfromtxt', (['"""Messdaten/c_1.txt"""'], {'unpack': '(True)'}), "('Messdaten/c_1.txt', unpack=True)\n", (202, 236), True, 'import numpy as np\n'), ((427, 570), 'astropy.io.ascii.write', 'ascii.write', (['[n, theta, w, w * 2 * np.pi, phase]', '"""Messdaten/tab_c.tex"""'], {'format': '"""latex"""', 'names': "['n', 'theta', 'frequenz', 'kreis', 'Phase']"}), "([n, theta, w, w * 2 * np.pi, phase], 'Messdaten/tab_c.tex',\n format='latex', names=['n', 'theta', 'frequenz', 'kreis', 'Phase'])\n", (438, 570), False, 'from astropy.io import ascii\n'), ((568, 611), 'matplotlib.pyplot.plot', 'plt.plot', (['w', 'phase', '"""rx"""'], {'label': '"""Messwerte"""'}), "(w, phase, 'rx', label='Messwerte')\n", (576, 611), True, 'import matplotlib.pyplot as plt\n'), ((665, 700), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Phasengeschwindigkeit"""'], {}), "('Phasengeschwindigkeit')\n", (675, 700), True, 'import matplotlib.pyplot as plt\n'), ((702, 724), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Frequenz"""'], {}), "('Frequenz')\n", (712, 724), True, 'import matplotlib.pyplot as plt\n'), ((726, 748), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""best"""'}), "(loc='best')\n", (736, 748), True, 'import matplotlib.pyplot as plt\n'), ((749, 767), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (765, 767), True, 'import matplotlib.pyplot as plt\n'), ((768, 815), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""Bilder/Phasengeschwindigkeit.pdf"""'], {}), "('Bilder/Phasengeschwindigkeit.pdf')\n", (779, 815), True, 'import matplotlib.pyplot as plt\n'), ((384, 435), 'numpy.arccos', 'np.arccos', (['(1 - 1 / 2 * L * C * (2 * np.pi * f) ** 2)'], {}), '(1 - 1 / 2 * L * C * (2 * np.pi * f) ** 2)\n', (393, 435), True, 'import numpy as np\n')] |
import networkx as nx
import numpy as np
def getAddress(name):
    """Return the home graph-node id for a group member.

    Raises KeyError for unknown names (same as the original
    dict-literal lookup).
    """
    addresses = {
        'Manuel': '499774491',
        'Mario': '499841834',
        'Stev': '649998996',
    }
    return addresses[name]
def Floyd_Warshall():
    """All-pairs shortest paths over the PUCP street graph.

    Loads ``PUCP.graphml`` and runs Floyd-Warshall using each edge's
    'length' attribute as weight.

    Returns:
        roads: N x N predecessor matrix (roads[i][j] is the index of the
            node preceding j on the shortest i -> j path; -1 when there
            is no direct edge and no path has been found).
        distances: N x N shortest-distance matrix.
        keys: dict mapping node id -> matrix index.
        nodes: list mapping matrix index -> node id.
    """
    G1 = nx.read_graphml("PUCP.graphml")
    N = nx.number_of_nodes(G1)
    # One matrix for distances, one for the path reconstruction.
    distances = np.zeros((N, N))
    roads = np.zeros((N, N))
    # Give each node an integer identifier in [0..N-1] for matrix addressing,
    # and a dict to map node ids back to those identifiers.
    nodes = list(G1.nodes)
    keys = dict()
    for i in range(N):
        keys[nodes[i]] = i
    # Seed the distance matrix from the graph's edge lengths.
    for i in range(N):
        for j in range(N):
            try:
                distance = float(G1[nodes[i]][nodes[j]][0]['length'])
                distances[i][j] = distance
                roads[i][j] = i
            except KeyError:
                # Bug fix: the original used 2000 as a stand-in for
                # "no edge", which silently corrupts results whenever a
                # real shortest path exceeds 2000; np.inf is the correct
                # sentinel and behaves properly under the relaxation below.
                distances[i][j] = np.inf
                roads[i][j] = -1
    # Standard O(N^3) Floyd-Warshall relaxation.
    for k in range(N):
        for i in range(N):
            for j in range(N):
                if distances[i][j] > distances[i][k] + distances[k][j]:
                    distances[i][j] = distances[i][k] + distances[k][j]
                    roads[i][j] = roads[k][j]
    return (roads, distances, keys, nodes)
#Ahora debemos encontrar la ruta usada por nuestro algoritmo
#Para esto definimos el Nodo PUCP y el nodo de la casa de los integrantes
def get_route(name, roads, distances, keys, nodes):
    """Reconstruct the shortest route from campus (PUCP) to a member's home.

    Args:
        name: group-member name, resolved to a node id via getAddress().
        roads, distances, keys, nodes: outputs of Floyd_Warshall().

    Returns:
        (total distance, list of node ids from home to campus).
    """
    campus = '1843102399'
    home = getAddress(name)
    best_distance = distances[keys[campus]][keys[home]]
    path = [int(home), int(campus)]
    start = keys[home]
    cursor = keys[campus]
    # Walk the predecessor matrix backwards from campus towards home,
    # inserting intermediate nodes right after the starting point.
    while True:
        prev = int(roads[start][cursor])
        if prev == start:
            break
        path.insert(1, int(nodes[prev]))
        cursor = prev
    return (best_distance, path)
| [
"numpy.zeros",
"networkx.number_of_nodes",
"networkx.read_graphml"
] | [((181, 212), 'networkx.read_graphml', 'nx.read_graphml', (['"""PUCP.graphml"""'], {}), "('PUCP.graphml')\n", (196, 212), True, 'import networkx as nx\n'), ((363, 385), 'networkx.number_of_nodes', 'nx.number_of_nodes', (['G1'], {}), '(G1)\n', (381, 385), True, 'import networkx as nx\n'), ((483, 499), 'numpy.zeros', 'np.zeros', (['(N, N)'], {}), '((N, N))\n', (491, 499), True, 'import numpy as np\n'), ((508, 524), 'numpy.zeros', 'np.zeros', (['(N, N)'], {}), '((N, N))\n', (516, 524), True, 'import numpy as np\n')] |
import os
import sys
import random
import warnings
import math
import numpy as np
import pylab
import scipy.ndimage as ndi
from concurrent.futures import ThreadPoolExecutor
import PIL
from PIL import Image, ImageDraw
from tqdm import tqdm
def autoinvert(image):
    """Normalize the polarity of a grayscale image with values in [0, 1].

    If pixels near white (> 0.9) outnumber pixels near black (< 0.1),
    the image is inverted; otherwise it is returned unchanged.
    """
    assert np.amin(image) >= 0
    assert np.amax(image) <= 1
    bright = np.sum(image > 0.9)
    dark = np.sum(image < 0.1)
    return 1 - image if bright > dark else image
def zerooneimshow(img):
    """Display a float image in [0, 1] by scaling to 8-bit and opening a viewer."""
    as_bytes = (img * 255).astype(np.uint8)
    Image.fromarray(as_bytes).show()
    return
#
# random geometric transformations
#
def random_transform(translation=(-0.05, 0.05), rotation=(-2, 2), scale=(-0.1, 0.1), aniso=(-0.1, 0.1)):
    """Draw random geometric-transform parameters for transform_image().

    All arguments are (lo, hi) ranges; rotation is sampled in degrees and
    returned in radians, scale and anisotropy are sampled as powers of 10.
    """
    dx = random.uniform(*translation)
    dy = random.uniform(*translation)
    degrees = random.uniform(*rotation)
    radians = degrees * np.pi / 180.0
    scale_factor = 10 ** random.uniform(*scale)
    aniso_factor = 10 ** random.uniform(*aniso)
    return dict(angle=radians, scale=scale_factor, aniso=aniso_factor, translation=(dx, dy))
def transform_image(image, angle=0.0, scale=1.0, aniso=1.0, translation=(0, 0), order=1):
    """Apply rotation, scaling, anisotropy and translation to a 2-D image.

    The affine matrix is built as (anisotropic scaling) . (rotation) and
    applied around the image center via ndi.affine_transform; output is
    float32 with 'nearest' boundary handling.
    """
    dx, dy = translation
    inv_scale = 1.0 / scale
    cos_a = np.cos(angle)
    sin_a = np.sin(angle)
    scaling = np.array([[inv_scale / aniso, 0], [0, inv_scale * aniso]], 'f')
    rotation = np.array([[cos_a, -sin_a], [sin_a, cos_a]], 'f')
    matrix = np.dot(scaling, rotation)
    h, w = image.shape
    center = np.array([h, w]) / 2.0
    offset = center - np.dot(matrix, center) + np.array([dx * h, dy * w])
    return ndi.affine_transform(image, matrix, offset=offset, order=order, mode="nearest", output=np.dtype("f"))
#
# random distortions
#
def bounded_gaussian_noise(shape, sigma, maxdelta):
    """Generate a smooth 2-D displacement field bounded by [-maxdelta, maxdelta].

    Returns an array of shape (2, n, m): per-axis offsets obtained by
    Gaussian-smoothing uniform noise and rescaling it to the target range.

    Fix: uses np.random.rand directly instead of the deprecated pylab shim
    (pylab.rand is an alias of the same numpy function, so the RNG stream
    is unchanged).
    """
    n, m = shape
    deltas = np.random.rand(2, n, m)
    # Smooth only along the spatial axes, not across the two components.
    deltas = ndi.gaussian_filter(deltas, (0, sigma, sigma))
    deltas -= np.amin(deltas)
    deltas /= np.amax(deltas)
    deltas = (2 * deltas - 1) * maxdelta
    return deltas
def distort_with_noise(image, deltas, order=1):
    """Warp `image` by the displacement field `deltas` (shape (2, h, w)).

    Each pixel is resampled from its own position plus the corresponding
    (row, column) offset, with reflective boundary handling.

    Bug fix: the original did ``deltas += xy``, mutating the caller's
    array in place; the sampling coordinates are now built in a fresh
    array so `deltas` is left untouched.
    """
    assert deltas.shape[0] == 2
    assert image.shape == deltas.shape[1:], (image.shape, deltas.shape)
    n, m = image.shape
    xy = np.transpose(np.array(np.meshgrid(
        range(n), range(m))), axes=[0, 2, 1])
    coords = deltas + xy
    return ndi.map_coordinates(image, coords, order=order, mode="reflect")
def noise_distort1d(shape, sigma=100.0, magnitude=100.0):
    """Build a (2, h, w) displacement field for column-wise vertical warping.

    A 1-D Gaussian-smoothed noise curve (one value per column, peak scaled
    to `magnitude`) is used as the first-axis offset for every row; the
    second-axis component is zero.

    Fix: uses np.random.randn directly instead of the deprecated pylab shim
    (pylab.randn is an alias of the same numpy function).
    """
    h, w = shape
    curve = ndi.gaussian_filter(np.random.randn(w), sigma)
    curve *= magnitude / np.amax(abs(curve))
    dys = np.array([curve] * h)
    return np.array([dys, np.zeros((h, w))])
#
# mass preserving blur
#
def percent_black(image):
    """Return the percentage of pixels darker than 0.5."""
    total = np.prod(image.shape)
    dark = np.sum(image < 0.5)
    return dark * 100.0 / total
def binary_blur(image, sigma, noise=0.0):
    """Blur a binary image while roughly preserving its ink mass.

    The blurred image is re-thresholded at the percentile matching the
    original fraction of black pixels, so about the same number of pixels
    remain black.  Optional Gaussian noise is added before thresholding.

    Fix: uses np.random.randn directly instead of the deprecated pylab shim
    (pylab.randn is an alias of the same numpy function).
    """
    p = percent_black(image)
    blurred = ndi.gaussian_filter(image, sigma)
    if noise > 0:
        blurred += np.random.randn(*blurred.shape) * noise
    t = np.percentile(blurred, p)
    return np.array(blurred > t, 'f')
#
# multiscale noise
#
def make_noise_at_scale(shape, scale):
    """Uniform noise generated at a coarser grid and zoomed up to `shape`.

    Fix: uses np.random.rand directly instead of the deprecated pylab shim
    (pylab.rand is an alias of the same numpy function).
    """
    h, w = shape
    h0, w0 = int(h / scale + 1), int(w / scale + 1)
    data = np.random.rand(h0, w0)
    with warnings.catch_warnings():
        # ndi.zoom can warn about size rounding; the result is cropped anyway.
        warnings.simplefilter("ignore")
        result = ndi.zoom(data, scale)
    return result[:h, :w]
def make_multiscale_noise(shape, scales, weights=None, limits=(0.0, 1.0)):
    """Weighted sum of noise fields at several scales, rescaled into `limits`.

    NOTE(review): the accumulator is seeded with a field at scales[0] and
    the loop then iterates over *all* scales including the first, so the
    first scale effectively contributes twice — this matches the original
    behaviour; confirm it is intentional before changing.
    """
    if weights is None:
        weights = [1.0] * len(scales)
    result = make_noise_at_scale(shape, scales[0]) * weights[0]
    for scale, weight in zip(scales, weights):
        result += make_noise_at_scale(shape, scale) * weight
    lo, hi = limits
    result -= np.amin(result)
    result /= np.amax(result)
    result *= hi - lo
    result += lo
    return result
def make_multiscale_noise_uniform(shape, srange=(1.0, 100.0), nscales=4, limits=(0.0, 1.0)):
    """Multiscale noise with `nscales` scales spread log-uniformly over `srange`."""
    log_lo, log_hi = np.log10(srange[0]), np.log10(srange[1])
    # Accumulated uniform draws, normalized to [0, 1], give monotone scales.
    scales = np.add.accumulate(np.random.uniform(size=nscales))
    scales -= np.amin(scales)
    scales /= np.amax(scales)
    scales = scales * (log_hi - log_lo) + log_lo
    scales = 10 ** scales
    weights = 2.0 * np.random.uniform(size=nscales)
    return make_multiscale_noise(shape, scales, weights=weights, limits=limits)
#
# random blobs
#
def random_blobs(shape, blobdensity, size, roughness=2.0):
    """Random soft-blob mask: seed points, grow disks of radius `size`,
    smooth, and modulate with smoothed noise; returns a binary float mask.

    Fixes: uses np.random.rand directly instead of the deprecated pylab
    shim (same numpy function, identical RNG stream), and drops the
    Python-2 compatibility import of `builtins.range`, which is a no-op
    on Python 3.
    """
    from random import randint
    h, w = shape
    numblobs = int(blobdensity * w * h)
    mask = np.zeros((h, w), 'i')
    for _ in range(numblobs):
        mask[randint(0, h - 1), randint(0, w - 1)] = 1
    # Distance-to-seed thresholding grows each seed into a disk.
    dt = ndi.distance_transform_edt(1 - mask)
    mask = np.array(dt < size, 'f')
    mask = ndi.gaussian_filter(mask, size / (2 * roughness))
    mask -= np.amin(mask)
    mask /= np.amax(mask)
    noise = np.random.rand(h, w)
    noise = ndi.gaussian_filter(noise, size / (2 * roughness))
    noise -= np.amin(noise)
    noise /= np.amax(noise)
    return np.array(mask * noise > 0.5, 'f')
def random_blotches(image, fgblobs, bgblobs, fgscale=10, bgscale=10):
    """Overlay random foreground blobs and punch random background holes."""
    fg_mask = random_blobs(image.shape, fgblobs, fgscale)
    bg_mask = random_blobs(image.shape, bgblobs, bgscale)
    blotched = np.maximum(image, fg_mask)
    return np.minimum(blotched, 1 - bg_mask)
#
# random fibers
#
def make_fiber(l, a, stepsize=0.5):
    """Random-walk fiber: `l` steps with Cauchy-distributed turning angles.

    Returns an (l, 2) array of accumulated positions; the first angle gets
    a uniformly random global orientation offset.

    Fix: uses np.random.rand directly instead of the deprecated pylab shim
    (pylab.rand is an alias of the same numpy function).
    """
    angles = np.random.standard_cauchy(l) * a
    angles[0] += 2 * np.pi * np.random.rand()
    angles = np.add.accumulate(angles)
    xs = np.add.accumulate(np.cos(angles) * stepsize)
    ys = np.add.accumulate(np.sin(angles) * stepsize)
    return np.array([xs, ys]).transpose((1, 0))
def make_fibrous_image(shape, nfibers=300, l=300, a=0.2, stepsize=0.5, limits=(0.1, 1.0), blur=1.0):
    """Render many random fibers at random intensities, blur, rescale to `limits`.

    Fix: uses np.random.rand directly instead of the deprecated pylab shim
    (pylab.rand is an alias of the same numpy function).
    """
    h, w = shape
    lo, hi = limits
    result = np.zeros(shape)
    for _ in range(nfibers):
        v = np.random.rand() * (hi - lo) + lo
        fiber = make_fiber(l, a, stepsize=stepsize)
        # Random anchor point; clip so the whole fiber stays in-bounds.
        y, x = random.randint(0, h - 1), random.randint(0, w - 1)
        fiber[:, 0] += y
        fiber[:, 0] = np.clip(fiber[:, 0], 0, h - .1)
        fiber[:, 1] += x
        fiber[:, 1] = np.clip(fiber[:, 1], 0, w - .1)
        for fy, fx in fiber:
            result[int(fy), int(fx)] = v
    result = ndi.gaussian_filter(result, blur)
    result -= np.amin(result)
    result /= np.amax(result)
    result *= hi - lo
    result += lo
    return result
#
# print-like degradation with multiscale noise
#
def printlike_multiscale(image, blur=0.5, blotches=5e-5, paper_range=(0.8, 1.0), ink_range=(0.0, 0.2)):
    """Simulate a printed page: a blotched ink mask composited over noisy paper."""
    selector = autoinvert(image)
    selector = random_blotches(selector, 2 * blotches, blotches)
    paper = make_multiscale_noise_uniform(image.shape, limits=paper_range)
    ink = make_multiscale_noise_uniform(image.shape, limits=ink_range)
    blurred = ndi.gaussian_filter(selector, blur)
    return blurred * ink + (1 - blurred) * paper
def printlike_fibrous(image, blur=0.5, blotches=5e-5, paper_range=(0.8, 1.0), ink_range=(0.0, 0.2)):
    """Like printlike_multiscale, but the paper texture also carries fibers."""
    selector = autoinvert(image)
    selector = random_blotches(selector, 2 * blotches, blotches)
    paper = make_multiscale_noise(image.shape, [1.0, 5.0, 10.0, 50.0],
                                  weights=[1.0, 0.3, 0.5, 0.3], limits=paper_range)
    paper -= make_fibrous_image(image.shape, 300, 500, 0.01, limits=(0.0, 0.25), blur=0.5)
    ink = make_multiscale_noise(image.shape, [1.0, 5.0, 10.0, 50.0], limits=ink_range)
    blurred = ndi.gaussian_filter(selector, blur)
    return blurred * ink + (1 - blurred) * paper
def add_frame(img):
    """Randomly paste the image onto a larger canvas and draw frame lines.

    Simulates scanned-page borders: with weighted probabilities a dark line
    is drawn above, below, left of, right of, or on both sides of the
    pasted image, and the canvas is scaled back to the input size.

    Bug fix: ``Image.resize`` returns a resized *copy* — it does not resize
    in place.  The original discarded the result and returned the enlarged
    canvas; the resized image is now assigned back so the output keeps the
    input's (w, h) size.
    """
    if isinstance(img, np.ndarray):
        img = Image.fromarray(img)
    # no_aug : up : down : left : right : left&right = 2:1:1:3:3:1
    random_list = ['no_aug', 'no_aug',
                   'up', 'down',
                   'left', 'right',
                   'left', 'right',
                   'left', 'right',
                   'left&right']
    choice = random.choice(random_list)
    if choice == 'no_aug':
        return img
    w, h = img.size
    expand_ratio = random.uniform(1.1, 1.3)
    new_w = int(w * expand_ratio)
    new_h = int(h * expand_ratio)
    new_img = Image.new(img.mode, (new_w, new_h), 255)  # 0 - black, 255 - white
    draw = ImageDraw.Draw(new_img)
    if choice == 'up':
        # Image at the bottom edge; line somewhere in the top margin.
        new_img.paste(img, ((new_w - w) // 2, new_h - h))
        line_thick = random.randint(3, 10)
        line_height = random.randint(line_thick, new_h - h - line_thick)
        draw.line((0, line_height, new_w, line_height), fill=0, width=line_thick)
    if choice == 'down':
        new_img.paste(img, ((new_w - w) // 2, 0))
        line_thick = random.randint(3, 10)
        line_height = random.randint(h + line_thick, new_h - line_thick)
        draw.line((0, line_height, new_w, line_height), fill=0, width=line_thick)
    if choice == 'left':
        new_img.paste(img, (new_w - w, (new_h - h) // 2))
        line_thick = random.randint(3, 10)
        line_width = random.randint(line_thick, new_w - w - line_thick)
        draw.line((line_width, 0, line_width, new_h), fill=0, width=line_thick)
    if choice == 'right':
        new_img.paste(img, (0, (new_h - h) // 2))
        line_thick = random.randint(3, 10)
        line_width = random.randint(w + line_thick, new_w - line_thick)
        draw.line((line_width, 0, line_width, new_h), fill=0, width=line_thick)
    if choice == 'left&right':
        new_img.paste(img, ((new_w - w) // 2, (new_h - h) // 2))
        line_thick = random.randint(3, 10)
        left_line_width = random.randint(line_thick, (new_w - w) // 2 - line_thick)
        draw.line((left_line_width, 0, left_line_width, new_h), fill=0, width=line_thick)
        line_thick = random.randint(3, 10)
        right_line_width = random.randint((new_w - w) // 2 + w + line_thick, new_w - line_thick)
        draw.line((right_line_width, 0, right_line_width, new_h), fill=0, width=line_thick)
    new_img = new_img.resize((w, h), Image.BICUBIC)  # fix: keep the returned copy
    return new_img
def ocrodeg_augment(img):
    """Apply a randomized chain of ocrodeg-style degradations to a glyph image.

    The input (PIL image or ndarray) is normalized to floats in [0, 1],
    optionally geometrically distorted, optionally binary-blurred (branch
    currently disabled), then optionally given a print-like texture, and
    finally returned as a uint8 PIL image.
    """
    arr = img if isinstance(img, np.ndarray) else np.array(img)
    arr = np.clip(arr / 255, 0.0, 1.0)
    # `applied` counts how many heavy augmentations already ran; each one
    # shifts probability mass away from the final texture step.
    applied = 0
    # 50% chance of a smooth geometric distortion.
    if random.random() < 0.5:
        deltas = bounded_gaussian_noise(
            shape=arr.shape,
            sigma=random.uniform(12.0, 20.0),
            maxdelta=random.uniform(3.0, 5.0),
        )
        arr = distort_with_noise(arr, deltas=deltas)
        applied += 1
    arr = np.clip(arr, 0.0, 1.0)
    # NOTE(review): the 0.0 threshold makes this branch unreachable (binary
    # blur is disabled), but random.random() is still drawn so the RNG
    # sequence matches the original implementation.
    if random.random() < 0.0:
        arr = binary_blur(
            arr,
            sigma=random.uniform(0.5, 0.7),
            noise=random.uniform(0.05, 0.1),
        )
        applied += 1
    arr = np.clip(arr, 0.0, 1.0)
    # Texture step: with no prior augmentation, 50% multiscale / 50% fibrous;
    # each prior augmentation moves 15% of the mass toward "leave as is".
    roll = random.random()
    if roll < 0.5 - applied * 0.15:
        arr = printlike_multiscale(arr, blur=0.5)
    elif roll < 1 - applied * 0.15:
        arr = printlike_fibrous(arr)
    arr = np.clip(arr, 0.0, 1.0)
    return Image.fromarray((arr * 255).astype(np.uint8))
def add_noise(img, generate_ratio=0.003, generate_size=0.006):
    """Sprinkle random speckle noise over a single-channel image.

    Two passes are made: circular blobs kept fully inside the image, then
    (with doubled maximum radius) square patches that may hug the border.
    Each pass stops once its pixel budget (h*w*generate_ratio) is spent.

    Args:
        img: PIL image or 2-D uint8 ndarray (grayscale; mutated in place
            when an ndarray is passed).
        generate_ratio: fraction of pixels used as the per-pass budget.
        generate_size: blob radius cap as a fraction of the short side.

    Returns:
        A PIL image built from the noised array.
    """
    if not isinstance(img, np.ndarray):
        img = np.array(img)
    h, w = img.shape
    r_max = max(3, int(min(h, w) * generate_size))
    budget = int(h * w * generate_ratio)

    def radius_pool(top):
        # Bias toward small radii: radius r appears (top - r + 1) times.
        return [r for r in range(1, top + 1) for _ in range(top - r + 1)]

    def dist(ax, ay, bx, by):
        return math.sqrt((ax - bx) ** 2 + (ay - by) ** 2)

    # Pass 1: circular blobs, centers chosen so the disc fits inside.
    pool = radius_pool(r_max)
    spent = 0
    while True:
        r = random.choice(pool)
        cx = random.randint(r, w - 1 - r)
        cy = random.randint(r, h - 1 - r)
        for x in range(cx - r, cx + r):
            for y in range(cy - r, cy + r):
                if dist(x, y, cx, cy) < r:
                    if random.random() < 0.6:
                        img[y][x] = random.randint(0, 255)
        spent += 2 * r
        if spent >= budget:
            break

    # Pass 2: square patches with twice the maximum size.
    pool = radius_pool(r_max * 2)
    spent = 0
    while True:
        r = random.choice(pool)
        cx = random.randint(0, w - 1 - r)
        cy = random.randint(0, h - 1 - r)
        for x in range(cx + 1, cx + r):
            for y in range(cy + 1, cy + r):
                if random.random() < 0.6:
                    img[y][x] = random.randint(0, 255)
        spent += r
        if spent >= budget:
            break
    return Image.fromarray(img)
def augment(raw_path, aug_path, img_name):
    """Run the full augmentation chain on one image and save the result.

    Loads `img_name` from `raw_path`, applies add_frame, ocrodeg_augment and
    add_noise in sequence, and writes the output under the same name in
    `aug_path` (directory assumed to exist).
    """
    src = os.path.join(raw_path, img_name)
    dst = os.path.join(aug_path, img_name)
    sample = Image.open(src)
    sample = add_frame(sample)
    sample = ocrodeg_augment(sample)
    sample = add_noise(sample)
    sample.save(dst)
def threadpool_aug():
    """Augment every sample image with a thread pool, mirroring the tree.

    Walks `caokai_fonts_samples/<char>/<img>` and writes augmented copies to
    `caokai_fonts_aug_samples/<char>/<img>`, creating directories as needed.
    Blocks until all submitted jobs finish.
    """
    raw_path = 'caokai_fonts_samples/'
    aug_path = 'caokai_fonts_aug_samples/'
    if not os.path.isdir(aug_path):
        os.mkdir(aug_path)
    # The context manager calls shutdown(wait=True) on exit, matching the
    # original explicit shutdown. NOTE(review): submitted futures are never
    # inspected, so exceptions inside augment() are silently dropped — same
    # as the original behavior.
    with ThreadPoolExecutor(max_workers=8, thread_name_prefix="aug_") as pool:
        for char in os.listdir(raw_path):
            char_path = os.path.join(raw_path, char)
            aug_char_path = os.path.join(aug_path, char)
            if not os.path.isdir(aug_char_path):
                os.mkdir(aug_char_path)
            for name in os.listdir(char_path):
                pool.submit(augment, char_path, aug_char_path, name)
if __name__ == '__main__':
'''
root_path = '楷'
imgs = os.listdir(root_path)
for img in imgs:
img_path = os.path.join(root_path, img)
img = Image.open(img_path)
add_frame(img).show()
'''
threadpool_aug() | [
"numpy.clip",
"numpy.prod",
"numpy.log10",
"PIL.Image.new",
"math.sqrt",
"numpy.array",
"builtins.range",
"PIL.ImageDraw.Draw",
"scipy.ndimage.gaussian_filter",
"numpy.sin",
"scipy.ndimage.zoom",
"os.listdir",
"numpy.dot",
"os.path.isdir",
"os.mkdir",
"warnings.simplefilter",
"numpy.... | [((698, 726), 'random.uniform', 'random.uniform', (['*translation'], {}), '(*translation)\n', (712, 726), False, 'import random\n'), ((736, 764), 'random.uniform', 'random.uniform', (['*translation'], {}), '(*translation)\n', (750, 764), False, 'import random\n'), ((777, 802), 'random.uniform', 'random.uniform', (['*rotation'], {}), '(*rotation)\n', (791, 802), False, 'import random\n'), ((1145, 1158), 'numpy.cos', 'np.cos', (['angle'], {}), '(angle)\n', (1151, 1158), True, 'import numpy as np\n'), ((1167, 1180), 'numpy.sin', 'np.sin', (['angle'], {}), '(angle)\n', (1173, 1180), True, 'import numpy as np\n'), ((1190, 1245), 'numpy.array', 'np.array', (['[[scale / aniso, 0], [0, scale * aniso]]', '"""f"""'], {}), "([[scale / aniso, 0], [0, scale * aniso]], 'f')\n", (1198, 1245), True, 'import numpy as np\n'), ((1254, 1286), 'numpy.array', 'np.array', (['[[c, -s], [s, c]]', '"""f"""'], {}), "([[c, -s], [s, c]], 'f')\n", (1262, 1286), True, 'import numpy as np\n'), ((1295, 1308), 'numpy.dot', 'np.dot', (['sm', 'm'], {}), '(sm, m)\n', (1301, 1308), True, 'import numpy as np\n'), ((1630, 1649), 'pylab.rand', 'pylab.rand', (['(2)', 'n', 'm'], {}), '(2, n, m)\n', (1640, 1649), False, 'import pylab\n'), ((1663, 1709), 'scipy.ndimage.gaussian_filter', 'ndi.gaussian_filter', (['deltas', '(0, sigma, sigma)'], {}), '(deltas, (0, sigma, sigma))\n', (1682, 1709), True, 'import scipy.ndimage as ndi\n'), ((1724, 1739), 'numpy.amin', 'np.amin', (['deltas'], {}), '(deltas)\n', (1731, 1739), True, 'import numpy as np\n'), ((1754, 1769), 'numpy.amax', 'np.amax', (['deltas'], {}), '(deltas)\n', (1761, 1769), True, 'import numpy as np\n'), ((2124, 2187), 'scipy.ndimage.map_coordinates', 'ndi.map_coordinates', (['image', 'deltas'], {'order': 'order', 'mode': '"""reflect"""'}), "(image, deltas, order=order, mode='reflect')\n", (2143, 2187), True, 'import scipy.ndimage as ndi\n'), ((2375, 2396), 'numpy.array', 'np.array', (['([noise] * h)'], {}), '([noise] * h)\n', (2383, 
2396), True, 'import numpy as np\n'), ((2526, 2546), 'numpy.prod', 'np.prod', (['image.shape'], {}), '(image.shape)\n', (2533, 2546), True, 'import numpy as np\n'), ((2555, 2574), 'numpy.sum', 'np.sum', (['(image < 0.5)'], {}), '(image < 0.5)\n', (2561, 2574), True, 'import numpy as np\n'), ((2687, 2720), 'scipy.ndimage.gaussian_filter', 'ndi.gaussian_filter', (['image', 'sigma'], {}), '(image, sigma)\n', (2706, 2720), True, 'import scipy.ndimage as ndi\n'), ((2802, 2827), 'numpy.percentile', 'np.percentile', (['blurred', 'p'], {}), '(blurred, p)\n', (2815, 2827), True, 'import numpy as np\n'), ((2839, 2865), 'numpy.array', 'np.array', (['(blurred > t)', '"""f"""'], {}), "(blurred > t, 'f')\n", (2847, 2865), True, 'import numpy as np\n'), ((3011, 3029), 'pylab.rand', 'pylab.rand', (['h0', 'w0'], {}), '(h0, w0)\n', (3021, 3029), False, 'import pylab\n'), ((3498, 3513), 'numpy.amin', 'np.amin', (['result'], {}), '(result)\n', (3505, 3513), True, 'import numpy as np\n'), ((3528, 3543), 'numpy.amax', 'np.amax', (['result'], {}), '(result)\n', (3535, 3543), True, 'import numpy as np\n'), ((3765, 3796), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': 'nscales'}), '(size=nscales)\n', (3782, 3796), True, 'import numpy as np\n'), ((3810, 3835), 'numpy.add.accumulate', 'np.add.accumulate', (['scales'], {}), '(scales)\n', (3827, 3835), True, 'import numpy as np\n'), ((3850, 3865), 'numpy.amin', 'np.amin', (['scales'], {}), '(scales)\n', (3857, 3865), True, 'import numpy as np\n'), ((3880, 3895), 'numpy.amax', 'np.amax', (['scales'], {}), '(scales)\n', (3887, 3895), True, 'import numpy as np\n'), ((4326, 4347), 'numpy.zeros', 'np.zeros', (['(h, w)', '"""i"""'], {}), "((h, w), 'i')\n", (4334, 4347), True, 'import numpy as np\n'), ((4361, 4376), 'builtins.range', 'range', (['numblobs'], {}), '(numblobs)\n', (4366, 4376), False, 'from builtins import range\n'), ((4442, 4478), 'scipy.ndimage.distance_transform_edt', 'ndi.distance_transform_edt', (['(1 - mask)'], {}), 
'(1 - mask)\n', (4468, 4478), True, 'import scipy.ndimage as ndi\n'), ((4490, 4514), 'numpy.array', 'np.array', (['(dt < size)', '"""f"""'], {}), "(dt < size, 'f')\n", (4498, 4514), True, 'import numpy as np\n'), ((4526, 4575), 'scipy.ndimage.gaussian_filter', 'ndi.gaussian_filter', (['mask', '(size / (2 * roughness))'], {}), '(mask, size / (2 * roughness))\n', (4545, 4575), True, 'import scipy.ndimage as ndi\n'), ((4588, 4601), 'numpy.amin', 'np.amin', (['mask'], {}), '(mask)\n', (4595, 4601), True, 'import numpy as np\n'), ((4614, 4627), 'numpy.amax', 'np.amax', (['mask'], {}), '(mask)\n', (4621, 4627), True, 'import numpy as np\n'), ((4640, 4656), 'pylab.rand', 'pylab.rand', (['h', 'w'], {}), '(h, w)\n', (4650, 4656), False, 'import pylab\n'), ((4669, 4719), 'scipy.ndimage.gaussian_filter', 'ndi.gaussian_filter', (['noise', '(size / (2 * roughness))'], {}), '(noise, size / (2 * roughness))\n', (4688, 4719), True, 'import scipy.ndimage as ndi\n'), ((4733, 4747), 'numpy.amin', 'np.amin', (['noise'], {}), '(noise)\n', (4740, 4747), True, 'import numpy as np\n'), ((4761, 4775), 'numpy.amax', 'np.amax', (['noise'], {}), '(noise)\n', (4768, 4775), True, 'import numpy as np\n'), ((4787, 4820), 'numpy.array', 'np.array', (['(mask * noise > 0.5)', '"""f"""'], {}), "(mask * noise > 0.5, 'f')\n", (4795, 4820), True, 'import numpy as np\n'), ((5212, 5237), 'numpy.add.accumulate', 'np.add.accumulate', (['angles'], {}), '(angles)\n', (5229, 5237), True, 'import numpy as np\n'), ((5555, 5570), 'numpy.zeros', 'np.zeros', (['shape'], {}), '(shape)\n', (5563, 5570), True, 'import numpy as np\n'), ((5584, 5598), 'builtins.range', 'range', (['nfibers'], {}), '(nfibers)\n', (5589, 5598), False, 'from builtins import range\n'), ((5997, 6030), 'scipy.ndimage.gaussian_filter', 'ndi.gaussian_filter', (['result', 'blur'], {}), '(result, blur)\n', (6016, 6030), True, 'import scipy.ndimage as ndi\n'), ((6045, 6060), 'numpy.amin', 'np.amin', (['result'], {}), '(result)\n', (6052, 6060), 
True, 'import numpy as np\n'), ((6075, 6090), 'numpy.amax', 'np.amax', (['result'], {}), '(result)\n', (6082, 6090), True, 'import numpy as np\n'), ((6633, 6668), 'scipy.ndimage.gaussian_filter', 'ndi.gaussian_filter', (['selector', 'blur'], {}), '(selector, blur)\n', (6652, 6668), True, 'import scipy.ndimage as ndi\n'), ((7254, 7289), 'scipy.ndimage.gaussian_filter', 'ndi.gaussian_filter', (['selector', 'blur'], {}), '(selector, blur)\n', (7273, 7289), True, 'import scipy.ndimage as ndi\n'), ((7746, 7772), 'random.choice', 'random.choice', (['random_list'], {}), '(random_list)\n', (7759, 7772), False, 'import random\n'), ((7858, 7882), 'random.uniform', 'random.uniform', (['(1.1)', '(1.3)'], {}), '(1.1, 1.3)\n', (7872, 7882), False, 'import random\n'), ((7965, 8005), 'PIL.Image.new', 'Image.new', (['img.mode', '(new_w, new_h)', '(255)'], {}), '(img.mode, (new_w, new_h), 255)\n', (7974, 8005), False, 'from PIL import Image, ImageDraw\n'), ((8043, 8066), 'PIL.ImageDraw.Draw', 'ImageDraw.Draw', (['new_img'], {}), '(new_img)\n', (8057, 8066), False, 'from PIL import Image, ImageDraw\n'), ((9910, 9932), 'numpy.clip', 'np.clip', (['img', '(0.0)', '(1.0)'], {}), '(img, 0.0, 1.0)\n', (9917, 9932), True, 'import numpy as np\n'), ((10314, 10336), 'numpy.clip', 'np.clip', (['img', '(0.0)', '(1.0)'], {}), '(img, 0.0, 1.0)\n', (10321, 10336), True, 'import numpy as np\n'), ((10578, 10600), 'numpy.clip', 'np.clip', (['img', '(0.0)', '(1.0)'], {}), '(img, 0.0, 1.0)\n', (10585, 10600), True, 'import numpy as np\n'), ((10800, 10815), 'random.random', 'random.random', ([], {}), '()\n', (10813, 10815), False, 'import random\n'), ((10978, 11000), 'numpy.clip', 'np.clip', (['img', '(0.0)', '(1.0)'], {}), '(img, 0.0, 1.0)\n', (10985, 11000), True, 'import numpy as np\n'), ((11051, 11071), 'PIL.Image.fromarray', 'Image.fromarray', (['img'], {}), '(img)\n', (11066, 11071), False, 'from PIL import Image, ImageDraw\n'), ((11378, 11397), 'builtins.range', 'range', (['(1)', '(R_max + 1)'], 
{}), '(1, R_max + 1)\n', (11383, 11397), False, 'from builtins import range\n'), ((12131, 12150), 'builtins.range', 'range', (['(1)', '(R_max + 1)'], {}), '(1, R_max + 1)\n', (12136, 12150), False, 'from builtins import range\n'), ((12665, 12685), 'PIL.Image.fromarray', 'Image.fromarray', (['img'], {}), '(img)\n', (12680, 12685), False, 'from PIL import Image, ImageDraw\n'), ((12761, 12793), 'os.path.join', 'os.path.join', (['raw_path', 'img_name'], {}), '(raw_path, img_name)\n', (12773, 12793), False, 'import os\n'), ((12809, 12841), 'os.path.join', 'os.path.join', (['aug_path', 'img_name'], {}), '(aug_path, img_name)\n', (12821, 12841), False, 'import os\n'), ((12852, 12872), 'PIL.Image.open', 'Image.open', (['img_path'], {}), '(img_path)\n', (12862, 12872), False, 'from PIL import Image, ImageDraw\n'), ((13174, 13234), 'concurrent.futures.ThreadPoolExecutor', 'ThreadPoolExecutor', ([], {'max_workers': '(8)', 'thread_name_prefix': '"""aug_"""'}), "(max_workers=8, thread_name_prefix='aug_')\n", (13192, 13234), False, 'from concurrent.futures import ThreadPoolExecutor\n'), ((13315, 13335), 'os.listdir', 'os.listdir', (['raw_path'], {}), '(raw_path)\n', (13325, 13335), False, 'import os\n'), ((277, 291), 'numpy.amin', 'np.amin', (['image'], {}), '(image)\n', (284, 291), True, 'import numpy as np\n'), ((308, 322), 'numpy.amax', 'np.amax', (['image'], {}), '(image)\n', (315, 322), True, 'import numpy as np\n'), ((335, 354), 'numpy.sum', 'np.sum', (['(image > 0.9)'], {}), '(image > 0.9)\n', (341, 354), True, 'import numpy as np\n'), ((357, 376), 'numpy.sum', 'np.sum', (['(image < 0.1)'], {}), '(image < 0.1)\n', (363, 376), True, 'import numpy as np\n'), ((855, 877), 'random.uniform', 'random.uniform', (['*scale'], {}), '(*scale)\n', (869, 877), False, 'import random\n'), ((896, 918), 'random.uniform', 'random.uniform', (['*aniso'], {}), '(*aniso)\n', (910, 918), False, 'import random\n'), ((1340, 1356), 'numpy.array', 'np.array', (['[w, h]'], {}), '([w, h])\n', (1348, 
1356), True, 'import numpy as np\n'), ((1390, 1416), 'numpy.array', 'np.array', (['[dx * w, dy * h]'], {}), '([dx * w, dy * h])\n', (1398, 1416), True, 'import numpy as np\n'), ((2297, 2311), 'pylab.randn', 'pylab.randn', (['w'], {}), '(w)\n', (2308, 2311), False, 'import pylab\n'), ((3039, 3064), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {}), '()\n', (3062, 3064), False, 'import warnings\n'), ((3074, 3105), 'warnings.simplefilter', 'warnings.simplefilter', (['"""ignore"""'], {}), "('ignore')\n", (3095, 3105), False, 'import warnings\n'), ((3123, 3144), 'scipy.ndimage.zoom', 'ndi.zoom', (['data', 'scale'], {}), '(data, scale)\n', (3131, 3144), True, 'import scipy.ndimage as ndi\n'), ((3711, 3730), 'numpy.log10', 'np.log10', (['srange[0]'], {}), '(srange[0])\n', (3719, 3730), True, 'import numpy as np\n'), ((3732, 3751), 'numpy.log10', 'np.log10', (['srange[1]'], {}), '(srange[1])\n', (3740, 3751), True, 'import numpy as np\n'), ((3981, 4012), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': 'nscales'}), '(size=nscales)\n', (3998, 4012), True, 'import numpy as np\n'), ((5021, 5042), 'numpy.maximum', 'np.maximum', (['image', 'fg'], {}), '(image, fg)\n', (5031, 5042), True, 'import numpy as np\n'), ((5124, 5152), 'numpy.random.standard_cauchy', 'np.random.standard_cauchy', (['l'], {}), '(l)\n', (5149, 5152), True, 'import numpy as np\n'), ((5186, 5198), 'pylab.rand', 'pylab.rand', ([], {}), '()\n', (5196, 5198), False, 'import pylab\n'), ((5807, 5839), 'numpy.clip', 'np.clip', (['fiber[:, 0]', '(0)', '(h - 0.1)'], {}), '(fiber[:, 0], 0, h - 0.1)\n', (5814, 5839), True, 'import numpy as np\n'), ((5886, 5918), 'numpy.clip', 'np.clip', (['fiber[:, 1]', '(0)', '(w - 0.1)'], {}), '(fiber[:, 1], 0, w - 0.1)\n', (5893, 5918), True, 'import numpy as np\n'), ((7433, 7453), 'PIL.Image.fromarray', 'Image.fromarray', (['img'], {}), '(img)\n', (7448, 7453), False, 'from PIL import Image, ImageDraw\n'), ((8178, 8199), 'random.randint', 'random.randint', 
(['(3)', '(10)'], {}), '(3, 10)\n', (8192, 8199), False, 'import random\n'), ((8222, 8272), 'random.randint', 'random.randint', (['line_thick', '(new_h - h - line_thick)'], {}), '(line_thick, new_h - h - line_thick)\n', (8236, 8272), False, 'import random\n'), ((8451, 8472), 'random.randint', 'random.randint', (['(3)', '(10)'], {}), '(3, 10)\n', (8465, 8472), False, 'import random\n'), ((8495, 8545), 'random.randint', 'random.randint', (['(h + line_thick)', '(new_h - line_thick)'], {}), '(h + line_thick, new_h - line_thick)\n', (8509, 8545), False, 'import random\n'), ((8732, 8753), 'random.randint', 'random.randint', (['(3)', '(10)'], {}), '(3, 10)\n', (8746, 8753), False, 'import random\n'), ((8775, 8825), 'random.randint', 'random.randint', (['line_thick', '(new_w - w - line_thick)'], {}), '(line_thick, new_w - w - line_thick)\n', (8789, 8825), False, 'import random\n'), ((9003, 9024), 'random.randint', 'random.randint', (['(3)', '(10)'], {}), '(3, 10)\n', (9017, 9024), False, 'import random\n'), ((9046, 9096), 'random.randint', 'random.randint', (['(w + line_thick)', '(new_w - line_thick)'], {}), '(w + line_thick, new_w - line_thick)\n', (9060, 9096), False, 'import random\n'), ((9294, 9315), 'random.randint', 'random.randint', (['(3)', '(10)'], {}), '(3, 10)\n', (9308, 9315), False, 'import random\n'), ((9342, 9399), 'random.randint', 'random.randint', (['line_thick', '((new_w - w) // 2 - line_thick)'], {}), '(line_thick, (new_w - w) // 2 - line_thick)\n', (9356, 9399), False, 'import random\n'), ((9511, 9532), 'random.randint', 'random.randint', (['(3)', '(10)'], {}), '(3, 10)\n', (9525, 9532), False, 'import random\n'), ((9560, 9629), 'random.randint', 'random.randint', (['((new_w - w) // 2 + w + line_thick)', '(new_w - line_thick)'], {}), '((new_w - w) // 2 + w + line_thick, new_w - line_thick)\n', (9574, 9629), False, 'import random\n'), ((9865, 9878), 'numpy.array', 'np.array', (['img'], {}), '(img)\n', (9873, 9878), True, 'import numpy as np\n'), ((9989, 
10004), 'random.random', 'random.random', ([], {}), '()\n', (10002, 10004), False, 'import random\n'), ((10384, 10399), 'random.random', 'random.random', ([], {}), '()\n', (10397, 10399), False, 'import random\n'), ((11206, 11219), 'numpy.array', 'np.array', (['img'], {}), '(img)\n', (11214, 11219), True, 'import numpy as np\n'), ((11497, 11551), 'math.sqrt', 'math.sqrt', (['((pA[0] - pB[0]) ** 2 + (pA[1] - pB[1]) ** 2)'], {}), '((pA[0] - pB[0]) ** 2 + (pA[1] - pB[1]) ** 2)\n', (11506, 11551), False, 'import math\n'), ((11593, 11626), 'random.choice', 'random.choice', (['random_choice_list'], {}), '(random_choice_list)\n', (11606, 11626), False, 'import random\n'), ((11647, 11675), 'random.randint', 'random.randint', (['R', '(w - 1 - R)'], {}), '(R, w - 1 - R)\n', (11661, 11675), False, 'import random\n'), ((11696, 11724), 'random.randint', 'random.randint', (['R', '(h - 1 - R)'], {}), '(R, h - 1 - R)\n', (11710, 11724), False, 'import random\n'), ((11742, 11777), 'builtins.range', 'range', (['(P_noise_x - R)', '(P_noise_x + R)'], {}), '(P_noise_x - R, P_noise_x + R)\n', (11747, 11777), False, 'from builtins import range\n'), ((12249, 12282), 'random.choice', 'random.choice', (['random_choice_list'], {}), '(random_choice_list)\n', (12262, 12282), False, 'import random\n'), ((12303, 12331), 'random.randint', 'random.randint', (['(0)', '(w - 1 - R)'], {}), '(0, w - 1 - R)\n', (12317, 12331), False, 'import random\n'), ((12352, 12380), 'random.randint', 'random.randint', (['(0)', '(h - 1 - R)'], {}), '(0, h - 1 - R)\n', (12366, 12380), False, 'import random\n'), ((12398, 12433), 'builtins.range', 'range', (['(P_noise_x + 1)', '(P_noise_x + R)'], {}), '(P_noise_x + 1, P_noise_x + R)\n', (12403, 12433), False, 'from builtins import range\n'), ((13247, 13270), 'os.path.isdir', 'os.path.isdir', (['aug_path'], {}), '(aug_path)\n', (13260, 13270), False, 'import os\n'), ((13280, 13298), 'os.mkdir', 'os.mkdir', (['aug_path'], {}), '(aug_path)\n', (13288, 13298), False, 
'import os\n'), ((13357, 13385), 'os.path.join', 'os.path.join', (['raw_path', 'char'], {}), '(raw_path, char)\n', (13369, 13385), False, 'import os\n'), ((13410, 13438), 'os.path.join', 'os.path.join', (['aug_path', 'char'], {}), '(aug_path, char)\n', (13422, 13438), False, 'import os\n'), ((13539, 13560), 'os.listdir', 'os.listdir', (['char_path'], {}), '(char_path)\n', (13549, 13560), False, 'import os\n'), ((503, 523), 'PIL.Image.fromarray', 'Image.fromarray', (['img'], {}), '(img)\n', (518, 523), False, 'from PIL import Image, ImageDraw\n'), ((1375, 1387), 'numpy.dot', 'np.dot', (['m', 'c'], {}), '(m, c)\n', (1381, 1387), True, 'import numpy as np\n'), ((1505, 1518), 'numpy.dtype', 'np.dtype', (['"""f"""'], {}), "('f')\n", (1513, 1518), True, 'import numpy as np\n'), ((2425, 2441), 'numpy.zeros', 'np.zeros', (['(h, w)'], {}), '((h, w))\n', (2433, 2441), True, 'import numpy as np\n'), ((2758, 2785), 'pylab.randn', 'pylab.randn', (['*blurred.shape'], {}), '(*blurred.shape)\n', (2769, 2785), False, 'import pylab\n'), ((5267, 5281), 'numpy.cos', 'np.cos', (['angles'], {}), '(angles)\n', (5273, 5281), True, 'import numpy as np\n'), ((5323, 5337), 'numpy.sin', 'np.sin', (['angles'], {}), '(angles)\n', (5329, 5337), True, 'import numpy as np\n'), ((5361, 5383), 'numpy.array', 'np.array', (['[coss, sins]'], {}), '([coss, sins])\n', (5369, 5383), True, 'import numpy as np\n'), ((5709, 5733), 'random.randint', 'random.randint', (['(0)', '(h - 1)'], {}), '(0, h - 1)\n', (5723, 5733), False, 'import random\n'), ((5735, 5759), 'random.randint', 'random.randint', (['(0)', '(w - 1)'], {}), '(0, w - 1)\n', (5749, 5759), False, 'import random\n'), ((11800, 11835), 'builtins.range', 'range', (['(P_noise_y - R)', '(P_noise_y + R)'], {}), '(P_noise_y - R, P_noise_y + R)\n', (11805, 11835), False, 'from builtins import range\n'), ((12456, 12491), 'builtins.range', 'range', (['(P_noise_y + 1)', '(P_noise_y + R)'], {}), '(P_noise_y + 1, P_noise_y + R)\n', (12461, 12491), False, 
'from builtins import range\n'), ((13454, 13482), 'os.path.isdir', 'os.path.isdir', (['aug_char_path'], {}), '(aug_char_path)\n', (13467, 13482), False, 'import os\n'), ((13496, 13519), 'os.mkdir', 'os.mkdir', (['aug_char_path'], {}), '(aug_char_path)\n', (13504, 13519), False, 'import os\n'), ((2058, 2066), 'builtins.range', 'range', (['n'], {}), '(n)\n', (2063, 2066), False, 'from builtins import range\n'), ((2068, 2076), 'builtins.range', 'range', (['m'], {}), '(m)\n', (2073, 2076), False, 'from builtins import range\n'), ((4391, 4408), 'random.randint', 'randint', (['(0)', '(h - 1)'], {}), '(0, h - 1)\n', (4398, 4408), False, 'from random import randint\n'), ((4410, 4427), 'random.randint', 'randint', (['(0)', '(w - 1)'], {}), '(0, w - 1)\n', (4417, 4427), False, 'from random import randint\n'), ((5612, 5624), 'pylab.rand', 'pylab.rand', ([], {}), '()\n', (5622, 5624), False, 'import pylab\n'), ((10469, 10493), 'random.uniform', 'random.uniform', (['(0.5)', '(0.7)'], {}), '(0.5, 0.7)\n', (10483, 10493), False, 'import random\n'), ((10513, 10538), 'random.uniform', 'random.uniform', (['(0.05)', '(0.1)'], {}), '(0.05, 0.1)\n', (10527, 10538), False, 'import random\n'), ((12512, 12527), 'random.random', 'random.random', ([], {}), '()\n', (12525, 12527), False, 'import random\n'), ((12567, 12589), 'random.randint', 'random.randint', (['(0)', '(255)'], {}), '(0, 255)\n', (12581, 12589), False, 'import random\n'), ((10161, 10187), 'random.uniform', 'random.uniform', (['(12.0)', '(20.0)'], {}), '(12.0, 20.0)\n', (10175, 10187), False, 'import random\n'), ((10214, 10238), 'random.uniform', 'random.uniform', (['(3.0)', '(5.0)'], {}), '(3.0, 5.0)\n', (10228, 10238), False, 'import random\n'), ((11924, 11939), 'random.random', 'random.random', ([], {}), '()\n', (11937, 11939), False, 'import random\n'), ((11983, 12005), 'random.randint', 'random.randint', (['(0)', '(255)'], {}), '(0, 255)\n', (11997, 12005), False, 'import random\n')] |
from __future__ import division, print_function, absolute_import
import argparse
import functools
import json
import logging
import math
import os
import random
import numpy as np
import pandas as pd
import sklearn.metrics as sm
import tensorflow as tf
import tqdm
try:
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)
except:
pass
import atom3d.shard.shard as sh
import examples.cnn3d.model as model
import examples.cnn3d.feature_ppi as feature_ppi
import examples.cnn3d.subgrid_gen as subgrid_gen
import examples.cnn3d.util as util
import dotenv as de
de.load_dotenv(de.find_dotenv(usecwd=True))
def compute_perf(results):
    """Compute binary-classification metrics from a results table.

    Args:
        results: mapping with 'true' and 'pred' entries castable via
            .astype(np.int8) — presumably pandas Series; verify against
            callers.

    Returns:
        Dict with average precision, AUROC, accuracy, balanced accuracy,
        and log loss (keys prefixed 'all_').
    """
    y_true = results['true'].astype(np.int8)
    y_pred = results['pred'].astype(np.int8)
    rounded = y_pred.round()
    return {
        'all_ap': sm.average_precision_score(y_true, y_pred),
        'all_auroc': sm.roc_auc_score(y_true, y_pred),
        'all_acc': sm.accuracy_score(y_true, rounded),
        'all_bal_acc': sm.balanced_accuracy_score(y_true, rounded),
        'all_loss': sm.log_loss(y_true, y_pred),
    }
def __stats(mode, df):
    """Log the performance metrics of `df` under the heading `mode`.

    `df` must be accepted by compute_perf, i.e. provide 'true' and 'pred'
    entries. Emits a single multi-line INFO record.
    """
    # Compute stats
    res = compute_perf(df)
    logging.info(
        '\n{:}\n'
        'Perf Metrics:\n'
        '    AP: {:.3f}\n'
        '    AUROC: {:.3f}\n'
        '    Accuracy: {:.3f}\n'
        '    Balanced Accuracy: {:.3f}\n'
        '    Log loss: {:.3f}'.format(
            mode,
            float(res["all_ap"]),
            float(res["all_auroc"]),
            float(res["all_acc"]),
            float(res["all_bal_acc"]),
            float(res["all_loss"])))
def compute_accuracy(true_y, predicted_y):
    """Element-wise correctness of predictions thresholded at 0.5.

    A prediction is correct when it falls on the same side of 0.5 as the
    label (values exactly at 0.5 count as the negative class). Returns a
    float32 tensor of 0.0/1.0 named 'accuracy'.
    """
    true_y = tf.cast(true_y, tf.float32)
    both_negative = tf.logical_and(
        tf.less_equal(predicted_y, 0.5),
        tf.less_equal(true_y, 0.5))
    both_positive = tf.logical_and(
        tf.greater(predicted_y, 0.5),
        tf.greater(true_y, 0.5))
    correct = tf.logical_or(both_negative, both_positive)
    return tf.cast(correct, tf.float32, name='accuracy')
# Construct model and loss
def conv_model(feature, target, is_training, conv_drop_rate, fc_drop_rate,
               top_nn_drop_rate, args):
    """Build the siamese 3D-CNN training graph.

    Args:
        feature: paired subgrid input tensor fed to model.siamese_model.
        target: binary label tensor (one column per example).
        is_training: bool placeholder toggling train-time behavior.
        conv_drop_rate, fc_drop_rate, top_nn_drop_rate: dropout placeholders.
        args: parsed CLI namespace (num_conv, num_final_fc_layers,
            use_batch_norm, no_dropout, top_nn_activation, grid_config).

    Returns:
        (logits, predict, loss, accuracy) tensors; `predict` and the loss op
        carry the names 'predict' and 'cross_entropy' used elsewhere.
    """
    num_conv = args.num_conv
    # Filter count doubles at each conv layer: 32, 64, 128, ...
    conv_filters = [32 * (2**n) for n in range(num_conv)]
    conv_kernel_size = 3
    # Pool after every other conv layer (alternating 0,1 flags).
    max_pool_positions = [0, 1]*int((num_conv+1)/2)
    max_pool_sizes = [2]*num_conv
    max_pool_strides = [2]*num_conv
    fc_units = [512]
    top_fc_units = [512]*args.num_final_fc_layers
    logits = model.siamese_model(
        feature,
        is_training,
        conv_drop_rate,
        fc_drop_rate,
        top_nn_drop_rate,
        conv_filters, conv_kernel_size,
        max_pool_positions,
        max_pool_sizes, max_pool_strides,
        fc_units,
        top_fc_units,
        batch_norm=args.use_batch_norm,
        dropout=not args.no_dropout,
        top_nn_activation=args.top_nn_activation)
    # Prediction: sigmoid probability rounded to a hard 0/1 decision.
    predict = tf.round(tf.nn.sigmoid(logits), name='predict')
    # Loss: positives up-weighted by the configured neg:pos ratio.
    with tf.variable_scope('loss'):
        loss = tf.nn.weighted_cross_entropy_with_logits(
            targets=tf.cast(target, tf.float32), logits=logits,
            pos_weight=args.grid_config.neg_to_pos_ratio)
        # We reweight the losses so that the mean is comparable across
        # different neg to pos ratios.
        batch_size = tf.cast(tf.shape(target)[0], tf.float32)
        num_pos = tf.count_nonzero(target, dtype=tf.float32)
        num_neg = batch_size - num_pos
        effective_weight = num_pos * args.grid_config.neg_to_pos_ratio + num_neg
        loss = loss / tf.cast(effective_weight, tf.float32) * batch_size
        # NOTE(review): indentation was lost in this copy; the reweighting
        # ops are assumed to sit inside the 'loss' variable scope — confirm
        # against any code that fetches ops by name (e.g. 'loss/cross_entropy').
        loss = tf.identity(loss, name='cross_entropy')
    # Accuracy
    accuracy = compute_accuracy(target, predict)
    return logits, predict, loss, accuracy
def batch_dataset_generator(gen, args, is_testing=False):
    """Wrap a feature generator in a batched, prefetching tf.data pipeline.

    The generator yields (structure_name, feature_grid, label) triples; the
    grid shape is derived from args.grid_config.

    Args:
        gen: zero-arg callable returning the example generator.
        args: parsed CLI namespace (grid_config, shuffle, batch_size).
        is_testing: when True, no repeating/shuffling is applied.

    Returns:
        (dataset, next_element) — the prefetched dataset and its one-shot
        iterator's get_next() op.
    """
    grid = subgrid_gen.grid_size(args.grid_config)
    channels = subgrid_gen.num_channels(args.grid_config)
    dataset = tf.data.Dataset.from_generator(
        gen,
        output_types=(tf.string, tf.float32, tf.float32),
        output_shapes=((), (2, grid, grid, grid, channels), (1,)))
    if not is_testing:
        # Presumably the feature generator already shuffles when
        # args.shuffle is set, so the dataset only repeats; otherwise
        # tf.data shuffles with a 1000-element buffer — verify against
        # feature_ppi.dataset_generator.
        if args.shuffle:
            dataset = dataset.repeat(count=None)
        else:
            dataset = dataset.apply(
                tf.contrib.data.shuffle_and_repeat(buffer_size=1000))
    dataset = dataset.batch(args.batch_size).prefetch(8)
    next_element = dataset.make_one_shot_iterator().get_next()
    return dataset, next_element
def train_model(sess, args):
# tf Graph input
# Subgrid maps for each residue in a protein
logging.debug('Create input placeholder...')
grid_size = subgrid_gen.grid_size(args.grid_config)
channel_size = subgrid_gen.num_channels(args.grid_config)
feature_placeholder = tf.placeholder(
tf.float32,
[None, 2, grid_size, grid_size, grid_size, channel_size],
name='main_input')
label_placeholder = tf.placeholder(tf.float32, [None, 1], 'label')
# Placeholder for model parameters
training_placeholder = tf.placeholder(tf.bool, shape=[], name='is_training')
conv_drop_rate_placeholder = tf.placeholder(tf.float32, name='conv_drop_rate')
fc_drop_rate_placeholder = tf.placeholder(tf.float32, name='fc_drop_rate')
top_nn_drop_rate_placeholder = tf.placeholder(tf.float32, name='top_nn_drop_rate')
# Define loss and optimizer
logging.debug('Define loss and optimizer...')
logits_op, predict_op, loss_op, accuracy_op = conv_model(
feature_placeholder, label_placeholder, training_placeholder,
conv_drop_rate_placeholder, fc_drop_rate_placeholder,
top_nn_drop_rate_placeholder, args)
logging.debug('Generate training ops...')
train_op = model.training(loss_op, args.learning_rate)
# Initialize the variables (i.e. assign their default value)
logging.debug('Initializing global variables...')
init = tf.global_variables_initializer()
# Create saver and summaries.
logging.debug('Initializing saver...')
saver = tf.train.Saver(max_to_keep=100000)
logging.debug('Finished initializing saver...')
def __loop(generator, mode, num_iters):
tf_dataset, next_element = batch_dataset_generator(
generator, args, is_testing=(mode=='test'))
structures, losses, logits, preds, labels = [], [], [], [], []
epoch_loss = 0
epoch_acc = 0
progress_format = mode + ' loss: {:6.6f}' + '; acc: {:6.4f}'
# Loop over all batches (one batch is all feature for 1 protein)
num_batches = int(math.ceil(float(num_iters)/args.batch_size))
#print('Running {:} -> {:} iters in {:} batches (batch size: {:})'.format(
# mode, num_iters, num_batches, args.batch_size))
with tqdm.tqdm(total=num_batches, desc=progress_format.format(0, 0)) as t:
for i in range(num_batches):
try:
structure_, feature_, label_ = sess.run(next_element)
_, logit, pred, loss, accuracy = sess.run(
[train_op, logits_op, predict_op, loss_op, accuracy_op],
feed_dict={feature_placeholder: feature_,
label_placeholder: label_,
training_placeholder: (mode == 'train'),
conv_drop_rate_placeholder:
args.conv_drop_rate if mode == 'train' else 0.0,
fc_drop_rate_placeholder:
args.fc_drop_rate if mode == 'train' else 0.0,
top_nn_drop_rate_placeholder:
args.top_nn_drop_rate if mode == 'train' else 0.0})
epoch_loss += (np.mean(loss) - epoch_loss) / (i + 1)
epoch_acc += (np.mean(accuracy) - epoch_acc) / (i + 1)
structures.extend(structure_.astype(str))
losses.append(loss)
logits.extend(logit.astype(np.float))
preds.extend(pred.astype(np.int8))
labels.extend(label_.astype(np.int8))
t.set_description(progress_format.format(epoch_loss, epoch_acc))
t.update(1)
except (tf.errors.OutOfRangeError, StopIteration):
logging.info("\nEnd of {:} dataset at iteration {:}".format(mode, i))
break
def __concatenate(array):
try:
array = np.concatenate(array)
return array
except:
return array
structures = __concatenate(structures)
logits = __concatenate(logits)
preds = __concatenate(preds)
labels = __concatenate(labels)
losses = __concatenate(losses)
return structures, logits, preds, labels, losses, epoch_loss
# Run the initializer
logging.debug('Running initializer...')
sess.run(init)
logging.debug('Finished running initializer...')
##### Training + validation
def __count_num_structs(gen):
return np.sum(list(gen))
if not args.test_only:
prev_val_loss, best_val_loss = float("inf"), float("inf")
if ((args.grid_config.max_pos_regions_per_ensemble > 0) and
(args.grid_config.neg_to_pos_ratio > 0)):
multiplier = args.grid_config.max_pos_regions_per_ensemble * int(
1 + args.grid_config.neg_to_pos_ratio)
if (args.max_num_ensembles_train == None):
train_num_structures = args.train_sharded.get_num_keyed()
else:
train_num_structures = args.max_num_ensembles_train
if (args.max_num_ensembles_val == None):
val_num_structures = args.val_sharded.get_num_keyed()
else:
val_num_structures = args.max_num_ensembles_val
train_num_structures *= args.repeat_gen * multiplier
val_num_structures *= args.repeat_gen * multiplier
else:
assert False
logging.info("Start training with {:} structures for train and {:} structures for val per epoch".format(
train_num_structures, val_num_structures))
def _save():
ckpt = saver.save(sess, os.path.join(args.output_dir, 'model-ckpt'),
global_step=epoch)
return ckpt
run_info_filename = os.path.join(args.output_dir, 'run_info.json')
run_info = {}
def __update_and_write_run_info(key, val):
run_info[key] = val
with open(run_info_filename, 'w') as f:
json.dump(run_info, f, indent=4)
per_epoch_val_losses = []
for epoch in range(1, args.num_epochs+1):
random_seed = args.random_seed #random.randint(1, 10e6)
logging.info('Epoch {:} - random_seed: {:}'.format(epoch, args.random_seed))
logging.debug('Creating train generator...')
train_generator_callable = functools.partial(
feature_ppi.dataset_generator,
args.train_sharded,
args.grid_config,
shuffle=args.shuffle,
repeat=args.repeat_gen,
max_num_ensembles=args.max_num_ensembles_train,
testing=False,
random_seed=random_seed)
logging.debug('Creating val generator...')
val_generator_callable = functools.partial(
feature_ppi.dataset_generator,
args.val_sharded,
args.grid_config,
shuffle=args.shuffle,
repeat=args.repeat_gen,
max_num_ensembles=args.max_num_ensembles_val,
testing=False,
random_seed=random_seed)
# Training
train_structures, train_logits, train_preds, train_labels, _, curr_train_loss = __loop(
train_generator_callable, 'train', num_iters=train_num_structures)
# Validation
val_structures, val_logits, val_preds, val_labels, _, curr_val_loss = __loop(
val_generator_callable, 'val', num_iters=val_num_structures)
per_epoch_val_losses.append(curr_val_loss)
__update_and_write_run_info('val_losses', per_epoch_val_losses)
if args.use_best or args.early_stopping:
if curr_val_loss < best_val_loss:
# Found new best epoch.
best_val_loss = curr_val_loss
ckpt = _save()
__update_and_write_run_info('val_best_loss', best_val_loss)
__update_and_write_run_info('best_ckpt', ckpt)
logging.info("New best {:}".format(ckpt))
if (epoch == args.num_epochs - 1 and not args.use_best):
# At end and just using final checkpoint.
ckpt = _save()
__update_and_write_run_info('best_ckpt', ckpt)
logging.info("Last checkpoint {:}".format(ckpt))
if args.save_all_ckpts:
# Save at every checkpoint
ckpt = _save()
logging.info("Saving checkpoint {:}".format(ckpt))
## Save train and val results
logging.info("Saving train and val results")
train_df = pd.DataFrame(
np.array([train_structures, train_labels, train_preds, train_logits]).T,
columns=['structure', 'true', 'pred', 'logits'],
)
train_df[['ensemble', 'res0', 'res1']] = train_df.structure.str.split('/', expand=True)
train_df.to_pickle(os.path.join(args.output_dir, 'train_result-{:}.pkl'.format(epoch)))
val_df = pd.DataFrame(
np.array([val_structures, val_labels, val_preds, val_logits]).T,
columns=['structure', 'true', 'pred', 'logits'],
)
val_df[['ensemble', 'res0', 'res1']] = val_df.structure.str.split('/', expand=True)
val_df.to_pickle(os.path.join(args.output_dir, 'val_result-{:}.pkl'.format(epoch)))
__stats('Train Epoch {:}'.format(epoch), train_df)
__stats('Val Epoch {:}'.format(epoch), val_df)
if args.early_stopping and curr_val_loss >= prev_val_loss:
logging.info("Validation loss stopped decreasing, stopping...")
break
else:
prev_val_loss = curr_val_loss
logging.info("Finished training")
##### Testing
logging.debug("Run testing")
if not args.test_only:
to_use = run_info['best_ckpt'] if args.use_best else ckpt
else:
with open(os.path.join(args.model_dir, 'run_info.json')) as f:
run_info = json.load(f)
to_use = run_info['best_ckpt']
saver = tf.train.import_meta_graph(to_use + '.meta')
logging.info("Using {:} for testing".format(to_use))
saver.restore(sess, to_use)
test_generator_callable = functools.partial(
feature_ppi.dataset_generator,
args.test_sharded,
args.grid_config,
shuffle=args.shuffle,
repeat=1,
max_num_ensembles=args.max_num_ensembles_test,
testing=True,
use_shard_nums=args.use_shard_nums,
random_seed=args.random_seed)
if ((not args.grid_config.full_test) and
(args.grid_config.max_pos_regions_per_ensemble_testing > 0) and
(args.grid_config.neg_to_pos_ratio_testing > 0)):
multiplier = args.grid_config.max_pos_regions_per_ensemble_testing * int(
1 + args.grid_config.neg_to_pos_ratio_testing)
if (args.max_num_ensembles_test == None):
test_num_structures = args.test_sharded.get_num_keyed()
else:
test_num_structures = args.max_num_ensembles_test
test_num_structures *= multiplier
else:
assert False
logging.info("Start testing with {:} structures".format(test_num_structures))
test_structures, test_logits, test_preds, test_labels, _, test_loss = __loop(
test_generator_callable, 'test', num_iters=test_num_structures)
logging.info("Finished testing")
test_df = pd.DataFrame(
np.array([test_structures, test_labels, test_preds, test_logits]).T,
columns=['structure', 'true', 'pred', 'logits'],
)
test_df[['ensemble', 'res0', 'res1']] = test_df.structure.str.split('/', expand=True)
test_df.to_pickle(os.path.join(args.output_dir, 'test_result.pkl'))
__stats('Test', test_df)
print(test_df.groupby(['true', 'pred']).size())
def create_train_parser():
parser = argparse.ArgumentParser()
parser.add_argument(
'--train_sharded', type=str,
default=os.environ['PPI_TRAIN_SHARDED'])
parser.add_argument(
'--val_sharded', type=str,
default=os.environ['PPI_VAL_SHARDED'])
parser.add_argument(
'--test_sharded', type=str,
default=os.environ['PPI_TEST_SHARDED'])
parser.add_argument(
'--output_dir', type=str,
default=os.environ['MODEL_DIR'])
# Training parameters
parser.add_argument('--max_num_ensembles_train', type=int, default=None)
parser.add_argument('--max_num_ensembles_val', type=int, default=None)
parser.add_argument('--max_num_ensembles_test', type=int, default=None)
parser.add_argument('--learning_rate', type=float, default=0.001)
parser.add_argument('--conv_drop_rate', type=float, default=0.1)
parser.add_argument('--fc_drop_rate', type=float, default=0.25)
parser.add_argument('--top_nn_drop_rate', type=float, default=0.5)
parser.add_argument('--top_nn_activation', type=str, default=None)
parser.add_argument('--num_epochs', type=int, default=5)
parser.add_argument('--repeat_gen', type=int, default=1)
parser.add_argument('--num_conv', type=int, default=4)
parser.add_argument('--num_final_fc_layers', type=int, default=2)
parser.add_argument('--use_batch_norm', action='store_true', default=False)
parser.add_argument('--no_dropout', action='store_true', default=False)
parser.add_argument('--batch_size', type=int, default=4)
parser.add_argument('--shuffle', action='store_true', default=False)
parser.add_argument('--early_stopping', action='store_true', default=False)
parser.add_argument('--use_best', action='store_true', default=False)
parser.add_argument('--random_seed', type=int, default=random.randint(1, 10e6))
parser.add_argument('--max_pos_regions_per_ensemble', type=int, default=70)
parser.add_argument('--max_pos_regions_per_ensemble_testing', type=int, default=-1)
parser.add_argument('--neg_to_pos_ratio', type=int, default=1)
parser.add_argument('--neg_to_pos_ratio_testing', type=int, default=1)
parser.add_argument('--debug', action='store_true', default=False)
parser.add_argument('--unobserved', action='store_true', default=False)
parser.add_argument('--save_all_ckpts', action='store_true', default=False)
# Test only
parser.add_argument('--test_only', action='store_true', default=False)
parser.add_argument('--full_test', action='store_true', default=False)
parser.add_argument('--model_dir', type=str, default=None)
parser.add_argument('--use_shard_nums', type=int, nargs='*', default=None)
return parser
def main():
parser = create_train_parser()
args = parser.parse_args()
args.__dict__['grid_config'] = feature_ppi.grid_config
if args.test_only:
with open(os.path.join(args.model_dir, 'config.json')) as f:
model_config = json.load(f)
args.num_conv = model_config['num_conv']
args.use_batch_norm = model_config['use_batch_norm']
if 'grid_config' in model_config:
args.__dict__['grid_config'] = util.dotdict(
model_config['grid_config'])
args.__dict__['grid_config'].max_pos_regions_per_ensemble = args.max_pos_regions_per_ensemble
args.__dict__['grid_config'].max_pos_regions_per_ensemble_testing = args.max_pos_regions_per_ensemble_testing
args.__dict__['grid_config'].neg_to_pos_ratio = args.neg_to_pos_ratio
args.__dict__['grid_config'].neg_to_pos_ratio_testing = args.neg_to_pos_ratio_testing
args.__dict__['grid_config'].full_test = args.full_test
log_fmt = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
if args.debug:
logging.basicConfig(level=logging.DEBUG, format=log_fmt)
else:
logging.basicConfig(level=logging.INFO, format=log_fmt)
logging.info("Running 3D CNN PPI training...")
if args.unobserved:
args.output_dir = os.path.join(args.output_dir, 'None')
os.makedirs(args.output_dir, exist_ok=True)
else:
num = 0
while True:
dirpath = os.path.join(args.output_dir, str(num))
if os.path.exists(dirpath):
num += 1
else:
args.output_dir = dirpath
logging.info('Creating output directory {:}'.format(args.output_dir))
os.mkdir(args.output_dir)
break
logging.info("\n" + str(json.dumps(args.__dict__, indent=4)) + "\n")
# Save config
with open(os.path.join(args.output_dir, 'config.json'), 'w') as f:
json.dump(args.__dict__, f, indent=4)
args.train_sharded = sh.Sharded.load(args.train_sharded)
args.val_sharded = sh.Sharded.load(args.val_sharded)
args.test_sharded = sh.Sharded.load(args.test_sharded)
logging.info("Writing all output to {:}".format(args.output_dir))
with tf.Session() as sess:
np.random.seed(args.random_seed)
tf.set_random_seed(args.random_seed)
train_model(sess, args)
if __name__ == '__main__':
main()
| [
"tensorflow.shape",
"logging.debug",
"examples.cnn3d.subgrid_gen.num_channels",
"sklearn.metrics.roc_auc_score",
"examples.cnn3d.model.training",
"numpy.array",
"sklearn.metrics.log_loss",
"tensorflow.cast",
"logging.info",
"tensorflow.set_random_seed",
"os.path.exists",
"numpy.mean",
"argpa... | [((277, 339), 'tensorflow.compat.v1.logging.set_verbosity', 'tf.compat.v1.logging.set_verbosity', (['tf.compat.v1.logging.ERROR'], {}), '(tf.compat.v1.logging.ERROR)\n', (311, 339), True, 'import tensorflow as tf\n'), ((596, 623), 'dotenv.find_dotenv', 'de.find_dotenv', ([], {'usecwd': '(True)'}), '(usecwd=True)\n', (610, 623), True, 'import dotenv as de\n'), ((783, 831), 'sklearn.metrics.average_precision_score', 'sm.average_precision_score', (['all_trues', 'all_preds'], {}), '(all_trues, all_preds)\n', (809, 831), True, 'import sklearn.metrics as sm\n'), ((855, 893), 'sklearn.metrics.roc_auc_score', 'sm.roc_auc_score', (['all_trues', 'all_preds'], {}), '(all_trues, all_preds)\n', (871, 893), True, 'import sklearn.metrics as sm\n'), ((1077, 1110), 'sklearn.metrics.log_loss', 'sm.log_loss', (['all_trues', 'all_preds'], {}), '(all_trues, all_preds)\n', (1088, 1110), True, 'import sklearn.metrics as sm\n'), ((1665, 1692), 'tensorflow.cast', 'tf.cast', (['true_y', 'tf.float32'], {}), '(true_y, tf.float32)\n', (1672, 1692), True, 'import tensorflow as tf\n'), ((1993, 2049), 'tensorflow.cast', 'tf.cast', (['correct_prediction', 'tf.float32'], {'name': '"""accuracy"""'}), "(correct_prediction, tf.float32, name='accuracy')\n", (2000, 2049), True, 'import tensorflow as tf\n'), ((2513, 2833), 'examples.cnn3d.model.siamese_model', 'model.siamese_model', (['feature', 'is_training', 'conv_drop_rate', 'fc_drop_rate', 'top_nn_drop_rate', 'conv_filters', 'conv_kernel_size', 'max_pool_positions', 'max_pool_sizes', 'max_pool_strides', 'fc_units', 'top_fc_units'], {'batch_norm': 'args.use_batch_norm', 'dropout': '(not args.no_dropout)', 'top_nn_activation': 'args.top_nn_activation'}), '(feature, is_training, conv_drop_rate, fc_drop_rate,\n top_nn_drop_rate, conv_filters, conv_kernel_size, max_pool_positions,\n max_pool_sizes, max_pool_strides, fc_units, top_fc_units, batch_norm=\n args.use_batch_norm, dropout=not args.no_dropout, top_nn_activation=\n 
args.top_nn_activation)\n', (2532, 2833), True, 'import examples.cnn3d.model as model\n'), ((3893, 3932), 'examples.cnn3d.subgrid_gen.grid_size', 'subgrid_gen.grid_size', (['args.grid_config'], {}), '(args.grid_config)\n', (3914, 3932), True, 'import examples.cnn3d.subgrid_gen as subgrid_gen\n'), ((3952, 3994), 'examples.cnn3d.subgrid_gen.num_channels', 'subgrid_gen.num_channels', (['args.grid_config'], {}), '(args.grid_config)\n', (3976, 3994), True, 'import examples.cnn3d.subgrid_gen as subgrid_gen\n'), ((4009, 4181), 'tensorflow.data.Dataset.from_generator', 'tf.data.Dataset.from_generator', (['gen'], {'output_types': '(tf.string, tf.float32, tf.float32)', 'output_shapes': '((), (2, grid_size, grid_size, grid_size, channel_size), (1,))'}), '(gen, output_types=(tf.string, tf.float32, tf\n .float32), output_shapes=((), (2, grid_size, grid_size, grid_size,\n channel_size), (1,)))\n', (4039, 4181), True, 'import tensorflow as tf\n'), ((4754, 4798), 'logging.debug', 'logging.debug', (['"""Create input placeholder..."""'], {}), "('Create input placeholder...')\n", (4767, 4798), False, 'import logging\n'), ((4815, 4854), 'examples.cnn3d.subgrid_gen.grid_size', 'subgrid_gen.grid_size', (['args.grid_config'], {}), '(args.grid_config)\n', (4836, 4854), True, 'import examples.cnn3d.subgrid_gen as subgrid_gen\n'), ((4874, 4916), 'examples.cnn3d.subgrid_gen.num_channels', 'subgrid_gen.num_channels', (['args.grid_config'], {}), '(args.grid_config)\n', (4898, 4916), True, 'import examples.cnn3d.subgrid_gen as subgrid_gen\n'), ((4943, 5050), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None, 2, grid_size, grid_size, grid_size, channel_size]'], {'name': '"""main_input"""'}), "(tf.float32, [None, 2, grid_size, grid_size, grid_size,\n channel_size], name='main_input')\n", (4957, 5050), True, 'import tensorflow as tf\n'), ((5096, 5142), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None, 1]', '"""label"""'], {}), "(tf.float32, [None, 1], 
'label')\n", (5110, 5142), True, 'import tensorflow as tf\n'), ((5210, 5263), 'tensorflow.placeholder', 'tf.placeholder', (['tf.bool'], {'shape': '[]', 'name': '"""is_training"""'}), "(tf.bool, shape=[], name='is_training')\n", (5224, 5263), True, 'import tensorflow as tf\n'), ((5297, 5346), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'name': '"""conv_drop_rate"""'}), "(tf.float32, name='conv_drop_rate')\n", (5311, 5346), True, 'import tensorflow as tf\n'), ((5378, 5425), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'name': '"""fc_drop_rate"""'}), "(tf.float32, name='fc_drop_rate')\n", (5392, 5425), True, 'import tensorflow as tf\n'), ((5461, 5512), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'name': '"""top_nn_drop_rate"""'}), "(tf.float32, name='top_nn_drop_rate')\n", (5475, 5512), True, 'import tensorflow as tf\n'), ((5550, 5595), 'logging.debug', 'logging.debug', (['"""Define loss and optimizer..."""'], {}), "('Define loss and optimizer...')\n", (5563, 5595), False, 'import logging\n'), ((5838, 5879), 'logging.debug', 'logging.debug', (['"""Generate training ops..."""'], {}), "('Generate training ops...')\n", (5851, 5879), False, 'import logging\n'), ((5895, 5938), 'examples.cnn3d.model.training', 'model.training', (['loss_op', 'args.learning_rate'], {}), '(loss_op, args.learning_rate)\n', (5909, 5938), True, 'import examples.cnn3d.model as model\n'), ((6009, 6058), 'logging.debug', 'logging.debug', (['"""Initializing global variables..."""'], {}), "('Initializing global variables...')\n", (6022, 6058), False, 'import logging\n'), ((6070, 6103), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (6101, 6103), True, 'import tensorflow as tf\n'), ((6143, 6181), 'logging.debug', 'logging.debug', (['"""Initializing saver..."""'], {}), "('Initializing saver...')\n", (6156, 6181), False, 'import logging\n'), ((6194, 6228), 'tensorflow.train.Saver', 'tf.train.Saver', ([], 
{'max_to_keep': '(100000)'}), '(max_to_keep=100000)\n', (6208, 6228), True, 'import tensorflow as tf\n'), ((6233, 6280), 'logging.debug', 'logging.debug', (['"""Finished initializing saver..."""'], {}), "('Finished initializing saver...')\n", (6246, 6280), False, 'import logging\n'), ((9138, 9177), 'logging.debug', 'logging.debug', (['"""Running initializer..."""'], {}), "('Running initializer...')\n", (9151, 9177), False, 'import logging\n'), ((9201, 9249), 'logging.debug', 'logging.debug', (['"""Finished running initializer..."""'], {}), "('Finished running initializer...')\n", (9214, 9249), False, 'import logging\n'), ((14749, 14782), 'logging.info', 'logging.info', (['"""Finished training"""'], {}), "('Finished training')\n", (14761, 14782), False, 'import logging\n'), ((14806, 14834), 'logging.debug', 'logging.debug', (['"""Run testing"""'], {}), "('Run testing')\n", (14819, 14834), False, 'import logging\n'), ((15266, 15525), 'functools.partial', 'functools.partial', (['feature_ppi.dataset_generator', 'args.test_sharded', 'args.grid_config'], {'shuffle': 'args.shuffle', 'repeat': '(1)', 'max_num_ensembles': 'args.max_num_ensembles_test', 'testing': '(True)', 'use_shard_nums': 'args.use_shard_nums', 'random_seed': 'args.random_seed'}), '(feature_ppi.dataset_generator, args.test_sharded, args.\n grid_config, shuffle=args.shuffle, repeat=1, max_num_ensembles=args.\n max_num_ensembles_test, testing=True, use_shard_nums=args.\n use_shard_nums, random_seed=args.random_seed)\n', (15283, 15525), False, 'import functools\n'), ((16412, 16444), 'logging.info', 'logging.info', (['"""Finished testing"""'], {}), "('Finished testing')\n", (16424, 16444), False, 'import logging\n'), ((16903, 16928), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (16926, 16928), False, 'import argparse\n'), ((20825, 20871), 'logging.info', 'logging.info', (['"""Running 3D CNN PPI training..."""'], {}), "('Running 3D CNN PPI training...')\n", (20837, 20871), False, 
'import logging\n'), ((21632, 21667), 'atom3d.shard.shard.Sharded.load', 'sh.Sharded.load', (['args.train_sharded'], {}), '(args.train_sharded)\n', (21647, 21667), True, 'import atom3d.shard.shard as sh\n'), ((21691, 21724), 'atom3d.shard.shard.Sharded.load', 'sh.Sharded.load', (['args.val_sharded'], {}), '(args.val_sharded)\n', (21706, 21724), True, 'import atom3d.shard.shard as sh\n'), ((21749, 21783), 'atom3d.shard.shard.Sharded.load', 'sh.Sharded.load', (['args.test_sharded'], {}), '(args.test_sharded)\n', (21764, 21783), True, 'import atom3d.shard.shard as sh\n'), ((2962, 2983), 'tensorflow.nn.sigmoid', 'tf.nn.sigmoid', (['logits'], {}), '(logits)\n', (2975, 2983), True, 'import tensorflow as tf\n'), ((3022, 3047), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""loss"""'], {}), "('loss')\n", (3039, 3047), True, 'import tensorflow as tf\n'), ((3418, 3460), 'tensorflow.count_nonzero', 'tf.count_nonzero', (['target'], {'dtype': 'tf.float32'}), '(target, dtype=tf.float32)\n', (3434, 3460), True, 'import tensorflow as tf\n'), ((3669, 3708), 'tensorflow.identity', 'tf.identity', (['loss'], {'name': '"""cross_entropy"""'}), "(loss, name='cross_entropy')\n", (3680, 3708), True, 'import tensorflow as tf\n'), ((10662, 10708), 'os.path.join', 'os.path.join', (['args.output_dir', '"""run_info.json"""'], {}), "(args.output_dir, 'run_info.json')\n", (10674, 10708), False, 'import os\n'), ((15100, 15144), 'tensorflow.train.import_meta_graph', 'tf.train.import_meta_graph', (["(to_use + '.meta')"], {}), "(to_use + '.meta')\n", (15126, 15144), True, 'import tensorflow as tf\n'), ((16730, 16778), 'os.path.join', 'os.path.join', (['args.output_dir', '"""test_result.pkl"""'], {}), "(args.output_dir, 'test_result.pkl')\n", (16742, 16778), False, 'import os\n'), ((20690, 20746), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.DEBUG', 'format': 'log_fmt'}), '(level=logging.DEBUG, format=log_fmt)\n', (20709, 20746), False, 'import logging\n'), ((20765, 
20820), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO', 'format': 'log_fmt'}), '(level=logging.INFO, format=log_fmt)\n', (20784, 20820), False, 'import logging\n'), ((20923, 20960), 'os.path.join', 'os.path.join', (['args.output_dir', '"""None"""'], {}), "(args.output_dir, 'None')\n", (20935, 20960), False, 'import os\n'), ((20969, 21012), 'os.makedirs', 'os.makedirs', (['args.output_dir'], {'exist_ok': '(True)'}), '(args.output_dir, exist_ok=True)\n', (20980, 21012), False, 'import os\n'), ((21568, 21605), 'json.dump', 'json.dump', (['args.__dict__', 'f'], {'indent': '(4)'}), '(args.__dict__, f, indent=4)\n', (21577, 21605), False, 'import json\n'), ((21864, 21876), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (21874, 21876), True, 'import tensorflow as tf\n'), ((21894, 21926), 'numpy.random.seed', 'np.random.seed', (['args.random_seed'], {}), '(args.random_seed)\n', (21908, 21926), True, 'import numpy as np\n'), ((21935, 21971), 'tensorflow.set_random_seed', 'tf.set_random_seed', (['args.random_seed'], {}), '(args.random_seed)\n', (21953, 21971), True, 'import tensorflow as tf\n'), ((1788, 1819), 'tensorflow.less_equal', 'tf.less_equal', (['predicted_y', '(0.5)'], {}), '(predicted_y, 0.5)\n', (1801, 1819), True, 'import tensorflow as tf\n'), ((1837, 1863), 'tensorflow.less_equal', 'tf.less_equal', (['true_y', '(0.5)'], {}), '(true_y, 0.5)\n', (1850, 1863), True, 'import tensorflow as tf\n'), ((1910, 1938), 'tensorflow.greater', 'tf.greater', (['predicted_y', '(0.5)'], {}), '(predicted_y, 0.5)\n', (1920, 1938), True, 'import tensorflow as tf\n'), ((1956, 1979), 'tensorflow.greater', 'tf.greater', (['true_y', '(0.5)'], {}), '(true_y, 0.5)\n', (1966, 1979), True, 'import tensorflow as tf\n'), ((11171, 11215), 'logging.debug', 'logging.debug', (['"""Creating train generator..."""'], {}), "('Creating train generator...')\n", (11184, 11215), False, 'import logging\n'), ((11255, 11488), 'functools.partial', 'functools.partial', 
(['feature_ppi.dataset_generator', 'args.train_sharded', 'args.grid_config'], {'shuffle': 'args.shuffle', 'repeat': 'args.repeat_gen', 'max_num_ensembles': 'args.max_num_ensembles_train', 'testing': '(False)', 'random_seed': 'random_seed'}), '(feature_ppi.dataset_generator, args.train_sharded, args.\n grid_config, shuffle=args.shuffle, repeat=args.repeat_gen,\n max_num_ensembles=args.max_num_ensembles_train, testing=False,\n random_seed=random_seed)\n', (11272, 11488), False, 'import functools\n'), ((11618, 11660), 'logging.debug', 'logging.debug', (['"""Creating val generator..."""'], {}), "('Creating val generator...')\n", (11631, 11660), False, 'import logging\n'), ((11698, 11927), 'functools.partial', 'functools.partial', (['feature_ppi.dataset_generator', 'args.val_sharded', 'args.grid_config'], {'shuffle': 'args.shuffle', 'repeat': 'args.repeat_gen', 'max_num_ensembles': 'args.max_num_ensembles_val', 'testing': '(False)', 'random_seed': 'random_seed'}), '(feature_ppi.dataset_generator, args.val_sharded, args.\n grid_config, shuffle=args.shuffle, repeat=args.repeat_gen,\n max_num_ensembles=args.max_num_ensembles_val, testing=False,\n random_seed=random_seed)\n', (11715, 11927), False, 'import functools\n'), ((13537, 13581), 'logging.info', 'logging.info', (['"""Saving train and val results"""'], {}), "('Saving train and val results')\n", (13549, 13581), False, 'import logging\n'), ((15032, 15044), 'json.load', 'json.load', (['f'], {}), '(f)\n', (15041, 15044), False, 'import json\n'), ((16482, 16547), 'numpy.array', 'np.array', (['[test_structures, test_labels, test_preds, test_logits]'], {}), '([test_structures, test_labels, test_preds, test_logits])\n', (16490, 16547), True, 'import numpy as np\n'), ((18719, 18748), 'random.randint', 'random.randint', (['(1)', '(10000000.0)'], {}), '(1, 10000000.0)\n', (18733, 18748), False, 'import random\n'), ((19869, 19881), 'json.load', 'json.load', (['f'], {}), '(f)\n', (19878, 19881), False, 'import json\n'), ((21136, 
21159), 'os.path.exists', 'os.path.exists', (['dirpath'], {}), '(dirpath)\n', (21150, 21159), False, 'import os\n'), ((21503, 21547), 'os.path.join', 'os.path.join', (['args.output_dir', '"""config.json"""'], {}), "(args.output_dir, 'config.json')\n", (21515, 21547), False, 'import os\n'), ((3126, 3153), 'tensorflow.cast', 'tf.cast', (['target', 'tf.float32'], {}), '(target, tf.float32)\n', (3133, 3153), True, 'import tensorflow as tf\n'), ((3367, 3383), 'tensorflow.shape', 'tf.shape', (['target'], {}), '(target)\n', (3375, 3383), True, 'import tensorflow as tf\n'), ((3603, 3640), 'tensorflow.cast', 'tf.cast', (['effective_weight', 'tf.float32'], {}), '(effective_weight, tf.float32)\n', (3610, 3640), True, 'import tensorflow as tf\n'), ((4394, 4446), 'tensorflow.contrib.data.shuffle_and_repeat', 'tf.contrib.data.shuffle_and_repeat', ([], {'buffer_size': '(1000)'}), '(buffer_size=1000)\n', (4428, 4446), True, 'import tensorflow as tf\n'), ((8736, 8757), 'numpy.concatenate', 'np.concatenate', (['array'], {}), '(array)\n', (8750, 8757), True, 'import numpy as np\n'), ((10515, 10558), 'os.path.join', 'os.path.join', (['args.output_dir', '"""model-ckpt"""'], {}), "(args.output_dir, 'model-ckpt')\n", (10527, 10558), False, 'import os\n'), ((10882, 10914), 'json.dump', 'json.dump', (['run_info', 'f'], {'indent': '(4)'}), '(run_info, f, indent=4)\n', (10891, 10914), False, 'import json\n'), ((14594, 14657), 'logging.info', 'logging.info', (['"""Validation loss stopped decreasing, stopping..."""'], {}), "('Validation loss stopped decreasing, stopping...')\n", (14606, 14657), False, 'import logging\n'), ((14956, 15001), 'os.path.join', 'os.path.join', (['args.model_dir', '"""run_info.json"""'], {}), "(args.model_dir, 'run_info.json')\n", (14968, 15001), False, 'import os\n'), ((19791, 19834), 'os.path.join', 'os.path.join', (['args.model_dir', '"""config.json"""'], {}), "(args.model_dir, 'config.json')\n", (19803, 19834), False, 'import os\n'), ((20093, 20134), 
'examples.cnn3d.util.dotdict', 'util.dotdict', (["model_config['grid_config']"], {}), "(model_config['grid_config'])\n", (20105, 20134), True, 'import examples.cnn3d.util as util\n'), ((21348, 21373), 'os.mkdir', 'os.mkdir', (['args.output_dir'], {}), '(args.output_dir)\n', (21356, 21373), False, 'import os\n'), ((13635, 13704), 'numpy.array', 'np.array', (['[train_structures, train_labels, train_preds, train_logits]'], {}), '([train_structures, train_labels, train_preds, train_logits])\n', (13643, 13704), True, 'import numpy as np\n'), ((14043, 14104), 'numpy.array', 'np.array', (['[val_structures, val_labels, val_preds, val_logits]'], {}), '([val_structures, val_labels, val_preds, val_logits])\n', (14051, 14104), True, 'import numpy as np\n'), ((21425, 21460), 'json.dumps', 'json.dumps', (['args.__dict__'], {'indent': '(4)'}), '(args.__dict__, indent=4)\n', (21435, 21460), False, 'import json\n'), ((7973, 7986), 'numpy.mean', 'np.mean', (['loss'], {}), '(loss)\n', (7980, 7986), True, 'import numpy as np\n'), ((8045, 8062), 'numpy.mean', 'np.mean', (['accuracy'], {}), '(accuracy)\n', (8052, 8062), True, 'import numpy as np\n')] |
import json, os
import h5py, numpy
def read_config(fn):
from white_matter.utils.paths_in_config import path_local_to_cfg_root
with open(fn, 'r') as fid:
ret = json.load(fid)["ProjectionMapping"]
ret["cfg_root"] = os.path.split(fn)[0]
path_local_to_cfg_root(ret, ["cache_manifest", "h5_fn"])
return ret
class ProjectionMapper(object):
def __init__(self, cfg_file=None):
if cfg_file is None:
cfg_file = os.path.join(os.path.split(__file__)[0], 'default.json')
self.cfg = read_config(cfg_file)
if not os.path.exists(self.cfg["h5_fn"]):
import subprocess, logging
logging.getLogger(__file__).warning("Mapping cache does not exist at %s! Creating it now..."
% self.cfg["h5_fn"])
subprocess.check_call(["write_projection_mapping_cache.py", cfg_file])
assert os.path.exists(self.cfg["h5_fn"]), "Mapping cache still missing!"
def move_to_left_hemi(self, x):
x_out = x.copy()
pivot = 2 * self.cfg["hemi_mirror_at"]
x_out[x >= self.cfg["hemi_mirror_at"]] = pivot - x_out[x >= self.cfg["hemi_mirror_at"]]
return x_out
def move_to_right_hemi(self, x):
x_out = x.copy()
pivot = 2 * self.cfg["hemi_mirror_at"]
x_out[x < self.cfg["hemi_mirror_at"]] = pivot - x_out[x < self.cfg["hemi_mirror_at"]]
return x_out
def for_source(self, src):
with h5py.File(self.cfg["h5_fn"], 'r') as h5:
x = numpy.array(h5[src]['coordinates']['x'])
y = numpy.array(h5[src]['coordinates']['y'])
base_sys = h5[src]['coordinates'].attrs.get('base_coord_system', 'Allen Dorsal Flatmap') # TODO: make dataset
y = self.move_to_right_hemi(y)
return x, y, base_sys
def for_target(self, src):
def _for_target(tgt, hemi):
try:
with h5py.File(self.cfg["h5_fn"], 'r') as h5:
x = numpy.array(h5[src]['targets'][tgt]['coordinates/x'])
y = numpy.array(h5[src]['targets'][tgt]['coordinates/y'])
base_sys = str(h5[src]['targets'][tgt]['coordinates/base_coord_system'].value)
var = h5[src]['targets'][tgt]['mapping_variance'][0]
if hemi == 'ipsi':
y = self.move_to_right_hemi(y)
elif hemi == 'contra':
y = self.move_to_left_hemi(y)
except:
print("Insufficient data for projection {0} - {1}, {2}".format(src, tgt, hemi))
raise
return x, y, base_sys, var
return _for_target
| [
"logging.getLogger",
"os.path.exists",
"subprocess.check_call",
"white_matter.utils.paths_in_config.path_local_to_cfg_root",
"os.path.split",
"h5py.File",
"numpy.array",
"json.load"
] | [((260, 316), 'white_matter.utils.paths_in_config.path_local_to_cfg_root', 'path_local_to_cfg_root', (['ret', "['cache_manifest', 'h5_fn']"], {}), "(ret, ['cache_manifest', 'h5_fn'])\n", (282, 316), False, 'from white_matter.utils.paths_in_config import path_local_to_cfg_root\n'), ((235, 252), 'os.path.split', 'os.path.split', (['fn'], {}), '(fn)\n', (248, 252), False, 'import json, os\n'), ((177, 191), 'json.load', 'json.load', (['fid'], {}), '(fid)\n', (186, 191), False, 'import json, os\n'), ((571, 604), 'os.path.exists', 'os.path.exists', (["self.cfg['h5_fn']"], {}), "(self.cfg['h5_fn'])\n", (585, 604), False, 'import json, os\n'), ((831, 901), 'subprocess.check_call', 'subprocess.check_call', (["['write_projection_mapping_cache.py', cfg_file]"], {}), "(['write_projection_mapping_cache.py', cfg_file])\n", (852, 901), False, 'import subprocess, logging\n'), ((921, 954), 'os.path.exists', 'os.path.exists', (["self.cfg['h5_fn']"], {}), "(self.cfg['h5_fn'])\n", (935, 954), False, 'import json, os\n'), ((1483, 1516), 'h5py.File', 'h5py.File', (["self.cfg['h5_fn']", '"""r"""'], {}), "(self.cfg['h5_fn'], 'r')\n", (1492, 1516), False, 'import h5py, numpy\n'), ((1540, 1580), 'numpy.array', 'numpy.array', (["h5[src]['coordinates']['x']"], {}), "(h5[src]['coordinates']['x'])\n", (1551, 1580), False, 'import h5py, numpy\n'), ((1597, 1637), 'numpy.array', 'numpy.array', (["h5[src]['coordinates']['y']"], {}), "(h5[src]['coordinates']['y'])\n", (1608, 1637), False, 'import h5py, numpy\n'), ((471, 494), 'os.path.split', 'os.path.split', (['__file__'], {}), '(__file__)\n', (484, 494), False, 'import json, os\n'), ((657, 684), 'logging.getLogger', 'logging.getLogger', (['__file__'], {}), '(__file__)\n', (674, 684), False, 'import subprocess, logging\n'), ((1935, 1968), 'h5py.File', 'h5py.File', (["self.cfg['h5_fn']", '"""r"""'], {}), "(self.cfg['h5_fn'], 'r')\n", (1944, 1968), False, 'import h5py, numpy\n'), ((2000, 2053), 'numpy.array', 'numpy.array', 
(["h5[src]['targets'][tgt]['coordinates/x']"], {}), "(h5[src]['targets'][tgt]['coordinates/x'])\n", (2011, 2053), False, 'import h5py, numpy\n'), ((2078, 2131), 'numpy.array', 'numpy.array', (["h5[src]['targets'][tgt]['coordinates/y']"], {}), "(h5[src]['targets'][tgt]['coordinates/y'])\n", (2089, 2131), False, 'import h5py, numpy\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2012 - 2013
# <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# https://github.com/PyRadar/pyradar
#
# 2020: <NAME>: <<EMAIL>> Converted to python3.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 3 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library. If not, see <http://www.gnu.org/licenses/>.
import numpy as np
from scipy.stats import variation
def weighting(window, cu=0.25):
    """
    Compute the Kuan filter weight for one sliding window, using ``cu`` as
    the noise coefficient of variation.

    :param window: numpy array holding the pixels of the sliding window.
    :param cu: noise coefficient of variation of the image.
    :return: weight in [0, 1]; 0.0 when the window varies less than the noise.
    """
    two_cu = cu * cu
    # Coefficient of variation (std/mean) over the flattened window.
    ci = variation(window, None)
    two_ci = ci * ci
    if not two_ci:  # dirty patch to avoid zero division on flat windows
        two_ci = 0.01
    if cu > ci:
        # The window is smoother than the noise floor: rely on the mean only.
        return 0.0
    # NOTE: the old `if not divisor:` guard was unreachable dead code, since
    # 1.0 + cu^2 is always >= 1; it has been removed.
    return (1.0 - (two_cu / two_ci)) / (1.0 + two_cu)
def kuan_filter(img, win_size=3, cu=0.25):
    """
    Apply the Kuan speckle filter to a 2-D image with a sliding window of
    win_size x win_size pixels.

    Each output pixel is a weighted blend of the original pixel value and the
    local window mean, with the weight computed by ``weighting`` from the
    window's coefficient of variation.

    :param img: 2-D array-like image (converted internally to float64).
    :param win_size: odd window side length, at least 3.
    :param cu: noise coefficient of variation.
    :return: filtered image, a float64 numpy array with the shape of ``img``.
    """
    if win_size < 3: raise Exception('[findpeaks] >ERROR: win size must be at least 3')
    if len(img.shape) > 2: raise Exception('[findpeaks] >ERROR: Image should be 2D. Hint: set the parameter: togray=True')
    # ("user" typo in the original message fixed below.)
    if ((win_size % 2) == 0): print('[findpeaks] >It is highly recommended to use odd window sizes. You provided %s, an even number.' % (win_size))
    # we process the entire img as float64 to avoid type overflow error
    img = np.float64(img)
    img_filtered = np.zeros_like(img)
    N, M = img.shape
    win_offset = win_size // 2
    for i in range(N):
        # BUGFIX: the previous exclusive slice img[i-off : i+off] dropped the
        # last row/column, so a "3x3" filter really used an asymmetric 2x2
        # up-left window. The +1 makes an interior window truly
        # win_size x win_size, clamped at the image borders.
        xleft = max(i - win_offset, 0)
        xright = min(i + win_offset + 1, N)
        for j in range(M):
            yup = max(j - win_offset, 0)
            ydown = min(j + win_offset + 1, M)
            window = img[xleft:xright, yup:ydown]
            w_t = weighting(window, cu)
            new_pix_value = (img[i, j] * w_t) + (window.mean() * (1.0 - w_t))
            # Guard against degenerate windows producing NaN.
            if np.isnan(new_pix_value):
                new_pix_value = 0
            img_filtered[i, j] = round(new_pix_value)
    return img_filtered
| [
"numpy.float64",
"scipy.stats.variation",
"numpy.isnan",
"numpy.zeros_like",
"numpy.arange"
] | [((1079, 1102), 'scipy.stats.variation', 'variation', (['window', 'None'], {}), '(window, None)\n', (1088, 1102), False, 'from scipy.stats import variation\n'), ((1990, 2005), 'numpy.float64', 'np.float64', (['img'], {}), '(img)\n', (2000, 2005), True, 'import numpy as np\n'), ((2025, 2043), 'numpy.zeros_like', 'np.zeros_like', (['img'], {}), '(img)\n', (2038, 2043), True, 'import numpy as np\n'), ((2147, 2162), 'numpy.arange', 'np.arange', (['(0)', 'N'], {}), '(0, N)\n', (2156, 2162), True, 'import numpy as np\n'), ((2337, 2352), 'numpy.arange', 'np.arange', (['(0)', 'M'], {}), '(0, M)\n', (2346, 2352), True, 'import numpy as np\n'), ((2808, 2831), 'numpy.isnan', 'np.isnan', (['new_pix_value'], {}), '(new_pix_value)\n', (2816, 2831), True, 'import numpy as np\n')] |
import numpy as np
import matplotlib.pyplot as plt
from mpmath import zeta
import animplotlib as anim
plt.style.use('dark_background')

# Evaluate zeta(1/2 + i*k) along the critical line and split each value into
# its real and imaginary parts to form the path that will be animated.
n_upper = 49
values = [zeta(complex(0.5, k)) for k in np.arange(0, n_upper, 0.01)]
x = [v.real for v in values]
y = [v.imag for v in values]

fig = plt.figure()
ax = fig.add_subplot(111)
line, = ax.plot([], [], lw=0.5)
ax.set_xlim(-2, 5)
ax.set_ylim(-3, 3)
ax.set_axis_off()

# Animate the path traced by the zeta values.
anim.AnimPlot(fig, line, x, y, plot_speed=20)
plt.show()
| [
"animplotlib.AnimPlot",
"matplotlib.pyplot.style.use",
"matplotlib.pyplot.figure",
"numpy.arange",
"matplotlib.pyplot.show"
] | [((103, 135), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""dark_background"""'], {}), "('dark_background')\n", (116, 135), True, 'import matplotlib.pyplot as plt\n'), ((175, 202), 'numpy.arange', 'np.arange', (['(0)', 'n_upper', '(0.01)'], {}), '(0, n_upper, 0.01)\n', (184, 202), True, 'import numpy as np\n'), ((295, 307), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (305, 307), True, 'import matplotlib.pyplot as plt\n'), ((425, 470), 'animplotlib.AnimPlot', 'anim.AnimPlot', (['fig', 'line', 'x', 'y'], {'plot_speed': '(20)'}), '(fig, line, x, y, plot_speed=20)\n', (438, 470), True, 'import animplotlib as anim\n'), ((471, 481), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (479, 481), True, 'import matplotlib.pyplot as plt\n')] |
#!/usr/bin/env python
# coding: utf-8
# In[1]:
# Notebook setup: clear the workspace (IPython magic; only runs under
# IPython/Jupyter, not as a plain script).
get_ipython().run_line_magic('reset', '')
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from scipy import stats
# These are some parameters to make figures nice (and big)
get_ipython().run_line_magic('matplotlib', 'inline')
get_ipython().run_line_magic('config', "InlineBackend.figure_format = 'retina'")
plt.rcParams['figure.figsize'] = 16,8
# Larger fonts everywhere; note that this update also overrides the figure
# size set on the previous line back to (15, 5).
params = {'legend.fontsize': 'x-large',
          'figure.figsize': (15, 5),
         'axes.labelsize': 'x-large',
         'axes.titlesize':'x-large',
         'xtick.labelsize':'x-large',
         'ytick.labelsize':'x-large'}
plt.rcParams.update(params)
# # Exercise 1: Unfair dice
# Consider a pair of unfair dice. The probabilities for the two dice are as follows:
#
# |Roll|Probability Dice 1|Probability Dice 2
# |---|---|---|
# |1|1/8|1/10|
# |2|1/8|1/10|
# |3|1/8|1/10|
# |4|1/8|1/10|
# |5|1/8|3/10|
# |6|3/8|3/10|
#
# ## Question:
# Use the law of total probability. to compute the probability of rolling a total of 11.
#
# ### Answer
# We denote by $S$ the sum of the dice and by $D_1$ the value of the roll of dice 1
# $$P(S=11)=\sum_{n=1}^{6}P(S=11|D_{1}=n)$$
# $$P(S=11)=P(S=11|D_{1}=5)\cdot P(D_{1}=5)+P(S=11|D_{1}=6)\cdot P(D_{1}=6)$$
# $$P(S=11)=P(D_{2}=6)\cdot P(D_{1}=5)+P(D_{2}=6)\cdot P(D_{1}=5)$$
# $$P(S=11)=3/10\cdot1/8+3/10\cdot3/8=10/80=1/8$$
#
# <hr style="border:2px solid black"> </hr>
# # Exercise 2: Covariance vs independence
# Consider two random variables, $X$ and $Y$. $X$ is uniformly distributed over the interval $\left[-1,1\right]$:
#
# $$X\sim U[-1,1],$$
#
# while $Y$ is normally distributed (Gaussian), with a variance equal to $X^{2}$. We would denote this as:
# $$Y|X\sim\mathcal{N}\left(0,X^{2}\right),$$
# to imply that
# $$P(Y=y|X=x)=p(y|x)=\left(2\pi x^2\right)^{-1/2}\exp\left[-\frac{1}{2}\left(\frac{y}{x}\right)^2\right]$$
# The two random variables are obviously not independent. Indepencene requires $p(y|x)=p(y)$, which in turn would imply $p(y)=p(y|x_1)p(y|x_2)$ for $x_1\neq x_2$.
# ## Question 1 (Theory):
# Prove analyitically that $Cov(X,Y)=0$.<br>
# *Hint:* Use the relation $p(x,y)=p(y|x)p(x)$ to compute $E(XY)$. Alternatively, you can use the same relation to first prove $E(E(Y|X))$.
#
# ### Answer:
# $$Cov(X,Y)=E(XY)-E(X)E(Y)=E(XY)$$
# $$=\int_{-1}^{1}\int_{-\infty}^{\infty}x\cdot y\cdot p(x,y)\cdot dx\cdot dy=\int_{-1}^{1}\int_{-\infty}^{\infty}y\cdot x\cdot p(y|x)p(x)\cdot dx\cdot dy$$
# $$=\int_{-1}^{1}\left[\int_{-\infty}^{\infty}y\cdot p(y|x)\cdot dy\right]x\cdot dx$$
# $$=\int_{-1}^{1}\left[\int_{-\infty}^{\infty}y\cdot\frac{1}{\sqrt{2\pi x^{2}}}e^{-\frac{1}{2}\left(\frac{y}{x}\right)^{2}}\right]x\cdot dx$$
# The inner integral is just the expected value of $y$ for a constant $x$, $E(Y|X)$ and it is zero, since $Y|X\sim\mathcal{N}\left(0,X^{2}\right)$. Thus, since the integrand is zero, the whole intergral is zero.
# ## Question 2 (Numerical):
# Show, numerically, that expected covariance is zero.
# 1. Draw $n$ samples $(x_j,y_j)$ of $(X,Y)$ and plot $y_j$ vs $x_j$ for $n=100$:
# 2. Compute the sample covariance $s_{n-1}=\frac{1}{n-1}\sum_{j=1}^{n}(y_j-\overline y)$ of $X,Y$ for $n=100$. Repeat the experiment a large number of times (e.g. $M=10,000$) and plot the sampling distribution of $s_{100-1}$. What is the mean of the sampling distribution.
# 3. Now increase the sample size up to $n=100,000$ and plot the value of the sample covariance as a function of $n$. By the Law of Large Numbers you should see it asymptote to zero
#
# ### Answer
# In[206]:
# 2.1 -- scatter plot of (X, Y) samples and their sample covariance.
Ndraws = 100
X = stats.uniform.rvs(loc=-1, scale=2, size=Ndraws)
# One normal draw per sample, with standard deviation |x|.
Y = np.array([stats.norm.rvs(loc=0, scale=np.abs(x), size=1)[0] for x in X])
plt.plot(X, Y, '.')
# Unbiased sample covariance of X and Y.
scov = np.sum((X - X.mean()) * (Y - Y.mean())) / (Ndraws - 1)
print(scov)
# In[207]:
# 2.2 -- repeat the n=100 experiment M times to build the sampling
# distribution of the sample covariance, then plot its histogram.
M = 1000
Ndraws = 100
scov = np.zeros(M)
for trial in range(M):
    X = stats.uniform.rvs(loc=-1, scale=2, size=Ndraws)
    Y = np.array([stats.norm.rvs(loc=0, scale=np.abs(x), size=1)[0] for x in X])
    scov[trial] = np.sum((X - X.mean()) * (Y - Y.mean())) / (Ndraws - 1)
plt.hist(scov, rwidth=0.98)
print(np.mean(scov))
# In[208]:
# 2.3 -- running sample covariance as the sample size grows (Law of Large
# Numbers: it should asymptote to zero).
Ndraws = 100000
X = stats.uniform.rvs(loc=-1, scale=2, size=Ndraws)
# Elementwise scale: one normal draw per sample with std |X[i]|.
Y = stats.norm.rvs(loc=0, scale=np.abs(X), size=Ndraws)
# PERF: cumulative sums give every prefix covariance in O(n) total. The
# previous loop re-summed the whole prefix at each step, which is O(n^2)
# (~5e9 operations at this sample size).
Sx, Sy, Sxy = np.cumsum(X), np.cumsum(Y), np.cumsum(X * Y)
scov = np.zeros(Ndraws)
i = np.arange(2, Ndraws)  # scov[0] and scov[1] stay 0, as before
# Same formula as 1/(i-1) * sum((X[:i]-mean)*(Y[:i]-mean)), expanded:
scov[i] = (Sxy[i - 1] - Sx[i - 1] * Sy[i - 1] / i) / (i - 1)
plt.plot(scov)
plt.grid()
# <hr style="border:2px solid black"> </hr>
# # Exercise 3: Central Limit Theorem
# The central limit theorem says that the distribution of the sample mean of **any** random variable approaches a normal distribution.
#
# **Theorem** Let $ X_1, \cdots , X_n $ be $n$ independent and identically distributed (i.i.d) random variables with expectation $\mu$ and variance $\sigma^2$. The distribution of the sample mean $\overline X_n=\frac{1}{n}\sum_{i=1}^n X_i$ approaches the distribution of a gaussian
#
# $$\overline X_n \sim \mathcal N (\mu,\sigma^2/n),$$
# for large $n$.
#
# In this exercise, you will convince yourself of this theorem numerically. Here is a recipe for how to do it:
# - Pick your probability distribution. The CLT even works for discrete random variables!
# - Generate a random $n \times m$ matrix ($n$ rows, $m$ columns) of realizations from that distribution.
# - For each column, find the sample mean $\overline X_n$ of the $n$ samples, by taking the mean along the first (0-th) dimension. You now have $m$ independent realizations of the sample mean $\overline X_n$.
# - You can think of each column as an experiment where you take $n$ samples and average over them. We want to know the distribution of the sample-mean. The $m$ columns represent $m$ experiments, and thus provide us with $m$ realizations of the sample mean random variable. From these we can approximate a distribution of the sample mean (via, e.g. a histogram).
# - On top of the histogram of the sample mean distribution, plot the pdf of a normal distribution with the same process mean and process variance as the sample mean of the distribution of $\overline X_n$.
#
#
# ## Question 1: Continuous random variables:
# Demonstrate, numerically, that the sample mean of a number of Gamma-distributed random variables is approximately normal. https://en.wikipedia.org/wiki/Gamma_distribution
#
# Plot the distribution of the sample mean for $n=[1,5,25,100]$,using $m=10,000$, and overlay it with a normal pdf. For best visualization,use values of $\alpha=1$ loc$=0$, scale=$1$ for the gamma distribution; 30 bins for the histogram; and set the x-limits of [3,6] for all four values of $n$.
#
# ### Answer:
# In[209]:
# CLT demo: sampling distribution of the mean of n gamma variates, for
# several n, overlaid with a matching normal pdf.
m = 10000
n = [1, 5, 20, 100]
Nbins = 30
fig, ax = plt.subplots(4, 1, figsize=[8, 8])
alpha, loc, scale = 1, 0, 1
z = np.linspace(0, 5, 100)
for axis, n_samples in zip(ax, n):
    # Each column is one experiment of n_samples draws; its mean is one
    # realization of the sample-mean random variable.
    x = stats.gamma.rvs(alpha, loc=loc, scale=scale, size=[n_samples, m])
    sample_mean = np.mean(x, axis=0)
    norm_pdf = stats.norm.pdf(z, loc=np.mean(sample_mean), scale=np.std(sample_mean))
    axis.hist(sample_mean, Nbins, rwidth=1, density=True)
    axis.plot(z, norm_pdf)
    axis.set_xlim(left=0, right=4)
# ## Question 2: Discrete random variables:
# Demonstrate, numerically, that the sample mean of a large number of random dice throws is approximately normal.
#
# Simulate the dice using a discrete uniform random variables <code>stats.randint.rvs</code>, taking values from 1 to 6 (remember Python is right exclusive). The sample mean $\overline X_n$ is thus equivalnt to the average value of the dice throw $n$ throws.
#
# Plot the normalized (density=True) histogram for $n=[1,2,25,200]$, using $m=100,000$, and overlay it with a normal pdf. For best visualization use 50 bins for the histogram, and set the x-limits of [1,6] for all four values of $n$.
# ### Answer
# In[224]:
# CLT demo with a discrete variable: average of n dice throws, overlaid
# with a matching normal pdf.
m = 100000
n = [1, 2, 25, 200]
Nbins = 50
fig, ax = plt.subplots(4, 1, figsize=[16, 8])
alpha, loc, scale = 1, 0, 1  # kept for parity with the gamma cell (unused here)
z = np.linspace(0, 7, 1000)
for axis, n_samples in zip(ax, n):
    # Dice: discrete uniform on {1..6} (randint is right-exclusive).
    x = stats.randint.rvs(1, 7, size=[n_samples, m])
    sample_mean = np.mean(x, axis=0)
    norm_pdf = stats.norm.pdf(z, loc=np.mean(sample_mean), scale=np.std(sample_mean))
    axis.hist(sample_mean, Nbins, rwidth=1, density=True)
    axis.plot(z, norm_pdf)
    axis.set_xlim(left=1, right=6)
# ## Question 3: Precip in Urbana
# Plot the histograms of precipitation in urbana on hourly, daily, monthly, and annual time scales. What do you observe?
#
# For convenience, I've downloaded 4-times daily hourly data from ERA5 for the gridcell representing Urbana. We'll use xarray since it makes it very easy to compute daily-, monthly-, and annual-total precipitation.
#
# The cell below computes hourly, daily, monthly, and annual values of precipitation. All you have to do is plot their histograms
# In[42]:
import xarray as xr
#convert from m/hr to inches/hr, taking into account we only sample 4hrs of the day
# NOTE(review): unit_conv = 1000/24.5*6 -- presumably x1000 converts metres to
# mm, /24.5 converts mm to inches (25.4 mm/inch would be the exact factor --
# verify), and x6 scales the 4-times-daily samples up to a daily total.
ds=xr.open_dataset('/data/keeling/a/cristi/SIMLES/data/ERA5precip_urbana_1950-2021.nc');
unit_conv=1000/24.5*6
# Hourly precip, then daily/monthly/annual totals via xarray resampling.
pr_hr =ds.tp*unit_conv;
pr_day =pr_hr.resample(time='1D').sum('time')
pr_mon=pr_hr.resample(time='1M').sum('time')
pr_yr =pr_hr.resample(time='1Y').sum('time')
Nbins=15;
# ### Answer
# In[41]:
# Histograms of precipitation accumulated over the four time scales.
Nbins = 15
fig, ax = plt.subplots(2, 2, figsize=[12, 12])
ax[0, 0].hist(pr_hr, Nbins, rwidth=0.9)   # hourly
ax[0, 1].hist(pr_day, Nbins, rwidth=0.9)  # daily
ax[1, 0].hist(pr_mon, Nbins, rwidth=0.9)  # monthly (stray "4" literal removed)
ax[1, 1].hist(pr_yr, Nbins, rwidth=0.9)   # annual
# <hr style="border:2px solid black"> </hr>
# # Exercise 4: Houston precipitation return times via MLE
# In the wake of <NAME>, many have described the assocaited flooding as a "500-year event". How can this be, given that in most places there are only a few decades of data available? In this exercise we apply a simple (and most likely wrong) methodology to estimate _return periods_, and comment on the wisdom of that concept.
#
# Let's load and get to know the data. We are looking at daily precip data (in cm) at Beaumont Research Center and Port Arthur, two of the weather stations in the Houston area that reported very high daily precip totals.
#
# The data comes from NOAA GHCN:<br>
# https://www.ncdc.noaa.gov/cdo-web/datasets/GHCND/stations/GHCND:USC00410613/detail<br>
# https://www.ncdc.noaa.gov/cdo-web/datasets/GHCND/stations/GHCND:USW00012917/detail
#
# In[256]:
# read data and take a cursory look
#df=pd.read_csv('/data/keeling/a/cristi/SIMLES/data/Beaumont_precip.csv')
df=pd.read_csv('/data/keeling/a/cristi/SIMLES/data/PortArthur_precip.csv')
df.head()

# In[257]:

# Plot the raw daily precipitation record.
precip_raw = df['PRCP'].values
precip_raw = precip_raw[~np.isnan(precip_raw)]  # take out nans
fig, ax = plt.subplots(1, 1)
ax.plot(precip_raw)
ax.set_xlabel('day since beginning of record')
ax.set_ylabel('Daily Precip (cm)')
# In[258]:
# Histogram of all daily totals on linear (left) and log-count (right) axes;
# a log axis suits heavy-tailed, gamma-like distributions.
# Bin edges run from 0 up to just above the largest observed value.
bin_edge_low = 0
bin_edge_high = np.round(np.max(precip_raw) + 1)
bin_width = 0.25
bin_edges = np.arange(bin_edge_low, bin_edge_high, bin_width)

fig, ax = plt.subplots(1, 2)
for axis in ax:
    axis.hist(precip_raw, bin_edges, rwidth=0.9)
    axis.set_xlabel('daily precip (cm)')
    axis.set_ylabel('count (number of days)')
    axis.grid()
ax[1].set_yscale('log')
# In[259]:
# The jump in the first bin indicates a probability mass at zero (a large
# number of days see no precipitation at all), so keep only wet days.
precip = precip_raw[precip_raw > 0.01]

# Histogram of wet-day precipitation: linear (left) and log-count (right).
# (Duplicated set_xlabel/set_ylabel calls on ax[0] removed.)
fig, ax = plt.subplots(1, 2)
for axis in ax:
    axis.hist(precip, bin_edges, rwidth=0.9)
    axis.grid()
    axis.set_xlabel('daily precip (cm)')
    axis.set_ylabel('count (number of days)')
ax[1].set_yscale('log')
# ## Question 1:
# Fit an gamma distribution to the data, using the <code>stats.gamma.fit</code> method to obtain maximum likelihood estimates for the parameters.
# Show the fit by overlaying the pdf of the gamma distribution with mle parameters on top of the histogram of daily precipitation at Beaumont Research Center.
#
# Hints:
# - you'll need to show a *density* estimate of the histogram, unlike the count i.e. ensure <code>density=True</code>.
# - The method will output the thre parameters of the gamma random variable: <code>a,loc,scale</code> (see documentation <a href="https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.gamma.html"> here</a>). So you'll need to call it as <code>alpha_mle,loc_mle,scale_mle=stats.gama.fit( .... )</code>
#
# ### Answer:
# In[253]:
# Maximum-likelihood fit of a gamma distribution to wet-day precipitation.
alpha_mle, loc_mle, scale_mle = stats.gamma.fit(precip)
x_plot = np.linspace(0, np.max(precip), 200)
gamma_pdf = stats.gamma.pdf(x_plot, alpha_mle, loc_mle, scale_mle)

# Overlay the fitted pdf on the density-normalised histogram (linear left,
# log-count right).
fig, ax = plt.subplots(1, 2)
for axis in ax:
    axis.hist(precip, bin_edges, rwidth=0.9, density=True)
    axis.plot(x_plot, gamma_pdf)
ax[0].set_xlabel('daily precip (cm)')
ax[0].set_ylabel('count (number of days)')
ax[1].set_yscale('log')

# In[263]:

np.max(precip)  # largest recorded daily total (cm)
# ## Question 2:
# Compute the return time of the rainiest day recorded at Beaumont Research Center (in years).
#
# What does this mean? The rainiest day at Beaumont brought $x$ cm. The return time represents how often we would expect to get $x$ cm or more of rain at Beaumont.
#
# To compute the return time we need to compute the probability of daily rain >$x$ cm. The inverse of this probability is the frequency of daily rain >$x$ cm.
#
# For example, if the probability of daily rain > 3 cm =1/30, it means we would expect that it rains 3 cm or more once about every 30 day, and we would say 3 cm is a 10 day event.
#
# For the largest precip event the probability will be significantly smaller, and thus the return time significantly larger
#
# *Hint*: Remember that the probability of daily rain being *less* than $x$ cm is given by the CDF: $$F(x)=P(\text{daily rain}<x\text{ cm})$$.
# *Hint*: The answer should only take a very small number of lines of code
# ### Answer
# In[264]:
# Exceedance probability of the record daily total under the fitted gamma,
# inverted to a return time in years (dividing the day count by 365).
gamma_F = stats.gamma.cdf(x_plot, alpha_mle, loc_mle, scale_mle)
record = np.max(precip)
prob = 1 - stats.gamma.cdf(record, alpha_mle, loc_mle, scale_mle)
1 / prob / 365
# ## Question 3:
# Repeat the analysis for the Port Arthur data. If you fit a Gamma ditribution and compute the return time of the largest daily rain event, what is the return time?
#
# Does that seem reasonable? Why do you think the statistical model fails here? Think of the type of precipitation events that make up the precipitation data at Port Arthur
#
# {
# "tags": [
# "margin",
# ]
# }
# ### Answer
# In[260]:
# read data and take a cursory look
df = pd.read_csv('/data/keeling/a/cristi/SIMLES/data/PortArthur_precip.csv')
df.head()

# Clean NaNs and keep wet days only, as for the first station.
precip_raw = df['PRCP'].values
precip_raw = precip_raw[~np.isnan(precip_raw)]  # take out nans
precip = precip_raw[precip_raw > 0.01]

# Gamma MLE fit and pdf overlay on the density-normalised histogram.
alpha_mle, loc_mle, scale_mle = stats.gamma.fit(precip)
x_plot = np.linspace(0, np.max(precip), 200)
gamma_pdf = stats.gamma.pdf(x_plot, alpha_mle, loc_mle, scale_mle)

fig, ax = plt.subplots(1, 2)
for axis in ax:
    axis.hist(precip, bin_edges, rwidth=0.9, density=True)
    axis.plot(x_plot, gamma_pdf)
ax[0].set_xlabel('daily precip (cm)')
ax[0].set_ylabel('count (number of days)')
ax[1].set_yscale('log')

# In[261]:

# Return time (years) of the record daily total at Port Arthur.
gamma_F = stats.gamma.cdf(x_plot, alpha_mle, loc_mle, scale_mle)
prob = 1 - stats.gamma.cdf(np.max(precip), alpha_mle, loc_mle, scale_mle)
1 / prob / 365
| [
"matplotlib.pyplot.grid",
"matplotlib.pyplot.hist",
"scipy.stats.gamma.rvs",
"pandas.read_csv",
"scipy.stats.randint.rvs",
"scipy.stats.uniform.rvs",
"numpy.arange",
"numpy.mean",
"scipy.stats.gamma.pdf",
"matplotlib.pyplot.plot",
"numpy.max",
"numpy.linspace",
"numpy.abs",
"scipy.stats.ga... | [((649, 676), 'matplotlib.pyplot.rcParams.update', 'plt.rcParams.update', (['params'], {}), '(params)\n', (668, 676), True, 'import matplotlib.pyplot as plt\n'), ((3609, 3656), 'scipy.stats.uniform.rvs', 'stats.uniform.rvs', ([], {'loc': '(-1)', 'scale': '(2)', 'size': 'Ndraws'}), '(loc=-1, scale=2, size=Ndraws)\n', (3626, 3656), False, 'from scipy import stats\n'), ((3658, 3676), 'numpy.zeros', 'np.zeros', (['[Ndraws]'], {}), '([Ndraws])\n', (3666, 3676), True, 'import numpy as np\n'), ((3758, 3777), 'matplotlib.pyplot.plot', 'plt.plot', (['X', 'Y', '"""."""'], {}), "(X, Y, '.')\n", (3766, 3777), True, 'import matplotlib.pyplot as plt\n'), ((3889, 3900), 'numpy.zeros', 'np.zeros', (['M'], {}), '(M)\n', (3897, 3900), True, 'import numpy as np\n'), ((4156, 4183), 'matplotlib.pyplot.hist', 'plt.hist', (['scov'], {'rwidth': '(0.98)'}), '(scov, rwidth=0.98)\n', (4164, 4183), True, 'import matplotlib.pyplot as plt\n'), ((4245, 4261), 'numpy.zeros', 'np.zeros', (['Ndraws'], {}), '(Ndraws)\n', (4253, 4261), True, 'import numpy as np\n'), ((4266, 4313), 'scipy.stats.uniform.rvs', 'stats.uniform.rvs', ([], {'loc': '(-1)', 'scale': '(2)', 'size': 'Ndraws'}), '(loc=-1, scale=2, size=Ndraws)\n', (4283, 4313), False, 'from scipy import stats\n'), ((4314, 4332), 'numpy.zeros', 'np.zeros', (['[Ndraws]'], {}), '([Ndraws])\n', (4322, 4332), True, 'import numpy as np\n'), ((4509, 4523), 'matplotlib.pyplot.plot', 'plt.plot', (['scov'], {}), '(scov)\n', (4517, 4523), True, 'import matplotlib.pyplot as plt\n'), ((4527, 4537), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (4535, 4537), True, 'import matplotlib.pyplot as plt\n'), ((6808, 6842), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(4)', '(1)'], {'figsize': '[8, 8]'}), '(4, 1, figsize=[8, 8])\n', (6820, 6842), True, 'import matplotlib.pyplot as plt\n'), ((7945, 7980), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(4)', '(1)'], {'figsize': '[16, 8]'}), '(4, 1, figsize=[16, 8])\n', (7957, 7980), 
True, 'import matplotlib.pyplot as plt\n'), ((8964, 9053), 'xarray.open_dataset', 'xr.open_dataset', (['"""/data/keeling/a/cristi/SIMLES/data/ERA5precip_urbana_1950-2021.nc"""'], {}), "(\n '/data/keeling/a/cristi/SIMLES/data/ERA5precip_urbana_1950-2021.nc')\n", (8979, 9053), True, 'import xarray as xr\n'), ((9289, 9325), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)', '(2)'], {'figsize': '[12, 12]'}), '(2, 2, figsize=[12, 12])\n', (9301, 9325), True, 'import matplotlib.pyplot as plt\n'), ((10481, 10552), 'pandas.read_csv', 'pd.read_csv', (['"""/data/keeling/a/cristi/SIMLES/data/PortArthur_precip.csv"""'], {}), "('/data/keeling/a/cristi/SIMLES/data/PortArthur_precip.csv')\n", (10492, 10552), True, 'import pandas as pd\n'), ((10707, 10725), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {}), '(1, 1)\n', (10719, 10725), True, 'import matplotlib.pyplot as plt\n'), ((11158, 11207), 'numpy.arange', 'np.arange', (['bin_edge_low', 'bin_edge_high', 'bin_width'], {}), '(bin_edge_low, bin_edge_high, bin_width)\n', (11167, 11207), True, 'import numpy as np\n'), ((11214, 11232), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(2)'], {}), '(1, 2)\n', (11226, 11232), True, 'import matplotlib.pyplot as plt\n'), ((11831, 11849), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(2)'], {}), '(1, 2)\n', (11843, 11849), True, 'import matplotlib.pyplot as plt\n'), ((13055, 13078), 'scipy.stats.gamma.fit', 'stats.gamma.fit', (['precip'], {}), '(precip)\n', (13070, 13078), False, 'from scipy import stats\n'), ((13130, 13184), 'scipy.stats.gamma.pdf', 'stats.gamma.pdf', (['x_plot', 'alpha_mle', 'loc_mle', 'scale_mle'], {}), '(x_plot, alpha_mle, loc_mle, scale_mle)\n', (13145, 13184), False, 'from scipy import stats\n'), ((13224, 13242), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(2)'], {}), '(1, 2)\n', (13236, 13242), True, 'import matplotlib.pyplot as plt\n'), ((13529, 13543), 'numpy.max', 'np.max', (['precip'], {}), '(precip)\n', (13535, 
13543), True, 'import numpy as np\n'), ((14558, 14612), 'scipy.stats.gamma.cdf', 'stats.gamma.cdf', (['x_plot', 'alpha_mle', 'loc_mle', 'scale_mle'], {}), '(x_plot, alpha_mle, loc_mle, scale_mle)\n', (14573, 14612), False, 'from scipy import stats\n'), ((15171, 15242), 'pandas.read_csv', 'pd.read_csv', (['"""/data/keeling/a/cristi/SIMLES/data/PortArthur_precip.csv"""'], {}), "('/data/keeling/a/cristi/SIMLES/data/PortArthur_precip.csv')\n", (15182, 15242), True, 'import pandas as pd\n'), ((15440, 15463), 'scipy.stats.gamma.fit', 'stats.gamma.fit', (['precip'], {}), '(precip)\n', (15455, 15463), False, 'from scipy import stats\n'), ((15515, 15569), 'scipy.stats.gamma.pdf', 'stats.gamma.pdf', (['x_plot', 'alpha_mle', 'loc_mle', 'scale_mle'], {}), '(x_plot, alpha_mle, loc_mle, scale_mle)\n', (15530, 15569), False, 'from scipy import stats\n'), ((15609, 15627), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(2)'], {}), '(1, 2)\n', (15621, 15627), True, 'import matplotlib.pyplot as plt\n'), ((15922, 15976), 'scipy.stats.gamma.cdf', 'stats.gamma.cdf', (['x_plot', 'alpha_mle', 'loc_mle', 'scale_mle'], {}), '(x_plot, alpha_mle, loc_mle, scale_mle)\n', (15937, 15976), False, 'from scipy import stats\n'), ((3928, 3975), 'scipy.stats.uniform.rvs', 'stats.uniform.rvs', ([], {'loc': '(-1)', 'scale': '(2)', 'size': 'Ndraws'}), '(loc=-1, scale=2, size=Ndraws)\n', (3945, 3975), False, 'from scipy import stats\n'), ((3981, 3999), 'numpy.zeros', 'np.zeros', (['[Ndraws]'], {}), '([Ndraws])\n', (3989, 3999), True, 'import numpy as np\n'), ((4191, 4204), 'numpy.mean', 'np.mean', (['scov'], {}), '(scov)\n', (4198, 4204), True, 'import numpy as np\n'), ((6891, 6951), 'scipy.stats.gamma.rvs', 'stats.gamma.rvs', (['alpha'], {'loc': 'loc', 'scale': 'scale', 'size': '[n[j], m]'}), '(alpha, loc=loc, scale=scale, size=[n[j], m])\n', (6906, 6951), False, 'from scipy import stats\n'), ((6964, 6982), 'numpy.mean', 'np.mean', (['x'], {'axis': '(0)'}), '(x, axis=0)\n', (6971, 6982), True, 
'import numpy as np\n'), ((6990, 7012), 'numpy.linspace', 'np.linspace', (['(0)', '(5)', '(100)'], {}), '(0, 5, 100)\n', (7001, 7012), True, 'import numpy as np\n'), ((8029, 8068), 'scipy.stats.randint.rvs', 'stats.randint.rvs', (['(1)', '(7)'], {'size': '[n[j], m]'}), '(1, 7, size=[n[j], m])\n', (8046, 8068), False, 'from scipy import stats\n'), ((8082, 8100), 'numpy.mean', 'np.mean', (['x'], {'axis': '(0)'}), '(x, axis=0)\n', (8089, 8100), True, 'import numpy as np\n'), ((8108, 8131), 'numpy.linspace', 'np.linspace', (['(0)', '(7)', '(1000)'], {}), '(0, 7, 1000)\n', (8119, 8131), True, 'import numpy as np\n'), ((13100, 13114), 'numpy.max', 'np.max', (['precip'], {}), '(precip)\n', (13106, 13114), True, 'import numpy as np\n'), ((15485, 15499), 'numpy.max', 'np.max', (['precip'], {}), '(precip)\n', (15491, 15499), True, 'import numpy as np\n'), ((10654, 10674), 'numpy.isnan', 'np.isnan', (['precip_raw'], {}), '(precip_raw)\n', (10662, 10674), True, 'import numpy as np\n'), ((11110, 11128), 'numpy.max', 'np.max', (['precip_raw'], {}), '(precip_raw)\n', (11116, 11128), True, 'import numpy as np\n'), ((14633, 14647), 'numpy.max', 'np.max', (['precip'], {}), '(precip)\n', (14639, 14647), True, 'import numpy as np\n'), ((15330, 15350), 'numpy.isnan', 'np.isnan', (['precip_raw'], {}), '(precip_raw)\n', (15338, 15350), True, 'import numpy as np\n'), ((15997, 16011), 'numpy.max', 'np.max', (['precip'], {}), '(precip)\n', (16003, 16011), True, 'import numpy as np\n'), ((3737, 3749), 'numpy.abs', 'np.abs', (['X[i]'], {}), '(X[i])\n', (3743, 3749), True, 'import numpy as np\n'), ((4393, 4405), 'numpy.abs', 'np.abs', (['X[i]'], {}), '(X[i])\n', (4399, 4405), True, 'import numpy as np\n'), ((7046, 7066), 'numpy.mean', 'np.mean', (['sample_mean'], {}), '(sample_mean)\n', (7053, 7066), True, 'import numpy as np\n'), ((7073, 7092), 'numpy.std', 'np.std', (['sample_mean'], {}), '(sample_mean)\n', (7079, 7092), True, 'import numpy as np\n'), ((8165, 8185), 'numpy.mean', 'np.mean', 
(['sample_mean'], {}), '(sample_mean)\n', (8172, 8185), True, 'import numpy as np\n'), ((8192, 8211), 'numpy.std', 'np.std', (['sample_mean'], {}), '(sample_mean)\n', (8198, 8211), True, 'import numpy as np\n'), ((3805, 3815), 'numpy.mean', 'np.mean', (['X'], {}), '(X)\n', (3812, 3815), True, 'import numpy as np\n'), ((3820, 3830), 'numpy.mean', 'np.mean', (['Y'], {}), '(Y)\n', (3827, 3830), True, 'import numpy as np\n'), ((4069, 4081), 'numpy.abs', 'np.abs', (['X[i]'], {}), '(X[i])\n', (4075, 4081), True, 'import numpy as np\n'), ((4126, 4136), 'numpy.mean', 'np.mean', (['X'], {}), '(X)\n', (4133, 4136), True, 'import numpy as np\n'), ((4141, 4151), 'numpy.mean', 'np.mean', (['Y'], {}), '(Y)\n', (4148, 4151), True, 'import numpy as np\n'), ((4465, 4480), 'numpy.mean', 'np.mean', (['X[0:i]'], {}), '(X[0:i])\n', (4472, 4480), True, 'import numpy as np\n'), ((4490, 4505), 'numpy.mean', 'np.mean', (['Y[0:i]'], {}), '(Y[0:i])\n', (4497, 4505), True, 'import numpy as np\n')] |
# -*-coding:utf-8-*-
"""
This module is an example for Swadesh corpus retrieval
"""
import re
import numpy as np
from nltk.corpus import swadesh
__author__ = "besnier"
# Language codes understood by nltk's Swadesh corpus.
germanic_languages = ["en", "de", "nl"]
roman_languages = ["fr", "es", "it"]
# Lower-case Latin alphabet, written in French AZERTY keyboard order.
alphabet = list('azertyuiopqsdfghjklmwxcvbn')
# Parallel Swadesh word tuples for each language family.
to_aligner_ger = swadesh.entries(germanic_languages)
to_aligner_rom = swadesh.entries(roman_languages)
def vocabulary_retrieve(languages, normalize):
    """
    Load the Swadesh entries for the given languages and normalise them.

    :param languages: list of language codes understood by nltk's Swadesh corpus
    :param normalize: callable mapping a raw word to its normalised form
    :return: (words, characters) where words[i][j] is concept i's word in
             language j as a list of characters, and characters is the list
             of all characters encountered.
    """
    entries = swadesh.entries(languages)
    characters = set()
    normalised_words = []
    for concept in entries:
        row = [list(normalize(concept[j])) for j in range(len(languages))]
        for word in row:
            characters.update(word)
        normalised_words.append(row)
    return normalised_words, list(characters)
def normalise_rom(word):
    """
    Normalise a French, Spanish or Italian word: keep only the part before
    any comma, lower-case it, strip parenthesised glosses, spaces, ellipses
    and apostrophes, and transliterate accented characters to ASCII.

    :param word: raw corpus word
    :return: normalised word
    """
    # Keep only the first comma-separated variant (no-op when no comma).
    word = word.split(',', 1)[0]
    word = re.sub(r' \([\w ]*\)', '', word.lower())
    replacements = (
        (' ', ''), ('...', ''), ("'", ''),
        ('ù', 'u'), ('œ', 'oe'), ('è', 'e'), ('é', 'e'),
        ('á', 'a'), ('à', 'a'), ('ñ', 'n'), ('í', 'i'),
        ('â', 'a'), ('ê', 'e'), ('û', 'u'), ('ó', 'o'),
        ('ú', 'u'), ('ü', 'u'),
    )
    for old, new in replacements:
        word = word.replace(old, new)
    return word
def normalise_ger(word):
    """
    Normalise a western Germanic (English/German/Dutch) word: keep only the
    part before any comma, lower-case it, strip parenthesised glosses and
    spaces, and transliterate umlauts and other accents to ASCII.

    :param word: raw corpus word
    :return: normalised word
    """
    # Keep only the first comma-separated variant (no-op when no comma).
    word = word.split(',', 1)[0]
    word = re.sub(r' \([\w ]*\)', '', word.lower())
    replacements = (
        (' ', ''), ('ß', 'ss'), ('ö', 'oe'), ('ü', 'ue'), ('ä', 'ae'),
        ('á', 'a'), ('à', 'a'), ('í', 'i'), ('â', 'a'), ('ê', 'e'),
        ('û', 'u'), ('ó', 'o'), ('ú', 'u'), ('å', 'aa'),
    )
    for old, new in replacements:
        word = word.replace(old, new)
    return word
def list_to_pairs(l):
    """
    Expand each inner list into all unordered pairs of its elements.

    From list(list(str)) to list([np.array, np.array]): every pair of
    positions (j, k) with j < k inside the same inner list becomes a
    two-element list of numpy arrays.

    :param l: list of lists
    :return: list of [np.array, np.array] pairs
    """
    return [
        [np.array(group[j]), np.array(group[k])]
        for group in l
        for j in range(len(group) - 1)
        for k in range(j + 1, len(group))
    ]
if __name__ == "__main__":
    # print(list_to_pairs(ger))
    # print(list_to_pairs(rom))
    # Smoke-test corpus retrieval for both language families.
    vocabulary_retrieve(germanic_languages, normalise_ger)
    vocabulary_retrieve(roman_languages, normalise_rom)
| [
"numpy.array",
"nltk.corpus.swadesh.entries"
] | [((314, 349), 'nltk.corpus.swadesh.entries', 'swadesh.entries', (['germanic_languages'], {}), '(germanic_languages)\n', (329, 349), False, 'from nltk.corpus import swadesh\n'), ((367, 399), 'nltk.corpus.swadesh.entries', 'swadesh.entries', (['roman_languages'], {}), '(roman_languages)\n', (382, 399), False, 'from nltk.corpus import swadesh\n'), ((598, 624), 'nltk.corpus.swadesh.entries', 'swadesh.entries', (['languages'], {}), '(languages)\n', (613, 624), False, 'from nltk.corpus import swadesh\n'), ((2817, 2831), 'numpy.array', 'np.array', (['i[j]'], {}), '(i[j])\n', (2825, 2831), True, 'import numpy as np\n'), ((2833, 2847), 'numpy.array', 'np.array', (['i[k]'], {}), '(i[k])\n', (2841, 2847), True, 'import numpy as np\n')] |
import numpy as np
import os
import platform
import xdg
import py4dgeo._py4dgeo as _py4dgeo
class Py4DGeoError(Exception):
    """Exception type raised for py4dgeo-specific error conditions."""

    pass
def _candidate_paths(filename):
    """Lazily yield the locations where *filename* may be found.

    Order: current working directory first, then the XDG data directories
    (only on Linux/macOS, where XDG is defined). Lazy generation means the
    XDG lookup is never touched when an earlier candidate already exists.
    """
    yield os.path.join(os.getcwd(), filename)
    if platform.system() in ["Linux", "Darwin"]:
        for xdg_dir in xdg.xdg_data_dirs():
            yield os.path.join(xdg_dir, filename)


def find_file(filename):
    """Find a file of given name on the file system.

    This function is intended to use in tests and demo applications
    to locate data files without resorting to absolute paths. You may
    use it for your code as well.

    It looks in the following locations:

    * If an absolute filename is given, it is used
    * Check whether the given relative path exists with respect to the current working directory
    * Check whether the given relative path exists with respect to the specified XDG data directory (e.g. through the environment variable :code:`XDG_DATA_DIRS`).

    :param filename:
        The (relative) filename to search for
    :type filename: str
    :return: An absolute filename
    :raises FileNotFoundError: if the file exists in none of the candidate locations
    """
    # Absolute paths are returned unchanged.
    if os.path.isabs(filename):
        return filename

    tried = []
    for candidate in _candidate_paths(filename):
        tried.append(candidate)
        if os.path.exists(candidate):
            return candidate

    # Bug fix: the message previously printed "(unknown)" instead of the
    # requested filename (broken f-string placeholder).
    raise FileNotFoundError(
        f"Cannot locate file {filename}. Tried the following locations: {', '.join(tried)}"
    )
class MemoryPolicy(_py4dgeo.MemoryPolicy):
    """A descriptor for py4dgeo's memory usage policy

    This can be used to describe the memory usage policy that py4dgeo
    should follow. The implementation of py4dgeo checks the currently
    set policy whenever it would make a memory allocation of the same order
    of magnitude as the input pointcloud or the set of corepoints.
    To globally set the policy, use :func:`~py4dgeo.set_memory_policy`.

    Currently the following policies are available (ordered from most to
    least restrictive):

    * :code:`STRICT`: py4dgeo is not allowed to do additional memory allocations.
      If such an allocation would be required, an error is thrown.
    * :code:`MINIMAL`: py4dgeo is allowed to do additional memory allocations if
      and only if they are necessary for a seemless operation of the library.
    * :code:`COREPOINTS`: py4dgeo is allowed to do additional memory allocations
      as part of performance trade-off considerations (e.g. precompute vs. recompute),
      but only if the allocation is on the order of the number of corepoints.
      This is the default behaviour of py4dgeo.
    * :code:`RELAXED`: py4dgeo is allowed to do additional memory allocations as
      part of performance trade-off considerations (e.g. precompute vs. recompute).
    """

    # The enum members themselves are inherited from the C++ extension type.
    pass
# Module-level storage for the active memory policy. COREPOINTS is the
# documented default; change it through set_memory_policy(), read it through
# get_memory_policy().
_policy = MemoryPolicy.COREPOINTS
def set_memory_policy(policy: MemoryPolicy):
    """Globally set py4dgeo's memory policy

    For details about the memory policy, see :class:`~py4dgeo.MemoryPolicy`.
    Use this once before performing any operations. Changing the memory policy
    in the middle of the computation results in undefined behaviour.

    :param policy: The policy value to globally set
    :type policy: MemoryPolicy
    """
    global _policy
    _policy = policy
def get_memory_policy():
    """Access the globally set memory policy

    :returns: the currently active module-level policy
    :rtype: MemoryPolicy
    """
    return _policy
def memory_policy_is_minimum(policy: MemoryPolicy):
    """Whether or not the globally set memory policy is at least the given one

    :param policy: The policy value to compare against
    :type policy: MemoryPolicy
    :returns: Whether the globally set policy is at least the given one
    :rtype: bool
    """
    current = get_memory_policy()
    return current >= policy
def make_contiguous(arr: np.ndarray):
    """Make a numpy array contiguous

    Already C-contiguous arrays are returned unchanged; otherwise a
    C-ordered copy is made. Copying requires the global memory policy to
    be at least MINIMAL.

    :param arr: The numpy array
    :type arr: np.ndarray
    """
    if not arr.flags["C_CONTIGUOUS"]:
        if not memory_policy_is_minimum(MemoryPolicy.MINIMAL):
            raise Py4DGeoError(
                "Using non-contiguous memory layouts requires at least the MINIMAL memory policy"
            )
        return np.copy(arr, order="C")
    return arr
def _as_dtype(arr, dtype, policy_check):
if np.issubdtype(arr.dtype, dtype):
return arr
if policy_check and not memory_policy_is_minimum(MemoryPolicy.MINIMAL):
raise Py4DGeoError(
f"py4dgeo expected an input of type {np.dtype(dtype).name}, but got {np.dtype(arr.dtype).name}. Current memory policy forbids automatic cast."
)
return np.asarray(arr, dtype=dtype)
def as_double_precision(arr: np.ndarray, policy_check=True):
    """Ensure that a numpy array is double precision

    This is a no-op if the array is already double precision and makes a copy
    if it is not. It checks py4dgeo's memory policy before copying.

    :param arr: The numpy array
    :type arr: np.ndarray
    :param policy_check: whether to enforce the memory policy before casting
    :type policy_check: bool
    """
    return _as_dtype(arr, np.float64, policy_check)
def as_single_precision(arr: np.ndarray, policy_check=True):
    """Ensure that a numpy array is single precision

    This is a no-op if the array is already single precision and makes a copy
    if it is not. It checks py4dgeo's memory policy before copying.

    :param arr: The numpy array
    :type arr: np.ndarray
    :param policy_check: whether to enforce the memory policy before casting
    :type policy_check: bool
    """
    return _as_dtype(arr, np.float32, policy_check)
def set_num_threads(num_threads: int):
    """Set the number of threads to use in py4dgeo

    :param num_threads: The number of threads to use
    :type num_threads: int
    :raises Py4DGeoError: if the C++ extension was built without OpenMP
        and more than one thread is requested
    """
    try:
        _py4dgeo.omp_set_num_threads(num_threads)
    except AttributeError:
        # The C++ library was built without OpenMP! Requesting a single
        # thread is still fine in that case.
        if num_threads != 1:
            raise Py4DGeoError("py4dgeo was built without threading support!")
def get_num_threads():
    """Get the number of threads currently used by py4dgeo

    :return: The number of threads
    :rtype: int
    """
    max_threads = getattr(_py4dgeo, "omp_get_max_threads", None)
    if max_threads is None:
        # The C++ library was built without OpenMP!
        return 1
    return max_threads()
| [
"numpy.copy",
"os.path.exists",
"os.path.isabs",
"py4dgeo._py4dgeo.omp_set_num_threads",
"numpy.asarray",
"os.path.join",
"os.getcwd",
"numpy.issubdtype",
"platform.system",
"xdg.xdg_data_dirs",
"py4dgeo._py4dgeo.omp_get_max_threads",
"numpy.dtype"
] | [((932, 955), 'os.path.isabs', 'os.path.isabs', (['filename'], {}), '(filename)\n', (945, 955), False, 'import os\n'), ((4461, 4484), 'numpy.copy', 'np.copy', (['arr'], {'order': '"""C"""'}), "(arr, order='C')\n", (4468, 4484), True, 'import numpy as np\n'), ((4535, 4566), 'numpy.issubdtype', 'np.issubdtype', (['arr.dtype', 'dtype'], {}), '(arr.dtype, dtype)\n', (4548, 4566), True, 'import numpy as np\n'), ((4869, 4897), 'numpy.asarray', 'np.asarray', (['arr'], {'dtype': 'dtype'}), '(arr, dtype=dtype)\n', (4879, 4897), True, 'import numpy as np\n'), ((1202, 1219), 'platform.system', 'platform.system', ([], {}), '()\n', (1217, 1219), False, 'import platform\n'), ((1267, 1286), 'xdg.xdg_data_dirs', 'xdg.xdg_data_dirs', ([], {}), '()\n', (1284, 1286), False, 'import xdg\n'), ((1455, 1480), 'os.path.exists', 'os.path.exists', (['candidate'], {}), '(candidate)\n', (1469, 1480), False, 'import os\n'), ((5861, 5902), 'py4dgeo._py4dgeo.omp_set_num_threads', '_py4dgeo.omp_set_num_threads', (['num_threads'], {}), '(num_threads)\n', (5889, 5902), True, 'import py4dgeo._py4dgeo as _py4dgeo\n'), ((6259, 6289), 'py4dgeo._py4dgeo.omp_get_max_threads', '_py4dgeo.omp_get_max_threads', ([], {}), '()\n', (6287, 6289), True, 'import py4dgeo._py4dgeo as _py4dgeo\n'), ((1135, 1146), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (1144, 1146), False, 'import os\n'), ((1318, 1349), 'os.path.join', 'os.path.join', (['xdg_dir', 'filename'], {}), '(xdg_dir, filename)\n', (1330, 1349), False, 'import os\n'), ((4741, 4756), 'numpy.dtype', 'np.dtype', (['dtype'], {}), '(dtype)\n', (4749, 4756), True, 'import numpy as np\n'), ((4773, 4792), 'numpy.dtype', 'np.dtype', (['arr.dtype'], {}), '(arr.dtype)\n', (4781, 4792), True, 'import numpy as np\n')] |
from src.scenario.scenario_generator import ScenarioGenerator
from src.executors.exact.solve_opf import solve
from src.scenario.scenario import Scenario
from src.grid.grid import Grid
import numpy as np
class GridEnv:
    def __init__(self,
                 grid: Grid,
                 scenario: Scenario,
                 scenario_generator: ScenarioGenerator,
                 tee: bool = False):
        """ Environment class. Combines the DC grid with the information about EVs and power price.

        grid: DC grid
        scenario: scenario specifying EVs and power price
        scenario_generator: ScenarioGenerator object, contains information about all
        EV and power price related distributions
        tee: verbosity flag forwarded to the OPF solver
        """
        self.grid = grid
        self.scenario = scenario
        self.scenario_generator = scenario_generator
        self.tee = tee
        self.t_start_ind = 0
        self.t_end_ind = scenario.t_end_ind
        self.timesteps_hr = scenario.timesteps_hr
        self.t_ind = 0
        # PTU = programme time unit; length of one timestep in hours.
        self.ptu_size_hr = scenario.ptu_size_hr
        # Per-node / per-line trajectories, one column per timestep.
        self.V_nodes = np.empty((self.grid.n_nodes, self.timesteps_hr.shape[0]))
        self.P_nodes = np.empty((self.grid.n_nodes, self.timesteps_hr.shape[0]))
        self.I_nodes = np.empty((self.grid.n_nodes, self.timesteps_hr.shape[0]))
        self.I_lines = np.empty((self.grid.n_lines, self.timesteps_hr.shape[0]))
        # NOTE(review): SOC is initialised to NaN here but to zeros in
        # reset() — confirm which initial value downstream code expects.
        self.SOC_evs = np.nan * np.ones((len(self.scenario.evs), self.timesteps_hr.shape[0]))

    @property
    def t_hr(self):
        # Current wall-clock time in hours.
        return self.timesteps_hr[self.t_ind]

    @property
    def finished(self):
        return self.t_ind > self.t_end_ind

    @property
    def current_SOC(self):
        # SOC clamped into [0, soc_max] per EV at the current timestep.
        return np.minimum([ev.soc_max for ev in self.scenario.evs], np.maximum(0, self.SOC_evs[:, self.t_ind]))

    def reset(self, ):
        """Reset the grid state, time index, trajectories and EV SOCs."""
        V_nodes, P_nodes, I_nodes, I_lines = self.grid.get_init_state()
        self.grid.apply_state(V_nodes, P_nodes, I_nodes, I_lines)
        # NOTE(review): hard-coded placeholder utilities (100 for loads,
        # 1 for generators) — confirm these initial values are intended.
        utility_coefs = np.zeros(self.grid.n_nodes)
        for load_ind in self.grid.load_inds:
            utility_coefs[load_ind] = 100
        for gen_ind in self.grid.gen_inds:
            utility_coefs[gen_ind] = 1
        self.grid.update_demand_and_price(np.zeros(self.grid.n_nodes), np.zeros(self.grid.n_nodes), utility_coefs)
        self.t_ind = 0
        self.V_nodes = np.empty((self.grid.n_nodes, self.timesteps_hr.shape[0]))
        self.P_nodes = np.empty((self.grid.n_nodes, self.timesteps_hr.shape[0]))
        self.I_nodes = np.empty((self.grid.n_nodes, self.timesteps_hr.shape[0]))
        self.I_lines = np.empty((self.grid.n_lines, self.timesteps_hr.shape[0]))
        self.SOC_evs = np.zeros((len(self.scenario.evs), self.timesteps_hr.shape[0]))
        # Seed arrival SOC for EVs that arrive exactly at t = 0.
        for ev_ind, ev in enumerate(self.scenario.evs):
            if self.t_hr == ev.t_arr_hr:
                self.SOC_evs[ev_ind, self.t_ind] = ev.soc_arr

    def step(self, p_demand_min, p_demand_max, utility_coefs, normalize_opf=False):
        """Advance one PTU: clamp demands, solve the OPF, update EV SOCs.

        p_demand_min / p_demand_max: per-node power demand bounds
        utility_coefs: per-node utility/price coefficients
        normalize_opf: forwarded to the OPF solver
        """
        # Cap each load's max demand by the headroom of the EV currently
        # connected to it (energy to full, spread over one PTU).
        for load_ind in self.grid.load_inds:
            ev_at_t_at_load = self.scenario.load_evs_presence[load_ind][self.t_ind]
            active_evs_at_t_at_node = [ev for ev in ev_at_t_at_load if ev.t_dep_hr > self.t_hr]
            if len(active_evs_at_t_at_node) > 0:
                ev = active_evs_at_t_at_node[0]
                ev_ind = self.scenario.evs.index(ev)
                load_p_max = (ev.soc_max - self.SOC_evs[ev_ind, self.t_ind]) / self.scenario.ptu_size_hr
                p_demand_max[load_ind] = min(load_p_max, p_demand_max[load_ind])
        # Keep the bounds consistent (max >= min) and widen them by a tiny
        # epsilon to avoid degenerate equality constraints in the solver.
        p_demand_max[p_demand_max < p_demand_min] = p_demand_min[p_demand_max < p_demand_min] + 1e-10
        self.grid.update_demand_and_price(p_demand_min-1e-8, p_demand_max + 1e-8, utility_coefs)
        loads_p_demand_min = p_demand_min[self.grid.load_inds]
        loads_p_demand_max = p_demand_max[self.grid.load_inds]
        if loads_p_demand_min.max() == loads_p_demand_max.max() == 0:
            # No demand anywhere: skip the OPF and use the trivial solution.
            model = None
            V_nodes = np.array(([n.v_nominal for n in self.grid.nodes]))
            P_nodes = np.zeros(self.grid.n_nodes)
            I_nodes = np.zeros(self.grid.n_nodes)
            I_lines = np.zeros(self.grid.n_lines)
        else:
            model, V_nodes, P_nodes, I_nodes, I_lines = solve(self.grid, tee=self.tee, normalize=normalize_opf)
        self.grid.apply_state(V_nodes, P_nodes, I_nodes, I_lines)
        # Record the solved state for this timestep.
        self.V_nodes[:, self.t_ind] = np.copy(V_nodes)
        self.P_nodes[:, self.t_ind] = np.copy(P_nodes)
        self.I_nodes[:, self.t_ind] = np.copy(I_nodes)
        self.I_lines[:, self.t_ind] = np.copy(I_lines)
        self.t_ind += 1
        if not self.finished:
            # Propagate EV SOCs to the new timestep: arrivals get their
            # arrival SOC, connected EVs integrate delivered power.
            for ev_ind, ev in enumerate(self.scenario.evs):
                if self.t_hr == ev.t_arr_hr:
                    self.SOC_evs[ev_ind, self.t_ind] = ev.soc_arr
                elif ev.t_arr_hr < self.t_hr <= ev.t_dep_hr:
                    old_soc = self.SOC_evs[ev_ind, self.t_ind - 1]
                    new_soc = old_soc + self.ptu_size_hr * self.P_nodes[ev.load_ind, self.t_ind - 1]
                    self.SOC_evs[ev_ind, self.t_ind] = np.copy(new_soc)

    def observe_scenario(self, know_future=False):
        """Return the scenario as seen by an agent (full or truncated view)."""
        if know_future:
            return self.scenario
        else:
            return self.scenario.create_scenario_unknown_future(self.t_ind)

    def get_cost_coefs(self):
        """Per-node cost coefficients: power price at generators, EV utility at loads."""
        utility_coefs = np.zeros(self.grid.n_nodes)
        utility_coefs[self.grid.gen_inds] = self.scenario.power_price[self.t_ind]
        for load_ind in self.grid.load_inds:
            ev_at_t_at_load = self.scenario.load_evs_presence[load_ind][self.t_ind]
            active_evs_at_t_at_node = [ev for ev in ev_at_t_at_load if ev.t_dep_hr > self.t_hr]
            if len(active_evs_at_t_at_node) > 0:
                assert len(active_evs_at_t_at_node) == 1, "More than 1 EV at load %d" % load_ind
                ev = active_evs_at_t_at_node[0]
                utility_coefs[load_ind] = ev.utility_coef
        return utility_coefs

    def generate_possible_futures(self, n_scenarios):
        """Sample n_scenarios future scenarios consistent with currently known EVs."""
        return self.scenario_generator.generate(self.grid.n_loads, n_scenarios, self.t_ind,
                                                self.scenario.get_evs_known_at_t_ind(self.t_ind))
"numpy.copy",
"numpy.array",
"numpy.zeros",
"numpy.empty",
"src.executors.exact.solve_opf.solve",
"numpy.maximum"
] | [((1110, 1167), 'numpy.empty', 'np.empty', (['(self.grid.n_nodes, self.timesteps_hr.shape[0])'], {}), '((self.grid.n_nodes, self.timesteps_hr.shape[0]))\n', (1118, 1167), True, 'import numpy as np\n'), ((1191, 1248), 'numpy.empty', 'np.empty', (['(self.grid.n_nodes, self.timesteps_hr.shape[0])'], {}), '((self.grid.n_nodes, self.timesteps_hr.shape[0]))\n', (1199, 1248), True, 'import numpy as np\n'), ((1272, 1329), 'numpy.empty', 'np.empty', (['(self.grid.n_nodes, self.timesteps_hr.shape[0])'], {}), '((self.grid.n_nodes, self.timesteps_hr.shape[0]))\n', (1280, 1329), True, 'import numpy as np\n'), ((1353, 1410), 'numpy.empty', 'np.empty', (['(self.grid.n_lines, self.timesteps_hr.shape[0])'], {}), '((self.grid.n_lines, self.timesteps_hr.shape[0]))\n', (1361, 1410), True, 'import numpy as np\n'), ((2007, 2034), 'numpy.zeros', 'np.zeros', (['self.grid.n_nodes'], {}), '(self.grid.n_nodes)\n', (2015, 2034), True, 'import numpy as np\n'), ((2366, 2423), 'numpy.empty', 'np.empty', (['(self.grid.n_nodes, self.timesteps_hr.shape[0])'], {}), '((self.grid.n_nodes, self.timesteps_hr.shape[0]))\n', (2374, 2423), True, 'import numpy as np\n'), ((2447, 2504), 'numpy.empty', 'np.empty', (['(self.grid.n_nodes, self.timesteps_hr.shape[0])'], {}), '((self.grid.n_nodes, self.timesteps_hr.shape[0]))\n', (2455, 2504), True, 'import numpy as np\n'), ((2528, 2585), 'numpy.empty', 'np.empty', (['(self.grid.n_nodes, self.timesteps_hr.shape[0])'], {}), '((self.grid.n_nodes, self.timesteps_hr.shape[0]))\n', (2536, 2585), True, 'import numpy as np\n'), ((2609, 2666), 'numpy.empty', 'np.empty', (['(self.grid.n_lines, self.timesteps_hr.shape[0])'], {}), '((self.grid.n_lines, self.timesteps_hr.shape[0]))\n', (2617, 2666), True, 'import numpy as np\n'), ((4698, 4714), 'numpy.copy', 'np.copy', (['V_nodes'], {}), '(V_nodes)\n', (4705, 4714), True, 'import numpy as np\n'), ((4753, 4769), 'numpy.copy', 'np.copy', (['P_nodes'], {}), '(P_nodes)\n', (4760, 4769), True, 'import numpy as np\n'), ((4808, 
4824), 'numpy.copy', 'np.copy', (['I_nodes'], {}), '(I_nodes)\n', (4815, 4824), True, 'import numpy as np\n'), ((4863, 4879), 'numpy.copy', 'np.copy', (['I_lines'], {}), '(I_lines)\n', (4870, 4879), True, 'import numpy as np\n'), ((5661, 5688), 'numpy.zeros', 'np.zeros', (['self.grid.n_nodes'], {}), '(self.grid.n_nodes)\n', (5669, 5688), True, 'import numpy as np\n'), ((1777, 1819), 'numpy.maximum', 'np.maximum', (['(0)', 'self.SOC_evs[:, self.t_ind]'], {}), '(0, self.SOC_evs[:, self.t_ind])\n', (1787, 1819), True, 'import numpy as np\n'), ((2247, 2274), 'numpy.zeros', 'np.zeros', (['self.grid.n_nodes'], {}), '(self.grid.n_nodes)\n', (2255, 2274), True, 'import numpy as np\n'), ((2276, 2303), 'numpy.zeros', 'np.zeros', (['self.grid.n_nodes'], {}), '(self.grid.n_nodes)\n', (2284, 2303), True, 'import numpy as np\n'), ((4266, 4314), 'numpy.array', 'np.array', (['[n.v_nominal for n in self.grid.nodes]'], {}), '([n.v_nominal for n in self.grid.nodes])\n', (4274, 4314), True, 'import numpy as np\n'), ((4339, 4366), 'numpy.zeros', 'np.zeros', (['self.grid.n_nodes'], {}), '(self.grid.n_nodes)\n', (4347, 4366), True, 'import numpy as np\n'), ((4389, 4416), 'numpy.zeros', 'np.zeros', (['self.grid.n_nodes'], {}), '(self.grid.n_nodes)\n', (4397, 4416), True, 'import numpy as np\n'), ((4439, 4466), 'numpy.zeros', 'np.zeros', (['self.grid.n_lines'], {}), '(self.grid.n_lines)\n', (4447, 4466), True, 'import numpy as np\n'), ((4537, 4592), 'src.executors.exact.solve_opf.solve', 'solve', (['self.grid'], {'tee': 'self.tee', 'normalize': 'normalize_opf'}), '(self.grid, tee=self.tee, normalize=normalize_opf)\n', (4542, 4592), False, 'from src.executors.exact.solve_opf import solve\n'), ((5390, 5406), 'numpy.copy', 'np.copy', (['new_soc'], {}), '(new_soc)\n', (5397, 5406), True, 'import numpy as np\n')] |
from copy import deepcopy
from typing import Any, Dict, Optional, Tuple, Union
import gym
import numpy as np
import torch
import torch.nn as nn
from ....environments import VecEnv
from ...common import (
ReplayBuffer,
get_env_properties,
get_model,
load_params,
safe_mean,
save_params,
set_seeds,
)
class TD3:
    """
    Twin Delayed DDPG (TD3)

    Paper: "Addressing Function Approximation Error in Actor-Critic Methods",
    Fujimoto et al., https://arxiv.org/abs/1802.09477
    (TD3 extends DDPG, https://arxiv.org/abs/1509.02971, with clipped
    double-Q learning and delayed policy updates.)

    :param network_type: (str) The deep neural network layer types ['mlp']
    :param env: (Gym environment) The environment to learn from
    :param gamma: (float) discount factor
    :param replay_size: (int) Replay memory size
    :param batch_size: (int) Update batch size
    :param lr_p: (float) Policy network learning rate
    :param lr_q: (float) Q network learning rate
    :param polyak: (float) Polyak averaging weight to update target network
    :param policy_frequency: (int) Update actor and target networks every
        policy_frequency steps
    :param epochs: (int) Number of epochs
    :param start_steps: (int) Number of exploratory steps at start
    :param steps_per_epoch: (int) Number of steps per epoch
    :param noise: (callable or None) action-noise factory; called with a mean
        array and a std array in create_model
    :param noise_std: (float) Standard deviation for action noise
    :param max_ep_len: (int) Maximum steps per episode
    :param deterministic_actions: True if actions are deterministic
    :param start_update: (int) Number of steps before first parameter update
    :param update_interval: (int) Number of steps between parameter updates
    :param save_interval: (int) Number of steps between saves of models
    :param layers: (tuple or list) Number of neurons in hidden layers
    :param seed (int): seed for torch and gym
    :param render (boolean): if environment is to be rendered
    :param device (str): device to use for tensor operations; 'cpu' for cpu
        and 'cuda' for gpu
    :param run_num: model run number if it has already been trained
    :param save_model: model save directory
    :param load_model: model loading path
    """

    def __init__(
        self,
        network_type: str,
        env: Union[gym.Env, VecEnv],
        gamma: float = 0.99,
        replay_size: int = 1000000,
        batch_size: int = 100,
        lr_p: float = 0.001,
        lr_q: float = 0.001,
        polyak: float = 0.995,
        policy_frequency: int = 2,
        epochs: int = 100,
        start_steps: int = 10000,
        steps_per_epoch: int = 4000,
        noise: Optional[Any] = None,
        noise_std: float = 0.1,
        max_ep_len: int = 1000,
        deterministic_actions: bool = False,
        start_update: int = 1000,
        update_interval: int = 50,
        layers: Tuple = (256, 256),
        seed: Optional[int] = None,
        render: bool = False,
        device: Union[torch.device, str] = "cpu",
        run_num: int = None,
        save_model: str = None,
        load_model: str = None,
        save_interval: int = 5000,
    ):
        self.network_type = network_type
        self.env = env
        self.gamma = gamma
        self.replay_size = replay_size
        self.batch_size = batch_size
        self.lr_p = lr_p
        self.lr_q = lr_q
        self.polyak = polyak
        self.policy_frequency = policy_frequency
        self.epochs = epochs
        self.start_steps = start_steps
        self.steps_per_epoch = steps_per_epoch
        self.noise = noise
        self.noise_std = noise_std
        self.max_ep_len = max_ep_len
        self.deterministic_actions = deterministic_actions
        self.start_update = start_update
        self.update_interval = update_interval
        self.save_interval = save_interval
        self.layers = layers
        self.seed = seed
        self.render = render
        self.run_num = run_num
        self.save_model = save_model
        self.load_model = load_model
        self.save = save_params
        self.load = load_params
        self.logs = {}
        self.logs["policy_loss"] = []
        self.logs["value_loss"] = []

        # Fall back to CPU when CUDA is requested but unavailable.
        if "cuda" in device and torch.cuda.is_available():
            self.device = torch.device(device)
        else:
            self.device = torch.device("cpu")

        if seed is not None:
            set_seeds(seed, self.env)

        self.create_model()
        self.checkpoint = self.get_hyperparams()

    def create_model(self) -> None:
        """Build actor-critic networks, targets, replay buffer and optimizers."""
        state_dim, action_dim, discrete, _ = get_env_properties(self.env)
        if discrete:
            raise Exception(
                "Discrete Environments not supported for {}.".format(__class__.__name__)
            )

        # Instantiate the noise process with zero mean and noise_std scale.
        if self.noise is not None:
            self.noise = self.noise(
                np.zeros_like(action_dim), self.noise_std * np.ones_like(action_dim)
            )

        # Actor-critic; the AC's critic doubles as the first Q-function,
        # a separately built value network serves as the second (twin) one.
        self.ac = get_model("ac", self.network_type)(
            state_dim, action_dim, self.layers, "Qsa", False
        ).to(self.device)
        self.ac.qf1 = self.ac.critic
        self.ac.qf2 = get_model("v", self.network_type)(
            state_dim, action_dim, hidden=self.layers, val_type="Qsa"
        )
        self.ac.qf1.to(self.device)
        self.ac.qf2.to(self.device)

        if self.load_model is not None:
            self.load(self)
            self.ac.actor.load_state_dict(self.checkpoint["policy_weights"])
            self.ac.qf1.load_state_dict(self.checkpoint["q1_weights"])
            self.ac.qf2.load_state_dict(self.checkpoint["q2_weights"])
            for key, item in self.checkpoint.items():
                if key not in ["weights", "save_model"]:
                    setattr(self, key, item)
            print("Loaded pretrained model")

        self.ac_target = deepcopy(self.ac).to(self.device)
        # Target networks are only updated via Polyak averaging.
        for param in self.ac_target.parameters():
            param.requires_grad = False

        self.replay_buffer = ReplayBuffer(self.replay_size, self.env)
        self.q_params = list(self.ac.qf1.parameters()) + list(self.ac.qf2.parameters())
        self.optimizer_q = torch.optim.Adam(self.q_params, lr=self.lr_q)
        self.optimizer_policy = torch.optim.Adam(
            self.ac.actor.parameters(), lr=self.lr_p
        )

    def update_params_before_select_action(self, timestep: int) -> None:
        """
        Update any parameters before selecting action like epsilon for decaying epsilon greedy

        :param timestep: Timestep in the training process
        :type timestep: int
        """
        pass

    def select_action(self, state: np.ndarray) -> np.ndarray:
        """Select an action for *state*, optionally perturbed by exploration noise.

        The action is clipped to the environment's action bounds.
        NOTE(review): ``.numpy()`` requires the tensor on CPU — confirm
        behaviour when a CUDA device is configured.
        """
        with torch.no_grad():
            action = self.ac_target.get_action(
                torch.as_tensor(state, dtype=torch.float32, device=self.device),
                deterministic=self.deterministic_actions,
            )[0].numpy()

        # add noise to output from policy network
        if self.noise is not None:
            action += self.noise()

        return np.clip(
            action, -self.env.action_space.high[0], self.env.action_space.high[0]
        )

    def get_q_loss(
        self,
        state: np.ndarray,
        action: np.ndarray,
        reward: np.ndarray,
        next_state: np.ndarray,
        done: np.ndarray,
    ) -> torch.Tensor:
        """Clipped double-Q loss: both critics regress onto the minimum of
        the two target critics evaluated at the target policy's action.

        NOTE(review): no target-policy smoothing noise is added here,
        unlike the TD3 paper — confirm whether that is intended.
        """
        q1 = self.ac.qf1.get_value(torch.cat([state, action], dim=-1))
        q2 = self.ac.qf2.get_value(torch.cat([state, action], dim=-1))

        with torch.no_grad():
            target_q1 = self.ac_target.qf1.get_value(
                torch.cat(
                    [
                        next_state,
                        self.ac_target.get_action(next_state, deterministic=True)[0],
                    ],
                    dim=-1,
                )
            )
            target_q2 = self.ac_target.qf2.get_value(
                torch.cat(
                    [
                        next_state,
                        self.ac_target.get_action(next_state, deterministic=True)[0],
                    ],
                    dim=-1,
                )
            )

        # Take the pessimistic (minimum) target to curb overestimation.
        target_q = torch.min(target_q1, target_q2).unsqueeze(1)
        target = reward.squeeze(1) + self.gamma * (1 - done) * target_q.squeeze(1)
        l1 = nn.MSELoss()(q1, target)
        l2 = nn.MSELoss()(q2, target)
        return l1 + l2

    def get_p_loss(self, state: np.array) -> torch.Tensor:
        """Policy loss: negated Q-value of the actor's deterministic action."""
        q_pi = self.ac.get_value(
            torch.cat([state, self.ac.get_action(state, deterministic=True)[0]], dim=-1)
        )
        return -torch.mean(q_pi)

    def update_params(self, update_interval: int) -> None:
        """Run *update_interval* gradient steps on critics, with delayed
        actor and target-network updates every policy_frequency steps."""
        for timestep in range(update_interval):
            batch = self.replay_buffer.sample(self.batch_size)
            state, action, reward, next_state, done = (x.to(self.device) for x in batch)

            self.optimizer_q.zero_grad()
            loss_q = self.get_q_loss(state, action, reward, next_state, done)
            loss_q.backward()
            self.optimizer_q.step()

            # Delayed Update
            if timestep % self.policy_frequency == 0:
                # freeze critic params for policy update
                for param in self.q_params:
                    param.requires_grad = False

                self.optimizer_policy.zero_grad()
                loss_p = self.get_p_loss(state)
                loss_p.backward()
                self.optimizer_policy.step()

                # Bug fix: unfreeze BOTH critics. The original unfroze only
                # self.ac.critic.parameters() (== qf1), leaving qf2 frozen
                # forever after the first delayed update, so qf2 never
                # received gradients again.
                for param in self.q_params:
                    param.requires_grad = True

                # Polyak-average the target networks towards the online nets.
                with torch.no_grad():
                    for param, param_target in zip(
                        self.ac.parameters(), self.ac_target.parameters()
                    ):
                        param_target.data.mul_(self.polyak)
                        param_target.data.add_((1 - self.polyak) * param.data)

                self.logs["policy_loss"].append(loss_p.item())
                self.logs["value_loss"].append(loss_q.item())

    def learn(self) -> None:  # pragma: no cover
        """Main training loop over vectorised environments."""
        state, episode_reward, episode_len, episode = (
            self.env.reset(),
            np.zeros(self.env.n_envs),
            np.zeros(self.env.n_envs),
            np.zeros(self.env.n_envs),
        )
        total_steps = self.steps_per_epoch * self.epochs * self.env.n_envs

        if self.noise is not None:
            self.noise.reset()

        for timestep in range(0, total_steps, self.env.n_envs):
            # execute single transition; random warm-up before start_steps
            if timestep > self.start_steps:
                action = self.select_action(state)
            else:
                action = self.env.sample()

            next_state, reward, done, _ = self.env.step(action)
            if self.render:
                self.env.render()

            episode_reward += reward
            episode_len += 1

            # dont set done to True if max_ep_len reached (time-limit
            # truncation is not a true terminal state)
            done = np.array(
                [
                    False if episode_len[i] == self.max_ep_len else done[i]
                    for i, ep_len in enumerate(episode_len)
                ]
            )

            self.replay_buffer.extend(zip(state, action, reward, next_state, done))
            state = next_state

            if np.any(done) or np.any(episode_len == self.max_ep_len):
                if sum(episode) % 20 == 0:
                    print(
                        "Ep: {}, reward: {}, t: {}".format(
                            sum(episode), np.mean(episode_reward), timestep
                        )
                    )

                # NOTE(review): `episode` is incremented once per finished
                # env here and once more after the reset below — episode
                # counts may be inflated; confirm the intended semantics.
                for i, di in enumerate(done):
                    if di or episode_len[i] == self.max_ep_len:
                        episode_reward[i] = 0
                        episode_len[i] = 0
                        episode += 1

                if self.noise is not None:
                    self.noise.reset()

                state, episode_reward, episode_len = (
                    self.env.reset(),
                    np.zeros(self.env.n_envs),
                    np.zeros(self.env.n_envs),
                )
                episode += 1

            # update params
            if timestep >= self.start_update and timestep % self.update_interval == 0:
                self.update_params(self.update_interval)

            if self.save_model is not None:
                if timestep >= self.start_update and timestep % self.save_interval == 0:
                    self.checkpoint = self.get_hyperparams()
                    self.save(self, timestep)
                    print("Saved current model")

        self.env.close()

    def get_hyperparams(self) -> Dict[str, Any]:
        """Return hyperparameters and network weights for checkpointing."""
        hyperparams = {
            "network_type": self.network_type,
            "gamma": self.gamma,
            "lr_p": self.lr_p,
            "lr_q": self.lr_q,
            "polyak": self.polyak,
            "policy_frequency": self.policy_frequency,
            "noise_std": self.noise_std,
            "q1_weights": self.ac.qf1.state_dict(),
            "q2_weights": self.ac.qf2.state_dict(),
            "policy_weights": self.ac.actor.state_dict(),
        }
        return hyperparams

    def get_logging_params(self) -> Dict[str, Any]:
        """
        :returns: Logging parameters for monitoring training
        :rtype: dict
        """
        logs = {
            "policy_loss": safe_mean(self.logs["policy_loss"]),
            "value_loss": safe_mean(self.logs["value_loss"]),
        }

        self.empty_logs()
        return logs

    def empty_logs(self):
        """
        Empties logs
        """
        self.logs["policy_loss"] = []
        self.logs["value_loss"] = []
if __name__ == "__main__":
    # Smoke-test: train TD3 on a continuous-control task with the
    # default MLP networks.
    env = gym.make("Pendulum-v0")
    algo = TD3("mlp", env)
    algo.learn()
| [
"numpy.clip",
"torch.optim.Adam",
"numpy.ones_like",
"torch.as_tensor",
"numpy.mean",
"torch.mean",
"numpy.any",
"torch.min",
"torch.nn.MSELoss",
"numpy.zeros",
"torch.cuda.is_available",
"copy.deepcopy",
"torch.no_grad",
"numpy.zeros_like",
"gym.make",
"torch.cat",
"torch.device"
] | [((14926, 14949), 'gym.make', 'gym.make', (['"""Pendulum-v0"""'], {}), "('Pendulum-v0')\n", (14934, 14949), False, 'import gym\n'), ((7032, 7077), 'torch.optim.Adam', 'torch.optim.Adam', (['self.q_params'], {'lr': 'self.lr_q'}), '(self.q_params, lr=self.lr_q)\n', (7048, 7077), False, 'import torch\n'), ((7927, 8005), 'numpy.clip', 'np.clip', (['action', '(-self.env.action_space.high[0])', 'self.env.action_space.high[0]'], {}), '(action, -self.env.action_space.high[0], self.env.action_space.high[0])\n', (7934, 8005), True, 'import numpy as np\n'), ((5039, 5064), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (5062, 5064), False, 'import torch\n'), ((5092, 5112), 'torch.device', 'torch.device', (['device'], {}), '(device)\n', (5104, 5112), False, 'import torch\n'), ((5153, 5172), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (5165, 5172), False, 'import torch\n'), ((7561, 7576), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (7574, 7576), False, 'import torch\n'), ((8262, 8296), 'torch.cat', 'torch.cat', (['[state, action]'], {'dim': '(-1)'}), '([state, action], dim=-1)\n', (8271, 8296), False, 'import torch\n'), ((8333, 8367), 'torch.cat', 'torch.cat', (['[state, action]'], {'dim': '(-1)'}), '([state, action], dim=-1)\n', (8342, 8367), False, 'import torch\n'), ((8383, 8398), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (8396, 8398), False, 'import torch\n'), ((9186, 9198), 'torch.nn.MSELoss', 'nn.MSELoss', ([], {}), '()\n', (9196, 9198), True, 'import torch.nn as nn\n'), ((9224, 9236), 'torch.nn.MSELoss', 'nn.MSELoss', ([], {}), '()\n', (9234, 9236), True, 'import torch.nn as nn\n'), ((9482, 9498), 'torch.mean', 'torch.mean', (['q_pi'], {}), '(q_pi)\n', (9492, 9498), False, 'import torch\n'), ((11234, 11259), 'numpy.zeros', 'np.zeros', (['self.env.n_envs'], {}), '(self.env.n_envs)\n', (11242, 11259), True, 'import numpy as np\n'), ((11273, 11298), 'numpy.zeros', 'np.zeros', (['self.env.n_envs'], {}), 
'(self.env.n_envs)\n', (11281, 11298), True, 'import numpy as np\n'), ((11312, 11337), 'numpy.zeros', 'np.zeros', (['self.env.n_envs'], {}), '(self.env.n_envs)\n', (11320, 11337), True, 'import numpy as np\n'), ((5693, 5718), 'numpy.zeros_like', 'np.zeros_like', (['action_dim'], {}), '(action_dim)\n', (5706, 5718), True, 'import numpy as np\n'), ((6682, 6699), 'copy.deepcopy', 'deepcopy', (['self.ac'], {}), '(self.ac)\n', (6690, 6699), False, 'from copy import deepcopy\n'), ((12446, 12458), 'numpy.any', 'np.any', (['done'], {}), '(done)\n', (12452, 12458), True, 'import numpy as np\n'), ((12462, 12500), 'numpy.any', 'np.any', (['(episode_len == self.max_ep_len)'], {}), '(episode_len == self.max_ep_len)\n', (12468, 12500), True, 'import numpy as np\n'), ((5737, 5761), 'numpy.ones_like', 'np.ones_like', (['action_dim'], {}), '(action_dim)\n', (5749, 5761), True, 'import numpy as np\n'), ((9039, 9070), 'torch.min', 'torch.min', (['target_q1', 'target_q2'], {}), '(target_q1, target_q2)\n', (9048, 9070), False, 'import torch\n'), ((10655, 10670), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (10668, 10670), False, 'import torch\n'), ((13222, 13247), 'numpy.zeros', 'np.zeros', (['self.env.n_envs'], {}), '(self.env.n_envs)\n', (13230, 13247), True, 'import numpy as np\n'), ((13269, 13294), 'numpy.zeros', 'np.zeros', (['self.env.n_envs'], {}), '(self.env.n_envs)\n', (13277, 13294), True, 'import numpy as np\n'), ((7642, 7705), 'torch.as_tensor', 'torch.as_tensor', (['state'], {'dtype': 'torch.float32', 'device': 'self.device'}), '(state, dtype=torch.float32, device=self.device)\n', (7657, 7705), False, 'import torch\n'), ((12675, 12698), 'numpy.mean', 'np.mean', (['episode_reward'], {}), '(episode_reward)\n', (12682, 12698), True, 'import numpy as np\n')] |
import numpy as np
import pandas as pd
from scipy import sparse
from scipy.linalg import eigh
from ec_feature_selection.utils import check_array
from ec_feature_selection._ecfs_functions import get_fisher_score, get_mutual_information, build_kernel, build_sigma
from typing import Optional, Union
# Accepted array-like container types for samples and labels.
# Fix: the alias must name the array *type* np.ndarray, not the np.array
# factory function (typing.Union silently accepted the callable, but the
# alias was semantically wrong for type checkers and readers).
Data = Union[np.ndarray, pd.DataFrame, pd.Series, sparse.spmatrix]
class ECFS:
    """
    Feature Ranking and Selection via Eigenvector Centrality.

    A graph-based method for feature selection that ranks features by
    identifying the most important ones through the eigenvector centrality
    of a feature adjacency graph.

    References
    --------
    Based on the algorithm as introduced in:
    <NAME>. and <NAME>., 2017, July. Ranking to learn: Feature ranking and selection via eigenvector centrality.
    In New Frontiers in Mining Complex Patterns: 5th International Workshop,
    NFMCP 2016, Held in Conjunction with ECML-PKDD 2016, Riva del Garda, Italy, September 19, 2016,
    Revised Selected Papers (Vol. 10312, p. 19). Springer.
    % @InProceedings{RoffoECML16,
    % author={<NAME> and <NAME>},
    % booktitle={Proceedings of New Frontiers in Mining Complex Patterns (NFMCP 2016)},
    % title={Features Selection via Eigenvector Centrality},
    % year={2016},
    % keywords={Feature selection;ranking;high dimensionality;data mining},
    % month={Oct}}
    This Python implementation is inspired by the 'Feature Ranking and Selection via Eigenvector Centrality'
    MATLAB implementation that can be found in the Feature Selection Library (FSLib) by <NAME>.
    Many more Feature Selection methods are also available in the Feature Selection Library:
    https://www.mathworks.com/matlabcentral/fileexchange/56937-feature-selection-library

    Parameters
    ----------
    n_features : int > 0 and lower than the m original features, or None (default=None)
        Number of features to select.
        If n_features is set to None all features are kept and a ranked dataset is returned.
    epsilon : int or float >=0 (default 1e-5)
        A small number. Used for avoiding division by zero.

    Attributes
    ----------
    n_features : int
        Number of features to select.
    epsilon : float >=0 (default 1e-5)
        A small number. Used for avoiding division by zero.
    alpha : int or float in [0, 1]
        Loading coefficient.
        The adjacency matrix A is given by: A = (alpha * kernel) + (1 - alpha) * sigma
    positive_class : int, float, or str (default 1)
        Label of the positive class.
    negative_class : int, float, or str (default -1)
        Label of the negative class.
    fisher_score: numpy array, shape (m_features,)
        The fisher scores for each feature.
    mutual_information: float
        Mutual Information of X and y.
    A: numpy array (m_features, m_features)
        The adjacency matrix.
    eigenvalues: numpy array (n_features,)
        The eigenvalues of the adjacency matrix.
    eigenvectors: numpy array (n_features, n_features)
        The eigenvectors of the adjacency matrix.
    ranking: numpy array (n_features,)
        Ranking of features (0 is the most important feature, 1 is 2nd most imporant etc... ).
    """

    def __init__(self, n_features: Optional[int] = None, epsilon: float = 1e-5) -> None:
        self.n_features = n_features
        self.epsilon = epsilon

    def fit(self, X: Data, y: Data, alpha: Union[int, float], positive_class: Union[int, float, str],
            negative_class: Union[int, float, str]) -> 'ECFS':
        """
        Computes the feature ranking from the training data.

        Parameters
        ----------
        X : array-like, shape (n_samples, m_features)
            Training data to compute the feature importance scores from.
        y : array-like, shape (n_samples,)
            Training labels.
        alpha : int or float in [0, 1]
            Loading coefficient.
            The adjacency matrix A is given by: A = (alpha * kernel) + (1 - alpha) * sigma
        positive_class : int, float, or str
            Label of the positive class.
        negative_class : int, float, or str
            Label of the negative class.

        Returns
        -------
        self : object
            Returns the instance itself.
        """
        X = check_array(X)
        y = check_array(y)
        assert X.shape[0] == y.shape[0], 'X and y should have the same number of samples. {} != {}'.format(X.shape[0],
                                                                                                           y.shape[0])
        self.positive_class = positive_class
        self.negative_class = negative_class
        self.alpha = alpha
        # Per-feature relevance scores: Fisher score and mutual information
        # (mutual information is computed column-wise against y).
        self.fisher_score = get_fisher_score(X=X, y=y, negative_class=self.negative_class,
                                             positive_class=self.positive_class, epsilon=self.epsilon)
        self.mutual_information = np.apply_along_axis(get_mutual_information, 0, X, y, self.epsilon)
        # Adjacency matrix A is a convex combination of the score kernel and
        # the feature-correlation matrix sigma, weighted by alpha.
        self.kernel = build_kernel(self.fisher_score, self.mutual_information)
        self.sigma = build_sigma(X)
        self.A = self.alpha * self.kernel + (1 - self.alpha) * self.sigma
        # Rank features by the magnitude of the entries of the eigenvector
        # associated with the largest eigenvalue (eigenvector centrality).
        self.eigenvalues, self.eigenvectors = eigh(self.A)
        self.ranking = np.abs(self.eigenvectors[:, self.eigenvalues.argmax()]).argsort()[::-1]
        # Fix: return self as documented (and per the common fit() convention),
        # so fit(...).transform(...) chaining works.
        return self

    def transform(self, X: Data) -> np.ndarray:
        """
        Reduces the feature set down to the top n_features.

        Parameters
        ----------
        X: array-like (n_samples, m_features)
            Data to perform feature ranking and selection on.

        Returns
        -------
        X_ranked: array-like (n_samples, n_top_features)
            Reduced feature matrix (all columns, reordered by rank, when
            n_features is None).
        """
        # Fix: use a consistent `is not None` check in both places (the
        # original mixed truthiness and identity tests for n_features).
        if self.n_features is not None:
            if self.n_features > X.shape[1]:
                raise ValueError(
                    'Number of features to select is higher than the original number of features. {} > {}'.format(
                        self.n_features, X.shape[1]))
        # Reorder columns from most to least important, then truncate.
        X_ranked = X[:, self.ranking]
        if self.n_features is not None:
            X_ranked = X_ranked[:, :self.n_features]
        return X_ranked

    def fit_transform(self, X: Data, y: Data, alpha: Union[int, float], positive_class: Union[int, float, str],
                      negative_class: Union[int, float, str]) -> np.ndarray:
        """
        Computes the feature ranking from the training data, then reduces
        the feature set down to the top n_features.

        Parameters
        ----------
        X : array-like, shape (n_samples, m_features)
            Training data to compute the feature importance scores from
            and to perform feature ranking and selection on.
        y : array-like, shape (n_samples)
            Training labels.
        alpha : int or float in [0, 1]
            Loading coefficient.
            The adjacency matrix A is given by: A = (alpha * kernel) + (1 - alpha) * sigma
        positive_class : int, float, or str
            Label of the positive class.
        negative_class : int, float, or str
            Label of the negative class.

        Returns
        -------
        X_ranked: array-like (n_samples, n_top_features)
            Reduced feature matrix.
        """
        # fit() now returns self, so the two steps chain directly.
        return self.fit(X, y, alpha, positive_class, negative_class).transform(X)
| [
"ec_feature_selection.utils.check_array",
"scipy.linalg.eigh",
"ec_feature_selection._ecfs_functions.build_kernel",
"ec_feature_selection._ecfs_functions.build_sigma",
"ec_feature_selection._ecfs_functions.get_fisher_score",
"numpy.apply_along_axis"
] | [((4356, 4370), 'ec_feature_selection.utils.check_array', 'check_array', (['X'], {}), '(X)\n', (4367, 4370), False, 'from ec_feature_selection.utils import check_array\n'), ((4383, 4397), 'ec_feature_selection.utils.check_array', 'check_array', (['y'], {}), '(y)\n', (4394, 4397), False, 'from ec_feature_selection.utils import check_array\n'), ((4783, 4907), 'ec_feature_selection._ecfs_functions.get_fisher_score', 'get_fisher_score', ([], {'X': 'X', 'y': 'y', 'negative_class': 'self.negative_class', 'positive_class': 'self.positive_class', 'epsilon': 'self.epsilon'}), '(X=X, y=y, negative_class=self.negative_class,\n positive_class=self.positive_class, epsilon=self.epsilon)\n', (4799, 4907), False, 'from ec_feature_selection._ecfs_functions import get_fisher_score, get_mutual_information, build_kernel, build_sigma\n'), ((4984, 5050), 'numpy.apply_along_axis', 'np.apply_along_axis', (['get_mutual_information', '(0)', 'X', 'y', 'self.epsilon'], {}), '(get_mutual_information, 0, X, y, self.epsilon)\n', (5003, 5050), True, 'import numpy as np\n'), ((5074, 5130), 'ec_feature_selection._ecfs_functions.build_kernel', 'build_kernel', (['self.fisher_score', 'self.mutual_information'], {}), '(self.fisher_score, self.mutual_information)\n', (5086, 5130), False, 'from ec_feature_selection._ecfs_functions import get_fisher_score, get_mutual_information, build_kernel, build_sigma\n'), ((5152, 5166), 'ec_feature_selection._ecfs_functions.build_sigma', 'build_sigma', (['X'], {}), '(X)\n', (5163, 5166), False, 'from ec_feature_selection._ecfs_functions import get_fisher_score, get_mutual_information, build_kernel, build_sigma\n'), ((5289, 5301), 'scipy.linalg.eigh', 'eigh', (['self.A'], {}), '(self.A)\n', (5293, 5301), False, 'from scipy.linalg import eigh\n')] |
# -*- coding: utf-8 -*-
#######################################################
# author :<NAME> [<EMAIL>]
# create date : 06 Nov 2016
# last edit date: 19 Apr 2017
#######################################################
##########import package files##########
import numpy as np
import math
import datetime
# #########################################################################
# ########### Reinforcement Learning (RL) constants start##################
# #########################################################################
# # labels of weights for features
# w_0 = "bias(w_0)"
# ##### 1 DLI to plants with the state and action
# w_1 = "DLIEachDayToPlants(w_1)"
# ##### 2 plant weight increase with the state and action
# w_2 = "unitDailyFreshWeightIncrease(w_2)"
# ##### 3 plant weight at the state
# w_3 = "accumulatedUnitDailyFreshWeightIncrease(w_3)"
# ##### 4 averageDLITillTheDay
# w_4 = "averageDLITillHarvestDay(w_4)"
# ##### 5 season effects (winter) representing dates.
# w_5 = "isSpring(w_5)"
# ##### 6 season effects (spring) representing dates.
# w_6 = "isSummer(w_6)"
# ##### 7 season effects (summer) representing dates.
# w_7 = "isAutumn(w_7)"
# ##### 8 season effects (autumn) representing dates.
# w_8 = "isWinter(w_8)"
#
# # starts from middle of Feb
# daysFromJanStartApril = 45
# # starts from May first
# daysFromJanStartSummer = 121
# # starts from middle of September
# daysFromJanStartAutumn = 259
# # starts from middle of Winter
# daysFromJanStartWinter = 320
#
# fileNameQLearningTrainedWeight = "qLearningTraintedWeights"
#
# ifRunTraining = True
# ifSaveCalculatedWeight = True
# ifLoadWeight = True
# ##############################################
# ########### RL constants end##################
# ##############################################
#####################################################
############ filepath and file name start ###########
#####################################################
# --- input CSV file names consumed by the simulator ---
# hourly environment (weather) data file covering the whole prepared date range
environmentData = "20130101-20170101" + ".csv"
# retail price history of romaine lettuce
romaineLettceRetailPriceFileName = "romaineLettuceRetailPrice.csv"
# directory of the retail price file; empty string means the working directory
romaineLettceRetailPriceFilePath = ""
# monthly average retail price of electricity
averageRetailPriceOfElectricityMonthly = "averageRetailPriceOfElectricityMonthly.csv"
# measured growth data used to validate the plant growth model
plantGrowthModelValidationData = "plantGrowthModelValidationData.csv"
# source: https://www.eia.gov/dnav/ng/hist/n3010az3m.htm
ArizonaPriceOfNaturalGasDeliveredToResidentialConsumers = "ArizonaPriceOfNaturalGasDeliveredToResidentialConsumers.csv"
###################################################
############ filepath and file name end ###########
###################################################
###############################################################
####################### If statement flag start ###############
###############################################################
# True: use only the measured data (imported data whose source is the local
# weather station "https://midcdmz.nrel.gov/ua_oasis/");
# False: also use estimated/simulated values where measurements are missing.
ifUseOnlyRealData = False
# ifUseOnlyRealData = True
# If True, export measured horizontal and estimated data when the simulation period is one day.
# ifExportMeasuredHorizontalAndExtimatedData = True
# If True, export measured horizontal and estimated data only on the 15th day of each month.
ifGet15thDayData = True
# ifGet15thDayData = False
# True: consider photoinhibition caused by too strong sunlight; False: ignore it.
# IfConsiderPhotoInhibition = True
IfConsiderPhotoInhibition = False
# True: consider the price discount caused by tipburn; False: ignore it.
IfConsiderDiscountByTipburn = False
# set this False when running an optimization algorithm (suppresses file export)
exportCSVFiles = True
# exportCSVFiles = False
# True: export CSV files and figures
ifExportCSVFile = True
# ifExportCSVFile = False
# ifExportFigures = True
ifExportFigures = False
# True: price lettuce at the greenhouse retail price (sales per head);
# False: price lettuce at the open-field farming price (sales per kg).
sellLettuceByGreenhouseRetailPrice = True
# sellLettuceByGreenhouseRetailPrice = False
print("sellLettuceByGreenhouseRetailPrice:{}".format(sellLettuceByGreenhouseRetailPrice))
#############################################################
####################### If statement flag end################
#############################################################
########################################
##########other constant start##########
########################################
# days per month for a non-leap year: Jan, Feb, ..., Dec
dayperMonthArray = np.array([31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31])
# days per month for a leap year
dayperMonthLepArray = np.array([31, 29, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31])
# time-unit conversion constants; keep them int
secondperMinute = 60
minuteperHour = 60
hourperDay = 24
dayperYear = 365
monthsperYear = 12
dayperLeapYear = 366
noonHour = 12
# the temperature at STC (Standard Test Conditions), unit [Celsius]
STCtemperature = 25.0
# current prepared data range: 20130101-20170101. 20150101 to 20160815 was the only correctly observed period; some 2014 data work too.
# do not choose "20140201 to 20160101" specifically — it does not work.
# do not include 1/19/2014 as a start date because 1/19/2014 partially misses its hourly data.
# do not include dates after 8/18/2016 because they do not correctly log the body temperature.
SimulationStartDate="20150101"
# SimulationStartDate="20150323"
# SimulationStartDate="20151215"
SimulationEndDate = "20151231"
# SimulationEndDate = "20150419"
# one cultivation cycle
# SimulationEndDate = "20151215"
# representative sunny days, one per month, used for clear-sky comparison runs
sunnyDaysList = ["20150115", "20150217", "20150316", "20150413", "20150517", "20150615", "20150711", "20150815", "20150918", "20151013", "20151117", "20151215"]
print("SimulationStartDate:{}, SimulationEndDate:{}".format(SimulationStartDate, SimulationEndDate))
# latitude at Tucson == 32.2800408 N, converted to [rad]
Latitude = math.radians(32.2800408)
# longitude at Tucson 110.9422745 W, converted to [rad]
# NOTE(review): the west longitude is stored as a positive value — presumably
# the downstream solar-position formulas use a positive-west convention; confirm.
Longitude = math.radians(110.9422745)
# lambda (longitude of the site) = 32.2800408 N: latitude,-110.9422745 W : longitude [degree]
# lambda_R ( the longitude of the time zone in which the site is situated) = 33.4484, 112.074 [degree]
# J' (the day angle in the year ) =
# <NAME>, "The Role of Solar-Radiation Climatology in the Design of Photovoltaic Systems", 2nd edition
# there is a equation calculating LAT.
# p618 (46 in pdf file)
# The passage of days is described mathematically by numbering the days continuously through the year to produce a Julian day number, J: 1
# January, J 5 1; 1 February, J 5 32; 1 March, J 5 57 in a nonleap year and 58 in a leap year; and so on. Each day in the year can be then be
# expressed in an angular form as a day angle, J0, in degrees by multiplying
# J by 360/365.25. The day angle is used in the many of the trigonometric expressions that follow.
# EOT (equation of time) =
# watt-to-PPFD conversion coefficient for sunlight: (W m^-2) -> (umol m^-2 s^-1).
# The wavelength range considered is roughly 280-2800 nm (shortwave sunlight),
# not 400-700 nm (visible/PAR sunlight).
wattToPPFDConversionRatio = 2.05
# wattToPPFDConversionRatio = 4.57 <- this ratio is used when the range of wavelength of PAR [W m^-2] is between 400nm and 700nm (visible sunlight)
# solar constant [W/m^2]
solarConstant = 1367.0
# ground reflectance of sunlight. source: https://www2.pvlighthouse.com.au/resources/courses/altermatt/The%20Solar%20Spectrum/The%20reflectance%20of%20the%20ground.aspx
groundReflectance = 0.1
# referred from <NAME> et. al., 2009, "Electrical energy generated by photovoltaic modules mounted inside the roof of a north-south oriented greenhouse"
# this number should be carefully defined according to the weather of the simulated
# region (very cloudy: 0.0 ~ 1.0: very sunny)
# atmosphericTransmissivity = 0.6
# atmosphericTransmissivity = 0.7
# the reason why this coefficient was changed into this value is described in the accompanying paper
atmosphericTransmissivity = 0.643
print("atmosphericTransmissivity:{}".format(atmosphericTransmissivity))
# unit conversion [cwt] -> [kg], US standard hundredweight
kgpercwt = 45.3630
# if True, continue to grow plants during the Summer period; the default in the object (instance) is False
# ifGrowForSummerPeriod = True
ifGrowForSummerPeriod = False
print("ifGrowForSummerPeriod:{}".format(ifGrowForSummerPeriod))
######################################
##########other constant end##########
######################################
#########################################################
##########Specification of the greenhouse start##########
#########################################################
# #########################################################
# # Our real greenhouse specification source:
# # http://www.gpstructures.com/pdfs/Windjammer.pdf
# # We used 6' TALL SIDEWALL HEIGHT 30' width, Free Standing Structures in our project.
# #greenhouse roof type
# greenhouseRoofType = "SimplifiedAFlame"
# #width of the greenhouse (m)
# greenhouseWidth = 9.144 # = 30 [feet]
# #depth of the greenhouse (m)
# greenhouseDepth = 14.6
# #area of the greenhouse (m**2)
# greenhouseFloorArea = greenhouseWidth * greenhouseDepth
# # print("greenhouseFloorArea[m^2]:{}".format(greenhouseFloorArea))
# #width of the greenhouse cultivation area (m)
# greenhouseCultivationFloorWidth = 7.33
# #depth of the greenhouse cultivation area(m)
# greenhouseCultivationFloorDepth = 10.89
# #floor area of greenhouse cultivation area(m**2)
# greenhouseCultivationFloorArea = greenhouseCultivationFloorWidth * greenhouseCultivationFloorDepth
# # greenhouseCultivationFloorArea = greenhouseWidth * greenhouseDepth * 0.9
# # print("greenhouseCultivationFloorArea[m^2]:{}".format(greenhouseCultivationFloorArea))
# # number of roofs. If this is 1, the greenhouse is a single roof greenhouse. If >1, multi-roof greenhouse
# numOfRoofs = 1
# # the type of roof direction
# roofDirectionNotation = "EastWestDirectionRoof"
# #side wall height of greenhouse (m)
# greenhouseHeightSideWall = 1.8288 # = 6[feet]
# #center height of greenhouse (m)
# greenhouseHeightRoofTop = 4.8768 # = 16[feet]
# #width of the rooftop. calculate from the Pythagorean theorem. assumed that the shape of rooftop is straight, not curved.
# greenhouseRoofWidth = math.sqrt((greenhouseWidth/2.0)**2.0 + (greenhouseHeightRoofTop-greenhouseHeightSideWall)**2.0)
# #print ("greenhouseRoofWidth: {}".format(greenhouseRoofWidth))
# #angle of the rooftop (theta θ). [rad]
#
# greenhouseRoofAngle = math.acos((greenhouseWidth/2.0) / greenhouseRoofWidth)
# # print ("greenhouseRoofAngle (rad) : {}".format(greenhouseRoofAngle))
# # the angle of the roof facing north or east [rad]
# roofAngleNorthOrEast = greenhouseRoofAngle
# # the angle of the roof facing south or west [rad]. This should be modified if the roof angle is different from the other side.
# roofAngleWestOrSouth = greenhouseRoofAngle
# #area of the rooftop [m^2]. summing the left and right side of rooftops from the center.
# greenhouseTotalRoofArea = greenhouseRoofWidth * greenhouseDepth * 2.0
# # print ("greenhouseRoofArea[m^2]: {}".format(greenhouseTotalRoofArea))1
# #########################################################
#########################################################
# Virtual greenhouse specification: a multi-roof greenhouse virtually connecting 10 of our real greenhouses.
# You can change these numbers according to your greenhouse design and specification.
# greenhouse roof type
greenhouseRoofType = "SimplifiedAFlame"
# #width of the greenhouse (m)
# greenhouseWidth = 91.44 #
# #depth of the greenhouse (m)
# greenhouseDepth = 14.6
# the greenhouse floor area was replaced with the following, referring to a common business-size greenhouse
# number of roofs (-). If this is 1, the greenhouse is a single-roof greenhouse. If >1, multi-roof greenhouse.
numOfRoofs = 10.0
# source: https://www.interempresas.net/FeriaVirtual/Catalogos_y_documentos/1381/Multispan-greenhouse-ULMA-Agricola.pdf,
# source: https://www.alibaba.com/product-detail/Multi-roof-Poly-Film-Tunnel-Greenhouse_60184626287.html
# this should be changed depending on the type of greenhouse simulated. (m)
widthPerRoof = 9.6
# total width of the greenhouse (m)
# greenhouseWidth = 91.44
greenhouseWidth = numOfRoofs * widthPerRoof
# depth of the greenhouse (m)
greenhouseDepth = 14.6
# area of the greenhouse (m**2)
greenhouseFloorArea = greenhouseWidth * greenhouseDepth
print("greenhouseFloorArea[m^2]:{}".format(greenhouseFloorArea))
# # The following calculation gives the real cultivation area of our research greenhouse. However, since it has too large a vacant space, which is unrealistic for a business greenhouse, this number was not used.
# #width of the greenhouse cultivation area (m)
# greenhouseCultivationFloorWidth = 73.3
# #depth of the greenhouse cultivation area(m)
# greenhouseCultivationFloorDepth = 10.89
# #floor area of greenhouse cultivation area(m**2)
# greenhouseCultivationFloorArea = greenhouseCultivationFloorWidth * greenhouseCultivationFloorDepth
# Instead, it was assumed the cultivation area is 0.9 times the total greenhouse floor area.
greenhouseCultivationFloorArea = greenhouseFloorArea * 0.9
print("greenhouseCultivationFloorArea[m^2]:{}".format(greenhouseCultivationFloorArea))
# the type of roof direction
roofDirectionNotation = "EastWestDirectionRoof"
# roofDirectionNotation = "NorthSouthDirectionRoof"
# side wall height of greenhouse (m)
greenhouseHeightSideWall = 1.8288 # = 6[feet]
# the total sidewall area (perimeter * sidewall height)
greenhouseSideWallArea = 2.0 * (greenhouseWidth + greenhouseDepth) * greenhouseHeightSideWall
print("greenhouseSideWallArea[m^2]:{}".format(greenhouseSideWallArea))
# center height of greenhouse (m)
greenhouseHeightRoofTop = 4.8768 # = 16[feet]
# slant width of one rooftop face, from the Pythagorean theorem. Assumes the rooftop is
# straight (not curved) and the top height and roof angles are the same at each roof.
greenhouseRoofWidth = math.sqrt((greenhouseWidth/(numOfRoofs*2.0))**2.0 + (greenhouseHeightRoofTop-greenhouseHeightSideWall)**2.0)
print ("greenhouseRoofWidth [m]: {}".format(greenhouseRoofWidth))
# the slant length of the roof faces oriented east or north
greenhouseRoofWidthEastOrNorth = greenhouseRoofWidth
# the slant length of the roof faces oriented west or south
greenhouseRoofWidthWestOrSouth = greenhouseRoofWidth
# angle of the rooftop (theta). [rad]
greenhouseRoofAngle = math.acos(((greenhouseWidth/(numOfRoofs*2.0)) / greenhouseRoofWidth))
# greenhouseRoofAngle = 0.0
print ("greenhouseRoofAngle (rad) : {}".format(greenhouseRoofAngle))
# the angle of the roof facing north or east [rad]
roofAngleEastOrNorth = greenhouseRoofAngle
# the angle of the roof facing south or west [rad]. This should be modified if the roof angle differs from the other side.
roofAngleWestOrSouth = greenhouseRoofAngle
# area of the rooftop [m^2]: both roof faces of each of the numOfRoofs spans
greenhouseTotalRoofArea = greenhouseRoofWidth * greenhouseDepth * numOfRoofs * 2.0
print ("greenhouseTotalRoofArea[m^2]: {}".format(greenhouseTotalRoofArea))
# roof area facing east or north [m^2]
greenhouseRoofTotalAreaEastOrNorth = greenhouseRoofWidthEastOrNorth * greenhouseDepth * numOfRoofs
print ("greenhouseRoofTotalAreaEastOrNorth[m^2]: {}".format(greenhouseRoofTotalAreaEastOrNorth))
# roof area facing west or south [m^2]
greenhouseRoofTotalAreaWestOrSouth = greenhouseRoofWidthWestOrSouth * greenhouseDepth * numOfRoofs
print ("greenhouseRoofTotalAreaWestOrSouth[m^2]: {}".format(greenhouseRoofTotalAreaWestOrSouth))
#########################################################
# the proportion of shade made by the greenhouse inner structure, actuators (e.g. sensors and fog cooling systems) and farming equipment (e.g. gutters) (-)
# GreenhouseShadeProportion = 0.1
GreenhouseShadeProportionByInnerStructures = 0.05
# tipburn-free DLI threshold for butterhead lettuce [mol m^-2 day^-1]
DLIForButterHeadLettuceWithNoTipburn = 17.0
# equivalent PPFD [umol m^-2 s^-1]
# the divisor uses hourperDay/2.0 because the daylight period was assumed to be half a day (12 hours)
# OptimumPPFDForButterHeadLettuceWithNoTipburn = DLIForButterHeadLettuceWithNoTipburn * 1000000.0 / float(secondperMinute*minuteperHour*hourperDay)
OptimumPPFDForButterHeadLettuceWithNoTipburn = DLIForButterHeadLettuceWithNoTipburn * 1000000.0 / float(secondperMinute*minuteperHour*hourperDay/2.0)
print("OptimumPPFDForButterHeadLettuceWithNoTipburn (PPFD):{}".format(OptimumPPFDForButterHeadLettuceWithNoTipburn))
# the PPFD threshold above which the shading curtain is deployed;
# the 1.5 multiplier is an arbitrary margin over the no-tipburn optimum
shadingCurtainDeployPPFD = OptimumPPFDForButterHeadLettuceWithNoTipburn * 1.5
print("shadingCurtainDeployPPFD:{}".format(shadingCurtainDeployPPFD))
# The maximum value of m: the number of roofs that incident light penetrates in the model. Used at SolarIrradianceMultiroofRoof.py.
# If the angle between the incident light and the horizontal axis is too small, m can become too large, which
# causes a system error at Util.sigma by iterating too much and slows down the simulation. Thus, this upper limit was set.
mMax = numOfRoofs
# defaultIterationLimit = 495
#######################################################
##########Specification of the greenhouse end##########
#######################################################
##################################################################
##########specification of glazing (covering film) start##########
##################################################################
greenhouseGlazingType = "polyethylene (PE) DoubleLayer"
# ratio of visible light (400nm - 750nm) transmitted through a glazing material (-)
# source: "TOMATO GREENHOUSE ROADMAP" https://www.amazon.com/Tomato-Greenhouse-Roadmap-Guide-Production-ebook/dp/B00O4CPO42
# https://www.goodreads.com/book/show/23878832-tomato-greenhouse-roadmap
# singlePEPERTransmittance = 0.875
singlePERTransmittance = 0.85
# double-layer transmittance: two films in series, so the single-layer value squared
dobulePERTransmittance = singlePERTransmittance ** 2.0
# reference: https://www.filmetrics.com/refractive-index-database/Polyethylene/PE-Polyethene
PEFilmRefractiveIndex = 1.5
# reference: https://en.wikipedia.org/wiki/Refractive_index
AirRefractiveIndex = 1.000293
# Source of reference https://www.amazon.com/Tomato-Greenhouse-Roadmap-Guide-Production-ebook/dp/B00O4CPO42
singlePolycarbonateTransmittance = 0.91
doublePolycarbonateTransmittance = singlePolycarbonateTransmittance ** 2.0
# covering materials actually used by the simulation: single-layer PE on roof and sidewalls
roofCoveringTransmittance = singlePERTransmittance
sideWallTransmittance = singlePERTransmittance
################################################################
##########specification of glazing (covering film) end##########
################################################################
#####################################################################
##########specification of OPV module (film or panel) start##########
#####################################################################
# [rad]. tilt of OPV module = tilt of the greenhouse roof
# OPVAngle = math.radians(0.0)
OPVAngle = greenhouseRoofAngle
# the coverage ratio of OPV modules on the greenhouse roof [-]
# OPVAreaCoverageRatio = 0.20
# OPVAreaCoverageRatio = 0.58
# OPVAreaCoverageRatio = 0.5
OPVAreaCoverageRatio = 0.009090909090909
# print("OPVAreaCoverageRatio:{}".format(OPVAreaCoverageRatio))
# the coverage ratio of OPV modules on the greenhouse roof during the summer period [-].
# If you set this value equal to OPVAreaCoverageRatio, the OPV coverage ratio is assumed constant over the whole period.
# OPVAreaCoverageRatioSummerPeriod = 1.0
# OPVAreaCoverageRatioSummerPeriod = 0.5
OPVAreaCoverageRatioSummerPeriod = OPVAreaCoverageRatio
# OPVAreaCoverageRatioSummerPeriod = 0.0
print("OPVAreaCoverageRatioSummerPeriod:{}".format(OPVAreaCoverageRatioSummerPeriod))
# the area of OPV on the rooftop [m^2]
OPVArea = OPVAreaCoverageRatio * greenhouseTotalRoofArea
print("OPVArea:{}".format(OPVArea))
# the PV module area facing each orientation, proportional to the roof area in that orientation
OPVAreaFacingEastOrNorthfacingRoof = OPVArea * (greenhouseRoofTotalAreaEastOrNorth/greenhouseTotalRoofArea)
OPVAreaFacingWestOrSouthfacingRoof = OPVArea * (greenhouseRoofTotalAreaWestOrSouth/greenhouseTotalRoofArea)
# hourly degradation ratio of the OPV module (/hour)
# TODO: find a paper describing the general degradation ratio of OPV modules
# the specification document of our PV module says that the guaranteed quality period is 1 year.
# reference (degradation ratio of PV module): https://www.nrel.gov/docs/fy12osti/51664.pdf, https://www.solar-partners.jp/pv-eco-informations-41958.html
# It was assumed that the inorganic PV module expiration date is 20 years and its yearly degradation rate is 0.8% (from the first reference, page 6), which implies the OPV film degrades 20 times faster.
PVDegradationRatioPerHour = 20.0 * 0.008 / dayperYear / hourperDay
print("PVDegradationRatioPerHour:{}".format(PVDegradationRatioPerHour))
# the coefficient converting the ideal (given by manufacturers) cell efficiency to the real efficiency under actual conditions
# degradeCoefficientFromIdealtoReal = 0.85
# this website (https://franklinaid.com/2013/02/06/solar-power-for-subs-the-panels/) says "In real-life conditions, the actual values will be somewhat more or less than listed by the manufacturer."
# So it was assumed the manufacturer's spec sheet correctly shows the actual power.
degradeCoefficientFromIdealtoReal = 1.00
########################################################################################################################
# the following information should be taken from the spec sheet provided by a PV module manufacturer
########################################################################################################################
# unit [/K]. Temperature coefficient of the voltage generated by the OPV film relative to STC (25 C),
# mentioned in Table 1-2-1
TempCoeffitientVmpp = -0.0019
# unit [/K]. Temperature coefficient of the current generated by the OPV film relative to STC (25 C),
# mentioned in Table 1-2-1
TempCoeffitientImpp = 0.0008
# unit [/K]. Temperature coefficient of the power generated by the OPV film relative to STC (25 C),
# mentioned in Table 1-2-
TempCoeffitientPmpp = 0.0002
# transmittance of VISIBLE sunlight through the OPV film.
# OPVPARTransmittance = 0.6
OPVPARTransmittance = 0.3
# short-circuit current, unit: [A]
shortCircuitCurrent = 0.72
# open-circuit voltage, unit [V]
openCIrcuitVoltage = 24.0
# current at the maximum power point, unit: [A]
currentAtMaximumPowerPoint = 0.48
# voltage at the maximum power point, unit [V]
voltageAtMaximumPowerPoint = 16.0
# maximum power output, unit: [watt]
maximumPower = currentAtMaximumPowerPoint * voltageAtMaximumPowerPoint
print("maximumPower:{}".format(maximumPower))
# unit: [m^2]. This is the area per sheet, not per roll (a roll has 8 sheets concatenated). This area excludes the margin space of the OPV sheet. The margins are made from transparent laminated film with connectors.
OPVAreaPerSheet = 0.849 * 0.66
#conversion efficiency from ligtht energy to electricity
#The efficiency of solar panels is based on standard testing conditions (STC),
#under which all solar panel manufacturers must test their modules. STC specifies a temperature of 25°C (77 F),
#solar irradiance of 1000 W/m2 and an air mass 1.5 (AM1.5) spectrums.
#The STC efficiency of a 240-watt module measuring 1.65 square meters is calculated as follows:
#240 watts ÷ (1.65m2 (module area) x 1000 W/m2) = 14.54%.
#source: http://www.solartown.com/learning/solar-panels/solar-panel-efficiency-have-you-checked-your-eta-lately/
# http://www.isu.edu/~rodrrene/Calculating%20the%20Efficiency%20of%20the%20Solar%20Cell.doc
# unit: -
# OPVEfficiencyRatioSTC = maximumPower / OPVAreaPerSheet / 1000.0
# OPVEfficiencyRatioSTC = 0.2
# this value is the cell efficiency of OPV film purchased at Kacira Lab, CEAC at University of Arizona
# OPVEfficiencyRatioSTC = 0.0137
# source: 19. <NAME>., <NAME>., <NAME>., <NAME>., <NAME>., St <NAME>., … <NAME>. (2017). Printed semi-transparent large area organic photovoltaic modules with power conversion efficiencies of close to 5 %. Organic Electronics: Physics, Materials, Applications, 45, 41–45. https://doi.org/10.1016/j.orgel.2017.03.013
# cell conversion efficiency at STC (-); value taken from the literature cited above
OPVEfficiencyRatioSTC = 0.043
print("OPVCellEfficiencyRatioSTC:{}".format(OPVEfficiencyRatioSTC))
#what is an air mass??
#エアマスとは太陽光の分光放射分布を表すパラメーター、標準状態の大気(標準気圧1013hPa)に垂直に入射(太陽高度角90°)した
# 太陽直達光が通過する路程の長さをAM1.0として、それに対する比で表わされます。
#source: http://www.solartech.jp/module_char/standard.html
########################################################################################################################
#source: http://energy.gov/sites/prod/files/2014/01/f7/pvmrw13_ps5_3m_nachtigal.pdf (3M Ultra-Barrier Solar Film spec.pdf)
#the price of OPV per area [EUR/m^2]
# [EUR]
originalOPVPriceEUR = 13305.6
OPVPriceEUR = 13305.6
# [m^2]
OPVSizePurchased = 6.0 * 0.925 * 10.0
# [EUR/m^2]
OPVPriceperAreaEUR = OPVPriceEUR / OPVSizePurchased
#as of 11Nov/2016 [USD/EUR]
CurrencyConversionRatioUSDEUR= 1/1.0850
#the price of OPV per area [USD/m^2]
# OPVPricePerAreaUSD = OPVPriceperAreaEUR*CurrencyConversionRatioUSDEUR
# OPVPricePerAreaUSD = 50.0
OPVPricePerAreaUSD = 0.0
# reference of PV panel purchase cost. 200 was a reasonable price in the US.
# https://www.homedepot.com/p/Grape-Solar-265-Watt-Polycrystalline-Solar-Panel-4-Pack-GS-P60-265x4/206365811?cm_mmc=Shopping%7cVF%7cG%7c0%7cG-VF-PLA%7c&gclid=Cj0KCQjw6pLZBRCxARIsALaaY9YIkZf5W4LESs9HA2RgxsYaeXOfzvMuMCUT9iZ7xU65GafQel6FIY8aApLfEALw_wcB&dclid=CLjZj46A2NsCFYQlfwodGQoKsQ
# https://www.nrel.gov/docs/fy17osti/68925.pdf
# OPVPricePerAreaUSD = 200.0
print("OPVPricePerAreaUSD:{}".format(OPVPricePerAreaUSD))
# True == consider the OPV cost, False == ignore the OPV cost
ifConsiderOPVCost = True
# if you set this 730, you assume the purchase cost of is OPV zero because at the simulator class, this number divides the integer number, which gives zero.
# OPVDepreciationPeriodDays = 730.0
OPVDepreciationPeriodDays = 365.0
OPVDepreciationMethod = "StraightLine"
###################################################################
##########specification of OPV module (film or panel) end##########
###################################################################
##########################################################
##########specification of shading curtain start##########
##########################################################
#the transmittance ratio of shading curtain
shadingTransmittanceRatio = 0.45
isShadingCurtainReinforcementLearning = True
#if True, a black net is covered over the roof for shading in summer
# hasShadingCurtain = False
hasShadingCurtain = True
openCurtainString = "openCurtain"
closeCurtainString = "closeCurtain"
# ######## default setting ########
# ShadingCurtainDeployStartMMSpring = 5
# ShadingCurtainDeployStartDDSpring = 31
# ShadingCurtainDeployEndMMSpring = 5
# ShadingCurtainDeployEndDDSpring = 31
# ShadingCurtainDeployStartMMFall =9
# ShadingCurtainDeployStartDDFall =15
# ShadingCurtainDeployEndMMFall =6
# ShadingCurtainDeployEndDDFall =29
# # # optimzed period on greenhouse retail price, starting from minimum values
# ShadingCurtainDeployStartMMSpring = 1
# ShadingCurtainDeployStartDDSpring = 1
# ShadingCurtainDeployEndMMSpring = 1
# ShadingCurtainDeployEndDDSpring = 4
# ShadingCurtainDeployStartMMFall = 1
# ShadingCurtainDeployStartDDFall = 6
# ShadingCurtainDeployEndMMFall = 1
# ShadingCurtainDeployEndDDFall = 16
# # # optimzed period on greenhouse retail price, starting from middle values
# ShadingCurtainDeployStartMMSpring = 6
# ShadingCurtainDeployStartDDSpring = 19
# ShadingCurtainDeployEndMMSpring = 7
# ShadingCurtainDeployEndDDSpring = 5
# ShadingCurtainDeployStartMMFall = 7
# ShadingCurtainDeployStartDDFall = 14
# ShadingCurtainDeployEndMMFall = 7
# ShadingCurtainDeployEndDDFall = 15
# # # optimzed period on greenhouse retail price, starting from max values
# ShadingCurtainDeployStartMMSpring = 12
# ShadingCurtainDeployStartDDSpring = 24
# ShadingCurtainDeployEndMMSpring = 12
# ShadingCurtainDeployEndDDSpring = 28
# ShadingCurtainDeployStartMMFall = 12
# ShadingCurtainDeployStartDDFall = 30
# ShadingCurtainDeployEndMMFall = 12
# ShadingCurtainDeployEndDDFall = 31
# optimzed period on open field farming retail price, starting from max values
# ShadingCurtainDeployStartMMSpring = 8
# ShadingCurtainDeployStartDDSpring = 28
# ShadingCurtainDeployEndMMSpring = 12
# ShadingCurtainDeployEndDDSpring = 2
# ShadingCurtainDeployStartMMFall = 12
# ShadingCurtainDeployStartDDFall = 12
# ShadingCurtainDeployEndMMFall = 12
# ShadingCurtainDeployEndDDFall = 31
# # # initial date s for optimization
ShadingCurtainDeployStartMMSpring = 5
ShadingCurtainDeployStartDDSpring = 17
ShadingCurtainDeployEndMMSpring = 6
ShadingCurtainDeployEndDDSpring = 12
ShadingCurtainDeployStartMMFall = 6
ShadingCurtainDeployStartDDFall = 23
ShadingCurtainDeployEndMMFall = 7
ShadingCurtainDeployEndDDFall = 2
# Summer period. This should happend soon after ending the shading curtain deployment period.
SummerPeriodStartDate = datetime.date(int(SimulationStartDate[0:4]), ShadingCurtainDeployEndMMSpring, ShadingCurtainDeployEndDDSpring) + datetime.timedelta(days=1)
SummerPeriodEndDate = datetime.date(int(SimulationStartDate[0:4]), ShadingCurtainDeployStartMMFall, ShadingCurtainDeployStartDDFall) - datetime.timedelta(days=1)
SummerPeriodStartMM = int(SummerPeriodStartDate.month)
print("SummerPeriodStartMM:{}".format(SummerPeriodStartMM))
SummerPeriodStartDD = int(SummerPeriodStartDate.day)
print("SummerPeriodStartDD:{}".format(SummerPeriodStartDD))
SummerPeriodEndMM = int(SummerPeriodEndDate.month)
print("SummerPeriodEndMM:{}".format(SummerPeriodEndMM))
SummerPeriodEndDD = int(SummerPeriodEndDate.day)
print("SummerPeriodEndDD:{}".format(SummerPeriodEndDD))
# this is gonna be True when you want to deploy shading curtains only from ShadigCuratinDeployStartHH to ShadigCuratinDeployEndHH
IsShadingCurtainDeployOnlyDayTime = True
ShadigCuratinDeployStartHH = 10
ShadigCuratinDeployEndHH = 14
IsDifferentShadingCurtainDeployTimeEachMonth = True
# this is gonna be true when you want to control shading curtain opening and closing every hour
IsHourlyShadingCurtainDeploy = False
########################################################
##########specification of shading curtain end##########
########################################################
#################################################
##########Specification of plants start##########
#################################################
#Cost of plant production. the unit is USD/m^2
# the conversion rate was calculated from from University of California Cooperative Extension (UCEC) UC Small farm program (http://sfp.ucdavis.edu/crops/coststudieshtml/lettuce/LettuceTable1/)
plantProductionCostperSquareMeterPerYear = 1.096405
numberOfRidge = 5.0
#unit: m. plant density can be derived from this.
distanceBetweenPlants = 0.2
# plant density (num of heads per area) [head/m^2]
plantDensity = 1.0/(distanceBetweenPlants**2.0)
print("plantDensity:{}".format(plantDensity))
#number of heads
# numberOFheads = int(greenhouseCultivationFloorDepth/distanceBetweenPlants * numberOfRidge)
numberOFheads = int (plantDensity * greenhouseCultivationFloorArea)
print("numberOFheads:{}".format(numberOFheads))
# number of head per cultivation area [heads/m^2]
numberOFheadsPerArea = float(numberOFheads / greenhouseCultivationFloorArea)
#photoperiod (time of lighting in a day). the unit is hour
# TODO: this should be revised so that the photo period is calculated by the sum of PPFD each day or the change of direct solar radiation or the diff of sunse and sunrise
photoperiod = 14.0
#number of cultivation days (days/harvest)
cultivationDaysperHarvest = 35
# cultivationDaysperHarvest = 30
# the constant of each plant growth model
# Source: https://www.researchgate.net/publication/266453402_TEN_YEARS_OF_HYDROPONIC_LETTUCE_RESEARCH
A_J_Both_Modified_TaylorExpantionWithFluctuatingDLI = "A_J_Both_Modified_TaylorExpantionWithFluctuatingDLI"
# Source: https://www.researchgate.net/publication/4745082_Validation_of_a_dynamic_lettuce_growth_model_for_greenhouse_climate_control
E_J_VanHenten1994 = "E_J_VanHenten1994"
# Source: https://www.researchgate.net/publication/286938495_A_validated_model_to_predict_the_effects_of_environment_on_the_growth_of_lettuce_Lactuca_sativa_L_Implications_for_climate_change
S_Pearson1997 = "S_Pearson1997"
plantGrowthModel = E_J_VanHenten1994
# lettuce base temperature [Celusius]
# Reference: A validated model to predict the effects of environment on the growth of lettuce (Lactuca sativa L.): Implications for climate change
# https://www.tandfonline.com/doi/abs/10.1080/14620316.1997.11515538
lettuceBaseTemperature = 0.0
DryMassToFreshMass = 1.0/0.045
# the weight to harvest [g]
harvestDryWeight = 200.0 / DryMassToFreshMass
# harvestDryWeight = 999.0 / DryMassToFreshMass
# operation cost of plants [USD/m^2/year]
plantcostperSquaremeterperYear = 1.096405
# the DLI upper limitation causing some tipburn
DLIforTipBurn = DLIForButterHeadLettuceWithNoTipburn
# the discount ratio when there are some tipburn observed
tipburnDiscountRatio = 0.2
# make this number 1.0 in the end. change this only for simulation experiment
plantPriceDiscountRatio_justForSimulation = 1.0
# the set point temperature during day time [Celusius]
# reference: <NAME>, TEN YEARS OF HYDROPONIC LETTUCE RESEARCH: https://www.researchgate.net/publication/266453402_TEN_YEARS_OF_HYDROPONIC_LETTUCE_RESEARCH
setPointTemperatureDayTime = 24.0
# setPointTemperatureDayTime = 16.8
# the set point temperature during night time [Celusius]
# reference: <NAME>, TEN YEARS OF HYDROPONIC LETTUCE RESEARCH: https://www.researchgate.net/publication/266453402_TEN_YEARS_OF_HYDROPONIC_LETTUCE_RESEARCH
setPointTemperatureNightTime = 19.0
# setPointTemperatureNightTime = 16.8
setPointHumidityDayTime = 0.65
# setPointHumidityDayTime = 0.7
setPointHumidityNightTime = 0.8
# setPointHumidityNightTime = 0.7
# the flags indicating daytime or nighttime at each time step
daytime = "daytime"
nighttime = "nighttime"
# sales price of lettuce grown at greenhouses, which is usually higher than that of open field farming grown lettuce
# source
# 1.99 USD head-1 for "Lettuce, Other, Boston-Greenhouse") cited from USDA (https://www.ams.usda.gov/mnreports/fvwretail.pdf)
# other source: USDA, Agricultural, Marketing, Service, National Retail Report - Specialty Crops page 9 and others: https://www.ams.usda.gov/mnreports/fvwretail.pdf
# unit: USD head-1
romainLettucePriceBasedOnHeadPrice = 1.99
###################################################
##########Specification of the plants end##########
###################################################
###################################################
##########Specification of labor cost start########
###################################################
# source: https://onlinelibrary.wiley.com/doi/abs/10.1111/cjag.12161
# unit: people/10000kg yield
necessaryLaborPer10000kgYield = 0.315
# source:https://www.bls.gov/regions/west/news-release/occupationalemploymentandwages_tucson.htm
# unit:USD/person/hour
hourlyWagePerPerson = 12.79
# unit: hour/day
workingHourPerDay = 8.0
###################################################
##########Specification of labor cost end########
###################################################
###################################################
##########Specification of energy cost start#######
###################################################
# energy efficiency of heating equipment [-]
# source: https://www.alibaba.com/product-detail/Natural-gas-fired-hot-air-heater_60369835987.html?spm=a2700.7724838.2017115.1.527251bcQ2pojZ
# source: https://www.aga.org/natural-gas/in-your-home/heating/
heatingEquipmentEfficiency = 0.9
# unit: USD
heatingEquipmentQurchaseCost = 0.0
# source: http://www.world-nuclear.org/information-library/facts-and-figures/heat-values-of-various-fuels.aspx
# source: http://agnatural.pt/documentos/ver/natural-gas-conversion-guide_cb4f0ccd80ccaf88ca5ec336a38600867db5aaf1.pdf
# unit: MJ m-3
naturalGasSpecificEnergy = {"MJ m-3" :38.7}
naturalGasSpecificEnergy["MJ ft-3"] = naturalGasSpecificEnergy["MJ m-3"] / 35.3147
heatOfWaterEcaporation = {"J kg-1" : 2257}
# source: https://www.researchgate.net/publication/265890843_A_Review_of_Evaporative_Cooling_Technologies?enrichId=rgreq-2c40013798cfb3c564cf35844f4947fb-XXX&enrichSource=Y292ZXJQYWdlOzI2NTg5MDg0MztBUzoxNjUxOTgyMjg4OTM2OTdAMTQxNjM5NzczNTk5Nw%3D%3D&el=1_x_3&_esc=publicationCoverPdf
# COP = coefficient of persormance. COP = Q/W
# Q is the useful heat supplied or removed by the considered system. W is the work required by the considered system.
PadAndFanCOP = 15.0
###################################################
##########Specification of energy cost end#########
###################################################
#########################################################################
###########################Global variable end###########################
#########################################################################
class CropElectricityYeildSimulatorConstant:
"""
a constant class.
"""
###########the constractor##################
def __init__(self):
print ("call CropElectricityYeildSimulatorConstant")
###########the constractor end##################
###########the methods##################
def method(self, val):
print("call CropElectricityYeildSimulatorConstant method")
###########the methods end##################
| [
"math.acos",
"math.sqrt",
"math.radians",
"numpy.array",
"datetime.timedelta"
] | [((4507, 4565), 'numpy.array', 'np.array', (['[31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]'], {}), '([31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31])\n', (4515, 4565), True, 'import numpy as np\n'), ((4588, 4646), 'numpy.array', 'np.array', (['[31, 29, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]'], {}), '([31, 29, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31])\n', (4596, 4646), True, 'import numpy as np\n'), ((5828, 5852), 'math.radians', 'math.radians', (['(32.2800408)'], {}), '(32.2800408)\n', (5840, 5852), False, 'import math\n'), ((5901, 5926), 'math.radians', 'math.radians', (['(110.9422745)'], {}), '(110.9422745)\n', (5913, 5926), False, 'import math\n'), ((13903, 14026), 'math.sqrt', 'math.sqrt', (['((greenhouseWidth / (numOfRoofs * 2.0)) ** 2.0 + (greenhouseHeightRoofTop -\n greenhouseHeightSideWall) ** 2.0)'], {}), '((greenhouseWidth / (numOfRoofs * 2.0)) ** 2.0 + (\n greenhouseHeightRoofTop - greenhouseHeightSideWall) ** 2.0)\n', (13912, 14026), False, 'import math\n'), ((14338, 14407), 'math.acos', 'math.acos', (['(greenhouseWidth / (numOfRoofs * 2.0) / greenhouseRoofWidth)'], {}), '(greenhouseWidth / (numOfRoofs * 2.0) / greenhouseRoofWidth)\n', (14347, 14407), False, 'import math\n'), ((28881, 28907), 'datetime.timedelta', 'datetime.timedelta', ([], {'days': '(1)'}), '(days=1)\n', (28899, 28907), False, 'import datetime\n'), ((29043, 29069), 'datetime.timedelta', 'datetime.timedelta', ([], {'days': '(1)'}), '(days=1)\n', (29061, 29069), False, 'import datetime\n')] |
import os
import cv2
import time
import shutil
import numpy as np
import matplotlib.pyplot as plt
from open3d import *
# Display utilities
def visualize_joints_2d(ax, joints, joint_idxs=True, links=None, alpha=1):
"""Draw 2d skeleton on matplotlib axis"""
if links is None:
links = [(0, 1, 2, 3, 4), (0, 5, 6, 7, 8), (0, 9, 10, 11, 12),
(0, 13, 14, 15, 16), (0, 17, 18, 19, 20)]
# Scatter hand joints on image
x = joints[:, 0]
y = joints[:, 1]
ax.scatter(x, y, 1, 'r')
# Add idx labels to joints
for row_idx, row in enumerate(joints):
if joint_idxs:
plt.annotate(str(row_idx), (row[0], row[1]))
_draw2djoints(ax, joints, links, alpha=alpha)
def _draw2djoints(ax, annots, links, alpha=1):
"""Draw segments, one color per link"""
colors = ['r', 'm', 'b', 'c', 'g']
for finger_idx, finger_links in enumerate(links):
for idx in range(len(finger_links) - 1):
_draw2dseg(
ax,
annots,
finger_links[idx],
finger_links[idx + 1],
c=colors[finger_idx],
alpha=alpha)
def _draw2dseg(ax, annot, idx1, idx2, c='r', alpha=1):
"""Draw segment of given color"""
ax.plot(
[annot[idx1, 0], annot[idx2, 0]], [annot[idx1, 1], annot[idx2, 1]],
c=c,
alpha=alpha)
reorder_idx = np.array([
0, 1, 6, 7, 8, 2, 9, 10, 11, 3, 12, 13, 14, 4, 15, 16, 17, 5, 18, 19,
20
])
cam_extr = np.array(
[[0.999988496304, -0.00468848412856, 0.000982563360594,
25.7], [0.00469115935266, 0.999985218048, -0.00273845880292, 1.22],
[-0.000969709653873, 0.00274303671904, 0.99999576807,
3.902], [0, 0, 0, 1]])
cam_intr = np.array([[1395.749023, 0, 935.732544],
[0, 1395.749268, 540.681030], [0, 0, 1]])
found = False
root_data_file = './F-PHAB/Video_files/'
skeleton_root = './F-PHAB/Hand_pose_annotation_v1'
for subject_dr in ['Subject_6']:
if subject_dr.startswith('.'):
continue
subject_dir = os.path.join(root_data_file, subject_dr)
for action_dr in os.listdir(subject_dir):
if action_dr.startswith('.'):
continue
action_dir = os.path.join(subject_dir, action_dr)
for seq_dr in os.listdir(action_dir):
if seq_dr.startswith('.'):
continue
seq_dir = os.path.join(action_dir, seq_dr)
if seq_dir == './F-PHAB/Video_files/Subject_6/handshake/1':
found = True
if not found:
continue
# skip files that caused error
if seq_dir in ['./F-PHAB/Video_files/Subject_3/drink_mug/2',
'./F-PHAB/Video_files/Subject_6/open_soda_can/1',
'./F-PHAB/Video_files/Subject_6/drink_mug/1',
'./F-PHAB/Video_files/Subject_5/give_card/3',
'./F-PHAB/Video_files/Subject_5/handshake/4',
'./F-PHAB/Video_files/Subject_5/receive_coin/1',
'./F-PHAB/Video_files/Subject_6/handshake/6',
'./F-PHAB/Video_files/Subject_6/handshake/5']:
continue
print(seq_dir)
color_dir = os.path.join(seq_dir, 'color')
color_cropped = os.path.join(seq_dir, 'color_cropped')
if os.path.isdir(color_cropped):
shutil.rmtree(color_cropped)
if not os.path.exists(color_cropped):
os.makedirs(color_cropped, exist_ok=True)
skeleton_path = os.path.join(skeleton_root, seq_dir.replace(
'./F-PHAB/Video_files/', ''), 'skeleton.txt')
skeleton_vals = np.loadtxt(skeleton_path)
if skeleton_vals.size > 0:
skel_order = skeleton_vals[:, 0]
skel = skeleton_vals[:, 1:].reshape(
skeleton_vals.shape[0], 21, -1)
skel = skel.astype(np.float32)
centerlefttop = np.mean(skel, axis=1)
centerlefttop[:, 0] -= 100
centerlefttop[:, 1] += 100
centerrightbottom = np.mean(skel, axis=1)
centerrightbottom[:, 0] += 100
centerrightbottom[:, 1] -= 100
centerlefttop_camcoords = cam_extr.dot(
np.concatenate([centerlefttop, np.ones([centerlefttop.shape[0], 1])], 1).transpose()).transpose()[:, :3].astype(np.float32)
centerlefttop_pixel = np.array(cam_intr).dot(
centerlefttop_camcoords.transpose()).transpose()
centerlefttop_pixel = (
centerlefttop_pixel / centerlefttop_pixel[:, 2:])[:, :2]
centerrightbottom_camcoords = cam_extr.dot(
np.concatenate([centerrightbottom, np.ones([centerrightbottom.shape[0], 1])], 1).transpose()).transpose()[:, :3].astype(np.float32)
centerrightbottom_pixel = np.array(cam_intr).dot(
centerrightbottom_camcoords.transpose()).transpose()
centerrightbottom_pixel = (
centerrightbottom_pixel / centerrightbottom_pixel[:, 2:])[:, :2]
for idx in range(centerrightbottom_camcoords.shape[0]):
color = cv2.imread(os.path.join(
color_dir, 'color_{:04d}.jpeg'.format(int(skel_order[idx]))), -1)
new_xmin = max(centerlefttop_pixel[idx, 0], 0)
new_ymin = max(centerrightbottom_pixel[idx, 1], 0)
new_xmax = min(
centerrightbottom_pixel[idx, 0], color.shape[1]-1)
new_ymax = min(
centerlefttop_pixel[idx, 1], color.shape[0]-1)
crop = color[int(new_ymin):int(new_ymax),
int(new_xmin):int(new_xmax)]
output_color = cv2.resize(
crop, (96, 96), interpolation=cv2.INTER_NEAREST)
cv2.imwrite(os.path.join(color_cropped, 'color_{:04d}.jpeg'.format(
int(skel_order[idx]))), output_color)
| [
"os.path.exists",
"numpy.mean",
"os.listdir",
"cv2.resize",
"os.makedirs",
"numpy.ones",
"os.path.join",
"numpy.array",
"os.path.isdir",
"shutil.rmtree",
"numpy.loadtxt"
] | [((1405, 1493), 'numpy.array', 'np.array', (['[0, 1, 6, 7, 8, 2, 9, 10, 11, 3, 12, 13, 14, 4, 15, 16, 17, 5, 18, 19, 20]'], {}), '([0, 1, 6, 7, 8, 2, 9, 10, 11, 3, 12, 13, 14, 4, 15, 16, 17, 5, 18,\n 19, 20])\n', (1413, 1493), True, 'import numpy as np\n'), ((1513, 1732), 'numpy.array', 'np.array', (['[[0.999988496304, -0.00468848412856, 0.000982563360594, 25.7], [\n 0.00469115935266, 0.999985218048, -0.00273845880292, 1.22], [-\n 0.000969709653873, 0.00274303671904, 0.99999576807, 3.902], [0, 0, 0, 1]]'], {}), '([[0.999988496304, -0.00468848412856, 0.000982563360594, 25.7], [\n 0.00469115935266, 0.999985218048, -0.00273845880292, 1.22], [-\n 0.000969709653873, 0.00274303671904, 0.99999576807, 3.902], [0, 0, 0, 1]])\n', (1521, 1732), True, 'import numpy as np\n'), ((1756, 1841), 'numpy.array', 'np.array', (['[[1395.749023, 0, 935.732544], [0, 1395.749268, 540.68103], [0, 0, 1]]'], {}), '([[1395.749023, 0, 935.732544], [0, 1395.749268, 540.68103], [0, 0, 1]]\n )\n', (1764, 1841), True, 'import numpy as np\n'), ((2071, 2111), 'os.path.join', 'os.path.join', (['root_data_file', 'subject_dr'], {}), '(root_data_file, subject_dr)\n', (2083, 2111), False, 'import os\n'), ((2133, 2156), 'os.listdir', 'os.listdir', (['subject_dir'], {}), '(subject_dir)\n', (2143, 2156), False, 'import os\n'), ((2238, 2274), 'os.path.join', 'os.path.join', (['subject_dir', 'action_dr'], {}), '(subject_dir, action_dr)\n', (2250, 2274), False, 'import os\n'), ((2297, 2319), 'os.listdir', 'os.listdir', (['action_dir'], {}), '(action_dir)\n', (2307, 2319), False, 'import os\n'), ((2407, 2439), 'os.path.join', 'os.path.join', (['action_dir', 'seq_dr'], {}), '(action_dir, seq_dr)\n', (2419, 2439), False, 'import os\n'), ((3305, 3335), 'os.path.join', 'os.path.join', (['seq_dir', '"""color"""'], {}), "(seq_dir, 'color')\n", (3317, 3335), False, 'import os\n'), ((3364, 3402), 'os.path.join', 'os.path.join', (['seq_dir', '"""color_cropped"""'], {}), "(seq_dir, 'color_cropped')\n", (3376, 3402), 
False, 'import os\n'), ((3418, 3446), 'os.path.isdir', 'os.path.isdir', (['color_cropped'], {}), '(color_cropped)\n', (3431, 3446), False, 'import os\n'), ((3766, 3791), 'numpy.loadtxt', 'np.loadtxt', (['skeleton_path'], {}), '(skeleton_path)\n', (3776, 3791), True, 'import numpy as np\n'), ((3464, 3492), 'shutil.rmtree', 'shutil.rmtree', (['color_cropped'], {}), '(color_cropped)\n', (3477, 3492), False, 'import shutil\n'), ((3512, 3541), 'os.path.exists', 'os.path.exists', (['color_cropped'], {}), '(color_cropped)\n', (3526, 3541), False, 'import os\n'), ((3559, 3600), 'os.makedirs', 'os.makedirs', (['color_cropped'], {'exist_ok': '(True)'}), '(color_cropped, exist_ok=True)\n', (3570, 3600), False, 'import os\n'), ((4065, 4086), 'numpy.mean', 'np.mean', (['skel'], {'axis': '(1)'}), '(skel, axis=1)\n', (4072, 4086), True, 'import numpy as np\n'), ((4210, 4231), 'numpy.mean', 'np.mean', (['skel'], {'axis': '(1)'}), '(skel, axis=1)\n', (4217, 4231), True, 'import numpy as np\n'), ((5989, 6048), 'cv2.resize', 'cv2.resize', (['crop', '(96, 96)'], {'interpolation': 'cv2.INTER_NEAREST'}), '(crop, (96, 96), interpolation=cv2.INTER_NEAREST)\n', (5999, 6048), False, 'import cv2\n'), ((4565, 4583), 'numpy.array', 'np.array', (['cam_intr'], {}), '(cam_intr)\n', (4573, 4583), True, 'import numpy as np\n'), ((5030, 5048), 'numpy.array', 'np.array', (['cam_intr'], {}), '(cam_intr)\n', (5038, 5048), True, 'import numpy as np\n'), ((4434, 4470), 'numpy.ones', 'np.ones', (['[centerlefttop.shape[0], 1]'], {}), '([centerlefttop.shape[0], 1])\n', (4441, 4470), True, 'import numpy as np\n'), ((4891, 4931), 'numpy.ones', 'np.ones', (['[centerrightbottom.shape[0], 1]'], {}), '([centerrightbottom.shape[0], 1])\n', (4898, 4931), True, 'import numpy as np\n')] |
"""Simple example operations that are used to demonstrate some image processing in Xi-CAM.
"""
import numpy as np
from xicam.plugins.operationplugin import (limits, describe_input, describe_output,
operation, opts, output_names, visible)
# TODO remove dependency
from ttictoc import tic, toc
from .colorwheel import create_sym, fake_create_sym
# Define an operation that applies Symmetry to an image
@operation
@output_names("output_image")
@describe_input("image", "Create Orientation Map")
@describe_input("symmetry", "The factor of noise to add to the image")
@limits("order", [1.0, 10.0])
@opts("order", step=1.0)
@visible("image", is_visible=False)
def symmetry(image: np.ndarray, order: float = 2) -> np.ndarray:
if issubclass(image.dtype.type, np.integer):
max_value = np.iinfo(image.dtype).max
else:
max_value = np.finfo(image.dtype).max
tic()
# using fake image for now
whl, rgb2 = fake_create_sym(image, order)
#whl, rgb2 = create_sym(image,order)
print(toc())
#clrwhl , rgb = create_sym(img, order)
#print(img.shape)
return rgb2
| [
"xicam.plugins.operationplugin.output_names",
"xicam.plugins.operationplugin.visible",
"ttictoc.tic",
"numpy.iinfo",
"xicam.plugins.operationplugin.opts",
"xicam.plugins.operationplugin.limits",
"numpy.finfo",
"xicam.plugins.operationplugin.describe_input",
"ttictoc.toc"
] | [((471, 499), 'xicam.plugins.operationplugin.output_names', 'output_names', (['"""output_image"""'], {}), "('output_image')\n", (483, 499), False, 'from xicam.plugins.operationplugin import limits, describe_input, describe_output, operation, opts, output_names, visible\n'), ((502, 551), 'xicam.plugins.operationplugin.describe_input', 'describe_input', (['"""image"""', '"""Create Orientation Map"""'], {}), "('image', 'Create Orientation Map')\n", (516, 551), False, 'from xicam.plugins.operationplugin import limits, describe_input, describe_output, operation, opts, output_names, visible\n'), ((554, 623), 'xicam.plugins.operationplugin.describe_input', 'describe_input', (['"""symmetry"""', '"""The factor of noise to add to the image"""'], {}), "('symmetry', 'The factor of noise to add to the image')\n", (568, 623), False, 'from xicam.plugins.operationplugin import limits, describe_input, describe_output, operation, opts, output_names, visible\n'), ((626, 654), 'xicam.plugins.operationplugin.limits', 'limits', (['"""order"""', '[1.0, 10.0]'], {}), "('order', [1.0, 10.0])\n", (632, 654), False, 'from xicam.plugins.operationplugin import limits, describe_input, describe_output, operation, opts, output_names, visible\n'), ((657, 680), 'xicam.plugins.operationplugin.opts', 'opts', (['"""order"""'], {'step': '(1.0)'}), "('order', step=1.0)\n", (661, 680), False, 'from xicam.plugins.operationplugin import limits, describe_input, describe_output, operation, opts, output_names, visible\n'), ((683, 717), 'xicam.plugins.operationplugin.visible', 'visible', (['"""image"""'], {'is_visible': '(False)'}), "('image', is_visible=False)\n", (690, 717), False, 'from xicam.plugins.operationplugin import limits, describe_input, describe_output, operation, opts, output_names, visible\n'), ((944, 949), 'ttictoc.tic', 'tic', ([], {}), '()\n', (947, 949), False, 'from ttictoc import tic, toc\n'), ((1082, 1087), 'ttictoc.toc', 'toc', ([], {}), '()\n', (1085, 1087), False, 'from ttictoc 
import tic, toc\n'), ((855, 876), 'numpy.iinfo', 'np.iinfo', (['image.dtype'], {}), '(image.dtype)\n', (863, 876), True, 'import numpy as np\n'), ((913, 934), 'numpy.finfo', 'np.finfo', (['image.dtype'], {}), '(image.dtype)\n', (921, 934), True, 'import numpy as np\n')] |
# <NAME>
# Learning matplotlib
# 25/07/21
# Imoporting Modules
import matplotlib.pyplot as plt
import numpy as np
# Generating line
xpoints = np.array([0, 10])
ypoints = np.array([0, 10])
plt.plot(xpoints, ypoints, marker = '*')
# Plotting a points
xpoints = np.array([10, 0])
ypoints = np.array([0, 10])
plt.plot(xpoints, ypoints, "bo")
# Showing plot
plt.show()
| [
"numpy.array",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.show"
] | [((145, 162), 'numpy.array', 'np.array', (['[0, 10]'], {}), '([0, 10])\n', (153, 162), True, 'import numpy as np\n'), ((173, 190), 'numpy.array', 'np.array', (['[0, 10]'], {}), '([0, 10])\n', (181, 190), True, 'import numpy as np\n'), ((191, 229), 'matplotlib.pyplot.plot', 'plt.plot', (['xpoints', 'ypoints'], {'marker': '"""*"""'}), "(xpoints, ypoints, marker='*')\n", (199, 229), True, 'import matplotlib.pyplot as plt\n'), ((263, 280), 'numpy.array', 'np.array', (['[10, 0]'], {}), '([10, 0])\n', (271, 280), True, 'import numpy as np\n'), ((291, 308), 'numpy.array', 'np.array', (['[0, 10]'], {}), '([0, 10])\n', (299, 308), True, 'import numpy as np\n'), ((309, 341), 'matplotlib.pyplot.plot', 'plt.plot', (['xpoints', 'ypoints', '"""bo"""'], {}), "(xpoints, ypoints, 'bo')\n", (317, 341), True, 'import matplotlib.pyplot as plt\n'), ((359, 369), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (367, 369), True, 'import matplotlib.pyplot as plt\n')] |
import numpy
class Embeddings:
def __init__(self):
"""
Initializes the embeddings database.
"""
self.Vectors = []
self.Labels = []
def Add(self, vector, label):
"""
Adds embedding to embeddings database.
Args:
vector: Vector
label: Label
"""
self.Vectors.append(vector)
self.Labels.append(label)
def Remove(self, label):
"""
Removes embedding from embeddings database.
Args:
label: Label
"""
index = self.Labels.index(label)
_ = self.Vectors.pop(index)
_ = self.Labels.pop(index)
def Clear(self):
"""
Clears embeddings database.
"""
self.Vectors.clear()
self.Labels.clear()
def Count(self):
"""
Returns embeddings database count.
Returns:
Count
"""
return len(self.Vectors)
def FromDistance(self, vector):
"""
Score vector from database by Euclidean distance.
Args:
vector: Vector
Returns:
Label
"""
length = self.Count()
minimum = 2147483647
index = -1
for i in range(length):
v = self.Vectors[i]
d = numpy.linalg.norm(v - vector)
if (d < minimum):
index = i
minimum = d
label = self.Labels[index] if (index != -1 and self.Labels != []) else ''
return label, minimum
def FromSimilarity(self, vector):
"""
Score vector from database by cosine similarity.
Args:
vector: Vector
Returns:
Label
"""
length = self.Count()
maximum = -2147483648
index = -1
for i in range(length):
v = self.Vectors[i]
a = numpy.linalg.norm(v)
b = numpy.linalg.norm(vector)
s = numpy.dot(v, vector) / (a * b)
if (s > maximum):
index = i
maximum = s
label = self.Labels[index] if (index != -1 and self.Labels != []) else ''
return label, maximum | [
"numpy.dot",
"numpy.linalg.norm"
] | [((1327, 1356), 'numpy.linalg.norm', 'numpy.linalg.norm', (['(v - vector)'], {}), '(v - vector)\n', (1344, 1356), False, 'import numpy\n'), ((1928, 1948), 'numpy.linalg.norm', 'numpy.linalg.norm', (['v'], {}), '(v)\n', (1945, 1948), False, 'import numpy\n'), ((1965, 1990), 'numpy.linalg.norm', 'numpy.linalg.norm', (['vector'], {}), '(vector)\n', (1982, 1990), False, 'import numpy\n'), ((2007, 2027), 'numpy.dot', 'numpy.dot', (['v', 'vector'], {}), '(v, vector)\n', (2016, 2027), False, 'import numpy\n')] |
import numpy as np
def generate_random_policy(env):
n_states = env.observation_space.n
n_actions = env.action_space.n
policy = np.ones([n_states, n_actions]) / n_actions
policy[0, :] = 0
policy[n_states - 1, :] = 0
return policy
def policy_evaluation(policy, env, V=None, gamma=1, theta=1e-8, max_t=None):
n_states = env.observation_space.n
n_actions = env.action_space.n
rewards_low, rewards_high = env.reward_range[0], env.reward_range[1]
# Init V
if V is None:
V = np.zeros(n_states)
# Evaluate policy iteratively
t = 0
while True:
t += 1
delta = 0
for s in range(1, n_states - 1):
val = V[s]
V[s] = sum(policy[s, a] * env.state_transition_prob(s1, r, s, a) * (r + gamma * V[s1])
for a in range(n_actions) for r in range(rewards_low, rewards_high + 1) for s1 in range(n_states))
delta = max(delta, abs(val - V[s]))
if theta > delta or (max_t and t == max_t):
break
return V
def policy_improvement(V, policy, env, gamma=1):
n_states = env.observation_space.n
n_actions = env.action_space.n
rewards_low, rewards_high = env.reward_range[0], env.reward_range[1]
policy_stable = True
for s in range(1, n_states - 1):
old_action = np.argmax(policy[s])
action_values = np.zeros(n_actions)
for a in range(n_actions):
action_values[a] = sum(env.state_transition_prob(s1, r, s, a) * (r + gamma * V[s1])
for r in range(rewards_low, rewards_high + 1) for s1 in range(n_states))
best_action = np.argmax(action_values)
# Update Policy
policy[s, :] = 0
policy[s, best_action] = 1
policy_changed = old_action != best_action
if policy_changed:
policy_stable = False
return policy, policy_stable
def print_policy(policy):
    """Print the greedy action name for each non-terminal state."""
    action_names = ['UP', 'RIGHT', 'DOWN', 'LEFT']
    for state in range(1, policy.shape[0] - 1):
        chosen = action_names[np.argmax(policy[state])]
        print(f'State:{state} Action: {chosen}')
def policy_iteration(policy, env):
    """Alternate evaluation and greedy improvement until the policy is stable.

    Relies on the module-level policy_evaluation / policy_improvement
    helpers; V is reused across rounds as a warm start.
    Returns (V, policy) for the converged policy.
    """
    state_values = None
    stable = False
    while not stable:
        state_values = policy_evaluation(policy, env, V=state_values)
        policy, stable = policy_improvement(state_values, policy, env)
    return state_values, policy
def value_iteration(policy, env, gamma=1, theta=1e-8):
    """Run value iteration, then write the greedy policy into `policy` in place.

    Assumes deterministic transitions exposed through env.next_state(s, a);
    the reward range is integer-valued via env.reward_range.
    Returns (V, policy).

    Fixes vs. the previous revision:
      * the Bellman optimality backup now takes the max over actions of the
        expected return summed over the reward range; previously the max
        ranged over (action, reward) pairs jointly, so zero-probability
        terms (contributing 0) masked any negative true return.
      * removed a leftover debug print of the per-state action values.
    """
    n_states = env.observation_space.n
    n_actions = env.action_space.n
    rewards_low, rewards_high = env.reward_range[0], env.reward_range[1]
    # Init V
    V = np.zeros(n_states)
    # Sweep Bellman optimality backups until convergence
    while True:
        delta = 0
        for s in range(1, n_states - 1):
            val = V[s]
            V[s] = max(
                sum(env.state_transition_prob(env.next_state(s, a), r, s, a)
                    * (r + gamma * V[env.next_state(s, a)])
                    for r in range(rewards_low, rewards_high + 1))
                for a in range(n_actions))
            delta = max(delta, abs(val - V[s]))
        if theta >= delta:
            break
    # Output deterministic greedy policy w.r.t. the converged V
    for s in range(1, n_states - 1):
        action_values = np.zeros(n_actions)
        for a in range(n_actions):
            action_values[a] = sum(env.state_transition_prob(s1, r, s, a) * (r + gamma * V[s1])
                                   for r in range(rewards_low, rewards_high + 1) for s1 in range(n_states))
        best_action = np.argmax(action_values)
        # Update Policy
        policy[s, :] = 0
        policy[s, best_action] = 1
    return V, policy
| [
"numpy.argmax",
"numpy.zeros",
"numpy.ones"
] | [((2581, 2599), 'numpy.zeros', 'np.zeros', (['n_states'], {}), '(n_states)\n', (2589, 2599), True, 'import numpy as np\n'), ((141, 171), 'numpy.ones', 'np.ones', (['[n_states, n_actions]'], {}), '([n_states, n_actions])\n', (148, 171), True, 'import numpy as np\n'), ((524, 542), 'numpy.zeros', 'np.zeros', (['n_states'], {}), '(n_states)\n', (532, 542), True, 'import numpy as np\n'), ((1334, 1354), 'numpy.argmax', 'np.argmax', (['policy[s]'], {}), '(policy[s])\n', (1343, 1354), True, 'import numpy as np\n'), ((1379, 1398), 'numpy.zeros', 'np.zeros', (['n_actions'], {}), '(n_actions)\n', (1387, 1398), True, 'import numpy as np\n'), ((1660, 1684), 'numpy.argmax', 'np.argmax', (['action_values'], {}), '(action_values)\n', (1669, 1684), True, 'import numpy as np\n'), ((3137, 3156), 'numpy.zeros', 'np.zeros', (['n_actions'], {}), '(n_actions)\n', (3145, 3156), True, 'import numpy as np\n'), ((3447, 3471), 'numpy.argmax', 'np.argmax', (['action_values'], {}), '(action_values)\n', (3456, 3471), True, 'import numpy as np\n'), ((2086, 2106), 'numpy.argmax', 'np.argmax', (['policy[s]'], {}), '(policy[s])\n', (2095, 2106), True, 'import numpy as np\n')] |
import matplotlib
matplotlib.use('Agg') # set the backend before importing pyplot
import pandas as pd
import numpy as np
import pyper as pr
import rpy2.robjects as robj
from rpy2.robjects import pandas2ri
from rpy2.robjects import FloatVector as rfc
from rpy2.robjects import StrVector as rsc
from rpy2.robjects import FactorVector as rfctc
from rpy2.robjects import ListVector as rlist
from rpy2.robjects.packages import importr
import rpy2.rinterface as ri
import matplotlib.pyplot as plt
# uncomment to install
# !pip install pyper
# !pip install rpy2
# r("install.packages('spatstat', repos = \"https://cloud.r-project.org\")")
# In[310]:
# Enable automatic pandas <-> R data.frame conversion for all rpy2 calls below.
pandas2ri.activate()
base = importr('base')
# R spatstat package: provides ppp/owin/Gest/Kest/Gcross/Kcross/envelope.
spatstat = importr('spatstat')
# NOTE(review): this rebinds the module-level name `rlist` imported above
# (rpy2 ListVector) to R's builtin list() function.
rlist = ri.baseenv['list']
rnull = ri.NULL  # R NULL, used as an "argument absent" sentinel below
rcmd = pr.R(use_numpy=True)  # PypeR bridge, presumably for ad-hoc R commands
def ppp(x, y, marks, window_xrange=None, window_yrange=None):
    '''
    Build a marked point pattern wrapped in a plain dictionary.
    input:
        x: list of the input points' x coordinates
        y: list of the input points' y coordinates
        marks: list of marks, one per point
        window_xrange: [xmin, xmax] of the observation window; defaults to the data range of x
        window_yrange: [ymin, ymax] of the observation window; defaults to the data range of y
    output:
        dict with keys 'ppp' (R spatstat ppp object), 'coor' and 'marks'
    >>> x = [1, 2, 3, 4]
    >>> y = [1, 2, 3, 4]
    >>> marks = ['a', 'a', 'b', 'c']
    >>> pp = ppp(x, y, marks, window_xrange=[0, 5], window_yrange=[0, 5])
    >>> r.print(pp)
    '''
    if not window_xrange:
        window_xrange = rfc([robj.r.min(rfc(x)), robj.r.max(rfc(x))])
    if not window_yrange:
        window_yrange = rfc([robj.r.min(rfc(y)), robj.r.max(rfc(y))])
    window = spatstat.owin(xrange=window_xrange, yrange=window_yrange)
    pattern = {
        'ppp': spatstat.ppp(x=rfc(x), y=rfc(y), window=window, marks=rfctc(marks)),
        'coor': [x] + [y],
        'marks': marks,
    }
    return pattern
def Gest(pp, r=None, correction='km', plot=True):
    '''
    Estimate the nearest-neighbour distance distribution (G function).
    input:
        pp: point pattern dict produced by ppp()
        r : radius ticks at which G is evaluated (spatstat default when None)
        correction: border correction, one of 'none', 'rs', 'km', 'han'
        plot: when True, draw the estimate against the Poisson reference
    output:
        the R Gest result (converted to a data frame by pandas2ri)
    >>> pp = ppp([1, 2, 3, 4], [1, 2, 3, 4], ['a', 'a', 'b', 'c'])
    >>> Gest(pp, plot=True)
    '''
    kwargs = {'correction': correction}
    if r:
        kwargs['r'] = rfc(r)
    result = spatstat.Gest(pp['ppp'], **kwargs)
    # spatstat names the uncorrected column 'raw'; otherwise it matches the correction
    sample = result['raw' if correction == 'none' else correction]
    if plot:
        plt.plot(result['r'], sample, c='black', linestyle='-',
                 linewidth=3, label=r"$G_{"+correction+"}$")
        plt.plot(result['r'], result['theo'], c='red', linestyle='--',
                 linewidth=3, label=r"$G_{Poisson}$")
        plt.xlabel('r')
        plt.ylabel('G(r)')
        plt.legend(fontsize=12)
        plt.title("G function")
    return result
def Kest(pp, r=None, correction='iso', plot=True):
    '''
    Estimate Ripley's K function for a point pattern.
    input:
        pp: point pattern dict produced by ppp()
        r : radius ticks at which K is evaluated (spatstat default when None)
        correction: border correction, one of 'border', 'iso', 'trans'
                    ('iso' is the Ripley isotropic correction)
        plot: when True, draw the estimate against the Poisson reference
    output:
        (r_Kest, K_val_samp): the R Kest result and the sample K values
    >>> pp = ppp([1, 2, 3, 4], [1, 2, 3, 4], ['a', 'a', 'b', 'c'])
    >>> Kest(pp, plot=True)
    '''
    kwargs = {'correction': correction}
    if r:
        kwargs['r'] = rfc(r)
    result = spatstat.Kest(pp['ppp'], **kwargs)
    # spatstat names the uncorrected column 'un'; otherwise it matches the correction
    sample = result['un' if correction == 'none' else correction]
    if plot:
        plt.plot(result['r'], sample, c='black', linestyle='-',
                 linewidth=3, label=r"$K_{"+correction+"}$")
        plt.plot(result['r'], result['theo'], c='red', linestyle='--',
                 linewidth=3, label=r"$K_{Poisson}$")
        plt.xlabel('r')
        plt.ylabel('K(r)')
        plt.legend(fontsize=12)
        plt.title("K function")
    return result, sample
def Gcross(pp, i, j, r=None, correction='km', plot=True):
    '''
    Estimate the cross-type nearest-neighbour function G_{i,j}(r).
    input:
        pp: point pattern dict produced by ppp()
        i : mark of the points taken as centres
        j : mark of the points taken as neighbours
        r : radius ticks at which G is evaluated (spatstat default when None)
        correction: border correction, one of 'none', 'rs', 'km', 'han'
        plot: when True, draw the estimate against the Poisson reference
    output:
        the R Gcross result
    >>> pp = ppp([1, 2, 3, 4], [1, 2, 3, 4], ['a', 'a', 'b', 'c'])
    >>> Gcross(pp, i='a', j='b', plot=True)
    '''
    kwargs = {'i': i, 'j': j, 'correction': correction}
    if r:
        kwargs['r'] = rfc(r)
    result = spatstat.Gcross(pp['ppp'], **kwargs)
    # spatstat names the uncorrected column 'raw'; otherwise it matches the correction
    sample = result['raw' if correction == 'none' else correction]
    if plot:
        plt.plot(result['r'], sample, c='black', linestyle='-',
                 linewidth=3, label=r"$G_{"+correction+"}$")
        plt.plot(result['r'], result['theo'], c='red', linestyle='--',
                 linewidth=3, label=r"$G_{Poisson}$")
        plt.xlabel('r')
        plt.ylabel('G(r)')
        plt.legend(fontsize=12)
        plt.title(r"$G_{" + str(i) + ", " + str(j) + "} (r)$ function")
    return result
def Kcross(pp, i, j, r=None, correction='iso', plot=True):
    '''
    Estimate the cross-type K function K_{i,j}(r).
    input:
        pp: point pattern dict produced by ppp()
        i : mark of the points taken as centres
        j : mark of the points taken as neighbours
        r : radius ticks at which K is evaluated (spatstat default when None)
        correction: border correction, one of 'none', 'iso', 'border',
                    'board.modif', 'trans'
        plot: when True, draw the estimate against the Poisson reference
    output:
        (r_Kcross, K_val_samp): the R Kcross result and the sample K values
    >>> pp = ppp([1, 2, 3, 4], [1, 2, 3, 4], ['a', 'a', 'b', 'c'])
    >>> Kcross(pp, i='a', j='b', plot=True)
    '''
    kwargs = {'i': i, 'j': j, 'correction': correction}
    if r:
        kwargs['r'] = rfc(r)
    result = spatstat.Kcross(pp['ppp'], **kwargs)
    # spatstat names the uncorrected column 'un'; otherwise it matches the correction
    sample = result['un' if correction == 'none' else correction]
    if plot:
        plt.plot(result['r'], sample, c='black', linestyle='-',
                 linewidth=3, label=r"$K_{"+correction+"}$")
        plt.plot(result['r'], result['theo'], c='red', linestyle='--',
                 linewidth=3, label=r"$K_{Poisson}$")
        plt.xlabel('r')
        plt.ylabel('K(r)')
        plt.legend(fontsize=12)
        plt.title(r"$K_{" + str(i) + ", " + str(j) + "} (r)$ function")
    return result, sample
def envelop(pp, fun='Kest', i=None, j=None, r=None, correction=None, global_var=False, theoretic_var=False, plot=True):
    '''
    Monte-Carlo simulation envelope for a spatial summary statistic.
    input:
        pp : point pattern dict produced by ppp()
        fun: statistic to envelope: 'Kest', 'Kcross', 'Gest' or 'Gcross'
        i  : centre mark for cross statistics (None otherwise)
        j  : neighbour mark for cross statistics (None otherwise)
        r  : radius ticks at which the statistic is evaluated
        correction : border correction passed through to spatstat
        global_var : use the global envelope for the confidence band
        theoretic_var : use the theoretic envelope for the confidence band
        plot: when True, draw the observed curve with its envelope
    output:
        the R envelope result
    >>> pp = ppp([1, 2, 3, 4], [1, 2, 3, 4], ['a', 'a', 'b', 'c'])
    >>> envelop(pp, fun='Kcross', i='a', j='b', correction='iso', plot=True)
    '''
    print('fun', fun)
    # R NULL stands in for "argument not supplied" on the spatstat side.
    i = i if i else rnull
    j = j if j else rnull
    correction = correction if correction else rnull
    if not r:
        result = spatstat.envelope(pp['ppp'], fun=fun, i=i, j=j, correction=correction, fix_marks=True)
    else:
        result = spatstat.envelope(pp['ppp'], r=rfc(r), fun=fun, i=i, j=j, correction=correction, fix_marks=True)
    r_ticks = result['r']
    theo = result['theo']
    obs = result['obs']
    hi = result['hi']
    lo = result['lo']
    if plot:
        plt.plot(r_ticks, obs, c='black', linestyle='-', linewidth=3, label=r"$"+fun+"_{obs}$")
        plt.plot(r_ticks, theo, c='red', linestyle='--', linewidth=3, label=r"$"+fun+"_{Poisson}$")
        plt.plot(r_ticks, hi, c='grey', linestyle='-', linewidth=1, label=r"$"+fun+"_{confidence}$")
        plt.plot(r_ticks, lo, c='grey', linestyle='-', linewidth=1)
        plt.fill_between(r_ticks, hi, lo, color='grey', alpha=0.4)
        plt.xlabel('r')
        plt.ylabel(fun+'(r)')
        plt.legend(fontsize=12)
        if i or j:
            plt.title(r"$" + fun + "_{" + str(i) + ", " + str(j) + "} (r)$ function")
        else:
            plt.title(r"$" + fun + "(r)$ function")
    return result
def pool(pp_list, fun=Kest, r=None, i=None, j=None, correction='none'):
    '''
    Pool one spatial statistic across multiple point patterns.
    Input:
        pp_list: python list of point pattern dicts produced by ppp()
        fun: spatial statistics function to be applied to each pattern
             (its result must expose the statistic values via .iloc[:, 2])
        r: radius ticks where the spatial statistic is evaluated
        i: center points type (for cross statistics)
        j: neighbor points type (for cross statistics)
        correction: border correction to be applied
    output:
        r : radius ticks where the pooled statistic is evaluated
        G_pool : pooled statistic values
        VG_pool : pooled statistic's variance at each tick
        lamb_pool : pooled (area-weighted) intensity value
    >>> x1 = [1, 2, 3, 4]; y1 = [1, 2, 3, 4]
    >>> x2 = [2, 2, 3, 4]; y2 = [2, 2, 3, 4]
    >>> x3 = [3, 2, 3, 4]; y3 = [3, 2, 3, 4]
    >>> marks = ['a', 'a', 'b', 'c']
    >>> pp1 = ppp(x1, y1, marks); pp2 = ppp(x2, y2, marks); pp3 = ppp(x3, y3, marks)
    >>> r_pool, G_pool, VG_pool, lamb_pool = pool([pp1, pp2, pp3], fun=Gest)
    '''
    m = []            # per-pattern weight n_i * n_j (number of point pairs)
    lambda_list = []  # per-pattern intensity estimate of the neighbor type
    fv_list = []      # per-pattern statistic tables returned by `fun`
    W_list = []       # per-pattern observation-window areas
    for k in range(len(pp_list)):
        ppp_obj = pp_list[k]['ppp']
        marks = pp_list[k]['marks']
        if i or j:
            # cross statistic: count centre-type (i) and neighbour-type (j) points
            ni = sum([mark==i for mark in marks])
            nj = sum([mark==j for mark in marks])
        else:
            ni = len(marks)
            nj = ni
        m.append(ni*nj)
        # NOTE(review): W appears to be the ppp's observation window with
        # W[1]/W[2] holding its x/y ranges -- verify against spatstat's
        # ppp component layout under the active rpy2 conversion.
        W = ppp_obj[0]
        xrange = W[1][1]-W[1][0]
        yrange = W[2][1]-W[2][0]
        W_size = xrange*yrange
        lambda_list.append(nj/W_size)
        W_list.append(W_size)
        if not r:
            # default grid: 200 ticks up to a quarter of the window diagonal,
            # computed from the first pattern and then reused for all of them
            max_r = np.sqrt(xrange**2 + yrange**2)/4
            r = [i*(max_r/200) for i in range(200)]
        if i or j:
            fv_list.append(fun(pp_list[k], r=r, i=i, j=j, correction=correction, plot=False))
        else:
            fv_list.append(fun(pp_list[k], r=r, correction=correction, plot=False))
    # NOTE(review): Kest/Kcross return a (table, values) tuple while
    # Gest/Gcross return the table alone; the .iloc access below assumes
    # the table form -- verify `fun` before pooling K statistics.
    G_pool = [0 for i in range(len(r))]
    VG_pool = [0 for i in range(len(r))]
    for k in range(len(r)):
        # column 2 of the converted spatstat table holds the statistic values
        Gs = [fv.iloc[k, 2] for fv in fv_list]
        # weight-averaged statistic at radius r[k]
        G_pool[k] = sum(np.array(m)*np.array(Gs))/sum(m)
        m_star = len(m)*np.array(m)/sum(m)
        G_star = np.array([x if x>0 else 0 for x in len(fv_list)*np.array(Gs)/sum(Gs)])
        # plug-in variance of the ratio estimator from the covariance of the
        # normalised weights and normalised statistics
        cov_m_G = np.cov(m_star, G_star)
        VG_pool[k] = G_pool[k]**2/len(m)*(cov_m_G[0,0]+cov_m_G[1,1] - 2*cov_m_G[1,0])
    # area-weighted pooled intensity across all observation windows
    lamb_pool = sum(np.array(W_list)*np.array(lambda_list))/sum(W_list)
    return r, G_pool, VG_pool, lamb_pool
# In[278]:
if __name__=="__main__":
    # Demo: build a marked point pattern from a ground-truth cell-dot mask
    # and exercise the G/K statistics, envelopes and pooling defined above.
    # Read and parse your data
    points = np.load("TCGA-A7-A4SC-01Z-00-DX1_15500_35001_1001_1000_0.9920000000000001_gt_dots.npy", allow_pickle=True)
    cell_code = {1:'lymphocyte', 2:'tumor', 3:'other'}
    x = []
    y = []
    mark = []
    # one channel per cell type; a truthy pixel marks a cell at (h, w)
    heights, widths, channels = points.shape
    for c in range(channels):
        for h in range(heights):
            for w in range(widths):
                if points[h, w, c]:
                    x.append(h)
                    y.append(w)
                    mark.append(cell_code[c])
    # In[279]:
    test_ppp = ppp(x, y, mark)
    print(test_ppp['ppp']) # the point pattern in R object
    # print(test_ppp['coor']) # your coordinate
    # print(test_ppp['marks']) # your cell types
    # In[280]:
    r_ppp = test_ppp['ppp']
    r = [0, 5, 10, 15, 20, 25]
    r_Gest = spatstat.Gest(r_ppp, r=rfc(r))
    plt.figure(figsize=(12, 10))
    plt.subplot(2, 2, 1)
    r_Gest = Gest(test_ppp, correction='none')
    plt.subplot(2, 2, 2)
    r_Kest,K_val_samp = Kest(test_ppp, correction='none')
    plt.subplot(2, 2, 3)
    r_Gcross = Gcross(test_ppp, i='lymphocyte', j='tumor', correction='km')
    plt.subplot(2, 2, 4)
    r_Kcross,K_val_samp = Kcross(test_ppp, i='lymphocyte', j='tumor', correction='iso')
    # Each of these R object is a Pandas dataframe in Python, you could access the content with their keys.For example:
    # In[281]:
    r_Gest.head()
    # r is the radius ticks where G function is evaluated
    # theo is the Poisson process's G value
    # raw is the sample estimation
    # In[282]:
    plt.figure(figsize=(12, 10))
    plt.subplot(2, 2, 1)
    r_envelop = envelop(test_ppp, fun='Gcross', i='lymphocyte', j='tumor')
    plt.subplot(2, 2, 2)
    r_envelop = envelop(test_ppp, fun='Kcross', i='lymphocyte', j='tumor')
    plt.subplot(2, 2, 3)
    r_envelop = envelop(test_ppp, fun='Gest')
    plt.subplot(2, 2, 4)
    r_envelop = envelop(test_ppp, fun='Kest')
    # ### Statistics Pooling
    #
    # Statistics pooling is based on following formula (same procedure applies to K function):
    #
    # __Process Steps__: <br>
    # Suppose now we have $G_{i,j}(r)^{(1)}, G_{i,j}(r)^{(2)}, \cdots, G_{i,j}(r)^{(Q)}$. And we also have estimation of the intensity $\hat{\lambda}_j^{(1)}, \hat{\lambda}_j^{(2)}, \cdots, \hat{\lambda}_j^{(Q)}$ Then we pool these Q statistics functions to get our final pooled G function. We also pooled these Q intensity estimator to get a pooled intensity and then the pooled theoretic closed-formed G function.
    #
    # __Pooled $G_{i,j}(r)$ (Theoretic one)__: <br>
    # $\lambda_{pool} = \frac{\sum_{i=1}^{Q} |W_i|\lambda_i}{\sum_{i=1}^{Q}|W_i|}$
    #
    # $G(r)_{pool} = 1-\exp\{-\lambda_{pool}\pi r^2\}$
    #
    # __Pooled $\hat{G}_{i,j}(r)$ (Sample version)__: <br>
    # $m_k = n1_k \times n2_k$<br>
    #
    # $\hat{G}_{i,j}(r) = \frac{\sum_{k=1}^{Q}m_kG_{i,j}^{(k)}(r)}{\sum_{k=1}^{Q}m_k}$
    #
    # where $n1_k$ is the number of points of the center type, and $n2_k$ is the number of points of the neighbor type. To estimate $G_{i,j}^{(k)}(r)$ we need $n1_k \times n2_k$ pair of samples.
    #
    # __Variance for Pooled $\hat{G}_{i,j}(r)$__: <br>
    # $\vec{m}^* = \left(\frac{Q m_1}{\sum_{k=1}^{Q}m_k}, \frac{Q m_2}{\sum_{k=1}^{Q} m_k},
    # \cdots, \frac{Q m_k}{\sum_{k=1}^{Q}m_k}\right)$ <br>
    # $\vec{G^*_{i,j}(r)} = \left(\frac{Q G^{(1)}_{i,j}(r)}{\sum_{k=1}^{Q}G^{k}_{i,j}(r)},
    # \frac{Q G^{(2)}_{i,j}(r)}{\sum_{k=1}^{Q}G^{k}_{i,j}(r)},
    # \cdots,
    # \frac{Q G^{(k)}_{i,j}(r)}{\sum_{k=1}^{Q}G^{k}_{i,j}(r)}\right)$ <br>
    # $\Sigma = cov\left(\vec{m}^*, \vec{G^*_{i,j}(r)}\right)$ <br>
    # $Var\left(\hat{G}^*_{i,j}(r)\right) = \frac{\hat{G}^*_{i,j}(r)^2}{Q}(\Sigma_{1,1}+\Sigma_{2,2}-2\Sigma_{1,2})$
    #
    # Where the confidence band width equals $\sqrt{Var\left(\hat{G}_{i,j}(r)\right)}$
    # In[309]:
    # Suppose now you have other two point processes
    # (jittered copies of the original pattern with shuffled marks)
    x2 = [coor + 150*np.random.rand(1)[0] for coor in x]
    y2 = [coor + 150*np.random.rand(1)[0] for coor in y]
    x3 = [coor + 150*np.random.rand(1)[0] for coor in x]
    y3 = [coor + 150*np.random.rand(1)[0] for coor in y]
    test_pp2 = ppp(x2, y2, marks=np.random.permutation(mark))
    test_pp3 = ppp(x3, y3, marks=np.random.permutation(mark))
    # NOTE(review): Kest/Kcross return (table, values) tuples while pool()
    # indexes each per-pattern result with .iloc -- verify the pooled K
    # calls below run as intended.
    plt.figure(figsize=(10, 10))
    plt.subplot(2, 2, 1)
    r_used, G_pool, VG_pool, lamb_pool = pool([test_ppp, test_pp2, test_pp3], fun=Gest)
    # theoretic formula for G function is 1-exp(-\lambda * \pi * r^2)
    G_theo = 1-np.exp(-np.pi*lamb_pool*np.array(r_used)**2)
    plt.plot(r_used, G_pool, c='black', linewidth=3, linestyle='-', label=r"$G_{pooled}(r)$")
    plt.plot(r_used, G_theo, c='red', linewidth=3, linestyle='--', label=r"$G_{Poisson}(r)$")
    plt.plot(r_used, G_pool-np.sqrt(VG_pool), c='grey', linewidth=1, linestyle='-', label="G(r) Confidence Band")
    plt.plot(r_used, G_pool+np.sqrt(VG_pool), c='grey', linewidth=1, linestyle='-')
    plt.fill_between(r_used, G_pool-np.sqrt(VG_pool), G_pool+np.sqrt(VG_pool), color='grey', alpha=0.4)
    plt.legend()
    plt.title("Pooled G Function")
    plt.subplot(2, 2, 2)
    r_used, K_pool, VK_pool, lamb_pool = pool([test_ppp, test_pp2, test_pp3], fun=Kest)
    # theoretic formula for K function is \pi*r^2
    K_theo = np.pi*np.array(r_used)**2
    plt.plot(r_used, K_pool, c='black', linewidth=3, linestyle='-', label=r"$K_{pooled}(r)$")
    plt.plot(r_used, K_theo, c='red', linewidth=3, linestyle='--', label=r"$K_{Poisson}(r)$")
    plt.plot(r_used, K_pool-np.sqrt(VK_pool), c='grey', linewidth=1, linestyle='-', label="K(r) Confidence Band")
    plt.plot(r_used, K_pool+np.sqrt(VK_pool), c='grey', linewidth=1, linestyle='-')
    plt.fill_between(r_used, K_pool-np.sqrt(VK_pool), K_pool+np.sqrt(VK_pool), color='grey', alpha=0.4)
    plt.legend()
    plt.title("Pooled K Function")
    plt.subplot(2, 2, 3)
    r_used, G_pool, VG_pool, lamb_pool = pool([test_ppp, test_pp2, test_pp3], fun=Gcross, i='lymphocyte', j='tumor')
    # theoretic formula for G function is 1-exp(-\lambda * \pi * r^2)
    G_theo = 1-np.exp(-np.pi*lamb_pool*np.array(r_used)**2)
    plt.plot(r_used, G_pool, c='black', linewidth=3, linestyle='-', label=r"$G_{pooled}(r)$")
    plt.plot(r_used, G_theo, c='red', linewidth=3, linestyle='--', label=r"$G_{Poisson}(r)$")
    plt.plot(r_used, G_pool-np.sqrt(VG_pool), c='grey', linewidth=1, linestyle='-', label="G(r) Confidence Band")
    plt.plot(r_used, G_pool+np.sqrt(VG_pool), c='grey', linewidth=1, linestyle='-')
    plt.fill_between(r_used, G_pool-np.sqrt(VG_pool), G_pool+np.sqrt(VG_pool), color='grey', alpha=0.4)
    plt.legend()
    plt.title(r"Pooled Cross $G_{i,j}(r)$ Function")
    plt.subplot(2, 2, 4)
    r_used, K_pool, VK_pool, lamb_pool = pool([test_ppp, test_pp2, test_pp3], fun=Kcross, i='lymphocyte', j='tumor')
    # theoretic formula for K function is \pi*r^2
    K_theo = np.pi*np.array(r_used)**2
    plt.plot(r_used, K_pool, c='black', linewidth=3, linestyle='-', label=r"$K_{pooled}(r)$")
    plt.plot(r_used, K_theo, c='red', linewidth=3, linestyle='--', label=r"$K_{Poisson}(r)$")
    plt.plot(r_used, K_pool-np.sqrt(VK_pool), c='grey', linewidth=1, linestyle='-', label="K(r) Confidence Band")
    plt.plot(r_used, K_pool+np.sqrt(VK_pool), c='grey', linewidth=1, linestyle='-')
    plt.fill_between(r_used, K_pool-np.sqrt(VK_pool), K_pool+np.sqrt(VK_pool), color='grey', alpha=0.4)
    plt.legend()
    plt.title(r"Pooled Kcross $K_{i,j}(r)$ Function")
| [
"rpy2.robjects.pandas2ri.activate",
"numpy.sqrt",
"numpy.random.rand",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.fill_between",
"rpy2.robjects.FloatVector",
"numpy.array",
"numpy.cov",
"numpy.load",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"rpy2.robjects.packages.importr",
... | [((19, 40), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (33, 40), False, 'import matplotlib\n'), ((651, 671), 'rpy2.robjects.pandas2ri.activate', 'pandas2ri.activate', ([], {}), '()\n', (669, 671), False, 'from rpy2.robjects import pandas2ri\n'), ((679, 694), 'rpy2.robjects.packages.importr', 'importr', (['"""base"""'], {}), "('base')\n", (686, 694), False, 'from rpy2.robjects.packages import importr\n'), ((706, 725), 'rpy2.robjects.packages.importr', 'importr', (['"""spatstat"""'], {}), "('spatstat')\n", (713, 725), False, 'from rpy2.robjects.packages import importr\n'), ((776, 796), 'pyper.R', 'pr.R', ([], {'use_numpy': '(True)'}), '(use_numpy=True)\n', (780, 796), True, 'import pyper as pr\n'), ((13020, 13136), 'numpy.load', 'np.load', (['"""TCGA-A7-A4SC-01Z-00-DX1_15500_35001_1001_1000_0.9920000000000001_gt_dots.npy"""'], {'allow_pickle': '(True)'}), "(\n 'TCGA-A7-A4SC-01Z-00-DX1_15500_35001_1001_1000_0.9920000000000001_gt_dots.npy'\n , allow_pickle=True)\n", (13027, 13136), True, 'import numpy as np\n'), ((13847, 13875), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(12, 10)'}), '(figsize=(12, 10))\n', (13857, 13875), True, 'import matplotlib.pyplot as plt\n'), ((13880, 13900), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(2)', '(1)'], {}), '(2, 2, 1)\n', (13891, 13900), True, 'import matplotlib.pyplot as plt\n'), ((13952, 13972), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(2)', '(2)'], {}), '(2, 2, 2)\n', (13963, 13972), True, 'import matplotlib.pyplot as plt\n'), ((14035, 14055), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(2)', '(3)'], {}), '(2, 2, 3)\n', (14046, 14055), True, 'import matplotlib.pyplot as plt\n'), ((14136, 14156), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(2)', '(4)'], {}), '(2, 2, 4)\n', (14147, 14156), True, 'import matplotlib.pyplot as plt\n'), ((14564, 14592), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(12, 10)'}), '(figsize=(12, 
10))\n', (14574, 14592), True, 'import matplotlib.pyplot as plt\n'), ((14597, 14617), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(2)', '(1)'], {}), '(2, 2, 1)\n', (14608, 14617), True, 'import matplotlib.pyplot as plt\n'), ((14697, 14717), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(2)', '(2)'], {}), '(2, 2, 2)\n', (14708, 14717), True, 'import matplotlib.pyplot as plt\n'), ((14797, 14817), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(2)', '(3)'], {}), '(2, 2, 3)\n', (14808, 14817), True, 'import matplotlib.pyplot as plt\n'), ((14868, 14888), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(2)', '(4)'], {}), '(2, 2, 4)\n', (14879, 14888), True, 'import matplotlib.pyplot as plt\n'), ((17375, 17403), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 10)'}), '(figsize=(10, 10))\n', (17385, 17403), True, 'import matplotlib.pyplot as plt\n'), ((17408, 17428), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(2)', '(1)'], {}), '(2, 2, 1)\n', (17419, 17428), True, 'import matplotlib.pyplot as plt\n'), ((17651, 17744), 'matplotlib.pyplot.plot', 'plt.plot', (['r_used', 'G_pool'], {'c': '"""black"""', 'linewidth': '(3)', 'linestyle': '"""-"""', 'label': '"""$G_{pooled}(r)$"""'}), "(r_used, G_pool, c='black', linewidth=3, linestyle='-', label=\n '$G_{pooled}(r)$')\n", (17659, 17744), True, 'import matplotlib.pyplot as plt\n'), ((17745, 17838), 'matplotlib.pyplot.plot', 'plt.plot', (['r_used', 'G_theo'], {'c': '"""red"""', 'linewidth': '(3)', 'linestyle': '"""--"""', 'label': '"""$G_{Poisson}(r)$"""'}), "(r_used, G_theo, c='red', linewidth=3, linestyle='--', label=\n '$G_{Poisson}(r)$')\n", (17753, 17838), True, 'import matplotlib.pyplot as plt\n'), ((18141, 18153), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (18151, 18153), True, 'import matplotlib.pyplot as plt\n'), ((18158, 18188), 'matplotlib.pyplot.title', 'plt.title', (['"""Pooled G Function"""'], {}), "('Pooled G Function')\n", (18167, 18188), 
True, 'import matplotlib.pyplot as plt\n'), ((18194, 18214), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(2)', '(2)'], {}), '(2, 2, 2)\n', (18205, 18214), True, 'import matplotlib.pyplot as plt\n'), ((18396, 18489), 'matplotlib.pyplot.plot', 'plt.plot', (['r_used', 'K_pool'], {'c': '"""black"""', 'linewidth': '(3)', 'linestyle': '"""-"""', 'label': '"""$K_{pooled}(r)$"""'}), "(r_used, K_pool, c='black', linewidth=3, linestyle='-', label=\n '$K_{pooled}(r)$')\n", (18404, 18489), True, 'import matplotlib.pyplot as plt\n'), ((18490, 18583), 'matplotlib.pyplot.plot', 'plt.plot', (['r_used', 'K_theo'], {'c': '"""red"""', 'linewidth': '(3)', 'linestyle': '"""--"""', 'label': '"""$K_{Poisson}(r)$"""'}), "(r_used, K_theo, c='red', linewidth=3, linestyle='--', label=\n '$K_{Poisson}(r)$')\n", (18498, 18583), True, 'import matplotlib.pyplot as plt\n'), ((18886, 18898), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (18896, 18898), True, 'import matplotlib.pyplot as plt\n'), ((18903, 18933), 'matplotlib.pyplot.title', 'plt.title', (['"""Pooled K Function"""'], {}), "('Pooled K Function')\n", (18912, 18933), True, 'import matplotlib.pyplot as plt\n'), ((18939, 18959), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(2)', '(3)'], {}), '(2, 2, 3)\n', (18950, 18959), True, 'import matplotlib.pyplot as plt\n'), ((19211, 19304), 'matplotlib.pyplot.plot', 'plt.plot', (['r_used', 'G_pool'], {'c': '"""black"""', 'linewidth': '(3)', 'linestyle': '"""-"""', 'label': '"""$G_{pooled}(r)$"""'}), "(r_used, G_pool, c='black', linewidth=3, linestyle='-', label=\n '$G_{pooled}(r)$')\n", (19219, 19304), True, 'import matplotlib.pyplot as plt\n'), ((19305, 19398), 'matplotlib.pyplot.plot', 'plt.plot', (['r_used', 'G_theo'], {'c': '"""red"""', 'linewidth': '(3)', 'linestyle': '"""--"""', 'label': '"""$G_{Poisson}(r)$"""'}), "(r_used, G_theo, c='red', linewidth=3, linestyle='--', label=\n '$G_{Poisson}(r)$')\n", (19313, 19398), True, 'import matplotlib.pyplot as 
plt\n'), ((19701, 19713), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (19711, 19713), True, 'import matplotlib.pyplot as plt\n'), ((19718, 19765), 'matplotlib.pyplot.title', 'plt.title', (['"""Pooled Cross $G_{i,j}(r)$ Function"""'], {}), "('Pooled Cross $G_{i,j}(r)$ Function')\n", (19727, 19765), True, 'import matplotlib.pyplot as plt\n'), ((19772, 19792), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(2)', '(4)'], {}), '(2, 2, 4)\n', (19783, 19792), True, 'import matplotlib.pyplot as plt\n'), ((20003, 20096), 'matplotlib.pyplot.plot', 'plt.plot', (['r_used', 'K_pool'], {'c': '"""black"""', 'linewidth': '(3)', 'linestyle': '"""-"""', 'label': '"""$K_{pooled}(r)$"""'}), "(r_used, K_pool, c='black', linewidth=3, linestyle='-', label=\n '$K_{pooled}(r)$')\n", (20011, 20096), True, 'import matplotlib.pyplot as plt\n'), ((20097, 20190), 'matplotlib.pyplot.plot', 'plt.plot', (['r_used', 'K_theo'], {'c': '"""red"""', 'linewidth': '(3)', 'linestyle': '"""--"""', 'label': '"""$K_{Poisson}(r)$"""'}), "(r_used, K_theo, c='red', linewidth=3, linestyle='--', label=\n '$K_{Poisson}(r)$')\n", (20105, 20190), True, 'import matplotlib.pyplot as plt\n'), ((20493, 20505), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (20503, 20505), True, 'import matplotlib.pyplot as plt\n'), ((20510, 20558), 'matplotlib.pyplot.title', 'plt.title', (['"""Pooled Kcross $K_{i,j}(r)$ Function"""'], {}), "('Pooled Kcross $K_{i,j}(r)$ Function')\n", (20519, 20558), True, 'import matplotlib.pyplot as plt\n'), ((3040, 3146), 'matplotlib.pyplot.plot', 'plt.plot', (['r_used', 'G_val_samp'], {'c': '"""black"""', 'linestyle': '"""-"""', 'linewidth': '(3)', 'label': "('$G_{' + correction + '}$')"}), "(r_used, G_val_samp, c='black', linestyle='-', linewidth=3, label=\n '$G_{' + correction + '}$')\n", (3048, 3146), True, 'import matplotlib.pyplot as plt\n'), ((3148, 3242), 'matplotlib.pyplot.plot', 'plt.plot', (['r_used', 'G_val_theo'], {'c': '"""red"""', 'linestyle': 
'"""--"""', 'linewidth': '(3)', 'label': '"""$G_{Poisson}$"""'}), "(r_used, G_val_theo, c='red', linestyle='--', linewidth=3, label=\n '$G_{Poisson}$')\n", (3156, 3242), True, 'import matplotlib.pyplot as plt\n'), ((3247, 3262), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""r"""'], {}), "('r')\n", (3257, 3262), True, 'import matplotlib.pyplot as plt\n'), ((3271, 3289), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""G(r)"""'], {}), "('G(r)')\n", (3281, 3289), True, 'import matplotlib.pyplot as plt\n'), ((3298, 3321), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'fontsize': '(12)'}), '(fontsize=12)\n', (3308, 3321), True, 'import matplotlib.pyplot as plt\n'), ((3330, 3353), 'matplotlib.pyplot.title', 'plt.title', (['"""G function"""'], {}), "('G function')\n", (3339, 3353), True, 'import matplotlib.pyplot as plt\n'), ((4402, 4508), 'matplotlib.pyplot.plot', 'plt.plot', (['r_used', 'K_val_samp'], {'c': '"""black"""', 'linestyle': '"""-"""', 'linewidth': '(3)', 'label': "('$K_{' + correction + '}$')"}), "(r_used, K_val_samp, c='black', linestyle='-', linewidth=3, label=\n '$K_{' + correction + '}$')\n", (4410, 4508), True, 'import matplotlib.pyplot as plt\n'), ((4510, 4604), 'matplotlib.pyplot.plot', 'plt.plot', (['r_used', 'K_val_theo'], {'c': '"""red"""', 'linestyle': '"""--"""', 'linewidth': '(3)', 'label': '"""$K_{Poisson}$"""'}), "(r_used, K_val_theo, c='red', linestyle='--', linewidth=3, label=\n '$K_{Poisson}$')\n", (4518, 4604), True, 'import matplotlib.pyplot as plt\n'), ((4609, 4624), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""r"""'], {}), "('r')\n", (4619, 4624), True, 'import matplotlib.pyplot as plt\n'), ((4633, 4651), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""K(r)"""'], {}), "('K(r)')\n", (4643, 4651), True, 'import matplotlib.pyplot as plt\n'), ((4660, 4683), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'fontsize': '(12)'}), '(fontsize=12)\n', (4670, 4683), True, 'import matplotlib.pyplot as plt\n'), ((4692, 4715), 
'matplotlib.pyplot.title', 'plt.title', (['"""K function"""'], {}), "('K function')\n", (4701, 4715), True, 'import matplotlib.pyplot as plt\n'), ((5903, 6009), 'matplotlib.pyplot.plot', 'plt.plot', (['r_used', 'G_val_samp'], {'c': '"""black"""', 'linestyle': '"""-"""', 'linewidth': '(3)', 'label': "('$G_{' + correction + '}$')"}), "(r_used, G_val_samp, c='black', linestyle='-', linewidth=3, label=\n '$G_{' + correction + '}$')\n", (5911, 6009), True, 'import matplotlib.pyplot as plt\n'), ((6011, 6105), 'matplotlib.pyplot.plot', 'plt.plot', (['r_used', 'G_val_theo'], {'c': '"""red"""', 'linestyle': '"""--"""', 'linewidth': '(3)', 'label': '"""$G_{Poisson}$"""'}), "(r_used, G_val_theo, c='red', linestyle='--', linewidth=3, label=\n '$G_{Poisson}$')\n", (6019, 6105), True, 'import matplotlib.pyplot as plt\n'), ((6110, 6125), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""r"""'], {}), "('r')\n", (6120, 6125), True, 'import matplotlib.pyplot as plt\n'), ((6134, 6152), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""G(r)"""'], {}), "('G(r)')\n", (6144, 6152), True, 'import matplotlib.pyplot as plt\n'), ((6161, 6184), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'fontsize': '(12)'}), '(fontsize=12)\n', (6171, 6184), True, 'import matplotlib.pyplot as plt\n'), ((7450, 7556), 'matplotlib.pyplot.plot', 'plt.plot', (['r_used', 'K_val_samp'], {'c': '"""black"""', 'linestyle': '"""-"""', 'linewidth': '(3)', 'label': "('$K_{' + correction + '}$')"}), "(r_used, K_val_samp, c='black', linestyle='-', linewidth=3, label=\n '$K_{' + correction + '}$')\n", (7458, 7556), True, 'import matplotlib.pyplot as plt\n'), ((7558, 7652), 'matplotlib.pyplot.plot', 'plt.plot', (['r_used', 'K_val_theo'], {'c': '"""red"""', 'linestyle': '"""--"""', 'linewidth': '(3)', 'label': '"""$K_{Poisson}$"""'}), "(r_used, K_val_theo, c='red', linestyle='--', linewidth=3, label=\n '$K_{Poisson}$')\n", (7566, 7652), True, 'import matplotlib.pyplot as plt\n'), ((7657, 7672), 'matplotlib.pyplot.xlabel', 
'plt.xlabel', (['"""r"""'], {}), "('r')\n", (7667, 7672), True, 'import matplotlib.pyplot as plt\n'), ((7681, 7699), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""K(r)"""'], {}), "('K(r)')\n", (7691, 7699), True, 'import matplotlib.pyplot as plt\n'), ((7708, 7731), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'fontsize': '(12)'}), '(fontsize=12)\n', (7718, 7731), True, 'import matplotlib.pyplot as plt\n'), ((9507, 9610), 'matplotlib.pyplot.plot', 'plt.plot', (['r_used', 'enve_val_samp'], {'c': '"""black"""', 'linestyle': '"""-"""', 'linewidth': '(3)', 'label': "('$' + fun + '_{obs}$')"}), "(r_used, enve_val_samp, c='black', linestyle='-', linewidth=3,\n label='$' + fun + '_{obs}$')\n", (9515, 9610), True, 'import matplotlib.pyplot as plt\n'), ((9613, 9720), 'matplotlib.pyplot.plot', 'plt.plot', (['r_used', 'enve_val_theo'], {'c': '"""red"""', 'linestyle': '"""--"""', 'linewidth': '(3)', 'label': "('$' + fun + '_{Poisson}$')"}), "(r_used, enve_val_theo, c='red', linestyle='--', linewidth=3, label\n ='$' + fun + '_{Poisson}$')\n", (9621, 9720), True, 'import matplotlib.pyplot as plt\n'), ((9721, 9824), 'matplotlib.pyplot.plot', 'plt.plot', (['r_used', 'theo_hi'], {'c': '"""grey"""', 'linestyle': '"""-"""', 'linewidth': '(1)', 'label': "('$' + fun + '_{confidence}$')"}), "(r_used, theo_hi, c='grey', linestyle='-', linewidth=1, label='$' +\n fun + '_{confidence}$')\n", (9729, 9824), True, 'import matplotlib.pyplot as plt\n'), ((9826, 9889), 'matplotlib.pyplot.plot', 'plt.plot', (['r_used', 'theo_lo'], {'c': '"""grey"""', 'linestyle': '"""-"""', 'linewidth': '(1)'}), "(r_used, theo_lo, c='grey', linestyle='-', linewidth=1)\n", (9834, 9889), True, 'import matplotlib.pyplot as plt\n'), ((9898, 9965), 'matplotlib.pyplot.fill_between', 'plt.fill_between', (['r_used', 'theo_hi', 'theo_lo'], {'color': '"""grey"""', 'alpha': '(0.4)'}), "(r_used, theo_hi, theo_lo, color='grey', alpha=0.4)\n", (9914, 9965), True, 'import matplotlib.pyplot as plt\n'), ((9974, 9989), 
'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""r"""'], {}), "('r')\n", (9984, 9989), True, 'import matplotlib.pyplot as plt\n'), ((9998, 10021), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (["(fun + '(r)')"], {}), "(fun + '(r)')\n", (10008, 10021), True, 'import matplotlib.pyplot as plt\n'), ((10028, 10051), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'fontsize': '(12)'}), '(fontsize=12)\n', (10038, 10051), True, 'import matplotlib.pyplot as plt\n'), ((12710, 12732), 'numpy.cov', 'np.cov', (['m_star', 'G_star'], {}), '(m_star, G_star)\n', (12716, 12732), True, 'import numpy as np\n'), ((1774, 1780), 'rpy2.robjects.FloatVector', 'rfc', (['x'], {}), '(x)\n', (1777, 1780), True, 'from rpy2.robjects import FloatVector as rfc\n'), ((1784, 1790), 'rpy2.robjects.FloatVector', 'rfc', (['y'], {}), '(y)\n', (1787, 1790), True, 'from rpy2.robjects import FloatVector as rfc\n'), ((1975, 1987), 'rpy2.robjects.FactorVector', 'rfctc', (['marks'], {}), '(marks)\n', (1980, 1987), True, 'from rpy2.robjects import FactorVector as rfctc\n'), ((10183, 10221), 'matplotlib.pyplot.title', 'plt.title', (["('$' + fun + '(r)$ function')"], {}), "('$' + fun + '(r)$ function')\n", (10192, 10221), True, 'import matplotlib.pyplot as plt\n'), ((13834, 13840), 'rpy2.robjects.FloatVector', 'rfc', (['r'], {}), '(r)\n', (13837, 13840), True, 'from rpy2.robjects import FloatVector as rfc\n'), ((17279, 17306), 'numpy.random.permutation', 'np.random.permutation', (['mark'], {}), '(mark)\n', (17300, 17306), True, 'import numpy as np\n'), ((17341, 17368), 'numpy.random.permutation', 'np.random.permutation', (['mark'], {}), '(mark)\n', (17362, 17368), True, 'import numpy as np\n'), ((17863, 17879), 'numpy.sqrt', 'np.sqrt', (['VG_pool'], {}), '(VG_pool)\n', (17870, 17879), True, 'import numpy as np\n'), ((17977, 17993), 'numpy.sqrt', 'np.sqrt', (['VG_pool'], {}), '(VG_pool)\n', (17984, 17993), True, 'import numpy as np\n'), ((18069, 18085), 'numpy.sqrt', 'np.sqrt', (['VG_pool'], {}), '(VG_pool)\n', 
(18076, 18085), True, 'import numpy as np\n'), ((18094, 18110), 'numpy.sqrt', 'np.sqrt', (['VG_pool'], {}), '(VG_pool)\n', (18101, 18110), True, 'import numpy as np\n'), ((18372, 18388), 'numpy.array', 'np.array', (['r_used'], {}), '(r_used)\n', (18380, 18388), True, 'import numpy as np\n'), ((18608, 18624), 'numpy.sqrt', 'np.sqrt', (['VK_pool'], {}), '(VK_pool)\n', (18615, 18624), True, 'import numpy as np\n'), ((18722, 18738), 'numpy.sqrt', 'np.sqrt', (['VK_pool'], {}), '(VK_pool)\n', (18729, 18738), True, 'import numpy as np\n'), ((18814, 18830), 'numpy.sqrt', 'np.sqrt', (['VK_pool'], {}), '(VK_pool)\n', (18821, 18830), True, 'import numpy as np\n'), ((18839, 18855), 'numpy.sqrt', 'np.sqrt', (['VK_pool'], {}), '(VK_pool)\n', (18846, 18855), True, 'import numpy as np\n'), ((19423, 19439), 'numpy.sqrt', 'np.sqrt', (['VG_pool'], {}), '(VG_pool)\n', (19430, 19439), True, 'import numpy as np\n'), ((19537, 19553), 'numpy.sqrt', 'np.sqrt', (['VG_pool'], {}), '(VG_pool)\n', (19544, 19553), True, 'import numpy as np\n'), ((19629, 19645), 'numpy.sqrt', 'np.sqrt', (['VG_pool'], {}), '(VG_pool)\n', (19636, 19645), True, 'import numpy as np\n'), ((19654, 19670), 'numpy.sqrt', 'np.sqrt', (['VG_pool'], {}), '(VG_pool)\n', (19661, 19670), True, 'import numpy as np\n'), ((19979, 19995), 'numpy.array', 'np.array', (['r_used'], {}), '(r_used)\n', (19987, 19995), True, 'import numpy as np\n'), ((20215, 20231), 'numpy.sqrt', 'np.sqrt', (['VK_pool'], {}), '(VK_pool)\n', (20222, 20231), True, 'import numpy as np\n'), ((20329, 20345), 'numpy.sqrt', 'np.sqrt', (['VK_pool'], {}), '(VK_pool)\n', (20336, 20345), True, 'import numpy as np\n'), ((20421, 20437), 'numpy.sqrt', 'np.sqrt', (['VK_pool'], {}), '(VK_pool)\n', (20428, 20437), True, 'import numpy as np\n'), ((20446, 20462), 'numpy.sqrt', 'np.sqrt', (['VK_pool'], {}), '(VK_pool)\n', (20453, 20462), True, 'import numpy as np\n'), ((2777, 2783), 'rpy2.robjects.FloatVector', 'rfc', (['r'], {}), '(r)\n', (2780, 2783), True, 'from 
rpy2.robjects import FloatVector as rfc\n'), ((4140, 4146), 'rpy2.robjects.FloatVector', 'rfc', (['r'], {}), '(r)\n', (4143, 4146), True, 'from rpy2.robjects import FloatVector as rfc\n'), ((5610, 5616), 'rpy2.robjects.FloatVector', 'rfc', (['r'], {}), '(r)\n', (5613, 5616), True, 'from rpy2.robjects import FloatVector as rfc\n'), ((7158, 7164), 'rpy2.robjects.FloatVector', 'rfc', (['r'], {}), '(r)\n', (7161, 7164), True, 'from rpy2.robjects import FloatVector as rfc\n'), ((9257, 9263), 'rpy2.robjects.FloatVector', 'rfc', (['r'], {}), '(r)\n', (9260, 9263), True, 'from rpy2.robjects import FloatVector as rfc\n'), ((12047, 12081), 'numpy.sqrt', 'np.sqrt', (['(xrange ** 2 + yrange ** 2)'], {}), '(xrange ** 2 + yrange ** 2)\n', (12054, 12081), True, 'import numpy as np\n'), ((12585, 12596), 'numpy.array', 'np.array', (['m'], {}), '(m)\n', (12593, 12596), True, 'import numpy as np\n'), ((12839, 12855), 'numpy.array', 'np.array', (['W_list'], {}), '(W_list)\n', (12847, 12855), True, 'import numpy as np\n'), ((12856, 12877), 'numpy.array', 'np.array', (['lambda_list'], {}), '(lambda_list)\n', (12864, 12877), True, 'import numpy as np\n'), ((1610, 1616), 'rpy2.robjects.FloatVector', 'rfc', (['x'], {}), '(x)\n', (1613, 1616), True, 'from rpy2.robjects import FloatVector as rfc\n'), ((1630, 1636), 'rpy2.robjects.FloatVector', 'rfc', (['x'], {}), '(x)\n', (1633, 1636), True, 'from rpy2.robjects import FloatVector as rfc\n'), ((1704, 1710), 'rpy2.robjects.FloatVector', 'rfc', (['y'], {}), '(y)\n', (1707, 1710), True, 'from rpy2.robjects import FloatVector as rfc\n'), ((1724, 1730), 'rpy2.robjects.FloatVector', 'rfc', (['y'], {}), '(y)\n', (1727, 1730), True, 'from rpy2.robjects import FloatVector as rfc\n'), ((12528, 12539), 'numpy.array', 'np.array', (['m'], {}), '(m)\n', (12536, 12539), True, 'import numpy as np\n'), ((12540, 12552), 'numpy.array', 'np.array', (['Gs'], {}), '(Gs)\n', (12548, 12552), True, 'import numpy as np\n'), ((17039, 17056), 'numpy.random.rand', 
'np.random.rand', (['(1)'], {}), '(1)\n', (17053, 17056), True, 'import numpy as np\n'), ((17096, 17113), 'numpy.random.rand', 'np.random.rand', (['(1)'], {}), '(1)\n', (17110, 17113), True, 'import numpy as np\n'), ((17153, 17170), 'numpy.random.rand', 'np.random.rand', (['(1)'], {}), '(1)\n', (17167, 17170), True, 'import numpy as np\n'), ((17210, 17227), 'numpy.random.rand', 'np.random.rand', (['(1)'], {}), '(1)\n', (17224, 17227), True, 'import numpy as np\n'), ((17626, 17642), 'numpy.array', 'np.array', (['r_used'], {}), '(r_used)\n', (17634, 17642), True, 'import numpy as np\n'), ((19186, 19202), 'numpy.array', 'np.array', (['r_used'], {}), '(r_used)\n', (19194, 19202), True, 'import numpy as np\n'), ((12669, 12681), 'numpy.array', 'np.array', (['Gs'], {}), '(Gs)\n', (12677, 12681), True, 'import numpy as np\n')] |
from __future__ import division
from past.utils import old_div
import unittest2 as unittest
import numpy as np
from vsm.spatial import *
#TODO: add tests for recently added methods.
def KL(p, q):
    """Kullback-Leibler divergence KL(p || q) in bits (log base 2).

    Reference implementation used to validate ``vsm.spatial.KL_div``.
    ``p`` and ``q`` are 1-D NumPy probability vectors of equal length.
    Plain ``/`` replaces the py2-compat ``old_div`` shim: the module uses
    ``from __future__ import division``, so ``/`` is already true division.
    """
    return np.sum(p * np.log2(p / q))
def partial_KL(p, q):
    """Elementwise KL contribution ``p * log2(2p / (p + q))`` in bits.

    This is the per-component integrand of the Jensen-Shannon divergence
    (KL of ``p`` against the mixture ``(p + q) / 2``), returned without
    summation.  Plain ``/`` replaces the py2-compat ``old_div`` shim; the
    module already uses ``from __future__ import division``.
    """
    return p * np.log2(2 * p / (p + q))
def JS(p, q):
    """Jensen-Shannon divergence of distributions ``p`` and ``q`` (bits)."""
    mixture = (p + q) * 0.5
    return 0.5 * (KL(p, mixture) + KL(q, mixture))
def JSD(p, q):
    """Jensen-Shannon distance: the square root of the JS divergence."""
    mixture = (p + q) * 0.5
    divergence = 0.5 * (KL(p, mixture) + KL(q, mixture))
    return divergence ** 0.5
class TestSpatial(unittest.TestCase):
    """Validates vsm.spatial divergence measures against the local
    reference implementations, plus the count_matrix constructor."""

    def setUp(self):
        # Two random discrete distributions over five outcomes.
        raw_p = np.random.random_sample((5,))
        raw_q = np.random.random_sample((5,))
        self.p = raw_p / raw_p.sum()
        self.q = raw_q / raw_q.sum()

    def test_KL_div(self):
        expected = KL(self.p, self.q)
        self.assertTrue(np.allclose(KL_div(self.p, self.q), expected))

    def test_JS_div(self):
        expected = JS(self.p, self.q)
        self.assertTrue(np.allclose(JS_div(self.p, self.q), expected))

    def test_JS_dist(self):
        expected = JSD(self.p, self.q)
        self.assertTrue(np.allclose(JS_dist(self.p, self.q), expected))

    def test_count_matrix(self):
        arr = [1, 2, 4, 2, 1]
        slices = [slice(0, 1), slice(1, 3), slice(3, 3), slice(3, 5)]
        m = 6
        expected = coo_matrix([[0, 0, 0, 0],
                               [1, 0, 0, 1],
                               [0, 1, 0, 1],
                               [0, 0, 0, 0],
                               [0, 1, 0, 0],
                               [0, 0, 0, 0]])
        actual = count_matrix(arr, slices, m)
        self.assertTrue(np.array_equal(actual.toarray(), expected.toarray()))
# Build and run the suite with a verbose runner when the module executes.
loader = unittest.TestLoader()
suite = loader.loadTestsFromTestCase(TestSpatial)
runner = unittest.TextTestRunner(verbosity=2)
runner.run(suite)
| [
"unittest2.TestLoader",
"numpy.random.random_sample",
"unittest2.TextTestRunner",
"past.utils.old_div"
] | [((558, 587), 'numpy.random.random_sample', 'np.random.random_sample', (['(5,)'], {}), '((5,))\n', (581, 587), True, 'import numpy as np\n'), ((603, 632), 'numpy.random.random_sample', 'np.random.random_sample', (['(5,)'], {}), '((5,))\n', (626, 632), True, 'import numpy as np\n'), ((1581, 1602), 'unittest2.TestLoader', 'unittest.TestLoader', ([], {}), '()\n', (1600, 1602), True, 'import unittest2 as unittest\n'), ((1638, 1674), 'unittest2.TextTestRunner', 'unittest.TextTestRunner', ([], {'verbosity': '(2)'}), '(verbosity=2)\n', (1661, 1674), True, 'import unittest2 as unittest\n'), ((281, 302), 'past.utils.old_div', 'old_div', (['(2 * p)', '(p + q)'], {}), '(2 * p, p + q)\n', (288, 302), False, 'from past.utils import old_div\n'), ((222, 235), 'past.utils.old_div', 'old_div', (['p', 'q'], {}), '(p, q)\n', (229, 235), False, 'from past.utils import old_div\n')] |
import cv2
import numpy as np
from ORBFeature import ORBFeature, orb_match
from cnnmatching import cnn_match
from lib.utils import show_match
class Coarse2Fine(object):
    """Iterative image registration: a coarse ORB-based homography between
    img1 and img2, refined over several rounds of CNN feature matching.

    Attributes:
        img1, img2: the moving and fixed BGR images.
        h_mat: current 3x3 img1 -> img2 homography estimate.
        M: 2x3 affine transform applied when mapping refined points back
           into the original img1 frame (see reverse_warp_points).
    """

    def __init__(self, img1, img2, M):
        self.img1 = img1
        self.img2 = img2
        self.h_mat = np.eye(3)  # start from the identity homography
        self.M = M

    def compute_homography(self, pairs):
        """Estimate a homography with RANSAC from (src, dst) point pairs.

        pairs: iterable of ((x, y), (x, y)) correspondences.
        Returns the 3x3 matrix mapping src points onto dst points.
        """
        # cv2.findHomography requires floating-point point arrays.
        src_pts = np.array([(s[0], s[1]) for s, _ in pairs], dtype=np.float64)
        dst_pts = np.array([(d[0], d[1]) for _, d in pairs], dtype=np.float64)
        h_mat, _ = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC)
        return h_mat

    def coarse_wrap(self):
        """Initial coarse alignment via ORB keypoint matching on grayscale
        versions of the two images; stores the result in self.h_mat."""
        gray1 = cv2.cvtColor(self.img1, cv2.COLOR_BGR2GRAY)
        gray2 = cv2.cvtColor(self.img2, cv2.COLOR_BGR2GRAY)
        orb1 = ORBFeature(gray1, 12, 4, 0.2)
        orb1.detector()
        orb2 = ORBFeature(gray2, 12, 4, 0.2)
        orb2.detector()
        matches = orb_match(orb1, orb2)
        # corner_points[idx][:2] holds the keypoint's (x, y) location.
        x_y_pairs = [(orb1.corner_points[i1][:2], orb2.corner_points[i2][:2])
                     for i1, i2, _score in matches]
        self.h_mat = self.compute_homography(x_y_pairs)

    def reverse_warp_points(self, pts):
        """Map points from warped-img1 coordinates back into the original
        img1 frame: invert the homography, then invert the affine M."""
        pts = np.asarray(pts, dtype=np.float64)
        homog = np.concatenate((pts, np.ones((len(pts), 1))), axis=1)
        reverse_pts = np.dot(np.linalg.inv(self.h_mat), homog.T).T
        # BUG FIX: perspective division must use each point's own w
        # (third homogeneous coordinate).  The previous code divided every
        # point by the *average* w, which is only correct when all points
        # happen to share the same w.
        reverse_pts = reverse_pts[:, :2] / reverse_pts[:, 2:3]
        reverse_pts -= self.M[:, 2]
        reverse_pts = np.dot(np.linalg.inv(self.M[:, :2]), reverse_pts.T).T
        return reverse_pts

    def warp_image(self):
        """Warp img1 with the current homography onto a canvas 1.5x the
        size of img2 (headroom for the warped content)."""
        h, w = self.img2.shape[:2]
        return cv2.warpPerspective(self.img1, self.h_mat,
                                  (int(w * 1.5), int(h * 1.5)))

    def coarse2fine(self, iter=4, save_path='./'):
        """Refine self.h_mat over `iter` rounds of CNN matching.

        Note: the parameter name `iter` shadows the builtin, but is kept
        for backward compatibility with existing callers.
        """
        for round_idx in range(iter):
            print("iter", round_idx)
            warped = self.warp_image()
            target = self.img2
            src_pts, dst_pts = cnn_match(warped, target)
            # Drop matches landing on the black (empty) border of the warp.
            pairs = [(s, d) for s, d in zip(src_pts, dst_pts)
                     if sum(warped[int(s[1])][int(s[0])]) != 0]
            acc = self.accuracy(src_pts, dst_pts)
            show_match(src_pts, dst_pts, warped, target, save_path,
                       str(round_idx) + "_" + str(acc) + '_' + str(len(src_pts)) + '_.png')
            h_mat = self.compute_homography(pairs)
            if round_idx != iter - 1:
                # Compose the incremental refinement with the running estimate.
                self.h_mat = np.dot(h_mat, self.h_mat)

    def accuracy(self, src_pts, dst_pts):
        """Fraction of matches whose src point, mapped back to the original
        img1 frame, lies within an L1 distance of 20 px of its dst point."""
        src_back = self.reverse_warp_points(src_pts)
        dists = np.array([np.sum(np.abs(p1 - p2))
                          for p1, p2 in zip(src_back, dst_pts)])
        return np.count_nonzero(dists < 20) / len(src_pts)
"numpy.abs",
"numpy.eye",
"cv2.findHomography",
"ORBFeature.orb_match",
"numpy.average",
"ORBFeature.ORBFeature",
"numpy.count_nonzero",
"numpy.array",
"numpy.dot",
"numpy.linalg.inv",
"cnnmatching.cnn_match",
"cv2.cvtColor"
] | [((281, 290), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (287, 290), True, 'import numpy as np\n'), ((465, 506), 'numpy.array', 'np.array', (['[(x[0], x[1]) for x in src_pts]'], {}), '([(x[0], x[1]) for x in src_pts])\n', (473, 506), True, 'import numpy as np\n'), ((525, 566), 'numpy.array', 'np.array', (['[(x[0], x[1]) for x in dst_pts]'], {}), '([(x[0], x[1]) for x in dst_pts])\n', (533, 566), True, 'import numpy as np\n'), ((587, 635), 'cv2.findHomography', 'cv2.findHomography', (['src_pts', 'dst_pts', 'cv2.RANSAC'], {}), '(src_pts, dst_pts, cv2.RANSAC)\n', (605, 635), False, 'import cv2\n'), ((714, 757), 'cv2.cvtColor', 'cv2.cvtColor', (['self.img1', 'cv2.COLOR_BGR2GRAY'], {}), '(self.img1, cv2.COLOR_BGR2GRAY)\n', (726, 757), False, 'import cv2\n'), ((773, 816), 'cv2.cvtColor', 'cv2.cvtColor', (['self.img2', 'cv2.COLOR_BGR2GRAY'], {}), '(self.img2, cv2.COLOR_BGR2GRAY)\n', (785, 816), False, 'import cv2\n'), ((832, 860), 'ORBFeature.ORBFeature', 'ORBFeature', (['img1', '(12)', '(4)', '(0.2)'], {}), '(img1, 12, 4, 0.2)\n', (842, 860), False, 'from ORBFeature import ORBFeature, orb_match\n'), ((948, 976), 'ORBFeature.ORBFeature', 'ORBFeature', (['img2', '(12)', '(4)', '(0.2)'], {}), '(img2, 12, 4, 0.2)\n', (958, 976), False, 'from ORBFeature import ORBFeature, orb_match\n'), ((1019, 1040), 'ORBFeature.orb_match', 'orb_match', (['orb1', 'orb2'], {}), '(orb1, orb2)\n', (1028, 1040), False, 'from ORBFeature import ORBFeature, orb_match\n'), ((1319, 1332), 'numpy.array', 'np.array', (['pts'], {}), '(pts)\n', (1327, 1332), True, 'import numpy as np\n'), ((1432, 1457), 'numpy.linalg.inv', 'np.linalg.inv', (['self.h_mat'], {}), '(self.h_mat)\n', (1445, 1457), True, 'import numpy as np\n'), ((2841, 2855), 'numpy.array', 'np.array', (['dlst'], {}), '(dlst)\n', (2849, 2855), True, 'import numpy as np\n'), ((1480, 1506), 'numpy.dot', 'np.dot', (['reverse_mat', 'pts.T'], {}), '(reverse_mat, pts.T)\n', (1486, 1506), True, 'import numpy as np\n'), ((1549, 1578), 
'numpy.average', 'np.average', (['reverse_pts[:, 2]'], {}), '(reverse_pts[:, 2])\n', (1559, 1578), True, 'import numpy as np\n'), ((2079, 2100), 'cnnmatching.cnn_match', 'cnn_match', (['img1', 'img2'], {}), '(img1, img2)\n', (2088, 2100), False, 'from cnnmatching import cnn_match\n'), ((2871, 2898), 'numpy.count_nonzero', 'np.count_nonzero', (['(dlst < 20)'], {}), '(dlst < 20)\n', (2887, 2898), True, 'import numpy as np\n'), ((1643, 1671), 'numpy.linalg.inv', 'np.linalg.inv', (['self.M[:, :2]'], {}), '(self.M[:, :2])\n', (1656, 1671), True, 'import numpy as np\n'), ((2551, 2576), 'numpy.dot', 'np.dot', (['h_mat', 'self.h_mat'], {}), '(h_mat, self.h_mat)\n', (2557, 2576), True, 'import numpy as np\n'), ((2781, 2796), 'numpy.abs', 'np.abs', (['(p1 - p2)'], {}), '(p1 - p2)\n', (2787, 2796), True, 'import numpy as np\n')] |
import os
import os.path as osp
import numpy as np
import torch
from datasets.hitgraphs import HitDataset
from torch_geometric.data import DataLoader
from models import get_model
import tqdm
# Load a trained PointNet denoiser checkpoint and evaluate it on a single
# event, printing hit-level confusion-matrix statistics.
model_fname = "../model_files/denoiser_pointnet_reference_model.pth"

# Dataset root comes from the environment; 'single_tau' is the sample name.
path = osp.join(os.environ['GNN_TRAINING_DATA_ROOT'], 'single_tau')
full_dataset = HitDataset(path)
fulllen = len(full_dataset)  # NOTE(review): unused below, as is `d`
d = full_dataset

device = torch.device('cuda:1' if torch.cuda.is_available() else 'cpu')
#device = torch.device('cpu')
model = get_model(name='PointNet').to(device)
model.load_state_dict(torch.load(model_fname))

# Evaluate exactly one event (index 1) through a batch-size-1 loader.
evt_index = 1
test_dataset = torch.utils.data.Subset(full_dataset,[evt_index])
test_loader = DataLoader(test_dataset, batch_size=1, shuffle=False)

for i,data in enumerate(test_loader):
    assert (i == 0);  # the loader holds a single event, so exactly one iteration
    # NOTE(review): `x` and `pos` are extracted but never used below.
    x=(data.x.cpu().detach().numpy())
    pos =(data.pos.cpu().detach().numpy())
    # Binarize targets and model outputs at a 0.5 threshold.
    y=(data.y.cpu().detach().numpy() >0.5)
    data = data.to(device)
    out = model(data).cpu().detach().numpy() > 0.5
    assert(y.shape == out.shape)
    print(y)
    print(out)
    # Per-hit confusion-matrix masks.
    truepos = np.logical_and(y, out)
    falseneg = np.logical_and(np.logical_not(out), y)
    falsepos = np.logical_and(out, np.logical_not(y))
    trueneg = np.logical_and(np.logical_not(y), np.logical_not(out))
    # Sanity check: the four categories partition all hits
    # (assumes y is 1-D, one entry per hit -- TODO confirm).
    assert(y.shape[0] == truepos.sum() + falseneg.sum() + falsepos.sum() + trueneg.sum())
    print("truepos", truepos.sum(), "trueneg", trueneg.sum(), "falsepos", falsepos.sum(), "falseneg", falseneg.sum())
    print("efficiency", truepos.sum()/y.sum())
    print("purity", truepos.sum()/(truepos.sum()+falsepos.sum()))
    print("true negative rate", (trueneg.sum())/(falsepos.sum() + trueneg.sum()))
print("true negative rate", (trueneg.sum())/(falsepos.sum() + trueneg.sum())) | [
"numpy.logical_and",
"torch_geometric.data.DataLoader",
"models.get_model",
"torch.load",
"datasets.hitgraphs.HitDataset",
"os.path.join",
"numpy.logical_not",
"torch.utils.data.Subset",
"torch.cuda.is_available"
] | [((274, 334), 'os.path.join', 'osp.join', (["os.environ['GNN_TRAINING_DATA_ROOT']", '"""single_tau"""'], {}), "(os.environ['GNN_TRAINING_DATA_ROOT'], 'single_tau')\n", (282, 334), True, 'import os.path as osp\n'), ((350, 366), 'datasets.hitgraphs.HitDataset', 'HitDataset', (['path'], {}), '(path)\n', (360, 366), False, 'from datasets.hitgraphs import HitDataset\n'), ((640, 690), 'torch.utils.data.Subset', 'torch.utils.data.Subset', (['full_dataset', '[evt_index]'], {}), '(full_dataset, [evt_index])\n', (663, 690), False, 'import torch\n'), ((704, 757), 'torch_geometric.data.DataLoader', 'DataLoader', (['test_dataset'], {'batch_size': '(1)', 'shuffle': '(False)'}), '(test_dataset, batch_size=1, shuffle=False)\n', (714, 757), False, 'from torch_geometric.data import DataLoader\n'), ((1094, 1116), 'numpy.logical_and', 'np.logical_and', (['y', 'out'], {}), '(y, out)\n', (1108, 1116), True, 'import numpy as np\n'), ((584, 607), 'torch.load', 'torch.load', (['model_fname'], {}), '(model_fname)\n', (594, 607), False, 'import torch\n'), ((1143, 1162), 'numpy.logical_not', 'np.logical_not', (['out'], {}), '(out)\n', (1157, 1162), True, 'import numpy as np\n'), ((1198, 1215), 'numpy.logical_not', 'np.logical_not', (['y'], {}), '(y)\n', (1212, 1215), True, 'import numpy as np\n'), ((1242, 1259), 'numpy.logical_not', 'np.logical_not', (['y'], {}), '(y)\n', (1256, 1259), True, 'import numpy as np\n'), ((1261, 1280), 'numpy.logical_not', 'np.logical_not', (['out'], {}), '(out)\n', (1275, 1280), True, 'import numpy as np\n'), ((448, 473), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (471, 473), False, 'import torch\n'), ((524, 550), 'models.get_model', 'get_model', ([], {'name': '"""PointNet"""'}), "(name='PointNet')\n", (533, 550), False, 'from models import get_model\n')] |
## Copyright (c) 2017 <NAME> GmbH
## All rights reserved.
##
## This source code is licensed under the MIT license found in the
## LICENSE file in the root directory of this source tree.
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import optim
import numpy as np
from torch.nn.utils import weight_norm
import pickle
import sys
from termcolor import colored
from modules.hierarchical_embedding import HierarchicalEmbedding
from modules.embeddings import LearnableEmbedding, SineEmbedding
def sqdist(A, B):
    """Batched pairwise squared Euclidean distances.

    A: (bsz, n, d) and B: (bsz, m, d) tensors; returns (bsz, n, m) where
    entry [b, i, j] = ||A[b, i] - B[b, j]||^2, via the expansion
    ||a||^2 + ||b||^2 - 2 a.b.
    """
    a_sq = (A ** 2).sum(dim=2).unsqueeze(2)          # (bsz, n, 1)
    b_sq = (B ** 2).sum(dim=2).unsqueeze(1)          # (bsz, 1, m)
    cross = torch.bmm(A, B.transpose(1, 2))          # (bsz, n, m)
    return a_sq + b_sq - 2 * cross
class ResidualBlock(nn.Module):
    """Pointwise (kernel-size-1) residual block for (batch, channel, length)
    inputs.  When input and output channel counts differ, a 1x1 grouped
    convolution projects the skip path to the output width."""

    def __init__(self, d_in, d_out, groups=1, dropout=0.0):
        super().__init__()
        assert d_in % groups == 0, "Input dimension must be a multiple of groups"
        assert d_out % groups == 0, "Output dimension must be a multiple of groups"
        self.d_in = d_in
        self.d_out = d_out
        # Two grouped 1x1 convolutions with a ReLU between them and dropout
        # after each conv; registered under the same submodule names and
        # indices so state_dict keys are unchanged.
        body = [
            nn.Conv1d(d_in, d_out, kernel_size=1, groups=groups),
            nn.ReLU(inplace=True),
            nn.Dropout(dropout),
            nn.Conv1d(d_out, d_out, kernel_size=1, groups=groups),
            nn.Dropout(dropout),
        ]
        self.proj = nn.Sequential(*body)
        if d_in != d_out:
            # Bring the skip connection to the output channel count.
            self.downsample = nn.Conv1d(d_in, d_out, kernel_size=1, groups=groups)

    def forward(self, x):
        assert x.size(1) == self.d_in, "x dimension does not agree with d_in"
        skip = x if self.d_in == self.d_out else self.downsample(x)
        return skip + self.proj(x)
class GraphLayer(nn.Module):
    """One transformer layer over the concatenated atom/bond/triplet(/quad)
    sequence.

    Self-attention whose logits are biased by the pairwise distance matrix D,
    scaled by a learned per-(block-pair) coefficient gamma, followed by a
    pre-norm position-wise feed-forward sub-layer.  Both sub-layers carry
    residual connections.
    """

    def __init__(self, d_model, d_inner, n_head, d_head, dropout=0.0, attn_dropout=0.0, wnorm=False, use_quad=False, lev=0):
        """
        d_model: model (hidden) width; d_inner: feed-forward width.
        n_head/d_head: attention heads and per-head width.
        wnorm: apply weight normalization to the linear layers.
        use_quad: whether quadruplet features are present (4 vs 3 slices).
        lev: 1-based layer index, used only to name the optional dump file.
        """
        super().__init__()
        self.d_model = d_model
        self.d_inner = d_inner
        self.n_head = n_head
        self.d_head = d_head
        self.dropout = nn.Dropout(dropout)
        self.attn_dropout = nn.Dropout(attn_dropout)
        self.lev = lev
        self.use_quad = use_quad

        # To produce the query-key-value for the self-attention computation
        self.qkv_net = nn.Linear(d_model, 3*d_model)
        self.o_net = nn.Linear(n_head*d_head, d_model, bias=False)
        self.norm1 = nn.LayerNorm(d_model)
        self.norm2 = nn.LayerNorm(d_model)
        self.proj1 = nn.Linear(d_model, d_inner)
        self.proj2 = nn.Linear(d_inner, d_model)
        self.gamma = nn.Parameter(torch.ones(4, 4))  # For different sub-matrices of D
        self.sqrtd = np.sqrt(d_head)
        if wnorm:
            self.wnorm()

    def wnorm(self):
        """Apply weight normalization to all linear layers in place."""
        self.qkv_net = weight_norm(self.qkv_net, name="weight")
        self.o_net = weight_norm(self.o_net, name="weight")
        self.proj1 = weight_norm(self.proj1, name="weight")
        self.proj2 = weight_norm(self.proj2, name="weight")

    def forward(self, Z, D, new_mask, mask, RA, RB, RT, RQ, store=False):
        """Distance-biased self-attention + feed-forward over Z.

        Z: (bsz, n_elem, d_model) features; D: (bsz, n_elem, n_elem)
        pairwise distances; mask is 1 for valid pairs, new_mask is the
        additive (-1e20 / 0) version.  RA/RB/RT/RQ are the slices of the
        atom/bond/triplet/quad segments.  store dumps the attention
        weights to disk for analysis.
        """
        # RA = slice(0,N), RB = slice(N,N+M), RT = slice(N+M, N+M+P)
        bsz, n_elem, nhid = Z.size()
        n_head, d_head, d_model = self.n_head, self.d_head, self.d_model
        assert nhid == d_model, "Hidden dimension of Z does not agree with d_model"

        # create gamma mask: one learned scalar per (segment, segment) pair
        gamma_mask = torch.ones_like(D)
        all_slices = [RA, RB, RT, RQ] if self.use_quad else [RA, RB, RT]
        for i, slice_i in enumerate(all_slices):
            for j, slice_j in enumerate(all_slices):
                gamma_mask[:, slice_i, slice_j] = self.gamma[i, j]

        # Shape notes from a reference run (N+M+P+Q = 333, d_model = 650,
        # n_head = 10):
        # Z.shape= torch.Size([48, 333, 650])
        # D.shape= torch.Size([48, 333, 333])
        # new_mask.shape= torch.Size([48, 333, 333])  mask: same
        # Q.shape= K.shape= V.shape= torch.Size([48, 333, 10, 65])
        # W.shape= torch.Size([48, 10, 333, 333])
        # WV.shape= torch.Size([48, 333, 10, 65])
        # attn_out.shape= torch.Size([48, 333, 650])
        # ret.shape= torch.Size([48, 333, 650])

        # Self-attention (pre-norm, residual)
        inp = Z
        Z = self.norm1(Z)
        V, Q, K = self.qkv_net(Z).view(bsz, n_elem, n_head, 3*d_head).chunk(3, dim=3)  # "V, Q, K"
        # Logits: scaled dot-product minus the learned distance penalty,
        # with -1e20 added at padded positions before the softmax.
        W = -(gamma_mask*D)[:,None] + torch.einsum('bnij, bmij->binm', Q, K).type(D.dtype) / self.sqrtd + new_mask[:,None]
        W = self.attn_dropout(F.softmax(W, dim=3).type(mask.dtype) * mask[:, None])  # softmax(-gamma*D + Q^T K)
        if store:
            pickle.dump(W.cpu().detach().numpy(), open(f'analysis/layer_{self.lev}_W.pkl', 'wb'))
        attn_out = torch.einsum('binm,bmij->bnij', W, V.type(W.dtype)).contiguous().view(bsz, n_elem, d_model)
        attn_out = self.dropout(self.o_net(F.leaky_relu(attn_out)))
        Z = attn_out + inp

        # Position-wise feed-forward (pre-norm, residual)
        inp = Z
        Z = self.norm2(Z)
        # d_model -> d_inner -> d_model
        return self.proj2(self.dropout(F.relu(self.proj1(Z)))) + inp
class GraphTransformer(nn.Module):
    """Graph transformer over molecules represented as atoms, bonds,
    triplets and (optionally) quadruplets.

    Each structural level is embedded separately, concatenated into one
    sequence, and processed by a stack of distance-biased GraphLayer
    blocks.  The head predicts per-bond outputs grouped by the
    second-hierarchy bond type.
    """

    def __init__(self,
                 dim, # model dim
                 n_layers,
                 final_dim,
                 d_inner,
                 fdim=30, # feature dim; embed_dim = dim - fdim
                 dropout=0.0,
                 dropatt=0.0,
                 final_dropout=0.0,
                 n_head=10,
                 num_atom_types=[5, 13, 27],
                 num_bond_types=[28, 53, 69],
                 num_triplet_types=[29, 118],
                 num_quad_types=[62],
                 #min_bond_dist=0.9586,
                 #max_bond_dist=3.9244,
                 dist_embedding="sine",
                 atom_angle_embedding="learnable",
                 trip_angle_embedding="learnable",
                 quad_angle_embedding="learnable",
                 wnorm=False,
                 use_quad=False
                 ):
        # NOTE(review): the mutable list defaults are shared across calls;
        # harmless here because they are only read, never mutated in place.
        super().__init__()
        num_atom_types = np.array(num_atom_types)
        num_bond_types = np.array(num_bond_types)
        num_triplet_types = np.array(num_triplet_types)
        num_quad_types = np.array(num_quad_types)
        if atom_angle_embedding == 'learnable':
            # features = [closest atoms angle, partial charge]
            self.atom_embedding = LearnableEmbedding(len(num_atom_types), num_atom_types+1,
                                                  d_embeds=dim-fdim, d_feature=fdim, n_feature=2)
        else:
            self.atom_embedding = SineEmbedding(len(num_atom_types), num_atom_types+1, dim, n_feature=2)
        if dist_embedding == 'learnable':
            # features: [bond_dist]
            self.bond_embedding = LearnableEmbedding(len(num_bond_types), num_bond_types+1,
                                                  d_embeds=dim-fdim, d_feature=fdim, n_feature=1)
        else:
            self.bond_embedding = SineEmbedding(len(num_bond_types), num_bond_types+1, dim, n_feature=1)
        if trip_angle_embedding == 'learnable':
            # features: [angle]
            self.triplet_embedding = LearnableEmbedding(len(num_triplet_types), num_triplet_types+1,
                                                     d_embeds=dim-fdim, d_feature=fdim, n_feature=1)
        else:
            self.triplet_embedding = SineEmbedding(len(num_triplet_types), num_triplet_types+1, dim)
        if use_quad:
            if quad_angle_embedding == 'learnable':
                self.quad_embedding = LearnableEmbedding(len(num_quad_types), num_quad_types+1,
                                                      d_embeds=dim-fdim, d_feature=fdim, n_feature=1)
            else:
                self.quad_embedding = SineEmbedding(len(num_quad_types), num_quad_types+1, dim)
        self.fdim = fdim
        self.dim = dim
        #self.min_bond_dist = min_bond_dist
        #self.max_bond_dist = max_bond_dist
        self.wnorm = wnorm
        self.use_quad = use_quad
        print(f"{'' if use_quad else colored('Not ', 'cyan')}Using Quadruplet Features")
        self.n_head = n_head
        assert dim % n_head == 0, "dim must be a multiple of n_head"
        self.layers = nn.ModuleList([GraphLayer(d_model=dim, d_inner=d_inner, n_head=n_head, d_head=dim//n_head, dropout=dropout,
                                    attn_dropout=dropatt, wnorm=wnorm, use_quad=use_quad, lev=i+1) for i in range(n_layers)])
        self.final_norm = nn.LayerNorm(dim)
        # TODO: Warning: we are predicting with the second-hierarchy bond (sub)types!!!!!
        self.final_dropout = final_dropout
        final_dim = num_bond_types[1] * final_dim
        self.final_lin1 = nn.Conv1d(dim, final_dim, kernel_size=1)
        self.final_res = nn.Sequential(
            ResidualBlock(final_dim, final_dim, groups=int(num_bond_types[1]), dropout=final_dropout),
            nn.Conv1d(final_dim, num_bond_types[1], kernel_size=1, groups=int(num_bond_types[1]))
        )
        self.apply(self.weights_init)

    def forward(self, x_atom, x_atom_pos, x_bond, x_bond_dist, x_triplet, x_triplet_angle, x_quad, x_quad_angle):
        """Run the full model.

        x_atom_pos[:, :, :3] are xyz coordinates (remaining columns are
        extra per-atom features).  Columns 3-4 of x_bond index the two
        member atoms; x_triplet columns 1-3 index atoms and 4-5 index
        bonds; x_quad columns 1-4/5-7/8-9 index atoms/bonds/triplets
        (inferred from how they index D -- TODO confirm against the data
        pipeline).  Index 0 of each tensor is the type id, with 0 meaning
        padding.  Returns (per-bond grouped predictions, final hidden
        states).
        """
        # PART I: Form the embeddings and the distance matrix
        bsz = x_atom.shape[0]
        N = x_atom.shape[1]
        M = x_bond.shape[1]
        P = x_triplet.shape[1]
        Q = x_quad.shape[1] if self.use_quad else 0
        D = torch.zeros(x_atom.shape[0], N+M+P+Q, N+M+P+Q, device=x_atom.device)
        RA = slice(0,N)
        RB = slice(N,N+M)
        RT = slice(N+M, N+M+P)
        RQ = slice(N+M+P, N+M+P+Q)
        D[:,RA,RA] = sqdist(x_atom_pos[:,:,:3], x_atom_pos[:,:,:3]) # Only the x,y,z information, not charge/angle
        for i in range(D.shape[0]):
            # bonds: distance to a bond = min distance to its two atoms;
            # bond-bond distances are symmetrized averages.
            a1,a2 = x_bond[i,:,3], x_bond[i,:,4]
            D[i, RA, RB] = torch.min(D[i, RA, a1], D[i, RA, a2])
            D[i, RB, RA] = D[i, RA, RB].transpose(0,1)
            D[i, RB, RB] = (D[i,a1,RB] + D[i,a2,RB])/2
            D[i, RB ,RB] = (D[i,RB,RB] + D[i,RB,RB].transpose(0,1))/2

            # triplets: analogous construction from member atoms/bonds.
            a1, a2, a3 = x_triplet[i,:,1], x_triplet[i,:,2], x_triplet[i,:,3]
            b1, b2 = x_triplet[i,:,4], x_triplet[i,:,5]
            D[i, RA, RT] = torch.min(torch.min(D[i,RA,a1], D[i,RA,a2]), D[i,RA, a3]) + D[i,RA,a1]
            D[i, RT, RA] = D[i,RA,RT].transpose(0,1)
            D[i, RB, RT] = torch.min(D[i,RB,b1], D[i,RB,b2])
            D[i, RT, RB] = D[i,RB,RT].transpose(0,1)
            D[i, RT, RT] = (D[i,b1,RT] + D[i,b2,RT]) / 2
            D[i, RT, RT] = (D[i,RT,RT] + D[i,RT,RT].transpose(0,1)) / 2

            if self.use_quad:
                # quad: same pattern one level up.
                a1,a2,a3,a4 = x_quad[i,:,1], x_quad[i,:,2], x_quad[i,:,3], x_quad[i,:,4]
                b1,b2,b3 = x_quad[i,:,5], x_quad[i,:,6], x_quad[i,:,7]
                t1,t2 = x_quad[i,:,8], x_quad[i,:,9]
                D[i,RA,RQ] = torch.min(torch.min(torch.min(D[i,RA,a1], D[i,RA,a2]), D[i,RA, a3]), D[i,RA,a4]) + \
                             torch.min(D[i,RA,a1], D[i,RA,a2])
                D[i,RQ,RA] = D[i,RA,RQ].transpose(0,1)
                D[i,RB,RQ] = torch.min(torch.min(D[i,RB,b1], D[i,RB,b2]), D[i,RB, b3]) + D[i,RB,b1]
                D[i,RQ,RB] = D[i,RB,RQ].transpose(0,1)
                D[i,RT,RQ] = torch.min(D[i,RT,t1], D[i,RT,t2])
                D[i,RQ,RT] = D[i,RT,RQ].transpose(0,1)
                D[i,RQ,RQ] = (D[i,t1,RQ] + D[i,t2,RQ]) / 2
                D[i,RQ,RQ] = (D[i,RQ,RQ] + D[i,RQ,RQ].transpose(0,1))/2

        # No interaction (as in attention = 0) if query or key is the zero padding...
        if self.use_quad:
            mask = torch.cat([x_atom[:,:,0] > 0, x_bond[:,:,0] > 0, x_triplet[:,:,0] > 0, x_quad[:,:,0] > 0], dim=1).type(x_atom_pos.dtype)
        else:
            mask = torch.cat([x_atom[:,:,0] > 0, x_bond[:,:,0] > 0, x_triplet[:,:,0] > 0], dim=1).type(x_atom_pos.dtype)
        mask = torch.einsum('bi, bj->bij', mask, mask)
        new_mask = -1e20 * torch.ones_like(mask).to(mask.device)
        new_mask[mask > 0] = 0
        if self.use_quad:
            Z = torch.cat([
                self.atom_embedding(x_atom[:,:,:3], x_atom_pos[:,:,3:]),
                self.bond_embedding(x_bond[:,:,:3], x_bond_dist),
                self.triplet_embedding(x_triplet[:,:,:2], x_triplet_angle),
                self.quad_embedding(x_quad[:,:,:1], x_quad_angle)], dim=1)
        else:
            Z = torch.cat([
                self.atom_embedding(x_atom[:,:,:3], x_atom_pos[:,:,3:]),
                self.bond_embedding(x_bond[:,:,:3], x_bond_dist),
                self.triplet_embedding(x_triplet[:,:,:2], x_triplet_angle)], dim=1)

        # PART II: Pass through a bunch of self-attention and position-wise feed-forward blocks
        # NOTE(review): `seed` is never used -- dead statement whose only
        # effect is advancing the global NumPy RNG state.
        seed = np.random.uniform(0,1)
        for i in range(len(self.layers)):
            Z = self.layers[i](Z, D, new_mask, mask, RA, RB, RT, RQ, store=False)

        # PART III: Coupling type based (grouped) transformations
        # Reference shapes: Z.shape= torch.Size([bs, 333, 650]);
        # Z_group.shape= torch.Size([bs, 19600, 250]);
        # ret.shape= torch.Size([bs, 70, 250]); RB = slice(28, 279) ~ 250.
        Z = self.final_norm(Z)
        # self.final_dim = num_bond_types[1] * final_dim
        # bs x 333 x 650 => bs x 650 x 333 => bs x 650 x 250 (bond slice)
        # => bs x final_dim x 250
        Z_group = self.final_lin1(Z.transpose(1,2)[:,:,RB])
        # final_dim => num_bond_types[1] (70) grouped convolution
        return self.final_res(Z_group), Z

    @staticmethod
    def init_weight(weight):
        """Uniform(-0.1, 0.1) initialization for weight tensors."""
        nn.init.uniform_(weight, -0.1, 0.1)

    @staticmethod
    def init_bias(bias):
        """Zero initialization for bias tensors."""
        nn.init.constant_(bias, 0.0)

    @staticmethod
    def weights_init(m):
        """Module-walker for self.apply: (re)initialize Linear/Conv1d layers."""
        classname = m.__class__.__name__
        if classname.find('Linear') != -1 or classname.find('Conv1d') != -1:
            if hasattr(m, 'weight') and m.weight is not None:
                GraphTransformer.init_weight(m.weight)
            if hasattr(m, 'bias') and m.bias is not None:
                GraphTransformer.init_bias(m.bias)
| [
"torch.nn.Conv1d",
"torch.nn.ReLU",
"torch.nn.Dropout",
"numpy.sqrt",
"torch.nn.init.constant_",
"torch.min",
"numpy.array",
"torch.nn.functional.softmax",
"torch.nn.LayerNorm",
"torch.nn.init.uniform_",
"torch.nn.functional.leaky_relu",
"torch.ones_like",
"torch.einsum",
"torch.cat",
"t... | [((1970, 1989), 'torch.nn.Dropout', 'nn.Dropout', (['dropout'], {}), '(dropout)\n', (1980, 1989), True, 'import torch.nn as nn\n'), ((2018, 2042), 'torch.nn.Dropout', 'nn.Dropout', (['attn_dropout'], {}), '(attn_dropout)\n', (2028, 2042), True, 'import torch.nn as nn\n'), ((2199, 2230), 'torch.nn.Linear', 'nn.Linear', (['d_model', '(3 * d_model)'], {}), '(d_model, 3 * d_model)\n', (2208, 2230), True, 'import torch.nn as nn\n'), ((2250, 2297), 'torch.nn.Linear', 'nn.Linear', (['(n_head * d_head)', 'd_model'], {'bias': '(False)'}), '(n_head * d_head, d_model, bias=False)\n', (2259, 2297), True, 'import torch.nn as nn\n'), ((2317, 2338), 'torch.nn.LayerNorm', 'nn.LayerNorm', (['d_model'], {}), '(d_model)\n', (2329, 2338), True, 'import torch.nn as nn\n'), ((2360, 2381), 'torch.nn.LayerNorm', 'nn.LayerNorm', (['d_model'], {}), '(d_model)\n', (2372, 2381), True, 'import torch.nn as nn\n'), ((2404, 2431), 'torch.nn.Linear', 'nn.Linear', (['d_model', 'd_inner'], {}), '(d_model, d_inner)\n', (2413, 2431), True, 'import torch.nn as nn\n'), ((2453, 2480), 'torch.nn.Linear', 'nn.Linear', (['d_inner', 'd_model'], {}), '(d_inner, d_model)\n', (2462, 2480), True, 'import torch.nn as nn\n'), ((2590, 2605), 'numpy.sqrt', 'np.sqrt', (['d_head'], {}), '(d_head)\n', (2597, 2605), True, 'import numpy as np\n'), ((2695, 2735), 'torch.nn.utils.weight_norm', 'weight_norm', (['self.qkv_net'], {'name': '"""weight"""'}), "(self.qkv_net, name='weight')\n", (2706, 2735), False, 'from torch.nn.utils import weight_norm\n'), ((2757, 2795), 'torch.nn.utils.weight_norm', 'weight_norm', (['self.o_net'], {'name': '"""weight"""'}), "(self.o_net, name='weight')\n", (2768, 2795), False, 'from torch.nn.utils import weight_norm\n'), ((2817, 2855), 'torch.nn.utils.weight_norm', 'weight_norm', (['self.proj1'], {'name': '"""weight"""'}), "(self.proj1, name='weight')\n", (2828, 2855), False, 'from torch.nn.utils import weight_norm\n'), ((2877, 2915), 'torch.nn.utils.weight_norm', 'weight_norm', 
(['self.proj2'], {'name': '"""weight"""'}), "(self.proj2, name='weight')\n", (2888, 2915), False, 'from torch.nn.utils import weight_norm\n'), ((3304, 3322), 'torch.ones_like', 'torch.ones_like', (['D'], {}), '(D)\n', (3319, 3322), False, 'import torch\n'), ((6078, 6102), 'numpy.array', 'np.array', (['num_atom_types'], {}), '(num_atom_types)\n', (6086, 6102), True, 'import numpy as np\n'), ((6128, 6152), 'numpy.array', 'np.array', (['num_bond_types'], {}), '(num_bond_types)\n', (6136, 6152), True, 'import numpy as np\n'), ((6181, 6208), 'numpy.array', 'np.array', (['num_triplet_types'], {}), '(num_triplet_types)\n', (6189, 6208), True, 'import numpy as np\n'), ((6234, 6258), 'numpy.array', 'np.array', (['num_quad_types'], {}), '(num_quad_types)\n', (6242, 6258), True, 'import numpy as np\n'), ((8524, 8541), 'torch.nn.LayerNorm', 'nn.LayerNorm', (['dim'], {}), '(dim)\n', (8536, 8541), True, 'import torch.nn as nn\n'), ((8752, 8792), 'torch.nn.Conv1d', 'nn.Conv1d', (['dim', 'final_dim'], {'kernel_size': '(1)'}), '(dim, final_dim, kernel_size=1)\n', (8761, 8792), True, 'import torch.nn as nn\n'), ((9492, 9577), 'torch.zeros', 'torch.zeros', (['x_atom.shape[0]', '(N + M + P + Q)', '(N + M + P + Q)'], {'device': 'x_atom.device'}), '(x_atom.shape[0], N + M + P + Q, N + M + P + Q, device=x_atom.device\n )\n', (9503, 9577), False, 'import torch\n'), ((12005, 12044), 'torch.einsum', 'torch.einsum', (['"""bi, bj->bij"""', 'mask', 'mask'], {}), "('bi, bj->bij', mask, mask)\n", (12017, 12044), False, 'import torch\n'), ((12862, 12885), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(1)'], {}), '(0, 1)\n', (12879, 12885), True, 'import numpy as np\n'), ((13768, 13803), 'torch.nn.init.uniform_', 'nn.init.uniform_', (['weight', '(-0.1)', '(0.1)'], {}), '(weight, -0.1, 0.1)\n', (13784, 13803), True, 'import torch.nn as nn\n'), ((13856, 13884), 'torch.nn.init.constant_', 'nn.init.constant_', (['bias', '(0.0)'], {}), '(bias, 0.0)\n', (13873, 13884), True, 'import torch.nn as 
nn\n'), ((1022, 1074), 'torch.nn.Conv1d', 'nn.Conv1d', (['d_in', 'd_out'], {'kernel_size': '(1)', 'groups': 'groups'}), '(d_in, d_out, kernel_size=1, groups=groups)\n', (1031, 1074), True, 'import torch.nn as nn\n'), ((1110, 1131), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (1117, 1131), True, 'import torch.nn as nn\n'), ((1167, 1186), 'torch.nn.Dropout', 'nn.Dropout', (['dropout'], {}), '(dropout)\n', (1177, 1186), True, 'import torch.nn as nn\n'), ((1222, 1275), 'torch.nn.Conv1d', 'nn.Conv1d', (['d_out', 'd_out'], {'kernel_size': '(1)', 'groups': 'groups'}), '(d_out, d_out, kernel_size=1, groups=groups)\n', (1231, 1275), True, 'import torch.nn as nn\n'), ((1311, 1330), 'torch.nn.Dropout', 'nn.Dropout', (['dropout'], {}), '(dropout)\n', (1321, 1330), True, 'import torch.nn as nn\n'), ((1388, 1440), 'torch.nn.Conv1d', 'nn.Conv1d', (['d_in', 'd_out'], {'kernel_size': '(1)', 'groups': 'groups'}), '(d_in, d_out, kernel_size=1, groups=groups)\n', (1397, 1440), True, 'import torch.nn as nn\n'), ((2515, 2531), 'torch.ones', 'torch.ones', (['(4)', '(4)'], {}), '(4, 4)\n', (2525, 2531), False, 'import torch\n'), ((9929, 9966), 'torch.min', 'torch.min', (['D[i, RA, a1]', 'D[i, RA, a2]'], {}), '(D[i, RA, a1], D[i, RA, a2])\n', (9938, 9966), False, 'import torch\n'), ((10483, 10520), 'torch.min', 'torch.min', (['D[i, RB, b1]', 'D[i, RB, b2]'], {}), '(D[i, RB, b1], D[i, RB, b2])\n', (10492, 10520), False, 'import torch\n'), ((4892, 4914), 'torch.nn.functional.leaky_relu', 'F.leaky_relu', (['attn_out'], {}), '(attn_out)\n', (4904, 4914), True, 'import torch.nn.functional as F\n'), ((11382, 11419), 'torch.min', 'torch.min', (['D[i, RT, t1]', 'D[i, RT, t2]'], {}), '(D[i, RT, t1], D[i, RT, t2])\n', (11391, 11419), False, 'import torch\n'), ((10342, 10379), 'torch.min', 'torch.min', (['D[i, RA, a1]', 'D[i, RA, a2]'], {}), '(D[i, RA, a1], D[i, RA, a2])\n', (10351, 10379), False, 'import torch\n'), ((11109, 11146), 'torch.min', 'torch.min', (['D[i, 
RA, a1]', 'D[i, RA, a2]'], {}), '(D[i, RA, a1], D[i, RA, a2])\n', (11118, 11146), False, 'import torch\n'), ((11734, 11843), 'torch.cat', 'torch.cat', (['[x_atom[:, :, 0] > 0, x_bond[:, :, 0] > 0, x_triplet[:, :, 0] > 0, x_quad[:,\n :, 0] > 0]'], {'dim': '(1)'}), '([x_atom[:, :, 0] > 0, x_bond[:, :, 0] > 0, x_triplet[:, :, 0] > 0,\n x_quad[:, :, 0] > 0], dim=1)\n', (11743, 11843), False, 'import torch\n'), ((11888, 11977), 'torch.cat', 'torch.cat', (['[x_atom[:, :, 0] > 0, x_bond[:, :, 0] > 0, x_triplet[:, :, 0] > 0]'], {'dim': '(1)'}), '([x_atom[:, :, 0] > 0, x_bond[:, :, 0] > 0, x_triplet[:, :, 0] > 0\n ], dim=1)\n', (11897, 11977), False, 'import torch\n'), ((12072, 12093), 'torch.ones_like', 'torch.ones_like', (['mask'], {}), '(mask)\n', (12087, 12093), False, 'import torch\n'), ((4530, 4549), 'torch.nn.functional.softmax', 'F.softmax', (['W'], {'dim': '(3)'}), '(W, dim=3)\n', (4539, 4549), True, 'import torch.nn.functional as F\n'), ((8078, 8101), 'termcolor.colored', 'colored', (['"""Not """', '"""cyan"""'], {}), "('Not ', 'cyan')\n", (8085, 8101), False, 'from termcolor import colored\n'), ((11237, 11274), 'torch.min', 'torch.min', (['D[i, RB, b1]', 'D[i, RB, b2]'], {}), '(D[i, RB, b1], D[i, RB, b2])\n', (11246, 11274), False, 'import torch\n'), ((4415, 4453), 'torch.einsum', 'torch.einsum', (['"""bnij, bmij->binm"""', 'Q', 'K'], {}), "('bnij, bmij->binm', Q, K)\n", (4427, 4453), False, 'import torch\n'), ((11015, 11052), 'torch.min', 'torch.min', (['D[i, RA, a1]', 'D[i, RA, a2]'], {}), '(D[i, RA, a1], D[i, RA, a2])\n', (11024, 11052), False, 'import torch\n')] |
#!/usr/bin/env python
"""
Parse DAXM indexation results (xml) to instances of daxm voxel
objects.
The script can be envoked as individual programe via CMD or imported
as a parser func for interactive data analysis.
"""
import numpy as np
import xml.etree.cElementTree as ET
from daxmexplorer.voxel import DAXMvoxel
def parse_xml(xmlfile,
              namespace={'step':'http://sector34.xray.aps.anl.gov/34ide:indexResult'},
              autopair=False,
              forceNaNtoZero=True,
              h5file=None):
    """Parse a DAXM indexation result file (XML) into DAXMvoxel instances.

    :param xmlfile: path to the XML file produced by the indexation software
    :param namespace: XML namespace map used for every element lookup
    :param autopair: pair scattering vectors with plane indices on each voxel
    :param forceNaNtoZero: treat a NaN wire depth (scan without wire) as 0.0
    :param h5file: when given, every voxel is also archived to this HDF5 file
    :return: list of DAXMvoxel objects, one per indexed voxel
    """
    ns = namespace
    root = ET.parse(xmlfile).getroot()

    def _floats(text):
        # whitespace-separated numbers -> list of floats
        return list(map(float, text.split()))

    voxels = []
    for step in root:
        # voxels without an <astar> entry carry no indexing result: skip them
        if step.find('step:indexing/step:pattern/step:recip_lattice/step:astar', ns) is None:
            continue
        # sample coordinates (signs flipped, voxel tagged with ref_frame='APS')
        xsample = float(step.find('step:Xsample', ns).text)
        ysample = float(step.find('step:Ysample', ns).text)
        zsample = float(step.find('step:Zsample', ns).text)
        depth = float(step.find('step:depth', ns).text)
        # scans performed without a wire report NaN depth -> treat as zero
        if forceNaNtoZero and np.isnan(depth):
            depth = 0.0
        coords = np.array([-xsample, -ysample, -zsample + depth])
        # pattern image: full path kept, basename (minus .h5) names the voxel
        h5img = step.find('step:detector/step:inputImage', ns).text
        voxelname = h5img.split("/")[-1].replace(".h5", "")
        # detector peak positions; a missing pixel list becomes NaN
        xpix_txt = step.find('step:detector/step:peaksXY/step:Xpixel', ns).text
        ypix_txt = step.find('step:detector/step:peaksXY/step:Ypixel', ns).text
        xpix = np.nan if xpix_txt is None else _floats(xpix_txt)
        ypix = np.nan if ypix_txt is None else _floats(ypix_txt)
        peaks = np.stack((xpix, ypix))
        # scattering vectors (qx, qy, qz), stacked row-wise
        scatter_vecs = np.stack((
            _floats(step.find('step:detector/step:peaksXY/step:Qx', ns).text),
            _floats(step.find('step:detector/step:peaksXY/step:Qy', ns).text),
            _floats(step.find('step:detector/step:peaksXY/step:Qz', ns).text),
        ))
        # reciprocal lattice base vectors as columns (a*, b*, c*)
        recip_base = np.column_stack((
            _floats(step.find('step:indexing/step:pattern/step:recip_lattice/step:astar', ns).text),
            _floats(step.find('step:indexing/step:pattern/step:recip_lattice/step:bstar', ns).text),
            _floats(step.find('step:indexing/step:pattern/step:recip_lattice/step:cstar', ns).text),
        ))
        # Miller indices (h, k, l) of the indexed peaks, stacked row-wise
        plane = np.stack((
            _floats(step.find('step:indexing/step:pattern/step:hkl_s/step:h', ns).text),
            _floats(step.find('step:indexing/step:pattern/step:hkl_s/step:k', ns).text),
            _floats(step.find('step:indexing/step:pattern/step:hkl_s/step:l', ns).text),
        ))
        voxel = DAXMvoxel(name=voxelname,
                          ref_frame='APS',
                          coords=coords,
                          pattern_image=h5img,
                          scatter_vec=scatter_vecs,
                          plane=plane,
                          recip_base=recip_base,
                          peak=peaks,
                          depth=depth,
                          )
        # optionally pair scattering vectors with plane indices
        if autopair:
            voxel.pair_scattervec_plane()
        # optionally archive the voxel to HDF5
        if h5file is not None:
            voxel.write(h5file=h5file)
        voxels.append(voxel)
    return voxels
if __name__ == "__main__":
    # CLI entry point: first argument is the XML file to parse; voxels are
    # archived next to it in an .h5 file with the same base name.
    import sys
    src_xml = sys.argv[1]
    print("parsing xml file: {}".format(src_xml))
    parse_xml(src_xml, h5file=src_xml.replace(".xml", ".h5"))
| [
"daxmexplorer.voxel.DAXMvoxel",
"numpy.column_stack",
"xml.etree.cElementTree.parse",
"numpy.array",
"numpy.stack",
"numpy.isnan"
] | [((550, 567), 'xml.etree.cElementTree.parse', 'ET.parse', (['xmlfile'], {}), '(xmlfile)\n', (558, 567), True, 'import xml.etree.cElementTree as ET\n'), ((1331, 1379), 'numpy.array', 'np.array', (['[-xsample, -ysample, -zsample + depth]'], {}), '([-xsample, -ysample, -zsample + depth])\n', (1339, 1379), True, 'import numpy as np\n'), ((1877, 1899), 'numpy.stack', 'np.stack', (['(xpix, ypix)'], {}), '((xpix, ypix))\n', (1885, 1899), True, 'import numpy as np\n'), ((2294, 2316), 'numpy.stack', 'np.stack', (['(qx, qy, qz)'], {}), '((qx, qy, qz))\n', (2302, 2316), True, 'import numpy as np\n'), ((2835, 2873), 'numpy.column_stack', 'np.column_stack', (['(astar, bstar, cstar)'], {}), '((astar, bstar, cstar))\n', (2850, 2873), True, 'import numpy as np\n'), ((3280, 3299), 'numpy.stack', 'np.stack', (['(h, k, l)'], {}), '((h, k, l))\n', (3288, 3299), True, 'import numpy as np\n'), ((3353, 3527), 'daxmexplorer.voxel.DAXMvoxel', 'DAXMvoxel', ([], {'name': 'voxelname', 'ref_frame': '"""APS"""', 'coords': 'coords', 'pattern_image': 'h5img', 'scatter_vec': 'scatter_vecs', 'plane': 'plane', 'recip_base': 'recip_base', 'peak': 'peaks', 'depth': 'depth'}), "(name=voxelname, ref_frame='APS', coords=coords, pattern_image=\n h5img, scatter_vec=scatter_vecs, plane=plane, recip_base=recip_base,\n peak=peaks, depth=depth)\n", (3362, 3527), False, 'from daxmexplorer.voxel import DAXMvoxel\n'), ((1286, 1301), 'numpy.isnan', 'np.isnan', (['depth'], {}), '(depth)\n', (1294, 1301), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""Assignment_2_final.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1xFHKJhg5V45TgGza0RYuqTMQLqRIhscE
"""
import random
import math
import numpy as np
import pandas as pd
from queue import deque
SWARM_SIZE = 20         # number of particles in the swarm
DIMENTIONS = 20         # search-space dimensionality (sic: "DIMENSIONS")
ITERATIONS = 1000       # PSO iterations per single run
PSO_RUNS = 30           # independent PSO runs averaged per benchmark point
ENABLE_EXEC_LOG = True  # print progress messages during benchmark/search
TS_MEMORY = 30          # tabu-list length (number of memorized solutions)
ENABLE_TS_CACHE = True  # memoize PSO fitness results per (w, c1, c2) triple
class PSO:
    """
    Particle Swarm Optimization with a star (global-best) topology.
    """

    def __init__(self, w, c1, c2, bounds, obj_func):
        """
        Build a swarm of SWARM_SIZE particles placed uniformly inside bounds.
        """
        self.inertia = w
        self.cognitive = c1
        self.social = c2
        self.obj_func = obj_func
        self.bounds = bounds
        # each particle starts at a random point of the search box
        self.swarm = [
            Particle([random.uniform(bounds[0], bounds[1])
                      for _ in range(DIMENTIONS)])
            for _ in range(SWARM_SIZE)
        ]

    def run(self):
        """
        Execute ITERATIONS optimization steps; return the best error found.
        """
        best_err = -1   # -1 acts as the "not yet set" sentinel
        best_pos = []
        for _ in range(ITERATIONS):
            # fitness evaluation and global-best bookkeeping
            for particle in self.swarm:
                particle.evaluate(self.obj_func)
                if best_err == -1 or particle.particle_err < best_err:
                    best_pos = list(particle.particle_pos)
                    best_err = float(particle.particle_err)
            # velocity/position updates against the global best
            for particle in self.swarm:
                particle.update_velocity(best_pos, self.inertia, self.cognitive, self.social)
                particle.update_position(self.bounds)
        return best_err
class Particle:
    """
    Represents a particle of the swarm: current position/velocity plus the
    personal best position/error found so far.
    """

    def __init__(self, start):
        self.particle_pos = []   # current position
        self.particle_vel = []   # current velocity
        self.best_pos = []       # best position found by this particle
        self.best_err = -1       # best (lowest) error found; -1 = unset
        self.particle_err = -1   # error at the current position; -1 = unset
        for i in range(DIMENTIONS):
            self.particle_vel.append(random.uniform(-1, 1))
            self.particle_pos.append(start[i])

    def evaluate(self, costFunc):
        """
        Evaluate current fitness and refresh the personal best.
        """
        self.particle_err = costFunc(self.particle_pos)
        if self.particle_err < self.best_err or self.best_err == -1:
            # BUG FIX: snapshot the position. The original aliased
            # self.particle_pos, so best_pos tracked every later move and
            # the cognitive velocity term was always zero.
            self.best_pos = list(self.particle_pos)
            self.best_err = self.particle_err

    def update_velocity(self, pos_best_g, w, c1, c2):
        """
        Update velocity from inertia, cognitive and social components.
        """
        for i in range(DIMENTIONS):
            r1 = random.random()
            r2 = random.random()
            vel_cognitive = c1 * r1 * (self.best_pos[i] - self.particle_pos[i])
            vel_social = c2 * r2 * (pos_best_g[i] - self.particle_pos[i])
            self.particle_vel[i] = w * self.particle_vel[i] + vel_cognitive + vel_social

    def update_position(self, bounds):
        """
        Move by the current velocity, clamping each coordinate to bounds.
        """
        for i in range(DIMENTIONS):
            self.particle_pos[i] = self.particle_pos[i] + self.particle_vel[i]
            # clamp to the upper bound if necessary
            if self.particle_pos[i] > bounds[1]:
                self.particle_pos[i] = bounds[1]
            # clamp to the lower bound if necessary
            if self.particle_pos[i] < bounds[0]:
                self.particle_pos[i] = bounds[0]
class ObjFn:
    """
    Benchmark objective functions (to be minimized) for PSO.

    Each function takes a position vector x and returns a scalar; the
    *_bounds attributes give the conventional per-dimension search domain.
    """

    spherical_bounds = (-5.12, 5.12)
    ackley_bounds = (-32.768, 32.768)
    # lower bound must be strictly positive for Michalewicz; use the
    # smallest positive float64 instead of 0
    michalewicz_bounds = (np.finfo(np.float64).tiny, math.pi)
    katsuura_bounds = (-100, 100)

    def spherical(x):
        """Sum of squares; global minimum 0 at the origin."""
        return sum(xi ** 2 for xi in x)

    def ackley(x):
        """Ackley function; global minimum 0 at the origin."""
        n = len(x)
        res_cos = sum(math.cos(2 * math.pi * xi) for xi in x)
        result = -20 * math.exp(-0.2 * math.sqrt((1 / n) * ObjFn.spherical(x)))
        return result - math.exp((1 / n) * res_cos) + 20 + math.exp(1)

    def michalewicz(x):
        """Michalewicz function with steepness m=10.

        BUG FIX: uses the standard 1-based dimension index; the i-th term is
        sin(x_i) * sin(i * x_i**2 / pi)**(2m).  The original 0-based index
        made the first dimension's term vanish (sin(0) factor).
        """
        m = 10
        return -sum(math.sin(xi) * math.sin((i + 1) * xi ** 2 / math.pi) ** (2 * m)
                    for i, xi in enumerate(x))

    def katsuura(x):
        """Katsuura function; global minimum 0 at the origin."""
        d = len(x)
        prod = 1
        for i in range(d):
            # inner sum over 32 dyadic scales (renamed from `sum`,
            # which shadowed the builtin)
            total = 0
            two_k = 1
            for k in range(1, 33):
                two_k = two_k * 2
                total += abs(two_k * x[i] - round(two_k * x[i])) / two_k
            prod *= (1 + (i + 1) * total) ** (10 / (d ** 1.2))
        return (10 / (d ** 2)) * (prod - 1)
class BenchmarkPSO:
    """
    PSO intelligent parameter tuning: sweeps a grid of (w, c1=c2)
    coefficients and records the averaged PSO result per objective.
    """

    def run_benchmark():
        """
        Sweep w over [-1.1, 1.1] and c over [0.05, 2.5]; return one
        DataFrame row per (w, c) with the averaged result of each objective.
        """
        if ENABLE_EXEC_LOG:
            print("******** RUNNING PSO BENCHMARKS ********")
            print("\nBelow are logged sample optimization results:")
            print("\tW\tC1\tC2\tSpherical\t\tAckley\t\tMichalewicz\t\tKatsuura")
        data = []
        for w in np.arange(-1.1, 1.15, 0.1):
            for c in np.arange(0.05, 2.525, 0.05):
                s_res = BenchmarkPSO.run_pso_batch(w, c, c, ObjFn.spherical, ObjFn.spherical_bounds)
                a_res = BenchmarkPSO.run_pso_batch(w, c, c, ObjFn.ackley, ObjFn.ackley_bounds)
                m_res = BenchmarkPSO.run_pso_batch(w, c, c, ObjFn.michalewicz, ObjFn.michalewicz_bounds)
                k_res = BenchmarkPSO.run_pso_batch(w, c, c, ObjFn.katsuura, ObjFn.katsuura_bounds)
                data.append([w, c, c, s_res, a_res, m_res, k_res])
                # log a sample roughly every 0.25 step of c
                if ENABLE_EXEC_LOG and math.floor(c * 100) % 25 == 0:
                    print("\t{:.1f}\t{:.2f}\t{:.2f}\t{:.10f}\t{:.10f}\t\t{:.10f}\t{:.10f}".format(w, c, c, s_res, a_res, m_res, k_res))
        df = pd.DataFrame(data, columns=['inertia', 'cognitive', 'social', 'spherical', 'ackley', 'michalewicz', 'katsuura'])
        if ENABLE_EXEC_LOG:
            print("PSO Benchmarking completed")
        return df

    def run_pso_batch(w, c1, c2, func, bounds):
        """
        Average the best PSO error over PSO_RUNS independent runs of `func`.
        """
        total = 0
        for i in range(PSO_RUNS):
            # BUG FIX: obj_func was hard-coded to ObjFn.spherical, silently
            # ignoring the `func` argument for every other benchmark.
            pso = PSO(w=w, c1=c1, c2=c2, bounds=bounds, obj_func=func)
            total += pso.run()
        return total / PSO_RUNS
class TabuSearch:
    """
    Tabu Search over the (w, c1, c2) coefficient space to find the best
    performing PSO configuration for a given objective function.
    """

    def __init__(self, objective_function, boundaries, TS_MOVES):
        """
        Set up tabu memory, the fitness cache and the move budget.
        """
        self.tabuMemory = deque(maxlen=TS_MEMORY)
        self.func = objective_function
        self.bounds = boundaries
        self.cache = dict()   # (w, c1, c2) -> averaged PSO result
        self.moves = TS_MOVES

    def solutionFitness(self, solution):
        """
        Fitness of a coefficient triple = best error of one PSO run with
        those coefficients (memoized when ENABLE_TS_CACHE is set).
        """
        if ENABLE_TS_CACHE:
            if solution not in self.cache:
                pso = PSO(w=solution[0], c1=solution[1], c2=solution[2], obj_func=self.func, bounds=self.bounds)
                self.cache[solution] = pso.run()
            return self.cache.get(solution)
        pso = PSO(w=solution[0], c1=solution[1], c2=solution[2], obj_func=self.func, bounds=self.bounds)
        return pso.run()

    def memorize(self, solution):
        """
        Record a solution in tabu memory, keyed by its 1-decimal string form.
        """
        self.tabuMemory.append(";".join(format(f, '.1f') for f in solution))

    def tabuCriteria(self, solution):
        """
        Return True when the solution is NOT tabooed.
        """
        # renamed from `str`, which shadowed the builtin
        key = ";".join(format(f, '.1f') for f in solution)
        return key not in self.tabuMemory

    def putativeNeighbors(self, solution):
        """
        Neighbors: all triples within +/-0.1 per coefficient that satisfy
        the order-1/-2 stability condition and are not tabooed.
        """
        neighbors = []
        for a in [-0.1, 0, 0.1]:
            for b in [-0.1, 0, 0.1]:
                for c in [-0.1, 0, 0.1]:
                    n = list(solution).copy()
                    n[0] += a
                    n[1] += b
                    n[2] += c
                    if TabuSearch.stabilityCheck(n) and self.tabuCriteria(n):
                        neighbors.append(tuple(n))
        return neighbors

    def stabilityCheck(solution):
        """
        Order-1 and order-2 stability condition on (w, c1, c2).
        """
        w = solution[0]
        c1 = solution[1]
        c2 = solution[2]
        return abs(w) < 1 and c1 + c2 > 0 and c1 + c2 < 24 * (1 - w**2) / (7 - 5*w)

    def randomSolution():
        """
        Random initial (w, c1, c2) drawn from ranges intended to satisfy the
        stability condition (not guaranteed for every draw - e.g. w=0.9 with
        c1+c2=3.4 violates it; callers should check if that matters).
        """
        rand_velocity = random.randint(4, 9) / 10
        rand_cognitive = random.randint(11, 17) / 10
        rand_social = random.randint(11, 17) / 10
        return (rand_velocity, rand_cognitive, rand_social)

    def run(self, init_solution):
        """
        Perform a Tabu Search run from init_solution; return the best
        (w, c1, c2, fitness) tuple found within self.moves moves.
        """
        self.best_solution = init_solution
        self.memorize(self.best_solution)
        self.curr_solution = self.best_solution
        for i in range(self.moves):
            if i in [300, 600, 900, 1200, 1500, 1800]:
                print("At step {}".format(i))
            neighbors = self.putativeNeighbors(self.curr_solution)
            bestFit = np.finfo(np.float64).max
            # move to the best non-tabooed neighbor (possibly worsening)
            for solution in neighbors:
                if self.solutionFitness(solution) < bestFit:
                    self.curr_solution = solution
                    bestFit = self.solutionFitness(solution)
            # memorize the current solution
            self.memorize(self.curr_solution)
            # keep the overall best solution seen so far
            if self.solutionFitness(self.curr_solution) < self.solutionFitness(self.best_solution):
                self.best_solution = self.curr_solution
                if ENABLE_EXEC_LOG:
                    print("Next best solution ({}) on step {};\tResult: {:.10f}\t cache size: {}".format(" ".join(format(f, '.1f') for f in self.best_solution), i, bestFit, len(self.cache)))
                    print(len(self.cache.keys()))
        res = self.solutionFitness(self.best_solution)
        return self.best_solution + (res,)
def main():
    """
    Tune PSO coefficients with Tabu Search for each benchmark objective.

    TS_MOVES (last tuple element below) is our control parameter.  The four
    previously copy-pasted blocks are folded into one data-driven loop; the
    printed output is byte-identical to the original (including the
    historical lowercase "katsuura" result tag).
    """
    # df = BenchmarkPSO.run_benchmark()  # these commands store benchmark function parameters
    # df.to_csv('pso_benchmark.csv')
    cases = [
        ("Test on Spherical Function:\n", "Spherical", ObjFn.spherical, ObjFn.spherical_bounds, 20),
        ("Test on Ackley Function:\n", "Ackley", ObjFn.ackley, ObjFn.ackley_bounds, 20),
        ("Test on Michalewicz Function:\n", "Michalewicz", ObjFn.michalewicz, ObjFn.michalewicz_bounds, 20),
        ("Test on Katsuura Function:\n", "katsuura", ObjFn.katsuura, ObjFn.katsuura_bounds, 80),
    ]
    for title, tag, objective, bounds, moves in cases:
        print(title)
        init_solution = TabuSearch.randomSolution()
        print("Initial coefficients: {}".format(init_solution))
        searcher = TabuSearch(objective_function=objective, boundaries=bounds, TS_MOVES=moves)
        res = searcher.run(init_solution)
        print("{} Best performing coefficients: w={:.1f} c1={:.1f} c2={:.1f}\nPSO result: {:.20f}".format(tag, res[0], res[1], res[2], res[3]))
if __name__ == "__main__":
# main(sys.argv)
main() | [
"queue.deque",
"random.uniform",
"math.floor",
"math.cos",
"numpy.finfo",
"pandas.DataFrame",
"random.random",
"math.sin",
"random.randint",
"numpy.arange",
"math.exp"
] | [((5777, 5803), 'numpy.arange', 'np.arange', (['(-1.1)', '(1.15)', '(0.1)'], {}), '(-1.1, 1.15, 0.1)\n', (5786, 5803), True, 'import numpy as np\n'), ((6542, 6658), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {'columns': "['inertia', 'cognitive', 'social', 'spherical', 'ackley', 'michalewicz',\n 'katsuura']"}), "(data, columns=['inertia', 'cognitive', 'social', 'spherical',\n 'ackley', 'michalewicz', 'katsuura'])\n", (6554, 6658), True, 'import pandas as pd\n'), ((7298, 7321), 'queue.deque', 'deque', ([], {'maxlen': 'TS_MEMORY'}), '(maxlen=TS_MEMORY)\n', (7303, 7321), False, 'from queue import deque\n'), ((3177, 3192), 'random.random', 'random.random', ([], {}), '()\n', (3190, 3192), False, 'import random\n'), ((3210, 3225), 'random.random', 'random.random', ([], {}), '()\n', (3223, 3225), False, 'import random\n'), ((4201, 4221), 'numpy.finfo', 'np.finfo', (['np.float64'], {}), '(np.float64)\n', (4209, 4221), True, 'import numpy as np\n'), ((4738, 4749), 'math.exp', 'math.exp', (['(1)'], {}), '(1)\n', (4746, 4749), False, 'import math\n'), ((5826, 5854), 'numpy.arange', 'np.arange', (['(0.05)', '(2.525)', '(0.05)'], {}), '(0.05, 2.525, 0.05)\n', (5835, 5854), True, 'import numpy as np\n'), ((9771, 9791), 'random.randint', 'random.randint', (['(4)', '(9)'], {}), '(4, 9)\n', (9785, 9791), False, 'import random\n'), ((9850, 9872), 'random.randint', 'random.randint', (['(11)', '(17)'], {}), '(11, 17)\n', (9864, 9872), False, 'import random\n'), ((9929, 9951), 'random.randint', 'random.randint', (['(11)', '(17)'], {}), '(11, 17)\n', (9943, 9951), False, 'import random\n'), ((891, 927), 'random.uniform', 'random.uniform', (['bounds[0]', 'bounds[1]'], {}), '(bounds[0], bounds[1])\n', (905, 927), False, 'import random\n'), ((2559, 2580), 'random.uniform', 'random.uniform', (['(-1)', '(1)'], {}), '(-1, 1)\n', (2573, 2580), False, 'import random\n'), ((4523, 4551), 'math.cos', 'math.cos', (['(2 * math.pi * x[i])'], {}), '(2 * math.pi * x[i])\n', (4531, 4551), False, 
'import math\n'), ((4883, 4897), 'math.sin', 'math.sin', (['x[i]'], {}), '(x[i])\n', (4891, 4897), False, 'import math\n'), ((10639, 10659), 'numpy.finfo', 'np.finfo', (['np.float64'], {}), '(np.float64)\n', (10647, 10659), True, 'import numpy as np\n'), ((4707, 4732), 'math.exp', 'math.exp', (['(1 / n * res_cos)'], {}), '(1 / n * res_cos)\n', (4715, 4732), False, 'import math\n'), ((4901, 4934), 'math.sin', 'math.sin', (['(i * x[i] ** 2 / math.pi)'], {}), '(i * x[i] ** 2 / math.pi)\n', (4909, 4934), False, 'import math\n'), ((6362, 6381), 'math.floor', 'math.floor', (['(c * 100)'], {}), '(c * 100)\n', (6372, 6381), False, 'import math\n')] |
# -*- coding: utf-8 -*-
"""
Coursework 6 Extra: Phase space of Lorenz Attractor
"""
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
# Compute the derivative dq from an array of consecutive points q with initial
# derivative dq0 with timestep d
def deriv(q, d=0.001):
    """Forward-difference derivative of an array of consecutive points.

    The original docstring mentioned a nonexistent ``dq0`` parameter; the
    hand-rolled slicing is replaced by the equivalent ``np.diff``.

    :param q: (n, k) array, one orbit point per row
    :param d: timestep between consecutive rows
    :return: (n-1, k) array with the estimated derivative dq/dt
    """
    return np.diff(q, axis=0) / d
# Differential equation of Lorenz Attractor
def F(q):
    """Right-hand side of the Lorenz system (sigma=10, rho=28, beta=8/3)."""
    x, y, z = q[0], q[1], q[2]
    return np.array([10 * (y - x),
                     28 * x - y - x * z,
                     x * y - (8 / 3) * z])
# Compute the orbit of the differential equation F with initial values q0 and dq0,
# n points and timestep d
def orb(n, q0, F, d=0.001):
    """Integrate dq/dt = F(q) with the explicit Euler scheme.

    :param n: number of integration steps
    :param q0: initial point (length 3)
    :param F: vector field, called as F(q)
    :param d: timestep
    :return: (n+1, 3) array of orbit points, starting at q0
    """
    traj = np.empty([n + 1, 3])
    traj[0, :] = q0
    for step in range(1, n + 1):
        prev = traj[step - 1, :]
        traj[step, :] = prev + d * F(prev)
    return traj
# Plot an orbit of the differential equation F with initial values q0 and dq0,
# n points and timestep d with color col and linestyle marker. Return orbits computed
def simplectica(q0, F, ax1, ax2, ax3, col=0, d=10**(-4), n=int(16/10**(-4)), marker='-'):
    """Plot one phase-space orbit (q_i vs p_i, i=1..3) on the three axes.

    The orbit of F starting at q0 is integrated with timestep d and n steps;
    p is approximated as half the numerical derivative of q.  `col` indexes
    a 125-level "inferno" colormap.  NOTE(review): `marker` is accepted but
    never used by the plotting calls.

    :return: the computed (q, p) arrays (q truncated to match p's length)
    """
    traj = orb(n, q0=q0, F=F, d=d)
    vel = deriv(traj, d=d)
    traj = traj[:-1, :]
    mom = vel / 2
    shade = plt.get_cmap("inferno", 125)(col)
    for axis, column in ((ax1, 0), (ax2, 1), (ax3, 2)):
        axis.plot(traj[:, column], mom[:, column], linewidth=0.25, c=shade)
    return traj, mom
# Compute for some n the estimation of the box-counting dimension for
# a cover of epsilon = 2/(n-1) 6-squares
def box_count(points, n):
    """Count the occupied boxes of a 6D grid over [-1, 1]^6.

    The grid has n ticks (n-1 bins of side epsilon = 2/(n-1)) per axis;
    a box counts as occupied when at least one point falls inside it.
    """
    edges = np.linspace(-1, 1, n)
    hist, _ = np.histogramdd(points, bins=(edges,) * 6)
    occupied = np.sum(hist > 0)
    return occupied
#%%
"""
Exercise 2: Plot (q1,q2,q3) and (p1,p2,p3)
"""
# Compute orbit: Euler integration of the Lorenz system, 30000 steps from (1,1,1)
d = 1e-3
q = orb(30000,[1,1,1],F)
dq = deriv(q)
# p is approximated as half the numerical derivative of q
p = dq/2
# Plot (q1,q2,q3): the attractor itself, initial point marked in red
fig = plt.figure()
ax = plt.axes(projection = '3d')
ax.scatter3D(1,1,1,c='r')
ax.plot3D(q[:,0],q[:,1],q[:,2])
# Plot (p1,p2,p3)
# NOTE(review): the red marker uses p[0,0], p[1,1], p[2,2] (a diagonal of the
# first rows), not the first point p[0,:] -- confirm this is intentional.
fig = plt.figure()
ax = plt.axes(projection = '3d')
ax.scatter3D(p[0,0],p[1,1],p[2,2],c='r')
ax.plot3D(p[:,0],p[:,1],p[:,2])
#%%
"""
Exercise 3: Plot (q1,p1), (q2,p2) and (q3,p3)
"""
# Compute ticks of interval: C (5 evenly spaced initial values in [-1, 1])
seq_q0 = np.linspace(-1,1.,num=5)
# Initialise figures
# NOTE(review): `col` is assigned but never read below; the colormap index
# actually used is the counter `c`.
col = np.linspace(0,1,125)
c = 0
fig = plt.figure()
ax1 = fig.add_subplot(1,1, 1)
fig = plt.figure()
ax2 = fig.add_subplot(1,1, 1)
fig = plt.figure()
ax3 = fig.add_subplot(1,1, 1)
# Store data for next exercise
# NOTE(review): the initial all-zero row of allq/allp is never removed and is
# later included in the box-counting data set.
allq = np.zeros((1,3))
allp = np.zeros((1,3))
# Plot orbit for initial conditions in C x C x C (5^3 = 125 orbits)
for i in range(len(seq_q0)):
    for j in range(len(seq_q0)):
        for k in range(len(seq_q0)):
            q0 = [seq_q0[i], seq_q0[j], seq_q0[k]]
            q,p = simplectica(q0=q0,F=F,ax1=ax1,ax2=ax2,ax3=ax3,col=c,marker='ro',
                             d= 10**(-3),n=int(30/d))
            c = c + 1
            # Store data for next exercise
            allq = np.concatenate((allq,q),axis =0)
            allp = np.concatenate((allp,p),axis =0)
ax1.set_xlabel("q(t)", fontsize=12)
ax1.set_ylabel("p(t)", fontsize=12)
ax2.set_xlabel("q(t)", fontsize=12)
ax2.set_ylabel("p(t)", fontsize=12)
ax3.set_xlabel("q(t)", fontsize=12)
ax3.set_ylabel("p(t)", fontsize=12)
plt.show()
#%%
"""
Exercise 4: Hausdorff dimension
"""
# Grid resolutions to test: n ticks per axis -> box side epsilon = 2/(n-1)
narr = range(5,28)
# 6D phase-space points (q1,q2,q3,p1,p2,p3) gathered in exercise 3
points = np.array(list(zip(allq[:,0],allq[:,1],allq[:,2],allp[:,0],allp[:,1],allp[:,2])))
# Normalize into [-1, 1]^6 so the box_count grid covers all points
m = np.max(np.abs(points))
points = points/m
# Test dimension d (note: rebinds the earlier timestep variable `d`)
d = 2.75
Hd = []   # d-dimensional Hausdorff measure estimates
He = []   # box-counting dimension estimates
for n in narr:
    # count boxes with side epsilon=2/(n-1)
    count = box_count(points,n)
    # diameter of each box: sqrt(6) * epsilon = sqrt(24)/(n-1) in 6D
    diam = np.sqrt(24)/(n-1)
    Hd.append(count*(diam**d))
    eps = 2/(n-1)
    He.append(np.log(count)/np.log(1/eps))
# Plot evolution of H^d_{\epsilon}
plt.figure()
plt.plot(narr,Hd)
plt.xlabel('$n$')
plt.ylabel('$H^{d}_{\epsilon}(E)$')
# Plot evolution of dim_{box}(\epsilon)
plt.figure()
plt.plot(narr,He)
plt.xlabel('$n$')
plt.ylabel('$dim_{box}(\epsilon)$') | [
"numpy.abs",
"numpy.sqrt",
"numpy.histogramdd",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"numpy.log",
"numpy.array",
"matplotlib.pyplot.figure",
"numpy.zeros",
"matplotlib.pyplot.axes",
"numpy.linspace",
"numpy.empty",
"numpy.sum",
"numpy.concaten... | [((1819, 1831), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1829, 1831), True, 'import matplotlib.pyplot as plt\n'), ((1837, 1862), 'matplotlib.pyplot.axes', 'plt.axes', ([], {'projection': '"""3d"""'}), "(projection='3d')\n", (1845, 1862), True, 'import matplotlib.pyplot as plt\n'), ((1948, 1960), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1958, 1960), True, 'import matplotlib.pyplot as plt\n'), ((1966, 1991), 'matplotlib.pyplot.axes', 'plt.axes', ([], {'projection': '"""3d"""'}), "(projection='3d')\n", (1974, 1991), True, 'import matplotlib.pyplot as plt\n'), ((2168, 2195), 'numpy.linspace', 'np.linspace', (['(-1)', '(1.0)'], {'num': '(5)'}), '(-1, 1.0, num=5)\n', (2179, 2195), True, 'import numpy as np\n'), ((2221, 2243), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(125)'], {}), '(0, 1, 125)\n', (2232, 2243), True, 'import numpy as np\n'), ((2254, 2266), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (2264, 2266), True, 'import matplotlib.pyplot as plt\n'), ((2303, 2315), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (2313, 2315), True, 'import matplotlib.pyplot as plt\n'), ((2352, 2364), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (2362, 2364), True, 'import matplotlib.pyplot as plt\n'), ((2434, 2450), 'numpy.zeros', 'np.zeros', (['(1, 3)'], {}), '((1, 3))\n', (2442, 2450), True, 'import numpy as np\n'), ((2457, 2473), 'numpy.zeros', 'np.zeros', (['(1, 3)'], {}), '((1, 3))\n', (2465, 2473), True, 'import numpy as np\n'), ((3222, 3232), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3230, 3232), True, 'import matplotlib.pyplot as plt\n'), ((3780, 3792), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (3790, 3792), True, 'import matplotlib.pyplot as plt\n'), ((3793, 3811), 'matplotlib.pyplot.plot', 'plt.plot', (['narr', 'Hd'], {}), '(narr, Hd)\n', (3801, 3811), True, 'import matplotlib.pyplot as plt\n'), ((3811, 3828), 
'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$n$"""'], {}), "('$n$')\n", (3821, 3828), True, 'import matplotlib.pyplot as plt\n'), ((3829, 3865), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$H^{d}_{\\\\epsilon}(E)$"""'], {}), "('$H^{d}_{\\\\epsilon}(E)$')\n", (3839, 3865), True, 'import matplotlib.pyplot as plt\n'), ((3906, 3918), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (3916, 3918), True, 'import matplotlib.pyplot as plt\n'), ((3919, 3937), 'matplotlib.pyplot.plot', 'plt.plot', (['narr', 'He'], {}), '(narr, He)\n', (3927, 3937), True, 'import matplotlib.pyplot as plt\n'), ((3937, 3954), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$n$"""'], {}), "('$n$')\n", (3947, 3954), True, 'import matplotlib.pyplot as plt\n'), ((3955, 3991), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$dim_{box}(\\\\epsilon)$"""'], {}), "('$dim_{box}(\\\\epsilon)$')\n", (3965, 3991), True, 'import matplotlib.pyplot as plt\n'), ((532, 560), 'numpy.array', 'np.array', (['[ddq1, ddq2, ddq3]'], {}), '([ddq1, ddq2, ddq3])\n', (540, 560), True, 'import numpy as np\n'), ((706, 726), 'numpy.empty', 'np.empty', (['[n + 1, 3]'], {}), '([n + 1, 3])\n', (714, 726), True, 'import numpy as np\n'), ((753, 772), 'numpy.arange', 'np.arange', (['(1)', '(n + 1)'], {}), '(1, n + 1)\n', (762, 772), True, 'import numpy as np\n'), ((1529, 1550), 'numpy.linspace', 'np.linspace', (['(-1)', '(1)', 'n'], {}), '(-1, 1, n)\n', (1540, 1550), True, 'import numpy as np\n'), ((1559, 1618), 'numpy.histogramdd', 'np.histogramdd', (['points'], {'bins': '(lim, lim, lim, lim, lim, lim)'}), '(points, bins=(lim, lim, lim, lim, lim, lim))\n', (1573, 1618), True, 'import numpy as np\n'), ((1628, 1641), 'numpy.sum', 'np.sum', (['(H > 0)'], {}), '(H > 0)\n', (1634, 1641), True, 'import numpy as np\n'), ((3411, 3425), 'numpy.abs', 'np.abs', (['points'], {}), '(points)\n', (3417, 3425), True, 'import numpy as np\n'), ((1170, 1198), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['"""inferno"""', 
'(125)'], {}), "('inferno', 125)\n", (1182, 1198), True, 'import matplotlib.pyplot as plt\n'), ((3625, 3636), 'numpy.sqrt', 'np.sqrt', (['(24)'], {}), '(24)\n', (3632, 3636), True, 'import numpy as np\n'), ((2908, 2941), 'numpy.concatenate', 'np.concatenate', (['(allq, q)'], {'axis': '(0)'}), '((allq, q), axis=0)\n', (2922, 2941), True, 'import numpy as np\n'), ((2960, 2993), 'numpy.concatenate', 'np.concatenate', (['(allp, p)'], {'axis': '(0)'}), '((allp, p), axis=0)\n', (2974, 2993), True, 'import numpy as np\n'), ((3711, 3724), 'numpy.log', 'np.log', (['count'], {}), '(count)\n', (3717, 3724), True, 'import numpy as np\n'), ((3725, 3740), 'numpy.log', 'np.log', (['(1 / eps)'], {}), '(1 / eps)\n', (3731, 3740), True, 'import numpy as np\n')] |
import numpy as np
from tools import gauss, gauss2
from math import sqrt, pi
from markov_chain import simu_mc, simu_mc_nonstat, calc_probaprio_mc
def forward_neigh(A, p, gauss, g2, g3):
    """Compute the (per-step normalized) forward probabilities of the chain.

    :param A: 2x2 transition matrix of the Markov chain
    :param p: length-2 vector of prior class probabilities
    :param gauss: (signal length, 2) array of Gaussian densities for each
        noisy observation
    :param g2: (signal length, 2) array of densities for the first
        neighbour sequence
    :param g3: (signal length, 2) array of densities for the second
        neighbour sequence
    :return: (signal length, 2) array of forward values, each row
        normalized to sum to one
    """
    n_obs = len(gauss)
    alpha = np.zeros((n_obs, 2))
    # Initialisation: prior times the full emission term (observation and
    # both neighbour factors propagated through A).
    emission = gauss[0] * (A @ g2[0]) * (A @ g3[0])
    alpha[0] = p * emission
    alpha[0] /= alpha[0].sum()
    # Induction: classic forward recursion, rescaled at every step to
    # avoid numerical underflow.
    for t in range(1, n_obs):
        emission = gauss[t] * (A @ g2[t]) * (A @ g3[t])
        step = emission * (alpha[t - 1] @ A)
        alpha[t] = step / step.sum()
    return alpha
def backward_neigh(A, gauss, g2, g3):
    """Compute the (per-step normalized) backward probabilities of the chain.

    :param A: 2x2 transition matrix of the Markov chain
    :param gauss: (signal length, 2) array of Gaussian densities for each
        noisy observation
    :param g2: (signal length, 2) array of densities for the first
        neighbour sequence
    :param g3: (signal length, 2) array of densities for the second
        neighbour sequence
    :return: (signal length, 2) array of backward values ordered from the
        start to the end of the chain, each row normalized to sum to one
    """
    n_obs = len(gauss)
    beta = np.zeros((n_obs, 2))
    # Termination: uniform vector at the last position (normalized).
    last = np.ones(2)
    beta[n_obs - 1] = last / last.sum()
    # Backward induction from the end of the chain, rescaling each step
    # for numerical stability.
    for t in range(n_obs - 2, -1, -1):
        emission = gauss[t + 1] * (A @ g2[t + 1]) * (A @ g3[t + 1])
        step = A @ (beta[t + 1] * emission)
        beta[t] = step / step.sum()
    return beta
def mpm_mc_neigh(signal_noisy, neighboursh, neighboursv, w, p, A, m1, sig1, m2, sig2):
    """Restore the hidden signal from its noisy version with the MPM rule.

    :param signal_noisy: noisy signal (1D float numpy array)
    :param neighboursh: first neighbour sequence (horizontal, per caller)
    :param neighboursv: second neighbour sequence (vertical, per caller)
    :param w: length-2 vector holding the values of class w1 and class w2
    :param p: length-2 vector of prior class probabilities
    :param A: 2x2 transition matrix of the chain
    :param m1: mean of the first Gaussian
    :param sig1: standard deviation of the first Gaussian
    :param m2: mean of the second Gaussian
    :param sig2: standard deviation of the second Gaussian
    :return: restored two-class discrete signal (1D numpy array)
    """
    densities = gauss(signal_noisy, m1, sig1, m2, sig2)
    dens_h = gauss2(neighboursh, m1, sig1, m2, sig2)
    dens_v = gauss2(neighboursv, m1, sig1, m2, sig2)
    alpha = forward_neigh(A, p, densities, dens_h, dens_v)
    beta = backward_neigh(A, densities, dens_h, dens_v)
    # Marginal posterior of each hidden state, normalized per time step;
    # the MPM estimator picks the most probable class at every position.
    posterior = alpha * beta
    posterior = posterior / posterior.sum(axis=1)[..., np.newaxis]
    return w[np.argmax(posterior, axis=1)]
def calc_param_EM_mc_neigh(signal_noisy, neighboursh, neighboursv, p, A, m1, sig1, m2, sig2):
    """Perform one EM iteration and return the re-estimated parameters.

    :param signal_noisy: noisy signal (1D float numpy array)
    :param neighboursh: first neighbour sequence -- presumably horizontal
        neighbours; confirm against the caller
    :param neighboursv: second neighbour sequence -- presumably vertical
        neighbours; confirm against the caller
    :param p: length-2 vector of prior class probabilities
    :param A: 2x2 transition matrix of the chain
    :param m1: mean of the first Gaussian
    :param sig1: standard deviation of the first Gaussian
    :param m2: mean of the second Gaussian
    :param sig2: standard deviation of the second Gaussian
    :return: re-estimated parameters p, A, m1, sig1, m2, sig2
    """
    # --- E step: emission densities and forward/backward smoothing ---
    gausses = gauss(signal_noisy, m1, sig1, m2, sig2)
    g2 = gauss2(neighboursh, m1, sig1, m2, sig2)
    g3 = gauss2(neighboursv, m1, sig1, m2, sig2)
    # Neighbour densities propagated through A for every time step
    # (proba2[k] = A @ g2[k], likewise for proba3).
    proba2 = np.einsum('ij,kj->ki',A,g2)
    proba3 = np.einsum('ij,kj->ki',A,g3)
    alpha = forward_neigh(A, p, gausses, g2, g3)
    beta = backward_neigh(A, gausses, g2, g3)
    # Marginal posterior of each hidden state, normalized per time step.
    proba_apost = alpha * beta
    proba_apost = proba_apost / (proba_apost.sum(axis=1)[..., np.newaxis])
    # --- M step ---
    # Prior: average posterior occupancy over the whole chain.
    p = proba_apost.sum(axis=0)/proba_apost.shape[0]
    # Joint posterior of consecutive state pairs (t, t+1); shape
    # (T-1, 2, 2) after broadcasting, normalized per time step below.
    proba_c_apost = (
        alpha[:-1, :, np.newaxis]
        * ((gausses[1:, np.newaxis, :]*proba2[1:, np.newaxis, :]*proba3[1:, np.newaxis, :])
        * beta[1:, np.newaxis, :]
        * A[np.newaxis, :, :])
    )
    proba_c_apost = proba_c_apost / (proba_c_apost.sum(axis=(1, 2))[..., np.newaxis, np.newaxis])
    # Transition matrix: expected pair counts divided row-wise by the
    # expected occupancy of the source state (transposes implement the
    # row-wise division).
    A = np.transpose(np.transpose((proba_c_apost.sum(axis=0))) / (proba_apost[:-1:].sum(axis=0)))
    # Gaussian parameters: posterior-weighted means and standard deviations.
    m1 = (proba_apost[:,0] * signal_noisy).sum()/proba_apost[:,0].sum()
    sig1 = np.sqrt((proba_apost[:,0]*((signal_noisy-m1)**2)).sum()/proba_apost[:,0].sum())
    m2 = (proba_apost[:, 1] * signal_noisy).sum() / proba_apost[:, 1].sum()
    sig2 = np.sqrt((proba_apost[:, 1] * ((signal_noisy - m2) ** 2)).sum() / proba_apost[:, 1].sum())
    return p, A, m1, sig1, m2, sig2
def estim_param_EM_mc_neigh(iter, signal_noisy, neighboursh, neighboursv, p, A, m1, sig1, m2, sig2):
    """Run the EM algorithm for this model.

    :param iter: number of EM iterations to run
    :param signal_noisy: noisy signal (1D float numpy array)
    :param neighboursh: first neighbour sequence
    :param neighboursv: second neighbour sequence
    :param p: initial value of the prior probability vector
    :param A: initial value of the chain transition matrix
    :param m1: initial mean of the first Gaussian
    :param sig1: initial standard deviation of the first Gaussian
    :param m2: initial mean of the second Gaussian
    :param sig2: initial standard deviation of the second Gaussian
    :return: the final re-estimated parameters p, A, m1, sig1, m2, sig2
    """
    # Thread the whole parameter set through the iterations as one tuple.
    params = (p, A, m1, sig1, m2, sig2)
    for step in range(iter):
        params = calc_param_EM_mc_neigh(signal_noisy, neighboursh, neighboursv, *params)
        p_est, A_est, m1_est, sig1_est, m2_est, sig2_est = params
        # Progress trace: one dict per iteration, same keys as before.
        print({'iter': step, 'p': p_est, 'A': A_est, 'm1': m1_est, 'sig1': sig1_est, 'm2': m2_est, 'sig2': sig2_est})
    return params
def calc_param_SEM_mc_neigh(signal_noisy, neighboursh, neighboursv, p, A, m1, sig1, m2, sig2):
    """Perform one SEM (stochastic EM) iteration and return new parameters.

    Unlike plain EM, the hidden signal is *simulated* from its posterior
    law and the parameters are re-estimated from that sampled signal, so
    successive calls are stochastic.

    :param signal_noisy: noisy signal (1D float numpy array)
    :param neighboursh: first neighbour sequence -- presumably horizontal
        neighbours; confirm against the caller
    :param neighboursv: second neighbour sequence -- presumably vertical
        neighbours; confirm against the caller
    :param p: length-2 vector of prior class probabilities
    :param A: 2x2 transition matrix of the chain
    :param m1: mean of the first Gaussian
    :param sig1: standard deviation of the first Gaussian
    :param m2: mean of the second Gaussian
    :param sig2: standard deviation of the second Gaussian
    :return: re-estimated parameters p, A, m1, sig1, m2, sig2
    """
    # Emission densities for the observation and both neighbour sequences.
    gausses = gauss(signal_noisy, m1, sig1, m2, sig2)
    g2 = gauss2(neighboursh, m1, sig1, m2, sig2)
    g3 = gauss2(neighboursv, m1, sig1, m2, sig2)
    # proba2[k] = A @ g2[k] (likewise proba3) for every time step.
    proba2 = np.einsum('ij,kj->ki',A,g2)
    proba3 = np.einsum('ij,kj->ki',A,g3)
    alpha = forward_neigh(A, p, gausses, g2, g3)
    beta = backward_neigh(A, gausses, g2, g3)
    # Posterior law of the initial state.
    proba_init = alpha[0] * beta[0]
    proba_init = proba_init / proba_init.sum()
    # Per-step posterior transition kernels, shape (T-1, 2, 2); rows are
    # normalized over the destination state (axis=2) below.
    tapost = (
        ((gausses[1:, np.newaxis, :]*proba2[1:, np.newaxis, :]*proba3[1:, np.newaxis, :])
        * beta[1:, np.newaxis, :]
        * A[np.newaxis, :, :])
    )
    tapost = tapost / tapost.sum(axis=2)[..., np.newaxis]
    # S step: draw a hidden signal from the non-stationary posterior chain.
    signal = simu_mc_nonstat(signal_noisy.shape[0], proba_init, tapost)
    # M step: empirical prior/transitions from the sampled signal, then
    # per-class empirical means and standard deviations.
    p,A = calc_probaprio_mc(signal, np.array([0,1]))
    m1 = ((signal==0) * signal_noisy).sum()/(signal==0).sum()
    sig1 = np.sqrt(((signal==0)*((signal_noisy-m1)**2)).sum()/(signal==0).sum())
    m2 = ((signal==1) * signal_noisy).sum()/(signal==1).sum()
    sig2 = np.sqrt(((signal == 1) * ((signal_noisy - m2) ** 2)).sum() / (signal == 1).sum())
    return p, A, m1, sig1, m2, sig2
def estim_param_SEM_mc_neigh(iter, signal_noisy, neighboursh, neighboursv, p, A, m1, sig1, m2, sig2):
    """Run the SEM algorithm for this model.

    :param iter: number of SEM iterations to run
    :param signal_noisy: noisy signal (1D float numpy array)
    :param neighboursh: first neighbour sequence
    :param neighboursv: second neighbour sequence
    :param p: initial value of the prior probability vector
    :param A: initial value of the chain transition matrix
    :param m1: initial mean of the first Gaussian
    :param sig1: initial standard deviation of the first Gaussian
    :param m2: initial mean of the second Gaussian
    :param sig2: initial standard deviation of the second Gaussian
    :return: the final re-estimated parameters p, A, m1, sig1, m2, sig2
    """
    # Thread the whole parameter set through the iterations as one tuple.
    params = (p, A, m1, sig1, m2, sig2)
    for step in range(iter):
        params = calc_param_SEM_mc_neigh(signal_noisy, neighboursh, neighboursv, *params)
        p_est, A_est, m1_est, sig1_est, m2_est, sig2_est = params
        # Progress trace: one dict per iteration, same keys as before.
        print({'iter': step, 'p': p_est, 'A': A_est, 'm1': m1_est, 'sig1': sig1_est, 'm2': m2_est, 'sig2': sig2_est})
    return params
| [
"numpy.ones",
"markov_chain.simu_mc_nonstat",
"numpy.argmax",
"tools.gauss2",
"numpy.array",
"numpy.einsum",
"tools.gauss"
] | [((1911, 1921), 'numpy.ones', 'np.ones', (['(2)'], {}), '(2)\n', (1918, 1921), True, 'import numpy as np\n'), ((3156, 3195), 'tools.gauss', 'gauss', (['signal_noisy', 'm1', 'sig1', 'm2', 'sig2'], {}), '(signal_noisy, m1, sig1, m2, sig2)\n', (3161, 3195), False, 'from tools import gauss, gauss2\n'), ((3205, 3244), 'tools.gauss2', 'gauss2', (['neighboursh', 'm1', 'sig1', 'm2', 'sig2'], {}), '(neighboursh, m1, sig1, m2, sig2)\n', (3211, 3244), False, 'from tools import gauss, gauss2\n'), ((3254, 3293), 'tools.gauss2', 'gauss2', (['neighboursv', 'm1', 'sig1', 'm2', 'sig2'], {}), '(neighboursv, m1, sig1, m2, sig2)\n', (3260, 3293), False, 'from tools import gauss, gauss2\n'), ((4261, 4300), 'tools.gauss', 'gauss', (['signal_noisy', 'm1', 'sig1', 'm2', 'sig2'], {}), '(signal_noisy, m1, sig1, m2, sig2)\n', (4266, 4300), False, 'from tools import gauss, gauss2\n'), ((4310, 4349), 'tools.gauss2', 'gauss2', (['neighboursh', 'm1', 'sig1', 'm2', 'sig2'], {}), '(neighboursh, m1, sig1, m2, sig2)\n', (4316, 4349), False, 'from tools import gauss, gauss2\n'), ((4359, 4398), 'tools.gauss2', 'gauss2', (['neighboursv', 'm1', 'sig1', 'm2', 'sig2'], {}), '(neighboursv, m1, sig1, m2, sig2)\n', (4365, 4398), False, 'from tools import gauss, gauss2\n'), ((4412, 4441), 'numpy.einsum', 'np.einsum', (['"""ij,kj->ki"""', 'A', 'g2'], {}), "('ij,kj->ki', A, g2)\n", (4421, 4441), True, 'import numpy as np\n'), ((4453, 4482), 'numpy.einsum', 'np.einsum', (['"""ij,kj->ki"""', 'A', 'g3'], {}), "('ij,kj->ki', A, g3)\n", (4462, 4482), True, 'import numpy as np\n'), ((7788, 7827), 'tools.gauss', 'gauss', (['signal_noisy', 'm1', 'sig1', 'm2', 'sig2'], {}), '(signal_noisy, m1, sig1, m2, sig2)\n', (7793, 7827), False, 'from tools import gauss, gauss2\n'), ((7837, 7876), 'tools.gauss2', 'gauss2', (['neighboursh', 'm1', 'sig1', 'm2', 'sig2'], {}), '(neighboursh, m1, sig1, m2, sig2)\n', (7843, 7876), False, 'from tools import gauss, gauss2\n'), ((7886, 7925), 'tools.gauss2', 'gauss2', (['neighboursv', 
'm1', 'sig1', 'm2', 'sig2'], {}), '(neighboursv, m1, sig1, m2, sig2)\n', (7892, 7925), False, 'from tools import gauss, gauss2\n'), ((7939, 7968), 'numpy.einsum', 'np.einsum', (['"""ij,kj->ki"""', 'A', 'g2'], {}), "('ij,kj->ki', A, g2)\n", (7948, 7968), True, 'import numpy as np\n'), ((7980, 8009), 'numpy.einsum', 'np.einsum', (['"""ij,kj->ki"""', 'A', 'g3'], {}), "('ij,kj->ki', A, g3)\n", (7989, 8009), True, 'import numpy as np\n'), ((8435, 8493), 'markov_chain.simu_mc_nonstat', 'simu_mc_nonstat', (['signal_noisy.shape[0]', 'proba_init', 'tapost'], {}), '(signal_noisy.shape[0], proba_init, tapost)\n', (8450, 8493), False, 'from markov_chain import simu_mc, simu_mc_nonstat, calc_probaprio_mc\n'), ((3508, 3538), 'numpy.argmax', 'np.argmax', (['proba_apost'], {'axis': '(1)'}), '(proba_apost, axis=1)\n', (3517, 3538), True, 'import numpy as np\n'), ((8530, 8546), 'numpy.array', 'np.array', (['[0, 1]'], {}), '([0, 1])\n', (8538, 8546), True, 'import numpy as np\n')] |
"""
'ratracer.shape' define all types of shapes for ray tracing to be used.
Contains classes:
:class:`.Identifiable` -
class defining local and brief ids for user's shapes.
:class:`.Plane` -
a plane specified by its normal and some point on its surface,
that can be treated as zero-point in 2D-coords on the plane.
"""
import numpy
from itertools import count
from ratracer.utils import normalize, TOLERANCE, vec3d
_SHADOWING_INDENT = TOLERANCE
_SHADOWING_INDENT_INV = 1 - _SHADOWING_INDENT
_NO_INTERSECTION = numpy.nan, numpy.nan
class Identifiable:
  """Mixin that equips each instance with a short sequential ``id``.

  Intended to be subclassed; ids are drawn from one shared class-level
  counter, so they are unique across every subclass instance.
  """

  _id_gen = count(0)

  def __init__(self):
    # Pull the next value from the shared counter.
    self.id = next(Identifiable._id_gen)
class Plane(Identifiable):
  """A plane defined by a point on its surface and its (unit) normal.

  The normal is normalized at construction time; several methods below
  rely on that unit length.
  """

  def __init__(self, init_point, normal):
    self._point = init_point
    self._normal = normalize(normal)  # unit length is assumed elsewhere
    super().__init__()

  def __str__(self):
    return repr(self)

  def __repr__(self):
    classname = self.__class__.__name__.lower()
    return f'{classname}({self._point} {self._normal})'

  def _aoa_cosine(self, direction):
    """ Returns a signed cosine of grazing angle (angle of arrival) for ray
    hitting towards `direction`.
    """
    return numpy.dot(direction, self._normal)

  def aoa_cosine(self, direction):
    """ Returns a cosine of grazing angle for ray hitting towards `direction`"""
    return numpy.abs(self._aoa_cosine(direction))

  def _distance_to(self, point):
    """ A signed distance between a `point` and the plane; a positive value
    means the normal directed to the same semi-plane as a point to be, negative
    - an oposite semi-plane
    """
    return numpy.dot(self._point - point, self._normal)

  def distance_to(self, point):
    """ Return the distance from the plane to a `point` """
    return numpy.abs(self._distance_to(point))

  def intersect(self, start, direction):
    """ Return the coeffient for an intersection point computation; an infinity
    value means orthogonality of `direction` and a plane normal, a negative
    value mean a ray specified by `start` and `direction is directed in
    opposition to the plane, thus in both cases there is no intersection

    Parameters
    __________
    start : 3-`numpy.ndarray`
      Starting point of ray to intersect
    direction : 3-`numpy.ndarray`
      A normalized direction of ray to intersect.

    Note
    ____
    The unit length of direction vector is crucial.
    """
    aoa_cosine = self._aoa_cosine(direction)
    # Near-orthogonal normal and direction: the ray runs parallel to the
    # plane, so there is no intersection.
    if numpy.abs(aoa_cosine) < TOLERANCE:
      return _NO_INTERSECTION
    length = self._distance_to(start) / aoa_cosine
    return (length, aoa_cosine) if length > 0 else _NO_INTERSECTION

  def is_intersected(self, start, direction):
    """ Check whether a ray specified by its :arg:`start` and :arg:`direction`
    crosses the plane. """
    # Fix: the previous `self.intersect(...) != _NO_INTERSECTION` compared a
    # tuple of NaNs and only worked via CPython's element-identity shortcut
    # on the shared `_NO_INTERSECTION` object. Testing the NaN marker
    # explicitly is robust regardless of object identity.
    length, _ = self.intersect(start, direction)
    return not numpy.isnan(length)

  def is_shadowing(self, start, delta):
    """ Check whether 'self' shadows the ray at a segement start - end. Note,
    that :arg:delta should not be normalized. """
    # `length` is a fraction of the segment since `delta` spans it; NaN
    # (no intersection) fails both comparisons and yields False.
    length, _ = self.intersect(start, delta)
    return length > _SHADOWING_INDENT and length < _SHADOWING_INDENT_INV

  def normal(self, point=None):
    """ Returns normal to the shape at a :arg:`point` (:arg:`point` is
    insufficient here). """
    return self._normal

  def project(self, point):
    """ Project a point on the plane. """
    return point + self._distance_to(point) * self._normal

  def reflect(self, point):
    """ Reflect a point from the plane. """
    return point + 2 * self._distance_to(point) * self._normal
def build(specs):
  """Create one :class:`Plane` per (point, normal) pair in *specs*.

  *specs* maps point-coordinate triples to normal-coordinate triples.
  """
  planes = []
  for point_coords, normal_coords in specs.items():
    planes.append(Plane(vec3d(*point_coords), vec3d(*normal_coords)))
  return planes
| [
"numpy.abs",
"ratracer.utils.vec3d",
"numpy.dot",
"itertools.count",
"ratracer.utils.normalize"
] | [((661, 668), 'itertools.count', 'count', ([], {}), '()\n', (666, 668), False, 'from itertools import count\n'), ((851, 868), 'ratracer.utils.normalize', 'normalize', (['normal'], {}), '(normal)\n', (860, 868), False, 'from ratracer.utils import normalize, TOLERANCE, vec3d\n'), ((1228, 1262), 'numpy.dot', 'numpy.dot', (['direction', 'self._normal'], {}), '(direction, self._normal)\n', (1237, 1262), False, 'import numpy\n'), ((1667, 1711), 'numpy.dot', 'numpy.dot', (['(self._point - point)', 'self._normal'], {}), '(self._point - point, self._normal)\n', (1676, 1711), False, 'import numpy\n'), ((2516, 2537), 'numpy.abs', 'numpy.abs', (['aoa_cosine'], {}), '(aoa_cosine)\n', (2525, 2537), False, 'import numpy\n'), ((3662, 3671), 'ratracer.utils.vec3d', 'vec3d', (['*i'], {}), '(*i)\n', (3667, 3671), False, 'from ratracer.utils import normalize, TOLERANCE, vec3d\n'), ((3673, 3682), 'ratracer.utils.vec3d', 'vec3d', (['*n'], {}), '(*n)\n', (3678, 3682), False, 'from ratracer.utils import normalize, TOLERANCE, vec3d\n')] |
import pandas as pd
import numpy as np
from .batch_generator import BatchGenerator
class TripletPKGenerator(BatchGenerator):
    """Batch generator for a generic triplet network using PK sampling.

    Every batch draws ``classes_in_batch`` classes (P) and up to
    ``samples_per_class`` rows per class (K), following the scheme
    described in this [paper](https://arxiv.org/abs/1703.07737).
    TODO: add more details
    """

    def __init__(self,
                 data: pd.DataFrame,
                 triplet_label,
                 classes_in_batch,
                 samples_per_class,
                 x_structure,
                 y_structure=None,
                 **kwargs):
        self.triplet_label = triplet_label
        # Class frequencies; its index is the pool we sample classes from.
        self.class_ref = data[triplet_label].value_counts()
        self.classes_in_batch = classes_in_batch
        self.sample_per_class = samples_per_class
        super().__init__(data, x_structure, y_structure, shuffle=False, **kwargs)

    def __len__(self):
        """Return the number of batches in one epoch."""
        batch_size = self.sample_per_class * self.classes_in_batch
        return int(np.ceil(self.data.shape[0] / batch_size))

    def _select_batch(self, index):
        """Core of the PK sampler: pick P classes, then K rows per class."""
        chosen = self.class_ref.sample(self.classes_in_batch).index.values
        mask = self.data[self.triplet_label].isin(chosen)
        subset = self.data.loc[mask, :]
        return subset.groupby(self.triplet_label).apply(self._select_samples_for_class)

    def _select_samples_for_class(self, df):
        """Return at most ``sample_per_class`` rows of *df*, sampled without
        replacement (the whole group when it is small enough)."""
        if df.shape[0] <= self.sample_per_class:
            return df
        return df.sample(self.sample_per_class, replace=False)
| [
"numpy.ceil"
] | [((1025, 1102), 'numpy.ceil', 'np.ceil', (['(self.data.shape[0] / (self.sample_per_class * self.classes_in_batch))'], {}), '(self.data.shape[0] / (self.sample_per_class * self.classes_in_batch))\n', (1032, 1102), True, 'import numpy as np\n')] |
import argparse
import os
import numpy as np
import autosklearn
import autosklearn.data
import autosklearn.data.competition_data_manager
from autosklearn.pipeline.classification import SimpleClassificationPipeline
# Command-line interface: positional <input dir> and <output dir>.
parser = argparse.ArgumentParser()
parser.add_argument('input')
parser.add_argument('output')
args = parser.parse_args()

# NOTE(review): `input` and `output` shadow/occupy common names (`input`
# is a builtin); kept unchanged for compatibility with the rest of the script.
input = args.input
dataset = 'jannis'  # fixed dataset name appended to the input directory
output = args.output
path = os.path.join(input, dataset)

# Load the competition-format dataset and unpack its train/valid/test splits.
D = autosklearn.data.competition_data_manager.CompetitionDataManager(path)
X = D.data['X_train']
y = D.data['Y_train']
X_valid = D.data['X_valid']
X_test = D.data['X_test']
# Replace the following array by a new ensemble
choices = \
[(0.160000, SimpleClassificationPipeline(configuration={
'balancing:strategy': 'weighting',
'classifier:__choice__': 'random_forest',
'classifier:random_forest:bootstrap': 'False',
'classifier:random_forest:criterion': 'gini',
'classifier:random_forest:max_depth': 'None',
'classifier:random_forest:max_features': 0.8230378717081245,
'classifier:random_forest:max_leaf_nodes': 'None',
'classifier:random_forest:min_samples_leaf': 19,
'classifier:random_forest:min_samples_split': 8,
'classifier:random_forest:min_weight_fraction_leaf': 0.0,
'classifier:random_forest:n_estimators': 100,
'imputation:strategy': 'mean',
'one_hot_encoding:minimum_fraction': 0.0015176084175820871,
'one_hot_encoding:use_minimum_fraction': 'True',
'preprocessor:__choice__': 'extra_trees_preproc_for_classification',
'preprocessor:extra_trees_preproc_for_classification:bootstrap': 'True',
'preprocessor:extra_trees_preproc_for_classification:criterion': 'entropy',
'preprocessor:extra_trees_preproc_for_classification:max_depth': 'None',
'preprocessor:extra_trees_preproc_for_classification:max_features': 2.6233562705637476,
'preprocessor:extra_trees_preproc_for_classification:min_samples_leaf': 3,
'preprocessor:extra_trees_preproc_for_classification:min_samples_split': 16,
'preprocessor:extra_trees_preproc_for_classification:min_weight_fraction_leaf': 0.0,
'preprocessor:extra_trees_preproc_for_classification:n_estimators': 100,
'rescaling:__choice__': 'standardize'})),
(0.100000, SimpleClassificationPipeline(configuration={
'balancing:strategy': 'weighting',
'classifier:__choice__': 'random_forest',
'classifier:random_forest:bootstrap': 'False',
'classifier:random_forest:criterion': 'gini',
'classifier:random_forest:max_depth': 'None',
'classifier:random_forest:max_features': 4.669880400352936,
'classifier:random_forest:max_leaf_nodes': 'None',
'classifier:random_forest:min_samples_leaf': 19,
'classifier:random_forest:min_samples_split': 7,
'classifier:random_forest:min_weight_fraction_leaf': 0.0,
'classifier:random_forest:n_estimators': 100,
'imputation:strategy': 'mean',
'one_hot_encoding:minimum_fraction': 0.007886826567333378,
'one_hot_encoding:use_minimum_fraction': 'True',
'preprocessor:__choice__': 'extra_trees_preproc_for_classification',
'preprocessor:extra_trees_preproc_for_classification:bootstrap': 'True',
'preprocessor:extra_trees_preproc_for_classification:criterion': 'entropy',
'preprocessor:extra_trees_preproc_for_classification:max_depth': 'None',
'preprocessor:extra_trees_preproc_for_classification:max_features': 2.7007942047071865,
'preprocessor:extra_trees_preproc_for_classification:min_samples_leaf': 1,
'preprocessor:extra_trees_preproc_for_classification:min_samples_split': 12,
'preprocessor:extra_trees_preproc_for_classification:min_weight_fraction_leaf': 0.0,
'preprocessor:extra_trees_preproc_for_classification:n_estimators': 100,
'rescaling:__choice__': 'standardize'})),
(0.100000, SimpleClassificationPipeline(configuration={
'balancing:strategy': 'weighting',
'classifier:__choice__': 'random_forest',
'classifier:random_forest:bootstrap': 'False',
'classifier:random_forest:criterion': 'gini',
'classifier:random_forest:max_depth': 'None',
'classifier:random_forest:max_features': 1.406221097074635,
'classifier:random_forest:max_leaf_nodes': 'None',
'classifier:random_forest:min_samples_leaf': 19,
'classifier:random_forest:min_samples_split': 12,
'classifier:random_forest:min_weight_fraction_leaf': 0.0,
'classifier:random_forest:n_estimators': 100,
'imputation:strategy': 'mean',
'one_hot_encoding:minimum_fraction': 0.0035146367585256835,
'one_hot_encoding:use_minimum_fraction': 'True',
'preprocessor:__choice__': 'extra_trees_preproc_for_classification',
'preprocessor:extra_trees_preproc_for_classification:bootstrap': 'False',
'preprocessor:extra_trees_preproc_for_classification:criterion': 'entropy',
'preprocessor:extra_trees_preproc_for_classification:max_depth': 'None',
'preprocessor:extra_trees_preproc_for_classification:max_features': 2.4922383207655763,
'preprocessor:extra_trees_preproc_for_classification:min_samples_leaf': 3,
'preprocessor:extra_trees_preproc_for_classification:min_samples_split': 11,
'preprocessor:extra_trees_preproc_for_classification:min_weight_fraction_leaf': 0.0,
'preprocessor:extra_trees_preproc_for_classification:n_estimators': 100,
'rescaling:__choice__': 'standardize'})),
(0.100000, SimpleClassificationPipeline(configuration={
'balancing:strategy': 'weighting',
'classifier:__choice__': 'random_forest',
'classifier:random_forest:bootstrap': 'False',
'classifier:random_forest:criterion': 'gini',
'classifier:random_forest:max_depth': 'None',
'classifier:random_forest:max_features': 3.0766500993287544,
'classifier:random_forest:max_leaf_nodes': 'None',
'classifier:random_forest:min_samples_leaf': 19,
'classifier:random_forest:min_samples_split': 6,
'classifier:random_forest:min_weight_fraction_leaf': 0.0,
'classifier:random_forest:n_estimators': 100,
'imputation:strategy': 'mean',
'one_hot_encoding:minimum_fraction': 0.0052442759651766795,
'one_hot_encoding:use_minimum_fraction': 'True',
'preprocessor:__choice__': 'extra_trees_preproc_for_classification',
'preprocessor:extra_trees_preproc_for_classification:bootstrap': 'False',
'preprocessor:extra_trees_preproc_for_classification:criterion': 'entropy',
'preprocessor:extra_trees_preproc_for_classification:max_depth': 'None',
'preprocessor:extra_trees_preproc_for_classification:max_features': 2.8345876814556745,
'preprocessor:extra_trees_preproc_for_classification:min_samples_leaf': 2,
'preprocessor:extra_trees_preproc_for_classification:min_samples_split': 5,
'preprocessor:extra_trees_preproc_for_classification:min_weight_fraction_leaf': 0.0,
'preprocessor:extra_trees_preproc_for_classification:n_estimators': 100,
'rescaling:__choice__': 'min/max'})),
(0.080000, SimpleClassificationPipeline(configuration={
'balancing:strategy': 'weighting',
'classifier:__choice__': 'random_forest',
'classifier:random_forest:bootstrap': 'False',
'classifier:random_forest:criterion': 'gini',
'classifier:random_forest:max_depth': 'None',
'classifier:random_forest:max_features': 3.992423276448683,
'classifier:random_forest:max_leaf_nodes': 'None',
'classifier:random_forest:min_samples_leaf': 20,
'classifier:random_forest:min_samples_split': 6,
'classifier:random_forest:min_weight_fraction_leaf': 0.0,
'classifier:random_forest:n_estimators': 100,
'imputation:strategy': 'mean',
'one_hot_encoding:minimum_fraction': 0.005060149532495579,
'one_hot_encoding:use_minimum_fraction': 'True',
'preprocessor:__choice__': 'extra_trees_preproc_for_classification',
'preprocessor:extra_trees_preproc_for_classification:bootstrap': 'True',
'preprocessor:extra_trees_preproc_for_classification:criterion': 'gini',
'preprocessor:extra_trees_preproc_for_classification:max_depth': 'None',
'preprocessor:extra_trees_preproc_for_classification:max_features': 2.9176773896808106,
'preprocessor:extra_trees_preproc_for_classification:min_samples_leaf': 4,
'preprocessor:extra_trees_preproc_for_classification:min_samples_split': 11,
'preprocessor:extra_trees_preproc_for_classification:min_weight_fraction_leaf': 0.0,
'preprocessor:extra_trees_preproc_for_classification:n_estimators': 100,
'rescaling:__choice__': 'standardize'})),
(0.080000, SimpleClassificationPipeline(configuration={
'balancing:strategy': 'weighting',
'classifier:__choice__': 'random_forest',
'classifier:random_forest:bootstrap': 'False',
'classifier:random_forest:criterion': 'gini',
'classifier:random_forest:max_depth': 'None',
'classifier:random_forest:max_features': 1.8427415302457764,
'classifier:random_forest:max_leaf_nodes': 'None',
'classifier:random_forest:min_samples_leaf': 19,
'classifier:random_forest:min_samples_split': 11,
'classifier:random_forest:min_weight_fraction_leaf': 0.0,
'classifier:random_forest:n_estimators': 100,
'imputation:strategy': 'mean',
'one_hot_encoding:minimum_fraction': 0.006094139631443715,
'one_hot_encoding:use_minimum_fraction': 'True',
'preprocessor:__choice__': 'extra_trees_preproc_for_classification',
'preprocessor:extra_trees_preproc_for_classification:bootstrap': 'False',
'preprocessor:extra_trees_preproc_for_classification:criterion': 'entropy',
'preprocessor:extra_trees_preproc_for_classification:max_depth': 'None',
'preprocessor:extra_trees_preproc_for_classification:max_features': 2.9054667656168744,
'preprocessor:extra_trees_preproc_for_classification:min_samples_leaf': 5,
'preprocessor:extra_trees_preproc_for_classification:min_samples_split': 9,
'preprocessor:extra_trees_preproc_for_classification:min_weight_fraction_leaf': 0.0,
'preprocessor:extra_trees_preproc_for_classification:n_estimators': 100,
'rescaling:__choice__': 'standardize'})),
(0.060000, SimpleClassificationPipeline(configuration={
'balancing:strategy': 'weighting',
'classifier:__choice__': 'random_forest',
'classifier:random_forest:bootstrap': 'False',
'classifier:random_forest:criterion': 'gini',
'classifier:random_forest:max_depth': 'None',
'classifier:random_forest:max_features': 1.4679336412447643,
'classifier:random_forest:max_leaf_nodes': 'None',
'classifier:random_forest:min_samples_leaf': 20,
'classifier:random_forest:min_samples_split': 7,
'classifier:random_forest:min_weight_fraction_leaf': 0.0,
'classifier:random_forest:n_estimators': 100,
'imputation:strategy': 'most_frequent',
'one_hot_encoding:minimum_fraction': 0.013856064466225412,
'one_hot_encoding:use_minimum_fraction': 'True',
'preprocessor:__choice__': 'extra_trees_preproc_for_classification',
'preprocessor:extra_trees_preproc_for_classification:bootstrap': 'False',
'preprocessor:extra_trees_preproc_for_classification:criterion': 'gini',
'preprocessor:extra_trees_preproc_for_classification:max_depth': 'None',
'preprocessor:extra_trees_preproc_for_classification:max_features': 2.821607178538829,
'preprocessor:extra_trees_preproc_for_classification:min_samples_leaf': 1,
'preprocessor:extra_trees_preproc_for_classification:min_samples_split': 2,
'preprocessor:extra_trees_preproc_for_classification:min_weight_fraction_leaf': 0.0,
'preprocessor:extra_trees_preproc_for_classification:n_estimators': 100,
'rescaling:__choice__': 'min/max'})),
(0.060000, SimpleClassificationPipeline(configuration={
'balancing:strategy': 'weighting',
'classifier:__choice__': 'random_forest',
'classifier:random_forest:bootstrap': 'False',
'classifier:random_forest:criterion': 'gini',
'classifier:random_forest:max_depth': 'None',
'classifier:random_forest:max_features': 1.3155071079117242,
'classifier:random_forest:max_leaf_nodes': 'None',
'classifier:random_forest:min_samples_leaf': 20,
'classifier:random_forest:min_samples_split': 8,
'classifier:random_forest:min_weight_fraction_leaf': 0.0,
'classifier:random_forest:n_estimators': 100,
'imputation:strategy': 'mean',
'one_hot_encoding:minimum_fraction': 0.0035361733150731144,
'one_hot_encoding:use_minimum_fraction': 'True',
'preprocessor:__choice__': 'extra_trees_preproc_for_classification',
'preprocessor:extra_trees_preproc_for_classification:bootstrap': 'False',
'preprocessor:extra_trees_preproc_for_classification:criterion': 'gini',
'preprocessor:extra_trees_preproc_for_classification:max_depth': 'None',
'preprocessor:extra_trees_preproc_for_classification:max_features': 1.0918950483680085,
'preprocessor:extra_trees_preproc_for_classification:min_samples_leaf': 1,
'preprocessor:extra_trees_preproc_for_classification:min_samples_split': 5,
'preprocessor:extra_trees_preproc_for_classification:min_weight_fraction_leaf': 0.0,
'preprocessor:extra_trees_preproc_for_classification:n_estimators': 100,
'rescaling:__choice__': 'standardize'})),
(0.040000, SimpleClassificationPipeline(configuration={
'balancing:strategy': 'weighting',
'classifier:__choice__': 'random_forest',
'classifier:random_forest:bootstrap': 'False',
'classifier:random_forest:criterion': 'gini',
'classifier:random_forest:max_depth': 'None',
'classifier:random_forest:max_features': 4.940497229306341,
'classifier:random_forest:max_leaf_nodes': 'None',
'classifier:random_forest:min_samples_leaf': 20,
'classifier:random_forest:min_samples_split': 7,
'classifier:random_forest:min_weight_fraction_leaf': 0.0,
'classifier:random_forest:n_estimators': 100,
'imputation:strategy': 'mean',
'one_hot_encoding:minimum_fraction': 0.01824307123711416,
'one_hot_encoding:use_minimum_fraction': 'True',
'preprocessor:__choice__': 'extra_trees_preproc_for_classification',
'preprocessor:extra_trees_preproc_for_classification:bootstrap': 'True',
'preprocessor:extra_trees_preproc_for_classification:criterion': 'gini',
'preprocessor:extra_trees_preproc_for_classification:max_depth': 'None',
'preprocessor:extra_trees_preproc_for_classification:max_features': 3.491200900865445,
'preprocessor:extra_trees_preproc_for_classification:min_samples_leaf': 6,
'preprocessor:extra_trees_preproc_for_classification:min_samples_split': 8,
'preprocessor:extra_trees_preproc_for_classification:min_weight_fraction_leaf': 0.0,
'preprocessor:extra_trees_preproc_for_classification:n_estimators': 100,
'rescaling:__choice__': 'min/max'})),
(0.040000, SimpleClassificationPipeline(configuration={
'balancing:strategy': 'weighting',
'classifier:__choice__': 'random_forest',
'classifier:random_forest:bootstrap': 'False',
'classifier:random_forest:criterion': 'gini',
'classifier:random_forest:max_depth': 'None',
'classifier:random_forest:max_features': 4.000546197252823,
'classifier:random_forest:max_leaf_nodes': 'None',
'classifier:random_forest:min_samples_leaf': 19,
'classifier:random_forest:min_samples_split': 18,
'classifier:random_forest:min_weight_fraction_leaf': 0.0,
'classifier:random_forest:n_estimators': 100,
'imputation:strategy': 'most_frequent',
'one_hot_encoding:minimum_fraction': 0.004487818890097931,
'one_hot_encoding:use_minimum_fraction': 'True',
'preprocessor:__choice__': 'extra_trees_preproc_for_classification',
'preprocessor:extra_trees_preproc_for_classification:bootstrap': 'False',
'preprocessor:extra_trees_preproc_for_classification:criterion': 'entropy',
'preprocessor:extra_trees_preproc_for_classification:max_depth': 'None',
'preprocessor:extra_trees_preproc_for_classification:max_features': 4.913797450061312,
'preprocessor:extra_trees_preproc_for_classification:min_samples_leaf': 1,
'preprocessor:extra_trees_preproc_for_classification:min_samples_split': 10,
'preprocessor:extra_trees_preproc_for_classification:min_weight_fraction_leaf': 0.0,
'preprocessor:extra_trees_preproc_for_classification:n_estimators': 100,
'rescaling:__choice__': 'none'})),
(0.020000, SimpleClassificationPipeline(configuration={
'balancing:strategy': 'weighting',
'classifier:__choice__': 'random_forest',
'classifier:random_forest:bootstrap': 'False',
'classifier:random_forest:criterion': 'gini',
'classifier:random_forest:max_depth': 'None',
'classifier:random_forest:max_features': 4.412130298506767,
'classifier:random_forest:max_leaf_nodes': 'None',
'classifier:random_forest:min_samples_leaf': 19,
'classifier:random_forest:min_samples_split': 11,
'classifier:random_forest:min_weight_fraction_leaf': 0.0,
'classifier:random_forest:n_estimators': 100,
'imputation:strategy': 'median',
'one_hot_encoding:minimum_fraction': 0.008149946091382478,
'one_hot_encoding:use_minimum_fraction': 'True',
'preprocessor:__choice__': 'extra_trees_preproc_for_classification',
'preprocessor:extra_trees_preproc_for_classification:bootstrap': 'False',
'preprocessor:extra_trees_preproc_for_classification:criterion': 'gini',
'preprocessor:extra_trees_preproc_for_classification:max_depth': 'None',
'preprocessor:extra_trees_preproc_for_classification:max_features': 2.821331241321694,
'preprocessor:extra_trees_preproc_for_classification:min_samples_leaf': 8,
'preprocessor:extra_trees_preproc_for_classification:min_samples_split': 2,
'preprocessor:extra_trees_preproc_for_classification:min_weight_fraction_leaf': 0.0,
'preprocessor:extra_trees_preproc_for_classification:n_estimators': 100,
'rescaling:__choice__': 'standardize'})),
(0.020000, SimpleClassificationPipeline(configuration={
'balancing:strategy': 'weighting',
'classifier:__choice__': 'random_forest',
'classifier:random_forest:bootstrap': 'False',
'classifier:random_forest:criterion': 'entropy',
'classifier:random_forest:max_depth': 'None',
'classifier:random_forest:max_features': 2.585334606852488,
'classifier:random_forest:max_leaf_nodes': 'None',
'classifier:random_forest:min_samples_leaf': 15,
'classifier:random_forest:min_samples_split': 2,
'classifier:random_forest:min_weight_fraction_leaf': 0.0,
'classifier:random_forest:n_estimators': 100,
'imputation:strategy': 'most_frequent',
'one_hot_encoding:use_minimum_fraction': 'False',
'preprocessor:__choice__': 'select_rates',
'preprocessor:select_rates:alpha': 0.31154541628607907,
'preprocessor:select_rates:mode': 'fwe',
'preprocessor:select_rates:score_func': 'f_classif',
'rescaling:__choice__': 'none'})),
(0.020000, SimpleClassificationPipeline(configuration={
'balancing:strategy': 'weighting',
'classifier:__choice__': 'random_forest',
'classifier:random_forest:bootstrap': 'False',
'classifier:random_forest:criterion': 'entropy',
'classifier:random_forest:max_depth': 'None',
'classifier:random_forest:max_features': 2.660106264030751,
'classifier:random_forest:max_leaf_nodes': 'None',
'classifier:random_forest:min_samples_leaf': 20,
'classifier:random_forest:min_samples_split': 19,
'classifier:random_forest:min_weight_fraction_leaf': 0.0,
'classifier:random_forest:n_estimators': 100,
'imputation:strategy': 'median',
'one_hot_encoding:minimum_fraction': 0.00023797748625219528,
'one_hot_encoding:use_minimum_fraction': 'True',
'preprocessor:__choice__': 'extra_trees_preproc_for_classification',
'preprocessor:extra_trees_preproc_for_classification:bootstrap': 'False',
'preprocessor:extra_trees_preproc_for_classification:criterion': 'entropy',
'preprocessor:extra_trees_preproc_for_classification:max_depth': 'None',
'preprocessor:extra_trees_preproc_for_classification:max_features': 2.711779030141609,
'preprocessor:extra_trees_preproc_for_classification:min_samples_leaf': 1,
'preprocessor:extra_trees_preproc_for_classification:min_samples_split': 5,
'preprocessor:extra_trees_preproc_for_classification:min_weight_fraction_leaf': 0.0,
'preprocessor:extra_trees_preproc_for_classification:n_estimators': 100,
'rescaling:__choice__': 'min/max'})),
(0.020000, SimpleClassificationPipeline(configuration={
'balancing:strategy': 'weighting',
'classifier:__choice__': 'random_forest',
'classifier:random_forest:bootstrap': 'False',
'classifier:random_forest:criterion': 'gini',
'classifier:random_forest:max_depth': 'None',
'classifier:random_forest:max_features': 4.311155499004128,
'classifier:random_forest:max_leaf_nodes': 'None',
'classifier:random_forest:min_samples_leaf': 19,
'classifier:random_forest:min_samples_split': 6,
'classifier:random_forest:min_weight_fraction_leaf': 0.0,
'classifier:random_forest:n_estimators': 100,
'imputation:strategy': 'mean',
'one_hot_encoding:minimum_fraction': 0.0045832519735911365,
'one_hot_encoding:use_minimum_fraction': 'True',
'preprocessor:__choice__': 'extra_trees_preproc_for_classification',
'preprocessor:extra_trees_preproc_for_classification:bootstrap': 'True',
'preprocessor:extra_trees_preproc_for_classification:criterion': 'entropy',
'preprocessor:extra_trees_preproc_for_classification:max_depth': 'None',
'preprocessor:extra_trees_preproc_for_classification:max_features': 2.4313384521470844,
'preprocessor:extra_trees_preproc_for_classification:min_samples_leaf': 1,
'preprocessor:extra_trees_preproc_for_classification:min_samples_split': 8,
'preprocessor:extra_trees_preproc_for_classification:min_weight_fraction_leaf': 0.0,
'preprocessor:extra_trees_preproc_for_classification:n_estimators': 100,
'rescaling:__choice__': 'standardize'})),
(0.020000, SimpleClassificationPipeline(configuration={
'balancing:strategy': 'weighting',
'classifier:__choice__': 'random_forest',
'classifier:random_forest:bootstrap': 'False',
'classifier:random_forest:criterion': 'entropy',
'classifier:random_forest:max_depth': 'None',
'classifier:random_forest:max_features': 1.1283591217664084,
'classifier:random_forest:max_leaf_nodes': 'None',
'classifier:random_forest:min_samples_leaf': 20,
'classifier:random_forest:min_samples_split': 4,
'classifier:random_forest:min_weight_fraction_leaf': 0.0,
'classifier:random_forest:n_estimators': 100,
'imputation:strategy': 'mean',
'one_hot_encoding:minimum_fraction': 0.00016005125675672377,
'one_hot_encoding:use_minimum_fraction': 'True',
'preprocessor:__choice__': 'extra_trees_preproc_for_classification',
'preprocessor:extra_trees_preproc_for_classification:bootstrap': 'True',
'preprocessor:extra_trees_preproc_for_classification:criterion': 'entropy',
'preprocessor:extra_trees_preproc_for_classification:max_depth': 'None',
'preprocessor:extra_trees_preproc_for_classification:max_features': 3.089911925114402,
'preprocessor:extra_trees_preproc_for_classification:min_samples_leaf': 4,
'preprocessor:extra_trees_preproc_for_classification:min_samples_split': 5,
'preprocessor:extra_trees_preproc_for_classification:min_weight_fraction_leaf': 0.0,
'preprocessor:extra_trees_preproc_for_classification:n_estimators': 100,
'rescaling:__choice__': 'min/max'})),
(0.020000, SimpleClassificationPipeline(configuration={
'balancing:strategy': 'weighting',
'classifier:__choice__': 'random_forest',
'classifier:random_forest:bootstrap': 'False',
'classifier:random_forest:criterion': 'gini',
'classifier:random_forest:max_depth': 'None',
'classifier:random_forest:max_features': 1.0,
'classifier:random_forest:max_leaf_nodes': 'None',
'classifier:random_forest:min_samples_leaf': 19,
'classifier:random_forest:min_samples_split': 6,
'classifier:random_forest:min_weight_fraction_leaf': 0.0,
'classifier:random_forest:n_estimators': 100,
'imputation:strategy': 'mean',
'one_hot_encoding:minimum_fraction': 0.009337300926998802,
'one_hot_encoding:use_minimum_fraction': 'True',
'preprocessor:__choice__': 'extra_trees_preproc_for_classification',
'preprocessor:extra_trees_preproc_for_classification:bootstrap': 'True',
'preprocessor:extra_trees_preproc_for_classification:criterion': 'entropy',
'preprocessor:extra_trees_preproc_for_classification:max_depth': 'None',
'preprocessor:extra_trees_preproc_for_classification:max_features': 2.851719023875125,
'preprocessor:extra_trees_preproc_for_classification:min_samples_leaf': 3,
'preprocessor:extra_trees_preproc_for_classification:min_samples_split': 7,
'preprocessor:extra_trees_preproc_for_classification:min_weight_fraction_leaf': 0.0,
'preprocessor:extra_trees_preproc_for_classification:n_estimators': 100,
'rescaling:__choice__': 'standardize'})),
(0.020000, SimpleClassificationPipeline(configuration={
'balancing:strategy': 'weighting',
'classifier:__choice__': 'random_forest',
'classifier:random_forest:bootstrap': 'False',
'classifier:random_forest:criterion': 'gini',
'classifier:random_forest:max_depth': 'None',
'classifier:random_forest:max_features': 4.940497229306341,
'classifier:random_forest:max_leaf_nodes': 'None',
'classifier:random_forest:min_samples_leaf': 20,
'classifier:random_forest:min_samples_split': 7,
'classifier:random_forest:min_weight_fraction_leaf': 0.0,
'classifier:random_forest:n_estimators': 100,
'imputation:strategy': 'mean',
'one_hot_encoding:minimum_fraction': 0.05798212052141874,
'one_hot_encoding:use_minimum_fraction': 'True',
'preprocessor:__choice__': 'extra_trees_preproc_for_classification',
'preprocessor:extra_trees_preproc_for_classification:bootstrap': 'False',
'preprocessor:extra_trees_preproc_for_classification:criterion': 'gini',
'preprocessor:extra_trees_preproc_for_classification:max_depth': 'None',
'preprocessor:extra_trees_preproc_for_classification:max_features': 2.866480230834629,
'preprocessor:extra_trees_preproc_for_classification:min_samples_leaf': 4,
'preprocessor:extra_trees_preproc_for_classification:min_samples_split': 4,
'preprocessor:extra_trees_preproc_for_classification:min_weight_fraction_leaf': 0.0,
'preprocessor:extra_trees_preproc_for_classification:n_estimators': 100,
'rescaling:__choice__': 'min/max'})),
(0.020000, SimpleClassificationPipeline(configuration={
'balancing:strategy': 'weighting',
'classifier:__choice__': 'random_forest',
'classifier:random_forest:bootstrap': 'False',
'classifier:random_forest:criterion': 'gini',
'classifier:random_forest:max_depth': 'None',
'classifier:random_forest:max_features': 3.377815501014255,
'classifier:random_forest:max_leaf_nodes': 'None',
'classifier:random_forest:min_samples_leaf': 19,
'classifier:random_forest:min_samples_split': 3,
'classifier:random_forest:min_weight_fraction_leaf': 0.0,
'classifier:random_forest:n_estimators': 100,
'imputation:strategy': 'median',
'one_hot_encoding:minimum_fraction': 0.00935903671597016,
'one_hot_encoding:use_minimum_fraction': 'True',
'preprocessor:__choice__': 'extra_trees_preproc_for_classification',
'preprocessor:extra_trees_preproc_for_classification:bootstrap': 'False',
'preprocessor:extra_trees_preproc_for_classification:criterion': 'entropy',
'preprocessor:extra_trees_preproc_for_classification:max_depth': 'None',
'preprocessor:extra_trees_preproc_for_classification:max_features': 3.926283104591668,
'preprocessor:extra_trees_preproc_for_classification:min_samples_leaf': 1,
'preprocessor:extra_trees_preproc_for_classification:min_samples_split': 7,
'preprocessor:extra_trees_preproc_for_classification:min_weight_fraction_leaf': 0.0,
'preprocessor:extra_trees_preproc_for_classification:n_estimators': 100,
'rescaling:__choice__': 'min/max'})),
(0.020000, SimpleClassificationPipeline(configuration={
'balancing:strategy': 'weighting',
'classifier:__choice__': 'random_forest',
'classifier:random_forest:bootstrap': 'False',
'classifier:random_forest:criterion': 'entropy',
'classifier:random_forest:max_depth': 'None',
'classifier:random_forest:max_features': 2.8412937716906717,
'classifier:random_forest:max_leaf_nodes': 'None',
'classifier:random_forest:min_samples_leaf': 20,
'classifier:random_forest:min_samples_split': 20,
'classifier:random_forest:min_weight_fraction_leaf': 0.0,
'classifier:random_forest:n_estimators': 100,
'imputation:strategy': 'mean',
'one_hot_encoding:minimum_fraction': 0.004994082832266316,
'one_hot_encoding:use_minimum_fraction': 'True',
'preprocessor:__choice__': 'extra_trees_preproc_for_classification',
'preprocessor:extra_trees_preproc_for_classification:bootstrap': 'True',
'preprocessor:extra_trees_preproc_for_classification:criterion': 'entropy',
'preprocessor:extra_trees_preproc_for_classification:max_depth': 'None',
'preprocessor:extra_trees_preproc_for_classification:max_features': 4.139497534039955,
'preprocessor:extra_trees_preproc_for_classification:min_samples_leaf': 2,
'preprocessor:extra_trees_preproc_for_classification:min_samples_split': 15,
'preprocessor:extra_trees_preproc_for_classification:min_weight_fraction_leaf': 0.0,
'preprocessor:extra_trees_preproc_for_classification:n_estimators': 100,
'rescaling:__choice__': 'standardize'})),
]
targets = []
predictions = []
predictions_valid = []
predictions_test = []
# Make predictions and weight them
iteration = 0
for weight, classifier in choices:
iteration += 1
print(dataset, "Iteration %d/%d" % (iteration, len(choices)))
try:
classifier.fit(X.copy(), y.copy())
predictions_valid.append(
classifier.predict_proba(X_valid.copy()) * weight)
predictions_test.append(
classifier.predict_proba(X_test.copy()) * weight)
except Exception as e:
print(e)
print(classifier.configuration)
# Output the predictions
for name, predictions in [('valid', predictions_valid),
('test', predictions_test)]:
predictions = np.array(predictions)
predictions = np.sum(predictions, axis=0).astype(np.float32)
filepath = os.path.join(output, '%s_%s_000.predict' % (dataset, name))
np.savetxt(filepath, predictions, delimiter=' ', fmt='%.4e')
| [
"autosklearn.pipeline.classification.SimpleClassificationPipeline",
"argparse.ArgumentParser",
"os.path.join",
"numpy.array",
"numpy.sum",
"autosklearn.data.competition_data_manager.CompetitionDataManager",
"numpy.savetxt"
] | [((226, 251), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (249, 251), False, 'import argparse\n'), ((406, 434), 'os.path.join', 'os.path.join', (['input', 'dataset'], {}), '(input, dataset)\n', (418, 434), False, 'import os\n'), ((439, 509), 'autosklearn.data.competition_data_manager.CompetitionDataManager', 'autosklearn.data.competition_data_manager.CompetitionDataManager', (['path'], {}), '(path)\n', (503, 509), False, 'import autosklearn\n'), ((32595, 32616), 'numpy.array', 'np.array', (['predictions'], {}), '(predictions)\n', (32603, 32616), True, 'import numpy as np\n'), ((32698, 32757), 'os.path.join', 'os.path.join', (['output', "('%s_%s_000.predict' % (dataset, name))"], {}), "(output, '%s_%s_000.predict' % (dataset, name))\n", (32710, 32757), False, 'import os\n'), ((32762, 32822), 'numpy.savetxt', 'np.savetxt', (['filepath', 'predictions'], {'delimiter': '""" """', 'fmt': '"""%.4e"""'}), "(filepath, predictions, delimiter=' ', fmt='%.4e')\n", (32772, 32822), True, 'import numpy as np\n'), ((685, 2237), 'autosklearn.pipeline.classification.SimpleClassificationPipeline', 'SimpleClassificationPipeline', ([], {'configuration': "{'balancing:strategy': 'weighting', 'classifier:__choice__':\n 'random_forest', 'classifier:random_forest:bootstrap': 'False',\n 'classifier:random_forest:criterion': 'gini',\n 'classifier:random_forest:max_depth': 'None',\n 'classifier:random_forest:max_features': 0.8230378717081245,\n 'classifier:random_forest:max_leaf_nodes': 'None',\n 'classifier:random_forest:min_samples_leaf': 19,\n 'classifier:random_forest:min_samples_split': 8,\n 'classifier:random_forest:min_weight_fraction_leaf': 0.0,\n 'classifier:random_forest:n_estimators': 100, 'imputation:strategy':\n 'mean', 'one_hot_encoding:minimum_fraction': 0.0015176084175820871,\n 'one_hot_encoding:use_minimum_fraction': 'True',\n 'preprocessor:__choice__': 'extra_trees_preproc_for_classification',\n 
'preprocessor:extra_trees_preproc_for_classification:bootstrap': 'True',\n 'preprocessor:extra_trees_preproc_for_classification:criterion':\n 'entropy',\n 'preprocessor:extra_trees_preproc_for_classification:max_depth': 'None',\n 'preprocessor:extra_trees_preproc_for_classification:max_features': \n 2.6233562705637476,\n 'preprocessor:extra_trees_preproc_for_classification:min_samples_leaf':\n 3,\n 'preprocessor:extra_trees_preproc_for_classification:min_samples_split':\n 16,\n 'preprocessor:extra_trees_preproc_for_classification:min_weight_fraction_leaf'\n : 0.0,\n 'preprocessor:extra_trees_preproc_for_classification:n_estimators': 100,\n 'rescaling:__choice__': 'standardize'}"}), "(configuration={'balancing:strategy':\n 'weighting', 'classifier:__choice__': 'random_forest',\n 'classifier:random_forest:bootstrap': 'False',\n 'classifier:random_forest:criterion': 'gini',\n 'classifier:random_forest:max_depth': 'None',\n 'classifier:random_forest:max_features': 0.8230378717081245,\n 'classifier:random_forest:max_leaf_nodes': 'None',\n 'classifier:random_forest:min_samples_leaf': 19,\n 'classifier:random_forest:min_samples_split': 8,\n 'classifier:random_forest:min_weight_fraction_leaf': 0.0,\n 'classifier:random_forest:n_estimators': 100, 'imputation:strategy':\n 'mean', 'one_hot_encoding:minimum_fraction': 0.0015176084175820871,\n 'one_hot_encoding:use_minimum_fraction': 'True',\n 'preprocessor:__choice__': 'extra_trees_preproc_for_classification',\n 'preprocessor:extra_trees_preproc_for_classification:bootstrap': 'True',\n 'preprocessor:extra_trees_preproc_for_classification:criterion':\n 'entropy',\n 'preprocessor:extra_trees_preproc_for_classification:max_depth': 'None',\n 'preprocessor:extra_trees_preproc_for_classification:max_features': \n 2.6233562705637476,\n 'preprocessor:extra_trees_preproc_for_classification:min_samples_leaf':\n 3,\n 'preprocessor:extra_trees_preproc_for_classification:min_samples_split':\n 16,\n 
'preprocessor:extra_trees_preproc_for_classification:min_weight_fraction_leaf'\n : 0.0,\n 'preprocessor:extra_trees_preproc_for_classification:n_estimators': 100,\n 'rescaling:__choice__': 'standardize'})\n", (713, 2237), False, 'from autosklearn.pipeline.classification import SimpleClassificationPipeline\n'), ((2339, 3889), 'autosklearn.pipeline.classification.SimpleClassificationPipeline', 'SimpleClassificationPipeline', ([], {'configuration': "{'balancing:strategy': 'weighting', 'classifier:__choice__':\n 'random_forest', 'classifier:random_forest:bootstrap': 'False',\n 'classifier:random_forest:criterion': 'gini',\n 'classifier:random_forest:max_depth': 'None',\n 'classifier:random_forest:max_features': 4.669880400352936,\n 'classifier:random_forest:max_leaf_nodes': 'None',\n 'classifier:random_forest:min_samples_leaf': 19,\n 'classifier:random_forest:min_samples_split': 7,\n 'classifier:random_forest:min_weight_fraction_leaf': 0.0,\n 'classifier:random_forest:n_estimators': 100, 'imputation:strategy':\n 'mean', 'one_hot_encoding:minimum_fraction': 0.007886826567333378,\n 'one_hot_encoding:use_minimum_fraction': 'True',\n 'preprocessor:__choice__': 'extra_trees_preproc_for_classification',\n 'preprocessor:extra_trees_preproc_for_classification:bootstrap': 'True',\n 'preprocessor:extra_trees_preproc_for_classification:criterion':\n 'entropy',\n 'preprocessor:extra_trees_preproc_for_classification:max_depth': 'None',\n 'preprocessor:extra_trees_preproc_for_classification:max_features': \n 2.7007942047071865,\n 'preprocessor:extra_trees_preproc_for_classification:min_samples_leaf':\n 1,\n 'preprocessor:extra_trees_preproc_for_classification:min_samples_split':\n 12,\n 'preprocessor:extra_trees_preproc_for_classification:min_weight_fraction_leaf'\n : 0.0,\n 'preprocessor:extra_trees_preproc_for_classification:n_estimators': 100,\n 'rescaling:__choice__': 'standardize'}"}), "(configuration={'balancing:strategy':\n 'weighting', 'classifier:__choice__': 
'random_forest',\n 'classifier:random_forest:bootstrap': 'False',\n 'classifier:random_forest:criterion': 'gini',\n 'classifier:random_forest:max_depth': 'None',\n 'classifier:random_forest:max_features': 4.669880400352936,\n 'classifier:random_forest:max_leaf_nodes': 'None',\n 'classifier:random_forest:min_samples_leaf': 19,\n 'classifier:random_forest:min_samples_split': 7,\n 'classifier:random_forest:min_weight_fraction_leaf': 0.0,\n 'classifier:random_forest:n_estimators': 100, 'imputation:strategy':\n 'mean', 'one_hot_encoding:minimum_fraction': 0.007886826567333378,\n 'one_hot_encoding:use_minimum_fraction': 'True',\n 'preprocessor:__choice__': 'extra_trees_preproc_for_classification',\n 'preprocessor:extra_trees_preproc_for_classification:bootstrap': 'True',\n 'preprocessor:extra_trees_preproc_for_classification:criterion':\n 'entropy',\n 'preprocessor:extra_trees_preproc_for_classification:max_depth': 'None',\n 'preprocessor:extra_trees_preproc_for_classification:max_features': \n 2.7007942047071865,\n 'preprocessor:extra_trees_preproc_for_classification:min_samples_leaf':\n 1,\n 'preprocessor:extra_trees_preproc_for_classification:min_samples_split':\n 12,\n 'preprocessor:extra_trees_preproc_for_classification:min_weight_fraction_leaf'\n : 0.0,\n 'preprocessor:extra_trees_preproc_for_classification:n_estimators': 100,\n 'rescaling:__choice__': 'standardize'})\n", (2367, 3889), False, 'from autosklearn.pipeline.classification import SimpleClassificationPipeline\n'), ((4015, 5572), 'autosklearn.pipeline.classification.SimpleClassificationPipeline', 'SimpleClassificationPipeline', ([], {'configuration': "{'balancing:strategy': 'weighting', 'classifier:__choice__':\n 'random_forest', 'classifier:random_forest:bootstrap': 'False',\n 'classifier:random_forest:criterion': 'gini',\n 'classifier:random_forest:max_depth': 'None',\n 'classifier:random_forest:max_features': 1.406221097074635,\n 'classifier:random_forest:max_leaf_nodes': 'None',\n 
'classifier:random_forest:min_samples_leaf': 19,\n 'classifier:random_forest:min_samples_split': 12,\n 'classifier:random_forest:min_weight_fraction_leaf': 0.0,\n 'classifier:random_forest:n_estimators': 100, 'imputation:strategy':\n 'mean', 'one_hot_encoding:minimum_fraction': 0.0035146367585256835,\n 'one_hot_encoding:use_minimum_fraction': 'True',\n 'preprocessor:__choice__': 'extra_trees_preproc_for_classification',\n 'preprocessor:extra_trees_preproc_for_classification:bootstrap':\n 'False',\n 'preprocessor:extra_trees_preproc_for_classification:criterion':\n 'entropy',\n 'preprocessor:extra_trees_preproc_for_classification:max_depth': 'None',\n 'preprocessor:extra_trees_preproc_for_classification:max_features': \n 2.4922383207655763,\n 'preprocessor:extra_trees_preproc_for_classification:min_samples_leaf':\n 3,\n 'preprocessor:extra_trees_preproc_for_classification:min_samples_split':\n 11,\n 'preprocessor:extra_trees_preproc_for_classification:min_weight_fraction_leaf'\n : 0.0,\n 'preprocessor:extra_trees_preproc_for_classification:n_estimators': 100,\n 'rescaling:__choice__': 'standardize'}"}), "(configuration={'balancing:strategy':\n 'weighting', 'classifier:__choice__': 'random_forest',\n 'classifier:random_forest:bootstrap': 'False',\n 'classifier:random_forest:criterion': 'gini',\n 'classifier:random_forest:max_depth': 'None',\n 'classifier:random_forest:max_features': 1.406221097074635,\n 'classifier:random_forest:max_leaf_nodes': 'None',\n 'classifier:random_forest:min_samples_leaf': 19,\n 'classifier:random_forest:min_samples_split': 12,\n 'classifier:random_forest:min_weight_fraction_leaf': 0.0,\n 'classifier:random_forest:n_estimators': 100, 'imputation:strategy':\n 'mean', 'one_hot_encoding:minimum_fraction': 0.0035146367585256835,\n 'one_hot_encoding:use_minimum_fraction': 'True',\n 'preprocessor:__choice__': 'extra_trees_preproc_for_classification',\n 'preprocessor:extra_trees_preproc_for_classification:bootstrap':\n 'False',\n 
'preprocessor:extra_trees_preproc_for_classification:criterion':\n 'entropy',\n 'preprocessor:extra_trees_preproc_for_classification:max_depth': 'None',\n 'preprocessor:extra_trees_preproc_for_classification:max_features': \n 2.4922383207655763,\n 'preprocessor:extra_trees_preproc_for_classification:min_samples_leaf':\n 3,\n 'preprocessor:extra_trees_preproc_for_classification:min_samples_split':\n 11,\n 'preprocessor:extra_trees_preproc_for_classification:min_weight_fraction_leaf'\n : 0.0,\n 'preprocessor:extra_trees_preproc_for_classification:n_estimators': 100,\n 'rescaling:__choice__': 'standardize'})\n", (4043, 5572), False, 'from autosklearn.pipeline.classification import SimpleClassificationPipeline\n'), ((5694, 7246), 'autosklearn.pipeline.classification.SimpleClassificationPipeline', 'SimpleClassificationPipeline', ([], {'configuration': "{'balancing:strategy': 'weighting', 'classifier:__choice__':\n 'random_forest', 'classifier:random_forest:bootstrap': 'False',\n 'classifier:random_forest:criterion': 'gini',\n 'classifier:random_forest:max_depth': 'None',\n 'classifier:random_forest:max_features': 3.0766500993287544,\n 'classifier:random_forest:max_leaf_nodes': 'None',\n 'classifier:random_forest:min_samples_leaf': 19,\n 'classifier:random_forest:min_samples_split': 6,\n 'classifier:random_forest:min_weight_fraction_leaf': 0.0,\n 'classifier:random_forest:n_estimators': 100, 'imputation:strategy':\n 'mean', 'one_hot_encoding:minimum_fraction': 0.0052442759651766795,\n 'one_hot_encoding:use_minimum_fraction': 'True',\n 'preprocessor:__choice__': 'extra_trees_preproc_for_classification',\n 'preprocessor:extra_trees_preproc_for_classification:bootstrap':\n 'False',\n 'preprocessor:extra_trees_preproc_for_classification:criterion':\n 'entropy',\n 'preprocessor:extra_trees_preproc_for_classification:max_depth': 'None',\n 'preprocessor:extra_trees_preproc_for_classification:max_features': \n 2.8345876814556745,\n 
'preprocessor:extra_trees_preproc_for_classification:min_samples_leaf':\n 2,\n 'preprocessor:extra_trees_preproc_for_classification:min_samples_split':\n 5,\n 'preprocessor:extra_trees_preproc_for_classification:min_weight_fraction_leaf'\n : 0.0,\n 'preprocessor:extra_trees_preproc_for_classification:n_estimators': 100,\n 'rescaling:__choice__': 'min/max'}"}), "(configuration={'balancing:strategy':\n 'weighting', 'classifier:__choice__': 'random_forest',\n 'classifier:random_forest:bootstrap': 'False',\n 'classifier:random_forest:criterion': 'gini',\n 'classifier:random_forest:max_depth': 'None',\n 'classifier:random_forest:max_features': 3.0766500993287544,\n 'classifier:random_forest:max_leaf_nodes': 'None',\n 'classifier:random_forest:min_samples_leaf': 19,\n 'classifier:random_forest:min_samples_split': 6,\n 'classifier:random_forest:min_weight_fraction_leaf': 0.0,\n 'classifier:random_forest:n_estimators': 100, 'imputation:strategy':\n 'mean', 'one_hot_encoding:minimum_fraction': 0.0052442759651766795,\n 'one_hot_encoding:use_minimum_fraction': 'True',\n 'preprocessor:__choice__': 'extra_trees_preproc_for_classification',\n 'preprocessor:extra_trees_preproc_for_classification:bootstrap':\n 'False',\n 'preprocessor:extra_trees_preproc_for_classification:criterion':\n 'entropy',\n 'preprocessor:extra_trees_preproc_for_classification:max_depth': 'None',\n 'preprocessor:extra_trees_preproc_for_classification:max_features': \n 2.8345876814556745,\n 'preprocessor:extra_trees_preproc_for_classification:min_samples_leaf':\n 2,\n 'preprocessor:extra_trees_preproc_for_classification:min_samples_split':\n 5,\n 'preprocessor:extra_trees_preproc_for_classification:min_weight_fraction_leaf'\n : 0.0,\n 'preprocessor:extra_trees_preproc_for_classification:n_estimators': 100,\n 'rescaling:__choice__': 'min/max'})\n", (5722, 7246), False, 'from autosklearn.pipeline.classification import SimpleClassificationPipeline\n'), ((7368, 8911), 
'autosklearn.pipeline.classification.SimpleClassificationPipeline', 'SimpleClassificationPipeline', ([], {'configuration': "{'balancing:strategy': 'weighting', 'classifier:__choice__':\n 'random_forest', 'classifier:random_forest:bootstrap': 'False',\n 'classifier:random_forest:criterion': 'gini',\n 'classifier:random_forest:max_depth': 'None',\n 'classifier:random_forest:max_features': 3.992423276448683,\n 'classifier:random_forest:max_leaf_nodes': 'None',\n 'classifier:random_forest:min_samples_leaf': 20,\n 'classifier:random_forest:min_samples_split': 6,\n 'classifier:random_forest:min_weight_fraction_leaf': 0.0,\n 'classifier:random_forest:n_estimators': 100, 'imputation:strategy':\n 'mean', 'one_hot_encoding:minimum_fraction': 0.005060149532495579,\n 'one_hot_encoding:use_minimum_fraction': 'True',\n 'preprocessor:__choice__': 'extra_trees_preproc_for_classification',\n 'preprocessor:extra_trees_preproc_for_classification:bootstrap': 'True',\n 'preprocessor:extra_trees_preproc_for_classification:criterion': 'gini',\n 'preprocessor:extra_trees_preproc_for_classification:max_depth': 'None',\n 'preprocessor:extra_trees_preproc_for_classification:max_features': \n 2.9176773896808106,\n 'preprocessor:extra_trees_preproc_for_classification:min_samples_leaf':\n 4,\n 'preprocessor:extra_trees_preproc_for_classification:min_samples_split':\n 11,\n 'preprocessor:extra_trees_preproc_for_classification:min_weight_fraction_leaf'\n : 0.0,\n 'preprocessor:extra_trees_preproc_for_classification:n_estimators': 100,\n 'rescaling:__choice__': 'standardize'}"}), "(configuration={'balancing:strategy':\n 'weighting', 'classifier:__choice__': 'random_forest',\n 'classifier:random_forest:bootstrap': 'False',\n 'classifier:random_forest:criterion': 'gini',\n 'classifier:random_forest:max_depth': 'None',\n 'classifier:random_forest:max_features': 3.992423276448683,\n 'classifier:random_forest:max_leaf_nodes': 'None',\n 'classifier:random_forest:min_samples_leaf': 20,\n 
'classifier:random_forest:min_samples_split': 6,\n 'classifier:random_forest:min_weight_fraction_leaf': 0.0,\n 'classifier:random_forest:n_estimators': 100, 'imputation:strategy':\n 'mean', 'one_hot_encoding:minimum_fraction': 0.005060149532495579,\n 'one_hot_encoding:use_minimum_fraction': 'True',\n 'preprocessor:__choice__': 'extra_trees_preproc_for_classification',\n 'preprocessor:extra_trees_preproc_for_classification:bootstrap': 'True',\n 'preprocessor:extra_trees_preproc_for_classification:criterion': 'gini',\n 'preprocessor:extra_trees_preproc_for_classification:max_depth': 'None',\n 'preprocessor:extra_trees_preproc_for_classification:max_features': \n 2.9176773896808106,\n 'preprocessor:extra_trees_preproc_for_classification:min_samples_leaf':\n 4,\n 'preprocessor:extra_trees_preproc_for_classification:min_samples_split':\n 11,\n 'preprocessor:extra_trees_preproc_for_classification:min_weight_fraction_leaf'\n : 0.0,\n 'preprocessor:extra_trees_preproc_for_classification:n_estimators': 100,\n 'rescaling:__choice__': 'standardize'})\n", (7396, 8911), False, 'from autosklearn.pipeline.classification import SimpleClassificationPipeline\n'), ((9041, 10597), 'autosklearn.pipeline.classification.SimpleClassificationPipeline', 'SimpleClassificationPipeline', ([], {'configuration': "{'balancing:strategy': 'weighting', 'classifier:__choice__':\n 'random_forest', 'classifier:random_forest:bootstrap': 'False',\n 'classifier:random_forest:criterion': 'gini',\n 'classifier:random_forest:max_depth': 'None',\n 'classifier:random_forest:max_features': 1.8427415302457764,\n 'classifier:random_forest:max_leaf_nodes': 'None',\n 'classifier:random_forest:min_samples_leaf': 19,\n 'classifier:random_forest:min_samples_split': 11,\n 'classifier:random_forest:min_weight_fraction_leaf': 0.0,\n 'classifier:random_forest:n_estimators': 100, 'imputation:strategy':\n 'mean', 'one_hot_encoding:minimum_fraction': 0.006094139631443715,\n 'one_hot_encoding:use_minimum_fraction': 'True',\n 
'preprocessor:__choice__': 'extra_trees_preproc_for_classification',\n 'preprocessor:extra_trees_preproc_for_classification:bootstrap':\n 'False',\n 'preprocessor:extra_trees_preproc_for_classification:criterion':\n 'entropy',\n 'preprocessor:extra_trees_preproc_for_classification:max_depth': 'None',\n 'preprocessor:extra_trees_preproc_for_classification:max_features': \n 2.9054667656168744,\n 'preprocessor:extra_trees_preproc_for_classification:min_samples_leaf':\n 5,\n 'preprocessor:extra_trees_preproc_for_classification:min_samples_split':\n 9,\n 'preprocessor:extra_trees_preproc_for_classification:min_weight_fraction_leaf'\n : 0.0,\n 'preprocessor:extra_trees_preproc_for_classification:n_estimators': 100,\n 'rescaling:__choice__': 'standardize'}"}), "(configuration={'balancing:strategy':\n 'weighting', 'classifier:__choice__': 'random_forest',\n 'classifier:random_forest:bootstrap': 'False',\n 'classifier:random_forest:criterion': 'gini',\n 'classifier:random_forest:max_depth': 'None',\n 'classifier:random_forest:max_features': 1.8427415302457764,\n 'classifier:random_forest:max_leaf_nodes': 'None',\n 'classifier:random_forest:min_samples_leaf': 19,\n 'classifier:random_forest:min_samples_split': 11,\n 'classifier:random_forest:min_weight_fraction_leaf': 0.0,\n 'classifier:random_forest:n_estimators': 100, 'imputation:strategy':\n 'mean', 'one_hot_encoding:minimum_fraction': 0.006094139631443715,\n 'one_hot_encoding:use_minimum_fraction': 'True',\n 'preprocessor:__choice__': 'extra_trees_preproc_for_classification',\n 'preprocessor:extra_trees_preproc_for_classification:bootstrap':\n 'False',\n 'preprocessor:extra_trees_preproc_for_classification:criterion':\n 'entropy',\n 'preprocessor:extra_trees_preproc_for_classification:max_depth': 'None',\n 'preprocessor:extra_trees_preproc_for_classification:max_features': \n 2.9054667656168744,\n 'preprocessor:extra_trees_preproc_for_classification:min_samples_leaf':\n 5,\n 
'preprocessor:extra_trees_preproc_for_classification:min_samples_split':\n 9,\n 'preprocessor:extra_trees_preproc_for_classification:min_weight_fraction_leaf'\n : 0.0,\n 'preprocessor:extra_trees_preproc_for_classification:n_estimators': 100,\n 'rescaling:__choice__': 'standardize'})\n", (9069, 10597), False, 'from autosklearn.pipeline.classification import SimpleClassificationPipeline\n'), ((10719, 12272), 'autosklearn.pipeline.classification.SimpleClassificationPipeline', 'SimpleClassificationPipeline', ([], {'configuration': "{'balancing:strategy': 'weighting', 'classifier:__choice__':\n 'random_forest', 'classifier:random_forest:bootstrap': 'False',\n 'classifier:random_forest:criterion': 'gini',\n 'classifier:random_forest:max_depth': 'None',\n 'classifier:random_forest:max_features': 1.4679336412447643,\n 'classifier:random_forest:max_leaf_nodes': 'None',\n 'classifier:random_forest:min_samples_leaf': 20,\n 'classifier:random_forest:min_samples_split': 7,\n 'classifier:random_forest:min_weight_fraction_leaf': 0.0,\n 'classifier:random_forest:n_estimators': 100, 'imputation:strategy':\n 'most_frequent', 'one_hot_encoding:minimum_fraction': \n 0.013856064466225412, 'one_hot_encoding:use_minimum_fraction': 'True',\n 'preprocessor:__choice__': 'extra_trees_preproc_for_classification',\n 'preprocessor:extra_trees_preproc_for_classification:bootstrap':\n 'False',\n 'preprocessor:extra_trees_preproc_for_classification:criterion': 'gini',\n 'preprocessor:extra_trees_preproc_for_classification:max_depth': 'None',\n 'preprocessor:extra_trees_preproc_for_classification:max_features': \n 2.821607178538829,\n 'preprocessor:extra_trees_preproc_for_classification:min_samples_leaf':\n 1,\n 'preprocessor:extra_trees_preproc_for_classification:min_samples_split':\n 2,\n 'preprocessor:extra_trees_preproc_for_classification:min_weight_fraction_leaf'\n : 0.0,\n 'preprocessor:extra_trees_preproc_for_classification:n_estimators': 100,\n 'rescaling:__choice__': 'min/max'}"}), 
"(configuration={'balancing:strategy':\n 'weighting', 'classifier:__choice__': 'random_forest',\n 'classifier:random_forest:bootstrap': 'False',\n 'classifier:random_forest:criterion': 'gini',\n 'classifier:random_forest:max_depth': 'None',\n 'classifier:random_forest:max_features': 1.4679336412447643,\n 'classifier:random_forest:max_leaf_nodes': 'None',\n 'classifier:random_forest:min_samples_leaf': 20,\n 'classifier:random_forest:min_samples_split': 7,\n 'classifier:random_forest:min_weight_fraction_leaf': 0.0,\n 'classifier:random_forest:n_estimators': 100, 'imputation:strategy':\n 'most_frequent', 'one_hot_encoding:minimum_fraction': \n 0.013856064466225412, 'one_hot_encoding:use_minimum_fraction': 'True',\n 'preprocessor:__choice__': 'extra_trees_preproc_for_classification',\n 'preprocessor:extra_trees_preproc_for_classification:bootstrap':\n 'False',\n 'preprocessor:extra_trees_preproc_for_classification:criterion': 'gini',\n 'preprocessor:extra_trees_preproc_for_classification:max_depth': 'None',\n 'preprocessor:extra_trees_preproc_for_classification:max_features': \n 2.821607178538829,\n 'preprocessor:extra_trees_preproc_for_classification:min_samples_leaf':\n 1,\n 'preprocessor:extra_trees_preproc_for_classification:min_samples_split':\n 2,\n 'preprocessor:extra_trees_preproc_for_classification:min_weight_fraction_leaf'\n : 0.0,\n 'preprocessor:extra_trees_preproc_for_classification:n_estimators': 100,\n 'rescaling:__choice__': 'min/max'})\n", (10747, 12272), False, 'from autosklearn.pipeline.classification import SimpleClassificationPipeline\n'), ((12397, 13946), 'autosklearn.pipeline.classification.SimpleClassificationPipeline', 'SimpleClassificationPipeline', ([], {'configuration': "{'balancing:strategy': 'weighting', 'classifier:__choice__':\n 'random_forest', 'classifier:random_forest:bootstrap': 'False',\n 'classifier:random_forest:criterion': 'gini',\n 'classifier:random_forest:max_depth': 'None',\n 'classifier:random_forest:max_features': 
1.3155071079117242,\n 'classifier:random_forest:max_leaf_nodes': 'None',\n 'classifier:random_forest:min_samples_leaf': 20,\n 'classifier:random_forest:min_samples_split': 8,\n 'classifier:random_forest:min_weight_fraction_leaf': 0.0,\n 'classifier:random_forest:n_estimators': 100, 'imputation:strategy':\n 'mean', 'one_hot_encoding:minimum_fraction': 0.0035361733150731144,\n 'one_hot_encoding:use_minimum_fraction': 'True',\n 'preprocessor:__choice__': 'extra_trees_preproc_for_classification',\n 'preprocessor:extra_trees_preproc_for_classification:bootstrap':\n 'False',\n 'preprocessor:extra_trees_preproc_for_classification:criterion': 'gini',\n 'preprocessor:extra_trees_preproc_for_classification:max_depth': 'None',\n 'preprocessor:extra_trees_preproc_for_classification:max_features': \n 1.0918950483680085,\n 'preprocessor:extra_trees_preproc_for_classification:min_samples_leaf':\n 1,\n 'preprocessor:extra_trees_preproc_for_classification:min_samples_split':\n 5,\n 'preprocessor:extra_trees_preproc_for_classification:min_weight_fraction_leaf'\n : 0.0,\n 'preprocessor:extra_trees_preproc_for_classification:n_estimators': 100,\n 'rescaling:__choice__': 'standardize'}"}), "(configuration={'balancing:strategy':\n 'weighting', 'classifier:__choice__': 'random_forest',\n 'classifier:random_forest:bootstrap': 'False',\n 'classifier:random_forest:criterion': 'gini',\n 'classifier:random_forest:max_depth': 'None',\n 'classifier:random_forest:max_features': 1.3155071079117242,\n 'classifier:random_forest:max_leaf_nodes': 'None',\n 'classifier:random_forest:min_samples_leaf': 20,\n 'classifier:random_forest:min_samples_split': 8,\n 'classifier:random_forest:min_weight_fraction_leaf': 0.0,\n 'classifier:random_forest:n_estimators': 100, 'imputation:strategy':\n 'mean', 'one_hot_encoding:minimum_fraction': 0.0035361733150731144,\n 'one_hot_encoding:use_minimum_fraction': 'True',\n 'preprocessor:__choice__': 'extra_trees_preproc_for_classification',\n 
'preprocessor:extra_trees_preproc_for_classification:bootstrap':\n 'False',\n 'preprocessor:extra_trees_preproc_for_classification:criterion': 'gini',\n 'preprocessor:extra_trees_preproc_for_classification:max_depth': 'None',\n 'preprocessor:extra_trees_preproc_for_classification:max_features': \n 1.0918950483680085,\n 'preprocessor:extra_trees_preproc_for_classification:min_samples_leaf':\n 1,\n 'preprocessor:extra_trees_preproc_for_classification:min_samples_split':\n 5,\n 'preprocessor:extra_trees_preproc_for_classification:min_weight_fraction_leaf'\n : 0.0,\n 'preprocessor:extra_trees_preproc_for_classification:n_estimators': 100,\n 'rescaling:__choice__': 'standardize'})\n", (12425, 13946), False, 'from autosklearn.pipeline.classification import SimpleClassificationPipeline\n'), ((14072, 15608), 'autosklearn.pipeline.classification.SimpleClassificationPipeline', 'SimpleClassificationPipeline', ([], {'configuration': "{'balancing:strategy': 'weighting', 'classifier:__choice__':\n 'random_forest', 'classifier:random_forest:bootstrap': 'False',\n 'classifier:random_forest:criterion': 'gini',\n 'classifier:random_forest:max_depth': 'None',\n 'classifier:random_forest:max_features': 4.940497229306341,\n 'classifier:random_forest:max_leaf_nodes': 'None',\n 'classifier:random_forest:min_samples_leaf': 20,\n 'classifier:random_forest:min_samples_split': 7,\n 'classifier:random_forest:min_weight_fraction_leaf': 0.0,\n 'classifier:random_forest:n_estimators': 100, 'imputation:strategy':\n 'mean', 'one_hot_encoding:minimum_fraction': 0.01824307123711416,\n 'one_hot_encoding:use_minimum_fraction': 'True',\n 'preprocessor:__choice__': 'extra_trees_preproc_for_classification',\n 'preprocessor:extra_trees_preproc_for_classification:bootstrap': 'True',\n 'preprocessor:extra_trees_preproc_for_classification:criterion': 'gini',\n 'preprocessor:extra_trees_preproc_for_classification:max_depth': 'None',\n 'preprocessor:extra_trees_preproc_for_classification:max_features': \n 
3.491200900865445,\n 'preprocessor:extra_trees_preproc_for_classification:min_samples_leaf':\n 6,\n 'preprocessor:extra_trees_preproc_for_classification:min_samples_split':\n 8,\n 'preprocessor:extra_trees_preproc_for_classification:min_weight_fraction_leaf'\n : 0.0,\n 'preprocessor:extra_trees_preproc_for_classification:n_estimators': 100,\n 'rescaling:__choice__': 'min/max'}"}), "(configuration={'balancing:strategy':\n 'weighting', 'classifier:__choice__': 'random_forest',\n 'classifier:random_forest:bootstrap': 'False',\n 'classifier:random_forest:criterion': 'gini',\n 'classifier:random_forest:max_depth': 'None',\n 'classifier:random_forest:max_features': 4.940497229306341,\n 'classifier:random_forest:max_leaf_nodes': 'None',\n 'classifier:random_forest:min_samples_leaf': 20,\n 'classifier:random_forest:min_samples_split': 7,\n 'classifier:random_forest:min_weight_fraction_leaf': 0.0,\n 'classifier:random_forest:n_estimators': 100, 'imputation:strategy':\n 'mean', 'one_hot_encoding:minimum_fraction': 0.01824307123711416,\n 'one_hot_encoding:use_minimum_fraction': 'True',\n 'preprocessor:__choice__': 'extra_trees_preproc_for_classification',\n 'preprocessor:extra_trees_preproc_for_classification:bootstrap': 'True',\n 'preprocessor:extra_trees_preproc_for_classification:criterion': 'gini',\n 'preprocessor:extra_trees_preproc_for_classification:max_depth': 'None',\n 'preprocessor:extra_trees_preproc_for_classification:max_features': \n 3.491200900865445,\n 'preprocessor:extra_trees_preproc_for_classification:min_samples_leaf':\n 6,\n 'preprocessor:extra_trees_preproc_for_classification:min_samples_split':\n 8,\n 'preprocessor:extra_trees_preproc_for_classification:min_weight_fraction_leaf'\n : 0.0,\n 'preprocessor:extra_trees_preproc_for_classification:n_estimators': 100,\n 'rescaling:__choice__': 'min/max'})\n", (14100, 15608), False, 'from autosklearn.pipeline.classification import SimpleClassificationPipeline\n'), ((15738, 17296), 
'autosklearn.pipeline.classification.SimpleClassificationPipeline', 'SimpleClassificationPipeline', ([], {'configuration': "{'balancing:strategy': 'weighting', 'classifier:__choice__':\n 'random_forest', 'classifier:random_forest:bootstrap': 'False',\n 'classifier:random_forest:criterion': 'gini',\n 'classifier:random_forest:max_depth': 'None',\n 'classifier:random_forest:max_features': 4.000546197252823,\n 'classifier:random_forest:max_leaf_nodes': 'None',\n 'classifier:random_forest:min_samples_leaf': 19,\n 'classifier:random_forest:min_samples_split': 18,\n 'classifier:random_forest:min_weight_fraction_leaf': 0.0,\n 'classifier:random_forest:n_estimators': 100, 'imputation:strategy':\n 'most_frequent', 'one_hot_encoding:minimum_fraction': \n 0.004487818890097931, 'one_hot_encoding:use_minimum_fraction': 'True',\n 'preprocessor:__choice__': 'extra_trees_preproc_for_classification',\n 'preprocessor:extra_trees_preproc_for_classification:bootstrap':\n 'False',\n 'preprocessor:extra_trees_preproc_for_classification:criterion':\n 'entropy',\n 'preprocessor:extra_trees_preproc_for_classification:max_depth': 'None',\n 'preprocessor:extra_trees_preproc_for_classification:max_features': \n 4.913797450061312,\n 'preprocessor:extra_trees_preproc_for_classification:min_samples_leaf':\n 1,\n 'preprocessor:extra_trees_preproc_for_classification:min_samples_split':\n 10,\n 'preprocessor:extra_trees_preproc_for_classification:min_weight_fraction_leaf'\n : 0.0,\n 'preprocessor:extra_trees_preproc_for_classification:n_estimators': 100,\n 'rescaling:__choice__': 'none'}"}), "(configuration={'balancing:strategy':\n 'weighting', 'classifier:__choice__': 'random_forest',\n 'classifier:random_forest:bootstrap': 'False',\n 'classifier:random_forest:criterion': 'gini',\n 'classifier:random_forest:max_depth': 'None',\n 'classifier:random_forest:max_features': 4.000546197252823,\n 'classifier:random_forest:max_leaf_nodes': 'None',\n 'classifier:random_forest:min_samples_leaf': 19,\n 
'classifier:random_forest:min_samples_split': 18,\n 'classifier:random_forest:min_weight_fraction_leaf': 0.0,\n 'classifier:random_forest:n_estimators': 100, 'imputation:strategy':\n 'most_frequent', 'one_hot_encoding:minimum_fraction': \n 0.004487818890097931, 'one_hot_encoding:use_minimum_fraction': 'True',\n 'preprocessor:__choice__': 'extra_trees_preproc_for_classification',\n 'preprocessor:extra_trees_preproc_for_classification:bootstrap':\n 'False',\n 'preprocessor:extra_trees_preproc_for_classification:criterion':\n 'entropy',\n 'preprocessor:extra_trees_preproc_for_classification:max_depth': 'None',\n 'preprocessor:extra_trees_preproc_for_classification:max_features': \n 4.913797450061312,\n 'preprocessor:extra_trees_preproc_for_classification:min_samples_leaf':\n 1,\n 'preprocessor:extra_trees_preproc_for_classification:min_samples_split':\n 10,\n 'preprocessor:extra_trees_preproc_for_classification:min_weight_fraction_leaf'\n : 0.0,\n 'preprocessor:extra_trees_preproc_for_classification:n_estimators': 100,\n 'rescaling:__choice__': 'none'})\n", (15766, 17296), False, 'from autosklearn.pipeline.classification import SimpleClassificationPipeline\n'), ((17417, 18966), 'autosklearn.pipeline.classification.SimpleClassificationPipeline', 'SimpleClassificationPipeline', ([], {'configuration': "{'balancing:strategy': 'weighting', 'classifier:__choice__':\n 'random_forest', 'classifier:random_forest:bootstrap': 'False',\n 'classifier:random_forest:criterion': 'gini',\n 'classifier:random_forest:max_depth': 'None',\n 'classifier:random_forest:max_features': 4.412130298506767,\n 'classifier:random_forest:max_leaf_nodes': 'None',\n 'classifier:random_forest:min_samples_leaf': 19,\n 'classifier:random_forest:min_samples_split': 11,\n 'classifier:random_forest:min_weight_fraction_leaf': 0.0,\n 'classifier:random_forest:n_estimators': 100, 'imputation:strategy':\n 'median', 'one_hot_encoding:minimum_fraction': 0.008149946091382478,\n 
'one_hot_encoding:use_minimum_fraction': 'True',\n 'preprocessor:__choice__': 'extra_trees_preproc_for_classification',\n 'preprocessor:extra_trees_preproc_for_classification:bootstrap':\n 'False',\n 'preprocessor:extra_trees_preproc_for_classification:criterion': 'gini',\n 'preprocessor:extra_trees_preproc_for_classification:max_depth': 'None',\n 'preprocessor:extra_trees_preproc_for_classification:max_features': \n 2.821331241321694,\n 'preprocessor:extra_trees_preproc_for_classification:min_samples_leaf':\n 8,\n 'preprocessor:extra_trees_preproc_for_classification:min_samples_split':\n 2,\n 'preprocessor:extra_trees_preproc_for_classification:min_weight_fraction_leaf'\n : 0.0,\n 'preprocessor:extra_trees_preproc_for_classification:n_estimators': 100,\n 'rescaling:__choice__': 'standardize'}"}), "(configuration={'balancing:strategy':\n 'weighting', 'classifier:__choice__': 'random_forest',\n 'classifier:random_forest:bootstrap': 'False',\n 'classifier:random_forest:criterion': 'gini',\n 'classifier:random_forest:max_depth': 'None',\n 'classifier:random_forest:max_features': 4.412130298506767,\n 'classifier:random_forest:max_leaf_nodes': 'None',\n 'classifier:random_forest:min_samples_leaf': 19,\n 'classifier:random_forest:min_samples_split': 11,\n 'classifier:random_forest:min_weight_fraction_leaf': 0.0,\n 'classifier:random_forest:n_estimators': 100, 'imputation:strategy':\n 'median', 'one_hot_encoding:minimum_fraction': 0.008149946091382478,\n 'one_hot_encoding:use_minimum_fraction': 'True',\n 'preprocessor:__choice__': 'extra_trees_preproc_for_classification',\n 'preprocessor:extra_trees_preproc_for_classification:bootstrap':\n 'False',\n 'preprocessor:extra_trees_preproc_for_classification:criterion': 'gini',\n 'preprocessor:extra_trees_preproc_for_classification:max_depth': 'None',\n 'preprocessor:extra_trees_preproc_for_classification:max_features': \n 2.821331241321694,\n 'preprocessor:extra_trees_preproc_for_classification:min_samples_leaf':\n 8,\n 
'preprocessor:extra_trees_preproc_for_classification:min_samples_split':\n 2,\n 'preprocessor:extra_trees_preproc_for_classification:min_weight_fraction_leaf'\n : 0.0,\n 'preprocessor:extra_trees_preproc_for_classification:n_estimators': 100,\n 'rescaling:__choice__': 'standardize'})\n", (17445, 18966), False, 'from autosklearn.pipeline.classification import SimpleClassificationPipeline\n'), ((19092, 20047), 'autosklearn.pipeline.classification.SimpleClassificationPipeline', 'SimpleClassificationPipeline', ([], {'configuration': "{'balancing:strategy': 'weighting', 'classifier:__choice__':\n 'random_forest', 'classifier:random_forest:bootstrap': 'False',\n 'classifier:random_forest:criterion': 'entropy',\n 'classifier:random_forest:max_depth': 'None',\n 'classifier:random_forest:max_features': 2.585334606852488,\n 'classifier:random_forest:max_leaf_nodes': 'None',\n 'classifier:random_forest:min_samples_leaf': 15,\n 'classifier:random_forest:min_samples_split': 2,\n 'classifier:random_forest:min_weight_fraction_leaf': 0.0,\n 'classifier:random_forest:n_estimators': 100, 'imputation:strategy':\n 'most_frequent', 'one_hot_encoding:use_minimum_fraction': 'False',\n 'preprocessor:__choice__': 'select_rates',\n 'preprocessor:select_rates:alpha': 0.31154541628607907,\n 'preprocessor:select_rates:mode': 'fwe',\n 'preprocessor:select_rates:score_func': 'f_classif',\n 'rescaling:__choice__': 'none'}"}), "(configuration={'balancing:strategy':\n 'weighting', 'classifier:__choice__': 'random_forest',\n 'classifier:random_forest:bootstrap': 'False',\n 'classifier:random_forest:criterion': 'entropy',\n 'classifier:random_forest:max_depth': 'None',\n 'classifier:random_forest:max_features': 2.585334606852488,\n 'classifier:random_forest:max_leaf_nodes': 'None',\n 'classifier:random_forest:min_samples_leaf': 15,\n 'classifier:random_forest:min_samples_split': 2,\n 'classifier:random_forest:min_weight_fraction_leaf': 0.0,\n 'classifier:random_forest:n_estimators': 100, 
'imputation:strategy':\n 'most_frequent', 'one_hot_encoding:use_minimum_fraction': 'False',\n 'preprocessor:__choice__': 'select_rates',\n 'preprocessor:select_rates:alpha': 0.31154541628607907,\n 'preprocessor:select_rates:mode': 'fwe',\n 'preprocessor:select_rates:score_func': 'f_classif',\n 'rescaling:__choice__': 'none'})\n", (19120, 20047), False, 'from autosklearn.pipeline.classification import SimpleClassificationPipeline\n'), ((20165, 21722), 'autosklearn.pipeline.classification.SimpleClassificationPipeline', 'SimpleClassificationPipeline', ([], {'configuration': "{'balancing:strategy': 'weighting', 'classifier:__choice__':\n 'random_forest', 'classifier:random_forest:bootstrap': 'False',\n 'classifier:random_forest:criterion': 'entropy',\n 'classifier:random_forest:max_depth': 'None',\n 'classifier:random_forest:max_features': 2.660106264030751,\n 'classifier:random_forest:max_leaf_nodes': 'None',\n 'classifier:random_forest:min_samples_leaf': 20,\n 'classifier:random_forest:min_samples_split': 19,\n 'classifier:random_forest:min_weight_fraction_leaf': 0.0,\n 'classifier:random_forest:n_estimators': 100, 'imputation:strategy':\n 'median', 'one_hot_encoding:minimum_fraction': 0.00023797748625219528,\n 'one_hot_encoding:use_minimum_fraction': 'True',\n 'preprocessor:__choice__': 'extra_trees_preproc_for_classification',\n 'preprocessor:extra_trees_preproc_for_classification:bootstrap':\n 'False',\n 'preprocessor:extra_trees_preproc_for_classification:criterion':\n 'entropy',\n 'preprocessor:extra_trees_preproc_for_classification:max_depth': 'None',\n 'preprocessor:extra_trees_preproc_for_classification:max_features': \n 2.711779030141609,\n 'preprocessor:extra_trees_preproc_for_classification:min_samples_leaf':\n 1,\n 'preprocessor:extra_trees_preproc_for_classification:min_samples_split':\n 5,\n 'preprocessor:extra_trees_preproc_for_classification:min_weight_fraction_leaf'\n : 0.0,\n 'preprocessor:extra_trees_preproc_for_classification:n_estimators': 100,\n 
'rescaling:__choice__': 'min/max'}"}), "(configuration={'balancing:strategy':\n 'weighting', 'classifier:__choice__': 'random_forest',\n 'classifier:random_forest:bootstrap': 'False',\n 'classifier:random_forest:criterion': 'entropy',\n 'classifier:random_forest:max_depth': 'None',\n 'classifier:random_forest:max_features': 2.660106264030751,\n 'classifier:random_forest:max_leaf_nodes': 'None',\n 'classifier:random_forest:min_samples_leaf': 20,\n 'classifier:random_forest:min_samples_split': 19,\n 'classifier:random_forest:min_weight_fraction_leaf': 0.0,\n 'classifier:random_forest:n_estimators': 100, 'imputation:strategy':\n 'median', 'one_hot_encoding:minimum_fraction': 0.00023797748625219528,\n 'one_hot_encoding:use_minimum_fraction': 'True',\n 'preprocessor:__choice__': 'extra_trees_preproc_for_classification',\n 'preprocessor:extra_trees_preproc_for_classification:bootstrap':\n 'False',\n 'preprocessor:extra_trees_preproc_for_classification:criterion':\n 'entropy',\n 'preprocessor:extra_trees_preproc_for_classification:max_depth': 'None',\n 'preprocessor:extra_trees_preproc_for_classification:max_features': \n 2.711779030141609,\n 'preprocessor:extra_trees_preproc_for_classification:min_samples_leaf':\n 1,\n 'preprocessor:extra_trees_preproc_for_classification:min_samples_split':\n 5,\n 'preprocessor:extra_trees_preproc_for_classification:min_weight_fraction_leaf'\n : 0.0,\n 'preprocessor:extra_trees_preproc_for_classification:n_estimators': 100,\n 'rescaling:__choice__': 'min/max'})\n", (20193, 21722), False, 'from autosklearn.pipeline.classification import SimpleClassificationPipeline\n'), ((21844, 23394), 'autosklearn.pipeline.classification.SimpleClassificationPipeline', 'SimpleClassificationPipeline', ([], {'configuration': "{'balancing:strategy': 'weighting', 'classifier:__choice__':\n 'random_forest', 'classifier:random_forest:bootstrap': 'False',\n 'classifier:random_forest:criterion': 'gini',\n 'classifier:random_forest:max_depth': 'None',\n 
'classifier:random_forest:max_features': 4.311155499004128,\n 'classifier:random_forest:max_leaf_nodes': 'None',\n 'classifier:random_forest:min_samples_leaf': 19,\n 'classifier:random_forest:min_samples_split': 6,\n 'classifier:random_forest:min_weight_fraction_leaf': 0.0,\n 'classifier:random_forest:n_estimators': 100, 'imputation:strategy':\n 'mean', 'one_hot_encoding:minimum_fraction': 0.0045832519735911365,\n 'one_hot_encoding:use_minimum_fraction': 'True',\n 'preprocessor:__choice__': 'extra_trees_preproc_for_classification',\n 'preprocessor:extra_trees_preproc_for_classification:bootstrap': 'True',\n 'preprocessor:extra_trees_preproc_for_classification:criterion':\n 'entropy',\n 'preprocessor:extra_trees_preproc_for_classification:max_depth': 'None',\n 'preprocessor:extra_trees_preproc_for_classification:max_features': \n 2.4313384521470844,\n 'preprocessor:extra_trees_preproc_for_classification:min_samples_leaf':\n 1,\n 'preprocessor:extra_trees_preproc_for_classification:min_samples_split':\n 8,\n 'preprocessor:extra_trees_preproc_for_classification:min_weight_fraction_leaf'\n : 0.0,\n 'preprocessor:extra_trees_preproc_for_classification:n_estimators': 100,\n 'rescaling:__choice__': 'standardize'}"}), "(configuration={'balancing:strategy':\n 'weighting', 'classifier:__choice__': 'random_forest',\n 'classifier:random_forest:bootstrap': 'False',\n 'classifier:random_forest:criterion': 'gini',\n 'classifier:random_forest:max_depth': 'None',\n 'classifier:random_forest:max_features': 4.311155499004128,\n 'classifier:random_forest:max_leaf_nodes': 'None',\n 'classifier:random_forest:min_samples_leaf': 19,\n 'classifier:random_forest:min_samples_split': 6,\n 'classifier:random_forest:min_weight_fraction_leaf': 0.0,\n 'classifier:random_forest:n_estimators': 100, 'imputation:strategy':\n 'mean', 'one_hot_encoding:minimum_fraction': 0.0045832519735911365,\n 'one_hot_encoding:use_minimum_fraction': 'True',\n 'preprocessor:__choice__': 
'extra_trees_preproc_for_classification',\n 'preprocessor:extra_trees_preproc_for_classification:bootstrap': 'True',\n 'preprocessor:extra_trees_preproc_for_classification:criterion':\n 'entropy',\n 'preprocessor:extra_trees_preproc_for_classification:max_depth': 'None',\n 'preprocessor:extra_trees_preproc_for_classification:max_features': \n 2.4313384521470844,\n 'preprocessor:extra_trees_preproc_for_classification:min_samples_leaf':\n 1,\n 'preprocessor:extra_trees_preproc_for_classification:min_samples_split':\n 8,\n 'preprocessor:extra_trees_preproc_for_classification:min_weight_fraction_leaf'\n : 0.0,\n 'preprocessor:extra_trees_preproc_for_classification:n_estimators': 100,\n 'rescaling:__choice__': 'standardize'})\n", (21872, 23394), False, 'from autosklearn.pipeline.classification import SimpleClassificationPipeline\n'), ((23520, 25070), 'autosklearn.pipeline.classification.SimpleClassificationPipeline', 'SimpleClassificationPipeline', ([], {'configuration': "{'balancing:strategy': 'weighting', 'classifier:__choice__':\n 'random_forest', 'classifier:random_forest:bootstrap': 'False',\n 'classifier:random_forest:criterion': 'entropy',\n 'classifier:random_forest:max_depth': 'None',\n 'classifier:random_forest:max_features': 1.1283591217664084,\n 'classifier:random_forest:max_leaf_nodes': 'None',\n 'classifier:random_forest:min_samples_leaf': 20,\n 'classifier:random_forest:min_samples_split': 4,\n 'classifier:random_forest:min_weight_fraction_leaf': 0.0,\n 'classifier:random_forest:n_estimators': 100, 'imputation:strategy':\n 'mean', 'one_hot_encoding:minimum_fraction': 0.00016005125675672377,\n 'one_hot_encoding:use_minimum_fraction': 'True',\n 'preprocessor:__choice__': 'extra_trees_preproc_for_classification',\n 'preprocessor:extra_trees_preproc_for_classification:bootstrap': 'True',\n 'preprocessor:extra_trees_preproc_for_classification:criterion':\n 'entropy',\n 'preprocessor:extra_trees_preproc_for_classification:max_depth': 'None',\n 
'preprocessor:extra_trees_preproc_for_classification:max_features': \n 3.089911925114402,\n 'preprocessor:extra_trees_preproc_for_classification:min_samples_leaf':\n 4,\n 'preprocessor:extra_trees_preproc_for_classification:min_samples_split':\n 5,\n 'preprocessor:extra_trees_preproc_for_classification:min_weight_fraction_leaf'\n : 0.0,\n 'preprocessor:extra_trees_preproc_for_classification:n_estimators': 100,\n 'rescaling:__choice__': 'min/max'}"}), "(configuration={'balancing:strategy':\n 'weighting', 'classifier:__choice__': 'random_forest',\n 'classifier:random_forest:bootstrap': 'False',\n 'classifier:random_forest:criterion': 'entropy',\n 'classifier:random_forest:max_depth': 'None',\n 'classifier:random_forest:max_features': 1.1283591217664084,\n 'classifier:random_forest:max_leaf_nodes': 'None',\n 'classifier:random_forest:min_samples_leaf': 20,\n 'classifier:random_forest:min_samples_split': 4,\n 'classifier:random_forest:min_weight_fraction_leaf': 0.0,\n 'classifier:random_forest:n_estimators': 100, 'imputation:strategy':\n 'mean', 'one_hot_encoding:minimum_fraction': 0.00016005125675672377,\n 'one_hot_encoding:use_minimum_fraction': 'True',\n 'preprocessor:__choice__': 'extra_trees_preproc_for_classification',\n 'preprocessor:extra_trees_preproc_for_classification:bootstrap': 'True',\n 'preprocessor:extra_trees_preproc_for_classification:criterion':\n 'entropy',\n 'preprocessor:extra_trees_preproc_for_classification:max_depth': 'None',\n 'preprocessor:extra_trees_preproc_for_classification:max_features': \n 3.089911925114402,\n 'preprocessor:extra_trees_preproc_for_classification:min_samples_leaf':\n 4,\n 'preprocessor:extra_trees_preproc_for_classification:min_samples_split':\n 5,\n 'preprocessor:extra_trees_preproc_for_classification:min_weight_fraction_leaf'\n : 0.0,\n 'preprocessor:extra_trees_preproc_for_classification:n_estimators': 100,\n 'rescaling:__choice__': 'min/max'})\n", (23548, 25070), False, 'from autosklearn.pipeline.classification 
import SimpleClassificationPipeline\n'), ((25196, 26730), 'autosklearn.pipeline.classification.SimpleClassificationPipeline', 'SimpleClassificationPipeline', ([], {'configuration': "{'balancing:strategy': 'weighting', 'classifier:__choice__':\n 'random_forest', 'classifier:random_forest:bootstrap': 'False',\n 'classifier:random_forest:criterion': 'gini',\n 'classifier:random_forest:max_depth': 'None',\n 'classifier:random_forest:max_features': 1.0,\n 'classifier:random_forest:max_leaf_nodes': 'None',\n 'classifier:random_forest:min_samples_leaf': 19,\n 'classifier:random_forest:min_samples_split': 6,\n 'classifier:random_forest:min_weight_fraction_leaf': 0.0,\n 'classifier:random_forest:n_estimators': 100, 'imputation:strategy':\n 'mean', 'one_hot_encoding:minimum_fraction': 0.009337300926998802,\n 'one_hot_encoding:use_minimum_fraction': 'True',\n 'preprocessor:__choice__': 'extra_trees_preproc_for_classification',\n 'preprocessor:extra_trees_preproc_for_classification:bootstrap': 'True',\n 'preprocessor:extra_trees_preproc_for_classification:criterion':\n 'entropy',\n 'preprocessor:extra_trees_preproc_for_classification:max_depth': 'None',\n 'preprocessor:extra_trees_preproc_for_classification:max_features': \n 2.851719023875125,\n 'preprocessor:extra_trees_preproc_for_classification:min_samples_leaf':\n 3,\n 'preprocessor:extra_trees_preproc_for_classification:min_samples_split':\n 7,\n 'preprocessor:extra_trees_preproc_for_classification:min_weight_fraction_leaf'\n : 0.0,\n 'preprocessor:extra_trees_preproc_for_classification:n_estimators': 100,\n 'rescaling:__choice__': 'standardize'}"}), "(configuration={'balancing:strategy':\n 'weighting', 'classifier:__choice__': 'random_forest',\n 'classifier:random_forest:bootstrap': 'False',\n 'classifier:random_forest:criterion': 'gini',\n 'classifier:random_forest:max_depth': 'None',\n 'classifier:random_forest:max_features': 1.0,\n 'classifier:random_forest:max_leaf_nodes': 'None',\n 
'classifier:random_forest:min_samples_leaf': 19,\n 'classifier:random_forest:min_samples_split': 6,\n 'classifier:random_forest:min_weight_fraction_leaf': 0.0,\n 'classifier:random_forest:n_estimators': 100, 'imputation:strategy':\n 'mean', 'one_hot_encoding:minimum_fraction': 0.009337300926998802,\n 'one_hot_encoding:use_minimum_fraction': 'True',\n 'preprocessor:__choice__': 'extra_trees_preproc_for_classification',\n 'preprocessor:extra_trees_preproc_for_classification:bootstrap': 'True',\n 'preprocessor:extra_trees_preproc_for_classification:criterion':\n 'entropy',\n 'preprocessor:extra_trees_preproc_for_classification:max_depth': 'None',\n 'preprocessor:extra_trees_preproc_for_classification:max_features': \n 2.851719023875125,\n 'preprocessor:extra_trees_preproc_for_classification:min_samples_leaf':\n 3,\n 'preprocessor:extra_trees_preproc_for_classification:min_samples_split':\n 7,\n 'preprocessor:extra_trees_preproc_for_classification:min_weight_fraction_leaf'\n : 0.0,\n 'preprocessor:extra_trees_preproc_for_classification:n_estimators': 100,\n 'rescaling:__choice__': 'standardize'})\n", (25224, 26730), False, 'from autosklearn.pipeline.classification import SimpleClassificationPipeline\n'), ((26856, 28397), 'autosklearn.pipeline.classification.SimpleClassificationPipeline', 'SimpleClassificationPipeline', ([], {'configuration': "{'balancing:strategy': 'weighting', 'classifier:__choice__':\n 'random_forest', 'classifier:random_forest:bootstrap': 'False',\n 'classifier:random_forest:criterion': 'gini',\n 'classifier:random_forest:max_depth': 'None',\n 'classifier:random_forest:max_features': 4.940497229306341,\n 'classifier:random_forest:max_leaf_nodes': 'None',\n 'classifier:random_forest:min_samples_leaf': 20,\n 'classifier:random_forest:min_samples_split': 7,\n 'classifier:random_forest:min_weight_fraction_leaf': 0.0,\n 'classifier:random_forest:n_estimators': 100, 'imputation:strategy':\n 'mean', 'one_hot_encoding:minimum_fraction': 
0.05798212052141874,\n 'one_hot_encoding:use_minimum_fraction': 'True',\n 'preprocessor:__choice__': 'extra_trees_preproc_for_classification',\n 'preprocessor:extra_trees_preproc_for_classification:bootstrap':\n 'False',\n 'preprocessor:extra_trees_preproc_for_classification:criterion': 'gini',\n 'preprocessor:extra_trees_preproc_for_classification:max_depth': 'None',\n 'preprocessor:extra_trees_preproc_for_classification:max_features': \n 2.866480230834629,\n 'preprocessor:extra_trees_preproc_for_classification:min_samples_leaf':\n 4,\n 'preprocessor:extra_trees_preproc_for_classification:min_samples_split':\n 4,\n 'preprocessor:extra_trees_preproc_for_classification:min_weight_fraction_leaf'\n : 0.0,\n 'preprocessor:extra_trees_preproc_for_classification:n_estimators': 100,\n 'rescaling:__choice__': 'min/max'}"}), "(configuration={'balancing:strategy':\n 'weighting', 'classifier:__choice__': 'random_forest',\n 'classifier:random_forest:bootstrap': 'False',\n 'classifier:random_forest:criterion': 'gini',\n 'classifier:random_forest:max_depth': 'None',\n 'classifier:random_forest:max_features': 4.940497229306341,\n 'classifier:random_forest:max_leaf_nodes': 'None',\n 'classifier:random_forest:min_samples_leaf': 20,\n 'classifier:random_forest:min_samples_split': 7,\n 'classifier:random_forest:min_weight_fraction_leaf': 0.0,\n 'classifier:random_forest:n_estimators': 100, 'imputation:strategy':\n 'mean', 'one_hot_encoding:minimum_fraction': 0.05798212052141874,\n 'one_hot_encoding:use_minimum_fraction': 'True',\n 'preprocessor:__choice__': 'extra_trees_preproc_for_classification',\n 'preprocessor:extra_trees_preproc_for_classification:bootstrap':\n 'False',\n 'preprocessor:extra_trees_preproc_for_classification:criterion': 'gini',\n 'preprocessor:extra_trees_preproc_for_classification:max_depth': 'None',\n 'preprocessor:extra_trees_preproc_for_classification:max_features': \n 2.866480230834629,\n 
'preprocessor:extra_trees_preproc_for_classification:min_samples_leaf':\n 4,\n 'preprocessor:extra_trees_preproc_for_classification:min_samples_split':\n 4,\n 'preprocessor:extra_trees_preproc_for_classification:min_weight_fraction_leaf'\n : 0.0,\n 'preprocessor:extra_trees_preproc_for_classification:n_estimators': 100,\n 'rescaling:__choice__': 'min/max'})\n", (26884, 28397), False, 'from autosklearn.pipeline.classification import SimpleClassificationPipeline\n'), ((28523, 30073), 'autosklearn.pipeline.classification.SimpleClassificationPipeline', 'SimpleClassificationPipeline', ([], {'configuration': "{'balancing:strategy': 'weighting', 'classifier:__choice__':\n 'random_forest', 'classifier:random_forest:bootstrap': 'False',\n 'classifier:random_forest:criterion': 'gini',\n 'classifier:random_forest:max_depth': 'None',\n 'classifier:random_forest:max_features': 3.377815501014255,\n 'classifier:random_forest:max_leaf_nodes': 'None',\n 'classifier:random_forest:min_samples_leaf': 19,\n 'classifier:random_forest:min_samples_split': 3,\n 'classifier:random_forest:min_weight_fraction_leaf': 0.0,\n 'classifier:random_forest:n_estimators': 100, 'imputation:strategy':\n 'median', 'one_hot_encoding:minimum_fraction': 0.00935903671597016,\n 'one_hot_encoding:use_minimum_fraction': 'True',\n 'preprocessor:__choice__': 'extra_trees_preproc_for_classification',\n 'preprocessor:extra_trees_preproc_for_classification:bootstrap':\n 'False',\n 'preprocessor:extra_trees_preproc_for_classification:criterion':\n 'entropy',\n 'preprocessor:extra_trees_preproc_for_classification:max_depth': 'None',\n 'preprocessor:extra_trees_preproc_for_classification:max_features': \n 3.926283104591668,\n 'preprocessor:extra_trees_preproc_for_classification:min_samples_leaf':\n 1,\n 'preprocessor:extra_trees_preproc_for_classification:min_samples_split':\n 7,\n 'preprocessor:extra_trees_preproc_for_classification:min_weight_fraction_leaf'\n : 0.0,\n 
'preprocessor:extra_trees_preproc_for_classification:n_estimators': 100,\n 'rescaling:__choice__': 'min/max'}"}), "(configuration={'balancing:strategy':\n 'weighting', 'classifier:__choice__': 'random_forest',\n 'classifier:random_forest:bootstrap': 'False',\n 'classifier:random_forest:criterion': 'gini',\n 'classifier:random_forest:max_depth': 'None',\n 'classifier:random_forest:max_features': 3.377815501014255,\n 'classifier:random_forest:max_leaf_nodes': 'None',\n 'classifier:random_forest:min_samples_leaf': 19,\n 'classifier:random_forest:min_samples_split': 3,\n 'classifier:random_forest:min_weight_fraction_leaf': 0.0,\n 'classifier:random_forest:n_estimators': 100, 'imputation:strategy':\n 'median', 'one_hot_encoding:minimum_fraction': 0.00935903671597016,\n 'one_hot_encoding:use_minimum_fraction': 'True',\n 'preprocessor:__choice__': 'extra_trees_preproc_for_classification',\n 'preprocessor:extra_trees_preproc_for_classification:bootstrap':\n 'False',\n 'preprocessor:extra_trees_preproc_for_classification:criterion':\n 'entropy',\n 'preprocessor:extra_trees_preproc_for_classification:max_depth': 'None',\n 'preprocessor:extra_trees_preproc_for_classification:max_features': \n 3.926283104591668,\n 'preprocessor:extra_trees_preproc_for_classification:min_samples_leaf':\n 1,\n 'preprocessor:extra_trees_preproc_for_classification:min_samples_split':\n 7,\n 'preprocessor:extra_trees_preproc_for_classification:min_weight_fraction_leaf'\n : 0.0,\n 'preprocessor:extra_trees_preproc_for_classification:n_estimators': 100,\n 'rescaling:__choice__': 'min/max'})\n", (28551, 30073), False, 'from autosklearn.pipeline.classification import SimpleClassificationPipeline\n'), ((30195, 31749), 'autosklearn.pipeline.classification.SimpleClassificationPipeline', 'SimpleClassificationPipeline', ([], {'configuration': "{'balancing:strategy': 'weighting', 'classifier:__choice__':\n 'random_forest', 'classifier:random_forest:bootstrap': 'False',\n 'classifier:random_forest:criterion': 
'entropy',\n 'classifier:random_forest:max_depth': 'None',\n 'classifier:random_forest:max_features': 2.8412937716906717,\n 'classifier:random_forest:max_leaf_nodes': 'None',\n 'classifier:random_forest:min_samples_leaf': 20,\n 'classifier:random_forest:min_samples_split': 20,\n 'classifier:random_forest:min_weight_fraction_leaf': 0.0,\n 'classifier:random_forest:n_estimators': 100, 'imputation:strategy':\n 'mean', 'one_hot_encoding:minimum_fraction': 0.004994082832266316,\n 'one_hot_encoding:use_minimum_fraction': 'True',\n 'preprocessor:__choice__': 'extra_trees_preproc_for_classification',\n 'preprocessor:extra_trees_preproc_for_classification:bootstrap': 'True',\n 'preprocessor:extra_trees_preproc_for_classification:criterion':\n 'entropy',\n 'preprocessor:extra_trees_preproc_for_classification:max_depth': 'None',\n 'preprocessor:extra_trees_preproc_for_classification:max_features': \n 4.139497534039955,\n 'preprocessor:extra_trees_preproc_for_classification:min_samples_leaf':\n 2,\n 'preprocessor:extra_trees_preproc_for_classification:min_samples_split':\n 15,\n 'preprocessor:extra_trees_preproc_for_classification:min_weight_fraction_leaf'\n : 0.0,\n 'preprocessor:extra_trees_preproc_for_classification:n_estimators': 100,\n 'rescaling:__choice__': 'standardize'}"}), "(configuration={'balancing:strategy':\n 'weighting', 'classifier:__choice__': 'random_forest',\n 'classifier:random_forest:bootstrap': 'False',\n 'classifier:random_forest:criterion': 'entropy',\n 'classifier:random_forest:max_depth': 'None',\n 'classifier:random_forest:max_features': 2.8412937716906717,\n 'classifier:random_forest:max_leaf_nodes': 'None',\n 'classifier:random_forest:min_samples_leaf': 20,\n 'classifier:random_forest:min_samples_split': 20,\n 'classifier:random_forest:min_weight_fraction_leaf': 0.0,\n 'classifier:random_forest:n_estimators': 100, 'imputation:strategy':\n 'mean', 'one_hot_encoding:minimum_fraction': 0.004994082832266316,\n 'one_hot_encoding:use_minimum_fraction': 
'True',\n 'preprocessor:__choice__': 'extra_trees_preproc_for_classification',\n 'preprocessor:extra_trees_preproc_for_classification:bootstrap': 'True',\n 'preprocessor:extra_trees_preproc_for_classification:criterion':\n 'entropy',\n 'preprocessor:extra_trees_preproc_for_classification:max_depth': 'None',\n 'preprocessor:extra_trees_preproc_for_classification:max_features': \n 4.139497534039955,\n 'preprocessor:extra_trees_preproc_for_classification:min_samples_leaf':\n 2,\n 'preprocessor:extra_trees_preproc_for_classification:min_samples_split':\n 15,\n 'preprocessor:extra_trees_preproc_for_classification:min_weight_fraction_leaf'\n : 0.0,\n 'preprocessor:extra_trees_preproc_for_classification:n_estimators': 100,\n 'rescaling:__choice__': 'standardize'})\n", (30223, 31749), False, 'from autosklearn.pipeline.classification import SimpleClassificationPipeline\n'), ((32635, 32662), 'numpy.sum', 'np.sum', (['predictions'], {'axis': '(0)'}), '(predictions, axis=0)\n', (32641, 32662), True, 'import numpy as np\n')] |
import os
import glob
from pathlib import Path
from .textgrid_utils import build_hashtable_textgrid, get_textgrid_sa
import soundfile as sf
import numpy as np
def hash_librispeech(librispeech_traintest):
    """Map each speaker directory (a ``Path``) to one of its ``.wav`` files.

    Recursively scans *librispeech_traintest* for ``.wav`` utterances and keys
    each one by its grand-parent directory (LibriSpeech layout:
    ``speaker/chapter/file.wav``).

    NOTE(review): if a directory holds several utterances, later files
    overwrite earlier ones — only the last scanned file per key survives.
    Confirm this is the intended behavior.
    """
    hashtab = {}
    utterances = glob.glob(os.path.join(librispeech_traintest, "**/*.wav"), recursive=True)
    for utt in utterances:
        # Renamed from `id` to avoid shadowing the builtin.
        utt_dir = Path(utt).parent.parent
        hashtab[utt_dir] = utt
    return hashtab
def ema_energy(x, alpha=0.99):
    """Exponential moving average of per-frame energies.

    Seeds the average with the energy of the first frame, then blends each
    subsequent frame's energy in with weight ``1 - alpha``.
    """
    acc = np.sum(x[0] ** 2)
    for frame in x[1:]:
        acc = alpha * acc + (1 - alpha) * np.sum(frame ** 2)
    return acc
def rolling_window(a, window):
    """Return a zero-copy sliding-window view over the last axis of *a*."""
    n_windows = a.shape[-1] - window + 1
    out_shape = (*a.shape[:-1], n_windows, window)
    # The window axis reuses the stride of the original last axis.
    out_strides = (*a.strides, a.strides[-1])
    return np.lib.stride_tricks.as_strided(a, shape=out_shape, strides=out_strides)
def estimate_snr(speech, file):
    """Estimate a speech-to-noise energy ratio for one audio file.

    Parameters
    ----------
    speech : list of (start, stop) pairs, in seconds, marking speech segments
        (assumed sorted and non-overlapping — TODO confirm against caller).
    file : path to an audio file readable by ``soundfile``.

    Returns
    -------
    float
        min(speech-segment energy) / (min(inter-segment energy) + 1e-8).
    """
    def get_energy(signal): # probably windowed estimation is better but we take min afterwards and
        return np.sum(signal**2)
    audio, fs = sf.read(file)
    # Remove DC offset before measuring energies.
    audio = audio - np.mean(audio)
    s, e = [int(x*fs) for x in speech[0]]
    speech_lvl = [get_energy(audio[s:e])]
    # Leading region before the first speech segment counts as noise.
    noise_lvl = [get_energy(audio[0: s])]
    for i in range(1, len(speech)):
        speech_lvl.append(get_energy(audio[int(speech[i][0]*fs):int(speech[i][-1]*fs)]))
        # Gap between consecutive speech segments counts as noise.
        noise_lvl.append(get_energy(audio[int(speech[i-1][-1]*fs): int(speech[i][0]*fs)]))
    # Trailing region after the last speech segment.
    noise_lvl.append(get_energy(audio[int(speech[-1][-1]*fs):]))
    noise_lvl = min(noise_lvl) # we take min to avoid breathing
    speech_lvl = min(speech_lvl)
    return speech_lvl / (noise_lvl + 1e-8)
def build_utterance_list(librispeech_dir, textgrid_dir, merge_shorter=0.15, fs=16000):
    """Collect per-speaker utterance segments from LibriSpeech + TextGrid alignments.

    Scans *librispeech_dir* for ``.flac`` files, looks up each one's alignment
    in *textgrid_dir*, extracts speech segments (merging gaps shorter than
    *merge_shorter* seconds via ``get_textgrid_sa``), estimates per-file SNR,
    and drops every chapter in which at least 10% of files fall below an SNR
    of 25.

    Returns
    -------
    dict
        speaker_id -> list of utterance groups, each group a list of dicts
        with keys: textgrid, file, start, stop, words, spk_id, chapter_id,
        utt_id.
    """
    hashgrid = build_hashtable_textgrid(textgrid_dir)
    audiofiles = glob.glob(os.path.join(librispeech_dir, "**/*.flac"), recursive=True)
    utterances = {}
    tot_missing = 0
    snrs = []
    for f in audiofiles:
        filename = Path(f).stem
        if filename not in hashgrid.keys():
            print("Missing Alignment file for : {}".format(f))
            tot_missing += 1
            continue
        speech, words = get_textgrid_sa(hashgrid[filename], merge_shorter)
        spk_id = Path(f).parent.parent.stem
        sub_utterances = []
        # get all segments for this speaker
        if not speech:
            raise EnvironmentError("something is wrong with alignments or parsing, all librispeech files have speech")
        snr = estimate_snr(speech, f)
        snrs.append([f, snr])
        for i in range(len(speech)):
            # start/stop stay in seconds; sample-index conversion is left
            # commented out below (presumably done downstream — verify).
            start, stop = speech[i]
            #start = #int(start*fs)
            #stop = int(stop*fs)
            tmp = {"textgrid": hashgrid[filename], "file": f, "start": start, "stop": stop, "words": words[i],
                   "spk_id": spk_id, "chapter_id": Path(f).parent.stem, "utt_id": Path(f).stem}
            sub_utterances.append(tmp)
        if spk_id not in utterances.keys():
            utterances[spk_id] = [sub_utterances]
        else:
            utterances[spk_id].append(sub_utterances)
    # SNR-based filtering: count, per chapter, the fraction of files whose
    # SNR is below 25; chapters at or above a 10% bad fraction are dropped.
    snrs = sorted(snrs, key = lambda x : x[-1])
    chapters = {}
    for x in snrs:
        file, snr = x
        chapter = Path(file).parent.stem
        if chapter not in chapters.keys():
            chapters[chapter] = [0, 0]  # [total files, low-SNR files]
        chapters[chapter][0] += 1
        if snr < 25: # tuned manually
            chapters[chapter][-1] += 1
    # normalize to a bad-SNR fraction per chapter
    for k in chapters.keys():
        chapters[k] = chapters[k][-1] / chapters[k][0]
    prev_tot_utterances = sum([len(utterances[k]) for k in utterances.keys()])
    new = {}
    for spk in utterances.keys():
        new[spk] = []
        for utt in utterances[spk]:
            if chapters[utt[0]["chapter_id"]] >= 0.1:
                continue
            else:
                new[spk].append(utt)
        # NOTE(review): this `continue` is a no-op (end of loop body), so
        # speakers left with zero utterances remain in the dict — confirm
        # whether they were meant to be removed.
        if len(new[spk]) == 0:
            continue
    utterances = new
    print("Discarded {} over {} files because of low SNR".format(prev_tot_utterances - \
                                                                 sum([len(utterances[k]) for k in utterances.keys()]), prev_tot_utterances))
    return utterances
if __name__ == "__main__":
    # Ad-hoc manual run against hard-coded local paths; adjust before use.
    build_utterance_list("/media/sam/Data/LibriSpeech/test-clean/", "/home/sam/Downloads/librispeech_alignments/test-clean/")
| [
"numpy.mean",
"pathlib.Path",
"os.path.join",
"numpy.lib.stride_tricks.as_strided",
"numpy.sum",
"soundfile.read"
] | [((468, 485), 'numpy.sum', 'np.sum', (['(x[0] ** 2)'], {}), '(x[0] ** 2)\n', (474, 485), True, 'import numpy as np\n'), ((730, 794), 'numpy.lib.stride_tricks.as_strided', 'np.lib.stride_tricks.as_strided', (['a'], {'shape': 'shape', 'strides': 'strides'}), '(a, shape=shape, strides=strides)\n', (761, 794), True, 'import numpy as np\n'), ((980, 993), 'soundfile.read', 'sf.read', (['file'], {}), '(file)\n', (987, 993), True, 'import soundfile as sf\n'), ((251, 298), 'os.path.join', 'os.path.join', (['librispeech_traintest', '"""**/*.wav"""'], {}), "(librispeech_traintest, '**/*.wav')\n", (263, 298), False, 'import os\n'), ((945, 964), 'numpy.sum', 'np.sum', (['(signal ** 2)'], {}), '(signal ** 2)\n', (951, 964), True, 'import numpy as np\n'), ((1014, 1028), 'numpy.mean', 'np.mean', (['audio'], {}), '(audio)\n', (1021, 1028), True, 'import numpy as np\n'), ((1751, 1793), 'os.path.join', 'os.path.join', (['librispeech_dir', '"""**/*.flac"""'], {}), "(librispeech_dir, '**/*.flac')\n", (1763, 1793), False, 'import os\n'), ((1913, 1920), 'pathlib.Path', 'Path', (['f'], {}), '(f)\n', (1917, 1920), False, 'from pathlib import Path\n'), ((356, 365), 'pathlib.Path', 'Path', (['utt'], {}), '(utt)\n', (360, 365), False, 'from pathlib import Path\n'), ((539, 556), 'numpy.sum', 'np.sum', (['(x[i] ** 2)'], {}), '(x[i] ** 2)\n', (545, 556), True, 'import numpy as np\n'), ((3327, 3337), 'pathlib.Path', 'Path', (['file'], {}), '(file)\n', (3331, 3337), False, 'from pathlib import Path\n'), ((2176, 2183), 'pathlib.Path', 'Path', (['f'], {}), '(f)\n', (2180, 2183), False, 'from pathlib import Path\n'), ((2823, 2830), 'pathlib.Path', 'Path', (['f'], {}), '(f)\n', (2827, 2830), False, 'from pathlib import Path\n'), ((2792, 2799), 'pathlib.Path', 'Path', (['f'], {}), '(f)\n', (2796, 2799), False, 'from pathlib import Path\n')] |
# import modin.pandas as pd
from pathlib import Path
from typing import List
import numpy as np
import pandas as pd
import plotly.express as px
import plotly.graph_objects as go
from plotly.graph_objs import Layout
from tqdm import tqdm
from ..utils import STATUS_ARROW
# Shared Plotly layout: white paper/plot background, black grid on both axes.
_LAYOUT = Layout(
    paper_bgcolor='rgb(255,255,255)',
    plot_bgcolor='rgb(255,255,255)',
    yaxis={'gridcolor': 'black'},
    xaxis={'gridcolor': 'black'},
)
class Features(object):
    """Analyze and plot request-log feature distributions (size, request
    frequency, delta-time between requests) as histogram-bin and violin plots.

    ``df`` is either one concatenated DataFrame (``concatenated=True``) or a
    list of per-period DataFrames. Plots are written as HTML files into
    ``output_folder``.
    """
    def __init__(self, features: dict, df: 'pd.DataFrame',
                 concatenated: bool = True,
                 output_folder: str = "analysis",
                 region: str = 'all',
                 group_by: str = 'd'):
        """Filter the data, add the grouping column, and register the
        float/bucketed features declared in ``features``.

        Parameters
        ----------
        features : dict
            Feature name -> config dict; entries with ``type == "float"`` and
            truthy ``buckets`` register their ``keys`` as bin edges (optionally
            extended with an open-right bucket of ``last_edge * 2``).
        df : DataFrame or list of DataFrames (see class docstring).
        concatenated : whether ``df`` is a single DataFrame.
        output_folder : directory for generated plots (created if missing).
        region : 'all', 'it' or 'us' — selects DataType codes kept.
        group_by : 'd' (day), 'w' (week) or 'm' (month).
        """
        self._df: 'pd.DataFrame' = df
        self._concatenated: bool = concatenated
        self._region: str = region
        self._group_by: str = group_by
        self._filter_data(concatenated)
        self._add_group_column()
        self._output_folder = Path(output_folder)
        self._output_folder.mkdir(parents=True, exist_ok=True)
        self._features = []
        self._features_data = {}
        for key, value in features.items():
            if value.get('type', False) == "float" and value.get('buckets', False):
                cur_values = []
                cur_values.extend(value.get('keys'))
                if value.get('bucket_open_right', False):
                    # Open-right bucket: extend with double the last edge.
                    cur_values.extend([cur_values[-1]*2])
                self._features.append(key)
                setattr(self, key, cur_values)
    def _add_group_column(self):
        """Derive a day/week/month column from the `reqDay` unix timestamp."""
        if self._concatenated:
            self._df['datetime'] = pd.to_datetime(self._df.reqDay, unit='s')
            if self._group_by == 'd':
                self._df['day'] = self._df.datetime.dt.day
            elif self._group_by == 'w':
                self._df['week'] = self._df.datetime.dt.week
            elif self._group_by == 'm':
                self._df['month'] = self._df.datetime.dt.month
        else:
            # List-of-DataFrames case: mutate each frame in place.
            for cur_df in self._df:
                cur_df['datetime'] = pd.to_datetime(cur_df.reqDay, unit='s')
                if self._group_by == 'd':
                    cur_df['day'] = cur_df.datetime.dt.day
                elif self._group_by == 'w':
                    cur_df['week'] = cur_df.datetime.dt.week
                elif self._group_by == 'm':
                    cur_df['month'] = cur_df.datetime.dt.month
    def _filter_data(self, concatenated: bool = True):
        """Keep only 'data'/'mc' rows (numeric codes per region) and
        successful jobs.

        Numeric DataType codes: 0 plus 1 for region 'it', 0 plus 3 for 'us'
        — presumably 0=data and 1/3=mc per region; verify against the
        upstream encoding.
        """
        print(f"{STATUS_ARROW}Filter DataType data and mc")
        if concatenated:
            if self._df.DataType.dtype == np.int64:
                if self._region == 'it':
                    self._df = self._df[
                        (self._df.DataType == 0) | (self._df.DataType == 1)
                    ]
                elif self._region == 'us':
                    self._df = self._df[
                        (self._df.DataType == 0) | (self._df.DataType == 3)
                    ]
            else:
                self._df = self._df[
                    (self._df.DataType == "data") | (self._df.DataType == "mc")
                ]
        else:
            for idx in tqdm(range(len(self._df))):
                cur_df = self._df[idx]
                if cur_df.DataType.dtype == np.int64:
                    if self._region == 'it':
                        self._df[idx] = cur_df[
                            (cur_df.DataType == 0) | (cur_df.DataType == 1)
                        ]
                    elif self._region == 'us':
                        self._df[idx] = cur_df[
                            (cur_df.DataType == 0) | (cur_df.DataType == 3)
                        ]
                else:
                    self._df[idx] = cur_df[
                        (cur_df.DataType == "data") | (cur_df.DataType == "mc")
                    ]
        print(f"{STATUS_ARROW}Filter success jobs")
        if concatenated:
            self._df = self._df[self._df.JobSuccess.astype(bool)]
        else:
            for idx in tqdm(range(len(self._df))):
                cur_df = self._df[idx]
                self._df[idx] = cur_df[cur_df.JobSuccess.astype(bool)]
    def check_all_features(self, features: List[str] = []):
        """Bin and plot every requested feature (all registered ones if
        ``features`` is empty).

        NOTE(review): mutable default argument — harmless here because the
        default list is only read, never mutated.
        """
        cur_features = []
        if features:
            cur_features.extend(features)
        else:
            cur_features.extend(self._features)
        for feature in tqdm(cur_features,
                            desc=f"{STATUS_ARROW}Check features",
                            ascii=True):
            np_hist = self.check_bins_of(feature)
            self.plot_bins_of(feature, np_hist)
            self.plot_violin_of(feature, np_hist)
    def __get_groups(self):
        """Return an iterable of (key, DataFrame) groups according to
        ``group_by``; merges per-period frames by week/month in the
        non-concatenated case."""
        groups = None
        if self._concatenated:
            if self._group_by == 'd':
                groups = self._df.groupby('reqDay')
            elif self._group_by == 'w':
                groups = self._df.groupby('week')
            elif self._group_by == 'm':
                groups = self._df.groupby('month')
        else:
            if self._group_by == 'd':
                # Each element of the list is assumed to be one day already.
                groups = [
                    (idx, cur_df)
                    for idx, cur_df in enumerate(self._df)
                ]
            else:
                if self._group_by == 'w':
                    group_by = 'week'
                elif self._group_by == 'm':
                    group_by = 'month'
                groups = {}
                for cur_df in self._df:
                    for week, cur_week in cur_df.groupby(group_by):
                        if week not in groups:
                            groups[week] = cur_week
                        else:
                            groups[week] = pd.concat([
                                groups[week],
                                cur_week,
                            ], ignore_index=True)
                groups = [
                    (group_key, groups[group_key])
                    for group_key in sorted(groups)
                ]
        return groups
    def check_bins_of(self, feature: str, n_bins: int = 6):
        """Collect the raw data for ``feature`` ('size' in MiB, 'numReq'
        per-group request counts, or 'deltaLastRequest' gaps between repeats),
        cache it in ``self._features_data``, and histogram it.

        Bin edges come from the registered feature buckets when available,
        otherwise from ``n_bins`` quantiles plus an open-right edge.
        Returns ``(counts, bins)`` as numpy arrays.
        """
        all_data = None
        if feature == 'size':
            if self._concatenated:
                sizes = (self._df['Size'] / 1024**2).astype(int).to_numpy()
            else:
                sizes = np.array([])
                for cur_df in tqdm(
                        self._df,
                        desc=f"{STATUS_ARROW}Calculate sizes x day",
                        ascii=True):
                    sizes = np.concatenate([
                        sizes, (cur_df['Size'] / 1024 **
                                2).astype(int).to_numpy()
                    ])
            self._features_data[feature] = sizes
            all_data = sizes
        elif feature == 'numReq':
            numReqXGroup = np.array([])
            for _, group in tqdm(self.__get_groups(),
                                 desc=f"{STATUS_ARROW}Calculate frequencies x day",
                                 ascii=True):
                numReqXGroup = np.concatenate([
                    numReqXGroup, group.Filename.value_counts().to_numpy()
                ])
            self._features_data[feature] = numReqXGroup
            all_data = numReqXGroup
        elif feature == 'deltaLastRequest':
            delta_files = []
            for _, group in tqdm(self.__get_groups(),
                                 desc=f"{STATUS_ARROW}Calculate delta times",
                                 ascii=True):
                files = {}
                # Delta = number of requests since the same file was last seen.
                for _t_, row in enumerate(group.itertuples()):
                    filename = row.Filename
                    if filename not in files:
                        files[filename] = _t_
                    else:
                        cur_delta = _t_ - files[filename]
                        files[filename] = _t_
                        delta_files.append(cur_delta)
                # NOTE(review): converting inside the loop turns the list into
                # an ndarray after the first group; a second group's .append
                # would fail — confirm this path only ever sees one group.
                delta_files = np.array(delta_files)
            self._features_data[feature] = delta_files
            all_data = delta_files
        else:
            raise Exception(
                f"ERROR: feature {feature} can not be checked...")
        if feature in self._features:
            cur_bins = np.array(getattr(self, feature))
        else:
            # _, cur_bins = np.histogram(
            #     all_data,
            #     bins=6,
            #     density=False
            # )
            # Quantile-based edges with an open-right final bucket.
            cur_bins = []
            step = int(100. / n_bins)
            for qx in range(step, 100, step):
                cur_bins.append(np.quantile(all_data, qx/100.))
            cur_bins = np.array(cur_bins + [cur_bins[-1]*2])
        if feature == "numReq":
            # Counts are integral; deduplicate edges after truncation.
            cur_bins = np.unique(cur_bins.astype(int))
        prev_bin = 0.
        counts = []
        for bin_idx, cur_bin in enumerate(cur_bins):
            if bin_idx != cur_bins.shape[0] - 1:
                cur_count = all_data[
                    (all_data > prev_bin) &
                    (all_data <= cur_bin)
                ].shape[0]
            else:
                # Last bucket is open on the right.
                cur_count = all_data[
                    (all_data > prev_bin)
                ].shape[0]
            counts.append(cur_count)
            prev_bin = cur_bin
        counts = np.array(counts)
        return counts, cur_bins
    def plot_bins_of(self, feature: str, np_hist: tuple):
        """Write a percentage bar chart of the ``(counts, bins)`` histogram
        to ``feature_<name>_bins.html``."""
        counts, bins = np_hist
        # print(counts, bins)
        percentages = (counts / counts.sum()) * 100.
        # A zero total makes every entry NaN; show those buckets as 0%.
        percentages[np.isnan(percentages)] = 0.
        fig = px.bar(
            x=[str(cur_bin) for cur_bin in bins[:-1]] + ['max'],
            y=percentages,
            title=f"Feature {feature}",
        )
        # fig.update_xaxes(type="log")
        # fig.update_yaxes(type="log")
        fig.update_layout(_LAYOUT)
        fig.update_layout(
            xaxis_title="bin",
            yaxis_title="%",
            xaxis={
                'type': "category",
            }
        )
        # fig.update_yaxes(type="linear")
        # fig.show()
        # print(f"{STATUS_ARROW}Save bin plot of {feature} as png")
        # fig.write_image(
        #     self._output_folder.joinpath(
        #         f"feature_{feature}_bins.png"
        #     ).as_posix()
        # )
        print(f"{STATUS_ARROW}Save bin plot of {feature} as html")
        fig.write_html(
            self._output_folder.joinpath(
                f"feature_{feature}_bins.html"
            ).as_posix()
        )
    def plot_violin_of(self, feature: str, np_hist: tuple):
        """Write violin plots (one global, one per bucket) of the cached
        feature data to ``feature_<name>_violin.html``."""
        _, bins = np_hist
        cur_feature_data = self._features_data[feature]
        fig = go.Figure()
        fig.add_trace(
            go.Violin(
                y=cur_feature_data,
                x0=0,
                name="global",
                box_visible=True,
                meanline_visible=True,
            )
        )
        prev_bin = 0.
        for bin_idx, cur_bin in enumerate(bins, 1):
            if bin_idx != bins.shape[0]:
                cur_data = cur_feature_data[
                    (cur_feature_data > prev_bin) &
                    (cur_feature_data <= cur_bin)
                ]
            else:
                # Last bucket is open on the right.
                cur_data = cur_feature_data[
                    (cur_feature_data > prev_bin)
                ]
            fig.add_trace(
                go.Violin(
                    y=cur_data,
                    x0=bin_idx,
                    name=str(cur_bin) if bin_idx != bins.shape[0] else 'max',
                    box_visible=True,
                    meanline_visible=True,
                    # points="all",
                )
            )
            prev_bin = cur_bin
        fig.update_layout(_LAYOUT)
        fig.update_layout({
            'title': f"Feature {feature}",
            'xaxis': {
                'tickmode': 'array',
                'tickvals': list(range(len(bins)+1)),
                'ticktext': ['global'] + [str(cur_bin) for cur_bin in bins]
            }
        })
        # fig.show()
        # print(f"{STATUS_ARROW}Save violin plot of {feature} as pnh")
        # fig.write_image(
        #     self._output_folder.joinpath(
        #         f"feature_{feature}_violin.png"
        #     ).as_posix()
        # )
        print(f"{STATUS_ARROW}Save violin plot of {feature} as html")
        fig.write_html(
            self._output_folder.joinpath(
                f"feature_{feature}_violin.html"
            ).as_posix()
        )
| [
"pathlib.Path",
"tqdm.tqdm",
"plotly.graph_objects.Violin",
"numpy.array",
"plotly.graph_objects.Figure",
"numpy.quantile",
"numpy.isnan",
"plotly.graph_objs.Layout",
"pandas.concat",
"pandas.to_datetime"
] | [((283, 420), 'plotly.graph_objs.Layout', 'Layout', ([], {'paper_bgcolor': '"""rgb(255,255,255)"""', 'plot_bgcolor': '"""rgb(255,255,255)"""', 'yaxis': "{'gridcolor': 'black'}", 'xaxis': "{'gridcolor': 'black'}"}), "(paper_bgcolor='rgb(255,255,255)', plot_bgcolor='rgb(255,255,255)',\n yaxis={'gridcolor': 'black'}, xaxis={'gridcolor': 'black'})\n", (289, 420), False, 'from plotly.graph_objs import Layout\n'), ((957, 976), 'pathlib.Path', 'Path', (['output_folder'], {}), '(output_folder)\n', (961, 976), False, 'from pathlib import Path\n'), ((4384, 4452), 'tqdm.tqdm', 'tqdm', (['cur_features'], {'desc': 'f"""{STATUS_ARROW}Check features"""', 'ascii': '(True)'}), "(cur_features, desc=f'{STATUS_ARROW}Check features', ascii=True)\n", (4388, 4452), False, 'from tqdm import tqdm\n'), ((9226, 9242), 'numpy.array', 'np.array', (['counts'], {}), '(counts)\n', (9234, 9242), True, 'import numpy as np\n'), ((10601, 10612), 'plotly.graph_objects.Figure', 'go.Figure', ([], {}), '()\n', (10610, 10612), True, 'import plotly.graph_objects as go\n'), ((1621, 1662), 'pandas.to_datetime', 'pd.to_datetime', (['self._df.reqDay'], {'unit': '"""s"""'}), "(self._df.reqDay, unit='s')\n", (1635, 1662), True, 'import pandas as pd\n'), ((8586, 8625), 'numpy.array', 'np.array', (['(cur_bins + [cur_bins[-1] * 2])'], {}), '(cur_bins + [cur_bins[-1] * 2])\n', (8594, 8625), True, 'import numpy as np\n'), ((9468, 9489), 'numpy.isnan', 'np.isnan', (['percentages'], {}), '(percentages)\n', (9476, 9489), True, 'import numpy as np\n'), ((10649, 10744), 'plotly.graph_objects.Violin', 'go.Violin', ([], {'y': 'cur_feature_data', 'x0': '(0)', 'name': '"""global"""', 'box_visible': '(True)', 'meanline_visible': '(True)'}), "(y=cur_feature_data, x0=0, name='global', box_visible=True,\n meanline_visible=True)\n", (10658, 10744), True, 'import plotly.graph_objects as go\n'), ((2051, 2090), 'pandas.to_datetime', 'pd.to_datetime', (['cur_df.reqDay'], {'unit': '"""s"""'}), "(cur_df.reqDay, unit='s')\n", (2065, 
2090), True, 'import pandas as pd\n'), ((6278, 6290), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (6286, 6290), True, 'import numpy as np\n'), ((6321, 6392), 'tqdm.tqdm', 'tqdm', (['self._df'], {'desc': 'f"""{STATUS_ARROW}Calculate sizes x day"""', 'ascii': '(True)'}), "(self._df, desc=f'{STATUS_ARROW}Calculate sizes x day', ascii=True)\n", (6325, 6392), False, 'from tqdm import tqdm\n'), ((6789, 6801), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (6797, 6801), True, 'import numpy as np\n'), ((7913, 7934), 'numpy.array', 'np.array', (['delta_files'], {}), '(delta_files)\n', (7921, 7934), True, 'import numpy as np\n'), ((8530, 8563), 'numpy.quantile', 'np.quantile', (['all_data', '(qx / 100.0)'], {}), '(all_data, qx / 100.0)\n', (8541, 8563), True, 'import numpy as np\n'), ((5690, 5744), 'pandas.concat', 'pd.concat', (['[groups[week], cur_week]'], {'ignore_index': '(True)'}), '([groups[week], cur_week], ignore_index=True)\n', (5699, 5744), True, 'import pandas as pd\n')] |
import torch
import torch.nn as nn
import numpy as np
from torch import optim
import math
from torch.autograd import Variable
import torch.nn.functional as F
import matplotlib.pyplot as plt
gamma = 0.99
class Environment():
    """Toy environment over two scalars ``a`` and ``b`` moved in ±0.01 steps.

    The tracked quantity is the gap ``|a - b|``: growing it yields reward +1,
    shrinking it yields reward -1 and ends the episode, leaving it unchanged
    yields reward 0.
    """

    def __init__(self):
        self.a = 1.232
        self.b = 1.545
        self.prev = np.abs(self.a - self.b)  # previous gap |a - b|

    def reset(self):
        """Restore the initial state; return the observation [a, b, gap]."""
        self.a = 1.232
        self.b = 1.545
        self.prev = np.abs(self.a - self.b)
        return np.array([self.a, self.b, self.prev])

    def step(self, action):
        """Apply one of four joint moves and return (observation, reward, done).

        Actions: 0 = both up, 1 = both down, 2 = a up / b down,
        3 = a down / b up.
        """
        done = False
        if action == 0:
            self.a += 0.01
            self.b += 0.01
        if action == 1:
            self.a -= 0.01
            self.b -= 0.01
        if action == 2:
            self.a += 0.01
            self.b -= 0.01
        if action == 3:
            self.a -= 0.01
            self.b += 0.01
        curr = np.abs(self.a - self.b)
        # Tolerant float comparison, consistent with modified_step: an exact
        # `==` after repeated ±0.01 float updates virtually never holds, which
        # made the zero-reward branch effectively unreachable.
        if math.isclose(self.prev, curr, rel_tol=1e-5):
            reward = 0
        elif self.prev > curr:
            done = True
            reward = -1
        else:
            reward = 1
        self.prev = curr
        return np.array([self.a, self.b, self.prev]), reward, done

    def modified_step(self, action):
        """Two-component action: action[0] moves a (0: +0.01, else: -0.01),
        action[1] moves b likewise. Returns (observation, reward, done)."""
        done = False
        if action[0] == 0:
            self.a += 0.01
        else:
            self.a -= 0.01
        if action[1] == 0:
            self.b += 0.01
        else:
            self.b -= 0.01
        curr = np.abs(self.a - self.b)
        if math.isclose(self.prev, curr, rel_tol=1e-5):
            reward = 0
        elif self.prev > curr:
            done = True
            # print(self.a, self.b, self.prev, curr)
            reward = -1
        else:
            reward = 1
        self.prev = curr
        return np.array([self.a, self.b, self.prev]), reward, done
def discount_rewards(r, gamma=0.99):
    """Compute normalized discounted returns for a 1-D reward array.

    Parameters
    ----------
    r : np.ndarray
        1-D float array of per-step rewards.
    gamma : float, optional
        Discount factor. Previously read from the module-level global; now a
        parameter (default 0.99, matching that global) so the function is
        self-contained — existing callers are unaffected.

    Returns
    -------
    np.ndarray
        Discounted returns normalized to zero mean and unit variance; a zero
        standard deviation is replaced by 1 to avoid division by zero.
    """
    discounted_r = np.zeros_like(r)
    running_add = 0
    for t in reversed(range(0, r.size)):
        running_add = running_add * gamma + r[t]
        discounted_r[t] = running_add
    # Normalize reward to avoid a big variability in rewards
    mean = np.mean(discounted_r)
    std = np.std(discounted_r)
    if std == 0:
        std = 1
    normalized_discounted_r = (discounted_r - mean) / std
    return normalized_discounted_r
class Agent(nn.Module):
    """Two-layer policy network: a bias-free ReLU hidden layer followed by a
    bias-free output layer with a softmax over actions."""

    def __init__(self, input_size, hidden_size, output_size):
        super(Agent, self).__init__()
        self.a = nn.Linear(input_size, hidden_size, bias=False)
        self.b = nn.Linear(hidden_size, output_size, bias=False)

    def forward(self, input):
        hidden = self.a(input)
        activated = torch.relu(hidden)
        logits = self.b(activated)
        return torch.softmax(logits, dim=-1)
class Modified_Agent(nn.Module):
    """Two-headed policy network sharing one input layer.

    Head one: shared ReLU features -> a_1 -> a_2 -> softmax.
    Head two: shared features -> b (ReLU) -> b_1 -> b_2 -> softmax.
    ``forward`` returns the pair of action distributions.
    """

    def __init__(self, input_size, hidden_size, output_size):
        super(Modified_Agent, self).__init__()
        self.a = nn.Linear(input_size, hidden_size)
        self.a_1 = nn.Linear(hidden_size, hidden_size)
        self.a_2 = nn.Linear(hidden_size, output_size)
        self.b = nn.Linear(hidden_size, hidden_size)
        self.b_1 = nn.Linear(hidden_size, hidden_size)
        self.b_2 = nn.Linear(hidden_size, output_size)

    def forward(self, input):
        shared = torch.relu(self.a(input))
        # First head: two stacked linear layers, softmax at the end.
        head_a = self.a_2(self.a_1(shared))
        first = torch.softmax(head_a, dim=-1)
        # Second head. The inner relu on `shared` is redundant (relu is
        # idempotent) but kept to mirror the original computation exactly.
        head_b = torch.relu(self.b(torch.relu(shared)))
        head_b = self.b_1(head_b)
        second = torch.softmax(self.b_2(head_b), dim=-1)
        return first, second
def init_weights(m):
    """Xavier-initialize Linear layers; intended for nn.Module.apply().

    Weights get Xavier-uniform initialization and biases are filled with 0.01.
    Layers built with bias=False (e.g. the `Agent` network above) have
    m.bias == None, so the bias fill is guarded to avoid an AttributeError.
    """
    if type(m) == nn.Linear:
        torch.nn.init.xavier_uniform_(m.weight)
        if m.bias is not None:
            m.bias.data.fill_(0.01)
# Set total number of episodes to train agent on.
total_episodes = 1000
max_ep = 999  # maximum steps per episode
update_frequency = 5  # NOTE(review): defined but never used below
i = 0  # episode counter
total_reward = []  # final running reward per finished episode
total_length = []  # episode length at termination
rlist = []  # cumulative reward trace across every step
lr = 1e-3
s_size = 3 # state
a_size = 2 # action
h_size = 16 # hidden
model = Modified_Agent(s_size, h_size, a_size)
model.apply(init_weights)
optimizer = optim.Adam(model.parameters(), lr=lr)
env = Environment()
while i < total_episodes:
    if i % 100 == 0:
        print(f'Start {i}th Episode ... ')
    s = env.reset()
    running_reward = 0
    ep_history = []
    for j in range(max_ep):
        # Sample an action stochastically from each head's output distribution.
        with torch.no_grad():
            s = torch.from_numpy(s).type(torch.FloatTensor)
            chosen_action_1, chosen_action_2 = model(s)
            a_dist = chosen_action_1.detach().numpy()
            a = np.random.choice(a_dist, p=a_dist)
            # Recover the index of the sampled probability value.
            a = np.argmax(a_dist == a)
            b_dist = chosen_action_2.detach().numpy()
            b = np.random.choice(b_dist, p=b_dist)
            b = np.argmax(b_dist == b)
        s1, r, d = env.modified_step([a, b]) # Get our reward for taking an action
        ep_history.append([s.numpy(), np.array([a, b]), r, s1])
        s = s1 # Next state
        running_reward += r
        rlist.append(running_reward)
        if d == True:
            # Update Network
            running_reward = 0
            ep_history = np.array(ep_history)
            # Discount the rewards so that more recent rewards weigh more heavily.
            ep_history[:, 2] = discount_rewards(ep_history[:, 2])
            # feed_dict={myAgent.reward_holder:ep_history[:,2],
            # myAgent.action_holder:ep_history[:,1], myAgent.state_in:np.vstack(ep_history[:,0])}
            state_in = np.vstack(ep_history[:, 0])
            state_in = torch.from_numpy(state_in).type(torch.FloatTensor)
            model.eval()
            output_1, output_2 = model(state_in)
            indexes = np.vstack(ep_history[:, 1])
            indexes_1 = torch.from_numpy(indexes[:, 0].astype('int32')).type(torch.LongTensor)
            indexes_2 = torch.from_numpy(indexes[:, 1].astype('int32')).type(torch.LongTensor)
            reward = torch.from_numpy(ep_history[:, 2].astype('float32')).type(torch.FloatTensor)
            # Probability each head assigned to the action actually taken.
            responsible_outputs_1 = output_1.gather(1, indexes_1.view(-1, 1))
            responsible_outputs_2 = output_2.gather(1, indexes_2.view(-1, 1))
            # print(loss)
            model.train()
            # REINFORCE loss per head: -sum(log pi(a|s) * discounted reward).
            loss_1 = -torch.sum(torch.log(responsible_outputs_1.view(-1)) * reward)
            loss_2 = -torch.sum(torch.log(responsible_outputs_2.view(-1)) * reward)
            loss = loss_1 + loss_2
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            total_reward.append(running_reward)
            total_length.append(j)
            break
    i += 1
# Plot the cumulative-reward trace over all steps.
fig = plt.figure()
ax1 = fig.add_subplot(1, 1, 1)
ax1.plot(rlist, 'b')
plt.show()
| [
"numpy.mean",
"numpy.abs",
"math.isclose",
"torch.nn.init.xavier_uniform_",
"numpy.random.choice",
"torch.relu",
"numpy.argmax",
"torch.from_numpy",
"torch.softmax",
"numpy.array",
"matplotlib.pyplot.figure",
"numpy.vstack",
"torch.nn.Linear",
"numpy.std",
"torch.no_grad",
"numpy.zeros... | [((6550, 6562), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (6560, 6562), True, 'import matplotlib.pyplot as plt\n'), ((6615, 6625), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6623, 6625), True, 'import matplotlib.pyplot as plt\n'), ((1921, 1937), 'numpy.zeros_like', 'np.zeros_like', (['r'], {}), '(r)\n', (1934, 1937), True, 'import numpy as np\n'), ((2159, 2180), 'numpy.mean', 'np.mean', (['discounted_r'], {}), '(discounted_r)\n', (2166, 2180), True, 'import numpy as np\n'), ((2191, 2211), 'numpy.std', 'np.std', (['discounted_r'], {}), '(discounted_r)\n', (2197, 2211), True, 'import numpy as np\n'), ((317, 340), 'numpy.abs', 'np.abs', (['(self.a - self.b)'], {}), '(self.a - self.b)\n', (323, 340), True, 'import numpy as np\n'), ((429, 452), 'numpy.abs', 'np.abs', (['(self.a - self.b)'], {}), '(self.a - self.b)\n', (435, 452), True, 'import numpy as np\n'), ((468, 505), 'numpy.array', 'np.array', (['[self.a, self.b, self.prev]'], {}), '([self.a, self.b, self.prev])\n', (476, 505), True, 'import numpy as np\n'), ((883, 906), 'numpy.abs', 'np.abs', (['(self.a - self.b)'], {}), '(self.a - self.b)\n', (889, 906), True, 'import numpy as np\n'), ((1437, 1460), 'numpy.abs', 'np.abs', (['(self.a - self.b)'], {}), '(self.a - self.b)\n', (1443, 1460), True, 'import numpy as np\n'), ((1472, 1516), 'math.isclose', 'math.isclose', (['self.prev', 'curr'], {'rel_tol': '(1e-05)'}), '(self.prev, curr, rel_tol=1e-05)\n', (1484, 1516), False, 'import math\n'), ((2481, 2527), 'torch.nn.Linear', 'nn.Linear', (['input_size', 'hidden_size'], {'bias': '(False)'}), '(input_size, hidden_size, bias=False)\n', (2490, 2527), True, 'import torch.nn as nn\n'), ((2545, 2592), 'torch.nn.Linear', 'nn.Linear', (['hidden_size', 'output_size'], {'bias': '(False)'}), '(hidden_size, output_size, bias=False)\n', (2554, 2592), True, 'import torch.nn as nn\n'), ((2905, 2939), 'torch.nn.Linear', 'nn.Linear', (['input_size', 'hidden_size'], {}), '(input_size, 
hidden_size)\n', (2914, 2939), True, 'import torch.nn as nn\n'), ((2959, 2994), 'torch.nn.Linear', 'nn.Linear', (['hidden_size', 'hidden_size'], {}), '(hidden_size, hidden_size)\n', (2968, 2994), True, 'import torch.nn as nn\n'), ((3014, 3049), 'torch.nn.Linear', 'nn.Linear', (['hidden_size', 'output_size'], {}), '(hidden_size, output_size)\n', (3023, 3049), True, 'import torch.nn as nn\n'), ((3067, 3102), 'torch.nn.Linear', 'nn.Linear', (['hidden_size', 'hidden_size'], {}), '(hidden_size, hidden_size)\n', (3076, 3102), True, 'import torch.nn as nn\n'), ((3122, 3157), 'torch.nn.Linear', 'nn.Linear', (['hidden_size', 'hidden_size'], {}), '(hidden_size, hidden_size)\n', (3131, 3157), True, 'import torch.nn as nn\n'), ((3177, 3212), 'torch.nn.Linear', 'nn.Linear', (['hidden_size', 'output_size'], {}), '(hidden_size, output_size)\n', (3186, 3212), True, 'import torch.nn as nn\n'), ((3375, 3404), 'torch.softmax', 'torch.softmax', (['output'], {'dim': '(-1)'}), '(output, dim=-1)\n', (3388, 3404), False, 'import torch\n'), ((3643, 3682), 'torch.nn.init.xavier_uniform_', 'torch.nn.init.xavier_uniform_', (['m.weight'], {}), '(m.weight)\n', (3672, 3682), False, 'import torch\n'), ((1118, 1155), 'numpy.array', 'np.array', (['[self.a, self.b, self.prev]'], {}), '([self.a, self.b, self.prev])\n', (1126, 1155), True, 'import numpy as np\n'), ((1750, 1787), 'numpy.array', 'np.array', (['[self.a, self.b, self.prev]'], {}), '([self.a, self.b, self.prev])\n', (1758, 1787), True, 'import numpy as np\n'), ((4323, 4338), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (4336, 4338), False, 'import torch\n'), ((4526, 4560), 'numpy.random.choice', 'np.random.choice', (['a_dist'], {'p': 'a_dist'}), '(a_dist, p=a_dist)\n', (4542, 4560), True, 'import numpy as np\n'), ((4577, 4599), 'numpy.argmax', 'np.argmax', (['(a_dist == a)'], {}), '(a_dist == a)\n', (4586, 4599), True, 'import numpy as np\n'), ((4670, 4704), 'numpy.random.choice', 'np.random.choice', (['b_dist'], {'p': 'b_dist'}), 
'(b_dist, p=b_dist)\n', (4686, 4704), True, 'import numpy as np\n'), ((4721, 4743), 'numpy.argmax', 'np.argmax', (['(b_dist == b)'], {}), '(b_dist == b)\n', (4730, 4743), True, 'import numpy as np\n'), ((5095, 5115), 'numpy.array', 'np.array', (['ep_history'], {}), '(ep_history)\n', (5103, 5115), True, 'import numpy as np\n'), ((5412, 5439), 'numpy.vstack', 'np.vstack', (['ep_history[:, 0]'], {}), '(ep_history[:, 0])\n', (5421, 5439), True, 'import numpy as np\n'), ((5611, 5638), 'numpy.vstack', 'np.vstack', (['ep_history[:, 1]'], {}), '(ep_history[:, 1])\n', (5620, 5638), True, 'import numpy as np\n'), ((3440, 3460), 'torch.relu', 'torch.relu', (['output_a'], {}), '(output_a)\n', (3450, 3460), False, 'import torch\n'), ((4867, 4883), 'numpy.array', 'np.array', (['[a, b]'], {}), '([a, b])\n', (4875, 4883), True, 'import numpy as np\n'), ((4356, 4375), 'torch.from_numpy', 'torch.from_numpy', (['s'], {}), '(s)\n', (4372, 4375), False, 'import torch\n'), ((5463, 5489), 'torch.from_numpy', 'torch.from_numpy', (['state_in'], {}), '(state_in)\n', (5479, 5489), False, 'import torch\n')] |
# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.
from unittest.mock import Mock
import numpy as np
from miplearn import PrimalSolutionComponent
from miplearn.classifiers import Classifier
from miplearn.tests import get_test_pyomo_instances
def test_predict():
    """After fitting, predict() must return a solution covering x[0..3]."""
    instances, models = get_test_pyomo_instances()
    component = PrimalSolutionComponent()
    component.fit(instances)
    solution = component.predict(instances[0])
    assert "x" in solution
    for index in (0, 1, 2, 3):
        assert index in solution["x"]
def test_evaluate():
    """Check PrimalSolutionComponent.evaluate against hand-computed confusion metrics."""
    instances, models = get_test_pyomo_instances()
    # Mock classifiers: column 1 of predict_proba is the probability that the
    # variable should be fixed to the respective value (zero / one).
    clf_zero = Mock(spec=Classifier)
    clf_zero.predict_proba = Mock(
        return_value=np.array(
            [
                [0.0, 1.0],  # x[0]
                [0.0, 1.0],  # x[1]
                [1.0, 0.0],  # x[2]
                [1.0, 0.0],  # x[3]
            ]
        )
    )
    clf_one = Mock(spec=Classifier)
    clf_one.predict_proba = Mock(
        return_value=np.array(
            [
                [1.0, 0.0],  # x[0] instances[0]
                [1.0, 0.0],  # x[1] instances[0]
                [0.0, 1.0],  # x[2] instances[0]
                [1.0, 0.0],  # x[3] instances[0]
            ]
        )
    )
    comp = PrimalSolutionComponent(classifier=[clf_zero, clf_one], threshold=0.50)
    comp.fit(instances[:1])
    # Predicted: x[0], x[1] fixed to 0; x[2] fixed to 1; x[3] left free (None).
    assert comp.predict(instances[0]) == {"x": {0: 0, 1: 0, 2: 1, 3: None}}
    # Actual optimal solution of the first instance.
    assert instances[0].solution == {"x": {0: 1, 1: 0, 2: 1, 3: 1}}
    ev = comp.evaluate(instances[:1])
    # Confusion-matrix statistics per fixing direction for instance index 0.
    assert ev == {
        "Fix one": {
            0: {
                "Accuracy": 0.5,
                "Condition negative": 1,
                "Condition negative (%)": 25.0,
                "Condition positive": 3,
                "Condition positive (%)": 75.0,
                "F1 score": 0.5,
                "False negative": 2,
                "False negative (%)": 50.0,
                "False positive": 0,
                "False positive (%)": 0.0,
                "Precision": 1.0,
                "Predicted negative": 3,
                "Predicted negative (%)": 75.0,
                "Predicted positive": 1,
                "Predicted positive (%)": 25.0,
                "Recall": 0.3333333333333333,
                "True negative": 1,
                "True negative (%)": 25.0,
                "True positive": 1,
                "True positive (%)": 25.0,
            }
        },
        "Fix zero": {
            0: {
                "Accuracy": 0.75,
                "Condition negative": 3,
                "Condition negative (%)": 75.0,
                "Condition positive": 1,
                "Condition positive (%)": 25.0,
                "F1 score": 0.6666666666666666,
                "False negative": 0,
                "False negative (%)": 0.0,
                "False positive": 1,
                "False positive (%)": 25.0,
                "Precision": 0.5,
                "Predicted negative": 2,
                "Predicted negative (%)": 50.0,
                "Predicted positive": 2,
                "Predicted positive (%)": 50.0,
                "Recall": 1.0,
                "True negative": 2,
                "True negative (%)": 50.0,
                "True positive": 1,
                "True positive (%)": 25.0,
            }
        },
    }
def test_primal_parallel_fit():
    """Fitting with n_jobs=2 must still produce one classifier per label."""
    instances, models = get_test_pyomo_instances()
    component = PrimalSolutionComponent()
    component.fit(instances, n_jobs=2)
    assert len(component.classifiers) == 2
| [
"numpy.array",
"miplearn.tests.get_test_pyomo_instances",
"miplearn.PrimalSolutionComponent",
"unittest.mock.Mock"
] | [((466, 492), 'miplearn.tests.get_test_pyomo_instances', 'get_test_pyomo_instances', ([], {}), '()\n', (490, 492), False, 'from miplearn.tests import get_test_pyomo_instances\n'), ((504, 529), 'miplearn.PrimalSolutionComponent', 'PrimalSolutionComponent', ([], {}), '()\n', (527, 529), False, 'from miplearn import PrimalSolutionComponent\n'), ((790, 816), 'miplearn.tests.get_test_pyomo_instances', 'get_test_pyomo_instances', ([], {}), '()\n', (814, 816), False, 'from miplearn.tests import get_test_pyomo_instances\n'), ((832, 853), 'unittest.mock.Mock', 'Mock', ([], {'spec': 'Classifier'}), '(spec=Classifier)\n', (836, 853), False, 'from unittest.mock import Mock\n'), ((1122, 1143), 'unittest.mock.Mock', 'Mock', ([], {'spec': 'Classifier'}), '(spec=Classifier)\n', (1126, 1143), False, 'from unittest.mock import Mock\n'), ((1460, 1530), 'miplearn.PrimalSolutionComponent', 'PrimalSolutionComponent', ([], {'classifier': '[clf_zero, clf_one]', 'threshold': '(0.5)'}), '(classifier=[clf_zero, clf_one], threshold=0.5)\n', (1483, 1530), False, 'from miplearn import PrimalSolutionComponent\n'), ((3595, 3621), 'miplearn.tests.get_test_pyomo_instances', 'get_test_pyomo_instances', ([], {}), '()\n', (3619, 3621), False, 'from miplearn.tests import get_test_pyomo_instances\n'), ((3633, 3658), 'miplearn.PrimalSolutionComponent', 'PrimalSolutionComponent', ([], {}), '()\n', (3656, 3658), False, 'from miplearn import PrimalSolutionComponent\n'), ((910, 968), 'numpy.array', 'np.array', (['[[0.0, 1.0], [0.0, 1.0], [1.0, 0.0], [1.0, 0.0]]'], {}), '([[0.0, 1.0], [0.0, 1.0], [1.0, 0.0], [1.0, 0.0]])\n', (918, 968), True, 'import numpy as np\n'), ((1199, 1257), 'numpy.array', 'np.array', (['[[1.0, 0.0], [1.0, 0.0], [0.0, 1.0], [1.0, 0.0]]'], {}), '([[1.0, 0.0], [1.0, 0.0], [0.0, 1.0], [1.0, 0.0]])\n', (1207, 1257), True, 'import numpy as np\n')] |
import sys
import os
import argparse
import logging
import json
import time
import numpy as np
import openslide
import PIL
import cv2
import matplotlib.pyplot as plt
from scipy import ndimage
from torch.utils.data import DataLoader
import math
import json
import logging
import time
import tensorflow as tf
from tensorflow.keras import backend as K
from skimage.transform import resize, rescale
import gzip
import time
sys.path.append(os.path.dirname(os.path.abspath(__file__)) + '/../')
from helpers.utils import *
from data.data_loader import WSIStridedPatchDataset
from models.seg_models import *
# Fix the RNG seed so any random sampling is reproducible between runs.
np.random.seed(0)
# python3 probs_map.py /media/mak/mirlproject1/CAMELYON17/training/dataset/center_0/patient_004_node_4.tif /media/mak/Data/Projects/Camelyon17/saved_models/keras_models/segmentation/CM16/unet_densenet121_imagenet_pretrained_L0_20190712-173828/Model_Stage2.h5 ./configs/UNET_FCN.json ../../../predictions/DenseNet-121_UNET/patient_004_node_4_mask.npy
parser = argparse.ArgumentParser(description='Get the probability map of tumor'
                                 ' patch predictions given a WSI')
parser.add_argument('wsi_path', default=None, metavar='WSI_PATH', type=str,
                    help='Path to the input WSI file')
parser.add_argument('model_path', default=None, metavar='MODEL_PATH', type=str,
                    help='Path to the saved model weights file of a Keras model')
parser.add_argument('cfg_path', default=None, metavar='CFG_PATH', type=str,
                    help='Path to the config file in json format related to'
                    ' the ckpt file')
parser.add_argument('probs_map_path', default=None, metavar='PROBS_MAP_PATH',
                    type=str, help='Path to the output probs_map numpy file')
parser.add_argument('--mask_path', default=None, metavar='MASK_PATH', type=str,
                    help='Path to the tissue mask of the input WSI file')
parser.add_argument('--label_path', default=None, metavar='LABEL_PATH', type=str,
                    help='Path to the Ground-Truth label image')
parser.add_argument('--GPU', default='0', type=str, help='which GPU to use'
                    ', default 0')
parser.add_argument('--num_workers', default=5, type=int, help='number of '
                    'workers to use to make batch, default 5')
# NOTE(review): default here is 1 (enabled) but the help text claims default 0 — confirm intent.
parser.add_argument('--eight_avg', default=1, type=int, help='if using average'
                    ' of the 8 direction predictions for each patch,'
                    ' default 0, which means disabled')
parser.add_argument('--level', default=5, type=int, help='heatmap generation level,'
                    ' default 5')
parser.add_argument('--sampling_stride', default=32, type=int, help='Sampling pixels in tissue mask,'
                    ' default 32')
# NOTE(review): type=int with default=True — command-line values arrive as 0/1 ints.
parser.add_argument('--roi_masking', default=True, type=int, help='Sample pixels from tissue mask region,'
                    ' default True, points are not sampled from glass region')
def transform_prob(data, flip, rotate):
    """
    Undo the data augmentation applied to a prediction patch.

    Flipping is its own inverse; a forward rotation of k quarter turns is
    undone by rotating 4 - k quarter turns.
    """
    if flip == 'FLIP_LEFT_RIGHT':
        data = np.fliplr(data)
    inverse_turns = {'ROTATE_90': 3, 'ROTATE_180': 2, 'ROTATE_270': 1}
    if rotate in inverse_turns:
        data = np.rot90(data, inverse_turns[rotate])
    return data
def get_index(coord_ax, probs_map_shape_ax, grid_ax):
    """
    Clip the half-patch extent so the patch window stays within the WSI.

    Returns (_min, _max): the number of pixels that can be taken below and
    above coord_ax without leaving [0, probs_map_shape_ax].  Closed form of
    the original decrement loops.
    """
    half = grid_ax // 2
    _min = min(half, coord_ax)
    _max = min(half, probs_map_shape_ax - coord_ax)
    return _min, _max
def get_probs_map(model, dataloader):
    """
    Generate a whole-slide probability map by stitching per-patch predictions.

    Runs the model over every batch, undoes the patch augmentation, rescales
    each prediction down to the heatmap level and writes it into a map the
    size of the tissue mask.

    :param model: Keras-style model; predict() is assumed to return per-pixel
        class scores with the tumor channel at index 1 — TODO confirm
    :param dataloader: loader yielding (image_patches, x_coords, y_coords,
        label_patches); label patches are ignored here
    :return: 2D numpy array of tumor probabilities at the mask resolution
    """
    eps = 0.0001  # NOTE(review): unused below
    probs_map = np.zeros(dataloader.dataset._mask.shape)
    count_map = np.zeros(dataloader.dataset._mask.shape)
    num_batch = len(dataloader)
    batch_size = dataloader.batch_size
    map_x_size = dataloader.dataset._mask.shape[0]
    map_y_size = dataloader.dataset._mask.shape[1]
    level = dataloader.dataset._level
    factor = dataloader.dataset._sampling_stride
    flip = dataloader.dataset._flip
    rotate = dataloader.dataset._rotate
    # Scale factor from level-0 patch size down to the heatmap level.
    down_scale = 1.0 / pow(2, level)
    count = 0
    time_now = time.time()
    # label_mask is not utilized
    for (image_patches, x_coords, y_coords, label_patches) in dataloader:
        image_patches = image_patches.cpu().data.numpy()
        label_patches = label_patches.cpu().data.numpy()
        x_coords = x_coords.cpu().data.numpy()
        y_coords = y_coords.cpu().data.numpy()
        # start = time.time()
        y_preds = model.predict(image_patches, batch_size=batch_size, verbose=1, steps=None)
        # end = time.time()
        # print('Elapsed Inference Time', (end - start))
        # print (image_patches[0].shape, y_preds[0].shape)
        # imshow (normalize_minmax(image_patches[0]),label_patches[0], y_preds[0][:,:,0], y_preds[0][:,:,1], np.argmax(y_preds[0], axis=2))
        for i in range(batch_size):
            # start = time.time()
            # Undo flip/rotation so the prediction aligns with the slide again.
            y_preds_transformed = transform_prob(y_preds[i], flip, rotate)
            # img_patch_rescaled = rescale(image_patches[i], down_scale, anti_aliasing=True)
            y_preds_rescaled = rescale(y_preds_transformed, down_scale, anti_aliasing=False)
            # imshow(normalize_minmax(image_patches[i]), label_patches[i], y_preds[i][:,:,1], np.argmax(y_preds[i], axis=2), title=['Image', 'Ground-Truth', 'Heat-Map', 'Predicted-Label-Map'])
            # imshow(normalize_minmax(img_patch_rescaled), y_preds_rescaled[:,:,1], np.argmax(y_preds_rescaled, axis=2), title=['Rescaled-Image', 'Rescaled-Predicted-Heat-Map', 'Rescaled-Predicted-Label-Map'])
            # Clip the patch extent at the map borders.
            xmin, xmax = get_index(x_coords[i], map_x_size, factor)
            ymin, ymax = get_index(y_coords[i], map_y_size, factor)
            # print (xmin, xmax, ymin, ymax)
            # NOTE(review): overlapping patches overwrite (=) rather than
            # accumulate; count_map tracks coverage but is never used to
            # normalize the result — confirm intended.
            probs_map[x_coords[i] - xmin: x_coords[i] + xmax, y_coords[i] - ymin: y_coords[i] + ymax] =\
                y_preds_rescaled[0:xmin+xmax, 0:ymin+ymax, 1]
            count_map[x_coords[i] - xmin: x_coords[i] + xmax, y_coords[i] - ymin: y_coords[i] + ymax] +=\
                np.ones_like(y_preds_rescaled[0:xmin+xmax, 0:ymin+ymax, 1])
            # end = time.time()
            # print('Elapsed post inference time', (end - start))
        count += 1
        time_spent = time.time() - time_now
        time_now = time.time()
        logging.info(
            '{}, flip : {}, rotate : {}, batch : {}/{}, Run Time : {:.2f}'
            .format(
                time.strftime("%Y-%m-%d %H:%M:%S"), dataloader.dataset._flip,
                dataloader.dataset._rotate, count, num_batch, time_spent))
    # imshow(count_map)
    return probs_map
def make_dataloader(args, cfg, flip='NONE', rotate='NONE'):
    """Build a DataLoader over strided WSI patches with the given augmentation."""
    dataset = WSIStridedPatchDataset(args.wsi_path, args.mask_path,
                                    args.label_path,
                                    image_size=cfg['image_size'],
                                    normalize=True, flip=flip, rotate=rotate,
                                    level=args.level,
                                    sampling_stride=args.sampling_stride,
                                    roi_masking=args.roi_masking)
    return DataLoader(dataset, batch_size=cfg['batch_size'],
                      num_workers=args.num_workers, drop_last=True)
def run(args):
    """
    Generate a WSI probability map and save it to args.probs_map_path.

    Loads the segmentation model weights, runs patch-wise inference over the
    slide and, when --eight_avg is set, averages the maps over the 8
    flip/rotation augmentations.
    """
    os.environ["CUDA_VISIBLE_DEVICES"] = args.GPU
    logging.basicConfig(level=logging.INFO)
    with open(args.cfg_path) as f:
        cfg = json.load(f)
    # TF1-style session; allow_growth avoids grabbing all GPU memory upfront.
    core_config = tf.ConfigProto()
    core_config.gpu_options.allow_growth = True
    session =tf.Session(config=core_config)
    K.set_session(session)
    # Fully-convolutional network: input spatial dims left unspecified.
    model = unet_densenet121((None, None), weights=None)
    model.load_weights(args.model_path)
    print ("Loaded Model Weights")
    # Make sure the output directory exists before saving.
    save_dir = os.path.dirname(args.probs_map_path)
    if not os.path.exists(save_dir):
        os.makedirs(save_dir)
    if not args.eight_avg:
        # Single un-augmented pass.
        dataloader = make_dataloader(
            args, cfg, flip='NONE', rotate='NONE')
        probs_map = get_probs_map(model, dataloader)
    else:
        # Average predictions over all 8 flip/rotation combinations.
        dataloader = make_dataloader(
            args, cfg, flip='NONE', rotate='NONE')
        probs_map = np.zeros(dataloader.dataset._mask.shape)
        probs_map += get_probs_map(model, dataloader)
        dataloader = make_dataloader(
            args, cfg, flip='NONE', rotate='ROTATE_90')
        probs_map += get_probs_map(model, dataloader)
        dataloader = make_dataloader(
            args, cfg, flip='NONE', rotate='ROTATE_180')
        probs_map += get_probs_map(model, dataloader)
        dataloader = make_dataloader(
            args, cfg, flip='NONE', rotate='ROTATE_270')
        probs_map += get_probs_map(model, dataloader)
        dataloader = make_dataloader(
            args, cfg, flip='FLIP_LEFT_RIGHT', rotate='NONE')
        probs_map += get_probs_map(model, dataloader)
        dataloader = make_dataloader(
            args, cfg, flip='FLIP_LEFT_RIGHT', rotate='ROTATE_90')
        probs_map += get_probs_map(model, dataloader)
        dataloader = make_dataloader(
            args, cfg, flip='FLIP_LEFT_RIGHT', rotate='ROTATE_180')
        probs_map += get_probs_map(model, dataloader)
        dataloader = make_dataloader(
            args, cfg, flip='FLIP_LEFT_RIGHT', rotate='ROTATE_270')
        probs_map += get_probs_map(model, dataloader)
        probs_map /= 8
    np.save(args.probs_map_path, probs_map)
def main():
    """CLI entry point: parse arguments and generate the probability map."""
    run(parser.parse_args())


if __name__ == '__main__':
    main()
| [
"numpy.rot90",
"numpy.save",
"os.path.exists",
"argparse.ArgumentParser",
"tensorflow.Session",
"numpy.random.seed",
"tensorflow.ConfigProto",
"skimage.transform.rescale",
"numpy.fliplr",
"data.data_loader.WSIStridedPatchDataset",
"os.path.dirname",
"tensorflow.keras.backend.set_session",
"t... | [((601, 618), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (615, 618), True, 'import numpy as np\n'), ((982, 1088), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Get the probability map of tumor patch predictions given a WSI"""'}), "(description=\n 'Get the probability map of tumor patch predictions given a WSI')\n", (1005, 1088), False, 'import argparse\n'), ((3895, 3935), 'numpy.zeros', 'np.zeros', (['dataloader.dataset._mask.shape'], {}), '(dataloader.dataset._mask.shape)\n', (3903, 3935), True, 'import numpy as np\n'), ((3952, 3992), 'numpy.zeros', 'np.zeros', (['dataloader.dataset._mask.shape'], {}), '(dataloader.dataset._mask.shape)\n', (3960, 3992), True, 'import numpy as np\n'), ((4400, 4411), 'time.time', 'time.time', ([], {}), '()\n', (4409, 4411), False, 'import time\n'), ((7584, 7623), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO'}), '(level=logging.INFO)\n', (7603, 7623), False, 'import logging\n'), ((7706, 7722), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {}), '()\n', (7720, 7722), True, 'import tensorflow as tf\n'), ((7785, 7815), 'tensorflow.Session', 'tf.Session', ([], {'config': 'core_config'}), '(config=core_config)\n', (7795, 7815), True, 'import tensorflow as tf\n'), ((7821, 7843), 'tensorflow.keras.backend.set_session', 'K.set_session', (['session'], {}), '(session)\n', (7834, 7843), True, 'from tensorflow.keras import backend as K\n'), ((7993, 8029), 'os.path.dirname', 'os.path.dirname', (['args.probs_map_path'], {}), '(args.probs_map_path)\n', (8008, 8029), False, 'import os\n'), ((9605, 9644), 'numpy.save', 'np.save', (['args.probs_map_path', 'probs_map'], {}), '(args.probs_map_path, probs_map)\n', (9612, 9644), True, 'import numpy as np\n'), ((3099, 3114), 'numpy.fliplr', 'np.fliplr', (['data'], {}), '(data)\n', (3108, 3114), True, 'import numpy as np\n'), ((3161, 3178), 'numpy.rot90', 'np.rot90', (['data', '(3)'], {}), '(data, 3)\n', (3169, 
3178), True, 'import numpy as np\n'), ((3226, 3243), 'numpy.rot90', 'np.rot90', (['data', '(2)'], {}), '(data, 2)\n', (3234, 3243), True, 'import numpy as np\n'), ((3291, 3308), 'numpy.rot90', 'np.rot90', (['data', '(1)'], {}), '(data, 1)\n', (3299, 3308), True, 'import numpy as np\n'), ((6601, 6612), 'time.time', 'time.time', ([], {}), '()\n', (6610, 6612), False, 'import time\n'), ((7053, 7294), 'data.data_loader.WSIStridedPatchDataset', 'WSIStridedPatchDataset', (['args.wsi_path', 'args.mask_path', 'args.label_path'], {'image_size': "cfg['image_size']", 'normalize': '(True)', 'flip': 'flip', 'rotate': 'rotate', 'level': 'args.level', 'sampling_stride': 'args.sampling_stride', 'roi_masking': 'args.roi_masking'}), "(args.wsi_path, args.mask_path, args.label_path,\n image_size=cfg['image_size'], normalize=True, flip=flip, rotate=rotate,\n level=args.level, sampling_stride=args.sampling_stride, roi_masking=\n args.roi_masking)\n", (7075, 7294), False, 'from data.data_loader import WSIStridedPatchDataset\n'), ((7674, 7686), 'json.load', 'json.load', (['f'], {}), '(f)\n', (7683, 7686), False, 'import json\n'), ((8041, 8065), 'os.path.exists', 'os.path.exists', (['save_dir'], {}), '(save_dir)\n', (8055, 8065), False, 'import os\n'), ((8075, 8096), 'os.makedirs', 'os.makedirs', (['save_dir'], {}), '(save_dir)\n', (8086, 8096), False, 'import os\n'), ((8394, 8434), 'numpy.zeros', 'np.zeros', (['dataloader.dataset._mask.shape'], {}), '(dataloader.dataset._mask.shape)\n', (8402, 8434), True, 'import numpy as np\n'), ((452, 477), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (467, 477), False, 'import os\n'), ((5414, 5475), 'skimage.transform.rescale', 'rescale', (['y_preds_transformed', 'down_scale'], {'anti_aliasing': '(False)'}), '(y_preds_transformed, down_scale, anti_aliasing=False)\n', (5421, 5475), False, 'from skimage.transform import resize, rescale\n'), ((6356, 6419), 'numpy.ones_like', 'np.ones_like', (['y_preds_rescaled[0:xmin + xmax, 
0:ymin + ymax, 1]'], {}), '(y_preds_rescaled[0:xmin + xmax, 0:ymin + ymax, 1])\n', (6368, 6419), True, 'import numpy as np\n'), ((6559, 6570), 'time.time', 'time.time', ([], {}), '()\n', (6568, 6570), False, 'import time\n'), ((6747, 6781), 'time.strftime', 'time.strftime', (['"""%Y-%m-%d %H:%M:%S"""'], {}), "('%Y-%m-%d %H:%M:%S')\n", (6760, 6781), False, 'import time\n')] |
import numpy as np
import cv2

# Load the image and compute an edge map for the Hough transforms.
img = cv2.imread('laptop.jpg')
img1 = img.copy()
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
edges = cv2.Canny(gray, 50, 150, apertureSize=3)
cv2.imshow("Edges", edges)

# Standard Hough Transform: each line is (rho, theta) in polar form.
lines = cv2.HoughLines(edges, 1, np.pi / 180, 170)
print(lines.shape[0])
for i in range(lines.shape[0]):
    r = lines[i][0][0]
    t = lines[i][0][1]
    a = np.cos(t)
    b = np.sin(t)
    x0 = a * r
    y0 = b * r
    # Extend the line 1000 px in each direction for drawing.
    x1 = int(x0 - 1000 * b)
    y1 = int(y0 + 1000 * a)
    x2 = int(x0 + 1000 * b)
    y2 = int(y0 - 1000 * a)
    cv2.line(img, (x1, y1), (x2, y2), (0, 0, 255), 2)
cv2.imshow("Lines", img)

# Probabilistic Hough Transform: returns finite segments (x1, y1, x2, y2).
minLineL = 10
maxLineG = 10
# BUG FIX: the 5th positional parameter of HoughLinesP is the optional output
# array `lines`, not minLineLength, so the original call mis-bound both tuning
# knobs. Pass them by keyword so they are actually applied.
linesP = cv2.HoughLinesP(edges, 1, np.pi / 180, 75,
                         minLineLength=minLineL, maxLineGap=maxLineG)
print(linesP.shape)
for i in range(linesP.shape[0]):
    xp1 = linesP[i][0][0]
    yp1 = linesP[i][0][1]
    xp2 = linesP[i][0][2]
    yp2 = linesP[i][0][3]
    cv2.line(img1, (xp1, yp1), (xp2, yp2), (0, 255, 0), 2)
cv2.imshow("Lines_P", img1)
cv2.waitKey(0)
cv2.destroyAllWindows() | [
"cv2.HoughLinesP",
"cv2.line",
"cv2.imshow",
"cv2.waitKey",
"cv2.HoughLines",
"cv2.destroyAllWindows",
"numpy.cos",
"cv2.cvtColor",
"numpy.sin",
"cv2.Canny",
"cv2.imread"
] | [((37, 61), 'cv2.imread', 'cv2.imread', (['"""laptop.jpg"""'], {}), "('laptop.jpg')\n", (47, 61), False, 'import cv2\n'), ((87, 124), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2GRAY'], {}), '(img, cv2.COLOR_BGR2GRAY)\n', (99, 124), False, 'import cv2\n'), ((132, 172), 'cv2.Canny', 'cv2.Canny', (['gray', '(50)', '(150)'], {'apertureSize': '(3)'}), '(gray, 50, 150, apertureSize=3)\n', (141, 172), False, 'import cv2\n'), ((172, 198), 'cv2.imshow', 'cv2.imshow', (['"""Edges"""', 'edges'], {}), "('Edges', edges)\n", (182, 198), False, 'import cv2\n'), ((231, 273), 'cv2.HoughLines', 'cv2.HoughLines', (['edges', '(1)', '(np.pi / 180)', '(170)'], {}), '(edges, 1, np.pi / 180, 170)\n', (245, 273), False, 'import cv2\n'), ((553, 577), 'cv2.imshow', 'cv2.imshow', (['"""Lines"""', 'img'], {}), "('Lines', img)\n", (563, 577), False, 'import cv2\n'), ((647, 709), 'cv2.HoughLinesP', 'cv2.HoughLinesP', (['edges', '(1)', '(np.pi / 180)', '(75)', 'minLineL', 'maxLineG'], {}), '(edges, 1, np.pi / 180, 75, minLineL, maxLineG)\n', (662, 709), False, 'import cv2\n'), ((919, 946), 'cv2.imshow', 'cv2.imshow', (['"""Lines_P"""', 'img1'], {}), "('Lines_P', img1)\n", (929, 946), False, 'import cv2\n'), ((946, 960), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (957, 960), False, 'import cv2\n'), ((961, 984), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (982, 984), False, 'import cv2\n'), ((369, 378), 'numpy.cos', 'np.cos', (['t'], {}), '(t)\n', (375, 378), True, 'import numpy as np\n'), ((384, 393), 'numpy.sin', 'np.sin', (['t'], {}), '(t)\n', (390, 393), True, 'import numpy as np\n'), ((507, 556), 'cv2.line', 'cv2.line', (['img', '(x1, y1)', '(x2, y2)', '(0, 0, 255)', '(2)'], {}), '(img, (x1, y1), (x2, y2), (0, 0, 255), 2)\n', (515, 556), False, 'import cv2\n'), ((854, 908), 'cv2.line', 'cv2.line', (['img1', '(xp1, yp1)', '(xp2, yp2)', '(0, 255, 0)', '(2)'], {}), '(img1, (xp1, yp1), (xp2, yp2), (0, 255, 0), 2)\n', (862, 908), False, 'import 
cv2\n')] |
import matplotlib.gridspec as gridspec
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from matplotlib.backends.backend_pdf import PdfPages
from numpy import mean, std
from scipy import ndimage as ndi
from scipy.optimize import curve_fit
import scipy.ndimage
import itertools as it
from math import floor
import constant
import settings
from Exceptions import *
from winspec import SpeFile
def quadratic(data, aa, bb, cc):
    """Evaluate the quadratic aa*data**2 + bb*data + cc (written in Horner form)."""
    return (aa * data + bb) * data + cc
def get_bsol_field(i_sol):
    """
    Convert blue-solenoid currents to magnetic fields via a linear calibration.

    A first-order polynomial is fitted to the current/field calibration table
    read from disk, then applied to every entry of i_sol.

    :param i_sol: iterable of solenoid currents [A]
    :return: list of magnetic fields [Gauss]
    """
    calibration = pd.read_excel(settings.BSOL_FILE_NAME)
    slope, intercept = np.polyfit(calibration[settings.BSOL_CURRENT_COLUMN],
                                 calibration[settings.BSOL_B_FIELD_COLUMN], 1)
    return [current * slope + intercept for current in i_sol]
def gaus(x, a, x0, sigma, c):
    """
    Offset Gaussian: a * exp(-(x - x0)**2 / (2 * sigma**2)) + c.

    :param x: evaluation point(s)
    :param a: amplitude
    :param x0: center position
    :param sigma: RMS width
    :param c: vertical offset
    :return: Gaussian value(s) at x
    """
    z = (x - x0) / sigma
    return a * np.exp(-0.5 * z ** 2) + c
def calculate_gauss_fitting_params(interval, intensity_ave_no_bg):
    """
    Fit an offset Gaussian to a background-subtracted intensity profile.

    Initial guesses for the center and RMS width are taken from the weighted
    first and second moments of the profile.

    :param interval: x coordinates
    :param intensity_ave_no_bg: intensity values (background already removed)
    :return: (popt, pcov) from scipy.optimize.curve_fit; popt holds the
        optimal values [a, x0, sigma, c]
    """
    total = sum(intensity_ave_no_bg)
    center = sum(intensity_ave_no_bg * interval) / total
    width = np.sqrt(np.absolute(sum(intensity_ave_no_bg * (interval - center) ** 2) / total))
    return curve_fit(gaus, interval, intensity_ave_no_bg,
                     p0=[1., center, width, 0], maxfev=10000000)
def calculate_intensity_avg_no_bg(bg, intensity_ave):
    """
    Subtract a constant background and clamp negative results to zero.

    :param bg: scalar background level
    :param intensity_ave: 1D list of averaged intensity values
    :return: 1D list of background-subtracted intensities, floored at 0
    """
    # One comprehension replaces the original subtract-then-fix-up loop.
    return [max(i - bg, 0) for i in intensity_ave]
def convert_cont_to_current(count: float):
    """
    Convert a blue-solenoid count number to a solenoid current by linear
    interpolation of the count/current calibration table.

    :param count: solenoid count number; must lie in [0, 1312] and is used as
        an integer index into the interpolated table
    :return: interpolated solenoid current [A]
    :raises CurrentValueError: if count is outside the calibrated range
    """
    if 0 <= count <= 1312:
        count_ls = [0, 608, 672, 736, 800, 864, 928, 992, 1056, 1120, 1184, 1248, 1312]  # blue solenoid count number
        current_ls = [0, 11.7, 13.1, 14.4, 15.6, 17.1, 18.2, 19.6, 20.8, 22, 23.3, 24.7, 26]  # blue solenoid current
        xvals = np.linspace(0, max(count_ls), max(count_ls) + 1)
        # BUG FIX: the original passed a stray positional `1`, which np.interp
        # binds to its `left` parameter; since xvals never drops below
        # count_ls[0] it had no effect and has been removed for clarity.
        yinterp = np.interp(xvals, count_ls, current_ls)
        return yinterp[count]
    else:
        raise CurrentValueError('Exceed the range of the interpolation. Calculation is terminated. '
                                'Please double check the solenoid count number.')
class DenoiseSPEImage:
    """Denoise and analyse Princeton Instruments .SPE beam images.

    Collects all ``.SPE`` files in ``file_list`` whose names start with
    ``file_start_str`` and offers two background-removal approaches:

    * ``regular_method`` - subtract a constant background estimated from a
      corner of the image (see :meth:`set_background_range`).
    * ``contour_method`` - keep only the main contoured beam spot and
      estimate the background from the remaining low pixels.

    Beam x/y RMS widths come from Gaussian fits to the projected profiles.
    """
    def __init__(self, file_start_str, folder_location, file_list):
        self.file_start_str = file_start_str
        self.folder_location = folder_location
        self.file_list = file_list
        self.spe_file_list = []
        # Per-file result accumulators, filled by the get_rms_* methods.
        self.sol_cont = []  # solenoid count numbers parsed from file names
        self.x_rms_all = []
        self.y_rms_all = []
        self.x_rms_std = []
        self.y_rms_std = []
        self.contour_paths_all = []
        self.zoom_in_single_frame = []
        self.x_intensity_no_bg = []
        self.y_intensity_no_bg = []
        # Image cropping boundaries; must be set via set_cropped_range().
        self.x_start = None
        self.x_end = None
        self.y_start = None
        self.y_end = None
        # Background-corner boundaries (regular_method); default 8x8 corner.
        self.bg_x_start = None
        self.bg_x_end = None
        self.bg_y_start = None
        self.bg_y_end = None
        self.set_background_range(0, 8, 0, 8)
        for file in self.file_list:
            if file.endswith(".SPE") and file.startswith(self.file_start_str):
                self.spe_file_list.append(file)
    def clear(self):
        """Reset the per-file result accumulators."""
        self.sol_cont = []
        self.x_rms_all = []
        self.y_rms_all = []
        self.x_rms_std = []
        self.y_rms_std = []
    def set_cropped_range(self, x_start, x_end, y_start, y_end):
        """
        Set the cropping boundaries of the original images.
        """
        self.x_start = x_start
        self.x_end = x_end
        self.y_start = y_start
        self.y_end = y_end
    def has_cropped_range(self):
        """
        Image cropping boundary value check.

        Fixed: compare against ``None`` explicitly - the previous truthiness
        test wrongly reported "not set" whenever a boundary was 0.
        """
        return None not in (self.x_start, self.x_end, self.y_start, self.y_end)
    def set_background_range(self, bg_x_start, bg_x_end, bg_y_start, bg_y_end):
        """
        Set the background boundary (useful only when regular_method is applied).
        """
        self.bg_x_start = bg_x_start
        self.bg_x_end = bg_x_end
        self.bg_y_start = bg_y_start
        self.bg_y_end = bg_y_end
    def has_background_range(self):
        """
        Background cropping boundary value check.

        Fixed: check the ``bg_y_*`` attributes instead of the unrelated
        image-crop ``y_*`` attributes the original tested by mistake.
        """
        return None not in (self.bg_x_start, self.bg_x_end,
                            self.bg_y_start, self.bg_y_end)
    def get_background(self, background_arr):
        """
        Calculate the background value from a corner of the beam image (useful only when regular_method is applied).
        :param background_arr: image array to calculate the background
        :return: a background float
        """
        if not self.has_background_range():
            raise DenoiseBGError('You need to set the background range first.')
        cropped_background = background_arr[self.bg_y_start:self.bg_y_end, self.bg_x_start:self.bg_x_end]
        return np.mean(cropped_background.flatten())
    def get_bg_within_contour(self, current_frame):
        """
        Calculate the background value from denoised beam image (useful only when contour_method is applied).
        :param current_frame: a denoised image array with only the beam in the contour left and zero everywhere else
        :return: background float averaged over lowest 100 non zero pixel values
        """
        self.get_main_beam_contour_and_force_outer_zero(current_frame)
        single_frame = self.zoom_in_single_frame
        single_frame_temp = sorted(np.array(single_frame).flatten())
        # Drop the zeroed-out pixels before averaging the lowest values.
        single_frame_temp[:] = (i for i in single_frame_temp if i != 0)
        return np.mean(single_frame_temp[:100])
    def get_current_all_frame(self, file):
        """
        Read .SPE data file.
        :param file: raw data file
        :return: multi-dimension image data array
        """
        # File names look like '<prefix>_<solenoid count>_...'.
        self.sol_cont.append(file.split('_')[1])
        return SpeFile(self.folder_location + file).data
    def save_pdf(self, file, file_type):
        """
        Save beam images to pdf.
        :param file: image data file name string
        :param file_type: pdf name end string
        :return:
        """
        if file_type == 'all':
            file_end_str = '_all_jet.pdf'
        elif file_type == 'single':
            file_end_str = '_jet_single_frame.pdf'
        else:
            raise ValueError('file_type has to be single or all.')
        # Every currently-open matplotlib figure goes into the pdf.
        with PdfPages(file + file_end_str) as pdf:
            for fig_index in range(1, plt.gcf().number + 1):
                pdf.savefig(fig_index)
    def generate_image_and_save(self, zoom_in, x_intensity_ave_no_bg, y_intensity_ave_no_bg, file, file_type,
                                single_frame_counter=None):
        """
        Generate plot including the beam itself, the x and y normalized (to 10) bar chart together with the Gaussian
        fitting curves.
        :param zoom_in: cropped image array
        :param x_intensity_ave_no_bg: x intensity with the background subtracted
        :param y_intensity_ave_no_bg: y intensity with the background subtracted
        :param file: a string of the image file name
        :param file_type: saving all single frames (most used) or averaged frames (not frequently used)
        :param single_frame_counter: plotting frame number
        :return:
        """
        x = np.linspace(0, len(x_intensity_ave_no_bg), len(x_intensity_ave_no_bg))
        y = np.linspace(0, len(y_intensity_ave_no_bg), len(y_intensity_ave_no_bg))
        fig = plt.figure()
        gs = gridspec.GridSpec(4, 4)
        ax_main = plt.subplot(gs[1:4, 0:3])
        ax_x_dist = plt.subplot(gs[0, 0:3], sharex=ax_main)
        ax_y_dist = plt.subplot(gs[1:4, 3], sharey=ax_main)
        ax_main.imshow(zoom_in, aspect='auto', cmap='jet')
        ax_main.axis([0, len(zoom_in[0]), len(zoom_in), 0])
        ax_main.set(xlabel="x pixel", ylabel="y pixel")
        # Normalise both projections to a maximum of 10 counts for display.
        x_intensity_ave_no_bg_normalized = [i * 10 / max(x_intensity_ave_no_bg) for i in x_intensity_ave_no_bg]
        y_intensity_ave_no_bg_normalized = [i * 10 / max(y_intensity_ave_no_bg) for i in y_intensity_ave_no_bg]
        ax_x_dist.bar(x, x_intensity_ave_no_bg_normalized, 1, color='b')
        ax_x_dist.set(ylabel='count')
        ax_x_dist.grid(ls=':', alpha=0.5)
        plt.setp(ax_x_dist.get_xticklabels(), visible=False)
        ax_y_dist.barh(y, y_intensity_ave_no_bg_normalized, 1, color='b')
        ax_y_dist.set(xlabel='count')
        ax_y_dist.grid(ls=':', alpha=0.5)
        plt.setp(ax_y_dist.get_yticklabels(), visible=False)
        # Gaussian fit
        popt, pcov = calculate_gauss_fitting_params(x, x_intensity_ave_no_bg_normalized)
        poptt, pcovv = calculate_gauss_fitting_params(y, y_intensity_ave_no_bg_normalized)
        ax_y_dist.plot(gaus(y, *poptt), y, label='gaussian fit', c='C03', alpha=0.6)
        ax_y_dist.text(5, (self.y_end - self.y_start) / 2 - 60,
                       'y$_{RMS}$=%.4f mm' % (abs(poptt[2]) * constant.pixel_res),
                       rotation=270, fontsize=8)
        ax_x_dist.plot(x, gaus(x, *popt), label='gaussian fit', c='C03', alpha=0.6)
        ax_x_dist.text((self.x_end - self.x_start) / 2 + 60, 5,
                       'x$_{RMS}$=%.4f mm' % (abs(popt[2]) * constant.pixel_res),
                       fontsize=8)
        # RMS widths in mm (pixel resolution conversion via constant.pixel_res).
        self.x_rms_all.append(abs(popt[2] * constant.pixel_res))
        self.y_rms_all.append(abs(poptt[2]) * constant.pixel_res)
        if file_type == 'all':
            fig.suptitle(file, fontsize=14)
        elif file_type == 'single':
            if single_frame_counter is None:
                raise ValueError('Single frame counter is needed.')
            fig.suptitle(file + "\n frame#%d" % (single_frame_counter + 1), fontsize=14)
        else:
            raise ValueError('file_type has to be single or all.')
        # NOTE(review): for file_type == 'all' single_frame_counter is None,
        # so this print would raise - confirm the 'all' path is still used.
        print("\rSaving the beam profile of %s Frame#%02i" % (file, single_frame_counter + 1), end="")
        self.save_pdf(file.split(".")[0], file_type)
    def get_intensity_ave_no_bg(self, frame):
        """
        Get 1D lists of averaged intensity along rows, and columns with background subtracted (regular_method use).
        :param frame: original beam image (non-cropped) array
        :return: 1D lists of x and y intensities without the background
        """
        zoomed_in_current = frame[self.y_start:self.y_end, self.x_start:self.x_end]
        bg = self.get_background(zoomed_in_current)
        x_intensity_ave = zoomed_in_current.mean(axis=0).tolist()
        y_intensity_ave = zoomed_in_current.mean(axis=1).tolist()
        return calculate_intensity_avg_no_bg(bg, x_intensity_ave), calculate_intensity_avg_no_bg(bg, y_intensity_ave)
    def get_intensity_ave_using_contour_bg(self, frame):
        """
        Get 1D lists of averaged x and y intensities with background subtracted from 2D image array (contour_method
        use).
        :param frame: original beam image (non-cropped) array
        :return: 1D lists of x and y intensities without the background, cropped in 2D image array
        """
        zoomed_single_frame = frame[self.y_start:self.y_end, self.x_start:self.x_end]
        bg = self.get_bg_within_contour(frame)
        zoomed_single_frame = np.array(zoomed_single_frame) - bg
        zoomed_single_frame[zoomed_single_frame < 0] = 0
        # Median filter removes remaining salt-and-pepper noise.
        zoomed_single_frame = ndi.median_filter(zoomed_single_frame, 3)
        x_intensity_ave = zoomed_single_frame.mean(axis=0).tolist()
        y_intensity_ave = zoomed_single_frame.mean(axis=1).tolist()
        return x_intensity_ave, y_intensity_ave, zoomed_single_frame
    def get_rms_plot_all_ave_frames(self):
        """
        Beam image at each solenoid setting is averaged over the 20 frames. Then performing the RMS calculation and
        plotting etc. (This method is not frequently in use).
        """
        if not self.has_cropped_range():
            raise DenoiseCroppedError('You need to set the cropped range first.')
        self.clear()
        for file in self.spe_file_list:
            curr_all_frame = self.get_current_all_frame(file)
            # Pixel-wise average over all frames of this file.
            results = sum(curr_all_frame[i] for i in range(len(curr_all_frame))) / len(curr_all_frame)
            zoomed_in_results = results[self.y_start:self.y_end, self.x_start:self.x_end]
            background = self.get_background(zoomed_in_results)
            x_intensity_ave = zoomed_in_results.mean(axis=0).tolist()
            x_intensity_ave_no_bg = [i - background for i in x_intensity_ave]
            y_intensity_ave = zoomed_in_results.mean(axis=1).tolist()
            y_intensity_ave_no_bg = [i - background for i in y_intensity_ave]
            self.generate_image_and_save(zoomed_in_results, x_intensity_ave_no_bg, y_intensity_ave_no_bg, file, 'all')
    def get_rms_and_rms_error(self, contour_method=False, regular_method=False):
        """
        Get beam x and y RMS widths and the correlated standard deviations.
        :param contour_method: a boolean argument for using contour_method
        :param regular_method: a boolean argument for using regular_method
        :return: RMS x, RMS y, STD x, STD y
        """
        if not self.has_cropped_range():
            raise DenoiseCroppedError('You need to set the cropped range first.')
        self.clear()
        if contour_method is True and regular_method is True:
            raise RmsMethodError('Only one method is allowed to be True at once.')
        self.clear()
        for file in self.spe_file_list:
            curr_all_frame = self.get_current_all_frame(file)
            x_rms_temp = []
            y_rms_temp = []
            for i in range(len(curr_all_frame)):
                if regular_method:
                    x_intensity_ave_no_bg, y_intensity_ave_no_bg = self.get_intensity_ave_no_bg(curr_all_frame[i])
                if contour_method:
                    x_intensity_ave_no_bg, y_intensity_ave_no_bg = self.get_intensity_ave_using_contour_bg(
                        curr_all_frame[i])[0:2]
                x = np.linspace(0, len(x_intensity_ave_no_bg), len(x_intensity_ave_no_bg))
                y = np.linspace(0, len(y_intensity_ave_no_bg), len(y_intensity_ave_no_bg))
                # Gaussian fit
                popt, pcov = calculate_gauss_fitting_params(x, x_intensity_ave_no_bg)
                poptt, pcovv = calculate_gauss_fitting_params(y, y_intensity_ave_no_bg)
                x_rms_temp.append(abs(popt[2] * constant.pixel_res))
                y_rms_temp.append(abs(poptt[2]) * constant.pixel_res)
            # One mean/std entry per file (i.e. per solenoid setting).
            self.x_rms_std.append(std(x_rms_temp))
            self.y_rms_std.append(std(y_rms_temp))
            self.x_rms_all.append(mean(x_rms_temp))
            self.y_rms_all.append(mean(y_rms_temp))
        return self.x_rms_all, self.y_rms_all, self.x_rms_std, self.y_rms_std
    def plot_single_frame(self, contour_method=False, regular_method=False):
        """
        Plot and save every individual frame of every file.
        :param contour_method: boolean argument
        :param regular_method: boolean argument
        :return:
        """
        if not self.has_cropped_range():
            raise DenoiseCroppedError('You need to set the cropped range first.')
        self.clear()
        if contour_method is True and regular_method is True:
            raise RmsMethodError('Only one method is allowed to be True at once.')
        self.clear()
        if contour_method is False and regular_method is False:
            raise RmsMethodError('At least one method needs to be used.')
        self.clear()
        for file in self.spe_file_list:
            curr_all_frame = self.get_current_all_frame(file)
            frame_counter = -1
            plt.close('all')
            for i in range(len(curr_all_frame)):
                frame_counter += 1
                current_frame = curr_all_frame[i]
                zoomed_in_current = current_frame[self.y_start:self.y_end, self.x_start:self.x_end]
                if regular_method and not contour_method:
                    x_intensity_ave_no_bg, y_intensity_ave_no_bg = self.get_intensity_ave_no_bg(curr_all_frame[i])
                    zoomed_in_frame = ndi.median_filter(zoomed_in_current, 3)
                if contour_method and not regular_method:
                    x_intensity_ave_no_bg, y_intensity_ave_no_bg, zoomed_in_frame = self.get_intensity_ave_using_contour_bg(
                        curr_all_frame[i])
                self.generate_image_and_save(zoomed_in_frame, x_intensity_ave_no_bg, y_intensity_ave_no_bg, file,
                                             'single', frame_counter)
    def draw_beam_contour(self, lower_diameter_boundary):
        """
        Draw beam image with the contour plotted in red.
        :param lower_diameter_boundary: lower boundary of the contour diameter to be removed
        :return:
        """
        for file in self.spe_file_list:
            curr_all_frame = self.get_current_all_frame(file)
            contr = 0
            for i in range(len(curr_all_frame)):
                contr += 1
                current_frame = curr_all_frame[i]
                zoomed_in_current = current_frame[self.y_start:self.y_end, self.x_start:self.x_end]
                plt.imshow(zoomed_in_current, cmap='jet')
                plt.title(file + "\nFrame#" + str(contr))
                plt.xlabel("x pixel")
                plt.ylabel("y pixel")
                # plot the less fine contour for good looking.
                smooth_results_to_plt = scipy.ndimage.zoom(zoomed_in_current, 0.5)
                x_to_plt = np.linspace(0, len(zoomed_in_current[0]), round(len(zoomed_in_current[0]) * 0.5))
                y_to_plt = np.linspace(0, len(zoomed_in_current), round(len(zoomed_in_current) * 0.5))
                X_to_plt, Y_to_plt = np.meshgrid(x_to_plt, y_to_plt)
                contour_to_plt = plt.contour(X_to_plt, Y_to_plt, smooth_results_to_plt, levels=[settings.CONTOUR_LEVEL],
                                             colors='r')
                # remove small contours (random noisy spots) for good looking.
                # NOTE(review): the inner loop variable `i` shadows the frame
                # index `i` of the outer loop - harmless here but confusing.
                for level in contour_to_plt.collections:
                    for i, path in reversed(list(enumerate(level.get_paths()))):
                        verts = path.vertices
                        diameter = np.max(verts.max(axis=0) - verts.min(axis=0))
                        if diameter < lower_diameter_boundary:
                            del (level.get_paths()[i])
                plt.show()
    def get_main_beam_contour_and_force_outer_zero(self, current_frame):
        """
        Force all pixels to be zero but the main center contoured beam.
        :param current_frame: original beam 2D array
        :return:
        """
        zoomed_in_current = current_frame[self.y_start:self.y_end, self.x_start:self.x_end]
        fig, ax = plt.subplots()
        smooth_results_to_plt = scipy.ndimage.zoom(zoomed_in_current, 1)
        x = np.linspace(0, len(zoomed_in_current[0]), round(len(zoomed_in_current[0]) * 1))
        y = np.linspace(0, len(zoomed_in_current), round(len(zoomed_in_current) * 1))
        X, Y = np.meshgrid(x, y)
        contour_to_plt = ax.contour(X, Y, smooth_results_to_plt,
                                    levels=[settings.CONTOUR_LEVEL],
                                    colors='r')
        plt.close(fig)
        # Keep only the largest contour path - assumed to be the main beam.
        contour_collection = contour_to_plt.collections[0].get_paths()
        contour_enu = list(contour_collection)
        largest_path = max(contour_enu, key=len).vertices.tolist()
        sorted_by_y = sorted(largest_path, key=lambda tup: tup[1])
        round_sorted_y = [(round(x), round(y)) for x, y in sorted_by_y]
        # Group contour x positions by (rounded) y row.
        y_set = {}
        for pointx, pointy in round_sorted_y:
            y_set[pointy] = (y_set[pointy] + [pointx]) if y_set.get(pointy) else [pointx]
        y_cor_list = []
        for y, x_list in y_set.items():
            if y not in y_cor_list:
                y_cor_list.append(int(y))
            # Zero every pixel left/right of the contour on this row.
            for check_x in it.chain(range(0, min(x_list)), range(max(x_list) + 1, len(zoomed_in_current[0]))):
                zoomed_in_current[y][check_x] = 0
        # Zero every row above/below the contour's vertical extent.
        for enu_x in range(len(zoomed_in_current[0])):
            for row_num in it.chain(range(0, min(y_cor_list)), range(max(y_cor_list), len(zoomed_in_current))):
                zoomed_in_current[row_num][enu_x] = 0
        self.zoom_in_single_frame = zoomed_in_current
| [
"numpy.polyfit",
"matplotlib.pyplot.ylabel",
"numpy.array",
"pandas.read_excel",
"matplotlib.pyplot.imshow",
"numpy.mean",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.close",
"numpy.exp",
"matplotlib.gridspec.GridSpec",
"matplotlib.pyplot.contour",
"numpy.meshgrid",
"matplotlib.pyplot.gcf... | [((727, 765), 'pandas.read_excel', 'pd.read_excel', (['settings.BSOL_FILE_NAME'], {}), '(settings.BSOL_FILE_NAME)\n', (740, 765), True, 'import pandas as pd\n'), ((885, 916), 'numpy.polyfit', 'np.polyfit', (['current', 'b_field', '(1)'], {}), '(current, b_field, 1)\n', (895, 916), True, 'import numpy as np\n'), ((1706, 1799), 'scipy.optimize.curve_fit', 'curve_fit', (['gaus', 'interval', 'intensity_ave_no_bg'], {'p0': '[1.0, mean, sigma, 0]', 'maxfev': '(10000000)'}), '(gaus, interval, intensity_ave_no_bg, p0=[1.0, mean, sigma, 0],\n maxfev=10000000)\n', (1715, 1799), False, 'from scipy.optimize import curve_fit\n'), ((2864, 2905), 'numpy.interp', 'np.interp', (['xvals', 'count_ls', 'current_ls', '(1)'], {}), '(xvals, count_ls, current_ls, 1)\n', (2873, 2905), True, 'import numpy as np\n'), ((6717, 6749), 'numpy.mean', 'np.mean', (['single_frame_temp[:100]'], {}), '(single_frame_temp[:100])\n', (6724, 6749), True, 'import numpy as np\n'), ((8596, 8608), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (8606, 8608), True, 'import matplotlib.pyplot as plt\n'), ((8622, 8645), 'matplotlib.gridspec.GridSpec', 'gridspec.GridSpec', (['(4)', '(4)'], {}), '(4, 4)\n', (8639, 8645), True, 'import matplotlib.gridspec as gridspec\n'), ((8664, 8689), 'matplotlib.pyplot.subplot', 'plt.subplot', (['gs[1:4, 0:3]'], {}), '(gs[1:4, 0:3])\n', (8675, 8689), True, 'import matplotlib.pyplot as plt\n'), ((8711, 8750), 'matplotlib.pyplot.subplot', 'plt.subplot', (['gs[0, 0:3]'], {'sharex': 'ax_main'}), '(gs[0, 0:3], sharex=ax_main)\n', (8722, 8750), True, 'import matplotlib.pyplot as plt\n'), ((8771, 8810), 'matplotlib.pyplot.subplot', 'plt.subplot', (['gs[1:4, 3]'], {'sharey': 'ax_main'}), '(gs[1:4, 3], sharey=ax_main)\n', (8782, 8810), True, 'import matplotlib.pyplot as plt\n'), ((12442, 12483), 'scipy.ndimage.median_filter', 'ndi.median_filter', (['zoomed_single_frame', '(3)'], {}), '(zoomed_single_frame, 3)\n', (12459, 12483), True, 'from scipy 
import ndimage as ndi\n'), ((19895, 19909), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (19907, 19909), True, 'import matplotlib.pyplot as plt\n'), ((20177, 20194), 'numpy.meshgrid', 'np.meshgrid', (['x', 'y'], {}), '(x, y)\n', (20188, 20194), True, 'import numpy as np\n'), ((20385, 20399), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (20394, 20399), True, 'import matplotlib.pyplot as plt\n'), ((1182, 1223), 'numpy.exp', 'np.exp', (['(-(x - x0) ** 2 / (2 * sigma ** 2))'], {}), '(-(x - x0) ** 2 / (2 * sigma ** 2))\n', (1188, 1223), True, 'import numpy as np\n'), ((6996, 7032), 'winspec.SpeFile', 'SpeFile', (['(self.folder_location + file)'], {}), '(self.folder_location + file)\n', (7003, 7032), False, 'from winspec import SpeFile\n'), ((7503, 7532), 'matplotlib.backends.backend_pdf.PdfPages', 'PdfPages', (['(file + file_end_str)'], {}), '(file + file_end_str)\n', (7511, 7532), False, 'from matplotlib.backends.backend_pdf import PdfPages\n'), ((12319, 12348), 'numpy.array', 'np.array', (['zoomed_single_frame'], {}), '(zoomed_single_frame)\n', (12327, 12348), True, 'import numpy as np\n'), ((16740, 16756), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (16749, 16756), True, 'import matplotlib.pyplot as plt\n'), ((15656, 15671), 'numpy.std', 'std', (['x_rms_temp'], {}), '(x_rms_temp)\n', (15659, 15671), False, 'from numpy import mean, std\n'), ((15707, 15722), 'numpy.std', 'std', (['y_rms_temp'], {}), '(y_rms_temp)\n', (15710, 15722), False, 'from numpy import mean, std\n'), ((15759, 15775), 'numpy.mean', 'mean', (['x_rms_temp'], {}), '(x_rms_temp)\n', (15763, 15775), False, 'from numpy import mean, std\n'), ((15811, 15827), 'numpy.mean', 'mean', (['y_rms_temp'], {}), '(y_rms_temp)\n', (15815, 15827), False, 'from numpy import mean, std\n'), ((18272, 18313), 'matplotlib.pyplot.imshow', 'plt.imshow', (['zoomed_in_current'], {'cmap': '"""jet"""'}), "(zoomed_in_current, cmap='jet')\n", (18282, 18313), 
True, 'import matplotlib.pyplot as plt\n'), ((18388, 18409), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""x pixel"""'], {}), "('x pixel')\n", (18398, 18409), True, 'import matplotlib.pyplot as plt\n'), ((18426, 18447), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""y pixel"""'], {}), "('y pixel')\n", (18436, 18447), True, 'import matplotlib.pyplot as plt\n'), ((18844, 18875), 'numpy.meshgrid', 'np.meshgrid', (['x_to_plt', 'y_to_plt'], {}), '(x_to_plt, y_to_plt)\n', (18855, 18875), True, 'import numpy as np\n'), ((18909, 19013), 'matplotlib.pyplot.contour', 'plt.contour', (['X_to_plt', 'Y_to_plt', 'smooth_results_to_plt'], {'levels': '[settings.CONTOUR_LEVEL]', 'colors': '"""r"""'}), "(X_to_plt, Y_to_plt, smooth_results_to_plt, levels=[settings.\n CONTOUR_LEVEL], colors='r')\n", (18920, 19013), True, 'import matplotlib.pyplot as plt\n'), ((19533, 19543), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (19541, 19543), True, 'import matplotlib.pyplot as plt\n'), ((6596, 6618), 'numpy.array', 'np.array', (['single_frame'], {}), '(single_frame)\n', (6604, 6618), True, 'import numpy as np\n'), ((17203, 17242), 'scipy.ndimage.median_filter', 'ndi.median_filter', (['zoomed_in_current', '(3)'], {}), '(zoomed_in_current, 3)\n', (17220, 17242), True, 'from scipy import ndimage as ndi\n'), ((7579, 7588), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (7586, 7588), True, 'import matplotlib.pyplot as plt\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
# Original file comes from sncosmo
"""Functions for reading GRB light curves.
Adapted from Python module 'sncosmo'"""
from __future__ import print_function
from collections import OrderedDict as odict
import numpy as np
from astropy.table import Table, vstack
import six
__all__ = [
"read_lc",
"load_observations",
"load_info_observations",
"load_telescope_transmissions",
]
def _cast_str(s):
try:
return int(s)
except:
try:
return float(s)
except:
return s.strip()
# -----------------------------------------------------------------------------
# Reader: ascii
def _read_ascii(f, **kwargs):
    """Parse an ascii GRB light-curve/SED file into metadata and columns.

    :param f: an open file object, iterated line by line
    :param kwargs: optional ``delim`` (field separator), ``metachar``
        (default ``'@'``) and ``commentchar`` (default ``'#'``)
    :return: ``(meta, data)`` - ordered dicts holding the ``@`` metadata and
        the column-name -> value-list table data
    """
    delim = kwargs.get("delim", None)
    metachar = kwargs.get("metachar", "@")
    commentchar = kwargs.get("commentchar", "#")
    meta = odict()
    colnames = []
    cols = []
    readingdata = False
    for line in f:
        # strip leading & trailing whitespace, newline, and comments
        line = line.strip()
        # Find name, format and time in comment
        if len(line) > 0 and line[0] == "#":
            if line[1] == "#":
                pass  # comment line
                # NOTE(review): 'pass' does not skip the line - parsing falls
                # through below; confirm this is intentional.
            pos = line.find(":")
            if pos in [-1, 1]:
                pass  # comment line
            # '#name:...', '#type:...' and '#time_since_burst:...' header
            # comments announce the GRB name, table type and SED epoch.
            if line[1:pos].strip().lower() == "name":
                grb_name = str(line[pos+1:].strip())
            if line[1:pos].strip().lower() == "type":
                grb_format = str(line[pos+1:].strip())
            if line[1:pos].strip().lower() == "time_since_burst":
                time = str(line[pos+1:].strip())
        # Drop any trailing comment; whole comment lines become empty and
        # are skipped by the length check below.
        pos = line.find(commentchar)
        if pos > -1:
            line = line[:pos]
        if len(line) == 0:
            continue
        if not readingdata:
            # Read metadata
            if line[0] == metachar:
                pos = line.find(" ")  # Find first space.
                if pos in [-1, 1]:  # Space must exist and key must exist.
                    raise ValueError("Incorrectly formatted metadata line: " + line)
                meta[line[1:pos]] = _cast_str(line[pos:])
                continue
            # Read header line
            # NOTE(review): grb_format/grb_name are only bound by the header
            # comments above - files lacking them raise NameError here.
            if grb_format.lower() == "lc":
                colnames.extend(["Name"])
            elif grb_format.lower() == "sed":
                colnames.extend(["Name", "time_since_burst"])
            for item in line.split(delim):
                colnames.append(item.strip())
                cols.append([])
            # add columns for the grb name and time since burst
            cols.extend([[], []])
            readingdata = True
            continue
        # Now we're reading data
        items = []
        items.append(grb_name)
        if grb_format.lower() == "sed":
            items.extend([time])
        items.extend(line.split(delim))
        for col, item in zip(cols, items):
            # print ('col: {}'.format(col))
            # print ('items: {}'.format(items))
            col.append(_cast_str(item))
    data = odict(zip(colnames, cols))
    return meta, data
# -----------------------------------------------------------------------------
# All readers
# Registry mapping a format name to its reader callable; read_lc() looks
# formats up here.
READERS = {"ascii": _read_ascii}
def read_lc(file_or_dir, format="ascii", **kwargs):
    """Read light curve data for a single supernova.

    Parameters
    ----------
    file_or_dir : str
        Filename (formats 'ascii', 'salt2') or directory name (format
        'salt2-old').  For 'salt2-old' format, the directory must contain a
        file named 'lightfile'; all other files in the directory are assumed
        to be photometry files unless the `filenames` keyword is given.
    format : {'ascii', 'salt2', 'salt2-old'}, optional
        Format of the file.  Default is 'ascii'.  'salt2' is the new format
        available in snfit version >= 2.3.0.
    delim : str, optional
        **[ascii only]** Used to split entries on a line; default `None`
        (extra whitespace is ignored).
    metachar : str, optional
        **[ascii only]** Lines whose first non-whitespace character is
        `metachar` are treated as metadata lines; default ``'@'``.
    commentchar : str, optional
        **[ascii only]** One-character comment marker; default '#'.
    filenames : list, optional
        **[salt2-old only]** Explicit photometry file names to read.

    Returns
    -------
    t : astropy `~astropy.table.Table`
        Table of data.  Metadata (an `OrderedDict`) is available via the
        ``t.meta`` attribute; keys are case-sensitive.
    """
    readfunc = READERS.get(format)
    if readfunc is None:
        raise ValueError(
            "Reader not defined for format {0!r}. Options: ".format(format)
            + ", ".join(READERS.keys())
        )
    if format == "salt2-old":
        # Directory-based format: the reader handles the path itself.
        meta, data = readfunc(file_or_dir, **kwargs)
    elif isinstance(file_or_dir, six.string_types):
        # A path was given: open it ourselves and close it afterwards.
        with open(file_or_dir, "r", encoding="utf-8") as handle:
            meta, data = readfunc(handle, **kwargs)
    else:
        # Already an open file-like object.
        meta, data = readfunc(file_or_dir, **kwargs)
    return Table(data, meta=meta, masked=True)
def load_observations(filenames):
    """Load observations (light curves or SEDs) from several files.

    :param filenames: iterable of file paths readable by :func:`read_lc`
    :return: a single stacked astropy Table
    """
    # Collect everything first and vstack once at the end; stacking
    # incrementally inside the loop makes memory usage explode.
    data_list = [read_lc(filename, format="ascii") for filename in filenames]
    return vstack(data_list)
def load_info_observations(filenames):
    """Load per-GRB metadata files made of ``#key:value`` lines.

    Example keys: name, zspec, zsim, Av, dust_type, beta.

    :param filenames: iterable of info file paths
    :return: astropy Table with one row per file, sorted by 'name'
    """
    data_list = []
    for filename in filenames:
        references = []
        values = []
        with open(filename, "r", encoding="utf-8") as f:
            for line in f:
                # strip leading & trailing whitespace and newline
                line = line.strip()
                if not line:
                    continue
                # Only '#key:value' lines carry data; '##' marks a comment.
                if line[0] == "#":
                    if line[1] == "#":
                        continue  # comment line
                    pos = line.find(":")
                    if pos in [-1, 1]:
                        continue  # comment line
                    references.append(line[1:pos].strip())
                    values.append(line[pos+1:].strip())
        data_list.append(Table(np.array(values), names=list(references)))
    # Stack once at the end; 'outer' keeps columns missing in some files.
    data_info = vstack(data_list, join_type="outer")
    # sort by name
    data_info.sort("name")
    return data_info
def load_telescope_transmissions(info_dict, wavelength,
                                 norm=False, norm_val=1.0):
    """
    Load the transmittance of the selected band of the selected
    telescope with respect to wavelength.

    Parameters
    ----------
    info_dict: dictionary
        must provide the 'path', 'telescope' and 'band' keys
    wavelength : array
        wavelengths in angstrom
    norm: boolean
        enables to normalise the values to 'norm_val' (default: False)
    norm_val: float
        value used for normalising the data

    Returns
    ---------
    trans : array
        transmittance of the mirror at a given wavelength (0-1)
    """
    from .utils import resample

    filter_path = (
        info_dict["path"]
        + "/transmissions/"
        + info_dict["telescope"]
        + "/"
        + info_dict["band"]
        + ".txt"
    )
    wvl = []
    trans = []
    # 'with' closes the file deterministically (the original leaked the
    # handle).
    with open(filter_path, "r") as filter_file:
        for line in filter_file:
            if line[0] != "#" and len(line) > 3:
                bits = line.split()
                trans.append(float(bits[1]))
                wvl.append(float(bits[0]))
    wvl = np.array(wvl) * 10.0  # nm --> angstroms
    # Always work on an ndarray: normalising a plain Python list below
    # used to raise a TypeError when the data was already fractional.
    trans = np.array(trans, dtype=np.float64)
    if trans.max() > 1:
        trans = trans * 1e-2  # percent --> fraction
    # Normalisation
    if norm:
        trans = trans / trans.max() * norm_val
    # Resample the transmission onto the requested wavelength grid
    trans = resample(wvl, trans, wavelength, 0.0, 1.0)
    return trans
| [
"numpy.array",
"collections.OrderedDict",
"astropy.table.Table",
"astropy.table.vstack"
] | [((929, 936), 'collections.OrderedDict', 'odict', ([], {}), '()\n', (934, 936), True, 'from collections import OrderedDict as odict\n'), ((6065, 6100), 'astropy.table.Table', 'Table', (['data'], {'meta': 'meta', 'masked': '(True)'}), '(data, meta=meta, masked=True)\n', (6070, 6100), False, 'from astropy.table import Table, vstack\n'), ((6639, 6656), 'astropy.table.vstack', 'vstack', (['data_list'], {}), '(data_list)\n', (6645, 6656), False, 'from astropy.table import Table, vstack\n'), ((8037, 8073), 'astropy.table.vstack', 'vstack', (['data_list'], {'join_type': '"""outer"""'}), "(data_list, join_type='outer')\n", (8043, 8073), False, 'from astropy.table import Table, vstack\n'), ((9249, 9262), 'numpy.array', 'np.array', (['wvl'], {}), '(wvl)\n', (9257, 9262), True, 'import numpy as np\n'), ((7835, 7851), 'numpy.array', 'np.array', (['values'], {}), '(values)\n', (7843, 7851), True, 'import numpy as np\n'), ((9329, 9362), 'numpy.array', 'np.array', (['trans'], {'dtype': 'np.float64'}), '(trans, dtype=np.float64)\n', (9337, 9362), True, 'import numpy as np\n')] |
"""New style (fast) tag count annos
Use these for new projects.
"""
from mbf_genomics.annotator import Annotator
from typing import Dict, List
from pypipegraph import Job
from mbf_genomics import DelayedDataFrame
import numpy as np
import pypipegraph as ppg
import hashlib
import pandas as pd
import mbf_r
import rpy2.robjects as ro
import rpy2.robjects.numpy2ri as numpy2ri
from pathlib import Path
from dppd import dppd
import dppd_plotnine # noqa:F401
from mbf_qualitycontrol import register_qc, QCCollectingJob, qc_disabled
from mbf_genomics.util import (
parse_a_or_c_to_plot_name,
parse_a_or_c_to_column,
parse_a_or_c_to_anno,
)
from pandas import DataFrame
# Module-level dppd handles: `dp` starts a pipeline, `X` is the stand-in
# for the current data frame inside it.
dp, X = dppd()
# ## Base classes and strategies - skip these if you just care about using TagCount annotators
class _CounterStrategyBase:
cores_needed = 1
def extract_lookup(self, data):
"""Adapter for count strategies that have different outputs
(e.g. one-hashmap-unstranded or two-hashmaps-one-forward-one-reversed)
"""
return data
class CounterStrategyStrandedRust(_CounterStrategyBase):
    """Stranded read counting backed by the rust ``mbf_bam`` counter."""

    cores_needed = -1
    name = "stranded"

    def __init__(self):
        # May be flipped by callers that knowingly count reversed lanes.
        self.disable_sanity_check = False

    def count_reads(
        self,
        interval_strategy,
        genome,
        bam_filename,
        bam_index_name,
        reverse=False,
        dump_matching_reads_filename=None,
    ):
        """Count reads per interval (stranded) and sanity-check the result.

        Returns the raw (forward, reverse) pair produced by
        ``mbf_bam.count_reads_stranded``.
        """
        from mbf_bam import count_reads_stranded

        intervals = interval_strategy._get_interval_tuples_by_chr(genome)
        gene_intervals = IntervalStrategyGene()._get_interval_tuples_by_chr(genome)
        if dump_matching_reads_filename:
            dump_matching_reads_filename = str(dump_matching_reads_filename)
        res = count_reads_stranded(
            bam_filename,
            bam_index_name,
            intervals,
            gene_intervals,
            matching_reads_output_bam_filename=dump_matching_reads_filename,
        )
        self.sanity_check(res, bam_filename)
        return res

    def sanity_check(self, foward_and_reverse, bam_filename):
        """Raise if many genes show more reverse than forward reads.

        A large fraction of genes with a substantial reverse excess usually
        means the lane should have been reversed before alignment.
        """
        if self.disable_sanity_check:
            return
        forward, reverse = foward_and_reverse
        suspicious = 0
        for gene_stable_id, forward_count in forward.items():
            reverse_count = reverse.get(gene_stable_id, 0)
            # "at least 100 reverse tags and more than 110% of forward"
            if reverse_count > 100 and reverse_count > forward_count * 1.1:
                suspicious += 1
        if suspicious > 0.1 * len(forward):
            raise ValueError(
                "Found at least %.2f%% of genes to have a reverse read count (%s) "
                "above 110%% of the exon read count (and at least 100 tags). "
                "This indicates that this lane (%s) should have been reversed before alignment. "
                "Set reverse_reads=True on your Lane object"
                % (
                    100.0 * suspicious / len(forward),
                    self.__class__.__name__,
                    bam_filename,
                )
            )

    def extract_lookup(self, data):
        """Return only the forward-strand hash map from the (fw, rev) pair."""
        return data[0]
class CounterStrategyUnstrandedRust(_CounterStrategyBase):
    """Unstranded read counting backed by the rust ``mbf_bam`` counter."""

    cores_needed = -1
    name = "unstranded"

    def count_reads(
        self,
        interval_strategy,
        genome,
        bam_filename,
        bam_index_name,
        reverse=False,
        dump_matching_reads_filename=None,
    ):
        """Count reads per interval, ignoring strand.

        :raises ValueError: if ``dump_matching_reads_filename`` is passed -
            only the stranded counter supports dumping matching reads.
        """
        if dump_matching_reads_filename:
            # Fixed typo in the error message ('supoprted' -> 'supported').
            raise ValueError(
                "dump_matching_reads_filename not supported on this Counter"
            )
        intervals = interval_strategy._get_interval_tuples_by_chr(genome)
        gene_intervals = IntervalStrategyGene()._get_interval_tuples_by_chr(genome)
        # chr -> [gene_id, strand, [starts], [stops]]
        from mbf_bam import count_reads_unstranded

        res = count_reads_unstranded(
            bam_filename, bam_index_name, intervals, gene_intervals
        )
        return res
class _IntervalStrategy:
def get_interval_lengths_by_gene(self, genome):
by_chr = self._get_interval_tuples_by_chr(genome)
length_by_gene = {}
for chr, tups in by_chr.items():
for tup in tups: # stable_id, strand, [starts], [stops]
gene_stable_id = tup[0]
length = 0
for start, stop in zip(tup[2], tup[3]):
length += stop - start
length_by_gene[gene_stable_id] = length
return length_by_gene
def _get_interval_tuples_by_chr(self, genome): # pragma: no cover
raise NotImplementedError()
def get_deps(self):
return []
class IntervalStrategyGenomicRegion(_IntervalStrategy):
    """Used internally by _FastTagCounterGR"""

    def __init__(self, gr):
        self.gr = gr
        self.name = f"GR_{gr.name}"

    def _get_interval_tuples_by_chr(self, genome):
        """One interval per region row; the row index (stringified) is the id."""
        if self.gr.genome != genome:  # pragma: no cover
            raise ValueError("Mismatched genomes")
        df = self.gr.df
        if "strand" not in df.columns:
            # regions without strand information count as forward
            df = df.assign(strand=1)
        df = df[["chr", "start", "stop", "strand"]]
        if df.index.duplicated().any():
            raise ValueError("index must be unique")
        result = {chrom: [] for chrom in genome.get_chromosome_lengths()}
        for row in df.itertuples():
            # row[0] is the index value, used as the interval's identifier
            result[row.chr].append((str(row[0]), row.strand, [row.start], [row.stop]))
        return result
class IntervalStrategyGene(_IntervalStrategy):
    """Count from TSS to TES"""

    name = "gene"

    def _get_interval_tuples_by_chr(self, genome):
        """One (id, strand, [start], [stop]) tuple per gene, keyed by chromosome."""
        result = {chrom: [] for chrom in genome.get_chromosome_lengths()}
        for row in genome.df_genes[["chr", "start", "stop", "strand"]].itertuples():
            # row[0] is the df_genes index value (presumably the gene stable id)
            result[row.chr].append((row[0], row.strand, [row.start], [row.stop]))
        return result
class IntervalStrategyExon(_IntervalStrategy):
    """count all exons"""

    name = "exon"

    def _get_interval_tuples_by_chr(self, genome):
        """One tuple per gene containing its merged exon starts/stops."""
        result = {chrom: [] for chrom in genome.get_chromosome_lengths()}
        for gene in genome.genes.values():
            merged = gene.exons_merged  # (starts, stops) pair
            result[gene.chr].append(
                (gene.gene_stable_id, gene.strand, list(merged[0]), list(merged[1]))
            )
        return result
class IntervalStrategyIntron(_IntervalStrategy):
    """count all introns"""

    name = "intron"

    def _get_interval_tuples_by_chr(self, genome):
        """One tuple per gene containing its strict intron starts/stops."""
        result = {chrom: [] for chrom in genome.get_chromosome_lengths()}
        for gene in genome.genes.values():
            introns = gene.introns_strict  # (starts, stops) pair
            result[gene.chr].append(
                (gene.gene_stable_id, gene.strand, list(introns[0]), list(introns[1]))
            )
        return result
class IntervalStrategyExonSmart(_IntervalStrategy):
    """For protein coding genes: count only in exons of protein-coding transcripts.
    For other genes: count all exons"""

    name = "exonsmart"

    def _get_interval_tuples_by_chr(self, genome):
        result = {chrom: [] for chrom in genome.get_chromosome_lengths()}
        for gene in genome.genes.values():
            exons = gene.exons_protein_coding_merged
            if len(exons[0]) == 0:
                # no protein coding transcripts -> fall back to all exons
                exons = gene.exons_merged
            result[gene.chr].append(
                (gene.gene_stable_id, gene.strand, list(exons[0]), list(exons[1]))
            )
        return result
# Now the actual tag count annotators
class TagCountCommonQC:
    """Mixin providing QC plots (read distribution, PCA) for tag-count annotators.

    Expects the host class to define: columns, aligned_lane, plot_name,
    qc_folder and qc_distribution_scale_y_name (see _FastTagCounter).
    """

    def register_qc(self, genes):
        """Register all QC jobs for *genes*, unless QC is globally disabled."""
        if not qc_disabled():
            self.register_qc_distribution(genes)
            self.register_qc_pca(genes)
            # self.register_qc_cummulative(genes)

    def register_qc_distribution(self, genes):
        """Register a boxplot of the raw read distribution, one box per lane."""
        output_filename = genes.result_dir / self.qc_folder / "read_distribution.png"
        output_filename.parent.mkdir(exist_ok=True)
        # *elements* is the list of annotators collected by QCCollectingJob;
        # the y-axis label is bound at definition time via the default argument.
        def plot(
            output_filename,
            elements,
            qc_distribution_scale_y_name=self.qc_distribution_scale_y_name,
        ):
            df = genes.df
            # one column per aligned lane, named after the lane
            df = dp(df).select({x.aligned_lane.name: x.columns[0] for x in elements}).pd
            if len(df) == 0:
                # no genes at all - render a placeholder image instead of failing
                df = pd.DataFrame({"x": [0], "y": [0], "text": "no data"})
                dp(df).p9().add_text("x", "y", "text").render(output_filename).pd
            else:
                # wide -> long for plotting
                plot_df = dp(df).melt(var_name="sample", value_name="count").pd
                plot = dp(plot_df).p9().theme_bw()
                print(df)
                # df.to_pickle(output_filename + '.pickle')
                if ((df > 0).sum(axis=0) > 1).any() and len(df) > 1:
                    # plot = plot.geom_violin(
                    # dp.aes(x="sample", y="count"), width=0.5, bw=0.1
                    # )
                    pass  # violin layer disabled - oh so slow as of 20201019
                if len(plot_df["sample"].unique()) > 1:
                    plot = plot.annotation_stripes(fill_range=True)
                if (plot_df["count"] > 0).any():
                    # can't have a log boxplot with all nans (log(0))
                    plot = plot.scale_y_continuous(
                        trans="log10",
                        name=qc_distribution_scale_y_name,
                        breaks=[1, 10, 100, 1000, 10000, 100_000, 1e6, 1e7],
                    )
                return (
                    plot.add_boxplot(
                        x="sample", y="count", _width=0.1, _fill=None, _color="blue"
                    )
                    .turn_x_axis_labels()
                    .title("Raw read distribution")
                    .hide_x_axis_title()
                    .render_args(limitsize=False)
                    .render(output_filename, width=0.2 * len(elements) + 1, height=4)
                )
        # QCCollectingJob accumulates all annotators targeting this output file
        return register_qc(
            QCCollectingJob(output_filename, plot)
            .depends_on(genes.add_annotator(self))
            .add(self)
        )

    def register_qc_pca(self, genes):
        """Register a 2-component PCA scatter plot across the collected lanes."""
        output_filename = genes.result_dir / self.qc_folder / "pca.png"
        def plot(output_filename, elements):
            import sklearn.decomposition as decom
            if len(elements) == 1:
                # PCA needs >= 2 samples; plot a single fake point instead
                xy = np.array([[0], [0]]).transpose()
                title = "PCA %s - fake / single sample" % genes.name
            else:
                pca = decom.PCA(n_components=2, whiten=False)
                data = genes.df[[x.columns[0] for x in elements]]
                data -= data.min()  # min max scaling 0..1
                data /= data.max()
                data = data[~pd.isnull(data).any(axis=1)]  # can' do pca on NAN values
                if len(data):
                    # samples are columns, so fit on the transpose
                    pca.fit(data.T)
                    xy = pca.transform(data.T)
                    title = "PCA %s\nExplained variance: x %.2f%%, y %.2f%%" % (
                        genes.name,
                        pca.explained_variance_ratio_[0] * 100,
                        pca.explained_variance_ratio_[1] * 100,
                    )
                else:
                    # every row had a NaN - fall back to fake all-zero points
                    xy = np.array(
                        [[0] * len(elements), [0] * len(elements)]
                    ).transpose()
                    title = "PCA %s - fake / no rows" % genes.name
            plot_df = pd.DataFrame(
                {"x": xy[:, 0], "y": xy[:, 1], "label": [x.plot_name for x in elements]}
            )
            print(plot_df)
            (
                dp(plot_df)
                .p9()
                .theme_bw()
                .add_scatter("x", "y")
                .add_text(
                    "x",
                    "y",
                    "label",
                    # cool, this can go into an endless loop...
                    # _adjust_text={
                    # "expand_points": (2, 2),
                    # "arrowprops": {"arrowstyle": "->", "color": "red"},
                    # },
                )
                .scale_color_many_categories()
                .title(title)
                .render(output_filename, width=8, height=6)
            )
        return register_qc(
            QCCollectingJob(output_filename, plot)
            .depends_on(genes.add_annotator(self))
            .add(self)
        )
class _FastTagCounter(Annotator, TagCountCommonQC):
    """Annotator adding one raw tag-count column for a single aligned lane.

    Combines a count strategy (stranded/unstranded) with an interval
    strategy (gene/exon/...) and caches the counting result on disk via
    pypipegraph.
    """

    def __init__(
        self,
        aligned_lane,
        count_strategy,
        interval_strategy,
        column_name,
        column_desc,
        dump_matching_reads_filename=None,
    ):
        if not hasattr(aligned_lane, "get_bam"):
            raise ValueError("_FastTagCounter only accepts aligned lanes!")
        self.aligned_lane = aligned_lane
        self.genome = self.aligned_lane.genome
        self.count_strategy = count_strategy
        self.interval_strategy = interval_strategy
        self.columns = [(column_name % (self.aligned_lane.name,)).strip()]
        # hash the column name so the cache file name stays filesystem-safe
        self.cache_name = (
            "FT_%s_%s" % (count_strategy.name, interval_strategy.name)
            + "_"
            + hashlib.md5(self.columns[0].encode("utf-8")).hexdigest()
        )
        self.column_properties = {self.columns[0]: {"description": column_desc}}
        self.vid = aligned_lane.vid
        self.cores_needed = count_strategy.cores_needed
        self.plot_name = self.aligned_lane.name
        self.qc_folder = f"{self.count_strategy.name}_{self.interval_strategy.name}"
        self.qc_distribution_scale_y_name = "raw counts"
        self.dump_matching_reads_filename = dump_matching_reads_filename

    def calc(self, df):
        """Return a pd.Series of raw counts, one per gene_stable_id row of *df*."""
        if ppg.inside_ppg():
            data = self._data  # loaded by the CachedAttributeLoadingJob
        else:
            data = self.calc_data()
        lookup = self.count_strategy.extract_lookup(data)
        result = [lookup.get(gene_stable_id, 0) for gene_stable_id in df["gene_stable_id"]]
        # bugfix: np.float was a deprecated alias for the builtin float and
        # was removed in NumPy 1.24 - use float directly.
        return pd.Series(np.array(result, dtype=float))

    def deps(self, _genes):
        """Pipegraph jobs this annotator depends on (data load + invariants)."""
        return [
            self.load_data(),
            ppg.ParameterInvariant(self.cache_name, self.dump_matching_reads_filename),
            ppg.FunctionInvariant(
                self.cache_name + "_count_reads",
                self.count_strategy.__class__.count_reads,
            )
            # todo: actually, this should be a declared file
        ]

    def calc_data(self):
        """Run the (expensive) counting; the result is cached by load_data()."""
        bam_file, bam_index_name = self.aligned_lane.get_bam_names()
        return self.count_strategy.count_reads(
            self.interval_strategy,
            self.genome,
            bam_file,
            bam_index_name,
            dump_matching_reads_filename=self.dump_matching_reads_filename,
        )

    def load_data(self):
        """Return a cached loading job storing the count result in self._data."""
        cf = Path(ppg.util.global_pipegraph.cache_folder) / "FastTagCounters"
        cf.mkdir(exist_ok=True)
        return (
            ppg.CachedAttributeLoadingJob(
                cf / self.cache_name, self, "_data", self.calc_data
            )
            .depends_on(self.aligned_lane.load())
            .use_cores(-1)
        )
class _FastTagCounterGR(Annotator):
    """Annotator adding one raw tag-count column for a GenomicRegions set.

    Like _FastTagCounter, but counts inside arbitrary regions (keyed by the
    region df's index) instead of gene intervals.
    """

    def __init__(self, aligned_lane, count_strategy, column_name, column_desc):
        if not hasattr(aligned_lane, "get_bam"):
            raise ValueError("_FastTagCounter only accepts aligned lanes!")
        self.aligned_lane = aligned_lane
        self.genome = self.aligned_lane.genome
        self.count_strategy = count_strategy
        self.columns = [(column_name % (self.aligned_lane.name,)).strip()]
        # hash the column name so the cache file name stays filesystem-safe
        self.cache_name = (
            "FT_%s_%s" % (count_strategy.name, "on_gr")
            + "_"
            + hashlib.md5(self.columns[0].encode("utf-8")).hexdigest()
        )
        self.column_properties = {self.columns[0]: {"description": column_desc}}
        self.vid = aligned_lane.vid
        self.cores_needed = count_strategy.cores_needed
        self.plot_name = self.aligned_lane.name
        # self.qc_folder = f"{self.count_strategy.name}_{self.interval_strategy.name}"
        # self.qc_distribution_scale_y_name = "raw counts"

    def calc(self, df):
        """Return a pd.Series of raw counts, one per region (keyed by str(index))."""
        if ppg.inside_ppg():
            data = self._data
        else:
            # NOTE(review): calc_data() takes a gr argument and returns a thunk,
            # so this non-ppg branch cannot work as written - presumably this
            # annotator is only ever used inside a pipegraph. Left unchanged.
            data = self.calc_data()
        lookup = self.count_strategy.extract_lookup(data)
        result = [lookup.get(str(idx), 0) for idx in df.index]
        # bugfix: np.float was a deprecated alias for the builtin float and
        # was removed in NumPy 1.24 - use float directly.
        return pd.Series(np.array(result, dtype=float))

    def deps(self, gr):
        return [self.load_data(gr)]

    def calc_data(self, gr):
        """Return a thunk that runs the counting on the region set *gr*."""
        def inner():
            bam_file, bam_index_name = self.aligned_lane.get_bam_names()
            return self.count_strategy.count_reads(
                IntervalStrategyGenomicRegion(gr), self.genome, bam_file, bam_index_name
            )
        return inner

    def load_data(self, gr):
        """Return a cached loading job storing the count result in self._data."""
        cf = gr.cache_dir
        cf.mkdir(exist_ok=True)
        return (
            ppg.CachedAttributeLoadingJob(
                cf / self.cache_name, self, "_data", self.calc_data(gr)
            )
            .depends_on(self.aligned_lane.load())
            .depends_on(gr.load())
            .use_cores(-1)  # should be count_strategy cores needed, no?
        )
#
# ## Raw tag count annos for analysis usage
class ExonSmartStrandedRust(_FastTagCounter):
    """Stranded tag count on protein-coding exons (all exons as fallback)."""

    def __init__(self, aligned_lane, dump_matching_reads_filename=None):
        super().__init__(
            aligned_lane,
            CounterStrategyStrandedRust(),
            IntervalStrategyExonSmart(),
            "Exon, protein coding, stranded smart tag count %s",
            "Tag count inside exons of protein coding transcripts (all if no protein coding transcripts) exons, correct strand only",
            dump_matching_reads_filename,
        )
class ExonSmartUnstrandedRust(_FastTagCounter):
    """Unstranded tag count on protein-coding exons (all exons as fallback)."""

    def __init__(self, aligned_lane):
        super().__init__(
            aligned_lane,
            CounterStrategyUnstrandedRust(),
            IntervalStrategyExonSmart(),
            "Exon, protein coding, unstranded smart tag count %s",
            "Tag count inside exons of protein coding transcripts (all if no protein coding transcripts) both strands",
        )
class ExonStrandedRust(_FastTagCounter):
    """Stranded tag count on all merged exons."""

    def __init__(self, aligned_lane, dump_matching_reads_filename=None):
        super().__init__(
            aligned_lane,
            CounterStrategyStrandedRust(),
            IntervalStrategyExon(),
            "Exon, protein coding, stranded tag count %s",
            "Tag count inside exons of protein coding transcripts (all if no protein coding transcripts) exons, correct strand only",
            dump_matching_reads_filename,
        )
class ExonUnstrandedRust(_FastTagCounter):
    """Unstranded tag count on all merged exons."""

    def __init__(self, aligned_lane):
        super().__init__(
            aligned_lane,
            CounterStrategyUnstrandedRust(),
            IntervalStrategyExon(),
            "Exon, protein coding, unstranded tag count %s",
            "Tag count inside exons of protein coding transcripts (all if no protein coding transcripts) both strands",
        )
class GeneStrandedRust(_FastTagCounter):
    """Stranded tag count over the whole gene body (TSS..TES)."""

    def __init__(self, aligned_lane):
        super().__init__(
            aligned_lane,
            CounterStrategyStrandedRust(),
            IntervalStrategyGene(),
            "Gene, stranded tag count %s",
            "Tag count inside gene body (tss..tes), correct strand only",
        )
class GeneUnstrandedRust(_FastTagCounter):
    """Unstranded tag count over the whole gene body (TSS..TES)."""

    def __init__(self, aligned_lane):
        super().__init__(
            aligned_lane,
            CounterStrategyUnstrandedRust(),
            IntervalStrategyGene(),
            "Gene unstranded tag count %s",
            "Tag count inside gene body (tss..tes), both strands",
        )
def GRUnstrandedRust(aligned_lane):
    """Factory: unstranded per-region tag-count annotator for *aligned_lane*."""
    strategy = CounterStrategyUnstrandedRust()
    return _FastTagCounterGR(
        aligned_lane,
        strategy,
        "Tag count %s",
        "Tag count inside region, both strands",
    )
def GRStrandedRust(aligned_lane):
    """Factory: stranded per-region tag-count annotator for *aligned_lane*."""
    strategy = CounterStrategyStrandedRust()
    return _FastTagCounterGR(
        aligned_lane,
        strategy,
        "Tag count %s",
        "Tag count inside region, stranded",
    )
# we are keeping the python ones for now as reference implementations
# Public aliases: the Rust-backed counters are the canonical implementations
# exported under the plain names.
GeneUnstranded = GeneUnstrandedRust
GeneStranded = GeneStrandedRust
ExonStranded = ExonStrandedRust
ExonUnstranded = ExonUnstrandedRust
ExonSmartStranded = ExonSmartStrandedRust
ExonSmartUnstranded = ExonSmartUnstrandedRust
# ## Normalizing annotators - convert raw tag counts into something normalized
class _NormalizationAnno(Annotator, TagCountCommonQC):
    """Base for annotators deriving a normalized column (CPM, TPM, ...) from a
    raw tag-count column. Subclasses set self.name before calling super().__init__
    and implement calc()."""

    def __init__(self, base_column_spec):
        # fix: parse_a_or_c_to_plot_name is used in the fallback branch below
        # but was missing from this function-local import, so the method
        # depended on a module-level import to avoid a NameError.
        from ..util import (
            parse_a_or_c_to_anno,
            parse_a_or_c_to_column,
            parse_a_or_c_to_plot_name,
        )
        self.raw_anno = parse_a_or_c_to_anno(base_column_spec)
        self.raw_column = parse_a_or_c_to_column(base_column_spec)
        if self.raw_anno is not None:
            self.genome = self.raw_anno.genome
            self.vid = getattr(self.raw_anno, "vid", None)
            self.aligned_lane = getattr(self.raw_anno, "aligned_lane", None)
        else:
            # base column is a plain df column, not an annotator
            self.genome = None
            self.vid = None
            self.aligned_lane = None
        self.columns = [self.raw_column + " " + self.name]
        # hash the column name so the cache file name stays filesystem-safe
        self.cache_name = (
            self.__class__.__name__
            + "_"
            + hashlib.md5(self.columns[0].encode("utf-8")).hexdigest()
        )
        if self.raw_anno is not None:
            self.plot_name = getattr(self.raw_anno, "plot_name", self.raw_column)
            if hasattr(self.raw_anno, "count_strategy"):
                if hasattr(self.raw_anno, "interval_strategy"):
                    iv_name = self.raw_anno.interval_strategy.name
                else:
                    iv_name = "-"
                self.qc_folder = f"normalized_{self.name}_{self.raw_anno.count_strategy.name}_{iv_name}"
            else:
                self.qc_folder = f"normalized_{self.name}"
        else:
            self.plot_name = parse_a_or_c_to_plot_name(base_column_spec)
            self.qc_folder = f"normalized_{self.name}"
        self.qc_distribution_scale_y_name = self.name

    def dep_annos(self):
        """Return the raw-count annotator we normalize, if there is one."""
        if self.raw_anno is None:
            return []
        else:
            return [self.raw_anno]
class NormalizationCPM(_NormalizationAnno):
    """Counts-per-million: scale the raw column so all genes sum to 1e6."""

    def __init__(self, base_column_spec):
        self.name = "CPM"
        self.normalize_to = 1e6
        super().__init__(base_column_spec)
        self.column_properties = {
            self.columns[0]: {
                "description": "Tag count inside protein coding (all if no protein coding transcripts) exons, normalized to 1e6 across all genes"
            }
        }

    def calc(self, df):
        """Return the CPM-normalized counts as a pd.Series."""
        raw_counts = df[self.raw_column]
        # never divide by less than one (guards the all-zero lane)
        scale = self.normalize_to / max(1, float(raw_counts.sum()))
        return pd.Series(raw_counts * scale)
class NormalizationTPM(_NormalizationAnno):
    """Normalize to transcripts per million, ie.
    count / length * (1e6 / (sum_i(count_/length_i)))
    """

    def __init__(self, base_column_spec, interval_strategy=None):
        self.name = "TPM"
        self.normalize_to = 1e6
        super().__init__(base_column_spec)
        if self.raw_anno is None:  # pragma: no cover
            if interval_strategy is None:  # pragma: no cover
                raise ValueError(
                    "TPM normalization needs to know the intervals used. Either base of a FastTagCount annotator or pass in an interval strategy"
                )
            self.interval_strategy = interval_strategy
        else:
            # inherit the intervals from the raw count annotator
            self.interval_strategy = self.raw_anno.interval_strategy
        self.column_properties = {
            self.columns[0]: {"description": "transcripts per million"}
        }

    def calc(self, df):
        """Return a one-column DataFrame with TPM values for each gene."""
        raw_counts = df[self.raw_column]
        length_by_gene = self.interval_strategy.get_interval_lengths_by_gene(
            self.genome
        )
        result = np.zeros(raw_counts.shape, float)
        for ii, gene_stable_id in enumerate(df["gene_stable_id"]):
            result[ii] = raw_counts.iloc[ii] / length_by_gene[gene_stable_id]
        total = float(result[~pd.isnull(result)].sum())
        # fix: an empty (or all-NaN) gene set previously raised
        # ZeroDivisionError here; return all-zero values instead.
        # Also use self.normalize_to instead of a duplicated 1e6 literal.
        factor = self.normalize_to / total if total != 0 else 0.0
        result = result * factor
        return pd.DataFrame({self.columns[0]: result})
class NormalizationFPKM(Annotator):
    """Deliberately unimplemented - FPKM is not offered by this package."""

    def __init__(self, raw_anno):
        raise NotImplementedError(
            "FPKM is a bad thing to use. It is not supported by mbf"
        )
class Salmon(Annotator):
    """Add salmon gene level estimation calculated on a raw Sample"""

    def __init__(
        self,
        raw_lane,
        prefix="Salmon",
        options={
            # "--validateMappings": None, this always get's set by aligners.Salmon
            "--gcBias": None,
            "--seqBias": None,
        },
        libtype="A",
        accepted_biotypes=None,  # set(("protein_coding", "lincRNA")),
        salmon_version="_last_used",
    ):
        self.raw_lane = raw_lane
        # copy so the shared default dict is never mutated across instances
        self.options = options.copy()
        self.libtype = libtype
        self.accepted_biotypes = accepted_biotypes
        self.salmon_version = salmon_version
        self.columns = [
            f"{prefix} TPM {raw_lane.name}",
            f"{prefix} NumReads {raw_lane.name}",
        ]
        self.vid = self.raw_lane.vid

    def deps(self, ddf):
        """Return the salmon gene-level quantification job for this raw lane."""
        import mbf_externals

        aligner = mbf_externals.aligners.Salmon(
            self.accepted_biotypes, version=self.salmon_version
        )
        return aligner.run_quant_on_raw_lane(
            self.raw_lane, ddf.genome, self.libtype, self.options, gene_level=True
        )

    def calc_ddf(self, ddf):
        """Read quant.genes.sf produced by deps() and align it to ddf's index."""
        sf_path = Path(self.deps(ddf).job_id).parent / "quant.genes.sf"
        quant = pd.read_csv(sf_path, sep="\t").set_index("Name")[["TPM", "NumReads"]]
        quant.columns = self.columns
        aligned = quant.reindex(ddf.df.gene_stable_id)
        aligned.index = ddf.df.index
        return aligned
class TMM(Annotator):
    """
    Calculates the TMM normalization from edgeR on some raw counts.
    Returns log2-transformed cpms corrected by the TMM-estimated effective
    library sizes. In addition, batch correction using limma might be performed,
    if a dictionary indicating the batches is given.
    Parameters
    ----------
    raw : Dict[str, Annotator]
        Dictionary of raw count annotator for all samples.
    dependencies : List[Job], optional
        List of additional dependencies, by default [].
    samples_to_group : Dict[str, str], optional
        A dictionary sample name to group name, by default None.
    batches : Dict[str, str]
        Dictionary indicating batch effects.
    """
    def __init__(
        self,
        raw: Dict[str, Annotator],
        dependencies: List[Job] = None,
        samples_to_group: Dict[str, str] = None,
        batches: Dict[str, str] = None,
        suffix: str = "",
    ):
        """Constructor."""
        # maps each raw input column name to its output TMM column name;
        # the output name signals whether batch removal was applied
        self.sample_column_lookup = {}
        if batches is not None:
            for sample_name in raw:
                self.sample_column_lookup[
                    parse_a_or_c_to_column(raw[sample_name])
                ] = f"{sample_name}{suffix} TMM (batch removed)"
        else:
            for sample_name in raw:
                self.sample_column_lookup[
                    parse_a_or_c_to_column(raw[sample_name])
                ] = f"{sample_name}{suffix} TMM"
        self.columns = list(self.sample_column_lookup.values())
        self.dependencies = []
        if dependencies is not None:
            self.dependencies = dependencies
        self.raw = raw
        self.samples_to_group = samples_to_group
        # hash the first column name for a filesystem-safe cache key
        self.cache_name = hashlib.md5(self.columns[0].encode("utf-8")).hexdigest()
        self.batch = None
        if batches is not None:
            # batch labels in the same order as the raw samples
            self.batch = [batches[sample_name] for sample_name in raw]
    def calc_ddf(self, ddf: DelayedDataFrame) -> DataFrame:
        """
        Calculates TMM columns to be added to the ddf instance.
        TMM columns are calculated using edgeR with all samples given in self.raw.
        Parameters
        ----------
        ddf : DelayedDataFrame
            The DelayedDataFrame instance to be annotated.
        Returns
        -------
        DataFrame
            A dataframe containing TMM normalized columns for each
        """
        raw_columns = [
            parse_a_or_c_to_column(self.raw[sample_name]) for sample_name in self.raw
        ]
        df = ddf.df[raw_columns]
        df_res = self.call_edgeR(df)
        # edgeR must return the columns unchanged before we rename them
        assert (df_res.columns == df.columns).all()
        rename = {}
        before = df_res.columns.copy()
        for col in df_res.columns:
            rename[col] = self.sample_column_lookup[col]
        df_res = df_res.rename(columns=rename, errors='raise')
        if (df_res.columns == before).all():
            # there is a bug in pandas 1.3.4 that prevents renaming
            # from working when multiindices / tuple named columns are involved
            # so we have to build the renamed frame by hand instead
            df_res = pd.DataFrame({v: df_res[k] for (k,v) in rename.items()})
        return df_res
    def call_edgeR(self, df_counts: DataFrame) -> DataFrame:
        """
        Call to edgeR via r2py to get TMM (trimmed mean of M-values)
        normalization for raw counts.
        Prepare the edgeR input in python and call edgeR calcNormFactors via
        r2py. The TMM normalized values are returned in a DataFrame which
        is converted back to pandas DataFrame via r2py.
        Parameters
        ----------
        df_counts : DataFrame
            The dataframe containing the raw counts.
        Returns
        -------
        DataFrame
            A dataframe with TMM values (trimmed mean of M-values).
        """
        ro.r("library(edgeR)")
        ro.r("library(base)")
        df_input = df_counts
        columns = df_input.columns
        # per-sample library sizes (column sums) for the DGEList samples frame
        to_df = {"lib.size": df_input.sum(axis=0).values}
        if self.samples_to_group is not None:
            to_df["group"] = [
                self.samples_to_group[sample_name]
                for sample_name in self.samples_to_group
            ]
        if self.batch is not None:
            to_df["batch"] = self.batch
        df_samples = pd.DataFrame(to_df)
        df_samples["lib.size"] = df_samples["lib.size"].astype(int)
        r_counts = mbf_r.convert_dataframe_to_r(df_input)
        r_samples = mbf_r.convert_dataframe_to_r(df_samples)
        y = ro.r("DGEList")(
            counts=r_counts,
            samples=r_samples,
        )
        # apply TMM normalization
        y = ro.r("calcNormFactors")(y)  # default is TMM
        logtmm = ro.r(
            """function(y){
                cpm(y, log=TRUE, prior.count=5)
                }"""
        )(
            y
        )  # apparently removeBatchEffects works better on log2-transformed values
        if self.batch is not None:
            # optional limma batch correction on the log2 cpm matrix
            batches = np.array(self.batch)
            batches = numpy2ri.py2rpy(batches)
            logtmm = ro.r(
                """
                function(logtmm, batch) {
                    tmm = removeBatchEffect(logtmm,batch=batch)
                }
                """
            )(logtmm=logtmm, batch=batches)
        cpm = ro.r("data.frame")(logtmm)
        df = mbf_r.convert_dataframe_from_r(cpm)
        df = df.reset_index(drop=True)
        # restore the original pandas column labels lost in the R round-trip
        df.columns = columns
        return df
    def deps(self, ddf) -> List[Job]:
        """Return ppg.jobs"""
        return self.dependencies
    def dep_annos(self) -> List[Annotator]:
        """Return other annotators"""
        return [parse_a_or_c_to_anno(x) for x in self.raw.values()]
| [
"mbf_bam.count_reads_unstranded",
"mbf_qualitycontrol.qc_disabled",
"rpy2.robjects.numpy2ri.py2rpy",
"mbf_externals.aligners.Salmon",
"pandas.read_csv",
"mbf_genomics.util.parse_a_or_c_to_column",
"numpy.array",
"mbf_bam.count_reads_stranded",
"mbf_genomics.util.parse_a_or_c_to_plot_name",
"mbf_r.... | [((688, 694), 'dppd.dppd', 'dppd', ([], {}), '()\n', (692, 694), False, 'from dppd import dppd\n'), ((1799, 1950), 'mbf_bam.count_reads_stranded', 'count_reads_stranded', (['bam_filename', 'bam_index_name', 'intervals', 'gene_intervals'], {'matching_reads_output_bam_filename': 'dump_matching_reads_filename'}), '(bam_filename, bam_index_name, intervals,\n gene_intervals, matching_reads_output_bam_filename=\n dump_matching_reads_filename)\n', (1819, 1950), False, 'from mbf_bam import count_reads_stranded\n'), ((4080, 4159), 'mbf_bam.count_reads_unstranded', 'count_reads_unstranded', (['bam_filename', 'bam_index_name', 'intervals', 'gene_intervals'], {}), '(bam_filename, bam_index_name, intervals, gene_intervals)\n', (4102, 4159), False, 'from mbf_bam import count_reads_unstranded\n'), ((13746, 13762), 'pypipegraph.inside_ppg', 'ppg.inside_ppg', ([], {}), '()\n', (13760, 13762), True, 'import pypipegraph as ppg\n'), ((14048, 14080), 'numpy.array', 'np.array', (['result'], {'dtype': 'np.float'}), '(result, dtype=np.float)\n', (14056, 14080), True, 'import numpy as np\n'), ((14096, 14113), 'pandas.Series', 'pd.Series', (['result'], {}), '(result)\n', (14105, 14113), True, 'import pandas as pd\n'), ((16249, 16265), 'pypipegraph.inside_ppg', 'ppg.inside_ppg', ([], {}), '()\n', (16263, 16265), True, 'import pypipegraph as ppg\n'), ((16522, 16554), 'numpy.array', 'np.array', (['result'], {'dtype': 'np.float'}), '(result, dtype=np.float)\n', (16530, 16554), True, 'import numpy as np\n'), ((16570, 16587), 'pandas.Series', 'pd.Series', (['result'], {}), '(result)\n', (16579, 16587), True, 'import pandas as pd\n'), ((21048, 21086), 'mbf_genomics.util.parse_a_or_c_to_anno', 'parse_a_or_c_to_anno', (['base_column_spec'], {}), '(base_column_spec)\n', (21068, 21086), False, 'from mbf_genomics.util import parse_a_or_c_to_plot_name, parse_a_or_c_to_column, parse_a_or_c_to_anno\n'), ((21113, 21153), 'mbf_genomics.util.parse_a_or_c_to_column', 'parse_a_or_c_to_column', 
(['base_column_spec'], {}), '(base_column_spec)\n', (21135, 21153), False, 'from mbf_genomics.util import parse_a_or_c_to_plot_name, parse_a_or_c_to_column, parse_a_or_c_to_anno\n'), ((23274, 23291), 'pandas.Series', 'pd.Series', (['result'], {}), '(result)\n', (23283, 23291), True, 'import pandas as pd\n'), ((24382, 24415), 'numpy.zeros', 'np.zeros', (['raw_counts.shape', 'float'], {}), '(raw_counts.shape, float)\n', (24390, 24415), True, 'import numpy as np\n'), ((24694, 24733), 'pandas.DataFrame', 'pd.DataFrame', (['{self.columns[0]: result}'], {}), '({self.columns[0]: result})\n', (24706, 24733), True, 'import pandas as pd\n'), ((30250, 30272), 'rpy2.robjects.r', 'ro.r', (['"""library(edgeR)"""'], {}), "('library(edgeR)')\n", (30254, 30272), True, 'import rpy2.robjects as ro\n'), ((30281, 30302), 'rpy2.robjects.r', 'ro.r', (['"""library(base)"""'], {}), "('library(base)')\n", (30285, 30302), True, 'import rpy2.robjects as ro\n'), ((30720, 30739), 'pandas.DataFrame', 'pd.DataFrame', (['to_df'], {}), '(to_df)\n', (30732, 30739), True, 'import pandas as pd\n'), ((30827, 30865), 'mbf_r.convert_dataframe_to_r', 'mbf_r.convert_dataframe_to_r', (['df_input'], {}), '(df_input)\n', (30855, 30865), False, 'import mbf_r\n'), ((30886, 30926), 'mbf_r.convert_dataframe_to_r', 'mbf_r.convert_dataframe_to_r', (['df_samples'], {}), '(df_samples)\n', (30914, 30926), False, 'import mbf_r\n'), ((31759, 31794), 'mbf_r.convert_dataframe_from_r', 'mbf_r.convert_dataframe_from_r', (['cpm'], {}), '(cpm)\n', (31789, 31794), False, 'import mbf_r\n'), ((7743, 7756), 'mbf_qualitycontrol.qc_disabled', 'qc_disabled', ([], {}), '()\n', (7754, 7756), False, 'from mbf_qualitycontrol import register_qc, QCCollectingJob, qc_disabled\n'), ((11480, 11570), 'pandas.DataFrame', 'pd.DataFrame', (["{'x': xy[:, 0], 'y': xy[:, 1], 'label': [x.plot_name for x in elements]}"], {}), "({'x': xy[:, 0], 'y': xy[:, 1], 'label': [x.plot_name for x in\n elements]})\n", (11492, 11570), True, 'import pandas as 
pd\n'), ((14202, 14276), 'pypipegraph.ParameterInvariant', 'ppg.ParameterInvariant', (['self.cache_name', 'self.dump_matching_reads_filename'], {}), '(self.cache_name, self.dump_matching_reads_filename)\n', (14224, 14276), True, 'import pypipegraph as ppg\n'), ((14290, 14393), 'pypipegraph.FunctionInvariant', 'ppg.FunctionInvariant', (["(self.cache_name + '_count_reads')", 'self.count_strategy.__class__.count_reads'], {}), "(self.cache_name + '_count_reads', self.count_strategy\n .__class__.count_reads)\n", (14311, 14393), True, 'import pypipegraph as ppg\n'), ((14886, 14930), 'pathlib.Path', 'Path', (['ppg.util.global_pipegraph.cache_folder'], {}), '(ppg.util.global_pipegraph.cache_folder)\n', (14890, 14930), False, 'from pathlib import Path\n'), ((22296, 22339), 'mbf_genomics.util.parse_a_or_c_to_plot_name', 'parse_a_or_c_to_plot_name', (['base_column_spec'], {}), '(base_column_spec)\n', (22321, 22339), False, 'from mbf_genomics.util import parse_a_or_c_to_plot_name, parse_a_or_c_to_column, parse_a_or_c_to_anno\n'), ((28828, 28873), 'mbf_genomics.util.parse_a_or_c_to_column', 'parse_a_or_c_to_column', (['self.raw[sample_name]'], {}), '(self.raw[sample_name])\n', (28850, 28873), False, 'from mbf_genomics.util import parse_a_or_c_to_plot_name, parse_a_or_c_to_column, parse_a_or_c_to_anno\n'), ((30939, 30954), 'rpy2.robjects.r', 'ro.r', (['"""DGEList"""'], {}), "('DGEList')\n", (30943, 30954), True, 'import rpy2.robjects as ro\n'), ((31072, 31095), 'rpy2.robjects.r', 'ro.r', (['"""calcNormFactors"""'], {}), "('calcNormFactors')\n", (31076, 31095), True, 'import rpy2.robjects as ro\n'), ((31134, 31234), 'rpy2.robjects.r', 'ro.r', (['"""function(y){\n cpm(y, log=TRUE, prior.count=5)\n }"""'], {}), '(\n """function(y){\n cpm(y, log=TRUE, prior.count=5)\n }"""\n )\n', (31138, 31234), True, 'import rpy2.robjects as ro\n'), ((31402, 31422), 'numpy.array', 'np.array', (['self.batch'], {}), '(self.batch)\n', (31410, 31422), True, 'import numpy as np\n'), ((31445, 31469), 
'rpy2.robjects.numpy2ri.py2rpy', 'numpy2ri.py2rpy', (['batches'], {}), '(batches)\n', (31460, 31469), True, 'import rpy2.robjects.numpy2ri as numpy2ri\n'), ((31719, 31737), 'rpy2.robjects.r', 'ro.r', (['"""data.frame"""'], {}), "('data.frame')\n", (31723, 31737), True, 'import rpy2.robjects as ro\n'), ((32082, 32105), 'mbf_genomics.util.parse_a_or_c_to_anno', 'parse_a_or_c_to_anno', (['x'], {}), '(x)\n', (32102, 32105), False, 'from mbf_genomics.util import parse_a_or_c_to_plot_name, parse_a_or_c_to_column, parse_a_or_c_to_anno\n'), ((8405, 8458), 'pandas.DataFrame', 'pd.DataFrame', (["{'x': [0], 'y': [0], 'text': 'no data'}"], {}), "({'x': [0], 'y': [0], 'text': 'no data'})\n", (8417, 8458), True, 'import pandas as pd\n'), ((10565, 10604), 'sklearn.decomposition.PCA', 'decom.PCA', ([], {'n_components': '(2)', 'whiten': '(False)'}), '(n_components=2, whiten=False)\n', (10574, 10604), True, 'import sklearn.decomposition as decom\n'), ((25839, 25926), 'mbf_externals.aligners.Salmon', 'mbf_externals.aligners.Salmon', (['self.accepted_biotypes'], {'version': 'self.salmon_version'}), '(self.accepted_biotypes, version=self.\n salmon_version)\n', (25868, 25926), False, 'import mbf_externals\n'), ((31491, 31654), 'rpy2.robjects.r', 'ro.r', (['"""\n function(logtmm, batch) {\n tmm = removeBatchEffect(logtmm,batch=batch)\n }\n """'], {}), '(\n """\n function(logtmm, batch) {\n tmm = removeBatchEffect(logtmm,batch=batch)\n }\n """\n )\n', (31495, 31654), True, 'import rpy2.robjects as ro\n'), ((26181, 26214), 'pandas.read_csv', 'pd.read_csv', (['quant_path'], {'sep': '"""\t"""'}), "(quant_path, sep='\\t')\n", (26192, 26214), True, 'import pandas as pd\n'), ((27542, 27582), 'mbf_genomics.util.parse_a_or_c_to_column', 'parse_a_or_c_to_column', (['raw[sample_name]'], {}), '(raw[sample_name])\n', (27564, 27582), False, 'from mbf_genomics.util import parse_a_or_c_to_plot_name, parse_a_or_c_to_column, parse_a_or_c_to_anno\n'), ((27761, 27801), 
'mbf_genomics.util.parse_a_or_c_to_column', 'parse_a_or_c_to_column', (['raw[sample_name]'], {}), '(raw[sample_name])\n', (27783, 27801), False, 'from mbf_genomics.util import parse_a_or_c_to_plot_name, parse_a_or_c_to_column, parse_a_or_c_to_anno\n'), ((10423, 10443), 'numpy.array', 'np.array', (['[[0], [0]]'], {}), '([[0], [0]])\n', (10431, 10443), True, 'import numpy as np\n'), ((15012, 15099), 'pypipegraph.CachedAttributeLoadingJob', 'ppg.CachedAttributeLoadingJob', (['(cf / self.cache_name)', 'self', '"""_data"""', 'self.calc_data'], {}), "(cf / self.cache_name, self, '_data', self.\n calc_data)\n", (15041, 15099), True, 'import pypipegraph as ppg\n'), ((10036, 10074), 'mbf_qualitycontrol.QCCollectingJob', 'QCCollectingJob', (['output_filename', 'plot'], {}), '(output_filename, plot)\n', (10051, 10074), False, 'from mbf_qualitycontrol import register_qc, QCCollectingJob, qc_disabled\n'), ((12318, 12356), 'mbf_qualitycontrol.QCCollectingJob', 'QCCollectingJob', (['output_filename', 'plot'], {}), '(output_filename, plot)\n', (12333, 12356), False, 'from mbf_qualitycontrol import register_qc, QCCollectingJob, qc_disabled\n'), ((24591, 24608), 'pandas.isnull', 'pd.isnull', (['result'], {}), '(result)\n', (24600, 24608), True, 'import pandas as pd\n'), ((10794, 10809), 'pandas.isnull', 'pd.isnull', (['data'], {}), '(data)\n', (10803, 10809), True, 'import pandas as pd\n')] |
from multixrank import MultiplexAll
import numpy
import scipy
class TransitionMatrix:
    def __init__(self, multiplex_all: MultiplexAll, bipartite_matrix: numpy.ndarray, lamb: numpy.ndarray):
        """Store the inputs needed to build the supra-transition matrix.

        multiplex_all: the MultiplexAll holding all multiplex networks.
        bipartite_matrix: object array of bipartite blocks, indexed
            [i, j] per multiplex pair (see transition_matrixcoo).
        lamb: lambda parameter array - not used in the code visible here;
            presumably consumed by the normalization helpers.
        """
        self.multiplex_all = multiplex_all
        self.bipartite_matrix = bipartite_matrix
        self.lamb = lamb
        # built lazily by the transition_matrixcoo property
        self._transition_matrixcoo = None
    @property
    def transition_matrixcoo(self):
        """Supra-transition matrix as a scipy.sparse COO matrix, built lazily.

        Assembles a (n_multiplex x n_multiplex) block matrix: diagonal
        blocks come from each multiplex's own supra-adjacency matrix,
        off-diagonal blocks from the bipartite matrices between multiplexes.
        """
        if self._transition_matrixcoo is None:
            bipartite_matrix = self.bipartite_matrix
            # collect each multiplex's supra-adjacency matrix (COO)
            self.multiplexall_supra_adj_matrix_list = []
            for i, multiplex_obj in enumerate(self.multiplex_all.multiplex_tuple):
                self.multiplexall_supra_adj_matrix_list.append(multiplex_obj.supra_adj_matrixcoo)
            size1 = len(self.multiplex_all.multiplex_tuple)
            # fills self.Diago with per-multiplex bipartite column sums
            self.get_normalization_Diago()
            transition = numpy.zeros((size1, size1), dtype=object)
            # give full zero block matrix for transition matrix, essential if there
            # are no bipartite
            for i in range(size1):
                # column i of Diago: bipartite column sums relevant to multiplex i
                diago = self.Diago[:,i]
                transition[i, i] = self.get_normalisation_alpha_alpha(i, self.multiplexall_supra_adj_matrix_list[i], diago)
                for j in range(size1):
                    if j != i:
                        transition[j, i] = self.get_normalization_bipartite_alpha_beta(j, i, bipartite_matrix[j, i], diago)
            # assemble the blocks into one sparse COO matrix
            self._transition_matrixcoo = scipy.sparse.bmat(transition, format="coo")
        return self._transition_matrixcoo
# 1.2.3.1 :
def get_normalization_Diago(self) -> numpy.ndarray:
"""
Function that determine the column sum of each bipartite matrix for the condition
concerning the transition matrix alpha_alpha.
Returns :
self.Diago (numpy.ndarray) : A ndarray with the column sum of each bipartite matrix.
"""
bipartite_matrix = self.bipartite_matrix
# import pdb; pdb.set_trace()
# multiplexall_node_count_list2d = [len(x) for x in self.multiplexall_node_list2d]
multiplexall_node_count_list = [len(m.nodes) for m in self.multiplex_all.multiplex_tuple]
multiplexall_layer_count_list = [len(m.layer_tuple) for m in self.multiplex_all.multiplex_tuple]
size1 = len(self.multiplex_all.multiplex_tuple)
self.Diago = numpy.zeros((size1, size1), dtype=object)
for i in range(size1):
self.Diago[i, i] = numpy.zeros(multiplexall_node_count_list[i] * multiplexall_layer_count_list[i])
for j in range(i + 1, size1):
tot_strength_nodes = (bipartite_matrix[i, j]).sum(axis=0)
self.Diago[i, j] = numpy.array(list(tot_strength_nodes.flat))
tot_strength_nodes = (bipartite_matrix[j, i]).sum(axis=0)
self.Diago[j, i] = numpy.array(list(tot_strength_nodes.flat))
# import pdb; pdb.set_trace()
return self.Diago
# 1.2.3.3 :
def get_normalisation_alpha_alpha(self, alpha, adjacency, diago) -> scipy.sparse.csr.csr_matrix:
"""
Function that compute Normalization for the alpha_alpha term of Transition matrix.
The term alpha_alpha correspond to the supra-adjacency matrix of multiplex alpha.
Args :
alpha (int) : Row index of Transition matrix.
beta (int) : Column index of Transition matrix.
matrix (scipy.sparse.coo.coo_matrix) : Supra-adjacency matrix for multiplex ithat we want
to normalize.
Returns :
alpha_alpha (scipy.sparse.csr.csr_matrix) : Normalized Supra-adjacency matrix for i .
"""
# multiplexall_node_count_list = [len(x) for x in self.multiplexall_node_list2d]
multiplexall_node_count_list = [len(m.nodes) for m in self.multiplex_all.multiplex_tuple]
multiplexall_layer_count_list = [len(m.layer_tuple) for m in self.multiplex_all.multiplex_tuple]
# print(adjacency)
size1 = len(self.multiplex_all.multiplex_tuple)
tot_strength_nodes_alpha = adjacency.sum(axis=0)
diago_up = list(tot_strength_nodes_alpha.flat)
diago_down = list(tot_strength_nodes_alpha.flat)
for k in range(multiplexall_node_count_list[alpha] *
multiplexall_layer_count_list[alpha]):
# add for loop for each element in diago (each element correspond to a bipartite matrix, [i,i] list of 0)
if sum(diago)[k] == 0 : # pas de bipartite
diago_up[k] = 0
if diago_down[k] == 0:
diago_down[k] = 1
else :
diago_down[k] = 1/diago_down[k]
else : # so at least one bipartite no zeros
diago_down[k] = 0
list_value_diago = numpy.zeros(len(diago))
for l in range(len(diago)) :
if (diago[l][k] != 0) :
list_value_diago[l] = 1
list_value_diago = list_value_diago*self.lamb[:,alpha].T
norm = 1
for l in range(size1) :
if (l != alpha) :
norm -= list_value_diago[l]
if diago_up[k] == 0 :
diago_up[k] = 1
else :
diago_up[k] = norm*(1/diago_up[k])
Normalization_matrix_up = scipy.sparse.diags(diago_up, format = "coo")
Normalization_matrix_down = scipy.sparse.diags(diago_down, format = "coo")
Transition_up = adjacency.dot(Normalization_matrix_up)
Transition_down = adjacency.dot(Normalization_matrix_down)
alpha_alpha = Transition_up + Transition_down
return alpha_alpha
# 1.2.3.2 :
def get_normalization_bipartite_alpha_beta(self, alpha, beta,
matrix, diago) -> scipy.sparse.csr.csr_matrix:
"""
Function that compute Normalization for the alpha_beta term of Transition matrix.
The term alpha_beta correspond to the bipartite between multiplex alpha and beta.
Args :
alpha (int) : Row index of Transition matrix.
beta (int) : Column index of Transition matrix.
matrix (scipy.sparse.coo.coo_matrix) : bipartite between i and j that we want to
normalize.
Returns :
alpha_beta (scipy.sparse.csr.csr_matrix) : Normalized bipartite between i and j.
"""
Tot_strength_nodes = matrix.sum(axis=0)
# adjacency = self.SupraAdjacencyMatrix[beta].sum(axis=0) # anthony's script
adjacency = self.multiplex_all.supra_adj_matrix_list[beta].sum(axis=0)
adjacency = list(adjacency.flat)
diago_up = list(Tot_strength_nodes.flat)
diago_down = list(Tot_strength_nodes.flat)
# list with layer count for each multiplex
self_L = [len(x.layer_tuple) for x in self.multiplex_all.multiplex_tuple]
for k in range(len(diago_up)):
if (self_L[beta] == 1) and (adjacency[
k] == 0): # node not in multiplex alpha, only in bip
diago_up[k] = 0
list_value_diago = numpy.zeros(len(self_L))
for l in range(len(self_L)):
if (diago[l][k] != 0):
list_value_diago[l] = 1
list_value_diago = list_value_diago * self.lamb[:, beta].T
norm = 0
for l in range(len(self_L)):
if (l != beta):
norm += list_value_diago[l]
if diago_down[k] == 0:
diago_down[k] = 1
else:
diago_down[k] = (self.lamb[alpha, beta] / norm) * (
1 / diago_down[k])
else: # node in multiplex alpha so standard normalization
diago_down[k] = 0
if diago_up[k] == 0:
diago_up[k] = 1
else:
diago_up[k] = self.lamb[alpha, beta] * (1 / diago_up[k])
Normalization_matrix_up = scipy.sparse.diags(diago_up, format="coo")
Normalization_matrix_down = scipy.sparse.diags(diago_down, format="coo")
Transition_up = matrix.dot(Normalization_matrix_up)
Transition_down = matrix.dot(Normalization_matrix_down)
alpha_beta = Transition_up + Transition_down
return alpha_beta
| [
"scipy.sparse.bmat",
"numpy.zeros",
"scipy.sparse.diags"
] | [((2373, 2414), 'numpy.zeros', 'numpy.zeros', (['(size1, size1)'], {'dtype': 'object'}), '((size1, size1), dtype=object)\n', (2384, 2414), False, 'import numpy\n'), ((5413, 5455), 'scipy.sparse.diags', 'scipy.sparse.diags', (['diago_up'], {'format': '"""coo"""'}), "(diago_up, format='coo')\n", (5431, 5455), False, 'import scipy\n'), ((5494, 5538), 'scipy.sparse.diags', 'scipy.sparse.diags', (['diago_down'], {'format': '"""coo"""'}), "(diago_down, format='coo')\n", (5512, 5538), False, 'import scipy\n'), ((8175, 8217), 'scipy.sparse.diags', 'scipy.sparse.diags', (['diago_up'], {'format': '"""coo"""'}), "(diago_up, format='coo')\n", (8193, 8217), False, 'import scipy\n'), ((8254, 8298), 'scipy.sparse.diags', 'scipy.sparse.diags', (['diago_down'], {'format': '"""coo"""'}), "(diago_down, format='coo')\n", (8272, 8298), False, 'import scipy\n'), ((873, 914), 'numpy.zeros', 'numpy.zeros', (['(size1, size1)'], {'dtype': 'object'}), '((size1, size1), dtype=object)\n', (884, 914), False, 'import numpy\n'), ((1464, 1507), 'scipy.sparse.bmat', 'scipy.sparse.bmat', (['transition'], {'format': '"""coo"""'}), "(transition, format='coo')\n", (1481, 1507), False, 'import scipy\n'), ((2477, 2556), 'numpy.zeros', 'numpy.zeros', (['(multiplexall_node_count_list[i] * multiplexall_layer_count_list[i])'], {}), '(multiplexall_node_count_list[i] * multiplexall_layer_count_list[i])\n', (2488, 2556), False, 'import numpy\n')] |
# -*- coding: utf-8 -*-
"""
This module is used for importing study specific .mat files.
Since all trials will be joined to a single dataset we cannot easily handle
single electrodes/channels from certain trials.
Thus, data loaded with this module is expected to be artifact free already.
"""
from os import path, mkdir, unlink
from glob import glob
from scipy import io
import numpy as np
import pandas as pd
from pandas.io.pytables import HDFStore
import warnings
import re
from digits.utils import dotdict
class UnmatchedDimensions(Exception): pass
class UnmatchedSubjects(Exception): pass
class InconsistentElectrodes(Exception): pass
class Session():
"""Simple Class for a session object holding 2 pandas dataframes: samples and targets"""
def __init__(self, subject, sessionid, samples, digits, trials, channels):
if len(digits) != len(samples):
raise UnmatchedDimensions("sample and target length doesn't match")
samplenames = ["t_"+str.zfill(x,4) for x in np.arange(0,1401).astype('str')]
colix = pd.MultiIndex.from_product([channels, samplenames],
names=['channel', 'sample'])
rowsubjects = np.repeat(subject, len(trials))
rowsessions = np.repeat(sessionid, len(trials))
trials = trials.astype('str')
rowruns = np.arange(len(trials)).astype('str')
rowix = pd.MultiIndex.from_arrays([rowsubjects, rowsessions, trials, rowruns],
names=['subject', 'session', 'trial', 'presentation'])
samples = pd.DataFrame(samples, index=rowix, columns=colix)
targets = pd.DataFrame(digits, index=rowix, columns=['label'])
self.ds = dotdict({'samples': samples, 'targets': targets})
class Importer:
"""Main Class for importing mat files.
Data will be stored in the samples and targets of the ds dictionary
attribute and can be loaded or saved from and to a compressed hdf5 file.
Attributes:
dataroot: directory that contains the mat files
ds: trivial dictionary containing:
samples: a pandas dataframe with a MultiIndex composed of the session data
targets: a pandas dataframe with the targets/labels
"""
def __init__(self, dataroot):
self.dataroot = dataroot
self.ds = None
self.store = None
self.importpath = path.join(self.dataroot, 'imported')
def __append(self, session):
"""append session to current object"""
if (self.ds.samples.columns.get_level_values('channel') !=
session.ds.samples.columns.get_level_values('channel')).any():
print(self.ds.samples.columns.get_level_values('channel'))
print(session.ds.samples.columns.get_level_values('channel'))
raise InconsistentElectrodes('electrode labels do not match when merging datasets')
self.ds.samples = self.ds.samples.append(session.ds.samples,
verify_integrity=True)
self.ds.targets = self.ds.targets.append(session.ds.targets,
verify_integrity=True)
def __sort(self):
"""MultiIndex Slicing operations require we sort all indices"""
self.ds.samples.sort_index(level='channel', axis=1, inplace=True)
#self.ds.samples.sort_index(axis=1, inplace=True)
def get_session(self, subject, sessionid):
"""Add a single trial as target/samples pair from a mat file.
Args:
param1: (string): subject ID
param2: (string): session ID
Returns:
A Session object containing samples and target data.
"""
trialpath = glob(path.join(self.dataroot,
subject + '-' + sessionid + '-*.mat'))
if not trialpath or not path.exists(trialpath[0]):
raise FileNotFoundError("no file for subject '{0}' and trial '{1}'".format(subject, sessionid))
session = io.loadmat(trialpath[0])
"""
>> session
session =
data: [1x1 struct]
"""
data = session['data'][0,0]
"""
>> session.data
ans =
label: {64x1 cell}
time: {1x638 cell}
trial: {1x638 cell}
elec: [1x1 struct]
cfg: [1x1 struct]
TrlInfo: {638x16 cell}
TrlInfoLabels: {16x1 cell}
"""
channels = data[0][:,0] # label
channels = np.array(channels.tolist()).flatten() # unify dtype
samples = data[2][0, :] # trial
samples = np.array([ x[:].flatten() for x in samples ], dtype='float32')
cfg = data[4][0][0]
trlinfo = data[5][:,:]
trlinfolabels = data[6][:,0]
"""
>> session.data.cfg
ans =
method: 'spline'
badchannel: {2x1 cell}
trials: 'all'
lambda: 1.0000e-05
order: 4
elec: [1x1 struct]
outputfilepresent: 'overwrite'
callinfo: [1x1 struct]
version: [1x1 struct]
trackconfig: 'off'
checkconfig: 'loose'
checksize: 100000
showcallinfo: 'yes'
debug: 'no'
trackcallinfo: 'yes'
trackdatainfo: 'no'
missingchannel: {0x1 cell}
previous: [1x1 struct]
"""
try:
badchannels = cfg[1][0,0]
except IndexError:
badchannels = []
"""
>> session.data.TrlInfoLabels
ans =
'time stamp original (EEG)'
'time stamp new (EEG)'
'task'
'data part #'
'trial #'
'stimulus type'
'EEG trigger'
'encoding digit #'
'time stamp original (E-Prime)'
'set size'
'probe type'
'response'
'ACC'
'RT'
'digit/probe presented'
'probe position'
"""
trials = trlinfo[:,4].astype('uint8')
digits = trlinfo[:,14].astype('uint8')
return Session(subject, sessionid, samples, digits, trials, channels)
def add_session(self, subject, sessionid):
"""Concatenate a single Session to the current importer instance
implicitly using __append().
Args:
param1: (string): subject ID
param2: (string): session ID
"""
session = self.get_session(subject, sessionid)
if not self.ds:
self.ds = dotdict({'samples': session.ds.samples,
'targets': session.ds.targets})
return
if sessionid in self.ds.samples.index.get_level_values('session'):
warnings.warn("Session already added, doing nothing.")
return
if subject not in self.ds.samples.index.get_level_values('subject'):
raise UnmatchedSubjects("Subjects don't match, will not add current session")
# TODO: other checks ?
self.__append(session)
def import_all(self, subject):
"""Import all .mat files for a subject ID.
Args:
param1: (string): subject ID
"""
trialpath = path.join(self.dataroot, '*' + subject + '*mat')
trialfiles = sorted(glob(trialpath))
if not trialfiles:
raise FileNotFoundError(trialpath)
sessionid_re = re.compile('.*' + subject + '-([0-9]+)-.*mat')
sessionids = [sessionid_re.match(file).groups()[0] for file in trialfiles]
for id in sessionids:
self.add_session(subject, id)
self.__sort()
def save(self, filename, force=False):
"""Save the trials and samples arrays from the current importer
instance to a dataset inside a lzf compressed hdf5 file for later use.
Args:
param1: (string): filename, will be stored in self.importpath
Optional Args:
force: (boolean) Wether or not to overwrite an existing file
(default: False)
"""
try:
mkdir(self.importpath)
except FileExistsError:
pass
filename = path.join(self.importpath, filename)
if path.exists(filename):
if force:
unlink(filename)
else:
raise FileExistsError('Import file "' + filename + '" already exists.')
self.__sort()
self.store = HDFStore(filename, complib='lzo')
self.store['samples'] = self.ds.samples
self.store['targets'] = self.ds.targets
self.store.close()
def load(self, name):
"""Load a hdf5 file created with save() and attach the targets and
samples array to the current importer instance.
Args:
param1: (string): a name for the dataset and the hdf5 file name
"""
self.open(name)
self.ds = dotdict({'samples': None, 'targets': None})
self.ds.samples = self.store['samples']
self.ds.targets = self.store['targets']
self.store.close()
def open(self, name):
if not path.exists(self.importpath):
raise FileNotFoundError(path.join(self.dataroot, 'imported'))
filename = path.join(self.importpath, name)
if not path.exists(filename):
raise FileExistsError(filename)
self.store = HDFStore(filename)
def close(self, name):
self.store.close()
| [
"pandas.MultiIndex.from_product",
"os.path.exists",
"re.compile",
"numpy.arange",
"scipy.io.loadmat",
"pandas.MultiIndex.from_arrays",
"os.path.join",
"pandas.io.pytables.HDFStore",
"warnings.warn",
"os.mkdir",
"os.unlink",
"pandas.DataFrame",
"digits.utils.dotdict",
"glob.glob"
] | [((1054, 1139), 'pandas.MultiIndex.from_product', 'pd.MultiIndex.from_product', (['[channels, samplenames]'], {'names': "['channel', 'sample']"}), "([channels, samplenames], names=['channel', 'sample']\n )\n", (1080, 1139), True, 'import pandas as pd\n'), ((1399, 1528), 'pandas.MultiIndex.from_arrays', 'pd.MultiIndex.from_arrays', (['[rowsubjects, rowsessions, trials, rowruns]'], {'names': "['subject', 'session', 'trial', 'presentation']"}), "([rowsubjects, rowsessions, trials, rowruns],\n names=['subject', 'session', 'trial', 'presentation'])\n", (1424, 1528), True, 'import pandas as pd\n'), ((1586, 1635), 'pandas.DataFrame', 'pd.DataFrame', (['samples'], {'index': 'rowix', 'columns': 'colix'}), '(samples, index=rowix, columns=colix)\n', (1598, 1635), True, 'import pandas as pd\n'), ((1654, 1706), 'pandas.DataFrame', 'pd.DataFrame', (['digits'], {'index': 'rowix', 'columns': "['label']"}), "(digits, index=rowix, columns=['label'])\n", (1666, 1706), True, 'import pandas as pd\n'), ((1725, 1774), 'digits.utils.dotdict', 'dotdict', (["{'samples': samples, 'targets': targets}"], {}), "({'samples': samples, 'targets': targets})\n", (1732, 1774), False, 'from digits.utils import dotdict\n'), ((2403, 2439), 'os.path.join', 'path.join', (['self.dataroot', '"""imported"""'], {}), "(self.dataroot, 'imported')\n", (2412, 2439), False, 'from os import path, mkdir, unlink\n'), ((4035, 4059), 'scipy.io.loadmat', 'io.loadmat', (['trialpath[0]'], {}), '(trialpath[0])\n', (4045, 4059), False, 'from scipy import io\n'), ((7497, 7545), 'os.path.join', 'path.join', (['self.dataroot', "('*' + subject + '*mat')"], {}), "(self.dataroot, '*' + subject + '*mat')\n", (7506, 7545), False, 'from os import path, mkdir, unlink\n'), ((7689, 7735), 're.compile', 're.compile', (["('.*' + subject + '-([0-9]+)-.*mat')"], {}), "('.*' + subject + '-([0-9]+)-.*mat')\n", (7699, 7735), False, 'import re\n'), ((8470, 8506), 'os.path.join', 'path.join', (['self.importpath', 'filename'], {}), 
'(self.importpath, filename)\n', (8479, 8506), False, 'from os import path, mkdir, unlink\n'), ((8518, 8539), 'os.path.exists', 'path.exists', (['filename'], {}), '(filename)\n', (8529, 8539), False, 'from os import path, mkdir, unlink\n'), ((8747, 8780), 'pandas.io.pytables.HDFStore', 'HDFStore', (['filename'], {'complib': '"""lzo"""'}), "(filename, complib='lzo')\n", (8755, 8780), False, 'from pandas.io.pytables import HDFStore\n'), ((9207, 9250), 'digits.utils.dotdict', 'dotdict', (["{'samples': None, 'targets': None}"], {}), "({'samples': None, 'targets': None})\n", (9214, 9250), False, 'from digits.utils import dotdict\n'), ((9539, 9571), 'os.path.join', 'path.join', (['self.importpath', 'name'], {}), '(self.importpath, name)\n', (9548, 9571), False, 'from os import path, mkdir, unlink\n'), ((9675, 9693), 'pandas.io.pytables.HDFStore', 'HDFStore', (['filename'], {}), '(filename)\n', (9683, 9693), False, 'from pandas.io.pytables import HDFStore\n'), ((3750, 3812), 'os.path.join', 'path.join', (['self.dataroot', "(subject + '-' + sessionid + '-*.mat')"], {}), "(self.dataroot, subject + '-' + sessionid + '-*.mat')\n", (3759, 3812), False, 'from os import path, mkdir, unlink\n'), ((6807, 6878), 'digits.utils.dotdict', 'dotdict', (["{'samples': session.ds.samples, 'targets': session.ds.targets}"], {}), "({'samples': session.ds.samples, 'targets': session.ds.targets})\n", (6814, 6878), False, 'from digits.utils import dotdict\n'), ((7017, 7071), 'warnings.warn', 'warnings.warn', (['"""Session already added, doing nothing."""'], {}), "('Session already added, doing nothing.')\n", (7030, 7071), False, 'import warnings\n'), ((7574, 7589), 'glob.glob', 'glob', (['trialpath'], {}), '(trialpath)\n', (7578, 7589), False, 'from glob import glob\n'), ((8378, 8400), 'os.mkdir', 'mkdir', (['self.importpath'], {}), '(self.importpath)\n', (8383, 8400), False, 'from os import path, mkdir, unlink\n'), ((9416, 9444), 'os.path.exists', 'path.exists', (['self.importpath'], {}), 
'(self.importpath)\n', (9427, 9444), False, 'from os import path, mkdir, unlink\n'), ((9587, 9608), 'os.path.exists', 'path.exists', (['filename'], {}), '(filename)\n', (9598, 9608), False, 'from os import path, mkdir, unlink\n'), ((3881, 3906), 'os.path.exists', 'path.exists', (['trialpath[0]'], {}), '(trialpath[0])\n', (3892, 3906), False, 'from os import path, mkdir, unlink\n'), ((8579, 8595), 'os.unlink', 'unlink', (['filename'], {}), '(filename)\n', (8585, 8595), False, 'from os import path, mkdir, unlink\n'), ((9482, 9518), 'os.path.join', 'path.join', (['self.dataroot', '"""imported"""'], {}), "(self.dataroot, 'imported')\n", (9491, 9518), False, 'from os import path, mkdir, unlink\n'), ((1005, 1023), 'numpy.arange', 'np.arange', (['(0)', '(1401)'], {}), '(0, 1401)\n', (1014, 1023), True, 'import numpy as np\n')] |
# -*- coding:Utf-8 -*-
#####################################################################
#This file is part of RGPA.
#Foobar is free software: you can redistribute it and/or modify
#it under the terms of the GNU General Public License as published by
#the Free Software Foundation, either version 3 of the License, or
#(at your option) any later version.
#Foobar is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU General Public License for more details.
#You should have received a copy of the GNU General Public License
#along with Foobar. If not, see <http://www.gnu.org/licenses/>.
#-------------------------------------------------------------------
#authors :
#<NAME> : <EMAIL>
#<NAME> : <EMAIL>
#<NAME> : <EMAIL>
#####################################################################
from pylayers.util.project import *
import numpy as np
import scipy as sp
import os
import pdb
def cloud(p, name="cloud", display=False, color='r', dice=2, R=0.5, access='new'):
"""
cloud(p,filename,display,color) : display cloud of points p
p : cloud of points array(Npx3)
display : Boolean to switch display on/off
color : 'r','b','g','k'
dice : sphere sampling (2)
R : sphere radius (0.5)
access : 'new' create a new file append mode neither
"""
sh = np.shape(p)
if len(sh) == 1 :
p = p.reshape((1, len(p)))
Np = np.shape(p)[0]
filename = basename + '/geom/' + name + '.list'
if access == 'new':
fd = open(filename, "w")
fd.write("LIST\n")
else:
fd = open(filename, "a")
sdice = " " + str(dice) + " " + str(dice) + " "
radius = " " + str(R) + " "
if color == 'r':
col = " 1 0 0 "
elif color == 'b':
col = " 0 0 1 "
elif color == 'm':
col = " 1 0 1 "
elif color == 'y':
col = " 1 1 0 "
elif color == 'c':
col = " 0 1 1 "
elif color == 'g':
col = " 0 1 0 "
elif color == 'k':
col = " 0 0 0 "
else:
col = color
for k in range(Np):
try:
c1 = str(p[k, 0]) + " " + str(p[k, 1]) + " " + str(p[k, 2])
except:
c1 = str(p[k, 0]) + " " + str(p[k, 1]) + " " + str(0)
chaine = "{appearance {-edge patchdice" + sdice + "material { diffuse " + col + "}}{SPHERE" + radius + c1 + " }}\n"
fd.write(chaine)
fd.close()
if display:
chaine = "geomview -nopanel -b 1 1 1 " + filename + " 2>/dev/null &"
os.system(chaine)
return(filename)
if __name__ == "__main__":
p = 10 * sp.randn(3000, 3)
cloud(p, display=True)
| [
"os.system",
"numpy.shape",
"scipy.randn"
] | [((1504, 1515), 'numpy.shape', 'np.shape', (['p'], {}), '(p)\n', (1512, 1515), True, 'import numpy as np\n'), ((1582, 1593), 'numpy.shape', 'np.shape', (['p'], {}), '(p)\n', (1590, 1593), True, 'import numpy as np\n'), ((2679, 2696), 'os.system', 'os.system', (['chaine'], {}), '(chaine)\n', (2688, 2696), False, 'import os\n'), ((2760, 2777), 'scipy.randn', 'sp.randn', (['(3000)', '(3)'], {}), '(3000, 3)\n', (2768, 2777), True, 'import scipy as sp\n')] |
# -*- coding: utf-8 -*-
'''
-------------------------------------------------------------------------------------------------
This code accompanies the paper titled "Human injury-based safety decision of automated vehicles"
Author: <NAME>, <NAME>, <NAME>, <NAME>
Corresponding author: <NAME> (<EMAIL>)
-------------------------------------------------------------------------------------------------
'''
import argparse
import cv2
import xlrd
import torch
import imageio
import warnings
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
from matplotlib.offsetbox import OffsetImage, AnnotationBbox
from utils.Percept import percept
from utils.Det_crash import det_crash
from utils.Vehicle import Vehicle_S12, Vehicle_S3, deci_S3, deci_EB
from utils.Inj_Pre import RNN
from utils.Con_est import Collision_cond
warnings.filterwarnings('ignore')
__author__ = "<NAME>"
def load_para(file, Num):
''' Load the reconstructed information of real-world accidents. '''
# Load the data file.
para_data = xlrd.open_workbook(file).sheet_by_index(0)
# Load and process the vehicle parameters.
veh_l = [para_data.row_values(Num + 1)[3] / 1000, para_data.row_values(Num + 1 + 51)[3] / 1000]
veh_w = [para_data.row_values(Num + 1)[4] / 1000, para_data.row_values(Num + 1 + 51)[4] / 1000]
veh_cgf = [para_data.row_values(Num + 1)[6], para_data.row_values(Num + 1 + 51)[6]]
veh_cgs = [0.5 * veh_w[0], 0.5 * veh_w[1]]
veh_m = [para_data.row_values(Num + 1)[2], para_data.row_values(Num + 1 + 51)[2]]
veh_I = [para_data.row_values(Num + 1)[5], para_data.row_values(Num + 1 + 51)[5]]
veh_k = [np.sqrt(veh_I[0] / veh_m[0]), np.sqrt(veh_I[1] / veh_m[1])]
veh_param = (veh_l, veh_w, veh_cgf, veh_cgs, veh_k, veh_m)
# Load and process the occupant parameters.
age = [para_data.row_values(Num + 1)[8], para_data.row_values(Num + 1 + 51)[8]]
sex = [para_data.row_values(Num + 1)[7], para_data.row_values(Num + 1 + 51)[7]]
belt = [para_data.row_values(Num + 1)[9], para_data.row_values(Num + 1 + 51)[9]]
airbag = [para_data.row_values(Num + 1)[10], para_data.row_values(Num + 1 + 51)[10]]
for i in range(2):
if age[i] < 20:
age[i] = 0
elif age[i] < 45:
age[i] = 1
elif age[i] < 65:
age[i] = 2
else:
age[i] = 3
belt[0] = 0 if belt[0] == 'Not in use' else 1
belt[1] = 0 if belt[1] == 'Not in use' else 1
sex[0] = 0 if sex[0] == 'Male' else 1
sex[1] = 0 if sex[1] == 'Male' else 1
airbag[0] = 1 if airbag[0] == 'Activated' else 0
airbag[1] = 1 if airbag[1] == 'Activated' else 0
mass_r = veh_m[0] / veh_m[1]
if mass_r < 1 / 2:
mass_r_12 = 0
elif mass_r < 1 / 1.3:
mass_r_12 = 1
elif mass_r < 1.3:
mass_r_12 = 2
elif mass_r < 2:
mass_r_12 = 3
else:
mass_r_12 = 4
mass_r_21 = 4 - mass_r_12
mass_r = [mass_r_12, mass_r_21]
occ_param = (age, belt, sex, airbag, mass_r)
return veh_param, occ_param
def resize_pic(image, angle, l_, w_):
''' Resize and rotate the vehicle.png. '''
# Resize the picture.
image = cv2.resize(image, (image.shape[1], int(image.shape[0] / (3370 / 8651) * (w_ / l_))))
# Obtain the dimensions of the image and then determine the center.
(h, w) = image.shape[:2]
(cX, cY) = (w // 2, h // 2)
# Obtain the rotation matrix.
M = cv2.getRotationMatrix2D((cX, cY), angle, 1.0)
cos = np.abs(M[0, 0])
sin = np.abs(M[0, 1])
# compute the new bounding dimensions of the image.
nW = int((h * sin) + (w * cos))
nH = int((h * cos) + (w * sin))
# adjust the rotation matrix to take into account translation.
M[0, 2] += (nW / 2) - cX
M[1, 2] += (nH / 2) - cY
# perform the actual rotation and return the image.
image_ = cv2.warpAffine(image, M, (nW, nH), borderValue=(255, 255, 255))
return image_
def plot_env(ax, V1_x_seq, V1_y_seq, V1_angle, V2_x_seq, V2_y_seq, V2_angle, veh_l, veh_w, img_list):
''' Visualize the simulation environment. '''
plt.cla()
plt.xticks(fontsize=22)
plt.yticks(fontsize=22)
plt.xlim((-10, 40))
plt.ylim((-10, 20))
plt.xticks(np.arange(-10, 40.1, 5), range(0, 50+1, 5), family='Times New Roman', fontsize=16)
plt.yticks(np.arange(-10, 20.1, 5), range(0, 30+1, 5), family='Times New Roman', fontsize=16)
plt.subplots_adjust(left=0.1, bottom=0.1, top=0.94, right=0.94, wspace=0.25, hspace=0.25)
# # Plot the vehicles' position.
# zoom = ((40) - (-10)) * 0.000135
# img_1 = resize_pic(img_list[0], np.rad2deg(V1_angle), veh_l[0], veh_w[0])
# im_1 = OffsetImage(img_1, zoom=zoom * veh_l[0])
# ab_1 = AnnotationBbox(im_1, xy=(V1_x_seq[-1], V1_y_seq[-1]), xycoords='data', pad=0, frameon=False)
# ax.add_artist(ab_1)
# img_2 = resize_pic(img_list[1], np.rad2deg(V2_angle), veh_l[1], veh_w[1])
# im_2 = OffsetImage(img_2, zoom=zoom * veh_l[1])
# ab_2 = AnnotationBbox(im_2, xy=(V2_x_seq[-1], V2_y_seq[-1]), xycoords='data', pad=0, frameon=False)
# ax.add_artist(ab_2)
from matplotlib import patches
p_x1 = V1_x_seq[-1] - (veh_l[0] / 2) * np.cos(V1_angle) + (veh_w[0] / 2) * np.sin(V1_angle)
p_y1 = V1_y_seq[-1] - (veh_l[0] / 2) * np.sin(V1_angle) - (veh_w[0] / 2) * np.cos(V1_angle)
p_x2 = V2_x_seq[-1] - (veh_l[1] / 2) * np.cos(V2_angle) + (veh_w[1] / 2) * np.sin(V2_angle)
p_y2 = V2_y_seq[-1] - (veh_l[1] / 2) * np.sin(V2_angle) - (veh_w[1] / 2) * np.cos(V2_angle)
e1 = patches.Rectangle((p_x1, p_y1), veh_l[0], veh_w[0], angle=np.rad2deg(V1_angle), linewidth=0, fill=True,
zorder=2, color='red', alpha=0.5)
ax.add_patch(e1)
e2 = patches.Rectangle((p_x2, p_y2), veh_l[1], veh_w[1], angle=np.rad2deg(V2_angle), linewidth=0, fill=True,
zorder=2, color='blue', alpha=0.5)
ax.add_patch(e2)
# Plot the vehicles' trajectories.
plt.plot(V1_x_seq, V1_y_seq, color='red', linestyle='--', linewidth=1.3, alpha=0.5)
plt.plot(V2_x_seq, V2_y_seq, color='blue', linestyle='--', linewidth=1.3, alpha=0.5)
def main():
    ''' Make human injury-based safety decisions using the injury risk mitigation (IRM) algorithm. '''
    parser = argparse.ArgumentParser()
    parser.add_argument('--case_num', type=int, default=1, help='Simulation case number (1-5)')
    parser.add_argument('--t_act', type=int, default=500,
                        help='Activation time of IRM algorithm (100ms~1000ms before the collision)')
    parser.add_argument('--Level', type=str, default='S1', help='Level of the IRM algorithm: EB, S1, S2, S3')
    parser.add_argument('--Ego_V', type=int, default=1, help='Choose one vehicle as the ego vehicle: 1 or 2')
    parser.add_argument('--profile_inf', type=str, default='para\Record_Information_example.xlsx',
                        help='File: information of the reconstructed accidents')
    # NOTE: action='store_false' -> opt.no_visualize defaults to True and is
    # used below as "do visualize"; passing --no_visualize turns rendering off.
    parser.add_argument('--no_visualize', action='store_false', help='simulation visualization')
    parser.add_argument('--save_gif', action='store_true', help='save simulation visualization')
    opt = parser.parse_args()
    # Candidate safety decisions: steering direction/amount x acceleration profile.
    Deci_set = ['straight_cons', 'straight_dec-all', 'straight_dec-half', 'straight_acc-half', 'straight_acc-all',
                'left-all_dec-all', 'right-all_dec-all', 'left-all_acc-all', 'right-all_acc-all',
                'left-half_dec-all', 'left-half_dec-half', 'left-all_dec-half', 'left-half_cons',
                'left-all_cons', 'left-half_acc-half', 'left-all_acc-half', 'left-half_acc-all',
                'right-half_dec-all', 'right-half_dec-half', 'right-all_dec-half', 'right-half_cons',
                'right-all_cons', 'right-half_acc-half', 'right-all_acc-half', 'right-half_acc-all',
                'Record_trajectory']
    # Load the occupant injury prediction model.
    model_InjPre = RNN(in_dim=16, hid_dim=32, n_layers=2, flag_LSTM=True, bidirectional=True, dropout=0.5)
    model_InjPre.load_state_dict(torch.load('para\\DL_InjuryPrediction.pkl'))
    model_InjPre.eval()
    # Load the vehicle image for visualization.
    img_1 = mpimg.imread('para\image\\red.png')
    img_2 = mpimg.imread('para\image\\blue.png')
    img_list = [img_1, img_2]
    # Load the parameters of vehicles and occupants.
    veh_param, occ_param = load_para(opt.profile_inf, opt.case_num)
    (veh_l, veh_w, veh_cgf, veh_cgs, veh_k, veh_m) = veh_param
    (age, belt, female, airbag, mass_r) = occ_param
    # Translate the activation time of IRM algorithm.
    # One simulation step is 10 ms; t_act becomes a step index from the start.
    t_act = int(100 - opt.t_act/10)
    # Define the random seed.
    # NOTE(review): only three seeds are listed but --case_num advertises 1-5;
    # case_num 4 or 5 would raise IndexError here -- confirm the intended range.
    random_seed = [41, 24, 11, ][opt.case_num - 1] + t_act
    np.random.seed(random_seed)
    # Define the two vehicles in the imminent collision scenario.
    if opt.Level == 'S3':
        Veh_1 = Vehicle_S3(opt.case_num, 0, 1, mass_ratio=mass_r[0], age=age[0], belt=belt[0], female=female[0],
                           airbag=airbag[0], r_seed=random_seed)
        Veh_2 = Vehicle_S3(opt.case_num, 0, 2, mass_ratio=mass_r[1], age=age[1], belt=belt[1], female=female[1],
                           airbag=airbag[1], r_seed=random_seed)
    else:
        Veh_1 = Vehicle_S12(opt.case_num, 0, 1, mass_ratio=mass_r[0], age=age[0], belt=belt[0], female=female[0],
                            airbag=airbag[0], r_seed=random_seed)
        Veh_2 = Vehicle_S12(opt.case_num, 0, 2, mass_ratio=mass_r[1], age=age[1], belt=belt[1], female=female[1],
                            airbag=airbag[1], r_seed=random_seed)
    # Predefine some parameters.
    flag_EB, flag_S3 = True, True
    image_list = []
    # Injury scores start at -1 as a "no collision evaluated yet" sentinel.
    INJ = - np.ones(2)
    INJ_ = - np.ones((2, 6))
    V1_x_seq, V1_y_seq, V1_theta_seq, V1_v_long_seq, V1_v_lat_seq, V1_a_seq, V1_omega_r_seq, V1_wheel_anlge_seq = [], [], [], [], [], [], [], []
    V2_x_seq, V2_y_seq, V2_theta_seq, V2_v_long_seq, V2_v_lat_seq, V2_a_seq, V2_omega_r_seq, V2_wheel_anlge_seq = [], [], [], [], [], [], [], []
    # t_1 / t_2 index into each vehicle's own (possibly re-planned) trajectory;
    # they are reset to 0 whenever the vehicle re-plans.
    t_1 = 0
    t_2 = 0
    if opt.no_visualize:
        fig, ax = plt.subplots(figsize=(10, 6))
        plt.ion()
        plt.axis('equal')
    # Simulate the imminent collision scenario with IRM algorithm for 0-2 seconds.
    # Update the time steps in real-time domain.
    # The minimum time interval is 10 ms in the simulation.
    for i in range(len(Veh_1.x)):
        # print(i)
        # Record the vehicle states at time step i.
        V1_x_seq.append(Veh_1.x[t_1])
        V1_y_seq.append(Veh_1.y[t_1])
        V1_theta_seq.append(Veh_1.theta[t_1])
        V1_v_long_seq.append(Veh_1.v_long[t_1])
        V1_v_lat_seq.append(Veh_1.v_lat[t_1])
        V1_a_seq.append(Veh_1.v_long_dot[t_1])
        V1_omega_r_seq.append(Veh_1.omega_r[t_1])
        V1_wheel_anlge_seq.append(Veh_1.wheel_anlge[t_1])
        V2_x_seq.append(Veh_2.x[t_2])
        V2_y_seq.append(Veh_2.y[t_2])
        V2_theta_seq.append(Veh_2.theta[t_2])
        V2_v_long_seq.append(Veh_2.v_long[t_2])
        V2_v_lat_seq.append(Veh_2.v_lat[t_2])
        V2_a_seq.append(Veh_2.v_long_dot[t_2])
        V2_omega_r_seq.append(Veh_2.omega_r[t_2])
        V2_wheel_anlge_seq.append(Veh_2.wheel_anlge[t_2])
        # Make safety decisions based on the IRM algorithm under the different levels.
        if opt.Level == 'EB' and flag_EB:
            if i >= t_act and (i - t_act) % 10 == 0:
                if opt.Ego_V == 1:
                    # Perceive Vehicle_2's states.
                    v2_state = percept(i, V2_x_seq, V2_y_seq, V2_theta_seq, V2_v_long_seq, V2_v_lat_seq, V2_a_seq,
                                       V2_omega_r_seq, V2_wheel_anlge_seq, V1_x_seq, V1_y_seq, r_seed=random_seed)
                    # Decide whether to activate emergency braking (EB).
                    t_1, flag_EB = deci_EB(i, Veh_1, t_1, v2_state,
                                           (V1_x_seq[-1], V1_y_seq[-1], V1_theta_seq[-1], V1_v_long_seq[-1]))
                elif opt.Ego_V == 2:
                    # Perceive Vehicle_2's states.
                    v1_state = percept(i, V1_x_seq, V1_y_seq, V1_theta_seq, V1_v_long_seq, V1_v_lat_seq, V1_a_seq,
                                       V1_omega_r_seq, V1_wheel_anlge_seq, V2_x_seq, V2_y_seq, r_seed=random_seed)
                    # Decide whether to activate emergency braking (EB).
                    t_2, flag_EB = deci_EB(i, Veh_2, t_2, v1_state,
                                           (V2_x_seq[-1], V2_y_seq[-1], V2_theta_seq[-1], V2_v_long_seq[-1]))
        elif opt.Level == 'S1':
            # The ego vehicle updates decisions with the frequency of 10 Hz.
            if i >= t_act and (i - t_act) % 10 == 0:
                if opt.Ego_V == 1:
                    # Perceive Vehicle_2's states.
                    v2_state = percept(i, V2_x_seq, V2_y_seq, V2_theta_seq, V2_v_long_seq, V2_v_lat_seq, V2_a_seq,
                                       V2_omega_r_seq, V2_wheel_anlge_seq, V1_x_seq, V1_y_seq, r_seed=random_seed)
                    # Make safety decisions.
                    Veh_1.decision(1, i, t_1, v2_state, veh_param, Deci_set, model_InjPre)
                    # Make motion planning.
                    Veh_1.trajectory(i, t_1)
                    t_1 = 0
                elif opt.Ego_V == 2:
                    # Perceive Vehicle_1's states.
                    v1_state = percept(i, V1_x_seq, V1_y_seq, V1_theta_seq, V1_v_long_seq, V1_v_lat_seq, V1_a_seq,
                                       V1_omega_r_seq, V1_wheel_anlge_seq, V2_x_seq, V2_y_seq, r_seed=random_seed)
                    # Make safety decisions.
                    Veh_2.decision(2, i, t_2, v1_state, veh_param, Deci_set, model_InjPre)
                    # Make motion planning.
                    Veh_2.trajectory(i, t_2)
                    t_2 = 0
        elif opt.Level == 'S2':
            # Vehicle_1 updates decisions with the frequency of 10 Hz.
            if i >= t_act and (i - t_act) % 10 == 0:
                # Perceive Vehicle_2's states.
                v2_state = percept(i, V2_x_seq, V2_y_seq, V2_theta_seq, V2_v_long_seq, V2_v_lat_seq, V2_a_seq,
                                   V2_omega_r_seq, V2_wheel_anlge_seq, V1_x_seq, V1_y_seq, r_seed=random_seed)
                # Make safety decisions.
                Veh_1.decision(1, i, t_1, v2_state, veh_param, Deci_set, model_InjPre)
                # Make motion planning.
                Veh_1.trajectory(i, t_1)
                t_1 = 0
            # Vehicle_2 updates decisions with the frequency of 10 Hz.
            # (offset by half a decision period so the two vehicles' updates interleave)
            if i >= t_act and (i - t_act) % 10 == int(10 // 2):
                # Perceive Vehicle_1's states.
                v1_state = percept(i, V1_x_seq, V1_y_seq, V1_theta_seq, V1_v_long_seq, V1_v_lat_seq, V1_a_seq,
                                   V1_omega_r_seq, V1_wheel_anlge_seq, V2_x_seq, V2_y_seq, r_seed=random_seed)
                # Make safety decisions.
                Veh_2.decision(2, i, t_2, v1_state, veh_param, Deci_set, model_InjPre)
                # Make motion planning.
                Veh_2.trajectory(i, t_2)
                t_2 = 0
        elif opt.Level == 'S3':
            # Vehicles update decisions with the frequency of 10 Hz.
            if i >= t_act and (i - t_act) % 10 == 0 and flag_S3:
                flag_S3 = deci_S3(flag_S3, i, t_1, Veh_1, Veh_2, veh_param, Deci_set, model_InjPre, r_seed=random_seed)
                t_1, t_2 = 0, 0
        # Visualize the simulation environment.
        if opt.no_visualize:
            plot_env(ax, V1_x_seq, V1_y_seq, V1_theta_seq[-1], V2_x_seq, V2_y_seq, V2_theta_seq[-1], veh_l, veh_w,
                     img_list)
            plt.pause(0.0001)
            if opt.save_gif:
                plt.savefig('image/temp_%s.png' % opt.Level)
                image_list.append(imageio.imread('image/temp_%s.png' % opt.Level))
        t_1 += 1
        t_2 += 1
        if i == 0:
            continue
        # Check whether there is a crash at the time step i.
        V1_state = (V1_x_seq[-1], V1_y_seq[-1], V1_theta_seq[-1], V1_x_seq[-2], V1_y_seq[-2], V1_theta_seq[-2])
        V2_state = (V2_x_seq[-1], V2_y_seq[-1], V2_theta_seq[-1], V2_x_seq[-2], V2_y_seq[-2], V2_theta_seq[-2])
        veh_striking_list = det_crash(veh_l, veh_w, V1_state, V2_state)
        # If crash happens, estimate the collision condition and predict occupant injury severity.
        if veh_striking_list:
            delta_v1, delta_v2, delta_v_index = Collision_cond(veh_striking_list, V1_v_long_seq[-1], V2_v_long_seq[-1],
                                                               V2_theta_seq[-1] - V1_theta_seq[-1], veh_param)
            dV_list = [delta_v1, delta_v2]
            angle_list = [np.rad2deg(V2_theta_seq[-1] - V1_theta_seq[-1]), np.rad2deg(V1_theta_seq[-1] - V2_theta_seq[-1])]
            PoI_list = [veh_striking_list[delta_v_index][1], veh_striking_list[delta_v_index][2]]
            Veh_list = [Veh_1, Veh_2]
            for num_i in range(2):
                # Process input valuables of the injury prediction model.
                # delta-v is converted from m/s to km/h and rounded.
                d_V = round(dV_list[num_i] * 3.6)
                # Snap the collision angle into 5-degree bins within [0, 360).
                angle = angle_list[num_i] if angle_list[num_i] >= 0 else angle_list[num_i] + 360
                angle = round((angle + 2.5) // 5) * 5
                angle = 0 if angle == 360 else angle
                veh_i = Veh_list[num_i]
                model_input = torch.from_numpy(np.array([[d_V, angle, PoI_list[num_i], PoI_list[1 - num_i], veh_i.age,
                                                           veh_i.female, veh_i.belt, veh_i.airbag,
                                                           veh_i.mass_ratio, ]])).float()
                # Get human injury information using the data-driven injury prediction model.
                pred = model_InjPre(model_input).detach()
                injury = torch.nn.functional.softmax(pred, dim=1).data.numpy()[0]
                # Translate injury probability into OISS.
                injury_score = (0 * injury[0] + 1.37 * injury[1] + 7.54 * injury[2] + 32.22 * injury[3]) / 32.22
                INJ[num_i] = injury_score
                INJ_[num_i, 0] = pred.data.max(1)[1].numpy()
                INJ_[num_i, 1:5] = injury
                INJ_[num_i, 5] = injury_score
            # Stop simulating once the first collision has been evaluated.
            break
    if opt.no_visualize:
        if opt.save_gif:
            # Hold the final frame for 50 extra frames before writing the GIF.
            for i in range(50):
                image_list.append(imageio.imread('image/temp_%s.png' % opt.Level))
            imageio.mimsave(
                'image/simulation_%s_%s_%s_%s.gif' % (opt.Level, opt.Ego_V, opt.case_num, t_act),
                image_list, duration=0.03)
        plt.pause(5)
        plt.ioff()
        plt.close()
# Script entry point: run the IRM simulation with the command-line options.
if __name__ == "__main__":
    main()
| [
"numpy.sqrt",
"matplotlib.image.imread",
"utils.Det_crash.det_crash",
"utils.Vehicle.deci_S3",
"numpy.array",
"numpy.sin",
"torch.nn.functional.softmax",
"numpy.arange",
"argparse.ArgumentParser",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.close",
"matplotlib.pyplot.yticks",
"numpy.random.... | [((848, 881), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (871, 881), False, 'import warnings\n'), ((3456, 3501), 'cv2.getRotationMatrix2D', 'cv2.getRotationMatrix2D', (['(cX, cY)', 'angle', '(1.0)'], {}), '((cX, cY), angle, 1.0)\n', (3479, 3501), False, 'import cv2\n'), ((3512, 3527), 'numpy.abs', 'np.abs', (['M[0, 0]'], {}), '(M[0, 0])\n', (3518, 3527), True, 'import numpy as np\n'), ((3538, 3553), 'numpy.abs', 'np.abs', (['M[0, 1]'], {}), '(M[0, 1])\n', (3544, 3553), True, 'import numpy as np\n'), ((3879, 3942), 'cv2.warpAffine', 'cv2.warpAffine', (['image', 'M', '(nW, nH)'], {'borderValue': '(255, 255, 255)'}), '(image, M, (nW, nH), borderValue=(255, 255, 255))\n', (3893, 3942), False, 'import cv2\n'), ((4121, 4130), 'matplotlib.pyplot.cla', 'plt.cla', ([], {}), '()\n', (4128, 4130), True, 'import matplotlib.pyplot as plt\n'), ((4135, 4158), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'fontsize': '(22)'}), '(fontsize=22)\n', (4145, 4158), True, 'import matplotlib.pyplot as plt\n'), ((4163, 4186), 'matplotlib.pyplot.yticks', 'plt.yticks', ([], {'fontsize': '(22)'}), '(fontsize=22)\n', (4173, 4186), True, 'import matplotlib.pyplot as plt\n'), ((4191, 4210), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(-10, 40)'], {}), '((-10, 40))\n', (4199, 4210), True, 'import matplotlib.pyplot as plt\n'), ((4215, 4234), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(-10, 20)'], {}), '((-10, 20))\n', (4223, 4234), True, 'import matplotlib.pyplot as plt\n'), ((4435, 4528), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'left': '(0.1)', 'bottom': '(0.1)', 'top': '(0.94)', 'right': '(0.94)', 'wspace': '(0.25)', 'hspace': '(0.25)'}), '(left=0.1, bottom=0.1, top=0.94, right=0.94, wspace=0.25,\n hspace=0.25)\n', (4454, 4528), True, 'import matplotlib.pyplot as plt\n'), ((5990, 6077), 'matplotlib.pyplot.plot', 'plt.plot', (['V1_x_seq', 'V1_y_seq'], {'color': '"""red"""', 'linestyle': '"""--"""', 
'linewidth': '(1.3)', 'alpha': '(0.5)'}), "(V1_x_seq, V1_y_seq, color='red', linestyle='--', linewidth=1.3,\n alpha=0.5)\n", (5998, 6077), True, 'import matplotlib.pyplot as plt\n'), ((6078, 6166), 'matplotlib.pyplot.plot', 'plt.plot', (['V2_x_seq', 'V2_y_seq'], {'color': '"""blue"""', 'linestyle': '"""--"""', 'linewidth': '(1.3)', 'alpha': '(0.5)'}), "(V2_x_seq, V2_y_seq, color='blue', linestyle='--', linewidth=1.3,\n alpha=0.5)\n", (6086, 6166), True, 'import matplotlib.pyplot as plt\n'), ((6294, 6319), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (6317, 6319), False, 'import argparse\n'), ((7917, 8008), 'utils.Inj_Pre.RNN', 'RNN', ([], {'in_dim': '(16)', 'hid_dim': '(32)', 'n_layers': '(2)', 'flag_LSTM': '(True)', 'bidirectional': '(True)', 'dropout': '(0.5)'}), '(in_dim=16, hid_dim=32, n_layers=2, flag_LSTM=True, bidirectional=True,\n dropout=0.5)\n', (7920, 8008), False, 'from utils.Inj_Pre import RNN\n'), ((8168, 8204), 'matplotlib.image.imread', 'mpimg.imread', (['"""para\\\\image\\\\red.png"""'], {}), "('para\\\\image\\\\red.png')\n", (8180, 8204), True, 'import matplotlib.image as mpimg\n'), ((8216, 8253), 'matplotlib.image.imread', 'mpimg.imread', (['"""para\\\\image\\\\blue.png"""'], {}), "('para\\\\image\\\\blue.png')\n", (8228, 8253), True, 'import matplotlib.image as mpimg\n'), ((8705, 8732), 'numpy.random.seed', 'np.random.seed', (['random_seed'], {}), '(random_seed)\n', (8719, 8732), True, 'import numpy as np\n'), ((1660, 1688), 'numpy.sqrt', 'np.sqrt', (['(veh_I[0] / veh_m[0])'], {}), '(veh_I[0] / veh_m[0])\n', (1667, 1688), True, 'import numpy as np\n'), ((1690, 1718), 'numpy.sqrt', 'np.sqrt', (['(veh_I[1] / veh_m[1])'], {}), '(veh_I[1] / veh_m[1])\n', (1697, 1718), True, 'import numpy as np\n'), ((4250, 4273), 'numpy.arange', 'np.arange', (['(-10)', '(40.1)', '(5)'], {}), '(-10, 40.1, 5)\n', (4259, 4273), True, 'import numpy as np\n'), ((4348, 4371), 'numpy.arange', 'np.arange', (['(-10)', '(20.1)', '(5)'], {}), '(-10, 
20.1, 5)\n', (4357, 4371), True, 'import numpy as np\n'), ((8038, 8081), 'torch.load', 'torch.load', (['"""para\\\\DL_InjuryPrediction.pkl"""'], {}), "('para\\\\DL_InjuryPrediction.pkl')\n", (8048, 8081), False, 'import torch\n'), ((8842, 8981), 'utils.Vehicle.Vehicle_S3', 'Vehicle_S3', (['opt.case_num', '(0)', '(1)'], {'mass_ratio': 'mass_r[0]', 'age': 'age[0]', 'belt': 'belt[0]', 'female': 'female[0]', 'airbag': 'airbag[0]', 'r_seed': 'random_seed'}), '(opt.case_num, 0, 1, mass_ratio=mass_r[0], age=age[0], belt=belt[\n 0], female=female[0], airbag=airbag[0], r_seed=random_seed)\n', (8852, 8981), False, 'from utils.Vehicle import Vehicle_S12, Vehicle_S3, deci_S3, deci_EB\n'), ((9020, 9159), 'utils.Vehicle.Vehicle_S3', 'Vehicle_S3', (['opt.case_num', '(0)', '(2)'], {'mass_ratio': 'mass_r[1]', 'age': 'age[1]', 'belt': 'belt[1]', 'female': 'female[1]', 'airbag': 'airbag[1]', 'r_seed': 'random_seed'}), '(opt.case_num, 0, 2, mass_ratio=mass_r[1], age=age[1], belt=belt[\n 1], female=female[1], airbag=airbag[1], r_seed=random_seed)\n', (9030, 9159), False, 'from utils.Vehicle import Vehicle_S12, Vehicle_S3, deci_S3, deci_EB\n'), ((9208, 9348), 'utils.Vehicle.Vehicle_S12', 'Vehicle_S12', (['opt.case_num', '(0)', '(1)'], {'mass_ratio': 'mass_r[0]', 'age': 'age[0]', 'belt': 'belt[0]', 'female': 'female[0]', 'airbag': 'airbag[0]', 'r_seed': 'random_seed'}), '(opt.case_num, 0, 1, mass_ratio=mass_r[0], age=age[0], belt=belt\n [0], female=female[0], airbag=airbag[0], r_seed=random_seed)\n', (9219, 9348), False, 'from utils.Vehicle import Vehicle_S12, Vehicle_S3, deci_S3, deci_EB\n'), ((9388, 9528), 'utils.Vehicle.Vehicle_S12', 'Vehicle_S12', (['opt.case_num', '(0)', '(2)'], {'mass_ratio': 'mass_r[1]', 'age': 'age[1]', 'belt': 'belt[1]', 'female': 'female[1]', 'airbag': 'airbag[1]', 'r_seed': 'random_seed'}), '(opt.case_num, 0, 2, mass_ratio=mass_r[1], age=age[1], belt=belt\n [1], female=female[1], airbag=airbag[1], r_seed=random_seed)\n', (9399, 9528), False, 'from 
utils.Vehicle import Vehicle_S12, Vehicle_S3, deci_S3, deci_EB\n'), ((9652, 9662), 'numpy.ones', 'np.ones', (['(2)'], {}), '(2)\n', (9659, 9662), True, 'import numpy as np\n'), ((9676, 9691), 'numpy.ones', 'np.ones', (['(2, 6)'], {}), '((2, 6))\n', (9683, 9691), True, 'import numpy as np\n'), ((10050, 10079), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(10, 6)'}), '(figsize=(10, 6))\n', (10062, 10079), True, 'import matplotlib.pyplot as plt\n'), ((10088, 10097), 'matplotlib.pyplot.ion', 'plt.ion', ([], {}), '()\n', (10095, 10097), True, 'import matplotlib.pyplot as plt\n'), ((10106, 10123), 'matplotlib.pyplot.axis', 'plt.axis', (['"""equal"""'], {}), "('equal')\n", (10114, 10123), True, 'import matplotlib.pyplot as plt\n'), ((16225, 16268), 'utils.Det_crash.det_crash', 'det_crash', (['veh_l', 'veh_w', 'V1_state', 'V2_state'], {}), '(veh_l, veh_w, V1_state, V2_state)\n', (16234, 16268), False, 'from utils.Det_crash import det_crash\n'), ((18607, 18619), 'matplotlib.pyplot.pause', 'plt.pause', (['(5)'], {}), '(5)\n', (18616, 18619), True, 'import matplotlib.pyplot as plt\n'), ((18628, 18638), 'matplotlib.pyplot.ioff', 'plt.ioff', ([], {}), '()\n', (18636, 18638), True, 'import matplotlib.pyplot as plt\n'), ((18647, 18658), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (18656, 18658), True, 'import matplotlib.pyplot as plt\n'), ((1049, 1073), 'xlrd.open_workbook', 'xlrd.open_workbook', (['file'], {}), '(file)\n', (1067, 1073), False, 'import xlrd\n'), ((5249, 5265), 'numpy.sin', 'np.sin', (['V1_angle'], {}), '(V1_angle)\n', (5255, 5265), True, 'import numpy as np\n'), ((5345, 5361), 'numpy.cos', 'np.cos', (['V1_angle'], {}), '(V1_angle)\n', (5351, 5361), True, 'import numpy as np\n'), ((5441, 5457), 'numpy.sin', 'np.sin', (['V2_angle'], {}), '(V2_angle)\n', (5447, 5457), True, 'import numpy as np\n'), ((5537, 5553), 'numpy.cos', 'np.cos', (['V2_angle'], {}), '(V2_angle)\n', (5543, 5553), True, 'import numpy as np\n'), ((5621, 5641), 
'numpy.rad2deg', 'np.rad2deg', (['V1_angle'], {}), '(V1_angle)\n', (5631, 5641), True, 'import numpy as np\n'), ((5816, 5836), 'numpy.rad2deg', 'np.rad2deg', (['V2_angle'], {}), '(V2_angle)\n', (5826, 5836), True, 'import numpy as np\n'), ((15644, 15661), 'matplotlib.pyplot.pause', 'plt.pause', (['(0.0001)'], {}), '(0.0001)\n', (15653, 15661), True, 'import matplotlib.pyplot as plt\n'), ((16447, 16571), 'utils.Con_est.Collision_cond', 'Collision_cond', (['veh_striking_list', 'V1_v_long_seq[-1]', 'V2_v_long_seq[-1]', '(V2_theta_seq[-1] - V1_theta_seq[-1])', 'veh_param'], {}), '(veh_striking_list, V1_v_long_seq[-1], V2_v_long_seq[-1], \n V2_theta_seq[-1] - V1_theta_seq[-1], veh_param)\n', (16461, 16571), False, 'from utils.Con_est import Collision_cond\n'), ((18440, 18568), 'imageio.mimsave', 'imageio.mimsave', (["('image/simulation_%s_%s_%s_%s.gif' % (opt.Level, opt.Ego_V, opt.case_num,\n t_act))", 'image_list'], {'duration': '(0.03)'}), "('image/simulation_%s_%s_%s_%s.gif' % (opt.Level, opt.Ego_V,\n opt.case_num, t_act), image_list, duration=0.03)\n", (18455, 18568), False, 'import imageio\n'), ((5213, 5229), 'numpy.cos', 'np.cos', (['V1_angle'], {}), '(V1_angle)\n', (5219, 5229), True, 'import numpy as np\n'), ((5309, 5325), 'numpy.sin', 'np.sin', (['V1_angle'], {}), '(V1_angle)\n', (5315, 5325), True, 'import numpy as np\n'), ((5405, 5421), 'numpy.cos', 'np.cos', (['V2_angle'], {}), '(V2_angle)\n', (5411, 5421), True, 'import numpy as np\n'), ((5501, 5517), 'numpy.sin', 'np.sin', (['V2_angle'], {}), '(V2_angle)\n', (5507, 5517), True, 'import numpy as np\n'), ((15707, 15751), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('image/temp_%s.png' % opt.Level)"], {}), "('image/temp_%s.png' % opt.Level)\n", (15718, 15751), True, 'import matplotlib.pyplot as plt\n'), ((16700, 16747), 'numpy.rad2deg', 'np.rad2deg', (['(V2_theta_seq[-1] - V1_theta_seq[-1])'], {}), '(V2_theta_seq[-1] - V1_theta_seq[-1])\n', (16710, 16747), True, 'import numpy as np\n'), ((16749, 16796), 
'numpy.rad2deg', 'np.rad2deg', (['(V1_theta_seq[-1] - V2_theta_seq[-1])'], {}), '(V1_theta_seq[-1] - V2_theta_seq[-1])\n', (16759, 16796), True, 'import numpy as np\n'), ((11466, 11633), 'utils.Percept.percept', 'percept', (['i', 'V2_x_seq', 'V2_y_seq', 'V2_theta_seq', 'V2_v_long_seq', 'V2_v_lat_seq', 'V2_a_seq', 'V2_omega_r_seq', 'V2_wheel_anlge_seq', 'V1_x_seq', 'V1_y_seq'], {'r_seed': 'random_seed'}), '(i, V2_x_seq, V2_y_seq, V2_theta_seq, V2_v_long_seq, V2_v_lat_seq,\n V2_a_seq, V2_omega_r_seq, V2_wheel_anlge_seq, V1_x_seq, V1_y_seq,\n r_seed=random_seed)\n', (11473, 11633), False, 'from utils.Percept import percept\n'), ((11774, 11878), 'utils.Vehicle.deci_EB', 'deci_EB', (['i', 'Veh_1', 't_1', 'v2_state', '(V1_x_seq[-1], V1_y_seq[-1], V1_theta_seq[-1], V1_v_long_seq[-1])'], {}), '(i, Veh_1, t_1, v2_state, (V1_x_seq[-1], V1_y_seq[-1], V1_theta_seq[\n -1], V1_v_long_seq[-1]))\n', (11781, 11878), False, 'from utils.Vehicle import Vehicle_S12, Vehicle_S3, deci_S3, deci_EB\n'), ((15786, 15833), 'imageio.imread', 'imageio.imread', (["('image/temp_%s.png' % opt.Level)"], {}), "('image/temp_%s.png' % opt.Level)\n", (15800, 15833), False, 'import imageio\n'), ((18379, 18426), 'imageio.imread', 'imageio.imread', (["('image/temp_%s.png' % opt.Level)"], {}), "('image/temp_%s.png' % opt.Level)\n", (18393, 18426), False, 'import imageio\n'), ((12037, 12204), 'utils.Percept.percept', 'percept', (['i', 'V1_x_seq', 'V1_y_seq', 'V1_theta_seq', 'V1_v_long_seq', 'V1_v_lat_seq', 'V1_a_seq', 'V1_omega_r_seq', 'V1_wheel_anlge_seq', 'V2_x_seq', 'V2_y_seq'], {'r_seed': 'random_seed'}), '(i, V1_x_seq, V1_y_seq, V1_theta_seq, V1_v_long_seq, V1_v_lat_seq,\n V1_a_seq, V1_omega_r_seq, V1_wheel_anlge_seq, V2_x_seq, V2_y_seq,\n r_seed=random_seed)\n', (12044, 12204), False, 'from utils.Percept import percept\n'), ((12345, 12449), 'utils.Vehicle.deci_EB', 'deci_EB', (['i', 'Veh_2', 't_2', 'v1_state', '(V2_x_seq[-1], V2_y_seq[-1], V2_theta_seq[-1], V2_v_long_seq[-1])'], {}), '(i, Veh_2, t_2, 
v1_state, (V2_x_seq[-1], V2_y_seq[-1], V2_theta_seq[\n -1], V2_v_long_seq[-1]))\n', (12352, 12449), False, 'from utils.Vehicle import Vehicle_S12, Vehicle_S3, deci_S3, deci_EB\n'), ((12768, 12935), 'utils.Percept.percept', 'percept', (['i', 'V2_x_seq', 'V2_y_seq', 'V2_theta_seq', 'V2_v_long_seq', 'V2_v_lat_seq', 'V2_a_seq', 'V2_omega_r_seq', 'V2_wheel_anlge_seq', 'V1_x_seq', 'V1_y_seq'], {'r_seed': 'random_seed'}), '(i, V2_x_seq, V2_y_seq, V2_theta_seq, V2_v_long_seq, V2_v_lat_seq,\n V2_a_seq, V2_omega_r_seq, V2_wheel_anlge_seq, V1_x_seq, V1_y_seq,\n r_seed=random_seed)\n', (12775, 12935), False, 'from utils.Percept import percept\n'), ((14023, 14190), 'utils.Percept.percept', 'percept', (['i', 'V2_x_seq', 'V2_y_seq', 'V2_theta_seq', 'V2_v_long_seq', 'V2_v_lat_seq', 'V2_a_seq', 'V2_omega_r_seq', 'V2_wheel_anlge_seq', 'V1_x_seq', 'V1_y_seq'], {'r_seed': 'random_seed'}), '(i, V2_x_seq, V2_y_seq, V2_theta_seq, V2_v_long_seq, V2_v_lat_seq,\n V2_a_seq, V2_omega_r_seq, V2_wheel_anlge_seq, V1_x_seq, V1_y_seq,\n r_seed=random_seed)\n', (14030, 14190), False, 'from utils.Percept import percept\n'), ((14661, 14828), 'utils.Percept.percept', 'percept', (['i', 'V1_x_seq', 'V1_y_seq', 'V1_theta_seq', 'V1_v_long_seq', 'V1_v_lat_seq', 'V1_a_seq', 'V1_omega_r_seq', 'V1_wheel_anlge_seq', 'V2_x_seq', 'V2_y_seq'], {'r_seed': 'random_seed'}), '(i, V1_x_seq, V1_y_seq, V1_theta_seq, V1_v_long_seq, V1_v_lat_seq,\n V1_a_seq, V1_omega_r_seq, V1_wheel_anlge_seq, V2_x_seq, V2_y_seq,\n r_seed=random_seed)\n', (14668, 14828), False, 'from utils.Percept import percept\n'), ((13340, 13507), 'utils.Percept.percept', 'percept', (['i', 'V1_x_seq', 'V1_y_seq', 'V1_theta_seq', 'V1_v_long_seq', 'V1_v_lat_seq', 'V1_a_seq', 'V1_omega_r_seq', 'V1_wheel_anlge_seq', 'V2_x_seq', 'V2_y_seq'], {'r_seed': 'random_seed'}), '(i, V1_x_seq, V1_y_seq, V1_theta_seq, V1_v_long_seq, V1_v_lat_seq,\n V1_a_seq, V1_omega_r_seq, V1_wheel_anlge_seq, V2_x_seq, V2_y_seq,\n r_seed=random_seed)\n', (13347, 13507), False, 'from 
utils.Percept import percept\n'), ((15282, 15379), 'utils.Vehicle.deci_S3', 'deci_S3', (['flag_S3', 'i', 't_1', 'Veh_1', 'Veh_2', 'veh_param', 'Deci_set', 'model_InjPre'], {'r_seed': 'random_seed'}), '(flag_S3, i, t_1, Veh_1, Veh_2, veh_param, Deci_set, model_InjPre,\n r_seed=random_seed)\n', (15289, 15379), False, 'from utils.Vehicle import Vehicle_S12, Vehicle_S3, deci_S3, deci_EB\n'), ((17385, 17520), 'numpy.array', 'np.array', (['[[d_V, angle, PoI_list[num_i], PoI_list[1 - num_i], veh_i.age, veh_i.female,\n veh_i.belt, veh_i.airbag, veh_i.mass_ratio]]'], {}), '([[d_V, angle, PoI_list[num_i], PoI_list[1 - num_i], veh_i.age,\n veh_i.female, veh_i.belt, veh_i.airbag, veh_i.mass_ratio]])\n', (17393, 17520), True, 'import numpy as np\n'), ((17822, 17862), 'torch.nn.functional.softmax', 'torch.nn.functional.softmax', (['pred'], {'dim': '(1)'}), '(pred, dim=1)\n', (17849, 17862), False, 'import torch\n')] |
import numpy as np
import torch
import torchaudio
from pitchTracking import Pitch
class FFE:
    """F0 Frame Error (FFE) between a reference and a synthesized waveform.

    A frame counts as an error when it has either a gross pitch error
    (both signals voiced but the estimated F0 deviates by more than 20 %)
    or a voicing decision error (voiced/unvoiced disagreement).  The FFE
    is the fraction of such frames.
    """

    def __init__(self, sr=16000):
        # Sample rate shared by audio loading checks and the pitch tracker.
        self.sr = sr
        self.pitch = Pitch(self.sr)

    def __call__(self, y_ref, y_syn):
        # Allow the object to be used directly as a metric function.
        return self.calculate_ffe(y_ref, y_syn)

    def calculate_ffe_path(self, ref_path, syn_path):
        """Load two audio files and return their FFE."""
        y_ref, sr_ref = torchaudio.load(ref_path)
        y_syn, sr_syn = torchaudio.load(syn_path)
        # Both files must match each other and the tracker's sample rate.
        assert sr_ref == sr_syn, f"{sr_ref} != {sr_syn}"
        assert sr_ref == self.sr, f"{sr_ref} != {self.sr}"
        return self.calculate_ffe(y_ref, y_syn)

    def calculate_ffe(self, y_ref, y_syn):
        """Return the FFE between two waveforms (torch tensors)."""
        flat_ref = y_ref.view(-1)
        flat_syn = y_syn.view(-1)
        # compute_yin yields (f0 track, ..., frame times).
        ref_f, _, _, ref_t = self.pitch.compute_yin(flat_ref, self.sr)
        syn_f, _, _, syn_t = self.pitch.compute_yin(flat_syn, self.sr)
        distortion = self.f0_frame_error(np.asarray(ref_t), np.asarray(ref_f),
                                         np.asarray(syn_t), np.asarray(syn_f))
        return distortion.item()

    def f0_frame_error(self, true_t, true_f, est_t, est_f):
        """Fraction of frames with a gross pitch error or a voicing error."""
        gross = self._gross_pitch_error_frames(true_t, true_f, est_t, est_f)
        voicing = self._voicing_decision_error_frames(true_t, true_f, est_t, est_f)
        return (np.sum(gross) + np.sum(voicing)) / len(true_t)

    def _voicing_decision_error_frames(self, true_t, true_f, est_t, est_f):
        # Frames where exactly one of the two signals is voiced (F0 != 0).
        return (true_f != 0) != (est_f != 0)

    def _true_voiced_frames(self, true_t, true_f, est_t, est_f):
        # Frames voiced (F0 != 0) in both signals.
        return (true_f != 0) & (est_f != 0)

    def _gross_pitch_error_frames(self, true_t, true_f, est_t, est_f, eps=1e-8):
        # Among frames voiced in both signals, flag those whose estimated F0
        # deviates from the reference by more than 20 %.  eps guards against
        # division by zero on unvoiced reference frames.
        voiced = self._true_voiced_frames(true_t, true_f, est_t, est_f)
        relative_off = np.abs(est_f / (np.asarray(true_f) + eps) - 1)
        return voiced & (relative_off > 0.2)
if __name__ == "__main__":
    # Smoke test: compare a clean recording against a noisy version of it.
    path1 = "../docs/audio/sp15.wav"
    path2 = "../docs/noisy/sp15_station_sn5.wav"
    # NOTE(review): calculate_ffe_path asserts the files' sample rate equals
    # the rate passed here (22050 Hz) -- confirm the demo files match.
    ffe = FFE(22050)
    print(ffe.calculate_ffe_path(path1, path2))
    y_ref, sr_ref = torchaudio.load(path1)
    y_syn, sr_syn = torchaudio.load(path2)
    print(y_ref.size(), y_syn.size())
    print(ffe.calculate_ffe(y_ref, y_syn))
| [
"numpy.abs",
"torchaudio.load",
"numpy.array",
"numpy.sum",
"pitchTracking.Pitch"
] | [((2338, 2360), 'torchaudio.load', 'torchaudio.load', (['path1'], {}), '(path1)\n', (2353, 2360), False, 'import torchaudio\n'), ((2381, 2403), 'torchaudio.load', 'torchaudio.load', (['path2'], {}), '(path2)\n', (2396, 2403), False, 'import torchaudio\n'), ((171, 185), 'pitchTracking.Pitch', 'Pitch', (['self.sr'], {}), '(self.sr)\n', (176, 185), False, 'from pitchTracking import Pitch\n'), ((352, 377), 'torchaudio.load', 'torchaudio.load', (['ref_path'], {}), '(ref_path)\n', (367, 377), False, 'import torchaudio\n'), ((402, 427), 'torchaudio.load', 'torchaudio.load', (['syn_path'], {}), '(syn_path)\n', (417, 427), False, 'import torchaudio\n'), ((917, 933), 'numpy.array', 'np.array', (['yref_f'], {}), '(yref_f)\n', (925, 933), True, 'import numpy as np\n'), ((951, 967), 'numpy.array', 'np.array', (['yref_t'], {}), '(yref_t)\n', (959, 967), True, 'import numpy as np\n'), ((985, 1001), 'numpy.array', 'np.array', (['ysyn_f'], {}), '(ysyn_f)\n', (993, 1001), True, 'import numpy as np\n'), ((1019, 1035), 'numpy.array', 'np.array', (['ysyn_t'], {}), '(ysyn_t)\n', (1027, 1035), True, 'import numpy as np\n'), ((2044, 2076), 'numpy.abs', 'np.abs', (['(est_f / true_f_p_eps - 1)'], {}), '(est_f / true_f_p_eps - 1)\n', (2050, 2076), True, 'import numpy as np\n'), ((1467, 1499), 'numpy.sum', 'np.sum', (['gross_pitch_error_frames'], {}), '(gross_pitch_error_frames)\n', (1473, 1499), True, 'import numpy as np\n'), ((1518, 1555), 'numpy.sum', 'np.sum', (['voicing_decision_error_frames'], {}), '(voicing_decision_error_frames)\n', (1524, 1555), True, 'import numpy as np\n')] |
from unittest import TestCase
import rasterio
import numpy as np
import niche_vlaanderen
import pytest
from niche_vlaanderen.exception import NicheException
def raster_to_numpy(filename):
    """Read band 1 of a GDAL raster as a numpy array.

    Parameters
    ----------
    filename : str
        Path to a raster file readable by rasterio.

    Returns
    -------
    numpy.ndarray
        The first band. No-data cells are replaced by np.nan for
        floating-point rasters and by -99 for integer rasters.
    """
    with rasterio.open(filename) as ds:
        data = ds.read(1)
        nodata = ds.nodatavals[0]

        # Replace no-data cells, taking the data type into account:
        # floats can represent NaN, integer grids get the sentinel -99.
        # (np.issubdtype covers every float width, not just float32,
        # matching the "real types" contract in the docstring.)
        if np.issubdtype(data.dtype, np.floating):
            data[np.isclose(data, nodata)] = np.nan
        else:
            data[np.isclose(data, nodata)] = -99
    return data
class testAcidity(TestCase):
def test_get_soil_mlw(self):
mlw = np.array([50, 66])
soil_code = np.array([14, 7])
a = niche_vlaanderen.Acidity()
result = a._calculate_soil_mlw(soil_code, mlw)
np.testing.assert_equal(np.array([1, 9]), result)
def test_get_soil_mlw_borders(self):
mlw = np.array([79, 80, 100, 110, 111])
soil_code = np.array([14, 14, 14, 14, 14])
a = niche_vlaanderen.Acidity()
result = a._calculate_soil_mlw(soil_code, mlw)
expected = np.array([1, 1, 2, 2, 3])
np.testing.assert_equal(expected, result)
def test_acidity_partial(self):
rainwater = np.array([0])
minerality = np.array([1])
inundation = np.array([1])
seepage = np.array([1])
soil_mlw = np.array([1])
a = niche_vlaanderen.Acidity()
result = a._get_acidity(rainwater, minerality, inundation,
seepage, soil_mlw)
np.testing.assert_equal(np.array([3]), result)
def test_seepage_code(self):
seepage = np.array([5, 0.3, 0.05, -0.04, -0.2, -5])
a = niche_vlaanderen.Acidity()
result = a._get_seepage(seepage)
expected = np.array([1, 1, 1, 1, 2, 3])
np.testing.assert_equal(expected, result)
def test_acidity(self):
rainwater = np.array([0])
minerality = np.array([0])
soilcode = np.array([14])
inundation = np.array([1])
seepage = np.array([20])
mlw = np.array([50])
a = niche_vlaanderen.Acidity()
result = a.calculate(soilcode, mlw, inundation, seepage, minerality,
rainwater)
np.testing.assert_equal(3, result)
def test_acidity_testcase(self):
a = niche_vlaanderen.Acidity()
inputdir = "testcase/zwarte_beek/input/"
soil_code = raster_to_numpy(inputdir + "soil_code.asc")
soil_code_r = soil_code
soil_code_r[soil_code > 0] = np.round(soil_code / 10000)[soil_code > 0]
mlw = raster_to_numpy(inputdir + "mlw.asc")
inundation = \
raster_to_numpy(inputdir + "inundation.asc")
rainwater = raster_to_numpy(inputdir + "nullgrid.asc")
seepage = raster_to_numpy(inputdir + "seepage.asc")
minerality = raster_to_numpy(inputdir + "minerality.asc")
acidity = raster_to_numpy("testcase/zwarte_beek/abiotic/acidity.asc")
acidity[np.isnan(acidity)] = 255
acidity[acidity == -99] = 255
result = a.calculate(soil_code_r, mlw, inundation, seepage,
minerality, rainwater)
np.testing.assert_equal(acidity, result)
def test_acidity_invalidsoil(self):
a = niche_vlaanderen.Acidity()
rainwater = np.array([0])
minerality = np.array([0])
soilcode = np.array([-1])
inundation = np.array([1])
seepage = np.array([20])
mlw = np.array([50])
a = niche_vlaanderen.Acidity()
with pytest.raises(NicheException):
a.calculate(soilcode, mlw, inundation, seepage, minerality,
rainwater)
def test_acidity_invalidminerality(self):
        """A minerality value of 500 must raise NicheException."""
        acidity = niche_vlaanderen.Acidity()
        with pytest.raises(NicheException):
            acidity.calculate(
                np.array([14]),   # soilcode
                np.array([50]),   # mlw
                np.array([1]),    # inundation
                np.array([20]),   # seepage
                np.array([500]),  # minerality (invalid)
                np.array([0]),    # rainwater
            )
| [
"numpy.isclose",
"numpy.testing.assert_equal",
"rasterio.open",
"numpy.array",
"numpy.isnan",
"pytest.raises",
"niche_vlaanderen.Acidity",
"numpy.round"
] | [((341, 364), 'rasterio.open', 'rasterio.open', (['filename'], {}), '(filename)\n', (354, 364), False, 'import rasterio\n'), ((756, 774), 'numpy.array', 'np.array', (['[50, 66]'], {}), '([50, 66])\n', (764, 774), True, 'import numpy as np\n'), ((795, 812), 'numpy.array', 'np.array', (['[14, 7]'], {}), '([14, 7])\n', (803, 812), True, 'import numpy as np\n'), ((825, 851), 'niche_vlaanderen.Acidity', 'niche_vlaanderen.Acidity', ([], {}), '()\n', (849, 851), False, 'import niche_vlaanderen\n'), ((1022, 1055), 'numpy.array', 'np.array', (['[79, 80, 100, 110, 111]'], {}), '([79, 80, 100, 110, 111])\n', (1030, 1055), True, 'import numpy as np\n'), ((1076, 1106), 'numpy.array', 'np.array', (['[14, 14, 14, 14, 14]'], {}), '([14, 14, 14, 14, 14])\n', (1084, 1106), True, 'import numpy as np\n'), ((1119, 1145), 'niche_vlaanderen.Acidity', 'niche_vlaanderen.Acidity', ([], {}), '()\n', (1143, 1145), False, 'import niche_vlaanderen\n'), ((1220, 1245), 'numpy.array', 'np.array', (['[1, 1, 2, 2, 3]'], {}), '([1, 1, 2, 2, 3])\n', (1228, 1245), True, 'import numpy as np\n'), ((1254, 1295), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['expected', 'result'], {}), '(expected, result)\n', (1277, 1295), True, 'import numpy as np\n'), ((1353, 1366), 'numpy.array', 'np.array', (['[0]'], {}), '([0])\n', (1361, 1366), True, 'import numpy as np\n'), ((1388, 1401), 'numpy.array', 'np.array', (['[1]'], {}), '([1])\n', (1396, 1401), True, 'import numpy as np\n'), ((1423, 1436), 'numpy.array', 'np.array', (['[1]'], {}), '([1])\n', (1431, 1436), True, 'import numpy as np\n'), ((1455, 1468), 'numpy.array', 'np.array', (['[1]'], {}), '([1])\n', (1463, 1468), True, 'import numpy as np\n'), ((1488, 1501), 'numpy.array', 'np.array', (['[1]'], {}), '([1])\n', (1496, 1501), True, 'import numpy as np\n'), ((1515, 1541), 'niche_vlaanderen.Acidity', 'niche_vlaanderen.Acidity', ([], {}), '()\n', (1539, 1541), False, 'import niche_vlaanderen\n'), ((1768, 1809), 'numpy.array', 'np.array', 
(['[5, 0.3, 0.05, -0.04, -0.2, -5]'], {}), '([5, 0.3, 0.05, -0.04, -0.2, -5])\n', (1776, 1809), True, 'import numpy as np\n'), ((1822, 1848), 'niche_vlaanderen.Acidity', 'niche_vlaanderen.Acidity', ([], {}), '()\n', (1846, 1848), False, 'import niche_vlaanderen\n'), ((1910, 1938), 'numpy.array', 'np.array', (['[1, 1, 1, 1, 2, 3]'], {}), '([1, 1, 1, 1, 2, 3])\n', (1918, 1938), True, 'import numpy as np\n'), ((1947, 1988), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['expected', 'result'], {}), '(expected, result)\n', (1970, 1988), True, 'import numpy as np\n'), ((2038, 2051), 'numpy.array', 'np.array', (['[0]'], {}), '([0])\n', (2046, 2051), True, 'import numpy as np\n'), ((2073, 2086), 'numpy.array', 'np.array', (['[0]'], {}), '([0])\n', (2081, 2086), True, 'import numpy as np\n'), ((2106, 2120), 'numpy.array', 'np.array', (['[14]'], {}), '([14])\n', (2114, 2120), True, 'import numpy as np\n'), ((2142, 2155), 'numpy.array', 'np.array', (['[1]'], {}), '([1])\n', (2150, 2155), True, 'import numpy as np\n'), ((2174, 2188), 'numpy.array', 'np.array', (['[20]'], {}), '([20])\n', (2182, 2188), True, 'import numpy as np\n'), ((2203, 2217), 'numpy.array', 'np.array', (['[50]'], {}), '([50])\n', (2211, 2217), True, 'import numpy as np\n'), ((2231, 2257), 'niche_vlaanderen.Acidity', 'niche_vlaanderen.Acidity', ([], {}), '()\n', (2255, 2257), False, 'import niche_vlaanderen\n'), ((2383, 2417), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['(3)', 'result'], {}), '(3, result)\n', (2406, 2417), True, 'import numpy as np\n'), ((2468, 2494), 'niche_vlaanderen.Acidity', 'niche_vlaanderen.Acidity', ([], {}), '()\n', (2492, 2494), False, 'import niche_vlaanderen\n'), ((3328, 3368), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['acidity', 'result'], {}), '(acidity, result)\n', (3351, 3368), True, 'import numpy as np\n'), ((3422, 3448), 'niche_vlaanderen.Acidity', 'niche_vlaanderen.Acidity', ([], {}), '()\n', (3446, 3448), False, 'import 
niche_vlaanderen\n'), ((3469, 3482), 'numpy.array', 'np.array', (['[0]'], {}), '([0])\n', (3477, 3482), True, 'import numpy as np\n'), ((3504, 3517), 'numpy.array', 'np.array', (['[0]'], {}), '([0])\n', (3512, 3517), True, 'import numpy as np\n'), ((3537, 3551), 'numpy.array', 'np.array', (['[-1]'], {}), '([-1])\n', (3545, 3551), True, 'import numpy as np\n'), ((3573, 3586), 'numpy.array', 'np.array', (['[1]'], {}), '([1])\n', (3581, 3586), True, 'import numpy as np\n'), ((3605, 3619), 'numpy.array', 'np.array', (['[20]'], {}), '([20])\n', (3613, 3619), True, 'import numpy as np\n'), ((3634, 3648), 'numpy.array', 'np.array', (['[50]'], {}), '([50])\n', (3642, 3648), True, 'import numpy as np\n'), ((3662, 3688), 'niche_vlaanderen.Acidity', 'niche_vlaanderen.Acidity', ([], {}), '()\n', (3686, 3688), False, 'import niche_vlaanderen\n'), ((3899, 3925), 'niche_vlaanderen.Acidity', 'niche_vlaanderen.Acidity', ([], {}), '()\n', (3923, 3925), False, 'import niche_vlaanderen\n'), ((3946, 3959), 'numpy.array', 'np.array', (['[0]'], {}), '([0])\n', (3954, 3959), True, 'import numpy as np\n'), ((3981, 3996), 'numpy.array', 'np.array', (['[500]'], {}), '([500])\n', (3989, 3996), True, 'import numpy as np\n'), ((4016, 4030), 'numpy.array', 'np.array', (['[14]'], {}), '([14])\n', (4024, 4030), True, 'import numpy as np\n'), ((4052, 4065), 'numpy.array', 'np.array', (['[1]'], {}), '([1])\n', (4060, 4065), True, 'import numpy as np\n'), ((4084, 4098), 'numpy.array', 'np.array', (['[20]'], {}), '([20])\n', (4092, 4098), True, 'import numpy as np\n'), ((4113, 4127), 'numpy.array', 'np.array', (['[50]'], {}), '([50])\n', (4121, 4127), True, 'import numpy as np\n'), ((570, 594), 'numpy.isclose', 'np.isclose', (['data', 'nodata'], {}), '(data, nodata)\n', (580, 594), True, 'import numpy as np\n'), ((628, 652), 'numpy.isclose', 'np.isclose', (['data', 'nodata'], {}), '(data, nodata)\n', (638, 652), True, 'import numpy as np\n'), ((940, 956), 'numpy.array', 'np.array', (['[1, 9]'], {}), 
'([1, 9])\n', (948, 956), True, 'import numpy as np\n'), ((1693, 1706), 'numpy.array', 'np.array', (['[3]'], {}), '([3])\n', (1701, 1706), True, 'import numpy as np\n'), ((2677, 2704), 'numpy.round', 'np.round', (['(soil_code / 10000)'], {}), '(soil_code / 10000)\n', (2685, 2704), True, 'import numpy as np\n'), ((3136, 3153), 'numpy.isnan', 'np.isnan', (['acidity'], {}), '(acidity)\n', (3144, 3153), True, 'import numpy as np\n'), ((3702, 3731), 'pytest.raises', 'pytest.raises', (['NicheException'], {}), '(NicheException)\n', (3715, 3731), False, 'import pytest\n'), ((4141, 4170), 'pytest.raises', 'pytest.raises', (['NicheException'], {}), '(NicheException)\n', (4154, 4170), False, 'import pytest\n')] |
# MIT License
#
# Copyright (C) The Adversarial Robustness Toolbox (ART) Authors 2022
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the
# Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
This module implements a Hidden Trigger Backdoor attack on Neural Networks.
| Paper link: https://arxiv.org/abs/1910.00033
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
from typing import List, Optional, Tuple, Union, TYPE_CHECKING
import numpy as np
from art.attacks.attack import PoisoningAttackWhiteBox
from art.attacks.poisoning.backdoor_attack import PoisoningAttackBackdoor
from art.estimators import BaseEstimator, NeuralNetworkMixin
from art.estimators.classification.classifier import ClassifierMixin
from art.estimators.classification.pytorch import PyTorchClassifier
from art.estimators.classification.keras import KerasClassifier
from art.estimators.classification.tensorflow import TensorFlowV2Classifier
from art.attacks.poisoning.hidden_trigger_backdoor.hidden_trigger_backdoor_pytorch import (
HiddenTriggerBackdoorPyTorch,
)
from art.attacks.poisoning.hidden_trigger_backdoor.hidden_trigger_backdoor_keras import (
HiddenTriggerBackdoorKeras,
)
if TYPE_CHECKING:
from art.utils import CLASSIFIER_NEURALNETWORK_TYPE
# Module-level logger, named after this module per the usual convention.
logger = logging.getLogger(__name__)
class HiddenTriggerBackdoor(PoisoningAttackWhiteBox):
    """
    Implementation of the Hidden Trigger Backdoor Attack by Saha et al. 2019,
    "Hidden Trigger Backdoor Attacks".

    | Paper link: https://arxiv.org/abs/1910.00033
    """

    # Every name listed here must match an instance attribute set in
    # __init__, because ART's get_params/set_params iterate this list.
    # Fixed: the list previously contained "stopping_tol", which is not an
    # attribute of this class -- the constructor stores `stopping_threshold`.
    attack_params = PoisoningAttackWhiteBox.attack_params + [
        "target",
        "backdoor",
        "feature_layer",
        "source",
        "eps",
        "learning_rate",
        "decay_coeff",
        "decay_iter",
        "stopping_threshold",
        "max_iter",
        "poison_percent",
        "batch_size",
        "verbose",
        "print_iter",
    ]

    _estimator_requirements = (BaseEstimator, NeuralNetworkMixin, ClassifierMixin)

    def __init__(
        self,
        classifier: "CLASSIFIER_NEURALNETWORK_TYPE",
        target: np.ndarray,
        source: np.ndarray,
        feature_layer: Union[str, int],
        backdoor: PoisoningAttackBackdoor,
        eps: float = 0.1,
        learning_rate: float = 0.001,
        decay_coeff: float = 0.95,
        decay_iter: Union[int, List[int]] = 2000,
        stopping_threshold: float = 10,
        max_iter: int = 5000,
        batch_size: float = 100,
        poison_percent: float = 0.1,
        is_index: bool = False,
        verbose: bool = True,
        print_iter: int = 100,
    ) -> None:
        """
        Creates a new Hidden Trigger Backdoor poisoning attack.

        :param classifier: A trained neural network classifier.
        :param target: The target class/indices to poison. Triggers added to inputs not in the target class will
                       result in misclassifications to the target class. If an int, it represents a label.
                       Otherwise, it is an array of indices.
        :param source: The class/indices which will have a trigger added to cause misclassification
                       If an int, it represents a label. Otherwise, it is an array of indices.
        :param feature_layer: The name of the feature representation layer
        :param backdoor: A PoisoningAttackBackdoor that adds a backdoor trigger to the input.
        :param eps: Maximum perturbation that the attacker can introduce.
        :param learning_rate: The learning rate of clean-label attack optimization.
        :param decay_coeff: The decay coefficient of the learning rate.
        :param decay_iter: The number of iterations before the learning rate decays
        :param stopping_threshold: Stop iterations after loss is less than this threshold.
        :param max_iter: The maximum number of iterations for the attack.
        :param batch_size: The number of samples to draw per batch.
        :param poison_percent: The percentage of the data to poison. This is ignored if indices are provided
        :param is_index: If true, the source and target params are assumed to represent indices rather
                         than a class label. poison_percent is ignored if true.
        :param verbose: Show progress bars.
        :param print_iter: The number of iterations to print the current loss progress.
        """
        super().__init__(classifier=classifier)  # type: ignore
        self.target = target
        self.source = source
        self.feature_layer = feature_layer
        self.backdoor = backdoor
        self.eps = eps
        self.learning_rate = learning_rate
        self.decay_coeff = decay_coeff
        self.decay_iter = decay_iter
        self.stopping_threshold = stopping_threshold
        self.max_iter = max_iter
        self.batch_size = batch_size
        self.poison_percent = poison_percent
        self.is_index = is_index
        self.verbose = verbose
        self.print_iter = print_iter
        self._check_params()

        # Dispatch to the framework-specific implementation; the wrapper
        # forwards every parameter unchanged.
        if isinstance(self.estimator, PyTorchClassifier):
            self._attack = HiddenTriggerBackdoorPyTorch(
                classifier=classifier,  # type: ignore
                target=target,
                source=source,
                backdoor=backdoor,
                feature_layer=feature_layer,
                eps=eps,
                learning_rate=learning_rate,
                decay_coeff=decay_coeff,
                decay_iter=decay_iter,
                stopping_threshold=stopping_threshold,
                max_iter=max_iter,
                batch_size=batch_size,
                poison_percent=poison_percent,
                is_index=is_index,
                verbose=verbose,
                print_iter=print_iter,
            )

        elif isinstance(self.estimator, (KerasClassifier, TensorFlowV2Classifier)):
            self._attack = HiddenTriggerBackdoorKeras(  # type: ignore
                classifier=classifier,  # type: ignore
                target=target,
                source=source,
                backdoor=backdoor,
                feature_layer=feature_layer,
                eps=eps,
                learning_rate=learning_rate,
                decay_coeff=decay_coeff,
                decay_iter=decay_iter,
                stopping_threshold=stopping_threshold,
                max_iter=max_iter,
                batch_size=batch_size,
                poison_percent=poison_percent,
                is_index=is_index,
                verbose=verbose,
                print_iter=print_iter,
            )

        else:
            raise ValueError("Only Pytorch, Keras, and TensorFlowV2 classifiers are supported")

    def poison(  # pylint: disable=W0221
        self, x: np.ndarray, y: Optional[np.ndarray] = None, **kwargs
    ) -> Tuple[np.ndarray, np.ndarray]:
        """
        Calls perturbation function on the dataset x and returns only the perturbed inputs and their
        indices in the dataset.

        :param x: An array in the shape NxCxWxH with the points to draw source and target samples from.
                  Source indicates the class(es) that the backdoor would be added to to cause
                  misclassification into the target label.
                  Target indicates the class that the backdoor should cause misclassification into.
        :param y: The labels of the provided samples. If none, we will use the classifier to label the
                  data.
        :return: An tuple holding the `(poisoning_examples, poisoning_labels)`.
        """
        return self._attack.poison(x, y, **kwargs)

    def _check_params(self) -> None:
        # Validate constructor arguments; raises on the first violation.
        if not isinstance(self.target, np.ndarray) or not isinstance(self.source, np.ndarray):
            raise ValueError("Target and source must be arrays")

        if np.array_equal(self.target, self.source):
            raise ValueError("Target and source values can't be the same")

        if self.learning_rate <= 0:
            raise ValueError("Learning rate must be strictly positive")

        if not isinstance(self.backdoor, PoisoningAttackBackdoor):
            raise TypeError("Backdoor must be of type PoisoningAttackBackdoor")

        if self.eps < 0:
            raise ValueError("The perturbation size `eps` has to be non-negative.")

        if not isinstance(self.feature_layer, (str, int)):
            raise TypeError("Feature layer should be a string or int")

        if isinstance(self.feature_layer, int):
            if not 0 <= self.feature_layer < len(self.estimator.layer_names):
                raise ValueError("feature_layer is not a non-negative integer")

        if self.decay_coeff <= 0:
            raise ValueError("Decay coefficient must be positive")

        if not 0 < self.poison_percent <= 1:
            raise ValueError("poison_percent must be between 0 (exclusive) and 1 (inclusive)")

        if not isinstance(self.verbose, bool):
            raise ValueError("The argument `verbose` has to be of type bool.")
| [
"logging.getLogger",
"numpy.array_equal",
"art.attacks.poisoning.hidden_trigger_backdoor.hidden_trigger_backdoor_keras.HiddenTriggerBackdoorKeras",
"art.attacks.poisoning.hidden_trigger_backdoor.hidden_trigger_backdoor_pytorch.HiddenTriggerBackdoorPyTorch"
] | [((2255, 2282), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (2272, 2282), False, 'import logging\n'), ((8806, 8846), 'numpy.array_equal', 'np.array_equal', (['self.target', 'self.source'], {}), '(self.target, self.source)\n', (8820, 8846), True, 'import numpy as np\n'), ((6084, 6492), 'art.attacks.poisoning.hidden_trigger_backdoor.hidden_trigger_backdoor_pytorch.HiddenTriggerBackdoorPyTorch', 'HiddenTriggerBackdoorPyTorch', ([], {'classifier': 'classifier', 'target': 'target', 'source': 'source', 'backdoor': 'backdoor', 'feature_layer': 'feature_layer', 'eps': 'eps', 'learning_rate': 'learning_rate', 'decay_coeff': 'decay_coeff', 'decay_iter': 'decay_iter', 'stopping_threshold': 'stopping_threshold', 'max_iter': 'max_iter', 'batch_size': 'batch_size', 'poison_percent': 'poison_percent', 'is_index': 'is_index', 'verbose': 'verbose', 'print_iter': 'print_iter'}), '(classifier=classifier, target=target, source=\n source, backdoor=backdoor, feature_layer=feature_layer, eps=eps,\n learning_rate=learning_rate, decay_coeff=decay_coeff, decay_iter=\n decay_iter, stopping_threshold=stopping_threshold, max_iter=max_iter,\n batch_size=batch_size, poison_percent=poison_percent, is_index=is_index,\n verbose=verbose, print_iter=print_iter)\n', (6112, 6492), False, 'from art.attacks.poisoning.hidden_trigger_backdoor.hidden_trigger_backdoor_pytorch import HiddenTriggerBackdoorPyTorch\n'), ((6870, 7276), 'art.attacks.poisoning.hidden_trigger_backdoor.hidden_trigger_backdoor_keras.HiddenTriggerBackdoorKeras', 'HiddenTriggerBackdoorKeras', ([], {'classifier': 'classifier', 'target': 'target', 'source': 'source', 'backdoor': 'backdoor', 'feature_layer': 'feature_layer', 'eps': 'eps', 'learning_rate': 'learning_rate', 'decay_coeff': 'decay_coeff', 'decay_iter': 'decay_iter', 'stopping_threshold': 'stopping_threshold', 'max_iter': 'max_iter', 'batch_size': 'batch_size', 'poison_percent': 'poison_percent', 'is_index': 'is_index', 'verbose': 
'verbose', 'print_iter': 'print_iter'}), '(classifier=classifier, target=target, source=\n source, backdoor=backdoor, feature_layer=feature_layer, eps=eps,\n learning_rate=learning_rate, decay_coeff=decay_coeff, decay_iter=\n decay_iter, stopping_threshold=stopping_threshold, max_iter=max_iter,\n batch_size=batch_size, poison_percent=poison_percent, is_index=is_index,\n verbose=verbose, print_iter=print_iter)\n', (6896, 7276), False, 'from art.attacks.poisoning.hidden_trigger_backdoor.hidden_trigger_backdoor_keras import HiddenTriggerBackdoorKeras\n')] |
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
from sklearn.linear_model import LogisticRegression
from sklearn.neighbors import KNeighborsClassifier
from sklearn.tree import DecisionTreeRegressor
from sklearn import tree
def applySciKitTools(x, y, x_test, y_test):
    """Fit three scikit-learn classifiers and print their test accuracy.

    Each model is trained on (x, y) and scored on (x_test, y_test); the
    accuracy is printed with a Spanish label matching the file's style.
    """
    # (label, estimator) pairs, trained and scored identically.
    # Fixed: the first label printed "Regresion Lineal" (linear regression)
    # although the model is LogisticRegression; also made the second label's
    # punctuation consistent with the others.
    modelos = [
        ("Precision Regresion Logistica Scikit-Learn:",
         LogisticRegression()),
        ("Precision Arboles de Decision Scikit-Learn:",
         tree.DecisionTreeClassifier(max_depth=2, random_state=30)),
        ("Precision 50-NearestNeighbors Scikit-Learn:",
         KNeighborsClassifier(n_neighbors=50, algorithm='brute')),
    ]
    for etiqueta, model in modelos:
        model.fit(x, y)
        print(etiqueta, model.score(x_test, y_test))
def normalize(x, y):
    """Prepend a bias column of ones to x and reshape y into a column vector.

    :param x: 2-D feature matrix of shape (n_samples, n_features).
    :param y: target values, reshaped to shape (n_samples, 1).
    :return: tuple (x_with_bias, y_column).
    """
    n_rows, _ = x.shape
    bias_col = np.ones((n_rows, 1))
    x_aug = np.append(bias_col, x, axis=1)
    return x_aug, y.reshape(-1, 1)
def GlassDS(file):
    """Run the three benchmark classifiers on the Glass Identification data.

    Reads the space-delimited file, splits 80/20, adds a bias column with
    normalize() and prints the accuracy of each model.
    """
    feature_cols = ['RI', 'Na', 'Mg', 'Al', 'Si', 'K', 'Ca', 'Ba', 'Fe']
    frame = pd.read_csv(file, delimiter=' ')
    frame = frame.drop(columns=['ID'], axis=1)
    # 'Type' is the class to predict; the remaining columns are features.
    x_train, x_test, y_train, y_test = train_test_split(
        frame[feature_cols], frame['Type'], test_size=0.2, random_state=40)
    x_train, y_train = normalize(x_train.values, y_train.values)
    # x_test is passed as a DataFrame (no .values); normalize() handles it
    # through numpy.
    x_test, y_test = normalize(x_test, y_test.values)
    print("Glass Identification Database")
    applySciKitTools(x_train, y_train, x_test, y_test)
def IonosphereDS(file):
    """Run the three benchmark classifiers on the Ionosphere dataset.

    'good'/'bad' labels are encoded as 1/0, the data is split 80/20 and the
    accuracy of each model is printed.
    """
    frame = pd.read_csv(file, delimiter=' ')
    frame = frame.drop(columns=['ID'], axis=1)
    label_codes = {'good': 1, 'bad': 0}
    # A KeyError here would surface an unexpected label value.
    frame.Class = [label_codes[item] for item in frame.Class]
    # Features are the 34 columns V1..V34.
    feature_cols = ['V%d' % i for i in range(1, 35)]
    x_train, x_test, y_train, y_test = train_test_split(
        frame[feature_cols], frame['Class'], test_size=0.2, random_state=40)
    x_train, y_train = normalize(x_train.values, y_train.values)
    # x_test is passed as a DataFrame (no .values); normalize() handles it
    # through numpy.
    x_test, y_test = normalize(x_test, y_test.values)
    print("Johns Hopkins University Ionosphere database")
    applySciKitTools(x_train, y_train, x_test, y_test)
def main():
    # Run the two dataset experiments back to back; each reads its data
    # file from the working directory.
    GlassDS("Glass.txt")
    IonosphereDS("Ionosphere.txt")
if __name__ == '__main__':
main() | [
"numpy.ones",
"pandas.read_csv",
"sklearn.model_selection.train_test_split",
"sklearn.tree.DecisionTreeClassifier",
"sklearn.neighbors.KNeighborsClassifier",
"sklearn.linear_model.LogisticRegression",
"numpy.append"
] | [((392, 412), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {}), '()\n', (410, 412), False, 'from sklearn.linear_model import LogisticRegression\n'), ((572, 629), 'sklearn.tree.DecisionTreeClassifier', 'tree.DecisionTreeClassifier', ([], {'max_depth': '(2)', 'random_state': '(30)'}), '(max_depth=2, random_state=30)\n', (599, 629), False, 'from sklearn import tree\n'), ((791, 846), 'sklearn.neighbors.KNeighborsClassifier', 'KNeighborsClassifier', ([], {'n_neighbors': '(50)', 'algorithm': '"""brute"""'}), "(n_neighbors=50, algorithm='brute')\n", (811, 846), False, 'from sklearn.neighbors import KNeighborsClassifier\n'), ((1017, 1028), 'numpy.ones', 'np.ones', (['m1'], {}), '(m1)\n', (1024, 1028), True, 'import numpy as np\n'), ((1061, 1086), 'numpy.append', 'np.append', (['aux', 'x'], {'axis': '(1)'}), '(aux, x, axis=1)\n', (1070, 1086), True, 'import numpy as np\n'), ((1169, 1201), 'pandas.read_csv', 'pd.read_csv', (['file'], {'delimiter': '""" """'}), "(file, delimiter=' ')\n", (1180, 1201), True, 'import pandas as pd\n'), ((1454, 1508), 'sklearn.model_selection.train_test_split', 'train_test_split', (['x', 'y'], {'test_size': '(0.2)', 'random_state': '(40)'}), '(x, y, test_size=0.2, random_state=40)\n', (1470, 1508), False, 'from sklearn.model_selection import train_test_split\n'), ((1873, 1905), 'pandas.read_csv', 'pd.read_csv', (['file'], {'delimiter': '""" """'}), "(file, delimiter=' ')\n", (1884, 1905), True, 'import pandas as pd\n'), ((2388, 2442), 'sklearn.model_selection.train_test_split', 'train_test_split', (['x', 'y'], {'test_size': '(0.2)', 'random_state': '(40)'}), '(x, y, test_size=0.2, random_state=40)\n', (2404, 2442), False, 'from sklearn.model_selection import train_test_split\n')] |
#!/usr/bin/env python
# coding: utf-8
# In[2]:
from tensorflow.keras.layers import BatchNormalization
from tensorflow.keras.utils import to_categorical
from sklearn.preprocessing import LabelEncoder
import gensim
from sklearn.metrics import classification_report, accuracy_score
from sklearn.model_selection import train_test_split
from tensorflow.keras.layers import Dense, Embedding, LSTM, Conv1D, MaxPool1D
from tensorflow.keras.models import Sequential
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.preprocessing.text import Tokenizer
from wordcloud import WordCloud
import re
import nltk
import seaborn as sns
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import warnings
warnings.filterwarnings('ignore')
# In[3]:
# In[4]:
# Load the training data (title pairs with a relatedness label).
fake = pd.read_csv("train.csv")
fake.head()
# In[5]:
# Print the per-label counts.
# NOTE(review): Series.iteritems() was removed in pandas 2.x; items() is the
# forward-compatible spelling -- confirm the pandas version in use.
for key, count in fake.label.value_counts().iteritems():
    print(f"{key}:\t{count}")
# Getting Total Rows
print(f"Total Records:\t{fake.shape[0]}")
# In[7]:
# Class-balance bar chart of the label column.
plt.figure(figsize=(8, 5))
sns.countplot("label", data=fake)
plt.show()
# Drop identifier columns that carry no predictive signal.
fake = fake.drop(["id", "tid1", "tid2"], axis=1)
# In[17]:
data = fake
data.head()
# In[18]:
# Concatenate the two titles into one text field and drop the second column.
data["title1_en"] = data["title1_en"] + " " + data["title2_en"]
data = data.drop(["title2_en"], axis=1)
data.head
# In[19]:
y = data["label"].values
# Converting X to a format acceptable by gensim, removing punctuation and
# stopwords in the process.
X = []
stop_words = set(nltk.corpus.stopwords.words("english"))
tokenizer = nltk.tokenize.RegexpTokenizer(r'\w+')
for par in data["title1_en"].values:
    tmp = []
    sentences = nltk.sent_tokenize(par)
    for sent in sentences:
        sent = sent.lower()
        tokens = tokenizer.tokenize(sent)
        # Keep tokens that are not stopwords and longer than one character.
        filtered_words = [w.strip()
                          for w in tokens if w not in stop_words and len(w) > 1]
        tmp.extend(filtered_words)
    X.append(tmp)
del data
# In[27]:
# Dimension of vectors we are generating
EMBEDDING_DIM = 100
# Creating Word Vectors by Word2Vec Method (takes time...)
# NOTE(review): `size=` is the gensim 3.x keyword; gensim 4.x renamed it to
# `vector_size=` -- confirm the installed gensim version.
w2v_model = gensim.models.Word2Vec(
    sentences=X, size=EMBEDDING_DIM, window=5, min_count=1)
# vocab size
len(w2v_model.wv.vocab)
# In[23]:
# Fit a Keras tokenizer on the cleaned text; words become integer ids.
tokenizer = Tokenizer()
tokenizer.fit_on_texts(X)
X = tokenizer.texts_to_sequences(X)
# In[24]:
X[0][:10]
# In[25]:
# Let's check a few word-to-number mappings.
# The mapping is preserved in the tokenizer's word_index dictionary.
word_index = tokenizer.word_index
for word, num in word_index.items():
    print(f"{word} -> {num}")
    if num == 10:
        break
# In[26]:
# For determining the input size: the histogram of article lengths shows
# most articles are short, so a fixed maxlen with padding/truncation is used
# below.
plt.hist([len(x) for x in X], bins=500)
plt.show()
# The distribution is heavily skewed; the long outliers get truncated.
# In[34]:
nos = np.array([len(x) for x in X])
len(nos[nos < 100])
# Count of articles shorter than 100 tokens (the chosen maxlen).
# In[35]:
# Pad articles shorter than maxlen with zeros and truncate longer ones.
maxlen = 100
# Making all news of size maxlen defined above
X = pad_sequences(X, maxlen=maxlen)
# In[36]:
# Every sequence now has maxlen entries; shorter ones were padded with 0.
# 0 is not associated with any word, as the word mapping starts from 1;
# 0 will also be used later if an unknown word is encountered in the test set.
len(X[0])
# In[37]:
# Adding 1 because of the reserved 0 index: the Embedding layer keeps one
# extra all-zero vector for unknown/padded tokens, so the vocabulary size
# increases by 1.
vocab_size = len(tokenizer.word_index) + 1
# In[38]:
def get_weight_matrix(model, vocab, embedding_dim=None):
    """Build an embedding-weight matrix aligned with the tokenizer indices.

    :param model: mapping from word to embedding vector -- a gensim Word2Vec
        model indexed as ``model[word]`` or any dict-like object.
    :param vocab: dict mapping word -> integer index (``tokenizer.word_index``).
    :param embedding_dim: width of each embedding vector. Defaults to the
        module-level ``EMBEDDING_DIM``, preserving the original behaviour;
        passing it explicitly makes the function reusable with other models.
    :return: array of shape ``(len(vocab) + 1, embedding_dim)``; row 0 stays
        all zeros for the reserved padding/unknown index.
    """
    if embedding_dim is None:
        # Backward-compatible default: fall back to the script-level constant.
        embedding_dim = EMBEDDING_DIM
    # Total vocabulary size plus 1 for the reserved 0 (unknown/padding) index.
    n_rows = len(vocab) + 1
    weight_matrix = np.zeros((n_rows, embedding_dim))
    # Store each word's vector at the row given by the tokenizer's mapping.
    for word, i in vocab.items():
        weight_matrix[i] = model[word]
    return weight_matrix
# In[53]:
# Get embedding vectors from word2vec and use them as the weights of a
# non-trainable Keras embedding layer.
embedding_vectors = get_weight_matrix(w2v_model, word_index)
# In[54]:
# Defining Neural Network
model = Sequential()
# Non-trainable embedding layer initialised from the word2vec vectors.
model.add(Embedding(vocab_size, output_dim=EMBEDDING_DIM, weights=[
          embedding_vectors], input_length=maxlen, trainable=False))
# LSTM
model.add(LSTM(units=128))
model.add(BatchNormalization())
# Three-way softmax output, matching the three label classes.
model.add(Dense(3, activation='softmax'))
model.compile(optimizer='adam',
              loss='categorical_crossentropy', metrics=['acc'])
# The matrix has been copied into the layer; free the local reference.
del embedding_vectors
# In[55]:
model.summary()
# In[56]:
# One-hot encode the string labels for categorical_crossentropy.
encoder = LabelEncoder()
encoder.fit(y)
labels_val = encoder.transform(y)
labels_val = to_categorical(labels_val)
val_y = labels_val
val_y
# In[57]:
# Train test split (default 75/25 split).
X_train, X_test, y_train, y_test = train_test_split(X, val_y)
# In[ ]:
model.fit(X_train, y_train, validation_split=0.3, epochs=2)
model.save("KAG")
| [
"sklearn.preprocessing.LabelEncoder",
"pandas.read_csv",
"tensorflow.keras.preprocessing.sequence.pad_sequences",
"tensorflow.keras.layers.BatchNormalization",
"tensorflow.keras.layers.Dense",
"nltk.corpus.stopwords.words",
"gensim.models.Word2Vec",
"nltk.sent_tokenize",
"nltk.tokenize.RegexpTokeniz... | [((747, 780), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (770, 780), False, 'import warnings\n'), ((812, 836), 'pandas.read_csv', 'pd.read_csv', (['"""train.csv"""'], {}), "('train.csv')\n", (823, 836), True, 'import pandas as pd\n'), ((1049, 1075), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(8, 5)'}), '(figsize=(8, 5))\n', (1059, 1075), True, 'import matplotlib.pyplot as plt\n'), ((1076, 1109), 'seaborn.countplot', 'sns.countplot', (['"""label"""'], {'data': 'fake'}), "('label', data=fake)\n", (1089, 1109), True, 'import seaborn as sns\n'), ((1110, 1120), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1118, 1120), True, 'import matplotlib.pyplot as plt\n'), ((1551, 1588), 'nltk.tokenize.RegexpTokenizer', 'nltk.tokenize.RegexpTokenizer', (['"""\\\\w+"""'], {}), "('\\\\w+')\n", (1580, 1588), False, 'import nltk\n'), ((2103, 2181), 'gensim.models.Word2Vec', 'gensim.models.Word2Vec', ([], {'sentences': 'X', 'size': 'EMBEDDING_DIM', 'window': '(5)', 'min_count': '(1)'}), '(sentences=X, size=EMBEDDING_DIM, window=5, min_count=1)\n', (2125, 2181), False, 'import gensim\n'), ((2250, 2261), 'tensorflow.keras.preprocessing.text.Tokenizer', 'Tokenizer', ([], {}), '()\n', (2259, 2261), False, 'from tensorflow.keras.preprocessing.text import Tokenizer\n'), ((2875, 2885), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2883, 2885), True, 'import matplotlib.pyplot as plt\n'), ((3275, 3306), 'tensorflow.keras.preprocessing.sequence.pad_sequences', 'pad_sequences', (['X'], {'maxlen': 'maxlen'}), '(X, maxlen=maxlen)\n', (3288, 3306), False, 'from tensorflow.keras.preprocessing.sequence import pad_sequences\n'), ((4529, 4541), 'tensorflow.keras.models.Sequential', 'Sequential', ([], {}), '()\n', (4539, 4541), False, 'from tensorflow.keras.models import Sequential\n'), ((4993, 5007), 'sklearn.preprocessing.LabelEncoder', 'LabelEncoder', ([], {}), '()\n', (5005, 
5007), False, 'from sklearn.preprocessing import LabelEncoder\n'), ((5070, 5096), 'tensorflow.keras.utils.to_categorical', 'to_categorical', (['labels_val'], {}), '(labels_val)\n', (5084, 5096), False, 'from tensorflow.keras.utils import to_categorical\n'), ((5191, 5217), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'val_y'], {}), '(X, val_y)\n', (5207, 5217), False, 'from sklearn.model_selection import train_test_split\n'), ((1499, 1537), 'nltk.corpus.stopwords.words', 'nltk.corpus.stopwords.words', (['"""english"""'], {}), "('english')\n", (1526, 1537), False, 'import nltk\n'), ((1655, 1678), 'nltk.sent_tokenize', 'nltk.sent_tokenize', (['par'], {}), '(par)\n', (1673, 1678), False, 'import nltk\n'), ((4095, 4132), 'numpy.zeros', 'np.zeros', (['(vocab_size, EMBEDDING_DIM)'], {}), '((vocab_size, EMBEDDING_DIM))\n', (4103, 4132), True, 'import numpy as np\n'), ((4585, 4703), 'tensorflow.keras.layers.Embedding', 'Embedding', (['vocab_size'], {'output_dim': 'EMBEDDING_DIM', 'weights': '[embedding_vectors]', 'input_length': 'maxlen', 'trainable': '(False)'}), '(vocab_size, output_dim=EMBEDDING_DIM, weights=[embedding_vectors],\n input_length=maxlen, trainable=False)\n', (4594, 4703), False, 'from tensorflow.keras.layers import Dense, Embedding, LSTM, Conv1D, MaxPool1D\n'), ((4729, 4744), 'tensorflow.keras.layers.LSTM', 'LSTM', ([], {'units': '(128)'}), '(units=128)\n', (4733, 4744), False, 'from tensorflow.keras.layers import Dense, Embedding, LSTM, Conv1D, MaxPool1D\n'), ((4756, 4776), 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (4774, 4776), False, 'from tensorflow.keras.layers import BatchNormalization\n'), ((4788, 4818), 'tensorflow.keras.layers.Dense', 'Dense', (['(3)'], {'activation': '"""softmax"""'}), "(3, activation='softmax')\n", (4793, 4818), False, 'from tensorflow.keras.layers import Dense, Embedding, LSTM, Conv1D, MaxPool1D\n')] |
import numpy as np
import os
# plotting settings
import plot_settings
import matplotlib.pyplot as plt
import sys
# make the package root importable when this script is run directly
sys.path.append(os.path.join(os.path.dirname(__file__), "..",))
from frius import time2distance, das_beamform, image_bf_data
"""
User parameters
"""
# NOTE(review): min_depth/max_depth appear unused below — confirm they are needed.
min_depth = 0.01575
max_depth = 0.075
"""
Probe + raw date
"""
# probe/medium parameters
samp_freq = 50e6
center_freq = 5e6
dx = 0.9375*1e-3
speed_sound = 6300
bw_pulse = 1.0
pulse_dur = 500e-9
n_cycles = 0.5
# signed 16 bit integer [-128,127]
ndt_rawdata = np.genfromtxt(os.path.join(os.path.dirname(__file__), '..', 'data', 'ndt_rawdata.csv'), delimiter=',')
n_samples = len(ndt_rawdata)
# one timestamp per sample at the sampling rate
time_vec = np.arange(n_samples)/samp_freq
depth = time2distance(time_vec[-1], speed_sound)
n_elem_tx = ndt_rawdata.shape[1]
# element positions along the probe, spaced by the pitch dx
probe_geometry = np.arange(n_elem_tx)*dx
"""
DAS beamform
"""
# delay-and-sum beamforming; [0] keeps only the first return value
res = das_beamform(ndt_rawdata.T, samp_freq,
    dx, probe_geometry, center_freq, speed_sound, depth)[0]
scal_fact = 1e2
image_bf_data(res, probe_geometry, depth, dynamic_range=40, scal_fact=scal_fact)
plt.xlabel("Lateral [cm]")
plt.ylabel("Axial [cm]")
# invert the axial axis so depth increases downwards
plt.ylim([depth*scal_fact, 0])
plt.tight_layout()
fp = os.path.join(os.path.dirname(__file__), "figures", "_fig4p4a.png")
plt.savefig(fp, dpi=300)
"""
Single RF signal
"""
chan_idx = 0
scal_fact = 1e6
plt.figure()
plt.plot(scal_fact*time_vec, ndt_rawdata[:,chan_idx])
plt.grid()
plt.xlabel("Time [microseconds]")
ax = plt.gca()
# amplitude is in arbitrary units, so hide the y tick labels
ax.axes.yaxis.set_ticklabels([])
plt.tight_layout()
fp = os.path.join(os.path.dirname(__file__), "figures", "_fig4p4b.pdf")
plt.savefig(fp, dpi=300)
plt.show()
| [
"matplotlib.pyplot.grid",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.ylabel",
"numpy.arange",
"matplotlib.pyplot.gca",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"os.path.dirname",
"matplotlib.pyplot.figure",
"frius.das_beamform",
"matplotlib.pyplot.tight_layout",
"matplotlib.p... | [((708, 748), 'frius.time2distance', 'time2distance', (['time_vec[-1]', 'speed_sound'], {}), '(time_vec[-1], speed_sound)\n', (721, 748), False, 'from frius import time2distance, das_beamform, image_bf_data\n'), ((970, 1055), 'frius.image_bf_data', 'image_bf_data', (['res', 'probe_geometry', 'depth'], {'dynamic_range': '(40)', 'scal_fact': 'scal_fact'}), '(res, probe_geometry, depth, dynamic_range=40, scal_fact=scal_fact\n )\n', (983, 1055), False, 'from frius import time2distance, das_beamform, image_bf_data\n'), ((1051, 1077), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Lateral [cm]"""'], {}), "('Lateral [cm]')\n", (1061, 1077), True, 'import matplotlib.pyplot as plt\n'), ((1078, 1102), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Axial [cm]"""'], {}), "('Axial [cm]')\n", (1088, 1102), True, 'import matplotlib.pyplot as plt\n'), ((1103, 1135), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[depth * scal_fact, 0]'], {}), '([depth * scal_fact, 0])\n', (1111, 1135), True, 'import matplotlib.pyplot as plt\n'), ((1134, 1152), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (1150, 1152), True, 'import matplotlib.pyplot as plt\n'), ((1225, 1249), 'matplotlib.pyplot.savefig', 'plt.savefig', (['fp'], {'dpi': '(300)'}), '(fp, dpi=300)\n', (1236, 1249), True, 'import matplotlib.pyplot as plt\n'), ((1306, 1318), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1316, 1318), True, 'import matplotlib.pyplot as plt\n'), ((1319, 1375), 'matplotlib.pyplot.plot', 'plt.plot', (['(scal_fact * time_vec)', 'ndt_rawdata[:, chan_idx]'], {}), '(scal_fact * time_vec, ndt_rawdata[:, chan_idx])\n', (1327, 1375), True, 'import matplotlib.pyplot as plt\n'), ((1373, 1383), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (1381, 1383), True, 'import matplotlib.pyplot as plt\n'), ((1384, 1417), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Time [microseconds]"""'], {}), "('Time [microseconds]')\n", (1394, 1417), True, 'import 
matplotlib.pyplot as plt\n'), ((1423, 1432), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (1430, 1432), True, 'import matplotlib.pyplot as plt\n'), ((1466, 1484), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (1482, 1484), True, 'import matplotlib.pyplot as plt\n'), ((1557, 1581), 'matplotlib.pyplot.savefig', 'plt.savefig', (['fp'], {'dpi': '(300)'}), '(fp, dpi=300)\n', (1568, 1581), True, 'import matplotlib.pyplot as plt\n'), ((1583, 1593), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1591, 1593), True, 'import matplotlib.pyplot as plt\n'), ((669, 689), 'numpy.arange', 'np.arange', (['n_samples'], {}), '(n_samples)\n', (678, 689), True, 'import numpy as np\n'), ((800, 820), 'numpy.arange', 'np.arange', (['n_elem_tx'], {}), '(n_elem_tx)\n', (809, 820), True, 'import numpy as np\n'), ((854, 949), 'frius.das_beamform', 'das_beamform', (['ndt_rawdata.T', 'samp_freq', 'dx', 'probe_geometry', 'center_freq', 'speed_sound', 'depth'], {}), '(ndt_rawdata.T, samp_freq, dx, probe_geometry, center_freq,\n speed_sound, depth)\n', (866, 949), False, 'from frius import time2distance, das_beamform, image_bf_data\n'), ((1171, 1196), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (1186, 1196), False, 'import os\n'), ((1503, 1528), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (1518, 1528), False, 'import os\n'), ((144, 169), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (159, 169), False, 'import os\n'), ((553, 578), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (568, 578), False, 'import os\n')] |
import nltk
import sys
import os
import time
import itertools
import jieba
import numpy
import scipy.sparse
# encoding used to decode corpus sample files
TEXT_ENCODING="utf-8"
print("loading stop words...")
nltk.download('stopwords')
en_stop_words = set(nltk.corpus.stopwords.words('english'))
en_stop_words.add('nsbp') #skip &nsbp;
# one Chinese stop word per line; blank lines are discarded
with open("dict/stop.txt", 'r', encoding='utf-8') as stop_file:
    cn_stop_words={word for word in stop_file.read().split('\n') if word.strip()!=''}
print("english stopwords: {}".format(len(en_stop_words)))
print("chinese stopwords: {}".format(len(cn_stop_words)))
# character classes returned by ch_type()
OTHER_CH = 0
CN_CH = 1
EN_CH = 2
# shared Porter stemmer for English tokens
global_stemmer = nltk.stem.PorterStemmer()
def ch_type(ch):
    """Classify a single character as English letter, CJK ideograph, or other."""
    if 'a' <= ch <= 'z' or 'A' <= ch <= 'Z':
        return EN_CH
    if '\u4e00' <= ch <= '\u9fa5':
        return CN_CH
    return OTHER_CH
def get_word_list(document):
    """Tokenize a mixed English/Chinese file object into a filtered word list.

    English runs longer than two letters are lower-cased, stop-word filtered
    and stemmed; Chinese runs are segmented with jieba and stop-word filtered.
    """
    words = []
    for seg_type, chunk in itertools.groupby(document.read(), ch_type):
        text = ''.join(chunk)
        if seg_type == EN_CH and len(text) > 2:
            lowered = text.lower()
            if lowered not in en_stop_words:
                words.append(global_stemmer.stem(lowered))
        elif seg_type == CN_CH:
            words.extend(tok for tok in jieba.cut(text)
                         if tok not in cn_stop_words)
    return words
# word -> integer id, in order of first appearance
# (misspelled name kept: other code may reference it)
gloabl_word_index = {}
global_word_count = 0
def get_word_index(word):
    """Return the stable integer id for *word*, assigning the next free id on first sight."""
    global global_word_count
    index = gloabl_word_index.get(word)
    if index is not None:
        return index
    index = global_word_count
    gloabl_word_index[word] = index
    global_word_count += 1
    return index
# first CLI argument: root directory containing one sub-directory per category
root_dir=sys.argv[1]
print("loading data from {}...".format(root_dir))
cat_list = os.listdir(root_dir)
doc_cat = []
# COO triplets for the document x word count matrix
rows, cols, data = [],[],[]
for cat_index in range(len(cat_list)): # iterate category
    cat_dir = cat_list[cat_index]
    cat_path=os.path.join(root_dir, cat_dir)
    if not os.path.isdir(cat_path): continue
    process_time = 0.0
    for sample_file in os.listdir(cat_path): # iterate document under category
        sample_path = os.path.join(cat_path, sample_file)
        if os.path.isdir(sample_path): continue
        doc_index = len(doc_cat)
        doc_cat.append(cat_index)
        start_time = time.time()
        with open(sample_path,'r', encoding=TEXT_ENCODING, errors='ignore') as sample_file:
            word_list=get_word_list(sample_file)
            # sort first so groupby collapses identical words into a single run
            for word,v in itertools.groupby(sorted(word_list)):
                word_index=get_word_index(word)
                if word_index <0: continue
                rows.append(doc_index)
                cols.append(word_index)
                # run length == term frequency of this word in the document
                data.append(sum(1 for i in v))
        process_time = process_time + time.time() - start_time
    print("loading category: {}({}/{}), time: {}".format(
        cat_list[cat_index], cat_index +1, len(cat_list), process_time))
doc_word_matrix=scipy.sparse.coo_matrix((data, (rows,cols)))
target_value=numpy.array(doc_cat)
print("data shape:{}, target shape:{}".format(doc_word_matrix.shape, target_value.shape))
# persist the term-frequency matrix and the per-document category labels
scipy.sparse.save_npz("data_tf", doc_word_matrix)
numpy.savez_compressed("target", target=target_value)
| [
"os.listdir",
"jieba.cut",
"nltk.corpus.stopwords.words",
"nltk.download",
"os.path.join",
"nltk.stem.PorterStemmer",
"numpy.array",
"os.path.isdir",
"numpy.savez_compressed",
"time.time"
] | [((162, 188), 'nltk.download', 'nltk.download', (['"""stopwords"""'], {}), "('stopwords')\n", (175, 188), False, 'import nltk\n'), ((607, 632), 'nltk.stem.PorterStemmer', 'nltk.stem.PorterStemmer', ([], {}), '()\n', (630, 632), False, 'import nltk\n'), ((1711, 1731), 'os.listdir', 'os.listdir', (['root_dir'], {}), '(root_dir)\n', (1721, 1731), False, 'import os\n'), ((2844, 2864), 'numpy.array', 'numpy.array', (['doc_cat'], {}), '(doc_cat)\n', (2855, 2864), False, 'import numpy\n'), ((3010, 3063), 'numpy.savez_compressed', 'numpy.savez_compressed', (['"""target"""'], {'target': 'target_value'}), "('target', target=target_value)\n", (3032, 3063), False, 'import numpy\n'), ((209, 247), 'nltk.corpus.stopwords.words', 'nltk.corpus.stopwords.words', (['"""english"""'], {}), "('english')\n", (236, 247), False, 'import nltk\n'), ((1881, 1912), 'os.path.join', 'os.path.join', (['root_dir', 'cat_dir'], {}), '(root_dir, cat_dir)\n', (1893, 1912), False, 'import os\n'), ((2004, 2024), 'os.listdir', 'os.listdir', (['cat_path'], {}), '(cat_path)\n', (2014, 2024), False, 'import os\n'), ((1921, 1944), 'os.path.isdir', 'os.path.isdir', (['cat_path'], {}), '(cat_path)\n', (1934, 1944), False, 'import os\n'), ((2076, 2111), 'os.path.join', 'os.path.join', (['cat_path', 'sample_file'], {}), '(cat_path, sample_file)\n', (2088, 2111), False, 'import os\n'), ((2117, 2143), 'os.path.isdir', 'os.path.isdir', (['sample_path'], {}), '(sample_path)\n', (2130, 2143), False, 'import os\n'), ((2226, 2237), 'time.time', 'time.time', ([], {}), '()\n', (2235, 2237), False, 'import time\n'), ((1179, 1197), 'jieba.cut', 'jieba.cut', (['segment'], {}), '(segment)\n', (1188, 1197), False, 'import jieba\n'), ((2611, 2622), 'time.time', 'time.time', ([], {}), '()\n', (2620, 2622), False, 'import time\n')] |
# calculation tools
from __future__ import division as __division__
import numpy as np
# spot diagram rms calculator
def rms(xy_list):
    """Return the RMS spot radius of a spot diagram.

    Parameters
    ----------
    xy_list : sequence of two array-likes
        ``xy_list[0]`` holds the x coordinates and ``xy_list[1]`` the y
        coordinates of the ray intersection points.

    Returns
    -------
    float
        ``sqrt(mean(dx**2 + dy**2))`` measured about the spot centroid.
    """
    # center the spot on its centroid
    x = xy_list[0] - np.mean(xy_list[0])
    y = xy_list[1] - np.mean(xy_list[1])
    # RMS radius is the square root of the *mean squared* radial distance.
    # The previous np.hypot(x, y).mean() returned the mean radius, which
    # underestimates the RMS whenever radii vary; the older commented-out
    # formula also divided by len(xy_list) (always 2) instead of the number
    # of points.
    return np.sqrt(np.mean(np.square(x) + np.square(y)))
| [
"numpy.mean",
"numpy.hypot"
] | [((155, 174), 'numpy.mean', 'np.mean', (['xy_list[0]'], {}), '(xy_list[0])\n', (162, 174), True, 'import numpy as np\n'), ((193, 212), 'numpy.mean', 'np.mean', (['xy_list[1]'], {}), '(xy_list[1])\n', (200, 212), True, 'import numpy as np\n'), ((266, 280), 'numpy.hypot', 'np.hypot', (['x', 'y'], {}), '(x, y)\n', (274, 280), True, 'import numpy as np\n')] |
import numpy as np
from py_diff_stokes_flow.env.env_base import EnvBase
from py_diff_stokes_flow.common.common import ndarray
class FlowAveragerEnv3d(EnvBase):
    """3D Stokes-flow design environment with two inlets and two outlets.

    The fluid domain boundary is parameterized by four Bezier curves (8 design
    variables total); the loss drives the outlet velocity toward (0.5, 0, 0).
    """
    def __init__(self, seed, folder):
        np.random.seed(seed)
        # simulation grid and material parameters passed to EnvBase
        cell_nums = (64, 64, 4)
        E = 100
        nu = 0.499
        vol_tol = 1e-2
        edge_sample_num = 2
        EnvBase.__init__(self, cell_nums, E, nu, vol_tol, edge_sample_num, folder)
        # Initialize the parametric shapes.
        self._parametric_shape_info = [ ('bezier', 11), ('bezier', 11), ('bezier', 11), ('bezier', 11) ]
        # Initialize the node conditions.
        self._node_boundary_info = []
        # two inlet slots (fractions of the y extent): rows are the two inlets
        inlet_range = ndarray([
            [0.1, 0.4],
            [0.6, 0.9],
        ])
        # two outlet slots (fractions of the y extent)
        outlet_range = ndarray([
            [0.2, 0.4],
            [0.6, 0.8]
        ])
        cx, cy, _ = self.cell_nums()
        nx, ny, nz = self.node_nums()
        # convert fractional ranges to node-index ranges along y
        inlet_bd = inlet_range * cy
        outlet_bd = outlet_range * cy
        for j in range(ny):
            for k in range(nz):
                # Set the inlet at i = 0.
                # first inlet: unit x-velocity, zero y/z-velocity
                if inlet_bd[0, 0] < j < inlet_bd[0, 1]:
                    self._node_boundary_info.append(((0, j, k, 0), 1))
                    self._node_boundary_info.append(((0, j, k, 1), 0))
                    self._node_boundary_info.append(((0, j, k, 2), 0))
                # second inlet: all velocity components clamped to zero
                if inlet_bd[1, 0] < j < inlet_bd[1, 1]:
                    self._node_boundary_info.append(((0, j, k, 0), 0))
                    self._node_boundary_info.append(((0, j, k, 1), 0))
                    self._node_boundary_info.append(((0, j, k, 2), 0))
        # Set the top and bottom plane.
        for i in range(nx):
            for j in range(ny):
                for k in [0, nz - 1]:
                    self._node_boundary_info.append(((i, j, k, 2), 0))
        # Initialize the interface.
        self._interface_boundary_type = 'free-slip'
        # Other data members.
        self._inlet_range = inlet_range
        self._outlet_range = outlet_range
        self._inlet_bd = inlet_bd
        self._outlet_bd = outlet_bd
    def _variables_to_shape_params(self, x):
        """Map 8 design variables to Bezier control-point parameters.

        Returns (params, J) where J is the Jacobian d(params)/d(x), filled in
        by hand below (row indices follow the concatenation order of params).
        """
        x = ndarray(x).copy().ravel()
        assert x.size == 8
        cx, cy, _ = self._cell_nums
        # four Bezier curves, each given by 4 control points in the unit square
        lower = ndarray([
            [1, self._outlet_range[0, 0]],
            x[2:4],
            x[:2],
            [0, self._inlet_range[0, 0]],
        ])
        right = ndarray([
            [1, self._outlet_range[1, 0]],
            [x[4], 1 - x[5]],
            x[4:6],
            [1, self._outlet_range[0, 1]],
        ])
        upper = ndarray([
            [0, self._inlet_range[1, 1]],
            [x[0], 1 - x[1]],
            [x[2], 1 - x[3]],
            [1, self._outlet_range[1, 1]],
        ])
        left = ndarray([
            [0, self._inlet_range[0, 1]],
            x[6:8],
            [x[6], 1 - x[7]],
            [0, self._inlet_range[1, 0]],
        ])
        # scale from unit square to cell coordinates
        cxy = ndarray([cx, cy])
        lower *= cxy
        right *= cxy
        upper *= cxy
        left *= cxy
        # the 3-vectors between curves are extra per-curve parameters
        # (presumably normal/offset data consumed downstream — confirm)
        params = np.concatenate([lower.ravel(),
            [0, -0.01, 1],
            right.ravel(),
            [0.01, 0, 1],
            upper.ravel(),
            [0, 0.01, 1],
            left.ravel(),
            [-0.01, 0, 1]
        ])
        # Jacobian.
        J = np.zeros((params.size, x.size))
        J[2, 2] = J[3, 3] = 1
        J[4, 0] = J[5, 1] = 1
        J[13, 4] = 1
        J[14, 5] = -1
        J[15, 4] = J[16, 5] = 1
        J[24, 0] = 1
        J[25, 1] = -1
        J[26, 2] = 1
        J[27, 3] = -1
        J[35, 6] = J[36, 7] = 1
        J[37, 6] = 1
        J[38, 7] = -1
        # apply the unit-square -> cell-coordinate scaling to the Jacobian too
        J[:, ::2] *= cx
        J[:, 1::2] *= cy
        return ndarray(params).copy(), ndarray(J).copy()
    def _loss_and_grad_on_velocity_field(self, u):
        """Mean squared deviation of the outlet velocity from (0.5, 0, 0).

        Returns (loss, grad) where grad is flattened to match u's layout.
        """
        u_field = self.reshape_velocity_field(u)
        grad = np.zeros(u_field.shape)
        nx, ny, nz = self.node_nums()
        loss = 0
        cnt = 0
        for j in range(ny):
            for k in range(nz):
                # only nodes inside either outlet slot on the far x face count
                if self._outlet_bd[0, 0] < j < self._outlet_bd[0, 1] or \
                    self._outlet_bd[1, 0] < j < self._outlet_bd[1, 1]:
                    cnt += 1
                    u_diff = u_field[nx - 1, j, k] - ndarray([0.5, 0, 0])
                    loss += u_diff.dot(u_diff)
                    grad[nx - 1, j, k] += 2 * u_diff
        # average over the counted outlet nodes
        loss /= cnt
        grad /= cnt
        return loss, ndarray(grad).ravel()
    def _color_velocity(self, u):
        # scalar used for visualization coloring: speed scaled by 1/3
        return float(np.linalg.norm(u) / 3)
    def sample(self):
        """Draw a uniformly random design inside the box bounds."""
        return np.random.uniform(low=self.lower_bound(), high=self.upper_bound())
    def lower_bound(self):
        # per-variable lower bounds for the 8 design variables
        return ndarray([.01, .01, .49, .01, .49, .01, .01, .01])
    def upper_bound(self):
        # per-variable upper bounds for the 8 design variables
        return ndarray([.49, .49, .99, .49, .99, .49, .49, .49])
"py_diff_stokes_flow.common.common.ndarray",
"numpy.zeros",
"py_diff_stokes_flow.env.env_base.EnvBase.__init__",
"numpy.random.seed",
"numpy.linalg.norm"
] | [((208, 228), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (222, 228), True, 'import numpy as np\n'), ((356, 430), 'py_diff_stokes_flow.env.env_base.EnvBase.__init__', 'EnvBase.__init__', (['self', 'cell_nums', 'E', 'nu', 'vol_tol', 'edge_sample_num', 'folder'], {}), '(self, cell_nums, E, nu, vol_tol, edge_sample_num, folder)\n', (372, 430), False, 'from py_diff_stokes_flow.env.env_base import EnvBase\n'), ((684, 717), 'py_diff_stokes_flow.common.common.ndarray', 'ndarray', (['[[0.1, 0.4], [0.6, 0.9]]'], {}), '([[0.1, 0.4], [0.6, 0.9]])\n', (691, 717), False, 'from py_diff_stokes_flow.common.common import ndarray\n'), ((776, 809), 'py_diff_stokes_flow.common.common.ndarray', 'ndarray', (['[[0.2, 0.4], [0.6, 0.8]]'], {}), '([[0.2, 0.4], [0.6, 0.8]])\n', (783, 809), False, 'from py_diff_stokes_flow.common.common import ndarray\n'), ((2277, 2367), 'py_diff_stokes_flow.common.common.ndarray', 'ndarray', (['[[1, self._outlet_range[0, 0]], x[2:4], x[:2], [0, self._inlet_range[0, 0]]]'], {}), '([[1, self._outlet_range[0, 0]], x[2:4], x[:2], [0, self.\n _inlet_range[0, 0]]])\n', (2284, 2367), False, 'from py_diff_stokes_flow.common.common import ndarray\n'), ((2438, 2540), 'py_diff_stokes_flow.common.common.ndarray', 'ndarray', (['[[1, self._outlet_range[1, 0]], [x[4], 1 - x[5]], x[4:6], [1, self.\n _outlet_range[0, 1]]]'], {}), '([[1, self._outlet_range[1, 0]], [x[4], 1 - x[5]], x[4:6], [1, self.\n _outlet_range[0, 1]]])\n', (2445, 2540), False, 'from py_diff_stokes_flow.common.common import ndarray\n'), ((2611, 2721), 'py_diff_stokes_flow.common.common.ndarray', 'ndarray', (['[[0, self._inlet_range[1, 1]], [x[0], 1 - x[1]], [x[2], 1 - x[3]], [1, self\n ._outlet_range[1, 1]]]'], {}), '([[0, self._inlet_range[1, 1]], [x[0], 1 - x[1]], [x[2], 1 - x[3]],\n [1, self._outlet_range[1, 1]]])\n', (2618, 2721), False, 'from py_diff_stokes_flow.common.common import ndarray\n'), ((2792, 2892), 'py_diff_stokes_flow.common.common.ndarray', 'ndarray', (['[[0, 
self._inlet_range[0, 1]], x[6:8], [x[6], 1 - x[7]], [0, self.\n _inlet_range[1, 0]]]'], {}), '([[0, self._inlet_range[0, 1]], x[6:8], [x[6], 1 - x[7]], [0, self.\n _inlet_range[1, 0]]])\n', (2799, 2892), False, 'from py_diff_stokes_flow.common.common import ndarray\n'), ((2961, 2978), 'py_diff_stokes_flow.common.common.ndarray', 'ndarray', (['[cx, cy]'], {}), '([cx, cy])\n', (2968, 2978), False, 'from py_diff_stokes_flow.common.common import ndarray\n'), ((3339, 3370), 'numpy.zeros', 'np.zeros', (['(params.size, x.size)'], {}), '((params.size, x.size))\n', (3347, 3370), True, 'import numpy as np\n'), ((3892, 3915), 'numpy.zeros', 'np.zeros', (['u_field.shape'], {}), '(u_field.shape)\n', (3900, 3915), True, 'import numpy as np\n'), ((4705, 4762), 'py_diff_stokes_flow.common.common.ndarray', 'ndarray', (['[0.01, 0.01, 0.49, 0.01, 0.49, 0.01, 0.01, 0.01]'], {}), '([0.01, 0.01, 0.49, 0.01, 0.49, 0.01, 0.01, 0.01])\n', (4712, 4762), False, 'from py_diff_stokes_flow.common.common import ndarray\n'), ((4798, 4855), 'py_diff_stokes_flow.common.common.ndarray', 'ndarray', (['[0.49, 0.49, 0.99, 0.49, 0.99, 0.49, 0.49, 0.49]'], {}), '([0.49, 0.49, 0.99, 0.49, 0.99, 0.49, 0.49, 0.49])\n', (4805, 4855), False, 'from py_diff_stokes_flow.common.common import ndarray\n'), ((4534, 4551), 'numpy.linalg.norm', 'np.linalg.norm', (['u'], {}), '(u)\n', (4548, 4551), True, 'import numpy as np\n'), ((3734, 3749), 'py_diff_stokes_flow.common.common.ndarray', 'ndarray', (['params'], {}), '(params)\n', (3741, 3749), False, 'from py_diff_stokes_flow.common.common import ndarray\n'), ((3758, 3768), 'py_diff_stokes_flow.common.common.ndarray', 'ndarray', (['J'], {}), '(J)\n', (3765, 3768), False, 'from py_diff_stokes_flow.common.common import ndarray\n'), ((4456, 4469), 'py_diff_stokes_flow.common.common.ndarray', 'ndarray', (['grad'], {}), '(grad)\n', (4463, 4469), False, 'from py_diff_stokes_flow.common.common import ndarray\n'), ((2171, 2181), 'py_diff_stokes_flow.common.common.ndarray', 
'ndarray', (['x'], {}), '(x)\n', (2178, 2181), False, 'from py_diff_stokes_flow.common.common import ndarray\n'), ((4274, 4294), 'py_diff_stokes_flow.common.common.ndarray', 'ndarray', (['[0.5, 0, 0]'], {}), '([0.5, 0, 0])\n', (4281, 4294), False, 'from py_diff_stokes_flow.common.common import ndarray\n')] |
from typing import Callable

import numpy
def gradient_descent(
    grad: Callable[[numpy.ndarray], numpy.ndarray], alpha: float, x: numpy.ndarray,
    tol: float = 0.0001,
) -> numpy.ndarray:
    """Minimize a function by fixed-step gradient descent.

    Fixes: ``Callable`` was used without being imported (a NameError at
    definition time), and ``Callable[numpy.ndarray, numpy.ndarray]`` is not
    valid typing syntax — the argument list must itself be a list.

    Parameters
    ----------
    grad : callable
        Gradient of the objective: maps the current point to its gradient.
    alpha : float
        Step size (learning rate).
    x : numpy.ndarray
        Starting point. Note: updated in place and also returned.
    tol : float, optional
        Stop once the norm of the last step drops below this value
        (default 0.0001, matching the previous hard-coded constant).

    Returns
    -------
    numpy.ndarray
        The (approximately) stationary point.
    """
    # seed delta so the loop body executes at least once
    delta = numpy.ones_like(x)
    while numpy.linalg.norm(delta) >= tol:
        delta = alpha * grad(x)
        x -= delta
    return x
| [
"numpy.ones_like",
"numpy.linalg.norm"
] | [((150, 168), 'numpy.ones_like', 'numpy.ones_like', (['x'], {}), '(x)\n', (165, 168), False, 'import numpy\n'), ((179, 203), 'numpy.linalg.norm', 'numpy.linalg.norm', (['delta'], {}), '(delta)\n', (196, 203), False, 'import numpy\n')] |
import skil_client
import time
import uuid
import numpy as np
import requests
import json
import os
try:
import cv2
except ImportError:
cv2 = None
class Service:
    """Service
    A service is a deployed model.
    # Arguments:
    skil: `Skil` server instance
    model: `skil.Model` instance
    deployment: `skil.Deployment` instance
    model_deployment: result of `deploy_model` API call of a model
    """
    def __init__(self, skil, model, deployment, model_deployment):
        self.skil = skil
        self.model = model
        self.model_name = self.model.name
        self.model_deployment = model_deployment
        self.deployment = deployment
    def start(self):
        """Starts the service.

        Blocks, polling every 5 seconds, until the server reports the
        model state as "started".
        """
        if not self.model_deployment:
            self.skil.printer.pprint(
                "No model deployed yet, call 'deploy()' on a model first.")
        else:
            self.skil.api.model_state_change(
                self.deployment.id,
                self.model_deployment.id,
                skil_client.SetState("start")
            )
            self.skil.printer.pprint(">>> Starting to serve model...")
            while True:
                time.sleep(5)
                # NOTE(review): each poll re-sends the "start" state change and
                # reads the state off the response — confirm this is intended
                # rather than a read-only state query.
                model_state = self.skil.api.model_state_change(
                    self.deployment.id,
                    self.model_deployment.id,
                    skil_client.SetState("start")
                ).state
                if model_state == "started":
                    # extra grace period before declaring the server ready
                    time.sleep(15)
                    self.skil.printer.pprint(
                        ">>> Model server started successfully!")
                    break
                else:
                    self.skil.printer.pprint(">>> Waiting for deployment...")
    def stop(self):
        """Stops the service.
        """
        # TODO: test this
        self.skil.api.model_state_change(
            self.deployment.id,
            self.model_deployment.id,
            skil_client.SetState("stop")
        )
    @staticmethod
    def _indarray(np_array):
        """Convert a numpy array to `skil_client.INDArray` instance.
        # Arguments
        np_array: `numpy.ndarray` instance.
        # Returns
        `skil_client.INDArray` instance.
        """
        # data is sent as a flat (C-ordered) list; shape is sent separately
        return skil_client.INDArray(
            ordering='c',
            shape=list(np_array.shape),
            data=np_array.reshape(-1).tolist()
        )
    def predict(self, data):
        """Predict for given batch of data.
        # Arguments:
        data: `numpy.ndarray` (or list thereof). Batch of input data, or list of batches for multi-input model.
        # Returns
        `numpy.ndarray` instance for single output model and list of `numpy.ndarray` for multi-output model.
        """
        if isinstance(data, list):
            inputs = [self._indarray(x) for x in data]
        else:
            inputs = [self._indarray(data)]
        classification_response = self.skil.api.multipredict(
            deployment_name=self.deployment.name,
            model_name=self.model_name,
            version_name="default",
            body=skil_client.MultiPredictRequest(
                id=str(uuid.uuid1()),
                needs_pre_processing=False,
                inputs=inputs
            )
        )
        outputs = classification_response.outputs
        # rebuild numpy arrays from the flat data + shape of each output
        outputs = [np.asarray(o.data).reshape(o.shape) for o in outputs]
        if len(outputs) == 1:
            return outputs[0]
        return outputs
    def predict_single(self, data):
        """Predict for a single input.
        # Arguments:
        data: `numpy.ndarray` (or list thereof). Input data.
        # Returns
        `numpy.ndarray` instance for single output model and list of `numpy.ndarray` for multi-output model.
        """
        # add a leading batch axis of size 1 before sending
        if isinstance(data, list):
            inputs = [self._indarray(np.expand_dims(x, 0)) for x in data]
        else:
            inputs = [self._indarray(np.expand_dims(data, 0))]
        classification_response = self.skil.api.multipredict(
            deployment_name=self.deployment.name,
            model_name=self.model_name,
            version_name="default",
            body=skil_client.MultiPredictRequest(
                id=str(uuid.uuid1()),
                needs_pre_processing=False,
                inputs=inputs
            )
        )
        output = classification_response.outputs[0]
        return np.asarray(output.data).reshape(output.shape)
    def detect_objects(self, image, threshold=0.5, needs_preprocessing=False, temp_path='temp.jpg'):
        """Detect objects in an image for this service. Only works when deploying an object detection
        model like YOLO or SSD.
        # Argments
        image: `numpy.ndarray`. Input image to detect objects from.
        threshold: floating point between 0 and 1. bounding box threshold, only objects with at
            least this threshold get returned.
        needs_preprocessing: boolean. whether input data needs pre-processing
        temp_path: local path to which intermediate numpy arrays get stored.
        # Returns
        `DetectionResult`, a Python dictionary with labels, confidences and locations of bounding boxes
        of detected objects.
        """
        if cv2 is None:
            raise Exception("OpenCV is not installed.")
        # the REST endpoint expects a JPEG file upload, so round-trip through disk
        cv2.imwrite(temp_path, image)
        url = 'http://{}/endpoints/{}/model/{}/v{}/detectobjects'.format(
            self.skil.config.host,
            self.model.deployment.name,
            self.model.name,
            self.model.version
        )
        # TODO: use the official "detectobject" client API, once fixed in skil_client
        # print(">>>> TEST")
        # resp = self.skil.api.detectobjects(
        #     id='foo',
        #     needs_preprocessing=False,
        #     threshold='0.5',
        #     image_file=temp_path,
        #     deployment_name=self.model.deployment.name,
        #     version_name='default',
        #     model_name=self.model.name
        # )
        with open(temp_path, 'rb') as data:
            resp = requests.post(
                url=url,
                headers=self.skil.auth_headers,
                files={
                    'file': (temp_path, data, 'image/jpeg')
                },
                data={
                    'id': self.model.id,
                    'needs_preprocessing': 'true' if needs_preprocessing else 'false',
                    'threshold': str(threshold)
                }
            )
        # clean up the temporary image before returning
        if os.path.isfile(temp_path):
            os.remove(temp_path)
        return json.loads(resp.content)
| [
"cv2.imwrite",
"json.loads",
"skil_client.SetState",
"numpy.asarray",
"time.sleep",
"os.path.isfile",
"uuid.uuid1",
"numpy.expand_dims",
"os.remove"
] | [((5423, 5452), 'cv2.imwrite', 'cv2.imwrite', (['temp_path', 'image'], {}), '(temp_path, image)\n', (5434, 5452), False, 'import cv2\n'), ((6612, 6637), 'os.path.isfile', 'os.path.isfile', (['temp_path'], {}), '(temp_path)\n', (6626, 6637), False, 'import os\n'), ((6688, 6712), 'json.loads', 'json.loads', (['resp.content'], {}), '(resp.content)\n', (6698, 6712), False, 'import json\n'), ((1986, 2014), 'skil_client.SetState', 'skil_client.SetState', (['"""stop"""'], {}), "('stop')\n", (2006, 2014), False, 'import skil_client\n'), ((6651, 6671), 'os.remove', 'os.remove', (['temp_path'], {}), '(temp_path)\n', (6660, 6671), False, 'import os\n'), ((1061, 1090), 'skil_client.SetState', 'skil_client.SetState', (['"""start"""'], {}), "('start')\n", (1081, 1090), False, 'import skil_client\n'), ((1217, 1230), 'time.sleep', 'time.sleep', (['(5)'], {}), '(5)\n', (1227, 1230), False, 'import time\n'), ((4467, 4490), 'numpy.asarray', 'np.asarray', (['output.data'], {}), '(output.data)\n', (4477, 4490), True, 'import numpy as np\n'), ((1520, 1534), 'time.sleep', 'time.sleep', (['(15)'], {}), '(15)\n', (1530, 1534), False, 'import time\n'), ((3395, 3413), 'numpy.asarray', 'np.asarray', (['o.data'], {}), '(o.data)\n', (3405, 3413), True, 'import numpy as np\n'), ((3911, 3931), 'numpy.expand_dims', 'np.expand_dims', (['x', '(0)'], {}), '(x, 0)\n', (3925, 3931), True, 'import numpy as np\n'), ((3999, 4022), 'numpy.expand_dims', 'np.expand_dims', (['data', '(0)'], {}), '(data, 0)\n', (4013, 4022), True, 'import numpy as np\n'), ((1401, 1430), 'skil_client.SetState', 'skil_client.SetState', (['"""start"""'], {}), "('start')\n", (1421, 1430), False, 'import skil_client\n'), ((3213, 3225), 'uuid.uuid1', 'uuid.uuid1', ([], {}), '()\n', (3223, 3225), False, 'import uuid\n'), ((4287, 4299), 'uuid.uuid1', 'uuid.uuid1', ([], {}), '()\n', (4297, 4299), False, 'import uuid\n')] |
import tensorflow as tf
import numpy as np
from python.download import load_content
from python.operator import parse_sequence_example
class AutoCompleteFixed:
    """TensorFlow dataset wrapper that serves one fixed, pre-encoded text sample.

    The sample is encoded character-by-character (source) aligned with its
    word ids (target); the dataset optionally replicates it once per
    character position with an `offset` feature.
    """
    def __init__(self, sample: str,
                 observations: int=None, batch_size: int=128,
                 use_offsets=False,
                 repeat: bool=False):
        # save config
        self._observations = observations
        self._batch_size = batch_size
        self._repeat = repeat
        self._use_offsets = use_offsets
        # load data
        content = load_content('autocomplete')
        maps = np.load(content['map.npz'])
        # Store maps for later, note these are inverse of those used in
        # the preprocessing step.
        self.char_map = maps['char_map']
        self.word_map = maps['word_map']
        # Create inverse maps
        self._char_map_inverse = {c: i for (i, c) in enumerate(self.char_map)}
        self._word_map_inverse = {w: i for (i, w) in enumerate(self.word_map)}
        # Get number of classes
        self.source_classes = len(self.char_map)
        self.target_classes = len(self.word_map)
        # Encode sample
        length, source, target = self.make_source_target_alignment(sample)
        if self._use_offsets:
            # replicate the sample once per character position, with the
            # position index carried in `offsets`
            lengths = np.tile(length, (length))
            sources = np.tile(source, (length, 1))
            targets = np.tile(target, (length, 1))
            offsets = np.arange(length, dtype='int32')
        else:
            # a single observation with a dummy offset of 0
            lengths = np.asarray([length], dtype='int32')
            sources = source[np.newaxis, ...]
            targets = target[np.newaxis, ...]
            offsets = np.asarray([0], dtype='int32')
        self._records = (
            lengths, sources, targets, offsets
        )
    def make_source_target_alignment(self, sequence):
        """Encode a sentence into aligned (length, char-id array, word-id array).

        Each word is prefixed with a space character code, and every one of its
        characters (plus the space) is paired with the word's id.
        """
        space_char_code = self._char_map_inverse[' ']
        # NOTE(review): unknown_word_code is computed but never used below;
        # encode_target raises on out-of-vocabulary words instead — confirm
        # whether an <unknown> fallback was intended here.
        unknown_word_code = self._word_map_inverse['<unknown>']
        source = []
        target = []
        length = 0
        for word in sequence.split(' '):
            source.append(
                np.array([space_char_code] + self.encode_source(word),
                         dtype='int32')
            )
            target.append(
                np.full(len(word) + 1, self.encode_target([word])[0],
                        dtype='int32')
            )
            length += 1 + len(word)
        # concatenate data
        return (
            length,
            np.concatenate(source),
            np.concatenate(target)
        )
    def encode_source(self, chars):
        """Map a string to its list of character ids."""
        return [self._char_map_inverse[char] for char in chars]
    def encode_target(self, words):
        """Map words to word ids, raising ValueError on out-of-vocabulary words."""
        for word in words:
            if word not in self._word_map_inverse:
                raise ValueError(f'the word "{word}" is not in the vocabulary')
        return [self._word_map_inverse[word] for word in words]
    def decode_source(self, classes):
        """Inverse of encode_source: character ids back to characters."""
        return self.char_map[classes]
    def decode_target(self, classes):
        """Inverse of encode_target: word ids back to words."""
        return self.word_map[classes]
    def __call__(self):
        """Build the tf.data pipeline and return (features, target) tensors."""
        # Create shuffled batches
        # Documentation: https://www.tensorflow.org/programmers_guide/datasets
        dataset = tf.data.Dataset.from_tensor_slices(self._records)
        if self._observations is not None:
            dataset = dataset.take(self._observations)
        if self._repeat:
            dataset = dataset.repeat()
        dataset = dataset.batch(self._batch_size)
        # Export iterator
        iterator = dataset.make_one_shot_iterator()
        length, source, target, offset = iterator.get_next()
        features = {
            'source': source,
            'target': target,
            'length': length
        }
        if (self._use_offsets):
            features['offset'] = offset
        return (features, target)
| [
"numpy.tile",
"python.download.load_content",
"tensorflow.data.Dataset.from_tensor_slices",
"numpy.asarray",
"numpy.concatenate",
"numpy.load",
"numpy.arange"
] | [((547, 575), 'python.download.load_content', 'load_content', (['"""autocomplete"""'], {}), "('autocomplete')\n", (559, 575), False, 'from python.download import load_content\n'), ((591, 618), 'numpy.load', 'np.load', (["content['map.npz']"], {}), "(content['map.npz'])\n", (598, 618), True, 'import numpy as np\n'), ((3193, 3242), 'tensorflow.data.Dataset.from_tensor_slices', 'tf.data.Dataset.from_tensor_slices', (['self._records'], {}), '(self._records)\n', (3227, 3242), True, 'import tensorflow as tf\n'), ((1280, 1303), 'numpy.tile', 'np.tile', (['length', 'length'], {}), '(length, length)\n', (1287, 1303), True, 'import numpy as np\n'), ((1328, 1356), 'numpy.tile', 'np.tile', (['source', '(length, 1)'], {}), '(source, (length, 1))\n', (1335, 1356), True, 'import numpy as np\n'), ((1379, 1407), 'numpy.tile', 'np.tile', (['target', '(length, 1)'], {}), '(target, (length, 1))\n', (1386, 1407), True, 'import numpy as np\n'), ((1430, 1462), 'numpy.arange', 'np.arange', (['length'], {'dtype': '"""int32"""'}), "(length, dtype='int32')\n", (1439, 1462), True, 'import numpy as np\n'), ((1499, 1534), 'numpy.asarray', 'np.asarray', (['[length]'], {'dtype': '"""int32"""'}), "([length], dtype='int32')\n", (1509, 1534), True, 'import numpy as np\n'), ((1649, 1679), 'numpy.asarray', 'np.asarray', (['[0]'], {'dtype': '"""int32"""'}), "([0], dtype='int32')\n", (1659, 1679), True, 'import numpy as np\n'), ((2453, 2475), 'numpy.concatenate', 'np.concatenate', (['source'], {}), '(source)\n', (2467, 2475), True, 'import numpy as np\n'), ((2489, 2511), 'numpy.concatenate', 'np.concatenate', (['target'], {}), '(target)\n', (2503, 2511), True, 'import numpy as np\n')] |
import numpy as np
import scipy.sparse.linalg
import scipy.sparse as sparse
from time import time as tm
from einsum_tools import *
# a class to store the operators with the bottom and left indices,
# and bottom and right indices moved to the right for contraction
# while sparse
class SparseOperator():
    """Wraps an MPO site tensor in two sparse layouts for fast contraction.

    l_form and r_form are NDSparse views of the same operator with different
    index pairs moved to the right (see einsum_tools.NDSparse); the index
    order of each form is recorded in the comments below.
    """
    def __init__(self, Op):
        # mirror the dense tensor's metadata so callers can query it cheaply
        self.shape = Op.shape
        self.dtype = Op.dtype
        # l_form mujd
        self.l_form = NDSparse(Op, [0, 2])
        # r_form jumd
        self.r_form = NDSparse(Op, [1, 2])
# contracts terms into the left tensor
def contract_left(Op, Md, Mu, L):
    """Absorb one site into the left environment tensor.

    Op is a site operator (dense ndarray or SparseOperator), Md/Mu the
    ket/bra site tensors, and L the current left environment; returns the
    updated environment with index order "mln" per the final einsum.
    """
    # NOTE(review): conjugation of the bra tensor is disabled here — confirm
    # whether real-valued tensors are assumed.
    # Mu = Mu.conj()
    L = einsum("dil,jik->dljk", Md, L)
    # sparse operators carry a pre-permuted l_form, hence the different script
    if type(Op) == SparseOperator:
        L = einsum("mujd,dljk->lkmu", Op.l_form, L)
    else:
        L = einsum("dljk,jmdu->lkmu", L, Op)
    L = einsum("lkmu,ukn->mln", L, Mu)
    return L
return L
# contracts terms into the right tensor
def contract_right(Op, Md, Mu, R):
    """Absorb one site into the right environment.

    Contracts the bra tensor ``Md``, the MPO tensor ``Op`` and the ket
    tensor ``Mu`` into the current right environment ``R`` and returns the
    updated environment.
    """
    # Mu = Mu.conj()
    env = einsum("dil,mln->dimn", Md, R)
    if type(Op) is SparseOperator:
        # use the precomputed sparse layout with the contraction axes moved right
        env = einsum("jumd,dimn->inju", Op.r_form, env)
    else:
        env = einsum("dimn,jmdu->inju", env, Op)
    return einsum("inju,ukn->jik", env, Mu)
# returns the expectation of an MPO
def expectation(MPS, MPO):
    """Return <MPS|MPO|MPS> by sweeping the whole chain left to right."""
    # start from the trivial 1x1x1 boundary environment
    env = np.array([[[1]]])
    for op_tensor, site_tensor in zip(MPO, MPS):
        env = contract_left(op_tensor, site_tensor, site_tensor, env)
    # the fully contracted network is a scalar stored in a 1x1x1 tensor
    return env[0, 0, 0]
# contracts two mpos by the common indices
def contract_mpo(MPO1, MPO2):
    """Contract two MPOs site by site over their shared physical index.

    The resulting MPO's bond dimensions are the products of the inputs'
    bond dimensions. The two MPOs are assumed to have equal length.
    """
    result = []
    for site in range(len(MPO1)):
        A, B = MPO1[site], MPO2[site]
        combined = einsum("ijqu,kldq->ikjldu", A, B)
        # fuse the paired bond indices into single, larger bonds
        result.append(combined.reshape(
            A.shape[0] * B.shape[0],
            A.shape[1] * B.shape[1],
            B.shape[2], A.shape[3]))
    return result
# A linear operator for the sparse eigenvalue problem
class SparseHamProd(sparse.linalg.LinearOperator):
    """Matrix-free effective two-site Hamiltonian for the eigensolver.

    Applies L--OL--OR--R (left environment, the two active MPO tensors,
    right environment) to a flattened two-site wavefunction so that
    ``sparse.linalg.eigsh`` can diagonalize it without forming the dense
    matrix.

    NOTE(review): ``super().__init__`` is never called; ``shape`` and
    ``dtype`` are set directly as attributes — confirm this is accepted by
    the scipy version in use. ``prod`` is not defined in this file and
    presumably comes from the ``einsum_tools`` star import — verify.
    """
    def __init__(self, L, OL, OR, R):
        # environments and MPO tensors for the two sites being optimized
        self.L = L
        self.OL = OL
        self.OR = OR
        self.R = R
        self.dtype = OL.dtype
        # dispatch flag: sparse operators use their precomputed l_form layouts
        self.issparse = type(OL) == SparseOperator
        # two-site wavefunction shape: (phys of OL, phys of OR, left bond, right bond)
        # — matches the "adil" index order used in _matvec
        self.req_shape = [OL.shape[2], OR.shape[2], L.shape[1], R.shape[1]]
        self.req_shape2 = [OL.shape[2] * OR.shape[2], L.shape[1], R.shape[1]]
        self.size = prod(self.req_shape)
        # the operator is square in the flattened two-site space
        self.shape = [self.size, self.size]
    # return the output of H*B
    def _matvec(self, B):
        """Return H @ B, where B is a flattened two-site tensor."""
        # attach the left environment to the reshaped wavefunction
        L = einsum("jik,adil->jkadl", self.L, np.reshape(B, self.req_shape))
        if self.issparse: # for sparse
            L = einsum("cbja,jkadl->kdlcb", self.OL.l_form, L)
            L = einsum("mucd,kdlcb->klbmu", self.OR.l_form, L)
        else:
            L = einsum("jkadl,jcab->kdlcb", L, self.OL)
            L = einsum("kdlcb,cmdu->klbmu", L, self.OR)
        # close the network with the right environment and flatten for eigsh
        L = einsum("klbmu,mln->bukn", L, self.R)
        return np.reshape(L, -1)
# truncates the svd output by m
def trunacte_svd(u, s, v, m):
    """Keep at most ``m`` singular values of the SVD ``(u, s, v)``.

    Note: the misspelled name ("trunacte") is preserved for call-site
    compatibility.

    Returns
    -------
    (u, s, v, truncation, kept)
        The truncated factors, the summed weight of the discarded singular
        values, and the bond dimension actually kept.
    """
    kept = min(m, len(s))
    # total weight thrown away by the truncation
    truncation = s[kept:].sum()
    return u[:, :, :kept], s[:kept], v[:kept, :, :], truncation, kept
# optimises the current site
def optimize_sites(M1, M2, O1, O2, L, R, m, heading=True, tol=0):
    """Variationally optimize two neighboring MPS sites.

    Contracts M1 and M2 into one two-site tensor, finds the lowest
    eigenvector of the effective Hamiltonian built from (L, O1, O2, R),
    then splits the result back into two site tensors with a truncated SVD.

    Parameters
    ----------
    M1, M2
        Current MPS tensors of the two sites, index order (phys, left, right).
    O1, O2
        MPO tensors (dense ndarray or SparseOperator) of the two sites.
    L, R
        Left and right environment tensors.
    m : int
        Maximum bond dimension to keep in the truncation.
    heading : bool
        True when sweeping right (singular values absorbed into the right
        tensor), False when sweeping left (absorbed into the left tensor).
    tol : float
        Convergence tolerance forwarded to ``eigsh``.

    Returns
    -------
    (E, u, v, trunc, m_i)
        Ground-state energy, the two updated site tensors, the discarded
        singular weight, and the bond dimension actually kept.
    """
    # generate initial guess B from the current two-site tensors
    B = einsum("aiz,dzl->adil", M1, M2)
    # matrix-free effective Hamiltonian
    H = SparseHamProd(L, O1, O2, R)
    # solve for the lowest ('SA' = smallest algebraic) eigenpair
    E, V = sparse.linalg.eigsh(H, 1, v0=B, which='SA', tol=tol)
    V = V[:, 0].reshape(H.req_shape)
    # re-arrange output so the indices are in the correct location
    V = np.moveaxis(V, 1, 2)  # aidl
    # fuse (phys1, left) and (phys2, right) into a matrix across the central bond
    V = V.reshape(O1.shape[2] * L.shape[1], O2.shape[2] * R.shape[1])
    # truncate via SVD across the central bond
    u, s, v = np.linalg.svd(V)
    u = u.reshape(O1.shape[2], L.shape[1], -1)
    v = v.reshape(-1, O2.shape[2], R.shape[1])
    u, s, v, trunc, m_i = trunacte_svd(u, s, v, m)
    # if going right, contract s into the right unitary, else left
    if heading:
        # v = einsum_with_str("ij,djl->dil", np.diag(s), v)
        v = s[:, None] * v.reshape(-1, O2.shape[2] * R.shape[1])  # broadcasting should be faster
        v = v.reshape(-1, O2.shape[2], R.shape[1])
    else:
        # u = einsum_with_str("dik,kl->dil", u, np.diag(s))
        u = u.reshape(O1.shape[2] * L.shape[1], -1) * s
        u = u.reshape(O1.shape[2], L.shape[1], -1)
    # restore the (phys, left, right) index convention on the right tensor
    v = np.moveaxis(v, 0, 1)
    return E[0], u, v, trunc, m_i
def two_site_DMRG(MPS, MPO, m, num_sweeps, verbose=1):
    """Run two-site DMRG sweeps on an MPS for a given MPO Hamiltonian.

    Parameters
    ----------
    MPS : list
        Site tensors of the initial state; optimized in place.
    MPO : list
        Site tensors of the Hamiltonian MPO.
    m : int
        Maximum bond dimension kept at each truncation.
    num_sweeps : int
        Number of full (right + left) sweeps.
    verbose : int
        0 silent, 1 final summary, 2 per-sweep, 3 per-site output.

    Returns
    -------
    (MPS, t, E_j, E_s)
        The optimized MPS, per-sweep wall times, the energy after every
        site optimization, and the energy at the end of every sweep.
    """
    N = len(MPS)
    # trivial 1x1x1 boundary environment at the right edge
    R = [np.array([[[1.0]]])]
    # precompute right environments from the right edge inward;
    # R[-1] is always the environment just right of the active pair
    for j in range(N - 1, 1, -1):
        R += [contract_right(MPO[j], MPS[j], MPS[j], R[-1])]
    L = [np.array([[[1.0]]])]
    # lists for storing outputs
    t = [];
    E_s = [];
    E_j = []
    for i in range(num_sweeps):
        t0 = tm()
        # sweep right
        for j in range(0, N - 2):
            # optimise going right
            E, MPS[j], MPS[j + 1], trunc, m_i = optimize_sites(MPS[j], MPS[j + 1], MPO[j], MPO[j + 1], L[-1], R[-1], m,
                                                               tol=0, heading=True)
            R = R[:-1] # remove leftmost R tensor
            L += [contract_left(MPO[j], MPS[j], MPS[j], L[-1])] # add L tensor
            E_j += [E]
            if verbose >= 3: print(E, "sweep right", i, "sites:", (j, j + 1), "m:", m_i)
        # sweep left
        for j in range(N - 2, 0, -1):
            E, MPS[j], MPS[j + 1], trunc, m_i = optimize_sites(MPS[j], MPS[j + 1], MPO[j], MPO[j + 1], L[-1], R[-1], m,
                                                               tol=0, heading=False)
            R += [contract_right(MPO[j + 1], MPS[j + 1], MPS[j + 1], R[-1])] # add R tensor
            L = L[:-1] # remove L tensor
            E_j += [E]
            if verbose >= 3: print(E, "sweep left", i, "sites:", (j, j + 1), "m:", m_i)
        t1 = tm()
        t += [t1 - t0]
        E_s += [E]
        if verbose >= 2: print("sweep", i, "complete")
    if verbose >= 1: print("N:", N, "m:", m, "time for", num_sweeps, "sweeps:", *t)
    return MPS, t, E_j, E_s
# create |0101..> state
def construct_init_state(d, N):
    """Build the alternating product state |0101...> as a list of N
    rank-3 MPS tensors with bond dimension 1.

    Requires d >= 2. The returned list repeats the same two tensor
    objects, so entries alias each other.
    """
    basis = []
    for level in (0, 1):
        tensor = np.zeros((d, 1, 1))
        tensor[level, 0, 0] = 1
        basis.append(tensor)
    down, up = basis
    # alternate down/up; an odd chain ends on a trailing down
    return [down, up] * (N // 2) + [down] * (N % 2)
def construct_MPO(N, type="heisenberg", h=1, issparse=False):
    """Build the MPO of a spin-1/2 chain Hamiltonian, plus the MPO of H^2.

    Parameters
    ----------
    N : int
        Number of lattice sites (N >= 2).
    type : str
        "h" or "heisenberg" for the Heisenberg model,
        "i" or "ising" for the transverse-field Ising model.
    h : float
        Field strength (Ising model only).
    issparse : bool
        Wrap the H tensors in SparseOperator for sparse contraction.

    Returns
    -------
    (list, list)
        The length-N MPO for H and the length-N MPO for H^2.

    Raises
    ------
    ValueError
        If ``type`` names an unknown model.
    """
    # single-site operators
    I = np.identity(2)
    Z = np.zeros([2, 2])
    Sz = np.array([[0.5, 0], [0, -0.5]])
    Sp = np.array([[0, 0], [1, 0]])
    Sm = np.array([[0, 1], [0, 0]])
    sz = np.array([[0, 1], [1, 0]])
    sx = np.array([[0, -1j], [1j, 0]])
    # heisenberg MPO
    # bug fix: the default argument "heisenberg" previously failed the
    # `type == "h"` test, fell into the Ising branch, and tripped its
    # assertion — accept both spellings of each model name
    if type in ("h", "heisenberg"):
        W = np.array([[I, Sz, 0.5 * Sp, 0.5 * Sm, Z],
                      [Z, Z, Z, Z, Sz],
                      [Z, Z, Z, Z, Sm],
                      [Z, Z, Z, Z, Sp],
                      [Z, Z, Z, Z, I]])
        W0 = np.array([[I, Sz, 0.5 * Sp, 0.5 * Sm, Z]])
        Wn = np.array([[Z], [Sz], [Sm], [Sp], [I]])
    elif type in ("i", "ising"):  # ising model mpo
        W = np.array([[I, sz, h * sx],
                      [Z, Z, sz],
                      [Z, Z, I]])
        W0 = np.array([[I, sz, h * sx]])
        Wn = np.array([[h * sx], [sz], [I]])
    else:
        raise ValueError(f"unknown model type: {type!r}")
    # create H^2 terms by contracting the MPO with itself
    [W02, W2, Wn2] = contract_mpo([W0, W, Wn], [W0, W, Wn])
    if issparse:  # convert H (but not H^2) to sparse form
        W = SparseOperator(W)
        W0 = SparseOperator(W0)
        Wn = SparseOperator(Wn)
    MPO = [W0] + ([W] * (N - 2)) + [Wn]
    MPO2 = [W02] + ([W2] * (N - 2)) + [Wn2]
    return MPO, MPO2
# --- benchmark driver: run two-site DMRG over a grid of (N, m) settings ---
d = 2 # visible index dimension
N_list = [10, 20, 40, 80] # number of sites
m_list = [2 ** i for i in range(7, 8)] # truncation size / bond dimensionality
# N_list = [10]
# m_list = [20, 50]
model = "h" # model type, h heis, i ising
num_sweeps = 6 # full sweeps
reps = 5 # repetitions
vb = 2 # verbosity
use_sparse = False
# accumulators, flattened over (N, m, rep) in loop order
t = []
E = []
Var = []
E_sweeps = []
E_steps = []
t_sweeps = []
# run for all configurations
for N in N_list:
    MPO, MPO2 = construct_MPO(N, type=model, issparse=use_sparse)
    # NOTE(review): E_steps2 is re-created per N, so after the loop it only
    # holds the last N's data — consistent with the Eout loop below, which
    # sizes its range from N_list[-1]
    E_steps2 = []
    for m in m_list:
        for r in range(reps):
            MPS = construct_init_state(d, N)
            t0 = tm()
            MPS, t_s, E_j, E_s = two_site_DMRG(MPS, MPO, m, num_sweeps, verbose=vb)
            t1 = tm()
            # energy and energy variance <H^2> - <H>^2 of the converged state
            E1 = np.real(expectation(MPS, MPO))
            E2 = np.real(expectation(MPS, MPO2))
            E += [E1]
            Var += [E2 - E1 * E1]
            t += [t1 - t0]
            E_sweeps += [E_s]
            E_steps += [E_j]
            t_sweeps += [t_s]
            E_steps2 += [E_j]
            print("N", N, "m", m, "rep", r, "time:", t1 - t0, "energy:", E1, "var", Var[-1])
# reshape the flat accumulators to (N, m, rep) for tabulation
E = np.array(E).reshape(len(N_list), len(m_list), reps)
Var = np.array(Var).reshape(len(N_list), len(m_list), reps)
t = np.array(t).reshape(len(N_list), len(m_list), reps)
E_steps2 = np.array(E_steps2).reshape(len(m_list), reps, -1)
import csv
# print the outputs of each trial
file = open(model + "out.csv", 'w', newline='')
f = csv.writer(file)
f.writerow(["N", "m", "reps", "E", "var", "t", "dt"])
for i in range(len(N_list)):
    for j in range(len(m_list)):
        f.writerow([N_list[i], m_list[j], reps, E[i, j, 0], Var[i, j, 0], t[i, j, :].mean(), t[i, j, :].std()])
file.close()
# print all times for each repetition for more detailed analysis later
file = open(model + "tout.csv", 'w', newline='')
f = csv.writer(file)
f.writerow(["N", "m", "rep", "t"])
for i in range(len(N_list)):
    for j in range(len(m_list)):
        for r in range(reps):
            # t_sweeps is flat in (N, m, rep) order; recover the index by hand
            f.writerow([N_list[i], m_list[j], r, *t_sweeps[len(m_list) * reps * i + reps * j + r]])
file.close()
# print the energy found for each iteration (first rep, last N only)
file = open(model + "Eout.csv", 'w', newline='')
f = csv.writer(file)
f.writerow(["m", "E"])
for j in range(len(m_list)):
    for i in range((N_list[-1] - 2) * 2 * num_sweeps):
        f.writerow([m_list[j], E_steps2[j, 0, i]])
file.close()
| [
"numpy.identity",
"numpy.reshape",
"csv.writer",
"numpy.linalg.svd",
"scipy.sparse.linalg.eigsh",
"numpy.array",
"numpy.zeros",
"numpy.moveaxis",
"time.time"
] | [((9257, 9273), 'csv.writer', 'csv.writer', (['file'], {}), '(file)\n', (9267, 9273), False, 'import csv\n'), ((9649, 9665), 'csv.writer', 'csv.writer', (['file'], {}), '(file)\n', (9659, 9665), False, 'import csv\n'), ((10014, 10030), 'csv.writer', 'csv.writer', (['file'], {}), '(file)\n', (10024, 10030), False, 'import csv\n'), ((1302, 1319), 'numpy.array', 'np.array', (['[[[1]]]'], {}), '([[[1]]])\n', (1310, 1319), True, 'import numpy as np\n'), ((3424, 3476), 'scipy.sparse.linalg.eigsh', 'sparse.linalg.eigsh', (['H', '(1)'], {'v0': 'B', 'which': '"""SA"""', 'tol': 'tol'}), "(H, 1, v0=B, which='SA', tol=tol)\n", (3443, 3476), True, 'import scipy.sparse as sparse\n'), ((3593, 3613), 'numpy.moveaxis', 'np.moveaxis', (['V', '(1)', '(2)'], {}), '(V, 1, 2)\n', (3604, 3613), True, 'import numpy as np\n'), ((3726, 3742), 'numpy.linalg.svd', 'np.linalg.svd', (['V'], {}), '(V)\n', (3739, 3742), True, 'import numpy as np\n'), ((4380, 4400), 'numpy.moveaxis', 'np.moveaxis', (['v', '(0)', '(1)'], {}), '(v, 0, 1)\n', (4391, 4400), True, 'import numpy as np\n'), ((6258, 6277), 'numpy.zeros', 'np.zeros', (['(d, 1, 1)'], {}), '((d, 1, 1))\n', (6266, 6277), True, 'import numpy as np\n'), ((6311, 6330), 'numpy.zeros', 'np.zeros', (['(d, 1, 1)'], {}), '((d, 1, 1))\n', (6319, 6330), True, 'import numpy as np\n'), ((6519, 6533), 'numpy.identity', 'np.identity', (['(2)'], {}), '(2)\n', (6530, 6533), True, 'import numpy as np\n'), ((6543, 6559), 'numpy.zeros', 'np.zeros', (['[2, 2]'], {}), '([2, 2])\n', (6551, 6559), True, 'import numpy as np\n'), ((6570, 6601), 'numpy.array', 'np.array', (['[[0.5, 0], [0, -0.5]]'], {}), '([[0.5, 0], [0, -0.5]])\n', (6578, 6601), True, 'import numpy as np\n'), ((6612, 6638), 'numpy.array', 'np.array', (['[[0, 0], [1, 0]]'], {}), '([[0, 0], [1, 0]])\n', (6620, 6638), True, 'import numpy as np\n'), ((6649, 6675), 'numpy.array', 'np.array', (['[[0, 1], [0, 0]]'], {}), '([[0, 1], [0, 0]])\n', (6657, 6675), True, 'import numpy as np\n'), ((6686, 6712), 
'numpy.array', 'np.array', (['[[0, 1], [1, 0]]'], {}), '([[0, 1], [1, 0]])\n', (6694, 6712), True, 'import numpy as np\n'), ((6723, 6756), 'numpy.array', 'np.array', (['[[0, -1.0j], [1.0j, 0]]'], {}), '([[0, -1.0j], [1.0j, 0]])\n', (6731, 6756), True, 'import numpy as np\n'), ((2896, 2913), 'numpy.reshape', 'np.reshape', (['L', '(-1)'], {}), '(L, -1)\n', (2906, 2913), True, 'import numpy as np\n'), ((4551, 4570), 'numpy.array', 'np.array', (['[[[1.0]]]'], {}), '([[[1.0]]])\n', (4559, 4570), True, 'import numpy as np\n'), ((4726, 4745), 'numpy.array', 'np.array', (['[[[1.0]]]'], {}), '([[[1.0]]])\n', (4734, 4745), True, 'import numpy as np\n'), ((4873, 4877), 'time.time', 'tm', ([], {}), '()\n', (4875, 4877), True, 'from time import time as tm\n'), ((5963, 5967), 'time.time', 'tm', ([], {}), '()\n', (5965, 5967), True, 'from time import time as tm\n'), ((6811, 6929), 'numpy.array', 'np.array', (['[[I, Sz, 0.5 * Sp, 0.5 * Sm, Z], [Z, Z, Z, Z, Sz], [Z, Z, Z, Z, Sm], [Z, Z,\n Z, Z, Sp], [Z, Z, Z, Z, I]]'], {}), '([[I, Sz, 0.5 * Sp, 0.5 * Sm, Z], [Z, Z, Z, Z, Sz], [Z, Z, Z, Z, Sm\n ], [Z, Z, Z, Z, Sp], [Z, Z, Z, Z, I]])\n', (6819, 6929), True, 'import numpy as np\n'), ((7033, 7075), 'numpy.array', 'np.array', (['[[I, Sz, 0.5 * Sp, 0.5 * Sm, Z]]'], {}), '([[I, Sz, 0.5 * Sp, 0.5 * Sm, Z]])\n', (7041, 7075), True, 'import numpy as np\n'), ((7090, 7128), 'numpy.array', 'np.array', (['[[Z], [Sz], [Sm], [Sp], [I]]'], {}), '([[Z], [Sz], [Sm], [Sp], [I]])\n', (7098, 7128), True, 'import numpy as np\n'), ((7204, 7254), 'numpy.array', 'np.array', (['[[I, sz, h * sx], [Z, Z, sz], [Z, Z, I]]'], {}), '([[I, sz, h * sx], [Z, Z, sz], [Z, Z, I]])\n', (7212, 7254), True, 'import numpy as np\n'), ((7317, 7344), 'numpy.array', 'np.array', (['[[I, sz, h * sx]]'], {}), '([[I, sz, h * sx]])\n', (7325, 7344), True, 'import numpy as np\n'), ((7359, 7390), 'numpy.array', 'np.array', (['[[h * sx], [sz], [I]]'], {}), '([[h * sx], [sz], [I]])\n', (7367, 7390), True, 'import numpy as np\n'), 
((8922, 8933), 'numpy.array', 'np.array', (['E'], {}), '(E)\n', (8930, 8933), True, 'import numpy as np\n'), ((8981, 8994), 'numpy.array', 'np.array', (['Var'], {}), '(Var)\n', (8989, 8994), True, 'import numpy as np\n'), ((9040, 9051), 'numpy.array', 'np.array', (['t'], {}), '(t)\n', (9048, 9051), True, 'import numpy as np\n'), ((9104, 9122), 'numpy.array', 'np.array', (['E_steps2'], {}), '(E_steps2)\n', (9112, 9122), True, 'import numpy as np\n'), ((2500, 2529), 'numpy.reshape', 'np.reshape', (['B', 'self.req_shape'], {}), '(B, self.req_shape)\n', (2510, 2529), True, 'import numpy as np\n'), ((8396, 8400), 'time.time', 'tm', ([], {}), '()\n', (8398, 8400), True, 'from time import time as tm\n'), ((8504, 8508), 'time.time', 'tm', ([], {}), '()\n', (8506, 8508), True, 'from time import time as tm\n')] |
import string
from faker import Faker
import numpy as np
import string
class Factory:
    def __init__(self, size: int):
        """
        The Data Mocker object

        Args:
            size(int): number of values to generate for every column
        """
        self._size = size
        self._fake = Faker()
        # numeric range modifier parsed from a "between,<min>,<max>" token;
        # None means "use each generator's default range"
        self._ranges = None

    def build_column(self, column_description: str):
        """
        Understand the data_type and build the column

        Args:
            column_description(str): the column data type and modifiers,
                e.g. "random_int between,5,10"
        TODO:
            - Support complex column data types
        Returns:
            list
        """
        column_description_parts = column_description.strip().split(' ')
        # @todo check the number of parts must be >= 1
        if len(column_description_parts) < 1:
            raise KeyError('Column description must contain a data_type')
        column_data_type = column_description_parts.pop(0)
        self.__set_modifier_from_column_description_parts(column_description_parts)
        return self.__get_data_type_builder_method(column_data_type)()

    def __set_modifier_from_column_description_parts(self, column_description_parts: list):
        """
        Setting Modifiers from the column description parts
        User can pass multiple modifiers in the same line

        Args:
            column_description_parts:
        Returns:
            None
        """
        # bug fix: clear any modifier left over from a previous column so
        # ranges never leak between build_column calls
        self._ranges = None
        for part in column_description_parts:
            """ Build ranges"""
            if part.startswith('between'):
                self._ranges = part.strip().split(',')
                self._ranges.remove('between')

    def __get_data_type_builder_method(self, data_type):
        """
        A dictionary lockup function to identify and execute the data factory function

        Args:
            data_type(str|list): the mock type function name
        Returns:
            function object
        """
        all_data_generators_dict = {
            # D
            "decrement": self.__decrement,
            # E
            "email": self.__email,
            # F
            "first_name": self.__first_name,
            # I
            "int": self.__int,
            # L
            "last_name": self.__last_name,
            # R
            "random_int": self.__random_int,
            "random_float": self.__random_float,
            # S
            "password": self.__password,
        }
        # fall back to the plain integer generator for unknown types
        if data_type in all_data_generators_dict.keys():
            return all_data_generators_dict.get(data_type)
        return all_data_generators_dict.get('int')

    # D
    def __decrement(self):
        """
        Generate a list of decremented integers between the requested size and 1

        Returns:
            list
        """
        return np.arange(start=self._size, stop=0, step=-1)

    # E
    def __email(self):
        """
        Generate a list of fake emails

        Returns:
            list
        """
        return [self._fake.email() for _ in range(self._size)]

    # F
    def __first_name(self):
        """
        Generate a list of first names

        Returns:
            list
        """
        return [self._fake.first_name() for _ in range(self._size)]

    # I
    def __int(self):
        """
        Generate a list of integers between 1 and requested size

        Returns:
            np.array
        """
        return np.arange(start=1, stop=self._size + 1, step=1)

    # L
    def __last_name(self):
        """
        Generate a list of last names

        Returns:
            list
        """
        return [self._fake.last_name() for _ in range(self._size)]

    # R
    def __random_int(self):
        """
        Generate a list of random integers between two numbers

        Returns:
            np array
        """
        rand_min, rand_max = 0, 100
        if self._ranges:
            # bug fix: convert to int before comparing — the parsed ranges are
            # strings, so min/max compared lexicographically and np.random.randint
            # rejected the string bounds
            bounds = [int(value) for value in self._ranges]
            rand_min, rand_max = min(bounds), max(bounds)
        return np.random.randint(rand_min, rand_max, size=self._size)

    def __random_float(self):
        """
        Generate a list of random floats between two numbers

        Returns:
            np array
        """
        rand_min, rand_max = 0, 1
        if self._ranges:
            # bug fix: convert to float before min/max (strings compare
            # lexicographically, which could swap the bounds)
            bounds = [float(value) for value in self._ranges]
            rand_min, rand_max = min(bounds), max(bounds)
        return np.random.uniform(rand_min, rand_max, size=self._size).round(4)

    # P
    def __password(self):
        """
        Generate a password string

        Returns:
            str: the return value is a string
        """
        password_max_length = 12
        if self._ranges:
            # bug fix: faker expects an integer length, and string max() is
            # lexicographic
            password_max_length = max(int(value) for value in self._ranges)
        return [self._fake.password(length=password_max_length) for _ in range(self._size)]
| [
"faker.Faker",
"numpy.random.randint",
"numpy.arange",
"numpy.random.uniform"
] | [((227, 234), 'faker.Faker', 'Faker', ([], {}), '()\n', (232, 234), False, 'from faker import Faker\n'), ((2730, 2774), 'numpy.arange', 'np.arange', ([], {'start': 'self._size', 'stop': '(0)', 'step': '(-1)'}), '(start=self._size, stop=0, step=-1)\n', (2739, 2774), True, 'import numpy as np\n'), ((3342, 3389), 'numpy.arange', 'np.arange', ([], {'start': '(1)', 'stop': '(self._size + 1)', 'step': '(1)'}), '(start=1, stop=self._size + 1, step=1)\n', (3351, 3389), True, 'import numpy as np\n'), ((3899, 3953), 'numpy.random.randint', 'np.random.randint', (['rand_min', 'rand_max'], {'size': 'self._size'}), '(rand_min, rand_max, size=self._size)\n', (3916, 3953), True, 'import numpy as np\n'), ((4268, 4322), 'numpy.random.uniform', 'np.random.uniform', (['rand_min', 'rand_max'], {'size': 'self._size'}), '(rand_min, rand_max, size=self._size)\n', (4285, 4322), True, 'import numpy as np\n')] |
"""
Functions for generating and printing information
or any other functions that may be generally useful
"""
import numpy as np
def gen_test_numbers(
    len_lim: int = 10,
    dummy: bool = False,
    default_range: tuple = (-99, 99),
):
    """Produce a list of test integers.

    Parameters
    ----------
    len_lim : int
        How many numbers to generate.
    dummy : bool
        When True, skip random generation and return the fixed example
        list from the problem description.
    default_range : tuple
        (low, high) bounds forwarded to ``np.random.randint``
        (high is exclusive). Wider ranges slow downstream processing.

    Returns
    -------
    list
        ``len_lim`` integers drawn from ``default_range``, or the fixed
        example list when ``dummy`` is set.
    """
    if dummy:
        return [-5, -2, -5, 0, 5, 5, 2]
    drawn = np.random.randint(
        *default_range,  # min and max numbers
        size=len_lim,
    )
    return list(drawn)
def print_results(
    input_nums: list,
    result_tuples: list,
):
    """Print the evaluated input and the resulting triplets in the
    expected output format.

    Parameters
    ----------
    input_nums: list
        List of integers evaluated
    result_tuples : list
        List of tuples of triplets
    """
    # remove spaces so the list prints as [a,b,c] rather than [a, b, c]
    preped_input = str(input_nums).replace(" ", "")
    print(f"\narray = {preped_input}\n")
    print(f"Output should be -> ", end="")
    # print each triplet, comma-separated except after the last one
    for i, (a, b, c) in enumerate(result_tuples):
        print(f"[{a},{b},{c}]", end="")
        if i != len(result_tuples) - 1:
            print(", ", end="")
    print() # go to the next line after printing
    # print total
    print(f"Total -> {len(result_tuples)} tuples") | [
"numpy.random.randint"
] | [((762, 809), 'numpy.random.randint', 'np.random.randint', (['*default_range'], {'size': 'len_lim'}), '(*default_range, size=len_lim)\n', (779, 809), True, 'import numpy as np\n')] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.