repo
stringlengths 2
99
| file
stringlengths 13
225
| code
stringlengths 0
18.3M
| file_length
int64 0
18.3M
| avg_line_length
float64 0
1.36M
| max_line_length
int64 0
4.26M
| extension_type
stringclasses 1
value |
|---|---|---|---|---|---|---|
B-SOID
|
B-SOID-master/bsoid_app/analysis_subroutines/trajectory_analysis.py
|
import h5py
import matplotlib.colors as mcolors
import matplotlib.pyplot as plt
import pandas as pd
import streamlit as st
from analysis_subroutines.analysis_scripts.trajectory_plot import *
class trajectory:
    """Streamlit module for plotting limb trajectories of one session over a
    user-chosen time window (paper Figure 2D/G).

    All UI inputs (session, body parts, time range, top/bottom grouping,
    colors) are collected in __init__; plot() renders and optionally saves.
    """

    def __init__(self, working_dir, prefix, soft_assignments, framerate, filenames, new_data, new_predictions):
        st.subheader('LIMB TRAJECTORIES (PAPER **FIGURE 2D/G**)')
        self.working_dir = working_dir
        self.prefix = prefix
        self.soft_assignments = soft_assignments
        self.framerate = framerate
        self.filenames = filenames
        self.new_data = new_data
        self.new_predictions = new_predictions
        # Widget is 1-based for the user; store a 0-based session index.
        self.animal_index = int(st.number_input('Which session? '
                                                'You have a total of {} sessions'.format(len(self.new_predictions)),
                                                min_value=1, max_value=len(self.new_predictions), value=1)) - 1
        self.c = []  # [first-group color, second-group color]
        self.pose_chosen = []  # column indices of the selected body parts
        # Crude file-type sniff from the first filename's extension.
        file_type = [s for i, s in enumerate(['csv', 'h5', 'json']) if s in self.filenames[0].partition('.')[-1]][0]
        if file_type == 'csv':
            file0_df = pd.read_csv(self.filenames[0], low_memory=False)
            file0_array = np.array(file0_df, dtype=object)
            # DLC csv layout: body-part names repeat every 3rd column (x, y, likelihood).
            p = st.multiselect('Select body parts for trajectory:', [*file0_array[0, 1:-1:3]],
                               [*file0_array[0, 1:-1:3]])
            for b in p:
                index = [i for i, s in enumerate(file0_array[0, 1:-1:3]) if b in s]
                if not index in self.pose_chosen:
                    self.pose_chosen += index
            self.pose_chosen.sort()
        elif file_type == 'h5':
            try:
                # DeepLabCut h5 export (multi-index columns; level 1 = body part).
                file0_df = pd.read_hdf(self.filenames[0], low_memory=False)
                p = st.multiselect('Identified __pose__ to include:',
                                   [*np.array(file0_df.columns.get_level_values(1)[1:-1:3])],
                                   [*np.array(file0_df.columns.get_level_values(1)[1:-1:3])])
                for b in p:
                    index = [i for i, s in enumerate(np.array(file0_df.columns.get_level_values(1))) if b in s]
                    if not index in self.pose_chosen:
                        self.pose_chosen += index
                self.pose_chosen.sort()
            except:  # NOTE(review): bare except also hides genuine read errors
                st.info('Detecting SLEAP h5 files...')
                file0_df = h5py.File(self.filenames[0], 'r')
                p = st.multiselect('Identified __pose__ to include:',
                                   [*np.array(file0_df['node_names'][:])],
                                   [*np.array(file0_df['node_names'][:])])
                for b in p:
                    index = [i for i, s in enumerate(np.array(file0_df['node_names'][:])) if b in s]
                    if not index in self.pose_chosen:
                        self.pose_chosen += index
                self.pose_chosen.sort()
        # Time window (minutes + seconds) -> frame indices below.
        start_min = int(st.number_input('From minute:',
                                        min_value=0,
                                        max_value=int((len(self.new_data[self.animal_index])) /
                                                      (60 * self.framerate)), value=0))
        start_sec = st.number_input('and second:', min_value=0.0, max_value=59.9, value=1.0)
        stop_min = int(st.number_input('till minute:',
                                       min_value=0,
                                       max_value=int((len(self.new_data[self.animal_index])) /
                                                     (60 * self.framerate)), value=0))
        stop_sec = st.number_input('till second:', min_value=0.0, max_value=59.9, value=11.0)
        start = int((start_min * 60 + start_sec) * self.framerate) - 1
        stop = int((stop_min * 60 + stop_sec) * self.framerate)
        self.time_range = [start, stop]  # [start frame, stop frame]
        # Assign the chosen body parts to two display groups (top/bottom panels).
        order_top = st.multiselect('Which should be grouped to the top?', p, p)
        self.order1 = []
        for o in order_top:
            index = [i for i, s in enumerate(p) if o in s]
            if not index in self.order1:
                self.order1 += index
        order_bottom = st.multiselect('Which should be grouped to the bottom?', p, p)
        self.order2 = []
        for o in order_bottom:
            index = [i for i, s in enumerate(p) if o in s]
            if not index in self.order2:
                self.order2 += index
        color1 = st.selectbox('Choose color for first group', list(mcolors.CSS4_COLORS.keys()), index=16)
        color2 = st.selectbox('Choose color for second group', list(mcolors.CSS4_COLORS.keys()), index=22)
        self.c = [color1, color2]

    def plot(self):
        """Render the trajectory figure and offer a save-to-disk button."""
        try:
            if st.checkbox('Show trajectory', False, key='tp'):
                labels, limbs, soft_assigns = limb_trajectory(self.working_dir, self.prefix,
                                                              self.animal_index, self.pose_chosen, self.time_range)
                fig, ax1, ax2 = plot_trajectory(limbs=limbs, labels=labels, soft_assignments=self.soft_assignments,
                                                t_range=self.time_range, ord1=self.order1,
                                                ord2=self.order2, c=self.c, fig_size=(5, 3), save=False)
                fig.suptitle('Trajectory visual')
                ax1.set_ylabel('$\Delta$ pixels')
                ax2.set_ylabel('$\Delta$ pixels')
                ax2.set_xlabel('Frame number')
                st.pyplot(fig)
        except IndexError:
            st.error('Range out of bounds or no transitions detected!')
        fig_format = str(st.selectbox('What file type?',
                                      list(plt.gcf().canvas.get_supported_filetypes().keys()), index=5))
        outpath = str.join('', (st.text_input('Where would you like to save it?'), '/'))
        if st.button('Save in {}?'.format(outpath)):
            # NOTE(review): `limbs`/`labels` only exist if the checkbox above was
            # checked during this rerun; pressing Save without it raises NameError.
            plot_trajectory(limbs=limbs, labels=labels, soft_assignments=self.soft_assignments,
                            t_range=self.time_range, ord1=self.order1, ord2=self.order2, c=self.c,
                            fig_size=(8.5, 16), fig_format=fig_format, outpath=outpath, save=True)

    def main(self):
        """Entry point used by the app driver."""
        self.plot()
| 6,324
| 54.973451
| 116
|
py
|
B-SOID
|
B-SOID-master/bsoid_app/analysis_subroutines/video_analysis.py
|
import ffmpeg
import streamlit as st
from analysis_subroutines.analysis_utilities.visuals import *
from analysis_subroutines.analysis_scripts.umap_clustering_plot import plot_enhanced_umap
class bsoid_video:
    """Streamlit module that builds a video synchronized side-by-side with the
    UMAP cluster embedding (paper Supp. Video 1)."""

    def __init__(self, working_dir, prefix, features, sampled_features,
                 sampled_embeddings, soft_assignments, framerate, filenames, new_data):
        st.subheader('SYNCHRONIZED B-SOID VIDEO (PAPER **SUPP. VIDEO 1**)')
        st.markdown('Cluster plot from training, we will see how well purely clustering maps onto behaviors by '
                    'synchronizing the video next to it')
        self.working_dir = working_dir
        self.prefix = prefix
        self.features = features
        self.sampled_features = sampled_features
        self.sampled_embeddings = sampled_embeddings
        self.soft_assignments = soft_assignments
        self.framerate = framerate
        self.filenames = filenames
        self.new_data = new_data
        # Undo the random subsampling applied at training time so embeddings and
        # labels line up chronologically; seed 0 must match the training step.
        sim_array = np.arange(0, self.features.shape[1])
        np.random.seed(0)
        shuffled_idx = np.random.choice(sim_array, self.sampled_features.shape[0], replace=False)
        ordered_ind = np.argsort(shuffled_idx)
        self.ordered_embeds = self.sampled_embeddings[ordered_ind, :]
        self.ordered_assigns = self.soft_assignments[ordered_ind]
        fig, ax = plot_enhanced_umap(self.working_dir, self.prefix, fig_size=(5, 3), save=False)
        col1, col2 = st.beta_columns([2, 2])  # NOTE(review): beta_columns is an older Streamlit API — confirm pinned version
        col1.pyplot(fig)
        self.vid_path = st.text_input('Enter corresponding video directory (Absolute path):')
        try:
            os.listdir(self.vid_path)
            st.markdown('You have selected **{}** as your video directory.'.format(self.vid_path))
        except FileNotFoundError:
            st.error('No such directory')
        self.vid_name = st.selectbox('Select the video (.mp4 or .avi)', sorted(os.listdir(self.vid_path)))
        f_partition = [self.filenames[i].rpartition('/')[-1] for i in range(len(self.filenames))]
        file4vid = st.selectbox('Which file corresponds to the video?',
                                f_partition, index=0)
        f_index = f_partition.index(file4vid)
        # Probe the chosen video for its pixel dimensions.
        probe = ffmpeg.probe(os.path.join(self.vid_path, self.vid_name))
        video_info = next(s for s in probe['streams'] if s['codec_type'] == 'video')
        self.width = int(video_info['width'])
        self.height = int(video_info['height'])
        self.mov_st_min = int(st.number_input('From minute:',
                                              min_value=0,
                                              max_value=int((len(self.new_data[f_index])) / (60 * self.framerate)),
                                              value=0))
        self.mov_st_sec = st.number_input('and second:', min_value=0.0, max_value=59.9, value=1.0)
        self.mov_sp_min = int(st.number_input('till minute:',
                                              min_value=0,
                                              max_value=int((len(self.new_data[f_index])) / (60 * self.framerate)),
                                              value=0))
        self.mov_sp_sec = st.number_input('till second:', min_value=0.0, max_value=59.9, value=10.0)
        # Frame offsets are global across concatenated sessions: skip all frames
        # of the sessions preceding f_index, then add the within-session offset.
        self.mov_start = np.sum([len(self.new_data[j]) for j in np.arange(0, f_index)]) + \
                         int((self.mov_st_min * 60 + self.mov_st_sec) * self.framerate) - 1
        self.mov_stop = np.sum([len(self.new_data[j]) for j in np.arange(0, f_index)]) + \
                        int((self.mov_sp_min * 60 + self.mov_sp_sec) * self.framerate)
        # Convert the frame range assuming a 10x downsampling of the framerate.
        self.mov_range = [round(self.mov_start / (self.framerate / 10)), round(self.mov_stop / (self.framerate / 10))]
        try:
            os.mkdir(str.join('', (self.working_dir, '/bsoid_videos')))
        except FileExistsError:
            pass
        try:
            os.mkdir(str.join('', (self.working_dir, '/bsoid_videos/session{}'.format(f_index))))
        except FileExistsError:
            pass
        # From here on, working_dir points at the per-session output folder.
        self.working_dir = str.join('', (self.working_dir, '/bsoid_videos/session{}'.format(f_index)))

    def generate(self):
        """Create the synchronized scatter+video clip, then display it."""
        if st.button('Generate synchronized B-SOiD video?'):
            try:
                umap_scatter(self.ordered_embeds, self.ordered_assigns, self.mov_range,
                             self.working_dir, self.width, self.height)
                trim_video(self.vid_path, self.vid_name, self.mov_range,
                           self.mov_st_min, self.mov_st_sec, self.mov_sp_min, self.mov_sp_sec, self.working_dir)
                video_umap(self.working_dir, self.mov_range)
            except IndexError:
                st.error('Range out of bounds!')
        if st.checkbox('Show left-right synchronized B-SOiD video (from {})?'.format(self.working_dir)):
            # NOTE(review): file handle is never closed — consider a `with` block.
            bsoid_vid_leftright = \
                open(os.path.join(str.join('', (self.working_dir,
                                                '/sync_leftright_video2umap{}_{}.mp4'.format(*self.mov_range)))), 'rb')
            st.markdown('You have selected to view synchronized video from {}.'.format(self.working_dir))
            bsoid_leftright_bytes = bsoid_vid_leftright.read()
            st.video(bsoid_leftright_bytes)
            st.markdown('After we visualize how well clusters map onto behaviors, we can utilize '
                        'a machine learning classifier to make more generalized prediction. '
                        'We can look at how well the machine learns the mapping in the **k-fold accuracy** module.')

    def main(self):
        """Entry point used by the app driver."""
        self.generate()
| 5,592
| 56.659794
| 119
|
py
|
B-SOID
|
B-SOID-master/bsoid_app/analysis_subroutines/__init__.py
| 0
| 0
| 0
|
py
|
|
B-SOID
|
B-SOID-master/bsoid_app/analysis_subroutines/directed_graph_analysis.py
|
import matplotlib as mpl
import matplotlib.pyplot as plt
import networkx as nx
import numpy as np
import streamlit as st
from analysis_subroutines.analysis_utilities import statistics
class directed_graph:
    """Streamlit module drawing a directed graph of behavioral transitions:
    node size tracks self-transition counts, edge color tracks transition
    probability (alpha feature)."""

    def __init__(self, working_dir, prefix, soft_assignments, folders, folder, new_predictions):
        st.subheader('(ALPHA) BEHAVIORAL DIRECTED GRAPH')
        self.working_dir = working_dir
        self.prefix = prefix
        self.soft_assignments = soft_assignments
        self.folders = folders
        self.folder = folder
        self.new_predictions = new_predictions
        # Filled by compute_dynamics(); consumed by plot().
        self.node_sizes = []
        self.transition_matrix_norm = []

    def compute_dynamics(self):
        """Average transition counts/probabilities over all sessions of the
        folder selected in the sidebar."""
        selected_flder = st.sidebar.selectbox('select folder', [*self.folders])
        try:
            indices = [i for i, s in enumerate(self.folder) if str(selected_flder) in s]
            tm_count_all = []
            tm_prob_all = []
            for idx in indices:
                runlen_df, dur_stats, tm_array, tm_df, tm_norm = statistics.main(self.new_predictions[idx],
                                                                                 len(np.unique(self.soft_assignments)))
                tm_count_all.append(tm_array)
                tm_prob_all.append(tm_norm)
            tm_count_mean = np.nanmean(tm_count_all, axis=0)
            tm_prob_mean = np.nanmean(tm_prob_all, axis=0)
            # Diagonal (self-transition) counts, normalized, scale node sizes.
            diag = [tm_count_mean[i][i] for i in range(len(tm_count_mean))]
            diag_p = np.array(diag) / np.array(diag).max()
            self.node_sizes = [50 * i for i in diag_p]
            transition_matrix = np.matrix(tm_prob_mean)  # NOTE(review): np.matrix is deprecated in numpy
            np.fill_diagonal(transition_matrix, 0)
            # Re-normalize each row after zeroing the diagonal.
            self.transition_matrix_norm = transition_matrix / transition_matrix.sum(axis=1)
            nan_indices = np.isnan(self.transition_matrix_norm)
            self.transition_matrix_norm[nan_indices] = 0
        except:  # NOTE(review): bare except silently leaves results empty on any failure
            pass

    def plot(self):
        """Render the directed graph with a colorbar keyed to edge weights."""
        if st.checkbox('Show directed graph?', False, key='ds'):
            fig = plt.figure()
            # NOTE(review): nx.from_numpy_matrix is absent in newer networkx —
            # confirm the pinned networkx version supports it.
            graph = nx.from_numpy_matrix(self.transition_matrix_norm, create_using=nx.MultiDiGraph())
            node_position = nx.layout.spring_layout(graph, seed=0)
            edge_colors = [graph[u][v][0].get('weight') for u, v in graph.edges()]
            nodes = nx.draw_networkx_nodes(graph, node_position, node_size=self.node_sizes,
                                           node_color='blue')
            edges = nx.draw_networkx_edges(graph, node_position, node_size=self.node_sizes, arrowstyle='->',
                                           arrowsize=8, edge_color=edge_colors, edge_cmap=plt.cm.Blues, width=1.5)
            # Offset labels slightly from the node centers.
            label_pos = [node_position[i] + 0.005 for i in range(len(node_position))]
            nx.draw_networkx_labels(graph, label_pos, font_size=10)
            pc = mpl.collections.PatchCollection(edges, cmap=plt.cm.Blues)
            pc.set_array(edge_colors)
            plt.colorbar(pc)
            ax = plt.gca()
            ax.set_axis_off()
            st.pyplot(fig)

    def main(self):
        """Entry point used by the app driver."""
        self.compute_dynamics()
        self.plot()
| 3,146
| 44.608696
| 119
|
py
|
B-SOID
|
B-SOID-master/bsoid_app/analysis_subroutines/kinematics_analysis.py
|
import h5py
import matplotlib.colors as mcolors
import pandas as pd
import streamlit as st
from analysis_subroutines.analysis_scripts.extract_kinematics import *
from analysis_subroutines.analysis_utilities.visuals import *
from analysis_subroutines.analysis_utilities.save_data import results
from analysis_subroutines.analysis_utilities.load_data import load_sav
class kinematics:
    """Streamlit module computing per-bout kinematics (displacement, peak
    speed, duration) for one behavioral group, compared between control and
    experimental session groups (paper Figure 6B/D)."""

    def __init__(self, working_dir, prefix, framerate, soft_assignments, filenames):
        st.subheader('(BETA) KINEMATICS (PAPER **FIGURE 6B/D**)')
        self.working_dir = working_dir
        self.prefix = prefix
        self.framerate = framerate
        self.soft_assignments = soft_assignments
        self.filenames = filenames
        # NOTE(review): max_value allows len(unique) although valid groups run
        # 0 .. len(unique) - 1 — confirm the off-by-one is intended.
        self.group_num = int(st.number_input('Which behavioral group? You have a total of {} groups '
                                             'starting from 0 to {}'.format(len(np.unique(self.soft_assignments)),
                                                                            len(np.unique(self.soft_assignments)) - 1),
                                             min_value=0, max_value=len(np.unique(self.soft_assignments)), value=0))
        self.c = []  # [control color, experimental color]
        # Per-condition kinematic aggregates; filled by find_peaks()/load_sav().
        self.bps_exp1_bout_disp = []
        self.bps_exp2_bout_disp = []
        self.bps_exp1_bout_peak_speed = []
        self.bps_exp2_bout_peak_speed = []
        self.bps_exp1_bout_dur = []
        self.bps_exp2_bout_dur = []
        self.vid_outpath = []
        self.pose_chosen = []
        # Crude file-type sniff from the first filename's extension.
        file_type = [s for i, s in enumerate(['csv', 'h5', 'json']) if s in self.filenames[0].partition('.')[-1]][0]
        if file_type == 'csv':
            file0_df = pd.read_csv(self.filenames[0], low_memory=False)
            file0_array = np.array(file0_df, dtype=object)
            # DLC csv layout: body-part names repeat every 3rd column.
            self.p = st.multiselect('Select body parts for kinematics:', [*file0_array[0, 1:-1:3]],
                                    [*file0_array[0, 1:-1:3]])
            for b in self.p:
                index = [i for i, s in enumerate(file0_array[0, 1:-1:3]) if b in s]
                if not index in self.pose_chosen:
                    self.pose_chosen += index
        elif file_type == 'h5':
            try:
                # DeepLabCut h5 export (multi-index columns; level 1 = body part).
                file0_df = pd.read_hdf(self.filenames[0], low_memory=False)
                self.p = st.multiselect('Identified __pose__ to include:',
                                        [*np.array(file0_df.columns.get_level_values(1)[1:-1:3])],
                                        [*np.array(file0_df.columns.get_level_values(1)[1:-1:3])])
                for b in self.p:
                    index = [i for i, s in enumerate(np.array(file0_df.columns.get_level_values(1)[:])) if b in s]
                    if not index in self.pose_chosen:
                        self.pose_chosen += index
            except:  # fall back to SLEAP h5 layout; NOTE(review): bare except hides read errors
                file0_df = h5py.File(self.filenames[0], 'r')
                self.p = st.multiselect('Identified __pose__ to include:',
                                        [*np.array(file0_df['node_names'][:])],
                                        [*np.array(file0_df['node_names'][:])])
                for b in self.p:
                    index = [i for i, s in enumerate(np.array(file0_df['node_names'][:])) if b in s]
                    if not index in self.pose_chosen:
                        self.pose_chosen += index
        self.pose_chosen.sort()
        fname_partition = [self.filenames[i].rpartition('/')[-1] for i in range(len(self.filenames))]
        order_ctrl = st.multiselect('Which sessions to be grouped together as CONTROL?',
                                    fname_partition, fname_partition)
        self.control = []
        for o in order_ctrl:
            index = [i for i, s in enumerate(self.filenames) if o in s]
            if not index in self.control:
                self.control += index
        color1 = st.selectbox('Choose color for CONTROL', list(mcolors.CSS4_COLORS.keys()), index=41)
        order_expt = st.multiselect('Which sessions to be grouped together as EXPERIMENTAL?',
                                    fname_partition, fname_partition)
        self.experimental = []
        for o in order_expt:
            # NOTE(review): uses the `filenames` parameter here vs. self.filenames
            # above — same object inside __init__, but inconsistent style.
            index = [i for i, s in enumerate(filenames) if o in s]
            if not index in self.experimental:
                self.experimental += index
        self.conditions = [self.control, self.experimental]
        color2 = st.selectbox('Choose color for EXPERIMENTAL', list(mcolors.CSS4_COLORS.keys()), index=120)
        self.c = [color1, color2]
        self.variable_name = st.text_input('Give it a variable name to save:')

    def find_peaks(self):
        """Run kinematics extraction for the chosen group and persist results."""
        if st.button('Start analyzing kinematics?'):
            [_, _, all_bouts_disp, all_bouts_peak_speed, all_bouts_dur, self.vid_outpath] = \
                get_kinematics(self.working_dir, self.prefix, self.conditions, self.group_num,
                               self.pose_chosen, self.framerate)
            [self.bps_exp1_bout_disp, self.bps_exp2_bout_disp, self.bps_exp1_bout_peak_speed,
             self.bps_exp2_bout_peak_speed, self.bps_exp1_bout_dur, self.bps_exp2_bout_dur] = \
                group_kinematics(all_bouts_disp, all_bouts_peak_speed, all_bouts_dur, self.conditions)
            results_ = results(self.working_dir, self.prefix)
            results_.save_sav(
                [self.bps_exp1_bout_disp, self.bps_exp2_bout_disp, self.bps_exp1_bout_peak_speed,
                 self.bps_exp2_bout_peak_speed, self.bps_exp1_bout_dur, self.bps_exp2_bout_dur, self.p,
                 self.pose_chosen,
                 self.conditions, self.vid_outpath], self.variable_name)
            st.info('Done analyzing kinematics. Click "R" for plots.')

    def plot(self, save, out_path, fig_format):
        """Draw per-body-part CDFs of distance, peak speed and bout duration.

        :param save: bool, also write each figure to disk
        :param out_path: str, output directory (with trailing '/')
        :param fig_format: str, matplotlib-supported file format
        """
        for pose in range(len(self.p)):
            f, ax = plt.subplots(1, 3)
            f.suptitle('Kinematics CDF for {}'.format(self.variable_name))
            try:
                fig1, ax1 = plot_kinematics_cdf(ax.flatten()[0], 'distance', self.variable_name,
                                                [self.bps_exp1_bout_disp[pose], self.bps_exp2_bout_disp[pose]],
                                                self.c, 50, 3, 1, fig_size=(5, 3), save=False)
                ax1.title.set_text('Distance')
                # NOTE(review): .format() has no placeholder in this label string.
                ax1.set_xlabel('Dist. ($\Delta$ pix)'.format(self.p[pose]))
                ax1.set_ylabel('Cumulative probability')
            except:
                pass
            try:
                fig2, ax2 = plot_kinematics_cdf(ax.flatten()[1], 'speed', self.variable_name,
                                                [self.bps_exp1_bout_peak_speed[pose],
                                                 self.bps_exp2_bout_peak_speed[pose]],
                                                self.c, 50, 3, 1, fig_size=(5, 3), save=False)
                ax2.title.set_text('Peak speed')
                ax2.set_xlabel('{} speed (pix/frm)'.format(self.p[pose]))
                ax2.yaxis.set_ticklabels([])
            except:
                pass
            try:
                # Frames -> milliseconds for the duration axis.
                fig3, ax3 = plot_kinematics_cdf(ax.flatten()[2], 'duration', self.variable_name,
                                                [self.bps_exp1_bout_dur[pose] / self.framerate * 1000,
                                                 self.bps_exp2_bout_dur[pose] / self.framerate * 1000],
                                                self.c, 50, 3, 1, fig_size=(5, 3), save=False)
                ax3.title.set_text('Bout duration')
                ax3.set_xlabel('Duration (ms)'.format(self.p[pose]))
                ax3.yaxis.set_ticklabels([])
            except:
                pass
            st.pyplot(f)
            if save:
                try:
                    # NOTE(review): '_cdf.' already ends with a dot and another
                    # '.' is joined before fig_format, yielding '..<ext>'.
                    f.savefig(str.join('', (out_path, '/{}_kin_{}_cdf.'.format(self.p[pose], self.variable_name), '.',
                                            fig_format)), dpi=300, format=fig_format, transparent=False)
                except RuntimeError:
                    st.error('Could not save in this format, find another one (jpeg/png/svg)?')

    def main(self):
        """Load saved kinematics if available; otherwise run a fresh analysis."""
        try:
            [self.bps_exp1_bout_disp, self.bps_exp2_bout_disp, self.bps_exp1_bout_peak_speed,
             self.bps_exp2_bout_peak_speed, self.bps_exp1_bout_dur, self.bps_exp2_bout_dur, self.p, self.pose_chosen,
             self.conditions, self.vid_outpath] = load_sav(self.working_dir, self.prefix, self.variable_name)
            st.markdown('**Peak speed** computed with ***instantaneous values of peaks***. '
                        '**Distance** computed using the ***area under the curve from start to end '
                        '(colored)*** of peak. '
                        '**Bout duration** computed using ***number of consecutive frames*** in B-SOID defined bouts. '
                        'The pose trajectory algorithm performance can be visualized above '
                        '(checkbox, 50% random samples).')
            if st.checkbox('Redo?', key='r'):
                self.find_peaks()
            ftype_out = st.selectbox('What file type?',
                                     list(plt.gcf().canvas.get_supported_filetypes().keys()), index=5)
            out_path = str.join('', (st.text_input('Where would you like to save it?'), '/'))
            save = st.checkbox('Save in {}?'.format(out_path), False, key='sa')
            if save:
                self.plot(save=True, out_path=out_path, fig_format=ftype_out)
            else:
                self.plot(save=False, out_path=out_path, fig_format=ftype_out)
            if st.checkbox('Show peak finding algorithm performance?', False):
                # NOTE(review): file handle is never closed — consider `with`.
                example_vid_file = open(os.path.join(str.join('', (self.vid_outpath,
                                                                   '/kinematics_subsample_examples.mp4'))), 'rb')
                st.markdown('You have selected to view examples from {}.'.format(self.vid_outpath))
                video_bytes = example_vid_file.read()
                st.video(video_bytes)
        except:  # no saved variable yet (or load failed) -> run the analysis
            self.find_peaks()
| 10,088
| 57.656977
| 119
|
py
|
B-SOID
|
B-SOID-master/bsoid_app/analysis_subroutines/machine_performance.py
|
import matplotlib.colors as mcolors
import streamlit as st
from analysis_subroutines.analysis_scripts.kfold_accuracy import *
from analysis_subroutines.analysis_utilities.load_data import load_sav
from analysis_subroutines.analysis_utilities.save_data import results
from analysis_subroutines.analysis_utilities.visuals import *
class performance:
    """Streamlit module reporting k-fold cross-validation accuracy of the
    classifier per behavioral group (paper Figure 2C)."""

    def __init__(self, working_dir, prefix, soft_assignments):
        st.subheader('K-FOLD ACCURACY (PAPER **FIGURE 2C**)')
        self.working_dir = working_dir
        self.prefix = prefix
        self.soft_assignments = soft_assignments
        self.k = int(st.number_input('How many folds cross-validation?', min_value=2, max_value=20, value=10))
        # Variable names under which raw/reordered accuracies are persisted.
        self.var_name = 'accuracy_kf_raw'
        self.var_ordered_name = 'accuracy_kf_ordered'
        self.order_class = st.multiselect('Order as follows:', list(np.unique(self.soft_assignments)),
                                          list(np.unique(self.soft_assignments)))
        self.accuracy_data = []
        self.accuracy_ordered = []

    def cross_validate(self):
        """Run k-fold CV, then save both the raw and user-ordered accuracies."""
        if st.button('Start K-fold cross-validation.', False):
            self.accuracy_data = generate_kfold(self.working_dir, self.prefix, self.k)
            results_ = results(self.working_dir, self.prefix)
            results_.save_sav([self.accuracy_data, self.k], self.var_name)
            self.accuracy_ordered = reorganize_group_order(self.accuracy_data, self.order_class)
            results_ = results(self.working_dir, self.prefix)
            results_.save_sav([self.accuracy_ordered, self.k], self.var_ordered_name)

    def load_performance(self):
        """Restore previously saved ordered accuracies (raises if absent)."""
        self.accuracy_ordered, self.k = load_sav(self.working_dir, self.prefix, self.var_ordered_name)

    def show_accuracy_plot(self):
        """Boxplot of per-group accuracy, with optional recoloring and export."""
        c = [list(mcolors.CSS4_COLORS.keys())[122]] * len(np.unique(self.soft_assignments))
        fig, ax = plot_accuracy_boxplot(None, self.accuracy_ordered, c, (5, 3), save=False)
        fig.suptitle('{}-fold group accuracy'.format(self.k))
        ax.set_xlabel('Accuracy')
        ax.set_ylabel('Group number')
        col1, col2 = st.beta_columns([2, 2])  # NOTE(review): older Streamlit API — confirm pinned version
        radio = st.radio(label='Change colors?', options=["Yes", "No"], index=1)
        if radio == 'No':
            col1.pyplot(fig)
            fig_format = str(st.selectbox('What file type?',
                                          list(plt.gcf().canvas.get_supported_filetypes().keys()), index=5))
            out_path = str.join('', (st.text_input('Where would you like to save it?'), '/'))
            if st.button('Save in {}?'.format(out_path)):
                plot_accuracy_boxplot('Randomforests', self.accuracy_ordered, c,
                                      (8.5, 16), fig_format, out_path, save=True)
        elif radio == 'Yes':
            # Build a fresh per-group palette from user selections.
            c = []
            for i in range(len(np.unique(self.soft_assignments))):
                color = st.selectbox('Choose color for ORDERED GROUP {}'.format(i),
                                     list(mcolors.CSS4_COLORS.keys()), index=122)
                c.append(color)
            fig2, ax2 = plot_accuracy_boxplot(None, self.accuracy_ordered, c, (5, 3), save=False)
            fig2.suptitle('{}-fold group accuracy'.format(self.k))
            ax2.set_xlabel('Accuracy')
            ax2.set_ylabel('Group number')
            try:
                col1.pyplot(fig2)
            except ValueError:
                st.error('Try another color, this color is not supported :/')
            fig_format = str(st.selectbox('What file type?',
                                          list(plt.gcf().canvas.get_supported_filetypes().keys()), index=5))
            out_path = str.join('', (st.text_input('Where would you like to save it?'), '/'))
            if st.button('Save in {}?'.format(out_path)):
                plot_accuracy_boxplot('Randomforests', self.accuracy_ordered, c,
                                      (8.5, 16), fig_format, out_path, save=True)

    def main(self):
        """Show saved performance, computing it first if necessary."""
        try:
            self.load_performance()
        except:  # nothing saved yet -> run CV, then load
            self.cross_validate()
            self.load_performance()
        self.show_accuracy_plot()
| 4,166
| 50.444444
| 110
|
py
|
B-SOID
|
B-SOID-master/bsoid_app/analysis_subroutines/analysis_utilities/processing.py
|
import re
from operator import itemgetter
import numpy as np
import pandas as pd
def convert_int(s):
    """Return int(s) when s is a pure digit string, otherwise s unchanged."""
    return int(s) if s.isdigit() else s
def alphanum_key(s):
    """ Turn a string into a list of string and number chunks.
        "z23a" -> ["z", 23, "a"]
    """
    # Split on digit runs (kept via the capture group); numeric chunks become ints.
    chunks = re.split('([0-9]+)', s)
    return [int(chunk) if chunk.isdigit() else chunk for chunk in chunks]
def sort_nicely(l):
    """ Sort the given list in the way that humans expect.
    """
    # In-place "natural" sort: numeric chunks compare as integers,
    # so 'file2' sorts before 'file10'.
    l.sort(key=alphanum_key)
def reorganize_group_order(accuracy_data, order):
    """Reindex every per-fold accuracy row by `order`.

    itemgetter(order) performs fancy indexing (row[order]) on array-like rows,
    returning one reordered row per input row.
    """
    return [itemgetter(order)(row) for row in accuracy_data]
class data_processing:
    """Thin wrapper around a 1-D signal providing smoothing utilities."""

    def __init__(self, data):
        # Raw 1-D sequence to be processed.
        self.data = data

    def boxcar_center(self, n):
        """Centered moving average of the signal with window `n`.

        Edges use partial windows (min_periods=1), so the output has the same
        length as the input. Returns a numpy array.
        """
        series = pd.Series(self.data)
        smoothed = series.rolling(window=n, min_periods=1, center=True).mean()
        return np.array(smoothed)
| 972
| 20.622222
| 80
|
py
|
B-SOID
|
B-SOID-master/bsoid_app/analysis_subroutines/analysis_utilities/statistics.py
|
import numpy as np
import pandas as pd
def transition_matrix(labels, n):
    """Build an n x n behavioral transition matrix from a label sequence.

    :param labels: iterable of int labels, each in range(n)
    :param n: int, number of behavioral groups
    :return tm_array: (n, n) count matrix, tm_array[i][j] = # of i -> j transitions
    :return tm_df: pandas DataFrame view of the same counts
    :return tm_norm: (n, n) row-normalized transition probabilities; rows with
        no outgoing transitions become NaN (0/0)
    """
    tm = [[0] * n for _ in range(n)]
    # Count consecutive-pair transitions.
    for (i, j) in zip(labels, labels[1:]):
        tm[i][j] += 1
    tm_df = pd.DataFrame(tm)
    tm_array = np.array(tm)
    # BUG FIX: normalize each ROW by its own sum. The original divided the
    # matrix by the 1-D row-sum vector, which broadcasts across columns, i.e.
    # entry (i, j) was divided by row j's sum instead of row i's.
    tm_norm = tm_array / tm_array.sum(axis=1, keepdims=True)
    return tm_array, tm_df, tm_norm
def rle(in_array):
    """Run-length encode a 1-D sequence.

    :param in_array: array-like of labels
    :return: (run lengths, run start positions, run values), or
        (None, None, None) for empty input
    """
    arr = np.asarray(in_array)
    total = len(arr)
    if total == 0:
        return (None, None, None)
    # Boolean mask of positions where the value changes from its predecessor.
    changed = np.array(arr[1:] != arr[:-1])
    # Indices where each run ends (the final element always ends a run).
    run_ends = np.append(np.where(changed), total - 1)
    run_lengths = np.diff(np.append(-1, run_ends))
    run_starts = np.cumsum(np.append(0, run_lengths))[:-1]
    return run_lengths, run_starts, arr[run_ends]
def feat_dist(feats):
    """Summarize the distribution of each feature column.

    :param feats: 2D array (samples x features)
    :return feat_range: per-feature [5th, 95th] percentile pairs
    :return feat_med: per-feature medians
    :return p_cts: per-feature 50-bin density histograms
    :return edges: per-feature histogram bin edges (length 51)
    """
    feat_range, feat_med, p_cts, edges = [], [], [], []
    for col in range(feats.shape[1]):
        column = feats[:, col]
        feat_range.append([np.quantile(column, 0.05), np.quantile(column, 0.95)])
        feat_med.append(np.quantile(column, 0.5))
        counts, bin_edges = np.histogram(column, 50, density=True)
        p_cts.append(counts)
        edges.append(bin_edges)
    return feat_range, feat_med, p_cts, edges
def behv_time(labels):
    """
    :param labels: 1D array, predicted labels
    :return beh_t: 1D array, percent time for each label
    """
    # Fraction of frames assigned to each group 0..k-1, where k is the number
    # of distinct labels observed (assumes labels are 0-based and contiguous).
    total = labels.shape[0]
    return [np.sum(labels == grp) / total for grp in range(0, len(np.unique(labels)))]
def behv_dur(labels):
    """Summarize behavioral bout durations per label.

    :param labels: 1D array, predicted labels
    :return runlen_df: DataFrame with one row per bout
        (label, start frame, run length)
    :return dur_stats: DataFrame with percent time, mean duration and duration
        percentiles (10/25/50/75/90) per label
    """
    lengths, pos, grp = rle(labels)
    # FIX: pass single-name columns as lists, not set literals — a set is
    # unordered and only works here by the accident of having one element.
    df_lengths = pd.DataFrame(lengths, columns=['Run lengths'])
    df_grp = pd.DataFrame(grp, columns=['B-SOiD labels'])
    df_pos = pd.DataFrame(pos, columns=['Start time (frames)'])
    runlengths = [df_grp, df_pos, df_lengths]
    runlen_df = pd.concat(runlengths, axis=1)
    beh_t = behv_time(labels)
    dur_means = []
    dur_quant0 = []
    dur_quant1 = []
    dur_quant2 = []
    dur_quant3 = []
    dur_quant4 = []
    for i in range(0, len(np.unique(grp))):
        runs_i = lengths[np.where(grp == i)]
        # np.mean of an empty slice yields NaN (with a warning) rather than
        # raising, so appending outside the try keeps all columns aligned —
        # the original had this append inside the try, risking misalignment.
        dur_means.append(np.mean(runs_i))
        try:
            dur_quant0.append(np.quantile(runs_i, 0.1))
            dur_quant1.append(np.quantile(runs_i, 0.25))
            dur_quant2.append(np.quantile(runs_i, 0.5))
            dur_quant3.append(np.quantile(runs_i, 0.75))
            dur_quant4.append(np.quantile(runs_i, 0.9))
        except (IndexError, ValueError):
            # Label i has no bouts (labels not contiguous): pad with zeros,
            # matching the original fallback. FIX: bare except narrowed to the
            # errors np.quantile raises on empty input.
            dur_quant0.append(0)
            dur_quant1.append(0)
            dur_quant2.append(0)
            dur_quant3.append(0)
            dur_quant4.append(0)
    # Stack the per-label statistics into one (k x 7) table.
    stat_columns = [beh_t, dur_means, dur_quant0, dur_quant1, dur_quant2, dur_quant3, dur_quant4]
    alldata = np.concatenate([np.array(col).reshape(len(col), 1) for col in stat_columns], axis=1)
    micolumns = pd.MultiIndex.from_tuples([('Stats', 'Percent of time'),
                                           ('', 'Mean duration (frames)'), ('', '10th %tile (frames)'),
                                           ('', '25th %tile (frames)'), ('', '50th %tile (frames)'),
                                           ('', '75th %tile (frames)'), ('', '90th %tile (frames)')],
                                          names=['', 'B-SOiD labels'])
    dur_stats = pd.DataFrame(alldata, columns=micolumns)
    return runlen_df, dur_stats
def repeating_numbers(labels):
    """
    :param labels: 1D array, predicted labels
    :return n_list: 1D array, the label number
    :return idx: 1D array, label start index
    :return lengths: 1D array, how long each bout lasted for
    """
    # NOTE(review): two quirks to confirm before relying on this function:
    # 1) `lengths` stores end_index - start_index, i.e. one LESS than the
    #    number of frames in the bout (a 2-frame bout has length 1).
    # 2) A final single-frame bout at the very end of `labels` is never
    #    recorded, because the outer loop stops at len(labels) - 1.
    # Callers may depend on both behaviors, so they are documented, not changed.
    i = 0
    n_list = []
    idx = []
    lengths = []
    while i < len(labels) - 1:
        n = labels[i]
        n_list.append(n)
        startIndex = i
        idx.append(i)
        # Advance through the run of identical consecutive labels.
        while i < len(labels) - 1 and labels[i] == labels[i + 1]:
            i = i + 1
        endIndex = i
        length = endIndex - startIndex
        lengths.append(length)
        i = i + 1
    return n_list, idx, lengths
def main(labels, n):
    """
    :param labels: 1D array: predicted labels
    :param output_path: string, output directory
    :return dur_stats: object, behavioral duration statistics data frame
    :return tm: object, transition matrix data frame
    """
    # NOTE(review): docstring is stale — the second parameter is n (number of
    # behavioral groups), not output_path, and five values are returned.
    runlen_df, dur_stats = behv_dur(labels)
    tm_array, tm_df, tm_norm = transition_matrix(labels, n)
    return runlen_df, dur_stats, tm_array, tm_df, tm_norm
| 5,098
| 36.218978
| 103
|
py
|
B-SOID
|
B-SOID-master/bsoid_app/analysis_subroutines/analysis_utilities/visuals.py
|
import colorsys
import glob
import os
import subprocess
import matplotlib as mpl
import matplotlib.colors as mc
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
from matplotlib.axes._axes import _log as matplotlib_axes_logger
from matplotlib.pyplot import figure
from analysis_subroutines.analysis_utilities.processing import data_processing
matplotlib_axes_logger.setLevel('ERROR')
def discrete_cmap(n, base_cmap=None):
    """Create an N-bin discrete colormap from the specified input map"""
    # Sample n evenly spaced colors from the base colormap and rebuild a
    # colormap with exactly n bins, named '<base_name><n>'.
    base = plt.cm.get_cmap(base_cmap)
    color_list = base(np.linspace(0, 1, n))
    cmap_name = base.name + str(n)
    return base.from_list(cmap_name, color_list, n)
def lighten_color(color, amount=0):
    """Return `color` with its HLS lightness scaled toward white.

    :param color: CSS color name, hex string, or RGB(A) tuple
    :param amount: scale on the distance from full lightness —
        0 yields white, 1 leaves the color unchanged, >1 darkens
    :return: (r, g, b) tuple
    """
    try:
        c = mc.cnames[color]
    except (KeyError, TypeError):
        # FIX: narrowed from a bare except. Not a named CSS color (e.g. an
        # RGB(A) tuple or hex string) -> let mc.to_rgb handle it directly.
        c = color
    c = colorsys.rgb_to_hls(*mc.to_rgb(c))
    # new_lightness = 1 - amount * (1 - old_lightness)
    return colorsys.hls_to_rgb(c[0], 1 - amount * (1 - c[1]), c[2])
def plot_accuracy_boxplot(algo, data, c, fig_size, fig_format='png', outpath=os.getcwd(), save=True):
    """Horizontal boxplot of per-group classifier accuracies.

    :param algo: str or None, filename component ('<outpath><algo>_Kfold_accuracy.<fmt>')
    :param data: sequence of per-group accuracy arrays (one box each)
    :param c: list of colors, one per group
    :param fig_size: (width, height) in inches
    :param fig_format: matplotlib-supported format (save path only)
    :param outpath: output path prefix (save path only)
    :param save: True -> write file and return None; False -> return (fig, ax)
    """
    fig = figure(num=None, figsize=fig_size, dpi=300, facecolor='w', edgecolor='k')
    ax = plt.subplot()
    sns.set_palette(sns.color_palette(c))
    sns.boxplot(data=np.array(data), orient='h', width=0.7, ax=ax)
    # Recolor each box's companion Line2D artists (seaborn draws 6 per box:
    # whiskers, caps, median) with a lightened version of the box color.
    for i, artist in enumerate(ax.artists):
        col = lighten_color(artist.get_facecolor(), 1.4)
        artist.set_edgecolor('k')
        for j in range(i * 6, i * 6 + 6):
            line = ax.lines[j]
            line.set_color(col)
            line.set_mfc(col)
            line.set_mec(col)
            line.set_linewidth(3)
    ax.spines['top'].set_visible(False)
    ax.spines['right'].set_visible(False)
    ax.spines['bottom'].set_visible(True)
    ax.spines['left'].set_visible(True)
    ax.set_yticks(range(0, len(c), 3))
    ax.set_xticks(np.arange(np.percentile(np.concatenate(data), 5),
                            np.percentile(np.concatenate(data), 95), 0.1))
    if save:
        # Publication styling. NOTE(review): the set_?ticks and tick_params
        # calls below duplicate the ones above / each other — harmless, but
        # redundant.
        ax.spines['left'].set_linewidth(3)
        ax.spines['bottom'].set_linewidth(3)
        ax.set_yticks(range(0, len(c), 3))
        ax.set_xticks(np.arange(np.percentile(np.concatenate(data), 5),
                                np.percentile(np.concatenate(data), 95), 0.1))
        ax.tick_params(length=9, width=3)
        ax.tick_params(length=9, width=3)
        ax.tick_params(labelsize=24)
        ax.tick_params(labelsize=24)
        plt.savefig(str.join('', (outpath, algo, '_Kfold_accuracy.', fig_format)), format=fig_format, transparent=False)
    else:
        return fig, ax
def plot_coherence_boxplot(algo, data, c, fig_size, fig_format, outpath):
    """Horizontal boxplot of frameshift coherence values, always saved to
    '<outpath><algo>_frameshift_coherence.<fig_format>'.

    Tick labels are stripped; axis text is presumably added externally when
    composing the paper figure.

    :param algo: str, filename prefix component
    :param data: sequence of coherence arrays, one box per group
    :param c: single color applied to all boxes
    :param fig_size: (width, height) in inches
    :param fig_format: matplotlib-supported format
    :param outpath: output path prefix
    """
    figure(num=None, figsize=fig_size, dpi=300, facecolor='w', edgecolor='k')
    ax = plt.subplot()
    sns.boxplot(data=np.array(data), orient='h', width=0.7, medianprops={'color': 'white'}, color=c, ax=ax)
    ax.spines['top'].set_visible(False)
    ax.spines['right'].set_visible(False)
    ax.spines['bottom'].set_visible(True)
    ax.spines['bottom'].set_linewidth(3)
    ax.spines['left'].set_visible(True)
    ax.spines['left'].set_linewidth(3)
    ax.tick_params(length=24, width=3)
    # Fixed x-range for coherence (0.7 to 1.0).
    ax.set_xlim(0.7, 1)
    ax.set_xticks(np.arange(0.70, 1.01, 0.1))
    ax.xaxis.set_ticklabels([])
    ax.yaxis.set_ticklabels([])
    plt.savefig(str.join('', (outpath, algo, '_frameshift_coherence.', fig_format)),
                format=fig_format, transparent=True)
def plot_peaks(x, ax, ind):
    """Plot signal `x` with the detected peak indices `ind` marked as red '+'.

    :param x: 1D array, signal (pose displacement per frame)
    :param ax: matplotlib Axes, or None to create a new 16x8.5-inch figure
    :param ind: 1D int array, indices of detected peaks (may be empty)
    """
    if ax is None:
        _, ax = plt.subplots(1, 1, figsize=(16, 8.5))
    ax.plot(x, 'k', lw=1)
    hfont = {'fontname': 'Helvetica'}
    if ind.size:
        # Legend label pluralizes automatically: '1 peak' vs 'N peaks'.
        label = 'peak'
        label = label + 's' if ind.size > 1 else label
        ax.plot(ind, x[ind], '+', mfc=None, mec='r', mew=4, ms=16,
                label='%d %s' % (ind.size, label))
        ax.legend(loc='best', framealpha=.5, numpoints=1, prop={'family': 'Helvetica', 'size': 24})
    ax.set_xlim(-.02 * x.size, x.size * 1.02 - 1)
    # Pad the y-range by 10% of the finite data span (min span of 1).
    ymin, ymax = x[np.isfinite(x)].min(), x[np.isfinite(x)].max()
    yrange = ymax - ymin if ymax > ymin else 1
    ax.set_ylim(ymin - 0.1 * yrange, ymax + 0.1 * yrange)
    ax.set_xlabel('Bout duration (frames)', fontsize=24, **hfont)
    ax.set_ylabel('Pose estimate $\Delta$ (pixels)', fontsize=24, **hfont)
    ax.spines['top'].set_visible(False)
    ax.spines['right'].set_visible(False)
    ax.spines['bottom'].set_visible(True)
    ax.spines['left'].set_visible(True)
    ax.tick_params(labelsize=20)
    ticks_font = mpl.font_manager.FontProperties(family='Helvetica', size=20)
    for l in ax.get_xticklabels():
        l.set_fontproperties(ticks_font)
def plot_kinematics_cdf(ax, var, vname, data, c, bnct, tk, leg,
                        fig_size, fig_format='png', outpath=os.getcwd(), save=None):
    """Plot empirical CDFs of a kinematic variable for two groups.

    :param ax: axes to draw on when not saving; replaced by fresh axes when saving
    :param var, vname: identifiers used to build the output filename
    :param data: two-element sequence of 1D samples (control, experimental)
    :param c: two line colors, one per group
    :param bnct: number of histogram bin edges
    :param tk: approximate number of x-axis ticks
    :param leg: whether to draw a legend
    :param fig_size: figure size in inches
    :param fig_format: image format when saving
    :param outpath: output directory when saving
    :param save: when truthy, save the figure and close; otherwise return (fig, ax)
    """
    fig = figure(num=None, figsize=fig_size, dpi=300, facecolor='w', edgecolor='k')
    if save:
        ax = plt.axes()
    pooled = np.concatenate(data)
    # common bin edges spanning the pooled 1st-99th percentile range
    edges = np.linspace(np.percentile(pooled, 1), np.percentile(pooled, 99), num=bnct)
    cdf_counts = []
    for sample in data[:2]:
        counts, base = np.histogram(sample, bins=edges,
                                    weights=np.ones(len(sample)) / len(sample),
                                    density=False)
        # trailing zero pads counts to the same length as the bin edges
        cdf_counts.append(np.append(counts, 0))
    if save:
        lwidth, lg_size = 8, 8
    else:
        lwidth, lg_size = 2, 1
    for counts, color, tag in zip(cdf_counts, c, ("Ctrl.", "Exp.")):
        ax.plot(base, np.cumsum(counts) / np.cumsum(counts)[-1],
                color=color, marker='None', linestyle='-',
                label=tag, linewidth=lwidth)
    ax.set_xlim(np.percentile(pooled, 2), np.percentile(pooled, 98))
    ax.set_ylim(0, 1)
    if leg:
        lgnd = ax.legend(loc=4, prop={'family': 'Helvetica', 'size': 12})
        lgnd.legendHandles[0]._legmarker.set_markersize(lg_size)
        lgnd.legendHandles[1]._legmarker.set_markersize(lg_size)
    ax.set_xticks(np.arange(int(np.percentile(pooled, 2)),
                            int(np.percentile(pooled, 98)) + 0.1,
                            int((np.percentile(pooled, 98) -
                                 np.percentile(pooled, 2)) / tk)))
    ax.set_yticks(np.arange(0, 1.1, 0.2))
    for side in ('top', 'right', 'bottom', 'left'):
        ax.spines[side].set_visible(True)
    if save:
        for side in ('top', 'right', 'bottom', 'left'):
            ax.spines[side].set_linewidth(5)
        ax.tick_params(length=15, width=5)
        ax.tick_params(labelsize=24)
        plt.savefig(str.join('', (outpath, '/{}_{}_cdf.'.format(var, vname), fig_format)),
                    format=fig_format, transparent=True)
        plt.close('all')
    else:
        return fig, ax
def plot_trajectory(limbs, labels, soft_assignments, t_range, ord1, ord2, c,
                    fig_size, fig_format='png', outpath=os.getcwd(), save=True):
    """Plot smoothed limb displacement traces in two stacked panels, shading
    each frame span by its behavioral group label.

    :param limbs: list of 1D displacement traces, one per body part
    :param labels: per-frame group labels for the plotted interval
    :param soft_assignments: full set of group labels (defines the colormap)
    :param t_range: (start, end) frames — used only in the output filename
    :param ord1: indices into limbs for the top panel (first is drawn opaque)
    :param ord2: indices into limbs for the bottom panel
    :param c: two line colors (top panel, bottom panel)
    :param fig_size: figure size in inches
    :param fig_format: image format when saving
    :param outpath: output path prefix when saving
    :param save: save the figure when truthy, otherwise return (fig, ax1, ax2)
    """
    # boxcar-smooth each limb trace with a 5-sample window
    proc_limb = []
    for l in range(len(limbs)):
        proc_data = data_processing(limbs[l])
        proc_limb.append(proc_data.boxcar_center(5))
    # transitions: frame indices where the label changes, bracketed by 0 and len(labels)
    transitions = 0
    transitions = np.vstack((transitions, (np.argwhere(np.diff(labels) != 0) + 1)))
    transitions = np.vstack((transitions, len(labels)))
    # one Spectral colormap entry per unique group
    uk = list(np.unique(soft_assignments))
    r = np.linspace(0, 1, len(uk))
    cmap = plt.cm.get_cmap("Spectral")(r)
    fig = figure(num=None, figsize=fig_size, dpi=300, facecolor='w', edgecolor='k')
    plt.subplot()
    plt.subplot(211)
    ax1 = plt.subplot(2, 1, 1)
    if save:
        lwidth = 8
    else:
        lwidth = 3
    # top panel: first trace opaque, the rest faded
    for o in range(len(ord1)):
        if o > 0:
            a = 0.3
        else:
            a = 1
        ax1.plot(proc_limb[ord1[o]], linewidth=lwidth, color=c[0], alpha=a)
        for t in range(len(transitions) - 1):
            for g in np.unique(soft_assignments):
                # shade group spans only once (while drawing the first trace)
                if labels[transitions[t]] == g and o == 0:
                    ax1.axvspan(transitions[t], transitions[t + 1], color=cmap[g], alpha=0.2, lw=0)
                    # annotate the group number above the traces
                    plt.text(transitions[t], np.max([proc_limb[ord1[i]].max() for i in range(len(ord1))]),
                             '{}'.format(g), fontsize=6)
    ax1 = plt.gca()
    ax1.get_xaxis().set_visible(False)
    # bottom panel, same layout but without the text annotations
    ax2 = plt.subplot(2, 1, 2)
    for o in range(len(ord2)):
        if o > 0:
            a = 0.3
        else:
            a = 1
        ax2.plot(proc_limb[ord2[o]], linewidth=lwidth, color=c[1], alpha=a)
        for t in range(len(transitions) - 1):
            for g in np.unique(soft_assignments):
                if labels[transitions[t]] == g:
                    ax2.axvspan(transitions[t], transitions[t + 1], color=cmap[g], alpha=0.2, lw=0)
    ax2 = plt.gca()
    ax1.spines['top'].set_visible(False)
    ax1.spines['right'].set_visible(False)
    ax2.spines['top'].set_visible(False)
    ax2.spines['right'].set_visible(False)
    ax1.spines['bottom'].set_visible(False)
    ax2.spines['bottom'].set_visible(True)
    ax1.spines['left'].set_visible(True)
    ax2.spines['left'].set_visible(True)
    # inverts the y-axis of the current axes, i.e. the bottom panel (ax2)
    plt.gca().invert_yaxis()
    ax2.set_xticks(range(0, len(labels), 40))
    ax1.set_yticks(range(0, int(np.percentile(np.concatenate(limbs), 98)), 5))
    ax2.set_yticks(range(0, int(np.percentile(np.concatenate(limbs), 98)), 5))
    if save:
        ax1.spines['left'].set_linewidth(4)
        ax2.spines['left'].set_linewidth(4)
        ax2.spines['bottom'].set_linewidth(4)
        ax2.set_xticks(range(0, len(labels), 40))
        ax1.set_yticks(range(0, int(np.percentile(np.concatenate(limbs), 98)), 5))
        ax2.set_yticks(range(0, int(np.percentile(np.concatenate(limbs), 98)), 5))
        ax1.tick_params(length=12, width=4)
        ax2.tick_params(length=12, width=4)
        ax1.tick_params(labelsize=24)
        ax2.tick_params(labelsize=24)
        plt.savefig(str.join('', (outpath, 'start{}_end{}_limb_trajectory.'.format(*t_range), fig_format)),
                    format=fig_format, transparent=False)
    else:
        return fig, ax1, ax2
def umap_scatter(embeds, assigns, mov_range, output_path, width, height):
    """Render one scatter frame per time point of the UMAP embedding and
    stitch the frames into a 10 fps .mp4 with ffmpeg.

    :param embeds: (n, 2) UMAP embedding coordinates
    :param assigns: per-frame group assignments (negative = unassigned/noise)
    :param mov_range: (start, stop) frame interval to animate
    :param output_path: directory receiving the temporary PNGs and the movie
    :param width, height: movie dimensions in pixels (figure sized at 96 dpi)
    """
    uk = list(np.unique(assigns))
    R = np.linspace(0, 1, len(uk))
    cmap = plt.cm.get_cmap("Spectral")(R)
    umap_x, umap_y = embeds[mov_range[0]:mov_range[1], 0], embeds[mov_range[0]:mov_range[1], 1]
    fig = figure(facecolor='k', edgecolor='w')
    fig.set_size_inches(width / 96, height / 96)
    ax = fig.add_subplot(111)
    # fixed axis limits padded 0.2 around the plotted embedding slice
    ax.axes.axis([min(umap_x) - 0.2, max(umap_x) + 0.2, min(umap_y) - 0.2, max(umap_y) + 0.2])
    count = 0
    for i, j in enumerate(range(mov_range[0], mov_range[1])):
        alph = 0.8
        m_size = 80
        for g in np.unique(assigns):
            # plot only frames with a non-negative assignment, colored by group
            if assigns[j] == g and assigns[j] >= 0:
                ax.scatter(umap_x[i], umap_y[i], c=cmap[g], edgecolors='w',
                           label=g, s=m_size, marker='o', alpha=alph)
                # group tag in the top-right corner, hidden again after saving
                txt = plt.text(1, 1, 'group {}'.format(g), c='white', horizontalalignment='center',
                               verticalalignment='center', transform=ax.transAxes, fontsize=20,
                               bbox=dict(facecolor=cmap[g], alpha=0.8))
        ax.xaxis.set_ticks([])
        ax.yaxis.set_ticks([])
        ax.set_facecolor('black')
        ax.tick_params(length=6, width=2, color='white')
        count += 1
        plt.savefig(output_path + "/file%04d.png" % count, dpi=96)
        # NOTE(review): txt is only bound when a group matched above; a first
        # frame with assigns[j] < 0 would leave txt unbound here — confirm
        txt.set_visible(False)
    plt.close('all')
    # stitch the per-frame PNGs into an mp4, then remove the PNGs
    subprocess.call([
        'ffmpeg', '-y', '-framerate', '10', '-i', output_path + '/file%04d.png',
        '-vcodec', 'libx264', '-pix_fmt', 'yuv420p',
        output_path + '/umap_enhanced_clustering{}_{}.mp4'.format(*mov_range)
    ])
    for file_name in glob.glob(output_path + "/*.png"):
        os.remove(file_name)
def trim_video(mov_path, mov_file, mov_range, mov_st_min, mov_st_sec, mov_sp_min, mov_sp_sec, output_path):
    """Cut a clip out of a session video and re-encode it at 10 fps.

    :param mov_path: directory containing the source movie
    :param mov_file: source movie filename
    :param mov_range: (start, stop) frame interval, used in the output filenames
    :param mov_st_min, mov_st_sec: clip start (minutes, seconds)
    :param mov_sp_min, mov_sp_sec: clip stop (minutes, seconds)
    :param output_path: directory receiving the trimmed clips
    """
    print(mov_path, mov_file)
    trimmed_clip = output_path + '/video_trim2umap{}_{}.mp4'.format(*mov_range)
    # first pass: cut the requested time window out of the source movie
    subprocess.call([
        'ffmpeg', '-y', '-i', str.join('', (mov_path, '/', mov_file)),
        '-ss', '00:{}:{}'.format(mov_st_min, mov_st_sec), '-to', '00:{}:{}'.format(mov_sp_min, mov_sp_sec),
        trimmed_clip
    ])
    # second pass: resample the trimmed clip to 10 fps (matches umap_scatter)
    subprocess.call([
        'ffmpeg', '-y', '-i', trimmed_clip,
        '-filter:v', 'fps=fps=10',
        output_path + '/video_trim2umap{}_{}_10fps.mp4'.format(*mov_range)
    ])
def video_umap(output_path, mov_range):
    """Stack the trimmed 10 fps behavior clip and the UMAP animation side by side.

    :param output_path: directory holding both input movies; receives the output
    :param mov_range: (start, stop) frame interval, used in the filenames
    """
    behavior_clip = output_path + '/video_trim2umap{}_{}_10fps.mp4'.format(*mov_range)
    umap_clip = output_path + '/umap_enhanced_clustering{}_{}.mp4'.format(*mov_range)
    # pad the left video by 5 white pixels, then hstack the two inputs
    subprocess.call([
        'ffmpeg', '-y', '-i', behavior_clip,
        '-i', umap_clip,
        '-filter_complex', "[0]pad=iw+5:color=white[left];[left][1]hstack=inputs=2",
        output_path + '/sync_leftright_video2umap{}_{}.mp4'.format(*mov_range)
    ])
| 13,577
| 42.941748
| 120
|
py
|
B-SOID
|
B-SOID-master/bsoid_app/analysis_subroutines/analysis_utilities/load_data.py
|
import os
import joblib
import scipy.io
def load_mat(file):
    """Load a MATLAB .mat file (path or file-like object) and return its variable dict."""
    return scipy.io.loadmat(file)
def load_sav(path, name, fname):
    """Load the joblib file '<name>_<fname>.sav' under *path* and return its contents as a list."""
    sav_file = os.path.join(path, str.join('', (name, '_', fname, '.sav')))
    with open(sav_file, 'rb') as fr:
        payload = joblib.load(fr)
    return list(payload)
class appdata:
    """Loader for the B-SOiD workspace .sav files.

    Every loader opens '<name>_<suffix>.sav' under *path* and returns the
    joblib-pickled contents as a list. The six public loaders previously
    repeated the same open/load boilerplate; they now share ``_load``.
    """

    def __init__(self, path, name):
        self.path = path  # directory holding the workspace .sav files
        self.name = name  # filename prefix shared by all workspace files

    def _load(self, suffix):
        """Open '<name>_<suffix>.sav' and return its joblib payload as a list."""
        with open(os.path.join(self.path, str.join('', (self.name, '_', suffix, '.sav'))), 'rb') as fr:
            data = joblib.load(fr)
        return [i for i in data]

    def load_data(self):
        """Return the raw pose data workspace."""
        return self._load('data')

    def load_feats(self):
        """Return the extracted features workspace."""
        return self._load('feats')

    def load_embeddings(self):
        """Return the sampled features and UMAP embeddings."""
        return self._load('embeddings')

    def load_clusters(self):
        """Return the HDBSCAN cluster assignments workspace."""
        return self._load('clusters')

    def load_classifier(self):
        """Return the trained random-forest workspace."""
        return self._load('randomforest')

    def load_predictions(self):
        """Return folders, folder, filenames, new data and predictions."""
        return self._load('predictions')
| 1,576
| 28.754717
| 103
|
py
|
B-SOID
|
B-SOID-master/bsoid_app/analysis_subroutines/analysis_utilities/__init__.py
| 0
| 0
| 0
|
py
|
|
B-SOID
|
B-SOID-master/bsoid_app/analysis_subroutines/analysis_utilities/save_data.py
|
import os
import joblib
class results:
    """Writer for analysis-result .sav files under a common path/prefix."""

    def __init__(self, path, name):
        self.path = path  # output directory
        self.name = name  # filename prefix

    def save_sav(self, datalist, file):
        """Dump *datalist* with joblib into '<name>_<file>.sav' under path."""
        out_file = os.path.join(self.path, str.join('', (self.name, '_', file, '.sav')))
        with open(out_file, 'wb') as f:
            joblib.dump(datalist, f)
| 308
| 19.6
| 100
|
py
|
B-SOID
|
B-SOID-master/bsoid_app/analysis_subroutines/analysis_utilities/cache_workspace.py
|
import os
import joblib
import streamlit as st
@st.cache
def load_data(path, name):
    """Load (and memoize via Streamlit) the workspace fields the app needs.

    Reads the data/feats/embeddings/clusters/predictions .sav files under
    *path* with prefix *name*, discarding the fields the front-end ignores.
    """
    def _read(suffix):
        # each workspace component lives in '<name>_<suffix>.sav'
        with open(os.path.join(path, str.join('', (name, '_', suffix, '.sav'))), 'rb') as fr:
            return joblib.load(fr)
    _, _, framerate, _, _, _, _, _ = _read('data')
    features, _ = _read('feats')
    sampled_features, sampled_embeddings = _read('embeddings')
    _, assignments, _, soft_assignments = _read('clusters')
    folders, folder, filenames, new_data, new_predictions = _read('predictions')
    return framerate, features, sampled_features, sampled_embeddings, assignments, soft_assignments, \
        folders, folder, filenames, new_data, new_predictions
| 983
| 45.857143
| 102
|
py
|
B-SOID
|
B-SOID-master/bsoid_app/analysis_subroutines/analysis_scripts/kfold_accuracy.py
|
import getopt
import sys
from ast import literal_eval
import numpy as np
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import KFold
from analysis_subroutines.analysis_utilities.load_data import appdata
from analysis_subroutines.analysis_utilities.processing import reorganize_group_order
from analysis_subroutines.analysis_utilities.save_data import results
def generate_kfold(path, name, k):
    """Cross-validate a random forest on the clustered features.

    :param path: workspace directory
    :param name: workspace file prefix
    :param k: number of folds
    :return: (k, n_groups) array — per-group test accuracy, one row per fold
    """
    workspace = appdata(path, name)
    f_10fps_sub, train_embeddings = workspace.load_embeddings()
    min_cluster_range, assignments, soft_clusters, soft_assignments = workspace.load_clusters()
    # drop noise points (assignment < 0) before training
    keep = assignments >= 0
    y = assignments[keep]
    X = f_10fps_sub[keep, :]
    kf = KFold(n_splits=k)
    kf.get_n_splits(X)
    accuracy_data = []
    for train_index, test_index in kf.split(X):
        classifier = RandomForestClassifier(random_state=42)
        classifier.fit(X[train_index], y[train_index])
        y_test = y[test_index]
        predictions = classifier.predict(X[test_index])
        # fraction of correctly predicted frames within each group label
        fold_accuracy = [len(np.argwhere((predictions - y_test == 0) & (y_test == g)))
                         / len(np.argwhere(y_test == g))
                         for g in range(len(np.unique(y_test)))]
        accuracy_data.append(np.array(fold_accuracy))
    return np.array(accuracy_data)
def main(argv):
    """CLI entry point: compute k-fold accuracies and save them reordered."""
    parsed = dict.fromkeys(('path', 'file', 'kfold', 'order', 'variable'))
    options, args = getopt.getopt(
        argv[1:],
        'p:f:k:o:v:',
        ['path=', 'file=', 'kfold=', 'order=', 'variable='])
    # map every accepted flag spelling onto its destination key
    dests = {'-p': 'path', '--path': 'path',
             '-f': 'file', '--file': 'file',
             '-k': 'kfold', '--kfold': 'kfold',
             '-o': 'order', '--order': 'order',
             '-v': 'variable', '--variable': 'variable'}
    for option_key, option_value in options:
        parsed[dests[option_key]] = option_value
    path, name = parsed['path'], parsed['file']
    k, order, vname = parsed['kfold'], parsed['order'], parsed['variable']
    print('*' * 50)
    print('PATH :', path)
    print('NAME :', name)
    print('K-FOLD :', k)
    print('ORDER :', order)
    print('VARIABLE :', vname)
    print('*' * 50)
    print('Computing...')
    accuracy_data = generate_kfold(path, name, int(k))
    accuracy_ordered = reorganize_group_order(accuracy_data, literal_eval(order))
    results_ = results(path, name)
    results_.save_sav([accuracy_data, accuracy_ordered], vname)


if __name__ == '__main__':
    main(sys.argv)
| 2,607
| 33.315789
| 94
|
py
|
B-SOID
|
B-SOID-master/bsoid_app/analysis_subroutines/analysis_scripts/extract_kinematics.py
|
import getopt
import glob
import os
import subprocess
import sys
from ast import literal_eval
import matplotlib.pyplot as plt
import numpy as np
from scipy.signal import find_peaks
from tqdm import tqdm
from analysis_subroutines.analysis_utilities.load_data import appdata
from analysis_subroutines.analysis_utilities.processing import data_processing
from analysis_subroutines.analysis_utilities.save_data import results
from analysis_subroutines.analysis_utilities.statistics import rle
from analysis_subroutines.analysis_utilities.visuals import plot_peaks
def get_kinematics(path, name, exp, group_num, bp, fps):
    """Extract per-bout kinematics for one behavioral group.

    For every session listed in *exp*, frames labeled *group_num* are split
    into bouts; the frame-to-frame Euclidean displacement of each body part
    in *bp* is computed (boxcar-smoothed, jump-suppressed), and peaks in the
    displacement trace are detected. A random subsample of peak plots is
    rendered and stitched into a movie with ffmpeg.

    Fix: ``np.int`` (removed in NumPy 1.24) replaced with the builtin ``int``.

    :param path: workspace directory; 'kinematics_analysis/' output goes here
    :param name: workspace file prefix
    :param exp: two lists of session indices (concatenated for iteration)
    :param group_num: behavioral group label to analyze
    :param bp: body-part indices (x/y pairs live at columns 2*b, 2*b+1)
    :param fps: video frame rate
    :return: [pose_all_animal, eu_all_animal, all_bouts_disp,
              all_bouts_peak_speed, all_bouts_dur, output_path]
    """
    appdata_ = appdata(path, name)
    _, _, filenames2, data_new, fs_labels = appdata_.load_predictions()
    # odd-length boxcar window covering roughly 100 ms of video
    win_len = int(np.round(0.05 / (1 / fps)) * 2 - 1)
    bout_frames = []
    term_frame = []
    pose_all_animal = []
    eu_all_animal = []
    all_bouts_disp = []
    all_bouts_peak_speed = []
    all_bouts_dur = []
    count = 0
    for an, se in enumerate(np.concatenate(exp)):
        # frames of this session assigned to the target behavior
        bout_frames.append(np.array(np.where(fs_labels[se] == group_num)))
        term_f = np.diff(bout_frames[an]) != 1
        term_frame.append(np.array(term_f*1))
        # run-length encode the discontinuities to locate bout boundaries
        lengths, pos, grp = rle(term_frame[an].T)
        endpos = np.where(np.diff(pos) < 1)[0][0] + 1
        pos = pos[:endpos]
        poses = data_new[se]
        proc_pose = []
        for col in range(poses.shape[1]):
            pose = data_processing(poses[:, col])
            proc_pose.append(pose.boxcar_center(win_len))
        proc_pose = np.array(proc_pose, dtype=object).T
        pose_all_bp = []
        eu_all_bp = []
        for b in bp:
            pose_single_bp = []
            bt = 0
            eu_dist_bout = []
            # bout boundaries come in start/stop pairs, hence the step of 2
            for bout in range(0, len(pos) - 1, 2):
                eu_dist_ = []
                pose_single_bp.append(proc_pose[int(bout_frames[an][:, pos[bout]]):
                                                int(bout_frames[an][:, pos[bout+1]])+1,
                                      2 * b:2 * b + 2])
                for row in range(len(pose_single_bp[bt]) - 1):
                    try:
                        eu_dist_ = np.hstack((eu_dist_, np.linalg.norm(pose_single_bp[bt][row + 1, :] -
                                                                       pose_single_bp[bt][row, :])))
                    except TypeError:
                        pass
                # suppress tracking jumps (top 2% of displacements) ...
                eu_dist_ = np.array([np.nan if b_ > np.percentile(eu_dist_, 98) else b_ for b_ in eu_dist_])
                jump_idx = np.where(np.isnan(eu_dist_))[0]
                # ... and fill them with the mean of their neighbors
                for ju in jump_idx:
                    try:
                        eu_dist_[ju] = np.nanmean(eu_dist_[ju-1:ju+1])
                    except:
                        pass
                eu_dist_bout.append(eu_dist_)
                bt += 1
            pose_all_bp.append(pose_single_bp)  # all body parts pose estimation in one animal
            eu_all_bp.append(eu_dist_bout)  # all body parts euclidean distance in one animal
        pose_all_animal.append(pose_all_bp)  # all body parts pose estimations for all animals
        eu_all_animal.append(eu_all_bp)  # all body parts euclidean distances for all animals
        bps_bouts_disp = []
        bps_bouts_peak_speed = []
        bps_bouts_dur = []
        for i in tqdm(range(len(eu_all_bp))):
            bouts_disp = []
            bouts_pk_speed = []
            bouts_dur = []
            for j in range(len(eu_all_bp[i])):
                newsig = eu_all_bp[i][j].copy()
                # zero out sub-threshold (<10% of max) noise before peak detection
                newsig = np.array([0 if a_ < 0.1 * np.max(eu_all_bp[i][j]) else a_ for a_ in newsig])
                pk, info = find_peaks(newsig, prominence=3, distance=(fps / 30))
                try:
                    os.mkdir(str.join('', (path, '/kinematics_analysis')))
                except FileExistsError:
                    pass
                try:
                    os.mkdir(str.join('', (path, '/kinematics_analysis/group{}'.format(group_num))))
                except FileExistsError:
                    pass
                output_path = str.join('', (path, '/kinematics_analysis/group{}'.format(group_num)))
                if pk.size:
                    # displacement summed over each detected peak's base-to-base span
                    bout_disp = []
                    for k in range(len(info['left_bases'])):
                        bout_disp.append(np.sum((
                            eu_all_bp[i][j][int(round(info['left_bases'][k])):
                                            int(round(info['right_bases'][k])) + 1])))
                    # render roughly half of the multi-peak bouts as example figures
                    if pk.size > 3 and np.random.rand() < 0.5:
                        count += 1
                        plot_peaks(eu_all_bp[i][j], None, pk)
                        R = np.linspace(0, 1, len(info['left_bases']))
                        cm = plt.cm.Spectral(R)
                        for k in range(len(info['left_bases'])):
                            plt.fill_between(
                                np.arange(int(round(info['left_bases'][k])),
                                          int(round(info['right_bases'][k])) + 1),
                                eu_all_bp[i][j][int(round(info['left_bases'][k])):
                                                int(round(info['right_bases'][k])) + 1],
                                alpha=0.5, color=cm[k])
                        plt.savefig(output_path + "/file%04d.png" % count)
                        plt.close('all')
                    bouts_disp.append(bout_disp)
                    bouts_pk_speed.append(eu_all_bp[i][j][pk])
                    bouts_dur.append(len(eu_all_bp[i][j]))
            bps_bouts_disp.append(bouts_disp)
            bps_bouts_peak_speed.append(bouts_pk_speed)
            bps_bouts_dur.append(bouts_dur)
        all_bouts_disp.append(bps_bouts_disp)
        all_bouts_peak_speed.append(bps_bouts_peak_speed)
        all_bouts_dur.append(bps_bouts_dur)
    # stitch the subsampled example figures into a movie, then delete the PNGs
    # NOTE(review): output_path is only bound inside the loops above; with no
    # bouts at all this would raise NameError — confirm intended precondition
    subprocess.call([
        'ffmpeg', '-y', '-framerate', '5', '-i', output_path + '/file%04d.png',
        '-vcodec', 'libx264', '-pix_fmt', 'yuv420p',
        output_path + '/kinematics_subsample_examples.mp4'
    ])
    for file_name in glob.glob(output_path + "/*.png"):
        os.remove(file_name)
    return [pose_all_animal, eu_all_animal, all_bouts_disp, all_bouts_peak_speed, all_bouts_dur, output_path]
def group_kinematics(all_bouts_disp, all_bouts_peak_speed, all_bouts_dur, exp):
    """Pool per-animal bout kinematics into the two experimental groups.

    :param all_bouts_disp: all_bouts_disp[animal][bodypart] -> list of per-bout
        displacement sequences (flattened across bouts when pooling)
    :param all_bouts_peak_speed: same layout, per-bout peak-speed arrays
    :param all_bouts_dur: all_bouts_dur[animal][bodypart] -> list of bout durations
    :param exp: two lists of animal indices defining group 1 and group 2
    :return: [disp_g1, disp_g2, peak_speed_g1, peak_speed_g2, dur_g1, dur_g2] —
        each a list with one pooled 1D array (or an empty list) per body part
    """
    def _pool(per_animal, flatten_bouts):
        """Concatenate each body part's values across the animals of each group.

        An animal listed in both groups counts only toward group 1
        (consistency fix: the original duration loop used a plain 'if' for
        group 2 where the other two loops used 'elif').
        """
        group1, group2 = [], []
        for j in range(len(per_animal[0])):
            pooled = [[], []]
            for i in range(len(per_animal)):
                if any(i == sess for sess in exp[0]):
                    g = 0
                elif any(i == sess for sess in exp[1]):
                    g = 1
                else:
                    continue
                if len(per_animal[i][j]):
                    try:
                        chunk = np.concatenate(per_animal[i][j]) if flatten_bouts else per_animal[i][j]
                        pooled[g] = np.concatenate((pooled[g], chunk))
                    except ValueError:
                        # skip animals whose bout lists are empty/ragged
                        pass
            group1.append(pooled[0])
            group2.append(pooled[1])
        return group1, group2

    bps_exp1_bout_disp, bps_exp2_bout_disp = _pool(all_bouts_disp, True)
    bps_exp1_bout_peak_speed, bps_exp2_bout_peak_speed = _pool(all_bouts_peak_speed, True)
    # durations are already scalars per bout, so no inner flattening
    bps_exp1_bout_dur, bps_exp2_bout_dur = _pool(all_bouts_dur, False)
    return [bps_exp1_bout_disp, bps_exp2_bout_disp, bps_exp1_bout_peak_speed, bps_exp2_bout_peak_speed,
            bps_exp1_bout_dur, bps_exp2_bout_dur]
def main(argv):
    """CLI entry point: extract and save grouped kinematics for one behavior."""
    parsed = dict.fromkeys(('path', 'file', 'group_num', 'bodyparts', 'experiment', 'variable'))
    options, args = getopt.getopt(
        argv[1:],
        'p:n:g:b:e:v:',
        ['path=', 'file=', 'group_num=', 'bodyparts=', 'experiment=', 'variable='])
    # map every accepted flag spelling onto its destination key
    dests = {'-p': 'path', '--path': 'path',
             '-n': 'file', '--file': 'file',
             '-g': 'group_num', '--group_num': 'group_num',
             '-b': 'bodyparts', '--bodyparts': 'bodyparts',
             '-e': 'experiment', '--experiment': 'experiment',
             '-v': 'variable', '--variable': 'variable'}
    for option_key, option_value in options:
        parsed[dests[option_key]] = option_value
    path, name = parsed['path'], parsed['file']
    group_num, bodyparts = parsed['group_num'], parsed['bodyparts']
    exp, vname = parsed['experiment'], parsed['variable']
    print('*' * 50)
    print('PATH :', path)
    print('NAME :', name)
    print('BEHAVIOR :', group_num)
    print('BODY PARTS :', bodyparts)
    print('EXPERIMENT ORDER :', exp)
    print('VARIABLES :', vname)
    print('*' * 50)
    print('Computing...')
    # frame rate is hard-coded to 60 fps here, matching the original behavior
    [_, _, all_bouts_disp, all_bouts_peak_speed, all_bouts_dur, _] = \
        get_kinematics(path, name, literal_eval(exp), int(group_num), literal_eval(bodyparts), 60)
    [bps_exp1_bout_disp, bps_exp2_bout_disp, bps_exp1_bout_peak_speed, bps_exp2_bout_peak_speed,
     bps_exp1_bout_dur, bps_exp2_bout_dur] = \
        group_kinematics(all_bouts_disp, all_bouts_peak_speed, all_bouts_dur, literal_eval(exp))
    results_ = results(path, name)
    results_.save_sav([bps_exp1_bout_disp, bps_exp2_bout_disp, bps_exp1_bout_peak_speed,
                       bps_exp2_bout_peak_speed, bps_exp1_bout_dur, bps_exp2_bout_dur], vname)


if __name__ == '__main__':
    main(sys.argv)
| 11,163
| 44.016129
| 111
|
py
|
B-SOID
|
B-SOID-master/bsoid_app/analysis_subroutines/analysis_scripts/trajectory_plot.py
|
import getopt
import sys
from ast import literal_eval
import numpy as np
from analysis_subroutines.analysis_utilities.load_data import appdata
from analysis_subroutines.analysis_utilities.visuals import plot_trajectory
def limb_trajectory(path, name, animal_idx, bp, t_range):
    """Compute frame-to-frame displacement traces for selected body parts.

    :param path: workspace directory
    :param name: workspace file prefix
    :param animal_idx: session index into the prediction arrays
    :param bp: body-part indices (x/y pairs live at columns 2*b, 2*b+1)
    :param t_range: (start, end) frame interval
    :return: (labels, limbs, soft_assignments) where limbs[k] holds the
        per-frame Euclidean displacement of body part bp[k]
    """
    workspace = appdata(path, name)
    _, _, _, data_new, fs_labels = workspace.load_predictions()
    _, _, _, soft_assignments = workspace.load_clusters()
    start, end = t_range
    labels = fs_labels[animal_idx][start:end]
    coords = data_new[animal_idx]
    limbs = []
    for b in bp:
        # displacement relative to the previous frame for this body part
        steps = [np.linalg.norm(coords[t, b * 2:b * 2 + 2] - coords[t - 1, b * 2:b * 2 + 2])
                 for t in range(start, end)]
        limbs.append(np.array(steps, dtype=object))
    return labels, limbs, soft_assignments
def main(argv):
    """CLI entry point: plot limb trajectories for one session.

    Parses command-line flags, echoes the configuration, then computes and
    plots the limb displacement traces for the requested time range.
    """
    path = None
    name = None
    animal_idx = None
    bp = None
    t_range = None
    order1 = None
    order2 = None
    c = None
    fig_format = None
    outpath = None
    options, args = getopt.getopt(
        argv[1:],
        'p:f:i:b:t:r:R:c:m:o:',
        ['path=', 'file=', 'animal_idx=', 'bodypart=', 'timerange=', 'order1=', 'order2=', 'colors=',
         'format=', 'outpath='])
    for option_key, option_value in options:
        if option_key in ('-p', '--path'):
            path = option_value
        elif option_key in ('-f', '--file'):
            name = option_value
        elif option_key in ('-i', '--animal_idx'):
            animal_idx = option_value
        elif option_key in ('-b', '--bodypart'):
            bp = option_value
        elif option_key in ('-t', '--timerange'):
            t_range = option_value
        elif option_key in ('-r', '--order1'):
            order1 = option_value
        elif option_key in ('-R', '--order2'):
            # bug fix: this branch previously tested '--order1', so the long
            # form '--order2' was never captured and order2 stayed None
            order2 = option_value
        elif option_key in ('-c', '--colors'):
            c = option_value
        elif option_key in ('-m', '--format'):
            fig_format = option_value
        elif option_key in ('-o', '--outpath'):
            outpath = option_value
    print('*' * 50)
    print('PATH :', path)
    print('NAME :', name)
    print('ANIMAL INDEX :', animal_idx)
    print('BODYPARTS :', bp)
    print('TIME RANGE :', t_range)
    print('TOP PLOT :', order1)
    print('BOTTOM PLOT :', order2)
    print('COLORS :', c)
    print('FIG FORMAT :', fig_format)
    print('OUT PATH :', outpath)
    print('*' * 50)
    print('Plotting...')
    labels, limbs, soft_assignments = limb_trajectory(path, name, int(animal_idx), literal_eval(bp), literal_eval(t_range))
    plot_trajectory(limbs, labels, soft_assignments,
                    literal_eval(t_range), literal_eval(order1), literal_eval(order2), literal_eval(c),
                    (8.5, 16), fig_format, outpath)


if __name__ == '__main__':
    main(sys.argv)
| 2,938
| 33.988095
| 123
|
py
|
B-SOID
|
B-SOID-master/bsoid_app/analysis_subroutines/analysis_scripts/umap_clustering_plot.py
|
import getopt
import os
import sys
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.axes._axes import _log as matplotlib_axes_logger
from matplotlib.pyplot import figure
from analysis_subroutines.analysis_utilities.load_data import appdata
matplotlib_axes_logger.setLevel('ERROR')
def plot_enhanced_umap(path, name, fig_size, fig_format='png', outpath=os.getcwd(), save=True):
    """Scatter the trained UMAP embedding colored by cluster assignment.

    :param path: workspace directory
    :param name: workspace file prefix
    :param fig_size: figure size in inches
    :param fig_format: image format when saving
    :param outpath: output directory/prefix when saving
    :param save: save the figure when True, otherwise return (fig, ax)
    """
    workspace = appdata(path, name)
    f_10fps_sub, train_embeddings = workspace.load_embeddings()
    min_cluster_range, assignments, soft_clusters, soft_assignments = workspace.load_clusters()
    if save:
        m_size, lg_size, lg_text_sz = 50, 8, 12
    else:
        m_size, lg_size, lg_text_sz = 0.2, 4, 8
    uk = list(np.unique(assignments))
    # len(uk) - 1 colors — presumably excludes the -1 noise label; confirm
    r = np.linspace(0, 1, len(uk) - 1)
    cmap = plt.cm.get_cmap("Spectral")(r)
    umap_x, umap_y = train_embeddings[:, 0], train_embeddings[:, 1]
    fig = figure(num=None, figsize=fig_size, dpi=300, facecolor='w', edgecolor='k')
    ax = fig.add_subplot(111)
    for g in np.unique(assignments):
        if g < 0:
            continue  # skip unassigned points
        idx = np.where(np.array(assignments) == g)
        ax.scatter(umap_x[idx], umap_y[idx], c=cmap[g],
                   label=g, s=m_size, marker='o', alpha=0.6)
    plt.legend(ncol=4, loc=0, prop={'family': 'Helvetica', 'size': lg_text_sz}, markerscale=lg_size)
    ax.xaxis.set_ticks([])
    ax.yaxis.set_ticks([])
    for side in ('top', 'right', 'bottom', 'left'):
        ax.spines[side].set_visible(False)
    if save:
        for side in ('top', 'right', 'bottom', 'left'):
            ax.spines[side].set_linewidth(3)
        ax.tick_params(length=9, width=3)
        # NOTE(review): 'enahnced' typo kept so existing output filenames stay stable
        plt.savefig(str.join('', (outpath, '{}'.format(name), '_umap_enahnced_clustering.', fig_format)),
                    format=fig_format, transparent=True)
    else:
        return fig, ax
def main(argv):
    """CLI entry point: render the UMAP embedding colored by cluster."""
    parsed = dict.fromkeys(('path', 'file', 'format', 'outpath'))
    options, args = getopt.getopt(
        argv[1:],
        'p:f:m:o:',
        ['path=', 'file=', 'format=', 'outpath='])
    # map every accepted flag spelling onto its destination key
    dests = {'-p': 'path', '--path': 'path',
             '-f': 'file', '--file': 'file',
             '-m': 'format', '--format': 'format',
             '-o': 'outpath', '--outpath': 'outpath'}
    for option_key, option_value in options:
        parsed[dests[option_key]] = option_value
    path, name = parsed['path'], parsed['file']
    fig_format, outpath = parsed['format'], parsed['outpath']
    print('*' * 50)
    print('PATH :', path)
    print('NAME :', name)
    print('FIG FORMAT :', fig_format)
    print('OUT PATH :', outpath)
    print('*' * 50)
    print('Plotting...')
    plot_enhanced_umap(path, name, (16, 11), fig_format, outpath)


if __name__ == '__main__':
    main(sys.argv)
| 2,917
| 32.54023
| 105
|
py
|
B-SOID
|
B-SOID-master/bsoid_app/analysis_subroutines/analysis_scripts/accuracy_boxplot.py
|
import getopt
import sys
from ast import literal_eval
from analysis_utilities.load_data import load_sav
from analysis_utilities.visuals import plot_accuracy_boxplot
def main(argv):
    """CLI entry point: load saved accuracies and draw the ordered boxplot."""
    parsed = dict.fromkeys(('path', 'file', 'variable', 'algorithm', 'colors', 'format', 'outpath'))
    options, args = getopt.getopt(
        argv[1:],
        'p:f:v:a:c:m:o:',
        ['path=', 'file=', 'variable=', 'algorithm=', 'colors=', 'format=', 'outpath='])
    # map every accepted flag spelling onto its destination key
    dests = {'-p': 'path', '--path': 'path',
             '-f': 'file', '--file': 'file',
             '-v': 'variable', '--variable': 'variable',
             '-a': 'algorithm', '--algorithm': 'algorithm',
             '-c': 'colors', '--colors': 'colors',
             '-m': 'format', '--format': 'format',
             '-o': 'outpath', '--outpath': 'outpath'}
    for option_key, option_value in options:
        parsed[dests[option_key]] = option_value
    path, name, vname = parsed['path'], parsed['file'], parsed['variable']
    algorithm, c = parsed['algorithm'], parsed['colors']
    fig_format, outpath = parsed['format'], parsed['outpath']
    print('*' * 50)
    print('PATH :', path)
    print('NAME :', name)
    print('VARIABLE :', vname)
    print('ALGORITHM :', algorithm)
    print('COLORS :', c)
    print('FIG FORMAT :', fig_format)
    print('OUT PATH :', outpath)
    print('*' * 50)
    print('Plotting...')
    _, accuracy_ordered = load_sav(path, name, vname)
    plot_accuracy_boxplot(algorithm, accuracy_ordered, literal_eval(c), (6, 16), fig_format, outpath)


if __name__ == '__main__':
    main(sys.argv)
| 1,585
| 28.924528
| 101
|
py
|
B-SOID
|
B-SOID-master/bsoid_app/analysis_subroutines/analysis_scripts/coherence_boxplot.py
|
import getopt
import sys
import numpy as np
from analysis_utilities.load_data import load_sav
from analysis_utilities.visuals import plot_coherence_boxplot
def main(argv):
    """CLI entry point: load saved coherence values and draw the boxplot.

    Consistency fix: the parsed color option is now echoed alongside the
    other settings (it was parsed but omitted from the printout, unlike the
    sibling accuracy_boxplot script).
    """
    path = None
    name = None
    vname = None
    algorithm = None
    c = None
    fig_format = None
    outpath = None
    options, args = getopt.getopt(
        argv[1:],
        'p:f:v:a:c:m:o:',
        ['path=', 'file=', 'variable=', 'algorithm=', 'color=', 'format=', 'outpath='])
    for option_key, option_value in options:
        if option_key in ('-p', '--path'):
            path = option_value
        elif option_key in ('-f', '--file'):
            name = option_value
        elif option_key in ('-v', '--variable'):
            vname = option_value
        elif option_key in ('-a', '--algorithm'):
            algorithm = option_value
        elif option_key in ('-c', '--color'):
            c = option_value
        elif option_key in ('-m', '--format'):
            fig_format = option_value
        elif option_key in ('-o', '--outpath'):
            outpath = option_value
    print('*' * 50)
    print('PATH :', path)
    print('NAME :', name)
    print('VARIABLE :', vname)
    print('ALGORITHM :', algorithm)
    print('COLORS :', c)
    print('FIG FORMAT :', fig_format)
    print('OUT PATH :', outpath)
    print('*' * 50)
    print('Plotting...')
    coherence_reordered = load_sav(path, name, vname)
    plot_coherence_boxplot(algorithm, np.array(coherence_reordered).T, c, (6, 16), fig_format, outpath)


if __name__ == '__main__':
    main(sys.argv)
| 1,551
| 27.740741
| 103
|
py
|
B-SOID
|
B-SOID-master/bsoid_app/analysis_subroutines/analysis_scripts/pose_relationships_hist.py
|
import getopt
import itertools
import sys
from ast import literal_eval
from collections import OrderedDict
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.pyplot import figure
from analysis_utilities.load_data import appdata
def plot_pose_relationships(path, name, order, fig_size, fig_format, outpath):
    """Plot per-group histograms of every extracted feature, one figure per
    feature, with groups stacked vertically in the given *order*.

    Features are assumed to come in three consecutive spans: pairwise pose
    distances, pairwise pose-vector angles, then per-point displacements
    (mirrors the B-SOiD feature layout — confirm against the extractor).

    :param path: workspace directory
    :param name: workspace file prefix
    :param order: group indices, top-to-bottom subplot order
    :param fig_size: figure size in inches
    :param fig_format: image format for the saved figures
    :param outpath: output path prefix
    """
    appdata_ = appdata(path, name)
    f_10fps_sub, _ = appdata_.load_embeddings()
    _, assignments, _, _ = appdata_.load_clusters()
    # build human-readable names for the three feature spans; the number of
    # body parts is recovered as sqrt of the sampled feature count
    feature_type1_name = []
    feature_type2_name = []
    feature_type3_name = []
    for i, j in itertools.combinations(range(0, int(np.sqrt(f_10fps_sub.shape[0]))), 2):
        feature_type1_name.append(['Pose ', i + 1, j + 1, '$\Delta$ pixels'])
        feature_type2_name.append(['Pose vector ', i + 1, j + 1, '$\Delta$ degrees'])
    for i in range(int(np.sqrt(f_10fps_sub.shape[0]))):
        feature_type3_name.append(['Pose ', i + 1, None, '$\Delta$ pixels'])
    keys = np.arange(len(feature_type1_name) + len(feature_type2_name) + len(feature_type3_name))
    pose_relationships = OrderedDict({key: [] for key in keys})
    # m, n, o deliberately leak out of these loops: they mark the span
    # boundaries used to pick pixel vs. degree scaling below
    for m, feat_name in enumerate(feature_type1_name):
        pose_relationships[m] = feat_name
    for n, feat_name in enumerate(feature_type2_name):
        pose_relationships[m + n + 1] = feat_name
    for o, feat_name in enumerate(feature_type3_name):
        pose_relationships[m + n + o + 2] = feat_name
    r = np.linspace(0, 1, len(np.unique(assignments)))
    cm = plt.cm.get_cmap("Spectral")(r)
    for f in range(f_10fps_sub.shape[1]):
        fig = figure(num=None, figsize=fig_size, dpi=300, facecolor='w', edgecolor='k')
        fig.suptitle("{}".format(pose_relationships[f]), fontsize=30)
        k = 0
        for i in order:
            k += 1
            ax = plt.subplot(len(np.unique(assignments)), 1, k)
            # distance-type features (outside the angle span) are shown in cm
            if f <= m or f > m + n + 1:
                # 23.5126 — presumably pixels-per-centimeter conversion; confirm
                values, base = np.histogram(f_10fps_sub[assignments == i, f] / 23.5126,
                                            bins=np.linspace(0, np.mean(f_10fps_sub[assignments == i, f] / 23.5126) +
                                                             3 * np.std(f_10fps_sub[assignments == i, f] / 23.5126),
                                                             num=50),
                                            weights=np.ones(len(f_10fps_sub[assignments == i, f])) /
                                            len(f_10fps_sub[assignments == i, f]),
                                            density=False)
                values = np.append(values, 0)
                ax.plot(base, values, color=cm[k - 1], marker='None', linestyle='-', linewidth=5)
                ax.set_xlim(0, np.mean(f_10fps_sub[:, f] / 23.5126) + 3 * np.std(f_10fps_sub[:, f] / 23.5126))
                # only the bottom-most subplots keep their x tick labels
                if i < len(np.unique(assignments)) - 2:
                    ax.tick_params(axis='x', which='both', bottom=False, top=False, labelbottom=False, labelsize=16)
                    ax.tick_params(axis='y', which='both', right=False, labelright=False, labelsize=16)
                else:
                    ax.tick_params(labelsize=16)
                    ax.set_xticks(np.linspace(0, np.mean(f_10fps_sub[:, f] / 23.5126) +
                                              3 * np.std(f_10fps_sub[:, f] / 23.5126), num=5))
                fig.text(0.5, 0.07, 'Centimeters', ha='center', fontsize=16)
                fig.text(0.03, 0.5, 'Probability', va='center', rotation='vertical', fontsize=16)
            else:
                # angle-type features: symmetric range of mean +/- 3 std, in degrees
                values, base = np.histogram(f_10fps_sub[assignments == i, f],
                                            bins=np.linspace(np.mean(f_10fps_sub[assignments == i, f]) -
                                                             3 * np.std(f_10fps_sub[assignments == i, f]),
                                                             np.mean(f_10fps_sub[assignments == i, f]) +
                                                             3 * np.std(f_10fps_sub[assignments == i, f]), num=50),
                                            weights=np.ones(len(f_10fps_sub[assignments == i, f])) /
                                            len(f_10fps_sub[assignments == i, f]),
                                            density=False)
                values = np.append(values, 0)
                ax.plot(base, values, color=cm[k - 1], marker='None', linestyle='-', linewidth=5)
                ax.set_xlim(np.mean(f_10fps_sub[:, f]) - 3 * np.std(f_10fps_sub[:, f]),
                            np.mean(f_10fps_sub[:, f]) + 3 * np.std(f_10fps_sub[:, f]))
                if i < len(np.unique(assignments)) - 2:
                    ax.tick_params(axis='x', which='both', bottom=False, top=False, labelbottom=False, labelsize=16)
                    ax.tick_params(axis='y', which='both', right=False, labelright=False, labelsize=16)
                else:
                    ax.tick_params(labelsize=16)
                    ax.set_xticks(np.linspace(np.mean(f_10fps_sub[:, f]) - 3 * np.std(f_10fps_sub[:, f]),
                                              np.mean(f_10fps_sub[:, f]) + 3 * np.std(f_10fps_sub[:, f]), num=5))
                fig.text(0.5, 0.07, 'Degrees', ha='center', fontsize=16)
                fig.text(0.03, 0.5, 'Probability', va='center', rotation='vertical', fontsize=16)
        plt.savefig(str.join('', (outpath, '{}_{}_histogram.'.format(name, pose_relationships[f]), fig_format)),
                    format=fig_format, transparent=True)
        plt.close()
    return
def main(argv):
    """Command-line entry point: parse options and render the pose-relationship plot.

    Options: -p/--path, -f/--file, -r/--order (a Python-literal ordering),
    -m/--format (figure format), -o/--outpath.
    """
    # Route every accepted flag to its destination argument name.
    flag_dest = {
        '-p': 'path', '--path': 'path',
        '-f': 'name', '--file': 'name',
        '-r': 'order', '--order': 'order',
        '-m': 'fig_format', '--format': 'fig_format',
        '-o': 'outpath', '--outpath': 'outpath',
    }
    args_out = {'path': None, 'name': None, 'order': None, 'fig_format': None, 'outpath': None}
    parsed, _ = getopt.getopt(
        argv[1:],
        'p:f:r:m:o:',
        ['path=', 'file=', 'order=', 'format=', 'outpath='])
    for flag, value in parsed:
        if flag in flag_dest:
            args_out[flag_dest[flag]] = value
    banner = '*' * 50
    print(banner)
    print('PATH :', args_out['path'])
    print('NAME :', args_out['name'])
    print('ORDER :', args_out['order'])
    print('FIG FORMAT :', args_out['fig_format'])
    print('OUT PATH :', args_out['outpath'])
    print(banner)
    print('Plotting...')
    plot_pose_relationships(args_out['path'], args_out['name'], literal_eval(args_out['order']),
                            (11, 16), args_out['fig_format'], args_out['outpath'])


if __name__ == '__main__':
    main(sys.argv)
| 6,576
| 51.616
| 117
|
py
|
B-SOID
|
B-SOID-master/bsoid_app/analysis_subroutines/analysis_scripts/frameshift_coherence.py
|
import getopt
import sys
from ast import literal_eval
import numpy as np
from analysis_utilities.load_data import appdata
from analysis_utilities.processing import reorganize_group_order
from analysis_utilities.save_data import results
def generate_coherence(path, name, fps, target_fps, frame_skips, animal_index, t, order):
    """Quantify per-group label coherence under simulated frame-rate drops.

    Upsamples the frameshifted labels of one session to ``target_fps``, then
    for each skip factor compares skip-and-hold labels against the full-rate
    labels and reports, per behavioral group, the fraction of agreeing frames.

    :param path: str, workspace directory containing the saved predictions
    :param name: str, workspace prefix
    :param fps: int, native frame-rate of the saved labels
    :param target_fps: int, frame-rate to upsample to before skipping
    :param frame_skips: iterable of int, skip factors to simulate
    :param animal_index: int, which session's labels to analyze
    :param t: int, number of native frames to analyze
    :param order: group ordering passed to reorganize_group_order
    :return: 2D array (len(frame_skips) x n_groups) of coherence fractions
    """
    appdata_ = appdata(path, name)
    flders, flder, filenames, data_new, fs_labels = appdata_.load_predictions()
    coherence_data = []
    # BUGFIX: np.repeat requires an integer repeat count; np.floor returns a
    # float, which modern NumPy rejects — cast explicitly.
    upsample = int(np.floor(target_fps / fps))
    labels = np.repeat(fs_labels[animal_index], upsample)
    t = int(t * upsample)
    for i in frame_skips:
        downsampled_labels = labels[0:t:i]
        # Hold each sampled label for i frames to rebuild a full-rate sequence.
        filled_labels = np.repeat(downsampled_labels, i)
        coh_vec = []
        # NOTE(review): a group absent from the first t frames would divide by
        # zero below; assumed every group occurs in the analyzed window.
        for j in range(len(np.unique(fs_labels[0][0:t]))):
            coh_vec.append(
                len(np.argwhere((filled_labels[0:t] - labels[0:t] == 0) & (labels[0:t] == j)))
                / len(np.argwhere(labels[0:t] == j)))
        coherence_data.append(np.array(coh_vec))
    coherence_data = np.array(coherence_data)
    coherence_reordered = reorganize_group_order(coherence_data, order)
    return np.array(coherence_reordered)
def main(argv):
    """CLI entry point: parse options, compute frameshift coherence, save result.

    Options: -p/--path, -n/--file, -f/--fps, -F/--target_fps, -s/--frame_skips,
    -i/--animal_idx, -t/--time, -o/--order, -v/--variable.
    """
    path = None
    name = None
    fps = None
    target_fps = None
    frame_skips = None
    animal_index = None
    t = None
    order = None
    vname = None
    options, args = getopt.getopt(
        argv[1:],
        'p:n:f:F:s:i:t:o:v:',
        ['path=', 'file=', 'fps=', 'target_fps=', 'frame_skips=', 'animal_idx=', 'time=', 'order=', 'variable='])
    for option_key, option_value in options:
        if option_key in ('-p', '--path'):
            path = option_value
        elif option_key in ('-n', '--file'):
            name = option_value
        # BUGFIX: the long option is registered as '--fps' above; the previous
        # check for '--framerate' could never match, so --fps was silently lost.
        elif option_key in ('-f', '--fps'):
            fps = option_value
        elif option_key in ('-F', '--target_fps'):
            target_fps = option_value
        elif option_key in ('-s', '--frame_skips'):
            frame_skips = option_value
        elif option_key in ('-i', '--animal_idx'):
            animal_index = option_value
        elif option_key in ('-t', '--time'):
            t = option_value
        elif option_key in ('-o', '--order'):
            order = option_value
        elif option_key in ('-v', '--variable'):
            vname = option_value
    print('*' * 50)
    print('PATH :', path)
    print('NAME :', name)
    print('FRAMERATE :', fps)
    print('TARGET FRAMERATE :', target_fps)
    print('FRAME SKIPS :', frame_skips)
    print('ANIMAL INDEX :', animal_index)
    print('TIME :', t)
    print('ORDER :', order)
    print('VARIABLE :', vname)
    print('*' * 50)
    print('Computing...')
    coherence_reordered = generate_coherence(path, name, int(fps), int(target_fps), literal_eval(frame_skips),
                                             int(animal_index), int(t), literal_eval(order))
    results_ = results(path, name)
    results_.save_sav(coherence_reordered, vname)


if __name__ == '__main__':
    main(sys.argv)
| 3,054
| 34.523256
| 113
|
py
|
B-SOID
|
B-SOID-master/bsoid_app/analysis_subroutines/analysis_scripts/__init__.py
| 0
| 0
| 0
|
py
|
|
B-SOID
|
B-SOID-master/bsoid_app/analysis_subroutines/analysis_scripts/kinematics_cdf.py
|
import getopt
import sys
from ast import literal_eval
import numpy as np
from analysis_subroutines.analysis_utilities.load_data import load_sav
from analysis_utilities.visuals import plot_kinematics_cdf
def main(argv):
    """CLI entry point: load saved kinematics, convert units, and plot CDFs.

    -V/--variable_name selects the measure: 'Distance' (cm), 'Speed' (cm/s)
    or 'Duration' (s); -b selects the body-part index within the saved data.
    """
    path = None
    name = None
    var = None
    vname = None
    bp = None
    c = None
    x_range = None
    leg = None
    fig_format = None
    outpath = None
    options, args = getopt.getopt(
        argv[1:],
        'p:n:v:V:b:c:r:l:m:o:',
        # BUGFIX: 'variable_name' lacked the trailing '=', so getopt treated
        # --variable_name as a no-argument flag and rejected its value.
        ['path=', 'file=', 'variables=', 'variable_name=', 'bodypart=',
         'colors=', 'range=', 'legend=', 'format=', 'outpath='])
    for option_key, option_value in options:
        if option_key in ('-p', '--path'):
            path = option_value
        elif option_key in ('-n', '--file'):
            name = option_value
        elif option_key in ('-v', '--variables'):
            var = option_value
        elif option_key in ('-V', '--variable_name'):
            vname = option_value
        elif option_key in ('-b', '--bodypart'):
            bp = option_value
        elif option_key in ('-c', '--colors'):
            c = option_value
        elif option_key in ('-r', '--range'):
            x_range = option_value
        elif option_key in ('-l', '--legend'):
            leg = option_value
        elif option_key in ('-m', '--format'):
            fig_format = option_value
        elif option_key in ('-o', '--outpath'):
            outpath = option_value
    print('*' * 50)
    print('PATH :', path)
    print('NAME :', name)
    print('VARIABLES :', var)
    print('VARIABLE NAME :', vname)
    print('BODYPART :', bp)
    print('COLOR :', c)
    print('RANGE :', x_range)
    print('FIG FORMAT :', fig_format)
    print('OUT PATH :', outpath)
    print('*' * 50)
    print('Plotting...')
    kin_data = load_sav(path, name, var)
    # BUGFIX: fail fast with a clear message instead of the previous NameError
    # from an unbound 'conv' when -V did not match any known measure.
    if vname == 'Distance':
        conv = 0
    elif vname == 'Speed':
        conv = 1
    elif vname == 'Duration':
        conv = 2
    else:
        raise ValueError('Unknown variable name: {}'.format(vname))
    # 23.5126 presumably converts pixels to cm and 60 is the recording fps —
    # TODO confirm both against the acquisition setup.
    if conv == 0:
        data = [np.concatenate(kin_data[0][int(bp)] / 23.5126),
                np.concatenate(kin_data[1][int(bp)] / 23.5126)]
    elif conv == 1:
        data = [np.concatenate(kin_data[2][int(bp)] * 60 / 23.5126),
                np.concatenate(kin_data[3][int(bp)] * 60 / 23.5126)]
    elif conv == 2:
        data = [kin_data[4][int(bp)] / 60,
                kin_data[5][int(bp)] / 60]
    plot_kinematics_cdf(None, var, vname, data, literal_eval(c), 50, 4, int(leg), (16, 16), fig_format, outpath)


if __name__ == '__main__':
    main(sys.argv)
| 2,559
| 30.604938
| 112
|
py
|
B-SOID
|
B-SOID-master/bsoid_app/config/global_config.py
|
# Shared hyper-parameters for the B-SOiD app.

# Presumably passed through as umap.UMAP keyword arguments — confirm at the
# call site; random_state is pinned for reproducible embeddings.
UMAP_PARAMS = {
    'min_dist': 0.0,
    'random_state': 42,
}
# Presumably passed through as hdbscan.HDBSCAN keyword arguments — confirm at
# the call site.
HDBSCAN_PARAMS = {
    'min_samples': 1
}
| 105
| 12.25
| 23
|
py
|
B-SOID
|
B-SOID-master/bsoid_app/config/__init__.py
|
from bsoid_app.config.global_config import *
| 44
| 44
| 44
|
py
|
B-SOID
|
B-SOID-master/bsoid_app/bsoid_utilities/load_css.py
|
import streamlit as st
def local_css(file_name):
    """Inject a local CSS file into the Streamlit app as an inline <style> tag."""
    with open(file_name) as css_file:
        css = css_file.read()
    st.markdown('<style>{}</style>'.format(css), unsafe_allow_html=True)
| 164
| 22.571429
| 81
|
py
|
B-SOID
|
B-SOID-master/bsoid_app/bsoid_utilities/load_json.py
|
import numpy as np
import pandas as pd
from tqdm import tqdm
def read_json_single(filename):
    """Convert one OpenPose JSON keypoint file into a one-row DLC-style CSV.

    Reads the first detected person's flattened 2D keypoints, writes a CSV
    next to the input file (named from the filename prefix), and returns that
    CSV re-read as a flat DataFrame.

    :param filename: str, path to an OpenPose *_keypoints.json file
    :return dfc: DataFrame, the freshly written CSV read back with pandas
    """
    # OpenPose-style keypoint names; index 25 ('Background') is skipped below.
    pose_names = {0: "Nose", 1: "Neck", 2: "RShoulder", 3: "RElbow", 4: "RWrist", 5: "LShoulder",
                  6: "LElbow", 7: "LWrist", 8: "MidHip", 9: "RHip", 10: "RKnee", 11: "RAnkle", 12: "LHip",
                  13: "LKnee", 14: "LAnkle", 15: "REye", 16: "LEye", 17: "REar", 18: "LEar", 19: "LBigToe",
                  20: "LSmallToe", 21: "LHeel", 22: "RBigToe", 23: "RSmallToe", 24: "RHeel", 25: "Background"}
    df = pd.read_json(filename)
    data_arr = df['people']
    # Keypoints arrive flattened as [x0, y0, likelihood0, x1, y1, ...] for person 0.
    data_length = len(data_arr[0]['pose_keypoints_2d'])
    x_val = data_arr[0]['pose_keypoints_2d'][0:data_length:3]
    y_val = data_arr[0]['pose_keypoints_2d'][1:data_length:3]
    l_val = data_arr[0]['pose_keypoints_2d'][2:data_length:3]
    xyl = []
    for i in range(int(data_length / 3)):
        xyl.extend([x_val[i], y_val[i], l_val[i]])
    xyl_array = np.array(xyl).reshape(1, len(xyl))
    a = []
    # NOTE(review): 'Openpose' vs 'OpenPose' capitalization is inconsistent
    # between the x and y tuples; downstream code appears to ignore this level,
    # but confirm before relying on it.
    for i in range(len(pose_names) - 1):
        a.extend((('Openpose', pose_names[i], 'x'), ('OpenPose', pose_names[i], 'y'),
                  ('Openpose', pose_names[i], 'likelihood')))
    micolumns = pd.MultiIndex.from_tuples(a, names=['Algorithm', 'Body parts', 'Frame number'])
    df2 = pd.DataFrame(xyl_array, columns=micolumns)
    # Output CSV name: strip the trailing '_<frame>_keypoints' parts of the input.
    fname = filename.rpartition('/')[-1].rpartition('_')[0].rpartition('_')[0]
    df2.to_csv(str.join('', (filename.rpartition('/')[0], '/', fname, '.csv')), index=True, chunksize=10000,
               encoding='utf-8')
    dfc = pd.read_csv(str.join('', (filename.rpartition('/')[0], '/', fname, '.csv')), low_memory=False)
    return dfc
def json2csv_multi(filenames):
    """Convert a sequence of per-frame OpenPose JSON files into one CSV.

    Each JSON contributes one row of flattened (x, y, likelihood) triplets for
    the first detected person. Frames with no detection re-use the previous
    frame's row. The CSV is written next to the first input file.

    :param filenames: list of str, per-frame *_keypoints.json paths in order
    """
    # OpenPose-style keypoint names; index 25 ('Background') is skipped below.
    pose_names = {0: "Nose", 1: "Neck", 2: "RShoulder", 3: "RElbow", 4: "RWrist", 5: "LShoulder",
                  6: "LElbow", 7: "LWrist", 8: "MidHip", 9: "RHip", 10: "RKnee", 11: "RAnkle", 12: "LHip",
                  13: "LKnee", 14: "LAnkle", 15: "REye", 16: "LEye", 17: "REar", 18: "LEar", 19: "LBigToe",
                  20: "LSmallToe", 21: "LHeel", 22: "RBigToe", 23: "RSmallToe", 24: "RHeel", 25: "Background"}
    xyl_array = np.empty((len(filenames), (len(pose_names) - 1) * 3))
    empty_count = 0  # frames with no detected person (counted, not reported)
    for j, ff in enumerate(tqdm(filenames)):
        df = pd.read_json(ff)
        data_arr = df['people']
        try:
            data_length = len(data_arr[0]['pose_keypoints_2d'])
            x_val = data_arr[0]['pose_keypoints_2d'][0:data_length:3]
            y_val = data_arr[0]['pose_keypoints_2d'][1:data_length:3]
            l_val = data_arr[0]['pose_keypoints_2d'][2:data_length:3]
            xyl = []
            for i in range(int(data_length / 3)):
                xyl.extend([x_val[i], y_val[i], l_val[i]])
            xyl_array[j, :] = np.array(xyl).reshape(1, len(xyl))
        except KeyError:
            # No person detected in this frame: forward-fill the previous row.
            xyl_array[j, :] = xyl_array[j - 1, :]
            empty_count += 1
    a = []
    # NOTE(review): 'Openpose' vs 'OpenPose' capitalization is inconsistent, as
    # in read_json_single — confirm downstream code ignores this level.
    for i in range(len(pose_names) - 1):
        a.extend((('Openpose', pose_names[i], 'x'), ('OpenPose', pose_names[i], 'y'),
                  ('Openpose', pose_names[i], 'likelihood')))
    micolumns = pd.MultiIndex.from_tuples(a, names=['Algorithm', 'Body parts', 'Frame number'])
    df2 = pd.DataFrame(xyl_array, columns=micolumns)
    fname = filenames[0].rpartition('/')[-1].rpartition('_')[0].rpartition('_')[0]
    df2.to_csv(str.join('', (filenames[0].rpartition('/')[0], '/', fname, '.csv')), index=True, chunksize=10000,
               encoding='utf-8')
    return
| 3,497
| 52
| 112
|
py
|
B-SOID
|
B-SOID-master/bsoid_app/bsoid_utilities/load_workspace.py
|
import streamlit as st
import os
import joblib
@st.cache
def load_data(path, name):
    """Load and cache the '<name>_data.sav' workspace file as a list."""
    sav_path = os.path.join(path, str.join('', (name, '_data.sav')))
    with open(sav_path, 'rb') as fh:
        contents = joblib.load(fh)
    return list(contents)
def query_workspace():
    """Sidebar flow: pick a prior B-SOiD working directory and file prefix.

    Scans the chosen directory for '<prefix>_data.sav' files (excluding the
    derived accuracy/coherence files) and lets the user select one prefix.

    :return working_dir: str, the typed directory
    :return prefix: str or None, the selected workspace prefix
    """
    working_dir = st.sidebar.text_input('Enter the prior B-SOiD working directory:')
    try:
        os.listdir(working_dir)
        st.markdown(
            'You have selected **{}** for prior working directory.'.format(working_dir))
    except FileNotFoundError:
        st.error('No such directory')
    # Workspace files look like '<prefix>_data.sav'; skip derived result files.
    files = [i for i in os.listdir(working_dir) if os.path.isfile(os.path.join(working_dir, i)) and \
             '_data.sav' in i and not '_accuracy' in i and not '_coherence' in i]
    bsoid_variables = [files[i].partition('_data.sav')[0] for i in range(len(files))]
    # Deduplicate prefixes while preserving discovery order.
    bsoid_prefix = []
    for var in bsoid_variables:
        if var not in bsoid_prefix:
            bsoid_prefix.append(var)
    prefix = st.selectbox('Select prior B-SOiD prefix', bsoid_prefix)
    try:
        st.markdown('You have selected **{}_XXX.sav** for prior prefix.'.format(prefix))
    except TypeError:
        st.error('Please input a prior prefix to load workspace.')
    return working_dir, prefix
@st.cache
def load_feats(path, name):
    """Load and cache the '<name>_feats.sav' workspace file as a list."""
    sav_path = os.path.join(path, str.join('', (name, '_feats.sav')))
    with open(sav_path, 'rb') as fh:
        contents = joblib.load(fh)
    return list(contents)
@st.cache
def load_embeddings(path, name):
    """Load and cache the '<name>_embeddings.sav' workspace file as a list."""
    sav_path = os.path.join(path, str.join('', (name, '_embeddings.sav')))
    with open(sav_path, 'rb') as fh:
        contents = joblib.load(fh)
    return list(contents)
@st.cache
def load_clusters(path, name):
    """Load and cache the '<name>_clusters.sav' workspace file as a list."""
    sav_path = os.path.join(path, str.join('', (name, '_clusters.sav')))
    with open(sav_path, 'rb') as fh:
        contents = joblib.load(fh)
    return list(contents)
@st.cache(allow_output_mutation=True)
def load_classifier(path, name):
    """Load and cache the '<name>_randomforest.sav' classifier file as a list."""
    sav_path = os.path.join(path, str.join('', (name, '_randomforest.sav')))
    with open(sav_path, 'rb') as fh:
        contents = joblib.load(fh)
    return list(contents)
@st.cache
def load_predictions(path, name):
    """Load and cache the '<name>_predictions.sav' workspace file as a list."""
    sav_path = os.path.join(path, str.join('', (name, '_predictions.sav')))
    with open(sav_path, 'rb') as fh:
        contents = joblib.load(fh)
    return list(contents)
def load_new_feats(path, name):
    """Load the '<name>_new_feats.sav' workspace file as a list (not cached)."""
    sav_path = os.path.join(path, str.join('', (name, '_new_feats.sav')))
    with open(sav_path, 'rb') as fh:
        contents = joblib.load(fh)
    return list(contents)
| 2,410
| 29.518987
| 101
|
py
|
B-SOID
|
B-SOID-master/bsoid_app/bsoid_utilities/likelihoodprocessing.py
|
"""
likelihood processing analysis_utilities
Forward fill low likelihood (x,y)
"""
import glob
import re
import numpy as np
import pandas as pd
from tqdm import tqdm
def boxcar_center(a, n):
    """Centered moving average of `a` over a window of `n` samples.

    Edge windows are shortened (min_periods=1), so the output has the same
    length as the input.
    """
    series = pd.Series(a)
    smoothed = series.rolling(window=n, min_periods=1, center=True).mean()
    return np.array(smoothed)
def convert_int(s):
    """Return `s` as an int when it is purely numeric, otherwise unchanged."""
    return int(s) if s.isdigit() else s


def alphanum_key(s):
    """Split `s` into alternating text/number chunks for natural-order sorting."""
    return [convert_int(chunk) for chunk in re.split('([0-9]+)', s)]


def sort_nicely(l):
    """Sort `l` in place in human (natural) order, e.g. 'f2' before 'f10'."""
    l.sort(key=alphanum_key)
def get_filenames(base_path, folder):
    """Return the natural-sorted list of CSV paths under base_path + folder."""
    found = glob.glob(base_path + folder + '/*.csv')
    sort_nicely(found)
    return found
def get_filenamesh5(base_path, folder):
    """Return the natural-sorted list of HDF5 (.h5) paths under base_path + folder."""
    found = glob.glob(base_path + folder + '/*.h5')
    sort_nicely(found)
    return found
def get_filenamesjson(base_path, folder):
    """Return the natural-sorted list of JSON paths under base_path + folder."""
    found = glob.glob(base_path + folder + '/*.json')
    sort_nicely(found)
    return found
def import_folders(base_path, folders: list, pose):
    """Read every CSV under each folder and adaptively filter low-likelihood points.

    :param base_path: str, root directory containing the folders
    :param folders: list of str, folder names to scan for CSV files
    :param pose: body-part column offsets handed to adp_filt
    :return fldrs: list of visited folder names
    :return filenames: list of per-folder CSV path lists
    :return data: array of filtered (x, y) arrays, one per CSV
    :return perc_rect_li: list of per-CSV sub-threshold fractions
    """
    fldrs, filenames = [], []
    rawdata_li, data_li, perc_rect_li = [], [], []
    for fd in folders:
        csv_paths = get_filenames(base_path, fd)
        for csv_path in csv_paths:
            curr_df = pd.read_csv(csv_path, low_memory=False)
            filtered, perc_rect = adp_filt(curr_df, pose)
            rawdata_li.append(curr_df)
            perc_rect_li.append(perc_rect)
            data_li.append(filtered)
        fldrs.append(fd)
        filenames.append(csv_paths)
    data = np.array(data_li)
    return fldrs, filenames, data, perc_rect_li
def adp_filt(currdf: object, pose):
    """Adaptively filter low-likelihood pose estimates in a DLC-style CSV.

    For each body part an adaptive likelihood threshold is derived from the
    histogram of its likelihood values; frames below the threshold are
    forward-filled from the last confident frame.

    :param currdf: DataFrame as read from a DLC CSV (header rows included)
    :param pose: iterable of column offsets selecting the body parts to keep
    :return currdf_filt: 2D float array of filtered (x, y) columns
    :return perc_rect: list, per-bodypart fraction of frames below threshold
    """
    try:  # tqdm is only a progress bar here; fall back to a no-op if missing
        from tqdm import tqdm
    except ImportError:
        def tqdm(iterable):
            return iterable
    lIndex = []
    xIndex = []
    yIndex = []
    currdf = np.array(currdf[1:])
    for header in pose:
        if currdf[0][header + 1] == "likelihood":
            lIndex.append(header)
        elif currdf[0][header + 1] == "x":
            xIndex.append(header)
        elif currdf[0][header + 1] == "y":
            yIndex.append(header)
    curr_df1 = currdf[:, 1:]
    datax = curr_df1[1:, np.array(xIndex)]
    datay = curr_df1[1:, np.array(yIndex)]
    data_lh = curr_df1[1:, np.array(lIndex)]
    currdf_filt = np.zeros((datax.shape[0], (datax.shape[1]) * 2))
    perc_rect = []
    for i in range(data_lh.shape[1]):
        perc_rect.append(0)
    for x in tqdm(range(data_lh.shape[1])):
        # BUGFIX: np.float was removed in NumPy 1.24; use the builtin float.
        a, b = np.histogram(data_lh[1:, x].astype(float))
        # Threshold at the first histogram "rise" past the low-likelihood mode.
        rise_a = np.where(np.diff(a) >= 0)
        if rise_a[0][0] > 1:
            llh = b[rise_a[0][0]]
        else:
            llh = b[rise_a[0][1]]
        data_lh_float = data_lh[:, x].astype(float)
        perc_rect[x] = np.sum(data_lh_float < llh) / data_lh.shape[0]
        currdf_filt[0, (2 * x):(2 * x + 2)] = np.hstack([datax[0, x], datay[0, x]])
        for i in range(1, data_lh.shape[0]):
            if data_lh_float[i] < llh:
                # Below threshold: carry the previous confident (x, y) forward.
                currdf_filt[i, (2 * x):(2 * x + 2)] = currdf_filt[i - 1, (2 * x):(2 * x + 2)]
            else:
                currdf_filt[i, (2 * x):(2 * x + 2)] = np.hstack([datax[i, x], datay[i, x]])
    currdf_filt = np.array(currdf_filt)
    currdf_filt = currdf_filt.astype(float)
    return currdf_filt, perc_rect
def adp_filt_h5(currdf: object, pose):
    """Adaptively filter low-likelihood pose estimates in a DLC .h5 DataFrame.

    Same thresholding/forward-fill scheme as adp_filt(), but reads the
    x/y/likelihood roles from level 2 of the MultiIndex columns.

    :param currdf: DataFrame with (scorer, bodypart, coord) MultiIndex columns
    :param pose: iterable of column indices selecting the body parts to keep
    :return currdf_filt: 2D float array of filtered (x, y) columns
    :return perc_rect: list, per-bodypart fraction of frames below threshold
    """
    try:  # tqdm is only a progress bar here; fall back to a no-op if missing
        from tqdm import tqdm
    except ImportError:
        def tqdm(iterable):
            return iterable
    lIndex = []
    xIndex = []
    yIndex = []
    headers = np.array(currdf.columns.get_level_values(2)[:])
    for header in pose:
        if headers[header] == "likelihood":
            lIndex.append(header)
        elif headers[header] == "x":
            xIndex.append(header)
        elif headers[header] == "y":
            yIndex.append(header)
    curr_df1 = np.array(currdf)
    datax = curr_df1[:, np.array(xIndex)]
    datay = curr_df1[:, np.array(yIndex)]
    data_lh = curr_df1[:, np.array(lIndex)]
    currdf_filt = np.zeros((datax.shape[0], (datax.shape[1]) * 2))
    perc_rect = []
    for i in range(data_lh.shape[1]):
        perc_rect.append(0)
    for x in tqdm(range(data_lh.shape[1])):
        # BUGFIX: np.float was removed in NumPy 1.24; use the builtin float.
        a, b = np.histogram(data_lh[1:, x].astype(float))
        # Threshold at the first histogram "rise" past the low-likelihood mode.
        rise_a = np.where(np.diff(a) >= 0)
        if rise_a[0][0] > 1:
            llh = b[rise_a[0][0]]
        else:
            llh = b[rise_a[0][1]]
        data_lh_float = data_lh[:, x].astype(float)
        perc_rect[x] = np.sum(data_lh_float < llh) / data_lh.shape[0]
        currdf_filt[0, (2 * x):(2 * x + 2)] = np.hstack([datax[0, x], datay[0, x]])
        for i in range(1, data_lh.shape[0]):
            if data_lh_float[i] < llh:
                # Below threshold: carry the previous confident (x, y) forward.
                currdf_filt[i, (2 * x):(2 * x + 2)] = currdf_filt[i - 1, (2 * x):(2 * x + 2)]
            else:
                currdf_filt[i, (2 * x):(2 * x + 2)] = np.hstack([datax[i, x], datay[i, x]])
    currdf_filt = np.array(currdf_filt)
    currdf_filt = currdf_filt.astype(float)
    return currdf_filt, perc_rect
def adp_filt_sleap_h5(currdf: object, pose):
    """Forward-fill NaN gaps in SLEAP (x, y) tracks.

    :param currdf: h5py File (or dict-like) exposing a 'tracks' dataset;
        layout assumed (instance, x/y, node, frame) — TODO confirm vs SLEAP export
    :param pose: list of node indices to keep
    :return currdf_filt: 2D float array (frames x 2*len(pose)) of filled x/y
    :return perc_rect: list, per-node fraction of NaN (missing) frames
    """
    try:  # tqdm is only a progress bar here; fall back to a no-op if missing
        from tqdm import tqdm
    except ImportError:
        def tqdm(iterable):
            return iterable
    datax = currdf['tracks'][0][0][pose]
    datay = currdf['tracks'][0][1][pose]
    currdf_filt = np.zeros((datax.shape[1], (datax.shape[0]) * 2))
    perc_rect = []
    for i in range(len(pose)):
        perc_rect.append(np.argwhere(np.isnan(datax[i]) == True).shape[0] / datax.shape[1])
    for x in tqdm(range(datax.shape[0])):
        # Seed frame 0 with the first non-NaN sample so leading gaps are filled too.
        first_not_nan = np.where(np.isnan(datax[x, :]) == False)[0][0]
        currdf_filt[0, (2 * x):(2 * x + 2)] = np.hstack([datax[x, first_not_nan], datay[x, first_not_nan]])
        for i in range(1, datax.shape[1]):
            if np.isnan(datax[x][i]):
                currdf_filt[i, (2 * x):(2 * x + 2)] = currdf_filt[i - 1, (2 * x):(2 * x + 2)]
            else:
                currdf_filt[i, (2 * x):(2 * x + 2)] = np.hstack([datax[x, i], datay[x, i]])
    currdf_filt = np.array(currdf_filt)
    # BUGFIX: np.float was removed in NumPy 1.24; the builtin float is equivalent.
    currdf_filt = currdf_filt.astype(float)
    return currdf_filt, perc_rect
def no_filt_sleap_h5(currdf: object, pose):
    """Extract raw (unfiltered) SLEAP x/y coordinates into a tidy DataFrame.

    :param currdf: h5py File (or dict-like) with 'tracks' and 'node_names';
        tracks layout assumed (instance, x/y, node, frame) — TODO confirm
    :param pose: list of node indices to keep
    :return df: DataFrame with (SLEAP, node name, x/y) MultiIndex columns
    """
    datax = currdf['tracks'][0][0][pose]
    datay = currdf['tracks'][0][1][pose]
    pose_ = []
    currdf_nofilt = np.zeros((datax.shape[1], (datax.shape[0]) * 2))
    for x in tqdm(range(datax.shape[0])):
        pose_.append(currdf['node_names'][pose[x]])
        # NOTE(review): this print sits inside the loop, so it emits the partial
        # name list once per node — looks like debug output left in.
        print(pose_)
        for i in range(0, datax.shape[1]):
            currdf_nofilt[i, (2 * x):(2 * x + 2)] = np.hstack([datax[x, i], datay[x, i]])
    currdf_nofilt = np.array(currdf_nofilt)
    header = pd.MultiIndex.from_product([['SLEAP'],
                                         [i for i in pose_],
                                         ['x', 'y']],
                                        names=['algorithm', 'pose', 'coord'])
    df = pd.DataFrame(currdf_nofilt, columns=header)
    return df
| 6,593
| 33.52356
| 107
|
py
|
B-SOID
|
B-SOID-master/bsoid_app/bsoid_utilities/bsoid_classification.py
|
import itertools
import math
import numpy as np
from bsoid_app.bsoid_utilities.likelihoodprocessing import boxcar_center
def bsoid_extract(data, fps):
    """
    Extracts features based on (x,y) positions
    :param data: list, csv data
    :param fps: scalar, input for camera frame-rate
    :return f_10fps: 2D array, extracted features
    """
    # Smoothing window: ~100 ms worth of frames, forced odd.
    # BUGFIX: np.int was removed in NumPy 1.24; use the builtin int.
    win_len = int(np.round(0.05 / (1 / fps)) * 2 - 1)
    feats = []
    for m in range(len(data)):
        dataRange = len(data[m])
        dxy_r = []
        dis_r = []
        for r in range(dataRange):
            if r < dataRange - 1:
                # Frame-to-frame displacement of every (x, y) pair.
                dis = []
                for c in range(0, data[m].shape[1], 2):
                    dis.append(np.linalg.norm(data[m][r + 1, c:c + 2] - data[m][r, c:c + 2]))
                dis_r.append(dis)
            # Pairwise vectors between every pair of body parts in this frame.
            dxy = []
            for i, j in itertools.combinations(range(0, data[m].shape[1], 2), 2):
                dxy.append(data[m][r, i:i + 2] - data[m][r, j:j + 2])
            dxy_r.append(dxy)
        dis_r = np.array(dis_r)
        dxy_r = np.array(dxy_r)
        dis_smth = []
        dxy_eu = np.zeros([dataRange, dxy_r.shape[1]])
        ang = np.zeros([dataRange - 1, dxy_r.shape[1]])
        dxy_smth = []
        ang_smth = []
        for l in range(dis_r.shape[1]):
            dis_smth.append(boxcar_center(dis_r[:, l], win_len))
        for k in range(dxy_r.shape[1]):
            for kk in range(dataRange):
                dxy_eu[kk, k] = np.linalg.norm(dxy_r[kk, k, :])
                if kk < dataRange - 1:
                    # Signed angle (degrees) between successive pairwise vectors.
                    b_3d = np.hstack([dxy_r[kk + 1, k, :], 0])
                    a_3d = np.hstack([dxy_r[kk, k, :], 0])
                    c = np.cross(b_3d, a_3d)
                    ang[kk, k] = np.dot(np.dot(np.sign(c[2]), 180) / np.pi,
                                        math.atan2(np.linalg.norm(c),
                                                   np.dot(dxy_r[kk, k, :], dxy_r[kk + 1, k, :])))
            dxy_smth.append(boxcar_center(dxy_eu[:, k], win_len))
            ang_smth.append(boxcar_center(ang[:, k], win_len))
        dis_smth = np.array(dis_smth)
        dxy_smth = np.array(dxy_smth)
        ang_smth = np.array(ang_smth)
        feats.append(np.vstack((dxy_smth[:, 1:], ang_smth, dis_smth)))
    f_10fps = []
    for n in range(0, len(feats)):
        feats1 = np.zeros(len(data[n]))
        # Bin features into 100 ms windows (means for pairwise distances, sums
        # for angles/displacements), one frameshifted pass per starting offset s.
        for s in range(math.floor(fps / 10)):
            for k in range(round(fps / 10) + s, len(feats[n][0]), round(fps / 10)):
                if k > round(fps / 10) + s:
                    feats1 = np.concatenate((feats1.reshape(feats1.shape[0], feats1.shape[1]),
                                             np.hstack((np.mean((feats[n][0:dxy_smth.shape[0],
                                                                 range(k - round(fps / 10), k)]), axis=1),
                                                        np.sum((feats[n][dxy_smth.shape[0]:feats[n].shape[0],
                                                                range(k - round(fps / 10), k)]),
                                                               axis=1))).reshape(len(feats[0]), 1)), axis=1)
                else:
                    feats1 = np.hstack((np.mean((feats[n][0:dxy_smth.shape[0], range(k - round(fps / 10), k)]),
                                                axis=1),
                                        np.sum((feats[n][dxy_smth.shape[0]:feats[n].shape[0],
                                                range(k - round(fps / 10), k)]), axis=1))).reshape(len(feats[0]), 1)
        f_10fps.append(feats1)
    return f_10fps
def bsoid_predict(feats, clf):
    """Predict a label sequence for every session's feature matrix.

    :param feats: list of 2D arrays, per-session features (features x frames)
    :param clf: fitted classifier exposing .predict()
    :return labels_fslow: list of 1D label arrays, one per session (label/100ms)
    """
    # Each matrix is transposed so rows become per-frame feature vectors.
    return [clf.predict(session_feats.T) for session_feats in feats]
| 3,998
| 42.467391
| 120
|
py
|
B-SOID
|
B-SOID-master/bsoid_app/bsoid_utilities/statistics.py
|
"""
Summary statistics
"""
from bsoid_app.analysis_subroutines.analysis_utilities.statistics import *
| 106
| 10.888889
| 74
|
py
|
B-SOID
|
B-SOID-master/bsoid_app/bsoid_utilities/videoprocessing.py
|
import os
import random
import sys
import cv2
import imageio
import numpy as np
from skimage.transform import rescale
from tqdm import tqdm
from bsoid_app.bsoid_utilities.likelihoodprocessing import sort_nicely
from bsoid_app.bsoid_utilities.statistics import repeating_numbers
def create_labeled_vid(labels, crit, counts, output_fps, frame_dir, output_path):
    """
    :param labels: 1D array, labels from training or testing
    :param crit: scalar, minimum duration for random selection of behaviors, default 300ms
    :param counts: scalar, number of randomly generated examples, default 5
    :param output_fps: scalar, frame-rate of the written example videos
    :param frame_dir: string, directory to where you extracted vid images in LOCAL_CONFIG
    :param output_path: string, directory to where you want to store short video examples in LOCAL_CONFIG
    """
    images = [img for img in os.listdir(frame_dir) if img.endswith(".png")]
    sort_nicely(images)
    # fourcc = cv2.VideoWriter_fourcc(*'mp4v')
    # fourcc = cv2.VideoWriter_fourcc('H', '2', '6', '4')
    fourcc = cv2.VideoWriter_fourcc(*'avc1')
    frame = cv2.imread(os.path.join(frame_dir, images[0]))
    height, width, layers = frame.shape
    rnges = []
    # Find all runs of a repeated label that last at least `crit` frames.
    n, idx, lengths = repeating_numbers(labels)
    idx2 = []
    for i, j in enumerate(lengths):
        if j >= crit:
            rnges.append(range(idx[i], idx[i] + j))
            idx2.append(i)
    for b, i in enumerate(tqdm(np.unique(labels))):
        # Collect all qualifying runs for this behavior, then sample examples.
        a = []
        for j in range(0, len(rnges)):
            if n[idx2[j]] == i:
                a.append(rnges[j])
        try:
            rand_rnges = random.sample(a, min(len(a), counts))
            for k in range(0, len(rand_rnges)):
                video_name = 'group_{}_example_{}.mp4'.format(i, k)
                grp_images = []
                for l in rand_rnges[k]:
                    grp_images.append(images[l])
                video = cv2.VideoWriter(os.path.join(output_path, video_name), fourcc, output_fps, (width, height))
                for image in grp_images:
                    video.write(cv2.imread(os.path.join(frame_dir, image)))
                cv2.destroyAllWindows()
                video.release()
        except Exception:
            # BUGFIX: a bare `except:` also swallowed KeyboardInterrupt and
            # SystemExit; keep the best-effort behavior for per-group failures
            # but let real interrupts propagate.
            pass
    return
class TargetFormat(object):
    """Supported output container extensions for video conversion."""
    GIF = ".gif"
    MP4 = ".mp4"
    AVI = ".avi"
def convertFile(inputpath, targetFormat):
    """Re-encode `inputpath` into `targetFormat` at half resolution, keeping fps.

    The output file is written next to the input with its extension swapped;
    per-frame progress is echoed to stdout.
    """
    outputpath = os.path.splitext(inputpath)[0] + targetFormat
    reader = imageio.get_reader(inputpath)
    source_fps = reader.get_meta_data()['fps']
    writer = imageio.get_writer(outputpath, fps=source_fps)
    for frame_idx, frame in enumerate(reader):
        half_res = rescale(frame, (0.5, 0.5, 1), anti_aliasing=False)
        sys.stdout.write("\rframe {0}".format(frame_idx))
        sys.stdout.flush()
        writer.append_data(half_res)
    writer.close()
def convert2gif(file, targetFormat):
    """Convenience wrapper: convert `file` to `targetFormat` (e.g. TargetFormat.GIF)."""
    convertFile(file, targetFormat)
| 2,845
| 33.707317
| 115
|
py
|
B-SOID
|
B-SOID-master/bsoid_app/bsoid_utilities/visuals.py
|
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
import streamlit as st
from matplotlib.axes._axes import _log as matplotlib_axes_logger
from sklearn.metrics import plot_confusion_matrix
matplotlib_axes_logger.setLevel('ERROR')
def plot_bar(sub_threshold):
    """Render the per-file sub-threshold (noise) fraction as a chart plus table."""
    st.write('If the below __% noise__ (y-axis) is unreasonable, consider refining pose-estimation software.')
    noise_df = pd.DataFrame(sub_threshold)
    chart_col, table_col = st.beta_columns([3, 2])
    chart_col.line_chart(noise_df)
    table_col.write(noise_df)
def show_data_table(raw_input_data, processed_input_data):
    """Display one raw and one processed data table, chosen by a 1-based index widget."""
    try:
        selection = int(
            st.number_input('Enter data file _index__:', min_value=1, max_value=len(raw_input_data), value=1))
        st.write(raw_input_data[selection - 1])
        st.write(processed_input_data[selection - 1])
    except IndexError:
        # Out-of-range selection: show nothing rather than crash the app.
        pass
def plot_classes(data, assignments):
    """Scatter the 3D UMAP embedding, colored by HDBSCAN assignment.

    :param data: 2D array, umap embeddings (n x >= 3)
    :param assignments: 1D array, cluster labels (indexed into the palette)
    :return fig, plt: the 3D figure and the pyplot module
    """
    groups = list(np.unique(assignments))
    palette = plt.cm.get_cmap("Spectral")(np.linspace(0, 1, len(groups)))
    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')
    for g in np.unique(assignments):
        members = np.where(np.array(assignments) == g)
        ax.scatter(data[:, 0][members], data[:, 1][members], data[:, 2][members],
                   c=palette[g], label=g, s=0.4, marker='o', alpha=0.8)
    ax.set_xlabel('Dim. 1')
    ax.set_ylabel('Dim. 2')
    ax.set_zlabel('Dim. 3')
    plt.legend(ncol=3, markerscale=6)
    return fig, plt
def plot_accuracy(scores):
    """Violin plot of cross-validated classifier accuracies.

    :param scores: 1D array, cross-validated accuracies for the classifier
    :return fig, ax: the figure and its (current) axes
    """
    fig = plt.figure(facecolor='w', edgecolor='k')
    fig.suptitle("Performance on {} % data".format(0.2 * 100))
    ax = sns.violinplot(data=scores, palette="muted", scale="count", inner="quartile", width=0.4, linewidth=2,
                        scale_hue=False)
    # Hide the top/right spines; keep thin bottom/left spines.
    for side, visible in (('top', False), ('right', False), ('bottom', True), ('left', True)):
        ax.spines[side].set_visible(visible)
    ax.spines['bottom'].set_linewidth(1)
    ax.spines['left'].set_linewidth(1)
    ax = plt.gca()
    ax.set_xticks([])
    ax.set_xlabel('RF classifier')
    ax.set_ylabel('Accuracy')
    return fig, ax
def plot_confusion(validate_clf, x_test, y_test):
    """Plot raw-count and row-normalized confusion matrices for a fitted classifier.

    :param validate_clf: fitted sklearn classifier used for held-out validation
    :param x_test: 2D array, held-out features
    :param y_test: 1D array, held-out labels
    :return confusion: list of two matplotlib figures [counts, normalized]
    """
    # NOTE(review): sklearn.metrics.plot_confusion_matrix was removed in
    # scikit-learn 1.2 (replaced by ConfusionMatrixDisplay.from_estimator) —
    # confirm the pinned sklearn version before upgrading.
    titles_options = [("Non-normalized confusion matrix", None), ("Normalized confusion matrix", 'true')]
    st.write(
        'Two confusion matrices - top: counts, bottom: probability with **true positives in diagonal**')
    confusion = []
    for title, normalize in titles_options:
        cm = plot_confusion_matrix(validate_clf, x_test, y_test, cmap=sns.cm.rocket_r, normalize=normalize)
        cm.ax_.set_title(title)
        confusion.append(cm.figure_)
    return confusion
def plot_tmat(tm: object):
    """Heatmap of the behavior-to-behavior transition matrix.

    :param tm: DataFrame, transition matrix (rows: current behavior,
        columns: next behavior)
    :return fig: the matplotlib figure
    """
    fig = plt.figure()
    fig.suptitle("Transition matrix of {} behaviors".format(tm.shape[0]))
    sns.heatmap(tm, annot=True)
    plt.xlabel("Next frame behavior")
    plt.ylabel("Current frame behavior")
    return fig
| 3,358
| 34.357895
| 110
|
py
|
B-SOID
|
B-SOID-master/bsoid_app/bsoid_utilities/__init__.py
| 0
| 0
| 0
|
py
|
|
B-SOID
|
B-SOID-master/bsoid_py/main.py
|
"""
A master that runs BOTH
1. Training a unsupervised machine learning model based on patterns in spatio-temporal (x,y) changes.
2. Predicting new behaviors using (x,y) based on learned classifier.
"""
import os
import time
import joblib
import numpy as np
import pandas as pd
from bsoid_py.config import *
def build(train_folders):
    """
    :param train_folders: list, folders to build behavioral model on
    :returns f_10fps, trained_tsne, gmm_assignments, classifier, scores: see bsoid_py.train
    Automatically saves single CSV file containing training outputs (in 10Hz, 100ms per row):
    1. original features (number of training data points by 7 dimensions, columns 1-7)
    2. embedded features (number of training data points by 3 dimensions, columns 8-10)
    3. em-gmm assignments (number of training data points by 1, columns 11)
    Automatically saves classifier in OUTPUTPATH with MODELNAME in LOCAL_CONFIG
    """
    import bsoid_py.train
    f_10fps, trained_tsne, scaler, gmm_assignments, classifier, scores = bsoid_py.train.main(train_folders)
    # Stack features (transposed to rows), embedding, and assignments side by
    # side: one row per 100 ms bin.
    alldata = np.concatenate([f_10fps.T, trained_tsne, gmm_assignments.reshape(len(gmm_assignments), 1)], axis=1)
    micolumns = pd.MultiIndex.from_tuples([('Features', 'Relative snout to forepaws placement'),
                                           ('', 'Relative snout to hind paws placement'),
                                           ('', 'Inter-forepaw distance'),
                                           ('', 'Body length'), ('', 'Body angle'), ('', 'Snout displacement'),
                                           ('', 'Tail-base displacement'),
                                           ('Embedded t-SNE', 'Dimension 1'), ('', 'Dimension 2'),
                                           ('', 'Dimension 3'), ('EM-GMM', 'Assignment No.')],
                                          names=['Type', 'Frame@10Hz'])
    training_data = pd.DataFrame(alldata, columns=micolumns)
    # Timestamp suffix keeps repeated runs from clobbering earlier outputs.
    timestr = time.strftime("_%Y%m%d_%H%M")
    training_data.to_csv((os.path.join(OUTPUT_PATH, str.join('', ('bsoid_trainlabels_10Hz', timestr, '.csv')))),
                         index=True, chunksize=10000, encoding='utf-8')
    # Persist classifier + scaler together so prediction can rescale identically.
    with open(os.path.join(OUTPUT_PATH, str.join('', ('bsoid_', MODEL_NAME, '.sav'))), 'wb') as f:
        joblib.dump([classifier, scaler], f)
    logging.info('Saved.')
    return f_10fps, trained_tsne, scaler, gmm_assignments, classifier, scores
def run(predict_folders):
    """
    :param predict_folders: list, folders to run prediction using behavioral model
    :returns labels_fslow, labels_fshigh: see bsoid_py.classify
    Automatically loads classifier in OUTPUTPATH with MODELNAME in LOCAL_CONFIG
    Automatically saves CSV files containing new outputs (1 in 10Hz, 1 in FPS, both with same format):
    1. original features (number of training data points by 7 dimensions, columns 1-7)
    2. SVM predicted labels (number of training data points by 1, columns 8)
    """
    import bsoid_py.classify
    from bsoid_py.utils.likelihoodprocessing import get_filenames
    import bsoid_py.utils.statistics
    from bsoid_py.utils.visuals import plot_tmat
    with open(os.path.join(OUTPUT_PATH, str.join('', ('bsoid_', MODEL_NAME, '.sav'))), 'rb') as fr:
        behv_model, scaler = joblib.load(fr)
    data_new, feats_new, labels_fslow, labels_fshigh = bsoid_py.classify.main(predict_folders, scaler, FPS, behv_model)
    filenames = []
    all_df = []
    for i, fd in enumerate(predict_folders):  # Loop through folders
        f = get_filenames(fd)
        for j, filename in enumerate(f):
            logging.info('Importing CSV file {} from folder {}'.format(j + 1, i + 1))
            curr_df = pd.read_csv(filename, low_memory=False)
            filenames.append(filename)
            all_df.append(curr_df)
    for i in range(0, len(feats_new)):
        # Per-file 10 Hz outputs: features + predicted labels, one row per bin.
        alldata = np.concatenate([feats_new[i].T, labels_fslow[i].reshape(len(labels_fslow[i]), 1)], axis=1)
        micolumns = pd.MultiIndex.from_tuples([('Features', 'Relative snout to forepaws placement'),
                                               ('', 'Relative snout to hind paws placement'),
                                               ('', 'Inter-forepaw distance'),
                                               ('', 'Body length'), ('', 'Body angle'), ('', 'Snout displacement'),
                                               ('', 'Tail-base displacement'),
                                               ('SVM classifier', 'B-SOiD labels')],
                                              names=['Type', 'Frame@10Hz'])
        predictions = pd.DataFrame(alldata, columns=micolumns)
        timestr = time.strftime("_%Y%m%d_%H%M")
        csvname = os.path.basename(filenames[i]).rpartition('.')[0]
        predictions.to_csv((os.path.join(OUTPUT_PATH, str.join('', ('bsoid_labels_10Hz', timestr, csvname, '.csv')))),
                           index=True, chunksize=10000, encoding='utf-8')
        runlen_df1, dur_stats1, df_tm1 = bsoid_py.utils.statistics.main(labels_fslow[i])
        if PLOT_TRAINING:
            plot_tmat(df_tm1, FPS)
        runlen_df1.to_csv((os.path.join(OUTPUT_PATH, str.join('', ('bsoid_runlen_10Hz', timestr, csvname, '.csv')))),
                          index=True, chunksize=10000, encoding='utf-8')
        dur_stats1.to_csv((os.path.join(OUTPUT_PATH, str.join('', ('bsoid_stats_10Hz', timestr, csvname, '.csv')))),
                          index=True, chunksize=10000, encoding='utf-8')
        df_tm1.to_csv((os.path.join(OUTPUT_PATH, str.join('', ('bsoid_transitions_10Hz', timestr, csvname, '.csv')))),
                      index=True, chunksize=10000, encoding='utf-8')
        # Pad the high-fps labels with 6 leading edge frames, then shift down
        # 3 rows — presumably to align labels with the DLC CSV's header rows
        # when concatenated side by side (TODO confirm the offset).
        labels_fshigh_pad = np.pad(labels_fshigh[i], (6, 0), 'edge')
        df2 = pd.DataFrame(labels_fshigh_pad, columns={'B-SOiD labels'})
        df2.loc[len(df2)] = ''
        df2.loc[len(df2)] = ''
        df2 = df2.shift()
        df2.loc[0] = ''
        df2 = df2.shift()
        df2.loc[0] = ''
        # BUGFIX: pair each session's labels with its OWN raw CSV; the previous
        # all_df[0] re-used the first session's data for every file.
        frames = [df2, all_df[i]]
        xyfs_df = pd.concat(frames, axis=1)
        csvname = os.path.basename(filenames[i]).rpartition('.')[0]
        xyfs_df.to_csv((os.path.join(OUTPUT_PATH, str.join('', ('bsoid_labels_', str(FPS), 'Hz', timestr, csvname,
                                                                '.csv')))),
                       index=True, chunksize=10000, encoding='utf-8')
        runlen_df2, dur_stats2, df_tm2 = bsoid_py.utils.statistics.main(labels_fshigh[i])
        runlen_df2.to_csv((os.path.join(OUTPUT_PATH, str.join('', ('bsoid_runlen_', str(FPS), 'Hz', timestr, csvname,
                                                                   '.csv')))),
                          index=True, chunksize=10000, encoding='utf-8')
        dur_stats2.to_csv((os.path.join(OUTPUT_PATH, str.join('', ('bsoid_stats_', str(FPS), 'Hz', timestr, csvname,
                                                                   '.csv')))),
                          index=True, chunksize=10000, encoding='utf-8')
        df_tm2.to_csv((os.path.join(OUTPUT_PATH, str.join('', ('bsoid_transitions_', str(FPS), 'Hz', timestr, csvname,
                                                               '.csv')))),
                      index=True, chunksize=10000, encoding='utf-8')
    with open(os.path.join(OUTPUT_PATH, str.join('', ('bsoid_predictions.sav'))), 'wb') as f:
        joblib.dump([labels_fslow, labels_fshigh], f)
    logging.info('All saved.')
    return data_new, feats_new, labels_fslow, labels_fshigh
def main(train_folders, predict_folders):
    """
    Run the full B-SOiD pipeline: build a behavioral model, then predict on new data.

    :param train_folders: list, folders to build behavioral model on
    :param predict_folders: list, folders to run prediction using behavioral model
    :returns f_10fps, trained_tsne, gmm_assignments, classifier, scores: see bsoid_py.train
    :returns feats_new, labels_fslow, labels_fshigh: see bsoid_py.classify
    Automatically saves and loads classifier in OUTPUTPATH with MODELNAME in LOCAL_CONFIG
    Automatically saves CSV files containing training and new outputs
    """
    train_outputs = build(train_folders)
    predict_outputs = run(predict_folders)
    # Concatenate the training tuple and the prediction tuple into one flat result.
    return (*train_outputs, *predict_outputs)
if __name__ == "__main__":
    # Script entry point: train on TRAIN_FOLDERS then predict on PREDICT_FOLDERS
    # (both configured in the B-SOiD LOCAL_CONFIG).
    f_10fps, trained_tsne, scaler, gmm_assignments, classifier, scores, \
        data_new, feats_new, labels_fslow, labels_fshigh = main(TRAIN_FOLDERS, PREDICT_FOLDERS)
| 8,580
| 58.17931
| 119
|
py
|
B-SOID
|
B-SOID-master/bsoid_py/classify.py
|
"""
Classify behaviors based on (x,y) using trained B-SOiD behavioral model.
B-SOiD behavioral model has been developed using bsoid_py.main.build()
"""
import math
import numpy as np
from bsoid_py.utils import videoprocessing
from bsoid_py.utils.likelihoodprocessing import boxcar_center
from bsoid_py.utils.visuals import *
def bsoid_extract(data, bodyparts=BODYPARTS, fps=FPS):
    """
    Extracts features based on (x,y) positions
    :param data: list, csv data
    :param bodyparts: dict, body parts with their orders
    :param fps: scalar, input for camera frame-rate
    :return f_10fps: 2D array, extracted features
    """
    # ~100ms smoothing window, forced odd so the boxcar average stays centered.
    # (builtin int: np.int was deprecated and removed in NumPy >= 1.24)
    win_len = int(np.round(0.05 / (1 / fps)) * 2 - 1)
    feats = []
    for m in range(len(data)):
        logging.info('Extracting features from CSV file {}...'.format(m + 1))
        dataRange = len(data[m])
        # Inter-forepaw (x,y) displacement vector.
        fpd = data[m][:, 2 * bodyparts.get('Forepaw/Shoulder1'):2 * bodyparts.get('Forepaw/Shoulder1') + 2] - \
              data[m][:, 2 * bodyparts.get('Forepaw/Shoulder2'):2 * bodyparts.get('Forepaw/Shoulder2') + 2]
        # Midpoint of the two forepaws.
        # FIX: the y-midpoint previously averaged Shoulder1's y with itself.
        cfp = np.vstack(((data[m][:, 2 * bodyparts.get('Forepaw/Shoulder1')] +
                          data[m][:, 2 * bodyparts.get('Forepaw/Shoulder2')]) / 2,
                         (data[m][:, 2 * bodyparts.get('Forepaw/Shoulder1') + 1] +
                          data[m][:, 2 * bodyparts.get('Forepaw/Shoulder2') + 1]) / 2)).T
        # Forepaw-center relative to the tailbase.
        cfp_pt = np.vstack(([cfp[:, 0] - data[m][:, 2 * bodyparts.get('Tailbase')],
                             cfp[:, 1] - data[m][:, 2 * bodyparts.get('Tailbase') + 1]])).T
        # Midpoint of the two hindpaws.
        chp = np.vstack((((data[m][:, 2 * bodyparts.get('Hindpaw/Hip1')] +
                           data[m][:, 2 * bodyparts.get('Hindpaw/Hip2')]) / 2),
                         ((data[m][:, 2 * bodyparts.get('Hindpaw/Hip1') + 1] +
                           data[m][:, 2 * bodyparts.get('Hindpaw/Hip2') + 1]) / 2))).T
        # Hindpaw-center relative to the tailbase.
        chp_pt = np.vstack(([chp[:, 0] - data[m][:, 2 * bodyparts.get('Tailbase')],
                             chp[:, 1] - data[m][:, 2 * bodyparts.get('Tailbase') + 1]])).T
        # Snout relative to the tailbase (body axis vector).
        sn_pt = np.vstack(([data[m][:, 2 * bodyparts.get('Snout/Head')] - data[m][:, 2 * bodyparts.get('Tailbase')],
                            data[m][:, 2 * bodyparts.get('Snout/Head') + 1] - data[m][:,
                                                                             2 * bodyparts.get('Tailbase') + 1]])).T
        fpd_norm = np.zeros(dataRange)
        cfp_pt_norm = np.zeros(dataRange)
        chp_pt_norm = np.zeros(dataRange)
        sn_pt_norm = np.zeros(dataRange)
        for i in range(1, dataRange):
            fpd_norm[i] = np.array(np.linalg.norm(fpd[i, :]))
            cfp_pt_norm[i] = np.linalg.norm(cfp_pt[i, :])
            chp_pt_norm[i] = np.linalg.norm(chp_pt[i, :])
            sn_pt_norm[i] = np.linalg.norm(sn_pt[i, :])
        fpd_norm_smth = boxcar_center(fpd_norm, win_len)
        sn_cfp_norm_smth = boxcar_center(sn_pt_norm - cfp_pt_norm, win_len)
        sn_chp_norm_smth = boxcar_center(sn_pt_norm - chp_pt_norm, win_len)
        sn_pt_norm_smth = boxcar_center(sn_pt_norm, win_len)
        sn_pt_ang = np.zeros(dataRange - 1)
        sn_disp = np.zeros(dataRange - 1)
        pt_disp = np.zeros(dataRange - 1)
        for k in range(0, dataRange - 1):
            # Signed angle swept by the body-axis vector between consecutive frames.
            b_3d = np.hstack([sn_pt[k + 1, :], 0])
            a_3d = np.hstack([sn_pt[k, :], 0])
            c = np.cross(b_3d, a_3d)
            sn_pt_ang[k] = np.dot(np.dot(np.sign(c[2]), 180) / np.pi,
                                  math.atan2(np.linalg.norm(c), np.dot(sn_pt[k, :], sn_pt[k + 1, :])))
            # FIX: slices previously ended at +1, measuring x-displacement only;
            # +2 includes the y coordinate for a true 2D displacement.
            sn_disp[k] = np.linalg.norm(
                data[m][k + 1, 2 * bodyparts.get('Snout/Head'):2 * bodyparts.get('Snout/Head') + 2] -
                data[m][k, 2 * bodyparts.get('Snout/Head'):2 * bodyparts.get('Snout/Head') + 2])
            pt_disp[k] = np.linalg.norm(
                data[m][k + 1, 2 * bodyparts.get('Tailbase'):2 * bodyparts.get('Tailbase') + 2] -
                data[m][k, 2 * bodyparts.get('Tailbase'):2 * bodyparts.get('Tailbase') + 2])
        sn_pt_ang_smth = boxcar_center(sn_pt_ang, win_len)
        sn_disp_smth = boxcar_center(sn_disp, win_len)
        pt_disp_smth = boxcar_center(pt_disp, win_len)
        feats.append(np.vstack((sn_cfp_norm_smth[1:], sn_chp_norm_smth[1:], fpd_norm_smth[1:],
                                sn_pt_norm_smth[1:], sn_pt_ang_smth[:], sn_disp_smth[:], pt_disp_smth[:])))
    logging.info('Done extracting features from a total of {} training CSV files.'.format(len(data)))
    # Integrate per-frame features into 100ms bins: distances are averaged,
    # angle/displacement features are summed over each bin.
    f_10fps = []
    for n in range(0, len(feats)):
        feats1 = np.zeros(len(data[n]))
        for k in range(round(fps / 10) - 1, len(feats[n][0]), round(fps / 10)):
            if k > round(fps / 10) - 1:
                feats1 = np.concatenate((feats1.reshape(feats1.shape[0], feats1.shape[1]),
                                         np.hstack((np.mean((feats[n][0:4, range(k - round(fps / 10), k)]), axis=1),
                                                    np.sum((feats[n][4:7, range(k - round(fps / 10), k)]),
                                                           axis=1))).reshape(len(feats[0]), 1)), axis=1)
            else:
                # First bin replaces the zero placeholder entirely.
                feats1 = np.hstack((np.mean((feats[n][0:4, range(k - round(fps / 10), k)]), axis=1),
                                    np.sum((feats[n][4:7, range(k - round(fps / 10), k)]), axis=1))).reshape(
                    len(feats[0]), 1)
        logging.info('Done integrating features into 100ms bins from CSV file {}.'.format(n + 1))
        f_10fps.append(feats1)
    return f_10fps
def bsoid_predict(feats, scaler, model):
    """
    Predict a label for every 100ms feature bin of each session.

    :param feats: list, multiple feats (original feature space)
    :param scaler: Obj, fitted StandardScaler from training
    :param model: Obj, SVM classifier
    :return labels_fslow: list, label/100ms
    """
    labels_fslow = []
    for idx, feat in enumerate(feats):
        logging.info('Predicting file {} with {} instances '
                     'using learned classifier: {}{}...'.format(idx + 1, feat.shape[1], 'bsoid_', MODEL_NAME))
        # Standardize with the training-time scaler before classifying.
        feats_sc = scaler.transform(feat.T).T
        predicted = model.predict(feats_sc.T)
        logging.info('Done predicting file {} with {} instances in {} D space.'.format(idx + 1, feat.shape[1],
                                                                                       feat.shape[0]))
        labels_fslow.append(predicted)
    logging.info('Done predicting a total of {} files.'.format(len(feats)))
    return labels_fslow
def bsoid_frameshift(data_new, scaler, fps, model):
    """
    Frame-shift paradigm to output behavior/frame
    :param data_new: list, new data from predict_folders
    :param scaler: Obj, fitted StandardScaler from training
    :param fps: scalar, argument specifying camera frame-rate in LOCAL_CONFIG
    :param model: Obj, SVM classifier
    :return labels_fshigh, 1D array, label/frame
    """
    labels_fs = []
    labels_fs2 = []
    labels_fshigh = []
    for i in range(0, len(data_new)):
        data_offset = []
        # Make fps/10 copies of the session, each shifted by one extra frame,
        # so every frame lands on a 100ms bin boundary in exactly one copy.
        for j in range(math.floor(fps / 10)):
            data_offset.append(data_new[i][j:, :])
        feats_new = bsoid_extract(data_offset)
        labels = bsoid_predict(feats_new, scaler, model)
        for m in range(0, len(labels)):
            # Reverse so the shorter (more-shifted) label vectors align at the end
            # when padded below.
            labels[m] = labels[m][::-1]
        # Pad all offset streams to equal length with -1 sentinels.
        labels_pad = -1 * np.ones([len(labels), len(max(labels, key=lambda x: len(x)))])
        for n, l in enumerate(labels):
            labels_pad[n][0:len(l)] = l
            labels_pad[n] = labels_pad[n][::-1]
            if n > 0:
                # Fill the leading padded frames from the previous offset's labels.
                labels_pad[n][0:n] = labels_pad[n - 1][0:n]
        labels_fs.append(labels_pad.astype(int))
    for k in range(0, len(labels_fs)):
        labels_fs2 = []
        for l in range(math.floor(fps / 10)):
            labels_fs2.append(labels_fs[k][l])
        # Interleave the offset label streams column-wise ('F' order) back into
        # original frame order, yielding one label per frame.
        labels_fshigh.append(np.array(labels_fs2).flatten('F'))
    logging.info('Done frameshift-predicting a total of {} files.'.format(len(data_new)))
    return labels_fshigh
def main(predict_folders, scaler, fps, behv_model):
    """
    Run B-SOiD prediction over every CSV found under predict_folders.

    :param predict_folders: list, data folders
    :param scaler: Obj, fitted StandardScaler from training
    :param fps: scalar, camera frame-rate
    :param behv_model: object, SVM classifier
    :return data_new: list, csv data
    :return feats_new: 2D array, extracted features
    :return labels_fslow: 1D array, label/100ms
    :return labels_fshigh: 1D array, label/frame
    """
    import bsoid_py.utils.likelihoodprocessing
    _, data_new, _ = bsoid_py.utils.likelihoodprocessing.main(predict_folders)
    feats_new = bsoid_extract(data_new)
    labels_fslow = bsoid_predict(feats_new, scaler, behv_model)
    labels_fshigh = bsoid_frameshift(data_new, scaler, fps, behv_model)
    if PLOT_TRAINING:
        plot_feats(feats_new, labels_fslow)
    if GEN_VIDEOS:
        # Overlay predicted labels onto frames of the configured example video.
        videoprocessing.main(VID_NAME, labels_fslow[ID], FPS, FRAME_DIR)
    return data_new, feats_new, labels_fslow, labels_fshigh
| 8,818
| 50.573099
| 117
|
py
|
B-SOID
|
B-SOID-master/bsoid_py/__init__.py
| 0
| 0
| 0
|
py
|
|
B-SOID
|
B-SOID-master/bsoid_py/train.py
|
"""
Based on the natural statistics of the mouse configuration using (x,y) positions,
we distill information down to 3 dimensions and run pattern recognition.
Then, we utilize these output and original feature space to train a B-SOiD behavioral model.
"""
import math
import numpy as np
from sklearn import mixture, svm
from bhtsne import tsne
from sklearn.metrics import plot_confusion_matrix
from sklearn.model_selection import train_test_split, cross_val_score
from sklearn.preprocessing import StandardScaler
from tqdm import tqdm
from bsoid_py.utils.likelihoodprocessing import boxcar_center
from bsoid_py.utils.visuals import *
def bsoid_tsne(data: list, bodyparts=BODYPARTS, fps=FPS, comp=COMP):
    """
    Trains t-SNE (unsupervised) given a set of features based on (x,y) positions
    :param data: list of 3D array
    :param bodyparts: dict, body parts with their orders in LOCAL_CONFIG
    :param fps: scalar, argument specifying camera frame-rate in LOCAL_CONFIG
    :param comp: boolean (0 or 1), argument to compile data or not in LOCAL_CONFIG
    :return f_10fps: 2D array, features
    :return f_10fps_sc: 2D array, standardized features
    :return trained_tsne: 2D array, trained t-SNE space
    :return scaler: obj, fitted StandardScaler (when comp == 0, the last file's scaler)
    """
    # ~100ms smoothing window, forced odd so the boxcar average stays centered.
    # (builtin int: np.int was deprecated and removed in NumPy >= 1.24)
    win_len = int(np.round(0.05 / (1 / fps)) * 2 - 1)
    feats = []
    for m in range(len(data)):
        logging.info('Extracting features from CSV file {}...'.format(m + 1))
        dataRange = len(data[m])
        # Inter-forepaw (x,y) displacement vector.
        fpd = data[m][:, 2 * bodyparts.get('Forepaw/Shoulder1'):2 * bodyparts.get('Forepaw/Shoulder1') + 2] - \
              data[m][:, 2 * bodyparts.get('Forepaw/Shoulder2'):2 * bodyparts.get('Forepaw/Shoulder2') + 2]
        # Midpoint of the two forepaws.
        # FIX: the y-midpoint previously averaged Shoulder1's y with itself.
        cfp = np.vstack(((data[m][:, 2 * bodyparts.get('Forepaw/Shoulder1')] +
                          data[m][:, 2 * bodyparts.get('Forepaw/Shoulder2')]) / 2,
                         (data[m][:, 2 * bodyparts.get('Forepaw/Shoulder1') + 1] +
                          data[m][:, 2 * bodyparts.get('Forepaw/Shoulder2') + 1]) / 2)).T
        cfp_pt = np.vstack(([cfp[:, 0] - data[m][:, 2 * bodyparts.get('Tailbase')],
                             cfp[:, 1] - data[m][:, 2 * bodyparts.get('Tailbase') + 1]])).T
        # Midpoint of the two hindpaws, relative to the tailbase.
        chp = np.vstack((((data[m][:, 2 * bodyparts.get('Hindpaw/Hip1')] +
                           data[m][:, 2 * bodyparts.get('Hindpaw/Hip2')]) / 2),
                         ((data[m][:, 2 * bodyparts.get('Hindpaw/Hip1') + 1] +
                           data[m][:, 2 * bodyparts.get('Hindpaw/Hip2') + 1]) / 2))).T
        chp_pt = np.vstack(([chp[:, 0] - data[m][:, 2 * bodyparts.get('Tailbase')],
                             chp[:, 1] - data[m][:, 2 * bodyparts.get('Tailbase') + 1]])).T
        # Snout relative to the tailbase (body axis vector).
        sn_pt = np.vstack(([data[m][:, 2 * bodyparts.get('Snout/Head')] - data[m][:, 2 * bodyparts.get('Tailbase')],
                            data[m][:, 2 * bodyparts.get('Snout/Head') + 1] - data[m][:,
                                                                             2 * bodyparts.get('Tailbase') + 1]])).T
        fpd_norm = np.zeros(dataRange)
        cfp_pt_norm = np.zeros(dataRange)
        chp_pt_norm = np.zeros(dataRange)
        sn_pt_norm = np.zeros(dataRange)
        for i in range(1, dataRange):
            fpd_norm[i] = np.array(np.linalg.norm(fpd[i, :]))
            cfp_pt_norm[i] = np.linalg.norm(cfp_pt[i, :])
            chp_pt_norm[i] = np.linalg.norm(chp_pt[i, :])
            sn_pt_norm[i] = np.linalg.norm(sn_pt[i, :])
        fpd_norm_smth = boxcar_center(fpd_norm, win_len)
        sn_cfp_norm_smth = boxcar_center(sn_pt_norm - cfp_pt_norm, win_len)
        sn_chp_norm_smth = boxcar_center(sn_pt_norm - chp_pt_norm, win_len)
        sn_pt_norm_smth = boxcar_center(sn_pt_norm, win_len)
        sn_pt_ang = np.zeros(dataRange - 1)
        sn_disp = np.zeros(dataRange - 1)
        pt_disp = np.zeros(dataRange - 1)
        for k in range(0, dataRange - 1):
            # Signed angle swept by the body-axis vector between consecutive frames.
            b_3d = np.hstack([sn_pt[k + 1, :], 0])
            a_3d = np.hstack([sn_pt[k, :], 0])
            c = np.cross(b_3d, a_3d)
            sn_pt_ang[k] = np.dot(np.dot(np.sign(c[2]), 180) / np.pi,
                                  math.atan2(np.linalg.norm(c), np.dot(sn_pt[k, :], sn_pt[k + 1, :])))
            # FIX: slices previously ended at +1, measuring x-displacement only;
            # +2 includes the y coordinate for a true 2D displacement.
            sn_disp[k] = np.linalg.norm(
                data[m][k + 1, 2 * bodyparts.get('Snout/Head'):2 * bodyparts.get('Snout/Head') + 2] -
                data[m][k, 2 * bodyparts.get('Snout/Head'):2 * bodyparts.get('Snout/Head') + 2])
            pt_disp[k] = np.linalg.norm(
                data[m][k + 1, 2 * bodyparts.get('Tailbase'):2 * bodyparts.get('Tailbase') + 2] -
                data[m][k, 2 * bodyparts.get('Tailbase'):2 * bodyparts.get('Tailbase') + 2])
        sn_pt_ang_smth = boxcar_center(sn_pt_ang, win_len)
        sn_disp_smth = boxcar_center(sn_disp, win_len)
        pt_disp_smth = boxcar_center(pt_disp, win_len)
        feats.append(np.vstack((sn_cfp_norm_smth[1:], sn_chp_norm_smth[1:], fpd_norm_smth[1:],
                                sn_pt_norm_smth[1:], sn_pt_ang_smth[:], sn_disp_smth[:], pt_disp_smth[:])))
    logging.info('Done extracting features from a total of {} training CSV files.'.format(len(data)))
    if comp == 0:
        f_10fps = []
        f_10fps_sc = []
        trained_tsne = []
    # Integrate per-frame features into 100ms bins (mean for distances,
    # sum for angle/displacement), then standardize and embed with t-SNE.
    for n in range(0, len(feats)):
        feats1 = np.zeros(len(data[n]))
        for k in range(round(fps / 10) - 1, len(feats[n][0]), round(fps / 10)):
            if k > round(fps / 10) - 1:
                feats1 = np.concatenate((feats1.reshape(feats1.shape[0], feats1.shape[1]),
                                         np.hstack((np.mean((feats[n][0:4, range(k - round(fps / 10), k)]), axis=1),
                                                    np.sum((feats[n][4:7, range(k - round(fps / 10), k)]),
                                                           axis=1))).reshape(len(feats[0]), 1)), axis=1)
            else:
                # First bin replaces the zero placeholder entirely.
                feats1 = np.hstack((np.mean((feats[n][0:4, range(k - round(fps / 10), k)]), axis=1),
                                    np.sum((feats[n][4:7, range(k - round(fps / 10), k)]), axis=1))).reshape(
                    len(feats[0]), 1)
        logging.info('Done integrating features into 100ms bins from CSV file {}.'.format(n + 1))
        if comp == 1:
            if n > 0:
                f_10fps = np.concatenate((f_10fps, feats1), axis=1)
            else:
                f_10fps = feats1
        else:
            f_10fps.append(feats1)
            # NOTE: one scaler per file; only the last file's scaler is returned.
            scaler = StandardScaler()
            scaler.fit(feats1.T)
            feats1_stnd = scaler.transform(feats1.T).T
            f_10fps_sc.append(feats1_stnd)
            logging.info('Training t-SNE to embed {} instances from {} D '
                         'into 3 D from CSV file {}...'.format(f_10fps_sc[n].shape[1], f_10fps_sc[n].shape[0],
                                                               n + 1))
            trained_tsne_i = tsne(f_10fps_sc[n].T, dimensions=3, perplexity=np.sqrt(f_10fps_sc[n].shape[1]),
                                  theta=0.5, rand_seed=23)
            trained_tsne.append(trained_tsne_i)
            logging.info('Done embedding into 3 D.')
    if comp == 1:
        scaler = StandardScaler()
        scaler.fit(f_10fps.T)
        f_10fps_sc = scaler.transform(f_10fps.T).T
        logging.info('Training t-SNE to embed {} instances from {} D '
                     'into 3 D from a total of {} CSV files...'.format(f_10fps_sc.shape[1], f_10fps_sc.shape[0],
                                                                       len(data)))
        trained_tsne = tsne(f_10fps_sc.T, dimensions=3, perplexity=np.sqrt(f_10fps_sc.shape[1]),
                            theta=0.5, rand_seed=23)
        logging.info('Done embedding into 3 D.')
    return f_10fps, f_10fps_sc, trained_tsne, scaler
def bsoid_gmm(trained_tsne, comp=COMP, emgmm_params=EMGMM_PARAMS):
    """
    Trains EM-GMM (unsupervised) given learned t-SNE space
    :param trained_tsne: 2D array (comp=1) or list of 2D arrays (comp=0), trained t-SNE space
    :param comp: boolean (0 or 1), argument to compile data or not in LOCAL_CONFIG
    :param emgmm_params: dict, EMGMM_PARAMS in GLOBAL_CONFIG
    :return assignments: Converged EM-GMM group assignments
        (1D array when comp=1, list of 1D arrays when comp=0)
    """
    if comp == 1:
        logging.info('Running EM-GMM on {} instances in {} D space...'.format(*trained_tsne.shape))
        gmm = mixture.GaussianMixture(**emgmm_params).fit(trained_tsne)
        logging.info('Predicting labels for {} instances in {} D space...'.format(*trained_tsne.shape))
        assigns = gmm.predict(trained_tsne)
        logging.info('Done predicting labels for {} instances in {} D space...'.format(*trained_tsne.shape))
        # Remap raw GMM component ids onto a contiguous 0..k-1 range.
        uk = list(np.unique(assigns))
        assignments = np.array([uk.index(val) for val in assigns])
    else:
        assigns = []
        for i in tqdm(range(len(trained_tsne))):
            logging.info('Running EM-GMM on {} instances in {} D space...'.format(*trained_tsne[i].shape))
            gmm = mixture.GaussianMixture(**emgmm_params).fit(trained_tsne[i])
            logging.info('Predicting labels for {} instances in {} D space...'.format(*trained_tsne[i].shape))
            assigns.append(gmm.predict(trained_tsne[i]))
        # FIX: the original indexed .shape on the list (AttributeError) and
        # flattened all files' labels into one array; instead remap each file's
        # component ids independently and keep per-file label arrays.
        assignments = []
        for assign in assigns:
            uk = list(np.unique(assign))
            assignments.append(np.array([uk.index(val) for val in assign]))
        logging.info('Done predicting labels for a total of {} files.'.format(len(assigns)))
    return assignments
def bsoid_svm(feats, labels, comp=COMP, hldout=HLDOUT, cv_it=CV_IT, svm_params=SVM_PARAMS):
    """
    Trains SVM classifier
    :param feats: 2D array, original feature space, standardized
    :param labels: 1D array, GMM output assignments
    :param comp: boolean (0 or 1), one classifier for all files (1) or one per file (0)
    :param hldout: scalar, test partition ratio for validating SVM performance in GLOBAL_CONFIG
    :param cv_it: scalar, iterations for cross-validation in GLOBAL_CONFIG
    :param svm_params: dict, SVM parameters in GLOBAL_CONFIG
    :return classifier: obj, SVM classifier (list of classifiers when comp == 0)
    :return scores: 1D array, cross-validated accuracy (list of arrays when comp == 0)
    """
    if comp == 1:
        feats_train, feats_test, labels_train, labels_test = train_test_split(feats.T, labels.T, test_size=hldout,
                                                                              random_state=23)
        logging.info(
            'Training SVM on randomly partitioned {}% of training data...'.format(
                (1 - hldout) * 100))
        classifier = svm.SVC(**svm_params)
        classifier.fit(feats_train, labels_train)
        logging.info('Done training SVM mapping {} features to {} assignments.'.format(
            feats_train.shape, labels_train.shape))
        logging.info('Predicting randomly sampled (non-overlapped) assignments '
                     'using the remaining {}%...'.format(HLDOUT * 100))
        scores = cross_val_score(classifier, feats_test, labels_test, cv=cv_it, n_jobs=-1)
        timestr = time.strftime("_%Y%m%d_%H%M")
        if PLOT_TRAINING:
            np.set_printoptions(precision=2)
            titles_options = [("Non-normalized confusion matrix", None),
                              ("Normalized confusion matrix", 'true')]
            titlenames = [("counts"), ("norm")]
            j = 0
            for title, normalize in titles_options:
                disp = plot_confusion_matrix(classifier, feats_test, labels_test,
                                             cmap=plt.cm.Blues,
                                             normalize=normalize)
                disp.ax_.set_title(title)
                print(title)
                print(disp.confusion_matrix)
                my_file = 'confusion_matrix_{}'.format(titlenames[j])
                disp.figure_.savefig(os.path.join(OUTPUT_PATH, str.join('', (my_file, timestr, '.svg'))))
                j += 1
            plt.show()
    else:
        classifier = []
        scores = []
        for i in range(len(feats)):
            feats_train, feats_test, labels_train, labels_test = train_test_split(feats[i].T, labels[i].T,
                                                                                  test_size=hldout,
                                                                                  random_state=23)
            logging.info(
                'Training SVM on randomly partitioned {}% of training data...'.format(
                    (1 - hldout) * 100))
            # FIX: the new SVC was bound to `classifier` (the list), leaving
            # `clf` undefined on the next line.
            clf = svm.SVC(**svm_params)
            clf.fit(feats_train, labels_train)
            classifier.append(clf)
            logging.info(
                'Done training SVM mapping {} features to {} assignments.'.format(
                    feats_train.shape, labels_train.shape))
            logging.info('Predicting randomly sampled (non-overlapped) assignments '
                         'using the remaining {}%...'.format(HLDOUT * 100))
            # FIX: score the per-file classifier (not the list) and collect the
            # result -- previously `sc` was computed and then discarded.
            sc = cross_val_score(clf, feats_test, labels_test, cv=cv_it, n_jobs=-1)
            scores.append(sc)
            timestr = time.strftime("_%Y%m%d_%H%M")
            if PLOT_TRAINING:
                np.set_printoptions(precision=2)
                titles_options = [("Non-normalized confusion matrix", None),
                                  ("Normalized confusion matrix", 'true')]
                j = 0
                titlenames = [("counts"), ("norm")]
                for title, normalize in titles_options:
                    disp = plot_confusion_matrix(clf, feats_test, labels_test,
                                                 cmap=plt.cm.Blues,
                                                 normalize=normalize)
                    disp.ax_.set_title(title)
                    print(title)
                    print(disp.confusion_matrix)
                    my_file = 'confusion_matrix_clf{}_{}'.format(i, titlenames[j])
                    disp.figure_.savefig(os.path.join(OUTPUT_PATH, str.join('', (my_file, timestr, '.svg'))))
                    j += 1
                plt.show()
            logging.info(
                'Scored cross-validated SVM performance.'.format(feats_train.shape, labels_train.shape))
    return classifier, scores
def main(train_folders: list):
    """
    Train the full B-SOiD model: t-SNE embedding, EM-GMM clustering, SVM classifier.

    :param train_folders: list, training data folders
    :return f_10fps: 2D array, features
    :return trained_tsne: 2D array, trained t-SNE space
    :return scaler: obj, fitted StandardScaler
    :return gmm_assignments: Converged EM-GMM group assignments
    :return classifier: obj, SVM classifier
    :return scores: 1D array, cross-validated accuracy
    """
    import bsoid_py.utils.likelihoodprocessing
    _, training_data, _ = bsoid_py.utils.likelihoodprocessing.main(train_folders)
    f_10fps, f_10fps_sc, trained_tsne, scaler = bsoid_tsne(training_data)
    gmm_assignments = bsoid_gmm(trained_tsne)
    classifier, scores = bsoid_svm(f_10fps_sc, gmm_assignments)
    if PLOT_TRAINING:
        # Visualize cluster separation, classifier accuracy, and feature distributions.
        plot_classes(trained_tsne, gmm_assignments)
        plot_accuracy(scores)
        plot_feats(f_10fps, gmm_assignments)
    return f_10fps, trained_tsne, scaler, gmm_assignments, classifier, scores
| 14,811
| 53.859259
| 117
|
py
|
B-SOID
|
B-SOID-master/bsoid_py/config/GLOBAL_CONFIG.py
|
################### THINGS YOU PROBABLY DON'T WANT TO CHANGE ###################
import logging
import sys
# Log INFO and above to stdout with timestamps.
logging.basicConfig(
    format='%(asctime)s %(levelname)-8s %(message)s',
    level='INFO',
    datefmt='%Y-%m-%d %H:%M:%S',
    stream=sys.stdout)
# EM_GMM parameters (sklearn.mixture.GaussianMixture kwargs)
EMGMM_PARAMS = {
    'n_components': 30,
    'covariance_type': 'full', # t-sne structure means nothing.
    'tol': 0.001,
    'reg_covar': 1e-06,
    'max_iter': 100,
    'n_init': 20, # 20 initializations to escape poor local optima
    'init_params': 'random', # random initialization
    'random_state': 23,
    'verbose': 1 # set this to 0 if you don't want to show progress for em-gmm.
}
# Multi-class support vector machine classifier params (sklearn.svm.SVC kwargs)
SVM_PARAMS = {
    'C': 10, # regularization strength; larger C fits the training data more tightly
    'gamma': 0.5, # RBF kernel coefficient
    'probability': True,
    'random_state': 0, # fixed seed for reproducible fits
    'verbose': 0 # set to 1 for libsvm training output
}
HLDOUT = 0.2 # Test partition ratio to validate clustering separation.
CV_IT = 10 # Number of iterations for cross-validation to show it's not over-fitting.
| 1,180
| 31.805556
| 86
|
py
|
B-SOID
|
B-SOID-master/bsoid_py/config/LOCAL_CONFIG.py
|
################### THINGS YOU MAY WANT TO CHANGE ###################
BASE_PATH = '/Users/ahsu/B-SOID/datasets' # Base directory path.
TRAIN_FOLDERS = ['/Train1'] # Data folders used to training neural network.
PREDICT_FOLDERS = ['/Data1'] # Data folders, can contain the same as training or new data for consistency.
# This version requires the six body parts Snout/Head, Forepaws/Shoulders, Hindpaws/Hips, Tailbase.
# Values are column orders in the DLC CSV; entries set to None are ignored.
BODYPARTS = {
    'Snout/Head': 0,
    'Neck': None,
    'Forepaw/Shoulder1': 1,
    'Forepaw/Shoulder2': 2,
    'Bodycenter': None,
    'Hindpaw/Hip1': 3,
    'Hindpaw/Hip2': 4,
    'Tailbase': 5,
    'Tailroot': None
}
FPS = 60 # Frame-rate of your video,
# note that you can use a different number for new data as long as the video is same scale/view
COMP = 1 # COMP = 1: Train one classifier for all CSV files; COMP = 0: Classifier/CSV file.
# Output directory to where you want the analysis to be stored
OUTPUT_PATH = '/Users/ahsu/Desktop/bsoid_py_beta'
# Machine learning model name
MODEL_NAME = 'c57bl6_n6_30min'
# Pick a video
VID_NAME = '/Users/ahsu/B-SOID/datasets/Data1/2019-04-19_09-34-36cut0_30min.mp4'
# What number would the video be in terms of prediction order? (0=file 1/folder1, 1=file2/folder 1, etc.)
ID = 0
# Create a folder to store extracted images, make sure this folder exist.
# This program will predict labels and print them on these images
FRAME_DIR = '/Users/ahsu/B-SOID/datasets/Data1/0_30min_10fpsPNGs'
# In addition, this will also create an entire sample group videos for ease of understanding
SHORTVID_DIR = '/Users/ahsu/B-SOID/datasets/Data1/examples'
# IF YOU'D LIKE TO SKIP PLOTTING/CREATION OF VIDEOS, change below plot settings to False
PLOT_TRAINING = True
GEN_VIDEOS = True
| 1,744
| 40.547619
| 107
|
py
|
B-SOID
|
B-SOID-master/bsoid_py/config/__init__.py
|
from bsoid_py.config.LOCAL_CONFIG import *
from bsoid_py.config.GLOBAL_CONFIG import *
| 86
| 42.5
| 43
|
py
|
B-SOID
|
B-SOID-master/bsoid_py/utils/likelihoodprocessing.py
|
"""
likelihood processing utilities
Forward fill low likelihood (x,y)
"""
import glob
import re
import numpy as np
from tqdm import tqdm
from bsoid_py.utils.visuals import *
def boxcar_center(a, n):
    """Centered moving average of *a* over an *n*-sample window.

    Edges use partial windows (min_periods=1), so the output has no NaNs and
    the same length as the input.
    """
    series = pd.Series(a)
    smoothed = series.rolling(window=n, min_periods=1, center=True).mean()
    return np.array(smoothed)
def convert_int(s):
    """Return *s* as an int when it consists purely of digits; otherwise return it unchanged."""
    return int(s) if s.isdigit() else s
def alphanum_key(s):
    """ Turn a string into a list of string and number chunks.
        "z23a" -> ["z", 23, "a"]
    """
    # Digit runs become ints so comparisons sort numerically rather than lexically.
    return [int(chunk) if chunk.isdigit() else chunk for chunk in re.split('([0-9]+)', s)]
def sort_nicely(l):
    """ Sort the given list in the way that humans expect.
    """
    # In-place natural sort: digit runs compare as numbers ("file2" < "file10").
    l[:] = sorted(l, key=alphanum_key)
def get_filenames(folder):
    """
    Gets a list of filenames within a folder
    :param folder: str, folder path (relative to BASE_PATH)
    :return: list, naturally-sorted .csv filenames
    """
    csv_paths = glob.glob(BASE_PATH + folder + '/*.csv')
    sort_nicely(csv_paths)
    return csv_paths
def import_folders(folders: list):
    """
    Import multiple folders containing .csv files and process them
    :param folders: list, data folders
    :return filenames: list, data filenames
    :return data: 1D object array, one filtered (x,y) 2D array per file
    :return perc_rect_li: list, percent filtered
    """
    filenames = []
    rawdata_li = []
    data_li = []
    perc_rect_li = []
    for i, fd in enumerate(folders):  # Loop through folders
        f = get_filenames(fd)
        for j, filename in enumerate(f):
            logging.info('Importing CSV file {} from folder {}'.format(j + 1, i + 1))
            curr_df = pd.read_csv(filename, low_memory=False)
            curr_df_filt, perc_rect = adp_filt(curr_df)
            logging.info('Done preprocessing (x,y) from file {}, folder {}.'.format(j + 1, i + 1))
            rawdata_li.append(curr_df)
            perc_rect_li.append(perc_rect)
            data_li.append(curr_df_filt)
        filenames.append(f)
        logging.info('Processed {} CSV files from folder: {}'.format(len(f), fd))
    # FIX: np.array(data_li) raises on modern NumPy when sessions have different
    # lengths (ragged input). Build an explicit 1-D object array so each element
    # stays an independent 2D float array, which is what downstream code indexes.
    data = np.empty(len(data_li), dtype=object)
    for k, arr in enumerate(data_li):
        data[k] = arr
    logging.info('Processed a total of {} CSV files, and compiled into a {} data list.'.format(len(data_li),
                                                                                              data.shape))
    return filenames, data, perc_rect_li
def adp_filt(currdf: object):
    """
    Adaptively threshold DeepLabCut likelihoods and forward-fill low-confidence (x,y).

    The per-bodypart cutoff is taken from the likelihood histogram's first rising
    bin edge; frames below it inherit the previous frame's coordinates.
    :param currdf: object, csv data frame (raw DLC export with header rows included)
    :return currdf_filt: 2D array, filtered data
    :return perc_rect: 1D array, percent filtered per BODYPART
    """
    lIndex = []
    xIndex = []
    yIndex = []
    currdf = np.array(currdf[1:])
    # Row 0 now holds the 'x'/'y'/'likelihood' coordinate labels; map each column.
    for header in range(len(currdf[0])):
        if currdf[0][header] == "likelihood":
            lIndex.append(header)
        elif currdf[0][header] == "x":
            xIndex.append(header)
        elif currdf[0][header] == "y":
            yIndex.append(header)
    logging.info('Extracting likelihood value...')
    curr_df1 = currdf[:, 1:]  # drop the frame-index column
    datax = curr_df1[:, np.array(xIndex) - 1]
    datay = curr_df1[:, np.array(yIndex) - 1]
    data_lh = curr_df1[:, np.array(lIndex) - 1]
    currdf_filt = np.zeros((datax.shape[0] - 1, (datax.shape[1]) * 2))
    perc_rect = []
    logging.info('Computing data threshold to forward fill any sub-threshold (x,y)...')
    for i in range(data_lh.shape[1]):
        perc_rect.append(0)
    for x in tqdm(range(data_lh.shape[1])):
        # Threshold = midpoint of the bin edges at the histogram's first rise.
        # FIX: np.float was deprecated and removed in NumPy >= 1.24; use builtin float.
        a, b = np.histogram(data_lh[1:, x].astype(float))
        rise_a = np.where(np.diff(a) >= 0)
        if rise_a[0][0] > 1:
            llh = ((b[rise_a[0][0]] + b[rise_a[0][0]-1]) / 2)
        else:
            llh = ((b[rise_a[0][1]] + b[rise_a[0][1]-1]) / 2)
        data_lh_float = data_lh[1:, x].astype(float)
        perc_rect[x] = np.sum(data_lh_float < llh) / data_lh.shape[0]
        # NOTE(review): data_lh_float[i] is offset by one row relative to
        # datax[i]/datay[i] (the label row was only stripped from the
        # likelihood column) -- preserved as-is; confirm against upstream B-SOiD.
        for i in range(1, data_lh.shape[0] - 1):
            if data_lh_float[i] < llh:
                # Sub-threshold frame: carry the previous frame's (x,y) forward.
                currdf_filt[i, (2 * x):(2 * x + 2)] = currdf_filt[i - 1, (2 * x):(2 * x + 2)]
            else:
                currdf_filt[i, (2 * x):(2 * x + 2)] = np.hstack([datax[i, x], datay[i, x]])
    currdf_filt = np.array(currdf_filt[1:])
    currdf_filt = currdf_filt.astype(float)
    return currdf_filt, perc_rect
def main(folders: list):
    """
    Filter every CSV found under the given data folders.

    :param folders: list, data folders
    :return filenames: list, data filenames
    :return data: list, filtered data list
    :return perc_rect: 1D array, percent filtered per BODYPART
    """
    return import_folders(folders)
if __name__ == '__main__':
    # Preprocess the configured training folders when run as a script.
    main(TRAIN_FOLDERS)
| 4,618
| 31.076389
| 108
|
py
|
B-SOID
|
B-SOID-master/bsoid_py/utils/statistics.py
|
"""
Summary statistics
"""
import os
import time
import numpy as np
import pandas as pd
from bsoid_py.config import *
def transition_matrix(labels):
    """
    Build a row-normalized first-order transition matrix from a label sequence.

    :param labels: 1D array, predicted labels
    :return df_tm: object, transition matrix data frame
    """
    n_states = 1 + max(labels)
    counts = [[0] * n_states for _ in range(n_states)]
    # Count every consecutive (current, next) label pair.
    for src, dst in zip(labels, labels[1:]):
        counts[src][dst] += 1
    # Normalize rows to probabilities; rows with no outgoing transitions stay zero.
    for row in counts:
        total = sum(row)
        if total > 0:
            row[:] = [c / total for c in row]
    return pd.DataFrame(counts)
def rle(inarray):
    """ run length encoding. Partial credit to R rle function.
        Multi datatype arrays catered for including non Numpy
        returns: tuple (runlengths, startpositions, values) """
    arr = np.asarray(inarray)  # force numpy
    size = len(arr)
    if size == 0:
        return (None, None, None)
    # True wherever a new run begins (pairwise unequal; string safe).
    change = np.array(arr[1:] != arr[:-1])
    run_ends = np.append(np.where(change), size - 1)   # last index of each run
    run_lengths = np.diff(np.append(-1, run_ends))     # run lengths
    run_starts = np.cumsum(np.append(0, run_lengths))[:-1]  # start positions
    return run_lengths, run_starts, arr[run_ends]
def behv_time(labels):
    """
    Fraction of frames spent in each label (labels assumed contiguous from 0).

    :param labels: 1D array, predicted labels
    :return beh_t: 1D array, percent time for each label
    """
    total = labels.shape[0]
    return [np.sum(labels == lab) / total for lab in range(len(np.unique(labels)))]
def behv_dur(labels):
    """
    Per-label bout duration statistics.

    :param labels: 1D array, predicted labels
    :return runlen_df: object, behavioral duration run lengths data frame
    :return dur_stats: object, behavioral duration statistics data frame
    """
    lengths, pos, grp = rle(labels)
    df_lengths = pd.DataFrame(lengths, columns={'Run lengths'})
    df_grp = pd.DataFrame(grp, columns={'B-SOiD labels'})
    df_pos = pd.DataFrame(pos, columns={'Start time (frames)'})
    runlengths = [df_grp, df_pos, df_lengths]
    runlen_df = pd.concat(runlengths, axis=1)
    beh_t = behv_time(labels)
    dur_means = []
    dur_quant0 = []
    dur_quant1 = []
    dur_quant2 = []
    dur_quant3 = []
    dur_quant4 = []
    for i in range(0, len(np.unique(grp))):
        try:
            # When label i has no bouts, np.mean of the empty slice returns nan
            # (no exception), so dur_means is appended before np.quantile --
            # which does raise on empty input and drops us into the except.
            dur_means.append(np.mean(lengths[np.where(grp == i)]))
            dur_quant0.append(np.quantile(lengths[np.where(grp == i)], 0.1))
            dur_quant1.append(np.quantile(lengths[np.where(grp == i)], 0.25))
            dur_quant2.append(np.quantile(lengths[np.where(grp == i)], 0.5))
            dur_quant3.append(np.quantile(lengths[np.where(grp == i)], 0.75))
            dur_quant4.append(np.quantile(lengths[np.where(grp == i)], 0.9))
        except:
            # dur_means already received a value from np.mean above, hence no
            # dur_means.append(0) here -- only the quantiles need placeholders
            # to keep all six columns the same length.
            # dur_means.append(0)
            dur_quant0.append(0)
            dur_quant1.append(0)
            dur_quant2.append(0)
            dur_quant3.append(0)
            dur_quant4.append(0)
    # Assemble one row per label: percent time, mean bout length, and quantiles.
    alldata = np.concatenate([np.array(beh_t).reshape(len(np.array(beh_t)), 1),
                              np.array(dur_means).reshape(len(np.array(dur_means)), 1),
                              np.array(dur_quant0).reshape(len(np.array(dur_quant0)), 1),
                              np.array(dur_quant1).reshape(len(np.array(dur_quant1)), 1),
                              np.array(dur_quant2).reshape(len(np.array(dur_quant2)), 1),
                              np.array(dur_quant3).reshape(len(np.array(dur_quant3)), 1),
                              np.array(dur_quant4).reshape(len(np.array(dur_quant4)), 1)], axis=1)
    micolumns = pd.MultiIndex.from_tuples([('Stats', 'Percent of time'),
                                           ('', 'Mean duration (frames)'), ('', '10th %tile (frames)'),
                                           ('', '25th %tile (frames)'), ('', '50th %tile (frames)'),
                                           ('', '75th %tile (frames)'), ('', '90th %tile (frames)')],
                                          names=['', 'B-SOiD labels'])
    dur_stats = pd.DataFrame(alldata, columns=micolumns)
    return runlen_df, dur_stats
def main(labels):
    """
    Compute run lengths, duration statistics and the transition matrix for labels.

    :param labels: 1D array: predicted labels
    :return runlen_df: object, behavioral duration run lengths data frame
    :return dur_stats: object, behavioral duration statistics data frame
    :return tm: object, transition matrix data frame
    """
    runlen_df, dur_stats = behv_dur(labels)
    return runlen_df, dur_stats, transition_matrix(labels)
| 4,417
| 36.12605
| 103
|
py
|
B-SOID
|
B-SOID-master/bsoid_py/utils/videoprocessing.py
|
"""
Extracting frames from videos
"""
import glob
import random
import numpy as np
import cv2
from tqdm import tqdm
from bsoid_py.utils.likelihoodprocessing import sort_nicely
from bsoid_py.utils.visuals import *
def get_vidnames(folder):
    """
    Gets a list of filenames within a folder
    :param folder: str, folder path (relative to BASE_PATH)
    :return: list, naturally-sorted .mp4 filenames
    """
    mp4_paths = glob.glob(BASE_PATH + folder + '/*.mp4')
    sort_nicely(mp4_paths)
    return mp4_paths
def vid2frame(vidname, labels, fps, output_path=FRAME_DIR):
    """
    Extracts frames every 100ms to match the labels for visualizations
    :param vidname: string, path to video
    :param labels: 1D array, labels from training
    :param fps: scalar, frame-rate of original camera
    :param output_path: string, path to output
    """
    vidobj = cv2.VideoCapture(vidname)
    pbar = tqdm(total=int(vidobj.get(cv2.CAP_PROP_FRAME_COUNT)))
    width = vidobj.get(3)   # CAP_PROP_FRAME_WIDTH (currently unused)
    height = vidobj.get(4)  # CAP_PROP_FRAME_HEIGHT (currently unused)
    labels = np.hstack((labels[0],labels)) # fill the first frame
    count = 0   # source-frame index (advances fps/10 frames per iteration)
    count1 = 0  # output-image index (one image per 100ms label)
    font_scale = 1
    font = cv2.FONT_HERSHEY_COMPLEX
    rectangle_bgr = (0, 0, 0)
    while vidobj.isOpened():
        ret, frame = vidobj.read()
        if ret:
            # Draw the predicted group label inside a filled black box, top-left.
            text = 'Group' + str(labels[count1])
            (text_width, text_height) = cv2.getTextSize(text, font, fontScale=font_scale, thickness=1)[0]
            text_offset_x = 50
            text_offset_y = 50
            box_coords = ((text_offset_x - 12, text_offset_y + 12),
                          (text_offset_x + text_width + 12, text_offset_y - text_height - 8))
            cv2.rectangle(frame, box_coords[0], box_coords[1], rectangle_bgr, cv2.FILLED)
            cv2.putText(frame, text, (text_offset_x, text_offset_y), font,
                        fontScale=font_scale, color=(255, 255, 255), thickness=1)
            cv2.imwrite(os.path.join(output_path, 'frame{:d}.png'.format(count1)), frame)
            count += round(fps / 10)  # i.e. at 60fps, this skips every 5
            count1 += 1
            # 1 == CAP_PROP_POS_FRAMES: seek to the next sampled frame.
            vidobj.set(1, count)
            pbar.update(round(fps / 10))
        else:
            vidobj.release()
            break
    pbar.close()
    return
def import_vidfolders(folders: list, output_path: list):
    """
    Import multiple folders containing .mp4 files and extract frames from them.
    :param folders: list of folder paths
    :param output_path: list, directory to where you want to store extracted vid images in LOCAL_CONFIG
    """
    vidnames = []
    for i, fd in enumerate(folders):  # Loop through folders
        v = get_vidnames(fd)
        for j, vidname in enumerate(v):
            logging.info('Extracting frames from {} and appending labels to these images...'.format(vidname))
            # NOTE(review): vid2frame is declared as vid2frame(vidname, labels, fps, output_path);
            # this 2-argument call passes output_path where labels is expected -- confirm intent.
            vid2frame(vidname, output_path)
            logging.info('Done extracting images and writing labels, from MP4 file {}'.format(j + 1))
        vidnames.append(v)
        logging.info('Processed {} MP4 files from folder: {}'.format(len(v), fd))
    return
def repeatingNumbers(labels):
    """Scan `labels` and describe its runs of repeated values.

    Note the established conventions preserved here: each reported length is
    (end index - start index), i.e. one less than the run's element count,
    and a run that ends exactly at the final element is not reported.

    :param labels: 1D array, predicted labels
    :return n_list: 1D array, the label number per run
    :return idx: 1D array, label start index per run
    :return lengths: 1D array, how long each bout lasted for
    """
    n_list, idx, lengths = [], [], []
    last = len(labels) - 1
    i = 0
    while i < last:
        run_start = i
        n_list.append(labels[i])
        idx.append(run_start)
        # advance to the end of the current run
        while i < last and labels[i] == labels[i + 1]:
            i += 1
        lengths.append(i - run_start)
        i += 1
    return n_list, idx, lengths
def create_labeled_vid(labels, crit=3, counts=3, frame_dir=FRAME_DIR, output_path=SHORTVID_DIR):
    """
    Assembles short example .mp4 clips per behavioral group from extracted frames.
    :param labels: 1D array, labels from training or testing
    :param crit: scalar, minimum bout duration (frames at 10Hz) for selection, default 3 (300ms)
    :param counts: scalar, number of randomly generated examples per group, default 3
    :param frame_dir: string, directory to where you extracted vid images in LOCAL_CONFIG
    :param output_path: string, directory to where you want to store short video examples in LOCAL_CONFIG
    """
    images = [img for img in os.listdir(frame_dir) if img.endswith(".png")]
    sort_nicely(images)
    fourcc = cv2.VideoWriter_fourcc(*'mp4v')
    frame = cv2.imread(os.path.join(frame_dir, images[0]))
    height, width, layers = frame.shape
    rnges = []
    n, idx, lengths = repeatingNumbers(labels)
    idx2 = []
    # keep the frame range of every bout lasting at least `crit` frames
    for i, j in enumerate(lengths):
        if j >= crit:
            rnges.append(range(idx[i], idx[i] + j))
            idx2.append(i)
    for i in tqdm(range(0, len(np.unique(labels)))):
        # gather all qualifying bout ranges belonging to group i
        a = []
        for j in range(0, len(rnges)):
            if n[idx2[j]] == i:
                a.append(rnges[j])
        try:
            rand_rnges = random.sample(a, counts)
            for k in range(0, len(rand_rnges)):
                video_name = 'group_{}_example_{}.mp4'.format(i, k)
                grpimages = []
                for l in rand_rnges[k]:
                    grpimages.append(images[l])
                video = cv2.VideoWriter(os.path.join(output_path, video_name), fourcc, 5, (width, height))
                for image in grpimages:
                    video.write(cv2.imread(os.path.join(frame_dir, image)))
                cv2.destroyAllWindows()
                video.release()
        except ValueError:
            # random.sample raises ValueError when fewer than `counts` bouts
            # qualify for this group -- deliberately skip such groups.
            # (Was a bare `except:` that also hid genuine cv2/IO errors.)
            pass
    return
def main(vidname, labels, fps, output_path):
    """Extract labeled frames from one video, then build short per-group example clips."""
    vid2frame(vidname, labels, fps, output_path)
    create_labeled_vid(labels, crit=3, counts=5, frame_dir=output_path, output_path=SHORTVID_DIR)
    return
| 5,705
| 34.886792
| 109
|
py
|
B-SOID
|
B-SOID-master/bsoid_py/utils/visuals.py
|
"""
Visualization functions and saving plots.
"""
import os
import time
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.axes._axes import _log as matplotlib_axes_logger
import numpy as np
import pandas as pd
import seaborn as sn
from bsoid_py.config import *
matplotlib_axes_logger.setLevel('ERROR')
def plot_tsne3d(data):
    """Scatter the 3-D t-SNE embedding of the training set.

    :param data: trained_tsne array; first three columns are the embedding dims
    """
    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')
    ax.scatter(data[:, 0], data[:, 1], data[:, 2], s=1, marker='o', alpha=0.8)
    for label, setter in zip(('Dim. 1', 'Dim. 2', 'Dim. 3'),
                             (ax.set_xlabel, ax.set_ylabel, ax.set_zlabel)):
        setter(label)
    ax.view_init(70, 135)
    plt.title('Embedding of the training set by t-SNE')
    plt.show()
def plot_classes(data, assignments):
    """Scatter the 3-D embedding colored by EM-GMM assignment; saves an SVG copy.

    :param data: 2D array, trained_tsne
    :param assignments: 1D array, EM-GMM assignments
    """
    groups = np.unique(assignments)
    cmap = plt.cm.get_cmap("Spectral")(np.linspace(0, 1, len(groups)))
    xs, ys, zs = data[:, 0], data[:, 1], data[:, 2]
    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')
    for g in groups:
        members = np.where(np.array(assignments) == g)
        # colormap is indexed by the label value itself
        ax.scatter(xs[members], ys[members], zs[members], c=cmap[g],
                   label=g, s=0.5, marker='o', alpha=0.8)
    ax.set_xlabel('Dim. 1')
    ax.set_ylabel('Dim. 2')
    ax.set_zlabel('Dim. 3')
    ax.view_init(70, 135)
    plt.title('Assignments by GMM')
    plt.legend(ncol=3)
    plt.show()
    timestr = time.strftime("_%Y%m%d_%H%M")
    fig.savefig(os.path.join(OUTPUT_PATH, str.join('', ('train_assignments', timestr, '.svg'))))
def plot_accuracy(scores):
    """Box-plot the cross-validated SVM accuracies with jittered points; saves an SVG copy.

    :param scores: 1D array, cross-validated accuracies for SVM classifier.
    """
    fig = plt.figure(facecolor='w', edgecolor='k')
    fig.suptitle("Performance on {} % data".format(HLDOUT * 100))
    ax = fig.add_subplot(111)
    ax.boxplot(scores, notch=None)
    # jitter x positions so individual scores don't overlap the box
    jitter = np.random.normal(1, 0.04, size=len(scores))
    plt.scatter(jitter, scores, s=40, c='r', alpha=0.5)
    ax.set_xlabel('SVM classifier')
    ax.set_ylabel('Accuracy')
    plt.show()
    timestr = time.strftime("_%Y%m%d_%H%M")
    fig.savefig(os.path.join(OUTPUT_PATH, str.join('', ('clf_scores', timestr, '.svg'))))
def plot_durhist(lengths, grp):
    """
    Overlaid histogram of bout durations for each behavioral group; saves an SVG copy.
    :param lengths: 1D array, run lengths of each bout.
    :param grp: 1D array, corresponding label.
    """
    timestr = time.strftime("_%Y%m%d_%H%M")
    fig, ax = plt.subplots()
    uniq = np.unique(grp)
    R = np.linspace(0, 1, len(uniq))
    cmap = plt.cm.get_cmap("Spectral")(R)
    # BUGFIX: the title previously counted np.unique(TM) -- an unrelated global --
    # and was re-set on every loop iteration; count groups from `grp`, set once.
    fig.suptitle("Duration histogram of {} behaviors".format(len(uniq)))
    for i in range(0, len(uniq)):
        x = lengths[np.where(grp == i)]
        ax.hist(x, density=True, color=cmap[i], alpha=0.3, label='Group {}'.format(i))
    plt.legend(loc='upper right')
    plt.show()
    my_file = 'duration_hist_100msbins'
    fig.savefig(os.path.join(OUTPUT_PATH, str.join('', (my_file, timestr, '.svg'))))
    return
def plot_tmat(tm: object, fps):
    """Heatmap of the behavioral transition matrix; saves an SVG copy.

    :param tm: object, transition matrix data frame
    :param fps: scalar, camera frame-rate (embedded in the output filename)
    """
    fig = plt.figure()
    fig.suptitle("Transition matrix of {} behaviors".format(tm.shape[0]))
    sn.heatmap(tm, annot=True)
    plt.xlabel("Next frame behavior")
    plt.ylabel("Current frame behavior")
    plt.show()
    timestr = time.strftime("_%Y%m%d_%H%M")
    fig.savefig(os.path.join(OUTPUT_PATH, str.join('', ('transition_matrix', str(fps), timestr, '.svg'))))
    return
def _plot_feat_hists(feats_in, labels_in, prefix, timestr):
    """Plot and save one per-group histogram figure per feature row, for one session.

    :param feats_in: 2D array (n_feats x n_frames), feature matrix
    :param labels_in: 1D array, group label per frame
    :param prefix: string prepended to the saved filename ('' or 'sess{k}_')
    :param timestr: string timestamp appended to the saved filename
    """
    labels_arr = np.array(labels_in)
    feats_arr = np.array(feats_in)
    uniq = np.unique(labels_arr)
    color = plt.cm.get_cmap("Spectral")(np.linspace(0, 1, len(uniq)))
    feat_ls = ("Relative snout to forepaws placement", "Relative snout to hind paws placement",
               "Inter-forepaw distance", "Body length", "Body angle",
               "Snout displacement", "Tail-base displacement")
    for j in range(0, feats_arr.shape[0]):
        fig = plt.figure(facecolor='w', edgecolor='k')
        # hoisted: mean/std over the whole feature row are loop-invariant in i
        mu = np.mean(feats_arr[j, :])
        sd = np.std(feats_arr[j, :])
        for i in range(0, len(uniq)):
            plt.subplot(len(uniq), 1, i + 1)
            if j == 2 or j == 3 or j == 5 or j == 6:
                # distance/displacement features are non-negative: clamp the range at 0
                lo, hi = 0, mu + 3 * sd
            else:
                lo, hi = mu - 3 * sd, mu + 3 * sd
            plt.hist(feats_arr[j, labels_arr == i],
                     bins=np.linspace(lo, hi, num=50),
                     range=(lo, hi),
                     color=color[i], density=True)
            fig.suptitle("{} pixels".format(feat_ls[j]))
            plt.xlim(lo, hi)
            if i < len(uniq) - 1:
                # only the bottom subplot keeps its x tick labels
                plt.tick_params(axis='x', which='both', bottom=False, top=False, labelbottom=False)
        my_file = '{}feat{}_hist'.format(prefix, j + 1)
        fig.savefig(os.path.join(OUTPUT_PATH, str.join('', (my_file, timestr, '.svg'))))
        plt.show()


def plot_feats(feats: list, labels: list):
    """
    Histogram each feature by group and save SVGs, for one session or many.
    The two original branches (list-of-sessions vs single session) were
    near-duplicates and now share _plot_feat_hists; only the filename prefix
    ('sess{k}_' vs '') differs.
    :param feats: list of 2D feature arrays (one per session), or a single 2D array
    :param labels: list of 1D label arrays (one per session), or a single 1D array
    """
    timestr = time.strftime("_%Y%m%d_%H%M")
    if isinstance(labels, list):
        for k in range(0, len(feats)):
            _plot_feat_hists(feats[k], labels[k], 'sess{}_'.format(k + 1), timestr)
    else:
        _plot_feat_hists(feats, labels, '', timestr)
def main():
    """No-op entry point; this module is used as a library of plotting helpers."""
    return None
if __name__ == '__main__':
    # Running this module directly just invokes the no-op entry point.
    main()
| 8,980
| 43.02451
| 113
|
py
|
B-SOID
|
B-SOID-master/bsoid_umap/main.py
|
"""
A master that runs BOTH
1. Training a unsupervised + supervised machine learning model based on patterns in spatio-temporal (x,y) changes.
2. Predicting new behaviors using (x,y) based on learned classifier.
"""
import os
import time
import itertools
import joblib
import numpy as np
import pandas as pd
from bsoid_umap.config import *
def build(train_folders):
    """
    Trains the full B-SOiD UMAP pipeline and persists all artifacts.
    :param train_folders: list, folders to build behavioral model on
    :returns f_10fps, umap_embeddings, nn_classifier, scores, nn_assignments: see bsoid_umap.train
    Automatically saves single CSV file containing training outputs.
    Automatically saves classifier in OUTPUTPATH with MODELNAME in LOCAL_CONFIG
    """
    import bsoid_umap.train
    from bsoid_umap.utils.statistics import feat_dist
    f_10fps, f_10fps_sc, umap_embeddings, hdb_assignments, soft_assignments, soft_clusters, nn_classifier, scores, \
        nn_assignments = bsoid_umap.train.main(train_folders)
    timestr = time.strftime("_%Y%m%d_%H%M")
    # summarize per-feature distributions and write them to CSV
    feat_range, feat_med, p_cts, edges = feat_dist(f_10fps)
    f_range_df = pd.DataFrame(feat_range, columns=['5%tile', '95%tile'])
    f_med_df = pd.DataFrame(feat_med, columns=['median'])
    f_pcts_df = pd.DataFrame(p_cts)
    f_pcts_df.columns = pd.MultiIndex.from_product([f_pcts_df.columns, ['prob']])
    f_edge_df = pd.DataFrame(edges)
    f_edge_df.columns = pd.MultiIndex.from_product([f_edge_df.columns, ['edge']])
    f_dist_data = pd.concat((f_range_df, f_med_df, f_pcts_df, f_edge_df), axis=1)
    f_dist_data.to_csv((os.path.join(OUTPUT_PATH, str.join('', ('bsoid_featdist_10Hz', timestr, '.csv')))),
                       index=True, chunksize=10000, encoding='utf-8')
    # build human-readable column names for each feature row: pairwise distances,
    # pairwise angular changes, then per-point displacements
    # (assumes sqrt(n_feats) equals the body-part count -- TODO confirm against train.py)
    length_nm = []
    angle_nm = []
    disp_nm = []
    for i, j in itertools.combinations(range(0, int(np.sqrt(f_10fps.shape[0]))), 2):
        length_nm.append(['distance between points:', i+1, j+1])
        angle_nm.append(['angular change for points:', i+1, j+1])
    for i in range(int(np.sqrt(f_10fps.shape[0]))):
        disp_nm.append(['displacement for point:', i+1, i+1])
    mcol = np.vstack((length_nm, angle_nm, disp_nm))
    feat_nm_df = pd.DataFrame(f_10fps.T, columns=mcol)
    # combine UMAP embeddings with the three assignment variants per 100ms frame
    umaphdb_data = np.concatenate([umap_embeddings, hdb_assignments.reshape(len(hdb_assignments), 1),
                                   soft_assignments.reshape(len(soft_assignments), 1),
                                   nn_assignments.reshape(len(nn_assignments), 1)], axis=1)
    micolumns = pd.MultiIndex.from_tuples([('UMAP embeddings', 'Dimension 1'), ('', 'Dimension 2'),
                                           ('', 'Dimension 3'), ('HDBSCAN', 'Assignment No.'),
                                           ('HDBSCAN*SOFT', 'Assignment No.'), ('Neural Net', 'Assignment No.')],
                                          names=['Type', 'Frame@10Hz'])
    umaphdb_df = pd.DataFrame(umaphdb_data, columns=micolumns)
    training_data = pd.concat((feat_nm_df, umaphdb_df), axis=1)
    soft_clust_prob = pd.DataFrame(soft_clusters)
    training_data.to_csv((os.path.join(OUTPUT_PATH, str.join('', ('bsoid_trainlabels_10Hz', timestr, '.csv')))),
                         index=True, chunksize=10000, encoding='utf-8')
    soft_clust_prob.to_csv((os.path.join(OUTPUT_PATH, str.join('', ('bsoid_labelprob_10Hz', timestr, '.csv')))),
                           index=True, chunksize=10000, encoding='utf-8')
    # persist everything needed to re-run prediction without retraining
    with open(os.path.join(OUTPUT_PATH, str.join('', ('bsoid_', MODEL_NAME, '.sav'))), 'wb') as f:
        joblib.dump([f_10fps, f_10fps_sc, umap_embeddings, hdb_assignments, soft_assignments, soft_clusters,
                     nn_classifier, scores, nn_assignments], f)
    logging.info('Saved.')
    return f_10fps, f_10fps_sc, umap_embeddings, hdb_assignments, soft_assignments, soft_clusters, nn_classifier, \
        scores, nn_assignments
# def retrain(train_folders):
# """
# :param train_folders: list, folders to build behavioral model on
# :returns f_10fps, umap_embeddings, nn_classifier, scores, nn_assignments: see bsoid_umap.train
# Automatically saves single CSV file containing training outputs.
# Automatically saves classifier in OUTPUTPATH with MODELNAME in LOCAL_CONFIG
# """
# with open(os.path.join(OUTPUT_PATH, str.join('', ('bsoid_', MODEL_NAME, '.sav'))), 'rb') as fr:
# f_10fps, umap_embeddings, hdb_assignments, soft_assignments, soft_clusters, nn_classifier, scores, \
# nn_assignments = joblib.load(fr)
# from bsoid_umap.utils.videoprocessing import vid2frame
# vid2frame(VID_NAME, f_10fps[ID], FPS, FRAME_DIR)
# labels_df = pd.read_csv('/Users/ahsu/Sign2Speech/Notebook/labels.csv', low_memory=False)
#
# import bsoid_umap.retrain
# f_10fps, umap_embeddings, nn_classifier, scores, nn_assignments = bsoid_umap.train.main(train_folders)
# alldata = np.concatenate([umap_embeddings, nn_assignments.reshape(len(nn_assignments), 1)], axis=1)
# micolumns = pd.MultiIndex.from_tuples([('UMAP embeddings', 'Dimension 1'), ('', 'Dimension 2'),
# ('', 'Dimension 3'), ('Neural Net', 'Assignment No.')],
# names=['Type', 'Frame@10Hz'])
# training_data = pd.DataFrame(alldata, columns=micolumns)
# timestr = time.strftime("_%Y%m%d_%H%M")
# training_data.to_csv((os.path.join(OUTPUT_PATH, str.join('', ('bsoid_trainlabels_10Hz', timestr, '.csv')))),
# index=True, chunksize=10000, encoding='utf-8')
# with open(os.path.join(OUTPUT_PATH, str.join('', ('bsoid_', MODEL_NAME, '.sav'))), 'wb') as f:
# joblib.dump([f_10fps, umap_embeddings, nn_classifier, scores, nn_assignments], f)
# logging.info('Saved.')
# return f_10fps, umap_embeddings, nn_classifier, scores, nn_assignments
def run(predict_folders):
    """
    Loads the saved model, frameshift-predicts new data, and writes per-file CSVs.
    :param predict_folders: list, folders to run prediction using behavioral model
    :returns data_new, fs_labels: see bsoid_umap.classify
    Automatically loads classifier in OUTPUTPATH with MODELNAME in LOCAL_CONFIG
    Automatically saves CSV files containing new outputs.
    """
    import bsoid_umap.classify
    from bsoid_umap.utils.likelihoodprocessing import get_filenames
    import bsoid_umap.utils.statistics
    from bsoid_umap.utils.visuals import plot_tmat
    with open(os.path.join(OUTPUT_PATH, str.join('', ('bsoid_', MODEL_NAME, '.sav'))), 'rb') as fr:
        f_10fps, f_10fps_sc, umap_embeddings, hdb_assignments, soft_assignments, soft_clusters, \
            nn_classifier, scores, nn_assignments = joblib.load(fr)
    data_new, fs_labels = bsoid_umap.classify.main(predict_folders, FPS, nn_classifier)
    filenames = []
    all_df = []
    for i, fd in enumerate(predict_folders):  # Loop through folders
        f = get_filenames(fd)
        for j, filename in enumerate(f):
            logging.info('Importing CSV file {} from folder {}'.format(j + 1, i + 1))
            curr_df = pd.read_csv(filename, low_memory=False)
            filenames.append(filename)
            all_df.append(curr_df)
    for i in range(0, len(fs_labels)):
        timestr = time.strftime("_%Y%m%d_%H%M")
        csvname = os.path.basename(filenames[i]).rpartition('.')[0]
        # pad the first 6 frames (consumed by the feature window) with the first label
        fs_labels_pad = np.pad(fs_labels[i], (6, 0), 'edge')
        df2 = pd.DataFrame(fs_labels_pad, columns={'B-SOiD labels'})
        # shift the labels down so they align past the pose-file header rows
        df2.loc[len(df2)] = ''
        df2.loc[len(df2)] = ''
        df2 = df2.shift()
        df2.loc[0] = ''
        df2 = df2.shift()
        df2.loc[0] = ''
        # BUGFIX: pair each label set with its OWN source CSV (previously always
        # used all_df[0], so every output file carried the first file's data)
        frames = [df2, all_df[i]]
        xyfs_df = pd.concat(frames, axis=1)
        xyfs_df.to_csv((os.path.join(OUTPUT_PATH, str.join('', ('bsoid_labels_', str(FPS), 'Hz', timestr, csvname,
                                                                '.csv')))),
                       index=True, chunksize=10000, encoding='utf-8')
        runlen_df, dur_stats, df_tm = bsoid_umap.utils.statistics.main(fs_labels[i])
        runlen_df.to_csv((os.path.join(OUTPUT_PATH, str.join('', ('bsoid_runlen_', str(FPS), 'Hz', timestr, csvname,
                                                                  '.csv')))),
                         index=True, chunksize=10000, encoding='utf-8')
        dur_stats.to_csv((os.path.join(OUTPUT_PATH, str.join('', ('bsoid_stats_', str(FPS), 'Hz', timestr, csvname,
                                                                  '.csv')))),
                         index=True, chunksize=10000, encoding='utf-8')
        df_tm.to_csv((os.path.join(OUTPUT_PATH, str.join('', ('bsoid_transitions_', str(FPS), 'Hz', timestr, csvname,
                                                              '.csv')))),
                     index=True, chunksize=10000, encoding='utf-8')
        if PLOT:
            fig = plot_tmat(df_tm)
            my_file = 'transition_matrix'
            fig.savefig(os.path.join(OUTPUT_PATH, str.join('', (my_file, str(FPS), 'Hz', timestr, csvname, '.svg'))))
    with open(os.path.join(OUTPUT_PATH, str.join('', ('bsoid_predictions', timestr, '.sav'))), 'wb') as f:
        joblib.dump([data_new, fs_labels], f)
    logging.info('All saved.')
    return data_new, fs_labels
def main(train_folders, predict_folders):
    """Train a behavioral model on train_folders, then predict on predict_folders.

    :param train_folders: list, folders to build behavioral model on
    :param predict_folders: list, folders to run prediction using behavioral model
    :returns f_10fps, umap_embeddings, nn_classifier, scores, nn_assignments: see bsoid_umap.train
    :returns feats_new, fs_labels: see bsoid_umap.classify
    Automatically saves and loads classifier in OUTPUTPATH with MODELNAME in LOCAL_CONFIG
    Automatically saves CSV files containing training and new outputs
    """
    (f_10fps, f_10fps_sc, umap_embeddings, hdb_assignments, soft_assignments,
     soft_clusters, nn_classifier, scores, nn_assignments) = build(train_folders)
    data_new, fs_labels = run(predict_folders)
    return (f_10fps, f_10fps_sc, umap_embeddings, hdb_assignments, soft_assignments,
            soft_clusters, nn_classifier, scores, nn_assignments, data_new, fs_labels)
if __name__ == "__main__":
    # Script entry point: train on TRAIN_FOLDERS and predict on PREDICT_FOLDERS
    # (both configured in bsoid_umap.config).
    f_10fps, f_10fps_sc, umap_embeddings, hdb_assignments, soft_assignments, soft_clusters, nn_classifier, \
        scores, nn_assignments, data_new, fs_labels = main(TRAIN_FOLDERS, PREDICT_FOLDERS)
| 10,285
| 55.828729
| 117
|
py
|
B-SOID
|
B-SOID-master/bsoid_umap/classify.py
|
"""
Classify behaviors based on (x,y) using trained B-SOiD behavioral model.
B-SOiD behavioral model has been developed using bsoid_umap.main.build()
"""
import math
import itertools
import numpy as np
from bsoid_umap.utils import videoprocessing
from bsoid_umap.utils.likelihoodprocessing import boxcar_center
from bsoid_umap.utils.visuals import *
def bsoid_extract(data, fps=FPS):
    """
    Extracts features based on (x,y) positions.
    :param data: list, csv data (one 2D array per session; columns are x,y pairs)
    :param fps: scalar, input for camera frame-rate
    :return f_10fps: list of 2D arrays, extracted features binned to 100ms
    """
    # smoothing window: odd number of frames spanning ~100ms
    # BUGFIX: np.int was removed in NumPy 1.24; use the builtin int
    win_len = int(np.round(0.05 / (1 / fps)) * 2 - 1)
    feats = []
    for m in range(len(data)):
        logging.info('Extracting features from CSV file {}...'.format(m + 1))
        dataRange = len(data[m])
        dxy_r = []
        dis_r = []
        for r in range(dataRange):
            if r < dataRange - 1:
                # per-point frame-to-frame displacement (dataRange-1 rows)
                dis = []
                for c in range(0, data[m].shape[1], 2):
                    dis.append(np.linalg.norm(data[m][r + 1, c:c + 2] - data[m][r, c:c + 2]))
                dis_r.append(dis)
            # pairwise inter-point vectors (dataRange rows)
            dxy = []
            for i, j in itertools.combinations(range(0, data[m].shape[1], 2), 2):
                dxy.append(data[m][r, i:i + 2] - data[m][r, j:j + 2])
            dxy_r.append(dxy)
        dis_r = np.array(dis_r)
        dxy_r = np.array(dxy_r)
        dis_smth = []
        dxy_eu = np.zeros([dataRange, dxy_r.shape[1]])
        ang = np.zeros([dataRange - 1, dxy_r.shape[1]])
        dxy_smth = []
        ang_smth = []
        for l in range(dis_r.shape[1]):
            dis_smth.append(boxcar_center(dis_r[:, l], win_len))
        for k in range(dxy_r.shape[1]):
            for kk in range(dataRange):
                # pairwise distance plus signed angular change between frames
                dxy_eu[kk, k] = np.linalg.norm(dxy_r[kk, k, :])
                if kk < dataRange - 1:
                    b_3d = np.hstack([dxy_r[kk + 1, k, :], 0])
                    a_3d = np.hstack([dxy_r[kk, k, :], 0])
                    c = np.cross(b_3d, a_3d)
                    ang[kk, k] = np.dot(np.dot(np.sign(c[2]), 180) / np.pi,
                                        math.atan2(np.linalg.norm(c),
                                                   np.dot(dxy_r[kk, k, :], dxy_r[kk + 1, k, :])))
            dxy_smth.append(boxcar_center(dxy_eu[:, k], win_len))
            ang_smth.append(boxcar_center(ang[:, k], win_len))
        dis_smth = np.array(dis_smth)
        dxy_smth = np.array(dxy_smth)
        ang_smth = np.array(ang_smth)
        feats.append(np.vstack((dxy_smth[:, 1:], ang_smth, dis_smth)))
    logging.info('Done extracting features from a total of {} training CSV files.'.format(len(data)))
    f_10fps = []
    for n in range(0, len(feats)):
        feats1 = np.zeros(len(data[n]))
        # bin into 100ms windows: mean for distance rows, sum for angle/displacement rows
        # (row split relies on dxy_smth from the last extraction pass -- same shape for all sessions)
        for k in range(round(fps / 10), len(feats[n][0]), round(fps / 10)):
            if k > round(fps / 10):
                feats1 = np.concatenate((feats1.reshape(feats1.shape[0], feats1.shape[1]),
                                         np.hstack((np.mean((feats[n][0:dxy_smth.shape[0],
                                                             range(k - round(fps / 10), k)]), axis=1),
                                                    np.sum((feats[n][dxy_smth.shape[0]:feats[n].shape[0],
                                                            range(k - round(fps / 10), k)]),
                                                           axis=1))).reshape(len(feats[0]), 1)), axis=1)
            else:
                feats1 = np.hstack((np.mean((feats[n][0:dxy_smth.shape[0], range(k - round(fps / 10), k)]),
                                            axis=1),
                                    np.sum((feats[n][dxy_smth.shape[0]:feats[n].shape[0],
                                            range(k - round(fps / 10), k)]), axis=1))).reshape(len(feats[0]), 1)
        logging.info('Done integrating features into 100ms bins from CSV file {}.'.format(n + 1))
        f_10fps.append(feats1)
    return f_10fps
def bsoid_predict(feats, clf):
    """Predict a label sequence (one per 100ms bin) for each feature matrix.

    :param feats: list, multiple feats (original feature space)
    :param clf: Obj, MLP classifier
    :return labels_fslow: list, label/100ms
    """
    labels_fslow = []
    for idx, feat in enumerate(feats):
        logging.info('Predicting file {} with {} instances '
                     'using learned classifier: {}{}...'.format(idx + 1, feat.shape[1], 'bsoid_', MODEL_NAME))
        predicted = clf.predict(feat.T)
        logging.info('Done predicting file {} with {} instances in {} D space.'.format(idx + 1, feat.shape[1],
                                                                                       feat.shape[0]))
        labels_fslow.append(predicted)
    logging.info('Done predicting a total of {} files.'.format(len(feats)))
    return labels_fslow
def bsoid_frameshift(data_new, fps, clf):
    """
    Frame-shift paradigm to output behavior/frame.
    :param data_new: list, new data from predict_folders
    :param fps: scalar, argument specifying camera frame-rate in LOCAL_CONFIG
    :param clf: Obj, MLP classifier
    :return fs_labels, 1D array, label/frame
    """
    labels_fs = []
    labels_fs2 = []
    labels_fshigh = []
    for i in range(0, len(data_new)):
        # make fps/10 shifted copies of the data, one per frame offset
        data_offset = []
        for j in range(math.floor(fps / 10)):
            data_offset.append(data_new[i][j:, :])
        feats_new = bsoid_extract(data_offset)
        labels = bsoid_predict(feats_new, clf)
        # reverse so right-alignment during padding keeps the most recent bins
        for m in range(0, len(labels)):
            labels[m] = labels[m][::-1]
        # pad all offset streams to equal length with -1, un-reverse, and borrow
        # the first n labels from the previous offset to fill the leading gap
        labels_pad = -1 * np.ones([len(labels), len(max(labels, key=lambda x: len(x)))])
        for n, l in enumerate(labels):
            labels_pad[n][0:len(l)] = l
            labels_pad[n] = labels_pad[n][::-1]
            if n > 0:
                labels_pad[n][0:n] = labels_pad[n - 1][0:n]
        labels_fs.append(labels_pad.astype(int))
    # interleave the offset streams column-wise ('F' order) to recover one label per frame
    for k in range(0, len(labels_fs)):
        labels_fs2 = []
        for l in range(math.floor(fps / 10)):
            labels_fs2.append(labels_fs[k][l])
        labels_fshigh.append(np.array(labels_fs2).flatten('F'))
    logging.info('Done frameshift-predicting a total of {} files.'.format(len(data_new)))
    return labels_fshigh
def main(predict_folders, fps, clf):
    """Entry point: load new pose CSVs and frameshift-predict one label per frame.

    :param predict_folders: list, data folders
    :param fps: scalar, camera frame-rate
    :param clf: object, MLP classifier
    :return data_new: list, csv data
    :return fs_labels, 1D array, label/frame
    """
    import bsoid_umap.utils.likelihoodprocessing
    csv_names, data_new, _perc_rect = bsoid_umap.utils.likelihoodprocessing.main(predict_folders)
    fs_labels = bsoid_frameshift(data_new, fps, clf)
    if VID:
        # decimate frame-shifted labels back down to ~10Hz for frame extraction
        decimated_labels = fs_labels[ID][0:-1:int(round(FPS / 10))]
        videoprocessing.main(VID_NAME, decimated_labels, FPS, FRAME_DIR)
    return data_new, fs_labels
| 6,827
| 43.337662
| 112
|
py
|
B-SOID
|
B-SOID-master/bsoid_umap/__init__.py
| 0
| 0
| 0
|
py
|
|
B-SOID
|
B-SOID-master/bsoid_umap/train.py
|
"""
Based on the natural statistics of the mouse configuration using (x,y) positions,
we distill information down to 3 dimensions and run unsupervised pattern recognition.
Then, we utilize these output and original feature space to train a B-SOiD neural network model.
"""
import math
import itertools
import random
import hdbscan
import numpy as np
from sklearn.neural_network import MLPClassifier
from sklearn.decomposition import PCA
from sklearn.decomposition import FastICA
from sklearn.metrics import plot_confusion_matrix
from sklearn.model_selection import train_test_split, cross_val_score
from sklearn.preprocessing import StandardScaler
from tqdm import tqdm
import umap
from bsoid_umap.utils.likelihoodprocessing import boxcar_center
from bsoid_umap.utils.visuals import *
def bsoid_feats(data: list, fps=FPS):
    """
    Extracts features based on (x,y) positions and standardizes them per session.
    (Docstring previously said "Trains UMAP" -- copy-paste error; no UMAP here.)
    :param data: list of 2D arrays (one per session; columns are x,y pairs)
    :param fps: scalar, argument specifying camera frame-rate in LOCAL_CONFIG
    :return f_10fps: 2D array, features
    :return f_10fps_sc: 2D array, standardized/session features
    """
    # smoothing window: odd number of frames spanning ~100ms
    # BUGFIX: np.int was removed in NumPy 1.24; use the builtin int
    win_len = int(np.round(0.05 / (1 / fps)) * 2 - 1)
    feats = []
    for m in range(len(data)):
        logging.info('Extracting features from CSV file {}...'.format(m + 1))
        dataRange = len(data[m])
        dxy_r = []
        dis_r = []
        for r in range(dataRange):
            if r < dataRange - 1:
                # per-point frame-to-frame displacement (dataRange-1 rows)
                dis = []
                for c in range(0, data[m].shape[1], 2):
                    dis.append(np.linalg.norm(data[m][r + 1, c:c + 2] - data[m][r, c:c + 2]))
                dis_r.append(dis)
            # pairwise inter-point vectors (dataRange rows)
            dxy = []
            for i, j in itertools.combinations(range(0, data[m].shape[1], 2), 2):
                dxy.append(data[m][r, i:i + 2] - data[m][r, j:j + 2])
            dxy_r.append(dxy)
        dis_r = np.array(dis_r)
        dxy_r = np.array(dxy_r)
        dis_smth = []
        dxy_eu = np.zeros([dataRange, dxy_r.shape[1]])
        ang = np.zeros([dataRange - 1, dxy_r.shape[1]])
        dxy_smth = []
        ang_smth = []
        for l in range(dis_r.shape[1]):
            dis_smth.append(boxcar_center(dis_r[:, l], win_len))
        for k in range(dxy_r.shape[1]):
            for kk in range(dataRange):
                # pairwise distance plus signed angular change between frames
                dxy_eu[kk, k] = np.linalg.norm(dxy_r[kk, k, :])
                if kk < dataRange - 1:
                    b_3d = np.hstack([dxy_r[kk + 1, k, :], 0])
                    a_3d = np.hstack([dxy_r[kk, k, :], 0])
                    c = np.cross(b_3d, a_3d)
                    ang[kk, k] = np.dot(np.dot(np.sign(c[2]), 180) / np.pi,
                                        math.atan2(np.linalg.norm(c),
                                                   np.dot(dxy_r[kk, k, :], dxy_r[kk + 1, k, :])))
            dxy_smth.append(boxcar_center(dxy_eu[:, k], win_len))
            ang_smth.append(boxcar_center(ang[:, k], win_len))
        dis_smth = np.array(dis_smth)
        dxy_smth = np.array(dxy_smth)
        ang_smth = np.array(ang_smth)
        feats.append(np.vstack((dxy_smth[:, 1:], ang_smth, dis_smth)))
    logging.info('Done extracting features from a total of {} training CSV files.'.format(len(data)))
    for n in range(0, len(feats)):
        feats1 = np.zeros(len(data[n]))
        # bin into 100ms windows: mean for distance rows, sum for angle/displacement rows
        for k in range(round(fps / 10), len(feats[n][0]), round(fps / 10)):
            if k > round(fps / 10):
                feats1 = np.concatenate((feats1.reshape(feats1.shape[0], feats1.shape[1]),
                                         np.hstack((np.mean((feats[n][0:dxy_smth.shape[0],
                                                             range(k - round(fps / 10), k)]), axis=1),
                                                    np.sum((feats[n][dxy_smth.shape[0]:feats[n].shape[0],
                                                            range(k - round(fps / 10), k)]),
                                                           axis=1))).reshape(len(feats[0]), 1)), axis=1)
            else:
                feats1 = np.hstack((np.mean((feats[n][0:dxy_smth.shape[0], range(k - round(fps / 10), k)]), axis=1),
                                    np.sum((feats[n][dxy_smth.shape[0]:feats[n].shape[0],
                                            range(k - round(fps / 10), k)]), axis=1))).reshape(len(feats[0]), 1)
        logging.info('Done integrating features into 100ms bins from CSV file {}.'.format(n + 1))
        if n > 0:
            f_10fps = np.concatenate((f_10fps, feats1), axis=1)
            scaler = StandardScaler()
            scaler.fit(feats1.T)
            feats1_sc = scaler.transform(feats1.T).T
            f_10fps_sc = np.concatenate((f_10fps_sc, feats1_sc), axis=1)
        else:
            f_10fps = feats1
            scaler = StandardScaler()
            scaler.fit(feats1.T)
            feats1_sc = scaler.transform(feats1.T).T
            f_10fps_sc = feats1_sc  # scaling is important as I've seen wildly different stdev/feat between sessions
    return f_10fps, f_10fps_sc
def bsoid_umap_embed(f_10fps_sc, umap_params=UMAP_PARAMS):
    """Learn a low-dimensional UMAP embedding of the standardized features.

    :param f_10fps_sc: 2D array, standardized/session features
    :param umap_params: dict, UMAP params in GLOBAL_CONFIG
    :return trained_umap: object, trained UMAP transformer
    :return umap_embeddings: 2D array, embedded UMAP space
    """
    # NOTE: PCA (top-50 components) and FastICA pre-compression were explored
    # but are unnecessary at the current feature count; revisit PCA only if
    # the feature dimension grows beyond ~100.
    feats_train = f_10fps_sc.T
    logging.info('Transforming all {} instances from {} D into {} D'.format(feats_train.shape[0],
                                                                            feats_train.shape[1],
                                                                            umap_params.get('n_components')))
    # n_neighbors scales with sqrt(N) (power law)
    n_neighbors = int(round(np.sqrt(feats_train.shape[0])))
    trained_umap = umap.UMAP(n_neighbors=n_neighbors, **umap_params).fit(feats_train)
    umap_embeddings = trained_umap.embedding_
    logging.info('Done non-linear transformation with UMAP from {} D into {} D.'.format(feats_train.shape[1],
                                                                                        umap_embeddings.shape[1]))
    return trained_umap, umap_embeddings
def bsoid_hdbscan(umap_embeddings, hdbscan_params=HDBSCAN_PARAMS):
    """
    Sweeps HDBSCAN minimum cluster sizes and keeps the clustering with the most labels.
    :param umap_embeddings: 2D array, embedded UMAP space
    :param hdbscan_params: dict, HDBSCAN params in GLOBAL_CONFIG
    :return assignments: 1D array, HDBSCAN hard assignments (-1 = noise)
    :return soft_clusters: 2D array, per-point soft membership strengths
    :return soft_assignments: 1D array, argmax of soft memberships
    """
    # BUGFIX: np.infty alias was removed in NumPy 2.0; np.inf is the supported name
    highest_numulab = -np.inf
    best_clf = None
    numulab = []
    min_cluster_range = range(6, 21)
    logging.info('Running HDBSCAN on {} instances in {} D space...'.format(*umap_embeddings.shape))
    for min_c in min_cluster_range:
        # min_cluster_size is a fraction (0.1% * min_c) of the data size
        trained_classifier = hdbscan.HDBSCAN(prediction_data=True,
                                             min_cluster_size=int(round(0.001 * min_c * umap_embeddings.shape[0])),
                                             **hdbscan_params).fit(umap_embeddings)
        numulab.append(len(np.unique(trained_classifier.labels_)))
        if numulab[-1] > highest_numulab:
            logging.info('Adjusting minimum cluster size to maximize cluster number...')
            highest_numulab = numulab[-1]
            best_clf = trained_classifier
    assignments = best_clf.labels_
    soft_clusters = hdbscan.all_points_membership_vectors(best_clf)
    soft_assignments = np.argmax(soft_clusters, axis=1)
    logging.info('Done predicting labels for {} instances in {} D space...'.format(*umap_embeddings.shape))
    return assignments, soft_clusters, soft_assignments
def bsoid_nn(feats, labels, hldout=HLDOUT, cv_it=CV_IT, mlp_params=MLP_PARAMS):
    """
    Trains MLP classifier
    :param feats: 2D array, original feature space, standardized
    :param labels: 1D array, HDBSCAN assignments
    :param hldout: scalar, test partition ratio for validating MLP performance in GLOBAL_CONFIG
    :param cv_it: scalar, iterations for cross-validation in GLOBAL_CONFIG
    :param mlp_params: dict, MLP parameters in GLOBAL_CONFIG
    :return clf: obj, MLP classifier
    :return scores: 1D array, cross-validated accuracy
    :return nn_assignments: 1D array, neural net predictions
    """
    # drop HDBSCAN noise points (label -1) before training
    feats_filt = feats[:, labels >= 0]
    labels_filt = labels[labels >= 0]
    feats_train, feats_test, labels_train, labels_test = train_test_split(feats_filt.T, labels_filt.T,
                                                                          test_size=hldout, random_state=23)
    logging.info(
        'Training feedforward neural network on randomly partitioned {}% of training data...'.format(
            (1 - hldout) * 100))
    # `classifier` is fit on the train split and used only for scoring below;
    # `clf`, the returned model, is re-fit on ALL non-noise data
    classifier = MLPClassifier(**mlp_params)
    classifier.fit(feats_train, labels_train)
    clf = MLPClassifier(**mlp_params)
    clf.fit(feats_filt.T, labels_filt.T)
    # predictions cover every instance, including the former noise points
    nn_assignments = clf.predict(feats.T)
    logging.info('Done training feedforward neural network '
                 'mapping {} features to {} assignments.'.format(feats_train.shape, labels_train.shape))
    # accuracy estimated by cross-validation over the held-out partition
    scores = cross_val_score(classifier, feats_test, labels_test, cv=cv_it, n_jobs=-1)
    timestr = time.strftime("_%Y%m%d_%H%M")
    if PLOT:
        np.set_printoptions(precision=2)
        # save both raw-count and row-normalized confusion matrices as SVGs
        titles_options = [("Non-normalized confusion matrix", None),
                          ("Normalized confusion matrix", 'true')]
        titlenames = [("counts"), ("norm")]
        j = 0
        for title, normalize in titles_options:
            disp = plot_confusion_matrix(classifier, feats_test, labels_test,
                                         cmap=plt.cm.Blues,
                                         normalize=normalize)
            disp.ax_.set_title(title)
            print(title)
            print(disp.confusion_matrix)
            my_file = 'confusion_matrix_{}'.format(titlenames[j])
            disp.figure_.savefig(os.path.join(OUTPUT_PATH, str.join('', (my_file, timestr, '.svg'))))
            j += 1
        plt.show()
    logging.info(
        'Scored cross-validated feedforward neural network performance.'.format(feats_train.shape, labels_train.shape))
    return clf, scores, nn_assignments
def main(train_folders: list):
    """
    Full B-SOiD training pipeline: preprocess CSVs, extract features, embed
    with UMAP, cluster with HDBSCAN, then train an MLP on the cluster labels.
    :param train_folders: list, training data folders
    :return f_10fps: 2D array, features
    :return umap_embeddings: 2D array, embedded UMAP space
    :return nn_classifier: obj, MLP classifier
    :return scores: 1D array, cross-validated accuracy
    :return nn_assignments: neural net predictions
    """
    import bsoid_umap.utils.likelihoodprocessing
    filenames, training_data, perc_rect = bsoid_umap.utils.likelihoodprocessing.main(train_folders)
    f_10fps, f_10fps_sc = bsoid_feats(training_data)
    trained_umap, umap_embeddings = bsoid_umap_embed(f_10fps_sc)
    hdb_assignments, soft_clusters, soft_assignments = bsoid_hdbscan(umap_embeddings)
    nn_classifier, scores, nn_assignments = bsoid_nn(f_10fps, soft_assignments)
    if PLOT:
        timestr = time.strftime("_%Y%m%d_%H%M")
        # plot only points HDBSCAN kept (label >= 0; -1 is noise)
        fig1 = plot_classes(umap_embeddings[hdb_assignments >= 0], hdb_assignments[hdb_assignments >= 0])
        my_file1 = 'hdb_soft_assignments'
        fig1.savefig(os.path.join(OUTPUT_PATH, str.join('', (my_file1, timestr, '.svg'))))
        plot_accuracy(scores)
    return f_10fps, f_10fps_sc, umap_embeddings, hdb_assignments, soft_assignments, soft_clusters, \
           nn_classifier, scores, nn_assignments
| 12,714
| 51.110656
| 121
|
py
|
B-SOID
|
B-SOID-master/bsoid_umap/config/GLOBAL_CONFIG.py
|
################### THINGS YOU PROBABLY DON'T WANT TO CHANGE ###################
import logging
import sys
# Log INFO and above to stdout with timestamps.
logging.basicConfig(
    format='%(asctime)s %(levelname)-8s %(message)s',
    level='INFO',
    datefmt='%Y-%m-%d %H:%M:%S',
    stream=sys.stdout)
# UMAP params, nonlinear transform
UMAP_PARAMS = {
    'n_components': 3,
    'min_dist': 0.0,  # small value packs points within a cluster tightly
    'random_state': 23,
}
# HDBSCAN params, density based clustering
HDBSCAN_PARAMS = {
    'min_samples': 10  # small value
}
# Feedforward neural network (MLP) params
MLP_PARAMS = {
    'hidden_layer_sizes': (100, 10),  # two hidden layers: 100 units, then 10 units
    'activation': 'logistic',  # logistic appears to outperform tanh and relu
    'solver': 'adam',
    'learning_rate': 'constant',
    'learning_rate_init': 0.001,  # learning rate not too high
    'alpha': 0.0001,  # regularization default is better than higher values.
    'max_iter': 1000,
    'early_stopping': False,
    'verbose': 0  # set to 1 for tuning your feedforward neural network
}
HLDOUT = 0.2  # Test partition ratio to validate clustering separation.
CV_IT = 10  # Number of iterations for cross-validation to show it's not over-fitting.
| 1,176
| 29.179487
| 86
|
py
|
B-SOID
|
B-SOID-master/bsoid_umap/config/LOCAL_CONFIG.py
|
################### THINGS YOU MAY WANT TO CHANGE ###################
BASE_PATH = '/Users/ahsu/B-SOID/datasets'  # Base directory path.
TRAIN_FOLDERS = ['/Train1', '/Train2']  # Data folders used to train the neural network.
PREDICT_FOLDERS = ['/Data1']  # Data folders, can contain the same as training or new data for consistency.
FPS = 60  # Frame-rate of your video.
# Output directory to where you want the analysis to be stored, including csv, model and plots.
OUTPUT_PATH = '/Users/ahsu/Desktop/bsoid_umap_beta'
MODEL_NAME = 'c57bl6_n3_60min'  # Machine learning model name
# IF YOU'D LIKE TO SKIP PLOTS/VIDEOS, change below PLOT/VID settings to False
PLOT = True
VID = True  # if this is true, make sure direct to the video below AND that you created the two specified folders!
# Create a folder to store extracted images, MAKE SURE THIS FOLDER EXISTS.
FRAME_DIR = '/Users/ahsu/B-SOID/datasets/Data1/0_30min_10fpsPNGs'
# Create a folder to store created video snippets/group, MAKE SURE THIS FOLDER EXISTS.
SHORTVID_DIR = '/Users/ahsu/B-SOID/datasets/Data1/examples'
# Now, pick an example video that corresponds to one of the csv files from the PREDICT_FOLDERS
VID_NAME = '/Users/ahsu/B-SOID/datasets/Data1/2019-04-19_09-34-36cut0_30min.mp4'
ID = 0  # What number would the video be in terms of prediction order? (0=file 1/folder1, 1=file2/folder 1, etc.)
# for semisupervised portion
# CSV_PATH =
| 1,411
| 51.296296
| 114
|
py
|
B-SOID
|
B-SOID-master/bsoid_umap/config/__init__.py
|
from bsoid_umap.config.LOCAL_CONFIG import *
from bsoid_umap.config.GLOBAL_CONFIG import *
| 90
| 44.5
| 45
|
py
|
B-SOID
|
B-SOID-master/bsoid_umap/utils/likelihoodprocessing.py
|
"""
likelihood processing utilities
Forward fill low likelihood (x,y)
"""
import glob
import re
import numpy as np
from tqdm import tqdm
from bsoid_umap.utils.visuals import *
def boxcar_center(a, n):
    """Centered moving average of *a* over a window of *n* samples.

    Edges use min_periods=1, so the output has the same length as the input.
    """
    series = pd.Series(a)
    return np.array(series.rolling(window=n, min_periods=1, center=True).mean())
def convert_int(s):
    """Return *s* as an int when it is a pure digit string, else unchanged."""
    return int(s) if s.isdigit() else s
def alphanum_key(s):
    """ Turn a string into a list of string and number chunks.
        "z23a" -> ["z", 23, "a"]
    """
    chunks = re.split('([0-9]+)', s)
    return [convert_int(chunk) for chunk in chunks]
def sort_nicely(l):
    """ Sort the given list in place, in natural (human) order. """
    l[:] = sorted(l, key=alphanum_key)
def get_filenames(folder):
    """
    Gets a list of filenames within a folder
    :param folder: str, folder path (relative to BASE_PATH)
    :return: list, csv filenames in natural sort order
    """
    paths = glob.glob(BASE_PATH + folder + '/*.csv')
    sort_nicely(paths)
    return paths
def import_folders(folders: list):
    """
    Import multiple folders containing .csv files and process them
    :param folders: list, data folders
    :return filenames: list, data filenames (one sub-list per folder)
    :return data: array, filtered csv data
    :return perc_rect_li: list, percent filtered per file
    """
    filenames = []
    rawdata_li = []
    data_li = []
    perc_rect_li = []
    for folder_idx, folder in enumerate(folders):
        csvs = get_filenames(folder)
        for file_idx, csv_path in enumerate(csvs):
            logging.info('Importing CSV file {} from folder {}'.format(file_idx + 1, folder_idx + 1))
            raw_df = pd.read_csv(csv_path, low_memory=False)
            # forward-fill sub-threshold (x, y) before any downstream use
            filt_arr, perc_rect = adp_filt(raw_df)
            logging.info('Done preprocessing (x,y) from file {}, folder {}.'.format(file_idx + 1, folder_idx + 1))
            rawdata_li.append(raw_df)
            perc_rect_li.append(perc_rect)
            data_li.append(filt_arr)
        filenames.append(csvs)
        logging.info('Processed {} CSV files from folder: {}'.format(len(csvs), folder))
    data = np.array(data_li)
    logging.info('Processed a total of {} CSV files, and compiled into a {} data list.'.format(len(data_li),
                                                                                              data.shape))
    return filenames, data, perc_rect_li
def adp_filt(currdf: object):
    """
    Adaptively filter (x, y) coordinates by tracking likelihood.

    For each body part a likelihood threshold is estimated from the first
    rising edge of the likelihood histogram; frames below the threshold are
    forward-filled from the previous frame. Assumes a DLC-style csv layout:
    the first row is dropped, the next row is the coords header containing
    "x"/"y"/"likelihood" labels, and the first column is a frame index.

    :param currdf: object, csv data frame
    :return currdf_filt: 2D array, filtered data (x, y pairs per body part)
    :return perc_rect: 1D array, fraction of sub-threshold frames per body part
    """
    lIndex = []
    xIndex = []
    yIndex = []
    currdf = np.array(currdf[1:])
    # locate the likelihood / x / y columns from the coords header row
    for header in range(len(currdf[0])):
        if currdf[0][header] == "likelihood":
            lIndex.append(header)
        elif currdf[0][header] == "x":
            xIndex.append(header)
        elif currdf[0][header] == "y":
            yIndex.append(header)
    logging.info('Extracting likelihood value...')
    curr_df1 = currdf[:, 1:]  # drop the frame-index column
    datax = curr_df1[:, np.array(xIndex) - 1]
    datay = curr_df1[:, np.array(yIndex) - 1]
    data_lh = curr_df1[:, np.array(lIndex) - 1]
    currdf_filt = np.zeros((datax.shape[0] - 1, (datax.shape[1]) * 2))
    perc_rect = [0] * data_lh.shape[1]
    logging.info('Computing data threshold to forward fill any sub-threshold (x,y)...')
    for x in tqdm(range(data_lh.shape[1])):
        # BUGFIX: np.float was removed in NumPy >= 1.24; use the builtin float
        a, b = np.histogram(data_lh[1:, x].astype(float))
        # threshold = first histogram bin edge where the counts stop falling
        rise_a = np.where(np.diff(a) >= 0)
        if rise_a[0][0] > 1:
            llh = b[rise_a[0][0]]
        else:
            llh = b[rise_a[0][1]]
        data_lh_float = data_lh[1:, x].astype(float)
        perc_rect[x] = np.sum(data_lh_float < llh) / data_lh.shape[0]
        for i in range(1, data_lh.shape[0] - 1):
            if data_lh_float[i] < llh:
                # forward fill: sub-threshold frames repeat the previous (x, y)
                currdf_filt[i, (2 * x):(2 * x + 2)] = currdf_filt[i - 1, (2 * x):(2 * x + 2)]
            else:
                currdf_filt[i, (2 * x):(2 * x + 2)] = np.hstack([datax[i, x], datay[i, x]])
    currdf_filt = np.array(currdf_filt[1:])
    currdf_filt = currdf_filt.astype(float)
    return currdf_filt, perc_rect
def main(folders: list):
    """
    :param folders: list, data folders
    :return filenames: list, data filenames
    :return data: list, filtered data list
    :return perc_rect: 1D array, percent filtered per BODYPART
    """
    return import_folders(folders)
# Preprocess all configured training folders when run as a script
# (TRAIN_FOLDERS comes from the star-imported B-SOiD config).
if __name__ == '__main__':
    main(TRAIN_FOLDERS)
| 4,564
| 30.701389
| 108
|
py
|
B-SOID
|
B-SOID-master/bsoid_umap/utils/statistics.py
|
"""
Summary statistics
"""
import os
import time
import numpy as np
import pandas as pd
from bsoid_umap.config import *
def feat_dist(feats):
    """Summarize each feature row of *feats*.

    Returns, per row: the [5th, 95th] percentile range, the median, and a
    50-bin density histogram (counts and bin edges).
    """
    feat_range, feat_med, p_cts, edges = [], [], [], []
    for row in feats:
        feat_range.append([np.quantile(row, 0.05), np.quantile(row, 0.95)])
        feat_med.append(np.quantile(row, 0.5))
        counts, bin_edges = np.histogram(row, 50, density=True)
        p_cts.append(counts)
        edges.append(bin_edges)
    return feat_range, feat_med, p_cts, edges
def transition_matrix(labels):
    """
    :param labels: 1D array, predicted labels
    :return df_tm: object, transition matrix data frame (rows normalized to
        probabilities; rows with no outgoing transitions stay all zero)
    """
    n = max(labels) + 1
    counts = [[0] * n for _ in range(n)]
    for curr, nxt in zip(labels, labels[1:]):
        counts[curr][nxt] += 1
    for row in counts:
        total = sum(row)
        if total > 0:
            row[:] = [c / total for c in row]
    return pd.DataFrame(counts)
def rle(inarray):
    """ run length encoding. Partial credit to R rle function.
        Multi datatype arrays catered for including non Numpy
        returns: tuple (runlengths, startpositions, values) """
    arr = np.asarray(inarray)  # force numpy
    n = len(arr)
    if n == 0:
        return (None, None, None)
    change = np.array(arr[1:] != arr[:-1])  # pairwise unequal (string safe)
    ends = np.append(np.where(change), n - 1)  # last index of every run
    run_lengths = np.diff(np.append(-1, ends))
    starts = np.cumsum(np.append(0, run_lengths))[:-1]
    return run_lengths, starts, arr[ends]
def behv_time(labels):
    """
    :param labels: 1D array, predicted labels
    :return beh_t: list, fraction of time spent in each label id
        (label ids are assumed to be 0..k-1; absent ids report 0.0)
    """
    total = labels.shape[0]
    return [np.sum(labels == lab) / total for lab in range(len(np.unique(labels)))]
def behv_dur(labels):
    """
    Compute per-label bout duration statistics.

    :param labels: 1D array, predicted labels
    :return runlen_df: object, behavioral duration run lengths data frame
    :return dur_stats: object, behavioral duration statistics data frame
        (percent time, mean duration, and 10/25/50/75/90th percentiles)
    """
    lengths, pos, grp = rle(labels)
    runlen_df = pd.concat([pd.DataFrame(grp, columns=['B-SOiD labels']),
                           pd.DataFrame(pos, columns=['Start time (frames)']),
                           pd.DataFrame(lengths, columns=['Run lengths'])], axis=1)
    beh_t = behv_time(labels)
    quantile_levels = (0.1, 0.25, 0.5, 0.75, 0.9)
    dur_rows = []
    for i in range(0, len(np.unique(grp))):
        bout_lengths = lengths[np.where(grp == i)]
        try:
            # compute the whole row before storing anything so a failure on an
            # empty group cannot leave a partially-filled, inconsistent row
            row = [np.mean(bout_lengths)] + [np.quantile(bout_lengths, q) for q in quantile_levels]
        except (IndexError, ValueError):
            # BUGFIX: label i has no bouts — report zeros for every statistic.
            # Previously the mean was appended before np.quantile raised,
            # leaving a NaN mean alongside zeroed quantiles.
            row = [0, 0, 0, 0, 0, 0]
        dur_rows.append(row)
    alldata = np.column_stack([np.array(beh_t), np.array(dur_rows)])
    micolumns = pd.MultiIndex.from_tuples([('Stats', 'Percent of time'),
                                           ('', 'Mean duration (frames)'), ('', '10th %tile (frames)'),
                                           ('', '25th %tile (frames)'), ('', '50th %tile (frames)'),
                                           ('', '75th %tile (frames)'), ('', '90th %tile (frames)')],
                                          names=['', 'B-SOiD labels'])
    dur_stats = pd.DataFrame(alldata, columns=micolumns)
    return runlen_df, dur_stats
def main(labels):
    """
    :param labels: 1D array: predicted labels
    :return runlen_df: object, behavioral duration run lengths data frame
    :return dur_stats: object, behavioral duration statistics data frame
    :return tm: object, transition matrix data frame
    """
    runlengths, durations = behv_dur(labels)
    return runlengths, durations, transition_matrix(labels)
| 4,859
| 35.541353
| 103
|
py
|
B-SOID
|
B-SOID-master/bsoid_umap/utils/videoprocessing.py
|
"""
Extracting frames from videos
"""
import glob
import random
import numpy as np
import cv2
from tqdm import tqdm
from bsoid_umap.utils.likelihoodprocessing import sort_nicely
from bsoid_umap.utils.visuals import *
def get_vidnames(folder):
    """
    Gets a list of filenames within a folder
    :param folder: str, folder path (relative to BASE_PATH)
    :return: list, video filenames in natural sort order
    """
    paths = glob.glob(BASE_PATH + folder + '/*.mp4')
    sort_nicely(paths)
    return paths
def vid2frame(vidname, labels, fps, output_path=FRAME_DIR):
    """
    Extracts frames every 100ms to match the labels for visualizations
    :param vidname: string, path to video
    :param labels: 1D array, labels from training
    :param fps: scalar, frame-rate of original camera
    :param output_path: string, path to output
    """
    vidobj = cv2.VideoCapture(vidname)
    pbar = tqdm(total=int(vidobj.get(cv2.CAP_PROP_FRAME_COUNT)))
    width = vidobj.get(3)  # CAP_PROP_FRAME_WIDTH (not used below)
    height = vidobj.get(4)  # CAP_PROP_FRAME_HEIGHT (not used below)
    labels = np.hstack((labels[0], labels))  # fill the first frame
    count = 0  # source-video frame index; advances by fps/10 per write
    count1 = 0  # output image / label index (10 fps sampling)
    font_scale = 1
    font = cv2.FONT_HERSHEY_COMPLEX
    rectangle_bgr = (0, 0, 0)  # black box drawn behind the label text
    while vidobj.isOpened():
        ret, frame = vidobj.read()
        if ret:
            try:
                # stamp the group label onto the frame inside a filled box
                text = 'Group' + str(labels[count1])
                (text_width, text_height) = cv2.getTextSize(text, font, fontScale=font_scale, thickness=1)[0]
                text_offset_x = 50
                text_offset_y = 50
                box_coords = ((text_offset_x - 12, text_offset_y + 12),
                              (text_offset_x + text_width + 12, text_offset_y - text_height - 8))
                cv2.rectangle(frame, box_coords[0], box_coords[1], rectangle_bgr, cv2.FILLED)
                cv2.putText(frame, text, (text_offset_x, text_offset_y), font,
                            fontScale=font_scale, color=(255, 255, 255), thickness=1)
                cv2.imwrite(os.path.join(output_path, 'frame{:d}.png'.format(count1)), frame)
                count += round(fps / 10)  # i.e. at 60fps, this skips every 5
                count1 += 1
                vidobj.set(1, count)  # 1 == CAP_PROP_POS_FRAMES: seek ahead
                pbar.update(round(fps / 10))
            except:
                # NOTE(review): bare except silently skips frames once labels
                # run out (IndexError) or a cv2 call fails — confirm intended
                pass
        else:
            vidobj.release()
            break
    pbar.close()
    return
def import_vidfolders(folders: list, output_path: list):
    """
    Import multiple folders containing .mp4 files and extract frames from them
    :param folders: list of folder paths
    :param output_path: list, directory to where you want to store extracted vid images in LOCAL_CONFIG
    """
    vidnames = []
    for i, fd in enumerate(folders):  # Loop through folders
        v = get_vidnames(fd)
        for j, vidname in enumerate(v):
            logging.info('Extracting frames from {} and appending labels to these images...'.format(vidname))
            # NOTE(review): vid2frame takes (vidname, labels, fps, output_path);
            # this call passes output_path in the `labels` position and omits
            # fps, so it will fail at runtime — confirm and fix the call site.
            vid2frame(vidname, output_path)
            logging.info('Done extracting images and writing labels, from MP4 file {}'.format(j + 1))
        vidnames.append(v)
        logging.info('Processed {} MP4 files from folder: {}'.format(len(v), fd))
    return
def repeatingNumbers(labels):
    """
    Run-length scan over *labels*.
    :param labels: 1D array, predicted labels
    :return n_list: 1D array, the label of each run
    :return idx: 1D array, label start index
    :return lengths: 1D array, index span (end - start) of each run; a run of
        k equal items is reported as k - 1, and a trailing single-element run
        is dropped
    """
    n_list = []
    idx = []
    lengths = []
    i = 0
    while i < len(labels) - 1:
        start = i
        n_list.append(labels[start])
        idx.append(start)
        # advance to the last index of the current run
        while i < len(labels) - 1 and labels[i] == labels[i + 1]:
            i += 1
        lengths.append(i - start)
        i += 1
    return n_list, idx, lengths
def create_labeled_vid(labels, crit=3, counts=5, frame_dir=FRAME_DIR, output_path=SHORTVID_DIR):
    """
    :param labels: 1D array, labels from training or testing
    :param crit: scalar, minimum duration for random selection of behaviors, default 300ms
    :param counts: scalar, number of randomly generated examples, default 5
    :param frame_dir: string, directory to where you extracted vid images in LOCAL_CONFIG
    :param output_path: string, directory to where you want to store short video examples in LOCAL_CONFIG
    """
    images = [img for img in os.listdir(frame_dir) if img.endswith(".png")]
    sort_nicely(images)
    fourcc = cv2.VideoWriter_fourcc(*'mp4v')
    frame = cv2.imread(os.path.join(frame_dir, images[0]))
    height, width, layers = frame.shape
    rnges = []
    n, idx, lengths = repeatingNumbers(labels)
    idx2 = []
    # keep only bouts lasting at least `crit` frames
    for i, j in enumerate(lengths):
        if j >= crit:
            rnges.append(range(idx[i], idx[i] + j))
            idx2.append(i)
    for b, i in enumerate(tqdm(np.unique(labels))):
        # collect all sufficiently-long frame ranges belonging to group i
        a = []
        for j in range(0, len(rnges)):
            if n[idx2[j]] == i:
                a.append(rnges[j])
        try:
            # sample `counts` example bouts and write each as a 5 fps clip
            rand_rnges = random.sample(a, counts)
            for k in range(0, len(rand_rnges)):
                video_name = 'group_{}_example_{}.mp4'.format(i, k)
                grpimages = []
                for l in rand_rnges[k]:
                    grpimages.append(images[l])
                video = cv2.VideoWriter(os.path.join(output_path, video_name), fourcc, 5, (width, height))
                for image in grpimages:
                    video.write(cv2.imread(os.path.join(frame_dir, image)))
                cv2.destroyAllWindows()
                video.release()
        except:
            # NOTE(review): bare except appears intended to skip groups with
            # fewer than `counts` bouts, but it also hides cv2/write errors
            pass
    return
def main(vidname, labels, fps, output_path):
    # Extract labeled frames from the video, then stitch per-group example
    # clips (bouts of >= `crit` sampled frames) into SHORTVID_DIR.
    vid2frame(vidname, labels, fps, output_path)
    create_labeled_vid(labels, crit=3, counts=5, frame_dir=output_path, output_path=SHORTVID_DIR)
    return
| 5,824
| 34.95679
| 109
|
py
|
B-SOID
|
B-SOID-master/bsoid_umap/utils/visuals.py
|
"""
Visualization functions and saving plots.
"""
import os
import time
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.axes._axes import _log as matplotlib_axes_logger
import numpy as np
import pandas as pd
import seaborn as sn
from bsoid_umap.config import *
# Silence matplotlib axes-module log messages below ERROR level.
matplotlib_axes_logger.setLevel('ERROR')
def plot_classes(data, assignments):
    """ Plot umap_embeddings for HDBSCAN assignments
    :param data: 2D array, umap_embeddings
    :param assignments: 1D array, HDBSCAN assignments
    :return fig: the 3D scatter figure
    """
    uk = list(np.unique(assignments))
    # one Spectral colormap entry per unique assignment
    R = np.linspace(0, 1, len(uk))
    cmap = plt.cm.get_cmap("Spectral")(R)
    umap_x, umap_y, umap_z = data[:, 0], data[:, 1], data[:, 2]
    # umap_x, umap_y= data[:, 0], data[:, 1]
    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')
    # ax = fig.add_subplot(111)
    for g in np.unique(assignments):
        # scatter each group's points in its own color
        idx = np.where(np.array(assignments) == g)
        ax.scatter(umap_x[idx], umap_y[idx], umap_z[idx], c=cmap[g],
                   label=g, s=0.5, marker='o', alpha=0.8)
        # ax.scatter(umap_x[idx], umap_y[idx], c=cmap[g],
        #            label=g, s=0.5, marker='o', alpha=0.8)
    ax.set_xlabel('Dim. 1')
    ax.set_ylabel('Dim. 2')
    ax.set_zlabel('Dim. 3')
    plt.title('UMAP enhanced clustering')
    plt.legend(ncol=3)
    plt.show()
    return fig
def plot_accuracy(scores):
    """
    Boxplot plus jittered scatter of cross-validated accuracies; saves an SVG.
    :param scores: 1D array, cross-validated accuracies for MLP classifier.
    """
    fig = plt.figure(facecolor='w', edgecolor='k')
    fig.suptitle("Performance on {} % data".format(HLDOUT * 100))
    ax = fig.add_subplot(111)
    ax.boxplot(scores, notch=None)
    # horizontal jitter so individual fold scores don't overlap
    x = np.random.normal(1, 0.04, size=len(scores))
    plt.scatter(x, scores, s=40, c='r', alpha=0.5)
    ax.set_xlabel('MLP classifier')
    ax.set_ylabel('Accuracy')
    plt.show()
    timestr = time.strftime("_%Y%m%d_%H%M")
    my_file = 'clf_scores'
    fig.savefig(os.path.join(OUTPUT_PATH, str.join('', (my_file, timestr, '.svg'))))
def plot_durhist(lengths, grp):
    """
    Plot overlaid duration histograms, one per label, and save them as SVG.
    :param lengths: 1D array, run lengths of each bout.
    :param grp: 1D array, corresponding label.
    """
    timestr = time.strftime("_%Y%m%d_%H%M")
    fig, ax = plt.subplots()
    R = np.linspace(0, 1, len(np.unique(grp)))
    cmap = plt.cm.get_cmap("Spectral")(R)
    # BUGFIX: the title previously read an undefined global `TM`, raising
    # NameError at runtime; it is also set once instead of per iteration.
    fig.suptitle("Duration histogram of {} behaviors".format(len(np.unique(grp))))
    for i in range(0, len(np.unique(grp))):
        x = lengths[np.where(grp == i)]
        ax.hist(x, density=True, color=cmap[i], alpha=0.3, label='Group {}'.format(i))
    plt.legend(loc='upper right')
    plt.show()
    my_file = 'duration_hist_100msbins'
    fig.savefig(os.path.join(OUTPUT_PATH, str.join('', (my_file, timestr, '.svg'))))
    return
def plot_tmat(tm: object):
    """
    Heatmap of the behavior transition matrix.
    :param tm: object, transition matrix data frame
    :return fig: the heatmap figure
    """
    fig = plt.figure()
    fig.suptitle("Transition matrix of {} behaviors".format(tm.shape[0]))
    sn.heatmap(tm, annot=True)
    plt.xlabel("Next frame behavior")
    plt.ylabel("Current frame behavior")
    plt.show()
    return fig
def _plot_feat_hists(feats_arr, labels_arr, file_prefix, timestr):
    """Plot per-label histograms for every feature row and save each as SVG.

    Helper for plot_feats: one figure per feature, one subplot per label.
    :param feats_arr: 2D array, features (rows) x instances (columns)
    :param labels_arr: 1D array, label per instance
    :param file_prefix: str, prefix for the saved file name ('' or 'sessK_')
    :param timestr: str, timestamp suffix shared by all saved figures
    """
    R = np.linspace(0, 1, len(np.unique(labels_arr)))
    color = plt.cm.get_cmap("Spectral")(R)
    feat_ls = ("Relative snout to forepaws placement", "Relative snout to hind paws placement",
               "Inter-forepaw distance", "Body length", "Body angle",
               "Snout displacement", "Tail-base displacement")
    for j in range(0, feats_arr.shape[0]):
        fig = plt.figure(facecolor='w', edgecolor='k')
        mu = np.mean(feats_arr[j, :])
        sd = np.std(feats_arr[j, :])
        for i in range(0, len(np.unique(labels_arr)) - 1):
            plt.subplot(len(np.unique(labels_arr)), 1, i + 1)
            if j == 2 or j == 3 or j == 5 or j == 6:
                # distance-like features: one-sided range starting at 0
                plt.hist(feats_arr[j, labels_arr == i],
                         bins=np.linspace(0, mu + 3 * sd, num=50),
                         range=(0, mu + 3 * sd),
                         color=color[i], density=True)
                fig.suptitle("{} pixels".format(feat_ls[j]))
                plt.xlim(0, mu + 3 * sd)
            else:
                # signed features: symmetric +/- 3 SD window around the mean
                plt.hist(feats_arr[j, labels_arr == i],
                         bins=np.linspace(mu - 3 * sd, mu + 3 * sd, num=50),
                         range=(mu - 3 * sd, mu + 3 * sd),
                         color=color[i], density=True)
                plt.xlim(mu - 3 * sd, mu + 3 * sd)
                fig.suptitle("{} pixels".format(feat_ls[j]))
            if i < len(np.unique(labels_arr)) - 1:
                # hide x tick labels on all but the bottom subplot
                plt.tick_params(axis='x', which='both', bottom=False, top=False, labelbottom=False)
        my_file = '{}feat{}_hist'.format(file_prefix, j + 1)
        fig.savefig(os.path.join(OUTPUT_PATH, str.join('', (my_file, timestr, '.svg'))))
    plt.show()


def plot_feats(feats: list, labels: list):
    """
    :param feats: list, features for multiple sessions (or a single 2D array)
    :param labels: list, labels for multiple sessions (or a single 1D array)

    The two branches of the original implementation were near-identical and
    are consolidated into _plot_feat_hists; only the saved-file prefix and
    the array conversion differ between the list and single-session cases.
    """
    timestr = time.strftime("_%Y%m%d_%H%M")
    if isinstance(labels, list):
        for k in range(0, len(feats)):
            _plot_feat_hists(np.array(feats[k]), np.array(labels[k]),
                             'sess{}_'.format(k + 1), timestr)
    else:
        _plot_feat_hists(feats, labels, '', timestr)
def main():
    # Placeholder entry point; the plotting helpers above are imported and
    # called from other B-SOiD modules rather than run directly.
    return
if __name__ == '__main__':
    main()
| 8,376
| 44.037634
| 113
|
py
|
SFL-Structural-Federated-Learning
|
SFL-Structural-Federated-Learning-main/main.py
|
import torch
import random
import copy
import numpy as np
import time
from BResidual import BResidual
from options import arg_parameter
from data_util import load_cifar10, load_mnist
from federated import Cifar10FedEngine
from aggregator import parameter_aggregate, read_out
from util import *
def main(args):
    """
    Federated-training driver.

    Loads the dataset/model selected by ``args.dataset``, then runs
    ``args.com_round`` communication rounds: each round trains a random
    subset of clients, aggregates parameters on the server, reads out a new
    global model, and validates every ``args.valid_freq`` rounds.

    :param args: parsed options namespace (see options.arg_parameter)
    :raises ValueError: if ``args.dataset`` is not "cifar10" or "mnist"
    """
    args.device = torch.device(args.device)
    print("Prepare data and model...")
    if args.dataset == "cifar10":
        train_batches, test_batches, A, overall_tbatches = load_cifar10(args)
        model = BResidual(3)
    elif args.dataset == "mnist":
        train_batches, test_batches, A, overall_tbatches = load_mnist(args)
        model = BResidual(1)
    else:
        # BUGFIX: previously printed a warning and then unpacked None into
        # five names, raising an opaque TypeError; fail fast instead.
        print("Unknown model type ... ")
        raise ValueError("Unknown dataset: {}".format(args.dataset))
    print("Prepare parameter holders")
    w_server, w_local = model.get_state()
    w_server = [w_server] * args.clients
    w_local = [w_local] * args.clients
    global_model = copy.deepcopy(w_server)
    personalized_model = copy.deepcopy(w_server)
    server_state = None
    client_states = [None] * args.clients
    print2file(str(args), args.logDir, True)
    nParams = sum([p.nelement() for p in model.parameters()])
    print2file('Number of model parameters is ' + str(nParams), args.logDir, True)
    print("Start Training...")
    # at least one client participates per round
    num_collaborator = max(int(args.client_frac * args.clients), 1)
    for com in range(1, args.com_round + 1):
        selected_user = np.random.choice(range(args.clients), num_collaborator, replace=False)
        train_time = []
        train_loss = []
        train_acc = []
        for c in selected_user:
            # Training
            engine = Cifar10FedEngine(args, copy.deepcopy(train_batches[c]), global_model[c], personalized_model[c],
                                      w_local[c], {}, c, 0, "Train", server_state, client_states[c])
            outputs = engine.run()
            w_server[c] = copy.deepcopy(outputs['params'][0])
            w_local[c] = copy.deepcopy(outputs['params'][1])
            train_time.append(outputs["time"])
            train_loss.append(outputs["loss"])
            train_acc.append(outputs["acc"])
            client_states[c] = outputs["c_state"]
        mtrain_time = np.mean(train_time)
        mtrain_loss = np.mean(train_loss)
        mtrain_acc = np.mean(train_acc)
        log = 'Communication Round: {:03d}, Train Loss: {:.4f},' \
              ' Train Accuracy: {:.4f}, Training Time: {:.4f}/com_round'
        # BUGFIX: arguments were previously passed as (time, loss, acc),
        # mislabeling all three values; order now matches the template.
        print2file(log.format(com, mtrain_loss, mtrain_acc, mtrain_time),
                   args.logDir, True)
        # Server aggregation
        t1 = time.time()
        personalized_model, client_states, server_state = \
            parameter_aggregate(args, A, w_server, global_model, server_state, client_states, selected_user)
        t2 = time.time()
        log = 'Communication Round: {:03d}, Aggregation Time: {:.4f} secs'
        print2file(log.format(com, (t2 - t1)), args.logDir, True)
        # Readout for global model
        global_model = read_out(personalized_model, args.device)
        # Validation
        if com % args.valid_freq == 0:
            single_vtime = []
            single_vloss = []
            single_vacc = []
            all_vtime = []
            all_vloss = []
            all_vacc = []
            for c in range(args.clients):
                batch_time = []
                batch_loss = []
                batch_acc = []
                for batch in test_batches:
                    tengine = Cifar10FedEngine(args, copy.deepcopy(batch), personalized_model[c], personalized_model[c],
                                               w_local[c], {}, c, 0, "Test", server_state, client_states[c])
                    outputs = tengine.run()
                    batch_time.append(outputs["time"])
                    batch_loss.append(outputs["loss"])
                    batch_acc.append(outputs["acc"])
                # NOTE(review): indexing per-batch results by the client id
                # assumes len(test_batches) >= args.clients — confirm upstream
                single_vtime.append(batch_time[c])
                single_vloss.append(batch_loss[c])
                single_vacc.append(batch_acc[c])
                all_vtime.append(np.mean(batch_time))
                all_vloss.append(np.mean(batch_loss))
                all_vacc.append(np.mean(batch_acc))
            single_log = 'SingleValidation Round: {:03d}, Valid Loss: {:.4f}, ' \
                         'Valid Accuracy: {:.4f}, Valid SD: {:.4f}, Test Time: {:.4f}/epoch'
            print2file(single_log.format(com, np.mean(single_vloss), np.mean(single_vacc), np.std(single_vacc),
                                         np.mean(single_vtime)), args.logDir, True)
            all_log = 'AllValidation Round: {:03d}, Valid Loss: {:.4f}, ' \
                      'Valid Accuracy: {:.4f}, Valid SD: {:.4f}, Test Time: {:.4f}/epoch'
            print2file(all_log.format(com, np.mean(all_vloss), np.mean(all_vacc), np.std(all_vacc),
                                      np.mean(all_vtime)), args.logDir, True)
if __name__ == "__main__":
    # Let cuDNN benchmark conv algorithms (assumes fixed input shapes).
    torch.backends.cudnn.benchmark = True
    option = arg_parameter()
    # presumably seeds global RNGs from option.seed — see util.initial_environment
    initial_environment(option.seed)
    main(option)
    print("Everything so far so good....")
| 5,188
| 37.437037
| 120
|
py
|
SFL-Structural-Federated-Learning
|
SFL-Structural-Federated-Learning-main/BResidual.py
|
import torch.nn as nn
import torch
from collections import namedtuple
import numpy as np
# Merge any number of dicts; later dicts win on key collisions.
union = lambda *dicts: {k: v for d in dicts for (k, v) in d.items()}
# Separator used when flattening nested layer names into graph node names.
sep = '_'
# Marks a graph input path as relative to the current prefix (see build_graph).
RelativePath = namedtuple('RelativePath', ('parts'))
rel_path = lambda *parts: RelativePath(parts)
class BResidual(nn.Module):
    """Residual CNN assembled from a flat name->(module, inputs) graph.

    The network is described as a nested dict (see `net`), flattened by
    `build_graph`; each node becomes a submodule attribute, and `forward`
    evaluates the nodes in insertion order while caching every output.
    """
    def __init__(self, reg_channel):
        # reg_channel: number of input image channels (3 for cifar10, 1 for mnist)
        losses = {
            'loss': (nn.CrossEntropyLoss(reduction='none'), [('classifier',), ('target',)]),
            'correct': (Correct(), [('classifier',), ('target',)]),
        }
        network = union(net(reg_channel), losses)
        self.graph = build_graph(network)
        super().__init__()
        # register every graph node as a submodule so parameters are tracked
        for n, (v, _) in self.graph.items():
            setattr(self, n, v)
    def forward(self, inputs):
        # inputs: dict of external tensors (e.g. 'input', 'target'); every
        # node's output is cached by name so later nodes and callers can
        # read intermediate values, losses, and correctness flags
        self.cache = dict(inputs)
        for n, (_, i) in self.graph.items():
            self.cache[n] = getattr(self, n)(*[self.cache[x] for x in i])
        return self.cache
    def half(self):
        # Intentionally a no-op (half-precision conversion disabled); kept so
        # callers that chain .half() continue to work.
        # for module in self.children():
        #     if type(module) is not nn.BatchNorm2d:
        #         module.half()
        return self
    def get_state(self, mode="full"):
        # Returns (server_weights, local_weights); the local part is empty here.
        # return [i for i in self.named_parameters()], []
        return self.state_dict(), []
    def set_state(self, w_server, w_local, mode="full"):
        # Only the server weights are loaded; w_local is accepted for
        # interface symmetry with get_state but ignored.
        # sd = self.state_dict()
        # for key, param in w_server:
        #     if key in sd.keys():
        #         sd[key] = param.clone().detach()
        #     else:
        #         print("Server layers mismatch at 'set_state' function.")
        #
        # for key, param in w_local:
        #     if key in sd.keys():
        #         sd[key] = param.clone().detach()
        #     else:
        #         print("Local layers mismatch at 'set_state' function.")
        self.load_state_dict(w_server)
def conv_bn(c_in, c_out, bn_weight_init=1.0, **kw):
    """3x3 conv -> batch norm -> ReLU stage, described as a name->module dict."""
    stage = {
        'conv': nn.Conv2d(c_in, c_out, kernel_size=3, stride=1, padding=1, bias=False),
        'bn': batch_norm(c_out, bn_weight_init=bn_weight_init, **kw),
        'relu': nn.ReLU(True),
    }
    return stage
def residual(c, **kw):
    """Residual block: identity skip plus two conv_bn stages, summed."""
    block = {
        'in': Identity(),
        'res1': conv_bn(c, c, **kw),
        'res2': conv_bn(c, c, **kw),
    }
    # 'add' joins the skip ('in') with the second stage's ReLU output
    block['add'] = (Add(), [rel_path('in'), rel_path('res2', 'relu')])
    return block
def basic_net(channels, weight, pool, **kw):
    # Backbone: prep conv -> three pooled conv stages -> pool/flatten/linear,
    # ending in a Mul node that scales the 10-way logits by `weight`.
    return {
        'prep': conv_bn(channels["reg"], channels['prep'], **kw),
        # 'prep': conv_bn(3, channels['prep'], **kw),
        'layer1': dict(conv_bn(channels['prep'], channels['layer1'], **kw), pool=pool),
        'layer2': dict(conv_bn(channels['layer1'], channels['layer2'], **kw), pool=pool),
        'layer3': dict(conv_bn(channels['layer2'], channels['layer3'], **kw), pool=pool),
        # 'pool': nn.MaxPool2d(8),
        'pool': nn.MaxPool2d(2),
        'flatten': Flatten(),
        'linear': nn.Linear(channels['layer3'], 10, bias=False),
        'classifier': Mul(weight),
    }
def net(reg_channel, channels=None, weight=0.2, pool=nn.MaxPool2d(2), extra_layers=(), res_layers=('layer1', 'layer2'), **kw):
    """Assemble the nested layer-dict for the residual network.

    NOTE(review): `pool=nn.MaxPool2d(2)` is a default argument evaluated once,
    so the same module instance is shared across calls and layers; MaxPool2d
    carries no parameters, so this looks harmless — confirm if pooling ever
    gains state.
    """
    channels = channels or {'reg': reg_channel, 'prep': 64, 'layer1': 128, 'layer2': 256, 'layer3': 256, }
    n = basic_net(channels, weight, pool, **kw)
    # insert residual branches / extra conv blocks into the requested stages
    for layer in res_layers:
        n[layer]['residual'] = residual(channels[layer], **kw)
    for layer in extra_layers:
        n[layer]['extra'] = conv_bn(channels[layer], channels[layer], **kw)
    return n
def build_graph(net):
    """Flatten the nested layer-dict into {joined_name: (module, [input_names])}.

    A node given as a bare module defaults its input to the output of the
    node listed immediately before it (the very first node reads 'input');
    a node given as (module, paths) has RelativePath entries resolved
    against the node's own prefix.
    """
    net = dict(path_iter(net))
    # default_inputs[idx] is the input for the idx-th node when none is given:
    # the previous node's key (or the raw 'input' for the first node).
    default_inputs = [[('input',)]]+[[k] for k in net.keys()]
    with_default_inputs = lambda vals: (val if isinstance(val, tuple) else (val, default_inputs[idx]) for idx, val in enumerate(vals))
    parts = lambda path, pfx: tuple(pfx) + path.parts if isinstance(path, RelativePath) else (path,) if isinstance(path, str) else path
    return {sep.join((*pfx, name)): (val, [sep.join(parts(x, pfx)) for x in inputs]) for (*pfx, name), (val, inputs) in zip(net.keys(), with_default_inputs(net.values()))}
def path_iter(nested_dict, pfx=()):
    """Depth-first walk of a nested dict, yielding (key_path_tuple, leaf_value)."""
    for key, value in nested_dict.items():
        path = (*pfx, key)
        if isinstance(value, dict):
            # descend into sub-dicts, extending the key path
            yield from path_iter(value, path)
        else:
            yield (path, value)
def batch_norm(num_channels, bn_bias_init=None, bn_bias_freeze=False, bn_weight_init=None, bn_weight_freeze=False):
    """Create a BatchNorm2d layer with optional initialisation and freezing
    of its affine bias/weight parameters."""
    layer = nn.BatchNorm2d(num_channels)
    for param, init, freeze in ((layer.bias, bn_bias_init, bn_bias_freeze),
                                (layer.weight, bn_weight_init, bn_weight_freeze)):
        if init is not None:
            param.data.fill_(init)
        if freeze:
            # exclude this parameter from gradient updates
            param.requires_grad = False
    return layer
class Identity(nn.Module):
    """Pass-through module; used as a named graph node (e.g. residual input)."""

    def forward(self, x):
        return x
class Mul(nn.Module):
    """Scale the input by a fixed scalar weight (logit temperature).

    NOTE(review): overrides __call__ directly instead of forward, which
    bypasses nn.Module hooks — kept as-is to preserve behavior.
    """
    def __init__(self, weight):
        super().__init__()
        self.weight = weight
    def __call__(self, x):
        return x * self.weight
class Flatten(nn.Module):
    """Collapse activations to (N, C); trailing spatial dims must multiply to 1."""

    def forward(self, x):
        batch, channels = x.size(0), x.size(1)
        return x.view(batch, channels)
class Add(nn.Module):
    """Element-wise sum of two inputs (the residual join)."""

    def forward(self, x, y):
        return torch.add(x, y)
class Correct(nn.Module):
    """Per-sample correctness mask: True where the arg-max logit matches the target."""

    def forward(self, classifier, target):
        predictions = classifier.argmax(dim=1)
        return predictions == target
#####################
## data preprocessing
#####################
cifar10_mean = (0.4914, 0.4822, 0.4465)  # per-channel mean of CIFAR-10 train pixels / 255
cifar10_std = (0.2471, 0.2435, 0.2616)   # per-channel std of CIFAR-10 train pixels / 255
def normalise(x, mean=cifar10_mean, std=cifar10_std):
    """Standardise uint8 NHWC images channel-wise: (x - 255*mean) * 1/(255*std).

    Returns a new float32 array; the caller's array is left untouched.
    """
    arr = np.array(x, np.float32)  # explicit copy
    m = np.array(mean, np.float32)
    s = np.array(std, np.float32)
    arr -= m * 255
    arr *= 1.0 / (255 * s)
    return arr
def pad(x, border=4):
    """Reflect-pad the H and W axes of an NHWC batch by `border` pixels."""
    widths = [(0, 0), (border, border), (border, border), (0, 0)]
    return np.pad(x, widths, mode='reflect')
def transpose(x, source='NHWC', target='NCHW'):
    """Permute the axes of x from the `source` layout string to `target`."""
    order = [source.index(axis) for axis in target]
    return x.transpose(order)
#####################
## data loading
#####################
class Batches():
    """Wrap a dataset in a DataLoader that yields {'input', 'target'} dicts,
    optionally re-sampling the dataset's augmentation choices each epoch
    and moving batches to a device."""
    def __init__(self, dataset, batch_size, shuffle, device, set_random_choices=False, num_workers=0, drop_last=False):
        self.dataset = dataset
        self.batch_size = batch_size
        self.set_random_choices = set_random_choices
        self.device = device
        self.dataloader = torch.utils.data.DataLoader(
            dataset, batch_size=batch_size, num_workers=num_workers, pin_memory=True, shuffle=shuffle,
            drop_last=drop_last
        )
    def __iter__(self):
        if self.set_random_choices:
            # Transform-style datasets pre-sample per-item augmentation
            # parameters for the whole epoch.
            self.dataset.set_random_choices()
        # return ({'input': x.to(device).half(), 'target': y.to(device).long()} for (x, y) in self.dataloader)
        if self.device is not None:
            return ({'input': x.to(self.device), 'target': y.to(self.device).long()} for (x, y) in self.dataloader)
        else:
            return ({'input': x, 'target': y.long()} for (x, y) in self.dataloader)
    def __len__(self):
        return len(self.dataloader)
| 6,919
| 32.756098
| 171
|
py
|
SFL-Structural-Federated-Learning
|
SFL-Structural-Federated-Learning-main/GraphConstructor.py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class GraphConstructor(nn.Module):
    """Learn a directed client-affinity graph from node embeddings.

    The adjacency is relu(tanh(alpha * (M1 M2^T - M2 M1^T))), i.e. an
    antisymmetric score passed through saturating nonlinearities, which
    yields a directed graph. eval() additionally keeps only the top-k
    entries per row.
    """
    def __init__(self, nnodes, k, dim, device, alpha=3, static_feat=None):
        super(GraphConstructor, self).__init__()
        self.nnodes = nnodes
        if static_feat is not None:
            # project fixed node features instead of learning embeddings
            xd = static_feat.shape[1]
            self.lin1 = nn.Linear(xd, dim)
            self.lin2 = nn.Linear(xd, dim)
        else:
            self.emb1 = nn.Embedding(nnodes, dim)
            self.emb2 = nn.Embedding(nnodes, dim)
            self.lin1 = nn.Linear(dim, dim)
            self.lin2 = nn.Linear(dim, dim)
        self.device = device
        self.k = k          # neighbours kept per row in eval()
        self.dim = dim
        self.alpha = alpha  # sharpness factor inside tanh
        self.static_feat = static_feat
    def forward(self, idx):
        if self.static_feat is None:
            nodevec1 = self.emb1(idx)
            nodevec2 = self.emb2(idx)
        else:
            nodevec1 = self.static_feat[idx, :]
            nodevec2 = nodevec1
        nodevec1 = torch.tanh(self.alpha*self.lin1(nodevec1))
        nodevec2 = torch.tanh(self.alpha*self.lin2(nodevec2))
        a = torch.mm(nodevec1, nodevec2.transpose(1, 0)) - torch.mm(nodevec2, nodevec1.transpose(1, 0))
        adj = F.relu(torch.tanh(self.alpha*a))
        return adj
    # NOTE(review): this shadows nn.Module.eval(), so switching the module to
    # evaluation mode via .eval() is unavailable — confirm this is intended.
    def eval(self, idx, full=False):
        if self.static_feat is None:
            nodevec1 = self.emb1(idx)
            nodevec2 = self.emb2(idx)
        else:
            nodevec1 = self.static_feat[idx, :]
            nodevec2 = nodevec1
        nodevec1 = torch.tanh(self.alpha*self.lin1(nodevec1))
        nodevec2 = torch.tanh(self.alpha*self.lin2(nodevec2))
        a = torch.mm(nodevec1, nodevec2.transpose(1, 0))-torch.mm(nodevec2, nodevec1.transpose(1, 0))
        adj = F.relu(torch.tanh(self.alpha*a))
        if not full:
            # sparsify: keep the k largest scores per row, set their mask to 1
            mask = torch.zeros(idx.size(0), idx.size(0)).to(self.device)
            mask.fill_(float('0'))  # redundant: mask is already zero-initialised
            s1, t1 = adj.topk(self.k, 1)
            mask.scatter_(1, t1, s1.fill_(1))
            adj = adj*mask
        return adj
| 2,074
| 31.421875
| 103
|
py
|
SFL-Structural-Federated-Learning
|
SFL-Structural-Federated-Learning-main/aggregator.py
|
import copy
import torch
import os
import pickle as pk
from util import sd_matrixing
from data_util import normalize_adj
from GraphConstructor import GraphConstructor
from optimiser import FedProx
import numpy as np
from scipy import linalg
def parameter_aggregate(args, A, w_server, global_model, server_state, client_states, active_idx):
    """Dispatch one round of server-side aggregation according to args.agg.

    Returns (personalized_models, new_client_states, new_server_state);
    the two state values are only populated for the 'scaffold' strategy.
    """
    # update global weights
    new_s_state = None
    new_c_state = [None] * args.clients
    if args.agg == 'avg' or args.agg == "prox" or args.agg == "scaf":
        w_server = average_dic(w_server, args.device)
        w_server = [w_server] * args.clients  # every client gets the same averaged model
        personalized_model = copy.deepcopy(w_server)
    elif args.agg == "att":
        w_server = att_dic(w_server, global_model[0], args.device)
        w_server = [w_server] * args.clients
        personalized_model = copy.deepcopy(w_server)
    elif args.agg == "graph" or args.agg == "graph_v2" or args.agg == "graph_v3":
        # graph-based aggregation yields a distinct model per client
        personalized_model = graph_dic(w_server, A, args)
    elif args.agg == "scaffold":
        new_s_state, new_c_state = scaffold_update(server_state, client_states, active_idx, args)
        w_server = average_dic(w_server, args.device)
        w_server = [w_server] * args.clients
        personalized_model = copy.deepcopy(w_server)
    else:
        personalized_model = None
        exit('Unrecognized aggregation.')
    return personalized_model, new_c_state, new_s_state
def average_dic(model_dic, device, dp=0.001):
    """Element-wise average of a list of state dicts, plus Gaussian noise of
    scale `dp` per entry (differential-privacy-style perturbation).

    `device` is accepted for interface compatibility but unused.
    NOTE: the noise is drawn on CPU with torch.randn and added to every key.
    """
    averaged = copy.deepcopy(model_dic[0])
    count = len(model_dic)
    for key in averaged.keys():
        total = averaged[key].data.clone().detach()
        for other in model_dic[1:]:
            total = total + other[key].data.clone().detach()
        averaged[key] = total.div(count) + torch.mul(torch.randn(total.shape), dp)
    return averaged
def att_dic(w_clients, w_server, device, stepsize=1, metric=1, dp=0.001):
    """Attention-style (FedAtt) aggregation: move the server weights toward
    the clients, weighting each client per layer by the softmax of its
    p-norm distance to the server.

    metric: p-norm order for the per-layer distance.
    dp: scale of Gaussian noise added to each updated layer.
    `device` is accepted but unused; `att_mat` is assigned but never used.
    """
    w_next = copy.deepcopy(w_server)
    att, att_mat = {}, {}
    # per-layer attention scores, one scalar per client
    for k in w_server.keys():
        w_next[k] = torch.zeros_like(w_server[k]).cpu()
        att[k] = torch.zeros(len(w_clients)).cpu()
    for k in w_next.keys():
        for i in range(0, len(w_clients)):
            att[k][i] = torch.norm((w_server[k]-w_clients[i][k]).type(torch.float32), metric)
    for k in w_next.keys():
        att[k] = torch.nn.functional.softmax(att[k], dim=0)
    for k in w_next.keys():
        att_weight = torch.zeros_like(w_server[k])
        for i in range(0, len(w_clients)):
            # cast the softmax weight to the layer dtype (handles integer buffers)
            datatype = w_server[k].dtype
            att_weight += torch.mul(w_server[k] - w_clients[i][k], att[k][i].type(datatype))
        w_next[k] = w_server[k] - torch.mul(att_weight, stepsize) + torch.mul(torch.randn(w_server[k].shape), dp)
    return w_next
def graph_dic(models_dic, pre_A, args):
    """Graph-based aggregation: propagate flattened client parameters over the
    client graph A for args.layers hops, then blend with the originals.

    Mutates and returns `models_dic` (each client's state dict is rebuilt
    from its row of the propagated parameter matrix).
    """
    keys = []
    key_shapes = []
    param_metrix = []
    # flatten every client state dict into one row vector
    for model in models_dic:
        param_metrix.append(sd_matrixing(model).clone().detach())
    param_metrix = torch.stack(param_metrix)
    for key, param in models_dic[0].items():
        keys.append(key)
        key_shapes.append(list(param.data.shape))
    if args.agg == "graph_v2" or args.agg == "graph_v3":
        # construct the adjacency from the current client parameters
        subgraph_size = min(args.subgraph_size, args.clients)
        A = generate_adj(param_metrix, args, subgraph_size).cpu().detach().numpy()
        A = normalize_adj(A)
        A = torch.tensor(A)
        if args.agg == "graph_v3":
            # exponential-moving-average blend with the previous adjacency
            A = (1 - args.adjbeta) * pre_A + args.adjbeta * A
    else:
        A = pre_A
    # Aggregating: args.layers rounds of propagation A^layers @ params
    aggregated_param = torch.mm(A, param_metrix)
    for i in range(args.layers - 1):
        aggregated_param = torch.mm(A, aggregated_param)
    new_param_matrix = (args.serveralpha * aggregated_param) + ((1 - args.serveralpha) * param_metrix)
    # reconstruct each client's parameters from its row, slice by slice
    for i in range(len(models_dic)):
        pointer = 0
        for k in range(len(keys)):
            num_p = 1
            for n in key_shapes[k]:
                num_p *= n
            models_dic[i][keys[k]] = new_param_matrix[i][pointer:pointer + num_p].reshape(key_shapes[k])
            pointer += num_p
    return models_dic
def scaffold_update(server_state, client_states, active_ids, args):
    """SCAFFOLD server step: fold the active clients' control-variate deltas
    into the server control variate and advance the round counter.

    Returns (new_server_state, new_client_states); the per-client states
    keep their local c_i and receive the (pre-update) server c.
    """
    active_clients = [client_states[i] for i in active_ids]
    c_delta = []
    cc = [client_state["c_i_delta"] for client_state in active_clients]
    for ind in range(len(server_state["c"])):
        # handles the int64 and float data types jointly
        c_delta.append(
            torch.mean(torch.stack([c_i_delta[ind].float() for c_i_delta in cc]), dim=0).to(server_state["c"][ind].dtype)
        )
    c_delta = tuple(c_delta)
    c = []
    for param_1, param_2 in zip(server_state["c"], c_delta):
        # NOTE(review): clients * client_frac / clients reduces to client_frac;
        # kept as written to match the reference implementation.
        c.append(param_1 + param_2 * args.clients * args.client_frac / args.clients)
    c = tuple(c)
    new_server_state = {
        "global_round": server_state["global_round"] + 1,
        "c": c
    }
    new_client_state = [{
        "global_round": new_server_state["global_round"],
        "model_delta": None,
        "c_i": client["c_i"],
        "c_i_delta": None,
        "c": server_state["c"]
    } for client in client_states]
    return new_server_state, new_client_state
def generate_adj(param_metrix, args, subgraph_size):
    """Fit a GraphConstructor so its adjacency matches the normalised pairwise
    L2 distances between flattened client parameter vectors; returns the
    constructor's sparsified (top-k) adjacency on CPU."""
    # O(n^2) pairwise distances between client parameter vectors
    dist_metrix = torch.zeros((len(param_metrix), len(param_metrix)))
    for i in range(len(param_metrix)):
        for j in range(len(param_metrix)):
            dist_metrix[i][j] = torch.nn.functional.pairwise_distance(
                param_metrix[i].view(1, -1), param_metrix[j].view(1, -1), p=2).clone().detach()
    dist_metrix = torch.nn.functional.normalize(dist_metrix).to(args.device)
    gc = GraphConstructor(args.clients, subgraph_size, args.node_dim,
                          args.device, args.adjalpha).to(args.device)
    idx = torch.arange(args.clients).to(args.device)
    optimizer = torch.optim.SGD(gc.parameters(), lr=args.lr, weight_decay=args.weight_decay)
    # fit the constructor's adjacency to the distance matrix by MSE
    for e in range(args.gc_epoch):
        optimizer.zero_grad()
        adj = gc(idx)
        adj = torch.nn.functional.normalize(adj)
        loss = torch.nn.functional.mse_loss(adj, dist_metrix)
        loss.backward()
        optimizer.step()
    adj = gc.eval(idx).to("cpu")  # GraphConstructor.eval: top-k sparsified adjacency
    return adj
def read_out(personalized_models, device):
    """Server read-out: average-pool the personalized models into one global
    model (no DP noise, dp=0) and replicate it once per client."""
    pooled = average_dic(personalized_models, device, 0)
    return [pooled for _ in personalized_models]
| 6,429
| 34.921788
| 121
|
py
|
SFL-Structural-Federated-Learning
|
SFL-Structural-Federated-Learning-main/optimiser.py
|
import numpy as np
import torch
from collections import namedtuple
from util import PiecewiseLinear
from torch.optim.optimizer import Optimizer, required
import torch.distributed as dist
class TorchOptimiser():
    """Thin wrapper around a torch optimizer whose hyper-parameters may be
    schedules (callables of the step number) instead of constants."""

    def __init__(self, weights, optimizer, step_number=0, **opt_params):
        self.weights = weights
        self.step_number = step_number
        self.opt_params = opt_params
        self._opt = optimizer(weights, **self.param_values())

    def param_values(self):
        """Resolve hyper-parameters: call schedules with the current step,
        pass plain constants through unchanged."""
        resolved = {}
        for name, value in self.opt_params.items():
            resolved[name] = value(self.step_number) if callable(value) else value
        return resolved

    def step(self):
        # advance the schedule first, then refresh the (single) param group
        self.step_number += 1
        self._opt.param_groups[0].update(**self.param_values())
        self._opt.step()

    def __repr__(self):
        return repr(self._opt)
def SGD(weights, lr=0, momentum=0, weight_decay=0, dampening=0, nesterov=False):
    """Convenience constructor: a TorchOptimiser wrapping torch.optim.SGD,
    so lr (and the other hyper-parameters) may be schedules."""
    return TorchOptimiser(
        weights, torch.optim.SGD, lr=lr, momentum=momentum,
        weight_decay=weight_decay, dampening=dampening, nesterov=nesterov)
class FedProx(Optimizer):
    """SGD with the FedProx proximal term: the effective gradient is
    d_p + mu * (w - w_old), pulling weights toward their value at the start
    of the local round ('old_init').

    NOTE(review): uses the legacy positional torch overloads
    tensor.add_(scalar, tensor) / add(scalar, tensor); newer torch prefers
    add_(tensor, alpha=scalar) — confirm against the pinned torch version.
    """
    def __init__(self, params, ratio, gmf, lr=required, momentum=0, dampening=0,
                 weight_decay=0, nesterov=False, variance=0, mu=0):
        self.gmf = gmf          # global momentum factor (unused in step/average here)
        self.ratio = ratio      # this worker's aggregation weight
        self.itr = 0
        self.a_sum = 0
        self.mu = mu            # proximal coefficient
        if lr is not required and lr < 0.0:
            raise ValueError("Invalid learning rate: {}".format(lr))
        if momentum < 0.0:
            raise ValueError("Invalid momentum value: {}".format(momentum))
        if weight_decay < 0.0:
            raise ValueError("Invalid weight_decay value: {}".format(weight_decay))
        defaults = dict(lr=lr, momentum=momentum, dampening=dampening,
                        weight_decay=weight_decay, nesterov=nesterov, variance=variance)
        if nesterov and (momentum <= 0 or dampening != 0):
            raise ValueError("Nesterov momentum requires a momentum and zero dampening")
        super(FedProx, self).__init__(params, defaults)
    def __setstate__(self, state):
        super(FedProx, self).__setstate__(state)
        for group in self.param_groups:
            group.setdefault('nesterov', False)
    def step(self, closure=None):
        """Performs a single optimization step.
        Arguments:
            closure (callable, optional): A closure that reevaluates the model
                and returns the loss.
        """
        loss = None
        if closure is not None:
            loss = closure()
        for group in self.param_groups:
            weight_decay = group['weight_decay']
            momentum = group['momentum']
            dampening = group['dampening']
            nesterov = group['nesterov']
            for p in group['params']:
                if p.grad is None:
                    continue
                d_p = p.grad.data
                if weight_decay != 0:
                    d_p.add_(weight_decay, p.data)
                param_state = self.state[p]
                # remember the weights at the start of the local round once
                if 'old_init' not in param_state:
                    param_state['old_init'] = torch.clone(p.data).detach()
                if momentum != 0:
                    if 'momentum_buffer' not in param_state:
                        buf = param_state['momentum_buffer'] = torch.clone(d_p).detach()
                    else:
                        buf = param_state['momentum_buffer']
                        buf.mul_(momentum).add_(1 - dampening, d_p)
                    if nesterov:
                        d_p = d_p.add(momentum, buf)
                    else:
                        d_p = buf
                # apply proximal update
                d_p.add_(self.mu, p.data - param_state['old_init'])
                p.data.add_(-group['lr'], d_p)
        return loss
    def average(self):
        """All-reduce the (ratio-scaled) parameters across workers, then reset
        'old_init' to the averaged weights and zero the momentum buffers."""
        param_list = []
        for group in self.param_groups:
            for p in group['params']:
                p.data.mul_(self.ratio)
                param_list.append(p.data)
        communicate(param_list, dist.all_reduce)
        for group in self.param_groups:
            for p in group['params']:
                param_state = self.state[p]
                param_state['old_init'] = torch.clone(p.data).detach()
                # Reinitialize momentum buffer
                if 'momentum_buffer' in param_state:
                    param_state['momentum_buffer'].zero_()
# helper functions for fedprox
def communicate(tensors, communication_op):
    """
    Reference: https://github.com/facebookresearch/stochastic_gradient_push
    Communicate a list of tensors.
    Arguments:
        tensors (Iterable[Tensor]): list of tensors.
        communication_op: a method or partial object which takes a tensor as
            input and communicates it. It can be a partial object around
            something like torch.distributed.all_reduce.
    """
    # flatten into one buffer so a single collective call suffices
    flat_tensor = flatten_tensors(tensors)
    communication_op(tensor=flat_tensor)
    # copy the communicated values back into the original tensors in place
    for f, t in zip(unflatten_tensors(flat_tensor, tensors), tensors):
        t.set_(f)
def flatten_tensors(tensors):
    """Flatten dense tensors of the same dtype into one contiguous 1-D buffer.

    Element-wise ops on the buffer are equivalent to operating on each
    tensor individually. A single tensor is returned as a flat clone.
    (Reference: facebookresearch/stochastic_gradient_push.)
    """
    flat_views = [t.view(-1) for t in tensors]
    if len(flat_views) == 1:
        return flat_views[0].clone()
    return torch.cat(flat_views, dim=0)
def unflatten_tensors(flat, tensors):
    """View a flat buffer using the sizes of `tensors`.

    Assumes `flat` was produced by flatten_tensors over same-dtype dense
    tensors; returns a tuple of views into `flat`, one per input tensor,
    each shaped like the corresponding tensor.
    (Reference: facebookresearch/stochastic_gradient_push.)
    """
    views = []
    offset = 0
    for template in tensors:
        count = template.numel()
        # a slice of a 1-D tensor is a view, same as narrow()
        views.append(flat[offset:offset + count].view_as(template))
        offset += count
    return tuple(views)
| 6,524
| 35.049724
| 97
|
py
|
SFL-Structural-Federated-Learning
|
SFL-Structural-Federated-Learning-main/data_util.py
|
import torch
import numpy as np
import scipy.sparse as sp
from torchvision import datasets
from collections import namedtuple
from torchvision import datasets, transforms
import pickle as pk
def load_image(args):
    """Build CIFAR-10 train/test sets with torchvision augmentation and split
    them across clients.

    NOTE(review): this function computes the split but has no return
    statement (implicitly returns None) — it looks unfinished; the loaders
    actually used appear to be load_cifar10 / load_mnist below.
    """
    data_dir = "./data/" + str(args.dataset)
    data_mean = (0.4914, 0.4822, 0.4465)  # equals np.mean(train_set.train_data, axis=(0,1,2))/255
    data_std = (0.2471, 0.2435, 0.2616)  # equals np.std(train_set.train_data, axis=(0,1,2))/255
    trans = [transforms.RandomCrop(32, padding=4),
             transforms.RandomHorizontalFlip(0.1),
             transforms.RandomVerticalFlip(0.1),
             transforms.ToTensor(),
             transforms.Normalize(data_mean, data_std)]
    apply_transform = transforms.Compose(trans)
    train_set = datasets.CIFAR10(data_dir, train=True, download=True, transform=apply_transform)
    test_set = datasets.CIFAR10(data_dir, train=False, download=True, transform=apply_transform)
    train_set.topk = 5
    train_set.targets = np.array(train_set.targets)
    test_set.targets = np.array(test_set.targets)
    # split
    train_user_groups, test_user_groups, A = split_equal_noniid(
        train_set, test_set, args.shards, args.edge_frac, args.clients)
def load_cifar10(args):
    """Load CIFAR-10, split it non-IID across clients, and wrap each client's
    shard in a Batches loader with Crop/FlipLR/Cutout augmentation.

    Returns (train_batches, test_batches, A, overall_test_batches) where A
    is the normalised client-affinity adjacency from the split.
    """
    data_dir = "./data/" + str(args.dataset)
    train_set = datasets.CIFAR10(root=data_dir, train=True, download=True)
    test_set = datasets.CIFAR10(root=data_dir, train=False, download=True)
    data_mean = (0.4914, 0.4822, 0.4465)  # equals np.mean(train_set.train_data, axis=(0,1,2))/255
    data_std = (0.2471, 0.2435, 0.2616)  # equals np.std(train_set.train_data, axis=(0,1,2))/255
    train_set.topk = 5  # consumed by split_equal_noniid to cap labels per client link
    train_set.targets = np.array(train_set.targets)
    test_set.targets = np.array(test_set.targets)
    train_transforms = [Crop(32, 32), FlipLR(), Cutout(8, 8)]
    # split (indices only; raw images are normalised below)
    train_user_groups, test_user_groups, A = split_equal_noniid(
        train_set, test_set, args.shards, args.edge_frac, args.clients)
    # normalise/pad once up-front, then pair each image with its label
    train_set = list(zip(transpose(normalise(pad(train_set.data, 4), data_mean, data_std)), train_set.targets))
    test_set = list(zip(transpose(normalise(test_set.data, data_mean, data_std)), test_set.targets))
    train_batches = []
    test_batches = []
    for key, users in train_user_groups.items():
        train_batches.append(Batches(Transform([train_set[u.astype(int)] for u in users],
                                               train_transforms), args.batch_size, shuffle=True, device=args.device,
                                     set_random_choices=True, drop_last=True))
    for key, users in test_user_groups.items():
        test_batches.append(Batches([test_set[u.astype(int)] for u in users],
                                    args.batch_size, shuffle=False, device=args.device, drop_last=False))
    # one loader over the full test set for global evaluation
    overall_tbatches = Batches(test_set, args.batch_size, shuffle=False,
                               device=args.device, drop_last=False)
    return train_batches, test_batches, A, overall_tbatches
# Image data related
def load_mnist(args):
    """Load MNIST, split it non-IID across clients, and wrap each client's
    shard in a Batches loader (no augmentation).

    Returns (train_batches, test_batches, A, overall_test_batches).
    """
    data_dir = "./data/" + str(args.dataset)
    trans = [transforms.ToTensor(),
             transforms.Normalize(*((0.1307,), (0.3081,)))]
    apply_transform = transforms.Compose(trans)
    train_dataset = datasets.MNIST(data_dir, train=True, download=True, transform=apply_transform)
    test_dataset = datasets.MNIST(data_dir, train=False, download=True, transform=apply_transform)
    train_dataset.topk = 5  # consumed by split_equal_noniid
    # add an explicit channel dim and convert to float for the conv net
    train_dataset.data = torch.unsqueeze(train_dataset.data, 1)
    train_dataset.targets = np.array(train_dataset.targets)
    train_dataset.data = train_dataset.data.type(torch.FloatTensor)
    test_dataset.data = torch.unsqueeze(test_dataset.data, 1)
    test_dataset.targets = np.array(test_dataset.targets)
    test_dataset.data = test_dataset.data.type(torch.FloatTensor)
    train_user_groups, test_user_groups, A = split_equal_noniid(
        train_dataset, test_dataset, args.shards, args.edge_frac, args.clients)
    train_set = list(zip(train_dataset.data, train_dataset.targets))
    test_set = list(zip(test_dataset.data, test_dataset.targets))
    train_batches = []
    test_batches = []
    for key, users in train_user_groups.items():
        train_batches.append(Batches([train_set[u.astype(int)] for u in users], args.batch_size,
                                     shuffle=True, device=args.device, drop_last=True))
    for key, users in test_user_groups.items():
        test_batches.append(Batches([test_set[u.astype(int)] for u in users], args.batch_size,
                                    shuffle=False, device=args.device, drop_last=False))
    # one loader over the full test set for global evaluation
    overall_tbatches = Batches(test_set, args.batch_size, shuffle=False,
                               device=args.device, drop_last=False)
    return train_batches, test_batches, A, overall_tbatches
def split_equal_noniid(train_dataset, test_dataset, shards, edge_frac, clients):
    """Shard-based non-IID split: each client draws `shards` label shards,
    and a client-affinity adjacency A links clients that share dominant labels.

    :param train_dataset: dataset with .data, .targets and .topk attributes
    :param test_dataset: dataset with .data and .targets
    :param shards: number of label shards per client
    :param edge_frac: fraction of candidate same-label links kept in A
    :param clients: number of clients
    :return: (train_dict_users, test_dict_users, A) — per-client index arrays
        and the row-normalised adjacency as a float32 tensor
    """
    total_shards = shards * clients
    shard_size = int(len(train_dataset.data) / total_shards)
    idx_shard = [i for i in range(total_shards)]
    train_dict_users = {i: np.array([]) for i in range(clients)}
    idxs = np.arange(total_shards * shard_size)
    labels = train_dataset.targets
    dict_label_dist = {i: np.array([]) for i in range(clients)}
    # sort labels
    idxs_labels = np.vstack((idxs, labels))
    idxs_labels = idxs_labels[:, idxs_labels[1, :].argsort()]
    idxs = idxs_labels[0, :]
    label_count = np.bincount(idxs_labels[1])
    # generate adj
    A = np.zeros((clients, clients))
    num_label = len(set(labels))
    label_dist = [[] for _ in range(num_label)]
    # partitions for train data
    for i in range(clients):
        # draw this client's shards without replacement
        rand_set = np.random.choice(idx_shard, shards, replace=False)
        idx_shard = list(set(idx_shard) - set(rand_set))
        selected_labels = idxs_labels[1, rand_set * shard_size]
        label_type = np.array(list(set(selected_labels)))
        sample_size = [np.count_nonzero(selected_labels == j) for j in label_type]
        # NOTE(review): the next expression's result is discarded (no-op).
        int(shard_size * shards / len(label_type))
        dict_label_dist[i] = np.array((label_type, sample_size))
        for j, l in enumerate(label_type):
            # indices of label l lie contiguously in the label-sorted idxs
            start_idx = sum(label_count[0:l])
            end_idx = start_idx + label_count[l]
            sample_array = idxs[start_idx: end_idx]
            train_dict_users[i] = np.concatenate(
                (train_dict_users[i], np.random.choice(
                    sample_array, sample_size[j] * shard_size, replace=False)), axis=0)
        # for cifar-100, control the sparsity of A
        label_size = np.array([np.count_nonzero(
            labels[train_dict_users[i].astype(int)] == j) for j in label_type])
        pram_label_idx = np.array(sorted(range(len(label_size)),
                                         key=lambda i: label_size[i])[min(-train_dataset.topk, shards):])
        # NOTE(review): the loop variable below shadows the label_type array.
        for label_type in label_type[pram_label_idx]:
            label_dist[label_type].append(i)
    # prepare A
    link_list = []
    for user_arr in label_dist:
        for user_a in user_arr:
            for user_b in user_arr:
                link_list.append([user_a, user_b])
    link_sample = list(range(len(link_list)))
    # keep only edge_frac of the candidate same-label links
    link_idx = np.random.choice(link_sample, int(edge_frac * len(link_list)), replace=False)
    for idx in link_idx:
        # A[link_list[idx][0], link_list[idx][1]] = A[link_list[idx][0], link_list[idx][1]] + 1
        A[link_list[idx][0], link_list[idx][1]] = 1
    # partition for test data
    total_shards = shards * clients
    shard_size = int(len(test_dataset.data) / total_shards)
    test_dict_users = {i: np.array([]) for i in range(clients)}
    idxs = np.arange(total_shards * shard_size)
    labels = test_dataset.targets
    # sort labels
    idxs_labels = np.vstack((idxs, labels))
    idxs_labels = idxs_labels[:, idxs_labels[1, :].argsort()]
    idxs = idxs_labels[0, :]
    label_count = np.bincount(idxs_labels[1])
    for i in range(clients):
        # mirror each client's train label distribution on the test set
        for j, l in enumerate(dict_label_dist[i][0]):
            start_idx = sum(label_count[0:l])
            end_idx = start_idx + label_count[l]
            sample_array = idxs[start_idx: end_idx]
            test_dict_users[i] = np.concatenate(
                (test_dict_users[i], np.random.choice(
                    sample_array, dict_label_dist[i][1][j] * shard_size, replace=False)), axis=0)
    return train_dict_users, test_dict_users, torch.tensor(normalize_adj(A), dtype=torch.float32)
class Batches():
    """Wrap a dataset in a DataLoader that yields {'input', 'target'} dicts,
    optionally re-sampling augmentation choices each epoch (duplicate of the
    Batches class in the model file)."""
    def __init__(self, dataset, batch_size, shuffle, device, set_random_choices=False, num_workers=0, drop_last=False):
        self.dataset = dataset
        self.batch_size = batch_size
        self.set_random_choices = set_random_choices
        self.device = device
        self.dataloader = torch.utils.data.DataLoader(
            dataset, batch_size=batch_size, num_workers=num_workers, pin_memory=True, shuffle=shuffle,
            drop_last=drop_last
        )
    def __iter__(self):
        if self.set_random_choices:
            # Transform datasets pre-sample per-item augmentation parameters
            self.dataset.set_random_choices()
        if self.device is not None:
            return ({'input': x.to(self.device), 'target': y.to(self.device).long()} for (x, y) in self.dataloader)
        else:
            return ({'input': x, 'target': y.long()} for (x, y) in self.dataloader)
    def __len__(self):
        return len(self.dataloader)
#####################
## data augmentation
#####################
class Crop(namedtuple('Crop', ('h', 'w'))):
    """Fixed-size h x w crop taken at a sampled top-left corner (x0, y0)."""

    def __call__(self, x, x0, y0):
        y1, x1 = y0 + self.h, x0 + self.w
        return x[:, y0:y1, x0:x1]

    def options(self, x_shape):
        # valid top-left corners for a CHW input of this shape
        _, H, W = x_shape
        return {'x0': range(W - self.w + 1), 'y0': range(H - self.h + 1)}

    def output_shape(self, x_shape):
        C, _, _ = x_shape
        return (C, self.h, self.w)
class FlipLR(namedtuple('FlipLR', ())):
    """Horizontal flip, applied only when the sampled `choice` is True."""

    def __call__(self, x, choice):
        if not choice:
            return x
        # reverse the width axis; copy so the result is contiguous
        return x[:, :, ::-1].copy()

    def options(self, x_shape):
        return {'choice': [True, False]}
class Cutout(namedtuple('Cutout', ('h', 'w'))):
    """Zero out an h x w patch at (x0, y0); operates on a copy of the input."""

    def __call__(self, x, x0, y0):
        patched = x.copy()
        patched[:, y0:y0 + self.h, x0:x0 + self.w].fill(0.0)
        return patched

    def options(self, x_shape):
        # valid top-left corners for a CHW input of this shape
        _, H, W = x_shape
        return {'x0': range(W - self.w + 1), 'y0': range(H - self.h + 1)}
class Transform:
    """Dataset wrapper applying a chain of augmentations whose random
    parameters are pre-sampled per item for the whole epoch via
    set_random_choices() (called by Batches.__iter__)."""
    def __init__(self, dataset, transforms):
        self.dataset, self.transforms = dataset, transforms
        self.choices = None  # list (one dict per transform) of per-item sampled args
    def __len__(self):
        return len(self.dataset)
    def __getitem__(self, index):
        data, labels = self.dataset[index]
        for choices, f in zip(self.choices, self.transforms):
            # pick this item's pre-sampled parameters for transform f
            args = {k: v[index] for (k, v) in choices.items()}
            data = f(data, **args)
        return data, labels
    def set_random_choices(self):
        """Sample augmentation parameters for every item, tracking how each
        transform changes the working shape (via output_shape, if defined)."""
        self.choices = []
        x_shape = self.dataset[0][0].shape
        N = len(self)
        for t in self.transforms:
            options = t.options(x_shape)
            x_shape = t.output_shape(x_shape) if hasattr(t, 'output_shape') else x_shape
            self.choices.append({k: np.random.choice(v, size=N) for (k, v) in options.items()})
def normalise(x, mean, std):
    """Standardise images channel-wise: (x - 255*mean) * 1/(255*std).

    Returns a new float32 array; the caller's array is left untouched.
    """
    arr = np.array(x, np.float32)  # explicit copy
    m = np.array(mean, np.float32)
    s = np.array(std, np.float32)
    arr -= m * 255
    arr *= 1.0 / (255 * s)
    return arr
def pad(x, border=4):
    """Reflect-pad the H and W axes of an NHWC batch by `border` pixels."""
    widths = [(0, 0), (border, border), (border, border), (0, 0)]
    return np.pad(x, widths, mode='reflect')
def transpose(x, source='NHWC', target='NCHW'):
    """Permute the axes of x from the `source` layout string to `target`."""
    order = [source.index(axis) for axis in target]
    return x.transpose(order)
def normalize_adj(mx):
    """Row-normalize a matrix: divide each row by its sum; all-zero rows stay
    zero (their 1/rowsum is inf and is zeroed out)."""
    row_sums = np.array(mx.sum(1))
    inv = np.power(row_sums, -1).flatten()
    inv[np.isinf(inv)] = 0.
    diag_inv = sp.diags(inv)
    return diag_inv.dot(mx)
| 11,858
| 36.647619
| 119
|
py
|
SFL-Structural-Federated-Learning
|
SFL-Structural-Federated-Learning-main/federated.py
|
import threading
import datetime
import torch
import time
import numpy as np
from BResidual import BResidual
from optimiser import SGD
from util import sd_matrixing, PiecewiseLinear, trainable_params, StatsLogger
class Cifar10FedEngine:
    """One client's local train/test execution for a federated round.

    Builds a BResidual model from the given global/local parameters, runs
    the requested mode ('Train' or 'Test') over the client's dataloader,
    and returns the updated parameters plus loss/accuracy statistics.
    """
    def __init__(self, args, dataloader, global_param, server_param, local_param,
                 outputs, cid, tid, mode, server_state, client_states):
        self.args = args
        self.dataloader = dataloader
        self.global_param = global_param    # round-start global weights
        self.server_param = server_param    # this client's personalized server weights
        self.local_param = local_param
        self.server_state = server_state
        self.client_state = client_states
        self.client_id = cid
        self.outputs = outputs
        self.thread = tid
        self.mode = mode
        self.model = self.prepare_model()
        # self.threadLock = threading.Lock()
        # regularization scratch tensors, populated in criterion()
        self.m1, self.m2, self.m3, self.reg1, self.reg2 = None, None, None, None, None
    def prepare_model(self):
        """Instantiate BResidual for the dataset's channel count and load
        the provided weights.

        NOTE(review): an unknown dataset leaves model=None and the following
        set_state call raises AttributeError — confirm intended fail-fast.
        """
        if self.args.dataset == "cifar10":
            model = BResidual(3)
        elif self.args.dataset == "mnist":
            model = BResidual(1)
        else:
            print("Unknown model type ... ")
            model = None
        model.set_state(self.global_param, self.local_param)
        return model
    def run(self):
        """Move the model to the configured device, execute the mode, free
        GPU scratch memory, and return the result dict."""
        self.model.to(self.args.device)
        output = self.client_run()
        self.free_memory()
        return output
    def client_run(self):
        """Run local training (args.client_epochs epochs) or a single test
        pass; returns params, timing, mean loss/accuracy and client state."""
        # piecewise-linear lr warm-up/decay over the local epochs,
        # scaled down by the batch size (loss is summed per batch below)
        lr_schedule = PiecewiseLinear([0, 5, self.args.client_epochs], [0, 0.4, 0.001])
        lr = lambda step: lr_schedule(step / len(self.dataloader)) / self.args.batch_size
        opt = SGD(trainable_params(self.model), lr=lr, momentum=0.9, weight_decay=5e-4
                  * self.args.batch_size, nesterov=True)
        mean_loss = []
        mean_acc = []
        t1 = time.time()
        c_state = None
        if self.mode == "Train":
            # training process
            for epoch in range(self.args.client_epochs):
                stats = self.batch_run(True, opt.step)
                mean_loss.append(stats.mean('loss'))
                mean_acc.append(stats.mean('correct'))
                # log = "Train - Epoch: " + str(epoch) + ' train loss: ' + str(stats.mean('loss')) +\
                #       ' train acc: ' + str(stats.mean('correct'))
                # self.logger(log, True)
        elif self.mode == "Test":
            # validation process
            stats = self.batch_run(False)
            mean_loss.append(stats.mean('loss'))
            mean_acc.append(stats.mean('correct'))
            # log = 'Test - test loss: ' + str(stats.mean('loss')) + ' test acc: ' \
            #       + str(stats.mean('correct'))
            # self.logger(log)
        time_cost = time.time() - t1
        log = self.mode + ' - Thread: {:03d}, Client: {:03d}. Average Loss: {:.4f},' \
                          ' Average Accuracy: {:.4f}, Total Time Cost: {:.4f}'
        self.logger(log.format(self.thread, self.client_id, np.mean(mean_loss), np.mean(mean_acc),
                               time_cost), True)
        self.model.to("cpu")
        output = {"params": self.model.get_state(),
                  "time": time_cost,
                  "loss": np.mean(mean_loss),
                  "acc": np.mean(mean_acc),
                  "client_state": self.client_state,
                  "c_state": c_state}
        # self.outputs[self.thread] = output
        return output
    def batch_run(self, training, optimizer_step=None, stats=None):
        """One pass over the dataloader; backprops and steps when training."""
        stats = stats or StatsLogger(('loss', 'correct'))
        self.model.train(training)
        for batch in self.dataloader:
            output = self.model(batch)
            output['loss'] = self.criterion(output['loss'], self.mode)
            stats.append(output)
            if training:
                output['loss'].sum().backward()
                optimizer_step()
                self.model.zero_grad()
            # NOTE(review): .to("cpu") returns a new tensor and the result is
            # discarded here, so these two lines have no effect.
            batch["input"].to("cpu")
            batch["target"].to("cpu")
        return stats
    def criterion(self, loss, mode):
        """Optionally add proximal regularization: distances from the current
        weights (m1) to the personalized server weights (m2) and the global
        weights (m3), each weighted 0.3. Skipped for plain 'avg'."""
        if self.args.agg == "avg":
            pass
        elif self.args.reg > 0 and mode != "PerTrain" and self.args.clients != 1:
            self.m1 = sd_matrixing(self.model.get_state()[0]).reshape(1, -1).to(self.args.device)
            self.m2 = sd_matrixing(self.server_param).reshape(1, -1).to(self.args.device)
            self.m3 = sd_matrixing(self.global_param).reshape(1, -1).to(self.args.device)
            self.reg1 = torch.nn.functional.pairwise_distance(self.m1, self.m2, p=2)
            self.reg2 = torch.nn.functional.pairwise_distance(self.m1, self.m3, p=2)
            loss = loss + 0.3 * self.reg1 + 0.3 * self.reg2
        return loss
    def free_memory(self):
        """Release GPU scratch memory after a round.

        NOTE(review): the .to("cpu") return values are discarded, so these
        calls do not actually move the tensors; only empty_cache() has effect.
        """
        if self.m1 is not None:
            self.m1.to("cpu")
        if self.m2 is not None:
            self.m2.to("cpu")
        if self.m3 is not None:
            self.m3.to("cpu")
        if self.reg1 is not None:
            self.reg1.to("cpu")
        if self.reg2 is not None:
            self.reg2.to("cpu")
        torch.cuda.empty_cache()
    def logger(self, buf, p=False):
        """Append a timestamped line to the log file; echo to stdout if p."""
        if p:
            print(buf)
        # self.threadLock.acquire()
        with open(self.args.logDir, 'a+') as f:
            f.write(str(datetime.datetime.now()) + '\t' + buf + '\n')
        # self.threadLock.release()
| 5,404
| 35.033333
| 101
|
py
|
SFL-Structural-Federated-Learning
|
SFL-Structural-Federated-Learning-main/options.py
|
import argparse
def arg_parameter(argv=None):
    """Parse command-line options for the federated-learning experiments.

    :param argv: optional list of argument strings.  Defaults to ``None``
        so ``argparse`` falls back to ``sys.argv[1:]`` — existing callers
        are unaffected — while passing an explicit list makes the parser
        usable from tests and notebooks.
    :return: the parsed ``argparse.Namespace``.  Post-processing converts
        ``args.hidden`` from a comma string into a list of ints and
        expands ``args.logDir`` into a per-run log path of the form
        ``<dir><dataset>-<agg>-<com_round>-<client_epochs>-<suffix>``.
    """
    parser = argparse.ArgumentParser()
    # Training arguments
    parser.add_argument('--device', type=str, default='cuda:1', help='')
    parser.add_argument('--dataset', type=str, default='mnist', help="name of dataset")
    parser.add_argument('--adj_data', type=str, default='data/sensor_graph/adj_mx.pkl', help='adj data path')
    parser.add_argument('--unequal', type=int, default=0, help='whether to use unequal data splits')
    parser.add_argument('--seed', type=int, default=1, help='random seed')
    parser.add_argument('--batch_size', type=int, default=128, help='batch size')
    parser.add_argument('--lr', type=float, default=0.01, help='learning rate')
    parser.add_argument('--debug', type=int, default=0, help='debug mode')
    parser.add_argument('--reg', type=int, default=1, help='enable regulizer or not for local train')
    parser.add_argument('--com_round', type=int, default=20, help='Number of communication round to train.')
    parser.add_argument('--epoch', type=int, default=10, help='epoch for each communication round.')
    parser.add_argument('--client_epochs', type=int, default=20, help='epoch for each communication round.')
    parser.add_argument('--logDir', default='./log/,default.txt', help='Path for log info')
    parser.add_argument('--num_thread', type=int, default=5, help='number of threading to use for client training.')
    parser.add_argument('--dataaug', type=int, default=0, help='data augmentation')
    parser.add_argument('--evalall', type=int, default=0, help='use all or partial validation dataset for test')
    # Federated arguments
    parser.add_argument('--clients', type=int, default=100, help="number of users: K")
    parser.add_argument('--shards', type=int, default=2, help="each client roughly have 2 data classes")
    parser.add_argument('--serveralpha', type=float, default=1, help='server prop alpha')
    parser.add_argument('--serverbeta', type=float, default=0.3, help='personalized agg rate alpha')
    parser.add_argument('--deep', type=int, default=0, help='0: 1 layer only, 1: 2 layers, 3:full-layers')
    parser.add_argument('--agg', type=str, default='none', help='averaging strategy')
    parser.add_argument('--dp', type=float, default=0.005, help='differential privacy')
    parser.add_argument('--epsilon', type=float, default=1, help='stepsize')
    parser.add_argument('--ord', type=int, default=2, help='similarity metric')
    parser.add_argument('--sm', type=str, default='full', help='state mode, for baselines running')
    parser.add_argument('--layers', type=int, default=3, help='number of layers')
    parser.add_argument('--client_frac', type=float, default=1, help='the fraction of clients')
    # Graph Learning
    parser.add_argument('--subgraph_size', type=int, default=30, help='k')
    parser.add_argument('--adjalpha', type=float, default=3, help='adj alpha')
    parser.add_argument('--gc_epoch', type=int, default=10, help='')
    parser.add_argument('--adjbeta', type=float, default=0.05, help='update ratio')
    parser.add_argument('--edge_frac', type=float, default=1, help='the fraction of clients')
    # CNN tasks related
    parser.add_argument('--num_classes', type=int, default=10, help="number of classes")
    parser.add_argument('--num_channels', type=int, default=1, help="number of channels of imgs")
    parser.add_argument('--hidden', type=str, default="10,20,320,50", help="Whether use max pooling rather than strided convolutions")
    parser.add_argument('--bn', type=int, default=0, help="enable batch norm of CNN model")
    # RNN tasks related
    parser.add_argument('--clip', type=int, default=5, help='clip')
    parser.add_argument('--step_size1', type=int, default=2500, help='step_size')
    parser.add_argument('--step_size2', type=int, default=100, help='step_size')
    parser.add_argument('--rnn_hidden', type=int, default=64, help='Number of rnn hidden size.')
    # Others
    parser.add_argument('--kernel_num', type=int, default=9, help='number of each kind of kernel')
    parser.add_argument('--kernel_sizes', type=str, default='3,4,5', help='comma-separated kernel size to use for convolution')
    parser.add_argument('--norm', type=str, default='batch_norm', help="batch_norm, layer_norm, or None")
    parser.add_argument('--num_filters', type=int, default=32, help="number of filters for conv nets -- 32 for mini-imagenet, 64 for omiglot.")
    parser.add_argument('--max_pool', type=str, default='True', help="Whether use max pooling rather than strided convolutions")
    parser.add_argument('--print_every', type=int, default=100, help='')
    parser.add_argument('--save', type=str, default='./save/', help='save path')
    parser.add_argument('--weight_decay', type=float, default=0.0001, help='weight decay rate')
    parser.add_argument('--valid_freq', type=int, default=1, help='validation at every n communication round')
    parser.add_argument('--cl', type=int, default=1, help='whether to do curriculum learning')
    parser.add_argument('--gcn_depth', type=int, default=2, help='graph convolution depth')
    parser.add_argument('--dropout', type=float, default=0.3, help='dropout rate')
    parser.add_argument('--node_dim', type=int, default=40, help='dim of nodes')
    parser.add_argument('--dilation_exponential', type=int, default=1, help='dilation exponential')
    parser.add_argument('--conv_channels', type=int, default=32, help='convolution channels')
    parser.add_argument('--residual_channels', type=int, default=32, help='residual channels')
    parser.add_argument('--skip_channels', type=int, default=64, help='skip channels')
    parser.add_argument('--end_channels', type=int, default=128, help='end channels')
    parser.add_argument('--in_dim', type=int, default=2, help='inputs dimension')
    parser.add_argument('--seq_in_len', type=int, default=12, help='input sequence length')
    parser.add_argument('--seq_out_len', type=int, default=12, help='output sequence length')
    parser.add_argument('--propalpha', type=float, default=0.05, help='prop alpha')
    parser.add_argument('--num_split', type=int, default=1, help='number of splits for graphs')
    parser.add_argument('--runs', type=int, default=10, help='number of runs')
    args = parser.parse_args(argv)
    # "10,20,320,50" -> [10, 20, 320, 50]
    hidden = []
    for h in args.hidden.split(","):
        hidden.append(int(h))
    args.hidden = hidden
    # logDir is "<prefix>,<suffix>"; build a per-run file name between them.
    logs = args.logDir.split(",")
    args.logDir = logs[0] + args.dataset + "-" + args.agg + "-" + \
        str(args.com_round) + "-" + str(args.client_epochs) + "-" + logs[1]
    return args
| 6,621
| 70.204301
| 143
|
py
|
SFL-Structural-Federated-Learning
|
SFL-Structural-Federated-Learning-main/util.py
|
import datetime
import random
import os
import torch
import numpy as np
from collections import namedtuple
from functools import singledispatch
def print2file(buf, out_file, p=False):
    """Append *buf* to *out_file* with a timestamp, optionally printing it.

    :param buf: message text to log.
    :param out_file: path of the log file (opened in append mode).
    :param p: when True, also echo *buf* to stdout.
    """
    if p:
        print(buf)
    # Context manager guarantees the handle is closed even if write()
    # raises; the original open()/close() pair leaked the handle on error.
    with open(out_file, 'a+') as outfd:
        outfd.write(str(datetime.datetime.now()) + '\t' + buf + '\n')
def initial_environment(seed, cpu_num=5, deterministic=False):
    """Pin BLAS/OMP thread counts and seed every RNG used by the experiments."""
    thread_env_vars = ('OMP_NUM_THREADS', 'OPENBLAS_NUM_THREADS',
                       'MKL_NUM_THREADS', 'VECLIB_MAXIMUM_THREADS',
                       'NUMEXPR_NUM_THREADS')
    for var in thread_env_vars:
        os.environ[var] = str(cpu_num)
    torch.set_num_threads(cpu_num)
    random.seed(seed)
    os.environ['PYTHONHASHSEED'] = str(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    if deterministic:
        # Trade cuDNN autotuning speed for bitwise reproducibility.
        torch.backends.cudnn.deterministic = True
        torch.backends.cudnn.benchmark = False
def sd_matrixing(state_dic):
    """Flatten a model state dict into a single 1-D parameter vector.

    Each tensor is detached, copied to the CPU and flattened; scalar
    (0-dim) entries after the first are cast to float32 so they can be
    concatenated with floating-point weights (matching the original
    behaviour).  Pieces are collected in a list and concatenated once,
    instead of the original repeated ``torch.cat`` which was quadratic
    in the number of parameters.

    :param state_dic: mapping of parameter name -> tensor.
    :return: 1-D ``torch.Tensor`` with all parameters, or ``None`` for
        an empty state dict.
    """
    pieces = []
    for param in state_dic.values():
        detached = param.clone().detach()
        if pieces and len(list(param.size())) == 0:
            # Scalars (e.g. batch-norm counters) may be integer typed;
            # cast so they concatenate with float weights.
            pieces.append(detached.view(1).cpu().type(torch.float32))
        else:
            pieces.append(detached.flatten().cpu())
    if not pieces:
        return None
    return torch.cat(pieces, 0)
def trainable_params(model):
    """Return the list of model parameters that require gradients."""
    return [param for param in model.parameters() if param.requires_grad]
class PiecewiseLinear(namedtuple('PiecewiseLinear', ('knots', 'vals'))):
    """Callable schedule: linearly interpolate ``vals`` over ``knots``."""

    def __call__(self, t):
        interpolated = np.interp([t], self.knots, self.vals)
        return interpolated[0]
class StatsLogger():
    """Accumulates per-batch model outputs keyed by metric name."""

    def __init__(self, keys):
        # One list of detached tensors per tracked metric.
        self._stats = {k: [] for k in keys}

    def append(self, output):
        """Record ``output[k]`` (detached) for every tracked key ``k``."""
        for k, v in self._stats.items():
            v.append(output[k].detach())

    def stats(self, key):
        """Concatenate every recorded value for *key* into one tensor."""
        return cat(*self._stats[key])

    def mean(self, key):
        # ``np.float`` was removed in NumPy 1.24; the builtin ``float``
        # is the documented replacement and is what the alias meant.
        return np.mean(to_numpy(self.stats(key)), dtype=float)
@singledispatch
def cat(*xs):
    """Concatenate values; dispatch on the type of the first argument."""
    raise NotImplementedError


@singledispatch
def to_numpy(x):
    """Convert *x* to a NumPy array; dispatch on its type."""
    raise NotImplementedError


@cat.register(torch.Tensor)
def _cat_tensors(*xs):
    return torch.cat(xs)


@to_numpy.register(torch.Tensor)
def _tensor_to_numpy(x):
    return x.detach().cpu().numpy()
| 2,561
| 24.366337
| 117
|
py
|
parameter-exchange
|
parameter-exchange-master/results_eval/scripts/divider.py
|
#!/usr/bin/env python3
"""Test module to develop divider scripts"""
# Flat plotting script: builds color/hatch/label configuration for the
# client-side measurement plots, then extracts one data point per phase
# from two IKV measurement runs.  Depends on the project-local `plot`
# and `client_time` modules; list mutation order below matters (inserts
# shift later indices), so do not reorder statements.
import os
from typing import List, Union
# noinspection PyUnresolvedReferences
from plot.colors import (maroon, orange, yellow, green, purple, pink, blue,
                         lightblue, bar_colors)
# noinspection PyUnresolvedReferences
from plot.tls import compute_tls_curve
# noinspection PyUnresolvedReferences
from client_time import get_stacks, replace_psi_stacks, replace_bloom_stacks
# noinspection PyUnresolvedReferences
from plot.plot import (bar_plot, OUTPUT_DIR, INPUT_DIR, stacked_bar_plot,
                       read_data, error_plot_mult, stacked_bar_plot_mult,
                       join_stack_data, bar_plot_mult, stacked_bar_plot_two_y,
                       EXTENSION, mean_confidence_interval, Y_LIM, TITLE,
                       make_legend, output, PRINT)
PLOT_ALL = 0
TLS = 1
output_dir_client = OUTPUT_DIR + "client/"
os.makedirs(output_dir_client, exist_ok=True)
input_dir = INPUT_DIR + "client/"
# Per-variant color palettes (PSI-only, Bloom-only, combined).
psi_colors = [maroon, orange, yellow, green,
              purple, pink]
bloom_colors = [maroon, blue, lightblue, green, purple, pink]
both_colors = [maroon, orange, yellow, blue, lightblue,
               green, purple, pink]
hatches: List[Union[str, None]] = [None for _ in range(len(both_colors) + 2)]
psi_hatches = hatches[:]
bloom_hatches = hatches[:]
x_index = 5  # Results
indices = [6, 8, 9, 11, 12, 13, 14, 15, 16]
# Positions at which the TLS phases get inserted into each variant.
PSI_TLS_IND = 3
OT_TLS_BOTH = 7
OT_TLS_PSI = 5
OT_TLS_BLOOM = 4
if TLS:
    # Insert the TLS handshake colors/hatches; later inserts assume the
    # earlier ones already shifted the lists.
    ot_tls_color = 'forestgreen'
    psi_tls_color = 'lightyellow'
    psi_colors.insert(PSI_TLS_IND, psi_tls_color)
    both_colors.insert(PSI_TLS_IND, psi_tls_color)
    psi_colors.insert(OT_TLS_PSI - 1, ot_tls_color)
    bloom_colors.insert(OT_TLS_BLOOM - 1, ot_tls_color)
    both_colors.insert(OT_TLS_BOTH - 1, ot_tls_color)
    psi_hatches[PSI_TLS_IND] = '////////'
    psi_hatches[OT_TLS_PSI] = '////////'
    bloom_hatches[OT_TLS_BLOOM] = '////////'
    hatches[PSI_TLS_IND] = '////////'
    hatches[OT_TLS_BOTH] = '////////'
# -----------------------------------------------------------------------------
# Phase labels per variant; must stay aligned with the color lists above.
bloom_phases = [
    'Hashkey Retr.', 'Bloom Retr.', 'Matching',
    'Key Retr. (OT)', 'Record Retr.', 'Decryption']
psi_phases = [
    'Hashkey Retr.', 'PSI Prep.', 'PSI Exec.',
    'Key Retr. (OT)', 'Record Retr.', 'Decryption']
both_phases = [
    'Hashkey Retr.', 'PSI Prep.', 'PSI Exec.',
    'Bloom Retr.', 'Matching', 'Key Retr. (OT)',
    'Record Retr.', 'Decryption'
]
if TLS:
    bloom_phases.insert(OT_TLS_BLOOM, 'OT TLS')
    psi_phases.insert(PSI_TLS_IND, 'PSI TLS')
    psi_phases.insert(OT_TLS_PSI, 'OT TLS')
    both_phases.insert(PSI_TLS_IND, 'PSI TLS')
    both_phases.insert(OT_TLS_BOTH, 'OT TLS')
ylabel = "Time [s]"
name = "butthead_client_ikv2"
input_file = input_dir + name + '.csv'
read_stacks_ikv2 = get_stacks(input_file)
name = "butthead_client_ikv1"
input_file = input_dir + name + '.csv'
# Read Data
read_stacks_ikv1 = get_stacks(input_file)
output_dir = output_dir_client + 'divided/'
os.makedirs(output_dir, exist_ok=True)
output_file = output_dir + f'ikv_joined_bar{EXTENSION}'
xlabel = "Phase"
title = "Client App - IKV Data - 10 Reps"
data1 = {}
for i, s in enumerate(read_stacks_ikv1):
    if i not in [0, 2, 3, 4]:
        # Ignore start and PSI Stack
        # NOTE(review): the 77 key presumably selects the 77%-uploaded
        # measurement point — confirm against the CSV layout.
        data1[i] = s[77]
data2 = {}
for i, s in enumerate(read_stacks_ikv2):
    if i not in [0, 2, 3, 4]:
        # Ignore start and PSI Stack
        data2[i] = s[77]
| 3,469
| 35.145833
| 79
|
py
|
parameter-exchange
|
parameter-exchange-master/results_eval/scripts/provider_comm.py
|
#!/usr/bin/env python3
"""Bloom Size Plot."""
# Flat plotting script for provider-side COMMUNICATION volume.  For each
# experiment (random uploads, record length, IKV random/sequential, WZL)
# it reads one CSV column per traffic direction, converts bytes to GB,
# and emits a grouped bar plot and a stacked bar plot.
import os
from plot.colors import bar_colors
from plot.plot import (bar_plot, OUTPUT_DIR, INPUT_DIR, stacked_bar_plot,
                       read_data, stacked_bar_plot_mult, convert_to_gb)
PLOT_ALL = 1
EXTENSION = '.png'
output_dir_provider = OUTPUT_DIR + "provider_comm/"
os.makedirs(output_dir_provider, exist_ok=True)
input_dir = INPUT_DIR + "provider/"
colors = bar_colors
# CSV column index -> traffic-direction label (KS = key server,
# SS = storage server, OT = oblivious transfer — presumed; confirm).
values = {
    13: "From KS",
    15: "ToKS",
    17: "FromSS",
    19: "ToSS",
    21: "To OT",
    23: "From OT",
}
indizes = sorted(values.keys())
labels = [values[i] for i in indizes]
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# Random Upload Dependence
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
name = "butthead_provider_uploads"
output_dir = output_dir_provider + "rand_uploads/"
os.makedirs(output_dir, exist_ok=True)
# Read Data
input_file = input_dir + name + '.csv'
x_ind = 2
# Read Data
read_stacks = []
for i in indizes:
    d = read_data(input_file, x_ind, i)
    d = convert_to_gb(d)
    read_stacks.append(d)
# -----------------------------------------------------------------------------
# Bar Plot
if False or PLOT_ALL:
    output_file = output_dir + f'provider_bar{EXTENSION}'
    xlabel = "Phase"
    ylabel = "Size [GB]"
    title = "Provider App - 500 Uploads - 10 Reps"
    data = {}
    for i, s in enumerate(read_stacks):
        data[i] = s[500]  # Only consider 500 matches
    bar_plot(output_file, data, xlabel, ylabel, title, xticks=labels[:],
             small_xticks=True)
# -----------------------------------------------------------------------------
# Stacked Bar Plot
if False or PLOT_ALL:
    output_file = output_dir + f'provider_stacked_bar{EXTENSION}'
    xlabel = "Uploads [#]"
    ylabel = "Size [GB]"
    title = "Provider App - 1-1000 Uploads - 10 Reps"
    stacks = read_stacks
    stacked_bar_plot(output_file, stacks, labels[:], xlabel, ylabel, title,
                     label_step=1, colors=colors)
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# Random Length Dependence
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
name = "butthead_provider_record_length"
output_dir = output_dir_provider + "rand_length/"
os.makedirs(output_dir, exist_ok=True)
# Read Data
input_file = input_dir + name + '.csv'
x_ind = 3
# Read Data
read_stacks = []
for i in indizes:
    d = read_data(input_file, x_ind, i)
    d = convert_to_gb(d)
    read_stacks.append(d)
# -----------------------------------------------------------------------------
# Bar Plot
if False or PLOT_ALL:
    output_file = output_dir + f'provider_bar{EXTENSION}'
    xlabel = "Phase"
    ylabel = "Size [GB]"
    title = "Provider App - 500 Record Len - 100 Uploads - 10 Reps"
    data = {}
    for i, s in enumerate(read_stacks):
        data[i] = s[500]  # Only consider 500 matches
    bar_plot(output_file, data, xlabel, ylabel, title, xticks=labels[:],
             small_xticks=True)
# -----------------------------------------------------------------------------
# Stacked Bar Plot
if False or PLOT_ALL:
    output_file = output_dir + f'provider_stacked_bar{EXTENSION}'
    xlabel = "Uploads [#]"
    ylabel = "Size [GB]"
    title = "Provider App - 100 Uploads - Length 1-1000 - 10 Reps"
    stacks = read_stacks
    xticks = list(range(100, 1000, 100)) + [1000]
    xticks = [i if i in xticks else '' for i in range(100, 1001, 100)]
    stacked_bar_plot(output_file, stacks, labels[:], xlabel, ylabel, title,
                     label_step=1, colors=colors,
                     xticks=xticks)
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# IKV Data
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
name = "butthead_provider_ikv"
output_dir = output_dir_provider + "ikv/"
os.makedirs(output_dir, exist_ok=True)
# Read Data
input_file = input_dir + name + '.csv'
x_ind = 2
# Read Data
read_stacks_ikv1 = []
for i in indizes:
    old_d = read_data(input_file, x_ind, i)
    d = {}
    for m in old_d:
        # Rescale absolute record counts to percent of the 4620 records.
        d[m * 100 // 4620] = old_d[m]
    d = convert_to_gb(d)
    read_stacks_ikv1.append(d)
# -----------------------------------------------------------------------------
# Bar Plot
if False or PLOT_ALL:
    output_file = output_dir + f'provider_bar{EXTENSION}'
    xlabel = "Phase"
    ylabel = "Size [GB]"
    title = "Provider App - IKV Data - Upload (Ran. Choice from 4620 " \
            "Records) - 10 Reps"
    data = {}
    for i, s in enumerate(read_stacks_ikv1):
        data[i] = s[100]  # Only consider 500 matches
    bar_plot(output_file, data, xlabel, ylabel, title, xticks=labels[:],
             small_xticks=True)
# -----------------------------------------------------------------------------
# Stacked Bar Plot
if False or PLOT_ALL:
    output_file = output_dir + f'provider_stacked_bar{EXTENSION}'
    xlabel = "Uploaded Records [%]"
    ylabel = "Size [GB]"
    title = "Provider App - IKV Data (4620 Records) - 10 Reps"
    stacks = read_stacks_ikv1  # Don't plot start time
    stacked_bar_plot(output_file, stacks, labels[:], xlabel, ylabel, title,
                     label_step=1, colors=colors)
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# IKV Data 2 - Non Random
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
name = "butthead_provider_ikv2"
output_dir = output_dir_provider + "ikv2/"
os.makedirs(output_dir, exist_ok=True)
# Read Data
input_file = input_dir + name + '.csv'
x_ind = 2
# Read Data
read_stacks_ikv2 = []
for i in indizes:
    old_d = read_data(input_file, x_ind, i)
    d = {}
    for m in old_d:
        d[m * 100 // 4620] = old_d[m]
    d = convert_to_gb(d)
    read_stacks_ikv2.append(d)
# -----------------------------------------------------------------------------
# Bar Plot
if False or PLOT_ALL:
    output_file = output_dir + f'provider_bar{EXTENSION}'
    xlabel = "Phase"
    ylabel = "Size [GB]"
    title = "Provider App - IKV Data - Upload (Seq. Choice from 4620 " \
            "Records) - 10 Reps"
    data = {}
    for i, s in enumerate(read_stacks_ikv2):
        data[i] = s[100]  # Only consider 500 matches
    bar_plot(output_file, data, xlabel, ylabel, title, xticks=labels[:],
             small_xticks=True)
# -----------------------------------------------------------------------------
# Stacked Bar Plot
if False or PLOT_ALL:
    output_file = output_dir + f'provider_stacked_bar{EXTENSION}'
    xlabel = "Uploaded Records [%]"
    ylabel = "Size [GB]"
    title = "Provider App - IKV Data (4620 Records) - 10 Reps"
    stacks = read_stacks_ikv2  # Don't plot start time
    stacked_bar_plot(output_file, stacks, labels[:], xlabel, ylabel, title,
                     label_step=1, colors=colors)
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# Both IKV
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
if 1 or PLOT_ALL:
    # Side-by-side comparison: sequential vs random record choice.
    output_file = output_dir + f'../' + f'ikv_stacked{EXTENSION}'
    xlabel = "Uploaded Records [%]"
    ylabel = "Size [GB]"
    title = "Provider App - IKV Data (4620 Records) - Seq. vs Ran. - 10 Reps"
    stacks = read_stacks_ikv2
    stacked_bar_plot_mult(output_file,
                          [read_stacks_ikv2, read_stacks_ikv1], labels[:],
                          xlabel, ylabel, title,
                          label_step=1, colors=colors)
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# WZL Data
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
name = "butthead_provider_wzl"
output_dir = output_dir_provider + "wzl/"
os.makedirs(output_dir, exist_ok=True)
# Read Data
input_file = input_dir + name + '.csv'
x_ind = 2
# Read Data
read_stacks = []
for i in indizes:
    old_d = read_data(input_file, x_ind, i)
    d = {}
    for m in old_d:
        # WZL data set has 600 records total.
        d[m * 100 // 600] = old_d[m]
    d = convert_to_gb(d)
    read_stacks.append(d)
# -----------------------------------------------------------------------------
# Bar Plot
if False or PLOT_ALL:
    output_file = output_dir + f'provider_bar{EXTENSION}'
    xlabel = "Phase"
    ylabel = "Size [GB]"
    title = "Provider App - WZL Data - Full Upload (600 Records) - 10 Reps"
    data = {}
    for i, s in enumerate(read_stacks):
        data[i] = s[100]
    bar_plot(output_file, data, xlabel, ylabel, title, xticks=labels[:],
             small_xticks=True)
# -----------------------------------------------------------------------------
# Stacked Bar Plot
if False or PLOT_ALL:
    output_file = output_dir + f'provider_stacked_bar{EXTENSION}'
    xlabel = "Uploaded Records [%]"
    ylabel = "Size [GB]"
    title = "Provider App - WZL Data (600 Records) - 10 Reps"
    stacks = read_stacks[:]
    stacked_bar_plot(output_file, stacks, labels[:], xlabel, ylabel, title,
                     label_step=1, colors=colors)
| 10,408
| 38.729008
| 79
|
py
|
parameter-exchange
|
parameter-exchange-master/results_eval/scripts/provider_time.py
|
#!/usr/bin/env python3
"""Bloom Size Plot."""
# Flat plotting script for provider-side TIMING.  Each section reads
# cumulative per-phase timestamps, converts them to per-phase durations
# by subtracting the previous column in place (iterating indices
# backwards so earlier columns are still cumulative when used), and —
# when TLS is enabled — inserts an estimated TLS-overhead stack.
# Statement order matters throughout; do not reorder.
import os
from typing import List, Union
# noinspection PyUnresolvedReferences
from plot.colors import bar_colors, maroon
# noinspection PyUnresolvedReferences
from plot.plot import (OUTPUT_DIR, INPUT_DIR,
                       read_data, stacked_bar_plot_mult, EXTENSION, Legend,
                       read_ram_max, convert_mib_to_gb, error_plot_mult)
# noinspection PyUnresolvedReferences
from plot.tls import compute_tls_curve
PLOT_ALL = 1
TLS = 1
output_dir_provider = OUTPUT_DIR + "provider/"
os.makedirs(output_dir_provider, exist_ok=True)
input_dir = INPUT_DIR + "provider/"
colors = bar_colors
del colors[1]  # Remove blue
colors[0] = maroon
hatches: List[Union[None, str]] = [None for _ in range(len(colors))]
TLS_INDEX = 2
if TLS:
    ot_tls_color = 'forestgreen'
    colors.insert(TLS_INDEX - 1, ot_tls_color)
    hatches[TLS_INDEX] = '////////'
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# Leave out invisible xticks
#
# phases = (
#     'Parse Input', 'Hash Key R.', 'Hash Set', 'OT Index Comp.',
#     'Key R. (OT)', 'Set Key', 'Encryption', 'Sending')
phases = [
    'Hash Key R.', 'Key R. (OT)', 'Encryption', 'Sending']
if TLS:
    phases.insert(TLS_INDEX, 'OT TLS')
# CSV columns holding the cumulative phase timestamps.
indizes = [4, 6, 9, 11, 12]
x_ind = 2
adjust_bar = None  # (0.1, 0.99, 0.99, 0.15)
adjust_bar2 = None  # (0.075, 0.99, 0.99, 0.15)
adjust_stacked_bar = (0.07, 0.98, 0.94, 0.20)
adjust_stacked_bar2 = (0.09, 0.98, 0.94, 0.20)
ylabel = "Time [s]"
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# Random Upload Dependence
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
name = "butthead_provider_uploads"
output_dir = output_dir_provider + "rand_uploads/"
os.makedirs(output_dir, exist_ok=True)
# Read Data
input_file = input_dir + name + '.csv'
# Read Data
read_stacks = []
for i in indizes:
    d = read_data(input_file, x_ind, i)
    read_stacks.append(d)
# Convert cumulative timestamps to per-phase durations (backwards so the
# previous column is still cumulative when subtracted).
for i in range(len(read_stacks) - 1, 0, -1):
    data = read_stacks[i]
    for matches in data.keys():
        for j, v in enumerate(data[matches]):
            # Subtract prev. time
            data[matches][j] = v - read_stacks[i - 1][matches][j]
if TLS:
    # OT
    sent = read_data(input_file, x_ind, 21)
    recv = read_data(input_file, x_ind, 23)
    base = {}
    for x in read_stacks[TLS_INDEX]:
        base[x] = [0 for _ in range(len(read_stacks[3][x]))]
    tls = compute_tls_curve(base, sent, recv)
    read_stacks.insert(TLS_INDEX + 1, tls)
# -----------------------------------------------------------------------------
# Bar Plot
# if False or PLOT_ALL:
#     output_file = output_dir + f'provider_bar{EXTENSION}'
#     xlabel = "Phase"
#     title = "Provider App - 500 Uploads - 10 Reps"
#     data = {}
#     for i, s in enumerate(read_stacks):
#         if i not in [0]:
#             # Ignore start and non significant stacks
#             data[i] = s[500]  # Only consider 500 matches
#     stacked_bar_plot_mult(output_file, [[data]], xlabel, ylabel, title,
#                           adjust=adjust_bar,
#                           xlabels=phases[:])
# -----------------------------------------------------------------------------
# Stacked Bar Plot
if 0 or PLOT_ALL:
    output_file = output_dir + f'provider_stacked_bar{EXTENSION}'
    xlabel = "Uploads [#]"
    title = "Provider App - 1-1000 Uploads - 10 Reps"
    stacks = read_stacks[1:]  # Don't plot start time
    legend_texts = phases[:]
    legend_texts[0] = 'Hash Key Retrieval (R.)'
    stacked_bar_plot_mult(output_file, [stacks], xlabel, ylabel, title,
                          stack_legend=Legend(legend_texts),
                          adjust=adjust_stacked_bar, label_step=1,
                          colors=colors,
                          hatches=hatches,
                          y_lim=62)
# RAM Plot
if 0 or PLOT_ALL:
    output_file = output_dir + f'provider_ram{EXTENSION}'
    xlabel = "Uploads [#]"
    title = "Title"
    d = read_ram_max(
        input_dir + 'butthead_provider_uploads_ram' + '.csv', 2, 4)
    convert_mib_to_gb(d)
    error_plot_mult(
        [d],
        output_file,
        100,
        0,
        35,
        5,
        xlabel,
        "RAM Usage [GB]",
        r"Title",
        auto_ylabels=True,
    )
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# Random Length Dependence
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
name = "butthead_provider_record_length"
output_dir = output_dir_provider + "rand_length/"
os.makedirs(output_dir, exist_ok=True)
# Read Data
input_file = input_dir + name + '.csv'
# Read Data
read_stacks = []
for i in indizes:
    d = read_data(input_file, 3, i)
    read_stacks.append(d)
for i in range(len(read_stacks) - 1, 0, -1):
    data = read_stacks[i]
    for matches in data.keys():
        for j, v in enumerate(data[matches]):
            # Subtract prev. time
            data[matches][j] = v - read_stacks[i - 1][matches][j]
if TLS:
    # OT
    sent = read_data(input_file, 3, 21)
    recv = read_data(input_file, 3, 23)
    base = {}
    for x in read_stacks[TLS_INDEX]:
        base[x] = [0 for _ in range(len(read_stacks[3][x]))]
    tls = compute_tls_curve(base, sent, recv)
    read_stacks.insert(TLS_INDEX + 1, tls)
# -----------------------------------------------------------------------------
# Bar Plot
# if False or PLOT_ALL:
#     output_file = output_dir + f'provider_bar{EXTENSION}'
#     xlabel = "Phase"
#     title = "Provider App - 500 Record Len - 100 Uploads - 10 Reps"
#     data = {}
#     for i, s in enumerate(read_stacks):
#         if i not in [0]:
#             # Ignore start and non significant stacks
#             data[i] = s[500]  # Only consider 500 matches
#     stacked_bar_plot_mult(output_file, [[data]], xlabel, ylabel, title,
#                           adjust=adjust_bar2, xlabels=phases[:])
# -----------------------------------------------------------------------------
# Stacked Bar Plot
if False or PLOT_ALL:
    output_file = output_dir + f'provider_stacked_bar{EXTENSION}'
    xlabel = "Record Length [#]"
    title = "Provider App - 100 Uploads - Length 1-1000 - 10 Reps"
    stacks = read_stacks[1:]  # Don't plot start time
    xticks = list(range(100, 1000, 100)) + [1000]
    xticks = [i if i in xticks else '' for i in range(100, 1001, 100)]
    stacked_bar_plot_mult(output_file, [stacks], xlabel, ylabel, title,
                          stack_legend=Legend(phases[:]),
                          label_step=1, colors=colors, hatches=hatches,
                          adjust=adjust_stacked_bar2, xlabels=xticks,
                          y_lim=15)
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# IKV Data
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
name = "butthead_provider_ikv"
output_dir = output_dir_provider + "ikv/"
os.makedirs(output_dir, exist_ok=True)
# Read Data
input_file = input_dir + name + '.csv'
# Read Data
read_stacks_ikv1 = []
for i in indizes:
    old_d = read_data(input_file, x_ind, i)
    d = {}
    for m in old_d:
        # Rescale record counts to percent of the 4620-record data set.
        d[m * 100 // 4620] = old_d[m]
    read_stacks_ikv1.append(d)
for i in range(len(read_stacks_ikv1) - 1, 0, -1):
    data = read_stacks_ikv1[i]
    for matches in data.keys():
        for j, v in enumerate(data[matches]):
            # Subtract prev. time
            data[matches][j] = v - read_stacks_ikv1[i - 1][matches][j]
if TLS:
    # OT
    sent = read_data(input_file, x_ind, 21)
    recv = read_data(input_file, x_ind, 23)
    base = {}
    for x in sent:
        base[x] = [0 for _ in range(len(sent[x]))]
    tls = compute_tls_curve(base, sent, recv)
    d = {}
    for m in tls:
        d[m * 100 // 4620] = tls[m]
    read_stacks_ikv1.insert(TLS_INDEX + 1, d)
# -----------------------------------------------------------------------------
# Bar Plot
# if False or PLOT_ALL:
#     output_file = output_dir + f'provider_bar{EXTENSION}'
#     xlabel = "Phase"
#     title = "Provider App - IKV Data - Upload (Ran. Choice from 4620 " \
#             "Records) - 10 Reps"
#     data = {}
#     for i, s in enumerate(read_stacks_ikv1):
#         if i not in [0]:
#             # Ignore start and non significant stacks
#             data[i] = s[100]  # Only consider 500 matches
#     stacked_bar_plot_mult(output_file, [[data]], xlabel, ylabel, title,
#                           adjust=adjust_bar2, xlabels=phases[:])
# -----------------------------------------------------------------------------
# Stacked Bar Plot
if 1 or PLOT_ALL:
    output_file = output_dir + f'provider_stacked_bar{EXTENSION}'
    xlabel = "Uploaded Records [%]"
    title = "Provider App - IKV Data (4620 Records) - 10 Reps"
    stacks = read_stacks_ikv1[1:]  # Don't plot start time
    stacked_bar_plot_mult(output_file, [stacks], xlabel, ylabel, title,
                          stack_legend=Legend(phases[:]),
                          adjust=None, label_step=1,
                          colors=colors,
                          hatches=hatches, y_lim=15)
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# IKV Data 2 - Non Random
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
name = "butthead_provider_ikv2"
output_dir = output_dir_provider + "ikv2/"
os.makedirs(output_dir, exist_ok=True)
# Read Data
input_file = input_dir + name + '.csv'
# Read Data
read_stacks_ikv2 = []
for i in indizes:
    old_d = read_data(input_file, x_ind, i)
    d = {}
    for m in old_d:
        d[m * 100 // 4620] = old_d[m]
    read_stacks_ikv2.append(d)
for i in range(len(read_stacks_ikv2) - 1, 0, -1):
    data = read_stacks_ikv2[i]
    for matches in data.keys():
        for j, v in enumerate(data[matches]):
            # Subtract prev. time
            data[matches][j] = v - read_stacks_ikv2[i - 1][matches][j]
if TLS:
    # OT
    sent = read_data(input_file, x_ind, 21)
    recv = read_data(input_file, x_ind, 23)
    base = {}
    for x in sent:
        base[x] = [0 for _ in range(len(sent[x]))]
    tls = compute_tls_curve(base, sent, recv)
    d = {}
    for m in tls:
        d[m * 100 // 4620] = tls[m]
    read_stacks_ikv2.insert(TLS_INDEX + 1, d)
# -----------------------------------------------------------------------------
# Bar Plot
# if False or PLOT_ALL:
#     output_file = output_dir + f'provider_bar{EXTENSION}'
#     xlabel = "Phase"
#     title = "Provider App - IKV Data - Upload (Seq. Choice from 4620 " \
#             "Records) - 10 Reps"
#     data = {}
#     for i, s in enumerate(read_stacks_ikv2):
#         if i not in [0]:
#             # Ignore start and non significant stacks
#             data[i] = s[100]  # Only consider 500 matches
#     stacked_bar_plot_mult(output_file, [[data]], xlabel, ylabel, title,
#                           xlabels=phases[:],
#                           adjust=adjust_bar2)
# -----------------------------------------------------------------------------
# Stacked Bar Plot
# if False or PLOT_ALL:
#     output_file = output_dir + f'provider_stacked_bar{EXTENSION}'
#     xlabel = "Uploaded Records [%]"
#     title = "Provider App - IKV Data (4620 Records) - 10 Reps"
#
#     stacks = read_stacks_ikv2[1:]  # Don't plot start time
#     stacked_bar_plot_mult(output_file, [stacks], xlabel, ylabel, title,
#                           stack_legend=Legend(phases[:]),
#                           adjust=adjust_stacked_bar,
#                           label_step=1, colors=colors, hatches=hatches)
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# Both IKV
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
if 0 or PLOT_ALL:
    # Side-by-side comparison: sequential vs random record choice.
    output_file = output_dir + '../' + f'ikv_stacked{EXTENSION}'
    xlabel = "Uploaded Records [%]"
    title = "Provider App - IKV Data (4620 Records) - Seq. vs Ran. - " \
            "" \
            "10 Reps"
    stacks = read_stacks_ikv2[1:]  # Don't plot start time
    stacked_bar_plot_mult(output_file,
                          [read_stacks_ikv2[1:], read_stacks_ikv1[1:]],
                          xlabel, ylabel, title,
                          stack_legend=Legend(phases[:]),
                          adjust=adjust_stacked_bar2,
                          label_step=1, colors=colors, hatches=hatches,
                          y_lim=14.5)
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# WZL Data
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
name = "butthead_provider_wzl"
output_dir = output_dir_provider + "wzl/"
os.makedirs(output_dir, exist_ok=True)
# Read Data
input_file = input_dir + name + '.csv'
# Read Data
read_stacks = []
for i in indizes:
    old_d = read_data(input_file, x_ind, i)
    d = {}
    for m in old_d:
        # WZL data set has 600 records total.
        d[m * 100 // 600] = old_d[m]
    read_stacks.append(d)
for i in range(len(read_stacks) - 1, 0, -1):
    data = read_stacks[i]
    for matches in data.keys():
        for j, v in enumerate(data[matches]):
            # Subtract prev. time
            data[matches][j] = v - read_stacks[i - 1][matches][j]
if TLS:
    # OT
    sent = read_data(input_file, x_ind, 21)
    recv = read_data(input_file, x_ind, 23)
    base = {}
    for x in sent:
        base[x] = [0 for _ in range(len(sent[x]))]
    tls = compute_tls_curve(base, sent, recv)
    d = {}
    for m in tls:
        d[m * 100 // 600] = tls[m]
    read_stacks.insert(TLS_INDEX + 1, d)
# -----------------------------------------------------------------------------
# Bar Plot
# if False or PLOT_ALL:
#     output_file = output_dir + f'provider_bar{EXTENSION}'
#     xlabel = "Phase"
#     title = "Provider App - WZL Data - Full Upload (600 Records) - 10 Reps"
#     data = {}
#     for i, s in enumerate(read_stacks):
#         if i not in [0]:
#             # Ignore start and non significant stacks
#             data[i] = s[100]  # Only consider 500 matches
#     # adjust = list(adjust_bar[:])
#     # adjust[0] = 0.085
#     stacked_bar_plot_mult(output_file, [[data]], xlabel, ylabel, title,
#                           adjust=None, xlabels=phases[:])
# -----------------------------------------------------------------------------
# Stacked Bar Plot
if False or PLOT_ALL:
    output_file = output_dir + f'provider_stacked_bar{EXTENSION}'
    xlabel = "Uploaded Records [%]"
    title = "Provider App - WZL Data (600 Records) - 10 Reps"
    stacks = read_stacks[1:]  # Don't plot start time
    stacked_bar_plot_mult(output_file, [stacks], xlabel, ylabel,
                          title,
                          stack_legend=Legend(phases[:]),
                          adjust=adjust_stacked_bar,
                          label_step=1, colors=colors, hatches=hatches,
                          y_lim=30)
| 16,340
| 39.649254
| 79
|
py
|
parameter-exchange
|
parameter-exchange-master/results_eval/scripts/client_ram.py
|
#!/usr/bin/env python3
"""Bloom Size Plot."""
# Flat plotting script: overlays the client app's RAM-usage curve with
# the mean start time of each protocol phase (phase names become the
# x-tick labels at their measured offsets).
import os
from plot.plot import (OUTPUT_DIR, INPUT_DIR,
                       read_y_only, read_data, read_ram,
                       mean_confidence_interval, error_plot_mult, )
PLOT_ALL = 1
output_dir = OUTPUT_DIR + "client/"
os.makedirs(output_dir, exist_ok=True)
input_dir = INPUT_DIR + "client/"
# Preserver--------------------------------------------------------------------
if False or PLOT_ALL:
    name = "butthead_bloom"
    ram_file = input_dir + name + '_ram.csv'
    input_file = input_dir + name + '.csv'
    output_file = output_dir + 'client_ram' + '.png'
    # Read Data
    # Only consider last line, because each ram measurement is different
    data, max_y = read_ram(ram_file, start_line=5, end_line=6)
    # convert to GiB
    for key in data:
        data[key] = [i / (2 ** 10) for i in data[key]]
    # Columns 6..16 hold the cumulative phase timestamps of the run.
    times = []
    for i in range(6, 17):
        times.append(read_y_only(input_file, i, start_line=5, end_line=6))
    means = []
    for t in times:
        m, h = mean_confidence_interval(t)
        means.append(m)
    # Phase offsets relative to the run's start time become x-ticks.
    start_time = means[0]
    xticks = []
    for m in means[1:]:
        xticks.append(m - start_time)
    # Configuration
    xlabel = "Phase"
    ylabel = "RAM Usage [GiB]"
    title = "RAM Client App - 1000 Matches - Rel. Offset 0.3%"
    x_labels = [
        'Candidates', 'Hash Key', 'PSI Prep.', 'PSI Exec.', 'PSI Final',
        'Bloom Retr.', 'Matching', 'Key Retr. (OT)', 'Record Retr.', 'Decryption']
    # x_labels = [
    #     'Hash Key R.', 'PSI Prep.', 'PSI Exec.',
    #     'Key R. (OT)', 'Record R.', 'Decryption']
    print("Length of RAM measurement: ", max(data.keys()), 's')
    print("Length of Time measurement: ", xticks[-1], 's')
    # Fit lengths
    # new_data = {}
    # step = xticks[-1] / len(data.keys())
    # for i, key in enumerate(sorted(data.keys())):
    #     new_data[(i + 1) * step] = data[key]
    # data = new_data
    # Drop phases with negative offsets (iterate backwards so the paired
    # deletions keep xticks and x_labels aligned).
    for i in range(len(xticks[:]) - 1, -1, -1):
        if xticks[i] < 0:
            del xticks[i]
            del x_labels[i]
    xstep = 0.5
    min_y = 0
    max_y = 20
    y_step = 5
    print(xticks, x_labels)
    error_plot_mult([data], output_file, xstep, min_y, max_y, y_step, xlabel, ylabel,
                    title, xticks=xticks, xlabels=x_labels,
                    ver_grid=True, x_rotation=90, auto_ylabels=True)
| 2,377
| 32.492958
| 85
|
py
|
parameter-exchange
|
parameter-exchange-master/results_eval/scripts/bloom_full_cap.py
|
#!/usr/bin/env python3
"""Bloom dependence on Capacity."""
from typing import List
import numpy as np
from numpy import log as ln
# noinspection PyUnresolvedReferences
from plot.plot import (read_data, convert_to_gb, convert_to_minutes,
INPUT_DIR, OUTPUT_DIR, error_plot_mult,
EXTENSION, plot_settings, Legend)
PLOT_ALL = 0
output_dir = OUTPUT_DIR + 'bloom_capacity_dep/'
input_dir = INPUT_DIR + 'bloom_full_cap/'
name = "butthead_bloom_cap"
# Shared x-axis: capacities 0..1e9 in 1e8 steps, labelled in billions.
xticks = [i for i in range(0, 10 ** 9 + 1, 10 ** 8)]
# NOTE(review): the first entry is the int 0, not a str, so List[str]
# is not strictly accurate.
xlabels: List[str] = [0] + [
    f"{round(i / 10 ** 9, 1)} Bil" for i in
    xticks[1:]]
# Adjusts
# Common subplot margins passed to the plot helpers below.
BOTTOM_ADJUST = 0.22
RIGHT_ADJUST = 0.935
TOP_ADJUST = 0.97
LEFT_ADJUST = 0.13
def comp_bloom_bits(n, p):
    """Optimal Bloom filter size in bits for *n* elements at FP rate *p*.

    Implements the standard formula m = ceil(-n * ln(p) / ln(2)^2).
    """
    denominator = ln(2) ** 2
    return np.ceil(-n * ln(p) / denominator)
def bit_to_gb(b):
    """Convert a bit count *b* to (decimal) gigabytes."""
    num_bytes = b / 8
    return num_bytes / 1000 / 1000 / 1000
# Size: measured filter size vs. theoretic optimum.
if False or PLOT_ALL:
    with plot_settings(half_width=True):
        d = read_data(input_dir + f"{name}.csv", 1, 5)
        d = convert_to_gb(d)
        # Theoretic curve: two identical samples per x, so the error
        # interval collapses to the exact value -- presumably how
        # error_plot_mult consumes the sample lists.
        theo = {}
        for x in d:
            theo[x] = [bit_to_gb(comp_bloom_bits(x, 10 ** -20))] * 2
        error_plot_mult(
            [d, theo],
            output_dir + f'size{EXTENSION}',
            100000000,
            0,
            20,
            4,
            "Capacity [#]",
            "Size [GB]",
            r"Bloom Filter Size (FP Rate: $1^{{-20}}$, Stored: To Cap., "
            r"10 Reps)",
            legend=Legend(['Measured', 'Theoretic'], location="upper left"),
            adjust=(LEFT_ADJUST, RIGHT_ADJUST, TOP_ADJUST, BOTTOM_ADJUST),
            xticks=xticks[:],
            xlabels=xlabels[:],
            x_label_step=2
        )
# Query Time
if False or PLOT_ALL:
    with plot_settings(half_width=True):
        d = read_data(input_dir + f"{name}.csv", 1, 7)
        # d = convert_to_minutes(d)
        error_plot_mult(
            [d],
            output_dir + f'query_time{EXTENSION}',
            100000000,
            0,
            80,
            20,
            "Capacity [#]",
            "Query Time [s]",
            r"Time for Query (FP Rate: $1^{{-20}}$, Stored: To Cap., "
            r"1Bil Queries, "
            "10 Reps)",
            adjust=(LEFT_ADJUST, RIGHT_ADJUST, TOP_ADJUST, BOTTOM_ADJUST),
            xticks=xticks[:],
            xlabels=xlabels[:],
            x_label_step=2
        )
# Insertion Time (filter filled to capacity)
if False or PLOT_ALL:
    with plot_settings(half_width=True):
        d = read_data(input_dir + f"{name}.csv", 1, 6)
        d = convert_to_minutes(d)
        error_plot_mult(
            [d],
            output_dir + f'insert_time{EXTENSION}',
            100000000,
            0,
            400,
            100,
            "Capacity [#]",
            "Insertion Time [min]",
            r"Time for Insertion (FP Rate: $1^{{-20}}$, Stored: To Cap., "
            r"10 Reps)",
            adjust=(
                LEFT_ADJUST + 0.02, RIGHT_ADJUST, TOP_ADJUST, BOTTOM_ADJUST),
            xticks=xticks[:],
            xlabels=xlabels[:],
            x_label_step=2
        )
# Insertion Time alone (fixed 100 Mio stored elements)
if 1 or PLOT_ALL:
    with plot_settings(half_width=True):
        d = read_data(input_dir + f"butthead_bloom_cap_fixed_insert.csv", 1, 6)
        d = convert_to_minutes(d)
        error_plot_mult(
            [d],
            output_dir + f'solo_insert_time{EXTENSION}',
            100000000,
            0,
            100,
            20,
            "Capacity [#]",
            "Insertion Time [min]",
            r"Time for Insertion (FP Rate: $1^{{-20}}$, Stored: 100 Mio., "
            r"10 Reps)",
            adjust=(
                LEFT_ADJUST + 0.02, RIGHT_ADJUST, TOP_ADJUST, BOTTOM_ADJUST),
            xticks=xticks[:],
            xlabels=xlabels[:],
            x_label_step=2
        )
| 3,809
| 28.083969
| 79
|
py
|
parameter-exchange
|
parameter-exchange-master/results_eval/scripts/psi_comm.py
|
#!/usr/bin/env python3
"""PSI communication-overhead plots."""
import math
import os
from plot.plot import (read_data, INPUT_DIR, OUTPUT_DIR,
                       read_data_mult, convert_to_gb, stacked_bar_plot,
                       stacked_bar_plot_mult)
PLOT_ALL = True
output_dir = OUTPUT_DIR + 'psi_comm/'
os.makedirs(output_dir, exist_ok=True)
input_dir = INPUT_DIR + 'psi/'
# CSV column indices of the traffic sent to / received from the server.
to_server = 11
from_server = 13
# Setsize ---------------------------------------------------------------------
if False or PLOT_ALL:
    name = "butthead_psi_setsize"
    input_file = input_dir + f"setsize/{name}2.csv"
    d_to = read_data(input_file, 2, to_server)
    d_from = read_data(input_file, 2, from_server)
    # convert_to_gb appears to mutate the dicts in place (return value
    # unused here) -- TODO confirm against plot.plot.
    convert_to_gb(d_to)
    convert_to_gb(d_from)
    # Minor-tick labels at the full-million sizes; most intermediate
    # labels are then blanked out to avoid clutter.
    minor_xlabels = [f"{int(i / 10 ** 6)} Mio" for i in sorted(d_to.keys()) if
                     i % 10 ** 6 == 0]
    for i in [3, 5, 7, 9, 11, 13, 15, 17, 19]:
        minor_xlabels[i] = ''
    # Major ticks at the non-million (power-of-two) set sizes; minors
    # at every other position.
    xticks = [i for i, x in enumerate(sorted((d_to.keys()))) if
              x != 0 and x % 10 ** 6 != 0]
    minor_xticks = [i for i, _ in enumerate(sorted((d_to.keys()))) if
                    i not in xticks]
    xlabels = [fr"$2^{{ {i} }}$" for i in range(20, 26, 1)]
    stacked_bar_plot(
        output_dir + f'{name}.png',
        [d_to, d_from],
        ["Receiver -> Sender", "Sender -> Receiver"],
        "Set Size [#]",
        "Size [GB]",
        "PSI Communication Overhead (No TLS/MAL)",
        xlabels=xlabels,
        xticks=xticks,
        minor_xticks=minor_xticks,
        minor_xlabels=minor_xlabels,
        label_step=1
    )
# Latency WITHOUT TLS ---------------------------------------------------------
if False or PLOT_ALL:
    name = "butthead_psi_latency"
    input_file = input_dir + f"latency/{name}.csv"
    data_list_to = read_data_mult(input_file, 2, to_server, 7)
    data_list_from = read_data_mult(input_file, 2, from_server, 7)
    for d in data_list_to.values():
        convert_to_gb(d)
    for d in data_list_from.values():
        convert_to_gb(d)
    latencies = sorted(data_list_to.keys(), reverse=True)
    # NOTE(review): legend_labels is built but never used below.
    legend_labels = [f"{i}ms" for i in latencies]
    # One [to-server, from-server] pair per latency setting.
    data = []
    for lat in list(data_list_to.keys()):
        data.append([data_list_to[lat], data_list_from[lat]])
    xlabels = [f"{i // 1000000}M" if not math.log2(
        i).is_integer() else fr"$2^{{{int(math.log2(i))}}}$" for i in
               sorted(list(data_list_to.values())[0])]
    stacked_bar_plot_mult(
        output_dir + f'{name}.png',
        data,
        ["Receiver -> Sender", "Sender -> Receiver"],
        "Set Size [#]",
        "Size [GB]",
        r"PSI Communication Overhead (No TLS/MAL)",
        # NOTE(review): the labels are passed via the xticks parameter.
        xticks=xlabels,
        label_step=1,
        # legend_pos=None
    )
# # Bandwidth WITHOUT TLS
# -------------------------------------------------------
# if False or PLOT_ALL:
# name = "butthead_psi_bandwidth"
# data_list = read_data_mult(input_dir + f"bandwidth/{name}.csv", 2, 10, 8)
# bws = sorted(data_list.keys())[1:]
# legend_labels = [f"{round(i / 1000)} Mbit/s" for i in bws]
# bws.append(0)
# legend_labels.append('Unlimited')
# for data in data_list.values():
# convert_to_minutes(data)
# xticks = list(range(0, 10 ** 6 + 1, 10 ** 5))
# error_plot_mult(
# [data_list[i] for i in bws],
# output_dir + f'{name}.png',
# 100000,
# 0,
# 20,
# 5,
# "PSI Setsize [#]",
# "Time [min]",
# "PSI Execution Time w/ restricted Bandwidth [1:10] (No TLS/MAL)",
# legend_labels=legend_labels,
# x_label_step=2,
# legend_pos=None,
# xticks=xticks,
# x_labels=[0] + [f"{i / 10 ** 6:1} Mio" for i in xticks[1:]]
# )
# # Comparison Plot
# -------------------------------------------------------------
# if True or PLOT_ALL:
# MEASURED_TLS = True
# legend = []
# data_list = []
# fmts = ['-' for _ in range(4)]
# # Dashed line for theoretic tls + RR 17
# fmts[1], fmts[2] = '--', '--'
#
# baseline = read_data(input_dir +
# f"baseline/butthead_psi_baseline.csv", 2,
# 10)
#
# malicious = read_data(input_dir + f"malicious/butthead_psi_rr16.csv",
# 2,
# 10)
# data_list.append(malicious)
# legend.append("RR16")
#
# if MEASURED_TLS:
# tls = read_data(input_dir + f"tls/butthead_psi_tls.csv", 2,
# 10)
# data_list.append(tls)
# legend.append("KKRT16 TLS (Meas./Broken)")
# y_step = 10
# y_max = 60
# fmts.insert(1, ':')
# else:
# y_step = 10
# y_max = 60
#
# malicious = read_data(input_dir + f"malicious/butthead_psi_rr17.csv",
# 2,
# 10)
# data_list.append(malicious)
# legend.append("RR17 (Broken)")
#
# THEO_TLS = True
# if THEO_TLS:
# # Read sent Bytes
# sent = read_data(
# input_dir + f"baseline/butthead_psi_baseline.csv", 2, 11)
# # Read received Bytes
# received = read_data(
# input_dir + f"baseline/butthead_psi_baseline.csv", 2, 13)
# # Add overhead
# tls_theoretic = compute_tls_curve(baseline, sent, received)
# data_list.append(tls_theoretic)
# legend.append("KKRT16 TLS (Theo.)")
#
# data_list.append(baseline)
# legend.append("KKRT16")
#
# error_plot_mult(
# data_list,
# output_dir + f'psi_compare.png',
# 100000,
# 0,
# y_max,
# y_step,
# "PSI Set Size [#]",
# "Time [s]",
# "PSI Execution Time (10 Reps)",
# legend_labels=legend,
# x_label_step=2,
# legend_pos=None,
# x_labels=[f"{round(i):,}" for i in range(100000, 1000001, 100000)],
# xticks=range(100000, 1000001, 100000),
# fmts=fmts
# )
| 5,945
| 32.784091
| 79
|
py
|
parameter-exchange
|
parameter-exchange-master/results_eval/scripts/psi_time.py
|
#!/usr/bin/env python3
"""PSI execution-time plots (latency, bandwidth, comparison, RAM)."""
import os
# noinspection PyUnresolvedReferences
from plot.colors import green, orange, blue
from plot.plot import (read_data, INPUT_DIR, OUTPUT_DIR,
                       read_data_mult, error_plot_mult, convert_to_minutes,
                       EXTENSION, plot_settings, Legend, read_ram_max,
                       convert_to_gb, convert_mib_to_gb)
# noinspection PyUnresolvedReferences
from plot.tls import compute_tls_curve
PLOT_ALL = 0
output_dir = OUTPUT_DIR + 'psi/'
os.makedirs(output_dir, exist_ok=True)
input_dir = INPUT_DIR + 'psi/'
# Setsize ---------------------------------------------------------------------
# if False or PLOT_ALL:
# name = "butthead_psi_setsize"
# d = read_data(input_dir + f"setsize/{name}2.csv", 2, 10)
# minor_xticks = list(range(10 ** 6, 2 * 10 ** 7 + 1, 10 ** 6))
# xticks = [2 ** i for i in range(20, 25)]
# minor_xlabels = [f"" for i in minor_xticks]
# minor_xlabels[5] = "6 Mio"
# minor_xlabels[9] = "10 Mio"
# minor_xlabels[14] = "15 Mio"
# minor_xlabels[-1] = "20 Mio"
# xlabels = [rf"$2^{{{i}}}$" for i in range(20, 25)]
# error_plot_mult(
# [d],
# output_dir + f'{name}{EXTENSION}',
# 1,
# 0,
# 80,
# 10,
# "PSI Set Size [#]",
# "Time [s]",
# "PSI Execution Time (No TLS/MAL)",
# adjust=None, # (0.09, 0.96, 0.98, 0.18),
# xlabels=xlabels,
# xticks=xticks,
# minor_xticks=minor_xticks,
# minor_xlabels=minor_xlabels,
# # x_rotation=30
# )
# Latency WITHOUT TLS ---------------------------------------------------------
if False or PLOT_ALL:
    with plot_settings(half_width=True):
        name = "butthead_psi_latency"
        data_list = read_data_mult(input_dir + f"latency/{name}.csv", 2, 10, 7)
        for d in data_list.values():
            # Remove 2er potencies -- only the "x Mio" set sizes are
            # plotted in this figure.
            del d[2 ** 20]
            del d[2 ** 21]
            del d[2 ** 22]
            del d[2 ** 23]
            del d[2 ** 24]
            convert_to_minutes(d)
        print("Runtime at 300ms: ",
              sum(data_list[300][10 ** 7]) / len(data_list[300][10 ** 7]),
              "min")
        latencies = sorted(data_list.keys(), reverse=True)
        legend_labels = [f"{i}ms" for i in latencies]
        # Pad the two shortest labels so the legend entries line up.
        legend_labels[-2] = " 50ms"
        legend_labels[-1] = "  0ms"
        xticks = list(range(0, 10 ** 7 + 1, 2 * 10 ** 6))
        error_plot_mult(
            [data_list[i] for i in latencies],
            output_dir + f'{name}{EXTENSION}',
            2 * 10 ** 6,
            0,
            5,
            1,
            "PSI Set Size [#]",
            "Time [min]",
            "PSI Execution Time w/ Latency(No TLS/MAL)",
            adjust=(0.11, 0.935, 0.76, 0.21),
            # for 2/3 Height (0.19, 0.93, 0.98, 0.16),
            legend=Legend(legend_labels, location='above'),
            x_label_step=1,
            y_lim=5,
            xticks=xticks,
            xlabels=[0] + [f"{i // 10 ** 6} Mio" for i in xticks[1:]]
        )
# Bandwidth WITHOUT TLS -------------------------------------------------------
if False or PLOT_ALL:
    with plot_settings(half_width=True):
        name = "butthead_psi_bandwidth"
        data_list = read_data_mult(
            input_dir + f"bandwidth/{name}.csv", 2, 10, 8)
        # Bandwidths ascending; the leading 0 key (the unlimited run,
        # cf. the "Unlimited" legend entry) is moved to the end.
        bws = sorted(data_list.keys())[1:]
        bws.append(0)
        # Right-aligned legend entries (padded with spaces).
        legend_labels = [
            r"  6Mbit/s",
            r" 50Mbit/s",
            r"100Mbit/s",
            "Unlimited"
        ]
        for data in data_list.values():
            convert_to_minutes(data)
        # Console summary of the slowest configuration.
        # BUGFIX: the log string previously read "Mibt/s".
        print("Runtime at 6Mbit/s: ",
              sum(data_list[6000][10**6]) / len(data_list[6000][10**6]), "min")
        xticks = list(range(0, 10 ** 6 + 1, 10 ** 5))
        error_plot_mult(
            [data_list[i] for i in bws],
            output_dir + f'{name}{EXTENSION}',
            100000,
            0,
            5,
            1,
            "PSI Set Size [#]",
            "Time [min]",
            "PSI Execution Time w/ restricted Bandwidth [1:10] (No TLS/MAL)",
            adjust=(0.11, 0.93, 0.76, 0.21),
            # for 2/3 height (0.15, 0.93, 0.98, 0.16),
            legend=Legend(legend_labels, location='above', ncols=2),
            x_label_step=2,
            xticks=xticks,
            xlabels=[0] + [f"{i / 10 ** 6:1} Mio" for i in xticks[1:]],
            y_lim=5
        )
# Comparison Plot -------------------------------------------------------------
if 0 or PLOT_ALL:
    # Comparison of PSI protocol variants: KKRT16 baseline, measured
    # and theoretic TLS overhead, and the malicious-secure RR16/RR17.
    MEASURED_TLS = True
    legend = []
    data_list = []
    fmts = ['-' for _ in range(4)]
    # Dotted line for RR 17
    fmts[1] = ':'
    # Dashed for TLS theo
    fmts[2] = '--'
    baseline = read_data(input_dir + f"baseline/butthead_psi_baseline.csv", 2,
                         10)
    malicious = read_data(input_dir + f"malicious/butthead_psi_rr16.csv",
                          2,
                          10)
    data_list.append(malicious)
    legend.append("RR16")
    if MEASURED_TLS:
        tls = read_data(input_dir + f"tls/butthead_psi_tls.csv", 2,
                        10)
        data_list.append(tls)
        legend.append("KKRT16 TLS (Meas.)")
        y_step = 10
        y_max = 60
        fmts.insert(1, '-')
    else:
        y_step = 10
        y_max = 60
    malicious = read_data(input_dir + f"malicious/butthead_psi_rr17.csv",
                          2,
                          10)
    data_list.append(malicious)
    legend.append("RR17 (Broken)")
    THEO_TLS = True
    if THEO_TLS:
        # Read sent Bytes
        sent = read_data(
            input_dir + f"baseline/butthead_psi_baseline.csv", 2, 11)
        # Read received Bytes
        received = read_data(
            input_dir + f"baseline/butthead_psi_baseline.csv", 2, 13)
        # Add the theoretic TLS overhead on top of the baseline curve.
        tls_theoretic = compute_tls_curve(baseline, sent, received)
        data_list.append(tls_theoretic)
        legend.append("KKRT16 TLS (Theo.)")
    data_list.append(baseline)
    legend.append("KKRT16")
    error_plot_mult(
        data_list,
        output_dir + f'psi_compare{EXTENSION}',
        100000,
        0,
        y_max,
        y_step,
        "PSI Set Size [#]",
        "Time [s]",
        "PSI Execution Time (10 Reps)",
        adjust=None,  # (0.09, 0.97, 0.98, 0.16),
        legend=Legend(legend),
        x_label_step=2,
        xlabels=[f"{round(i):,}" for i in range(100000, 1000001, 100000)],
        xticks=list(range(100000, 1000001, 100000)),
        fmts=fmts,
        auto_ylabels=True,
    )
# # Set Size ---------------------------------------------------------------------
# if False or PLOT_ALL:
# name = "butthead_psi_setsize_ram"
# dc = read_ram_max(
# input_dir + f"setsize/butthead_psi_setsize2_clientram.csv", 2, 9)
# ds = read_ram_max(
# input_dir + f"setsize/butthead_psi_setsize2_serverram.csv", 2, 9)
# convert_mib_to_gb(dc)
# convert_mib_to_gb(ds)
# minor_xticks = list(range(10 ** 6, 2 * 10 ** 7 + 1, 10 ** 6))
# xticks = [2 ** i for i in range(20, 25)]
# minor_xlabels = [f"" for i in minor_xticks]
# minor_xlabels[5] = "6 Mio"
# minor_xlabels[9] = "10 Mio"
# minor_xlabels[14] = "15 Mio"
# minor_xlabels[-1] = "20 Mio"
# xlabels = [rf"$2^{{{i}}}$" for i in range(20, 25)]
# error_plot_mult(
# [dc, ds],
# output_dir + f'{name}{EXTENSION}',
# 1,
# 0,
# 80,
# 10,
# "PSI Set Size [#]",
# "RAM Usage [GB]",
# "PSI Execution Time (No TLS/MAL)",
# adjust=None, # (0.09, 0.96, 0.98, 0.18),
# xlabels=xlabels,
# xticks=xticks,
# minor_xticks=minor_xticks,
# minor_xlabels=minor_xlabels,
# legend=Legend(['Client', 'Server'], 'top'),
# auto_ylabels=True
# # x_rotation=30
# )
# Setsize RAM + Normal---------------------------------------------------------
if 0 or PLOT_ALL:
    # Combined plot: runtime (left axis) plus client/server RAM usage
    # (second y-axis) over the PSI set size.
    name = "butthead_psi_setsize"
    d = read_data(input_dir + f"setsize/{name}2.csv", 2, 10)
    dc = read_ram_max(
        input_dir + f"setsize/butthead_psi_setsize2_clientram.csv", 2, 9)
    ds = read_ram_max(
        input_dir + f"setsize/butthead_psi_setsize2_serverram.csv", 2, 9)
    convert_mib_to_gb(dc)
    convert_mib_to_gb(ds)
    # Console summary of the key data points.
    print("Runtime at 2^20: ",
          sum(d[2 ** 20]) / len(d[2 ** 20]), "s")
    print("Server RAM usage at 2^20: ",
          sum(ds[2 ** 20]) / len(ds[2 ** 20]), "GB")
    print("Runtime at 20Mio: ",
          sum(d[2 * 10 ** 7]) / len(d[2 * 10 ** 7]), "s")
    print("Server RAM usage at 20Mio: ",
          sum(ds[2 * 10 ** 7]) / len(ds[2 * 10 ** 7]), "GB")
    # Major ticks at the power-of-two sizes; labelled minors at a few
    # selected million marks.
    minor_xticks = list(range(10 ** 6, 2 * 10 ** 7 + 1, 10 ** 6))
    xticks = [2 ** i for i in range(20, 25)]
    minor_xlabels = [f"" for i in minor_xticks]
    minor_xlabels[5] = "6 Mio"
    minor_xlabels[9] = "10 Mio"
    minor_xlabels[14] = "15 Mio"
    minor_xlabels[-1] = "20 Mio"
    xlabels = [rf"$2^{{{i}}}$" for i in range(20, 25)]
    error_plot_mult(
        [d, dc, ds],
        output_dir + f'psi_setsize{EXTENSION}',
        1,
        0,
        80,
        10,
        "PSI Set Size [#]",
        "Time [s]",
        "PSI Execution Time (No TLS/MAL)",
        adjust=None,  # (0.09, 0.96, 0.98, 0.18),
        xlabels=xlabels,
        xticks=xticks,
        minor_xticks=minor_xticks,
        minor_xlabels=minor_xlabels,
        legend=Legend(['Runtime', 'Client', 'Server'], 'top'),
        auto_ylabels=True,
        # RAM curves (dc, ds) go on the second y-axis.
        second_y_axis=[0, 1, 1],
        second_y_label="RAM Usage [GB]",
        second_ylim=15,
        y_lim=80,
        colors=[blue, orange, green],
        second_y_lim_bottom=0
        # x_rotation=30
    )
| 9,773
| 33.415493
| 82
|
py
|
parameter-exchange
|
parameter-exchange-master/results_eval/scripts/all.py
|
#!/usr/bin/env python3
"""Execute all plot scripts."""
# import client_ram
# noinspection PyUnresolvedReferences
import bloom_full_cap
# noinspection PyUnresolvedReferences
import bloom_full_err
# noinspection PyUnresolvedReferences
import bloom_full_query
# noinspection PyUnresolvedReferences
import client_time
# noinspection PyUnresolvedReferences
import metric
# import provider_ram
# noinspection PyUnresolvedReferences
import ot_time
# noinspection PyUnresolvedReferences
import provider_time
# noinspection PyUnresolvedReferences
import psi_time
| 555
| 24.272727
| 37
|
py
|
parameter-exchange
|
parameter-exchange-master/results_eval/scripts/client_comm.py
|
#!/usr/bin/env python3
"""Client-app communication-overhead plots."""
import copy
import os
from plot.colors import *
from plot.plot import (bar_plot, OUTPUT_DIR, INPUT_DIR, stacked_bar_plot,
                       read_data, join_stack_data, bar_plot_mult,
                       stacked_bar_plot_two_y,
                       convert_to_gb, convert_to_mb)
PLOT_ALL = 1
EXTENSION = '.png'
output_dir_client = OUTPUT_DIR + "client_comm/"
os.makedirs(output_dir_client, exist_ok=True)
input_dir = INPUT_DIR + "client/"
# CSV column index -> label of the traffic counter it holds
# (KS/SS presumably key server / storage server -- TODO confirm).
values = {
    17: "From KS",
    19: "ToKS",
    21: "FromSS",
    23: "ToSS",
    25: "To OT",
    27: "From OT",
    29: "To PSI",
    31: "From PSI"
}
indizes = sorted(values.keys())
labels = [values[i] for i in indizes]
# Columns 29/31 are the PSI counters; these lists exclude them for the
# Bloom-only plots.
indizes_wo_psi = [i for i in sorted(values.keys())[:-2]]
labels_wo_psi = [values[i] for i in indizes_wo_psi]
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# Random PSI & Bloom
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
name = "butthead_psi_vs_bloom"
output_dir = output_dir_client + 'psi_vs_bloom/'
os.makedirs(output_dir, exist_ok=True)
input_file = input_dir + name + '.csv'
# Read Data: one dict per traffic counter, keyed by match count.
read_stacks = []
for i in indizes:
    d = read_data(input_file, 4, i)
    d = convert_to_gb(d)
    read_stacks.append(d)
# -----------------------------------------------------------------------------
# Bar Plot
# --------------------------
# ---------------------------------------------------
# Bar Plot - Butthead Run - Joined
if 0 or PLOT_ALL:
    output_file = output_dir + f'joined_bar{EXTENSION}'
    xlabel = "Phase"
    ylabel = "Size [GB]"
    title = "Full Client App - 500 Matches - Rel. Offset 0.3% - 10 Reps"
    data = {}
    for i, s in enumerate(read_stacks):
        data[i] = s[500]  # Only consider 500 matches
    bar_plot(output_file, data, xlabel, ylabel, title, xticks=labels[:],
             small_xticks=True)
# -----------------------------------------------------------------------------
# Stacked Bar Plot - Relative Offset 2 - Butthead - Bloom
if 0 or PLOT_ALL:
    output_file = output_dir + f'client_stacked_bloom_butthead{EXTENSION}'
    xlabel = "Matches [#]"
    ylabel = "Size [GB]"
    title = "Full Client App - 0-1000 Matches - Rel. Offset 0.3% - 10 Reps"
    stacks = read_stacks[:-2]  # drop the two PSI counters
    stacked_bar_plot(output_file, stacks, labels_wo_psi[:], xlabel,
                     ylabel, title, label_step=1)
# -----------------------------------------------------------------------------
# Both
# -----------------------------------------------------------------------------
if 0 or PLOT_ALL:
    output_file = output_dir + f'client_stacked_butthead{EXTENSION}'
    xlabel = "Matches [#]"
    ylabel = "Size [GB]"
    title = "Full Client App - 0-1000 Matches - Rel. Offset 0.3% - 10 Reps"
    stacks = read_stacks[:]
    stacked_bar_plot(output_file, stacks, labels[:], xlabel, ylabel,
                     title, label_step=1)
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# WZL Client 1
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
name = "butthead_client_wzl1"
# NOTE(review): num_matches is set but the lookups below hard-code the
# match counts (10 and 6).
num_matches = 10
output_dir = output_dir_client + 'wzl/'
os.makedirs(output_dir, exist_ok=True)
input_file = input_dir + name + '.csv'
# Read Data
read_stacks_wzl1 = []
for i in indizes:
    d = read_data(input_file, 5, i)
    d = convert_to_mb(d)
    read_stacks_wzl1.append(d)
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# WZL Client 2
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
name = "butthead_client_wzl2"
num_matches = 6
input_file = input_dir + name + '.csv'
# Read Data
read_stacks_wzl2 = []
for i in indizes:
    d = read_data(input_file, 5, i)
    d = convert_to_mb(d)
    read_stacks_wzl2.append(d)
# -----------------------------------------------------------------------------
# Same for Joined
# -----------------------------------------------------------------------------
# Bar Plot - Butthead Run - Joined
if 0 or PLOT_ALL:
    output_file = output_dir + f'wzl_joined_bar{EXTENSION}'
    xlabel = "Phase"
    ylabel = "Size [MB]"
    title = "Client App - WZL Data - 10 Reps"
    data1 = {}
    for i, s in enumerate(read_stacks_wzl1):
        data1[i] = s[10]  # WZL1 run: 10 matches
    data2 = {}
    for i, s in enumerate(read_stacks_wzl2):
        data2[i] = s[6]  # WZL2 run: 6 matches
    bar_plot_mult(output_file, [data1, data2], xlabel, ylabel, title,
                  ['WZL1', 'WZL2'],
                  xticks=labels[:],
                  small_xticks=True,
                  )
# -----------------------------------------------------------------------------
# Both Both
# -----------------------------------------------------------------------------
if 0 or PLOT_ALL:
    output_file = output_dir + f'client_wzl{EXTENSION}'
    xlabel = "Metric"
    ylabel = "Size [MB]"
    title = "Full Client App - WZL Data - 10 Reps"
    stacks = join_stack_data([read_stacks_wzl1, read_stacks_wzl2],
                             [['WZL1'], ['WZL2']])
    stacked_bar_plot(output_file, stacks, labels[:],
                     xlabel, ylabel, title,
                     label_step=1)
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# IKV Client 1 - Rounding: 2, rel. Offset 2%
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
name = "butthead_client_ikv1"
num_matches = 77
input_file = input_dir + name + '.csv'
# Read Data (PSI counters excluded for the IKV runs)
read_stacks_ikv1 = []
for i in indizes_wo_psi:
    d = read_data(input_file, 5, i)
    d = convert_to_mb(d)
    read_stacks_ikv1.append(d)
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# IKV Client 2 - Rounding: 2, rel. Offset 2.5%
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
name = "butthead_client_ikv2"
num_matches = 77
input_file = input_dir + name + '.csv'
# Read Data
read_stacks_ikv2 = []
for i in indizes_wo_psi:
    d = read_data(input_file, 5, i)
    d = convert_to_mb(d)
    read_stacks_ikv2.append(d)
# -----------------------------------------------------------------------------
# Both Both
# -----------------------------------------------------------------------------
if 1 or PLOT_ALL:
    output_dir = output_dir_client + 'ikv/'
    os.makedirs(output_dir, exist_ok=True)
    output_file = output_dir + f'client_ikv{EXTENSION}'
    xlabel = "Metric"
    ylabel = "Size [MB]"
    title = "Client App - IKV Data - rel. Offset - 10 Reps"
    # Deep copies: the raw lists are reused below -- presumably the
    # plot helper mutates its input.
    stacks1 = copy.deepcopy(read_stacks_ikv1)
    stacks2 = copy.deepcopy(read_stacks_ikv2)
    stacked_bar_plot_two_y(output_file, [stacks1, stacks2], labels_wo_psi[:],
                           xlabel, ylabel, title,
                           label_step=1, )
# -----------------------------------------------------------------------------
# Same for Joined
# -----------------------------------------------------------------------------
# Bar Plot - Butthead Run - Joined
if 0 or PLOT_ALL:
    output_dir = output_dir_client + 'ikv/'
    os.makedirs(output_dir, exist_ok=True)
    output_file = output_dir + f'ikv_joined_bar{EXTENSION}'
    xlabel = "Phase"
    ylabel = "Size [MB]"
    title = "Client App - IKV Data - 10 Reps"
    data1 = {}
    for i, s in enumerate(read_stacks_ikv1):
        data1[i] = s[77]  # both IKV runs have 77 matches
    data2 = {}
    for i, s in enumerate(read_stacks_ikv2):
        data2[i] = s[77]
    bar_plot_mult(output_file, [data1, data2], xlabel, ylabel, title,
                  legend=['IKV1', 'IKV2'],
                  xticks=labels_wo_psi[:],
                  small_xticks=True,
                  second_y_axis=False)
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# Random Client 2 - Rounding: 3, ID Len 10, rel. Offset 0.5%
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
name = "butthead_bloom"
num_matches = 500
output_dir = output_dir_client + 'bloom/'
os.makedirs(output_dir, exist_ok=True)
input_file = input_dir + name + '.csv'
# Read Data (Bloom run: PSI counters excluded)
read_stacks = []
for i in indizes_wo_psi:
    d = read_data(input_file, 5, i)
    d = convert_to_gb(d)
    read_stacks.append(d)
# -----------------------------------------------------------------------------
# Bar Plot - Butthead Run - Bloom
if False or PLOT_ALL:
    output_file = output_dir + f'client_bar_bloom{EXTENSION}'
    xlabel = "Phase"
    ylabel = "Size [GB]"
    title = "Full Client App - 500 Matches - Rel. Offset 0.5% - 10 Reps"
    data = {}
    for i, s in enumerate(read_stacks):
        data[i] = s[num_matches]  # Only consider one number of matches
    bar_plot(output_file, data, xlabel, ylabel, title, xticks=labels_wo_psi[:],
             small_xticks=True)
# -----------------------------------------------------------------------------
# Stacked Bar Plot
if False or PLOT_ALL:
    output_file = output_dir + f'client_stacked_bloom{EXTENSION}'
    xlabel = "Results [#]"
    ylabel = "Size [GB]"
    title = "Full Client App - ID Len. 10 - Rel. Offset 0.5% - 10 Reps"
    # Deep copy so the helper cannot alter the data read above.
    stacks = copy.deepcopy(read_stacks)
    stacked_bar_plot(output_file, stacks, labels_wo_psi[:], xlabel, ylabel,
                     title,
                     label_step=1)
| 10,321
| 39.960317
| 79
|
py
|
parameter-exchange
|
parameter-exchange-master/results_eval/scripts/bloom_full_query.py
|
#!/usr/bin/env python3
"""Bloom dependence on Query Size."""
# noinspection PyUnresolvedReferences
from typing import List
from plot.plot import (read_data, convert_to_gb, convert_to_minutes,
                       INPUT_DIR, OUTPUT_DIR, error_plot_mult,
                       EXTENSION, plot_settings, Legend)
PLOT_ALL = 1
output_dir = OUTPUT_DIR + 'bloom_query_dep/'
input_dir = INPUT_DIR + 'bloom_full_query/'
name = "butthead_bloom_query"
# x-axis: query counts 0..1e9 in 1e8 steps, labelled in billions.
xticks = [i for i in range(0, 10 ** 9 + 1, 10 ** 8)]
# NOTE(review): the first entry is the int 0, not a str.
xlabels: List[str] = [0] + [
    f"{round(i / 10 ** 9, 1)} Bil" for i in
    xticks[1:]]
# Subplot margins for the plot below.
BOTTOM_ADJUST = 0.22
RIGHT_ADJUST = 0.935
TOP_ADJUST = 0.97
LEFT_ADJUST = 0.11
# Query Time
if False or PLOT_ALL:
    with plot_settings(half_width=True):
        d = read_data(input_dir + f"{name}.csv", 4, 7)
        d = convert_to_minutes(d)
        error_plot_mult(
            [d],
            output_dir + f'query_time{EXTENSION}',
            100000000,
            0,
            350,
            50,
            "Queries [#]",
            "Query Time [min]",
            r"Time for Query (FP Rate: $1^{{-20}}$, Stored: To Cap., "
            r"1Bil Queries, "
            "10 Reps)",
            x_label_step=2,
            auto_ylabels=True,
            adjust=(LEFT_ADJUST, RIGHT_ADJUST, TOP_ADJUST, BOTTOM_ADJUST),
            xticks=xticks,
            xlabels=xlabels
        )
| 1,381
| 28.404255
| 74
|
py
|
parameter-exchange
|
parameter-exchange-master/results_eval/scripts/client_time.py
|
#!/usr/bin/env python3
"""Client-app runtime plots, split by protocol phase."""
import copy
import os
from typing import List, Union
from matplotlib import patches
# noinspection PyUnresolvedReferences
from plot.colors import (maroon, orange, yellow, green, purple, pink, blue,
                         lightblue)
# noinspection PyUnresolvedReferences
from plot.plot import (OUTPUT_DIR, INPUT_DIR,
                       read_data, error_plot_mult, stacked_bar_plot_mult,
                       join_stack_data,
                       EXTENSION,
                       Legend, BarText, font_size, plot_settings, figure_width,
                       cm, read_ram_max, convert_mib_to_gb)
# noinspection PyUnresolvedReferences
from plot.tls import compute_tls_curve
PLOT_ALL = 0
# When set, TLS-overhead stacks/labels are spliced into the plots.
TLS = 1
# Dataset display names.
IKV1 = "IM-2%"
IKV2 = "IM-2.5%"
IKV3 = "IM-3%"
WZL1 = "MT-Material"
WZL2 = "MT-Diameter"
output_dir_client = OUTPUT_DIR + "client/"
os.makedirs(output_dir_client, exist_ok=True)
input_dir = INPUT_DIR + "client/"
# Stack colours for the PSI-only / Bloom-only / combined plots.
psi_colors = [maroon, orange, yellow, green,
              purple, pink]
bloom_colors = [maroon, blue, lightblue, green, purple, pink]
both_colors = [maroon, orange, yellow, blue, lightblue,
               green, purple, pink]
hatches: List[Union[str, None]] = [None for _ in range(len(both_colors) + 2)]
psi_hatches = hatches[:]
bloom_hatches = hatches[:]
x_index = 5  # Results
# Presumably the per-phase time columns used by the sections below --
# TODO confirm against the CSV layout.
indices = [6, 8, 9, 11, 12, 13, 14, 15, 16]
# Positions at which the TLS-overhead stacks get inserted.
PSI_TLS_IND = 3
OT_TLS_BOTH = 7
OT_TLS_PSI = 5
OT_TLS_BLOOM = 4
TLS_HATCH = '////////'
hatch_rec = patches.Patch(facecolor="white", hatch=TLS_HATCH)
if TLS:
    # Splice the TLS stacks' colours and hatches into the per-plot
    # colour/hatch lists.
    ot_tls_color = 'forestgreen'
    psi_tls_color = 'lightyellow'
    psi_colors.insert(PSI_TLS_IND, psi_tls_color)
    both_colors.insert(PSI_TLS_IND, psi_tls_color)
    psi_colors.insert(OT_TLS_PSI - 1, ot_tls_color)
    bloom_colors.insert(OT_TLS_BLOOM - 1, ot_tls_color)
    both_colors.insert(OT_TLS_BOTH - 1, ot_tls_color)
    psi_hatches[PSI_TLS_IND] = TLS_HATCH
    psi_hatches[OT_TLS_PSI] = TLS_HATCH
    bloom_hatches[OT_TLS_BLOOM] = TLS_HATCH
    hatches[PSI_TLS_IND] = TLS_HATCH
    hatches[OT_TLS_BOTH] = TLS_HATCH
# -----------------------------------------------------------------------------
# Phase label lists for the three plot flavours.
bloom_phases = [
    'Hash Key R.', 'Bloom R.', 'Matching',
    'Key R. (OT)', 'Record R.', 'Decryption']
psi_phases = [
    'Hash Key R.', 'PSI Prep.', 'PSI Exec.',
    'Key R. (OT)', 'Record R.', 'Decryption']
both_phases = [
    'Hash Key R.', 'PSI Prep.', 'PSI Exec.',
    'Bloom R.', 'Matching', 'Key R. (OT)',
    'Record R.', 'Decryption'
]
both_phases_no_tls = [
    'Hash Key R.', 'PSI Prep.', 'PSI Exec.',
    'Bloom R.', 'Matching', 'Key R. (OT)',
    'Record R.', 'Decryption'
]
bloom_phases_no_tls = [
    'Hash Key R.', 'Bloom R.', 'Matching',
    'Key R. (OT)', 'Record R.', 'Decryption']
if TLS:
    # Insert the TLS phase labels at their configured positions.
    bloom_phases.insert(OT_TLS_BLOOM, 'OT TLS')
    psi_phases.insert(PSI_TLS_IND, 'PSI TLS')
    psi_phases.insert(OT_TLS_PSI, 'OT TLS')
    both_phases.insert(PSI_TLS_IND, 'PSI TLS')
    both_phases.insert(OT_TLS_BOTH, 'OT TLS')
ylabel = "Time [s]"
# Precomputed subplot margins (currently disabled in favour of defaults).
adjust_bar = None  # (0.095, 0.99, 0.99, 0.16)
adjust_bar2 = None  # (0.085, 0.99, 0.99, 0.16)
adjust_stacked_bar = None  # (0.085, 0.99, 0.92, 0.16)
adjust_stacked_bar2 = None  # (0.095, 0.99, 0.92, 0.16)
textboxes = [{'s': 'Standard BPE', 'y': 0.85, 'i': 0},
             {'s': 'PSI-based PPE', 'y': 0.85, 'i': 1},
             ]
backgrounds = [(0.5, 2.5, 'gray'), (2.5, 4.5, 'lightgray')]
joined_order = [0, 3, 4, 1, 2, 5, 6, 7]
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
def remove_stacks(stack_lst: list, indexes: List[int]) -> list:
    """Delete the entries of *stack_lst* sitting at the given positions.

    The list is mutated in place and also returned for chaining.
    Deletion runs from the highest position downwards so that earlier
    removals cannot shift the positions that are still pending.
    """
    for position in sorted(indexes, reverse=True):
        del stack_lst[position]
    return stack_lst
def remove_psi_stack(stack_lst: list) -> list:
    """Drop the PSI-phase stacks and the leading start-time entry.

    With TLS enabled there is one additional PSI TLS stack to discard.
    """
    doomed = [4, 3, 2, 0] if TLS else [3, 2, 0]
    return remove_stacks(stack_lst, doomed)
def remove_bloom_stacks(stack_lst: list) -> list:
    """Drop the Bloom-phase stacks and the leading start-time entry.

    With TLS enabled the Bloom stacks sit one position further right.
    """
    doomed = [6, 5, 0] if TLS else [5, 4, 0]
    return remove_stacks(stack_lst, doomed)
def replace_psi_stacks(stack_lst: list) -> list:
    """Zero out the PSI-phase stacks instead of removing them.

    Every series in the affected stacks is overwritten with ``[0, 0]``
    so the bars keep their slot but contribute no height.  The leading
    start-time entry is still dropped.  Mutates and returns the list.
    """
    targets = [2, 3, 4] if TLS else [2, 3]
    for pos in targets:
        stack = stack_lst[pos]
        for key in stack:
            stack[key] = [0, 0]
    del stack_lst[0]
    return stack_lst
def replace_bloom_stacks(stack_lst: list) -> list:
    """Zero out the Bloom-phase stacks instead of removing them.

    Counterpart to ``replace_psi_stacks``: the Bloom stacks keep their
    slot but are overwritten with ``[0, 0]`` series; the leading
    start-time entry is dropped.  Mutates and returns the list.
    """
    targets = [5, 6] if TLS else [4, 5]
    for pos in targets:
        stack = stack_lst[pos]
        for key in stack:
            stack[key] = [0, 0]
    del stack_lst[0]
    return stack_lst
def add_tls_overhead(filename: str, stack_lst: List[dict],
                     x_ind: int = x_index):
    """Insert the theoretic TLS-overhead stacks into ``stack_lst`` in place.

    Reads the bytes sent/received during the PSI and OT phases from
    *filename* and converts them into overhead curves via
    ``compute_tls_curve``.  The PSI TLS stack is inserted first, then the
    OT TLS stack; both target indices are shifted by +1 because
    ``stack_lst`` still carries the start-time entry at position 0, and
    the OT index (``OT_TLS_BOTH``) already accounts for the PSI insert.
    """
    # CSV column layout: (sent, received) byte counters per phase.
    OT_SENT_COL = 25
    OT_RECV_COL = 27
    PSI_SENT_COL = 29
    PSI_RECV_COL = 31

    def _tls_stack(sent_col: int, recv_col: int) -> dict:
        """Build one TLS-overhead stack from a pair of byte columns."""
        sent = read_data(filename, x_ind, sent_col)
        recv = read_data(filename, x_ind, recv_col)
        # Zero baseline with the same keys/series lengths as the
        # start-time stack, so the curve only carries the TLS overhead.
        base = {e: [0 for _ in range(len(stack_lst[0][e]))]
                for e in stack_lst[0]}
        return compute_tls_curve(base, sent, recv)

    # PSI first — must precede the OT insert (see docstring).
    stack_lst.insert(PSI_TLS_IND + 1,
                     _tls_stack(PSI_SENT_COL, PSI_RECV_COL))  # +1 for start time
    stack_lst.insert(OT_TLS_BOTH + 1,
                     _tls_stack(OT_SENT_COL, OT_RECV_COL))  # +1 for start time
# noinspection PyShadowingNames
def substract_prev(stack_lst: List[dict]) -> List[dict]:
    """Convert cumulative timestamps into per-phase durations, in place.

    Each value is reduced by the closest *non-zero* predecessor in the
    stack list — a zero entry means that phase was not measured (e.g.
    PSI) and is skipped when walking backwards.  Stacks are processed
    from last to first so untouched predecessor values are used.
    """
    for level in reversed(range(1, len(stack_lst))):
        current = stack_lst[level]
        for series, values in current.items():
            for col, stamp in enumerate(values):
                prev = level - 1
                # Walk back past unmeasured (zero) entries.
                while stack_lst[prev][series][col] == 0:
                    prev -= 1
                values[col] = stamp - stack_lst[prev][series][col]
    return stack_lst
# noinspection PyShadowingNames
def read_stacks_from_file(input_file: str) -> List[dict]:
    """Read one data dict per configured column index from *input_file*."""
    return [read_data(input_file, x_index, column) for column in indices]
def get_stacks(filename: str) -> List[dict]:
    """Load *filename* and return per-phase duration stacks, plot-ready.

    Cumulative timestamps are converted to phase durations; when TLS is
    enabled the theoretic TLS-overhead stacks are spliced in as well.
    """
    durations = read_stacks_from_file(filename)
    substract_prev(durations)
    if TLS:
        add_tls_overhead(filename, durations)
    return durations
def get_tls_on_top(d1: dict) -> List[dict]:
    """Re-layer the stacks so the TLS portions sit on top of the bars.

    ``d1`` is keyed 1..10 (the start-time entry 0 has been removed).
    The result has two layers: layer 0 holds the eight entries at keys
    (1, 2, 3, 5, 6, 7, 9, 10) re-keyed 0..7; layer 1 is all ``[0, 0]``
    except that ``d1[4]`` goes to position 2 and ``d1[8]`` to position 5
    (presumably the PSI TLS and OT TLS stacks — the original note said
    "PSI TLS = 6", which disagrees with the code; verify).
    """
    phase_keys = (1, 2, 3, 5, 6, 7, 9, 10)
    base = dict(enumerate(d1[key] for key in phase_keys))
    overlay = {pos: [0, 0] for pos in range(8)}
    overlay[2] = d1[4]
    overlay[5] = d1[8]
    return [base, overlay]
def get_tls_on_top_no_psi(d1: dict) -> List[dict]:
    """Re-layer Bloom-only stacks so the OT TLS portion sits on top.

    ``d1`` is keyed 1, 5..10 (start time removed, PSI stacks dropped);
    key 8 is the OT TLS stack.  Layer 0 holds the six entries at keys
    (1, 5, 6, 7, 9, 10) re-keyed 0..5; layer 1 is all ``[0, 0]`` except
    the OT TLS stack placed above its phase at position 3.
    """
    phase_keys = (1, 5, 6, 7, 9, 10)
    base = dict(enumerate(d1[key] for key in phase_keys))
    overlay = {pos: [0, 0] for pos in range(6)}
    overlay[3] = d1[8]
    return [base, overlay]
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# Random PSI & Bloom
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
name = "butthead_psi_vs_bloom2"
output_dir = output_dir_client + 'psi_vs_bloom/'
os.makedirs(output_dir, exist_ok=True)
input_file = input_dir + name + '.csv'
read_stacks = get_stacks(input_file)
# -----------------------------------------------------------------------------
# Bar Plot
# -----------------------------------------------------------------------------
# Bar Plot - Butthead Run - Joined
if 0 or PLOT_ALL:
output_file = output_dir + f'joined_bar{EXTENSION}'
xlabel = "Phase"
title = "Full Client App - 500 Matches - Rel. Offset 0.3% - 10 Reps"
data = {}
for i, s in enumerate(read_stacks):
if i != 0:
data[i] = s[500] # Only consider 500 matches
data = get_tls_on_top(data)
legend = Legend(['Random-0.3%'], location=Legend.TOP,
custom_labels=[(hatch_rec, "TLS")])
tbs = copy.deepcopy(textboxes)
stacked_bar_plot_mult(output_file, [data], xlabel, ylabel, title,
bar_legend=legend,
adjust=adjust_bar, # small_xticks=True,
xlabels=both_phases_no_tls[:],
backgrounds=backgrounds,
text_boxes=tbs,
order=joined_order,
hatches=[None, TLS_HATCH],
colors_depend_on_bar=True,
colors=[blue, orange],
bar_texts=[
BarText(0, 0, 2, 0, r'|S|≈0.3 Mio.', 'center'),
BarText(0, 0, 3.5, 0, r'|S|≈0.3 Mio.',
'center')],
y_lim=60
)
# -----------------------------------------------------------------------------
# Stacked Bar Plot - Relative Offset 2 - Butthead - Bloom
# if 0 or PLOT_ALL:
# output_file = output_dir + f'client_stacked_bloom_butthead{EXTENSION}'
# xlabel = "Matches [#]"
# title = "Full Client App - 0-1000 Matches - Rel. Offset 0.3% - 10 Reps"
# bloom_stacks = remove_psi_stack(copy.deepcopy(read_stacks))
# stacked_bar_plot_mult(output_file, [bloom_stacks], xlabel,
# ylabel,
# title,
# stack_legend=Legend(bloom_phases[:],
# empty_positions=[1]),
# label_step=1, colors=bloom_colors,
# hatches=bloom_hatches,
# adjust=adjust_stacked_bar,
# # bar_te Text(0, 1, 'all', 0, r'|S|≈0.3 Mio.',
# 'in')]
# textboxes=[dict(x=0.08, y=0.7, s=r'|S|≈0.3 Mio.',
# transform='transAxes', ha='center',
# va='center', fontsize=font_size)]
# )
# -----------------------------------------------------------------------------
# Same for PSI
# -----------------------------------------------------------------------------
# Stacked Bar Plot - Relative Offset 2 - Butthead - PSI
# if 0 or PLOT_ALL:
# output_file = output_dir + f'client_stacked_psi_butthead{EXTENSION}'
# xlabel = "Matches [#]"
# title = "Full Client App - 0-1000 Matches - Rel. Offset 0.3% - 10 Reps"
# psi_stacks = remove_bloom_stacks(copy.deepcopy(read_stacks))
# stacked_bar_plot_mult(output_file, [psi_stacks], xlabel,
# ylabel,
# title,
# stack_legend=Legend(psi_phases[:]),
# label_step=1, colors=psi_colors,
# hatches=psi_hatches,
# adjust=adjust_stacked_bar2,
# # bar_texts=[
# # BarText(0, 1, 'all', 0, r'|S|≈0.3 Mio.',
# 'in')]
# textboxes=[dict(x=0.08, y=0.9, s=r'|S|≈0.3 Mio.',
# transform='transAxes', ha='center',
# va='center', fontsize=font_size)],
# y_lim=800
# )
# -----------------------------------------------------------------------------
# Both -> Replaced by divider.py
# -----------------------------------------------------------------------------
if 0 or PLOT_ALL:
output_file = output_dir + f'client_stacked_butthead{EXTENSION}'
xlabel = "Matches [#]"
title = "Full Client App - 0-1000 Matches - Rel. Offset 0.3% - 10 Reps"
bloom_stacks = replace_psi_stacks(copy.deepcopy(read_stacks))
psi_stacks = replace_bloom_stacks(copy.deepcopy(read_stacks))
# stacked_bar_plot_two_y(output_file,
# [bloom_stacks, psi_stacks], both_phases[:],
# xlabel, ylabel, title,
# label_step=1, colors=both_colors, hatches=hatches,
# xticks=list(range(0, 1001, 100)),
# adjust=(0.085, 0.9, 0.92, 0.16))
if 0 or PLOT_ALL:
with plot_settings():
output_file = output_dir + f'client_stacked_butthead{EXTENSION}'
xlabel = "Matches [#]"
title = "Full Client App - 0-1000 Matches - Rel. Offset 0.3% - 10 Reps"
bloom_stacks = replace_psi_stacks(copy.deepcopy(read_stacks))
psi_stacks = replace_bloom_stacks(copy.deepcopy(read_stacks))
# Shuffle order
labels = both_phases[:]
order = [0, 6, 7, 8, 9, 4, 5, 1, 2, 3]
bloom_stacks = [bloom_stacks[i] for i in order]
psi_stacks = [psi_stacks[i] for i in order]
labels = [labels[i] for i in order]
colors = [both_colors[i] for i in order]
shatches = [hatches[i] for i in order]
legend_order = [0, 7, 8, 9, 5, 6, 1, 2, 3, 4]
stacked_bar_plot_mult(output_file,
[bloom_stacks, psi_stacks],
xlabel,
ylabel,
title,
stack_legend=Legend(labels, order=legend_order),
label_step=1,
colors=colors,
hatches=shatches,
xlabels=list(range(0, 1001, 100)),
y_lim=130,
# divide_y=True,
# ymin=0,
# ymax=90,
# ymin2=585,
# ymax2=690,
adjust=None, # (0.09, 0.97, 0.92, 0.16),
textboxes=[dict(x=0.1, y=0.72, s=r'|S|≈0.3 Mio.',
transform='transAxes',
ha='center',
va='center', fontsize=font_size)]
)
# -----------------------------------------------------------------------------
# Execution Time - Butthead
# if False or PLOT_ALL:
# input_file = input_dir + name + '.csv'
# output_file = output_dir + f'client_line_butthead{EXTENSION}'
# xlabel = "Matches [#]"
# title = "Full Client App - 0-1000 Matches - Rel. Offset 0.3% - 10 Reps"
# start = read_data(input_file, 4, 6)
# psi_start = read_data(input_file, 4, 8)
# bloom_start = read_data(input_file, 4, 11)
# bloom_end = read_data(input_file, 4, 11)
# end = read_data(input_file, 4, 16)
# bloom = {}
# psi = {}
# for x in end:
# bloom[x] = []
# psi[x] = []
# for i, _ in enumerate(end[x]):
# bt = end[x][i] - bloom_start[x][i] + \
# (psi_start[x][i] - start[x][i])
# bloom[x].append(bt)
# pt = (end[x][i] - bloom_end[x][i] +
# (bloom_start[x][i] - start[x][i]))
# psi[x].append(pt)
# error_plot_mult(
# [bloom, psi],
# output_file,
# 100,
# 0,
# 700,
# 100,
# xlabel,
# ylabel,
# title,
# ['Bloom', 'PSI'],
# x_label_step=1,
# xticks=sorted(end.keys()),
# x_labels=sorted(end.keys()),
# legend_pos=None
#
# )
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# WZL Client 1
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
name = "butthead_client_wzl1"
output_dir = output_dir_client + 'wzl/'
os.makedirs(output_dir, exist_ok=True)
input_file = input_dir + name + '.csv'
read_stacks_wzl1 = get_stacks(input_file)
# -----------------------------------------------------------------------------
# Same for Joined
# -----------------------------------------------------------------------------
# Bar Plot - Butthead Run - Joined
# if 0:
# output_file = output_dir + f'joined_bar{EXTENSION}'
# xlabel = "Phase"
# title = "Client App - WZL Data - Match all Materials - 10 Reps"
# data = {}
# for i, s in enumerate(read_stacks_wzl1):
# if i != 0:
# data[i] = s[10] # Only consider 500 matches
# bar_plot_mult(output_file, data, xlabel, ylabel, title,
# xticks=both_phases[:],
# small_xticks=True)
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# WZL Client 2
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
name = "butthead_client_wzl2"
input_file = input_dir + name + '.csv'
read_stacks_wzl2 = get_stacks(input_file)
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# Same for Joined
# -----------------------------------------------------------------------------
# Bar Plot - Butthead Run - Joined
if 0 or PLOT_ALL and TLS:
with plot_settings():
output_file = output_dir + f'wzl_joined_bar{EXTENSION}'
xlabel = "Phase"
title = "Client App - WZL Data - 10 Reps"
data1 = {}
for i, s in enumerate(read_stacks_wzl1):
if i != 0:
data1[i] = s[10] # Only consider 500 matches
data2 = {}
for i, s in enumerate(read_stacks_wzl2):
if i != 0:
data2[i] = s[6] # Only consider 500 matches
data1 = get_tls_on_top(data1)
data2 = get_tls_on_top(data2)
legend = Legend([WZL1, WZL2], location=Legend.TOP,
custom_labels=[(hatch_rec, "TLS")])
tbs = copy.deepcopy(textboxes)
for t in tbs:
t['y'] = 0.83
stacked_bar_plot_mult(output_file, [data1, data2], xlabel, ylabel,
title,
xlabels=both_phases_no_tls[:],
adjust=adjust_bar2, # small_xticks=True,
backgrounds=backgrounds,
text_boxes=tbs,
order=joined_order,
hatches=[None, TLS_HATCH],
colors_depend_on_bar=True,
colors=[blue, orange],
bar_legend=legend,
bar_texts=[
BarText(0, 0, 2, 0, '|S|=11', 'center'),
BarText(0, 0, 4, 0, '|S|=11', 'center'),
BarText(1, 0, 2, 0, '|S|=701', 'center'),
BarText(1, 0, 4, 0, '|S|=701', 'center')
],
y_lim=31
)
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# Both Both
# -----------------------------------------------------------------------------
# if 0 or PLOT_ALL:
# output_file = output_dir + f'client_wzl{EXTENSION}'
# xlabel = "Metric"
# title = "Full Client App - WZL Data - 10 Reps"
# stacks = join_stack_data([read_stacks_wzl1, read_stacks_wzl2],
# [[WZL1], [WZL2]])
# bloom_stacks = replace_psi_stacks(copy.deepcopy(stacks))
# psi_stacks = replace_bloom_stacks(copy.deepcopy(stacks))
# stacked_bar_plot_mult(output_file, [bloom_stacks, psi_stacks],
# xlabel, ylabel, title,
# stack_legend=Legend(both_phases[:]),
# label_step=1, colors=both_colors,
# hatches=hatches[:],
# adjust=adjust_stacked_bar, y_lim=45,
# bar_texts=[
# BarText(0, 4, 0, 0, '|S|=11', 'in', rotation=0,
# color='white'),
# BarText(1, 2, 0, 0, '|S|=11', 'in',
# rotation=0),
# BarText(0, 4, 1, 0, '|S|=701', 'in',
# rotation=0,
# color='white'),
# BarText(1, 2, 1, 0, '|S|=701', 'in',
# rotation=0)
# ]
# )
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# IKV Client 1 - Rounding: 2, rel. Offset 2%
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
name = "butthead_client_ikv1"
input_file = input_dir + name + '.csv'
# Read Data
read_stacks_ikv1 = get_stacks(input_file)
# -----------------------------------------------------------------------------
if 0 or PLOT_ALL and TLS:
output_dir = output_dir_client + 'ikv/'
os.makedirs(output_dir, exist_ok=True)
output_file = output_dir + f'ikv1_bar{EXTENSION}'
xlabel = "Phase"
title = "Client App - IKV Data - IM-2% - 10 Reps"
data1 = {}
for i, s in enumerate(read_stacks_ikv1):
if i != 0:
data1[i] = s[77] # Only consider 500 matches
data1 = get_tls_on_top(data1)
legend = Legend([IKV1], location=Legend.TOP,
custom_labels=[(hatch_rec, "TLS")])
tbs = copy.deepcopy(textboxes)
y = 0.85
tbs[0]['y'] = y
tbs[1]['y'] = y
# tbs.append({'s': '(Section 8.2)', 'y': y-0.09,
# 'i': 1, 'fontsize': legend_font_size})
stacked_bar_plot_mult(output_file, [data1], xlabel, ylabel,
title,
xlabels=both_phases_no_tls[:],
adjust=adjust_bar2, # small_xticks=True,
backgrounds=backgrounds,
text_boxes=tbs,
order=joined_order,
hatches=[None, TLS_HATCH],
colors_depend_on_bar=True,
colors=[blue, orange],
bar_legend=legend,
bar_texts=[
BarText(0, 0, 2, 0, r'|S|≈1 Mio.', 'on'),
BarText(0, 1, 3.5, 0, r'|S|≈1 Mio.', 'center'),
],
y_lim=120
)
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# IKV Client 2 - Rounding: 2, rel. Offset 2.5%
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
name = "butthead_client_ikv2"
input_file = input_dir + name + '.csv'
read_stacks_ikv2 = get_stacks(input_file)
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# IKV Client 3 - Rounding: 2, rel. Offset 3%
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
name = "butthead_client_ikv3"
input_file = input_dir + name + '.csv'
read_stacks_ikv3 = get_stacks(input_file)
# NOTE(review): `if 1 or ...` is always true (and `or` binds looser than
# `and`, i.e. this reads `1 or (PLOT_ALL and TLS)`); the sibling sections
# use `if 0 or PLOT_ALL and TLS:` — looks like a debugging leftover that
# forces this figure to render unconditionally; confirm.
if 1 or PLOT_ALL and TLS:
    # Bar plot for the IKV-3 client run (broken y axis for the huge
    # decryption time with |S|≈2.5 Bil.).
    output_dir = output_dir_client + 'ikv/'
    os.makedirs(output_dir, exist_ok=True)
    output_file = output_dir + f'ikv3_bar{EXTENSION}'
    xlabel = "Phase"
    title = "Client App - IKV Data - IM-3% - 10 Reps"
    data1 = {}
    for i, s in enumerate(read_stacks_ikv3):
        if i not in [0, 2, 3, 4]:
            # Ignore start and PSI Stack
            data1[i] = s[77]
    data1 = get_tls_on_top_no_psi(data1)
    # NOTE(review): `legend` and `tbs` below are built but never passed to
    # the plot call (it constructs its own Legend and no text_boxes) —
    # presumably dead code; verify before removing.
    legend = Legend([IKV3], location=Legend.TOP,
                    custom_labels=[(hatch_rec, "TLS")])
    tbs = copy.deepcopy(textboxes)
    y = 0.85
    tbs[0]['y'] = y
    tbs[1]['y'] = y
    stacked_bar_plot_mult(output_file, [data1], xlabel, ylabel,
                          title,
                          bar_legend=Legend([IKV3],
                                            location=Legend.TOP,
                                            custom_labels=[
                                                (hatch_rec, "TLS")]),
                          ymin=0,
                          ymax=35,
                          ymin2=49000,
                          ymax2=52000,
                          y_label_coord=-0.09,
                          xlabels=bloom_phases_no_tls[:],
                          adjust=None,
                          divide_y=True,
                          colors_depend_on_bar=True,
                          colors=[blue, orange],
                          bar_texts=[
                              BarText(0, 0, 2, 1, r'|S|≈2.5 Bil.', 'center',
                                      color='white'),
                          ],
                          gridspec_kw={'height_ratios': [1, 2.5]},
                          y_lim=31,
                          y_label_coord2=-0.6
                          )
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# Both Both
# -----------------------------------------------------------------------------
# if 0 or PLOT_ALL:
# output_dir = output_dir_client + 'ikv/'
# os.makedirs(output_dir, exist_ok=True)
# output_file = output_dir + f'client_ikv{EXTENSION}'
# xlabel = "Metric"
# title = "Client App - IKV Data - rel. Offset - 10 Reps"
# stacks1 = remove_psi_stack(copy.deepcopy(read_stacks_ikv1))
# stacks2 = remove_psi_stack(copy.deepcopy(read_stacks_ikv2))
# stacked_bar_plot_two_y(output_file, [stacks1, stacks2], bloom_phases[:],
# xlabel, ylabel, title,
# label_step=1, colors=bloom_colors,
# hatches=bloom_hatches,
# adjust=adjust_stacked_bar)
# -----------------------------------------------------------------------------
# Same for Joined
# -----------------------------------------------------------------------------
# Bar Plot - Butthead Run - Joined
if 0 or PLOT_ALL:
output_dir = output_dir_client + 'ikv/'
os.makedirs(output_dir, exist_ok=True)
output_file = output_dir + f'ikv_joined_bar{EXTENSION}'
xlabel = "Phase"
title = "Client App - IKV Data - 10 Reps"
data1 = {}
for i, s in enumerate(read_stacks_ikv1):
if i not in [0, 2, 3, 4]:
# Ignore start and PSI Stack
data1[i] = s[77]
data2 = {}
for i, s in enumerate(read_stacks_ikv2):
if i not in [0, 2, 3, 4]:
# Ignore start and PSI Stack
data2[i] = s[77]
data1 = get_tls_on_top_no_psi(data1)
data2 = get_tls_on_top_no_psi(data2)
stacked_bar_plot_mult(output_file, [data1, data2], xlabel, ylabel,
title,
bar_legend=Legend([IKV1, IKV2],
location=Legend.TOP,
custom_labels=[(hatch_rec, "TLS")]),
ymin=0,
ymax=35,
ymin2=5600,
ymax2=6500,
y_label_coord=-0.09,
xlabels=bloom_phases_no_tls[:],
adjust=None,
divide_y=True,
colors_depend_on_bar=True,
colors=[blue, orange],
bar_texts=[
BarText(0, 0, 2, 1, r'|S|≈1 Mio.', 'in',
color='white'),
BarText(1, 0, 2, 1, r'|S|≈143 Mio.',
'center')],
gridspec_kw={'height_ratios': [1, 2.5]},
y_lim=31,
y_label_coord2=-0.6
)
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# Random Client 2 - Rounding: 3, ID Len 10, rel. Offset 0.5%
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
name = "butthead_bloom"
output_dir = output_dir_client + 'bloom/'
os.makedirs(output_dir, exist_ok=True)
input_file = input_dir + name + '.csv'
read_stacks = get_stacks(input_file)
# -----------------------------------------------------------------------------
# Bar Plot - Butthead Run - Bloom
# if False or PLOT_ALL:
# output_file = output_dir + f'client_bar_bloom{EXTENSION}'
# xlabel = "Phase"
# title = "Full Client App - 500 Matches - Rel. Offset 0.5% - 10 Reps"
# data = {}
# for i, s in enumerate(read_stacks):
# if i not in [0, 2, 3, 4]:
# # Ignore start and PSI Stack
# data[i] = s[500] # Only consider one number of matches
# stacked_bar_plot_mult(output_file, [[data]], xlabel, ylabel, title,
# xlabels=bloom_phases[:],
# adjust=adjust_bar,
# bar_texts=[
# BarText(0, 0, 2, 0, '|S|≈29 Mio.', 'in',
# color='white')],
# )
# -----------------------------------------------------------------------------
# Stacked Bar Plot
if False or PLOT_ALL:
output_file = output_dir + f'client_stacked_bloom{EXTENSION}'
xlabel = "Results [#]"
title = "Full Client legend=Legend(legend_labels)App - ID Len. 10 -" \
" Rel. Offset 0.5% - 10 Reps"
stacks = remove_psi_stack(copy.deepcopy(read_stacks))
stacked_bar_plot_mult(output_file, [stacks], xlabel,
ylabel,
title,
stack_legend=Legend(bloom_phases[:],
empty_positions=[1]),
adjust=adjust_stacked_bar2,
label_step=1, colors=bloom_colors,
hatches=bloom_hatches,
# bar_texts=[
# BarText(0, 2, 'all', 0, r'|S|≈29 Mio.', 'in')],
textboxes=[dict(x=0.075, y=0.88, s=r'|S|≈29 Mio.',
transform='transAxes', ha='center',
va='center', fontsize=font_size)],
y_lim=450
)
# -----------------------------------------------------------------------------
# RAM Plot
if 0 or PLOT_ALL:
output_file = output_dir + f'client_bloom_ram{EXTENSION}'
xlabel = "Results [#]"
title = "Full Client legend=Legend(legend_labels)App - ID Len. 10 -" \
" Rel. Offset 0.5% - 10 Reps"
d = read_ram_max(input_dir + 'butthead_bloom_ram' + '.csv', 2, 3)
convert_mib_to_gb(d)
error_plot_mult(
[d],
output_file,
100,
0,
35,
5,
xlabel,
"RAM Usage [GB]",
r"Title",
# auto_ylabels=True,
)
if 0 or PLOT_ALL:
output_file = OUTPUT_DIR + 'client/psi_vs_bloom/' + \
f'client_ram{EXTENSION}'
xlabel = "Results [#]"
title = "Full Client legend=Legend(legend_labels)App - ID Len. 10 -" \
" Rel. Offset 0.5% - 10 Reps"
d = read_ram_max(input_dir + 'butthead_psi_vs_bloom2_ram' + '.csv', 2, 3)
convert_mib_to_gb(d)
error_plot_mult(
[d],
output_file,
100,
0,
35,
5,
xlabel,
"RAM Usage [GB]",
r"Title",
# auto_ylabels=True,
)
| 33,636
| 41.959132
| 80
|
py
|
parameter-exchange
|
parameter-exchange-master/results_eval/scripts/ot_time.py
|
#!/usr/bin/env python3
"""OT execution-time plots (runtime, RAM, latency/bandwidth sweeps)."""
import os
# noinspection PyUnresolvedReferences
from plot.colors import blue, orange, green
from plot.plot import (read_data, INPUT_DIR, OUTPUT_DIR,
                       error_plot_mult, read_data_mult, convert_to_minutes,
                       EXTENSION, plot_settings, Legend, read_ram_max,
                       convert_mib_to_gb)
# noinspection PyUnresolvedReferences
from plot.tls import compute_tls_curve
# Master switch: truthy renders every figure; the per-section `if 0 or
# PLOT_ALL:` guards let individual plots be enabled by hand.
PLOT_ALL = 0
output_dir = OUTPUT_DIR + 'ot/'
os.makedirs(output_dir, exist_ok=True)
input_dir = INPUT_DIR + 'ot/'
# # Setsize WITHOUT TLS ---------------------------------------------------------
# if False or PLOT_ALL:
# with plot_settings(half_width=True):
# name = "butthead_setsize"
# d = read_data(input_dir + f"setsize/{name}.csv", 2, 12)
# minor_xticks = list(range(10 ** 6, 10 ** 7 + 1, 10 ** 6))
# xticks = [2 ** i for i in range(20, 24)]
# xlabels = [fr"$2^{{{i}}}$" for i in range(20, 24)]
# minor_xlabels = [f"" for i in minor_xticks]
# # minor_xlabels[4] = "5 Mio"
# # minor_xlabels[2] = "3 Mio"
# minor_xlabels[6] = "7 Mio"
# minor_xlabels[-1] = "10 Mio"
# for x in list(d.keys())[:]:
# if x > 10 ** 7:
# del d[x]
# error_plot_mult(
# [d],
# output_dir + f'{name}{EXTENSION}',
# 1,
# 0,
# 60,
# 10,
# "Set Size [#]",
# "Time [s]",
# "OT Execution Time for 10 OTs (No TLS/MAL)",
# adjust=(0.13, 0.93, 0.975, 0.215),
# x_label_step=1,
# xlabels=xlabels,
# xticks=xticks,
# minor_xticks=minor_xticks,
# minor_xlabels=minor_xlabels
# )
# # TotalOTs WITHOUT TLS --------------------------------------------------------
# if False or PLOT_ALL:
# with plot_settings(half_width=True):
# name = "butthead_numOTs"
# d = read_data(input_dir + f"numOTs/{name}.csv", 3, 12)
# xticks = list(range(0, 200, 30)) + [200]
# minor_xticks = [i for i in range(0, 200, 10) if i not in xticks]
# error_plot_mult(
# [d],
# output_dir + f'{name}{EXTENSION}',
# 10,
# 0,
# 80,
# 20,
# "Number of OT Extensions [#]",
# "Time [s]",
# r"OT Execution Time for Setsize $2^20$ (No TLS/MAL)",
# adjust=(0.13, 0.93, 0.975, 0.215),
# xticks=xticks,
# minor_xticks=minor_xticks,
# minor_xlabels=['' for _ in minor_xticks]
# )
# Latency WITHOUT TLS ---------------------------------------------------------
if False or PLOT_ALL:
with plot_settings(half_width=True):
name = "butthead_latency"
data_list = read_data_mult(input_dir + f"latency/{name}.csv", 3, 12, 6)
for i in data_list.keys():
data_list[i] = convert_to_minutes(data_list[i])
latencies = sorted(data_list.keys(), reverse=True)
legend_labels = [f"{i}ms" for i in latencies]
legend_labels[-2] = " 50ms"
legend_labels[-1] = " 0ms"
print("Runtime at 300ms: ",
sum(data_list[300][100]) / len(data_list[300][100]),
"min")
error_plot_mult(
[data_list[i] for i in latencies],
output_dir + f'{name}{EXTENSION}',
20,
0,
8,
2,
"Number of OT Extensions [#]",
"Time [min]",
r"OT Execution Time with Latency (SS $2^{20}$, No TLS/MAL)",
adjust=(0.11, 0.96, 0.76, 0.21),
# for 2/3 height (0.19, 0.95, 0.98, 0.16)
legend=Legend(legend_labels, location='above'),
x_label_step=1,
)
# Bandwidth WITHOUT TLS -------------------------------------------------------
# if False:
# name = "butthead_bandwidth"
# data_list = read_data_mult(input_dir + f"bandwidth/{name}.csv", 3, 12, 7)
# for i in data_list.keys():
# data_list[i] = convert_to_minutes(data_list[i])
# latencies = [6000, 50000, 100000, 0]
# legend_labels = [f"{round(i / 1000)}Mbit/s" for i in latencies if
# i is not 0]
# legend_labels.append("Unlimited") # 0 has special meaning
# error_plot_mult(
# [data_list[i] for i in latencies],
# output_dir + f'{name}_sync{EXTENSION}',
# 20,
# 0,
# 40,
# 5,
# "Number of OT Extensions [#]",
# "Time [min]",
# r"OT Execution Time with limited BW (SS $2^{20}$, No TLS/MAL)",
# adjust=None, # (0.17, 0.93, 0.98, 0.14),
# legend_labels=legend_labels,
# x_label_step=1,
# legend_pos=None
# )
# Async Bandwidth WITHOUT TLS -------------------------------------------------
if False or PLOT_ALL:
with plot_settings(half_width=True):
name = "butthead_ot_bandwidth_async"
data_list = read_data_mult(
input_dir + f"bandwidth/{name}.csv", 3, 12, 7)
for i in data_list.keys():
data_list[i] = convert_to_minutes(data_list[i])
latencies = [6000, 50000, 100000, 0]
legend_labels = [
r" 6Mbit/s",
r" 50Mbit/s",
r"100Mbit/s",
"Unlimited"
]
print("Runtime at 6Mibt/s: ",
sum(data_list[6000][100]) / len(data_list[6000][100]),
"min")
error_plot_mult(
[data_list[i] for i in latencies],
output_dir + f'butthead_ot_bandwidth{EXTENSION}',
20,
0,
20,
5,
"Number of OT Extensions [#]",
"Time [min]",
r"OT Execution Time with limited BW (1:10) (SS $2^{20}$, "
r"No TLS/MAL)",
adjust=(0.13, 0.96, 0.76, 0.21),
# for 2/3 height(0.17, 0.95, 0.98, 0.16),
legend=Legend(legend_labels, location='above', ncols=2),
x_label_step=1,
y_lim=20
)
# Comparison Plot --------------------------------------------------------------
# Compares OT variants (KKRT16 128/76 bit, malicious OOS16) with measured
# and theoretic TLS overhead in one error plot.
if 0 or PLOT_ALL:
    # Toggle: include the measured-TLS curve (changes the y-axis range).
    MEASURED_TLS = True
    legend = []
    data_list = []
    if MEASURED_TLS:
        # NOTE(review): the `f"..."` prefixes in this section have no
        # placeholders — plain strings would do.
        tls = read_data(input_dir + f"tls/butthead_ot_tls.csv", 3,
                        12)
        data_list.append(tls)
        legend.append("KKRT16 (128Bit) TLS (Meas.)")
        y_step = 10 # 30
        y_max = 40 # 280
    else:
        y_step = 10
        y_max = 40
    malicious = read_data(
        input_dir + f"malicious/butthead_ot_malicious.csv", 3, 12)
    data_list.append(malicious)
    legend.append("OOS16 (76Bit)")
    baseline128 = read_data(
        input_dir + f"baseline/butthead_ot_baseline128.csv", 3, 12)
    # `if True:` is a manual toggle for the theoretic-TLS curve.
    if True:
        # Theoretic TLS
        # Read sent Bytes
        sent = read_data(
            input_dir + f"baseline/butthead_ot_baseline128.csv", 3, 13)
        # Read received Bytes
        received = read_data(
            input_dir + f"baseline/butthead_ot_baseline128.csv", 3, 15)
        # Add overhead
        tls_theoretic = compute_tls_curve(baseline128, sent, received)
        data_list.append(tls_theoretic)
        legend.append("KKRT16 (128Bit) TLS (Theo.)")
    data_list.append(baseline128)
    legend.append("KKRT16 (128Bit)")
    baseline76 = read_data(input_dir + f"baseline/butthead_ot_baseline76.csv",
                           3,
                           12)
    data_list.append(baseline76)
    legend.append("KKRT16 (76Bit)")
    fmts = ['-' for _ in range(len(data_list))]
    # Dashed line for theoretic tls + RR 17
    # NOTE(review): index 2 is the theoretic-TLS curve only while
    # MEASURED_TLS is True; with it False the dashed style would land on
    # baseline128 — verify if that toggle is ever flipped.
    fmts[2] = '--'
    error_plot_mult(
        data_list,
        output_dir + f'ot_comparison{EXTENSION}',
        20,
        0,
        y_max,
        y_step,
        "Number of OT Extensions [#]",
        "Time [s]",
        r"OT Execution Time for Set Size $2^{20}$ (10 Reps)",
        adjust=None, # (0.09, 0.97, 0.98, 0.16),
        x_label_step=1,
        legend=Legend(legend),
        fmts=fmts,
        y_lim=40
    )
# Setsize RAM ---------------------------------------------------------
if 0 or PLOT_ALL:
with plot_settings(half_width=True):
name = "butthead_setsize"
d = read_data(input_dir + f"setsize/butthead_setsize.csv", 2, 12)
dc = read_ram_max(
input_dir + f"setsize/butthead_setsize_clientram.csv", 2, 11)
ds = read_ram_max(
input_dir + f"setsize/butthead_setsize_serverram.csv", 2, 11)
convert_mib_to_gb(dc)
convert_mib_to_gb(ds)
print("Runtime at 2^20: ",
sum(d[2 ** 20]) / len(d[2 ** 20]), "s")
print("Server RAM usage at 2^20: ",
sum(ds[2 ** 20]) / len(ds[2 ** 20]), "GB")
minor_xticks = list(range(10 ** 6, 10 ** 7 + 1, 10 ** 6))
xticks = [2 ** i for i in range(20, 24)]
xlabels = [fr"$2^{{{i}}}$" for i in range(20, 24)]
minor_xlabels = [f"" for i in minor_xticks]
# minor_xlabels[4] = "5 Mio"
# minor_xlabels[2] = "3 Mio"
minor_xlabels[6] = "7 Mio"
minor_xlabels[-1] = "10 Mio"
for x in list(ds.keys())[:]:
if x > 10 ** 7:
del d[x]
del ds[x]
del dc[x]
error_plot_mult(
[d, dc, ds],
output_dir + f'{name}{EXTENSION}',
1,
0,
60,
10,
"Set Size [#]",
"Time [s]",
"OT Execution Time for 10 OTs (No TLS/MAL)",
adjust=(0.13, 0.88, 0.94, 0.215),
x_label_step=1,
xlabels=xlabels,
xticks=xticks,
minor_xticks=minor_xticks,
minor_xlabels=minor_xlabels,
legend=Legend(['Runtime', 'Client', 'Server'], 'top'),
auto_ylabels=True,
second_y_axis=[0, 1, 1],
second_y_label="RAM Usage [GB]",
second_ylim=6,
second_y_lim_bottom=0,
y_lim=75,
colors=[blue, orange, green],
)
# TotalOTs RAM --------------------------------------------------------
if 0 or PLOT_ALL:
with plot_settings(half_width=True):
name = "butthead_numOTs"
d = read_data(input_dir + f"numOTs/butthead_numOTs.csv", 3, 12)
dc = read_ram_max(input_dir + f"numOTs/butthead_numOTs_clientram.csv",
3, 11)
ds = read_ram_max(input_dir + f"numOTs/butthead_numOTs_serverram.csv",
3, 11)
convert_mib_to_gb(dc)
convert_mib_to_gb(ds)
print("Runtime for 200 OTes: ",
sum(d[200]) / len(d[200]), "s")
print("Server RAM usage for 200 OTes: ",
sum(ds[200]) / len(ds[200]), "GB")
xticks = list(range(0, 200, 30)) + [200]
minor_xticks = [i for i in range(0, 200, 10) if i not in xticks]
error_plot_mult(
[d, dc, ds],
output_dir + f'{name}{EXTENSION}',
10,
0,
80,
20,
"Number of OT Extensions [#]",
"Time [s]",
r"OT Execution Time for Set Size $2^20$ (No TLS/MAL)",
adjust=(0.13, 0.88, 0.94, 0.215),
xticks=xticks,
minor_xticks=minor_xticks,
minor_xlabels=['' for _ in minor_xticks],
legend=Legend(['Runtime', 'Client', 'Server'], 'top'),
auto_ylabels=True,
second_y_axis=[0, 1, 1],
second_y_label="RAM Usage [GB]",
second_ylim=6,
colors=[blue, orange, green],
second_y_lim_bottom=0,
y_lim=75,
)
| 11,744
| 34.917431
| 81
|
py
|
parameter-exchange
|
parameter-exchange-master/results_eval/scripts/bloom_full_err.py
|
#!/usr/bin/env python3
"""Bloom dependence on Error Rate."""
import numpy as np
from numpy import log as ln
# noinspection PyUnresolvedReferences
from plot.plot import (read_data, convert_to_gb, convert_to_minutes,
INPUT_DIR, OUTPUT_DIR, read_fp,
error_plot_mult, EXTENSION, plot_settings, Legend, )
PLOT_ALL = 1
output_dir = OUTPUT_DIR + 'bloom_error_dep/'
input_dir = INPUT_DIR + 'bloom_full_err/'
name = "butthead_bloom_fp"
xticks = [10 ** (-1)] + [10 ** (-i) for i in range(2, 21, 1) if i % 3 == 0]
xlabels = [fr"$10^{{-1}}$"] + [fr"$10^{{-{i}}}$" for i in range(2, 21, 1) if
i % 3 == 0] + [fr"$10^{{-20}}$"]
minor_xticks = [10 ** (-i) for i in range(2, 21, 1) if i % 3 != 0]
minor_xlabels = ["" for _ in minor_xticks]
# Adjusts
BOTTOM_ADJUST = 0.22
RIGHT_ADJUST = 0.985
TOP_ADJUST = 0.97
LEFT_ADJUST = 0.14
def comp_bloom_bits(n, p):
    """Optimal Bloom-filter size in bits for capacity *n* at FP rate *p*."""
    return np.ceil(-n * np.log(p) / (np.log(2) ** 2))
def bit_to_gb(b):
    """Convert a bit count to (decimal) gigabytes."""
    byte_count = b / 8
    return byte_count / 1000 / 1000 / 1000
# Size
if 0 or PLOT_ALL:
    with plot_settings(half_width=True):
        # Measured filter size: x = configured FP rate (col 2, float),
        # y = size (col 5), converted to GB.
        measured = read_data(input_dir + f"{name}.csv", 2, 5, x_is_float=True)
        measured = convert_to_gb(measured)
        # Theoretic optimum for the same capacity; each point is
        # duplicated so the error-bar plot accepts it.
        theo = {fp: [bit_to_gb(comp_bloom_bits(100000000, fp))] * 2
                for fp in measured}
        error_plot_mult(
            [measured, theo],
            output_dir + f'size{EXTENSION}',
            100000000,
            0,
            1.5,
            0.5,
            "FP Rate",
            "Size [GB]",
            "Bloom Filter Size (Capacity: 100Mio, Stored: To Cap., 10 Reps)",
            legend=Legend(['Measured', 'Theoretic'], location='upper left'),
            adjust=(LEFT_ADJUST, RIGHT_ADJUST, TOP_ADJUST, BOTTOM_ADJUST),
            xticks=xticks,
            xlabels=xlabels,
            minor_xticks=minor_xticks,
            minor_xlabels=minor_xlabels,
            log_base=10,
            x_log=True,
            x_sync=True,
            reverseX=True,
        )
# Query Time
if False or PLOT_ALL:
    with plot_settings(half_width=True):
        # x = configured FP rate (col 2, float), y = query time (col 7).
        d = read_data(input_dir + f"{name}.csv", 2, 7, x_is_float=True)
        error_plot_mult(
            [d],
            output_dir + f'query_time{EXTENSION}',
            100000000,
            0,
            40,
            10,
            "FP Rate",
            "Query Time [s]",
            "Time for Query (Capacity: 100Mio, Stored: To Cap., 100Mio "
            "Queries, "
            "10 Reps)",
            adjust=(LEFT_ADJUST - 0.01, RIGHT_ADJUST, TOP_ADJUST,
                    BOTTOM_ADJUST),
            # 2/3 H adjust=(0.17, 0.98, 0.98, 0.16),
            xticks=xticks,
            xlabels=xlabels,
            minor_xticks=minor_xticks,
            minor_xlabels=minor_xlabels,
            log_base=10,
            x_log=True,
            x_sync=True
        )
# Insertion Time
if False or PLOT_ALL:
    with plot_settings(half_width=True):
        # x = configured FP rate (col 2, float), y = insertion time
        # (col 6, seconds), plotted in minutes.
        d = read_data(input_dir + f"{name}.csv", 2, 6, x_is_float=True)
        d = convert_to_minutes(d)
        error_plot_mult(
            [d],
            output_dir + f'insert_time{EXTENSION}',
            100000000,
            0,
            20,
            5,
            "FP Rate",
            "Insertion Time [min]",
            "Time for Insertion (Capacity: 100Mio, Stored: To Cap., 10 Reps)",
            adjust=(LEFT_ADJUST - 0.01, RIGHT_ADJUST, TOP_ADJUST,
                    BOTTOM_ADJUST),
            # 2/3 Height adjust=(0.17, 0.98, 0.98, 0.16),
            xticks=xticks,
            xlabels=xlabels,
            minor_xticks=minor_xticks,
            minor_xlabels=minor_xlabels,
            log_base=10,
            x_log=True,
            x_sync=True
        )
# FP Rate
if 0 or PLOT_ALL:
    # Measured FP rate = FP count (col 8) / query count (col 4), keyed by
    # the configured rate (col 2).
    d = read_fp(input_dir + f"{name}.csv", 2, 4, 8, x_is_float=True)
    xticks = [10 ** (-i) for i in range(1, 10, 1)]
    xlabels = [fr"$10^{{-{i}}}$" for i in range(1, 10, 1)]
    # Drop configured rates below 10^-9 — outside the plotted x-range.
    for k in list(d.keys())[:]:
        if k < 10 ** -9:
            del d[k]
    error_plot_mult(
        [d],
        output_dir + f'fp_rate{EXTENSION}',
        100000000,
        0,
        0.1,
        0.02,
        "FP Rate (Configured)",
        "FP Rate (Measured)",
        "False Positives (Capacity: 100Mio, Stored: To Cap., 100Mio Queries, "
        "10 Reps)",
        adjust=None,  # (0.06, 0.978, 0.979, 0.165),
        xticks=xticks,
        xlabels=xlabels,
        log_base=10,
        x_log=True,
        x_sync=True,
        reverseX=False,
        y_log=True,
        ylabels=[
            rf"$10^{{{i}}}$"
            for i in range(-9, 0)
        ],
        yticks=[
            10 ** i
            for i in range(-9, 0)
        ],
        grid=True,
    )
| 4,716
| 28.666667
| 78
|
py
|
parameter-exchange
|
parameter-exchange-master/results_eval/scripts/provider_ram.py
|
#!/usr/bin/env python3
"""Ram Plot of Provider Eval."""
from plot.plot import (OUTPUT_DIR, INPUT_DIR,
read_y_only, read_ram,
mean_confidence_interval, error_plot_mult)
PLOT_ALL = True
output_dir = OUTPUT_DIR + "provider/"
input_dir = INPUT_DIR + "provider/"
# Offset 2---------------------------------------------------------------------
if False or PLOT_ALL:
    # RAM usage of the provider app over time, with per-phase x labels
    # derived from the measured phase timestamps.
    name = "butthead_provider_uploads"
    ram_file = input_dir + name + '_ram.csv'
    input_file = input_dir + name + '.csv'
    output_file = output_dir + 'provider_ram' + '.png'
    # Read Data
    # Only consider last line, because each ram measurement is different
    data, max_y = read_ram(ram_file, start_line=-1, y=4)
    # Columns 4..12 hold one timestamp column per phase.
    times = []
    for i in range(4, 13):
        times.append(read_y_only(input_file, i, start_line=-6, end_line=-5))
    means = []
    for t in times:
        m, h = mean_confidence_interval(t)
        means.append(m)
    # Convert absolute phase times to offsets from the first phase; these
    # offsets become the tick positions for the phase labels.
    start_time = means[0]
    xticks = []
    for m in means[1:]:
        xticks.append(m - start_time)
    # Configuration
    xlabel = "Phase"
    ylabel = "RAM Usage [MiB]"
    # FIX: typo in the plot title ("Uplodads" -> "Uploads").
    title = "RAM Provider App - 500 Uploads"
    # x_labels = (
    #     'Parse Input', 'Hash Key Retr.', 'Hash Set', 'OT Index Comp.',
    #     'Key Retr. (OT)', 'Set Key', 'Encryption', 'Sending')
    x_labels = [
        '', 'Hash Key Retr.', '', '',
        'Key Retr. (OT)', '', ' ', 'Sending']
    print("Length of RAM measurement: ", max(data.keys()), 's')
    print("Length of Time measurement: ", xticks[-1], 's')
    xstep = 0.5
    min_y = 0
    max_y = 20000
    y_step = 5000
    for key, value in sorted(dict(zip(xticks, x_labels)).items(), key=lambda x: x[0]):
        print("{} : {}".format(key, value))
    error_plot_mult([data], output_file, xstep, min_y, max_y, y_step, xlabel, ylabel,
                    title, xticks=xticks, xlabels=x_labels,
                    ver_grid=True, x_rotation=90, auto_ylabels=True)
| 1,975
| 34.285714
| 86
|
py
|
parameter-exchange
|
parameter-exchange-master/results_eval/scripts/__init__.py
|
#!/usr/bin/env python3
"Plot Module"
| 37
| 11.666667
| 22
|
py
|
parameter-exchange
|
parameter-exchange-master/results_eval/scripts/ot_comm.py
|
#!/usr/bin/env python3
"""Bloom Size Plot."""
import math
import os
from plot.plot import (read_data, INPUT_DIR, OUTPUT_DIR,
read_data_mult, stacked_bar_plot, convert_to_gb,
stacked_bar_plot_mult)
PLOT_ALL = True
output_dir = OUTPUT_DIR + 'ot_comm/'
os.makedirs(output_dir, exist_ok=True)
input_dir = INPUT_DIR + 'ot/'
to_server = 13
from_server = 15
# Setsize WITHOUT TLS ---------------------------------------------------------
if False or PLOT_ALL:
    name = "butthead_setsize"
    # x = set size (col 2); y = bytes sent per direction (cols 13/15).
    d_to = read_data(input_dir + f"setsize/{name}.csv", 2, to_server)
    d_from = read_data(input_dir + f"setsize/{name}.csv", 2, from_server)
    convert_to_gb(d_to)
    convert_to_gb(d_from)
    # Only plot set sizes up to 10^7.
    for x in list(d_to.keys())[:]:
        if x > 10 ** 7:
            del d_to[x]
            del d_from[x]
    # Label multiples of a million as "xM", everything else as 2^k.
    xlabels = [
        f"{int(i / 10 ** 6)}M" if i % 10 ** 6 == 0 else fr"$2^{{"
        fr"{int(math.log2(i))}}}$"
        for i in sorted(d_to.keys())]
    # NOTE(review): this puts an int into a List[str] (smallest set size is
    # shown as a plain number); works with matplotlib but is inconsistent.
    xlabels[0] = int(min(sorted(d_to.keys())))
    stacked_bar_plot(
        output_dir + f'{name}.png',
        [d_to, d_from],
        ["Receiver -> Sender", "Sender -> Receiver"],
        "Set Size [#]",
        "Size [GB]",
        "OT Communication Overhead - 10 OT Extensions",
        xlabels=xlabels,
        label_step=1
    )
# TotalOTs WITHOUT TLS --------------------------------------------------------
if False or PLOT_ALL:
    name = "butthead_numOTs"
    input_file = input_dir + f"numOTs/{name}.csv"
    # x = number of OT extensions (col 3); y = bytes per direction
    # (cols 13/15).
    d_to = read_data(input_file, 3, to_server)
    d_from = read_data(input_file, 3, from_server)
    convert_to_gb(d_to)
    convert_to_gb(d_from)
    stacked_bar_plot(
        output_dir + f'{name}.png',
        [d_to, d_from],
        ["Receiver -> Sender", "Sender -> Receiver"],
        # FIX: the x data here is the OT-extension count, not the set size
        # (copy-paste from the setsize plot; the latency plot labels the
        # same x data "Number of OT Extensions [#]").
        "Number of OT Extensions [#]",
        "Size [GB]",
        r"OT Communication Overhead - Set Size $2^{20}$",
        # xticks=xlabels,
        label_step=2
    )
# Latency WITHOUT TLS ---------------------------------------------------------
if False or PLOT_ALL:
    name = "butthead_latency"
    input_file = input_dir + f"latency/{name}.csv"
    # x = number of OT extensions (col 3); y = bytes per direction
    # (cols 13/15); z = latency setting (col 6), one curve per latency.
    data_list_to = read_data_mult(input_file, 3, to_server, 6)
    data_list_from = read_data_mult(input_file, 3, from_server, 6)
    for d in data_list_to.values():
        convert_to_gb(d)
    for d in data_list_from.values():
        convert_to_gb(d)
    latencies = sorted(data_list_to.keys(), reverse=True)
    # NOTE(review): legend_labels is computed but never passed to the plot
    # call below — dead code, or a missing `legend_labels=` argument?
    legend_labels = [f"{i}ms" for i in latencies]
    data = []
    for lat in list(data_list_to.keys()):
        data.append([data_list_to[lat], data_list_from[lat]])
    stacked_bar_plot_mult(
        output_dir + f'{name}.png',
        data,
        ["Receiver -> Sender", "Sender -> Receiver"],
        "Number of OT Extensions [#]",
        "Size [GB]",
        r"OT Communication Overhead - Set Size $2^{20}$",
        # xticks=xlabels,
        label_step=1,
        # legend_pos=None
    )
#
# # Bandwidth WITHOUT TLS
# -------------------------------------------------------
# if False or PLOT_ALL:
# name = "butthead_bandwidth"
# data_list = read_data_mult(input_dir + f"bandwidth/{name}.csv", 3, 12, 7)
# for i in data_list.keys():
# data_list[i] = convert_to_minutes(data_list[i])
# latencies = [6000, 50000, 100000, 0]
# legend_labels = [f"{round(i / 1000)}Mbit/s" for i in latencies if
# i is not 0]
# legend_labels.append("Unlimited") # 0 has special meaning
# error_plot_mult(
# [data_list[i] for i in latencies],
# output_dir + f'{name}.png',
# 20,
# 0,
# 40,
# 5,
# "Number of OT Extensions [#]",
# "Time [min]",
# r"OT Execution Time with limited BW (SS $2^{20}$, No TLS/MAL)",
# legend_labels=legend_labels,
# x_label_step=1,
# legend_pos=None
# )
#
# # Async Bandwidth WITHOUT TLS
# -------------------------------------------------
# if False or PLOT_ALL:
# name = "butthead_ot_bandwidth_async"
# data_list = read_data_mult(input_dir + f"bandwidth/{name}.csv", 3, 12, 7)
# for i in data_list.keys():
# data_list[i] = convert_to_minutes(data_list[i])
# latencies = [6000, 50000, 100000, 0]
# legend_labels = [f"{round(i / 1000)}Mbit/s" for i in latencies if
# i is not 0]
# legend_labels.append("Unlimited") # 0 has special meaning
# error_plot_mult(
# [data_list[i] for i in latencies],
# output_dir + f'{name}.png',
# 20,
# 0,
# 40,
# 5,
# "Number of OT Extensions [#]",
# "Time [min]",
# r"OT Execution Time with limited BW (1:10) (SS $2^{20}$,
# No TLS/MAL)",
# legend_labels=legend_labels,
# x_label_step=1,
# legend_pos=None
# )
#
# # Comprasion
# Plot--------------------------------------------------------------
# if True or PLOT_ALL:
# MEASURED_TLS = False
# legend = []
# data_list = []
#
# if MEASURED_TLS:
# tls = read_data(input_dir + f"tls/butthead_ot_tls.csv", 3,
# 12)
# data_list.append(tls)
# legend.append("KKRT16 (128Bit) TLS (Meas.)")
# y_step = 30
# y_max = 280
# else:
# y_step = 10
# y_max = 40
#
# malicious = read_data(
# input_dir + f"malicious/butthead_ot_malicious.csv", 3, 12)
# data_list.append(malicious)
# legend.append("OOS16 (76Bit)")
#
# baseline128 = read_data(
# input_dir + f"baseline/butthead_ot_baseline128.csv", 3, 12)
#
# if True:
# # Theoretic TLS
# # Read sent Bytes
# sent = read_data(
# input_dir + f"baseline/butthead_ot_baseline128.csv", 3, 13)
# # Read received Bytes
# received = read_data(
# input_dir + f"baseline/butthead_ot_baseline128.csv", 3, 15)
# # Add overhead
# tls_theoretic = compute_tls_curve(baseline128, sent, received)
# data_list.append(tls_theoretic)
# legend.append("KKRT16 (128Bit) TLS (Theo.)")
#
# data_list.append(baseline128)
# legend.append("KKRT16 (128Bit)")
#
# baseline76 = read_data(input_dir +
# f"baseline/butthead_ot_baseline76.csv",
# 3,
# 12)
# data_list.append(baseline76)
# legend.append("KKRT16 (76Bit)")
#
# fmts = ['-' for _ in range(len(data_list))]
# # Dashed line for theoretic tls + RR 17
# fmts[1] = '--'
#
# error_plot_mult(
# data_list,
# output_dir + f'ot_comparison.png',
# 20,
# 0,
# y_max,
# y_step,
# "Number of OT Extensions [#]",
# "Time [s]",
# r"OT Execution Time for Setsize $2^{20}$ (10 Reps)",
# x_label_step=1,
# legend_labels=legend,
# fmts=fmts
# )
| 6,924
| 31.97619
| 82
|
py
|
parameter-exchange
|
parameter-exchange-master/results_eval/scripts/bloom_full_insert.py
|
#!/usr/bin/env python3
"""Bloom dependence on Insert Size."""
# noinspection PyUnresolvedReferences
from typing import List
from plot.plot import (read_data, convert_to_gb, convert_to_minutes,
INPUT_DIR, OUTPUT_DIR, error_plot_mult,
EXTENSION, plot_settings, Legend)
PLOT_ALL = 1
output_dir = OUTPUT_DIR + 'bloom_insert_dep/'
input_dir = INPUT_DIR + 'bloom_full_insert/'
name = "butthead_bloom_insert"
# Major ticks every 10^8 inserted elements up to one billion.
xticks = [i for i in range(0, 10 ** 9 + 1, 10 ** 8)]
# FIX: the first label was the int 0 inside a List[str]; use the string
# "0" so the list matches its annotation (rendered output is unchanged).
xlabels: List[str] = ["0"] + [
    f"{round(i / 10 ** 9, 1)} Bil" for i in
    xticks[1:]]
# Subplot margins as fractions of the figure size.
BOTTOM_ADJUST = 0.22
RIGHT_ADJUST = 0.935
TOP_ADJUST = 0.97
LEFT_ADJUST = 0.15
# Insert Time
if False or PLOT_ALL:
    with plot_settings(half_width=True):
        # x = inserted elements (col 3), y = insertion time (col 6,
        # seconds), plotted in minutes.
        d = read_data(input_dir + f"{name}.csv", 3, 6)
        d = convert_to_minutes(d)
        error_plot_mult(
            [d],
            output_dir + f'insert_time{EXTENSION}',
            100000000,
            0,
            350,
            50,
            "Inserted Elements [#]",
            "Insertion Time [min]",
            r"",
            x_label_step=2,
            auto_ylabels=True,
            adjust=(
                LEFT_ADJUST, RIGHT_ADJUST, TOP_ADJUST, BOTTOM_ADJUST),
            xticks=xticks,
            xlabels=xlabels
        )
| 1,310
| 27.5
| 70
|
py
|
parameter-exchange
|
parameter-exchange-master/results_eval/scripts/metric.py
|
#!/usr/bin/env python3
"""Bloom Size Plot."""
import os
# noinspection PyUnresolvedReferences
import pickle
from plot.plot import (error_plot_mult, read_data,
INPUT_DIR, OUTPUT_DIR, EXTENSION, plot_settings)
PLOT_ALL = True
output_dir = OUTPUT_DIR + 'metric/'
os.makedirs(output_dir, exist_ok=True)
input_dir = INPUT_DIR + 'metric/'
# ROWS
METRIC = 1
OFFSET = 2
POSITIVE = 3
TOTAL_LEN = 4
ID_LEN = 5
ROUNDING = 6
CANDIDATES = 7
y_label = "Candidates [#]"
x_axis = {
OFFSET: "Offset [%]",
TOTAL_LEN: "Record Length [#]",
ID_LEN: "ID Parameters [#]",
ROUNDING: "Rounding Value"
}
# ADJUSTS
TOP = 0.985
BOTTOM = 0.22
LEFT = 0.16
RIGHT = 0.95
evals = [
{
'name': 'metric_id12',
'x': ROUNDING,
'x_step': 2,
'x_float': False,
'y': CANDIDATES,
'title': 'Rel. Offset: ID Len. 10; 10% Offset',
'y_log': True,
'auto_y': True,
'y_step': 1 * 10 ** 20,
'y_max': 5 * 10 ** 20,
'adjust': (LEFT + 0.005, RIGHT + 0.03, TOP, BOTTOM),
'half_width': True,
'out': 'rounding_single',
'xticks': [i for i in range(0, 10, 2)],
'xlabels': [i for i in range(0, 10, 2)],
'minor_xticks': [i for i in range(1, 10, 2)],
'minor_xlabels': ['' for i in range(1, 10, 2)],
},
{
'name': 'metric_id1',
'x': OFFSET,
'x_step': 5,
'x_float': True,
'y': CANDIDATES,
'title': 'Rel. Offset: ID Len. 10; Rounding: 3',
'y_log': True,
'adjust': (LEFT, RIGHT + 0.02, TOP, BOTTOM),
'half_width': True,
'out': 'offset',
'xticks': [i for i in range(0, 51, 10)],
'xlabels': [i for i in range(0, 51, 10)],
},
# {
# 'name': 'metric_id2',
# 'x': OFFSET,
# 'x_step': 5,
# 'x_float': True,
# 'y': CANDIDATES,
# 'title': 'Rel. Offset: ID Len. 10; Rounding: 3; Positive Offset',
# 'y_log': True,
# 'adjust': (LEFT, RIGHT, TOP, BOTTOM),
# 'half_width': True,
# 'out': 'offset_positive'
# },
# {
# 'name': 'metric_id3',
# 'x': TOTAL_LEN,
# 'x_step': 10,
# 'x_float': False,
# 'y': CANDIDATES,
# 'title': 'Rel. Offset: ID Len. 10; Rounding: 3; 10% Offset',
# 'y_log': False,
# 'auto_y': False,
# 'y_step': 1 * 10 ** 20,
# 'y_max': 5 * 10 ** 20,
# 'scientific_y': True,
# 'adjust': (LEFT, RIGHT, TOP, BOTTOM),
# 'half_width': True,
# 'out': 'total_len'
# },
{
'name': 'metric_id4',
'x': ID_LEN,
'x_step': 10,
'x_float': False,
'y': CANDIDATES,
'title': 'Rel. Offset: Rounding: 3; 10% Offset',
'y_log': True,
'adjust': (LEFT + 0.015, RIGHT + 0.01, TOP, BOTTOM),
'half_width': True,
'out': 'id_len',
'y_lim_bottom': 1,
'xticks': [i for i in range(0, 101, 20)],
'xlabels': [i for i in range(0, 101, 20)],
'minor_xticks': [i for i in range(10, 101, 20)],
'minor_xlabels': ['' for i in range(10, 101, 20)],
},
{
'name': 'metric_id5',
'x': ROUNDING,
'x_step': 2,
'x_float': False,
'y': CANDIDATES,
'title': 'Rel. Offset: ID Len. 10; 10% Offset',
'y_log': True,
'adjust': (LEFT + 0.005, RIGHT + 0.03, TOP, BOTTOM),
'half_width': True,
'out': 'rounding_all',
'y_lim_bottom': 1,
'xticks': [i for i in range(0, 10, 2)],
'xlabels': [i for i in range(0, 10, 2)],
'minor_xticks': [i for i in range(1, 10, 2)],
'minor_xlabels': ['' for i in range(1, 10, 2)],
},
# {
# 'name': 'metric_id6',
# 'x': OFFSET,
# 'x_step': 5,
# 'x_float': True,
# 'y': CANDIDATES,
# 'title': 'IKV - Relative Offset',
# 'y_log': True,
# 'adjust': (LEFT, RIGHT, TOP, BOTTOM),
# 'half_width': True,
# 'out': 'offset_ikv'
# },
# {
# 'name': 'metric_id7',
# 'x': ROUNDING,
# 'x_step': 2,
# 'x_float': False,
# 'y': CANDIDATES,
# 'title': 'IKV - Relative Offset',
# 'y_log': True,
# 'adjust': (LEFT + 0.015, RIGHT + 0.03, TOP - 0.03, BOTTOM),
# 'half_width': True,
# 'out': 'rounding_ikv'
# },
# {
# 'name': 'metric_id8',
# 'x': ID_LEN,
# 'x_step': 2,
# 'x_float': False,
# 'y': CANDIDATES,
# 'title': 'IKV - Relative Offset',
# 'y_log': True,
# 'adjust': (LEFT, RIGHT + 0.02, TOP, BOTTOM),
# 'half_width': True,
# 'out': 'id_len_ikv'
# },
# {
# 'name': 'metric_id9',
# 'x': OFFSET,
# 'x_step': 5,
# 'x_float': True,
# 'y': CANDIDATES,
# 'title': 'WZL - Relative Offset',
# 'y_log': True,
# # 'y_lim': 10 ** 39,
# 'adjust': (LEFT, RIGHT, TOP - 0.025, BOTTOM),
# 'half_width': True,
# 'out': 'offset_wzl'
# },
# {
# 'name': 'metric_id10',
# 'x': ROUNDING,
# 'x_step': 2,
# 'x_float': False,
# 'y': CANDIDATES,
# 'title': 'WZL - Relative Offset',
# 'y_log': True,
# 'adjust': (LEFT + 0.015, RIGHT + 0.03, TOP, BOTTOM),
# 'half_width': True,
# 'out': 'rounding_wzl'
# },
# {
# 'name': 'metric_id11',
# 'x': ID_LEN,
# 'x_step': 2,
# 'x_float': False,
# 'y': CANDIDATES,
# 'title': 'WZL - Relative Offset',
# 'y_log': True,
# 'adjust': (LEFT, RIGHT + 0.02, TOP, BOTTOM),
# 'half_width': True,
# 'out': 'id_len_wzl'
# },
]
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# Metric
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# Parse every configured eval's CSV once and cache the parsed dict as a
# pickle so repeated plotting runs avoid re-reading the raw data.
for cfg in evals:  # renamed from `eval`: don't shadow the builtin
    parsed = read_data(input_dir + f"{cfg['name']}.csv", cfg['x'], cfg['y'],
                       x_is_float=cfg['x_float'])
    with open(input_dir + f"{cfg['name']}.pyc", 'wb') as f:
        pickle.dump(parsed, f)
# Render one plot per eval configuration from the cached pickles.
# `cfg.get(key)` replaces the repeated `x[k] if k in x else None` pattern
# (identical behavior); loop var renamed from `eval` (shadowed a builtin).
for cfg in evals:
    with plot_settings(half_width=cfg['half_width']):
        with open(input_dir + f"{cfg['name']}.pyc", 'rb') as f:
            d = pickle.load(f)
        error_plot_mult(
            [d],
            output_dir + f"{cfg['out']}{EXTENSION}",
            cfg['x_step'],
            0,
            cfg.get('y_max', 0),
            cfg.get('y_step', 0),
            x_axis[cfg['x']],
            y_label,
            cfg['title'],
            adjust=cfg.get('adjust'),
            y_log=cfg['y_log'],
            auto_ylabels=cfg.get('auto_y', True),
            y_lim=cfg.get('y_lim'),
            y_lim_bottom=cfg.get('y_lim_bottom'),
            xticks=cfg.get('xticks'),
            xlabels=cfg.get('xlabels'),
            minor_xticks=cfg.get('minor_xticks'),
            minor_xlabels=cfg.get('minor_xlabels'),
        )
| 7,586
| 29.716599
| 79
|
py
|
parameter-exchange
|
parameter-exchange-master/results_eval/scripts/plot/plot.py
|
#!/usr/bin/env python3
"""
This file contains general plot functionality.
"""
import json
import math
import os
from contextlib import contextmanager
from copy import deepcopy
from typing import List, Tuple, Dict, Union
import matplotlib.pyplot as plt
import numpy as np
import scipy.stats
from matplotlib import patches, transforms
from matplotlib import ticker
from matplotlib.axes import Axes
from matplotlib.container import BarContainer
from matplotlib.figure import Figure
from scipy.constants import golden as golden_ratio
from .colors import bar_colors, blue, orange
# -----------------------------------------------------------------------------
# Constants
# -----------------------------------------------------------------------------
PRINT = 1 # If false, use show instead of print
EXTENSION = '.pdf' # '.png'
TITLE = False # Print Plot titles?
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# Default Settings
# -----------------------------------------------------------------------------
def cm(value: float or int) -> float:
    """Convert a length in centimetres to the inches matplotlib expects."""
    inch_in_cm = 2.54
    return value / inch_in_cm
def golden_height(width: float) -> float:
    """Return the height that puts width x height at the golden ratio."""
    inverse_ratio = 1.0 / golden_ratio
    return inverse_ratio * width
# os.path.dirname moves on directory up
_cur_dir = os.path.dirname(os.path.dirname(
os.path.dirname(
os.path.dirname(os.path.abspath(__file__)))))
WORKING_DIR = os.path.abspath(_cur_dir) + '/'
# OUTPUT_DIR = WORKING_DIR + 'results_eval/' + 'plots/'
THESIS_DIR = os.path.dirname(os.path.dirname(
WORKING_DIR)) + '/2019-ma-buchholz-thesis/Thesis/figures/plots/'
OUTPUT_DIR = THESIS_DIR
os.makedirs(OUTPUT_DIR, exist_ok=True)
INPUT_DIR = WORKING_DIR + 'results_eval/'
LLNCS_WIDTH = 12.2 # cm
LLNCS_HALF_WIDTH = 0.5 * 12.2
THESIS_WIDTH = 427.43153 * 0.03514
# IEEE_WIDTH = 241.14749 * 0.03514 # pt in cm
IEEE_WIDTH = 241.14749 * 0.03514 # pt in cm
hatch_patterns = ("/", "\\", "x", "o", "|", "-", "+", "O", ".", "*")
Y_LIM = 1.1 # Space at top of plot in stacked bar plots
figure_width = THESIS_WIDTH # IEEE_WIDTH
figure_height = 5.0
# Thesis fonts: 12pt Text, 10.95 pt Caption, 10pt subcation
font_size = 9
ticks_fontsize = font_size - 1
legend_font_size = font_size - 1
default_settings = {
# By Roman
'figure.figsize': (cm(figure_width), cm(figure_height)),
'font.size': font_size,
'legend.fontsize': legend_font_size,
'axes.titlesize': font_size,
'axes.labelsize': font_size,
'ytick.labelsize': ticks_fontsize,
'xtick.labelsize': ticks_fontsize,
'hatch.linewidth': 0.8,
'xtick.minor.pad': 1,
'axes.labelpad': 3,
# By Erik
'legend.framealpha': 1,
'legend.edgecolor': 'black',
'legend.fancybox': False,
'legend.handletextpad': 0.2,
'legend.columnspacing': 0.8,
'figure.dpi': 1000,
# 'figure.autolayout': True,
'legend.facecolor': 'white',
'lines.linewidth': 1.5,
'errorbar.capsize': 3, # Länge der Hüte
'lines.markeredgewidth': 0.7, # Dicke des horizontalen Strichs/Error Caps
'lines.markersize': 3,
# 'text.usetex' : True
}
plt.rcParams.update(default_settings)
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# Helper functins
# -----------------------------------------------------------------------------
class Legend(object):
    """Represents a legend object

    empty_positions: Empty spaces in legend
    order: Reorder legend according to list
    axis: The axis to add the legend to
    markers: List of plot objects (filled in by the plot routines)
    labels: The labels to use
    location: 'top', 'above' or default matplotlib values
    """
    STACKS: str = 'STACKS'
    BARS: str = 'BARS'
    TOP: str = 'top'

    target: str = STACKS
    location: str = None
    markers: list = []
    ncols: int = None
    labels: List[str] = []
    axis = plt
    order: Union[Tuple[int], List[int], None] = None
    empty_positions: Union[List[int], None] = None
    custom_labels: List[tuple] = None

    def __init__(self, labels: List[str], location: str = None,
                 custom_labels: List[tuple] = None,
                 empty_positions: Union[List[int], None] = None,
                 order: Union[Tuple[int], List[int], None] = None,
                 ncols: int = None):
        self.labels = labels
        self.location = location
        self.custom_labels = custom_labels
        self.empty_positions = empty_positions
        self.order = order
        self.ncols = ncols
        # FIX: `markers` used to exist only as a mutable class attribute;
        # make() mutates it with insert(), so every Legend instance shared
        # (and polluted) the same list.  Give each instance its own list.
        self.markers = []

    def make(self):
        """Add legend to axis and return the matplotlib legend object."""
        if self.empty_positions is not None:
            # Invisible rectangle acts as a spacer entry.
            r = patches.Rectangle((0, 0), 1, 1, fill=False,
                                  edgecolor='none',
                                  visible=False)
            for pos in self.empty_positions:
                self.labels.insert(pos, "")
                self.markers.insert(pos, r)
        if self.custom_labels is not None:
            if len(self.labels) == 0:
                # Only custom labels wanted
                self.markers = []
            for m, t in self.custom_labels:
                self.markers.append(m)
                self.labels.append(t)
        if self.order is not None:
            # Reorder both lists in lockstep.
            self.markers = [self.markers[i] for i in self.order]
            self.labels = [self.labels[i] for i in self.order]
        if self.ncols is not None:
            columns = self.ncols
        elif len(self.labels) <= 5:
            columns = len(self.labels)
        else:
            # Two rows for longer legends.
            columns = math.ceil(len(self.labels) / 2)
        if self.location == "top":
            legend = self.axis.legend(self.markers, self.labels,
                                      loc='center', ncol=columns,
                                      bbox_to_anchor=(0.5, 1))
        elif self.location == 'above':
            legend = self.axis.legend(self.markers, self.labels,
                                      loc='lower center', ncol=columns,
                                      bbox_to_anchor=(0.5, 1))
        else:
            # Best location
            legend = self.axis.legend(self.markers, self.labels,
                                      loc=self.location)
        legend.get_frame().set_linewidth(0.4)
        return legend
def set_minor_xticks(ax: Axes, minor_xticks: List[float],
                     minor_xlabels: List[str],
                     rotation: float = None,
                     labelsize: float = None):
    """Set minor xticks

    Places fixed minor tick positions/labels on *ax*; optionally adjusts
    the minor tick label size and rotation.  No-op when *minor_xticks*
    is None.
    """
    if minor_xticks is not None:
        ax.xaxis.set_minor_locator(ticker.FixedLocator(minor_xticks))
        ax.xaxis.set_minor_formatter(ticker.FixedFormatter(minor_xlabels))
        if labelsize is not None:
            ax.xaxis.set_tick_params(which='minor', labelsize=labelsize)
        if rotation is not None:
            ax.xaxis.set_tick_params(which='minor', rotation=rotation)
@contextmanager
def plot_settings(params: dict = None,
                  half_width: bool = False,
                  two_third: bool = False):
    """Temporarily update the rcParams for one plot.

    :param params: extra rcParams to apply (overridden by half_width).
    :param half_width: use the half-column figure size with fonts one
        point smaller across the board.
    :param two_third: shrink the figure height to 2/3 (NOTE: mutates a
        caller-supplied *params* dict, as before).
    The module defaults are restored when the block exits — including on
    exceptions (FIX: previously the restore was skipped if the body
    raised, leaving the temporary settings active globally).
    """
    if params is None:
        params = {}
    if half_width:
        params = {
            'font.size': font_size - 1,
            'legend.fontsize': legend_font_size - 1,
            'axes.titlesize': font_size - 1,
            'axes.labelsize': font_size - 1,
            'ytick.labelsize': ticks_fontsize - 1,
            'xtick.labelsize': ticks_fontsize - 1,
            'figure.figsize': (
                cm(figure_width) / 2, cm(6.0) * 2 / 3)
        }
    if two_third:
        params['figure.figsize'] = (
            cm(figure_width), cm(figure_height) * 2 / 3)
    plt.rcParams.update(params)
    try:
        yield
    finally:
        # Always restore the module-wide defaults.
        plt.rcParams.update(default_settings)
# noinspection PyUnresolvedReferences
def crop_plot(adjust: Union[list, tuple, None],
              figure: Figure,
              hspace: Union[float, None] = None,
              divider: bool = False
              ) -> None:
    """Adjust plot margin

    Applies explicit (left, right, top, bottom) subplot fractions from
    *adjust* (falling back to ``tight_layout`` when None), then nudges
    all four margins outward by small fixed offsets.

    :param adjust: (left, right, top, bottom) fractions or None.
    :param figure: the figure to adjust.
    :param hspace: vertical space between subplots, forwarded to
        ``plt.subplots_adjust``.
    :param divider: use offsets that leave room for axis dividers.
    """
    if adjust is not None:
        figure.subplots_adjust(left=adjust[0], right=adjust[1], top=adjust[2],
                               bottom=adjust[3])
    else:
        figure.tight_layout()
    if divider:
        # We need more space because of axis dividers
        right_offset = 0
        left_offset = 0.012
        bottom_offset = 0.05
        top_offset = 0.04
    else:
        right_offset = 0.01
        left_offset = 0.02
        bottom_offset = 0.05
        top_offset = 0.04
    # Widen the margins by the chosen offsets (applies in both branches).
    figure.subplots_adjust(
        left=figure.subplotpars.left - left_offset,
        right=figure.subplotpars.right + right_offset,
        top=figure.subplotpars.top + top_offset,
        bottom=figure.subplotpars.bottom - bottom_offset,
    )
    plt.subplots_adjust(hspace=hspace)
def output(filename: str) -> None:
    """Print to file or show.

    Saves the current figure to *filename* when the module-level PRINT
    flag is set, otherwise shows it interactively; creates the target
    directory and always closes the figure afterwards.
    """
    os.makedirs(os.path.dirname(filename), exist_ok=True)
    if PRINT:
        plt.savefig(filename)
    else:
        plt.show()
    print("Print to: %s" % filename)
    plt.close()
def add_gray_bars(axis: Axes, gray_bg: List[tuple]):
    """Add vertical gray BG

    :param axis: axis to draw on.
    :param gray_bg: list of (start, end, color) x-spans, or None.
    :return: the created axvspan patches (empty list when gray_bg is None).
    """
    res = []
    if gray_bg is not None:
        for start, end, color in gray_bg:
            # Draw behind the data (zorder=-1), semi-transparent.
            res.append(
                axis.axvspan(start, end, facecolor=color, alpha=0.5,
                             zorder=-1))
    return res
def add_text_boxes_into_bars(axis: Axes, text_boxes: List[dict],
                             bars: List[patches.Polygon]):
    """Add Text Boxes into the gray bars

    Each *text_boxes* entry is a dict with 'i' (index into *bars*) and
    'y' (axes-fraction height); all remaining entries are forwarded as
    kwargs to ``axis.text``.  NOTE(review): 'i' and 'y' are deleted from
    the caller's dicts — this function consumes its input.
    """
    for t in text_boxes:
        # Center the text horizontally inside bar t['i'].
        lower_left = bars[t['i']].xy[0]
        upper_right = bars[t['i']].xy[2]
        del t['i']
        x = (lower_left[0] + upper_right[0]) / 2
        y = t['y']
        del t['y']
        # x in data coordinates, y in axes coordinates.
        trans = transforms.blended_transform_factory(
            axis.transData, axis.transAxes)
        axis.text(x, y, **t, transform=trans,
                  horizontalalignment='center',
                  verticalalignment='center',
                  # bbox=dict(boxstyle='square', fc="w", ec="k",
                  #           linewidth=0.2)
                  )
def mean_confidence_interval(data: list, confidence: float = 0.99) -> \
        Tuple[float, float]:
    """Return (mean, half-width of the *confidence* interval) of *data*.

    Uses the Student-t distribution; a single sample yields (value, 0).

    :param confidence: Confidence interval to use, default: 99%
    :param data: List of numbers to compute mean and interval for
    """
    samples = 1.0 * np.array(data)
    count = len(samples)
    if count == 1:
        return samples[0], 0
    mean, std_err = np.mean(samples), scipy.stats.sem(samples)
    half_width: float = std_err * scipy.stats.t.ppf(
        (1 + confidence) / 2., count - 1)
    return float(mean), half_width
def x_to_int(data: dict) -> dict:
    """Return a copy of *data* with every key coerced to int."""
    converted = {}
    for key, value in data.items():
        converted[int(key)] = value
    return converted
def convert_to_kbyte(data: dict) -> dict:
    """
    Divide all y-values by 1000 (e.g. bytes -> kB); mutates *data* in
    place and also returns it.  (The keys are left untouched.)
    """
    for x in data:
        data[x] = [y / 1000 for y in data[x]]
    return data
def convert_to_mb(data: dict) -> dict:
    """Divide all y-values by 10^6 (bytes -> MB); mutates *data* in place."""
    for x in data:
        data[x] = [y / 1000000 for y in data[x]]
    return data
def convert_to_gb(data: dict) -> dict:
    """Divide all y-values by 10^9 (bytes -> GB); mutates *data* in place."""
    for x in data:
        data[x] = [y / 1000000000 for y in data[x]]
    return data
def convert_mib_to_gb(data: dict) -> dict:
    """Rescale every y-value from MiB to decimal GB (1 MiB = 2^20 bytes)."""
    for key, values in data.items():
        data[key] = [v * 2 ** 20 / 1000000000 for v in values]
    return data
def convert_x_to_percent(data: dict) -> dict:
    """Return a new dict whose keys are the old fractional keys scaled
    to integer percent (x -> int(x * 100)); the values are shared, not
    copied.
    """
    new_data = {}
    for x in data:
        new_data[int(x * 100)] = data[x]
    return new_data
def convert_to_percent_of_x(data: dict) -> dict:
    """Rewrite each y as the integer percentage of its x-key, in place."""
    for x, ys in data.items():
        data[x] = [int(y / x * 100) for y in ys]
    return data
def convert_to_minutes(data: dict) -> dict:
    """Convert every y-value from seconds to minutes, in place."""
    for x, ys in data.items():
        data[x] = [seconds / 60 for seconds in ys]
    return data
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# Read input from file
# -----------------------------------------------------------------------------
def remove_head(lines: List[str]) -> List[str]:
    """Drop everything up to and including the END-HEADER marker line.

    Raises IndexError when no marker line exists (same as before).
    """
    idx = 0
    while "END-HEADER" not in lines[idx]:
        idx += 1
    return lines[idx + 1:]
def read_data(file: str, x_index: int, y_index: int = -1,
              x_is_float: bool = False) -> dict:
    """
    Read the data from a file.

    :param file: Input file
    :param x_index: Index of X column
    :param y_index: [optional] Index of Y column
    :param x_is_float: [optional] x is interpreted as float instead of int
    :return: Dict mapping each X value to the list of all Y values seen
        for it, e.g. {x1: [y1, y2], x2: [y3]}.
    """
    with open(file, "r") as fd:
        raw_lines = fd.readlines()
    # Skip the measurement header (everything up to END-HEADER).
    skip = 0
    while "END-HEADER" not in raw_lines[skip]:
        skip += 1
    parse_x = float if x_is_float else int
    result: dict = {}
    for row in raw_lines[skip + 1:]:
        columns = row.split(";")
        x = parse_x(columns[x_index])
        result.setdefault(x, []).append(float(columns[y_index]))
    return result
def read_data_mult(file: str, x_index: int, y_index: int,
                   z_index: int, x_is_float: bool = False) -> Dict[int, dict]:
    """
    Read multiple data dicts (one curve per z value) from a file.

    :param z_index: The index of the column identifying the curve (z)
    :param file: Input file
    :param x_index: Index of X column
    :param y_index: Index of Y column
    :param x_is_float: [optional] x is interpreted as float instead of int
    :return: Dict of dicts in following Format:
        Z-values as outer keys; per z, X-values as keys and Y-values as
        List, even if there is only one.
        {
            z1:{
                x1: [y1, y2, y3],
                x2: [y4],
                x3. [y5, y6]
            },
            z2:{
                x1: [y1, y2, y3],
                x2: [y4],
                x3. [y5, y6]
            }
        }
    """
    with open(file, "r") as fd:
        lines = fd.readlines()
    lines = remove_head(lines)
    # Read out data
    result = {}
    for line in lines:
        values = line.split(";")
        if x_is_float:
            x = float(values[x_index])
        else:
            x = int(values[x_index])
        y = float(values[y_index])
        z = int(values[z_index])
        # Group first by curve id z, then by x.
        if z not in result:
            result[z] = {}
        if x in result[z]:
            result[z][x].append(y)
        else:
            result[z][x] = [y]
    return result
def read_fp(file: str, x_index: int, query_col: int = -2,
            fp_col: int = -1, x_is_float: bool = False) -> dict:
    """
    Read the false positive data from a file.

    The y value for each line is the measured FP *rate*: FP count divided
    by the number of queries (0 when no queries were made).

    :param file: File to read from
    :param x_index: Column of x value
    :param query_col: [optional] Column of # Queries
    :param fp_col: [optional] Column of # FPs
    :param x_is_float: [optional] x is interpreted as float instead of int
    :return: Dict in following Format:
        X-values as keys, Y-values as List, even if there is only one.
        {
            x1: [y1, y2, y3],
            x2: [y4],
            x3. [y5, y6]
        }
    """
    with open(file, "r") as fd:
        lines = fd.readlines()
    lines = remove_head(lines)
    # Read out data
    result = {}
    for line in lines:
        values = line.split(";")
        if x_is_float:
            x = float(values[x_index])
        else:
            x = int(values[x_index])
        # y has to be divided by total number of requests
        if float(values[query_col]) > 0:
            y = float(values[fp_col]) / float(values[query_col])
        else:
            y = 0
        # y = y * 100  # to percent
        if x in result:
            result[x].append(y)
        else:
            result[x] = [y]
    return result
def read_y_only(file: str, y_index: int,
                start_line: int = 0, end_line: int = None) -> List[float]:
    """
    Read a single column of Y values from a measurement file.

    :param file: Input file.
    :param y_index: Column of Y values
    :param end_line: Last line to consider
    :param start_line: First line to consider
    :return: List with all found Y values
    """
    with open(file, "r") as fd:
        raw_lines = fd.readlines()
    # Skip the measurement header (everything up to END-HEADER).
    skip = 0
    while "END-HEADER" not in raw_lines[skip]:
        skip += 1
    body = raw_lines[skip + 1:]
    return [float(row.split(";")[y_index])
            for row in body[start_line:end_line]]
def read_ram(file: str, measurement_interval=0.5,
             start_line: int = 0, end_line: int = None, y: int = 3) -> Tuple[dict, float]:
    """
    Read data from RAM Eval file.

    Each selected line holds a JSON list of RAM samples taken every
    *measurement_interval* seconds; samples are regrouped by elapsed
    time across all selected lines.  Prints the maximum sample seen.

    :param y: column of ram array.
    :param measurement_interval: time interval of measurements
    :param file: Input file
    :param end_line: Last line to consider
    :param start_line: First line to consider
    :return: (dict mapping elapsed time -> list of samples, max sample)
    """
    with open(file, "r") as f:
        lines = f.readlines()
    lines = remove_head(lines)
    # Seed with a zero point so curves start at the origin.
    result = {0: [0]}
    max_value = 0
    for line in lines[start_line:end_line]:
        values: List[float] = json.loads(line.split(";")[y])
        for i, v in enumerate(values):
            max_value = max(max_value, v)
            # i + 1 b/c first measurement at end of first interval.
            time = (i + 1) * measurement_interval
            if time in result:
                # noinspection PyTypeChecker
                result[time].append(v)
            else:
                result[time] = [v]
    print("Max Ram Usage: ", max_value)
    return result, max_value
def read_ram_max(file: str, x_index: int, y_index: int = -1,
                 x_is_float: bool = False) -> dict:
    """
    Read maximal ram value for each x.

    :param file: Input file
    :param x_index: Index of X column
    :param y_index: [optional] Index of Y column
    :param x_is_float: [optional] x is interpreted as float instead of int
    :return: Dict in following Format:
        X-values as keys, Y-values as List, even if there is only one.
        {
        x1: [y1, y2, y3],
        x2: [y4],
        x3: [y5, y6]
        }
    """
    with open(file, "r") as handle:
        content = remove_head(handle.readlines())
    collected = {}
    for row in content:
        cols = row.split(";")
        # X column is either numeric-int or float depending on the caller.
        key = float(cols[x_index]) if x_is_float else int(cols[x_index])
        # Y column holds a JSON list of samples; keep only its maximum.
        largest = float(max(json.loads(cols[y_index])))
        collected.setdefault(key, []).append(largest)
    return collected
def join_stack_data(in_stacks, new_keys=None):
    """
    Join several stacked-bar-plot lists into one plot.

    :param in_stacks: List of stack lists; each stack list is a list of
        dicts (one dict per bar, mapping stack key -> value). Inputs are
        deep-copied and never modified.
    :param new_keys: [optional] One list of replacement keys per stack
        list; the i-th sorted key of every dict is renamed to the i-th
        new key before joining.
    :raises RuntimeError: If the same key occurs in two stack lists.
    :return: One list of dicts containing the keys of all inputs.
    """
    stacks = [deepcopy(s) for s in in_stacks]
    if new_keys is not None:
        for j, s in enumerate(stacks):
            for e in s:
                # Snapshot the old mapping before renaming: renaming in
                # place would silently lose data whenever a new key
                # collides with an old key that has not been renamed yet
                # (e.g. keys ['a', 'b'] renamed to ['b', 'c']).
                old_keys = sorted(e.keys())
                old_values = [e[key] for key in old_keys]
                e.clear()
                for new_key, value in zip(new_keys[j], old_values):
                    e[new_key] = value
    result = stacks[0]
    for i, a in enumerate(result):
        for stack in stacks[1:]:
            for key in stack[i]:
                if key in a:
                    raise RuntimeError(
                        "Cannot join because same key in two stacks.")
                a[key] = stack[i][key]
    return result
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# Plot Scripts
# -----------------------------------------------------------------------------
def box_plot(
        data,
        filename,
        x_step,
        min_y,
        max_y,
        y_step,
        xlabel,
        ylabel,
        title,
        adjust,
        x_label_step=1) -> None:
    """
    Create a box plot with errorbars.

    :param data: Data in the following Format:
        A dict with the X-Values as keys and the Y-Values as a list (even
        if there is only one Y-Value per X-value).
        { x1: [y12, y12, y13], x2: [y21], x3: [y31, y32, y33]}
    :param filename: Output filename
    :param x_step: Stepsize of xticks
    :param min_y: Start of Y-Axis
    :param max_y: Maximal ytick
    :param y_step: Stepsize of yticks
    :param xlabel: Large Label of X-Axis
    :param ylabel: Large Label of Y-Axis
    :param title: Title of Plot
    :param adjust: Tuple with 4 values (left, right, top, bottom)
    :param x_label_step: [optional] Every ith label printed on x-axis
    :return: None
    """
    fig, ax = plt.subplots()
    # One box per X value, in sorted order.
    x_values = sorted(data.keys())
    y_values = [data[x] for x in x_values]
    n = len(y_values)
    # Box centers, spaced x_step apart.
    xticks = np.arange(x_step, n * x_step + 1, x_step)
    bp = ax.boxplot(
        y_values,
        notch=False,
        sym=None,  # No outliers
        vert=True,
        whis=1.5,
        widths=x_step / 1.25,  # Width of boxes
        showmeans=True,  # Show average
        positions=xticks  # Position of boxes
    )
    # Whiskers/boxes blue, medians/means red.
    for part, part_color in (('whiskers', 'blue'), ('medians', 'red'),
                             ('means', 'red'), ('boxes', 'blue')):
        plt.setp(bp[part], color=part_color)
    # Axis labels and (optional) title
    plt.xlabel(xlabel)
    plt.ylabel(ylabel)
    if TITLE:
        plt.title(title, y=0.98)
    plt.xlim([0, (n + 1) * x_step])
    # Blank every label that is not on the x_label_step grid.
    labels = [x if i % x_label_step == 0 else ''
              for i, x in enumerate(x_values)]
    plt.xticks(xticks, labels)
    plt.yticks(np.arange(min_y, max_y + y_step, y_step))
    # Crop
    crop_plot(adjust, figure=fig)
    output(filename)
def convert_numbers_to_str(xlabels: Union[List[float], List[int], List[str]]):
    """
    Format numeric labels in textual form like Mil/Bil etc.

    The list is modified in place and also returned.

    :param xlabels: The xlabels to edit
    :return: Edited xlabels
    """
    for position in range(len(xlabels)):
        value = xlabels[position]
        if value < 1000:
            text = f"{value}"
        elif value < 1000000:
            text = f"{(value // 1000):1} k"
        elif value < 1000000000:
            text = f"{value // 1000000} Mio"
        elif value < 1000000000000:
            text = f"{value // 1000000000} Bil"
        else:
            # Fall back to scientific notation for huge values.
            text = f"{value:.{1}E}"
        xlabels[position] = text
    return xlabels
def set_xticks(axis: Axes, xticks: List[float], xlabels: List[str],
               minor_xticks: List[float], minor_xlabels: List[str],
               x_values: List[float], x_step: float,
               x_label_step: int,
               logarithmic: bool,
               center_xlabels: bool,
               x_rotation: float,
               log_base: int = 1):
    """
    Add formatted xticks to axis.

    :param axis: Axis to add labels to
    :param xticks: Locations of major xticks (computed when None)
    :param xlabels: Labels of major xticks (computed when None)
    :param minor_xticks: Locations of minor xticks
    :param minor_xlabels: Labels of minor xticks
    :param x_values: X-Values used by Plot
    :param x_step: Step value for xticks
    :param x_label_step: Only label every ith with major + label
    :param logarithmic: logarithmic axis?
    :param center_xlabels: Put labels between ticks
    :param x_rotation: Rotate labels
    :param log_base: [optional] Log Base
    """
    if logarithmic:
        # Determine the exponent range covered by the data.
        min_tick = math.log(min(x_values), log_base)
        max_tick = math.log(max(x_values), log_base)
        if min_tick > 0:
            min_tick = int(math.ceil(min_tick))
            max_tick = int(math.ceil(max_tick))
        else:
            min_tick = int(math.floor(min_tick))
            max_tick = int(math.floor(max_tick))
    else:
        min_tick, max_tick = None, None
    # Define xticks (one per power of log_base, or linear on x_step grid)
    if xticks is None:
        if logarithmic:
            xticks = [log_base ** i for i in range(min_tick, max_tick + 1)]
        else:
            xticks = list(np.arange(0, max(x_values) + 1, x_step))
    # Define xlabels
    if xlabels is None:
        if logarithmic:
            # Logarithmic Axis: render as base^exponent
            xlabels = [
                fr"{log_base}^{{{i}}}"
                for i in range(min_tick, max_tick + 1)
            ]
        else:
            # Normal Scale: humanized numbers (k / Mio / Bil)
            xlabels = list(xticks[:])
            xlabels = convert_numbers_to_str(xlabels)
    if minor_xticks is None:
        minor_xticks = []
    if minor_xlabels is None:
        minor_xlabels = []
    # Make unlabeled ticks minor: every tick that is not on the
    # x_label_step grid is demoted to an (unlabeled) minor tick.
    remove_indices = []
    for i in range(0, len(xticks[:]), 1):
        if i % x_label_step != 0:
            # noinspection PyTypeChecker
            minor_xticks.append(xticks[i])
            minor_xlabels.append('')
            remove_indices.append(i)
    for i in sorted(remove_indices, reverse=True):
        del xticks[i]
        del xlabels[i]
    # Label
    if center_xlabels:
        # Draw major ticks without labels and put the label text on
        # invisible minor ticks halfway between major ticks.
        axis.set_xlim(0, max(x_values))
        axis.xaxis.set_major_locator(ticker.FixedLocator(xticks))
        # Compute text positions: midpoints between consecutive ticks
        ticks = [0] + list(xticks)
        minor_ticks = []
        for i, t in enumerate(ticks[1:]):
            minor_ticks.append((t + ticks[i]) / 2)
        axis.xaxis.set_minor_locator(ticker.FixedLocator(minor_ticks))
        axis.xaxis.set_major_formatter(ticker.NullFormatter())
        axis.xaxis.set_minor_formatter(ticker.FixedFormatter(xlabels))
        for tick in axis.xaxis.get_minor_ticks():
            # NOTE(review): Tick.label is a deprecated alias of label1 in
            # newer matplotlib — confirm against the pinned version.
            tick.label.set_fontsize(ticks_fontsize)
            # Hide the minor tick marks themselves; only text remains.
            tick.tick1line.set_markersize(0)
            tick.tick2line.set_markersize(0)
            tick.label1.set_horizontalalignment('center')
    else:
        axis.set_xticks(xticks)
        axis.set_xticklabels(xlabels)
        if x_rotation is not None:
            axis.xaxis.set_tick_params(which='major',
                                       labelrotation=x_rotation)
            # fontdict={
            #     'horizontalalignment': 'right',
            #     'labelrotation': x_rotation})
        set_minor_xticks(axis, minor_xticks, minor_xlabels, x_rotation)
def set_yticks(axis: Axes, yticks: List[float], ylabels: List[str],
               min_y: float, max_y: float, y_step: float,
               logarithmic: bool, y_label_step: int, scientific_y: bool,
               log_base: int = 10):
    """
    Add Y-Ticks to axis.

    :param axis: Axis to add ticks to (kept for signature symmetry with
        set_xticks; ticks are applied via the pyplot state machine)
    :param yticks: Major Y-Tick Locations (computed when ylabels is None)
    :param ylabels: Major Y-Tick Labels (computed when None)
    :param min_y: Minimal Y Value
    :param max_y: Maximal Y Value
    :param y_step: Y Step
    :param logarithmic: Axis logarithmic?
    :param y_label_step: Only label every ith with major tick + label
    :param scientific_y: Scientific Format?
    :param log_base: [optional] Log Base
    """
    if ylabels is None:
        if not logarithmic:
            yticks = np.arange(min_y, max_y + y_step, y_step)
            if scientific_y:
                ylabels = [f"{e:.{1}E}" for e in yticks]
            else:
                ylabels = list(yticks[:])
                ylabels = convert_numbers_to_str(ylabels)
        else:
            # Logarithmic scale: one tick per power of log_base.
            max_tick = int(math.ceil(math.log(max_y, log_base)))
            yticks = [log_base ** i for i in range(max_tick + 1)]
            # Copy instead of aliasing: blanking entries of ylabels below
            # must not clobber the tick *positions* (ylabels = yticks
            # would make both names refer to the same list).
            ylabels = list(yticks)
    # Only label every ith y-tick
    for i in range(0, len(ylabels)):
        if i % y_label_step != 0:
            ylabels[i] = ''
    # Label
    plt.yticks(yticks, ylabels)
def error_plot_mult(
        data_list: List[dict],
        filename: str,
        x_step: float,
        min_y: float,
        max_y: float,
        y_step: float,
        xlabel: str,
        ylabel: str,
        title: str,
        legend: Legend = None,
        adjust: Union[list, tuple] = None,
        x_label_step: int = 1,
        y_label_step: int = 1,
        scientific_y: bool = False,
        x_log: bool = False,
        y_log: bool = False,
        xticks: List[float] = None,
        xlabels: List[str] = None,
        minor_xticks: List[float] = None,
        minor_xlabels: List[str] = None,
        yticks: List[float] = None,
        ylabels: List[str] = None,
        x_sync: bool = False,
        log_base=10,
        reverseX: bool = True,
        grid: bool = False,
        ver_grid: bool = False,
        hor_grid: bool = False,
        fmts: List[str] = None,
        y_lim: int = None,
        y_lim_bottom: int = None,
        center_x_labels: bool = False,
        x_rotation: float = None,
        auto_ylabels: bool = False,
        second_y_axis: List[int] = None,
        second_y_label: str = None,
        colors: List[str] = bar_colors,
        second_ylim: float = None,
        second_y_lim_bottom: int = None,
        ) -> None:
    """
    Create multiple lineplots in one plot.

    :param data_list: List of Data Dicts in the following Format:
        A dict with the X-Values as keys and the Y-Values as a list (even
        if there is only one Y-Value per X-value).
        { x1: [y12, y12, y13], x2: [y21], x3: [y31, y32, y33]}
        All dicts must share the X-values of data_list[0].
    :param filename: Output filename
    :param x_step: Stepsize of xticks
    :param min_y: Start of Y-Axis
    :param max_y: Maximal ytick
    :param y_step: Stepsize of yticks
    :param xlabel: Large Label of X-Axis
    :param ylabel: Large Label of Y-Axis
    :param title: Title of Plot
    :param legend: [optional] Legend object; required when data_list has
        more than one entry (its markers are filled in here)
    :param adjust: [optional] Tuple with 4 values (left, right, top, bottom)
    :param x_label_step: [optional] Every ith label printed on X-Axis
    :param y_label_step: [optional] Every ith label printed on Y-Axis
    :param scientific_y: [optional] Y-ticks in scientific representation
    :param x_log: [optional] Logarithmic X-Axis if True
    :param y_log: [optional] Logarithmic Y-Axis if True
    :param xticks: [optional] Location of X-Ticks
    :param xlabels: [optional] Text of X-Ticks
    :param minor_xticks: [optional] Location of minor X-Ticks
    :param minor_xlabels: [optional] Text of minor X-Ticks
    :param yticks: Location of Y-Ticks
    :param ylabels: [optional] Text of Y-Ticks
    :param x_sync: [optional] If True, don't set xlim to 0
    :param log_base: [optional] Base for logarithmic Axes
    :param reverseX: [optional] Reverse X-Values
    :param grid: [optional] Display Grid behind plot
    :param fmts: [optional] Line styles
    :param hor_grid: [optional] Show horizontal Grid
    :param ver_grid: [optional] Show vertical Grid
    :param y_lim: [optional] Explicit upper limit on y
    :param y_lim_bottom: [optional] Explicit lower limit on y
    :param x_rotation: [optional] Rotation degree of xlabels
    :param auto_ylabels: [optional] Use auto ylabels
    :param second_ylim: [optional] Second axis' ylim
    :param second_y_lim_bottom: [optional] Explicit lower limit on second y
    :param colors: [optional] Colors of lines (NOTE: shared module-level
        list used as default; it is rebound, not mutated, below)
    :param second_y_axis: [optional] List of 0 or 1 to assign each line to
        one axis
    :param second_y_label: [optional] Label for second Y axis
    :param center_x_labels: Should xlabels be centered between ticks?
    :return: None
    """
    fig, ax1 = plt.subplots()
    # Optional second Y axis sharing the same X axis.
    if second_y_axis is not None:
        ax2 = ax1.twinx()
    else:
        ax2 = None
    # Construct value lists: y_values[i][j] holds the sample list of
    # curve i at x_values[j].
    x_values = sorted(data_list[0].keys(), reverse=reverseX)
    y_values = []
    for i, d in enumerate(data_list):
        y_values.append([])
        for x in x_values:
            y_values[i].append(d[x])
    # Compute means and 95% confidence intervals per point.
    y_means = []
    y_errors = []
    for i, d in enumerate(data_list):
        y_means.append([])
        y_errors.append([])
        for y in y_values[i]:
            m, h = mean_confidence_interval(y, 0.95)
            y_means[i].append(m)
            y_errors[i].append(h)
    # Plot
    plts = []
    if len(data_list) <= 2:
        # For one or two curves use the fixed blue/orange scheme.
        colors = [blue, orange]
    axes = [ax1, ax2]
    for i, d in enumerate(data_list):
        if second_y_axis is not None:
            ax = axes[second_y_axis[i]]
        else:
            ax = ax1
        plts.append(ax.errorbar(
            x_values,
            y_means[i],
            yerr=y_errors[i],
            fmt='-' if fmts is None else fmts[i],
            color=colors[i],
            ecolor='black',
            elinewidth=1,  # width of the vertical error-bar line
            barsabove=True,
        ))
    # Set axis labels
    ax1.set_xlabel(xlabel)
    ax1.set_ylabel(ylabel)
    if second_y_label:
        ax2.set_ylabel(second_y_label)
    # Set Title
    if TITLE:
        plt.title(title, y=0.98)
    # Set size of axis and caption
    if not x_sync:
        ax1.set_xlim([0, max(x_values)])
    else:
        ax1.set_xlim(sorted([min(x_values), max(x_values)], reverse=reverseX))
    # Define bottom
    if y_lim_bottom is not None:
        ax1.set_ylim(bottom=y_lim_bottom)
    elif y_log:
        # Automatic: matplotlib picks a sensible log-axis bottom.
        pass
    else:
        ax1.set_ylim(bottom=0)
    if second_ylim:
        ax2.set_ylim(top=second_ylim)
    if second_y_lim_bottom is not None:
        ax2.set_ylim(bottom=second_y_lim_bottom)
    # Custom upper limit
    if y_lim is not None:
        ax1.set_ylim(top=y_lim)
    # Type of Axis ------------------------------------------------------------
    # Logarithmic X Axis:
    if x_log:
        ax1.set_xscale('log')
    # Logarithmic Y Axis:
    if y_log:
        ax1.set_yscale('log')
    # X-Axis Labels -----------------------------------------------------------
    set_xticks(ax1, xticks, xlabels, minor_xticks, minor_xlabels, x_values,
               x_step, x_label_step, x_log, center_x_labels, x_rotation,
               log_base)
    # -------------------------------------------------------------------------
    # Y-Axis labels -----------------------------------------------------------
    if not auto_ylabels:
        set_yticks(ax1, yticks, ylabels, min_y, max_y, y_step, y_log,
                   y_label_step, scientific_y, log_base)
    # -------------------------------------------------------------------------
    if grid:
        ver_grid = True
        hor_grid = True
    ax1.yaxis.grid(hor_grid)
    ax1.xaxis.grid(ver_grid)
    # Legend: only drawn for multiple curves; markers come from the plots.
    if len(data_list) > 1:
        legend.markers = [p[0] for p in plts]
        legend.make()
    # Crop
    crop_plot(adjust, figure=fig)
    output(filename)
class BarText(object):
    """
    Describes one text annotation that can be attached to a bar of a
    (stacked) bar plot.
    """
    # Class-wide defaults; rotation and color are overridden per instance
    # by __init__, fontsize comes from the module-level font_size.
    fontsize: float = font_size
    rotation: float = 90
    color: str = "black"

    def __init__(self,
                 bar_id: int,
                 stack_id: int,
                 x_value: float,
                 axis_id: int,
                 text: str,
                 loc: str,
                 rotation: float = 90,
                 color: str = 'black'
                 ):
        """
        Initialize object with values.

        :param bar_id: The bar to add text to
        :param stack_id: The stack to add text to
        :param x_value: The x_value to add text to. (May be +0.5 for in
            between)
        :param axis_id: The axis to add to
        :param text: The text
        :param loc: 'in' for within bar, 'on' for on top of bar,
            'center' for centered in Y-Axis
        :param rotation: Rotation of the text in degrees
        :param color: Color of the text
        """
        self.text = text
        self.loc = loc
        self.bar_id = bar_id
        self.stack_id = stack_id
        self.axis_id = axis_id
        self.x_value = x_value
        self.rotation = rotation
        self.color = color
def add_bar_text(bar_texts: List[BarText], bar_id: int, stack_id: int,
                 bar_lists: List[BarContainer], axes: List[Axes]):
    """
    Add text to bars.

    :param bar_texts: Annotation descriptors to place
    :param bar_id: The bar (group) currently being drawn
    :param stack_id: The stack currently being drawn
    :param bar_lists: One BarContainer per axis
    :param axes: The axes the bars were drawn on
    :raises RuntimeError: For an unknown BarText.loc value
    """
    for bt in bar_texts:
        a, b, x, t, loc, ax_id = bt.bar_id, bt.stack_id, bt.x_value, \
            bt.text, bt.loc, bt.axis_id
        ax = axes[ax_id]
        bar_list = bar_lists[ax_id]
        if bar_id == a and stack_id == b:
            for idx, rect in enumerate(bar_list):
                # Fixed: use equality instead of identity ('is') when
                # comparing against the 'all' sentinel string — 'is' with
                # a str literal is implementation dependent and raises a
                # SyntaxWarning on modern CPython.
                if x == 'all' or x == idx or x == idx + 0.5:
                    # If the matching bar is tall enough, the text can be
                    # written onto the bar itself. If the bars are very
                    # small, the text is centered vertically instead, and
                    # if that does not fit either it is simply placed
                    # above the bar.
                    x_value = rect.get_x()
                    height = rect.get_height()
                    ha = 'center'
                    if x == idx + 0.5:
                        # In between bars: midpoint of this and next bar
                        # noinspection PyUnresolvedReferences
                        x_value = (rect.get_x() + bar_list[
                            idx + 1].get_x() + rect.get_width()) / 2.
                    else:
                        x_value += rect.get_width() / 2.
                    trans, va = ax.transData, 'center'
                    if loc == 'in':
                        y = rect.get_y() + 0.5 * height
                    elif loc == 'on':
                        y = rect.get_y() + 1.1 * height
                        va = 'bottom'
                    elif loc == 'center':
                        # center of the Y axis, independent of data scale
                        y = 0.5
                        trans = \
                            transforms.blended_transform_factory(
                                ax.transData, ax.transAxes)
                    elif loc == 'between':
                        y = 1
                        trans = \
                            transforms.blended_transform_factory(
                                ax.transData, ax.transAxes)
                    else:
                        raise RuntimeError("Unknown location")
                    ax.text(
                        x_value,
                        y, t, transform=trans,
                        ha=ha, va=va,
                        rotation=bt.rotation,
                        fontsize=bt.fontsize,
                        color=bt.color
                    )
def stacked_bar_plot_mult(
        filename: str,
        data_list: List[List[dict]],
        xlabel: str,
        ylabel: str,
        title: str,
        bar_legend: Legend = None,
        stack_legend: Legend = None,
        adjust: Union[list, tuple] = None,
        y_log1: bool = False,
        y_log2: bool = False,
        x_log: bool = False,
        xticks: List[int] = None,
        xlabels: List[str] = None,
        minor_xticks: List[int] = None,
        minor_xlabels: List[str] = None,
        stacks_depend_on_prev=False,
        label_step: int = 1,
        colors: List[str] = bar_colors,
        hatches: List[str] = None,
        y_lim: float = None,
        backgrounds: List[tuple] = None,
        text_boxes: List[dict] = tuple(),
        order: List[int] = None,
        rotate_xticks: bool = False,
        colors_depend_on_bar: bool = False,
        divide_y: bool = False,
        ymin: float = None,
        ymax: float = None,
        ymin2: float = None,
        ymax2: float = None,
        y_label_coord: float = -0.075,
        y_label_coord2: float = 0,
        bar_texts: List[BarText] = None,
        textboxes: List[dict] = None,
        gridspec_kw: dict = None
        ) -> None:
    """
    Draw grouped, stacked bar plots with error bars.

    :param filename: Path to output file.
    :param data_list:
        List of List of Data Dict:
        [
        [ {x1: [y12, y12, y13], x2: [y21], x3: [y31, y32, y33]},
        {x1: [y12, y12, y13], x2: [y21], x3: [y31, y32, y33]}
        ],
        [ {x1: [y12, y12, y13], x2: [y21], x3: [y31, y32, y33]},
        {x1: [y12, y12, y13], x2: [y21], x3: [y31, y32, y33]}
        ],
        ]
        The outer list defines the bars, the inner list the stacks
    :param stack_legend: [optional] Label stacks
    :param bar_legend: [optional] Label bars
    :param xlabel: Large label of x-axis
    :param ylabel: Large label of y-axis
    :param title: Title of whole plot
    :param adjust: [optional] Tuple with 4 values (left, right, top, bottom)
    :param y_log1: [optional] Is the first y-scale logarithmic?
    :param y_log2: [optional] Is the 2nd y-scale logarithmic?
    :param x_log: [optional] Is the x-scale logarithmic?
    :param xticks: [optional] Location of x-ticks
    :param xlabels: [optional] Custom x-tick labels
    :param minor_xticks: [optional] Location of minor x-ticks
    :param minor_xlabels: [optional] Text for minor x-ticks
    :param stacks_depend_on_prev: [optional] If true, larger stacks already
        include the size/time/value of smaller ones
    :param colors: [optional] Custom Bar Colors
    :param hatches: [optional] Custom hatches
    :param rotate_xticks: [optional] Rotate the x-ticks
    :param backgrounds: [optional] List of tuples (start, end, color)
    :param order: [optional] Reorder Bars
    :param text_boxes: [optional] Text boxes for backgrounds
    :param colors_depend_on_bar: [optional] Vary colors for Bars not stacks
    :param divide_y: [optional] Use a divided Y-Axis
    :param ymin: [mandatory for divide_y=True] Lower Limit of lower part
    :param ymax: [mandatory for divide_y=True] Upper Limit of lower part
    :param ymin2: [mandatory for divide_y=True] Lower Limit of upper part
    :param ymax2: [mandatory for divide_y=True] Upper Limit of upper part
    :param y_label_coord: [optional] X Position of Y-Axis label
    :param y_label_coord2: [optional] Y Position of Y-Axis label
    :param y_lim: [optional] Custom upper limit
    :param label_step: [optional] Print only every ith step
    :param bar_texts: [optional] Bar texts to add to Bar
    :param gridspec_kw: [optional] Gridspec Values for Y Axis
    :param textboxes: [optional] add arbitrary text boxes
    :return: None
    """
    if divide_y:
        # Broken Y axis: two stacked subplots sharing the X axis; ax1 is
        # the upper part, ax2 the lower part.
        # noinspection PyTypeChecker
        fig, (ax1, ax2) = plt.subplots(
            2, 1, sharex=True, gridspec_kw=gridspec_kw)
        # ax2 is lower plot
        if ymin is None or ymax is None or ymin2 is None or ymax2 is None:
            raise RuntimeError(
                "Axis limits need to be defined for divided y axis.")
    else:
        fig, ax1 = plt.subplots()
        ax2 = None
    num_bars = len(data_list)
    num_stacks = len(data_list[0])
    if num_bars == 1:
        bar_width = 0.5
    else:
        bar_width = 0.8 / num_bars
    if len(data_list[0]) <= 1:
        # there is only one stack
        colors = [blue, orange]
    # "Fat bars" special case: exactly two X values get wide bars at
    # hard-coded positions.
    fat_bars = False
    if len(data_list[0][0].keys()) == 2:
        fat_bars = True
        bar_width = 0.2
    x_values, y_max = [], 0
    bars = []  # Format = bars[bar_num][stack_num][ax_num]
    for k, data in enumerate(data_list):
        # Read Data
        if stack_legend is not None and len(data) != len(stack_legend.labels):
            raise ValueError(
                f"Got data for {len(data)} bars, but legend labels"
                f"for {len(stack_legend.labels)}.")
        num_stacks = len(data)
        x_values = sorted(data[0].keys())
        if order is not None:
            x_values = [x_values[i] for i in order]
        # means[i] / errors[i]: per-stack mean and confidence interval
        # for every x in x_values, in order.
        means = {}
        errors = {}
        for i, stack in enumerate(data):
            if set(x_values) != set(stack.keys()):
                raise ValueError(
                    f"Differing X-Values between stack 0 and {i}.")
            for x in x_values:
                m, h = mean_confidence_interval(
                    stack[x]
                )
                if i in means:
                    means[i].append(m)
                    errors[i].append(h)
                else:
                    means[i] = [m]
                    errors[i] = [h]
        # Bar positions: center the group of num_bars bars around each
        # integer x index, offset by k for the current bar.
        ind = [
            (i - (num_bars / 2) * bar_width) + 0.5 * bar_width + k * bar_width
            for i in
            range(len(means[0]))]
        if fat_bars:
            ind = [0.5 + k * 0.2, 1.3 + k * 0.2]
        bottoms = [0 for _ in ind]
        inner_bars = []  # inner_bars[stack_num][ax_num]
        for i in range(num_stacks):
            hatch = None if hatches is None else hatches[i]
            if colors_depend_on_bar:
                color = colors[k]
            else:
                color = colors[i]
            if divide_y:
                axes = [ax1, ax2]
            else:
                axes = [ax1]
            ax_bars = []
            # With a divided Y axis the same bars are drawn on both
            # subplots; each subplot's ylim crops its own part.
            for ax_num, ax in enumerate(axes):
                p = ax.bar(ind, means[i], bar_width, yerr=errors[i],
                           bottom=bottoms,
                           error_kw={
                               'elinewidth': 1,  # width of vertical line
                           },
                           color=color,
                           hatch=hatch
                           )
                ax_bars.append(p)
            inner_bars.append(ax_bars)
            if bar_texts is not None:
                add_bar_text(bar_texts, k, i, ax_bars, axes)
            if not stacks_depend_on_prev:
                # Independent stacks: next stack starts on top of the
                # accumulated bottoms.
                # noinspection PyUnresolvedReferences
                bottoms = [means[i][j] + bottoms[j] for j in
                           range(len(bottoms))]
                tmp = max(bottoms)
            else:
                tmp = max(means[i])
            y_max = max(tmp, y_max)
        bars.append(inner_bars)
    if textboxes is not None:
        axis = ax1
        # Use lower axis
        for box in textboxes:
            if box['transform'] == 'transAxes':
                box['transform'] = axis.transAxes
            axis.text(**box)
    # Set Axis Labels
    if divide_y:
        ax2.set_xlabel(xlabel)
    else:
        ax1.set_xlabel(xlabel)
    ax1.set_ylabel(ylabel)
    if divide_y:
        ax1.yaxis.set_label_coords(y_label_coord, y_label_coord2)
    # Axis Scaling
    if y_lim is not None:
        ax1.set_ylim(top=y_lim)
    else:
        # Leave some headroom above the tallest bar.
        ax1.set_ylim(top=(y_max * Y_LIM))
    if y_log1:
        ax1.set_yscale('log')
    else:
        # Axis Limits
        ax1.set_ylim(bottom=0)
    if divide_y:
        # Limits
        ax1.set_ylim(ymin2, ymax2)  # Upper part
        ax2.set_ylim(ymin, ymax)  # Lower part
        # hide the spines between ax and ax2
        ax1.spines['bottom'].set_visible(False)
        ax2.spines['top'].set_visible(False)
        ax1.xaxis.tick_top()
        ax1.tick_params(labeltop=False)  # don't put tick labels at the top
        ax2.xaxis.tick_bottom()
    if divide_y:
        xaxis = ax2
    else:
        xaxis = ax1
    if x_log:
        xaxis.set_xscale('log')
    if fat_bars:
        xaxis.set_xlim(left=0, right=2)
    # Axis Ticks
    if xlabels is None:
        xlabels = x_values
    # Only label every label_step-th x-tick
    for i in range(0, len(xlabels), 1):
        if i % label_step != 0:
            xlabels[i] = ''
    if fat_bars:
        ind = [0.6, 1.4]
    else:
        ind = [i for i in range(len(x_values))]
    if xticks:
        ind = [ind[i] for i in xticks]
    elif order is not None:
        xlabels = [xlabels[i] for i in order]
    if rotate_xticks:
        xaxis.set_xticks(ind)
        xaxis.set_xticklabels(xlabels,
                              fontdict={'horizontalalignment': 'right',
                                        'rotation': 30})
    else:
        xaxis.set_xticks(ind)
        xaxis.set_xticklabels(xlabels)
    if divide_y:
        ax1.set_xticks(ind)
        # Deactivate Top Ticks altogether
        ax1.tick_params(top=False)
    # Minor Ticks
    set_minor_xticks(xaxis, minor_xticks, minor_xlabels, None, 2)
    # Gray bars
    gray_bars = add_gray_bars(ax1, backgrounds)
    # Text Boxes into gray bars
    add_text_boxes_into_bars(ax1, text_boxes, gray_bars)
    # Divider
    if divide_y:
        # Marks----------------------------------------------------------------
        # -----------------------------------------------------------------------------
        # This looks pretty good, and was fairly painless, but you can get that
        # cut-out diagonal lines look with just a bit more work. The important
        # thing to know here is that in axes coordinates, which are always
        # between 0-1, spine endpoints are at these locations (0,0), (0,1),
        # (1,0), and (1,1). Thus, we just need to put the diagonals in the
        # appropriate corners of each of our axes, and so long as we use the
        # right transform and disable clipping.
        # Align diagonal lines: compensate for differing subplot heights.
        if gridspec_kw is not None and 'height_ratios' in gridspec_kw.keys():
            stretch = (1. * gridspec_kw['height_ratios'][1]) / \
                      gridspec_kw['height_ratios'][0]
        else:
            stretch = 1.0  # Fallback
        d = .015  # how big to make the diagonal lines in axes coordinates
        # arguments to pass to plot, just so we don't keep repeating them
        kwargs = dict(transform=ax1.transAxes, color='k', clip_on=False,
                      linewidth=plt.rcParams['axes.linewidth'])
        ax1.plot((-d, +d), (-(stretch * d), +(stretch * d)),
                 **kwargs)  # top-left diagonal
        ax1.plot((1 - d, 1 + d), (-(stretch * d), +(stretch * d)),
                 **kwargs)  # top-right diagonal
        kwargs.update(transform=ax2.transAxes)  # switch to the bottom axes
        ax2.plot((-d, +d), (1 - d, 1 + d), **kwargs)  # bottom-left diagonal
        ax2.plot((1 - d, 1 + d), (1 - d, 1 + d),
                 **kwargs)  # bottom-right diagonal
        # What's cool about this is that now if we vary the distance between
        # ax and ax2 via f.subplots_adjust(hspace=...) or plt.subplot_tool(),
        # the diagonal lines will move accordingly, and stay right at the tips
        # of the spines they are 'breaking'
        # -----------------------------------------------------------------------------
        # -----------------------------------------------------------------------------
    # Set Title
    if TITLE:
        plt.title(title, y=0.98)
    # Legend: the stack legend is drawn first and re-added as an artist so
    # the bar legend does not replace it.
    first_legend = None
    if stack_legend is not None:
        stack_legend.markers = [stack[0][0] for stack in bars[0]]
        stack_legend.axis = ax1
        if stack_legend.location is None:
            stack_legend.location = Legend.TOP
        first_legend = stack_legend.make()
    if bar_legend is not None:
        bar_legend.markers = [bar[0][0][0] for bar in bars]
        bar_legend.axis = ax1
        bar_legend.make()
    if first_legend is not None:
        plt.gca().add_artist(first_legend)
    # Crop
    hspace = 0.15 if divide_y else None
    crop_plot(adjust, figure=fig, hspace=hspace, divider=divide_y)
    output(filename)
| 51,086
| 32.543664
| 87
|
py
|
parameter-exchange
|
parameter-exchange-master/results_eval/scripts/plot/__init__.py
| 0
| 0
| 0
|
py
|
|
parameter-exchange
|
parameter-exchange-master/results_eval/scripts/plot/tls.py
|
#!/usr/bin/env python3
"""This module contains functions for the theoretic TLS approximation."""
from copy import deepcopy
# Deprecated ------------------------------------------------------------------
# Handshake cost < 1s
# https://www.comsys.rwth-aachen.de/fileadmin/papers/2019/2019-hiller-lcn
# -case_for_tls_session_sharing.pdf
# According to https://tools.ietf.org/id/draft-mattsson-uta-tls-overhead-01
# .html
# AES-128-GCM (Worst Case) provides 41.5MB/s in SW
# PER_BYTE_OVERHEAD = 1. / 1000000. / 41.5
# -----------------------------------------------------------------------------
# The following values have been produces with the tls_test script
# TLSv1.2, ECDHE-RSA-AES256-GCM-SHA384, secp256r1
HANDSHAKE_COST = 0.05394  # 53.94ms per handshake
PER_BYTE_OVERHEAD = 1. / 1000000. / 567.16  # 567.16MB/s symmetric crypto


def compute_tls_curve(baseline, sent, recv):
    """
    Add the theoretic TLS overhead on top of a baseline curve.

    :param baseline: The baseline curve to add TLS to
    :param sent: Sent bytes [same X-Values as baseline]
    :param recv: Received bytes [same X-Values as baseline]
    :return: Deep copy of baseline with handshake and per-byte costs added
    """
    tls = deepcopy(baseline)
    for x, samples in tls.items():
        for i in range(len(samples)):
            # One handshake plus per-byte crypto cost for both directions;
            # additions are kept in the original order for bit-identical
            # float results.
            value = samples[i]
            value += HANDSHAKE_COST
            value += sent[x][i] * PER_BYTE_OVERHEAD
            value += recv[x][i] * PER_BYTE_OVERHEAD
            samples[i] = value
    return tls
| 1,332
| 35.027027
| 79
|
py
|
parameter-exchange
|
parameter-exchange-master/results_eval/scripts/plot/colors.py
|
"This module contains the different colors for the plots."
# Named plot colors. NOTE(review): despite the original "Matplotlib
# Default colors" label, these hex values look like the ColorBrewer
# Set1 palette — confirm before renaming.
blue = '#377eb8'
orange = '#ff7f00'
green = '#4daf4a'
red = '#e41a1c'
purple = '#984ea3'
brown = '#a65628'
pink = '#f781bf'
gray = '#999999'
yellow = 'gold'
# Lighter/darker variants for secondary elements.
lightblue = '#92c5de'
lightgreen = '#bae4b3'
lightorange = '#fed98e'
darkorange = '#d95f0e'
maroon = "#7f0000"
# Colorbrewer: default cycle used for bar/stack colors.
bar_colors = [red, blue, green, purple, orange, yellow,
              brown, pink, gray]
| 459
| 20.904762
| 58
|
py
|
parameter-exchange
|
parameter-exchange-master/src/setup.py
|
#!/usr/bin/env python3
"""Setup requirements.
Copyright (c) 2020.
Author: Erik Buchholz
Maintainer: Erik Buchholz
E-mail: buchholz@comsys.rwth-aachen.de
"""
from setuptools import setup, find_packages
from lib import config
# Long description is taken verbatim from the project README.
with open(config.WORKING_DIR + "README.md", "r") as f:
    readme = f.read()
with open(config.WORKING_DIR + "docker/requirements.txt", "r") as f:
    # install_requires expects a list with one requirement specifier per
    # entry — passing the raw file content as a single string is wrong.
    # Split into individual, non-empty lines.
    requirements = [line for line in f.read().splitlines() if line.strip()]
setup(
    name="Privacy-Preserving Exchange of Process Parameters",
    version="1.0.0",
    maintainer="Erik Buchholz",
    maintainer_email="erik.buchholz@rwth-aachen.de",
    description="Implementation of the Erik's master thesis.",
    long_description=readme,
    packages=find_packages(),
    include_package_data=True,
    zip_safe=False,
    install_requires=requirements,
    extras_require={"test": ["coverage"]},
    test_suite="test"
)
| 852
| 25.65625
| 68
|
py
|
parameter-exchange
|
parameter-exchange-master/src/client_db_cli.py
|
#!/usr/bin/env python3
"""This module contains the CLI to interact with the client DB.
Copyright (c) 2020.
Author: Erik Buchholz
Maintainer: Erik Buchholz
E-mail: buchholz@comsys.rwth-aachen.de
"""
import logging
import sys
from lib import db_cli, config
from lib.base_client import UserType
from lib.logging import configure_root_loger
if __name__ == '__main__':  # pragma no cover
    # Log to the client-specific logfile, then dispatch the CLI arguments
    # to the shared DB CLI as a CLIENT-type user.
    configure_root_loger(logging.INFO, config.LOG_DIR + "client_db.log")
    log = logging.getLogger()
    db_cli.main(UserType.CLIENT, sys.argv[1:])
| 538
| 25.95
| 72
|
py
|
parameter-exchange
|
parameter-exchange-master/src/PSIReceiver.py
|
#!/usr/bin/env python3
"""Acts as receiver for PSIs.
Copyright (c) 2020.
Author: Erik Buchholz
Maintainer: Erik Buchholz
E-mail: buchholz@comsys.rwth-aachen.de
"""
import argparse
import datetime
import logging
import sys
from typing import List
import lib.config as config
from lib.logging import configure_root_loger
sys.path.append(config.WORKING_DIR + 'cython/psi')
# Python Version of libPSI
# noinspection PyUnresolvedReferences
from cPSIInterface import PyPSIReceiver # noqa
configure_root_loger(logging.INFO, config.LOG_DIR + "psi_receiver.log")
log = logging.getLogger()
def main(args: list) -> List[int]:
    """
    Start the PSI Receiver based on the given CL args.

    :param args: command line arguments (argv[1:])
    :return: List of received values
    """
    log.info("Starting PSI Receiver.")
    parser = argparse.ArgumentParser("PSI Receiver")
    parser.add_argument("SetSize", help="Size of PSI Set.",
                        type=int, action="store", default=config.PSI_SETSIZE,
                        metavar="setSize", nargs='?')
    parser.add_argument("-p", "--port", action="store",
                        help="Port of PSI Server",
                        default=config.PSI_PORT, type=int)
    parser.add_argument("-n", "--hostname", action="store",
                        help="IP or DNS of PSI Server",
                        default=config.PSI_HOST, type=str)
    parser.add_argument("-s", "--scheme", action="store", type=str,
                        help="PSI Scheme to use", default=config.PSI_SCHEME)
    args = parser.parse_args(args=args)
    # Configure the receiver from CLI args and global config.
    recv = PyPSIReceiver()
    recv.statSecParam = config.PSI_STATSECPARAM
    recv.setSize = args.SetSize
    recv.hostName = args.hostname
    recv.port = args.port
    recv.numThreads = config.PSI_THREADS
    recv.tls = config.PSI_TLS
    recv.rootCA = config.TLS_ROOT_CA
    # Dummy input set 0..setSize-1. Renamed from 'set' so the builtin is
    # not shadowed.
    input_set = list(range(recv.setSize))
    time1 = datetime.datetime.now().timestamp()
    result = recv.execute(args.scheme, input_set)
    time2 = datetime.datetime.now().timestamp()
    log.info(f"Transmission took {str(time2 - time1)}s.")
    log.info(f"Received: {str(result)}")
    log.info("Finishing PSI Receiver.")
    return result
# Script entry point: forward CLI arguments to main().
if __name__ == '__main__':  # pragma no cover
    main(sys.argv[1:])
| 2,277
| 29.783784
| 77
|
py
|
parameter-exchange
|
parameter-exchange-master/src/OTReceiver.py
|
#!/usr/bin/env python3
"""Acts as receiver for OTs.
Copyright (c) 2020.
Author: Erik Buchholz
Maintainer: Erik Buchholz
E-mail: buchholz@comsys.rwth-aachen.de
"""
import argparse
import datetime
import logging
import sys
from typing import List
import lib.config as config
from lib.logging import configure_root_loger
sys.path.append(config.WORKING_DIR + 'cython/ot')
# Python Version of libOTe
# noinspection PyUnresolvedReferences
from cOTInterface import PyOTReceiver # noqa
configure_root_loger(logging.INFO, config.LOG_DIR + "ot_receiver.log")
log = logging.getLogger()
def main(args: list) -> List[int]:
    """
    Start the OT Receiver based on the given CL args.
    :param args: command line arguments (argv[1:])
    :return: List of received values
    """
    log.info("Starting OT Receiver.")
    parser = argparse.ArgumentParser("OT Receiver")
    parser.add_argument("TotalOTs", help="Number of OTs to perform",
                        type=int, action="store")
    parser.add_argument("-p", "--port", action="store", help="Port of OT "
                                                             "Server",
                        default=1213, type=int)
    parser.add_argument("-n", "--hostname", action="store", help="IP or DNS "
                                                                 "of OT "
                                                                 "Server",
                        default="127.0.0.1", type=str)
    args = parser.parse_args(args=args)
    # Configure the receiver (cython wrapper around the native OT library).
    recv = PyOTReceiver()
    recv.totalOTs = args.TotalOTs
    recv.numChosenMsgs = config.OT_SETSIZE
    recv.hostName = args.hostname
    recv.port = args.port
    recv.rootCA = config.TLS_ROOT_CA
    # Choice indices 0..totalOTs-1. Replaces the previous manual
    # append-loop; same construction style as the PSI receiver's set.
    choices = list(range(recv.totalOTs))
    time1 = datetime.datetime.now().timestamp()
    result = recv.execute(choices, config.OT_TLS)
    time2 = datetime.datetime.now().timestamp()
    log.info(f"Transmission took {str(time2 - time1)}s.")
    log.info(f"Received: {str(result)}")
    log.info("Finishing OT Receiver.")
    return result
if __name__ == '__main__': # pragma no cover
    main(sys.argv[1:])
| 2,157
| 29.394366
| 77
|
py
|
parameter-exchange
|
parameter-exchange-master/src/client.py
|
#!/usr/bin/env python3
"""Client Application of client type end-users.
Copyright (c) 2020.
Author: Erik Buchholz
Maintainer: Erik Buchholz
E-mail: buchholz@comsys.rwth-aachen.de
"""
import argparse
import atexit
import copy
import json
import logging
import multiprocessing
import pickle
import pprint
import shutil
import sys
import time
from itertools import tee
from typing import List
# noinspection PyUnresolvedReferences
from memory_profiler import profile, memory_usage
from pybloomfilter import BloomFilter
from lib import config, helpers
from lib.base_client import BaseClient, UserType, ServerType
from lib.helpers import parse_list, to_base64, print_time, from_base64
from lib.logging import configure_root_loger
from lib.record import Record, hash_to_index
from lib.similarity_metrics import map_metric, RecordIterator, \
SimilarityMetricIterator
configure_root_loger(logging.INFO, config.LOG_DIR + "client.log")
log = logging.getLogger()
class Client(BaseClient):
    """Client Application for end users."""
    # Role used by the base client when authenticating against the servers.
    type = UserType.CLIENT
    # Default similarity metric name (overridable via CLI -m/--metric).
    metric = "offset-1"
    # When True, matching is done via PSI; otherwise via the bloom filter.
    _psi_mode = config.PSI_MODE
    def get_record(self, h: str) -> List[Record]:
        """Retrieve record with given hash.

        :param h: Base64-encoded hash of the record to fetch.
        :raises RuntimeError: If the server reports a failure.
        """
        j = {'hash': h}
        resp = self.post(f"{self.STORAGESERVER}/retrieve_record",
                         json=j)
        suc = resp.json()['success']
        if suc:
            log.debug("Successfully retrieved record.")
            return resp.json()['records']
        else:
            msg = resp.json()['msg']
            raise RuntimeError(f"Failed to retrieve record: {msg}")
    def _batch_get_encrpyted_records(self, hash_list: List[str]) -> \
            List[str]:
        """
        Retrieve the encrypted records for all given hashes in one request.
        :param hash_list: List of **base64**-encoded hashes
        :return: List of encrypted records as json.dumps(dict)
        :raises RuntimeError: If the server reports a failure.
        """
        j = {'hashes': hash_list}
        resp = self.post(f"{self.STORAGESERVER}/batch_retrieve_records",
                         json=j)
        suc = resp.json()['success']
        if suc:
            log.debug("Successfully retrieved records.")
            return resp.json()['records']
        else:
            msg = f"Failed to retrieve record: {resp.json()['msg']}"
            raise RuntimeError(msg)
    def batch_get_records(self, candidates: List[Record]) -> List[Record]:
        """
        Retrieve the records for all hashes in the list
        :param candidates: Record objects for all candidates (hash_set)
        :return: List of retrieved records (decrypted)
        """
        log.info("4.1 Retrieve encryption keys.")
        start = time.monotonic()
        ot_indices = []
        # No duplicates
        for r in candidates:
            if r.get_ot_index() not in ot_indices:
                ot_indices.append(r.get_ot_index())
        enc_keys = self._get_enc_keys(ot_indices)
        # Create mapping OT index -> encryption key
        enc_keys = dict(zip(
            ot_indices,
            enc_keys
        ))
        # self.eval collects monotonic timestamps for later evaluation.
        self.eval['key_retrieve_time'] = time.monotonic()
        log.info(
            f"4.1 - Retrieve keys took: {print_time(time.monotonic() - start)}")
        log.info("4.2 Retrieve encrypted records.")
        start = time.monotonic()
        hash_list = [to_base64(r.get_long_hash()) for r in candidates]
        records = self._batch_get_encrpyted_records(hash_list)
        if not records:
            # Nothing matched server-side; record timestamps and bail out.
            self.eval['record_retrieve_time'] = time.monotonic()
            self.eval['decryption_time'] = time.monotonic()
            return []
        res_list = []
        self.eval['record_retrieve_time'] = time.monotonic()
        log.info(
            f"4.2 - Retrieve records: {print_time(time.monotonic() - start)}")
        log.info("4.3 Decrypting.")
        start = time.monotonic()
        # h is the base64 long hash, c the JSON-encoded ciphertext.
        for h, c in records:
            c = json.loads(c)
            key = enc_keys[hash_to_index(from_base64(h), config.OT_INDEX_LEN)]
            log.debug(
                f"Using key {key} for record {h}.")
            res_list.append(Record.from_ciphertext(c, key))
        self.eval['decryption_time'] = time.monotonic()
        log.info(
            f"4.3 - Decryption took: {print_time(time.monotonic() - start)}")
        return res_list
    def _get_bloom_filter(self) -> BloomFilter or None:
        """
        Retrieve the bloom filter from storage server
        :return: Bloom filter
        :raises RuntimeError: If the server reports a failure.
        """
        resp = self.get(f"{self.STORAGESERVER}/bloom")
        suc = resp.json()['success']
        if suc:
            log.debug("Successfully retrieved bloom filter.")
            # The filter is backed by a temp file; clean it up at exit.
            tmp = helpers.get_temp_file() + '.bloom'
            b = BloomFilter.from_base64(tmp,
                                        resp.json()['bloom'].encode())
            atexit.register(shutil.rmtree, tmp, True) # Remove and ignore
            # errors
            return b
        else:
            msg = resp.json()['msg']
            raise RuntimeError(f"Failed to retrieve bloom filter: {msg}")
    # noinspection PyUnboundLocalVariable
    def _perform_psi(self, client_set: List[int]) -> List[int]:
        """Run the PSI protocol against the storage server.

        :param client_set: PSI indices computed from the candidate records.
        :return: Subset of ``client_set`` also present on the server
            (dummy padding values removed).
        :raises RuntimeError: On server failure, TLS mismatch, or an
            oversized client set.
        """
        log.debug("Perform PSI.")
        if len(client_set) == 0:
            return []
        # Fetch PSI connection parameters from the storage server.
        r = self.get(f"{self.STORAGESERVER}/psi")
        d = r.json()
        if not d['success']:
            raise RuntimeError(f"PSI failed: {d['msg']}")
        log.debug("Retrieved PSI connection information.")
        if d['tls'] != config.PSI_TLS:
            log.error("Server TLS setting mismatch.")
            raise RuntimeError("Mismatch of server and client TLS "
                               "settings.")
        host = d['host']
        port = d['port']
        set_size = d['setSize']
        if set_size < len(client_set):
            raise RuntimeError("Client Set larger than PSI Setsize.")
        # Padding - KKRT16 does not allow for duplicates
        client_set = list(set(client_set)) # Remove duplicates
        # log.debug(f"Client set without dummies: {str(client_set)}")
        # Pad with increasing dummy values up to the fixed PSI set size.
        dummy = config.PSI_DUMMY_START_CLIENT
        while len(client_set) < set_size:
            client_set.append(dummy)
            dummy += 1
        if d['tls']:
            tls = "with"
        else:
            tls = "without"
        log.info(f"Connecting for PSI to host {host} on port {port} {tls} "
                 f"TLS. Setsize: {set_size}")
        if config.EVAL: # pragma no cover
            # Measure traffic in both directions during the PSI run.
            to_svr, tsvr_file = helpers.start_trans_measurement(
                port, direction="dst", sleep=False)
            from_svr, fsvr_file = helpers.start_trans_measurement(
                port, direction="src", sleep=False)
            self.eval['psi_tcpdump_sent'].append(tsvr_file)
            self.eval['psi_tcpdump_recv'].append(fsvr_file)
            time.sleep(1) # Wait for startup of tcpdump
        matches = self._receive_psi(client_set, host, port, d['tls'])
        log.debug(f"Completed PSI.")
        log.debug(f"Matches: {str(matches)}")
        # Remove dummies
        matches = [m for m in matches if m < config.PSI_DUMMY_START_CLIENT]
        return matches
    def compute_matches_bloom(self,
                              candidate_iterator: SimilarityMetricIterator) \
            -> List[Record]:
        """
        Compute list of records stored on server side using a bloom filter.
        :param candidate_iterator: Iterator over candidates
        :return: List of Records found on server side.
        :raises RuntimeError: If PSI mode is enabled.
        """
        if self._psi_mode:
            raise RuntimeError("Matches cannot be computed with bloom filter "
                               "because PSI-Mode is enabled.")
        log.info(f"3.1 Retrieve bloom filter.")
        b = self._get_bloom_filter()
        self.eval['bloom_filter_retrieve_time'] = time.monotonic()
        log.info(f"3.2 Compute matches with Bloom Filter.")
        if config.PARALLEL:
            # parallel: split the candidate iterator across CPU cores and
            # collect matches via a Manager-backed shared list.
            num_procs = multiprocessing.cpu_count()
            its = candidate_iterator.split(num_procs)
            num_procs = len(its)
            log.debug(
                f"Using {num_procs} parallel processes for bloom matching.")
            processes = []
            m = multiprocessing.Manager()
            output = m.list()
            def matching(client_set: RecordIterator,
                         i: int): # pragma no cover
                """Compute matching with given part of client set."""
                log.debug(
                    f"Proc {i} iterating over {len(client_set)} elements.")
                m = [i for i in client_set if to_base64(
                    i.get_long_hash()) in b]
                output.extend(m)
            for i in range(num_procs):
                client_set = RecordIterator(its[i], self._hash_key)
                p = multiprocessing.Process(target=matching,
                                            args=(client_set, i))
                processes.append(p)
                p.start()
                # Ensure stray workers are killed if the main process exits.
                atexit.register(p.terminate)
            for i, p in enumerate(processes):
                p.join()
                atexit.unregister(p.terminate)
            matches = list(output)
            log.debug("All processes terminated.")
        else:
            client_set = RecordIterator(candidate_iterator,
                                        self.get_hash_key())
            matches = [i for i in client_set if to_base64(
                i.get_long_hash()) in b]
        self.eval['bloom_matching_time'] = time.monotonic()
        return matches
    def compute_matches_psi(self,
                            candidate_iterator: SimilarityMetricIterator) -> \
            List[Record]:
        """
        Compute list of records stored on server side via PSI.
        :param candidate_iterator: Iterator over candidates
        :return: List of Records found on server side.
        :raises RuntimeError: If PSI mode is disabled or the candidate set
            exceeds the fixed PSI set size.
        """
        if not self._psi_mode:
            raise RuntimeError("Matches cannot be computed with PSI "
                               "because PSI-Mode is not enabled.")
        if len(candidate_iterator) > config.PSI_SETSIZE:
            raise RuntimeError("Candidate Set is too large for PSI! "
                               f"Candidates: {len(candidate_iterator)} "
                               f"PSI Setsize: {config.PSI_SETSIZE}")
        log.info(f"3.1 Compute matches via PSI.")
        client_set = RecordIterator(candidate_iterator, self.get_hash_key())
        # tee: it1 is consumed for the PSI indices, it2 later to map the
        # matching indices back onto Record objects.
        it1, it2 = tee(client_set)
        psi_indizes = list(set([r.get_psi_index() for r in it1]))
        # for r in it1:
        #     if r.get_psi_index() not in psi_indizes:
        #         psi_indizes.append(r.get_psi_index())
        log.debug("Created PSI client set.")
        self.eval['psi_preparation_time'] = time.monotonic()
        matching_indizes = self._perform_psi(psi_indizes)
        self.eval['psi_execution_time'] = time.monotonic()
        matches = [r for r in it2
                   if r.get_psi_index() in matching_indizes]
        self.eval['psi_set_construction_time'] = time.monotonic()
        return matches
    def compute_candidates(self, target: List[float],
                           metric_name: str = None
                           ) -> SimilarityMetricIterator:
        """
        Return all similarity candidates for the given target by using the
        provided similarity metric.
        :param target: The target list to compute similarity for.
        :param metric_name: Name of the similarity metric to use.
            Defaults to ``self.metric``.
        :return: Iterator over the candidate vectors.
        """
        if metric_name is None:
            metric_name = self.metric
        metric, args = map_metric(metric_name)
        log.debug(f"Compute candidates using {metric_name}.")
        return metric(target, *args)
    # @profile(stream=f)
    def full_retrieve(self, target: List[float]) -> List[Record]:
        """
        Perform a full retrieval: candidates -> hash key -> matching
        (PSI or bloom) -> record download and decryption.
        :param target: The target vector to retrieve similar values for
        :return: The list of retrieved candidates.
        """
        self.eval['start_time'] = time.monotonic()
        try:
            log.debug(f"Retrieve matches for: {target}")
            log.info(f"1. Compute candidates.")
            candidate_iterator = self.compute_candidates(target)
            self.eval['compute_candidates_time'] = time.monotonic()
            log.info(f"1 - Computed {len(candidate_iterator)} candidates.")
            log.info(f"2. Retrieve hash secret.")
            start = time.monotonic()
            self._hash_key = self.get_hash_key()
            self.eval['hash_key_time'] = time.monotonic()
            log.info(
                f"2 - Retrieval of Hash Secret took: "
                f"{print_time(time.monotonic() - start)}")
            log.info(f"3. Compute Matches.")
            start = time.monotonic()
            if not config.EVAL:
                if self._psi_mode:
                    matches = self.compute_matches_psi(candidate_iterator)
                else:
                    matches = self.compute_matches_bloom(candidate_iterator)
            else: # pragma no cover
                # Do BOTH PSI and BLOOM if PSI Mode enabled.
                candidate_iterator2 = copy.deepcopy(candidate_iterator)
                if self._psi_mode:
                    psi_matches = self.compute_matches_psi(candidate_iterator)
                    self.eval['psi_matches'] = len(psi_matches)
                else:
                    self.eval['psi_matches'] = 0
                    self.eval['psi_preparation_time'] = 0
                    self.eval['psi_execution_time'] = 0
                    self.eval['psi_set_construction_time'] = 0
                # For Debugging only:
                # for i in candidate_iterator:
                #     print(i)
                # print("Real Length:", len([i for i in candidate_iterator]))
                # for r in RecordIterator(candidate_iterator, self.get_hash_key()):
                #     print(to_base64(r.get_long_hash()))
                # Temporarily disable PSI mode so the bloom path runs too;
                # the bloom result is the one used downstream.
                self._psi_mode = False
                bloom_matches = self.compute_matches_bloom(candidate_iterator2)
                self.eval['bloom_matches'] = len(bloom_matches)
                matches = bloom_matches
            self.eval['num_matches'] = len(matches)
            log.info(f"3 - Computed {len(matches)} matches.")
            # log.debug(str([r.record for r in matches]))
            log.info(
                f"3 - Matching took: {print_time(time.monotonic() - start)}")
            log.info(f"4. Retrieve records.")
            result = self.batch_get_records(matches)
            log.info(f"Found {len(result)} result vectors.")
            return result
        except Exception as e:
            log.exception(str(e))
            raise e
    def activate_psi_mode(self):
        """Enables PSI Mode."""
        self._psi_mode = True
def get_client_parser() -> argparse.ArgumentParser:
    """Build and return the argument parser for the client application.

    The action flags (-t, --hash_key, -g, -s, -r) are mutually exclusive:
    a single invocation performs at most one of them.
    """
    parser = argparse.ArgumentParser(description="Client App")
    exclusive = parser.add_mutually_exclusive_group(required=False)
    # Positional credentials.
    parser.add_argument("id", help="ID of User", type=str, action="store")
    parser.add_argument("password", help="Password of User", type=str,
                        action="store")
    # General options.
    parser.add_argument('-m', "--metric", type=str, action="store",
                        help="Name of similarity metric")
    parser.add_argument('-e', "--eval", type=str, action="store",
                        required=config.EVAL,
                        help="Eval communication file")
    parser.add_argument('-p', "--psi", action="store_true",
                        help="Use PSI Mode.")
    parser.add_argument('-v', '--verbose', action='count', default=0,
                        help="Increase verbosity. (-v INFO, -vv DEBUG)")
    # Mutually exclusive actions.
    exclusive.add_argument("-t", "--get_token", action='store_true',
                           help="Retrieve get_token for user with given "
                                "ID.")
    exclusive.add_argument('--hash_key', action='store_true',
                           help='Retrieve hash key from server.')
    exclusive.add_argument('-g', '--get_record', type=str, action='store',
                           metavar="HASH", dest='hash',
                           help="Retrieve record with given hash ["
                                "Base64].")
    exclusive.add_argument('-s', '--similar', action='store', type=str,
                           help="Compute a list of suitable candidates.")
    exclusive.add_argument('-r', '--retrieve_matches', action='store',
                           type=str, dest="target",
                           help="Retrieve all possibly helpful values.")
    return parser
if __name__ == '__main__': # pragma no cover
    parser = get_client_parser()
    args = parser.parse_args()
    # Logging: -v enables INFO, -vv DEBUG.
    if args.verbose == 1:
        log.setLevel(logging.INFO)
    elif args.verbose == 2:
        log.setLevel(logging.DEBUG)
    c = Client(args.id)
    c.set_password(args.password)
    # Test credentials by requesting a token before performing any action.
    try:
        t = c.get_token(ServerType.KeyServer)
        print("> Log-in successful.")
    except RuntimeError as e:
        if "Authentication failed" in str(e):
            # NOTE(review): "corect" typo in this user-facing message.
            print("> Username or password is not corect! Exiting.")
        else:
            log.error(str(e), exc_info=True)
            print("> Authentication failed.")
        sys.exit()
    # Validate and apply a user-supplied metric before any retrieval.
    if args.metric is not None:
        try:
            map_metric(args.metric)
        except ValueError as e:
            log.error("Metric could not be interpreted", exc_info=True)
            sys.exit()
        c.metric = args.metric
    if args.psi:
        c.activate_psi_mode()
    com_file = None
    if config.EVAL:
        com_file = args.eval
    # Perform action (exactly one of the mutually exclusive CLI actions).
    try:
        if args.get_token:
            print(f"> Generated token at Key-Server:\n> {t}")
        elif args.hash_key:
            print(f"> Hash Key:\n> {c.get_hash_key().hex()}")
        elif args.hash is not None:
            h = args.hash
            r_list = c.get_record(h)
            print(f"> Retrieved: {[str(r) for r in r_list]}")
        elif args.similar is not None:
            target = parse_list(args.similar)
            logging.debug(f"Got: {str(target)}")
            print("> Number of Candidates: ",
                  len(list(c.compute_candidates(target, c.metric))))
        elif args.target is not None:
            target = parse_list(args.target)
            if config.EVAL:
                def execClient():
                    """Execute full retrieve and catch errors
                    :return: result, error
                    """
                    try:
                        return c.full_retrieve(target), None
                    except Exception as e:
                        error = str(e)
                        log.exception(error)
                        return None, error
                # Sample RAM usage while the retrieval runs; keep its
                # return value (result, error) as well.
                ram_usage, (result, error) = memory_usage(
                    (execClient,),
                    interval=config.RAM_INTERVAL,
                    include_children=True,
                    retval=True,
                )
                c.eval['result'] = result
                c.eval['ram_usage'] = ram_usage
                c.eval['error'] = error
                # Persist the collected eval data for later analysis.
                with open(com_file, "wb") as fd:
                    pickle.dump(c.eval, fd)
            else:
                res = c.full_retrieve(target)
                print("> Result:\n> ", end='')
                pprint.pprint([str(r) for r in res])
    except Exception as e:
        log.error(str(e), exc_info=True)
        sys.exit()
| 19,715
| 38.432
| 87
|
py
|
parameter-exchange
|
parameter-exchange-master/src/random_record_generator.py
|
#!/usr/bin/env python3
"""Generate random records.
Copyright (c) 2020.
Author: Erik Buchholz
Maintainer: Erik Buchholz
E-mail: buchholz@comsys.rwth-aachen.de
"""
import argparse
import sys
from typing import List
from lib import config
import random
def main(args: List[str]) -> None:
    """
    Generate random records according to the given CL arguments and write
    them to the output file, one Python-list repr per line.

    Bug fix: the signature previously read ``def main(args=List[str])``,
    i.e. the ``typing.List[str]`` object was used as a *default value*
    instead of a type annotation.
    :param args: Command line arguments
    """
    parser = argparse.ArgumentParser("Record Generator")
    parser.add_argument('num', type=int, help="Number of records to generate.",
                        metavar="NUM")
    parser.add_argument('-o', '--output', type=str, default="records.tmp",
                        help="Output file.")
    parser.add_argument('-l', '--length', type=int, default=config.RECORD_LENGTH,
                        help="Length of records")
    parser.add_argument('--max', type=float, default=100,
                        help="Maximal value for items.")
    parser.add_argument('--min', type=float, default=0,
                        help="Minimal value for items.")
    args = parser.parse_args(args)
    random.seed()
    # One record = `length` uniform floats in [min, max].
    records = [
        [random.uniform(args.min, args.max) for _ in range(args.length)]
        for _ in range(args.num)
    ]
    with open(args.output, "w") as fd:
        fd.writelines([f"{str(r)}\n" for r in records])
if __name__ == '__main__': # pragma no cover
    main(sys.argv[1:])
| 1,483
| 28.098039
| 81
|
py
|
parameter-exchange
|
parameter-exchange-master/src/owner_db_cli.py
|
#!/usr/bin/env python3
"""This monument contains the CLI to interact with the owner DB.
Copyright (c) 2020.
Author: Erik Buchholz
Maintainer: Erik Buchholz
E-mail: buchholz@comsys.rwth-aachen.de
"""
import logging
import sys
from lib import db_cli, config
from lib.base_client import UserType
from lib.logging import configure_root_loger
if __name__ == '__main__': # pragma no cover
    # Configure logging, then delegate to the shared DB CLI with the
    # OWNER user type.
    configure_root_loger(logging.INFO, config.LOG_DIR + "owner_db.log")
    log = logging.getLogger()
    db_cli.main(UserType.OWNER, sys.argv[1:])
| 535
| 25.8
| 71
|
py
|
parameter-exchange
|
parameter-exchange-master/src/__init__.py
| 0
| 0
| 0
|
py
|
|
parameter-exchange
|
parameter-exchange-master/src/data_provider.py
|
#!/usr/bin/env python3
"""Client Application of data provider type end-users.
Copyright (c) 2020.
Author: Erik Buchholz
Maintainer: Erik Buchholz
E-mail: buchholz@comsys.rwth-aachen.de
"""
import argparse
import json
import logging
import os
import pickle
import sys
import time
from typing import List, Tuple, Iterable
from memory_profiler import memory_usage
import lib.config as config
from lib.base_client import BaseClient, UserType, ServerType
from lib.helpers import parse_list, to_base64, print_time
from lib.logging import configure_root_loger
from lib.record import Record
configure_root_loger(logging.INFO, config.LOG_DIR + "data_provider.log")
log = logging.getLogger()
class DataProvider(BaseClient):
    """Data Provider Client for end users."""
    # Role used by the base client when authenticating against the servers.
    type = UserType.OWNER
    def _store_record_on_server(self, hash_val: bytes, ciphertext: dict,
                                owner: str) -> None:
        """
        Store the given record on the storage server.
        :param hash_val: [Bytes] Long hash of record as returned by records
        :param ciphertext: [Dict] encrypted record as returned by records
        object
        :param owner: [str] owner of record as string
        :raises RuntimeError: If the server reports a failure.
        :return: None
        """
        j = {
            'hash': to_base64(hash_val),
            'ciphertext': ciphertext,
            'owner': owner
        }
        r = self.post(f"{self.STORAGESERVER}/store_record", json=j)
        suc = r.json()['success']
        if suc:
            log.info("Successfully stored record.")
        else:
            msg = r.json()['msg']
            raise RuntimeError(f"Failed to store record: {msg}")
    def _batch_store_records_on_server(
            self,
            records: Iterable[Tuple[str, str, str]]) -> None:
        """
        Store all records in the list on the storage server.
        :param records: List of records in following form:
        [
        ('Base64-1', json.dumps(ciphertext1), 'owner1'),
        ('Base64-2', json.dumps(ciphertext2), 'owner2')
        ]
        :raises RuntimeError: If the server reports a failure.
        :return: None
        """
        r = self.post(f"{self.STORAGESERVER}/batch_store_records",
                      json=records)
        # Eval bookkeeping: ciphertext sizes and total payload length.
        self.eval['cx_sizes'] = [len(rec[1]) for rec in records]
        self.eval['json_length'] = len(json.dumps(records))
        suc = r.json()['success']
        if suc:
            log.info("Successfully stored requests.")
        else:
            msg = r.json()['msg']
            raise RuntimeError(f"Failed to store records: {msg}")
    def store_records(self, records: List[Record]) -> None:
        """Prepare all records in the list with hashing and encryption and
        store them on the storage server.

        Pipeline: hash key -> per-record hashes -> OT indices ->
        encryption keys -> encryption -> upload. ``self.eval`` records a
        monotonic timestamp after each step.
        """
        start = time.monotonic()
        log.debug("Store Records called.")
        # 1: Retrieve Hash Key
        log.info("1: Retrieve Hash Key")
        hash_key = self.get_hash_key()
        self.eval['hash_key_time'] = time.monotonic()
        log.info(
            f"1: Retrieve Hash Key took: {print_time(time.monotonic()-start)}")
        start = time.monotonic()
        # 2: Update hash_keys of record
        log.info("2: Compute Hashes")
        for r in records:
            r.set_hash_key(hash_key)
        self.eval['hash_set_time'] = time.monotonic()
        log.info(f"2: Set Hash Key took: {print_time(time.monotonic()-start)}")
        start = time.monotonic()
        # 3: Get OT hashes
        log.info("3: Get OT Indizes")
        ot_indices = [r.get_ot_index() for r in records]
        self.eval['ot_index_time'] = time.monotonic()
        log.info(
            f"3: Get OT Indizes took: {print_time(time.monotonic()-start)}")
        start = time.monotonic()
        # 4: Request encryption keys
        log.info("4: Request Encryption Keys")
        enc_keys = self._get_enc_keys(ot_indices)
        self.eval['key_retrieve_time'] = time.monotonic()
        log.info(f"4: Request Encryption Keys took:"
                 f"{print_time(time.monotonic()-start)}")
        start = time.monotonic()
        # 5: Encrypt records (keys are positionally aligned with records)
        log.info("5: Set encryption keys")
        for i, r in enumerate(records):
            r.set_encryption_key(enc_keys[i])
            r.owner = self.user
        self.eval['set_key_time'] = time.monotonic()
        # 6: Create record list
        log.info("6: Create record list [Includes Encryption].")
        record_list = [
            r.get_upload_format() for r in records
        ]
        self.eval['encryption_time'] = time.monotonic()
        log.info(
            f"6: Create record list took: {print_time(time.monotonic()-start)}")
        start = time.monotonic()
        log.info("7: Send encrypted records to server")
        self._batch_store_records_on_server(record_list)
        self.eval['send_time'] = time.monotonic()
        log.info(f"7: Send encrypted records to server took:"
                 f"{print_time(time.monotonic()-start)}")
    def store_from_file(self, file: str) -> None:
        """
        Read all records from file and store them at the storage server.
        Each line of the file is parsed as one record vector.
        :param file: path to the file containing the records
        :return: None
        """
        self.eval['start_time'] = time.monotonic()
        records = []
        with open(file, "r") as fd:
            for line in fd:
                records.append(Record(parse_list(line)))
        self.eval['parsed_list_time'] = time.monotonic()
        log.info(f"Parsed {len(records)} records.")
        self.store_records(records)
def get_provider_parser() -> argparse.ArgumentParser:
    """Build and return the argument parser for the data provider CLI.

    The action flags (-t, -f, -a) are mutually exclusive: a single
    invocation performs at most one of them.
    """
    parser = argparse.ArgumentParser(description="Data Provider Client")
    exclusive = parser.add_mutually_exclusive_group(required=False)
    # Positional credentials.
    parser.add_argument("id", help="ID of User", type=str, action="store")
    parser.add_argument("password", help="Password of User", type=str,
                        action="store")
    parser.add_argument('-v', '--verbose', action='count', default=0,
                        help="Increase verbosity. (-v INFO, -vv DEBUG)")
    # Mutually exclusive actions.
    exclusive.add_argument("-t", "--get_token", action='store_true',
                           help="Retrieve get_token for user with given "
                                "ID.")
    exclusive.add_argument("-f", "--load_file", action='store', dest="file",
                           help="Store all records in file on storage "
                                "server.")
    exclusive.add_argument("-a", "--add",
                           help="String representation of record to add.",
                           )
    parser.add_argument('-e', "--eval", type=str, action="store",
                        required=config.EVAL,
                        help="Eval communication file")
    return parser
if __name__ == '__main__': # pragma no cover
    parser = get_provider_parser()
    args = parser.parse_args()
    # Logging: -v enables INFO, -vv DEBUG.
    if args.verbose == 1:
        log.setLevel(logging.INFO)
    elif args.verbose == 2:
        log.setLevel(logging.DEBUG)
    dp = DataProvider(args.id)
    dp.set_password(args.password)
    # Test credentials by requesting a token before performing any action.
    try:
        t = dp.get_token(ServerType.KeyServer)
        print("> Log-in successful.")
    except RuntimeError as e:
        if "Authentication failed" in str(e):
            # NOTE(review): "corect" typo in this user-facing message.
            print("> Username or password is not corect! Exiting.")
        else:
            log.error(str(e), exc_info=True)
            print("> Authentication failed.")
        sys.exit()
    # Perform action (exactly one of the mutually exclusive CLI actions).
    try:
        if args.get_token:
            print(f"> Generated token at Key-Server:\n> {t}")
        elif args.file:
            if not os.path.exists(args.file):
                msg = f"The given file does not exist: {args.file}"
                log.error(msg)
                print(f"> {msg}")
            else:
                if config.EVAL:
                    com_file = args.eval
                    def execDP():
                        """Execute store from file
                        :return: result, error
                        """
                        try:
                            return dp.store_from_file(args.file), None
                        except Exception as err:
                            err = str(err)
                            log.exception(err)
                            return None, err
                    # Sample RAM usage while the upload runs; keep its
                    # return value (result, error) as well.
                    ram_usage, (result, error) = memory_usage(
                        (execDP,),
                        interval=config.RAM_INTERVAL,
                        include_children=True,
                        retval=True,
                    )
                    dp.eval['result'] = result
                    dp.eval['ram_usage'] = ram_usage
                    dp.eval['error'] = error
                    # Persist the collected eval data for later analysis.
                    with open(com_file, "wb") as fd:
                        pickle.dump(dp.eval, fd)
                else:
                    dp.store_from_file(args.file)
                    print("> Successfully stored records on server.")
        elif args.add:
            # Parse "[1.0, 2.0, ...]" into a list of floats.
            string = args.add
            r_list = string.strip('][').split(',')
            r_list = [float(i) for i in r_list]
            log.debug(f"Got: {str(r_list)}")
            r = Record(r_list)
            dp.store_records([r])
            print("> Successfully stored records on server.")
    except Exception as e:
        log.error(str(e), exc_info=True)
        sys.exit()
| 9,526
| 34.815789
| 80
|
py
|
parameter-exchange
|
parameter-exchange-master/src/tools/wzl_parser.py
|
#!/usr/bin/env python3
"""Parser for WZL Data.
Copyright (c) 2020.
Author: Erik Buchholz
Maintainer: Erik Buchholz
E-mail: buchholz@comsys.rwth-aachen.de
"""
import json
from lib.config import WORKING_DIR
# Mappings from Names to Ints--------------------------------------------------
# Categorical CSV columns are mapped to integer codes because downstream
# processing only handles numeric vectors.
steuerung = {
    'Siemens': 1
}
motor = {
    '1FT7034-5AK7': 1,
    '1FT7036-5AK7': 2,
    '1FT7042-5AK7': 3,
}
werkstuek = {
    'Einsatzstahl': 1,
    'Grauguss': 2,
    'Aluminium gewalzt': 3,
    'Rostfreier Stahl': 4,
    'Temperguss': 5,
    'Superlegierungen': 6,
    'Automatenstahl': 7,
    'Aluminium gegossen': 8,
    'Werkzeugstahl': 9,
    'Kupferlegierungen': 10,
}
werkzeug = {
    'HSS': 1,
}
result = []
with open(f"{WORKING_DIR}/data/wzl_data_unconverted.csv", "r") as f:
    lines = f.readlines()
# Skip header
lines = lines[1:]
# Column indices that hold categorical (string) values.
string_fields = [0, 8, 9, 10, 14, 15]
for line in lines:
    values = line.split(';')
    values = [float(v) if i not in string_fields else v for i,
              v in enumerate(values)]
    # Map Names to ints, because we cannot handle Names
    values[0] = steuerung[values[0]]
    values[8] = motor[values[8]]
    values[9] = motor[values[9]]
    values[10] = motor[values[10]]
    values[14] = werkstuek[values[14]]
    values[15] = werkzeug[values[15]]
    result.append([float(i) for i in values])
# 3 decimal places for numeric columns, 0 for (coded) categorical ones.
# NOTE(review): relies on `values` leaking from the loop above; OK only
# because all rows have the same length.
rounding_vec = [3 if i not in string_fields else 0 for i in range(len(values))]
print("Generated vectors: ", len(result))
print("Rounding vector = ", rounding_vec)
print("Record Length: ", len(result[0]))
with open(f"{WORKING_DIR}/data/wzl_data.txt", "w") as fd:
    fd.writelines([f"{str(r)}\n" for r in result])
| 1,663
| 25.83871
| 79
|
py
|
parameter-exchange
|
parameter-exchange-master/src/tools/__init__.py
|
#!/usr/bin/env python3
"""This module..."""
| 44
| 14
| 22
|
py
|
parameter-exchange
|
parameter-exchange-master/src/tools/ikv_parser.py
|
#!/usr/bin/env python3
"""Parser for IKV Lego Data.
Copyright (c) 2020.
Author: Erik Buchholz
Maintainer: Erik Buchholz
E-mail: buchholz@comsys.rwth-aachen.de
"""
import json
from lib.config import WORKING_DIR
with open(f"{WORKING_DIR}/data/ikv_data_unconverted.json", "r") as f:
d = json.load(f)
result = {}
lists_only = []
for key in d:
element = d[key]
output = []
if 'x' in element:
# ignore legolist and heightlist
for i in range(len(element['x'])):
v = []
# Each is own vector
# Geometry first
for param in element['geometry'].values():
v.append(param)
# Then x
for param in element['x'][i]:
v.append(param)
# Last y
for param in element['y'][i]:
v.append(param)
output.append(v)
result[key] = output
lists_only.extend(output)
with open(f"{WORKING_DIR}/data/lego_converted.txt", "w") as f:
for key in result:
f.write(f"{key}:\n")
for i, v in enumerate(result[key]):
f.write(f"\t{i}: {str(v)}\n")
with open(f"{WORKING_DIR}/data/lego_converted.json", "w") as f:
json.dump(result, f)
print("Generated vectors: ", len(lists_only))
print("Record Length: ", len(lists_only[0]))
with open(f"{WORKING_DIR}/data/ikv_data.txt", "w") as fd:
fd.writelines([f"{str(r)}\n" for r in lists_only])
| 1,429
| 27.039216
| 69
|
py
|
parameter-exchange
|
parameter-exchange-master/src/eval/bloom_eval.py
|
#!/usr/bin/env python3
"""Evaluate bloom filter properties.
Copyright (c) 2020.
Author: Erik Buchholz
Maintainer: Erik Buchholz
E-mail: buchholz@comsys.rwth-aachen.de
"""
import argparse
import logging
import math
import os
import random
import time
from tempfile import NamedTemporaryFile
import numpy
from pybloomfilter import BloomFilter
from eval.shared import get_last_line
from lib import config
from lib.logging import configure_root_loger
from .shared import lb
# Constants -------------------------------------------------------------------
# Parameter grid for the bloom filter benchmark.
CAPACITY = range(0, 1000000001, 100000000) # 10 ** 8
# ERROR_RATE = 10 ** -20
ERROR_RATE = [10 ** (-i) for i in range(1, 21)]
INSERT = 10 ** 8 # range(0, 1000000001, 100000000) # CAPACITY # FULL
INSERT_STEP = 10 ** 7
ROUNDS_START = 0
ROUNDS_END = 10
QUERY = 0 # range(10 ** 8, 10 ** 9 + 1, 10 ** 8) # 10 ** 8 # 100.000.000
# If True, perform ceil(100 / error_rate) queries instead of QUERY.
QUERY_ALL = False
# If True, resume an interrupted run from the existing CSV file.
RESUME = False
# If True, fill each filter up to its capacity instead of INSERT items.
FILL = False
# -----------------------------------------------------------------------------
log = configure_root_loger(logging.INFO, None)
def get_file_path(base_name: str, file_name: str) -> str:
    """Return the path of *file_name* inside the eval dir for *base_name*.

    The per-benchmark directory is created on demand.
    """
    target_dir = f"{config.EVAL_DIR}{base_name}/"
    os.makedirs(target_dir, exist_ok=True)
    return target_dir + file_name
def get_file_name(base_name: str, query=True) -> str:
    """
    Compute an unused eval file name of the form ``<base_name>_<N>.csv``.

    N is drawn uniformly from 1..1000 until the name does not already
    exist in the current working directory.
    (NOTE(review): *query* is currently unused — confirm before removing.)
    """
    while True:
        candidate = f"{base_name}_{random.randint(1, 1000)}.csv"
        if not os.path.exists(candidate):
            return candidate
def write_header(base_name: str, file_path: str, row_format: str) -> None:
    """Write the eval header (parameter summary + row format) to *file_path*.

    The file is truncated; the header records the module-level benchmark
    parameters so a result file is self-describing.
    """
    lines = [
        "------------------------HEADER------------------------\n",
        f"EVAL: {base_name}\n",
        f"Capacity: {CAPACITY}\n",
        f"Error Rate: {ERROR_RATE}\n",
    ]
    if FILL:
        lines.append(f"Inserted Dummy values: Filled to capacity.\n")
    else:
        lines.append(f"Inserted Dummy values: {INSERT}\n")
    if "Partial" in base_name:
        lines.append(f"INSERT STEP: {INSERT_STEP}\n")
    if QUERY_ALL:
        lines.append(f"Perfomed queries: 100 / FP Rate\n")
    else:
        lines.append(f"Perfomed queries: {QUERY}\n")
    lines.append(f"Rounds: {ROUNDS_END}\n")
    lines.append(f"{row_format}\n")
    lines.append("----------------------END-HEADER----------------------\n")
    with open(file_path, 'w') as fd:
        fd.writelines(lines)
def get_round(file_path: str) -> int:
    """Return the round to resume from, read from the last line of *file_path*.

    Eval CSV rows start with ``ROUND;...``, so the first field of the last
    line is the round counter of the interrupted run.

    :param file_path: Path to an existing eval CSV file.
    :return: Round number parsed from the file's last line.
    """
    with open(file_path, 'r') as fd:
        last = get_last_line(fd)
    state = last.split(';')
    rs = int(state[0])
    # Bug fix: previously logged the constant ROUNDS_START instead of the
    # round actually read from the file.
    log.warning(f"Resuming at round {rs}")
    return rs
def bloom_full(basename: str):
    """Measure all values at once.

    Runs the full eval sweep (rounds x capacities x error rates x insert
    counts), timing inserts and membership queries on a fresh BloomFilter
    for each combination, and appends one CSV row per query batch to the
    output file (so interrupted runs can be resumed via RESUME).
    """
    if basename is None:
        raise ValueError("No basename given.")
    file_path = get_file_path("bloom_full", basename)
    # Companion file for the partial-insert eval; currently unused — the
    # corresponding header writes below are commented out.
    partial_insert_file = file_path.replace(".csv", "_partial_insert.csv")
    rs = ROUNDS_START
    if not RESUME or not os.path.exists(file_path):
        # Write header if new file only
        row_fmt = f"ROUND;CAPACITY;ERROR_RATE;INSERTED ELEMENTS;" \
                  f"QUERIED ELEMENTS;SIZE;INSERT TIME;QUERY TIME;" \
                  f"# False Positives"
        write_header("Bloom Full", file_path, row_fmt)
        # row_fmt = f"ROUND;CAPACITY;ERROR_RATE;INSERTED ELEMENTS;" \
        #     f"SIZE;INSERT_TIME[s](for elements added in step);"
        # write_header("Bloom Partial Insert", partial_insert_file, row_fmt)
    else:
        # Read values to resume
        rs = get_round(file_path)
    # lb is a project helper — presumably a tqdm-style progress wrapper
    # around an iterable (TODO confirm).
    for r in lb(range(rs, ROUNDS_END), "Rounds"):
        for capacity in lb(CAPACITY, "Capacities", leave=False):
            for error_rate in lb(ERROR_RATE, "Error Rates", leave=False):
                if FILL:
                    i = [capacity]
                else:
                    # NOTE(review): INSERT defaults to a plain int (10**8);
                    # iterating lb(int, ...) looks wrong unless lb handles
                    # ints or INSERT is set to a range via the CLI — verify.
                    i = lb(INSERT, "Inserts", leave=False)
                for insert in i:
                    # Fresh temp-backed filter per combination; deleted on
                    # context exit.
                    with NamedTemporaryFile() as tmp:
                        b = BloomFilter(capacity, error_rate, tmp.name)
                        # All inserted values are in [0, 1).
                        real_set = [random.random() for _ in range(insert)]
                        start = time.monotonic()
                        for s in real_set:
                            # Add random value
                            b.add(s)
                        insert_time = time.monotonic() - start
                        # Serialized size as proxy for filter memory usage.
                        size = len(b.to_base64())
                        if QUERY_ALL:
                            # Enough queries to expect ~100 false positives
                            # at the configured error rate.
                            query_range = int(math.ceil(100 / error_rate))
                        else:
                            query_range = QUERY
                        # NOTE(review): query_range is an int here; as above,
                        # this seems to assume lb() iterates it (or QUERY is
                        # a range from the CLI) — confirm.
                        for query in lb(query_range, "Queries", leave=False):
                            # +1 because only values <1 stored
                            # => every hit is a false positive by construction.
                            query_set = [
                                random.random() + 1 for _ in range(query)]
                            start = time.monotonic()
                            false_positives = 0
                            for q in query_set:
                                if q in b:
                                    false_positives += 1
                            query_time = time.monotonic() - start
                            with open(file_path, "a") as fd:
                                fd.write(
                                    f"{r};{capacity};{error_rate};"
                                    f"{insert};{query};{size};{insert_time};"
                                    f"{query_time};{false_positives}\n")
if __name__ == '__main__':
    p = argparse.ArgumentParser("Bloom Eval")
    p.add_argument('--resume', help="Resume Eval", action="store_true")
    p.add_argument('--fill', help="Fill up to capacity", action="store_true")
    p.add_argument('-r', '--reps', help="Rounds", action='store', default=0,
                   type=int)
    p.add_argument('-c', '--capacity',
                   help="Capacity: Either constant or 3 values.",
                   metavar=('CONSTANT/MIN', 'MAX STEP'),
                   nargs='+', action='store', type=int)
    p.add_argument('-e', '--error',
                   help="Error Rate: Either constant or 3 values.",
                   action='store', type=float,
                   metavar=('CONSTANT/MIN', 'MAX STEP'),
                   nargs='+')
    p.add_argument('-i', '--insert',
                   help="Inserted Values: Either constant or 3 values.",
                   metavar=('CONSTANT/MIN', 'MAX STEP'),
                   nargs='+', action='store', type=int)
    p.add_argument('-q', '--query',
                   help="Number of random element queries:"
                        "Either constant or 3 values.",
                   metavar=('CONSTANT/MIN', 'MAX STEP'),
                   nargs='+', action='store', type=int)
    p.add_argument('-o', '--out', type=str, action='store',
                   help="Filename", required=True)
    args = p.parse_args()

    def _span(values, label, make_range):
        """Turn a 1-element (constant) or 3-element (min, max, step) CLI
        list into either the constant or an inclusive range; the max bound
        is made inclusive by extending it by one step."""
        if len(values) == 1:
            return values[0]
        if len(values) == 3:
            return make_range(values[0], values[1] + values[2], values[2])
        raise ValueError(f"Either 1 or 3 {label} parameters!")

    if args.resume:
        RESUME = True
    if args.fill:
        log.warning("Using Fill Mode!")
        FILL = True
    else:
        log.warning("Not using Fill Mode!")
        FILL = False
    if args.reps > 0:
        ROUNDS_END = args.reps
    # The four sweep parameters share the same 1-or-3-values CLI shape;
    # error rates are floats, so they use numpy.arange instead of range.
    if args.capacity is not None:
        CAPACITY = _span(args.capacity, "capacity", range)
    if args.error is not None:
        ERROR_RATE = _span(args.error, "error", numpy.arange)
    if args.insert is not None:
        INSERT = _span(args.insert, "insert", range)
    if args.query is not None:
        QUERY = _span(args.query, "query", range)
    filename = args.out + ".csv"
    bloom_full(filename)
| 8,821
| 38.738739
| 79
|
py
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.