id stringlengths 1 7 | text stringlengths 6 1.03M | dataset_id stringclasses 1
value |
|---|---|---|
1666116 | <reponame>deanwang539/davis_v0.1
from davis import app
app.run()
| StarcoderdataPython |
3387149 | ## A bunch of summary plots for the minimal model
"""
7D, right
PLOT_FIT_QUALITY_VS_DEPTH_effect_ll_per_whisk
STATS__PLOT_FIT_QUALITY_VS_DEPTH_ll_per_whisk
Bar plot of fit quality by cell type and depth
7D, left
PLOT_FIT_QUALITY_VS_DEPTH_vdepth_ll_per_whisk
N/A
Depth plot of fit quality by cell type
S7C, right
PLOT_FIT_QUALITY_VS_DEPTH_effect_score
STATS__PLOT_FIT_QUALITY_VS_DEPTH_score
Bar plot of fit quality by cell type and depth
S7C, left
PLOT_FIT_QUALITY_VS_DEPTH_vdepth_score
N/A
Depth plot of fit quality by cell type
S7A
PLOT_FEATURE_CORRELATION_MAP
STATS__PLOT_FEATURE_CORRELATION_MAP
Heatmap of correlation between features in the GLM
7E, top
PLOT_FRAC_SIG_COEFS_BY_TASK_AGGREGATING_TIME_AND_WHISKER
STATS__PLOT_FRAC_SIG_COEFS_BY_TASK_AGGREGATING_TIME_AND_WHISKER
Fraction of significantly modulated neurons by each feature family in each task
7E, bottom
VENN_FRAC_SIG_COEFS_BY_TASK_AGGREGATING_TIME_AND_WHISKER
STATS__VENN_FRAC_SIG_COEFS_BY_TASK_AGGREGATING_TIME_AND_WHISKER
Venn diagram of significantly modulated neurons by each feature family in each task
7F
PLOT_TASK_COEFS_OVER_TIME_both_regular
STATS__PLOT_TASK_COEFS_OVER_TIME_both_regular
Plot fraction significantly modulated neurons by each task variable over time in trial
S7D
PLOT_TASK_COEFS_OVER_TIME_optodrop
STATS__PLOT_TASK_COEFS_OVER_TIME_optodrop
Plot fraction significantly modulated neurons by each task variable over time in trial, dropping opto sessions
8A
PLOT_DISCRIMINATION_WHISKING_AMPL_FRACSIG_AND_MEAN_BY_NS_AND_STRATUM_fracsig
STATS__PLOT_DISCRIMINATION_WHISKING_AMPL_FRACSIG_AND_MEAN_BY_NS_AND_STRATUM
Fraction of significantly modulated neurons by whisking amplitude in each cell type
8B
PLOT_DISCRIMINATION_WHISKING_AMPL_FRACSIG_AND_MEAN_BY_NS_AND_STRATUM_scaled_coef_single
STATS__PLOT_DISCRIMINATION_WHISKING_AMPL_FRACSIG_AND_MEAN_BY_NS_AND_STRATUM
Bar plot of whisking amplitude modulation by cell type
8C, left
PLOT_DISCRIMINATION_WHISKING_AMPL_FRACSIG_AND_MEAN_BY_NS_AND_STRATUM_vdepth_scaled_coef_single
N/A
Depth plot of whisking amplitude modulation by cell type
8C, right
PLOT_DISCRIMINATION_WHISKING_AMPL_FRACSIG_AND_MEAN_BY_NS_AND_STRATUM_vdepth_scaled_coef_single_hz
N/A
Depth plot of whisking amplitude modulation by cell type, in Hz
S8C
PLOT_PCA_CONTACT_COEFS
STATS__PLOT_PCA_CONTACT_COEFS
Plot of PCA components of contact coefficients
8I
HEATMAP_CONTACT_COEF_BY_RECLOC_discrimination
STATS__HEATMAP_CONTACT_COEF_BY_RECLOC_discrimination
Heatmap of contact responses for each neuron during discrimination
S8F
BAR_PLOT_CONTACT_COEFS_EACH_WHISKER_BY_TASK_STRATUM_AND_NS
STATS__BAR_PLOT_CONTACT_COEFS_EACH_WHISKER_BY_TASK_STRATUM_AND_NS
Bar plot of contact coefficients by whisker, task, and stratum
8H
BAR_PLOT_CONTACT_COEFS_EACH_WHISKER_BY_TASK
STATS__BAR_PLOT_CONTACT_COEFS_EACH_WHISKER_BY_TASK
Bar plot of contact coefficients by whisker and task
6D
PLOT_DISCRIMINATION_CONTACTS_FRACSIG_AND_MEAN_BY_NS_AND_STRATUM_fracsig
STATS__PLOT_DISCRIMINATION_CONTACTS_FRACSIG_AND_MEAN_BY_NS_AND_STRATUM
Fraction of significantly modulated neurons by contact in each cell type
8E
PLOT_DISCRIMINATION_CONTACTS_FRACSIG_AND_MEAN_BY_NS_AND_STRATUM_scaled_coef_single
STATS__PLOT_DISCRIMINATION_CONTACTS_FRACSIG_AND_MEAN_BY_NS_AND_STRATUM
Bar plot of contact modulation by cell type
8F, left
PLOT_DISCRIMINATION_CONTACTS_FRACSIG_AND_MEAN_BY_NS_AND_STRATUM_vdepth_scaled_coef_single
N/A
Depth plot of contact modulation by cell type
8F, right
PLOT_DISCRIMINATION_CONTACTS_FRACSIG_AND_MEAN_BY_NS_AND_STRATUM_vdepth_scaled_coef_single_hz
N/A
Depth plot of contact modulation by cell type, in Hz
S8D, E
BAR_PLOT_DISCRIMINATION_CONTACT_COEFS_EACH_WHISKER_BY_TASK_RECLOC_AND_STRATUM
STATS__BAR_PLOT_DISCRIMINATION_CONTACT_COEFS_EACH_WHISKER_BY_TASK_RECLOC_AND_STRATUM
Bar plot of contact coefficients by whisker, task, recording location, and stratum
S8B, left
PLOT_CONTACT_COEF_STRENGTH_AND_SELECTIVITY_strength
STATS__PLOT_CONTACT_COEF_STRENGTH_AND_SELECTIVITY
Bar plot of best-whisker contact modulation by cell type
S8B, right
PLOT_CONTACT_COEF_STRENGTH_AND_SELECTIVITY_selectivity
STATS__PLOT_CONTACT_COEF_STRENGTH_AND_SELECTIVITY
Bar plot of best/worst contact response ratio by cell type
"""
import json
import os
import copy
import pandas
import numpy as np
import matplotlib.pyplot as plt
import my
import my.plot
import my.bootstrap
import sklearn.decomposition
import sklearn.preprocessing
import statsmodels.stats.multitest
import matplotlib_venn
from statsmodels.formula.api import ols
from statsmodels.stats.anova import anova_lm, AnovaRM
import scipy.stats
import extras
## Parameters
# Load the analysis parameter dictionary (paths, etc.) from the parent dir
with open('../parameters') as fi:
    params = json.load(fi)


## Set up plotting
my.plot.poster_defaults()
my.plot.font_embed()

# Fixed color per whisker for all plots in this script
this_WHISKER2COLOR = {'C1': 'b', 'C2': 'g', 'C3': 'r'}

# Unicode capital delta, used in axis labels
DELTA = chr(916)


## Tasks to iterate over
# Name of the GLM model whose fits are summarized here
model_name = 'minimal'
task_l = [
    'detection',
    'discrimination',
]
## Load waveform info stuff
# Per-neuron metadata (stratum, NS, layer, depth); layers 1 and 6b excluded
big_waveform_info_df = my.dataload.load_bwid(params, drop_1_and_6b=True)


## Load metadata about sessions
session_df, task2mouse, mouse2task = my.dataload.load_session_metadata(params)


## Load results from main4b
glm_results_dir = os.path.join(params['glm_fits_dir'])
model_results_dir = os.path.join(glm_results_dir, model_name)

# Per-coefficient results (scaled coefs, p-values) and per-fit results
coef_wscale_df = pandas.read_pickle(os.path.join(
    model_results_dir, 'coef_wscale_df'))
fitting_results_df = pandas.read_pickle(os.path.join(
    model_results_dir, 'fitting_results_df'))

# Normalize likelihood to the null, and for the amount of data
fitting_results_df['ll_per_whisk'] = (
    (fitting_results_df['likelihood'] - fitting_results_df['null_likelihood']) /
    fitting_results_df['len_ytest'])

# Convert nats to bits
fitting_results_df['ll_per_whisk'] = (
    fitting_results_df['ll_per_whisk'] / np.log(2))


## Include only those results left in big_waveform_info_df
# (e.g., after dropping 1 and 6b)
coef_wscale_df = my.misc.slice_df_by_some_levels(
    coef_wscale_df, big_waveform_info_df.index)
fitting_results_df = my.misc.slice_df_by_some_levels(
    fitting_results_df, big_waveform_info_df.index)

# Count the neurons remaining
fit_neurons = coef_wscale_df.index.to_frame()[
    ['task', 'session', 'neuron']].drop_duplicates().reset_index(drop=True)
print("n = {} neurons fit in this model".format(len(fit_neurons)))
print("by task:")
print(fit_neurons.groupby('task').size().to_string() + '\n')


## Load the baseline firing rates
# Series of overall firing rate per (session, neuron); used to convert
# multiplicative gain into delta-Hz
FR_overall = pandas.read_pickle(
    os.path.join(params['neural_dir'], 'FR_overall'))


## Load the raw features
# Design-matrix features for this model, used for the correlation heatmap
neural_unbinned_features = pandas.read_pickle(
    os.path.join(params['glm_dir'], 'features', model_name,
    'neural_unbinned_features'))
## Plot flags
# Each flag enables one figure/stats section below (see module docstring
# for the figure panel each one corresponds to)

# Fitting results
PLOT_FIT_QUALITY_VS_DEPTH = True
PLOT_FEATURE_CORRELATION_MAP = True

# Summary plots
PLOT_FRAC_SIG_COEFS_BY_TASK_AGGREGATING_TIME_AND_WHISKER = True
VENN_FRAC_SIG_COEFS_BY_TASK_AGGREGATING_TIME_AND_WHISKER = True

# Task responses
PLOT_TASK_COEFS_OVER_TIME = True

# Whisking responses
PLOT_DISCRIMINATION_WHISKING_AMPL_FRACSIG_AND_MEAN_BY_NS_AND_STRATUM = True

# Contact responses
PLOT_PCA_CONTACT_COEFS = True
HEATMAP_CONTACT_COEF_BY_RECLOC = True
BAR_PLOT_CONTACT_COEFS_EACH_WHISKER_BY_TASK_STRATUM_AND_NS = True
BAR_PLOT_CONTACT_COEFS_EACH_WHISKER_BY_TASK = True
PLOT_DISCRIMINATION_CONTACTS_FRACSIG_AND_MEAN_BY_NS_AND_STRATUM = True
BAR_PLOT_DISCRIMINATION_CONTACT_COEFS_EACH_WHISKER_BY_TASK_RECLOC_AND_STRATUM = True
PLOT_CONTACT_COEF_STRENGTH_AND_SELECTIVITY = True
## Plots
if PLOT_FIT_QUALITY_VS_DEPTH:
    # Figures 7D and S7C: GLM goodness-of-fit, as a grouped bar plot by
    # stratum * NS and as a depth plot, for two fit-quality metrics
    for fitting_metric in ['ll_per_whisk', 'score']:
        ## Plot fit quality versus depth (and by stratum * NS)
        analysis = 'actual_best'

        # Extract scores
        # NOTE(review): Series.mean(level=...) was removed in pandas 2.0;
        # this assumes an older pinned pandas -- confirm
        scores = fitting_results_df[fitting_metric].xs(
            analysis, level='analysis').droplevel(
            ['n_reg_lambda', 'n_permute']).mean(
            level=['task', 'session', 'neuron'])

        ## Join on metadata
        data = scores.to_frame().join(
            big_waveform_info_df[['stratum', 'NS', 'layer', 'Z_corrected']],
            on=['session', 'neuron'])

        # Check for nulls here
        assert not data[
            ['stratum', 'NS', 'layer', 'Z_corrected']].isnull().any().any()

        ## Aggregate over neurons
        # Returns dict with 'mean', bootstrapped 'lo'/'hi' CIs, and the
        # underlying groupby in 'agg'
        agg_res = extras.aggregate_coefs_over_neurons(data, fitting_metric)

        ## Bar plot the effect size
        f, ax = my.plot.figure_1x1_small()
        ax_junk, bar_container = my.plot.grouped_bar_plot(
            df=agg_res['mean'],
            index2plot_kwargs=extras.index2plot_kwargs__NS2color,
            yerrlo=agg_res['lo'],
            yerrhi=agg_res['hi'],
            ax=ax,
            elinewidth=1.5,
            group_name_fig_ypos=.175,
            )

        # Pretty
        my.plot.despine(ax)
        ax.set_ylim((0, .3))
        ax.set_yticks((0, .1, .2, .3))
        # NOTE(review): this label is always overwritten by the
        # metric-specific branch below; redundant but harmless
        ax.set_ylabel('{}goodness-of-fit\n(bits / whisk)'.format(DELTA))
        if fitting_metric == 'll_per_whisk':
            ax.set_ylabel('{}goodness-of-fit\n(bits / whisk)'.format(DELTA))
        elif fitting_metric == 'score':
            ax.set_ylabel('{}goodness-of-fit\n(pseudo R2)'.format(DELTA))

        # Error bar pretty
        lc = bar_container.lines[2][0]
        lc.set_clip_on(False)

        # Save
        f.savefig('PLOT_FIT_QUALITY_VS_DEPTH_effect_{}.svg'.format(fitting_metric))
        f.savefig('PLOT_FIT_QUALITY_VS_DEPTH_effect_{}.png'.format(fitting_metric), dpi=300)

        ## Plot versus depth
        # y-limits and ticks are metric-specific
        f, ax = my.plot.figure_1x1_small()
        if fitting_metric == 'll_per_whisk':
            my.plot.smooth_and_plot_versus_depth(
                data, fitting_metric, ax=ax, layer_boundaries_ylim=(-.001, 1.1))

            # Pretty y
            ax.set_ylim((-.001, 1.1))
            ax.set_ylabel('{}goodness-of-fit\n(bits / whisk)'.format(DELTA))
            ax.set_yticks((0, .5, 1))
        elif fitting_metric == 'score':
            my.plot.smooth_and_plot_versus_depth(
                data, fitting_metric, ax=ax, layer_boundaries_ylim=(-.001, .6))

            # Pretty y
            ax.set_ylim((-.001, .6))
            ax.set_ylabel('{}goodness-of-fit\n(pseudo R2)'.format(DELTA))
            ax.set_yticks((0, .3, .6))

        ## Save
        f.savefig(
            'PLOT_FIT_QUALITY_VS_DEPTH_vdepth_{}.svg'.format(fitting_metric))
        f.savefig(
            'PLOT_FIT_QUALITY_VS_DEPTH_vdepth_{}.png'.format(fitting_metric),
            dpi=300)

        ## Stats
        # Write a human-readable stats file next to the figures, then echo it
        stats_filename = 'STATS__PLOT_FIT_QUALITY_VS_DEPTH_{}'.format(fitting_metric)
        with open(stats_filename, 'w') as fi:
            assert analysis == 'actual_best'
            fi.write(stats_filename + '\n')
            fi.write('n = {} neurons\n'.format(len(data)))
            fi.write('including both tasks\n')
            fi.write('fitting metric: {}\n'.format(fitting_metric))
            if fitting_metric == 'll_per_whisk':
                fi.write('log-likelihood per whisk in bits, versus a complete null '
                    '(not +drift-population)\n')
            fi.write('using the best reg for each neuron (not single reg)\n')
            fi.write('counts by stratum and NS:\n')
            fi.write(agg_res['agg'].size().to_string() + '\n')
            fi.write('errorbars: 95% CIs from bootstrapping\n')
        with open(stats_filename, 'r') as fi:
            lines = fi.readlines()
        print(''.join(lines))
if PLOT_FEATURE_CORRELATION_MAP:
    # Figure S7A: heatmap of pairwise correlation between GLM features.
    # Results are almost identical regardless of method but pearson is fastest
    corrdf = neural_unbinned_features.corr(method='pearson')

    # For simplicity drop task indicators
    # TODO: mean these over time, or something
    # They're mainly correlated with each other, not with sensorimotor, anyway
    corrdf = corrdf.drop('task', axis=0).drop('task', axis=1)

    # Drop nuisance
    # Mildly neg corr with set point and pos corr with the contacts and ampl
    #~ corrdf = corrdf.drop(
    #~     'log_cycle_duration', axis=0).drop(
    #~     'log_cycle_duration', axis=1)

    # Actually C0 is a nice contrast
    #~ # Drop C0 which isn't particularly correlated
    #~ corrdf = corrdf.drop(
    #~     'C0', axis=0, level=1).drop(
    #~     'C0', axis=1, level=1)

    # Mask out the self-similarity (diagonal)
    # FIX: np.bool was a deprecated alias removed in NumPy 1.24; use bool
    mask = np.eye(len(corrdf)).astype(bool)
    corrdf.values[mask] = 0  # np.nan

    # Pretty labels, one per (metric, label) row of the correlation matrix
    pretty_labels = []
    for metric, label in corrdf.index:
        if metric == 'contact_binarized':
            pretty_labels.append('{} contact'.format(label))
        elif metric == 'whisking_indiv_set_point_smoothed':
            pretty_labels.append('{} set point'.format(label))
        elif 'whisking' in metric:
            pretty_labels.append(label)
        elif metric == 'log_cycle_duration':
            pretty_labels.append('cycle duration')
        else:
            # FIX: was `pretty_labels.append('unk')` followed by a bare `1/0`
            # debug crash; fail with an informative error instead
            raise ValueError('unknown metric: {}'.format(metric))

    # Plot
    cmap = copy.copy(plt.cm.RdBu_r)
    cmap.set_bad('k')  # can't get this to work
    f, ax = plt.subplots(figsize=(4.75, 3.5))
    f.subplots_adjust(left=.2, bottom=.35, top=.975, right=.9)
    im = my.plot.imshow(
        corrdf.fillna(1), ax=ax, axis_call='image', cmap=cmap,
        )

    ## Pretty
    ax.set_xticks(range(len(corrdf)))
    ax.set_xticklabels(pretty_labels, rotation=90, size=12)
    ax.set_xlim((-.5, len(corrdf) - .5))
    ax.set_yticks(range(len(corrdf)))
    ax.set_yticklabels(pretty_labels, size=12)
    ax.set_ylim((len(corrdf) - .5, -.5))

    # Colorbar
    cb = my.plot.colorbar(ax=ax, shrink=.8)
    cb.mappable.set_clim((-1, 1))
    cb.set_ticks((-1, 0, 1))
    cb.set_label("correlation\nPearson's {}".format(chr(961)))

    ## Save
    f.savefig('PLOT_FEATURE_CORRELATION_MAP.svg')
    f.savefig('PLOT_FEATURE_CORRELATION_MAP.png', dpi=300)

    ## Stats
    stats_filename = 'STATS__PLOT_FEATURE_CORRELATION_MAP'
    with open(stats_filename, 'w') as fi:
        fi.write(stats_filename + '\n')
        fi.write('feature matrix shape: {}\n'.format(neural_unbinned_features.shape))
        fi.write('concatting over all sessions\n')
        fi.write('excluding task indicator variables\n')
    with open(stats_filename, 'r') as fi:
        lines = fi.readlines()
    print(''.join(lines))
if PLOT_FRAC_SIG_COEFS_BY_TASK_AGGREGATING_TIME_AND_WHISKER:
    # Figure 7E (top): horizontal bar plot of the fraction of neurons
    # significantly modulated by each feature family, per task.
    # Aggregate task coefficients by time, and contact coefficients by whisker
    # This is probably okay, because these coefficients are unlikely to have
    # opposite sign
    # Set point needs to be considered separately because it definitely
    # can have opposite sign over whiskers
    # But that's okay because set point / pose is whisker-specific anyway

    ## Extract coef_single_p for all, because coef_best_z was optimized without CV
    coef_p = coef_wscale_df['coef_single_p'].copy()

    # Drop nuisance
    coef_p = coef_p.drop(['drift', 'log_cycle_duration'], level='metric')

    # Drop C0
    # This barely changes anything, though it slightly lowers frac sig
    # for detection set point
    # Let's keep it since it's in the model
    #~ coef_p = coef_p.drop('C0', level='label')

    ## Split task into separate variables
    # Convert index to DataFrame
    idxdf = coef_p.index.to_frame().reset_index(drop=True)

    # Replace each task variable in turn
    sw_flag_l = [
        'current_choice', 'prev_choice', 'previous_outcome', 'current_outcome']
    for sw_flag in sw_flag_l:
        # Mask the rows whose label belongs to this task variable
        mask = idxdf['label'].str.startswith(sw_flag).fillna(False)

        # Replace: metric becomes the variable name, label becomes the
        # integer time-bin suffix of the original label
        idxdf.loc[mask, 'metric'] = sw_flag
        idxdf.loc[mask, 'label'] = [
            int(label.split('_')[-1])
            for label in idxdf.loc[mask, 'label'].values]

    # Return index
    coef_p.index = pandas.MultiIndex.from_frame(idxdf)
    coef_p = coef_p.sort_index()

    ## Check for dropped coefficients
    assert not coef_p.isnull().any()

    # This is probably fine as long as it's not too extreme
    # And as long as an entire family of coefficients isn't missing
    #~ n_neurons_by_metric_label = coef_p.groupby(['metric', 'label']).size()
    #~ n_missing_fit_neurons = len(fit_neurons) - n_neurons_by_metric_label
    #~ if (n_missing_fit_neurons != 0).any():
    #~     print("some neurons missing coefficients:")
    #~     print(n_missing_fit_neurons[n_missing_fit_neurons != 0])

    # Count the number of neurons with at least one coefficient per family
    # It's okay if individual labels are missing (e.g., C0 contact) as long
    # as there is at least one label per family
    n_fit_neurons_by_metric = coef_p.index.to_frame()[
        ['session', 'neuron', 'metric']].drop_duplicates().reset_index(
        drop=True).groupby('metric').size()
    assert (n_fit_neurons_by_metric == len(fit_neurons)).all()

    ## Aggregate significance over time (task variables) or whisker
    ## (set point and contact variables)
    # Correct by group
    # Bonferroni here to control FWER within the family (metric)
    coef_p2 = coef_p.groupby(['task', 'session', 'neuron', 'metric']).apply(
        lambda ser: my.stats.adjust_pval(ser, 'bonferroni'))

    # Take minimum p within each metric (e.g., over whiskers and time)
    # NOTE(review): .min(level=...) requires pre-2.0 pandas -- confirm
    coef_minp_by_metric = coef_p2.min(
        level=[lev for lev in coef_p2.index.names if lev != 'label'])

    # Threshold
    coef_sig = coef_minp_by_metric < .05

    ## Aggregate over neurons
    # Mean over session * neuron within task * metric
    mfracsig = coef_sig.groupby(['metric', 'task']).mean().unstack('task')

    # Sort by mean
    mfracsig = mfracsig.loc[
        mfracsig.mean(1).sort_values().index
        ].copy()

    ## Rename and reorder
    # Rename families to the display names used on the figure
    mfracsig = mfracsig.rename(index={
        'contact_binarized': 'contact',
        'current_choice': 'choice',
        'current_outcome': 'outcome',
        'prev_choice': 'previous choice',
        'previous_outcome': 'previous outcome',
        'whisking_global_smoothed': 'whisking amplitude',
        'whisking_indiv_set_point_smoothed': 'whisking set point'
        })

    # Plot position and tick position
    # The non-integer positions insert visual gaps between variable groups
    tick2label = pandas.Series({
        0: 'previous choice',
        1: 'choice',
        2: 'previous outcome',
        3: 'outcome',
        4.75: 'contact',
        6.5: 'whisking amplitude',
        7.5: 'whisking set point',
        }).sort_index()
    # red = task variables, blue = contact, green = whisking
    colors = ['r'] * 4 + ['b'] + ['g'] * 2

    # Force sort (note: increasing order of importance)
    mfracsig = mfracsig.loc[tick2label.values]
    assert not mfracsig.isnull().any().any()

    # Positions, from smallest (0) to largest
    tick_pos = tick2label.index.values

    ## Plot
    f, axa = plt.subplots(1, len(task_l),
        figsize=(5.5, 2.25), sharex=True, sharey=True)
    f.subplots_adjust(left=.275, wspace=.4, bottom=.225, top=.9, right=.97)
    for task in task_l:
        ## Slice ax by task
        ax = axa[task_l.index(task)]
        ax.set_title(task)

        ## Plot signif
        bar_container = ax.barh(
            y=tick_pos,
            left=0,
            width=mfracsig[task].values,
            ec='k', fc='lightgray', alpha=.4, lw=0,
            )
        for color, patch in zip(colors, bar_container.patches):
            patch.set_facecolor(color)

        # Plot positive again to get the edge alpha correct
        ax.barh(
            y=tick_pos,
            left=0,
            width=mfracsig[task].values,
            ec='k', fc='none', lw=.75,
            )

        # Plot nonsig (white remainder up to 1.0)
        ns_frac = 1 - mfracsig[task].values
        ax.barh(
            y=tick_pos,
            left=(1 - ns_frac),
            width=ns_frac,
            ec='k', fc='w', lw=.75,
            clip_on=False,
            )

    ## Pretty
    for ax in axa.flatten():
        my.plot.despine(ax)
        ax.set_yticks(tick_pos)
        ax.set_yticklabels(mfracsig.index.values, size=12)
        ax.set_ylim((-.5, np.max(tick_pos) + 0.5))
        ax.set_xlim((0, 1))
        ax.set_xticks((0, .25, .5, .75, 1))
        ax.set_xticklabels(('0.0', '', '0.5', '', '1.0'))
    f.text(.625, .05, 'fraction of recorded neurons that are significant',
        ha='center', va='center')

    ## Save
    f.savefig('PLOT_FRAC_SIG_COEFS_BY_TASK_AGGREGATING_TIME_AND_WHISKER.svg')
    f.savefig('PLOT_FRAC_SIG_COEFS_BY_TASK_AGGREGATING_TIME_AND_WHISKER.png', dpi=300)

    ## Stats
    stats_filename = 'STATS__PLOT_FRAC_SIG_COEFS_BY_TASK_AGGREGATING_TIME_AND_WHISKER'
    with open(stats_filename, 'w') as fi:
        fi.write(stats_filename + '\n')
        fi.write('n = {} neurons\n'.format(len(fit_neurons)))
        fi.write('by task:\n{}\n'.format(fit_neurons.groupby('task').size()))
        fi.write('significance taken from coef_single_p, bonferroni'
            ' corrected within metric family, and meaned over neurons\n')
    with open(stats_filename) as fi:
        lines = fi.readlines()
    print(''.join(lines))
if VENN_FRAC_SIG_COEFS_BY_TASK_AGGREGATING_TIME_AND_WHISKER:
    # Figure 7E (bottom): Venn diagram of neurons significantly modulated
    # by the task / whisking / contact (TWC) families, per task.
    ## Calculate fracsig within TWC families
    ## Extract coef_single_p for all, because coef_best_z was optimized without CV
    coef_p = coef_wscale_df['coef_single_p'].copy()

    ## Convert to TWC family representation
    # Rename metric to task, whisking, or contact
    idx = coef_p.index.to_frame().reset_index(drop=True)
    idx['metric'] = idx['metric'].replace({
        'whisking_indiv_set_point_smoothed': 'whisking',
        'whisking_global_smoothed': 'whisking',
        'contact_binarized': 'contact',
        })
    coef_p.index = pandas.MultiIndex.from_frame(
        idx[['metric', 'label', 'task', 'session', 'neuron']])
    coef_p = coef_p.sort_index()
    assert not coef_p.index.duplicated().any()

    # Include only TWC families
    coef_p = coef_p.loc[['task', 'whisking', 'contact']].sort_index()
    assert not coef_p.isnull().any()

    # Reorder in standard order
    coef_p = coef_p.reorder_levels(
        ['task', 'session', 'neuron', 'metric', 'label']).sort_index()

    ## Check for dropped coefficients
    # Count the number of neurons with at least one coefficient per family
    # It's okay if individual labels are missing (e.g., C0 contact) as long
    # as there is at least one label per family
    n_fit_neurons_by_metric = coef_p.index.to_frame()[
        ['session', 'neuron', 'metric']].drop_duplicates().reset_index(
        drop=True).groupby('metric').size()
    assert (n_fit_neurons_by_metric == len(fit_neurons)).all()

    ## Aggregate significance over time (task variables) or whisker
    ## (set point and contact variables)
    # Correct by group
    # Bonferroni here to control FWER within the family (metric)
    coef_p2 = coef_p.groupby(['task', 'session', 'neuron', 'metric']).apply(
        lambda ser: my.stats.adjust_pval(ser, 'bonferroni'))

    # Take minimum p within each metric (e.g., over whiskers and time)
    coef_minp_by_metric = coef_p2.min(
        level=[lev for lev in coef_p2.index.names if lev != 'label'])

    # Threshold
    coef_sig = coef_minp_by_metric < .05

    # Unstack metric to get replicates on rows
    coef_sig = coef_sig.unstack('metric')

    ## Aggregate over neurons
    # Mean over session * neuron within task * metric
    mfracsig = coef_sig.mean(level='task').T

    # Sort by mean
    mfracsig = mfracsig.loc[
        mfracsig.mean(1).sort_values().index
        ].copy()

    ## Venn by task
    task_l = ['detection', 'discrimination']
    f, axa = plt.subplots(1, 2, figsize=(4.75, 2))
    f.subplots_adjust(left=.05, right=.9, bottom=.05, top=.8, wspace=.6)
    for task in task_l:
        # Get ax
        ax = axa[task_l.index(task)]
        ax.set_title(task, pad=10)

        # Count every combination: one set of neuron ids per family
        task_coef_sig = coef_sig.loc[task]
        sets = [
            set(task_coef_sig.index[task_coef_sig['task'].values]),
            set(task_coef_sig.index[task_coef_sig['whisking'].values]),
            set(task_coef_sig.index[task_coef_sig['contact'].values]),
            ]
        venn_res = matplotlib_venn.venn3_unweighted(
            sets,
            ['task', 'whisking', 'contacts'],
            ax=ax,
            subset_label_formatter=lambda val: '{}%'.format(
                int(100 * val / len(task_coef_sig))),
            )

        # Pretty
        for label in venn_res.set_labels + venn_res.subset_labels:
            label.set_fontsize(12)

    ## Save
    f.savefig('VENN_FRAC_SIG_COEFS_BY_TASK_AGGREGATING_TIME_AND_WHISKER.svg')
    f.savefig('VENN_FRAC_SIG_COEFS_BY_TASK_AGGREGATING_TIME_AND_WHISKER.png', dpi=300)

    ## Stats
    stats_filename = 'STATS__VENN_FRAC_SIG_COEFS_BY_TASK_AGGREGATING_TIME_AND_WHISKER'
    with open(stats_filename, 'w') as fi:
        fi.write(stats_filename + '\n')
        fi.write('n = {} neurons\n'.format(len(fit_neurons)))
        fi.write('by task:\n{}\n'.format(fit_neurons.groupby('task').size()))
        fi.write('significance taken from coef_single_p, bonferroni'
            ' corrected within TWC family\n')
        fi.write('# of neurons not signif for any family:\n')
        # FIX: np.int was a deprecated alias removed in NumPy 1.24; use int
        fi.write(
            (coef_sig == False).all(1).astype(int).sum(
                level='task').to_string() + '\n')
    with open(stats_filename) as fi:
        lines = fi.readlines()
    print(''.join(lines))
if PLOT_TASK_COEFS_OVER_TIME:
    # Figures 7F / S7D: fraction of neurons significantly modulated by each
    # task variable (current/previous choice and outcome) over time in trial,
    # with and without opto sessions.
    ## Whether to pool
    # Results are similar across tasks
    include_task = 'both' # 'discrimination'

    ## Plot with and without dropping sessions with any non-sham opto trials
    for drop_opto in [False, True]:
        ## Slice task
        task_coefs = coef_wscale_df.xs(
            'task', level='metric', drop_level=False).copy()

        # Pool or not
        if include_task == 'both':
            task_coefs = task_coefs.droplevel('task')
        else:
            task_coefs = task_coefs.loc[include_task]
            task_coefs.index = task_coefs.index.remove_unused_levels()

        ## Drop opto
        if drop_opto:
            # Keep sessions that were sham, or had no opto at all
            include_session = ~(session_df['opto'] & ~session_df['sham'])
            include_session = include_session.index[include_session.values]
            task_coefs = task_coefs.loc[
                include_session.intersection(task_coefs.index.levels[0])]
            task_coefs.index = task_coefs.index.remove_unused_levels()

        ## Split task into separate variables
        # Convert index to DataFrame
        idxdf = task_coefs.index.to_frame().reset_index(drop=True)

        # Replace each task variable in turn
        sw_flag_l = [
            'current_choice', 'prev_choice', 'previous_outcome', 'current_outcome']
        for sw_flag in sw_flag_l:
            # Mask the labels belonging to this task variable
            mask = idxdf['label'].str.startswith(sw_flag).fillna(False)

            # Replace: metric becomes the variable name, label becomes the
            # integer time-bin suffix of the original label
            idxdf.loc[mask, 'metric'] = sw_flag
            idxdf.loc[mask, 'label'] = [
                int(label.split('_')[-1])
                for label in idxdf.loc[mask, 'label'].values]

        # Return index
        task_coefs.index = pandas.MultiIndex.from_frame(idxdf)
        task_coefs = task_coefs.sort_index()

        ## Further parameterize
        # Define signif
        # Could correct here. If not, expect 5% positives in each bin
        task_coefs['signif'] = task_coefs['coef_single_p'] < .05

        # abs coef
        task_coefs['abs_coef_single_z'] = task_coefs['coef_single_z'].abs()
        task_coefs['abs_scaled_coef_single'] = task_coefs['scaled_coef_single'].abs()

        ## Aggregate
        coef_metric_to_agg_l = ['signif', 'coef_single_z', 'scaled_coef_single',
            'abs_coef_single_z', 'abs_scaled_coef_single',]

        # By metric * label
        aggmean = task_coefs.groupby(['metric', 'label'])[coef_metric_to_agg_l].mean()
        aggerr = task_coefs.groupby(['metric', 'label'])[coef_metric_to_agg_l].sem()

        # Binomial the significant fraction
        signif = task_coefs['signif'].unstack(['session', 'neuron'])
        assert not signif.isnull().any().any()
        # FIX: np.int was a deprecated alias removed in NumPy 1.24; use int
        signif_err = pandas.DataFrame(
            [my.stats.binom_confint(data=row)
                for row in signif.astype(int).values],
            index=signif.index, columns=['lo', 'hi'],
            )
        signif_err = signif_err.unstack('metric').swaplevel(
            axis=1).sort_index(axis=1)

        ## Plot
        # left axis: choice; right axis: outcome
        f, axa = plt.subplots(1, 2, figsize=(5.5, 2.25), sharex=True, sharey=True)
        f.subplots_adjust(bottom=.225, left=.15, right=.95, wspace=.2)
        for coef_metric in ['signif']: #coef_metric_to_agg_l:
            ## Slice
            coef_mean = aggmean[coef_metric].unstack('metric')
            coef_err = aggerr[coef_metric].unstack('metric')

            # Account for bin center
            # Bins are 100 samples wide, so shift by half a bin
            assert (np.diff(coef_mean.index.values) == 100).all()
            assert (np.diff(coef_err.index.values) == 100).all()
            coef_mean.index += 50
            coef_err.index += 50

            ## Plot
            for metric in coef_mean.columns:
                topl_mean = coef_mean[metric]
                topl_err = coef_err[metric]

                # Plot kwargs: choice on the right axis, outcome on the left;
                # dashed for previous-trial variables
                if metric == 'current_choice':
                    ax = axa[1]
                    color = 'k' #'purple'
                    linestyle = '-'
                    zorder = 1
                elif metric == 'prev_choice':
                    ax = axa[1]
                    color = 'k' #'purple'
                    linestyle = '--'
                    zorder = 1
                elif metric == 'current_outcome':
                    ax = axa[0]
                    color = 'k' #'orange'
                    linestyle = '-'
                    zorder = 0
                elif metric == 'previous_outcome':
                    ax = axa[0]
                    color = 'k' #'orange'
                    linestyle = '--'
                    zorder = 0
                else:
                    # FIX: was a bare `1/0` debug crash; fail informatively
                    raise ValueError('unexpected metric: {}'.format(metric))

                # Plot
                # presumably bin index is in samples at 200 Hz, so /200
                # converts to seconds -- TODO confirm
                ax.plot(
                    topl_mean.index.values / 200.,
                    topl_mean,
                    color=color, linestyle=linestyle, zorder=zorder)
                if coef_metric == 'signif':
                    topl_err_lo = signif_err.loc[:, (metric, 'lo')]
                    topl_err_hi = signif_err.loc[:, (metric, 'hi')]
                    ax.fill_between(
                        x=topl_mean.index.values / 200.,
                        y1=(topl_err_lo).values,
                        y2=(topl_err_hi).values,
                        color=color, alpha=.2, lw=0)
                else:
                    ax.fill_between(
                        x=topl_mean.index.values / 200.,
                        y1=(topl_mean - topl_err).values,
                        y2=(topl_mean + topl_err).values,
                        color=color, alpha=.2, lw=0)

            ## Pretty
            for ax in axa:
                my.plot.despine(ax)
                ax.set_xlim((-2, 0.5))
                ax.set_xticks((-2, -1, 0))
                #~ ax.set_ylim((1, 3))
                #~ ax.set_xlabel('time in trial (s)')
                #~ ax.set_title(coef_metric)
                if coef_metric == 'signif':
                    ax.set_ylim((0, .55))
                    ax.set_yticks((0, .25, .5))
                    # Dashed line at the 5% chance level
                    ax.plot(ax.get_xlim(), [.05, .05], 'k--', lw=.8)

        ## Labels
        axa[1].set_title('choice')
        axa[0].set_title('outcome')
        axa[0].set_ylabel('fraction significant')
        f.text(.55, .04, 'time in trial (s)', ha='center', va='center')

        ## Legend
        axa[0].plot([-1.3, -.95], [.1, .1], 'k--')
        axa[0].plot([-1.3, -.95], [.18, .18], 'k-')
        axa[0].text(-.8, .1, 'previous trial', ha='left', va='center', size=12)
        axa[0].text(-.8, .18, 'current trial', ha='left', va='center', size=12)

        ## Save
        trailing_string = '{}_{}'.format(
            include_task, 'optodrop' if drop_opto else 'regular')
        f.savefig('PLOT_TASK_COEFS_OVER_TIME_{}.svg'.format(trailing_string))
        f.savefig('PLOT_TASK_COEFS_OVER_TIME_{}.png'.format(trailing_string), dpi=300)

        ## Stats
        stats_filename = 'STATS__PLOT_TASK_COEFS_OVER_TIME_{}'.format(trailing_string)
        with open(stats_filename, 'w') as fi:
            session_neuron_list = task_coefs.index.to_frame()[
                ['session', 'neuron']].drop_duplicates().reset_index(drop=True)
            fi.write(stats_filename + '\n')
            fi.write('n = {} neurons\n'.format(len(session_neuron_list)))
            fi.write('using coef_single_p < .05 to evaluate signif without correction\n')
            fi.write('error bars: 95% CIs from binom\n')
        with open(stats_filename, 'r') as fi:
            lines = fi.readlines()
        print(''.join(lines))
if PLOT_DISCRIMINATION_WHISKING_AMPL_FRACSIG_AND_MEAN_BY_NS_AND_STRATUM:
    # Figures 8A-8C: whisking-amplitude modulation by cell type (stratum * NS)
    # and depth: fraction significant, effect-size bars, and depth plots.
    ## Get data
    # Slice whisking_amplitude
    data = coef_wscale_df.xs('amplitude', level='label').copy()

    # Drop the metric level, now redundant after slicing a single label
    data = data.droplevel('metric')

    # Keep only discrimination for now
    #~ data = data.loc['discrimination'].copy()
    # But the results basically look the same if pooled
    data = data.droplevel('task')

    ## Slice coef_metric
    data = data[['scaled_coef_single', 'coef_single', 'coef_single_p']]

    # Convert scaled to FR gain / 10 degrees of whisking
    data['scaled_coef_single'] = data['scaled_coef_single'] * 10

    # Convert gain to delta Hz: (exp(coef) - 1) * baseline FR
    data['scaled_coef_single_hz'] = np.exp(
        data['scaled_coef_single']).mul(
        FR_overall.loc[data.index]).sub(
        FR_overall.loc[data.index])

    # Assess sign and significance
    data['positive'] = data['coef_single'] > 0
    data['signif'] = data['coef_single_p'] < .05
    data['pos_sig'] = data['positive'] & data['signif']
    data['neg_sig'] = ~data['positive'] & data['signif']

    ## Join on metadata
    data = data.join(
        big_waveform_info_df[['stratum', 'NS', 'layer', 'Z_corrected']],
        on=['session', 'neuron'])

    ## Aggregate over neurons within stratum * NS
    # coef_metric to aggregate
    coef_metric_l = [
        'scaled_coef_single', 'scaled_coef_single_hz', 'pos_sig', 'neg_sig',
        ]

    # Aggregate
    agg_res = extras.aggregate_coefs_over_neurons(data, coef_metric_l)

    ## Bar plot the effect size
    for effect_sz_col in ['scaled_coef_single']:
        # Figure handle
        f, ax = my.plot.figure_1x1_small()

        # Grouped bar plot
        my.plot.grouped_bar_plot(
            df=agg_res['mean'][effect_sz_col],
            index2plot_kwargs=extras.index2plot_kwargs__NS2color,
            yerrlo=agg_res['lo'][effect_sz_col],
            yerrhi=agg_res['hi'][effect_sz_col],
            ax=ax,
            group_name_fig_ypos=.175,
            elinewidth=1.5,
            )

        # Pretty
        my.plot.despine(ax)
        ax.set_xticks([])

        # Set the ylim in either firing rate gain or spikes
        if effect_sz_col == 'scaled_coef_single':
            # Deal with log-scale on yaxis
            coef_ticklabels = np.array([1, 1.2, 1.4])
            coef_ticks = np.log(coef_ticklabels)
            ax.set_ylim(np.log((1, 1.4)))
            ax.set_yticks(coef_ticks)
            ax.set_yticklabels(coef_ticklabels)
            ax.set_ylabel('firing rate gain\n(fold change / 10{})'.format(chr(176)))
        elif effect_sz_col == 'scaled_coef_single_hz':
            # Not a log scale
            coef_ticklabels = np.array([0, 2, 4])
            coef_ticks = coef_ticklabels
            ax.set_ylim((-.5, 4.5))
            ax.set_yticks(coef_ticks)
            ax.set_yticklabels(coef_ticklabels)

            # Line at zero
            xlim = ax.get_xlim()
            ax.plot(xlim, [0, 0], 'k-', lw=.75)
            ax.set_xlim(xlim)
            ax.set_ylabel('evoked firing rate\n(Hz / 10{})'.format(chr(176)))
        else:
            ax.set_ylabel(effect_sz_col)

        # Save
        f.savefig(
            'PLOT_DISCRIMINATION_WHISKING_AMPL_FRACSIG_AND_MEAN_'
            'BY_NS_AND_STRATUM_{}.svg'.format(effect_sz_col)
            )
        f.savefig(
            'PLOT_DISCRIMINATION_WHISKING_AMPL_FRACSIG_AND_MEAN_'
            'BY_NS_AND_STRATUM_{}.png'.format(effect_sz_col), dpi=300,
            )

    ## Pie chart the fraction significant
    # Extract pos_sig and neg_sig
    mfracsig = agg_res['mean'][['pos_sig', 'neg_sig']]

    # Pie chart
    f, ax = my.plot.figure_1x1_small()
    extras.horizontal_bar_pie_chart_signif(mfracsig, ax=ax)

    # Save
    f.savefig(
        'PLOT_DISCRIMINATION_WHISKING_AMPL_FRACSIG_AND_MEAN_BY_NS_AND_STRATUM'
        '_fracsig.svg')
    f.savefig(
        'PLOT_DISCRIMINATION_WHISKING_AMPL_FRACSIG_AND_MEAN_BY_NS_AND_STRATUM'
        '_fracsig.png', dpi=300)

    ## Stats on fracsig
    # Only do stats on this effect_sz_col
    effect_sz_col = 'scaled_coef_single'
    stats_filename = (
        'STATS__PLOT_DISCRIMINATION_WHISKING_AMPL_FRACSIG_AND_'
        'MEAN_BY_NS_AND_STRATUM'
        )
    with open(stats_filename, 'w') as fi:
        fi.write(stats_filename + '\n')
        fi.write('n = {} neurons total\n'.format(len(data)))
        fi.write('pooling across tasks because similar\n')
        fi.write('error bars: 95% CIs by bootstrapping\n')
        # One paragraph per (stratum, NS) group
        for (stratum, NS) in agg_res['mean'].index:
            n_neurons = agg_res['agg'].size().loc[(stratum, NS)]
            frac_pos_sig = agg_res['mean'].loc[(stratum, NS), 'pos_sig']
            frac_neg_sig = agg_res['mean'].loc[(stratum, NS), 'neg_sig']
            effect_mean = agg_res['mean'].loc[(stratum, NS), effect_sz_col]

            # Currently this is from bootstrapping
            # Make sure it changes in sync with the above
            effect_errlo = agg_res['lo'].loc[(stratum, NS), effect_sz_col]
            effect_errhi = agg_res['hi'].loc[(stratum, NS), effect_sz_col]

            # Dump
            fi.write('{} {}\n'.format(stratum, 'NS' if NS else 'RS'))
            fi.write('effect_sz_col: {}\n'.format(effect_sz_col))
            fi.write('n = {} neurons\n'.format(n_neurons))
            fi.write(
                'effect of whisking: mean {:.3f}, CI {:.3f} - {:.3f}\n'.format(
                effect_mean,
                effect_errlo,
                effect_errhi,
                ))
            fi.write(
                'effect of whisking: exp(mean) {:.3f}, exp(CI) {:.3f} - {:.3f}\n'.format(
                np.exp(effect_mean),
                np.exp(effect_errlo),
                np.exp(effect_errhi),
                ))
            fi.write('frac pos sig: {} / {} = {:.4f}\n'.format(
                int(n_neurons * frac_pos_sig),
                n_neurons,
                frac_pos_sig,
                ))
            fi.write('\n')

    # Print
    with open(stats_filename) as fi:
        print(''.join(fi.readlines()))

    ## Plot versus depth
    for effect_sz_col in ['scaled_coef_single', 'scaled_coef_single_hz']:
        # Make figure handle
        f, ax = my.plot.figure_1x1_small()

        # These will be yaxis (gain is plotted in log space; Hz is linear)
        if effect_sz_col == 'scaled_coef_single':
            ylim = np.log((.4, 2.5))
            coef_ticklabels = np.array([.5, 1, 2])
            coef_ticks = np.log(coef_ticklabels)
        else:
            ylim = (-8, 20)
            coef_ticklabels = np.array([0, 10, 20])
            coef_ticks = coef_ticklabels

        # Plot
        my.plot.smooth_and_plot_versus_depth(
            data, effect_sz_col, ax=ax, layer_boundaries_ylim=ylim)

        # Set y-axis
        ax.set_yticks(coef_ticks)
        ax.set_yticklabels(coef_ticklabels)
        ax.set_ylim(ylim)
        if effect_sz_col == 'scaled_coef_single':
            ax.set_ylabel('firing rate gain\n(fold change / 10{})'.format(chr(176)))
        else:
            ax.set_ylabel('{} firing rate\n(Hz / 10{})'.format(DELTA, chr(176)))

        # Plot unity line
        xlim = ax.get_xlim()
        ax.plot(xlim, [0, 0], 'k-', lw=.8)
        ax.set_xlim(xlim)

        ## Save
        f.savefig(
            'PLOT_DISCRIMINATION_WHISKING_AMPL_FRACSIG_AND_MEAN_BY_NS_AND_STRATUM'
            '_vdepth_{}.svg'.format(effect_sz_col))
        f.savefig(
            'PLOT_DISCRIMINATION_WHISKING_AMPL_FRACSIG_AND_MEAN_BY_NS_AND_STRATUM'
            '_vdepth_{}.png'.format(effect_sz_col), dpi=300)
## Conventional way to load contact responses to each whisker
def extract_contact_responses_to_each_whisker(
    coef_wscale_df,
    big_waveform_info_df=None,
    coef_metric='scaled_coef_single',
    include_task=('discrimination',),
    floor_response_at_zero=False,
    drop_always_suppressed=False,
    drop_C0=True,
    drop_neurons_missing_whisker=True,
    combine_off_target=True,
    ):
    """Extract contact responses of each whisker

    coef_wscale_df : DataFrame
        Must have index levels 'metric' (containing 'contact_binarized'),
        'task', 'session', 'neuron', and 'label' (the whisker), and a
        column named by `coef_metric`. 'task' must be the outermost level.
    big_waveform_info_df : DataFrame or None
        Joins on metadata, or does nothing if None
    coef_metric : which coef_metric to use
        Using the "scaled" metrics puts the results in a per-contact basis
        Probably better to use _single rather than _best, because otherwise
        different regularization by neuron contaminates the results
        coef_best_z is used for significance
    include_task : tuple of str
        Task names to keep (passed as a list to .loc so the level survives)
    floor_response_at_zero : bool
        If True, clip negative (suppressive) coefficients to zero
    drop_always_suppressed : bool
        If True, drop neurons with no positive response to any whisker
    drop_C0 : bool
        If True, drop the C0 whisker, which is often missing
    drop_neurons_missing_whisker : bool
        If True, drop neurons missing a coefficient for any of the whiskers
        C1-C3 (or C0-C3 if drop_C0 is False)
    combine_off_target : bool
        If True, replace all strings in 'recording_location' that are not
        in the C-row with the string 'off'

    Returns : DataFrame
        Indexed by (task, session, neuron), one column per whisker, plus
        metadata columns if big_waveform_info_df was provided.
    """
    ## Get data
    # Slice contact_binarized
    data = coef_wscale_df.xs('contact_binarized', level='metric').copy()

    # Slice coef_metric
    data = data[coef_metric]

    # Floor negative responses to zero
    if floor_response_at_zero:
        data[data < 0] = 0

    # Unstack label (one column per whisker)
    data = data.unstack('label')
    if drop_C0:
        # Drop C0 which is often missing
        # axis must be passed by keyword (positional axis removed in pandas 2.0)
        data = data.drop('C0', axis=1)

    # Drop any neuron missing C1-C3
    if drop_neurons_missing_whisker:
        data = data.dropna()

    # Include task
    # Using list prevents droplevel
    data = data.loc[list(include_task)]

    # Drop neurons that are suppressed by every whisker
    if drop_always_suppressed:
        data = data[(data > 0).any(axis=1)]

    ## Join on metadata
    if big_waveform_info_df is not None:
        data = data.join(
            big_waveform_info_df[['NS', 'layer', 'stratum', 'Z_corrected',
            'recording_location', 'crow_recording_location']],
            on=['session', 'neuron'])

        # Replace all off-target locations with the string 'off'
        if combine_off_target:
            off_mask = (
                data['recording_location'] != data['crow_recording_location']
            )
            data.loc[off_mask, 'recording_location'] = 'off'

    return data
if PLOT_PCA_CONTACT_COEFS:
    # PCA over the per-whisker contact responses (C1-C3) of each neuron,
    # pooling detection and discrimination neurons, and plot the components.
    ## Get data
    data = extract_contact_responses_to_each_whisker(
        coef_wscale_df, big_waveform_info_df,
        include_task=('detection', 'discrimination',),
    )

    ## PCA params
    # Setting both to False does no standardization
    # With standardization: PC0 is response strength, PC1 is topographic
    # Without standardization: PC0 already shows the C1 preference,
    # just not as clean in general
    # Standardizing each neuron to mean zero gets rid of PC0, but otherwise
    # basically the same.
    standardize_mean = True
    standardize_std = True

    ## Standardize
    whiskers_to_pca = ['C1', 'C2', 'C3']

    # Standardize each feature (whisker), leaving each neuron with its mean
    scaler = sklearn.preprocessing.StandardScaler(
        with_mean=standardize_mean, with_std=standardize_std)
    scaled_arr = scaler.fit_transform(
        data[whiskers_to_pca].values)

    # Keep track of the index
    to_pca = pandas.DataFrame(
        scaled_arr, index=data.index, columns=whiskers_to_pca)

    # Do PCA
    pca = sklearn.decomposition.PCA()
    transformed = pca.fit_transform(to_pca)
    transdf = pandas.DataFrame(transformed, index=to_pca.index)

    ## Extract components
    # Rows of components_df are whiskers, columns are PCs (after .T)
    components_df = pandas.DataFrame(pca.components_,
        columns=to_pca.columns).T

    # Make this one a consistent sign
    # (PCA component sign is arbitrary; orient PC1 so C1 weight > C3 weight)
    if components_df.loc['C1', 1] < components_df.loc['C3', 1]:
        components_df.loc[:, 1] = -components_df.loc[:, 1].values

    ## Plot the PCs
    f, ax = my.plot.figure_1x1_small()
    line0, = ax.plot(components_df[0], label='PC1')
    line1, = ax.plot(components_df[1], label='PC2')
    #~ line2, = ax.plot(components_df[2], label='PC3')
    f.text(.85, .9, 'PC1', color=line0.get_color())
    f.text(.85, .82, 'PC2', color=line1.get_color())
    #~ f.text(.85, .74, 'PC3', color=line2.get_color())
    my.plot.despine(ax)
    ax.set_ylim((-1, 1))
    ax.set_yticks((-1, 0, 1))
    ax.plot([0, 2], [0, 0], 'k--', lw=.8)
    ax.set_xlabel('contacting whisker')
    ax.set_ylabel('weight')

    ## Save
    f.savefig('PLOT_PCA_CONTACT_COEFS.svg')
    f.savefig('PLOT_PCA_CONTACT_COEFS.png', dpi=300)

    ## Stats
    n_by_task = data.reset_index().groupby('task').size()
    stats_filename = 'STATS__PLOT_PCA_CONTACT_COEFS'
    with open(stats_filename, 'w') as fi:
        fi.write(stats_filename + '\n')
        fi.write('# neurons by task:\n' + n_by_task.to_string() + '\n\n')
        fi.write(
            'PCA standardizing both mean and std over features (whiskers), '
            'leaving each neuron with its mean\n')
        fi.write('including only neurons for which we have coefficients for each whisker\n')
        fi.write('pooling over tasks because similar\n')
        fi.write('PC1 explained variance: {:0.2f}\n'.format(pca.explained_variance_ratio_[0]))
        fi.write('PC2 explained variance: {:0.2f}\n'.format(pca.explained_variance_ratio_[1]))

    # Echo the stats file to stdout
    with open(stats_filename) as fi:
        lines = fi.readlines()
        print(''.join(lines))
if HEATMAP_CONTACT_COEF_BY_RECLOC:
    # Heatmap of each neuron's contact response to each whisker (rows),
    # one axis per recording location, neurons (columns) sorted by
    # whisker preference (C1 - C3).
    ## Get data
    data = extract_contact_responses_to_each_whisker(
        coef_wscale_df, big_waveform_info_df,
        include_task=('detection', 'discrimination'))

    ## Separately plot each task (figure)
    # The detection one actually looks pretty cool, but just do discrimination
    task_l = ['detection', 'discrimination']
    task_l = ['discrimination']
    for task in task_l:
        task_data = data.loc[task]

        ## Get numbers of axes and their relative widths
        # Number of neurons in each recloc, strict split
        split_metric = 'recording_location'
        recloc2N = task_data[split_metric].value_counts()

        # Arrange the axes this way
        recording_location_l1 = ['C1', 'C2', 'C3']
        recording_location_l2 = ['fill', 'off', 'fill',]

        # Each plot will have 4 axes, the last of which is a colorbar
        # Calculate empty 'fill' axes for the second one
        # BUGFIX: was recloc2N[['C1', 'C1', 'C3']], which counted C1 twice
        # and omitted C2; the fill width is the total C-row width minus 'off'
        fill = recloc2N[['C1', 'C2', 'C3']].sum() - recloc2N['off']
        recording_location_N1 = np.array(
            [recloc2N['C1'], recloc2N['C2'], recloc2N['C3']])
        recording_location_N2 = np.array(
            [0, recloc2N['off'], fill])

        # width_ratios
        width_ratios1 = recording_location_N1 / recording_location_N1.sum()
        width_ratios2 = recording_location_N2 / recording_location_N2.sum()

        ## Create figure handles
        f1, axa1 = plt.subplots(
            1, len(width_ratios1), figsize=(8.4, 1.1),
            gridspec_kw={'width_ratios': width_ratios1})
        f1.subplots_adjust(left=.05, right=.95, bottom=.25, top=.7, wspace=.1)
        f2, axa2 = plt.subplots(
            1, len(width_ratios2), figsize=(8.4, 1.1),
            gridspec_kw={'width_ratios': width_ratios2})
        f2.subplots_adjust(left=.05, right=.95, bottom=.25, top=.7, wspace=.1)

        # Only the middle ('off') axis of the second row is shown
        axa2[0].set_visible(False)
        axa2[2].set_visible(False)

        # Add the cbar ax
        cbar_ax = f2.add_axes([.875, .27, .008, .4])

        ## Iterate over recloc (axis)
        for recloc in recording_location_l1 + recording_location_l2:
            # Get ax unless 'fill'
            if recloc == 'fill':
                continue
            elif recloc in recording_location_l1:
                ax = axa1[recording_location_l1.index(recloc)]
                f = f1
            else:
                ax = axa2[recording_location_l2.index(recloc)]
                f = f2
            recloc_data = task_data[task_data[split_metric] == recloc]

            # Title by recloc
            if recloc == 'off':
                ax.set_title('outside C-row')
            else:
                ax.set_title('{} column'.format(recloc))

            # Imshow the data sorted by the component
            topl = recloc_data[['C1', 'C2', 'C3']].T

            # Sort by C1-C3
            topl = topl.reindex(
                (topl.loc['C1'] - topl.loc['C3']).sort_values().index,
                axis=1)

            # Imshow
            my.plot.imshow(topl.values, ax=ax)

            # Lims
            ax.set_xticks((0, topl.shape[1] - 1))
            ax.set_xticklabels((1, topl.shape[1]))
            ax.tick_params(labelsize=12)
            ax.set_xlim((-.5, topl.shape[1] - .5))
            ax.set_ylim((topl.shape[0] - .5, -.5))
            if recloc in ['off', 'C1']:
                ax.set_yticks(range(topl.shape[0]))
                ax.set_yticklabels(topl.index.values, size=12)
            else:
                ax.set_yticklabels([])
                ax.set_yticks([])

        ## Pretty
        for f, axa in zip([f1, f2], [axa1, axa2]):
            # Set color limits
            my.plot.harmonize_clim_in_subplots(fig=f, clim=[-1, 1])

            # colorbar in the last ax
            if f is f2:
                cb = f.colorbar(mappable=f.axes[1].images[0], cax=cbar_ax)

                # Data are log(gain); label the colorbar in gain units
                ticklabels = [.5, 1, 2]
                cb.set_ticks(np.log(ticklabels))
                cb.set_ticklabels(ticklabels)
                cb.ax.tick_params(labelsize=12)

        # Label only the second one
        #~ f2.text(.5, .01, 'neurons sorted by whisker preference',
        #~ ha='center', va='bottom')
        #~ f1.text(.015, .5, 'contacting\nwhisker', ha='center', va='center', rotation=90)
        f2.text(.825, .46, 'firing\nrate\ngain', ha='center', va='center')

        ## Save
        f1.savefig('HEATMAP_CONTACT_COEF_BY_RECLOC_{}_top.svg'.format(task))
        f1.savefig('HEATMAP_CONTACT_COEF_BY_RECLOC_{}_top.png'.format(task), dpi=300)
        f2.savefig('HEATMAP_CONTACT_COEF_BY_RECLOC_{}_bottom.svg'.format(task))
        f2.savefig('HEATMAP_CONTACT_COEF_BY_RECLOC_{}_bottom.png'.format(task), dpi=300)

        ## Stats
        stats_filename = 'STATS__HEATMAP_CONTACT_COEF_BY_RECLOC_{}'.format(task)
        with open(stats_filename, 'w') as fi:
            fi.write(stats_filename + '\n')
            fi.write('n = {} neurons\n'.format(len(data.loc[task])))
            fi.write('{} only\n'.format(task))
            fi.write('excluding L1 and L6b and neurons without C1-C3 coef\n')
            fi.write('using scaled_coef_single to permit comparison\n')
            fi.write('splitting based on recording_location (strict)\n')
            fi.write('# neurons per recloc:{}\n'.format(
                data.loc[task].groupby('recording_location').size().to_string()))

        # Echo the stats file to stdout
        with open(stats_filename) as fi:
            lines = fi.readlines()
            print(''.join(lines))
if BAR_PLOT_CONTACT_COEFS_EACH_WHISKER_BY_TASK_STRATUM_AND_NS:
    # Grouped bar plot of mean contact coefficient per whisker,
    # split by task (columns), stratum (rows), and cell type (NS vs RS).
    ## Get data
    # Dropping neurons missing any whisker (default) loses all 200CR sessions
    # because of C3
    data = extract_contact_responses_to_each_whisker(
        coef_wscale_df,
        big_waveform_info_df,
        include_task=('detection', 'discrimination',),
    )

    ## Plot
    # axes are stratum
    # Each axis is vs wic and NS
    NS_l = [False, True]
    whisker_names = ['C1', 'C2', 'C3']
    stratum_l = ['superficial', 'deep']

    # Plot-callback helpers for my.plot.grouped_bar_plot
    def index2plot_kwargs(ser):
        # NS (narrow-spiking, putative inhibitory) in blue, RS in red
        color = 'b' if ser['NS'] else 'r'
        return {'fc': color}

    def index2label(ser):
        # 'wic' = whisker in contact
        return ser['wic']

    def group_index2group_label(NS):
        if NS:
            return 'inhib'
        else:
            return 'excit'

    ## Figure handles
    task_l = ['detection', 'discrimination']
    f, axa = plt.subplots(
        len(task_l),
        len(stratum_l),
        figsize=(5, 4.3),
        sharey='row', sharex=True,
    )
    f.subplots_adjust(wspace=.4, hspace=.6)

    ## Iterate over task
    for task in task_l:
        # Group by stratum
        gobj = data.loc[task].groupby('stratum')

        # Iterate over stratum (axis)
        for stratum, this_data in gobj:
            ## Slice
            # NOTE(review): subplots was created as (task rows x stratum cols)
            # but is indexed here as axa[stratum, task]; both are length 2 so
            # this runs — presumably rows are strata; confirm against figure.
            try:
                ax = axa[
                    stratum_l.index(stratum),
                    task_l.index(task),
                ]
            except ValueError:
                continue
            ax.set_title(stratum)

            ## Aggregate
            agg = this_data.groupby('NS')
            meaned = agg[whisker_names].mean().stack()
            agg_sem = agg[whisker_names].sem().stack()
            meaned.index.names = ['NS', 'wic']

            ## Plot
            my.plot.grouped_bar_plot(
                meaned,
                yerrlo=(meaned - agg_sem), #agg_err['lo'], #(meaned - agg_sem),
                yerrhi=(meaned + agg_sem), #agg_err['hi'], #(meaned + agg_sem),
                ax=ax,
                index2label=index2label,
                index2plot_kwargs=index2plot_kwargs,
                group_index2group_label=lambda s: None,
                group_name_fig_ypos=-.1,
                xtls_kwargs={'size': 12},
            )

    ## Pretty
    # y-data are log(gain); label ticks in gain units
    for ax in axa.flatten():
        if ax in axa[0]:
            # Superficial
            yticklabels = np.array([1, 2])
            yticks = np.log(yticklabels)
            ylim = np.log([1, 2])
        else:
            # Deep
            yticklabels = np.array([1, 1.3])
            yticks = np.log(yticklabels)
            ylim = np.log([1, 1.3])
        my.plot.despine(ax)
        ax.set_yticks(yticks)
        ax.set_yticklabels(yticklabels)
        ax.set_ylim(ylim)

    # Label columns by task
    f.text(.289, .98, task_l[0], ha='center', va='center')
    f.text(.74, .98, task_l[1], ha='center', va='center')
    f.text(.025, .5, 'firing rate gain', ha='center', va='center', rotation=90)

    ## Save
    f.savefig(
        'BAR_PLOT_CONTACT_COEFS_EACH_WHISKER_BY_TASK_STRATUM_AND_NS.svg')
    f.savefig(
        'BAR_PLOT_CONTACT_COEFS_EACH_WHISKER_BY_TASK_STRATUM_AND_NS.png', dpi=300)

    ## Stats
    n_by_cell_type = data.reset_index().groupby(
        ['task', 'stratum', 'NS']).size().unstack('task').T
    stats_filename = 'STATS__BAR_PLOT_CONTACT_COEFS_EACH_WHISKER_BY_TASK_STRATUM_AND_NS'
    with open(stats_filename, 'w') as fi:
        fi.write(stats_filename + '\n')
        fi.write('n = {} neurons ({} det., {} disc.)\n'.format(
            len(data),
            len(data.loc['detection']),
            len(data.loc['discrimination']),
        ))
        fi.write('finer-grained N:\n' + n_by_cell_type.to_string() + '\n')
        fi.write('error bars: SEM\n')
        fi.write('including only neurons for which we have contacts by each whisker\n')

    # Echo the stats file to stdout
    with open(stats_filename) as fi:
        lines = fi.readlines()
        print(''.join(lines))
if BAR_PLOT_CONTACT_COEFS_EACH_WHISKER_BY_TASK:
    # Bar plot of mean contact coefficient per whisker, one axis per task,
    # with a repeated-measures ANOVA on whisker identity and paired post-hocs.
    ## Get data
    # Dropping neurons missing any whisker (default) loses 181212_200CR and
    # 181213_200CR because of C3
    data = extract_contact_responses_to_each_whisker(
        coef_wscale_df,
        big_waveform_info_df,
        include_task=('detection', 'discrimination',),
    )

    ## Plot
    # axes are stratum
    # Each axis is vs wic and NS
    NS_l = [False, True]
    whisker_names = ['C1', 'C2', 'C3']
    stratum_l = ['superficial', 'deep']

    # Plot-callback helpers for my.plot.grouped_bar_plot
    def index2plot_kwargs(ser):
        return {'fc': 'w', 'ec': 'k'}

    def index2label(ser):
        # 'wic' = whisker in contact
        return ser['wic']

    def group_index2group_label(NS):
        if NS:
            return 'inhib'
        else:
            return 'excit'

    ## Plot handles
    task_l = ['detection', 'discrimination']

    # Figure handles
    #~ f, axa = my.plot.figure_1x2_small(sharey=True, sharex=True)
    f, axa = plt.subplots(1, 2, figsize=(5, 3), sharex=True, sharey=True)
    f.subplots_adjust(bottom=.2, left=.18, right=.95, wspace=.4, top=.825)

    ## Aggregate
    groupby_l = ['task']
    agg = data.groupby(groupby_l)

    # Mean and SEM
    agg_mean = agg[whisker_names].mean()
    agg_err = agg[whisker_names].sem()
    agg_mean.columns.name = 'wic'
    agg_err.columns.name = 'wic'

    ## Stats
    REPEATED_MEASURES = False

    # Extract variables to consider
    data_table = data.reset_index()[
        ['task', 'session', 'neuron', 'C1', 'C2', 'C3', 'NS', 'stratum',
        'crow_recording_location']
    ]

    # Shorten this name
    data_table = data_table.rename(columns={'crow_recording_location': 'recloc'})

    # Construct subject id
    data_table['subject'] = [
        '{}-{}'.format(session, neuron) for session, neuron in
        zip(data_table['session'], data_table['neuron'])]
    # axis must be a keyword since pandas 2.0 (was positional: .drop(..., 1))
    data_table = data_table.drop(['session', 'neuron'], axis=1)

    if REPEATED_MEASURES:
        # Ideally, treat response to each whisker as a repeated measure
        # But this requires between-subjects factors not yet implemented
        # Stack the whisker
        data_table = data_table.set_index(
            [col for col in data_table.columns if col not in ['C1', 'C2', 'C3']])
        data_table.columns.name = 'whisker'
        data_table = data_table.stack().rename('response').reset_index()
    else:
        # Simplify as C3 - C1
        data_table['response'] = data_table['C1'] - data_table['C3']

    # Define formula
    formula = (
        "response ~ task + stratum + NS + "
        "C(recloc, levels=['off','C1','C2','C3'])"
    )

    # Build linear model
    lm = ols(formula, data_table).fit()

    # Run ANOVA
    # This reveals that task, stratum, and recloc are the most important factors
    # NS only slightly matters
    # discrimination, superficial, and C1 recloc are most associated with C1>C3 effect
    aov = anova_lm(lm)

    ## Simple AnovaRM on whisker, separately by task only
    # Extract variables to consider
    data_table = data.reset_index()[
        ['task', 'session', 'neuron', 'C1', 'C2', 'C3']
    ]

    # Construct subject id
    data_table['subject'] = [
        '{}-{}'.format(session, neuron) for session, neuron in
        zip(data_table['session'], data_table['neuron'])]
    # axis must be a keyword since pandas 2.0 (was positional: .drop(..., 1))
    data_table = data_table.drop(['session', 'neuron'], axis=1)

    # Separate AnovaRM on each task, leaving only whisker
    data_table = data_table.set_index(['task', 'subject'])
    data_table.columns.name = 'whisker'
    data_table = data_table.stack().rename('response')

    # Separately for each task
    task_posthoc_l = []
    task_aov_l = []
    for task in task_l:
        # Slice data for this task
        task_data = data_table.loc[task].reset_index()

        # Run AnovaRM on task_data
        aov = AnovaRM(
            task_data, depvar='response', subject='subject',
            within=['whisker'])
        res = aov.fit()

        # Extract pvalue
        aov_pvalue = res.anova_table.loc['whisker', 'Pr > F']
        task_aov_l.append((task, aov_pvalue))

        # Post-hoc
        for w0 in ['C1', 'C2', 'C3']:
            for w1 in ['C1', 'C2', 'C3']:
                # Avoid double-testing or self-testing
                if w0 >= w1:
                    continue

                # Extract data from each whisker
                data0 = task_data.loc[task_data['whisker'] == w0, 'response'].values
                data1 = task_data.loc[task_data['whisker'] == w1, 'response'].values

                # Paired
                pvalue = scipy.stats.ttest_rel(data0, data1)[1]

                # Store
                task_posthoc_l.append((task, w0, w1, pvalue))

    # Concat posthoc
    # Currently this reveals a barely nonsig anova for detection, and
    # a highly sig anova for discrimination. For discrimination, all
    # whiskers highly sig. For detection, C2 slightly differs from the others.
    # The detection results are so nearing signif that I'm not sure whether
    # to rely on them.
    task_aov_pvalue = pandas.DataFrame.from_records(
        task_aov_l, columns=['task', 'aov_pvalue']).set_index(
        'task')['aov_pvalue']
    task_posthoc_df = pandas.DataFrame.from_records(
        task_posthoc_l, columns=['task', 'w0', 'w1', 'pvalue']).set_index(
        ['task', 'w0', 'w1'])['pvalue'].sort_index()

    ## Iterate over task
    for task in agg_mean.index:
        ## Slice by task
        topl_mean = agg_mean.loc[task]
        topl_err = agg_err.loc[task]

        ## Get ax
        ax = axa[
            task_l.index(task),
        ]
        ax.set_title('{}'.format(task), pad=15)

        ## Plot
        my.plot.grouped_bar_plot(
            topl_mean,
            yerrlo=(topl_mean - topl_err),
            yerrhi=(topl_mean + topl_err),
            ax=ax,
            index2label=index2label,
            index2plot_kwargs=index2plot_kwargs,
            group_index2group_label=group_index2group_label,
            group_name_fig_ypos=-.1,
            xtls_kwargs={'size': 12},
        )

        ## Stats
        # Significance line and star above the bars
        stats_yval = np.log(1.28)
        ax.plot([0, 2], [stats_yval] * 2, 'k-', lw=.75)
        task_pval = task_aov_pvalue.loc[task]
        task_sig_str = my.stats.pvalue_to_significance_string(task_pval)
        ax.text(1, stats_yval, task_sig_str, ha='center', va='bottom', size=12)

    ## Pretty
    # y-data are log(gain); label ticks in gain units
    yticklabels = np.array([1, 1.1, 1.2, 1.3])
    yticks = np.log(yticklabels)
    ylim = np.log([1, 1.3])
    for ax in axa.flatten():
        my.plot.despine(ax)
        ax.set_yticks(yticks)
        ax.set_yticklabels(yticklabels)
        ax.set_ylim(ylim)

    ## Pretty
    axa[0].set_ylabel('firing rate gain\n(fold / contact)')
    f.text(.55, .05, 'contacting whisker', ha='center', va='center')

    ## Save
    f.savefig('BAR_PLOT_CONTACT_COEFS_EACH_WHISKER_BY_TASK.svg')
    f.savefig('BAR_PLOT_CONTACT_COEFS_EACH_WHISKER_BY_TASK.png', dpi=300)

    ## Stats
    stats_filename = 'STATS__BAR_PLOT_CONTACT_COEFS_EACH_WHISKER_BY_TASK'
    with open(stats_filename, 'w') as fi:
        fi.write(stats_filename + '\n')
        fi.write('n = {} neurons ({} det., {} disc.)\n'.format(
            len(data),
            len(data.loc['detection']),
            len(data.loc['discrimination']),
        ))
        fi.write('excluding 200CR\n')
        fi.write('error bars: SEM\n')
        fi.write('anova by task:\n{}\n'.format(task_aov_pvalue.to_string()))
        fi.write('posthoc paired t-test by task:\n{}\n'.format(
            task_posthoc_df.to_string()))

    # Echo the stats file to stdout
    with open(stats_filename) as fi:
        lines = fi.readlines()
        print(''.join(lines))
if PLOT_DISCRIMINATION_CONTACTS_FRACSIG_AND_MEAN_BY_NS_AND_STRATUM:
    # Effect of contacts (meaned over whiskers) by cell type and stratum:
    # bar plot of effect size, pie chart of fraction significant, and
    # a smoothed effect-versus-depth plot.
    ## Get data
    # Slice contact_binarized
    data = coef_wscale_df.xs('contact_binarized', level='metric').copy()

    # Keep only discrimination for now
    #~ data = data.loc['discrimination'].copy()
    # Results pretty similar if pooled
    data = data.droplevel('task')

    ## Slice coef_metric
    # Using the "scaled" metrics puts the results in a per-contact basis
    data = data[['scaled_coef_single', 'coef_single_p']]

    ## Aggregate over whiskers
    # Unstack whisker
    data = data.unstack('label')

    # Drop C0, though since we mean over whiskers, this might be unnecessary
    data = data.drop('C0', axis=1, level='label')
    data.columns = data.columns.remove_unused_levels()

    # Drop neurons missing whiskers
    # (axis passed by keyword: positional axis deprecated for any/all)
    drop_mask = data.isnull().any(axis=1)
    if drop_mask.sum() > 0:
        print("warning: dropping {} neurons\n".format(drop_mask.sum()))
        data = data.loc[~drop_mask]
        data.index = data.index.remove_unused_levels()

    # Mean response over whiskers
    # Could consider maxing over whiskers here, but that only slight
    # raises responses and produces some annoying outliers
    meaned_responses = data['scaled_coef_single'].mean(axis=1)

    # Adjust p-value over whiskers
    # Use Bonferroni to control FWER
    adjusted_pvalues_l = []
    for row in data['coef_single_p'].values:
        adjusted_pvalues_l.append(
            statsmodels.stats.multitest.multipletests(
                row, method='bonferroni')[1])
    adjusted_pvalues = pandas.DataFrame(
        adjusted_pvalues_l,
        index=data['coef_single_p'].index,
        columns=data['coef_single_p'].columns,
    )

    # Min p-value over whiskers
    adjusted_pvalues = adjusted_pvalues.min(axis=1)

    # Concat response and pvalue
    data = pandas.concat([
        meaned_responses,
        adjusted_pvalues,
    ], keys=['scaled_coef_single', 'coef_single_p'], axis=1,
        names=['coef_metric'])

    ## Scaling
    # Convert gain to delta Hz
    data['scaled_coef_single_hz'] = np.exp(
        data['scaled_coef_single']).mul(
        FR_overall.loc[data.index]).sub(
        FR_overall.loc[data.index])

    # Assess sign and significance
    data['positive'] = data['scaled_coef_single'] > 0
    data['signif'] = data['coef_single_p'] < .05
    data['pos_sig'] = data['positive'] & data['signif']
    data['neg_sig'] = ~data['positive'] & data['signif']

    ## Join on metadata
    data = data.join(
        big_waveform_info_df[['stratum', 'NS', 'layer', 'Z_corrected']],
        on=['session', 'neuron'])

    ## Aggregate over neurons within stratum * NS
    # coef_metric to aggregate
    coef_metric_l = [
        'scaled_coef_single', 'scaled_coef_single_hz', 'pos_sig', 'neg_sig',
    ]

    # Aggregate
    agg_res = extras.aggregate_coefs_over_neurons(data, coef_metric_l)

    ## Bar plot the effect size
    for effect_sz_col in ['scaled_coef_single']:
        # Figure handle
        f, ax = my.plot.figure_1x1_small()

        # Grouped bar plot
        my.plot.grouped_bar_plot(
            df=agg_res['mean'][effect_sz_col],
            index2plot_kwargs=extras.index2plot_kwargs__NS2color,
            yerrlo=agg_res['lo'][effect_sz_col],
            yerrhi=agg_res['hi'][effect_sz_col],
            ax=ax,
            group_name_fig_ypos=.175,
            elinewidth=1.5,
        )

        # Pretty
        my.plot.despine(ax)
        ax.set_xticks([])

        # Set the ylim in either firing rate gain or spikes
        if effect_sz_col == 'scaled_coef_single':
            # Deal with log-scale on yaxis
            coef_ticklabels = np.array([1, 1.2, 1.4])
            coef_ticks = np.log(coef_ticklabels)
            ax.set_ylim(np.log((1, 1.5)))
            ax.set_yticks(coef_ticks)
            ax.set_yticklabels(coef_ticklabels)
            ax.set_ylabel('firing rate gain\n(fold change / contact)')
        elif effect_sz_col == 'scaled_coef_single_hz':
            # Not a log scale
            coef_ticklabels = np.array([0, 3, 6])
            coef_ticks = coef_ticklabels
            ax.set_ylim([0, 6])
            ax.set_yticks(coef_ticks)
            ax.set_yticklabels(coef_ticklabels)
            ax.set_ylabel('evoked firing rate\n(Hz / contact)')
        else:
            ax.set_ylabel(effect_sz_col)

        # Save
        f.savefig(
            'PLOT_DISCRIMINATION_CONTACTS_FRACSIG_AND_MEAN_'
            'BY_NS_AND_STRATUM_{}.svg'.format(effect_sz_col)
        )
        f.savefig(
            'PLOT_DISCRIMINATION_CONTACTS_FRACSIG_AND_MEAN_'
            'BY_NS_AND_STRATUM_{}.png'.format(effect_sz_col), dpi=300,
        )

    ## Pie chart the fraction significant
    # Extract pos_sig and neg_sig
    mfracsig = agg_res['mean'][['pos_sig', 'neg_sig']]

    # Pie chart
    f, ax = my.plot.figure_1x1_small()
    extras.horizontal_bar_pie_chart_signif(mfracsig, ax=ax)

    # Save
    f.savefig(
        'PLOT_DISCRIMINATION_CONTACTS_FRACSIG_AND_MEAN_BY_NS_AND_STRATUM'
        '_fracsig.svg')
    f.savefig(
        'PLOT_DISCRIMINATION_CONTACTS_FRACSIG_AND_MEAN_BY_NS_AND_STRATUM'
        '_fracsig.png', dpi=300)

    ## Stats on fracsig
    # Only do stats on this effect_sz_col
    effect_sz_col = 'scaled_coef_single'
    stats_filename = (
        'STATS__PLOT_DISCRIMINATION_CONTACTS_FRACSIG_AND_'
        'MEAN_BY_NS_AND_STRATUM'
    )
    with open(stats_filename, 'w') as fi:
        fi.write(stats_filename + '\n')
        fi.write('n = {} neurons total\n'.format(len(data)))
        fi.write('pooling across tasks because similar\n')
        fi.write('dropped C0; meaned response over whiskers; '
            'bonferroni-adjusted p-values and min over whiskers\n')
        fi.write('error bars: 95% CIs from bootstrapping\n')
        for (stratum, NS) in agg_res['mean'].index:
            n_neurons = agg_res['agg'].size().loc[(stratum, NS)]
            frac_pos_sig = agg_res['mean'].loc[(stratum, NS), 'pos_sig']
            frac_neg_sig = agg_res['mean'].loc[(stratum, NS), 'neg_sig']
            effect_mean = agg_res['mean'].loc[(stratum, NS), effect_sz_col]

            # Currently this is from bootstrapping
            # Make sure it changes in sync with the above
            effect_errlo = agg_res['lo'].loc[(stratum, NS), effect_sz_col]
            effect_errhi = agg_res['hi'].loc[(stratum, NS), effect_sz_col]

            # Dump
            fi.write('{} {}\n'.format(stratum, 'NS' if NS else 'RS'))
            fi.write('effect_sz_col: {}\n'.format(effect_sz_col))
            fi.write('n = {} neurons\n'.format(n_neurons))
            fi.write(
                'effect of contacts: mean {:.3f}, CI {:.3f} - {:.3f}\n'.format(
                    effect_mean,
                    effect_errlo,
                    effect_errhi,
                ))
            fi.write(
                'effect of contacts: exp(mean) {:.3f}, exp(CI) {:.3f} - {:.3f}\n'.format(
                    np.exp(effect_mean),
                    np.exp(effect_errlo),
                    np.exp(effect_errhi),
                ))
            fi.write('frac pos sig: {} / {} = {:.4f}\n'.format(
                int(n_neurons * frac_pos_sig),
                n_neurons,
                frac_pos_sig,
            ))
            fi.write('\n')

    # Print
    with open(stats_filename) as fi:
        print(''.join(fi.readlines()))

    ## Plot versus depth
    for effect_sz_col in ['scaled_coef_single', 'scaled_coef_single_hz']:
        # Make figure handle
        f, ax = my.plot.figure_1x1_small()

        # These will be yaxis
        if effect_sz_col == 'scaled_coef_single':
            ylim = np.log((.5, 3.5))
            coef_ticklabels = np.array([.5, 1, 2])
            coef_ticks = np.log(coef_ticklabels)
        else:
            ylim = (-5, 25)
            coef_ticklabels = np.array([0, 10, 20])
            coef_ticks = coef_ticklabels

        # Plot
        my.plot.smooth_and_plot_versus_depth(
            data, effect_sz_col, ax=ax, layer_boundaries_ylim=ylim)

        # Set y-axis
        ax.set_yticks(coef_ticks)
        ax.set_yticklabels(coef_ticklabels)
        ax.set_ylim(ylim)
        if effect_sz_col == 'scaled_coef_single':
            # (removed a dead .format(chr(176)) that had no placeholder)
            ax.set_ylabel('firing rate gain\n(fold / contact)')
        else:
            # (removed an extra, unconsumed chr(176) format argument)
            ax.set_ylabel('{} firing rate\n(Hz / contact)'.format(DELTA))

        # Plot unity line
        xlim = ax.get_xlim()
        ax.plot(xlim, [0, 0], 'k-', lw=.8)
        ax.set_xlim(xlim)

        ## Save
        f.savefig(
            'PLOT_DISCRIMINATION_CONTACTS_FRACSIG_AND_MEAN_BY_NS_AND_STRATUM'
            '_vdepth_{}.svg'.format(effect_sz_col))
        f.savefig(
            'PLOT_DISCRIMINATION_CONTACTS_FRACSIG_AND_MEAN_BY_NS_AND_STRATUM'
            '_vdepth_{}.png'.format(effect_sz_col), dpi=300)
if BAR_PLOT_DISCRIMINATION_CONTACT_COEFS_EACH_WHISKER_BY_TASK_RECLOC_AND_STRATUM:
    # Bar plot of contact response to each whisker, by task * stratum * recloc
    # Combine over NS since similar
    # Will show that detection has decent topography, but discrimination
    # has strong C1 bias
    ## Get data
    data = extract_contact_responses_to_each_whisker(
        coef_wscale_df, big_waveform_info_df,
        include_task=('detection', 'discrimination',),
    )

    ## Group
    whisker_l = ['C1', 'C2', 'C3']
    grouping_keys = ['task', 'recording_location', 'stratum']
    agg = data.groupby(grouping_keys)[whisker_l]

    # Mean and sem
    agg_mean = agg.mean()
    agg_err = agg.sem()
    agg_mean.columns.name = 'whisker'
    agg_err.columns.name = 'whisker'

    # Stack whisker
    agg_mean = agg_mean.stack()
    agg_err = agg_err.stack()

    ## Plot
    # Rows: stratum. Cols: task.
    # Axes: recording location * whisker in contact
    #f, ax = my.plot.figure_1x1_standard()
    stratum_l = ['superficial', 'deep']
    task_l = ['detection', 'discrimination']
    f, axa = plt.subplots(
        len(stratum_l),
        len(task_l),
        figsize=(6.5, 4.75),
        sharex=True, sharey=True,
    )
    f.subplots_adjust(left=.125, right=.95, bottom=.15, top=.94, hspace=.8, wspace=.2)

    # Bar styling: color by contacting whisker; full alpha when the
    # contacting whisker matches the recording location, faded otherwise
    def index2plot_kwargs(idx):
        if idx['whisker'] == 'C1':
            fc = 'b'
        elif idx['whisker'] == 'C2':
            fc = 'g'
        elif idx['whisker'] == 'C3':
            fc = 'r'
        else:
            fc = 'white'
        if idx['recording_location'] == idx['whisker']:
            alpha = 1
        else:
            alpha = .25
        ec = 'k'
        return {'fc': fc, 'ec': ec, 'alpha': alpha}

    def group_index2group_label(recloc):
        return {'C1': 'C1\ncolumn', 'C2': 'C2\ncolumn', 'C3': 'C3\ncolumn',
            'off': 'off-\ntarget'}[recloc]

    ## Iterate over task and stratum (axes)
    for task in task_l:
        for stratum in stratum_l:
            # Get ax
            ax = axa[
                stratum_l.index(stratum),
                task_l.index(task),
            ]

            # Title ax
            ax.set_title('{} ({} layers)'.format(task, stratum))

            # Slice data
            topl = agg_mean.loc[task].xs(stratum, level='stratum')
            topl_err = agg_err.loc[task].xs(stratum, level='stratum')

            # Plot handles
            # (same label function in both branches; only the label y-position
            # differs between the bottom row and the top row)
            if ax in axa[-1]:
                this_group_index2group_label = group_index2group_label
                group_name_fig_ypos = .09
            else:
                this_group_index2group_label = group_index2group_label
                group_name_fig_ypos = .585

            # Plot
            my.plot.grouped_bar_plot(
                topl,
                index2plot_kwargs=index2plot_kwargs,
                yerrlo=(topl - topl_err),
                yerrhi=(topl + topl_err),
                group_index2group_label=this_group_index2group_label,
                group_name_kwargs={'size': 12},
                ax=ax,
                group_name_fig_ypos=group_name_fig_ypos,
            )

    ## Pretty
    # Legend
    f.text(.97, .92, 'C1 contact', ha='center', va='center', size=12, color='b')
    f.text(.97, .88, 'C2 contact', ha='center', va='center', size=12, color='g')
    f.text(.97, .84, 'C3 contact', ha='center', va='center', size=12, color='r')

    # Pretty each ax
    for ax in axa.flatten():
        # Despine
        my.plot.despine(ax)

        # Line at zero
        xlim = ax.get_xlim()
        ax.plot(xlim, [0, 0], 'k-', lw=.75)
        ax.set_xlim(xlim)

        # Limits and ticks (y-data are log(gain); label in gain units)
        yticklabels = np.array([1, 1.5, 2])
        ylim = np.array([.9, 2])
        ax.set_yticks(np.log(yticklabels))
        ax.set_yticklabels(yticklabels)
        ax.set_ylim(np.log(ylim))

        # Labels
        if ax in axa[:, 0]:
            ax.set_ylabel('firing rate gain')
        if ax in axa[-1]:
            ax.set_xlabel('recording location', labelpad=30)

    ## Save
    f.savefig(
        'BAR_PLOT_DISCRIMINATION_CONTACT_COEFS_EACH_WHISKER_BY_TASK_RECLOC_AND_STRATUM.svg')
    f.savefig(
        'BAR_PLOT_DISCRIMINATION_CONTACT_COEFS_EACH_WHISKER_BY_TASK_RECLOC_AND_STRATUM.png', dpi=300)

    ## Stats
    stats_filename = 'STATS__BAR_PLOT_DISCRIMINATION_CONTACT_COEFS_EACH_WHISKER_BY_TASK_RECLOC_AND_STRATUM'
    with open(stats_filename, 'w') as fi:
        fi.write(stats_filename + '\n')
        fi.write('including all neurons with contact coefficients\n')
        fi.write('\nN=...\n')
        fi.write(agg.size().unstack('task').to_string() + '\n\n')
        fi.write(agg.size().unstack('task').sum().to_string() + '\n\n')
        fi.write('error bars: SEM\n')

    # Echo the stats file to stdout
    with open(stats_filename, 'r') as fi:
        lines = fi.readlines()
        print(''.join(lines))
if PLOT_CONTACT_COEF_STRENGTH_AND_SELECTIVITY:
    # Summarize each neuron's best-whisker response strength and its
    # selectivity (best minus worst whisker, in log-gain units), then
    # bar-plot both by stratum and cell type.
    ## Get data
    # Pool across tasks because largely similar
    data = extract_contact_responses_to_each_whisker(
        coef_wscale_df, big_waveform_info_df,
        include_task=('discrimination', 'detection',),
    )

    ## Summarize
    data['bestresp'] = data[['C1', 'C2', 'C3']].max(1)
    data['selectivity'] = (
        data[['C1', 'C2', 'C3']].max(1) - data[['C1', 'C2', 'C3']].min(1))

    ## Summarize
    # Mean and bootstrapped CI per (NS, stratum) group
    gobj = data.groupby(['NS', 'stratum'])
    rec_l = []
    rec_keys_l = []
    for group_key, sub_data in gobj:
        m_bestresp = sub_data['bestresp'].mean()
        m_selectivity = sub_data['selectivity'].mean()
        # NOTE(review): simple_bootstrap()[2] is taken to be the (lo, hi)
        # CI pair — confirm against my.bootstrap.simple_bootstrap
        ci_bestresp = my.bootstrap.simple_bootstrap(
            sub_data['bestresp'].values)[2]
        ci_selectivity = my.bootstrap.simple_bootstrap(
            sub_data['selectivity'].values)[2]
        rec_l.append({
            'm_bestresp': m_bestresp,
            'm_selectivity': m_selectivity,
            'ci_lo_bestresp': ci_bestresp[0],
            'ci_hi_bestresp': ci_bestresp[1],
            'ci_lo_selectivity': ci_selectivity[0],
            'ci_hi_selectivity': ci_selectivity[1],
            'NS': group_key[0],
            'stratum': group_key[1]
        })
    parameterized = pandas.DataFrame.from_records(rec_l).set_index(
        ['stratum', 'NS']).sort_index()

    # Reorder
    parameterized = parameterized.reindex(['superficial', 'deep'], level=0)

    ## Plot
    # Best-whisker response strength
    f, ax = my.plot.figure_1x1_small()
    my.plot.grouped_bar_plot(
        parameterized['m_bestresp'],
        yerrlo=parameterized['ci_lo_bestresp'],
        yerrhi=parameterized['ci_hi_bestresp'],
        index2label=extras.index2label__stratum_NS,
        group_index2group_label=extras.group_index2group_label__stratum_NS,
        index2plot_kwargs=extras.index2plot_kwargs__stratum_NS,
        ax=ax)
    ax.set_title('best whisker')
    ax.set_ylabel('firing rate gain')
    my.plot.despine(ax)
    # y-data are log(gain); label ticks in gain units
    yticklabels = (1, 2)
    ax.set_yticks(np.log(yticklabels))
    ax.set_yticklabels(yticklabels)
    f.savefig('PLOT_CONTACT_COEF_STRENGTH_AND_SELECTIVITY_strength.svg')
    f.savefig('PLOT_CONTACT_COEF_STRENGTH_AND_SELECTIVITY_strength.png', dpi=300)

    # Selectivity (best / worst whisker, since data are in log units)
    f, ax = my.plot.figure_1x1_small()
    my.plot.grouped_bar_plot(
        parameterized['m_selectivity'],
        yerrlo=parameterized['ci_lo_selectivity'],
        yerrhi=parameterized['ci_hi_selectivity'],
        index2label=extras.index2label__stratum_NS,
        group_index2group_label=extras.group_index2group_label__stratum_NS,
        index2plot_kwargs=extras.index2plot_kwargs__stratum_NS,
        ax=ax)
    ax.set_title('selectivity')
    ax.set_ylabel('best / worst whisker')
    my.plot.despine(ax)
    yticklabels = (1, 2)
    ax.set_yticks(np.log(yticklabels))
    ax.set_yticklabels(yticklabels)
    f.savefig('PLOT_CONTACT_COEF_STRENGTH_AND_SELECTIVITY_selectivity.svg')
    f.savefig('PLOT_CONTACT_COEF_STRENGTH_AND_SELECTIVITY_selectivity.png', dpi=300)

    ## Stats
    n_by_task = data.reset_index().groupby('task').size()
    n_by_cell_type = data.reset_index().groupby(
        ['task', 'stratum', 'NS']).size().unstack('task').T
    stats_filename = 'STATS__PLOT_CONTACT_COEF_STRENGTH_AND_SELECTIVITY'
    with open(stats_filename, 'w') as fi:
        fi.write(stats_filename + '\n')
        fi.write('# neurons by task:\n' + n_by_task.to_string() + '\n\n')
        fi.write('# neurons by task, stratum, NS:\n' + n_by_cell_type.to_string() + '\n\n')
        fi.write('error bars: 95% bootstrapped CIs\n')
        fi.write('including only neurons for which we have contacts by each whisker\n')
        fi.write('pooling over tasks because similar\n')

    # Echo the stats file to stdout
    with open(stats_filename) as fi:
        lines = fi.readlines()
        print(''.join(lines))
# Display all figures generated above
plt.show()
| StarcoderdataPython |
1672152 | import faceDet.ViolaJones.Regions as region
import numpy as np
class WeakClassifier:
    """A single Haar-like feature used as a weak classifier in Viola-Jones.

    The feature value is the sum of the positive regions' scores minus the
    sum of the negative regions' scores, evaluated on an integral image.
    """

    def __init__(self, positive_regions, negative_regions, threshold, polarity):
        """
        This is the actual feature which can also be called a weak classifier.
        :param positive_regions: positively contributing region
        :param negative_regions: negatively contributing region
        :param threshold: threshold
        :param polarity: polarity 1 or -1
        """
        self.positive_regions = positive_regions
        self.negative_regions = negative_regions
        self.threshold = threshold
        self.polarity = polarity

    def classify(self, x):
        """
        Classifies an integral image based on a feature f and the classifiers threshold and polarity
        :param x: the integral image
        :return:
            1 if polarity * feature(x) < polarity * threshold
            0 otherwise
        """
        return 1 if self.polarity * self.computeVal(x) < self.polarity * self.threshold else 0

    def computeVal(self, x):
        """
        Computes the feature value through pos, neg rectangles provided
        :param x: the integral image
        :return: value of the feature by summing positive and subtracting negative region
        """
        return sum([pos.computeScore(x) for pos in self.positive_regions]) - sum(
            [neg.computeScore(x) for neg in self.negative_regions])

    def __str__(self):
        # BUGFIX: the original format string opened a parenthesis after the
        # colon but never closed it; the closing ')' is restored here.
        return "Feature(WeakClassifier): (threshold=%d, polarity=%d, %s, %s)" % (
            self.threshold, self.polarity, str(self.positive_regions), str(self.negative_regions))
def computeFeatures(frameSize):
    """
    Builds all possible modified features in frameSize
    :param frameSize: a tuple of form (height, width)
    :return: a 1-D numpy object array of tuples. Each tuple's first element is an
        array of the rectangle regions which positively contribute to the feature.
        The second element is an array of rectangle regions negatively contributing
        to the feature.
    """
    height, width = frameSize
    features = []
    for w in range(1, width + 1, 2):  # width, height are the frame values
        for h in range(1, height + 1, 2):  # i, j are the positions; w, h are the width and height of feature
            i = 0
            while i + w < width:
                j = 0
                while j + h < height:
                    # 2 rectangle features
                    immediate = region.RectangleRegion(i, j, w, h)
                    right = region.RectangleRegion(i + w, j, w, h)
                    if i + 2 * w < width:  # Horizontally Adjacent
                        features.append(([immediate], [right]))  # positive, negative region to consider
                    bottom = region.RectangleRegion(i, j + h, w, h)
                    if j + 2 * h < height:  # Vertically Adjacent
                        features.append(([immediate], [bottom]))
                    right_2 = region.RectangleRegion(i + 2 * w, j, w, h)
                    # 3 rectangle features
                    if i + 3 * w < width:  # Horizontally Adjacent
                        features.append(([right], [right_2, immediate]))
                    bottom_2 = region.RectangleRegion(i, j + 2 * h, w, h)
                    if j + 3 * h < height:  # Vertically Adjacent
                        features.append(([bottom], [bottom_2, immediate]))
                    # 4 rectangle features
                    bottom_right = region.RectangleRegion(i + w, j + h, w, h)
                    if i + 2 * w < width and j + 2 * h < height:
                        features.append(([right, bottom], [immediate, bottom_right]))
                    j += 2
                i += 2
    print("Computed %d features" % (len(features)))
    # BUGFIX: np.array() on this ragged nested structure relied on implicit
    # object-array creation, which raises ValueError on NumPy >= 1.24.
    # Build an explicit 1-D object array instead (element-wise to avoid
    # NumPy attempting to broadcast the nested sequences).
    feature_array = np.empty(len(features), dtype=object)
    for idx, feat in enumerate(features):
        feature_array[idx] = feat
    return feature_array
def apply_features(features, training_data):
    """
    :param features:
        An array of tuples [(positive), (negative)].
    :param training_data: Array of tuples [(integralImage, classificationValue)].
    :return:
        X: A numpy array of shape (len(features), len(training_data)). Each row
           represents the value of a single feature for each training example.
        y: A numpy array of shape len(training_data), the ground-truth labels.
    """
    X = np.zeros((len(features), len(training_data)))
    # y holds only the actual classification of each image.
    y = np.array([label for _, label in training_data])
    # Apply the same feature to all images, then repeat for all features.
    for i, (positive_regions, negative_regions) in enumerate(features):
        def feature_value(ii):
            # Feature value = sum of positive-region scores minus negative ones.
            return (sum(pos.computeScore(ii) for pos in positive_regions)
                    - sum(neg.computeScore(ii) for neg in negative_regions))
        X[i] = [feature_value(image) for image, _ in training_data]
    return X, y
1706816 | <gh_stars>0
from django.conf.urls import url, patterns, include
from ginger.conf.urls import scan
from . import views
urlpatterns = scan(views) + patterns("", url("", include("django.contrib.auth.urls")),) | StarcoderdataPython |
3241713 | <filename>prtgrestcli/enums.py<gh_stars>0
from enum import Enum
class PrtgErrorCodes(Enum):
    """Sensor status codes understood by the PRTG API (0 = OK ... 4 = content error)."""

    OK, WARNING, SYSTEM_ERROR, PROTOCOL_ERROR, CONTENT_ERROR = range(5)

    def __str__(self):
        # PRTG expects the bare numeric code, not the Enum repr.
        return "{}".format(self.value)
class PrtgUnits(Enum):
    """Measurement-unit identifiers accepted by the PRTG API for channel values."""
    BytesBandwidth = "BytesBandwidth"
    BytesMemory = "BytesMemory"
    BytesDisk = "BytesDisk"
    Temperature = "Temperature"
    Percent = "Percent"
    TimeResponse = "TimeResponse"
    TimeSeconds = "TimeSeconds"
    Custom = "Custom"
    Count = "Count"
    CPU = "CPU (*)"  # value deliberately differs from the member name (PRTG's literal unit string)
    BytesFile = "BytesFile"
    SpeedDisk = "SpeedDisk"
    SpeedNet = "SpeedNet"
    TimeHours = "TimeHours"
    def __str__(self):
        # PRTG expects the raw unit string, not the Enum repr.
        return str(self.value)
class PrtgSizes(Enum):
    """Volume/size scale identifiers (metric multiples plus byte/bit variants)."""
    One = "One"
    Kilo = "Kilo"
    Mega = "Mega"
    Giga = "Giga"
    Tera = "Tera"
    Byte = "Byte"
    KiloByte = "KiloByte"
    MegaByte = "MegaByte"
    GigaByte = "GigaByte"
    TeraByte = "TeraByte"
    Bit = "Bit"
    KiloBit = "KiloBit"
    MegaBit = "MegaBit"
    GigaBit = "GigaBit"
    TeraBit = "TeraBit"
    def __str__(self):
        # PRTG expects the raw size string, not the Enum repr.
        return str(self.value)
class PrtgSpeedTimes(Enum):
    """Time denominators for speed-type channels (e.g. bytes per Second)."""

    Second = "Second"
    Minute = "Minute"
    Hour = "Hour"
    Day = "Day"

    def __str__(self):
        # Values are already strings; hand the raw value to the API.
        return self.value
class PrtgModes(Enum):
    """Channel value interpretation: absolute reading vs. difference per interval."""

    Absolute = "Absolute"
    Difference = "Difference"

    def __str__(self):
        # Values are already strings; hand the raw value to the API.
        return self.value
class PrtgDecimalModes(Enum):
    """Decimal-place display mode for a channel: automatic or show all digits."""

    Auto = "Auto"
    All = "All"

    def __str__(self):
        # Values are already strings; hand the raw value to the API.
        return self.value
| StarcoderdataPython |
1682329 | <gh_stars>0
import argparse
import shlex
import subprocess
import itertools
import ipaddress
import time
NODE_0='172.16.58.3'  # base IP for generated client addresses
NODE_1='10.1.1.2'  # base IP for generated server addresses
BRIDGE='br0'  # default Open vSwitch bridge name
# Syntax: python connect_container.py -B br0 -N click0 -D 3
def attach_container(bridge, container_name):
    """Attach the container's eth0 and eth1 to *bridge* via ovs-docker.

    Requires passwordless sudo; raises CalledProcessError if ovs-docker fails.
    """
    interfaces=('eth0', 'eth1')
    for interface in interfaces:
        cmd = '/usr/bin/sudo /usr/bin/ovs-docker add-port {} {} {}'
        cmd = cmd.format(bridge, interface, container_name)
        subprocess.check_call(shlex.split(cmd))
def find_container_ports(bridge, container_name):
    """Resolve the container's eth0/eth1 to their OpenFlow port numbers on *bridge*.

    Returns the raw tokens printed by ``ovs-ofctl show`` (NOTE(review): on
    Python 3 check_output returns bytes, so these are bytes objects; the
    script otherwise uses Python-2 idioms -- confirm the target interpreter).
    """
    interfaces=('eth0', 'eth1')
    of_ports = []
    for interface in interfaces:
        # Look up the OVS interface name recorded by ovs-docker for this
        # container/interface pair.
        cmd='/usr/bin/sudo /usr/bin/ovs-vsctl --data=bare --no-heading \
--columns=name find interface external_ids:container_id={} \
external_ids:container_iface={}'
        cmd = cmd.format(container_name, interface)
        ovs_port = subprocess.check_output(cmd, shell=True)
        ovs_port = ovs_port.strip()
        # Grep that interface out of the bridge's OpenFlow table and strip the
        # port number off with awk (everything before the '(').
        cmd='/usr/bin/sudo /usr/bin/ovs-ofctl show {} | grep {}'
        cmd=cmd.format(bridge, ovs_port) + " | awk -F '(' '{ print $1 }'"
        of_port = subprocess.check_output(cmd, shell=True)
        of_port= of_port.strip()
        of_ports.append(of_port)
    return of_ports
def pairwise(iterable):
    """s -> (s0, s1), (s2, s3), (s4, s5), ... (non-overlapping pairs)."""
    # BUGFIX: ``itertools.izip`` exists only on Python 2 (NameError on
    # Python 3); the builtin ``zip`` is the portable equivalent here.
    a = iter(iterable)
    return zip(a, a)
def connect_container(bridge, client_ip, server_ip, container_name):
    """Install OpenFlow rules chaining client -> container -> server and back.

    Assumes the physical client and server uplinks are OpenFlow ports 1 and 2
    on *bridge*; traffic traverses the container between its two ports.
    """
    interfaces=('eth0', 'eth1')
    of_ports = find_container_ports(bridge, container_name)
    # Chain becomes [1, eth0, eth1, 2]; pairwise() yields the non-overlapping
    # hops (1 -> eth0) and (eth1 -> 2) -- the container forwards eth0 -> eth1
    # internally.
    of_ports = [1] + of_ports + [2]
    # Connect client to server (direction = 1 (only client to server) or 3)
    for in_port,out_port in pairwise(of_ports):
        cmd='/usr/bin/sudo /usr/bin/ovs-ofctl add-flow {} "priority=100 ip in_port={} nw_src={} nw_dst={} actions=output:{}"'
        cmd=cmd.format(bridge, in_port, client_ip, server_ip, out_port)
        subprocess.check_call(shlex.split(cmd))
    # Connect server to client (direction=2 (only server to client) or 3)
    for in_port,out_port in pairwise(reversed(of_ports)):
        cmd='/usr/bin/sudo /usr/bin/ovs-ofctl add-flow {} "priority=100 ip in_port={} nw_src={} nw_dst={} actions=output:{}"'
        cmd=cmd.format(bridge, in_port, server_ip, client_ip, out_port)
        subprocess.check_call(shlex.split(cmd))
def connect_container_dummy(container_name):
    """Attach the container to the pre-existing Docker network 'network2'."""
    cmd='/usr/bin/sudo docker network connect network2 {}'
    cmd=cmd.format(container_name)
    subprocess.check_call(shlex.split(cmd))
def start_containers(container, name):
    """Run *container* image detached, privileged and network-less, named *name*."""
    cmd='/usr/bin/sudo /usr/bin/docker run -itd --rm --network=none --privileged --name {} {}'
    cmd=cmd.format(name, container)
    subprocess.check_call(shlex.split(cmd))
def get_names(number):
    """Return sequential container names: ['test0', 'test1', ...]."""
    # The original bound the result to a local named ``list``, shadowing the
    # builtin; return the comprehension directly instead.
    return ['test{}'.format(i) for i in range(number)]
def get_ip_range(base_ip, num):
    """Return *num* consecutive ip_address objects starting at *base_ip*.

    Exits the process with status 1 (as the original intended) when
    *base_ip* is not a valid IP address.
    """
    try:
        # BUGFIX: the original called the Python-2-only ``unicode()``
        # (NameError on Python 3); ipaddress accepts str directly.
        base = ipaddress.ip_address(base_ip)
    except ValueError:
        # BUGFIX: the original referenced ``sys.exit`` without importing sys
        # and used a bare except; raise SystemExit(1) for the same effect.
        print('Invalid ip address: {}'.format(base_ip))
        raise SystemExit(1)
    return [base + i for i in range(num)]
def main():
    """Parse CLI args, start background lsof sampling, then launch and wire up containers."""
    parser=argparse.ArgumentParser(description='Connect container to vswitch')
    parser.add_argument('--container', '-C', required=True, type=str)
    # parser.add_argument('--name', '-N', required=True, type=str)
    parser.add_argument('--instances', '-n', required=True, type=int)
    args=parser.parse_args()
    name_list = get_names(args.instances)
    client_ips = get_ip_range(NODE_0, args.instances)
    server_ips = get_ip_range(NODE_1, args.instances)
    # interval in seconds between lsof samples / container launches
    interval = 30
    # Start the lsof sampler in the background.  BUGFIX: the original never
    # assigned the formatted command back, referenced an undefined
    # ``output_filename``, and fed a shell redirection ('>' / '&') to
    # check_call without a shell.
    output_filename = 'lsof_output.txt'  # NOTE(review): name was undefined upstream -- confirm intended path
    cmd = 'sudo lsof -r {} -F 0 > {} &'.format(interval, output_filename)
    subprocess.check_call(cmd, shell=True)  # shell=True required for '>' and '&'
    start_time = int(time.time())
    for i in range(len(name_list)):
        # Wait until the next interval boundary before launching the next container.
        while int(time.time()) <= (start_time + (i + 1) * interval):
            time.sleep(2)  # BUGFIX: original called an un-imported bare ``sleep``
        start_containers(args.container, name_list[i])
        connect_container_dummy(name_list[i])
if __name__ == '__main__':
    # Standard script entry point: only run the CLI when executed directly.
    main()
| StarcoderdataPython |
1607037 | # creates: h2.emt.traj
from ase import Atoms
from ase.calculators.emt import EMT
from ase.optimize import QuasiNewton
# Build an H2 molecule, attach the EMT calculator and relax it with
# quasi-Newton, writing the optimisation trajectory to h2.emt.traj.
# BUGFIX: the final statement carried trailing dataset-contamination text
# that made it a syntax error.
system = Atoms('H2', positions=[[0.0, 0.0, 0.0],
                                [0.0, 0.0, 1.0]])
calc = EMT()
system.calc = calc
opt = QuasiNewton(system, trajectory='h2.emt.traj')
opt.run(fmax=0.05)  # converge until the maximum force drops below 0.05 eV/A
1616085 | <gh_stars>0
#
# See ../LICENSE
#
# This file is a configuration example for a MyISAM table with cities, considering Deleted and Non deleted as distinct objects.
#
#
import config_base
import field_isam as isam
import field_inno_antelope as inno
import scanner_shared
import validators
validate_pop = validators.make_validator_int_range(1, 1.0*10001000*1000)
def validate_cc(f):
    """Score a country-code field: 1.0 when every character is upper-case, else 0.0."""
    if validators.validate_null(f):
        # Null fields never look like a country code.
        return 0.0
    s = f.s
    if all(map(str.isupper, s)):
        # NOTE(review): ``all`` over an empty string is True, so an empty
        # (but non-null) value also scores 1.0 -- confirm that is intended.
        return 1.0
    return 0.0
def validate_isamdel(f, n):
    """Score 1.0 when the record's first raw byte is 0 (the MyISAM 'deleted'
    marker used by this layout), else 0.0.  *n* is unused but kept for the
    validator call signature."""
    # BUGFIX: ``map`` returns a lazy iterator on Python 3, so the original
    # ``d[0]`` subscript raised TypeError; materialise the list first.
    d = list(map(ord, f.get_raw_data()))
    if d[0] == 0:
        return 1.0
    return 0.0
def validate_isamnotdel(f, n):
    """Complement of validate_isamdel: 1.0 for live records, 0.0 for deleted."""
    return 1.0 - validate_isamdel(f, n)
# Shared 1..10000 range check used by the per-row ID validator below.
_validate_ID = validators.make_validator_int_range(1, 10000)
def validate_id(f,n):
    """Score the ID field via the shared range check (*n* is unused;
    kept for the validator call signature)."""
    return _validate_ID(f)
def validate_city(f):
    """Score a city-name field: 0.0 when it contains more than 7 internal
    spaces (implausibly many words), else 1.0."""
    word_gaps = f.s.rstrip().count(' ')
    return 0.0 if word_gaps > 7 else 1.0
def validator(entry):
    """Total plausibility score for one recovered row: sum of the per-field
    scores for country code, city name and population."""
    field_scores = (
        validate_cc(entry["CountryCode"]),
        validate_city(entry["Name"]),
        validate_pop(entry["Population"]),
    )
    return sum(field_scores)
# Start from the shared defaults and describe the two on-disk record layouts
# (live and deleted rows are treated as distinct formats).
scanner_settings = config_base.base_config()
scanner_settings["row_format"] = [
    # Layout of a live (non-deleted) row in the MyISAM .MYD file.
    scanner_shared.RowFormat("Not deleted City", [
        { "name":"ignoreme", "type": scanner_shared.Null}, #HEADER
        { "name": "header", "type": inno.Noise, "min_len": 1, "max_len":1, "validator": validate_isamnotdel, "min_validation": 0.5},
        { "name": "ID", "type": isam.Int, "signed": True, "validator": validate_id, "min_validation": 0.5},
        { "name": "Name", "type": isam.CharFixed, "char_length": 35},
        { "name": "CountryCode", "type": isam.CharFixed, "char_length": 3},
        { "name": "District", "type": isam.CharFixed, "char_length": 20},
        { "name": "Population", "type": isam.Int, "signed":True},
    ]),
    # Layout of a deleted row: a 7-byte deletion header overwrites the ID and
    # the first 2 bytes of the name (hence char_length 33 below).
    scanner_shared.RowFormat("Deleted City", [
        { "name":"ignoreme", "type": scanner_shared.Null}, #HEADER
        { "name": "header", "type": inno.Noise, "min_len": 7, "max_len":7, "validator": validate_isamdel, "min_validation": 0.5},
        { "name": "ID", "type": scanner_shared.Null},
        { "name": "Name", "type": isam.CharFixed, "char_length": 33},
        { "name": "CountryCode", "type": isam.CharFixed, "char_length": 3},
        { "name": "District", "type": isam.CharFixed, "char_length": 20},
        { "name": "Population", "type": isam.Int, "signed":True},
    ]),
]
# Input dump to scan; try every byte offset; accept rows scoring (almost) 3/3.
scanner_settings["filename"] = '../datafiles/dumps/isam_city.MYD'
scanner_settings["everybytemode"] = True
scanner_settings["row_validator"] = validator
scanner_settings["accept_score"] = lambda x: x > 2.9
| StarcoderdataPython |
3363681 | <reponame>janiszewskibartlomiej/own_framework_for_e2e_tests
import os
import time
from selenium.webdriver.common.by import By
from selenium.webdriver.remote.webdriver import WebDriver
from page_objects.base_page import BasePage
from utils.automation_functions import (
data_loader,
save_to_json,
get_path_to_file_in_test_reports_by_current_date,
)
from utils.paths_builder import data_folder_path
class Sitemap(BasePage):
    """Page object that crawls (optionally paginated) sitemap pages and
    collects the application's internal links."""

    # BUGFIX: raw string -- '\/' is not a valid Python escape sequence
    # (SyntaxWarning/DeprecationWarning on modern Python); the backslash
    # must reach the CSS engine literally.
    ALL_HREF_WITHOUT_BASE_LOCATOR = (By.CSS_SELECTOR, r"a[href]:not([href='\/'])")
    ACTIVE_ARROW_NEXT_PAGE_LOCATOR = (
        By.CSS_SELECTOR,
        "button[aria-label='Next page']:not([disabled])",
    )
    SPINNER_BORDER_LOCATOR = (By.CSS_SELECTOR, ".MuiPaper-root div.spinner-border")

    def __init__(self, driver):
        super().__init__(driver)
        self.driver: WebDriver = driver

    @property
    def href_list(self) -> list:
        """All anchor hrefs on the current page that point back into the app.

        NOTE(review): assumes REACT_BASE_URL is set and contains at least two
        dots (the second-to-last domain label is used as the match token) --
        confirm against the CI environment.
        """
        all_elements = self.get_all_elements(by_locator=self.ALL_HREF_WITHOUT_BASE_LOCATOR)
        return [
            el.get_attribute("href")
            for el in all_elements
            if os.environ.get("REACT_BASE_URL").split(".")[-2] in el.get_attribute("href")
            and "http" in el.get_attribute("href")
        ]

    def click_on_active_arrow_next_page(self) -> None:
        """If the 'next page' arrow is enabled, scroll to it, click it and
        wait for the loading spinner to disappear."""
        if self.element_is_on_a_page(by_locator=self.ACTIVE_ARROW_NEXT_PAGE_LOCATOR):
            self.scroll_to_invisible_element(target=self.ACTIVE_ARROW_NEXT_PAGE_LOCATOR)
            self.click_on(by_locator=self.ACTIVE_ARROW_NEXT_PAGE_LOCATOR)
            self.element_is_not_visible(by_locator=self.SPINNER_BORDER_LOCATOR)

    def get_new_sitemap_from_endpoints(self, all_endpoints) -> list:
        """Visit every sitemap URL from the fixture and collect internal links.

        When *all_endpoints* is truthy, every paginated page is walked too.
        Duplicates are not removed.
        """
        endpoints_list = []
        urls = data_loader(test_name="endpoints_from_home_page")
        for url in urls["sitemap"]:
            self.driver.get(url=url)
            self.element_is_not_visible(by_locator=self.SPINNER_BORDER_LOCATOR)
            endpoints_list.extend(self.href_list)
            if all_endpoints:
                while self.element_is_on_a_page(by_locator=self.ACTIVE_ARROW_NEXT_PAGE_LOCATOR):
                    self.click_on_active_arrow_next_page()
                    endpoints_list.extend(self.href_list)
        return endpoints_list

    @staticmethod
    def save_sitemap_to_json(data: dict, name: str) -> str:
        """Write *data* to <data_folder>/<name>.json and return the path."""
        target = f"{data_folder_path()}{os.sep}{name}.json"
        save_to_json(data=data, path=target)
        return target

    @staticmethod
    def save_console_logs_to_json(data: dict) -> str:
        """Write browser console logs to a millisecond-timestamped report file
        and return the path."""
        path = get_path_to_file_in_test_reports_by_current_date(
            name=f"console_logs_{int(time.time() * 1000)}.json"
        )
        save_to_json(data=data, path=path)
        return path
| StarcoderdataPython |
85891 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Oct 12 15:40:08 2020
plot composite plots sea level & barotropic currents for ROMS sensitivity experiments with uniform wind speed change and closed english channel,
and difference in responses w.r.t. same experiments with an open english channel
@author: thermans
"""
import xarray as xr
import matplotlib.ticker as mticker
import matplotlib.pyplot as plt
import os
import numpy as np
import fnmatch
import cmocean
import cartopy.crs as ccrs
plt.close('all')
out_dir = '/Users/thermans/Documents/PhD/Phase4_seasonal/Figures' #where to store the figure
#initialize figure
my_cmap = cmocean.cm.balance
my_cmap.set_bad('grey')  # masked (land) cells render grey
fig=plt.figure(figsize=(6, 7.5))
gs = fig.add_gridspec(2,2)
gs.update(hspace = .1,wspace=.2,bottom=0.1,top=.98)
plot_titles = ['(a) Exp_SW_cc, DJF','(b) Exp_NE_cc, JJA','(c) Closed minus open','(d) Closed minus open']
exps_dir = '/Users/thermans/Documents/Modeling/ROMS/northsea8/'
exps = ['swWindUniform_sq2ms_chclosed','neWindUniform_sq2ms_chclosed','swWindUniform_sq2ms','neWindUniform_sq2ms'] #experiments to plot
#reference experiments (with open and closed channels)
org_dir = '/Users/thermans/Documents/Modeling/ROMS/northsea8/'
org_exps = ['Exp70_1993_1995_era5_gl12v1_v3e10_rx013_chclosed','Exp70_1993_1995_era5_gl12v1_v3e10_rx013_chclosed',
            'Exp39_1993_2018_era5_gl12v1_v3e10_rx013','Exp39_1993_2018_era5_gl12v1_v3e10_rx013']
bathy = xr.open_dataset('/Users/thermans/Documents/Modeling/ROMS/preprocessing/bathymetry/etopo1_bedrock_bigNorthSea1.nc')
for e,exp in enumerate(exps):
    # Locate the experiment directory and open monthly-mean output for the
    # perturbed and reference runs.
    dns = fnmatch.filter(os.listdir(exps_dir),"*"+exp)
    ds = xr.open_dataset(os.path.join(exps_dir,dns[0],'NorthSea8_avg_timeseries_monthly.nc'))
    org_ds = xr.open_dataset(os.path.join(org_dir,org_exps[e],'NorthSea8_avg_timeseries_monthly.nc'))
    if np.mod(e,2)==0: #select months from desired season
        season='DJF'
    else:
        season='JJA'
    # Month indices (0-based, 3 model years) belonging to each season.
    if season == 'DJF':
        time_i = [1,11,12,13,23,24,25,35]
    elif season == 'MAM':
        time_i = [2,3,4,14,15,16,26,27,28]
    elif season == 'JJA':
        time_i = [5,6,7,17,18,19,29,30,31]
    elif season == 'SON':
        time_i = [8,9,10,20,21,22,32,33,34]
    # Seasonal-mean response: perturbed minus reference.
    diff_zeta = (ds-org_ds).zeta.isel(ocean_time=time_i).mean(dim='ocean_time') #calculate diff w.r.t. reference
    diff_vbar = (ds-org_ds).vbar.isel(ocean_time=time_i).mean(dim='ocean_time')
    diff_ubar = (ds-org_ds).ubar.isel(ocean_time=time_i).mean(dim='ocean_time')
    #interpolate u,v points to rho points
    # NOTE(review): grid size (218, 242) is hard-coded for the NorthSea8 grid.
    diff_vbar_rho = np.empty((218,242))
    diff_vbar_rho[:] = np.nan
    diff_vbar_rho[1:-1,:] = (diff_vbar[0:-1,:] + diff_vbar[1:,:])/2
    diff_ubar_rho = np.empty((218,242))
    diff_ubar_rho[:] = np.nan
    diff_ubar_rho[:,1:-1] = (diff_ubar[:,0:-1] + diff_ubar[:,1:])/2
    if e==0: #store outcomes for closed channel to subtract outcomes from open channel
        diff_zeta_sw_cc = diff_zeta
        diff_ubar_rho_sw_cc = diff_ubar_rho
        diff_vbar_rho_sw_cc = diff_vbar_rho
    elif e==1:
        diff_zeta_ne_cc = diff_zeta
        diff_ubar_rho_ne_cc = diff_ubar_rho
        diff_vbar_rho_ne_cc = diff_vbar_rho
    # Top row: closed-channel responses; bottom row: closed-minus-open diffs.
    if e<2:
        ax = fig.add_subplot(gs[0,e], projection=ccrs.Orthographic(0, 50))
    else:
        ax = fig.add_subplot(gs[1,e-2], projection=ccrs.Orthographic(0, 50))
    sl = 100*diff_zeta  # m -> cm
    u = diff_ubar_rho[::3,::3]  # subsample every 3rd grid point for quivers
    v = diff_vbar_rho[::3,::3]
    if e==2:
        sl = 100*diff_zeta_sw_cc - sl
        u = diff_ubar_rho_sw_cc[::3,::3]-u
        v = diff_vbar_rho_sw_cc[::3,::3]-v
    elif e==3:
        sl = 100*diff_zeta_ne_cc - sl
        u = diff_ubar_rho_ne_cc[::3,::3]-u
        v = diff_vbar_rho_ne_cc[::3,::3]-v
    # Difference panels use a tighter colour range than the response panels.
    if e>1:
        im=sl.plot.pcolormesh("lon_rho", "lat_rho", ax=ax,vmin=-4,vmax=4,cmap=my_cmap,label='dSSH [m]',transform=ccrs.PlateCarree(),add_colorbar=False,rasterized=True)
    else:
        im=sl.plot.pcolormesh("lon_rho", "lat_rho", ax=ax,vmin=-12,vmax=12,cmap=my_cmap,label='dSSH [m]',transform=ccrs.PlateCarree(),add_colorbar=False,rasterized=True)
    # 200 m isobath for orientation.
    bathy.Band1.plot.contour(transform=ccrs.PlateCarree(),ax=ax,levels=[-200],colors=['white'],add_colorbar=False)
    plt.scatter(1.55,51.05,edgecolor='lightgreen',facecolor='none',transform=ccrs.PlateCarree(),marker='o',s=250,linewidth=1.5,zorder=5) #mark closed channel with green circle
    gl = ax.gridlines(crs=ccrs.PlateCarree(), draw_labels=True,linewidth=1, color='lightgrey', alpha=0, linestyle='-')
    gl.xlocator = mticker.FixedLocator([-5, 0, 5])
    gl.top_labels = gl.right_labels = gl.left_labels = gl.bottom_labels = False
    if np.mod(e,2)==0:
        gl.left_labels = True #don't label top and right axes
    if e>1:
        gl.bottom_labels = True
    gl.xlabel_style = {'color': 'black','rotation':0}
    gl.ylabel_style = {'color': 'black','rotation':0}
    ax.coastlines(resolution='10m',color='black',zorder=4)
    ax.set_extent([-8.5,7.5,47,59.5])
    q=ax.quiver(ds.lon_rho.values[::3,::3],ds.lat_rho.values[::3,::3],u,v,
                scale=.5,color='black',width=.005,edgecolors='k',transform=ccrs.PlateCarree())
    ax.set_title(plot_titles[e])
    # Shared colourbars / quiver keys, placed once per row.
    if e==1:
        cax = fig.add_axes([0.26, .57, .5, .02])
        fig.colorbar(im, cax=cax,orientation='horizontal',label='Sea-level response [cm]',extend='neither')
        qk = ax.quiverkey(q, 0.82, 0.59, 0.05, label='5 cm/s', labelpos='E',coordinates='figure')
    if e==3:
        cax = fig.add_axes([0.26, .09, .5, .02])
        fig.colorbar(im, cax=cax,orientation='horizontal',label='Sea-level difference [cm]',extend='neither')
        qk = ax.quiverkey(q, 0.82, 0.11, 0.05, label='5 cm/s', labelpos='E',coordinates='figure')
fig.savefig(os.path.join(out_dir,'Figure7_uniform_wind_cc_roms.pdf'),dpi=300) | StarcoderdataPython |
1606876 | #! /usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Draw time series from csv files or lists
Data series come from the columns of the csv files multi columns can be used from
the same file. One column for x values (it can be numerical or date/time data) and
more columns for y1, y2, .., yn. Alternatively lists can be given with the values.
As many charts will be drawn as many y columns given.
If more input files are given multiple lines will be drawn in the same subfigure.
It is poissible to define different number of y columns for the input csv files.
The input csv files may have different number of rows and columns. column indeces are
zero based.
The data series are given by a list with maximum 6 elementst
1st the name of the csv file separated by ';' and
list of column ordinal numbers first is for x, the followings are for y values
(0 base index), the same column can be repeated
x column can be numerical or date/time, y columns are numerical
or complex list [ x1, [y11, y12], x2, [y21 y22]]
2nd list of multipliers for the columns, default 1 for each column
3rd list of offsets for the columns, default 0 for each column
4th list of simple format specifiers for y values, default is matplotlib defaults
5th list of legend labels for y values, default filename:column
Here is a simple example with three input files and two subplots
first.csv:
1;0.34;2.56
2;0.58;1.43
3;1.02;1.1
second.csv:
A;1.2;0.86;0.55;6.54
B;1.9;1.7;0.72;5.78
C;2.4;1.45;0.4;1.34
D;2.8;0.86;.88;5.12
third.csv
1;0.75;1.8
2;2.1;2.5
3;1.8;3.1
titles = ["line1", "line2", "points"]
units = ["m", "mm", "degree"]
data_series = [[['first.csv', [0, 1, 2]], [1, 1, 0.56], [0, 0, 0], ['g--', 'r'], ['l1', 'l2']],
[['second.csv', [1, 3, 2]], [1, 1, 1], [0, 0, 0], ['', ''], ['b1', 'b2']],
[['third.csv', [0, 2]], [1, 0.75], [0, -0.3], ['b+']]]
g = GraphPlot(titles, units, data_series)
g.draw()
"""
import csv
import os.path
import re
from datetime import datetime
import matplotlib.pyplot as plt
import numpy as np
class GraphPlot:
    """ class to plot a graph
    :param titles: list of titles for subplots
    :param units: units for axes (x unit first, then one per subplot)
    :param data_series: list of series descriptions; each is
        [[source, columns-or-y-lists], scales?, offsets?, formats?, labels?]
        where *source* is a csv filename (str, with column indices following)
        or an inline list of x values (with the y lists following).
    """
    SEPARATOR = ";"  # update for your input

    def __init__(self, titles, units, data_series):
        """ initialize instance """
        self.titles = titles
        self.units = units
        self.x = []
        self.y = []
        self.fmts = []
        self.labels = []
        for serie in data_series:
            scales = [1] * len(serie[0][1])  # default scales
            if len(serie) > 1:
                scales = serie[1]
            offsets = [0] * len(serie[0][1])  # default offsets
            if len(serie) > 2:
                offsets = serie[2]
            if isinstance(serie[0][0], str):
                # csv source: first listed column is x, the rest are y series
                act_x, act_y = self.load(serie[0][0], serie[0][1], scales, offsets)
            else:
                # inline data: serie[0][0] is the x list, serie[0][1] the y lists
                # (scales/offsets are not applied to inline data)
                act_x = serie[0][0]
                act_y = serie[0][1]
            self.x.append(act_x)
            self.y.append(act_y)
            fmt = [''] * len(serie[0][1])  # default formats
            if len(serie) > 3:
                fmt = serie[3]
            if isinstance(serie[0][0], str):
                label = ["{}:{}".format(serie[0][0], str(col))
                         for col in serie[0][1][1:]]
            else:
                # BUGFIX: default labels for inline data previously counted
                # serie[1] (the scales) and raised IndexError when scales were
                # omitted; the number of y series is len(serie[0][1]).
                label = [str(col + 1) for col in range(len(serie[0][1]))]
            if len(serie) > 4:
                label = serie[4]
            self.labels.append(label)
            self.fmts.append(fmt)
        # BUGFIX: the title is derived from the first series' filename.  The
        # original passed the whole [fname, cols] list to os.path.basename,
        # which always raised and silently fell back to __file__.
        first_source = data_series[0][0][0] if data_series else None
        if isinstance(first_source, str):
            self.main_title, _ = os.path.splitext(os.path.basename(first_source))
        else:
            self.main_title, _ = os.path.splitext(os.path.basename(__file__))

    def draw(self):
        """ draw multi graph: one subplot per y series, one line per input serie """
        rows = max([len(yi) for yi in self.y])
        fig = plt.figure()
        # NOTE(review): Figure.canvas.set_window_title is deprecated since
        # Matplotlib 3.4 (use fig.canvas.manager.set_window_title) -- confirm
        # the supported Matplotlib versions before changing.
        fig.canvas.set_window_title(self.main_title)
        for ind in range(rows):
            ax = plt.subplot(rows, 1, ind + 1)
            for i in range(len(self.x)):
                if len(self.y[i]) > ind:
                    # Date x values need plot_date; numeric ones use plot.
                    if isinstance(self.x[i][0], datetime):
                        plt.plot_date(self.x[i], self.y[i][ind], self.fmts[i][ind],
                                      label=self.labels[i][ind])
                    else:
                        plt.plot(self.x[i], self.y[i][ind], self.fmts[i][ind],
                                 label=self.labels[i][ind])
            plt.xticks(rotation=45)
            plt.xlabel(self.units[0])
            plt.ylabel(self.units[ind + 1])
            plt.grid()
            plt.legend()
            ax.set_title(self.titles[ind])
        fig.tight_layout()
        plt.show()
        fig.savefig(self.main_title + '.png')

    @staticmethod
    def load(fname, cols, scales, offsets):
        """ load input data
        :param fname: name of csv input file
        :param cols: ordinal column numbers to use
        :param scales: multipliers for columns
        :param offsets: offsets for columns
        :returns tuple x and y values (multiple y series as list)
        """
        data = []
        with open(fname, newline='') as f:
            reader = csv.reader(f, delimiter=GraphPlot.SEPARATOR)
            for row in reader:
                data.append(row)
        # An ISO-looking first x value switches the x column to datetimes.
        if re.match("[0-9]{4}-[0-9]{2}-[0-9]{2}", data[0][cols[0]]):
            x = [datetime.strptime(row[cols[0]], '%Y-%m-%d %H:%M:%S.%f')
                 for row in data]
        else:
            x = [float(row[cols[0]]) * scales[0] + offsets[0] for row in data]
        y = []
        for i in range(1, len(cols)):
            y.append([float(row[cols[i]]) * scales[i] + offsets[i] for row in data])
        return (x, y)
if __name__ == "__main__":
from sys import argv
from math import (sin, cos, pi)
DEMO_ID = 1
if len(argv) > 1:
DEMO_ID = int(argv[1])
if DEMO_ID == 1:
TITLES = ["line1", "line2", "points"]
UNITS = ["m", "mm", "degree", "m"]
X1 = [1, 2, 3, 4, 5, 6]
Y11 = [0.34, 0.58, 1.02, 1.21, 1.52, 1.61]
Y12 = [2.56, 1.43, 1.1, 0.8, 0.48, 0.67]
X2 = [1.2, 1.9, 2.4, 2.8, 3.5, 5.8]
Y21 = [0.86, 1.7, 1.45, 0.86, 1.2, 3.0]
Y22 = [0.55, 0.72, 0.4, 0.88, 0.99, 2.0]
# x3 == x1
Y31 = [1.8, 2.5, 3.1, 2.6, 2.3, 2.8]
DATA_SERIES = [[[X1, [Y11, Y12, Y12]],
[1, 1, 0.56, 1], [0, 0, 0, 1],
['g--', 'r', 'ro'], ['l1', 'l2', 'l2']],
[[X2, [Y22, Y21, Y22]],
[1, 1, 1, 0.75], [0, 0, 0, -0.5],
['', '', 'yx'], ['b1', 'b2', 'b1']],
[[X1, [Y31]], [1, 0.75], [0, -0.3], ['b+']]]
G = GraphPlot(TITLES, UNITS, DATA_SERIES)
G.draw()
elif DEMO_ID == 2:
TITLES = ["trigonometry"]
UNITS = ["fok", "-", "-"]
DATA_SERIES = [[['test/sin_cos.csv', [0, 2]], [1, 1], [0, 0],
[''], ['sin']],
[['test/sin_cos.csv', [0, 3]], [1, 1], [0, 0],
[''], ['cos']]]
G = GraphPlot(TITLES, UNITS, DATA_SERIES)
G.draw()
elif DEMO_ID == 3:
TITLES = ["trigonometry"]
UNITS = ["fok", "-", "-"]
X = list(range(0, 370, 10))
Y1 = [sin(xi / 180 * pi) for xi in range(0, 370, 10)]
Y2 = [cos(xi / 180 * pi) for xi in range(0, 370, 10)]
DATA_SERIES = [[[X, [Y1]], [0, 2], [1, 1], [0, 0],
[''], ['sin']],
[[X, [Y2]], [0, 3], [1, 1], [0, 0],
[''], ['cos']]]
G = GraphPlot(TITLES, UNITS, DATA_SERIES)
G.draw()
| StarcoderdataPython |
4837747 | # Copyright (c) 2017 Ansible Tower by Red Hat
# All Rights Reserved.
from awx.main.utils.insights import filter_insights_api_response
from awx.main.tests.data.insights import TEST_INSIGHTS_HOSTS, TEST_INSIGHTS_PLANS, TEST_INSIGHTS_REMEDIATIONS
def test_filter_insights_api_response():
    """Regression test: filter_insights_api_response flattens the host, plan
    and remediation fixtures into the report structure consumed by the UI."""
    actual = filter_insights_api_response(
        TEST_INSIGHTS_HOSTS['results'][0], TEST_INSIGHTS_PLANS, TEST_INSIGHTS_REMEDIATIONS)
    # Host-level metadata survives the filtering.
    assert actual['last_check_in'] == '2019-03-19T21:59:09.213151-04:00'
    assert len(actual['reports']) == 5
    # The first report picks up exactly one maintenance action from the plans fixture.
    assert len(actual['reports'][0]['maintenance_actions']) == 1
    assert actual['reports'][0]['maintenance_actions'][0]['name'] == "Fix Critical CVEs"
    # Rule details are passed through verbatim from the fixture.
    rule = actual['reports'][0]['rule']
    assert rule['severity'] == 'WARN'
    assert rule['description'] == (
        "Kernel vulnerable to side-channel attacks in modern microprocessors (CVE-2017-5715/Spectre)")
    assert rule['category'] == 'Security'
    assert rule['summary'] == (
        "A vulnerability was discovered in modern microprocessors supported by the kernel,"
        " whereby an unprivileged attacker can use this flaw to bypass restrictions to gain read"
        " access to privileged memory.\nThe issue was reported as [CVE-2017-5715 / Spectre]"
        "(https://access.redhat.com/security/cve/CVE-2017-5715).\n")
| StarcoderdataPython |
4800550 | <filename>api/accounts/api.py
from rest_framework.generics import GenericAPIView as View
from rest_framework.response import Response
from rest_framework.status import (
HTTP_201_CREATED,
HTTP_205_RESET_CONTENT,
HTTP_401_UNAUTHORIZED,
)
from rest_framework.permissions import IsAuthenticated
from rest_framework.authtoken.models import Token
from django.contrib.auth import authenticate, login, logout
from accounts.serializers import UserSerializer, LoginSerializer
class RegisterUserAPI(View):
    """POST endpoint that creates a new user account."""
    serializer_class = UserSerializer
    def post(self, request):
        # Invalid payloads raise a DRF ValidationError -> HTTP 400.
        user_data = request.data
        user_register_serializer = self.get_serializer(data=user_data)
        user_register_serializer.is_valid(raise_exception=True)
        user_register_serializer.save()
        return Response(
            {
                "message": "Registration Successful",
                "data": user_register_serializer.data,
            },
            status=HTTP_201_CREATED,
        )
class LoginUserAPI(View):
    """POST endpoint that authenticates a user and opens a session."""
    serializer_class = LoginSerializer
    def post(self, request):
        user_creds = request.data
        user_login_serializer = self.get_serializer(data=user_creds)
        user_login_serializer.is_valid(raise_exception=True)
        # authenticate() returns None for bad credentials.
        user = authenticate(**user_login_serializer.data)
        if not user:
            return Response(
                {"message": "Invalid credentials"}, status=HTTP_401_UNAUTHORIZED
            )
        login(request, user)
        return Response(
            {"message": "Login successful", "data": UserSerializer(user).data}
        )
class LogoutUserAPI(View):
    """GET endpoint that deletes the caller's auth token and closes the session."""
    def get(self, request):
        if request.user:
            # Revoke the DRF token (if any) so token-based clients are logged out too.
            user_token = Token.objects.filter(user=request.user)
            if user_token.exists():
                user_token.delete()
        logout(request)
        return Response(
            {"message": "You are currently logged out"},
            status=HTTP_205_RESET_CONTENT,
        )
class ProfileUserAPI(View):
    """GET returns the authenticated user's profile; POST partially updates it."""
    serializer_class = UserSerializer
    permission_classes = [IsAuthenticated]
    def get(self, request):
        user_serializer = self.get_serializer(request.user)
        return Response({"data": user_serializer.data})
    def post(self, request):
        # partial=True allows updating a subset of the user's fields.
        user_data = request.data
        user_serializer = self.get_serializer(
            request.user, data=user_data, partial=True
        )
        user_serializer.is_valid(raise_exception=True)
        user_serializer.save()
        return Response(
            {"message": "User has been updated", "data": user_serializer.data}
        )
| StarcoderdataPython |
96125 | """Discover and load entry points from installed packages."""
# Copyright (c) <NAME> and contributors
# Distributed under the terms of the MIT license; see LICENSE file.
from contextlib import contextmanager
import glob
from importlib import import_module
import io
import itertools
import os.path as osp
import re
import sys
import warnings
import zipfile
if sys.version_info[0] >= 3:
import configparser
else:
from backports import configparser
# Grammar for an entry point value: "module[.module]* [:object[.attr]*] [extras]"
# (whitespace inside the pattern is insignificant because of re.VERBOSE).
entry_point_pattern = re.compile(r"""
(?P<modulename>\w+(\.\w+)*)
(:(?P<objectname>\w+(\.\w+)*))?
\s*
(\[(?P<extras>.+)\])?
$
""", re.VERBOSE)
__version__ = '0.2.3'
class BadEntryPoint(Exception):
    """Raised when an entry point can't be parsed.
    """

    def __init__(self, epstr):
        self.epstr = epstr  # the offending specification text

    def __str__(self):
        return "Couldn't parse entry point spec: %r" % self.epstr

    @staticmethod
    @contextmanager
    def err_to_warnings():
        """Context manager: downgrade BadEntryPoint errors to warnings."""
        try:
            yield
        except BadEntryPoint as exc:
            warnings.warn(str(exc))
class NoSuchEntryPoint(Exception):
    """Raised by :func:`get_single` when no matching entry point is found."""

    def __init__(self, group, name):
        self.group, self.name = group, name

    def __str__(self):
        return "No {!r} entry point found in group {!r}".format(self.name, self.group)
class CaseSensitiveConfigParser(configparser.ConfigParser):
    # Entry point names are case-sensitive; disable ConfigParser's
    # default lower-casing of option names.
    optionxform = staticmethod(str)
class EntryPoint(object):
    """One named entry point: a module, an optional object path inside it,
    optional extras, and the distribution it was found in."""

    def __init__(self, name, module_name, object_name, extras=None, distro=None):
        self.name = name
        self.module_name = module_name
        self.object_name = object_name
        self.extras = extras
        self.distro = distro

    def __repr__(self):
        return "EntryPoint(%r, %r, %r, %r)" % \
            (self.name, self.module_name, self.object_name, self.distro)

    def load(self):
        """Load the object to which this entry point refers.
        """
        mod = import_module(self.module_name)
        obj = mod
        if self.object_name:
            # Walk a dotted attribute path, e.g. "Class.method".
            for attr in self.object_name.split('.'):
                obj = getattr(obj, attr)
        return obj

    @classmethod
    def from_string(cls, epstr, name, distro=None):
        """Parse an entry point from the syntax in entry_points.txt

        :param str epstr: The entry point string (not including 'name =')
        :param str name: The name of this entry point
        :param Distribution distro: The distribution in which the entry point was found
        :rtype: EntryPoint
        :raises BadEntryPoint: if *epstr* can't be parsed as an entry point.
        """
        m = entry_point_pattern.match(epstr)
        if m:
            mod, obj, extras = m.group('modulename', 'objectname', 'extras')
            if extras is not None:
                # BUGFIX: raw string -- '\s' in a plain literal is an invalid
                # escape sequence (SyntaxWarning on modern Python).
                extras = re.split(r',\s*', extras)
            return cls(name, mod, obj, extras, distro)
        else:
            raise BadEntryPoint(epstr)
class Distribution(object):
    """A distribution (name and version) in which entry points were found."""

    def __init__(self, name, version):
        self.name, self.version = name, version

    def __repr__(self):
        return "Distribution(%r, %r)" % (self.name, self.version)
def iter_files_distros(path=None, repeated_distro='first'):
    """Yield ``(config_parser, distro_or_None)`` for every entry_points.txt
    found on *path* (default: ``sys.path``).

    Handles three layouts: ``.egg`` directories, zipped ``.egg`` files, and
    regular ``*.dist-info`` / ``*.egg-info`` directories.  When
    *repeated_distro* is ``'first'``, later occurrences of an
    already-seen distribution name are skipped.
    """
    if path is None:
        path = sys.path
    # Distributions found earlier in path will shadow those with the same name
    # found later. If these distributions used different module names, it may
    # actually be possible to import both, but in most cases this shadowing
    # will be correct.
    distro_names_seen = set()
    for folder in path:
        if folder.rstrip('/\\').endswith('.egg'):
            # Gah, eggs
            egg_name = osp.basename(folder)
            # Egg names look like '<name>-<version>-...'; without a dash we
            # cannot recover name/version, so the distro is unknown.
            if '-' in egg_name:
                distro = Distribution(*egg_name.split('-')[:2])
                if (repeated_distro == 'first') \
                        and (distro.name in distro_names_seen):
                    continue
                distro_names_seen.add(distro.name)
            else:
                distro = None
            if osp.isdir(folder):
                # Unpacked egg: metadata lives in EGG-INFO/entry_points.txt.
                ep_path = osp.join(folder, 'EGG-INFO', 'entry_points.txt')
                if osp.isfile(ep_path):
                    cp = CaseSensitiveConfigParser()
                    cp.read(ep_path)
                    yield cp, distro
            elif zipfile.is_zipfile(folder):
                # Zipped egg: read the same file out of the archive.
                z = zipfile.ZipFile(folder)
                try:
                    info = z.getinfo('EGG-INFO/entry_points.txt')
                except KeyError:
                    continue
                cp = CaseSensitiveConfigParser()
                with z.open(info) as f:
                    fu = io.TextIOWrapper(f)
                    cp.read_file(fu,
                                 source=osp.join(folder, 'EGG-INFO', 'entry_points.txt'))
                yield cp, distro
        # Regular installed packages: scan dist-info / egg-info directories.
        # NOTE(review): the loop variable below shadows the 'path' parameter;
        # harmless (the outer iterator is already bound) but confusing.
        for path in itertools.chain(
            glob.iglob(osp.join(folder, '*.dist-info', 'entry_points.txt')),
            glob.iglob(osp.join(folder, '*.egg-info', 'entry_points.txt'))
        ):
            distro_name_version = osp.splitext(osp.basename(osp.dirname(path)))[0]
            if '-' in distro_name_version:
                # split('-', 1) keeps any dashes inside the version string.
                distro = Distribution(*distro_name_version.split('-', 1))
                if (repeated_distro == 'first') \
                        and (distro.name in distro_names_seen):
                    continue
                distro_names_seen.add(distro.name)
            else:
                distro = None
            cp = CaseSensitiveConfigParser()
            cp.read(path)
            yield cp, distro
def get_single(group, name, path=None):
    """Return the single :class:`EntryPoint` called *name* in *group*.

    Raises :exc:`NoSuchEntryPoint` when no file on *path* provides it.
    """
    for config, distro in iter_files_distros(path=path):
        try:
            epstr = config[group][name]
        except KeyError:
            continue
        with BadEntryPoint.err_to_warnings():
            return EntryPoint.from_string(epstr, name, distro)

    raise NoSuchEntryPoint(group, name)
def get_group_named(group, path=None):
    """Find a group of entry points with unique names.

    Returns a dictionary mapping each name to the first
    :class:`EntryPoint` found with that name.
    """
    result = {}
    for ep in get_group_all(group, path=path):
        # setdefault keeps the first occurrence, matching path precedence.
        result.setdefault(ep.name, ep)
    return result
def get_group_all(group, path=None):
    """Find all entry points in a group.

    Returns a list of :class:`EntryPoint` objects, including duplicates.
    """
    result = []
    for config, distro in iter_files_distros(path=path):
        if group not in config:
            continue
        for name, epstr in config[group].items():
            with BadEntryPoint.err_to_warnings():
                result.append(EntryPoint.from_string(epstr, name, distro))
    return result
if __name__ == '__main__':
    # Ad-hoc smoke test: print every console_scripts entry point on sys.path.
    import pprint
    pprint.pprint(get_group_all('console_scripts'))
| StarcoderdataPython |
3366071 | <filename>app/post/urls.py
from django.urls import path
from post.views import (
PostListAPI,
PostLikeSave,
PostDetailAPI,
SearchAPI,
PostLikeList,
PostCreateUpdateDestroy)
app_name = 'post'  # URL namespace: reverse these routes as 'post:<name>'.

urlpatterns = [
    # Collection root dispatches by HTTP verb to the ViewSet actions.
    path('', PostCreateUpdateDestroy.as_view({
        'post': 'create',
        'patch': 'partial_update',
        'delete': 'destroy'
    })),
    path('list/', PostListAPI.as_view()),
    path('detail/', PostDetailAPI.as_view()),
    path('search/', SearchAPI.as_view()),
    path('like/', PostLikeSave.as_view()),
    path('like/list/', PostLikeList.as_view()),
]
| StarcoderdataPython |
3241560 | <gh_stars>0
print ("Hi!") | StarcoderdataPython |
1681743 | #!/usr/bin/env python
""" performs a large scale test of TB sequence storage.
adds simulated TB samples, groups of which have evolved from common ancestors
This program does not generate the simulated sequences; this is done by make_large_sequence_set.py
#### Scenario tested
A large number of samples are derived from sequencing of TB over a number of years.
When sequencing started on a large scale (ca. 2015), a number of TB strains ('ancestor sequences') were
circulating. Over time, these transmitted and their descendents were isolated.
Sequences corresponding to this scenario are simulated, and added to the server in a random order.
No repacking is used following insertion, but findNeighbour3-dbmanager can be run in the background to achieve repacking.
No clustering is enabled.
#### Background
This test is designed to test the speed of insertion and storage requirements of a set of samples simulating those encountered with a highly clonal pathogen, such as TB.
#### Outcome measures
Insertion time
Time to read all the neighbours of one sample
#### How outcome measures are recorded
They are written to files by the script
#### How to run the test
Simulate the samples to be added:
The below will create 100,000 samples; adjust parameters as needed to create more or fewer samples.
Parameter 1 is the number of ancestor sequences.
Parameter 4 is the number of children of each ancestor. Increasing this to 1000 will produce 1M samples.
Note that these samples are stored on disc.
``` python make_large_sequence_set.py 1000 500 3 100 1e-8 ../demos/lss_tb/seqs```
To run the test, start up a server, e.g.
```python findNeighbour3-server.py ../demos/lss_tb/config/config.json```
Optionally launch one or more findNeighbour3-dbmanager processes
```python findNeighbour3-dbmanager.py ../demos/lss_tb/config/config.json```
Then run the test.
**Note**: at present, the server which the test runs against isn't configurable.
It runs against the server running on localhost:5020. Minor changes to the config file will cange this
The client url needs to be be passed to the call instantiating the fn4Client().
The below inserts until 500 samples are present in the server.
The below inserts 100 samples, then pauses for 1 hour.
Set the third parameter to 0 to avoid pausing.
```python demo_lss_tb.py 500 ../demos/lss_tb/seqs ../demos/lss_tb/output 100 3600```
If we now do this, then 250 more will be added
```python demo_large_matrix.py 750 ../demos/large_matrix_1/output 100 3600```
* How the output is analysed
This will analyse all output from the above:
```Rscript demo_depict_timings.R ../demos/large_matrix_1/output```
"""
import os
import glob
import random
import datetime
import argparse
import pathlib
import time
from Bio import SeqIO
from fn4client import fn4Client
if __name__ == "__main__":

    # --- command-line interface -------------------------------------------
    # maximum number to add
    parser = argparse.ArgumentParser(
        description="Generate and add to the server groups of sequences which have evolved from each other"
    )
    parser.add_argument(
        "max_sequences", type=int, nargs=1, help="sequences will be added until max_sequences exist in the server."
    )
    parser.add_argument("inputdir", type=str, nargs=1, help="input fasta files will be read from the inputdir")
    parser.add_argument("outputdir", type=str, nargs=1, help="output will be written to the outputdir")
    parser.add_argument(
        "pause_after",
        type=int,
        nargs=1,
        help="insertion will pause after adding pause_after sequences (set to zero to never pause)",
    )
    parser.add_argument("pause_duration", type=int, nargs=1, help="pause_duration in seconds")
    args = parser.parse_args()
    max_sequences = args.max_sequences[0]
    pause_after = args.pause_after[0]
    pause_duration = args.pause_duration[0]
    outputdir = os.path.abspath(args.outputdir[0])
    inputdir = os.path.abspath(args.inputdir[0])

    # make the directories if they don't exist
    p = pathlib.Path(outputdir)
    p.mkdir(parents=True, exist_ok=True)
    p = pathlib.Path(inputdir)
    p.mkdir(parents=True, exist_ok=True)

    # determine input files
    inputfiles = glob.glob(os.path.join(inputdir, "*.fasta"))
    random.shuffle(inputfiles)  # insert the simulated samples in a random order

    if len(inputfiles) < max_sequences:
        raise ValueError(
            "Asked to add {0} sequences, but only {1} are available in the input directory {2}".format(
                max_sequences, len(inputfiles), inputdir
            )
        )
    else:
        inputfiles = inputfiles[0:max_sequences]

    print("opening connection to fn3 server")
    fn4c = fn4Client(baseurl="http://127.0.0.1:5020")

    # determine all masked positions
    excluded_positions = fn4c.nucleotides_excluded()

    # determine how many samples there are currently in the server.
    nSamples = len(fn4c.guids())
    print("There are {0} existing samples. Adding more ..".format(nSamples))

    # create output file with header line
    outputfile = os.path.join(outputdir, "timings_{0}.tsv".format(nSamples))
    nAdded_this_batch = 0
    with open(outputfile, "w+t") as f:
        output_line = "{0}\t{1}\t{2}\t{3}\t{4}\t{5}\t{6}\n".format(
            "nSamples", "s_insert", "e_insert", "d_insert", "s_read", "e_read", "d_read"
        )
        f.write(output_line)

        while nAdded_this_batch < max_sequences:
            if pause_after > 0:
                # NOTE(review): this also fires when nAdded_this_batch == 0,
                # i.e. it pauses before the very first insert — confirm intended.
                if nAdded_this_batch % pause_after == 0:
                    print("Insert paused; will resume after {0} seconds.".format(pause_duration))
                    time.sleep(pause_duration)

            # read samples
            inputfile = inputfiles[nAdded_this_batch]
            nSamples += 1
            nAdded_this_batch += 1
            # assumes one record per fasta file: the last record read is the
            # one inserted below — TODO confirm.
            with open(inputfile, "rt") as f_in:
                for record in SeqIO.parse(f_in, "fasta"):
                    seq = str(record.seq)
                    guid = str(record.id)

            # add
            print(
                "Inserting", guid, "(samples in this batch = ", nAdded_this_batch, "); will pause every ", pause_after
            )
            stime1 = datetime.datetime.now()
            resp = fn4c.insert(guid=guid, seq=seq)
            etime1 = datetime.datetime.now()
            delta1 = etime1 - stime1
            print("Insert yielded status code {0} after {1}".format(resp.status_code, delta1))

            # recover neighbours of guid
            stime2 = datetime.datetime.now()
            # check it exists
            if not fn4c.guid_exists(guid):
                print("Guid {0} was not inserted".format(guid))
            else:
                neighbours = fn4c.guid2neighbours(guid, threshold=10000000)
            etime2 = datetime.datetime.now()
            delta2 = etime2 - stime2
            print("Recovered {1} neighbours of {0}".format(guid, len(neighbours)))

            output_line = "{0}\t{1}\t{2}\t{3}\t{4}\t{5}\t{6}\n".format(
                nSamples, stime1, etime1, delta1, stime2, etime2, delta2
            )
            f.write(output_line)
            f.flush()

            # delete the source file - keep disc space down
            try:
                os.unlink(inputfile)
                print("Fasta input file deleted")
            except PermissionError:
                print("Fasta input file not deleted, as locked")

    print("Have added {0} sequences, stopping.".format(nSamples))
    exit(0)
| StarcoderdataPython |
3241160 | '''original example for checking how far GAM works
Note: uncomment plt.show() to display graphs
'''
# NOTE: this demo is Python 2 code (print statements below).
example = 2  # 1,2 or 3

import numpy as np
import numpy.random as R
import matplotlib.pyplot as plt

from statsmodels.sandbox.gam import AdditiveModel
from statsmodels.sandbox.gam import Model as GAM  # ?
from statsmodels.genmod.families import family
from statsmodels.genmod.generalized_linear_model import GLM

standardize = lambda x: (x - x.mean()) / x.std()
demean = lambda x: (x - x.mean())

# Simulated data: two sorted standard-normal covariates plus noise in y.
nobs = 150
x1 = R.standard_normal(nobs)
x1.sort()
x2 = R.standard_normal(nobs)
x2.sort()
y = R.standard_normal((nobs,))

# True component functions of the additive model.
f1 = lambda x1: (x1 + x1**2 - 3 - 1 * x1**3 + 0.1 * np.exp(-x1/4.))
f2 = lambda x2: (x2 + x2**2 - 0.1 * np.exp(x2/4.))
z = standardize(f1(x1)) + standardize(f2(x2))
z = standardize(z) * 2  # 0.1
y += z

d = np.array([x1,x2]).T

if example == 1:
    print "normal"
    m = AdditiveModel(d)
    m.fit(y)
    x = np.linspace(-2,2,50)

    print m

    y_pred = m.results.predict(d)
    plt.figure()
    plt.plot(y, '.')
    plt.plot(z, 'b-', label='true')
    plt.plot(y_pred, 'r-', label='AdditiveModel')
    plt.legend()
    plt.title('gam.AdditiveModel')

import scipy.stats, time

if example == 2:
    print "binomial"
    f = family.Binomial()
    b = np.asarray([scipy.stats.bernoulli.rvs(p) for p in f.link.inverse(y)])
    b.shape = y.shape
    m = GAM(b, d, family=f)
    # NOTE(review): 'toc'/'tic' are swapped relative to the usual MATLAB
    # convention, but tic - toc is still the elapsed fit time.
    toc = time.time()
    m.fit(b)
    tic = time.time()
    print tic-toc

if example == 3:
    print "Poisson"
    f = family.Poisson()
    y = y/y.max() * 3
    yp = f.link.inverse(y)
    p = np.asarray([scipy.stats.poisson.rvs(p) for p in f.link.inverse(y)], float)
    p.shape = y.shape
    m = GAM(p, d, family=f)
    toc = time.time()
    m.fit(p)
    tic = time.time()
    print tic-toc

# Compare each fitted smoother with the true (standardized) component.
plt.figure()
plt.plot(x1, standardize(m.smoothers[0](x1)), 'r')
plt.plot(x1, standardize(f1(x1)), linewidth=2)
plt.figure()
plt.plot(x2, standardize(m.smoothers[1](x2)), 'r')
plt.plot(x2, standardize(f2(x2)), linewidth=2)

plt.show()

## pylab.figure(num=1)
## pylab.plot(x1, standardize(m.smoothers[0](x1)), 'b')
## pylab.plot(x1, standardize(f1(x1)), linewidth=2)
## pylab.figure(num=2)
## pylab.plot(x2, standardize(m.smoothers[1](x2)), 'b')
## pylab.plot(x2, standardize(f2(x2)), linewidth=2)
## pylab.show()
| StarcoderdataPython |
3250781 | """
the tests for the main app functionality
"""
from copy_unique import __version__
def test_version() -> None:
    """Check that ``__version__`` is a 5-character string."""
    version = __version__
    assert isinstance(version, str)
    assert len(version) == 5
| StarcoderdataPython |
3249294 | import numpy as np
import torch
# Choose the torch device once at import time: CUDA when available, else CPU.
device = 'cpu'
if torch.cuda.is_available():
    device = 'cuda'
def get_save_path(name):
    """Build the checkpoint path for model *name* under the saves directory."""
    template = 'saves/{}.pt'
    return template.format(name)
def save_model(model, name):
    """Serialize *model* to ``saves/<name>.pt`` using ``torch.save``."""
    torch.save(model, get_save_path(name))
def load_model(name):
    """Load the model saved as ``saves/<name>.pt`` onto the detected device.

    The model is switched to eval mode (disables dropout / batch-norm
    training behaviour) before being returned.
    """
    model = torch.load(get_save_path(name), map_location=device)
    model.eval()
    return model
def perlin(x, y, seed=0):
    """2D Perlin noise evaluated at coordinates (x, y).

    *x* and *y* are equal-shaped float ndarrays (e.g. from ``np.meshgrid``);
    scalars are accepted as well.  Returns noise values of the same shape.
    Integer lattice points evaluate to 0 by construction.

    BUG FIX: the original called ``int(x)``, which raises TypeError for the
    array inputs the rest of the function is written for; truncate
    elementwise instead.
    """
    # permutation table (deterministic for a given seed)
    np.random.seed(seed)
    p = np.arange(256, dtype=int)
    np.random.shuffle(p)
    p = np.stack([p, p]).flatten()
    # coordinates of the top-left lattice corner
    x = np.asarray(x, dtype=float)
    y = np.asarray(y, dtype=float)
    xi = x.astype(int)
    yi = y.astype(int)
    # internal (fractional) coordinates within the cell
    xf = x - xi
    yf = y - yi
    # fade factors
    u = fade(xf)
    v = fade(yf)
    # noise contributions from the four cell corners
    n00 = gradient(p[p[xi]+yi], xf, yf)
    n01 = gradient(p[p[xi]+yi+1], xf, yf-1)
    n11 = gradient(p[p[xi+1]+yi+1], xf-1, yf-1)
    n10 = gradient(p[p[xi+1]+yi], xf-1, yf)
    # bilinear interpolation of the corner noises with faded weights
    x1 = lerp(n00, n10, u)
    x2 = lerp(n01, n11, u)
    return lerp(x1, x2, v)
def lerp(a, b, x):
    """Linearly interpolate from *a* to *b* by fraction *x*."""
    delta = b - a
    return a + x * delta
def fade(t):
    """Perlin's quintic fade curve: 6t^5 - 15t^4 + 10t^3."""
    t3 = t ** 3
    t4 = t ** 4
    t5 = t ** 5
    return 6 * t5 - 15 * t4 + 10 * t3
def gradient(h, x, y):
    """Dot product of the gradient vector selected by hash *h* with (x, y).

    *h* selects one of four unit gradient vectors via ``h % 4``.

    Generalized: the original hard-coded ``g[:, :, 0]`` indexing and so
    crashed for anything but 2-D ``h``; ellipsis indexing works for
    scalars and arrays of any rank.
    """
    vectors = np.array([[0, 1], [0, -1], [1, 0], [-1, 0]])
    g = vectors[h % 4]
    return g[..., 0] * x + g[..., 1] * y
| StarcoderdataPython |
1612713 | #!/usr/bin/bash
# -*- coding: utf-8 -*-
def my_append_list(l, e):
    """Simulate the append method: add *e* to the end of list *l* in place."""
    l += [e]
    return l
def my_append_string(s, e):
    """Simulate append for strings: return *s* with *e* appended."""
    return s + e
def my_append_tuple(t, e):
    """Simulate append for tuples: return *t* with *e* appended."""
    return t + (e,)
# Demo: exercise each append simulation (Python 2 print statements).
# list
l = ["a", "b", "c"]
l = my_append_list(l, "d")  # BUG FIX: original called undefined 'my_append'
print l

# string
s = "abcde"
s = my_append_string(s, "f")
print s

# tuple
t = (1, 2, 3)
t = my_append_tuple(t, 4)
print t
| StarcoderdataPython |
158841 | <gh_stars>10-100
import os
from dotenv import load_dotenv
import pytest
from legislice.download import Client
from authorityspoke.io import loaders, name_index, readers
from authorityspoke.facts import Exhibit
from authorityspoke.rules import Rule
from authorityspoke.io.fake_enactments import FakeClient
load_dotenv()
TOKEN = os.getenv("LEGISLICE_API_TOKEN")
class TestRuleDump:
    """Round-trip Rule objects through their dict() serialization."""

    def test_dump_rule(self, make_rule):
        # Serialization must preserve the predicate content template.
        rule = make_rule["h2"]
        dumped = rule.dict()
        content = dumped["procedure"]["inputs"][0]["predicate"]["content"]
        assert content == "$thing was on the premises of $place"

    def test_dump_and_read_rule(self, make_procedure, e_search_clause):
        # dict() output must be accepted unchanged by the Rule constructor.
        rule = Rule(procedure=make_procedure["c2"], enactments=e_search_clause)
        dumped = rule.dict()
        loaded = Rule(**dumped)
        content = loaded.despite[0].predicate.content
        assert "the distance between $place1 and $place2 was" in content
class TestLoadRules:
    """
    Tests loading Rules, possibly for linking to legislation without
    reference to any Opinion or Holding.
    """

    # NOTE(review): this class attribute creates a live API client at import
    # time; the tests themselves use the fake_beard_client fixture instead.
    client = Client(api_token=TOKEN)

    def test_loading_rules(self, fake_beard_client):
        beard_rules = loaders.read_holdings_from_file(
            "beard_rules.yaml", client=fake_beard_client
        )
        assert (
            beard_rules[0].outputs[0].predicate.content
            == "${the_suspected_beard} was a beard"
        )

    def test_imported_rule_is_type_rule(self, fake_beard_client):
        beard_rules = loaders.read_holdings_from_file(
            "beard_rules.yaml", client=fake_beard_client
        )
        assert isinstance(beard_rules[0].rule, Rule)

    def test_rule_short_string(self, fake_beard_client):
        beard_rules = loaders.read_holdings_from_file(
            "beard_rules.yaml", client=fake_beard_client
        )
        assert beard_rules[0].rule.short_string.lower().startswith("the rule")

    def test_index_names_from_sibling_inputs(self):
        raw_rules = loaders.load_holdings("beard_rules.yaml")
        indexed_rules, mentioned = name_index.index_names(raw_rules[0]["inputs"])
        key = "the suspected beard occurred on or below the chin"
        assert mentioned[key]["terms"][0] == "the suspected beard"

    def test_rule_with_exhibit_as_context_factor(self, fake_beard_client):
        rules = loaders.read_holdings_from_file(
            "beard_rules.yaml", client=fake_beard_client
        )
        exhibit = rules[5].inputs[0].terms[2]
        assert isinstance(exhibit, Exhibit)

    def test_read_rules_without_regime(self, fake_beard_client):
        beard_rules = loaders.read_holdings_from_file(
            "beard_rules.yaml", client=fake_beard_client
        )
        assert beard_rules[0].inputs[0].short_string == (
            "the fact that <the suspected beard> was facial hair"
        )

    def test_correct_context_after_loading_rules(self, fake_beard_client):
        beard_rules = loaders.read_holdings_from_file(
            "beard_rules.yaml", client=fake_beard_client
        )
        elements_of_offense = beard_rules[11]
        assert len(elements_of_offense.despite) == 1
        assert (
            elements_of_offense.despite[0].generic_terms()[0].name
            == "the Department of Beards"
        )

    def test_load_any_enactments(self, fake_beard_client):
        """Test bug where holding's enactments aren't loaded."""
        beard_dictionary = loaders.load_holdings("beard_rules.yaml")
        shorter = [beard_dictionary[0]]
        beard_rules = readers.read_holdings(shorter, client=fake_beard_client)
        expected = "facial hair no shorter than 5 millimetres"
        assert expected in beard_rules[0].enactments[0].selected_text()

    @pytest.mark.vcr
    def test_generic_terms_after_adding_rules(self, fake_beard_client):
        # Combine rules with '+' and check the composed rule's output,
        # despite-list, and generic terms survive the addition.
        beard_dictionary = loaders.load_holdings("beard_rules.yaml")
        beard_rules = readers.read_holdings(beard_dictionary, client=fake_beard_client)
        loan_is_transfer = beard_rules[7]
        elements_of_offense = beard_rules[11]
        loan_without_exceptions = (
            loan_is_transfer
            + elements_of_offense.inputs[1]
            + elements_of_offense.inputs[2]
            + elements_of_offense.enactments[1]
        )
        loan_establishes_offense = loan_without_exceptions + elements_of_offense
        assert str(loan_establishes_offense.outputs[0]) == (
            "the fact that <the defendant> committed the offense of improper "
            "transfer of beardcoin"
        )
        assert len(loan_establishes_offense.despite) == 1
        assert (
            loan_establishes_offense.despite[0].generic_terms()[0].name
            == "the Department of Beards"
        )
| StarcoderdataPython |
16009 | import os
import pytest
import testinfra.utils.ansible_runner
# Resolve the hosts under test from the Molecule inventory.
testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
    os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('all')


# NOTE(review): the parentheses around each parameter below are redundant —
# these are plain strings, not one-element tuples.
@pytest.mark.parametrize("installed_packages", [
    ("haproxy20"),
    ("socat"),
    ("keepalived"),
    ("bind"),
])
def test_packages_installed(host, installed_packages):
    """Each required package must be installed."""
    rpackage = host.package(installed_packages)
    assert rpackage.is_installed


@pytest.mark.parametrize("services", [
    ("haproxy"),
    # ("keepalive"),
    ("named"),
])
def test_services_running_and_enabled(host, services):
    """Each core service must be enabled at boot and currently running."""
    service = host.service(services)
    assert service.is_enabled
    assert service.is_running


@pytest.mark.parametrize("files", [
    ("/etc/pki/haproxy/star_haproxy.pem"),
])
def test_star_haproxy_pem(host, files):
    """The TLS bundle must be root-owned, mode 600, and hold cert + key."""
    star_haproxy_pem = host.file(files)
    assert star_haproxy_pem.user == "root"
    assert star_haproxy_pem.group == "root"
    assert star_haproxy_pem.mode == 0o600
    assert star_haproxy_pem.contains('-----BEGIN CERTIFICATE-----')
    assert star_haproxy_pem.contains('-----BEGIN RSA PRIVATE KEY-----')


def test_sysctl_non_local_bind(host):
    """Non-local bind must be enabled (needed for keepalived VIP failover)."""
    non_local_bind = host.sysctl("net.ipv4.ip_nonlocal_bind")
    assert non_local_bind == 1
| StarcoderdataPython |
1774357 | <gh_stars>0
import os
from autoPyTorch.pipeline.base.pipeline_node import PipelineNode
from autoPyTorch.utils.config.config_file_parser import ConfigFileParser
from autoPyTorch.utils.config.config_option import ConfigOption, to_bool
from autoPyTorch.utils.hyperparameter_search_space_update import \
parse_hyperparameter_search_space_updates
class SetAutoNetConfig(PipelineNode):
    """Pipeline node: load a saved autonet configuration from a previous run
    directory and apply it (plus optional overrides) to *autonet*."""

    def fit(self, pipeline_config, autonet, data_manager, run_result_dir):
        # Read the config written by the earlier run and fill in defaults.
        parser = autonet.get_autonet_config_file_parser()
        config = parser.read(os.path.join(run_result_dir, "autonet.config"))
        parser.set_defaults(config)

        # Optional hyperparameter search-space updates saved alongside it.
        updates = None
        if os.path.exists(os.path.join(run_result_dir, "hyperparameter_search_space_updates")):
            updates = parse_hyperparameter_search_space_updates(
                os.path.join(run_result_dir, "hyperparameter_search_space_updates"))

        # Let dataset-provided metric / runtime override the stored config.
        if (pipeline_config['use_dataset_metric'] and data_manager.metric is not None):
            config['optimize_metric'] = data_manager.metric
        if (pipeline_config['use_dataset_max_runtime'] and data_manager.max_runtime is not None):
            config['max_runtime'] = data_manager.max_runtime

        # The updates are passed explicitly below, so drop any stored copy
        # to avoid passing the keyword twice.
        if "hyperparameter_search_space_updates" in config:
            del config["hyperparameter_search_space_updates"]

        config['log_level'] = pipeline_config['log_level']
        autonet.update_autonet_config(hyperparameter_search_space_updates=updates,
                                      **config)
        return dict()

    def get_pipeline_config_options(self):
        # Options controlling whether dataset metadata overrides the config.
        options = [
            ConfigOption("use_dataset_metric", default=False, type=to_bool),
            ConfigOption("use_dataset_max_runtime", default=False, type=to_bool),
        ]
        return options
| StarcoderdataPython |
1764442 | <reponame>evamwangi/bc-7-Todo_List<gh_stars>0
from flask import Flask, render_template
from flask.ext.bootstrap import Bootstrap
from flask.ext.sqlalchemy import SQLAlchemy
from flask.ext.login import LoginManager
from config import config
# Module-level extension instances; each is bound to an app in create_app().
bootstrap = Bootstrap()
db = SQLAlchemy()

#Initialization of the Flask-Login
login_manager = LoginManager()
login_manager.session_protection = 'strong'
login_manager.login_view = 'auth.login'  # endpoint used for login redirects
def create_app(config_name):
    '''
    Factory function which allows specification of configurations before
    the app is created.

    :param config_name: key into ``config`` selecting the configuration
        class to use for this application instance.
    :return: a fully configured :class:`Flask` application.
    '''
    app = Flask(__name__)
    app.config.from_object(config[config_name])
    config[config_name].init_app(app)
    login_manager.init_app(app)
    bootstrap.init_app(app)

    # BUG FIX: the original rebound a *local* ``db = SQLAlchemy(app)``,
    # which left the module-level ``db`` instance uninitialized for any
    # code importing it.  Initialize the shared instance instead, and run
    # create_all() inside an application context as Flask-SQLAlchemy
    # requires.
    db.init_app(app)
    with app.app_context():
        db.create_all()

    from main import main as main_blueprint
    from auth import auth as auth_blueprint
    app.register_blueprint(main_blueprint)
    app.register_blueprint(auth_blueprint)

    return app
return sqlite3.connect(app.config['DATABASE'])
| StarcoderdataPython |
1777762 | <gh_stars>1-10
# Source:
# https://peterroelants.github.io/posts/neural-network-implementation-part01/
import matplotlib
matplotlib.use('TkAgg')
import sys
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_style('darkgrid')
np.random.seed(seed=13)
def print_versions():
    """Print interpreter and plotting-stack versions (debugging helper)."""
    print('Python: {}.{}.{}'.format(*sys.version_info[:3]))
    print('numpy: {}'.format(np.__version__))
    print('matplotlib: {}'.format(matplotlib.__version__))
    print('seaborn: {}'.format(sns.__version__))
# print_versions()
def f(x):
    """Noise-free target line: f(x) = 2x.

    The observed targets t are generated as f(x) plus small Gaussian
    noise, so a fitted model approximates but never exactly recovers
    this line.
    """
    return 2 * x
def nn(x, w):
    """Single-parameter linear 'network': output y = x * w (no bias)."""
    return w * x
def loss(y, t):
    """Mean squared error between predictions *y* and targets *t*."""
    squared_error = (t - y) ** 2
    return np.mean(squared_error)
###
# Plot the samples and regression line
###

# Vector of input samples
# 20 values sampled from uniform dist between 0 and 1
x = np.random.uniform(0, 1, 20)
# Generate the Gaussian noise error for each sample in x
noise = np.random.randn(x.shape[0]) * 0.2
# Create targets using the formula: f(x) + N(0, 0.2)
t = f(x) + noise


def plot_samples():
    """Scatter the noisy targets against x together with the true line."""
    # Plot the target t versus the input x
    plt.figure(figsize=(5, 3))
    plt.plot(x, t, 'o', label='$t$')
    # Plot the initial line
    plt.plot([0, 1], [f(0), f(1)], 'b--', label='$f(x)$')
    plt.xlabel('$x$', fontsize=12)
    plt.ylabel('$t$', fontsize=12)
    plt.axis((0, 1, 0, 2))
    plt.title('inputs (x) vs targets (t)')
    plt.legend(loc=2)
    plt.show()


# plot_samples()
###
# Plot the loss vs. the given weight w
###

# Vector of weights for which we want to plot the loss
ws = np.linspace(0, 4, num=100)  # weight values
# Create a vectorized function
vectorized_fn = np.vectorize(lambda w: loss(nn(x, w), t))
# Calculate the loss for each weight in ws
loss_ws = vectorized_fn(ws)


def plot_loss_function():
    """Plot MSE as a function of the single weight over [0, 4]."""
    plt.figure(figsize=(5, 4))
    plt.plot(ws, loss_ws, 'r--', label='loss')
    plt.xlabel('$weights$', fontsize=12)
    plt.ylabel('$MSE$', fontsize=12)
    plt.title('loss function with respect to $w$')
    plt.xlim(0, 4)
    plt.legend()
    plt.show()


# plot_loss_function()
###
# Plot gradient descent updates on the loss function
###
def gradient(w, x, t):
    """Per-sample gradient of the squared error with respect to w.

    Since y = nn(x, w) = x * w, d/dw (y - t)^2 = 2x(y - t).
    """
    residual = nn(x, w) - t
    return 2 * x * residual
def delta_w(w_k, x, t, learning_rate):
    """Batch weight update: learning rate times the mean sample gradient."""
    mean_grad = np.mean(gradient(w_k, x, t))
    return learning_rate * mean_grad
# Initial weight parameter
w = np.random.rand()
# Set the learning rate
learning_rate = 0.9

# Perform the gradient descent updates and print the weights and loss
# Number of gradient descent updates
nb_of_iterations = 4
# Keep track of weight and loss values; include initial values
w_loss = [(w, loss(nn(x, w), t))]
for i in range(nb_of_iterations):
    # Delta w update
    dw = delta_w(w, x, t, learning_rate)
    print(f'gradient at w({i}): {dw:.4f}')
    # Update the current weight parameter in the negative direction of the
    # gradient
    w = w - dw
    # Loss value
    loss_value = loss(nn(x, w), t)
    # Save weight and loss
    w_loss.append((w, loss_value))

# Print the final weight and loss
# Target weight value is around 2.0 (the slope of the generator f)
for i in range(0, len(w_loss)):
    w = w_loss[i][0]
    l = w_loss[i][1]
    print(f'w({i}): {w:.4f}\tloss: {l:.4f}')
def plot_gradient_updates():
    """Overlay the sequence of gradient-descent steps on the loss curve."""
    # Visualise the gradient descent updates
    plt.figure(figsize=(6, 4))
    plt.plot(ws, loss_ws, 'r--', label='loss')  # Loss curve
    # Plot the updates
    for i in range(0, len(w_loss) - 1):
        w1, c1 = w_loss[i]
        w2, c2 = w_loss[i + 1]
        plt.plot(w1, c1, 'bo')
        plt.plot([w1, w2], [c1, c2], 'b-')
        plt.text(w1, c1 + 0.05, f'${i}$')
    # Mark the final point (loop variables persist after the loop).
    plt.plot(w2, c2, 'bo', label='$w(k)$')
    plt.text(w2, c2 + 0.05, f'${i+1}$')
    # Show figure
    plt.xlabel('$w$', fontsize=12)
    plt.ylabel('$\\xi$', fontsize=12)
    plt.title('Gradient descent updates plotted on loss function')
    plt.xlim(0, 4)
    plt.legend(loc=1)
    plt.show()
# plot_gradient_updates()
###
# Plot the final results
###

# Re-train from a fresh random weight for 10 iterations to get the
# final fitted value of w used in the plot below.
w = np.random.rand()
nb_of_iterations = 10
for i in range(nb_of_iterations):
    dw = delta_w(w, x, t, learning_rate)
    w = w - dw
def plot_final_result():
    """Plot the noisy samples, the true line f(x), and the fitted line w*x."""
    # Note that there is no bias term as both lines pass through the origin
    # Plot the fitted line against the target line
    plt.figure(figsize=(6, 4))
    # Plot the target t versus the input x
    plt.plot(x, t, 'o', label='$t$')
    # Plot the initial line
    plt.plot([0, 1], [f(0), f(1)], 'b--', label='$f(x)$')
    # plot the fitted line
    plt.plot([0, 1], [0 * w, 1 * w], 'r-', label='$y = w * x$')
    plt.xlabel('$x$', fontsize=12)
    plt.ylabel('$t$', fontsize=12)
    plt.title('input vs target')
    plt.legend(loc=2)
    plt.ylim(0, 2)
    plt.xlim(0, 1)
    plt.show()


plot_final_result()
| StarcoderdataPython |
4818454 | #!/usr/bin/env python
"""
FACADE
Use this pattern when:
1. you want to provide a simple interface to a complex subsystem. Subsystems often get more complex as they evolve. Most patterns, when applied, result in more and smaller classes. This makes the subsystem more reusable and easier to customize, but it also becomes harder to use for clients that don't need to customize it. A facade can provide a simple default view of the subsystem that is good enough for most clients. Only clients needing more customizability will need to look beyond the facade.
2. there are many dependencies between clients and the implementation classes of an abstraction. Introduce a facade to decouple the subsystem from clients and other subsystems, thereby promoting subsystem independence and portability.
3. you want to layer your subsystems. Use a facade to define an entry point to each subsystem level. If subsystems are dependent, then you can simplify the dependencies between them by making them communicate with each other solely through their facades.
"""
class SubsystemClassA(object):
    """
    1. implement subsystem functionality.
    2. handle work assigned by the Facade object.
    3. have no knowledge of the facade; that is, they keep no references to it.
    """
    def operation(self):
        # Stand-in for real subsystem work.
        print 'Hello, World!'
class SubsystemClassB(object):
    """Second subsystem class; see SubsystemClassA for the role description."""
    def operation(self):
        print 'Wassup, Bro!!'
class Facade(object):
    """
    1. knows which subsystem classes are responsible for a request.
    2. delegates client requests to appropriate subsystem objects.
    """
    def operationA(self):
        # Delegate to subsystem A; callers never touch the subsystem class.
        obj = SubsystemClassA()
        obj.operation()

    def operationB(self):
        # Delegate to subsystem B.
        obj = SubsystemClassB()
        obj.operation()
class Client(object):
    """Example client that reaches the subsystem only through the Facade."""
    def main(self, cmd):
        # Clients communicate with the subsystem by sending requests to Facade,
        # which forwards them to the appropriate subsystem object(s). Although
        # the subsystem objects perform the actual work, the facade may have
        # to do work of its own to translate its interface to subsystem
        # interfaces.
        #
        # Clients that use the facade don't have to access its subsystem
        # objects directly.
        compiler = Facade()
        if cmd == 'hello':
            compiler.operationA()
        elif cmd == 'hi':
            compiler.operationB()
        else:
            raise NotImplementedError
if __name__ == '__main__':
    # Demo: drive both subsystem operations through the facade.
    c = Client()
    c.main('hello')
    c.main('hi')
| StarcoderdataPython |
4831308 | """Ingest John Hopkins University Covid-19 data.
Load covid data from C3 AI datalake or from JHU and convert it to C3 AI format.
See: https://github.com/reichlab/covid19-forecast-hub/data-truth
"""
import io
import pandas as pd
import requests
from onequietnight.data import c3ai
metrics = ["JHU_ConfirmedCases", "JHU_ConfirmedDeaths"]
def resolve_csse_id(df, locations_df, c3_id, csse_id):
    """Conform CSSE data to C3 data.

    Matches rows of *df* (keyed by *csse_id*) to C3 locations via the
    *c3_id* column of *locations_df*, and returns a wide frame indexed by
    date with one column per C3 location id.
    """
    merged = locations_df[[c3_id, "id"]].merge(
        df.reset_index(),
        left_on=c3_id,
        right_on=csse_id,
        how="inner",
    )
    conformed = merged.drop(columns=[csse_id, c3_id]).set_index("id").T
    conformed.index = pd.DatetimeIndex(conformed.index, name="dates")
    return conformed
def conform_csse(metric_name, state_df, county_df, locations_df):
    """Conform CSSE data to C3 data.

    Conforms both the state- and county-level CSSE frames, then reshapes
    the combined wide frame into long records with a *metric_name* column.
    """
    state_wide = resolve_csse_id(state_df, locations_df, "location_name", "index")
    county_wide = resolve_csse_id(county_df, locations_df, "fips.id", "FIPS")
    combined = pd.concat([state_wide, county_wide], axis=1)
    return combined.stack("id").reset_index(name=metric_name)
def get_county_truth(df):
    """Aggregate county rows (valid FIPS in [100, 80001)) by 5-digit FIPS.

    From covid19-forecast-hub/data-truth/get-truth-data.py.
    """
    county = df[pd.notnull(df.FIPS)]
    in_range = (county.FIPS >= 100) & (county.FIPS < 80001)
    county = county[in_range]
    # Normalize the key to a zero-padded 5-character string.
    county.FIPS = county.FIPS.astype(int).map("{:05d}".format)
    return county.groupby(["FIPS"]).sum()
def get_truth(url):
    """Get data from CSSE.

    Downloads the CSV at *url* and returns four frames:
    (state+national cumulative, state+national incident,
    county cumulative, county incident).

    From covid19-forecast-hub/data-truth/get-truth-data.py."""
    url_req = requests.get(url).content
    df = pd.read_csv(io.StringIO(url_req.decode("utf-8")))

    # aggregate by state and nationally
    state_agg = df.groupby(["Province_State"]).sum()
    us_nat = df.groupby(["Country_Region"]).sum()
    county_agg = get_county_truth(df)
    df_state_nat = state_agg.append(us_nat)

    # drop unnecessary columns (leading metadata columns of the CSSE CSV)
    df_state_nat_truth = df_state_nat.drop(
        df_state_nat.columns[list(range(0, 6))], axis=1
    )
    df_county_truth = county_agg.drop(county_agg.columns[list(range(0, 5))], axis=1)

    df_state_nat_truth_cumulative = df_state_nat_truth
    df_county_truth_cumulative = df_county_truth
    # Daily incident values = first difference of the cumulative series.
    df_state_nat_truth_incident = (
        df_state_nat_truth_cumulative
        - df_state_nat_truth_cumulative.shift(periods=1, axis="columns")
    )
    df_county_truth_incident = (
        df_county_truth_cumulative
        - df_county_truth_cumulative.shift(periods=1, axis="columns")
    )

    # lower bound truth values to 0.0 (corrections can make diffs negative)
    df_state_nat_truth_incident = df_state_nat_truth_incident.clip(lower=0.0)
    df_county_truth_incident = df_county_truth_incident.clip(lower=0.0)
    return (
        df_state_nat_truth_cumulative,
        df_state_nat_truth_incident,
        df_county_truth_cumulative,
        df_county_truth_incident,
    )
def load_data_jhu(env):
    """Fetch cumulative JHU/CSSE case and death counts conformed to C3 ids.

    Only the cumulative frames returned by `get_truth` are used; the
    incident frames are discarded.
    """
    locations_df = env.locations_df
    base = (
        "https://raw.githubusercontent.com/CSSEGISandData/COVID-19/"
        "master/csse_covid_19_data/csse_covid_19_time_series/"
    )
    state_nat_cum_death, _, county_cum_death, _ = get_truth(
        url=base + "time_series_covid19_deaths_US.csv"
    )
    state_nat_cum_case, _, county_cum_case, _ = get_truth(
        url=base + "time_series_covid19_confirmed_US.csv"
    )
    name_case = "JHU_ConfirmedCases"
    name_death = "JHU_ConfirmedDeaths"
    return {
        name_case: conform_csse(
            name_case, state_nat_cum_case, county_cum_case, locations_df
        ),
        name_death: conform_csse(
            name_death, state_nat_cum_death, county_cum_death, locations_df
        ),
    }
def load_data(env):
    """Load metric data, either from JHU/CSSE or from the C3 AI data lake.

    When `env.load_data_jhu` is set, delegates to `load_data_jhu`;
    otherwise fetches every metric in the module-level `metrics` list
    (defined elsewhere in this file) at country, state, and county level
    via `c3ai.load_data`.
    """
    if env.load_data_jhu:
        return load_data_jhu(env)
    else:
        return {
            name: c3ai.load_data(env, name, levels=["country", "state", "county"])
            for name in metrics
        }
| StarcoderdataPython |
3358671 | <filename>trebol/interface.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import tornado.gen
import bcrypt
__all__ = ["create_new_user"]
@tornado.gen.coroutine
def get_next_id(db, collection):
    """Atomically increment and return the per-collection id counter.

    The counter document lives in ``db.counters`` under the id
    ``"<collection>id"``; ``new=True`` returns the post-increment value.
    """
    counter_doc = yield db.counters.find_and_modify(
        {"_id": collection + "id"},
        {"$inc": {"seq": 1}},
        new=True,
    )
    raise tornado.gen.Return(counter_doc["seq"])
@tornado.gen.coroutine
def create_new_user(db, email, password, group):
    """Create a user document with a bcrypt-hashed password.

    The plaintext password is never stored; only the bcrypt hash
    (cost factor 8) is written, together with a sequential user id.
    """
    # NOTE(review): the original line was corrupted to "<PASSWORD>.hashpw"
    # (a syntax error); bcrypt.hashpw is the only call consistent with the
    # module's bcrypt import and the gensalt argument.
    hashed = bcrypt.hashpw(password.encode(), bcrypt.gensalt(8))
    # renamed from `id`, which shadows the builtin
    user_id = yield get_next_id(db, "user")
    yield db.users.insert({
        "_id": user_id, "email": email, "hash": hashed, "group": group})
| StarcoderdataPython |
1690994 | # NOTICE:
# This file must not be deleted; otherwise an ImportError will be raised in Python 2.7 when importing the plugin.
| StarcoderdataPython |
3201172 | <reponame>matthewdargan/Spotify-Siri-Integration
#You can import any modules required here
# The display name of this module - it can be anything you want
moduleName = "life"
# Words that trigger this module (the matching logic lives in the
# dispatcher, which is not visible here)
commandWords = ["meaning","life"]
# Entry point invoked by the dispatcher when the command words match.
def execute(command):
    """Print the meaning-of-life banner.

    The ``command`` text is accepted for interface compatibility with the
    dispatcher but is not inspected; the output is always the same.
    """
    banner = "------------------The meaning of life is 42-------------------"
    for line in ("\n", banner, "\n"):
        print(line)
| StarcoderdataPython |
3334510 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: © 2021 Massachusetts Institute of Technology.
# SPDX-FileCopyrightText: © 2021 <NAME> <<EMAIL>>
# NOTICE: authors should document their contributions in concisely in NOTICE
# with details inline in source files, comments, and docstrings.
"""
"""
import numpy as np
import scipy
import scipy.signal
import scipy.linalg
from .eig_algorithms import eigspaces_right_real
def inverse_DSS(A, B, C, D, E):
    """Build the descriptor state-space realization of the inverse system.

    The input equations are appended as algebraic constraints: the
    augmented state is (x, u), the descriptor matrix E gains zero
    rows/columns for the algebraic part, and the input/output roles are
    swapped.  Requires a square system (as many inputs as outputs).

    Returns (newA, newB, newC, newD, newE) for the augmented system.
    """
    n_constr, n_states = A.shape
    n_inputs = B.shape[1]
    n_output = C.shape[0]
    assert n_inputs == n_output

    n_constr_aug = n_constr + n_inputs
    n_states_aug = n_states + n_inputs

    # [[A, B], [C, D]] couples the dynamics with the algebraic I/O relation.
    newA = np.zeros((n_constr_aug, n_states_aug))
    newA[:n_constr, :n_states] = A
    newA[:n_constr, n_states:] = B
    newA[n_constr:, :n_states] = C
    newA[n_constr:, n_states:] = D

    newE = np.zeros((n_constr_aug, n_states_aug))
    newE[:n_constr, :n_states] = E

    newB = np.zeros((n_constr_aug, n_inputs))
    newB[n_constr:, :] = -1

    newC = np.zeros((n_output, n_states_aug))
    newC[:, n_states:] = 1

    newD = np.zeros((n_output, n_inputs))
    return newA, newB, newC, newD, newE
def reduce_modal(A, B, C, D, E, mode="C", tol=1e-7, use_svd=False):
    """Remove uncontrollable (mode="C") or unobservable (mode="O") modal
    subspaces from the descriptor state-space system (A, B, C, D, E).

    For each real eigenspace of the (A, E) pencil (computed by the
    project helper `eigspaces_right_real`), a pivoted-QR rank test finds
    the eigen-directions that do not couple to B (resp. C); the state
    space is then projected onto the orthogonal complement of those
    directions.

    Returns (A, B, C, D, E, reduced) where `reduced` is True when a
    reduction was actually performed.

    TODO simplify a bunch!
    """
    v_pairs = eigspaces_right_real(A, E, tol=tol)
    A_projections = []
    if mode == "C":
        # should maybe use SVD for rank estimation?
        # evects are columns are eig-idx, rows are eig-vectors
        for eigs, evects in v_pairs:
            # columns are B-in, rows are SS-eigen
            q, r, P = scipy.linalg.qr((E @ evects).T @ B, pivoting=True)
            # Reverse scan: find the numerical row-rank of R (last row
            # whose squared norm exceeds tol).
            for idx in range(q.shape[0] - 1, -1, -1):
                if np.sum(r[idx] ** 2) < tol:
                    continue
                else:
                    idx += 1
                    break
            idx_split = idx
            # Directions of this eigenspace that do NOT couple to B.
            A_project = evects @ q[idx_split:].T
            if A_project.shape[1] > 0:
                A_projections.append(A_project)
    elif mode == "O":
        # TODO untested so far
        for eigs, evects in v_pairs:
            # debug prints left in place (mode "O" is marked untested)
            print(eigs)
            print(evects)
            # columns are C-out, rows are SS-eigen
            q, r, P = scipy.linalg.qr((C @ evects).T, pivoting=True)
            for idx in range(q.shape[0] - 1, -1, -1):
                if np.sum(r[idx] ** 2) < tol:
                    continue
                else:
                    idx += 1
                    break
            idx_split = idx
            A_project = evects @ q[idx_split:].T
            if A_project.shape[1] > 0:
                A_projections.append(A_project)
    else:
        raise RuntimeError("Can only reduce mode='C' or 'O'")

    # Nothing to remove: return the system unchanged.
    if not A_projections:
        return A, B, C, D, E, False

    # Orthogonal projector onto the complement of the removable subspace.
    A_projections = np.hstack(A_projections)
    Aq, Ar = scipy.linalg.qr(A_projections, mode="economic")
    A_SS_projection = np.diag(np.ones(A.shape[0])) - Aq @ Aq.T.conjugate()

    # Rank-revealing factorization of the projector: the image spans the
    # retained state space, the kernel spans the removed one.
    if use_svd:
        Au, As, Av = np.linalg.svd(A_SS_projection)
        for idx in range(len(As) - 1, -1, -1):
            if As[idx] < tol:
                continue
            else:
                idx += 1
                break
        idx_split = idx
        p_project_imU = Au[:, :idx_split]
        p_project_kerU = Au[:, idx_split:]
    else:
        Aq, Ar, Ap = scipy.linalg.qr(A_SS_projection, mode="economic", pivoting=True)
        for idx in range(Aq.shape[0] - 1, -1, -1):
            if np.sum(Ar[idx] ** 2) < tol:
                continue
            else:
                idx += 1
                break
        idx_split = idx
        p_project_imU = Aq[:, :idx_split]
        p_project_kerU = Aq[:, idx_split:]

    # Repeat the construction on the constraint (row) side through E.
    E_projections = E @ p_project_kerU
    Eq, Er = scipy.linalg.qr(E_projections, mode="economic")
    E_SS_projection = np.diag(np.ones(A.shape[0])) - Eq @ Eq.T.conjugate()
    if use_svd:
        Eu, Es, Ev = np.linalg.svd(E_SS_projection)
        p_project_im = Ev[:idx_split]
        p_project_ker = Ev[idx_split:]
    else:
        Eq, Er, Ep = scipy.linalg.qr(E_SS_projection, mode="economic", pivoting=True)
        p_project_im = Eq.T[:idx_split]
        p_project_ker = Eq.T[idx_split:]

    # Sanity check: the removed directions must not couple to B (mode "C")
    # or be visible through C (mode "O").
    # TODO, have this check the mode argument
    if mode == "C":
        assert np.all((p_project_ker @ B) ** 2 < tol)
    if mode == "O":
        assert np.all((C @ p_project_kerU) ** 2 < tol)

    # Project the realization onto the retained subspace.
    B = p_project_im @ B
    A = p_project_im @ A @ p_project_imU
    E = p_project_im @ E @ p_project_imU
    C = C @ p_project_imU
    return A, B, C, D, E, True
def controllable_staircase(
    A,
    B,
    C,
    D,
    E,
    tol=1e-9,
):
    """
    Implementation of
    COMPUTATION OF IRREDUCIBLE GENERALIZED STATE-SPACE REALIZATIONS ANDRAS VARGA
    using givens rotations.

    Brings the descriptor system (A, B, C, D, E) towards a controllability
    staircase form by applying Givens rotations to [B | A] from the left
    (absorbed into E) and compensating column rotations from the right
    (applied to A, C, and E).  `tol` is the squared-magnitude threshold
    below which an entry is treated as already zero.

    it is very slow, but numerically stable

    TODO, add pivoting,
    TODO, make it use the U-T property on E better for speed
    TODO, make it output Q and Z to apply to aux matrices, perhaps use them on C
    """
    # from icecream import ic
    # import tabulate
    Ninputs = B.shape[1]
    Nstates = A.shape[0]
    Nconstr = A.shape[1]
    Noutput = C.shape[0]
    # Pre-reduce: apply the Q of a QR of E to [B | A] from the left so E
    # starts upper-triangular.
    BA, E = scipy.linalg.qr_multiply(E, np.hstack([B, A]), pivoting=False, mode="left")
    Nmin = min(Nconstr, Nstates)
    for CidxBA in range(0, Nmin - 1):
        for RidxBA in range(Nconstr - 1, CidxBA, -1):
            # create a givens rotation for Q reduction on BA
            BAv0 = BA[RidxBA - 1, CidxBA]
            BAv1 = BA[RidxBA, CidxBA]
            BAvSq = BAv0 ** 2 + BAv1 ** 2
            if BAvSq < tol:
                continue
            BAvAbs = BAvSq ** 0.5
            c = BAv1 / BAvAbs
            s = BAv0 / BAvAbs
            M = np.array([[s, +c], [-c, s]])
            # Left rotation zeroes BA[RidxBA, CidxBA]; the same rotation
            # must hit E to keep the realization equivalent.
            BA[RidxBA - 1 : RidxBA + 1, :] = M @ BA[RidxBA - 1 : RidxBA + 1, :]
            # TODO, use the U-T to be more efficient
            E[RidxBA - 1 : RidxBA + 1, :] = M @ E[RidxBA - 1 : RidxBA + 1, :]
            Cidx = RidxBA
            Ridx = RidxBA
            # row and col swap
            # Compensating right rotation restores E's triangular fill-in;
            # it must also be applied to the A-part of BA and to C.
            Ev0 = E[Ridx, Cidx - 1]
            Ev1 = E[Ridx, Cidx]
            EvSq = Ev0 ** 2 + Ev1 ** 2
            if EvSq < tol:
                continue
            EvAbs = EvSq ** 0.5
            c = Ev0 / EvAbs
            s = Ev1 / EvAbs
            MT = np.array([[s, +c], [-c, s]])
            BA[:, Ninputs:][:, Cidx - 1 : Cidx + 1] = (
                BA[:, Ninputs:][:, Cidx - 1 : Cidx + 1] @ MT
            )
            C[:, Cidx - 1 : Cidx + 1] = C[:, Cidx - 1 : Cidx + 1] @ MT
            # TODO, use the U-T to be more efficient
            E[:, Cidx - 1 : Cidx + 1] = E[:, Cidx - 1 : Cidx + 1] @ MT
    # Split the working array back into B and A.
    B = BA[:, :Ninputs]
    A = BA[:, Ninputs:]
    return A, B, C, D, E
| StarcoderdataPython |
1718808 | <filename>NaiveBayes.py
#!/usr/bin/python
import numpy as np
def logit(x):
    """ Computes logit function

    Parameters
    ----------
    x : {float, int}
        Must lie strictly between 0 and 1.

    Returns
    -------
    out : float
        Logit value, log(x / (1 - x)).

    Raises
    ------
    ValueError
        If x is outside the open interval (0, 1).  The previous code
        silently returned None for x <= 0 and raised ZeroDivisionError
        at x == 1; an explicit domain error is clearer and cannot leak
        None into downstream arithmetic.
    """
    if not 0 < x < 1:
        raise ValueError("logit is defined only on (0, 1); got %r" % (x,))
    return np.log(1. * x / (1 - x))
def complement(x):
    """Return the probability complement of x.

    Parameters
    ----------
    x : {float, int}

    Returns
    -------
    {float, int}
        1 - x.
    """
    return 1 - x
class NaiveBayes:
    """Bernoulli naive Bayes classifier with Laplace smoothing.

    Class labels are assumed to be the consecutive integers 1..K.
    Features appear to be binary presence/absence counts -- TODO confirm
    against callers; the conditional estimates sum feature occurrences.
    """

    def __init__(self, prior=None, conditional=None):
        # Avoid mutable default arguments ([]): the originals were shared
        # across every instance constructed without arguments.
        # model_prior / model_conditional hold the fitted parameters.
        self.model_prior = [] if prior is None else prior
        self.model_conditional = [] if conditional is None else conditional

    def fit(self, X, Y):
        """Fits Naive Bayes generative model according to training data.

        Parameters
        ----------
        X : sparse matrix, shape (n_samples, n_features)
            Must support row indexing and .toarray().
        Y : array-like, shape (n_samples,)
            Integer class labels 1..K.

        Returns
        -------
        self : object
            Returns self.
        """
        K = len(np.unique(Y))
        n = np.shape(X)[0]
        conditional = []  # class-conditional parameters (Laplace smoothing)
        prior = []  # class prior parameters (MLE)
        # range() instead of xrange(): works on both Python 2 and 3
        # (xrange is a NameError on Python 3).
        for label in range(K):
            indices = np.where(Y == label + 1)[0]
            temp_split = X[indices, :]
            temp_count = np.shape(temp_split)[0]
            prior.append(1. * temp_count / n)
            temp_sum = np.apply_along_axis(sum, 0, temp_split.toarray())
            # Add-one (Laplace) smoothing: one pseudo-count per outcome.
            conditional.append(1. * (1 + 1. * temp_sum) / (2 + temp_count))
        self.model_prior = prior
        self.model_conditional = conditional
        return self

    def predict(self, params, test):
        """Makes predictions on test data based on Naive Bayes model.

        Parameters
        ----------
        params : unused
            Retained for backward compatibility with existing callers.
        test : sparse matrix, shape (n_samples, n_features)

        Returns
        -------
        preds : ndarray of shape (n_samples, 1), or None
            Predicted class (1-based) per test point; None when the
            feature counts of model and `test` disagree.
        """
        vect_logit = np.vectorize(logit)
        vect_comp = np.vectorize(complement)
        n_test = np.shape(test)[0]
        prior = self.model_prior
        conditional = self.model_conditional
        p = np.shape(conditional)[1]
        test_p = np.shape(test)[1]
        if p != test_p:
            # print() function call: the original Python-2 print statement
            # is a syntax error on Python 3.
            # NOTE(review): consider raising ValueError instead of the
            # print-and-return-None convention kept for compatibility.
            print("Error: Number of training and testing features differ")
            return
        weight = vect_logit(conditional)
        # Stores weight matrix
        condition_comp = vect_comp(conditional)
        # Intercept folds in the log prior plus the log-probability of
        # every feature being absent.
        intercept = np.log(
            prior) + np.sum(np.apply_along_axis(np.log, 1, condition_comp), axis=1)
        # Stores intercept vector
        weight = weight.transpose()
        classifier = test.dot(weight) + intercept
        preds = np.argmax(classifier, axis=1) + 1
        # Add 1 because the classes have a 1-based index
        preds = preds.reshape(n_test, 1)
        return preds
| StarcoderdataPython |
158490 | <gh_stars>100-1000
from modeltranslation.translator import translator, TranslationOptions
from openbook_common.models import Emoji, EmojiGroup, Badge, Language
# django-modeltranslation option classes: each declares which model fields
# receive per-language translation columns, and is registered with the
# global translator immediately below its definition.
class EmojiGroupTranslationOptions(TranslationOptions):
    # Translate EmojiGroup.keyword.
    fields = ('keyword',)
translator.register(EmojiGroup, EmojiGroupTranslationOptions)
class EmojiTranslationOptions(TranslationOptions):
    # Translate Emoji.keyword.
    fields = ('keyword',)
translator.register(Emoji, EmojiTranslationOptions)
class BadgeTranslationOptions(TranslationOptions):
    # Translate Badge.keyword_description.
    fields = ('keyword_description',)
translator.register(Badge, BadgeTranslationOptions)
class LanguageTranslationOptions(TranslationOptions):
    # Translate Language.name.
    fields = ('name',)
translator.register(Language, LanguageTranslationOptions)
| StarcoderdataPython |
4821740 | <reponame>abixadamj/lekcja-enter-przyklady
import sqlite3
from random import randint

# Demo script: insert one random (username, password) row into the
# `Hasla` table of hasla.sqlite.  NOTE: storing plaintext passwords is
# unsafe outside of a toy example.
connection = sqlite3.connect('hasla.sqlite')
cursor = connection.cursor()
vals = ("login_"+str(randint(1,500)), randint(100,1000) )
try:
    # Parameterized query (no SQL injection via the values).
    cursor.execute('INSERT INTO Hasla ("Username","Password") VALUES (?,?)', vals)
    print("Udało się dodać do bazy", vals)
except sqlite3.Error:
    # Catch only database errors: the original bare `except:` also
    # swallowed KeyboardInterrupt/SystemExit and programming errors.
    print("Error ;-)")
connection.commit()
connection.close()
print("Koniec")
| StarcoderdataPython |
1784692 | # -*- coding: utf-8 -*-
# @Author: MR_Radish
# @Date: 2018-07-24 11:18:50
# @E-mail: <EMAIL>
# @FileName: imutils.py
# @TODO: imutils, it’s a library that
# we are going to write ourselves and create “convenience”
# methods to do common tasks like translation, rotation, and
# resizing.It made by myself
import numpy as np
import cv2
def translate(image, tx, ty):
    """Shift `image` by (tx, ty) pixels using an affine warp."""
    (h, w) = image.shape[:2]
    translation_matrix = np.float32([[1, 0, tx], [0, 1, ty]])
    return cv2.warpAffine(image, translation_matrix, (w, h))
def rotate(image, angle, center=None, scale=1.0):
    """Rotate `image` by `angle` degrees about `center` (default: the
    image center), optionally scaling by `scale`."""
    (h, w) = image.shape[:2]
    if center is None:
        # cv2 points are (x, y) = (column, row), hence the (w, h) order.
        center = (w // 2, h // 2)
    rotation_matrix = cv2.getRotationMatrix2D(center, angle, scale)
    return cv2.warpAffine(image, rotation_matrix, (w, h))
def resize(image, width=None, height=None, inter=cv2.INTER_AREA):
    """Resize `image`, preserving its aspect ratio.

    Normally exactly one of `width`/`height` is given; the other
    dimension is scaled to keep the aspect ratio.  If neither is given
    the image is returned unchanged.  If BOTH are given, `width` takes
    precedence: the previous code left `dim` as None in that case and
    crashed inside cv2.resize.
    """
    (h, w) = image.shape[:2]
    if width is None and height is None:
        return image
    if width is None:
        # Only height specified: scale width to match.
        r = height / h
        dim = (int(w * r), height)
    else:
        # Width specified (wins when both are given): scale height to match.
        r = width / w
        dim = (width, int(h * r))
    return cv2.resize(image, dim, interpolation=inter)
| StarcoderdataPython |
23822 | from decimal import Decimal
def ensure_decimal(value):
    """Return `value` unchanged if it is already a Decimal, else convert it."""
    if isinstance(value, Decimal):
        return value
    return Decimal(value)
| StarcoderdataPython |
1695730 | <gh_stars>10-100
from typing import Any, Dict, Iterable
import numpy as np
from fugue.workflow.workflow import FugueWorkflow
from tune import Space, Trial, TrialDecision, TrialReport
from tune.constants import TUNE_REPORT_METRIC
from tune.concepts.dataset import TuneDatasetBuilder
from tune.iterative.objective import IterativeObjectiveFunc
from tune.iterative.study import IterativeStudy
from tune.concepts.flow import TrialJudge
def f(x, a, b):
    """Toy objective: b minus a times log(x + 0.01); decreasing in x for a > 0."""
    return b - a * np.log(x + 0.01)
class F(IterativeObjectiveFunc):
    """Iterative objective that reports f(step, a, b) once per iteration."""

    def __init__(self) -> None:
        self.step = 0
        super().__init__()

    def copy(self) -> "F":
        # A copy always starts from a fresh step counter.
        return F()

    def initialize(self) -> None:
        # Each worker operates on a copy, so the counter must be untouched.
        assert self.step == 0

    def run_single_iteration(self) -> TrialReport:
        self.step += 1
        trial = self.current_trial
        params = trial.params.simple_value
        score = f(self.step, params["a"], params["b"])
        return TrialReport(trial=trial, metric=score)
class J(TrialJudge):
    """Judge that grants per-rung budgets from a fixed schedule."""

    def __init__(self, schedule):
        super().__init__()
        self.schedule = schedule

    def can_accept(self, trial: Trial) -> bool:
        # Every trial is admitted.
        return True

    def get_budget(self, trial: Trial, rung: int) -> float:
        # Zero budget once the schedule is exhausted, which stops iteration.
        if rung < len(self.schedule):
            return float(self.schedule[rung])
        return 0.0

    def judge(self, report: TrialReport) -> TrialDecision:
        return TrialDecision(
            report,
            budget=self.get_budget(report.trial, report.rung + 1),
            should_checkpoint=False,
            metadata={},
        )
def test_iterative_study(tmpdir):
    # End-to-end check of IterativeStudy: five candidate (a, b) configs
    # are tuned under judge J with per-rung budget schedule [1, 2, 3, 4];
    # the best trial's metric must fall below -2.8.
    def assert_metric(df: Iterable[Dict[str, Any]], metric: float) -> None:
        # Applied to the best-trial frame: every row must beat `metric`.
        for row in df:
            assert row[TUNE_REPORT_METRIC] < metric
    study = IterativeStudy(F(), str(tmpdir))
    space = sum(
        Space(a=a, b=b)
        for a, b in [(1.1, 0.2), (0.8, -0.2), (1.2, -0.1), (0.7, 0.3), (1.0, 1.5)]
    )
    dag = FugueWorkflow()
    dataset = TuneDatasetBuilder(space, str(tmpdir)).build(dag)
    result = study.optimize(
        dataset,
        J([1, 2, 3, 4]),
    )
    result.result(1).show()
    result.result(1).output(assert_metric, params=dict(metric=-2.8))
    # Fugue workflows are lazy: nothing above executes until run().
    dag.run()
| StarcoderdataPython |
1764096 | # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import operator
from typing import Callable, Iterable, Sequence, Tuple, TypeVar
from pants.base.deprecated import deprecated
# Generic element type for the values a filter is applied to.
_T = TypeVar("_T")
# A Filter is a predicate over a single value of type _T.
Filter = Callable[[_T], bool]
def _extract_modifier(modified_param: str) -> Tuple[Callable[[bool], bool], str]:
if modified_param.startswith("-"):
return operator.not_, modified_param[1:]
identity_func = lambda x: x
return identity_func, modified_param[1:] if modified_param.startswith("+") else modified_param
def create_filter(predicate_param: str, predicate_factory: Callable[[str], Filter]) -> Filter:
    """Create a filter function from a string parameter.

    :param predicate_param: A comma-separated list of arguments to the
                            predicate_factory.  If the whole list is
                            prefixed by a '-', the sense of the
                            resulting filter is inverted.
    :param predicate_factory: A function that takes one string argument and
                              returns a predicate (a one-argument function
                              returning a boolean).
    :return: A filter function of one argument that is the logical OR of the
             predicates for each of the comma-separated arguments, with the
             sense inverted when the list was prefixed by '-'.

    :API: public
    """
    modifier, param = _extract_modifier(predicate_param)
    predicates = [predicate_factory(arg) for arg in param.split(",")]

    def filt(x: _T) -> bool:
        matched = any(pred(x) for pred in predicates)
        return modifier(matched)

    return filt
def create_filters(
    predicate_params: Iterable[str], predicate_factory: Callable[[str], Filter]
) -> Sequence[Filter]:
    """Create filter functions from a list of string parameters.

    :param predicate_params: A list of predicate_param arguments as in
                             `create_filter`.
    :param predicate_factory: As in `create_filter`.

    :API: public
    """
    return [create_filter(param, predicate_factory) for param in predicate_params]
@deprecated(
    removal_version="1.31.0.dev0",
    hint_message="Use `pants.util.filtering.and_filters`, which behaves identically.",
)
def wrap_filters(filters: Iterable[Filter]) -> Filter:
    """Returns a single filter that short-circuit ANDs the specified filters.

    Deprecated alias: delegates unchanged to `and_filters`.

    :API: public
    """
    return and_filters(filters)
def and_filters(filters: Iterable[Filter]) -> Filter:
    """Returns a single filter that short-circuit ANDs the specified filters.

    :API: public
    """
    def combined_filter(x: _T) -> bool:
        # all() short-circuits on the first filter that rejects x,
        # matching the original explicit loop.
        return all(filt(x) for filt in filters)

    return combined_filter
| StarcoderdataPython |
4929 | <filename>coremltools/converters/mil/frontend/tensorflow/converter.py
# Copyright (c) 2020, Apple Inc. All rights reserved.
#
# Use of this source code is governed by a BSD-3-clause license that can be
# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
import logging
from coremltools.converters.mil.input_types import (
InputType,
TensorType,
ImageType,
RangeDim,
_get_shaping_class,
)
from coremltools.converters.mil.input_types import Shape as InputShape
from coremltools.converters.mil.mil.var import Var
from coremltools.converters.mil.mil import get_new_symbol
from coremltools.converters.mil.mil.types.symbolic import is_symbolic
from coremltools.converters.mil.mil.types import is_tensor
from coremltools.converters.mil.mil import types
from .basic_graph_ops import topsort, simple_topsort
from .convert_utils import convert_graph
from coremltools.converters.mil.mil import Builder as mb
from coremltools.converters.mil.mil import Program
from coremltools.converters.mil.mil import Function
from .ssa_passes.tf_passes import tensorflow_passes
from coremltools.converters._profile_utils import _profile
# TranscriptionContext maintains a map of tf_node.name --> ssa_var available
# to the current TF --> tfssa transcription.
class TranscriptionContext:
    """Scratch state for one TF graph -> MIL transcription pass.

    Tracks the tf_node.name -> MIL Var mapping, the known TF subgraphs
    (needed by control-flow ops), and a stack of function inputs for
    nested while-loop bodies.  Nested TF loops are represented as stacked
    functions, which translate to nested blocks in the MIL Program; the
    innermost body's inputs sit on top of `func_input_stack` and are
    popped once that body is transcribed.
    """

    def __init__(self, name=None):
        self.name = name if name is not None else ""
        self.context = {}  # tf_name -> Var (or list/tuple of Vars)
        self.graphs = {}  # graph_name -> TF subgraph
        self.func_input_stack = []  # list of tuple[Var], innermost last

    def add(self, tf_name, ssa_vars, is_new_var=True):
        """Register `ssa_vars` under `tf_name`.

        ssa_vars: list[Var] / tuple[Var] (multiple outputs) or Var
        (single output).  `is_new_var` marks vars freshly created for
        `tf_name`, whose MIL name must then match the TF node name.
        """
        if tf_name in self.context:
            # Overriding is deliberate: a while_loop body is translated
            # twice to discover shape changes across iterations.
            logging.info("TF var %s is added again. Overriding previous value" % tf_name)
        if is_new_var and isinstance(ssa_vars, Var) and tf_name != ssa_vars.name:
            msg = (
                "MIL op's name ({}) does not match TensorFlow's node name ({})."
                " Warning: Node added to context must have the same name as the name passed to context."
            )
            raise ValueError(msg.format(tf_name, ssa_vars.name))
        self.context[tf_name] = ssa_vars

    def add_graph(self, graph_name, graph):
        """Register a TF subgraph under `graph_name`."""
        self.graphs[graph_name] = graph

    def get_graph(self, graph_name):
        """Look up a previously registered subgraph (KeyError if unknown)."""
        if graph_name not in self.graphs:
            raise KeyError(
                "Graph '{}' not found in: {}".format(graph_name, list(self.graphs.keys()))
            )
        return self.graphs[graph_name]

    def stack_func_inputs(self, inputs):
        """Push the inputs of a (nested) loop-body function."""
        self.func_input_stack.append(inputs)

    def unstack_func_inputs(self):
        """Pop the innermost function inputs."""
        if not self.func_input_stack:
            raise ValueError("No func input available")
        self.func_input_stack.pop()

    def get_func_inputs(self):
        """Peek at the innermost function inputs without popping."""
        if not self.func_input_stack:
            raise ValueError("No func input available")
        return self.func_input_stack[-1]

    def __getitem__(self, tf_name):
        if tf_name not in self.context:
            raise KeyError("TF var {} not found in context {}".format(tf_name, self.name))
        return self.context[tf_name]

    def __contains__(self, tf_name):
        return tf_name in self.context
class TFConverter:
    """Drives the TensorFlow (tfssa IR) -> MIL Program conversion.

    Resolves/validates user-provided inputs and outputs against the TF
    graph's Placeholders, orders the (possibly nested) loop-body
    functions, transcribes the main graph, and runs the TF-specific
    MIL passes.
    """

    def __init__(self, tfssa, inputs=None, outputs=None, **kwargs):
        """
        tfssa: TensorFlow IR.
        inputs: list of TensorType or ImageType, optional, defaults to None.
        outputs: list of str or str, optional, defaults to None.
            A list of names of the output nodes or a str for single output name.
            If None, the converter will try to extract the output information from
            TensorFlow model.
        """
        self.tfssa = tfssa
        self.global_type = {}
        self.inputs = None

        main_func = tfssa.functions["main"]
        graph = main_func.graph

        # Filter the inputs to only Placeholder names
        tf_placeholder_names = [n for n in graph if graph[n].op == "Placeholder"]
        placeholder_names = []
        if inputs is not None:
            # Check inputs format
            if not isinstance(inputs, (list, tuple)):
                raise ValueError(
                    "Type of inputs should be list or tuple, got {} instead.".format(
                        type(inputs)
                    )
                )
            if not all([isinstance(i, InputType) for i in inputs]):
                raise ValueError(
                    "Type of inputs should be list or tuple of TensorType or ImageType, got {} instead.".format(
                        [type(i) for i in inputs]
                    )
                )

            # Special case: if there's only 1 input and 1 placeholder, we match them.
            if len(tf_placeholder_names) == 1 and len(inputs) == 1:
                if inputs[0].name is None:
                    inputs[0].name = tf_placeholder_names[0]

            # filter out those inputs which is not in tf_placeholder_names
            inputs = [x for x in inputs if x.name in tf_placeholder_names]

            # We fill in shapes for user-specified input that doesn't have shape
            for inp in inputs:
                # Check inputs existence
                if inp.name is None:
                    raise ValueError(
                        "Unable to infer input's name or input name was not provided"
                    )
                if inp.name not in tf_placeholder_names:
                    raise ValueError(
                        "Input ({}) provided is not found in given tensorflow graph. Placeholders in graph are: {}".format(
                            inp.name, tf_placeholder_names
                        )
                    )
                if inp.shape is None:
                    shape = self._get_placeholder_shape_from_tf_graph(tfgraph=graph, name=inp.name)
                    # _get_shaping_class does not accept -1 or None dimension.
                    # Replace them with fresh symbolic dimensions instead.
                    shape = [get_new_symbol() if s is None or s == -1 else s \
                            for s in shape]
                    inp.shape = _get_shaping_class(shape)

            # Extract placeholders that users didn't specify.
            user_input_names = [inp.name for inp in inputs]
            for name in tf_placeholder_names:
                if name not in user_input_names:
                    placeholder_names.append(name)
        else:
            inputs = []
            placeholder_names = tf_placeholder_names

        # name -> (shape, mil_type) mapping. shape has type list[int]
        added_inputs = {}
        for inp in main_func.inputs:
            if inp not in placeholder_names:
                continue
            node = graph[inp]
            dtype = node.attr['dtype']
            shape = self._get_placeholder_shape_from_tf_graph(tfgraph=graph, name=inp)
            shape = [get_new_symbol() if s is None or s == -1 else s \
                    for s in shape]
            inputs.append(TensorType(name=inp, shape=shape, dtype=dtype))
            added_inputs[inp] = (shape, dtype)

        if len(added_inputs) > 0:
            logging.info(
                "Adding Input not specified by users: '{}'".format(
                    added_inputs)
            )

        for idx, inp in enumerate(inputs):
            # We set the default image format in TF as NHWC, since NHWC is used
            # for TF unless GPU is specified as device.
            if isinstance(inp, ImageType) and inputs[idx].channel_first is None:
                inputs[idx].channel_first = False
        self.inputs = tuple(inputs)

        # Write fixed (fully static) input shapes back onto the TF graph
        # nodes so downstream shape inference sees them.
        for inputtype in self.inputs:
            if not isinstance(inputtype.shape, InputShape):
                continue
            if any([isinstance(s, RangeDim) for s in inputtype.shape.shape]):
                continue
            node = graph[inputtype.name]
            shape = [-1 if is_symbolic(s) else s for s in inputtype.shape.shape]
            node.attr["_output_shapes"] = [shape]  # list of length 1

        # infer outputs if not provided
        self._validate_outputs(tfssa, outputs)
        outputs = main_func.outputs if outputs is None else outputs
        outputs = outputs if isinstance(outputs, (tuple, list)) else [outputs]
        outputs = [x if isinstance(x, str) else x.name for x in outputs]
        self.outputs = outputs

        # We would like a stack so that we run conversion sequentially.
        self.graph_stack = self._get_stack(tfssa, root="main")
        self.context = TranscriptionContext()
        self.tensorflow_passes = tensorflow_passes

    def _get_placeholder_shape_from_tf_graph(self, tfgraph, name):
        """Return the recorded shape for Placeholder `name`.

        Prefers the node's "shape" attribute, falling back to
        "_output_shapes"; raises ValueError (asking the user to supply a
        shape) when neither is available.
        """
        error_message = "Unable to determine the shape of input: {}." \
                        " Please provide its shape during conversion, using \n" \
                        "'ct.convert(..., inputs=[ct.TensorType(name='{}', shape=(_FILL_ME_) ),])".format(name, name)

        if tfgraph[name].attr.get("shape", None) is not None:
            shape = tfgraph[name].attr["shape"]

        elif tfgraph[name].attr.get("_output_shapes", None) is not None:
            shape = tfgraph[name].attr["_output_shapes"][0]
            if shape is None:
                raise ValueError(error_message)

        else:
            raise ValueError(error_message)

        return shape

    def _get_stack(self, tfssa, root="main"):
        """Topologically order the tfssa functions for sequential conversion.

        Builds a dependency map from each while-op's cond/body functions
        to the function containing the while, then topsorts it so every
        loop body is converted before (or in a known order relative to)
        its caller.
        """
        # We're trying to get a order of how to loop through the graphs.
        # This is NOT necessarily a DAG.
        dep = {x: [] for x in tfssa.functions}
        for fname in tfssa.functions:
            for node in tfssa.functions[fname].graph.values():
                func_x, func_y = None, None

                if node.op == "while":
                    func_x = node.attr["body_function"]
                    func_y = node.attr["cond_function"]

                if func_x and fname not in dep[func_x]:
                    dep[func_x].append(fname)
                if func_y and fname not in dep[func_y]:
                    dep[func_y].append(fname)

        assert len(dep[root]) == 0
        graph_stack = simple_topsort(dep)

        return graph_stack

    @staticmethod
    def _get_tensor_name(tensor):
        """Strip the ":<index>" suffix from a tensor (or tensor name)."""
        ret = None
        if isinstance(tensor, str):
            ret = tensor
        else:
            ret = tensor.name
        return ret.split(":")[0]

    def _validate_outputs(self, tfssa, outputs):
        """Check that every requested output names an existing graph node."""
        if outputs is None:
            return
        outputs = outputs if isinstance(outputs, (tuple, list)) else [outputs]
        output_nodes = []
        for f in tfssa.functions.values():
            output_nodes += list(f.outputs)
        all_nodes = []
        for f in tfssa.functions.values():
            all_nodes += list(f.graph.keys())
        for n in outputs:
            if self._get_tensor_name(n) not in output_nodes + all_nodes:
                raise KeyError('Output node name "{}" does exist.'.format(n))

    def check_placeholder_output(self, prog, outputs_name):
        """
        Handle the cases where placeholder is output.
        There is a case where the program is like
            main(%Placeholder: (5,fp32)) {
                block3() {
                } -> (%Placeholder)
            }
        But self.outputs = ["Placeholder:0"]
        We need to change the block output to Placeholder:0 by inserting an identity
        """
        block = prog["main"]
        input_name = [x.name for x in list(block.inputs.values())]
        with block:
            new_outputs = []
            for output, output_name in zip(block.outputs, outputs_name):
                if output.name not in input_name or output.name == output_name:
                    new_output = output
                else:
                    new_output = mb.identity(x=output, name=output_name)
                new_outputs.append(new_output)
            block.set_outputs(new_outputs)

    def convert_main_graph(self, prog, graph):
        """Transcribe the TF main graph into a MIL Function named "main".

        Creates placeholders for the declared inputs, converts the graph,
        de-duplicates aliased outputs by inserting identity ops, and
        renames output Vars back to their TF names.
        """
        func_inputs = {}
        for input_type in self.inputs:
            func_inputs[input_type.name] = mb.placeholder(
                input_type.shape.symbolic_shape, dtype=input_type.dtype)
        prog.set_main_input_types(self.inputs)

        with Function(func_inputs) as ssa_func:
            # Get the input Var
            for name in func_inputs.keys():
                self.context.add(name, ssa_func.inputs[name])
            outputs = convert_graph(self.context, graph, self.outputs)
            ssa_func.set_outputs(outputs)
            prog.add_function("main", ssa_func)

        # check duplicate output
        # Note: sometimes two outputs are pointing to the same Var, we should
        # create mb.identity for those cases
        block = prog["main"]
        with block:
            name_counts = {}
            new_outputs = [output for output in block.outputs]
            for i, v_o in enumerate(block.outputs):
                if v_o.name not in name_counts:
                    name_counts[v_o.name] = 1
                else:
                    name_counts[v_o.name] += 1
                    new_name = v_o.name + "_duplicate_" + str(name_counts[v_o.name])
                    x = mb.identity(x=v_o, name=new_name)
                    new_outputs[i] = x
            block.set_outputs(new_outputs)

        # Rename outputs to TF's name. This is needed when the last op doesn't
        # generate a new Var (e.g., get_tuple, Identity etc.), and thus the
        # last Var would have a different name than the last TF op's name.
        #
        # Example:
        #
        # TF code:
        #    x = tf.placeholder(tf.float32, shape=(1,))
        #    y = tf.placeholder(tf.float32, shape=(1,))
        #    c = lambda i, j: \
        #            tf.less(tf.math.reduce_mean(i), tf.math.reduce_mean(j))
        #    b = lambda i, j: (tf.add(i, 1), j)
        #    res = tf.while_loop(c, b, [x, y])
        #
        # Resulting nodes (excluding the nodes in while loop cond & body):
        #
        # node name: Placeholder op type: Placeholder inputs: []
        # node name: Placeholder_1 op type: Placeholder inputs: []
        # node name: make_input_0 op type: make_tuple inputs: ['Placeholder',
        #     'Placeholder_1']
        # node name: while_0 op type: while inputs: ['make_input_0']
        # node name: while/Exit op type: get_tuple inputs: ['while_0']
        # node name: while/Exit_1 op type: get_tuple inputs: ['while_0']
        #
        # Observe that return node `while/Exit` is an output from get_tuple,
        # which in our translation simply unpack a python tuple of Vars
        # ('while_0:0', 'while_0:1') returned from while_0 SSA op. We need to
        # rename `while_0:0` to `while/Exit` in order for users to find the
        # output.
        # Note: only rename the output if the output is not Placeholder.

        input_names = [x.name for x in self.inputs]
        for v_o, out_name in zip(prog["main"].outputs, self.outputs):
            if v_o.name != out_name and v_o.name not in input_names:
                logging.info(
                    "Renaming output var: '{}' -> '{}'".format(v_o.name, out_name)
                )
                v_o.name = out_name
        self.check_placeholder_output(prog, self.outputs)

    @_profile
    def convert(self):
        """Convert the whole tfssa model and return the MIL Program.

        Registers every non-main subgraph with the transcription context,
        converts the main graph, then applies the TF frontend passes.
        """
        prog = Program()
        if len(self.graph_stack) == 0:
            raise ValueError("At least one TF function must be present")
        if self.graph_stack[0] != "main":
            msg = "TF root graph must be named 'main'. Got {}"
            raise ValueError(msg.format(self.graph_stack[0]))
        graph = self.tfssa.functions["main"].graph
        for g_name in self.graph_stack[1:]:
            self.context.add_graph(g_name, self.tfssa.functions[g_name].graph)
        self.convert_main_graph(prog, graph)

        # Apply TF frontend passes on Program. These passes are different
        # from passes applied to tfssa.
        self.tensorflow_passes(prog)

        return prog
| StarcoderdataPython |
101302 | <gh_stars>0
# coding: utf-8
# Copyright (c) Max-Planck-Institut für Eisenforschung GmbH - Computational Materials Design (CM) Department
# Distributed under the terms of "New BSD License", see the LICENSE file.
import unittest
import os
import posixpath
import numpy as np
from molmod.units import *
from pyiron.atomistics.structure.atoms import Atoms
from pyiron_base import ProjectHDFio, Project
from pyiron.quickff.quickff import QuickFFInput
from pyiron.quickff.quickff import QuickFF
class TestQuickFF(unittest.TestCase):
    """
    Tests the pyiron.quickff.quickff QuickFF class
    """
    @classmethod
    def setUpClass(cls):
        # Create a scratch project next to this test file plus two jobs:
        # a throwaway 'trial' job and a 'quickff_complete' job used for the
        # full run test.
        cls.execution_path = os.path.dirname(os.path.abspath(__file__))
        cls.project = Project(os.path.join(cls.execution_path, "test_quickff"))
        cls.job = cls.project.create_job("QuickFF", "trial")
        cls.job_complete = QuickFF(
            project=ProjectHDFio(project=cls.project, file_name="quickff_complete"),
            job_name="quickff_complete",
        )
    @classmethod
    def tearDownClass(cls):
        # Remove all jobs and the scratch project directory itself.
        cls.execution_path = os.path.dirname(os.path.abspath(__file__))
        project = Project(os.path.join(cls.execution_path, "test_quickff"))
        project.remove_jobs_silently(recursive=True)
        project.remove(enable=True)
    def setUp(self):
        # Recreate both jobs before every test so no state leaks between tests.
        if self.project.load('trial') is not None:
            self.project.remove_job('trial')
        self.job = self.project.create_job("QuickFF", 'trial')
        if self.project.load('quickff_complete') is not None:
            self.project.remove_job("quickff_complete")
        self.job_complete = QuickFF(
            project=ProjectHDFio(project=self.project, file_name="quickff_complete"),
            job_name="quickff_complete",
        )
    def test_init(self):
        """A freshly created job exposes empty ab-initio/force-field attributes."""
        self.assertEqual(self.job.__name__, "QuickFF")
        self.assertIsInstance(self.job.input, QuickFFInput)
        self.assertEqual(self.job.ffatypes, None)
        self.assertEqual(self.job.ffatype_ids, None)
        self.assertEqual(self.job.aiener, None)
        self.assertEqual(self.job.aigrad, None)
        self.assertEqual(self.job.aihess, None)
        self.assertEqual(self.job.fn_ei, None)
        self.assertEqual(self.job.fn_vdw, None)
    def test_input(self):
        """Default input options have the expected types; set_ei/set_vdw store
        both the bare file name (input) and the full path (attribute)."""
        self.assertIsInstance(self.job.input['fn_yaff'], (str))
        self.assertIsInstance(self.job.input['fn_charmm22_prm'], (type(None)))
        self.assertIsInstance(self.job.input['fn_charmm22_psf'], (type(None)))
        self.assertIsInstance(self.job.input['fn_sys'], (str))
        self.assertIsInstance(self.job.input['plot_traj'], (type(None)))
        self.assertIsInstance(self.job.input['xyz_traj'], (bool))
        self.assertIsInstance(self.job.input['fn_traj'], (type(None)))
        self.assertIsInstance(self.job.input['log_level'], (str))
        self.assertIsInstance(self.job.input['log_file'], (str))
        self.assertIsInstance(self.job.input['program_mode'], (str))
        self.assertIsInstance(self.job.input['only_traj'], (str))
        self.assertIsInstance(self.job.input['ffatypes'], (type(None)))
        self.assertIsInstance(self.job.input['ei'], (type(None)))
        self.assertIsInstance(self.job.input['ei_rcut'], (type(None)))
        self.assertIsInstance(self.job.input['vdw'], (type(None)))
        self.assertIsInstance(self.job.input['vdw_rcut'], (float))
        self.assertIsInstance(self.job.input['covres'], (type(None)))
        self.assertIsInstance(self.job.input['excl_bonds'], (type(None)))
        self.assertIsInstance(self.job.input['excl_bends'], (type(None)))
        self.assertIsInstance(self.job.input['excl_dihs'], (type(None)))
        self.assertIsInstance(self.job.input['excl_oopds'], (type(None)))
        self.assertIsInstance(self.job.input['do_hess_mass_weighting'], (bool))
        self.assertIsInstance(self.job.input['do_hess_negfreq_proj'], (bool))
        self.assertIsInstance(self.job.input['do_cross_svd'], (bool))
        self.assertIsInstance(self.job.input['pert_traj_tol'], (float))
        self.assertIsInstance(self.job.input['pert_traj_energy_noise'], (type(None)))
        self.assertIsInstance(self.job.input['cross_svd_rcond'], (float))
        self.assertIsInstance(self.job.input['do_bonds'], (bool))
        self.assertIsInstance(self.job.input['do_bends'], (bool))
        self.assertIsInstance(self.job.input['do_dihedrals'], (bool))
        self.assertIsInstance(self.job.input['do_oops'], (bool))
        self.assertIsInstance(self.job.input['do_cross_ASS'], (bool))
        self.assertIsInstance(self.job.input['do_cross_ASA'], (bool))
        self.assertIsInstance(self.job.input['do_cross_DSS'], (bool))
        self.assertIsInstance(self.job.input['do_cross_DSD'], (bool))
        self.assertIsInstance(self.job.input['do_cross_DAA'], (bool))
        self.assertIsInstance(self.job.input['do_cross_DAD'], (bool))
        self.assertIsInstance(self.job.input['consistent_cross_rvs'], (bool))
        self.assertIsInstance(self.job.input['remove_dysfunctional_cross'], (bool))
        self.assertIsInstance(self.job.input['bond_term'], (str))
        self.assertIsInstance(self.job.input['bend_term'], (str))
        self.assertIsInstance(self.job.input['do_squarebend'], (bool))
        self.assertIsInstance(self.job.input['do_bendclin'], (bool))
        self.assertIsInstance(self.job.input['do_sqoopdist_to_oopdist'], (bool))
        # set_ei/set_vdw keep the basename in the input dict and the full
        # path in fn_ei/fn_vdw.
        self.job.set_ei(posixpath.join(self.execution_path, "../static/quickff_test_files/pars_mbisgauss.txt"))
        self.assertEqual(self.job.input['ei'], 'pars_mbisgauss.txt')
        self.assertEqual(self.job.fn_ei, posixpath.join(self.execution_path, "../static/quickff_test_files/pars_mbisgauss.txt"))
        self.job.set_vdw(posixpath.join(self.execution_path, "../static/quickff_test_files/pars_vdw.txt"))
        self.assertEqual(self.job.input['vdw'], 'pars_vdw.txt')
        self.assertEqual(self.job.fn_vdw, posixpath.join(self.execution_path, "../static/quickff_test_files/pars_vdw.txt"))
    def test_set_structure(self):
        """read_abinitio fills structure/energy/gradient/hessian; the three
        ways of assigning ffatypes are mutually consistent and exclusive."""
        self.assertEqual(self.job.structure, None)
        self.job.read_abinitio(posixpath.join(self.execution_path, "../static/quickff_test_files/input.fchk"))
        self.assertIsInstance(self.job.structure, (Atoms))
        self.assertIsInstance(self.job.aiener, (float))
        self.assertTrue(
            np.array_equal(self.job.aigrad.shape, (len(self.job.structure), 3))
        )
        self.assertTrue(
            np.array_equal(self.job.aihess.shape, (len(self.job.structure), 3, len(self.job.structure), 3))
        )
        self.job.detect_ffatypes(ffatype_level='low')
        ffatypes = self.job.ffatypes
        ffatype_ids = self.job.ffatype_ids
        # Passing the expanded per-atom list must reproduce the same typing.
        full_list = [ffatypes[i] for i in ffatype_ids]
        self.job.detect_ffatypes(ffatypes=full_list)
        self.assertCountEqual(self.job.ffatypes, ffatypes)
        self.assertCountEqual(self.job.ffatype_ids, ffatype_ids)
        rules =[
            ('H', '1'),
            ('C', '6'),
        ]
        self.job.detect_ffatypes(ffatype_rules=rules)
        self.assertCountEqual(self.job.ffatypes, ffatypes)
        self.assertCountEqual(self.job.ffatype_ids, ffatype_ids)
        # Supplying both rules and a level at once is an error.
        self.assertRaises(IOError, self.job.detect_ffatypes, ffatype_rules=rules, ffatype_level='high')
    def test_run_complete(self):
        """Run the collect phase on pre-computed restart files and check the
        expected HDF5 output nodes exist."""
        self.job_complete.read_abinitio(
            posixpath.join(self.execution_path, "../static/quickff_test_files/input.fchk")
        )
        self.job_complete.set_ei(
            posixpath.join(self.execution_path, "../static/quickff_test_files/pars_mbisgauss.txt")
        )
        self.job_complete.detect_ffatypes(ffatype_level='low')
        file_directory = posixpath.join(
            self.execution_path, '../static/quickff_test_files/'
        )
        self.job_complete.restart_file_list.append(
            posixpath.join(file_directory, "system.chk")
        )
        self.job_complete.restart_file_list.append(
            posixpath.join(file_directory, "pars_cov.txt")
        )
        # Skip execution ("manual"), then collect the canned output files.
        self.job_complete.run(run_mode="manual")
        self.job_complete.status.collect = True
        self.job_complete.run()
        nodes = [
            'bend',
            'bond',
            'cross',
            'oopdist',
            'torsion',
        ]
        with self.job_complete.project_hdf5.open("output/generic") as h_gen:
            hdf_nodes = h_gen.list_nodes()
            self.assertTrue(all([node in hdf_nodes for node in nodes]))
        nodes = [
            'bonds',
            'ffatype_ids',
            'ffatypes',
            'numbers',
            'pos',
            'rvecs'
        ]
        with self.job_complete.project_hdf5.open("output/system") as h_gen:
            hdf_nodes = h_gen.list_nodes()
            self.assertTrue(all([node in hdf_nodes for node in nodes]))
# Allow running this test module directly with the unittest runner.
if __name__ == "__main__":
    unittest.main()
| StarcoderdataPython |
126372 | <reponame>YuweiYin/Algorithm_YuweiYin
#!/usr/bin/env python
# -*- coding:utf-8 -*-
"""=================================================================
@Project : Algorithm_YuweiYin/LeetCode-All-Solution/Python3
@File : LC-0089-Gray-Code.py
@Author : [YuweiYin](https://github.com/YuweiYin)
@Date : 2022-01-08
=================================================================="""
# import collections
import sys
import time
from typing import List
# import queue
"""
LeetCode - 0089 - (Medium) - Gray Code
https://leetcode.com/problems/gray-code/
Description:
An n-bit gray code sequence is a sequence of 2n integers where:
Every integer is in the inclusive range [0, 2n - 1],
The first integer is 0,
An integer appears no more than once in the sequence,
The binary representation of every pair of adjacent integers differs by exactly one bit, and
The binary representation of the first and last integers differs by exactly one bit.
Requirement:
Given an integer n, return any valid n-bit gray code sequence.
Example 1:
Input: n = 2
Output: [0,1,3,2]
Explanation:
The binary representation of [0,1,3,2] is [00,01,11,10].
- 00 and 01 differ by one bit
- 01 and 11 differ by one bit
- 11 and 10 differ by one bit
- 10 and 00 differ by one bit
[0,2,3,1] is also a valid gray code sequence, whose binary representation is [00,10,11,01].
- 00 and 10 differ by one bit
- 10 and 11 differ by one bit
- 11 and 01 differ by one bit
- 01 and 00 differ by one bit
Example 2:
Input: n = 1
Output: [0,1]
Constraints:
1 <= n <= 16
"""
class Solution:
    def grayCode(self, n: int) -> List[int]:
        """Return an n-bit Gray code sequence, or [] for invalid input.

        The sequence starts at 0, contains every integer in [0, 2**n - 1]
        exactly once, and adjacent entries (including last/first) differ in
        exactly one bit.
        """
        if not isinstance(n, int) or n <= 0:
            return []
        return self._grayCode(n)

    def _grayCode(self, n: int) -> List[int]:
        # Closed-form reflected binary code: the i-th Gray code is
        # i XOR (i >> 1).  E.g. n = 3 -> [0, 1, 3, 2, 6, 7, 5, 4].
        return [code ^ (code >> 1) for code in range(1 << n)]
def main():
    """Demo driver: time Solution.grayCode on a sample input and print both
    the answer and the elapsed CPU time."""
    n = 4  # expected: [0, 1, 3, 2, 6, 7, 5, 4, 12, 13, 15, 14, 10, 11, 9, 8]
    solver = Solution()
    start = time.process_time()
    ans = solver.grayCode(n)
    end = time.process_time()
    print('\nAnswer:')
    print(ans)
    print('Running Time: %.5f ms' % ((end - start) * 1000))


if __name__ == "__main__":
    sys.exit(main())
| StarcoderdataPython |
1701500 | <reponame>d4l3k/cs425
from PIL import Image
import numpy as np
import math
from scipy import signal
def boxfilter(n):
    """Return an n x n averaging (box) filter whose entries sum to 1."""
    assert n % 2 == 1, "Dimension must be odd"
    return np.ones((n, n)) / (n * n)
def gauss1d(sigma):
    """Return a normalized 1-D Gaussian filter covering roughly +/- 3 sigma."""
    # Filter length: ~6*sigma, rounded up to the next odd integer so the
    # kernel has a well-defined center tap.
    length = math.ceil(sigma * 6)
    if length % 2 == 0:
        length += 1
    half = length // 2
    xs = np.arange(-half, half + 1)
    weights = np.exp(-np.power(xs, 2) / (2 * math.pow(sigma, 2)))
    return weights / np.sum(weights)
def gauss2d(sigma):
    """Return a 2-D Gaussian filter built by convolving a 1-D Gaussian row
    vector with its own transpose (outer convolution)."""
    row = gauss1d(sigma)[np.newaxis]
    return signal.convolve2d(row, row.T)
def gaussconvolve2d(array, sigma):
    """Blur *array* with a 2-D Gaussian of the given sigma, keeping the
    output the same size as the input ('same' mode)."""
    return signal.convolve2d(array, gauss2d(sigma), 'same')
# Load the image as 8-bit greyscale, blur it with sigma = 3, and show it.
source = Image.open("maru.jpg").convert('L')
# source.show()  # original preview left disabled
blurred = Image.fromarray(np.uint8(gaussconvolve2d(np.asarray(source), 3)))
blurred.show()
| StarcoderdataPython |
# Exercise 1 (soru1): print the first 20 characters of the text.
metin = ("Açık bilim, araştırma çıktılarına ve süreçlerine herkesin serbestçe erişmesini, bunların ortak kullanımını, dağıtımını ve üretimini kolaylaştıran bilim uygulamasıdır.")
print(metin[:20])

# Exercise 2 (soru2): print every element of the list.
liste = ["Açık Bilim", "Açık Erişim", "Açık Lisans", "Açık Eğitim", "Açık Veri", "Açık Kültür"]
for eleman in liste:
    print(eleman)

# Exercise 3 (soru3): look the entered word up in a tiny dictionary.
sozluk = {"elma" : "Ağaçta yetişen bir tür meyve" , "salatalık" : "Fidan üzerinde büyüyen bir tür sebze" }
giris = input("aramak istediğiniz kelimeyi giriniz")
print("lütfen küçük harf kullanınız...")
# Every branch of the original while-True loop broke immediately, so a
# plain if/elif/else is behaviourally identical.
if giris == "elma":
    print(sozluk["elma"])
elif giris == "salatalık":
    print(sozluk["salatalık"])
else:
    print("yanlış giriş")
| StarcoderdataPython |
3396853 | <gh_stars>1-10
from django.contrib import admin
from .models import Maha1, Incidences
from leaflet.admin import LeafletGeoAdmin
class IncidencesAdmin(LeafletGeoAdmin):
    """Admin for Incidences using a Leaflet map widget for geometry fields."""
    list_display = ['name', 'location']
class MahaAdmin(LeafletGeoAdmin):
    """Admin for Maha1 with the Leaflet map widget and default options."""
    pass
# Register the models with their map-enabled admin classes.
admin.site.register(Incidences, IncidencesAdmin)
admin.site.register(Maha1, MahaAdmin)
import sys
import os
def message(type, content = ''):
    """Build a wire message: the type character, the payload, then ';'."""
    return '{0}{1};'.format(type, content)
def jeton():
    """Build a token ('J') message for the ring."""
    return message('J')
def prime(n):
    """Build a prime announcement ('P<n>;') message."""
    return message('P', str(n))
def unknown(n):
    """Build a candidate ('?<n>;') message -- primality not yet decided."""
    return message('?', str(n))
def noprime(n):
    """Build a composite ('N<n>;') message -- some prime divides n."""
    return message('N', str(n))
def decode(str):
    """Split a raw message (already stripped of its ';') into its type
    character and payload string."""
    return (str[0], str[1:])
def process(content, token, primes):
    """Test the candidate *content* against this process's prime list.

    Returns (reply, token, primes): an 'N' message when one of our primes
    divides the candidate, otherwise a '?' message so downstream filters
    can keep testing it.
    """
    candidate = int(content)
    for known_prime in primes:
        if candidate % known_prime == 0:
            return (noprime(candidate), token, primes)
    return (unknown(candidate), token, primes)
def dispatch(received, token, primes):
    """Handle one incoming ring message; return (forward, token, primes).

    Message types:
      J  the token arrives: remember we hold it, forward nothing.
      ?  candidate number: test it against our primes (see process()).
      P  prime announcement: if we hold the token, claim the prime as
         ours and emit a fresh token; otherwise forward it unchanged.
      D  debug: dump our prime list to stderr.
      L  print the largest prime we own to stderr.
      S  print summary statistics to stderr.
    """
    (type, content) = decode(received)
    if type == 'J':
        token = True
        return ('', token, primes)
    # Default: pass the whole message (type char included) downstream.
    # The source had "content + ';'", which strips the type character and
    # breaks the ring protocol for forwarded P/D/L/S messages.
    forward = received + ';'
    if type == '?':
        return process(content, token, primes)
    # BUGFIX: these two comparisons were corrupted to "None == 'P'" /
    # "None == 'D'" (always false), so primes were never recorded.
    if type == 'P' and token:
        token = False
        val = int(content)
        primes.append(val)
        return (jeton(), token, primes)
    if type == 'D':
        print >>sys.stderr, primes
    if type == 'L':
        print >>sys.stderr, primes[-1]
    if type == 'S' and len(primes) > 0:
        print >>sys.stderr, '%d primes, last=%d' % (len(primes), primes[-1])
    return (forward, token, primes)
def input():
    """Read one ';'-terminated message from stdin (fd 0).

    Whitespace is skipped, invalid characters are reported to stderr and
    ignored, and the message is returned without its trailing ';'.
    Returns '' on EOF.
    """
    valid = set('?PJNSDL;1234567890\n\t\r ')
    blank = set('\n\t\r ')
    (char, res) = ('', '')
    while char != ';':
        char = os.read(0, 1)
        if char not in valid:
            if char == '':
                return ''
            # BUGFIX: was "print >>None.stderr" (AttributeError at runtime).
            print >>sys.stderr, "input: invalid char '%s'" % char
            continue
        if char in blank:
            continue
        res += char
    return res[:-1]
def main_process():
    """Run one filter stage of the ring: read messages from stdin, dispatch
    them, and forward any reply on stdout until EOF."""
    token = False
    primes = []
    # Unbuffered stdout so forwarded messages reach the next process promptly.
    sys.stdout = os.fdopen(sys.stdout.fileno(), 'w', 0)
    incoming = input()
    while len(incoming) > 0:
        (forward, token, primes) = dispatch(incoming, token, primes)
        if len(forward) > 0:
            print forward
        incoming = input()
    # EOF: report the primes this process collected.
    print >>sys.stderr, primes
    print >>sys.stderr, 'process(%d): %d primes found' % (os.getpid(), len(primes))
def main_master(nproc):
if not parallel and last_out * last_out > last_out + nproc:
nslot = (nproc - 1) + nslot
parallel = True
print >>sys.stderr, '--> Going Parallel'
while nslot > 0:
i = i + 1
print unknown(i)
nslot = nslot - 1
(type, content) = decode(input())
if type == '?':
print >>sys.stderr, content, prime(int(content))
last_out = int(content)
nslot = nslot + 1
if type == 'J':
print jeton()
if type == 'N':
print >>sys.stderr, '.'last_out = int(content)nslot = nslot + 1,
if type == 'P':
print >>sys.stderr, 'ERROR: Prime seen on output of ring. Lost token??'
continue
if __name__ == '__main__':
    print >>sys.stderr, 'process starting...'
    print >>sys.stderr, sys.argv
    # -t: run the module doctests; -h: print module help.
    if len(sys.argv) > 1 and sys.argv[1] == '-t':
        import doctest
        doctest.testmod()
        sys.exit(0)
    if len(sys.argv) > 1 and sys.argv[1] == '-h':
        help('__main__')
        sys.exit(0)
    # A positive numeric argument starts the master driving that many ring
    # slots; no argument starts a plain filter process.
    if len(sys.argv) > 1:
        if int(sys.argv[1]) > 0:
            main_master(int(sys.argv[1]))
        else:
            help('__main__')
    if len(sys.argv) == 1:
        main_process()
| StarcoderdataPython |
89142 | <gh_stars>0
"""Admin classes for the ``cmsplugin_blog_categories`` app."""
from django.contrib import admin
from django.utils.translation import get_language
from django.utils.translation import ugettext_lazy as _
from cmsplugin_blog.admin import EntryAdmin
from simple_translation.admin import TranslationAdmin
from simple_translation.utils import get_preferred_translation_from_lang
from cmsplugin_blog_categories.models import Category, EntryCategory
class EntryCategoryInline(admin.TabularInline):
    """Inline editor for Entry<->Category link rows on the entry form."""
    model = EntryCategory
    extra = 1
class EntryCategoryAdmin(admin.ModelAdmin):
    """Changelist for Entry<->Category links showing translated titles."""
    list_display = ['entry_title', 'category_title', ]
    def category_title(self, obj):
        """Returns the best available translation for the category title."""
        return obj.category.get_translation().title
    category_title.short_description = _('Category')
    def entry_title(self, obj):
        """Returns the best available translation for the entry title."""
        lang = get_language()
        entry_title = get_preferred_translation_from_lang(obj.entry, lang)
        return entry_title.title
    entry_title.short_description = _('Entry title')
class CategoryAdmin(TranslationAdmin):
    """Category admin listing the translated title and available languages."""
    list_display = ['title', 'languages', ]
    def title(self, obj):
        """Returns the best available translation for the category title."""
        return obj.get_translation().title
    title.short_description = _('Title')
# Enhance original EntryAdmin: append our inline (copy the list first so the
# upstream class attribute is not mutated in place).
EntryAdmin.inlines = EntryAdmin.inlines[:] + [EntryCategoryInline]
# Register our own admins
admin.site.register(Category, CategoryAdmin)
admin.site.register(EntryCategory, EntryCategoryAdmin)
| StarcoderdataPython |
3304131 | <filename>pymager/persistence/_schemamigrator.py
"""
Copyright 2010 <NAME>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from zope.interface import Interface, implements
class SchemaMigrator(Interface):
    """ Manages the Schema, Metadata, and stores references to the Engine and Session Maker
    (zope.interface contract -- method bodies are documentation only). """
    def create_schema(self):
        """ Create the database metadata """
    def drop_all_tables(self):
        """ Drop all tables """
    def session_template(self):
        """ Creates a Spring JDBC-like template """
| StarcoderdataPython |
# The ArtNET Receiver is based upon the work of
# https://github.com/Blinkinlabs/BlinkyTape_Python/blob/master/artnet-receiver.py by <NAME>
import sys
import time
import socket
from struct import unpack
import threading
UDP_IP = "" # listen on all sockets- INADDR_ANY
UDP_PORT = 0x1936 # Art-net is supposed to only use this address
class ArtnetPacket:
    """Parsed Art-Net DMX data ("OpOutput") packet."""

    ARTNET_HEADER = b'Art-Net\x00'
    OP_OUTPUT = 0x0050

    def __init__(self):
        # All fields are populated by unpack_raw_artnet_packet().
        self.op_code = None
        self.ver = None
        self.sequence = None
        self.physical = None
        self.universe = None
        self.length = None
        self.data = None

    @staticmethod
    def unpack_raw_artnet_packet(raw_data):
        """Parse *raw_data* into an ArtnetPacket, or return None when the
        datagram is not an Art-Net data (OpOutput) packet."""
        if unpack('!8s', raw_data[:8])[0] != ArtnetPacket.ARTNET_HEADER:
            return None
        pkt = ArtnetPacket()
        # Only data packets are handled; bail out on any other opcode.
        (pkt.op_code,) = unpack('!H', raw_data[8:10])
        if pkt.op_code != ArtnetPacket.OP_OUTPUT:
            return None
        (pkt.op_code, pkt.ver, pkt.sequence, pkt.physical,
            pkt.universe, pkt.length) = unpack('!HHBBHH', raw_data[8:18])
        # The universe field is transmitted little-endian; re-read it as such.
        (pkt.universe,) = unpack('<H', raw_data[14:16])
        payload_len = int(pkt.length)
        (pkt.data,) = unpack(
            '{0}s'.format(payload_len),
            raw_data[18:18 + payload_len])
        return pkt
class ArtNetReceiver(threading.Thread):
    """Background thread receiving Art-Net DMX data for one universe and
    firing per-channel callbacks when values change (Python 2 code)."""
    def __init__(self, universe, comm = None, listenStart = 0, listenStop = 512, ip = UDP_IP, port = UDP_PORT):
        """Bind a non-blocking UDP socket and prepare channel bookkeeping.

        comm: optional dict-like object; its 'signal' key is updated by the
        watchdog when the Art-Net signal appears/disappears.
        listenStart/listenStop: channel range to track (listenStart is
        currently stored but not used for filtering -- confirm intent).
        """
        threading.Thread.__init__(self)
        self.universe = universe
        self.listenStart = listenStart
        self.listenStop = listenStop
        self.stopped = False
        self.comm = comm
        print(("Listening for ArtNet Packages in {0}:{1}").format(ip, port))
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) # UDP
        self.sock.bind((ip, port))
        self.sock.setblocking(False)
        # Last known value per channel; -1 means "never seen".
        self.actualData = [-1] * (listenStop + 1)
        self.lastTime = time.time()
        self.lastSequence = 0
        # Per-channel callback slots: False or (function, isWide, addresses).
        self.callbacks = [False] * (listenStop + 1)
        # Start 3 s in the past so the watchdog initially reports "no signal".
        self.lastPacketTime = time.time()-3;
        self.status = False
    def watchdog(self):
        """Self-rescheduling 0.5 s timer reporting signal gain/loss (a gap of
        more than 2 s without packets counts as signal loss)."""
        if self.stopped:
            print "Watchdog ended"
            return
        threading.Timer(0.5, self.watchdog).start();
        actTime = time.time();
        statusChanged = False
        if actTime - self.lastPacketTime > 2:
            statusChanged = self.status != False
            self.status = False
        else:
            statusChanged = self.status != True
            self.status = True
        if statusChanged and self.status:
            print "Now Receiving ArtNET Signal"
            self.comm["signal"] = True
        if statusChanged and not self.status:
            print "ArtNET Signal LOST!!!"
            self.comm["signal"] = False
    def registerCallback(self, address, function):
        '''
        Function needs this footprint:
        function(address, value, changeAmount, completeUniverse)
        '''
        # A list of addresses registers one "wide" callback fired once per
        # packet when any of them changes; a single int registers a
        # per-channel callback.
        if type(address) is list:
            for singleAddress in address:
                self.callbacks[singleAddress] = (function, True, address)
        else:
            self.callbacks[address] = (function, False, address)
    def stop(self):
        """Request shutdown of the receive loop and the watchdog."""
        print "Stopping Artnet Receiver"
        self.stopped = True
    def run(self):
        """Receive loop: parse datagrams, track per-channel values for our
        universe, and invoke registered callbacks on change."""
        self.watchdog()
        self.callbacksToRun = {}
        while not self.stopped:
            try:
                data, addr = self.sock.recvfrom(1024)
                packet = ArtnetPacket.unpack_raw_artnet_packet(data)
                if packet != None:
                    self.lastPacketTime = time.time();
                if packet != None and packet.universe == self.universe:
                    if packet.sequence != self.lastSequence:
                        pass
                    i = 0;
                    for newData in packet.data:
                        if i >= self.listenStop:
                            break;
                        oldDataValue = self.actualData[i]
                        # Python 2: packet.data iterates as 1-char strings.
                        newDataValue = unpack('B',newData)[0]
                        self.actualData[i] = newDataValue
                        # if newDataValue != oldDataValue:
                        #     print "Change in Data for Channel {0} went to {1}".format(i + 1, newDataValue)
                        if newDataValue != oldDataValue and self.callbacks[i]:
                            (callback, isWide, addresses) = self.callbacks[i]
                            # Deduplicate so a wide callback fires once per packet.
                            if isWide:
                                self.callbacksToRun[addresses[0]] = (callback, addresses, isWide)
                            else:
                                self.callbacksToRun[addresses] = (callback, addresses, isWide)
                        i += 1
                    # Do the callbacks
                    for cbStartAdrr in self.callbacksToRun:
                        (callback, addresses, isWide) = self.callbacksToRun[cbStartAdrr];
                        if isWide:
                            callback(addresses, [self.actualData[i] for i in addresses], self.actualData)
                        else:
                            callback(addresses, self.actualData[addresses], self.actualData)
                    self.callbacksToRun = {}
                    self.lastSequence = packet.sequence
            except socket.error as e:
                # Non-blocking socket: no datagram waiting; back off briefly.
                time.sleep(0.01);
            except KeyboardInterrupt:
                self.sock.close()
                return False
        print "Artnet Receiver Terminated"
95362 | <filename>Project Euler Problems/Problem55.py<gh_stars>0
def checkpalindromic(num):
    """Return True if *num* reaches a palindrome within 50 reverse-and-add
    iterations, i.e. it is NOT a Lychrel candidate under the 50-step cap
    (Project Euler 55)."""
    current = num
    for _ in range(50):
        current += int(str(current)[::-1])
        text = str(current)
        if text == text[::-1]:
            return True
    return False
# Count Lychrel candidates below 10000 and print the result.
lychrel_count = sum(1 for n in range(1, 10000) if not checkpalindromic(n))
print(lychrel_count)
from typing import Dict, Any
import pytest
from checkov.common.bridgecrew.bc_source import SourceType
from checkov.common.bridgecrew.platform_integration import BcPlatformIntegration, bc_integration
@pytest.fixture()
def mock_bc_integration() -> BcPlatformIntegration:
    """Return the global ``bc_integration`` singleton configured with a dummy
    API key and repository metadata, skipping all platform downloads."""
    bc_integration.bc_api_key = "<KEY>"
    bc_integration.setup_bridgecrew_credentials(
        repo_id="bridgecrewio/checkov",
        skip_fixes=True,
        skip_suppressions=True,
        skip_policy_download=True,
        source=SourceType("Github", False),
        source_version="1.0",
        repo_branch="master",
    )
    return bc_integration
@pytest.fixture()
def scan_result() -> Dict[str, Any]:
    """Return a canned dependency-scan result for a requirements.txt with a
    single vulnerable django package (one critical CVE); the shape mirrors
    the scanner's JSON output -- confirm against the real scanner schema."""
    return {
        "repository": "/abs_path/to/app/requirements.txt",
        "passed": True,
        "packages": {"type": "python", "name": "django", "version": "1.2", "path": "/abs_path/to/app/requirements.txt"},
        "complianceIssues": None,
        "complianceDistribution": {"critical": 0, "high": 0, "medium": 0, "low": 0, "total": 0},
        "vulnerabilities": [
            {
                "id": "CVE-2019-19844",
                "status": "fixed in 3.0.1, 2.2.9, 1.11.27",
                "cvss": 9.8,
                "vector": "CVSS:3.1/AV:N/AC:L/PR:N/UI:N/S:U/C:H/I:H/A:H",
                "description": "Django before 1.11.27, 2.x before 2.2.9, and 3.x before 3.0.1 allows account takeover.",
                "severity": "critical",
                "packageName": "django",
                "packageVersion": "1.2",
                "link": "https://nvd.nist.gov/vuln/detail/CVE-2019-19844",
                "riskFactors": ["Critical severity", "Has fix", "Attack complexity: low", "Attack vector: network"],
                "impactedVersions": ["\u003c1.11.27"],
                "publishedDate": "2019-12-18T20:15:00+01:00",
                "discoveredDate": "2019-12-18T19:15:00Z",
                "fixDate": "2019-12-18T20:15:00+01:00",
            }
        ],
        "vulnerabilityDistribution": {"critical": 1, "high": 0, "medium": 0, "low": 0, "total": 0},
    }
from interpreter.typing.basic_type import BasicType
class UnionType(BasicType):
    """Type representing "lhs or rhs": queries succeed when either side does."""

    def __init__(self, lhs, rhs):
        BasicType.__init__(self, None, {}, False)
        self.lhs = lhs
        self.rhs = rhs

    def compare_value(self, other_type):
        # A union matches a value when either alternative matches it.
        return self.lhs.compare_value(other_type) or self.rhs.compare_value(other_type)

    def has_property(self, name, property_type=None):
        return (self.lhs.has_property(name, property_type)
                or self.rhs.has_property(name, property_type))

    def get_property_type(self, name):
        """Return the property's type: a union when both sides have it, the
        single owning side's type otherwise, or None when neither has it."""
        left_has = self.lhs.has_property(name)
        right_has = self.rhs.has_property(name)
        if left_has and right_has:
            return UnionType(self.lhs.get_property_type(name),
                             self.rhs.get_property_type(name))
        if left_has:
            return self.lhs.get_property_type(name)
        if right_has:
            return self.rhs.get_property_type(name)
        return None

    def __repr__(self):
        return "UnionType({}, {})".format(repr(self.lhs), repr(self.rhs))
| StarcoderdataPython |
class C(object):
    """Minimal placeholder class with a single class-level attribute."""
    foo = None  # class-level default; presumably overridden elsewhere -- confirm
| StarcoderdataPython |
import os
import os.path as osp
import numpy as np
from scipy.integrate import odeint
import moviepy.editor as mpy
from qtpy.QtCore import Qt
from qtpy.QtCore import QPointF
from qtpy.QtGui import QColor
from nezzle.graphics import EllipseNode
from nezzle.graphics import TextLabel
from nezzle.graphics import CurvedEdge
from nezzle.graphics import Triangle, Hammer
from nezzle.graphics import Network
from nezzle.io import write_image
def create_network(pos_x, pos_y, state, norm_abs_state):
    """Build the 3-node Lorenz network styled by the current ODE state.

    pos_x/pos_y give per-node positions for X, Y, Z; state holds the signed
    (x, y, z) values; norm_abs_state holds |state| normalized to [0, 1] and
    drives node size, label size, and fill-color intensity (red for a
    positive value, blue for a negative one).
    """
    # RGBA base colors used for interpolation; alpha is forced to 255 below.
    color_white = np.array([255, 255, 255, 0])
    color_up = np.array([255, 0, 0, 0])
    color_dn = np.array([0, 0, 255, 0])
    net = Network('Lorenz network')
    x = EllipseNode('X', 40, 40, pos=QPointF(pos_x[0], pos_y[0]))
    y = EllipseNode('Y', 40, 40, pos=QPointF(pos_x[1], pos_y[1]))
    z = EllipseNode('Z', 40, 40, pos=QPointF(pos_x[2], pos_y[2]))
    net.add_node(x)
    net.add_node(y)
    net.add_node(z)
    # Curved edges encode the Lorenz interactions; triangle heads denote
    # activation, the hammer head (Z -| Y) denotes inhibition.
    head = Triangle(width=10, height=10, offset=4)
    edge1 = CurvedEdge("EDGE1", x, y, width=4, head=head)
    edge1["FILL_COLOR"] = Qt.black
    edge1["CP_POS_X"] = -10
    edge1["CP_POS_Y"] = -50
    head = Triangle(width=10, height=10, offset=4)
    edge2 = CurvedEdge("EDGE2", y, x, width=4, head=head)
    edge2["FILL_COLOR"] = Qt.black
    edge2["CP_POS_X"] = 10
    edge2["CP_POS_Y"] = 40
    head = Triangle(width=10, height=10, offset=4)
    edge3 = CurvedEdge("EDGE3", y, z, width=4, head=head)
    edge3["FILL_COLOR"] = Qt.black
    edge3["CP_POS_X"] = -28
    edge3["CP_POS_Y"] = -28
    head = Hammer(width=14, height=4, offset=4)
    edge4 = CurvedEdge("EDGE3", z, y, width=4, head=head)
    edge4["FILL_COLOR"] = Qt.black
    edge4["CP_POS_X"] = 45
    edge4["CP_POS_Y"] = 40
    head = Triangle(width=10, height=10, offset=4)
    edge5 = CurvedEdge("EDGE3", z, x, width=4, head=head)
    edge5["FILL_COLOR"] = Qt.black
    edge5["CP_POS_X"] = -45
    edge5["CP_POS_Y"] = 40
    net.add_edge(edge1)
    net.add_edge(edge2)
    net.add_edge(edge3)
    net.add_edge(edge4)
    net.add_edge(edge5)
    for i, node in enumerate([x, y, z]):
        # Interpolate from white toward red (positive) or blue (negative).
        if state[i] > 0.0:
            color = color_white + norm_abs_state[i] * (color_up - color_white)
        else:
            color = color_white + norm_abs_state[i] * (color_dn - color_white)
        color[3] = 255
        node["FILL_COLOR"] = QColor(*color)
        node["BORDER_COLOR"] = Qt.black
        node["BORDER_WIDTH"] = 2
        node["WIDTH"] = node["HEIGHT"] = 20 + 50 * norm_abs_state[i]
        label_name = TextLabel(node, node.iden)
        label_name["FONT_SIZE"] = 10 + 30 * norm_abs_state[i]
        label_name["TEXT_COLOR"] = Qt.white
        label_name.align()
        # White bold text on dark fills, black regular text on light fills.
        lightness = QColor(node["FILL_COLOR"]).lightness()
        if lightness < 200:
            label_name["TEXT_COLOR"] = Qt.white
            label_name["FONT_BOLD"] = True
        else:
            label_name["TEXT_COLOR"] = Qt.black
            label_name["FONT_BOLD"] = False
        net.add_label(label_name)
    # end of for
    return net
def create_movie(fpaths, fout):
    """Concatenate the images in *fpaths* (0.2 s each) into a 10 fps GIF
    written to *fout*."""
    frames = [mpy.ImageClip(path).set_duration(0.2) for path in fpaths]
    movie = mpy.concatenate_videoclips(frames,
                                       bg_color=(255, 255, 255),
                                       method="compose")
    movie.write_gif(fout, fps=10)
def update(nav, net):
    """Integrate the Lorenz system, render one network image per time step,
    and assemble the frames into an animated GIF.

    NOTE(review): *nav* is unused and *net* is immediately rebound inside
    the loop -- the parameters appear to exist only to satisfy the nezzle
    plugin entry-point signature; confirm.
    """
    # Solve the ODE of Lorenz system
    def ode(s, t):
        sigma = 10
        beta = 2.667
        rho = 28
        x, y, z = s
        return [sigma * (y - x), x * (rho - z) - y, x * y - beta * z]
    t = np.arange(0, 20, 0.1)
    y0 = np.array([0, 1, 1.05])
    s = odeint(ode, y0, t)
    # Per-variable |value| normalized by its own maximum over the trajectory.
    abs_s = np.abs(s)
    norm_abs_s = abs_s / abs_s.max(axis=0)
    pos_x = np.array([-100.0, 100.0, 0.0])
    pos_y = np.array([0.0, 0.0, 120.0])
    dpath = osp.join(osp.dirname(__file__), "lorenz-dynamics-results")
    os.makedirs(dpath, exist_ok=True)
    fpaths = []
    for i, (state, norm_abs_state) in enumerate(zip(s, norm_abs_s)):
        net = create_network(pos_x, pos_y, state, norm_abs_state)
        fpath = osp.join(dpath, "lorenz-dynamics-%03d.png"%(i))
        fpaths.append(fpath)
        write_image(net, fpath, scale_width=200, scale_height=200)
    # end of for
    create_movie(fpaths, osp.join(dpath, "lorenz-dynamics.gif"))
| StarcoderdataPython |
from dateutil.relativedelta import relativedelta
from django.core.management.base import BaseCommand
from django.utils import timezone
from django.core.exceptions import ImproperlyConfigured
from django.db.models import Q
import logging
from mooringlicensing.components.approvals.email import send_vessel_nomination_reminder_mail
from mooringlicensing.components.approvals.models import Approval, WaitingListAllocation, \
MooringLicence, AuthorisedUserPermit
from mooringlicensing.components.main.models import NumberOfDaysType, NumberOfDaysSetting
from mooringlicensing.management.commands.utils import ml_meet_vessel_requirement
from mooringlicensing.settings import (
CODE_DAYS_BEFORE_END_OF_SIX_MONTH_PERIOD_ML,
CODE_DAYS_BEFORE_END_OF_SIX_MONTH_PERIOD_WLA,
CODE_DAYS_BEFORE_END_OF_SIX_MONTH_PERIOD_AUP,
)
logger = logging.getLogger(__name__)
class Command(BaseCommand):
help = 'Send email to WLA/ML holder configurable number of days before end of six month period in which a new vessel is to be nominated'
    def handle(self, *args, **options):
        """Management-command entry point: run the vessel-nomination reminder
        check once for each approval type, using today's local date."""
        today = timezone.localtime(timezone.now()).date()
        self.perform(WaitingListAllocation.code, today, **options)
        self.perform(MooringLicence.code, today, **options)
        self.perform(AuthorisedUserPermit.code, today, **options)
def perform(self, approval_type, today, **options):
errors = []
updates = []
# Retrieve the number of days before expiry date of the approvals to email
if approval_type == WaitingListAllocation.code:
days_type = NumberOfDaysType.objects.get(code=CODE_DAYS_BEFORE_END_OF_SIX_MONTH_PERIOD_WLA)
approval_class = WaitingListAllocation
elif approval_type == MooringLicence.code:
days_type = NumberOfDaysType.objects.get(code=CODE_DAYS_BEFORE_END_OF_SIX_MONTH_PERIOD_ML)
approval_class = MooringLicence
elif approval_type == AuthorisedUserPermit.code:
days_type = NumberOfDaysType.objects.get(code=CODE_DAYS_BEFORE_END_OF_SIX_MONTH_PERIOD_AUP)
approval_class = AuthorisedUserPermit
else:
# Do nothing
return
days_setting = NumberOfDaysSetting.get_setting_by_date(days_type, today)
if not days_setting:
# No number of days found
raise ImproperlyConfigured("NumberOfDays: {} is not defined for the date: {}".format(days_type.name, today))
# NOTE: When sending the reminder:
# sold_date + 6months - number_of_days < today
# sold_date < today - 6months + number_of_days
boundary_date = today - relativedelta(months=+6) + relativedelta(days=days_setting.number_of_days)
logger.info('Running command {}'.format(__name__))
# For debug
params = options.get('params')
debug = True if params.get('debug', 'f').lower() in ['true', 't', 'yes', 'y'] else False
approval_lodgement_number = params.get('send_vessel_nominate_reminder_lodgement_number', 'no-number')
# Get approvals
if approval_type == WaitingListAllocation.code:
queries = Q()
queries &= Q(status__in=(Approval.APPROVAL_STATUS_CURRENT, Approval.APPROVAL_STATUS_SUSPENDED))
queries &= Q(current_proposal__vessel_ownership__end_date__lt=boundary_date)
queries &= Q(vessel_nomination_reminder_sent=False)
if debug:
queries = queries | Q(lodgement_number__iexact=approval_lodgement_number)
approvals = approval_class.objects.filter(queries)
elif approval_type == MooringLicence.code:
queries = Q()
queries &= Q(status__in=(Approval.APPROVAL_STATUS_CURRENT, Approval.APPROVAL_STATUS_SUSPENDED))
queries &= Q(vessel_nomination_reminder_sent=False)
possible_approvals = approval_class.objects.filter(queries)
approvals = []
for approval in possible_approvals:
# Check if there is at least one vessel which meets the ML vessel requirement
if not ml_meet_vessel_requirement(approval, boundary_date):
approvals.append(approval)
if debug:
apps = MooringLicence.objects.filter(lodgement_number__iexact=approval_lodgement_number)
if apps:
approvals.append(apps[0])
elif approval_type == AuthorisedUserPermit.code:
queries = Q()
queries &= Q(status__in=(Approval.APPROVAL_STATUS_CURRENT, Approval.APPROVAL_STATUS_SUSPENDED))
queries &= Q(current_proposal__vessel_ownership__end_date__lt=boundary_date)
queries &= Q(vessel_nomination_reminder_sent=False)
if debug:
queries = queries | Q(lodgement_number__iexact=approval_lodgement_number)
approvals = approval_class.objects.filter(queries)
for a in approvals:
try:
send_vessel_nomination_reminder_mail(a)
a.vessel_nomination_reminder_sent = True
a.save()
logger.info('Reminder to permission holder sent for Approval {}'.format(a.lodgement_number))
updates.append(a.lodgement_number)
except Exception as e:
err_msg = 'Error sending reminder to permission holder for Approval {}'.format(a.lodgement_number)
logger.error('{}\n{}'.format(err_msg, str(e)))
errors.append(err_msg)
cmd_name = __name__.split('.')[-1].replace('_', ' ').upper()
err_str = '<strong style="color: red;">Errors: {}</strong>'.format(len(errors)) if len(
errors) > 0 else '<strong style="color: green;">Errors: 0</strong>'
msg = '<p>{} completed. {}. IDs updated: {}.</p>'.format(cmd_name, err_str, updates)
logger.info(msg)
print(msg) # will redirect to cron_tasks.log file, by the parent script
| StarcoderdataPython |
1684870 | # Copyright (C) 2019 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""Issue object integration functionality via cron job."""
# pylint: disable=invalid-name
import logging
from datetime import datetime
from ggrc import db
from ggrc.models import all_models
from ggrc.integrations.synchronization_jobs import sync_utils
from ggrc.integrations import constants
logger = logging.getLogger(__name__)
# Object fields compared during sync; empty here because this job syncs
# statuses/people/due-date explicitly rather than generic field diffs.
FIELDS_TO_CHECK = ()

# Maps Issue Tracker ticket statuses (lower-cased) onto GGRC Issue statuses.
ISSUE_STATUS_MAPPING = {
    "new": "Draft",
    "assigned": "Draft",
    "accepted": "Active",
    "fixed": "Fixed",
    "verified": "Fixed and Verified",
    "not_reproducible": "Deprecated",
    "intended_behavior": "Deprecated",
    "obsolete": "Deprecated",
    "infeasible": "Deprecated",
    "duplicate": "Deprecated",
}
def get_current_issue_tracker_person_acl(sync_object, role_name):
    """Return the (person, acl) pair used for sending emails to Issue Tracker.

    Filters the object's access control list down to entries whose role name
    equals ``role_name`` and returns the pair whose person has the
    alphabetically first name; ``(None, None)`` when nothing matches.
    """
    # TODO: Reduce number of queries to DB.
    matching = []
    for person, acl in sync_object.access_control_list:
        if acl.ac_role.name == role_name:
            matching.append((person, acl))
    if not matching:
        return (None, None)
    # min() with a stable key is equivalent to sorted(...)[0].
    return min(matching, key=lambda pair: pair[0].name)
def sync_assignee_email(issuetracker_state, sync_object, assignees_role):
    """Sync issue assignee email.

    If the ticket's assignee maps to a known Person who is not already a
    Primary Contact on the Issue, the current tracker-designated Primary
    Contact (if any) is removed and the new person added.
    """
    issue_tracker_assignee = issuetracker_state.get("assignee")
    new_assignee = all_models.Person.query.filter_by(
        email=issue_tracker_assignee
    ).first()
    # Unknown emails are ignored: only existing Person records can be added.
    if new_assignee:
        issue_primary_contacts = sync_object.get_persons_for_rolename(
            "Primary Contacts"
        )
        primary_contact_emails = [person.email
                                  for person in issue_primary_contacts]
        if issue_tracker_assignee not in primary_contact_emails:
            # Swap: drop the currently designated contact, add the new one.
            person, current_assignee_acl = get_current_issue_tracker_person_acl(
                sync_object,
                "Primary Contacts"
            )
            if current_assignee_acl:
                current_assignee_acl.remove_person(person)
            sync_object.add_person_with_role(new_assignee, assignees_role)
def sync_verifier_email(issuetracker_state, sync_object, admin_role):
    """Sync Issue verifier email.

    Mirrors sync_assignee_email but for the ticket verifier, mapped onto the
    Issue's "Admin" role.
    """
    issue_tracker_verifier = issuetracker_state.get("verifier")
    new_verifier = all_models.Person.query.filter_by(
        email=issue_tracker_verifier
    ).first()
    # Unknown emails are ignored: only existing Person records can be added.
    if new_verifier:
        issue_admins = sync_object.get_persons_for_rolename("Admin")
        admin_emails = [admin.email for admin in issue_admins]
        if issue_tracker_verifier not in admin_emails:
            # Swap: drop the currently designated admin, add the new one.
            person, current_verifier_acl = get_current_issue_tracker_person_acl(
                sync_object,
                "Admin",
            )
            if current_verifier_acl:
                current_verifier_acl.remove_person(person)
            sync_object.add_person_with_role(new_verifier, admin_role)
def sync_statuses(issuetracker_state, sync_object):
    """Mirror the Issue Tracker ticket status onto the synced Issue.

    The (case-insensitive) tracker status is translated through
    ISSUE_STATUS_MAPPING and assigned only when it differs from the current
    status. A missing/empty tracker status leaves the object untouched; an
    unmapped status raises KeyError, as before.
    """
    tracker_status = issuetracker_state.get("status")
    if not tracker_status:
        return
    mapped_status = ISSUE_STATUS_MAPPING[tracker_status.lower()]
    if sync_object.status != mapped_status:
        sync_object.status = mapped_status
def sync_due_date(custom_fields, sync_object):
    """Copy the Issue Tracker due date onto the synced object.

    Reads the DUE_DATE custom field and, when non-empty, parses it as a
    ``YYYY-MM-DD`` string into a ``datetime`` stored on
    ``sync_object.due_date``. Absent or empty values are ignored.
    """
    raw_due_date = custom_fields.get(constants.CustomFields.DUE_DATE)
    if not raw_due_date:
        return
    sync_object.due_date = datetime.strptime(raw_due_date, "%Y-%m-%d")
def sync_issue_attributes():
    """Synchronizes issue tracker ticket attrs with the Issue object attrs.

    Synchronize issue status, email lists (Primary Contacts and Admins) and
    the due date for every Issue linked to an Issue Tracker ticket.
    """
    issuetracker_issues = sync_utils.collect_issue_tracker_info(
        "Issue"
    )
    if not issuetracker_issues:
        return
    # Look the two roles up once and reuse them for every synced Issue.
    assignees_role = all_models.AccessControlRole.query.filter_by(
        object_type=all_models.Issue.__name__, name="Primary Contacts"
    ).first()
    admin_role = all_models.AccessControlRole.query.filter_by(
        object_type=all_models.Issue.__name__, name="Admin"
    ).first()
    processed_ids = set()
    for batch in sync_utils.iter_issue_batches(issuetracker_issues.keys()):
        # NOTE: iteritems() -- this module targets Python 2.
        for issue_id, issuetracker_state in batch.iteritems():
            issue_id = str(issue_id)
            issue_info = issuetracker_issues.get(issue_id)
            if not issue_info:
                # The tracker returned a ticket we never asked about.
                logger.warning(
                    "Got an unexpected issue from Issue Tracker: %s", issue_id)
                continue
            processed_ids.add(issue_id)
            sync_object = issue_info["object"]
            # Sync attributes.
            sync_statuses(issuetracker_state, sync_object)
            sync_assignee_email(issuetracker_state, sync_object, assignees_role)
            sync_verifier_email(issuetracker_state, sync_object, admin_role)
            custom_fields = {
                constants.CustomFields.DUE_DATE: sync_utils.parse_due_date(
                    issuetracker_state.get("custom_fields", [])
                )
            }
            sync_due_date(custom_fields, sync_object)
    # Persist all attribute changes at once.
    db.session.commit()
    logger.debug("Sync is done, %d issue(s) were processed.", len(processed_ids))
    # Report tickets we asked about but never received back.
    missing_ids = set(issuetracker_issues) - processed_ids
    if missing_ids:
        logger.warning(
            "Some issues are linked to Issue "
            "but were not found in Issue Tracker: %s",
            ", ".join(str(i) for i in missing_ids)
        )
| StarcoderdataPython |
106142 | <filename>spark/example/count.py
import pyspark

# Minimal Spark smoke test: count the integers 0..9 (prints "***** 10 *****").
print("*****", pyspark.SparkContext().parallelize(range(0, 10)).count(), "*****")
| StarcoderdataPython |
from libnessus.parser import NessusParser
from libnessus.reportjson import ReportEncoder
import json
from pprint import pprint

# Parse a Nessus XML export and dump every report object as JSON.
nessus_obj_list = NessusParser.parse_fromfile('/vagrant/nessus.xml')
for nessuso in nessus_obj_list:
    # NOTE(review): pprint receives an already-serialized JSON string, so it
    # prints the string repr rather than a pretty-printed structure --
    # presumably intentional for quick inspection; confirm.
    pprint(json.dumps(nessuso, cls=ReportEncoder))
| StarcoderdataPython |
14881 | <reponame>fjarri/grunnur
import pytest
import numpy
from grunnur import (
cuda_api_id, opencl_api_id,
StaticKernel, VirtualSizeError, API, Context, Queue, MultiQueue, Array, MultiArray
)
from grunnur.template import DefTemplate
from .mock_base import MockKernel, MockDefTemplate, MockDefTemplate
from .mock_pycuda import PyCUDADeviceInfo
from .mock_pyopencl import PyOpenCLDeviceInfo
from .test_program import _test_constant_memory
SRC = """
KERNEL void multiply(GLOBAL_MEM int *dest, GLOBAL_MEM int *a, GLOBAL_MEM int *b)
{
${static.begin};
const int i = ${static.global_id}(0);
const int j = ${static.global_id}(1);
const int idx = ${static.global_flat_id}();
dest[idx] = a[i] * b[j];
}
"""
def test_compile_static(mock_or_real_context):
    """StaticKernel computes an 11x15 outer product on a single device."""
    context, mocked = mock_or_real_context

    if mocked:
        kernel = MockKernel('multiply', [None, None, None], max_total_local_sizes={0: 1024})
        src = MockDefTemplate(kernels=[kernel])
    else:
        src = SRC

    a = numpy.arange(11).astype(numpy.int32)
    b = numpy.arange(15).astype(numpy.int32)
    ref = numpy.outer(a, b)

    queue = Queue(context.device)

    a_dev = Array.from_host(queue, a)
    b_dev = Array.from_host(queue, b)
    res_dev = Array.empty(context.device, (11, 15), numpy.int32)

    multiply = StaticKernel(context.device, src, 'multiply', (11, 15))
    multiply(queue, res_dev, a_dev, b_dev)

    res = res_dev.get(queue)

    # A mocked backend produces no real data, so only check numerics live.
    if not mocked:
        assert (res == ref).all()
def test_compile_static_multi_device(mock_or_real_multi_device_context):
    """Splits an outer-product computation across two devices."""
    context, mocked = mock_or_real_multi_device_context

    if mocked:
        kernel = MockKernel(
            'multiply', [None, None, None], max_total_local_sizes={0: 1024, 1: 512})
        src = MockDefTemplate(kernels=[kernel])
    else:
        src = SRC

    a = numpy.arange(22).astype(numpy.int32)
    b = numpy.arange(15).astype(numpy.int32)
    ref = numpy.outer(a, b)

    mqueue = MultiQueue.on_devices(context.devices[[0, 1]])

    # `a` is split between the devices; `b` is cloned onto each of them.
    a_dev = MultiArray.from_host(mqueue, a)
    b_dev = MultiArray.from_host(mqueue, b, splay=MultiArray.CloneSplay())
    res_dev = MultiArray.empty(mqueue.devices, (22, 15), ref.dtype)

    multiply = StaticKernel(mqueue.devices, src, 'multiply', res_dev.shapes)
    multiply(mqueue, res_dev, a_dev, b_dev)

    res = res_dev.get(mqueue)

    # A mocked backend produces no real data, so only check numerics live.
    if not mocked:
        assert (res == ref).all()
def test_constant_memory(mock_or_real_context):
    """Delegates to the shared constant-memory check in static-kernel mode."""
    context, mocked = mock_or_real_context
    _test_constant_memory(context=context, mocked=mocked, is_static=True)
def test_find_local_size(mock_context):
    """The virtual-size machinery picks fitting real global/local sizes."""
    kernel = MockKernel('multiply', [None], max_total_local_sizes={0: 64})
    src = MockDefTemplate(kernels=[kernel])
    multiply = StaticKernel(mock_context.device, src, 'multiply', (11, 15))
    # Virtual size (11, 15) gets padded up to multiples of the chosen
    # local size within the 64-thread budget.
    assert multiply._vs_metadata[mock_context.devices[0]].real_global_size == (16, 12)
    assert multiply._vs_metadata[mock_context.devices[0]].real_local_size == (16, 4)
def test_reserved_names(mock_context):
    """Passing `static` as a render global must be rejected."""
    kernel = MockKernel('test', [None])
    src = MockDefTemplate(kernels=[kernel])
    with pytest.raises(ValueError, match="The global name 'static' is reserved in static kernels"):
        multiply = StaticKernel(mock_context.device, src, 'test', (1024,), render_globals=dict(static=1))
def test_zero_max_total_local_size(mock_context):
    """A kernel that cannot run with any local size raises VirtualSizeError."""
    kernel = MockKernel('test', [None], max_total_local_sizes={0: 0})
    src = MockDefTemplate(kernels=[kernel])
    with pytest.raises(
            VirtualSizeError,
            match="The kernel requires too much resourses to be executed with any local size"):
        multiply = StaticKernel(mock_context.device, src, 'test', (1024,))
def test_virtual_sizes_error_propagated(mock_backend_pycuda):
    """Oversized global sizes surface a VirtualSizeError to the caller."""
    # Testing for PyCUDA backend only since mocked PyOpenCL backend does not have a way
    # to set maximum global sizes (PyOpenCL devices don't have a corresponding parameter),
    # and PyCUDA is enough to test the required code path.
    device_info = PyCUDADeviceInfo(
        max_threads_per_block=2**4,
        max_block_dim_x=2**4,
        max_block_dim_y=2**4,
        max_block_dim_z=2**4,
        max_grid_dim_x=2**10,
        max_grid_dim_y=2**10,
        max_grid_dim_z=2**8)
    mock_backend_pycuda.add_devices([device_info])
    api = API.from_api_id(mock_backend_pycuda.api_id)
    device = api.platforms[0].devices[0]
    context = Context.from_devices([device])
    kernel = MockKernel('test', [None], max_total_local_sizes={0: 16})
    src = MockDefTemplate(kernels=[kernel])

    # Just enough to fit in the grid limits
    multiply = StaticKernel(context.device, src, 'test', (2**14, 2**10, 2**8), (2**4, 1, 1))

    # Global size is too large to fit on the device,
    # so virtual size finding fails and the error is propagated to the user.
    with pytest.raises(
            VirtualSizeError,
            match="Bounding global size \\(16384, 2048, 256\\) is too large"):
        multiply = StaticKernel(context.device, src, 'test', (2**14, 2**11, 2**8), (2**4, 1, 1))
def test_builtin_globals(mock_backend_pycuda):
    """`device_params` is exposed to templates and rendered per device."""
    mock_backend_pycuda.add_devices([
        PyCUDADeviceInfo(max_threads_per_block=1024),
        PyCUDADeviceInfo(max_threads_per_block=512)])

    source_template = DefTemplate.from_string(
        'mock_source', [],
        """
        KERNEL void test()
        {
        int max_total_local_size = ${device_params.max_total_local_size};
        }
        """)

    api = API.from_api_id(mock_backend_pycuda.api_id)
    context = Context.from_devices([api.platforms[0].devices[0], api.platforms[0].devices[1]])

    src = MockDefTemplate(
        kernels=[MockKernel('test', [None], max_total_local_sizes={0: 1024, 1: 512})],
        source_template=source_template)

    kernel = StaticKernel(context.devices, src, 'test', (1024,))

    # Each device's source must reflect that device's own limit.
    assert 'max_total_local_size = 1024' in kernel.sources[context.devices[0]].source
    assert 'max_total_local_size = 512' in kernel.sources[context.devices[1]].source
| StarcoderdataPython |
1663622 | import tensorflow as tf
class FaceRecGraph(object):
    """Thin holder for a dedicated TensorFlow graph used by face recognition."""

    def __init__(self):
        # A separate tf.Graph keeps face-recognition ops isolated from any
        # default graph used elsewhere in the process.
        self.graph = tf.Graph()
| StarcoderdataPython |
1707468 | from Crypto.Cipher import AES
from Crypto.Hash import SHA256
from Crypto import Random
import hashlib
# travis encrypt PASSWORD=password -a -x
# NOTE: Python 2 script (print statement / raw_input).
# Derive a 32-hex-char AES-256 key from the SHA-256 of the typed password.
sha = SHA256.new()
sha.update(raw_input('Password:'))
key = sha.hexdigest()[:AES.block_size*2]
# File layout: first AES block is the IV, the remainder is CBC ciphertext.
text = open('emails.txt', 'rb').read()
iv = text[:AES.block_size]
cipher = text[AES.block_size:]
aes = AES.new(key, AES.MODE_CBC, iv)
plain = aes.decrypt(cipher).strip()
print 'Decrypted: "' + plain + '"'
# Append the new entry, space-pad to a whole block, and re-encrypt under a
# freshly generated random IV.
plain += ','+raw_input('Append data:')
plain += ' '*(AES.block_size - len(plain)%AES.block_size)
iv = Random.new().read(AES.block_size)
aes = AES.new(key, AES.MODE_CBC, iv)
cipher = aes.encrypt(plain)
open('emails.txt', 'wb').write(iv+cipher)
| StarcoderdataPython |
3358531 | from setuptools import setup
#from setuptools import find_packages
# Read the long description up front with a context manager so the file
# handle is closed promptly (the original bare open().read() leaked it).
with open("README.md") as readme:
    long_description = readme.read()

setup(
    name="EpiRank",
    version="0.0.1",
    author="<NAME>",
    author_email="<EMAIL>",
    packages=['EpiRank'],
    include_package_data=False,
    url="https://bitbucket.org/wcchin/epirank3",
    license="LICENSE.txt",
    description="algorithm using forward and backward movements of a commuting network for capturing diffusion.",
    long_description=long_description,
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Intended Audience :: Science/Research',
        'Topic :: Scientific/Engineering :: GIS',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python :: 3.6',
    ],
    keywords='spatial network concentration',
    install_requires=[
        "networkx",
        "pandas",
        "numpy",
        "scipy",
    ],
)
| StarcoderdataPython |
85257 | <reponame>mshvartsman/hydra<filename>tests/test_examples/test_patterns.py<gh_stars>1-10
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import sys
from pathlib import Path
from subprocess import check_output
from typing import Any, List
import pytest
from omegaconf import DictConfig
from hydra.test_utils.test_utils import (
TTaskRunner,
chdir_hydra_root,
verify_dir_outputs,
)
chdir_hydra_root()
# TODO: fails in pip install mode, need to figure out a solution for tests that needs to install modules.
# A reasonable solution is to only test the examples in the directory with pip install -e through nox.
@pytest.mark.skip("Disabled")  # type: ignore
def test_patterns_objects(tmpdir: Path) -> None:
    """Run the objects-pattern example app as a subprocess and check stdout.

    Skipped: requires an editable install (see TODO above).
    """
    cmd = [
        sys.executable,
        "examples/patterns/objects/my_app.py",
        "hydra.run.dir=" + str(tmpdir),
    ]
    result = check_output(cmd)
    # NOTE(review): '<PASSWORD>' is a dataset anonymization placeholder.
    assert (
        result.decode("utf-8").rstrip()
        == "MySQL connecting to localhost with user=root and password=<PASSWORD>"
    )
def test_specializing_config_example(
    hydra_restore_singletons: Any, hydra_task_runner: TTaskRunner
) -> None:
    """dataset=cifar10 specializes the model config via the defaults list."""
    with hydra_task_runner(
        calling_file="examples/patterns/specializing_config/example.py",
        calling_module=None,
        config_path="conf",
        config_name="config.yaml",
        overrides=["dataset=cifar10"],
    ) as task:
        assert task.job_ret is not None and task.job_ret.cfg == dict(
            dataset=dict(name="cifar10", path="/datasets/cifar10"),
            model=dict(num_layers=5, type="alexnet"),
        )
        # Also verify the run directory artifacts (config dumps, logs).
        verify_dir_outputs(task.job_ret, overrides=task.overrides)
@pytest.mark.parametrize(  # type: ignore
    "args,output_conf",
    [
        (
            [],
            {
                "db": {
                    "target": "examples.patterns.objects.my_app.MySQLConnection",
                    "params": {"host": "localhost", "user": "root", "password": 1234},
                }
            },
        ),
        (
            ["db=postgresql"],
            {
                "db": {
                    "target": "examples.patterns.objects.my_app.PostgreSQLConnection",
                    "params": {
                        "host": "localhost",
                        "user": "root",
                        # NOTE(review): '<PASSWORD>' is a dataset anonymization
                        # placeholder; the original literal was lost.
                        "password": <PASSWORD>,
                        "database": "tutorial",
                    },
                }
            },
        ),
    ],
)
def test_objects_example(
    hydra_restore_singletons: Any,
    tmpdir: Path,
    hydra_task_runner: TTaskRunner,
    args: List[str],
    output_conf: DictConfig,
) -> None:
    """The db override selects which connection object config is composed."""
    with hydra_task_runner(
        calling_file="examples/patterns/objects/my_app.py",
        calling_module=None,
        config_path="conf",
        config_name="config.yaml",
        overrides=args,
    ) as task:
        assert task.job_ret is not None
        assert task.job_ret.cfg == output_conf
        # Also verify the run directory artifacts (config dumps, logs).
        verify_dir_outputs(task.job_ret, overrides=task.overrides)
| StarcoderdataPython |
3365139 | import numpy as np
import calc.mathops as mo
import util.rdate as rd
import ml.gbm as gbm
import sys
import os
import json
def resource_path(res_):
    """Resolve a resource path, honoring a PyInstaller onefile bundle.

    When running from a PyInstaller bundle, resources are unpacked under
    ``sys._MEIPASS``; otherwise the relative path is returned unchanged.
    """
    try:
        bundle_dir = sys._MEIPASS
    except AttributeError:
        return os.path.join(res_)
    return os.path.join(bundle_dir, res_)
def print_timestamp():
    # Python 2 print statement. rd.get_timestamp() presumably returns a
    # formatted date/time string -- confirm against util.rdate.
    print rd.get_timestamp()
def print_ndarray():
    # Print a random ndarray whose size comes from the single CLI argument
    # (Python 2 print statements).
    if len(sys.argv) != 2:
        print "Wrong number of arguments"
    else:
        cnt = int(sys.argv[1])
        print mo.get_randn(n=cnt)
def serialize_sklearn_model():
    """Fit the GBM regressor on a noisy sine toy dataset.

    Builds ``num_points`` samples of y = sin(2*pi*x) + scale * noise and
    hands them to ml.gbm.gbm_regressor.
    """
    num_points = 100
    scale = 0.5
    X = np.linspace(-2, 2, num_points)
    # BUG FIX: np.random.normal(num_points) drew a SINGLE sample from
    # N(num_points, 1) (a constant offset ~100); we want one standard-normal
    # sample per data point.
    y = np.sin(2 * np.pi * X) + scale * np.random.randn(num_points)
    X = X.reshape(-1, 1)
    y = y.reshape(num_points, )
    gbm.gbm_regressor(X, y)
def read_package_data():
    # Load the bundled JSON config via resource_path (so it also works when
    # frozen with PyInstaller) and echo it (Python 2 print statement).
    cfg_path = resource_path("resources/main.json")
    with open(cfg_path) as cf:
        dct = json.load(cf)
    print dct
if __name__ == "__main__":
    # Smoke-run every demo helper in sequence.
    print_timestamp()
    print_ndarray()
    serialize_sklearn_model()
    read_package_data()
| StarcoderdataPython |
3264514 | from enum import Enum
class StrEnum(str, Enum):
    """Enum whose members are also plain strings (pre-3.11 style backport)."""
    ...
class PlatformVersion(StrEnum):
    """OS names matching the values returned by ``platform.system()``."""
    WINDOWS = 'Windows'
    LINUX = 'Linux'
    MAC = 'Darwin'
| StarcoderdataPython |
3344169 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
""" Config Loader/Saver """
import json
import os
from account import Account, AccountJSONEncoder
class Config:
    """Load/save a JSON mapping of account name -> Account.

    The file lives at ``~/.gitaccount`` unless a path is supplied.
    """

    # Default location; expanduser avoids a KeyError when $HOME is unset
    # (e.g. on Windows), unlike os.environ["HOME"].
    _path = os.path.expanduser("~") + "/.gitaccount"
    # Class-level default; load() replaces it with a per-instance dict.
    _config = {}

    def __init__(self, path=None):
        """Bind the config path (falling back to the default) and load it."""
        self._path = path if path else self._path
        self.load()

    def load(self):
        """Read the config file, creating an empty one if it is missing."""
        if not os.path.exists(self._path):
            self.save()
        with open(self._path, "r") as _:
            self._config = {
                k: Account.forge(v)
                for k, v in json.load(_).items()
            }

    def save(self):
        """Serialize the current accounts back to disk as JSON."""
        with open(self._path, "w") as _:
            json.dump(self._config, _, cls=AccountJSONEncoder)

    def add(self, account: "Account"):
        """Register (or overwrite) an account keyed by its account name."""
        self._config[account.account] = account

    def edit(self, account_name, account: "Account"):
        """Replace the entry under ``account_name`` (KeyError if absent)."""
        del self._config[account_name]
        self._config[account.account] = account

    def get(self, account: "Account"):
        """Return the stored Account for ``account.account``, else False."""
        return self._config.get(account.account, False)

    def remove(self, account: "Account"):
        """Delete the account by key.

        BUG FIX: the old implementation rebuilt ``_config`` as a *list* of
        keys that differed from the Account object -- it both changed the
        container type and never actually matched anything. Popping by the
        account name removes the entry and keeps ``_config`` a dict.
        """
        self._config.pop(account.account, None)

    def show(self):
        """Return the full mapping of account name -> Account."""
        return self._config
if __name__ == "__main__":
    # Module is import-only; nothing to run directly.
    pass
| StarcoderdataPython |
1699102 | <gh_stars>0
from django.contrib import admin
from django.utils.translation import gettext_lazy as _
from django_better_admin_arrayfield.admin.mixins import DynamicArrayMixin
from solo.admin import SingletonModelAdmin
from .forms import OpenIDConnectConfigForm
from .models import OpenIDConnectConfig
@admin.register(OpenIDConnectConfig)
class OpenIDConnectConfigAdmin(DynamicArrayMixin, SingletonModelAdmin):
    """Singleton admin for the OIDC configuration, grouped into fieldsets."""

    form = OpenIDConnectConfigForm
    fieldsets = (
        (
            # Master on/off switch for the OIDC backend.
            _("Activation"),
            {"fields": ("enabled",)},
        ),
        (
            # Relying-party credentials and token-signing settings.
            _("Common settings"),
            {
                "fields": (
                    "oidc_rp_client_id",
                    "oidc_rp_client_secret",
                    "oidc_rp_scopes_list",
                    "oidc_rp_sign_algo",
                    "oidc_rp_idp_sign_key",
                )
            },
        ),
        (
            # Provider endpoints; discovery can fill in the others.
            _("Endpoints"),
            {
                "fields": (
                    "oidc_op_discovery_endpoint",
                    "oidc_op_jwks_endpoint",
                    "oidc_op_authorization_endpoint",
                    "oidc_op_token_endpoint",
                    "oidc_op_user_endpoint",
                )
            },
        ),
        (
            # How OIDC claims map onto Django users and groups.
            _("User profile"),
            {
                "fields": (
                    "username_claim",
                    "groups_claim",
                    "claim_mapping",
                    "sync_groups",
                    "sync_groups_glob_pattern",
                    "make_users_staff",
                )
            },
        ),
    )
| StarcoderdataPython |
72098 | <reponame>rimmartin/cctbx_project
from __future__ import division
from scitbx.array_family import flex
from cma_es import cma_es
class cma_es_driver(object):
    """
    This object provides one with an easy interface to cma_es optimisation.
    The whole optimisation loop runs inside __init__; results are exposed as
    ``x_final`` / ``score_final``. For now, no options can be set; this will
    be added in the future. (Python 2 code: uses xrange.)
    """

    def __init__(self, N, mean, sigma, evaluator, l=0):
        # N: dimensionality; mean/sigma: flex.double start point and step
        # sizes; evaluator: callable scoring a flex vector; l: population
        # size (0 lets CMA-ES pick its default).
        self.N = N
        self.x = mean
        self.sigma = sigma
        self.evaluator = evaluator
        self.optimizer = cma_es(self.N, self.x, self.sigma, l)
        self.count = 0  # number of generations performed
        while (not self.optimizer.converged() ):
            # get sample population (flattened: pop_size vectors of length N)
            p = self.optimizer.sample_population()
            pop_size = p.accessor().all()[0]
            # update objective function
            v = flex.double(pop_size)
            for i in xrange(pop_size):
                vector = p[(i*N):(i*N + N)]
                v[i] = self.evaluator( vector )
            self.optimizer.update_distribution(v)
            self.count += 1
        self.x_final = self.optimizer.get_result()
        self.score_final = self.evaluator( self.x_final )
def tst_it():
    """Regression test: minimize the Rosenbrock function starting at (5, 5)."""
    def function(vector):
        # Classic Rosenbrock banana function; global minimum at (1, 1).
        x = vector[0]
        y = vector[1]
        result = 100.0*((y-x*x)**2.0) + (1-x)**2.0
        return result
    m = flex.double( [5,5] )
    s = flex.double( [3,3] )
    obj = cma_es_driver( 2, m, s, function )
    assert abs(obj.x_final[0]-1)<1e-8
    assert abs(obj.x_final[1]-1)<1e-8
# =============================================================================
if (__name__ == '__main__'):
    # Run the self-test when executed directly (Python 2 print statement).
    tst_it()
    print 'Ok'
| StarcoderdataPython |
3224331 | <reponame>psyphh/xifa<gh_stars>1-10
from .gpcm import GPCM
from .grm import GRM
# Public API of the package: the two IRT model classes.
__all__ = ["GRM",
           "GPCM"]
| StarcoderdataPython |
3244341 | <reponame>Seiwell0610/MessageTag
import discord
from discord.ext import commands
import os
# Debug breadcrumb: log which cog module is being loaded.
print(os.path.basename(__file__))
class Help(commands.Cog):
    """Cog providing a custom #help command listing the bot's commands."""

    def __init__(self, bot):
        self.bot = bot

    @commands.command()
    async def help(self, ctx):
        # Build a single embed with one field per command (text is Japanese).
        embed = discord.Embed(title="ヘルプ", color=discord.Color.blue())
        embed.add_field(name="#tag", value="該当のタグが付いているデータを表示します。", inline=False)
        embed.add_field(name="#add", value="データを追加します。", inline=False)
        embed.add_field(name="#remove", value="データを削除します。", inline=False)
        embed.add_field(name="#mytag", value="登録されているタグの一覧を表示", inline=False)
        embed.add_field(name="#ac", value="ユーザーのデータベースを作成します。", inline=False)
        await ctx.send(embed=embed)
def setup(bot):
    """Entry point used by discord.py's load_extension to register the cog."""
    bot.add_cog(Help(bot))
| StarcoderdataPython |
3252833 | <filename>augraphy/augmentations/gamma.py
import os
import random
import cv2
import numpy as np
from augraphy.base.augmentation import Augmentation
class Gamma(Augmentation):
    """Adjusts the gamma of the whole image by a chosen multiplier.

    :param range: Pair of floats determining the range from which to sample
        the gamma shift.
    :type range: tuple, optional
    :param p: The probability that this Augmentation will be applied.
    :type p: float, optional
    """

    def __init__(
        self,
        range=(0.5, 1.5),
        p=1,
    ):
        """Constructor method"""
        super().__init__(p=p)
        self.range = range

    def __repr__(self):
        return f"Gamma(range={self.range}, p={self.p})"

    def __call__(self, image, layer=None, force=False):
        """Apply a random gamma correction when the augmentation fires.

        A gamma value is sampled uniformly from ``self.range`` and applied
        through a 256-entry lookup table, which is far cheaper than
        exponentiating every pixel. Returns None when the augmentation is
        skipped (matching the original behavior).
        """
        if force or self.should_run():
            # astype() already returns a copy, so the extra .copy() the
            # original did was redundant and has been dropped.
            image = image.astype(np.uint8)
            value = random.uniform(self.range[0], self.range[1])
            invGamma = 1.0 / value
            # Vectorized LUT: out = ((i / 255) ** (1 / gamma)) * 255 for each
            # of the 256 possible input values (replaces a Python-level loop;
            # astype truncates exactly like the original).
            table = (
                np.power(np.arange(256) / 255.0, invGamma) * 255
            ).astype("uint8")
            frame = cv2.LUT(image, table)

            return frame
| StarcoderdataPython |
1771501 | <reponame>deng113jie/ExeTeraCovid<gh_stars>1-10
from io import BytesIO
import unittest
from datetime import datetime
import exetera.core.session as esess
from exeteracovid.algorithms.covid_test import match_assessment
class TestCovidTest(unittest.TestCase):
    """Unit tests for exeteracovid's covid-test/assessment matching."""

    def test_match_assessment(self):
        """match_assessment joins tests to assessments within a day window.

        Builds ten patients whose test dates ascend (Jan 5..14) while their
        assessment dates descend (Jan 16..7), then checks which patient ids
        survive a 5-day window, with and without the extra flag.
        """
        bio = BytesIO()
        with esess.Session() as s:
            src = s.open_dataset(bio, 'w', 'src')
            # test df
            tests = src.create_dataframe('tests')
            pid = tests.create_numeric('patient_id', 'int32')
            pid.data.write([1, 2, 3, 4, 5, 6, 7, 8, 9, 10])
            d = tests.create_timestamp('created_at')
            d.data.write([datetime(2020, 1, i).timestamp() for i in range(5, 15)])
            pid = tests.create_numeric('result', 'int32')
            pid.data.write([3, 4, 3, 4, 3, 4, 3, 4, 3, 4])
            # assessment df (dates run in reverse so only the middle overlaps)
            asmt = src.create_dataframe('assessments')
            pid = asmt.create_numeric('patient_id', 'int32')
            pid.data.write([1, 2, 3, 4, 5, 6, 7, 8, 9, 10])
            d = asmt.create_timestamp('created_at')
            d.data.write([datetime(2020, 1, i).timestamp() for i in list(reversed(range(7, 17)))])

            result = src.create_dataframe('result')
            match_assessment(tests, asmt, result, 5)
            assert result['patient_id_l'].data[:].tolist() == list([7, 8, 9])

            result = src.create_dataframe('result2')
            match_assessment(tests, asmt, result, 5, True)
            assert result['patient_id_l'].data[:].tolist() == list([8])
| StarcoderdataPython |
150293 | # MIT License
#
# Copyright (c) 2020 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import logging
from typing import List
from typing import Optional
import requests
from squeak.core import CBaseSqueak
from squeak.core import CResqueak
from squeak.core import CSqueak
from squeak.core.keys import SqueakPublicKey
from squeaknode.core.offer import Offer
from squeaknode.core.peer_address import Network
from squeaknode.core.squeak_peer import SqueakPeer
logger = logging.getLogger(__name__)
REQUEST_TIMEOUT_S = 10
class PeerClient:
    """HTTP client for a remote squeaknode peer, optionally via a SOCKS proxy.

    Tor (.onion) peers are reached through the configured socks5h proxy;
    clearnet peers are contacted directly.
    """

    def __init__(
        self,
        peer: SqueakPeer,
        proxy_host: Optional[str],
        proxy_port: Optional[int],
    ):
        self.peer = peer
        self.proxy_host = proxy_host
        self.proxy_port = proxy_port
        self.base_url = f"http://{peer.address.host}:{peer.address.port}"
        self.proxies = {}
        # Only route through the proxy for Tor peers with a proxy configured;
        # socks5h resolves the hostname on the proxy side (needed for .onion).
        if peer.address.network == Network.TORV3 and \
                proxy_host is not None and \
                proxy_port is not None:
            self.proxies = {
                "http": f'socks5h://{proxy_host}:{proxy_port}',
            }
        logger.debug("Using base url: {}".format(self.base_url))
        logger.debug("Using proxies: {}".format(self.proxies))

    def lookup(
        self,
        min_block: int,
        max_block: int,
        pubkeys: List[SqueakPublicKey],
    ) -> List[bytes]:
        """Return squeak hashes for the given pubkeys in a block range."""
        pubkeys_str = [
            pubkey.to_bytes().hex()
            for pubkey in pubkeys
        ]
        payload = {
            'minblock': min_block,
            'maxblock': max_block,
            'pubkeys': pubkeys_str,
        }
        url = f"{self.base_url}/lookup"
        r = requests.get(  # type: ignore
            url,
            params=payload,  # type: ignore
            proxies=self.proxies,
            timeout=REQUEST_TIMEOUT_S,
        )
        squeak_hashes_str = r.json()
        return [
            bytes.fromhex(squeak_hash_str)
            for squeak_hash_str in squeak_hashes_str
        ]

    def get_squeak(self, squeak_hash: bytes) -> Optional[CBaseSqueak]:
        """Fetch and deserialize a squeak (or resqueak); None on failure."""
        squeak_hash_str = squeak_hash.hex()
        url = f"{self.base_url}/squeak/{squeak_hash_str}"
        r = requests.get(
            url,
            proxies=self.proxies,
            timeout=REQUEST_TIMEOUT_S,
        )
        if r.status_code != requests.codes.ok:
            return None
        squeak_bytes = r.content
        # The payload may be either type; try each deserializer in turn.
        try:
            return CSqueak.deserialize(squeak_bytes)
        except Exception:
            pass
        try:
            return CResqueak.deserialize(squeak_bytes)
        except Exception:
            pass
        return None

    def get_secret_key(self, squeak_hash: bytes) -> Optional[bytes]:
        """Fetch the raw decryption key for a squeak; None on failure."""
        squeak_hash_str = squeak_hash.hex()
        url = f"{self.base_url}/secretkey/{squeak_hash_str}"
        r = requests.get(
            url,
            proxies=self.proxies,
            timeout=REQUEST_TIMEOUT_S,
        )
        if r.status_code != requests.codes.ok:
            return None
        secret_key = r.content
        return secret_key

    def get_offer(self, squeak_hash: bytes) -> Optional[Offer]:
        """Fetch a Lightning payment offer for a squeak; None on failure."""
        squeak_hash_str = squeak_hash.hex()
        url = f"{self.base_url}/offer/{squeak_hash_str}"
        r = requests.get(
            url,
            proxies=self.proxies,
            timeout=REQUEST_TIMEOUT_S,
        )
        if r.status_code != requests.codes.ok:
            return None
        offer_json = r.json()
        offer = Offer(
            squeak_hash=bytes.fromhex(offer_json['squeak_hash']),
            nonce=bytes.fromhex(offer_json['nonce']),
            payment_request=offer_json['payment_request'],
            host=offer_json['host'],
            port=int(offer_json['port']),
        )
        return offer
90770 | <reponame>davidcollom/Flexget
from __future__ import unicode_literals, division, absolute_import
from builtins import * # noqa pylint: disable=unused-import, redefined-builtin
from flexget.api.app import base_message
from flexget.utils import json
class TestFormatChecker(object):
config = 'tasks: {}'
def test_quality(self, api_client, schema_match):
    """A single quality value passes; a quality *range* is rejected (422)."""
    payload1 = {'quality': '720p'}
    rsp = api_client.json_post('/format_check/', data=json.dumps(payload1))
    assert rsp.status_code == 200, 'Response code is %s' % rsp.status_code
    data = json.loads(rsp.get_data(as_text=True))
    errors = schema_match(base_message, data)
    assert not errors

    # Ranges belong to 'quality_requirements', so this must fail validation.
    payload2 = {'quality': '720p-1080p'}
    rsp = api_client.json_post('/format_check/', data=json.dumps(payload2))
    assert rsp.status_code == 422, 'Response code is %s' % rsp.status_code
    data = json.loads(rsp.get_data(as_text=True))
    errors = schema_match(base_message, data)
    assert not errors
def test_quality_req(self, api_client, schema_match):
    """A quality range is valid for quality_requirements; junk is rejected."""
    payload1 = {'quality_requirements': '720p-1080p'}
    rsp = api_client.json_post('/format_check/', data=json.dumps(payload1))
    assert rsp.status_code == 200, 'Response code is %s' % rsp.status_code
    data = json.loads(rsp.get_data(as_text=True))
    errors = schema_match(base_message, data)
    assert not errors

    # NOTE(review): this payload uses the 'quality' key rather than
    # 'quality_requirements' -- looks like a copy/paste slip; confirm intent.
    payload2 = {'quality': 'bla'}
    rsp = api_client.json_post('/format_check/', data=json.dumps(payload2))
    assert rsp.status_code == 422, 'Response code is %s' % rsp.status_code
    data = json.loads(rsp.get_data(as_text=True))
    errors = schema_match(base_message, data)
    assert not errors
def test_time(self, api_client, schema_match):
payload = {'time': '10:00'}
rsp = api_client.json_post('/format_check/', data=json.dumps(payload))
assert rsp.status_code == 200, 'Response code is %s' % rsp.status_code
data = json.loads(rsp.get_data(as_text=True))
errors = schema_match(base_message, data)
assert not errors
payload = {'time': 'bla'}
rsp = api_client.json_post('/format_check/', data=json.dumps(payload))
assert rsp.status_code == 422, 'Response code is %s' % rsp.status_code
data = json.loads(rsp.get_data(as_text=True))
errors = schema_match(base_message, data)
assert not errors
def test_interval(self, api_client, schema_match):
payload1 = {'interval': '1 day'}
rsp = api_client.json_post('/format_check/', data=json.dumps(payload1))
assert rsp.status_code == 200, 'Response code is %s' % rsp.status_code
data = json.loads(rsp.get_data(as_text=True))
errors = schema_match(base_message, data)
assert not errors
payload2 = {'interval': 'bla'}
rsp = api_client.json_post('/format_check/', data=json.dumps(payload2))
assert rsp.status_code == 422, 'Response code is %s' % rsp.status_code
data = json.loads(rsp.get_data(as_text=True))
errors = schema_match(base_message, data)
assert not errors
def test_percent(self, api_client, schema_match):
payload1 = {'percent': '79%'}
rsp = api_client.json_post('/format_check/', data=json.dumps(payload1))
assert rsp.status_code == 200, 'Response code is %s' % rsp.status_code
data = json.loads(rsp.get_data(as_text=True))
errors = schema_match(base_message, data)
assert not errors
payload2 = {'percent': 'bla'}
rsp = api_client.json_post('/format_check/', data=json.dumps(payload2))
assert rsp.status_code == 422, 'Response code is %s' % rsp.status_code
data = json.loads(rsp.get_data(as_text=True))
errors = schema_match(base_message, data)
assert not errors
def test_size(self, api_client, schema_match):
payload1 = {'size': '4GB'}
rsp = api_client.json_post('/format_check/', data=json.dumps(payload1))
assert rsp.status_code == 200, 'Response code is %s' % rsp.status_code
data = json.loads(rsp.get_data(as_text=True))
errors = schema_match(base_message, data)
assert not errors
payload2 = {'percent': 'bla'}
rsp = api_client.json_post('/format_check/', data=json.dumps(payload2))
assert rsp.status_code == 422, 'Response code is %s' % rsp.status_code
data = json.loads(rsp.get_data(as_text=True))
errors = schema_match(base_message, data)
assert not errors
def test_regex(self, api_client, schema_match):
payload1 = {'regex': 'bla'}
rsp = api_client.json_post('/format_check/', data=json.dumps(payload1))
assert rsp.status_code == 200, 'Response code is %s' % rsp.status_code
data = json.loads(rsp.get_data(as_text=True))
errors = schema_match(base_message, data)
assert not errors
payload2 = {'regex': '(('}
rsp = api_client.json_post('/format_check/', data=json.dumps(payload2))
assert rsp.status_code == 422, 'Response code is %s' % rsp.status_code
data = json.loads(rsp.get_data(as_text=True))
errors = schema_match(base_message, data)
assert not errors
def test_file(self, api_client, schema_match):
payload1 = {'file': 'test_format_checker_api.py'}
rsp = api_client.json_post('/format_check/', data=json.dumps(payload1))
assert rsp.status_code == 200, 'Response code is %s' % rsp.status_code
data = json.loads(rsp.get_data(as_text=True))
errors = schema_match(base_message, data)
assert not errors
payload2 = {'file': 'bla'}
rsp = api_client.json_post('/format_check/', data=json.dumps(payload2))
assert rsp.status_code == 422, 'Response code is %s' % rsp.status_code
data = json.loads(rsp.get_data(as_text=True))
errors = schema_match(base_message, data)
assert not errors
def test_path(self, api_client, schema_match):
payload1 = {'path': '../api_tests'}
rsp = api_client.json_post('/format_check/', data=json.dumps(payload1))
assert rsp.status_code == 200, 'Response code is %s' % rsp.status_code
data = json.loads(rsp.get_data(as_text=True))
errors = schema_match(base_message, data)
assert not errors
payload2 = {'path': 'bla'}
rsp = api_client.json_post('/format_check/', data=json.dumps(payload2))
assert rsp.status_code == 422, 'Response code is %s' % rsp.status_code
data = json.loads(rsp.get_data(as_text=True))
errors = schema_match(base_message, data)
assert not errors
def test_url(self, api_client, schema_match):
payload1 = {'url': 'http://google.com'}
rsp = api_client.json_post('/format_check/', data=json.dumps(payload1))
assert rsp.status_code == 200, 'Response code is %s' % rsp.status_code
data = json.loads(rsp.get_data(as_text=True))
errors = schema_match(base_message, data)
assert not errors
payload2 = {'url': 'bla'}
rsp = api_client.json_post('/format_check/', data=json.dumps(payload2))
assert rsp.status_code == 422, 'Response code is %s' % rsp.status_code
data = json.loads(rsp.get_data(as_text=True))
errors = schema_match(base_message, data)
assert not errors
def test_episode_identifier(self, api_client, schema_match):
payload1 = {'episode_identifier': 's01e01'}
rsp = api_client.json_post('/format_check/', data=json.dumps(payload1))
assert rsp.status_code == 200, 'Response code is %s' % rsp.status_code
data = json.loads(rsp.get_data(as_text=True))
errors = schema_match(base_message, data)
assert not errors
payload2 = {'episode_identifier': 'bla'}
rsp = api_client.json_post('/format_check/', data=json.dumps(payload2))
assert rsp.status_code == 422, 'Response code is %s' % rsp.status_code
data = json.loads(rsp.get_data(as_text=True))
errors = schema_match(base_message, data)
assert not errors
| StarcoderdataPython |
# Package initializer: re-export the public surface of the ``mesos`` submodule.
from . import mesos

# Convenience alias so callers can write ``package.Mesos`` instead of
# ``package.mesos.Mesos``.
Mesos = mesos.Mesos

# Package version metadata.
__version__ = "1.0.0"

# Names exported by ``from <package> import *``.
__all__ = ['mesos']
| StarcoderdataPython |
# Eigen pretty printer
# Executed by GDB's embedded Python (e.g. sourced from a .gdbinit): registers
# the eigengdb pretty-printers so Eigen matrices/vectors display readably.
# ``__import__`` imports the module and returns it in a single expression.
# NOTE(review): the ``None`` argument presumably registers the printers
# globally rather than for a specific objfile -- confirm against eigengdb docs.
__import__('eigengdb').register_eigen_printers(None)
| StarcoderdataPython |
3223696 | <gh_stars>0
def test_log_format():
    """Smoke-test that log_event accepts a fully specified structured event."""
    from parrot_api.core import log_event

    event_fields = dict(
        level='error',
        status='failure',
        process_type='delivery',
        payload={'message': 'test'},
    )
    log_event(**event_fields)
| StarcoderdataPython |
171559 | <reponame>iamjdcollins/districtwebsite
from django.contrib import admin
from guardian.admin import GuardedModelAdmin
from mptt.admin import MPTTModelAdmin
from apps.pages.admin import PrecinctMapInline
import apps.common.functions as commonfunctions
from .models import Location, City, State, Zipcode, Language, TranslationType, SchoolType, OpenEnrollmentStatus, BoardPrecinct, BoardMeetingType, BoardPolicySection, DistrictCalendarEventCategory, DistrictLogoGroup, DistrictLogoStyleVariation, SchoolOption
# Admin classes for taxonomy/lookup models.  Each one combines:
#   * MPTTModelAdmin    - tree-aware change list for django-mptt models
#   * GuardedModelAdmin - per-object permissions via django-guardian
# These models need no behaviour beyond the combined base classes.

class LocationAdmin(MPTTModelAdmin,GuardedModelAdmin):
    pass

class CityAdmin(MPTTModelAdmin,GuardedModelAdmin):
    pass

class StateAdmin(MPTTModelAdmin,GuardedModelAdmin):
    pass

class ZipcodeAdmin(MPTTModelAdmin,GuardedModelAdmin):
    pass

class LanguageAdmin(MPTTModelAdmin,GuardedModelAdmin):
    pass

class TranslationTypeAdmin(MPTTModelAdmin,GuardedModelAdmin):
    pass

class SchoolTypeAdmin(MPTTModelAdmin,GuardedModelAdmin):
    pass

class OpenEnrollmentStatusAdmin(MPTTModelAdmin,GuardedModelAdmin):
    pass
class BoardPrecinctAdmin(MPTTModelAdmin,GuardedModelAdmin):
    """Admin for BoardPrecinct records with inline precinct-map attachments.

    The standard ModelAdmin hooks are bound to shared implementations from
    ``apps.common.functions`` so that permission checks and save behaviour
    stay consistent with the site's other admins.
    """
    inlines = [PrecinctMapInline,]
    # Shared permission hooks (django-guardian aware).
    has_change_permission = commonfunctions.has_change_permission
    has_add_permission = commonfunctions.has_add_permission
    has_delete_permission = commonfunctions.has_delete_permission
    # Shared save/redirect behaviour for the main form and inline formsets.
    save_formset = commonfunctions.save_formset
    save_model = commonfunctions.save_model
    response_change = commonfunctions.response_change
# Further lookup-model admins with no customisation beyond the combined
# MPTT + guardian base behaviour.

class BoardMeetingTypeAdmin(MPTTModelAdmin,GuardedModelAdmin):
    pass

class BoardPolicySectionAdmin(MPTTModelAdmin,GuardedModelAdmin):
    pass

class DistrictCalendarEventCategoryAdmin(MPTTModelAdmin,GuardedModelAdmin):
    pass

class DistrictLogoGroupAdmin(MPTTModelAdmin,GuardedModelAdmin):
    pass

class DistrictLogoStyleVariationAdmin(MPTTModelAdmin,GuardedModelAdmin):
    pass

class SchoolOptionAdmin(MPTTModelAdmin, GuardedModelAdmin):
    pass
# Attach each model to its admin class on the default admin site.  The
# mapping registers exactly the same (model, admin) pairs, in the same
# order, as the original one-call-per-line form.
for model_class, admin_class in (
    (Location, LocationAdmin),
    (City, CityAdmin),
    (State, StateAdmin),
    (Zipcode, ZipcodeAdmin),
    (Language, LanguageAdmin),
    (TranslationType, TranslationTypeAdmin),
    (SchoolType, SchoolTypeAdmin),
    (OpenEnrollmentStatus, OpenEnrollmentStatusAdmin),
    (BoardPrecinct, BoardPrecinctAdmin),
    (BoardMeetingType, BoardMeetingTypeAdmin),
    (BoardPolicySection, BoardPolicySectionAdmin),
    (DistrictCalendarEventCategory, DistrictCalendarEventCategoryAdmin),
    (DistrictLogoGroup, DistrictLogoGroupAdmin),
    (DistrictLogoStyleVariation, DistrictLogoStyleVariationAdmin),
    (SchoolOption, SchoolOptionAdmin),
):
    admin.site.register(model_class, admin_class)
| StarcoderdataPython |
4822173 | <filename>rotary_online/rotary_online/doctype/meeting/meeting.py
# -*- coding: utf-8 -*-
# Copyright (c) 2020, <NAME>, Rotaract Charitable Trust and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.utils import cint, getdate, today, to_timedelta
from frappe.model.document import Document
class Meeting(Document):
    """Frappe DocType controller for a club meeting attendance report."""

    def validate(self):
        # Called by Frappe before every save: check inputs and derive fields.
        self.validate_date()
        self.calculate_totals()
        self.set_zone()
        # Any (re)save returns the report to draft until it is submitted.
        self.document_status='draft'
        # Full month name (e.g. "January") of the meeting date, used for
        # monthly reporting rollups.
        self.reporting_month = getdate(self.date).strftime("%B")

    def on_submit(self):
        # Write directly to the DB so the status change does not re-trigger
        # the document validation lifecycle.
        frappe.db.set_value('Meeting', self.name, 'document_status', 'submitted')

    def on_cancel(self):
        frappe.db.set_value('Meeting', self.name, 'document_status', 'cancelled')

    def calculate_totals(self):
        # Total attendance across all attendee categories; cint() coerces
        # empty/None fields to 0.
        self.total = cint(self.rotarians) + cint(self.other_club) + cint(self.partners) \
            + cint(self.guest)

    def set_zone(self):
        # Zone is denormalised from the linked Club record.
        self.zone = frappe.db.get_value("Club", self.club, "zone")

    def validate_date(self):
        # Meetings cannot be reported for a future date.
        # NOTE(review): assumes self.date is an ISO 'YYYY-MM-DD' string so
        # lexicographic comparison with today() is chronological -- confirm.
        if self.date > today():
            frappe.throw("Did you fix the Flux Capacitor ? \n Meeting Date is Greater than today.")
        # The meeting must not end before it starts.
        if to_timedelta(self.start_time) > to_timedelta(self.end_time):
            frappe.throw("Start Time cannot be greater than End Time.")
| StarcoderdataPython |
110993 | from wagtail.blocks.static_block import * # NOQA
| StarcoderdataPython |
1618878 | '''Given models finetuned on existing benchmarks, evaluate on RNPC tasks.'''
import os
import sys
os.chdir("../../../..")
root_dir = os.getcwd()
sys.path.append(f"{root_dir}/source")
# config
from configuration import Config
config_path = (f'source/Qa/eval_on_RNPC/other_models/config.json')
config = Config.from_json_file(config_path)
os.environ['CUDA_VISIBLE_DEVICES'] = config.gpu_devices
import torch
from Qa.utils import compute_scores, n_classes_dict_NP, label_text2id, label_id2text, task_fieldnames, unchanged_fields
from utils import entailment, event_plausibility
import numpy as np
import csv
from transformers import AutoTokenizer, AutoModelForSequenceClassification
if __name__ == "__main__":
# config
cache_dir = config.cache_dir
task = eval(config.task)
if task not in ["SPTE", "MPTE", "EPC"]:
raise ValueError("Unspported task. Please choose from 'SPTE', 'MPTE', and 'EPC'.")
# models available for evaluation for each task
model_dict = {"SPTE": ["textattack/bert-base-uncased-snli",
"textattack/bert-base-uncased-MNLI",
"roberta-large-mnli",
"facebook/bart-large-mnli",],
"MPTE": ["veronica320/MPE_bert",
"veronica320/MPE_bert-l",
"veronica320/MPE_roberta",
"veronica320/MPE_roberta-l"],
"EPC": ["veronica320/ADEPT_bert",
"veronica320/ADEPT_bert-l",
"veronica320/ADEPT_roberta",
"veronica320/ADEPT_roberta-l"]
}
model_names = model_dict[task]
print(f"Evaluating models on {task}: ...")
for model_name in model_names:
model_name_simple = model_name.split("/")[-1]
print(model_name_simple)
# load model
tokenizer = AutoTokenizer.from_pretrained(model_name, cache_dir=cache_dir)
model = AutoModelForSequenceClassification.from_pretrained(model_name, cache_dir=cache_dir).to(f"cuda:0")
# input and output
frn = f"data/RNPC/tasks/{task}.csv"
pred_dir = f"output_dir/RNPC/eval_models_ft_on_benchmark/{task}"
fwn = f"{pred_dir}/{model_name_simple}.csv"
if not os.path.isdir(pred_dir):
os.makedirs(pred_dir)
gold_labels, pred_labels = [], []
with open(frn, "r") as fr, open(fwn, "w") as fw:
reader = csv.DictReader(fr)
writer = csv.DictWriter(fw,fieldnames=task_fieldnames[task])
writer.writeheader()
for i, row in enumerate(reader):
new_row = {}
for field in unchanged_fields:
if field in row:
new_row[field] = row[field]
# prediction
if task in ["SPTE", "MPTE"]:
premise, hypothesis = row["premise"], row["hypothesis"]
confidence, pred_label_id = entailment(model, tokenizer, model_name, premise, hypothesis)
else:
first_event, second_event = row["first_event"], row["second_event"]
confidence, pred_label_id = event_plausibility(model, tokenizer, model_name, first_event, second_event)
gold_label = row["label"]
pred_label = label_id2text(pred_label_id, task)
gold_labels.append(label_text2id(gold_label, task))
pred_labels.append(pred_label_id)
new_row["confidence"], new_row["gold label"], new_row["pred label"] = confidence, gold_label, pred_label
writer.writerow(new_row)
# compute accuracy, precision, recall, and f1
scores = compute_scores(n_classes_dict_NP[task], gold_labels, pred_labels)
print(scores)
| StarcoderdataPython |
1608134 | from scapy.all import *
from mirage.core.module import WirelessModule
from mirage.libs.esb_utils.scapy_esb_layers import *
from mirage.libs.esb_utils.packets import *
from mirage.libs.esb_utils.constants import *
from mirage.libs.esb_utils.dissectors import *
from mirage.libs.esb_utils.rfstorm import *
from mirage.libs.esb_utils.pcap import *
from mirage.libs.esb_utils.helpers import *
from mirage.libs import wireless,io
class ESBEmitter(wireless.Emitter):
    '''
    This class is an Emitter for the Enhanced ShockBurst protocol ("esb").
    It can instantiates the following devices :

    * RFStorm Device (``mirage.libs.esb_utils.rfstorm.ESBRFStormDevice``) **[ interface "rfstormX" (e.g. "rfstormX") ]**
    * PCAP Device (``mirage.libs.esb_utils.pcap.ESBPCAPDevice``) **[ interface "<file>.pcap" (e.g. "capture.pcap") ]**
    '''
    def __init__(self,interface="rfstorm0"):
        # Pick the backing device class from the interface name: "rfstorm*"
        # selects the RFStorm dongle, "*.pcap" selects PCAP file replay.
        deviceClass = None
        if "rfstorm" in interface:
            deviceClass = ESBRFStormDevice
        elif interface[-5:] == ".pcap":
            deviceClass = ESBPCAPDevice
        super().__init__(interface=interface,packetType=ESBPacket,deviceType=deviceClass)

    def convert(self,packet):
        '''
        Convert a mirage-level ESB packet (``mirage.libs.esb_utils.packets``)
        into a raw scapy ``ESB_Hdr`` frame ready for transmission.
        '''
        new = ESB_Hdr(address=packet.address)
        if packet.protocol == "generic":
            # Generic ESB traffic: ping requests and ACK responses.
            if isinstance(packet,ESBPingRequestPacket):
                new /= ESB_Payload_Hdr()/ESB_Ping_Request(ping_payload=packet.payload)
            elif isinstance(packet,ESBAckResponsePacket):
                new /= ESB_Payload_Hdr()/ESB_Ack_Response(ack_payload=packet.payload)
                # ACK responses must not themselves request an acknowledgement.
                new.no_ack=1
        elif packet.protocol == "logitech":
            # Logitech Unifying traffic: every payload sits under a
            # Logitech_Unifying_Hdr sub-layer.
            new /= ESB_Payload_Hdr()/Logitech_Unifying_Hdr()
            if isinstance(packet,ESBLogitechSetTimeoutPacket):
                new /= Logitech_Set_Keepalive_Payload(timeout=packet.timeout)
            elif isinstance(packet,ESBLogitechUnencryptedKeyReleasePacket):
                new /= Logitech_Unencrypted_Keystroke_Payload(hid_data=packet.hidData)
            elif isinstance(packet,ESBLogitechUnencryptedKeyPressPacket):
                new /= Logitech_Unencrypted_Keystroke_Payload(hid_data=packet.hidData)
            elif isinstance(packet,ESBLogitechKeepAlivePacket):
                new /= Logitech_Keepalive_Payload(timeout=packet.timeout)
            elif isinstance(packet,ESBLogitechMultimediaKeyPressPacket):
                new /= Logitech_Multimedia_Key_Payload(hid_key_scan_code=packet.hidData)
            elif isinstance(packet,ESBLogitechMultimediaKeyReleasePacket):
                # A multimedia "release" is encoded as an all-zero scan code.
                new /= Logitech_Multimedia_Key_Payload(hid_key_scan_code=b"\x00\x00\x00\x00")
            elif isinstance(packet,ESBLogitechMousePacket):
                new /= Logitech_Mouse_Payload(movement=packet.move,button_mask=packet.buttonMask)
            elif isinstance(packet,ESBLogitechEncryptedKeystrokePacket):
                new /= Logitech_Encrypted_Keystroke_Payload(unknown=packet.unknown,hid_data=packet.hidData, aes_counter=packet.aesCounter)
        else:
            # Unknown protocol: forward the raw payload unchanged.
            new /= ESB_Payload_Hdr(packet.payload)
        return new
class ESBReceiver(wireless.Receiver):
    '''
    This class is a Receiver for the Enhanced ShockBurst protocol ("esb").
    It can instantiates the following devices :

    * RFStorm Device (``mirage.libs.esb_utils.rfstorm.ESBRFStormDevice``) **[ interface "rfstormX" (e.g. "rfstormX") ]**
    * PCAP Device (``mirage.libs.esb_utils.pcap.ESBPCAPDevice``) **[ interface "<file>.pcap" (e.g. "capture.pcap") ]**
    '''
    def __init__(self,interface="rfstorm0"):
        # Same device selection scheme as ESBEmitter: "rfstorm*" dongle or
        # "*.pcap" capture replay.
        deviceClass = None
        if "rfstorm" in interface:
            deviceClass = ESBRFStormDevice
        elif interface[-5:] == ".pcap":
            deviceClass = ESBPCAPDevice
        super().__init__(interface=interface,packetType=ESBPacket,deviceType=deviceClass)

    def convert(self,packet):
        '''
        Convert a raw scapy ESB frame into the matching mirage-level packet
        class, attaching the sniffing channel as additional information.
        '''
        channel = self.getChannel()
        # Raw payload bytes after the ESB header (empty if absent).
        payload = raw(packet[ESB_Payload_Hdr:]) if ESB_Payload_Hdr in packet else b""
        # Default wrapper if no more specific class below matches.
        new = ESBPacket(address=packet.address, payload=payload)
        if ESB_Ack_Response in packet or payload == b"":
            # Empty frames are treated as ACK responses as well.
            new = ESBAckResponsePacket(address=packet.address,payload=payload)
        elif Logitech_Unifying_Hdr in packet:
            # Dispatch on the Logitech Unifying payload type.
            if Logitech_Mouse_Payload in packet:
                new = ESBLogitechMousePacket(
                    address=packet.address,
                    payload=payload,
                    buttonMask = packet.button_mask,
                    move=packet.movement
                )
            elif Logitech_Set_Keepalive_Payload in packet:
                new = ESBLogitechSetTimeoutPacket(
                    address=packet.address,
                    payload=payload,
                    timeout=packet.timeout
                )
            elif Logitech_Keepalive_Payload in packet:
                new = ESBLogitechKeepAlivePacket(
                    address=packet.address,
                    payload=payload,
                    timeout=packet.timeout
                )
            elif Logitech_Unencrypted_Keystroke_Payload in packet:
                # All-zero HID data means every key has been released.
                if packet.hid_data == b"\x00\x00\x00\x00\x00\x00\x00":
                    new = ESBLogitechUnencryptedKeyReleasePacket(
                        address=packet.address,
                        payload=payload
                    )
                else:
                    new = ESBLogitechUnencryptedKeyPressPacket(
                        address=packet.address,
                        payload=payload,
                        hidData = packet.hid_data
                    )
            elif Logitech_Multimedia_Key_Payload in packet:
                # All-zero scan code means the multimedia key was released.
                if packet.hid_key_scan_code == b"\x00\x00\x00\x00":
                    new = ESBLogitechMultimediaKeyReleasePacket(
                        address=packet.address,
                        payload=payload
                    )
                else:
                    new = ESBLogitechMultimediaKeyPressPacket(
                        address=packet.address,
                        payload=payload,
                        hidData = packet.hid_key_scan_code
                    )
            elif Logitech_Encrypted_Keystroke_Payload in packet:
                new = ESBLogitechEncryptedKeystrokePacket(
                    address=packet.address,
                    payload=payload,
                    unknown=packet.unknown,
                    hidData = packet.hid_data,
                    aesCounter = packet.aes_counter
                )
        # Record the channel the frame was sniffed on.
        new.additionalInformations = ESBSniffingParameters(channel=channel)
        return new
WirelessModule.registerEmitter("esb",ESBEmitter)
WirelessModule.registerReceiver("esb",ESBReceiver)
| StarcoderdataPython |
3303856 | <reponame>aquemy/HCBR
from random import randint
from subprocess import Popen, PIPE
from hcbr import HCBRClassifier
import numpy as np
from sklearn.metrics import accuracy_score, matthews_corrcoef
from sklearn.model_selection import train_test_split, RepeatedStratifiedKFold, RepeatedKFold
HCBR_BIN = '../../../build/hcbr'
DATASET = 'breast'
def example_1(clf, X, y):
    """Single 90/10 hold-out split: fit, predict, and print accuracy and MCC."""
    train_X, test_X, train_y, test_y = train_test_split(X, y, test_size=0.1)
    clf.fit(train_X, train_y)
    predictions = clf.predict(test_X, test_y)
    print(accuracy_score(test_y, predictions), matthews_corrcoef(test_y, predictions))
def example_2(clf, X, y):
    """Repeated stratified k-fold cross-validation (10 folds x 5 repeats).

    Prints the per-fold accuracy and Matthews correlation coefficient, then
    the best per-repeat mean accuracy.
    """
    # Bug fix: random.randint's upper bound is INCLUSIVE, so randint(0, 2**32)
    # could (rarely) return 2**32, which NumPy-based estimators reject as a
    # seed (valid range is [0, 2**32 - 1]).
    random_state = randint(0, 2**32 - 1)
    k = 10  # folds per repeat
    n = 5   # repeats
    rkf = RepeatedStratifiedKFold(n_splits=k, n_repeats=n, random_state=random_state)
    # Accumulated accuracy per repeat.
    averages = [0] * n
    for i, (train, test) in enumerate(rkf.split(X, y)):
        it = i // k  # index of the current repeat
        X_train, X_test, y_train, y_test = X[train], X[test], y[train], y[test]
        clf.fit(X_train, y_train)
        pred = clf.predict(X_test, y_test)
        acc = accuracy_score(y_test, pred)
        mcc = matthews_corrcoef(y_test, pred)
        print(it, acc, mcc)
        averages[it] += acc
    # Mean accuracy per repeat (list comprehension instead of a lazy map).
    averages = [total / k for total in averages]
    print('BEST ACCURACY: {}'.format(max(averages)))
def main():
    """Load the dataset, build the HCBR classifier, and run both examples."""
    # Load the case base: one whitespace-separated row of integers per line.
    # Bug fix: under Python 3, map() returns a lazy iterator, so the original
    # np.array([map(int, ...) ...]) produced an array of map OBJECTS, not
    # integers; materialising each row with list() restores the intended
    # 2-D integer array.  The explicit file.close() calls were redundant
    # inside the `with` blocks and have been removed.
    with open('../../../data/{}_casebase.txt'.format(DATASET)) as file:
        X = np.array([list(map(int, line.split())) for line in file])
    # Load the outcome (class label) for each case, one integer per line.
    with open('../../../data/{}_outcomes.txt'.format(DATASET)) as file:
        y = np.array([int(line.strip()) for line in file])
    # Create the classifier by specifying the configuration file
    clf = HCBRClassifier(params_file='../../../data/parameters/{}.params.json'.format(DATASET))
    # example 1: simple split
    print('----- EXAMPLE 1 -----')
    example_1(clf, X, y)
    # example 2: stratified K-fold cross-validation
    print('----- EXAMPLE 2 -----')
    example_2(clf, X, y)
# Script entry point.
if __name__ == '__main__':
    main()
| StarcoderdataPython |
3355053 | # standard imports
import datetime
import time
import os
from os import path
import platform
import signal
import pygame
from pygame.locals import QUIT, VIDEORESIZE, KEYDOWN, K_q
import requests
# local imports
import config
def exit_gracefully(signum, frame):
    """SIGTERM handler: exit the process cleanly with status 0.

    Bug fix: the original called sys.exit(0), but ``sys`` is never imported
    in this module, so the handler itself crashed with NameError when a
    SIGTERM arrived.  Raising SystemExit directly is exactly what sys.exit
    does and needs no import.
    """
    raise SystemExit(0)

# Install the handler so service managers can stop the dashboard cleanly.
signal.signal(signal.SIGTERM, exit_gracefully)
###############################################################################
class MyDisplay:
    """Fullscreen/windowed pygame dashboard showing live wind readings
    fetched from either Windguru or Holfuy (selected by config.HOLFUY_API_KEY).
    """

    # Pygame display surface; set by __init__.
    screen = None

    def __init__(self):
        "Initializes a pygame screen using the framebuffer"
        # Fetch an initial reading before the display comes up.
        self.last_update_check = 0
        self.get_forecast()
        if platform.system() == 'Darwin':
            # macOS: the default SDL driver just works.
            pygame.display.init()
            driver = pygame.display.get_driver()
            print('Using the {0} driver.'.format(driver))
        else:
            disp_no = os.getenv("DISPLAY")
            if disp_no:
                print("X Display = {0}".format(disp_no))
            # Try SDL video drivers in order until one initialises
            # (framebuffer fallbacks for headless Raspberry Pi setups).
            drivers = ['x11', 'fbcon', 'directfb', 'svgalib']
            found = False
            for driver in drivers:
                # Make sure that SDL_VIDEODRIVER is set
                if not os.getenv('SDL_VIDEODRIVER'):
                    os.putenv('SDL_VIDEODRIVER', driver)
                try:
                    pygame.display.init()
                except pygame.error:
                    print('Driver: {0} failed.'.format(driver))
                    continue
                found = True
                break
            if not found:
                print("No suitable video driver found!")
                raise Exception('No suitable video driver found!')
        #size = (800, 480)
        size = (pygame.display.Info().current_w,
                pygame.display.Info().current_h)
        # Half-size window (plus 50px for the caption area) for windowed mode.
        size_half = (int(pygame.display.Info().current_w * 0.5),
                     int(pygame.display.Info().current_h * 0.5 + 50))
        if config.FULLSCREEN:
            self.screen = pygame.display.set_mode(size, pygame.NOFRAME)
            self.xmax = pygame.display.Info().current_w
            self.ymax = pygame.display.Info().current_h
            print("Framebuffer Size: %d x %d" % (size[0], size[1]))
        else:
            self.screen = pygame.display.set_mode(size_half, pygame.RESIZABLE)
            pygame.display.set_caption('WIND Dashboard')
            self.xmax = pygame.display.get_surface().get_width()
            self.ymax = pygame.display.get_surface().get_height()
            print(self.xmax, self.ymax)
        # Clear the screen to start
        self.screen.fill((0, 0, 0))
        # Initialise font support
        pygame.font.init()
        # Render the screen
        pygame.mouse.set_visible(0)
        pygame.display.update()

    def __del__(self):
        "Destructor to make sure pygame shuts down, etc."

    def deg_to_compass(self, degrees):
        # Map a bearing in degrees to one of 16 compass points; each sector
        # spans 22.5 degrees, with +.5 centring the sector on its point.
        val = int((degrees/22.5)+.5)
        dirs = ["N", "NNE", "NE", "ENE",
                "E", "ESE", "SE", "SSE",
                "S", "SSW", "SW", "WSW",
                "W", "WNW", "NW", "NNW"]
        return dirs[(val % 16)]

    def get_forecast(self):
        """Refresh self.wind from the configured provider, throttled to one
        HTTP request per config.DS_CHECK_INTERVAL seconds."""
        if (time.time() - self.last_update_check) > config.DS_CHECK_INTERVAL:
            self.last_update_check = time.time()
            #try:
            if not config.HOLFUY_API_KEY:
                # Windguru: the station endpoint requires a Referer header
                # pointing at the station page.
                url = 'https://www.windguru.cz/int/iapi.php?q=station_data_current&id_station={s}&date_format=Y-m-d%20H%3Ai%3As%20T&_mha=f4d18b6c'.format(s=config.ID_STATION)
                url_h = 'https://www.windguru.cz/station/{s}'.format(s=config.ID_STATION)
                headers = {'Referer' : url_h}
                self.wind = requests.get(url, headers = headers).json()
            else:
                # Holfuy live API: Celsius, knots, include battery level.
                url_holfuy = "http://api.holfuy.com/live/?s={s}&pw={pw}&&m=JSON&tu=C&su=knots&batt"
                querystring_h = {
                    "s": config.ID_STATION,
                    "pw": config.HOLFUY_API_KEY
                }
                self.wind = requests.request("GET", url_holfuy, params=querystring_h).json()
        # NOTE(review): always returns True (never False); the module-level
        # `is False` check after construction can therefore never trigger.
        return True

    def holfuy(self):
        """Draw the current wind reading (speed, gust, direction, icons)."""
        text_color = (255, 255, 255)
        font_name = "dejavusans"
        regular_font = pygame.font.SysFont(font_name, int(self.ymax * 0.16), bold=1)
        small_font = pygame.font.SysFont(font_name, int(self.ymax * 0.13), bold=1)
        error_font = pygame.font.SysFont(font_name, int(self.ymax * 0.05), bold=1)
        # Two icon sets exist, sized for small vs large displays.
        if self.xmax <= 1024:
            icon_wind_size = '400'
        else:
            icon_wind_size = '700'
        if 'error' in self.wind.values() or 'error' in self.wind:
            # Provider returned an error payload: show an error screen.
            text = "ERROR"
            text_render = error_font.render(text, True, (255, 0, 0))
            text_rect = text_render.get_rect(center=(self.xmax * 0.5, self.ymax * 0.2))
            self.screen.blit(text_render, text_rect)
            text = "Wrong wind data in config.py ."
            text_render = error_font.render(text, True, (255, 0, 0))
            text_rect = text_render.get_rect(center=(self.xmax * 0.5, self.ymax * 0.4))
            self.screen.blit(text_render, text_rect)
            logo = path.join(path.dirname(__file__), 'icons/logo/{}/wind.png'.format(icon_wind_size))
            logo_load = pygame.image.load(logo)
            self.screen.blit(logo_load, (self.xmax * 0.3, self.ymax * 0.5))
        elif not config.HOLFUY_API_KEY:
            # Windguru JSON layout: flat keys.
            wind_speed = self.wind['wind_avg']
            wind_gust = self.wind['wind_max']
            wind_dir = self.wind['wind_direction']
            # Colour and icon set by wind-speed band (knots).
            # NOTE(review): the bands have gaps (e.g. 14 < speed <= 14.1 and
            # speed > 500 match no branch), which would leave text_regular and
            # icon unbound and raise NameError -- confirm intended ranges.
            if 0 <= wind_speed <= 14:
                text_regular = (51, 187, 255)
                icon = path.join(path.dirname(__file__), 'icons/blue/{}/{}.png'.format(icon_wind_size, self.deg_to_compass(wind_dir)))
            if 14.1 < wind_speed <= 17:
                text_regular = (97, 209, 97)
                icon = path.join(path.dirname(__file__), 'icons/green/{}/{}.png'.format(icon_wind_size, self.deg_to_compass(wind_dir)))
            if 17.1 < wind_speed <= 24:
                text_regular = (255, 182, 32)
                icon = path.join(path.dirname(__file__), 'icons/orange/{}/{}.png'.format(icon_wind_size, self.deg_to_compass(wind_dir)))
            if 24.1 < wind_speed <= 30:
                text_regular = (255, 102, 0)
                icon = path.join(path.dirname(__file__), 'icons/brown/{}/{}.png'.format(icon_wind_size, self.deg_to_compass(wind_dir)))
            if 30.1 < wind_speed <= 500:
                text_regular = (255, 26, 140)
                icon = path.join(path.dirname(__file__), 'icons/purple/{}/{}.png'.format(icon_wind_size, self.deg_to_compass(wind_dir)))
            # Average speed, gust, and bearing.
            text = ("{} knt").format(wind_speed)
            text_render = regular_font.render(text, True, text_regular)
            text_rect = text_render.get_rect(center=(self.xmax * 0.75, self.ymax * 0.18))
            self.screen.blit(text_render, text_rect)
            text = ("{} knt").format(wind_gust)
            text_render = regular_font.render(text, True, text_regular)
            text_rect = text_render.get_rect(center=(self.xmax * 0.75, self.ymax * 0.37))
            self.screen.blit(text_render, text_rect)
            text = "%s° " % wind_dir
            text_render = small_font.render(text, True, text_color)
            text_rect = text_render.get_rect(center=(self.xmax * 0.75, self.ymax * 0.58))
            self.screen.blit(text_render, text_rect)
            icon_load = pygame.image.load(icon).convert_alpha()
            self.screen.blit(icon_load, (self.xmax * 0.04, self.ymax * 0.08))
            logo = path.join(path.dirname(__file__), 'icons/logo/{}/windguru.png'.format(icon_wind_size))
            logo_load = pygame.image.load(logo)
            self.screen.blit(logo_load, (self.xmax * 0.6, self.ymax * 0.72))
        else:
            # Holfuy JSON layout: nested under 'wind'.
            wind_speed = self.wind['wind']['speed']
            wind_gust = self.wind['wind']['gust']
            wind_dir = self.wind['wind']['direction']
            # Same speed bands and NOTE(review) caveat as the Windguru branch.
            if 0 <= wind_speed <= 14:
                text_regular = (51, 187, 255)
                icon = path.join(path.dirname(__file__), 'icons/blue/{}/{}.png'.format(icon_wind_size, self.deg_to_compass(wind_dir)))
            if 14.1 < wind_speed <= 17:
                text_regular = (97, 209, 97)
                icon = path.join(path.dirname(__file__), 'icons/green/{}/{}.png'.format(icon_wind_size, self.deg_to_compass(wind_dir)))
            if 17.1 < wind_speed <= 24:
                text_regular = (255, 182, 32)
                icon = path.join(path.dirname(__file__), 'icons/orange/{}/{}.png'.format(icon_wind_size, self.deg_to_compass(wind_dir)))
            if 24.1 < wind_speed <= 30:
                text_regular = (255, 102, 0)
                icon = path.join(path.dirname(__file__), 'icons/brown/{}/{}.png'.format(icon_wind_size, self.deg_to_compass(wind_dir)))
            if 30.1 < wind_speed <= 500:
                text_regular = (255, 26, 140)
                icon = path.join(path.dirname(__file__), 'icons/purple/{}/{}.png'.format(icon_wind_size, self.deg_to_compass(wind_dir)))
            text = ("{} knt").format(wind_speed)
            text_render = regular_font.render(text, True, text_regular)
            text_rect = text_render.get_rect(center=(self.xmax * 0.75, self.ymax * 0.18))
            self.screen.blit(text_render, text_rect)
            text = ("{} knt").format(wind_gust)
            text_render = regular_font.render(text, True, text_regular)
            text_rect = text_render.get_rect(center=(self.xmax * 0.75, self.ymax * 0.37))
            self.screen.blit(text_render, text_rect)
            text = "%s° " % wind_dir
            text_render = small_font.render(text, True, text_color)
            text_rect = text_render.get_rect(center=(self.xmax * 0.75, self.ymax * 0.58))
            self.screen.blit(text_render, text_rect)
            icon_load = pygame.image.load(icon).convert_alpha()
            self.screen.blit(icon_load, (self.xmax * 0.04, self.ymax * 0.08))
            logo = path.join(path.dirname(__file__), 'icons/logo/{}/holfuy.png'.format(icon_wind_size))
            logo_load = pygame.image.load(logo)
            self.screen.blit(logo_load, (self.xmax * 0.6, self.ymax * 0.72))
        # Update the display
        pygame.display.update()
########################################################################
# Create an instance of the lcd display class.
MY_DISP = MyDisplay()

RUNNING = True  # Stay running while True
SECONDS = 0  # Seconds Placeholder to pace display.

# Loads data from holfuy into class variables.
# NOTE(review): get_forecast() as written never returns False, so this
# error path is effectively dead code.
if MY_DISP.get_forecast() is False:
    print('Error: Wrong data for wind.')
    RUNNING = False

# Main loop: redraw, handle input, and refresh data until quit.
while RUNNING:
    MY_DISP.holfuy()
    # Look for and process keyboard events to change modes.
    for event in pygame.event.get():
        if event.type == QUIT:
            RUNNING = False
        if event.type == pygame.KEYDOWN:
            # On 'q' or keypad enter key, quit the program.
            if event.key == pygame.K_q:
                RUNNING = False
    # Refresh the weather data once per minute.
    # NOTE(review): SECONDS is never advanced, so this condition is always
    # true and get_forecast() runs every pass; its internal interval check
    # is what actually throttles the HTTP requests.
    if int(SECONDS) == 0:
        MY_DISP.get_forecast()
    # Loop timer.
    pygame.time.wait(100)

pygame.quit()
| StarcoderdataPython |
3204928 | extensions = ["myst_parser"]
exclude_patterns = ["_build"]
myst_disable_syntax = ["emphasis"]
myst_dmath_allow_space = False
mathjax_config = {}
myst_amsmath_enable = True
myst_deflist_enable = True
myst_figure_enable = True
| StarcoderdataPython |
1761162 | """
import math
print(math.sin(1))
print(math.cos(2))
print(math.tan(3))
print(math.pow(2, 2))
print(math.sqrt(4))
from math import sin
print(sin(1))
import math as m
print(m.sin(1))
print(m.cos(2))
print(m.tan(3))
print(m.pow(2, 2))
print(m.sqrt(4))
from math import sin as s
print(s(1))
"""
| StarcoderdataPython |
3378182 | #! python3.8
# -*- coding: utf-8 -*-
# File name: geocoder.py
# Author: <NAME>
# Email: <EMAIL>
# Created: 27.11.2019
# Modified: 27.11.2019
"""
TODO:
Module's docstring
"""
# Standard imports
# ---
# Third party imports
from opencage.geocoder import OpenCageGeocode
# Package imports
from gigfinder import secrets
# Get API key and create geocoder
geocoder = OpenCageGeocode(secrets.get_opencage_apikey())
def address_to_lat_lng(street="", nr="", plz="", city="", country=""):
"""TODO: Docstring"""
address = "{0} {1}, {2} {3}, {4}".format(street, nr, plz, city, country)
result = geocoder.geocode(address)
r = result[0]
lat = r['geometry']['lat']
lng = r['geometry']['lng']
return lat, lng
| StarcoderdataPython |
3330906 | <reponame>mirceaulinic/irrd
"""Set prefix_length in existing RPSL objects
Revision ID: <KEY>
Revises: <PASSWORD>
Create Date: 2019-03-04 16:14:17.862510
"""
import sqlalchemy as sa
from alembic import op
from sqlalchemy.dialects import postgresql as pg
from sqlalchemy.ext.declarative import declarative_base
# revision identifiers, used by Alembic.
revision = '<KEY>'
down_revision = '<PASSWORD>'
branch_labels = None
depends_on = None
Base = declarative_base()
# Migration-local mapping of the rpsl_objects table.  Declaring a minimal
# model inline (instead of importing the application model) keeps this
# migration stable even if the real model changes later.
class RPSLDatabaseObject(Base): # type:ignore
    __tablename__ = 'rpsl_objects'
    # Surrogate primary key, generated server-side.
    pk = sa.Column(pg.UUID(as_uuid=True), server_default=sa.text('gen_random_uuid()'), primary_key=True)
    object_class = sa.Column(sa.String, nullable=False, index=True)
    # Number of addresses covered by the object's prefix (2 ** (bits - length)).
    ip_size = sa.Column(sa.DECIMAL(scale=0))
    # Column being backfilled by this migration.
    prefix_length = sa.Column(sa.Integer, nullable=True)
def upgrade():
    """Backfill prefix_length for existing route/route6 objects.

    A prefix of length L in an address family with B total bits covers
    2 ** (B - L) addresses, so for every possible length we set
    prefix_length on the rows whose stored ip_size matches.  The original
    code duplicated the same loop for 'route' (32-bit) and 'route6'
    (128-bit); this parameterized version issues identical updates in the
    identical order.
    """
    connection = op.get_bind()
    t_rpsl_objects = RPSLDatabaseObject.__table__
    # (object class, address-family bit width)
    for object_class, bits in (('route', 32), ('route6', 128)):
        for length in range(bits + 1):
            ip_size = pow(2, bits - length)
            connection.execute(t_rpsl_objects.update().where(
                sa.and_(t_rpsl_objects.c.ip_size == ip_size,
                        t_rpsl_objects.c.object_class == object_class)
            ).values(
                prefix_length=length,
            ))
def downgrade():
    # Intentional no-op: the backfilled values are not cleared here.
    # NOTE(review): presumably the migration that added the prefix_length
    # column drops it on downgrade -- confirm before relying on reversal.
    pass
| StarcoderdataPython |
1625797 | # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT
from ..registration import Registration
def test_Registration_inputs():
    """Check that Registration's input spec exposes the expected trait metadata."""
    expected_inputs = {
        "args": {"argstr": "%s"},
        "environ": {"nohash": True, "usedefault": True},
        "fixed_image": {"argstr": "-f %s", "extensions": None, "mandatory": True},
        "fixed_mask": {"argstr": "-fMask %s", "extensions": None},
        "initial_transform": {"argstr": "-t0 %s", "extensions": None},
        "moving_image": {"argstr": "-m %s", "extensions": None, "mandatory": True},
        "moving_mask": {"argstr": "-mMask %s", "extensions": None},
        "num_threads": {"argstr": "-threads %01d", "nohash": True, "usedefault": True},
        "output_path": {"argstr": "-out %s", "mandatory": True, "usedefault": True},
        "parameters": {"argstr": "-p %s...", "mandatory": True},
    }
    inputs = Registration.input_spec()
    for name, metadata in expected_inputs.items():
        for metakey, expected in metadata.items():
            assert getattr(inputs.traits()[name], metakey) == expected
def test_Registration_outputs():
    """Check that Registration's output spec exposes the expected trait metadata."""
    expected_outputs = {
        "transform": {},
        "warped_file": {"extensions": None},
        "warped_files": {},
        "warped_files_flags": {},
    }
    outputs = Registration.output_spec()
    for name, metadata in expected_outputs.items():
        for metakey, expected in metadata.items():
            assert getattr(outputs.traits()[name], metakey) == expected
| StarcoderdataPython |
4824725 | #!/usr/bin/env python
# Copyright (c) 2013-2015, Rethink Robotics
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the Rethink Robotics nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""
Baxter RSDK Gripper Example: joystick
"""
from __future__ import print_function
import argparse
import rospy
import baxter_interface
import baxter_external_devices
from baxter_interface import CHECK_VERSION
def map_joystick(joystick):
    """
    maps joystick input to gripper commands

    Polls the joystick at 100Hz and, for each binding whose trigger
    condition is true, runs the bound gripper command.

    @param joystick: an instance of a Joystick
    """
    # initialize interfaces
    print("Getting robot state... ")
    rs = baxter_interface.RobotEnable(CHECK_VERSION)
    init_state = rs.state().enabled
    left = baxter_interface.Gripper('left', CHECK_VERSION)
    right = baxter_interface.Gripper('right', CHECK_VERSION)

    def clean_shutdown():
        # Restore the robot's original enable state on exit.
        print("\nExiting example...")
        if not init_state:
            print("Disabling robot...")
            rs.disable()
    rospy.on_shutdown(clean_shutdown)

    # decrease position dead_band
    left.set_dead_band(2.5)
    right.set_dead_band(2.5)

    # abbreviations: stick-high/stick-low tests and button edge detectors
    jhi = lambda s: joystick.stick_value(s) > 0
    jlo = lambda s: joystick.stick_value(s) < 0
    bdn = joystick.button_down
    bup = joystick.button_up

    def print_help(bindings_list):
        # Print every binding's trigger name and description.
        print("Press Ctrl-C to quit.")
        for bindings in bindings_list:
            for (test, _cmd, doc) in bindings:
                if callable(doc):
                    doc = doc()
                print("%s: %s" % (str(test[1]), doc))

    def capability_warning(gripper, cmd):
        # Warn when a command is unsupported by this gripper type.
        msg = ("%s %s - not capable of '%s' command" %
               (gripper.name, gripper.type(), cmd))
        print(msg)

    def offset_position(gripper, offset):
        # Nudge the commanded position; electric grippers only.
        if gripper.type() != 'electric':
            capability_warning(gripper, 'set_position')
            return
        current = gripper.position()
        gripper.command_position(current + offset)

    def offset_holding(gripper, offset):
        # Nudge the holding force; electric grippers only.
        if gripper.type() != 'electric':
            capability_warning(gripper, 'set_holding_force')
            return
        current = gripper.parameters()['holding_force']
        gripper.set_holding_force(current + offset)

    def offset_velocity(gripper, offset):
        # Nudge the move velocity; electric grippers only.
        if gripper.type() != 'electric':
            capability_warning(gripper, 'set_velocity')
            return
        current = gripper.parameters()['velocity']
        gripper.set_velocity(current + offset)

    bindings_list = []
    bindings = (
        #(test, command, description)
        ((bdn, ['btnDown']), (left.reboot, []), "left: reboot"),
        ((bdn, ['btnLeft']), (right.reboot, []), "right: reboot"),
        ((bdn, ['btnRight']), (left.calibrate, []), "left: calibrate"),
        ((bdn, ['btnUp']), (right.calibrate, []), "right: calibrate"),
        ((bdn, ['rightTrigger']), (left.close, []), "left: close"),
        ((bdn, ['leftTrigger']), (right.close, []), "right: close"),
        ((bup, ['rightTrigger']), (left.open, []), "left: open (release)"),
        ((bup, ['leftTrigger']), (right.open, []), "right: open (release)"),
        ((bdn, ['rightBumper']), (left.stop, []), "left: stop"),
        ((bdn, ['leftBumper']), (right.stop, []), "right: stop"),
        ((jlo, ['rightStickHorz']), (offset_position, [left, -15.0]),
         "left: decrease position"),
        ((jlo, ['leftStickHorz']), (offset_position, [right, -15.0]),
         "right: decrease position"),
        ((jhi, ['rightStickHorz']), (offset_position, [left, 15.0]),
         "left: increase position"),
        ((jhi, ['leftStickHorz']), (offset_position, [right, 15.0]),
         "right: increase position"),
        ((jlo, ['rightStickVert']), (offset_holding, [left, -5.0]),
         "left: decrease holding force"),
        ((jlo, ['leftStickVert']), (offset_holding, [right, -5.0]),
         "right: decrease holding force"),
        ((jhi, ['rightStickVert']), (offset_holding, [left, 5.0]),
         "left: increase holding force"),
        ((jhi, ['leftStickVert']), (offset_holding, [right, 5.0]),
         "right: increase holding force"),
        ((bdn, ['dPadDown']), (offset_velocity, [left, -5.0]),
         "left: decrease velocity"),
        ((bdn, ['dPadLeft']), (offset_velocity, [right, -5.0]),
         "right: decrease velocity"),
        ((bdn, ['dPadRight']), (offset_velocity, [left, 5.0]),
         "left: increase velocity"),
        ((bdn, ['dPadUp']), (offset_velocity, [right, 5.0]),
         "right: increase velocity"),
        ((bdn, ['function1']), (print_help, [bindings_list]), "help"),
        ((bdn, ['function2']), (print_help, [bindings_list]), "help"),
    )
    bindings_list.append(bindings)

    print("Enabling robot...")
    rs.enable()
    rate = rospy.Rate(100)
    print_help(bindings_list)
    print("Press <Start> button for help; Ctrl-C to stop...")
    while not rospy.is_shutdown():
        # test each joystick condition and call binding cmd if true
        for (test, cmd, doc) in bindings:
            if test[0](*test[1]):
                cmd[0](*cmd[1])
                print(doc)
        rate.sleep()
    rospy.signal_shutdown("Example finished.")
def main():
    """RSDK Gripper Example: Joystick Control

    Use a game controller to control the grippers.

    Attach a game controller to your dev machine and run this
    example along with the ROS joy_node to control Baxter's
    grippers using the joysticks and buttons. Be sure to provide
    the *joystick* type you are using as an argument to setup
    appropriate key mappings.

    Uses the baxter_interface.Gripper class and the helper classes
    in baxter_external_devices.Joystick.
    """
    epilog = """
See help inside the example with the "Start" button for controller
key bindings.
    """
    parser = argparse.ArgumentParser(
        formatter_class=argparse.RawDescriptionHelpFormatter,
        description=main.__doc__,
        epilog=epilog)
    required = parser.add_argument_group('required arguments')
    required.add_argument(
        '-j', '--joystick', required=True, choices=['xbox', 'logitech', 'ps3'],
        help='specify the type of joystick to use'
    )
    args = parser.parse_args(rospy.myargv()[1:])

    # Map the controller type onto its constructor; argparse `choices`
    # already restricts the value, so the error branch is defensive only.
    controller_classes = {
        'xbox': baxter_external_devices.joystick.XboxController,
        'logitech': baxter_external_devices.joystick.LogitechController,
        'ps3': baxter_external_devices.joystick.PS3Controller,
    }
    controller_class = controller_classes.get(args.joystick)
    if controller_class is None:
        # Should never reach this case with proper argparse usage
        parser.error("Unsupported joystick type '%s'" % (args.joystick))
    joystick = controller_class()

    print("Initializing node... ")
    rospy.init_node("rsdk_gripper_joystick")
    map_joystick(joystick)
if __name__ == '__main__':
main()
| StarcoderdataPython |
1738689 | <reponame>idlesign/django-logexpose
from django.test import TestCase, RequestFactory
from django.http import HttpResponse
from django.utils import timezone
from .loggers.base import BaseLogger
from .loggers.request import RequestLogger
from .loggers.process import ProcessLogger
from .backends.base import BaseDataBackend
from .backends.database import DatabaseBackend
from .models import Record
from .utils import thread_get_logger, thread_init_logger, get_func_path, thread_get_backend, thread_init_backend, \
_THREAD_LOCAL, _THREAD_ATTR_BACKENDS, _THREAD_ATTR_LOGGERS
from .toolbox import get_logger
from .decorators import logexpose as logexpose_decor
from .middleware import RequestLoggerMiddleware
from .exceptions import LoggerNotRegistered
LEVELS = ('debug', 'info', 'warning', 'error', 'critical')
def test_func(a: int, b: int = 10, d: int = 66) -> int:
    """Trivial helper used as a decoration target in the tests below.

    Returns its first argument unchanged; *b* and *d* only give the
    function a non-trivial signature for path/decorator checks.
    """
    return a
class LogexposeTestCase(TestCase):
    """Base test case that resets logexpose thread-local state after each test.

    Loggers and backends are cached on a thread-local object; leaving them
    registered would let state leak from one test into the next.
    """

    def tearDown(self):
        # Clear both caches; AttributeError just means the test never
        # initialized that cache.  (The original had two copy-pasted
        # try/except blocks — collapsed into one loop.)
        for attr in (_THREAD_ATTR_LOGGERS, _THREAD_ATTR_BACKENDS):
            try:
                delattr(_THREAD_LOCAL, attr)
            except AttributeError:
                pass
class BaseLoggerTest(LogexposeTestCase):
    """Tests for BaseLogger: ids, level dispatch and thread registration."""

    def test_grp_id(self):
        """Group id defaults to 'default' and honours an explicit value."""
        logger_1 = BaseLogger()
        self.assertEqual(logger_1.grp_id, 'default')
        logger_2 = BaseLogger(grp_id=33)
        self.assertEqual(logger_2.grp_id, 33)
        grp_id = logger_1.pick_grp_id()
        self.assertEqual(grp_id, 'default')
        grp_id = logger_2.pick_grp_id()
        self.assertEqual(grp_id, 33)

    def test_msg_id(self):
        """generate_msg_id() always returns a value."""
        logger_1 = BaseLogger()
        self.assertIsNotNone(logger_1.generate_msg_id())

    def test_level_method(self):
        """get_level_method() maps each level name to the bound method."""
        logger_1 = BaseLogger()
        self.assertEqual(logger_1.get_level_method('debug'), logger_1.debug)
        self.assertEqual(logger_1.get_level_method('info'), logger_1.info)
        self.assertEqual(logger_1.get_level_method('warning'), logger_1.warning)
        self.assertEqual(logger_1.get_level_method('error'), logger_1.error)
        self.assertEqual(logger_1.get_level_method('critical'), logger_1.critical)

    def test_levels(self):
        """Every level method echoes explicit ids and generates defaults."""
        logger_1 = BaseLogger()
        for level in LEVELS:
            # Explicit ids are passed straight through.
            msg = 'simple'
            grp_id = 12
            msg_id = 21
            parent_msg_id = 10
            props = {'a': 'b'}
            gid, mid, pid = getattr(logger_1, level)(msg, grp_id, msg_id, props, parent_msg_id)
            self.assertEqual(gid, grp_id)
            self.assertEqual(mid, msg_id)
            self.assertEqual(pid, parent_msg_id)
            # Missing ids fall back to the defaults / a generated msg id.
            msg = 'simple2'
            grp_id = None
            msg_id = None
            props = None
            gid, mid, pid = getattr(logger_1, level)(msg, grp_id, msg_id, props)
            self.assertEqual(gid, 'default')
            self.assertIsInstance(mid, str)
            self.assertEqual(pid, None)

    def test_process_request(self):
        """Base hooks are no-ops returning None."""
        self.assertIsNone(BaseLogger.process_request(22))

    def test_process_response(self):
        self.assertIsNone(BaseLogger.process_response(24, 42))

    def test_default_func_before(self):
        """The before-hook returns the ids tuple unchanged."""
        logger_1 = BaseLogger()
        ids_tuple = (1, 2, 3)
        new_ids_tuple = logger_1.default_func_before(test_func, [1, 23], {'another': 'yes'}, 'info', ids_tuple)
        self.assertEqual(ids_tuple, new_ids_tuple)

    def test_default_func_after(self):
        """The after-hook keeps grp/parent ids but generates a new msg id."""
        logger_1 = BaseLogger()
        ids_tuple = (1, 2, 3)
        gid, mid, pid = logger_1.default_func_after(test_func, [11, 2], {'another': 'no'}, 'warning', ids_tuple)
        self.assertEqual(gid, ids_tuple[0])
        self.assertIsInstance(mid, str)
        self.assertNotEqual(mid, ids_tuple[1])
        self.assertEqual(pid, ids_tuple[2])

    def test_get_from_thread(self):
        """get_from_thread() lazily registers and returns an instance."""
        logger = thread_get_logger('base')
        self.assertIsNone(logger)
        logger = BaseLogger.get_from_thread()
        self.assertIsInstance(logger, BaseLogger)
class RequestLoggerTest(LogexposeTestCase):
    """Tests for RequestLogger."""

    def test_pick_grp_id(self):
        """An explicitly set grp_id wins over the id passed to pick_grp_id()."""
        req = RequestFactory().get('/')
        logger_1 = RequestLogger(req)
        logger_1.grp_id = 44
        self.assertEqual(logger_1.pick_grp_id(99), 44)
class UtilsTest(LogexposeTestCase):
    """Tests for the thread-local registry helpers and get_func_path()."""

    def test_get_logger_from_thread(self):
        """Loggers are absent until registered, then returned by alias."""
        logger = thread_get_logger('base')
        self.assertIsNone(logger)
        logger = thread_init_logger('base', BaseLogger)
        self.assertIs(logger, BaseLogger)
        logger = thread_get_logger('base')
        self.assertIs(logger, BaseLogger)

    def test_get_backend_from_thread(self):
        """Backends follow the same register-then-lookup contract."""
        alias = DatabaseBackend.__name__
        backend = thread_get_backend(alias)
        self.assertIsNone(backend)
        backend = thread_init_backend(alias, DatabaseBackend)
        self.assertIs(backend, DatabaseBackend)
        backend = thread_get_backend(alias)
        self.assertIs(backend, DatabaseBackend)

    def test_get_func_path(self):
        """get_func_path() returns a dotted module.function path."""
        path = get_func_path(test_func)
        self.assertIn('tests.test_func', path)
class ToolboxTest(LogexposeTestCase):
    """Tests for the public get_logger() toolbox function."""

    def test_get_logger(self):
        """get_logger() mirrors the thread-local registry contents."""
        logger = get_logger('base')
        self.assertIsNone(logger)
        thread_init_logger('base', BaseLogger)
        logger = get_logger('base')
        self.assertIs(logger, BaseLogger)
class MiddlewareTest(LogexposeTestCase):
    """Tests for RequestLoggerMiddleware."""

    def test_middleware(self):
        """process_request registers rlog/plog loggers; the response gets a client-id header."""
        mware = RequestLoggerMiddleware()
        logger = get_logger('rlog')
        self.assertIsNone(logger)
        logger = get_logger('plog')
        self.assertIsNone(logger)
        req = RequestFactory().get('/')
        mware.process_request(req)
        logger = get_logger('rlog')
        self.assertIsInstance(logger, RequestLogger)
        logger = get_logger('plog')
        self.assertIsInstance(logger, ProcessLogger)
        resp = mware.process_response(req, HttpResponse())
        # NOTE(review): relies on Django's private _headers attribute —
        # removed in Django 3.2+; confirm the supported Django version.
        self.assertIn('logexpose-client-id', resp._headers)
class DecoratorsTest(LogexposeTestCase):
    """Tests for the @logexpose decorator."""

    def test_logexpose_dec(self):
        """The decorated call fails until its logger is registered, then passes through."""
        func = logexpose_decor('base')(test_func)
        self.assertRaises(LoggerNotRegistered, func, 'func_result', 2)
        BaseLogger.get_from_thread()  # Initialize logger.
        result = func('func_result', 2)
        self.assertEqual(result, 'func_result')
class RecordModelTest(LogexposeTestCase):
    """Tests for the Record model."""

    def test_create(self):
        """A Record saves, gets a pk, and renders as '<time> LEVEL: msg'."""
        time = timezone.now()
        r = Record(**{
            'time': time,
            'lvl': 'info',
            'msg': 'some message',
            'mid': '123',
            'pid': None,
            'gid': 'default',
            'logger': 'some',
            'props': DatabaseBackend.prepare_props({'a': 'b'})
        })
        r.save()
        self.assertIsNotNone(r.pk)
        self.assertEqual(str(r), '%s INFO: some message' % time)
| StarcoderdataPython |
4802895 |
import cqparts
from cqparts.display import display
from cqparts.constraint import Fixed, Coincident
from cqparts.constraint import Mate
from cqparts.utils import CoordSystem
from cqparts.catalogue import JSONCatalogue
import cqparts_motors
import os
from .motor_mount import MountedStepper
from .stepper import Stepper
# Load the NEMA stepper catalogue shipped with cqparts_motors and build one
# concrete Stepper subclass per catalogue entry.
filename = os.path.join(
    os.path.dirname(cqparts_motors.__file__), "catalogue", "stepper-nema.json"
)
catalogue = JSONCatalogue(filename)
item = catalogue.get_query()
steppers = catalogue.iter_items()
stepper_list = []
for i in steppers:
    s = catalogue.deserialize_item(i)
    # Create a named subclass of Stepper for this catalogue id, then copy
    # each catalogue parameter onto it, coerced to the declared param type.
    cl = type(str(i["id"]), (Stepper,), {})
    p = cl.class_params(hidden=True)
    for j, k in p.items():
        pr = type(k)
        setattr(cl, j, pr(i["obj"]["params"][j]))
    stepper_list.append(cl)
class StepperCat(cqparts.Assembly):
    """Assembly that lays out collected components in a row along the Y axis,
    centred on the origin and spaced ``offset`` apart."""

    def initialize_parameters(self):
        # Components added via add(); spacing between consecutive components.
        self.coll = []
        self.offset = 90

    def add(self, i):
        """Append component *i* to the layout."""
        self.coll.append(i)

    @classmethod
    def item_name(cls, index):
        """Return the component key for position *index* (e.g. 'item_003')."""
        return "item_%03i" % index

    def make_components(self):
        # Map each collected component to a stable zero-padded name.
        # (enumerate replaces the original range(len(...)) indexing.)
        return {self.item_name(i): part for i, part in enumerate(self.coll)}

    def make_constraints(self):
        # Fix each component along Y so the whole row is centred at origin.
        total = len(self.coll) * self.offset
        return [
            Fixed(
                part.mate_origin,
                CoordSystem(
                    origin=(0, i * self.offset - (total / 2), 0),
                    xDir=(0, 1, 0),
                    normal=(0, 0, 1),
                ),
            )
            for i, part in enumerate(self.coll)
        ]
if __name__ == "__main__":
    # Demo: mount every catalogue stepper and display the resulting row.
    ar = StepperCat()
    print(stepper_list)
    for i in stepper_list:
        ar.add(MountedStepper(clearance=2, thickness=3, stepper=i))
        # ar.add(i())
    print(ar.tree_str())
    display(ar)
| StarcoderdataPython |
# SQL statement calling the `get_temas` stored procedure; the two %s
# placeholders are bound by the DB driver at execution time.
USUARIO_TEMA = "CALL get_temas(%s,%s)"
| StarcoderdataPython |
1668032 | #!/usr/bin/python3
"""
Take a list, say for example this one:
a = [1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89]
and write a program that prints out all the elements of the list that are less than 5.
Extras:
Instead of printing the elements one by one, make a new list that has all the elements
less than 5 from this list in it and print out this new list.
Write this in one line of Python.
Ask the user for a number and return a list that contains only elements from the original
list a that are smaller than that number given by the user.
"""
from helpers import user_data_validation
number_list = [0, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89]
def main():
    """Run both variants of the less-than-5 filtering exercise."""
    create_new_list()
    create_user_generated_list()
def create_new_list():
    """Print and collect the elements of number_list that are less than 5.

    Uses enumerate() instead of the original ``number_list.index(number)``:
    index() rescans the list for every element (O(n) per lookup) and would
    report the wrong position if the list ever contained duplicate values.
    Output is unchanged for the current, duplicate-free list.
    """
    new_number_list = []
    for index, number in enumerate(number_list):
        if number < 5:
            print('index: {} value: {}'.format(index, number))
            new_number_list.append(number)
    print('original list: {}'.format(number_list))
    print('new list with values less than 5: {}'.format(new_number_list))
def create_user_generated_list():
    """Ask the user for a threshold and print the elements below it.

    Reads a value from stdin, validates/converts it via the project's
    user_data_validation helper, and filters number_list against it.
    """
    user_generated_list = []
    user_input_value = int(user_data_validation(input('Please provide a value: ')))
    for number in number_list:
        if number < user_input_value:
            user_generated_list.append(number)
    print('original list: {}'.format(number_list))
    print('user generated list: {}'.format(user_generated_list))
if __name__ == '__main__': main()
| StarcoderdataPython |
1734932 | <reponame>eldad-a/BioCRNPyler
from biocrnpyler.chemical_reaction_network import Species, Reaction, ComplexSpecies, ChemicalReactionNetwork
print("Start")
#Names of different supported propensities
propensity_types = ['hillpositive', 'proportionalhillpositive', 'hillnegative', 'proportionalhillnegative', 'massaction', 'general']

# Rate constants: binding (kb), unbinding (ku), expression (kex),
# degradation (kd).
kb = 100
ku = 10
kex = 1.
kd = .1

G = Species(name = "G", material_type = "dna") #DNA
A = Species(name = "A", material_type = "protein") #Activator
GA = ComplexSpecies([G, A, A]) #Activated Gene
X = Species(name = "X", material_type = "protein")

# First-order degradation of the product X, shared by every CRN below.
rxnd = Reaction([X], [], kd)

#Massaction Unregulated
species1 = [G, A, GA, X]
rxn0_1 = Reaction([G, A, A], [GA], k=kb, k_rev = ku)
rxn0_2 = Reaction([GA], [GA, X], k=kex)
CRN0 = ChemicalReactionNetwork(species1, [rxn0_1, rxn0_2, rxnd])

# Massaction, but expression from the unbound gene G instead of GA.
rxn1_1 = Reaction([G, A, A], [GA], k=kb, k_rev = ku)
rxn1_2 = Reaction([G], [G, X], k=kex)
CRN1 = ChemicalReactionNetwork(species1, [rxn1_1, rxn1_2, rxnd])

#hill positive
species2 = [G, A, X]
rxn2_1 = Reaction([G], [G, X], propensity_type = "hillpositive", propensity_params = {"k":kex, "n":2, "K":float(kb/ku), "s1":A})
CRN2 = ChemicalReactionNetwork(species2, [rxn2_1, rxnd])

#proportional hill positive
rxn3_1 = Reaction([G], [G, X], propensity_type = "proportionalhillpositive", propensity_params = {"k":kex, "n":2, "K":float(kb/ku), "s1":A, "d":G})
CRN3 = ChemicalReactionNetwork(species2, [rxn3_1, rxnd])

#hill Negative
rxn4_1 = Reaction([G], [G, X], propensity_type = "hillnegative", propensity_params = {"k":kex, "n":2, "K":float(kb/ku), "s1":A})
CRN4 = ChemicalReactionNetwork(species2, [rxn4_1, rxnd])

#proportional hill negative
rxn5_1 = Reaction([G], [G, X], propensity_type = "proportionalhillnegative", propensity_params = {"k":kex, "n":2, "K":float(kb/ku), "s1":A, "d":G})
CRN5 = ChemicalReactionNetwork(species2, [rxn5_1, rxnd])
import numpy as np
import pylab as plt
# Simulate each CRN via bioscrape (round-tripping through SBML) for several
# activator amounts and plot protein X over time, one subplot per CRN.
x0 = {repr(G):2, repr(A):10}
timepoints = np.linspace(0, 100, 200)
fname = "CRN.xml"
CRNs = [CRN0, CRN1, CRN2, CRN4, CRN3, CRN5]
LSs = ["-", "--", ":"]
plt.figure()
for i in range(6):
    plt.subplot(3, 2, i+1)
    CRN = CRNs[i]
    # Write the CRN to SBML so bioscrape can load it as a Model.
    CRN.write_sbml_file(file_name = fname)
    print("Saved")
    from bioscrape.types import Model
    from bioscrape.simulator import py_simulate_model
    from bioscrape.sbmlutil import *
    M = Model(sbml_filename = fname)
    A_list = [0, 1, 2, 5, 10]
    for ind in range(len(A_list)):
        # Sweep the initial activator amount; darker lines = more activator.
        x0[repr(A)] = A_list[ind]
        M.set_species(x0)
        R = py_simulate_model(timepoints, Model = M)
        plt.plot(timepoints, R["protein_X"], label ="A="+str(A_list[ind]),color = 'blue', alpha = (ind+1)/len(A_list))
    # Title each subplot with the CRN's reactions.
    txt = ""
    for rxn in CRN.reactions:
        txt += repr(rxn)+"\n"
    plt.title(txt[:-1], fontsize = 8)
    plt.legend()
plt.show()
#CRN.simulate_with_bioscrape(timepoints, initial_condition_dict = x0)
#CRN.simulate_with_bioscrape_via_sbml(timepoints, file = fname, initial_condition_dict = x0) | StarcoderdataPython |
3352499 | import gi
import math
import cairo
import numpy
from dock import Dock, create_icon, get_gicon_pixbuf, pixbuf2image
gi.require_version('Gtk', '3.0') # noqa
gi.require_version('Gdk', '3.0') # noqa
gi.require_version('Gio', '2.0') # noqa
gi.require_version('GObject', '2.0') # noqa
from applications import AppCache, WindowTracker, groupings
from PIL import Image, ImageOps
from gi.repository import Gtk, Gdk, Gio, GLib, GObject
from dominantcolors import rgba2rgb, find_dominant_colors
app_cache = AppCache()
window_tracker = WindowTracker()
class DockWindow(Gtk.Window):
    """Undecorated, dock-type GTK window hosting a Dock widget.

    Constructing an instance also starts the GTK main loop (Gtk.main()),
    so instantiation blocks until the window is closed.
    """

    # True once the screen provides an RGBA visual (compositing available).
    supports_alpha = False

    def __init__(self):
        super().__init__()
        self.set_position(Gtk.WindowPosition.CENTER)
        self.set_title("Diffusion Dock")
        self.connect("delete-event", Gtk.main_quit)
        # Paint the window ourselves so it can be (semi-)transparent.
        self.set_app_paintable(True)
        self.connect("screen-changed", self.screen_changed)
        self.connect("draw", self.expose_draw)
        self.set_decorated(False)
        self.add_events(Gdk.EventMask.BUTTON_PRESS_MASK)
        self.set_type_hint(Gdk.WindowTypeHint.DOCK)
        dock = Dock(None, app_cache=app_cache, window_tracker=window_tracker)
        self.add(dock)
        # Apply the application stylesheet screen-wide.
        style_provider = Gtk.CssProvider()
        style_provider.load_from_file(Gio.File.new_for_path("style.css"))
        Gtk.StyleContext.add_provider_for_screen(
            Gdk.Screen.get_default(),
            style_provider,
            Gtk.STYLE_PROVIDER_PRIORITY_APPLICATION
        )

        # NOTE(review): leftover debug handler — prints on every dock draw;
        # the commented-out code resized the window to the dock's request.
        def aaa(_, a):
            print("aaa")
            #self.resize(
            #    max(dock.get_size_request().width, 1), max(dock.get_size_request().height, 1))
            # self.set_size_request(100, 100)
            # self.resize(100, 100)

        print(dock.get_size_request())
        # dock.connect("draw", lambda dock, _: self.resize(
        #     max(dock.get_size_request().width, 1), max(dock.get_size_request().height, 1)))
        dock.connect("draw", aaa)
        self.screen_changed(self, None, None)
        self.show_all()
        self.resize(100, 100)
        Gtk.main()

    def expose_draw(self, widget, event, userdata=None):
        # Clear the window background: transparent when compositing is
        # available, opaque white otherwise.
        cr = Gdk.cairo_create(widget.get_window())
        cr.scale(.2, .2)
        if self.supports_alpha:
            print("setting transparent window")
            cr.set_source_rgba(1.0, 1.0, 1.0, 0.0)
        else:
            print("setting opaque window")
            cr.set_source_rgb(1.0, 1.0, 1.0)
        cr.set_operator(cairo.OPERATOR_SOURCE)
        cr.paint()
        # False lets GTK continue propagating the draw signal.
        return False

    def screen_changed(self, widget, old_screen, userdata=None):
        # Pick an RGBA visual if the screen offers one; remember whether
        # alpha blending is supported for expose_draw().
        screen = self.get_screen()
        visual = screen.get_rgba_visual()
        if visual is None:
            visual = screen.get_system_visual()
            self.supports_alpha = False
        else:
            self.supports_alpha = True
        self.set_visual(visual)
if __name__ == "__main__":
DockWindow()
| StarcoderdataPython |
137808 | # Copyright (C) 2010-2011 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from CIM16.IEC61968.Common.Document import Document
class ErpPayment(Document):
    """Payment information and status for any individual line item of an
    ErpInvoice (e.g., when payment is from a customer). ErpPayable is also
    updated when payment is to a supplier and ErpReceivable is updated when
    payment is from a customer. Multiple payments can be made against a
    single line item and an individual payment can apply to more than one
    line item.
    """

    def __init__(self, termsPayment='', ErpRecLineItems=None, ErpPayableLineItems=None, ErpInvoiceLineItems=None, *args, **kw_args):
        """Initialises a new 'ErpPayment' instance.

        @param termsPayment: Payment terms (e.g., net 30).
        @param ErpRecLineItems: Receivable line items this payment applies to.
        @param ErpPayableLineItems: Payable line items this payment applies to.
        @param ErpInvoiceLineItems: Invoice line items this payment applies to.
        """
        #: Payment terms (e.g., net 30).
        self.termsPayment = termsPayment

        self._ErpRecLineItems = []
        self.ErpRecLineItems = [] if ErpRecLineItems is None else ErpRecLineItems

        self._ErpPayableLineItems = []
        self.ErpPayableLineItems = [] if ErpPayableLineItems is None else ErpPayableLineItems

        self._ErpInvoiceLineItems = []
        self.ErpInvoiceLineItems = [] if ErpInvoiceLineItems is None else ErpInvoiceLineItems

        super(ErpPayment, self).__init__(*args, **kw_args)

    _attrs = ["termsPayment"]
    _attr_types = {"termsPayment": str}
    _defaults = {"termsPayment": ''}
    _enums = {}
    _refs = ["ErpRecLineItems", "ErpPayableLineItems", "ErpInvoiceLineItems"]
    _many_refs = ["ErpRecLineItems", "ErpPayableLineItems", "ErpInvoiceLineItems"]

    def getErpRecLineItems(self):
        return self._ErpRecLineItems

    def setErpRecLineItems(self, value):
        # Detach this payment from every currently-linked line item.
        for p in self._ErpRecLineItems:
            filtered = [q for q in p.ErpPayments if q != self]
            # BUGFIX: write the filtered list back to the line item itself
            # (the original assigned to self._ErpRecLineItems._ErpPayments,
            # i.e. an attribute of a plain list, which raises AttributeError).
            p._ErpPayments = filtered
        for r in value:
            if self not in r._ErpPayments:
                r._ErpPayments.append(self)
        self._ErpRecLineItems = value

    ErpRecLineItems = property(getErpRecLineItems, setErpRecLineItems)

    def addErpRecLineItems(self, *ErpRecLineItems):
        for obj in ErpRecLineItems:
            if self not in obj._ErpPayments:
                obj._ErpPayments.append(self)
            self._ErpRecLineItems.append(obj)

    def removeErpRecLineItems(self, *ErpRecLineItems):
        for obj in ErpRecLineItems:
            if self in obj._ErpPayments:
                obj._ErpPayments.remove(self)
            self._ErpRecLineItems.remove(obj)

    def getErpPayableLineItems(self):
        return self._ErpPayableLineItems

    def setErpPayableLineItems(self, value):
        for p in self._ErpPayableLineItems:
            filtered = [q for q in p.ErpPayments if q != self]
            # BUGFIX: as above — update the line item, not the list attribute.
            p._ErpPayments = filtered
        for r in value:
            if self not in r._ErpPayments:
                r._ErpPayments.append(self)
        self._ErpPayableLineItems = value

    ErpPayableLineItems = property(getErpPayableLineItems, setErpPayableLineItems)

    def addErpPayableLineItems(self, *ErpPayableLineItems):
        for obj in ErpPayableLineItems:
            if self not in obj._ErpPayments:
                obj._ErpPayments.append(self)
            self._ErpPayableLineItems.append(obj)

    def removeErpPayableLineItems(self, *ErpPayableLineItems):
        for obj in ErpPayableLineItems:
            if self in obj._ErpPayments:
                obj._ErpPayments.remove(self)
            self._ErpPayableLineItems.remove(obj)

    def getErpInvoiceLineItems(self):
        return self._ErpInvoiceLineItems

    def setErpInvoiceLineItems(self, value):
        for p in self._ErpInvoiceLineItems:
            filtered = [q for q in p.ErpPayments if q != self]
            # BUGFIX: as above — update the line item, not the list attribute.
            p._ErpPayments = filtered
        for r in value:
            if self not in r._ErpPayments:
                r._ErpPayments.append(self)
        self._ErpInvoiceLineItems = value

    ErpInvoiceLineItems = property(getErpInvoiceLineItems, setErpInvoiceLineItems)

    def addErpInvoiceLineItems(self, *ErpInvoiceLineItems):
        for obj in ErpInvoiceLineItems:
            if self not in obj._ErpPayments:
                obj._ErpPayments.append(self)
            self._ErpInvoiceLineItems.append(obj)

    def removeErpInvoiceLineItems(self, *ErpInvoiceLineItems):
        for obj in ErpInvoiceLineItems:
            if self in obj._ErpPayments:
                obj._ErpPayments.remove(self)
            self._ErpInvoiceLineItems.remove(obj)
| StarcoderdataPython |
3327827 | from .core import *
from .callbacks import *
from .tokenizers import *
from .model_splits import *
from .transforms import *
from .generated_lm import GeneratedLM, GenerateArgs | StarcoderdataPython |
1774761 | <reponame>late-goodbye/codingame-overcomplicated
import logging
from math import floor, sqrt
logging.basicConfig(level=logging.DEBUG)
class Message(object):
    """Mutable holder for a piece of text that logs its initial value.

    ``repr()`` and ``len()`` delegate to the wrapped text, so instances can
    be printed and measured like plain strings.
    """

    def __init__(self, text: str = None):
        # NOTE(review): with the default of None, __repr__/__len__ raise
        # TypeError; callers appear to always pass a string — confirm.
        self.logger = logging.getLogger(type(self).__name__)
        self.text = text
        self.logger.info('Set text value to {}'.format(self.text))

    def __repr__(self):
        return self.text

    def __len__(self):
        return len(self.text)
class Coder(object):
    """Encodes and decodes Message objects with a zig-zag chunking scheme.

    encode() splits the text into chunks of growing size (1, 2, 3, ...) and
    alternately appends odd-sized chunks to the tail and prepends even-sized
    chunks to the head of the result; decode() reverses one such pass.
    """

    def __init__(self):
        self.logger = logging.getLogger(type(self).__name__)

    def encode(self, message):
        """Apply one encoding pass to *message* in place."""
        remaining = message.text
        size = 0
        encoded = ''
        while remaining:
            size += 1
            chunk, remaining = remaining[:size], remaining[size:]
            # Odd-sized chunks go to the tail, even-sized ones to the head.
            encoded = encoded + chunk if size % 2 else chunk + encoded
        self.logger.info('Encoded message "{}" to "{}"'.format(message, encoded))
        message.text = encoded

    def decode(self, message):
        """Reverse one encoding pass of *message* in place.

        The encoded text consists of full chunks of sizes 1, 2, ..., k plus
        an optional shortened final chunk.  With total length L and
        l = k * (k + 1) / 2, the full-chunk count satisfies
        k**2 + k - 2*l = 0, so k = floor((-1 + sqrt(1 + 8*L)) / 2); the
        remaining L - l characters form the shortened final chunk.  Chunks
        are then peeled off alternately from head and tail, largest first,
        and reassembled front-to-back.
        """
        decoded = ''
        self.logger.info('Started decoding message {}'.format(message))
        full_chunks = floor((-1 + sqrt(1 + 8 * len(message))) / 2)
        expected_length = int((1 + full_chunks) / 2 * full_chunks)
        self.logger.info('The message contains at least {} bits'.format(full_chunks))
        shortened = len(message) - expected_length or 0
        from_head = full_chunks % 2 == 0
        if shortened:
            self.logger.info('The message has shortened last bit with length {}'.format(shortened))
            from_head = not from_head
        size = full_chunks
        while len(message) > 0:
            self.logger.info('Message length is {}'.format(len(message)))
            # The shortened chunk (if any) comes off first; afterwards the
            # full chunks are removed in decreasing size order.
            if shortened:
                cut, shortened = shortened, 0
            else:
                cut, size = cut_len, size - 1 if False else (size, size - 1)[0], size - 1
            if from_head:
                chunk, message.text = message.text[:cut], message.text[cut:]
            else:
                message.text, chunk = message.text[:-cut], message.text[-cut:]
            decoded = '{}{}'.format(chunk, decoded)
            from_head = not from_head
        self.logger.info('Decoded message to "{}"'.format(decoded))
        message.text = decoded

    def transform(self, message: Message, times: int = 0):
        """Decode (times > 0) or encode (times < 0) *message* abs(times) times."""
        self.logger.info('Started transforming message "{}"'.format(message))
        if times:
            action = self.decode if times > 0 else self.encode
            self.logger.info('Chose action "{}"'.format(action.__name__))
            for i in range(abs(times)):
                self.logger.info('Started iteration {}'.format(i))
                action(message)
                self.logger.info('Finished iteration {}'.format(i))
        self.logger.info('Finished transformation')
        self.logger.info('Result: "{}"\n'.format(message))
        return message
if __name__ == '__main__':
    # Read the transformation count (positive = decode, negative = encode)
    # followed by the message, then print the transformed result.
    n = int(input())
    message = Message(input())
    coder = Coder()
    print(coder.transform(message, n))
| StarcoderdataPython |
172269 | # -*- coding: utf-8 -*-
"""
locale test_services module.
"""
import pytest
import pyrin.globalization.locale.services as locale_services
import pyrin.configuration.services as config_services
from pyrin.globalization.locale.exceptions import InvalidLocaleSelectorTypeError, \
LocaleSelectorHasBeenAlreadySetError, InvalidTimezoneSelectorTypeError, \
TimezoneSelectorHasBeenAlreadySetError
def test_set_locale_selector_invalid_type():
    """
    tries to register an invalid value as locale selector.
    it should raise an error.
    """

    pytest.raises(InvalidLocaleSelectorTypeError,
                  locale_services.set_locale_selector, None)
def test_set_locale_selector_already_set():
    """
    tries to register a locale selector while one is already in place.
    it should raise an error.
    """

    pytest.raises(LocaleSelectorHasBeenAlreadySetError,
                  locale_services.set_locale_selector,
                  locale_services.get_current_locale)
def test_set_timezone_selector_invalid_type():
    """
    tries to register an invalid value as timezone selector.
    it should raise an error.
    """

    pytest.raises(InvalidTimezoneSelectorTypeError,
                  locale_services.set_timezone_selector, 23)
def test_set_timezone_selector_already_set():
    """
    tries to register a timezone selector while one is already in place.
    it should raise an error.
    """

    pytest.raises(TimezoneSelectorHasBeenAlreadySetError,
                  locale_services.set_timezone_selector,
                  locale_services.get_current_timezone)
def test_get_current_locale():
    """
    checks that, outside of a request, the current locale
    falls back to the configured default locale.
    """

    expected = config_services.get('globalization', 'locale',
                                   'babel_default_locale')
    assert locale_services.get_current_locale() == expected
def test_get_current_timezone():
    """
    checks that, outside of a request, the current timezone
    falls back to the configured default timezone.
    """

    expected = config_services.get('globalization', 'timezone',
                                   'babel_default_timezone')
    assert locale_services.get_current_timezone() == expected
def test_locale_exists_valid():
    """
    checks that known locale names are reported as existing,
    regardless of letter case.
    """

    for name in ('fa', 'fA', 'en', 'FR'):
        assert locale_services.locale_exists(name) is True
def test_locale_exists_invalid():
    """
    checks that unknown or malformed locale names are not
    reported as existing.
    """

    for name in ('11', 'fake', 'fa.ir', '', ' ', None):
        assert locale_services.locale_exists(name) is not True
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.