Schema of the dump (one row per source file). Columns ending in `_quality_signal` hold raw quality-signal values; the same names without the suffix hold per-filter flags, and in the rows below `hits` equals the number of flags set to 1.

| column | type |
|---|---|
| hexsha | string |
| size | int64 |
| ext | string |
| lang | string |
| max_stars_repo_path | string |
| max_stars_repo_name | string |
| max_stars_repo_head_hexsha | string |
| max_stars_repo_licenses | list |
| max_stars_count | int64 |
| max_stars_repo_stars_event_min_datetime | string |
| max_stars_repo_stars_event_max_datetime | string |
| max_issues_repo_path | string |
| max_issues_repo_name | string |
| max_issues_repo_head_hexsha | string |
| max_issues_repo_licenses | list |
| max_issues_count | int64 |
| max_issues_repo_issues_event_min_datetime | string |
| max_issues_repo_issues_event_max_datetime | string |
| max_forks_repo_path | string |
| max_forks_repo_name | string |
| max_forks_repo_head_hexsha | string |
| max_forks_repo_licenses | list |
| max_forks_count | int64 |
| max_forks_repo_forks_event_min_datetime | string |
| max_forks_repo_forks_event_max_datetime | string |
| content | string |
| avg_line_length | float64 |
| max_line_length | int64 |
| alphanum_fraction | float64 |
| qsc_code_num_words_quality_signal | int64 |
| qsc_code_num_chars_quality_signal | float64 |
| qsc_code_mean_word_length_quality_signal | float64 |
| qsc_code_frac_words_unique_quality_signal | float64 |
| qsc_code_frac_chars_top_2grams_quality_signal | float64 |
| qsc_code_frac_chars_top_3grams_quality_signal | float64 |
| qsc_code_frac_chars_top_4grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_5grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_6grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_7grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_8grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_9grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_10grams_quality_signal | float64 |
| qsc_code_frac_chars_replacement_symbols_quality_signal | float64 |
| qsc_code_frac_chars_digital_quality_signal | float64 |
| qsc_code_frac_chars_whitespace_quality_signal | float64 |
| qsc_code_size_file_byte_quality_signal | float64 |
| qsc_code_num_lines_quality_signal | float64 |
| qsc_code_num_chars_line_max_quality_signal | float64 |
| qsc_code_num_chars_line_mean_quality_signal | float64 |
| qsc_code_frac_chars_alphabet_quality_signal | float64 |
| qsc_code_frac_chars_comments_quality_signal | float64 |
| qsc_code_cate_xml_start_quality_signal | float64 |
| qsc_code_frac_lines_dupe_lines_quality_signal | float64 |
| qsc_code_cate_autogen_quality_signal | float64 |
| qsc_code_frac_lines_long_string_quality_signal | float64 |
| qsc_code_frac_chars_string_length_quality_signal | float64 |
| qsc_code_frac_chars_long_word_length_quality_signal | float64 |
| qsc_code_frac_lines_string_concat_quality_signal | float64 |
| qsc_code_cate_encoded_data_quality_signal | float64 |
| qsc_code_frac_chars_hex_words_quality_signal | float64 |
| qsc_code_frac_lines_prompt_comments_quality_signal | float64 |
| qsc_code_frac_lines_assert_quality_signal | float64 |
| qsc_codepython_cate_ast_quality_signal | float64 |
| qsc_codepython_frac_lines_func_ratio_quality_signal | float64 |
| qsc_codepython_cate_var_zero_quality_signal | bool |
| qsc_codepython_frac_lines_pass_quality_signal | float64 |
| qsc_codepython_frac_lines_import_quality_signal | float64 |
| qsc_codepython_frac_lines_simplefunc_quality_signal | float64 |
| qsc_codepython_score_lines_no_logic_quality_signal | float64 |
| qsc_codepython_frac_lines_print_quality_signal | float64 |
| qsc_code_num_words | int64 |
| qsc_code_num_chars | int64 |
| qsc_code_mean_word_length | int64 |
| qsc_code_frac_words_unique | null |
| qsc_code_frac_chars_top_2grams | int64 |
| qsc_code_frac_chars_top_3grams | int64 |
| qsc_code_frac_chars_top_4grams | int64 |
| qsc_code_frac_chars_dupe_5grams | int64 |
| qsc_code_frac_chars_dupe_6grams | int64 |
| qsc_code_frac_chars_dupe_7grams | int64 |
| qsc_code_frac_chars_dupe_8grams | int64 |
| qsc_code_frac_chars_dupe_9grams | int64 |
| qsc_code_frac_chars_dupe_10grams | int64 |
| qsc_code_frac_chars_replacement_symbols | int64 |
| qsc_code_frac_chars_digital | int64 |
| qsc_code_frac_chars_whitespace | int64 |
| qsc_code_size_file_byte | int64 |
| qsc_code_num_lines | int64 |
| qsc_code_num_chars_line_max | int64 |
| qsc_code_num_chars_line_mean | int64 |
| qsc_code_frac_chars_alphabet | int64 |
| qsc_code_frac_chars_comments | int64 |
| qsc_code_cate_xml_start | int64 |
| qsc_code_frac_lines_dupe_lines | int64 |
| qsc_code_cate_autogen | int64 |
| qsc_code_frac_lines_long_string | int64 |
| qsc_code_frac_chars_string_length | int64 |
| qsc_code_frac_chars_long_word_length | int64 |
| qsc_code_frac_lines_string_concat | null |
| qsc_code_cate_encoded_data | int64 |
| qsc_code_frac_chars_hex_words | int64 |
| qsc_code_frac_lines_prompt_comments | int64 |
| qsc_code_frac_lines_assert | int64 |
| qsc_codepython_cate_ast | int64 |
| qsc_codepython_frac_lines_func_ratio | int64 |
| qsc_codepython_cate_var_zero | int64 |
| qsc_codepython_frac_lines_pass | int64 |
| qsc_codepython_frac_lines_import | int64 |
| qsc_codepython_frac_lines_simplefunc | int64 |
| qsc_codepython_score_lines_no_logic | int64 |
| qsc_codepython_frac_lines_print | int64 |
| effective | string |
| hits | int64 |
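
A hedged sketch of how rows with this schema might be inspected once exported to pandas (the parquet file name is an illustrative assumption; the dump itself does not specify a file layout):

```python
import pandas as pd

# Hypothetical shard name, for illustration only.
df = pd.read_parquet("data-00000-of-00001.parquet")

row = df.iloc[0]
print(row["hexsha"], row["size"], row["ext"], row["lang"])  # file identity
print(row["content"][:200])  # raw source text

# Raw quality signals vs. their per-filter flag counterparts.
signal_cols = [c for c in df.columns if c.endswith("_quality_signal")]
print(df[signal_cols].describe())
print(df[["effective", "hits"]])
```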

| field | value |
|---|---|
| hexsha | a76744312dd71b2361f14534d5618c09af6e2802 |
| size | 28,542 |
| ext | py |
| lang | Python |
| max_stars / max_issues / max_forks repo_path | calc_dens_rates.py |
| max_stars / max_issues / max_forks repo_name | daniel-furman/ice-densification-research |
| max_stars / max_issues / max_forks repo_head_hexsha | 89645275aace68ad1dbf897ac5de72696685bffc |
| max_stars / max_issues / max_forks repo_licenses | ["MIT"] |
| max_stars_count / max_issues_count / max_forks_count | null |
| all star/issue/fork event min/max datetimes | null |

content:
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tues Aug 18 13:27:33 2020
@author: danielfurman
"""
# Sixteen densification rate calculations. Each creep curve consists of a
# transient response followed by a steady-state regime. Rates are computed
# by taking time slices within steady state and averaging many pairwise
# rate approximations across each slice. Because a steady-state slice spans
# only a small change in density, the mean relative density is taken as
# representative of the measurement, and densification is treated as
# density-invariant, albeit only locally.
# Required Libraries:
import pandas as pd
import numpy as np
import glob
import matplotlib.pylab as plt
import warnings
warnings.filterwarnings("ignore")
# Load all experimental data
filenames = sorted(glob.glob('data/compaction*.csv'))  # grab file paths
print('\nThe compaction creep test files are:\n\n', filenames)  # print names
data_list1 = []  # initialize empty list of per-file numpy arrays
for f in filenames:  # f iterates over the filename strings
    data_list1.append(np.loadtxt(fname=f, delimiter=','))  # load each file
print('\nThere are', len(data_list1), 'total experimental files.')
num_experiments = len(data_list1)  # number of experimental files
# Also load experimental table
paper_table = pd.read_csv('data/paper_data.csv', delimiter=',', header = 'infer')
applied_pressure_raw = np.array([0.627, 0.753, 0.878,0.973,1.16,1.16,
0.47,0.59,0.72,1.16,0.251,0.377,
0.44, 0.5967,1.16,1.16]) #In MPa
fig, axes = plt.subplots(1, 3, figsize=(20,4.5))  # initialize row of subplots
steadystate_slice = data_list1[0][(data_list1[0][:,1]/(60*60)>=10)&
(data_list1[0][:,1]/(60*60)<=115)]
axes[0].plot(steadystate_slice [:,1]/(60*60), steadystate_slice [:,7])
axes[0].set_ylim([.74,.744])
axes[0].set_title('Compaction Id 1, First Step')
x = int(len(steadystate_slice[:,1])/10)
densrates = np.zeros(x)
strainrates = np.zeros(x)
time1 = np.zeros(x)
dense1 = np.zeros(x)
time2 = np.zeros(x)
dense2 = np.zeros(x)
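# Symmetric differencing: pair sample i from the start of the steady-state
# slice with sample i from the end, form a densification rate from each pair,
# and divide by 0.917 g/cm^3 (pure ice) to work in relative density.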
for i in range(0,x):
dtime = steadystate_slice[:,1][i] - steadystate_slice[:,1][
(len(steadystate_slice)-(i+1))]
ddense = steadystate_slice[:,7][i] - steadystate_slice[:,7][
(len(steadystate_slice)-(i+1))]
dstrain = steadystate_slice[:,5][i] - steadystate_slice[:,5][
(len(steadystate_slice)-(i+1))]
densrates[i] = (ddense/dtime)/.917
strainrates[i] = dstrain/dtime
dense1[i] = steadystate_slice[:,7][i]
time1[i] = steadystate_slice[:,1][i]
dense2[i] = steadystate_slice[:,7][(len(steadystate_slice)-(i+1))]
time2[i] = steadystate_slice[:,1][(len(steadystate_slice)-(i+1))]
dens_rate = np.mean(densrates)
mean_dens = np.mean(np.array([np.mean(dense1),np.mean(dense2)]))/.917
paper_table.loc[0,'Densification rate'] = dens_rate
paper_table.loc[0, 'Mean dens'] = mean_dens
paper_table.loc[0, 'applied stress'] = applied_pressure_raw[0]/mean_dens
axes[0].plot(np.array([np.mean(time1)/(60*60),np.mean(time2)/(60*60)]),
np.array([np.mean(dense1),np.mean(dense2)]),
label = 'Steady-state rate = ' + "{:.2e}".format(dens_rate))
axes[0].legend(loc = 'lower right', shadow = True)
axes[0].set_ylabel('Density (g/cm^3)')
axes[0].set_xlabel('Time (hours)')
steadystate_slice = data_list1[1][(data_list1[1][:,1]/(60*60)>=0)
&(data_list1[1][:,1]/(60*60)<=33.9)]
axes[1].plot(steadystate_slice [:,1]/(60*60), steadystate_slice [:,7])
axes[1].set_ylim([.739,.743])
axes[1].set_title('Compaction Id 2, First Step')
x = int(len(steadystate_slice[:,1])/10)
densrates = np.zeros(x)
strainrates = np.zeros(x)
time1 = np.zeros(x)
dense1 = np.zeros(x)
time2 = np.zeros(x)
dense2 = np.zeros(x)
for i in range(0,x):
dtime = steadystate_slice[:,1][i] - steadystate_slice[:,1][
(len(steadystate_slice)-(i+1))]
ddense = steadystate_slice[:,7][i] - steadystate_slice[:,7][
(len(steadystate_slice)-(i+1))]
dstrain = steadystate_slice[:,5][i] - steadystate_slice[:,5][
(len(steadystate_slice)-(i+1))]
densrates[i] = (ddense/dtime)/.917
strainrates[i] = dstrain/dtime
dense1[i] = steadystate_slice[:,7][i]
time1[i] = steadystate_slice[:,1][i]
dense2[i] = steadystate_slice[:,7][(len(steadystate_slice)-(i+1))]
time2[i] = steadystate_slice[:,1][(len(steadystate_slice)-(i+1))]
dens_rate = np.mean(densrates)
mean_dens = np.mean(np.array([np.mean(dense1),np.mean(dense2)]))/.917
paper_table.loc[3,'Densification rate'] = dens_rate
paper_table.loc[3, 'Mean dens'] = mean_dens
paper_table.loc[3, 'applied stress'] = applied_pressure_raw[3]/mean_dens
axes[1].plot(np.array([np.mean(time1)/(60*60),np.mean(time2)/(60*60)]),
np.array([np.mean(dense1),np.mean(dense2)]),
label = 'Steady-state rate = ' + "{:.2e}".format(dens_rate))
axes[1].legend(loc = 'lower right', shadow = True)
axes[1].set_ylabel('Density (g/cm^3)')
axes[1].set_xlabel('Time (hours)')
steadystate_slice = data_list1[2][(data_list1[2][:,1]/(60*60)>=5)
&(data_list1[2][:,1]/(60*60)<=60)]
axes[2].plot(steadystate_slice [:,1]/(60*60), steadystate_slice [:,7])
axes[2].set_title('Compaction Id 3, First Step')
x = int(len(steadystate_slice[:,1])/10)
densrates = np.zeros(x)
strainrates = np.zeros(x)
time1 = np.zeros(x)
dense1 = np.zeros(x)
time2 = np.zeros(x)
dense2 = np.zeros(x)
for i in range(0,x):
dtime = steadystate_slice[:,1][i] - steadystate_slice[:,1][
(len(steadystate_slice)-(i+1))]
ddense = steadystate_slice[:,7][i] - steadystate_slice[:,7][
(len(steadystate_slice)-(i+1))]
dstrain = steadystate_slice[:,5][i] - steadystate_slice[:,5][
(len(steadystate_slice)-(i+1))]
densrates[i] = (ddense/dtime)/.917
strainrates[i] = dstrain/dtime
dense1[i] = steadystate_slice[:,7][i]
time1[i] = steadystate_slice[:,1][i]
dense2[i] = steadystate_slice[:,7][(len(steadystate_slice)-(i+1))]
time2[i] = steadystate_slice[:,1][(len(steadystate_slice)-(i+1))]
dens_rate = np.mean(densrates)
mean_dens = np.mean(np.array([np.mean(dense1),np.mean(dense2)]))/.917
paper_table.loc[11,'Densification rate'] = dens_rate
paper_table.loc[11, 'Mean dens'] = mean_dens
paper_table.loc[11, 'applied stress'] = applied_pressure_raw[11]/mean_dens
axes[2].plot(np.array([np.mean(time1)/(60*60),np.mean(time2)/(60*60)]),
np.array([np.mean(dense1),np.mean(dense2)]),
label = 'Steady-state rate = ' + "{:.2e}".format(dens_rate))
axes[2].legend(loc = 'lower right', shadow = True)
axes[2].set_ylabel('Density (g/cm^3)')
axes[2].set_xlabel('Time (hours)')
fig, axes = plt.subplots(1, 3, figsize=(20,4.5))  # initialize row of subplots
steadystate_slice = data_list1[3][(data_list1[3][:,1]/(60*60)<=38)
&(data_list1[3][:,1]/(60*60)>=23)]
axes[0].plot(steadystate_slice [:,1]/(60*60), steadystate_slice [:,7])
axes[0].set_title('Compaction Id 4, Single step')
x = int(len(steadystate_slice[:,1])/10)
densrates = np.zeros(x)
strainrates = np.zeros(x)
time1 = np.zeros(x)
dense1 = np.zeros(x)
time2 = np.zeros(x)
dense2 = np.zeros(x)
for i in range(0,x):
dtime = steadystate_slice[:,1][i] - steadystate_slice[:,1][
(len(steadystate_slice)-(i+1))]
ddense = steadystate_slice[:,7][i] - steadystate_slice[:,7][
(len(steadystate_slice)-(i+1))]
dstrain = steadystate_slice[:,5][i] - steadystate_slice[:,5][
(len(steadystate_slice)-(i+1))]
densrates[i] = (ddense/dtime)/.917
strainrates[i] = dstrain/dtime
dense1[i] = steadystate_slice[:,7][i]
time1[i] = steadystate_slice[:,1][i]
dense2[i] = steadystate_slice[:,7][(len(steadystate_slice)-(i+1))]
time2[i] = steadystate_slice[:,1][(len(steadystate_slice)-(i+1))]
dens_rate = np.mean(densrates)
mean_dens = np.mean(np.array([np.mean(dense1),np.mean(dense2)]))/.917
paper_table.loc[13,'Densification rate'] = dens_rate
paper_table.loc[13, 'Mean dens'] = mean_dens
paper_table.loc[13, 'applied stress'] = applied_pressure_raw[13]/mean_dens
axes[0].plot(np.array([np.mean(time1)/(60*60),np.mean(time2)/(60*60)]),
np.array([np.mean(dense1),np.mean(dense2)]),
label = 'Steady-state rate = ' + "{:.2e}".format(dens_rate))
axes[0].legend(loc = 'lower right', shadow = True)
axes[0].set_ylabel('Density (g/cm^3)')
axes[0].set_xlabel('Time (hours)')
steadystate_slice = data_list1[2][(data_list1[2][:,1]/(60*60)>=140)]
axes[1].plot(steadystate_slice [:,1]/(60*60), steadystate_slice [:,7])
axes[1].set_title('Compaction Id 3, Second Step')
x = int(len(steadystate_slice[:,1])/10)
densrates = np.zeros(x)
strainrates = np.zeros(x)
time1 = np.zeros(x)
dense1 = np.zeros(x)
time2 = np.zeros(x)
dense2 = np.zeros(x)
for i in range(0,x):
dtime = steadystate_slice[:,1][i] - steadystate_slice[:,1][
(len(steadystate_slice)-(i+1))]
ddense = steadystate_slice[:,7][i] - steadystate_slice[:,7][
(len(steadystate_slice)-(i+1))]
dstrain = steadystate_slice[:,5][i] - steadystate_slice[:,5][
(len(steadystate_slice)-(i+1))]
densrates[i] = (ddense/dtime)/.917
strainrates[i] = dstrain/dtime
dense1[i] = steadystate_slice[:,7][i]
time1[i] = steadystate_slice[:,1][i]
dense2[i] = steadystate_slice[:,7][(len(steadystate_slice)-(i+1))]
time2[i] = steadystate_slice[:,1][(len(steadystate_slice)-(i+1))]
dens_rate = np.mean(densrates)
mean_dens = np.mean(np.array([np.mean(dense1),np.mean(dense2)]))/.917
paper_table.loc[10,'Densification rate'] = dens_rate
paper_table.loc[10, 'Mean dens'] = mean_dens
paper_table.loc[10, 'applied stress'] = applied_pressure_raw[10]/mean_dens
axes[1].plot(np.array([np.mean(time1)/(60*60),np.mean(time2)/(60*60)]),
np.array([np.mean(dense1),np.mean(dense2)]),
label = 'Steady-state rate = ' + "{:.2e}".format(dens_rate))
axes[1].legend(loc = 'lower right', shadow = True)
axes[1].set_ylabel('Density (g/cm^3)')
axes[1].set_xlabel('Time (hours)')
steadystate_slice = data_list1[4][(data_list1[4][:,1]/(60*60)>=82)
&(data_list1[4][:,1]/(60*60)<=97)]
axes[2].plot(steadystate_slice [:,1]/(60*60), steadystate_slice [:,7])
axes[2].set_title('Compaction Id 5, First Step')
x = int(len(steadystate_slice[:,1])/10)
densrates = np.zeros(x)
strainrates = np.zeros(x)
time1 = np.zeros(x)
dense1 = np.zeros(x)
time2 = np.zeros(x)
dense2 = np.zeros(x)
for i in range(0,x):
dtime = steadystate_slice[:,1][i] - steadystate_slice[:,1][
(len(steadystate_slice)-(i+1))]
ddense = steadystate_slice[:,7][i] - steadystate_slice[:,7][
(len(steadystate_slice)-(i+1))]
dstrain = steadystate_slice[:,5][i] - steadystate_slice[:,5][
(len(steadystate_slice)-(i+1))]
densrates[i] = (ddense/dtime)/.917
strainrates[i] = dstrain/dtime
dense1[i] = steadystate_slice[:,7][i]
time1[i] = steadystate_slice[:,1][i]
dense2[i] = steadystate_slice[:,7][(len(steadystate_slice)-(i+1))]
time2[i] = steadystate_slice[:,1][(len(steadystate_slice)-(i+1))]
dens_rate = np.mean(densrates)
mean_dens = np.mean(np.array([np.mean(dense1),np.mean(dense2)]))/.917
paper_table.loc[14,'Densification rate'] = dens_rate
paper_table.loc[14, 'Mean dens'] = mean_dens
paper_table.loc[14, 'applied stress'] = applied_pressure_raw[14]/mean_dens
axes[2].plot(np.array([np.mean(time1)/(60*60),np.mean(time2)/(60*60)]),
np.array([np.mean(dense1),np.mean(dense2)]),
label = 'Steady-state rate = ' + "{:.2e}".format(dens_rate))
axes[2].legend(loc = 'lower right', shadow = True)
axes[2].set_ylabel('Density (g/cm^3)')
axes[2].set_xlabel('Time (hours)')
fig, axes = plt.subplots(1, 3, figsize=(20,4.5))  # initialize row of subplots
steadystate_slice = data_list1[4][(data_list1[4][:,1]/(60*60)>=128)]
axes[0].plot(steadystate_slice [:,1]/(60*60), steadystate_slice [:,7])
axes[0].set_title('Compaction Id 5, Second Step ')
x = int(len(steadystate_slice[:,1])/10)
densrates = np.zeros(x)
strainrates = np.zeros(x)
time1 = np.zeros(x)
dense1 = np.zeros(x)
time2 = np.zeros(x)
dense2 = np.zeros(x)
for i in range(0,x):
dtime = steadystate_slice[:,1][i] - steadystate_slice[:,1][
(len(steadystate_slice)-(i+1))]
ddense = steadystate_slice[:,7][i] - steadystate_slice[:,7][
(len(steadystate_slice)-(i+1))]
dstrain = steadystate_slice[:,5][i] - steadystate_slice[:,5][
(len(steadystate_slice)-(i+1))]
densrates[i] = (ddense/dtime)/.917
strainrates[i] = dstrain/dtime
dense1[i] = steadystate_slice[:,7][i]
time1[i] = steadystate_slice[:,1][i]
dense2[i] = steadystate_slice[:,7][(len(steadystate_slice)-(i+1))]
time2[i] = steadystate_slice[:,1][(len(steadystate_slice)-(i+1))]
dens_rate = np.mean(densrates)
mean_dens = np.mean(np.array([np.mean(dense1),np.mean(dense2)]))/.917
paper_table.loc[12,'Densification rate'] = dens_rate
paper_table.loc[12, 'Mean dens'] = mean_dens
paper_table.loc[12, 'applied stress'] = applied_pressure_raw[12]/mean_dens
axes[0].plot(np.array([np.mean(time1)/(60*60),np.mean(time2)/(60*60)]),
np.array([np.mean(dense1),np.mean(dense2)]),
label = 'Steady-state rate = ' + "{:.2e}".format(dens_rate))
axes[0].legend(loc = 'lower right', shadow = True)
axes[0].set_ylabel('Density (g/cm^3)')
axes[0].set_xlabel('Time (hours)')
axes[1].set_title('Compaction Id 6, First Step')
steadystate_slice = data_list1[5][(data_list1[5][:,1]/(60*60)>=60)
&(data_list1[5][:,1]/(60*60)<=90)]
axes[1].plot(steadystate_slice [:,1]/(60*60), steadystate_slice [:,7])
x = int(len(steadystate_slice[:,1])/10)
densrates = np.zeros(x)
strainrates = np.zeros(x)
time1 = np.zeros(x)
dense1 = np.zeros(x)
time2 = np.zeros(x)
dense2 = np.zeros(x)
for i in range(0,x):
dtime = steadystate_slice[:,1][i] - steadystate_slice[:,1][
(len(steadystate_slice)-(i+1))]
ddense = steadystate_slice[:,7][i] - steadystate_slice[:,7][
(len(steadystate_slice)-(i+1))]
dstrain = steadystate_slice[:,5][i] - steadystate_slice[:,5][
(len(steadystate_slice)-(i+1))]
densrates[i] = (ddense/dtime)/.917
strainrates[i] = dstrain/dtime
dense1[i] = steadystate_slice[:,7][i]
time1[i] = steadystate_slice[:,1][i]
dense2[i] = steadystate_slice[:,7][(len(steadystate_slice)-(i+1))]
time2[i] = steadystate_slice[:,1][(len(steadystate_slice)-(i+1))]
dens_rate = np.mean(densrates)
mean_dens = np.mean(np.array([np.mean(dense1),np.mean(dense2)]))/.917
paper_table.loc[9,'Densification rate'] = dens_rate
paper_table.loc[9, 'Mean dens'] = mean_dens
paper_table.loc[9, 'applied stress'] = applied_pressure_raw[9]/mean_dens
axes[1].plot(np.array([np.mean(time1)/(60*60),np.mean(time2)/(60*60)]),
np.array([np.mean(dense1),np.mean(dense2)]),
label = 'Steady-state rate = ' + "{:.2e}".format(dens_rate))
axes[1].legend(loc = 'lower right', shadow = True)
axes[1].set_ylabel('Density (g/cm^3)')
axes[1].set_xlabel('Time (hours)')
axes[2].set_title('Compaction Id 6, Second Step')
steadystate_slice = data_list1[5][(data_list1[5][:,1]/(60*60)>=91)
&(data_list1[5][:,1]/(60*60)<=113)]
axes[2].plot(steadystate_slice [:,1]/(60*60), steadystate_slice [:,7])
x = int(len(steadystate_slice[:,1])/10)
densrates = np.zeros(x)
strainrates = np.zeros(x)
time1 = np.zeros(x)
dense1 = np.zeros(x)
time2 = np.zeros(x)
dense2 = np.zeros(x)
for i in range(0,x):
dtime = steadystate_slice[:,1][i] - steadystate_slice[:,1][
(len(steadystate_slice)-(i+1))]
ddense = steadystate_slice[:,7][i] - steadystate_slice[:,7][
(len(steadystate_slice)-(i+1))]
dstrain = steadystate_slice[:,5][i] - steadystate_slice[:,5][
(len(steadystate_slice)-(i+1))]
densrates[i] = (ddense/dtime)/.917
strainrates[i] = dstrain/dtime
dense1[i] = steadystate_slice[:,7][i]
time1[i] = steadystate_slice[:,1][i]
dense2[i] = steadystate_slice[:,7][(len(steadystate_slice)-(i+1))]
time2[i] = steadystate_slice[:,1][(len(steadystate_slice)-(i+1))]
dens_rate = np.mean(densrates)
mean_dens = np.mean(np.array([np.mean(dense1),np.mean(dense2)]))/.917
paper_table.loc[6,'Densification rate'] = dens_rate
paper_table.loc[6, 'Mean dens'] = mean_dens
paper_table.loc[6, 'applied stress'] = applied_pressure_raw[6]/mean_dens
axes[2].plot(np.array([np.mean(time1)/(60*60),np.mean(time2)/(60*60)]),
np.array([np.mean(dense1),np.mean(dense2)]),
label = 'Steady-state rate = ' + "{:.2e}".format(dens_rate))
axes[2].legend(loc = 'lower right', shadow = True)
axes[2].set_ylabel('Density (g/cm^3)')
axes[2].set_xlabel('Time (hours)')
fig, axes = plt.subplots(1, 3, figsize=(20,4.5))  # initialize row of subplots
steadystate_slice = data_list1[5][(data_list1[5][:,1]/(60*60)>=115)
&(data_list1[5][:,1]/(60*60)<=136)]
axes[0].set_title('Compaction Id 6, Third Step')
axes[0].plot(steadystate_slice [:,1]/(60*60), steadystate_slice [:,7])
x = int(len(steadystate_slice[:,1])/10)
densrates = np.zeros(x)
strainrates = np.zeros(x)
time1 = np.zeros(x)
dense1 = np.zeros(x)
time2 = np.zeros(x)
dense2 = np.zeros(x)
for i in range(0,x):
dtime = steadystate_slice[:,1][i] - steadystate_slice[:,1][
(len(steadystate_slice)-(i+1))]
ddense = steadystate_slice[:,7][i] - steadystate_slice[:,7][
(len(steadystate_slice)-(i+1))]
dstrain = steadystate_slice[:,5][i] - steadystate_slice[:,5][
(len(steadystate_slice)-(i+1))]
densrates[i] = (ddense/dtime)/.917
strainrates[i] = dstrain/dtime
dense1[i] = steadystate_slice[:,7][i]
time1[i] = steadystate_slice[:,1][i]
dense2[i] = steadystate_slice[:,7][(len(steadystate_slice)-(i+1))]
time2[i] = steadystate_slice[:,1][(len(steadystate_slice)-(i+1))]
dens_rate = np.mean(densrates)
mean_dens = np.mean(np.array([np.mean(dense1),np.mean(dense2)]))/.917
paper_table.loc[8,'Densification rate'] = dens_rate
paper_table.loc[8, 'Mean dens'] = mean_dens
paper_table.loc[8, 'applied stress'] = applied_pressure_raw[8]/mean_dens
axes[0].plot(np.array([np.mean(time1)/(60*60),np.mean(time2)/(60*60)]),
np.array([np.mean(dense1),np.mean(dense2)]),
label = 'Steady-state rate = ' + "{:.2e}".format(dens_rate))
axes[0].legend(loc = 'lower right', shadow = True)
axes[0].set_ylabel('Density (g/cm^3)')
axes[0].set_xlabel('Time (hours)')
steadystate_slice = data_list1[5][(data_list1[5][:,1]/(60*60)>=140)]
axes[1].set_title('Compaction Id 6, Fourth Step')
axes[1].plot(steadystate_slice [:,1]/(60*60), steadystate_slice [:,7])
x = int(len(steadystate_slice[:,1])/10)
densrates = np.zeros(x)
strainrates = np.zeros(x)
time1 = np.zeros(x)
dense1 = np.zeros(x)
time2 = np.zeros(x)
dense2 = np.zeros(x)
for i in range(0,x):
dtime = steadystate_slice[:,1][i] - steadystate_slice[:,1][
(len(steadystate_slice)-(i+1))]
ddense = steadystate_slice[:,7][i] - steadystate_slice[:,7][
(len(steadystate_slice)-(i+1))]
dstrain = steadystate_slice[:,5][i] - steadystate_slice[:,5][
(len(steadystate_slice)-(i+1))]
densrates[i] = (ddense/dtime)/.917
strainrates[i] = dstrain/dtime
dense1[i] = steadystate_slice[:,7][i]
time1[i] = steadystate_slice[:,1][i]
dense2[i] = steadystate_slice[:,7][(len(steadystate_slice)-(i+1))]
time2[i] = steadystate_slice[:,1][(len(steadystate_slice)-(i+1))]
dens_rate = np.mean(densrates)
mean_dens = np.mean(np.array([np.mean(dense1),np.mean(dense2)]))/.917
paper_table.loc[7,'Densification rate'] = dens_rate
paper_table.loc[7, 'Mean dens'] = mean_dens
paper_table.loc[7, 'applied stress'] = applied_pressure_raw[7]/mean_dens
axes[1].plot(np.array([np.mean(time1)/(60*60),np.mean(time2)/(60*60)]),
np.array([np.mean(dense1),np.mean(dense2)]),
label = 'Steady-state rate = ' + "{:.2e}".format(dens_rate))
axes[1].legend(loc = 'lower right', shadow = True)
axes[1].set_ylabel('Density (g/cm^3)')
axes[1].set_xlabel('Time (hours)')
steadystate_slice = data_list1[0][(data_list1[0][:,1]/(60*60)>=140)]
axes[2].set_title('Compaction Id 1, Second Step')
axes[2].plot(steadystate_slice [:,1]/(60*60), steadystate_slice [:,7])
x = int(len(steadystate_slice[:,1])/10)
densrates = np.zeros(x)
strainrates = np.zeros(x)
time1 = np.zeros(x)
dense1 = np.zeros(x)
time2 = np.zeros(x)
dense2 = np.zeros(x)
for i in range(0,x):
dtime = steadystate_slice[:,1][i] - steadystate_slice[:,1][
(len(steadystate_slice)-(i+1))]
ddense = steadystate_slice[:,7][i] - steadystate_slice[:,7][
(len(steadystate_slice)-(i+1))]
dstrain = steadystate_slice[:,5][i] - steadystate_slice[:,5][
(len(steadystate_slice)-(i+1))]
densrates[i] = (ddense/dtime)/.917
strainrates[i] = dstrain/dtime
dense1[i] = steadystate_slice[:,7][i]
time1[i] = steadystate_slice[:,1][i]
dense2[i] = steadystate_slice[:,7][(len(steadystate_slice)-(i+1))]
time2[i] = steadystate_slice[:,1][(len(steadystate_slice)-(i+1))]
dens_rate = np.mean(densrates)
mean_dens = np.mean(np.array([np.mean(dense1),np.mean(dense2)]))/.917
paper_table.loc[5,'Densification rate'] = dens_rate
paper_table.loc[5, 'Mean dens'] = mean_dens
paper_table.loc[5, 'applied stress'] = applied_pressure_raw[5]/mean_dens
axes[2].plot(np.array([np.mean(time1)/(60*60),np.mean(time2)/(60*60)]),
np.array([np.mean(dense1),np.mean(dense2)]),
label = 'Steady-state rate = ' + "{:.2e}".format(dens_rate))
axes[2].legend(loc = 'lower right', shadow = True)
axes[2].set_ylabel('Density (g/cm^3)')
axes[2].set_xlabel('Time (hours)')
fig, axes = plt.subplots(1, 3, figsize=(20,4.5))  # initialize row of subplots
axes[0].set_title('Compaction Id 2, Second Step')
steadystate_slice = data_list1[1][(data_list1[1][:,1]/(60*60)>=60)
&(data_list1[1][:,1]/(60*60)<=79)]
axes[0].plot(steadystate_slice [:,1]/(60*60), steadystate_slice [:,7])
x = int(len(steadystate_slice[:,1])/10)
densrates = np.zeros(x)
strainrates = np.zeros(x)
time1 = np.zeros(x)
dense1 = np.zeros(x)
time2 = np.zeros(x)
dense2 = np.zeros(x)
for i in range(0,x):
dtime = steadystate_slice[:,1][i] - steadystate_slice[:,1][
(len(steadystate_slice)-(i+1))]
ddense = steadystate_slice[:,7][i] - steadystate_slice[:,7][
(len(steadystate_slice)-(i+1))]
dstrain = steadystate_slice[:,5][i] - steadystate_slice[:,5][
(len(steadystate_slice)-(i+1))]
densrates[i] = (ddense/dtime)/.917
strainrates[i] = dstrain/dtime
dense1[i] = steadystate_slice[:,7][i]
time1[i] = steadystate_slice[:,1][i]
dense2[i] = steadystate_slice[:,7][(len(steadystate_slice)-(i+1))]
time2[i] = steadystate_slice[:,1][(len(steadystate_slice)-(i+1))]
dens_rate = np.mean(densrates)
mean_dens = np.mean(np.array([np.mean(dense1),np.mean(dense2)]))/.917
paper_table.loc[4,'Densification rate'] = dens_rate
paper_table.loc[4, 'Mean dens'] = mean_dens
paper_table.loc[4, 'applied stress'] = applied_pressure_raw[4]/mean_dens
axes[0].plot(np.array([np.mean(time1)/(60*60),np.mean(time2)/(60*60)]),
np.array([np.mean(dense1),np.mean(dense2)]),
label = 'Steady-state rate = ' + "{:.2e}".format(dens_rate))
axes[0].legend(loc = 'lower right', shadow = True)
axes[0].set_ylabel('Density (g/cm^3)')
axes[0].set_xlabel('Time (hours)')
axes[1].set_title('Compaction Id 2, Third Step')
steadystate_slice = data_list1[1][(data_list1[1][:,1]/(60*60)>=80)
&(data_list1[1][:,1]/(60*60)<=100)]
axes[1].plot(steadystate_slice [:,1]/(60*60), steadystate_slice [:,7])
x = int(len(steadystate_slice[:,1])/10)
densrates = np.zeros(x)
strainrates = np.zeros(x)
time1 = np.zeros(x)
dense1 = np.zeros(x)
time2 = np.zeros(x)
dense2 = np.zeros(x)
for i in range(0,x):
dtime = steadystate_slice[:,1][i] - steadystate_slice[:,1][
(len(steadystate_slice)-(i+1))]
ddense = steadystate_slice[:,7][i] - steadystate_slice[:,7][
(len(steadystate_slice)-(i+1))]
dstrain = steadystate_slice[:,5][i] - steadystate_slice[:,5][
(len(steadystate_slice)-(i+1))]
densrates[i] = (ddense/dtime)/.917
strainrates[i] = dstrain/dtime
dense1[i] = steadystate_slice[:,7][i]
time1[i] = steadystate_slice[:,1][i]
dense2[i] = steadystate_slice[:,7][(len(steadystate_slice)-(i+1))]
time2[i] = steadystate_slice[:,1][(len(steadystate_slice)-(i+1))]
dens_rate = np.mean(densrates)
mean_dens = np.mean(np.array([np.mean(dense1),np.mean(dense2)]))/.917
paper_table.loc[1,'Densification rate'] = dens_rate
paper_table.loc[1, 'Mean dens'] = mean_dens
paper_table.loc[1, 'applied stress'] = applied_pressure_raw[1]/mean_dens
axes[1].plot(np.array([np.mean(time1)/(60*60),np.mean(time2)/(60*60)]),
np.array([np.mean(dense1),np.mean(dense2)]),
label = 'Steady-state rate = ' + "{:.2e}".format(dens_rate))
axes[1].legend(loc = 'lower right', shadow = True)
axes[1].set_ylabel('Density (g/cm^3)')
axes[1].set_xlabel('Time (hours)')
axes[2].set_title('Compaction Id 2, Fourth Step')
steadystate_slice = data_list1[1][(data_list1[1][:,1]/(60*60)>=125)]
axes[2].plot(steadystate_slice [:,1]/(60*60), steadystate_slice [:,7])
x = int(len(steadystate_slice[:,1])/10)
densrates = np.zeros(x)
strainrates = np.zeros(x)
time1 = np.zeros(x)
dense1 = np.zeros(x)
time2 = np.zeros(x)
dense2 = np.zeros(x)
for i in range(0,x):
dtime = steadystate_slice[:,1][i] - steadystate_slice[:,1][
(len(steadystate_slice)-(i+1))]
ddense = steadystate_slice[:,7][i] - steadystate_slice[:,7][
(len(steadystate_slice)-(i+1))]
dstrain = steadystate_slice[:,5][i] - steadystate_slice[:,5][
(len(steadystate_slice)-(i+1))]
densrates[i] = (ddense/dtime)/.917
strainrates[i] = dstrain/dtime
dense1[i] = steadystate_slice[:,7][i]
time1[i] = steadystate_slice[:,1][i]
dense2[i] = steadystate_slice[:,7][(len(steadystate_slice)-(i+1))]
time2[i] = steadystate_slice[:,1][(len(steadystate_slice)-(i+1))]
dens_rate = np.mean(densrates)
mean_dens = np.mean(np.array([np.mean(dense1),np.mean(dense2)]))/.917
paper_table.loc[2,'Densification rate'] = dens_rate
paper_table.loc[2, 'Mean dens'] = mean_dens
paper_table.loc[2, 'applied stress'] = applied_pressure_raw[2]/mean_dens
axes[2].plot(np.array([np.mean(time1)/(60*60),np.mean(time2)/(60*60)]),
np.array([np.mean(dense1),np.mean(dense2)]),
label = 'Steady-state rate = ' + "{:.2e}".format(dens_rate))
axes[2].legend(loc = 'lower right', shadow = True)
axes[2].set_ylabel('Density (g/cm^3)')
axes[2].set_xlabel('Time (hours)')
fig, axes = plt.subplots(1, 1, figsize=(4.8,4))  # initialize single subplot
axes.set_title('Compaction Id 7, Single Step')
steadystate_slice = data_list1[6][(data_list1[6][:,1]/(60*60)>=100)
&(data_list1[6][:,1]/(60*60)<=170)]
axes.plot(steadystate_slice [:,1]/(60*60), steadystate_slice [:,7])
x = int(len(steadystate_slice[:,1])/10)
densrates = np.zeros(x)
strainrates = np.zeros(x)
time1 = np.zeros(x)
dense1 = np.zeros(x)
time2 = np.zeros(x)
dense2 = np.zeros(x)
for i in range(0,x):
dtime = steadystate_slice[:,1][i] - steadystate_slice[:,1][
(len(steadystate_slice)-(i+1))]
ddense = steadystate_slice[:,7][i] - steadystate_slice[:,7][
(len(steadystate_slice)-(i+1))]
dstrain = steadystate_slice[:,5][i] - steadystate_slice[:,5][
(len(steadystate_slice)-(i+1))]
densrates[i] = (ddense/dtime)/.917
strainrates[i] = dstrain/dtime
dense1[i] = steadystate_slice[:,7][i]
time1[i] = steadystate_slice[:,1][i]
dense2[i] = steadystate_slice[:,7][(len(steadystate_slice)-(i+1))]
time2[i] = steadystate_slice[:,1][(len(steadystate_slice)-(i+1))]
dens_rate = np.mean(densrates)
mean_dens = np.mean(np.array([np.mean(dense1),np.mean(dense2)]))/.917
paper_table.loc[15,'Densification rate'] = dens_rate
paper_table.loc[15, 'Mean dens'] = mean_dens
paper_table.loc[15, 'applied stress'] = applied_pressure_raw[15]/mean_dens
axes.plot(np.array([np.mean(time1)/(60*60),np.mean(time2)/(60*60)]),
np.array([np.mean(dense1),np.mean(dense2)]),
label = 'Steady-state rate = ' + "{:.2e}".format(dens_rate))
axes.legend(loc = 'lower right', shadow = True)
axes.set_ylabel('Density (g/cm^3)')
axes.set_xlabel('Time (hours)')
paper_table.to_csv('data/paper_table_full.csv', index = None, header=True)
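
The sixteen near-identical blocks above differ only in the source array, the steady-state time window, and the destination row of paper_table. A hedged sketch of how the repeated computation could be factored into a single helper (an editor's illustration, not code from the repository):

```python
import numpy as np

ICE_DENSITY = 0.917  # g/cm^3, density of pure ice

def steady_state_rate(arr, t_min_hr=None, t_max_hr=None):
    """Mean steady-state densification rate for one compaction array.

    Assumes, as in the script above, that column 1 is time in seconds
    and column 7 is density in g/cm^3; the window bounds are in hours.
    """
    t_hr = arr[:, 1] / 3600.0
    mask = np.ones(len(arr), dtype=bool)
    if t_min_hr is not None:
        mask &= t_hr >= t_min_hr
    if t_max_hr is not None:
        mask &= t_hr <= t_max_hr
    s = arr[mask]
    n = len(s) // 10  # same 10% pairing depth as the loops above
    head, tail = s[:n], s[::-1][:n]  # pair i-th from start with i-th from end
    rates = (head[:, 7] - tail[:, 7]) / (head[:, 1] - tail[:, 1]) / ICE_DENSITY
    mean_dens = (head[:, 7].mean() + tail[:, 7].mean()) / 2 / ICE_DENSITY
    return rates.mean(), mean_dens
```

Each block would then reduce to a call such as `dens_rate, mean_dens = steady_state_rate(data_list1[0], 10, 115)` plus its plotting and paper_table bookkeeping.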

avg_line_length: 41.245665; max_line_length: 82; alphanum_fraction: 0.6509

Quality signals (qsc_<name>_quality_signal): code_num_words = 4,382; code_num_chars = 28,542; code_mean_word_length = 4.10566; code_frac_words_unique = 0.0534; code_frac_chars_top_2grams = 0.270357; code_frac_chars_top_3grams = 0.105831; code_frac_chars_top_4grams = 0.088933; code_frac_chars_dupe_5grams = 0.91857; code_frac_chars_dupe_6grams = 0.887277; code_frac_chars_dupe_7grams = 0.868712; code_frac_chars_dupe_8grams = 0.802957; code_frac_chars_dupe_9grams = 0.802957; code_frac_chars_dupe_10grams = 0.802957; code_frac_chars_replacement_symbols = 0; code_frac_chars_digital = 0.060505; code_frac_chars_whitespace = 0.141826; code_size_file_byte = 28,542; code_num_lines = 691; code_num_chars_line_max = 83; code_num_chars_line_mean = 41.305355; code_frac_chars_alphabet = 0.674002; code_frac_chars_comments = 0.032829; code_cate_xml_start = 0; code_frac_lines_dupe_lines = 0.798623; code_cate_autogen = 0; code_frac_lines_long_string = 0; code_frac_chars_string_length = 0.083161; code_frac_chars_long_word_length = 0.000907; code_frac_lines_string_concat = 0.027539; code_cate_encoded_data = 0; code_frac_chars_hex_words = 0; code_frac_lines_prompt_comments = 0; code_frac_lines_assert = 0; codepython_cate_ast = 1; codepython_frac_lines_func_ratio = 0; codepython_cate_var_zero = false; codepython_frac_lines_pass = 0; codepython_frac_lines_import = 0.008606; codepython_frac_lines_simplefunc = 0; codepython_score_lines_no_logic = 0.008606; codepython_frac_lines_print = 0.003442

Filter flags (qsc_<name>): code_num_words = 0; code_num_chars = 0; code_mean_word_length = 0; code_frac_words_unique = null; code_frac_chars_top_2grams = 1; code_frac_chars_top_3grams = 0; code_frac_chars_top_4grams = 0; code_frac_chars_dupe_5grams = 1; code_frac_chars_dupe_6grams = 1; code_frac_chars_dupe_7grams = 1; code_frac_chars_dupe_8grams = 1; code_frac_chars_dupe_9grams = 1; code_frac_chars_dupe_10grams = 1; code_frac_chars_replacement_symbols = 0; code_frac_chars_digital = 0; code_frac_chars_whitespace = 0; code_size_file_byte = 0; code_num_lines = 0; code_num_chars_line_max = 0; code_num_chars_line_mean = 0; code_frac_chars_alphabet = 0; code_frac_chars_comments = 0; code_cate_xml_start = 0; code_frac_lines_dupe_lines = 1; code_cate_autogen = 0; code_frac_lines_long_string = 0; code_frac_chars_string_length = 0; code_frac_chars_long_word_length = 0; code_frac_lines_string_concat = null; code_cate_encoded_data = 0; code_frac_chars_hex_words = 0; code_frac_lines_prompt_comments = 0; code_frac_lines_assert = 0; codepython_cate_ast = 0; codepython_frac_lines_func_ratio = 0; codepython_cate_var_zero = 0; codepython_frac_lines_pass = 0; codepython_frac_lines_import = 0; codepython_frac_lines_simplefunc = 0; codepython_score_lines_no_logic = 0; codepython_frac_lines_print = 0

effective: 0; hits: 8

| field | value |
|---|---|
| hexsha | ac043cab1d05cb93a5d913adb8d24e76f7373000 |
| size | 10,243 |
| ext | py |
| lang | Python |
| max_stars / max_issues / max_forks repo_path | alembic/versions/da0fe56f616b_create_models_for_experiment_objects.py |
| max_stars / max_issues / max_forks repo_name | notconfusing/CivilServant |
| max_stars / max_issues / max_forks repo_head_hexsha | f9c7a2cf4de4f6506e37b7c33a7e512b893069c3 |
| max_stars / max_issues / max_forks repo_licenses | ["MIT"] |
| max_stars_count | 17 (star events 2017-03-13T15:14:57.000Z to 2020-01-07T19:12:49.000Z) |
| max_issues_count | 32 (issue events 2016-06-08T03:35:43.000Z to 2016-11-30T18:50:49.000Z) |
| max_forks_count | 4 (fork events 2018-07-11T23:36:28.000Z to 2019-11-16T19:32:33.000Z) |

content:
"""create models for experiment objects
Revision ID: da0fe56f616b
Revises: 022d8114fe2a
Create Date: 2016-07-23 19:18:48.388073
"""
# revision identifiers, used by Alembic.
revision = 'da0fe56f616b'
down_revision = '022d8114fe2a'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import mysql
def upgrade(engine_name):
globals()["upgrade_%s" % engine_name]()
def downgrade(engine_name):
globals()["downgrade_%s" % engine_name]()
def upgrade_development():
### commands auto generated by Alembic - please adjust! ###
op.create_table('experiment_actions',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('created_at', sa.DateTime(), nullable=True),
sa.Column('experiment_id', sa.Integer(), nullable=True),
sa.Column('praw_key_id', sa.String(length=256), nullable=True),
sa.Column('action_subject_type', sa.String(length=64), nullable=True),
sa.Column('action_subject_id', sa.String(length=256), nullable=True),
sa.Column('action_object_type', sa.String(length=64), nullable=True),
sa.Column('action_object_id', sa.String(length=256), nullable=True),
sa.Column('metadata_json', mysql.MEDIUMTEXT(), nullable=True),
sa.PrimaryKeyConstraint('id')
)
op.create_index(op.f('ix_experiment_actions_action_object_id'), 'experiment_actions', ['action_object_id'], unique=False)
op.create_index(op.f('ix_experiment_actions_action_subject_id'), 'experiment_actions', ['action_subject_id'], unique=False)
op.create_index(op.f('ix_experiment_actions_experiment_id'), 'experiment_actions', ['experiment_id'], unique=False)
op.create_index(op.f('ix_experiment_actions_praw_key_id'), 'experiment_actions', ['praw_key_id'], unique=False)
op.create_table('experiment_things',
sa.Column('id', sa.String(length=256), nullable=False),
sa.Column('created_at', sa.DateTime(), nullable=True),
sa.Column('object_type', sa.Integer(), nullable=True),
sa.Column('experiment_id', sa.Integer(), nullable=True),
sa.Column('object_created', sa.DateTime(), nullable=True),
sa.Column('metadata_json', mysql.MEDIUMTEXT(), nullable=True),
sa.PrimaryKeyConstraint('id')
)
op.create_index(op.f('ix_experiment_things_experiment_id'), 'experiment_things', ['experiment_id'], unique=False)
op.create_table('experiments',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('created_at', sa.DateTime(), nullable=True),
sa.Column('name', sa.String(length=256), nullable=False),
sa.Column('controller', sa.String(length=256), nullable=False),
sa.Column('start_time', sa.DateTime(), nullable=True),
sa.Column('end_time', sa.DateTime(), nullable=True),
sa.Column('settings_json', mysql.MEDIUMTEXT(), nullable=True),
sa.PrimaryKeyConstraint('id')
)
op.create_index(op.f('ix_experiments_name'), 'experiments', ['name'], unique=False)
### end Alembic commands ###
def downgrade_development():
### commands auto generated by Alembic - please adjust! ###
op.drop_index(op.f('ix_experiments_name'), table_name='experiments')
op.drop_table('experiments')
op.drop_index(op.f('ix_experiment_things_experiment_id'), table_name='experiment_things')
op.drop_table('experiment_things')
op.drop_index(op.f('ix_experiment_actions_praw_key_id'), table_name='experiment_actions')
op.drop_index(op.f('ix_experiment_actions_experiment_id'), table_name='experiment_actions')
op.drop_index(op.f('ix_experiment_actions_action_subject_id'), table_name='experiment_actions')
op.drop_index(op.f('ix_experiment_actions_action_object_id'), table_name='experiment_actions')
op.drop_table('experiment_actions')
### end Alembic commands ###
def upgrade_test():
### commands auto generated by Alembic - please adjust! ###
op.create_table('experiment_actions',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('created_at', sa.DateTime(), nullable=True),
sa.Column('experiment_id', sa.Integer(), nullable=True),
sa.Column('praw_key_id', sa.String(length=256), nullable=True),
sa.Column('action_subject_type', sa.String(length=64), nullable=True),
sa.Column('action_subject_id', sa.String(length=256), nullable=True),
sa.Column('action_object_type', sa.String(length=64), nullable=True),
sa.Column('action_object_id', sa.String(length=256), nullable=True),
sa.Column('metadata_json', mysql.MEDIUMTEXT(), nullable=True),
sa.PrimaryKeyConstraint('id')
)
op.create_index(op.f('ix_experiment_actions_action_object_id'), 'experiment_actions', ['action_object_id'], unique=False)
op.create_index(op.f('ix_experiment_actions_action_subject_id'), 'experiment_actions', ['action_subject_id'], unique=False)
op.create_index(op.f('ix_experiment_actions_experiment_id'), 'experiment_actions', ['experiment_id'], unique=False)
op.create_index(op.f('ix_experiment_actions_praw_key_id'), 'experiment_actions', ['praw_key_id'], unique=False)
op.create_table('experiment_things',
sa.Column('id', sa.String(length=256), nullable=False),
sa.Column('created_at', sa.DateTime(), nullable=True),
sa.Column('object_type', sa.Integer(), nullable=True),
sa.Column('experiment_id', sa.Integer(), nullable=True),
sa.Column('object_created', sa.DateTime(), nullable=True),
sa.Column('metadata_json', mysql.MEDIUMTEXT(), nullable=True),
sa.PrimaryKeyConstraint('id')
)
op.create_index(op.f('ix_experiment_things_experiment_id'), 'experiment_things', ['experiment_id'], unique=False)
op.create_table('experiments',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('created_at', sa.DateTime(), nullable=True),
sa.Column('name', sa.String(length=256), nullable=False),
sa.Column('controller', sa.String(length=256), nullable=False),
sa.Column('start_time', sa.DateTime(), nullable=True),
sa.Column('end_time', sa.DateTime(), nullable=True),
sa.Column('settings_json', mysql.MEDIUMTEXT(), nullable=True),
sa.PrimaryKeyConstraint('id')
)
op.create_index(op.f('ix_experiments_name'), 'experiments', ['name'], unique=False)
### end Alembic commands ###
def downgrade_test():
### commands auto generated by Alembic - please adjust! ###
op.drop_index(op.f('ix_experiments_name'), table_name='experiments')
op.drop_table('experiments')
op.drop_index(op.f('ix_experiment_things_experiment_id'), table_name='experiment_things')
op.drop_table('experiment_things')
op.drop_index(op.f('ix_experiment_actions_praw_key_id'), table_name='experiment_actions')
op.drop_index(op.f('ix_experiment_actions_experiment_id'), table_name='experiment_actions')
op.drop_index(op.f('ix_experiment_actions_action_subject_id'), table_name='experiment_actions')
op.drop_index(op.f('ix_experiment_actions_action_object_id'), table_name='experiment_actions')
op.drop_table('experiment_actions')
### end Alembic commands ###
def upgrade_production():
### commands auto generated by Alembic - please adjust! ###
op.create_table('experiment_actions',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('created_at', sa.DateTime(), nullable=True),
sa.Column('experiment_id', sa.Integer(), nullable=True),
sa.Column('praw_key_id', sa.String(length=256), nullable=True),
sa.Column('action_subject_type', sa.String(length=64), nullable=True),
sa.Column('action_subject_id', sa.String(length=256), nullable=True),
sa.Column('action_object_type', sa.String(length=64), nullable=True),
sa.Column('action_object_id', sa.String(length=256), nullable=True),
sa.Column('metadata_json', mysql.MEDIUMTEXT(), nullable=True),
sa.PrimaryKeyConstraint('id')
)
op.create_index(op.f('ix_experiment_actions_action_object_id'), 'experiment_actions', ['action_object_id'], unique=False)
op.create_index(op.f('ix_experiment_actions_action_subject_id'), 'experiment_actions', ['action_subject_id'], unique=False)
op.create_index(op.f('ix_experiment_actions_experiment_id'), 'experiment_actions', ['experiment_id'], unique=False)
op.create_index(op.f('ix_experiment_actions_praw_key_id'), 'experiment_actions', ['praw_key_id'], unique=False)
op.create_table('experiment_things',
sa.Column('id', sa.String(length=256), nullable=False),
sa.Column('created_at', sa.DateTime(), nullable=True),
sa.Column('object_type', sa.Integer(), nullable=True),
sa.Column('experiment_id', sa.Integer(), nullable=True),
sa.Column('object_created', sa.DateTime(), nullable=True),
sa.Column('metadata_json', mysql.MEDIUMTEXT(), nullable=True),
sa.PrimaryKeyConstraint('id')
)
op.create_index(op.f('ix_experiment_things_experiment_id'), 'experiment_things', ['experiment_id'], unique=False)
op.create_table('experiments',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('created_at', sa.DateTime(), nullable=True),
sa.Column('name', sa.String(length=256), nullable=False),
sa.Column('controller', sa.String(length=256), nullable=False),
sa.Column('start_time', sa.DateTime(), nullable=True),
sa.Column('end_time', sa.DateTime(), nullable=True),
sa.Column('settings_json', mysql.MEDIUMTEXT(), nullable=True),
sa.PrimaryKeyConstraint('id')
)
op.create_index(op.f('ix_experiments_name'), 'experiments', ['name'], unique=False)
### end Alembic commands ###
def downgrade_production():
### commands auto generated by Alembic - please adjust! ###
op.drop_index(op.f('ix_experiments_name'), table_name='experiments')
op.drop_table('experiments')
op.drop_index(op.f('ix_experiment_things_experiment_id'), table_name='experiment_things')
op.drop_table('experiment_things')
op.drop_index(op.f('ix_experiment_actions_praw_key_id'), table_name='experiment_actions')
op.drop_index(op.f('ix_experiment_actions_experiment_id'), table_name='experiment_actions')
op.drop_index(op.f('ix_experiment_actions_action_subject_id'), table_name='experiment_actions')
op.drop_index(op.f('ix_experiment_actions_action_object_id'), table_name='experiment_actions')
op.drop_table('experiment_actions')
### end Alembic commands ###
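
The development, test, and production bodies above are byte-for-byte identical; the top-level upgrade()/downgrade() entry points dispatch to them by engine name through globals(). A hedged sketch of how that duplication could be collapsed (an editor's illustration, not part of the original migration):

```python
# Hypothetical consolidation: register one shared implementation under each
# engine-specific name that the dispatching upgrade()/downgrade() expect.
def _upgrade_impl():
    ...  # the create_table / create_index calls shown above

def _downgrade_impl():
    ...  # the drop_index / drop_table calls shown above

for _engine in ('development', 'test', 'production'):
    globals()['upgrade_%s' % _engine] = _upgrade_impl
    globals()['downgrade_%s' % _engine] = _downgrade_impl
```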

avg_line_length: 51.994924; max_line_length: 127; alphanum_fraction: 0.728205

Quality signals (qsc_<name>_quality_signal): code_num_words = 1,377; code_num_chars = 10,243; code_mean_word_length = 5.148874; code_frac_words_unique = 0.063181; code_frac_chars_top_2grams = 0.074471; code_frac_chars_top_3grams = 0.100705; code_frac_chars_top_4grams = 0.118477; code_frac_chars_dupe_5grams = 0.945839; code_frac_chars_dupe_6grams = 0.945839; code_frac_chars_dupe_7grams = 0.945839; code_frac_chars_dupe_8grams = 0.945839; code_frac_chars_dupe_9grams = 0.945839; code_frac_chars_dupe_10grams = 0.938787; code_frac_chars_replacement_symbols = 0; code_frac_chars_digital = 0.01251; code_frac_chars_whitespace = 0.110319; code_size_file_byte = 10,243; code_num_lines = 196; code_num_chars_line_max = 128; code_num_chars_line_mean = 52.260204; code_frac_chars_alphabet = 0.7655; code_frac_chars_comments = 0.059943; code_cate_xml_start = 0; code_frac_lines_dupe_lines = 0.832258; code_cate_autogen = 0; code_frac_lines_long_string = 0; code_frac_chars_string_length = 0.321253; code_frac_chars_long_word_length = 0.112496; code_frac_lines_string_concat = 0; code_cate_encoded_data = 0; code_frac_chars_hex_words = 0; code_frac_lines_prompt_comments = 0; code_frac_lines_assert = 0; codepython_cate_ast = 1; codepython_frac_lines_func_ratio = 0.051613; codepython_cate_var_zero = false; codepython_frac_lines_pass = 0; codepython_frac_lines_import = 0.019355; codepython_frac_lines_simplefunc = 0; codepython_score_lines_no_logic = 0.070968; codepython_frac_lines_print = 0

Filter flags (qsc_<name>): code_num_words = 0; code_num_chars = 0; code_mean_word_length = 0; code_frac_words_unique = null; code_frac_chars_top_2grams = 0; code_frac_chars_top_3grams = 0; code_frac_chars_top_4grams = 0; code_frac_chars_dupe_5grams = 1; code_frac_chars_dupe_6grams = 1; code_frac_chars_dupe_7grams = 1; code_frac_chars_dupe_8grams = 1; code_frac_chars_dupe_9grams = 1; code_frac_chars_dupe_10grams = 1; code_frac_chars_replacement_symbols = 0; code_frac_chars_digital = 0; code_frac_chars_whitespace = 0; code_size_file_byte = 0; code_num_lines = 0; code_num_chars_line_max = 0; code_num_chars_line_mean = 0; code_frac_chars_alphabet = 0; code_frac_chars_comments = 0; code_cate_xml_start = 0; code_frac_lines_dupe_lines = 1; code_cate_autogen = 0; code_frac_lines_long_string = 0; code_frac_chars_string_length = 0; code_frac_chars_long_word_length = 0; code_frac_lines_string_concat = null; code_cate_encoded_data = 0; code_frac_chars_hex_words = 0; code_frac_lines_prompt_comments = 0; code_frac_lines_assert = 0; codepython_cate_ast = 0; codepython_frac_lines_func_ratio = 0; codepython_cate_var_zero = 0; codepython_frac_lines_pass = 0; codepython_frac_lines_import = 0; codepython_frac_lines_simplefunc = 0; codepython_score_lines_no_logic = 0; codepython_frac_lines_print = 0

effective: 0; hits: 7
| field | value |
|---|---|
| hexsha | ac37fa0e7b54f16872b8c3a76e823c12fe181827 |
| size | 168 |
| ext | py |
| lang | Python |
| max_stars / max_issues / max_forks repo_path | language-python-test/test/features/strings/raw_bytestring_v3.py |
| max_stars / max_issues / max_forks repo_name | wbadart/language-python |
| max_stars / max_issues / max_forks repo_head_hexsha | 6c048c215ff7fe4a5d5cc36ba3c17a666af74821 |
| max_stars / max_issues / max_forks repo_licenses | ["BSD-3-Clause"] |
| max_stars_count / max_issues_count / max_forks_count | null |
| all star/issue/fork event min/max datetimes | null |

content:
rb'hello \n world'
rb"hello \n world"
br'hello \n world'
br"hello \n world"
rb"""hello \n world"""
br"""hello \n world"""
rb'''hello \n world'''
br'''hello \n world'''
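
The eight literals above exercise both orderings of the raw and bytes prefixes across all three quote styles; raw mode keeps the backslash rather than interpreting \n, so every literal denotes the same 14-byte value. A quick check of that semantics (standard Python behavior, not part of the test file):

```python
# Raw bytes literals keep the backslash: two characters, not a newline.
assert rb'hello \n world' == b'hello \\n world'
assert len(rb'hello \n world') == 14
# Prefix order and quote style do not change the value.
assert rb"hello \n world" == br'''hello \n world'''
```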

avg_line_length: 18.666667; max_line_length: 22; alphanum_fraction: 0.619048

Quality signals (qsc_<name>_quality_signal): code_num_words = 32; code_num_chars = 168; code_mean_word_length = 3.25; code_frac_words_unique = 0.15625; code_frac_chars_top_2grams = 0.461538; code_frac_chars_top_3grams = 0.846154; code_frac_chars_top_4grams = 0.5; code_frac_chars_dupe_5grams = 0.980769; code_frac_chars_dupe_6grams = 0.980769; code_frac_chars_dupe_7grams = 0.980769; code_frac_chars_dupe_8grams = 0.980769; code_frac_chars_dupe_9grams = 0.980769; code_frac_chars_dupe_10grams = 0.980769; code_frac_chars_replacement_symbols = 0; code_frac_chars_digital = 0; code_frac_chars_whitespace = 0.142857; code_size_file_byte = 168; code_num_lines = 8; code_num_chars_line_max = 23; code_num_chars_line_mean = 21; code_frac_chars_alphabet = 0.722222; code_frac_chars_comments = 0; code_cate_xml_start = 0; code_frac_lines_dupe_lines = 0.5; code_cate_autogen = 0; code_frac_lines_long_string = 0; code_frac_chars_string_length = 0.636364; code_frac_chars_long_word_length = 0; code_frac_lines_string_concat = 0; code_cate_encoded_data = 0; code_frac_chars_hex_words = 0; code_frac_lines_prompt_comments = 0; code_frac_lines_assert = 0; codepython_cate_ast = 1; codepython_frac_lines_func_ratio = 0; codepython_cate_var_zero = true; codepython_frac_lines_pass = 0; codepython_frac_lines_import = 0; codepython_frac_lines_simplefunc = 0; codepython_score_lines_no_logic = 0; codepython_frac_lines_print = 0

Filter flags (qsc_<name>): code_num_words = 0; code_num_chars = 0; code_mean_word_length = 0; code_frac_words_unique = null; code_frac_chars_top_2grams = 1; code_frac_chars_top_3grams = 1; code_frac_chars_top_4grams = 1; code_frac_chars_dupe_5grams = 1; code_frac_chars_dupe_6grams = 1; code_frac_chars_dupe_7grams = 1; code_frac_chars_dupe_8grams = 1; code_frac_chars_dupe_9grams = 1; code_frac_chars_dupe_10grams = 1; code_frac_chars_replacement_symbols = 0; code_frac_chars_digital = 0; code_frac_chars_whitespace = 0; code_size_file_byte = 0; code_num_lines = 1; code_num_chars_line_max = 0; code_num_chars_line_mean = 0; code_frac_chars_alphabet = 0; code_frac_chars_comments = 0; code_cate_xml_start = 0; code_frac_lines_dupe_lines = 0; code_cate_autogen = 0; code_frac_lines_long_string = 0; code_frac_chars_string_length = 1; code_frac_chars_long_word_length = 0; code_frac_lines_string_concat = null; code_cate_encoded_data = 0; code_frac_chars_hex_words = 0; code_frac_lines_prompt_comments = 0; code_frac_lines_assert = 0; codepython_cate_ast = 0; codepython_frac_lines_func_ratio = 0; codepython_cate_var_zero = 1; codepython_frac_lines_pass = 0; codepython_frac_lines_import = 0; codepython_frac_lines_simplefunc = 0; codepython_score_lines_no_logic = 0; codepython_frac_lines_print = 0

effective: 0; hits: 12
| field | value |
|---|---|
| hexsha | ac73ee6cf5e7e4f259b17d12c069fd5507386f56 |
| size | 20,817 |
| ext | py |
| lang | Python |
| max_stars / max_issues / max_forks repo_path | tests/test_xray.py |
| max_stars / max_issues / max_forks repo_name | jfunez/packtools |
| max_stars / max_issues / max_forks repo_head_hexsha | 3a5fe3fa5bcfb9dfdf57b7822e834d72bae28150 |
| max_stars / max_issues / max_forks repo_licenses | ["BSD-2-Clause"] |
| max_stars_count / max_issues_count / max_forks_count | null |
| all star/issue/fork event min/max datetimes | null |

content:
#coding: utf-8
import unittest
import zipfile
from tempfile import NamedTemporaryFile
from lxml import etree
import mocker
from packtools import xray as x_ray
def make_test_archive(arch_data):
fp = NamedTemporaryFile()
with zipfile.ZipFile(fp, 'w') as zipfp:
for archive, data in arch_data:
zipfp.writestr(archive, data)
return fp
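# Note: callers must keep a reference to the returned NamedTemporaryFile;
# the zip archive is deleted from disk once the object is closed or
# garbage-collected.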
class SPSMixinTests(mocker.MockerTestCase):
def _makeOne(self, fname):
class Foo(x_ray.SPSMixin, x_ray.Xray):
pass
return Foo(fname)
def test_xmls_yields_etree_instances(self):
data = [('bar.xml', b'<root><name>bar</name></root>')]
arch = make_test_archive(data)
pkg = self._makeOne(arch.name)
xmls = pkg.xmls
self.assertIsInstance(xmls.next(), etree._ElementTree)
def test_xml_returns_etree_instance(self):
data = [('bar.xml', b'<root><name>bar</name></root>')]
arch = make_test_archive(data)
pkg = self._makeOne(arch.name)
self.assertIsInstance(pkg.xml, etree._ElementTree)
def test_xml_raises_AttributeError_when_multiple_xmls(self):
data = [
('bar.xml', b'<root><name>bar</name></root>'),
('baz.xml', b'<root><name>baz</name></root>'),
]
arch = make_test_archive(data)
pkg = self._makeOne(arch.name)
self.assertRaises(AttributeError, lambda: pkg.xml)
def test_meta_journal_title_data_is_fetched(self):
data = [
('bar.xml', b'<root><journal-meta><journal-title-group><journal-title>foo</journal-title></journal-title-group></journal-meta></root>'),
]
arch = make_test_archive(data)
pkg = self._makeOne(arch.name)
self.assertEqual(pkg.meta['journal_title'], 'foo')
def test_meta_journal_title_is_None_if_not_present(self):
data = [
('bar.xml', b'<root></root>'),
]
arch = make_test_archive(data)
pkg = self._makeOne(arch.name)
self.assertIsNone(pkg.meta['journal_title'])
def test_meta_journal_eissn_data_is_fetched(self):
data = [
('bar.xml', b'<root><journal-meta><issn pub-type="epub">1234-1234</issn></journal-meta></root>'),
]
arch = make_test_archive(data)
pkg = self._makeOne(arch.name)
self.assertEqual(pkg.meta['journal_eissn'], '1234-1234')
def test_meta_journal_eissn_is_None_if_not_present(self):
data = [
('bar.xml', b'<root></root>'),
]
arch = make_test_archive(data)
pkg = self._makeOne(arch.name)
self.assertIsNone(pkg.meta['journal_eissn'])
def test_meta_journal_pissn_data_is_fetched(self):
data = [
('bar.xml', b'<root><journal-meta><issn pub-type="ppub">1234-1234</issn></journal-meta></root>'),
]
arch = make_test_archive(data)
pkg = self._makeOne(arch.name)
self.assertEqual(pkg.meta['journal_pissn'], '1234-1234')
def test_meta_journal_pissn_is_None_if_not_present(self):
data = [
('bar.xml', b'<root></root>'),
]
arch = make_test_archive(data)
pkg = self._makeOne(arch.name)
self.assertIsNone(pkg.meta['journal_pissn'])
def test_meta_article_title_data_is_fetched(self):
data = [
('bar.xml', b'<root><article-meta><title-group><article-title>bar</article-title></title-group></article-meta></root>'),
]
arch = make_test_archive(data)
pkg = self._makeOne(arch.name)
self.assertEqual(pkg.meta['article_title'], 'bar')
def test_meta_article_title_is_None_if_not_present(self):
data = [
('bar.xml', b'<root></root>'),
]
arch = make_test_archive(data)
pkg = self._makeOne(arch.name)
self.assertIsNone(pkg.meta['article_title'])
def test_meta_issue_year_data_is_fetched(self):
data = [
('bar.xml', b'<root><article-meta><pub-date><year>2013</year></pub-date></article-meta></root>'),
]
arch = make_test_archive(data)
pkg = self._makeOne(arch.name)
self.assertEqual(pkg.meta['issue_year'], '2013')
def test_meta_issue_year_is_None_if_not_present(self):
data = [
('bar.xml', b'<root></root>'),
]
arch = make_test_archive(data)
pkg = self._makeOne(arch.name)
self.assertIsNone(pkg.meta['issue_year'])
def test_meta_issue_volume_data_is_fetched(self):
data = [
('bar.xml', b'<root><article-meta><volume>2</volume></article-meta></root>'),
]
arch = make_test_archive(data)
pkg = self._makeOne(arch.name)
self.assertEqual(pkg.meta['issue_volume'], '2')
def test_meta_issue_volume_is_None_if_not_present(self):
data = [
('bar.xml', b'<root></root>'),
]
arch = make_test_archive(data)
pkg = self._makeOne(arch.name)
self.assertIsNone(pkg.meta['issue_volume'])
def test_meta_issue_number_data_is_fetched(self):
data = [
('bar.xml', b'<root><article-meta><issue>2</issue></article-meta></root>'),
]
arch = make_test_archive(data)
pkg = self._makeOne(arch.name)
self.assertEqual(pkg.meta['issue_number'], '2')
def test_meta_issue_number_is_None_if_not_present(self):
data = [
('bar.xml', b'<root></root>'),
]
arch = make_test_archive(data)
pkg = self._makeOne(arch.name)
self.assertIsNone(pkg.meta['issue_number'])
def test_meta_is_not_valid(self):
data = [
('bar.xml', b'<root></root>'),
]
arch = make_test_archive(data)
pkg = self._makeOne(arch.name)
self.assertFalse(pkg.is_valid_meta())
def test_meta_is_valid(self):
data = [
('bar.xml', b'<root><journal-meta><issn pub-type="ppub">12-34</issn></journal-meta><article-meta><issue>3</issue><title-group><article-title>Titulo de artigo</article-title></title-group></article-meta></root>'),
]
arch = make_test_archive(data)
pkg = self._makeOne(arch.name)
self.assertTrue(pkg.is_valid_meta())
def test_is_valid_schema_with_valid_xml(self):
data = [('bar.xml', b'''<?xml version="1.0" encoding="utf-8"?>
<article article-type="in-brief" dtd-version="1.0" xml:lang="en" xmlns:xlink="http://www.w3.org/1999/xlink" xmlns:mml="http://www.w3.org/1998/Math/MathML">
<front>
<journal-meta>
<journal-id journal-id-type="nlm-ta">Bull World Health Organ</journal-id>
<journal-title-group>
<journal-title>Bulletin of the World Health Organization</journal-title>
<abbrev-journal-title abbrev-type="pubmed">Bull. World Health Organ.</abbrev-journal-title>
</journal-title-group>
<issn pub-type="ppub">0042-9686</issn>
<publisher>
<publisher-name>World Health Organization</publisher-name>
</publisher>
</journal-meta>
<article-meta>
<article-id pub-id-type="publisher-id">BLT.13.000813</article-id>
<article-id pub-id-type="doi">10.2471/BLT.13.000813</article-id>
<article-categories>
<subj-group subj-group-type="heading">
<subject> In This Month´s Bulletin</subject>
</subj-group>
</article-categories>
<title-group>
<article-title>In this month's <italic>Bulletin</italic>
</article-title>
</title-group>
<pub-date pub-type="ppub">
<month>08</month>
<year>2013</year>
</pub-date>
<volume>91</volume>
<issue>8</issue>
<fpage>545</fpage>
<lpage>545</lpage>
<permissions>
<copyright-statement>(c) World Health Organization (WHO) 2013. All rights reserved.</copyright-statement>
<copyright-year>2013</copyright-year>
</permissions>
</article-meta>
</front>
<body>
<p>In the editorial section, David B Evans and colleagues (546) discuss the dimensions of universal health coverage. In the news, Gary Humphreys & Catherine Fiankan-Bokonga (549–550) report on the approach France is taking to counter trends in childhood obesity. Fiona Fleck (551–552) interviews Philip James on how the global obesity epidemic started and what should be done to reverse it.</p>
<sec sec-type="other1">
<title>Nigeria</title>
</sec>
</body>
</article>
''')]
arch = make_test_archive(data)
pkg = self._makeOne(arch.name)
self.assertTrue(pkg.is_valid_schema())
def test_is_valid_schema_with_invalid_xml(self):
data = [('bar.xml', b'''<?xml version="1.0" encoding="utf-8"?>
<article article-type="in-brief" dtd-version="1.0" xml:lang="en" xmlns:xlink="http://www.w3.org/1999/xlink" xmlns:mml="http://www.w3.org/1998/Math/MathML">
<front>
<journal-meta>
<journal-title-group>
<journal-title>Bulletin of the World Health Organization</journal-title>
<abbrev-journal-title abbrev-type="pubmed">Bull. World Health Organ.</abbrev-journal-title>
</journal-title-group>
<issn pub-type="ppub">0042-9686</issn>
<publisher>
<publisher-name>World Health Organization</publisher-name>
</publisher>
</journal-meta>
<article-meta>
<article-id pub-id-type="publisher-id">BLT.13.000813</article-id>
<article-id pub-id-type="doi">10.2471/BLT.13.000813</article-id>
<article-categories>
<subj-group subj-group-type="heading">
<subject> In This Month´s Bulletin</subject>
</subj-group>
</article-categories>
<title-group>
<article-title>In this month's <italic>Bulletin</italic>
</article-title>
</title-group>
<pub-date pub-type="ppub">
<month>08</month>
<year>2013</year>
</pub-date>
<volume>91</volume>
<issue>8</issue>
<fpage>545</fpage>
<lpage>545</lpage>
<permissions>
<copyright-statement>(c) World Health Organization (WHO) 2013. All rights reserved.</copyright-statement>
<copyright-year>2013</copyright-year>
</permissions>
</article-meta>
</front>
<body>
<p>In the editorial section, David B Evans and colleagues (546) discuss the dimensions of universal health coverage. In the news, Gary Humphreys & Catherine Fiankan-Bokonga (549–550) report on the approach France is taking to counter trends in childhood obesity. Fiona Fleck (551–552) interviews Philip James on how the global obesity epidemic started and what should be done to reverse it.</p>
<sec sec-type="other1">
<title>Nigeria</title>
</sec>
</body>
</article>
''')]
arch = make_test_archive(data)
pkg = self._makeOne(arch.name)
self.assertFalse(pkg.is_valid_schema())
def test_is_valid_schema_with_wrong_tag(self):
data = [('bar.xml', b'''<?xml version="1.0" encoding="utf-8"?>
<article article-type="in-brief" dtd-version="1.0" xml:lang="en" xmlns:xlink="http://www.w3.org/1999/xlink" xmlns:mml="http://www.w3.org/1998/Math/MathML">
<front>
<a>wrong</a>
<journal-meta>
<journal-title-group>
<journal-title>Bulletin of the World Health Organization</journal-title>
<abbrev-journal-title abbrev-type="pubmed">Bull. World Health Organ.</abbrev-journal-title>
</journal-title-group>
<issn pub-type="ppub">0042-9686</issn>
<publisher>
<publisher-name>World Health Organization</publisher-name>
</publisher>
</journal-meta>
<article-meta>
<article-id pub-id-type="publisher-id">BLT.13.000813</article-id>
<article-id pub-id-type="doi">10.2471/BLT.13.000813</article-id>
<article-categories>
<subj-group subj-group-type="heading">
<subject> In This Month´s Bulletin</subject>
</subj-group>
</article-categories>
<title-group>
<article-title>In this month's <italic>Bulletin</italic>
</article-title>
</title-group>
<pub-date pub-type="ppub">
<month>08</month>
<year>2013</year>
</pub-date>
<volume>91</volume>
<issue>8</issue>
<fpage>545</fpage>
<lpage>545</lpage>
<permissions>
<copyright-statement>(c) World Health Organization (WHO) 2013. All rights reserved.</copyright-statement>
<copyright-year>2013</copyright-year>
</permissions>
</article-meta>
</front>
<body>
<p>In the editorial section, David B Evans and colleagues (546) discuss the dimensions of universal health coverage. In the news, Gary Humphreys & Catherine Fiankan-Bokonga (549–550) report on the approach France is taking to counter trends in childhood obesity. Fiona Fleck (551–552) interviews Philip James on how the global obesity epidemic started and what should be done to reverse it.</p>
<sec sec-type="other1">
<title>Nigeria</title>
</sec>
</body>
</article>
''')]
arch = make_test_archive(data)
pkg = self._makeOne(arch.name)
self.assertFalse(pkg.is_valid_schema())
class XrayTests(mocker.MockerTestCase):
def _make_test_archive(self, arch_data):
fp = NamedTemporaryFile()
with zipfile.ZipFile(fp, 'w') as zipfp:
for archive, data in arch_data:
zipfp.writestr(archive, data)
return fp
def test_non_zip_archive_raises_ValueError(self):
fp = NamedTemporaryFile()
self.assertRaises(ValueError, lambda: x_ray.Xray(fp.name))
def test_get_ext_returns_member_names(self):
arch = make_test_archive(
[('bar.xml', b'<root><name>bar</name></root>')])
xray = x_ray.Xray(arch.name)
self.assertEquals(xray.get_ext('xml'), ['bar.xml'])
def test_get_ext_returns_empty_when_ext_doesnot_exist(self):
arch = make_test_archive(
[('bar.xml', b'<root><name>bar</name></root>')])
xray = x_ray.Xray(arch.name)
self.assertEquals(xray.get_ext('jpeg'), [])
def test_get_fps_returns_an_iterable(self):
arch = make_test_archive(
[('bar.xml', b'<root><name>bar</name></root>')])
xray = x_ray.Xray(arch.name)
fps = xray.get_fps('xml')
self.assertTrue(hasattr(fps, 'next'))
    def test_get_fps_yields_ZipExtFile_instances(self):
arch = make_test_archive(
[('bar.xml', b'<root><name>bar</name></root>')])
xray = x_ray.Xray(arch.name)
fps = xray.get_fps('xml')
self.assertIsInstance(fps.next(), zipfile.ZipExtFile)
def test_get_fps_swallow_exceptions_when_ext_doesnot_exist(self):
arch = make_test_archive(
[('bar.xml', b'<root><name>bar</name></root>')])
xray = x_ray.Xray(arch.name)
fps = xray.get_fps('jpeg')
self.assertRaises(StopIteration, lambda: fps.next())
def test_package_checksum_is_calculated(self):
data = [('bar.xml', b'<root><name>bar</name></root>')]
arch1 = make_test_archive(data)
arch2 = make_test_archive(data)
self.assertEquals(
x_ray.Xray(arch1.name).checksum,
x_ray.Xray(arch2.name).checksum
)
def test_get_members(self):
arch = make_test_archive(
[('bar.xml', b'<root><name>bar</name></root>'),
('jar.xml', b'<root><name>bar</name></root>')])
xray = x_ray.Xray(arch.name)
self.assertEquals(xray.get_members(), ['bar.xml', 'jar.xml'])
def test_get_members_returns_empty(self):
arch = make_test_archive([])
xray = x_ray.Xray(arch.name)
self.assertEquals(xray.get_members(), [])
def test_get_fp(self):
arch = make_test_archive(
[('bar.xml', b'<root><name>bar</name></root>'),
('jar.xml', b'<root><name>bar</name></root>')])
xray = x_ray.Xray(arch.name)
self.assertIsInstance(xray.get_fp('bar.xml'),
zipfile.ZipExtFile)
def test_get_fp_nonexisting_members(self):
arch = make_test_archive(
[('bar.xml', b'<root><name>bar</name></root>'),
('jar.xml', b'<root><name>bar</name></root>')])
xray = x_ray.Xray(arch.name)
self.assertRaises(ValueError, lambda: xray.get_fp('foo.xml'))
def test_get_classified_members(self):
arch = make_test_archive(
[('bar.xml', b'<root><name>bar</name></root>'),
('jar.xml', b'<root><name>bar</name></root>')])
xray = x_ray.Xray(arch.name)
self.assertEquals(xray.get_classified_members(), {'xml': ['bar.xml', 'jar.xml']})
def test_get_ext_is_caseinsensitive(self):
arch = make_test_archive(
[('bar.xml', b'<root><name>bar</name></root>'),
('jar.XML', b'<root><name>bar</name></root>')])
xray = x_ray.Xray(arch.name)
self.assertEquals(xray.get_ext('xml'), ['bar.xml', 'jar.XML'])
def test_get_ext_arg_is_caseinsensitive(self):
arch = make_test_archive(
[('bar.xml', b'<root><name>bar</name></root>'),
('jar.XML', b'<root><name>bar</name></root>')])
xray = x_ray.Xray(arch.name)
self.assertEquals(xray.get_ext('XML'), ['bar.xml', 'jar.XML'])
def test_get_classified_members_is_caseinsensitive(self):
arch = make_test_archive(
[('bar.xml', b'<root><name>bar</name></root>'),
('jar.XML', b'<root><name>bar</name></root>')])
xray = x_ray.Xray(arch.name)
self.assertEquals(xray.get_classified_members(), {'xml': ['bar.xml', 'jar.XML']})
def test_get_fps_is_caseinsensitive(self):
arch = make_test_archive(
[('bar.xml', b'<root><name>bar</name></root>'),
('jar.XML', b'<root><name>bar</name></root>')])
xray = x_ray.Xray(arch.name)
fps = xray.get_fps('xml')
self.assertEqual([fp.name for fp in fps], ['bar.xml', 'jar.XML'])
def test_get_fps_arg_is_caseinsensitive(self):
arch = make_test_archive(
[('bar.xml', b'<root><name>bar</name></root>'),
('jar.XML', b'<root><name>bar</name></root>')])
xray = x_ray.Xray(arch.name)
fps = xray.get_fps('XML')
self.assertEqual([fp.name for fp in fps], ['bar.xml', 'jar.XML'])
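The tests above pin down the behaviour of x_ray.Xray without showing its implementation. A minimal sketch that would satisfy them is given below; the names and details are assumptions made for illustration, not taken from the real x_ray module (in particular, the checksum algorithm and read block size are guesses).

import hashlib
import zipfile


class Xray(object):
    def __init__(self, filename):
        # Reject anything that is not a zip archive, as the tests expect.
        if not zipfile.is_zipfile(filename):
            raise ValueError('%s is not a valid zip archive' % filename)
        self._filename = filename
        self._zip = zipfile.ZipFile(filename, 'r')

    @property
    def checksum(self):
        # Hash the raw archive bytes so identical payloads compare equal.
        md5 = hashlib.md5()
        with open(self._filename, 'rb') as fp:
            for chunk in iter(lambda: fp.read(8192), b''):
                md5.update(chunk)
        return md5.hexdigest()

    def get_members(self):
        return self._zip.namelist()

    def get_classified_members(self):
        # Group member names by lower-cased extension, preserving order.
        classified = {}
        for name in self.get_members():
            ext = name.rsplit('.', 1)[-1].lower()
            classified.setdefault(ext, []).append(name)
        return classified

    def get_ext(self, ext):
        return self.get_classified_members().get(ext.lower(), [])

    def get_fps(self, ext):
        # A generator: a missing extension simply yields nothing, so the
        # caller gets StopIteration rather than an error.
        for name in self.get_ext(ext):
            yield self._zip.open(name)

    def get_fp(self, member):
        if member not in self.get_members():
            raise ValueError('%s is not a member of this archive' % member)
        return self._zip.open(member)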
| 40.032692
| 433
| 0.543978
| 2,416
| 20,817
| 4.527318
| 0.104719
| 0.017188
| 0.032181
| 0.064271
| 0.869446
| 0.826019
| 0.814043
| 0.808466
| 0.807003
| 0.806455
| 0
| 0.023581
| 0.315511
| 20,817
| 519
| 434
| 40.109827
| 0.743842
| 0.000624
| 0
| 0.690244
| 0
| 0.060976
| 0.533724
| 0.158983
| 0
| 0
| 0
| 0
| 0.095122
| 0
| null | null | 0.002439
| 0.014634
| null | null | 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0 | 7 |
hexsha: ac76f98e25d85405ee60b2f84b3d915d5130838a
size: 14,520
ext: py
lang: Python
max_stars_repo_path: p4v1_1/simple_router/send2.py
max_stars_repo_name: vibhaa/iw15
max_stars_repo_head_hexsha: c2a316499dbd3e7459aed2cacf0612df0b7dcec2
max_stars_repo_licenses: ["Apache-2.0"]
max_stars_count: 14
max_stars_repo_stars_event_min_datetime: 2019-02-25T22:42:15.000Z
max_stars_repo_stars_event_max_datetime: 2021-12-22T06:29:20.000Z
max_issues_repo_path: p4v1_1/simple_router/send2.py
max_issues_repo_name: vibhaa/iw15
max_issues_repo_head_hexsha: c2a316499dbd3e7459aed2cacf0612df0b7dcec2
max_issues_repo_licenses: ["Apache-2.0"]
max_issues_count: null
max_issues_repo_issues_event_min_datetime: null
max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: p4v1_1/simple_router/send2.py
max_forks_repo_name: vibhaa/iw15
max_forks_repo_head_hexsha: c2a316499dbd3e7459aed2cacf0612df0b7dcec2
max_forks_repo_licenses: ["Apache-2.0"]
max_forks_count: 8
max_forks_repo_forks_event_min_datetime: 2018-11-25T11:42:24.000Z
max_forks_repo_forks_event_max_datetime: 2021-03-11T07:23:21.000Z
content:
#!/usr/bin/python
# Copyright 2013-present Barefoot Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from scapy.all import sniff, sendp
from scapy.all import Packet
from scapy.all import ShortField, IntField, LongField, BitField
from scapy.all import Ether, IP, TCP
import networkx as nx
import sys
def main():
if len(sys.argv) != 1:
print "Usage: send2.py"
sys.exit(1)
srcmac = '00:aa:bb:00:00:00'
dstmac = '00:aa:bb:00:00:01'
port = 80
msg = 'hi'
p = Ether(src=srcmac, dst=dstmac, type=0x0800) / IP(src = '111.37.250.218', dst = '3.248.234.184') / msg
sendp(p, iface = "veth0", verbose = 0)
p = Ether(src=srcmac, dst=dstmac, type=0x0800) / IP(src = '142.54.3.18', dst = '35.240.203.247') / msg
sendp(p, iface = "veth0", verbose = 0)
p = Ether(src=srcmac, dst=dstmac, type=0x0800) / IP(src = '112.119.163.206', dst = '1.102.89.68') / msg
sendp(p, iface = "veth0", verbose = 0)
p = Ether(src=srcmac, dst=dstmac, type=0x0800) / IP(src = '111.37.250.218', dst = '3.248.234.184') / msg
sendp(p, iface = "veth0", verbose = 0)
p = Ether(src=srcmac, dst=dstmac, type=0x0800) / IP(src = '205.38.229.41', dst = '66.216.25.163') / msg
sendp(p, iface = "veth0", verbose = 0)
p = Ether(src=srcmac, dst=dstmac, type=0x0800) / IP(src = '43.255.233.20', dst = '221.46.220.203') / msg
sendp(p, iface = "veth0", verbose = 0)
p = Ether(src=srcmac, dst=dstmac, type=0x0800) / IP(src = '6.172.207.28', dst = '208.89.117.253') / msg
sendp(p, iface = "veth0", verbose = 0)
p = Ether(src=srcmac, dst=dstmac, type=0x0800) / IP(src = '43.139.101.173', dst = '1.0.3.222') / msg
sendp(p, iface = "veth0", verbose = 0)
p = Ether(src=srcmac, dst=dstmac, type=0x0800) / IP(src = '148.92.117.249', dst = '1.102.127.50') / msg
sendp(p, iface = "veth0", verbose = 0)
p = Ether(src=srcmac, dst=dstmac, type=0x0800) / IP(src = '194.253.242.112', dst = '153.193.46.216') / msg
sendp(p, iface = "veth0", verbose = 0)
p = Ether(src=srcmac, dst=dstmac, type=0x0800) / IP(src = '111.37.250.218', dst = '3.248.234.184') / msg
sendp(p, iface = "veth0", verbose = 0)
p = Ether(src=srcmac, dst=dstmac, type=0x0800) / IP(src = '43.255.233.20', dst = '221.46.220.203') / msg
sendp(p, iface = "veth0", verbose = 0)
p = Ether(src=srcmac, dst=dstmac, type=0x0800) / IP(src = '111.37.250.218', dst = '3.248.234.184') / msg
sendp(p, iface = "veth0", verbose = 0)
p = Ether(src=srcmac, dst=dstmac, type=0x0800) / IP(src = '113.169.241.172', dst = '1.96.222.132') / msg
sendp(p, iface = "veth0", verbose = 0)
p = Ether(src=srcmac, dst=dstmac, type=0x0800) / IP(src = '30.187.70.176', dst = '1.96.166.240') / msg
sendp(p, iface = "veth0", verbose = 0)
p = Ether(src=srcmac, dst=dstmac, type=0x0800) / IP(src = '129.201.147.168', dst = '43.239.238.254') / msg
sendp(p, iface = "veth0", verbose = 0)
p = Ether(src=srcmac, dst=dstmac, type=0x0800) / IP(src = '43.255.233.20', dst = '221.46.220.203') / msg
sendp(p, iface = "veth0", verbose = 0)
p = Ether(src=srcmac, dst=dstmac, type=0x0800) / IP(src = '15.83.232.209', dst = '153.193.117.43') / msg
sendp(p, iface = "veth0", verbose = 0)
p = Ether(src=srcmac, dst=dstmac, type=0x0800) / IP(src = '117.91.221.69', dst = '1.96.228.67') / msg
sendp(p, iface = "veth0", verbose = 0)
p = Ether(src=srcmac, dst=dstmac, type=0x0800) / IP(src = '112.65.110.243', dst = '1.96.222.237') / msg
sendp(p, iface = "veth0", verbose = 0)
p = Ether(src=srcmac, dst=dstmac, type=0x0800) / IP(src = '153.193.150.99', dst = '221.46.220.227') / msg
sendp(p, iface = "veth0", verbose = 0)
p = Ether(src=srcmac, dst=dstmac, type=0x0800) / IP(src = '43.255.233.20', dst = '221.46.220.203') / msg
sendp(p, iface = "veth0", verbose = 0)
p = Ether(src=srcmac, dst=dstmac, type=0x0800) / IP(src = '43.255.233.20', dst = '221.46.220.203') / msg
sendp(p, iface = "veth0", verbose = 0)
p = Ether(src=srcmac, dst=dstmac, type=0x0800) / IP(src = '153.193.150.99', dst = '221.46.220.227') / msg
sendp(p, iface = "veth0", verbose = 0)
p = Ether(src=srcmac, dst=dstmac, type=0x0800) / IP(src = '122.138.7.7', dst = '3.151.114.131') / msg
sendp(p, iface = "veth0", verbose = 0)
p = Ether(src=srcmac, dst=dstmac, type=0x0800) / IP(src = '197.78.57.215', dst = '66.216.25.163') / msg
sendp(p, iface = "veth0", verbose = 0)
p = Ether(src=srcmac, dst=dstmac, type=0x0800) / IP(src = '43.255.233.20', dst = '221.46.220.203') / msg
sendp(p, iface = "veth0", verbose = 0)
p = Ether(src=srcmac, dst=dstmac, type=0x0800) / IP(src = '207.137.47.98', dst = '1.96.223.52') / msg
sendp(p, iface = "veth0", verbose = 0)
p = Ether(src=srcmac, dst=dstmac, type=0x0800) / IP(src = '116.209.201.217', dst = '66.216.25.163') / msg
sendp(p, iface = "veth0", verbose = 0)
p = Ether(src=srcmac, dst=dstmac, type=0x0800) / IP(src = '111.37.250.218', dst = '3.248.234.184') / msg
sendp(p, iface = "veth0", verbose = 0)
p = Ether(src=srcmac, dst=dstmac, type=0x0800) / IP(src = '205.38.229.188', dst = '66.216.25.163') / msg
sendp(p, iface = "veth0", verbose = 0)
p = Ether(src=srcmac, dst=dstmac, type=0x0800) / IP(src = '207.157.173.33', dst = '1.96.167.17') / msg
sendp(p, iface = "veth0", verbose = 0)
p = Ether(src=srcmac, dst=dstmac, type=0x0800) / IP(src = '153.193.150.99', dst = '221.46.220.227') / msg
sendp(p, iface = "veth0", verbose = 0)
p = Ether(src=srcmac, dst=dstmac, type=0x0800) / IP(src = '39.183.184.70', dst = '1.96.167.9') / msg
sendp(p, iface = "veth0", verbose = 0)
p = Ether(src=srcmac, dst=dstmac, type=0x0800) / IP(src = '194.253.242.112', dst = '153.193.46.216') / msg
sendp(p, iface = "veth0", verbose = 0)
p = Ether(src=srcmac, dst=dstmac, type=0x0800) / IP(src = '43.255.233.20', dst = '221.46.220.203') / msg
sendp(p, iface = "veth0", verbose = 0)
p = Ether(src=srcmac, dst=dstmac, type=0x0800) / IP(src = '173.162.174.71', dst = '102.14.133.117') / msg
sendp(p, iface = "veth0", verbose = 0)
p = Ether(src=srcmac, dst=dstmac, type=0x0800) / IP(src = '43.139.101.102', dst = '1.34.248.21') / msg
sendp(p, iface = "veth0", verbose = 0)
p = Ether(src=srcmac, dst=dstmac, type=0x0800) / IP(src = '102.1.84.90', dst = '43.206.171.27') / msg
sendp(p, iface = "veth0", verbose = 0)
p = Ether(src=srcmac, dst=dstmac, type=0x0800) / IP(src = '153.193.150.99', dst = '221.46.220.227') / msg
sendp(p, iface = "veth0", verbose = 0)
p = Ether(src=srcmac, dst=dstmac, type=0x0800) / IP(src = '128.25.115.194', dst = '1.96.167.56') / msg
sendp(p, iface = "veth0", verbose = 0)
p = Ether(src=srcmac, dst=dstmac, type=0x0800) / IP(src = '111.229.0.142', dst = '5.252.90.214') / msg
sendp(p, iface = "veth0", verbose = 0)
p = Ether(src=srcmac, dst=dstmac, type=0x0800) / IP(src = '128.29.115.116', dst = '1.96.223.248') / msg
sendp(p, iface = "veth0", verbose = 0)
p = Ether(src=srcmac, dst=dstmac, type=0x0800) / IP(src = '111.37.250.218', dst = '3.248.234.184') / msg
sendp(p, iface = "veth0", verbose = 0)
p = Ether(src=srcmac, dst=dstmac, type=0x0800) / IP(src = '153.193.150.99', dst = '221.46.220.227') / msg
sendp(p, iface = "veth0", verbose = 0)
p = Ether(src=srcmac, dst=dstmac, type=0x0800) / IP(src = '207.137.52.25', dst = '1.96.166.173') / msg
sendp(p, iface = "veth0", verbose = 0)
p = Ether(src=srcmac, dst=dstmac, type=0x0800) / IP(src = '102.6.32.157', dst = '221.46.220.102') / msg
sendp(p, iface = "veth0", verbose = 0)
p = Ether(src=srcmac, dst=dstmac, type=0x0800) / IP(src = '102.6.32.157', dst = '221.46.220.102') / msg
sendp(p, iface = "veth0", verbose = 0)
p = Ether(src=srcmac, dst=dstmac, type=0x0800) / IP(src = '100.159.187.56', dst = '208.89.121.171') / msg
sendp(p, iface = "veth0", verbose = 0)
p = Ether(src=srcmac, dst=dstmac, type=0x0800) / IP(src = '183.190.174.107', dst = '1.150.209.83') / msg
sendp(p, iface = "veth0", verbose = 0)
p = Ether(src=srcmac, dst=dstmac, type=0x0800) / IP(src = '111.209.150.229', dst = '210.108.56.240') / msg
sendp(p, iface = "veth0", verbose = 0)
p = Ether(src=srcmac, dst=dstmac, type=0x0800) / IP(src = '109.147.8.232', dst = '1.2.38.184') / msg
sendp(p, iface = "veth0", verbose = 0)
p = Ether(src=srcmac, dst=dstmac, type=0x0800) / IP(src = '128.37.184.30', dst = '1.96.223.239') / msg
sendp(p, iface = "veth0", verbose = 0)
p = Ether(src=srcmac, dst=dstmac, type=0x0800) / IP(src = '153.193.150.99', dst = '221.46.220.227') / msg
sendp(p, iface = "veth0", verbose = 0)
p = Ether(src=srcmac, dst=dstmac, type=0x0800) / IP(src = '116.209.201.171', dst = '66.216.25.163') / msg
sendp(p, iface = "veth0", verbose = 0)
p = Ether(src=srcmac, dst=dstmac, type=0x0800) / IP(src = '102.0.115.0', dst = '111.205.228.129') / msg
sendp(p, iface = "veth0", verbose = 0)
p = Ether(src=srcmac, dst=dstmac, type=0x0800) / IP(src = '43.255.233.20', dst = '221.46.220.203') / msg
sendp(p, iface = "veth0", verbose = 0)
p = Ether(src=srcmac, dst=dstmac, type=0x0800) / IP(src = '205.38.229.94', dst = '66.216.25.163') / msg
sendp(p, iface = "veth0", verbose = 0)
p = Ether(src=srcmac, dst=dstmac, type=0x0800) / IP(src = '197.78.57.35', dst = '66.216.25.163') / msg
sendp(p, iface = "veth0", verbose = 0)
p = Ether(src=srcmac, dst=dstmac, type=0x0800) / IP(src = '112.111.179.170', dst = '1.96.223.36') / msg
sendp(p, iface = "veth0", verbose = 0)
p = Ether(src=srcmac, dst=dstmac, type=0x0800) / IP(src = '153.193.150.99', dst = '221.46.220.227') / msg
sendp(p, iface = "veth0", verbose = 0)
p = Ether(src=srcmac, dst=dstmac, type=0x0800) / IP(src = '184.21.123.130', dst = '211.106.242.60') / msg
sendp(p, iface = "veth0", verbose = 0)
p = Ether(src=srcmac, dst=dstmac, type=0x0800) / IP(src = '117.92.23.134', dst = '1.45.68.235') / msg
sendp(p, iface = "veth0", verbose = 0)
p = Ether(src=srcmac, dst=dstmac, type=0x0800) / IP(src = '153.193.150.99', dst = '221.46.220.227') / msg
sendp(p, iface = "veth0", verbose = 0)
p = Ether(src=srcmac, dst=dstmac, type=0x0800) / IP(src = '112.111.179.170', dst = '1.96.223.36') / msg
sendp(p, iface = "veth0", verbose = 0)
p = Ether(src=srcmac, dst=dstmac, type=0x0800) / IP(src = '194.253.242.112', dst = '153.193.46.216') / msg
sendp(p, iface = "veth0", verbose = 0)
p = Ether(src=srcmac, dst=dstmac, type=0x0800) / IP(src = '43.255.233.20', dst = '221.46.220.203') / msg
sendp(p, iface = "veth0", verbose = 0)
p = Ether(src=srcmac, dst=dstmac, type=0x0800) / IP(src = '43.139.101.123', dst = '1.81.56.123') / msg
sendp(p, iface = "veth0", verbose = 0)
p = Ether(src=srcmac, dst=dstmac, type=0x0800) / IP(src = '153.193.150.99', dst = '221.46.220.227') / msg
sendp(p, iface = "veth0", verbose = 0)
p = Ether(src=srcmac, dst=dstmac, type=0x0800) / IP(src = '13.1.149.6', dst = '1.96.164.152') / msg
sendp(p, iface = "veth0", verbose = 0)
p = Ether(src=srcmac, dst=dstmac, type=0x0800) / IP(src = '153.193.150.99', dst = '221.46.220.227') / msg
sendp(p, iface = "veth0", verbose = 0)
p = Ether(src=srcmac, dst=dstmac, type=0x0800) / IP(src = '128.37.184.30', dst = '1.96.223.239') / msg
sendp(p, iface = "veth0", verbose = 0)
p = Ether(src=srcmac, dst=dstmac, type=0x0800) / IP(src = '43.139.101.102', dst = '1.13.5.138') / msg
sendp(p, iface = "veth0", verbose = 0)
p = Ether(src=srcmac, dst=dstmac, type=0x0800) / IP(src = '119.10.57.179', dst = '43.233.106.124') / msg
sendp(p, iface = "veth0", verbose = 0)
p = Ether(src=srcmac, dst=dstmac, type=0x0800) / IP(src = '153.193.150.99', dst = '221.46.220.227') / msg
sendp(p, iface = "veth0", verbose = 0)
p = Ether(src=srcmac, dst=dstmac, type=0x0800) / IP(src = '204.103.13.219', dst = '221.46.221.163') / msg
sendp(p, iface = "veth0", verbose = 0)
p = Ether(src=srcmac, dst=dstmac, type=0x0800) / IP(src = '43.255.233.20', dst = '221.46.220.203') / msg
sendp(p, iface = "veth0", verbose = 0)
p = Ether(src=srcmac, dst=dstmac, type=0x0800) / IP(src = '128.61.59.69', dst = '1.96.223.133') / msg
sendp(p, iface = "veth0", verbose = 0)
p = Ether(src=srcmac, dst=dstmac, type=0x0800) / IP(src = '43.139.101.170', dst = '1.0.227.102') / msg
sendp(p, iface = "veth0", verbose = 0)
p = Ether(src=srcmac, dst=dstmac, type=0x0800) / IP(src = '113.181.185.162', dst = '1.39.174.181') / msg
sendp(p, iface = "veth0", verbose = 0)
p = Ether(src=srcmac, dst=dstmac, type=0x0800) / IP(src = '73.149.65.226', dst = '210.108.49.161') / msg
sendp(p, iface = "veth0", verbose = 0)
p = Ether(src=srcmac, dst=dstmac, type=0x0800) / IP(src = '43.255.233.20', dst = '221.46.220.203') / msg
sendp(p, iface = "veth0", verbose = 0)
p = Ether(src=srcmac, dst=dstmac, type=0x0800) / IP(src = '119.238.4.231', dst = '1.96.167.6') / msg
sendp(p, iface = "veth0", verbose = 0)
p = Ether(src=srcmac, dst=dstmac, type=0x0800) / IP(src = '130.77.108.26', dst = '1.146.59.107') / msg
sendp(p, iface = "veth0", verbose = 0)
p = Ether(src=srcmac, dst=dstmac, type=0x0800) / IP(src = '153.193.150.99', dst = '221.46.220.227') / msg
sendp(p, iface = "veth0", verbose = 0)
p = Ether(src=srcmac, dst=dstmac, type=0x0800) / IP(src = '207.157.173.33', dst = '1.96.167.17') / msg
sendp(p, iface = "veth0", verbose = 0)
p = Ether(src=srcmac, dst=dstmac, type=0x0800) / IP(src = '119.236.185.197', dst = '1.96.223.248') / msg
sendp(p, iface = "veth0", verbose = 0)
p = Ether(src=srcmac, dst=dstmac, type=0x0800) / IP(src = '130.77.108.26', dst = '1.146.59.107') / msg
sendp(p, iface = "veth0", verbose = 0)
p = Ether(src=srcmac, dst=dstmac, type=0x0800) / IP(src = '74.90.63.201', dst = '210.108.56.240') / msg
sendp(p, iface = "veth0", verbose = 0)
if __name__ == '__main__':
main()
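The body of main() above repeats the same two-line send pattern for every flow, differing only in the source and destination addresses. A data-driven rewrite along the following lines would keep one packet template and loop over the address pairs; this is a sketch, not the original script, and the pair list is abbreviated to the first few of the original's flows.

from scapy.all import Ether, IP, sendp


def send_flows():
    srcmac = '00:aa:bb:00:00:00'
    dstmac = '00:aa:bb:00:00:01'
    msg = 'hi'
    # (src, dst) pairs, one packet per pair; abbreviated here.
    flows = [
        ('111.37.250.218', '3.248.234.184'),
        ('142.54.3.18', '35.240.203.247'),
        ('112.119.163.206', '1.102.89.68'),
    ]
    for src, dst in flows:
        p = Ether(src=srcmac, dst=dstmac, type=0x0800) / IP(src=src, dst=dst) / msg
        sendp(p, iface='veth0', verbose=0)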
| 66.605505
| 110
| 0.598691
| 2,477
| 14,520
| 3.506258
| 0.119096
| 0.061485
| 0.092228
| 0.153713
| 0.832355
| 0.830052
| 0.830052
| 0.830052
| 0.830052
| 0.830052
| 0
| 0.21001
| 0.192287
| 14,520
| 217
| 111
| 66.912442
| 0.530525
| 0.040427
| 0
| 0.675258
| 0
| 0
| 0.204684
| 0
| 0
| 0
| 0.038365
| 0
| 0
| 0
| null | null | 0
| 0.030928
| null | null | 0.005155
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0 | 8 |
hexsha: 3baba84140516ff4c6c30f4ef6a9d363d405a779
size: 4,839
ext: py
lang: Python
max_stars_repo_path: day11/main.py
max_stars_repo_name: ogun/advent-of-code-2020
max_stars_repo_head_hexsha: 1dcf268f9f1f6cd7c46d442b6aaa9b19ec5b2cf1
max_stars_repo_licenses: ["MIT"]
max_stars_count: 4
max_stars_repo_stars_event_min_datetime: 2020-12-03T13:22:12.000Z
max_stars_repo_stars_event_max_datetime: 2020-12-22T21:51:49.000Z
max_issues_repo_path: day11/main.py
max_issues_repo_name: ogun/advent-of-code-2020
max_issues_repo_head_hexsha: 1dcf268f9f1f6cd7c46d442b6aaa9b19ec5b2cf1
max_issues_repo_licenses: ["MIT"]
max_issues_count: null
max_issues_repo_issues_event_min_datetime: null
max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: day11/main.py
max_forks_repo_name: ogun/advent-of-code-2020
max_forks_repo_head_hexsha: 1dcf268f9f1f6cd7c46d442b6aaa9b19ec5b2cf1
max_forks_repo_licenses: ["MIT"]
max_forks_count: 1
max_forks_repo_forks_event_min_datetime: 2020-12-12T13:41:12.000Z
max_forks_repo_forks_event_max_datetime: 2020-12-12T13:41:12.000Z
content:
from copy import deepcopy
import day11.data as data
def part1():
value = data.INPUT
def get_neigbours(col, row, col_length, row_length):
neigbours = []
for col_idx in range(col - 1, col + 2):
for row_idx in range(row - 1, row + 2):
if (
-1 in [col_idx, row_idx]
or col_idx == col_length
or row_idx == row_length
):
continue
if (col, row) == (col_idx, row_idx):
continue
neigbours.append((col_idx, row_idx))
return neigbours
def next_step(value):
result = deepcopy(value)
for row_idx, row in enumerate(value):
for col_idx, col in enumerate(row):
current = value[row_idx][col_idx]
if current == ".":
continue
neigbours = get_neigbours(col_idx, row_idx, len(value[0]), len(value))
occupied_seats = sum(1 for n in neigbours if value[n[1]][n[0]] == "#")
if current == "L" and occupied_seats == 0:
result[row_idx][col_idx] = "#"
continue
if current == "#" and occupied_seats > 3:
result[row_idx][col_idx] = "L"
continue
return result
next_value = next_step(value)
while value != next_value:
value = deepcopy(next_value)
next_value = next_step(value)
return sum(1 for r in value for c in r if c == "#")
def part2():
value = data.INPUT
def get_neigbours(col, row, col_length, row_length):
neigbours = []
seats = ["L", "#"]
# Up
for row_idx in range(row - 1, -1, -1):
if value[row_idx][col] in seats:
neigbours.append((col, row_idx))
break
# Down
for row_idx in range(row + 1, row_length):
if value[row_idx][col] in seats:
neigbours.append((col, row_idx))
break
# Left
for col_idx in range(col - 1, -1, -1):
if value[row][col_idx] in seats:
neigbours.append((col_idx, row))
break
# Right
for col_idx in range(col + 1, col_length):
if value[row][col_idx] in seats:
neigbours.append((col_idx, row))
break
# Up-Left
for step in range(1, max(col_length, row_length)):
col_idx = col - step
row_idx = row - step
if not (col_idx > -1 and row_idx > -1):
break
if value[row_idx][col_idx] in seats:
neigbours.append((col_idx, row_idx))
break
# Up-Right
for step in range(1, max(col_length, row_length)):
col_idx = col + step
row_idx = row - step
if not (col_idx < col_length and row_idx > -1):
break
if value[row_idx][col_idx] in seats:
neigbours.append((col_idx, row_idx))
break
# Down-Left
for step in range(1, max(col_length, row_length)):
col_idx = col - step
row_idx = row + step
if not (col_idx > -1 and row_idx < row_length):
break
if value[row_idx][col_idx] in seats:
neigbours.append((col_idx, row_idx))
break
# Down-Right
for step in range(1, max(col_length, row_length)):
col_idx = col + step
row_idx = row + step
if not (col_idx < col_length and row_idx < row_length):
break
if value[row_idx][col_idx] in seats:
neigbours.append((col_idx, row_idx))
break
return neigbours
def next_step(value):
result = deepcopy(value)
for row_idx, row in enumerate(value):
for col_idx, col in enumerate(row):
current = value[row_idx][col_idx]
if current == ".":
continue
neigbours = get_neigbours(col_idx, row_idx, len(value[0]), len(value))
occupied_seats = sum(1 for n in neigbours if value[n[1]][n[0]] == "#")
if current == "L" and occupied_seats == 0:
result[row_idx][col_idx] = "#"
continue
if current == "#" and occupied_seats > 4:
result[row_idx][col_idx] = "L"
continue
return result
next_value = next_step(value)
while value != next_value:
value = deepcopy(next_value)
next_value = next_step(value)
return sum(1 for r in value for c in r if c == "#")
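part2's get_neigbours walks the eight compass directions with four axis loops and four near-identical diagonal loops. A direction-vector helper such as the sketch below (a hypothetical rewrite, assuming the same grid convention where tuples are (col, row)) collapses all eight cases into one loop:

def get_visible_neighbours(value, col, row):
    # For each of the eight directions, walk outward until the first seat
    # ('L' or '#') or the edge of the grid; record that seat if found.
    directions = [(dc, dr) for dc in (-1, 0, 1) for dr in (-1, 0, 1)
                  if (dc, dr) != (0, 0)]
    row_length, col_length = len(value), len(value[0])
    neighbours = []
    for d_col, d_row in directions:
        col_idx, row_idx = col + d_col, row + d_row
        while 0 <= col_idx < col_length and 0 <= row_idx < row_length:
            if value[row_idx][col_idx] in ("L", "#"):
                neighbours.append((col_idx, row_idx))
                break
            col_idx += d_col
            row_idx += d_row
    return neighbours

Capping the walk at one step per direction would recover part1's adjacent-neighbour rule from the same helper.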
| 28.633136
| 86
| 0.49349
| 601
| 4,839
| 3.777038
| 0.088186
| 0.097797
| 0.047577
| 0.052863
| 0.917621
| 0.917621
| 0.903965
| 0.886344
| 0.845815
| 0.845815
| 0
| 0.013636
| 0.408969
| 4,839
| 168
| 87
| 28.803571
| 0.78007
| 0.011573
| 0
| 0.798246
| 0
| 0
| 0.003351
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.052632
| false
| 0
| 0.017544
| 0
| 0.122807
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0 | 7 |
hexsha: 3be419ebc9aa8f6d5afe92154cfdcde28af9526c
size: 95
ext: py
lang: Python
max_stars_repo_path: DTMT/dtmt/layers/__init__.py
max_stars_repo_name: fandongmeng/StackedDTMT
max_stars_repo_head_hexsha: f10062f98a443ad67cadec68fa5abdc8ab60815f
max_stars_repo_licenses: ["BSD-3-Clause"]
max_stars_count: 3
max_stars_repo_stars_event_min_datetime: 2020-09-22T07:33:29.000Z
max_stars_repo_stars_event_max_datetime: 2021-02-19T09:53:28.000Z
max_issues_repo_path: DTMT/dtmt/layers/__init__.py
max_issues_repo_name: fandongmeng/StackedDTMT
max_issues_repo_head_hexsha: f10062f98a443ad67cadec68fa5abdc8ab60815f
max_issues_repo_licenses: ["BSD-3-Clause"]
max_issues_count: null
max_issues_repo_issues_event_min_datetime: null
max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: DTMT/dtmt/layers/__init__.py
max_forks_repo_name: fandongmeng/StackedDTMT
max_forks_repo_head_hexsha: f10062f98a443ad67cadec68fa5abdc8ab60815f
max_forks_repo_licenses: ["BSD-3-Clause"]
max_forks_count: null
max_forks_repo_forks_event_min_datetime: null
max_forks_repo_forks_event_max_datetime: null
content:
# coding=utf-8
import dtmt.layers.attention
import dtmt.layers.nn
import dtmt.layers.rnn_cell
| 15.833333
| 28
| 0.810526
| 16
| 95
| 4.75
| 0.625
| 0.394737
| 0.631579
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.011628
| 0.094737
| 95
| 5
| 29
| 19
| 0.872093
| 0.126316
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0 | 7 |
hexsha: 020cabf3902fc7fd00975c8bf6c6b0d16e8eff81
size: 85,205
ext: py
lang: Python
max_stars_repo_path: tests/providers/figshare/test_provider.py
max_stars_repo_name: west1636/RDM-waterbutler
max_stars_repo_head_hexsha: 6ed22cd52eda8a9665a29615f9b2e623dcae3b29
max_stars_repo_licenses: ["Apache-2.0"]
max_stars_count: 65
max_stars_repo_stars_event_min_datetime: 2015-01-23T03:22:04.000Z
max_stars_repo_stars_event_max_datetime: 2022-01-11T22:33:19.000Z
max_issues_repo_path: tests/providers/figshare/test_provider.py
max_issues_repo_name: cslzchen/waterbutler
max_issues_repo_head_hexsha: e4e07727e06885a752c9251e5731f5627f646da3
max_issues_repo_licenses: ["Apache-2.0"]
max_issues_count: 300
max_issues_repo_issues_event_min_datetime: 2015-02-16T16:45:02.000Z
max_issues_repo_issues_event_max_datetime: 2022-01-31T14:49:07.000Z
max_forks_repo_path: tests/providers/figshare/test_provider.py
max_forks_repo_name: cslzchen/waterbutler
max_forks_repo_head_hexsha: e4e07727e06885a752c9251e5731f5627f646da3
max_forks_repo_licenses: ["Apache-2.0"]
max_forks_count: 76
max_forks_repo_forks_event_min_datetime: 2015-01-20T20:45:17.000Z
max_forks_repo_forks_event_max_datetime: 2021-07-30T13:18:10.000Z
content:
import io
import json
import pytest
import aiohttpretty
from waterbutler.core import streams
from waterbutler.core import exceptions
from waterbutler.providers.figshare import metadata
from waterbutler.providers.figshare import provider
from waterbutler.providers.figshare.path import FigsharePath
from waterbutler.providers.figshare.settings import MAX_PAGE_SIZE
from tests.providers.figshare.fixtures import (crud_fixtures,
error_fixtures,
project_list_articles,
root_provider_fixtures,
project_article_type_1_metadata,
project_article_type_3_metadata,
project_article_type_1_file_metadata,
project_article_type_3_file_metadata)
@pytest.fixture
def auth():
return {
'name': 'cat',
'email': 'cat@cat.com',
'callback_url': 'http://sup.com/api/v1/project/v8s9q/waterbutler/logs/',
'id': 'fakey',
}
@pytest.fixture
def credentials():
return {
'token': 'freddie',
}
@pytest.fixture
def project_settings():
return {
'container_type': 'project',
'container_id': '13423',
}
@pytest.fixture
def project_settings_2():
return {
'container_type': 'project',
'container_id': '64916',
}
@pytest.fixture
def article_settings():
return {
'container_type': 'article',
'container_id': '4037952',
}
@pytest.fixture
def project_provider(auth, credentials, project_settings):
return provider.FigshareProvider(auth, credentials, project_settings)
@pytest.fixture
def project_provider_2(auth, credentials, project_settings_2):
return provider.FigshareProvider(auth, credentials, project_settings_2)
@pytest.fixture
def article_provider(auth, credentials, article_settings):
return provider.FigshareProvider(auth, credentials, article_settings)
@pytest.fixture
def file_content():
return b'sleepy'
@pytest.fixture
def file_like(file_content):
return io.BytesIO(file_content)
@pytest.fixture
def file_stream(file_like):
return streams.FileStreamReader(file_like)
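# Note on the three fixtures above: file_content -> file_like -> file_stream
# builds the stream that the upload tests in TestProjectCRUD feed to
# provider.upload(). See that class's docstring for why the fixtures carry
# an empty-string md5.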
class TestPolymorphism:
def test_project_provider(self, project_settings, project_provider):
assert isinstance(project_provider, provider.FigshareProjectProvider)
assert project_provider.container_id == project_settings['container_id']
def test_article_provider(self, article_settings, article_provider):
assert isinstance(article_provider, provider.FigshareArticleProvider)
assert article_provider.container_id == article_settings['container_id']
class TestProjectV1ValidatePath:
@pytest.mark.asyncio
@pytest.mark.aiohttpretty
async def test_validate_v1_path_folder_article(self, project_provider, root_provider_fixtures):
item = root_provider_fixtures['folder_article_metadata']
file_id = str(item['id'])
path = '/{}/'.format(file_id)
article_list_url = project_provider.build_url(False, *project_provider.root_path_parts,
'articles')
article_segments = (*project_provider.root_path_parts, 'articles', str(item['id']))
article_url = project_provider.build_url(False, *article_segments)
aiohttpretty.register_json_uri('GET', article_list_url,
body=root_provider_fixtures['list_project_articles'],
params={'page': '1', 'page_size': str(MAX_PAGE_SIZE)})
aiohttpretty.register_json_uri('GET', article_list_url, body=[],
params={'page': '2', 'page_size': str(MAX_PAGE_SIZE)})
aiohttpretty.register_json_uri('GET', article_url, body=item)
result = await project_provider.validate_v1_path(path)
expected = FigsharePath('/{}/'.format(item['title']),
_ids=(project_provider.container_id, file_id),
folder=True,
is_public=False)
assert result == expected
@pytest.mark.asyncio
@pytest.mark.aiohttpretty
async def test_validate_v1_path_folder_article_bad_path(self, project_provider,
root_provider_fixtures):
item = root_provider_fixtures['folder_article_metadata']
file_id = str(item['id'])
path = '/{}'.format(file_id)
article_list_url = project_provider.build_url(False, *project_provider.root_path_parts,
'articles')
article_segments = (*project_provider.root_path_parts, 'articles', str(item['id']))
article_url = project_provider.build_url(False, *article_segments)
aiohttpretty.register_json_uri('GET', article_list_url,
body=root_provider_fixtures['list_project_articles'],
params={'page': '1', 'page_size': str(MAX_PAGE_SIZE)})
aiohttpretty.register_json_uri('GET', article_list_url, body=[],
params={'page': '2', 'page_size': str(MAX_PAGE_SIZE)})
aiohttpretty.register_json_uri('GET', article_url, body=item)
with pytest.raises(exceptions.NotFoundError) as e:
await project_provider.validate_v1_path(path)
assert e.value.code == 404
assert aiohttpretty.has_call(method='GET', uri=article_url)
@pytest.mark.asyncio
@pytest.mark.aiohttpretty
async def test_validate_v1_path_folder_article_bad_type(self, project_provider,
root_provider_fixtures):
item = root_provider_fixtures['folder_article_metadata']
file_id = str(item['id'])
path = '/{}/'.format(file_id)
item['defined_type'] = 5
article_list_url = project_provider.build_url(False, *project_provider.root_path_parts,
'articles')
article_segments = (*project_provider.root_path_parts, 'articles', str(item['id']))
article_url = project_provider.build_url(False, *article_segments)
aiohttpretty.register_json_uri('GET', article_list_url,
body=root_provider_fixtures['list_project_articles'],
params={'page': '1', 'page_size': str(MAX_PAGE_SIZE)})
aiohttpretty.register_json_uri('GET', article_list_url, body=[],
params={'page': '2', 'page_size': str(MAX_PAGE_SIZE)})
aiohttpretty.register_json_uri('GET', article_url, body=item)
with pytest.raises(exceptions.NotFoundError) as e:
await project_provider.validate_v1_path(path)
assert e.value.code == 404
assert aiohttpretty.has_call(method='GET', uri=article_url)
@pytest.mark.asyncio
@pytest.mark.aiohttpretty
async def test_project_validate_v1_path_root(self, project_provider):
path = '/'
result = await project_provider.validate_v1_path(path)
expected = FigsharePath(path, _ids=('', ), folder=True, is_public=False)
assert result == expected
@pytest.mark.asyncio
@pytest.mark.aiohttpretty
async def test_article_validate_v1_path_invalid_path(self, article_provider):
with pytest.raises(exceptions.InvalidPathError) as e:
await article_provider.validate_v1_path('/this/is/an/invalid/path')
assert e.value.code == 400
@pytest.mark.asyncio
@pytest.mark.aiohttpretty
async def test_validate_v1_path_invalid_path(self, project_provider):
path = 'whatever'
with pytest.raises(exceptions.InvalidPathError) as e:
await project_provider.validate_v1_path(path)
assert e.value.code == 400
@pytest.mark.asyncio
@pytest.mark.aiohttpretty
async def test_validate_v1_path_file_article(self, project_provider, root_provider_fixtures):
file_item = root_provider_fixtures['file_metadata']
item = root_provider_fixtures['file_article_metadata']
file_id = str(item['files'][0]['id'])
article_id = str(item['id'])
path = '/{}/{}'.format(article_id, file_id)
article_list_url = project_provider.build_url(False, *project_provider.root_path_parts,
'articles')
article_segments = (*project_provider.root_path_parts, 'articles', str(item['id']))
article_url = project_provider.build_url(False, *article_segments, 'files', file_id)
aiohttpretty.register_json_uri('GET', article_list_url,
body=root_provider_fixtures['list_project_articles'],
params={'page': '1', 'page_size': str(MAX_PAGE_SIZE)})
aiohttpretty.register_json_uri('GET', article_list_url, body=[],
params={'page': '2', 'page_size': str(MAX_PAGE_SIZE)})
aiohttpretty.register_json_uri('GET', article_url, body=file_item)
result = await project_provider.validate_v1_path(path)
expected = FigsharePath('/{}/{}'.format(item['title'], file_item['name']),
_ids=(project_provider.container_id, file_id),
folder=False, is_public=False)
assert result == expected
@pytest.mark.asyncio
@pytest.mark.aiohttpretty
async def test_validate_v1_path_file_article_public(self, project_provider,
root_provider_fixtures):
file_item = root_provider_fixtures['file_metadata_public']
item = root_provider_fixtures['file_article_metadata']
file_id = str(file_item['id'])
article_id = str(item['id'])
path = '/{}/{}'.format(article_id, file_id)
article_list_url = project_provider.build_url(False, *project_provider.root_path_parts,
'articles')
article_segments = (*project_provider.root_path_parts, 'articles', str(item['id']))
article_url = project_provider.build_url(True, *article_segments, 'files', file_id)
aiohttpretty.register_json_uri('GET', article_list_url,
body=root_provider_fixtures['public_list_project_articles'],
params={'page': '1', 'page_size': str(MAX_PAGE_SIZE)})
aiohttpretty.register_json_uri('GET', article_list_url, body=[],
params={'page': '2', 'page_size': str(MAX_PAGE_SIZE)})
aiohttpretty.register_json_uri('GET', article_url, body=file_item)
result = await project_provider.validate_v1_path(path)
expected = FigsharePath('/{}/{}'.format(item['title'], file_item['name']),
_ids=(project_provider.container_id, file_id),
folder=False, is_public=False)
assert result == expected
@pytest.mark.asyncio
@pytest.mark.aiohttpretty
async def test_validate_v1_path_file_article_bad_path(self, project_provider,
root_provider_fixtures):
file_item = root_provider_fixtures['file_metadata']
item = root_provider_fixtures['file_article_metadata']
file_id = str(item['files'][0]['id'])
article_id = str(item['id'])
path = '/{}/{}/'.format(article_id, file_id)
article_list_url = project_provider.build_url(False,
*project_provider.root_path_parts, 'articles')
article_segments = (*project_provider.root_path_parts, 'articles', str(item['id']))
article_url = project_provider.build_url(False, *article_segments, 'files', file_id)
aiohttpretty.register_json_uri('GET', article_list_url,
body=root_provider_fixtures['list_project_articles'],
params={'page': '1', 'page_size': str(MAX_PAGE_SIZE)})
aiohttpretty.register_json_uri('GET', article_list_url, body=[],
params={'page': '2', 'page_size': str(MAX_PAGE_SIZE)})
aiohttpretty.register_json_uri('GET', article_url, body=file_item)
with pytest.raises(exceptions.NotFoundError) as e:
await project_provider.validate_v1_path(path)
assert e.value.code == 404
class TestArticleV1ValidatePath:
@pytest.mark.asyncio
@pytest.mark.aiohttpretty
async def test_article_validate_v1_path_root(self, article_provider):
path = '/'
result = await article_provider.validate_v1_path(path)
expected = FigsharePath(path, _ids=('', ), folder=True, is_public=False)
assert result == expected
@pytest.mark.asyncio
@pytest.mark.aiohttpretty
async def test_article_validate_v1_path(self, article_provider, root_provider_fixtures):
item = root_provider_fixtures['file_metadata']
file_id = item['id']
path = '/' + str(file_id)
url = article_provider.build_url(False, *article_provider.root_path_parts, 'files',
str(file_id))
aiohttpretty.register_json_uri('GET', url, body=item)
result = await article_provider.validate_v1_path(path)
expected = FigsharePath('/' + item['name'], _ids=('', file_id), folder=False,
is_public=False)
assert result == expected
class TestProjectV0ValidatePath:
@pytest.mark.asyncio
@pytest.mark.aiohttpretty
async def test_validate_v0_path_folder_article(self, project_provider, root_provider_fixtures):
item = root_provider_fixtures['folder_article_metadata']
file_id = str(item['id'])
path = '/{}/'.format(file_id)
article_list_url = project_provider.build_url(False, *project_provider.root_path_parts,
'articles')
article_segments = (*project_provider.root_path_parts, 'articles', str(item['id']))
article_url = project_provider.build_url(False, *article_segments)
aiohttpretty.register_json_uri('GET', article_list_url,
body=root_provider_fixtures['list_project_articles'],
params={'page': '1', 'page_size': str(MAX_PAGE_SIZE)})
aiohttpretty.register_json_uri('GET', article_list_url, body=[],
params={'page': '2', 'page_size': str(MAX_PAGE_SIZE)})
aiohttpretty.register_json_uri('GET', article_url, body=item)
result = await project_provider.validate_path(path)
expected = FigsharePath('/{}/'.format(item['title']),
_ids=(project_provider.container_id, file_id),
folder=True,
is_public=False)
assert result == expected
@pytest.mark.asyncio
@pytest.mark.aiohttpretty
async def test_validate_v0_path_folder_article_bad_path(self, project_provider,
root_provider_fixtures):
bad_article_id = '000000000'
path = '/{}'.format(bad_article_id)
article_list_url = project_provider.build_url(False, *project_provider.root_path_parts,
'articles')
article_segments = (*project_provider.root_path_parts, 'articles', bad_article_id)
article_url = project_provider.build_url(False, *article_segments)
aiohttpretty.register_json_uri('GET', article_list_url,
body=root_provider_fixtures['list_project_articles'],
params={'page': '1', 'page_size': str(MAX_PAGE_SIZE)})
aiohttpretty.register_json_uri('GET', article_list_url, body=[],
params={'page': '2', 'page_size': str(MAX_PAGE_SIZE)})
aiohttpretty.register_json_uri('GET', article_url, status=404)
result = await project_provider.validate_path(path)
expected = FigsharePath(path, _ids=('', ''), folder=True, is_public=False)
assert result == expected
assert aiohttpretty.has_call(method='GET', uri=article_url)
@pytest.mark.asyncio
@pytest.mark.aiohttpretty
async def test_project_validate_v0_path_root(self, project_provider):
path = '/'
result = await project_provider.validate_path(path)
expected = FigsharePath(path, _ids=('', ), folder=True, is_public=False)
assert result == expected
@pytest.mark.asyncio
@pytest.mark.aiohttpretty
async def test_article_validate_v0_path_invalid_path(self, article_provider):
with pytest.raises(exceptions.InvalidPathError) as e:
await article_provider.validate_path('/this/is/an/invalid/path')
assert e.value.code == 400
@pytest.mark.asyncio
@pytest.mark.aiohttpretty
async def test_validate_v0_path_invalid_path(self, project_provider):
path = 'whatever'
with pytest.raises(exceptions.InvalidPathError) as e:
await project_provider.validate_path(path)
assert e.value.code == 400
@pytest.mark.asyncio
@pytest.mark.aiohttpretty
async def test_validate_v0_path_file_article(self, project_provider, root_provider_fixtures):
file_item = root_provider_fixtures['file_metadata']
item = root_provider_fixtures['file_article_metadata']
file_id = str(item['files'][0]['id'])
article_id = str(item['id'])
path = '/{}/{}'.format(article_id, file_id)
article_list_url = project_provider.build_url(False, *project_provider.root_path_parts,
'articles')
article_segments = (*project_provider.root_path_parts, 'articles', str(item['id']))
article_url = project_provider.build_url(False, *article_segments, 'files', file_id)
aiohttpretty.register_json_uri('GET', article_list_url,
body=root_provider_fixtures['list_project_articles'],
params={'page': '1', 'page_size': str(MAX_PAGE_SIZE)})
aiohttpretty.register_json_uri('GET', article_list_url, body=[],
params={'page': '2', 'page_size': str(MAX_PAGE_SIZE)})
aiohttpretty.register_json_uri('GET', article_url, body=file_item)
result = await project_provider.validate_path(path)
expected = FigsharePath('/{}/{}'.format(item['title'], file_item['name']),
_ids=(project_provider.container_id, file_id),
folder=False, is_public=False)
assert result == expected
@pytest.mark.asyncio
@pytest.mark.aiohttpretty
async def test_validate_v0_path_file_article_public(self, project_provider,
root_provider_fixtures):
file_item = root_provider_fixtures['file_metadata_public']
item = root_provider_fixtures['file_article_metadata']
file_id = str(file_item['id'])
article_id = str(item['id'])
path = '/{}/{}'.format(article_id, file_id)
article_list_url = project_provider.build_url(False, *project_provider.root_path_parts,
'articles')
article_segments = (*project_provider.root_path_parts, 'articles', str(item['id']))
article_url = project_provider.build_url(True, *article_segments, 'files', file_id)
aiohttpretty.register_json_uri('GET', article_list_url,
body=root_provider_fixtures['public_list_project_articles'],
params={'page': '1', 'page_size': str(MAX_PAGE_SIZE)})
aiohttpretty.register_json_uri('GET', article_list_url, body=[],
params={'page': '2', 'page_size': str(MAX_PAGE_SIZE)})
aiohttpretty.register_json_uri('GET', article_url, body=file_item)
result = await project_provider.validate_path(path)
expected = FigsharePath('/{}/{}'.format(item['title'], file_item['name']),
_ids=(project_provider.container_id, file_id),
folder=False, is_public=False)
assert result == expected
class TestArticleV0ValidatePath:
@pytest.mark.asyncio
@pytest.mark.aiohttpretty
async def test_article_validate_v0_path_root(self, article_provider):
path = '/'
result = await article_provider.validate_path(path)
expected = FigsharePath(path, _ids=('', ), folder=True, is_public=False)
assert result == expected
@pytest.mark.asyncio
@pytest.mark.aiohttpretty
async def test_article_validate_v0_path(self, article_provider, root_provider_fixtures):
item = root_provider_fixtures['file_metadata']
file_id = item['id']
path = '/' + str(file_id)
url = article_provider.build_url(False, *article_provider.root_path_parts, 'files',
str(file_id))
aiohttpretty.register_json_uri('GET', url, body=item)
result = await article_provider.validate_path(path)
expected = FigsharePath('/' + item['name'], _ids=('', file_id), folder=False,
is_public=False)
assert result == expected
class TestProjectMetadata:
@pytest.mark.asyncio
@pytest.mark.aiohttpretty
async def test_project_contents(self,
project_provider_2,
project_list_articles,
project_article_type_1_metadata,
project_article_type_3_metadata):
"""Test content listings for a project root.
"""
root_parts = project_provider_2.root_path_parts
# Register the requests that retrieve the article list of a project.
list_articles_url = project_provider_2.build_url(False, *root_parts, 'articles')
aiohttpretty.register_json_uri('GET', list_articles_url,
body=project_list_articles['page1'],
params={'page': '1', 'page_size': str(MAX_PAGE_SIZE)})
aiohttpretty.register_json_uri('GET', list_articles_url,
body=project_list_articles['page2'],
params={'page': '2', 'page_size': str(MAX_PAGE_SIZE)})
aiohttpretty.register_json_uri('GET', list_articles_url,
body=[],
params={'page': '3', 'page_size': str(MAX_PAGE_SIZE)})
# Register the requests that retrieve the metadata for each item in the article list.
article_id_1 = str(project_list_articles['page1'][0]['id'])
article_url_1 = project_provider_2.build_url(False, *root_parts, 'articles', article_id_1)
article_meta_1 = project_article_type_1_metadata['private']
aiohttpretty.register_json_uri('GET', article_url_1, body=article_meta_1)
article_id_2 = str(project_list_articles['page1'][1]['id'])
article_url_2 = project_provider_2.build_url(False, *root_parts, 'articles', article_id_2)
article_meta_2 = project_article_type_1_metadata['public']
aiohttpretty.register_json_uri('GET', article_url_2, body=article_meta_2)
article_id_3 = str(project_list_articles['page2'][0]['id'])
article_url_3 = project_provider_2.build_url(False, *root_parts, 'articles', article_id_3)
article_meta_3 = project_article_type_3_metadata['private']
aiohttpretty.register_json_uri('GET', article_url_3, body=article_meta_3)
article_id_4 = str(project_list_articles['page2'][1]['id'])
article_url_4 = project_provider_2.build_url(False, *root_parts, 'articles', article_id_4)
article_meta_4 = project_article_type_3_metadata['public']
aiohttpretty.register_json_uri('GET', article_url_4, body=article_meta_4)
# The ``metadata()`` call to test
        path = FigsharePath('/', _ids=('', ), folder=True)
        # list.sort() mutates in place and returns None; use sorted() so the
        # assertion below compares real lists rather than None == None.
        metadata_list = sorted(await project_provider_2.metadata(path), key=lambda x: x.path)
assert aiohttpretty.has_call(method='GET', uri=list_articles_url,
params={'page': '1', 'page_size': str(MAX_PAGE_SIZE)})
assert aiohttpretty.has_call(method='GET', uri=list_articles_url,
params={'page': '2', 'page_size': str(MAX_PAGE_SIZE)})
assert aiohttpretty.has_call(method='GET', uri=list_articles_url,
params={'page': '3', 'page_size': str(MAX_PAGE_SIZE)})
assert aiohttpretty.has_call(method='GET', uri=article_url_1)
assert aiohttpretty.has_call(method='GET', uri=article_url_2)
assert aiohttpretty.has_call(method='GET', uri=article_url_3)
assert aiohttpretty.has_call(method='GET', uri=article_url_4)
        expected = sorted([
metadata.FigshareFileMetadata(
project_article_type_1_metadata['public'],
raw_file=project_article_type_1_metadata['public']['files'][0]
),
metadata.FigshareFileMetadata(
project_article_type_1_metadata['private'],
raw_file=project_article_type_1_metadata['private']['files'][0]
),
metadata.FigshareFolderMetadata(project_article_type_3_metadata['public']),
metadata.FigshareFolderMetadata(project_article_type_3_metadata['private'])
        ], key=lambda x: x.path)
assert metadata_list == expected
@pytest.mark.asyncio
@pytest.mark.aiohttpretty
async def test_project_metadata_invalid_figshare_path(self, project_provider_2):
"""Test that figshare path can at most have three levels (including root itself).
"""
path = FigsharePath('/folder_lvl_1/folder_lvl_2/file_lvl_3.txt',
_ids=('1', '2', '3', '4', ), folder=False, is_public=False)
with pytest.raises(exceptions.NotFoundError) as e:
await project_provider_2.metadata(path)
assert e.value.code == 404
@pytest.mark.asyncio
@pytest.mark.aiohttpretty
async def test_project_metadata_bad_article_response(self, project_provider_2):
"""Test handling 404 response for figshare article request.
"""
root_parts = project_provider_2.root_path_parts
path = FigsharePath('/article_name/file_name',
_ids=('1', '2', '3'), folder=False, is_public=False)
article_url = project_provider_2.build_url(path.is_public, *root_parts,
'articles', path.parts[1].identifier)
aiohttpretty.register_json_uri('GET', article_url, status=404)
with pytest.raises(exceptions.NotFoundError) as e:
await project_provider_2.metadata(path)
assert aiohttpretty.has_call(method='GET', uri=article_url)
assert e.value.code == 404
@pytest.mark.asyncio
@pytest.mark.aiohttpretty
    async def test_project_file_article_file(self,
project_provider_2,
project_article_type_1_metadata,
project_article_type_1_file_metadata):
"""Test metadata for a file that belongs to an article of file type.
"""
root_parts = project_provider_2.root_path_parts
article_meta_json = project_article_type_1_metadata['private']
file_meta_json = project_article_type_1_file_metadata['private']
article_id = str(article_meta_json['id'])
article_name = article_meta_json['title']
file_id = str(file_meta_json['id'])
file_name = file_meta_json['name']
article_meta_url = project_provider_2.build_url(False, *root_parts, 'articles', article_id)
aiohttpretty.register_json_uri('GET', article_meta_url, body=article_meta_json)
path = FigsharePath('/{}/{}'.format(article_name, file_name),
_ids=('', article_id, file_id), folder=False, is_public=False)
result = await project_provider_2.metadata(path)
expected = metadata.FigshareFileMetadata(article_meta_json, file_meta_json)
assert aiohttpretty.has_call(method='GET', uri=article_meta_url)
assert result == expected
assert str(result.id) == file_id
assert result.name == file_name
assert result.path == '/{}/{}'.format(article_id, file_id)
assert result.materialized_path == '/{}/{}'.format(article_name, file_name)
assert str(result.article_id) == article_id
assert result.article_name == article_name
assert result.size == file_meta_json['size']
assert result.is_public == (article_meta_json['published_date'] is not None)
@pytest.mark.asyncio
@pytest.mark.aiohttpretty
async def test_project_folder_article_file_not_found(self,
project_provider_2,
project_article_type_3_metadata,
project_article_type_3_file_metadata):
"""Test the error case where the file is not found in the article's file list.
"""
root_parts = project_provider_2.root_path_parts
article_meta_json = project_article_type_3_metadata['private']
article_meta_json['files'] = []
article_id = str(article_meta_json['id'])
article_name = article_meta_json['title']
file_id = str(project_article_type_3_file_metadata['private']['id'])
file_name = project_article_type_3_file_metadata['private']['name']
article_meta_url = project_provider_2.build_url(False, *root_parts, 'articles', article_id)
aiohttpretty.register_json_uri('GET', article_meta_url, body=article_meta_json)
path = FigsharePath('/{}/{}'.format(article_name, file_name),
_ids=('', article_id, file_id), folder=False, is_public=False)
with pytest.raises(exceptions.NotFoundError) as e:
await project_provider_2.metadata(path)
assert aiohttpretty.has_call(method='GET', uri=article_meta_url)
assert e.value.code == 404
@pytest.mark.asyncio
@pytest.mark.aiohttpretty
async def test_project_folder_article_type_error(self,
project_provider,
project_article_type_3_metadata):
"""Test the error case where the folder article is of a wrong type.
"""
root_parts = project_provider.root_path_parts
article_meta_json = project_article_type_3_metadata['private']
article_meta_json['defined_type'] = 15
article_id = str(article_meta_json['id'])
article_name = article_meta_json['title']
article_meta_url = project_provider.build_url(False, *root_parts, 'articles', article_id)
aiohttpretty.register_json_uri('GET', article_meta_url, body=article_meta_json)
path = FigsharePath('/{}'.format(article_name),
_ids=('', article_id), folder=True, is_public=False)
with pytest.raises(exceptions.NotFoundError) as e:
await project_provider.metadata(path)
assert aiohttpretty.has_call(method='GET', uri=article_meta_url)
assert e.value.code == 404
@pytest.mark.asyncio
@pytest.mark.aiohttpretty
async def test_project_folder_article_contents(self,
project_provider_2,
project_article_type_3_metadata):
"""Test content listing for an article of folder type.
"""
root_parts = project_provider_2.root_path_parts
article_meta_json = project_article_type_3_metadata['private']
article_id = str(article_meta_json['id'])
article_name = article_meta_json['title']
article_meta_url = project_provider_2.build_url(False, *root_parts, 'articles', article_id)
aiohttpretty.register_json_uri('GET', article_meta_url, body=article_meta_json)
path = FigsharePath('/{}'.format(article_name), _ids=('', article_id), folder=True,
is_public=False)
        result = sorted(await project_provider_2.metadata(path), key=lambda x: x.path)
        expected = sorted([
metadata.FigshareFileMetadata(article_meta_json,
raw_file=article_meta_json['files'][0]),
metadata.FigshareFileMetadata(article_meta_json,
raw_file=article_meta_json['files'][1])
        ], key=lambda x: x.path)
assert aiohttpretty.has_call(method='GET', uri=article_meta_url)
assert result == expected
@pytest.mark.asyncio
@pytest.mark.aiohttpretty
async def test_project_folder_article_file(self,
project_provider_2,
project_article_type_3_metadata,
project_article_type_3_file_metadata):
"""Test metadata for a file that belongs to an article of folder type.
"""
root_parts = project_provider_2.root_path_parts
article_meta_json = project_article_type_3_metadata['private']
file_meta_json = project_article_type_3_file_metadata['private']
article_id = str(article_meta_json['id'])
article_name = article_meta_json['title']
file_id = str(file_meta_json['id'])
file_name = file_meta_json['name']
article_meta_url = project_provider_2.build_url(False, *root_parts, 'articles', article_id)
aiohttpretty.register_json_uri('GET', article_meta_url, body=article_meta_json)
path = FigsharePath('/{}/{}'.format(article_name, file_name),
_ids=('', article_id, file_id), folder=False, is_public=False)
result = await project_provider_2.metadata(path)
assert aiohttpretty.has_call(method='GET', uri=article_meta_url)
expected = metadata.FigshareFileMetadata(article_meta_json, file_meta_json)
assert result == expected
assert str(result.id) == file_id
assert result.name == file_name
assert result.path == '/{}/{}'.format(article_id, file_id)
assert result.materialized_path == '/{}/{}'.format(article_name, file_name)
assert str(result.article_id) == article_id
assert result.article_name == article_name
assert result.size == file_meta_json['size']
assert result.is_public == (article_meta_json['published_date'] is not None)
assert result.extra['hashes']['md5'] == '68c3a15be1ddc27893c17eaab61f2d3d'
class TestArticleMetadata:
@pytest.mark.asyncio
@pytest.mark.aiohttpretty
async def test_article_file_contents(self, article_provider, root_provider_fixtures):
article_metadata = root_provider_fixtures['folder_article_metadata']
file_metadata = root_provider_fixtures['folder_file_metadata']
root_parts = article_provider.root_path_parts
article_id = str(article_metadata['id'])
article_name = article_metadata['title']
file_id = str(file_metadata['id'])
file_name = file_metadata['name']
folder_article_metadata_url = article_provider.build_url(False, *root_parts)
aiohttpretty.register_json_uri('GET', folder_article_metadata_url, body=article_metadata)
path = FigsharePath('/{}'.format(file_name), _ids=('', file_id), folder=False,
is_public=False)
result = await article_provider.metadata(path)
assert aiohttpretty.has_call(method='GET', uri=folder_article_metadata_url)
expected = metadata.FigshareFileMetadata(article_metadata, file_metadata)
assert result == expected
assert str(result.id) == file_id
assert result.name == file_name
assert result.path == '/{}/{}'.format(article_id, file_id)
assert result.materialized_path == '/{}/{}'.format(article_name, file_name)
assert result.article_name == article_name
assert result.size == file_metadata['size']
assert result.is_public is False
@pytest.mark.asyncio
@pytest.mark.aiohttpretty
async def test_article_root_contents(self, article_provider, root_provider_fixtures):
article_metadata = root_provider_fixtures['folder_article_metadata']
file_metadata = root_provider_fixtures['folder_file_metadata']
root_parts = article_provider.root_path_parts
file_id = str(file_metadata['id'])
folder_article_metadata_url = article_provider.build_url(False, *root_parts)
file_metadata_url = article_provider.build_url(False, *root_parts, 'files', file_id)
aiohttpretty.register_json_uri('GET', folder_article_metadata_url, body=article_metadata)
aiohttpretty.register_json_uri('GET', file_metadata_url, body=file_metadata)
path = FigsharePath('/', _ids=(file_id, ), folder=True, is_public=False)
result = await article_provider.metadata(path)
expected = [metadata.FigshareFileMetadata(article_metadata, file_metadata)]
assert result == expected
class TestProjectCRUD:
"""Due to a bug in aiohttpretty, the file stream is not being read from on file upload for the
Figshare provider. Because the file stream isn't read, the stream hash calculator never gets
any data, and the computed md5sum is always that of the empty string. To work around this, the
fixtures currently include the empty md5 in the metadata. Once aiohttpretty is fixed, the
metadata can be reverted to deliver the actual content hash."""
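    # For reference, the hash the fixtures carry is that of the empty byte
    # string: hashlib.md5(b'').hexdigest() == 'd41d8cd98f00b204e9800998ecf8427e'.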
@pytest.mark.asyncio
@pytest.mark.aiohttpretty
async def test_project_upload(self, file_stream, project_provider,
root_provider_fixtures, crud_fixtures):
file_name = 'barricade.gif'
path = FigsharePath('/' + file_name, _ids=('', ''), folder=False, is_public=False)
root_parts = project_provider.root_path_parts
article_id = str(crud_fixtures['upload_article_metadata']['id'])
file_metadata = root_provider_fixtures['get_file_metadata']
create_article_url = project_provider.build_url(False, *root_parts, 'articles')
create_file_url = project_provider.build_url(False, 'articles', article_id, 'files')
file_url = project_provider.build_url(False, 'articles', article_id, 'files',
str(file_metadata['id']))
get_article_url = project_provider.build_url(False, *root_parts, 'articles', article_id)
upload_url = file_metadata['upload_url']
aiohttpretty.register_json_uri('POST', create_article_url,
body=crud_fixtures['create_article_metadata'], status=201)
aiohttpretty.register_json_uri('POST', create_file_url,
body=crud_fixtures['create_file_metadata'], status=201)
aiohttpretty.register_json_uri('GET', file_url,
body=file_metadata)
aiohttpretty.register_json_uri('GET', upload_url,
body=root_provider_fixtures['get_upload_metadata'])
aiohttpretty.register_uri('PUT', '{}/1'.format(upload_url), status=200)
aiohttpretty.register_uri('POST', file_url, status=202)
aiohttpretty.register_json_uri('GET', get_article_url,
body=crud_fixtures['upload_article_metadata'])
# md5 hash calculation is being hacked around. see test class docstring
result, created = await project_provider.upload(file_stream, path)
expected = metadata.FigshareFileMetadata(
crud_fixtures['upload_article_metadata'],
crud_fixtures['upload_article_metadata']['files'][0],
)
assert aiohttpretty.has_call(
method='POST',
uri=create_article_url,
data=json.dumps({
'title': 'barricade.gif',
})
)
assert aiohttpretty.has_call(method='PUT', uri='{}/1'.format(upload_url))
assert aiohttpretty.has_call(method='POST', uri=create_file_url)
assert result == expected
@pytest.mark.asyncio
@pytest.mark.aiohttpretty
async def test_project_upload_checksum_mismatch(self, project_provider,
root_provider_fixtures,
crud_fixtures, file_stream):
file_name = 'barricade.gif'
item = root_provider_fixtures['get_file_metadata']
root_parts = project_provider.root_path_parts
path = FigsharePath('/' + file_name, _ids=('', ''), folder=False, is_public=False)
article_id = str(crud_fixtures['checksum_mismatch_article_metadata']['id'])
create_article_url = project_provider.build_url(False, *root_parts, 'articles')
create_file_url = project_provider.build_url(False, 'articles', article_id, 'files')
file_url = project_provider.build_url(False, 'articles', article_id, 'files',
str(item['id']))
get_article_url = project_provider.build_url(False, *root_parts, 'articles', article_id)
upload_url = item['upload_url']
aiohttpretty.register_json_uri('POST', create_article_url,
body=crud_fixtures['create_article_metadata'], status=201)
aiohttpretty.register_json_uri('POST', create_file_url,
body=crud_fixtures['create_file_metadata'], status=201)
aiohttpretty.register_json_uri('GET', file_url, body=item)
aiohttpretty.register_json_uri('GET', upload_url,
body=root_provider_fixtures['get_upload_metadata'])
aiohttpretty.register_uri('PUT', '{}/1'.format(upload_url), status=200)
aiohttpretty.register_uri('POST', file_url, status=202)
aiohttpretty.register_json_uri('GET', get_article_url,
body=crud_fixtures['checksum_mismatch_article_metadata'])
with pytest.raises(exceptions.UploadChecksumMismatchError):
await project_provider.upload(file_stream, path)
assert aiohttpretty.has_call(
method='POST',
uri=create_article_url,
data=json.dumps({
'title': 'barricade.gif',
})
)
assert aiohttpretty.has_call(method='POST', uri=create_file_url)
assert aiohttpretty.has_call(method='GET', uri=file_url)
assert aiohttpretty.has_call(method='GET', uri=upload_url)
assert aiohttpretty.has_call(method='PUT', uri='{}/1'.format(upload_url))
assert aiohttpretty.has_call(method='POST', uri=file_url)
assert aiohttpretty.has_call(method='GET', uri=get_article_url)
@pytest.mark.asyncio
@pytest.mark.aiohttpretty
async def test_project_folder_upload(self, file_stream,
project_provider,
root_provider_fixtures,
crud_fixtures):
file_name = 'barricade.gif'
article_id = str(root_provider_fixtures['list_project_articles'][1]['id'])
article_name = root_provider_fixtures['list_project_articles'][1]['title']
root_parts = project_provider.root_path_parts
path = FigsharePath('/{}/{}'.format(article_name, file_name),
_ids=('', article_id, ''), folder=False, is_public=False)
file_metadata = root_provider_fixtures['get_file_metadata']
create_file_url = project_provider.build_url(False, 'articles', article_id, 'files')
file_url = project_provider.build_url(False, 'articles', article_id, 'files',
str(file_metadata['id']))
get_article_url = project_provider.build_url(False, *root_parts, 'articles', article_id)
upload_url = file_metadata['upload_url']
aiohttpretty.register_json_uri('POST', create_file_url,
body=crud_fixtures['create_file_metadata'], status=201)
aiohttpretty.register_json_uri('GET', file_url, body=file_metadata)
aiohttpretty.register_json_uri('GET', upload_url,
body=root_provider_fixtures['get_upload_metadata'])
aiohttpretty.register_uri('PUT', '{}/1'.format(upload_url), status=200)
aiohttpretty.register_uri('POST', file_url, status=202)
aiohttpretty.register_json_uri('GET', get_article_url,
body=crud_fixtures['upload_folder_article_metadata'])
# md5 hash calculation is being hacked around. see test class docstring
result, created = await project_provider.upload(file_stream, path)
expected = metadata.FigshareFileMetadata(
crud_fixtures['upload_folder_article_metadata'],
crud_fixtures['upload_folder_article_metadata']['files'][0],
)
assert aiohttpretty.has_call(method='PUT', uri='{}/1'.format(upload_url))
assert aiohttpretty.has_call(method='POST', uri=create_file_url)
assert result == expected
@pytest.mark.asyncio
@pytest.mark.aiohttpretty
async def test_project_folder_upload_undefined_type(self, file_stream,
project_provider,
root_provider_fixtures,
crud_fixtures):
file_name = 'barricade.gif'
article_id = str(root_provider_fixtures['list_project_articles'][1]['id'])
article_name = root_provider_fixtures['list_project_articles'][1]['title']
changed_metadata = crud_fixtures['upload_folder_article_metadata']
changed_metadata['defined_type'] = 5
root_parts = project_provider.root_path_parts
list_articles_url = project_provider.build_url(False, *root_parts, 'articles')
aiohttpretty.register_json_uri('POST', list_articles_url, status=201,
body=crud_fixtures['create_upload_article_metadata'])
path = FigsharePath('/{}/{}'.format(article_name, file_name), _ids=('', article_id, ''),
folder=False, is_public=False)
file_metadata = root_provider_fixtures['get_file_metadata']
create_file_url = project_provider.build_url(False, 'articles', article_id, 'files')
file_url = project_provider.build_url(False, 'articles', article_id, 'files',
str(file_metadata['id']))
get_article_url = project_provider.build_url(False, *root_parts, 'articles', article_id)
upload_url = file_metadata['upload_url']
aiohttpretty.register_json_uri('POST', create_file_url,
body=crud_fixtures['create_file_metadata'], status=201)
aiohttpretty.register_json_uri('GET', file_url, body=file_metadata)
aiohttpretty.register_json_uri('GET', upload_url,
body=root_provider_fixtures['get_upload_metadata'])
aiohttpretty.register_uri('PUT', '{}/1'.format(upload_url), status=200)
aiohttpretty.register_uri('POST', file_url, status=202)
aiohttpretty.register_json_uri('GET', get_article_url, body=changed_metadata)
result, created = await project_provider.upload(file_stream, path)
expected = metadata.FigshareFileMetadata(
crud_fixtures['upload_folder_article_metadata'],
crud_fixtures['upload_folder_article_metadata']['files'][0],
)
assert aiohttpretty.has_call(method='PUT', uri='{}/1'.format(upload_url))
assert aiohttpretty.has_call(method='POST', uri=create_file_url)
assert result == expected
@pytest.mark.asyncio
@pytest.mark.aiohttpretty
async def test_project_upload_update_error(self, file_stream, project_provider):
path = FigsharePath('/testfolder/whatever.txt',
_ids=('512415', '123325', '8890481'),
folder=False, is_public=False)
with pytest.raises(exceptions.UnsupportedOperationError) as e:
await project_provider.upload(file_stream, path)
assert e.value.code == 403
@pytest.mark.asyncio
@pytest.mark.aiohttpretty
async def test_project_article_download(self, project_provider, root_provider_fixtures):
article_id = str(root_provider_fixtures['list_project_articles'][0]['id'])
file_id = str(root_provider_fixtures['file_article_metadata']['files'][0]['id'])
article_name = str(root_provider_fixtures['list_project_articles'][0]['title'])
file_name = str(root_provider_fixtures['file_article_metadata']['files'][0]['name'])
body = b'castle on a cloud'
root_parts = project_provider.root_path_parts
file_metadata_url = project_provider.build_url(False, *root_parts, 'articles', article_id,
'files', file_id)
article_metadata_url = project_provider.build_url(False, *root_parts, 'articles',
article_id)
download_url = root_provider_fixtures['file_metadata']['download_url']
aiohttpretty.register_json_uri('GET', file_metadata_url,
body=root_provider_fixtures['file_metadata'])
aiohttpretty.register_json_uri('GET', article_metadata_url,
body=root_provider_fixtures['file_article_metadata'])
aiohttpretty.register_uri('GET', download_url,
params={'token': project_provider.token},
body=body, auto_length=True)
path = FigsharePath('/{}/{}'.format(article_name, file_name),
_ids=('', article_id, file_id),
folder=False, is_public=False)
result = await project_provider.download(path)
content = await result.read()
assert content == body
@pytest.mark.asyncio
@pytest.mark.aiohttpretty
async def test_project_file_delete(self, project_provider, root_provider_fixtures):
file_id = str(root_provider_fixtures['file_metadata']['id'])
file_name = root_provider_fixtures['file_metadata']['name']
article_id = str(root_provider_fixtures['list_project_articles'][0]['id'])
article_name = str(root_provider_fixtures['list_project_articles'][0]['title'])
root_parts = project_provider.root_path_parts
file_url = project_provider.build_url(False, *root_parts,
'articles', article_id, 'files', file_id)
file_article_url = project_provider.build_url(False, *root_parts, 'articles', article_id)
aiohttpretty.register_json_uri('GET', file_url,
body=root_provider_fixtures['file_metadata'])
aiohttpretty.register_json_uri('GET', file_article_url,
body=root_provider_fixtures['file_article_metadata'])
aiohttpretty.register_uri('DELETE', file_article_url, status=204)
path = FigsharePath('/{}/{}'.format(article_name, file_name),
_ids=('', article_id, file_id),
folder=False, is_public=False)
result = await project_provider.delete(path)
assert result is None
assert aiohttpretty.has_call(method='DELETE', uri=file_article_url)
@pytest.mark.asyncio
@pytest.mark.aiohttpretty
async def test_project_file_delete_folder_type(self, project_provider, root_provider_fixtures):
item = root_provider_fixtures['file_article_metadata']
item['defined_type'] = 4
file_id = str(root_provider_fixtures['file_metadata']['id'])
article_id = str(root_provider_fixtures['list_project_articles'][0]['id'])
root_parts = project_provider.root_path_parts
path = FigsharePath('/{}/{}'.format(article_id, file_id),
_ids=('', article_id, file_id), folder=False)
list_articles_url = project_provider.build_url(False, *root_parts, 'articles')
file_article_url = project_provider.build_url(False, 'articles', path.parts[1]._id,
'files', path.parts[2]._id)
get_file_article_url = project_provider.build_url(False,
*root_parts, 'articles', article_id)
aiohttpretty.register_json_uri('GET', list_articles_url,
body=root_provider_fixtures['list_project_articles'],
params={'page': '1', 'page_size': str(MAX_PAGE_SIZE)})
aiohttpretty.register_json_uri('GET', list_articles_url, body=[],
params={'page': '2', 'page_size': str(MAX_PAGE_SIZE)})
aiohttpretty.register_json_uri('GET', get_file_article_url, body=item)
aiohttpretty.register_uri('DELETE', file_article_url, status=204)
result = await project_provider.delete(path)
assert result is None
assert aiohttpretty.has_call(method='DELETE', uri=file_article_url)
@pytest.mark.asyncio
@pytest.mark.aiohttpretty
async def test_project_file_delete_bad_path(self, project_provider, root_provider_fixtures):
file_name = str(root_provider_fixtures['file_metadata']['name'])
article_name = str(root_provider_fixtures['list_project_articles'][0]['title'])
path = FigsharePath('/{}/{}'.format(article_name, file_name), _ids=('',), folder=False)
with pytest.raises(exceptions.NotFoundError) as e:
await project_provider.delete(path)
assert e.value.code == 404
@pytest.mark.asyncio
@pytest.mark.aiohttpretty
async def test_project_folder_delete(self, project_provider, root_provider_fixtures):
article_id = str(root_provider_fixtures['list_project_articles'][1]['id'])
root_parts = project_provider.root_path_parts
list_articles_url = project_provider.build_url(False, *root_parts, 'articles')
folder_article_url = project_provider.build_url(False, *root_parts, 'articles', article_id)
aiohttpretty.register_json_uri('GET', list_articles_url,
body=root_provider_fixtures['list_project_articles'],
params={'page': '1', 'page_size': str(MAX_PAGE_SIZE)})
aiohttpretty.register_json_uri('GET', list_articles_url, body=[],
params={'page': '2', 'page_size': str(MAX_PAGE_SIZE)})
aiohttpretty.register_json_uri('GET', folder_article_url,
body=root_provider_fixtures['folder_article_metadata'])
aiohttpretty.register_uri('DELETE', folder_article_url, status=204)
path = FigsharePath('/{}'.format(article_id), _ids=('', article_id), folder=True)
result = await project_provider.delete(path)
assert result is None
assert aiohttpretty.has_call(method='DELETE', uri=folder_article_url)
@pytest.mark.asyncio
@pytest.mark.aiohttpretty
async def test_project_delete_root_confirm_error(self, project_provider):
path = FigsharePath('/', _ids=('11241213', ), folder=True, is_public=False)
with pytest.raises(exceptions.DeleteError) as e:
await project_provider.delete(path)
assert e.value.code == 400
@pytest.mark.asyncio
@pytest.mark.aiohttpretty
async def test_project_delete_root(self, project_provider, root_provider_fixtures):
path = FigsharePath('/', _ids=('11241213', ), folder=True, is_public=False)
item = root_provider_fixtures['list_project_articles']
list_articles_url = project_provider.build_url(False,
*project_provider.root_path_parts,
'articles')
delete_url_1 = project_provider.build_url(False, *project_provider.root_path_parts,
'articles', str(item[0]['id']))
delete_url_2 = project_provider.build_url(False, *project_provider.root_path_parts,
'articles', str(item[1]['id']))
aiohttpretty.register_json_uri('DELETE', delete_url_1, status=204)
aiohttpretty.register_json_uri('DELETE', delete_url_2, status=204)
aiohttpretty.register_json_uri('GET', list_articles_url,
body=root_provider_fixtures['list_project_articles'],
params={'page': '1', 'page_size': str(MAX_PAGE_SIZE)})
aiohttpretty.register_json_uri('GET', list_articles_url, body=[],
params={'page': '2', 'page_size': str(MAX_PAGE_SIZE)})
await project_provider.delete(path, 1)
assert aiohttpretty.has_call(method='DELETE', uri=delete_url_2)
assert aiohttpretty.has_call(method='DELETE', uri=delete_url_1)
@pytest.mark.asyncio
@pytest.mark.aiohttpretty
async def test_project_delete_errors(self, project_provider):
path = FigsharePath('/test.txt', _ids=('11241213', '123123'), folder=False, is_public=False)
with pytest.raises(exceptions.NotFoundError) as e:
await project_provider.delete(path)
assert e.value.code == 404
path = FigsharePath('/test/test.txt', _ids=('11241213', '123123', '123123'),
folder=True, is_public=False)
with pytest.raises(exceptions.NotFoundError) as e:
await project_provider.delete(path)
assert e.value.code == 404
@pytest.mark.asyncio
@pytest.mark.aiohttpretty
async def test_create_folder(self, project_provider, root_provider_fixtures, crud_fixtures):
item = root_provider_fixtures['file_metadata']
file_id = str(item['id'])
path = FigsharePath('/folder2/', _ids=('', file_id), folder=True)
create_url = project_provider.build_url(False,
*project_provider.root_path_parts, 'articles')
metadata_url = crud_fixtures['create_article_metadata']['location']
aiohttpretty.register_json_uri('POST', create_url,
body=crud_fixtures['create_article_metadata'], status=201)
aiohttpretty.register_json_uri('GET', metadata_url,
body=root_provider_fixtures['folder_article_metadata'])
result = await project_provider.create_folder(path)
assert result is not None
@pytest.mark.asyncio
@pytest.mark.aiohttpretty
async def test_create_folder_invalid_path(self, project_provider, root_provider_fixtures):
item = root_provider_fixtures['file_metadata']
file_id = str(item['id'])
path = FigsharePath('/folder2/folder3/folder4/folder5',
_ids=('', file_id, file_id, file_id), folder=True)
with pytest.raises(exceptions.CreateFolderError) as e:
await project_provider.create_folder(path)
assert e.value.code == 400
class TestArticleCRUD:
@pytest.mark.asyncio
@pytest.mark.aiohttpretty
async def test_article_upload(self, file_stream, article_provider,
root_provider_fixtures, crud_fixtures):
file_name = 'barricade.gif'
file_id = str(root_provider_fixtures['get_file_metadata']['id'])
root_parts = article_provider.root_path_parts
path = FigsharePath('/' + file_name, _ids=('', ''), folder=False, is_public=False)
create_file_url = article_provider.build_url(False, *root_parts, 'files')
file_url = article_provider.build_url(False, *root_parts, 'files', file_id)
get_article_url = article_provider.build_url(False, *root_parts)
upload_url = root_provider_fixtures['get_file_metadata']['upload_url']
aiohttpretty.register_json_uri('POST', create_file_url,
body=crud_fixtures['create_file_metadata'], status=201)
aiohttpretty.register_json_uri('GET', file_url,
body=root_provider_fixtures['get_file_metadata'])
aiohttpretty.register_json_uri('GET',
root_provider_fixtures['get_file_metadata']['upload_url'],
body=root_provider_fixtures['get_upload_metadata'])
aiohttpretty.register_uri('PUT', '{}/1'.format(upload_url), status=200)
aiohttpretty.register_uri('POST', file_url, status=202)
aiohttpretty.register_json_uri('GET', get_article_url,
body=crud_fixtures['upload_folder_article_metadata'])
# md5 hash calculation is being hacked around. see test class docstring
result, created = await article_provider.upload(file_stream, path)
expected = metadata.FigshareFileMetadata(
crud_fixtures['upload_folder_article_metadata'],
crud_fixtures['upload_folder_article_metadata']['files'][0],
)
assert aiohttpretty.has_call(method='PUT', uri='{}/1'.format(upload_url))
assert aiohttpretty.has_call(method='POST', uri=create_file_url)
assert result == expected
@pytest.mark.asyncio
@pytest.mark.aiohttpretty
async def test_article_download(self, article_provider, root_provider_fixtures):
item = root_provider_fixtures['file_metadata']
file_id = str(item['id'])
file_name = str(item['name'])
body = b'castle on a cloud'
root_parts = article_provider.root_path_parts
article_metadata_url = article_provider.build_url(False, *root_parts)
download_url = item['download_url']
aiohttpretty.register_json_uri('GET', article_metadata_url,
body=root_provider_fixtures['file_article_metadata'])
aiohttpretty.register_uri('GET', download_url, params={'token': article_provider.token},
body=body, auto_length=True)
path = FigsharePath('/{}'.format(file_name), _ids=('', file_id),
folder=False, is_public=False)
result = await article_provider.download(path)
content = await result.read()
assert content == body
@pytest.mark.asyncio
@pytest.mark.aiohttpretty
async def test_article_download_range(self, article_provider, root_provider_fixtures):
item = root_provider_fixtures['file_metadata']
file_id = str(item['id'])
file_name = str(item['name'])
body = b'castle on a cloud'
root_parts = article_provider.root_path_parts
article_metadata_url = article_provider.build_url(False, *root_parts)
download_url = item['download_url']
aiohttpretty.register_json_uri('GET', article_metadata_url,
body=root_provider_fixtures['file_article_metadata'])
aiohttpretty.register_uri('GET', download_url, params={'token': article_provider.token},
body=body[0:2], auto_length=True, status=206)
path = FigsharePath('/{}'.format(file_name), _ids=('', file_id),
folder=False, is_public=False)
result = await article_provider.download(path, range=(0, 1))
assert result.partial
content = await result.read()
assert content == b'ca'
assert aiohttpretty.has_call(method='GET', uri=download_url,
headers={'Range': 'bytes=0-1',
'Authorization': 'token freddie'},
params={'token': 'freddie'})
@pytest.mark.asyncio
@pytest.mark.aiohttpretty
async def test_article_download_path_not_file(self, article_provider, root_provider_fixtures):
path = FigsharePath('/testfolder/', _ids=('', ), folder=True, is_public=False)
with pytest.raises(exceptions.NotFoundError) as e:
await article_provider.download(path)
assert e.value.code == 404
assert e.value.message == 'Could not retrieve file or directory /{}'.format(path.path)
@pytest.mark.asyncio
@pytest.mark.aiohttpretty
async def test_article_download_no_downloadurl(self, article_provider, error_fixtures):
item = error_fixtures['file_metadata_missing_download_url']
file_id = str(item['id'])
path = FigsharePath('/{}'.format(file_id), _ids=('', file_id), folder=False)
root_parts = article_provider.root_path_parts
file_metadata_url = article_provider.build_url(False, *root_parts, 'files', file_id)
article_metadata_url = article_provider.build_url(False, *root_parts)
missing_download_url = error_fixtures['file_article_metadata_missing_download_url']
aiohttpretty.register_json_uri('GET', file_metadata_url, body=item)
aiohttpretty.register_json_uri('GET', article_metadata_url, body=missing_download_url)
with pytest.raises(exceptions.DownloadError) as e:
await article_provider.download(path)
assert e.value.code == 403
assert e.value.message == 'Download not available'
@pytest.mark.asyncio
@pytest.mark.aiohttpretty
async def test_article_upload_checksum_mismatch(self, file_stream, article_provider,
root_provider_fixtures, crud_fixtures):
file_name = 'barricade.gif'
item = root_provider_fixtures['get_file_metadata']
file_id = str(item['id'])
root_parts = article_provider.root_path_parts
validate_file_url = article_provider.build_url(False, *root_parts, 'files', file_name)
aiohttpretty.register_uri('GET', validate_file_url, status=404)
path = FigsharePath('/' + file_name, _ids=('', file_id), folder=False, is_public=False)
create_file_url = article_provider.build_url(False, *root_parts, 'files')
file_url = article_provider.build_url(False, *root_parts, 'files', file_id)
get_article_url = article_provider.build_url(False, *root_parts)
upload_url = item['upload_url']
aiohttpretty.register_json_uri('POST', create_file_url,
body=crud_fixtures['create_file_metadata'], status=201)
aiohttpretty.register_json_uri('GET', file_url, body=item)
aiohttpretty.register_json_uri('GET', item['upload_url'],
body=root_provider_fixtures['get_upload_metadata'])
aiohttpretty.register_uri('PUT', '{}/1'.format(upload_url), status=200)
aiohttpretty.register_uri('POST', file_url, status=202)
aiohttpretty.register_json_uri('GET', get_article_url,
body=crud_fixtures['checksum_mismatch_folder_article_metadata'])
with pytest.raises(exceptions.UploadChecksumMismatchError):
await article_provider.upload(file_stream, path)
assert aiohttpretty.has_call(method='POST', uri=create_file_url)
assert aiohttpretty.has_call(method='GET', uri=file_url)
assert aiohttpretty.has_call(method='GET', uri=item['upload_url'])
assert aiohttpretty.has_call(method='PUT', uri='{}/1'.format(upload_url))
assert aiohttpretty.has_call(method='POST', uri=file_url)
assert aiohttpretty.has_call(method='GET', uri=get_article_url)
@pytest.mark.asyncio
@pytest.mark.aiohttpretty
async def test_article_file_delete_root_no_confirm(self, article_provider):
path = FigsharePath('/', _ids=('11241213', ), folder=True, is_public=False)
with pytest.raises(exceptions.DeleteError) as e:
await article_provider.delete(path)
assert e.value.code == 400
@pytest.mark.asyncio
@pytest.mark.aiohttpretty
async def test_article_file_delete_root(self, article_provider, root_provider_fixtures):
path = FigsharePath('/', _ids=('11241213', ), folder=True, is_public=False)
item = root_provider_fixtures['file_article_metadata']
list_articles_url = article_provider.build_url(False, *article_provider.root_path_parts)
delete_url = article_provider.build_url(False, *article_provider.root_path_parts,
'files', str(item['files'][0]['id']))
aiohttpretty.register_json_uri('DELETE', delete_url, status=204)
aiohttpretty.register_json_uri('GET', list_articles_url, body=item)
await article_provider.delete(path, 1)
assert aiohttpretty.has_call(method='DELETE', uri=delete_url)
@pytest.mark.asyncio
@pytest.mark.aiohttpretty
async def test_article_create_folder(self, article_provider):
path = '/'
with pytest.raises(exceptions.CreateFolderError) as e:
await article_provider.create_folder(path)
assert e.value.code == 400
@pytest.mark.asyncio
@pytest.mark.aiohttpretty
async def test_article_file_delete(self, article_provider, root_provider_fixtures):
file_id = str(root_provider_fixtures['file_metadata']['id'])
file_name = root_provider_fixtures['file_metadata']['name']
file_url = article_provider.build_url(False, *article_provider.root_path_parts, 'files',
file_id)
aiohttpretty.register_uri('DELETE', file_url, status=204)
path = FigsharePath('/{}'.format(file_name), _ids=('', file_id), folder=False)
result = await article_provider.delete(path)
assert result is None
assert aiohttpretty.has_call(method='DELETE', uri=file_url)
@pytest.mark.asyncio
@pytest.mark.aiohttpretty
async def test_article_download_404(self, article_provider, root_provider_fixtures):
item = root_provider_fixtures['file_metadata']
file_id = str(item['id'])
path = FigsharePath('/{}'.format(file_id), _ids=('', file_id), folder=False)
root_parts = article_provider.root_path_parts
file_metadata_url = article_provider.build_url(False, *root_parts, 'files', file_id)
article_metadata_url = article_provider.build_url(False, *root_parts)
download_url = item['download_url']
aiohttpretty.register_json_uri('GET', file_metadata_url, body=item)
aiohttpretty.register_json_uri('GET', article_metadata_url,
body=root_provider_fixtures['file_article_metadata'])
aiohttpretty.register_uri('GET', download_url, params={'token': article_provider.token},
status=404, auto_length=True)
with pytest.raises(exceptions.DownloadError) as e:
await article_provider.download(path)
assert e.value.code == 404
assert e.value.message == 'Download not available'
@pytest.mark.asyncio
@pytest.mark.aiohttpretty
async def test_article_upload_root(self, file_stream, article_provider,
root_provider_fixtures, crud_fixtures):
file_name = 'barricade.gif'
file_id = str(root_provider_fixtures['get_file_metadata']['id'])
root_parts = article_provider.root_path_parts
item = crud_fixtures["upload_folder_article_metadata"]
item['defined_type'] = 5
validate_file_url = article_provider.build_url(False, *root_parts, 'files', file_name)
aiohttpretty.register_uri('GET', validate_file_url, status=404)
path = FigsharePath('/1234/94813', _ids=('1234', '94813'), folder=False, is_public=False)
create_file_url = article_provider.build_url(False, *root_parts, 'files')
file_url = article_provider.build_url(False, *root_parts, 'files', file_id)
get_article_url = article_provider.build_url(False, *root_parts)
upload_url = root_provider_fixtures['get_file_metadata']['upload_url']
parent_url = article_provider.build_url(False, *root_parts,
'articles', path.parent.identifier)
aiohttpretty.register_json_uri('GET', parent_url, body=item)
aiohttpretty.register_json_uri('POST', create_file_url,
body=crud_fixtures['create_file_metadata'], status=201)
aiohttpretty.register_json_uri('GET', file_url,
body=root_provider_fixtures['get_file_metadata'])
aiohttpretty.register_json_uri('GET',
root_provider_fixtures['get_file_metadata']['upload_url'],
body=root_provider_fixtures['get_upload_metadata'])
aiohttpretty.register_uri('PUT', '{}/1'.format(upload_url), status=200)
aiohttpretty.register_uri('POST', file_url, status=202)
aiohttpretty.register_json_uri('GET', get_article_url,
body=crud_fixtures['upload_folder_article_metadata'])
result, created = await article_provider.upload(file_stream, path)
expected = metadata.FigshareFileMetadata(
crud_fixtures['upload_folder_article_metadata'],
crud_fixtures['upload_folder_article_metadata']['files'][0],
)
assert aiohttpretty.has_call(method='PUT', uri='{}/1'.format(upload_url))
assert aiohttpretty.has_call(method='POST', uri=create_file_url)
assert result == expected
class TestRevalidatePath:
@pytest.mark.asyncio
@pytest.mark.aiohttpretty
async def test_project_revalidate_path(self, project_provider, root_provider_fixtures):
file_article_id = str(root_provider_fixtures['list_project_articles'][0]['id'])
folder_article_id = str(root_provider_fixtures['list_project_articles'][1]['id'])
root_parts = project_provider.root_path_parts
list_articles_url = project_provider.build_url(False, *root_parts, 'articles')
file_article_url = project_provider.build_url(False, *root_parts, 'articles',
file_article_id)
folder_article_url = project_provider.build_url(False, *root_parts, 'articles',
folder_article_id)
aiohttpretty.register_json_uri('GET', list_articles_url,
body=root_provider_fixtures['list_project_articles'],
params={'page': '1', 'page_size': str(MAX_PAGE_SIZE)})
aiohttpretty.register_json_uri('GET', list_articles_url, body=[],
params={'page': '2', 'page_size': str(MAX_PAGE_SIZE)})
aiohttpretty.register_json_uri('GET', file_article_url,
body=root_provider_fixtures['file_article_metadata'])
aiohttpretty.register_json_uri('GET', folder_article_url,
body=root_provider_fixtures['folder_article_metadata'])
path = FigsharePath('/', _ids=('', ), folder=True)
result = await project_provider.revalidate_path(path, 'file', folder=False)
assert result.is_dir is False
assert result.name == 'file'
assert result.identifier == str(
root_provider_fixtures['file_article_metadata']['files'][0]['id'])
@pytest.mark.asyncio
@pytest.mark.aiohttpretty
async def test_project_revalidate_path_duplicate_folder(self, project_provider,
root_provider_fixtures):
file_article_id = str(root_provider_fixtures['list_project_articles'][0]['id'])
folder_article_id = str(root_provider_fixtures['list_project_articles'][1]['id'])
folder_article_name = root_provider_fixtures['list_project_articles'][1]['title']
root_parts = project_provider.root_path_parts
list_articles_url = project_provider.build_url(False, *root_parts, 'articles')
file_article_url = project_provider.build_url(False, *root_parts, 'articles',
file_article_id)
folder_article_url = project_provider.build_url(False, *root_parts, 'articles',
folder_article_id)
aiohttpretty.register_json_uri('GET', list_articles_url,
body=root_provider_fixtures['list_project_articles'],
params={'page': '1', 'page_size': str(MAX_PAGE_SIZE)})
aiohttpretty.register_json_uri('GET', list_articles_url, body=[],
params={'page': '2', 'page_size': str(MAX_PAGE_SIZE)})
aiohttpretty.register_json_uri('GET', file_article_url,
body=root_provider_fixtures['file_article_metadata'])
aiohttpretty.register_json_uri('GET', folder_article_url,
body=root_provider_fixtures['folder_article_metadata'])
path = FigsharePath('/', _ids=('', ), folder=True)
result = await project_provider.revalidate_path(path, folder_article_name, folder=True)
assert result.is_dir is True
assert result.name == 'folder_article'
assert result.identifier == folder_article_id
@pytest.mark.asyncio
@pytest.mark.aiohttpretty
async def test_project_revalidate_path_not_root(self, project_provider, root_provider_fixtures):
file_article_id = str(root_provider_fixtures['list_project_articles'][0]['id'])
path = FigsharePath('/folder1/', _ids=('', file_article_id), folder=True)
root_parts = project_provider.root_path_parts
file_article_url = project_provider.build_url(False, *root_parts, 'articles',
file_article_id)
aiohttpretty.register_json_uri('GET', file_article_url,
body=root_provider_fixtures['file_article_metadata'])
result = await project_provider.revalidate_path(path, 'file', folder=False)
assert result.is_dir is False
assert result.name == 'file'
assert result.identifier == str(
root_provider_fixtures['file_article_metadata']['files'][0]['id'])
@pytest.mark.asyncio
@pytest.mark.aiohttpretty
async def test_article_revalidate_path_bad_path(self, article_provider, root_provider_fixtures):
item = root_provider_fixtures['file_metadata']
file_id = str(item['id'])
path = FigsharePath('/folder1/folder2/', _ids=('', '', file_id), folder=True)
with pytest.raises(exceptions.NotFoundError) as e:
await article_provider.revalidate_path(path, 'childname', folder=True)
assert e.value.code == 404
@pytest.mark.asyncio
@pytest.mark.aiohttpretty
async def test_article_revalidate_path_file(self, article_provider, crud_fixtures):
item = crud_fixtures["upload_folder_article_metadata"]
file_id = str(item['files'][0]['id'])
file_name = item['files'][0]['name']
path = FigsharePath('/' + str(file_name), _ids=('', file_id), folder=True)
urn_parts = (*article_provider.root_path_parts, path.identifier)
url = article_provider.build_url(False, *urn_parts)
aiohttpretty.register_json_uri('GET', url, body=item)
result = await article_provider.revalidate_path(path, item['files'][0]['name'],
folder=False)
expected = path.child(item['files'][0]['name'], _id=file_id, folder=False,
parent_is_folder=False)
assert result == expected
class TestMisc:
def test_path_from_metadata_file(self, project_provider, root_provider_fixtures):
file_article_metadata = root_provider_fixtures['file_article_metadata']
fig_metadata = metadata.FigshareFileMetadata(file_article_metadata)
path = FigsharePath('/', _ids=('', ), folder=True)
expected = FigsharePath('/file_article/file', _ids=('', '4037952', '6530715'), folder=False)
result = project_provider.path_from_metadata(path, fig_metadata)
assert result == expected
def test_path_from_metadata_folder(self, project_provider, root_provider_fixtures):
folder_article_metadata = root_provider_fixtures['folder_article_metadata']
fig_metadata = metadata.FigshareFolderMetadata(folder_article_metadata)
path = FigsharePath('/', _ids=('', ), folder=True)
expected = FigsharePath('/folder_article/', _ids=('', '4040019'), folder=True)
result = project_provider.path_from_metadata(path, fig_metadata)
assert result == expected
@pytest.mark.asyncio
@pytest.mark.aiohttpretty
async def test__get_article_metadata_returns_none(self, project_provider,
root_provider_fixtures):
file_id = root_provider_fixtures['file_article_metadata']['id']
item = {'defined_type': 5, 'files': None, 'id': file_id}
url = project_provider.build_url(False, *project_provider.root_path_parts,
'articles', str(file_id))
aiohttpretty.register_json_uri('GET', url, body=item)
result = await project_provider._get_article_metadata(str(file_id), False)
assert result is None
@pytest.mark.asyncio
@pytest.mark.aiohttpretty
async def test__get_file_upload_url_error(self, project_provider):
article_id = '12345'
file_id = '871947'
url = project_provider.build_url(False, 'articles', article_id, 'files', file_id)
aiohttpretty.register_json_uri('GET', url, status=404)
with pytest.raises(exceptions.ProviderError) as e:
await project_provider._get_file_upload_url(article_id, file_id)
assert e.value.code == 500
@pytest.mark.asyncio
async def test_revisions(self, project_provider):
result = await project_provider.revisions('/')
expected = [metadata.FigshareFileRevisionMetadata()]
assert result == expected
def test_can_duplicate_names(self, project_provider):
assert project_provider.can_duplicate_names() is False
def test_base_figshare_provider_fileset(self, auth, credentials):
settings = {
'container_type': 'fileset',
'container_id': '13423',
}
test_provider = provider.FigshareArticleProvider(auth, credentials, settings)
assert test_provider.container_type == 'article'
def test_base_figshare_provider_invalid_setting(self, auth, credentials):
bad_settings = {
'container_type': 'not_a_project',
'container_id': '13423',
}
with pytest.raises(exceptions.ProviderError) as e:
provider.FigshareProjectProvider(auth, credentials, bad_settings)
assert e.value.message == '{} is not a valid container type.'.format(
bad_settings['container_type'])
def test_figshare_provider_invalid_setting(self, auth, credentials):
bad_settings = {
'container_type': 'not_a_project',
'container_id': '13423',
}
with pytest.raises(exceptions.ProviderError) as e:
provider.FigshareProvider(auth, credentials, bad_settings)
assert e.value.message == 'Invalid "container_type" {0}'.format(
bad_settings['container_type'])
[quality-signal columns for the row above omitted]

--- next dataset row: delpapa/avalanches/Fig5.py | repo delpapa/CritSORN (head cdad55d55f39e04f568ca1bc0c6036bec8db08fb) | file hexsha 021ff06c29c567be077f516b6d440f0702099da9 | 14,561 bytes | Python | MIT ---
####
# Script for the 5th paper figure (from PhaseTransitionNoise)
# A,B: Gaussian noise 'phase transition'
# C,D: random spike 'phase transition'
# E,F,G: raster plot for every phase
####
from pylab import *
import matplotlib.gridspec as gridspec
import tables
import os
from tempfile import TemporaryFile
import scipy, scipy.stats # for the binomial distribution
import data_analysis as analysis
import powerlaw as pl
transient_steps = 2e6
stable_steps = 3e6
number_of_files = 2
THETA = 'half'
raster_steps = 800
### figure parameters
width = 6
height = 9
fig = figure(1, figsize=(width, height))
fig_5a = subplot(3, 2, 1)
fig_5b = subplot(3, 2, 2)
fig_5c = subplot(3, 2, 3)
fig_5d = subplot(3, 2, 4)
fig_5e = subplot(9, 1, 7)
fig_5f = subplot(9, 1, 8)
fig_5g = subplot(9, 1, 9)
letter_size = 11
letter_size_panel = 13
line_width = 1.5
line_width_fit = 2.0
subplot_letter = (-0.18, 1.05)
subplot_letter_long = (-0.08, 0.85)
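### Helper sketch (added for clarity; not part of the original script). Every
### power-law panel below repeats the same pattern: bin two avalanche samples
### with pl.pdf, interpolate both onto a common grid, and shade the band
### between them. Assuming the pylab/powerlaw imports above, a factored
### version would look like:
def shaded_band(sample_lo, sample_hi, facecolor):
    # histogram both samples with logarithmic binning
    pdf_lo = pl.pdf(sample_lo, 10)
    pdf_hi = pl.pdf(sample_hi, 10)
    # bin centers from the returned bin edges
    centers_lo = (pdf_lo[0][:-1] + pdf_lo[0][1:]) / 2.
    centers_hi = (pdf_hi[0][:-1] + pdf_hi[0][1:]) / 2.
    x_max = pdf_lo[0].max()
    # interpolate both estimates onto a common x grid
    interp_lo = np.interp(np.arange(x_max), centers_lo, pdf_lo[1])
    interp_hi = np.interp(np.arange(x_max), centers_hi, pdf_hi[1])
    # shade the region between the two thresholded estimates
    fill_between(np.linspace(0, x_max, len(interp_hi)),
                 interp_hi, interp_lo, facecolor=facecolor, alpha=0.2)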
########################################################################
# Fig. 5A,B: Gaussian noise (power-laws and activity)
experiment_name = 'Gaussian'
print experiment_name
for sigma in ['0.005', '0.05', '5']:
print sigma
data_all = np.zeros((number_of_files, stable_steps))
act_density = zeros((number_of_files, 200))
for data_file in xrange(number_of_files):
exper = 'result.h5'
exper_path = ''
h5 = tables.openFile(os.path.join(exper_path,exper),'r')
data = h5.root
data_all[data_file] = np.around(data.activity[0] \
[int(transient_steps):]*data.c.N_e)
for i in xrange(int(stable_steps)):
act_density[data_file, int(data_all[data_file, i])] += 1
act_density[data_file, :] /= act_density[data_file, :].sum()
### Save raster plots
if data_file == 0: # file 0 is an arbitrary representative run
if sigma == '0.005':
raster_low = data.Spikes[0][:, -raster_steps:]
if sigma == '0.05':
raster_inter = data.Spikes[0][:, -raster_steps:]
if sigma == '5':
raster_high = data.Spikes[0][:, -raster_steps:]
h5.close()
# activity distribution and std
act_density_mean = act_density.mean(0)
act_density_std = act_density.std(0)
# calculates avalanches
T_data, S_data = analysis.avalanches(data_all, \
'N', '200', Threshold=THETA)
a_dur1, a_area1 = analysis.avalanches(data_all, 'N', '200',\
Theta_percent = 5)
a_dur2, a_area2 = analysis.avalanches(data_all, 'N', '200',\
Theta_percent = 25)
subplot(321)
if sigma == '0.05':
a_area1_pdf = pl.pdf(a_area1, 10)
a_area2_pdf = pl.pdf(a_area2, 10)
BinCenters1 = (a_area1_pdf[0][:-1]+a_area1_pdf[0][1:])/2.
BinCenters2 = (a_area2_pdf[0][:-1]+a_area2_pdf[0][1:])/2.
x_max = a_area1_pdf[0].max()
interp1 = np.interp(np.arange(x_max), BinCenters1, a_area1_pdf[1])
interp2 = np.interp(np.arange(x_max), BinCenters2, a_area2_pdf[1])
fill_between(np.linspace(0,x_max,len(interp2)), interp2, interp1, facecolor='k', alpha=0.2)
pl.plot_pdf(S_data, color='k', linewidth=line_width_fit)
elif sigma == '0.005':
a_area1_pdf = pl.pdf(a_area1, 10)
a_area2_pdf = pl.pdf(a_area2, 10)
BinCenters1 = (a_area1_pdf[0][:-1]+a_area1_pdf[0][1:])/2.
BinCenters2 = (a_area2_pdf[0][:-1]+a_area2_pdf[0][1:])/2.
x_max = a_area1_pdf[0].max()
interp1 = np.interp(np.arange(x_max), BinCenters1, a_area1_pdf[1])
interp2 = np.interp(np.arange(x_max), BinCenters2, a_area2_pdf[1])
fill_between(np.linspace(0,x_max,len(interp2)), interp2, interp1, facecolor='darkcyan', alpha=0.2)
pl.plot_pdf(S_data, color='darkcyan', linewidth=line_width)
elif sigma == '5':
a_area1_pdf = pl.pdf(a_area1, 10)
a_area2_pdf = pl.pdf(a_area2, 10)
BinCenters1 = (a_area1_pdf[0][:-1]+a_area1_pdf[0][1:])/2.
BinCenters2 = (a_area2_pdf[0][:-1]+a_area2_pdf[0][1:])/2.
x_max = a_area1_pdf[0].max()
interp1 = np.interp(np.arange(x_max), BinCenters1, a_area1_pdf[1])
interp2 = np.interp(np.arange(x_max), BinCenters2, a_area2_pdf[1])
fill_between(np.linspace(0,x_max,len(interp2)), interp2, interp1, facecolor='r', alpha=0.2)
pl.plot_pdf(S_data, color='r', linewidth=line_width)
xscale('log'); yscale('log')
xlabel(r'$S$', fontsize=letter_size)
ylabel(r'$f(S)$', fontsize=letter_size)
# ticks name
xlim([1, 3000])
ylim([0.00001, 0.1])
xticks([1, 10, 100, 1000], \
['$10^0$', '$10^{1}$', '$10^{2}$', '$10^{3}$'])
yticks([0.1, 0.001, 0.00001],\
['$10^{-1}$', '$10^{-3}$', '$10^{-5}$'])
fig_5a.spines['right'].set_visible(False)
fig_5a.spines['top'].set_visible(False)
fig_5a.xaxis.set_ticks_position('bottom')
fig_5a.yaxis.set_ticks_position('left')
tick_params(labelsize=letter_size)
subplot(322)
if sigma == '0.005':
plot(act_density_mean, 'c', linewidth=line_width, label='low')
if sigma == '0.05':
plot(act_density_mean, 'k', linewidth=line_width_fit, label='intermediate')
if sigma == '5':
plot(act_density_mean, 'r', linewidth=line_width, label='high')
### Binomial distribution plot
### p = 0.1 (\mu_IP); n = 200 (N_E)
bino_x = np.arange(0, 40)
bino_y = scipy.stats.binom.pmf(bino_x, 200,0.1)
subplot(322)
plot(bino_x, bino_y, '--', color='gray', linewidth=line_width_fit)
xlabel(r'$a(x)$ [# neurons]', fontsize=letter_size)
ylabel(r'$p(a(x))$', fontsize=letter_size)
xlim([0, 70])
ylim([0, 0.11])
xticks([0, 20, 40, 60], \
['$0$', '$20$', '$40$', '$60$'])
yticks([0, 0.1], \
['$0$', '$0.1$'])
fig_5b.spines['right'].set_visible(False)
fig_5b.spines['top'].set_visible(False)
fig_5b.xaxis.set_ticks_position('bottom')
fig_5b.yaxis.set_ticks_position('left')
tick_params(labelsize=letter_size)
legend(loc=(0.5, 0.55), prop={'size':letter_size}, \
title=r'Noise level', frameon=False)
fig_5b.get_legend().get_title().set_fontsize(letter_size)
########################################################################
########################################################################
# Fig. 5C,D: Random Spikes (power-laws and activity)
experiment_name = 'RandomSpikes'
print experiment_name
for p in ['000', '005', '010']:
if p == '000':
number_of_files = 65
else:
number_of_files = 30
print p
data_all = np.zeros((number_of_files, stable_steps))
act_density = zeros((number_of_files, 200))
for data_file in xrange(number_of_files):
exper = 'result.h5'
exper_path = ''
h5 = tables.openFile(os.path.join(exper_path,exper),'r')
data = h5.root
data_all[data_file] = np.around(data.activity[0] \
[int(transient_steps):]*data.c.N_e)
for i in xrange(int(stable_steps)):
act_density[data_file, int(data_all[data_file, i])] += 1
act_density[data_file, :] /= act_density[data_file, :].sum()
h5.close()
# activity distribution and std
act_density_mean = act_density.mean(0)
act_density_std = act_density.std(0)
# calculates avalanches
T_data, S_data = analysis.avalanches(data_all, \
'N', '200', Threshold=THETA)
a_dur1, a_area1 = analysis.avalanches(data_all, 'N', '200',\
Theta_percent = 10)
a_dur2, a_area2 = analysis.avalanches(data_all, 'N', '200',\
Theta_percent = 25)
subplot(323)
if p == '005':
a_area1_pdf = pl.pdf(a_area1, 10)
a_area2_pdf = pl.pdf(a_area2, 10)
BinCenters1 = (a_area1_pdf[0][:-1]+a_area1_pdf[0][1:])/2.
BinCenters2 = (a_area2_pdf[0][:-1]+a_area2_pdf[0][1:])/2.
x_max = a_area1_pdf[0].max()
interp1 = np.interp(np.arange(x_max), BinCenters1, a_area1_pdf[1])
interp2 = np.interp(np.arange(x_max), BinCenters2, a_area2_pdf[1])
fill_between(np.linspace(0,x_max,len(interp2)), interp2, interp1, facecolor='k', alpha=0.2)
pl.plot_pdf(S_data, color='k', linewidth=line_width_fit)
elif p == '000':
a_area1_pdf = pl.pdf(a_area1, 10)
a_area2_pdf = pl.pdf(a_area2, 10)
BinCenters1 = (a_area1_pdf[0][:-1]+a_area1_pdf[0][1:])/2.
BinCenters2 = (a_area2_pdf[0][:-1]+a_area2_pdf[0][1:])/2.
x_max = a_area1_pdf[0].max()
interp1 = np.interp(np.arange(x_max), BinCenters1, a_area1_pdf[1])
interp2 = np.interp(np.arange(x_max), BinCenters2, a_area2_pdf[1])
fill_between(np.linspace(0,x_max,len(interp2))[100:-100], interp2[100:-100], interp1[100:-100], facecolor='darkcyan', alpha=0.2)
pl.plot_pdf(S_data, color='darkcyan', linewidth=line_width)
elif p == '010':
a_area1_pdf = pl.pdf(a_area1, 10)
a_area2_pdf = pl.pdf(a_area2, 10)
BinCenters1 = (a_area1_pdf[0][:-1]+a_area1_pdf[0][1:])/2.
BinCenters2 = (a_area2_pdf[0][:-1]+a_area2_pdf[0][1:])/2.
x_max = a_area1_pdf[0].max()
interp1 = np.interp(np.arange(x_max), BinCenters1, a_area1_pdf[1])
interp2 = np.interp(np.arange(x_max), BinCenters2, a_area2_pdf[1])
fill_between(np.linspace(0,x_max,len(interp2)), interp2, interp1, facecolor='r', alpha=0.2)
pl.plot_pdf(S_data, color='r', linewidth=line_width)
xscale('log'); yscale('log')
xlabel(r'$S$', fontsize=letter_size)
ylabel(r'$f(S)$', fontsize=letter_size)
# ticks name
xlim([1, 3000])
ylim([0.00001, 0.1])
xticks([1, 10, 100, 1000], \
['$10^0$', '$10^{1}$', '$10^{2}$', '$10^{3}$'])
yticks([0.1, 0.001, 0.00001],\
['$10^{-1}$', '$10^{-3}$', '$10^{-5}$'])
fig_5c.spines['right'].set_visible(False)
fig_5c.spines['top'].set_visible(False)
fig_5c.xaxis.set_ticks_position('bottom')
fig_5c.yaxis.set_ticks_position('left')
tick_params(labelsize=letter_size)
subplot(324)
if p == '000':
label_p = '$0\%$'
plot(act_density_mean, 'c', linewidth=line_width, label=label_p)
elif p == '005':
label_p = '$5\%$'
plot(act_density_mean, 'k', linewidth=line_width_fit, label=label_p)
elif p == '010':
label_p = '$10\%$'
plot(act_density_mean, 'r', linewidth=line_width, label=label_p)
### Binomial distribution plot
### p = 0.1 (\mu_IP); n = 200 (N_E)
bino_x = np.arange(0, 40)
bino_y = scipy.stats.binom.pmf(bino_x, 200,0.1)
subplot(324)
plot(bino_x, bino_y, '--', color='gray', linewidth=line_width_fit)
xlabel(r'$a(x)$ [# neurons]', fontsize=letter_size)
ylabel(r'$p(a(x))$', fontsize=letter_size)
xlim([0, 70])
ylim([0, 0.11])
xticks([0, 20, 40, 60], \
['$0$', '$20$', '$40$', '$60$'])
yticks([0, 0.1], \
['$0$', '$0.1$'])
fig_5d.spines['right'].set_visible(False)
fig_5d.spines['top'].set_visible(False)
fig_5d.xaxis.set_ticks_position('bottom')
fig_5d.yaxis.set_ticks_position('left')
tick_params(labelsize=letter_size)
# legend stuff
legend(loc=(0.5, 0.55), prop={'size':letter_size}, \
title=r'$p_{\rm s}$', frameon=False)
fig_5d.get_legend().get_title().set_fontsize(letter_size)
########################################################################
########################################################################
# Fig. 5E, F, G: Raster plots
subplot(917)
for (i,sp) in enumerate(raster_low):
s_train = where(sp == 1)[0]
if s_train.size:  # comparing an ndarray to [] is unreliable
vlines(s_train, i + 0.5, i + 1.5)
hold('on')
ylabel('Low', fontsize=letter_size)
ylim([0, 200])
tick_params(axis='both', which='major', labelsize=letter_size)
xticks([])
yticks([])
subplot(918)
for (i,sp) in enumerate(raster_inter):
s_train = where(sp == 1)[0]
if s_train.size:
vlines(s_train, i + 0.5, i + 1.5)
hold('on')
ylabel('Interm.', fontsize=letter_size)
ylim([0, 200])
tick_params(axis='both', which='major', labelsize=letter_size)
xticks([])
yticks([])
subplot(919)
for (i,sp) in enumerate(raster_high):
s_train = where(sp == 1)[0]
if s_train.size:
vlines(s_train, i + 0.5, i + 1.5)
hold('on')
xlabel('Time step', fontsize=letter_size)
ylabel('High', fontsize=letter_size)
ylim([0, 200])
tick_params(axis='both', which='major', labelsize=letter_size)
yticks([])
xticks([0, 400, 800], \
['$0$', '$400$', '$800$'])
########################################################################
fig_5a.annotate('A', xy=subplot_letter, xycoords='axes fraction', \
fontsize=letter_size_panel , fontweight='bold', \
horizontalalignment='right', verticalalignment='bottom')
fig_5b.annotate('B', xy=subplot_letter, xycoords='axes fraction', \
fontsize=letter_size_panel , fontweight='bold', \
horizontalalignment='right', verticalalignment='bottom')
fig_5c.annotate('C', xy=subplot_letter, xycoords='axes fraction', \
fontsize=letter_size_panel , fontweight='bold', \
horizontalalignment='right', verticalalignment='bottom')
fig_5d.annotate('D', xy=subplot_letter, xycoords='axes fraction', \
fontsize=letter_size_panel , fontweight='bold', \
horizontalalignment='right', verticalalignment='bottom')
fig_5e.annotate('E', xy=subplot_letter_long, xycoords='axes fraction', \
fontsize=letter_size_panel , fontweight='bold', \
horizontalalignment='right', verticalalignment='bottom')
fig_5f.annotate('F', xy=subplot_letter_long, xycoords='axes fraction', \
fontsize=letter_size_panel , fontweight='bold', \
horizontalalignment='right', verticalalignment='bottom')
fig_5g.annotate('G', xy=subplot_letter_long, xycoords='axes fraction', \
fontsize=letter_size_panel , fontweight='bold', \
horizontalalignment='right', verticalalignment='bottom')
fig.subplots_adjust(wspace=.25)
fig.subplots_adjust(hspace=.45)
print 'Saving figures...',
result_path = '../../plots/'
result_name = 'Fig5_test.pdf'
savefig(os.path.join(result_path, result_name), format='pdf')
print 'done\n\n'
[quality-signal columns for the row above omitted]

--- next dataset row: QFin/tvm.py | repo RomanMichaelPaolucci/Q-Fin (head 570e7322518cb57187c77cf173f7e013123b88ab) | file hexsha 023aed51d466633ff9001a665242e2cae3ed7787 | 150 bytes | Python | MIT | 94 stars (2021-04-21 to 2022-03-31) | issues/forks via RBBRONDANI/Q-Fin (head 49902da7eb013bda196c3b383c5736b008a61a17), 16 forks (2021-04-24 to 2022-03-29) ---
class Annuity:
    def __init__(self):
        pass


class Perpetuity:
    def __init__(self):
        pass


class TVM:
    def __init__(self):
        pass
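# Sketch (assumption: judging by the module name tvm.py, these stubs are
# placeholders for time-value-of-money instruments). A filled-in Perpetuity,
# whose present value is the classic cash_flow / rate, might look like:
#
#   class Perpetuity:
#       def __init__(self, cash_flow, rate):
#           self.present_value = cash_flow / rate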
[quality-signal columns for the row above omitted]

--- next dataset row: google/ads/google_ads/v6/proto/services/offline_user_data_job_service_pb2_grpc.py | repo jphanwebstaurant/google-ads-python (head 600812b2afcc4d57f00b47dfe436620ce50bfe9b) | file hexsha 02481bad8ed51852bad92d2f10f9ae9fc281f0fb | 10,421 bytes | Python | Apache-2.0 | 1 star (2021-04-09) ---
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
"""Client and server classes corresponding to protobuf-defined services."""
import grpc
from google.ads.google_ads.v6.proto.resources import offline_user_data_job_pb2 as google_dot_ads_dot_googleads__v6_dot_proto_dot_resources_dot_offline__user__data__job__pb2
from google.ads.google_ads.v6.proto.services import offline_user_data_job_service_pb2 as google_dot_ads_dot_googleads__v6_dot_proto_dot_services_dot_offline__user__data__job__service__pb2
from google.longrunning import operations_pb2 as google_dot_longrunning_dot_operations__pb2
class OfflineUserDataJobServiceStub(object):
"""Proto file describing the OfflineUserDataJobService.
Service to manage offline user data jobs.
"""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.CreateOfflineUserDataJob = channel.unary_unary(
'/google.ads.googleads.v6.services.OfflineUserDataJobService/CreateOfflineUserDataJob',
request_serializer=google_dot_ads_dot_googleads__v6_dot_proto_dot_services_dot_offline__user__data__job__service__pb2.CreateOfflineUserDataJobRequest.SerializeToString,
response_deserializer=google_dot_ads_dot_googleads__v6_dot_proto_dot_services_dot_offline__user__data__job__service__pb2.CreateOfflineUserDataJobResponse.FromString,
)
self.GetOfflineUserDataJob = channel.unary_unary(
'/google.ads.googleads.v6.services.OfflineUserDataJobService/GetOfflineUserDataJob',
request_serializer=google_dot_ads_dot_googleads__v6_dot_proto_dot_services_dot_offline__user__data__job__service__pb2.GetOfflineUserDataJobRequest.SerializeToString,
response_deserializer=google_dot_ads_dot_googleads__v6_dot_proto_dot_resources_dot_offline__user__data__job__pb2.OfflineUserDataJob.FromString,
)
self.AddOfflineUserDataJobOperations = channel.unary_unary(
'/google.ads.googleads.v6.services.OfflineUserDataJobService/AddOfflineUserDataJobOperations',
request_serializer=google_dot_ads_dot_googleads__v6_dot_proto_dot_services_dot_offline__user__data__job__service__pb2.AddOfflineUserDataJobOperationsRequest.SerializeToString,
response_deserializer=google_dot_ads_dot_googleads__v6_dot_proto_dot_services_dot_offline__user__data__job__service__pb2.AddOfflineUserDataJobOperationsResponse.FromString,
)
self.RunOfflineUserDataJob = channel.unary_unary(
'/google.ads.googleads.v6.services.OfflineUserDataJobService/RunOfflineUserDataJob',
request_serializer=google_dot_ads_dot_googleads__v6_dot_proto_dot_services_dot_offline__user__data__job__service__pb2.RunOfflineUserDataJobRequest.SerializeToString,
response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString,
)
class OfflineUserDataJobServiceServicer(object):
"""Proto file describing the OfflineUserDataJobService.
Service to manage offline user data jobs.
"""
def CreateOfflineUserDataJob(self, request, context):
"""Creates an offline user data job.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def GetOfflineUserDataJob(self, request, context):
"""Returns the offline user data job.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def AddOfflineUserDataJobOperations(self, request, context):
"""Adds operations to the offline user data job.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def RunOfflineUserDataJob(self, request, context):
"""Runs the offline user data job.
When finished, the long running operation will contain the processing
result or failure information, if any.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_OfflineUserDataJobServiceServicer_to_server(servicer, server):
rpc_method_handlers = {
'CreateOfflineUserDataJob': grpc.unary_unary_rpc_method_handler(
servicer.CreateOfflineUserDataJob,
request_deserializer=google_dot_ads_dot_googleads__v6_dot_proto_dot_services_dot_offline__user__data__job__service__pb2.CreateOfflineUserDataJobRequest.FromString,
response_serializer=google_dot_ads_dot_googleads__v6_dot_proto_dot_services_dot_offline__user__data__job__service__pb2.CreateOfflineUserDataJobResponse.SerializeToString,
),
'GetOfflineUserDataJob': grpc.unary_unary_rpc_method_handler(
servicer.GetOfflineUserDataJob,
request_deserializer=google_dot_ads_dot_googleads__v6_dot_proto_dot_services_dot_offline__user__data__job__service__pb2.GetOfflineUserDataJobRequest.FromString,
response_serializer=google_dot_ads_dot_googleads__v6_dot_proto_dot_resources_dot_offline__user__data__job__pb2.OfflineUserDataJob.SerializeToString,
),
'AddOfflineUserDataJobOperations': grpc.unary_unary_rpc_method_handler(
servicer.AddOfflineUserDataJobOperations,
request_deserializer=google_dot_ads_dot_googleads__v6_dot_proto_dot_services_dot_offline__user__data__job__service__pb2.AddOfflineUserDataJobOperationsRequest.FromString,
response_serializer=google_dot_ads_dot_googleads__v6_dot_proto_dot_services_dot_offline__user__data__job__service__pb2.AddOfflineUserDataJobOperationsResponse.SerializeToString,
),
'RunOfflineUserDataJob': grpc.unary_unary_rpc_method_handler(
servicer.RunOfflineUserDataJob,
request_deserializer=google_dot_ads_dot_googleads__v6_dot_proto_dot_services_dot_offline__user__data__job__service__pb2.RunOfflineUserDataJobRequest.FromString,
response_serializer=google_dot_longrunning_dot_operations__pb2.Operation.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'google.ads.googleads.v6.services.OfflineUserDataJobService', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
# This class is part of an EXPERIMENTAL API.
class OfflineUserDataJobService(object):
"""Proto file describing the OfflineUserDataJobService.
Service to manage offline user data jobs.
"""
@staticmethod
def CreateOfflineUserDataJob(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/google.ads.googleads.v6.services.OfflineUserDataJobService/CreateOfflineUserDataJob',
google_dot_ads_dot_googleads__v6_dot_proto_dot_services_dot_offline__user__data__job__service__pb2.CreateOfflineUserDataJobRequest.SerializeToString,
google_dot_ads_dot_googleads__v6_dot_proto_dot_services_dot_offline__user__data__job__service__pb2.CreateOfflineUserDataJobResponse.FromString,
options, channel_credentials,
call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def GetOfflineUserDataJob(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/google.ads.googleads.v6.services.OfflineUserDataJobService/GetOfflineUserDataJob',
google_dot_ads_dot_googleads__v6_dot_proto_dot_services_dot_offline__user__data__job__service__pb2.GetOfflineUserDataJobRequest.SerializeToString,
google_dot_ads_dot_googleads__v6_dot_proto_dot_resources_dot_offline__user__data__job__pb2.OfflineUserDataJob.FromString,
options, channel_credentials,
call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def AddOfflineUserDataJobOperations(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/google.ads.googleads.v6.services.OfflineUserDataJobService/AddOfflineUserDataJobOperations',
google_dot_ads_dot_googleads__v6_dot_proto_dot_services_dot_offline__user__data__job__service__pb2.AddOfflineUserDataJobOperationsRequest.SerializeToString,
google_dot_ads_dot_googleads__v6_dot_proto_dot_services_dot_offline__user__data__job__service__pb2.AddOfflineUserDataJobOperationsResponse.FromString,
options, channel_credentials,
call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def RunOfflineUserDataJob(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/google.ads.googleads.v6.services.OfflineUserDataJobService/RunOfflineUserDataJob',
google_dot_ads_dot_googleads__v6_dot_proto_dot_services_dot_offline__user__data__job__service__pb2.RunOfflineUserDataJobRequest.SerializeToString,
google_dot_longrunning_dot_operations__pb2.Operation.FromString,
options, channel_credentials,
call_credentials, compression, wait_for_ready, timeout, metadata)
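For orientation, here is a minimal client-side sketch of how stubs generated in this style are typically invoked. It is a sketch under stated assumptions: the module paths are decoded from the mangled import aliases above, and the stub class name, endpoint, and insecure channel are illustrative only (the production Google Ads API requires an authenticated secure channel and request metadata).
import grpc
# Module paths decoded from the protoc aliases above (_dot_ -> '.', '__' -> '_');
# the *_pb2_grpc module and stub class name follow the usual protoc convention
# and are assumptions for this sketch.
from google.ads.googleads_v6.proto.services import (
    offline_user_data_job_service_pb2 as job_pb2,
    offline_user_data_job_service_pb2_grpc as job_pb2_grpc,
)
channel = grpc.insecure_channel('localhost:50051')  # assumption: local test server
stub = job_pb2_grpc.OfflineUserDataJobServiceStub(channel)
# An empty request is enough to exercise the unary-unary plumbing; real
# requests carry a customer id and job definition per the proto schema.
request = job_pb2.CreateOfflineUserDataJobRequest()
response = stub.CreateOfflineUserDataJob(request)
print(response)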
| 57.894444 | 197 | 0.75914 | 1,049 | 10,421 | 6.938036 | 0.120114 | 0.048365 | 0.065952 | 0.071723 | 0.817257 | 0.803655 | 0.787991 | 0.726298 | 0.712558 | 0.674086 | 0 | 0.007548 | 0.186354 | 10,421 | 179 | 198 | 58.217877 | 0.850808 | 0.07696 | 0 | 0.461538 | 1 | 0 | 0.106688 | 0.087309 | 0 | 0 | 0 | 0 | 0 | 1 | 0.076923 | false | 0 | 0.030769 | 0.030769 | 0.161538 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
5a55007cc4ace720eed8291694ffc85671e23a5c | 185 | py | Python | src/pytezos/rpc/__init__.py | konchunas/pytezos | 65576d18bdf1956fae8ea21241b6c43a38921b83 | ["MIT"] | 98 | 2019-02-07T16:33:38.000Z | 2022-03-31T15:53:41.000Z | src/pytezos/rpc/__init__.py | konchunas/pytezos | 65576d18bdf1956fae8ea21241b6c43a38921b83 | ["MIT"] | 152 | 2019-05-20T16:38:56.000Z | 2022-03-30T14:24:38.000Z | src/pytezos/rpc/__init__.py | konchunas/pytezos | 65576d18bdf1956fae8ea21241b6c43a38921b83 | ["MIT"] | 34 | 2019-07-25T12:03:51.000Z | 2021-11-11T22:23:38.000Z |
from pytezos.rpc.helpers import *
from pytezos.rpc.node import RpcMultiNode, RpcNode
from pytezos.rpc.protocol import *
from pytezos.rpc.search import *
from pytezos.rpc.shell import *
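These star-imports re-export the RPC helpers at the package root, so callers can import them from pytezos.rpc directly. A minimal sketch of that pattern; the uri keyword and the endpoint are assumptions about the package's API rather than confirmed usage:
# Hypothetical consumer of the re-exported names; RpcNode's constructor
# signature (uri=...) is an assumption for illustration.
from pytezos.rpc import RpcNode
node = RpcNode(uri='https://rpc.example.org')  # hypothetical endpoint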
| 30.833333 | 50 | 0.805405 | 27 | 185 | 5.518519 | 0.407407 | 0.369128 | 0.469799 | 0.402685 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.113514 | 185 | 5 | 51 | 37 | 0.908537 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 8 |
5a799498ce57bcf5542e2d7bb0c8b44093164a35 | 23,599 | py | Python | src/core_utils/validation.py | MoveBigRocks/core-utils | c167f80059e87db45564bf5e78f10d7e4ce18f21 | ["MIT"] | null | null | null | src/core_utils/validation.py | MoveBigRocks/core-utils | c167f80059e87db45564bf5e78f10d7e4ce18f21 | ["MIT"] | null | null | null | src/core_utils/validation.py | MoveBigRocks/core-utils | c167f80059e87db45564bf5e78f10d7e4ce18f21 | ["MIT"] | null | null | null |
__pyarmor__(__name__, __file__, b'\x50\x59\x41\x52\x4d\x4f\x52\x00\x00\x03\x09\x00\x61\x0d\x0d\x0a...', 2)  # PyArmor bootstrap call: the module's entire source is this one call handing an encrypted payload to the PyArmor runtime; the remaining ~23 KB of opaque byte data (broken mid-escape across lines in the dump) is elided here
| 23,599 | 23,599 | 0.749989 | 5,895 | 23,599 | 3.000339 | 0.044275 | 0.006445 | 0.006615 | 0.005428 | 0.002205 | 0.001357 | 0.001357 | 0 | 0 | 0 | 0 | 0.314333 | 0.000127 | 23,599 | 1 | 23,599 | 23,599 | 0.435243 | 0 | 0 | 0 | 0 | 1 | 0.998305 | 0.998305 | 0 | 1 | 0 | 0 | 0 | 1 | 0 | true | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 1 | 1 | 1 | null | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 10 |
5a83f56d991d35556e353df24ee7d82dce508461 | 54,099 | py | Python | tests/sedml/test_sedml_exec.py | virtualcell/Biosimulators_utils | 1b34e1e0a9ace706d245e9d515d0fae1e55a248d | ["MIT"] | null | null | null | tests/sedml/test_sedml_exec.py | virtualcell/Biosimulators_utils | 1b34e1e0a9ace706d245e9d515d0fae1e55a248d | ["MIT"] | null | null | null | tests/sedml/test_sedml_exec.py | virtualcell/Biosimulators_utils | 1b34e1e0a9ace706d245e9d515d0fae1e55a248d | ["MIT"] | null | null | null |
from biosimulators_utils.config import get_config
from biosimulators_utils.log.data_model import (
Status, CombineArchiveLog, SedDocumentLog, TaskLog, ReportLog)
from biosimulators_utils.log.utils import init_sed_document_log
from biosimulators_utils.plot.data_model import PlotFormat
from biosimulators_utils.report.data_model import VariableResults, DataSetResults, ReportResults, ReportFormat
from biosimulators_utils.report.io import ReportReader
from biosimulators_utils.report.warnings import RepeatDataSetLabelsWarning
from biosimulators_utils.sedml import data_model
from biosimulators_utils.sedml import exec
from biosimulators_utils.sedml import io
from biosimulators_utils.sedml.exceptions import SedmlExecutionError
from biosimulators_utils.sedml.warnings import NoTasksWarning, NoOutputsWarning, InconsistentVariableShapesWarning
from lxml import etree
from unittest import mock
import numpy
import numpy.testing
import os
import shutil
import tempfile
import unittest
class ExecTaskCase(unittest.TestCase):
def setUp(self):
self.tmp_dir = tempfile.mkdtemp()
def tearDown(self):
shutil.rmtree(self.tmp_dir)
def test_successful(self):
doc = data_model.SedDocument()
doc.models.append(data_model.Model(
id='model1',
source='model1.xml',
language='urn:sedml:language:sbml',
))
doc.models.append(data_model.Model(
id='model2',
source='https://models.edu/model1.xml',
language='urn:sedml:language:cellml',
))
doc.simulations.append(data_model.SteadyStateSimulation(
id='ss_sim',
))
doc.simulations.append(data_model.UniformTimeCourseSimulation(
id='time_course_sim',
initial_time=10.,
output_start_time=20.,
output_end_time=30.,
number_of_points=5,
))
doc.tasks.append(data_model.Task(
id='task_1_ss',
model=doc.models[0],
simulation=doc.simulations[0],
))
doc.tasks.append(data_model.Task(
id='task_2_time_course',
model=doc.models[1],
simulation=doc.simulations[1],
))
doc.data_generators.append(data_model.DataGenerator(
id='data_gen_1',
variables=[
data_model.Variable(
id='data_gen_1_var_1',
target="/sbml:sbml/sbml:model/sbml:listOfSpecies/sbml:speces[@id='var_1']/@concentration",
task=doc.tasks[0],
),
],
math='data_gen_1_var_1',
))
doc.data_generators.append(data_model.DataGenerator(
id='data_gen_2',
variables=[
data_model.Variable(
id='data_gen_2_var_2',
target="/sbml:sbml/sbml:model/sbml:listOfSpecies/sbml:speces[@id='var_2']/@concentration",
task=doc.tasks[0],
),
],
math='data_gen_2_var_2',
))
doc.data_generators.append(data_model.DataGenerator(
id='data_gen_3',
variables=[
data_model.Variable(
id='data_gen_3_var_3',
target="/sbml:sbml/sbml:model/sbml:listOfSpecies/sbml:speces[@id='var_3']/@concentration",
task=doc.tasks[1],
),
],
math='data_gen_3_var_3',
))
doc.data_generators.append(data_model.DataGenerator(
id='data_gen_4',
variables=[
data_model.Variable(
id='data_gen_4_var_4',
target="/sbml:sbml/sbml:model/sbml:listOfSpecies/sbml:speces[@id='var_4']/@concentration",
task=doc.tasks[1],
),
],
math='data_gen_4_var_4',
))
doc.outputs.append(data_model.Report(
id='report_1',
data_sets=[
data_model.DataSet(
id='dataset_1',
label='dataset_1',
data_generator=doc.data_generators[0],
),
data_model.DataSet(
id='dataset_2',
label='dataset_2',
data_generator=doc.data_generators[2],
),
],
))
doc.outputs.append(data_model.Report(
id='report_2',
data_sets=[
data_model.DataSet(
id='dataset_3',
label='dataset_3',
data_generator=doc.data_generators[1],
),
data_model.DataSet(
id='dataset_4',
label='dataset_4',
data_generator=doc.data_generators[3],
),
],
))
doc.outputs.append(data_model.Report(
id='report_3',
data_sets=[
data_model.DataSet(
id='dataset_5',
label='dataset_5',
data_generator=doc.data_generators[0],
),
],
))
doc.outputs.append(data_model.Report(
id='report_4',
data_sets=[
data_model.DataSet(
id='dataset_6',
label='dataset_6',
data_generator=doc.data_generators[3],
),
data_model.DataSet(
id='dataset_7',
label='dataset_7',
data_generator=doc.data_generators[3],
),
],
))
filename = os.path.join(self.tmp_dir, 'test.sedml')
io.SedmlSimulationWriter().run(doc, filename)
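# exec_sed_doc drives execution through a user-supplied callback with the
# signature (task, variables, log) -> (VariableResults, log), where
# VariableResults maps variable ids to numpy arrays. The mock below returns
# canned arrays per task so the test exercises report assembly rather than
# any real simulation.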
def execute_task(task, variables, log):
results = VariableResults()
if task.id == 'task_1_ss':
results[doc.data_generators[0].variables[0].id] = numpy.array((1., 2.))
results[doc.data_generators[1].variables[0].id] = numpy.array((3., 4.))
else:
results[doc.data_generators[2].variables[0].id] = numpy.array((5., 6.))
results[doc.data_generators[3].variables[0].id] = numpy.array((7., 8.))
return results, log
working_dir = os.path.dirname(filename)
with open(os.path.join(working_dir, doc.models[0].source), 'w'):
pass
out_dir = os.path.join(self.tmp_dir, 'results')
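# model2 has a remote (https) source, so requests.get is patched out to
# keep the test offline; the empty response body stands in for the model.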
with mock.patch('requests.get', return_value=mock.Mock(raise_for_status=lambda: None, content=b'')):
output_results, _ = exec.exec_sed_doc(execute_task, filename, working_dir,
out_dir, report_formats=[ReportFormat.csv], plot_formats=[])
expected_output_results = ReportResults({
doc.outputs[0].id: DataSetResults({
'dataset_1': numpy.array((1., 2.)),
'dataset_2': numpy.array((5., 6.)),
}),
doc.outputs[1].id: DataSetResults({
'dataset_3': numpy.array((3., 4.)),
'dataset_4': numpy.array((7., 8.)),
}),
doc.outputs[2].id: DataSetResults({
'dataset_5': numpy.array((1., 2.)),
}),
doc.outputs[3].id: DataSetResults({
'dataset_6': numpy.array((7., 8.)),
'dataset_7': numpy.array((7., 8.)),
}),
})
self.assertEqual(sorted(output_results.keys()), sorted(expected_output_results.keys()))
for report_id, data_set_results in output_results.items():
self.assertEqual(sorted(output_results[report_id].keys()), sorted(expected_output_results[report_id].keys()))
for data_set_id in data_set_results.keys():
numpy.testing.assert_allclose(
output_results[report_id][data_set_id],
expected_output_results[report_id][data_set_id])
data_set_results = ReportReader().run(doc.outputs[0], out_dir, doc.outputs[0].id, format=ReportFormat.csv)
for data_set in doc.outputs[0].data_sets:
numpy.testing.assert_allclose(
output_results[doc.outputs[0].id][data_set.id],
data_set_results[data_set.id])
data_set_results = ReportReader().run(doc.outputs[1], out_dir, doc.outputs[1].id, format=ReportFormat.csv)
for data_set in doc.outputs[1].data_sets:
numpy.testing.assert_allclose(
output_results[doc.outputs[1].id][data_set.id],
data_set_results[data_set.id])
# save in HDF5 format
doc.models[1].source = doc.models[0].source
io.SedmlSimulationWriter().run(doc, filename)
shutil.rmtree(out_dir)
exec.exec_sed_doc(execute_task, filename, os.path.dirname(filename), out_dir, report_formats=[ReportFormat.h5], plot_formats=[])
data_set_results = ReportReader().run(doc.outputs[0], out_dir, doc.outputs[0].id, format=ReportFormat.h5)
for data_set in doc.outputs[0].data_sets:
numpy.testing.assert_allclose(
output_results[doc.outputs[0].id][data_set.id],
data_set_results[data_set.id])
data_set_results = ReportReader().run(doc.outputs[1], out_dir, doc.outputs[1].id, format=ReportFormat.h5)
for data_set in doc.outputs[1].data_sets:
numpy.testing.assert_allclose(
output_results[doc.outputs[1].id][data_set.id],
data_set_results[data_set.id])
# track execution status
shutil.rmtree(out_dir)
log = SedDocumentLog(
tasks={
'task_1_ss': TaskLog(id='task_1_ss', status=Status.QUEUED),
'task_2_time_course': TaskLog(id='task_2_time_course', status=Status.QUEUED),
},
outputs={
'report_1': ReportLog(id='report_1', status=Status.QUEUED, data_sets={
'dataset_1': Status.QUEUED,
'dataset_2': Status.QUEUED,
}),
'report_2': ReportLog(id='report_2', status=Status.QUEUED, data_sets={
'dataset_3': Status.QUEUED,
'dataset_4': Status.QUEUED,
}),
'report_3': ReportLog(id='report_3', status=Status.QUEUED, data_sets={
'dataset_5': Status.QUEUED,
}),
'report_4': ReportLog(id='report_4', status=Status.QUEUED, data_sets={
'dataset_6': Status.QUEUED,
'dataset_7': Status.QUEUED,
})
},
)
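# Wire up parent links so each task/output log entry can reach the
# archive-level log, which carries out_dir for writing status updates.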
log.parent = CombineArchiveLog(out_dir=out_dir)
log.tasks['task_1_ss'].parent = log
log.tasks['task_2_time_course'].parent = log
log.outputs['report_1'].parent = log
log.outputs['report_2'].parent = log
log.outputs['report_3'].parent = log
log.outputs['report_4'].parent = log
exec.exec_sed_doc(execute_task, filename, os.path.dirname(filename), out_dir, report_formats=[ReportFormat.h5], plot_formats=[],
log=log)
expected_log = {
'location': None,
'status': None,
'exception': None,
'skipReason': None,
'output': None,
'duration': None,
'tasks': [
{
'id': 'task_1_ss',
'status': 'SUCCEEDED',
'exception': None,
'skipReason': None,
'output': log.tasks['task_1_ss'].output,
'duration': log.tasks['task_1_ss'].duration,
'algorithm': None,
'simulatorDetails': None,
},
{
'id': 'task_2_time_course',
'status': 'SUCCEEDED',
'exception': None,
'skipReason': None,
'output': log.tasks['task_2_time_course'].output,
'duration': log.tasks['task_2_time_course'].duration,
'algorithm': None,
'simulatorDetails': None,
},
],
'outputs': [
{
'id': 'report_1',
'status': 'SUCCEEDED',
'exception': None,
'skipReason': None,
'output': log.outputs['report_1'].output,
'duration': log.outputs['report_1'].duration,
'dataSets': [
{'id': 'dataset_1', 'status': 'SUCCEEDED'},
{'id': 'dataset_2', 'status': 'SUCCEEDED'},
],
},
{
'id': 'report_2',
'status': 'SUCCEEDED',
'exception': None,
'skipReason': None,
'output': log.outputs['report_2'].output,
'duration': log.outputs['report_2'].duration,
'dataSets': [
{'id': 'dataset_3', 'status': 'SUCCEEDED'},
{'id': 'dataset_4', 'status': 'SUCCEEDED'},
],
},
{
'id': 'report_3',
'status': 'SUCCEEDED',
'exception': None,
'skipReason': None,
'output': log.outputs['report_3'].output,
'duration': log.outputs['report_3'].duration,
'dataSets': [
{'id': 'dataset_5', 'status': 'SUCCEEDED'},
],
},
{
'id': 'report_4',
'status': 'SUCCEEDED',
'exception': None,
'skipReason': None,
'output': log.outputs['report_4'].output,
'duration': log.outputs['report_4'].duration,
'dataSets': [
{'id': 'dataset_6', 'status': 'SUCCEEDED'},
{'id': 'dataset_7', 'status': 'SUCCEEDED'},
],
},
],
}
actual = log.to_json()
actual['tasks'].sort(key=lambda task: task['id'])
actual['outputs'].sort(key=lambda output: output['id'])
for output in actual['outputs']:
output['dataSets'].sort(key=lambda dat_set: dat_set['id'])
self.assertEqual(actual, expected_log)
self.assertTrue(os.path.isfile(os.path.join(out_dir, get_config().LOG_PATH)))
def test_with_model_changes(self):
doc = data_model.SedDocument()
model1 = data_model.Model(
id='model1',
source='model1.xml',
language='urn:sedml:language:sbml',
)
doc.models.append(model1)
model2 = data_model.Model(
id='model2',
source='model1.xml',
language='urn:sedml:language:sbml',
changes=[
data_model.ModelAttributeChange(
target="/sbml:sbml/sbml:model/sbml:listOfSpecies/sbml:species[@id='Z']/@id",
new_value="Z2",
)
]
)
doc.models.append(model2)
model3 = data_model.Model(
id='model3',
source='#model2',
language='urn:sedml:language:sbml',
changes=[
data_model.ModelAttributeChange(
target="/sbml:sbml/sbml:model/sbml:listOfSpecies/sbml:species[@id='Z2']/@initialConcentration",
new_value="4.0",
),
data_model.ComputeModelChange(
target="/sbml:sbml/sbml:model/sbml:listOfSpecies/sbml:species[@id='Z2']/@initialConcentration",
parameters=[
data_model.Parameter(
id='p',
value=3.1,
),
],
variables=[
data_model.Variable(
id='var_Z2',
target="/sbml:sbml/sbml:model/sbml:listOfSpecies/sbml:species[@id='Z2']/@initialConcentration",
),
],
math='p * var_Z2',
),
],
)
model3.changes[1].variables[0].model = model3
doc.models.append(model3)
model1.changes.append(
data_model.ModelAttributeChange(
target="/sbml:sbml/sbml:model/sbml:listOfSpecies/sbml:species[@id='X']/@initialConcentration",
new_value="2.5",
)
)
model1.changes.append(
data_model.ComputeModelChange(
target="/sbml:sbml/sbml:model/sbml:listOfSpecies/sbml:species[@id='Y']/@initialConcentration",
parameters=[
data_model.Parameter(id='a', value=0.2),
data_model.Parameter(id='b', value=2.0),
],
variables=[
data_model.Variable(
id='y',
model=model1,
target="/sbml:sbml/sbml:model/sbml:listOfSpecies/sbml:species[@id='X']/@initialConcentration",
),
data_model.Variable(
id='z',
model=model3,
target="/sbml:sbml/sbml:model/sbml:listOfSpecies/sbml:species[@id='Z2']/@initialConcentration",
),
],
math='a * y + b * z',
),
)
doc.simulations.append(data_model.SteadyStateSimulation(
id='sim1',
))
doc.tasks.append(data_model.Task(
id='task1',
model=doc.models[0],
simulation=doc.simulations[0],
))
doc.data_generators.append(data_model.DataGenerator(
id='data_gen_1',
variables=[
data_model.Variable(
id='data_gen_1_var_1',
target="/sbml:sbml/sbml:model/sbml:listOfSpecies/sbml:species[@id='X']/@initialConcentration",
task=doc.tasks[0],
),
],
math='data_gen_1_var_1',
))
doc.data_generators.append(data_model.DataGenerator(
id='data_gen_2',
variables=[
data_model.Variable(
id='data_gen_2_var_2',
target="/sbml:sbml/sbml:model/sbml:listOfSpecies/sbml:species[@id='Y']/@initialConcentration",
task=doc.tasks[0],
),
],
math='data_gen_2_var_2',
))
doc.outputs.append(data_model.Report(
id='report_1',
data_sets=[
data_model.DataSet(
id='dataset_1',
label='dataset_1',
data_generator=doc.data_generators[0],
),
data_model.DataSet(
id='dataset_2',
label='dataset_2',
data_generator=doc.data_generators[1],
),
],
))
filename = os.path.join(self.tmp_dir, 'test.sedml')
working_dir = os.path.dirname(filename)
io.SedmlSimulationWriter().run(doc, filename)
shutil.copyfile(
os.path.join(os.path.dirname(__file__), '..', 'fixtures', 'sbml-three-species.xml'),
os.path.join(working_dir, 'model1.xml'))
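# Unlike the canned executor in test_successful, this callback parses the
# (possibly change-applied) model file and reads each variable's value from
# the attribute addressed by its XPath target.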
def execute_task(task, variables, log):
et = etree.parse(task.model.source)
results = VariableResults()
for variable in variables:
obj_xpath, _, attr = variable.target.rpartition('/@')
obj = et.xpath(obj_xpath, namespaces={'sbml': 'http://www.sbml.org/sbml/level3/version2'})[0]
results[variable.id] = numpy.array((float(obj.get(attr)),))
return results, log
out_dir = os.path.join(self.tmp_dir, 'results')
report_results, _ = exec.exec_sed_doc(execute_task, filename, working_dir, out_dir, apply_xml_model_changes=False)
numpy.testing.assert_equal(report_results[doc.outputs[0].id][doc.outputs[0].data_sets[0].id], numpy.array((1., )))
numpy.testing.assert_equal(report_results[doc.outputs[0].id][doc.outputs[0].data_sets[1].id], numpy.array((2., )))
report_results, _ = exec.exec_sed_doc(execute_task, filename, working_dir, out_dir, apply_xml_model_changes=True)
numpy.testing.assert_equal(report_results[doc.outputs[0].id][doc.outputs[0].data_sets[0].id], numpy.array((2.5, )))
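# a * y + b * z with a=0.2, b=2.0, y = X's changed initialConcentration
# (2.5), and z = Z2's value after its compute change (p * var_Z2 = 3.1 * 4.0)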
expected_value = 0.2 * 2.5 + 2.0 * 3.1 * 4.0
numpy.testing.assert_equal(report_results[doc.outputs[0].id][doc.outputs[0].data_sets[1].id], numpy.array((expected_value)))
def test_warnings(self):
# no tasks
doc = data_model.SedDocument()
filename = os.path.join(self.tmp_dir, 'test.sedml')
io.SedmlSimulationWriter().run(doc, filename)
def execute_task(task, variables, log):
return VariableResults(), log
out_dir = os.path.join(self.tmp_dir, 'results')
with self.assertWarns(NoTasksWarning):
exec.exec_sed_doc(execute_task, filename, os.path.dirname(filename), out_dir)
# no outputs
doc = data_model.SedDocument()
doc.models.append(data_model.Model(
id='model1',
source='model1.xml',
language='urn:sedml:language:sbml',
changes=[
data_model.ModelAttributeChange(
target="/sbml:sbml/sbml:model/sbml:listOfSpecies/sbml:species[@id='X']/@initialConcentration",
new_value="2.0",
),
],
))
doc.simulations.append(data_model.SteadyStateSimulation(
id='sim1',
))
doc.tasks.append(data_model.Task(
id='task1',
model=doc.models[0],
simulation=doc.simulations[0],
))
doc.tasks.append(data_model.Task(
id='task2',
model=doc.models[0],
simulation=doc.simulations[0],
))
doc.data_generators.append(data_model.DataGenerator(
id='data_gen_1',
variables=[
data_model.Variable(
id='data_gen_1_var_1',
target="/sbml:sbml/sbml:model/sbml:listOfSpecies/sbml:species[@id='X']/@initialConcentration",
task=doc.tasks[0],
),
],
math='data_gen_1_var_1',
))
doc.outputs.append(data_model.Report(id='report_1', data_sets=[
data_model.DataSet(id='data_set_1', label='data_set_1', data_generator=doc.data_generators[0]),
]))
filename = os.path.join(self.tmp_dir, 'test.sedml')
io.SedmlSimulationWriter().run(doc, filename)
def execute_task(task, variables, log):
if task.id == 'task1':
return VariableResults({'data_gen_1_var_1': numpy.array(1.)}), log
else:
return VariableResults(), log
working_dir = os.path.dirname(filename)
with open(os.path.join(working_dir, doc.models[0].source), 'w'):
pass
out_dir = os.path.join(self.tmp_dir, 'results')
with self.assertWarns(NoOutputsWarning):
exec.exec_sed_doc(execute_task, filename, working_dir, out_dir)
def test_errors(self):
# error: variable not recorded
doc = data_model.SedDocument()
doc.models.append(data_model.Model(
id='model1',
source='model1.xml',
language='urn:sedml:language:sbml',
))
doc.simulations.append(data_model.SteadyStateSimulation(
id='sim1',
))
doc.tasks.append(data_model.Task(
id='task1',
model=doc.models[0],
simulation=doc.simulations[0],
))
doc.data_generators.append(data_model.DataGenerator(
id='data_gen_1',
variables=[
data_model.Variable(
id='data_gen_1_var_1',
target="/sbml:sbml/sbml:model/sbml:listOfSpecies/sbml:speces[@id='var_1']/@concentration",
task=doc.tasks[0],
),
],
math='data_gen_1_var_1',
))
doc.outputs.append(data_model.Report(
id='report_1',
data_sets=[
data_model.DataSet(
id='dataset_1',
label='dataset_1',
data_generator=doc.data_generators[0],
),
],
))
filename = os.path.join(self.tmp_dir, 'test.sedml')
io.SedmlSimulationWriter().run(doc, filename)
def execute_task(task, variables, log):
return VariableResults(), log
working_dir = os.path.dirname(filename)
with open(os.path.join(working_dir, doc.models[0].source), 'w'):
pass
out_dir = os.path.join(self.tmp_dir, 'results')
with self.assertRaisesRegex(SedmlExecutionError, 'did not generate the following expected variables'):
exec.exec_sed_doc(execute_task, filename, working_dir, out_dir)
# error: unsupported type of task
doc = data_model.SedDocument()
doc.tasks.append(mock.Mock(
id='task_1_ss',
))
out_dir = os.path.join(self.tmp_dir, 'results')
with self.assertRaisesRegex(SedmlExecutionError, 'not supported'):
exec.exec_sed_doc(execute_task, doc, '.', out_dir)
# error: unsupported data generators
doc = data_model.SedDocument()
doc.models.append(data_model.Model(
id='model1',
source='model1.xml',
language='urn:sedml:language:sbml',
))
doc.simulations.append(data_model.SteadyStateSimulation(
id='sim1',
))
doc.tasks.append(data_model.Task(
id='task1',
model=doc.models[0],
simulation=doc.simulations[0],
))
doc.data_generators.append(data_model.DataGenerator(
id='data_gen_1',
variables=[
data_model.Variable(
id='data_gen_1_var_1',
target="/sbml:sbml/sbml:model/sbml:listOfSpecies/sbml:speces[@id='var_1']/@concentration",
task=doc.tasks[0],
),
data_model.Variable(
id='data_gen_1_var_2',
target="/sbml:sbml/sbml:model/sbml:listOfSpecies/sbml:speces[@id='var_1']/@concentration",
task=doc.tasks[0],
),
],
math='data_gen_1_var_1 * data_gen_1_var_2',
))
doc.outputs.append(data_model.Report(
id='report_1',
data_sets=[
data_model.DataSet(
id='dataset_1',
label='dataset_1',
data_generator=doc.data_generators[0],
),
],
))
def execute_task(task, variables, log):
results = VariableResults()
results[doc.data_generators[0].variables[0].id] = numpy.array((1.,))
results[doc.data_generators[0].variables[1].id] = numpy.array((1.,))
return results, log
working_dir = self.tmp_dir
with open(os.path.join(working_dir, doc.models[0].source), 'w'):
pass
out_dir = os.path.join(self.tmp_dir, 'results')
exec.exec_sed_doc(execute_task, doc, working_dir, out_dir)
# error: inconsistent math
doc.data_generators = [
data_model.DataGenerator(
id='data_gen_1',
variables=[
data_model.Variable(
id='data_gen_1_var_1',
target="/sbml:sbml/sbml:model/sbml:listOfSpecies/sbml:speces[@id='var_1']/@concentration",
task=doc.tasks[0],
),
],
math='xx',
),
]
doc.outputs = [
data_model.Report(
id='report_1',
data_sets=[
data_model.DataSet(
id='dataset_1',
label='dataset_1',
data_generator=doc.data_generators[0],
),
]
)
]
def execute_task(task, variables, log):
results = VariableResults()
results[doc.data_generators[0].variables[0].id] = numpy.array((1.,))
return results, log
working_dir = self.tmp_dir
with open(os.path.join(working_dir, doc.models[0].source), 'w'):
pass
out_dir = os.path.join(self.tmp_dir, 'results')
with self.assertRaisesRegex(SedmlExecutionError, 'could not be evaluated'):
exec.exec_sed_doc(execute_task, doc, working_dir, out_dir)
# error: variables have inconsistent shapes
doc.data_generators = [
data_model.DataGenerator(
id='data_gen_1',
variables=[
data_model.Variable(
id='data_gen_1_var_1',
target="/sbml:sbml/sbml:model/sbml:listOfSpecies/sbml:speces[@id='var_1']/@concentration",
task=doc.tasks[0],
),
data_model.Variable(
id='data_gen_1_var_2',
target="/sbml:sbml/sbml:model/sbml:listOfSpecies/sbml:speces[@id='var_2']/@concentration",
task=doc.tasks[0],
),
],
math='data_gen_1_var_1 * data_gen_1_var_2',
),
]
doc.outputs = [
data_model.Report(
id='report_1',
data_sets=[
data_model.DataSet(
id='dataset_1',
label='dataset_1',
data_generator=doc.data_generators[0],
),
],
),
]
def execute_task(task, variables, log):
results = VariableResults()
results[doc.data_generators[0].variables[0].id] = numpy.array((1.,))
results[doc.data_generators[0].variables[1].id] = numpy.array((1., 2.))
return results, log
working_dir = self.tmp_dir
with open(os.path.join(working_dir, doc.models[0].source), 'w'):
pass
out_dir = os.path.join(self.tmp_dir, 'results')
with self.assertWarnsRegex(InconsistentVariableShapesWarning, 'do not have consistent shapes'):
exec.exec_sed_doc(execute_task, doc, working_dir, out_dir)
# error: data generators have inconsistent shapes
doc.data_generators = [
data_model.DataGenerator(
id='data_gen_1',
variables=[
data_model.Variable(
id='data_gen_1_var_1',
target="/sbml:sbml/sbml:model/sbml:listOfSpecies/sbml:speces[@id='var_1']/@concentration",
task=doc.tasks[0],
),
],
math='data_gen_1_var_1',
),
data_model.DataGenerator(
id='data_gen_2',
variables=[
data_model.Variable(
id='data_gen_2_var_2',
target="/sbml:sbml/sbml:model/sbml:listOfSpecies/sbml:speces[@id='var_2']/@concentration",
task=doc.tasks[0],
),
],
math='data_gen_2_var_2',
),
data_model.DataGenerator(
id='data_gen_3',
variables=[
data_model.Variable(
id='data_gen_3_var_3',
target="/sbml:sbml/sbml:model/sbml:listOfSpecies/sbml:speces[@id='var_3']/@concentration",
task=doc.tasks[0],
),
],
math='data_gen_3_var_3',
),
]
doc.outputs = [
data_model.Report(
id='report_1',
data_sets=[
data_model.DataSet(
id='dataset_1',
label='dataset_1',
data_generator=doc.data_generators[0],
),
data_model.DataSet(
id='dataset_2',
label='dataset_2',
data_generator=doc.data_generators[1],
),
],
),
]
def execute_task(task, variables, log):
results = VariableResults()
results[doc.data_generators[0].variables[0].id] = numpy.array((1.,))
results[doc.data_generators[1].variables[0].id] = numpy.array((1., 2.))
results[doc.data_generators[2].variables[0].id] = numpy.array(((1., 2., 3.), (4., 5., 6.), (7., 8., 9.)))
return results, log
working_dir = self.tmp_dir
with open(os.path.join(working_dir, doc.models[0].source), 'w'):
pass
out_dir = os.path.join(self.tmp_dir, 'results')
with self.assertWarnsRegex(UserWarning, 'do not have consistent shapes'):
report_results, _ = exec.exec_sed_doc(execute_task, doc, working_dir, out_dir)
numpy.testing.assert_equal(report_results[doc.outputs[0].id][doc.outputs[0].data_sets[0].id], numpy.array((1.)))
numpy.testing.assert_equal(report_results[doc.outputs[0].id][doc.outputs[0].data_sets[1].id], numpy.array((1., 2.)))
doc.outputs[0].data_sets.append(
data_model.DataSet(
id='dataset_3',
label='dataset_3',
data_generator=doc.data_generators[2],
),
)
working_dir = self.tmp_dir
with open(os.path.join(working_dir, doc.models[0].source), 'w'):
pass
out_dir = os.path.join(self.tmp_dir, 'results2')
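# A report mixing 1-d and 3-d data sets cannot be flattened into CSV's
# tabular layout, so the CSV export below fails while HDF5 succeeds.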
with self.assertRaisesRegex(SedmlExecutionError, 'Multidimensional reports cannot be exported to CSV'):
with self.assertWarnsRegex(UserWarning, 'do not have consistent shapes'):
exec.exec_sed_doc(execute_task, doc, working_dir, out_dir)
with self.assertWarnsRegex(UserWarning, 'do not have consistent shapes'):
report_results, _ = exec.exec_sed_doc(execute_task, doc, working_dir, out_dir, report_formats=[ReportFormat.h5])
numpy.testing.assert_equal(report_results[doc.outputs[0].id][doc.outputs[0].data_sets[0].id],
numpy.array((1.,)))
numpy.testing.assert_equal(report_results[doc.outputs[0].id][doc.outputs[0].data_sets[1].id],
numpy.array((1., 2.)))
numpy.testing.assert_equal(report_results[doc.outputs[0].id][doc.outputs[0].data_sets[2].id],
numpy.array(((1., 2., 3.), (4., 5., 6.), (7., 8., 9.))))
# warning: data set labels are not unique
doc.data_generators = [
data_model.DataGenerator(
id='data_gen_1',
variables=[
data_model.Variable(
id='data_gen_1_var_1',
target="/sbml:sbml/sbml:model/sbml:listOfSpecies/sbml:speces[@id='var_1']/@concentration",
task=doc.tasks[0],
),
],
math='data_gen_1_var_1',
),
data_model.DataGenerator(
id='data_gen_2',
variables=[
data_model.Variable(
id='data_gen_2_var_2',
target="/sbml:sbml/sbml:model/sbml:listOfSpecies/sbml:speces[@id='var_1']/@concentration",
task=doc.tasks[0],
),
],
math='data_gen_2_var_2',
),
]
doc.outputs = [
data_model.Report(
id='report_1',
data_sets=[
data_model.DataSet(
id='dataset_1',
label='dataset_label',
data_generator=doc.data_generators[0],
),
data_model.DataSet(
id='dataset_2',
label='dataset_label',
data_generator=doc.data_generators[1],
),
],
),
]
def execute_task(task, variables, log):
results = VariableResults()
results[doc.data_generators[0].variables[0].id] = numpy.array((1., 2.))
results[doc.data_generators[1].variables[0].id] = numpy.array((2., 3.))
return results, log
working_dir = self.tmp_dir
with open(os.path.join(working_dir, doc.models[0].source), 'w'):
pass
out_dir = os.path.join(self.tmp_dir, 'results')
with self.assertWarnsRegex(RepeatDataSetLabelsWarning, 'should have unique labels'):
exec.exec_sed_doc(execute_task, doc, working_dir, out_dir)
# error: unsupported outputs
doc.outputs = [
mock.Mock(id='unsupported')
]
working_dir = self.tmp_dir
with open(os.path.join(working_dir, doc.models[0].source), 'w'):
pass
log = SedDocumentLog(tasks={}, outputs={})
for task in doc.tasks:
log.tasks[task.id] = TaskLog(parent=log)
for output in doc.outputs:
log.outputs[output.id] = ReportLog(parent=log)
with self.assertRaisesRegex(SedmlExecutionError, 'are not supported'):
exec.exec_sed_doc(execute_task, doc, working_dir, out_dir, log=log)
def test_2d_plot(self):
doc = data_model.SedDocument()
doc.models.append(data_model.Model(
id='model',
source='model1.xml',
language='urn:sedml:language:sbml',
))
doc.simulations.append(data_model.UniformTimeCourseSimulation(
id='sim',
initial_time=0.,
output_start_time=10.,
output_end_time=10.,
number_of_points=10,
))
doc.tasks.append(data_model.Task(
id='task1',
model=doc.models[0],
simulation=doc.simulations[0],
))
doc.tasks.append(data_model.Task(
id='task2',
model=doc.models[0],
simulation=doc.simulations[0],
))
doc.data_generators.append(data_model.DataGenerator(
id='data_gen_time',
variables=[
data_model.Variable(
id='time',
symbol=data_model.Symbol.time,
task=doc.tasks[0],
),
],
math='time',
))
doc.data_generators.append(data_model.DataGenerator(
id='data_gen_var',
variables=[
data_model.Variable(
id='var',
target="/sbml:sbml/sbml:model/sbml:listOfSpecies/sbml:speces[@id='var']/@concentration",
task=doc.tasks[1],
),
],
math='var',
))
doc.outputs.append(data_model.Plot2D(
id='plot_2d_1',
curves=[
data_model.Curve(
id='curve1',
x_data_generator=doc.data_generators[0],
y_data_generator=doc.data_generators[0],
x_scale=data_model.AxisScale.linear,
y_scale=data_model.AxisScale.linear,
),
data_model.Curve(
id='curve2',
x_data_generator=doc.data_generators[1],
y_data_generator=doc.data_generators[1],
x_scale=data_model.AxisScale.linear,
y_scale=data_model.AxisScale.linear,
),
],
))
doc.outputs.append(data_model.Plot2D(
id='plot_2d_2',
curves=[
data_model.Curve(
id='curve3',
x_data_generator=doc.data_generators[1],
y_data_generator=doc.data_generators[1],
x_scale=data_model.AxisScale.linear,
y_scale=data_model.AxisScale.linear,
),
],
))
filename = os.path.join(self.tmp_dir, 'test.sedml')
io.SedmlSimulationWriter().run(doc, filename)
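# Stub executor: time spans 0..10 over 11 points and var is twice time.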
def execute_task(task, variables, log=None):
results = VariableResults()
results[doc.data_generators[0].variables[0].id] = numpy.linspace(0., 10., 10 + 1)
results[doc.data_generators[1].variables[0].id] = 2 * results[doc.data_generators[0].variables[0].id]
return results, log
working_dir = os.path.dirname(filename)
with open(os.path.join(working_dir, doc.models[0].source), 'w'):
pass
out_dir = os.path.join(self.tmp_dir, 'results')
_, log = exec.exec_sed_doc(execute_task, filename, working_dir,
out_dir, plot_formats=[PlotFormat.pdf])
self.assertTrue(os.path.isfile(os.path.join(out_dir, 'plot_2d_1.pdf')))
self.assertTrue(os.path.isfile(os.path.join(out_dir, 'plot_2d_2.pdf')))
self.assertEqual(
log.to_json()['outputs'],
[
{
'status': 'SUCCEEDED',
'exception': None,
'skipReason': None,
'output': log.to_json()['outputs'][0]['output'],
'duration': log.to_json()['outputs'][0]['duration'],
'id': 'plot_2d_1',
'curves': [
{'id': 'curve1', 'status': 'SUCCEEDED'},
{'id': 'curve2', 'status': 'SUCCEEDED'},
],
},
{
'status': 'SUCCEEDED',
'exception': None,
'skipReason': None,
'output': log.to_json()['outputs'][1]['output'],
'duration': log.to_json()['outputs'][1]['duration'],
'id': 'plot_2d_2',
'curves': [
{'id': 'curve3', 'status': 'SUCCEEDED'},
],
},
]
)
os.remove(os.path.join(out_dir, 'plot_2d_1.pdf'))
os.remove(os.path.join(out_dir, 'plot_2d_2.pdf'))
# error with a curve: the first data generator's math references 'var', which is not one of its variables
doc.data_generators[0].math = 'time * var'
io.SedmlSimulationWriter().run(doc, filename)
log = init_sed_document_log(doc)
with self.assertRaisesRegex(SedmlExecutionError, "name 'var' is not defined"):
exec.exec_sed_doc(execute_task, filename, working_dir,
out_dir, log=log, plot_formats=[PlotFormat.pdf])
self.assertTrue(os.path.isfile(os.path.join(out_dir, 'plot_2d_1.pdf')))
self.assertTrue(os.path.isfile(os.path.join(out_dir, 'plot_2d_2.pdf')))
self.assertEqual(
log.to_json()['outputs'],
[
{
'status': 'FAILED',
'exception': log.to_json()['outputs'][0]['exception'],
'skipReason': None,
'output': log.to_json()['outputs'][0]['output'],
'duration': log.to_json()['outputs'][0]['duration'],
'id': 'plot_2d_1',
'curves': [
{'id': 'curve1', 'status': 'FAILED'},
{'id': 'curve2', 'status': 'SUCCEEDED'},
],
},
{
'status': 'SUCCEEDED',
'exception': None,
'skipReason': None,
'output': log.to_json()['outputs'][1]['output'],
'duration': log.to_json()['outputs'][1]['duration'],
'id': 'plot_2d_2',
'curves': [
{'id': 'curve3', 'status': 'SUCCEEDED'},
],
},
]
)
# error with a task: the executor returns None for the time variable, so its data generator cannot be produced
def execute_task(task, variables, log=None):
results = VariableResults()
results[doc.data_generators[0].variables[0].id] = None
results[doc.data_generators[1].variables[0].id] = 2 * numpy.linspace(0., 10., 10 + 1)
return results, log
doc.data_generators[0].math = 'time'
io.SedmlSimulationWriter().run(doc, filename)
log = init_sed_document_log(doc)
with self.assertRaisesRegex(SedmlExecutionError, "Some generators could not be produced:"):
exec.exec_sed_doc(execute_task, filename, working_dir,
out_dir, log=log, plot_formats=[PlotFormat.pdf])
self.assertTrue(os.path.isfile(os.path.join(out_dir, 'plot_2d_1.pdf')))
self.assertTrue(os.path.isfile(os.path.join(out_dir, 'plot_2d_2.pdf')))
self.assertEqual(
log.to_json()['outputs'],
[
{
'status': 'FAILED',
'exception': log.to_json()['outputs'][0]['exception'],
'skipReason': None,
'output': log.to_json()['outputs'][0]['output'],
'duration': log.to_json()['outputs'][0]['duration'],
'id': 'plot_2d_1',
'curves': [
{'id': 'curve1', 'status': 'FAILED'},
{'id': 'curve2', 'status': 'SUCCEEDED'},
],
},
{
'status': 'SUCCEEDED',
'exception': None,
'skipReason': None,
'output': log.to_json()['outputs'][1]['output'],
'duration': log.to_json()['outputs'][1]['duration'],
'id': 'plot_2d_2',
'curves': [
{'id': 'curve3', 'status': 'SUCCEEDED'},
],
},
]
)
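# 3D plots mirror the 2D case, but surfaces require 2-D (meshgrid-shaped) data.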
def test_3d_plot(self):
doc = data_model.SedDocument()
doc.models.append(data_model.Model(
id='model',
source='model1.xml',
language='urn:sedml:language:sbml',
))
doc.simulations.append(data_model.UniformTimeCourseSimulation(
id='sim',
initial_time=0.,
output_start_time=10.,
output_end_time=10.,
number_of_points=10,
))
doc.tasks.append(data_model.Task(
id='task1',
model=doc.models[0],
simulation=doc.simulations[0],
))
doc.tasks.append(data_model.Task(
id='task2',
model=doc.models[0],
simulation=doc.simulations[0],
))
doc.data_generators.append(data_model.DataGenerator(
id='data_gen_time',
variables=[
data_model.Variable(
id='time',
symbol=data_model.Symbol.time,
task=doc.tasks[0],
),
],
math='time',
))
doc.data_generators.append(data_model.DataGenerator(
id='data_gen_var',
variables=[
data_model.Variable(
id='var',
target="/sbml:sbml/sbml:model/sbml:listOfSpecies/sbml:speces[@id='var']/@concentration",
task=doc.tasks[1],
),
],
math='var',
))
doc.outputs.append(data_model.Plot3D(
id='plot_3d_1',
surfaces=[
data_model.Surface(
id='surface1',
x_data_generator=doc.data_generators[0],
y_data_generator=doc.data_generators[0],
z_data_generator=doc.data_generators[0],
x_scale=data_model.AxisScale.linear,
y_scale=data_model.AxisScale.linear,
z_scale=data_model.AxisScale.linear,
),
data_model.Surface(
id='surface2',
x_data_generator=doc.data_generators[1],
y_data_generator=doc.data_generators[1],
z_data_generator=doc.data_generators[1],
x_scale=data_model.AxisScale.linear,
y_scale=data_model.AxisScale.linear,
z_scale=data_model.AxisScale.linear,
),
],
))
doc.outputs.append(data_model.Plot3D(
id='plot_3d_2',
surfaces=[
data_model.Surface(
id='surface3',
x_data_generator=doc.data_generators[1],
y_data_generator=doc.data_generators[1],
z_data_generator=doc.data_generators[1],
x_scale=data_model.AxisScale.linear,
y_scale=data_model.AxisScale.linear,
z_scale=data_model.AxisScale.linear,
),
],
))
filename = os.path.join(self.tmp_dir, 'test.sedml')
io.SedmlSimulationWriter().run(doc, filename)
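# Stub executor: both variables are the same 2-D meshgrid, as required for surface plots.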
def execute_task(task, variables, log=None):
results = VariableResults()
x = numpy.arange(-5, 5, 0.25)
x, _ = numpy.meshgrid(x, x)
results[doc.data_generators[0].variables[0].id] = x
results[doc.data_generators[1].variables[0].id] = x
return results, log
working_dir = os.path.dirname(filename)
with open(os.path.join(working_dir, doc.models[0].source), 'w'):
pass
out_dir = os.path.join(self.tmp_dir, 'results')
_, log = exec.exec_sed_doc(execute_task, filename, working_dir,
out_dir, plot_formats=[PlotFormat.pdf])
self.assertTrue(os.path.isfile(os.path.join(out_dir, 'plot_3d_1.pdf')))
self.assertTrue(os.path.isfile(os.path.join(out_dir, 'plot_3d_2.pdf')))
self.assertEqual(
log.to_json()['outputs'],
[
{
'status': 'SUCCEEDED',
'exception': None,
'skipReason': None,
'output': log.to_json()['outputs'][0]['output'],
'duration': log.to_json()['outputs'][0]['duration'],
'id': 'plot_3d_1',
'surfaces': [
{'id': 'surface1', 'status': 'SUCCEEDED'},
{'id': 'surface2', 'status': 'SUCCEEDED'},
],
},
{
'status': 'SUCCEEDED',
'exception': None,
'skipReason': None,
'output': log.to_json()['outputs'][1]['output'],
'duration': log.to_json()['outputs'][1]['duration'],
'id': 'plot_3d_2',
'surfaces': [
{'id': 'surface3', 'status': 'SUCCEEDED'},
],
},
]
)
os.remove(os.path.join(out_dir, 'plot_3d_1.pdf'))
os.remove(os.path.join(out_dir, 'plot_3d_2.pdf'))
# error with a surface: the first data generator's math again references the undefined 'var'
doc.data_generators[0].math = 'time * var'
io.SedmlSimulationWriter().run(doc, filename)
log = init_sed_document_log(doc)
with self.assertRaisesRegex(SedmlExecutionError, "name 'var' is not defined"):
exec.exec_sed_doc(execute_task, filename, working_dir,
out_dir, log=log, plot_formats=[PlotFormat.pdf])
self.assertTrue(os.path.isfile(os.path.join(out_dir, 'plot_3d_1.pdf')))
self.assertTrue(os.path.isfile(os.path.join(out_dir, 'plot_3d_2.pdf')))
self.assertEqual(
log.to_json()['outputs'],
[
{
'status': 'FAILED',
'exception': log.to_json()['outputs'][0]['exception'],
'skipReason': None,
'output': log.to_json()['outputs'][0]['output'],
'duration': log.to_json()['outputs'][0]['duration'],
'id': 'plot_3d_1',
'surfaces': [
{'id': 'surface1', 'status': 'FAILED'},
{'id': 'surface2', 'status': 'SUCCEEDED'},
],
},
{
'status': 'SUCCEEDED',
'exception': None,
'skipReason': None,
'output': log.to_json()['outputs'][1]['output'],
'duration': log.to_json()['outputs'][1]['duration'],
'id': 'plot_3d_2',
'surfaces': [
{'id': 'surface3', 'status': 'SUCCEEDED'},
],
},
]
)
| 38.422585
| 136
| 0.500878
| 5,453
| 54,099
| 4.758115
| 0.052081
| 0.053419
| 0.049141
| 0.026208
| 0.838048
| 0.796809
| 0.765436
| 0.751407
| 0.738996
| 0.726933
| 0
| 0.020406
| 0.372243
| 54,099
| 1,407
| 137
| 38.449893
| 0.743588
| 0.007357
| 0
| 0.731511
| 0
| 0.023312
| 0.142721
| 0.048505
| 0
| 0
| 0
| 0
| 0.038585
| 1
| 0.016881
| false
| 0.009646
| 0.016077
| 0.001608
| 0.045016
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
ce73ba83e383c924f4adc7f4435b7b66d09e37e2
| 184
|
py
|
Python
|
pypy/translator/jvm/test/test_extreme.py
|
camillobruni/pygirl
|
ddbd442d53061d6ff4af831c1eab153bcc771b5a
|
[
"MIT"
] | 12
|
2016-01-06T07:10:28.000Z
|
2021-05-13T23:02:02.000Z
|
pypy/translator/jvm/test/test_extreme.py
|
woodrow/pyoac
|
b5dc59e6a38e7912db47f26fb23ffa4764a3c0e7
|
[
"MIT"
] | null | null | null |
pypy/translator/jvm/test/test_extreme.py
|
woodrow/pyoac
|
b5dc59e6a38e7912db47f26fb23ffa4764a3c0e7
|
[
"MIT"
] | 2
|
2016-07-29T07:09:50.000Z
|
2016-10-16T08:50:26.000Z
|
from pypy.translator.jvm.test.runtest import JvmTest
from pypy.translator.oosupport.test_template.extreme import BaseTestExtreme
class TestExtreme(BaseTestExtreme, JvmTest):
pass
| 30.666667
| 75
| 0.842391
| 22
| 184
| 7
| 0.681818
| 0.103896
| 0.233766
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.092391
| 184
| 5
| 76
| 36.8
| 0.922156
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.25
| 0.5
| 0
| 0.75
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 7
|
ceb8f9b08dd0a48099aae6a96157fbc3fa8c4cbf
| 5,257
|
py
|
Python
|
Game.py
|
doublehelix91/Multi-Agent-Alternating-Turn-Board-and-Card-Games
|
294800019b608fd21c652645b6c6ce557d9e52a4
|
[
"MIT"
] | 4
|
2019-11-11T15:16:06.000Z
|
2021-03-08T10:05:43.000Z
|
Game.py
|
doublehelix91/Multi-Agent-Alternating-Turn-Board-and-Card-Games
|
294800019b608fd21c652645b6c6ce557d9e52a4
|
[
"MIT"
] | null | null | null |
Game.py
|
doublehelix91/Multi-Agent-Alternating-Turn-Board-and-Card-Games
|
294800019b608fd21c652645b6c6ce557d9e52a4
|
[
"MIT"
] | 1
|
2020-01-15T23:41:53.000Z
|
2020-01-15T23:41:53.000Z
|
"""
@author: Nihal & Adi
"""
from sequence import simpleSequence
from Qlearn import Qlearn
from minimaxQ import miniMAX
import random
import time
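# Each pairing function below plays one full game of simple Sequence, alternating turns until the board is complete.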
def M_M(player, player2):
M1 = miniMAX(player)
M2 = miniMAX(player2)
s = simpleSequence()
s.printTitle("Minimax Q vs Minimax Q")
s.print_board()
s.printHand()
while not s.complete():
while True:
if player == s.player:
state = str(s.board_string()) + '-' + s.getHand(player)
#print(state)
card, location = M1.play(state)
if not s.make_move(card,location):
print("Invalid Move!! Try Again!!")
else:
break
else:
state = s.board_string() + '-' + s.getHand(player2)
#print(state)
card, location = M2.play(state)
if not s.make_move(card, location):
print("Invalid Move!! Try Again!!")
else:
break
s.printHand()
s.switch_player()
def Q_Q(player, player2):
Q1 = Qlearn(player)
Q2 = Qlearn(player2)
s = simpleSequence()
s.printTitle("Q Learning vs Q Learning")
s.print_board()
s.printHand()
while not s.complete():
while True:
if player == s.player:
state = str(s.board_string()) + '-' + s.getHand(player)
#print(state)
card, location = Q1.play(state)
if not s.make_move(card,location):
print("Invalid Move!! Try Again!!")
else:
break
else:
state = s.board_string() + '-' + s.getHand(player2)
#print(state)
card, location = Q2.play(state)
if not s.make_move(card, location):
print("Invalid Move!! Try Again!!")
else:
break
s.printHand()
s.switch_player()
def Q_M(player, player2):
Q1 = Qlearn(player)
M2 = miniMAX(player2)
s = simpleSequence()
time.sleep(10)
s.printTitle("Minimax Q vs Q Learning")
s.print_board()
s.printHand()
while not s.complete():
while True:
if player == s.player:
state = str(s.board_string()) + '-' + s.getHand(player)
#print(state)
card, location = Q1.play(state)
if not s.make_move(card,location):
print("Invalid Move!! Try Again!!")
else:
break
else:
state = s.board_string() + '-' + s.getHand(player2)
#print(state)
card, location = M2.play(state)
if not s.make_move(card, location):
print("Invalid Move!! Try Again!!")
else:
break
s.printHand()
s.switch_player()
def Q_R(player):
Q1 = Qlearn(player)
s = simpleSequence()
s.printTitle("Q Learning vs Random")
s.print_board()
s.printHand()
while not s.complete():
while True:
if player == s.player:
state = str(s.board_string()) + '-' + s.getHand(player)
#print(state)
card, location = Q1.play(state)
if not s.make_move(card,location):
print("Invalid Move!! Try Again!!")
else:
break
else:
#state = s.board_string() + '-' + s.getHand(player2)
#print(state)
card = random.choice(s.available_moves())
#print(card.suit)
while (card.value=='Jack'):
card = random.choice(s.available_moves())
location=s.find_card_location(card)
if not s.make_move(card, location):
print("Invalid Move!! Try Again!!")
else:
break
s.printHand()
s.switch_player()
def M_R(player):
M1 = miniMAX(player)
s = simpleSequence()
s.printTitle("Minimax Q vs Random")
s.print_board()
s.printHand()
while not s.complete():
while True:
if player == s.player:
state = str(s.board_string()) + '-' + s.getHand(player)
#print(state)
card, location = M1.play(state)
if not s.make_move(card,location):
print("Invalid Move!! Try Again!!")
else:
break
else:
#state = s.board_string() + '-' + s.getHand(player2)
#print(state)
card = random.choice(s.available_moves())
#print(card.suit)
while (card.value=='Jack'):
card = random.choice(s.available_moves())
location=s.find_card_location(card)
if not s.make_move(card, location):
print("Invalid Move!! Try Again!!")
else:
break
s.printHand()
s.switch_player()
Q_M('B','G')
Q_Q('B','G')
Q_R('B')
M_R('B')
M_M('B', 'G')
| 31.860606
| 71
| 0.475556
| 560
| 5,257
| 4.378571
| 0.107143
| 0.097879
| 0.04894
| 0.053018
| 0.909054
| 0.87398
| 0.849918
| 0.792822
| 0.792822
| 0.792822
| 0
| 0.00925
| 0.403652
| 5,257
| 164
| 72
| 32.054878
| 0.772887
| 0.052311
| 0
| 0.833333
| 0
| 0
| 0.079016
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.036232
| false
| 0
| 0.036232
| 0
| 0.072464
| 0.217391
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
cebfd7aca8a6225c86e48f1d8298d94aeb554fb3
| 11,284
|
py
|
Python
|
python/ql/test/query-tests/UselessCode/DuplicateCode/duplicate_test.py
|
vadi2/codeql
|
a806a4f08696d241ab295a286999251b56a6860c
|
[
"MIT"
] | 26
|
2020-06-30T03:07:19.000Z
|
2022-03-31T03:57:23.000Z
|
python/ql/test/query-tests/UselessCode/DuplicateCode/duplicate_test.py
|
vadi2/codeql
|
a806a4f08696d241ab295a286999251b56a6860c
|
[
"MIT"
] | 2
|
2020-06-30T06:00:59.000Z
|
2021-04-21T19:53:33.000Z
|
python/ql/test/query-tests/UselessCode/DuplicateCode/duplicate_test.py
|
vadi2/codeql
|
a806a4f08696d241ab295a286999251b56a6860c
|
[
"MIT"
] | 10
|
2021-03-24T13:09:08.000Z
|
2022-02-10T07:39:30.000Z
|
# Code Duplication
# Exact duplication of function
# Code copied from stdlib, copyright PSF.
# See http://www.python.org/download/releases/2.7/license/
def dis(x=None):
"""Disassemble classes, methods, functions, or code.
With no argument, disassemble the last traceback.
"""
if x is None:
distb()
return
if isinstance(x, types.InstanceType):
x = x.__class__
if hasattr(x, 'im_func'):
x = x.im_func
if hasattr(x, 'func_code'):
x = x.func_code
if hasattr(x, '__dict__'):
items = x.__dict__.items()
items.sort()
for name, x1 in items:
if isinstance(x1, _have_code):
print("Disassembly of %s:" % name)
try:
dis(x1)
except TypeError as msg:
print("Sorry:", msg)
print()
elif hasattr(x, 'co_code'):
disassemble(x)
elif isinstance(x, str):
disassemble_string(x)
else:
raise TypeError(
"don't know how to disassemble %s objects" %
type(x).__name__)
# And duplicate version
def dis2(x=None):
"""Disassemble classes, methods, functions, or code.
With no argument, disassemble the last traceback.
"""
if x is None:
distb()
return
if isinstance(x, types.InstanceType):
x = x.__class__
if hasattr(x, 'im_func'):
x = x.im_func
if hasattr(x, 'func_code'):
x = x.func_code
if hasattr(x, '__dict__'):
items = x.__dict__.items()
items.sort()
for name, x1 in items:
if isinstance(x1, _have_code):
print("Disassembly of %s:" % name)
try:
dis(x1)
except TypeError as msg:
print("Sorry:", msg)
print()
elif hasattr(x, 'co_code'):
disassemble(x)
elif isinstance(x, str):
disassemble_string(x)
else:
raise TypeError(
"don't know how to disassemble %s objects" %
type(x).__name__)
# Exactly duplicate class
class Popen3:
"""Class representing a child process. Normally, instances are created
internally by the functions popen2() and popen3()."""
sts = -1 # Child not completed yet
def __init__(self, cmd, capturestderr=False, bufsize=-1):
"""The parameter 'cmd' is the shell command to execute in a
sub-process. On UNIX, 'cmd' may be a sequence, in which case arguments
will be passed directly to the program without shell intervention (as
with os.spawnv()). If 'cmd' is a string it will be passed to the shell
(as with os.system()). The 'capturestderr' flag, if true, specifies
that the object should capture standard error output of the child
process. The default is false. If the 'bufsize' parameter is
specified, it specifies the size of the I/O buffers to/from the child
process."""
_cleanup()
self.cmd = cmd
p2cread, p2cwrite = os.pipe()
c2pread, c2pwrite = os.pipe()
if capturestderr:
errout, errin = os.pipe()
self.pid = os.fork()
if self.pid == 0:
# Child
os.dup2(p2cread, 0)
os.dup2(c2pwrite, 1)
if capturestderr:
os.dup2(errin, 2)
self._run_child(cmd)
os.close(p2cread)
self.tochild = os.fdopen(p2cwrite, 'w', bufsize)
os.close(c2pwrite)
self.fromchild = os.fdopen(c2pread, 'r', bufsize)
if capturestderr:
os.close(errin)
self.childerr = os.fdopen(errout, 'r', bufsize)
else:
self.childerr = None
def __del__(self):
# In case the child hasn't been waited on, check if it's done.
self.poll(_deadstate=sys.maxint)
if self.sts < 0:
if _active is not None:
# Child is still running, keep us alive until we can wait on it.
_active.append(self)
def _run_child(self, cmd):
if isinstance(cmd, basestring):
cmd = ['/bin/sh', '-c', cmd]
os.closerange(3, MAXFD)
try:
os.execvp(cmd[0], cmd)
finally:
os._exit(1)
def poll(self, _deadstate=None):
"""Return the exit status of the child process if it has finished,
or -1 if it hasn't finished yet."""
if self.sts < 0:
try:
pid, sts = os.waitpid(self.pid, os.WNOHANG)
# pid will be 0 if self.pid hasn't terminated
if pid == self.pid:
self.sts = sts
except os.error:
if _deadstate is not None:
self.sts = _deadstate
return self.sts
def wait(self):
"""Wait for and return the exit status of the child process."""
if self.sts < 0:
pid, sts = os.waitpid(self.pid, 0)
# This used to be a test, but it is believed to be
# always true, so I changed it to an assertion - mvl
assert pid == self.pid
self.sts = sts
return self.sts
class Popen3Again:
"""Class representing a child process. Normally, instances are created
internally by the functions popen2() and popen3()."""
sts = -1 # Child not completed yet
def __init__(self, cmd, capturestderr=False, bufsize=-1):
"""The parameter 'cmd' is the shell command to execute in a
sub-process. On UNIX, 'cmd' may be a sequence, in which case arguments
will be passed directly to the program without shell intervention (as
with os.spawnv()). If 'cmd' is a string it will be passed to the shell
(as with os.system()). The 'capturestderr' flag, if true, specifies
that the object should capture standard error output of the child
process. The default is false. If the 'bufsize' parameter is
specified, it specifies the size of the I/O buffers to/from the child
process."""
_cleanup()
self.cmd = cmd
p2cread, p2cwrite = os.pipe()
c2pread, c2pwrite = os.pipe()
if capturestderr:
errout, errin = os.pipe()
self.pid = os.fork()
if self.pid == 0:
# Child
os.dup2(p2cread, 0)
os.dup2(c2pwrite, 1)
if capturestderr:
os.dup2(errin, 2)
self._run_child(cmd)
os.close(p2cread)
self.tochild = os.fdopen(p2cwrite, 'w', bufsize)
os.close(c2pwrite)
self.fromchild = os.fdopen(c2pread, 'r', bufsize)
if capturestderr:
os.close(errin)
self.childerr = os.fdopen(errout, 'r', bufsize)
else:
self.childerr = None
def __del__(self):
# In case the child hasn't been waited on, check if it's done.
self.poll(_deadstate=sys.maxint)
if self.sts < 0:
if _active is not None:
# Child is still running, keep us alive until we can wait on it.
_active.append(self)
def _run_child(self, cmd):
if isinstance(cmd, basestring):
cmd = ['/bin/sh', '-c', cmd]
os.closerange(3, MAXFD)
try:
os.execvp(cmd[0], cmd)
finally:
os._exit(1)
def poll(self, _deadstate=None):
"""Return the exit status of the child process if it has finished,
or -1 if it hasn't finished yet."""
if self.sts < 0:
try:
pid, sts = os.waitpid(self.pid, os.WNOHANG)
# pid will be 0 if self.pid hasn't terminated
if pid == self.pid:
self.sts = sts
except os.error:
if _deadstate is not None:
self.sts = _deadstate
return self.sts
def wait(self):
"""Wait for and return the exit status of the child process."""
if self.sts < 0:
pid, sts = os.waitpid(self.pid, 0)
# This used to be a test, but it is believed to be
# always true, so I changed it to an assertion - mvl
assert pid == self.pid
self.sts = sts
return self.sts
# Duplicate function with identifiers changed
def dis3(y=None):
"""frobnicate classes, methods, functions, or code.
With no argument, frobnicate the last traceback.
"""
if y is None:
distb()
return
if isinstance(y, types.InstanceType):
y = y.__class__
if hasattr(y, 'im_func'):
y = y.im_func
if hasattr(y, 'func_code'):
y = y.func_code
if hasattr(y, '__dict__'):
items = y.__dict__.items()
items.sort()
for name, y1 in items:
if isinstance(y1, _have_code):
print("Disassembly of %s:" % name)
try:
dis(y1)
except TypeError as msg:
print("Sorry:", msg)
print()
elif hasattr(y, 'co_code'):
frobnicate(y)
elif isinstance(y, str):
frobnicate_string(y)
else:
raise TypeError(
"don't know how to frobnicate %s objects" %
type(y).__name__)
# Mostly similar function
def dis4(x=None):
"""Disassemble classes, methods, functions, or code.
With no argument, disassemble the last traceback.
"""
if x is None:
distb()
return
if isinstance(x, types.InstanceType):
x = x.__class__
if hasattr(x, 'im_func'):
x = x.im_func
if hasattr(x, '__dict__'):
items = x.__dict__.items()
items.sort()
for name, x1 in items:
if isinstance(x1, _have_code):
print("Disassembly of %s:" % name)
try:
dis(x1)
except TypeError as msg:
print("Sorry:", msg)
print()
elif hasattr(x, 'co_code'):
disassemble(x)
elif isinstance(x, str):
disassemble_string(x)
else:
raise TypeError(
"don't know how to disassemble %s objects" %
type(x).__name__)
# Similar function with changed identifiers
def dis5(z=None):
"""splat classes, methods, functions, or code.
With no argument, splat the last traceback.
"""
if z is None:
distb()
return
if isinstance(z, types.InstanceType):
z = z.__class__
if hasattr(z, 'im_func'):
z = z.im_func
if hasattr(z, 'func_code'):
z = z.func_code
if hasattr(z, '__dict__'):
items = z.__dict__.items()
items.sort()
for name, z1 in items:
if isinstance(z1, _have_code):
print("Disassembly of %s:" % name)
try:
dis(z1)
except TypeError as msg:
print("Sorry:", msg)
print()
elif hasattr(z, 'co_code'):
splat(z)
elif isinstance(z, str):
splat_string(z)
else:
raise TypeError(
"don't know how to splat %s objects" %
type(z).__name__)
| 31.431755
| 80
| 0.540854
| 1,412
| 11,284
| 4.21034
| 0.159348
| 0.018839
| 0.013457
| 0.017157
| 0.882927
| 0.882927
| 0.86476
| 0.86476
| 0.839865
| 0.811606
| 0
| 0.011491
| 0.35989
| 11,284
| 358
| 81
| 31.519553
| 0.811574
| 0.268167
| 0
| 0.844622
| 0
| 0
| 0.060633
| 0
| 0
| 0
| 0
| 0
| 0.007968
| 1
| 0.059761
| false
| 0
| 0
| 0
| 0.111554
| 0.059761
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
0cfd7b22c59db19007b93fef7ce4541472accb9d
| 81
|
py
|
Python
|
tests/test_version.py
|
amirasaad/fastapi-passwordless
|
fa907800a9b649ac1f601807fce180f322203acd
|
[
"MIT"
] | null | null | null |
tests/test_version.py
|
amirasaad/fastapi-passwordless
|
fa907800a9b649ac1f601807fce180f322203acd
|
[
"MIT"
] | 6
|
2021-01-20T14:54:12.000Z
|
2021-12-19T22:50:29.000Z
|
tests/test_version.py
|
amirasaad/fastapi-passwordless
|
fa907800a9b649ac1f601807fce180f322203acd
|
[
"MIT"
] | null | null | null |
import fastapi_passwordless
assert hasattr(fastapi_passwordless, "__version__")
| 20.25
| 51
| 0.864198
| 8
| 81
| 8
| 0.75
| 0.59375
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.074074
| 81
| 3
| 52
| 27
| 0.853333
| 0
| 0
| 0
| 0
| 0
| 0.135802
| 0
| 0
| 0
| 0
| 0
| 0.5
| 1
| 0
| true
| 1
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
|
0
| 7
|
0b3abab2e1331de3de7e2d51ade45042e2b6d794
| 2,956
|
py
|
Python
|
vendor/pip-1.2.1/tests/test_vcs_git.py
|
hmoody87/heroku-buildpack-python-ffmpeg-lame
|
ba7f092f0f341dfb274da311ebc8a1ff43ac2e0a
|
[
"MIT"
] | 1
|
2015-11-05T20:27:06.000Z
|
2015-11-05T20:27:06.000Z
|
vendor/pip-1.2.1/tests/test_vcs_git.py
|
hmoody87/heroku-buildpack-python-ffmpeg-lame
|
ba7f092f0f341dfb274da311ebc8a1ff43ac2e0a
|
[
"MIT"
] | null | null | null |
vendor/pip-1.2.1/tests/test_vcs_git.py
|
hmoody87/heroku-buildpack-python-ffmpeg-lame
|
ba7f092f0f341dfb274da311ebc8a1ff43ac2e0a
|
[
"MIT"
] | 3
|
2015-03-26T17:19:04.000Z
|
2020-11-11T13:50:47.000Z
|
from mock import patch
from pip.vcs.git import Git
from tests.test_pip import (reset_env, run_pip,
_create_test_package)
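# These tests drive pip's Git VCS backend against a freshly created local test package.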
def test_get_tag_revs_should_return_tag_name_and_commit_pair():
env = reset_env()
version_pkg_path = _create_test_package(env)
env.run('git', 'tag', '0.1', cwd=version_pkg_path)
env.run('git', 'tag', '0.2', cwd=version_pkg_path)
commit = env.run('git', 'rev-parse', 'HEAD',
cwd=version_pkg_path).stdout.strip()
git = Git()
result = git.get_tag_revs(version_pkg_path)
assert result == {'0.1': commit, '0.2': commit}, result
def test_get_branch_revs_should_return_branch_name_and_commit_pair():
env = reset_env()
version_pkg_path = _create_test_package(env)
env.run('git', 'branch', 'branch0.1', cwd=version_pkg_path)
commit = env.run('git', 'rev-parse', 'HEAD',
cwd=version_pkg_path).stdout.strip()
git = Git()
result = git.get_branch_revs(version_pkg_path)
assert result == {'master': commit, 'branch0.1': commit}
def test_get_branch_revs_should_ignore_no_branch():
env = reset_env()
version_pkg_path = _create_test_package(env)
env.run('git', 'branch', 'branch0.1', cwd=version_pkg_path)
commit = env.run('git', 'rev-parse', 'HEAD',
cwd=version_pkg_path).stdout.strip()
# current branch here is "* (nobranch)"
env.run('git', 'checkout', commit,
cwd=version_pkg_path, expect_stderr=True)
git = Git()
result = git.get_branch_revs(version_pkg_path)
assert result == {'master': commit, 'branch0.1': commit}
@patch('pip.vcs.git.Git.get_tag_revs')
@patch('pip.vcs.git.Git.get_branch_revs')
def test_check_rev_options_should_handle_branch_name(branches_revs_mock,
tags_revs_mock):
branches_revs_mock.return_value = {'master': '123456'}
tags_revs_mock.return_value = {'0.1': '123456'}
git = Git()
result = git.check_rev_options('master', '.', [])
assert result == ['123456']
@patch('pip.vcs.git.Git.get_tag_revs')
@patch('pip.vcs.git.Git.get_branch_revs')
def test_check_rev_options_should_handle_tag_name(branches_revs_mock,
tags_revs_mock):
branches_revs_mock.return_value = {'master': '123456'}
tags_revs_mock.return_value = {'0.1': '123456'}
git = Git()
result = git.check_rev_options('0.1', '.', [])
assert result == ['123456']
@patch('pip.vcs.git.Git.get_tag_revs')
@patch('pip.vcs.git.Git.get_branch_revs')
def test_check_rev_options_should_handle_ambiguous_commit(branches_revs_mock,
tags_revs_mock):
branches_revs_mock.return_value = {'master': '123456'}
tags_revs_mock.return_value = {'0.1': '123456'}
git = Git()
result = git.check_rev_options('0.1', '.', [])
assert result == ['123456'], result
| 37.897436
| 77
| 0.644114
| 408
| 2,956
| 4.301471
| 0.144608
| 0.079772
| 0.111681
| 0.077493
| 0.830769
| 0.810826
| 0.764103
| 0.764103
| 0.764103
| 0.764103
| 0
| 0.034468
| 0.214817
| 2,956
| 77
| 78
| 38.38961
| 0.721672
| 0.012517
| 0
| 0.704918
| 0
| 0
| 0.144669
| 0.060679
| 0
| 0
| 0
| 0
| 0.098361
| 1
| 0.098361
| false
| 0
| 0.04918
| 0
| 0.147541
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
0b5441134f11f18c99bbaaac755754154609d810
| 3,265
|
py
|
Python
|
tests/test_scorekeepers.py
|
questionlp/api.wwdt.me_v2
|
9e3705bba2668221740f5d28e94eec90998c3d00
|
[
"Apache-2.0"
] | null | null | null |
tests/test_scorekeepers.py
|
questionlp/api.wwdt.me_v2
|
9e3705bba2668221740f5d28e94eec90998c3d00
|
[
"Apache-2.0"
] | null | null | null |
tests/test_scorekeepers.py
|
questionlp/api.wwdt.me_v2
|
9e3705bba2668221740f5d28e94eec90998c3d00
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# vim: set noai syntax=python ts=4 sw=4:
#
# Copyright (c) 2018-2022 Linh Pham
# api.wwdt.me is released under the terms of the Apache License 2.0
"""Testing /v2.0/scorekeepers routes
"""
from fastapi.testclient import TestClient
import pytest
from app.main import app
from app.config import API_VERSION
client = TestClient(app)
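# Each test below hits a versioned route and asserts the minimal response contract (status code plus expected keys).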
def test_scorekeepers():
"""Test /v2.0/scorekeepers route"""
response = client.get(f"/v{API_VERSION}/scorekeepers")
scorekeepers = response.json()
assert response.status_code == 200
assert "scorekeepers" in scorekeepers
assert "id" in scorekeepers["scorekeepers"][0]
assert "name" in scorekeepers["scorekeepers"][0]
assert "slug" in scorekeepers["scorekeepers"][0]
@pytest.mark.parametrize("scorekeeper_id", [11])
def test_scorekeepers_id(scorekeeper_id: int):
"""Test /v2.0/scorekeepers/id/{scorekeeper_id} route"""
response = client.get(f"/v{API_VERSION}/scorekeepers/id/{scorekeeper_id}")
scorekeeper = response.json()
assert response.status_code == 200
assert "id" in scorekeeper
assert scorekeeper["id"] == scorekeeper_id
assert "name" in scorekeeper
assert "slug" in scorekeeper
@pytest.mark.parametrize("scorekeeper_slug", ["bill-kurtis"])
def test_scorekeepers_slug(scorekeeper_slug: str):
"""Test /v2.0/scorekeepers/slug/{scorekeeper_slug} route"""
response = client.get(f"/v{API_VERSION}/scorekeepers/slug/{scorekeeper_slug}")
scorekeeper = response.json()
assert response.status_code == 200
assert "id" in scorekeeper
assert "name" in scorekeeper
assert "slug" in scorekeeper
assert scorekeeper["slug"] == scorekeeper_slug
def test_scorekeepers_details():
"""Test /v2.0/scorekeepers/details route"""
response = client.get(f"/v{API_VERSION}/scorekeepers/details")
scorekeepers = response.json()
assert response.status_code == 200
assert "scorekeepers" in scorekeepers
assert "id" in scorekeepers["scorekeepers"][0]
assert "name" in scorekeepers["scorekeepers"][0]
assert "slug" in scorekeepers["scorekeepers"][0]
assert "appearances" in scorekeepers["scorekeepers"][0]
@pytest.mark.parametrize("scorekeeper_id", [11])
def test_scorekeepers_details_id(scorekeeper_id: int):
"""Test /v2.0/scorekeepers/details/id/{scorekeeper_id} route"""
response = client.get(f"/v{API_VERSION}/scorekeepers/details/id/{scorekeeper_id}")
scorekeeper = response.json()
assert response.status_code == 200
assert "id" in scorekeeper
assert scorekeeper["id"] == scorekeeper_id
assert "name" in scorekeeper
assert "slug" in scorekeeper
assert "appearances" in scorekeeper
@pytest.mark.parametrize("scorekeeper_slug", ["bill-kurtis"])
def test_scorekeepers_details_slug(scorekeeper_slug: str):
"""Test /v2.0/scorekeepers/details/slug/{scorekeeper_slug} route"""
response = client.get(
f"/v{API_VERSION}/scorekeepers/details/slug/{scorekeeper_slug}"
)
scorekeeper = response.json()
assert response.status_code == 200
assert "id" in scorekeeper
assert "name" in scorekeeper
assert "slug" in scorekeeper
assert scorekeeper["slug"] == scorekeeper_slug
assert "appearances" in scorekeeper
| 32.009804
| 86
| 0.71853
| 403
| 3,265
| 5.707196
| 0.173697
| 0.07913
| 0.09087
| 0.082174
| 0.836087
| 0.81
| 0.81
| 0.81
| 0.742174
| 0.719565
| 0
| 0.020297
| 0.154977
| 3,265
| 101
| 87
| 32.326733
| 0.813338
| 0.149158
| 0
| 0.677419
| 0
| 0
| 0.210084
| 0.102302
| 0
| 0
| 0
| 0
| 0.532258
| 1
| 0.096774
| false
| 0
| 0.064516
| 0
| 0.16129
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
0b8454ef807c7deb6d40fa9879b045c97d7c52ad
| 23,541
|
py
|
Python
|
Incident-Response/Tools/dfirtrack/dfirtrack_main/tests/system_importer/test_system_importer_file_csv_tag_removal.py
|
sn0b4ll/Incident-Playbook
|
cf519f58fcd4255674662b3620ea97c1091c1efb
|
[
"MIT"
] | 1
|
2021-07-24T17:22:50.000Z
|
2021-07-24T17:22:50.000Z
|
Incident-Response/Tools/dfirtrack/dfirtrack_main/tests/system_importer/test_system_importer_file_csv_tag_removal.py
|
sn0b4ll/Incident-Playbook
|
cf519f58fcd4255674662b3620ea97c1091c1efb
|
[
"MIT"
] | 2
|
2022-02-28T03:40:31.000Z
|
2022-02-28T03:40:52.000Z
|
Incident-Response/Tools/dfirtrack/dfirtrack_main/tests/system_importer/test_system_importer_file_csv_tag_removal.py
|
sn0b4ll/Incident-Playbook
|
cf519f58fcd4255674662b3620ea97c1091c1efb
|
[
"MIT"
] | 2
|
2022-02-25T08:34:51.000Z
|
2022-03-16T17:29:44.000Z
|
from datetime import datetime
from django.contrib.auth.models import User
from django.contrib.messages import get_messages
from django.test import TestCase
from django.utils import timezone
from dfirtrack.settings import BASE_DIR
from dfirtrack_config.models import SystemImporterFileCsvConfigModel
from dfirtrack_main.importer.file.csv import system_cron
from dfirtrack_main.models import Analysisstatus, System, Systemstatus, Tag, Tagcolor
from dfirtrack_main.tests.system_importer.config_functions import set_config_tag_remove_all
from dfirtrack_main.tests.system_importer.config_functions import set_config_tag_remove_none
from dfirtrack_main.tests.system_importer.config_functions import set_config_tag_remove_prefix
from dfirtrack_main.tests.system_importer.config_functions import set_csv_import_filename
from dfirtrack_main.tests.system_importer.config_functions import set_csv_import_path
from mock import patch
import os
import urllib.parse
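# Shared helpers: compare_messages_csv checks the importer's messages and each compare_tag_* asserts the expected tag state after one import run.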
def compare_messages_csv(self, messages):
""" compare messages """
# compare - messages
self.assertEqual(messages[0].message, '3 systems were updated.')
self.assertEqual(messages[0].level_tag, 'success')
# return to test function
return self
def compare_tag_remove_all(self, file_number):
""" compare tags """
# compare - old tags (all removed)
self.assertFalse(System.objects.get(system_name=f'system_csv_{file_number}_001').tag.filter(tag_name='AUTO_tag_96_1').exists())
self.assertFalse(System.objects.get(system_name=f'system_csv_{file_number}_002').tag.filter(tag_name='AUTO_tag_96_1').exists())
self.assertFalse(System.objects.get(system_name=f'system_csv_{file_number}_003').tag.filter(tag_name='AUTO_tag_96_1').exists())
self.assertFalse(System.objects.get(system_name=f'system_csv_{file_number}_001').tag.filter(tag_name='tag_97_1').exists())
self.assertFalse(System.objects.get(system_name=f'system_csv_{file_number}_002').tag.filter(tag_name='tag_97_1').exists())
self.assertFalse(System.objects.get(system_name=f'system_csv_{file_number}_003').tag.filter(tag_name='tag_97_1').exists())
# compare - existence of objects
self.assertTrue(Tag.objects.filter(tag_name=f'AUTO_tag_{file_number}_1').exists())
self.assertTrue(Tag.objects.filter(tag_name=f'AUTO_tag_{file_number}_2').exists())
self.assertTrue(Tag.objects.filter(tag_name=f'AUTO_tag_{file_number}_3').exists())
# compare - new tags
self.assertTrue(System.objects.get(system_name=f'system_csv_{file_number}_001').tag.filter(tag_name=f'AUTO_tag_{file_number}_1').exists())
self.assertTrue(System.objects.get(system_name=f'system_csv_{file_number}_001').tag.filter(tag_name=f'AUTO_tag_{file_number}_2').exists())
self.assertTrue(System.objects.get(system_name=f'system_csv_{file_number}_001').tag.filter(tag_name=f'AUTO_tag_{file_number}_3').exists())
self.assertTrue(System.objects.get(system_name=f'system_csv_{file_number}_002').tag.filter(tag_name=f'AUTO_tag_{file_number}_1').exists())
self.assertTrue(System.objects.get(system_name=f'system_csv_{file_number}_002').tag.filter(tag_name=f'AUTO_tag_{file_number}_2').exists())
self.assertTrue(System.objects.get(system_name=f'system_csv_{file_number}_002').tag.filter(tag_name=f'AUTO_tag_{file_number}_3').exists())
self.assertTrue(System.objects.get(system_name=f'system_csv_{file_number}_003').tag.filter(tag_name=f'AUTO_tag_{file_number}_1').exists())
self.assertTrue(System.objects.get(system_name=f'system_csv_{file_number}_003').tag.filter(tag_name=f'AUTO_tag_{file_number}_2').exists())
self.assertTrue(System.objects.get(system_name=f'system_csv_{file_number}_003').tag.filter(tag_name=f'AUTO_tag_{file_number}_3').exists())
# return to test function
return self
def compare_tag_remove_prefix(self, file_number):
""" compare tags """
# compare - old tags (with prefix removed)
self.assertFalse(System.objects.get(system_name=f'system_csv_{file_number}_001').tag.filter(tag_name='AUTO_tag_96_1').exists())
self.assertFalse(System.objects.get(system_name=f'system_csv_{file_number}_002').tag.filter(tag_name='AUTO_tag_96_1').exists())
self.assertFalse(System.objects.get(system_name=f'system_csv_{file_number}_003').tag.filter(tag_name='AUTO_tag_96_1').exists())
self.assertTrue(System.objects.get(system_name=f'system_csv_{file_number}_001').tag.filter(tag_name='tag_97_1').exists())
self.assertTrue(System.objects.get(system_name=f'system_csv_{file_number}_002').tag.filter(tag_name='tag_97_1').exists())
self.assertTrue(System.objects.get(system_name=f'system_csv_{file_number}_003').tag.filter(tag_name='tag_97_1').exists())
# compare - existence of objects
self.assertTrue(Tag.objects.filter(tag_name=f'AUTO_tag_{file_number}_1').exists())
self.assertTrue(Tag.objects.filter(tag_name=f'AUTO_tag_{file_number}_2').exists())
self.assertTrue(Tag.objects.filter(tag_name=f'AUTO_tag_{file_number}_3').exists())
# compare - new tags
self.assertTrue(System.objects.get(system_name=f'system_csv_{file_number}_001').tag.filter(tag_name=f'AUTO_tag_{file_number}_1').exists())
self.assertTrue(System.objects.get(system_name=f'system_csv_{file_number}_001').tag.filter(tag_name=f'AUTO_tag_{file_number}_2').exists())
self.assertTrue(System.objects.get(system_name=f'system_csv_{file_number}_001').tag.filter(tag_name=f'AUTO_tag_{file_number}_3').exists())
self.assertTrue(System.objects.get(system_name=f'system_csv_{file_number}_002').tag.filter(tag_name=f'AUTO_tag_{file_number}_1').exists())
self.assertTrue(System.objects.get(system_name=f'system_csv_{file_number}_002').tag.filter(tag_name=f'AUTO_tag_{file_number}_2').exists())
self.assertTrue(System.objects.get(system_name=f'system_csv_{file_number}_002').tag.filter(tag_name=f'AUTO_tag_{file_number}_3').exists())
self.assertTrue(System.objects.get(system_name=f'system_csv_{file_number}_003').tag.filter(tag_name=f'AUTO_tag_{file_number}_1').exists())
self.assertTrue(System.objects.get(system_name=f'system_csv_{file_number}_003').tag.filter(tag_name=f'AUTO_tag_{file_number}_2').exists())
self.assertTrue(System.objects.get(system_name=f'system_csv_{file_number}_003').tag.filter(tag_name=f'AUTO_tag_{file_number}_3').exists())
# return to test function
return self
def compare_tag_remove_none(self, file_number):
""" compare tags """
# compare - old tags (no tag removed)
self.assertTrue(System.objects.get(system_name=f'system_csv_{file_number}_001').tag.filter(tag_name='AUTO_tag_96_1').exists())
self.assertTrue(System.objects.get(system_name=f'system_csv_{file_number}_002').tag.filter(tag_name='AUTO_tag_96_1').exists())
self.assertTrue(System.objects.get(system_name=f'system_csv_{file_number}_003').tag.filter(tag_name='AUTO_tag_96_1').exists())
self.assertTrue(System.objects.get(system_name=f'system_csv_{file_number}_001').tag.filter(tag_name='tag_97_1').exists())
self.assertTrue(System.objects.get(system_name=f'system_csv_{file_number}_002').tag.filter(tag_name='tag_97_1').exists())
self.assertTrue(System.objects.get(system_name=f'system_csv_{file_number}_003').tag.filter(tag_name='tag_97_1').exists())
# compare - new tags (no tag created)
self.assertFalse(Tag.objects.filter(tag_name=f'AUTO_tag_{file_number}_1').exists())
self.assertFalse(Tag.objects.filter(tag_name=f'AUTO_tag_{file_number}_2').exists())
self.assertFalse(Tag.objects.filter(tag_name=f'AUTO_tag_{file_number}_3').exists())
# return to test function
return self
class SystemImporterFileCsvTagRemovalViewTestCase(TestCase):
""" system importer file CSV view tests """
@classmethod
def setUpTestData(cls):
""" one-time setup """
# create user
test_user = User.objects.create_user(username='testuser_system_importer_file_csv_tag_removal', password='XAavYL75MrC5eVVSuzoL')
User.objects.create_user(username='message_user', password='8vwuoDthBxFkMQUBG2DM')
# create objects
analysisstatus_1 = Analysisstatus.objects.create(analysisstatus_name='analysisstatus_1')
analysisstatus_2 = Analysisstatus.objects.create(analysisstatus_name='analysisstatus_2')
systemstatus_1 = Systemstatus.objects.create(systemstatus_name='systemstatus_1')
systemstatus_2 = Systemstatus.objects.create(systemstatus_name='systemstatus_2')
# create systems
system_csv_56_001 = System.objects.create(
system_name = 'system_csv_56_001',
systemstatus = systemstatus_1,
analysisstatus = analysisstatus_1,
system_modify_time = timezone.now(),
system_created_by_user_id = test_user,
system_modified_by_user_id = test_user,
)
system_csv_56_002 = System.objects.create(
system_name = 'system_csv_56_002',
systemstatus = systemstatus_1,
analysisstatus = analysisstatus_1,
system_modify_time = timezone.now(),
system_created_by_user_id = test_user,
system_modified_by_user_id = test_user,
)
system_csv_56_003 = System.objects.create(
system_name = 'system_csv_56_003',
systemstatus = systemstatus_1,
analysisstatus = analysisstatus_1,
system_modify_time = timezone.now(),
system_created_by_user_id = test_user,
system_modified_by_user_id = test_user,
)
tagcolor_1 = Tagcolor.objects.create(tagcolor_name='tagcolor_1')
AUTO_tag_96_1 = Tag.objects.create(
tag_name='AUTO_tag_96_1',
tagcolor=tagcolor_1,
)
tag_97_1 = Tag.objects.create(
tag_name='tag_97_1',
tagcolor=tagcolor_1,
)
system_csv_56_001.tag.add(AUTO_tag_96_1)
system_csv_56_002.tag.add(AUTO_tag_96_1)
system_csv_56_003.tag.add(AUTO_tag_96_1)
system_csv_56_001.tag.add(tag_97_1)
system_csv_56_002.tag.add(tag_97_1)
system_csv_56_003.tag.add(tag_97_1)
# set config
set_csv_import_path(os.path.join(BASE_DIR, 'dfirtrack_main/tests/system_importer/system_importer_file_csv_files/'))
# set config
set_csv_import_filename('system_importer_file_csv_testfile_56_tag_delimiter_space.csv')
# set config
system_importer_file_csv_config_model = SystemImporterFileCsvConfigModel.objects.get(system_importer_file_csv_config_name='SystemImporterFileCsvConfig')
system_importer_file_csv_config_model.csv_column_system = 1
system_importer_file_csv_config_model.csv_skip_existing_system = False
system_importer_file_csv_config_model.csv_headline = False
system_importer_file_csv_config_model.csv_import_username = test_user
system_importer_file_csv_config_model.csv_default_systemstatus = systemstatus_1
system_importer_file_csv_config_model.csv_default_analysisstatus = analysisstatus_1
system_importer_file_csv_config_model.csv_default_tagfree_systemstatus = systemstatus_2
system_importer_file_csv_config_model.csv_default_tagfree_analysisstatus = analysisstatus_2
system_importer_file_csv_config_model.csv_tag_lock_systemstatus = 'LOCK_SYSTEMSTATUS'
system_importer_file_csv_config_model.csv_tag_lock_analysisstatus = 'LOCK_ANALYSISSTATUS'
system_importer_file_csv_config_model.csv_field_delimiter = 'field_comma'
system_importer_file_csv_config_model.csv_text_quote = 'text_double_quotation_marks'
system_importer_file_csv_config_model.csv_ip_delimiter = 'ip_semicolon'
system_importer_file_csv_config_model.csv_tag_delimiter = 'tag_space'
system_importer_file_csv_config_model.csv_tag_prefix = 'AUTO'
system_importer_file_csv_config_model.csv_tag_prefix_delimiter = 'tag_prefix_underscore'
system_importer_file_csv_config_model.csv_choice_tag = True
system_importer_file_csv_config_model.csv_column_tag = 2
system_importer_file_csv_config_model.save()
""" remove all tags """
def test_system_importer_file_csv_cron_tag_remove_all(self):
""" test importer view """
# change config
set_config_tag_remove_all()
# mock timezone.now()
t_1 = datetime(2021, 3, 26, 18, 35, tzinfo=timezone.utc)
with patch.object(timezone, 'now', return_value=t_1):
# execute cron job / scheduled task
system_cron()
# login testuser
self.client.login(username='testuser_system_importer_file_csv_tag_removal', password='XAavYL75MrC5eVVSuzoL')
# get response
response = self.client.get('/system/')
# get messages
messages = list(get_messages(response.wsgi_request))
# compare - user 1
self.assertEqual(str(response.context['user']), 'testuser_system_importer_file_csv_tag_removal')
self.assertEqual(messages[0].message, 'System CSV importer: created: 0 | updated: 3 | skipped: 0 | multiple: 0 [2021-03-26 18:35:00 - 2021-03-26 18:35:00]')
self.assertEqual(messages[0].level_tag, 'success')
# switch user context
self.client.logout()
self.client.login(username='message_user', password='8vwuoDthBxFkMQUBG2DM')
# get response
response = self.client.get('/system/')
# get messages
messages = list(get_messages(response.wsgi_request))
# compare - user 2
self.assertEqual(str(response.context['user']), 'message_user')
self.assertEqual(messages[0].message, 'System CSV importer: created: 0 | updated: 3 | skipped: 0 | multiple: 0 [2021-03-26 18:35:00 - 2021-03-26 18:35:00]')
self.assertEqual(messages[0].level_tag, 'success')
# compare - tags
compare_tag_remove_all(self, '56')
def test_system_importer_file_csv_instant_tag_remove_all(self):
""" test importer view """
# change config
set_config_tag_remove_all()
# login testuser
self.client.login(username='testuser_system_importer_file_csv_tag_removal', password='XAavYL75MrC5eVVSuzoL')
# create url
destination = urllib.parse.quote('/system/', safe='/')
# get response
response = self.client.get('/system/importer/file/csv/instant/', follow=True)
# get messages
messages = list(get_messages(response.wsgi_request))
# compare - meta
self.assertRedirects(response, destination, status_code=302, target_status_code=200)
# compare - messages
compare_messages_csv(self, messages)
# compare - tags
compare_tag_remove_all(self, '56')
def test_system_importer_file_csv_upload_post_tag_remove_all(self):
""" test importer view """
# change config
set_config_tag_remove_all()
# login testuser
self.client.login(username='testuser_system_importer_file_csv_tag_removal', password='XAavYL75MrC5eVVSuzoL')
# open upload file
systemcsv = open(os.path.join(BASE_DIR, 'dfirtrack_main/tests/system_importer/system_importer_file_csv_files/system_importer_file_csv_testfile_56_tag_delimiter_space.csv'), 'r')
# create post data
data_dict = {
'systemcsv': systemcsv,
}
# create url
destination = urllib.parse.quote('/system/', safe='/')
# get response
response = self.client.post('/system/importer/file/csv/upload/', data_dict)
# get messages
messages = list(get_messages(response.wsgi_request))
# close file
systemcsv.close()
# compare - meta
self.assertRedirects(response, destination, status_code=302, target_status_code=200)
# compare - messages
compare_messages_csv(self, messages)
# compare - tags
compare_tag_remove_all(self, '56')
""" remove prefix tags """
def test_system_importer_file_csv_cron_tag_remove_prefix(self):
""" test importer view """
# change config
set_config_tag_remove_prefix()
# mock timezone.now()
t_2 = datetime(2021, 3, 26, 18, 40, tzinfo=timezone.utc)
with patch.object(timezone, 'now', return_value=t_2):
# execute cron job / scheduled task
system_cron()
# login testuser
self.client.login(username='testuser_system_importer_file_csv_tag_removal', password='XAavYL75MrC5eVVSuzoL')
# get response
response = self.client.get('/system/')
# get messages
messages = list(get_messages(response.wsgi_request))
# compare - user 1
self.assertEqual(str(response.context['user']), 'testuser_system_importer_file_csv_tag_removal')
self.assertEqual(messages[0].message, 'System CSV importer: created: 0 | updated: 3 | skipped: 0 | multiple: 0 [2021-03-26 18:40:00 - 2021-03-26 18:40:00]')
self.assertEqual(messages[0].level_tag, 'success')
# switch user context
self.client.logout()
self.client.login(username='message_user', password='8vwuoDthBxFkMQUBG2DM')
# get response
response = self.client.get('/system/')
# get messages
messages = list(get_messages(response.wsgi_request))
# compare - user 2
self.assertEqual(str(response.context['user']), 'message_user')
self.assertEqual(messages[0].message, 'System CSV importer: created: 0 | updated: 3 | skipped: 0 | multiple: 0 [2021-03-26 18:40:00 - 2021-03-26 18:40:00]')
self.assertEqual(messages[0].level_tag, 'success')
# compare - tags
compare_tag_remove_prefix(self, '56')
def test_system_importer_file_csv_instant_tag_remove_prefix(self):
""" test importer view """
# change config
set_config_tag_remove_prefix()
# login testuser
self.client.login(username='testuser_system_importer_file_csv_tag_removal', password='XAavYL75MrC5eVVSuzoL')
# create url
destination = urllib.parse.quote('/system/', safe='/')
# get response
response = self.client.get('/system/importer/file/csv/instant/', follow=True)
# get messages
messages = list(get_messages(response.wsgi_request))
# compare - meta
self.assertRedirects(response, destination, status_code=302, target_status_code=200)
# compare - messages
compare_messages_csv(self, messages)
# compare - tags
compare_tag_remove_prefix(self, '56')
def test_system_importer_file_csv_upload_post_tag_remove_prefix(self):
""" test importer view """
# change config
set_config_tag_remove_prefix()
# login testuser
self.client.login(username='testuser_system_importer_file_csv_tag_removal', password='XAavYL75MrC5eVVSuzoL')
# open upload file
systemcsv = open(os.path.join(BASE_DIR, 'dfirtrack_main/tests/system_importer/system_importer_file_csv_files/system_importer_file_csv_testfile_56_tag_delimiter_space.csv'), 'r')
# create post data
data_dict = {
'systemcsv': systemcsv,
}
# create url
destination = urllib.parse.quote('/system/', safe='/')
# get response
response = self.client.post('/system/importer/file/csv/upload/', data_dict)
# get messages
messages = list(get_messages(response.wsgi_request))
# close file
systemcsv.close()
# compare - meta
self.assertRedirects(response, destination, status_code=302, target_status_code=200)
# compare - messages
compare_messages_csv(self, messages)
# compare - tags
compare_tag_remove_prefix(self, '56')
""" remove no tags """
def test_system_importer_file_csv_cron_tag_remove_none(self):
""" test importer view """
# change config
set_config_tag_remove_none()
# mock timezone.now()
t_3 = datetime(2021, 3, 26, 18, 45, tzinfo=timezone.utc)
with patch.object(timezone, 'now', return_value=t_3):
# execute cron job / scheduled task
system_cron()
# login testuser
self.client.login(username='testuser_system_importer_file_csv_tag_removal', password='XAavYL75MrC5eVVSuzoL')
# get response
response = self.client.get('/system/')
# get messages
messages = list(get_messages(response.wsgi_request))
# compare - user 1
self.assertEqual(str(response.context['user']), 'testuser_system_importer_file_csv_tag_removal')
self.assertEqual(messages[0].message, 'System CSV importer: created: 0 | updated: 3 | skipped: 0 | multiple: 0 [2021-03-26 18:45:00 - 2021-03-26 18:45:00]')
self.assertEqual(messages[0].level_tag, 'success')
# switch user context
self.client.logout()
self.client.login(username='message_user', password='8vwuoDthBxFkMQUBG2DM')
# get response
response = self.client.get('/system/')
# get messages
messages = list(get_messages(response.wsgi_request))
# compare - user 2
self.assertEqual(str(response.context['user']), 'message_user')
self.assertEqual(messages[0].message, 'System CSV importer: created: 0 | updated: 3 | skipped: 0 | multiple: 0 [2021-03-26 18:45:00 - 2021-03-26 18:45:00]')
self.assertEqual(messages[0].level_tag, 'success')
# compare - tags
compare_tag_remove_none(self, '56')
def test_system_importer_file_csv_instant_tag_remove_none(self):
""" test importer view """
# change config
set_config_tag_remove_none()
# login testuser
self.client.login(username='testuser_system_importer_file_csv_tag_removal', password='XAavYL75MrC5eVVSuzoL')
# create url
destination = urllib.parse.quote('/system/', safe='/')
# get response
response = self.client.get('/system/importer/file/csv/instant/', follow=True)
# get messages
messages = list(get_messages(response.wsgi_request))
# compare - meta
self.assertRedirects(response, destination, status_code=302, target_status_code=200)
# compare - messages
compare_messages_csv(self, messages)
# compare - tags
compare_tag_remove_none(self, '56')
def test_system_importer_file_csv_upload_post_tag_remove_none(self):
""" test importer view """
# change config
set_config_tag_remove_none()
# login testuser
self.client.login(username='testuser_system_importer_file_csv_tag_removal', password='XAavYL75MrC5eVVSuzoL')
# open upload file
systemcsv = open(os.path.join(BASE_DIR, 'dfirtrack_main/tests/system_importer/system_importer_file_csv_files/system_importer_file_csv_testfile_56_tag_delimiter_space.csv'), 'r')
# create post data
data_dict = {
'systemcsv': systemcsv,
}
# create url
destination = urllib.parse.quote('/system/', safe='/')
# get response
response = self.client.post('/system/importer/file/csv/upload/', data_dict)
# get messages
messages = list(get_messages(response.wsgi_request))
# close file
systemcsv.close()
# compare - meta
self.assertRedirects(response, destination, status_code=302, target_status_code=200)
# compare - messages
compare_messages_csv(self, messages)
# compare - tags
compare_tag_remove_none(self, '56')
| 50.844492
| 185
| 0.713903
| 3,106
| 23,541
| 5.060528
| 0.059562
| 0.059677
| 0.056305
| 0.077491
| 0.909085
| 0.891208
| 0.868876
| 0.853098
| 0.81728
| 0.792149
| 0
| 0.033726
| 0.175014
| 23,541
| 462
| 186
| 50.954545
| 0.775604
| 0.09201
| 0
| 0.619403
| 0
| 0.022388
| 0.224671
| 0.143541
| 0
| 0
| 0
| 0
| 0.264925
| 1
| 0.052239
| false
| 0.052239
| 0.287313
| 0
| 0.358209
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 7
|
0b93d8a4f2d3d99f468fed01691f702cc699ee58
| 17,661
|
py
|
Python
|
tests/client_connectivity/test_bridge_mode.py
|
brennerm/wlan-testing
|
ea99d5ab74177198324f4d7eddcdcff2844bbbf3
|
[
"BSD-3-Clause"
] | null | null | null |
tests/client_connectivity/test_bridge_mode.py
|
brennerm/wlan-testing
|
ea99d5ab74177198324f4d7eddcdcff2844bbbf3
|
[
"BSD-3-Clause"
] | null | null | null |
tests/client_connectivity/test_bridge_mode.py
|
brennerm/wlan-testing
|
ea99d5ab74177198324f4d7eddcdcff2844bbbf3
|
[
"BSD-3-Clause"
] | null | null | null |
import pytest
import sys
for folder in 'py-json', 'py-scripts':
if folder not in sys.path:
sys.path.append(f'../lanforge/lanforge-scripts/{folder}')
sys.path.append(f"../lanforge/lanforge-scripts/py-scripts/tip-cicd-sanity")
sys.path.append(f'../libs')
sys.path.append(f'../libs/lanforge/')
from LANforge.LFUtils import *
from configuration_data import TEST_CASES
if 'py-json' not in sys.path:
sys.path.append('../py-scripts')
import sta_connect2
from sta_connect2 import StaConnect2
import eap_connect
from eap_connect import EAPConnect
import time
@pytest.mark.run(order=13)
@pytest.mark.bridge
class TestBridgeModeClientConnectivity(object):
@pytest.mark.wpa
@pytest.mark.twog
def test_client_wpa_2g(self, request, get_lanforge_data, setup_profile_data, instantiate_testrail, instantiate_project):
profile_data = setup_profile_data["BRIDGE"]["WPA"]["2G"]
station_names = []
for i in range(0, int(request.config.getini("num_stations"))):
station_names.append(get_lanforge_data["lanforge_2dot4g_prefix"] + "0" + str(i))
print(profile_data, get_lanforge_data)
staConnect = StaConnect2(get_lanforge_data["lanforge_ip"], int(get_lanforge_data["lanforge-port-number"]),
debug_=False)
staConnect.sta_mode = 0
staConnect.upstream_resource = 1
staConnect.upstream_port = get_lanforge_data["lanforge_bridge_port"]
staConnect.radio = get_lanforge_data["lanforge_2dot4g"]
staConnect.resource = 1
staConnect.dut_ssid = profile_data["ssid_name"]
staConnect.dut_passwd = profile_data["security_key"]
staConnect.dut_security = "wpa"
staConnect.station_names = station_names
staConnect.sta_prefix = get_lanforge_data["lanforge_2dot4g_prefix"]
staConnect.runtime_secs = 10
staConnect.bringup_time_sec = 60
staConnect.cleanup_on_exit = True
# staConnect.cleanup()
staConnect.setup()
staConnect.start()
print("napping %f sec" % staConnect.runtime_secs)
time.sleep(staConnect.runtime_secs)
staConnect.stop()
staConnect.cleanup()
run_results = staConnect.get_result_list()
for result in run_results:
print("test result: " + result)
# result = 'pass'
print("Single Client Connectivity :", staConnect.passes)
if staConnect.passes():
instantiate_testrail.update_testrail(case_id=TEST_CASES["2g_wpa_bridge"], run_id=instantiate_project,
status_id=1,
msg='2G WPA Client Connectivity Passed successfully - bridge mode')
else:
instantiate_testrail.update_testrail(case_id=TEST_CASES["2g_wpa_bridge"], run_id=instantiate_project,
status_id=5,
msg='2G WPA Client Connectivity Failed - bridge mode')
assert staConnect.passes()
# C2420
@pytest.mark.wpa
@pytest.mark.fiveg
def test_client_wpa_5g(self, request, get_lanforge_data, setup_profile_data, instantiate_project, instantiate_testrail):
profile_data = setup_profile_data["BRIDGE"]["WPA"]["5G"]
station_names = []
for i in range(0, int(request.config.getini("num_stations"))):
station_names.append(get_lanforge_data["lanforge_5g_prefix"] + "0" + str(i))
staConnect = StaConnect2(get_lanforge_data["lanforge_ip"], int(get_lanforge_data["lanforge-port-number"]),
debug_=False)
staConnect.sta_mode = 0
staConnect.upstream_resource = 1
staConnect.upstream_port = get_lanforge_data["lanforge_bridge_port"]
staConnect.radio = get_lanforge_data["lanforge_5g"]
staConnect.resource = 1
staConnect.dut_ssid = profile_data["ssid_name"]
staConnect.dut_passwd = profile_data["security_key"]
staConnect.dut_security = "wpa"
staConnect.station_names = station_names
staConnect.sta_prefix = get_lanforge_data["lanforge_5g_prefix"]
staConnect.runtime_secs = 10
staConnect.bringup_time_sec = 60
staConnect.cleanup_on_exit = True
# staConnect.cleanup()
staConnect.setup()
staConnect.start()
print("napping %f sec" % staConnect.runtime_secs)
time.sleep(staConnect.runtime_secs)
staConnect.stop()
staConnect.cleanup()
run_results = staConnect.get_result_list()
for result in run_results:
print("test result: " + result)
# result = 'pass'
print("Single Client Connectivity :", staConnect.passes)
if staConnect.passes():
instantiate_testrail.update_testrail(case_id=TEST_CASES["5g_wpa_bridge"], run_id=instantiate_project,
status_id=1,
msg='5G WPA Client Connectivity Passed successfully - bridge mode')
else:
instantiate_testrail.update_testrail(case_id=TEST_CASES["5g_wpa_bridge"], run_id=instantiate_project,
status_id=5,
msg='5G WPA Client Connectivity Failed - bridge mode')
assert staConnect.passes()
# C2419
@pytest.mark.wpa2_personal
@pytest.mark.twog
def test_client_wpa2_personal_2g(self, request, get_lanforge_data, setup_profile_data, instantiate_project, instantiate_testrail):
profile_data = setup_profile_data["BRIDGE"]["WPA2_P"]["2G"]
station_names = []
for i in range(0, int(request.config.getini("num_stations"))):
station_names.append(get_lanforge_data["lanforge_2dot4g_prefix"] + "0" + str(i))
staConnect = StaConnect2(get_lanforge_data["lanforge_ip"], int(get_lanforge_data["lanforge-port-number"]),
debug_=False)
staConnect.sta_mode = 0
staConnect.upstream_resource = 1
staConnect.upstream_port = get_lanforge_data["lanforge_bridge_port"]
staConnect.radio = get_lanforge_data["lanforge_2dot4g"]
staConnect.resource = 1
staConnect.dut_ssid = profile_data["ssid_name"]
staConnect.dut_passwd = profile_data["security_key"]
staConnect.dut_security = "wpa2"
staConnect.station_names = station_names
staConnect.sta_prefix = get_lanforge_data["lanforge_2dot4g_prefix"]
staConnect.runtime_secs = 10
staConnect.bringup_time_sec = 60
staConnect.cleanup_on_exit = True
# staConnect.cleanup()
staConnect.setup()
staConnect.start()
print("napping %f sec" % staConnect.runtime_secs)
time.sleep(staConnect.runtime_secs)
staConnect.stop()
staConnect.cleanup()
run_results = staConnect.get_result_list()
for result in run_results:
print("test result: " + result)
# result = 'pass'
print("Single Client Connectivity :", staConnect.passes)
if staConnect.passes():
instantiate_testrail.update_testrail(case_id=TEST_CASES["2g_wpa2_bridge"], run_id=instantiate_project,
status_id=1,
msg='2G WPA2 Client Connectivity Passed successfully - bridge mode')
else:
instantiate_testrail.update_testrail(case_id=TEST_CASES["2g_wpa2_bridge"], run_id=instantiate_project,
status_id=5,
msg='2G WPA2 Client Connectivity Failed - bridge mode')
assert staConnect.passes()
# C2237
@pytest.mark.wpa2_personal
@pytest.mark.fiveg
def test_client_wpa2_personal_5g(self, request, get_lanforge_data, setup_profile_data, instantiate_project, instantiate_testrail):
profile_data = setup_profile_data["BRIDGE"]["WPA2_P"]["5G"]
station_names = []
for i in range(0, int(request.config.getini("num_stations"))):
station_names.append(get_lanforge_data["lanforge_5g_prefix"] + "0" + str(i))
staConnect = StaConnect2(get_lanforge_data["lanforge_ip"], int(get_lanforge_data["lanforge-port-number"]),
debug_=False)
staConnect.sta_mode = 0
staConnect.upstream_resource = 1
staConnect.upstream_port = get_lanforge_data["lanforge_bridge_port"]
staConnect.radio = get_lanforge_data["lanforge_5g"]
staConnect.resource = 1
staConnect.dut_ssid = profile_data["ssid_name"]
staConnect.dut_passwd = profile_data["security_key"]
staConnect.dut_security = "wpa2"
staConnect.station_names = station_names
staConnect.sta_prefix = get_lanforge_data["lanforge_5g_prefix"]
staConnect.runtime_secs = 10
staConnect.bringup_time_sec = 60
staConnect.cleanup_on_exit = True
# staConnect.cleanup()
staConnect.setup()
staConnect.start()
print("napping %f sec" % staConnect.runtime_secs)
time.sleep(staConnect.runtime_secs)
staConnect.stop()
staConnect.cleanup()
run_results = staConnect.get_result_list()
for result in run_results:
print("test result: " + result)
# result = 'pass'
print("Single Client Connectivity :", staConnect.passes)
if staConnect.passes():
instantiate_testrail.update_testrail(case_id=TEST_CASES["5g_wpa2_bridge"], run_id=instantiate_project,
status_id=1,
msg='5G WPA2 Client Connectivity Passed successfully - bridge mode')
else:
instantiate_testrail.update_testrail(case_id=TEST_CASES["5g_wpa2_bridge"], run_id=instantiate_project,
status_id=5,
msg='5G WPA2 Client Connectivity Failed - bridge mode')
assert staConnect.passes()
# C2236
@pytest.mark.wpa2_enterprise
@pytest.mark.twog
def test_client_wpa2_enterprise_2g(self, request, get_lanforge_data, setup_profile_data, instantiate_project, instantiate_testrail):
profile_data = setup_profile_data["BRIDGE"]["WPA2_E"]["2G"]
station_names = []
for i in range(0, int(request.config.getini("num_stations"))):
station_names.append(get_lanforge_data["lanforge_2dot4g_prefix"] + "0" + str(i))
eap_connect = EAPConnect(get_lanforge_data["lanforge_ip"], get_lanforge_data["lanforge-port-number"])
eap_connect.upstream_resource = 1
eap_connect.upstream_port = get_lanforge_data["lanforge_bridge_port"]
eap_connect.security = "wpa2"
eap_connect.sta_list = station_names
eap_connect.station_names = station_names
eap_connect.sta_prefix = get_lanforge_data["lanforge_2dot4g_prefix"]
eap_connect.ssid = profile_data["ssid_name"]
eap_connect.radio = get_lanforge_data["lanforge_2dot4g"]
eap_connect.eap = "TTLS"
eap_connect.identity = "nolaradius"
eap_connect.ttls_passwd = "nolastart"
eap_connect.runtime_secs = 10
eap_connect.setup()
eap_connect.start()
print("napping %f sec" % eap_connect.runtime_secs)
time.sleep(eap_connect.runtime_secs)
eap_connect.stop()
try:
eap_connect.cleanup()
eap_connect.cleanup()
except Exception:
# ignore cleanup failures so teardown noise cannot mask the test verdict
pass
run_results = eap_connect.get_result_list()
for result in run_results:
print("test result: " + result)
# result = 'pass'
print("Single Client Connectivity :", eap_connect.passes)
if eap_connect.passes():
instantiate_testrail.update_testrail(case_id=TEST_CASES["2g_eap_bridge"], run_id=instantiate_project,
status_id=1,
msg='2G WPA2 ENTERPRISE Client Connectivity Passed successfully - '
'bridge mode')
else:
instantiate_testrail.update_testrail(case_id=TEST_CASES["2g_eap_bridge"], run_id=instantiate_project,
status_id=5,
msg='2G WPA2 ENTERPRISE Client Connectivity Failed - bridge mode')
assert eap_connect.passes()
# C5214
@pytest.mark.wpa2_enterprise
@pytest.mark.fiveg
def test_client_wpa2_enterprise_5g(self, request, get_lanforge_data, setup_profile_data, instantiate_project, instantiate_testrail):
profile_data = setup_profile_data["BRIDGE"]["WPA2_E"]["5G"]
station_names = []
for i in range(0, int(request.config.getini("num_stations"))):
station_names.append(get_lanforge_data["lanforge_5g_prefix"] + "0" + str(i))
eap_connect = EAPConnect(get_lanforge_data["lanforge_ip"], get_lanforge_data["lanforge-port-number"])
eap_connect.upstream_resource = 1
eap_connect.upstream_port = get_lanforge_data["lanforge_bridge_port"]
eap_connect.security = "wpa2"
eap_connect.sta_list = station_names
eap_connect.station_names = station_names
eap_connect.sta_prefix = get_lanforge_data["lanforge_5g_prefix"]
eap_connect.ssid = profile_data["ssid_name"]
eap_connect.radio = get_lanforge_data["lanforge_5g"]
eap_connect.eap = "TTLS"
eap_connect.identity = "nolaradius"
eap_connect.ttls_passwd = "nolastart"
eap_connect.runtime_secs = 10
eap_connect.setup()
eap_connect.start()
print("napping %f sec" % eap_connect.runtime_secs)
time.sleep(eap_connect.runtime_secs)
eap_connect.stop()
try:
eap_connect.cleanup()
eap_connect.cleanup()
except Exception:
# ignore cleanup failures so teardown noise cannot mask the test verdict
pass
run_results = eap_connect.get_result_list()
for result in run_results:
print("test result: " + result)
# result = 'pass'
print("Single Client Connectivity :", eap_connect.passes)
if eap_connect.passes():
instantiate_testrail.update_testrail(case_id=TEST_CASES["5g_eap_bridge"], run_id=instantiate_project,
status_id=1,
msg='5G WPA2 ENTERPRISE Client Connectivity Passed successfully - '
'bridge mode')
else:
instantiate_testrail.update_testrail(case_id=TEST_CASES["5g_eap_bridge"], run_id=instantiate_project,
status_id=5,
msg='5G WPA2 ENTERPRISE Client Connectivity Failed - bridge mode')
assert eap_connect.passes()
@pytest.mark.modify_ssid
@pytest.mark.parametrize(
'update_ssid',
(["BRIDGE, WPA, 5G, Sanity-updated-5G-WPA-BRIDGE"]),
indirect=True
)
def test_modify_ssid(self, request, update_ssid, get_lanforge_data, setup_profile_data, instantiate_testrail, instantiate_project):
profile_data = setup_profile_data["BRIDGE"]["WPA"]["5G"]
station_names = []
for i in range(0, int(request.config.getini("num_stations"))):
station_names.append(get_lanforge_data["lanforge_5g_prefix"] + "0" + str(i))
staConnect = StaConnect2(get_lanforge_data["lanforge_ip"], int(get_lanforge_data["lanforge-port-number"]),
debug_=False)
staConnect.sta_mode = 0
staConnect.upstream_resource = 1
staConnect.upstream_port = get_lanforge_data["lanforge_bridge_port"]
staConnect.radio = get_lanforge_data["lanforge_5g"]
staConnect.resource = 1
staConnect.dut_ssid = profile_data["ssid_name"]
staConnect.dut_passwd = profile_data["security_key"]
staConnect.dut_security = "wpa"
staConnect.station_names = station_names
staConnect.sta_prefix = get_lanforge_data["lanforge_5g_prefix"]
staConnect.runtime_secs = 10
staConnect.bringup_time_sec = 60
staConnect.cleanup_on_exit = True
# staConnect.cleanup()
staConnect.setup()
staConnect.start()
print("napping %f sec" % staConnect.runtime_secs)
time.sleep(staConnect.runtime_secs)
staConnect.stop()
staConnect.cleanup()
run_results = staConnect.get_result_list()
for result in run_results:
print("test result: " + result)
# result = 'pass'
print("Single Client Connectivity :", staConnect.passes)
if staConnect.passes():
instantiate_testrail.update_testrail(case_id=TEST_CASES["bridge_ssid_update"], run_id=instantiate_project,
status_id=1,
msg='5G WPA Client Connectivity Passed successfully - bridge mode '
'updated ssid')
else:
instantiate_testrail.update_testrail(case_id=TEST_CASES["bridge_ssid_update"], run_id=instantiate_project,
status_id=5,
msg='5G WPA Client Connectivity Failed - bridge mode updated ssid')
assert staConnect.passes()
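Every test in this class repeats the same StaConnect2 bring-up / traffic / teardown sequence, with only the radio, station prefix, security mode, and TestRail case id varying. If a refactor were wanted, the shared flow could be folded into one helper; a sketch assuming the StaConnect2 API exactly as used above (run_sta_connect is a hypothetical name, not part of this suite):
# Hypothetical refactoring sketch: the duplicated StaConnect2 flow from the
# tests above, folded into one helper. Assumes sta_connect2.StaConnect2
# behaves exactly as used in this file.
import time
from sta_connect2 import StaConnect2

def run_sta_connect(lanforge_ip, lanforge_port, radio, upstream_port,
                    ssid, passwd, security, station_names, sta_prefix):
    staConnect = StaConnect2(lanforge_ip, int(lanforge_port), debug_=False)
    staConnect.sta_mode = 0
    staConnect.upstream_resource = 1
    staConnect.upstream_port = upstream_port
    staConnect.radio = radio
    staConnect.resource = 1
    staConnect.dut_ssid = ssid
    staConnect.dut_passwd = passwd
    staConnect.dut_security = security
    staConnect.station_names = station_names
    staConnect.sta_prefix = sta_prefix
    staConnect.runtime_secs = 10
    staConnect.bringup_time_sec = 60
    staConnect.cleanup_on_exit = True
    # bring the stations up, run traffic for runtime_secs, then tear down
    staConnect.setup()
    staConnect.start()
    time.sleep(staConnect.runtime_secs)
    staConnect.stop()
    staConnect.cleanup()
    # report each individual result, then return the overall verdict
    for result in staConnect.get_result_list():
        print("test result: " + result)
    return staConnect.passes()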
| 50.031161
| 136
| 0.626012
| 1,920
| 17,661
| 5.445313
| 0.071875
| 0.047824
| 0.071736
| 0.092396
| 0.943089
| 0.933716
| 0.91889
| 0.896222
| 0.895457
| 0.876231
| 0
| 0.015642
| 0.28328
| 17,661
| 352
| 137
| 50.173295
| 0.810318
| 0.013929
| 0
| 0.830721
| 0
| 0
| 0.155964
| 0.014487
| 0
| 0
| 0
| 0
| 0.021944
| 1
| 0.021944
| false
| 0.115987
| 0.028213
| 0
| 0.053292
| 0.068966
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 8
| 0ba50208b3dcb7b935427b4f7fdfdd21264269c5
| 210
| py
| Python
| modules/dbnd-airflow/test_dbnd_airflow/package/test_airflow_imports.py
| ipattarapong/dbnd
| 7bd65621c46c73e078eb628f994127ad4c7dbd1a
| ["Apache-2.0"] | 224
| 2020-01-02T10:46:37.000Z
| 2022-03-02T13:54:08.000Z
| modules/dbnd-airflow/test_dbnd_airflow/package/test_airflow_imports.py
| ipattarapong/dbnd
| 7bd65621c46c73e078eb628f994127ad4c7dbd1a
| ["Apache-2.0"] | 16
| 2020-03-11T09:37:58.000Z
| 2022-01-26T10:22:08.000Z
| modules/dbnd-airflow/test_dbnd_airflow/package/test_airflow_imports.py
| ipattarapong/dbnd
| 7bd65621c46c73e078eb628f994127ad4c7dbd1a
| ["Apache-2.0"] | 24
| 2020-03-24T13:53:50.000Z
| 2022-03-22T11:55:18.000Z
|
def test_import_databand():
print("Starting Import")
import dbnd
str(dbnd)
def test_import_airflow_settings():
print("Starting Import")
import airflow.settings
str(airflow.settings)
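These two tests are plain import smoke tests: importing the package is the assertion, and str(...) only keeps linters from flagging the import as unused. The same idea generalizes to a parametrized pytest; a small self-contained sketch (the module list is illustrative, not dbnd's):
# Generalized import smoke test (illustrative sketch, not dbnd code):
# parametrize over module names and fail fast on any ImportError.
import importlib
import pytest

@pytest.mark.parametrize("module_name", ["json", "os.path"])
def test_module_imports_cleanly(module_name):
    module = importlib.import_module(module_name)
    assert module is not None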
| 16.153846
| 35
| 0.704762
| 25
| 210
| 5.72
| 0.4
| 0.314685
| 0.181818
| 0.34965
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.2
| 210
| 12
| 36
| 17.5
| 0.85119
| 0
| 0
| 0.25
| 0
| 0
| 0.142857
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| true
| 0
| 0.75
| 0
| 1
| 0.25
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| 8
| 0bc041db2dd0b1ce16f9c7b139bd9ab2dbc29343
| 145
| py
| Python
| proxytest/__main__.py
| yoleg/proxytest
| 63c85b9b14c35de72fce4542ae44080ee6082efb
| ["MIT"] | 2
| 2020-01-09T14:42:50.000Z
| 2020-08-18T10:26:55.000Z
| proxytest/__main__.py
| yoleg/proxytest
| 63c85b9b14c35de72fce4542ae44080ee6082efb
| ["MIT"] | null | null | null
| proxytest/__main__.py
| yoleg/proxytest
| 63c85b9b14c35de72fce4542ae44080ee6082efb
| ["MIT"] | null | null | null
|
""" Entry point for calling with python -m proxytest ... """
import sys
from . import run_from_command_line
sys.exit(run_from_command_line())
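This is the standard __main__ module convention: python -m proxytest executes this file and the process exits with the runner's return code. A hypothetical minimal analogue for any package (mypkg and main are placeholder names, not proxytest code):
# mypkg/__main__.py -- hypothetical minimal analogue of the pattern above.
# `python -m mypkg` runs this module and exits with main()'s return code.
import sys

def main() -> int:
    print("running mypkg as a module")
    return 0  # 0 signals success to the shell

sys.exit(main())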
| 18.125
| 60
| 0.744828
| 22
| 145
| 4.636364
| 0.681818
| 0.137255
| 0.27451
| 0.352941
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.144828
| 145
| 7
| 61
| 20.714286
| 0.822581
| 0.358621
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| 7
| f03f6fc7db4aea8a372a539aa797f9231ccd486f
| 3,762
| py
| Python
| GenDataset/words.py
| pengkang2018/AttentionalTextMaating
| a0ad2e3963919632985dbcea9b15141f5cc2f2d6
| ["MIT"] | 8
| 2021-02-01T01:55:05.000Z
| 2022-02-22T13:08:05.000Z
| GenDataset/words.py
| 568568568/AttentionalTextMatting
| a0ad2e3963919632985dbcea9b15141f5cc2f2d6
| ["MIT"] | 1
| 2022-01-25T09:58:51.000Z
| 2022-01-25T10:49:19.000Z
| GenDataset/words.py
| 568568568/AttentionalTextMatting
| a0ad2e3963919632985dbcea9b15141f5cc2f2d6
| ["MIT"] | 1
| 2022-02-22T13:07:55.000Z
| 2022-02-22T13:07:55.000Z
|
words = '啊阿埃挨哎唉哀皑癌蔼矮艾碍爱隘鞍氨安俺按暗岸胺案肮昂盎凹敖熬翱袄傲奥懊澳芭捌扒叭吧笆八疤巴拔跋靶把耙坝霸罢爸白柏百摆佰败拜稗斑班搬扳般颁板版扮拌伴瓣半办绊邦帮梆榜膀绑棒磅蚌镑傍谤苞胞包褒剥薄雹保堡饱宝抱报暴豹鲍爆杯碑悲卑北辈背贝钡倍狈备惫焙被奔苯本笨崩绷甭泵蹦迸逼鼻比鄙笔彼碧蓖蔽毕毙毖币庇痹闭敝弊必辟壁臂避陛鞭边编贬扁便变卞辨辩辫遍标彪膘表鳖憋别瘪彬斌濒滨宾摈兵冰柄丙秉饼炳病并玻菠播拨钵波博勃搏铂箔伯帛舶脖膊渤泊驳捕卜哺补埠不布步簿部怖擦猜裁材才财睬踩采彩菜蔡餐参蚕残惭惨灿苍舱仓沧藏操糙槽曹草厕策侧册测层蹭插叉茬茶查碴搽察岔差诧拆柴豺搀掺蝉馋谗缠铲产阐颤昌猖场尝常长偿肠厂敞畅唱倡超抄钞朝嘲潮巢吵炒车扯撤掣彻澈郴臣辰尘晨忱沉陈趁衬撑称城橙成呈乘程惩澄诚承逞骋秤吃痴持匙池迟弛驰耻齿侈尺赤翅斥炽充冲虫崇宠抽酬畴踌稠愁筹仇绸瞅丑臭初出橱厨躇锄雏滁除楚础储矗搐触处揣川穿椽传船喘串疮窗幢床闯创吹炊捶锤垂春椿醇唇淳纯蠢戳绰疵茨磁雌辞慈瓷词此刺赐次聪葱囱匆从丛凑粗醋簇促蹿篡窜摧崔催脆瘁粹淬翠村存寸磋撮搓措挫错搭达答瘩打大呆歹傣戴带殆代贷袋待逮怠耽担丹单郸掸胆旦氮但惮淡诞弹蛋当挡党荡档刀捣蹈倒岛祷导到稻悼道盗德得的蹬灯登等瞪凳邓堤低滴迪敌笛狄涤翟嫡抵底地蒂第帝弟递缔颠掂滇碘点典靛垫电佃甸店惦奠淀殿碉叼雕凋刁掉吊钓调跌爹碟蝶迭谍叠丁盯叮钉顶鼎锭定订丢东冬董懂动栋侗恫冻洞兜抖斗陡豆逗痘都督毒犊独读堵睹赌杜镀肚度渡妒端短锻段断缎堆兑队对墩吨蹲敦顿囤钝盾遁掇哆多夺垛躲朵跺舵剁惰堕蛾峨鹅俄额讹娥恶厄扼遏鄂饿恩而儿耳尔饵洱二贰发罚筏伐乏阀法珐藩帆番翻樊矾钒繁凡烦反返范贩犯饭泛坊芳方肪房防妨仿访纺放菲非啡飞肥匪诽吠肺废沸费芬酚吩氛分纷坟焚汾粉奋份忿愤粪丰封枫蜂峰锋风疯烽逢冯缝讽奉凤佛否夫敷肤孵扶拂辐幅氟符伏俘服浮涪福袱弗甫抚辅俯釜斧脯腑府腐赴副覆赋复傅付阜父腹负富讣附妇缚咐噶嘎该改概钙盖溉干甘杆柑竿肝赶感秆敢赣冈刚钢缸肛纲岗港杠篙皋高膏羔糕搞镐稿告哥歌搁戈鸽胳疙割革葛格蛤阁隔铬个各给根跟耕更庚羹埂耿梗工攻功恭龚供躬公宫弓巩汞拱贡共钩勾沟苟狗垢构购够辜菇咕箍估沽孤姑鼓古蛊骨谷股故顾固雇刮瓜剐寡挂褂乖拐怪棺关官冠观管馆罐惯灌贯光广逛瑰规圭硅归龟闺轨鬼诡癸桂柜跪贵刽辊滚棍锅郭国果裹过哈骸孩海氦亥害骇酣憨邯韩含涵寒函喊罕翰撼捍旱憾悍焊汗汉夯杭航壕嚎豪毫郝好耗号浩呵喝荷菏核禾和何合盒貉阂河涸赫褐鹤贺嘿黑痕很狠恨哼亨横衡恒轰哄烘虹鸿洪宏弘红喉侯猴吼厚候后呼乎忽瑚壶葫胡蝴狐糊湖弧虎唬护互沪户花哗华猾滑画划化话槐徊怀淮坏欢环桓还缓换患唤痪豢焕涣宦幻荒慌黄磺蝗簧皇凰惶煌晃幌恍谎灰挥辉徽恢蛔回毁悔慧卉惠晦贿秽会烩汇讳诲绘荤昏婚魂浑混豁活伙火获或惑霍货祸击圾基机畸稽积箕肌饥迹激讥鸡姬绩缉吉极棘辑籍集及急疾汲即嫉级挤几脊己蓟技冀季伎祭剂悸济寄寂计记既忌际妓继纪嘉枷夹佳家加荚颊贾甲钾假稼价架驾嫁歼监坚尖笺间煎兼肩艰奸缄茧检柬碱碱拣捡简俭剪减荐槛鉴践贱见键箭件健舰剑饯渐溅涧建僵姜将浆江疆蒋桨奖讲匠酱降蕉椒礁焦胶交郊浇骄娇嚼搅铰矫侥脚狡角饺缴绞剿教酵轿较叫窖揭接皆秸街阶截劫节桔杰捷睫竭洁结解姐戒藉芥界借介疥诫届巾筋斤金今津襟紧锦仅谨进靳晋禁近烬浸尽劲荆兢茎睛晶鲸京惊精粳经井警景颈静境敬镜径痉靖竟竞净炯窘揪究纠玖韭久灸九酒厩救旧臼舅咎就疚鞠拘狙疽居驹菊局咀矩举沮聚拒据巨具距踞锯俱句惧炬剧捐鹃娟倦眷卷绢撅攫抉掘倔爵觉决诀绝均菌钧军君峻俊竣浚郡骏喀咖卡咯开揩楷凯慨刊堪勘坎砍看康慷糠扛抗亢炕考拷烤靠坷苛柯棵磕颗科壳咳可渴克刻客课肯啃垦恳坑吭空恐孔控抠口扣寇枯哭窟苦酷库裤夸垮挎跨胯块筷侩快宽款匡筐狂框矿眶旷况亏盔岿窥葵奎魁傀馈愧溃坤昆捆困括扩廓阔垃拉喇蜡腊辣啦莱来赖蓝婪栏拦篮阑兰澜谰揽览懒缆烂滥琅榔狼廊郎朗浪捞劳牢老佬姥酪烙涝勒乐雷镭蕾磊累儡垒擂肋类泪棱楞冷厘梨犁黎篱狸离漓理李里鲤礼莉荔吏栗丽厉励砾历利僳例俐痢立粒沥隶力璃哩俩联莲连镰廉怜涟帘敛脸链恋炼练粮凉梁粱良两辆量晾亮谅撩聊僚疗燎寥辽潦了撂镣廖料列裂烈劣猎琳林磷霖临邻鳞淋凛赁吝拎玲菱零龄铃伶羚凌灵陵岭领另令溜琉榴硫馏留刘瘤流柳六龙聋咙笼窿隆垄拢陇楼娄搂篓漏陋芦卢颅庐炉掳卤虏鲁麓碌露路赂鹿潞禄录陆戮驴吕铝侣旅履屡缕虑氯律率滤绿峦挛孪滦卵乱掠略抡轮伦仑沦纶论萝螺罗逻锣箩骡裸落洛骆络妈麻玛码蚂马骂嘛吗埋买麦卖迈脉瞒馒蛮满蔓曼慢漫谩芒茫盲氓忙莽猫茅锚毛矛铆卯茂冒帽貌贸么玫枚梅酶霉煤没眉媒镁每美昧寐妹媚门闷们萌蒙檬盟锰猛梦孟眯醚靡糜迷谜弥米秘觅泌蜜密幂棉眠绵冕免勉娩缅面苗描瞄藐秒渺庙妙蔑灭民抿皿敏悯闽明螟鸣铭名命谬摸摹蘑模膜磨摩魔抹末莫墨默沫漠寞陌谋牟某拇牡亩姆母墓暮幕募慕木目睦牧穆拿哪呐钠那娜纳氖乃奶耐奈南男难囊挠脑恼闹淖呢馁内嫩能妮霓倪泥尼拟你匿腻逆溺蔫拈年碾撵捻念娘酿鸟尿捏聂孽啮镊镍涅您柠狞凝宁拧泞牛扭钮纽脓浓农弄奴努怒女暖虐疟挪懦糯诺哦欧鸥殴藕呕偶沤啪趴爬帕怕琶拍排牌徘湃派攀潘盘磐盼畔判叛乓庞旁耪胖抛咆刨炮袍跑泡呸胚培裴赔陪配佩沛喷盆砰抨烹澎彭蓬棚硼篷膨朋鹏捧碰坯砒霹批披劈琵毗啤脾疲皮匹痞僻屁譬篇偏片骗飘漂瓢票撇瞥拼频贫品聘乒坪苹萍平凭瓶评屏坡泼颇婆破魄迫粕剖扑铺仆莆葡菩蒲埔朴圃普浦谱曝瀑期欺栖戚妻七凄漆柒沏其棋奇歧畦崎脐齐旗祈祁骑起岂乞企启契砌器气迄弃汽泣讫掐洽牵扦钎铅千迁签仟谦乾黔钱钳前潜遣浅谴堑嵌欠歉枪呛腔羌墙蔷强抢橇锹敲悄桥瞧乔侨巧鞘撬翘峭俏窍切茄且怯窃钦侵亲秦琴勤芹擒禽寝沁青轻氢倾卿清擎晴氰情顷请庆琼穷秋丘邱球求囚酋泅趋区蛆曲躯屈驱渠取娶龋趣去圈颧权醛泉全痊拳犬券劝缺炔瘸却鹊榷确雀裙群然燃冉染瓤壤攘嚷让饶扰绕惹热壬仁人忍韧任认刃妊纫扔仍日戎茸蓉荣融熔溶容绒冗揉柔肉茹蠕儒孺如辱乳汝入褥软阮蕊瑞锐闰润若弱撒洒萨腮鳃塞赛三叁伞散桑嗓丧搔骚扫嫂瑟色涩森僧莎砂杀刹沙纱傻啥煞筛晒珊苫杉山删煽衫闪陕擅赡膳善汕扇缮墒伤商赏晌上尚裳梢捎稍烧芍勺韶少哨邵绍奢赊蛇舌舍赦摄射慑涉社设砷申呻伸身深娠绅神沈审婶甚肾慎渗声生甥牲升绳省盛剩胜圣师失狮施湿诗尸虱十石拾时什食蚀实识史矢使屎驶始式示士世柿事拭誓逝势是嗜噬适仕侍释饰氏市恃室视试收手首守寿授售受瘦兽蔬枢梳殊抒输叔舒淑疏书赎孰熟薯暑曙署蜀黍鼠属术述树束戍竖墅庶数漱恕刷耍摔衰甩帅栓拴霜双爽谁水睡税吮瞬顺舜说硕朔烁斯撕嘶思私司丝死肆寺嗣四伺似饲巳松耸怂颂送宋讼诵搜艘擞嗽苏酥俗素速粟僳塑溯宿诉肃酸蒜算虽隋随绥髓碎岁穗遂隧祟孙损笋蓑梭唆缩琐索锁所塌他它她塔獭挞蹋踏胎苔抬台泰酞太态汰坍摊贪瘫滩坛檀痰潭谭谈坦毯袒碳探叹炭汤塘搪堂棠膛唐糖倘躺淌趟烫掏涛滔绦萄桃逃淘陶讨套特藤腾疼誊梯剔踢锑提题蹄啼体替嚏惕涕剃屉天添填田甜恬舔腆挑条迢眺跳贴铁帖厅听烃汀廷停亭庭艇通桐酮瞳同铜彤童桶捅筒统痛偷投头透凸秃突图徒途涂屠土吐兔湍团推颓腿蜕褪退吞屯臀拖托脱鸵陀驮驼椭妥拓唾挖哇蛙洼娃瓦袜歪外豌弯湾玩顽丸烷完碗挽晚皖惋宛婉万腕汪王亡枉网往旺望忘妄威巍微危韦违桅围唯惟为潍维苇萎委伟伪尾纬未蔚味畏胃喂魏位渭谓尉慰卫瘟温蚊文闻纹吻稳紊问嗡翁瓮挝蜗涡窝我斡卧握沃巫呜钨乌污诬屋无芜梧吾吴毋武五捂午舞伍侮坞戊雾晤物勿务悟误昔熙析西硒矽晰嘻吸锡牺稀息希悉膝夕惜熄烯溪汐犀檄袭席习媳喜铣洗系隙戏细瞎虾匣霞辖暇峡侠狭下厦夏吓掀锨先仙鲜纤咸贤衔舷闲涎弦嫌显险现献县腺馅羡宪陷限线相厢镶香箱襄湘乡翔祥详想响享项巷橡像向象萧硝霄削哮嚣销消宵淆晓小孝校肖啸笑效楔些歇蝎鞋协挟携邪斜胁谐写械卸蟹懈泄泻谢屑薪芯锌欣辛新忻心信衅星腥猩惺兴刑型形邢行醒幸杏性姓兄凶胸匈汹雄熊休修羞朽嗅锈秀袖绣墟戌需虚嘘须徐许蓄酗叙旭序畜恤絮婿绪续轩喧宣悬旋玄选癣眩绚靴薛学穴雪血勋熏循旬询寻驯巡殉汛训讯逊迅压押鸦鸭呀丫芽牙蚜崖衙涯雅哑亚讶焉咽阉烟淹盐严研蜒岩延言颜阎炎沿奄掩眼衍演艳堰燕厌砚雁唁彦焰宴谚验殃央鸯秧杨扬佯疡羊洋阳氧仰痒养样漾邀腰妖瑶摇尧遥窑谣姚咬舀药要耀椰噎耶爷野冶也页掖业叶曳腋夜液一壹医揖铱依伊衣颐夷遗移仪胰疑沂宜姨彝椅蚁倚已乙矣以艺抑易邑屹亿役臆逸肄疫亦裔意毅忆义益溢诣议谊译异翼翌绎茵荫因殷音阴姻吟银淫寅饮尹引隐印英樱婴鹰应缨莹萤营荧蝇迎赢盈影颖硬映哟拥佣臃痈庸雍踊蛹咏泳涌永恿勇用幽优悠忧尤由邮铀犹油游酉有友右佑釉诱又幼迂淤于盂榆虞愚舆余俞逾鱼愉渝渔隅予娱雨与屿禹宇语羽玉域芋郁吁遇喻峪御愈欲狱育誉浴寓裕预豫驭鸳渊冤元垣袁原援辕园员圆猿源缘远苑愿怨院曰约越跃钥岳粤月悦阅耘云郧匀陨允运蕴酝晕韵孕匝砸杂栽哉灾宰载再在咱攒暂赞赃脏葬遭糟凿藻枣早澡蚤躁噪造皂灶燥责择则泽贼怎增憎曾赠扎喳渣札轧铡闸眨栅榨咋乍炸诈摘斋宅窄债寨瞻毡詹粘沾盏斩辗崭展蘸栈占战站湛绽樟章彰漳张掌涨杖丈帐账仗胀瘴障招昭找沼赵照罩兆肇召遮折哲蛰辙者锗蔗这浙珍斟真甄砧臻贞针侦枕疹诊震振镇阵蒸挣睁征狰争怔整拯正政帧症郑证芝枝支吱蜘知肢脂汁之织职直植殖执值侄址指止趾只旨纸志挚掷至致置帜峙制智秩稚质炙痔滞治窒中盅忠钟衷终种肿重仲众舟周州洲诌粥轴肘帚咒皱宙昼骤珠株蛛朱猪诸诛逐竹烛煮拄瞩嘱主著柱助蛀贮铸筑住注祝驻抓爪拽专砖转撰赚篆桩庄装妆撞壮状椎锥追赘坠缀谆准捉拙卓桌琢茁酌啄着灼浊兹咨资姿滋淄孜紫仔籽滓子自渍字鬃棕踪宗综总纵邹走奏揍租足卒族祖诅阻组钻纂嘴醉最罪尊遵昨左佐柞做作坐座'
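The constant above is a single-line pool of a few thousand common simplified-Chinese characters. Given the GenDataset path, it is presumably sampled from when synthesizing text for the dataset; a hypothetical usage sketch (not code from the repository):
# Hypothetical usage of the character pool above (not repository code):
# sample a random run of characters to synthesize a text line.
import random

def random_text(words: str, length: int = 8) -> str:
    return ''.join(random.choice(words) for _ in range(length))

# e.g. random_text(words, 5) might return something like '的一是在不'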
| 3,762
| 3,762
| 0.998937
| 2
| 3,762
| 1,879
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.000266
| 3,762
| 1
| 3,762
| 3,762
| 0.999202
| 0
| 0
| 0
| 0
| 0
| 0.997343
| 0.997343
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 7
| b2bb97a54cbab4786370c1b4aa765d4d617fb5a1
| 38,361
| py
| Python
| dearpypixl/appitems/values.py
| Atlamillias/pixl-engine
| c4217a3a65e01e49d05bf7f07946d65484f6e1da
| ["MIT"] | 6
| 2021-08-28T03:22:19.000Z
| 2021-10-14T22:04:04.000Z
| dearpypixl/appitems/values.py
| Atlamillias/pixl-engine
| c4217a3a65e01e49d05bf7f07946d65484f6e1da
| ["MIT"] | 1
| 2021-07-29T16:51:28.000Z
| 2021-08-03T00:24:11.000Z
| dearpypixl/appitems/values.py
| Atlamillias/pixl-engine
| c4217a3a65e01e49d05bf7f07946d65484f6e1da
| ["MIT"] | null | null | null
|
from typing import (
Callable,
Any,
Union,
Dict,
Tuple,
Set,
List,
)
from dearpygui import dearpygui
from dearpypixl.components import *
##################################################
####### NOTE: This file is auto-generated. #######
##################################################
__all__ = [
"IntValue",
"Int4Value",
"BoolValue",
"FloatValue",
"Float4Value",
"StringValue",
"DoubleValue",
"Double4Value",
"ColorValue",
"FloatVectValue",
"SeriesValue",
]
class IntValue(Widget):
"""Adds a int value.
Args:
label (str, optional): Overrides 'name' as label.
user_data (Any, optional): User data for callbacks
use_internal_label (bool, optional): Use generated internal label instead of user specified (appends ### uuid).
tag (Union[int, str], optional): Unique id used to programmatically refer to the item. If label is unused this will be the label.
source (Union[int, str], optional): Overrides 'id' as value storage key.
default_value (int, optional):
parent (Union[int, str], optional): Parent to add this item to. (runtime adding)
id (Union[int, str], optional): (deprecated)
Returns:
Union[int, str]
"""
label : str = ItemAttribute("configuration", "get_item_config", "set_item_config", None)
user_data : Any = ItemAttribute("configuration", "get_item_config", "set_item_config", None)
use_internal_label: bool = ItemAttribute("configuration", "get_item_config", "set_item_config", None)
source : Union[int, str] = ItemAttribute("configuration", "get_item_config", "set_item_config", None)
default_value : int = ItemAttribute('information', 'get_item_cached', None, None)
value : int = ItemAttribute("configuration", "get_item_value", "set_item_value", "default_value")
_is_container : bool = False
_is_root_item : bool = False
_is_value_able : bool = True
_unique_parents : tuple = ('ValueRegistry',)
_unique_children : tuple = ()
_unique_commands : tuple = ()
_unique_constants : tuple = ('mvIntValue',)
_command : Callable = dearpygui.add_int_value
def __init__(
self ,
label : str = None,
user_data : Any = None,
use_internal_label: bool = True,
source : Union[int, str] = 0 ,
default_value : int = 0 ,
parent : Union[int, str] = 13 ,
**kwargs ,
) -> None:
super().__init__(
label=label,
user_data=user_data,
use_internal_label=use_internal_label,
source=source,
default_value=default_value,
parent=parent,
**kwargs,
)
class Int4Value(Widget):
"""Adds a int4 value.
Args:
label (str, optional): Overrides 'name' as label.
user_data (Any, optional): User data for callbacks
use_internal_label (bool, optional): Use generated internal label instead of user specified (appends ### uuid).
tag (Union[int, str], optional): Unique id used to programmatically refer to the item. If label is unused this will be the label.
source (Union[int, str], optional): Overrides 'id' as value storage key.
default_value (Union[List[int], Tuple[int, ...]], optional):
parent (Union[int, str], optional): Parent to add this item to. (runtime adding)
id (Union[int, str], optional): (deprecated)
Returns:
Union[int, str]
"""
label : str = ItemAttribute("configuration", "get_item_config", "set_item_config", None)
user_data : Any = ItemAttribute("configuration", "get_item_config", "set_item_config", None)
use_internal_label: bool = ItemAttribute("configuration", "get_item_config", "set_item_config", None)
source : Union[int, str] = ItemAttribute("configuration", "get_item_config", "set_item_config", None)
default_value : Union[List[int], Tuple[int, ...]] = ItemAttribute('information', 'get_item_cached', None, None)
value : Union[List[int], Tuple[int, ...]] = ItemAttribute("configuration", "get_item_value", "set_item_value", "default_value")
_is_container : bool = False
_is_root_item : bool = False
_is_value_able : bool = True
_unique_parents : tuple = ('ValueRegistry',)
_unique_children : tuple = ()
_unique_commands : tuple = ()
_unique_constants : tuple = ('mvInt4Value',)
_command : Callable = dearpygui.add_int4_value
def __init__(
self ,
label : str = None ,
user_data : Any = None ,
use_internal_label: bool = True ,
source : Union[int, str] = 0 ,
default_value : Union[List[int], Tuple[int, ...]] = (0, 0, 0, 0),
parent : Union[int, str] = 13 ,
**kwargs ,
) -> None:
super().__init__(
label=label,
user_data=user_data,
use_internal_label=use_internal_label,
source=source,
default_value=default_value,
parent=parent,
**kwargs,
)
class BoolValue(Widget):
"""Adds a bool value.
Args:
label (str, optional): Overrides 'name' as label.
user_data (Any, optional): User data for callbacks
use_internal_label (bool, optional): Use generated internal label instead of user specified (appends ### uuid).
tag (Union[int, str], optional): Unique id used to programmatically refer to the item. If label is unused this will be the label.
source (Union[int, str], optional): Overrides 'id' as value storage key.
default_value (bool, optional):
parent (Union[int, str], optional): Parent to add this item to. (runtime adding)
id (Union[int, str], optional): (deprecated)
Returns:
Union[int, str]
"""
label : str = ItemAttribute("configuration", "get_item_config", "set_item_config", None)
user_data : Any = ItemAttribute("configuration", "get_item_config", "set_item_config", None)
use_internal_label: bool = ItemAttribute("configuration", "get_item_config", "set_item_config", None)
source : Union[int, str] = ItemAttribute("configuration", "get_item_config", "set_item_config", None)
default_value : bool = ItemAttribute('information', 'get_item_cached', None, None)
value : bool = ItemAttribute("configuration", "get_item_value", "set_item_value", "default_value")
_is_container : bool = False
_is_root_item : bool = False
_is_value_able : bool = True
_unique_parents : tuple = ('ValueRegistry',)
_unique_children : tuple = ()
_unique_commands : tuple = ()
_unique_constants : tuple = ('mvBoolValue',)
_command : Callable = dearpygui.add_bool_value
def __init__(
self ,
label : str = None ,
user_data : Any = None ,
use_internal_label: bool = True ,
source : Union[int, str] = 0 ,
default_value : bool = False,
parent : Union[int, str] = 13 ,
**kwargs ,
) -> None:
super().__init__(
label=label,
user_data=user_data,
use_internal_label=use_internal_label,
source=source,
default_value=default_value,
parent=parent,
**kwargs,
)
class FloatValue(Widget):
"""Adds a float value.
Args:
label (str, optional): Overrides 'name' as label.
user_data (Any, optional): User data for callbacks
use_internal_label (bool, optional): Use generated internal label instead of user specified (appends ### uuid).
tag (Union[int, str], optional): Unique id used to programmatically refer to the item. If label is unused this will be the label.
source (Union[int, str], optional): Overrides 'id' as value storage key.
default_value (float, optional):
parent (Union[int, str], optional): Parent to add this item to. (runtime adding)
id (Union[int, str], optional): (deprecated)
Returns:
Union[int, str]
"""
label : str = ItemAttribute("configuration", "get_item_config", "set_item_config", None)
user_data : Any = ItemAttribute("configuration", "get_item_config", "set_item_config", None)
use_internal_label: bool = ItemAttribute("configuration", "get_item_config", "set_item_config", None)
source : Union[int, str] = ItemAttribute("configuration", "get_item_config", "set_item_config", None)
default_value : float = ItemAttribute('information', 'get_item_cached', None, None)
value : float = ItemAttribute("configuration", "get_item_value", "set_item_value", "default_value")
_is_container : bool = False
_is_root_item : bool = False
_is_value_able : bool = True
_unique_parents : tuple = ('ValueRegistry',)
_unique_children : tuple = ()
_unique_commands : tuple = ()
_unique_constants : tuple = ('mvFloatValue',)
_command : Callable = dearpygui.add_float_value
def __init__(
self ,
label : str = None,
user_data : Any = None,
use_internal_label: bool = True,
source : Union[int, str] = 0 ,
default_value : float = 0.0 ,
parent : Union[int, str] = 13 ,
**kwargs ,
) -> None:
super().__init__(
label=label,
user_data=user_data,
use_internal_label=use_internal_label,
source=source,
default_value=default_value,
parent=parent,
**kwargs,
)
class Float4Value(Widget):
"""Adds a float4 value.
Args:
label (str, optional): Overrides 'name' as label.
user_data (Any, optional): User data for callbacks
use_internal_label (bool, optional): Use generated internal label instead of user specified (appends ### uuid).
tag (Union[int, str], optional): Unique id used to programmatically refer to the item. If label is unused this will be the label.
source (Union[int, str], optional): Overrides 'id' as value storage key.
default_value (Union[List[float], Tuple[float, ...]], optional):
parent (Union[int, str], optional): Parent to add this item to. (runtime adding)
id (Union[int, str], optional): (deprecated)
Returns:
Union[int, str]
"""
label : str = ItemAttribute("configuration", "get_item_config", "set_item_config", None)
user_data : Any = ItemAttribute("configuration", "get_item_config", "set_item_config", None)
use_internal_label: bool = ItemAttribute("configuration", "get_item_config", "set_item_config", None)
source : Union[int, str] = ItemAttribute("configuration", "get_item_config", "set_item_config", None)
default_value : Union[List[float], Tuple[float, ...]] = ItemAttribute('information', 'get_item_cached', None, None)
value : Union[List[float], Tuple[float, ...]] = ItemAttribute("configuration", "get_item_value", "set_item_value", "default_value")
_is_container : bool = False
_is_root_item : bool = False
_is_value_able : bool = True
_unique_parents : tuple = ('ValueRegistry',)
_unique_children : tuple = ()
_unique_commands : tuple = ()
_unique_constants : tuple = ('mvFloat4Value',)
_command : Callable = dearpygui.add_float4_value
def __init__(
self ,
label : str = None ,
user_data : Any = None ,
use_internal_label: bool = True ,
source : Union[int, str] = 0 ,
default_value : Union[List[float], Tuple[float, ...]] = (0.0, 0.0, 0.0, 0.0),
parent : Union[int, str] = 13 ,
**kwargs ,
) -> None:
super().__init__(
label=label,
user_data=user_data,
use_internal_label=use_internal_label,
source=source,
default_value=default_value,
parent=parent,
**kwargs,
)
class StringValue(Widget):
"""Adds a string value.
Args:
label (str, optional): Overrides 'name' as label.
user_data (Any, optional): User data for callbacks
use_internal_label (bool, optional): Use generated internal label instead of user specified (appends ### uuid).
tag (Union[int, str], optional): Unique id used to programmatically refer to the item. If label is unused this will be the label.
source (Union[int, str], optional): Overrides 'id' as value storage key.
default_value (str, optional):
parent (Union[int, str], optional): Parent to add this item to. (runtime adding)
id (Union[int, str], optional): (deprecated)
Returns:
Union[int, str]
"""
label : str = ItemAttribute("configuration", "get_item_config", "set_item_config", None)
user_data : Any = ItemAttribute("configuration", "get_item_config", "set_item_config", None)
use_internal_label: bool = ItemAttribute("configuration", "get_item_config", "set_item_config", None)
source : Union[int, str] = ItemAttribute("configuration", "get_item_config", "set_item_config", None)
default_value : str = ItemAttribute('information', 'get_item_cached', None, None)
value : str = ItemAttribute("configuration", "get_item_value", "set_item_value", "default_value")
_is_container : bool = False
_is_root_item : bool = False
_is_value_able : bool = True
_unique_parents : tuple = ('ValueRegistry',)
_unique_children : tuple = ()
_unique_commands : tuple = ()
_unique_constants : tuple = ('mvStringValue',)
_command : Callable = dearpygui.add_string_value
def __init__(
self ,
label : str = None,
user_data : Any = None,
use_internal_label: bool = True,
source : Union[int, str] = 0 ,
default_value : str = '' ,
parent : Union[int, str] = 13 ,
**kwargs ,
) -> None:
super().__init__(
label=label,
user_data=user_data,
use_internal_label=use_internal_label,
source=source,
default_value=default_value,
parent=parent,
**kwargs,
)
class DoubleValue(Widget):
"""Adds a double value.
Args:
label (str, optional): Overrides 'name' as label.
user_data (Any, optional): User data for callbacks
use_internal_label (bool, optional): Use generated internal label instead of user specified (appends ### uuid).
tag (Union[int, str], optional): Unique id used to programmatically refer to the item. If label is unused this will be the label.
source (Union[int, str], optional): Overrides 'id' as value storage key.
default_value (float, optional):
parent (Union[int, str], optional): Parent to add this item to. (runtime adding)
id (Union[int, str], optional): (deprecated)
Returns:
Union[int, str]
"""
label : str = ItemAttribute("configuration", "get_item_config", "set_item_config", None)
user_data : Any = ItemAttribute("configuration", "get_item_config", "set_item_config", None)
use_internal_label: bool = ItemAttribute("configuration", "get_item_config", "set_item_config", None)
source : Union[int, str] = ItemAttribute("configuration", "get_item_config", "set_item_config", None)
default_value : float = ItemAttribute('information', 'get_item_cached', None, None)
value : float = ItemAttribute("configuration", "get_item_value", "set_item_value", "default_value")
_is_container : bool = False
_is_root_item : bool = False
_is_value_able : bool = True
_unique_parents : tuple = ('ValueRegistry',)
_unique_children : tuple = ()
_unique_commands : tuple = ()
_unique_constants : tuple = ('mvDoubleValue',)
_command : Callable = dearpygui.add_double_value
def __init__(
self ,
label : str = None,
user_data : Any = None,
use_internal_label: bool = True,
source : Union[int, str] = 0 ,
default_value : float = 0.0 ,
parent : Union[int, str] = 13 ,
**kwargs ,
) -> None:
super().__init__(
label=label,
user_data=user_data,
use_internal_label=use_internal_label,
source=source,
default_value=default_value,
parent=parent,
**kwargs,
)
class Double4Value(Widget):
"""Adds a double value.
Args:
label (str, optional): Overrides 'name' as label.
user_data (Any, optional): User data for callbacks
use_internal_label (bool, optional): Use generated internal label instead of user specified (appends ### uuid).
tag (Union[int, str], optional): Unique id used to programmatically refer to the item. If label is unused this will be the label.
source (Union[int, str], optional): Overrides 'id' as value storage key.
default_value (Any, optional):
parent (Union[int, str], optional): Parent to add this item to. (runtime adding)
id (Union[int, str], optional): (deprecated)
Returns:
Union[int, str]
"""
label : str = ItemAttribute("configuration", "get_item_config", "set_item_config", None)
user_data : Any = ItemAttribute("configuration", "get_item_config", "set_item_config", None)
use_internal_label: bool = ItemAttribute("configuration", "get_item_config", "set_item_config", None)
source : Union[int, str] = ItemAttribute("configuration", "get_item_config", "set_item_config", None)
default_value : Any = ItemAttribute('information', 'get_item_cached', None, None)
value : Any = ItemAttribute("configuration", "get_item_value", "set_item_value", "default_value")
_is_container : bool = False
_is_root_item : bool = False
_is_value_able : bool = True
_unique_parents : tuple = ('ValueRegistry',)
_unique_children : tuple = ()
_unique_commands : tuple = ()
_unique_constants : tuple = ('mvDouble4Value',)
_command : Callable = dearpygui.add_double4_value
def __init__(
self ,
label : str = None ,
user_data : Any = None ,
use_internal_label: bool = True ,
source : Union[int, str] = 0 ,
default_value : Any = (0.0, 0.0, 0.0, 0.0),
parent : Union[int, str] = 13 ,
**kwargs ,
) -> None:
super().__init__(
label=label,
user_data=user_data,
use_internal_label=use_internal_label,
source=source,
default_value=default_value,
parent=parent,
**kwargs,
)
class ColorValue(Widget):
"""Adds a color value.
Args:
label (str, optional): Overrides 'name' as label.
user_data (Any, optional): User data for callbacks
use_internal_label (bool, optional): Use generated internal label instead of user specified (appends ### uuid).
tag (Union[int, str], optional): Unique id used to programmatically refer to the item. If label is unused this will be the label.
source (Union[int, str], optional): Overrides 'id' as value storage key.
default_value (Union[List[float], Tuple[float, ...]], optional):
parent (Union[int, str], optional): Parent to add this item to. (runtime adding)
id (Union[int, str], optional): (deprecated)
Returns:
Union[int, str]
"""
label : str = ItemAttribute("configuration", "get_item_config", "set_item_config", None)
user_data : Any = ItemAttribute("configuration", "get_item_config", "set_item_config", None)
use_internal_label: bool = ItemAttribute("configuration", "get_item_config", "set_item_config", None)
source : Union[int, str] = ItemAttribute("configuration", "get_item_config", "set_item_config", None)
default_value : Union[List[float], Tuple[float, ...]] = ItemAttribute('information', 'get_item_cached', None, None)
value : Union[List[float], Tuple[float, ...]] = ItemAttribute("configuration", "get_item_value", "set_item_value", "default_value")
_is_container : bool = False
_is_root_item : bool = False
_is_value_able : bool = True
_unique_parents : tuple = ('ValueRegistry',)
_unique_children : tuple = ()
_unique_commands : tuple = ()
_unique_constants : tuple = ('mvColorValue',)
_command : Callable = dearpygui.add_color_value
def __init__(
self ,
label : str = None ,
user_data : Any = None ,
use_internal_label: bool = True ,
source : Union[int, str] = 0 ,
default_value : Union[List[float], Tuple[float, ...]] = (0.0, 0.0, 0.0, 0.0),
parent : Union[int, str] = 13 ,
**kwargs ,
) -> None:
super().__init__(
label=label,
user_data=user_data,
use_internal_label=use_internal_label,
source=source,
default_value=default_value,
parent=parent,
**kwargs,
)
class FloatVectValue(Widget):
"""Adds a float vect value.
Args:
label (str, optional): Overrides 'name' as label.
user_data (Any, optional): User data for callbacks
use_internal_label (bool, optional): Use generated internal label instead of user specified (appends ### uuid).
tag (Union[int, str], optional): Unique id used to programmatically refer to the item. If label is unused this will be the label.
source (Union[int, str], optional): Overrides 'id' as value storage key.
default_value (Union[List[float], Tuple[float, ...]], optional):
parent (Union[int, str], optional): Parent to add this item to. (runtime adding)
id (Union[int, str], optional): (deprecated)
Returns:
Union[int, str]
"""
label : str = ItemAttribute("configuration", "get_item_config", "set_item_config", None)
user_data : Any = ItemAttribute("configuration", "get_item_config", "set_item_config", None)
use_internal_label: bool = ItemAttribute("configuration", "get_item_config", "set_item_config", None)
source : Union[int, str] = ItemAttribute("configuration", "get_item_config", "set_item_config", None)
default_value : Union[List[float], Tuple[float, ...]] = ItemAttribute('information', 'get_item_cached', None, None)
value : Union[List[float], Tuple[float, ...]] = ItemAttribute("configuration", "get_item_value", "set_item_value", "default_value")
_is_container : bool = False
_is_root_item : bool = False
_is_value_able : bool = True
_unique_parents : tuple = ('ValueRegistry',)
_unique_children : tuple = ()
_unique_commands : tuple = ()
_unique_constants : tuple = ('mvFloatVectValue',)
_command : Callable = dearpygui.add_float_vect_value
def __init__(
self ,
label : str = None,
user_data : Any = None,
use_internal_label: bool = True,
source : Union[int, str] = 0 ,
default_value : Union[List[float], Tuple[float, ...]] = () ,
parent : Union[int, str] = 13 ,
**kwargs ,
) -> None:
super().__init__(
label=label,
user_data=user_data,
use_internal_label=use_internal_label,
source=source,
default_value=default_value,
parent=parent,
**kwargs,
)
class SeriesValue(Widget):
"""Adds a plot series value.
Args:
label (str, optional): Overrides 'name' as label.
user_data (Any, optional): User data for callbacks
use_internal_label (bool, optional): Use generated internal label instead of user specified (appends ### uuid).
tag (Union[int, str], optional): Unique id used to programmatically refer to the item. If label is unused this will be the label.
source (Union[int, str], optional): Overrides 'id' as value storage key.
default_value (Any, optional):
parent (Union[int, str], optional): Parent to add this item to. (runtime adding)
id (Union[int, str], optional): (deprecated)
Returns:
Union[int, str]
"""
label : str = ItemAttribute("configuration", "get_item_config", "set_item_config", None)
user_data : Any = ItemAttribute("configuration", "get_item_config", "set_item_config", None)
use_internal_label: bool = ItemAttribute("configuration", "get_item_config", "set_item_config", None)
source : Union[int, str] = ItemAttribute("configuration", "get_item_config", "set_item_config", None)
default_value : Any = ItemAttribute('information', 'get_item_cached', None, None)
value : Any = ItemAttribute("configuration", "get_item_value", "set_item_value", "default_value")
_is_container : bool = False
_is_root_item : bool = False
_is_value_able : bool = True
_unique_parents : tuple = ('ValueRegistry',)
_unique_children : tuple = ()
_unique_commands : tuple = ()
_unique_constants : tuple = ('mvSeriesValue',)
_command : Callable = dearpygui.add_series_value
def __init__(
self ,
label : str = None,
user_data : Any = None,
use_internal_label: bool = True,
source : Union[int, str] = 0 ,
default_value : Any = () ,
parent : Union[int, str] = 13 ,
**kwargs ,
) -> None:
super().__init__(
label=label,
user_data=user_data,
use_internal_label=use_internal_label,
source=source,
default_value=default_value,
parent=parent,
**kwargs,
)
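Each of the eleven wrapper classes above delegates to the matching dearpygui add_*_value command and stores its value in a ValueRegistry. The raw calls they wrap look roughly like this (a sketch against the dearpygui 1.x API referenced above; minor details may differ by version):
# Sketch of the underlying dearpygui calls these wrappers delegate to
# (assumes dearpygui 1.x; exact context-management details may vary).
import dearpygui.dearpygui as dpg

dpg.create_context()
with dpg.value_registry():
    # same defaults the IntValue/Float4Value wrappers above pass through
    int_val = dpg.add_int_value(default_value=0)
    float4_val = dpg.add_float4_value(default_value=(0.0, 0.0, 0.0, 0.0))
print(dpg.get_value(int_val))   # -> 0
dpg.set_value(int_val, 42)      # what the wrappers' `value` attribute does
dpg.destroy_context()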
| 62.579119
| 147
| 0.42014
| 2,964
| 38,361
| 5.176113
| 0.042173
| 0.045887
| 0.063095
| 0.118303
| 0.935797
| 0.931169
| 0.931169
| 0.928888
| 0.92237
| 0.91872
| 0
| 0.004173
| 0.500274
| 38,361
| 612
| 148
| 62.681373
| 0.79614
| 0.204922
| 0
| 0.791457
| 0
| 0
| 0.104522
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.027638
| false
| 0
| 0.007538
| 0
| 0.449749
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 8
| b2ce779efb8dbd435eb3f8f8315d1d91ec804a5b
| 46,629
| py
| Python
| test/integration/clueweb09.py
| eugene-yang/ir_datasets
| 2b5a42edfb9ab8c4ee8f11674ffe14d60f41ec1e
| ["Apache-2.0"] | null | null | null
| test/integration/clueweb09.py
| eugene-yang/ir_datasets
| 2b5a42edfb9ab8c4ee8f11674ffe14d60f41ec1e
| ["Apache-2.0"] | null | null | null
| test/integration/clueweb09.py
| eugene-yang/ir_datasets
| 2b5a42edfb9ab8c4ee8f11674ffe14d60f41ec1e
| ["Apache-2.0"] | null | null | null
|
import re
import unittest
import ir_datasets
from ir_datasets.datasets.clueweb09 import TrecWebTrackQuery, TrecPrel
from ir_datasets.formats import TrecQrel, TrecSubtopic, GenericDoc, GenericQuery, WarcDoc
from .base import DatasetIntegrationTest
_logger = ir_datasets.log.easy()
class TestClueWeb09(DatasetIntegrationTest):
def test_clueweb09_docs(self):
self._test_docs('clueweb09', items={
0: WarcDoc('clueweb09-ar0000-00-00000', 'http://0098shop.com/product_"EH24_A\'1C3_Forex_(\'2\'131E\'�G.html', '2009-03-84T15:35:08-0700', re.compile(b'^HTTP/1\\.1 200 OK\nServer: Apache/2\\.2\\.11 \\(Unix\\) mod_ssl/2\\.2\\.11 OpenSSL/0\\.9\\.8b DAV/2 mod_auth_passthroug.{92}\nConnection: close\nContent\\-Type: text/html\nDate: Fri, 27 Feb 2009 16:04:39 GMT\nContent\\-Length: 38889$', flags=16), re.compile(b'^<meta httpequiv=Content\\-Type content="text/html; charset=utf\\-8"><meta httpequiv=Content\\-Type content.{38691}m3d\'\\);\\\r\n</SCRIPT>\\\r\n</span>\n\\\t<span id=\'HAM3D_counter\' class=\'HAM3D_hidden\'></span>\n</body>\n</html>\n\n\n$', flags=16), 'text/html'),
9: WarcDoc('clueweb09-ar0000-00-00009', 'http://00perdomain.com/kids_and_teens/international/arabic/', '2009-03-84T15:35:08-0700', re.compile(b'^HTTP/1\\.1 200 OK\nContent\\-Type: text/html; charset=utf\\-8\nDate: Mon, 09 Feb 2009 12:41:10 GMT\nPragma: n.{145} sid=iorj3059uaka0isae61uh29494; path=/\nExpires: Thu, 19 Nov 1981 08:52:00 GMT\nContent\\-Length: 28444$', flags=16), re.compile(b'^\n\n\n<!\\-\\- Site by Zaz Corporation, 1 \\- 8 8 8 \\- 2 \\- Z A Z C O R http://www\\.zazcorp\\.us \\-\\->\n\n\n\n<html><he.{28246}<script type="text/javascript">\n_uacct = "UA\\-488717\\-2";\nurchinTracker\\(\\);\n</script>\n\n</body></html>\n\n$', flags=16), 'text/html'),
1000: WarcDoc('clueweb09-ar0000-00-01000', 'http://213.136.192.26/311276/ln59d.htm', '2009-03-84T15:35:13-0700', re.compile(b'^HTTP/1\\.1 200 OK\nAccept\\-Ranges: bytes\nContent\\-Type: text/html\nDate: Wed, 28 Jan 2009 18:47:22 GMT\nHos.{85}: close\nLast\\-Modified: Thu, 08 Jan 2009 00:28:36 GMT\nETag: "548bbb2871c91:a76"\nContent\\-Length: 65536$', flags=16), re.compile(b'^<html>\\\r\n <head>\\\r\n <META http\\-equiv="Content\\-Type" content="text/html; charset=windows\\-1256">\\\r\n .{65338}\n <font face="Simplified Arabic" color="\\#4D5064">\\\r\n\n\n$', flags=16), 'text/html'),
})
self._test_docs('clueweb09/ar', items={
0: WarcDoc('clueweb09-ar0000-00-00000', 'http://0098shop.com/product_"EH24_A\'1C3_Forex_(\'2\'131E\'�G.html', '2009-03-84T15:35:08-0700', re.compile(b'^HTTP/1\\.1 200 OK\nServer: Apache/2\\.2\\.11 \\(Unix\\) mod_ssl/2\\.2\\.11 OpenSSL/0\\.9\\.8b DAV/2 mod_auth_passthroug.{92}\nConnection: close\nContent\\-Type: text/html\nDate: Fri, 27 Feb 2009 16:04:39 GMT\nContent\\-Length: 38889$', flags=16), re.compile(b'^<meta httpequiv=Content\\-Type content="text/html; charset=utf\\-8"><meta httpequiv=Content\\-Type content.{38691}m3d\'\\);\\\r\n</SCRIPT>\\\r\n</span>\n\\\t<span id=\'HAM3D_counter\' class=\'HAM3D_hidden\'></span>\n</body>\n</html>\n\n\n$', flags=16), 'text/html'),
9: WarcDoc('clueweb09-ar0000-00-00009', 'http://00perdomain.com/kids_and_teens/international/arabic/', '2009-03-84T15:35:08-0700', re.compile(b'^HTTP/1\\.1 200 OK\nContent\\-Type: text/html; charset=utf\\-8\nDate: Mon, 09 Feb 2009 12:41:10 GMT\nPragma: n.{145} sid=iorj3059uaka0isae61uh29494; path=/\nExpires: Thu, 19 Nov 1981 08:52:00 GMT\nContent\\-Length: 28444$', flags=16), re.compile(b'^\n\n\n<!\\-\\- Site by Zaz Corporation, 1 \\- 8 8 8 \\- 2 \\- Z A Z C O R http://www\\.zazcorp\\.us \\-\\->\n\n\n\n<html><he.{28246}<script type="text/javascript">\n_uacct = "UA\\-488717\\-2";\nurchinTracker\\(\\);\n</script>\n\n</body></html>\n\n$', flags=16), 'text/html'),
1000: WarcDoc('clueweb09-ar0000-00-01000', 'http://213.136.192.26/311276/ln59d.htm', '2009-03-84T15:35:13-0700', re.compile(b'^HTTP/1\\.1 200 OK\nAccept\\-Ranges: bytes\nContent\\-Type: text/html\nDate: Wed, 28 Jan 2009 18:47:22 GMT\nHos.{85}: close\nLast\\-Modified: Thu, 08 Jan 2009 00:28:36 GMT\nETag: "548bbb2871c91:a76"\nContent\\-Length: 65536$', flags=16), re.compile(b'^<html>\\\r\n <head>\\\r\n <META http\\-equiv="Content\\-Type" content="text/html; charset=windows\\-1256">\\\r\n .{65338}\n <font face="Simplified Arabic" color="\\#4D5064">\\\r\n\n\n$', flags=16), 'text/html'),
})
self._test_docs('clueweb09/zh', items={
0: WarcDoc('clueweb09-zh0000-00-00000', 'http://000027.istock.jrj.com.cn/', '2009-03-77T19:52:11-0700', re.compile(b'^HTTP/1\\.1 200 OK\nContent\\-Type: text/html; charset=GBK\nContent\\-Encoding: gzip\nVary: Accept\\-Encoding\nDa.{63}ose\nSet\\-Cookie: JSESSIONID=aWnxiSnSp6rg; path=/\nContent\\-Language: zh\\-CN, zh\\-CN\nContent\\-Length: 65536$', flags=16), re.compile(b'^\\\r\n\\\r\n\\\r\n\\\r\n\\\r\n\\\r\n<!DOCTYPE html PUBLIC "\\-//W3C//DTD XHTML 1\\.0 Transitional//EN" "http://www\\.w3\\.org/TR/xht.{65338}\\.com\\.cn/component/editor/imgs/B012\\.gif" height="30" width="30" onclick="oblog_InsertImg\\(this\\.src\\)"\n\n$', flags=16), 'text/html'),
9: WarcDoc('clueweb09-zh0000-00-00009', 'http://000078.istock.jrj.com.cn/forum000078/topic1001345.html', '2009-03-77T19:52:11-0700', re.compile(b'^HTTP/1\\.1 200 OK\nContent\\-Type: text/html; charset=GBK\nContent\\-Encoding: gzip\nVary: Accept\\-Encoding\nDa.{63}ose\nSet\\-Cookie: JSESSIONID=bbPnYVFWx8D6; path=/\nContent\\-Language: zh\\-CN, zh\\-CN\nContent\\-Length: 26075$', flags=16), re.compile(b'^\\\r\n\\\r\n\\\r\n\\\r\n\\\r\n<!DOCTYPE html PUBLIC "\\-//W3C//DTD XHTML 1\\.0 Transitional//EN" "http://www\\.w3\\.org/TR/xhtml.{25877}lt="" />\\\r\n</body>\\\r\n<script src="http://istock\\.jrj\\.com\\.cn/includes/js/tongji\\.js"></script>\\\r\n</html>\n\n$', flags=16), 'text/html'),
1000: WarcDoc('clueweb09-zh0000-00-01000', 'http://119go.com/shop/index.htm', '2009-03-77T19:52:15-0700', re.compile(b'^HTTP/1\\.1 200 OK\nContent\\-Type: text/html\nAccept\\-Ranges: bytes\nDate: Fri, 09 Jan 2009 21:54:09 GMT\nSer.{56} close\nLast\\-Modified: Fri, 19 Sep 2008 01:50:00 GMT\nETag: "054996fa19c91:337a"\nContent\\-Length: 18005$', flags=16), re.compile(b'^<!DOCTYPE HTML PUBLIC "\\-//W3C//DTD HTML 4\\.01 Transitional//EN"\\\r\n"http://www\\.w3\\.org/TR/html4/loose\\.dt.{17807}UA\\-105821\\-1";urchinTracker\\(\\);</script><!\\-\\-\\#include virtual="/include/ads\\.asp"\\-\\-></body>\\\r\n</html>\\\r\n\n\n$', flags=16), 'text/html'),
})
self._test_docs('clueweb09/en', items={
0: WarcDoc('clueweb09-en0000-00-00000', 'http://00000-nrt-realestate.homepagestartup.com/', '2009-03-65T08:43:19-0800', re.compile(b'^HTTP/1\\.1 200 OK\nContent\\-Type: text/html\nDate: Tue, 13 Jan 2009 18:05:10 GMT\nPragma: no\\-cache\nCache\\-C.{100}Modified: Tue, 13 Jan 2009 18:05:10 GMT\nExpires: Mon, 20 Dec 1998 01:00:00 GMT\nContent\\-Length: 16254$', flags=16), re.compile(b'^<head> <meta http\\-equiv="Content\\-Language" content="en\\-gb"> <meta http\\-equiv="Content\\-Type" content=.{16056} 8pt">YouTube Videos</span></td> </tr> </table> </td> </tr> </table></div> </div> </body> </html> \n\n$', flags=16), 'text/html'),
9: WarcDoc('clueweb09-en0000-00-00009', 'http://00perdomain.com/computers/', '2009-03-65T08:43:20-0800', re.compile(b'^HTTP/1\\.1 200 OK\nContent\\-Type: text/html; charset=utf\\-8\nDate: Sat, 17 Jan 2009 23:40:59 GMT\nPragma: n.{145} sid=i35idajmde65hlem4m2jpmrc37; path=/\nExpires: Thu, 19 Nov 1981 08:52:00 GMT\nContent\\-Length: 23500$', flags=16), re.compile(b'^\n\n\n<!\\-\\- Site by Zaz Corporation, 1 \\- 8 8 8 \\- 2 \\- Z A Z C O R http://www\\.zazcorp\\.us \\-\\->\n\n\n\n<html><he.{23302}<script type="text/javascript">\n_uacct = "UA\\-488717\\-2";\nurchinTracker\\(\\);\n</script>\n\n</body></html>\n\n$', flags=16), 'text/html'),
1000: WarcDoc('clueweb09-en0000-00-01000', 'http://2modern.com/designer/FLOS/Flos-Archimoon-Soft-Table-Lamp', '2009-03-65T08:44:07-0800', re.compile(b'^HTTP/1\\.1 200 OK\nContent\\-Type: text/html; charset=utf\\-8\nKeep\\-Alive: timeout=15, max=965\nContent\\-Encod.{359}4Pa38Ta38Nb350; path=/\nLast\\-Modified: Tue, 13 Jan 2009 21:10:47 GMT\nExpires: 0\nContent\\-Length: 52741$', flags=16), re.compile(b'^\n<html>\n<head>\n<meta http\\-equiv="Content\\-Type" content="text/html; charset=UTF\\-8">\n<title>FLOS \\- Arc.{52543}\\- \\[ 418126 \\] \\[ \\] \\[ /s\\.nl \\] \\[ Tue Jan 13 13:10:47 PST 2009 \\] \\-\\->\n<!\\-\\- Not logging slowest SQL \\-\\->\n\n\n$', flags=16), 'text/html'),
})
self._test_docs('clueweb09/fr', items={
0: WarcDoc('clueweb09-fr0000-00-00000', 'http://0-charmedgallery.skyrock.com/', '2009-03-85T01:34:33-0700', re.compile(b'^HTTP/1\\.1 200 OK\nContent\\-Type: text/html\nContent\\-Encoding: gzip\nP3P: CP="NOI DSP COR CURa DEVa PSAa O.{328}xpires: Sun, 08 Feb 2009 15:13:41 GMT\nETag: "92c422f2ea325185cfadb9a9088b2b1f"\nContent\\-Length: 53898$', flags=16), re.compile(b'^<!DOCTYPE html PUBLIC "\\-//W3C//DTD XHTML 1\\.0 Strict//EN" "http://www\\.w3\\.org/TR/xhtml1/DTD/xhtml1\\-str.{53700}ascript" src="http://pagead2\\.googlesyndication\\.com/pagead/show_ads\\.js"></script>\n\n\n</body>\n</html>\n\n$', flags=16), 'text/html'),
9: WarcDoc('clueweb09-fr0000-00-00009', 'http://000221.skyrock.com/', '2009-03-85T01:34:33-0700', re.compile(b'^HTTP/1\\.1 200 OK\nContent\\-Type: text/html\nContent\\-Encoding: gzip\nP3P: CP="NOI DSP COR CURa DEVa PSAa O.{329}xpires: Fri, 13 Feb 2009 21:03:18 GMT\nETag: "bc2011cb3798d5b21d8df1709d7270f1"\nContent\\-Length: 31594$', flags=16), re.compile(b'^<!DOCTYPE html PUBLIC "\\-//W3C//DTD XHTML 1\\.0 Strict//EN" "http://www\\.w3\\.org/TR/xhtml1/DTD/xhtml1\\-str.{31396}ascript" src="http://pagead2\\.googlesyndication\\.com/pagead/show_ads\\.js"></script>\n\n\n</body>\n</html>\n\n$', flags=16), 'text/html'),
1000: WarcDoc('clueweb09-fr0000-00-01000', 'http://123maigrir.com.ivchost3.com/pratique/institut/institut2.htm', '2009-03-85T01:34:39-0700', re.compile(b'^HTTP/1\\.1 200 OK\nContent\\-Type: text/html\nAccept\\-Ranges: bytes\nDate: Thu, 29 Jan 2009 16:25:21 GMT\nSer.{95}ose\nLast\\-Modified: Fri, 15 Jun 2007 23:09:10 GMT\nETag: "18441c9\\-adbe\\-46731c16"\nContent\\-Length: 44478$', flags=16), re.compile(b'^<html><!\\-\\- \\#BeginTemplate "/Templates/page_menu_complet\\.dwt" \\-\\-><!\\-\\- DW6 \\-\\->\\\r\n<head>\\\r\n<script langua.{44280}"279" align="center">\\ </td>\\\r\n </tr>\\\r\n</table>\\\r\n\\\r\n</body>\\\r\n\\\r\n<!\\-\\- \\#EndTemplate \\-\\-></html>\\\r\n\\\r\n\n\n$', flags=16), 'text/html'),
})
self._test_docs('clueweb09/de', items={
0: WarcDoc('clueweb09-de0000-00-00000', 'http://00perdomain.com/kids_and_teens/international/deutsch/', '2009-03-84T23:38:37-0700', re.compile(b'^HTTP/1\\.1 200 OK\nContent\\-Type: text/html; charset=utf\\-8\nDate: Sun, 08 Feb 2009 06:17:05 GMT\nPragma: n.{144}: sid=g3njt512js43a04j8vekkt0627; path=/\nExpires: Thu, 19 Nov 1981 08:52:00 GMT\nContent\\-Length: 9942$', flags=16), re.compile(b'^\n\n\n<!\\-\\- Site by Zaz Corporation, 1 \\- 8 8 8 \\- 2 \\- Z A Z C O R http://www\\.zazcorp\\.us \\-\\->\n\n\n\n<html><he.{9744}<script type="text/javascript">\n_uacct = "UA\\-488717\\-2";\nurchinTracker\\(\\);\n</script>\n\n</body></html>\n\n$', flags=16), 'text/html'),
9: WarcDoc('clueweb09-de0000-00-00009', 'http://00perdomain.com/world/deutsch/computer/', '2009-03-84T23:38:37-0700', re.compile(b'^HTTP/1\\.1 200 OK\nContent\\-Type: text/html; charset=utf\\-8\nDate: Tue, 03 Feb 2009 03:48:40 GMT\nCache\\-Con.{145} sid=fit0t220fh9vpsefik8l628866; path=/\nExpires: Thu, 19 Nov 1981 08:52:00 GMT\nContent\\-Length: 21957$', flags=16), re.compile(b'^\n\n\n<!\\-\\- Site by Zaz Corporation, 1 \\- 8 8 8 \\- 2 \\- Z A Z C O R http://www\\.zazcorp\\.us \\-\\->\n\n\n\n<html><he.{21759}<script type="text/javascript">\n_uacct = "UA\\-488717\\-2";\nurchinTracker\\(\\);\n</script>\n\n</body></html>\n\n$', flags=16), 'text/html'),
1000: WarcDoc('clueweb09-de0000-00-01000', 'http://1689494.rc-welt.com/', '2009-03-84T23:38:43-0700', re.compile(b'^HTTP/1\\.1 200 OK\nContent\\-Type: text/html; charset=iso\\-8859\\-1\nContent\\-Encoding: gzip\nDate: Mon, 26 Jan.{332}Modified: Mon, 26 Jan 2009 01:42:05 GMT\nExpires: Mon, 26 Jan 2009 01:42:05 GMT\nContent\\-Length: 38206$', flags=16), re.compile(b'^ <!DOCTYPE HTML PUBLIC "\\-//W3C//DTD HTML 4\\.01 Transitional//EN"> <html> <head> <meta http\\-equiv="Co.{38008}"0" height="0" scrolling="no" name="dynamicFrame" src="RCheartbeat\\.php" ></iframe> </body> </html>\n\n$', flags=16), 'text/html'),
})
self._test_docs('clueweb09/it', items={
0: WarcDoc('clueweb09-it0000-00-00000', 'http://00lucianoligabue00.giovani.it/', '2009-03-84T17:45:02-0700', re.compile(b'^HTTP/1\\.1 200 OK\nContent\\-Type: text/html\nContent\\-Encoding: gzip\nVary: Host,Accept\\-Encoding,User\\-Agent.{248}ec0cfa05ca1; path=/; domain=\\.giovani\\.it\nExpires: Thu, 19 Nov 1981 08:52:00 GMT\nContent\\-Length: 12895$', flags=16), re.compile(b'^<!DOCTYPE html PUBLIC "\\-//W3C//DTD XHTML 1\\.0 Transitional//EN" "http://www\\.w3\\.org/TR/xhtml1/DTD/xhtm.{12697}avascript">\n_uacct = "UA\\-746038\\-2";\n_udn="giovani\\.it";\nurchinTracker\\(\\);\n</script>\n</body>\n</html>\n\n\n$', flags=16), 'text/html'),
9: WarcDoc('clueweb09-it0000-00-00009', 'http://00perdomain.com/world/furlan/sal%c3%bbt/', '2009-03-84T17:45:02-0700', re.compile(b'^HTTP/1\\.1 200 OK\nContent\\-Type: text/html; charset=utf\\-8\nDate: Sun, 08 Feb 2009 06:13:46 GMT\nCache\\-Con.{144}: sid=bqreqd4mrjrm3fi30405ldfnq6; path=/\nExpires: Thu, 19 Nov 1981 08:52:00 GMT\nContent\\-Length: 9794$', flags=16), re.compile(b'^\n\n\n<!\\-\\- Site by Zaz Corporation, 1 \\- 8 8 8 \\- 2 \\- Z A Z C O R http://www\\.zazcorp\\.us \\-\\->\n\n\n\n<html><he.{9596}<script type="text/javascript">\n_uacct = "UA\\-488717\\-2";\nurchinTracker\\(\\);\n</script>\n\n</body></html>\n\n$', flags=16), 'text/html'),
1000: WarcDoc('clueweb09-it0000-00-01000', 'http://aaronsummers.ifrance.com/new/socci/', '2009-03-84T17:45:07-0700', re.compile(b'^HTTP/1\\.1 200 OK\nContent\\-Type: text/html; charset=ISO\\-8859\\-1\nAccept\\-Ranges: bytes\nDate: Tue, 10 Feb 2.{70}lose\nLast\\-Modified: Wed, 10 Oct 2007 19:43:32 GMT\nETag: "804131\\-3d59\\-b356a100"\nContent\\-Length: 16483$', flags=16), re.compile(b'^<script language="Javascript" type="text/javascript">\n<!\\-\\-\nvar d=new Date; rnd=d\\.getDay\\(\\)\\+\'\\-\'\\+d\\.getH.{16285}\'<sc\'\\+\'ript src="http://js\\-perso\\.ifrance\\.com/js2\\.php\\?\'\\+rnd\\+\'"><\'\\+\'/sc\'\\+\'ript>\'\\);\n// \\-\\->\n</script>\n\n\n$', flags=16), 'text/html'),
})
self._test_docs('clueweb09/ja', items={
0: WarcDoc('clueweb09-ja0000-00-00000', 'http://00077.web.fc2.com/', '2009-03-85T02:09:10-0700', re.compile(b'^HTTP/1\\.1 200 OK\nAccept\\-Ranges: bytes\nContent\\-Type: text/html\nDate: Tue, 13 Jan 2009 21:29:20 GMT\nSer.{98}ection: close\nLast\\-Modified: Tue, 18 Nov 2008 14:07:53 GMT\nContent\\-Language: en\nContent\\-Length: 9911$', flags=16), re.compile(b'^<!DOCTYPE html PUBLIC "\\-//W3C//DTD XHTML 1\\.0 Transitional//EN"\n "http://www\\.w3\\.org/TR/xhtml1/DTD/xht.{9713}_cs \\+ \'\\&dm=\' \\+ fhp_dm \\+ \'"><\' \\+ \'/script>\';\ndocument\\.write\\(fhp_wt\\);\n//\\-\\-></script></body>\n</html>\n\n\n$', flags=16), 'text/html'),
9: WarcDoc('clueweb09-ja0000-00-00009', 'http://0095.jp/', '2009-03-85T02:09:10-0700', b'HTTP/1.1 200 OK\nContent-Type: text/html\nDate: Thu, 08 Jan 2009 04:28:30 GMT\nServer: Apache/1.3.39 (Unix)\nX-Powered-By: PHP/5.2.8\nConnection: close\nContent-Length: 20822', re.compile(b'^<!DOCTYPE html PUBLIC "\\-//W3C//DTD XHTML 1\\.0 Transitional//EN" "http://www\\.w3\\.org/TR/xhtml1/DTD/xhtm.{20624}r>\n\xa1\xa1\xa1\xa1\xa1\xa1\xa1\xa1\\©2009\\ <a href="http://0095\\.jp/">\xa5\xad\xa5\xa6\xa5\xa4\xb6\xe6\xb3\xda\xc9\xf4</a></div>\n</div>\n</body>\n</html>\n\n\n$', flags=16), 'text/html'),
1000: WarcDoc('clueweb09-ja0000-00-01000', 'http://1bunting.info/sanc3/0066/020/', '2009-03-85T02:09:15-0700', re.compile(b'^HTTP/1\\.1 200 OK\nAccept\\-Ranges: bytes\nContent\\-Type: text/html\nDate: Wed, 07 Jan 2009 12:39:57 GMT\nSer.{145}ast\\-Modified: Sun, 04 Jan 2009 02:38:48 GMT\nETag: "64d86f0\\-2d98\\-45f9f12e0be00"\nContent\\-Length: 11672$', flags=16), re.compile(b'^<!DOCTYPE html PUBLIC "\\-//W3C//DTD XHTML 1\\.0 Transitional//EN" "http://www\\.w3\\.org/TR/xhtml1/DTD/xhtm.{11474}en\\.height\\+"\\&"\\);\ndocument\\.write\\("color="\\+screen\\.colorDepth\\+"\'>"\\);\n// \\-\\->\n</SCRIPT>\n</body>\n</html>\n\n\n$', flags=16), 'text/html'),
})
self._test_docs('clueweb09/ko', items={
0: WarcDoc('clueweb09-ko0000-00-00000', 'http://00perdomain.com/kids_and_teens/international/korean/', '2009-03-84T16:00:51-0700', re.compile(b'^HTTP/1\\.1 200 OK\nContent\\-Type: text/html; charset=utf\\-8\nDate: Wed, 11 Feb 2009 16:27:15 GMT\nPragma: n.{145} sid=6eoehtb6uqtbtc5i6j1ohsht55; path=/\nExpires: Thu, 19 Nov 1981 08:52:00 GMT\nContent\\-Length: 10481$', flags=16), re.compile(b'^\n\n\n<!\\-\\- Site by Zaz Corporation, 1 \\- 8 8 8 \\- 2 \\- Z A Z C O R http://www\\.zazcorp\\.us \\-\\->\n\n\n\n<html><he.{10283}<script type="text/javascript">\n_uacct = "UA\\-488717\\-2";\nurchinTracker\\(\\);\n</script>\n\n</body></html>\n\n$', flags=16), 'text/html'),
9: WarcDoc('clueweb09-ko0000-00-00009', 'http://00perdomain.com/world/korean/%ea%b1%b4%ea%b0%95,%ec%9d%98%ed%95%99/%ec%a7%88%eb%b3%91,%ec%a7%88%ed%99%98/', '2009-03-84T16:00:51-0700', re.compile(b'^HTTP/1\\.1 200 OK\nContent\\-Type: text/html; charset=utf\\-8\nDate: Fri, 27 Feb 2009 20:07:39 GMT\nCache\\-Con.{145} sid=6s444g9ae4nv9uk4mfbb9e2dv3; path=/\nExpires: Thu, 19 Nov 1981 08:52:00 GMT\nContent\\-Length: 18781$', flags=16), re.compile(b'^\n\n\n<!\\-\\- Site by Zaz Corporation, 1 \\- 8 8 8 \\- 2 \\- Z A Z C O R http://www\\.zazcorp\\.us \\-\\->\n\n\n\n<html><he.{18583}<script type="text/javascript">\n_uacct = "UA\\-488717\\-2";\nurchinTracker\\(\\);\n</script>\n\n</body></html>\n\n$', flags=16), 'text/html'),
1000: WarcDoc('clueweb09-ko0000-00-01000', 'http://208.70.77.133/9ea771f89c09d26fdd4f9309023b.html', '2009-03-84T16:00:55-0700', b'HTTP/1.1 200 OK\nContent-Type: text/html; charset=EUC-KR\nDate: Sat, 17 Jan 2009 08:48:54 GMT\nServer: Apache/2.0.52 (CentOS)\nX-Powered-By: PHP/5.1.6\nConnection: close\nContent-Length: 14595', re.compile(b'^<!DOCTYPE HTML PUBLIC "\\-//W3C//DTD HTML 4\\.0 Transitional//EN">\n<html>\n<head>\n<title>\xc1\xea\xbd\xc3\xc6\xae\xb7\xb9\xc0\xcc\xb4\xd7\xba\xb9 \xc1.{14397}:<a href="mailto:webmaster@208\\.70\\.77\\.133">\xc0\xa5\xb8\xb6\xbd\xba\xc5\xcd</a>\n</center>\n<body>\n</html>\n<!\\-\\- 0\\.110781 \\-\\->\n\n\n$', flags=16), 'text/html'),
})
self._test_docs('clueweb09/pt', items={
0: WarcDoc('clueweb09-pt0000-00-00000', 'http://005bom.ezdir.net/', '2009-03-84T20:11:48-0700', re.compile(b'^HTTP/1\\.1 200 OK\nServer: Apache/1\\.3\\.37 \\(Unix\\) mod_auth_passthrough/1\\.8 mod_log_bytes/1\\.2 mod_bwlimite.{91}Content\\-Type: text/html; charset=iso\\-8859\\-1\nDate: Wed, 18 Feb 2009 23:32:21 GMT\nContent\\-Length: 1970$', flags=16), re.compile(b'^<HTML>\n<HEAD>\n<META NAME="Generator" CONTENT="ezDIR \\- www\\.ezdir\\.net">\n<META NAME="Description" CONTE.{1772}/ezdir\\.inweb\\.adm\\.br/ezdir/anuncie\\.php">Clique aqui</A></FONT>\n</TD></TR>\n</TABLE>\n</BODY>\n</HTML>\n\n\n$', flags=16), 'text/html'),
9: WarcDoc('clueweb09-pt0000-00-00009', 'http://00perdomain.com/world/portugu%c3%aas/artes/artesanato/', '2009-03-84T20:11:48-0700', re.compile(b'^HTTP/1\\.1 200 OK\nContent\\-Type: text/html; charset=utf\\-8\nDate: Thu, 12 Feb 2009 05:26:23 GMT\nCache\\-Con.{145} sid=8j75130i6a5iibrmaq85ji1753; path=/\nExpires: Thu, 19 Nov 1981 08:52:00 GMT\nContent\\-Length: 18361$', flags=16), re.compile(b'^\n\n\n<!\\-\\- Site by Zaz Corporation, 1 \\- 8 8 8 \\- 2 \\- Z A Z C O R http://www\\.zazcorp\\.us \\-\\->\n\n\n\n<html><he.{18163}<script type="text/javascript">\n_uacct = "UA\\-488717\\-2";\nurchinTracker\\(\\);\n</script>\n\n</body></html>\n\n$', flags=16), 'text/html'),
1000: WarcDoc('clueweb09-pt0000-00-01000', 'http://1001gatos.org/john-wood-saiu-da-microsoft-para-mudar-o-mundo/', '2009-03-84T20:11:55-0700', re.compile(b'^HTTP/1\\.1 200 OK\nContent\\-Type: text/html; charset=UTF\\-8\nDate: Sun, 08 Feb 2009 00:43:44 GMT\nServer: A.{223}, 08\\-Feb\\-2010 00:43:45 GMT; path=/\nX\\-Pingback: http://1001gatos\\.org/xmlrpc\\.php\nContent\\-Length: 40352$', flags=16), re.compile(b'^<!DOCTYPE html PUBLIC "\\-//W3C//DTD XHTML 1\\.0 Strict//EN" "http://www\\.w3\\.org/TR/xhtml1/DTD/xhtml1\\-str.{40154}DynaboxConfig\\?div_nome=dynabox\\&site_origem=8662542\\&cor=cc0000"></script>\\\r\n</body>\\\r\n</html>\n\n$', flags=16), 'text/html'),
})
self._test_docs('clueweb09/es', items={
0: WarcDoc('clueweb09-es0000-00-00000', 'http://00001101.blogspot.com/', '2009-03-81T23:59:24-0700', re.compile(b'^HTTP/1\\.1 200 OK\nContent\\-Type: text/html; charset=UTF\\-8\nDate: Sat, 24 Jan 2009 18:07:26 GMT\nCache\\-Con.{101}ed: Sat, 17 Jan 2009 18:35:52 GMT\nETag: "a341615f\\-6594\\-4524\\-ae74\\-53101f87cdba"\nContent\\-Length: 34361$', flags=16), re.compile(b'^<!DOCTYPE html PUBLIC "\\-//W3C//DTD XHTML 1\\.0 Strict//EN" "http://www\\.w3\\.org/TR/xhtml1/DTD/xhtml1\\-str.{34163}\\.com/fb/images/pub/feed\\-icon16x16\\.png" alt="" style="border:0"/></a></p>\\\r\n</div>\\\r\n</body>\\\r\n</html>\n\n$', flags=16), 'text/html'),
9: WarcDoc('clueweb09-es0000-00-00009', 'http://00perdomain.com/world/espa%c3%b1ol/', '2009-03-81T23:59:24-0700', re.compile(b'^HTTP/1\\.1 200 OK\nContent\\-Type: text/html; charset=utf\\-8\nDate: Thu, 29 Jan 2009 19:26:46 GMT\nCache\\-Con.{144}: sid=javrlcp0o652co0p1gvpttlqe6; path=/\nExpires: Thu, 19 Nov 1981 08:52:00 GMT\nContent\\-Length: 9073$', flags=16), re.compile(b'^\n\n\n<!\\-\\- Site by Zaz Corporation, 1 \\- 8 8 8 \\- 2 \\- Z A Z C O R http://www\\.zazcorp\\.us \\-\\->\n\n\n\n<html><he.{8875}<script type="text/javascript">\n_uacct = "UA\\-488717\\-2";\nurchinTracker\\(\\);\n</script>\n\n</body></html>\n\n$', flags=16), 'text/html'),
1000: WarcDoc('clueweb09-es0000-00-01000', 'http://abrahamzabludovsky.radiotrece.com.mx/2008/11/27/deportes-del-27-de-noviembre-de-2008/', '2009-03-81T23:59:31-0700', re.compile(b'^HTTP/1\\.1 200 OK\nContent\\-Type: text/html; charset=UTF\\-8\nDate: Mon, 09 Feb 2009 08:53:49 GMT\nServer: A.{51}tion: close\nX\\-Pingback: http://abrahamzabludovsky\\.radiotrece\\.com\\.mx/xmlrpc\\.php\nContent\\-Length: 28172$', flags=16), re.compile(b'^<!DOCTYPE html PUBLIC "\\-//W3C//DTD XHTML 1\\.0 Strict//EN" "http://www\\.w3\\.org/TR/xhtml1/DTD/xhtml1\\-str.{27974}UA\\-1620189\\-24"\\);\npageTracker\\._initData\\(\\);\npageTracker\\._trackPageview\\(\\);\n</script>\n</body>\n</html>\n\n\n$', flags=16), 'text/html'),
})
self._test_docs('clueweb09/catb', items={
0: WarcDoc('clueweb09-en0000-00-00000', 'http://00000-nrt-realestate.homepagestartup.com/', '2009-03-65T08:43:19-0800', re.compile(b'^HTTP/1\\.1 200 OK\nContent\\-Type: text/html\nDate: Tue, 13 Jan 2009 18:05:10 GMT\nPragma: no\\-cache\nCache\\-C.{100}Modified: Tue, 13 Jan 2009 18:05:10 GMT\nExpires: Mon, 20 Dec 1998 01:00:00 GMT\nContent\\-Length: 16254$', flags=16), re.compile(b'^<head> <meta http\\-equiv="Content\\-Language" content="en\\-gb"> <meta http\\-equiv="Content\\-Type" content=.{16056} 8pt">YouTube Videos</span></td> </tr> </table> </td> </tr> </table></div> </div> </body> </html> \n\n$', flags=16), 'text/html'),
9: WarcDoc('clueweb09-en0000-00-00009', 'http://00perdomain.com/computers/', '2009-03-65T08:43:20-0800', re.compile(b'^HTTP/1\\.1 200 OK\nContent\\-Type: text/html; charset=utf\\-8\nDate: Sat, 17 Jan 2009 23:40:59 GMT\nPragma: n.{145} sid=i35idajmde65hlem4m2jpmrc37; path=/\nExpires: Thu, 19 Nov 1981 08:52:00 GMT\nContent\\-Length: 23500$', flags=16), re.compile(b'^\n\n\n<!\\-\\- Site by Zaz Corporation, 1 \\- 8 8 8 \\- 2 \\- Z A Z C O R http://www\\.zazcorp\\.us \\-\\->\n\n\n\n<html><he.{23302}<script type="text/javascript">\n_uacct = "UA\\-488717\\-2";\nurchinTracker\\(\\);\n</script>\n\n</body></html>\n\n$', flags=16), 'text/html'),
1000: WarcDoc('clueweb09-en0000-00-01000', 'http://2modern.com/designer/FLOS/Flos-Archimoon-Soft-Table-Lamp', '2009-03-65T08:44:07-0800', re.compile(b'^HTTP/1\\.1 200 OK\nContent\\-Type: text/html; charset=utf\\-8\nKeep\\-Alive: timeout=15, max=965\nContent\\-Encod.{359}4Pa38Ta38Nb350; path=/\nLast\\-Modified: Tue, 13 Jan 2009 21:10:47 GMT\nExpires: 0\nContent\\-Length: 52741$', flags=16), re.compile(b'^\n<html>\n<head>\n<meta http\\-equiv="Content\\-Type" content="text/html; charset=UTF\\-8">\n<title>FLOS \\- Arc.{52543}\\- \\[ 418126 \\] \\[ \\] \\[ /s\\.nl \\] \\[ Tue Jan 13 13:10:47 PST 2009 \\] \\-\\->\n<!\\-\\- Not logging slowest SQL \\-\\->\n\n\n$', flags=16), 'text/html'),
})
self._assert_namedtuple(ir_datasets.load('clueweb09').docs.lookup('clueweb09-en0007-01-40637'), WarcDoc('clueweb09-en0007-01-40637', 'http://www.job-hunt.org/marketing.shtml', '2009-03-65T12:44:49-0800', re.compile(b'HTTP/1.1 200 OK.*44437', flags=16), body=re.compile(b'<!DOCTYPE HTML .*</script> \n\n</BODY>\n\n</HTML>\n\n\n', flags=16), body_content_type='text/html'))
def test_clueweb09_docstore(self):
docstore = ir_datasets.load('clueweb09').docs_store()
docstore.clear_cache()
with _logger.duration('cold fetch'):
result = docstore.get_many(['clueweb09-en0000-00-00003', 'clueweb09-en0000-00-35154', 'clueweb09-ar0000-48-02342'])
self.assertEqual(len(result), 3)
with _logger.duration('warm fetch'):
result = docstore.get_many(['clueweb09-en0000-00-00003', 'clueweb09-en0000-00-35154', 'clueweb09-ar0000-48-02342'])
self.assertEqual(len(result), 3)
docstore = ir_datasets.load('clueweb09').docs_store()
with _logger.duration('warm fetch (new docstore)'):
result = docstore.get_many(['clueweb09-en0000-00-00003', 'clueweb09-en0000-00-35154', 'clueweb09-ar0000-48-02342'])
self.assertEqual(len(result), 3)
with _logger.duration('cold fetch (nearby)'):
result = docstore.get_many(['clueweb09-en0000-00-00023', 'clueweb09-en0000-00-35167', 'clueweb09-ar0000-48-02348'])
self.assertEqual(len(result), 3)
with _logger.duration('cold fetch (earlier)'):
result = docstore.get_many(['clueweb09-en0000-00-00001', 'clueweb09-ar0000-48-00009'])
self.assertEqual(len(result), 2)
def test_clueweb09_queries(self):
self._test_queries('clueweb09/en/trec-web-2009', count=50, items={
0: TrecWebTrackQuery('1', 'obama family tree', "Find information on President Barack Obama's family\n history, including genealogy, national origins, places and dates of\n birth, etc.\n ", 'faceted', (TrecSubtopic(number='1', text='\n Find the TIME magazine photo essay "Barack Obama\'s Family Tree".\n ', type='nav'), TrecSubtopic(number='2', text="\n Where did Barack Obama's parents and grandparents come from?\n ", type='inf'), TrecSubtopic(number='3', text="\n Find biographical information on Barack Obama's mother.\n ", type='inf'))),
9: TrecWebTrackQuery('10', 'cheap internet', "I'm looking for cheap (i.e. low-cost) internet service.\n ", 'faceted', (TrecSubtopic(number='1', text='\n What are some low-cost broadband internet providers?\n ', type='inf'), TrecSubtopic(number='2', text='\n Do any internet providers still sell dial-up?\n ', type='inf'), TrecSubtopic(number='3', text='\n Who can provide inexpensive digital cable television bundled with\n internet service?\n ', type='inf'), TrecSubtopic(number='4', text="\n I'm looking for the Vonage homepage.\n ", type='nav'), TrecSubtopic(number='5', text='\n Find me some providers of free wireless internet access.\n ', type='inf'), TrecSubtopic(number='6', text='\n I want to find cheap DSL providers.\n ', type='inf'), TrecSubtopic(number='7', text='\n Is there a way to get internet access without phone service?\n ', type='inf'), TrecSubtopic(number='8', text="\n Take me to Comcast's homepage.\n ", type='nav'))),
49: TrecWebTrackQuery('50', 'dog heat', 'What is the effect of excessive heat on dogs?\n ', 'ambiguous', (TrecSubtopic(number='1', text='\n What is the effect of excessive heat on dogs?\n ', type='inf'), TrecSubtopic(number='2', text='\n What are symptoms of heat stroke and other heat-related illnesses\n in dogs?\n ', type='inf'), TrecSubtopic(number='3', text='\n Find information on dogs\' reproductive cycle. What does it mean\n when a dog is "in heat"?\n ', type='inf'))),
})
self._test_queries('clueweb09/en/trec-web-2010', count=50, items={
0: TrecWebTrackQuery('51', 'horse hooves', '\n Find information about horse hooves, their care, and diseases of hooves.\n ', 'faceted', (TrecSubtopic(number='1', text="\n Find information about horses' hooves and how to care for them.\n ", type='inf'), TrecSubtopic(number='2', text='\n Find pictures of horse hooves.\n ', type='nav'), TrecSubtopic(number='3', text='\n What are some injuries or diseases of hooves in horses, and how\n are they treated?\n ', type='inf'), TrecSubtopic(number='4', text="\n Describe the anatomy of horses' feet and hooves.\n ", type='inf'), TrecSubtopic(number='5', text='\n Find information on shoeing horses and horseshoe problems.\n ', type='inf'))),
9: TrecWebTrackQuery('60', 'bellevue', '\n Find information about Bellevue, Washington.\n ', 'ambiguous', (TrecSubtopic(number='1', text='\n Find information about Bellevue, Washington.\n ', type='inf'), TrecSubtopic(number='2', text='\n Find information about Bellevue, Nebraska.\n ', type='inf'), TrecSubtopic(number='3', text='\n Find information about Bellevue Hospital Center in New York, NY.\n ', type='inf'), TrecSubtopic(number='4', text='\n Find the homepage of Bellevue University.\n ', type='nav'), TrecSubtopic(number='5', text='\n Find the homepage of Bellevue College, Washington.\n ', type='nav'), TrecSubtopic(number='6', text='\n Find the homepage of Bellevue Hospital Center in New York, NY.\n ', type='nav'))),
49: TrecWebTrackQuery('100', 'rincon puerto rico', '\n Find information about Rincon, Puerto Rico.\n ', 'faceted', (TrecSubtopic(number='1', text='\n Find hotels and beach resorts in Rincon, Puerto Rico.\n ', type='inf'), TrecSubtopic(number='2', text='\n Find information on the history of Rincon, Puerto Rico.\n ', type='inf'), TrecSubtopic(number='3', text='\n Find surf forecasts for Rincon, Puerto Rico.\n ', type='inf'), TrecSubtopic(number='4', text='\n Find pictures of Rincon, Puerto Rico.\n ', type='nav'), TrecSubtopic(number='5', text='\n Find information about real estate and rental properties in\n Rincon, Puerto Rico.\n ', type='inf'))),
})
self._test_queries('clueweb09/en/trec-web-2011', count=50, items={
0: TrecWebTrackQuery('101', 'ritz carlton lake las vegas', '\n Find information about the Ritz Carlton resort at Lake Las Vegas.\n ', 'faceted', (TrecSubtopic(number='1', text='\n Find information about the Ritz Carlton resort at Lake Las Vegas.\n ', type='inf'), TrecSubtopic(number='2', text='\n Find a site where I can determine room price and availability.\n ', type='nav'), TrecSubtopic(number='3', text='\n Find directions to the Ritz Carlton Lake Las Vegas.\n ', type='nav'), TrecSubtopic(number='4', text='\n Find reviews of the Ritz Carlton Lake Las Vegas.\n ', type='inf'))),
9: TrecWebTrackQuery('110', 'map of brazil', '\n What are the boundaries of the political jurisdictions in Brazil?\n ', 'ambiguous', (TrecSubtopic(number='1', text='\n What are the boundaries of the political jurisdictions in Brazil?\n ', type='inf'), TrecSubtopic(number='2', text='\n I am looking for information about taking a vacation trip to Brazil.\n ', type='inf'), TrecSubtopic(number='3', text='\n I want to buy a road map of Brazil.\n ', type='nav'))),
49: TrecWebTrackQuery('150', 'tn highway patrol', '\n What are the requirements to become a Tennessee Highway Patrol State Trooper?\n ', 'faceted', (TrecSubtopic(number='1', text='\n What are the requirements to become a Tennessee Highway Patrol State Trooper?\n ', type='inf'), TrecSubtopic(number='2', text='\n information about the responsibilities of the Tennessee Highway Patrol\n ', type='inf'), TrecSubtopic(number='3', text='\n home page of the Tennessee Highway Patrol\n ', type='nav'), TrecSubtopic(number='4', text='\n I want to fill in the customer satisfaction survey about my interaction with a Tennessee Highway Patrol State Trooper.\n ', type='nav'))),
})
self._test_queries('clueweb09/en/trec-web-2012', count=50, items={
0: TrecWebTrackQuery('151', '403b', '\n What is a 403b plan?\n ', 'faceted', (TrecSubtopic(number='1', text='\n What is a 403b plan?\n ', type='inf'), TrecSubtopic(number='2', text='\n Who is eligible for a 403b plan?\n ', type='inf'), TrecSubtopic(number='3', text='\n What are the rules for a 403b retirement plan?\n ', type='nav'), TrecSubtopic(number='4', text='\n What is the difference between 401k and 403b retirement plans?\n ', type='inf'), TrecSubtopic(number='5', text='\n What are the withdrawal limitations for a 403b retirement plan?\n ', type='nav'))),
9: TrecWebTrackQuery('160', 'grilling', '\n Find kabob recipes.\n ', 'ambiguous', (TrecSubtopic(number='1', text='\n Find kabob recipes.\n ', type='nav'), TrecSubtopic(number='2', text='\n Find tips on grilling vegetables.\n ', type='inf'), TrecSubtopic(number='3', text='\n Find tips on grilling fish.\n ', type='inf'), TrecSubtopic(number='4', text='\n Find instructions for grilling chicken.\n ', type='inf'), TrecSubtopic(number='5', text='\n Find the Grilling Magazine website.\n ', type='nav'), TrecSubtopic(number='6', text='\n Find information on gas barbecue grills and cooking on a gas grill.\n ', type='inf'))),
49: TrecWebTrackQuery('200', 'ontario california airport', '\n Find flight information for the Ontario, CA airport.\n ', 'faceted', (TrecSubtopic(number='1', text='\n Find flight information for the Ontario, CA airport.\n ', type='inf'), TrecSubtopic(number='2', text='\n What hotels are near the Ontario, CA airport?\n ', type='inf'), TrecSubtopic(number='3', text='\n What services/facilities does the Ontario, CA airport offer?\n ', type='inf'), TrecSubtopic(number='4', text='\n What is the address of the Ontario, CA airport?\n ', type='nav'))),
})
self._test_queries('clueweb09/catb/trec-web-2009', count=50, items={
0: TrecWebTrackQuery('1', 'obama family tree', "Find information on President Barack Obama's family\n history, including genealogy, national origins, places and dates of\n birth, etc.\n ", 'faceted', (TrecSubtopic(number='1', text='\n Find the TIME magazine photo essay "Barack Obama\'s Family Tree".\n ', type='nav'), TrecSubtopic(number='2', text="\n Where did Barack Obama's parents and grandparents come from?\n ", type='inf'), TrecSubtopic(number='3', text="\n Find biographical information on Barack Obama's mother.\n ", type='inf'))),
9: TrecWebTrackQuery('10', 'cheap internet', "I'm looking for cheap (i.e. low-cost) internet service.\n ", 'faceted', (TrecSubtopic(number='1', text='\n What are some low-cost broadband internet providers?\n ', type='inf'), TrecSubtopic(number='2', text='\n Do any internet providers still sell dial-up?\n ', type='inf'), TrecSubtopic(number='3', text='\n Who can provide inexpensive digital cable television bundled with\n internet service?\n ', type='inf'), TrecSubtopic(number='4', text="\n I'm looking for the Vonage homepage.\n ", type='nav'), TrecSubtopic(number='5', text='\n Find me some providers of free wireless internet access.\n ', type='inf'), TrecSubtopic(number='6', text='\n I want to find cheap DSL providers.\n ', type='inf'), TrecSubtopic(number='7', text='\n Is there a way to get internet access without phone service?\n ', type='inf'), TrecSubtopic(number='8', text="\n Take me to Comcast's homepage.\n ", type='nav'))),
49: TrecWebTrackQuery('50', 'dog heat', 'What is the effect of excessive heat on dogs?\n ', 'ambiguous', (TrecSubtopic(number='1', text='\n What is the effect of excessive heat on dogs?\n ', type='inf'), TrecSubtopic(number='2', text='\n What are symptoms of heat stroke and other heat-related illnesses\n in dogs?\n ', type='inf'), TrecSubtopic(number='3', text='\n Find information on dogs\' reproductive cycle. What does it mean\n when a dog is "in heat"?\n ', type='inf'))),
})
self._test_queries('clueweb09/catb/trec-web-2010', count=50, items={
0: TrecWebTrackQuery('51', 'horse hooves', '\n Find information about horse hooves, their care, and diseases of hooves.\n ', 'faceted', (TrecSubtopic(number='1', text="\n Find information about horses' hooves and how to care for them.\n ", type='inf'), TrecSubtopic(number='2', text='\n Find pictures of horse hooves.\n ', type='nav'), TrecSubtopic(number='3', text='\n What are some injuries or diseases of hooves in horses, and how\n are they treated?\n ', type='inf'), TrecSubtopic(number='4', text="\n Describe the anatomy of horses' feet and hooves.\n ", type='inf'), TrecSubtopic(number='5', text='\n Find information on shoeing horses and horseshoe problems.\n ', type='inf'))),
9: TrecWebTrackQuery('60', 'bellevue', '\n Find information about Bellevue, Washington.\n ', 'ambiguous', (TrecSubtopic(number='1', text='\n Find information about Bellevue, Washington.\n ', type='inf'), TrecSubtopic(number='2', text='\n Find information about Bellevue, Nebraska.\n ', type='inf'), TrecSubtopic(number='3', text='\n Find information about Bellevue Hospital Center in New York, NY.\n ', type='inf'), TrecSubtopic(number='4', text='\n Find the homepage of Bellevue University.\n ', type='nav'), TrecSubtopic(number='5', text='\n Find the homepage of Bellevue College, Washington.\n ', type='nav'), TrecSubtopic(number='6', text='\n Find the homepage of Bellevue Hospital Center in New York, NY.\n ', type='nav'))),
49: TrecWebTrackQuery('100', 'rincon puerto rico', '\n Find information about Rincon, Puerto Rico.\n ', 'faceted', (TrecSubtopic(number='1', text='\n Find hotels and beach resorts in Rincon, Puerto Rico.\n ', type='inf'), TrecSubtopic(number='2', text='\n Find information on the history of Rincon, Puerto Rico.\n ', type='inf'), TrecSubtopic(number='3', text='\n Find surf forecasts for Rincon, Puerto Rico.\n ', type='inf'), TrecSubtopic(number='4', text='\n Find pictures of Rincon, Puerto Rico.\n ', type='nav'), TrecSubtopic(number='5', text='\n Find information about real estate and rental properties in\n Rincon, Puerto Rico.\n ', type='inf'))),
})
self._test_queries('clueweb09/catb/trec-web-2011', count=50, items={
0: TrecWebTrackQuery('101', 'ritz carlton lake las vegas', '\n Find information about the Ritz Carlton resort at Lake Las Vegas.\n ', 'faceted', (TrecSubtopic(number='1', text='\n Find information about the Ritz Carlton resort at Lake Las Vegas.\n ', type='inf'), TrecSubtopic(number='2', text='\n Find a site where I can determine room price and availability.\n ', type='nav'), TrecSubtopic(number='3', text='\n Find directions to the Ritz Carlton Lake Las Vegas.\n ', type='nav'), TrecSubtopic(number='4', text='\n Find reviews of the Ritz Carlton Lake Las Vegas.\n ', type='inf'))),
9: TrecWebTrackQuery('110', 'map of brazil', '\n What are the boundaries of the political jurisdictions in Brazil?\n ', 'ambiguous', (TrecSubtopic(number='1', text='\n What are the boundaries of the political jurisdictions in Brazil?\n ', type='inf'), TrecSubtopic(number='2', text='\n I am looking for information about taking a vacation trip to Brazil.\n ', type='inf'), TrecSubtopic(number='3', text='\n I want to buy a road map of Brazil.\n ', type='nav'))),
49: TrecWebTrackQuery('150', 'tn highway patrol', '\n What are the requirements to become a Tennessee Highway Patrol State Trooper?\n ', 'faceted', (TrecSubtopic(number='1', text='\n What are the requirements to become a Tennessee Highway Patrol State Trooper?\n ', type='inf'), TrecSubtopic(number='2', text='\n information about the responsibilities of the Tennessee Highway Patrol\n ', type='inf'), TrecSubtopic(number='3', text='\n home page of the Tennessee Highway Patrol\n ', type='nav'), TrecSubtopic(number='4', text='\n I want to fill in the customer satisfaction survey about my interaction with a Tennessee Highway Patrol State Trooper.\n ', type='nav'))),
})
self._test_queries('clueweb09/catb/trec-web-2012', count=50, items={
0: TrecWebTrackQuery('151', '403b', '\n What is a 403b plan?\n ', 'faceted', (TrecSubtopic(number='1', text='\n What is a 403b plan?\n ', type='inf'), TrecSubtopic(number='2', text='\n Who is eligible for a 403b plan?\n ', type='inf'), TrecSubtopic(number='3', text='\n What are the rules for a 403b retirement plan?\n ', type='nav'), TrecSubtopic(number='4', text='\n What is the difference between 401k and 403b retirement plans?\n ', type='inf'), TrecSubtopic(number='5', text='\n What are the withdrawal limitations for a 403b retirement plan?\n ', type='nav'))),
9: TrecWebTrackQuery('160', 'grilling', '\n Find kabob recipes.\n ', 'ambiguous', (TrecSubtopic(number='1', text='\n Find kabob recipes.\n ', type='nav'), TrecSubtopic(number='2', text='\n Find tips on grilling vegetables.\n ', type='inf'), TrecSubtopic(number='3', text='\n Find tips on grilling fish.\n ', type='inf'), TrecSubtopic(number='4', text='\n Find instructions for grilling chicken.\n ', type='inf'), TrecSubtopic(number='5', text='\n Find the Grilling Magazine website.\n ', type='nav'), TrecSubtopic(number='6', text='\n Find information on gas barbecue grills and cooking on a gas grill.\n ', type='inf'))),
49: TrecWebTrackQuery('200', 'ontario california airport', '\n Find flight information for the Ontario, CA airport.\n ', 'faceted', (TrecSubtopic(number='1', text='\n Find flight information for the Ontario, CA airport.\n ', type='inf'), TrecSubtopic(number='2', text='\n What hotels are near the Ontario, CA airport?\n ', type='inf'), TrecSubtopic(number='3', text='\n What services/facilities does the Ontario, CA airport offer?\n ', type='inf'), TrecSubtopic(number='4', text='\n What is the address of the Ontario, CA airport?\n ', type='nav'))),
})
self._test_queries('clueweb09/trec-mq-2009', count=40000, items={
0: GenericQuery('20001', '1:obama family tree'),
9: GenericQuery('20010', '1:cheap internet'),
39999: GenericQuery('60000', '4:bird shingles'),
})
def test_clueweb09_qrels(self):
self._test_qrels('clueweb09/en/trec-web-2009', count=23601, items={
0: TrecPrel('1', 'clueweb09-en0003-55-31884', 0, 0, 1.0),
9: TrecPrel('1', 'clueweb09-en0009-84-37392', 0, 1, 0.0136322534877696),
23600: TrecPrel('50', 'clueweb09-en0007-05-20194', 0, 1, 1.0),
})
self._test_qrels('clueweb09/en/trec-web-2010', count=25329, items={
0: TrecQrel('51', 'clueweb09-en0000-16-19379', 0, '0'),
9: TrecQrel('51', 'clueweb09-en0001-55-24197', 0, '0'),
25328: TrecQrel('99', 'clueweb09-enwp03-23-18429', 0, '0'),
})
self._test_qrels('clueweb09/en/trec-web-2011', count=19381, items={
0: TrecQrel('101', 'clueweb09-en0007-71-07471', 0, '0'),
9: TrecQrel('101', 'clueweb09-en0044-05-29808', 2, '0'),
19380: TrecQrel('150', 'clueweb09-en0003-86-25593', -2, '0'),
})
self._test_qrels('clueweb09/en/trec-web-2012', count=16055, items={
0: TrecQrel('151', 'clueweb09-en0000-00-03430', -2, '0'),
9: TrecQrel('151', 'clueweb09-en0000-00-04023', -2, '0'),
16054: TrecQrel('200', 'clueweb09-enwp03-49-00268', 0, '0'),
})
self._test_qrels('clueweb09/catb/trec-web-2009', count=13118, items={
0: TrecPrel('1', 'clueweb09-en0003-55-31884', 0, 0, 1.0),
9: TrecPrel('1', 'clueweb09-enwp01-17-09993', 2, 1, 1.0),
13117: TrecPrel('50', 'clueweb09-en0007-05-20194', 0, 1, 1.0),
})
self._test_qrels('clueweb09/catb/trec-web-2010', count=15845, items={
0: TrecQrel('51', 'clueweb09-en0000-16-19379', 0, '0'),
9: TrecQrel('51', 'clueweb09-en0001-55-24197', 0, '0'),
15844: TrecQrel('99', 'clueweb09-enwp03-23-18429', 0, '0'),
})
self._test_qrels('clueweb09/catb/trec-web-2011', count=13081, items={
0: TrecQrel('101', 'clueweb09-en0007-71-07471', 0, '0'),
9: TrecQrel('101', 'clueweb09-en0001-12-16652', 0, '0'),
13080: TrecQrel('150', 'clueweb09-en0003-86-25593', -2, '0'),
})
self._test_qrels('clueweb09/catb/trec-web-2012', count=10022, items={
0: TrecQrel('151', 'clueweb09-en0000-00-03430', -2, '0'),
9: TrecQrel('151', 'clueweb09-en0000-00-04023', -2, '0'),
10021: TrecQrel('200', 'clueweb09-enwp03-49-00268', 0, '0'),
})
self._test_qrels('clueweb09/trec-mq-2009', count=34534, items={
0: TrecPrel('20001', 'clueweb09-en0003-55-31884', 0, 0, 1.0),
9: TrecPrel('20001', 'clueweb09-enwp01-17-09993', 2, 1, 1.0),
34533: TrecPrel('57118', 'clueweb09-en0010-39-07801', 0, 1, 0.0612006151868131),
})
if __name__ == '__main__':
    unittest.main()
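
# A standalone sketch (not part of the test suite) of the docstore access
# pattern exercised above: get_many() fetches ids from the source WARC files
# on a cold cache and from local lookup structures once warm. Assumes the
# ClueWeb09 source files have been configured for ir_datasets.
def fetch_clueweb09_urls(doc_ids):
    import ir_datasets
    store = ir_datasets.load('clueweb09').docs_store()
    # get_many returns a mapping of doc_id -> WarcDoc namedtuple
    # (doc_id, url, date, http_headers, body, body_content_type)
    return {doc_id: doc.url for doc_id, doc in store.get_many(doc_ids).items()}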
hexsha: b2f04416c405999058119701e75211e360360d6a | size: 14,597 | ext: py | lang: Python | path: tools/grammar-analysis/ANTLRv4ParserListener.py | repo: sanyaade-teachings/spresensedroplet | licenses: ["MIT"] | stars: 3
# Generated from java-escape by ANTLR 4.5
from antlr4 import *
# This class defines a complete listener for a parse tree produced by ANTLRv4Parser.
class ANTLRv4ParserListener(ParseTreeListener):
# Enter a parse tree produced by ANTLRv4Parser#grammarSpec.
def enterGrammarSpec(self, ctx):
pass
# Exit a parse tree produced by ANTLRv4Parser#grammarSpec.
def exitGrammarSpec(self, ctx):
pass
# Enter a parse tree produced by ANTLRv4Parser#grammarType.
def enterGrammarType(self, ctx):
pass
# Exit a parse tree produced by ANTLRv4Parser#grammarType.
def exitGrammarType(self, ctx):
pass
# Enter a parse tree produced by ANTLRv4Parser#prequelConstruct.
def enterPrequelConstruct(self, ctx):
pass
# Exit a parse tree produced by ANTLRv4Parser#prequelConstruct.
def exitPrequelConstruct(self, ctx):
pass
# Enter a parse tree produced by ANTLRv4Parser#optionsSpec.
def enterOptionsSpec(self, ctx):
pass
# Exit a parse tree produced by ANTLRv4Parser#optionsSpec.
def exitOptionsSpec(self, ctx):
pass
# Enter a parse tree produced by ANTLRv4Parser#option.
def enterOption(self, ctx):
pass
# Exit a parse tree produced by ANTLRv4Parser#option.
def exitOption(self, ctx):
pass
# Enter a parse tree produced by ANTLRv4Parser#optionValue.
def enterOptionValue(self, ctx):
pass
# Exit a parse tree produced by ANTLRv4Parser#optionValue.
def exitOptionValue(self, ctx):
pass
# Enter a parse tree produced by ANTLRv4Parser#delegateGrammars.
def enterDelegateGrammars(self, ctx):
pass
# Exit a parse tree produced by ANTLRv4Parser#delegateGrammars.
def exitDelegateGrammars(self, ctx):
pass
# Enter a parse tree produced by ANTLRv4Parser#delegateGrammar.
def enterDelegateGrammar(self, ctx):
pass
# Exit a parse tree produced by ANTLRv4Parser#delegateGrammar.
def exitDelegateGrammar(self, ctx):
pass
# Enter a parse tree produced by ANTLRv4Parser#tokensSpec.
def enterTokensSpec(self, ctx):
pass
# Exit a parse tree produced by ANTLRv4Parser#tokensSpec.
def exitTokensSpec(self, ctx):
pass
# Enter a parse tree produced by ANTLRv4Parser#channelsSpec.
def enterChannelsSpec(self, ctx):
pass
# Exit a parse tree produced by ANTLRv4Parser#channelsSpec.
def exitChannelsSpec(self, ctx):
pass
# Enter a parse tree produced by ANTLRv4Parser#idList.
def enterIdList(self, ctx):
pass
# Exit a parse tree produced by ANTLRv4Parser#idList.
def exitIdList(self, ctx):
pass
# Enter a parse tree produced by ANTLRv4Parser#action.
def enterAction(self, ctx):
pass
# Exit a parse tree produced by ANTLRv4Parser#action.
def exitAction(self, ctx):
pass
# Enter a parse tree produced by ANTLRv4Parser#actionScopeName.
def enterActionScopeName(self, ctx):
pass
# Exit a parse tree produced by ANTLRv4Parser#actionScopeName.
def exitActionScopeName(self, ctx):
pass
# Enter a parse tree produced by ANTLRv4Parser#actionBlock.
def enterActionBlock(self, ctx):
pass
# Exit a parse tree produced by ANTLRv4Parser#actionBlock.
def exitActionBlock(self, ctx):
pass
# Enter a parse tree produced by ANTLRv4Parser#argActionBlock.
def enterArgActionBlock(self, ctx):
pass
# Exit a parse tree produced by ANTLRv4Parser#argActionBlock.
def exitArgActionBlock(self, ctx):
pass
# Enter a parse tree produced by ANTLRv4Parser#modeSpec.
def enterModeSpec(self, ctx):
pass
# Exit a parse tree produced by ANTLRv4Parser#modeSpec.
def exitModeSpec(self, ctx):
pass
# Enter a parse tree produced by ANTLRv4Parser#rules.
def enterRules(self, ctx):
pass
# Exit a parse tree produced by ANTLRv4Parser#rules.
def exitRules(self, ctx):
pass
# Enter a parse tree produced by ANTLRv4Parser#ruleSpec.
def enterRuleSpec(self, ctx):
pass
# Exit a parse tree produced by ANTLRv4Parser#ruleSpec.
def exitRuleSpec(self, ctx):
pass
# Enter a parse tree produced by ANTLRv4Parser#parserRuleSpec.
def enterParserRuleSpec(self, ctx):
pass
# Exit a parse tree produced by ANTLRv4Parser#parserRuleSpec.
def exitParserRuleSpec(self, ctx):
pass
# Enter a parse tree produced by ANTLRv4Parser#exceptionGroup.
def enterExceptionGroup(self, ctx):
pass
# Exit a parse tree produced by ANTLRv4Parser#exceptionGroup.
def exitExceptionGroup(self, ctx):
pass
# Enter a parse tree produced by ANTLRv4Parser#exceptionHandler.
def enterExceptionHandler(self, ctx):
pass
# Exit a parse tree produced by ANTLRv4Parser#exceptionHandler.
def exitExceptionHandler(self, ctx):
pass
# Enter a parse tree produced by ANTLRv4Parser#finallyClause.
def enterFinallyClause(self, ctx):
pass
# Exit a parse tree produced by ANTLRv4Parser#finallyClause.
def exitFinallyClause(self, ctx):
pass
# Enter a parse tree produced by ANTLRv4Parser#rulePrequel.
def enterRulePrequel(self, ctx):
pass
# Exit a parse tree produced by ANTLRv4Parser#rulePrequel.
def exitRulePrequel(self, ctx):
pass
# Enter a parse tree produced by ANTLRv4Parser#ruleReturns.
def enterRuleReturns(self, ctx):
pass
# Exit a parse tree produced by ANTLRv4Parser#ruleReturns.
def exitRuleReturns(self, ctx):
pass
# Enter a parse tree produced by ANTLRv4Parser#throwsSpec.
def enterThrowsSpec(self, ctx):
pass
# Exit a parse tree produced by ANTLRv4Parser#throwsSpec.
def exitThrowsSpec(self, ctx):
pass
# Enter a parse tree produced by ANTLRv4Parser#localsSpec.
def enterLocalsSpec(self, ctx):
pass
# Exit a parse tree produced by ANTLRv4Parser#localsSpec.
def exitLocalsSpec(self, ctx):
pass
# Enter a parse tree produced by ANTLRv4Parser#ruleAction.
def enterRuleAction(self, ctx):
pass
# Exit a parse tree produced by ANTLRv4Parser#ruleAction.
def exitRuleAction(self, ctx):
pass
# Enter a parse tree produced by ANTLRv4Parser#ruleModifiers.
def enterRuleModifiers(self, ctx):
pass
# Exit a parse tree produced by ANTLRv4Parser#ruleModifiers.
def exitRuleModifiers(self, ctx):
pass
# Enter a parse tree produced by ANTLRv4Parser#ruleModifier.
def enterRuleModifier(self, ctx):
pass
# Exit a parse tree produced by ANTLRv4Parser#ruleModifier.
def exitRuleModifier(self, ctx):
pass
# Enter a parse tree produced by ANTLRv4Parser#ruleBlock.
def enterRuleBlock(self, ctx):
pass
# Exit a parse tree produced by ANTLRv4Parser#ruleBlock.
def exitRuleBlock(self, ctx):
pass
# Enter a parse tree produced by ANTLRv4Parser#ruleAltList.
def enterRuleAltList(self, ctx):
pass
# Exit a parse tree produced by ANTLRv4Parser#ruleAltList.
def exitRuleAltList(self, ctx):
pass
# Enter a parse tree produced by ANTLRv4Parser#labeledAlt.
def enterLabeledAlt(self, ctx):
pass
# Exit a parse tree produced by ANTLRv4Parser#labeledAlt.
def exitLabeledAlt(self, ctx):
pass
# Enter a parse tree produced by ANTLRv4Parser#lexerRuleSpec.
def enterLexerRuleSpec(self, ctx):
pass
# Exit a parse tree produced by ANTLRv4Parser#lexerRuleSpec.
def exitLexerRuleSpec(self, ctx):
pass
# Enter a parse tree produced by ANTLRv4Parser#lexerRuleBlock.
def enterLexerRuleBlock(self, ctx):
pass
# Exit a parse tree produced by ANTLRv4Parser#lexerRuleBlock.
def exitLexerRuleBlock(self, ctx):
pass
# Enter a parse tree produced by ANTLRv4Parser#lexerAltList.
def enterLexerAltList(self, ctx):
pass
# Exit a parse tree produced by ANTLRv4Parser#lexerAltList.
def exitLexerAltList(self, ctx):
pass
# Enter a parse tree produced by ANTLRv4Parser#lexerAlt.
def enterLexerAlt(self, ctx):
pass
# Exit a parse tree produced by ANTLRv4Parser#lexerAlt.
def exitLexerAlt(self, ctx):
pass
# Enter a parse tree produced by ANTLRv4Parser#lexerElements.
def enterLexerElements(self, ctx):
pass
# Exit a parse tree produced by ANTLRv4Parser#lexerElements.
def exitLexerElements(self, ctx):
pass
# Enter a parse tree produced by ANTLRv4Parser#lexerElement.
def enterLexerElement(self, ctx):
pass
# Exit a parse tree produced by ANTLRv4Parser#lexerElement.
def exitLexerElement(self, ctx):
pass
# Enter a parse tree produced by ANTLRv4Parser#labeledLexerElement.
def enterLabeledLexerElement(self, ctx):
pass
# Exit a parse tree produced by ANTLRv4Parser#labeledLexerElement.
def exitLabeledLexerElement(self, ctx):
pass
# Enter a parse tree produced by ANTLRv4Parser#lexerBlock.
def enterLexerBlock(self, ctx):
pass
# Exit a parse tree produced by ANTLRv4Parser#lexerBlock.
def exitLexerBlock(self, ctx):
pass
# Enter a parse tree produced by ANTLRv4Parser#lexerCommands.
def enterLexerCommands(self, ctx):
pass
# Exit a parse tree produced by ANTLRv4Parser#lexerCommands.
def exitLexerCommands(self, ctx):
pass
# Enter a parse tree produced by ANTLRv4Parser#lexerCommand.
def enterLexerCommand(self, ctx):
pass
# Exit a parse tree produced by ANTLRv4Parser#lexerCommand.
def exitLexerCommand(self, ctx):
pass
# Enter a parse tree produced by ANTLRv4Parser#lexerCommandName.
def enterLexerCommandName(self, ctx):
pass
# Exit a parse tree produced by ANTLRv4Parser#lexerCommandName.
def exitLexerCommandName(self, ctx):
pass
# Enter a parse tree produced by ANTLRv4Parser#lexerCommandExpr.
def enterLexerCommandExpr(self, ctx):
pass
# Exit a parse tree produced by ANTLRv4Parser#lexerCommandExpr.
def exitLexerCommandExpr(self, ctx):
pass
# Enter a parse tree produced by ANTLRv4Parser#altList.
def enterAltList(self, ctx):
pass
# Exit a parse tree produced by ANTLRv4Parser#altList.
def exitAltList(self, ctx):
pass
# Enter a parse tree produced by ANTLRv4Parser#alternative.
def enterAlternative(self, ctx):
pass
# Exit a parse tree produced by ANTLRv4Parser#alternative.
def exitAlternative(self, ctx):
pass
# Enter a parse tree produced by ANTLRv4Parser#element.
def enterElement(self, ctx):
pass
# Exit a parse tree produced by ANTLRv4Parser#element.
def exitElement(self, ctx):
pass
# Enter a parse tree produced by ANTLRv4Parser#labeledElement.
def enterLabeledElement(self, ctx):
pass
# Exit a parse tree produced by ANTLRv4Parser#labeledElement.
def exitLabeledElement(self, ctx):
pass
# Enter a parse tree produced by ANTLRv4Parser#ebnf.
def enterEbnf(self, ctx):
pass
# Exit a parse tree produced by ANTLRv4Parser#ebnf.
def exitEbnf(self, ctx):
pass
# Enter a parse tree produced by ANTLRv4Parser#blockSuffix.
def enterBlockSuffix(self, ctx):
pass
# Exit a parse tree produced by ANTLRv4Parser#blockSuffix.
def exitBlockSuffix(self, ctx):
pass
# Enter a parse tree produced by ANTLRv4Parser#ebnfSuffix.
def enterEbnfSuffix(self, ctx):
pass
# Exit a parse tree produced by ANTLRv4Parser#ebnfSuffix.
def exitEbnfSuffix(self, ctx):
pass
# Enter a parse tree produced by ANTLRv4Parser#lexerAtom.
def enterLexerAtom(self, ctx):
pass
# Exit a parse tree produced by ANTLRv4Parser#lexerAtom.
def exitLexerAtom(self, ctx):
pass
# Enter a parse tree produced by ANTLRv4Parser#atom.
def enterAtom(self, ctx):
pass
# Exit a parse tree produced by ANTLRv4Parser#atom.
def exitAtom(self, ctx):
pass
# Enter a parse tree produced by ANTLRv4Parser#notSet.
def enterNotSet(self, ctx):
pass
# Exit a parse tree produced by ANTLRv4Parser#notSet.
def exitNotSet(self, ctx):
pass
# Enter a parse tree produced by ANTLRv4Parser#blockSet.
def enterBlockSet(self, ctx):
pass
# Exit a parse tree produced by ANTLRv4Parser#blockSet.
def exitBlockSet(self, ctx):
pass
# Enter a parse tree produced by ANTLRv4Parser#setElement.
def enterSetElement(self, ctx):
pass
# Exit a parse tree produced by ANTLRv4Parser#setElement.
def exitSetElement(self, ctx):
pass
# Enter a parse tree produced by ANTLRv4Parser#block.
def enterBlock(self, ctx):
pass
# Exit a parse tree produced by ANTLRv4Parser#block.
def exitBlock(self, ctx):
pass
# Enter a parse tree produced by ANTLRv4Parser#ruleref.
def enterRuleref(self, ctx):
pass
# Exit a parse tree produced by ANTLRv4Parser#ruleref.
def exitRuleref(self, ctx):
pass
# Enter a parse tree produced by ANTLRv4Parser#characterRange.
def enterCharacterRange(self, ctx):
pass
# Exit a parse tree produced by ANTLRv4Parser#characterRange.
def exitCharacterRange(self, ctx):
pass
# Enter a parse tree produced by ANTLRv4Parser#terminal.
def enterTerminal(self, ctx):
pass
# Exit a parse tree produced by ANTLRv4Parser#terminal.
def exitTerminal(self, ctx):
pass
# Enter a parse tree produced by ANTLRv4Parser#elementOptions.
def enterElementOptions(self, ctx):
pass
# Exit a parse tree produced by ANTLRv4Parser#elementOptions.
def exitElementOptions(self, ctx):
pass
# Enter a parse tree produced by ANTLRv4Parser#elementOption.
def enterElementOption(self, ctx):
pass
# Exit a parse tree produced by ANTLRv4Parser#elementOption.
def exitElementOption(self, ctx):
pass
# Enter a parse tree produced by ANTLRv4Parser#identifier.
def enterIdentifier(self, ctx):
pass
# Exit a parse tree produced by ANTLRv4Parser#identifier.
def exitIdentifier(self, ctx):
pass
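
# Minimal driver sketch for the listener above. The ANTLR-generated lexer and
# parser classes are not part of this file, so they are taken as parameters;
# ParseTreeWalker then fires the enter*/exit* callbacks depth-first. A real
# listener subclass would override only the callbacks it needs.
def walk_grammar(text, lexer_cls, parser_cls, listener):
    from antlr4 import InputStream, CommonTokenStream, ParseTreeWalker
    tokens = CommonTokenStream(lexer_cls(InputStream(text)))
    # grammarSpec is the start rule of ANTLRv4Parser (cf. enterGrammarSpec above)
    tree = parser_cls(tokens).grammarSpec()
    ParseTreeWalker.DEFAULT.walk(listener, tree)
    return tree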
hexsha: 6528feece33b346f134e6548f2dfdcb52c7e65c1 | size: 96 | ext: py | lang: Python | path: ven2/lib/python2.7/site-packages/zope/deferredimport/samples/sample6.py | repo: manliu1225/Facebook_crawler | licenses: ["Apache-2.0"] | stars: 4
import zope.deferredimport.sample5

def getone():
    return zope.deferredimport.sample5.one
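
# The sample above defers resolving zope.deferredimport.sample5.one until
# getone() is actually called. A dependency-free sketch of the same idea
# using a PEP 562 module-level __getattr__ (Python 3.7+; illustration only,
# not the zope.deferredimport API):
def __getattr__(name):
    if name == 'one':
        from zope.deferredimport import sample5
        return sample5.one
    raise AttributeError(name)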
hexsha: 3345b0eac5cc00a5018ad53d35a6980e4d23e0dc | size: 100 | ext: py | lang: Python | path: Lab_6/Lab6_054/4.2.2.py | repo: Saif-M-Dhrubo/Crypto-Lab | licenses: ["MIT"] | stars: 2
from struct import pack

# Emit five little-endian unsigned 32-bit values as a 20-byte binary payload
# (print is parenthesized so this runs under both Python 2 and Python 3).
print(pack("<IIIII", 0xaabbccdd, 0x00000000, 0x00000000, 0xbffe9a98, 0x08048efe))
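
# Round-trip check (sketch): "<IIIII" is five little-endian unsigned 32-bit
# ints, i.e. exactly 20 bytes; the last two values look like a 32-bit stack
# address and a code address, as is typical for a buffer-overflow payload.
from struct import unpack, calcsize
assert calcsize("<IIIII") == 20
payload = pack("<IIIII", 0xaabbccdd, 0x00000000, 0x00000000, 0xbffe9a98, 0x08048efe)
assert unpack("<IIIII", payload) == (0xaabbccdd, 0, 0, 0xbffe9a98, 0x08048efe)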
hexsha: 6824e70dad17ca462b2d50be808db2eea796cf38 | size: 14,237 | ext: py | lang: Python | path: opponents/hermione/make_hermione_images.py | repo: screenusername/spni | licenses: ["MIT"]
import sys
#emotions:
#happy
#calm
#sad
#loss
#interested - clasping hands together?
#horny
#shocked - maybe hands in front of face, with a gap in between her fingers to see through?
#excited
#stunned - eyes closed, I think.
#angry
#clothes = shoes, socks, jumper, tie, skirt, shirt, bra, panties
#11 total stages
#appearance:36**aa12.42.0.42.54.12.42.0.4.70_ab_ac2.52.52.52_ba50_bb17.1_bc150.500.0.0.1_bd17_be180_ca61.0.40.61.14.8.34.0.9_cb0_da1.0.0.100_db_dd0.0.34.50.45_dh1.30.50.50.0_di4_qa_qb_dc40.1.1.1.1_ea26.A27241.A27241.56.0.0_ec10.0.A27241.A27241.56_ed28.50.1.1.A27241.56_ef_eg_eh4.A27241_r0_fa2.50.50.60.50.65.56_fb10_fc0.9.55.0.9.55.50.61.61_fd1.0.19.A27241.56_fe58.61_ff0000000000_fg0.50_t0_pa0.0.0.0.40.50.85.85.0.0_pb_pc_pd_pe_ga0_gb1_gc0.0_ge0000000000_gh_gf_gg_gd10000000_ha75.75_hb49.1.44.99_hc0.59.39.0.59.39_hd1.1.49.49_ia_if0.59.59.0.1.5.0.0.5.0.0.0.0.3_ib0.55.55.0.0.0.0.1.5.0.0.5.0.0.2_id_ic34.60.60.18191E.0_jc_ie_ja9.2C2E31.070809.55_jb9.2C2E31.070809.55_jd7.60.50.50_je7.60.50.50_jf_jg_ka6.55.55.E8E8E8.0_kb6.55.55.B71740_kc_kd_ke_kf_la_lb_oa_os_ob_oc_od_oe_of4.6.44.6.0_lc_m0_n0_s0_og_oh_oo_op_oq_or_om_on_ok_ol_oi_oj_ad0.0.0.0.0.0.0.0.0.0
version_str = "36**"
def get_emotion_data():
emotions = dict()
#happy
em = dict()
em["pose"] = "aa12.42.0.42.54.12.42.0.4.70_ab_ac2.52.52.52_ba50_bb17.1_bc150.500.0.0.1_bd17_be180_ca61.0.40.61.14.8.34.0.9_cb0_da1.0.0.100_db_dd0.0.34.50.45_dh1.30.50.50.0_di4_qa_qb_dc40.1.1.1.1_ea26.A27241.A27241.56.0.0_ec10.0.A27241.A27241.56_ed28.50.1.1.A27241.56_ef_eg_eh4.A27241_r0_fa2.50.50.60.50.65.56_fb10_fc0.9.55.0.9.55.50.61.61_fd1.0.19.A27241.56_fe58.61_ff0000000000_fg0.50_t0_pa0.0.0.0.40.50.85.85.0.0_pb_pc_pd_pe_ga0_gb1_gc0.0_ge0000000000_gh_gf_gg_gd10000000_ha75.75_hb49.1.44.99_hc0.59.39.0.59.39_hd1.1.49.49"
em["blush_mod"] = 0
emotions["happy"] = em
#calm
em = dict()
em["pose"] = "aa11.98.0.42.54.11.98.0.4.70_ab_ac2.52.52.52_ba50_bb17.1_bc150.500.0.0.1_bd17_be180_ca61.0.40.61.14.8.34.0.9_cb0_da1.0.0.100_db_dd0.0.34.50.45_dh1.30.50.50.0_di4_qa_qb_dc40.1.1.1.1_ea26.A27241.A27241.56.0.0_ec10.0.A27241.A27241.56_ed28.50.1.1.A27241.56_ef_eg_eh4.A27241_r0_fa2.50.50.60.50.65.56_fb10_fc0.9.55.0.9.55.50.61.61_fd1.0.19.A27241.56_fe58.61_ff0000000000_fg0.50_t0_pa0.0.0.0.40.50.85.85.0.0_pb_pc_pd_pe_ga0_gb1_gc0.0_ge0000000000_gh_gf_gg_gd10000000_ha70.70_hb49.1.44.99_hc0.65.39.0.65.39_hd0.1.49.49"
em["blush_mod"] = 0
emotions["calm"] = em
#sad
em = dict()
em["pose"] = "aa13.97.0.42.54.13.97.0.4.70_ab_ac2.52.52.52_ba50_bb17.1_bc150.500.0.0.1_bd17_be180_ca61.0.40.61.14.8.34.0.9_cb0_da1.0.0.100_db_dd0.0.34.50.45_dh1.30.50.50.0_di4_qa_qb_dc40.1.1.1.1_ea26.A27241.A27241.56.0.0_ec10.0.A27241.A27241.56_ed28.50.1.1.A27241.56_ef_eg_eh4.A27241_r0_fa3.50.50.60.50.74.56_fb10_fc0.9.55.0.9.55.50.61.61_fd1.0.19.A27241.56_fe58.61_ff0000000000_fg0.50_t0_pa0.0.0.0.40.50.85.85.0.0_pb_pc_pd_pe_ga0_gb1_gc0.0_ge0000000000_gh_gf_gg_gd10000000_ha59.59_hb49.1.20.99_hc0.61.39.0.61.39_hd8.1.49.49"
em["blush_mod"] = 0
emotions["sad"] = em
#loss
em = dict()
em["pose"] = "aa11.89.1.42.54.11.89.1.4.70_ab_ac2.52.52.52_ba50_bb17.1_bc150.500.0.0.1_bd17_be180_ca61.0.40.61.14.8.34.0.9_cb0_da1.0.0.100_db_dd0.0.34.50.45_dh1.30.50.50.0_di4_qa_qb_dc40.1.1.1.1_ea26.A27241.A27241.56.0.0_ec10.0.A27241.A27241.56_ed28.50.1.1.A27241.56_ef_eg_eh4.A27241_r0_fa2.50.50.60.50.86.56_fb10_fc0.9.55.0.9.55.50.61.61_fd1.0.19.A27241.56_fe58.61_ff0000000000_fg0.50_t0_pa0.0.0.0.40.50.85.85.0.0_pb_pc_pd_pe_ga0_gb1_gc0.0_ge0000000000_gh_gf_gg_gd10000000_ha61.61_hb49.1.3.99_hc0.65.39.0.65.39_hd21.1.49.49"
em["blush_mod"] = 0
emotions["loss"] = em
#interested
em = dict()
em["pose"] = "aa26.63.0.16.58.24.63.1.0.64_ab_ac2.52.52.52_ba50_bb17.1_bc150.500.0.0.1_bd17_be180_ca61.0.40.61.14.8.34.0.9_cb0_da1.0.0.100_db_dd0.0.34.50.45_dh1.30.50.50.0_di4_qa_qb_dc40.1.1.1.1_ea26.A27241.A27241.56.0.0_ec10.0.A27241.A27241.56_ed28.50.1.1.A27241.56_ef_eg_eh4.A27241_r0_fa5.50.50.60.50.0.56_fb10_fc0.9.55.0.9.55.50.61.61_fd1.0.19.A27241.56_fe58.61_ff0000000000_fg0.50_t0_pa0.0.0.0.40.50.85.85.0.0_pb_pc_pd_pe_ga0_gb1_gc0.0_ge0000000000_gh_gf_gg_gd10000000_ha84.84_hb49.1.44.99_hc0.39.39.0.39.39_hd1.1.49.49"
em["blush_mod"] = 1
emotions["interested"] = em
#horny
em = dict()
em["pose"] = "aa10.58.0.42.75.10.58.0.4.75_ab_ac2.52.52.52_ba50_bb17.1_bc150.500.0.0.1_bd17_be180_ca61.0.40.61.14.8.34.0.9_cb0_da1.0.0.100_db_dd0.0.34.50.45_dh1.30.50.50.0_di4_qa_qb_dc40.1.1.1.1_ea26.A27241.A27241.56.0.0_ec10.0.A27241.A27241.56_ed28.50.1.1.A27241.56_ef_eg_eh4.A27241_r0_fa2.50.50.60.50.65.56_fb10_fc0.9.55.0.9.55.50.61.61_fd1.0.19.A27241.56_fe58.61_ff0000000000_fg0.50_t0_pa0.0.0.0.40.50.85.85.0.0_pb_pc_pd_pe_ga0_gb1_gc0.0_ge0000000000_gh_gf_gg_gd10000000_ha87.87_hb49.1.44.99_hc0.59.39.0.59.39_hd27.1.49.49"
em["blush_mod"] = 2
emotions["horny"] = em
#shocked
em = dict()
em["pose"] = "aa65.38.1.27.41.75.36.1.4.60_ab_ac2.52.52.52_ba50_bb17.1_bc185.500.0.0.1_bd17_be180_ca61.0.40.61.14.8.34.0.9_cb0_da1.0.0.100_db_dd0.0.34.50.45_dh1.30.50.50.0_di4_qa_qb_dc40.1.1.1.1_ea26.A27241.A27241.56.0.0_ec10.0.A27241.A27241.56_ed28.50.1.1.A27241.56_ef_eg_eh4.A27241_r0_fa2.50.50.60.50.79.56_fb10_fc0.9.55.0.9.55.50.61.61_fd1.0.19.A27241.56_fe58.61_ff0000000000_fg0.50_t0_pa0.0.0.0.40.50.85.85.0.0_pb_pc_pd_pe_ga0_gb1_gc0.0_ge0000000000_gh_gf_gg_gd10000000_ha88.88_hb49.1.44.99_hc0.59.39.0.59.39_hd41.1.49.49"
em["blush_mod"] = 1
emotions["shocked"] = em
#excited
em = dict()
em["pose"] = "aa7.44.0.43.54.7.44.0.6.43_ab_ac2.52.52.52_ba50_bb17.1_bc150.500.0.0.1_bd17_be180_ca61.0.40.61.14.8.34.0.9_cb0_da1.0.0.100_db_dd0.0.34.50.45_dh1.30.50.50.0_di4_qa_qb_dc40.1.1.1.1_ea26.A27241.A27241.56.0.0_ec10.0.A27241.A27241.56_ed28.50.1.1.A27241.56_ef_eg_eh4.A27241_r0_fa2.50.50.60.50.99.56_fb10_fc0.9.55.0.9.55.50.61.61_fd1.0.19.A27241.56_fe58.61_ff0000000000_fg0.50_t0_pa0.0.0.0.40.50.85.85.0.0_pb_pc_pd_pe_ga0_gb1_gc0.0_ge0000000000_gh_gf_gg_gd10000000_ha96.96_hb49.1.44.99_hc0.41.39.0.41.39_hd34.1.49.49"
em["blush_mod"] = 2
emotions["excited"] = em
#stunned
em = dict()
em["pose"] = "aa8.100.1.0.54.8.100.1.6.30_ab_ac2.52.52.52_ba50_bb17.1_bc185.500.8.0.1_bd17_be180_ca61.0.40.61.14.8.34.0.9_cb0_da1.0.0.100_db_dd0.0.34.50.45_dh1.30.50.50.0_di4_qa_qb_dc40.1.1.1.1_ea26.A27241.A27241.56.0.0_ec10.0.A27241.A27241.56_ed28.50.1.1.A27241.56_ef_eg_eh4.A27241_r0_fa2.50.50.60.50.59.56_fb10_fc0.9.55.0.9.55.50.61.61_fd1.0.19.A27241.56_fe58.61_ff0000000000_fg0.50_t0_pa0.0.0.0.40.50.85.85.0.0_pb_pc_pd_pe_ga0_gb1_gc0.0_ge0000000000_gh_gf_gg_gd10000000_ha73.73_hb49.1.44.45_hc0.0.39.0.0.39_hd40.1.49.49"
em["blush_mod"] = 1
emotions["stunned"] = em
#angry
em = dict()
em["pose"] = "aa22.83.1.16.42.22.83.1.4.52_ab_ac2.52.52.52_ba50_bb17.1_bc150.500.8.0.1_bd17_be180_ca61.0.40.61.14.8.34.0.9_cb0_da1.0.0.100_db_dd0.0.34.50.45_dh1.30.50.50.0_di4_qa_qb_dc40.1.1.1.1_ea26.A27241.A27241.56.0.0_ec10.0.A27241.A27241.56_ed28.50.1.1.A27241.56_ef_eg_eh4.A27241_r0_fa6.50.50.60.50.31.56_fb10_fc0.9.55.0.9.55.50.61.61_fd1.0.19.A27241.56_fe58.61_ff0000000000_fg0.50_t0_pa0.0.0.0.40.50.85.85.0.0_pb_pc_pd_pe_ga0_gb1_gc0.0_ge0000000000_gh_gf_gg_gd10000000_ha77.77_hb49.1.44.99_hc0.0.39.0.0.39_hd38.1.49.49"
em["blush_mod"] = 0
emotions["angry"] = em
#smug
em = dict()
em["pose"] = "aa20.72.0.42.49.20.72.0.4.49_ab_ac2.52.52.52_ba50_bb18.1_bc150.500.0.0.1_bd18_be180_ca61.0.40.61.14.8.34.0.9_cb0_da1.0.0.100_db_dd9.0.34.50.45_dh1.30.50.50.0_di4_qa_qb_dc0.1.1.1.1_ea26.A27241.A27241.56.0.0_ec10.0.A27241.A27241.56_ed28.50.1.1.A27241.56_ef_eg_eh4.A27241_r0_fa2.50.50.60.50.38.56_fb10_fc0.9.55.0.9.55.50.61.61_fd1.0.19.A27241.56_fe58.61_ff0000000000_fg0.50_t0_pa0.0.0.0.40.50.85.85.0.0_pb_pc_pd_pe_ga0_gb1_gc0.0_ge0000000000_gh_gf_gg_gd10000000_ha63.63_hb49.1.44.99_hc0.35.39.0.35.39_hd2.1.49.49"
em["blush_mod"] = 0
emotions["smug"] = em
return emotions
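
# Convenience lookup sketch (hypothetical helper, not used elsewhere in this
# file): returns the full pose string for one of the emotion names defined
# above, e.g. get_pose("happy") or get_pose("smug").
def get_pose(emotion_name):
    return get_emotion_data()[emotion_name]["pose"]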
def get_image_data():
d = dict()
d["appearance"] = "36**aa12.42.0.42.54.12.42.0.4.70_ab_ac2.52.52.52_ba50_bb17.1_bc150.500.0.0.1_bd17_be180_ca61.0.40.61.14.8.34.0.9_cb0_da1.0.0.100_db_dd0.0.34.50.45_dh1.30.50.50.0_di4_qa_qb_dc40.1.1.1.1_ea26.A27241.A27241.56.0.0_ec10.0.A27241.A27241.56_ed28.50.1.1.A27241.56_ef_eg_eh4.A27241_r0_fa2.50.50.60.50.65.56_fb10_fc0.9.55.0.9.55.50.61.61_fd1.0.19.A27241.56_fe58.61_ff0000000000_fg0.50_t0_pa0.0.0.0.40.50.85.85.0.0_pb_pc_pd_pe_ga0_gb1_gc0.0_ge0000000000_gh_gf_gg_gd10000000_ha75.75_hb49.1.44.99_hc0.59.39.0.59.39_hd1.1.49.49_ia_if0.59.59.0.1.5.0.0.5.0.0.0.0.3_ib0.55.55.0.0.0.0.1.5.0.0.5.0.0.2_id_ic34.60.60.18191E.0_jc_ie_ja9.2C2E31.070809.55_jb9.2C2E31.070809.55_jd7.60.50.50_je7.60.50.50_jf_jg_ka6.55.55.E8E8E8.0_kb6.55.55.B71740_kc_kd_ke_kf_la_lb_oa_os_ob_oc_od_oe_of4.6.44.6.0_lc_m0_n0_s0_og_oh_oo_op_oq_or_om_on_ok_ol_oi_oj_ad0.0.0.0.0.0.0.0.0.0"
#these are separated out because parts of the descriptions change according to blush and love juice levels
d["vagina"] = "dc40.1.1.1.1" #dc component
d["face"] = "dd0.0.34.50.45" #dd component
stages = list()
#lj = love juices
#fully clothed
s = {}
s["blush"] = 0
s["lj"] = 0
s["clothes"] = "ia_if0.59.59.0.1.5.0.0.5.0.0.0.0.3_ib0.55.55.0.0.0.0.1.5.0.0.5.0.0.2_id_ic34.60.60.18191E.0_jc_ie_ja9.2C2E31.070809.55_jb9.2C2E31.070809.55_jd7.60.50.50_je7.60.50.50_jf_jg_ka6.55.55.B71740.0_kb6.55.55.B71740_kc_kd_ke_kf_la_lb_oa_os_ob_oc_od_oe_of4.6.44.6.0_lc_m0_n0_s0_og_oh_oo_op_oq_or_om_on_ok_ol_oi_oj"
s["other"] = ""
stages.append(s)
#lost shoes
s = {}
s["blush"] = 0
s["lj"] = 0
s["clothes"] = "ia_if0.59.59.0.1.5.0.0.5.0.0.0.0.3_ib0.55.55.0.0.0.0.1.5.0.0.5.0.0.2_id_ic34.60.60.18191E.0_jc_ie_ja9.2C2E31.070809.55_jb9.2C2E31.070809.55_jd_je_jf_jg_ka6.55.55.B71740.0_kb6.55.55.B71740_kc_kd_ke_kf_la_lb_oa_os_ob_oc_od_oe_of4.6.44.6.0_lc_m0_n0_s0_og_oh_oo_op_oq_or_om_on_ok_ol_oi_oj"
s["other"] = ""
stages.append(s)
#lost socks
s = {}
s["blush"] = 0
s["lj"] = 0
s["clothes"] = "ia_if0.59.59.0.1.5.0.0.5.0.0.0.0.3_ib0.55.55.0.0.0.0.1.5.0.0.5.0.0.2_id_ic34.60.60.18191E.0_jc_ie_ja_jb_jd_je_jf_jg_ka6.55.55.B71740.0_kb6.55.55.B71740_kc_kd_ke_kf_la_lb_oa_os_ob_oc_od_oe_of4.6.44.6.0_lc_m0_n0_s0_og_oh_oo_op_oq_or_om_on_ok_ol_oi_oj"
s["other"] = ""
stages.append(s)
#lost jumper
s = {}
s["blush"] = 0
s["lj"] = 0
s["clothes"] = "ia_if_ib0.55.55.0.0.0.0.1.5.0.0.5.0.0.2_id_ic34.60.60.18191E.0_jc_ie_ja_jb_jd_je_jf_jg_ka6.55.55.B71740.0_kb6.55.55.B71740_kc_kd_ke_kf_la_lb_oa_os_ob_oc_od_oe_of4.6.44.6.0_lc_m0_n0_s0_og_oh_oo_op_oq_or_om_on_ok_ol_oi_oj"
s["other"] = ""
stages.append(s)
#lost tie
s = {}
s["blush"] = 0
s["lj"] = 0
s["clothes"] = "ia_if_ib0.55.55.0.0.0.0.1.5.0.0.5.0.0.2_id_ic34.60.60.18191E.0_jc_ie_ja_jb_jd_je_jf_jg_ka6.55.55.B71740.0_kb6.55.55.B71740_kc_kd_ke_kf_la_lb_oa_os_ob_oc_od_oe_of_lc_m0_n0_s0_og_oh_oo_op_oq_or_om_on_ok_ol_oi_oj"
s["other"] = ""
stages.append(s)
#lost skirt
s = {}
s["blush"] = 1
s["lj"] = 0
s["clothes"] = "ia_if_ib0.55.55.0.0.0.0.1.5.0.0.5.0.0.2_id_ic_jc_ie_ja_jb_jd_je_jf_jg_ka6.55.55.B71740.0_kb6.55.55.B71740_kc_kd_ke_kf_la_lb_oa_os_ob_oc_od_oe_of_lc_m0_n0_s0_og_oh_oo_op_oq_or_om_on_ok_ol_oi_oj"
s["other"] = ""
stages.append(s)
#lost blouse
s = {}
s["blush"] = 1
s["lj"] = 0
s["clothes"] = "ia_if_ib_id_ic_jc_ie_ja_jb_jd_je_jf_jg_ka6.55.55.B71740.0_kb6.55.55.B71740_kc_kd_ke_kf_la_lb_oa_os_ob_oc_od_oe_of_lc_m0_n0_s0_og_oh_oo_op_oq_or_om_on_ok_ol_oi_oj"
s["other"] = ""
stages.append(s)
#lost bra
s = {}
s["blush"] = 2
s["lj"] = 0
s["clothes"] = "ia_if_ib_id_ic_jc_ie_ja_jb_jd_je_jf_jg_ka_kb6.55.55.B71740_kc_kd_ke_kf_la_lb_oa_os_ob_oc_od_oe_of_lc_m0_n0_s0_og_oh_oo_op_oq_or_om_on_ok_ol_oi_oj"
s["other"] = ""
stages.append(s)
#lost panties/nude
s = {}
s["blush"] = 2
s["lj"] = 5
s["clothes"] = "ia_if_ib_id_ic_jc_ie_ja_jb_jd_je_jf_jg_ka_kb_kc_kd_ke_kf_la_lb_oa_os_ob_oc_od_oe_of_lc_m0_n0_s0_og_oh_oo_op_oq_or_om_on_ok_ol_oi_oj_ad0.0.0.0.0.0.0.0.0.0"
s["other"] = ""
stages.append(s)
#masturbating
s = {}
s["blush"] = 4
s["lj"] = 80
s["clothes"] = stages[-1]["clothes"]
s["other"] = ""
stages.append(s)
#finished
s = {}
s["blush"] = 3
s["lj"] = 140
s["clothes"] = stages[-1]["clothes"]
s["other"] = ""
stages.append(s)
d["stages"] = stages
blush = list()
blush.append(( 0, 9)) #0 no blush
blush.append((14, 9)) #1 lost dress
blush.append((27, 0)) #2 lost bra
blush.append((50, 1)) #3 nude & finished
blush.append((60, 10)) #4 masturbating
blush.append((70, 12)) #5 stage + emotion mod
blush.append((80, 14)) #6
d["blush"] = blush
return d
def make_descriptions(pd, ems, out_filename):
#pd = player data
#ems = emotion data
# get complete vagina description string
def get_v_str(desc, lj):
    # desc = vagina description string, lj = love juice level
    _, rest = desc.split(".", 1)
    return "dc" + ("%d." % lj) + rest
# get blush/blue face description string
def get_b_str(blush, blue):
    return "gc%d.%d" % (blush, blue)
# get complete face description string
def get_face_str(desc, sticker_type):
    _, rest = desc.split(".", 1)
    return "dd" + ("%d." % sticker_type) + rest
with open(out_filename, "w") as f:
#put special setup code here
for ind, stage in enumerate(pd["stages"]):
if ind == len(pd["stages"]) - 2:
    # the masturbation stage uses custom images; re-enable the
    # "continue" below to skip generating its descriptions
    # continue
    pass
stage_desc = version_str + stage["clothes"] # + pd["appearance"] + "_"
if "other" in stage and len(stage["other"]) > 0:
stage_desc += "_" + stage["other"]
for em_name, em in ems.items():  # .iteritems() was Python 2-only
blush_ind = stage["blush"] + em["blush_mod"]
if blush_ind < 0:
blush_ind = 0
if blush_ind >= len(pd["blush"]):
blush_ind = len(pd["blush"]) - 1
blush = pd["blush"][blush_ind]
em_desc = stage_desc + "_" + em["pose"]
em_desc += "_" + get_b_str(blush[0], 0)
#put in the strings that need to be replaced last, so that they don't get overwritten
em_desc += "_" + get_face_str(pd["face"], blush[1])
em_desc += "_" + get_v_str(pd["vagina"], stage["lj"])
image_name = "%d-%s" % (ind, em_name)
f.write("%s=%s\n\n" % (image_name, em_desc))
def write_descriptions(out_name):
character_data = get_image_data()
emotion_data = get_emotion_data()
make_descriptions(character_data, emotion_data, out_name)
if __name__ == "__main__":
write_descriptions(sys.argv[1])
| 53.322097 | 862 | 0.737164 | 3,518 | 14,237 | 2.643547 | 0.104036 | 0.035054 | 0.019355 | 0.013333 | 0.739785 | 0.722903 | 0.711935 | 0.708925 | 0.680538 | 0.680538 | 0 | 0.319988 | 0.077193 | 14,237 | 267 | 863 | 53.322097 | 0.387882 | 0.136405 | 0 | 0.45509 | 0 | 0.107784 | 0.733453 | 0.683282 | 0 | 0 | 0 | 0 | 0 | 1 | 0.041916 | false | 0.005988 | 0.005988 | 0.005988 | 0.077844 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 1 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 8 |
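
The generator above appends the "gc", "dd" and "dc" components after the pose string so that they win over earlier occurrences, which only works if the consumer resolves duplicate components last-wins. A minimal sketch of such a parser (my illustration, not part of the script; it assumes each underscore-separated token starts with a lowercase-letter prefix):

import re

def parse_components(desc):
    # later tokens with the same prefix overwrite earlier ones (last-wins)
    components = {}
    for token in desc.split("_"):
        m = re.match(r"([a-z]+)(.*)", token)
        if m:
            components[m.group(1)] = m.group(2)
    return components

parts = parse_components("dc40.1.1.1.1_gc0.0_dc80.1.1.1.1")
print(parts["dc"])  # -> "80.1.1.1.1": the appended component wins
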

6852acbc35b4c4ab3d11ee48927195f82c17c2d1 | 14,866 | py | Python | test/test_msa.py | davidhwyllie/findNeighbour4 | d42e10711e59e93ebf0e798fbb1598929f662c9c | ["MIT"] | null | null | null | test/test_msa.py | davidhwyllie/findNeighbour4 | d42e10711e59e93ebf0e798fbb1598929f662c9c | ["MIT"] | 14 | 2021-11-26T14:43:25.000Z | 2022-03-22T00:39:17.000Z | test/test_msa.py | davidhwyllie/findNeighbour4 | d42e10711e59e93ebf0e798fbb1598929f662c9c | ["MIT"] | null | null | null |
""" tests msa.py
A component of the findNeighbour4 system for bacterial relatedness monitoring
Copyright (C) 2021 David Wyllie david.wyllie@phe.gov.uk
repo: https://github.com/davidhwyllie/findNeighbour4
This program is free software: you can redistribute it and/or modify
it under the terms of the MIT License as published
by the Free Software Foundation. See <https://opensource.org/licenses/MIT>, and the LICENSE file.
"""
import os
import unittest
import time
import pandas as pd
from findn.msa import MSAResult, MSAStore
from findn.persistence import Persistence
# unittests
UNITTEST_MONGOCONN: str = "mongodb://localhost"
UNITTEST_RDBMSCONN: str = "sqlite://"
class Test_MSA(unittest.TestCase):
"""tests the MSAResult class"""
def runTest(self):
inputdict = {
"fconst": {},
"variant_positions": [0, 1, 2, 3],
"invalid_guids": [],
"valid_guids": [
"AAACGY-1",
"CCCCGY-2",
"TTTCGY-3",
"GGGGGY-4",
"NNNCGY-5",
"ACTCGY-6",
"TCTQGY-7",
"AAACGY-8",
],
"expected_p1": 0.16666666666666666,
"sample_size": 30,
"df_dict": {
"AAACGY-1": {
"aligned_seq": "AAAC",
"aligned_mseq": "AAAC",
"aligned_seq_len": 4,
"allN": 0,
"alignN": 0,
"allM": 1,
"alignM": 0,
"allN_or_M": 1,
"alignN_or_M": 0,
"p_value1": 1.0,
"p_value2": 1.0,
"p_value3": 1.0,
"p_value4": 1.0,
"observed_proportion": 0.0,
"expected_proportion1": 0.16666666666666666,
"expected_proportion2": 0.5,
"expected_proportion3": 0.5,
"expected_proportion4": 0.0,
"what_tested": "M",
},
"CCCCGY-2": {
"aligned_seq": "CCCC",
"aligned_mseq": "CCCC",
"aligned_seq_len": 4,
"allN": 0,
"alignN": 0,
"allM": 1,
"alignM": 0,
"allN_or_M": 1,
"alignN_or_M": 0,
"p_value1": 1.0,
"p_value2": 1.0,
"p_value3": 1.0,
"p_value4": 1.0,
"observed_proportion": 0.0,
"expected_proportion1": 0.16666666666666666,
"expected_proportion2": 0.5,
"expected_proportion3": 0.5,
"expected_proportion4": 0.0,
"what_tested": "M",
},
},
"what_tested": "M",
"outgroup": None,
"creation_time": "2019-11-17T23:46:00.098151",
}
m = MSAResult(**inputdict)
self.assertEqual(type(m.msa_fasta()), str)
self.assertEqual(type(m.msa_html()), str)
self.assertEqual(type(m.msa_dict()), dict)
self.assertEqual(type(m.serialise()), dict)
self.assertEqual(type(m.msa_interactive_depiction()), str)
self.assertEqual(type(m.df), pd.DataFrame)
self.assertEqual(
m.token, "msa|M|no_og|ddc4781ec984b66b0b5bf006a71b29cf1f523740"
)
# skip these tests if the NO_MONGO_TESTS variable exists
mongo_test = unittest.skipIf(
os.environ.get("NO_MONGO_TESTS", False), "no mongo tests performed"
)
@mongo_test
class Test_MSAStore_mongodb(unittest.TestCase):
"""tests the MSAStore class"""
def runTest(self):
inputdict1 = {
"fconst": {},
"variant_positions": [0, 1, 2, 3],
"invalid_guids": [],
"valid_guids": ["AAACGY-1", "CCCCGY-2"],
"expected_p1": 0.16666666666666666,
"sample_size": 30,
"df_dict": {
"AAACGY-1": {
"aligned_seq": "AAMM",
"aligned_mseq": "AAYR",
"aligned_seq_len": 4,
"allN": 0,
"alignN": 0,
"allM": 3,
"alignM": 2,
"allN_or_M": 1,
"alignN_or_M": 0,
"p_value1": 1.0,
"p_value2": 1.0,
"p_value3": 1.0,
"p_value4": 1.0,
"observed_proportion": 0.0,
"expected_proportion1": 0.16666666666666666,
"expected_proportion2": 0.5,
"expected_proportion3": 0.5,
"expected_proportion4": 0.0,
"what_tested": "M",
},
"CCCCGY-2": {
"aligned_seq": "CCCC",
"aligned_mseq": "CCCC",
"aligned_seq_len": 4,
"allN": 0,
"alignN": 0,
"allM": 1,
"alignM": 0,
"allN_or_M": 1,
"alignN_or_M": 0,
"p_value1": 1.0,
"p_value2": 1.0,
"p_value3": 1.0,
"p_value4": 1.0,
"observed_proportion": 0.0,
"expected_proportion1": 0.16666666666666666,
"expected_proportion2": 0.5,
"expected_proportion3": 0.5,
"expected_proportion4": 0.0,
"what_tested": "M",
},
},
"what_tested": "M",
"outgroup": None,
"creation_time": "2019-11-17T23:46:00.098151",
}
inputdict2 = {
"fconst": {},
"variant_positions": [0, 1, 2, 3],
"invalid_guids": [],
"valid_guids": ["AAACGY-1", "CCCCGY-2"],
"expected_p1": 0.16666666666666666,
"sample_size": 30,
"df_dict": {
"AAACGY-1": {
"aligned_seq": "AAAM",
"aligned_mseq": "AAAR",
"aligned_seq_len": 4,
"allN": 0,
"alignN": 0,
"allM": 1,
"alignM": 1,
"allN_or_M": 1,
"alignN_or_M": 0,
"p_value1": 1.0,
"p_value2": 1.0,
"p_value3": 1.0,
"p_value4": 1.0,
"observed_proportion": 0.0,
"expected_proportion1": 0.16666666666666666,
"expected_proportion2": 0.5,
"expected_proportion3": 0.5,
"expected_proportion4": 0.0,
"what_tested": "M",
},
"CCCCGY-2": {
"aligned_seq": "CCCC",
"aligned_mseq": "CCCC",
"aligned_seq_len": 4,
"allN": 0,
"alignN": 0,
"allM": 1,
"alignM": 0,
"allN_or_M": 1,
"alignN_or_M": 0,
"p_value1": 1.0,
"p_value2": 1.0,
"p_value3": 1.0,
"p_value4": 1.0,
"observed_proportion": 0.0,
"expected_proportion1": 0.16666666666666666,
"expected_proportion2": 0.5,
"expected_proportion3": 0.5,
"expected_proportion4": 0.0,
"what_tested": "M",
},
},
"what_tested": "M",
"outgroup": None,
"creation_time": "2019-11-17T23:46:00.098151",
}
m1 = MSAResult(**inputdict1)
m2 = MSAResult(**inputdict2)
guids1 = m1.valid_guids
guids2 = m2.valid_guids
pm = Persistence()
p = pm.get_storage_object(connString=UNITTEST_MONGOCONN, debug=2, verbose=True)
ms = MSAStore(p, in_ram_persistence_time=1)
t1 = ms.get_token("M", False, guids1)
t2 = ms.get_token("M", False, guids2)
self.assertFalse(ms.is_in_ram(t1))
self.assertFalse(ms.is_in_ram(t2))
ms.cache_in_ram(token=t1, msa_result=m1)
ms.cache_in_ram(token=t2, msa_result=m2)
self.assertTrue(ms.is_in_ram(t1))
self.assertTrue(ms.is_in_ram(t2))
# test purging of expired samples
time.sleep(2)
ms.purge_ram()
self.assertFalse(ms.is_in_ram(t1))
self.assertFalse(ms.is_in_ram(t2))
# store on disc
ms.persist(t1, m1)
ms.persist(t2, m2)
m1r = ms.load(t1)
m2r = ms.load(t2)
self.assertEqual(m1r.valid_guids, m1.valid_guids)
self.assertEqual(m2r.valid_guids, m2.valid_guids)
ms.unpersist([])
m1r = ms.load(t1)
m2r = ms.load(t2)
self.assertIsNone(m1r)
self.assertIsNone(m2r)
rdbms_test = unittest.skipIf(os.environ.get("NO_RDBMS_TESTS", False), "No rdbms tests")
@rdbms_test
class Test_MSAStore_rdbms(unittest.TestCase):
"""tests the MSAStore class"""
def runTest(self):
inputdict1 = {
"fconst": {},
"variant_positions": [0, 1, 2, 3],
"invalid_guids": [],
"valid_guids": ["AAACGY-1", "CCCCGY-2"],
"expected_p1": 0.16666666666666666,
"sample_size": 30,
"df_dict": {
"AAACGY-1": {
"aligned_seq": "AAMM",
"aligned_mseq": "AAYR",
"aligned_seq_len": 4,
"allN": 0,
"alignN": 0,
"allM": 3,
"alignM": 2,
"allN_or_M": 1,
"alignN_or_M": 0,
"p_value1": 1.0,
"p_value2": 1.0,
"p_value3": 1.0,
"p_value4": 1.0,
"observed_proportion": 0.0,
"expected_proportion1": 0.16666666666666666,
"expected_proportion2": 0.5,
"expected_proportion3": 0.5,
"expected_proportion4": 0.0,
"what_tested": "M",
},
"CCCCGY-2": {
"aligned_seq": "CCCC",
"aligned_mseq": "CCCC",
"aligned_seq_len": 4,
"allN": 0,
"alignN": 0,
"allM": 1,
"alignM": 0,
"allN_or_M": 1,
"alignN_or_M": 0,
"p_value1": 1.0,
"p_value2": 1.0,
"p_value3": 1.0,
"p_value4": 1.0,
"observed_proportion": 0.0,
"expected_proportion1": 0.16666666666666666,
"expected_proportion2": 0.5,
"expected_proportion3": 0.5,
"expected_proportion4": 0.0,
"what_tested": "M",
},
},
"what_tested": "M",
"outgroup": None,
"creation_time": "2019-11-17T23:46:00.098151",
}
inputdict2 = {
"fconst": {},
"variant_positions": [0, 1, 2, 3],
"invalid_guids": [],
"valid_guids": ["AAACGY-1", "CCCCGY-2"],
"expected_p1": 0.16666666666666666,
"sample_size": 30,
"df_dict": {
"AAACGY-1": {
"aligned_seq": "AAAM",
"aligned_mseq": "AAAR",
"aligned_seq_len": 4,
"allN": 0,
"alignN": 0,
"allM": 1,
"alignM": 1,
"allN_or_M": 1,
"alignN_or_M": 0,
"p_value1": 1.0,
"p_value2": 1.0,
"p_value3": 1.0,
"p_value4": 1.0,
"observed_proportion": 0.0,
"expected_proportion1": 0.16666666666666666,
"expected_proportion2": 0.5,
"expected_proportion3": 0.5,
"expected_proportion4": 0.0,
"what_tested": "M",
},
"CCCCGY-2": {
"aligned_seq": "CCCC",
"aligned_mseq": "CCCC",
"aligned_seq_len": 4,
"allN": 0,
"alignN": 0,
"allM": 1,
"alignM": 0,
"allN_or_M": 1,
"alignN_or_M": 0,
"p_value1": 1.0,
"p_value2": 1.0,
"p_value3": 1.0,
"p_value4": 1.0,
"observed_proportion": 0.0,
"expected_proportion1": 0.16666666666666666,
"expected_proportion2": 0.5,
"expected_proportion3": 0.5,
"expected_proportion4": 0.0,
"what_tested": "M",
},
},
"what_tested": "M",
"outgroup": None,
"creation_time": "2019-11-17T23:46:00.098151",
}
m1 = MSAResult(**inputdict1)
m2 = MSAResult(**inputdict2)
guids1 = m1.valid_guids
guids2 = m2.valid_guids
pm = Persistence()
p = pm.get_storage_object(connString=UNITTEST_RDBMSCONN, debug=2, verbose=True)
ms = MSAStore(p, in_ram_persistence_time=1)
t1 = ms.get_token("M", False, guids1)
t2 = ms.get_token("M", False, guids2)
self.assertFalse(ms.is_in_ram(t1))
self.assertFalse(ms.is_in_ram(t2))
ms.cache_in_ram(token=t1, msa_result=m1)
ms.cache_in_ram(token=t2, msa_result=m2)
self.assertTrue(ms.is_in_ram(t1))
self.assertTrue(ms.is_in_ram(t2))
# test purging of expired samples
time.sleep(2)
ms.purge_ram()
self.assertFalse(ms.is_in_ram(t1))
self.assertFalse(ms.is_in_ram(t2))
# store on disc
ms.persist(t1, m1)
ms.persist(t2, m2)
m1r = ms.load(t1)
m2r = ms.load(t2)
self.assertEqual(m1r.valid_guids, m1.valid_guids)
self.assertEqual(m2r.valid_guids, m2.valid_guids)
ms.unpersist([])
m1r = ms.load(t1)
m2r = ms.load(t2)
self.assertIsNone(m1r)
self.assertIsNone(m2r)
| 33.481982 | 98 | 0.434145 | 1,448 | 14,866 | 4.243785 | 0.140193 | 0.013019 | 0.014646 | 0.017575 | 0.836127 | 0.820342 | 0.81188 | 0.801465 | 0.801465 | 0.801465 | 0 | 0.103693 | 0.440805 | 14,866 | 443 | 99 | 33.557562 | 0.635511 | 0.044195 | 0 | 0.834225 | 0 | 0 | 0.234344 | 0.012835 | 0 | 0 | 0 | 0 | 0.072193 | 1 | 0.008021 | false | 0 | 0.016043 | 0 | 0.032086 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
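
The two MSAStore test classes above exercise the same lifecycle. A condensed sketch of that flow, assuming a findNeighbour4 installation with the module layout the tests import:

from findn.msa import MSAStore
from findn.persistence import Persistence

p = Persistence().get_storage_object(connString="sqlite://", debug=2, verbose=True)
ms = MSAStore(p, in_ram_persistence_time=1)

token = ms.get_token("M", False, ["AAACGY-1", "CCCCGY-2"])  # deterministic per what_tested/outgroup/guids
# ms.cache_in_ram(token=token, msa_result=msa)  # volatile cache; ms.purge_ram() drops expired entries
# ms.persist(token, msa)                        # durable store; ms.load(token) retrieves it
# ms.unpersist([])                              # remove everything whose token is not in the keep-list
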

68b598e17f3073ae1e3a74adc7d1c10e5845f58c | 152 | py | Python | jekyllutils/helpers/messages.py | queirozfcom/jekyll-utils | 461b90ac11ea313a5506841dd14c024d544769d1 | ["MIT"] | 3 | 2016-11-23T21:00:58.000Z | 2019-11-10T20:01:49.000Z | jekyllutils/helpers/messages.py | queirozfcom/jekyll-utils | 461b90ac11ea313a5506841dd14c024d544769d1 | ["MIT"] | 9 | 2016-02-16T17:20:06.000Z | 2020-06-20T19:06:32.000Z | jekyllutils/helpers/messages.py | queirozfcom/jekyll-utils | 461b90ac11ea313a5506841dd14c024d544769d1 | ["MIT"] | null | null | null |
def wrap_error(msg):
    return "\033[31mERROR: \033[00m{0}".format(msg)


def wrap_success(msg):
    return "\033[32mSUCCESS: \033[00m{0}".format(msg)
| 21.714286 | 53 | 0.671053 | 24 | 152 | 4.166667 | 0.5 | 0.14 | 0.24 | 0.26 | 0.32 | 0 | 0 | 0 | 0 | 0 | 0 | 0.166667 | 0.131579 | 152 | 6 | 54 | 25.333333 | 0.590909 | 0 | 0 | 0 | 0 | 0 | 0.355263 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.5 | false | 0 | 0 | 0.5 | 1 | 0 | 1 | 0 | 0 | null | 0 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 7 |
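
A short usage sketch for the two helpers above; the ANSI escape codes colour only the prefix and reset before the message text, so they compose with plain print():

print(wrap_error("config file not found"))   # red "ERROR: " prefix
print(wrap_success("post published"))        # green "SUCCESS: " prefix
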

d7ad1309d340529b0c9b2c4ab6ccf2906053d06e | 9,239 | py | Python | agents/callback.py | mmcenta/left-shift | e9b89b29accc5faa6e41ce604df5c70ddd631ed1 | ["MIT"] | 39 | 2020-03-26T17:46:02.000Z | 2022-01-07T15:39:31.000Z | agents/callback.py | mmcenta/left-shift | e9b89b29accc5faa6e41ce604df5c70ddd631ed1 | ["MIT"] | 8 | 2020-03-25T19:40:03.000Z | 2022-02-10T00:44:45.000Z | agents/callback.py | mmcenta/left-shift | e9b89b29accc5faa6e41ce604df5c70ddd631ed1 | ["MIT"] | 4 | 2020-03-26T18:14:34.000Z | 2022-01-07T15:39:33.000Z |
# unusable until we find how to access states within _on_step()
from stable_baselines.common.callbacks import BaseCallback
import numpy as np
import os
class CustomCallback(BaseCallback):
"""
A custom callback that derives from ``BaseCallback``.
:param verbose: (int) Verbosity level 0: not output 1: info 2: debug
"""
def __init__(self, log_dir='logs', hist_freq:int=100, verbose=0, log_file='log', eval_episodes=100):
super(CustomCallback, self).__init__(verbose)
self.num_episodes = 1
self.max_val = 0
self.histogram = np.zeros(15, dtype = int)
self.verbose = verbose
self.hist_freq = int(hist_freq)
self.log_dir = log_dir
self.log_file = log_file
self.eval_episodes = eval_episodes
self.last_timestep = 1
self.episode_lengths= []
self.episode_maxtiles = []
# Those variables will be accessible in the callback
# (they are defined in the base class)
# The RL model
# self.model = None # type: BaseRLModel
# An alias for self.model.get_env(), the environment used for training
# self.training_env = None # type: Union[gym.Env, VecEnv, None]
# Number of time the callback was called
# self.n_calls = 0 # type: int
# self.num_timesteps = 0 # type: int
# local and global variables
# self.locals = None # type: Dict[str, Any]
# self.globals = None # type: Dict[str, Any]
# The logger object, used to report things in the terminal
# self.logger = None # type: logger.Logger
# # Sometimes, for event callback, it is useful
# # to have access to the parent object
# self.parent = None # type: Optional[BaseCallback]
def _on_training_start(self) -> None:
"""
This method is called before the first rollout starts.
"""
# print(self.verbose, type(self.hist_freq))
pass
def _on_rollout_start(self) -> None:
"""
A rollout is the collection of environment interaction
using the current policy.
This event is triggered before collecting new samples.
"""
pass
def _on_step(self) -> bool:
"""
This method will be called by the model after each call to `env.step()`.
For child callback (of an `EventCallback`), this will be called
when the event is triggered.
:return: (bool) If the callback returns False, training is aborted early.
"""
timestep = self.locals['self'].num_timesteps
num_episodes = len(self.locals['episode_rewards'])
if num_episodes > self.num_episodes:
self.num_episodes = num_episodes
self.histogram[self.max_val] += 1
self.episode_maxtiles.append(self.max_val)
self.episode_lengths.append(timestep-self.last_timestep)
self.last_timestep = timestep
if self.hist_freq > 0 and num_episodes % self.hist_freq == 0 :
self._dump_values()
self.max_val = self.locals['self'].get_env().maximum_tile()
return True
def _on_rollout_end(self) -> None:
"""
This event is triggered before updating the policy.
"""
pass
def _on_training_end(self) -> None:
"""
This event is triggered before exiting the `learn()` method.
"""
pass
def _dump_values(self):
timestep = self.locals['self'].num_timesteps
num_episodes = self.num_episodes
if self.log_dir and self.log_file:
log_path = os.path.join(self.log_dir, self.log_file)
log_file = log_path + '.npz'
try:
    os.replace(log_file, log_file + '.bkp')
except OSError:
    # nothing to back up on the first run
    pass
np.savez(log_path, rewards=self.locals['episode_rewards'], lengths=self.episode_lengths, max_tiles=self.episode_maxtiles)
if self.verbose:
print()
print(f'#episodes: {num_episodes}')
print(f'#timesteps: {timestep}')
print(f'Mean rewards: {np.mean(self.locals["episode_rewards"][-self.eval_episodes:])}')
print(f'Mean episode length: {np.mean(self.episode_lengths[-self.eval_episodes:])}')
print('Histogram of maximum tile achieved:')
for i in range(1,15):
if self.histogram[i] > 0:
print(f'{2**i}: {self.histogram[i]}')
print()
class CustomCallbackPPO2(BaseCallback):
"""
A custom callback that derives from ``BaseCallback``.
:param verbose: (int) Verbosity level 0: not output 1: info 2: debug
"""
def __init__(self, log_dir='logs', hist_freq:int=100, verbose=0, log_file='log', eval_episodes=100):
super(CustomCallbackPPO2, self).__init__(verbose)
self.num_episodes = 1
self.max_val = 0
self.histogram = np.zeros(15, dtype = int)
self.verbose = verbose
self.hist_freq = int(hist_freq)
self.log_dir = log_dir
self.log_file = log_file
self.eval_episodes = eval_episodes
self.last_timestep = 1
# self.episode_lengths= []
self.episode_maxtiles = []
# Those variables will be accessible in the callback
# (they are defined in the base class)
# The RL model
# self.model = None # type: BaseRLModel
# An alias for self.model.get_env(), the environment used for training
# self.training_env = None # type: Union[gym.Env, VecEnv, None]
# Number of time the callback was called
# self.n_calls = 0 # type: int
# self.num_timesteps = 0 # type: int
# local and global variables
# self.locals = None # type: Dict[str, Any]
# self.globals = None # type: Dict[str, Any]
# The logger object, used to report things in the terminal
# self.logger = None # type: logger.Logger
# # Sometimes, for event callback, it is useful
# # to have access to the parent object
# self.parent = None # type: Optional[BaseCallback]
def _on_training_start(self) -> None:
"""
This method is called before the first rollout starts.
"""
# print(self.verbose, type(self.hist_freq))
pass
def _on_rollout_start(self) -> None:
"""
A rollout is the collection of environment interaction
using the current policy.
This event is triggered before collecting new samples.
"""
pass
def _on_step(self) -> bool:
"""
This method will be called by the model after each call to `env.step()`.
For child callback (of an `EventCallback`), this will be called
when the event is triggered.
:return: (bool) If the callback returns False, training is aborted early.
"""
timestep = self.locals['self'].num_timesteps
env = self.training_env
episode_rewards = env.get_attr('episode_rewards')
# one reward list per vectorized sub-environment
num_episodes = sum(len(rewards) for rewards in episode_rewards)
if num_episodes > self.num_episodes:
self.num_episodes = num_episodes
self.histogram[self.max_val] += 1
self.episode_maxtiles.append(self.max_val)
# self.episode_lengths.append(timestep-self.last_timestep)
self.last_timestep = timestep
if self.hist_freq > 0 and num_episodes % self.hist_freq == 0 :
self._dump_values()
# env_method() returns one value per sub-environment; track the largest
self.max_val = max(env.env_method('maximum_tile'))
return True
def _on_rollout_end(self) -> None:
"""
This event is triggered before updating the policy.
"""
pass
def _on_training_end(self) -> None:
"""
This event is triggered before exiting the `learn()` method.
"""
pass
def _dump_values(self):
timestep = self.locals['self'].num_timesteps
num_episodes = self.num_episodes
env = self.training_env
episode_rewards = env.get_attr('episode_rewards')
if self.log_dir and self.log_file:
log_path = os.path.join(self.log_dir, self.log_file)
log_file = log_path + '.npz'
try:
    os.replace(log_file, log_file + '.bkp')
except OSError:
    # nothing to back up on the first run
    pass
np.savez(log_path, rewards=episode_rewards, max_tiles=self.episode_maxtiles)
if self.verbose:
n_env = len(episode_rewards)
print()
print(f'#episodes: {num_episodes}')
print(f'#timesteps: {timestep}')
mean_rewards = 0
for l in episode_rewards:
mean_rewards += np.mean(l[-self.eval_episodes//n_env:])/n_env
print(f'Mean rewards: {mean_rewards}')
# print(f'Mean episode length: {np.mean(self.episode_lengths[-self.eval_episodes:])}')
print('Histogram of maximum tile achieved:')
for i in range(1,15):
if self.histogram[i] > 0:
print(f'{2**i}: {self.histogram[i]}')
print()
| 38.3361 | 133 | 0.597467 | 1,156 | 9,239 | 4.603806 | 0.15917 | 0.043405 | 0.022548 | 0.015784 | 0.908117 | 0.900225 | 0.892334 | 0.892334 | 0.875235 | 0.875235 | 0 | 0.008403 | 0.30447 | 9,239 | 240 | 134 | 38.495833 | 0.819795 | 0.333694 | 0 | 0.832 | 0 | 0 | 0.090292 | 0.020181 | 0 | 0 | 0 | 0 | 0 | 1 | 0.112 | false | 0.08 | 0.024 | 0 | 0.168 | 0.12 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 8 |
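
A minimal sketch of wiring CustomCallback into a stable-baselines training run. The environment id is a placeholder; the callback assumes an environment exposing maximum_tile() and, for the non-PPO2 variant, an 'episode_rewards' entry in the training locals:

from stable_baselines import DQN

cb = CustomCallback(log_dir='logs', hist_freq=100, verbose=1,
                    log_file='dqn_run', eval_episodes=100)
model = DQN('MlpPolicy', 'LeftShift2048-v0', verbose=0)  # hypothetical env id
model.learn(total_timesteps=100000, callback=cb)         # _on_step() fires after every env step
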

d7c64ab2303f43a8b9e9f3f82d8532deac05fc3a | 7,933 | py | Python | frameworks/get_framework.py | proteekroy/samoo | a3497d65e060c8713b5a2153952cc4d9c83ba60a | ["MIT"] | 6 | 2019-02-11T20:12:00.000Z | 2020-04-12T10:41:58.000Z | frameworks/get_framework.py | proteekroy/samoo | a3497d65e060c8713b5a2153952cc4d9c83ba60a | ["MIT"] | null | null | null | frameworks/get_framework.py | proteekroy/samoo | a3497d65e060c8713b5a2153952cc4d9c83ba60a | ["MIT"] | 2 | 2019-06-20T11:16:51.000Z | 2020-04-12T10:42:10.000Z |
from frameworks.framework11 import Framework11
from frameworks.framework12 import Framework12
from frameworks.framework21 import Framework21
from frameworks.framework22 import Framework22
from frameworks.framework31 import Framework31
from frameworks.framework32 import Framework32
from frameworks.framework41 import Framework41
from frameworks.framework42 import Framework42
from frameworks.framework5 import Framework5
from frameworks.framework6 import Framework6
def get_framework(framework_id=None,
problem=None,
algorithm=None,
model_list=None,
ref_dirs=None,
curr_ref_id=None,
g_aggregate_func=None,
f_aggregate_func=None,
m5_fg_aggregate_func=None,
m6_fg_aggregate_func=None,
*args,
**kwargs):
frameworks = framework_id.split(',')
if len(frameworks) != 1:
    raise Exception("Multiple Frameworks not supported")
# dispatch table: every framework class takes the same constructor arguments
framework_classes = {
    '11': Framework11, '12': Framework12,
    '21': Framework21, '22': Framework22,
    '31': Framework31, '32': Framework32,
    '41': Framework41, '42': Framework42,
    '5': Framework5,
    '6A': Framework6, '6B': Framework6,
}
if framework_id not in framework_classes:
    raise Exception("Framework definition not supported")
return framework_classes[framework_id](framework_id=framework_id,
                                       problem=problem,
                                       algorithm=algorithm,
                                       model_list=model_list,
                                       ref_dirs=ref_dirs,
                                       curr_ref_id=curr_ref_id,
                                       g_aggregate_func=g_aggregate_func,
                                       f_aggregate_func=f_aggregate_func,
                                       m5_fg_aggregate_func=m5_fg_aggregate_func,
                                       m6_fg_aggregate_func=m6_fg_aggregate_func,
                                       *args, **kwargs)
| 52.536424 | 73 | 0.47876 | 703 | 7,933 | 4.937411 | 0.083926 | 0.314607 | 0.181504 | 0.102852 | 0.729761 | 0.729761 | 0.729761 | 0.729761 | 0.729761 | 0.729761 | 0 | 0.027818 | 0.474348 | 7,933 | 150 | 74 | 52.886667 | 0.804556 | 0 | 0 | 0.689189 | 0 | 0 | 0.011219 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.006757 | false | 0 | 0.067568 | 0 | 0.141892 | 0 | 0 | 0 | 0 | null | 1 | 1 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
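
Usage sketch for the factory above; every argument other than framework_id is a placeholder (real samoo calls pass problem, algorithm and reference-direction objects):

framework = get_framework(framework_id='21',
                          problem=my_problem,      # placeholder objects
                          algorithm=my_algorithm,
                          model_list=my_models,
                          ref_dirs=my_ref_dirs,
                          curr_ref_id=0)
# '6A' and '6B' both dispatch to Framework6; an unknown id or a
# comma-separated list such as '11,21' raises an Exception.
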

d7cefb9fc757c32265b856aa6168b644882fd07a | 131 | py | Python | src/gmj/depends.py | nikell28/good-morning-jarvis | 8783be63f5a53bb47c40b5b40247a21d5f56bbb5 | ["MIT"] | null | null | null | src/gmj/depends.py | nikell28/good-morning-jarvis | 8783be63f5a53bb47c40b5b40247a21d5f56bbb5 | ["MIT"] | null | null | null | src/gmj/depends.py | nikell28/good-morning-jarvis | 8783be63f5a53bb47c40b5b40247a21d5f56bbb5 | ["MIT"] | null | null | null |
from gmj.service import GoodMorningService


def get_good_morning_service() -> GoodMorningService:
    return GoodMorningService()
| 21.833333 | 53 | 0.816794 | 13 | 131 | 8 | 0.769231 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.122137 | 131 | 5 | 54 | 26.2 | 0.904348 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.333333 | true | 0 | 0.333333 | 0.333333 | 1 | 0 | 1 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | 1 | 1 | 1 | 0 | 0 | 7 |
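
The zero-argument factory above has the shape FastAPI expects from a dependency provider. A hypothetical route using it (assumption: the project uses FastAPI; the endpoint and the greet() method are my illustration, not part of the repository):

from fastapi import Depends, FastAPI
from gmj.depends import get_good_morning_service
from gmj.service import GoodMorningService

app = FastAPI()

@app.get("/morning")
def morning(service: GoodMorningService = Depends(get_good_morning_service)):
    return {"message": service.greet()}  # hypothetical method name
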

d7cfe4ad88d702f45128d71f9076ff80ecf391c5 | 15,428 | py | Python | dot_vim/plugged/powerline/tests/test_python/test_logging.py | gabefgonc/san-francisco-rice-dotfiles | 60ff3539f34ecfff6d7bce895497e2a3805910d4 | ["MIT"] | 11,435 | 2015-01-01T03:32:34.000Z | 2022-03-31T20:39:05.000Z | dot_vim/plugged/powerline/tests/test_python/test_logging.py | gabefgonc/san-francisco-rice-dotfiles | 60ff3539f34ecfff6d7bce895497e2a3805910d4 | ["MIT"] | 879 | 2015-01-02T11:59:30.000Z | 2022-03-24T09:52:17.000Z | dot_vim/plugged/powerline/tests/test_python/test_logging.py | gabefgonc/san-francisco-rice-dotfiles | 60ff3539f34ecfff6d7bce895497e2a3805910d4 | ["MIT"] | 1,044 | 2015-01-05T22:37:53.000Z | 2022-03-17T19:43:16.000Z |
# vim:fileencoding=utf-8:noet
'''Tests for various logging features'''
from __future__ import (unicode_literals, division, absolute_import, print_function)
import sys
import re
import codecs
import os
from io import StringIO
from shutil import rmtree
from powerline import finish_common_config, create_logger
from tests.modules import TestCase
from tests.modules.lib import replace_attr
TIMESTAMP_RE = r'\d{4}-\d\d-\d\d \d\d:\d\d:\d\d,\d{3}'
class TestRE(TestCase):
def assertMatches(self, text, regexp):
self.assertTrue(
re.match(regexp, text),
'{0!r} did not match {1!r}'.format(text, regexp),
)
def close_handlers(logger):
for handler in logger.handlers:
handler.close()
class TestHandlers(TestRE):
def test_stderr_handler_is_default(self):
out = StringIO()
err = StringIO()
with replace_attr(sys, 'stdout', out, 'stderr', err):
common_config = finish_common_config('utf-8', {})
logger, pl, get_module_attr = create_logger(common_config)
pl.error('Foo')
close_handlers(logger)
self.assertMatches(err.getvalue(), '^' + TIMESTAMP_RE + ':ERROR:__unknown__:Foo\n$')
self.assertEqual(out.getvalue(), '')
def test_stream_override(self):
out = StringIO()
err = StringIO()
stream = StringIO()
with replace_attr(sys, 'stdout', out, 'stderr', err):
common_config = finish_common_config('utf-8', {})
logger, pl, get_module_attr = create_logger(common_config, stream=stream)
pl.error('Foo')
close_handlers(logger)
self.assertMatches(stream.getvalue(), '^' + TIMESTAMP_RE + ':ERROR:__unknown__:Foo\n$')
self.assertEqual(err.getvalue(), '')
self.assertEqual(out.getvalue(), '')
def test_explicit_none(self):
out = StringIO()
err = StringIO()
stream = StringIO()
with replace_attr(sys, 'stdout', out, 'stderr', err):
common_config = finish_common_config('utf-8', {'log_file': [None]})
logger, pl, get_module_attr = create_logger(common_config, stream=stream)
pl.error('Foo')
close_handlers(logger)
self.assertMatches(stream.getvalue(), '^' + TIMESTAMP_RE + ':ERROR:__unknown__:Foo\n$')
self.assertEqual(err.getvalue(), '')
self.assertEqual(out.getvalue(), '')
def test_explicit_stream_handler(self):
out = StringIO()
err = StringIO()
stream = StringIO()
with replace_attr(sys, 'stdout', out, 'stderr', err):
common_config = finish_common_config('utf-8', {'log_file': [['logging.StreamHandler', [[]]]]})
logger, pl, get_module_attr = create_logger(common_config, stream=stream)
pl.error('Foo')
close_handlers(logger)
self.assertEqual(stream.getvalue(), '')
self.assertMatches(err.getvalue(), '^' + TIMESTAMP_RE + ':ERROR:__unknown__:Foo\n$')
self.assertEqual(out.getvalue(), '')
def test_explicit_stream_handler_implicit_stream(self):
out = StringIO()
err = StringIO()
stream = StringIO()
with replace_attr(sys, 'stdout', out, 'stderr', err):
common_config = finish_common_config('utf-8', {'log_file': [['logging.StreamHandler', []]]})
logger, pl, get_module_attr = create_logger(common_config, stream=stream)
pl.error('Foo')
close_handlers(logger)
self.assertMatches(stream.getvalue(), '^' + TIMESTAMP_RE + ':ERROR:__unknown__:Foo\n$')
self.assertEqual(err.getvalue(), '')
self.assertEqual(out.getvalue(), '')
def test_file_handler(self):
out = StringIO()
err = StringIO()
stream = StringIO()
file_name = 'test_logging-test_file_handler'
with replace_attr(sys, 'stdout', out, 'stderr', err):
common_config = finish_common_config('utf-8', {'log_file': file_name})
try:
logger, pl, get_module_attr = create_logger(common_config, stream=stream)
pl.error('Foo')
close_handlers(logger)
with codecs.open(file_name, encoding='utf-8') as fp:
self.assertMatches(fp.read(), '^' + TIMESTAMP_RE + ':ERROR:__unknown__:Foo\n$')
finally:
os.unlink(file_name)
self.assertEqual(stream.getvalue(), '')
self.assertEqual(err.getvalue(), '')
self.assertEqual(out.getvalue(), '')
def test_file_handler_create_dir(self):
out = StringIO()
err = StringIO()
stream = StringIO()
file_name = 'test_logging-test_file_handler_create_dir/file'
self.assertFalse(os.path.isdir(os.path.dirname(file_name)))
with replace_attr(sys, 'stdout', out, 'stderr', err):
common_config = finish_common_config('utf-8', {'log_file': file_name})
try:
logger, pl, get_module_attr = create_logger(common_config, stream=stream)
pl.error('Foo')
close_handlers(logger)
self.assertTrue(os.path.isdir(os.path.dirname(file_name)))
with codecs.open(file_name, encoding='utf-8') as fp:
self.assertMatches(fp.read(), '^' + TIMESTAMP_RE + ':ERROR:__unknown__:Foo\n$')
finally:
rmtree(os.path.dirname(file_name))
self.assertEqual(stream.getvalue(), '')
self.assertEqual(err.getvalue(), '')
self.assertEqual(out.getvalue(), '')
def test_multiple_files(self):
out = StringIO()
err = StringIO()
stream = StringIO()
file_name_1 = 'test_logging-test_multiple_files-1'
file_name_2 = file_name_1[:-1] + '2'
with replace_attr(sys, 'stdout', out, 'stderr', err):
common_config = finish_common_config('utf-8', {'log_file': [file_name_1, file_name_2]})
try:
try:
logger, pl, get_module_attr = create_logger(common_config, stream=stream)
pl.error('Foo')
close_handlers(logger)
for file_name in (file_name_1, file_name_2):
with codecs.open(file_name, encoding='utf-8') as fp:
self.assertMatches(fp.read(), '^' + TIMESTAMP_RE + ':ERROR:__unknown__:Foo\n$')
finally:
os.unlink(file_name_1)
finally:
os.unlink(file_name_2)
self.assertEqual(stream.getvalue(), '')
self.assertEqual(err.getvalue(), '')
self.assertEqual(out.getvalue(), '')
def test_multiple_files_and_stream(self):
out = StringIO()
err = StringIO()
stream = StringIO()
file_name_1 = 'test_logging-test_multiple_files_and_stream-1'
file_name_2 = file_name_1[:-1] + '2'
with replace_attr(sys, 'stdout', out, 'stderr', err):
common_config = finish_common_config('utf-8', {'log_file': [file_name_1, file_name_2, None]})
try:
try:
logger, pl, get_module_attr = create_logger(common_config, stream=stream)
pl.error('Foo')
close_handlers(logger)
for file_name in (file_name_1, file_name_2):
with codecs.open(file_name, encoding='utf-8') as fp:
self.assertMatches(fp.read(), '^' + TIMESTAMP_RE + ':ERROR:__unknown__:Foo\n$')
finally:
os.unlink(file_name_1)
finally:
os.unlink(file_name_2)
self.assertMatches(stream.getvalue(), '^' + TIMESTAMP_RE + ':ERROR:__unknown__:Foo\n$')
self.assertEqual(err.getvalue(), '')
self.assertEqual(out.getvalue(), '')
def test_handler_args(self):
out = StringIO()
err = StringIO()
stream = StringIO()
file_name = 'test_logging-test_handler_args'
with replace_attr(sys, 'stdout', out, 'stderr', err):
common_config = finish_common_config('utf-8', {'log_file': [
['RotatingFileHandler', [[file_name]]]
]})
try:
logger, pl, get_module_attr = create_logger(common_config, stream=stream)
pl.error('Foo')
close_handlers(logger)
with codecs.open(file_name, encoding='utf-8') as fp:
self.assertMatches(fp.read(), '^' + TIMESTAMP_RE + ':ERROR:__unknown__:Foo\n$')
finally:
os.unlink(file_name)
self.assertEqual(stream.getvalue(), '')
self.assertEqual(err.getvalue(), '')
self.assertEqual(out.getvalue(), '')
def test_handler_args_kwargs(self):
out = StringIO()
err = StringIO()
stream = StringIO()
file_name = 'test_logging-test_handler_args_kwargs'
with replace_attr(sys, 'stdout', out, 'stderr', err):
common_config = finish_common_config('utf-8', {'log_file': [
['RotatingFileHandler', [[file_name], {'maxBytes': 1, 'backupCount': 1}]]
]})
try:
try:
logger, pl, get_module_attr = create_logger(common_config, stream=stream)
pl.error('Foo')
pl.error('Bar')
close_handlers(logger)
with codecs.open(file_name, encoding='utf-8') as fp:
self.assertMatches(fp.read(), '^' + TIMESTAMP_RE + ':ERROR:__unknown__:Bar\n$')
with codecs.open(file_name + '.1', encoding='utf-8') as fp:
self.assertMatches(fp.read(), '^' + TIMESTAMP_RE + ':ERROR:__unknown__:Foo\n$')
finally:
os.unlink(file_name + '.1')
finally:
os.unlink(file_name)
self.assertEqual(stream.getvalue(), '')
self.assertEqual(err.getvalue(), '')
self.assertEqual(out.getvalue(), '')
def test_logger_level(self):
out = StringIO()
err = StringIO()
stream = StringIO()
stream1 = StringIO()
stream2 = StringIO()
with replace_attr(sys, 'stdout', out, 'stderr', err):
common_config = finish_common_config('utf-8', {'log_file': [
['logging.StreamHandler', [[stream1]], 'WARNING'],
['logging.StreamHandler', [[stream2]], 'ERROR'],
]})
logger, pl, get_module_attr = create_logger(common_config, stream=stream)
pl.warn('Foo')
pl.error('Bar')
close_handlers(logger)
self.assertMatches(stream1.getvalue(), (
'^' + TIMESTAMP_RE + ':WARNING:__unknown__:Foo\n'
+ TIMESTAMP_RE + ':ERROR:__unknown__:Bar\n$'
))
self.assertMatches(stream2.getvalue(), '^' + TIMESTAMP_RE + ':ERROR:__unknown__:Bar\n$')
self.assertEqual(stream.getvalue(), '')
self.assertEqual(err.getvalue(), '')
self.assertEqual(out.getvalue(), '')
def test_logger_level_not_overriding_default(self):
out = StringIO()
err = StringIO()
stream = StringIO()
stream1 = StringIO()
with replace_attr(sys, 'stdout', out, 'stderr', err):
common_config = finish_common_config('utf-8', {'log_file': [
['logging.StreamHandler', [[stream1]], 'DEBUG'],
]})
logger, pl, get_module_attr = create_logger(common_config, stream=stream)
pl.debug('Foo')
pl.error('Bar')
close_handlers(logger)
self.assertMatches(stream1.getvalue(), '^' + TIMESTAMP_RE + ':ERROR:__unknown__:Bar\n$')
self.assertEqual(stream.getvalue(), '')
self.assertEqual(err.getvalue(), '')
self.assertEqual(out.getvalue(), '')
def test_top_log_level(self):
out = StringIO()
err = StringIO()
stream = StringIO()
stream1 = StringIO()
with replace_attr(sys, 'stdout', out, 'stderr', err):
common_config = finish_common_config('utf-8', {'log_file': [
['logging.StreamHandler', [[stream1]], 'DEBUG'],
], 'log_level': 'DEBUG'})
logger, pl, get_module_attr = create_logger(common_config, stream=stream)
pl.debug('Foo')
pl.error('Bar')
close_handlers(logger)
self.assertMatches(stream1.getvalue(), (
'^' + TIMESTAMP_RE + ':DEBUG:__unknown__:Foo\n'
+ TIMESTAMP_RE + ':ERROR:__unknown__:Bar\n$'
))
self.assertEqual(stream.getvalue(), '')
self.assertEqual(err.getvalue(), '')
self.assertEqual(out.getvalue(), '')
def test_logger_format(self):
out = StringIO()
err = StringIO()
stream = StringIO()
stream1 = StringIO()
with replace_attr(sys, 'stdout', out, 'stderr', err):
common_config = finish_common_config('utf-8', {'log_file': [
['logging.StreamHandler', [[stream1]], 'WARNING', 'FOO'],
]})
logger, pl, get_module_attr = create_logger(common_config, stream=stream)
pl.warn('Foo')
pl.error('Bar')
close_handlers(logger)
self.assertEqual(stream1.getvalue(), 'FOO\nFOO\n')
self.assertEqual(stream.getvalue(), '')
self.assertEqual(err.getvalue(), '')
self.assertEqual(out.getvalue(), '')
def test_top_log_format(self):
out = StringIO()
err = StringIO()
stream = StringIO()
stream1 = StringIO()
stream2 = StringIO()
with replace_attr(sys, 'stdout', out, 'stderr', err):
common_config = finish_common_config('utf-8', {'log_file': [
['logging.StreamHandler', [[stream1]], 'WARNING', 'FOO'],
['logging.StreamHandler', [[stream2]], 'WARNING'],
], 'log_format': 'BAR'})
logger, pl, get_module_attr = create_logger(common_config, stream=stream)
pl.warn('Foo')
pl.error('Bar')
close_handlers(logger)
self.assertEqual(stream2.getvalue(), 'BAR\nBAR\n')
self.assertEqual(stream1.getvalue(), 'FOO\nFOO\n')
self.assertEqual(stream.getvalue(), '')
self.assertEqual(err.getvalue(), '')
self.assertEqual(out.getvalue(), '')
class TestPowerlineLogger(TestRE):
def test_args_formatting(self):
stream = StringIO()
common_config = finish_common_config('utf-8', {})
logger, pl, get_module_attr = create_logger(common_config, stream=stream)
pl.warn('foo {0}', 'Test')
pl.warn('bar {0!r}', 'Test')
close_handlers(logger)
self.assertMatches(stream.getvalue(), (
'^' + TIMESTAMP_RE + ':WARNING:__unknown__:foo Test\n'
+ TIMESTAMP_RE + ':WARNING:__unknown__:bar u?\'Test\'\n$'
))
def test_prefix_formatting(self):
stream = StringIO()
common_config = finish_common_config('utf-8', {})
logger, pl, get_module_attr = create_logger(common_config, stream=stream)
pl.prefix = '1'
pl.warn('foo')
pl.prefix = '2'
pl.warn('bar')
close_handlers(logger)
self.assertMatches(stream.getvalue(), (
'^' + TIMESTAMP_RE + ':WARNING:__unknown__:1:foo\n'
+ TIMESTAMP_RE + ':WARNING:__unknown__:2:bar\n$'
))
def test_kwargs_formatting(self):
stream = StringIO()
common_config = finish_common_config('utf-8', {})
logger, pl, get_module_attr = create_logger(common_config, stream=stream)
pl.warn('foo {arg}', arg='Test')
pl.warn('bar {arg!r}', arg='Test')
close_handlers(logger)
self.assertMatches(stream.getvalue(), (
'^' + TIMESTAMP_RE + ':WARNING:__unknown__:foo Test\n'
+ TIMESTAMP_RE + ':WARNING:__unknown__:bar u?\'Test\'\n$'
))
def test_args_kwargs_formatting(self):
stream = StringIO()
common_config = finish_common_config('utf-8', {})
logger, pl, get_module_attr = create_logger(common_config, stream=stream)
pl.warn('foo {0!r} {arg}', 'Test0', arg='Test')
pl.warn('bar {0} {arg!r}', 'Test0', arg='Test')
close_handlers(logger)
self.assertMatches(stream.getvalue(), (
'^' + TIMESTAMP_RE + ':WARNING:__unknown__:foo u?\'Test0\' Test\n'
+ TIMESTAMP_RE + ':WARNING:__unknown__:bar Test0 u?\'Test\'\n$'
))
def test_exception_formatting(self):
stream = StringIO()
common_config = finish_common_config('utf-8', {})
logger, pl, get_module_attr = create_logger(common_config, stream=stream)
try:
raise ValueError('foo')
except ValueError:
pl.exception('Message')
close_handlers(logger)
self.assertMatches(stream.getvalue(), (
'^' + TIMESTAMP_RE + ':ERROR:__unknown__:Message\n'
+ 'Traceback \\(most recent call last\\):\n'
+ '(?: File ".*?", line \\d+, in \\w+\n [^\n]*\n)+'
+ 'ValueError: foo\n$'
))
def test_levels(self):
stream = StringIO()
common_config = finish_common_config('utf-8', {'log_level': 'DEBUG'})
logger, pl, get_module_attr = create_logger(common_config, stream=stream)
pl.debug('1')
pl.info('2')
pl.warn('3')
pl.error('4')
pl.critical('5')
close_handlers(logger)
self.assertMatches(stream.getvalue(), (
'^' + TIMESTAMP_RE + ':DEBUG:__unknown__:1\n'
+ TIMESTAMP_RE + ':INFO:__unknown__:2\n'
+ TIMESTAMP_RE + ':WARNING:__unknown__:3\n'
+ TIMESTAMP_RE + ':ERROR:__unknown__:4\n'
+ TIMESTAMP_RE + ':CRITICAL:__unknown__:5\n$'
))
old_cwd = None
def setUpModule():
global old_cwd
global __file__
old_cwd = os.getcwd()
__file__ = os.path.abspath(__file__)
os.chdir(os.path.dirname(os.path.dirname(__file__)))
def tearDownModule():
global old_cwd
os.chdir(old_cwd)
if __name__ == '__main__':
from tests.modules import main
main()
| 32.965812 | 97 | 0.681034 | 1,983 | 15,428 | 5.016641 | 0.077156 | 0.08082 | 0.055489 | 0.053076 | 0.864495 | 0.840068 | 0.835444 | 0.831122 | 0.827 | 0.811017 | 0 | 0.008004 | 0.149663 | 15,428 | 467 | 98 | 33.036403 | 0.750286 | 0.004083 | 0 | 0.70603 | 0 | 0.002513 | 0.15001 | 0.080083 | 0 | 0 | 0 | 0 | 0.178392 | 1 | 0.065327 | false | 0 | 0.027638 | 0 | 0.100503 | 0.002513 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
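
Consolidated from the TestHandlers cases above, the shapes a 'log_file' entry may take (a sketch with illustrative values; each list element configures one handler):

common_config = finish_common_config('utf-8', {'log_file': [
    None,                                               # default: log to the fallback stream
    'powerline.log',                                    # plain path -> file handler (missing dirs are created)
    ['logging.StreamHandler', []],                      # handler with implicit stream
    ['RotatingFileHandler', [['powerline.log'], {'maxBytes': 1, 'backupCount': 1}]],
    ['logging.StreamHandler', [[]], 'WARNING', 'FOO'],  # optional per-handler level and format overrides
]})
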

d7df1d5b7fca59ab1105e650030fb3e3394eca28 | 31,265 | py | Python | tasks-deploy/blind-robot/check.py | chankruze/qctf-school-2018 | 1e732cf264ee0a94bc2fc1fd8cf3a20660d57605 | ["MIT"] | null | null | null | tasks-deploy/blind-robot/check.py | chankruze/qctf-school-2018 | 1e732cf264ee0a94bc2fc1fd8cf3a20660d57605 | ["MIT"] | null | null | null | tasks-deploy/blind-robot/check.py | chankruze/qctf-school-2018 | 1e732cf264ee0a94bc2fc1fd8cf3a20660d57605 | ["MIT"] | null | null | null |
flags = ['QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_046Ci}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_0FYMt}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_0HYro}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_0MMRH}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_0eZoA}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_0vqkt}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_12C7I}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_1ADj1}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_1CnRx}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_1j8IV}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_1nNop}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_1qgZp}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_1uqzi}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_1zZqR}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_20vSx}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_2N1YQ}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_2tjCB}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_34eFB}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_38Qfu}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_38bI5}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_3OiwM}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_3Peh5}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_3s3gN}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_44cmm}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_4CY4y}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_4HGMG}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_4MW8z}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_4TqSS}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_4X8ZV}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_4qdT1}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_50nME}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_50wCe}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_5HZv0}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_5TsRS}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_5qvIp}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_5qzo2}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_5r4oV}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_5v0A3}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_62oEa}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_643WP}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_673GH}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_6DuYa}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_6KH0R}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_6MkcQ}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_6PJCh}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_6Q2dg}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_6SNbo}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_6Wi1C}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_6aNXh}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_6i2Ou}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_6l9rw}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_6n4at}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_726aj}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_7Jdtc}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_7TfUQ}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_7W8i2}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_7rmJV}', 
'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_84pYk}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_85wbL}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_8J1v4}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_8TleK}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_8YI7A}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_8j3VA}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_8mBEH}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_8tXuI}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_8vgYy}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_94Npb}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_9ENDw}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_9Ek90}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_9GL3n}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_9S9xa}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_9Ubk6}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_9UxYB}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_9VaS1}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_A52x9}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_A9y7d}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_ACqaI}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_APfzM}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_AYDVt}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_AiY0O}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_AjFud}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_Ap7KR}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_ArGnP}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_Axg3f}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_AyMML}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_AymRp}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_B6B79}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_BEXx0}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_BTuh6}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_BTxSG}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_BeuwH}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_Bp1uM}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_Bxnkk}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_BzSj9}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_Bzal2}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_C1WXR}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_C8uek}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_CCYLf}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_CFLCe}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_CGaNP}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_Ch1Hc}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_ChEOO}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_CmpDO}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_Cn2Lq}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_Cv4WH}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_Cz5ZG}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_Czb9z}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_D888V}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_D9YVN}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_DCdU9}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_DMdk6}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_DeaZp}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_E1zIg}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_E9sJc}', 
'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_EJ41k}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_EQRtE}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_EiO8J}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_EjRbq}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_EnbWI}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_Eo1nu}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_EssYh}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_EwbD5}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_Ey6b9}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_FEblk}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_FVQq6}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_FWJbn}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_FbJ8N}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_FtIkT}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_Fvx2D}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_GFmHw}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_GRwTY}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_Gnsq9}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_GtVo4}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_GuCXr}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_GzePl}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_HEpBk}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_HMRZb}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_HQzj9}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_HSRLE}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_HW4d6}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_HWvIL}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_HZWiS}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_HaHcs}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_HfeKB}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_Hgu18}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_HhD19}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_IZPxM}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_IaCC1}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_Ijyc0}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_IrClV}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_IsT0Y}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_ItCG1}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_ItZng}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_IuL2y}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_J4SkP}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_JC7ec}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_JHlHY}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_JI6YV}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_JVzcc}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_Jaw5v}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_JwPvm}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_JyfdL}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_KC50M}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_KD79o}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_KI106}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_KVW36}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_KZPth}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_Kk1Ut}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_KkVqy}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_KpKhw}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_Kw3PX}', 
'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_LEh8G}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_LFoyk}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_LMmtt}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_LP49l}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_LYYwF}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_Li9P7}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_LrWPm}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_LvikZ}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_M7jTS}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_MIvtE}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_MMqSi}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_Mb1hX}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_MmRDf}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_Mtpzw}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_N3alV}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_N3piv}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_N5GU6}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_NG1fV}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_NOGhd}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_NPTP0}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_NjhOe}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_NspIm}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_O0H6O}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_OcY8H}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_Oi2OU}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_P0CMT}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_P97vl}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_PDAO6}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_PSRJj}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_PcNrL}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_QBSJk}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_QQBWA}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_QSkC6}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_QfS8X}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_QkBzT}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_R1j3S}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_R2f5J}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_R3fcQ}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_RIcIP}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_RNRAF}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_RfDOc}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_Rr6UT}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_RwlkI}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_RzzsB}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_S4dpA}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_S7WD0}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_S88uG}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_SBePX}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_SCmo1}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_SNMII}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_SeP6w}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_SjLWt}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_SkIsw}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_T13VM}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_TG7hY}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_THCAI}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_TPnnj}', 
'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_TcRKU}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_Tf1c7}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_TiM8s}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_Tl90Y}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_TmQUY}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_TozZG}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_Tr62I}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_TwsSW}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_TzzjR}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_UJ5vf}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_UKt69}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_Ua9uS}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_UlB6N}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_V0V8n}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_V6Faa}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_V923o}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_VL2mU}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_VNTWN}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_VPvkA}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_VYH0K}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_VcMdz}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_VrmNx}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_WCTRZ}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_WDZKe}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_WFVrf}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_WKfT5}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_WTvzx}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_WlvFD}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_XXfUD}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_XY4vT}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_Xchpy}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_XfauU}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_XpvLb}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_Y0ShL}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_Y1hkv}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_YAoxa}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_YC99g}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_YEEjd}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_YJPrO}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_YQnF7}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_YblxH}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_Yk0R0}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_Yqcxk}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_YuNKr}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_Yw3W0}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_Z3MA1}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_Z40BE}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_ZEIFu}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_ZIDiF}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_ZOYKY}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_ZXLrG}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_ZmOj6}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_Zmtbt}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_ZqU7L}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_aDGTX}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_aDjnk}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_aUTdt}', 
'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_ajYDQ}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_awHjK}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_b14Ji}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_bDQHO}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_bWuoX}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_bYY5C}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_bbOQC}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_be7Ij}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_bg496}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_bgn4X}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_bkOG3}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_bmHUv}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_bnTdq}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_bw38t}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_cPVc1}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_ctTaR}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_ctiPE}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_d7wUB}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_dDEDv}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_dJ9ts}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_dUOnO}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_dUotd}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_dYrx9}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_dklg8}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_e7W4d}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_e8VYh}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_eHpZx}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_eO4Hi}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_eiDwJ}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_euhv4}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_f3xpo}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_f9SiT}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_fLIzt}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_fP1B3}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_fPHvF}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_fSNTI}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_fWS5h}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_fcXNW}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_fcyp4}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_fggWN}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_fqphV}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_fwNIu}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_gJtUh}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_gReTh}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_gTpDb}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_gVEc9}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_ggoGr}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_glOEp}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_glk4t}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_gtJ4z}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_guNhe}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_gz5Ub}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_h2TMx}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_h3XUg}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_h5ugG}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_h93Ro}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_hD3vQ}', 
'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_hEEr9}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_hQfxF}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_hSTtr}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_hU0cB}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_hf59R}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_hhZGS}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_hr74N}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_hr9AM}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_iJCjK}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_iLY8I}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_iPRzW}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_iQO4l}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_iQgJn}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_iVy6P}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_idKFm}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_igGnK}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_irass}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_j38KK}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_j3SkK}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_jCre4}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_jK6nB}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_jPnv3}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_jZqFa}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_jgj0V}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_jzDHM}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_kLVHv}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_kRNV9}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_kU3Gd}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_kVFzY}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_kYx9a}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_lTwjF}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_lf1vr}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_lmgYv}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_lpbfI}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_lwcjX}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_lx23X}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_m3KXQ}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_mH29X}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_mKoxQ}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_mSnGC}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_mVFle}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_mY1TY}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_mb4X2}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_n8h9D}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_nD0Vx}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_nIFsc}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_nNHvL}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_nODRT}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_nSV2R}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_nTyVt}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_nc8Cl}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_ngmor}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_niHsm}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_njxbN}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_nn1DS}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_nrAEU}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_nshPD}', 
'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_o5SPj}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_o9r2h}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_oCpDX}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_oPip4}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_oXBLe}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_oaZsQ}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_ocISn}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_ooQhR}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_ookeK}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_p5t3O}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_p6of5}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_pCGJr}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_pR59r}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_pVesm}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_payIF}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_phemz}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_pwGRU}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_pygKI}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_q4Pks}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_q4bCk}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_q6RsI}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_qBgtL}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_qD5Vx}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_qJE01}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_qMdBm}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_qP5F2}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_qf17U}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_qgNbM}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_qhwZd}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_qpZQS}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_qzO8c}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_rFgja}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_rOCmZ}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_rZuS8}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_rexys}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_riTSZ}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_rmaE1}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_roOrK}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_rsU31}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_s3jqI}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_s8v9o}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_sHISA}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_sNJqH}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_sNrKW}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_sOeRR}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_sXTko}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_saLvJ}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_sjI2h}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_svAkv}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_tG09X}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_tmmNR}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_uO993}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_uWpgB}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_ueOfi}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_ufeXs}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_ureIk}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_uwvfe}', 
'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_v5iYz}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_v7hhB}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_vA0Ff}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_vG8MU}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_vHiV5}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_vWYah}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_vZw2F}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_vqhY6}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_vsGPr}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_vtmoY}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_w4XIe}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_w9Rfg}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_wCnCm}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_wJYVE}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_wSv9k}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_wgFwj}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_wluTD}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_wpBQn}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_ww0kv}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_wwWmd}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_wwfY6}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_x2GmY}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_x5iSM}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_x9wMx}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_xADeB}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_xDcsb}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_xI2Yf}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_xZqCm}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_xrzcl}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_xxQAT}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_y0PIg}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_y3h5V}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_yBS16}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_yGE8N}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_yPdUg}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_ygvD1}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_ypnt6}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_ytS8a}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_zJ25F}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_zKxQ8}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_zMFJm}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_zXs4J}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_zj1sz}', 'QCTF{m1gh7_b3_th3_loo0ng35t_fLag_y0u_hav3_Ever_s33N_zqpUD}']
def check(attempt, context):
    if attempt.answer == flags[attempt.participant.id % len(flags)]:
        return Checked(True)
    if attempt.answer in flags:
        return CheckedPlagiarist(False, flags.index(attempt.answer))
    return Checked(False)
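The checker assigns each participant the flag at index participant.id % len(flags), and a valid flag submitted by the wrong participant yields a plagiarism verdict. A minimal self-contained sketch of that logic, with hypothetical stand-ins for the framework's Checked/CheckedPlagiarist verdict types and a toy flag list:

from dataclasses import dataclass

@dataclass
class Checked:  # hypothetical stand-in for the framework's verdict type
    ok: bool

@dataclass
class CheckedPlagiarist:  # hypothetical stand-in; carries the owner's index
    ok: bool
    source_index: int

flags = ['QCTF{demo_A}', 'QCTF{demo_B}', 'QCTF{demo_C}']  # toy flag list

def check_answer(participant_id, answer):
    # Each participant "owns" exactly one flag, picked by id modulo the list size.
    if answer == flags[participant_id % len(flags)]:
        return Checked(True)
    # A real flag submitted by the wrong participant -> plagiarism verdict.
    if answer in flags:
        return CheckedPlagiarist(False, flags.index(answer))
    return Checked(False)

print(check_answer(4, 'QCTF{demo_B}'))  # Checked(ok=True), since 4 % 3 == 1
print(check_answer(0, 'QCTF{demo_B}'))  # CheckedPlagiarist(ok=False, source_index=1)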
| 3,126.5
| 31,008
| 0.901423
| 5,532
| 31,265
| 4.281092
| 0.095264
| 0.19001
| 0.232234
| 0.295571
| 0.886712
| 0.886712
| 0.886712
| 0.886712
| 0.886712
| 0.886712
| 0
| 0.192427
| 0.017656
| 31,265
| 9
| 31,009
| 3,473.888889
| 0.57868
| 0
| 0
| 0
| 0
| 0
| 0.927555
| 0.927555
| 0
| 0
| 0
| 0
| 0
| 1
| 0.142857
| false
| 0
| 0
| 0
| 0.571429
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 14
|
cc0ed0e057e2d64a3ff0408b9ea7b3c8b1217f85
| 325
|
py
|
Python
|
reskit/util/loss_factors.py
|
OfficialCodexplosive/RESKit
|
e006e8c9923ddb044dab6951c95a15fa43489398
|
[
"MIT"
] | 16
|
2020-01-08T09:44:37.000Z
|
2022-03-24T15:56:02.000Z
|
reskit/util/loss_factors.py
|
OfficialCodexplosive/RESKit
|
e006e8c9923ddb044dab6951c95a15fa43489398
|
[
"MIT"
] | 22
|
2020-04-25T18:01:40.000Z
|
2020-10-07T14:11:57.000Z
|
reskit/util/loss_factors.py
|
OfficialCodexplosive/RESKit
|
e006e8c9923ddb044dab6951c95a15fa43489398
|
[
"MIT"
] | 16
|
2020-02-26T14:31:26.000Z
|
2021-04-28T10:34:51.000Z
|
import numpy as np
def low_generation_loss(capacity_factor, base=0, sharpness=5):
    """Generate capacity-factor-dependent loss factors.

    Follows the equation:
        (1 - base) * exp(-sharpness * capacity_factor)

    so the loss factor is largest at low capacity factors and decays
    toward zero as generation rises.
    """
    return (1 - base) * np.exp(-sharpness * capacity_factor)  # dampens lower wind speeds
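A quick numeric check of the decay (toy values; assumes the function above is in scope):

# Loss factor starts near 1 at zero capacity factor and decays as generation rises.
for cf in (0.0, 0.1, 0.3, 0.5):
    print(f"capacity_factor={cf:.1f} -> loss factor {low_generation_loss(cf):.3f}")
# 0.0 -> 1.000, 0.1 -> 0.607, 0.3 -> 0.223, 0.5 -> 0.082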
| 29.545455
| 89
| 0.683077
| 42
| 325
| 5.166667
| 0.642857
| 0.258065
| 0.184332
| 0.239631
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.019305
| 0.203077
| 325
| 10
| 90
| 32.5
| 0.818533
| 0.473846
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.333333
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
040f9f5fc9db1e1dc45d24d54a99f7d39803764f
| 26,673
|
py
|
Python
|
IntOpt/NeurIPSIntopt-main/experiments/Building Knapsack/optimizer.py
|
Patyrn/Divide-and-Learn
|
ff03689c7ab6a7155ebd019babce8f79d0757a53
|
[
"MIT"
] | 7
|
2020-11-06T01:29:48.000Z
|
2022-01-02T12:49:40.000Z
|
IntOpt/NeurIPSIntopt-main/experiments/Building Knapsack/optimizer.py
|
Patyrn/Divide-and-Learn
|
ff03689c7ab6a7155ebd019babce8f79d0757a53
|
[
"MIT"
] | 2
|
2021-01-19T16:59:04.000Z
|
2021-01-25T10:17:46.000Z
|
IntOpt/NeurIPSIntopt-main/experiments/Building Knapsack/optimizer.py
|
Patyrn/Divide-and-Learn
|
ff03689c7ab6a7155ebd019babce8f79d0757a53
|
[
"MIT"
] | 5
|
2021-07-13T04:47:13.000Z
|
2022-01-17T14:05:06.000Z
|
import numpy as np
import random
import pandas as pd
import math, time
import itertools
from sklearn import preprocessing
from sklearn.preprocessing import MinMaxScaler
import datetime
import torch
from torch import nn, optim
from torch.autograd import Variable
import torch.utils.data as data_utils
from torch.utils.data.dataset import Dataset
from sklearn.preprocessing import StandardScaler
import gurobipy as gp
import logging
import copy
from collections import defaultdict
import joblib
budget = 20
test_batchsize = 31
scaler = joblib.load( 'price_scaler.bin')
def inv_scaler_transform(value):
return scaler.inverse_transform([[ value]]).squeeze()
def actual_obj(prop_data, n_items = 62):
n = len(prop_data)
price = prop_data['Actual sales prices'].values
cst = prop_data[ 'Actual construction costs'].values
n_instances = n//n_items
obj_list = []
for i in range(n_instances):
p = price[(n_items*i):((i+1)*n_items)]
c = cst[(n_items*i):((i+1)*n_items)]
# print("Total cost",sum(c), "Total Profit",sum(p))
model = gp.Model()
model.setParam('OutputFlag', 0)
x = model.addMVar(shape= n_items, lb=0.0, ub=1.0,vtype=gp.GRB.BINARY, name="x")
model.addConstr(c @ x == n_items*budget, name="eq")
model.setObjective(p@x, gp.GRB.MAXIMIZE)
model.optimize()
sol = x.X
obj_list.append(inv_scaler_transform(np.dot(sol,p)))
return np.array(obj_list)
class PriceNet(nn.Module):
def __init__(self,n_ts_features,n_cat,n_features,lookback,
embedding_size,num_layers,hidden_size,target_size=1):
super().__init__()
self.n_ts_features = n_ts_features
self.n_cat = n_cat
self.n_features = n_features
self.embedding_size = embedding_size
self.num_layers = num_layers
self.hidden_size = hidden_size
self.target_size = target_size
self.lookback = lookback
self.embeddings = nn.Embedding(n_cat, embedding_size)
self.lstm = nn.LSTM(n_ts_features, hidden_size, num_layers,
batch_first=True)
# print("Pricenet: embedding size {} n_features {}".format(embedding_size, n_features))
# print(n_features+ embedding_size)
self.fc = nn.Linear(n_features+ embedding_size + self.lookback * self.hidden_size,
target_size)
def forward(self,x_features,x_cat,x_ts,h):
x_emb = self.embeddings(x_cat)
out, h = self.lstm(x_ts, h)
out = torch.flatten(out,start_dim=1)
x = torch.cat([x_emb, x_features, out], 1)
# print(x.shape)
pred = self.fc(x).squeeze()
return pred, h
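A hedged shape check of PriceNet on random tensors (toy batch size; the feature sizes match the defaults used by the wrapper classes below, and torch is already imported at the top of the file):

net = PriceNet(n_ts_features=19, n_cat=20, n_features=7, lookback=5,
               embedding_size=5, num_layers=2, hidden_size=10)
B = 4                                  # toy batch size
x_f = torch.randn(B, 7)                # numeric property features
x_c = torch.randint(0, 20, (B,))       # categorical codes, fed to the embedding
x_t = torch.randn(B, 5, 19)            # lookback window of economic time series
h0 = torch.zeros(2, B, 10)             # (num_layers, batch, hidden_size)
c0 = torch.zeros(2, B, 10)
pred, _ = net(x_f, x_c, x_t, (h0, c0))
print(pred.shape)                      # torch.Size([4]): one predicted price per property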
class MyCustomDataset():
def __init__(self, economic_data,properties_data,lookback=5):
self.x_features= properties_data.iloc[:,6:13].values.astype(np.float32)
self.x_cat = properties_data.iloc[:,5].cat.codes.values.astype(np.int64)
self.y = properties_data.iloc[:,13].values.astype(np.float32)
self.cost = properties_data.iloc[:,14].values.astype(np.float32)
self.ts_features = economic_data.iloc[:,2:].values.astype(np.float32)
self.x_ts = np.zeros((len(self.ts_features),
lookback,self.ts_features.shape[1])).astype(np.float32)
for i in range(0,len(self.x_ts),lookback):
self.x_ts[i] = self.ts_features[i:(i+lookback),:]
self.x_ts = self.x_ts.reshape(-1,lookback,self.ts_features.shape[1])
def __len__(self):
return len(self.y)
def __getitem__(self, idx):
return self.x_features[idx],self.x_cat[idx],self.x_ts[idx], self.y[idx],self.cost[idx]
class two_stage:
def __init__(self,n_ts_features=19,n_cat=20,n_features=7,lookback=5,
embedding_size=5,num_layers=2,hidden_size=10,target_size=1,
epochs=8,optimizer=optim.Adam,batch_size=32,**hyperparams):
self.embedding_size = embedding_size
self.num_layers = num_layers
self.hidden_size = hidden_size
self.target_size = target_size
self.n_features = n_features
self.n_ts_features = n_ts_features
self.n_cat = n_cat
self.lookback = lookback
self.optimizer = optimizer
self.batch_size = batch_size
self.hyperparams = hyperparams
self.epochs= epochs
# print("embedding size {} n_features {}".format(embedding_size, n_features))
self.model = PriceNet(embedding_size=embedding_size,n_features=n_features,
n_ts_features = n_ts_features,n_cat= n_cat,lookback = lookback,
num_layers=num_layers,hidden_size=hidden_size,target_size=target_size)
self.optimizer = optimizer(self.model.parameters(), **hyperparams)
def fit(self,economic_data,properties_data):
logging.info("2stage")
train_df = MyCustomDataset(economic_data,properties_data)
criterion = nn.L1Loss(reduction='mean') #nn.MSELoss(reduction='mean')
for e in range(self.epochs):
logging.info("EPOCH Starts")
total_loss = 0
train_dl = data_utils.DataLoader(train_df, batch_size=self.batch_size,shuffle=True)
for x_f,x_c,x_t,y,cst in train_dl:
# print("training shape",x_t.shape)
self.optimizer.zero_grad()
h = torch.zeros((self.num_layers,x_t.shape[0],self.hidden_size),dtype=torch.float)
c = torch.zeros((self.num_layers,x_t.shape[0],self.hidden_size),dtype=torch.float)
op,states = self.model(x_f,x_c,x_t,(h,c))
h,c = states
loss = criterion(op, y)
total_loss += loss.item()
loss.backward()
self.optimizer.step()
logging.info("EPOCH Ends")
# print("Epoch{} ::loss {}".format(e,total_loss))
def val_loss(self,economic_data,properties_data):
test_obj = actual_obj(properties_data,n_items = test_batchsize)
self.model.eval()
criterion = nn.L1Loss(reduction='sum')#nn.MSELoss(reduction='sum')
valid_df = MyCustomDataset(economic_data,properties_data)
valid_dl = data_utils.DataLoader(valid_df, batch_size= test_batchsize,shuffle=False)
prediction_loss = 0
obj_list = []
for x_f,x_c,x_t,y,cst in valid_dl:
h = torch.zeros((self.num_layers,x_t.shape[0],self.hidden_size),dtype=torch.float)
c = torch.zeros((self.num_layers,x_t.shape[0],self.hidden_size),dtype=torch.float)
op,states = self.model(x_f,x_c,x_t,(h,c))
h,c = states
loss = criterion(op, y)
prediction_loss += loss.item()
model = gp.Model()
model.setParam('OutputFlag', 0)
x = model.addMVar(shape= x_t.shape[0], lb=0.0, ub=1.0,vtype=gp.GRB.BINARY, name="x")
model.addConstr(cst.detach().numpy() @ x == x_t.shape[0]*budget, name="eq")
model.setObjective((op.detach().numpy())@x, gp.GRB.MAXIMIZE)
model.optimize()
sol = x.X
y_np = y.detach().numpy()
obj_list.append(inv_scaler_transform(np.dot(sol,y_np)))
self.model.train()
return prediction_loss, test_obj- np.array(obj_list)
def predict(self,economic_data,properties_data):
self.model.eval()
pred_df = MyCustomDataset(economic_data,properties_data)
pred_dl = data_utils.DataLoader(pred_df, batch_size=self.batch_size,shuffle=False)
target =[]
pred = []
for x_f,x_c,x_t,y,cst in pred_dl:
h = torch.zeros((self.num_layers,x_t.shape[0],self.hidden_size),dtype=torch.float)
c = torch.zeros((self.num_layers,x_t.shape[0],self.hidden_size),dtype=torch.float)
op,states = self.model(x_f,x_c,x_t,(h,c))
h,c = states
target = target+y.tolist()
pred= pred+op.squeeze().tolist()
self.model.train()
        return {'prediction': pred, 'groundtruth': target}
class SPO:
def __init__(self,n_ts_features=19,n_cat=20,n_features=7,lookback=5,
embedding_size=5,num_layers=2,hidden_size=10,target_size=1,
epochs=8,optimizer=optim.Adam,batch_size=32,budget= budget,**hyperparams):
self.embedding_size = embedding_size
self.num_layers = num_layers
self.hidden_size = hidden_size
self.target_size = target_size
self.n_features = n_features
self.n_ts_features = n_ts_features
self.n_cat = n_cat
self.lookback = lookback
self.budget = budget
self.optimizer = optimizer
self.batch_size = batch_size
self.hyperparams = hyperparams
self.epochs= epochs
# print("embedding size {} n_features {}".format(embedding_size, n_features))
self.model = PriceNet(embedding_size=embedding_size,n_features=n_features,
n_ts_features = n_ts_features,n_cat= n_cat,lookback = lookback,
num_layers=num_layers,hidden_size=hidden_size,target_size=target_size)
self.optimizer = optimizer(self.model.parameters(), **hyperparams)
def fit(self,economic_data,properties_data):
logging.info("SPO")
train_df = MyCustomDataset(economic_data,properties_data)
criterion = nn.L1Loss(reduction='mean') #nn.MSELoss(reduction='mean')
for e in range(self.epochs):
logging.info("EPOCH Starts")
# total_loss = 0
train_dl = data_utils.DataLoader(train_df, batch_size=self.batch_size,shuffle=True)
for x_f,x_c,x_t,y ,cst in train_dl:
# print("shape",x_t.shape)
self.optimizer.zero_grad()
h = torch.zeros((self.num_layers,x_t.shape[0],self.hidden_size),dtype=torch.float)
c = torch.zeros((self.num_layers,x_t.shape[0],self.hidden_size),dtype=torch.float)
op,states = self.model(x_f,x_c,x_t,(h,c))
h,c = states
model = gp.Model()
model.setParam('OutputFlag', 0)
x = model.addMVar(shape= x_t.shape[0], lb=0.0, ub=1.0,vtype=gp.GRB.CONTINUOUS, name="x")
model.addConstr(cst.detach().numpy() @ x == x_t.shape[0]*self.budget, name="eq")
model.setObjective((y.detach().numpy())@x, gp.GRB.MAXIMIZE)
model.optimize()
x_actual = x.X
c_spo = (2*op - y)
# print("SHape",y.shape, c_spo.shape, op.shape)
model = gp.Model()
model.setParam('OutputFlag', 0)
x = model.addMVar(shape= x_t.shape[0], lb=0.0, ub=1.0,vtype=gp.GRB.CONTINUOUS, name="x")
model.addConstr(cst.detach().numpy() @ x == x_t.shape[0]*self.budget, name="eq")
model.setObjective((c_spo.detach().numpy())@x, gp.GRB.MAXIMIZE)
model.optimize()
x_SPO = x.X
grad = torch.from_numpy( x_SPO -x_actual).float()
op.backward(gradient=grad)
self.optimizer.step()
logging.info("EPOCH Ends")
# print("Epoch{} ::loss {}".format(e,total_loss))
def val_loss(self,economic_data,properties_data):
test_obj = actual_obj(properties_data,n_items = test_batchsize)
self.model.eval()
criterion = nn.L1Loss(reduction='sum')#nn.MSELoss(reduction='sum')
valid_df = MyCustomDataset(economic_data,properties_data)
valid_dl = data_utils.DataLoader(valid_df, batch_size= test_batchsize,shuffle=False)
prediction_loss = 0
obj_list = []
for x_f,x_c,x_t,y,cst in valid_dl:
h = torch.zeros((self.num_layers,x_t.shape[0],self.hidden_size),dtype=torch.float)
c = torch.zeros((self.num_layers,x_t.shape[0],self.hidden_size),dtype=torch.float)
op,states = self.model(x_f,x_c,x_t,(h,c))
h,c = states
loss = criterion(op, y)
prediction_loss += loss.item()
model = gp.Model()
model.setParam('OutputFlag', 0)
x = model.addMVar(shape= x_t.shape[0], lb=0.0, ub=1.0,vtype=gp.GRB.BINARY, name="x")
model.addConstr(cst.detach().numpy() @ x == x_t.shape[0]*budget, name="eq")
model.setObjective((op.detach().numpy())@x, gp.GRB.MAXIMIZE)
model.optimize()
sol = x.X
y_np = y.detach().numpy()
obj_list.append(inv_scaler_transform(np.dot(sol,y_np)))
self.model.train()
return prediction_loss, test_obj- np.array(obj_list)
def predict(self,economic_data,properties_data):
self.model.eval()
pred_df = MyCustomDataset(economic_data,properties_data)
pred_dl = data_utils.DataLoader(pred_df, batch_size=self.batch_size,shuffle=False)
target =[]
pred = []
for x_f,x_c,x_t,y,cst in pred_dl:
h = torch.zeros((self.num_layers,x_t.shape[0],self.hidden_size),dtype=torch.float)
c = torch.zeros((self.num_layers,x_t.shape[0],self.hidden_size),dtype=torch.float)
op,states = self.model(x_f,x_c,x_t,(h,c))
h,c = states
target = target+y.tolist()
pred= pred+op.squeeze().tolist()
self.model.train()
        return {'prediction': pred, 'groundtruth': target}
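The SPO.fit loop above solves the LP twice per batch, once with the true prices y and once with the surrogate cost 2*op - y, and backpropagates the difference between the two solutions through op. A toy illustration of that subgradient with the budgeted LP replaced by a trivial "pick the single best item" problem (hypothetical numbers; the real code solves the LP with Gurobi):

import numpy as np

def solve(c):
    # Stand-in decision problem: choose exactly the item with the largest value.
    x = np.zeros_like(c)
    x[np.argmax(c)] = 1.0
    return x

y  = np.array([1.0, 3.0, 2.0])   # true item values
op = np.array([2.5, 1.0, 2.0])   # current model predictions
x_actual = solve(y)              # optimal decision under the truth
x_spo    = solve(2 * op - y)     # decision under the SPO surrogate cost
grad = x_spo - x_actual          # pushed back through the network via op.backward(...)
print(grad)                      # [ 1. -1.  0.]: item 0 is over-rated, item 1 under-rated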
import sys
sys.path.insert(0,"../../Interior")
from ip_model_whole import IPOfunc
class Intopt:
def __init__(self,smoothing=False,thr = 0.1,max_iter=None,method=1,mu0=None,
n_ts_features=19,n_cat=20,n_features=7,lookback=5,damping=0.5,
embedding_size=5,num_layers=2,hidden_size=10,target_size=1,
epochs=8,optimizer=optim.Adam,batch_size=32,budget=budget,**hyperparams):
self.embedding_size = embedding_size
self.num_layers = num_layers
self.hidden_size = hidden_size
self.target_size = target_size
self.n_features = n_features
self.n_ts_features = n_ts_features
self.n_cat = n_cat
self.lookback = lookback
self.budget = budget
self.damping = damping
self.smoothing = smoothing
self.thr = thr
self.max_iter = max_iter
self.method = method
self.mu0= mu0
self.optimizer = optimizer
self.batch_size = batch_size
self.hyperparams = hyperparams
self.epochs= epochs
# print("embedding size {} n_features {}".format(embedding_size, n_features))
self.model = PriceNet(embedding_size=embedding_size,n_features=n_features,
n_ts_features = n_ts_features,n_cat= n_cat,lookback = lookback,
num_layers=num_layers,hidden_size=hidden_size,target_size=target_size)
self.optimizer = optimizer(self.model.parameters(), **hyperparams)
def fit(self,economic_data,properties_data):
logging.info("Intopt")
train_df = MyCustomDataset(economic_data,properties_data)
criterion = nn.L1Loss(reduction='mean') #nn.MSELoss(reduction='mean')
grad_list = []
for e in range(self.epochs):
total_loss = 0
# for i in range(30):
# logging.info("EPOCH Starts")
# train_prop = properties_data.sample(n = 279,random_state =i)
# valid_prop = properties_data.loc[~properties_data.index.isin(train_prop.index)]
# train_sl = train_prop.Sl.unique().tolist()
# valid_sl = valid_prop.Sl.unique().tolist()
# train_prop = train_prop.sort_values(['Sl'],ascending=[True])
# valid_prop = valid_prop.sort_values(['Sl'],ascending=[True])
# train_econ = economic_data[economic_data.Sl.isin(train_sl)]
# valid_econ = economic_data[economic_data.Sl.isin(valid_sl)]
# train_econ = train_econ.sort_values(['Sl','Lag'],ascending=[True,False])
# valid_econ = valid_econ.sort_values(['Sl','Lag'],ascending=[True,False])
# train_df = MyCustomDataset(train_econ,train_prop)
train_dl = data_utils.DataLoader(train_df, batch_size=self.batch_size,shuffle=False)
for x_f,x_c,x_t,y ,cst in train_dl:
self.optimizer.zero_grad()
h = torch.zeros((self.num_layers,x_t.shape[0],self.hidden_size),dtype=torch.float)
c = torch.zeros((self.num_layers,x_t.shape[0],self.hidden_size),dtype=torch.float)
op,states = self.model(x_f,x_c,x_t,(h,c))
h,c = states
G = cst.unsqueeze(0)
h = torch.tensor([x_t.shape[0]*self.budget],dtype=torch.float)
A = torch.Tensor()
b = torch.Tensor()
x = IPOfunc(G,h,A,b,bounds= [(0., 1.)],max_iter=self.max_iter, thr=self.thr,damping=self.damping,
smoothing=self.smoothing)(-op)
loss = -(x*y).mean()
# op.retain_grad()
loss.backward()
self.optimizer.step()
logging.info("EPOCH Ends")
# print("Epoch{} ::loss {} ->".format(e,total_loss))
# print(self.val_loss(valid_econ, valid_prop))
# print("______________")
def val_loss(self,economic_data,properties_data):
test_obj = actual_obj(properties_data,n_items = test_batchsize)
self.model.eval()
criterion = nn.L1Loss(reduction='sum')#nn.MSELoss(reduction='sum')
valid_df = MyCustomDataset(economic_data,properties_data)
valid_dl = data_utils.DataLoader(valid_df, batch_size= test_batchsize,shuffle=False)
prediction_loss = 0
obj_list = []
for x_f,x_c,x_t,y,cst in valid_dl:
h = torch.zeros((self.num_layers,x_t.shape[0],self.hidden_size),dtype=torch.float)
c = torch.zeros((self.num_layers,x_t.shape[0],self.hidden_size),dtype=torch.float)
op,states = self.model(x_f,x_c,x_t,(h,c))
h,c = states
loss = criterion(op, y)
prediction_loss += loss.item()
model = gp.Model()
model.setParam('OutputFlag', 0)
x = model.addMVar(shape= x_t.shape[0], lb=0.0, ub=1.0,vtype=gp.GRB.BINARY, name="x")
model.addConstr(cst.detach().numpy() @ x == x_t.shape[0]*budget, name="eq")
model.setObjective((op.detach().numpy())@x, gp.GRB.MAXIMIZE)
model.optimize()
sol = x.X
y_np = y.detach().numpy()
obj_list.append(inv_scaler_transform(np.dot(sol,y_np)))
self.model.train()
return prediction_loss, test_obj- np.array(obj_list)
def predict(self,economic_data,properties_data):
self.model.eval()
pred_df = MyCustomDataset(economic_data,properties_data)
pred_dl = data_utils.DataLoader(pred_df, batch_size=self.batch_size,shuffle=False)
target =[]
pred = []
for x_f,x_c,x_t,y,cst in pred_dl:
h = torch.zeros((self.num_layers,x_t.shape[0],self.hidden_size),dtype=torch.float)
c = torch.zeros((self.num_layers,x_t.shape[0],self.hidden_size),dtype=torch.float)
op,states = self.model(x_f,x_c,x_t,(h,c))
h,c = states
target = target+y.tolist()
pred= pred+op.squeeze().tolist()
self.model.train()
        return {'prediction': pred, 'groundtruth': target}
from qpthlocal.qp import QPFunction
from qpthlocal.qp import QPSolvers
from qpthlocal.qp import make_gurobi_model
def make_matrix_qp(w,budget):
n = len(w)
A1 = np.eye(n)
b1 = np.ones(n)
A2 = -np.eye(n)
b2 = np.zeros(n)
A3 = np.expand_dims(w, axis=0)
b3 = np.array([budget])
A = np.vstack([A1,A2])
b = np.concatenate([b1,b2])
return torch.from_numpy(A).float(), torch.from_numpy(b).float(),torch.from_numpy(A3).float(), torch.from_numpy(b3).float()
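A tiny check of make_matrix_qp (toy numbers): G and h stack the box constraints 0 <= x <= 1, while A and b carry the budget equality w @ x == budget:

G, h, A, b = make_matrix_qp(np.array([2.0, 3.0, 5.0]), budget=4.0)
print(G.shape, h.shape)  # torch.Size([6, 3]) torch.Size([6]): x <= 1 rows, then -x <= 0 rows
print(A, b)              # tensor([[2., 3., 5.]]) tensor([4.])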
class qptl:
def __init__(self,tau=1e5,n_ts_features=19,n_cat=20,n_features=7,lookback=5,
embedding_size=5,num_layers=2,hidden_size=10,target_size=1,
epochs=8,optimizer=optim.Adam,batch_size=32,budget=budget,**hyperparams):
self.embedding_size = embedding_size
self.num_layers = num_layers
self.hidden_size = hidden_size
self.target_size = target_size
self.n_features = n_features
self.n_ts_features = n_ts_features
self.n_cat = n_cat
self.lookback = lookback
self.budget = budget
self.tau = tau
self.optimizer = optimizer
self.batch_size = batch_size
self.hyperparams = hyperparams
self.epochs= epochs
# print("embedding size {} n_features {}".format(embedding_size, n_features))
self.model = PriceNet(embedding_size=embedding_size,n_features=n_features,
n_ts_features = n_ts_features,n_cat= n_cat,lookback = lookback,
num_layers=num_layers,hidden_size=hidden_size,target_size=target_size)
self.optimizer = optimizer(self.model.parameters(), **hyperparams)
def fit(self,economic_data,properties_data):
logging.info("QPTL")
train_df = MyCustomDataset(economic_data,properties_data)
grad_list = []
for e in range(self.epochs):
total_loss = 0
# for i in range(30):
# logging.info("EPOCH Starts")
# train_prop = properties_data.sample(n = 279,random_state =i)
# valid_prop = properties_data.loc[~properties_data.index.isin(train_prop.index)]
# train_sl = train_prop.Sl.unique().tolist()
# valid_sl = valid_prop.Sl.unique().tolist()
# train_prop = train_prop.sort_values(['Sl'],ascending=[True])
# valid_prop = valid_prop.sort_values(['Sl'],ascending=[True])
# train_econ = economic_data[economic_data.Sl.isin(train_sl)]
# valid_econ = economic_data[economic_data.Sl.isin(valid_sl)]
# train_econ = train_econ.sort_values(['Sl','Lag'],ascending=[True,False])
# valid_econ = valid_econ.sort_values(['Sl','Lag'],ascending=[True,False])
# train_df = MyCustomDataset(train_econ,train_prop)
train_dl = data_utils.DataLoader(train_df, batch_size=self.batch_size,shuffle=False)
for x_f,x_c,x_t,y ,cst in train_dl:
self.optimizer.zero_grad()
h = torch.zeros((self.num_layers,x_t.shape[0],self.hidden_size),dtype=torch.float)
c = torch.zeros((self.num_layers,x_t.shape[0],self.hidden_size),dtype=torch.float)
op,states = self.model(x_f,x_c,x_t,(h,c))
h,c = states
G,h,A,b = make_matrix_qp(cst.detach().numpy(),x_t.shape[0]*self.budget)
Q = torch.eye(x_t.shape[0])/self.tau
model_params_quad = make_gurobi_model(G.detach().numpy(),h.detach().numpy(),
A.detach().numpy(),b.detach().numpy(), self.tau*np.eye(x_t.shape[0]))
x = QPFunction(verbose=False, solver=QPSolvers.GUROBI,
model_params=model_params_quad)(Q.expand(1, *Q.shape),
-op, G.expand(1, *G.shape), h.expand(1, *h.shape),
A.expand(1, *A.shape), b.expand(1, *b.shape))
loss = -(x*y).mean()
op.retain_grad()
loss.backward()
# op_grad = copy.deepcopy(op.grad)
# grad_dict = {}
# grad_dict['epoch'] = e
# grad_dict['subepoch'] = i
# for l in range(len(op_grad)):
# grad_dict['qpt_cgrad'] = op_grad[l].item()
# grad_dict['prediction'] = op[l].item()
# grad_dict['true'] = y[l].item()
# grad_list.append(copy.deepcopy(grad_dict))
self.optimizer.step()
total_loss += loss.item()
logging.info("EPOCH Ends")
# print("Epoch{} ::loss {} ->".format(e,total_loss))
# print(self.val_loss(valid_econ, valid_prop))
# print("______________")
def val_loss(self,economic_data,properties_data):
test_obj = actual_obj(properties_data,n_items = test_batchsize)
self.model.eval()
criterion = nn.L1Loss(reduction='sum')#nn.MSELoss(reduction='sum')
valid_df = MyCustomDataset(economic_data,properties_data)
valid_dl = data_utils.DataLoader(valid_df, batch_size= test_batchsize,shuffle=False)
prediction_loss = 0
obj_list = []
for x_f,x_c,x_t,y,cst in valid_dl:
h = torch.zeros((self.num_layers,x_t.shape[0],self.hidden_size),dtype=torch.float)
c = torch.zeros((self.num_layers,x_t.shape[0],self.hidden_size),dtype=torch.float)
op,states = self.model(x_f,x_c,x_t,(h,c))
h,c = states
loss = criterion(op, y)
prediction_loss += loss.item()
model = gp.Model()
model.setParam('OutputFlag', 0)
x = model.addMVar(shape= x_t.shape[0], lb=0.0, ub=1.0,vtype=gp.GRB.BINARY, name="x")
model.addConstr(cst.detach().numpy() @ x == x_t.shape[0]*budget, name="eq")
model.setObjective((op.detach().numpy())@x, gp.GRB.MAXIMIZE)
model.optimize()
sol = x.X
y_np = y.detach().numpy()
obj_list.append(inv_scaler_transform(np.dot(sol,y_np)))
self.model.train()
return prediction_loss, test_obj- np.array(obj_list)
def predict(self,economic_data,properties_data):
self.model.eval()
pred_df = MyCustomDataset(economic_data,properties_data)
pred_dl = data_utils.DataLoader(pred_df, batch_size=self.batch_size,shuffle=False)
target =[]
pred = []
for x_f,x_c,x_t,y,cst in pred_dl:
h = torch.zeros((self.num_layers,x_t.shape[0],self.hidden_size),dtype=torch.float)
c = torch.zeros((self.num_layers,x_t.shape[0],self.hidden_size),dtype=torch.float)
op,states = self.model(x_f,x_c,x_t,(h,c))
h,c = states
target = target+y.tolist()
pred= pred+op.squeeze().tolist()
self.model.train()
        return {'prediction': pred, 'groundtruth': target}
| 42.54067
| 126
| 0.606081
| 3,636
| 26,673
| 4.213696
| 0.074532
| 0.008616
| 0.019189
| 0.020886
| 0.803472
| 0.792899
| 0.778409
| 0.773318
| 0.773318
| 0.756674
| 0
| 0.011588
| 0.265549
| 26,673
| 627
| 127
| 42.54067
| 0.770495
| 0.111798
| 0
| 0.721491
| 0
| 0
| 0.014898
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.052632
| false
| 0
| 0.052632
| 0.006579
| 0.149123
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
0b0776b058df62266be922f31d4d8e5674a6fa34
| 10,272
|
py
|
Python
|
insights/tests/datasources/test_candlepin_broker.py
|
maxamillion/insights-core
|
aa11e07e68077df97b6c85219911f8382be6e2fa
|
[
"Apache-2.0"
] | 121
|
2017-05-30T20:23:25.000Z
|
2022-03-23T12:52:15.000Z
|
insights/tests/datasources/test_candlepin_broker.py
|
maxamillion/insights-core
|
aa11e07e68077df97b6c85219911f8382be6e2fa
|
[
"Apache-2.0"
] | 1,977
|
2017-05-26T14:36:03.000Z
|
2022-03-31T10:38:53.000Z
|
insights/tests/datasources/test_candlepin_broker.py
|
maxamillion/insights-core
|
aa11e07e68077df97b6c85219911f8382be6e2fa
|
[
"Apache-2.0"
] | 244
|
2017-05-30T20:22:57.000Z
|
2022-03-26T10:09:39.000Z
|
import pytest
from mock.mock import Mock
from insights.core import ET
from insights.core.spec_factory import DatasourceProvider
from insights.core.dr import SkipComponent
from insights.specs.default import candlepin_broker
CANDLEPIN_BROKER = """
<configuration xmlns="urn:activemq"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="urn:activemq /schema/artemis-configuration.xsd">
<core xmlns="urn:activemq:core" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="urn:activemq:core ">
<acceptors>
<acceptor name="in-vm">vm://0</acceptor>
<acceptor name="stomp">tcp://localhost:61613?protocols=STOMP;useEpoll=false;sslEnabled=true;trustStorePath=/etc/candlepin/certs/truststore;trustStorePassword=CDX9i3K5uPPBzcNtzz5tcycVf5PuXA5w;keyStorePath=/etc/candlepin/certs/keystore;keyStorePassword=4iBpTS45VZjFmVdNzRhRKNXtxbsH5Dij;needClientAuth=true</acceptor>
</acceptors>
<security-enabled>true</security-enabled>
<security-settings>
<security-setting match="katello.candlepin.#">
<permission type="consume" roles="candlepinEventsConsumer"/>
</security-setting>
<security-setting match="#">
<permission type="createAddress" roles="invm-role"/>
<permission type="deleteAddress" roles="invm-role"/>
<permission type="createDurableQueue" roles="invm-role"/>
<permission type="deleteDurableQueue" roles="invm-role"/>
<permission type="createNonDurableQueue" roles="invm-role"/>
<permission type="deleteNonDurableQueue" roles="invm-role"/>
<permission type="send" roles="invm-role"/>
<permission type="consume" roles="invm-role"/>
<permission type="browse" roles="invm-role"/>
<permission type="manage" roles="invm-role"/>
</security-setting>
</security-settings>
<!-- Silence warnings on server startup -->
<cluster-user></cluster-user>
<cluster-password></cluster-password>
</core>
</configuration>
""".strip()
CANDLEPIN_BROKER_NO_SENSITIVE_INFO = """
<configuration xmlns="urn:activemq"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="urn:activemq /schema/artemis-configuration.xsd">
<core xmlns="urn:activemq:core" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="urn:activemq:core ">
<security-enabled>true</security-enabled>
<security-settings>
<security-setting match="katello.candlepin.#">
<permission type="consume" roles="candlepinEventsConsumer"/>
</security-setting>
<security-setting match="#">
<permission type="createAddress" roles="invm-role"/>
<permission type="deleteAddress" roles="invm-role"/>
<permission type="createDurableQueue" roles="invm-role"/>
<permission type="deleteDurableQueue" roles="invm-role"/>
<permission type="createNonDurableQueue" roles="invm-role"/>
<permission type="deleteNonDurableQueue" roles="invm-role"/>
<permission type="send" roles="invm-role"/>
<permission type="consume" roles="invm-role"/>
<permission type="browse" roles="invm-role"/>
<permission type="manage" roles="invm-role"/>
</security-setting>
</security-settings>
<!-- Silence warnings on server startup -->
<cluster-user></cluster-user>
</core>
</configuration>
""".strip()
CANDLEPIN_BROKER_BAD = """
<config>
<core/>
</configuration>
""".strip()
CANDLEPIN_BROKER_XML = """
<configuration xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="urn:activemq /schema/artemis-configuration.xsd">
<core xsi:schemaLocation="urn:activemq:core ">
<security-enabled>true</security-enabled>
<security-settings>
<security-setting match="katello.candlepin.#">
<permission roles="candlepinEventsConsumer" type="consume" />
</security-setting>
<security-setting match="#">
<permission roles="invm-role" type="createAddress" />
<permission roles="invm-role" type="deleteAddress" />
<permission roles="invm-role" type="createDurableQueue" />
<permission roles="invm-role" type="deleteDurableQueue" />
<permission roles="invm-role" type="createNonDurableQueue" />
<permission roles="invm-role" type="deleteNonDurableQueue" />
<permission roles="invm-role" type="send" />
<permission roles="invm-role" type="consume" />
<permission roles="invm-role" type="browse" />
<permission roles="invm-role" type="manage" />
</security-setting>
</security-settings>
<cluster-user />
</core>
</configuration>
""".strip()
CANDLE_BROKER_NO_SENSITIVE_INFO = """
<configuration xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="urn:activemq /schema/artemis-configuration.xsd">
<core xsi:schemaLocation="urn:activemq:core ">
<security-enabled>true</security-enabled>
<security-settings>
<security-setting match="katello.candlepin.#">
<permission roles="candlepinEventsConsumer" type="consume" />
</security-setting>
<security-setting match="#">
<permission roles="invm-role" type="createAddress" />
<permission roles="invm-role" type="deleteAddress" />
<permission roles="invm-role" type="createDurableQueue" />
<permission roles="invm-role" type="deleteDurableQueue" />
<permission roles="invm-role" type="createNonDurableQueue" />
<permission roles="invm-role" type="deleteNonDurableQueue" />
<permission roles="invm-role" type="send" />
<permission roles="invm-role" type="consume" />
<permission roles="invm-role" type="browse" />
<permission roles="invm-role" type="manage" />
</security-setting>
</security-settings>
<cluster-user />
</core>
</configuration>
""".strip()
RELATIVE_PATH = '/etc/candlepin/broker.xml'
def xml_check_removed(result):
root = ET.fromstring('\n'.join(result))
assert root is not None
core_ele = root.find('core')
assert core_ele is not None
search_tags = ['cluster-password', 'acceptors']
for tag in search_tags:
found = core_ele.find(tag)
assert found is None, 'Tag {} should not be in result'.format(tag)
def xml_compare(result, expected):
root_result = ET.fromstring('\n'.join(result))
root_expected = ET.fromstring('\n'.join(expected))
re_core_ele = root_result.find('core')
assert re_core_ele is not None
ex_core_ele = root_expected.find('core')
assert ex_core_ele is not None
search_tags = ['cluster-user', 'security-enabled']
for tag in search_tags:
ex_found = ex_core_ele.find(tag)
if ex_found is not None:
re_found = re_core_ele.find(tag)
assert re_found is not None, 'Tag {} is in expected but not in result'.format(tag)
assert re_found.text == ex_found.text, 'Tag {} text is different in expected and result'.format(tag)
ex_settings = ex_core_ele.find('security-settings')
if ex_settings is not None:
re_settings = re_core_ele.find('security-settings')
        assert re_settings is not None, 'Tag security-settings is in expected but not in result'
ex_settings_dict = {}
re_settings_dict = {}
for setting in ex_settings.findall('security-setting'):
ex_settings_dict[setting.get('match')] = []
for perm in setting.findall('permission'):
ex_settings_dict[setting.get('match')].append((perm.get('roles'), perm.get('type')))
for setting in re_settings.findall('security-setting'):
re_settings_dict[setting.get('match')] = []
for perm in setting.findall('permission'):
re_settings_dict[setting.get('match')].append((perm.get('roles'), perm.get('type')))
assert ex_settings_dict == re_settings_dict
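The two helpers above parse the scrubbed broker XML and assert on its structure. The same pattern on a toy document, using the standard library's ElementTree directly (the helpers use the ET re-exported by insights.core):

import xml.etree.ElementTree as ElementTree

doc = '<configuration><core><cluster-user /></core></configuration>'
root = ElementTree.fromstring(doc)
core = root.find('core')
assert core is not None
assert core.find('cluster-password') is None  # the sensitive tag is gone
assert core.find('acceptors') is None         # so are the acceptor credentials
print('toy scrub check passed')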
def test_candlepin_broker():
candlepin_broker_file = Mock()
candlepin_broker_file.content = CANDLEPIN_BROKER.splitlines()
broker = {candlepin_broker.LocalSpecs.candlepin_broker_input: candlepin_broker_file}
result = candlepin_broker.candlepin_broker(broker)
assert result is not None
assert isinstance(result, DatasourceProvider)
expected = DatasourceProvider(content=CANDLEPIN_BROKER_XML.splitlines(), relative_path=RELATIVE_PATH)
xml_check_removed(result.content)
xml_compare(result.content, expected.content)
assert result.relative_path == expected.relative_path
def test_candlepin_broker_bad():
candlepin_broker_file = Mock()
candlepin_broker_file.content = CANDLEPIN_BROKER_BAD.splitlines()
broker = {candlepin_broker.LocalSpecs.candlepin_broker_input: candlepin_broker_file}
with pytest.raises(SkipComponent) as e:
candlepin_broker.candlepin_broker(broker)
assert 'Unexpected exception' in str(e)
def test_candlepin_broker_no_sensitive_info():
candlepin_broker_file = Mock()
candlepin_broker_file.content = CANDLEPIN_BROKER_NO_SENSITIVE_INFO.splitlines()
broker = {candlepin_broker.LocalSpecs.candlepin_broker_input: candlepin_broker_file}
result = candlepin_broker.candlepin_broker(broker)
assert result is not None
assert isinstance(result, DatasourceProvider)
    expected = DatasourceProvider(content=CANDLE_BROKER_NO_SENSITIVE_INFO.splitlines(), relative_path=RELATIVE_PATH)
xml_check_removed(result.content)
xml_compare(result.content, expected.content)
assert result.relative_path == expected.relative_path
| 45.653333
| 326
| 0.657516
| 1,097
| 10,272
| 6.023701
| 0.136737
| 0.054479
| 0.078692
| 0.069613
| 0.80796
| 0.750605
| 0.736077
| 0.720642
| 0.710654
| 0.710654
| 0
| 0.005702
| 0.214661
| 10,272
| 224
| 327
| 45.857143
| 0.813437
| 0
| 0
| 0.673575
| 0
| 0.025907
| 0.635125
| 0.170171
| 0
| 0
| 0
| 0
| 0.088083
| 1
| 0.025907
| false
| 0.015544
| 0.031088
| 0
| 0.056995
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
0b12b9992d497676ad30c37b169031f1a79fee67
| 143
|
py
|
Python
|
01_fundamentals/02_operators/grouping_values.py
|
doanthanhnhan/learningPY
|
93c10c5225a306c791402095e1cf0b454f31d0c2
|
[
"MIT"
] | 1
|
2021-04-04T02:39:05.000Z
|
2021-04-04T02:39:05.000Z
|
01_fundamentals/02_operators/grouping_values.py
|
doanthanhnhan/learningPY
|
93c10c5225a306c791402095e1cf0b454f31d0c2
|
[
"MIT"
] | null | null | null |
01_fundamentals/02_operators/grouping_values.py
|
doanthanhnhan/learningPY
|
93c10c5225a306c791402095e1cf0b454f31d0c2
|
[
"MIT"
] | null | null | null |
# Making a List
my_list = [1, 2.5, "A string", True]
print(my_list)
my_list = [1, 2.5, "A string", True]
print(my_list[2])
print(len(my_list))
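Two more list operations worth knowing here, as a small sketch on the same my_list: negative indexes count from the end, and slices return sub-lists.

print(my_list[-1])   # True -- the last element
print(my_list[1:3])  # [2.5, 'A string']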
| 20.428571
| 36
| 0.65035
| 30
| 143
| 2.933333
| 0.366667
| 0.340909
| 0.227273
| 0.25
| 0.75
| 0.75
| 0.75
| 0.75
| 0.75
| 0.75
| 0
| 0.057377
| 0.146853
| 143
| 7
| 37
| 20.428571
| 0.663934
| 0.090909
| 0
| 0.4
| 0
| 0
| 0.124031
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.6
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 10
|
0b155d9e8742ba6d91aca897a1f7f2eeaf77eacd
| 9,975
|
py
|
Python
|
src/enochecker/useragents.py
|
hnj2/enochecker
|
464d585500effb98f33e357df9a90337af52a770
|
[
"MIT"
] | null | null | null |
src/enochecker/useragents.py
|
hnj2/enochecker
|
464d585500effb98f33e357df9a90337af52a770
|
[
"MIT"
] | null | null | null |
src/enochecker/useragents.py
|
hnj2/enochecker
|
464d585500effb98f33e357df9a90337af52a770
|
[
"MIT"
] | null | null | null |
from random import sample
from typing import List
# Useragent list from ructf github :)
useragents = [
r'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/53.0.2785.143 Safari/537.36',
r'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/53.0.2785.143 Safari/537.36',
r'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/53.0.2785.143 Safari/537.36',
r'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:49.0) Gecko/20100101 Firefox/49.0',
r'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/53.0.2785.143 Safari/537.36',
r'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:49.0) Gecko/20100101 Firefox/49.0',
r'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.71 Safari/537.36',
r'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/53.0.2785.143 Safari/537.36',
r'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.71 Safari/537.36',
r'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12) AppleWebKit/602.1.50 (KHTML, like Gecko) Version/10.0 Safari/602.1.50',
r'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.71 Safari/537.36',
r'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:49.0) Gecko/20100101 Firefox/49.0',
r'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/53.0.2785.143 Safari/537.36',
r'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.71 Safari/537.36',
r'Mozilla/5.0 (Windows NT 6.1; WOW64; Trident/7.0; rv:11.0) like Gecko',
r'Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/53.0.2785.143 Safari/537.36',
r'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_1) AppleWebKit/602.2.14 (KHTML, like Gecko) Version/10.0.1 Safari/602.2.14',
r'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.11; rv:49.0) Gecko/20100101 Firefox/49.0',
r'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.71 Safari/537.36',
r'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_6) AppleWebKit/602.1.50 (KHTML, like Gecko) Version/10.0 Safari/602.1.50',
r'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/53.0.2785.143 Safari/537.36',
r'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/53.0.2785.143 Safari/537.36',
r'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.71 Safari/537.36',
r'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.79 Safari/537.36 Edge/14.14393',
r'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.71 Safari/537.36',
r'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/53.0.2785.143 Safari/537.36',
r'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/53.0.2785.116 Safari/537.36',
r'Mozilla/5.0 (Windows NT 6.3; WOW64; rv:49.0) Gecko/20100101 Firefox/49.0',
r'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.71 Safari/537.36',
r'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.12; rv:49.0) Gecko/20100101 Firefox/49.0',
r'Mozilla/5.0 (Windows NT 6.1; rv:49.0) Gecko/20100101 Firefox/49.0',
r'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/53.0.2785.116 Safari/537.36',
r'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/53.0.2785.116 Safari/537.36',
r'Mozilla/5.0 (Windows NT 10.0; WOW64; Trident/7.0; rv:11.0) like Gecko',
r'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.59 Safari/537.36',
r'Mozilla/5.0 (X11; Linux x86_64; rv:49.0) Gecko/20100101 Firefox/49.0',
r'Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.71 Safari/537.36',
r'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.71 Safari/537.36',
r'Mozilla/5.0 (X11; Linux x86_64; rv:45.0) Gecko/20100101 Firefox/45.0',
r'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.71 Safari/537.36',
r'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Ubuntu Chromium/53.0.2785.143 Chrome/53.0.2785.143 Safari/537.36',
r'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_6) AppleWebKit/601.7.7 (KHTML, like Gecko) Version/9.1.2 Safari/601.7.7',
r'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_6) AppleWebKit/601.7.8 (KHTML, like Gecko) Version/9.1.3 Safari/601.7.8',
r'Mozilla/5.0 (iPad; CPU OS 10_0_2 like Mac OS X) AppleWebKit/602.1.50 (KHTML, like Gecko) Version/10.0 Mobile/14A456 Safari/602.1',
r'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:49.0) Gecko/20100101 Firefox/49.0',
r'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/53.0.2785.116 Safari/537.36',
r'Mozilla/5.0 (Windows NT 6.3; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/53.0.2785.143 Safari/537.36',
r'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/53.0.2785.143 Safari/537.36',
r'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_5) AppleWebKit/602.1.50 (KHTML, like Gecko) Version/10.0 Safari/602.1.50',
r'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.59 Safari/537.36',
r'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.10; rv:49.0) Gecko/20100101 Firefox/49.0',
r'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.0; Trident/5.0; Trident/5.0)',
r'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0; Trident/5.0)',
r'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_6) AppleWebKit/602.2.14 (KHTML, like Gecko) Version/10.0.1 Safari/602.2.14',
r'Mozilla/5.0 (Windows NT 6.1; Trident/7.0; rv:11.0) like Gecko',
r'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:45.0) Gecko/20100101 Firefox/45.0',
r'Mozilla/5.0 (Windows NT 5.1; rv:49.0) Gecko/20100101 Firefox/49.0',
r'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/46.0.2486.0 Safari/537.36 Edge/13.10586',
r'Mozilla/5.0 (Windows NT 6.3; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.71 Safari/537.36',
r'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.59 Safari/537.36',
r'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.59 Safari/537.36',
r'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/53.0.2785.143 Safari/537.36',
r'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/52.0.2743.116 Safari/537.36',
r'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.59 Safari/537.36',
r'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:47.0) Gecko/20100101 Firefox/47.0',
r'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/53.0.2785.143 Safari/537.36',
r'Mozilla/5.0 (Windows NT 6.3; WOW64; Trident/7.0; rv:11.0) like Gecko',
r'Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:49.0) Gecko/20100101 Firefox/49.0',
r'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/53.0.2785.116 Safari/537.36',
r'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.59 Safari/537.36',
r'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:48.0) Gecko/20100101 Firefox/48.0',
r'Mozilla/5.0 (iPhone; CPU iPhone OS 10_0_2 like Mac OS X) AppleWebKit/601.1 (KHTML, like Gecko) CriOS/53.0.2785.109 Mobile/14A456 Safari/601.1.46',
r'Mozilla/5.0 (iPhone; CPU iPhone OS 10_0_2 like Mac OS X) AppleWebKit/602.1.50 (KHTML, like Gecko) Version/10.0 Mobile/14A456 Safari/602.1',
r'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:47.0) Gecko/20100101 Firefox/47.0',
r'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_5) AppleWebKit/601.6.17 (KHTML, like Gecko) Version/9.1.1 Safari/601.6.17',
r'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:48.0) Gecko/20100101 Firefox/48.0',
r'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/52.0.2743.116 Safari/537.36',
r'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Ubuntu Chromium/52.0.2743.116 Chrome/52.0.2743.116 Safari/537.36',
r'Mozilla/5.0 (Windows NT 10.0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/53.0.2785.143 Safari/537.36',
r'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.71 Safari/537.36',
r'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/53.0.2785.116 Safari/537.36',
r'Mozilla/5.0 (X11; Fedora; Linux x86_64; rv:49.0) Gecko/20100101 Firefox/49.0',
r'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/53.0.2785.116 Safari/537.36',
r'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_4) AppleWebKit/601.5.17 (KHTML, like Gecko) Version/9.1 Safari/601.5.17',
r'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/53.0.2785.116 Safari/537.36',
r'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.59 Safari/537.36'
] # type: List[str]
def random_useragent():
# type: ()-> str
"""
Returns a random useragent
:return: A seemingly valid useragent.
"""
return sample(useragents, 1)[0]
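A typical way to consume this helper, sketched under the assumption that the requests library is available (it is not a dependency of this module), is to set the User-Agent header per request:
# Illustrative usage sketch; 'requests' is an assumption, not imported by this file.
import requests
from enochecker.useragents import random_useragent

resp = requests.get('http://example.com', headers={'User-Agent': random_useragent()})
print(resp.status_code)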
| 97.794118
| 152
| 0.70005
| 1,990
| 9,975
| 3.474372
| 0.050251
| 0.070871
| 0.111947
| 0.124385
| 0.940411
| 0.939832
| 0.933034
| 0.924501
| 0.92291
| 0.922476
| 0
| 0.241167
| 0.137444
| 9,975
| 101
| 153
| 98.762376
| 0.562413
| 0.013233
| 0
| 0
| 0
| 0.934783
| 0.90673
| 0.00448
| 0
| 0
| 0
| 0
| 0
| 1
| 0.01087
| false
| 0
| 0.021739
| 0
| 0.043478
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 9
| 9bc4d70bcb1e5463436709553fd1c41b589acba2
| 17,451
| py
| Python
| calvin/tests/test_queue.py
| gabrielcercel/calvin-base
| c0315f100643230d65aed1745e1c22df3e7a7c2c
| ["Apache-2.0"]
| 334
| 2015-06-04T15:14:28.000Z
| 2022-02-09T11:14:17.000Z
| calvin/tests/test_queue.py
| gabrielcercel/calvin-base
| c0315f100643230d65aed1745e1c22df3e7a7c2c
| ["Apache-2.0"]
| 89
| 2015-06-13T19:15:35.000Z
| 2019-12-03T19:23:20.000Z
| calvin/tests/test_queue.py
| gabrielcercel/calvin-base
| c0315f100643230d65aed1745e1c22df3e7a7c2c
| ["Apache-2.0"]
| 112
| 2015-06-06T19:16:54.000Z
| 2020-10-19T01:27:55.000Z
# -*- coding: utf-8 -*-
# Copyright (c) 2015 Ericsson AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import unittest
from calvin.runtime.north.plugins.port import queue
from calvin.runtime.north.calvin_token import Token
pytestmark = pytest.mark.unittest
class QueueTests(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def verify_data(self, write_data, fifo_data):
print write_data, fifo_data
for a, b in zip(write_data, fifo_data):
d = b.value
self.assertEquals(a, d)
def test1(self):
"""Adding reader again (reconnect)"""
f = queue.fanout_fifo.FanoutFIFO({'queue_length': 4, 'direction': "in"}, {})
f.add_reader('p1.id', {})
data = ['1', '2', '3', '4']
for token in data:
self.assertTrue(f.slots_available(1, None))
self.assertTrue(f.write(Token(token), None))
self.verify_data(['1', '2'], [f.peek('p1.id') for _ in range(2)])
f.commit('p1.id')
f.add_reader('p1.id', {})
self.verify_data(['3', '4'], [f.peek('p1.id') for _ in range(2)])
self.assertRaises(queue.common.QueueEmpty, f.peek, 'p1.id')
f.commit('p1.id')
for token in ['5', '6', '7', '8']:
self.assertTrue(f.slots_available(1, None))
self.assertTrue(f.write(Token(token), None))
self.assertFalse(f.slots_available(1, None))
self.verify_data(['5', '6', '7', '8'], [f.peek('p1.id')
for _ in range(4)])
f.commit('p1.id')
def test2(self):
"""Multiple readers"""
f = queue.fanout_fifo.FanoutFIFO({'queue_length': 4, 'direction': "in"}, {})
f.add_reader("r1", {})
f.add_reader("r2", {})
# Ensure fifo is empty
self.assertRaises(queue.common.QueueEmpty, f.peek, "r1")
self.assertTrue(f.tokens_available(0, "r1"))
self.assertTrue(f.tokens_available(0, "r2"))
# Add something
self.assertTrue(f.write(Token('1'), None))
self.assertTrue(f.tokens_available(1, "r1"))
self.assertTrue(f.tokens_available(1, "r2"))
# Reader r1 read something
self.assertTrue(f.peek('r1'))
f.commit('r1')
self.assertEquals([True] * 3, [f.write(Token(t), None) for t in ['2', '3', '4']])
self.assertRaises(queue.common.QueueFull, f.write, Token('5'), None)
self.verify_data(['2', '3', '4'], [f.peek('r1') for _ in range(3)])
f.commit("r1")
# Reader r1 all done, ensure reader r2 can still read
self.assertTrue(f.tokens_available(4, "r2"))
self.assertFalse(f.slots_available(1, None))
self.assertFalse(f.tokens_available(1, "r1"))
# Reader r2 reads something
self.verify_data(['1', '2', '3'], [f.peek("r2") for _ in range(3)])
f.commit("r2")
self.assertTrue(f.tokens_available(1, "r2"))
self.assertTrue(f.write(Token('5'), None))
self.verify_data(['4', '5'], [f.peek("r2") for _ in range(2)])
self.assertFalse(f.tokens_available(1, "r2"))
self.assertRaises(queue.common.QueueEmpty, f.peek, "r2")
self.assertTrue(f.tokens_available(1, "r1"))
self.verify_data(['5'], [f.peek("r1")])
f.commit("r2")
f.commit("r1")
self.assertTrue(f.write(Token('6'), None))
self.assertTrue(f.write(Token('7'), None))
self.assertTrue(f.write(Token('8'), None))
self.assertTrue([f.peek("r1")
for _ in range(3)], [f.peek("r2") for _ in range(3)])
self.assertRaises(queue.common.QueueEmpty, f.peek, "r1")
self.assertRaises(queue.common.QueueEmpty, f.peek, "r2")
def test3(self):
"""Testing commit reads"""
f = queue.fanout_fifo.FanoutFIFO({'queue_length': 4, 'direction': "in"}, {})
f.add_reader("r1", {})
for token in ['1', '2', '3', '4']:
self.assertTrue(f.slots_available(1, None))
self.assertTrue(f.write(Token(token), None))
# Fails, fifo full
self.assertFalse(f.slots_available(1, None))
self.assertRaises(queue.common.QueueFull, f.write, Token('5'), None)
# Tentative, fifo still full
self.verify_data(['1'], [f.peek("r1")])
self.assertFalse(f.slots_available(1, None))
self.assertRaises(queue.common.QueueFull, f.write, Token('5'), None)
# commit previous reads, fifo 1 pos free
f.commit('r1')
self.assertTrue(f.slots_available(1, None))
self.assertTrue(f.write(Token('5'), None))
# fifo full again
self.assertFalse(f.slots_available(1, None))
self.assertRaises(queue.common.QueueFull, f.write, Token('5'), None)
def test4(self):
"""Testing rollback reads"""
f = queue.fanout_fifo.FanoutFIFO({'queue_length': 4, 'direction': "in"}, {})
f.add_reader('r1', {})
for token in ['1', '2', '3', '4']:
self.assertTrue(f.slots_available(1, None))
self.assertTrue(f.write(Token(token), None))
# fifo full
self.assertFalse(f.slots_available(1, None))
self.assertRaises(queue.common.QueueFull, f.write, Token('5'), None)
# tentative reads
self.verify_data(['1', '2', '3', '4'], [f.peek("r1")
for _ in range(4)])
# check still tentative
self.assertTrue(f.tokens_available(0, "r1"))
self.assertTrue(f.slots_available(0, None))
f.cancel("r1")
self.assertFalse(f.slots_available(1, None))
self.assertRaises(queue.common.QueueFull, f.write, Token('5'), None)
self.assertTrue(f.tokens_available(4, "r1"))
# re-read
self.verify_data(['1'], [f.peek("r1")])
f.commit("r1")
self.assertTrue(f.tokens_available(3, "r1"))
# one pos free in fifo
self.assertTrue(f.slots_available(1, None))
self.assertTrue(f.write(Token('a'), None))
self.assertFalse(f.slots_available(1, None))
self.assertRaises(queue.common.QueueFull, f.write, Token('b'), None)
def test_round_robin_queue_1(self):
"""Round-Robin scheduled queue test"""
f = queue.fanout_round_robin_fifo.FanoutRoundRobinFIFO({'routing': 'round-robin', 'nbr_peers': 2}, {})
f.add_reader("r1", {})
f.add_reader("r2", {})
# Ensure fifo is empty
self.assertRaises(queue.common.QueueEmpty, f.peek, "r1")
self.assertRaises(queue.common.QueueEmpty, f.peek, "r2")
self.assertFalse(f.tokens_available(1, "r1"))
self.assertFalse(f.tokens_available(1, "r2"))
# Add something
self.assertTrue(f.write(Token('1'), None))
self.assertTrue(f.tokens_available(1, "r1"))
self.assertFalse(f.tokens_available(1, "r2"))
# Add something
self.assertTrue(f.write(Token('2'), None))
self.assertTrue(f.tokens_available(1, "r1"))
self.assertFalse(f.tokens_available(2, "r1"))
self.assertTrue(f.tokens_available(1, "r2"))
# Reader r2 read something
self.verify_data(['2'], [f.peek('r2')])
self.assertTrue(f.tokens_available(1, "r1"))
self.assertFalse(f.tokens_available(2, "r1"))
self.assertFalse(f.tokens_available(1, "r2"))
f.commit('r2')
self.assertTrue(f.tokens_available(1, "r1"))
self.assertFalse(f.tokens_available(2, "r1"))
self.assertFalse(f.tokens_available(1, "r2"))
self.assertEquals([True] * 2, [f.write(Token(t), None) for t in ['3', '4']])
self.verify_data(['1', '3'], [f.peek('r1') for _ in range(2)])
f.commit("r1")
# Reader r1 all done, ensure reader r2 can still read
self.assertTrue(f.tokens_available(1, "r2"))
self.assertFalse(f.tokens_available(1, "r1"))
# Reader r2 reads something
self.verify_data(['4'], [f.peek("r2") for _ in range(1)])
f.commit("r2")
self.assertFalse(f.tokens_available(1, "r2"))
self.assertTrue(f.write(Token('5'), None))
self.verify_data(['5'], [f.peek("r1")])
self.assertTrue(f.write(Token('6'), None))
self.assertFalse(f.tokens_available(1, "r1"))
self.assertRaises(queue.common.QueueEmpty, f.peek, "r1")
self.assertTrue(f.tokens_available(1, "r2"))
self.verify_data(['6'], [f.peek("r2")])
f.commit("r2")
f.commit("r1")
self.assertTrue(f.write(Token('7'), None))
self.assertTrue(f.write(Token('8'), None))
self.assertTrue(f.write(Token('9'), None))
self.assertTrue(f.write(Token('10'), None))
self.verify_data(['7', '9'], [f.peek("r1") for _ in range(2)])
self.verify_data(['8', '10'], [f.peek("r2") for _ in range(2)])
self.assertRaises(queue.common.QueueEmpty, f.peek, "r1")
self.assertRaises(queue.common.QueueEmpty, f.peek, "r2")
def test_round_robin_queue_2(self):
"""Round-Robin scheduled queue test"""
f = queue.fanout_round_robin_fifo.FanoutRoundRobinFIFO({'routing': 'round-robin', 'nbr_peers': 2}, {})
f.add_reader("r1", {})
f.add_reader("r2", {})
# Ensure fifo is empty
self.assertRaises(queue.common.QueueEmpty, f.peek, "r1")
self.assertRaises(queue.common.QueueEmpty, f.peek, "r2")
self.assertFalse(f.tokens_available(1, "r1"))
self.assertFalse(f.tokens_available(1, "r2"))
# Fill up
self.assertEquals([True] * 4, [f.write(Token(t), None) for t in map(lambda x: str(x), range(1,5))])
self.assertTrue(f.tokens_available(2, "r1"))
self.assertTrue(f.tokens_available(2, "r2"))
# Reader r2 read something
self.verify_data(['2'], [f.peek('r2')])
self.assertFalse(f.tokens_available(2, "r2"))
self.assertTrue(f.tokens_available(1, "r2"))
self.assertTrue(f.tokens_available(2, "r1"))
f.cancel('r2')
self.assertTrue(f.tokens_available(2, "r2"))
self.assertTrue(f.tokens_available(2, "r1"))
# Reader r2 read something again
self.verify_data(['2'], [f.peek('r2')])
self.assertFalse(f.tokens_available(2, "r2"))
self.assertTrue(f.tokens_available(1, "r2"))
self.assertTrue(f.tokens_available(2, "r1"))
f.cancel('r2')
self.assertTrue(f.tokens_available(2, "r2"))
self.assertTrue(f.tokens_available(2, "r1"))
# Reader r2 read something again plus some
self.verify_data(['2', '4'], [f.peek('r2') for _ in range(2)])
self.assertFalse(f.tokens_available(1, "r2"))
self.assertTrue(f.tokens_available(2, "r1"))
f.cancel('r2')
self.assertTrue(f.tokens_available(2, "r2"))
self.assertTrue(f.tokens_available(2, "r1"))
# Reader r2 read something again
self.verify_data(['2', '4'], [f.peek('r2') for _ in range(2)])
self.assertFalse(f.tokens_available(1, "r2"))
self.assertTrue(f.tokens_available(2, "r1"))
f.commit('r2')
self.assertFalse(f.tokens_available(1, "r2"))
self.assertTrue(f.tokens_available(2, "r1"))
self.verify_data(['1', '3'], [f.peek('r1') for _ in range(2)])
self.assertFalse(f.tokens_available(1, "r1"))
f.commit('r1')
self.assertFalse(f.tokens_available(1, "r1"))
def test_scheduled_queue_3(self):
"""Round-Robin scheduled queue test"""
f = queue.fanout_round_robin_fifo.FanoutRoundRobinFIFO({'routing': 'round-robin', 'nbr_peers': 2}, {})
f.add_reader("r1", {})
f.add_reader("r2", {})
# Ensure fifo is empty
self.assertRaises(queue.common.QueueEmpty, f.peek, "r1")
self.assertRaises(queue.common.QueueEmpty, f.peek, "r2")
self.assertFalse(f.tokens_available(1, "r1"))
self.assertFalse(f.tokens_available(1, "r2"))
count = {'r1': 0, 'r2': 0}
values = {'r1': [], 'r2': []}
for k in range(10):
# Fill up
self.assertEquals([True] * 4, [f.write(Token(t), None) for t in map(lambda x: str(x), range(k*4+1,k*4+5))])
# Empty
for r in ['r1', 'r2']:
while True:
try:
values[r].append(f.peek(r))
count[r] += 1
except queue.common.QueueEmpty:
break
f.commit(r)
assert count['r1'] == 20
assert count['r2'] == 20
values['r1'] = [int(v.value) for v in values['r1']]
values['r2'] = [int(v.value) for v in values['r2']]
# No common tokens and always in order
assert len(set(values['r1']).intersection(set(values['r2']))) == 0
assert sorted(values['r1']) == values['r1']
assert sorted(values['r2']) == values['r2']
def test_random_queue_4(self):
"""Random scheduled queue test"""
f = queue.fanout_random_fifo.FanoutRandomFIFO({'routing': 'random', 'nbr_peers': 2}, {})
f.add_reader("r1", {})
f.add_reader("r2", {})
# Ensure fifo is empty
self.assertRaises(queue.common.QueueEmpty, f.peek, "r1")
self.assertRaises(queue.common.QueueEmpty, f.peek, "r2")
self.assertFalse(f.tokens_available(1, "r1"))
self.assertFalse(f.tokens_available(1, "r2"))
count = {'r1': 0, 'r2': 0}
values = {'r1': [], 'r2': []}
for k in range(10):
# Fill up
self.assertEquals([True] * 4, [f.write(Token(t), None) for t in map(lambda x: str(x), range(k*4+1,k*4+5))])
# Empty
for r in ['r1', 'r2']:
while True:
try:
values[r].append(f.peek(r))
count[r] += 1
except queue.common.QueueEmpty:
break
f.commit(r)
# Even if random assume that at least 5 tokens to each peer
assert count['r1'] > 5
assert count['r2'] > 5
assert sum(count.values()) == 10 * 4
values['r1'] = [int(v.value) for v in values['r1']]
values['r2'] = [int(v.value) for v in values['r2']]
# No common tokens and always in order
assert len(set(values['r1']).intersection(set(values['r2']))) == 0
assert sorted(values['r1']) == values['r1']
assert sorted(values['r2']) == values['r2']
def test_collect_unordered1(self):
f = queue.collect_unordered.CollectUnordered({'routing': 'collect-unordered', 'nbr_peers': 10}, {})
for i in range(10):
f.add_writer("w"+str(i), {})
# Fill queue
try:
for t in range(40):
for i in range(10):
f.write(Token(i), "w"+str(i))
except:
pass
tokens = []
try:
for i in range(1000):
tokens.append(f.peek(None))
f.commit(None)
except:
pass
print [t.value for t in tokens]
assert [t.value for t in tokens] == range(0,10) * 4
def test_collect_unordered2(self):
f = queue.collect_unordered.CollectUnordered({'routing': 'collect-unordered', 'nbr_peers': 10}, {})
for i in range(10):
f.add_writer("w"+str(i), {})
# Fill queue
try:
for t in range(40):
for i in range(10):
f.write(Token(i), "w"+str(i))
except:
pass
tokens = []
try:
for i in range(1000):
tokens.append(f.peek(None))
if i % 2 == 1:
f.commit(None)
else:
f.cancel(None)
except:
pass
print [t.value for t in tokens]
assert [t.value for t in tokens] == [0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,8,8,9,9] * 4
def test_collect_unordered3(self):
f = queue.collect_unordered.CollectUnordered({'routing': 'collect-unordered', 'nbr_peers': 10}, {})
for i in range(10):
f.add_writer("w"+str(i), {})
# Fill queue
try:
for t in range(40):
for i in range(0,6):
f.write(Token(i), "w"+str(i))
except:
pass
tokens = []
try:
for i in range(1000):
tokens.append(f.peek(None))
if i % 2 == 1:
f.commit(None)
else:
f.cancel(None)
try:
f.write(Token(0), "w0")
except:
pass
except:
pass
print [t.value for t in tokens]
s = [0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5] * 4 + [0] * 12
assert [t.value for t in tokens][:len(s)] == s
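The pattern these tests exercise throughout is tentative reads: peek returns tokens without consuming them, commit makes the reads permanent, and cancel rewinds them. A minimal standalone sketch of that contract, our illustration and not Calvin's FanoutFIFO implementation, is:
# Minimal sketch of tentative reads with commit/cancel (illustrative only).
class TentativeFIFO(object):
    def __init__(self):
        self.items = []
        self.read_pos = 0      # committed read position
        self.tentative = 0     # reads performed since the last commit

    def write(self, token):
        self.items.append(token)

    def peek(self):
        i = self.read_pos + self.tentative
        if i >= len(self.items):
            raise IndexError('queue empty')
        self.tentative += 1
        return self.items[i]

    def commit(self):
        # Consume everything peeked so far.
        self.read_pos += self.tentative
        self.tentative = 0

    def cancel(self):
        # Roll back: tentative reads become visible again.
        self.tentative = 0

f = TentativeFIFO()
f.write('1'); f.write('2')
assert f.peek() == '1'
f.cancel()                  # rollback, so '1' can be peeked again
assert f.peek() == '1'
f.commit()                  # consume, so the next peek returns '2'
assert f.peek() == '2'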
| 37.288462
| 119
| 0.555154
| 2,304
| 17,451
| 4.124566
| 0.096354
| 0.088393
| 0.094707
| 0.060823
| 0.836367
| 0.824687
| 0.806798
| 0.783647
| 0.756287
| 0.731453
| 0
| 0.040048
| 0.278838
| 17,451
| 467
| 120
| 37.368308
| 0.715058
| 0.082058
| 0
| 0.786378
| 0
| 0
| 0.048071
| 0
| 0
| 0
| 0
| 0
| 0.424149
| 0
| null | null | 0.027864
| 0.012384
| null | null | 0.012384
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 9
| 50465b1a4463b39b3d9891638fc95254bfa2a2b5
| 22,774
| py
| Python
| App/apis/UserApi.py
| TonyRays/flaskapi4.0
| 559b6cee8c75cee15a658f06730a9a3b97c1c988
| ["Apache-2.0"]
| null
| null
| null
| App/apis/UserApi.py
| TonyRays/flaskapi4.0
| 559b6cee8c75cee15a658f06730a9a3b97c1c988
| ["Apache-2.0"]
| null
| null
| null
| App/apis/UserApi.py
| TonyRays/flaskapi4.0
| 559b6cee8c75cee15a658f06730a9a3b97c1c988
| ["Apache-2.0"]
| null
| null
| null
# coding=utf-8
import uuid
from flask import abort, request, g
from flask_restful import Resource, reqparse, fields, marshal, marshal_with
from werkzeug.security import generate_password_hash, check_password_hash
from App.ext import cache
from App.models.UserModel import *
#from flask_login import login_user
# 输出参数
parser = reqparse.RequestParser()
#parser_user = reqparse.RequestParser()
#parser.add_argument('user', required=True, help="请输入操作者身份mentors/students")
# 比较常见的位置直接放在 ?action=login, register
#parser.add_argument('action', required=True, help="请提供具体操作")
# ———————————————————— Labs 注册 ———————————————————————————————— #
# 内层参数格式化
labs_fields = {
'lab_id' : fields.Integer,
'lab_name': fields.String,
'lab_info': fields.String,
'lab_time': fields.DateTime
# lab_permission': fields.Integer,
}
# 外层输出参数格式化
response_labs_fields = {
'status': fields.Integer,
'msg': fields.String,
'data': fields.Nested(labs_fields)
}
# /labs/register/
class LabsRegister(Resource):
def post(self):
parser = reqparse.RequestParser()
parser.add_argument('l_name', required=True, help="请输入lab名称")
parser.add_argument('l_info', required=True, help="请输入lab信息")
#global user
args = parser.parse_args()
#users = args.get("user")
#action = args.get("action")
l_name = args.get("l_name")
l_info = args.get("l_info")
#global data
data = {
"status": 201,
"msg": 'lab register ok, please register mentors then!'
}
user = Labs()
# 防止重复注册
if user.query.filter(Labs.lab_name.__eq__(l_name)).one_or_none():
data['status'] = 403
data['msg'] = 'lab has already registered'
return data
else:
user.lab_name = l_name
user.lab_info = l_info
user.lab_time = datetime.now()
user.save()
data['data'] = user
return marshal(data, response_labs_fields)
# eg. curl http://localhost:5000/mentors/register/
def get(self):
return marshal(data, response_labs_fields)
# /labs/modify/<int:id>/
class Labsmodify(Resource):
# 根据 id 获取
@authm.login_required
@marshal_with(response_labs_fields)
def get(self, id):
user = Labs.query.get(id)
data = {
"status": 200,
"msg": 'ok',
'data': user,
}
return data
# 根据 id 修改
@authm.login_required
@marshal_with(response_labs_fields)
def post(self, id):
parser = reqparse.RequestParser()
parser.add_argument('l_name', required=True, help="请输入lab名称")
parser.add_argument('l_info', required=True, help="请输入lab信息")
args = parser.parse_args()
l_name = args.get("l_name")
l_info = args.get("l_info")
user = Labs.query.get(id) # 根据id获得labs
if not user:
abort(401, message="user not login or didnt find id")
else:
user.lab_name = l_name
user.lab_info = l_info
#user.lab_time = datetime.now()
user.save()
data = {
"status": 200,
"msg": 'labs change ok',
'data': user
}
return data
'''
# /labs/<int:mentor_id>/<int:lab_id>/operation1
class operation1(Resource):
# 操作labs operation1
@authm.login_required
@marshal_with(response_mentors_fields)
def POST(self, mentor_id):
user = Mentors.query.get(mentor_id)
if user.lab_id
#data = {
# "status": 200,
# "msg": 'ok',
# 'data': user,
#}
#return data
return
# 根据 id 修改用户信息 --> 密码修改
@authm.login_required
@marshal_with(response_mentors_fields)
def post(self, mentor_id):
parser = reqparse.RequestParser()
parser.add_argument('u_password', required=True, help="请输入修改后的密码")
args = parser.parse_args()
u_password = args.get("u_password")
user = Mentors.query.get(id) # 根据id获得用户
user.mentor_password = user.men_password(u_password)
user.save()
data = {
"status": 200,
"msg": 'password change ok',
'data': user
}
return data
# 根据 id 删除某个用户 Model中应该设计一个字段 is_delete 来判断是否已删除
@authm.login_required
@marshal_with(response_mentors_fields)
def delete(self, id):
user = Mentors.query.get(id)
user.is_delete = True # is_delete = 1 表示删除用户
user.save()
data = {
"status": 200,
"msg": 'delete ok',
"data": user
}
return data
'''
# ———————————————————— Mentors 注册和登录———————————————————————————————— #
mentors_fields = {
'mentor_id': fields.Integer,
'mentor_name': fields.String,
'lab_id': fields.Integer
#'mentor_permission': fields.Integer,
}
# 外层输出参数格式化
response_mentors_fields = {
'status': fields.Integer,
'msg': fields.String,
'data': fields.Nested(mentors_fields)
}
# 外层输出参数格式化 + token
response_mentors_token_fields = {
'status': fields.Integer,
'msg': fields.String,
'token': fields.String,
'data': fields.Nested(mentors_fields)
}
# /mentors/register/
class MentorsRegister(Resource):
def post(self):
#global user
parser = reqparse.RequestParser()
parser.add_argument('u_name', required=True, help="请输入用户名")
parser.add_argument('u_password', required=True, help="请输入密码")
parser.add_argument('l_id', required=True, help="请输入lab_id")
args = parser.parse_args()
#users = args.get("user")
#action = args.get("action")
u_name = args.get("u_name")
u_password = args.get("u_password")
l_id = args.get("l_id")
#global data
data = {
"status": 201,
"msg": 'mentor register ok'
}
user = Mentors()
# 如果重复注册
if user.query.filter(Mentors.mentor_name.__eq__(u_name)).one_or_none():
data['status'] = 403
data['msg'] = 'mentor has already registered'
return data
else:
# register_mentors(user, u_name, u_password)
user.mentor_name = u_name
# 最终方法 密码做数据安全处理
user.mentor_password = user.men_password(u_password)
user.lab_id = l_id
user.save()
data['data'] = user
return marshal(data, response_mentors_token_fields)
# eg. curl http://localhost:5000/mentors/register/
def get(self):
return marshal(data, response_mentors_token_fields)
# /mentors/login/
class MentorsLogin(Resource):
def post(self):
parser = reqparse.RequestParser()
parser.add_argument('u_name', required=True, help="请输入用户名")
parser.add_argument('u_password', required=True, help="请输入密码")
global user
args = parser.parse_args()
#users = args.get("user")
#action = args.get("action")
u_name = args.get("u_name")
u_password = args.get("u_password")
global data
data = {
"status": 201,
"msg": 'mentor login ok'
}
#token = request.args.get("token")
#if token:
#abort(401, message="User has already login.")
#else:
user = Mentors.query.filter(Mentors.mentor_name.__eq__(u_name)).one_or_none()
#login_mentors(user, u_password)
if user: # 如果用户存在
# 如果用户已经登录
if user.is_login:
data['status'] = 403
data['msg'] = 'mentor is login'
return data
# 如果密码错误
if not user.verify_password(u_password):
data['status'] = 406
data['msg'] = 'password fail'
return data
# 如果用户已被删除
elif user.is_delete:
data['status'] = 900
data['msg'] = 'mentor is deleted'
return data
else:
g.user = user # 赋给全局
data['data'] = user
# 生成token
token = user.generate_auth_token(600)
# token = str(uuid.uuid4()) # token 需要转换为字符串
# 将用户token 存到缓存中 可以根据token 找到用户id 也可以根据用户id 找到token
# key: 使用token 值:用户id
# 第一个参数是键,这个主要是用来获取这个缓存的值。第二个参数是值。第三个参数是秒
cache.set(token, user.mentor_id, timeout=60*60*24*7)
if not cache.get(token):
abort(403, message="fail to set token")
data['token'] = token
user.is_login = True
user.save()
return marshal(data, response_mentors_token_fields)
else:
data['status'] = 406
data['msg'] = 'mentor not exist'
#return data
data['data'] = user
return marshal(data, response_mentors_token_fields)
#eg. curl http://localhost:5000/mentors/register/
def get(self):
return marshal(data, response_mentors_token_fields)
# /mentors/<int:id>/
class MentorUser(Resource):
# 根据 id 获取用户信息
@authm.login_required
@marshal_with(response_mentors_fields)
def get(self, id):
user = Mentors.query.get(id)
data = {
"status": 200,
"msg": 'ok',
'data': user,
}
return data
# 根据 id 修改用户信息 --> 密码修改
@authm.login_required
@marshal_with(response_mentors_fields)
def post(self, id):
parser = reqparse.RequestParser()
#parser.add_argument('u_password', required=True, help="请输入原密码")
#parser.add_argument('u_new_password', required=True, help="请输入修改后的密码")
parser.add_argument('u_password', required=True, help="请输入密码")
args = parser.parse_args()
u_password = args.get("u_password")
#u_new_password = args.get("u_new_password")
user = Mentors.query.get(id) # 根据id获得用户
#if user.mentor_password is not u_password:
# abort(401, message="user not login")
user.mentor_password = user.men_password(u_password)
user.save()
data = {
"status": 200,
"msg": 'password change ok',
'data': user
}
return data
# 根据 id 删除某个用户 Model中应该设计一个字段 is_delete 来判断是否已删除
@authm.login_required
@marshal_with(response_mentors_fields)
def delete(self, id):
user = Mentors.query.get(id)
user.is_delete = True # is_delete = 1 表示删除用户
user.save()
data = {
"status": 200,
"msg": 'delete ok',
"data": user
}
return data
'''
# /mentors/<int:id>/givepermission/<int:lab_id>/<int:s_id>/
#验证:
#检查Mentors已登录,检查拥有lab的权限,给予学生权限
class Mentorsgivepermission(Resource):
@authm.login_required
def get(self, id):
data = {
"status": 201,
"msg": 'ok'
}
user = Mentors.query.get(id)
# 如果id不存在
if not Mentors.query.filter(Mentors.mentor_id.__eq__(id)).one_or_none():
data['status'] = 900
data['msg'] = 'mentor is deleted'
return data
# 如果用户还没登录
elif user.is_login == 0:
data['status'] = 403
data['msg'] = 'please login first'
return data
# 用户登录了可以正常logout
else:
user.is_login = 0
user.save()
return data
'''
# /mentors/<int:id>/logout/
class MentorsLogout(Resource):
@authm.login_required
def get(self, id):
data = {
"status": 201,
"msg": 'mentor logout ok'
}
user = Mentors.query.get(id)
# 如果id不存在
if not Mentors.query.filter(Mentors.mentor_id.__eq__(id)).one_or_none():
data['status'] = 900
data['msg'] = 'mentor is deleted'
return data
# 如果用户还没登录
elif user.is_login == 0:
data['status'] = 403
data['msg'] = 'please login first'
return data
# 只能操作自己的账号
# 用户登录了可以正常logout
else:
user.is_login = 0
user.save()
return data
# —————————————————————————— Students 注册和登录—————————————————————————————— #
students_fields = {
'student_id': fields.Integer,
'student_name': fields.String,
'lab_id': fields.Integer
#'student_permission': fields.Integer,
}
# 外层输出参数格式化
response_students_fields = {
'status': fields.Integer,
'msg': fields.String,
'data': fields.Nested(students_fields)
}
# 外层输出参数格式化 + token
response_students_token_fields = {
'status': fields.Integer,
'msg': fields.String,
'token': fields.String,
'data': fields.Nested(students_fields)
}
# /students/register/
class StudentsRegister(Resource):
@authm.login_required
def post(self):
#global user
parser = reqparse.RequestParser()
parser.add_argument('u_name', required=True, help="请输入用户名")
parser.add_argument('u_password', required=True, help="请输入密码")
parser.add_argument('l_id', required=True, help="请输入lab_id")
args = parser.parse_args()
#users = args.get("user")
#action = args.get("action")
u_name = args.get("u_name")
u_password = args.get("u_password")
l_id = args.get("l_id")
#global data
data = {
"status": 201,
"msg": 'student register ok'
}
user = Students()
# 防止重复注册
if user.query.filter(Students.student_name.__eq__(u_name)).one_or_none():
data['status'] = 403
data['msg'] = 'student has already registered'
return data
else:
# register_students(user, u_name, u_password)
user.student_name = u_name
# 密码做数据安全处理
user.student_password = user.stu_password(u_password)
user.lab_id = l_id
user.save()
data['data'] = user
return marshal(data, response_students_token_fields)
# eg. curl http://localhost:5000/students/register/
def get(self):
return marshal(data, response_students_token_fields)
# /students/login/
class StudentsLogin(Resource):
def post(self):
parser = reqparse.RequestParser()
parser.add_argument('u_name', required=True, help="请输入用户名")
parser.add_argument('u_password', required=True, help="请输入密码")
global user
args = parser.parse_args()
#users = args.get("user")
#action = args.get("action")
u_name = args.get("u_name")
u_password = args.get("u_password")
global data
data = {
"status": 201,
"msg": 'student login ok'
}
#token = request.args.get("token")
#if token:
#abort(401, message="User has already login.")
#else:
user = Students.query.filter(Students.student_name.__eq__(u_name)).one_or_none()
#login_students(user, u_password)
if user: # 如果用户存在
# 如果用户已经登录
if user.is_login:
data['status'] = 403
data['msg'] = 'student is login'
return data
# 如果密码错误
if not user.verify_password(u_password):
data['status'] = 406
data['msg'] = 'password fail'
return data
# 如果用户已被删除
elif user.is_delete:
data['status'] = 900
data['msg'] = 'student is deleted'
return data
else:
g.user = user # 赋给全局
data['data'] = user
token = user.generate_auth_token(600) #生成token
# token = str(uuid.uuid4()) # token 需要转换为字符串
# 将用户token 存到缓存中 可以根据token 找到用户id 也可以根据用户id 找到token
# key: 使用token 值:用户id
# 第一个参数是键,这个主要是用来获取这个缓存的值。第二个参数是值。第三个参数是秒
#cache.set(token, user.student_id, timeout=60*60*24*7)
#if not cache.get(token):
# abort(403, message="fail to set token")
data['token'] = token
user.is_login = True
user.save()
return marshal(data, response_students_token_fields)
# 如果用户不存在
else:
data['status'] = 406
data['msg'] = 'student not exist'
#return data
data['data'] = user
return marshal(data, response_students_token_fields)
#eg. curl http://localhost:5000/students/register/
def get(self):
return marshal(data, response_students_token_fields)
# /students/<int:id>/
class StudentUser(Resource):
# 根据 id 获取用户信息
@auths.login_required
@marshal_with(response_students_fields)
def get(self, id):
user = Students.query.get(id)
data = {
"status": 200,
"msg": 'ok',
'data': user,
}
return data
# 根据 id 修改用户信息 --> 密码修改
@auths.login_required
@marshal_with(response_students_fields)
def post(self, id):
parser = reqparse.RequestParser()
parser.add_argument('u_password', required=True, help="请输入修改后的密码")
args = parser.parse_args()
u_password = args.get("u_password")
user = Students.query.get(id) # 根据id获得用户
user.student_password = user.stu_password(u_password)
user.save()
data = {
"status": 200,
"msg": 'password change ok',
'data': user
}
return data
# 根据 id 删除某个用户 Model中设计一个字段 is_delete 来判断是否已删除
@auths.login_required
@marshal_with(response_students_fields)
def delete(self, id):
user = Students.query.get(id)
user.is_delete = True # is_delete = 1 表示删除用户
user.save()
data = {
"status": 200,
"msg": 'delete ok',
"data": user
}
return data
# /students/<int:id>/logout/
class StudentsLogout(Resource):
@auths.login_required
def get(self, id):
data = {
"status": 201,
"msg": 'student logout ok'
}
user = Students.query.get(id)
# 如果id不存在
if not Students.query.filter(Students.student_id.__eq__(id)).one_or_none():
data['status'] = 900
data['msg'] = 'student is deleted'
return data
# 如果用户还没登录
elif user.is_login == 0:
data['status'] = 403
data['msg'] = 'please login first'
return data
# 用户登录了可以正常logout
else:
user.is_login = 0
user.save()
return data
"""
def login_mentors(user, u_password):
if user:
if not user.verify_password(u_password):
data['status'] = 406
data['msg'] = 'password fail'
return data
elif user.is_delete:
data['status'] = 900
data['msg'] = 'user is deleted'
return data
else:
data['data'] = user
token = user.generate_auth_token(600)
#token = str(uuid.uuid4()) # token 需要转换为字符串
# 将用户token 存到缓存中 可以根据token 找到用户id 也可以根据用户id 找到token
# key: 使用token 值:用户id
# 第一个参数是键,这个主要是用来获取这个缓存的值。第二个参数是值。第三个参数是秒
#cache.set(token, user.mentor_id, timeout=60*60*24*7)
#if not cache.get(token):
# abort(403, message="fail to set token")
data['token'] = token
return marshal(data, response_mentors_token_fields)
else:
data['status'] = 406
data['msg'] = 'user not exist'
return data
def register_mentors(user, u_name, u_password):
user.mentor_name = u_name
# 密码做数据安全处理
# user.set_password(u_password)
# 最终方法 密码做数据安全处理
user.mentor_password = user.men_password(u_password)
user.save()
# 用户注册登录 操作 其中密码做了数据安全
class UsersResource(Resource):
def post(self):
global user
args = parser.parse_args()
users = args.get("user")
action = args.get("action")
u_name = args.get("u_name")
u_password = args.get("u_password")
global data
data = {
"status": 201,
"msg": 'ok'
}
if users == "mentors":
#def login(User, u_name, u_password, data)
if action == "login": # 用户登录
token = request.args.get("token")
if token:
abort(401, message="User has already login.")
else:
user = Mentors.query.filter(Mentors.mentor_name.__eq__(u_name)).one_or_none()
login_mentors(user, u_password)
elif action == "register": # 用户注册
#def register(User, u_name, u_password)
user = Mentors()
register_mentors(user, u_name, u_password)
# elif action == "logout": # 用户登出
# user = Mentors()
# cache.
else:
pass
data['data'] = user
return marshal(data, response_mentors_token_fields)
#return data
def get(self):
return marshal(data, response_mentors_token_fields)
# 更新用户信息不更改用户名,只改密码
parser_user = reqparse.RequestParser()
parser_user.add_argument('u_password', required=True, help='请输入新密码')
# 单个用户数据操作 查询 修改 删除
class UserResource(Resource):
# 根据 id 获取用户信息
@marshal_with(response_user_fields)
def get(self, id):
user = User.query.get(id)
data = {
"status": 200,
"msg": 'ok',
'data': user,
}
return data
# 根据 id 修改用户信息 --> 密码修改
@marshal_with(response_user_fields)
def post(self, id):
args = parser_user.parse_args()
u_password = args.get("u_password")
user = User.query.get(id)
user.u_password = u_password
user.save()
data = {
"status": 200,
"msg": 'password change ok',
'data': user,
}
return data
# 根据 id 删除某个用户 Model中应该设计一个字段 is_delete 来判断是否已删除
@marshal_with(response_user_fields)
def delete(self, id):
user = User.query.get(id)
user.is_delete = True # is_delete = 1 表示删除用户
user.save()
data = {
"status": 200,
"msg": 'delete ok',
"data": user,
}
return data
"""
| 24.998902
| 97
| 0.54773
| 2,498
| 22,774
| 4.885508
| 0.090472
| 0.036873
| 0.028843
| 0.030728
| 0.829318
| 0.784989
| 0.74787
| 0.708047
| 0.707637
| 0.660357
| 0
| 0.014462
| 0.332045
| 22,774
| 911
| 98
| 24.998902
| 0.777281
| 0.130368
| 0
| 0.758713
| 1
| 0
| 0.101691
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.053619
| false
| 0.061662
| 0.016086
| 0.013405
| 0.190349
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 9
| 4a08d57f0c8889074d2da065b14affbe21c2bb67
| 6,619
| py
| Python
| bcselector/tests/no_cost_based_filter_methods_test.py
| Kaketo/bc-selector
| c7acd1033bee741530735fb601f9e464c3ccc26f
| ["MIT"]
| 5
| 2020-03-30T17:36:11.000Z
| 2021-06-16T09:14:20.000Z
| bcselector/tests/no_cost_based_filter_methods_test.py
| Kaketo/bc-selector
| c7acd1033bee741530735fb601f9e464c3ccc26f
| ["MIT"]
| 6
| 2020-04-28T16:32:04.000Z
| 2020-12-18T13:35:28.000Z
| bcselector/tests/no_cost_based_filter_methods_test.py
| Kaketo/bc-selector
| c7acd1033bee741530735fb601f9e464c3ccc26f
| ["MIT"]
| 2
| 2020-04-28T15:59:51.000Z
| 2020-05-14T08:11:33.000Z
import unittest
import numpy as np
from bcselector.filter_methods.no_cost_based_filter_methods import no_cost_find_best_feature
from bcselector.information_theory.j_criterion_approximations import mim, mifs, mrmr, jmi, cife
class TestNoCostMethod(unittest.TestCase):
def test_simple_input_mim(self):
integer_matrix = np.random.randint(0,10,(100,10))
diverse_target = np.random.randint(0,10,(100))
candidates_index = [0,1,2,6,7,8,9]
costs = [ 1.76, 0.19, -0.36, 0.96, 0.41, 0.17, -0.36, 0.75, 0.79, -1.38]
selected_feature, criterion_value, cost = no_cost_find_best_feature(
j_criterion_func=mim,
data=integer_matrix,
target_variable=diverse_target,
possible_variables_index=candidates_index,
costs=costs)
self.assertIsInstance(selected_feature,int)
self.assertIsInstance(criterion_value, float)
self.assertIsInstance(cost, float)
def test_simple_input_mifs(self):
integer_matrix = np.random.randint(0,10,(100,10))
diverse_target = np.random.randint(0,10,(100))
prev_variables_index = [3,4,5]
candidates_index = [0,1,2,6,7,8,9]
costs = [ 1.76, 0.19, -0.36, 0.96, 0.41, 0.17, -0.36, 0.75, 0.79, -1.38]
beta = 10
selected_feature, criterion_value, cost = no_cost_find_best_feature(
j_criterion_func=mifs,
data=integer_matrix,
target_variable=diverse_target,
possible_variables_index=candidates_index,
costs=costs,
prev_variables_index=prev_variables_index,
beta=beta)
self.assertIsInstance(selected_feature,int)
self.assertIsInstance(criterion_value, float)
self.assertIsInstance(cost, float)
def test_simple_input_mifs_no_beta_provided(self):
integer_matrix = np.random.randint(0,10,(100,10))
diverse_target = np.random.randint(0,10,(100))
prev_variables_index = [3,4,5]
candidates_index = [0,1,2,6,7,8,9]
costs = [ 1.76, 0.19, -0.36, 0.96, 0.41, 0.17, -0.36, 0.75, 0.79, -1.38]
with self.assertWarns(Warning): no_cost_find_best_feature(
j_criterion_func=mifs,
data=integer_matrix,
target_variable=diverse_target,
possible_variables_index=candidates_index,
costs=costs,
prev_variables_index=prev_variables_index)
def test_simple_input_mrmr(self):
integer_matrix = np.random.randint(0,10,(100,10))
diverse_target = np.random.randint(0,10,(100))
prev_variable_index = [3,4,5]
candidates_index = [0,1,2,6,7,8,9]
costs = [ 1.76, 0.19, -0.36, 0.96, 0.41, 0.17, -0.36, 0.75, 0.79, -1.38]
selected_feature, criterion_value, cost = no_cost_find_best_feature(
j_criterion_func=mrmr,
data=integer_matrix,
target_variable=diverse_target,
possible_variables_index=candidates_index,
costs=costs,
prev_variables_index=prev_variable_index)
self.assertIsInstance(selected_feature,int)
self.assertIsInstance(criterion_value, float)
self.assertIsInstance(cost, float)
def test_simple_input_jmi(self):
integer_matrix = np.random.randint(0,10,(100,10))
diverse_target = np.random.randint(0,10,(100))
prev_variable_index = [3,4,5]
candidates_index = [0,1,2,6,7,8,9]
costs = [ 1.76, 0.19, -0.36, 0.96, 0.41, 0.17, -0.36, 0.75, 0.79, -1.38]
selected_feature, criterion_value, cost = no_cost_find_best_feature(
j_criterion_func=jmi,
data=integer_matrix,
target_variable=diverse_target,
possible_variables_index=candidates_index,
costs=costs,
prev_variables_index=prev_variable_index)
self.assertIsInstance(selected_feature,int)
self.assertIsInstance(criterion_value, float)
self.assertIsInstance(cost, float)
def test_simple_input_cife(self):
integer_matrix = np.random.randint(0,10,(100,10))
diverse_target = np.random.randint(0,10,(100))
prev_variable_index = [3,4,5]
candidates_index = [0,1,2,6,7,8,9]
costs = [ 1.76, 0.19, -0.36, 0.96, 0.41, 0.17, -0.36, 0.75, 0.79, -1.38]
beta = 10
selected_feature, criterion_value, cost = no_cost_find_best_feature(
j_criterion_func=cife,
data=integer_matrix,
target_variable=diverse_target,
possible_variables_index=candidates_index,
costs=costs,
prev_variables_index=prev_variable_index,
beta=beta)
self.assertIsInstance(selected_feature,int)
self.assertIsInstance(criterion_value, float)
self.assertIsInstance(cost, float)
def test_simple_input_cife_no_beta_provided(self):
integer_matrix = np.random.randint(0,10,(100,10))
diverse_target = np.random.randint(0,10,(100))
prev_variables_index = [3,4,5]
candidates_index = [0,1,2,6,7,8,9]
costs = [ 1.76, 0.19, -0.36, 0.96, 0.41, 0.17, -0.36, 0.75, 0.79, -1.38]
with self.assertWarns(Warning): no_cost_find_best_feature(
j_criterion_func=cife,
data=integer_matrix,
target_variable=diverse_target,
possible_variables_index=candidates_index,
costs=costs,
prev_variables_index=prev_variables_index)
if __name__ == '__main__':
unittest.main()
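These tests call no_cost_find_best_feature once per criterion; in practice the function is the inner step of a greedy forward-selection loop. A sketch of that loop, using the same mim criterion and call signature as the first test (the data, costs, and loop length are our illustrative choices):
# Illustrative greedy forward-selection loop around the function under test.
import numpy as np
from bcselector.filter_methods.no_cost_based_filter_methods import no_cost_find_best_feature
from bcselector.information_theory.j_criterion_approximations import mim

data = np.random.randint(0, 10, (100, 10))
target = np.random.randint(0, 10, 100)
costs = [1.0] * 10
candidates = list(range(10))
selected = []
for _ in range(3):  # greedily pick three features
    feat, value, cost = no_cost_find_best_feature(
        j_criterion_func=mim,
        data=data,
        target_variable=target,
        possible_variables_index=candidates,
        costs=costs)
    selected.append(feat)
    candidates.remove(feat)  # a feature can be chosen only once
print(selected)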
| 50.143939
| 95
| 0.539205
| 764
| 6,619
| 4.399215
| 0.10733
| 0.079143
| 0.062481
| 0.066647
| 0.916691
| 0.910443
| 0.910443
| 0.910443
| 0.910443
| 0.910443
| 0
| 0.090475
| 0.367125
| 6,619
| 132
| 96
| 50.143939
| 0.711864
| 0
| 0
| 0.826087
| 0
| 0
| 0.001208
| 0
| 0
| 0
| 0
| 0
| 0.147826
| 1
| 0.06087
| false
| 0
| 0.034783
| 0
| 0.104348
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 7
| c5af2a4ea139f56feadad760a33ca1a6cd5de562
| 47
| py
| Python
| src/socialite/strategy/__init__.py
| garzola/masonite-socialite
| 70cb80365e2096773e291f84b6e7af81a276ac1b
| ["MIT"]
| 13
| 2020-02-02T01:27:51.000Z
| 2021-11-08T08:50:57.000Z
| src/socialite/strategy/__init__.py
| garzola/masonite-socialite
| 70cb80365e2096773e291f84b6e7af81a276ac1b
| ["MIT"]
| 17
| 2020-02-05T16:52:45.000Z
| 2021-05-16T14:34:46.000Z
| src/socialite/strategy/__init__.py
| garzola/masonite-socialite
| 70cb80365e2096773e291f84b6e7af81a276ac1b
| ["MIT"]
| 6
| 2020-02-03T14:20:30.000Z
| 2021-03-18T01:33:21.000Z
from .MasoniteStrategy import MasoniteStrategy
| 23.5
| 46
| 0.893617
| 4
| 47
| 10.5
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.085106
| 47
| 1
| 47
| 47
| 0.976744
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| 7
| a8667d764c57992453d915792f8fed96deae1798
| 203
| py
| Python
| rulesets/models/__init__.py
| jdr-tools/rulesets
| 2bbfb280c84da6ef359d47fa6c24d34b84814eeb
| ["MIT"]
| null
| null
| null
| rulesets/models/__init__.py
| jdr-tools/rulesets
| 2bbfb280c84da6ef359d47fa6c24d34b84814eeb
| ["MIT"]
| 3
| 2018-12-19T08:16:15.000Z
| 2018-12-19T08:16:47.000Z
| rulesets/models/__init__.py
| jdr-tools/rulesets
| 2bbfb280c84da6ef359d47fa6c24d34b84814eeb
| ["MIT"]
| null
| null
| null
import pymodm, os
from rulesets.models.account import Account
from rulesets.models.ruleset import Ruleset
from rulesets.models.session import Session
pymodm.connection.connect(os.getenv('MONGODB_URL'))
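Because this package-level __init__ connects at import time, MONGODB_URL must be set before the package is imported. A hedged sketch of what that looks like from a caller (the example URL is ours, not from the repo):
# Illustrative: the connection string must exist before the package import runs.
import os
os.environ.setdefault('MONGODB_URL', 'mongodb://localhost:27017/rulesets')  # example URL, an assumption
from rulesets.models import Account, Ruleset, Session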
| 29
| 51
| 0.837438
| 28
| 203
| 6.035714
| 0.5
| 0.213018
| 0.319527
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.083744
| 203
| 7
| 51
| 29
| 0.908602
| 0
| 0
| 0
| 0
| 0
| 0.053922
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.8
| 0
| 0.8
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| 7
| a876fc0a3fcf244833f7c61dc5bd2a3382710e65
| 18,131
| py
| Python
| load_functions.py
| adambrzosko/ml-htt-methods
| 5378b4a9747ea21702c7a48e5f3cbb4fc75a71fc
| ["MIT"]
| null
| null
| null
| load_functions.py
| adambrzosko/ml-htt-methods
| 5378b4a9747ea21702c7a48e5f3cbb4fc75a71fc
| ["MIT"]
| null
| null
| null
| load_functions.py
| adambrzosko/ml-htt-methods
| 5378b4a9747ea21702c7a48e5f3cbb4fc75a71fc
| ["MIT"]
| 1
| 2022-01-31T14:54:33.000Z
| 2022-01-31T14:54:33.000Z
import uproot
import os
import pandas as pd
import numpy as np
def load_mc_ntuple(data, tree, branch, mjj_training, channel, cut_feats, apply_cuts, split_by_sample, signal, embedding, ff):
# LOAD MC NTUPLES AND APPLY BASELINE CUTS BY CHANNEL
# need to do something for when df too large..
try:
iterator = uproot.iterate(data, tree, branches=branch+cut_feats)
except IOError:
print 'Tree/Branches not found'
df = []
try:
for block in iterator:
df_b = pd.DataFrame(block)
if apply_cuts:
if channel == 'tt':
df_b = df_b[
(df_b['pt_1'] > 40)
& (df_b['deepTauVsJets_medium_1'] > 0.5)
& (df_b['deepTauVsJets_medium_2'] > 0.5)
& (df_b['deepTauVsEle_vvvloose_1'] > 0.5)
& (df_b['deepTauVsMu_vloose_1'] > 0.5)
& (df_b['deepTauVsEle_vvvloose_2'] > 0.5)
& (df_b['deepTauVsMu_vloose_2'] > 0.5)
& (df_b['leptonveto'] == False)
& (df_b['trg_doubletau'] == True)
]
elif channel == 'mt':
df_b = df_b[
(df_b['iso_1'] < 0.15)
& (df_b['mt_1'] < 40) # was 70 but use 40
& (df_b['mva_olddm_tight_2'] > 0.5)
& (df_b['antiele_2'] == True)
& (df_b['antimu_2'] == True)
& (df_b['leptonveto'] == False)
& (df_b['pt_2'] > 20)
& ((df_b['trg_singlemuon']*df_b['pt_1'] > 23)
| (df_b['trg_mutaucross']*df_b['pt_1'] < 23))
]
elif channel == 'et':
df_b = df_b[
(df_b['iso_1'] < 0.1)
& (df_b['mt_1'] < 40)
& (df_b['mva_olddm_tight_2'] > 0.5)
& (df_b['antiele_2'] == True)
& (df_b['antimu_2'] == True)
& (df_b['leptonveto'] == False)
& (df_b['pt_2'] > 20)
& (df_b['trg_singleelectron'] == True)
]
elif channel == 'em':
df_b = df_b[
(df_b['iso_1'] < 0.15)
& (df_b['iso_2'] < 0.2)
& (df_b['pzeta'] > -50)
& (df_b['leptonveto'] == False)
& (df_b['trg_muonelectron'] == True)
]
else:
assert ValueError('Channel not in ["tt", "mt", "et", "em"]')
# if not embedding and not signal:
# if channel == 'tt':
# df_b = df_b[
# ~((df_b['gen_match_1'] == 5)
# & (df_b['gen_match_2'] == 5))]
# elif channel == 'mt':
# df_b = df_b[
# ~((df_b['gen_match_1'] == 4)
# & (df_b['gen_match_2'] == 5))]
# elif channel == 'et':
# df_b = df_b[
# ~((df_b['gen_match_1'] == 3)
# & (df_b['gen_match_2'] == 5))]
# elif channel == 'em':
# df_b = df_b[
# ~((df_b['gen_match_1'] == 3)
# & (df_b['gen_match_2'] == 4))]
if ff and not signal:
if channel == 'tt':
df_b = df_b[
~((df_b['gen_match_1'] == 6)
| (df_b['gen_match_2'] == 6))]
elif channel in ['et','mt']:
df_b = df_b[df_b['gen_match_2'] < 6]
## TO SELECT THE SIGNAL SAMPLE ACCORDING TO
## CUTS APPLIED RELATING TO n_jets AND mjj
if not split_by_sample:
df_b = df_b
else:
if mjj_training == 'low':
df_b = df_b[
(df_b['n_jets'] < 2)
| ((df_b['n_jets'] >= 2)
& (df_b['mjj'] < 300))
]
elif mjj_training == 'high':
df_b = df_b[
((df_b['n_jets'] >= 2)
& (df_b['mjj'] > 300))
]
else:
assert ValueError('Mjj training not in ["low", "high"]')
df_b = df_b[(df_b['m_sv'] > 0)] ## SOME m_sv ARE MISSING
df_b = df_b.drop(cut_feats, axis=1)
df.append(df_b)
df = pd.concat(df, ignore_index=True)
except IndexError:
print 'zero events in ntuple'
return df
def load_data_ntuple(data, tree, branch, mjj_training, channel, cut_feats, apply_cuts, split_by_sample):
## THIS FUNCTION IS FOR READING IN SAME SIGN DATA (FOR mt, et, em CHANNELS)
## OR ANTIISOLATED (FOR tt CHANNEL) FOR THE QCD ESTIMATION
try:
iterator = uproot.iterate(data, tree, branches=branch+cut_feats)
except IOError:
print 'Tree/Branches not found'
df = []
for block in iterator:
df_b = pd.DataFrame(block)
if apply_cuts:
if channel == 'tt':
df_b = df_b[
(df_b['pt_1'] > 40)
& ((df_b['mva_olddm_loose_1'] > 0.5)
& (df_b['mva_olddm_tight_1'] < 0.5)
& (df_b['mva_olddm_medium_2'] > 0.5))
| ((df_b['mva_olddm_loose_2'] > 0.5)
& (df_b['mva_olddm_tight_2'] < 0.5)
& (df_b['mva_olddm_medium_1'] > 0.5))
& (df_b['antiele_1'] == True)
& (df_b['antimu_1'] == True)
& (df_b['antiele_2'] == True)
& (df_b['antimu_2'] == True)
& (df_b['leptonveto'] == False)
& (df_b['trg_doubletau'] == True)
]
elif channel == 'mt':
df_b = df_b[
(df_b['iso_1'] < 0.15)
& (df_b['mt_1'] < 40)
& (df_b['mva_olddm_tight_2'] > 0.5)
& (df_b['antiele_2'] == True)
& (df_b['antimu_2'] == True)
& (df_b['leptonveto'] == False)
& (df_b['pt_2'] > 20)
& ((df_b['trg_singlemuon']*df_b['pt_1'] > 23)
| (df_b['trg_mutaucross']*df_b['pt_1'] < 23))
& (df_b['os'] == False)
]
elif channel == 'et':
df_b = df_b[
(df_b['iso_1'] < 0.1)
& (df_b['mt_1'] < 40)
& (df_b['mva_olddm_tight_2'] > 0.5)
& (df_b['antiele_2'] == True)
& (df_b['antimu_2'] == True)
& (df_b['leptonveto'] == False)
& (df_b['pt_2'] > 20)
& (df_b['trg_singleelectron'] == True)
& (df_b['os'] == False)
]
elif channel == 'em':
df_b = df_b[
(df_b['iso_1'] < 0.15)
& (df_b['iso_2'] < 0.2)
& (df_b['pzeta'] > -50)
& (df_b['leptonveto'] == False)
& (df_b['trg_muonelectron'] == True)
& (df_b['os'] == False)
]
else:
assert ValueError('Channel not in ["tt", "mt", "et", "em"]')
## TO SELECT THE SIGNAL SAMPLE ACCORDING TO
## CUTS APPLIED RELATING TO n_jets AND mjj
if not split_by_sample:
df_b = df_b
else:
if mjj_training == 'low':
df_b = df_b[
(df_b['n_jets'] < 2)
| ((df_b['n_jets'] >= 2)
& (df_b['mjj'] < 300))
]
elif mjj_training == 'high':
df_b = df_b[
((df_b['n_jets'] >= 2)
& (df_b['mjj'] > 300))
]
else:
assert ValueError('Mjj training not in ["low", "high"]')
df_b = df_b[(df_b['m_sv'] > 0)] ## SOME m_sv ARE MISSING
df_b = df_b.drop(cut_feats, axis=1)
df.append(df_b)
df = pd.concat(df, ignore_index=True)
return df
def load_ff_ntuple(data, tree, branch, mjj_training, channel, cut_feats, apply_cuts, split_by_sample):
try:
iterator = uproot.iterate(data, tree, branches=branch+cut_feats)
except IOError:
print 'Tree/Branches not found'
df = []
for block in iterator:
df_b = pd.DataFrame(block)
if apply_cuts:
if channel == 'tt':
df_b_tt = []
# df_b_1 = df_b[
# (df_b['pt_1'] > 40)
# & ((df_b['mva_olddm_tight_1'] < 0.5)
# & (df_b['mva_olddm_vloose_1'] > 0.5)
# & (df_b['mva_olddm_tight_2'] > 0.5))
# & (df_b['antiele_1'] == True)
# & (df_b['antimu_1'] == True)
# & (df_b['antiele_2'] == True)
# & (df_b['antimu_2'] == True)
# & (df_b['leptonveto'] == False)
# & (df_b['trg_doubletau'] == True)
# ]
# df_b_1["wt"] = df_b_1["wt_ff_1"]
# df_b_tt.append(df_b_1)
# df_b_2 = df_b[
# (df_b['pt_1'] > 40)
# & ((df_b['mva_olddm_tight_2'] < 0.5)
# & (df_b['mva_olddm_vloose_2'] > 0.5)
# & (df_b['mva_olddm_tight_1'] > 0.5))
# & (df_b['antiele_1'] == True)
# & (df_b['antimu_1'] == True)
# & (df_b['antiele_2'] == True)
# & (df_b['antimu_2'] == True)
# & (df_b['leptonveto'] == False)
# & (df_b['trg_doubletau'] == True)
# ]
# df_b_2["wt"] = df_b_2["wt_ff_2"]
# df_b_tt.append(df_b_2)
# new ffs with deeptau
df_b = df_b[
(df_b['pt_1'] > 40)
& ((df_b['deepTauVsJets_medium_1'] < 0.5)
& (df_b['deepTauVsJets_vvvloose_1'] > 0.5)
& (df_b['deepTauVsJets_medium_2'] > 0.5))
& (df_b['deepTauVsEle_vvvloose_1'] > 0.5 )
& (df_b['deepTauVsMu_vloose_1'] > 0.5 )
& (df_b['deepTauVsEle_vvvloose_2'] > 0.5 )
& (df_b['deepTauVsMu_vloose_2'] > 0.5 )
& (df_b['leptonveto'] == False)
& (df_b['trg_doubletau'] == True)
]
df_b["wt"] = df_b["wt_ff_dmbins_1"]
elif channel == 'mt':
df_b = df_b[
(df_b['iso_1'] < 0.15)
& (df_b['mt_1'] < 40)
& (df_b['mva_olddm_tight_2'] < 0.5)
& (df_b['mva_olddm_vloose_2'] > 0.5)
& (df_b['antiele_2'] == True)
& (df_b['antimu_2'] == True)
& (df_b['leptonveto'] == False)
& (df_b['pt_2'] > 20)
& ((df_b['trg_singlemuon']*df_b['pt_1'] > 23)
| (df_b['trg_mutaucross']*df_b['pt_1'] < 23))
]
df_b["wt"] = df_b["wt_ff_1"]
elif channel == 'et':
df_b = df_b[
(df_b['iso_1'] < 0.1)
& (df_b['mt_1'] < 40)
& (df_b['mva_olddm_tight_2'] < 0.5)
& (df_b['mva_olddm_vloose_2'] > 0.5)
& (df_b['antiele_2'] == True)
& (df_b['antimu_2'] == True)
& (df_b['leptonveto'] == False)
& (df_b['pt_2'] > 20)
& (df_b['trg_singleelectron'] == True)
]
df_b["wt"] = df_b["wt_ff_1"]
else:
raise ValueError('Channel not in ["tt", "mt", "et"]')
## TO SELECT THE SIGNAL SAMPLE ACCORDING TO
## CUTS APPLIED RELATING TO n_jets AND mjj
if split_by_sample:
if mjj_training == 'low':
df_b = df_b[
(df_b['n_jets'] < 2)
| ((df_b['n_jets'] >= 2)
& (df_b['mjj'] < 300))
]
elif mjj_training == 'high':
df_b = df_b[
((df_b['n_jets'] >= 2)
& (df_b['mjj'] > 300))
]
else:
raise ValueError('Mjj training not in ["low", "high"]')
df_b = df_b[(df_b['m_sv'] > 0)] ## SOME m_sv ARE MISSING
df_b = df_b.drop(cut_feats, axis=1)
df.append(df_b)
df = pd.concat(df, ignore_index=True)
return df
def load_rhoID_ntuple(data, tree, branch, channel, cut_feats, apply_cuts):
# LOAD MC NTUPLES AND APPLY BASELINE CUTS BY CHANNEL
# need to do something for when df too large..
try:
iterator = uproot.iterate(data, tree, branches=branch+cut_feats)
except IOError:
print 'Tree/Branches not found'
raise
df1 = []
df2 = []
try:
for block in iterator:
df_b1 = pd.DataFrame(block)
df_b2 = pd.DataFrame(block)
if apply_cuts:
if channel == 'tt':
df_b1 = df_b1[
(df_b1['pt_1'] > 40)
& (df_b1['mva_olddm_tight_1'] > 0.5)
& (df_b1['antiele_1'] == True)
& (df_b1['antimu_1'] == True)
& (df_b1['leptonveto'] == False)
& (df_b1['trg_doubletau'] == True)
& (df_b1['tau_decay_mode_1'] == 1)
]
df_b2 = df_b2[
(df_b2['pt_1'] > 40)
& (df_b2['mva_olddm_tight_2'] > 0.5)
& (df_b2['antiele_2'] == True)
& (df_b2['antimu_2'] == True)
& (df_b2['leptonveto'] == False)
& (df_b2['trg_doubletau'] == True)
& (df_b2['tau_decay_mode_2'] == 1)
]
else:
raise ValueError('Channel not "tt"')
df_b1 = df_b1.drop(cut_feats, axis=1)
df1.append(df_b1)
df_b2 = df_b2.drop(cut_feats, axis=1)
df2.append(df_b2)
except IndexError:
print 'zero events in ntuple'
df1 = pd.concat(df1, ignore_index=True)
df2 = pd.concat(df2, ignore_index=True)
return df1, df2
def load_noisejets_ntuple(data, tree, branch, channel, cut_feats, apply_cuts):
# LOAD MC NTUPLES AND APPLY BASELINE CUTS BY CHANNEL
# need to do something for when df too large..
try:
iterator = uproot.iterate(data, tree, branches=branch+cut_feats)
except IOError:
print 'Tree/Branches not found'
raise
df1 = []
try:
for block in iterator:
df_b1 = pd.DataFrame(block)
df_b2 = pd.DataFrame(block)
if apply_cuts:
if channel == 'zmm': # only train on zmm
df_b1 = df_b1[
(df_b1['pt_1'] > 25)
& (df_b1['iso_1'] < 0.15)
& (df_b1['iso_2'] < 0.15)
& (df_b1['trg_singlemuon'] == True)
& ((df_b1["jpt_1"]/df_b1["pt_vis"]) > 0.5)
& ((df_b1["jpt_1"]/df_b1["pt_vis"]) < 1.5)
& ((df_b1["jpt_1"]) < 100)
& (np.abs(df_b1["jeta_1"]) > 2.65)
& (np.abs(df_b1["jeta_1"]) < 3.139)
# & (df_b1["n_jets"] == 1)
]
else:
raise ValueError('Channel not "zmm"')
df_b1 = df_b1.drop(cut_feats, axis=1)
df1.append(df_b1)
except IndexError:
print 'zero events in ntuple'
df1 = pd.concat(df1, ignore_index=True)
return df1
def load_files(filelist):
with open(filelist) as f:
files = f.read().splitlines()
file_names = [os.path.splitext(os.path.basename(file))[0] for file in files]
return file_names
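## A MINIMAL SKETCH OF HOW THESE LOADERS MIGHT BE WIRED TOGETHER,
## ASSUMING THE CALLING SCRIPT DEFINES THE FILE LIST, TREE NAME AND
## BRANCH LISTS (ALL NAMES AND PATHS BELOW ARE HYPOTHETICAL):
# files = load_files('filelists/mt_data.txt')
# branches = ['pt_1', 'pt_2', 'm_sv', 'n_jets', 'mjj']
# cut_feats = ['iso_1', 'mt_1', 'mva_olddm_tight_2', 'antiele_2',
#              'antimu_2', 'leptonveto', 'trg_singlemuon',
#              'trg_mutaucross', 'os']
# df_qcd = load_data_ntuple('ntuples/SingleMuon.root', 'ntuple',
#                           branches, 'low', 'mt', cut_feats,
#                           apply_cuts=True, split_by_sample=True)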
| 39.936123
| 125
| 0.37378
| 1,954
| 18,131
| 3.158137
| 0.091095
| 0.117647
| 0.051045
| 0.058337
| 0.877005
| 0.85999
| 0.834387
| 0.817047
| 0.79744
| 0.778966
| 0
| 0.045869
| 0.49738
| 18,131
| 453
| 126
| 40.024283
| 0.631296
| 0.131598
| 0
| 0.73065
| 0
| 0
| 0.126644
| 0.013022
| 0
| 0
| 0
| 0
| 0.024768
| 0
| null | null | 0
| 0.012384
| null | null | 0.024768
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
a8991ab5d7325e00ce2a7a52857ca1761e3c97c6
| 10,300
|
py
|
Python
|
scripts/move-prediction/plot.py
|
dsmic/oakfoam
|
f4093d57ea173d5dc00e0f6031ed16f169258456
|
[
"BSD-2-Clause"
] | 2
|
2019-08-27T04:18:45.000Z
|
2021-04-20T23:14:24.000Z
|
scripts/move-prediction/plot.py
|
dsmic/oakfoam
|
f4093d57ea173d5dc00e0f6031ed16f169258456
|
[
"BSD-2-Clause"
] | null | null | null |
scripts/move-prediction/plot.py
|
dsmic/oakfoam
|
f4093d57ea173d5dc00e0f6031ed16f169258456
|
[
"BSD-2-Clause"
] | null | null | null |
#!/usr/bin/env python
# Read in a list of move prediction results and plot the results.
# The input file format is JSON (see plot_example.json).
import sys
import math
import numpy as np
import json
import csv
import matplotlib
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import axes3d
from matplotlib import cm
jd = json.loads("".join(sys.stdin.readlines()))
plottype = jd['type']
if plottype == 'move-prediction': # 2d move prediction
fig = plt.figure()
fig.canvas.set_window_title(jd['title'])
plt.title(jd['title'])
plt.xlabel('Move Rank')
plt.ylabel('Cumulative Probability')
plt.grid(True)
plt.ylim(0,1)
xmin = 1
xmax = 50
if 'xmax' in jd.keys():
xmax = int(jd['xmax'])
plt.xlim(xmin,xmax)
plt.yticks(np.append(np.arange(0,1,0.05),1))
plt.xticks(np.append(np.arange(xmin,xmax),xmax))
errk = 0.0
if 'errk' in jd.keys():
errk = float(jd['errk'])
table = []
table.append(range(xmin,xmax+1))
tablelabels = []
tablelabels.append('Move Rank')
c = 0
for f in jd['data']:
with open(f['file'], 'rb') as csvfile:
x = []
y = []
z = []
reader = csv.reader(csvfile)
for row in reader:
x.append(float(row[0]))
y.append(float(row[1]))
z.append(float(row[2]))
err = []
err1 = []
err2 = []
for i in range(len(x)):
if errk>0:
e = errk*math.sqrt(y[i]*(1-y[i])/z[i])
else:
e = 0
err.append(e)
err1.append(y[i]+e)
err2.append(y[i]-e)
lbl = f['label'] + ' (%.1f%%)' % (y[0]*100)
if len(jd['data']) == 1:
col = cm.spectral(0)
else:
col = cm.spectral(c*0.9/(len(jd['data'])-1),1)
c+=1
# p = plt.plot(x, z, label = lbl)
p = plt.plot(x, y, label = lbl, color = col)
# col = p[0].get_color()
if errk>0:
plt.fill_between(x, err1, err2, alpha = 0.2, color = col)
# plt.errorbar(x, z, yerr = err, fmt='.', color = col)
td = []
xi = 0
for i in table[0]:
while i > x[xi]:
xi += 1
td.append(y[xi])
table.append(td)
tablelabels.append(lbl)
plt.legend(loc=4)
sys.stdout.write('# ' + jd['title'] + '\n')
tt = len(tablelabels)
for i in range(tt):
sys.stdout.write('# ' + (' | ')*i + ' '*5 + '+' + ('-'*7)*(tt-i-1) + '- %d: ' % i + tablelabels[i] +'\n')
sys.stdout.write('#' + (' |----|')*tt + '\n')
for i in range(len(table[0])):
sys.stdout.write(' ')
for j in range(len(table)):
if j == 0:
sys.stdout.write(' %6d' % table[j][i])
else:
sys.stdout.write(' %6.3f' % table[j][i])
sys.stdout.write('\n')
elif plottype == 'stages-mp': # stages of move prediction
fig = plt.figure()
fig.canvas.set_window_title(jd['title'])
plt.title(jd['title'])
plt.xlabel('Game Stage Center')
plt.ylabel('Move Prediction Accuracy')
plt.grid(True)
# plt.ylim(0,1)
xmin = 0
xmax = 300
stage = 30
if 'xmax' in jd.keys():
xmax = int(jd['xmax'])
if 'stage' in jd.keys():
stage = int(jd['stage'])
plt.xlim(xmin,xmax)
# plt.yticks(np.append(np.arange(0,1,0.02),1))
plt.xticks(np.append(np.arange(xmin,xmax,stage),xmax))
errk = 0.0
if 'errk' in jd.keys():
errk = float(jd['errk'])
table = []
# table.append(range(xmin,xmax+1))
table.append(range(xmin+stage/2,xmax+1,stage))
tablelabels = []
tablelabels.append('Center of game stage')
c = 0
for f in jd['data']:
with open(f['file'], 'rb') as csvfile:
x = []
y = []
z = []
reader = csv.reader(csvfile)
for row in reader:
r0 = float(row[0])
r1 = float(row[1])
r2 = float(row[2])
r3 = float(row[3])
r4 = float(row[4])
r5 = float(row[5])
x.append((r0+r1)/2)
y.append(r2)
z.append(r4)
err = []
err1 = []
err2 = []
for i in range(len(x)):
if errk>0:
e = errk*math.sqrt(y[i]*(1-y[i])/z[i])
else:
e = 0
err.append(e)
err1.append(y[i]+e)
err2.append(y[i]-e)
lbl = f['label']
if len(jd['data']) == 1:
col = cm.spectral(0)
else:
col = cm.spectral(c*0.9/(len(jd['data'])-1),1)
c+=1
# p = plt.plot(x, z, label = lbl)
p = plt.plot(x, y, label = lbl, color = col)
# col = p[0].get_color()
if errk>0:
plt.fill_between(x, err1, err2, alpha = 0.2, color = col)
# plt.errorbar(x, z, yerr = err, fmt='.', color = col)
td = []
xi = 0
for i in table[0]:
while i > x[xi]:
xi += 1
td.append(y[xi])
table.append(td)
tablelabels.append(lbl)
plt.legend(loc=4)
sys.stdout.write('# ' + jd['title'] + '\n')
tt = len(tablelabels)
for i in range(tt):
sys.stdout.write('# ' + (' | ')*i + ' '*5 + '+' + ('-'*7)*(tt-i-1) + '- %d: ' % i + tablelabels[i] +'\n')
sys.stdout.write('#' + (' |----|')*tt + '\n')
for i in range(len(table[0])):
sys.stdout.write(' ')
for j in range(len(table)):
if j == 0:
sys.stdout.write(' %6d' % table[j][i])
else:
sys.stdout.write(' %6.3f' % table[j][i])
sys.stdout.write('\n')
elif plottype == 'stages-le': # stages of mean log-evidence
fig = plt.figure()
fig.canvas.set_window_title(jd['title'])
plt.title(jd['title'])
plt.xlabel('Game Stage Center')
plt.ylabel('Mean Log-evidence')
plt.grid(True)
# plt.ylim(0,1)
xmin = 0
xmax = 300
stage = 30
if 'xmax' in jd.keys():
xmax = int(jd['xmax'])
if 'stage' in jd.keys():
stage = int(jd['stage'])
plt.xlim(xmin,xmax)
# plt.yticks(np.append(np.arange(0,1,0.02),1))
plt.xticks(np.append(np.arange(xmin,xmax,stage),xmax))
errk = 0.0
if 'errk' in jd.keys():
errk = float(jd['errk'])
c = 0
for f in jd['data']:
with open(f['file'], 'rb') as csvfile:
x = []
y = []
z = []
reader = csv.reader(csvfile)
for row in reader:
r0 = float(row[0])
r1 = float(row[1])
r2 = float(row[2])
r3 = float(row[3])
r4 = float(row[4])
r5 = float(row[5])
x.append((r0+r1)/2)
y.append(r3)
z.append(r5)
err = []
err1 = []
err2 = []
for i in range(len(x)):
if errk>0:
e = errk*math.sqrt(z[i])
else:
e = 0
err.append(e)
err1.append(y[i]+e)
err2.append(y[i]-e)
lbl = f['label']
if len(jd['data']) == 1:
col = cm.spectral(0)
else:
col = cm.spectral(c*0.9/(len(jd['data'])-1),1)
c+=1
# p = plt.plot(x, z, label = lbl)
p = plt.plot(x, y, label = lbl, color = col)
# col = p[0].get_color()
if errk>0:
plt.fill_between(x, err1, err2, alpha = 0.2, color = col)
# plt.errorbar(x, z, yerr = err, fmt='.', color = col)
plt.legend(loc=4)
elif plottype == '3d': # legacy 3d mp plot
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
fig.suptitle(jd['title'])
ax.set_ylabel('Move Rank')
ax.set_zlabel('Cumulative Probability')
xmax = 200
if 'xmax' in jd.keys():
xmax = jd['xmax']
X = []
Y = []
Z = []
for f in jd['data']:
with open(f['file'], 'rb') as csvfile:
x = []
y = []
reader = csv.reader(csvfile)
xi = 1
s = 0
for row in reader:
rx = float(row[0])
ry = float(row[1])
if rx>xmax:
s += ry
else:
while xi<rx:
x.append(xi)
y.append(0.0)
xi += 1
x.append(rx)
y.append(ry)
xi += 1
s += sum(y)
t = 0.0
z = []
zz = []
for yy in y:
t += yy
z.append(t/s)
zz.append(f['z'])
Y.append(x)
Z.append(z)
X.append(zz)
# ax.plot_wireframe(X, Y, Z, color='b', cmap='jet')
surf = ax.plot_surface(X, Y, Z, rstride=1, cstride=1, cmap=cm.jet)
fig.colorbar(surf, shrink=0.5, aspect=5)
ax.set_zlim(0,1)
ax.set_ylim(0,xmax)
else: # legacy 2d mp plot
fig = plt.figure()
fig.canvas.set_window_title(jd['title'])
plt.title(jd['title'])
plt.xlabel('Move Rank')
plt.ylabel('Cumulative Probability')
plt.grid(True)
plt.ylim(0,1)
xmin = 1
xmax = 50
if 'xmax' in jd.keys():
xmax = int(jd['xmax'])
plt.xlim(xmin,xmax)
plt.yticks(np.append(np.arange(0,1,0.05),1))
plt.xticks(np.append(np.arange(xmin,xmax),xmax))
errk = 0.0
if 'errk' in jd.keys():
errk = float(jd['errk'])
table = []
table.append(range(xmin,xmax+1))
tablelabels = []
tablelabels.append('Move Rank')
c = 0
for f in jd['data']:
with open(f['file'], 'rb') as csvfile:
x = []
y = []
reader = csv.reader(csvfile)
for row in reader:
x.append(float(row[0]))
y.append(float(row[1]))
s = sum(y)
t = 0.0
z = []
err = []
err1 = []
err2 = []
for yy in y:
t += yy
v = t/s
z.append(v)
if errk>0:
e = errk*math.sqrt(v*(1-v)/s)
else:
e = 0
err.append(e)
err1.append(v+e)
err2.append(v-e)
lbl = f['label'] + ' (%.1f%%)' % (z[0]*100)
if len(jd['data']) == 1:
col = cm.spectral(0)
else:
col = cm.spectral(c*0.9/(len(jd['data'])-1),1)
c+=1
# p = plt.plot(x, z, label = lbl)
p = plt.plot(x, z, label = lbl, color = col)
# col = p[0].get_color()
if errk>0:
plt.fill_between(x, err1, err2, alpha = 0.2, color = col)
# plt.errorbar(x, z, yerr = err, fmt='.', color = col)
td = []
xi = 0
for i in table[0]:
while i > x[xi]:
xi += 1
td.append(z[xi])
table.append(td)
tablelabels.append(lbl)
plt.legend(loc=4)
sys.stdout.write('# ' + jd['title'] + '\n')
tt = len(tablelabels)
for i in range(tt):
sys.stdout.write('# ' + (' | ')*i + ' '*5 + '+' + ('-'*7)*(tt-i-1) + '- %d: ' % i + tablelabels[i] +'\n')
sys.stdout.write('#' + (' |----|')*tt + '\n')
for i in range(len(table[0])):
sys.stdout.write(' ')
for j in range(len(table)):
if j == 0:
sys.stdout.write(' %6d' % table[j][i])
else:
sys.stdout.write(' %6.3f' % table[j][i])
sys.stdout.write('\n')
plt.show()
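# A sketch of the JSON this script expects on stdin, inferred from the
# keys read above ('type', 'title', 'xmax', 'errk', and 'data' entries
# with 'file'/'label'); file names and values here are hypothetical:
# {
#   "type": "move-prediction",
#   "title": "Move prediction comparison",
#   "xmax": 50,
#   "errk": 1.96,
#   "data": [
#     {"file": "results_a.csv", "label": "Model A"},
#     {"file": "results_b.csv", "label": "Model B"}
#   ]
# }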
| 24.465558
| 113
| 0.50301
| 1,595
| 10,300
| 3.232602
| 0.116614
| 0.036656
| 0.057021
| 0.019201
| 0.801202
| 0.793832
| 0.786074
| 0.782002
| 0.773274
| 0.773274
| 0
| 0.036351
| 0.297573
| 10,300
| 420
| 114
| 24.52381
| 0.676296
| 0.085534
| 0
| 0.808023
| 0
| 0
| 0.069953
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.025788
| 0
| 0.025788
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
7635d638410991a211ac25f018d8be5b4fa60974
| 393
|
py
|
Python
|
exercise_brokensongify_solution.py
|
annezola/gdi-python
|
a806f0eca2eb17e5a975cce8d0b1d90490dd455e
|
[
"MIT"
] | null | null | null |
exercise_brokensongify_solution.py
|
annezola/gdi-python
|
a806f0eca2eb17e5a975cce8d0b1d90490dd455e
|
[
"MIT"
] | null | null | null |
exercise_brokensongify_solution.py
|
annezola/gdi-python
|
a806f0eca2eb17e5a975cce8d0b1d90490dd455e
|
[
"MIT"
] | 1
|
2022-01-04T15:26:40.000Z
|
2022-01-04T15:26:40.000Z
|
# Broken #1
def songify(lyric):
return "🎼" + lyric + "🎶"
songify("siiiinnnging in the rain")
# Broken #2
def songify(lyric):
return "🎼" + lyric + "🎶"
songify("siiiinnnging in the rain")
# Broken #3
def songify(lyric):
return "🎼" + lyric + "🎶"
songify("siiiinnnging in the rain")
# Broken #4
def songify(lyric):
return "🎼" + lyric + "🎶"
print(songify("siiiinnnging in the rain"))
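# Note (added): in a plain script the first three calls above compute
# the decorated lyric and then discard it; only Broken #4 shows output
# because it wraps the call in print(). To see the others, print them
# the same way, e.g.:
# print(songify("siiiinnnging in the rain"))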
| 17.086957
| 42
| 0.633588
| 57
| 393
| 4.508772
| 0.280702
| 0.155642
| 0.233463
| 0.326848
| 0.941634
| 0.832685
| 0.832685
| 0.723735
| 0.723735
| 0.723735
| 0
| 0.012539
| 0.188295
| 393
| 23
| 42
| 17.086957
| 0.768025
| 0.089059
| 0
| 0.916667
| 0
| 0
| 0.297143
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0
| 0.333333
| 0.666667
| 0.083333
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 12
|
7668e1e1e289a1d416a6b3bcc82b0701c68e312d
| 3,449
|
py
|
Python
|
Packs/Workday/Integrations/Workday_IAM/test_data/event_results.py
|
matan-xmcyber/content
|
7f02301c140b35956af3cd20cb8dfc64f34afb3e
|
[
"MIT"
] | null | null | null |
Packs/Workday/Integrations/Workday_IAM/test_data/event_results.py
|
matan-xmcyber/content
|
7f02301c140b35956af3cd20cb8dfc64f34afb3e
|
[
"MIT"
] | null | null | null |
Packs/Workday/Integrations/Workday_IAM/test_data/event_results.py
|
matan-xmcyber/content
|
7f02301c140b35956af3cd20cb8dfc64f34afb3e
|
[
"MIT"
] | null | null | null |
events_result = [
{
"name": None,
"rawJSON": "{\"Employee_Type\": \"Regular\", \"Leadership\": \"Yes-HQ\", \"Work_Country_Code\": \"840\", \"Street_Address\": \"3000 Tannery Way\", \"Employment_Status\": \"Active\", \"VP_Flag\": \"N\", \"Mgr_ID\": \"115069\", \"Cost_Center_Description\": \"Channel Sales\", \"GDPR_Country_Flag\": \"0\", \"Director_Flag\": \"Y\", \"Email_-_Primary_Home\": \"test@test.com\", \"First_Name\": \"Ronny\", \"Last_Hire_Date\": \"10/05/2020\", \"People_Manager_Flag\": \"N\", \"Department\": \"Sales NAM:NAM Channel Sales\", \"Workday_ID\": \"5aa443c785ff10461ac83e5a6be32e1e\", \"Postal_Code\": \"95054\", \"Rehired_Employee\": \"Yes\", \"Org_Level_1\": \"Sales\", \"Org_Level_3\": \"NAM Channel Sales\", \"Country_Name\": \"United States Of America\", \"Org_Level_2\": \"Sales NAM\", \"Emp_ID\": \"100122\", \"Job_Family\": \"Product Management\", \"User_Name\": \"rrahardjo@paloaltonetworks.com\", \"Preferred_Name_-_First_Name\": \"Ronny\", \"Prehire_Flag\": \"False\", \"Management_Level_1\": \"Nikesh Arora\", \"Work_Country_Abbrev\": \"US\", \"Management_Level_2\": \"Timmy Turner\", \"Email_Address\": \"rrahardjo@paloaltonetworks.com\", \"Title\": \"Dir, Product Line Manager\", \"City\": \"Santa Clara\", \"Work_State_US_Only\": \"California\", \"Job_Code\": \"2245\", \"PAN_CF_Okta_Location_Region\": \"Americas\", \"Last_Name\": \"Rahardjo\", \"Job_Function\": \"Product Management Function\", \"State\": \"California\", \"Exec_Admin_Flag\": \"N\", \"Preferred_Name\": \"Ronny Rahardjo\", \"Regular_Employee_Flag\": \"Y\", \"Preferred_Name_-_Last_Name\": \"Rahardjo\", \"Cost_Center_Code\": \"120100\", \"Location\": \"Office - USA - CA - Headquarters\", \"UserProfile\": {\"employee_type\": \"Regular\", \"leadership\": \"Yes-HQ\", \"work_country_code\": \"840\", \"street_address\": \"3000 Tannery Way\", \"employment_status\": \"Active\", \"vp_flag\": \"N\", \"mgr_id\": \"115069\", \"cost_center_description\": \"Channel Sales\", \"gdpr_country_flag\": \"0\", \"director_flag\": \"Y\", \"email_-_primary_home\": \"test@test.com\", \"first_name\": \"Ronny\", \"last_hire_date\": \"10/05/2020\", \"people_manager_flag\": \"N\", \"department\": \"Sales NAM:NAM Channel Sales\", \"workday_id\": \"5aa443c785ff10461ac83e5a6be32e1e\", \"postal_code\": \"95054\", \"rehired_employee\": \"Yes\", \"org_level_1\": \"Sales\", \"org_level_3\": \"NAM Channel Sales\", \"country_name\": \"United States Of America\", \"org_level_2\": \"Sales NAM\", \"emp_id\": \"100122\", \"job_family\": \"Product Management\", \"user_name\": \"rrahardjo@paloaltonetworks.com\", \"preferred_name_-_first_name\": \"Ronny\", \"prehire_flag\": \"False\", \"management_level_1\": \"Nikesh Arora\", \"work_country_abbrev\": \"US\", \"management_level_2\": \"Timmy Turner\", \"email_address\": \"rrahardjo@paloaltonetworks.com\", \"title\": \"Dir, Product Line Manager\", \"city\": \"Santa Clara\", \"work_state_us_only\": \"California\", \"job_code\": \"2245\", \"pan_cf_okta_location_region\": \"Americas\", \"last_name\": \"Rahardjo\", \"job_function\": \"Product Management Function\", \"state\": \"California\", \"exec_admin_flag\": \"N\", \"preferred_name\": \"Ronny Rahardjo\", \"regular_employee_flag\": \"Y\", \"preferred_name_-_last_name\": \"Rahardjo\", \"cost_center_code\": \"120100\", \"location\": \"Office - USA - CA - Headquarters\"}}",
"details": "Profile changed. Changed fields: []"
}
]
| 431.125
| 3,337
| 0.637576
| 387
| 3,449
| 5.338501
| 0.302326
| 0.014521
| 0.027106
| 0.028074
| 0.96515
| 0.96515
| 0.96515
| 0.96515
| 0.96515
| 0.96515
| 0
| 0.042338
| 0.082343
| 3,449
| 7
| 3,338
| 492.714286
| 0.610427
| 0
| 0
| 0
| 0
| 0
| 0.173384
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
769ba07cabedacdf51e14f831f888c4a624780a3
| 218
|
py
|
Python
|
mosaicme/web/mosaic/models.py
|
emccode/mosaicme
|
a6e133defb3404b6de6083c1414078338fba773a
|
[
"MIT"
] | 14
|
2015-08-26T14:51:43.000Z
|
2016-09-13T20:04:50.000Z
|
mosaicme/web/mosaic/models.py
|
thecodeteam/mosaicme
|
a6e133defb3404b6de6083c1414078338fba773a
|
[
"MIT"
] | 21
|
2016-04-07T13:56:43.000Z
|
2016-09-19T14:41:00.000Z
|
mosaicme/web/mosaic/models.py
|
emccode/mosaicme
|
a6e133defb3404b6de6083c1414078338fba773a
|
[
"MIT"
] | 12
|
2015-06-08T13:54:18.000Z
|
2016-09-11T11:08:03.000Z
|
from django.conf import settings
def init_redis():
"""
Returns redis connection.
"""
import redis
return redis.StrictRedis(host=settings.REDIS_HOST, port=settings.REDIS_PORT, db=settings.REDIS_DB)
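# A minimal usage sketch, assuming the Django settings module defines
# REDIS_HOST, REDIS_PORT and REDIS_DB (the key and value below are
# purely illustrative):
# r = init_redis()
# r.set('mosaic:last_build', '2016-01-01')
# print(r.get('mosaic:last_build'))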
| 24.222222
| 102
| 0.724771
| 28
| 218
| 5.5
| 0.535714
| 0.253247
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.174312
| 218
| 9
| 102
| 24.222222
| 0.855556
| 0.114679
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| true
| 0
| 0.5
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
76b4933c0530989e5fd1033942f529d33c6f56a1
| 3,972
|
py
|
Python
|
examples/demo.py
|
diegoperezm/screencast-script
|
ac477c6f44a151cafa88ebfd981d2bbe34f792bd
|
[
"MIT"
] | null | null | null |
examples/demo.py
|
diegoperezm/screencast-script
|
ac477c6f44a151cafa88ebfd981d2bbe34f792bd
|
[
"MIT"
] | null | null | null |
examples/demo.py
|
diegoperezm/screencast-script
|
ac477c6f44a151cafa88ebfd981d2bbe34f792bd
|
[
"MIT"
] | null | null | null |
import sys
# for development
sys.path.append('./src')
sys.path.append('../src')
from screencastscript import ScreencastScript # noqa: E402
screencast = ScreencastScript()
##################################
# #
# screencast-script: #
# Demo #
# #
##################################
screencast.i3wm_ws_2()
screencast.sleep(2)
screencast.take_screenshots(8)
screencast.i3wm_focus_right()
screencast.send_input_take_screenshots("""import matplotlib.pyplot as plt
""")
screencast.sleep(4)
screencast.take_screenshots(8)
screencast.send_input_take_screenshots("""import numpy as np
""")
screencast.sleep(2)
screencast.take_screenshots(8)
screencast.send_input_take_screenshots(
"""x = np.linspace(-np.pi / 2, np.pi / 2, 31)
y = np.cos(x)**3
""")
screencast.sleep(2)
screencast.take_screenshots(8)
screencast.send_input_take_screenshots("""# 1) remove points where y > 0.7
x2 = x[y <= 0.7]
y2 = y[y <= 0.7]
""")
screencast.sleep(2)
screencast.take_screenshots(8)
screencast.send_input_take_screenshots("""# 2) mask points where y > 0.7
y3 = np.ma.masked_where(y > 0.7, y)
# 3) set to NaN where y > 0.7
y4 = y.copy()
y4[y3 > 0.7] = np.nan
""")
screencast.sleep(2)
screencast.take_screenshots(8)
screencast.i3wm_toggle_fullscreen()
screencast.take_screenshots(8)
screencast.sleep(4)
screencast.send_input_take_screenshots(
"""plt.plot(x * 0.1, y, 'o-', color='lightgrey', label='No mask')
""")
screencast.sleep(4)
screencast.take_screenshots(8)
screencast.send_input_take_screenshots("""plt.show()
""")
screencast.sleep(4)
screencast.take_screenshots(12)
screencast.send_input_take_screenshots(
"""plt.plot(x2 * 0.4, y2, 'o-', label='Points removed')
""")
screencast.sleep(4)
screencast.take_screenshots(8)
screencast.send_input_take_screenshots("""plt.show()
""")
screencast.sleep(4)
screencast.take_screenshots(12)
screencast.send_input_take_screenshots(
"""plt.plot(x * 0.7, y3, 'o-', label='Masked values')
""")
screencast.sleep(4)
screencast.take_screenshots(8)
screencast.send_input("""plt.show()
""")
screencast.sleep(4)
screencast.take_screenshots(12)
screencast.send_input("""plt.plot(x * 1.0, y4, 'o-', label='NaN values')
""")
screencast.sleep(4)
screencast.take_screenshots(12)
screencast.send_input("""plt.legend()
""")
screencast.sleep(4)
screencast.send_input_take_screenshots("""plt.title('Masked and NaN data')
""")
screencast.sleep(4)
screencast.take_screenshots(4)
screencast.send_input_take_screenshots("""plt.show()
""")
screencast.sleep(4)
screencast.take_screenshots(12)
screencast.i3wm_toggle_fullscreen_take_screenshots()
screencast.take_screenshots(4)
screencast.i3wm_focus_left_take_screenshots()
screencast.take_screenshots(4)
screencast.sleep(2)
screencast.i3wm_toggle_fullscreen_take_screenshots()
screencast.take_screenshots(4)
screencast.sleep(2)
screencast.i3wm_zoom_in_take_screenshots()
screencast.sleep(2)
screencast.take_screenshots(8)
screencast.i3wm_zoom_in_take_screenshots()
screencast.sleep(2)
screencast.take_screenshots(8)
screencast.i3wm_zoom_in_take_screenshots()
screencast.sleep(2)
screencast.take_screenshots(8)
screencast.vim_scroll_down_take_screenshots()
screencast.sleep(2)
screencast.take_screenshots(8)
screencast.vim_scroll_down_take_screenshots()
screencast.sleep(2)
screencast.take_screenshots(8)
screencast.vim_scroll_down_take_screenshots()
screencast.sleep(2)
screencast.take_screenshots(8)
screencast.vim_scroll_down_take_screenshots()
screencast.sleep(2)
screencast.take_screenshots(8)
screencast.vim_scroll_down_take_screenshots()
screencast.sleep(2)
screencast.take_screenshots(8)
screencast.vim_scroll_down_take_screenshots()
screencast.sleep(2)
screencast.take_screenshots(24)
screencast.make_video()
# If you want to use this, you need an audio file (in the folder)
# screencast.make_video_with_audio(
# audio_file="./audio.mp3")
| 20.795812
| 74
| 0.739174
| 530
| 3,972
| 5.3
| 0.177358
| 0.277679
| 0.249199
| 0.166607
| 0.795657
| 0.776433
| 0.745461
| 0.740833
| 0.740833
| 0.660377
| 0
| 0.032741
| 0.108006
| 3,972
| 190
| 75
| 20.905263
| 0.76009
| 0.070997
| 0
| 0.743363
| 0
| 0
| 0.122577
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.035398
| 0
| 0.035398
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
76c8ec8726c97f123104a8e3ed55ca0d3b92b623
| 4,032
|
py
|
Python
|
pycls/datasets/custom_datasets.py
|
acl21/deep-active-learning-pytorch
|
637fd507235632903bcf84ed841ff524d847b94e
|
[
"MIT"
] | 42
|
2021-05-10T23:02:20.000Z
|
2022-03-26T02:21:40.000Z
|
pycls/datasets/custom_datasets.py
|
acl21/deep-active-learning-pytorch
|
637fd507235632903bcf84ed841ff524d847b94e
|
[
"MIT"
] | 1
|
2021-06-24T06:43:11.000Z
|
2021-06-24T11:54:13.000Z
|
pycls/datasets/custom_datasets.py
|
acl21/deep-active-learning-pytorch
|
637fd507235632903bcf84ed841ff524d847b94e
|
[
"MIT"
] | 3
|
2021-05-19T08:16:16.000Z
|
2022-02-03T10:58:51.000Z
|
import torch
import torchvision
from PIL import Image
class CIFAR10(torchvision.datasets.CIFAR10):
def __init__(self, root, train, transform, test_transform, download=True):
super(CIFAR10, self).__init__(root, train, transform=transform, download=download)
self.test_transform = test_transform
self.no_aug = False
def __getitem__(self, index: int):
"""
Args:
index (int): Index
Returns:
tuple: (image, target) where target is index of the target class.
"""
img, target = self.data[index], self.targets[index]
# doing this so that it is consistent with all other datasets
# to return a PIL Image
img = Image.fromarray(img)
if self.no_aug:
if self.test_transform is not None:
img = self.test_transform(img)
else:
if self.transform is not None:
img = self.transform(img)
return img, target
class CIFAR100(torchvision.datasets.CIFAR100):
def __init__(self, root, train, transform, test_transform, download=True):
super(CIFAR100, self).__init__(root, train, transform=transform, download=download)
self.test_transform = test_transform
self.no_aug = False
def __getitem__(self, index: int):
"""
Args:
index (int): Index
Returns:
tuple: (image, target) where target is index of the target class.
"""
img, target = self.data[index], self.targets[index]
# doing this so that it is consistent with all other datasets
# to return a PIL Image
img = Image.fromarray(img)
if self.no_aug:
if self.test_transform is not None:
img = self.test_transform(img)
else:
if self.transform is not None:
img = self.transform(img)
return img, target
class MNIST(torchvision.datasets.MNIST):
def __init__(self, root, train, transform, test_transform, download=True):
super(MNIST, self).__init__(root, train, transform=transform, download=download)
self.test_transform = test_transform
self.no_aug = False
def __getitem__(self, index: int):
"""
Args:
index (int): Index
Returns:
tuple: (image, target) where target is index of the target class.
"""
img, target = self.data[index], int(self.targets[index])
# doing this so that it is consistent with all other datasets
# to return a PIL Image
img = Image.fromarray(img.numpy(), mode='L')
if self.no_aug:
if self.test_transform is not None:
img = self.test_transform(img)
else:
if self.transform is not None:
img = self.transform(img)
return img, target
class SVHN(torchvision.datasets.SVHN):
def __init__(self, root, train, transform, test_transform, download=True):
super(SVHN, self).__init__(root, train, transform=transform, download=download)
self.test_transform = test_transform
self.no_aug = False
def __getitem__(self, index: int):
"""
Args:
index (int): Index
Returns:
tuple: (image, target) where target is index of the target class.
"""
img, target = self.data[index], self.targets[index]
# doing this so that it is consistent with all other datasets
# to return a PIL Image
img = Image.fromarray(img)
if self.no_aug:
if self.test_transform is not None:
img = self.test_transform(img)
else:
if self.transform is not None:
img = self.transform(img)
return img, target
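# A minimal usage sketch (not part of the original file): the no_aug
# flag lets an active-learning loop switch one dataset object between
# the training transform and the test-time transform. The transforms
# and root path below are illustrative assumptions:
# import torchvision.transforms as T
# train_tf = T.Compose([T.RandomCrop(32, padding=4), T.ToTensor()])
# ds = CIFAR10('./data', train=True, transform=train_tf,
#              test_transform=T.ToTensor())
# img, target = ds[0]   # augmented training view
# ds.no_aug = True
# img, target = ds[0]   # deterministic test-time view of the same sample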
| 32
| 92
| 0.569196
| 464
| 4,032
| 4.782328
| 0.127155
| 0.11717
| 0.091933
| 0.064894
| 0.904461
| 0.904461
| 0.904461
| 0.904461
| 0.904461
| 0.904461
| 0
| 0.005708
| 0.348214
| 4,032
| 126
| 93
| 32
| 0.838661
| 0.189484
| 0
| 0.793651
| 0
| 0
| 0.000339
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.126984
| false
| 0
| 0.047619
| 0
| 0.301587
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
4f173ae3da752dfece60a87223ed5699969af000
| 12,040
|
py
|
Python
|
test/lba/util_tests.py
|
nervepoint/logonbox-authenticator-python
|
a91c94d4358c2ecf9779250fe55f0ec84c3fff07
|
[
"Apache-2.0"
] | null | null | null |
test/lba/util_tests.py
|
nervepoint/logonbox-authenticator-python
|
a91c94d4358c2ecf9779250fe55f0ec84c3fff07
|
[
"Apache-2.0"
] | null | null | null |
test/lba/util_tests.py
|
nervepoint/logonbox-authenticator-python
|
a91c94d4358c2ecf9779250fe55f0ec84c3fff07
|
[
"Apache-2.0"
] | null | null | null |
import unittest
import lba.util
class ByteArrayWriterTest(unittest.TestCase):
def test_write_string(self):
w = lba.util.ByteArrayWriter()
w.write_string("A Test String")
self.assertEqual(bytes([ 0, 0, 0, 13, 65, 32, 84, 101, 115, 116, 32, 83, 116, 114, 105, 110, 103 ]), w.get_bytes())
def test_integer(self):
w = lba.util.ByteArrayWriter()
w.write_int(4294967295)
w.write_int(0)
w.write_int(255)
w.write_int(4294967040)
self.assertEqual(bytes([ 0xff, 0xff, 0xff, 0xff, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0 ]), w.get_bytes())
def test_big_integer(self):
w = lba.util.ByteArrayWriter()
w.write_big_integer(329802389981797891243908975290812)
self.assertEqual(bytes([ 0, 0, 0, 14, 16, 66, 176, 254, 247, 114, 215, 130, 240, 27, 237, 39, 233, 188 ]), w.get_bytes())
def test_binary_string(self):
w = lba.util.ByteArrayWriter()
w.write_binary_string("Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.".encode('UTF-8'))
self.assertEqual(bytes([ 0, 0, 0, 123, 76, 111, 114, 101,
109, 32, 105, 112, 115, 117, 109, 32,
100, 111, 108, 111, 114, 32, 115, 105,
116, 32, 97, 109, 101, 116, 44, 32,
99, 111, 110, 115, 101, 99, 116, 101,
116, 117, 114, 32, 97, 100, 105, 112,
105, 115, 99, 105, 110, 103, 32, 101,
108, 105, 116, 44, 32, 115, 101, 100,
32, 100, 111, 32, 101, 105, 117, 115,
109, 111, 100, 32, 116, 101, 109, 112,
111, 114, 32, 105, 110, 99, 105, 100,
105, 100, 117, 110, 116, 32, 117, 116,
32, 108, 97, 98, 111, 114, 101, 32,
101, 116, 32, 100, 111, 108, 111, 114,
101, 32, 109, 97, 103, 110, 97, 32,
97, 108, 105, 113, 117, 97, 46 ]),
w.get_bytes())
def test_massive_integer(self):
w = lba.util.ByteArrayWriter()
w.write_big_integer(4986580695048258251352289243969528543723799114324057371323608612564101467693190796478532220284311403189255873250803291602531019677110835331481798144386049284511688009328775687804730487000620487321119382781090544960120583643153599562724683545896843186364280959049341308629380720692043569110468202632021048673338887960542310457475382130231373634793736853819191982436405235215379401298185584213567077387840129057385674664071727417723315763120148348448625747824864998778650276874067046964948041454108472270884726573176720890632226924444526896411492224011080798782446878497167945815843132905198949069567082142592104355525279386692616234048604119115967592552701346081832583566701136596353331815241580453022478423878876764704414366376336598553049072822810090907768245535476110588270567353835663980833082822835527392197580869451516391575655964243632587493986489280683147080083155190055556030197814111481606633955453576346428985945179);
self.assertEqual(bytes([ 0, 0, 1, 129, 0, 219, 187,
194, 33, 195, 140, 127, 7, 175, 149, 255,
85, 187, 33, 19, 91, 211, 199, 5, 237,
90, 0, 155, 254, 36, 119, 8, 188, 150,
217, 238, 237, 90, 223, 43, 21, 237, 235,
55, 138, 131, 252, 118, 236, 201, 9, 163,
47, 30, 139, 78, 117, 127, 191, 123, 137,
169, 168, 62, 179, 79, 118, 184, 119, 19,
169, 223, 68, 154, 25, 117, 175, 114,
110, 170, 14, 20, 92, 110, 158, 73, 57,
123, 52, 245, 87, 240, 34, 231, 184, 153,
186, 114, 242, 99, 25, 131, 37, 240, 29,
207, 117, 37, 242, 52, 219, 49, 88, 208,
186, 193, 85, 242, 176, 154, 112, 176,
81, 107, 219, 126, 133, 206, 92, 18, 178,
156, 177, 26, 152, 189, 81, 41, 30, 226,
88, 70, 123, 0, 164, 176, 105, 91, 166,
221, 169, 159, 163, 94, 40, 145, 123, 94,
202, 91, 246, 150, 171, 157, 244, 102,
86, 236, 54, 28, 141, 210, 49, 218, 149,
106, 78, 196, 232, 174, 20, 66, 213, 176,
239, 147, 80, 102, 232, 173, 142, 48,
122, 76, 161, 193, 238, 64, 90, 45, 189,
182, 162, 163, 218, 158, 187, 2, 145, 84,
14, 254, 177, 241, 142, 245, 165, 130,
241, 124, 94, 23, 172, 48, 252, 201, 209,
160, 21, 17, 18, 222, 198, 190, 34, 136,
26, 78, 163, 127, 61, 152, 31, 106, 98,
144, 251, 112, 205, 91, 244, 138, 167,
23, 92, 210, 60, 229, 6, 213, 244, 87,
225, 55, 171, 143, 90, 234, 223, 36, 247,
110, 251, 98, 121, 3, 145, 52, 133, 81,
128, 148, 122, 147, 215, 231, 226, 163,
179, 133, 244, 249, 209, 83, 56, 88, 78,
245, 243, 130, 155, 181, 131, 57, 235,
22, 233, 67, 205, 208, 210, 41, 157, 208,
212, 73, 142, 122, 231, 128, 124, 170,
172, 214, 231, 191, 205, 195, 176, 16,
57, 92, 51, 74, 250, 171, 132, 254, 178,
37, 46, 234, 47, 107, 153, 242, 179, 120,
82, 184, 195, 224, 134, 61, 79, 116, 34,
173, 153, 170, 221, 144, 64, 120, 43,
128, 117, 158, 62, 153, 195, 224, 114,
254, 30, 161, 112, 80, 168, 103, 2, 215,
130, 120, 171, 67, 25, 172, 91 ]), w.get_bytes())
def test_sig(self):
w = lba.util.ByteArrayWriter()
w.write_string("ssh-rsa")
w.write_big_integer(65537)
w.write_big_integer(4986580695048258251352289243969528543723799114324057371323608612564101467693190796478532220284311403189255873250803291602531019677110835331481798144386049284511688009328775687804730487000620487321119382781090544960120583643153599562724683545896843186364280959049341308629380720692043569110468202632021048673338887960542310457475382130231373634793736853819191982436405235215379401298185584213567077387840129057385674664071727417723315763120148348448625747824864998778650276874067046964948041454108472270884726573176720890632226924444526896411492224011080798782446878497167945815843132905198949069567082142592104355525279386692616234048604119115967592552701346081832583566701136596353331815241580453022478423878876764704414366376336598553049072822810090907768245535476110588270567353835663980833082822835527392197580869451516391575655964243632587493986489280683147080083155190055556030197814111481606633955453576346428985945179);
self.assertEqual(bytes([ 0, 0, 0, 7, 115, 115, 104, 45,
114, 115, 97, 0, 0, 0, 3, 1, 0, 1,
0, 0, 1, 129, 0, 219, 187, 194, 33,
195, 140, 127, 7, 175, 149, 255, 85, 187,
33, 19, 91, 211, 199, 5, 237, 90, 0,
155, 254, 36, 119, 8, 188, 150, 217, 238,
237, 90, 223, 43, 21, 237, 235, 55, 138,
131, 252, 118, 236, 201, 9, 163, 47, 30,
139, 78, 117, 127, 191, 123, 137, 169,
168, 62, 179, 79, 118, 184, 119, 19, 169,
223, 68, 154, 25, 117, 175, 114, 110,
170, 14, 20, 92, 110, 158, 73, 57, 123,
52, 245, 87, 240, 34, 231, 184, 153, 186,
114, 242, 99, 25, 131, 37, 240, 29, 207,
117, 37, 242, 52, 219, 49, 88, 208, 186,
193, 85, 242, 176, 154, 112, 176, 81,
107, 219, 126, 133, 206, 92, 18, 178,
156, 177, 26, 152, 189, 81, 41, 30, 226,
88, 70, 123, 0, 164, 176, 105, 91, 166,
221, 169, 159, 163, 94, 40, 145, 123, 94,
202, 91, 246, 150, 171, 157, 244, 102,
86, 236, 54, 28, 141, 210, 49, 218, 149,
106, 78, 196, 232, 174, 20, 66, 213, 176,
239, 147, 80, 102, 232, 173, 142, 48,
122, 76, 161, 193, 238, 64, 90, 45, 189,
182, 162, 163, 218, 158, 187, 2, 145, 84,
14, 254, 177, 241, 142, 245, 165, 130,
241, 124, 94, 23, 172, 48, 252, 201, 209,
160, 21, 17, 18, 222, 198, 190, 34, 136,
26, 78, 163, 127, 61, 152, 31, 106, 98,
144, 251, 112, 205, 91, 244, 138, 167,
23, 92, 210, 60, 229, 6, 213, 244, 87,
225, 55, 171, 143, 90, 234, 223, 36, 247,
110, 251, 98, 121, 3, 145, 52, 133, 81,
128, 148, 122, 147, 215, 231, 226, 163,
179, 133, 244, 249, 209, 83, 56, 88, 78,
245, 243, 130, 155, 181, 131, 57, 235,
22, 233, 67, 205, 208, 210, 41, 157, 208,
212, 73, 142, 122, 231, 128, 124, 170,
172, 214, 231, 191, 205, 195, 176, 16,
57, 92, 51, 74, 250, 171, 132, 254, 178,
37, 46, 234, 47, 107, 153, 242, 179, 120,
82, 184, 195, 224, 134, 61, 79, 116, 34,
173, 153, 170, 221, 144, 64, 120, 43,
128, 117, 158, 62, 153, 195, 224, 114,
254, 30, 161, 112, 80, 168, 103, 2, 215,
130, 120, 171, 67, 25, 172, 91 ]), w.get_bytes())
class ByteArrayReaderTest(unittest.TestCase):
def test_read_string(self):
r = lba.util.ByteArrayReader(bytes([ 0, 0, 0, 13, 65, 32, 84, 101, 115, 116, 32, 83, 116, 114, 105, 110, 103]))
self.assertEqual("A Test String", r.read_string())
def test_integer(self):
r = lba.util.ByteArrayReader(bytes([ 0xff, 0xff, 0xff, 0xff, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0 ]))
self.assertEqual(4294967295, r.read_int())
self.assertEqual(0, r.read_int())
self.assertEqual(255, r.read_int())
self.assertEqual(4294967040, r.read_int())
def test_binary_string(self):
r = lba.util.ByteArrayReader(bytes([ 0, 0, 0, 123, 76, 111,
114, 101, 109, 32, 105, 112, 115, 117,
109, 32, 100, 111, 108, 111, 114, 32,
115, 105, 116, 32, 97, 109, 101, 116, 44,
32, 99, 111, 110, 115, 101, 99, 116, 101,
116, 117, 114, 32, 97, 100, 105, 112,
105, 115, 99, 105, 110, 103, 32, 101,
108, 105, 116, 44, 32, 115, 101, 100, 32,
100, 111, 32, 101, 105, 117, 115, 109,
111, 100, 32, 116, 101, 109, 112, 111,
114, 32, 105, 110, 99, 105, 100, 105,
100, 117, 110, 116, 32, 117, 116, 32,
108, 97, 98, 111, 114, 101, 32, 101, 116,
32, 100, 111, 108, 111, 114, 101, 32,
109, 97, 103, 110, 97, 32, 97, 108, 105,
113, 117, 97, 46 ]))
self.assertEqual(
"Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.".encode("UTF-8"),
r.read_binary_string())
def test_big_integer(self):
r = lba.util.ByteArrayReader(bytes([ 0, 0, 0, 14, 16, 66,
176, 254, 247, 114, 215, 130, 240, 27,
237, 39, 233, 188 ]))
self.assertEqual(329802389981797891243908975290812, r.read_big_integer())
def test_boolean(self):
r = lba.util.ByteArrayReader(bytes([ 0, 1 ]))
self.assertFalse(r.read_boolean())
self.assertTrue(r.read_boolean())
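# Added entry point so the test module can be run directly; this is a
# conventional addition, not part of the original file:
if __name__ == '__main__':
    unittest.main()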
| 62.061856
| 955
| 0.534219
| 1,453
| 12,040
| 4.391604
| 0.188575
| 0.009403
| 0.008463
| 0.005015
| 0.910202
| 0.883247
| 0.876038
| 0.864441
| 0.838897
| 0.834665
| 0
| 0.624129
| 0.344601
| 12,040
| 193
| 956
| 62.38342
| 0.184514
| 0
| 0
| 0.440678
| 0
| 0.011299
| 0.024003
| 0
| 0
| 0
| 0.005316
| 0
| 0.084746
| 1
| 0.062147
| false
| 0
| 0.011299
| 0
| 0.084746
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
4f3d8aeca4802004133b55244744bbf85575c3cc
| 100,487
|
py
|
Python
|
alibabacloud/clients/r_kvstore_20150101.py
|
wallisyan/alibabacloud-python-sdk-v2
|
6e024c97cded2403025a7dd8fea8261e41872156
|
[
"Apache-2.0"
] | 21
|
2018-12-20T07:34:13.000Z
|
2020-03-05T14:32:08.000Z
|
alibabacloud/clients/r_kvstore_20150101.py
|
wallisyan/alibabacloud-python-sdk-v2
|
6e024c97cded2403025a7dd8fea8261e41872156
|
[
"Apache-2.0"
] | 22
|
2018-12-21T13:22:33.000Z
|
2020-06-29T08:37:09.000Z
|
alibabacloud/clients/r_kvstore_20150101.py
|
wallisyan/alibabacloud-python-sdk-v2
|
6e024c97cded2403025a7dd8fea8261e41872156
|
[
"Apache-2.0"
] | 12
|
2018-12-29T05:45:55.000Z
|
2022-01-05T09:59:30.000Z
|
# Copyright 2019 Alibaba Cloud Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from alibabacloud.client import AlibabaCloudClient
from alibabacloud.request import APIRequest
from alibabacloud.utils.parameter_validation import verify_params
class RkvstoreClient(AlibabaCloudClient):
def __init__(self, client_config, credentials_provider=None, retry_policy=None,
endpoint_resolver=None):
AlibabaCloudClient.__init__(self, client_config,
credentials_provider=credentials_provider,
retry_policy=retry_policy,
endpoint_resolver=endpoint_resolver)
self.product_code = 'R-kvstore'
self.api_version = '2015-01-01'
self.location_service_code = 'redisa'
self.location_endpoint_type = 'openAPI'
def create_sharding_instance(
self,
shard_storage_quantity=None,
resource_owner_id=None,
node_type=None,
coupon_no=None,
network_type=None,
engine_version=None,
instance_class=None,
capacity=None,
password=None,
shard_replica_class=None,
security_token=None,
incremental_backup_mode=None,
instance_type=None,
business_info=None,
period=None,
resource_owner_account=None,
src_db_instance_id=None,
owner_account=None,
backup_id=None,
owner_id=None,
token=None,
shard_quantity=None,
vswitch_id=None,
private_ip_address=None,
security_ip_list=None,
instance_name=None,
shard_replica_quantity=None,
architecture_type=None,
vpc_id=None,
redis_manager_class=None,
zone_id=None,
charge_type=None,
proxy_quantity=None,
config=None,
proxy_mode=None):
api_request = APIRequest('CreateShardingInstance', 'GET', 'http', 'RPC', 'query')
api_request._params = {
"ShardStorageQuantity": shard_storage_quantity,
"ResourceOwnerId": resource_owner_id,
"NodeType": node_type,
"CouponNo": coupon_no,
"NetworkType": network_type,
"EngineVersion": engine_version,
"InstanceClass": instance_class,
"Capacity": capacity,
"Password": password,
"ShardReplicaClass": shard_replica_class,
"SecurityToken": security_token,
"IncrementalBackupMode": incremental_backup_mode,
"InstanceType": instance_type,
"BusinessInfo": business_info,
"Period": period,
"ResourceOwnerAccount": resource_owner_account,
"SrcDBInstanceId": src_db_instance_id,
"OwnerAccount": owner_account,
"BackupId": backup_id,
"OwnerId": owner_id,
"Token": token,
"ShardQuantity": shard_quantity,
"VSwitchId": vswitch_id,
"PrivateIpAddress": private_ip_address,
"SecurityIPList": security_ip_list,
"InstanceName": instance_name,
"ShardReplicaQuantity": shard_replica_quantity,
"ArchitectureType": architecture_type,
"VpcId": vpc_id,
"RedisManagerClass": redis_manager_class,
"ZoneId": zone_id,
"ChargeType": charge_type,
"ProxyQuantity": proxy_quantity,
"Config": config,
"ProxyMode": proxy_mode}
return self._handle_request(api_request).result
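# A minimal, comment-only usage sketch (not part of the generated
# client); ClientConfig and the region value below are assumptions
# about the surrounding SDK:
# from alibabacloud.client import ClientConfig
# config = ClientConfig(region_id='cn-hangzhou')
# client = RkvstoreClient(config)
# tags = client.describe_tags(region_id='cn-hangzhou')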
def describe_sharding_instances(
self,
resource_owner_id=None,
security_token=None,
resource_owner_account=None,
instance_ids=None,
owner_account=None,
owner_id=None):
api_request = APIRequest('DescribeShardingInstances', 'GET', 'http', 'RPC', 'query')
api_request._params = {
"ResourceOwnerId": resource_owner_id,
"SecurityToken": security_token,
"ResourceOwnerAccount": resource_owner_account,
"InstanceIds": instance_ids,
"OwnerAccount": owner_account,
"OwnerId": owner_id}
return self._handle_request(api_request).result
def describe_tags(
self,
resource_owner_id=None,
resource_owner_account=None,
region_id=None,
next_token=None,
owner_account=None,
owner_id=None,
resource_type=None):
api_request = APIRequest('DescribeTags', 'GET', 'http', 'RPC', 'query')
api_request._params = {
"ResourceOwnerId": resource_owner_id,
"ResourceOwnerAccount": resource_owner_account,
"RegionId": region_id,
"NextToken": next_token,
"OwnerAccount": owner_account,
"OwnerId": owner_id,
"ResourceType": resource_type}
return self._handle_request(api_request).result
def describe_available_resource(
self,
resource_owner_id=None,
security_token=None,
resource_owner_account=None,
owner_account=None,
zone_id=None,
owner_id=None,
instance_charge_type=None,
order_type=None):
api_request = APIRequest('DescribeAvailableResource', 'GET', 'http', 'RPC', 'query')
api_request._params = {
"ResourceOwnerId": resource_owner_id,
"SecurityToken": security_token,
"ResourceOwnerAccount": resource_owner_account,
"OwnerAccount": owner_account,
"ZoneId": zone_id,
"OwnerId": owner_id,
"InstanceChargeType": instance_charge_type,
"OrderType": order_type}
return self._handle_request(api_request).result
def allocate_instance_public_connection(
self,
resource_owner_id=None,
connection_string_prefix=None,
instance_id=None,
security_token=None,
resource_owner_account=None,
port=None,
owner_account=None,
owner_id=None):
api_request = APIRequest('AllocateInstancePublicConnection', 'GET', 'http', 'RPC', 'query')
api_request._params = {
"ResourceOwnerId": resource_owner_id,
"ConnectionStringPrefix": connection_string_prefix,
"InstanceId": instance_id,
"SecurityToken": security_token,
"ResourceOwnerAccount": resource_owner_account,
"Port": port,
"OwnerAccount": owner_account,
"OwnerId": owner_id}
return self._handle_request(api_request).result
def release_instance_public_connection(
self,
resource_owner_id=None,
instance_id=None,
security_token=None,
resource_owner_account=None,
owner_account=None,
owner_id=None,
current_connection_string=None):
api_request = APIRequest('ReleaseInstancePublicConnection', 'GET', 'http', 'RPC', 'query')
api_request._params = {
"ResourceOwnerId": resource_owner_id,
"InstanceId": instance_id,
"SecurityToken": security_token,
"ResourceOwnerAccount": resource_owner_account,
"OwnerAccount": owner_account,
"OwnerId": owner_id,
"CurrentConnectionString": current_connection_string}
return self._handle_request(api_request).result
def list_tag_resources(
self,
resource_owner_id=None,
list_of_resource_id=None,
resource_owner_account=None,
region_id=None,
next_token=None,
owner_account=None,
list_of_tag=None,
owner_id=None,
resource_type=None):
api_request = APIRequest('ListTagResources', 'GET', 'http', 'RPC', 'query')
api_request._params = {
"ResourceOwnerId": resource_owner_id,
"ResourceId": list_of_resource_id,
"ResourceOwnerAccount": resource_owner_account,
"RegionId": region_id,
"NextToken": next_token,
"OwnerAccount": owner_account,
"Tag": list_of_tag,
"OwnerId": owner_id,
"ResourceType": resource_type}
repeat_info = {"ResourceId": ('ResourceId', 'list', 'str', None),
"Tag": ('Tag', 'list', 'dict', [('Value', 'str', None, None),
('Key', 'str', None, None),
]),
}
verify_params(api_request._params, repeat_info)
return self._handle_request(api_request).result
def tag_resources(
self,
resource_owner_id=None,
list_of_resource_id=None,
resource_owner_account=None,
region_id=None,
owner_account=None,
list_of_tag=None,
owner_id=None,
resource_type=None):
api_request = APIRequest('TagResources', 'GET', 'http', 'RPC', 'query')
api_request._params = {
"ResourceOwnerId": resource_owner_id,
"ResourceId": list_of_resource_id,
"ResourceOwnerAccount": resource_owner_account,
"RegionId": region_id,
"OwnerAccount": owner_account,
"Tag": list_of_tag,
"OwnerId": owner_id,
"ResourceType": resource_type}
repeat_info = {"ResourceId": ('ResourceId', 'list', 'str', None),
"Tag": ('Tag', 'list', 'dict', [('Value', 'str', None, None),
('Key', 'str', None, None),
]),
}
verify_params(api_request._params, repeat_info)
return self._handle_request(api_request).result
def migrate_to_other_zone(
self,
vswitch_id=None,
resource_owner_id=None,
security_token=None,
resource_owner_account=None,
effective_time=None,
owner_account=None,
zone_id=None,
db_instance_id=None,
owner_id=None):
api_request = APIRequest('MigrateToOtherZone', 'GET', 'http', 'RPC', 'query')
api_request._params = {
"VSwitchId": vswitch_id,
"ResourceOwnerId": resource_owner_id,
"SecurityToken": security_token,
"ResourceOwnerAccount": resource_owner_account,
"EffectiveTime": effective_time,
"OwnerAccount": owner_account,
"ZoneId": zone_id,
"DBInstanceId": db_instance_id,
"OwnerId": owner_id}
return self._handle_request(api_request).result
def untag_resources(
self,
all_=None,
resource_owner_id=None,
list_of_resource_id=None,
resource_owner_account=None,
region_id=None,
owner_account=None,
owner_id=None,
list_of_tag_key=None,
resource_type=None):
api_request = APIRequest('UntagResources', 'GET', 'http', 'RPC', 'query')
api_request._params = {
"All": all_,
"ResourceOwnerId": resource_owner_id,
"ResourceId": list_of_resource_id,
"ResourceOwnerAccount": resource_owner_account,
"RegionId": region_id,
"OwnerAccount": owner_account,
"OwnerId": owner_id,
"TagKey": list_of_tag_key,
"ResourceType": resource_type}
repeat_info = {"ResourceId": ('ResourceId', 'list', 'str', None),
"TagKey": ('TagKey', 'list', 'str', None),
}
verify_params(api_request._params, repeat_info)
return self._handle_request(api_request).result
def modify_audit_log_config(
self,
resource_owner_id=None,
instance_id=None,
security_token=None,
resource_owner_account=None,
audit_command=None,
owner_account=None,
owner_id=None,
retention=None):
api_request = APIRequest('ModifyAuditLogConfig', 'GET', 'http', 'RPC', 'query')
api_request._params = {
"ResourceOwnerId": resource_owner_id,
"InstanceId": instance_id,
"SecurityToken": security_token,
"ResourceOwnerAccount": resource_owner_account,
"AuditCommand": audit_command,
"OwnerAccount": owner_account,
"OwnerId": owner_id,
"Retention": retention}
return self._handle_request(api_request).result
def modify_db_instance_monitor(
self,
resource_owner_id=None,
security_token=None,
resource_owner_account=None,
owner_account=None,
db_instance_id=None,
interval=None,
owner_id=None):
api_request = APIRequest('ModifyDBInstanceMonitor', 'GET', 'http', 'RPC', 'query')
api_request._params = {
"ResourceOwnerId": resource_owner_id,
"SecurityToken": security_token,
"ResourceOwnerAccount": resource_owner_account,
"OwnerAccount": owner_account,
"DBInstanceId": db_instance_id,
"Interval": interval,
"OwnerId": owner_id}
return self._handle_request(api_request).result
def describe_db_instance_monitor(
self,
resource_owner_id=None,
security_token=None,
resource_owner_account=None,
owner_account=None,
db_instance_id=None,
owner_id=None):
api_request = APIRequest('DescribeDBInstanceMonitor', 'GET', 'http', 'RPC', 'query')
api_request._params = {
"ResourceOwnerId": resource_owner_id,
"SecurityToken": security_token,
"ResourceOwnerAccount": resource_owner_account,
"OwnerAccount": owner_account,
"DBInstanceId": db_instance_id,
"OwnerId": owner_id}
return self._handle_request(api_request).result
def describe_cache_analysis_report_list(
self,
date=None,
resource_owner_id=None,
resource_owner_account=None,
owner_account=None,
owner_id=None,
instance_id=None,
security_token=None,
page_size=None,
page_numbers=None,
days=None,
node_id=None):
api_request = APIRequest('DescribeCacheAnalysisReportList', 'GET', 'http', 'RPC', 'query')
api_request._params = {
"Date": date,
"ResourceOwnerId": resource_owner_id,
"ResourceOwnerAccount": resource_owner_account,
"OwnerAccount": owner_account,
"OwnerId": owner_id,
"InstanceId": instance_id,
"SecurityToken": security_token,
"PageSize": page_size,
"PageNumbers": page_numbers,
"Days": days,
"NodeId": node_id}
return self._handle_request(api_request).result
def describe_intranet_attribute(
self,
resource_owner_id=None,
instance_id=None,
security_token=None,
resource_owner_account=None,
owner_account=None,
owner_id=None):
api_request = APIRequest('DescribeIntranetAttribute', 'GET', 'http', 'RPC', 'query')
api_request._params = {
"ResourceOwnerId": resource_owner_id,
"InstanceId": instance_id,
"SecurityToken": security_token,
"ResourceOwnerAccount": resource_owner_account,
"OwnerAccount": owner_account,
"OwnerId": owner_id}
return self._handle_request(api_request).result
def modify_intranet_attribute(
self,
resource_owner_id=None,
instance_id=None,
security_token=None,
resource_owner_account=None,
owner_account=None,
owner_id=None):
api_request = APIRequest('ModifyIntranetAttribute', 'GET', 'http', 'RPC', 'query')
api_request._params = {
"ResourceOwnerId": resource_owner_id,
"InstanceId": instance_id,
"SecurityToken": security_token,
"ResourceOwnerAccount": resource_owner_account,
"OwnerAccount": owner_account,
"OwnerId": owner_id}
return self._handle_request(api_request).result
def describe_cache_analysis_report(
self,
date=None,
resource_owner_id=None,
resource_owner_account=None,
owner_account=None,
owner_id=None,
analysis_type=None,
instance_id=None,
security_token=None,
page_size=None,
page_numbers=None,
node_id=None):
api_request = APIRequest('DescribeCacheAnalysisReport', 'GET', 'http', 'RPC', 'query')
api_request._params = {
"Date": date,
"ResourceOwnerId": resource_owner_id,
"ResourceOwnerAccount": resource_owner_account,
"OwnerAccount": owner_account,
"OwnerId": owner_id,
"AnalysisType": analysis_type,
"InstanceId": instance_id,
"SecurityToken": security_token,
"PageSize": page_size,
"PageNumbers": page_numbers,
"NodeId": node_id}
return self._handle_request(api_request).result
def create_cache_analysis_task(
self,
resource_owner_id=None,
instance_id=None,
security_token=None,
resource_owner_account=None,
owner_account=None,
owner_id=None):
api_request = APIRequest('CreateCacheAnalysisTask', 'GET', 'http', 'RPC', 'query')
api_request._params = {
"ResourceOwnerId": resource_owner_id,
"InstanceId": instance_id,
"SecurityToken": security_token,
"ResourceOwnerAccount": resource_owner_account,
"OwnerAccount": owner_account,
"OwnerId": owner_id}
return self._handle_request(api_request).result
def describe_audit_records(
self,
resource_owner_id=None,
resource_owner_account=None,
owner_account=None,
end_time=None,
start_time=None,
owner_id=None,
query_keywords=None,
page_number=None,
host_address=None,
instance_id=None,
account_name=None,
security_token=None,
database_name=None,
page_size=None,
node_id=None):
api_request = APIRequest('DescribeAuditRecords', 'GET', 'http', 'RPC', 'query')
api_request._params = {
"ResourceOwnerId": resource_owner_id,
"ResourceOwnerAccount": resource_owner_account,
"OwnerAccount": owner_account,
"EndTime": end_time,
"StartTime": start_time,
"OwnerId": owner_id,
"QueryKeywords": query_keywords,
"PageNumber": page_number,
"HostAddress": host_address,
"InstanceId": instance_id,
"AccountName": account_name,
"SecurityToken": security_token,
"DatabaseName": database_name,
"PageSize": page_size,
"NodeId": node_id}
return self._handle_request(api_request).result
def describe_slow_log_records(
self,
sql_id=None,
resource_owner_id=None,
resource_owner_account=None,
owner_account=None,
end_time=None,
start_time=None,
owner_id=None,
page_number=None,
slow_log_record_type=None,
instance_id=None,
db_name=None,
security_token=None,
page_size=None,
node_id=None):
api_request = APIRequest('DescribeSlowLogRecords', 'GET', 'http', 'RPC', 'query')
api_request._params = {
"SQLId": sql_id,
"ResourceOwnerId": resource_owner_id,
"ResourceOwnerAccount": resource_owner_account,
"OwnerAccount": owner_account,
"EndTime": end_time,
"StartTime": start_time,
"OwnerId": owner_id,
"PageNumber": page_number,
"SlowLogRecordType": slow_log_record_type,
"InstanceId": instance_id,
"DBName": db_name,
"SecurityToken": security_token,
"PageSize": page_size,
"NodeId": node_id}
return self._handle_request(api_request).result
def describe_error_log_records(
self,
sql_id=None,
resource_owner_id=None,
resource_owner_account=None,
owner_account=None,
end_time=None,
start_time=None,
owner_id=None,
page_number=None,
instance_id=None,
db_name=None,
security_token=None,
page_size=None,
role_type=None,
node_id=None):
api_request = APIRequest('DescribeErrorLogRecords', 'GET', 'http', 'RPC', 'query')
api_request._params = {
"SQLId": sql_id,
"ResourceOwnerId": resource_owner_id,
"ResourceOwnerAccount": resource_owner_account,
"OwnerAccount": owner_account,
"EndTime": end_time,
"StartTime": start_time,
"OwnerId": owner_id,
"PageNumber": page_number,
"InstanceId": instance_id,
"DBName": db_name,
"SecurityToken": security_token,
"PageSize": page_size,
"RoleType": role_type,
"NodeId": node_id}
return self._handle_request(api_request).result
def restart_instance(
self,
resource_owner_id=None,
instance_id=None,
security_token=None,
resource_owner_account=None,
effective_time=None,
owner_account=None,
owner_id=None):
api_request = APIRequest('RestartInstance', 'GET', 'http', 'RPC', 'query')
api_request._params = {
"ResourceOwnerId": resource_owner_id,
"InstanceId": instance_id,
"SecurityToken": security_token,
"ResourceOwnerAccount": resource_owner_account,
"EffectiveTime": effective_time,
"OwnerAccount": owner_account,
"OwnerId": owner_id}
return self._handle_request(api_request).result
def modify_instance_auto_renewal_attribute(
self,
duration=None,
resource_owner_id=None,
auto_renew=None,
security_token=None,
resource_owner_account=None,
owner_account=None,
db_instance_id=None,
owner_id=None):
api_request = APIRequest('ModifyInstanceAutoRenewalAttribute',
'GET', 'http', 'RPC', 'query')
api_request._params = {
"Duration": duration,
"ResourceOwnerId": resource_owner_id,
"AutoRenew": auto_renew,
"SecurityToken": security_token,
"ResourceOwnerAccount": resource_owner_account,
"OwnerAccount": owner_account,
"DBInstanceId": db_instance_id,
"OwnerId": owner_id}
return self._handle_request(api_request).result
def describe_instance_auto_renewal_attribute(
self,
resource_owner_id=None,
resource_owner_account=None,
client_token=None,
region_id=None,
owner_account=None,
page_size=None,
db_instance_id=None,
owner_id=None,
page_number=None,
proxy_id=None):
api_request = APIRequest('DescribeInstanceAutoRenewalAttribute',
'GET', 'http', 'RPC', 'query')
api_request._params = {
"ResourceOwnerId": resource_owner_id,
"ResourceOwnerAccount": resource_owner_account,
"ClientToken": client_token,
"RegionId": region_id,
"OwnerAccount": owner_account,
"PageSize": page_size,
"DBInstanceId": db_instance_id,
"OwnerId": owner_id,
"PageNumber": page_number,
"proxyId": proxy_id}
return self._handle_request(api_request).result
def describe_running_log_records(
self,
sql_id=None,
resource_owner_id=None,
resource_owner_account=None,
owner_account=None,
end_time=None,
start_time=None,
owner_id=None,
page_number=None,
instance_id=None,
db_name=None,
security_token=None,
page_size=None,
role_type=None,
node_id=None):
api_request = APIRequest('DescribeRunningLogRecords', 'GET', 'http', 'RPC', 'query')
api_request._params = {
"SQLId": sql_id,
"ResourceOwnerId": resource_owner_id,
"ResourceOwnerAccount": resource_owner_account,
"OwnerAccount": owner_account,
"EndTime": end_time,
"StartTime": start_time,
"OwnerId": owner_id,
"PageNumber": page_number,
"InstanceId": instance_id,
"DBName": db_name,
"SecurityToken": security_token,
"PageSize": page_size,
"RoleType": role_type,
"NodeId": node_id}
return self._handle_request(api_request).result
def unlink_replica_instance(
self,
resource_owner_id=None,
instance_id=None,
security_token=None,
resource_owner_account=None,
owner_account=None,
replica_id=None,
owner_id=None):
api_request = APIRequest('UnlinkReplicaInstance', 'GET', 'http', 'RPC', 'query')
api_request._params = {
"ResourceOwnerId": resource_owner_id,
"InstanceId": instance_id,
"SecurityToken": security_token,
"ResourceOwnerAccount": resource_owner_account,
"OwnerAccount": owner_account,
"ReplicaId": replica_id,
"OwnerId": owner_id}
return self._handle_request(api_request).result
def modify_active_operation_task(
self,
resource_owner_id=None,
security_token=None,
resource_owner_account=None,
owner_account=None,
ids=None,
switch_time=None,
owner_id=None):
api_request = APIRequest('ModifyActiveOperationTask', 'GET', 'http', 'RPC', 'query')
api_request._params = {
"ResourceOwnerId": resource_owner_id,
"SecurityToken": security_token,
"ResourceOwnerAccount": resource_owner_account,
"OwnerAccount": owner_account,
"Ids": ids,
"SwitchTime": switch_time,
"OwnerId": owner_id}
return self._handle_request(api_request).result
def describe_active_operation_task_type(
self,
resource_owner_id=None,
is_history=None,
security_token=None,
resource_owner_account=None,
owner_account=None,
owner_id=None):
api_request = APIRequest('DescribeActiveOperationTaskType', 'GET', 'http', 'RPC', 'query')
api_request._params = {
"ResourceOwnerId": resource_owner_id,
"IsHistory": is_history,
"SecurityToken": security_token,
"ResourceOwnerAccount": resource_owner_account,
"OwnerAccount": owner_account,
"OwnerId": owner_id}
return self._handle_request(api_request).result
def describe_active_operation_task_region(
self,
resource_owner_id=None,
is_history=None,
task_type=None,
security_token=None,
resource_owner_account=None,
owner_account=None,
owner_id=None):
api_request = APIRequest('DescribeActiveOperationTaskRegion', 'GET', 'http', 'RPC', 'query')
api_request._params = {
"ResourceOwnerId": resource_owner_id,
"IsHistory": is_history,
"TaskType": task_type,
"SecurityToken": security_token,
"ResourceOwnerAccount": resource_owner_account,
"OwnerAccount": owner_account,
"OwnerId": owner_id}
return self._handle_request(api_request).result
def describe_active_operation_task_count(
self,
resource_owner_id=None,
security_token=None,
resource_owner_account=None,
owner_account=None,
owner_id=None):
api_request = APIRequest('DescribeActiveOperationTaskCount', 'GET', 'http', 'RPC', 'query')
api_request._params = {
"ResourceOwnerId": resource_owner_id,
"SecurityToken": security_token,
"ResourceOwnerAccount": resource_owner_account,
"OwnerAccount": owner_account,
"OwnerId": owner_id}
return self._handle_request(api_request).result
def describe_active_operation_task(
self,
resource_owner_id=None,
task_type=None,
resource_owner_account=None,
owner_account=None,
owner_id=None,
page_number=None,
is_history=None,
security_token=None,
page_size=None,
region=None):
api_request = APIRequest('DescribeActiveOperationTask', 'GET', 'http', 'RPC', 'query')
api_request._params = {
"ResourceOwnerId": resource_owner_id,
"TaskType": task_type,
"ResourceOwnerAccount": resource_owner_account,
"OwnerAccount": owner_account,
"OwnerId": owner_id,
"PageNumber": page_number,
"IsHistory": is_history,
"SecurityToken": security_token,
"PageSize": page_size,
"Region": region}
return self._handle_request(api_request).result
def modify_instance_major_version(
self,
resource_owner_id=None,
instance_id=None,
major_version=None,
security_token=None,
resource_owner_account=None,
owner_account=None,
owner_id=None,
effect_time=None):
api_request = APIRequest('ModifyInstanceMajorVersion', 'GET', 'http', 'RPC', 'query')
api_request._params = {
"ResourceOwnerId": resource_owner_id,
"InstanceId": instance_id,
"MajorVersion": major_version,
"SecurityToken": security_token,
"ResourceOwnerAccount": resource_owner_account,
"OwnerAccount": owner_account,
"OwnerId": owner_id,
"EffectTime": effect_time}
return self._handle_request(api_request).result
def describe_parameter_templates(
self,
resource_owner_id=None,
security_token=None,
resource_owner_account=None,
engine=None,
owner_account=None,
engine_version=None,
owner_id=None,
character_type=None):
api_request = APIRequest('DescribeParameterTemplates', 'GET', 'http', 'RPC', 'query')
api_request._params = {
"ResourceOwnerId": resource_owner_id,
"SecurityToken": security_token,
"ResourceOwnerAccount": resource_owner_account,
"Engine": engine,
"OwnerAccount": owner_account,
"EngineVersion": engine_version,
"OwnerId": owner_id,
"CharacterType": character_type}
return self._handle_request(api_request).result
def revoke_account_privilege(
self,
resource_owner_id=None,
instance_id=None,
account_name=None,
security_token=None,
resource_owner_account=None,
owner_account=None,
owner_id=None):
api_request = APIRequest('RevokeAccountPrivilege', 'GET', 'http', 'RPC', 'query')
api_request._params = {
"ResourceOwnerId": resource_owner_id,
"InstanceId": instance_id,
"AccountName": account_name,
"SecurityToken": security_token,
"ResourceOwnerAccount": resource_owner_account,
"OwnerAccount": owner_account,
"OwnerId": owner_id}
return self._handle_request(api_request).result
def describe_parameters(
self,
resource_owner_id=None,
security_token=None,
resource_owner_account=None,
owner_account=None,
db_instance_id=None,
owner_id=None,
node_id=None):
api_request = APIRequest('DescribeParameters', 'GET', 'http', 'RPC', 'query')
api_request._params = {
"ResourceOwnerId": resource_owner_id,
"SecurityToken": security_token,
"ResourceOwnerAccount": resource_owner_account,
"OwnerAccount": owner_account,
"DBInstanceId": db_instance_id,
"OwnerId": owner_id,
"NodeId": node_id}
return self._handle_request(api_request).result
def modify_account_description(
self,
resource_owner_id=None,
instance_id=None,
account_name=None,
security_token=None,
resource_owner_account=None,
owner_account=None,
owner_id=None,
account_description=None):
api_request = APIRequest('ModifyAccountDescription', 'GET', 'http', 'RPC', 'query')
api_request._params = {
"ResourceOwnerId": resource_owner_id,
"InstanceId": instance_id,
"AccountName": account_name,
"SecurityToken": security_token,
"ResourceOwnerAccount": resource_owner_account,
"OwnerAccount": owner_account,
"OwnerId": owner_id,
"AccountDescription": account_description}
return self._handle_request(api_request).result
def reset_account(
self,
resource_owner_id=None,
account_password=None,
instance_id=None,
account_name=None,
security_token=None,
resource_owner_account=None,
owner_account=None,
owner_id=None):
api_request = APIRequest('ResetAccount', 'GET', 'http', 'RPC', 'query')
api_request._params = {
"ResourceOwnerId": resource_owner_id,
"AccountPassword": account_password,
"InstanceId": instance_id,
"AccountName": account_name,
"SecurityToken": security_token,
"ResourceOwnerAccount": resource_owner_account,
"OwnerAccount": owner_account,
"OwnerId": owner_id}
return self._handle_request(api_request).result
def create_account(
self,
resource_owner_id=None,
resource_owner_account=None,
owner_account=None,
account_type=None,
owner_id=None,
account_description=None,
account_privilege=None,
account_password=None,
instance_id=None,
account_name=None,
security_token=None):
api_request = APIRequest('CreateAccount', 'GET', 'http', 'RPC', 'query')
api_request._params = {
"ResourceOwnerId": resource_owner_id,
"ResourceOwnerAccount": resource_owner_account,
"OwnerAccount": owner_account,
"AccountType": account_type,
"OwnerId": owner_id,
"AccountDescription": account_description,
"AccountPrivilege": account_privilege,
"AccountPassword": account_password,
"InstanceId": instance_id,
"AccountName": account_name,
"SecurityToken": security_token}
return self._handle_request(api_request).result
def delete_account(
self,
resource_owner_id=None,
instance_id=None,
account_name=None,
security_token=None,
resource_owner_account=None,
owner_account=None,
owner_id=None):
api_request = APIRequest('DeleteAccount', 'GET', 'http', 'RPC', 'query')
api_request._params = {
"ResourceOwnerId": resource_owner_id,
"InstanceId": instance_id,
"AccountName": account_name,
"SecurityToken": security_token,
"ResourceOwnerAccount": resource_owner_account,
"OwnerAccount": owner_account,
"OwnerId": owner_id}
return self._handle_request(api_request).result
def describe_parameter_modification_history(
self,
resource_owner_id=None,
security_token=None,
resource_owner_account=None,
owner_account=None,
end_time=None,
db_instance_id=None,
start_time=None,
owner_id=None,
node_id=None):
api_request = APIRequest('DescribeParameterModificationHistory',
'GET', 'http', 'RPC', 'query')
api_request._params = {
"ResourceOwnerId": resource_owner_id,
"SecurityToken": security_token,
"ResourceOwnerAccount": resource_owner_account,
"OwnerAccount": owner_account,
"EndTime": end_time,
"DBInstanceId": db_instance_id,
"StartTime": start_time,
"OwnerId": owner_id,
"NodeId": node_id}
return self._handle_request(api_request).result
def reset_account_password(
self,
resource_owner_id=None,
account_password=None,
instance_id=None,
account_name=None,
security_token=None,
resource_owner_account=None,
owner_account=None,
owner_id=None):
api_request = APIRequest('ResetAccountPassword', 'GET', 'http', 'RPC', 'query')
api_request._params = {
"ResourceOwnerId": resource_owner_id,
"AccountPassword": account_password,
"InstanceId": instance_id,
"AccountName": account_name,
"SecurityToken": security_token,
"ResourceOwnerAccount": resource_owner_account,
"OwnerAccount": owner_account,
"OwnerId": owner_id}
return self._handle_request(api_request).result
def describe_accounts(
self,
resource_owner_id=None,
instance_id=None,
account_name=None,
security_token=None,
resource_owner_account=None,
owner_account=None,
owner_id=None):
api_request = APIRequest('DescribeAccounts', 'GET', 'http', 'RPC', 'query')
api_request._params = {
"ResourceOwnerId": resource_owner_id,
"InstanceId": instance_id,
"AccountName": account_name,
"SecurityToken": security_token,
"ResourceOwnerAccount": resource_owner_account,
"OwnerAccount": owner_account,
"OwnerId": owner_id}
return self._handle_request(api_request).result
def grant_account_privilege(
self,
resource_owner_id=None,
instance_id=None,
account_name=None,
security_token=None,
resource_owner_account=None,
owner_account=None,
owner_id=None,
account_privilege=None):
api_request = APIRequest('GrantAccountPrivilege', 'GET', 'http', 'RPC', 'query')
api_request._params = {
"ResourceOwnerId": resource_owner_id,
"InstanceId": instance_id,
"AccountName": account_name,
"SecurityToken": security_token,
"ResourceOwnerAccount": resource_owner_account,
"OwnerAccount": owner_account,
"OwnerId": owner_id,
"AccountPrivilege": account_privilege}
return self._handle_request(api_request).result
def evaluate_fail_over_switch(
self,
resource_owner_id=None,
security_token=None,
resource_owner_account=None,
owner_account=None,
replica_id=None,
owner_id=None):
api_request = APIRequest('EvaluateFailOverSwitch', 'GET', 'http', 'RPC', 'query')
api_request._params = {
"ResourceOwnerId": resource_owner_id,
"SecurityToken": security_token,
"ResourceOwnerAccount": resource_owner_account,
"OwnerAccount": owner_account,
"ReplicaId": replica_id,
"OwnerId": owner_id}
return self._handle_request(api_request).result
def destroy_instance(
self,
resource_owner_id=None,
instance_id=None,
security_token=None,
resource_owner_account=None,
owner_account=None,
owner_id=None):
api_request = APIRequest('DestroyInstance', 'GET', 'http', 'RPC', 'query')
api_request._params = {
"ResourceOwnerId": resource_owner_id,
"InstanceId": instance_id,
"SecurityToken": security_token,
"ResourceOwnerAccount": resource_owner_account,
"OwnerAccount": owner_account,
"OwnerId": owner_id}
return self._handle_request(api_request).result
def modify_instance_vpc_auth_mode(
self,
resource_owner_id=None,
instance_id=None,
security_token=None,
resource_owner_account=None,
owner_account=None,
vpc_auth_mode=None,
owner_id=None):
api_request = APIRequest('ModifyInstanceVpcAuthMode', 'GET', 'http', 'RPC', 'query')
api_request._params = {
"ResourceOwnerId": resource_owner_id,
"InstanceId": instance_id,
"SecurityToken": security_token,
"ResourceOwnerAccount": resource_owner_account,
"OwnerAccount": owner_account,
"VpcAuthMode": vpc_auth_mode,
"OwnerId": owner_id}
return self._handle_request(api_request).result
def modify_db_instance_connection_string(
self,
resource_owner_id=None,
resource_owner_account=None,
owner_account=None,
new_connection_string=None,
owner_id=None,
ip_type=None,
current_connection_string=None,
security_token=None,
port=None,
db_instance_id=None):
api_request = APIRequest('ModifyDBInstanceConnectionString', 'GET', 'http', 'RPC', 'query')
api_request._params = {
"ResourceOwnerId": resource_owner_id,
"ResourceOwnerAccount": resource_owner_account,
"OwnerAccount": owner_account,
"NewConnectionString": new_connection_string,
"OwnerId": owner_id,
"IPType": ip_type,
"CurrentConnectionString": current_connection_string,
"SecurityToken": security_token,
"Port": port,
"DBInstanceId": db_instance_id}
return self._handle_request(api_request).result
def describe_rds_vswitchs(
self,
resource_owner_id=None,
security_token=None,
resource_owner_account=None,
owner_account=None,
vpc_id=None,
zone_id=None,
owner_id=None):
api_request = APIRequest('DescribeRdsVSwitchs', 'GET', 'http', 'RPC', 'query')
api_request._params = {
"ResourceOwnerId": resource_owner_id,
"SecurityToken": security_token,
"ResourceOwnerAccount": resource_owner_account,
"OwnerAccount": owner_account,
"VpcId": vpc_id,
"ZoneId": zone_id,
"OwnerId": owner_id}
return self._handle_request(api_request).result
def describe_rds_vpcs(
self,
resource_owner_id=None,
security_token=None,
resource_owner_account=None,
owner_account=None,
zone_id=None,
owner_id=None):
api_request = APIRequest('DescribeRdsVpcs', 'GET', 'http', 'RPC', 'query')
api_request._params = {
"ResourceOwnerId": resource_owner_id,
"SecurityToken": security_token,
"ResourceOwnerAccount": resource_owner_account,
"OwnerAccount": owner_account,
"ZoneId": zone_id,
"OwnerId": owner_id}
return self._handle_request(api_request).result
def describe_strategy(
self,
resource_owner_id=None,
security_token=None,
resource_owner_account=None,
owner_account=None,
replica_id=None,
owner_id=None):
api_request = APIRequest('DescribeStrategy', 'GET', 'http', 'RPC', 'query')
api_request._params = {
"ResourceOwnerId": resource_owner_id,
"SecurityToken": security_token,
"ResourceOwnerAccount": resource_owner_account,
"OwnerAccount": owner_account,
"ReplicaId": replica_id,
"OwnerId": owner_id}
return self._handle_request(api_request).result
def describe_replica_conflict_info(
self,
resource_owner_id=None,
resource_owner_account=None,
owner_account=None,
end_time=None,
start_time=None,
owner_id=None,
page_number=None,
security_token=None,
replica_id=None,
page_size=None):
api_request = APIRequest('DescribeReplicaConflictInfo', 'GET', 'http', 'RPC', 'query')
api_request._params = {
"ResourceOwnerId": resource_owner_id,
"ResourceOwnerAccount": resource_owner_account,
"OwnerAccount": owner_account,
"EndTime": end_time,
"StartTime": start_time,
"OwnerId": owner_id,
"PageNumber": page_number,
"SecurityToken": security_token,
"ReplicaId": replica_id,
"PageSize": page_size}
return self._handle_request(api_request).result
def create_static_verification(
self,
resource_owner_id=None,
security_token=None,
resource_owner_account=None,
owner_account=None,
replica_id=None,
destination_instance_id=None,
source_instance_id=None,
owner_id=None):
api_request = APIRequest('CreateStaticVerification', 'GET', 'http', 'RPC', 'query')
api_request._params = {
"ResourceOwnerId": resource_owner_id,
"SecurityToken": security_token,
"ResourceOwnerAccount": resource_owner_account,
"OwnerAccount": owner_account,
"ReplicaId": replica_id,
"DestinationInstanceId": destination_instance_id,
"SourceInstanceId": source_instance_id,
"OwnerId": owner_id}
return self._handle_request(api_request).result
def modify_replica_recovery_mode(
self,
resource_owner_id=None,
security_token=None,
resource_owner_account=None,
recovery_mode=None,
owner_account=None,
replica_id=None,
owner_id=None):
api_request = APIRequest('ModifyReplicaRecoveryMode', 'GET', 'http', 'RPC', 'query')
api_request._params = {
"ResourceOwnerId": resource_owner_id,
"SecurityToken": security_token,
"ResourceOwnerAccount": resource_owner_account,
"RecoveryMode": recovery_mode,
"OwnerAccount": owner_account,
"ReplicaId": replica_id,
"OwnerId": owner_id}
return self._handle_request(api_request).result
def describe_static_verification_list(
self,
resource_owner_id=None,
security_token=None,
resource_owner_account=None,
owner_account=None,
replica_id=None,
destination_instance_id=None,
source_instance_id=None,
owner_id=None):
api_request = APIRequest('DescribeStaticVerificationList', 'GET', 'http', 'RPC', 'query')
api_request._params = {
"ResourceOwnerId": resource_owner_id,
"SecurityToken": security_token,
"ResourceOwnerAccount": resource_owner_account,
"OwnerAccount": owner_account,
"ReplicaId": replica_id,
"DestinationInstanceId": destination_instance_id,
"SourceInstanceId": source_instance_id,
"OwnerId": owner_id}
return self._handle_request(api_request).result
def describe_verification_list(
self,
resource_owner_id=None,
resource_owner_account=None,
owner_account=None,
end_time=None,
start_time=None,
owner_id=None,
page_number=None,
security_token=None,
replica_id=None,
page_size=None):
api_request = APIRequest('DescribeVerificationList', 'GET', 'http', 'RPC', 'query')
api_request._params = {
"ResourceOwnerId": resource_owner_id,
"ResourceOwnerAccount": resource_owner_account,
"OwnerAccount": owner_account,
"EndTime": end_time,
"StartTime": start_time,
"OwnerId": owner_id,
"PageNumber": page_number,
"SecurityToken": security_token,
"ReplicaId": replica_id,
"PageSize": page_size}
return self._handle_request(api_request).result
def modify_replica_verification_mode(
self,
resource_owner_id=None,
verification_mode=None,
security_token=None,
resource_owner_account=None,
owner_account=None,
replica_id=None,
owner_id=None):
api_request = APIRequest('ModifyReplicaVerificationMode', 'GET', 'http', 'RPC', 'query')
api_request._params = {
"ResourceOwnerId": resource_owner_id,
"VerificationMode": verification_mode,
"SecurityToken": security_token,
"ResourceOwnerAccount": resource_owner_account,
"OwnerAccount": owner_account,
"ReplicaId": replica_id,
"OwnerId": owner_id}
return self._handle_request(api_request).result
def describe_instances_by_expire_time(
self,
resource_owner_id=None,
resource_owner_account=None,
owner_account=None,
owner_id=None,
page_number=None,
security_token=None,
has_expired_res=None,
page_size=None,
instance_type=None,
expire_period=None):
api_request = APIRequest('DescribeInstancesByExpireTime', 'GET', 'http', 'RPC', 'query')
api_request._params = {
"ResourceOwnerId": resource_owner_id,
"ResourceOwnerAccount": resource_owner_account,
"OwnerAccount": owner_account,
"OwnerId": owner_id,
"PageNumber": page_number,
"SecurityToken": security_token,
"HasExpiredRes": has_expired_res,
"PageSize": page_size,
"InstanceType": instance_type,
"ExpirePeriod": expire_period}
return self._handle_request(api_request).result
def modify_replica_relation(
self,
resource_owner_id=None,
security_token=None,
resource_owner_account=None,
owner_account=None,
replica_id=None,
owner_id=None):
api_request = APIRequest('ModifyReplicaRelation', 'GET', 'http', 'RPC', 'query')
api_request._params = {
"ResourceOwnerId": resource_owner_id,
"SecurityToken": security_token,
"ResourceOwnerAccount": resource_owner_account,
"OwnerAccount": owner_account,
"ReplicaId": replica_id,
"OwnerId": owner_id}
return self._handle_request(api_request).result
def modify_replica_mode(
self,
domain_mode=None,
resource_owner_id=None,
primary_instance_id=None,
replica_mode=None,
security_token=None,
resource_owner_account=None,
owner_account=None,
replica_id=None,
owner_id=None):
api_request = APIRequest('ModifyReplicaMode', 'GET', 'http', 'RPC', 'query')
api_request._params = {
"DomainMode": domain_mode,
"ResourceOwnerId": resource_owner_id,
"PrimaryInstanceId": primary_instance_id,
"ReplicaMode": replica_mode,
"SecurityToken": security_token,
"ResourceOwnerAccount": resource_owner_account,
"OwnerAccount": owner_account,
"ReplicaId": replica_id,
"OwnerId": owner_id}
return self._handle_request(api_request).result
def modify_guard_domain_mode(
self,
domain_mode=None,
resource_owner_id=None,
security_token=None,
resource_owner_account=None,
owner_account=None,
replica_id=None,
owner_id=None):
api_request = APIRequest('ModifyGuardDomainMode', 'GET', 'http', 'RPC', 'query')
api_request._params = {
"DomainMode": domain_mode,
"ResourceOwnerId": resource_owner_id,
"SecurityToken": security_token,
"ResourceOwnerAccount": resource_owner_account,
"OwnerAccount": owner_account,
"ReplicaId": replica_id,
"OwnerId": owner_id}
return self._handle_request(api_request).result
def modify_instance_ssl(
self,
resource_owner_id=None,
instance_id=None,
security_token=None,
resource_owner_account=None,
owner_account=None,
owner_id=None,
ssl_enabled=None):
api_request = APIRequest('ModifyInstanceSSL', 'GET', 'http', 'RPC', 'query')
api_request._params = {
"ResourceOwnerId": resource_owner_id,
"InstanceId": instance_id,
"SecurityToken": security_token,
"ResourceOwnerAccount": resource_owner_account,
"OwnerAccount": owner_account,
"OwnerId": owner_id,
"SSLEnabled": ssl_enabled}
return self._handle_request(api_request).result
def describe_instance_ssl(
self,
resource_owner_id=None,
instance_id=None,
security_token=None,
resource_owner_account=None,
owner_account=None,
owner_id=None):
api_request = APIRequest('DescribeInstanceSSL', 'GET', 'http', 'RPC', 'query')
api_request._params = {
"ResourceOwnerId": resource_owner_id,
"InstanceId": instance_id,
"SecurityToken": security_token,
"ResourceOwnerAccount": resource_owner_account,
"OwnerAccount": owner_account,
"OwnerId": owner_id}
return self._handle_request(api_request).result
def describe_logic_instance_topology(
self,
resource_owner_id=None,
instance_id=None,
security_token=None,
resource_owner_account=None,
owner_account=None,
owner_id=None):
api_request = APIRequest('DescribeLogicInstanceTopology', 'GET', 'http', 'RPC', 'query')
api_request._params = {
"ResourceOwnerId": resource_owner_id,
"InstanceId": instance_id,
"SecurityToken": security_token,
"ResourceOwnerAccount": resource_owner_account,
"OwnerAccount": owner_account,
"OwnerId": owner_id}
return self._handle_request(api_request).result
def modify_instance_spec(
self,
resource_owner_id=None,
auto_pay=None,
from_app=None,
resource_owner_account=None,
owner_account=None,
coupon_no=None,
owner_id=None,
instance_class=None,
instance_id=None,
security_token=None,
effective_time=None,
force_upgrade=None,
business_info=None):
api_request = APIRequest('ModifyInstanceSpec', 'GET', 'http', 'RPC', 'query')
api_request._params = {
"ResourceOwnerId": resource_owner_id,
"AutoPay": auto_pay,
"FromApp": from_app,
"ResourceOwnerAccount": resource_owner_account,
"OwnerAccount": owner_account,
"CouponNo": coupon_no,
"OwnerId": owner_id,
"InstanceClass": instance_class,
"InstanceId": instance_id,
"SecurityToken": security_token,
"EffectiveTime": effective_time,
"ForceUpgrade": force_upgrade,
"BusinessInfo": business_info}
return self._handle_request(api_request).result
def modify_instance_spec_pre_check(
self,
resource_owner_id=None,
instance_id=None,
security_token=None,
resource_owner_account=None,
owner_account=None,
owner_id=None,
target_instance_class=None):
api_request = APIRequest('ModifyInstanceSpecPreCheck', 'GET', 'http', 'RPC', 'query')
api_request._params = {
"ResourceOwnerId": resource_owner_id,
"InstanceId": instance_id,
"SecurityToken": security_token,
"ResourceOwnerAccount": resource_owner_account,
"OwnerAccount": owner_account,
"OwnerId": owner_id,
"TargetInstanceClass": target_instance_class}
return self._handle_request(api_request).result
def modify_instance_net_expire_time(
self,
resource_owner_id=None,
instance_id=None,
security_token=None,
resource_owner_account=None,
connection_string=None,
classic_expired_days=None,
owner_account=None,
owner_id=None):
api_request = APIRequest('ModifyInstanceNetExpireTime', 'GET', 'http', 'RPC', 'query')
api_request._params = {
"ResourceOwnerId": resource_owner_id,
"InstanceId": instance_id,
"SecurityToken": security_token,
"ResourceOwnerAccount": resource_owner_account,
"ConnectionString": connection_string,
"ClassicExpiredDays": classic_expired_days,
"OwnerAccount": owner_account,
"OwnerId": owner_id}
return self._handle_request(api_request).result
def release_replica(
self,
resource_owner_id=None,
security_token=None,
resource_owner_account=None,
owner_account=None,
replica_id=None,
owner_id=None):
api_request = APIRequest('ReleaseReplica', 'GET', 'http', 'RPC', 'query')
api_request._params = {
"ResourceOwnerId": resource_owner_id,
"SecurityToken": security_token,
"ResourceOwnerAccount": resource_owner_account,
"OwnerAccount": owner_account,
"ReplicaId": replica_id,
"OwnerId": owner_id}
return self._handle_request(api_request).result
def modify_replica_description(
self,
replica_description=None,
resource_owner_id=None,
security_token=None,
resource_owner_account=None,
owner_account=None,
replica_id=None,
owner_id=None):
api_request = APIRequest('ModifyReplicaDescription', 'GET', 'http', 'RPC', 'query')
api_request._params = {
"ReplicaDescription": replica_description,
"ResourceOwnerId": resource_owner_id,
"SecurityToken": security_token,
"ResourceOwnerAccount": resource_owner_account,
"OwnerAccount": owner_account,
"ReplicaId": replica_id,
"OwnerId": owner_id}
return self._handle_request(api_request).result
def describe_replica_usage(
self,
resource_owner_id=None,
source_db_instance_id=None,
destination_db_instance_id=None,
security_token=None,
resource_owner_account=None,
owner_account=None,
replica_id=None,
owner_id=None):
api_request = APIRequest('DescribeReplicaUsage', 'GET', 'http', 'RPC', 'query')
api_request._params = {
"ResourceOwnerId": resource_owner_id,
"SourceDBInstanceId": source_db_instance_id,
"DestinationDBInstanceId": destination_db_instance_id,
"SecurityToken": security_token,
"ResourceOwnerAccount": resource_owner_account,
"OwnerAccount": owner_account,
"ReplicaId": replica_id,
"OwnerId": owner_id}
return self._handle_request(api_request).result
def describe_replicas(
self,
resource_owner_id=None,
security_token=None,
resource_owner_account=None,
attach_db_instance_data=None,
owner_account=None,
replica_id=None,
page_size=None,
owner_id=None,
page_number=None):
api_request = APIRequest('DescribeReplicas', 'GET', 'http', 'RPC', 'query')
api_request._params = {
"ResourceOwnerId": resource_owner_id,
"SecurityToken": security_token,
"ResourceOwnerAccount": resource_owner_account,
"AttachDbInstanceData": attach_db_instance_data,
"OwnerAccount": owner_account,
"ReplicaId": replica_id,
"PageSize": page_size,
"OwnerId": owner_id,
"PageNumber": page_number}
return self._handle_request(api_request).result
def describe_replica_performance(
self,
resource_owner_id=None,
destination_db_instance_id=None,
resource_owner_account=None,
owner_account=None,
end_time=None,
start_time=None,
owner_id=None,
source_db_instance_id=None,
security_token=None,
replica_id=None,
key=None):
api_request = APIRequest('DescribeReplicaPerformance', 'GET', 'http', 'RPC', 'query')
api_request._params = {
"ResourceOwnerId": resource_owner_id,
"DestinationDBInstanceId": destination_db_instance_id,
"ResourceOwnerAccount": resource_owner_account,
"OwnerAccount": owner_account,
"EndTime": end_time,
"StartTime": start_time,
"OwnerId": owner_id,
"SourceDBInstanceId": source_db_instance_id,
"SecurityToken": security_token,
"ReplicaId": replica_id,
"Key": key}
return self._handle_request(api_request).result
def describe_replica_initialize_progress(
self,
resource_owner_id=None,
security_token=None,
resource_owner_account=None,
owner_account=None,
replica_id=None,
owner_id=None):
api_request = APIRequest('DescribeReplicaInitializeProgress', 'GET', 'http', 'RPC', 'query')
api_request._params = {
"ResourceOwnerId": resource_owner_id,
"SecurityToken": security_token,
"ResourceOwnerAccount": resource_owner_account,
"OwnerAccount": owner_account,
"ReplicaId": replica_id,
"OwnerId": owner_id}
return self._handle_request(api_request).result
def modify_instance_minor_version(
self,
resource_owner_id=None,
instance_id=None,
security_token=None,
resource_owner_account=None,
minorversion=None,
owner_account=None,
owner_id=None,
effect_time=None):
api_request = APIRequest('ModifyInstanceMinorVersion', 'GET', 'http', 'RPC', 'query')
api_request._params = {
"ResourceOwnerId": resource_owner_id,
"InstanceId": instance_id,
"SecurityToken": security_token,
"ResourceOwnerAccount": resource_owner_account,
"Minorversion": minorversion,
"OwnerAccount": owner_account,
"OwnerId": owner_id,
"EffectTime": effect_time}
return self._handle_request(api_request).result
def modify_certification(
self,
resource_owner_id=None,
instance_id=None,
security_token=None,
resource_owner_account=None,
owner_account=None,
owner_id=None,
no_certification=None):
api_request = APIRequest('ModifyCertification', 'GET', 'http', 'RPC', 'query')
api_request._params = {
"ResourceOwnerId": resource_owner_id,
"InstanceId": instance_id,
"SecurityToken": security_token,
"ResourceOwnerAccount": resource_owner_account,
"OwnerAccount": owner_account,
"OwnerId": owner_id,
"NoCertification": no_certification}
return self._handle_request(api_request).result
def describe_certification(
self,
resource_owner_id=None,
instance_id=None,
security_token=None,
resource_owner_account=None,
owner_account=None,
owner_id=None,
parameters=None):
api_request = APIRequest('DescribeCertification', 'GET', 'http', 'RPC', 'query')
api_request._params = {
"ResourceOwnerId": resource_owner_id,
"InstanceId": instance_id,
"SecurityToken": security_token,
"ResourceOwnerAccount": resource_owner_account,
"OwnerAccount": owner_account,
"OwnerId": owner_id,
"Parameters": parameters}
return self._handle_request(api_request).result
def describe_db_instance_net_info(
self,
resource_owner_id=None,
instance_id=None,
security_token=None,
resource_owner_account=None,
owner_account=None,
owner_id=None):
api_request = APIRequest('DescribeDBInstanceNetInfo', 'GET', 'http', 'RPC', 'query')
api_request._params = {
"ResourceOwnerId": resource_owner_id,
"InstanceId": instance_id,
"SecurityToken": security_token,
"ResourceOwnerAccount": resource_owner_account,
"OwnerAccount": owner_account,
"OwnerId": owner_id}
return self._handle_request(api_request).result
def describe_monthly_service_status_detail(
self,
resource_owner_id=None,
instance_id=None,
month=None,
security_token=None,
resource_owner_account=None,
owner_account=None,
owner_id=None):
api_request = APIRequest('DescribeMonthlyServiceStatusDetail',
'GET', 'http', 'RPC', 'query')
api_request._params = {
"ResourceOwnerId": resource_owner_id,
"InstanceId": instance_id,
"Month": month,
"SecurityToken": security_token,
"ResourceOwnerAccount": resource_owner_account,
"OwnerAccount": owner_account,
"OwnerId": owner_id}
return self._handle_request(api_request).result
def describe_monthly_service_status(
self,
resource_owner_id=None,
month=None,
security_token=None,
resource_owner_account=None,
instance_ids=None,
owner_account=None,
owner_id=None):
api_request = APIRequest('DescribeMonthlyServiceStatus', 'GET', 'http', 'RPC', 'query')
api_request._params = {
"ResourceOwnerId": resource_owner_id,
"Month": month,
"SecurityToken": security_token,
"ResourceOwnerAccount": resource_owner_account,
"InstanceIds": instance_ids,
"OwnerAccount": owner_account,
"OwnerId": owner_id}
return self._handle_request(api_request).result
def modify_security_ips(
self,
resource_owner_id=None,
modify_mode=None,
resource_owner_account=None,
owner_account=None,
security_ips=None,
owner_id=None,
security_ip_group_name=None,
instance_id=None,
security_token=None,
security_ip_group_attribute=None):
api_request = APIRequest('ModifySecurityIps', 'GET', 'http', 'RPC', 'query')
api_request._params = {
"ResourceOwnerId": resource_owner_id,
"ModifyMode": modify_mode,
"ResourceOwnerAccount": resource_owner_account,
"OwnerAccount": owner_account,
"SecurityIps": security_ips,
"OwnerId": owner_id,
"SecurityIpGroupName": security_ip_group_name,
"InstanceId": instance_id,
"SecurityToken": security_token,
"SecurityIpGroupAttribute": security_ip_group_attribute}
return self._handle_request(api_request).result
def describe_security_ips(
self,
resource_owner_id=None,
instance_id=None,
security_token=None,
resource_owner_account=None,
owner_account=None,
owner_id=None):
api_request = APIRequest('DescribeSecurityIps', 'GET', 'http', 'RPC', 'query')
api_request._params = {
"ResourceOwnerId": resource_owner_id,
"InstanceId": instance_id,
"SecurityToken": security_token,
"ResourceOwnerAccount": resource_owner_account,
"OwnerAccount": owner_account,
"OwnerId": owner_id}
return self._handle_request(api_request).result
def modify_instance_maintain_time(
self,
resource_owner_id=None,
instance_id=None,
security_token=None,
resource_owner_account=None,
maintain_start_time=None,
owner_account=None,
owner_id=None,
maintain_end_time=None):
api_request = APIRequest('ModifyInstanceMaintainTime', 'GET', 'http', 'RPC', 'query')
api_request._params = {
"ResourceOwnerId": resource_owner_id,
"InstanceId": instance_id,
"SecurityToken": security_token,
"ResourceOwnerAccount": resource_owner_account,
"MaintainStartTime": maintain_start_time,
"OwnerAccount": owner_account,
"OwnerId": owner_id,
"MaintainEndTime": maintain_end_time}
return self._handle_request(api_request).result
def describe_instance_attribute(
self,
resource_owner_id=None,
instance_id=None,
security_token=None,
resource_owner_account=None,
owner_account=None,
owner_id=None):
api_request = APIRequest('DescribeInstanceAttribute', 'GET', 'http', 'RPC', 'query')
api_request._params = {
"ResourceOwnerId": resource_owner_id,
"InstanceId": instance_id,
"SecurityToken": security_token,
"ResourceOwnerAccount": resource_owner_account,
"OwnerAccount": owner_account,
"OwnerId": owner_id}
return self._handle_request(api_request).result
def restore_instance(
self,
resource_owner_id=None,
instance_id=None,
security_token=None,
resource_owner_account=None,
owner_account=None,
backup_id=None,
owner_id=None):
api_request = APIRequest('RestoreInstance', 'GET', 'http', 'RPC', 'query')
api_request._params = {
"ResourceOwnerId": resource_owner_id,
"InstanceId": instance_id,
"SecurityToken": security_token,
"ResourceOwnerAccount": resource_owner_account,
"OwnerAccount": owner_account,
"BackupId": backup_id,
"OwnerId": owner_id}
return self._handle_request(api_request).result
def modify_backup_policy(
self,
preferred_backup_time=None,
preferred_backup_period=None,
resource_owner_id=None,
instance_id=None,
security_token=None,
resource_owner_account=None,
owner_account=None,
owner_id=None):
api_request = APIRequest('ModifyBackupPolicy', 'GET', 'http', 'RPC', 'query')
api_request._params = {
"PreferredBackupTime": preferred_backup_time,
"PreferredBackupPeriod": preferred_backup_period,
"ResourceOwnerId": resource_owner_id,
"InstanceId": instance_id,
"SecurityToken": security_token,
"ResourceOwnerAccount": resource_owner_account,
"OwnerAccount": owner_account,
"OwnerId": owner_id}
return self._handle_request(api_request).result
def describe_backups(
self,
resource_owner_id=None,
resource_owner_account=None,
owner_account=None,
backup_id=None,
end_time=None,
start_time=None,
owner_id=None,
page_number=None,
instance_id=None,
security_token=None,
page_size=None):
api_request = APIRequest('DescribeBackups', 'GET', 'http', 'RPC', 'query')
api_request._params = {
"ResourceOwnerId": resource_owner_id,
"ResourceOwnerAccount": resource_owner_account,
"OwnerAccount": owner_account,
"BackupId": backup_id,
"EndTime": end_time,
"StartTime": start_time,
"OwnerId": owner_id,
"PageNumber": page_number,
"InstanceId": instance_id,
"SecurityToken": security_token,
"PageSize": page_size}
return self._handle_request(api_request).result
def describe_backup_policy(
self,
resource_owner_id=None,
instance_id=None,
security_token=None,
resource_owner_account=None,
owner_account=None,
owner_id=None):
api_request = APIRequest('DescribeBackupPolicy', 'GET', 'http', 'RPC', 'query')
api_request._params = {
"ResourceOwnerId": resource_owner_id,
"InstanceId": instance_id,
"SecurityToken": security_token,
"ResourceOwnerAccount": resource_owner_account,
"OwnerAccount": owner_account,
"OwnerId": owner_id}
return self._handle_request(api_request).result
def create_backup(
self,
resource_owner_id=None,
instance_id=None,
security_token=None,
resource_owner_account=None,
owner_account=None,
owner_id=None):
api_request = APIRequest('CreateBackup', 'GET', 'http', 'RPC', 'query')
api_request._params = {
"ResourceOwnerId": resource_owner_id,
"InstanceId": instance_id,
"SecurityToken": security_token,
"ResourceOwnerAccount": resource_owner_account,
"OwnerAccount": owner_account,
"OwnerId": owner_id}
return self._handle_request(api_request).result
def switch_network(
self,
resource_owner_id=None,
resource_owner_account=None,
owner_account=None,
owner_id=None,
vswitch_id=None,
instance_id=None,
security_token=None,
target_network_type=None,
retain_classic=None,
classic_expired_days=None,
vpc_id=None):
api_request = APIRequest('SwitchNetwork', 'GET', 'http', 'RPC', 'query')
api_request._params = {
"ResourceOwnerId": resource_owner_id,
"ResourceOwnerAccount": resource_owner_account,
"OwnerAccount": owner_account,
"OwnerId": owner_id,
"VSwitchId": vswitch_id,
"InstanceId": instance_id,
"SecurityToken": security_token,
"TargetNetworkType": target_network_type,
"RetainClassic": retain_classic,
"ClassicExpiredDays": classic_expired_days,
"VpcId": vpc_id}
return self._handle_request(api_request).result
def describe_zones(
self,
resource_owner_id=None,
security_token=None,
resource_owner_account=None,
owner_account=None,
accept_language=None,
owner_id=None):
api_request = APIRequest('DescribeZones', 'GET', 'http', 'RPC', 'query')
api_request._params = {
"ResourceOwnerId": resource_owner_id,
"SecurityToken": security_token,
"ResourceOwnerAccount": resource_owner_account,
"OwnerAccount": owner_account,
"AcceptLanguage": accept_language,
"OwnerId": owner_id}
return self._handle_request(api_request).result
def delete_snapshot(
self,
resource_owner_id=None,
resource_owner_account=None,
owner_account=None,
owner_id=None):
api_request = APIRequest('DeleteSnapshot', 'GET', 'http', 'RPC', 'query')
api_request._params = {
"ResourceOwnerId": resource_owner_id,
"ResourceOwnerAccount": resource_owner_account,
"OwnerAccount": owner_account,
"OwnerId": owner_id}
return self._handle_request(api_request).result
def create_temp_instance(
self,
resource_owner_id=None,
resource_owner_account=None,
owner_account=None,
owner_id=None):
api_request = APIRequest('CreateTempInstance', 'GET', 'http', 'RPC', 'query')
api_request._params = {
"ResourceOwnerId": resource_owner_id,
"ResourceOwnerAccount": resource_owner_account,
"OwnerAccount": owner_account,
"OwnerId": owner_id}
return self._handle_request(api_request).result
def create_snapshot(
self,
resource_owner_id=None,
resource_owner_account=None,
owner_account=None,
owner_id=None):
api_request = APIRequest('CreateSnapshot', 'GET', 'http', 'RPC', 'query')
api_request._params = {
"ResourceOwnerId": resource_owner_id,
"ResourceOwnerAccount": resource_owner_account,
"OwnerAccount": owner_account,
"OwnerId": owner_id}
return self._handle_request(api_request).result
def delete_temp_instance(
self,
resource_owner_id=None,
resource_owner_account=None,
owner_account=None,
owner_id=None):
api_request = APIRequest('DeleteTempInstance', 'GET', 'http', 'RPC', 'query')
api_request._params = {
"ResourceOwnerId": resource_owner_id,
"ResourceOwnerAccount": resource_owner_account,
"OwnerAccount": owner_account,
"OwnerId": owner_id}
return self._handle_request(api_request).result
def delete_snapshot_settings(
self,
resource_owner_id=None,
resource_owner_account=None,
owner_account=None,
owner_id=None):
api_request = APIRequest('DeleteSnapshotSettings', 'GET', 'http', 'RPC', 'query')
api_request._params = {
"ResourceOwnerId": resource_owner_id,
"ResourceOwnerAccount": resource_owner_account,
"OwnerAccount": owner_account,
"OwnerId": owner_id}
return self._handle_request(api_request).result
def get_snapshot_settings(
self,
resource_owner_id=None,
instance_id=None,
resource_owner_account=None,
owner_account=None,
owner_id=None):
api_request = APIRequest('GetSnapshotSettings', 'GET', 'http', 'RPC', 'query')
api_request._params = {
"ResourceOwnerId": resource_owner_id,
"InstanceId": instance_id,
"ResourceOwnerAccount": resource_owner_account,
"OwnerAccount": owner_account,
"OwnerId": owner_id}
return self._handle_request(api_request).result
def describe_temp_instance(
self,
resource_owner_id=None,
resource_owner_account=None,
owner_account=None,
owner_id=None):
api_request = APIRequest('DescribeTempInstance', 'GET', 'http', 'RPC', 'query')
api_request._params = {
"ResourceOwnerId": resource_owner_id,
"ResourceOwnerAccount": resource_owner_account,
"OwnerAccount": owner_account,
"OwnerId": owner_id}
return self._handle_request(api_request).result
def describe_snapshots(
self,
resource_owner_id=None,
resource_owner_account=None,
owner_account=None,
end_time=None,
begin_time=None,
owner_id=None):
api_request = APIRequest('DescribeSnapshots', 'GET', 'http', 'RPC', 'query')
api_request._params = {
"ResourceOwnerId": resource_owner_id,
"ResourceOwnerAccount": resource_owner_account,
"OwnerAccount": owner_account,
"EndTime": end_time,
"BeginTime": begin_time,
"OwnerId": owner_id}
return self._handle_request(api_request).result
def restore_snapshot(
self,
resource_owner_id=None,
resource_owner_account=None,
owner_account=None,
owner_id=None):
api_request = APIRequest('RestoreSnapshot', 'GET', 'http', 'RPC', 'query')
api_request._params = {
"ResourceOwnerId": resource_owner_id,
"ResourceOwnerAccount": resource_owner_account,
"OwnerAccount": owner_account,
"OwnerId": owner_id}
return self._handle_request(api_request).result
def query_task(
self,
resource_owner_id=None,
resource_owner_account=None,
owner_account=None,
owner_id=None):
api_request = APIRequest('QueryTask', 'GET', 'http', 'RPC', 'query')
api_request._params = {
"ResourceOwnerId": resource_owner_id,
"ResourceOwnerAccount": resource_owner_account,
"OwnerAccount": owner_account,
"OwnerId": owner_id}
return self._handle_request(api_request).result
def switch_temp_instance(
self,
resource_owner_id=None,
resource_owner_account=None,
owner_account=None,
owner_id=None):
api_request = APIRequest('SwitchTempInstance', 'GET', 'http', 'RPC', 'query')
api_request._params = {
"ResourceOwnerId": resource_owner_id,
"ResourceOwnerAccount": resource_owner_account,
"OwnerAccount": owner_account,
"OwnerId": owner_id}
return self._handle_request(api_request).result
def set_snapshot_settings(
self,
resource_owner_id=None,
resource_owner_account=None,
owner_account=None,
owner_id=None):
api_request = APIRequest('SetSnapshotSettings', 'GET', 'http', 'RPC', 'query')
api_request._params = {
"ResourceOwnerId": resource_owner_id,
"ResourceOwnerAccount": resource_owner_account,
"OwnerAccount": owner_account,
"OwnerId": owner_id}
return self._handle_request(api_request).result
def renew_multi_instance(
self,
resource_owner_id=None,
period=None,
auto_pay=None,
from_app=None,
resource_owner_account=None,
owner_account=None,
coupon_no=None,
owner_id=None,
security_token=None,
instance_ids=None,
business_info=None):
api_request = APIRequest('RenewMultiInstance', 'GET', 'http', 'RPC', 'query')
api_request._params = {
"ResourceOwnerId": resource_owner_id,
"Period": period,
"AutoPay": auto_pay,
"FromApp": from_app,
"ResourceOwnerAccount": resource_owner_account,
"OwnerAccount": owner_account,
"CouponNo": coupon_no,
"OwnerId": owner_id,
"SecurityToken": security_token,
"InstanceIds": instance_ids,
"BusinessInfo": business_info}
return self._handle_request(api_request).result
def transform_to_pre_paid(
self,
resource_owner_id=None,
period=None,
instance_id=None,
auto_pay=None,
from_app=None,
security_token=None,
resource_owner_account=None,
owner_account=None,
owner_id=None):
api_request = APIRequest('TransformToPrePaid', 'GET', 'http', 'RPC', 'query')
api_request._params = {
"ResourceOwnerId": resource_owner_id,
"Period": period,
"InstanceId": instance_id,
"AutoPay": auto_pay,
"FromApp": from_app,
"SecurityToken": security_token,
"ResourceOwnerAccount": resource_owner_account,
"OwnerAccount": owner_account,
"OwnerId": owner_id}
return self._handle_request(api_request).result
def renew_instance(
self,
resource_owner_id=None,
period=None,
auto_pay=None,
from_app=None,
resource_owner_account=None,
owner_account=None,
coupon_no=None,
owner_id=None,
instance_class=None,
capacity=None,
instance_id=None,
security_token=None,
force_upgrade=None,
business_info=None):
api_request = APIRequest('RenewInstance', 'GET', 'http', 'RPC', 'query')
api_request._params = {
"ResourceOwnerId": resource_owner_id,
"Period": period,
"AutoPay": auto_pay,
"FromApp": from_app,
"ResourceOwnerAccount": resource_owner_account,
"OwnerAccount": owner_account,
"CouponNo": coupon_no,
"OwnerId": owner_id,
"InstanceClass": instance_class,
"Capacity": capacity,
"InstanceId": instance_id,
"SecurityToken": security_token,
"ForceUpgrade": force_upgrade,
"BusinessInfo": business_info}
return self._handle_request(api_request).result
def verify_password(
self,
resource_owner_id=None,
password=None,
instance_id=None,
security_token=None,
resource_owner_account=None,
owner_account=None,
owner_id=None):
api_request = APIRequest('VerifyPassword', 'GET', 'http', 'RPC', 'query')
api_request._params = {
"ResourceOwnerId": resource_owner_id,
"Password": password,
"InstanceId": instance_id,
"SecurityToken": security_token,
"ResourceOwnerAccount": resource_owner_account,
"OwnerAccount": owner_account,
"OwnerId": owner_id}
return self._handle_request(api_request).result
def modify_instance_config(
self,
resource_owner_id=None,
instance_id=None,
security_token=None,
resource_owner_account=None,
owner_account=None,
owner_id=None,
config=None):
api_request = APIRequest('ModifyInstanceConfig', 'GET', 'http', 'RPC', 'query')
api_request._params = {
"ResourceOwnerId": resource_owner_id,
"InstanceId": instance_id,
"SecurityToken": security_token,
"ResourceOwnerAccount": resource_owner_account,
"OwnerAccount": owner_account,
"OwnerId": owner_id,
"Config": config}
return self._handle_request(api_request).result
def modify_instance_attribute(
self,
resource_owner_id=None,
instance_id=None,
instance_name=None,
security_token=None,
resource_owner_account=None,
owner_account=None,
owner_id=None,
new_password=None):
api_request = APIRequest('ModifyInstanceAttribute', 'GET', 'http', 'RPC', 'query')
api_request._params = {
"ResourceOwnerId": resource_owner_id,
"InstanceId": instance_id,
"InstanceName": instance_name,
"SecurityToken": security_token,
"ResourceOwnerAccount": resource_owner_account,
"OwnerAccount": owner_account,
"OwnerId": owner_id,
"NewPassword": new_password}
return self._handle_request(api_request).result
def flush_instance(
self,
resource_owner_id=None,
instance_id=None,
security_token=None,
resource_owner_account=None,
owner_account=None,
owner_id=None):
api_request = APIRequest('FlushInstance', 'GET', 'http', 'RPC', 'query')
api_request._params = {
"ResourceOwnerId": resource_owner_id,
"InstanceId": instance_id,
"SecurityToken": security_token,
"ResourceOwnerAccount": resource_owner_account,
"OwnerAccount": owner_account,
"OwnerId": owner_id}
return self._handle_request(api_request).result
def describe_regions(
self,
resource_owner_id=None,
security_token=None,
resource_owner_account=None,
owner_account=None,
accept_language=None,
owner_id=None):
api_request = APIRequest('DescribeRegions', 'GET', 'http', 'RPC', 'query')
api_request._params = {
"ResourceOwnerId": resource_owner_id,
"SecurityToken": security_token,
"ResourceOwnerAccount": resource_owner_account,
"OwnerAccount": owner_account,
"AcceptLanguage": accept_language,
"OwnerId": owner_id}
return self._handle_request(api_request).result
def describe_monitor_items(
self,
resource_owner_id=None,
security_token=None,
resource_owner_account=None,
owner_account=None,
owner_id=None):
api_request = APIRequest('DescribeMonitorItems', 'GET', 'http', 'RPC', 'query')
api_request._params = {
"ResourceOwnerId": resource_owner_id,
"SecurityToken": security_token,
"ResourceOwnerAccount": resource_owner_account,
"OwnerAccount": owner_account,
"OwnerId": owner_id}
return self._handle_request(api_request).result
def describe_instances(
self,
resource_owner_id=None,
instance_status=None,
resource_owner_account=None,
owner_account=None,
search_key=None,
network_type=None,
engine_version=None,
owner_id=None,
instance_class=None,
page_number=None,
vswitch_id=None,
expired=None,
security_token=None,
instance_ids=None,
architecture_type=None,
vpc_id=None,
page_size=None,
instance_type=None,
zone_id=None,
charge_type=None,
list_of_tag=None):
api_request = APIRequest('DescribeInstances', 'GET', 'http', 'RPC', 'query')
api_request._params = {
"ResourceOwnerId": resource_owner_id,
"InstanceStatus": instance_status,
"ResourceOwnerAccount": resource_owner_account,
"OwnerAccount": owner_account,
"SearchKey": search_key,
"NetworkType": network_type,
"EngineVersion": engine_version,
"OwnerId": owner_id,
"InstanceClass": instance_class,
"PageNumber": page_number,
"VSwitchId": vswitch_id,
"Expired": expired,
"SecurityToken": security_token,
"InstanceIds": instance_ids,
"ArchitectureType": architecture_type,
"VpcId": vpc_id,
"PageSize": page_size,
"InstanceType": instance_type,
"ZoneId": zone_id,
"ChargeType": charge_type,
"Tag": list_of_tag}
repeat_info = {"Tag": ('Tag', 'list', 'dict', [('Value', 'str', None, None),
('Key', 'str', None, None),
]),
}
verify_params(api_request._params, repeat_info)
return self._handle_request(api_request).result
def describe_instance_config(
self,
resource_owner_id=None,
instance_id=None,
security_token=None,
resource_owner_account=None,
owner_account=None,
owner_id=None):
api_request = APIRequest('DescribeInstanceConfig', 'GET', 'http', 'RPC', 'query')
api_request._params = {
"ResourceOwnerId": resource_owner_id,
"InstanceId": instance_id,
"SecurityToken": security_token,
"ResourceOwnerAccount": resource_owner_account,
"OwnerAccount": owner_account,
"OwnerId": owner_id}
return self._handle_request(api_request).result
def describe_history_monitor_values(
self,
resource_owner_id=None,
resource_owner_account=None,
owner_account=None,
end_time=None,
start_time=None,
owner_id=None,
instance_id=None,
security_token=None,
interval_for_history=None,
node_id=None,
monitor_keys=None):
api_request = APIRequest('DescribeHistoryMonitorValues', 'GET', 'http', 'RPC', 'query')
api_request._params = {
"ResourceOwnerId": resource_owner_id,
"ResourceOwnerAccount": resource_owner_account,
"OwnerAccount": owner_account,
"EndTime": end_time,
"StartTime": start_time,
"OwnerId": owner_id,
"InstanceId": instance_id,
"SecurityToken": security_token,
"IntervalForHistory": interval_for_history,
"NodeId": node_id,
"MonitorKeys": monitor_keys}
return self._handle_request(api_request).result
def delete_instance(
self,
resource_owner_id=None,
instance_id=None,
security_token=None,
resource_owner_account=None,
owner_account=None,
owner_id=None):
api_request = APIRequest('DeleteInstance', 'GET', 'http', 'RPC', 'query')
api_request._params = {
"ResourceOwnerId": resource_owner_id,
"InstanceId": instance_id,
"SecurityToken": security_token,
"ResourceOwnerAccount": resource_owner_account,
"OwnerAccount": owner_account,
"OwnerId": owner_id}
return self._handle_request(api_request).result
def create_instance(
self,
resource_owner_id=None,
node_type=None,
coupon_no=None,
network_type=None,
engine_version=None,
auto_use_coupon=None,
instance_class=None,
capacity=None,
password=None,
security_token=None,
instance_type=None,
business_info=None,
auto_renew_period=None,
period=None,
resource_owner_account=None,
src_db_instance_id=None,
owner_account=None,
backup_id=None,
owner_id=None,
token=None,
vswitch_id=None,
private_ip_address=None,
instance_name=None,
auto_renew=None,
vpc_id=None,
zone_id=None,
charge_type=None,
config=None):
api_request = APIRequest('CreateInstance', 'GET', 'http', 'RPC', 'query')
api_request._params = {
"ResourceOwnerId": resource_owner_id,
"NodeType": node_type,
"CouponNo": coupon_no,
"NetworkType": network_type,
"EngineVersion": engine_version,
"AutoUseCoupon": auto_use_coupon,
"InstanceClass": instance_class,
"Capacity": capacity,
"Password": password,
"SecurityToken": security_token,
"InstanceType": instance_type,
"BusinessInfo": business_info,
"AutoRenewPeriod": auto_renew_period,
"Period": period,
"ResourceOwnerAccount": resource_owner_account,
"SrcDBInstanceId": src_db_instance_id,
"OwnerAccount": owner_account,
"BackupId": backup_id,
"OwnerId": owner_id,
"Token": token,
"VSwitchId": vswitch_id,
"PrivateIpAddress": private_ip_address,
"InstanceName": instance_name,
"AutoRenew": auto_renew,
"VpcId": vpc_id,
"ZoneId": zone_id,
"ChargeType": charge_type,
"Config": config}
return self._handle_request(api_request).result
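# Hedged usage sketch (not part of the original file): shows how the generated
# methods above are typically driven. `RedisApiClient` and `client_config` are
# hypothetical names standing in for the surrounding client class and its
# credential/region configuration, both of which are defined outside this excerpt.
client = RedisApiClient(client_config)  # hypothetical constructor
creation = client.create_instance(
    instance_class="redis.master.small.default",  # hypothetical instance class
    charge_type="PostPaid",
    zone_id="cn-hangzhou-b",
    password="ExamplePassw0rd!",
)
client.delete_instance(instance_id=creation.get("InstanceId"))  # key name assumed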
| 38.427151 | 100 | 0.584732 | 9,062 | 100,487 | 6.109799 | 0.0597 | 0.108007 | 0.062311 | 0.060307 | 0.852367 | 0.842831 | 0.814276 | 0.778027 | 0.762006 | 0.747142 | 0 | 0.000236 | 0.325773 | 100,487 | 2,614 | 101 | 38.441852 | 0.816977 | 0.005722 | 0 | 0.832393 | 0 | 0 | 0.157731 | 0.017538 | 0 | 0 | 0 | 0 | 0 | 1 | 0.046737 | false | 0.007252 | 0.001209 | 0 | 0.094682 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
4f94a23ca7e19b56160d9eccd3849d5a44bc3779 | 520 | py | Python | train_mosmed_timm-regnetx_002_clahe.py | BrunoKrinski/segtool | cb604b5f38104c43a76450136e37c3d1c4b6d275 | ["MIT"] | null | null | null | train_mosmed_timm-regnetx_002_clahe.py | BrunoKrinski/segtool | cb604b5f38104c43a76450136e37c3d1c4b6d275 | ["MIT"] | null | null | null | train_mosmed_timm-regnetx_002_clahe.py | BrunoKrinski/segtool | cb604b5f38104c43a76450136e37c3d1c4b6d275 | ["MIT"] | null | null | null |
import os

# Train each cross-validation fold sequentially, one config file per fold.
commands = [
    "python main.py --configs configs/train_mosmed_unetplusplus_timm-regnetx_002_fold0_clahe.yml",
    "python main.py --configs configs/train_mosmed_unetplusplus_timm-regnetx_002_fold1_clahe.yml",
    "python main.py --configs configs/train_mosmed_unetplusplus_timm-regnetx_002_fold2_clahe.yml",
    "python main.py --configs configs/train_mosmed_unetplusplus_timm-regnetx_002_fold3_clahe.yml",
    "python main.py --configs configs/train_mosmed_unetplusplus_timm-regnetx_002_fold4_clahe.yml",
]
for command in commands:
    os.system(command)
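# Hedged alternative (not in the original script): subprocess.run with
# check=True raises on a non-zero exit code, so training stops at the first
# failing fold instead of silently continuing — assuming fail-fast is wanted.
import shlex
import subprocess

for command in commands:
    subprocess.run(shlex.split(command), check=True)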
| 47.272727 | 98 | 0.840385 | 80 | 520 | 5.0875 | 0.3 | 0.12285 | 0.14742 | 0.233415 | 0.85258 | 0.85258 | 0.85258 | 0.85258 | 0.85258 | 0.85258 | 0 | 0.0409 | 0.059615 | 520 | 11 | 99 | 47.272727 | 0.791411 | 0 | 0 | 0 | 0 | 0 | 0.873321 | 0.633397 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.111111 | 0 | 0.111111 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 9 |
8c0ab765bec36840bd3c85e0d62097a308db48fe | 14,657 | py | Python | det3d/datasets/pipelines/formating.py | Charrrrrlie/CenterPoint | 51d699e306fea3b0237648088ee33a1760ab00ec | ["MIT"] | null | null | null | det3d/datasets/pipelines/formating.py | Charrrrrlie/CenterPoint | 51d699e306fea3b0237648088ee33a1760ab00ec | ["MIT"] | null | null | null | det3d/datasets/pipelines/formating.py | Charrrrrlie/CenterPoint | 51d699e306fea3b0237648088ee33a1760ab00ec | ["MIT"] | null | null | null |
from det3d import torchie
import numpy as np
import torch
from ..registry import PIPELINES
class DataBundle(object):
def __init__(self, data):
self.data = data
@PIPELINES.register_module
class Reformat(object):
def __init__(self, **kwargs):
double_flip = kwargs.get('double_flip', False)
self.double_flip = double_flip
def __call__(self, res, info):
meta = res["metadata"]
points = res["lidar"]["points"]
data_bundle = dict(
metadata=meta
)
if points is not None:
data_bundle.update(points=points)
if 'voxels' in res["lidar"]:
voxels = res["lidar"]["voxels"]
data_bundle.update(
voxels=voxels["voxels"],
shape=voxels["shape"],
num_points=voxels["num_points"],
num_voxels=voxels["num_voxels"],
coordinates=voxels["coordinates"],
)
if res["mode"] == "train":
data_bundle.update(res["lidar"]["targets"])
elif res["mode"] == "val":
data_bundle.update(dict(metadata=meta, ))
if self.double_flip:
# y axis
yflip_points = res["lidar"]["yflip_points"]
yflip_voxels = res["lidar"]["yflip_voxels"]
yflip_data_bundle = dict(
metadata=meta,
points=yflip_points,
voxels=yflip_voxels["voxels"],
shape=yflip_voxels["shape"],
num_points=yflip_voxels["num_points"],
num_voxels=yflip_voxels["num_voxels"],
coordinates=yflip_voxels["coordinates"],
)
# x axis
xflip_points = res["lidar"]["xflip_points"]
xflip_voxels = res["lidar"]["xflip_voxels"]
xflip_data_bundle = dict(
metadata=meta,
points=xflip_points,
voxels=xflip_voxels["voxels"],
shape=xflip_voxels["shape"],
num_points=xflip_voxels["num_points"],
num_voxels=xflip_voxels["num_voxels"],
coordinates=xflip_voxels["coordinates"],
)
# double axis flip
double_flip_points = res["lidar"]["double_flip_points"]
double_flip_voxels = res["lidar"]["double_flip_voxels"]
double_flip_data_bundle = dict(
metadata=meta,
points=double_flip_points,
voxels=double_flip_voxels["voxels"],
shape=double_flip_voxels["shape"],
num_points=double_flip_voxels["num_points"],
num_voxels=double_flip_voxels["num_voxels"],
coordinates=double_flip_voxels["coordinates"],
)
return [data_bundle, yflip_data_bundle, xflip_data_bundle, double_flip_data_bundle], info
return data_bundle, info
@PIPELINES.register_module
class DynamicReformat(object):
def __init__(self, **kwargs):
double_flip = kwargs.get('double_flip', False)
self.double_flip = double_flip
def __call__(self, res, info):
meta = res["metadata"]
points = res["lidar"]["points"]
data_bundle = dict(
metadata=meta
)
if points is not None:
data_bundle.update(points=points)
if 'voxels' in res["lidar"]:
voxels = res["lidar"]["voxels"]
data_bundle.update(
voxels=voxels["voxels"],
shape=voxels["shape"],
num_points=voxels["num_points"],
num_voxels=voxels["num_voxels"],
coordinates=voxels["coordinates"],
)
if res["mode"] == "train":
data_bundle.update(res["lidar"]["targets"])
elif res["mode"] == "val":
data_bundle.update(dict(metadata=meta, ))
if self.double_flip and 'voxels' in res["lidar"]:
# y axis
yflip_points = res["lidar"]["yflip_points"]
yflip_voxels = res["lidar"]["yflip_voxels"]
yflip_data_bundle = dict(
metadata=meta,
points=yflip_points,
voxels=yflip_voxels["voxels"],
shape=yflip_voxels["shape"],
num_points=yflip_voxels["num_points"],
num_voxels=yflip_voxels["num_voxels"],
coordinates=yflip_voxels["coordinates"],
)
# x axis
xflip_points = res["lidar"]["xflip_points"]
xflip_voxels = res["lidar"]["xflip_voxels"]
xflip_data_bundle = dict(
metadata=meta,
points=xflip_points,
voxels=xflip_voxels["voxels"],
shape=xflip_voxels["shape"],
num_points=xflip_voxels["num_points"],
num_voxels=xflip_voxels["num_voxels"],
coordinates=xflip_voxels["coordinates"],
)
# double axis flip
double_flip_points = res["lidar"]["double_flip_points"]
double_flip_voxels = res["lidar"]["double_flip_voxels"]
double_flip_data_bundle = dict(
metadata=meta,
points=double_flip_points,
voxels=double_flip_voxels["voxels"],
shape=double_flip_voxels["shape"],
num_points=double_flip_voxels["num_points"],
num_voxels=double_flip_voxels["num_voxels"],
coordinates=double_flip_voxels["coordinates"],
)
elif self.double_flip:
# print('#'*100)
yflip_points = res["lidar"]["yflip_points"]
yflip_data_bundle = dict(
metadata=meta,
points=yflip_points,
)
xflip_points = res["lidar"]["xflip_points"]
xflip_data_bundle = dict(
metadata=meta,
points=xflip_points,
)
double_flip_points = res["lidar"]["double_flip_points"]
double_flip_data_bundle = dict(
metadata=meta,
points=double_flip_points,
)
# points_rotate0_points = res["lidar"]["points_rotate0"]
# points_rotate0_bundle = dict(
# metadata=meta,
# points=points_rotate0_points,
# )
# points_rotate1_points = res["lidar"]["points_rotate1"]
# points_rotate1_bundle = dict(
# metadata=meta,
# points=points_rotate1_points,
# )
# points_rotate2_points = res["lidar"]["points_rotate2"]
# points_rotate2_bundle = dict(
# metadata=meta,
# points=points_rotate2_points,
# )
# points_rotate3_points = res["lidar"]["points_rotate3"]
# points_rotate3_bundle = dict(
# metadata=meta,
# points=points_rotate3_points,
# )
# points_rotate4_points = res["lidar"]["points_rotate4"]
# points_rotate4_bundle = dict(
# metadata=meta,
# points=points_rotate4_points,
# )
# points_rotate5_points = res["lidar"]["points_rotate5"]
# points_rotate5_bundle = dict(
# metadata=meta,
# points=points_rotate5_points,
# )
return [data_bundle, yflip_data_bundle, xflip_data_bundle, double_flip_data_bundle], info
# return [data_bundle, yflip_data_bundle, xflip_data_bundle, double_flip_data_bundle, points_rotate0_bundle,points_rotate1_bundle,points_rotate2_bundle,points_rotate3_bundle,points_rotate4_bundle,points_rotate5_bundle], info
return data_bundle, info
@PIPELINES.register_module
class DynamicScaleReformat(object):
def __init__(self, **kwargs):
double_flip = kwargs.get('double_flip', False)
self.double_flip = double_flip
def __call__(self, res, info):
meta = res["metadata"]
points = res["lidar"]["points"]
data_bundle = dict(
metadata=meta
)
if points is not None:
data_bundle.update(points=points)
if 'voxels' in res["lidar"]:
voxels = res["lidar"]["voxels"]
data_bundle.update(
voxels=voxels["voxels"],
shape=voxels["shape"],
num_points=voxels["num_points"],
num_voxels=voxels["num_voxels"],
coordinates=voxels["coordinates"],
)
if res["mode"] == "train":
data_bundle.update(res["lidar"]["targets"])
elif res["mode"] == "val":
data_bundle.update(dict(metadata=meta, ))
if self.double_flip and 'voxels' in res["lidar"]:
# y axis
yflip_points = res["lidar"]["yflip_points"]
yflip_voxels = res["lidar"]["yflip_voxels"]
yflip_data_bundle = dict(
metadata=meta,
points=yflip_points,
voxels=yflip_voxels["voxels"],
shape=yflip_voxels["shape"],
num_points=yflip_voxels["num_points"],
num_voxels=yflip_voxels["num_voxels"],
coordinates=yflip_voxels["coordinates"],
)
# x axis
xflip_points = res["lidar"]["xflip_points"]
xflip_voxels = res["lidar"]["xflip_voxels"]
xflip_data_bundle = dict(
metadata=meta,
points=xflip_points,
voxels=xflip_voxels["voxels"],
shape=xflip_voxels["shape"],
num_points=xflip_voxels["num_points"],
num_voxels=xflip_voxels["num_voxels"],
coordinates=xflip_voxels["coordinates"],
)
# double axis flip
double_flip_points = res["lidar"]["double_flip_points"]
double_flip_voxels = res["lidar"]["double_flip_voxels"]
double_flip_data_bundle = dict(
metadata=meta,
points=double_flip_points,
voxels=double_flip_voxels["voxels"],
shape=double_flip_voxels["shape"],
num_points=double_flip_voxels["num_points"],
num_voxels=double_flip_voxels["num_voxels"],
coordinates=double_flip_voxels["coordinates"],
)
elif self.double_flip:
# print('#'*100)
orgflip_points = res["lidar"]["org_points_scale0"]
orgflip_data_bundle0 = dict(
metadata=meta,
points=orgflip_points,
)
orgflip_points = res["lidar"]["org_points_scale1"]
orgflip_data_bundle1 = dict(
metadata=meta,
points=orgflip_points,
)
orgflip_points = res["lidar"]["org_points_scale2"]
orgflip_data_bundle2 = dict(
metadata=meta,
points=orgflip_points,
)
yflip_points = res["lidar"]["yflip_points_scale0"]
yflip_data_bundle0 = dict(
metadata=meta,
points=yflip_points,
)
yflip_points = res["lidar"]["yflip_points_scale1"]
yflip_data_bundle1 = dict(
metadata=meta,
points=yflip_points,
)
yflip_points = res["lidar"]["yflip_points_scale2"]
yflip_data_bundle2 = dict(
metadata=meta,
points=yflip_points,
)
xflip_points = res["lidar"]["xflip_points_scale0"]
xflip_data_bundle0 = dict(
metadata=meta,
points=xflip_points,
)
xflip_points = res["lidar"]["xflip_points_scale1"]
xflip_data_bundle1 = dict(
metadata=meta,
points=xflip_points,
)
xflip_points = res["lidar"]["xflip_points_scale2"]
xflip_data_bundle2 = dict(
metadata=meta,
points=xflip_points,
)
# doubleflip_points = res["lidar"]["double_flip_points_scale0"]
# doubleflip_data_bundle0 = dict(
# metadata=meta,
# points=doubleflip_points,
# )
# doubleflip_points = res["lidar"]["double_flip_points_scale0"]
# doubleflip_data_bundle1 = dict(
# metadata=meta,
# points=doubleflip_points,
# )
# doubleflip_points = res["lidar"]["double_flip_points_scale0"]
# doubleflip_data_bundle2 = dict(
# metadata=meta,
# points=doubleflip_points,
# )
return [orgflip_data_bundle0, yflip_data_bundle0, xflip_data_bundle0], info
# return [orgflip_data_bundle0, yflip_data_bundle0, xflip_data_bundle0,
# orgflip_data_bundle1, yflip_data_bundle1, xflip_data_bundle1,
# orgflip_data_bundle2, yflip_data_bundle2, xflip_data_bundle2], info
# return [orgflip_data_bundle0, yflip_data_bundle0, xflip_data_bundle0, doubleflip_data_bundle0,
# orgflip_data_bundle1, yflip_data_bundle1, xflip_data_bundle1, doubleflip_data_bundle1,
# orgflip_data_bundle2, yflip_data_bundle2, xflip_data_bundle2, doubleflip_data_bundle2], info
return data_bundle, info
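# Hedged usage sketch (not in the original file): how a pipeline stage such as
# Reformat is invoked. The res/info layout is inferred from the keys the
# classes read above; the metadata payload is a placeholder. np is already
# imported at the top of this file.
reformat = Reformat(double_flip=False)
res = {
    "metadata": {"token": "frame_0"},  # hypothetical metadata
    "mode": "val",
    "lidar": {"points": np.zeros((100, 4), dtype=np.float32)},
}
data_bundle, info = reformat(res, info=None)
assert "points" in data_bundle  # points pass straight through when no voxels exist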
| 39.294906 | 236 | 0.507607 | 1,313 | 14,657 | 5.304646 | 0.057121 | 0.086145 | 0.082699 | 0.09476 | 0.878823 | 0.875377 | 0.811055 | 0.801579 | 0.801579 | 0.792534 | 0 | 0.009621 | 0.397216 | 14,657 | 372 | 237 | 39.400538 | 0.778721 | 0.144641 | 0 | 0.776471 | 0 | 0 | 0.1103 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.027451 | false | 0 | 0.015686 | 0 | 0.082353 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
8c350ac5d18c987d8fccb0ddda8be00c54286dc3 | 93 | py | Python | simple_learning/__init__.py | JA-Bar/simple-learning | c59ce4231a4ca6d4c0359eeff85ca43c85e0348f | ["MIT"] | null | null | null | simple_learning/__init__.py | JA-Bar/simple-learning | c59ce4231a4ca6d4c0359eeff85ca43c85e0348f | ["MIT"] | null | null | null | simple_learning/__init__.py | JA-Bar/simple-learning | c59ce4231a4ca6d4c0359eeff85ca43c85e0348f | ["MIT"] | null | null | null |
from simple_learning.tensor import Tensor
from simple_learning.grad.function import no_grad
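# Hedged usage sketch of the two re-exported names. Only the imports above are
# guaranteed by this file; the Tensor constructor argument and the semantics of
# no_grad() are assumptions based on the usual autograd-library pattern.
from simple_learning import Tensor, no_grad

x = Tensor([1.0, 2.0, 3.0])  # hypothetical constructor signature
with no_grad():              # assumed to be a context manager, by convention
    y = x * 2                # operations here would not be tracked for gradients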
| 23.25 | 49 | 0.870968 | 14 | 93 | 5.571429 | 0.571429 | 0.25641 | 0.461538 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.096774 | 93 | 3 | 50 | 31 | 0.928571 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 7 |
8c38628ad8dc6a8be624ab2ec96957f41616ad78 | 338 | py | Python | menpo/image/test/image_gaussian_test.py | jacksoncsy/menpo | 3cac491fe30454935ed12fcaa89f453c5f6ec878 | ["BSD-3-Clause"] | 1 | 2021-04-20T00:36:57.000Z | 2021-04-20T00:36:57.000Z | menpo/image/test/image_gaussian_test.py | dykestra/menpo | 3a4690f991003a8706028ddb898493ac5b53418e | ["BSD-3-Clause"] | null | null | null | menpo/image/test/image_gaussian_test.py | dykestra/menpo | 3a4690f991003a8706028ddb898493ac5b53418e | ["BSD-3-Clause"] | null | null | null |
import menpo
def test_image_gaussian_pyramid_n_levels():
lenna = menpo.io.import_builtin_asset.lenna_png()
assert len(list(lenna.gaussian_pyramid(n_levels=4))) == 4
def test_image_gaussian_pyramid_one_level():
lenna = menpo.io.import_builtin_asset.lenna_png()
assert len(list(lenna.gaussian_pyramid(n_levels=1))) == 1
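# Hedged sketch of what the tests above exercise: gaussian_pyramid yields
# progressively downscaled copies of the image, which is why the tests
# materialize the generator with list() before counting levels.
lenna = menpo.io.import_builtin_asset.lenna_png()
for level in lenna.gaussian_pyramid(n_levels=3):
    print(level.shape)  # each level roughly halves the previous size (assumed)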
| 28.166667 | 61 | 0.769231 | 52 | 338 | 4.615385 | 0.403846 | 0.25 | 0.2 | 0.275 | 0.875 | 0.65 | 0.65 | 0.65 | 0.65 | 0.65 | 0 | 0.013423 | 0.118343 | 338 | 11 | 62 | 30.727273 | 0.791946 | 0 | 0 | 0.285714 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.285714 | 1 | 0.285714 | false | 0 | 0.428571 | 0 | 0.714286 | 0 | 0 | 0 | 0 | null | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 8 |
4fb8f8e0297bfbb75e0c8e30c12db5fb56e9f31d | 21,920 | py | Python | swagger_client/api/authorization_api.py | chbndrhnns/finapi-client | 259beda8b05e912c49d2dc4c3ed71205134e5d8a | ["MIT"] | 2 | 2019-04-15T05:58:21.000Z | 2021-11-15T18:26:37.000Z | swagger_client/api/authorization_api.py | chbndrhnns/finapi-client | 259beda8b05e912c49d2dc4c3ed71205134e5d8a | ["MIT"] | 1 | 2021-06-18T09:46:25.000Z | 2021-06-18T20:12:41.000Z | swagger_client/api/authorization_api.py | chbndrhnns/finapi-client | 259beda8b05e912c49d2dc4c3ed71205134e5d8a | ["MIT"] | 2 | 2019-07-08T13:41:09.000Z | 2020-12-07T12:10:04.000Z |
# coding: utf-8
"""
finAPI RESTful Services
finAPI RESTful Services # noqa: E501
OpenAPI spec version: v1.42.1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from swagger_client.api_client import ApiClient
class AuthorizationApi(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def get_token(self, grant_type, client_id, client_secret, **kwargs): # noqa: E501
"""Get tokens # noqa: E501
finAPI implements the OAuth 2.0 Standard for authorizing applications and users within applications. OAuth uses the terminology of clients and users. A client represents an application that calls finAPI services. A service call might be in the context of a user of the client (e.g.: getting a user's bank connections), or outside any user context (e.g.: editing your client's configuration, or creating a new user for your client). In any case, every service call must be authorized by an access_token. This service can be used to get such an access_token, for either one of the client's users, or for the client itself. Also, this service can be used to refresh the access_token of a user that has previously requested an access_token.<br/><br/>To get a token, you must always pass a valid client identifier and client secret (=client credentials). You can get free client credentials for the sandbox <a href='http://www.finapi.io/jetzt-testen/'>here</a>. Alternatively, you can also contact us at <a href='mailto:support@finapi.io'>support@finapi.io</a>.<br/><br/>The authorization process is similar for both a user within a client, and for the client itself: <br/>• To authorize a client (i.e. application), use <code>grant_type=client_credentials</code><br/>• To authorize a user, use <code>grant_type=password</code><br/><br/>If the given parameters are valid, the service will respond with the authorization data. <br/>Here is an example of a response when authorizing a user: <br/><pre>{ \"access_token\": \"yvMbx_TgwdYE0hgOVb8N4ZOvxOukqfjzYOGRZcJiCjQuRGkVIBfjjV3YG4zKTGiY2aPn2cQTGaQOT8uo5uo7_QOXts1s5UBSVuRHc6a8X30RrGBTyqV9h26SUHcZPNbZ\", \"token_type\": \"bearer\", \"refresh_token\": \"0b9KjiBVlZLz7a4HshSAIcFuscStiXT1VzT5mgNYwCQ_dWctTDsaIjedAhD1LpsOFJ7x6K8Emf8M3VOQkwNFR9FHijALYSQw2UeRwAC2MvrOKwfF1dHmOq5VEVYEaGf6\", \"expires_in\": 3600, \"scope\": \"all\" }</pre><br/><p>Use the returned access_token for other service calls by sending it in a 'Authorization' header, with the word 'Bearer' in front of the token. Like this:</p><pre>Authorization: Bearer yvMbx_TgwdYE0hgOVb8N4ZOvxOukqfjzYOGRZcJiCjQuRGkVIBfjjV3YG4zKTGiY2aPn2cQTGaQOT8uo5uo7_QOXts1s5UBSVuRHc6a8X30RrGBTyqV9h26SUHcZPNbZ</pre><p><b>WARNING</b>: Sending the access_token as a request parameter is deprecated and will probably be no longer supported in the next release of finAPI. Please always send the access_token in the request header, as shown above.</p><p>By default, the access tokens have an expiration time of one hour (however, you can change this via the service PATCH /clientConfiguration). If a token has expired, then using the token for a service call will result in a HTTP code 401. To restore access you can simply get a new token (as it is described above) or use <code>grant_type=refresh_token</code> (which works for user-related tokens only). In the latter case you just have to pass the previously received <code>refresh_token</code> for the user.</p><p>If the user that you want to authorize is not yet verified by the client (please see the 'isUserAutoVerificationEnabled' flag in the Client Configuration), then the service will respond with HTTP code 403. If the user is locked (see 'maxUserLoginAttempts' in the Client Configuration), the service will respond with HTTP code 423.</p><p>If the current role has no privileges to call a certain service (e.g. 
if a user tries to create a new user, or if a client tries to access user data outside of any user context), then the request will fail with the HTTP code 403.</p><p><b>IMPORTANT NOTES:</b><br/>• Even though finAPI is not logging query parameters, it is still recommended to pass the parameters in the POST body instead of in the URL. Also, please set the Content-Type of your request to 'application/x-www-form-urlencoded' when calling this service.<br/>• You should use this service only when you actually need a new token. As long as a token exists and has not expired, the service will always return the same token for the same credentials. Calling this service repeatedly with the same credentials contradicts the idea behind the tokens in OAuth, and will have a negative impact on the performance of your application. So instead of retrieving the same tokens over and over with this service, you should cache the tokens and re-use them as long as they have not expired - or at least as long as you're using the same tokens repeatedly, e.g. for the time of an active user session in your application.</p> # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_token(grant_type, client_id, client_secret, async=True)
>>> result = thread.get()
:param async bool
:param str grant_type: Determines the required type of authorization:password - authorize a user; client_credentials - authorize a client;refresh_token - refresh a user's access_token. (required)
:param str client_id: Client identifier (required)
:param str client_secret: Client secret (required)
:param str refresh_token: Refresh token. Required for grant_type=refresh_token only.
:param str username: User identifier. Required for grant_type=password only.
:param str password: User password. Required for grant_type=password only.
:return: AccessToken
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.get_token_with_http_info(grant_type, client_id, client_secret, **kwargs) # noqa: E501
else:
(data) = self.get_token_with_http_info(grant_type, client_id, client_secret, **kwargs) # noqa: E501
return data
def get_token_with_http_info(self, grant_type, client_id, client_secret, **kwargs): # noqa: E501
"""Get tokens # noqa: E501
finAPI implements the OAuth 2.0 Standard for authorizing applications and users within applications. OAuth uses the terminology of clients and users. A client represents an application that calls finAPI services. A service call might be in the context of a user of the client (e.g.: getting a user's bank connections), or outside any user context (e.g.: editing your client's configuration, or creating a new user for your client). In any case, every service call must be authorized by an access_token. This service can be used to get such an access_token, for either one of the client's users, or for the client itself. Also, this service can be used to refresh the access_token of a user that has previously requested an access_token.<br/><br/>To get a token, you must always pass a valid client identifier and client secret (=client credentials). You can get free client credentials for the sandbox <a href='http://www.finapi.io/jetzt-testen/'>here</a>. Alternatively, you can also contact us at <a href='mailto:support@finapi.io'>support@finapi.io</a>.<br/><br/>The authorization process is similar for both a user within a client, and for the client itself: <br/>• To authorize a client (i.e. application), use <code>grant_type=client_credentials</code><br/>• To authorize a user, use <code>grant_type=password</code><br/><br/>If the given parameters are valid, the service will respond with the authorization data. <br/>Here is an example of a response when authorizing a user: <br/><pre>{ \"access_token\": \"yvMbx_TgwdYE0hgOVb8N4ZOvxOukqfjzYOGRZcJiCjQuRGkVIBfjjV3YG4zKTGiY2aPn2cQTGaQOT8uo5uo7_QOXts1s5UBSVuRHc6a8X30RrGBTyqV9h26SUHcZPNbZ\", \"token_type\": \"bearer\", \"refresh_token\": \"0b9KjiBVlZLz7a4HshSAIcFuscStiXT1VzT5mgNYwCQ_dWctTDsaIjedAhD1LpsOFJ7x6K8Emf8M3VOQkwNFR9FHijALYSQw2UeRwAC2MvrOKwfF1dHmOq5VEVYEaGf6\", \"expires_in\": 3600, \"scope\": \"all\" }</pre><br/><p>Use the returned access_token for other service calls by sending it in a 'Authorization' header, with the word 'Bearer' in front of the token. Like this:</p><pre>Authorization: Bearer yvMbx_TgwdYE0hgOVb8N4ZOvxOukqfjzYOGRZcJiCjQuRGkVIBfjjV3YG4zKTGiY2aPn2cQTGaQOT8uo5uo7_QOXts1s5UBSVuRHc6a8X30RrGBTyqV9h26SUHcZPNbZ</pre><p><b>WARNING</b>: Sending the access_token as a request parameter is deprecated and will probably be no longer supported in the next release of finAPI. Please always send the access_token in the request header, as shown above.</p><p>By default, the access tokens have an expiration time of one hour (however, you can change this via the service PATCH /clientConfiguration). If a token has expired, then using the token for a service call will result in a HTTP code 401. To restore access you can simply get a new token (as it is described above) or use <code>grant_type=refresh_token</code> (which works for user-related tokens only). In the latter case you just have to pass the previously received <code>refresh_token</code> for the user.</p><p>If the user that you want to authorize is not yet verified by the client (please see the 'isUserAutoVerificationEnabled' flag in the Client Configuration), then the service will respond with HTTP code 403. If the user is locked (see 'maxUserLoginAttempts' in the Client Configuration), the service will respond with HTTP code 423.</p><p>If the current role has no privileges to call a certain service (e.g. 
if a user tries to create a new user, or if a client tries to access user data outside of any user context), then the request will fail with the HTTP code 403.</p><p><b>IMPORTANT NOTES:</b><br/>• Even though finAPI is not logging query parameters, it is still recommended to pass the parameters in the POST body instead of in the URL. Also, please set the Content-Type of your request to 'application/x-www-form-urlencoded' when calling this service.<br/>• You should use this service only when you actually need a new token. As long as a token exists and has not expired, the service will always return the same token for the same credentials. Calling this service repeatedly with the same credentials contradicts the idea behind the tokens in OAuth, and will have a negative impact on the performance of your application. So instead of retrieving the same tokens over and over with this service, you should cache the tokens and re-use them as long as they have not expired - or at least as long as you're using the same tokens repeatedly, e.g. for the time of an active user session in your application.</p> # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_token_with_http_info(grant_type, client_id, client_secret, async=True)
>>> result = thread.get()
:param async bool
:param str grant_type: Determines the required type of authorization:password - authorize a user; client_credentials - authorize a client;refresh_token - refresh a user's access_token. (required)
:param str client_id: Client identifier (required)
:param str client_secret: Client secret (required)
:param str refresh_token: Refresh token. Required for grant_type=refresh_token only.
:param str username: User identifier. Required for grant_type=password only.
:param str password: User password. Required for grant_type=password only.
:return: AccessToken
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['grant_type', 'client_id', 'client_secret', 'refresh_token', 'username', 'password'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_token" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'grant_type' is set
if ('grant_type' not in params or
params['grant_type'] is None):
raise ValueError("Missing the required parameter `grant_type` when calling `get_token`") # noqa: E501
# verify the required parameter 'client_id' is set
if ('client_id' not in params or
params['client_id'] is None):
raise ValueError("Missing the required parameter `client_id` when calling `get_token`") # noqa: E501
# verify the required parameter 'client_secret' is set
if ('client_secret' not in params or
params['client_secret'] is None):
raise ValueError("Missing the required parameter `client_secret` when calling `get_token`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
if 'grant_type' in params:
query_params.append(('grant_type', params['grant_type'])) # noqa: E501
if 'client_id' in params:
query_params.append(('client_id', params['client_id'])) # noqa: E501
if 'client_secret' in params:
query_params.append(('client_secret', params['client_secret'])) # noqa: E501
if 'refresh_token' in params:
query_params.append(('refresh_token', params['refresh_token'])) # noqa: E501
if 'username' in params:
query_params.append(('username', params['username'])) # noqa: E501
if 'password' in params:
query_params.append(('password', params['password'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/oauth/token', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='AccessToken', # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def revoke_token(self, token, **kwargs): # noqa: E501
"""Revoke a token # noqa: E501
An additional endpoint for the OAuth 2.0 Standard, which allows clients to notify finAPI that a previously obtained refresh_token or access_token is no longer required. A successful request will invalidate the given token. The revocation of a particular token may also cause the revocation of related tokens and the underlying authorization grant. For token_type_hint=access_token finAPI will invalidate only the given access_token. For token_type_hint=refresh_token, finAPI will invalidate the refresh token and all access tokens based on the same authorization grant. If the token_type_hint is not defined, finAPI will revoke all access and refresh tokens (if applicable) that are based on the same authorization grant.<br/><br/>Note that the service responds with HTTP status code 200 both if the token has been revoked successfully, and if the client submitted an invalid token.<br/><br/>Note also that the client's access_token is required to authenticate the revocation.<br/><br/>Here is an example of how to revoke a user's refresh_token (and therefore also his access tokens):<pre>Authorization: Bearer {client_access_token} POST /oauth/revoke?token={refresh_token}&token_type_hint=refresh_token</pre> # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.revoke_token(token, async=True)
>>> result = thread.get()
:param async bool
:param str token: The token that the client wants to get revoked (required)
:param str token_type_hint: A hint about the type of the token submitted for revocation
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.revoke_token_with_http_info(token, **kwargs) # noqa: E501
else:
(data) = self.revoke_token_with_http_info(token, **kwargs) # noqa: E501
return data
def revoke_token_with_http_info(self, token, **kwargs): # noqa: E501
"""Revoke a token # noqa: E501
An additional endpoint for the OAuth 2.0 Standard, which allows clients to notify finAPI that a previously obtained refresh_token or access_token is no longer required. A successful request will invalidate the given token. The revocation of a particular token may also cause the revocation of related tokens and the underlying authorization grant. For token_type_hint=access_token finAPI will invalidate only the given access_token. For token_type_hint=refresh_token, finAPI will invalidate the refresh token and all access tokens based on the same authorization grant. If the token_type_hint is not defined, finAPI will revoke all access and refresh tokens (if applicable) that are based on the same authorization grant.<br/><br/>Note that the service responds with HTTP status code 200 both if the token has been revoked successfully, and if the client submitted an invalid token.<br/><br/>Note also that the client's access_token is required to authenticate the revocation.<br/><br/>Here is an example of how to revoke a user's refresh_token (and therefore also his access tokens):<pre>Authorization: Bearer {client_access_token} POST /oauth/revoke?token={refresh_token}&token_type_hint=refresh_token</pre> # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.revoke_token_with_http_info(token, async=True)
>>> result = thread.get()
:param async bool
:param str token: The token that the client wants to get revoked (required)
:param str token_type_hint: A hint about the type of the token submitted for revocation
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['token', 'token_type_hint'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method revoke_token" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'token' is set
if ('token' not in params or
params['token'] is None):
raise ValueError("Missing the required parameter `token` when calling `revoke_token`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
if 'token' in params:
query_params.append(('token', params['token'])) # noqa: E501
if 'token_type_hint' in params:
query_params.append(('token_type_hint', params['token_type_hint'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['finapi_auth'] # noqa: E501
return self.api_client.call_api(
'/oauth/revoke', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
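# Hedged usage sketch (not in the original file). Note that the generated code
# passes `async` as a keyword argument; `async` became a reserved word in
# Python 3.7, so this file only parses on Python 2 / Python < 3.7, or after
# regeneration with the newer `async_req` parameter name. Credentials below
# are placeholders.
api = AuthorizationApi(ApiClient())
token = api.get_token(
    grant_type="client_credentials",
    client_id="your-client-id",
    client_secret="your-client-secret",
)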
| 85.291829 | 4,524 | 0.713914 | 3,102 | 21,920 | 4.931012 | 0.118311 | 0.019351 | 0.011899 | 0.009414 | 0.939331 | 0.925209 | 0.905335 | 0.900366 | 0.89409 | 0.88193 | 0 | 0.016093 | 0.209078 | 21,920 | 256 | 4,525 | 85.625 | 0.866182 | 0.030155 | 0 | 0.564886 | 1 | 0 | 0.199239 | 0.021843 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0.022901 | 0.030534 | null | null | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 9 |
8b224ed86f8128faac71eec5b6283fd3f87475c5 | 1,015 | py | Python | pyQPanda/pyqpanda/Algorithm/test/test_utils.py | QianJianhua1/QPanda-2 | a13c7b733031b1d0007dceaf1dae6ad447bb969c | ["Apache-2.0"] | 631 | 2019-01-21T01:33:38.000Z | 2022-03-31T07:33:04.000Z | pyQPanda/pyqpanda/Algorithm/test/test_utils.py | yinxx/QPanda-2 | c70c4117a90978916b871424e204c5159f645642 | ["Apache-2.0"] | 24 | 2019-02-01T10:12:45.000Z | 2021-12-02T01:49:57.000Z | pyQPanda/pyqpanda/Algorithm/test/test_utils.py | yinxx/QPanda-2 | c70c4117a90978916b871424e204c5159f645642 | ["Apache-2.0"] | 80 | 2019-01-21T03:04:20.000Z | 2022-03-29T15:38:45.000Z |
'''
Test Utils\n
Copyright (C) Origin Quantum 2017-2018\n
Licensed Under Apache Licence 2.0
'''
def test_begin_str(test_name,max_len=40):
"""
Use with test_end_str(test_name) to make string like this:\n
******** Test Begin ********\n
******** Test End **********
"""
if (len(test_name)%2==1):
star_len=(max_len-len(test_name))//2+1
return '*'*star_len+" "+test_name+" Begin "+'*'*(star_len-1)
else:
star_len=(max_len-len(test_name))//2
return '*'*star_len+" "+test_name+" Begin "+'*'*star_len
def test_end_str(test_name,max_len=40):
"""
Use with test_begin_str(test_name) to make string like this:\n
******** Test Begin ********\n
******** Test End **********
"""
if (len(test_name)%2==1):
star_len=(max_len-len(test_name))//2+1
return '*'*star_len+" "+test_name+" End **"+'*'*(star_len-1)
else:
star_len=(max_len-len(test_name))//2
return '*'*star_len+" "+test_name+" End **"+'*'*star_len
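# Quick demonstration of the helpers above: with max_len=40 both banners come
# out the same total width, because the odd-length branch trims one star on
# the right-hand run.
if __name__ == '__main__':
    print(test_begin_str('Utils'))
    print(test_end_str('Utils'))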
| 32.741935 | 68 | 0.559606 | 152 | 1,015 | 3.473684 | 0.230263 | 0.212121 | 0.208333 | 0.136364 | 0.856061 | 0.810606 | 0.810606 | 0.810606 | 0.753788 | 0.640152 | 0 | 0.032258 | 0.205911 | 1,015 | 31 | 69 | 32.741935 | 0.622829 | 0.320197 | 0 | 0.571429 | 0 | 0 | 0.063391 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.142857 | false | 0 | 0 | 0 | 0.428571 | 0 | 0 | 0 | 0 | null | 1 | 1 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 8 |
8ca4476924324ced011a73ec864e899b8a5387de | 10,261 | py | Python | mayan/apps/user_management/tests/test_api.py | eshbeata/open-paperless | 6b9ed1f21908116ad2795b3785b2dbd66713d66e | ["Apache-2.0"] | 2,743 | 2017-12-18T07:12:30.000Z | 2022-03-27T17:21:25.000Z | mayan/apps/user_management/tests/test_api.py | eshbeata/open-paperless | 6b9ed1f21908116ad2795b3785b2dbd66713d66e | ["Apache-2.0"] | 15 | 2017-12-18T14:58:07.000Z | 2021-03-01T20:05:05.000Z | mayan/apps/user_management/tests/test_api.py | eshbeata/open-paperless | 6b9ed1f21908116ad2795b3785b2dbd66713d66e | ["Apache-2.0"] | 257 | 2017-12-18T03:12:58.000Z | 2022-03-25T08:59:10.000Z |
from __future__ import unicode_literals
from django.contrib.auth import get_user_model
from django.contrib.auth.models import Group
from django.urls import reverse
from rest_api.tests import BaseAPITestCase
from ..tests.literals import (
TEST_ADMIN_EMAIL, TEST_ADMIN_PASSWORD, TEST_ADMIN_USERNAME
)
from .literals import (
TEST_GROUP_NAME, TEST_GROUP_NAME_EDITED, TEST_USER_EMAIL,
TEST_USER_PASSWORD, TEST_USER_USERNAME, TEST_USER_USERNAME_EDITED
)
class UserManagementUserAPITestCase(BaseAPITestCase):
def setUp(self):
super(UserManagementUserAPITestCase, self).setUp()
self.admin_user = get_user_model().objects.create_superuser(
username=TEST_ADMIN_USERNAME, email=TEST_ADMIN_EMAIL,
password=TEST_ADMIN_PASSWORD
)
self.client.login(
username=TEST_ADMIN_USERNAME, password=TEST_ADMIN_PASSWORD
)
def tearDown(self):
get_user_model().objects.all().delete()
super(UserManagementUserAPITestCase, self).tearDown()
def test_user_create(self):
response = self.client.post(
reverse('rest_api:user-list'), data={
'email': TEST_USER_EMAIL, 'password': TEST_USER_PASSWORD,
'username': TEST_USER_USERNAME,
}
)
self.assertEqual(response.status_code, 201)
user = get_user_model().objects.get(pk=response.data['id'])
self.assertEqual(user.username, TEST_USER_USERNAME)
def test_user_create_with_group(self):
group_1 = Group.objects.create(name='test group 1')
response = self.client.post(
reverse('rest_api:user-list'), data={
'email': TEST_USER_EMAIL, 'password': TEST_USER_PASSWORD,
'username': TEST_USER_USERNAME,
'groups_pk_list': '{}'.format(group_1.pk)
}
)
self.assertEqual(response.status_code, 201)
user = get_user_model().objects.get(pk=response.data['id'])
self.assertEqual(user.username, TEST_USER_USERNAME)
self.assertQuerysetEqual(user.groups.all(), (repr(group_1),))
def test_user_create_with_groups(self):
group_1 = Group.objects.create(name='test group 1')
group_2 = Group.objects.create(name='test group 2')
response = self.client.post(
reverse('rest_api:user-list'), data={
'email': TEST_USER_EMAIL, 'password': TEST_USER_PASSWORD,
'username': TEST_USER_USERNAME,
'groups_pk_list': '{},{}'.format(group_1.pk, group_2.pk)
}
)
self.assertEqual(response.status_code, 201)
user = get_user_model().objects.get(pk=response.data['id'])
self.assertEqual(user.username, TEST_USER_USERNAME)
self.assertQuerysetEqual(
user.groups.all().order_by('name'), (repr(group_1), repr(group_2))
)
def test_user_create_login(self):
response = self.client.post(
reverse('rest_api:user-list'), data={
'email': TEST_USER_EMAIL, 'password': TEST_USER_PASSWORD,
'username': TEST_USER_USERNAME,
}
)
self.assertEqual(response.status_code, 201)
self.assertTrue(
self.client.login(
username=TEST_USER_USERNAME, password=TEST_USER_PASSWORD
)
)
def test_user_create_login_password_change(self):
response = self.client.post(
reverse('rest_api:user-list'), data={
'email': TEST_USER_EMAIL, 'password': 'bad_password',
'username': TEST_USER_USERNAME,
}
)
self.assertEqual(response.status_code, 201)
self.assertFalse(
self.client.login(
username=TEST_USER_USERNAME, password=TEST_USER_PASSWORD
)
)
user = get_user_model().objects.get(pk=response.data['id'])
response = self.client.patch(
reverse('rest_api:user-detail', args=(user.pk,)), data={
'password': TEST_USER_PASSWORD,
}
)
self.assertTrue(
self.client.login(
username=TEST_USER_USERNAME, password=TEST_USER_PASSWORD
)
)
def test_user_edit_via_put(self):
user = get_user_model().objects.create_user(
email=TEST_USER_EMAIL, password=TEST_USER_PASSWORD,
username=TEST_USER_USERNAME
)
response = self.client.put(
reverse('rest_api:user-detail', args=(user.pk,)),
data={'username': TEST_USER_USERNAME_EDITED}
)
self.assertEqual(response.status_code, 200)
user.refresh_from_db()
self.assertEqual(user.username, TEST_USER_USERNAME_EDITED)
def test_user_edit_via_patch(self):
user = get_user_model().objects.create_user(
email=TEST_USER_EMAIL, password=TEST_USER_PASSWORD,
username=TEST_USER_USERNAME
)
response = self.client.patch(
reverse('rest_api:user-detail', args=(user.pk,)),
data={'username': TEST_USER_USERNAME_EDITED}
)
self.assertEqual(response.status_code, 200)
user.refresh_from_db()
self.assertEqual(user.username, TEST_USER_USERNAME_EDITED)
def test_user_edit_remove_groups_via_patch(self):
user = get_user_model().objects.create_user(
email=TEST_USER_EMAIL, password=TEST_USER_PASSWORD,
username=TEST_USER_USERNAME
)
group_1 = Group.objects.create(name='test group 1')
user.groups.add(group_1)
response = self.client.patch(
reverse('rest_api:user-detail', args=(user.pk,)),
)
self.assertEqual(response.status_code, 200)
user.refresh_from_db()
self.assertQuerysetEqual(
user.groups.all().order_by('name'), (repr(group_1),)
)
def test_user_edit_add_groups_via_patch(self):
user = get_user_model().objects.create_user(
email=TEST_USER_EMAIL, password=TEST_USER_PASSWORD,
username=TEST_USER_USERNAME
)
group_1 = Group.objects.create(name='test group 1')
response = self.client.patch(
reverse('rest_api:user-detail', args=(user.pk,)),
data={'groups_pk_list': '{}'.format(group_1.pk)}
)
self.assertEqual(response.status_code, 200)
user.refresh_from_db()
self.assertQuerysetEqual(
user.groups.all().order_by('name'), (repr(group_1),)
)
def test_user_delete(self):
user = get_user_model().objects.create_user(
email=TEST_USER_EMAIL, password=TEST_USER_PASSWORD,
username=TEST_USER_USERNAME
)
response = self.client.delete(
reverse('rest_api:user-detail', args=(user.pk,))
)
self.assertEqual(response.status_code, 204)
with self.assertRaises(get_user_model().DoesNotExist):
get_user_model().objects.get(pk=user.pk)
def test_user_group_list(self):
group = Group.objects.create(name=TEST_GROUP_NAME)
user = get_user_model().objects.create_user(
email=TEST_USER_EMAIL, password=TEST_USER_PASSWORD,
username=TEST_USER_USERNAME
)
user.groups.add(group)
response = self.client.get(
reverse('rest_api:users-group-list', args=(user.pk,))
)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.json()['results'][0]['name'], TEST_GROUP_NAME)
def test_user_group_add(self):
group = Group.objects.create(name=TEST_GROUP_NAME)
user = get_user_model().objects.create_user(
email=TEST_USER_EMAIL, password=TEST_USER_PASSWORD,
username=TEST_USER_USERNAME
)
response = self.client.post(
reverse(
'rest_api:users-group-list', args=(user.pk,)
), data={
'group_pk_list': '{}'.format(group.pk)
}
)
self.assertEqual(response.status_code, 201)
self.assertEqual(group.user_set.first(), user)
class UserManagementGroupAPITestCase(BaseAPITestCase):
def setUp(self):
super(UserManagementGroupAPITestCase, self).setUp()
self.admin_user = get_user_model().objects.create_superuser(
username=TEST_ADMIN_USERNAME, email=TEST_ADMIN_EMAIL,
password=TEST_ADMIN_PASSWORD
)
self.client.login(
username=TEST_ADMIN_USERNAME, password=TEST_ADMIN_PASSWORD
)
def tearDown(self):
get_user_model().objects.all().delete()
super(UserManagementGroupAPITestCase, self).tearDown()
def test_group_create(self):
response = self.client.post(
reverse('rest_api:group-list'), data={'name': TEST_GROUP_NAME}
)
self.assertEqual(response.status_code, 201)
group = Group.objects.get(pk=response.data['id'])
self.assertEqual(group.name, TEST_GROUP_NAME)
def test_group_edit_via_put(self):
group = Group.objects.create(name=TEST_GROUP_NAME)
response = self.client.put(
reverse('rest_api:group-detail', args=(group.pk,)), data={
'name': TEST_GROUP_NAME_EDITED
}
)
self.assertEqual(response.status_code, 200)
group.refresh_from_db()
self.assertEqual(group.name, TEST_GROUP_NAME_EDITED)
def test_group_edit_via_patch(self):
group = Group.objects.create(name=TEST_GROUP_NAME)
response = self.client.patch(
reverse('rest_api:group-detail', args=(group.pk,)), data={
'name': TEST_GROUP_NAME_EDITED
}
)
self.assertEqual(response.status_code, 200)
group.refresh_from_db()
self.assertEqual(group.name, TEST_GROUP_NAME_EDITED)
def test_group_delete(self):
group = Group.objects.create(name=TEST_GROUP_NAME)
response = self.client.delete(
reverse('rest_api:group-detail', args=(group.pk,))
)
self.assertEqual(response.status_code, 204)
self.assertEqual(Group.objects.count(), 0)
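# Hedged refactoring sketch (not in the original file): the same user-creation
# block recurs across most tests above, so a helper like this on the test case
# class (the name is hypothetical) would shrink each test to its essentials.
def _create_test_user(self):
    return get_user_model().objects.create_user(
        email=TEST_USER_EMAIL, password=TEST_USER_PASSWORD,
        username=TEST_USER_USERNAME
    )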
| 32.993569 | 80 | 0.628496 | 1,177 | 10,261 | 5.17842 | 0.072218 | 0.085316 | 0.063002 | 0.090566 | 0.847744 | 0.819032 | 0.785398 | 0.763249 | 0.750615 | 0.723708 | 0 | 0.009251 | 0.262548 | 10,261 | 310 | 81 | 33.1 | 0.79622 | 0 | 0 | 0.55 | 0 | 0 | 0.063736 | 0.011013 | 0 | 0 | 0 | 0 | 0.145833 | 1 | 0.083333 | false | 0.095833 | 0.029167 | 0 | 0.120833 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 7 |
8cc23d6e16dec99454b8cb44433eaca01fc806b7 | 11,100 | py | Python | backend/microservices/auth/__test__/controller_test/register.py | MuhamedAbdalla/Automatic-Audio-Book-Based-On-Emotion-Detection | 72130ad037b900461af5be6d80b27ab29c81de5e | ["MIT"] | 3 | 2021-04-26T00:17:14.000Z | 2021-07-04T15:30:09.000Z | backend/microservices/auth/__test__/controller_test/register.py | MuhamedAbdalla/Automatic-Audio-Book-Based-On-Emotion-Detection | 72130ad037b900461af5be6d80b27ab29c81de5e | ["MIT"] | null | null | null | backend/microservices/auth/__test__/controller_test/register.py | MuhamedAbdalla/Automatic-Audio-Book-Based-On-Emotion-Detection | 72130ad037b900461af5be6d80b27ab29c81de5e | ["MIT"] | null | null | null |
import unittest
import json
from controller import app
from __test__.core_test.util import *
class RegisterTestCase(unittest.TestCase):
def setUp(self):
app.testing = True
self.app = app.test_client()
def test_register_with_valid_data(self):
first_name = generate_lowercase_string_of_length(USER_NAME_MAX_LENGTH)
last_name = generate_lowercase_string_of_length(USER_NAME_MAX_LENGTH)
email = generate_lowercase_string_of_length(USER_EMAIL_MAX_LENGTH - len("@gmail.com")) + "@gmail.com"
phone = "+201234567890"
password = generate_lowercase_string_of_length(USER_PASSWORD_MAX_LENGTH)
gender = "m"
birthday = datetime(1999, 7, 24)
res = self.app.post(REGISTER_ABS_ENDPOINT_NAME,
data={REQ_USER_EMAIL_KEY_NAME: email,
REQ_USER_PASSWORD_KEY_NAME: password,
REQ_USER_FIRST_NAME_KEY_NAME: first_name,
REQ_USER_LAST_NAME_KEY_NAME: last_name,
REQ_USER_BIRTHDAY_KEY_NAME: birthday.strftime(REQ_USER_BIRTHDAY_FORMAT),
REQ_USER_PHONE_KEY_NAME: phone,
REQ_USER_GENDER_KEY_NAME: gender})
assert res is not None
assert "201" in res.status
res = self.app.post(LOGIN_ABS_ENDPOINT_NAME,
data={REQ_USER_EMAIL_KEY_NAME: email, REQ_USER_PASSWORD_KEY_NAME: password})
assert res is not None
assert "200" in res.status
res = json.loads(res.data)
assert RES_ACCESS_TOKEN_KEY_NAME in res
assert RES_REFRESH_TOKEN_KEY_NAME in res
def test_register_user_with_invalid_email(self):
first_name = generate_lowercase_string_of_length(USER_NAME_MAX_LENGTH)
last_name = generate_lowercase_string_of_length(USER_NAME_MAX_LENGTH)
email = generate_lowercase_string_of_length(USER_EMAIL_MAX_LENGTH - len("@gmail.com")) + "l@gmail.com"
phone = "+201234567890"
password = generate_lowercase_string_of_length(USER_PASSWORD_MAX_LENGTH)
gender = "m"
birthday = datetime(1999, 7, 24)
res = self.app.post(REGISTER_ABS_ENDPOINT_NAME,
data={REQ_USER_EMAIL_KEY_NAME: email,
REQ_USER_PASSWORD_KEY_NAME: password,
REQ_USER_FIRST_NAME_KEY_NAME: first_name,
REQ_USER_LAST_NAME_KEY_NAME: last_name,
REQ_USER_BIRTHDAY_KEY_NAME: birthday.strftime(REQ_USER_BIRTHDAY_FORMAT),
REQ_USER_PHONE_KEY_NAME: phone,
REQ_USER_GENDER_KEY_NAME: gender})
assert "400" in res.status
email = generate_lowercase_string_of_length(USER_EMAIL_MAX_LENGTH - len("@gmail.com")) + "gmail.com"
res = self.app.post(REGISTER_ABS_ENDPOINT_NAME,
data={REQ_USER_EMAIL_KEY_NAME: email,
REQ_USER_PASSWORD_KEY_NAME: password,
REQ_USER_FIRST_NAME_KEY_NAME: first_name,
REQ_USER_LAST_NAME_KEY_NAME: last_name,
REQ_USER_BIRTHDAY_KEY_NAME: birthday.strftime(REQ_USER_BIRTHDAY_FORMAT),
REQ_USER_PHONE_KEY_NAME: phone,
REQ_USER_GENDER_KEY_NAME: gender})
assert "400" in res.status
email = generate_lowercase_string_of_length(USER_EMAIL_MAX_LENGTH - len("@gmail.com")) + "@gmailcom"
res = self.app.post(REGISTER_ABS_ENDPOINT_NAME,
data={REQ_USER_EMAIL_KEY_NAME: email,
REQ_USER_PASSWORD_KEY_NAME: password,
REQ_USER_FIRST_NAME_KEY_NAME: first_name,
REQ_USER_LAST_NAME_KEY_NAME: last_name,
REQ_USER_BIRTHDAY_KEY_NAME: birthday.strftime(REQ_USER_BIRTHDAY_FORMAT),
REQ_USER_PHONE_KEY_NAME: phone,
REQ_USER_GENDER_KEY_NAME: gender})
assert "400" in res.status
def test_register_user_with_invalid_first_name(self):
first_name = generate_lowercase_string_of_length(USER_NAME_MAX_LENGTH + 1)
last_name = generate_lowercase_string_of_length(USER_NAME_MAX_LENGTH)
email = generate_lowercase_string_of_length(USER_EMAIL_MAX_LENGTH - len("@gmail.com")) + "@gmail.com"
phone = "+201234567890"
password = generate_lowercase_string_of_length(USER_PASSWORD_MAX_LENGTH)
gender = "m"
birthday = datetime(1999, 7, 24)
res = self.app.post(REGISTER_ABS_ENDPOINT_NAME,
data={REQ_USER_EMAIL_KEY_NAME: email,
REQ_USER_PASSWORD_KEY_NAME: password,
REQ_USER_FIRST_NAME_KEY_NAME: first_name,
REQ_USER_LAST_NAME_KEY_NAME: last_name,
REQ_USER_BIRTHDAY_KEY_NAME: birthday.strftime(REQ_USER_BIRTHDAY_FORMAT),
REQ_USER_PHONE_KEY_NAME: phone,
REQ_USER_GENDER_KEY_NAME: gender})
assert "400" in res.status
first_name = generate_lowercase_string_of_length(USER_NAME_MIN_LENGTH - 1)
res = self.app.post(REGISTER_ABS_ENDPOINT_NAME,
data={REQ_USER_EMAIL_KEY_NAME: email,
REQ_USER_PASSWORD_KEY_NAME: password,
REQ_USER_FIRST_NAME_KEY_NAME: first_name,
REQ_USER_LAST_NAME_KEY_NAME: last_name,
REQ_USER_BIRTHDAY_KEY_NAME: birthday.strftime(REQ_USER_BIRTHDAY_FORMAT),
REQ_USER_PHONE_KEY_NAME: phone,
REQ_USER_GENDER_KEY_NAME: gender})
assert "400" in res.status
def test_register_user_with_invalid_last_name(self):
first_name = generate_lowercase_string_of_length(USER_NAME_MAX_LENGTH)
last_name = generate_lowercase_string_of_length(USER_NAME_MAX_LENGTH + 1)
email = generate_lowercase_string_of_length(USER_EMAIL_MAX_LENGTH - len("@gmail.com")) + "@gmail.com"
phone = "+201234567890"
password = generate_lowercase_string_of_length(USER_PASSWORD_MAX_LENGTH)
gender = "m"
birthday = datetime(1999, 7, 24)
res = self.app.post(REGISTER_ABS_ENDPOINT_NAME,
data={REQ_USER_EMAIL_KEY_NAME: email,
REQ_USER_PASSWORD_KEY_NAME: password,
REQ_USER_FIRST_NAME_KEY_NAME: first_name,
REQ_USER_LAST_NAME_KEY_NAME: last_name,
REQ_USER_BIRTHDAY_KEY_NAME: birthday.strftime(REQ_USER_BIRTHDAY_FORMAT),
REQ_USER_PHONE_KEY_NAME: phone,
REQ_USER_GENDER_KEY_NAME: gender})
assert "400" in res.status
last_name = generate_lowercase_string_of_length(USER_NAME_MIN_LENGTH - 1)
res = self.app.post(REGISTER_ABS_ENDPOINT_NAME,
data={REQ_USER_EMAIL_KEY_NAME: email,
REQ_USER_PASSWORD_KEY_NAME: password,
REQ_USER_FIRST_NAME_KEY_NAME: first_name,
REQ_USER_LAST_NAME_KEY_NAME: last_name,
REQ_USER_BIRTHDAY_KEY_NAME: birthday.strftime(REQ_USER_BIRTHDAY_FORMAT),
REQ_USER_PHONE_KEY_NAME: phone,
REQ_USER_GENDER_KEY_NAME: gender})
assert "400" in res.status
def test_register_user_with_invalid_phone(self):
first_name = generate_lowercase_string_of_length(USER_NAME_MAX_LENGTH)
last_name = generate_lowercase_string_of_length(USER_NAME_MAX_LENGTH)
email = generate_lowercase_string_of_length(USER_EMAIL_MAX_LENGTH - len("@gmail.com")) + "@gmail.com"
phone = "01234567890"
password = generate_lowercase_string_of_length(USER_PASSWORD_MAX_LENGTH)
gender = "m"
birthday = datetime(1999, 7, 24)
res = self.app.post(REGISTER_ABS_ENDPOINT_NAME,
data={REQ_USER_EMAIL_KEY_NAME: email,
REQ_USER_PASSWORD_KEY_NAME: password,
REQ_USER_FIRST_NAME_KEY_NAME: first_name,
REQ_USER_LAST_NAME_KEY_NAME: last_name,
REQ_USER_BIRTHDAY_KEY_NAME: birthday.strftime(REQ_USER_BIRTHDAY_FORMAT),
REQ_USER_PHONE_KEY_NAME: phone,
REQ_USER_GENDER_KEY_NAME: gender})
assert "400" in res.status
def test_register_user_with_invalid_password_length(self):
first_name = generate_lowercase_string_of_length(USER_NAME_MAX_LENGTH)
last_name = generate_lowercase_string_of_length(USER_NAME_MAX_LENGTH)
email = generate_lowercase_string_of_length(USER_EMAIL_MAX_LENGTH - len("@gmail.com")) + "@gmail.com"
phone = "+201234567890"
password = generate_lowercase_string_of_length(USER_PASSWORD_MAX_LENGTH + 1)
gender = "m"
birthday = datetime(1999, 7, 24)
res = self.app.post(REGISTER_ABS_ENDPOINT_NAME,
data={REQ_USER_EMAIL_KEY_NAME: email,
REQ_USER_PASSWORD_KEY_NAME: password,
REQ_USER_FIRST_NAME_KEY_NAME: first_name,
REQ_USER_LAST_NAME_KEY_NAME: last_name,
REQ_USER_BIRTHDAY_KEY_NAME: birthday.strftime(REQ_USER_BIRTHDAY_FORMAT),
REQ_USER_PHONE_KEY_NAME: phone,
REQ_USER_GENDER_KEY_NAME: gender})
assert "400" in res.status
password = generate_lowercase_string_of_length(USER_PASSWORD_MIN_LENGTH - 1)
res = self.app.post(REGISTER_ABS_ENDPOINT_NAME,
data={REQ_USER_EMAIL_KEY_NAME: email,
REQ_USER_PASSWORD_KEY_NAME: password,
REQ_USER_FIRST_NAME_KEY_NAME: first_name,
REQ_USER_LAST_NAME_KEY_NAME: last_name,
REQ_USER_BIRTHDAY_KEY_NAME: birthday.strftime(REQ_USER_BIRTHDAY_FORMAT),
REQ_USER_PHONE_KEY_NAME: phone,
REQ_USER_GENDER_KEY_NAME: gender})
assert "400" in res.status
| 60 | 111 | 0.59027 | 1,255 | 11,100 | 4.669323 | 0.058964 | 0.107509 | 0.113823 | 0.12372 | 0.942833 | 0.937031 | 0.924744 | 0.924744 | 0.916041 | 0.914505 | 0 | 0.021609 | 0.353784 | 11,100 | 184 | 112 | 60.326087 | 0.795344 | 0 | 0 | 0.815476 | 0 | 0 | 0.025376 | 0 | 0 | 0 | 0 | 0 | 0.095238 | 1 | 0.041667 | false | 0.119048 | 0.02381 | 0 | 0.071429 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 8 |
8cd13df92238111ccfe66601635f4df99728d4f8 | 21,077 | py | Python | muri/muda.py | dtsukiyama/muri | a9c935edf010bcf0f63988668a166b57d7d7dad8 | ["MIT"] | 1 | 2019-08-13T02:42:17.000Z | 2019-08-13T02:42:17.000Z | muri/muda.py | dtsukiyama/muri | a9c935edf010bcf0f63988668a166b57d7d7dad8 | ["MIT"] | 5 | 2021-03-19T01:41:07.000Z | 2022-03-11T23:51:26.000Z | muri/muda.py | dtsukiyama/muri | a9c935edf010bcf0f63988668a166b57d7d7dad8 | ["MIT"] | null | null | null |
from __future__ import division
import argparse
import os
import time
import chainer
import numpy as np
from PIL import Image
import six
from muri.lib import iproc
from muri.lib import reconstruct
from muri.lib import srcnn
from muri.lib import utils
class Namespace:
def __init__(self, **kwargs):
self.__dict__.update(kwargs)
class Validate(object):
@classmethod
def model(cls, model):
models = ['vgg7','upresnet10','upconv7','resnet10']
if model.lower() not in models:
raise ValueError('Model does not exist. Only the following models are acceptable: {}, {}, {}, {}'.format(models[0],
models[1], models[2], models[3]))
@classmethod
def ns_model(cls, model):
models = ['upresnet10','upconv7','resnet10']
if model.lower() not in models:
raise ValueError('Model does not exist. Only the following models are acceptable: {}, {}, {}'.format(models[0],
models[1], models[2]))
@classmethod
def color(cls, color):
colors = ['rgb', 'y']
if color not in colors:
raise ValueError('Only two color models available: {} and {}'.format(colors[0], colors[1]))
@classmethod
def noise(cls, noise_level):
levels = [0,1,2,3]
if noise_level not in levels:
raise ValueError('Please set your noise level accordingly, between 0 and 3')
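# Illustrative note, not in the original file: each Validate classmethod
# raises ValueError on unsupported input, e.g.
#   Validate.model('vgg9')  -> ValueError: Model does not exist. ...
#   Validate.noise(5)       -> ValueError: Please set your noise level accordingly, between 0 and 3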
# scale models
class Scale(object):
def __init__(self, model='VGG7', color='rgb',
tta_level=8, tta=False, batch_size=16, block_size=128, scale_ratio=2.0,
width=0, height=0, shorter_side=0, longer_side=0, quality=None, extension='png'):
Validate.model(model)
Validate.color(color)
self.model_directory = 'muri/models/{}'.format(model.lower())
self.models = {}
self.channel = 3 if color == 'rgb' else 1
self.color = color
self.model = model
self.tta_level = tta_level
self.tta = tta
self.batch_size = batch_size
self.block_size = block_size
self.scale_ratio = scale_ratio
self.width = width
self.height = height
self.shorter_side = shorter_side
self.longer_side = longer_side
self.quality = quality
self.extension = extension
def config(self):
settings = Namespace()
settings.model_dir = self.model_directory
settings.color = self.color
settings.model = self.model
settings.ch = self.channel
settings.tta_level = self.tta_level
settings.batch_size = self.batch_size
settings.block_size = self.block_size
settings.scale_ratio = self.scale_ratio
settings.height = self.height
settings.width = self.width
settings.shorter_side = self.shorter_side
settings.longer_side = self.longer_side
settings.tta = self.tta
settings.quality = self.quality
settings.extension = self.extension
return settings
def cpu(self):
model_name = 'anime_style_scale_{}.npz'.format(self.color)
model_path = os.path.join(self.model_directory, model_name)
self.models['scale'] = srcnn.archs[self.model](self.channel)
chainer.serializers.load_npz(model_path, self.models['scale'])
return self.models
def gpu(self, n_gpu):
model_name = 'anime_style_scale_{}.npz'.format(self.color)
model_path = os.path.join(self.model_directory, model_name)
self.models['scale'] = srcnn.archs[self.model](self.channel)
chainer.serializers.load_npz(model_path, self.models['scale'])
chainer.backends.cuda.check_cuda_available()
chainer.backends.cuda.get_device(n_gpu).use()
for _, model in self.models.items():
model.to_gpu()
return self.models
class Noise(object):
def __init__(self, model='VGG7', noise_level = 1, color='rgb',
tta_level=8, tta=False, batch_size=16, block_size=128, scale_ratio=2.0,
width=0, height=0, shorter_side=0, longer_side=0, quality=None, extension='png'):
Validate.model(model)
Validate.color(color)
Validate.noise(noise_level)
self.model_directory = 'muri/models/{}'.format(model.lower())
self.models = {}
self.channel = 3 if color == 'rgb' else 1
self.color = color
self.noise_level = noise_level
self.model = model
self.tta_level = tta_level
self.tta = tta
self.batch_size = batch_size
self.block_size = block_size
self.scale_ratio = scale_ratio
self.width = width
self.height = height
self.shorter_side = shorter_side
self.longer_side = longer_side
self.quality = quality
self.extension = extension
def config(self):
settings = Namespace()
settings.model_dir = self.model_directory
settings.color = self.color
settings.noise_level = self.noise_level
settings.model = self.model
settings.ch = self.channel
settings.tta_level = self.tta_level
settings.batch_size = self.batch_size
settings.block_size = self.block_size
settings.scale_ratio = self.scale_ratio
settings.height = self.height
settings.width = self.width
settings.shorter_side = self.shorter_side
settings.longer_side = self.longer_side
settings.tta = self.tta
settings.quality = self.quality
settings.extension = self.extension
return settings
def cpu(self):
model_name = 'anime_style_noise{}_{}.npz'.format(self.noise_level, self.color)
model_path = os.path.join(self.model_directory, model_name)
if not os.path.exists(model_path):
model_name = 'anime_style_noise{}_scale_{}.npz'.format(self.noise_level, self.color)
model_path = os.path.join(self.model_directory, model_name)
self.models['noise'] = srcnn.archs[self.model](self.channel)
chainer.serializers.load_npz(model_path, self.models['noise'])
return self.models
def gpu(self, n_gpu):
model_name = 'anime_style_noise{}_{}.npz'.format(self.noise_level, self.color)
model_path = os.path.join(self.model_directory, model_name)
if not os.path.exists(model_path):
model_name = 'anime_style_noise{}_scale_{}.npz'.format(self.noise_level, self.color)
model_path = os.path.join(self.model_directory, model_name)
self.models['noise'] = srcnn.archs[self.model](self.channel)
chainer.serializers.load_npz(model_path, self.models['noise'])
chainer.backends.cuda.check_cuda_available()
chainer.backends.cuda.get_device(n_gpu).use()
for _, model in self.models.items():
model.to_gpu()
return self.models
class NoiseScale(object):
def __init__(self, model='UpConv7', noise_level=1, color='rgb',
tta_level=8, tta=False, batch_size=16, block_size=128, scale_ratio=2.0,
width=0, height=0, shorter_side=0, longer_side=0, quality=None, extension='png'):
Validate.ns_model(model)
Validate.color(color)
Validate.noise(noise_level)
self.model_directory = 'muri/models/{}'.format(model.lower())
self.models = {}
self.channel = 3 if color == 'rgb' else 1
self.color = color
self.noise_level = noise_level
self.model = model
self.tta_level = tta_level
self.tta = tta
self.batch_size = batch_size
self.block_size = block_size
self.scale_ratio = scale_ratio
self.width = width
self.height = height
self.shorter_side = shorter_side
self.longer_side = longer_side
self.quality = quality
self.extension = extension
def config(self):
settings = Namespace()
settings.model_dir = self.model_directory
settings.color = self.color
settings.noise_level = self.noise_level
settings.model = self.model
settings.ch = self.channel
settings.tta_level = self.tta_level
settings.batch_size = self.batch_size
settings.block_size = self.block_size
settings.scale_ratio = self.scale_ratio
settings.height = self.height
settings.width = self.width
settings.shorter_side = self.shorter_side
settings.longer_side = self.longer_side
settings.tta = self.tta
settings.quality = self.quality
settings.extension = self.extension
return settings
def cpu(self):
model_name = 'anime_style_noise{}_scale_{}.npz'.format(self.noise_level, self.color)
model_path = os.path.join(self.model_directory, model_name)
if os.path.exists(model_path):
self.models['noise_scale'] = srcnn.archs[self.model](self.channel)
chainer.serializers.load_npz(model_path, self.models['noise_scale'])
alpha_model_name = 'anime_style_scale_{}.npz'.format(self.color)
alpha_model_path = os.path.join(self.model_directory, alpha_model_name)
self.models['alpha'] = srcnn.archs[self.model](self.channel)
chainer.serializers.load_npz(alpha_model_path, self.models['alpha'])
return self.models
def gpu(self, n_gpu):
model_name = 'anime_style_noise{}_scale_{}.npz'.format(self.noise_level, self.color)  # match cpu(): noise_scale weights
model_path = os.path.join(self.model_directory, model_name)
if os.path.exists(model_path):
self.models['noise_scale'] = srcnn.archs[self.model](self.channel)
chainer.serializers.load_npz(model_path, self.models['noise_scale'])
alpha_model_name = 'anime_style_scale_{}.npz'.format(self.color)
alpha_model_path = os.path.join(self.model_directory, alpha_model_name)
self.models['alpha'] = srcnn.archs[self.model](self.channel)
chainer.serializers.load_npz(alpha_model_path, self.models['alpha'])
chainer.backends.cuda.check_cuda_available()
chainer.backends.cuda.get_device(n_gpu).use()
for _, model in self.models.items():
model.to_gpu()
return self.models
class Transform(object):
def __init__(self, models, settings, extension='png'):
self.models = models
self.input_exts = ['.png', '.jpg', '.jpeg', '.bmp', '.tif', '.tiff', '.webp']
self.output_exts = ['.png', '.webp']
self.settings = settings
def denoise_image(self, src, model):
dst, alpha = self.split_alpha(src, model)
six.print_('Level {} denoising...'.format(self.settings.noise_level), end=' ', flush=True)
if self.settings.tta:
dst = reconstruct.image_tta(
dst, model, self.settings.tta_level,
self.settings.block_size,
self.settings.batch_size)
else:
dst = reconstruct.image(dst, model, self.settings.block_size,
self.settings.batch_size)
if model.inner_scale != 1:
dst = dst.resize((src.size[0], src.size[1]), Image.LANCZOS)
six.print_('OK')
if alpha is not None:
dst.putalpha(alpha)
return dst
def split_alpha(self, src, model):
alpha = None
if src.mode in ('L', 'RGB', 'P'):
if isinstance(src.info.get('transparency'), bytes):
src = src.convert('RGBA')
rgb = src.convert('RGB')
if src.mode in ('LA', 'RGBA'):
six.print_('Splitting alpha channel...', end=' ', flush=True)
alpha = src.split()[-1]
rgb = iproc.alpha_make_border(rgb, alpha, model)
six.print_('OK')
return rgb, alpha
def upscale_image(self, src, scale_model, alpha_model=None):
dst, alpha = self.split_alpha(src, scale_model)
for i in range(int(np.ceil(np.log2(self.settings.scale_ratio)))):
six.print_('2.0x upscaling...', end=' ', flush=True)
model = scale_model if i == 0 or alpha_model is None else alpha_model
if model.inner_scale == 1:
dst = iproc.nn_scaling(dst, 2) # Nearest neighbor 2x scaling
alpha = iproc.nn_scaling(alpha, 2) # Nearest neighbor 2x scaling
if self.settings.tta:
dst = reconstruct.image_tta(
dst, model, self.settings.tta_level,
self.settings.block_size,
self.settings.batch_size)
else:
dst = reconstruct.image(dst, model,
self.settings.block_size,
self.settings.batch_size)
if alpha_model is None:
alpha = reconstruct.image(
alpha, scale_model, self.settings.block_size, self.settings.batch_size)
else:
alpha = reconstruct.image(
alpha, alpha_model, self.settings.block_size, self.settings.batch_size)
six.print_('OK')
dst_w = int(np.round(src.size[0] * self.settings.scale_ratio))
dst_h = int(np.round(src.size[1] * self.settings.scale_ratio))
if dst_w != dst.size[0] or dst_h != dst.size[1]:
six.print_('Resizing...', end=' ', flush=True)
dst = dst.resize((dst_w, dst_h), Image.LANCZOS)
six.print_('OK')
if alpha is not None:
if alpha.size[0] != dst_w or alpha.size[1] != dst_h:
alpha = alpha.resize((dst_w, dst_h), Image.LANCZOS)
dst.putalpha(alpha)
return dst
def build_filelist(self, input, output):
outname = None
outdir = output
outext = '.' + self.settings.extension
if os.path.isdir(input):
filelist = utils.load_filelist(input)
else:
tmpname, tmpext = os.path.splitext(os.path.basename(output))
if tmpext in self.output_exts:
outext = tmpext
outname = tmpname
outdir = os.path.dirname(output)
outdir = './' if outdir == '' else outdir
elif not tmpext == '':
raise ValueError('Format {} is not supported'.format(tmpext))
filelist = [input]
# create output directory
if not os.path.exists(outdir):
os.makedirs(outdir)
return outext, outname, outdir, filelist
def process_image(self, outext, outname, outdir, filelist):
for path in filelist:
if outname is None or len(filelist) > 1:
outname, outext = os.path.splitext(os.path.basename(path))
outpath = os.path.join(outdir, '{}{}'.format(outname, outext))
if outext.lower() in self.input_exts:
src = Image.open(path)
w, h = src.size[:2]
if self.settings.width != 0:
self.settings.scale_ratio = self.settings.width / w
elif self.settings.height != 0:
self.settings.scale_ratio = self.settings.height / h
elif self.settings.shorter_side != 0:
if w < h:
self.settings.scale_ratio = self.settings.shorter_side / w
else:
self.settings.scale_ratio = self.settings.shorter_side / h
elif self.settings.longer_side != 0:
if w > h:
self.settings.scale_ratio = self.settings.longer_side / w
else:
self.settings.scale_ratio = self.settings.longer_side / h
dst = src.copy()
outname += '_(tta{})'.format(self.settings.tta_level) if self.settings.tta else '_'
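# NOTE: returning inside the loop means only the first file is handled;
# process_collection below is the multi-file variant.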
return outdir, outpath, outname, dst, src
def process_collection(self, outext, outname, outdir, filelist):
collection = []
for path in filelist:
if outname is None or len(filelist) > 1:
outname, outext = os.path.splitext(os.path.basename(path))
outpath = os.path.join(outdir, '{}{}'.format(outname, outext))
if outext.lower() in self.input_exts:
src = Image.open(path)
w, h = src.size[:2]
if self.settings.width != 0:
self.settings.scale_ratio = self.settings.width / w
elif self.settings.height != 0:
self.settings.scale_ratio = self.settings.height / h
elif self.settings.shorter_side != 0:
if w < h:
self.settings.scale_ratio = self.settings.shorter_side / w
else:
self.settings.scale_ratio = self.settings.shorter_side / h
elif self.settings.longer_side != 0:
if w > h:
self.settings.scale_ratio = self.settings.longer_side / w
else:
self.settings.scale_ratio = self.settings.longer_side / h
dst = src.copy()
outname += '_(tta{})'.format(self.settings.tta_level) if self.settings.tta else '_'
collection.append((outdir, outpath, outname, dst, src))
return collection
def scale(self, input, output):
outext, outname, outdir, filelist = self.build_filelist(input, output)
collection = self.process_collection(outext, outname, outdir, filelist)
for features in collection:
outdir = features[0]
outpath = features[1]
outname = features[2]
dst = features[3]
src = features[4]
outname += '(scale{:.1f}x)'.format(self.settings.scale_ratio)
dst = self.upscale_image(dst, self.models['scale'])
outname += '({}_{}){}'.format(self.settings.model, self.settings.color, outext)
if os.path.exists(outpath):
outpath = os.path.join(outdir, outname)
lossless = self.settings.quality is None
quality = 100 if lossless else self.settings.quality
icc_profile = src.info.get('icc_profile')
icc_profile = "" if icc_profile is None else icc_profile
dst.convert(src.mode).save(outpath, quality=quality, lossless=lossless, icc_profile=icc_profile)
six.print_('Saved as \'{}\''.format(outpath))
def noise(self, input, output):
outext, outname, outdir, filelist = self.build_filelist(input, output)
collection = self.process_collection(outext, outname, outdir, filelist)
for features in collection:
outdir = features[0]
outpath = features[1]
outname = features[2]
dst = features[3]
src = features[4]
outname += '(noise{})'.format(self.settings.noise_level)
dst = self.denoise_image(dst, self.models['noise'])
outname += '({}_{}){}'.format(self.settings.model, self.settings.color, outext)
if os.path.exists(outpath):
outpath = os.path.join(outdir, outname)
lossless = self.settings.quality is None
quality = 100 if lossless else self.settings.quality
icc_profile = src.info.get('icc_profile')
icc_profile = "" if icc_profile is None else icc_profile
dst.convert(src.mode).save(outpath, quality=quality, lossless=lossless, icc_profile=icc_profile)
six.print_('Saved as \'{}\''.format(outpath))
def noise_scale(self, input, output):
outext, outname, outdir, filelist = self.build_filelist(input, output)
collection = self.process_collection(outext, outname, outdir, filelist)
for features in collection:
outdir = features[0]
outpath = features[1]
outname = features[2]
dst = features[3]
src = features[4]
outname += '(noise{}_scale{:.1f}x)'.format(self.settings.noise_level,
self.settings.scale_ratio)
dst = self.upscale_image(dst, self.models['noise_scale'], self.models['alpha'])
outname += '({}_{}){}'.format(self.settings.model, self.settings.color, outext)
if os.path.exists(outpath):
outpath = os.path.join(outdir, outname)
lossless = self.settings.quality is None
quality = 100 if lossless else self.settings.quality
icc_profile = src.info.get('icc_profile')
icc_profile = "" if icc_profile is None else icc_profile
dst.convert(src.mode).save(outpath, quality=quality, lossless=lossless, icc_profile=icc_profile)
six.print_('Saved as \'{}\''.format(outpath))
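# Hedged usage sketch, not part of muda.py: wiring Scale and Transform to
# 2x-upscale one image on CPU. The 'input.png' and 'out/' paths are
# hypothetical; the classes and defaults are the ones defined above.
# scaler = Scale(model='UpConv7', scale_ratio=2.0)  # Validate.* runs in __init__
# models = scaler.cpu()  # loads muri/models/upconv7/anime_style_scale_rgb.npz
# Transform(models, scaler.config()).scale('input.png', 'out/')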
| 44.186583 | 127 | 0.597191 | 2,532 | 21,077 | 4.806477 | 0.083728 | 0.075924 | 0.029581 | 0.030731 | 0.827198 | 0.797864 | 0.773213 | 0.769926 | 0.769926 | 0.761873 | 0 | 0.008707 | 0.291597 | 21,077 | 476 | 128 | 44.279412 | 0.806376 | 0.004365 | 0 | 0.730858 | 0 | 0 | 0.054671 | 0.013918 | 0 | 0 | 0 | 0 | 0 | 1 | 0.062645 | false | 0 | 0.027842 | 0 | 0.139211 | 0.025522 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
8cee9632a8eccfe71c578d581c03644a24ff23fd | 30,781 | py | Python | errorcam/models/attention_refine/atten_refine_network.py | frkl/SOBERT-XVQA-demo | c99d2f36d9ed275dfd3ba25c200019bbde197d4f | ["Apache-2.0"] | 2 | 2021-07-25T12:44:29.000Z | 2021-07-27T08:55:48.000Z | errorcam/models/attention_refine/atten_refine_network.py | frkl/SOBERT-XVQA-demo | c99d2f36d9ed275dfd3ba25c200019bbde197d4f | ["Apache-2.0"] | null | null | null | errorcam/models/attention_refine/atten_refine_network.py | frkl/SOBERT-XVQA-demo | c99d2f36d9ed275dfd3ba25c200019bbde197d4f | ["Apache-2.0"] | 1 | 2021-07-25T12:44:30.000Z | 2021-07-25T12:44:30.000Z |
import torch
from torch import nn
from torch.nn import functional as F
from math import sqrt
import pdb
# a custom loss, not used
def softXEnt(input, target):
logprobs = torch.nn.functional.log_softmax(input, dim=1)
return -(target * logprobs).sum() / input.shape[0]
class attention_refine_net(nn.Module):
def __init__(self, atten_dim = 4*12*115*115, hidden_feat_size=128, q_feat_dim=300, im_feat_dim=36*2048, out_atten_dim=14*14):
super().__init__()
self.atten_input = nn.Sequential(
nn.Linear(atten_dim, hidden_feat_size),
nn.LeakyReLU(0.1)
)
self.im_feat_in = nn.Sequential(
nn.Linear(im_feat_dim, hidden_feat_size),
nn.LeakyReLU(0.1)
)
self.question_feat_in = nn.Sequential(
nn.Linear(q_feat_dim, hidden_feat_size),
nn.LeakyReLU(0.1)
)
self.linear_block_attention_out = nn.Sequential(
nn.Linear(hidden_feat_size*3, hidden_feat_size),
nn.LeakyReLU(0.1),
nn.Linear(hidden_feat_size,14*14),
nn.Softmax(dim=1)
)
def forward(self, attention, im_feat, q_feat):
att_feats = self.atten_input(attention)
im_feats = self.im_feat_in(im_feat)
q_feats = self.question_feat_in(q_feat)
concat_feat = torch.cat((att_feats, im_feats, q_feats), dim=1)
att_out = self.linear_block_attention_out(concat_feat)
return {'refined_attn':att_out}
class attention_refine_net_ansconf(nn.Module):
def __init__(self, atten_dim = 4*12*115*115, hidden_feat_size=64, q_feat_dim=300, im_feat_dim=36*2048, ans_dim=3000, out_atten_dim=14*14):
super().__init__()
self.atten_input = nn.Sequential(
nn.Linear(atten_dim, hidden_feat_size),
nn.LeakyReLU(0.1)
)
self.im_feat_in = nn.Sequential(
nn.Linear(im_feat_dim, hidden_feat_size),
nn.LeakyReLU(0.1)
)
self.question_feat_in = nn.Sequential(
nn.Linear(q_feat_dim, hidden_feat_size),
nn.LeakyReLU(0.1)
)
self.answer_score_in = nn.Sequential(
nn.Linear(ans_dim, hidden_feat_size),
nn.LeakyReLU(0.1)
)
self.linear_block_attention_out = nn.Sequential(
nn.Linear(hidden_feat_size*4, hidden_feat_size),
nn.LeakyReLU(0.1),
nn.Linear(hidden_feat_size,14*14),
nn.Softmax(dim=1)
)
def forward(self, attention, im_feat, q_feat, ans_scores):
att_feats = self.atten_input(attention)
im_feats = self.im_feat_in(im_feat)
q_feats = self.question_feat_in(q_feat)
ans_score_feat = self.answer_score_in(ans_scores)
concat_feat = torch.cat((att_feats, im_feats, q_feats, ans_score_feat), dim=1)
att_out = self.linear_block_attention_out(concat_feat)
return {'refined_attn':att_out}
class attention_refine_net_corrpred(nn.Module):
def __init__(self, atten_dim = 4*12*115*115, hidden_feat_size=64, q_feat_dim=300, im_feat_dim=36*2048, ans_dim=3000, out_atten_dim=14*14, att_out_nonlin="Softmax"):
super().__init__()
self.atten_input = nn.Sequential(
nn.Linear(atten_dim, hidden_feat_size*2),
nn.LeakyReLU(0.1)
)
self.im_feat_in = nn.Sequential(
nn.Linear(im_feat_dim, hidden_feat_size*2),
nn.LeakyReLU(0.1)
)
self.question_feat_in = nn.Sequential(
nn.Linear(q_feat_dim, hidden_feat_size*2),
nn.LeakyReLU(0.1)
)
self.answer_score_in = nn.Sequential(
nn.Linear(ans_dim, hidden_feat_size*2),
nn.LeakyReLU(0.1)
)
self.concat2feat = nn.Sequential(
nn.Linear(hidden_feat_size*8, hidden_feat_size*4),
nn.LeakyReLU(0.1),
nn.Linear(hidden_feat_size*4, hidden_feat_size*3),
nn.LeakyReLU(0.1),
)
self.linear_block_attention_out = nn.Sequential(
nn.Linear(hidden_feat_size*3, hidden_feat_size*3),
nn.LeakyReLU(0.1),
nn.Linear(hidden_feat_size*3,14*14),
getattr(nn, att_out_nonlin)()
)
self.linear_block_corr_pred = nn.Sequential(
nn.Linear(hidden_feat_size*3, 1),
nn.Sigmoid()
)
def forward(self, attention, im_feat, q_feat, ans_scores):
att_feats = self.atten_input(attention)
im_feats = self.im_feat_in(im_feat)
q_feats = self.question_feat_in(q_feat)
ans_score_feat = self.answer_score_in(ans_scores)
concat_feat = torch.cat((att_feats, im_feats, q_feats, ans_score_feat), dim=1)
feats = self.concat2feat(concat_feat)
refined_attention = self.linear_block_attention_out(feats)
corr_pred = self.linear_block_corr_pred(feats)
return {'refined_attn':refined_attention, 'corr_pred':corr_pred}
class attention_refine_net_anspred(nn.Module):
def __init__(self, atten_dim = 4*12*115*115, hidden_feat_size=64, q_feat_dim=300, im_feat_dim=36*2048, ans_dim=3000, out_atten_dim=14*14, att_out_nonlin="Softmax"):
super().__init__()
self.atten_input = nn.Sequential(
nn.Linear(atten_dim, hidden_feat_size*2),
nn.LeakyReLU(0.1)
)
self.im_feat_in = nn.Sequential(
nn.Linear(im_feat_dim, hidden_feat_size*2),
nn.LeakyReLU(0.1)
)
self.question_feat_in = nn.Sequential(
nn.Linear(q_feat_dim, hidden_feat_size*2),
nn.LeakyReLU(0.1)
)
self.answer_score_in = nn.Sequential(
nn.Linear(ans_dim, hidden_feat_size*2),
nn.LeakyReLU(0.1)
)
self.concat2feat = nn.Sequential(
nn.Linear(hidden_feat_size*8, hidden_feat_size*4),
nn.LeakyReLU(0.1),
nn.Linear(hidden_feat_size*4, hidden_feat_size*3),
nn.LeakyReLU(0.1),
)
self.linear_block_attention_out = nn.Sequential(
nn.Linear(hidden_feat_size*3, hidden_feat_size*3),
nn.LeakyReLU(0.1),
nn.Linear(hidden_feat_size*3,14*14),
getattr(nn, att_out_nonlin)()
)
self.linear_block_ans_pred = nn.Linear(hidden_feat_size*3, ans_dim)
self.att_out_nonlin = att_out_nonlin
def forward(self, attention, im_feat, q_feat, ans_scores):
att_feats = self.atten_input(attention)
im_feats = self.im_feat_in(im_feat)
q_feats = self.question_feat_in(q_feat)
ans_score_feat = self.answer_score_in(ans_scores)
concat_feat = torch.cat((att_feats, im_feats, q_feats, ans_score_feat), dim=1)
feats = self.concat2feat(concat_feat)
refined_attention = self.linear_block_attention_out(feats)
ans_pred = self.linear_block_ans_pred(feats)
return {'refined_attn':refined_attention, 'ans_pred':ans_pred}
class uncertain_attention_net_cam(nn.Module):
def __init__(self, atten_dim = 4*12*115*115, hidden_feat_size=96, q_feat_dim=300, im_feat_dim=(7,7,2048), ans_dim=3129, num_class=1):
super().__init__()
self.atten_input = nn.Sequential(
nn.Linear(atten_dim, hidden_feat_size),
nn.LeakyReLU(0.1)
)
self.im_feat_in = nn.Conv2d(im_feat_dim[-1], 1024, 1)
self.im_feat_linear = nn.Sequential(
nn.Linear(im_feat_dim[0]*im_feat_dim[1]*1024, hidden_feat_size),
nn.LeakyReLU()
)
self.question_feat_in = nn.Sequential(
nn.Linear(q_feat_dim, hidden_feat_size),
nn.LeakyReLU(0.1)
)
self.answer_score_in = nn.Sequential(
nn.Linear(ans_dim, hidden_feat_size),
nn.LeakyReLU(0.1)
)
if num_class==1:
self.corr_pred_layer = nn.Sequential(
nn.Linear(hidden_feat_size*4, num_class),
nn.Sigmoid()
)
else:
self.corr_pred_layer = nn.Sequential(
nn.Linear(hidden_feat_size*4, hidden_feat_size),
nn.Linear(hidden_feat_size, hidden_feat_size),
nn.Linear(hidden_feat_size, hidden_feat_size//2),
nn.Linear(hidden_feat_size//2, num_class)
)
self.gradients = []
def save_gradients(self, grad):
self.gradients = [grad]
def forward(self, attention, im_feat, q_feat, ans_scores):
att_feats = self.atten_input(attention)
im_feats_feature = self.im_feat_in(im_feat)
im_feats_feature.register_hook(self.save_gradients)
im_feats = im_feats_feature.view(im_feats_feature.size(0), -1)
im_feats = self.im_feat_linear(im_feats)
q_feats = self.question_feat_in(q_feat)
ans_feat = self.answer_score_in(ans_scores)
concat_feats = torch.cat((att_feats, im_feats, q_feats, ans_feat), dim=1)
corr_pred = self.corr_pred_layer(concat_feats)
return {'wrong_pred':corr_pred, 'im_feature':[im_feats_feature]}
class uncertainatt_refinedatt_net_cam(nn.Module):
def __init__(self, atten_dim = 4*12*115*115, hidden_feat_size=96, q_feat_dim=300, im_feat_dim=(7,7,2048), ans_dim=3129, ques_cam=False):
super().__init__()
self.atten_input = nn.Sequential(
nn.Linear(atten_dim, hidden_feat_size),
nn.LeakyReLU(0.1)
)
self.im_feat_in = nn.Conv2d(im_feat_dim[-1], 1024, 1)
self.im_feat_linear = nn.Sequential(
nn.Linear(im_feat_dim[0]*im_feat_dim[1]*1024, hidden_feat_size),
nn.LeakyReLU()
)
self.question_feat_in = nn.Sequential(
nn.Linear(q_feat_dim, hidden_feat_size),
nn.LeakyReLU(0.1)
)
self.answer_score_in = nn.Sequential(
nn.Linear(ans_dim, hidden_feat_size),
nn.LeakyReLU(0.1)
)
self.corr_pred_layer = nn.Sequential(
nn.Linear(hidden_feat_size*4, hidden_feat_size),
nn.LeakyReLU(),
nn.Linear(hidden_feat_size, 1),
nn.Sigmoid()
)
self.linear_block_attention_out = nn.Sequential(
nn.Linear(hidden_feat_size*4, hidden_feat_size*3),
nn.LeakyReLU(0.1),
nn.Linear(hidden_feat_size*3,hidden_feat_size),
nn.LeakyReLU(0.1),
nn.Linear(hidden_feat_size,7*7),
nn.Sigmoid()
)
self.gradients = []
self.gradients_qfeat = []
self.ques_cam = ques_cam
def save_gradients(self, grad):
self.gradients = [grad]
def save_gradients_qfeat(self, grad):
self.gradients_qfeat = [grad]
def forward(self, attention, im_feat, q_feat, ans_scores):
att_feats = self.atten_input(attention)
im_feats_feature = self.im_feat_in(im_feat)
im_feats_feature.register_hook(self.save_gradients)
im_feats = im_feats_feature.view(im_feats_feature.size(0), -1)
im_feats = self.im_feat_linear(im_feats)
if self.ques_cam:
q_feat.register_hook(self.save_gradients_qfeat)
q_feats = self.question_feat_in(q_feat)
ans_feat = self.answer_score_in(ans_scores)
concat_feats = torch.cat((att_feats, im_feats, q_feats, ans_feat), dim=1)
corr_pred = self.corr_pred_layer(concat_feats)
refined_attn = self.linear_block_attention_out(concat_feats)
return {'wrong_pred':corr_pred, 'im_feature':im_feats_feature, 'refined_attn': refined_attn, 'q_feats':q_feat}
class uncertainatt_refinedatt_net_cam_bigger(nn.Module):
def __init__(self, atten_dim = 4*12*115*115, hidden_feat_size=96, q_feat_dim=300, im_feat_dim=(7,7,2048), ans_dim=3129, ques_cam=False):
super().__init__()
self.atten_input = nn.Sequential(
nn.Linear(atten_dim, hidden_feat_size),
nn.LeakyReLU(0.1),
nn.Linear(hidden_feat_size, hidden_feat_size),
nn.LeakyReLU(0.1),
nn.Linear(hidden_feat_size, hidden_feat_size),
nn.LeakyReLU(0.1)
)
self.im_feat_in = nn.Conv2d(im_feat_dim[-1], 1024, 1)
self.im_feat_linear = nn.Sequential(
nn.Linear(im_feat_dim[0]*im_feat_dim[1]*1024, hidden_feat_size),
nn.LeakyReLU(0.1),
nn.Linear(hidden_feat_size, hidden_feat_size),
nn.LeakyReLU(0.1)
)
self.question_feat_in = nn.Sequential(
nn.Linear(q_feat_dim, hidden_feat_size),
nn.LeakyReLU(0.1),
nn.Linear(hidden_feat_size, hidden_feat_size),
nn.LeakyReLU(0.1)
)
self.answer_score_in = nn.Sequential(
nn.Linear(ans_dim, hidden_feat_size),
nn.LeakyReLU(0.1)
)
self.corr_pred_layer = nn.Sequential(
nn.Linear(hidden_feat_size*4, hidden_feat_size),
nn.LeakyReLU(0.1),
nn.Linear(hidden_feat_size, hidden_feat_size//2),
nn.LeakyReLU(0.1),
nn.Linear(hidden_feat_size//2, 1),
nn.Sigmoid()
)
self.linear_block_attention_out = nn.Sequential(
nn.Linear(hidden_feat_size*4, hidden_feat_size*3),
nn.LeakyReLU(0.1),
nn.Linear(hidden_feat_size*3,hidden_feat_size),
nn.LeakyReLU(0.1),
nn.Linear(hidden_feat_size,7*7),
nn.Sigmoid()
)
self.gradients = []
self.gradients_qfeat = []
self.ques_cam = ques_cam
def save_gradients(self, grad):
self.gradients = [grad]
def save_gradients_qfeat(self, grad):
self.gradients_qfeat = [grad]
def forward(self, attention, im_feat, q_feat, ans_scores):
att_feats = self.atten_input(attention)
im_feats_feature = self.im_feat_in(im_feat)
im_feats_feature.register_hook(self.save_gradients)
im_feats = im_feats_feature.view(im_feats_feature.size(0), -1)
im_feats = self.im_feat_linear(im_feats)
if self.ques_cam:
q_feat.register_hook(self.save_gradients_qfeat)
q_feats = self.question_feat_in(q_feat)
ans_feat = self.answer_score_in(ans_scores)
concat_feats = torch.cat((att_feats, im_feats, q_feats, ans_feat), dim=1)
corr_pred = self.corr_pred_layer(concat_feats)
refined_attn = self.linear_block_attention_out(concat_feats)
return {'wrong_pred':corr_pred, 'im_feature':im_feats_feature, 'refined_attn': refined_attn, 'q_feats':q_feat}
class uncertainatt_net_cam_bigger(nn.Module):
def __init__(self, atten_dim = 4*12*115*115, hidden_feat_size=96, q_feat_dim=300, im_feat_dim=(7,7,2048), ans_dim=3129, ques_cam=False):
super().__init__()
self.atten_input = nn.Sequential(
nn.Linear(atten_dim, hidden_feat_size),
nn.LeakyReLU(0.1),
nn.Linear(hidden_feat_size, hidden_feat_size),
nn.LeakyReLU(0.1),
nn.Linear(hidden_feat_size, hidden_feat_size),
nn.LeakyReLU(0.1)
)
self.im_feat_in = nn.Conv2d(im_feat_dim[-1], 1024, 1)
self.im_feat_linear = nn.Sequential(
nn.Linear(im_feat_dim[0]*im_feat_dim[1]*1024, hidden_feat_size),
nn.LeakyReLU(0.1),
nn.Linear(hidden_feat_size, hidden_feat_size),
nn.LeakyReLU(0.1)
)
self.question_feat_in = nn.Sequential(
nn.Linear(q_feat_dim, hidden_feat_size),
nn.LeakyReLU(0.1),
nn.Linear(hidden_feat_size, hidden_feat_size),
nn.LeakyReLU(0.1)
)
self.answer_score_in = nn.Sequential(
nn.Linear(ans_dim, hidden_feat_size),
nn.LeakyReLU(0.1)
)
self.corr_pred_layer = nn.Sequential(
nn.Linear(hidden_feat_size*4, hidden_feat_size),
nn.LeakyReLU(0.1),
nn.Linear(hidden_feat_size, hidden_feat_size//2),
nn.LeakyReLU(0.1),
nn.Linear(hidden_feat_size//2, 1),
nn.Sigmoid()
)
self.gradients = []
self.gradients_qfeat = []
self.ques_cam = ques_cam
def save_gradients(self, grad):
self.gradients = [grad]
def save_gradients_qfeat(self, grad):
self.gradients_qfeat = [grad]
def forward(self, attention, im_feat, q_feat, ans_scores):
att_feats = self.atten_input(attention)
im_feats_feature = self.im_feat_in(im_feat)
im_feats_feature.register_hook(self.save_gradients)
im_feats = im_feats_feature.view(im_feats_feature.size(0), -1)
im_feats = self.im_feat_linear(im_feats)
if self.ques_cam:
q_feat.register_hook(self.save_gradients_qfeat)
q_feats = self.question_feat_in(q_feat)
ans_feat = self.answer_score_in(ans_scores)
concat_feats = torch.cat((att_feats, im_feats, q_feats, ans_feat), dim=1)
corr_pred = self.corr_pred_layer(concat_feats)
return {'wrong_pred':corr_pred, 'im_feature':im_feats_feature, 'q_feats':q_feat}
class uncertainatt_refinedatt_agnosticnet_cam_bigger(nn.Module):
def __init__(self, hidden_feat_size=96, q_feat_dim=300, im_feat_dim=(7,7,2048), ans_dim=3129, ques_cam=False):
super().__init__()
self.im_feat_in = nn.Conv2d(im_feat_dim[-1], 1024, 1)
self.im_feat_linear = nn.Sequential(
nn.Linear(im_feat_dim[0]*im_feat_dim[1]*1024, hidden_feat_size),
nn.LeakyReLU(0.1),
nn.Linear(hidden_feat_size, hidden_feat_size),
nn.LeakyReLU(0.1)
)
self.question_feat_in = nn.Sequential(
nn.Linear(q_feat_dim, hidden_feat_size),
nn.LeakyReLU(0.1),
nn.Linear(hidden_feat_size, hidden_feat_size),
nn.LeakyReLU(0.1)
)
self.answer_score_in = nn.Sequential(
nn.Linear(ans_dim, hidden_feat_size),
nn.LeakyReLU(0.1)
)
self.corr_pred_layer = nn.Sequential(
nn.Linear(hidden_feat_size*3, hidden_feat_size),
nn.LeakyReLU(0.1),
nn.Linear(hidden_feat_size, hidden_feat_size//2),
nn.LeakyReLU(0.1),
nn.Linear(hidden_feat_size//2, 1),
nn.Sigmoid()
)
self.gradients = []
self.gradients_qfeat = []
self.ques_cam = ques_cam
def save_gradients(self, grad):
self.gradients = [grad]
def save_gradients_qfeat(self, grad):
self.gradients_qfeat = [grad]
def forward(self, attention, im_feat, q_feat, ans_scores):
im_feats_feature = self.im_feat_in(im_feat)
im_feats_feature.register_hook(self.save_gradients)
im_feats = im_feats_feature.view(im_feats_feature.size(0), -1)
im_feats = self.im_feat_linear(im_feats)
if self.ques_cam:
q_feat.register_hook(self.save_gradients_qfeat)
q_feats = self.question_feat_in(q_feat)
ans_feat = self.answer_score_in(ans_scores)
concat_feats = torch.cat((im_feats, q_feats, ans_feat), dim=1)
corr_pred = self.corr_pred_layer(concat_feats)
return {'wrong_pred':corr_pred, 'im_feature':im_feats_feature, 'q_feats':q_feat}
class uncertainatt_refinedatt_net_cam_bigger_errorcondatt(nn.Module):
def __init__(self, atten_dim = 4*12*115*115, hidden_feat_size=96, q_feat_dim=300, im_feat_dim=(7,7,2048), ans_dim=3129, ques_cam=False):
super().__init__()
self.atten_input = nn.Sequential(
nn.Linear(atten_dim, hidden_feat_size),
nn.LeakyReLU(0.1),
nn.Linear(hidden_feat_size, hidden_feat_size),
nn.LeakyReLU(0.1),
nn.Linear(hidden_feat_size, hidden_feat_size),
nn.LeakyReLU(0.1)
)
self.im_feat_in = nn.Conv2d(im_feat_dim[-1], 1024, 1)
self.im_feat_linear = nn.Sequential(
nn.Linear(im_feat_dim[0]*im_feat_dim[1]*1024, hidden_feat_size),
nn.LeakyReLU(0.1)
)
self.im_feat_linear2 = nn.Sequential(
nn.Linear(im_feat_dim[0]*im_feat_dim[1]*1024, hidden_feat_size),
nn.LeakyReLU(0.1)
)
self.question_feat_in = nn.Sequential(
nn.Linear(q_feat_dim, hidden_feat_size),
nn.LeakyReLU(0.1),
nn.Linear(hidden_feat_size, hidden_feat_size),
nn.LeakyReLU(0.1)
)
self.answer_score_in = nn.Sequential(
nn.Linear(ans_dim, hidden_feat_size),
nn.LeakyReLU(0.1)
)
self.corr_pred_layer = nn.Sequential(
nn.Linear(hidden_feat_size*4, hidden_feat_size//2),
nn.LeakyReLU(0.1),
nn.Linear(hidden_feat_size//2, 1),
nn.Sigmoid()
)
self.linear_block_attention_out = nn.Sequential(
nn.Linear(hidden_feat_size*4, hidden_feat_size*3),
nn.LeakyReLU(0.1),
nn.Linear(hidden_feat_size*3,hidden_feat_size),
nn.LeakyReLU(0.1),
nn.Linear(hidden_feat_size,7*7),
nn.Sigmoid()
)
self.gradients = []
self.gradients_qfeat = []
self.gradients_bert = []
self.ques_cam = ques_cam
def save_gradients(self, grad):
self.gradients = [grad]
def save_gradients_qfeat(self, grad):
self.gradients_qfeat = [grad]
def save_gradients_bert(self, grad):
self.gradients_bert = [grad]
def forward(self, attention, im_feat, q_feat, ans_scores):
#if self.ques_cam:
# attention.register_hook(self.save_gradients_bert)
att_feats = self.atten_input(attention)
im_feats_feature = self.im_feat_in(im_feat)
im_feats_feature.register_hook(self.save_gradients)
im_feats = im_feats_feature.view(im_feats_feature.size(0), -1)
im_feats = self.im_feat_linear(im_feats)
if self.ques_cam:
q_feat.register_hook(self.save_gradients_qfeat)
q_feats = self.question_feat_in(q_feat)
ans_feat = self.answer_score_in(ans_scores)
concat_feats = torch.cat((att_feats, im_feats, q_feats, ans_feat), dim=1)
refined_attn = self.linear_block_attention_out(concat_feats)
ref_att_format = refined_attn.view(-1, 7,7).unsqueeze(1).repeat(1,1024,1,1)
im_feats_feature_weighted = im_feats_feature*ref_att_format
im_feats_weighted = im_feats_feature_weighted.view(im_feats_feature_weighted.size(0), -1)
im_feats_weighted = self.im_feat_linear2(im_feats_weighted)
concat_feats_weighted = torch.cat((att_feats, im_feats_weighted, q_feats, ans_feat), dim=1)
corr_pred = self.corr_pred_layer(concat_feats_weighted)
return {'wrong_pred':corr_pred, 'im_feature':im_feats_feature, 'refined_attn': refined_attn, 'q_feats':q_feat}
class uncertain_attention_net_noans_cam(nn.Module):
def __init__(self, atten_dim = 4*12*115*115, hidden_feat_size=96, q_feat_dim=300, im_feat_dim=(7,7,2048)):
super().__init__()
self.atten_input = nn.Sequential(
nn.Linear(atten_dim, hidden_feat_size),
nn.LeakyReLU(0.1)
)
self.im_feat_in = nn.Conv2d(im_feat_dim[-1], 1024, 1)
self.im_feat_linear = nn.Sequential(
nn.Linear(im_feat_dim[0]*im_feat_dim[1]*1024, hidden_feat_size),
nn.LeakyReLU()
)
self.question_feat_in = nn.Sequential(
nn.Linear(q_feat_dim, hidden_feat_size),
nn.LeakyReLU(0.1)
)
self.corr_pred_layer = nn.Sequential(
nn.Linear(hidden_feat_size*3, 1),
nn.Sigmoid()
)
self.gradients = []
def save_gradients(self, grad):
self.gradients = [grad]
def forward(self, attention, im_feat, q_feat):
att_feats = self.atten_input(attention)
im_feats_feature = self.im_feat_in(im_feat)
im_feats_feature.register_hook(self.save_gradients)
im_feats = im_feats_feature.view(im_feats_feature.size(0), -1)
im_feats = self.im_feat_linear(im_feats)
q_feats = self.question_feat_in(q_feat)
concat_feats = torch.cat((att_feats, im_feats, q_feats), dim=1)
corr_pred = self.corr_pred_layer(concat_feats)
return {'wrong_pred':corr_pred, 'im_feature':[im_feats_feature]}
class uncertain_attention_net(nn.Module):
def __init__(self, hidden_feat_size=64, q_feat_dim=300, im_feat_dim=36*2048):
super().__init__()
self.im_feat_in = nn.Sequential(
nn.Linear(im_feat_dim, hidden_feat_size),
nn.LeakyReLU(0.1)
)
self.question_feat_in = nn.Sequential(
nn.Linear(q_feat_dim, hidden_feat_size),
nn.LeakyReLU(0.1)
)
self.attention_layer = nn.Linear(hidden_feat_size*2, 7*7*2048)
self.weighted_im_layer = nn.Sequential(
nn.Linear(im_feat_dim, hidden_feat_size),
nn.LeakyReLU(0.1)
)
self.corr_pred_layer = nn.Sequential(
nn.Linear(hidden_feat_size*2, 1),
nn.Sigmoid()
)
def forward(self, im_feat, q_feat):
im_feats = self.im_feat_in(im_feat)
q_feats = self.question_feat_in(q_feat)
concat_feats = torch.cat((im_feats, q_feats), dim=1)
attention = self.attention_layer(concat_feats)
attention = attention.view(-1, 7*7,2048)
attention = F.softmax(attention, dim=1)
attention = attention.view(-1, 7*7*2048)
weighted_im = attention*im_feat
weighted_im_feats = self.weighted_im_layer(weighted_im)
concat_weighted_feats = torch.cat((weighted_im_feats, q_feats), dim=1)
corr_pred = self.corr_pred_layer(concat_weighted_feats)
return {'wrong_pred':corr_pred, 'wrong_att':attention}
class quescaptionmatch_failurepred(nn.Module):
def __init__(self, atten_dim = 4*12*115*115, hidden_feat_size=96, q_feat_dim=300, im_feat_dim=(7,7,2048), ans_dim=3129):
super().__init__()
self.atten_input = nn.Sequential(
nn.Linear(atten_dim, hidden_feat_size),
nn.LeakyReLU(0.1)
)
self.im_feat_in = nn.Conv2d(im_feat_dim[-1], 1024, 1)
self.im_feat_linear = nn.Sequential(
nn.Linear(im_feat_dim[0]*im_feat_dim[1]*1024, hidden_feat_size),
nn.LeakyReLU()
)
self.question_feat_in = nn.Sequential(
nn.Linear(q_feat_dim, hidden_feat_size),
nn.LeakyReLU(0.1)
)
self.quescap_feat_in = nn.Sequential(
nn.Linear(q_feat_dim, hidden_feat_size),
nn.LeakyReLU(0.1)
)
self.answer_score_in = nn.Sequential(
nn.Linear(ans_dim, hidden_feat_size),
nn.LeakyReLU(0.1)
)
self.corr_pred_layer = nn.Sequential(
nn.Linear(hidden_feat_size*5, 1),
nn.Sigmoid()
)
self.gradients = []
def save_gradients(self, grad):
self.gradients = [grad]
def forward(self, attention, im_feat, q_feat, q_cap_feat, ans_scores):
att_feats = self.atten_input(attention)
im_feats_feature = self.im_feat_in(im_feat)
im_feats_feature.register_hook(self.save_gradients)
im_feats = im_feats_feature.view(im_feats_feature.size(0), -1)
im_feats = self.im_feat_linear(im_feats)
q_feats = self.question_feat_in(q_feat)
q_cap_feats = self.quescap_feat_in(q_cap_feat)
ans_feat = self.answer_score_in(ans_scores)
concat_feats = torch.cat((att_feats, im_feats, q_feats, q_cap_feats, ans_feat), dim=1)
corr_pred = self.corr_pred_layer(concat_feats)
return {'wrong_pred':corr_pred, 'im_feature':[im_feats_feature]}
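# Hedged smoke test, not in the original file: one forward pass through
# attention_refine_net with random tensors shaped to match the constructor
# defaults above (batch size 2 is arbitrary).
if __name__ == '__main__':
    net = attention_refine_net()
    attn = torch.rand(2, 4 * 12 * 115 * 115)
    im_feat = torch.rand(2, 36 * 2048)
    q_feat = torch.rand(2, 300)
    out = net(attn, im_feat, q_feat)
    print(out['refined_attn'].shape)  # expected: torch.Size([2, 196])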
| 32.469409 | 168 | 0.622657 | 4,288 | 30,781 | 4.090951 | 0.028451 | 0.100901 | 0.141261 | 0.066697 | 0.935298 | 0.927146 | 0.919051 | 0.90993 | 0.907023 | 0.902121 | 0 | 0.035392 | 0.270232 | 30,781 | 947 | 169 | 32.503696 | 0.745537 | 0.003021 | 0 | 0.777108 | 0 | 0 | 0.011603 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.067771 | false | 0 | 0.00753 | 0 | 0.118976 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
50780ea051fda0aaf1679955b3b1de3b0788789a | 192 | py | Python | main.py | abdurrahmanabzd/path_planning_demo_live | 75891ccf83dcb219edeb654f55da2618c6b60a2d | ["MIT"] | 68 | 2016-12-14T19:53:01.000Z | 2022-03-05T08:34:24.000Z | main.py | abdurrahmanabzd/path_planning_demo_live | 75891ccf83dcb219edeb654f55da2618c6b60a2d | ["MIT"] | null | null | null | main.py | abdurrahmanabzd/path_planning_demo_live | 75891ccf83dcb219edeb654f55da2618c6b60a2d | ["MIT"] | 45 | 2016-12-15T01:51:49.000Z | 2022-03-05T08:34:17.000Z |
import process_image
occupied_grids, planned_path = process_image.main("test_images/test_image3.jpg")
print "Occupied Grids : "
print occupied_grids
print "Planned Path :"
print planned_path
| 24 | 80 | 0.817708 | 27 | 192 | 5.518519 | 0.481481 | 0.261745 | 0.241611 | 0.308725 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.005814 | 0.104167 | 192 | 7 | 81 | 27.428571 | 0.860465 | 0 | 0 | 0 | 0 | 0 | 0.303665 | 0.141361 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.166667 | null | null | 0.666667 | 1 | 0 | 0 | null | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 7 |
0f9f58eec1f0f08d8029bb8e8925ee2009c0b40e | 6,106 | py | Python | tests/Unit/Evolution/Systems/RadiationTransport/M1Grey/BoundaryConditions/DirichletAnalytic.py | nilsvu/spectre | 1455b9a8d7e92db8ad600c66f54795c29c3052ee | ["MIT"] | 117 | 2017-04-08T22:52:48.000Z | 2022-03-25T07:23:36.000Z | tests/Unit/Evolution/Systems/RadiationTransport/M1Grey/BoundaryConditions/DirichletAnalytic.py | GitHimanshuc/spectre | 4de4033ba36547113293fe4dbdd77591485a4aee | ["MIT"] | 3,177 | 2017-04-07T21:10:18.000Z | 2022-03-31T23:55:59.000Z | tests/Unit/Evolution/Systems/RadiationTransport/M1Grey/BoundaryConditions/DirichletAnalytic.py | geoffrey4444/spectre | 9350d61830b360e2d5b273fdd176dcc841dbefb0 | ["MIT"] | 85 | 2017-04-07T19:36:13.000Z | 2022-03-01T10:21:00.000Z |
# Distributed under the MIT License.
# See LICENSE.txt for details.
import numpy as np
import PointwiseFunctions.AnalyticSolutions.RadiationTransport.M1Grey.\
TestFunctions as soln
import Evolution.Systems.RadiationTransport.M1Grey.Fluxes as fluxes
def soln_error(face_mesh_velocity, outward_directed_normal_covector,
outward_directed_normal_vector, coords, time, dim):
return None
_soln_mean_velocity = np.array([0.1, 0.2, 0.3])
_soln_comoving_energy_density = 0.4
# There is no Python implementation of the M1Closure, so for now we hardcode
# the pressure tensor with values obtained by running the C++ code run with
# the same input parameters. This is hacky, because it couples the C++ and
# Python implementations and makes the test less robust.
_tilde_p_values_from_cxx = np.array(
[[0.13953488372093026, 0.012403100775193798, 0.018604651162790694],
[0.012403100775193798, 0.15813953488372096, 0.037209302325581388],
[0.018604651162790694, 0.037209302325581388, 0.18914728682170545]])
def soln_tilde_e_nue(face_mesh_velocity, outward_directed_normal_covector,
outward_directed_normal_vector, coords, time, dim):
return soln.constant_m1_tildeE(coords, time, _soln_mean_velocity,
_soln_comoving_energy_density)
def soln_tilde_e_bar_nue(face_mesh_velocity, outward_directed_normal_covector,
outward_directed_normal_vector, coords, time, dim):
return soln.constant_m1_tildeE(coords, time, _soln_mean_velocity,
_soln_comoving_energy_density)
def soln_tilde_s_nue(face_mesh_velocity, outward_directed_normal_covector,
outward_directed_normal_vector, coords, time, dim):
return soln.constant_m1_tildeS(coords, time, _soln_mean_velocity,
_soln_comoving_energy_density)
def soln_tilde_s_bar_nue(face_mesh_velocity, outward_directed_normal_covector,
outward_directed_normal_vector, coords, time, dim):
return soln.constant_m1_tildeS(coords, time, _soln_mean_velocity,
_soln_comoving_energy_density)
def soln_flux_tilde_e_nue(face_mesh_velocity, outward_directed_normal_covector,
outward_directed_normal_vector, coords, time, dim):
tilde_e = soln_tilde_e_nue(face_mesh_velocity,
outward_directed_normal_covector,
outward_directed_normal_vector, coords, time,
dim)
tilde_s = soln_tilde_s_nue(face_mesh_velocity,
outward_directed_normal_covector,
outward_directed_normal_vector, coords, time,
dim)
tilde_p = _tilde_p_values_from_cxx
lapse = 1.0
shift = np.array([0.0, 0.0, 0.0])
spatial_metric = np.identity(3)
inv_spatial_metric = np.identity(3)
return fluxes.tilde_e_flux(tilde_e, tilde_s, tilde_p, lapse, shift,
spatial_metric, inv_spatial_metric)
def soln_flux_tilde_e_bar_nue(face_mesh_velocity,
outward_directed_normal_covector,
outward_directed_normal_vector, coords, time,
dim):
tilde_e = soln_tilde_e_bar_nue(face_mesh_velocity,
outward_directed_normal_covector,
outward_directed_normal_vector, coords,
time, dim)
tilde_s = soln_tilde_s_bar_nue(face_mesh_velocity,
outward_directed_normal_covector,
outward_directed_normal_vector, coords,
time, dim)
tilde_p = _tilde_p_values_from_cxx
lapse = 1.0
shift = np.array([0.0, 0.0, 0.0])
spatial_metric = np.identity(3)
inv_spatial_metric = np.identity(3)
return fluxes.tilde_e_flux(tilde_e, tilde_s, tilde_p, lapse, shift,
spatial_metric, inv_spatial_metric)
def soln_flux_tilde_s_nue(face_mesh_velocity, outward_directed_normal_covector,
outward_directed_normal_vector, coords, time, dim):
tilde_e = soln_tilde_e_nue(face_mesh_velocity,
outward_directed_normal_covector,
outward_directed_normal_vector, coords, time,
dim)
tilde_s = soln_tilde_s_nue(face_mesh_velocity,
outward_directed_normal_covector,
outward_directed_normal_vector, coords, time,
dim)
tilde_p = _tilde_p_values_from_cxx
lapse = 1.0
shift = np.array([0.0, 0.0, 0.0])
spatial_metric = np.identity(3)
inv_spatial_metric = np.identity(3)
return fluxes.tilde_s_flux(tilde_e, tilde_s, tilde_p, lapse, shift,
spatial_metric, inv_spatial_metric)
def soln_flux_tilde_s_bar_nue(face_mesh_velocity,
outward_directed_normal_covector,
outward_directed_normal_vector, coords, time,
dim):
tilde_e = soln_tilde_e_bar_nue(face_mesh_velocity,
outward_directed_normal_covector,
outward_directed_normal_vector, coords,
time, dim)
tilde_s = soln_tilde_s_bar_nue(face_mesh_velocity,
outward_directed_normal_covector,
outward_directed_normal_vector, coords,
time, dim)
tilde_p = _tilde_p_values_from_cxx
lapse = 1.0
shift = np.array([0.0, 0.0, 0.0])
spatial_metric = np.identity(3)
inv_spatial_metric = np.identity(3)
return fluxes.tilde_s_flux(tilde_e, tilde_s, tilde_p, lapse, shift,
spatial_metric, inv_spatial_metric)
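# Hedged example, not in the file: these Dirichlet helpers ignore the mesh
# velocity and normal vectors, so a quick check only needs coords/time/dim.
# Runnable only with the spectre Python test modules imported above.
# coords = np.array([0.1, 0.2, 0.3])
# flux = soln_flux_tilde_e_nue(None, None, None, coords, 0.0, 3)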
| 46.610687 | 79 | 0.62545 | 710 | 6,106 | 4.926761 | 0.150704 | 0.145798 | 0.204117 | 0.111778 | 0.804746 | 0.79817 | 0.79817 | 0.79817 | 0.79817 | 0.79817 | 0 | 0.053464 | 0.316901 | 6,106 | 130 | 80 | 46.969231 | 0.785183 | 0.055683 | 0 | 0.80198 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.089109 | false | 0 | 0.029703 | 0.049505 | 0.207921 | 0 | 0 | 0 | 0 | null | 0 | 1 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 8 |
0fec41cfa83dff5ce0e007994c8f2f7333d7bc57 | 986 | py | Python | samsungctl/remote_encrypted/keys.py | p3g4asus/samsungctl | 90756acc7ac210e37bbfa23dccf1ca3113e55154 | ["MIT"] | 135 | 2018-11-23T10:49:42.000Z | 2022-03-20T05:42:40.000Z | samsungctl/remote_encrypted/keys.py | p3g4asus/samsungctl | 90756acc7ac210e37bbfa23dccf1ca3113e55154 | ["MIT"] | 114 | 2018-11-25T00:18:31.000Z | 2022-02-24T02:33:59.000Z | samsungctl/remote_encrypted/keys.py | p3g4asus/samsungctl | 90756acc7ac210e37bbfa23dccf1ca3113e55154 | ["MIT"] | 42 | 2018-12-01T19:58:13.000Z | 2021-11-13T16:35:46.000Z |
publicKey = (
'2cb12bb2cbf7cec713c0fff7b59ae68a96784ae517f41d259a45d20556177c0ffe951ca60'
'ec03a990c9412619d1bee30adc7773088c5721664cffcedacf6d251cb4b76e2fd7aef09b3'
'ae9f9496ac8d94ed2b262eee37291c8b237e880cc7c021fb1be0881f3d0bffa4234d3b8e6'
'a61530c00473ce169c025f47fcc001d9b8051'
)
privateKey = (
'2fd6334713816fae018cdee4656c5033a8d6b00e8eaea07b3624999242e96247112dcd019'
'c4191f4643c3ce1605002b2e506e7f1d1ef8d9b8044e46d37c0d5263216a87cd783aa1854'
'90436c4a0cb2c524e15bc1bfeae703bcbc4b74a0540202e8d79cadaae85c6f9c218bc1107'
'd1f5b4b9bd87160e782f4e436eeb17485ab4d'
)
wbKey = 'abbb120c09e7114243d1fa0102163b27'
transKey = '6c9474469ddf7578f3e5ad8a4c703d99'
prime = (
'b361eb0ab01c3439f2c16ffda7b05e3e320701ebee3e249123c3586765fd5bf6c1dfa88bb'
'6bb5da3fde74737cd88b6a26c5ca31d81d18e3515533d08df619317063224cf0943a2f29a'
'5fe60c1c31ddf28334ed76a6478a1122fb24c4a94c8711617ddfe90cf02e643cd82d4748d'
'6d4a7ca2f47d88563aa2baf6482e124acd7dd'
)
| 46.952381 | 79 | 0.881339 | 19 | 986 | 45.736842 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.566079 | 0.079108 | 986 | 20 | 80 | 49.3 | 0.390969 | 0 | 0 | 0 | 0 | 0 | 0.843813 | 0.843813 | 0 | 1 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
e8d473ea332acc8539ffe71651629e6a0d006f25 | 12,003 | py | Python | dynamo/vectorfield/cell_vectors.py | davisidarta/dynamo-release | 0dbd769f52ea07f3cdaa8fb31022ceb89938c382 | ["BSD-3-Clause"] | null | null | null | dynamo/vectorfield/cell_vectors.py | davisidarta/dynamo-release | 0dbd769f52ea07f3cdaa8fb31022ceb89938c382 | ["BSD-3-Clause"] | null | null | null | dynamo/vectorfield/cell_vectors.py | davisidarta/dynamo-release | 0dbd769f52ea07f3cdaa8fb31022ceb89938c382 | ["BSD-3-Clause"] | null | null | null |
from ..tools.cell_velocities import cell_velocities
from .topography import VectorField
from .vector_calculus import acceleration, curvature
def cell_accelerations(adata,
vf_basis='pca',
basis='umap',
enforce=True,
preserve_len=True,
other_kernels_dict={},
**kwargs):
"""Compute RNA acceleration field via reconstructed vector field and project it to low dimensional embeddings.
In classical physics, including fluidics and aerodynamics, velocity and acceleration vector fields are used as
fundamental tools to describe motion or external force of objects, respectively. In analogy, RNA velocity or
accelerations estimated from single cells can be regarded as samples in the velocity (La Manno et al. 2018) or
acceleration vector field (Gorin, Svensson, and Pachter 2019). In general, a vector field can be defined as a
vector-valued function f that maps any points (or cells’ expression state) x in a domain Ω with D dimension (or the
gene expression system with D transcripts / proteins) to a vector y (for example, the velocity or acceleration for
different genes or proteins), that is f(x) = y.
In two or three dimensions, a vector field is often visualised as a quiver plot where a collection of arrows
with a given magnitude and direction is drawn. For example, the velocity estimates of unspliced transcriptome of
sampled cells projected into two dimensions are drawn to show the prediction of the future cell states in RNA velocity
(La Manno et al. 2018). During the differentiation process, external signal factors perturb cells and thus change
the vector field. Since we perform genome-wide profiling of cell states and the experiments performed are often done
in a short time scale, we assume a constant vector field without loss of generality (See also Discussion). Assuming
an asymptotic deterministic system, the trajectory of the cells travelling in the gene expression space follows the
vector field and can be calculated using numerical integration methods, for example the Runge-Kutta algorithm. In two or
three dimensions, a streamline plot can be used to visualize the paths cells will follow if released in different
regions of the gene expression state space under a steady flow field. Another more intuitive way to visualize the
structure of vector field is the so called line integral convolution method or LIC (Cabral and Leedom 1993), which
works by adding random black-and-white paint sources on the vector field and letting the flowing particle on the
vector field picking up some texture to ensure the same streamline having similar intensity. Although we have not
provided such functionality in dynamo, with a vector field that changes over time, similar methods, for example,
streakline, pathline, timeline, etc. can be used to visualize the evolution of single cell or cell populations.
Arguments
---------
adata: :class:`~anndata.AnnData`
an AnnData object.
vf_basis: 'str' (optional, default `pca`)
The dictionary key that corresponds to the low dimensional embedding where the vector field function was
reconstructed.
basis: 'str' (optional, default `umap`)
The dictionary key that corresponds to the reduced dimension in `.obsm` attribute.
enforce: `bool` (default: `True`)
Whether to enforce 1) redefining use_for_transition column in obs attribute;
2) recalculation of transition matrix.
preserve_len: `bool` (default: `True`)
Whether to preserve the length of the high dimensional vector. When set to True, the length of the low
dimension projected vector will be proportionally scaled to that of the high dimensional vector. Note that
when preserve_len is set to True, the acceleration field may look messy (although the magnitude will be
reflected), while with `preserve_len` set to `False` the trend of acceleration is clearer but the
information about the acceleration magnitude is lost. This is because the acceleration is not directly
related to the distance of cells in the low embedding space; thus the acceleration direction can be better
preserved than the magnitude. On the other hand, velocity is more relevant to the distance in the low
embedding space, so preserving both magnitude and direction of the velocity vector in low dimension is
more easily achieved.
other_kernels_dict: `dict` (default: `{}`)
A dictionary of parameters that will be passed to the cosine/correlation kernel.
Returns
-------
Adata: :class:`~anndata.AnnData`
Returns an updated `~anndata.AnnData` with transition_matrix and projected embedding of high dimension
acceleration vectors in the existing embeddings of current cell state, calculated using either the Itô
kernel method (default) or the diffusion approximation or the method from (La Manno et al. 2018).
"""
if 'velocity_' + vf_basis not in adata.obsm.keys():
cell_velocities(adata, basis=vf_basis)
if 'VecFld_' + vf_basis not in adata.uns_keys():
VectorField(adata, basis=vf_basis)
if 'acceleration_' + vf_basis not in adata.obsm.keys():
acceleration(adata, basis=vf_basis)
X = adata.obsm['X_' + vf_basis]
V = adata.obsm['acceleration_' + vf_basis]
X_embedding = adata.obsm['X_' + basis]
if basis != vf_basis and vf_basis.lower() not in ['umap', 'tsne', 'trimap', 'ddtree', 'diffusion_map']:
cell_velocities(
adata,
X=X,
V=V,
X_embedding=X_embedding,
basis=basis,
enforce=enforce,
key='acceleration',
preserve_len=preserve_len,
other_kernels_dict=other_kernels_dict,
**kwargs
)
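# A minimal sketch of the trajectory integration mentioned in the docstring above
# (illustrative only; dynamo's actual integrator may differ). `vf_func` is a
# hypothetical reconstructed vector-field function mapping a cell state x to its
# velocity dx/dt.
import numpy as np

def _rk4_step(vf_func, x, dt):
    # one fourth-order Runge-Kutta step along the vector field
    k1 = vf_func(x)
    k2 = vf_func(x + 0.5 * dt * k1)
    k3 = vf_func(x + 0.5 * dt * k2)
    k4 = vf_func(x + dt * k3)
    return x + dt / 6.0 * (k1 + 2 * k2 + 2 * k3 + k4)

def _integrate_trajectory(vf_func, x0, dt=0.1, n_steps=100):
    # trajectory of a cell released at x0 under a steady (time-invariant) flow field
    traj = [np.asarray(x0, dtype=float)]
    for _ in range(n_steps):
        traj.append(_rk4_step(vf_func, traj[-1], dt))
    return np.stack(traj)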
def cell_curvatures(adata,
vf_basis='pca',
basis='umap',
enforce=True,
preserve_len=True,
other_kernels_dict={},
**kwargs):
"""Compute RNA curvature field via reconstructed vector field and project it to low dimensional embeddings.
In classical physics, including fluidics and aerodynamics, velocity and acceleration vector fields are used as
fundamental tools to describe motion or external force of objects, respectively. In analogy, RNA velocity or
accelerations estimated from single cells can be regarded as samples in the velocity (La Manno et al. 2018) or
acceleration vector field (Gorin, Svensson, and Pachter 2019). In general, a vector field can be defined as a
vector-valued function f that maps any points (or cells’ expression state) x in a domain Ω with D dimension (or the
gene expression system with D transcripts / proteins) to a vector y (for example, the velocity or acceleration for
different genes or proteins), that is f(x) = y.
In two or three dimensions, a vector field is often visualised as a quiver plot where a collection of arrows
with a given magnitude and direction is drawn. For example, the velocity estimates of unspliced transcriptome of
sampled cells projected into two dimensions is drawn to show the prediction of the future cell states in RNA velocity
(La Manno et al. 2018). During the differentiation process, external signal factors perturb cells and thus change
the vector field. Since we perform genome-wide profiling of cell states and the experiments performed are often done
in a short time scale, we assume a constant vector field without loss of generality (See also Discussion). Assuming
an asymptotic deterministic system, the trajectory of the cells travelling in the gene expression space follows the
vector field and can be calculated using numerical integration methods, for example Runge-Kutta algorithm. In two or
three dimensions, a streamline plot can be used to visualize the paths of cells will follow if released in different
regions of the gene expression state space under a steady flow field. Another more intuitive way to visualize the
structure of vector field is the so called line integral convolution method or LIC (Cabral and Leedom 1993), which
works by adding random black-and-white paint sources on the vector field and letting the flowing particle on the
vector field picking up some texture to ensure the same streamline having similar intensity. Although we have not
provides such functionalities in dynamo, with vector field that changes over time, similar methods, for example,
streakline, pathline, timeline, etc. can be used to visualize the evolution of single cell or cell populations.
Arguments
---------
adata: :class:`~anndata.AnnData`
    an AnnData object.
vf_basis: `str` (optional, default `pca`)
    The dictionary key that corresponds to the low dimensional embedding where the vector field function
    was reconstructed.
basis: `str` (optional, default `umap`)
    The dictionary key that corresponds to the reduced dimension in the `.obsm` attribute.
enforce: `bool` (default: `True`)
    Whether to enforce 1) redefining the use_for_transition column in the obs attribute and
    2) recalculation of the transition matrix.
preserve_len: `bool` (default: `True`)
    Whether to preserve the length of the high-dimensional vectors. When set to True, the length of each
    projected low-dimensional vector is proportionally scaled to that of its high-dimensional counterpart. Note
    that when preserve_len is True the acceleration field may look messy (although the magnitude is reflected),
    whereas with `preserve_len` set to `False` the trend of acceleration is clearer but the information about
    acceleration magnitude is lost. This is because acceleration is not directly related to the distance between
    cells in the low-dimensional embedding space; the acceleration direction can therefore be preserved better
    than its magnitude. Velocity, on the other hand, is more closely related to distance in the low-dimensional
    embedding space, so preserving both the magnitude and direction of the velocity vector in low dimension is
    more easily achieved.
other_kernels_dict: `dict` (default: `{}`)
    A dictionary of parameters that will be passed to the cosine/correlation kernel.
Returns
-------
adata: :class:`~anndata.AnnData`
    Returns an updated :class:`~anndata.AnnData` object with the transition matrix and the projection of the
    high-dimensional curvature vectors onto the existing embedding of current cell states, calculated using
    either the Itô kernel method (default), the diffusion approximation, or the method from (La Manno et al. 2018).
"""
if 'velocity_' + vf_basis not in adata.obsm.keys():
cell_velocities(adata, basis=vf_basis)
if 'VecFld_' + vf_basis not in adata.uns_keys():
VectorField(adata, basis=vf_basis)
if 'curvature_' + vf_basis not in adata.obsm.keys():
curvature(adata, basis=vf_basis)
X = adata.obsm['X_' + vf_basis]
V = adata.obsm['curvature_' + vf_basis]
X_embedding = adata.obsm['X_' + basis]
if basis != vf_basis and vf_basis.lower() not in ['umap', 'tsne', 'trimap', 'ddtree', 'diffusion_map']:
cell_velocities(
adata,
X=X,
V=V,
X_embedding=X_embedding,
basis=basis,
enforce=enforce,
key='curvature',
preserve_len=preserve_len,
other_kernels_dict=other_kernels_dict,
**kwargs
)
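# Usage sketch (hypothetical `adata`; assumes standard dynamo preprocessing has
# already populated `adata.obsm['X_pca']` and `adata.obsm['X_umap']`):
#
#     cell_curvatures(adata)                  # vector field in PCA, projected to UMAP
#     curv = adata.obsm['curvature_umap']     # low-dimensional curvature vectors
#
# With the defaults above, missing intermediates (velocities, the reconstructed
# vector field, and the high-dimensional curvature field) are computed on demand
# before the projection step.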
| 63.507937
| 121
| 0.701241
| 1,637
| 12,003
| 5.087355
| 0.193036
| 0.020173
| 0.016811
| 0.007925
| 0.966138
| 0.966138
| 0.966138
| 0.960134
| 0.960134
| 0.960134
| 0
| 0.004871
| 0.247438
| 12,003
| 188
| 122
| 63.845745
| 0.917082
| 0.766058
| 0
| 0.754098
| 0
| 0
| 0.078604
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.032787
| false
| 0
| 0.04918
| 0
| 0.081967
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
2ce00720e4e77c15f7cbc5a1e8cea2b1bc2c1434
| 717
|
py
|
Python
|
tests/test_render.py
|
lejmr/ecsctl
|
688c5d5e10536ee74ded9de187309134a4b7effc
|
[
"MIT"
] | null | null | null |
tests/test_render.py
|
lejmr/ecsctl
|
688c5d5e10536ee74ded9de187309134a4b7effc
|
[
"MIT"
] | 1
|
2021-02-14T19:53:08.000Z
|
2021-02-14T19:53:08.000Z
|
tests/test_render.py
|
lejmr/ecsctl
|
688c5d5e10536ee74ded9de187309134a4b7effc
|
[
"MIT"
] | null | null | null |
from ecs.render import render
def test_simple_render():
# task definition
td = {
"version": "latest",
"task_name": "name_{{ var1 }}"
}
# Variable definition
vars = {
"var1": "value1"
}
# The render
r = render(td, vars)
# Asserts
assert r == {"version": "latest", "task_name": "name_value1"}
def test_nested_case1():
# task definition
td = {
"version": "latest",
"task_name": "name_{{ var1 }}"
}
# Variable definition
vars = {
"var1": "{{ var2 }}",
"var2": "value1"
}
# The render
r = render(td, vars)
# Asserts
assert r == {"version": "latest", "task_name": "name_value1"}
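# A minimal sketch of a render() that satisfies both tests above (an assumption --
# the real ecs.render implementation may differ). It resolves nested variables by
# fixed-point substitution with Jinja2, assuming all values are strings:
from jinja2 import Template

def _render_sketch(task_definition, variables):
    resolved = dict(variables)
    # repeat substitution until nested references ("{{ var2 }}" inside var1) settle;
    # len(resolved) passes bounds the longest possible reference chain
    for _ in range(len(resolved)):
        resolved = {k: Template(v).render(**resolved) for k, v in resolved.items()}
    return {k: Template(v).render(**resolved) for k, v in task_definition.items()}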
| 18.384615
| 65
| 0.516039
| 73
| 717
| 4.90411
| 0.328767
| 0.145251
| 0.189944
| 0.234637
| 0.804469
| 0.804469
| 0.804469
| 0.804469
| 0.804469
| 0.804469
| 0
| 0.02268
| 0.32357
| 717
| 38
| 66
| 18.868421
| 0.715464
| 0.152022
| 0
| 0.545455
| 0
| 0
| 0.290484
| 0
| 0
| 0
| 0
| 0
| 0.090909
| 1
| 0.090909
| false
| 0
| 0.045455
| 0
| 0.136364
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
2cf64cde0b37fc71b141d614c266e011a676c9f0
| 174
|
py
|
Python
|
workflow_miniscope/paths.py
|
davidgodinez/workflow-miniscope
|
8b464aec1293d234bb1dada33b2409a1840d4ef4
|
[
"MIT"
] | null | null | null |
workflow_miniscope/paths.py
|
davidgodinez/workflow-miniscope
|
8b464aec1293d234bb1dada33b2409a1840d4ef4
|
[
"MIT"
] | 1
|
2022-03-23T20:05:14.000Z
|
2022-03-23T20:05:14.000Z
|
workflow_miniscope/paths.py
|
davidgodinez/workflow-miniscope
|
8b464aec1293d234bb1dada33b2409a1840d4ef4
|
[
"MIT"
] | null | null | null |
import datajoint as dj
def get_miniscope_root_data_dir():
root_data_dirs = dj.config.get('custom', {}).get('miniscope_root_data_dir', None)
return root_data_dirs
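# Usage sketch (illustrative values; assumes the 'custom' section of the DataJoint
# config was populated elsewhere, e.g. in dj_local_conf.json):
#
#     dj.config['custom'] = {'miniscope_root_data_dir': '/data/miniscope'}
#     root = get_miniscope_root_data_dir()   # -> '/data/miniscope'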
| 19.333333
| 85
| 0.747126
| 27
| 174
| 4.407407
| 0.555556
| 0.268908
| 0.268908
| 0.336134
| 0.386555
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.143678
| 174
| 8
| 86
| 21.75
| 0.798658
| 0
| 0
| 0
| 0
| 0
| 0.16763
| 0.132948
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0.25
| 0
| 0.75
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 7
|
d74a82e74637b14b3e67e5e0e9e1a72dc3748f00
| 280
|
py
|
Python
|
badkeys/__init__.py
|
hoheinzollern/badkeys
|
62417cbf4304a20a5af40b5b102438406068f998
|
[
"MIT"
] | null | null | null |
badkeys/__init__.py
|
hoheinzollern/badkeys
|
62417cbf4304a20a5af40b5b102438406068f998
|
[
"MIT"
] | null | null | null |
badkeys/__init__.py
|
hoheinzollern/badkeys
|
62417cbf4304a20a5af40b5b102438406068f998
|
[
"MIT"
] | null | null | null |
__all__ = ["allchecks", "checkrsa", "checkpubkey", "checkprivkey", "checkcrt",
"checkcsr", "checksshpubkey", "detectandcheck"]
from .checks import (allchecks, checkrsa, checkpubkey, checkprivkey, checkcrt,
checkcsr, checksshpubkey, detectandcheck)
| 56
| 78
| 0.682143
| 20
| 280
| 9.35
| 0.6
| 0.181818
| 0.299465
| 0.427807
| 0.898396
| 0.898396
| 0.898396
| 0.898396
| 0
| 0
| 0
| 0
| 0.189286
| 280
| 4
| 79
| 70
| 0.823789
| 0
| 0
| 0
| 0
| 0
| 0.3
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.25
| 0
| 0.25
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
d75048ea9c48f22312bd338742d0500d12220036
| 186
|
py
|
Python
|
conftest.py
|
vis7/django_pytest_fixture_tutorial
|
e4662aaca6ffebdc6d467e06115c249dc5106665
|
[
"MIT"
] | null | null | null |
conftest.py
|
vis7/django_pytest_fixture_tutorial
|
e4662aaca6ffebdc6d467e06115c249dc5106665
|
[
"MIT"
] | null | null | null |
conftest.py
|
vis7/django_pytest_fixture_tutorial
|
e4662aaca6ffebdc6d467e06115c249dc5106665
|
[
"MIT"
] | null | null | null |
import pytest
from school.models import Student
@pytest.fixture
def student():
return {
"vis": Student.objects.get(pk=1),
"dhruvin": Student.objects.get(pk=2)
}
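# Usage sketch (hypothetical test module; assumes Student rows with pk=1 and pk=2
# already exist, e.g. loaded via a migration or data fixture, and pytest-django
# for database access):
#
#     @pytest.mark.django_db
#     def test_student_fixture(student):
#         assert student["vis"].pk == 1
#         assert student["dhruvin"].pk == 2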
| 18.6
| 44
| 0.645161
| 24
| 186
| 5
| 0.666667
| 0.233333
| 0.283333
| 0.316667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.013793
| 0.22043
| 186
| 9
| 45
| 20.666667
| 0.813793
| 0
| 0
| 0
| 0
| 0
| 0.053763
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.125
| true
| 0
| 0.25
| 0.125
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
|
0
| 7
|
ad515e129f08e0ba4fb5044846855696728ec596
| 80
|
py
|
Python
|
packages/mccomponents/python/mccomponents/detector/elements/units.py
|
mcvine/mcvine
|
42232534b0c6af729628009bed165cd7d833789d
|
[
"BSD-3-Clause"
] | 5
|
2017-01-16T03:59:47.000Z
|
2020-06-23T02:54:19.000Z
|
packages/mccomponents/python/mccomponents/detector/units.py
|
mcvine/mcvine
|
42232534b0c6af729628009bed165cd7d833789d
|
[
"BSD-3-Clause"
] | 293
|
2015-10-29T17:45:52.000Z
|
2022-01-07T16:31:09.000Z
|
packages/mccomponents/python/mccomponents/detector/units.py
|
mcvine/mcvine
|
42232534b0c6af729628009bed165cd7d833789d
|
[
"BSD-3-Clause"
] | 1
|
2019-05-25T00:53:31.000Z
|
2019-05-25T00:53:31.000Z
|
from pyre.units import length, time, pressure, energy
from pyre.units import *
| 20
| 53
| 0.775
| 12
| 80
| 5.166667
| 0.666667
| 0.258065
| 0.419355
| 0.612903
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.15
| 80
| 3
| 54
| 26.666667
| 0.911765
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
ad552933648aaf42eaae64b7276f804a0832a408
| 903
|
py
|
Python
|
src/nets/inception.py
|
xiaocn/ImageClassifer
|
3075150aa7ef547333729dcff5876147682c6694
|
[
"Apache-2.0"
] | 2
|
2018-11-09T10:01:23.000Z
|
2018-12-18T04:30:57.000Z
|
src/nets/inception.py
|
xiaocn/ImageClassifer
|
3075150aa7ef547333729dcff5876147682c6694
|
[
"Apache-2.0"
] | null | null | null |
src/nets/inception.py
|
xiaocn/ImageClassifer
|
3075150aa7ef547333729dcff5876147682c6694
|
[
"Apache-2.0"
] | null | null | null |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-import
from nets.inception_resnet_v2 import inception_resnet_v2
from nets.inception_resnet_v2 import inception_resnet_v2_arg_scope
from nets.inception_resnet_v2 import inception_resnet_v2_base
from nets.inception_v1 import inception_v1
from nets.inception_v1 import inception_v1_arg_scope
from nets.inception_v1 import inception_v1_base
from nets.inception_v2 import inception_v2
from nets.inception_v2 import inception_v2_arg_scope
from nets.inception_v2 import inception_v2_base
from nets.inception_v3 import inception_v3
from nets.inception_v3 import inception_v3_arg_scope
from nets.inception_v3 import inception_v3_base
from nets.inception_v4 import inception_v4
from nets.inception_v4 import inception_v4_arg_scope
from nets.inception_v4 import inception_v4_base
| 45.15
| 66
| 0.895903
| 144
| 903
| 5.166667
| 0.138889
| 0.16129
| 0.342742
| 0.107527
| 0.83871
| 0.795699
| 0.774194
| 0.193548
| 0.193548
| 0
| 0
| 0.036232
| 0.083056
| 903
| 20
| 67
| 45.15
| 0.862319
| 0.032115
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0.055556
| 0
| 0
| 0
| null | 0
| 1
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
ad739290227bd708d70ed07eb1a73a08038e0df3
| 3,925
|
py
|
Python
|
demography/management/commands/bake/aggregate/_state.py
|
The-Politico/politico-civic-demography
|
080bb964b64b06db7fd04386530e893ceed1cf98
|
[
"MIT"
] | null | null | null |
demography/management/commands/bake/aggregate/_state.py
|
The-Politico/politico-civic-demography
|
080bb964b64b06db7fd04386530e893ceed1cf98
|
[
"MIT"
] | null | null | null |
demography/management/commands/bake/aggregate/_state.py
|
The-Politico/politico-civic-demography
|
080bb964b64b06db7fd04386530e893ceed1cf98
|
[
"MIT"
] | null | null | null |
from tqdm import tqdm
from geography.models import Division
class AggregateState(object):
def aggregate_state_estimates_by_county(self, parent):
"""
Aggregates county-level estimates for each table within a given state.
Creates data structure designed for an export in this format:
...{series}/{year}/{table}/{state_fips}/counties.json
"""
data = {}
for division in tqdm(
Division.objects.filter(level=self.COUNTY_LEVEL, parent=parent)
):
fips = division.code
id = division.id
aggregated_labels = [] # Keep track of already agg'ed variables
for estimate in division.census_estimates.all():
series = estimate.variable.table.series
year = estimate.variable.table.year
table = estimate.variable.table.code
label = None
if estimate.variable.label:
label = estimate.variable.label.label
table_label = "{}{}".format(table, label)
code = estimate.variable.code
if series not in data:
data[series] = {}
if year not in data[series]:
data[series][year] = {}
if table not in data[series][year]:
data[series][year][table] = {}
if fips not in data[series][year][table]:
data[series][year][table][fips] = {}
if label is not None:
if table_label not in aggregated_labels:
aggregated_labels.append(table_label)
data[series][year][table][fips][
label
] = self.aggregate_variable(estimate, id)
else:
data[series][year][table][division.code][
code
] = estimate.estimate
return data
def aggregate_state_estimates_by_district(self, state):
"""
Aggregates district-level estimates for each table within a
given state.
Creates data structure designed for an export in this format:
...{series}/{year}/{table}/{state_fips}/districts.json
"""
data = {}
for division in tqdm(
Division.objects.filter(level=self.DISTRICT_LEVEL, parent=state)
):
fips = division.code
id = division.id
aggregated_labels = [] # Keep track of already agg'ed variables
for estimate in division.census_estimates.all():
series = estimate.variable.table.series
year = estimate.variable.table.year
table = estimate.variable.table.code
label = None
if estimate.variable.label:
label = estimate.variable.label.label
table_label = "{}{}".format(table, label)
code = estimate.variable.code
if series not in data:
data[series] = {}
if year not in data[series]:
data[series][year] = {}
if table not in data[series][year]:
data[series][year][table] = {}
if fips not in data[series][year][table]:
data[series][year][table][fips] = {}
if label is not None:
if table_label not in aggregated_labels:
aggregated_labels.append(table_label)
data[series][year][table][fips][
label
] = self.aggregate_variable(estimate, id)
else:
data[series][year][table][division.code][
code
] = estimate.estimate
return data
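# Note: the "if key not in data" ladders above can be expressed more compactly with
# dict.setdefault; a behaviorally equivalent sketch (illustration only, not part of
# the original module):
#
#     bucket = (data.setdefault(series, {})
#                   .setdefault(year, {})
#                   .setdefault(table, {})
#                   .setdefault(fips, {}))
#     bucket[label] = self.aggregate_variable(estimate, id)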
| 40.463918
| 78
| 0.511338
| 386
| 3,925
| 5.126943
| 0.170984
| 0.090955
| 0.09904
| 0.096008
| 0.895402
| 0.867105
| 0.867105
| 0.867105
| 0.867105
| 0.867105
| 0
| 0
| 0.39949
| 3,925
| 96
| 79
| 40.885417
| 0.839627
| 0.116433
| 0
| 0.906667
| 0
| 0
| 0.002367
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.026667
| false
| 0
| 0.026667
| 0
| 0.093333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
ad8968f9ae5c9c7463e3fe40e8336f13b82d87eb
| 104,352
|
py
|
Python
|
tests/test_scenarios_synthetic.py
|
KellyGriffin/kalc
|
9b78c4177ed9ffccbf1ecfbf9a7946286cd7c583
|
[
"Apache-2.0"
] | null | null | null |
tests/test_scenarios_synthetic.py
|
KellyGriffin/kalc
|
9b78c4177ed9ffccbf1ecfbf9a7946286cd7c583
|
[
"Apache-2.0"
] | null | null | null |
tests/test_scenarios_synthetic.py
|
KellyGriffin/kalc
|
9b78c4177ed9ffccbf1ecfbf9a7946286cd7c583
|
[
"Apache-2.0"
] | null | null | null |
try:
from tests.test_util import print_objects
except:
from test_util import print_objects
try:
from tests.libs_for_tests import prepare_yamllist_for_diff
except:
from libs_for_tests import prepare_yamllist_for_diff
from kalc.model.search import Check_services, Check_deployments, Check_daemonsets, OptimisticRun, CheckNodeOutage, Check_node_outage_and_service_restart
from kalc.model.system.Scheduler import Scheduler
from kalc.model.system.globals import GlobalVar
from kalc.model.kinds.Service import Service
from kalc.model.kinds.Node import Node
from kalc.model.kinds.Pod import Pod
from kalc.model.kinds.Deployment import Deployment
from kalc.model.kinds.DaemonSet import DaemonSet
from kalc.model.kinds.PriorityClass import PriorityClass
from kalc.model.kubernetes import KubernetesCluster
from kalc.misc.const import *
import pytest
from kalc.model.search import K8ServiceInterruptSearch
from kalc.misc.object_factory import labelFactory
from click.testing import CliRunner
from kalc.model.scenario import Scenario
from poodle import planned
try:
from tests.libs_for_tests import convert_space_to_yaml,print_objects_from_yaml,print_plan,load_yaml, print_objects_compare, checks_assert_conditions, reload_cluster_from_yaml, checks_assert_conditions_in_one_mode
except:
from libs_for_tests import convert_space_to_yaml,print_objects_from_yaml,print_plan,load_yaml, print_objects_compare, checks_assert_conditions, reload_cluster_from_yaml, checks_assert_conditions_in_one_mode
DEBUG_MODE = 0  # 0 - no debug, 1 - debug with yaml load, 2 - debug without yaml load
def build_running_pod(podName, cpuRequest, memRequest, atNode):
pod_running_1 = Pod()
pod_running_1.metadata_name = "pod"+str(podName)
pod_running_1.cpuRequest = cpuRequest
pod_running_1.memRequest = memRequest
pod_running_1.atNode = atNode
pod_running_1.status = STATUS_POD["Running"]
pod_running_1.hasDeployment = False
pod_running_1.hasService = False
pod_running_1.hasDaemonset = False
return pod_running_1
def build_running_pod_with_d(podName, cpuRequest, memRequest, atNode, d, ds):
pod_running_1 = Pod()
pod_running_1.metadata_name = "pod"+str(podName)
pod_running_1.cpuRequest = cpuRequest
pod_running_1.memRequest = memRequest
pod_running_1.atNode = atNode
pod_running_1.status = STATUS_POD["Running"]
pod_running_1.hasDeployment = False
pod_running_1.hasService = False
pod_running_1.hasDaemonset = False
if d is not None:
d.podList.add(pod_running_1)
d.amountOfActivePods += 1
pod_running_1.hasDeployment = True
if ds is not None:
ds.podList.add(pod_running_1)
ds.amountOfActivePods += 1
pod_running_1.hasDaemonset = True
atNode.currentFormalCpuConsumption += cpuRequest
atNode.currentFormalMemConsumption += memRequest
return pod_running_1
def build_pending_pod(podName, cpuRequest, memRequest, toNode):
p = build_running_pod(podName, cpuRequest, memRequest, Node.NODE_NULL)
p.status = STATUS_POD["Pending"]
p.toNode = toNode
p.hasDeployment = False
p.hasService = False
p.hasDaemonset = False
return p
def build_pending_pod_with_d(podName, cpuRequest, memRequest, toNode, d, ds):
p = Pod()
p.metadata_name = "pod"+str(podName)
p.cpuRequest = cpuRequest
p.memRequest = memRequest
p.status = STATUS_POD["Pending"]
p.hasDeployment = False
p.hasService = False
p.hasDaemonset = False
if d is not None:
d.podList.add(p)
p.hasDeployment = True
if ds is not None:
ds.podList.add(p)
p.hasDaemonset = True
p.toNode = toNode
return p
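# Usage sketch for the builders above (illustrative values; the Node/Deployment
# fields mirror those initialized in the tests below):
#
#     n = Node()
#     n.cpuCapacity = 4; n.memCapacity = 4
#     n.currentFormalCpuConsumption = 0; n.currentFormalMemConsumption = 0
#     d = Deployment()
#     d.spec_replicas = 1; d.amountOfActivePods = 0
#     running = build_running_pod_with_d("a", 1, 1, n, d, None)  # joins d.podList
#     pending = build_pending_pod("b", 1, 1, n)                  # will target node n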
def prepare_test_0_run_pods_no_eviction():
# print("0")
# TODO: extract final status for loader unit tests from here
# Initialize scheduler, globalvar
k = KubernetesCluster()
scheduler = next(filter(lambda x: isinstance(x, Scheduler), k.state_objects))
# initial node state
n = Node()
n.cpuCapacity = 5
n.memCapacity = 5
n.currentFormalCpuConsumption = 0
n.currentFormalMemConsumption = 0
# priority - as needed
pc = PriorityClass()
pc.priority = 10
pc.metadata_name = "high-prio-test"
# Pending pod
pod_pending_1 = build_pending_pod(3,2,2,n)
pod_pending_1.priorityClass = pc
## Add pod to scheduler queue
scheduler.podQueue.add(pod_pending_1)
scheduler.queueLength += 1
scheduler.status = STATUS_SCHED["Changed"]
# Create a "holding" controller - optional
ds = DaemonSet()
ds.podList.add(pod_pending_1)
ds.amountOfActivePods = 0
pod_pending_1.hasDaemonset = True
k.state_objects.extend([n, pc, ds])
create_objects = [pod_pending_1]
k2 = reload_cluster_from_yaml(k,create_objects)
k.state_objects.extend(create_objects)
k._build_state()
return k, k2
@pytest.mark.debug(reason="this test is for debug perspective")
def test_0_run_pods_no_eviction():
k, k2 = prepare_test_0_run_pods_no_eviction()
pod_pending_1 = next(filter(lambda x: isinstance(x, Pod) and x.status._property_value == STATUS_POD["Pending"], k.state_objects))
class Task_Check_services(Check_services):
goal = lambda self: pod_pending_1.status == STATUS_POD["Running"]
class Task_Check_deployments(Check_deployments):
goal = lambda self: pod_pending_1.status == STATUS_POD["Running"]
p = Task_Check_services(k.state_objects)
p.run(timeout=200)
assert "StartPod" in "\n".join([repr(x) for x in p.plan])
p = Task_Check_deployments(k.state_objects)
p.run(timeout=200)
assert "StartPod" in "\n".join([repr(x) for x in p.plan])
def test_0_run_pods_no_eviction_invload():
k, k2 = prepare_test_0_run_pods_no_eviction()
pod_pending_1_1 = next(filter(lambda x: isinstance(x, Pod) and x.status._property_value == STATUS_POD["Pending"], k.state_objects))
pod_pending_1_2 = next(filter(lambda x: isinstance(x, Pod) and x.status._property_value == STATUS_POD["Pending"], k2.state_objects))
globalVar_k1 = next(filter(lambda x: isinstance(x, GlobalVar), k.state_objects))
class NewGoal_k1(Check_services):
goal = lambda self: pod_pending_1_1.status == STATUS_POD["Running"]
p = NewGoal_k1(k.state_objects)
globalVar_k2 = next(filter(lambda x: isinstance(x, GlobalVar), k2.state_objects))
class NewGoal_k2(Check_services):
goal = lambda self: pod_pending_1_2.status == STATUS_POD["Running"]
p2 = NewGoal_k2(k2.state_objects)
assert_conditions = ["StartPod"]
not_assert_conditions = ["Evict"]
checks_assert_conditions(k,k2,p,p2,assert_conditions,not_assert_conditions,DEBUG_MODE)
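# The pattern above repeats throughout this file (sketch, mirroring the assertions
# in test_0_run_pods_no_eviction): bind a model object, subclass a check with a
# `goal` lambda over its properties, search for a plan, then assert on action names:
#
#     class MyGoal(Check_services):
#         goal = lambda self: pod.status == STATUS_POD["Running"]
#     p = MyGoal(k.state_objects)
#     p.run(timeout=200)
#     assert "StartPod" in "\n".join([repr(x) for x in p.plan])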
def construct_scpace_for_test_1_run_pods_with_eviction():
# Initialize scheduler, globalvar
k = KubernetesCluster()
scheduler = next(filter(lambda x: isinstance(x, Scheduler), k.state_objects))
# initial node state
n = Node()
n.cpuCapacity = 5
n.memCapacity = 5
n.isNull = False
# Create running pods
pod_running_1 = build_running_pod(1,2,2,n)
pod_running_2 = build_running_pod(2,2,2,n)
## Set consumption as expected
n.currentFormalCpuConsumption = 4
n.currentFormalMemConsumption = 4
n.amountOfActivePods = 2
# priority for pod-to-evict
pc = PriorityClass()
pc.priority = 10
pc.metadata_name = "high-prio-test"
# Pending pod
pod_pending_1 = build_pending_pod(3,2,2,n)
pod_pending_1.priorityClass = pc # high prio will evict!
## Add pod to scheduler queue
scheduler.podQueue.add(pod_pending_1)
scheduler.queueLength += 1
scheduler.status = STATUS_SCHED["Changed"]
k.state_objects.extend([n, pc, pod_running_1, pod_running_2, pod_pending_1])
k._build_state()
create_objects = []
k2 = reload_cluster_from_yaml(k,create_objects)
return k, k2
def test_1_run_pods_with_eviction_invload():
k, k2 = construct_scpace_for_test_1_run_pods_with_eviction()
pod_pending_1_1 = next(filter(lambda x: isinstance(x, Pod) and x.status._property_value == STATUS_POD["Pending"], k.state_objects))
pod_pending_1_2 = next(filter(lambda x: isinstance(x, Pod) and x.status._property_value == STATUS_POD["Pending"], k2.state_objects))
globalVar_k1 = next(filter(lambda x: isinstance(x, GlobalVar), k.state_objects))
class NewGoal_k1(Check_services):
goal = lambda self: pod_pending_1_1.status == STATUS_POD["Running"]
p = NewGoal_k1(k.state_objects)
globalVar_k2 = next(filter(lambda x: isinstance(x, GlobalVar), k2.state_objects))
class NewGoal_k2(Check_services):
goal = lambda self: pod_pending_1_2.status == STATUS_POD["Running"]
p2 = NewGoal_k2(k2.state_objects)
assert_conditions = ["StartPod","Evict"]
not_assert_conditions = ["NodeOutageFinished"]
checks_assert_conditions(k,k2,p,p2,assert_conditions,not_assert_conditions,DEBUG_MODE)
def construct_scpace_for_test_2_synthetic_service_outage():
# Initialize scheduler, globalvar
k = KubernetesCluster()
scheduler = next(filter(lambda x: isinstance(x, Scheduler), k.state_objects))
# initial node state
n = Node()
n.cpuCapacity = 5
n.memCapacity = 5
n.isNull = False
n.searchable = False
# Create running pods
pod_running_1 = build_running_pod(1,2,2,n)
pod_running_2 = build_running_pod(2,2,2,n)
n.amountOfActivePods = 2
## Set consumption as expected
n.currentFormalCpuConsumption = 4
n.currentFormalMemConsumption = 4
# priority for pod-to-evict
pc = PriorityClass()
pc.priority = 10
pc.metadata_name = "high-prio-test"
# Service to detect eviction
s = Service()
s.metadata_name = "test-service"
s.amountOfActivePods = 1
s.status = STATUS_SERV["Started"]
# our service has only one pod so it can detect outage
# (we can't evict all pods here with one)
s.podList.add(pod_running_1)
pod_running_1.hasService = True
# Pending pod
pod_pending_1 = build_pending_pod(3,2,2,n)
pod_pending_1.priorityClass = pc # high prio will evict!
## Add pod to scheduler queue
scheduler.podQueue.add(pod_pending_1)
scheduler.queueLength += 1
scheduler.status = STATUS_SCHED["Changed"]
k.state_objects.extend([n, pc, pod_running_1, pod_running_2, pod_pending_1,s])
create_objects = []
k2 = reload_cluster_from_yaml(k,create_objects)
k._build_state()
return k, k2
@pytest.mark.debug(reason="if debug needed - uncomment me")
def test_2_synthetic_service_outage_step1():
# print("2-1")
k, k2 =construct_scpace_for_test_2_synthetic_service_outage()
pod_running_1 = next(filter(lambda x: isinstance(x, Pod) and x.status._property_value == STATUS_POD["Running"], k.state_objects))
pod_pending_1 = next(filter(lambda x: isinstance(x, Pod) and x.status._property_value == STATUS_POD["Pending"], k.state_objects))
class NewGoal(OptimisticRun):
# pass
# goal = lambda self: pod_pending_1.status == STATUS_POD["Running"]
goal = lambda self: pod_running_1.status == STATUS_POD["Killing"]
p = NewGoal(k.state_objects)
p.run(timeout=200)
# for a in p.plan:
# print(a)
# print_objects(k.state_objects)
assert "Evict" in "\n".join([repr(x) for x in p.plan])
assert not "NodeOutageFinished" in "\n".join([repr(x) for x in p.plan])
@pytest.mark.debug(reason="if debug needed - uncomment me")
def test_2_synthetic_service_outage_step2():
# print("2-2")
k, k2 =construct_scpace_for_test_2_synthetic_service_outage()
pod_running_1 = next(filter(lambda x: isinstance(x, Pod) and x.status._property_value == STATUS_POD["Running"], k.state_objects))
pod_pending_1 = next(filter(lambda x: isinstance(x, Pod) and x.status._property_value == STATUS_POD["Pending"], k.state_objects))
# print_objects(k.state_objects)
class NewGoal(OptimisticRun):
# pass
# goal = lambda self: pod_pending_1.status == STATUS_POD["Running"]
goal = lambda self: pod_running_1.status == STATUS_POD["Pending"]
p = NewGoal(k.state_objects)
p.run(timeout=200)
# for a in p.plan:
# print(a)
# print_objects(k.state_objects)
assert "Evict" in "\n".join([repr(x) for x in p.plan])
assert "KillPod" in "\n".join([repr(x) for x in p.plan])
assert not "NodeOutageFinished" in "\n".join([repr(x) for x in p.plan])
@pytest.mark.debug(reason="if debug needed - uncomment me")
def test_2_synthetic_service_outage_step3():
# print("2-3")
k, k2 =construct_scpace_for_test_2_synthetic_service_outage()
pod_running_1 = next(filter(lambda x: isinstance(x, Pod) and x.status._property_value == STATUS_POD["Running"], k.state_objects))
pod_pending_1 = next(filter(lambda x: isinstance(x, Pod) and x.status._property_value == STATUS_POD["Pending"], k.state_objects))
# print_objects(k.state_objects)
class NewGoal(OptimisticRun):
# pass
goal = lambda self: pod_pending_1.status == STATUS_POD["Running"] and\
pod_running_1.status == STATUS_POD["Pending"]
p = NewGoal(k.state_objects)
p.run(timeout=200)
# for a in p.plan:
# print(a)
# print_objects(k.state_objects)
assert "StartPod" in "\n".join([repr(x) for x in p.plan])
assert "Evict" in "\n".join([repr(x) for x in p.plan])
assert not "NodeOutageFinished" in "\n".join([repr(x) for x in p.plan])
@pytest.mark.debug(reason="if debug needed - uncomment me")
def test_2_synthetic_service_outage_step4():
# print("2-4")
k, k2 =construct_scpace_for_test_2_synthetic_service_outage()
pod_running_1 = next(filter(lambda x: isinstance(x, Pod) and x.status._property_value == STATUS_POD["Running"], k.state_objects))
pod_pending_1 = next(filter(lambda x: isinstance(x, Pod) and x.status._property_value == STATUS_POD["Pending"], k.state_objects))
globalVar = next(filter(lambda x: isinstance(x, GlobalVar), k.state_objects))
scheduler = next(filter(lambda x: isinstance(x, Scheduler), k.state_objects))
# print_objects(k.state_objects)
class NewGoal(OptimisticRun):
goal = lambda self: pod_pending_1.status == STATUS_POD["Running"] and\
pod_running_1.status == STATUS_POD["Pending"] and\
scheduler.queueLength == 1
p = NewGoal(k.state_objects)
p.run(timeout=400)
# for a in p.plan:
# print(a)
# print_objects(k.state_objects)
assert "StartPod" in "\n".join([repr(x) for x in p.plan])
assert "Evict" in "\n".join([repr(x) for x in p.plan])
assert not "NodeOutageFinished" in "\n".join([repr(x) for x in p.plan])
@pytest.mark.debug(reason="if debug needed - uncomment me")
def test_2_synthetic_service_outage_step5():
# print("2-5")
k, k2 =construct_scpace_for_test_2_synthetic_service_outage()
pod_running_1 = next(filter(lambda x: isinstance(x, Pod) and x.status._property_value == STATUS_POD["Running"], k.state_objects))
pod_pending_1 = next(filter(lambda x: isinstance(x, Pod) and x.status._property_value == STATUS_POD["Pending"], k.state_objects))
globalVar = next(filter(lambda x: isinstance(x, GlobalVar), k.state_objects))
scheduler = next(filter(lambda x: isinstance(x, Scheduler), k.state_objects))
# print_objects(k.state_objects)
class NewGoal(OptimisticRun):
goal = lambda self: pod_pending_1.status == STATUS_POD["Running"] and\
pod_running_1.status == STATUS_POD["Pending"] and\
scheduler.status == STATUS_SCHED["Clean"]
p = NewGoal(k.state_objects)
p.run(timeout=400)
# for a in p.plan:
# print(a)
# print_objects(k.state_objects)
assert "StartPod" in "\n".join([repr(x) for x in p.plan])
assert "Evict" in "\n".join([repr(x) for x in p.plan])
assert not "NodeOutageFinished" in "\n".join([repr(x) for x in p.plan])
# assert "MarkServiceOutageEvent" in "\n".join([repr(x) for x in p.plan])
# TODO: PASS with Scheduler_cant_place_pod cost=30000
@pytest.mark.debug(reason="if debug needed - uncomment me")
def test_2_synthetic_service_outage_step6_noNodeSelected():
# Initialize scheduler, globalvar
k = KubernetesCluster()
scheduler = next(filter(lambda x: isinstance(x, Scheduler), k.state_objects))
# initial node state
n = Node()
n.cpuCapacity = 5
n.memCapacity = 5
n.isNull = False
n.searchable = False
# Create running pods
pod_running_1 = build_running_pod(1,2,2,n)
pod_running_2 = build_running_pod(2,2,2,n)
n.amountOfActivePods = 2
## Set consumption as expected
n.currentFormalCpuConsumption = 4
n.currentFormalMemConsumption = 4
# priority for pod-to-evict
pc = PriorityClass()
pc.priority = 10
pc.metadata_name = "high-prio-test"
# Service to detect eviction
s = Service()
s.metadata_name = "test-service"
s.amountOfActivePods = 1
s.status = STATUS_SERV["Started"]
# our service has only one pod so it can detect outage
# (we can't evict all pods here with one)
s.podList.add(pod_running_1)
pod_running_1.hasService = True
# Pending pod
pod_pending_1 = build_pending_pod(3,2,2,Node.NODE_NULL)
pod_pending_1.priorityClass = pc # high prio will evict!
## Add pod to scheduler queue
scheduler.podQueue.add(pod_pending_1)
scheduler.queueLength += 1
scheduler.status = STATUS_SCHED["Changed"]
k.state_objects.extend([n, pc, pod_running_1, pod_running_2, pod_pending_1,s])
"this test was needed when debugging invloads"
# print("2-6")
globalVar = next(filter(lambda x: isinstance(x, GlobalVar), k.state_objects))
# print_objects(k.state_objects)
class test_2_synthetic_service_outage_step6_noNodeSelected_Task_Check_services(Check_services):
goal = lambda self: globalVar.is_service_disrupted == True
p = test_2_synthetic_service_outage_step6_noNodeSelected_Task_Check_services(k.state_objects)
p.run(timeout=200)
# print_plan(p)
assert "SelectNode" in "\n".join([repr(x) for x in p.plan]) # StartPod not necessarily happens
assert "StartPod" in "\n".join([repr(x) for x in p.plan]) # StartPod not necessarily happens
assert "Evict" in "\n".join([repr(x) for x in p.plan])
assert "MarkServiceOutageEvent" in "\n".join([repr(x) for x in p.plan])
assert not "NodeOutageFinished" in "\n".join([repr(x) for x in p.plan])
@pytest.mark.debug(reason="if debug needed - uncomment me")
def test_2_synthetic_service_outage_step6():
# print("2-6")
k, k2 =construct_scpace_for_test_2_synthetic_service_outage()
pod_running_1 = next(filter(lambda x: isinstance(x, Pod) and x.status._property_value == STATUS_POD["Running"], k.state_objects))
pod_pending_1 = next(filter(lambda x: isinstance(x, Pod) and x.status._property_value == STATUS_POD["Pending"], k.state_objects))
globalVar = next(filter(lambda x: isinstance(x, GlobalVar), k.state_objects))
# print_objects(k.state_objects)
class Task_Check_services(Check_services):
goal = lambda self: globalVar.is_service_disrupted == True
class Task_Check_daemonsets(Check_daemonsets):
goal = lambda self: globalVar.is_daemonset_disrupted == True
p = Task_Check_services(k.state_objects)
p.run(timeout=200)
# print_plan(p)
# assert "StartPod" in "\n".join([repr(x) for x in p.plan]) # StartPod not necessarily happens
assert "Evict" in "\n".join([repr(x) for x in p.plan])
assert "MarkServiceOutageEvent" in "\n".join([repr(x) for x in p.plan])
assert not "NodeOutageFinished" in "\n".join([repr(x) for x in p.plan])
def test_2_synthetic_service_outage_invload():
# print("2-6")
k, k2 =construct_scpace_for_test_2_synthetic_service_outage()
pod_running_1_1 = next(filter(lambda x: isinstance(x, Pod) and x.status._property_value == STATUS_POD["Running"], k.state_objects))
pod_pending_1_1 = next(filter(lambda x: isinstance(x, Pod) and x.status._property_value == STATUS_POD["Pending"], k.state_objects))
pod_running_1_2 = next(filter(lambda x: isinstance(x, Pod) and x.status._property_value == STATUS_POD["Running"], k2.state_objects))
pod_pending_1_2 = next(filter(lambda x: isinstance(x, Pod) and x.status._property_value == STATUS_POD["Pending"], k2.state_objects))
node_1 = next(filter(lambda x: isinstance(x, Node), k.state_objects))
node_1.searchable = False
node_2 = next(filter(lambda x: isinstance(x, Node), k2.state_objects))
node_2.searchable = False
globalVar_k1 = next(filter(lambda x: isinstance(x, GlobalVar), k.state_objects))
class NewGoal_k1(Check_services):
goal = lambda self: globalVar_k1.is_service_disrupted == True
p = NewGoal_k1(k.state_objects)
globalVar_k2 = next(filter(lambda x: isinstance(x, GlobalVar), k2.state_objects))
class NewGoal_k2(Check_services):
goal = lambda self: globalVar_k2.is_service_disrupted == True
p2 = NewGoal_k2(k2.state_objects)
assert_conditions = [
# "StartPod",
"Evict","MarkServiceOutageEvent"]
not_assert_conditions = ["NodeOutageFinished"]
checks_assert_conditions(k,k2,p,p2,assert_conditions,not_assert_conditions,DEBUG_MODE)
def construct_multi_pods_eviction_problem():
# Initialize scheduler, globalvar
k = KubernetesCluster()
scheduler = next(filter(lambda x: isinstance(x, Scheduler), k.state_objects))
# initial node state
n = Node()
n.cpuCapacity = 5
n.memCapacity = 5
n.isNull = False
# Create running pods
pod_running_1 = build_running_pod(1,2,2,n)
pod_running_2 = build_running_pod(2,2,2,n)
## Set consumption as expected
n.currentFormalCpuConsumption = 4
n.currentFormalMemConsumption = 4
n.amountOfActivePods = 2
n.searchable = False
# priority for pod-to-evict
pc = PriorityClass()
pc.priority = 10
pc.metadata_name = "high-prio-test"
# Service to detect eviction
s = Service()
s.metadata_name = "test-service"
s.amountOfActivePods = 2
s.status = STATUS_SERV["Started"]
# this service has two pods; the pending pod requests enough resources that
# both must be evicted, letting us detect a full service outage
# TODO: no outage detected if res is not 4
s.podList.add(pod_running_1)
s.podList.add(pod_running_2)
pod_running_1.hasService = True
pod_running_2.hasService = True
# Pending pod
pod_pending_1 = build_pending_pod(3,4,4,n)
pod_pending_1.priorityClass = pc # high prio will evict!
## Add pod to scheduler queue
scheduler.podQueue.add(pod_pending_1)
scheduler.queueLength += 1
scheduler.status = STATUS_SCHED["Changed"]
k.state_objects.extend([n, pc, pod_running_1, pod_running_2, pod_pending_1,s])
create_objects = []
k2 = reload_cluster_from_yaml(k,create_objects)
# print_objects(k.state_objects)
k._build_state()
return k, k2, pod_pending_1
def test_3_synthetic_service_outage_multi_invload():
# print("3")
"Multiple pods are evicted from one service to cause outage"
k, k2, pod_pending_1 = construct_multi_pods_eviction_problem()
globalVar_k1 = next(filter(lambda x: isinstance(x, GlobalVar), k.state_objects))
globalVar_k2 = next(filter(lambda x: isinstance(x, GlobalVar), k2.state_objects))
node_k1 = next(filter(lambda x: isinstance(x, Node), k.state_objects))
node_k2 = next(filter(lambda x: isinstance(x, Node), k2.state_objects))
node_k1.searchable = False
node_k2.searchable = False
globalVar_k1 = next(filter(lambda x: isinstance(x, GlobalVar), k.state_objects))
class NewGoal_k1(Check_services):
goal = lambda self: globalVar_k1.is_service_disrupted == True
p = NewGoal_k1(k.state_objects)
globalVar_k2 = next(filter(lambda x: isinstance(x, GlobalVar), k2.state_objects))
class NewGoal_k2(Check_services):
goal = lambda self: globalVar_k2.is_service_disrupted == True
p2 = NewGoal_k2(k2.state_objects)
assert_conditions = ["MarkServiceOutageEvent"]
not_assert_conditions = []
checks_assert_conditions(k,k2,p,p2,assert_conditions,not_assert_conditions,DEBUG_MODE)
def prepare_test_4_synthetic_service_NO_outage_multi():
# print("4")
"No outage is caused by evicting only one pod of a multi-pod service"
# Initialize scheduler, globalvar
k = KubernetesCluster()
scheduler = next(filter(lambda x: isinstance(x, Scheduler), k.state_objects))
globalVar = next(filter(lambda x: isinstance(x, GlobalVar), k.state_objects))
# initial node state
n = Node()
n.cpuCapacity = 5
n.memCapacity = 5
n.isNull = False
# Create running pods
pod_running_1 = build_running_pod(1,2,2,n)
pod_running_2 = build_running_pod(2,2,2,n)
## Set consumption as expected
n.currentFormalCpuConsumption = 4
n.currentFormalMemConsumption = 4
n.amountOfActivePods = 2
# priority for pod-to-evict
pc = PriorityClass()
pc.priority = 10
pc.metadata_name = "high-prio-test"
# Service to detect eviction
s = Service()
s.metadata_name = "test-service"
s.amountOfActivePods = 2
s.status = STATUS_SERV["Started"]
# this service has two pods; evicting a single pod here must not
# register a full service outage
# TODO: no outage detected if res is not 4
s.podList.add(pod_running_1)
s.podList.add(pod_running_2)
pod_running_1.hasService = True
pod_running_2.hasService = True
# Pending pod
pod_pending_1 = build_pending_pod(3,2,2,n)
pod_pending_1.priorityClass = pc # high prio will evict!
## Add pod to scheduler queue
scheduler.podQueue.add(pod_pending_1)
scheduler.queueLength += 1
scheduler.status = STATUS_SCHED["Changed"]
k.state_objects.extend([n, pc, pod_running_1, pod_running_2, pod_pending_1,s])
create_objects = []
k2 = reload_cluster_from_yaml(k,create_objects)
k._build_state()
return k,k2
def test_4_synthetic_service_NO_outage_multi():
k, k2 = prepare_test_4_synthetic_service_NO_outage_multi()
globalVar_k1 = next(filter(lambda x: isinstance(x, GlobalVar), k.state_objects))
class NewGoal_k1(Check_services):
goal = lambda self: globalVar_k1.goal_achieved == True
p = NewGoal_k1(k.state_objects)
globalVar_k2 = next(filter(lambda x: isinstance(x, GlobalVar), k2.state_objects))
class NewGoal_k2(Check_services):
goal = lambda self: globalVar_k2.goal_achieved == True
p2 = NewGoal_k2(k2.state_objects)
assert_conditions = ["SchedulerQueueClean"]
not_assert_conditions = []
checks_assert_conditions(k,k2,p,p2,assert_conditions,not_assert_conditions,DEBUG_MODE)
@pytest.mark.debug()
def test_synthetic_service_NO_outage_deployment_IS_outage_step_1():
"Deployment (partial) outage must be registered in case where Deployment exists"
# Initialize scheduler, globalvar
# kalc.misc.util.CPU_DIVISOR=40
# kalc.misc.util.MEM_DIVISOR=125
k = KubernetesCluster()
scheduler = next(filter(lambda x: isinstance(x, Scheduler), k.state_objects))
globalVar = next(filter(lambda x: isinstance(x, GlobalVar), k.state_objects))
# initial node state
n = Node()
n.cpuCapacity = 5
n.memCapacity = 5
n.isNull = False
# Create running pods
pod_running_1 = build_running_pod(1,2,2,n)
pod_running_2 = build_running_pod(2,2,2,n)
## Set consumption as expected
n.currentFormalCpuConsumption = 4
n.currentFormalMemConsumption = 4
n.amountOfActivePods = 2
# priority for pod-to-evict
pc = PriorityClass()
pc.priority = 10
pc.metadata_name = "high-prio-test"
# Service to detect eviction
s = Service()
s.metadata_name = "test-service"
s.amountOfActivePods = 2
s.status = STATUS_SERV["Started"]
# this service has two pods (attached via targetService below);
# the deployment is what registers the (partial) outage here
# TODO: no outage detected if res is not 4
pod_running_1.targetService = s
pod_running_2.targetService = s
pod_running_1.hasService = True
pod_running_2.hasService = True
# Pending pod
pod_pending_1 = build_pending_pod(3,2,2,n)
pod_pending_1.priorityClass = pc # high prio will evict!
## Add pod to scheduler queue
scheduler.podQueue.add(pod_pending_1)
scheduler.queueLength += 1
scheduler.status = STATUS_SCHED["Changed"]
d = Deployment()
d.spec_replicas = 2
d.amountOfActivePods = 2
pod_running_1.hasDeployment = True
pod_running_2.hasDeployment = True
d.podList.add(pod_running_1)
d.podList.add(pod_running_2)
k.state_objects.extend([n, pc, pod_running_1, pod_running_2, pod_pending_1,s,d])
# kalc.misc.util.CPU_DIVISOR=40
# kalc.misc.util.MEM_DIVISOR=125
yamlState = convert_space_to_yaml(k.state_objects, wrap_items=True)
k2 = KubernetesCluster()
load_yaml(yamlState,k2)
k._build_state()
globalVar_k1 = next(filter(lambda x: isinstance(x, GlobalVar), k.state_objects))
class test_synthetic_service_NO_outage_deployment_IS_outage_k1(Check_deployments):
goal = lambda self: pod_running_1.status == STATUS_POD["Pending"]
p = test_synthetic_service_NO_outage_deployment_IS_outage_k1(k.state_objects)
globalVar_k2 = next(filter(lambda x: isinstance(x, GlobalVar), k2.state_objects))
pods = filter(lambda x: isinstance(x, Pod), k2.state_objects)
pod_k2 = next(filter(lambda x: x.metadata_name._get_value() == "pod1", pods))
class test_synthetic_service_NO_outage_deployment_IS_outage_k2(Check_deployments):
goal = lambda self: pod_k2.status == STATUS_POD["Pending"]
p2 = test_synthetic_service_NO_outage_deployment_IS_outage_k2(k2.state_objects)
assert_conditions = ["Evict_and_replace_less_prioritized_pod_when_target_node_is_defined"]
not_assert_conditions = []
checks_assert_conditions(k,k2,p,p2,assert_conditions,not_assert_conditions,DEBUG_MODE)
def test_synthetic_service_NO_outage_deployment_IS_outage():
"Deployment (partial) outage must be registered in case where Deployment exists"
# Initialize scheduler, globalvar
k = KubernetesCluster()
scheduler = next(filter(lambda x: isinstance(x, Scheduler), k.state_objects))
globalVar = next(filter(lambda x: isinstance(x, GlobalVar), k.state_objects))
# initial node state
n = Node()
n.cpuCapacity = 5
n.memCapacity = 5
n.isNull = False
# Create running pods
pod_running_1 = build_running_pod(1,2,2,n)
pod_running_2 = build_running_pod(2,2,2,n)
## Set consumption as expected
n.currentFormalCpuConsumption = 4
n.currentFormalMemConsumption = 4
n.amountOfActivePods = 2
# priority for pod-to-evict
pc = PriorityClass()
pc.priority = 10
pc.metadata_name = "high-prio-test"
# Service to detect eviction
s = Service()
s.metadata_name = "test-service"
s.amountOfActivePods = 2
s.status = STATUS_SERV["Started"]
# this service has two pods; the expected disruption here is a
# deployment (partial) outage rather than a service outage
# TODO: no outage detected if res is not 4
s.podList.add(pod_running_1)
s.podList.add(pod_running_2)
pod_running_1.hasService = True
pod_running_2.hasService = True
# Pending pod
pod_pending_1 = build_pending_pod(3,2,2,n)
pod_pending_1.priorityClass = pc # high prio will evict!
## Add pod to scheduler queue
scheduler.podQueue.add(pod_pending_1)
scheduler.queueLength += 1
scheduler.status = STATUS_SCHED["Changed"]
d = Deployment()
d.spec_replicas = 2
d.amountOfActivePods = 2
pod_running_1.hasDeployment = True
pod_running_2.hasDeployment = True
d.podList.add(pod_running_1)
d.podList.add(pod_running_2)
k.state_objects.extend([n, pc, pod_running_1, pod_running_2, pod_pending_1,s,d])
yamlState = convert_space_to_yaml(k.state_objects, wrap_items=True)
k2 = KubernetesCluster()
load_yaml(yamlState,k2)
k._build_state()
globalVar_k1 = next(filter(lambda x: isinstance(x, GlobalVar), k.state_objects))
class NewGoal_k1(Check_deployments):
goal = lambda self: globalVar_k1.is_deployment_disrupted == True
p = NewGoal_k1(k.state_objects)
globalVar_k2 = next(filter(lambda x: isinstance(x, GlobalVar), k2.state_objects))
class NewGoal_k2(Check_deployments):
goal = lambda self: globalVar_k2.is_deployment_disrupted == True
p2 = NewGoal_k2(k2.state_objects)
assert_conditions = ["MarkDeploymentOutageEvent"]
not_assert_conditions = []
checks_assert_conditions(k,k2,p,p2,assert_conditions,not_assert_conditions,DEBUG_MODE)
def test_5_evict_and_killpod_deployment_without_service():
# print("5")
"Test that killPod works for deployment"
# Initialize scheduler, globalvar
k = KubernetesCluster()
scheduler = next(filter(lambda x: isinstance(x, Scheduler), k.state_objects))
# initial node state
n = Node()
n.cpuCapacity = 5
n.memCapacity = 5
n.isNull = False
# create Deployment that we're going to detect failure of...
d = Deployment()
d.spec_replicas = 2
# Create running pods
pod_running_1 = build_running_pod_with_d(1,2,2,n,d,None)
pod_running_2 = build_running_pod_with_d(2,2,2,n,d,None)
n.amountOfActivePods = 2
# priority for pod-to-evict
pc = PriorityClass()
pc.priority = 10
pc.metadata_name = "high-prio-test"
# Service to detect eviction
s = Service()
s.metadata_name = "test-service"
# Pending pod
pod_pending_1 = build_pending_pod(3,2,2,n)
pod_pending_1.priorityClass = pc # high prio will evict!
## Add pod to scheduler queue
scheduler.podQueue.add(pod_pending_1)
scheduler.queueLength += 1
scheduler.status = STATUS_SCHED["Changed"]
k.state_objects.extend([n, pc, pod_running_1, pod_running_2, s, pod_pending_1, d])
create_objects = []
k2 = reload_cluster_from_yaml(k,create_objects)
k._build_state()
pod_running_1_1 = next(filter(lambda x: isinstance(x, Pod) and x.status._property_value == STATUS_POD["Running"], k.state_objects))
class NewGoal_k1(OptimisticRun):
goal = lambda self: pod_running_1_1.status == STATUS_POD["Pending"]
p = NewGoal_k1(k.state_objects)
pod_running_1_2 = next(filter(lambda x: isinstance(x, Pod) and x.status._property_value == STATUS_POD["Running"], k2.state_objects))
class NewGoal_k2(OptimisticRun):
goal = lambda self: pod_running_1_2.status == STATUS_POD["Pending"]
p2 = NewGoal_k2(k2.state_objects)
assert_conditions = ["Evict_and_replace_less_prioritized_pod_when_target_node_is_defined",\
"KillPod_IF_Deployment_isNotNUll_Service_isNull_Daemonset_isNull"]
not_assert_conditions = ["NodeOutageFinished"]
checks_assert_conditions(k,k2,p,p2,assert_conditions,not_assert_conditions,DEBUG_MODE)
def test_6_evict_and_killpod_without_deployment_without_service():
# print("6")
"Test that killPod works without either deployment or service"
# Initialize scheduler, globalvar
k = KubernetesCluster()
scheduler = next(filter(lambda x: isinstance(x, Scheduler), k.state_objects))
# initial node state
n = Node()
n.cpuCapacity = 5
n.memCapacity = 5
# create Deployment that we're going to detect failure of...
# d = Deployment()
# d.spec_replicas = 2
# Create running pods
pod_running_1 = build_running_pod_with_d(1,2,2,n,None,None)
pod_running_2 = build_running_pod_with_d(2,2,2,n,None,None)
# priority for pod-to-evict
pc = PriorityClass()
pc.priority = 10
pc.metadata_name = "high-prio-test"
# Service to detect eviction
s = Service()
s.metadata_name = "test-service"
s.amountOfActivePods = 2
# Pending pod
pod_pending_1 = build_pending_pod(3,2,2,n)
pod_pending_1.priorityClass = pc # high prio will evict!
## Add pod to scheduler queue
scheduler.podQueue.add(pod_pending_1)
scheduler.queueLength += 1
scheduler.status = STATUS_SCHED["Changed"]
k.state_objects.extend([n, pc, pod_running_1, pod_running_2, s, pod_pending_1])
create_objects = []
k2 = reload_cluster_from_yaml(k,create_objects)
k._build_state()
pod_running_1_1 = next(filter(lambda x: isinstance(x, Pod) and x.status._property_value == STATUS_POD["Running"], k.state_objects))
class NewGoal_k1(OptimisticRun):
goal = lambda self: pod_running_1_1.status == STATUS_POD["Pending"]
p = NewGoal_k1(k.state_objects)
pod_running_1_2 = next(filter(lambda x: isinstance(x, Pod) and x.status._property_value == STATUS_POD["Running"], k2.state_objects))
class NewGoal_k2(OptimisticRun):
goal = lambda self: pod_running_1_2.status == STATUS_POD["Pending"]
p2 = NewGoal_k2(k2.state_objects)
assert_conditions = ["Evict_and_replace_less_prioritized_pod_when_target_node_is_defined",\
"KillPod_IF_Deployment_isNUll_Service_isNull_Daemonset_isNull"]
not_assert_conditions = ["NodeOutageFinished"]
checks_assert_conditions(k,k2,p,p2,assert_conditions,not_assert_conditions,DEBUG_MODE)
# @pytest.mark.skip(reason="FIXME")
def test_7_evict_and_killpod_with_deployment_and_service():
# print("7")
"Test that killPod works for deployment with service"
# Initialize scheduler, globalvar
k = KubernetesCluster()
scheduler = next(filter(lambda x: isinstance(x, Scheduler), k.state_objects))
# initial node state
n = Node()
n.cpuCapacity = 5
n.memCapacity = 5
# create Deployment that we're going to detect failure of...
d = Deployment()
d.spec_replicas = 2
# Create running pods
pod_running_1 = build_running_pod_with_d(1,2,2,n,d,None)
pod_running_2 = build_running_pod_with_d(2,2,2,n,d,None)
n.amountOfActivePods = 2
# priority for pod-to-evict
pc = PriorityClass()
pc.priority = 10
pc.metadata_name = "high-prio-test"
# Service to detect eviction
s = Service()
s.metadata_name = "test-service"
s.amountOfActivePods = 2
s.status = STATUS_SERV["Started"]
# our service has multiple pods, but here we are detecting the pods-pending issue
# rather than a service outage
s.podList.add(pod_running_1)
pod_running_1.hasService = True
s.podList.add(pod_running_2)
pod_running_2.hasService = True
# Pending pod
pod_pending_1 = build_pending_pod(3,2,2,n)
pod_pending_1.priorityClass = pc # high prio will evict!
## Add pod to scheduler queue
scheduler.podQueue.add(pod_pending_1)
scheduler.queueLength += 1
scheduler.status = STATUS_SCHED["Changed"]
k.state_objects.extend([n, pc, pod_running_1, pod_running_2, s, pod_pending_1, d])
create_objects = []
k2 = reload_cluster_from_yaml(k,create_objects)
k._build_state()
pod_running_1_1 = next(filter(lambda x: isinstance(x, Pod) and x.status._property_value == STATUS_POD["Running"], k.state_objects))
class NewGoal_k1(OptimisticRun):
goal = lambda self: pod_running_1_1.status == STATUS_POD["Pending"]
p = NewGoal_k1(k.state_objects)
pod_running_1_2 = next(filter(lambda x: isinstance(x, Pod) and x.status._property_value == STATUS_POD["Running"], k2.state_objects))
class NewGoal_k2(OptimisticRun):
goal = lambda self: pod_running_1_2.status == STATUS_POD["Pending"]
p2 = NewGoal_k2(k2.state_objects)
assert_conditions = ["Evict_and_replace_less_prioritized_pod_when_target_node_is_defined",\
"KillPod_IF_Deployment_isNotNUll_Service_isNotNull_Daemonset_isNull"]
not_assert_conditions = ["NodeOutageFinished"]
checks_assert_conditions(k,k2,p,p2,assert_conditions,not_assert_conditions,DEBUG_MODE)
def test_8_evict_and_killpod_with_daemonset_without_service():
# print("8")
"Test that killPod works with daemonset"
# Initialize scheduler, globalvar
k = KubernetesCluster()
scheduler = next(filter(lambda x: isinstance(x, Scheduler), k.state_objects))
# initial node state
n = Node()
n.cpuCapacity = 5
n.memCapacity = 5
# create DaemonSet that we're going to detect failure of...
ds = DaemonSet()
# Create running pods
pod_running_1 = build_running_pod_with_d(1,2,2,n,None,ds)
pod_running_2 = build_running_pod_with_d(2,2,2,n,None,ds)
n.amountOfActivePods = 2
# priority for pod-to-evict
pc = PriorityClass()
pc.priority = 10
pc.metadata_name = "high-prio-test"
# Service to detect eviction
s = Service()
s.metadata_name = "test-service"
# s.amountOfActivePods = 2
# s.status = STATUS_SERV["Started"]
# Pending pod
pod_pending_1 = build_pending_pod(3,2,2,n)
pod_pending_1.priorityClass = pc # high prio will evict!
## Add pod to scheduler queue
scheduler.podQueue.add(pod_pending_1)
scheduler.queueLength += 1
scheduler.status = STATUS_SCHED["Changed"]
k.state_objects.extend([n, pc, pod_running_1, pod_running_2, s, pod_pending_1,ds])
create_objects = []
k2 = reload_cluster_from_yaml(k,create_objects)
k._build_state()
pod_running_1_1 = next(filter(lambda x: isinstance(x, Pod) and x.status._property_value == STATUS_POD["Running"], k.state_objects))
class NewGoal_k1(OptimisticRun):
goal = lambda self: pod_running_1_1.status == STATUS_POD["Pending"]
p = NewGoal_k1(k.state_objects)
pod_running_1_2 = next(filter(lambda x: isinstance(x, Pod) and x.status._property_value == STATUS_POD["Running"], k2.state_objects))
class NewGoal_k2(OptimisticRun):
goal = lambda self: pod_running_1_2.status == STATUS_POD["Pending"]
p2 = NewGoal_k2(k2.state_objects)
assert_conditions = ["Evict_and_replace_less_prioritized_pod_when_target_node_is_defined",\
"KillPod_IF_Deployment_isNUll_Service_isNull_Daemonset_isNotNull"]
not_assert_conditions = ["NodeOutageFinished"]
checks_assert_conditions(k,k2,p,p2,assert_conditions,not_assert_conditions,DEBUG_MODE)
def test_9_evict_and_killpod_with_daemonset_with_service():
# print("9")
"Test that killPod works with daemonset and service"
# Initialize scheduler, globalvar
k = KubernetesCluster()
scheduler = next(filter(lambda x: isinstance(x, Scheduler), k.state_objects))
# initial node state
n = Node()
n.cpuCapacity = 5
n.memCapacity = 5
# create DaemonSet that we're going to detect failure of...
ds = DaemonSet()
# Create running pods
pod_running_1 = build_running_pod_with_d(1,2,2,n,None,ds)
pod_running_2 = build_running_pod_with_d(2,2,2,n,None,ds)
n.amountOfActivePods = 2
# priority for pod-to-evict
pc = PriorityClass()
pc.priority = 10
pc.metadata_name = "high-prio-test"
# Service to detect eviction
s = Service()
s.metadata_name = "test-service"
s.amountOfActivePods = 2
s.status = STATUS_SERV["Started"]
# our service has multiple pods, but here we are detecting a pods-pending issue;
# remove the service when instead detecting a service outage caused by the bug above
s.podList.add(pod_running_1)
pod_running_1.hasService = True
s.podList.add(pod_running_2)
pod_running_2.hasService = True
# Pending pod
pod_pending_1 = build_pending_pod(3,2,2,n)
pod_pending_1.priorityClass = pc # high prio will evict!
## Add pod to scheduler queue
scheduler.podQueue.add(pod_pending_1)
scheduler.queueLength += 1
scheduler.status = STATUS_SCHED["Changed"]
k.state_objects.extend([n, pc, pod_running_1, pod_running_2, s, pod_pending_1,ds])
create_objects = []
k2 = reload_cluster_from_yaml(k,create_objects)
k._build_state()
pod_running_1_1 = next(filter(lambda x: isinstance(x, Pod) and x.status._property_value == STATUS_POD["Running"], k.state_objects))
class NewGoal_k1(OptimisticRun):
goal = lambda self: pod_running_1_1.status == STATUS_POD["Pending"]
p = NewGoal_k1(k.state_objects)
pod_running_1_2 = next(filter(lambda x: isinstance(x, Pod) and x.status._property_value == STATUS_POD["Running"], k2.state_objects))
class NewGoal_k2(OptimisticRun):
goal = lambda self: pod_running_1_2.status == STATUS_POD["Pending"]
p2 = NewGoal_k2(k2.state_objects)
assert_conditions = ["Evict_and_replace_less_prioritized_pod_when_target_node_is_defined",\
"KillPod_IF_Deployment_isNUll_Service_isNotNull_Daemonset_isNotNull"]
not_assert_conditions = ["NodeOutageFinished"]
checks_assert_conditions(k,k2,p,p2,assert_conditions,not_assert_conditions,DEBUG_MODE)
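# Note on the expected action names in assert_conditions: the KillPod_* /
# StartPod_* identifiers encode which of the pod's Deployment, Service and
# Daemonset references are null, and they are matched verbatim against the
# repr of the planned actions, so the mixed-case "isNUll" spelling is
# apparently part of the model's action names and must not be "corrected" here.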
def test_10_startpod_without_deployment_without_service():
# print("10")
"Test that StartPod works without daemonset/deployment and service"
# Initialize scheduler, globalvar
k = KubernetesCluster()
scheduler = next(filter(lambda x: isinstance(x, Scheduler), k.state_objects))
# initial node state
n = Node()
n.cpuCapacity = 5
n.memCapacity = 5
ds = DaemonSet()
# Create running pods
pod_running_1 = build_running_pod_with_d(1,2,2,n,None,ds)
n.amountOfActivePods = 1
# Service
s = Service()
s.podList.add(pod_running_1)
pod_running_1.hasService = True
# Pending pod
pod_pending_1 = build_pending_pod_with_d(3,2,2,n,None,None)
## Add pod to scheduler queue
scheduler.podQueue.add(pod_pending_1)
scheduler.queueLength += 1
scheduler.status = STATUS_SCHED["Changed"]
k.state_objects.extend([n, pod_running_1, s, pod_pending_1,ds])
create_objects = []
k2 = reload_cluster_from_yaml(k,create_objects)
k._build_state()
pod_pending_1_1 = next(filter(lambda x: isinstance(x, Pod) and x.status._property_value == STATUS_POD["Pending"], k.state_objects))
class NewGoal_k1(OptimisticRun):
goal = lambda self: pod_pending_1_1.status == STATUS_POD["Running"]
p = NewGoal_k1(k.state_objects)
pod_pending_1_2 = next(filter(lambda x: isinstance(x, Pod) and x.status._property_value == STATUS_POD["Pending"], k2.state_objects))
class NewGoal_k2(OptimisticRun):
goal = lambda self: pod_pending_1_2.status == STATUS_POD["Running"]
p2 = NewGoal_k2(k2.state_objects)
assert_conditions = ["SelectNode",\
"StartPod_IF_Deployment_isNUll_Service_isNull_Daemonset_isNull"]
not_assert_conditions = ["NodeOutageFinished"]
checks_assert_conditions(k,k2,p,p2,assert_conditions,not_assert_conditions,DEBUG_MODE)
def test_11_startpod_without_deployment_with_service():
# print("11")
"Test that StartPod works without daemonset/deployment but with service"
# Initialize scheduler, globalvar
k = KubernetesCluster()
scheduler = next(filter(lambda x: isinstance(x, Scheduler), k.state_objects))
# initial node state
n = Node()
n.cpuCapacity = 5
n.memCapacity = 5
ds = DaemonSet()
# Create running pods
pod_running_1 = build_running_pod_with_d(1,2,2,n,None,ds)
n.amountOfActivePods = 1
# Pending pod
pod_pending_1 = build_pending_pod_with_d(3,2,2,n,None,None)
# Service
s = Service()
s.metadata_name = "test-service"
s.amountOfActivePods = 0
s.status = STATUS_SERV["Pending"]
s.podList.add(pod_pending_1)
pod_pending_1.hasService = True
## Add pod to scheduler queue
scheduler.podQueue.add(pod_pending_1)
scheduler.queueLength += 1
scheduler.status = STATUS_SCHED["Changed"]
k.state_objects.extend([n, pod_running_1, s, pod_pending_1,ds])
create_objects = []
k2 = reload_cluster_from_yaml(k,create_objects)
k._build_state()
pod_pending_1_1 = next(filter(lambda x: isinstance(x, Pod) and x.status._property_value == STATUS_POD["Pending"], k.state_objects))
class NewGoal_k1(OptimisticRun):
goal = lambda self: pod_pending_1_1.status == STATUS_POD["Running"]
p = NewGoal_k1(k.state_objects)
pod_pending_1_2 = next(filter(lambda x: isinstance(x, Pod) and x.status._property_value == STATUS_POD["Pending"], k2.state_objects))
class NewGoal_k2(OptimisticRun):
goal = lambda self: pod_pending_1_2.status == STATUS_POD["Running"]
p2 = NewGoal_k2(k2.state_objects)
assert_conditions = ["SelectNode",\
"StartPod_IF_Deployment_isNUll_Service_isNotNull_Daemonset_isNull"]
not_assert_conditions = ["NodeOutageFinished"]
checks_assert_conditions(k,k2,p,p2,assert_conditions,not_assert_conditions,DEBUG_MODE)
def test_12_startpod_with_deployment_with_service():
# print("12")
"Test that StartPod works with deployment and service"
# Initialize scheduler, globalvar
k = KubernetesCluster()
scheduler = next(filter(lambda x: isinstance(x, Scheduler), k.state_objects))
# initial node state
n = Node()
n.cpuCapacity = 5
n.memCapacity = 5
d = Deployment()
d.spec_replicas = 2
# Create running pods
pod_running_1 = build_running_pod_with_d(1,2,2,n,d,None)
n.amountOfActivePods = 1
# Pending pod
pod_pending_1 = build_pending_pod_with_d(3,2,2,n,d,None)
# Service
s = Service()
s.metadata_name = "test-service"
s.amountOfActivePods = 0
s.status = STATUS_SERV["Pending"]
s.podList.add(pod_running_1)
pod_running_1.hasService = True
s.podList.add(pod_pending_1)
pod_pending_1.hasService = True
## Add pod to scheduler queue
scheduler.podQueue.add(pod_pending_1)
scheduler.queueLength += 1
scheduler.status = STATUS_SCHED["Changed"]
k.state_objects.extend([n, pod_running_1, s, pod_pending_1,d])
create_objects = []
k2 = reload_cluster_from_yaml(k,create_objects)
k._build_state()
pod_pending_1_1 = next(filter(lambda x: isinstance(x, Pod) and x.status._property_value == STATUS_POD["Pending"], k.state_objects))
class NewGoal_k1(OptimisticRun):
goal = lambda self: pod_pending_1_1.status == STATUS_POD["Running"]
p = NewGoal_k1(k.state_objects)
pod_pending_1_2 = next(filter(lambda x: isinstance(x, Pod) and x.status._property_value == STATUS_POD["Pending"], k2.state_objects))
class NewGoal_k2(OptimisticRun):
goal = lambda self: pod_pending_1_2.status == STATUS_POD["Running"]
p2 = NewGoal_k2(k2.state_objects)
assert_conditions = ["SelectNode",\
"StartPod_IF_Deployment_isNotNUll_Service_isNotNull_Daemonset_isNull"]
not_assert_conditions = ["NodeOutageFinished"]
checks_assert_conditions(k,k2,p,p2,assert_conditions,not_assert_conditions,DEBUG_MODE)
def test_13_startpod_with_daemonset_without_service():
# print("13")
"Test that StartPod works with daemonset and without service"
# Initialize scheduler, globalvar
k = KubernetesCluster()
scheduler = next(filter(lambda x: isinstance(x, Scheduler), k.state_objects))
# initial node state
n = Node()
n.cpuCapacity = 5
n.memCapacity = 5
d = DaemonSet()
# Create running pods
pod_running_1 = build_running_pod_with_d(1,2,2,n,None,d)
n.amountOfActivePods = 1
# Pending pod
pod_pending_1 = build_pending_pod_with_d(3,2,2,n,None,d)
## Add pod to scheduler queue
scheduler.podQueue.add(pod_pending_1)
scheduler.queueLength += 1
scheduler.status = STATUS_SCHED["Changed"]
k.state_objects.extend([n, pod_running_1, pod_pending_1,d])
create_objects = []
k2 = reload_cluster_from_yaml(k,create_objects)
k._build_state()
pod_pending_1_1 = next(filter(lambda x: isinstance(x, Pod) and x.status._property_value == STATUS_POD["Pending"], k.state_objects))
class NewGoal_k1(OptimisticRun):
goal = lambda self: pod_pending_1_1.status == STATUS_POD["Running"]
p = NewGoal_k1(k.state_objects)
pod_pending_1_2 = next(filter(lambda x: isinstance(x, Pod) and x.status._property_value == STATUS_POD["Pending"], k2.state_objects))
class NewGoal_k2(OptimisticRun):
goal = lambda self: pod_pending_1_2.status == STATUS_POD["Running"]
p2 = NewGoal_k2(k2.state_objects)
assert_conditions = ["StartPod_IF_Deployment_isNUll_Service_isNull_Daemonset_isNotNull"]
not_assert_conditions = ["NodeOutageFinished"]
checks_assert_conditions(k,k2,p,p2,assert_conditions,not_assert_conditions,DEBUG_MODE)
def test_14_startpod_with_daemonset_with_service():
# print("14")
"Test that StartPod works with daemonset and service"
# Initialize scheduler, globalvar
k = KubernetesCluster()
scheduler = next(filter(lambda x: isinstance(x, Scheduler), k.state_objects))
# initial node state
n = Node()
n.cpuCapacity = 5
n.memCapacity = 5
d = DaemonSet()
# Create running pods
pod_running_1 = build_running_pod_with_d(1,2,2,n,None,d)
n.amountOfActivePods = 1
# Pending pod
pod_pending_1 = build_pending_pod_with_d(3,2,2,n,None,d)
# Service
s = Service()
s.metadata_name = "test-service"
s.amountOfActivePods = 0
s.status = STATUS_SERV["Pending"]
s.podList.add(pod_running_1)
pod_running_1.hasService = True
s.podList.add(pod_pending_1)
pod_pending_1.hasService = True
## Add pod to scheduler queue
scheduler.podQueue.add(pod_pending_1)
scheduler.queueLength += 1
scheduler.status = STATUS_SCHED["Changed"]
k.state_objects.extend([n, pod_running_1, s, pod_pending_1,d])
create_objects = []
k2 = reload_cluster_from_yaml(k,create_objects)
k._build_state()
pod_pending_1_1 = next(filter(lambda x: isinstance(x, Pod) and x.status._property_value == STATUS_POD["Pending"], k.state_objects))
class NewGoal_k1(OptimisticRun):
goal = lambda self: pod_pending_1_1.status == STATUS_POD["Running"]
p = NewGoal_k1(k.state_objects)
pod_pending_1_2 = next(filter(lambda x: isinstance(x, Pod) and x.status._property_value == STATUS_POD["Pending"], k2.state_objects))
class NewGoal_k2(OptimisticRun):
goal = lambda self: pod_pending_1_2.status == STATUS_POD["Running"]
p2 = NewGoal_k2(k2.state_objects)
assert_conditions = ["StartPod_IF_Deployment_isNUll_Service_isNotNull_Daemonset_isNotNull"]
not_assert_conditions = ["NodeOutageFinished"]
checks_assert_conditions(k,k2,p,p2,assert_conditions,not_assert_conditions,DEBUG_MODE)
# @pytest.mark.skip(reason="FIXME")
def test_15_has_deployment_creates_daemonset__pods_evicted_pods_pending_synthetic():
# print("15")
# Initialize scheduler, globalvar
k = KubernetesCluster()
scheduler = next(filter(lambda x: isinstance(x, Scheduler), k.state_objects))
# initial node state
n = Node()
n.cpuCapacity = 5
n.memCapacity = 5
# create Deployment that we're going to detect failure of...
d = Deployment()
d.spec_replicas = 2
# Create running pods
pod_running_1 = build_running_pod_with_d(1,2,2,n,d,None)
pod_running_2 = build_running_pod_with_d(2,2,2,n,d,None)
n.amountOfActivePods = 2
# priority for pod-to-evict
pc = PriorityClass()
pc.priority = 10
pc.metadata_name = "high-prio-test"
# Service to detect eviction
s = Service()
s.metadata_name = "test-service"
s.amountOfActivePods = 2
s.status = STATUS_SERV["Started"]
# Create Daemonset for prioritized pod
ds = DaemonSet()
# Pending pod
pod_pending_1 = build_pending_pod_with_d(3,2,2,n,None,ds)
pod_pending_1.priorityClass = pc # high prio will evict!
## Add pod to scheduler queue
scheduler.podQueue.add(pod_pending_1)
scheduler.queueLength += 1
scheduler.status = STATUS_SCHED["Changed"]
k.state_objects.extend([n, pc, pod_running_1, pod_running_2, pod_pending_1, d,s,ds])
create_objects = []
k2 = reload_cluster_from_yaml(k,create_objects)
k._build_state()
print_objects(k.state_objects)
print_objects_from_yaml(k)
print("----")
print_objects(k2.state_objects)
print_objects_from_yaml(k2)
globalVar_k1 = next(filter(lambda x: isinstance(x, GlobalVar), k.state_objects))
pod_pending_1_1 = next(filter(lambda x: isinstance(x, Pod) and x.status._property_value == STATUS_POD["Pending"], k.state_objects))
# class NewGoal_step1_k1(Check_deployments):
# goal = lambda self: globalVar_k1.goal_achieved == True and\
# pod_pending_1_1.status == STATUS_POD["Running"]
class NewGoal_k1(Check_deployments):
pass
p = NewGoal_k1(k.state_objects)
globalVar_k2 = next(filter(lambda x: isinstance(x, GlobalVar), k2.state_objects))
pod_pending_1_2 = next(filter(lambda x: isinstance(x, Pod) and x.status._property_value == STATUS_POD["Pending"], k2.state_objects))
class NewGoal_k2(Check_deployments):
pass
# class NewGoal_step1_k2(Check_deployments):
# goal = lambda self: globalVar_k2.goal_achieved == True and\
# pod_pending_1_2.status == STATUS_POD["Running"]
p2 = NewGoal_k2(k2.state_objects)
assert_conditions = ["Evict",
"MarkDeploymentOutageEvent"]
not_assert_conditions = ["NodeOutageFinished"]
checks_assert_conditions(k,k2,p,p2,assert_conditions,not_assert_conditions,DEBUG_MODE)
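# test_15 above (and test_16 below) instantiate Check_deployments with a bare
# `pass` body, presumably relying on the goal that Check_deployments itself
# defines; the commented-out NewGoal_step1 variants show the more explicit
# per-pod goal that was tried previously.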
def test_16_creates_deployment_but_insufficient_resource__pods_pending_synthetic():
# print("16")
# Initialize scheduler, globalvar
k = KubernetesCluster()
scheduler = next(filter(lambda x: isinstance(x, Scheduler), k.state_objects))
# initial node state
n = Node()
n.cpuCapacity = 5
n.memCapacity = 5
# Create running pods
pod_running_1 = build_running_pod(1,2,2,n)
pod_running_2 = build_running_pod(2,2,2,n)
n.amountOfActivePods = 2
## Set consumption as expected
n.currentFormalCpuConsumption = 4
n.currentFormalMemConsumption = 4
# Service to detect eviction
s = Service()
s.metadata_name = "test-service"
s.amountOfActivePods = 2
s.status = STATUS_SERV["Started"]
# Pending pod
pod_pending_1 = build_pending_pod(3,2,2,n)
## Add pod to scheduler queue
scheduler.podQueue.add(pod_pending_1)
scheduler.queueLength += 1
scheduler.status = STATUS_SCHED["Changed"]
# create Deployment that we're going to detect failure of...
d = Deployment()
d.podList.add(pod_running_1)
d.podList.add(pod_running_2)
d.amountOfActivePods = 2
d.spec_replicas = 2
dnew = Deployment()
dnew.podList.add(pod_pending_1)
dnew.amountOfActivePods = 0
dnew.spec_replicas = 1
k.state_objects.extend([n, pod_running_1, pod_running_2, d])
create_objects = [dnew]
k2 = reload_cluster_from_yaml(k,create_objects)
k._build_state()
# pod_pending_1_1 = next(filter(lambda x: isinstance(x, Pod) and x.status._property_value == STATUS_POD["Pending"], k.state_objects))
class NewGoal_k1(Check_deployments):
pass
p = NewGoal_k1(k.state_objects)
class NewGoal_k2(Check_deployments):
pass
p2 = NewGoal_k2(k2.state_objects)
assert_conditions = ["MarkDeploymentOutageEvent"]
not_assert_conditions = ["NodeOutageFinished"]
checks_assert_conditions(k,k2,p,p2,assert_conditions,not_assert_conditions,DEBUG_MODE)
def test_17_creates_service_and_deployment_insufficient_resource__service_outage():
# print("17")
# Initialize scheduler, globalVar
k = KubernetesCluster()
scheduler = next(filter(lambda x: isinstance(x, Scheduler), k.state_objects))
globalVar = next(filter(lambda x: isinstance(x, GlobalVar), k.state_objects))
# initial node state
n = Node()
n.cpuCapacity = 5
n.memCapacity = 5
n.searchable = False
# Create running pods
pod_running_1 = build_running_pod(1,2,2,n)
pod_running_2 = build_running_pod(2,2,2,n)
n.amountOfActivePods = 2
## Set consumption as expected
n.currentFormalCpuConsumption = 4
n.currentFormalMemConsumption = 4
# priority for pod-to-evict
# pc = PriorityClass()
# pc.priority = 10
# pc.metadata_name = "high-prio-test"
# Service to detect eviction
s = Service()
s.metadata_name = "test-service"
s.amountOfActivePods = 2
s.status = STATUS_SERV["Started"]
s.searchable = True
# our service has multiple pods, but here we are detecting a pods-pending issue;
# remove the service when instead detecting a service outage caused by the bug above
s.podList.add(pod_running_1)
s.podList.add(pod_running_2)
pod_running_1.hasService = True
pod_running_2.hasService = True
# Pending pod
pod_pending_1 = build_pending_pod(3,2,2,n)
# pod_pending_1.priorityClass = pc # high prio will evict!
## Add pod to scheduler queue
scheduler.podQueue.add(pod_pending_1)
scheduler.queueLength += 1
scheduler.status = STATUS_SCHED["Changed"]
# create Deployment that we're going to detect failure of...
d = Deployment()
d.podList.add(pod_running_1)
d.podList.add(pod_running_2)
d.amountOfActivePods = 2
d.spec_replicas = 2
pod_running_1.hasDeployment = True
pod_running_2.hasDeployment = True
dnew = Deployment()
dnew.podList.add(pod_pending_1)
dnew.amountOfActivePods = 0
dnew.spec_replicas = 1
pod_pending_1.hasDeployment = True
snew = Service()
snew.metadata_name = "test-service-new"
snew.amountOfActivePods = 0
snew.status = STATUS_SERV["Pending"]
snew.podList.add(pod_pending_1)
pod_pending_1.hasService = True
snew.searchable = True
k.state_objects.extend([n, s, pod_running_1, pod_running_2, d])
create_objects = [snew,dnew]
k2 = reload_cluster_from_yaml(k,create_objects)
k.state_objects.extend(create_objects)
k.state_objects.extend([pod_pending_1])
k._build_state()
k2._build_state()
n_k2 = next(filter(lambda x: isinstance(x, Node), k2.state_objects))
n_k2.searchable = False
k._build_state()
class NewGoal_k1(Check_services):
pass
p = NewGoal_k1(k.state_objects)
class NewGoal_k2(Check_services):
pass
p2 = NewGoal_k2(k2.state_objects)
assert_conditions = ["MarkServiceOutageEvent"]
not_assert_conditions = ["NodeOutageFinished"]
checks_assert_conditions(k,k2,p,p2,assert_conditions,not_assert_conditions,DEBUG_MODE)
def test_17_creates_service_and_deployment_insufficient_resource__service_outage_invtest():
# print("17")
# Initialize scheduler, globalVar
k = KubernetesCluster()
scheduler = next(filter(lambda x: isinstance(x, Scheduler), k.state_objects))
globalVar = next(filter(lambda x: isinstance(x, GlobalVar), k.state_objects))
# initial node state
n = Node()
n.cpuCapacity = 5
n.memCapacity = 5
# Create running pods
pod_running_1 = build_running_pod(1,2,2,n)
pod_running_2 = build_running_pod(2,2,2,n)
n.amountOfActivePods = 2
## Set consumption as expected
n.currentFormalCpuConsumption = 4
n.currentFormalMemConsumption = 4
# priority for pod-to-evict
# pc = PriorityClass()
# pc.priority = 10
# pc.metadata_name = "high-prio-test"
# Service to detect eviction
s = Service()
s.metadata_name = "test-service"
s.amountOfActivePods = 2
s.status = STATUS_SERV["Started"]
s.searchable = True
# our service has multiple pods, but here we are detecting a pods-pending issue;
# remove the service when instead detecting a service outage caused by the bug above
s.podList.add(pod_running_1)
s.podList.add(pod_running_2)
pod_running_1.hasService = True
pod_running_2.hasService = True
# Pending pod
pod_pending_1 = build_pending_pod(3,2,2,n)
# pod_pending_1.priorityClass = pc # high prio will evict!
## Add pod to scheduler queue
scheduler.podQueue.add(pod_pending_1)
scheduler.queueLength += 1
scheduler.status = STATUS_SCHED["Changed"]
# create Deployment that we're going to detect failure of...
d = Deployment()
d.podList.add(pod_running_1)
d.podList.add(pod_running_2)
d.amountOfActivePods = 2
d.spec_replicas = 2
pod_running_1.hasDeployment = True
pod_running_2.hasDeployment = True
dnew = Deployment()
dnew.metadata_name = "new-deploymt"
dnew.podList.add(pod_pending_1)
dnew.amountOfActivePods = 0
dnew.spec_replicas = 1
pod_pending_1.hasDeployment = True
snew = Service()
snew.metadata_name = "test-service-new"
snew.amountOfActivePods = 0
snew.status = STATUS_SERV["Pending"]
snew.podList.add(pod_pending_1)
pod_pending_1.hasService = True
snew.searchable = True
k.state_objects.extend([n, s, pod_running_1, pod_running_2, d])
yamlState = convert_space_to_yaml(k.state_objects, wrap_items=True)
create_objects = [snew, dnew]
yamlCreate = convert_space_to_yaml(create_objects, wrap_items=False, load_logic_support=False)
k2 = KubernetesCluster()
for y in yamlState:
# print(y)
k2.load(y)
for y in yamlCreate:
k2.load(y, mode=KubernetesCluster.CREATE_MODE)
k2._build_state()
k._build_state()
globalVar = k2.state_objects[1]
scheduler = k2.state_objects[0]
# print_objects(k.state_objects)
class NewGoal(Check_services):
# pass
goal = lambda self: globalVar.is_service_disrupted == True and \
scheduler.status == STATUS_SCHED["Clean"]
p = NewGoal(k2.state_objects)
p.run(timeout=200)
for a in p.plan:
print(a)
assert "MarkServiceOutageEvent" in "\n".join([repr(x) for x in p.plan])
assert not "NodeOutageFinished" in "\n".join([repr(x) for x in p.plan])
def test_17_2_creates_service_and_deployment_insufficient_resource__two_service_outage():
# print("17")
# Initialize scheduler, globalVar
k = KubernetesCluster()
scheduler = next(filter(lambda x: isinstance(x, Scheduler), k.state_objects))
globalVar = next(filter(lambda x: isinstance(x, GlobalVar), k.state_objects))
# initial node state
n = Node()
n.cpuCapacity = 5
n.memCapacity = 5
# Create running pods
pod_running_1 = build_running_pod(1,2,2,n)
pod_running_2 = build_running_pod(2,2,2,n)
n.amountOfActivePods = 2
## Set consumption as expected
n.currentFormalCpuConsumption = 4
n.currentFormalMemConsumption = 4
# Service to detect eviction
s = Service()
s.metadata_name = "test-service"
s.amountOfActivePods = 2
s.status = STATUS_SERV["Started"]
s.searchable = True
# our service has multiple pods, but here we are detecting a pods-pending issue;
# remove the service when instead detecting a service outage caused by the bug above
s.podList.add(pod_running_1)
s.podList.add(pod_running_2)
pod_running_1.hasService = True
pod_running_2.hasService = True
# Pending pod
pod_pending_1 = build_pending_pod(3,2,2,n)
pod_pending_2 = build_pending_pod(4,2,2,n)
# pod_pending_1.priorityClass = pc # high prio will evict!
## Add pod to scheduler queue
scheduler.podQueue.add(pod_pending_1)
scheduler.podQueue.add(pod_pending_2)
scheduler.queueLength += 2
scheduler.status = STATUS_SCHED["Changed"]
# create Deployment that we're going to detect failure of...
d = Deployment()
d.podList.add(pod_running_1)
d.podList.add(pod_running_2)
d.amountOfActivePods = 2
d.spec_replicas = 2
pod_running_1.hasDeployment = True
pod_running_2.hasDeployment = True
dnew = Deployment()
dnew.podList.add(pod_pending_1)
dnew.amountOfActivePods = 0
dnew.spec_replicas = 1
pod_pending_1.hasDeployment = True
snew = Service()
snew.metadata_name = "test-service-new"
snew.amountOfActivePods = 0
snew.status = STATUS_SERV["Pending"]
snew.podList.add(pod_pending_1)
pod_pending_1.hasService = True
snew.searchable = True
snew2 = Service()
snew2.metadata_name = "test-service-new"
snew2.amountOfActivePods = 0
snew2.status = STATUS_SERV["Pending"]
snew2.podList.add(pod_pending_2)
pod_pending_2.hasService = True
snew2.searchable = True
k.state_objects.extend([n, s, snew, snew2, pod_running_1, pod_running_2, pod_pending_1, pod_pending_2, d, dnew])
create_objects = []
k2 = reload_cluster_from_yaml(k,create_objects)
k._build_state()
pod_pending_1_1 = next(filter(lambda x: isinstance(x, Pod) and x.status._property_value == STATUS_POD["Pending"], k.state_objects))
class NewGoal_k1(Check_services):
pass
p = NewGoal_k1(k.state_objects)
pod_pending_1_2 = next(filter(lambda x: isinstance(x, Pod) and x.status._property_value == STATUS_POD["Pending"], k2.state_objects))
class NewGoal_k2(Check_services):
pass
p2 = NewGoal_k2(k2.state_objects)
assert_conditions = ["MarkDeploymentOutageEvent"]
not_assert_conditions = ["NodeOutageFinished"]
checks_assert_conditions(k,k2,p,p2,assert_conditions,not_assert_conditions,DEBUG_MODE)
def test_19_has_deployment_creates_deployment__pods_evicted_pods_pending_test_snackable_branch():
# print("19")
k = KubernetesCluster()
prios = {}
pch = PriorityClass()
k.state_objects.append(pch)
pch.priority = 10
pch.metadata_name = "high-prio-test"
prios["high"] = pch
pcl = PriorityClass()
k.state_objects.append(pcl)
pcl.priority = 1
pcl.metadata_name = "low-prio-test"
prios["low"] = pcl
pods = []
node = Node()
k.state_objects.append(node)
node.memCapacity = 3
node.cpuCapacity = 3
d_was = Deployment()
k.state_objects.append(d_was)
d_was.metadata_name = "d_was"
d_was.priorityClass = prios["low"]
d_was.spec_template_spec_priorityClassName = prios["low"].metadata_name
d_was.amountOfActivePods = 2
d_was.spec_replicas = 2
for i in range(2):
pod = Pod()
k.state_objects.append(pod)
pod.metadata_name = "pod_number_" + str(i)
pod.memRequest = 1
pod.cpuRequest = 1
pod.status = STATUS_POD["Running"]
pod.priorityClass = prios["low"]
pod.spec_priorityClassName = prios["low"].metadata_name
pod.hasDeployment = True
pods.append(pod)
node.amountOfActivePods += 1
node.currentFormalMemConsumption += pod.memRequest
node.currentFormalCpuConsumption += pod.cpuRequest
d_was.podList.add(pod)
d_new = Deployment()
d_new.metadata_name = "d_new"
d_new.spec_replicas = 2
d_new.priorityClass = prios["high"]
d_new.spec_template_spec_priorityClassName = prios["high"].metadata_name
d_new.memRequest = 1
d_new.cpuRequest = 1
d_new.hook_after_create(k.state_objects)
k.state_objects.append(d_new)
pod_pending_count = 0
pPod = []
for pod in filter(lambda x: isinstance(x, Pod), k.state_objects):
if "pod_number_" in pod.metadata_name._get_value():
assert pod.status._get_value() == "Running", "pod_number_X pods should be Running before planning but have {0} status".format(pod.status._get_value())
if pod.status._get_value() == "Pending":
pod_pending_count += 1
pPod.append(pod)
assert pod_pending_count == 2, "expected 2 pending pods, found {0}".format(pod_pending_count)
class TestRun(K8ServiceInterruptSearch):
goal = lambda self: self.scheduler.status == STATUS_SCHED["Clean"] and pPod[0].status == STATUS_POD["Running"] and pPod[1].status == STATUS_POD["Running"]
p = TestRun(k.state_objects)
# print_objects(k.state_objects)
# p.run()
# print("scenario \n{0}".format(p.plan))
p.xrun()
# print("---after calculation ----")
# print_objects(k.state_objects)
assert d_new.amountOfActivePods == 2
assert d_was.amountOfActivePods == 1
assert node.amountOfActivePods == 3
for pod in filter(lambda x: isinstance(x, Pod), k.state_objects):
if "d_new" in pod.metadata_name._get_value():
assert pod.status._get_value() == "Running", "{1} pods should be Running after planning but have {0} status".format(pod.status._get_value(),pod.metadata_name._get_value() )
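# In test_19 above the two d_new pods are never built by hand:
# d_new.hook_after_create materializes them as Pending pods in
# k.state_objects, which is what the pod_pending_count == 2 assertion checks
# before p.xrun() asks the planner to bring them to Running.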
@pytest.mark.skip(reason="This test case is broken see #109")
def test_20_scheduller_counter_bug():
# print("20")
k = KubernetesCluster()
prios = {}
pch = PriorityClass()
k.state_objects.append(pch)
pch.priority = 10
pch.metadata_name = "high-prio-test"
prios["high"] = pch
pcl = PriorityClass()
k.state_objects.append(pcl)
pcl.priority = 1
pcl.metadata_name = "low-prio-test"
prios["low"] = pcl
pods = []
node = Node()
k.state_objects.append(node)
node.memCapacity = 3
node.cpuCapacity = 3
d_was = Deployment()
k.state_objects.append(d_was)
d_was.metadata_name = "d_was"
d_was.priorityClass = prios["low"]
d_was.spec_template_spec_priorityClassName = prios["low"].metadata_name
d_was.amountOfActivePods = 2
d_was.spec_replicas = 2
for i in range(2):
pod = Pod()
k.state_objects.append(pod)
pod.metadata_name = "pod_number_" + str(i)
pod.memRequest = 1
pod.cpuRequest = 1
pod.status = STATUS_POD["Running"]
pod.priorityClass = prios["low"]
pod.spec_priorityClassName = prios["low"].metadata_name
pod.hasDeployment = True
pods.append(pod)
node.amountOfActivePods += 1
node.currentFormalMemConsumption += pod.memRequest
node.currentFormalCpuConsumption += pod.cpuRequest
d_was.podList.add(pod)
d_new = Deployment()
d_new.metadata_name = "d_new"
d_new.spec_replicas = 2
d_new.priorityClass = prios["high"]
d_new.spec_template_spec_priorityClassName = prios["high"].metadata_name
d_new.memRequest = 1
d_new.cpuRequest = 1
d_new.hook_after_create(k.state_objects)
k.state_objects.append(d_new)
scheduler = next(filter(lambda x: isinstance(x, Scheduler), k.state_objects))
pPod = []
for pod in filter(lambda x: isinstance(x, Pod), k.state_objects):
if pod.status._get_value() == "Pending":
pPod.append(pod)
class TestRun(K8ServiceInterruptSearch):
goal = lambda self: self.scheduler.status == STATUS_SCHED["Clean"] and pPod[0].status == STATUS_POD["Running"] and pPod[1].status == STATUS_POD["Running"]
p = TestRun(k.state_objects)
p.xrun()
assert scheduler.queueLength._get_value() == 0
assert len(scheduler.podQueue._get_value()) == 0
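# test_20 above pins down the scheduler bookkeeping bug from issue #109: after
# planning, the queue must be fully drained (queueLength back to 0 and an
# empty podQueue). The test is skipped until that invariant holds again.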
def test_21_has_daemonset_creates_deployment__pods_evicted_daemonset_outage_synthetic():
# print("21")
# Initialize scheduler, globalvar
k = KubernetesCluster()
scheduler = next(filter(lambda x: isinstance(x, Scheduler), k.state_objects))
globalvar = next(filter(lambda x: isinstance(x, GlobalVar), k.state_objects))
# initial node state
n1 = Node("node 1")
n1.cpuCapacity = 3
n1.memCapacity = 3
n2 = Node("node 2")
n2.cpuCapacity = 3
n2.memCapacity = 3
# Create DaemonSet
ds = DaemonSet()
ds.searchable = True
# Create running pods belonging to the DaemonSet
pod_running_1 = build_running_pod_with_d(1,2,2,n1,None,ds)
pod_running_2 = build_running_pod_with_d(2,2,2,n2,None,ds)
n1.amountOfActivePods = 1
n2.amountOfActivePods = 1
# priority for pod-to-evict
pc = PriorityClass()
pc.priority = 10
pc.metadata_name = "high-prio-test"
# # Service to detect eviction
s = Service()
s.metadata_name = "test-service"
# Pending pod with deployment
d = Deployment()
d.spec_replicas = 1
d.priorityClass = pc
pod_pending_1 = build_pending_pod_with_d(3,2,2,n1,d,None)
pod_pending_1.priorityClass = pc # high prio will evict!
## Add pod to scheduler queue
scheduler.podQueue.add(pod_pending_1)
scheduler.queueLength += 1
scheduler.status = STATUS_SCHED["Changed"]
k.state_objects.extend([n1, n2, pc, pod_running_1, pod_running_2, pod_pending_1, d,ds])
create_objects = []
k2 = reload_cluster_from_yaml(k,create_objects)
k._build_state()
class NewGoal_k1(Check_daemonsets):
pass
p = NewGoal_k1(k.state_objects)
class NewGoal_k2(Check_daemonsets):
pass
p2 = NewGoal_k2(k2.state_objects)
assert_conditions = ["MarkDaemonsetOutageEvent"]
not_assert_conditions = ["NodeOutageFinished"]
checks_assert_conditions(k,k2,p,p2,assert_conditions,not_assert_conditions,DEBUG_MODE)
def prepare_test_22_has_daemonset_with_service_creates_deployment__pods_evicted_daemonset_outage_synthetic():
k = KubernetesCluster()
scheduler = next(filter(lambda x: isinstance(x, Scheduler), k.state_objects))
globalvar = next(filter(lambda x: isinstance(x, GlobalVar), k.state_objects))
# initial node state
n1 = Node()
n1.metadata_name = "node 1"
n1.cpuCapacity = 3
n1.memCapacity = 3
n1.isNull = False
n2 = Node("node 2")
n2.metadata_name = "node 2"
n2.cpuCapacity = 3
n2.memCapacity = 3
n2.isNull = False
# Create DaemonSet
ds = DaemonSet()
ds.searchable = True
# Create running pods belonging to the DaemonSet
pod_running_1 = build_running_pod_with_d(1,2,2,n1,None,ds)
pod_running_2 = build_running_pod_with_d(2,2,2,n2,None,ds)
n1.amountOfActivePods = 1
n2.amountOfActivePods = 1
# priority for pod-to-evict
pc = PriorityClass()
pc.priority = 10
pc.metadata_name = "high-prio-test"
# # Service to detect eviction
s = Service()
s.metadata_name = "test-service"
s.amountOfActivePods = 2
s.status = STATUS_SERV["Started"]
s.podList.add(pod_running_1)
s.podList.add(pod_running_2)
# Pending pod (not linked to the deployment created above)
d = Deployment()
d.spec_replicas = 1
d.priorityClass = pc
pod_pending_1 = build_pending_pod_with_d(3,2,2,n1,None,None)
pod_pending_1.priorityClass = pc # high prio will evict!
## Add pod to scheduler queue
scheduler.podQueue.add(pod_pending_1)
scheduler.queueLength += 1
scheduler.status = STATUS_SCHED["Changed"]
k.state_objects.extend([n1, pc, pod_running_1, pod_running_2, pod_pending_1, d, ds])
create_objects = []
k2 = reload_cluster_from_yaml(k,create_objects)
k._build_state()
return k, k2
# @pytest.mark.debug(reason="if debug needed - uncomment me")
# def test_22_has_daemonset_with_service_creates_deployment__pods_evicted_daemonset_outage_synthetic_step1():
# # print("22")
# # Initialize scheduler, globalvar
# k, k2 =prepare_test_22_has_daemonset_with_service_creates_deployment__pods_evicted_daemonset_outage_synthetic()
# class NewGoal(OptimisticRun):
# # pass
# # goal = lambda self: globalvar.is_daemonset_disrupted == True
# goal = lambda self: pod_running_1.status == STATUS_POD["Killing"]
# p = NewGoal(k.state_objects)
# p.run(timeout=200)
# # for a in p.plan:
# # print(a)
# assert "Evict_and_replace_less_prioritized_pod_when_target_node_is_not_defined" in "\n".join([repr(x) for x in p.plan])
# # assert "MarkDeploymentOutageEvent" in "\n".join([repr(x) for x in p.plan])
# @pytest.mark.debug(reason="if debug needed - uncomment me")
# def test_23_has_daemonset_with_service_creates_deployment__pods_evicted_daemonset_outage_synthetic_step2():
# # print("23")
# # Initialize scheduler, globalvar
# k, k2 = prepare_test_22_has_daemonset_with_service_creates_deployment__pods_evicted_daemonset_outage_synthetic()
# # print_objects(k.state_objects)
# class NewGoal(OptimisticRun):
# # pass
# # goal = lambda self: globalvar.is_daemonset_disrupted == True
# goal = lambda self: pod_running_1.status == STATUS_POD["Pending"]
# p = NewGoal(k.state_objects)
# p.run(timeout=200)
# # print("---after calculation ----")
# # print_objects(k.state_objects)
# # for a in p.plan:
# # print(a)
# assert "Evict_and_replace_less_prioritized_pod_when_target_node_is_not_defined" in "\n".join([repr(x) for x in p.plan])
# assert "KillPod_IF_Deployment_isNUll_Service_isNull_Daemonset_isNotNull" in "\n".join([repr(x) for x in p.plan])
def test_24_has_daemonset_with_service_creates_deployment__pods_evicted_daemonset_outage_synthetic_step3():
# print("24")
# Initialize scheduler, globalvar
k, k2 = prepare_test_22_has_daemonset_with_service_creates_deployment__pods_evicted_daemonset_outage_synthetic()
globalvar_k1 = next(filter(lambda x: isinstance(x, GlobalVar), k.state_objects))
globalvar_k2 = next(filter(lambda x: isinstance(x, GlobalVar), k2.state_objects))
class NewGoal_k1(Check_daemonsets):
goal = lambda self: globalvar_k1.is_daemonset_disrupted == True
p = NewGoal_k1(k.state_objects)
class NewGoal_k2(Check_daemonsets):
goal = lambda self: globalvar_k2.is_daemonset_disrupted == True
p2 = NewGoal_k2(k2.state_objects)
assert_conditions = ["MarkDaemonsetOutageEvent"]
not_assert_conditions = []
checks_assert_conditions(k,k2,p,p2,assert_conditions,not_assert_conditions,DEBUG_MODE)
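# In test_24 above the Check_daemonsets goal is overridden to the
# GlobalVar.is_daemonset_disrupted flag, so the search succeeds exactly when
# the planner can schedule MarkDaemonsetOutageEvent.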
def prepare_test_has_service_only_on_node_that_gets_disrupted():
k = KubernetesCluster()
scheduler = next(filter(lambda x: isinstance(x, Scheduler), k.state_objects))
globalvar = next(filter(lambda x: isinstance(x, GlobalVar), k.state_objects))
# initial node state
n1 = Node()
n1.metadata_name = "node 1"
n1.cpuCapacity = 6
n1.memCapacity = 6
n1.isNull = False
n2 = Node("node 2")
n2.metadata_name = "node 2"
n2.cpuCapacity = 6
n2.memCapacity = 6
n2.isNull = False
# Create running pods (no deployment or daemonset)
pod_running_1 = build_running_pod_with_d(1,2,2,n1,None,None)
pod_running_2 = build_running_pod_with_d(2,2,2,n1,None,None)
pod_running_3 = build_running_pod_with_d(3,2,2,n1,None,None)
pod_running_4 = build_running_pod_with_d(4,2,2,n2,None,None)
pod_running_5 = build_running_pod_with_d(5,2,2,n2,None,None)
pod_running_6 = build_running_pod_with_d(6,2,2,n2,None,None)
n1.amountOfActivePods = 3
n2.amountOfActivePods = 3
# # Service to detect eviction
s1 = Service()
s1.metadata_name = "test-service1"
s1.amountOfActivePods = 2
s1.status = STATUS_SERV["Started"]
s2 = Service()
s2.metadata_name = "test-service2"
s2.amountOfActivePods = 4
s2.status = STATUS_SERV["Started"]
s1.podList.add(pod_running_1)
s1.podList.add(pod_running_2)
s2.podList.add(pod_running_3)
s2.podList.add(pod_running_4)
s2.podList.add(pod_running_5)
s2.podList.add(pod_running_6)
# s2.podList.add(pod_running_7)
pod_running_1.hasService = True
pod_running_2.hasService = True
pod_running_3.hasService = True
pod_running_4.hasService = True
pod_running_5.hasService = True
pod_running_6.hasService = True
## We have clean scheduler queue
scheduler.status = STATUS_SCHED["Clean"]
k.state_objects.extend([n1, n2, s1, s2, pod_running_1, pod_running_2, pod_running_3, pod_running_4, pod_running_5, pod_running_6])
return k,n1,pod_running_1
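# Topology built above: both of test-service1's pods run on n1, while
# test-service2 has one pod on n1 and three on n2. An outage of n1 alone can
# therefore disrupt s1 but not s2, which is the scenario the (currently
# commented-out) node-outage steps below explore.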
# @pytest.mark.skip(reason="if debug needed - uncomment me")
# def test_25_node_outage_with_service_eviction_step0():
# # print("25")
# # Initialize scheduler, globalvar
# k,n1,pod_running_1 = prepare_test_has_service_only_on_node_that_gets_disrupted()
# globalvar = next(filter(lambda x: isinstance(x, GlobalVar), k.state_objects))
# class test_25_node_outage_with_service_eviction_step1(Check_services):
# goal = lambda self: self.pod_running_1.status == STATUS_POD["Killing"]
# p = test_25_node_outage_with_service_eviction_step1(k.state_objects)
# p.run(timeout=200)
# # print_objects(k.state_objects)
# # for a in p.plan:
# # print(a)
# assert "Initiate_node_outage" in "\n".join([repr(x) for x in p.plan])
# # assert "Evict_and_replace_less_prioritized_pod_when_target_node_is_not_defined" in "\n".join([repr(x) for x in p.plan])
# # assert "KillPod_IF_Deployment_isNUll_Service_isNull_Daemonset_isNotNull" in "\n".join([repr(x) for x in p.plan])
# # assert "MarkDaemonsetOutageEvent" in "\n".join([repr(x) for x in p.plan])
# # @pytest.mark.skip(reason="if debug needed - uncomment me")
# def test_25_node_outage_with_service_eviction_step1():
# # print("25")
# # Initialize scheduler, globalvar
# k,n1,pod_running_1 = prepare_test_has_service_only_on_node_that_gets_disrupted()
# globalvar = next(filter(lambda x: isinstance(x, GlobalVar), k.state_objects))
# class test_25_node_outage_with_service_eviction_step1(Check_services):
# goal = lambda self: n1.status == STATUS_NODE["Inactive"]
# p = test_25_node_outage_with_service_eviction_step1(k.state_objects)
# p.run(timeout=200)
# # print_objects(k.state_objects)
# # for a in p.plan:
# # print(a)
# assert "Initiate_node_outage" in "\n".join([repr(x) for x in p.plan])
# # assert "Evict_and_replace_less_prioritized_pod_when_target_node_is_not_defined" in "\n".join([repr(x) for x in p.plan])
# # assert "KillPod_IF_Deployment_isNUll_Service_isNull_Daemonset_isNotNull" in "\n".join([repr(x) for x in p.plan])
# # assert "MarkDaemonsetOutageEvent" in "\n".join([repr(x) for x in p.plan])
# @pytest.mark.skip(reason="if debug needed - uncomment me")
# def test_26_node_outage_with_service_eviction_step2():
# # print("26")
# # Initialize scheduler, globalvar
# k,n1,pod_running_1 = prepare_test_has_service_only_on_node_that_gets_disrupted()
# globalvar = next(filter(lambda x: isinstance(x, GlobalVar), k.state_objects))
# class Check_services(Check_services):
# goal = lambda self: globalvar.is_node_disrupted == True
# p = Check_services(k.state_objects)
# p.run(timeout=200)
# # print_objects(k.state_objects)
# # for a in p.plan:
# # print(a)
# # assert "StartPod_IF_Deployment_isNUll_Service_isNull_Daemonset_isNotNull" in "\n".join([repr(x) for x in p.plan])
# # assert "Evict_and_replace_less_prioritized_pod_when_target_node_is_not_defined" in "\n".join([repr(x) for x in p.plan])
# # assert "KillPod_IF_Deployment_isNUll_Service_isNull_Daemonset_isNotNull" in "\n".join([repr(x) for x in p.plan])
# # assert "MarkDaemonsetOutageEvent" in "\n".join([repr(x) for x in p.plan])
# @pytest.mark.debug(reason="if debug needed - uncomment me")
# def test_27_node_outage_with_service_eviction_step3():
# # print("27")
# # Initialize scheduler, globalvar
# k,n1,pod_running_1 = prepare_test_has_service_only_on_node_that_gets_disrupted()
# globalvar = next(filter(lambda x: isinstance(x, GlobalVar), k.state_objects))
# class Check_services_with_node_eviction(Check_services):
# goal = lambda self: globalvar.is_node_disrupted == True and globalvar.is_service_disrupted == True
# p = Check_services_with_node_eviction(k.state_objects)
# p.run(timeout=200)
# # print_objects(k.state_objects)
# # for a in p.plan:
# # print(a)
# # assert "StartPod_IF_Deployment_isNUll_Service_isNull_Daemonset_isNotNull" in "\n".join([repr(x) for x in p.plan])
# # assert "Evict_and_replace_less_prioritized_pod_when_target_node_is_not_defined" in "\n".join([repr(x) for x in p.plan])
# # assert "KillPod_IF_Deployment_isNUll_Service_isNull_Daemonset_isNotNull" in "\n".join([repr(x) for x in p.plan])
# # assert "MarkDaemonsetOutageEvent" in "\n".join([repr(x) for x in p.plan])
def test_28_from_test_5_evict_and_killpod_deployment_without_service_with_null_mem_request():
# print("28")
# Initialize scheduler, globalvar
k = KubernetesCluster()
scheduler = next(filter(lambda x: isinstance(x, Scheduler), k.state_objects))
# initial node state
n = Node()
n.cpuCapacity = 5
n.memCapacity = 5
n.isNull = False
# create Deploymnent that we're going to detect failure of...
d = Deployment()
d.spec_replicas = 2
# Create running pods
pod_running_1 = build_running_pod_with_d(1,2,2,n,d,None)
pod_running_2 = build_running_pod_with_d(2,2,2,n,d,None)
n.amountOfActivePods = 2
pod_running_1.memRequest = 0
pod_running_2.memRequest = 0
# priority for pod-to-evict
pc = PriorityClass()
pc.priority = 10
pc.metadata_name = "high-prio-test"
# Service to detecte eviction
s = Service()
s.metadata_name = "test-service"
# Pending pod
pod_pending_1 = build_pending_pod(3,2,2,n)
pod_pending_1.priorityClass = pc # high prio will evict!
## Add pod to scheduler queue
scheduler.podQueue.add(pod_pending_1)
scheduler.queueLength += 1
scheduler.status = STATUS_SCHED["Changed"]
k.state_objects.extend([n, pc, pod_running_1, pod_running_2, s, pod_pending_1, d])
create_objects = []
k2 = reload_cluster_from_yaml(k,create_objects)
k._build_state()
pod_running_1_1 = next(filter(lambda x: isinstance(x, Pod) and x.status._property_value == STATUS_POD["Running"], k.state_objects))
class NewGoal_k1(OptimisticRun):
goal = lambda self: pod_running_1_1.status == STATUS_POD["Pending"]
p = NewGoal_k1(k.state_objects)
pod_running_1_2 = next(filter(lambda x: isinstance(x, Pod) and x.status._property_value == STATUS_POD["Running"], k2.state_objects))
class NewGoal_k2(OptimisticRun):
goal = lambda self: pod_running_1_2.status == STATUS_POD["Pending"]
p2 = NewGoal_k2(k2.state_objects)
assert_conditions = ["Evict_and_replace_less_prioritized_pod_when_target_node_is_defined",\
"KillPod_IF_Deployment_isNotNUll_Service_isNull_Daemonset_isNull"]
not_assert_conditions = ["NodeOutageFinished"]
checks_assert_conditions(k,k2,p,p2,assert_conditions,not_assert_conditions,DEBUG_MODE)
def prepare_test_29_many_pods_not_enough_capacity_for_service(nodes_amount,node_capacity,pod2_amount,pod0_amount,pod2_2_amount,pod3_amount):
# Initialize scheduler, globalvar
k = KubernetesCluster()
scheduler = next(filter(lambda x: isinstance(x, Scheduler), k.state_objects))
# initial node state
i = 0
j = 0
nodes = []
pods = []
# Service to detect eviction
s = Service()
s.metadata_name = "test-service"
s.amountOfActivePods = 0
s2 = Service()
s2.metadata_name = "test-service2"
s2.amountOfActivePods = 0
# create Deployment that we're going to detect failure of...
d = Deployment()
d.spec_replicas = 2
pod_id = 1
for i in range(nodes_amount):
node_item = Node("node"+str(i))
node_item.cpuCapacity = node_capacity
node_item.memCapacity = node_capacity
node_item.isNull = False
node_item.status = STATUS_NODE["Active"]
nodes.append(node_item)
for j in range(pod2_amount):
pod_running_2 = build_running_pod_with_d(pod_id,2,2,node_item,None,None)
pod_id += 1
pod_running_2.hasService = True
pods.append(pod_running_2)
node_item.amountOfActivePods += 1
s.podList.add(pod_running_2)
s.amountOfActivePods +=1
for j in range(pod0_amount):
pod_running_0 = build_running_pod_with_d(pod_id,0,0,node_item,None,None)
pod_id += 1
pods.append(pod_running_0)
node_item.amountOfActivePods += 1
for j in range(pod2_2_amount):
pod_running_2 = build_running_pod_with_d(pod_id,2,2,node_item,None,None)
pod_id += 1
pod_running_2.hasService = True
pods.append(pod_running_2)
node_item.amountOfActivePods += 1
s.podList.add(pod_running_2)
s.amountOfActivePods +=1
for j in range(pod3_amount):
pod_running_2 = build_running_pod_with_d(pod_id,2,2,nodes[0],None,None)
pod_id += 1
pod_running_2.hasService = True
pods.append(pod_running_2)
nodes[0].amountOfActivePods += 1
s2.podList.add(pod_running_2)
s2.amountOfActivePods +=1
# priority for pod-to-evict
pc = PriorityClass()
pc.priority = 10
pc.metadata_name = "high-prio-test"
k.state_objects.extend(nodes)
k.state_objects.extend(pods)
k.state_objects.extend([pc, s, s2 ])
create_objects = []
k2 = reload_cluster_from_yaml(k,create_objects)
k._build_state()
class NewGoal_k1(CheckNodeOutage):
pass
p = NewGoal_k1(k.state_objects)
class NewGoal_k2(CheckNodeOutage):
pass
p2 = NewGoal_k2(k2.state_objects)
assert_conditions = ["MarkServiceOutageEvent",\
"Mark_node_outage_event"]
not_assert_conditions = []
return k, k2, p , p2
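# Parameter meaning for prepare_test_29_* (derived from the loops above): build
# nodes_amount nodes with cpu/mem capacity node_capacity each; on every node
# create pod2_amount 2cpu/2mem pods backing service s, pod0_amount zero-request
# pods with no service, and pod2_2_amount further 2cpu/2mem pods for s; the
# pod3_amount pods back service s2 and are pinned to the first node regardless
# of which node the outer loop is visiting.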
def prepare_test_29_many_pods_not_enough_capacity_for_service_without_yaml_loading(nodes_amount,node_capacity,pod2_amount,pod0_amount,pod2_2_amount,pod3_amount):
# Initialize scheduler, globalvar
k = KubernetesCluster()
scheduler = next(filter(lambda x: isinstance(x, Scheduler), k.state_objects))
# initial node state
i = 0
j = 0
nodes = []
pods = []
# Service to detect eviction
s = Service()
s.metadata_name = "test-service"
s.amountOfActivePods = 0
s2 = Service()
s2.metadata_name = "test-service2"
s2.amountOfActivePods = 0
# create Deployment that we're going to detect failure of...
d = Deployment()
d.spec_replicas = 2
pod_id = 1
for i in range(nodes_amount):
node_item = Node("node"+str(i))
node_item.cpuCapacity = node_capacity
node_item.memCapacity = node_capacity
node_item.isNull = False
node_item.status = STATUS_NODE["Active"]
nodes.append(node_item)
for j in range(pod2_amount):
pod_running_2 = build_running_pod_with_d(pod_id,2,2,node_item,None,None)
pod_id += 1
pod_running_2.hasService = True
pods.append(pod_running_2)
node_item.amountOfActivePods += 1
s.podList.add(pod_running_2)
s.amountOfActivePods +=1
for j in range(pod0_amount):
pod_running_0 = build_running_pod_with_d(pod_id,0,0,node_item,None,None)
pod_id += 1
pods.append(pod_running_0)
node_item.amountOfActivePods += 1
for j in range(pod2_2_amount):
pod_running_2 = build_running_pod_with_d(pod_id,2,2,node_item,None,None)
pod_id += 1
pod_running_2.hasService = True
pods.append(pod_running_2)
node_item.amountOfActivePods += 1
s.podList.add(pod_running_2)
s.amountOfActivePods +=1
for j in range(pod3_amount):
pod_running_2 = build_running_pod_with_d(pod_id,2,2,nodes[1],None,None)
pod_id += 1
pod_running_2.hasService = True
pods.append(pod_running_2)
nodes[1].amountOfActivePods += 1
s2.podList.add(pod_running_2)
s2.amountOfActivePods +=1
# priority for pod-to-evict
pc = PriorityClass()
pc.priority = 10
pc.metadata_name = "high-prio-test"
k.state_objects.extend(nodes)
k.state_objects.extend(pods)
k.state_objects.extend([pc, s, s2 ])
create_objects = []
k._build_state()
class NewGoal_k1(CheckNodeOutage):
pass
p = NewGoal_k1(k.state_objects)
assert_conditions = ["MarkServiceOutageEvent",\
"Mark_node_outage_event"]
not_assert_conditions = []
return k, p
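# test_29 .. test_48 below vary only the prepare_test_29_* argument tuple and
# the expected action names. A hypothetical pytest.mark.parametrize rewrite (a
# sketch only, not part of the suite) could collapse them:
#
# @pytest.mark.parametrize("args,conds", [
#     ((2, 15, 1, 1, 1, 1), ["MarkServiceOutageEvent", "Mark_node_outage_event"]),
#     ((2, 19, 2, 2, 1, 1), ["SchedulerQueueCleanHighCost", "Mark_node_outage_event"]),
# ])
# def test_capacity_matrix(args, conds):
#     k, k2, p, p2 = prepare_test_29_many_pods_not_enough_capacity_for_service(*args)
#     checks_assert_conditions_in_one_mode(k, p, conds, [], "functional test", DEBUG_MODE)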
def test_29():
k, k2, p, p2 = prepare_test_29_many_pods_not_enough_capacity_for_service(2,15,1,1,1,1)
assert_conditions = ["MarkServiceOutageEvent",\
"Mark_node_outage_event"]
not_assert_conditions = []
assert_brake = checks_assert_conditions_in_one_mode(k,p,assert_conditions,not_assert_conditions,"functional test", DEBUG_MODE)
def test_30():
k, k2,p ,p2 = prepare_test_29_many_pods_not_enough_capacity_for_service(2,19,2,2,1,1)
assert_conditions = ["SchedulerQueueCleanHighCost",\
"Mark_node_outage_event"]
not_assert_conditions = []
assert_brake = checks_assert_conditions_in_one_mode(k,p,assert_conditions,not_assert_conditions,"functional test", DEBUG_MODE)
def test_31():
k, k2,p ,p2 = prepare_test_29_many_pods_not_enough_capacity_for_service(2,23,3,3,1,1)
assert_conditions = ["SchedulerQueueCleanHighCost",\
"Mark_node_outage_event"]
not_assert_conditions = []
assert_brake = checks_assert_conditions_in_one_mode(k,p,assert_conditions,not_assert_conditions,"functional test", DEBUG_MODE)
def test_32():
k, k2,p ,p2 = prepare_test_29_many_pods_not_enough_capacity_for_service(2,26,3,3,2,2)
assert_conditions = ["MarkServiceOutageEvent",\
"Mark_node_outage_event"]
not_assert_conditions = []
assert_brake = checks_assert_conditions_in_one_mode(k,p,assert_conditions,not_assert_conditions,"functional test", DEBUG_MODE)
def test_33():
k, k2,p ,p2 = prepare_test_29_many_pods_not_enough_capacity_for_service(2,32,4,4,4,4)
assert_conditions = ["SchedulerQueueCleanHighCost",\
"Mark_node_outage_event"]
not_assert_conditions = []
assert_brake = checks_assert_conditions_in_one_mode(k,p,assert_conditions,not_assert_conditions,"functional test", DEBUG_MODE)
def test_34():
k, k2,p ,p2 = prepare_test_29_many_pods_not_enough_capacity_for_service(2,40,5,5,5,5)
assert_conditions = ["SchedulerQueueCleanHighCost",\
"Mark_node_outage_event"]
not_assert_conditions = []
checks_assert_conditions_in_one_mode(k,p,assert_conditions,not_assert_conditions,"functional test", DEBUG_MODE)
def test_36():
k, k2, p, p2 = prepare_test_29_many_pods_not_enough_capacity_for_service(2,8,1,1,1,1)
assert_conditions = ["MarkServiceOutageEvent",\
"Mark_node_outage_event"]
not_assert_conditions = []
assert_brake = checks_assert_conditions_in_one_mode(k,p,assert_conditions,not_assert_conditions,"functional test", DEBUG_MODE)
def test_37():
k, k2,p ,p2 = prepare_test_29_many_pods_not_enough_capacity_for_service(2,8,2,2,1,1)
assert_conditions = ["MarkServiceOutageEvent",\
"Mark_node_outage_event"]
not_assert_conditions = []
assert_brake = checks_assert_conditions_in_one_mode(k,p,assert_conditions,not_assert_conditions,"functional test", DEBUG_MODE)
def test_38():
k, k2,p ,p2 = prepare_test_29_many_pods_not_enough_capacity_for_service(2,8,3,3,1,1)
assert_conditions = ["MarkServiceOutageEvent",\
"Mark_node_outage_event"]
not_assert_conditions = []
assert_brake = checks_assert_conditions_in_one_mode(k,p,assert_conditions,not_assert_conditions,"functional test", DEBUG_MODE)
def test_39():
k, k2,p ,p2 = prepare_test_29_many_pods_not_enough_capacity_for_service(2,12,4,4,4,4)
assert_conditions = ["MarkServiceOutageEvent",\
"Mark_node_outage_event"]
not_assert_conditions = []
assert_brake = checks_assert_conditions_in_one_mode(k,p,assert_conditions,not_assert_conditions,"functional test", DEBUG_MODE)
def test_40():
k, k2,p ,p2 = prepare_test_29_many_pods_not_enough_capacity_for_service(2,25,5,5,5,5)
assert_conditions = ["MarkServiceOutageEvent",\
"Mark_node_outage_event"]
not_assert_conditions = []
checks_assert_conditions_in_one_mode(k,p,assert_conditions,not_assert_conditions,"functional test", DEBUG_MODE)
def test_41():
k, k2,p ,p2 = prepare_test_29_many_pods_not_enough_capacity_for_service(2,11,4,4,4,4)
assert_conditions = ["MarkServiceOutageEvent",\
"Mark_node_outage_event"]
not_assert_conditions = []
assert_brake = checks_assert_conditions_in_one_mode(k,p,assert_conditions,not_assert_conditions,"functional test", DEBUG_MODE)
def test_42():
k, k2,p ,p2 = prepare_test_29_many_pods_not_enough_capacity_for_service(2,12,5,5,4,4)
assert_conditions = ["MarkServiceOutageEvent",\
"Mark_node_outage_event"]
not_assert_conditions = []
assert_brake = checks_assert_conditions_in_one_mode(k,p,assert_conditions,not_assert_conditions,"functional test", DEBUG_MODE)
def test_43():
k, k2,p ,p2 = prepare_test_29_many_pods_not_enough_capacity_for_service(2,12,5,5,5,4)
assert_conditions = ["MarkServiceOutageEvent",\
"Mark_node_outage_event"]
not_assert_conditions = []
assert_brake = checks_assert_conditions_in_one_mode(k,p,assert_conditions,not_assert_conditions,"functional test", DEBUG_MODE)
def test_44():
k, k2,p ,p2 = prepare_test_29_many_pods_not_enough_capacity_for_service(2,12,5,5,5,5)
assert_conditions = ["MarkServiceOutageEvent",\
"Mark_node_outage_event"]
not_assert_conditions = []
assert_brake = checks_assert_conditions_in_one_mode(k,p,assert_conditions,not_assert_conditions,"functional test", DEBUG_MODE)
def test_45():
k, k2,p ,p2 = prepare_test_29_many_pods_not_enough_capacity_for_service(2,12,4,4,4,4)
assert_conditions = ["MarkServiceOutageEvent",\
"Mark_node_outage_event"]
not_assert_conditions = []
assert_brake = checks_assert_conditions_in_one_mode(k,p,assert_conditions,not_assert_conditions,"functional test", DEBUG_MODE)
def test_46():
k, k2,p ,p2 = prepare_test_29_many_pods_not_enough_capacity_for_service(2,16,5,4,4,4)
assert_conditions = ["MarkServiceOutageEvent",\
"Mark_node_outage_event"]
not_assert_conditions = []
assert_brake = checks_assert_conditions_in_one_mode(k,p,assert_conditions,not_assert_conditions,"functional test", DEBUG_MODE)
def test_47():
k, k2,p ,p2 = prepare_test_29_many_pods_not_enough_capacity_for_service(2,20,5,5,4,4)
assert_conditions = ["MarkServiceOutageEvent",\
"Mark_node_outage_event"]
not_assert_conditions = []
assert_brake = checks_assert_conditions_in_one_mode(k,p,assert_conditions,not_assert_conditions,"functional test", DEBUG_MODE)
def test_48():
node = 24
for cap in range(10,20):
print("test node ", node, " cap " ,cap)
k, k2,p ,p2 = prepare_test_29_many_pods_not_enough_capacity_for_service(1,node,cap,0,0,1)
assert_conditions = ["MarkServiceOutageEvent", "Mark_node_outage_event"]
not_assert_conditions = []
assert_brake = checks_assert_conditions(k,k2,p,p2,assert_conditions,not_assert_conditions,DEBUG_MODE)
# def test_49():
# k, k2,p ,p2 = prepare_test_29_many_pods_not_enough_capacity_for_service(2,25,5,5,5,5)
# assert_conditions = ["MarkServiceOutageEvent",\
# "Mark_node_outage_event"]
# not_assert_conditions = []
# assert_brake = checks_assert_conditions(k,k2,p,p2,assert_conditions,not_assert_conditions,DEBUG_MODE)
# def test_50():
# k, k2,p ,p2 = prepare_test_29_many_pods_not_enough_capacity_for_service(2,12,4,4,4,4)
# assert_conditions = ["MarkServiceOutageEvent",\
# "Mark_node_outage_event"]
# not_assert_conditions = []
# assert_brake = checks_assert_conditions(k,k2,p,p2,assert_conditions,not_assert_conditions,DEBUG_MODE)
# def test_51():
# k, k2,p ,p2 = prepare_test_29_many_pods_not_enough_capacity_for_service(2,12,4,4,4,4)
# assert_conditions = ["MarkServiceOutageEvent",\
# "Mark_node_outage_event"]
# not_assert_conditions = []
# assert_brake = checks_assert_conditions(k,k2,p,p2,assert_conditions,not_assert_conditions,DEBUG_MODE)
# def test_52():
# k, k2,p ,p2 = prepare_test_29_many_pods_not_enough_capacity_for_service(2,12,4,4,4,4)
# assert_conditions = ["MarkServiceOutageEvent",\
# "Mark_node_outage_event"]
# not_assert_conditions = []
# assert_brake = checks_assert_conditions(k,k2,p,p2,assert_conditions,not_assert_conditions,DEBUG_MODE)
# def test_53():
# k, k2,p ,p2 = prepare_test_29_many_pods_not_enough_capacity_for_service(2,12,4,4,4,4)
# assert_conditions = ["MarkServiceOutageEvent",\
# "Mark_node_outage_event"]
# not_assert_conditions = []
# assert_brake = checks_assert_conditions(k,k2,p,p2,assert_conditions,not_assert_conditions,DEBUG_MODE)
# --- next file: src/genie/libs/parser/iosxe/tests/ShowIpBgpDetail/cli/equal/golden_output8_expected.py (balmasea/genieparser, Apache-2.0) ---
expected_output = {
"instance": {
"default": {
"vrf": {
"vrf_1": {
"address_family": {
"vpnv4": {
"default_vrf": "vrf_1",
"prefixes": {
"10.64.4.0/22": {
"available_path": "1",
"best_path": "",
"index": {
1: {
"community": "65100:106 65100:500 65100:601 65361:3",
"ext_community": "RT:65000:31838",
"gateway": "10.9.3.4",
"localpref": 100,
"next_hop": "10.9.3.4",
"next_hop_via": "vrf vrf_1",
"origin_codes": "i",
"originator": "10.9.3.4",
"recipient_pathid": "0",
"refresh_epoch": 1,
"route_info": "65000 65201 4400004007 4400004507 4400004001 4400004505 4400004005 4400004504 1234 5678",
"route_status": "received-only",
"status_codes": "* ",
"transfer_pathid": "0",
}
},
"paths": "1 available, no best path",
"table_version": "0",
},
"10.64.4.0/24": {
"available_path": "4",
"best_path": "3",
"index": {
1: {
"community": "9:9 65100:106 65100:508 65100:704 65371:2",
"ext_community": "RT:65000:31838",
"gateway": "10.9.3.4",
"localpref": 100,
"next_hop": "10.9.3.4",
"next_hop_via": "vrf vrf_1",
"origin_codes": "i",
"originator": "10.9.3.4",
"recipient_pathid": "0",
"refresh_epoch": 1,
"route_info": "65000 65211 4400002007 4400002507 4400002001 4400002505 4400002005 4400002504 3456 5678 2345",
"route_status": "received-only",
"status_codes": "* ",
"transfer_pathid": "0",
"update_group": [2, 3, 47],
},
2: {
"community": "9:9 65100:106 65100:508 65100:700 65100:704 65371:2",
"gateway": "10.105.3.84",
"localpref": 100,
"next_hop": "10.105.3.84",
"next_hop_via": "vrf vrf_1",
"origin_codes": "i",
"originator": "10.105.2.27",
"recipient_pathid": "0",
"refresh_epoch": 1,
"route_info": "4400005007 4400005507 4400005001 4400065100 4400002001 4400002505 4400002005 4400002504 3456 5678 2345",
"route_status": "received & used",
"status_codes": "* ",
"transfer_pathid": "0",
"update_group": [2, 3, 47],
},
3: {
"community": "9:9 65100:106 65100:508 65100:700 65100:704 65371:2",
"gateway": "10.105.3.80",
"localpref": 100,
"next_hop": "10.105.3.80",
"next_hop_via": "vrf vrf_1",
"origin_codes": "i",
"originator": "10.105.2.26",
"recipient_pathid": "0",
"refresh_epoch": 1,
"route_info": "4400005007 4400005507 4400005001 4400065100 4400002001 4400002505 4400002005 4400002504 3456 5678 2345",
"route_status": "received & used",
"status_codes": "*>",
"transfer_pathid": "0x0",
"update_group": [2, 3, 47],
},
4: {
"community": "9:9 65100:106 65100:508 65100:700 65100:704 65371:2",
"gateway": "10.105.2.1",
"localpref": 100,
"metric": 0,
"next_hop": "10.105.2.1",
"next_hop_igp_metric": "2",
"next_hop_via": "vrf vrf_1",
"origin_codes": "i",
"originator": "10.105.2.1",
"recipient_pathid": "0",
"refresh_epoch": 2,
"route_info": "4400005007 4400005507 4400005001 4400065100 4400002001 4400002505 4400002005 4400002504 3456 5678 2345",
"route_status": "received & used",
"status_codes": "* i",
"transfer_pathid": "0",
"update_group": [2, 3, 47],
},
},
"paths": "4 available, best #3, table vrf_1",
"table_version": "1863365",
},
},
"route_distinguisher": "101:101",
}
}
},
"vrf_2": {
"address_family": {
"vpnv4": {
"default_vrf": "vrf_2",
"prefixes": {
"0.0.0.0/0": {
"available_path": "3",
"best_path": "2",
"index": {
1: {
"community": "65100:106 65100:500 65100:601 65351:1",
"gateway": "10.105.6.84",
"localpref": 100,
"next_hop": "10.105.6.84",
"next_hop_via": "vrf vrf_2",
"origin_codes": "i",
"originator": "10.105.5.17",
"recipient_pathid": "0",
"refresh_epoch": 1,
"route_info": "4400005002 4400005502 4400005001 4400005505 4400005005 4400005504 6789 5678",
"route_status": "received & used",
"status_codes": "* ",
"transfer_pathid": "0",
"update_group": [1, 29, 54],
},
2: {
"community": "65100:106 65100:500 65100:601 65351:1",
"gateway": "10.105.6.80",
"localpref": 100,
"next_hop": "10.105.6.80",
"next_hop_via": "vrf vrf_2",
"origin_codes": "i",
"originator": "10.105.5.16",
"recipient_pathid": "0",
"refresh_epoch": 1,
"route_info": "4400005002 4400005502 4400005001 4400005505 4400005005 4400005504 6789 5678",
"route_status": "received & used",
"status_codes": "*>",
"transfer_pathid": "0x0",
"update_group": [1, 29, 54],
},
3: {
"community": "65100:106 65100:500 65100:601 65351:1",
"gateway": "10.105.5.1",
"localpref": 100,
"metric": 0,
"next_hop": "10.105.5.1",
"next_hop_igp_metric": "2",
"next_hop_via": "vrf vrf_2",
"origin_codes": "i",
"originator": "10.105.5.1",
"recipient_pathid": "0",
"refresh_epoch": 3,
"route_info": "4400005002 4400005502 4400005001 4400005505 4400005005 4400005504 6789 5678",
"route_status": "received & used",
"status_codes": "* i",
"transfer_pathid": "0",
"update_group": [1, 29, 54],
},
},
"paths": "3 available, best #2, table vrf_2",
"table_version": "1814679",
}
},
"route_distinguisher": "102:102",
}
}
},
}
}
}
}
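Editorial note: golden `*_expected.py` files like the one above are normally compared against the parser's output in a unit test. A minimal sketch of that pattern, assuming the usual genieparser layout (the `golden_output` raw-CLI string and the exact import path are assumptions here, not taken from this record):
from unittest.mock import Mock
# Hypothetical import path -- the real module housing ShowIpBgpDetail may differ.
from genie.libs.parser.iosxe.show_bgp import ShowIpBgpDetail

def check_golden(golden_output: str) -> None:
    device = Mock()
    device.execute = Mock(return_value=golden_output)  # feed the recorded CLI text
    parsed = ShowIpBgpDetail(device=device).parse()
    assert parsed == expected_output                   # the dict defined above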
| 63.521505
| 163
| 0.263817
| 684
| 11,815
| 4.374269
| 0.165205
| 0.042112
| 0.034759
| 0.034759
| 0.843917
| 0.840909
| 0.798128
| 0.762032
| 0.746658
| 0.692513
| 0
| 0.303473
| 0.646636
| 11,815
| 185
| 164
| 63.864865
| 0.413174
| 0
| 0
| 0.589189
| 0
| 0
| 0.281337
| 0
| 0
| 0
| 0.000508
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
d128d1b109e903d6f3a6da3739207addf75d8a9a
| 3,382
|
py
|
Python
|
secuencias.py
|
CCG-Magno/Tutoriales_Python
|
7f6291675cabccc35ddad71070271cbc58e7e283
|
[
"MIT"
] | 1
|
2020-09-20T22:37:20.000Z
|
2020-09-20T22:37:20.000Z
|
secuencias.py
|
CCG-Magno/Tutoriales_Python
|
7f6291675cabccc35ddad71070271cbc58e7e283
|
[
"MIT"
] | null | null | null |
secuencias.py
|
CCG-Magno/Tutoriales_Python
|
7f6291675cabccc35ddad71070271cbc58e7e283
|
[
"MIT"
] | null | null | null |
class Node:
    def __init__(self, val):
        self.val = val
        self.nxt = None
class DoubleNode(Node):
    def __init__(self, val):
        super().__init__(val)  # reuse Node's setup for val/nxt
        self.prev = None       # back-link that makes this node doubly linked
def listas():
    # Lists are comma-separated sequences of objects/values
    # enclosed in brackets '[' ']'.
    # E.g.:
    asistencia = ["Juan", "Jesus", "Antonio", "Luis"]
    print(f"Tomemos asistencia para la clase:\n{asistencia}\n")
    # The objects do not all have to be of the same type.
    # E.g.:
    valores_aleatorios = ["a", 10, 0.75, False]
    print(f"Escupamos basura:\n{valores_aleatorios}\n")
    print()
    print(f"El tipo de 'asistencia' es {type(asistencia)} y el tipo de 'valores_aleatorios' es {type(valores_aleatorios)}'\n")
    # Lists can grow or shrink dynamically while the program runs.
    print(f"Tamano de asistencia: {len(asistencia)}\n")
    # Now let's add someone who arrived late.
    tarde = "Andres"
    print(f"{tarde} llego tarde!")
    asistencia.append(tarde)
    # Take attendance again.
    print(f"Tomemos asistencia para la clase(gracias {tarde}!):\n{asistencia}\n")
    # Check the size again.
    print(f"Tamano de asistencia: {len(asistencia)}\n")
    # Someone left the room. To remove them we delete by value (or by the
    # index they occupy in the list).
    # Note: indices run over the range [0, len(lista)-1];
    # i.e., if the list's size is 5 the valid range is [0, 4].
    asistencia.remove('Jesus')
    print(f"Tomemos asistencia para la clase(adios Jesus!):\n{asistencia}\n")
    print(f"Tamano de asistencia: {len(asistencia)}\n")
    return
def sets():
return
def tuples():
    # Tuples are comma-separated sequences of objects/values
    # enclosed in parentheses '(' ')'.
    # E.g.:
    asistencia = ("Juan", "Jesus", "Antonio", "Luis")
    print(f"Tomemos asistencia para la clase:\n{asistencia}\n")
    # The objects do not all have to be of the same type.
    # E.g.:
    valores_aleatorios = ("a", 10, 0.75, False)
    print(f"Escupamos basura:\n{valores_aleatorios}\n")
    print()
    print(f"El tipo de 'asistencia' es {type(asistencia)} y el tipo de 'valores_aleatorios' es {type(valores_aleatorios)}'\n")
    # Unlike lists, tuples are immutable: they have no append() or remove(),
    # so "changing" one means building a new tuple.
    print(f"Tamano de asistencia: {len(asistencia)}\n")
    # Add the latecomer by concatenating a one-element tuple.
    tarde = "Andres"
    print(f"{tarde} llego tarde!")
    asistencia = asistencia + (tarde,)
    print(f"Tomemos asistencia para la clase(gracias {tarde}!):\n{asistencia}\n")
    print(f"Tamano de asistencia: {len(asistencia)}\n")
    # Remove someone by filtering into a fresh tuple.
    # Note: indices run over the range [0, len(tupla)-1].
    asistencia = tuple(x for x in asistencia if x != 'Jesus')
    print(f"Tomemos asistencia para la clase(adios Jesus!):\n{asistencia}\n")
    print(f"Tamano de asistencia: {len(asistencia)}\n")
    return
def main():
listas()
return
if __name__ == "__main__":
main()
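The sets() function above is an empty stub; a minimal sketch of what it might cover in the same teaching style (the body below is an editorial assumption, not part of the original tutorial):
def sets_ejemplo():
    # Sets are unordered collections of *unique* values enclosed in braces '{' '}'.
    asistencia = {"Juan", "Jesus", "Antonio", "Luis"}
    print(f"Tomemos asistencia para la clase:\n{asistencia}\n")
    # Duplicates are silently dropped:
    asistencia.add("Juan")
    print(f"Tamano de asistencia: {len(asistencia)}\n")  # still 4
    # add() and discard() grow and shrink the set dynamically.
    asistencia.add("Andres")
    asistencia.discard("Jesus")
    print(f"Tomemos asistencia para la clase:\n{asistencia}\n")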
| 31.314815
| 126
| 0.662034
| 474
| 3,382
| 4.672996
| 0.232068
| 0.048758
| 0.035214
| 0.062302
| 0.941761
| 0.925508
| 0.925508
| 0.925508
| 0.925508
| 0.925508
| 0
| 0.007634
| 0.22531
| 3,382
| 108
| 127
| 31.314815
| 0.837786
| 0.360438
| 0
| 0.693878
| 0
| 0.040816
| 0.478689
| 0.123653
| 0
| 0
| 0
| 0
| 0
| 1
| 0.122449
| false
| 0
| 0
| 0.040816
| 0.285714
| 0.408163
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 7
|
d155f8b61a43304988d0511d2e2c6841d7b25d71
| 5,204
|
py
|
Python
|
linguistic_style_transfer_model/config/human_annotation_config.py
|
spencerbraun/linguistic-style-transfer
|
76774bc805c56e823f958f98e6ae8d5973518757
|
[
"Apache-2.0"
] | 136
|
2018-08-17T21:31:01.000Z
|
2022-01-03T15:01:41.000Z
|
linguistic_style_transfer_model/config/human_annotation_config.py
|
spencerbraun/linguistic-style-transfer
|
76774bc805c56e823f958f98e6ae8d5973518757
|
[
"Apache-2.0"
] | 18
|
2018-06-29T21:41:28.000Z
|
2020-07-09T08:53:37.000Z
|
linguistic_style_transfer_model/config/human_annotation_config.py
|
spencerbraun/linguistic-style-transfer
|
76774bc805c56e823f958f98e6ae8d5973518757
|
[
"Apache-2.0"
] | 29
|
2018-09-27T04:51:10.000Z
|
2022-03-01T21:24:04.000Z
|
output_folder = "/home/v2john/documents/style-transfer-models/annotations/"
annotation_config = {
"nips-yelp-pos-to-neg": {
"original": "/home/v2john/documents/style-transfer-models/nips/yelp/data/sentiment.test.1",
"generated": "/home/v2john/documents/style-transfer-models/nips/yelp/sentiment.test.1.tsf",
"count": 20
},
"nips-yelp-neg-to-pos": {
"original": "/home/v2john/documents/style-transfer-models/nips/yelp/data/sentiment.test.0",
"generated": "/home/v2john/documents/style-transfer-models/nips/yelp/sentiment.test.0.tsf",
"count": 20
},
"aaai-yelp-pos-to-neg": {
"original": "/home/v2john/documents/style-transfer-models/nips/yelp/data/sentiment.test.1",
"generated": "/home/v2john/documents/style-transfer-models/aaai/yelp-test/fixed/style0.txt",
"count": 20
},
"aaai-yelp-neg-to-pos": {
"original": "/home/v2john/documents/style-transfer-models/nips/yelp/data/sentiment.test.0",
"generated": "/home/v2john/documents/style-transfer-models/aaai/yelp-test/fixed/style1.txt",
"count": 20
},
"iclr-yelp-pos-to-neg": {
"original": "/home/v2john/documents/style-transfer-models/iclr/stytra-yelp/26_output_decoder_1_from.txt",
"generated": "/home/v2john/documents/style-transfer-models/iclr/stytra-yelp/26_output_decoder_1_tran.txt",
"count": 20
},
"iclr-yelp-neg-to-pos": {
"original": "/home/v2john/documents/style-transfer-models/iclr/stytra-yelp/26_output_decoder_2_from.txt",
"generated": "/home/v2john/documents/style-transfer-models/iclr/stytra-yelp/26_output_decoder_2_tran.txt",
"count": 20
},
"dae-yelp-pos-to-neg": {
"original": "/home/v2john/documents/style-transfer-models/output/20180629130342-inference/actual_sentences_0.txt",
"generated": "/home/v2john/documents/style-transfer-models/output/20180629130342-inference/generated_sentences_0.txt",
"count": 20
},
"dae-yelp-neg-to-pos": {
"original": "/home/v2john/documents/style-transfer-models/output/20180629130342-inference/actual_sentences_1.txt",
"generated": "/home/v2john/documents/style-transfer-models/output/20180629130342-inference/generated_sentences_1.txt",
"count": 20
},
"vae-yelp-pos-to-neg": {
"original": "/home/v2john/documents/style-transfer-models/output/20180702122511-inference/actual_sentences_0.txt",
"generated": "/home/v2john/documents/style-transfer-models/output/20180702122511-inference/generated_sentences_0.txt",
"count": 20
},
"vae-yelp-neg-to-pos": {
"original": "/home/v2john/documents/style-transfer-models/output/20180702122511-inference/actual_sentences_1.txt",
"generated": "/home/v2john/documents/style-transfer-models/output/20180702122511-inference/generated_sentences_1.txt",
"count": 20
},
"nips-amazon-pos-to-neg": {
"original": "/home/v2john/documents/style-transfer-models/nips/amazon/data/sentiment.test.1",
"generated": "/home/v2john/documents/style-transfer-models/nips/amazon/sentiment.test.1.tsf",
"count": 0
},
"nips-amazon-neg-to-pos": {
"original": "/home/v2john/documents/style-transfer-models/nips/amazon/data/sentiment.test.0",
"generated": "/home/v2john/documents/style-transfer-models/nips/amazon/sentiment.test.0.tsf",
"count": 0
},
"aaai-amazon-pos-to-neg": {
"original": "/home/v2john/documents/style-transfer-models/nips/amazon/data/sentiment.test.1",
"generated": "/home/v2john/documents/style-transfer-models/aaai/amazon-test/fixed/style0.txt",
"count": 0
},
"aaai-amazon-neg-to-pos": {
"original": "/home/v2john/documents/style-transfer-models/nips/amazon/data/sentiment.test.0",
"generated": "/home/v2john/documents/style-transfer-models/aaai/amazon-test/fixed/style1.txt",
"count": 0
},
"dae-amazon-pos-to-neg": {
"original": "/home/v2john/documents/style-transfer-models/output/20180707010403-inference/actual_sentences_0.txt",
"generated": "/home/v2john/documents/style-transfer-models/output/20180707010403-inference/generated_sentences_0.txt",
"count": 0
},
"dae-amazon-neg-to-pos": {
"original": "/home/v2john/documents/style-transfer-models/output/20180707010403-inference/actual_sentences_1.txt",
"generated": "/home/v2john/documents/style-transfer-models/output/20180707010403-inference/generated_sentences_1.txt",
"count": 0
},
"vae-amazon-pos-to-neg": {
"original": "/home/v2john/documents/style-transfer-models/output/20180707142655-inference/actual_sentences_0.txt",
"generated": "/home/v2john/documents/style-transfer-models/output/20180707142655-inference/generated_sentences_0.txt",
"count": 0
},
"vae-amazon-neg-to-pos": {
"original": "/home/v2john/documents/style-transfer-models/output/20180707142655-inference/actual_sentences_1.txt",
"generated": "/home/v2john/documents/style-transfer-models/output/20180707142655-inference/generated_sentences_1.txt",
"count": 0
}
}
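A short sketch of how a config like this is typically consumed -- pairing the first `count` lines of each original/generated file for annotators. The helper below is an editorial assumption, not part of this module:
def load_annotation_pairs(key):
    # Pair up the first `count` original/generated sentences for one system.
    entry = annotation_config[key]
    with open(entry["original"]) as orig, open(entry["generated"]) as gen:
        pairs = list(zip(orig, gen))[:entry["count"]]
    return [(o.strip(), g.strip()) for o, g in pairs]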
| 54.778947
| 126
| 0.688509
| 633
| 5,204
| 5.581359
| 0.075829
| 0.104727
| 0.198981
| 0.251344
| 0.975092
| 0.933484
| 0.922729
| 0.898953
| 0.898953
| 0.898953
| 0
| 0.074697
| 0.143351
| 5,204
| 94
| 127
| 55.361702
| 0.717811
| 0
| 0
| 0.27957
| 0
| 0.387097
| 0.771906
| 0.658148
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 11
|
66f588f30e37b7a0b2379b655e8c19406302cded
| 8,591
|
py
|
Python
|
tests/test_formatter.py
|
kevinzg/beanbot
|
4558804f539720895f9fea2bb46bcdbfe886b034
|
[
"MIT"
] | 4
|
2019-09-30T10:04:45.000Z
|
2021-09-18T05:39:08.000Z
|
tests/test_formatter.py
|
kevinzg/beanbot
|
4558804f539720895f9fea2bb46bcdbfe886b034
|
[
"MIT"
] | 2
|
2021-07-30T02:13:56.000Z
|
2021-07-30T02:14:02.000Z
|
tests/test_formatter.py
|
kevinzg/beanbot
|
4558804f539720895f9fea2bb46bcdbfe886b034
|
[
"MIT"
] | 1
|
2021-03-23T10:40:37.000Z
|
2021-03-23T10:40:37.000Z
|
import textwrap
from decimal import Decimal
from beanbot.formatter import format_transaction
from beanbot.models import Posting, Transaction
class TestFormatTransaction:
def test_format_simple_transaction(self):
tx = Transaction(
id=1,
date=None,
info='Test',
postings=[
Posting(
id=1,
debit_account='Food',
credit_account='Cash',
amount=Decimal(10),
currency='USD',
)
],
)
assert (
format_transaction(tx)
== textwrap.dedent(
"""
Test
` 10.00 USD `_Food_
`=======`
`- 10.00 USD `Cash
"""
).strip()
)
def test_format_simple_transaction_with_empty_info(self):
tx = Transaction(
id=1,
date=None,
info='',
postings=[
Posting(
id=1,
debit_account='Food',
credit_account='Cash',
amount=Decimal(10),
currency='USD',
)
],
)
assert (
format_transaction(tx)
== textwrap.dedent(
"""
` 10.00 USD `_Food_
`=======`
`- 10.00 USD `Cash
"""
).strip()
)
def test_format_transaction_two_postings(self):
tx = Transaction(
id=1,
date=None,
info='Test',
postings=[
Posting(
id=1,
debit_account='Food',
credit_account='Cash',
amount=Decimal(10),
currency='USD',
),
Posting(
id=2,
debit_account='Candy',
credit_account='Cash',
amount=Decimal(2),
currency='USD',
),
],
)
assert (
format_transaction(tx)
== textwrap.dedent(
"""
Test
` 10.00 USD `_Food_
` 2.00 USD `_Candy_
`=======`
`- 12.00 USD `Cash
"""
).strip()
)
def test_format_transaction_different_credit_account(self):
tx = Transaction(
id=1,
date=None,
info='Test',
postings=[
Posting(
id=1,
debit_account='Food',
credit_account='Cash',
amount=Decimal(10),
currency='USD',
),
Posting(
id=2,
debit_account='Candy',
credit_account='CC',
amount=Decimal(2),
currency='USD',
),
],
)
assert (
format_transaction(tx)
== textwrap.dedent(
"""
Test
` 10.00 USD `_Food_
` 2.00 USD `_Candy_
`=======`
`- 10.00 USD `Cash
`- 2.00 USD `CC
"""
).strip()
)
def test_format_transaction_default_currency(self):
tx = Transaction(
id=1,
date=None,
info='Test',
postings=[
Posting(
id=1,
debit_account='Food',
credit_account='Cash',
amount=Decimal(10),
currency='USD',
),
Posting(
id=2,
debit_account='Candy',
credit_account='CC',
amount=Decimal(2),
currency='USD',
),
],
)
assert (
format_transaction(tx, default_currency='USD')
== textwrap.dedent(
"""
Test
` 10.00 `_Food_
` 2.00 `_Candy_
`=======`
`- 10.00 `Cash
`- 2.00 `CC
"""
).strip()
)
def test_format_transaction_different_credit_accounts_and_currency(self):
tx = Transaction(
id=1,
date=None,
info='Test',
postings=[
Posting(
id=1,
debit_account='Food',
credit_account='Cash',
amount=Decimal(10),
currency='USD',
),
Posting(
id=2,
debit_account='Candy',
credit_account='CC',
amount=Decimal(2),
currency='EUR',
),
],
)
assert (
format_transaction(tx)
== textwrap.dedent(
"""
Test
` 10.00 USD `_Food_
` 2.00 EUR `_Candy_
`=======`
`- 10.00 USD `Cash
`- 2.00 EUR `CC
"""
).strip()
)
def test_format_transaction_postings_different_currency(self):
tx = Transaction(
id=1,
date=None,
info='Test',
postings=[
Posting(
id=1,
debit_account='Food',
credit_account='Cash',
amount=Decimal(10),
currency='USD',
),
Posting(
id=2,
debit_account='Candy',
credit_account='Cash',
amount=Decimal(2),
currency='EUR',
),
],
)
formatted = textwrap.dedent(
"""
Test
` 10.00 USD `_Food_
` 2.00 EUR `_Candy_
`=======`
`- 10.00 USD `Cash
`- 2.00 EUR `
"""
).strip()
assert format_transaction(tx) == formatted
assert format_transaction(tx, default_currency='USD') == formatted
def test_format_transaction_postings_credit_accounts_with_different_currency(self):
tx = Transaction(
id=1,
date=None,
info='Test',
postings=[
Posting(
id=1,
debit_account='Food',
credit_account='Cash',
amount=Decimal(10),
currency='USD',
),
Posting(
id=2,
debit_account='Candy',
credit_account='Cash',
amount=Decimal(2),
currency='EUR',
),
Posting(
id=3,
debit_account='Books',
credit_account='Bank',
amount=Decimal(10),
currency='USD',
),
Posting(
id=4,
debit_account='Cookies',
credit_account='Bank',
amount=Decimal(2),
currency='EUR',
),
],
)
formatted = textwrap.dedent(
"""
Test
` 10.00 USD `_Food_
` 2.00 EUR `_Candy_
` 10.00 USD `_Books_
` 2.00 EUR `_Cookies_
`=======`
`- 10.00 USD `Cash
`- 2.00 EUR `
`- 10.00 USD `Bank
`- 2.00 EUR `
"""
).strip()
assert format_transaction(tx) == formatted
assert format_transaction(tx, default_currency='USD') == formatted
def test_escape_markdown(self):
tx = Transaction(
id=1,
date=None,
info='Test . _ * `',
postings=[
Posting(
id=1,
debit_account='Food.',
credit_account='Cash',
amount=Decimal(10),
currency='USD',
)
],
)
assert (
format_transaction(tx)
== textwrap.dedent(
"""
Test \\. \\_ \\* \\`
` 10.00 USD `_Food\\._
`=======`
`- 10.00 USD `Cash
"""
).strip()
)
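For orientation, the tests above pin down the ledger-style, Markdown-escaped layout that format_transaction emits; a quick standalone check reusing the same constructors (the info text is made up):
from decimal import Decimal
from beanbot.formatter import format_transaction
from beanbot.models import Posting, Transaction

tx = Transaction(
    id=1, date=None, info='Lunch',
    postings=[Posting(id=1, debit_account='Food', credit_account='Cash',
                      amount=Decimal(10), currency='USD')],
)
print(format_transaction(tx))  # renders the escaped, column-aligned block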
| 26.033333
| 87
| 0.365848
| 628
| 8,591
| 4.799363
| 0.089172
| 0.034837
| 0.039482
| 0.091573
| 0.889516
| 0.843397
| 0.822163
| 0.774718
| 0.747843
| 0.747843
| 0
| 0.042396
| 0.527762
| 8,591
| 329
| 88
| 26.112462
| 0.700518
| 0
| 0
| 0.828452
| 0
| 0
| 0.03315
| 0
| 0
| 0
| 0
| 0
| 0.046025
| 1
| 0.037657
| false
| 0
| 0.016736
| 0
| 0.058577
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
66fb1b41a10989c63e0b6d781b0db1d4a3997406
| 3,669
|
py
|
Python
|
openstates/openstates-master/scripts/affected_code/fl-test.py
|
Jgorsick/Advocacy_Angular
|
8906af3ba729b2303880f319d52bce0d6595764c
|
[
"CC-BY-4.0"
] | null | null | null |
openstates/openstates-master/scripts/affected_code/fl-test.py
|
Jgorsick/Advocacy_Angular
|
8906af3ba729b2303880f319d52bce0d6595764c
|
[
"CC-BY-4.0"
] | null | null | null |
openstates/openstates-master/scripts/affected_code/fl-test.py
|
Jgorsick/Advocacy_Angular
|
8906af3ba729b2303880f319d52bce0d6595764c
|
[
"CC-BY-4.0"
] | null | null | null |
s = '''
Subsections (1), (2), (3), (4), and (6) and paragraph (c) of subsection (7) of section 1002.69, Florida Statutes, are amended to read:
Section 1003.46, Florida Statutes, is amended to read:
Paragraph (d) of subsection (3) of section 1002.20, Florida Statutes, is amended to read:
Subsection (10) of section 447.203, Florida Statutes, is amended to read:
Paragraph (a) of subsection (4) of section 1001.20, Florida Statutes, is amended to read:
Paragraphs (b) and (e) of subsection (1) and subsections (2) and (4) of section 1006.33, Florida Statutes, are amended to read:
Subsection (1), paragraph (a) of subsection (2), and paragraphs (b) and (e) of subsection (3) of section 1006.28, Florida Statutes, are amended to read:
Subsections (1), (2), (3), and (7) of section 1006.34, Florida Statutes, are amended to read:
Subsection (2), paragraph (a) of subsection (3), and subsection (4) of section 1006.40, Florida Statutes, are amended to read:
Paragraph (p) of subsection (1) and paragraph (b) of subsection (6) of section 1011.62, Florida Statutes, are amended to read:
Paragraph (b) of subsection (3) and subsection (4) of section 1008.33, Florida Statutes, are amended to read:
Subsection (23) of section 1001.42, Florida Statutes, is amended to read:
Paragraph (b) of subsection (5) of section 1002.33, Florida Statutes, is amended to read:
Paragraph (a) of subsection (1) of section 1002.37, Florida Statutes, is amended to read:
Paragraph (f) is added to subsection (3) of section 1002.38, Florida Statutes, to read:
Paragraph (b) of subsection (2) of section 1002.45, Florida Statutes, is amended to read:
Subsection (1) and paragraph (c) of subsection (3) of section 1002.67, Florida Statutes, are amended to read:
Subsection (2) of section 1002.73, Florida Statutes, is amended to read:
Paragraph (c) of subsection (4) of section 1003.03, Florida Statutes, is amended to read:
Subsection (1) of section 1003.4156, Florida Statutes, is amended to read:
Section 1003.4203, Florida Statutes, is created to read:
Subsection (2) of section 1003.428, Florida Statutes, is amended to read:
Subsection (1) of section 1003.492, Florida Statutes, is amended to read:
Section 1003.493, Florida Statutes, is amended to read:
Section 1003.575, Florida Statutes, is amended to read:
Subsection (2) of section 1003.621, Florida Statutes, is amended to read:
Section 1006.29, Florida Statutes, is amended to read:
Section 1006.30, Florida Statutes, is amended to read:
Section 1006.31, Florida Statutes, is amended to read:
Section 1006.32, Florida Statutes, is amended to read:
Subsection (2) of section 1006.35, Florida Statutes, is amended to read:
Section 1006.36, Florida Statutes, is amended to read:
Section 1006.37, Florida Statutes, is repealed.
Subsection (5) of section 1006.39, Florida Statutes, is amended to read:
Section 1006.43, Florida Statutes, is amended to read:
Effective upon this act becoming a law, subsection (2) and paragraph (c) of subsection (3) of section 1008.22, Florida Statutes, are amended to read:
Subsection (3) of section 1008.34, Florida Statutes, is amended to read:
Paragraph (a) of subsection (3) of section 1011.01, Florida Statutes, is amended to read:
Subsection (4) of section 1011.03, Florida Statutes, is amended to read:
Subsection (1) of section 1011.61, Florida Statutes, is amended to read:
Subsection (1) of section 1012.39, Florida Statutes, is amended to read:'''
def main():
from core.utils import parse
from core.fl import Lexer, Parser
for section in filter(None, s.splitlines()):
tokens = parse(Lexer, Parser, None, section)
print(section)  # Python 3 print; the original used Python 2 statement syntax
if __name__ == '__main__':
main()
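Independent of core.fl's Lexer/Parser (whose internals are not shown here), a self-contained illustration of extracting the cited statute numbers from these lines with a regex -- purely an editorial sketch:
import re

SECTION_RE = re.compile(r'section (\d+(?:\.\d+)?)', re.IGNORECASE)

def cited_sections(text):
    # Collect the distinct Florida Statutes section numbers cited per line.
    return sorted({m.group(1) for line in text.splitlines()
                   for m in SECTION_RE.finditer(line)})

# cited_sections(s) -> ['1001.20', '1001.42', ..., '1012.39']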
| 67.944444
| 152
| 0.74598
| 594
| 3,669
| 4.594276
| 0.170034
| 0.225357
| 0.181019
| 0.255038
| 0.760718
| 0.702455
| 0.627702
| 0.451081
| 0.171491
| 0.171491
| 0
| 0.098962
| 0.159989
| 3,669
| 54
| 153
| 67.944444
| 0.786502
| 0
| 0
| 0
| 0
| 0.34
| 0.931608
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.04
| null | null | 0.02
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
66fc3367cda58f58ef8a5b1539d76a81537678b0
| 160
|
py
|
Python
|
queued_mailer/utils.py
|
jar3b/django-queued-mailer
|
51554bf80cc0e36869d2dc61b92175f5f7402b3f
|
[
"MIT"
] | 1
|
2018-12-05T13:35:17.000Z
|
2018-12-05T13:35:17.000Z
|
queued_mailer/utils.py
|
jar3b/django-queued-mailer
|
51554bf80cc0e36869d2dc61b92175f5f7402b3f
|
[
"MIT"
] | null | null | null |
queued_mailer/utils.py
|
jar3b/django-queued-mailer
|
51554bf80cc0e36869d2dc61b92175f5f7402b3f
|
[
"MIT"
] | null | null | null |
from django.core.mail import get_connection
from .settings import EMAIL_BACKEND
def get_email_connection():
return get_connection(backend=EMAIL_BACKEND)
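A brief usage sketch, assuming a standard Django project (EmailMessage is Django's stock API; the address below is a placeholder):
from django.core.mail import EmailMessage
from queued_mailer.utils import get_email_connection

def send_test_mail():
    # Route one message through the backend named in queued_mailer's settings.
    EmailMessage(
        subject="hello",
        body="sent via the configured queued-mailer backend",
        to=["user@example.com"],
        connection=get_email_connection(),
    ).send()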
| 20
| 48
| 0.825
| 22
| 160
| 5.727273
| 0.545455
| 0.206349
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.11875
| 160
| 7
| 49
| 22.857143
| 0.893617
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| true
| 0
| 0.5
| 0.25
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 1
| 1
| 0
|
0
| 8
|
0f443cefc172960cc543bd2710642cedd560c527
| 6,665
|
py
|
Python
|
tests/test_basic.py
|
deeplook/mcinstall
|
52e6926316f5db329a194cfd1dfcca638cb40d67
|
[
"MIT"
] | 6
|
2019-09-30T12:10:53.000Z
|
2020-03-13T09:34:32.000Z
|
tests/test_basic.py
|
sackh/mcinstall
|
2bda5942096c23a52057ecef7d9d2bba9e3045b8
|
[
"MIT"
] | 14
|
2020-02-19T07:59:37.000Z
|
2020-05-15T06:18:00.000Z
|
tests/test_basic.py
|
sackh/mcinstall
|
2bda5942096c23a52057ecef7d9d2bba9e3045b8
|
[
"MIT"
] | 3
|
2020-02-16T14:45:55.000Z
|
2020-03-13T09:34:58.000Z
|
"""
Test creating Miniconda installation and provisioning.
"""
import json
import os
import re
import subprocess
import sys
import pytest
import platform
import tempfile
import shutil
from mcinstall import MinicondaInstaller, config
def test_show_config():
print(json.dumps(config, indent=4))
@pytest.mark.skipif(platform.system() == "Windows", reason="uname command will not run on Windows.")
def test_uname_m():
out = subprocess.check_output(["uname", "-m"])
print(out.decode("utf-8"))
def test_install_dependencies():
try:
with tempfile.TemporaryDirectory() as tempdir:
mci = MinicondaInstaller(tempdir, verbose=True)
mci.download()
mci.install_miniconda()
mci.update_miniconda_base()
mci.install_pip(dependencies=["geopy"])
mci.install_conda(channel="conda-forge", dependencies=["pyyaml"])
if platform.system() == "Windows":
py_exe = "%s\\python" % tempdir
else:
py_exe = "%s/bin/python" % tempdir
# Run Miniconda's Python and import the installed dependencies.
for pkg_name in ["geopy", "pyyaml"]:
if pkg_name == "pyyaml":
pkg_name = "yaml"
if platform.system() == "Windows":
cmd = [
"%s\\condabin\\activate"%(tempdir),
"&&",
py_exe,
"-c",
'''import %s; print("%s %%s ok" %% %s.__version__)''' % \
(pkg_name, pkg_name, pkg_name)
]
else:
cmd = [
py_exe,
"-c",
'''import %s; print("%s %%s ok" %% %s.__version__)''' % \
(pkg_name, pkg_name, pkg_name)
]
if platform.system() == "Windows":
out = subprocess.check_output(cmd, shell=True).decode("utf-8").strip()
else:
out = subprocess.check_output(cmd).decode("utf-8").strip()
print(out)
if platform.system() == "Windows":
assert re.search("%s .+ ok" % pkg_name, out)
else:
assert re.match("%s .+ ok" % pkg_name, out)
except NotADirectoryError as err:
print(err)
shutil.rmtree(tempdir)
print()
assert not os.path.exists(tempdir)
def test_install_dependencies_index_url():
try:
with tempfile.TemporaryDirectory() as tempdir:
mci = MinicondaInstaller(tempdir, verbose=True)
mci.download()
mci.install_miniconda()
mci.update_miniconda_base()
mci.install_pip(dependencies=["pypi_pkg_test"], index_url="https://test.pypi.org/simple/")
if platform.system() == "Windows":
py_exe = "%s\\python" % tempdir
else:
py_exe = "%s/bin/python" % tempdir
# Run Miniconda's Python and import the installed dependencies.
for pkg_name in ["pypi_pkg_test"]:
cmd = [
py_exe,
"-c",
'''import %s; print("%s %%s ok" %% %s)''' % \
(pkg_name, pkg_name, pkg_name)
]
if platform.system() == "Windows":
out = subprocess.check_output(cmd, shell=True).decode("utf-8").strip()
else:
out = subprocess.check_output(cmd).decode("utf-8").strip()
print(out)
assert re.match("%s .+ ok" % pkg_name, out)
except NotADirectoryError:
shutil.rmtree(tempdir)
print()
assert not os.path.exists(tempdir)
def test_install_dependencies_extra_index_url():
try:
with tempfile.TemporaryDirectory() as tempdir:
mci = MinicondaInstaller(tempdir, verbose=True)
mci.download()
mci.install_miniconda()
mci.update_miniconda_base()
mci.install_pip(dependencies=["pypi_pkg_test"], index_url="https://test.pypi.org/simpletest/", extra_index_url="https://test.pypi.org/simple/")
if platform.system() == "Windows":
py_exe = "%s\\python" % tempdir
else:
py_exe = "%s/bin/python" % tempdir
# Run Miniconda's Python and import the installed dependencies.
for pkg_name in ["pypi_pkg_test"]:
cmd = [
py_exe,
"-c",
'''import %s; print("%s %%s ok" %% %s)''' % \
(pkg_name, pkg_name, pkg_name)
]
if platform.system() == "Windows":
out = subprocess.check_output(cmd, shell=True).decode("utf-8").strip()
else:
out = subprocess.check_output(cmd).decode("utf-8").strip()
print(out)
assert re.match("%s .+ ok" % pkg_name, out)
except NotADirectoryError:
shutil.rmtree(tempdir)
print()
assert not os.path.exists(tempdir)
def test_install_dependencies_extra_index_urls():
try:
with tempfile.TemporaryDirectory() as tempdir:
mci = MinicondaInstaller(tempdir, verbose=True)
mci.download()
mci.install_miniconda()
mci.update_miniconda_base()
mci.install_pip(dependencies=["pypi_pkg_test"], index_url="https://test.pypi.org/simpletest/", extra_index_url="https://test.pypi.org/simpletest1/, https://test.pypi.org/simple/")
if platform.system() == "Windows":
py_exe = "%s\\python" % tempdir
else:
py_exe = "%s/bin/python" % tempdir
# Run Miniconda's Python and import the installed dependencies.
for pkg_name in ["pypi_pkg_test"]:
cmd = [
py_exe,
"-c",
'''import %s; print("%s %%s ok" %% %s)''' % \
(pkg_name, pkg_name, pkg_name)
]
if platform.system() == "Windows":
out = subprocess.check_output(cmd, shell=True).decode("utf-8").strip()
else:
out = subprocess.check_output(cmd).decode("utf-8").strip()
print(out)
assert re.match("%s .+ ok" % pkg_name, out)
except NotADirectoryError:
shutil.rmtree(tempdir)
print()
assert not os.path.exists(tempdir)
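Editorial aside: every test above repeats the same Windows/Unix branching around subprocess.check_output; a hypothetical shared helper (not part of the module) that would collapse that duplication:
def run_in_env(cmd):
    # Windows needs shell=True for the condabin activation chain; Unix does not.
    if platform.system() == "Windows":
        out = subprocess.check_output(cmd, shell=True)
    else:
        out = subprocess.check_output(cmd)
    return out.decode("utf-8").strip()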
| 36.823204
| 191
| 0.513728
| 683
| 6,665
| 4.849195
| 0.152269
| 0.054952
| 0.069746
| 0.069444
| 0.814614
| 0.810688
| 0.810688
| 0.810688
| 0.810688
| 0.810688
| 0
| 0.002584
| 0.36129
| 6,665
| 180
| 192
| 37.027778
| 0.775429
| 0.045461
| 0
| 0.744966
| 0
| 0
| 0.105031
| 0.003594
| 0
| 0
| 0
| 0
| 0.060403
| 1
| 0.040268
| false
| 0
| 0.067114
| 0
| 0.107383
| 0.073826
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
0f61b8c46c3fb1e7650b03a3c895c2e3b56d3238
| 20,810
|
py
|
Python
|
Pyto Mac/PyObjC/VideoToolbox/_metadata.py
|
cclauss/Pyto
|
1c4ccc47e3a91e996bf6ec38c527d244de2cf7ed
|
[
"MIT"
] | 4
|
2019-03-11T18:05:49.000Z
|
2021-05-22T21:09:09.000Z
|
Pyto Mac/PyObjC/VideoToolbox/_metadata.py
|
cclauss/Pyto
|
1c4ccc47e3a91e996bf6ec38c527d244de2cf7ed
|
[
"MIT"
] | null | null | null |
Pyto Mac/PyObjC/VideoToolbox/_metadata.py
|
cclauss/Pyto
|
1c4ccc47e3a91e996bf6ec38c527d244de2cf7ed
|
[
"MIT"
] | 1
|
2019-03-18T18:53:36.000Z
|
2019-03-18T18:53:36.000Z
|
# This file is generated by objective.metadata
#
# Last update: Sun Sep 9 19:02:46 2018
import objc, sys
if sys.maxsize > 2 ** 32:
def sel32or64(a, b): return b
else:
def sel32or64(a, b): return a
if sys.byteorder == 'little':
def littleOrBig(a, b): return a
else:
def littleOrBig(a, b): return b
misc = {
}
misc.update({'VTInt32Point': objc.createStructType('VTInt32Point', b'{VTInt32Point=ii}', ['x', 'y']), 'VTInt32Size': objc.createStructType('VTInt32Size', b'{VTInt32Size=ii}', ['width', 'height'])})
constants = '''$kVTCompressionPropertyKey_AllowFrameReordering@^{__CFString=}$kVTCompressionPropertyKey_AllowOpenGOP@^{__CFString=}$kVTCompressionPropertyKey_AllowTemporalCompression@^{__CFString=}$kVTCompressionPropertyKey_AspectRatio16x9@^{__CFString=}$kVTCompressionPropertyKey_AverageBitRate@^{__CFString=}$kVTCompressionPropertyKey_BaseLayerFrameRate@^{__CFString=}$kVTCompressionPropertyKey_CleanAperture@^{__CFString=}$kVTCompressionPropertyKey_ColorPrimaries@^{__CFString=}$kVTCompressionPropertyKey_ContentLightLevelInfo@^{__CFString=}$kVTCompressionPropertyKey_DataRateLimits@^{__CFString=}$kVTCompressionPropertyKey_Depth@^{__CFString=}$kVTCompressionPropertyKey_EncoderID@^{__CFString=}$kVTCompressionPropertyKey_ExpectedDuration@^{__CFString=}$kVTCompressionPropertyKey_ExpectedFrameRate@^{__CFString=}$kVTCompressionPropertyKey_FieldCount@^{__CFString=}$kVTCompressionPropertyKey_FieldDetail@^{__CFString=}$kVTCompressionPropertyKey_H264EntropyMode@^{__CFString=}$kVTCompressionPropertyKey_ICCProfile@^{__CFString=}$kVTCompressionPropertyKey_MasteringDisplayColorVolume@^{__CFString=}$kVTCompressionPropertyKey_MaxFrameDelayCount@^{__CFString=}$kVTCompressionPropertyKey_MaxH264SliceBytes@^{__CFString=}$kVTCompressionPropertyKey_MaxKeyFrameInterval@^{__CFString=}$kVTCompressionPropertyKey_MaxKeyFrameIntervalDuration@^{__CFString=}$kVTCompressionPropertyKey_MaximizePowerEfficiency@^{__CFString=}$kVTCompressionPropertyKey_MoreFramesAfterEnd@^{__CFString=}$kVTCompressionPropertyKey_MoreFramesBeforeStart@^{__CFString=}$kVTCompressionPropertyKey_MultiPassStorage@^{__CFString=}$kVTCompressionPropertyKey_NumberOfPendingFrames@^{__CFString=}$kVTCompressionPropertyKey_PixelAspectRatio@^{__CFString=}$kVTCompressionPropertyKey_PixelBufferPoolIsShared@^{__CFString=}$kVTCompressionPropertyKey_PixelTransferProperties@^{__CFString=}$kVTCompressionPropertyKey_ProfileLevel@^{__CFString=}$kVTCompressionPropertyKey_ProgressiveScan@^{__CFString=}$kVTCompressionPropertyKey_Quality@^{__CFString=}$kVTCompressionPropertyKey_RealTime@^{__CFString=}$kVTCompressionPropertyKey_SourceFrameCount@^{__CFString=}$kVTCompressionPropertyKey_TransferFunction@^{__CFString=}$kVTCompressionPropertyKey_UsingHardwareAcceleratedVideoEncoder@^{__CFString=}$kVTCompressionPropertyKey_VideoEncoderPixelBufferAttributes@^{__CFString=}$kVTCompressionPropertyKey_YCbCrMatrix@^{__CFString=}$kVTDecompressionPropertyKey_ContentHasInterframeDependencies@^{__CFString=}$kVTDecompressionPropertyKey_DeinterlaceMode@^{__CFString=}$kVTDecompressionPropertyKey_FieldMode@^{__CFString=}$kVTDecompressionPropertyKey_MaxOutputPresentationTimeStampOfFramesBeingDecoded@^{__CFString=}$kVTDecompressionPropertyKey_MaximizePowerEfficiency@^{__CFString=}$kVTDecompressionPropertyKey_MinOutputPresentationTimeStampOfFramesBeingDecoded@^{__CFString=}$kVTDecompressionPropertyKey_NumberOfFramesBeingDecoded@^{__CFString=}$kVTDecompressionPropertyKey_OnlyTheseFrames@^{__CFString=}$kVTDecompressionPropertyKey_OutputPoolRequestedMinimumBufferCount@^{__CFString=}$kVTDecompressionPropertyKey_PixelBufferPool@^{__CFString=}$kVTDecompressionPropertyKey_PixelBufferPoolIsShared@^{__CFString=}$kVTDecompressionPropertyKey_PixelFormatsWithReducedResolutionSupport@^{__CFString=}$kVTDecompressionPropertyKey_PixelTransferProperties@^{__CFString=}$kVTDecompressionPropertyKey_RealTime@^{__CFString=}$kVTDecompressionPropertyKey_ReducedCoefficientDecode@^{__CFString=}$kVTDecompressionPropertyKey_ReducedFrameDelivery@^{__CFString=}$kVTDecompressionPropertyKey_ReducedResolutionDecode@^{__CFString
=}$kVTDecompressionPropertyKey_SuggestedQualityOfServiceTiers@^{__CFString=}$kVTDecompressionPropertyKey_SupportedPixelFormatsOrderedByPerformance@^{__CFString=}$kVTDecompressionPropertyKey_SupportedPixelFormatsOrderedByQuality@^{__CFString=}$kVTDecompressionPropertyKey_ThreadCount@^{__CFString=}$kVTDecompressionPropertyKey_UsingHardwareAcceleratedVideoDecoder@^{__CFString=}$kVTDecompressionProperty_DeinterlaceMode_Temporal@^{__CFString=}$kVTDecompressionProperty_DeinterlaceMode_VerticalFilter@^{__CFString=}$kVTDecompressionProperty_FieldMode_BothFields@^{__CFString=}$kVTDecompressionProperty_FieldMode_BottomFieldOnly@^{__CFString=}$kVTDecompressionProperty_FieldMode_DeinterlaceFields@^{__CFString=}$kVTDecompressionProperty_FieldMode_SingleField@^{__CFString=}$kVTDecompressionProperty_FieldMode_TopFieldOnly@^{__CFString=}$kVTDecompressionProperty_OnlyTheseFrames_AllFrames@^{__CFString=}$kVTDecompressionProperty_OnlyTheseFrames_IFrames@^{__CFString=}$kVTDecompressionProperty_OnlyTheseFrames_KeyFrames@^{__CFString=}$kVTDecompressionProperty_OnlyTheseFrames_NonDroppableFrames@^{__CFString=}$kVTDecompressionProperty_TemporalLevelLimit@^{__CFString=}$kVTDecompressionResolutionKey_Height@^{__CFString=}$kVTDecompressionResolutionKey_Width@^{__CFString=}$kVTDownsamplingMode_Average@^{__CFString=}$kVTDownsamplingMode_Decimate@^{__CFString=}$kVTEncodeFrameOptionKey_ForceKeyFrame@^{__CFString=}$kVTH264EntropyMode_CABAC@^{__CFString=}$kVTH264EntropyMode_CAVLC@^{__CFString=}$kVTMultiPassStorageCreationOption_DoNotDelete@^{__CFString=}$kVTPixelTransferPropertyKey_DestinationCleanAperture@^{__CFString=}$kVTPixelTransferPropertyKey_DestinationColorPrimaries@^{__CFString=}$kVTPixelTransferPropertyKey_DestinationICCProfile@^{__CFString=}$kVTPixelTransferPropertyKey_DestinationPixelAspectRatio@^{__CFString=}$kVTPixelTransferPropertyKey_DestinationTransferFunction@^{__CFString=}$kVTPixelTransferPropertyKey_DestinationYCbCrMatrix@^{__CFString=}$kVTPixelTransferPropertyKey_DownsamplingMode@^{__CFString=}$kVTPixelTransferPropertyKey_ScalingMode@^{__CFString=}$kVTProfileLevel_H263_Profile0_Level10@^{__CFString=}$kVTProfileLevel_H263_Profile0_Level45@^{__CFString=}$kVTProfileLevel_H263_Profile3_Level45@^{__CFString=}$kVTProfileLevel_H264_Baseline_1_3@^{__CFString=}$kVTProfileLevel_H264_Baseline_3_0@^{__CFString=}$kVTProfileLevel_H264_Baseline_3_1@^{__CFString=}$kVTProfileLevel_H264_Baseline_3_2@^{__CFString=}$kVTProfileLevel_H264_Baseline_4_0@^{__CFString=}$kVTProfileLevel_H264_Baseline_4_1@^{__CFString=}$kVTProfileLevel_H264_Baseline_4_2@^{__CFString=}$kVTProfileLevel_H264_Baseline_5_0@^{__CFString=}$kVTProfileLevel_H264_Baseline_5_1@^{__CFString=}$kVTProfileLevel_H264_Baseline_5_2@^{__CFString=}$kVTProfileLevel_H264_Baseline_AutoLevel@^{__CFString=}$kVTProfileLevel_H264_Extended_5_0@^{__CFString=}$kVTProfileLevel_H264_Extended_AutoLevel@^{__CFString=}$kVTProfileLevel_H264_High_3_0@^{__CFString=}$kVTProfileLevel_H264_High_3_1@^{__CFString=}$kVTProfileLevel_H264_High_3_2@^{__CFString=}$kVTProfileLevel_H264_High_4_0@^{__CFString=}$kVTProfileLevel_H264_High_4_1@^{__CFString=}$kVTProfileLevel_H264_High_4_2@^{__CFString=}$kVTProfileLevel_H264_High_5_0@^{__CFString=}$kVTProfileLevel_H264_High_5_1@^{__CFString=}$kVTProfileLevel_H264_High_5_2@^{__CFString=}$kVTProfileLevel_H264_High_AutoLevel@^{__CFString=}$kVTProfileLevel_H264_Main_3_0@^{__CFString=}$kVTProfileLevel_H264_Main_3_1@^{__CFString=}$kVTProfileLevel_H264_Main_3_2@^{__CFString=}$kVTProfileLevel_H264_Main_4_0@^{__CFString=}$kVTProfileLevel_H264_Main_4_1@^{__CFSt
ring=}$kVTProfileLevel_H264_Main_4_2@^{__CFString=}$kVTProfileLevel_H264_Main_5_0@^{__CFString=}$kVTProfileLevel_H264_Main_5_1@^{__CFString=}$kVTProfileLevel_H264_Main_5_2@^{__CFString=}$kVTProfileLevel_H264_Main_AutoLevel@^{__CFString=}$kVTProfileLevel_HEVC_Main10_AutoLevel@^{__CFString=}$kVTProfileLevel_HEVC_Main_AutoLevel@^{__CFString=}$kVTProfileLevel_MP4V_AdvancedSimple_L0@^{__CFString=}$kVTProfileLevel_MP4V_AdvancedSimple_L1@^{__CFString=}$kVTProfileLevel_MP4V_AdvancedSimple_L2@^{__CFString=}$kVTProfileLevel_MP4V_AdvancedSimple_L3@^{__CFString=}$kVTProfileLevel_MP4V_AdvancedSimple_L4@^{__CFString=}$kVTProfileLevel_MP4V_Main_L2@^{__CFString=}$kVTProfileLevel_MP4V_Main_L3@^{__CFString=}$kVTProfileLevel_MP4V_Main_L4@^{__CFString=}$kVTProfileLevel_MP4V_Simple_L0@^{__CFString=}$kVTProfileLevel_MP4V_Simple_L1@^{__CFString=}$kVTProfileLevel_MP4V_Simple_L2@^{__CFString=}$kVTProfileLevel_MP4V_Simple_L3@^{__CFString=}$kVTPropertyDocumentationKey@^{__CFString=}$kVTPropertyReadWriteStatusKey@^{__CFString=}$kVTPropertyReadWriteStatus_ReadOnly@^{__CFString=}$kVTPropertyReadWriteStatus_ReadWrite@^{__CFString=}$kVTPropertyShouldBeSerializedKey@^{__CFString=}$kVTPropertySupportedValueListKey@^{__CFString=}$kVTPropertySupportedValueMaximumKey@^{__CFString=}$kVTPropertySupportedValueMinimumKey@^{__CFString=}$kVTPropertyTypeKey@^{__CFString=}$kVTPropertyType_Boolean@^{__CFString=}$kVTPropertyType_Enumeration@^{__CFString=}$kVTPropertyType_Number@^{__CFString=}$kVTScalingMode_CropSourceToCleanAperture@^{__CFString=}$kVTScalingMode_Letterbox@^{__CFString=}$kVTScalingMode_Normal@^{__CFString=}$kVTScalingMode_Trim@^{__CFString=}$kVTVideoDecoderSpecification_EnableHardwareAcceleratedVideoDecoder@^{__CFString=}$kVTVideoDecoderSpecification_PreferredDecoderGPURegistryID@^{__CFString=}$kVTVideoDecoderSpecification_RequireHardwareAcceleratedVideoDecoder@^{__CFString=}$kVTVideoDecoderSpecification_RequiredDecoderGPURegistryID@^{__CFString=}$kVTVideoEncoderList_CodecName@^{__CFString=}$kVTVideoEncoderList_CodecType@^{__CFString=}$kVTVideoEncoderList_DisplayName@^{__CFString=}$kVTVideoEncoderList_EncoderID@^{__CFString=}$kVTVideoEncoderList_EncoderName@^{__CFString=}$kVTVideoEncoderSpecification_EnableHardwareAcceleratedVideoEncoder@^{__CFString=}$kVTVideoEncoderSpecification_EncoderID@^{__CFString=}$kVTVideoEncoderSpecification_RequireHardwareAcceleratedVideoEncoder@^{__CFString=}$'''
enums = '''$kVTAllocationFailedErr@-12904$kVTColorCorrectionImageRotationFailedErr@-12219$kVTColorCorrectionPixelTransferFailedErr@-12212$kVTColorSyncTransformConvertFailedErr@-12919$kVTCompressionSessionBeginFinalPass@1$kVTCouldNotCreateColorCorrectionDataErr@-12918$kVTCouldNotCreateInstanceErr@-12907$kVTCouldNotFindTemporalFilterErr@-12217$kVTCouldNotFindVideoDecoderErr@-12906$kVTCouldNotFindVideoEncoderErr@-12908$kVTDecodeFrame_1xRealTimePlayback@4$kVTDecodeFrame_DoNotOutputFrame@2$kVTDecodeFrame_EnableAsynchronousDecompression@1$kVTDecodeFrame_EnableTemporalProcessing@8$kVTDecodeInfo_Asynchronous@1$kVTDecodeInfo_FrameDropped@2$kVTDecodeInfo_ImageBufferModifiable@4$kVTEncodeInfo_Asynchronous@1$kVTEncodeInfo_FrameDropped@2$kVTFormatDescriptionChangeNotSupportedErr@-12916$kVTFrameSiloInvalidTimeRangeErr@-12216$kVTFrameSiloInvalidTimeStampErr@-12215$kVTImageRotationNotSupportedErr@-12914$kVTInsufficientSourceColorDataErr@-12917$kVTInvalidSessionErr@-12903$kVTMultiPassStorageIdentifierMismatchErr@-12213$kVTMultiPassStorageInvalidErr@-12214$kVTParameterErr@-12902$kVTPixelTransferNotPermittedErr@-12218$kVTPixelTransferNotSupportedErr@-12905$kVTPropertyNotSupportedErr@-12900$kVTPropertyReadOnlyErr@-12901$kVTUnlimitedFrameDelayCount@-1$kVTVideoDecoderAuthorizationErr@-12210$kVTVideoDecoderBadDataErr@-12909$kVTVideoDecoderMalfunctionErr@-12911$kVTVideoDecoderNotAvailableNowErr@-12913$kVTVideoDecoderRemovedErr@-17690$kVTVideoDecoderUnsupportedDataFormatErr@-12910$kVTVideoEncoderAuthorizationErr@-12211$kVTVideoEncoderMalfunctionErr@-12912$kVTVideoEncoderNotAvailableNowErr@-12915$'''
misc.update({})
functions={'VTPixelTransferSessionCreate': (sel32or64(b'l^{__CFAllocator=}^^{OpaqueVTPixelTransferSession=}', b'i^{__CFAllocator=}^^{OpaqueVTPixelTransferSession=}'), '', {'retval': {'already_cfretained': True}, 'arguments': {1: {'already_cfretained': True, 'type_modifier': 'o'}}}), 'VTCreateCGImageFromCVPixelBuffer': (sel32or64(b'l^{__CVBuffer=}^{__CFDictionary=}^^{CGImage=}', b'i^{__CVBuffer=}^{__CFDictionary=}^^{CGImage=}'), '', {'retval': {'already_cfretained': True}, 'arguments': {2: {'already_cfretained': True, 'type_modifier': 'o'}}}), 'VTMultiPassStorageGetTypeID': (sel32or64(b'L', b'Q'),), 'VTDecompressionSessionFinishDelayedFrames': (sel32or64(b'l^{OpaqueVTDecompressionSession=}', b'i^{OpaqueVTDecompressionSession=}'),), 'VTSessionCopySupportedPropertyDictionary': (sel32or64(b'l@^^{__CFDictionary=}', b'i@^^{__CFDictionary=}'), '', {'retval': {'already_cfretained': True}, 'arguments': {1: {'already_cfretained': True, 'type_modifier': 'o'}}}), 'VTSessionSetProperty': (sel32or64(b'l@^{__CFString=}@', b'i@^{__CFString=}@'),), 'VTPixelTransferSessionTransferImage': (sel32or64(b'l^{OpaqueVTPixelTransferSession=}^{__CVBuffer=}^{__CVBuffer=}', b'i^{OpaqueVTPixelTransferSession=}^{__CVBuffer=}^{__CVBuffer=}'),), 'VTFrameSiloGetProgressOfCurrentPass': (sel32or64(b'l^{OpaqueVTFrameSilo=}^f', b'i^{OpaqueVTFrameSilo=}^f'), '', {'arguments': {1: {'type_modifier': 'o'}}}), 'VTMultiPassStorageClose': (sel32or64(b'l^{OpaqueVTMultiPassStorage=}', b'i^{OpaqueVTMultiPassStorage=}'),), 'VTIsHardwareDecodeSupported': (sel32or64(b'ZL', b'ZI'),), 'VTCompressionSessionPrepareToEncodeFrames': (sel32or64(b'l^{OpaqueVTCompressionSession=}', b'i^{OpaqueVTCompressionSession=}'),), 'VTCompressionSessionInvalidate': (b'v^{OpaqueVTCompressionSession=}',), 'VTCompressionSessionCompleteFrames': (sel32or64(b'l^{OpaqueVTCompressionSession=}{_CMTime=qiIq}', b'i^{OpaqueVTCompressionSession=}{_CMTime=qiIq}'),), 'VTFrameSiloCallBlockForEachSampleBuffer': (sel32or64(b'l^{OpaqueVTFrameSilo=}{_CMTimeRange={_CMTime=qiIq}{_CMTime=qiIq}}@?', b'i^{OpaqueVTFrameSilo=}{_CMTimeRange={_CMTime=qiIq}{_CMTime=qiIq}}@?'), '', {'arguments': {2: {'callable': {'retval': {'type': b'i'}, 'arguments': {0: {'type': '^v'}, 1: {'type': '^{opaqueCMSampleBuffer=}'}}}, 'block': {'retval': {'type': b'i'}, 'arguments': {0: {'type': b'^{opaqueCMSampleBuffer=}'}}}}}}), 'VTSessionCopyProperty': (sel32or64(b'l@^{__CFString=}^{__CFAllocator=}^@', b'i@^{__CFString=}^{__CFAllocator=}^@'), '', {'retval': {'already_cfretained': True}, 'arguments': {3: {'already_cfretained': True, 'type_modifier': 'o'}}}), 'VTFrameSiloAddSampleBuffer': (sel32or64(b'l^{OpaqueVTFrameSilo=}^{opaqueCMSampleBuffer=}', b'i^{OpaqueVTFrameSilo=}^{opaqueCMSampleBuffer=}'),), 'VTDecompressionSessionDecodeFrame': (sel32or64(b'l^{OpaqueVTDecompressionSession=}^{opaqueCMSampleBuffer=}I^v^L', b'i^{OpaqueVTDecompressionSession=}^{opaqueCMSampleBuffer=}I^v^I'), '', {'arguments': {4: {'type_modifier': 'o'}}}), 'VTRegisterProfessionalVideoWorkflowVideoEncoders': (b'v',), 'VTCompressionSessionEncodeFrameWithOutputHandler': (sel32or64(b'l^{OpaqueVTCompressionSession=}^{__CVBuffer=}{_CMTime=qiIq}{_CMTime=qiIq}^{__CFDictionary=}^L@?', b'i^{OpaqueVTCompressionSession=}^{__CVBuffer=}{_CMTime=qiIq}{_CMTime=qiIq}^{__CFDictionary=}^I@?'), '', {'arguments': {5: {'type_modifier': 'o'}, 6: {'callable': {'retval': {'type': b'v'}, 'arguments': {0: {'type': '^v'}, 1: {'type': 'i'}, 2: {'type': [u'I', u'Q']}, 3: {'type': '^{opaqueCMSampleBuffer=}'}}}}}}), 'VTPixelTransferSessionGetTypeID': (sel32or64(b'L', 
b'Q'),), 'VTCompressionSessionGetTypeID': (sel32or64(b'L', b'Q'),), 'VTCompressionSessionGetPixelBufferPool': (b'^{__CVPixelBufferPool=}^{OpaqueVTCompressionSession=}',), 'VTFrameSiloCallFunctionForEachSampleBuffer': (sel32or64(b'l^{OpaqueVTFrameSilo=}{_CMTimeRange={_CMTime=qiIq}{_CMTime=qiIq}}^v^?', b'i^{OpaqueVTFrameSilo=}{_CMTimeRange={_CMTime=qiIq}{_CMTime=qiIq}}^v^?'), '', {'arguments': {3: {'callable': {'retval': {'type': b'i'}, 'arguments': {0: {'type': b'^v'}, 1: {'type': b'^{opaqueCMSampleBuffer=}'}}}, 'callable_retained': False}}}), 'VTDecompressionSessionCopyBlackPixelBuffer': (sel32or64(b'l^{OpaqueVTDecompressionSession=}^^{__CVBuffer=}', b'i^{OpaqueVTDecompressionSession=}^^{__CVBuffer=}'), '', {'retval': {'already_cfretained': True}, 'arguments': {1: {'already_cfretained': True, 'type_modifier': 'o'}}}), 'VTDecompressionSessionCanAcceptFormatDescription': (b'Z^{OpaqueVTDecompressionSession=}^{opaqueCMFormatDescription=}',), 'VTCopyVideoEncoderList': (sel32or64(b'l^{__CFDictionary=}^^{__CFArray=}', b'i^{__CFDictionary=}^^{__CFArray=}'), '', {'retval': {'already_cfretained': True}, 'arguments': {1: {'already_cfretained': True, 'type_modifier': 'o'}}}), 'VTMultiPassStorageCreate': (sel32or64(b'l^{__CFAllocator=}^{__CFURL=}{_CMTimeRange={_CMTime=qiIq}{_CMTime=qiIq}}^{__CFDictionary=}^^{OpaqueVTMultiPassStorage=}', b'i^{__CFAllocator=}^{__CFURL=}{_CMTimeRange={_CMTime=qiIq}{_CMTime=qiIq}}^{__CFDictionary=}^^{OpaqueVTMultiPassStorage=}'), '', {'retval': {'already_cfretained': True}, 'arguments': {4: {'already_cfretained': True, 'type_modifier': 'o'}}}), 'VTDecompressionSessionGetTypeID': (sel32or64(b'L', b'Q'),), 'VTCompressionSessionBeginPass': (sel32or64(b'l^{OpaqueVTCompressionSession=}I^I', b'i^{OpaqueVTCompressionSession=}I^I'),), 'VTSessionSetProperties': (sel32or64(b'l@^{__CFDictionary=}', b'i@^{__CFDictionary=}'),), 'VTDecompressionSessionWaitForAsynchronousFrames': (sel32or64(b'l^{OpaqueVTDecompressionSession=}', b'i^{OpaqueVTDecompressionSession=}'),), 'VTFrameSiloSetTimeRangesForNextPass': (sel32or64(b'l^{OpaqueVTFrameSilo=}l^{_CMTimeRange={_CMTime=qiIq}{_CMTime=qiIq}}', b'i^{OpaqueVTFrameSilo=}q^{_CMTimeRange={_CMTime=qiIq}{_CMTime=qiIq}}'), '', {'arguments': {2: {'c_array_length_in_arg': 1, 'type_modifier': 'n'}}}), 'VTRegisterProfessionalVideoWorkflowVideoDecoders': (b'v',), 'VTPixelTransferSessionInvalidate': (b'v^{OpaqueVTPixelTransferSession=}',), 'VTDecompressionSessionDecodeFrameWithOutputHandler': (sel32or64(b'l^{OpaqueVTDecompressionSession=}^{opaqueCMSampleBuffer=}I^L@?', b'i^{OpaqueVTDecompressionSession=}^{opaqueCMSampleBuffer=}I^I@?'), '', {'arguments': {3: {'type_modifier': 'o'}, 4: {'callable': {'retval': {'type': b'v'}, 'arguments': {0: {'type': '^v'}, 1: {'type': 'i'}, 2: {'type': 'I'}, 3: {'type': '^{__CVBuffer=}'}, 4: {'type': '{_CMTime=qiIq}'}, 5: {'type': '{_CMTime=qiIq}'}}}}}}), 'VTCompressionSessionEndPass': (sel32or64(b'l^{OpaqueVTCompressionSession=}^Z^I', b'i^{OpaqueVTCompressionSession=}^Z^I'), '', {'arguments': {1: {'type_modifier': 'o'}}}), 'VTCompressionSessionEncodeFrame': (sel32or64(b'l^{OpaqueVTCompressionSession=}^{__CVBuffer=}{_CMTime=qiIq}{_CMTime=qiIq}^{__CFDictionary=}^v^L', b'i^{OpaqueVTCompressionSession=}^{__CVBuffer=}{_CMTime=qiIq}{_CMTime=qiIq}^{__CFDictionary=}^v^I'), '', {'arguments': {6: {'type_modifier': 'o'}}}), 'VTFrameSiloCreate': (sel32or64(b'l^{__CFAllocator=}^{__CFURL=}{_CMTimeRange={_CMTime=qiIq}{_CMTime=qiIq}}^{__CFDictionary=}^^{OpaqueVTFrameSilo=}', 
b'i^{__CFAllocator=}^{__CFURL=}{_CMTimeRange={_CMTime=qiIq}{_CMTime=qiIq}}^{__CFDictionary=}^^{OpaqueVTFrameSilo=}'), '', {'retval': {'already_cfretained': True}, 'arguments': {4: {'already_cfretained': True, 'type_modifier': 'o'}}}), 'VTCompressionSessionCreate': (sel32or64(b'l^{__CFAllocator=}iiL^{__CFDictionary=}^{__CFDictionary=}^{__CFAllocator=}^?^v^^{OpaqueVTCompressionSession=}', b'i^{__CFAllocator=}iiI^{__CFDictionary=}^{__CFDictionary=}^{__CFAllocator=}^?^v^^{OpaqueVTCompressionSession=}'), '', {'retval': {'already_cfretained': True}, 'arguments': {9: {'already_cfretained': True, 'type_modifier': 'o'}, 7: {'callable': {'retval': {'type': b'v'}, 'arguments': {0: {'type': b'^v'}, 1: {'type': b'^v'}, 2: {'type': b'i'}, 3: {'type': b'I'}, 4: {'type': b'^{opaqueCMSampleBuffer=}'}}}}}}), 'VTCopySupportedPropertyDictionaryForEncoder': (sel32or64(b'liiL^{__CFDictionary=}^^{__CFString=}^^{__CFDictionary=}', b'iiiI^{__CFDictionary=}^^{__CFString=}^^{__CFDictionary=}'), '', {'retval': {'already_cfretained': True}, 'arguments': {4: {'already_cfretained': True, 'type_modifier': 'o'}, 5: {'already_cfretained': True, 'type_modifier': 'o'}}}), 'VTFrameSiloGetTypeID': (sel32or64(b'L', b'Q'),), 'VTSessionCopySerializableProperties': (sel32or64(b'l@^{__CFAllocator=}^^{__CFDictionary=}', b'i@^{__CFAllocator=}^^{__CFDictionary=}'), '', {'retval': {'already_cfretained': True}, 'arguments': {2: {'already_cfretained': True, 'type_modifier': 'o'}}}), 'VTDecompressionSessionInvalidate': (b'v^{OpaqueVTDecompressionSession=}',)}
aliases = {'VT_SUPPORT_COLORSYNC_PIXEL_TRANSFER': 'COREMEDIA_TRUE'}
cftypes=[('VTPixelTransferSessionRef', b'^{OpaqueVTPixelTransferSession=}', ':VTPixelTransferSessionGetTypeID', None), ('VTDecompressionSessionRef', b'^{OpaqueVTDecompressionSession=}', ':VTDecompressionSessionGetTypeID', None), ('VTFrameSiloRef', b'^{OpaqueVTFrameSilo=}', ':VTFrameSiloGetTypeID', None), ('VTSessionRef', b'^{OpaqueVTSession=}', ':VTSessionGetTypeID', None), ('VTMultiPassStorageRef', b'^{OpaqueVTMultiPassStorage=}', ':VTMultiPassStorageGetTypeID', None)]
expressions = {}
# END OF FILE
| 743.214286
| 9,512
| 0.80937
| 1,610
| 20,810
| 9.919876
| 0.238509
| 0.072006
| 0.023417
| 0.017532
| 0.240874
| 0.15046
| 0.12817
| 0.111577
| 0.092981
| 0.080458
| 0
| 0.029838
| 0.024027
| 20,810
| 27
| 9,513
| 770.740741
| 0.756524
| 0.004517
| 0
| 0.105263
| 1
| 0.105263
| 0.860412
| 0.784511
| 0
| 0
| 0
| 0
| 0
| 1
| 0.210526
| false
| 0.210526
| 0.052632
| 0.210526
| 0.263158
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
|
0
| 8
|
7e289d8467590acbc33df40d1c22d18b21da79ab
| 224
|
py
|
Python
|
segnlp/pretrained_features/__init__.py
|
AxlAlm/SegNLP
|
89b8d077952397dfcea089376b373b117bcf6a65
|
[
"Apache-2.0"
] | 1
|
2021-01-21T17:16:55.000Z
|
2021-01-21T17:16:55.000Z
|
segnlp/pretrained_features/__init__.py
|
AxlAlm/SegNLP
|
89b8d077952397dfcea089376b373b117bcf6a65
|
[
"Apache-2.0"
] | 2
|
2021-01-24T20:07:54.000Z
|
2021-01-26T16:59:28.000Z
|
segnlp/pretrained_features/__init__.py
|
AxlAlm/SegNLP
|
89b8d077952397dfcea089376b373b117bcf6a65
|
[
"Apache-2.0"
] | 1
|
2021-01-21T17:16:57.000Z
|
2021-01-21T17:16:57.000Z
|
from .bow import BOW
from .flair_embeddings import FlairEmbeddings
from .flair_embeddings import GloveEmbeddings
from .flair_embeddings import BertEmbeddings
from .dummy import DummyFeature
from .elmo import ELMoEmbeddings
| 28
| 45
| 0.861607
| 27
| 224
| 7.037037
| 0.444444
| 0.142105
| 0.3
| 0.394737
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.111607
| 224
| 8
| 46
| 28
| 0.954774
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
7e783ab793dc8a36abf3c0f01449b07ebe2c101f
| 7,137
|
py
|
Python
|
models.py
|
MILE-IISc/CRNN
|
88e7350f53a972b1cabc65aeb8c8b151cfd92f0e
|
[
"MIT"
] | null | null | null |
models.py
|
MILE-IISc/CRNN
|
88e7350f53a972b1cabc65aeb8c8b151cfd92f0e
|
[
"MIT"
] | null | null | null |
models.py
|
MILE-IISc/CRNN
|
88e7350f53a972b1cabc65aeb8c8b151cfd92f0e
|
[
"MIT"
] | null | null | null |
import numpy as np
from tensorflow.keras.layers import Input, Conv2D, BatchNormalization, MaxPooling2D, Reshape, Dense, LSTM, add, concatenate, Dropout, Lambda, Flatten
from tensorflow.keras.models import Model
import tensorflow.keras.backend as K
from STN.spatial_transformer import SpatialTransformer
def loc_net(input_shape):
b = np.zeros((2, 3), dtype='float32')
b[0, 0] = 1
b[1, 1] = 1
w = np.zeros((64, 6), dtype='float32')
weights = [w, b.flatten()]
loc_input = Input(input_shape)
loc_conv_1 = Conv2D(16, (5, 5), padding='same', activation='relu')(loc_input)
loc_conv_2 = Conv2D(32, (5, 5), padding='same', activation='relu')(loc_conv_1)
loc_fla = Flatten()(loc_conv_2)
loc_fc_1 = Dense(64, activation='relu')(loc_fla)
loc_fc_2 = Dense(6, weights=weights)(loc_fc_1)
output = Model(inputs=loc_input, outputs=loc_fc_2)
return output
def ctc_lambda_func(args):
iy_pred, ilabels, iinput_length, ilabel_length = args
# the 2 is critical here since the first couple outputs of the RNN
# tend to be garbage:
    iy_pred = iy_pred[:, 2:, :]  # drop the first two time steps before computing the loss
return K.ctc_batch_cost(ilabels, iy_pred, iinput_length, ilabel_length)
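# Editorial note on shapes (based on the Keras backend contract, stated here as
# an assumption): K.ctc_batch_cost(labels, y_pred, input_length, label_length)
# expects labels of shape (batch, max_label_len), y_pred of shape
# (batch, time_steps, num_classes), and both length tensors of shape (batch, 1);
# it returns a (batch, 1) tensor of per-sample CTC losses, which is why the
# Lambda layer in CRNN_STN declares output_shape=(1,).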
def CRNN_STN(cfg):
    inputs = Input((cfg.width, cfg.height, cfg.nb_channels))
    c_1 = Conv2D(cfg.conv_filter_size[0], (3, 3), activation='relu', padding='same', name='conv_1')(inputs)
    c_2 = Conv2D(cfg.conv_filter_size[1], (3, 3), activation='relu', padding='same', name='conv_2')(c_1)
    c_3 = Conv2D(cfg.conv_filter_size[2], (3, 3), activation='relu', padding='same', name='conv_3')(c_2)
    bn_3 = BatchNormalization(name='bn_3')(c_3)
    p_3 = MaxPooling2D(pool_size=(2, 2), name='maxpool_3')(bn_3)
    c_4 = Conv2D(cfg.conv_filter_size[3], (3, 3), activation='relu', padding='same', name='conv_4')(p_3)
    c_5 = Conv2D(cfg.conv_filter_size[4], (3, 3), activation='relu', padding='same', name='conv_5')(c_4)
    bn_5 = BatchNormalization(name='bn_5')(c_5)
    p_5 = MaxPooling2D(pool_size=(2, 2), name='maxpool_5')(bn_5)
    c_6 = Conv2D(cfg.conv_filter_size[5], (3, 3), activation='relu', padding='same', name='conv_6')(p_5)
    c_7 = Conv2D(cfg.conv_filter_size[6], (3, 3), activation='relu', padding='same', name='conv_7')(c_6)
    bn_7 = BatchNormalization(name='bn_7')(c_7)
    bn_7_shape = bn_7.get_shape()
    # int(...) works for both TF1 Dimension objects and TF2 plain ints,
    # unlike the TF1-only .value attribute.
    loc_input_shape = (int(bn_7_shape[1]), int(bn_7_shape[2]), int(bn_7_shape[3]))
    stn = SpatialTransformer(localization_net=loc_net(loc_input_shape), output_size=(loc_input_shape[0], loc_input_shape[1]))(bn_7)
    reshape = Reshape(target_shape=(int(bn_7_shape[1]), int(bn_7_shape[2] * bn_7_shape[3])), name='reshape')(stn)
    fc_9 = Dense(cfg.lstm_nb_units[0], activation='relu', name='fc_9')(reshape)
    lstm_10 = LSTM(cfg.lstm_nb_units[0], kernel_initializer="he_normal", return_sequences=True, name='lstm_10')(fc_9)
    lstm_10_back = LSTM(cfg.lstm_nb_units[0], kernel_initializer="he_normal", go_backwards=True, return_sequences=True, name='lstm_10_back')(fc_9)
    lstm_10_add = add([lstm_10, lstm_10_back])
    lstm_11 = LSTM(cfg.lstm_nb_units[1], kernel_initializer="he_normal", return_sequences=True, name='lstm_11')(lstm_10_add)
    lstm_11_back = LSTM(cfg.lstm_nb_units[1], kernel_initializer="he_normal", go_backwards=True, return_sequences=True, name='lstm_11_back')(lstm_10_add)
    lstm_11_concat = concatenate([lstm_11, lstm_11_back])
    do_11 = Dropout(cfg.dropout_rate, name='dropout')(lstm_11_concat)
    # Load the character set, one character per line.
    with open(cfg.characters_file) as f:
        cfg.characters = [line.rstrip('\n') for line in f]
    fc_12 = Dense(len(cfg.characters), kernel_initializer='he_normal', activation='softmax', name='fc_12')(do_11)
    prediction_model = Model(inputs=inputs, outputs=fc_12)
    labels = Input(name='labels', shape=[cfg.label_len], dtype='float32')
    input_length = Input(name='input_length', shape=[1], dtype='int64')
    label_length = Input(name='label_length', shape=[1], dtype='int64')
    ctc_loss = Lambda(ctc_lambda_func, output_shape=(1,), name='ctc')([fc_12, labels, input_length, label_length])
    training_model = Model(inputs=[inputs, labels, input_length, label_length], outputs=[ctc_loss])
    return training_model, prediction_model
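# Plain CRNN baseline: identical to CRNN_STN except that the spatial
# transformer between bn_7 and the reshape is omitted.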
def CRNN(cfg):
    inputs = Input((cfg.width, cfg.height, cfg.nb_channels))
    c_1 = Conv2D(cfg.conv_filter_size[0], (3, 3), activation='relu', padding='same', name='conv_1')(inputs)
    c_2 = Conv2D(cfg.conv_filter_size[1], (3, 3), activation='relu', padding='same', name='conv_2')(c_1)
    c_3 = Conv2D(cfg.conv_filter_size[2], (3, 3), activation='relu', padding='same', name='conv_3')(c_2)
    bn_3 = BatchNormalization(name='bn_3')(c_3)
    p_3 = MaxPooling2D(pool_size=(2, 2), name='maxpool_3')(bn_3)
    c_4 = Conv2D(cfg.conv_filter_size[3], (3, 3), activation='relu', padding='same', name='conv_4')(p_3)
    c_5 = Conv2D(cfg.conv_filter_size[4], (3, 3), activation='relu', padding='same', name='conv_5')(c_4)
    bn_5 = BatchNormalization(name='bn_5')(c_5)
    p_5 = MaxPooling2D(pool_size=(2, 2), name='maxpool_5')(bn_5)
    c_6 = Conv2D(cfg.conv_filter_size[5], (3, 3), activation='relu', padding='same', name='conv_6')(p_5)
    c_7 = Conv2D(cfg.conv_filter_size[6], (3, 3), activation='relu', padding='same', name='conv_7')(c_6)
    bn_7 = BatchNormalization(name='bn_7')(c_7)
    bn_7_shape = bn_7.get_shape()
    # Collapse height and channels into the feature dimension so each
    # horizontal position becomes one timestep for the recurrent layers.
    reshape = Reshape(target_shape=(int(bn_7_shape[1]), int(bn_7_shape[2] * bn_7_shape[3])), name='reshape')(bn_7)
    fc_9 = Dense(cfg.lstm_nb_units[0], activation='relu', name='fc_9')(reshape)
    lstm_10 = LSTM(cfg.lstm_nb_units[0], kernel_initializer="he_normal", return_sequences=True, name='lstm_10')(fc_9)
    lstm_10_back = LSTM(cfg.lstm_nb_units[0], kernel_initializer="he_normal", go_backwards=True, return_sequences=True, name='lstm_10_back')(fc_9)
    lstm_10_add = add([lstm_10, lstm_10_back])
    lstm_11 = LSTM(cfg.lstm_nb_units[1], kernel_initializer="he_normal", return_sequences=True, name='lstm_11')(lstm_10_add)
    lstm_11_back = LSTM(cfg.lstm_nb_units[1], kernel_initializer="he_normal", go_backwards=True, return_sequences=True, name='lstm_11_back')(lstm_10_add)
    lstm_11_concat = concatenate([lstm_11, lstm_11_back])
    do_11 = Dropout(cfg.dropout_rate, name='dropout')(lstm_11_concat)
    # Load the character set, one character per line.
    with open(cfg.characters_file) as f:
        cfg.characters = [line.rstrip('\n') for line in f]
    fc_12 = Dense(len(cfg.characters), kernel_initializer='he_normal', activation='softmax', name='fc_12')(do_11)
    prediction_model = Model(inputs=inputs, outputs=fc_12)
    labels = Input(name='labels', shape=[cfg.label_len], dtype='float32')
    input_length = Input(name='input_length', shape=[1], dtype='int64')
    label_length = Input(name='label_length', shape=[1], dtype='int64')
    ctc_loss = Lambda(ctc_lambda_func, output_shape=(1,), name='ctc')([fc_12, labels, input_length, label_length])
    training_model = Model(inputs=[inputs, labels, input_length, label_length], outputs=[ctc_loss])
    return training_model, prediction_model
| 49.909091
| 153
| 0.702396
| 1,157
| 7,137
| 4.029386
| 0.121867
| 0.057057
| 0.039039
| 0.057057
| 0.800944
| 0.800944
| 0.800944
| 0.788074
| 0.788074
| 0.788074
| 0
| 0.053678
| 0.133389
| 7,137
| 142
| 154
| 50.260563
| 0.700081
| 0.014292
| 0
| 0.701031
| 0
| 0
| 0.089319
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.041237
| false
| 0
| 0.051546
| 0
| 0.134021
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
0e1a7daa3346d7ebf50def2f78637c7e9f3ca2de
| 145
|
py
|
Python
|
satchmo/upsell/__init__.py
|
sankroh/satchmo
|
e48df0c2a4be4ce14785d0a5d6dd1e516c57a838
|
[
"BSD-3-Clause"
] | 1
|
2016-05-09T12:21:04.000Z
|
2016-05-09T12:21:04.000Z
|
satchmo/upsell/__init__.py
|
sankroh/satchmo
|
e48df0c2a4be4ce14785d0a5d6dd1e516c57a838
|
[
"BSD-3-Clause"
] | null | null | null |
satchmo/upsell/__init__.py
|
sankroh/satchmo
|
e48df0c2a4be4ce14785d0a5d6dd1e516c57a838
|
[
"BSD-3-Clause"
] | null | null | null |
from satchmo.shop.signals import satchmo_cart_add_complete
import views
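# Run the upsell listener whenever Satchmo signals that an item was added
# to the cart.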
satchmo_cart_add_complete.connect(views.cart_add_listener, sender=None)
| 29
| 71
| 0.882759
| 22
| 145
| 5.454545
| 0.590909
| 0.175
| 0.233333
| 0.366667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.062069
| 145
| 4
| 72
| 36.25
| 0.882353
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
0e26d71432e605cd7c3151ffb8e0db8d90cb19f7
| 219
|
py
|
Python
|
portfolio/mainapp/admin.py
|
PlagueEvgeny/Portfolio
|
aa7199fe23a10d3a8ad1e3307dbdbd50f9e150c3
|
[
"MIT"
] | null | null | null |
portfolio/mainapp/admin.py
|
PlagueEvgeny/Portfolio
|
aa7199fe23a10d3a8ad1e3307dbdbd50f9e150c3
|
[
"MIT"
] | null | null | null |
portfolio/mainapp/admin.py
|
PlagueEvgeny/Portfolio
|
aa7199fe23a10d3a8ad1e3307dbdbd50f9e150c3
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from mainapp.models import Hello, About, Portfolio, Image, Certificate, Facts, Tools, UseTools
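# admin.site.register accepts an iterable, registering every model with the
# default ModelAdmin.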
admin.site.register([Hello, About, Portfolio, Image, Certificate, Facts, Tools, UseTools])
| 54.75
| 94
| 0.794521
| 28
| 219
| 6.214286
| 0.607143
| 0.114943
| 0.218391
| 0.275862
| 0.609195
| 0.609195
| 0.609195
| 0.609195
| 0
| 0
| 0
| 0
| 0.105023
| 219
| 4
| 95
| 54.75
| 0.887755
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
0e468d21267a052e0c0f3fea765b8ae71dcbce0e
| 239
|
py
|
Python
|
omnipath/_core/requests/__init__.py
|
TheAustinator/omnipath
|
c0d61216e3a7d95cdb098ac025f08a9b09e0f8fe
|
[
"MIT"
] | 13
|
2020-12-02T15:47:54.000Z
|
2022-03-08T02:39:55.000Z
|
omnipath/_core/requests/__init__.py
|
TheAustinator/omnipath
|
c0d61216e3a7d95cdb098ac025f08a9b09e0f8fe
|
[
"MIT"
] | 15
|
2020-11-10T13:03:09.000Z
|
2022-03-29T08:51:38.000Z
|
omnipath/_core/requests/__init__.py
|
michalk8/omnipath
|
014ddf046d07ad16ce722e2280b39796598ca9a8
|
[
"MIT"
] | 2
|
2020-11-06T23:08:19.000Z
|
2022-03-28T21:46:33.000Z
|
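# Re-export the public request classes at the package level.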
from omnipath._core.requests._request import Enzsub, SignedPTMs
from omnipath._core.requests._complexes import Complexes
from omnipath._core.requests._intercell import Intercell
from omnipath._core.requests._annotations import Annotations
| 47.8
| 63
| 0.874477
| 29
| 239
| 6.931034
| 0.37931
| 0.238806
| 0.318408
| 0.477612
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.07113
| 239
| 4
| 64
| 59.75
| 0.905405
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
7ebf5c80a509dceef9b257b6bf7c064e4b28ec75
| 148
|
py
|
Python
|
utils/__init__.py
|
gaoliyao/Replica_Exchange_Stochastic_Gradient_MCMC
|
609f803ca334b21820dc020c16ad8113363a03e2
|
[
"MIT"
] | 21
|
2020-06-30T01:14:45.000Z
|
2022-03-31T08:03:17.000Z
|
utils/__init__.py
|
gaoliyao/Replica_Exchange_Stochastic_Gradient_MCMC
|
609f803ca334b21820dc020c16ad8113363a03e2
|
[
"MIT"
] | null | null | null |
utils/__init__.py
|
gaoliyao/Replica_Exchange_Stochastic_Gradient_MCMC
|
609f803ca334b21820dc020c16ad8113363a03e2
|
[
"MIT"
] | 2
|
2020-11-02T22:06:10.000Z
|
2021-07-17T23:35:21.000Z
|
#from .misc import Plot
from .data_manipulation import *
from .data_operation import *
from .chains_operation import *
from .pytorch_label import *
| 24.666667
| 32
| 0.797297
| 20
| 148
| 5.7
| 0.5
| 0.263158
| 0.333333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.135135
| 148
| 5
| 33
| 29.6
| 0.890625
| 0.148649
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
7d13fa7bef82bef71e888e9436d314c7b7b87927
| 76,258
|
py
|
Python
|
recommenders/RM_start_of_studies.py
|
virtUOS/siddata_backend
|
e2e09fbb51e650ede2e743ad0a1169c71c064494
|
[
"MIT"
] | null | null | null |
recommenders/RM_start_of_studies.py
|
virtUOS/siddata_backend
|
e2e09fbb51e650ede2e743ad0a1169c71c064494
|
[
"MIT"
] | 4
|
2022-03-28T08:10:46.000Z
|
2022-03-28T09:29:04.000Z
|
recommenders/RM_start_of_studies.py
|
virtUOS/siddata_backend
|
e2e09fbb51e650ede2e743ad0a1169c71c064494
|
[
"MIT"
] | null | null | null |
from backend import models
from recommenders.RM_BASE import RM_BASE
import pandas as pd
import os
class RM_start_of_studies(RM_BASE):
""" Recommender that contains
- initial activities (guided tour)
- teaser activities that introduce all recommenders
- salient or emergent activities
In addition, the frontend adds a tile view of all recommenders available.
"""
def __init__(self):
# This magic line of code lets our recommender inherit all attributes and methods from the base class.
super().__init__()
# This name should be unique and appears in the Stud.IP GUI.
self.NAME = "Orientierung zum Studienstart"
# The description text summarizes the functionality. It is shown in the teaser activity and tile view.
self.DESCRIPTION = "Hilfe in der Studieneingangsphase."
# This text is displayed in Teaser Activities
self.TEASER_TEXT = "Unter Orientierung zum Studienstart kannst du dich über verschiedene Themen rund um " \
"den Studienstart informieren. Außerdem werden dir auf Basis deiner Themenwünsche " \
"konkrete Workshops und Trainings für den Studieneinstieg empfohlen."
# If set to False, the recommender will not appear in the GUI or the DB
self.ACTIVE = True
# Image is shown in teaser activity
self.IMAGE = "professions.png"
# determines arrangement in Frontend
self.ORDER = 1
# This string tells the user which data is required for this recommender
self.DATA_INFO = "Diese Funktion speichert diejenigen Daten, die du aktiv eingibst, und nutzt Daten zu deiner Uni, um passgenaue Empfehlungen zu geben."
# Reference to the database object; can be used as the value for the recommender attribute in goals.
self.recommender = models.Recommender.objects.get_or_create(
name=self.get_name(),
description=self.DESCRIPTION,
classname=self.__class__.__name__,
image=self.IMAGE,
order=self.ORDER,
data_info=self.DATA_INFO,
)[0]
self.dirname = os.path.dirname(__file__)
self.filename = os.path.join(self.dirname, 'recommender_data/start_of_studies_data.csv')
self.course_df = pd.read_csv(self.filename, delimiter=',')
def initialize_templates(self):
"""
Creates and updates all templates of this recommender
Is executed at server startup
"""
super().initialize_templates()
order = 1
# create template for general information question
general_information_question = models.Question.objects.create(
question_text="Der Studienstart ist mit verschiedenen Herausforderungen und Chancen verbunden. Hier "
"findest du passende Informationen und Unterstützungsangebote:",
answer_type="checkbox",
selection_answers=["Studienverlauf, Prüfungen, Rückmeldung",
"Literatur ausleihen",
"Raum für Gruppenarbeiten",
"Raum zum ruhigen Arbeiten",
"Studium und Familie",
"Sport treiben",
"Eine weitere Sprache lernen",
"Auslandssemester",
"Probleme im Studium",
"Technische Fragen und Probleme",
"Finanzierung meines Studiums",
"Wohnen",
"Was gibt es heute zum Mittag?",
]
)
models.ActivityTemplate.objects.update_or_create(
template_id=self.get_template_id("general_information_question"),
defaults={
"title": "Generelle Infos zum Studium",
"description": "Orientierung zum Studienstart",
"type": "question",
"status": "template",
"question": general_information_question,
"feedback_size": 0,
"image": self.IMAGE,
"order": order,
}
)
order += 1
# create template for general recommendations question
general_recommendation_question = models.Question.objects.create(
question_text="Wusstest du schon: deine Universität bietet neben den Vorlesungen deines Studiengangs auch Veranstaltungen an, die dich in deinem Studium "
"unterstützen sollen. Welche Themen interessieren dich? Wenn du deine Auswahl getroffen "
"hast, bekommst du konkrete Veranstaltungen vorgeschlagen.",
answer_type="checkbox",
selection_answers=["Wissenschaftliches Arbeiten",
"Lernen & Gedächtnis",
"Prüfungsvorbereitung",
"Präsentation & Kommunikation",
"Projektmanagement",
"Selbst- und Zeitmanagement",
"Gesund studieren",
"Sport & Bewegung",
"Literaturverwaltung",
"Schreibtraining",
]
)
models.ActivityTemplate.objects.update_or_create(
template_id=self.get_template_id("general_recommendation_question"),
defaults={
"title": "Persönliche Weiterentwicklung",
"description": "Orientierung zum Studienstart",
"type": "question",
"status": "template",
"question": general_recommendation_question,
"feedback_size": 0,
"image": self.IMAGE,
"order": order,
}
)
order += 1
# create course recommendation templates
for index in self.course_df.index:
models.ActivityTemplate.objects.update_or_create(
template_id=self.get_template_id("course_recommendation_{}".format(index)),
defaults={
"title": "Veranstaltung zum Thema '{}'".format(self.course_df["Categories"].iloc[index]),
"description": "<p><strong>Volltreffer! Wir haben eine Kursempfehlung für dich:</strong></p>"
"<ul>"
"<li>Veranstaltung: {}</li> "
"<li>Beschreibung: {}</li> "
"<li>Zeit: {}</li> "
"<li>Ort: {}</li> "
"<li>Link: <a href='{}' target='_blank'>Hier geht es zum Kurs.</a></li>"
"</ul>".format(self.course_df["Course"].iloc[index],
self.course_df["Description"].iloc[index],
self.course_df["Date"].iloc[index],
self.course_df["Location"].iloc[index],
self.course_df["Registration"].iloc[index]),
"type": "todo",
"status": "template",
"feedback_size": 0,
"image": self.IMAGE,
# all order attributes were changed to unique values in the DataFrame
"order": self.course_df["Order"].iloc[index],
}
)
# incrementing order is no longer necessary inside the for-loop
# order += 1
models.ActivityTemplate.objects.update_or_create(
template_id=self.get_template_id("library_OS"),
defaults={
"title": "Universitätsbibliothek",
"description": "Wenn du dir Literatur ausleihen möchtest, kann dir die Bibliothek deiner Universität"
"helfen. <a href='https://www.ub.uni-osnabrueck.de/startseite.html' target='_blank'>Hier geht es zur "
"Website.</a>",
"type": "todo",
"status": "template",
"feedback_size": 0,
"image": self.IMAGE,
"order": order,
}
)
order += 1
models.ActivityTemplate.objects.update_or_create(
template_id=self.get_template_id("library_UB"),
defaults={
"title": "Universitätsbibliothek",
"description": "Wenn du dir Literatur ausleihen möchtest, kann dir die Bibliothek deiner Universität"
"helfen. <a href='https://www.suub.uni-bremen.de/' target='_blank'>Hier geht es zur "
"Website.</a>",
"type": "todo",
"status": "template",
"feedback_size": 0,
"image": self.IMAGE,
"order": order,
}
)
order += 1
models.ActivityTemplate.objects.update_or_create(
template_id=self.get_template_id("library_LUH"),
defaults={
"title": "Universitätsbibliothek",
"description": "Wenn du dir Literatur ausleihen möchtest, kann dir die Bibliothek deiner Universität"
"helfen. <a href='https://www.tib.eu/de/' target='_blank'>Hier geht es zur "
"Website.</a>",
"type": "todo",
"status": "template",
"feedback_size": 0,
"image": self.IMAGE,
"order": order,
}
)
order += 1
models.ActivityTemplate.objects.update_or_create(
template_id=self.get_template_id("rooms_OS"),
defaults={
"title": "Lern- und Gruppenarbeitsräume",
"description": "Wenn du einen ruhigen Platz zum Arbeiten suchst oder einen Ort für Gruppenarbeiten"
" benötigst, kannst du dir hier einen Raum buchen. "
"<a href='https://www.ub.uni-osnabrueck.de/startseite.html'"
"target='_blank'>Hier geht es zur "
"Website.</a>",
"type": "todo",
"status": "template",
"feedback_size": 0,
"image": self.IMAGE,
"order": order,
}
)
order += 1
models.ActivityTemplate.objects.update_or_create(
template_id=self.get_template_id("rooms_UB"),
defaults={
"title": "Lern- und Gruppenarbeitsräume",
"description": "Wenn du einen ruhigen Platz zum Arbeiten suchst oder einen Ort für Gruppenarbeiten"
" benötigst,kannst du dir hier einen Raum buchen. "
"<a href='https://www.uni-bremen.de/universitaet/campus/lernraeume'"
"target='_blank'>Hier geht es zur "
"Website.</a>",
"type": "todo",
"status": "template",
"feedback_size": 0,
"image": self.IMAGE,
"order": order,
}
)
order += 1
models.ActivityTemplate.objects.update_or_create(
template_id=self.get_template_id("rooms_LUH"),
defaults={
"title": "Lern- und Gruppenarbeitsräume",
"description": "Wenn du einen ruhigen Platz zum Arbeiten suchst oder einen Ort für Gruppenarbeiten"
" benötigst,kannst du dir hier einen Raum buchen. "
"<a href='https://www.zqs.uni-hannover.de/de/qs/lernraum/'"
"target='_blank'>Hier geht es zur "
"Website.</a>",
"type": "todo",
"status": "template",
"feedback_size": 0,
"image": self.IMAGE,
"order": order,
}
)
order += 1
models.ActivityTemplate.objects.update_or_create(
template_id=self.get_template_id("problems_OS"),
defaults={
"title": "Wenn es mal nicht so läuft...",
"description": "Schwierige Phasen erlebt jeder einmal, im Studium und im Privaten."
" Nicht immer lassen sich diese Hindernisse schnell und aus eigener Kraft überwinden."
" Wenn du in einer solchen Phase bist, scheue dich nicht Hilfe anzunehmen. "
"<a href='https://www.studentenwerk-osnabrueck.de/de/beratung/"
"psychologische-beratung.html' "
"target='_blank'>Hier geht es zur Website.</a>",
"type": "todo",
"status": "template",
"feedback_size": 0,
"image": self.IMAGE,
"order": order,
}
)
order += 1
models.ActivityTemplate.objects.update_or_create(
template_id=self.get_template_id("problems_UB"),
defaults={
"title": "Wenn es mal nicht so läuft...",
"description": "Schwierige Phasen erlebt jeder einmal, im Studium und im Privaten."
" Nicht immer lassen sich diese Hindernisse schnell und aus eigener Kraft überwinden."
" Wenn du in einer solchen Phase bist, scheue dich nicht Hilfe anzunehmen. "
"<a href='https://www.stw-bremen.de/de/beratung/psychologische-beratung'"
"target='_blank'>Hier geht es zur Website.</a>",
"type": "todo",
"status": "template",
"feedback_size": 0,
"image": self.IMAGE,
"order": order,
}
)
order += 1
models.ActivityTemplate.objects.update_or_create(
template_id=self.get_template_id("problems_LUH"),
defaults={
"title": "Wenn es mal nicht so läuft...",
"description": "Schwierige Phasen erlebt jeder einmal, im Studium und im Privaten."
" Nicht immer lassen sich diese Hindernisse schnell und aus eigener Kraft überwinden."
" Wenn du in einer solchen Phase bist, scheue dich nicht Hilfe anzunehmen. "
"<a href='https://www.ptb.uni-hannover.de/'"
"target='_blank'>Hier geht es zur Website.</a>",
"type": "todo",
"status": "template",
"feedback_size": 0,
"image": self.IMAGE,
"order": order,
}
)
order += 1
models.ActivityTemplate.objects.update_or_create(
template_id=self.get_template_id("language_OS"),
defaults={
"title": "Sprachen lernen",
"description": "Möchtest du eine neue Fremdsprache lernen oder deine Kenntnisse auffrischen?"
" Das Fremdsprachenangebot an Grund-, Aufbau- und Vertiefungskursen findest du hier: "
"<a href='https://www.uni-osnabrueck.de/universitaet/organisation/"
"zentrale-einrichtungen/sprachenzentrum/'"
"target='_blank'>Hier geht es zur Website.</a>",
"type": "todo",
"status": "template",
"feedback_size": 0,
"image": self.IMAGE,
"order": order,
}
)
order += 1
models.ActivityTemplate.objects.update_or_create(
template_id=self.get_template_id("language_UB"),
defaults={
"title": "Sprachen lernen",
"description": "Möchtest du eine neue Fremdsprache lernen oder deine Kenntnisse auffrischen?"
" Das Fremdsprachenangebot an Grund-, Aufbau- und Vertiefungskursen findest du hier: "
"<a href='https://www.fremdsprachenzentrum-bremen.de/'"
"target='_blank'>Hier geht es zur Website.</a>",
"type": "todo",
"status": "template",
"feedback_size": 0,
"image": self.IMAGE,
"order": order,
}
)
order += 1
models.ActivityTemplate.objects.update_or_create(
template_id=self.get_template_id("language_LUH"),
defaults={
"title": "Sprachen lernen",
"description": "Möchtest du eine neue Fremdsprache lernen oder deine Kenntnisse auffrischen?"
" Das Fremdsprachenangebot an Grund-, Aufbau- und Vertiefungskursen findest du hier: "
"<a href='https://www.llc.uni-hannover.de/'"
"target='_blank'>Hier geht es zur Website.</a>",
"type": "todo",
"status": "template",
"feedback_size": 0,
"image": self.IMAGE,
"order": order,
}
)
order += 1
models.ActivityTemplate.objects.update_or_create(
template_id=self.get_template_id("sports_OS"),
defaults={
"title": "Sportangebote",
"description": "Jedes Semester stehen dir zahlreiche Sportangebote im Rahmen des Hochschulsports zur "
"Verfügung. <a href='https://www.zfh.uni-osnabrueck.de/startseite.html'"
"target='_blank'>Hier geht es zur Website.</a>",
"type": "todo",
"status": "template",
"feedback_size": 0,
"image": self.IMAGE,
"order": order,
}
)
order += 1
models.ActivityTemplate.objects.update_or_create(
template_id=self.get_template_id("sports_UB"),
defaults={
"title": "Sportangebote",
"description": "Jedes Semester stehen dir zahlreiche Sportangebote im Rahmen des Hochschulsports zur "
"Verfügung. <a href='https://www.uni-bremen.de/hospo'"
"target='_blank'>Hier geht es zur Website.</a>",
"type": "todo",
"status": "template",
"feedback_size": 0,
"image": self.IMAGE,
"order": order,
}
)
order += 1
models.ActivityTemplate.objects.update_or_create(
template_id=self.get_template_id("sports_LUH"),
defaults={
"title": "Sportangebote",
"description": "Jedes Semester stehen dir zahlreiche Sportangebote im Rahmen des Hochschulsports zur "
"Verfügung. <a href='https://www.hochschulsport-hannover.de/'"
"target='_blank'>Hier geht es zur Website.</a>",
"type": "todo",
"status": "template",
"feedback_size": 0,
"image": self.IMAGE,
"order": order,
}
)
order += 1
models.ActivityTemplate.objects.update_or_create(
template_id=self.get_template_id("international_office_OS"),
defaults={
"title": "International Office",
"description": "Wenn du dich für einen Studienaufenthalt im Ausland interessiert, erhältst du hier "
"Informationen und Unterstützung bei der Planung. "
"<a href='https://www.uni-osnabrueck.de/universitaet/organisation/studentisches/"
"international-office/' target='_blank'>Hier geht es zur Website.</a>",
"type": "todo",
"status": "template",
"feedback_size": 0,
"image": self.IMAGE,
"order": order,
}
)
order += 1
models.ActivityTemplate.objects.update_or_create(
template_id=self.get_template_id("international_office_UB"),
defaults={
"title": "International Office",
"description": "Wenn du dich für einen Studienaufenthalt im Ausland interessiert, erhältst du hier "
"Informationen und Unterstützung bei der Planung. "
"<a href='https://www.uni-bremen.de/universitaet/profil/international/"
"international-office' target='_blank'>Hier geht es zur Website.</a>",
"type": "todo",
"status": "template",
"feedback_size": 0,
"image": self.IMAGE,
"order": order,
}
)
order += 1
models.ActivityTemplate.objects.update_or_create(
template_id=self.get_template_id("international_office_LUH"),
defaults={
"title": "International Office",
"description": "Wenn du dich für einen Studienaufenthalt im Ausland interessiert, erhältst du hier "
"Informationen und Unterstützung bei der Planung. "
"<a href='https://www.uni-hannover.de/de/universitaet/internationales/'"
"_blank'>Hier geht es zur Website.</a>",
"type": "todo",
"status": "template",
"feedback_size": 0,
"image": self.IMAGE,
"order": order,
}
)
order += 1
models.ActivityTemplate.objects.update_or_create(
template_id=self.get_template_id("rechenzentrum_OS"),
defaults={
"title": "Rechenzentrum",
"description": "Bei technischen Fragen (z.B. zu deinem Uni-Account, zum WLAN) findest du hier "
"Informationen und Hilfestellungen."
"<ul> <li><a href='https://www.rz.uni-osnabrueck.de/' target='_blank'>"
"Hier geht es zur Website des Rechenzentrums.</a></li>"
"<li><a href='https://www.rz.uni-osnabrueck.de/dienste/leitfaden/"
"leitfaden_studierende.html' target='_blank'>Hier geht es zu einem Leitfaden für "
"Erstsemester</a></li></ul>",
"type": "todo",
"status": "template",
"feedback_size": 0,
"image": self.IMAGE,
"order": order,
}
)
order += 1
models.ActivityTemplate.objects.update_or_create(
template_id=self.get_template_id("rechenzentrum_UB"),
defaults={
"title": "Rechenzentrum",
"description": "Bei technischen Fragen (z.B. zu deinem Uni-Account, zum WLAN) findest du hier "
"Informationen und Hilfestellungen. "
"<a href='https://www.uni-bremen.de/zfn' target='_blank'>"
"Hier geht es zur Website.</a>",
"type": "todo",
"status": "template",
"feedback_size": 0,
"image": self.IMAGE,
"order": order,
}
)
order += 1
models.ActivityTemplate.objects.update_or_create(
template_id=self.get_template_id("rechenzentrum_LUH"),
defaults={
"title": "Rechenzentrum",
"description": "Bei technischen Fragen (z.B. zu deinem Uni-Account, zum WLAN) findest du hier "
"Informationen und Hilfestellungen. "
"<a href='https://www.luis.uni-hannover.de/' target='_blank'>"
"Hier geht es zur Website.</a>",
"type": "todo",
"status": "template",
"feedback_size": 0,
"image": self.IMAGE,
"order": order,
}
)
order += 1
models.ActivityTemplate.objects.update_or_create(
template_id=self.get_template_id("sekretariat_OS"),
defaults={
"title": "Studierendensekretariat",
"description": "Das Studierendensekretariat ist zuständig für die den Studierendenstatus betreffenden "
"administrativen Vorgänge während der gesamten Studienzeit z.B. der Rückmeldung, der "
"Campuscard oder der Beurlaubung."
"<ul> <li><a href='https://www.uni-osnabrueck.de/universitaet/organisation/"
"studentisches/studierenden-information-osnabrueck-studios/'"
" target='_blank'>Hier geht es zur Website der Studierenden Information Osnabrück."
"</a></li>"
"<li><a href='https://www.uni-osnabrueck.de/universitaet/organisation/"
"zentrale-verwaltung/studentische-angelegenheiten/studierendenservice/'"
" target='_blank'>Hier geht es zur Website vom Studierendenservice.</a></li></ul>",
"type": "todo",
"status": "template",
"feedback_size": 0,
"image": self.IMAGE,
"order": order,
}
)
order += 1
models.ActivityTemplate.objects.update_or_create(
template_id=self.get_template_id("sekretariat_UB"),
defaults={
"title": "Studierendensekretariat",
"description": "Das Studierendensekretariat ist zuständig für die den Studierendenstatus betreffenden "
"administrativen Vorgänge während der gesamten Studienzeit z.B. der Rückmeldung, der "
"Campuscard oder der Beurlaubung. "
"<a href='https://www.uni-bremen.de/sfs' target='_blank'> Hier geht es zur Website.",
"type": "todo",
"status": "template",
"feedback_size": 0,
"image": self.IMAGE,
"order": order,
}
)
order += 1
models.ActivityTemplate.objects.update_or_create(
template_id=self.get_template_id("sekretariat_LUH"),
defaults={
"title": "Studierendensekretariat",
"description": "Das Studierendensekretariat ist zuständig für die den Studierendenstatus betreffenden "
"administrativen Vorgänge während der gesamten Studienzeit z.B. der Rückmeldung, der "
"Campuscard oder der Beurlaubung. "
"<a href='https://www.uni-hannover.de/de/studium/beratung-und-hilfe/servicecenter/'"
" target='_blank'> Hier geht es zur Website.",
"type": "todo",
"status": "template",
"feedback_size": 0,
"image": self.IMAGE,
"order": order,
}
)
order += 1
models.ActivityTemplate.objects.update_or_create(
template_id=self.get_template_id("studienberatung_OS"),
defaults={
"title": "Studienberatung",
"description": "Die Studienberatung ist Anlaufstelle bei Fragen und Anliegen,"
" die im Zusammenhang mit der Wahl oder Durchführung eines Studiums auftreten. "
"<a href='https://www.zsb-os.de/' target='_blank'>Hier geht es zur Website.</a>",
"type": "todo",
"status": "template",
"feedback_size": 0,
"image": self.IMAGE,
"order": order,
}
)
order += 1
models.ActivityTemplate.objects.update_or_create(
template_id=self.get_template_id("studienberatung_UB"),
defaults={
"title": "Studienberatung",
"description": "Die Studienberatung ist Anlaufstelle bei Fragen und Anliegen,"
" die im Zusammenhang mit der Wahl oder Durchführung eines Studiums auftreten. "
"<a href='https://www.uni-bremen.de/zsb' target='_blank'>Hier geht es zur Website.</a>",
"type": "todo",
"status": "template",
"feedback_size": 0,
"image": self.IMAGE,
"order": order,
}
)
order += 1
models.ActivityTemplate.objects.update_or_create(
template_id=self.get_template_id("studienberatung_LUH"),
defaults={
"title": "Studienberatung",
"description": "Die Studienberatung ist Anlaufstelle bei Fragen und Anliegen,"
" die im Zusammenhang mit der Wahl oder Durchführung eines Studiums auftreten. "
"<a href='https://www.uni-hannover.de/de/universitaet/organisation/"
"dezernate/dezernat-6/sg-63-zentrale-studienberatung/'"
"target='_blank'>Hier geht es zur Website.</a>",
"type": "todo",
"status": "template",
"feedback_size": 0,
"image": self.IMAGE,
"order": order,
}
)
order += 1
models.ActivityTemplate.objects.update_or_create(
template_id=self.get_template_id("funding_OS"),
defaults={
"title": "Finanzierung und Förderung",
"description": "Hier findest du Informationen rund um die Finanzierung deines Studiums z.B. "
"zum BAföG oder zu Stipendienprogrammen:"
"<ul><li><a href='https://www.studentenwerk-osnabrueck.de/de/finanzen/bafoeg.html' "
"target='_blank'>BAföG-Abteilung des Studentenwerks</a></li>"
"<li><a href='https://www.uni-osnabrueck.de/studieninteressierte/"
"stipendien-und-foerderung/'target='_blank'>Stipendien & Förderung</a></li></ul>",
"type": "todo",
"status": "template",
"feedback_size": 0,
"image": self.IMAGE,
"order": order,
}
)
order += 1
models.ActivityTemplate.objects.update_or_create(
template_id=self.get_template_id("funding_UB"),
defaults={
"title": "Finanzierung und Förderung",
"description": "Hier findest du Informationen rund um die Finanzierung deines Studiums z.B. "
"zum BAföG oder zu Stipendienprogrammen:"
"<a href='https://www.uni-bremen.de/studium/rund-ums-studium"
"studienfinanzierung-und-jobben' target='_blank'>Studienfinanzierung und Jobben</a>",
"type": "todo",
"status": "template",
"feedback_size": 0,
"image": self.IMAGE,
"order": order,
}
)
order += 1
models.ActivityTemplate.objects.update_or_create(
template_id=self.get_template_id("funding_LUH"),
defaults={
"title": "Finanzierung und Förderung",
"description": "Hier findest du Informationen rund um die Finanzierung deines Studiums z.B. "
"zum BAföG oder zu Stipendienprogrammen: "
"<a href='https://www.uni-hannover.de/de/studium/finanzierung-foerderung/'"
"target='_blank'>Hier geht es zur Website.</a>",
"type": "todo",
"status": "template",
"feedback_size": 0,
"image": self.IMAGE,
"order": order,
}
)
order += 1
models.ActivityTemplate.objects.update_or_create(
template_id=self.get_template_id("digitization_OS"),
defaults={
"title": "Digitalisierung von Studium und Lehre",
"description": "Bei Fragen rund um das Thema digitale Medien in Studium und Lehre (z.B. zu Stud.IP) "
"findest du hier Informationen und Hilfestellungen: "
"<a href='https://www.virtuos.uni-osnabrueck.de/zentrum_fuer_digitale_lehre_campus_"
"management_und_hochschuldidaktik.html' target='_blank'>Hier geht es zur Website.</a>",
"type": "todo",
"status": "template",
"feedback_size": 0,
"image": self.IMAGE,
"order": order,
}
)
order += 1
models.ActivityTemplate.objects.update_or_create(
template_id=self.get_template_id("digitization_UB"),
defaults={
"title": "Digitalisierung von Studium und Lehre",
"description": "Bei Fragen rund um das Thema digitale Medien in Studium und Lehre (z.B. zu Stud.IP) "
"findest du hier Informationen und Hilfestellungen: "
"<a href='https://www.uni-bremen.de/zmml' target='_blank'>Hier geht es zur Website.</a>",
"type": "todo",
"status": "template",
"feedback_size": 0,
"image": self.IMAGE,
"order": order,
}
)
order += 1
models.ActivityTemplate.objects.update_or_create(
template_id=self.get_template_id("digitization_LUH"),
defaults={
"title": "Digitalisierung von Studium und Lehre",
"description": "Bei Fragen rund um das Thema digitale Medien in Studium und Lehre (z.B. zu Stud.IP) "
"findest du hier Informationen und Hilfestellungen: "
"<a href='https://www.zqs.uni-hannover.de/de/elsa/'"
" target='_blank'>Hier geht es zur Website.</a>",
"type": "todo",
"status": "template",
"feedback_size": 0,
"image": self.IMAGE,
"order": order,
}
)
order += 1
models.ActivityTemplate.objects.update_or_create(
template_id=self.get_template_id("family_OS"),
defaults={
"title": "Studieren mit Familie",
"description": "Alle Informationen rund um das Thema Studieren mit Familie (z.B. Kinderbetreuung, "
"Pflege von Angehörigen) findest du hier: "
"<a href='https://www.uni-osnabrueck.de/universitaet/organisation/familien-service/'"
" target='_blank'>Hier geht es zur Website.</a>",
"type": "todo",
"status": "template",
"feedback_size": 0,
"image": self.IMAGE,
"order": order,
}
)
order += 1
models.ActivityTemplate.objects.update_or_create(
template_id=self.get_template_id("family_UB"),
defaults={
"title": "Studieren mit Familie",
"description": "Alle Informationen rund um das Thema Studieren mit Familie (z.B. Kinderbetreuung, "
"Pflege von Angehörigen) findest du hier: "
"<a href='https://www.uni-bremen.de/familie/studierende'"
" target='_blank'>Hier geht es zur Website.</a>",
"type": "todo",
"status": "template",
"feedback_size": 0,
"image": self.IMAGE,
"order": order,
}
)
order += 1
models.ActivityTemplate.objects.update_or_create(
template_id=self.get_template_id("family_LUH"),
defaults={
"title": "Studieren mit Familie",
"description": "Alle Informationen rund um das Thema Studieren mit Familie (z.B. Kinderbetreuung, "
"Pflege von Angehörigen) findest du hier: "
"<a href='https://www.chancenvielfalt.uni-hannover.de/"
"de/angebote/angebote-fuer-familien/'"
" target='_blank'>Hier geht es zur Website.</a>",
"type": "todo",
"status": "template",
"feedback_size": 0,
"image": self.IMAGE,
"order": order,
}
)
order += 1
models.ActivityTemplate.objects.update_or_create(
template_id=self.get_template_id("asta_OS"),
defaults={
"title": "Allgemeiner Studierendenausschuss (AStA)",
"description": "Allgemeiner Studierendenausschuss (AStA)"
" ist die universitätsweite Interessenvertretung der Studierenden und informiert euch"
" über Themen wie Bafög, Semesterticket etc. "
"<a href='https://www.asta.uni-osnabrueck.de/' "
"target='_blank'>Hier geht es zur Website.</a>",
"type": "todo",
"status": "template",
"feedback_size": 0,
"image": self.IMAGE,
"order": order,
}
)
order += 1
models.ActivityTemplate.objects.update_or_create(
template_id=self.get_template_id("asta_UB"),
defaults={
"title": "Allgemeiner Studierendenausschuss (AStA)",
"description": "Allgemeiner Studierendenausschuss (AStA)"
" ist die universitätsweite Interessenvertretung der Studierenden und informiert euch"
" über Themen wie Bafög, Semesterticket etc. "
"<a href='https://www.asta.uni-bremen.de/' "
"target='_blank'>Hier geht es zur Website.</a>",
"type": "todo",
"status": "template",
"feedback_size": 0,
"image": self.IMAGE,
"order": order,
}
)
order += 1
models.ActivityTemplate.objects.update_or_create(
template_id=self.get_template_id("asta_LUH"),
defaults={
"title": "Allgemeiner Studierendenausschuss (AStA)",
"description": "Allgemeiner Studierendenausschuss (AStA)"
" ist die universitätsweite Interessenvertretung der Studierenden und informiert euch"
" über Themen wie Bafög, Semesterticket etc. "
"<a href='https://www.asta-hannover.de/' "
"target='_blank'>Hier geht es zur Website.</a>",
"type": "todo",
"status": "template",
"feedback_size": 0,
"image": self.IMAGE,
"order": order,
}
)
order += 1
models.ActivityTemplate.objects.update_or_create(
template_id=self.get_template_id("cantine_OS"),
defaults={
"title": "Mensa",
"description": "Das Studentenwerk bietet dir in seinen Mensen ein vielfältiges Essensangebot. "
"<a href='https://www.studentenwerk-osnabrueck.de/de/essen/speiseplaene.html' "
"target='_blank'>Hier geht es zur Website.</a>",
"type": "todo",
"status": "template",
"feedback_size": 0,
"image": self.IMAGE,
"order": order,
}
)
order += 1
models.ActivityTemplate.objects.update_or_create(
template_id=self.get_template_id("cantine_UB"),
defaults={
"title": "Mensa",
"description": "Das Studentenwerk bietet dir in seinen Mensen ein vielfältiges Essensangebot. "
"<a href='https://www.stw-bremen.de/de/mensa/uni-mensa' "
"target='_blank'>Hier geht es zur Website.</a>",
"type": "todo",
"status": "template",
"feedback_size": 0,
"image": self.IMAGE,
"order": order,
}
)
order += 1
models.ActivityTemplate.objects.update_or_create(
template_id=self.get_template_id("cantine_LUH"),
defaults={
"title": "Mensa",
"description": "Das Studentenwerk bietet dir in seinen Mensen ein vielfältiges Essensangebot. "
"<a href='https://www.studentenwerk-hannover.de/essen/speiseplaene/alle-mensen-heute/' "
"target='_blank'>Hier geht es zur Website.</a>",
"type": "todo",
"status": "template",
"feedback_size": 0,
"image": self.IMAGE,
"order": order,
}
)
order += 1
models.ActivityTemplate.objects.update_or_create(
template_id=self.get_template_id("dorms_OS"),
defaults={
"title": "Wohnungsangebote",
"description": "Guten und preiswerten Wohnraum zu finden ist nicht immer leicht. Hier findest du "
"Informationen und Hilfestellungen: "
"<a href='https://www.studentenwerk-osnabrueck.de/de/wohnen.html' "
"target='_blank'>Hier geht es zur Website.</a>",
"type": "todo",
"status": "template",
"feedback_size": 0,
"image": self.IMAGE,
"order": order,
}
)
order += 1
models.ActivityTemplate.objects.update_or_create(
template_id=self.get_template_id("dorms_UB"),
defaults={
"title": "Wohnungsangebote",
"description": "Guten und preiswerten Wohnraum zu finden ist nicht immer leicht. Hier findest du "
"Informationen und Hilfestellungen: "
"<a href='https://www.uni-bremen.de/universitaet/campus/wohnen' "
"target='_blank'>Hier geht es zur Website.</a>",
"type": "todo",
"status": "template",
"feedback_size": 0,
"image": self.IMAGE,
"order": order,
}
)
order += 1
models.ActivityTemplate.objects.update_or_create(
template_id=self.get_template_id("dorms_LUH"),
defaults={
"title": "Wohnungsangebote",
"description": "Guten und preiswerten Wohnraum zu finden ist nicht immer leicht. Hier findest du "
"Informationen und Hilfestellungen: "
"<a href='https://www.uni-hannover.de/de/universitaet/campus-und-stadt/wohnen/' "
"target='_blank'>Hier geht es zur Website.</a>",
"type": "todo",
"status": "template",
"feedback_size": 0,
"image": self.IMAGE,
"order": order,
}
)
order += 1
models.ActivityTemplate.objects.update_or_create(
template_id=self.get_template_id("prüfungsamt_LUH"),
defaults={
"title": "Zentrales Prüfungsamt",
"description": "Das zentrale Prüfungsamt ist zuständig bei Fragen und Problemen rund um "
"Prüfungsangelegenheiten. "
"<a href='https://www.uni-hannover.de/de/universitaet/"
"organisation/dezernate/dezernat-6/pruefungsamt/'"
"target='_blank'>Hier geht es zur Website.</a>",
"type": "todo",
"status": "template",
"feedback_size": 0,
"image": self.IMAGE,
"order": order,
}
)
order += 1
models.ActivityTemplate.objects.update_or_create(
template_id=self.get_template_id("prüfungsamt_OS"),
defaults={
"title": "Mehr-Fächer-Prüfungsamt PATMOS",
"description": "Das Mehrfächer-Prüfungsamt ist zuständig für die Prüfungsadministration des "
"fachübergreifenden Bereichs (Kerncurriculum Lehrerbildung"
", allgemeine Schlüsselkompetenzen) "
"und für die Erstellung von übergreifenden Bescheinigungen, Leistungsübersichten sowie "
"Abschlusszeugnissen in den Mehrfächer-Studiengängen. "
"<a href='https://www.uni-osnabrueck.de/universitaet/organisation/zentrale-verwaltung/"
"studentische-angelegenheiten/mehrfaecher-pruefungsamt-patmos/' "
"target='_blank'>Hier geht es zur Website.</a>",
"type": "todo",
"status": "template",
"feedback_size": 0,
"image": self.IMAGE,
"order": order,
}
)
return True
def initialize(self, user):
"""
When a user logs in for the first time, initial activities are generated.
:param user: SiddataUser object
:return: True if successful
"""
goal = self.activate_recommender_for_user_and_create_first_goal(user)
models.Activity.create_activity_from_template(
template_id=self.get_template_id("general_information_question"),
goal=goal,
)
models.Activity.create_activity_from_template(
template_id=self.get_template_id("general_recommendation_question"),
goal=goal,
)
return True
def process_activity(self, activity):
"""
:param activity: models.Activity instance modified by Stud.IP plugin
:return: True if successful
"""
# get user api endpoint from incoming activity
user_api_endpoint = activity.goal.userrecommender.user.origin.api_endpoint
# check if answer value is None
if activity.answers is None:
activity.answers = []
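# Map each selected topic to site-specific templates; the _OS, _UB and _LUH
# suffixes correspond to the "osnabrueck", "bremen" and "hannover" endpoints.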
if "Studienverlauf, Prüfungen, Rückmeldung" in activity.answers:
if any(endpoint in user_api_endpoint for endpoint in ["osnabrueck", "localhost"]):
models.Activity.create_activity_from_template(
template_id=self.get_template_id("international_office_OS"),
goal=activity.goal,
)
models.Activity.create_activity_from_template(
template_id=self.get_template_id("sekretariat_OS"),
goal=activity.goal,
)
models.Activity.create_activity_from_template(
template_id=self.get_template_id("studienberatung_OS"),
goal=activity.goal,
)
models.Activity.create_activity_from_template(
template_id=self.get_template_id("prüfungsamt_OS"),
goal=activity.goal,
)
if any(endpoint in user_api_endpoint for endpoint in ["bremen"]):
models.Activity.create_activity_from_template(
template_id=self.get_template_id("international_office_UB"),
goal=activity.goal,
)
models.Activity.create_activity_from_template(
template_id=self.get_template_id("sekretariat_UB"),
goal=activity.goal,
)
models.Activity.create_activity_from_template(
template_id=self.get_template_id("studienberatung_UB"),
goal=activity.goal,
)
if any(endpoint in user_api_endpoint for endpoint in ["hannover"]):
models.Activity.create_activity_from_template(
template_id=self.get_template_id("international_office_LUH"),
goal=activity.goal,
)
models.Activity.create_activity_from_template(
template_id=self.get_template_id("sekretariat_LUH"),
goal=activity.goal,
)
models.Activity.create_activity_from_template(
template_id=self.get_template_id("studienberatung_LUH"),
goal=activity.goal,
)
models.Activity.create_activity_from_template(
template_id=self.get_template_id("prüfungsamt_LUH"),
goal=activity.goal,
)
if "Literatur ausleihen" in activity.answers:
if any(endpoint in user_api_endpoint for endpoint in ["osnabrueck", "localhost"]):
models.Activity.create_activity_from_template(
template_id=self.get_template_id("library_OS"),
goal=activity.goal,
)
if any(endpoint in user_api_endpoint for endpoint in ["bremen"]):
models.Activity.create_activity_from_template(
template_id=self.get_template_id("library_UB"),
goal=activity.goal,
)
if any(endpoint in user_api_endpoint for endpoint in ["hannover"]):
models.Activity.create_activity_from_template(
template_id=self.get_template_id("library_LUH"),
goal=activity.goal,
)
models.Activity.create_activity_from_template(
template_id=self.get_template_id("rooms_LUH"),
goal=activity.goal,
)
if "Raum für Gruppenarbeiten" in activity.answers:
if any(endpoint in user_api_endpoint for endpoint in ["osnabrueck", "localhost"]):
models.Activity.create_activity_from_template(
template_id=self.get_template_id("library_OS"),
goal=activity.goal,
)
models.Activity.create_activity_from_template(
template_id=self.get_template_id("rooms_OS"),
goal=activity.goal,
)
if any(endpoint in user_api_endpoint for endpoint in ["bremen"]):
models.Activity.create_activity_from_template(
template_id=self.get_template_id("library_UB"),
goal=activity.goal,
)
models.Activity.create_activity_from_template(
template_id=self.get_template_id("rooms_UB"),
goal=activity.goal,
)
if any(endpoint in user_api_endpoint for endpoint in ["hannover"]):
models.Activity.create_activity_from_template(
template_id=self.get_template_id("rooms_LUH"),
goal=activity.goal,
)
if "Raum zum ruhigen Arbeiten" in activity.answers:
if any(endpoint in user_api_endpoint for endpoint in ["osnabrueck", "localhost"]):
models.Activity.create_activity_from_template(
template_id=self.get_template_id("library_OS"),
goal=activity.goal,
)
if any(endpoint in user_api_endpoint for endpoint in ["bremen"]):
models.Activity.create_activity_from_template(
template_id=self.get_template_id("library_UB"),
goal=activity.goal,
)
if any(endpoint in user_api_endpoint for endpoint in ["hannover"]):
models.Activity.create_activity_from_template(
template_id=self.get_template_id("rooms_LUH"),
goal=activity.goal,
)
if "Studium und Familie" in activity.answers:
if any(endpoint in user_api_endpoint for endpoint in ["osnabrueck", "localhost"]):
models.Activity.create_activity_from_template(
template_id=self.get_template_id("family_OS"),
goal=activity.goal,
)
if any(endpoint in user_api_endpoint for endpoint in ["bremen"]):
models.Activity.create_activity_from_template(
template_id=self.get_template_id("family_UB"),
goal=activity.goal,
)
if any(endpoint in user_api_endpoint for endpoint in ["hannover"]):
models.Activity.create_activity_from_template(
template_id=self.get_template_id("family_LUH"),
goal=activity.goal,
)
if "Sport treiben" in activity.answers:
if any(endpoint in user_api_endpoint for endpoint in ["osnabrueck", "localhost"]):
models.Activity.create_activity_from_template(
template_id=self.get_template_id("sports_OS"),
goal=activity.goal,
)
if any(endpoint in user_api_endpoint for endpoint in ["bremen"]):
models.Activity.create_activity_from_template(
template_id=self.get_template_id("sports_UB"),
goal=activity.goal,
)
if any(endpoint in user_api_endpoint for endpoint in ["hannover"]):
models.Activity.create_activity_from_template(
template_id=self.get_template_id("sports_LUH"),
goal=activity.goal,
)
if "eine weitere Sprache lernen" in activity.answers:
if any(endpoint in user_api_endpoint for endpoint in ["osnabrueck", "localhost"]):
models.Activity.create_activity_from_template(
template_id=self.get_template_id("language_OS"),
goal=activity.goal,
)
if any(endpoint in user_api_endpoint for endpoint in ["bremen"]):
models.Activity.create_activity_from_template(
template_id=self.get_template_id("language_UB"),
goal=activity.goal,
)
if any(endpoint in user_api_endpoint for endpoint in ["hannover"]):
models.Activity.create_activity_from_template(
template_id=self.get_template_id("language_LUH"),
goal=activity.goal,
)
if "Auslandssemester" in activity.answers:
if any(endpoint in user_api_endpoint for endpoint in ["osnabrueck", "localhost"]):
models.Activity.create_activity_from_template(
template_id=self.get_template_id("language_OS"),
goal=activity.goal,
)
models.Activity.create_activity_from_template(
template_id=self.get_template_id("international_office_OS"),
goal=activity.goal,
)
if any(endpoint in user_api_endpoint for endpoint in ["bremen"]):
models.Activity.create_activity_from_template(
template_id=self.get_template_id("language_UB"),
goal=activity.goal,
)
models.Activity.create_activity_from_template(
template_id=self.get_template_id("international_office_UB"),
goal=activity.goal,
)
if any(endpoint in user_api_endpoint for endpoint in ["hannover"]):
models.Activity.create_activity_from_template(
template_id=self.get_template_id("language_LUH"),
goal=activity.goal,
)
models.Activity.create_activity_from_template(
template_id=self.get_template_id("international_office_LUH"),
goal=activity.goal,
)
if "Probleme im Studium" in activity.answers:
if any(endpoint in user_api_endpoint for endpoint in ["osnabrueck", "localhost"]):
models.Activity.create_activity_from_template(
template_id=self.get_template_id("sekretariat_OS"),
goal=activity.goal,
)
models.Activity.create_activity_from_template(
template_id=self.get_template_id("studienberatung_OS"),
goal=activity.goal,
)
models.Activity.create_activity_from_template(
template_id=self.get_template_id("problems_OS"),
goal=activity.goal,
)
if any(endpoint in user_api_endpoint for endpoint in ["bremen"]):
models.Activity.create_activity_from_template(
template_id=self.get_template_id("sekretariat_UB"),
goal=activity.goal,
)
models.Activity.create_activity_from_template(
template_id=self.get_template_id("studienberatung_UB"),
goal=activity.goal,
)
models.Activity.create_activity_from_template(
template_id=self.get_template_id("problems_UB"),
goal=activity.goal,
)
if any(endpoint in user_api_endpoint for endpoint in ["hannover"]):
models.Activity.create_activity_from_template(
template_id=self.get_template_id("sekretariat_LUH"),
goal=activity.goal,
)
models.Activity.create_activity_from_template(
template_id=self.get_template_id("studienberatung_LUH"),
goal=activity.goal,
)
models.Activity.create_activity_from_template(
template_id=self.get_template_id("problems_LUH"),
goal=activity.goal,
)
if "Technische Fragen und Probleme" in activity.answers:
if any(endpoint in user_api_endpoint for endpoint in ["osnabrueck", "localhost"]):
models.Activity.create_activity_from_template(
template_id=self.get_template_id("rechenzentrum_OS"),
goal=activity.goal,
)
models.Activity.create_activity_from_template(
template_id=self.get_template_id("digitization_OS"),
goal=activity.goal,
)
if any(endpoint in user_api_endpoint for endpoint in ["bremen"]):
models.Activity.create_activity_from_template(
template_id=self.get_template_id("rechenzentrum_UB"),
goal=activity.goal,
)
models.Activity.create_activity_from_template(
template_id=self.get_template_id("digitization_UB"),
goal=activity.goal,
)
if any(endpoint in user_api_endpoint for endpoint in ["hannover"]):
models.Activity.create_activity_from_template(
template_id=self.get_template_id("rechenzentrum_LUH"),
goal=activity.goal,
)
models.Activity.create_activity_from_template(
template_id=self.get_template_id("digitization_LUH"),
goal=activity.goal,
)
if "Finanzierung meines Studiums" in activity.answers:
if any(endpoint in user_api_endpoint for endpoint in ["osnabrueck", "localhost"]):
models.Activity.create_activity_from_template(
template_id=self.get_template_id("funding_OS"),
goal=activity.goal,
)
models.Activity.create_activity_from_template(
template_id=self.get_template_id("asta_OS"),
goal=activity.goal,
)
if any(endpoint in user_api_endpoint for endpoint in ["bremen"]):
models.Activity.create_activity_from_template(
template_id=self.get_template_id("funding_UB"),
goal=activity.goal,
)
models.Activity.create_activity_from_template(
template_id=self.get_template_id("asta_UB"),
goal=activity.goal,
)
if any(endpoint in user_api_endpoint for endpoint in ["hannover"]):
models.Activity.create_activity_from_template(
template_id=self.get_template_id("funding_LUH"),
goal=activity.goal,
)
models.Activity.create_activity_from_template(
template_id=self.get_template_id("asta_LUH"),
goal=activity.goal,
)
if "Wohnen" in activity.answers:
if any(endpoint in user_api_endpoint for endpoint in ["osnabrueck", "localhost"]):
models.Activity.create_activity_from_template(
template_id=self.get_template_id("dorms_OS"),
goal=activity.goal,
)
if any(endpoint in user_api_endpoint for endpoint in ["bremen"]):
models.Activity.create_activity_from_template(
template_id=self.get_template_id("dorms_UB"),
goal=activity.goal,
)
if any(endpoint in user_api_endpoint for endpoint in ["hannover"]):
models.Activity.create_activity_from_template(
template_id=self.get_template_id("dorms_LUH"),
goal=activity.goal,
)
if "Was gibt es heute zu Mittag?" in activity.answers:
if any(endpoint in user_api_endpoint for endpoint in ["osnabrueck", "localhost"]):
models.Activity.create_activity_from_template(
template_id=self.get_template_id("cantine_OS"),
goal=activity.goal,
)
if any(endpoint in user_api_endpoint for endpoint in ["bremen"]):
models.Activity.create_activity_from_template(
template_id=self.get_template_id("cantine_UB"),
goal=activity.goal,
)
if any(endpoint in user_api_endpoint for endpoint in ["hannover"]):
models.Activity.create_activity_from_template(
template_id=self.get_template_id("cantine_LUH"),
goal=activity.goal,
)
### ORANGE TREE ###
if "Wissenschaftliches Arbeiten" in activity.answers:
if any(endpoint in user_api_endpoint for endpoint in ["osnabrueck", "localhost"]):
models.Activity.create_activity_from_template(
template_id=self.get_template_id("course_recommendation_0"),
goal=activity.goal,
)
models.Activity.create_activity_from_template(
template_id=self.get_template_id("course_recommendation_1"),
goal=activity.goal,
)
models.Activity.create_activity_from_template(
template_id=self.get_template_id("course_recommendation_2"),
goal=activity.goal,
)
models.Activity.create_activity_from_template(
template_id=self.get_template_id("course_recommendation_3"),
goal=activity.goal,
)
models.Activity.create_activity_from_template(
template_id=self.get_template_id("course_recommendation_4"),
goal=activity.goal,
)
if any(endpoint in user_api_endpoint for endpoint in ["bremen"]):
models.Activity.create_activity_from_template(
template_id=self.get_template_id("course_recommendation_22"),
goal=activity.goal,
)
models.Activity.create_activity_from_template(
template_id=self.get_template_id("course_recommendation_23"),
goal=activity.goal,
)
models.Activity.create_activity_from_template(
template_id=self.get_template_id("course_recommendation_27"),
goal=activity.goal,
)
if any(endpoint in user_api_endpoint for endpoint in ["hannover"]):
models.Activity.create_activity_from_template(
template_id=self.get_template_id("course_recommendation_0"),
goal=activity.goal,
)
models.Activity.create_activity_from_template(
template_id=self.get_template_id("course_recommendation_1"),
goal=activity.goal,
)
if "Lernen & Gedächtnis" in activity.answers:
if any(endpoint in user_api_endpoint for endpoint in ["osnabrueck", "localhost"]):
models.Activity.create_activity_from_template(
template_id=self.get_template_id("course_recommendation_3"),
goal=activity.goal,
)
models.Activity.create_activity_from_template(
template_id=self.get_template_id("course_recommendation_4"),
goal=activity.goal,
)
models.Activity.create_activity_from_template(
template_id=self.get_template_id("course_recommendation_6"),
goal=activity.goal,
)
if any(endpoint in user_api_endpoint for endpoint in ["bremen"]):
...
if any(endpoint in user_api_endpoint for endpoint in ["hannover"]):
models.Activity.create_activity_from_template(
template_id=self.get_template_id("course_recommendation_13"),
goal=activity.goal,
)
models.Activity.create_activity_from_template(
template_id=self.get_template_id("course_recommendation_16"),
goal=activity.goal,
)
if "Prüfungsvorbereitung" in activity.answers:
if any(endpoint in user_api_endpoint for endpoint in ["osnabrueck", "localhost"]):
models.Activity.create_activity_from_template(
template_id=self.get_template_id("course_recommendation_6"),
goal=activity.goal,
)
if any(endpoint in user_api_endpoint for endpoint in ["bremen"]):
models.Activity.create_activity_from_template(
template_id=self.get_template_id("course_recommendation_24"),
goal=activity.goal,
)
models.Activity.create_activity_from_template(
template_id=self.get_template_id("course_recommendation_25"),
goal=activity.goal,
)
models.Activity.create_activity_from_template(
template_id=self.get_template_id("course_recommendation_26"),
goal=activity.goal,
)
if any(endpoint in user_api_endpoint for endpoint in ["hannover"]):
...
if "Präsentation & Kommunikation" in activity.answers:
if any(endpoint in user_api_endpoint for endpoint in ["osnabrueck", "localhost"]):
models.Activity.create_activity_from_template(
template_id=self.get_template_id("course_recommendation_8"),
goal=activity.goal,
)
if any(endpoint in user_api_endpoint for endpoint in ["bremen"]):
...
if any(endpoint in user_api_endpoint for endpoint in ["hannover"]):
models.Activity.create_activity_from_template(
template_id=self.get_template_id("course_recommendation_15"),
goal=activity.goal,
)
if "Projektmanagement" in activity.answers:
if any(endpoint in user_api_endpoint for endpoint in ["osnabrueck", "localhost"]):
...
if any(endpoint in user_api_endpoint for endpoint in ["bremen"]):
...
if any(endpoint in user_api_endpoint for endpoint in ["hannover"]):
...
if "Selbst- und Zeitmanagement" in activity.answers:
if any(endpoint in user_api_endpoint for endpoint in ["osnabrueck", "localhost"]):
models.Activity.create_activity_from_template(
template_id=self.get_template_id("course_recommendation_5"),
goal=activity.goal,
)
models.Activity.create_activity_from_template(
template_id=self.get_template_id("course_recommendation_6"),
goal=activity.goal,
)
models.Activity.create_activity_from_template(
template_id=self.get_template_id("course_recommendation_7"),
goal=activity.goal,
)
models.Activity.create_activity_from_template(
template_id=self.get_template_id("course_recommendation_9"),
goal=activity.goal,
)
models.Activity.create_activity_from_template(
template_id=self.get_template_id("course_recommendation_10"),
goal=activity.goal,
)
models.Activity.create_activity_from_template(
template_id=self.get_template_id("course_recommendation_11"),
goal=activity.goal,
)
if any(endpoint in user_api_endpoint for endpoint in ["bremen"]):
models.Activity.create_activity_from_template(
template_id=self.get_template_id("course_recommendation_18"),
goal=activity.goal,
)
models.Activity.create_activity_from_template(
template_id=self.get_template_id("course_recommendation_19"),
goal=activity.goal,
)
models.Activity.create_activity_from_template(
template_id=self.get_template_id("course_recommendation_20"),
goal=activity.goal,
)
models.Activity.create_activity_from_template(
template_id=self.get_template_id("course_recommendation_21"),
goal=activity.goal,
)
if any(endpoint in user_api_endpoint for endpoint in ["hannover"]):
models.Activity.create_activity_from_template(
template_id=self.get_template_id("course_recommendation_12"),
goal=activity.goal,
)
models.Activity.create_activity_from_template(
template_id=self.get_template_id("course_recommendation_14"),
goal=activity.goal,
)
models.Activity.create_activity_from_template(
template_id=self.get_template_id("course_recommendation_17"),
goal=activity.goal,
)
if "Gesund studieren" in activity.answers:
if any(endpoint in user_api_endpoint for endpoint in ["osnabrueck", "localhost"]):
models.Activity.create_activity_from_template(
template_id=self.get_template_id("course_recommendation_5"),
goal=activity.goal,
)
models.Activity.create_activity_from_template(
template_id=self.get_template_id("course_recommendation_6"),
goal=activity.goal,
)
if any(endpoint in user_api_endpoint for endpoint in ["bremen"]):
...
if any(endpoint in user_api_endpoint for endpoint in ["hannover"]):
...
if "Sport & Bewegung" in activity.answers:
if any(endpoint in user_api_endpoint for endpoint in ["osnabrueck", "localhost"]):
...
if any(endpoint in user_api_endpoint for endpoint in ["bremen"]):
...
if any(endpoint in user_api_endpoint for endpoint in ["hannover"]):
...
if "Literaturverwaltung" in activity.answers:
if any(endpoint in user_api_endpoint for endpoint in ["osnabrueck", "localhost"]):
models.Activity.create_activity_from_template(
template_id=self.get_template_id("course_recommendation_1"),
goal=activity.goal,
)
models.Activity.create_activity_from_template(
template_id=self.get_template_id("course_recommendation_2"),
goal=activity.goal,
)
if any(endpoint in user_api_endpoint for endpoint in ["bremen"]):
...
if any(endpoint in user_api_endpoint for endpoint in ["hannover"]):
...
if "Schreibtraining" in activity.answers:
if any(endpoint in user_api_endpoint for endpoint in ["osnabrueck", "localhost"]):
models.Activity.create_activity_from_template(
template_id=self.get_template_id("course_recommendation_0"),
goal=activity.goal,
)
if any(endpoint in user_api_endpoint for endpoint in ["bremen"]):
models.Activity.create_activity_from_template(
template_id=self.get_template_id("course_recommendation_22"),
goal=activity.goal,
)
models.Activity.create_activity_from_template(
template_id=self.get_template_id("course_recommendation_23"),
goal=activity.goal,
)
models.Activity.create_activity_from_template(
template_id=self.get_template_id("course_recommendation_27"),
goal=activity.goal,
)
if any(endpoint in user_api_endpoint for endpoint in ["hannover"]):
...
return True
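# Sketch (not from the original file): the ladder above issues one
# create_activity_from_template call per template, and every endpoint check of
# the form
#     any(endpoint in user_api_endpoint for endpoint in ["hannover"])
# over a single-element list reduces to a plain substring test,
#     "hannover" in user_api_endpoint.
# A minimal table-driven equivalent for two of the branches, assuming the same
# models.Activity / get_template_id API as above ("localhost" stays grouped
# with "osnabrueck", mirroring the original checks):
RECOMMENDATIONS = {
    ("Prüfungsvorbereitung", ("osnabrueck", "localhost")): [
        "course_recommendation_6",
    ],
    ("Prüfungsvorbereitung", ("bremen",)): [
        "course_recommendation_24",
        "course_recommendation_25",
        "course_recommendation_26",
    ],
}

def create_recommendations(self, activity, user_api_endpoint):
    # Walk the table instead of the if-ladder; one created activity per
    # listed template, exactly as in the branches the table encodes.
    for (answer, endpoints), templates in RECOMMENDATIONS.items():
        if answer in activity.answers and any(e in user_api_endpoint for e in endpoints):
            for name in templates:
                models.Activity.create_activity_from_template(
                    template_id=self.get_template_id(name),
                    goal=activity.goal,
                )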
| 42.318535
| 166
| 0.527853
| 7,014
| 76,258
| 5.533647
| 0.086826
| 0.081931
| 0.057352
| 0.069642
| 0.856156
| 0.845619
| 0.843532
| 0.838327
| 0.830572
| 0.822663
| 0
| 0.003572
| 0.379632
| 76,258
| 1,801
| 167
| 42.342032
| 0.81686
| 0.019434
| 0
| 0.707232
| 0
| 0.020167
| 0.297207
| 0.037034
| 0
| 0
| 0
| 0
| 0
| 1
| 0.002782
| false
| 0.001391
| 0.002782
| 0
| 0.008345
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
7d31788eaafbf1dd78b1f37d51c85feee1230bbe
| 13,580
|
py
|
Python
|
lib/python/treadmill/tests/ad/gmsa_test.py
|
vrautela/treadmill
|
05e47fa8acdf8bad7af78e737efb26ea6488de82
|
[
"Apache-2.0"
] | 133
|
2016-09-15T13:36:12.000Z
|
2021-01-18T06:29:13.000Z
|
lib/python/treadmill/tests/ad/gmsa_test.py
|
vrautela/treadmill
|
05e47fa8acdf8bad7af78e737efb26ea6488de82
|
[
"Apache-2.0"
] | 108
|
2016-12-28T23:41:27.000Z
|
2020-03-05T21:20:37.000Z
|
lib/python/treadmill/tests/ad/gmsa_test.py
|
evreng/treadmill
|
05e47fa8acdf8bad7af78e737efb26ea6488de82
|
[
"Apache-2.0"
] | 69
|
2016-09-23T20:38:58.000Z
|
2020-11-11T02:31:21.000Z
|
"""Unit test for treadmill.ad.gmsa.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import io
import os
import shutil
import tempfile
import unittest
import ldap3
import mock
import yaml
# Disable W0611: Unused import
import treadmill.tests.treadmill_test_skip_windows # pylint: disable=W0611
from treadmill import utils
from treadmill.ad import gmsa
from treadmill.ad import _servers as servers
class HostGroupWatchTest(unittest.TestCase):
"""Mock test for treadmill.ad.gmsa.HostGroupWatch.
"""
def setUp(self):
self.root = tempfile.mkdtemp()
self.placement_dir = os.path.join(self.root, 'placement')
self.servers_dir = os.path.join(self.root, 'servers')
os.mkdir(self.placement_dir)
os.mkdir(self.servers_dir)
def tearDown(self):
if self.root and os.path.isdir(self.root):
shutil.rmtree(self.root)
@mock.patch('ldap3.Connection')
@mock.patch('treadmill.ad.gmsa._check_ldap3_operation', mock.Mock())
def test_sync(self, connection):
"""Test gmsa.HostGroupWatch.sync."""
# Access protected module
# pylint: disable=W0212
server1_path = os.path.join(self.placement_dir, 'server1.ad.com')
os.mkdir(server1_path)
utils.touch(os.path.join(server1_path, 'proid1.app#0000000000001'))
server2_path = os.path.join(self.placement_dir, 'server2.ad.com')
os.mkdir(server2_path)
utils.touch(os.path.join(server2_path, 'proid4.app#0000000000004'))
server3_path = os.path.join(self.placement_dir, 'server3.ad.com')
os.mkdir(server3_path)
server4_path = os.path.join(self.placement_dir, 'server4.ad.com')
os.mkdir(server4_path)
utils.touch(os.path.join(server4_path, 'proid3.app#0000000000003'))
server5_path = os.path.join(self.placement_dir, 'server5.ad.com')
os.mkdir(server5_path)
utils.touch(os.path.join(server5_path, 'proid5.app#0000000000005'))
utils.touch(os.path.join(server5_path, 'proid5.app#0000000000006'))
with io.open(os.path.join(self.servers_dir, 'server1.ad.com'),
'w') as f:
yaml.dump({
servers.DC_KEY: 'dc.ad.com',
servers.DN_KEY: 'CN=server1,DC=AD,DC=COM',
'partition': 'partition1'
}, f)
with io.open(os.path.join(self.servers_dir, 'server2.ad.com'),
'w') as f:
yaml.dump({
servers.DC_KEY: 'dc.ad.com',
servers.DN_KEY: 'CN=server2,DC=AD,DC=COM',
'partition': 'partition1'
}, f)
with io.open(os.path.join(self.servers_dir, 'server3.ad.com'),
'w') as f:
yaml.dump({
servers.DC_KEY: 'dc.ad.com',
servers.DN_KEY: 'CN=server3,DC=AD,DC=COM',
'partition': 'partition1'
}, f)
with io.open(os.path.join(self.servers_dir, 'server5.ad.com'),
'w') as f:
yaml.dump({
servers.DC_KEY: 'dc.ad.com',
servers.DN_KEY: 'CN=server5,DC=AD,DC=COM',
'partition': 'partition1'
}, f)
mock_connection = mock.MagicMock()
connection.return_value = mock_connection
type(mock_connection).result = mock.PropertyMock(side_effect={
'result': 0
})
type(mock_connection).response = mock.PropertyMock(return_value=[
{'attributes': {
'samAccountName': 'proid1-gmsa-hosts',
'member': ['CN=server1,DC=AD,DC=COM']
}},
{'attributes': {
'samAccountName': 'proid2-gmsa-hosts',
'member': ['CN=server3,DC=AD,DC=COM']
}},
{'attributes': {
'samAccountName': 'proid3-gmsa-hosts',
'member': []
}},
{'attributes': {
'samAccountName': 'proid4-gmsa-hosts',
'member': []
}},
{'attributes': {
'samAccountName': 'proid5-gmsa-hosts',
'member': []
}}
])
watch = gmsa.HostGroupWatch(self.root, 'partition1',
'OU=test,DC=ad,DC=com', '{}-gmsa-hosts')
watch._sync()
self.assertEqual(watch._proids, {
'proid1': {'CN=server1,DC=AD,DC=COM': 1},
'proid2': {},
'proid3': {},
'proid4': {'CN=server2,DC=AD,DC=COM': 1},
'proid5': {'CN=server5,DC=AD,DC=COM': 2},
})
mock_connection.modify.assert_has_calls(
[
mock.call('CN=proid2-gmsa-hosts,OU=test,DC=ad,DC=com',
{'member': [(ldap3.MODIFY_DELETE,
['CN=server3,DC=AD,DC=COM'])]}),
mock.call('CN=proid4-gmsa-hosts,OU=test,DC=ad,DC=com',
{'member': [(ldap3.MODIFY_ADD,
['CN=server2,DC=AD,DC=COM'])]}),
mock.call('CN=proid5-gmsa-hosts,OU=test,DC=ad,DC=com',
{'member': [(ldap3.MODIFY_ADD,
['CN=server5,DC=AD,DC=COM'])]}),
],
any_order=True
)
@mock.patch('ldap3.Connection')
@mock.patch('treadmill.ad.gmsa._check_ldap3_operation', mock.Mock())
def test_on_created_placement(self, connection):
"""Test gmsa.HostGroupWatch._on_created_placement."""
# Access protected module
# pylint: disable=W0212
server1_path = os.path.join(self.placement_dir, 'server1.ad.com')
os.mkdir(server1_path)
with io.open(os.path.join(self.servers_dir, 'server1.ad.com'),
'w') as f:
yaml.dump({
servers.DC_KEY: 'dc.ad.com',
servers.DN_KEY: 'CN=server1,DC=AD,DC=COM',
'partition': 'partition1'
}, f)
mock_connection = mock.MagicMock()
connection.return_value = mock_connection
type(mock_connection).result = mock.PropertyMock(side_effect={
'result': 0
})
type(mock_connection).response = mock.PropertyMock(return_value=[
{'attributes': {
'samAccountName': 'proid1-gmsa-hosts',
'member': []
}}
])
watch = gmsa.HostGroupWatch(self.root, 'partition1',
'OU=test,DC=ad,DC=com', '{}-gmsa-hosts')
watch._sync()
self.assertEqual(watch._proids, {
'proid1': {},
})
placement_path = os.path.join(server1_path, 'proid1.app#0000000000001')
utils.touch(placement_path)
watch._on_created_placement(placement_path)
self.assertEqual(watch._proids, {
'proid1': {'CN=server1,DC=AD,DC=COM': 1},
})
mock_connection.modify.assert_has_calls([
mock.call('CN=proid1-gmsa-hosts,OU=test,DC=ad,DC=com',
{'member': [(ldap3.MODIFY_ADD,
['CN=server1,DC=AD,DC=COM'])]}),
])
@mock.patch('ldap3.Connection')
@mock.patch('treadmill.ad.gmsa._check_ldap3_operation', mock.Mock())
def test_on_created_same_host(self, connection):
"""Test gmsa.HostGroupWatch._on_created_placement."""
# Access protected module
# pylint: disable=W0212
server1_path = os.path.join(self.placement_dir, 'server1.ad.com')
os.mkdir(server1_path)
with io.open(os.path.join(self.servers_dir, 'server1.ad.com'),
'w') as f:
yaml.dump({
servers.DC_KEY: 'dc.ad.com',
servers.DN_KEY: 'CN=server1,DC=AD,DC=COM',
'partition': 'partition1'
}, f)
mock_connection = mock.MagicMock()
connection.return_value = mock_connection
type(mock_connection).result = mock.PropertyMock(side_effect={
'result': 0
})
type(mock_connection).response = mock.PropertyMock(return_value=[
{'attributes': {
'samAccountName': 'proid1-gmsa-hosts',
'member': []
}}
])
watch = gmsa.HostGroupWatch(self.root, 'partition1',
'OU=test,DC=ad,DC=com', '{}-gmsa-hosts')
watch._sync()
self.assertEqual(watch._proids, {
'proid1': {},
})
placement_path1 = os.path.join(server1_path,
'proid1.app#0000000000001')
utils.touch(placement_path1)
watch._on_created_placement(placement_path1)
self.assertEqual(watch._proids, {
'proid1': {'CN=server1,DC=AD,DC=COM': 1},
})
placement_path2 = os.path.join(server1_path,
'proid1.app#0000000000001')
utils.touch(placement_path2)
watch._on_created_placement(placement_path2)
self.assertEqual(watch._proids, {
'proid1': {'CN=server1,DC=AD,DC=COM': 2},
})
mock_connection.modify.assert_has_calls([
mock.call('CN=proid1-gmsa-hosts,OU=test,DC=ad,DC=com',
{'member': [(ldap3.MODIFY_ADD,
['CN=server1,DC=AD,DC=COM'])]}),
])
self.assertEqual(mock_connection.modify.call_count, 1)
@mock.patch('ldap3.Connection')
@mock.patch('treadmill.ad.gmsa._check_ldap3_operation', mock.Mock())
def test_on_deleted_placement(self, connection):
"""Test gmsa.HostGroupWatch._on_deleted_placement."""
# Access protected module
# pylint: disable=W0212
server1_path = os.path.join(self.placement_dir, 'server1.ad.com')
os.mkdir(server1_path)
placement_path = os.path.join(server1_path, 'proid1.app#0000000000001')
utils.touch(placement_path)
with io.open(os.path.join(self.servers_dir, 'server1.ad.com'),
'w') as f:
yaml.dump({
servers.DC_KEY: 'dc.ad.com',
servers.DN_KEY: 'CN=server1,DC=AD,DC=COM',
'partition': 'partition1'
}, f)
mock_connection = mock.MagicMock()
connection.return_value = mock_connection
type(mock_connection).result = mock.PropertyMock(side_effect={
'result': 0
})
type(mock_connection).response = mock.PropertyMock(return_value=[
{'attributes': {
'samAccountName': 'proid1-gmsa-hosts',
'member': ['CN=server1,DC=AD,DC=COM']
}}
])
watch = gmsa.HostGroupWatch(self.root, 'partition1',
'OU=test,DC=ad,DC=com', '{}-gmsa-hosts')
watch._sync()
self.assertEqual(watch._proids, {
'proid1': {'CN=server1,DC=AD,DC=COM': 1},
})
os.remove(placement_path)
watch._on_deleted_placement(placement_path)
self.assertEqual(watch._proids, {
'proid1': {'CN=server1,DC=AD,DC=COM': 0},
})
mock_connection.modify.assert_has_calls([
mock.call('CN=proid1-gmsa-hosts,OU=test,DC=ad,DC=com',
{'member': [(ldap3.MODIFY_DELETE,
['CN=server1,DC=AD,DC=COM'])]}),
])
@mock.patch('ldap3.Connection')
@mock.patch('treadmill.ad.gmsa._check_ldap3_operation', mock.Mock())
def test_on_deleted_same_host(self, connection):
"""Test gmsa.HostGroupWatch._on_deleted_placement."""
# Access protected module
# pylint: disable=W0212
server1_path = os.path.join(self.placement_dir, 'server1.ad.com')
os.mkdir(server1_path)
placement_path1 = os.path.join(server1_path,
'proid1.app#0000000000001')
utils.touch(placement_path1)
placement_path2 = os.path.join(server1_path,
'proid1.app#0000000000002')
utils.touch(placement_path2)
with io.open(os.path.join(self.servers_dir, 'server1.ad.com'),
'w') as f:
yaml.dump({
servers.DC_KEY: 'dc.ad.com',
servers.DN_KEY: 'CN=server1,DC=AD,DC=COM',
'partition': 'partition1'
}, f)
mock_connection = mock.MagicMock()
connection.return_value = mock_connection
type(mock_connection).result = mock.PropertyMock(side_effect={
'result': 0
})
type(mock_connection).response = mock.PropertyMock(return_value=[
{'attributes': {
'samAccountName': 'proid1-gmsa-hosts',
'member': ['CN=server1,DC=AD,DC=COM']
}}
])
watch = gmsa.HostGroupWatch(self.root, 'partition1',
'OU=test,DC=ad,DC=com', '{}-gmsa-hosts')
watch._sync()
self.assertEqual(watch._proids, {
'proid1': {'CN=server1,DC=AD,DC=COM': 2},
})
os.remove(placement_path2)
watch._on_deleted_placement(placement_path2)
self.assertEqual(watch._proids, {
'proid1': {'CN=server1,DC=AD,DC=COM': 1},
})
mock_connection.modify.assert_not_called()
if __name__ == '__main__':
unittest.main()
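# Sketch (standalone, not from the test file above): PropertyMock only routes
# attribute access when attached to the *type* of the mock, which is why the
# tests set ``type(mock_connection).result``. Note that ``return_value`` is
# the usual way to pin a property's value; ``side_effect=<dict>`` as used
# above iterates the dict's keys instead, which these tests tolerate only
# because _check_ldap3_operation is mocked out.
from unittest import mock

conn = mock.MagicMock()
type(conn).result = mock.PropertyMock(return_value={'result': 0})
assert conn.result == {'result': 0}  # each access goes through the PropertyMock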
| 36.31016
| 79
| 0.548233
| 1,478
| 13,580
| 4.870095
| 0.090663
| 0.026118
| 0.032509
| 0.048764
| 0.842039
| 0.807724
| 0.77327
| 0.748819
| 0.740622
| 0.72145
| 0
| 0.036732
| 0.312371
| 13,580
| 373
| 80
| 36.407507
| 0.734097
| 0.043446
| 0
| 0.719595
| 0
| 0
| 0.209859
| 0.104621
| 0
| 0
| 0
| 0
| 0.054054
| 1
| 0.023649
| false
| 0
| 0.054054
| 0
| 0.081081
| 0.003378
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
adb07688bdf832f451debba4caa77fba9a4d1315
| 2,431
|
py
|
Python
|
Chi-Square-Test.py
|
munafsaiyed/Data-Science-crime-analysis-of-india
|
d0481b0c6c7796f2424dc627b40a49bed7083068
|
[
"MIT"
] | null | null | null |
Chi-Square-Test.py
|
munafsaiyed/Data-Science-crime-analysis-of-india
|
d0481b0c6c7796f2424dc627b40a49bed7083068
|
[
"MIT"
] | null | null | null |
Chi-Square-Test.py
|
munafsaiyed/Data-Science-crime-analysis-of-india
|
d0481b0c6c7796f2424dc627b40a49bed7083068
|
[
"MIT"
] | null | null | null |
import pandas as pd
from scipy import stats

data = pd.read_csv('project_file.csv')

rowhead = ['rape', 'robbery', 'murder', 'total']  # header labels (unused below)
crimes = ['rape', 'robbery', 'murder']
states = ['GUJARAT', 'RAJASTHAN', 'BIHAR', 'UTTAR PRADESH']

def crime_totals(state):
    # Total reported cases of each crime for the given state.
    return [data[crime][data["state_ut"] == state].sum() for crime in crimes]

# One row per state, one column per crime.
rowguj, rowraj, rowbihar, rowup = (crime_totals(state) for state in states)
table = [rowguj, rowraj, rowbihar, rowup]

# Column totals and grand total (kept from the original script, unused below).
coltot = [sum(row[i] for row in table) for i in range(len(crimes))]
rowt = sum(coltot)
rowtot = ["TOTAL"] + coltot + [rowt]

print(table)
chi2_stat, p_val, dof, ex = stats.chi2_contingency(table)
print("===Chi2 Stat===")
print(chi2_stat)
print("\n")
print("===Degrees of Freedom===")
print(dof)
print("\n")
print("===P-Value===")
print(p_val)
print("\n")
print("===Expected Frequencies===")  # `ex` is the expected table under independence
print(ex)
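# Sketch (standalone): scipy's chi2_contingency on a tiny 2x2 table, to show
# what each returned value means.
from scipy import stats

observed = [[10, 20],
            [20, 40]]  # rows: groups, columns: categories
chi2_stat, p_val, dof, expected = stats.chi2_contingency(observed)
# dof = (rows - 1) * (cols - 1) = 1 here; `expected` holds the frequencies
# implied by independence -- for this proportional table it equals `observed`,
# so chi2_stat is 0.0 and p_val is 1.0.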
| 83.827586
| 659
| 0.634307
| 337
| 2,431
| 4.448071
| 0.136499
| 0.216144
| 0.264176
| 0.136091
| 0.771848
| 0.771848
| 0.771848
| 0.771848
| 0.735824
| 0.735824
| 0
| 0.001736
| 0.052242
| 2,431
| 28
| 660
| 86.821429
| 0.648872
| 0
| 0
| 0.12
| 0
| 0
| 0.379424
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.12
| 0
| 0.12
| 0.48
| 0
| 0
| 0
| null | 1
| 1
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 8
|
add51fa293b8043119ef92446e0617a91e47e638
| 748
|
py
|
Python
|
r2con2020/tasks/defuse/scripts/solve2.py
|
BlackVS/CTFs
|
ca7d5c9a3dbf1bcfe7607bbfd454eca470634dde
|
[
"MIT"
] | 10
|
2020-09-06T12:08:32.000Z
|
2021-07-19T15:12:30.000Z
|
r2con2020/tasks/defuse/scripts/solve2.py
|
BlackVS/CTFs
|
ca7d5c9a3dbf1bcfe7607bbfd454eca470634dde
|
[
"MIT"
] | null | null | null |
r2con2020/tasks/defuse/scripts/solve2.py
|
BlackVS/CTFs
|
ca7d5c9a3dbf1bcfe7607bbfd454eca470634dde
|
[
"MIT"
] | 1
|
2021-11-22T05:14:56.000Z
|
2021-11-22T05:14:56.000Z
|
#!/usr/bin/env python3
import os,sys
from itertools import *
#for p in product( 'prxz', 'su', 'ln', 'ce', 'prxz', 'aio', 'ln', 'ce', 'km', 'aio', 'prxz', 'ce', 'tv', 'aio', 'km', 'ce' ):
#for p in product( 'prxz', 'su', 'ln', 'ce', 'prxz', 'aio' ):
#for p in product( ['rule'], 'prxz', 'aio', 'ln', 'ce', 'km', 'aio', 'prxz', 'ce', 'tv', 'aio', 'km', 'ce' ):
#for p in product( 'prxz', 'ce', 'tv', 'aio', 'km', 'ce' ):
#for p in product( ['rule'], 'prxz', 'aio', 'ln', 'ce', 'km', 'aio', 'prxz', 'ce', ['take','time','tome']):
#for p in product( ['rule'], 'prxz', 'aio', 'ln', 'ce', 'km', 'aio', 'prxz', 'ce', ['time']):
for p in product( ['rule'], 'prxz', 'aio', 'ln', 'ce', ['more'], ['time']):
print("esil"+"".join(p))
# answer found among the printed candidates: esilrulezonemoretime
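# Sketch (standalone): itertools.product treats a bare string as a sequence of
# one-character choices, so the loop above enumerates 1*4*3*2*2*1*1 = 48
# candidate strings -- one of which is the answer recorded above.
from itertools import product

candidates = ["esil" + "".join(p)
              for p in product(['rule'], 'prxz', 'aio', 'ln', 'ce', ['more'], ['time'])]
assert len(candidates) == 48
assert "esilrulezonemoretime" in candidates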
| 53.428571
| 125
| 0.494652
| 113
| 748
| 3.274336
| 0.256637
| 0.075676
| 0.113514
| 0.245946
| 0.718919
| 0.718919
| 0.718919
| 0.718919
| 0.718919
| 0.643243
| 0
| 0.00158
| 0.153743
| 748
| 14
| 126
| 53.428571
| 0.582938
| 0.760695
| 0
| 0
| 0
| 0
| 0.156069
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.4
| 0
| 0.4
| 0.2
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 7
|
bc22e61aa14f9b54edc3219b2abfe2f3cd9285b3
| 13,324
|
py
|
Python
|
tests/flytekit/unit/common_tests/types/impl/test_blobs.py
|
flytehub/flytekit
|
f8f53567594069b29fcd3f99abd1da71a5ef0e22
|
[
"Apache-2.0"
] | 1
|
2019-10-22T05:22:16.000Z
|
2019-10-22T05:22:16.000Z
|
tests/flytekit/unit/common_tests/types/impl/test_blobs.py
|
chixcode/flytekit
|
f901aee721847c6264d44079d4fa31a75b8876e1
|
[
"Apache-2.0"
] | null | null | null |
tests/flytekit/unit/common_tests/types/impl/test_blobs.py
|
chixcode/flytekit
|
f901aee721847c6264d44079d4fa31a75b8876e1
|
[
"Apache-2.0"
] | 1
|
2019-08-28T22:27:07.000Z
|
2019-08-28T22:27:07.000Z
|
from __future__ import absolute_import
from flytekit.common.exceptions import user as _user_exceptions
from flytekit.common.types.impl import blobs
from flytekit.common.utils import AutoDeletingTempDir
from flytekit.models.core import types as _core_types
from flytekit.sdk import test_utils
import pytest
import os
def test_blob():
b = blobs.Blob("/tmp/fake")
assert b.remote_location == "/tmp/fake"
assert b.local_path is None
assert b.mode == "rb"
assert b.metadata.type.format == ""
assert b.metadata.type.dimensionality == _core_types.BlobType.BlobDimensionality.SINGLE
def test_blob_from_python_std():
with test_utils.LocalTestFileSystem() as t:
with AutoDeletingTempDir('test') as wd:
tmp_name = wd.get_named_tempfile("from_python_std")
with open(tmp_name, 'wb') as w:
w.write("hello hello".encode('utf-8'))
b = blobs.Blob.from_python_std(tmp_name)
assert b.mode == "wb"
assert b.metadata.type.format == ""
assert b.metadata.type.dimensionality == _core_types.BlobType.BlobDimensionality.SINGLE
assert b.remote_location.startswith(t.name)
assert b.local_path == tmp_name
with open(b.remote_location, 'rb') as r:
assert r.read() == "hello hello".encode('utf-8')
b = blobs.Blob("/tmp/fake")
b2 = blobs.Blob.from_python_std(b)
assert b == b2
with pytest.raises(_user_exceptions.FlyteTypeException):
blobs.Blob.from_python_std(3)
def test_blob_create_at():
with test_utils.LocalTestFileSystem() as t:
with AutoDeletingTempDir('test') as wd:
tmp_name = wd.get_named_tempfile('tmp')
b = blobs.Blob.create_at_known_location(tmp_name)
assert b.local_path is None
assert b.remote_location == tmp_name
assert b.mode == 'wb'
assert b.metadata.type.format == ""
assert b.metadata.type.dimensionality == _core_types.BlobType.BlobDimensionality.SINGLE
with b as w:
w.write("hello hello".encode('utf-8'))
assert b.local_path.startswith(t.name)
with open(tmp_name, 'rb') as r:
assert r.read() == "hello hello".encode('utf-8')
def test_blob_fetch_managed():
with AutoDeletingTempDir('test') as wd:
with test_utils.LocalTestFileSystem() as t:
tmp_name = wd.get_named_tempfile('tmp')
with open(tmp_name, 'wb') as w:
w.write("hello".encode('utf-8'))
b = blobs.Blob.fetch(tmp_name)
assert b.local_path.startswith(t.name)
assert b.remote_location == tmp_name
assert b.mode == 'rb'
assert b.metadata.type.format == ""
assert b.metadata.type.dimensionality == _core_types.BlobType.BlobDimensionality.SINGLE
with b as r:
assert r.read() == "hello".encode('utf-8')
with pytest.raises(_user_exceptions.FlyteAssertion):
blobs.Blob.fetch(tmp_name, local_path=b.local_path)
with open(tmp_name, 'wb') as w:
w.write("bye".encode('utf-8'))
b2 = blobs.Blob.fetch(tmp_name, local_path=b.local_path, overwrite=True)
with b2 as r:
assert r.read() == "bye".encode('utf-8')
with pytest.raises(_user_exceptions.FlyteAssertion):
blobs.Blob.fetch(tmp_name)
def test_blob_fetch_unmanaged():
with AutoDeletingTempDir('test') as wd:
with AutoDeletingTempDir('test2') as t:
tmp_name = wd.get_named_tempfile('source')
tmp_sink = t.get_named_tempfile('sink')
with open(tmp_name, 'wb') as w:
w.write("hello".encode('utf-8'))
b = blobs.Blob.fetch(tmp_name, local_path=tmp_sink)
assert b.local_path == tmp_sink
assert b.remote_location == tmp_name
assert b.mode == 'rb'
assert b.metadata.type.format == ""
assert b.metadata.type.dimensionality == _core_types.BlobType.BlobDimensionality.SINGLE
with b as r:
assert r.read() == "hello".encode('utf-8')
with pytest.raises(_user_exceptions.FlyteAssertion):
blobs.Blob.fetch(tmp_name, local_path=tmp_sink)
with open(tmp_name, 'wb') as w:
w.write("bye".encode('utf-8'))
b2 = blobs.Blob.fetch(tmp_name, local_path=tmp_sink, overwrite=True)
with b2 as r:
assert r.read() == "bye".encode('utf-8')
def test_blob_double_enter():
with test_utils.LocalTestFileSystem():
with AutoDeletingTempDir('test') as wd:
b = blobs.Blob(wd.get_named_tempfile("sink"), mode='wb')
with b:
with pytest.raises(_user_exceptions.FlyteAssertion):
with b:
pass
def test_blob_download_managed():
with AutoDeletingTempDir('test') as wd:
with test_utils.LocalTestFileSystem() as t:
tmp_name = wd.get_named_tempfile('tmp')
with open(tmp_name, 'wb') as w:
w.write("hello".encode('utf-8'))
b = blobs.Blob(tmp_name)
b.download()
assert b.local_path.startswith(t.name)
assert b.remote_location == tmp_name
assert b.mode == 'rb'
assert b.metadata.type.format == ""
assert b.metadata.type.dimensionality == _core_types.BlobType.BlobDimensionality.SINGLE
with b as r:
assert r.read() == "hello".encode('utf-8')
b2 = blobs.Blob(tmp_name)
with pytest.raises(_user_exceptions.FlyteAssertion):
b2.download(b.local_path)
with open(tmp_name, 'wb') as w:
w.write("bye".encode('utf-8'))
b2 = blobs.Blob(tmp_name)
b2.download(local_path=b.local_path, overwrite=True)
with b2 as r:
assert r.read() == "bye".encode('utf-8')
b = blobs.Blob(tmp_name)
with pytest.raises(_user_exceptions.FlyteAssertion):
b.download()
def test_blob_download_unmanaged():
with AutoDeletingTempDir('test') as wd:
with AutoDeletingTempDir('test2') as t:
tmp_name = wd.get_named_tempfile('source')
tmp_sink = t.get_named_tempfile('sink')
with open(tmp_name, 'wb') as w:
w.write("hello".encode('utf-8'))
b = blobs.Blob(tmp_name)
b.download(tmp_sink)
assert b.local_path == tmp_sink
assert b.remote_location == tmp_name
assert b.mode == 'rb'
assert b.metadata.type.format == ""
assert b.metadata.type.dimensionality == _core_types.BlobType.BlobDimensionality.SINGLE
with b as r:
assert r.read() == "hello".encode('utf-8')
b = blobs.Blob(tmp_name)
with pytest.raises(_user_exceptions.FlyteAssertion):
b.download(tmp_sink)
with open(tmp_name, 'wb') as w:
w.write("bye".encode('utf-8'))
b2 = blobs.Blob(tmp_name)
b2.download(tmp_sink, overwrite=True)
with b2 as r:
assert r.read() == "bye".encode('utf-8')
def test_multipart_blob():
b = blobs.MultiPartBlob("/tmp/fake", mode='w', format='csv')
assert b.remote_location == "/tmp/fake/"
assert b.local_path is None
assert b.mode == "w"
assert b.metadata.type.format == "csv"
assert b.metadata.type.dimensionality == _core_types.BlobType.BlobDimensionality.MULTIPART
def _generate_multipart_blob_data(tmp_dir):
n = tmp_dir.get_named_tempfile("0")
with open(n, 'wb') as w:
w.write("part0".encode('utf-8'))
n = tmp_dir.get_named_tempfile("1")
with open(n, 'wb') as w:
w.write("part1".encode('utf-8'))
n = tmp_dir.get_named_tempfile("2")
with open(n, 'wb') as w:
w.write("part2".encode('utf-8'))
def test_multipart_blob_from_python_std():
with test_utils.LocalTestFileSystem() as t:
with AutoDeletingTempDir('test') as wd:
_generate_multipart_blob_data(wd)
b = blobs.MultiPartBlob.from_python_std(wd.name)
assert b.mode == "wb"
assert b.metadata.type.format == ""
assert b.metadata.type.dimensionality == _core_types.BlobType.BlobDimensionality.MULTIPART
assert b.remote_location.startswith(t.name)
assert b.local_path == wd.name
with open(os.path.join(b.remote_location, '0'), 'rb') as r:
assert r.read() == "part0".encode('utf-8')
with open(os.path.join(b.remote_location, '1'), 'rb') as r:
assert r.read() == "part1".encode('utf-8')
with open(os.path.join(b.remote_location, '2'), 'rb') as r:
assert r.read() == "part2".encode('utf-8')
b = blobs.MultiPartBlob("/tmp/fake/")
b2 = blobs.MultiPartBlob.from_python_std(b)
assert b == b2
with pytest.raises(_user_exceptions.FlyteTypeException):
blobs.MultiPartBlob.from_python_std(3)
def test_multipart_blob_create_at():
with test_utils.LocalTestFileSystem():
with AutoDeletingTempDir('test') as wd:
b = blobs.MultiPartBlob.create_at_known_location(wd.name)
assert b.local_path is None
assert b.remote_location == wd.name + "/"
assert b.mode == 'wb'
assert b.metadata.type.format == ""
assert b.metadata.type.dimensionality == _core_types.BlobType.BlobDimensionality.MULTIPART
with b.create_part('0') as w:
w.write("part0".encode('utf-8'))
with b.create_part('1') as w:
w.write("part1".encode('utf-8'))
with b.create_part('2') as w:
w.write("part2".encode('utf-8'))
with open(os.path.join(wd.name, '0'), 'rb') as r:
assert r.read() == "part0".encode('utf-8')
with open(os.path.join(wd.name, '1'), 'rb') as r:
assert r.read() == "part1".encode('utf-8')
with open(os.path.join(wd.name, '2'), 'rb') as r:
assert r.read() == "part2".encode('utf-8')
def test_multipart_blob_fetch_managed():
with AutoDeletingTempDir('test') as wd:
with test_utils.LocalTestFileSystem() as t:
_generate_multipart_blob_data(wd)
b = blobs.MultiPartBlob.fetch(wd.name)
assert b.local_path.startswith(t.name)
assert b.remote_location == wd.name + "/"
assert b.mode == 'rb'
assert b.metadata.type.format == ""
assert b.metadata.type.dimensionality == _core_types.BlobType.BlobDimensionality.MULTIPART
with b as r:
assert r[0].read() == "part0".encode('utf-8')
assert r[1].read() == "part1".encode('utf-8')
assert r[2].read() == "part2".encode('utf-8')
with pytest.raises(_user_exceptions.FlyteAssertion):
blobs.MultiPartBlob.fetch(wd.name, local_path=b.local_path)
with open(os.path.join(wd.name, "0"), 'wb') as w:
w.write("bye".encode('utf-8'))
b2 = blobs.MultiPartBlob.fetch(wd.name, local_path=b.local_path, overwrite=True)
with b2 as r:
assert r[0].read() == "bye".encode('utf-8')
assert r[1].read() == "part1".encode('utf-8')
assert r[2].read() == "part2".encode('utf-8')
with pytest.raises(_user_exceptions.FlyteAssertion):
blobs.Blob.fetch(wd.name)
def test_multipart_blob_fetch_unmanaged():
with AutoDeletingTempDir('test') as wd:
with AutoDeletingTempDir('test2') as t:
_generate_multipart_blob_data(wd)
tmp_sink = t.get_named_tempfile('sink')
b = blobs.MultiPartBlob.fetch(wd.name, local_path=tmp_sink)
assert b.local_path == tmp_sink
assert b.remote_location == wd.name + "/"
assert b.mode == 'rb'
assert b.metadata.type.format == ""
assert b.metadata.type.dimensionality == _core_types.BlobType.BlobDimensionality.MULTIPART
with b as r:
assert r[0].read() == "part0".encode('utf-8')
assert r[1].read() == "part1".encode('utf-8')
assert r[2].read() == "part2".encode('utf-8')
with pytest.raises(_user_exceptions.FlyteAssertion):
blobs.MultiPartBlob.fetch(wd.name, local_path=tmp_sink)
with open(os.path.join(wd.name, "0"), 'wb') as w:
w.write("bye".encode('utf-8'))
b2 = blobs.MultiPartBlob.fetch(wd.name, local_path=tmp_sink, overwrite=True)
with b2 as r:
assert r[0].read() == "bye".encode('utf-8')
assert r[1].read() == "part1".encode('utf-8')
assert r[2].read() == "part2".encode('utf-8')
def test_multipart_blob_no_enter_on_write():
with test_utils.LocalTestFileSystem():
b = blobs.MultiPartBlob.create_at_any_location()
with pytest.raises(_user_exceptions.FlyteAssertion):
with b:
pass
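# Sketch (standalone, not flytekit API): the re-entrancy guard exercised by
# test_blob_double_enter and test_multipart_blob_no_enter_on_write above -- a
# context manager that refuses a second, nested __enter__.
class OneShotHandle:
    def __init__(self):
        self._open = False

    def __enter__(self):
        if self._open:
            raise AssertionError("already entered")  # flytekit raises FlyteAssertion here
        self._open = True
        return self

    def __exit__(self, exc_type, exc, tb):
        self._open = False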
| 40.13253
| 102
| 0.590213
| 1,703
| 13,324
| 4.44862
| 0.059307
| 0.05821
| 0.060718
| 0.06019
| 0.918427
| 0.885296
| 0.873548
| 0.852957
| 0.811642
| 0.774419
| 0
| 0.012407
| 0.280171
| 13,324
| 331
| 103
| 40.253776
| 0.7775
| 0
| 0
| 0.720149
| 0
| 0
| 0.054038
| 0
| 0
| 0
| 0
| 0
| 0.380597
| 1
| 0.05597
| false
| 0.007463
| 0.029851
| 0
| 0.085821
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
bc2ee864ebb0fa0013ff728ac206f3c64231998e
| 2,330
|
py
|
Python
|
python/Expression.py
|
Greakz/mdh-cmake-cubevis
|
6c64ec0e14dcdd07e69fa1f018aa7954eeeaf173
|
[
"MIT"
] | null | null | null |
python/Expression.py
|
Greakz/mdh-cmake-cubevis
|
6c64ec0e14dcdd07e69fa1f018aa7954eeeaf173
|
[
"MIT"
] | 5
|
2021-08-24T11:09:54.000Z
|
2021-08-24T21:14:15.000Z
|
python/Expression.py
|
Greakz/mdh-cmake-cubevis
|
6c64ec0e14dcdd07e69fa1f018aa7954eeeaf173
|
[
"MIT"
] | null | null | null |
class Expression:
@staticmethod
def process_string_without_clocks(data, raw_txt):
result = raw_txt
for constant in data.constants:
result = result.replace(constant["name"], "this->" + constant["cname"])
return result
@staticmethod
def process_string(data, raw_txt):
result = raw_txt
for constant in data.constants:
result = result.replace(constant["name"], "this->v->" + constant["cname"])
for clock in data.clocks_par:
result = result.replace(clock["name"], "this->v->clock->" + clock["cname"])
for clock in data.clocks_seq:
result = result.replace(clock["name"], "this->v->clock->" + clock["cname"])
return result
@staticmethod
def process_str_exp_but_set_clock_to_min(data, raw_txt):
result = raw_txt
for constant in data.constants:
result = result.replace(constant["name"], "this->v->" + constant["cname"])
for clock in data.clocks_par:
result = result.replace(clock["name"], "this->v->" + clock["cname_start_f"] + "()")
for clock in data.clocks_seq:
result = result.replace(clock["name"], "this->v->" + clock["cname_start_f"] + "()")
return result
@staticmethod
def process_str_exp_but_set_clock_to_min_except_for(data, raw_txt, exceptional_clocks, add_custom_value):
x = raw_txt
for constant in data.constants:
x = x.replace(constant["name"], "this->v->" + constant["cname"])
for clock in data.clocks_par:
if is_in(exceptional_clocks, clock["name"]):
x = x.replace(clock["name"], "(this->v->" + clock["cname_start_f"] + "() + " + str(add_custom_value) + ")")
else:
x = x.replace(clock["name"], "this->v->" + clock["cname_start_f"] + "()")
for clock in data.clocks_seq:
if is_in(exceptional_clocks, clock["name"]):
x = x.replace(clock["name"], "(this->v->" + clock["cname_start_f"] + "() + " + str(add_custom_value) + ")")
else:
x = x.replace(clock["name"], "this->v->" + clock["cname_start_f"] + "()")
return x
def is_in(search_list, key):
for item in search_list:
if key == item:
return True
return False
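# Sketch (hypothetical usage): `data` stands in for whatever object the code
# generator passes in, carrying the attributes Expression actually reads.
from types import SimpleNamespace

data = SimpleNamespace(
    constants=[{"name": "MAX", "cname": "max_c"}],
    clocks_par=[{"name": "clk1", "cname": "c1", "cname_start_f": "c1_start"}],
    clocks_seq=[],
)
print(Expression.process_string(data, "MAX + clk1"))
# -> "this->v->max_c + this->v->clock->c1"
# Note the rewriting is plain substring replacement, so symbol names must not
# overlap (a clock named "t" would also rewrite the "t" inside "this->").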
| 42.363636
| 123
| 0.579399
| 290
| 2,330
| 4.448276
| 0.165517
| 0.074419
| 0.076744
| 0.124031
| 0.850388
| 0.850388
| 0.820155
| 0.795349
| 0.795349
| 0.795349
| 0
| 0
| 0.264807
| 2,330
| 54
| 124
| 43.148148
| 0.753065
| 0
| 0
| 0.708333
| 0
| 0
| 0.130901
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.104167
| false
| 0
| 0
| 0
| 0.25
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
cb2d2e512b3faec3b836f6d9eb8806788663cec6
| 1,754
|
py
|
Python
|
tests/test_provider_onelogin_onelogin.py
|
mjuenema/python-terrascript
|
6d8bb0273a14bfeb8ff8e950fe36f97f7c6e7b1d
|
[
"BSD-2-Clause"
] | 507
|
2017-07-26T02:58:38.000Z
|
2022-01-21T12:35:13.000Z
|
tests/test_provider_onelogin_onelogin.py
|
mjuenema/python-terrascript
|
6d8bb0273a14bfeb8ff8e950fe36f97f7c6e7b1d
|
[
"BSD-2-Clause"
] | 135
|
2017-07-20T12:01:59.000Z
|
2021-10-04T22:25:40.000Z
|
tests/test_provider_onelogin_onelogin.py
|
mjuenema/python-terrascript
|
6d8bb0273a14bfeb8ff8e950fe36f97f7c6e7b1d
|
[
"BSD-2-Clause"
] | 81
|
2018-02-20T17:55:28.000Z
|
2022-01-31T07:08:40.000Z
|
# tests/test_provider_onelogin_onelogin.py
# Automatically generated by tools/makecode.py (24-Sep-2021 15:23:37 UTC)
def test_provider_import():
import terrascript.provider.onelogin.onelogin
def test_resource_import():
from terrascript.resource.onelogin.onelogin import onelogin_app_role_attachments
from terrascript.resource.onelogin.onelogin import onelogin_app_rules
from terrascript.resource.onelogin.onelogin import onelogin_apps
from terrascript.resource.onelogin.onelogin import onelogin_auth_servers
from terrascript.resource.onelogin.onelogin import onelogin_oidc_apps
from terrascript.resource.onelogin.onelogin import onelogin_privileges
from terrascript.resource.onelogin.onelogin import onelogin_roles
from terrascript.resource.onelogin.onelogin import onelogin_saml_apps
from terrascript.resource.onelogin.onelogin import (
onelogin_smarthook_environment_variables,
)
from terrascript.resource.onelogin.onelogin import onelogin_smarthooks
from terrascript.resource.onelogin.onelogin import onelogin_user_mappings
from terrascript.resource.onelogin.onelogin import onelogin_users
def test_datasource_import():
from terrascript.data.onelogin.onelogin import onelogin_user
from terrascript.data.onelogin.onelogin import onelogin_users
# TODO: Shortcut imports without namespace for official and supported providers.
# TODO: This has to be moved into a required_providers block.
# def test_version_source():
#
# import terrascript.provider.onelogin.onelogin
#
# t = terrascript.provider.onelogin.onelogin.onelogin()
# s = str(t)
#
# assert 'https://github.com/onelogin/terraform-provider-onelogin' in s
# assert '0.1.23' in s
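# Sketch (standalone): import smoke tests like the ones above fail loudly when
# a generated binding is missing; the module name below is hypothetical.
import pytest

def test_missing_binding_is_detected():
    with pytest.raises(ModuleNotFoundError):
        import terrascript.provider.onelogin.no_such_module  # hypothetical name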
| 31.890909
| 84
| 0.796465
| 212
| 1,754
| 6.424528
| 0.349057
| 0.223201
| 0.226138
| 0.30837
| 0.625551
| 0.552129
| 0.552129
| 0.207783
| 0
| 0
| 0
| 0.010589
| 0.13854
| 1,754
| 54
| 85
| 32.481481
| 0.890801
| 0.287913
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0.018519
| 0
| 1
| 0.15
| true
| 0
| 0.9
| 0
| 1.05
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 7
|