Dataset schema (column name: dtype):
hexsha: string
size: int64
ext: string
lang: string
max_stars_repo_path: string
max_stars_repo_name: string
max_stars_repo_head_hexsha: string
max_stars_repo_licenses: list
max_stars_count: int64
max_stars_repo_stars_event_min_datetime: string
max_stars_repo_stars_event_max_datetime: string
max_issues_repo_path: string
max_issues_repo_name: string
max_issues_repo_head_hexsha: string
max_issues_repo_licenses: list
max_issues_count: int64
max_issues_repo_issues_event_min_datetime: string
max_issues_repo_issues_event_max_datetime: string
max_forks_repo_path: string
max_forks_repo_name: string
max_forks_repo_head_hexsha: string
max_forks_repo_licenses: list
max_forks_count: int64
max_forks_repo_forks_event_min_datetime: string
max_forks_repo_forks_event_max_datetime: string
content: string
avg_line_length: float64
max_line_length: int64
alphanum_fraction: float64
qsc_code_num_words_quality_signal: int64
qsc_code_num_chars_quality_signal: float64
qsc_code_mean_word_length_quality_signal: float64
qsc_code_frac_words_unique_quality_signal: float64
qsc_code_frac_chars_top_2grams_quality_signal: float64
qsc_code_frac_chars_top_3grams_quality_signal: float64
qsc_code_frac_chars_top_4grams_quality_signal: float64
qsc_code_frac_chars_dupe_5grams_quality_signal: float64
qsc_code_frac_chars_dupe_6grams_quality_signal: float64
qsc_code_frac_chars_dupe_7grams_quality_signal: float64
qsc_code_frac_chars_dupe_8grams_quality_signal: float64
qsc_code_frac_chars_dupe_9grams_quality_signal: float64
qsc_code_frac_chars_dupe_10grams_quality_signal: float64
qsc_code_frac_chars_replacement_symbols_quality_signal: float64
qsc_code_frac_chars_digital_quality_signal: float64
qsc_code_frac_chars_whitespace_quality_signal: float64
qsc_code_size_file_byte_quality_signal: float64
qsc_code_num_lines_quality_signal: float64
qsc_code_num_chars_line_max_quality_signal: float64
qsc_code_num_chars_line_mean_quality_signal: float64
qsc_code_frac_chars_alphabet_quality_signal: float64
qsc_code_frac_chars_comments_quality_signal: float64
qsc_code_cate_xml_start_quality_signal: float64
qsc_code_frac_lines_dupe_lines_quality_signal: float64
qsc_code_cate_autogen_quality_signal: float64
qsc_code_frac_lines_long_string_quality_signal: float64
qsc_code_frac_chars_string_length_quality_signal: float64
qsc_code_frac_chars_long_word_length_quality_signal: float64
qsc_code_frac_lines_string_concat_quality_signal: float64
qsc_code_cate_encoded_data_quality_signal: float64
qsc_code_frac_chars_hex_words_quality_signal: float64
qsc_code_frac_lines_prompt_comments_quality_signal: float64
qsc_code_frac_lines_assert_quality_signal: float64
qsc_codepython_cate_ast_quality_signal: float64
qsc_codepython_frac_lines_func_ratio_quality_signal: float64
qsc_codepython_cate_var_zero_quality_signal: bool
qsc_codepython_frac_lines_pass_quality_signal: float64
qsc_codepython_frac_lines_import_quality_signal: float64
qsc_codepython_frac_lines_simplefunc_quality_signal: float64
qsc_codepython_score_lines_no_logic_quality_signal: float64
qsc_codepython_frac_lines_print_quality_signal: float64
qsc_code_num_words: int64
qsc_code_num_chars: int64
qsc_code_mean_word_length: int64
qsc_code_frac_words_unique: null
qsc_code_frac_chars_top_2grams: int64
qsc_code_frac_chars_top_3grams: int64
qsc_code_frac_chars_top_4grams: int64
qsc_code_frac_chars_dupe_5grams: int64
qsc_code_frac_chars_dupe_6grams: int64
qsc_code_frac_chars_dupe_7grams: int64
qsc_code_frac_chars_dupe_8grams: int64
qsc_code_frac_chars_dupe_9grams: int64
qsc_code_frac_chars_dupe_10grams: int64
qsc_code_frac_chars_replacement_symbols: int64
qsc_code_frac_chars_digital: int64
qsc_code_frac_chars_whitespace: int64
qsc_code_size_file_byte: int64
qsc_code_num_lines: int64
qsc_code_num_chars_line_max: int64
qsc_code_num_chars_line_mean: int64
qsc_code_frac_chars_alphabet: int64
qsc_code_frac_chars_comments: int64
qsc_code_cate_xml_start: int64
qsc_code_frac_lines_dupe_lines: int64
qsc_code_cate_autogen: int64
qsc_code_frac_lines_long_string: int64
qsc_code_frac_chars_string_length: int64
qsc_code_frac_chars_long_word_length: int64
qsc_code_frac_lines_string_concat: null
qsc_code_cate_encoded_data: int64
qsc_code_frac_chars_hex_words: int64
qsc_code_frac_lines_prompt_comments: int64
qsc_code_frac_lines_assert: int64
qsc_codepython_cate_ast: int64
qsc_codepython_frac_lines_func_ratio: int64
qsc_codepython_cate_var_zero: int64
qsc_codepython_frac_lines_pass: int64
qsc_codepython_frac_lines_import: int64
qsc_codepython_frac_lines_simplefunc: int64
qsc_codepython_score_lines_no_logic: int64
qsc_codepython_frac_lines_print: int64
effective: string
hits: int64
9ba44cd9d91cc8c729aafc0cddc794fc2187f3f9
| 25,015
|
py
|
Python
|
lizardanalysis/calculations/aep_pep_test.py
|
JojoReikun/ClimbingLizardDLCAnalysis
|
6cc38090217a3ffd4860ef6d06ba7967d3c10b7c
|
[
"MIT"
] | 1
|
2021-03-09T19:12:44.000Z
|
2021-03-09T19:12:44.000Z
|
lizardanalysis/calculations/aep_pep_test.py
|
JojoReikun/ClimbingLizardDLCAnalysis
|
6cc38090217a3ffd4860ef6d06ba7967d3c10b7c
|
[
"MIT"
] | null | null | null |
lizardanalysis/calculations/aep_pep_test.py
|
JojoReikun/ClimbingLizardDLCAnalysis
|
6cc38090217a3ffd4860ef6d06ba7967d3c10b7c
|
[
"MIT"
] | null | null | null |
def aep_pep_test(**kwargs):
"""
Calculates two different things:
1.) The x and y coordinates of the AEP and PEP, relative to the coxa of a respective leg
2.) The swing phases and the stance phases, identifying on a frame by frame basis
Return: results data frame with 30 key value pairs:
x6 allocation of swing and stance phases for each foot/leg
x6 x coordinates of AEP for each foot/leg
x6 y coordinates for AEP for each foot/leg
x6 x coordinates for PEP for each foot/leg
x6 y coordinates for PEP for each foot/leg
"""
import os.path
import pandas as pd
import numpy as np  # 'from pandas import np' is no longer available in current pandas versions
from pathlib import Path
from lizardanalysis.utils import animal_settings
from scipy import signal
import math
# print("footfall_by_switches")
# define necessary **kwargs:
data = kwargs.get('data')
data_rows_count = kwargs.get('data_rows_count')
config = kwargs.get('config')
filename = kwargs.get('filename')
likelihood = kwargs.get('likelihood')
animal = kwargs.get('animal')
df_result_current = kwargs.get('df_result_current')
# added in this so that you can get the estimated values from alpha
# so long as that column currently resides in the data frame
config_file = Path(config).resolve()
# result folder for footfall plots
step_detection_folder = os.path.join(str(config_file).rsplit(os.path.sep, 1)[0], "analysis-results",
"step_detection")
# create file path for foot fall pattern diagrams
plotting_footfall_folder = os.path.join(step_detection_folder, "footfall-pattern-diagrams")
# TODO: instead of hard-coding the feet and the three points for body_motion,
# TODO: let the user choose based on labels available in DLC result file: Choose feet & choose body motion
scorer = data.columns[1][0]
feet = animal_settings.get_list_of_feet(animal)
relative = False
plotting_footfall_patterns = True
# define cut-off value -> crops X% of frames on each side of video
p_cut_off = 0.05
body_motion = {"frame": [], "mean_motion_x": []}
abdomen_diff = 0
head_diff = 0
# assuming that the body from the head to the abdomen is rigid?
# this for loop is used to calculate the x coordinate difference between a given frame and the previous
# and therefore gives an indicator of the direction of motion
# if the [row] - [row-1] > 0 , then the stick insect is moving to the right
# if the [row] - [row-1] < 0, then the stick insect is moving to the left
for row in range(1, data_rows_count):
if data.loc[row][scorer, "head", 'likelihood'] >= likelihood and data.loc[row - 1][
scorer, "head", 'likelihood'] >= likelihood:
head_diff = data.loc[row][scorer, "head", 'x'] - data.loc[row - 1][scorer, "head", 'x']  # x-coordinate difference for the head label
if data.loc[row][scorer, "abdomen", 'likelihood'] >= likelihood and data.loc[row - 1][
scorer, "abdomen", 'likelihood'] >= likelihood:
abdomen_diff = data.loc[row][scorer, "abdomen", 'x'] - data.loc[row - 1][scorer, "abdomen", 'x']  # x-coordinate difference for the abdomen label
body_motion["frame"].append(row - 1)
body_motion["mean_motion_x"].append(abs((head_diff + abdomen_diff) / 2.0))
# taking the absolute value because, if the stick insect walks to the left, you don't want to
# switch which sign change indicates swing/PEP and which sign change indicates stance/AEP.
# taking the average of the differences, to determine the average 'speed' i.e. the displacement over one frame of the whole body
# one class instance and one result array for every foot, since every foot needs its own counter
calculators = {}
results = {}
# for every foot, need to do within the original for loop, so all foot calculations are performed for a given frame
foot_motions = {}
rel_foot_motions = {}
# left the for loop for the body motion, and will now be working with for loops for the foot motion
for foot in feet:
foot_motions[f"{foot}"] = []
rel_foot_motions[f"rel_{foot}"] = []
# if the [row] - [row-1] > 0 , then the stick insect FOOT is moving to the right
# if the [row] - [row-1] < 0, then the stick insect FOOT is moving to the left
# taking absolute values for the body and foot motions avoids issues with direction (?)
foot_motion = 0
for row in range(1, data_rows_count):
if data.loc[row][scorer, f"{foot}", 'likelihood'] >= likelihood and data.loc[row - 1][scorer,
f"{foot}",'likelihood'] >= likelihood:
foot_motion = abs(data.loc[row][scorer, f"{foot}", 'x'] - data.loc[row - 1][
scorer, f"{foot}", 'x'])
foot_motions[f"{foot}"].append(foot_motion)
rel_foot_motions[f"rel_{foot}"].append(foot_motion - body_motion['mean_motion_x'][row - 1])
else:
# likelihood check failed: fall back on the most recent foot_motion value so both lists stay aligned with body_motion
foot_motions[f"{foot}"].append(foot_motion)
rel_foot_motions[f"rel_{foot}"].append(foot_motion - body_motion['mean_motion_x'][row - 1])
# now need to store the body motion data, the foot motion data, and the relative foot motion all in a dataframe
# this dataframe within the loop is only for one foot
dict_df = {'body_motion': body_motion['mean_motion_x'], 'foot_motion': foot_motions[f"{foot}"],
"rel_foot_motion": rel_foot_motions[f"rel_{foot}"]}
print(dict_df)
df = pd.DataFrame.from_dict(dict_df)
intersections = smooth_and_plot(df, data_rows_count, p_cut_off, relative, foot, filename,
step_detection_folder)
######################################################################################################################
# the smooth_and_plot function returns 'intersection_dict'
# intersection dict is: {"idx":[], "sign":[]}
# idx = the idx of the number list/array of differences in the sign, only storing when the differences are non-zero
# sign = stores the sign of the number associated with the index of the non zero number
# positive => start of swing =>PEP
# negative => start of stance => AEP
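# illustrative example of the returned structure (values are hypothetical):
# intersections_dict = {"idx": [87, 132, 170], "sign": [1.0, -1.0, 1.0]}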
# gives the alpha_estimation values for the
rom_list = [col for col in df_result_current.columns if ("rom_angle_{}".format(foot) in col)]
aep_pep_angle = []
# for loop will calculate the angle that defines the femur-coxa vector relative to the normal
# to the body axis, running through the coxa of the foot of interest
for angle in range(len(rom_list)):
aep_pep_angle.append(90 - angle)
foot_chars = list(foot)
f_t_joint_lpx = []
f_t_joint_lpy = []
t_c_joint_lpx = []
t_c_joint_lpy = []
# low pass filter application of the coordinate data alone?
# is this necessary
b, a = signal.butter(3, 0.1, btype='lowpass', analog=False)
f_t_joint_lpx = signal.filtfilt(b, a,
(data.loc[:, (scorer, "{}m{}".format(foot_chars[0], foot_chars[1]), "x")]))
f_t_joint_lpy = signal.filtfilt(b, a,
(data.loc[:, (scorer, "{}m{}".format(foot_chars[0], foot_chars[1]), "y")]))
t_c_joint_lpx = signal.filtfilt(b, a,
(data.loc[:, (scorer, "{}b{}".format(foot_chars[0], foot_chars[1]), "x")]))
t_c_joint_lpy = signal.filtfilt(b, a,
(data.loc[:, (scorer, "{}b{}".format(foot_chars[0], foot_chars[1]), "y")]))
# ensuring that the values for the keys are defined as arrays, so that you can append for the
# following for loop
results_aep = {"{}_x".format(foot): [], "{}_y".format(foot): []}
results_pep = {"{}_x".format(foot): [], "{}_y".format(foot): []}
for i in range(2, data_rows_count):
if i - 2 in intersections["idx"]:
# atm just leaving the likelihood check
# is it worth doing, considering the alpha angles depended on those likelihoods anyway?
# so you would be just checking the same likelihood even though
# now calculating the Euclidean distance between the coxa label and the femur label
f_t_joint_co = (f_t_joint_lpx[i], f_t_joint_lpy[i])
t_c_joint_co = (t_c_joint_lpx[i], t_c_joint_lpy[i])
distance = np.sqrt(
(f_t_joint_co[0] - t_c_joint_co[0]) ** 2 + (f_t_joint_co[1] - t_c_joint_co[1]) ** 2)
# calibrate distance with conversion factor
# NEED TO WRITE THE CONVERSION FACTOR!
distance_calib = distance # / conv_fac
# results_aep = {}
# results_pep = {}
if intersections["sign"][i - 2] > 0:
# this means you are transitioning to the swing phase, so should be PEP
results_pep[f"{foot}_x"].append((math.cos(aep_pep_angle[i]) * distance_calib))
results_pep[f"{foot}_y"].append((math.sin(aep_pep_angle[i]) * distance_calib))
if intersections["sign"][i - 2] < 0:
# this means you are transitioning to the stance phase so should be aep
results_aep[f"{foot}_x"].append((math.cos(aep_pep_angle[i]) * distance_calib))
results_aep[f"{foot}_y"].append((math.sin(aep_pep_angle[i]) * distance_calib))
# therefore should now have two dictionaries that contain the x coordinates and the y coordinates
# of the aep and the pep for each foot
# one aep value and one pep value per stepping cycle
#####################################################################################################################
# initializes class instance for every foot and empty result dict to be filled with the swing and stance phases:
calculators[foot] = StridesAndStances()
# "S10" = string of 10 characters: stance/stride + counter 000n
results[foot] = calculators[foot].determine_stride_phases(intersections, data_rows_count)
# rename dictionary keys of results
results = {'stepphase_' + key: value for (key, value) in results.items()}
results_aep = {"AEP_" + key: value for (key, value) in results_aep.items()}
results_pep = {"PEP_" + key: value for (key, value) in results_pep.items()}
# print("results: ", results)
if plotting_footfall_patterns:
""" plots a foot fall pattern diagram for every DLC result csv file/every lizard run """
plot_footfall_pattern(results, data_rows_count, filename, plotting_footfall_folder)
## need to add the result of the code here!
# last step must be combining the three results dictionaries
results.update(results_aep)
results.update(results_pep)
return results
# shouldn't matter whether the stick insect walks in a straight horizontal line or not, because you're only looking at
# the switch in the direction of movement
# therefore, as long as the insect doesn't walk completely vertically suddenly, then the algorithm should still work
def smooth_and_plot(df, data_rows_count, p_cut_off, relative, foot, filename, step_detection_folder, plotting=True):
# smoothing of the raw input data from foot motion and body motion, using a Butterworth low-pass filter and a Savitzky-
# Golay smoothing algorithm. Then, the intersection points are computed between the smoothed body and foot curves
# If relative is TRUE: body motion is already subtracted from the foot motion, hence foot is relative to the x-axis
# If relative is FALSE: the intersection of the foot motion and body motion data curves needs to be determined
import numpy as np
import matplotlib.pyplot as plt
from scipy import signal
import os
import errno
# savgol filter smoothing window (must be odd!)
smooth_wind = 13
x_cut_off_value = int(round(data_rows_count * p_cut_off, 0))
x = np.linspace(0, data_rows_count - 1, data_rows_count - 1)
b, a = signal.butter(3, 0.1, btype='lowpass', analog=False)
x_cut_off = np.linspace(x_cut_off_value, data_rows_count - 1, int(data_rows_count - 1 - x_cut_off_value))
if plotting == True:
# initiate plot
plt.figure()
plt.axvline(x_cut_off_value, color='black', label='cutoff 0.05%')
if relative == True:
"""Uses the relative foot motion i.e. the foot motion where body motion has been subtracted"""
rel_foot_motion_low_passed = signal.filtfilt(b, a, df['rel_foot_motion'])
# smooth curves with Savitzky-Golay filter:
y_foot_rel = df.loc[x_cut_off_value:, 'rel_foot_motion']
y_foot_rel_lp = rel_foot_motion_low_passed[x_cut_off_value:] # two different types of filtering (?)
# smooth without the low pass filter
y_foot_rel_smoothed = signal.savgol_filter(y_foot_rel, smooth_wind, 3)
# smooth with the low pass filter
y_foot_rel_lp_smoothed = signal.savgol_filter(y_foot_rel_lp, smooth_wind, 3)
x_axis_f = np.zeros(data_rows_count - 1 - x_cut_off_value)
# get the indexes of the frames where you are transitioning from swing -> stance or stance -> swing
idx = np.argwhere(np.diff(np.sign(x_axis_f - y_foot_rel_smoothed))).flatten()
intersections_dict = {"idx": [], "sign": []}
for i in idx:
intersections_dict["idx"].append(i)
intersections_dict["sign"].append(np.sign(x_axis_f[i] - y_foot_rel_smoothed[i]))
intersections_dict["idx"] = [b + x_cut_off_value for b in intersections_dict['idx']]
if plotting == True:
df['rel_foot_motion'].plot(color='#f5c242') # plot_rel_foot
plt.plot(x, rel_foot_motion_low_passed, color='green', label='rel_foot_motion low pass (lp) filter')
plt.plot(x_cut_off, y_foot_rel_smoothed, color='red', label='rel_foot_motion_smoothed')
plt.plot(x_cut_off, y_foot_rel_lp_smoothed, color='lightgreen', label='rel_foot_motion_lp_smoothed')
plt.plot(x_cut_off[idx], y_foot_rel_lp_smoothed[idx], 'ko') # plot intersection points
# edit here -> second argument was changed from x_axis_f to y_foot_rel_lp_smoothed
for i in range(len(intersections_dict['idx'])):
plt.annotate(intersections_dict['idx'][i],
(x_cut_off[intersections_dict['idx'][i] - x_cut_off_value] - 5,
y_foot_rel_lp_smoothed[intersections_dict['idx'][i] - x_cut_off_value] + 3))
# another edit here?
else:
"""
Uses the foot motion and the body motion and computes the intersection points for the smoothed curves.
Intersection points for the lizard standing (bodymotion -> 0) will get excluded by using a body-motion threshold
of 10% of max(body_motion_lp_smoothed).
"""
# lowpass filter for body motion
body_motion_low_passed = signal.filtfilt(b, a, df['body_motion'])
# lowpass filter for foot motion
foot_motion_low_passed = signal.filtfilt(b, a, df['foot_motion'])
# smooth curves:
y_body = df.loc[x_cut_off_value:, 'body_motion']
y_body_lp = body_motion_low_passed[x_cut_off_value:]
y_foot = df.loc[x_cut_off_value:, 'foot_motion']
y_foot_lp = foot_motion_low_passed[x_cut_off_value:]
# smooth original body motion without low pass filter
y_body_smoothed = signal.savgol_filter(y_body, 51, 3)
# smooth low-pass-filtered body motion
y_body_lp_smoothed = signal.savgol_filter(y_body_lp, 17, 3)
# smooth original foot motion without low pass filter
y_foot_smoothed = signal.savgol_filter(y_foot, 17, 3)
# smooth low-pass-filtered rel foot motion
y_foot_lp_smoothed = signal.savgol_filter(y_foot_lp, 17, 3)
# compute and plot intersection points:
idx = np.argwhere(np.diff(np.sign(y_body_lp_smoothed - y_foot_lp_smoothed))).flatten()
intersections_dict = {"idx": [], "sign": []}
max_body_motion = max([abs(max(y_body_lp_smoothed)), abs(min(y_body_lp_smoothed))])
body_motion_stand = round(max_body_motion * 0.1, 2)
# print(f"max body motion: {max_body_motion}, 10%: {body_motion_stand}")
for i in idx:
# exclude all intersections which are within ±10% of max body motion (~standing)
if abs(y_body_lp_smoothed[i]) >= body_motion_stand:
intersections_dict["idx"].append(i)
intersections_dict["sign"].append(np.sign(y_body_lp_smoothed[i] - y_foot_lp_smoothed[i]))
intersections_dict['idx'] = [b + x_cut_off_value for b in intersections_dict['idx']]
# print("x intersections: ", intersections_dict)
# remove intersection points when lizard has stopped walking (usually in the end):
# intersections_dict = remove_standing_intersections(intersections_dict, y_body_lp_smoothed, y_foot_lp_smoothed)
if plotting == True:
df['body_motion'].plot(color='#3089db') # plot body motion
df['foot_motion'].plot(color='#d68f00') # plot foot motion
plt.plot(x, body_motion_low_passed, color='lightblue', label='body_motion low pass (lp) filter')
plt.plot(x, foot_motion_low_passed, color='green', label='foot_motion low pass (lp) filter')
plt.plot(x_cut_off, y_body_smoothed, color='#160578', label='body_motion_smoothed')
plt.plot(x_cut_off, y_foot_smoothed, color='red', label='foot_motion_smoothed')
plt.plot(x_cut_off, y_body_lp_smoothed, color='#9934b3', label='body_motion_lp_smoothed')
plt.plot(x_cut_off, y_foot_lp_smoothed, color='lightgreen', label='foot_motion_lp_smoothed')
plt.plot(x_cut_off[idx], y_body_lp_smoothed[idx], 'ko') # plot intersection points
for i in range(len(intersections_dict['idx'])):
plt.annotate(intersections_dict['idx'][i],
(x_cut_off[intersections_dict['idx'][i] - x_cut_off_value] - 5,
y_body_lp_smoothed[intersections_dict['idx'][i] - x_cut_off_value] + 3))
if plotting == True:
# set y-limits, add legend and display plots
plt.axhline(0, color='black')
plt.ylim(-30, 30)
plt.legend()
plt.xlabel('frames')
plt.ylabel('dx/frame')
filename_title = filename.split("_", 2)[:2]
filename_title = filename_title[0] + filename_title[1]
plt.title(f"{filename_title}-{foot}")
# plt.show()
try:
os.makedirs(step_detection_folder)
# print("folder for curve_fitting plots created")
except OSError as e:
if e.errno != errno.EEXIST:
raise
if relative == True:
plt.savefig(os.path.join(step_detection_folder, f"steps_{filename_title}_{foot}_rel.pdf"))
else:
plt.savefig(os.path.join(step_detection_folder, f"steps_{filename_title}_{foot}.pdf"))
# plt.show()
plt.close()
return intersections_dict
## removed the unused function, might need to put back in at some point
class StridesAndStances:
"""
class to detect stride and stance phases for current feet => initialize class instance for every foot.
This method iterates through all frames, if the current frame is one of the intersection points, the sign of the
point will be checked. If the sign is positive the phase will be set to swing and the stride_phase_counter increased
by 1. All frames until the next intersection will be assigned that phase name and number.
Rows before and after first and last index respectively will be filled with np.nan.
"""
def __init__(self):
import numpy as np
self.stride_phase_counter = 0
self.stance_phase_counter = 0
self.phase = 'UNKNOWN'
self.current_phase = np.nan
def determine_stride_phases(self, intersection_dict, data_rows_count):
"""
Function to detect the swing or stance phases using the intersection points and their signs.
Return: list with one entry for every row.
"""
import numpy as np
# create empty list with length of data rows count:
results = np.full((data_rows_count,), '', dtype='S10')
index = 0
for row in range(data_rows_count):
# switch swing or stance depending on sign of intersection point
if row in intersection_dict['idx']:
index = intersection_dict['idx'].index(row) # find the index in list of current idx
sign = intersection_dict['sign'][index] # find the respective sign
# if sign is positive, the phase till next idx will be swing
self.current_phase = self.assign_swing_or_stance(sign)
# fill all rows until next idx with that swing or stance number
results[row] = self.current_phase
# fill all rows after last idx with np.nan
if index != 0:
results[intersection_dict['idx'][index]:] = np.nan
# print("results: ", results)
return results
# Todo: Go through intersection_dict and assign correct swing or stance phase for every row
def assign_swing_or_stance(self, sign):
if sign > 0: # swing
if self.phase == 'stance' or self.phase == 'UNKNOWN':
self.stride_phase_counter += 1
self.phase = 'swing' # originally called stride
retval = f'swing{self.stride_phase_counter:04d}'
else: # stance
if self.phase == 'swing' or self.phase == 'UNKNOWN':
self.stance_phase_counter += 1
self.phase = 'stance'
retval = f'stance{self.stance_phase_counter:04d}'
return retval
def __str__(self):
return f"swings: {self.stride_phase_counter}, stances: {self.stance_phase_counter}"
def plot_footfall_pattern(results, data_rows_count, filename, plotting_footfall_folder):
"""
takes the result dataframe and creates a new dataframe for plotting. Every foot gets assigned an individual number.
The dataframe is then filtered for strings containing "stance"; the stances get replaced by the respective number,
while all strides will be NaN.
In the plot stances are therefore displayed as bars and strides are empty.
"""
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.lines import Line2D
from matplotlib.patches import Patch
import os
import errno
df_plot = pd.DataFrame(columns=results.keys(), index=range(data_rows_count))
# filter here and only fill in stances as numbers => stances bars, strides white
for i, key in enumerate(results):
df_plot[key] = [i + 1 if s.startswith(b'stance') else np.NaN for s in results[key]]
key_list = [key for key in df_plot.columns]
colors = False
if colors:
cmap = plt.cm.coolwarm
legend_elements = [Line2D([0], [0], color=cmap(0.), lw=4, label=key_list[0]),
Line2D([0], [0], color=cmap(.33), lw=4, label=key_list[1]),
Line2D([0], [0], color=cmap(.66), lw=4, label=key_list[2]),
Line2D([0], [0], color=cmap(1.), lw=4, label=key_list[3]),
Line2D([0], [0], color='black', lw=4, label='stance phases'),
Line2D([0], [0], color='white', lw=4, label='stride phases')]
fig, ax = plt.subplots()
df_plot.plot(linewidth=10, color=cmap(np.linspace(0, 1, 5)), ax=ax)
ax.legend(handles=legend_elements)
else:
legend_elements = [Line2D([0], [0], color='white', lw=1, label='1 = FL | 2 = FR | 3 = HR | 4 = HL'),
Line2D([0], [0], color='black', lw=4, label='stance phases'),
Line2D([0], [0], color='white', lw=4, label='stride phases')]
fig, ax = plt.subplots()
df_plot.plot(linewidth=10, color='black', ax=ax)
ax.legend(handles=legend_elements)
# saves footfall pattern diagrams as pdf in defined result folder. If folder is not extant yet, it will be created
try:
os.makedirs(plotting_footfall_folder)
except OSError as e:
if e.errno != errno.EEXIST:
raise
plt.savefig(os.path.join(plotting_footfall_folder, "{}.pdf".format(filename)))
plt.clf()
plt.close()
| 49.534653
| 136
| 0.625625
| 3,469
| 25,015
| 4.327472
| 0.163159
| 0.025313
| 0.013056
| 0.013589
| 0.355582
| 0.298428
| 0.252864
| 0.210631
| 0.17526
| 0.161338
| 0
| 0.011959
| 0.267959
| 25,015
| 504
| 137
| 49.632937
| 0.807831
| 0.318729
| 0
| 0.229323
| 0
| 0
| 0.092941
| 0.021352
| 0
| 0
| 0
| 0.003968
| 0
| 1
| 0.026316
| false
| 0.041353
| 0.078947
| 0.003759
| 0.12782
| 0.003759
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9ba99aa02744fe90eebce52ab7ecf4ce0854c775
| 1,367
|
py
|
Python
|
Medium/918. Maximum Sum Circular Subarray/solution (1).py
|
czs108/LeetCode-Solutions
|
889f5b6a573769ad077a6283c058ed925d52c9ec
|
[
"MIT"
] | 3
|
2020-05-09T12:55:09.000Z
|
2022-03-11T18:56:05.000Z
|
Medium/918. Maximum Sum Circular Subarray/solution (1).py
|
czs108/LeetCode-Solutions
|
889f5b6a573769ad077a6283c058ed925d52c9ec
|
[
"MIT"
] | null | null | null |
Medium/918. Maximum Sum Circular Subarray/solution (1).py
|
czs108/LeetCode-Solutions
|
889f5b6a573769ad077a6283c058ed925d52c9ec
|
[
"MIT"
] | 1
|
2022-03-11T18:56:16.000Z
|
2022-03-11T18:56:16.000Z
|
# 918. Maximum Sum Circular Subarray
# Runtime: 1028 ms, faster than 5.09% of Python3 online submissions for Maximum Sum Circular Subarray.
# Memory Usage: 18.6 MB, less than 33.98% of Python3 online submissions for Maximum Sum Circular Subarray.
import math
class Solution:
def maxSubarraySumCircular(self, nums: list[int]) -> int:
def max_one_interval() -> int:
curr_max_sum, max_sum = -math.inf, -math.inf
for x in nums:
curr_max_sum = max(x, curr_max_sum + x)
max_sum = max(max_sum, curr_max_sum)
return max_sum
def max_two_interval() -> int:
right_sums = [-math.inf] * len(nums)
right_sums[-1] = nums[-1]
for i in range(len(nums) - 2, -1, -1):
right_sums[i] = right_sums[i + 1] + nums[i]
max_right_sums = [-math.inf] * len(nums)
max_right_sums[-1] = right_sums[-1]
for i in range(len(nums) - 2, -1, -1):
max_right_sums[i] = max(max_right_sums[i + 1], right_sums[i])
max_sum = -math.inf
left_sum = 0
for i in range(len(nums) - 2):
left_sum += nums[i]
max_sum = max(max_sum, left_sum + max_right_sums[i + 2])
return max_sum
return max(max_one_interval(), max_two_interval())
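# Hypothetical usage (example values, not part of the original submission):
# Solution().maxSubarraySumCircular([5, -3, 5])    # -> 10 (wrap-around subarray [5, 5])
# Solution().maxSubarraySumCircular([1, -2, 3, -2])  # -> 3 (single-interval subarray [3])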
| 35.973684
| 106
| 0.567666
| 199
| 1,367
| 3.683417
| 0.266332
| 0.098226
| 0.081855
| 0.106412
| 0.3397
| 0.298772
| 0.236016
| 0.210096
| 0.210096
| 0.060027
| 0
| 0.036677
| 0.321873
| 1,367
| 38
| 107
| 35.973684
| 0.754045
| 0.175567
| 0
| 0.16
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.12
| false
| 0
| 0.04
| 0
| 0.32
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9ba9d75f770e59ab5f8bd4c1745fa1e171a92981
| 10,644
|
py
|
Python
|
testing.py
|
gustxsr/learning-with-assemblies
|
4158829adf4500a9ae868ca7c64ffef90753c66b
|
[
"MIT"
] | null | null | null |
testing.py
|
gustxsr/learning-with-assemblies
|
4158829adf4500a9ae868ca7c64ffef90753c66b
|
[
"MIT"
] | null | null | null |
testing.py
|
gustxsr/learning-with-assemblies
|
4158829adf4500a9ae868ca7c64ffef90753c66b
|
[
"MIT"
] | null | null | null |
import numpy as np
import matplotlib.pyplot as plt
from scipy.signal import convolve
from matplotlib.gridspec import GridSpec
import matplotlib as mpl
rng = np.random.default_rng()
def k_cap(input, cap_size):
"""
Given an input vector (or a batch of row vectors), returns a 0/1 array
marking the highest cap_size entries
"""
output = np.zeros_like(input)
if len(input.shape) == 1:
idx = np.argsort(input)[-cap_size:]
output[idx] = 1
else:
idx = np.argsort(input, axis=-1)[:, -cap_size:]
np.put_along_axis(output, idx, 1, axis=-1)
return output
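# Minimal illustration (hypothetical values): k_cap(np.array([0.2, 0.9, 0.1, 0.5]), 2)
# returns array([0., 1., 0., 1.]) -- only the two largest entries are set to 1.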
class brain_region:
"""
Creates a brain region from assembly calculus
"""
def __init__(self, n_neurons , n_in, cap_size, id: int ) -> None:
"""
Creates a brain region with n_neurons neurons that receives n_in inputs and keeps the top cap_size activations.
"""
self.id=id
self.n_neurons=n_neurons
self._n_in=n_in
self.cap_size=cap_size
mask = np.zeros((self.n_neurons, self.n_neurons), dtype=bool) # NXN array of zeros
W = np.zeros((self.n_neurons, self.n_neurons))
mask_a = np.zeros((self._n_in, self.n_neurons), dtype=bool) # image to N matrix
A = np.zeros((self._n_in, self.n_neurons))
mask = (rng.random((self.n_neurons, self.n_neurons)) < sparsity) & np.logical_not(np.eye(n_neurons, dtype=bool)) # Creating matrix from N to B with no entries in the diagonal
W = np.ones((self.n_neurons, self.n_neurons)) * mask
W /= W.sum(axis=0) # Transition probabiliy matrix
mask_a = rng.random((self._n_in, self.n_neurons)) < sparsity
A = np.ones((self._n_in, self.n_neurons)) * mask_a
A /= A.sum(axis=0)
W = np.ones_like(W) * mask
A = np.ones_like(A) * mask_a
W /= W.sum(axis=0, keepdims=True)
A /= A.sum(axis=0, keepdims=True)
self._W=W
self._A=A
self.mask=mask
self.mask_a=mask_a
self.act_h = np.zeros(self.n_neurons)
self.bias = np.zeros(self.n_neurons)
self.b = -1
self.classify_act_h=np.zeros(self.n_neurons)
def next(self, input, initial=False, final=False ):
"""
It computes the activation output of the input going
through this brain region.
"""
if initial:
self.act_h = np.zeros(self.n_neurons)
act_h_new = k_cap(self.act_h @ self._W + input @ self._A + self.bias, self.cap_size)  # 0/1 vector of length n_neurons marking the activated neurons; the first term is recurrent self-activation, the second comes from the input
self._A[(input > 0)[:, np.newaxis] & (act_h_new > 0)[np.newaxis, :]] *= 1 + beta
self._W[(self.act_h > 0)[:, np.newaxis] & (act_h_new > 0)[np.newaxis, :]] *= 1 + beta
self.act_h = act_h_new
if final:
self.reinforce_bias()
print("Shape of act_h:"+str(self.act_h.shape))
return self.act_h.copy()
def reinforce_bias(self):
"""
This function is meant to be called at the end of each round to renormalize the transition matrices
"""
self.bias[self.act_h>0] += -1  # after all rounds, lower the bias of the neurons that ended up active
self._A /= self._A.sum(axis=0, keepdims=True)
self._W /= self._W.sum(axis=0, keepdims=True)
def classify(self,input, n_examples, initial=False, ):
if initial:
self.classify_act_h=np.zeros((n_examples , self.n_neurons))
self.classify_act_h=k_cap(self.classify_act_h @ self._W + input @ self._A + self.bias, self.cap_size)
return self.classify_act_h
class assembly_network:
"""
This class is meant to implement the assembly calculus structure
This generalizes for multiple inputs and brain regions
"""
def __init__(self, number_of_inputs: int , sparsity:int, layers: list, beta: float) -> None:
"""
Initializes the structure of the Brain Region. It takes the number of inputs and then a list for layers that should contain tuples of the form (neurons, cap_size).
"""
self.n_in = number_of_inputs # Vector of 28X28 pixels
# List with pairs of tuples (n, c) where n is the number of neurons and c is the size of the cap
self.create_layers(layers) # Creates all the structure for the brain regions
self.sparsity = sparsity
self.beta =beta
def create_layers(self, layers)-> None:
"""
Creates brain regions according to the list from layers
The layers list should contain tuples of the form (number of neurons, cap size)
"""
self.layers=[]
temp=self.n_in+0
for k, (neurons, cap_size) in enumerate(layers):
self.layers.append(brain_region(neurons, temp, cap_size, k))
temp=neurons+0
def next(self, input: np.array, initial=False, final=False ):
"""
During the training process, it puts the input
through the network and it runs it through all the layers
"""
temp=input
print(self.layers)
for k , brain_region_k in enumerate(self.layers):
new_temp=brain_region_k.next(temp, initial=initial, final=final)
temp=new_temp
return temp
def classify(self,input, initial=False ):
temp=input
for brain_region in self.layers:
print("temp shape"+str(temp.shape))
temp=brain_region.classify(temp, input.shape[0], initial)
return temp
class classification_mnist:
def __init__(self, kernels: list ,train_path: str, test_path: str, number_of_inputs: int , sparsity:int, layers: list, beta: float ) :
"""
Creates a MNIST recognition architecture based on assembly calculus
"""
self.cap_size=layers[-1][1]
self.n_neurons= layers[-1][0]
self.n_in=number_of_inputs
self.n_examples = n_examples  # assumes the module-level n_examples defined below (as sparsity and beta are used elsewhere); create_training_data/create_testing_data expect self.n_examples to exist
self.assembly_network=assembly_network(number_of_inputs, sparsity, layers, beta , )
self.get_files( train_path, test_path)
self.create_training_data(kernels)
self.create_testing_data(kernels)
def create_training_data(self ,kernels= [np.ones((1, 3, 3))] ):
"""
Creates a data set with n_examples from the files obtained
by get_files
"""
self.train_examples = []
for kernel in kernels:
self.train_examples.append(np.zeros((10, self.n_examples, 784)))
for i in range(10):
# Convolves each image of this digit with the given kernel (by default an all-ones 3x3 kernel)
self.train_examples[-1][i] = k_cap(convolve(self.train_imgs[self.train_labels == i][:self.n_examples].reshape(-1, 28, 28), kernel, mode='same').reshape(-1, 28 * 28), self.cap_size)
def create_testing_data(self ,kernels= [np.ones((1, 3, 3))] ):
"""
Creates a data set with n_examples from the files obtained
by get_files
"""
self.test_examples = []
for kernel in kernels:
self.test_examples.append( np.zeros((10, self.n_examples, 784) ))
for i in range(10):
# Convolves each image of this digit with the given kernel (by default an all-ones 3x3 kernel)
self.test_examples[-1][i] = k_cap(convolve(self.test_imgs[self.test_labels == i][:self.n_examples].reshape(-1, 28, 28), kernel, mode='same').reshape(-1, 28 * 28), self.cap_size)
def get_files(self, train_path: str, test_path: str)-> None:
"""
Given two paths, retrieves the data stored at those paths. train_path should be the path of the training data
and test_path should be the path of the test data.
Assumes the data at both paths is in CSV format.
"""
test_data = np.loadtxt(test_path, delimiter=',')
train_data = np.loadtxt(train_path, delimiter=',')
self.train_imgs = train_data[:, 1:]
self.train_imgs.shape
self.test_imgs = test_data[:, 1:]
self.train_labels = train_data[:, 0]
self.test_labels = test_data[:, 0]
def train_model(self, n_rounds)-> np.array:
"""
Given the number of rounds (images that will be shown to the model)
The program runs and trains the edge weights for the network.
"""
self.activations = np.zeros((10, n_rounds, self.n_neurons))
for i in range(10): # iterations for each of the labels
for j in range(n_rounds): # for each of the rounds
input = self.train_examples[0][i, j] # image inputs
act_h = self.assembly_network.next(input, initial=(j==0), final=(j==n_rounds-1))  # activation vector of the final layer for this frame: recurrent self-activation plus input drive (see brain_region.next)
self.activations[i, j] = act_h
return self.activations
def classify(self, n_rounds, test=True )-> dict:
"""
When called, this function runs one batch of data through
the whole network and then returns the per-class success rates
"""
if test:
examples=self.test_examples[0]
else:
examples=self.train_examples[0]
self.n_examples=examples.shape[1]
#### RUNS THROUGH NETWORK
outputs = np.zeros((10, n_rounds+1, self.n_examples, self.n_neurons))
for i in np.arange(10):
for j in range(n_rounds):
outputs[i, j+1] = self.assembly_network.classify(examples[i], initial= (j==0)) # run each one network for n_rounds and save the neurons active at each step
#### STARTS CLASSIFICATION
c = np.zeros((10, self.n_neurons))
for i in range(10):
c[i, outputs[i, 1].sum(axis=0).argsort()[-self.cap_size:]] = 1
predictions = (outputs[:, 1] @ c.T).argmax(axis=-1)
acc = (predictions == np.arange(10)[:, np.newaxis]).sum(axis=-1) / self.n_examples
return acc
n_in = 784 # Vector of 28X28 pixels
cap_size = 200 # Size of the cap
sparsity = 0.1
n_rounds = 10
n_examples=800
beta = 1e0
train_path="./data/mnist/mnist_train.csv"
layers=[ (2000,200)]# number of neurons in network with respective cap_size
test_path="./data/mnist/mnist_test.csv"
kernels=[np.ones((1, 3, 3))]
classify_two=classification_mnist(kernels,train_path,test_path, n_in , sparsity, layers, beta)
classify_two.train_model( 5)
print(classify_two.classify( 5, test=False))
| 37.087108
| 217
| 0.613209
| 1,541
| 10,644
| 4.079169
| 0.162881
| 0.031817
| 0.043907
| 0.015272
| 0.312281
| 0.288419
| 0.221444
| 0.200127
| 0.165447
| 0.156857
| 0
| 0.018678
| 0.280722
| 10,644
| 286
| 218
| 37.216783
| 0.802377
| 0.24643
| 0
| 0.12987
| 0
| 0
| 0.011966
| 0.007313
| 0
| 0
| 0
| 0
| 0
| 1
| 0.097403
| false
| 0
| 0.032468
| 0
| 0.194805
| 0.025974
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9bab281692147103f4b861c83d053ce8c6a1c16f
| 4,398
|
py
|
Python
|
src/chatstats.py
|
brendancsmith/cohort-facebook
|
a7b37d14b7152349930bc10f69cb72446d6c3581
|
[
"MIT"
] | null | null | null |
src/chatstats.py
|
brendancsmith/cohort-facebook
|
a7b37d14b7152349930bc10f69cb72446d6c3581
|
[
"MIT"
] | null | null | null |
src/chatstats.py
|
brendancsmith/cohort-facebook
|
a7b37d14b7152349930bc10f69cb72446d6c3581
|
[
"MIT"
] | null | null | null |
from collections import Counter, defaultdict
from datetime import datetime
from statistics import mean
from dateutil.parser import parse as parse_datetime
from dateutil import rrule
def num_comments_by_user(comments):
commenters = (comment['from']['name'] for comment in comments)
counter = Counter(commenters)
return counter
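# Each comment is expected to be a dict in the Facebook Graph API shape used throughout this module,
# e.g. (hypothetical values): {'from': {'name': 'Alice'}, 'message': 'hello', 'created_time': '2015-01-01T12:00:00+0000'}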
def percent_empty_comments_by_user(emptyComments, nonEmptyComments):
numEmptyCommentsByUser = num_comments_by_user(emptyComments)
numNonEmptyCommentsByUser = num_comments_by_user(nonEmptyComments)
# TODO: could break if a user doesn't have one type of comment
percentEmptyCommentsByUser = Counter()
for user in numNonEmptyCommentsByUser:
numEmpty = numEmptyCommentsByUser[user]
numTotal = numEmpty + numNonEmptyCommentsByUser[user]
percentEmptyCommentsByUser[user] = numEmpty / numTotal
return percentEmptyCommentsByUser
def num_comments_by_day(comments):
dts = datetimes(comments)
counter = Counter(dt.date() for dt in dts)
first_day = min(counter.keys())
last_day = datetime.now().date()
all_dates = (dt.date() for dt in rrule.rrule(rrule.DAILY,
dtstart=first_day,
until=last_day))
for date in all_dates:
if date not in counter:
counter[date] = 0
return counter
def avg_word_count_by_user(comments, default_word_count=1):
wordCountsByUser = defaultdict(list)
for comment in comments:
name = comment['from']['name']
words = None
if 'message' not in comment:
words = default_word_count
else:
words = len(comment['message'].split())
wordCountsByUser[name].append(words)
avgWordCountByUser = dict((user, mean(wordCounts))
for user, wordCounts in wordCountsByUser.items())
return avgWordCountByUser
def longest_comment_by_users(comments):
longestCommentByUser = defaultdict(int)
commentsByUser = defaultdict(list)
for comment in comments:
name = comment['from']['name']
commentsByUser[name].append(comment)
for name, comments in commentsByUser.items():
commentLengths = (len(comment['message']) for comment in comments)
maxCommentLength = max(commentLengths)
longestCommentByUser[name] = maxCommentLength
return longestCommentByUser
def word_count_by_day(comments):
wordCountsByDay = defaultdict(int)
for comment in comments:
timestamp = comment['created_time']
date = parse_datetime(timestamp).date()
words = len(comment['message'].split())
wordCountsByDay[date] += words
first_day = min(wordCountsByDay.keys())
last_day = datetime.now().date()
all_dates = (dt.date() for dt in rrule.rrule(rrule.DAILY,
dtstart=first_day,
until=last_day))
for date in all_dates:
if date not in wordCountsByDay:
wordCountsByDay[date] = 0
return wordCountsByDay
def daily_activity_by_user(comments):
first_day = min(parse_datetime(comment['created_time']).date() for comment in comments)
last_day = datetime.now().date()
all_dates = [dt.date() for dt in rrule.rrule(rrule.DAILY,
dtstart=first_day,
until=last_day)]
activityByUser = defaultdict(list)
for comment in comments:
user = comment['from']['name']
timestamp = comment['created_time']
date = parse_datetime(timestamp).date()
activityByUser[user].append(date)
make_blank_counter = lambda: Counter(dict(zip(all_dates, [0] * len(all_dates))))
dailyActivityByUser = {}
for user, activity in activityByUser.items():
dailyActivityByUser[user] = make_blank_counter()
dailyActivityByUser[user].update(activity)
return dailyActivityByUser
def datetimes(comments):
timestamps = (comment['created_time'] for comment in comments)
dts = map(parse_datetime, timestamps)
return dts
def corpus(comments):
messages = [comment['message'] for comment in comments
if 'message' in comment]
corpus = '\n'.join(messages)
return corpus
| 29.918367
| 91
| 0.648931
| 463
| 4,398
| 6.023758
| 0.226782
| 0.03227
| 0.038724
| 0.064539
| 0.260308
| 0.236285
| 0.199355
| 0.199355
| 0.199355
| 0.15848
| 0
| 0.001237
| 0.264666
| 4,398
| 146
| 92
| 30.123288
| 0.861163
| 0.013643
| 0
| 0.268041
| 0
| 0
| 0.028598
| 0
| 0
| 0
| 0
| 0.006849
| 0
| 1
| 0.092784
| false
| 0
| 0.051546
| 0
| 0.237113
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9bae71f7a1d534c3b03ab7c28df3edc847994f0b
| 2,125
|
py
|
Python
|
utils/lsms/compositional_histogram_cutoff.py
|
allaffa/HydraGNN
|
b48f75cd3fe1b0d03bae9af3e6bdc2bb29f8b9c6
|
[
"BSD-3-Clause"
] | 1
|
2022-01-30T16:50:51.000Z
|
2022-01-30T16:50:51.000Z
|
utils/lsms/compositional_histogram_cutoff.py
|
allaffa/HydraGNN
|
b48f75cd3fe1b0d03bae9af3e6bdc2bb29f8b9c6
|
[
"BSD-3-Clause"
] | 1
|
2022-02-03T11:45:53.000Z
|
2022-02-09T17:59:37.000Z
|
utils/lsms/compositional_histogram_cutoff.py
|
kshitij-v-mehta/HydraGNN
|
d27958270b2beb35f98e4403239e3c5c77ad4a04
|
[
"BSD-3-Clause"
] | null | null | null |
import os
import shutil
import numpy as np
from tqdm import tqdm
import matplotlib.pyplot as plt
def find_bin(comp, nbins):
bins = np.linspace(0, 1, nbins)
for bi in range(len(bins) - 1):
if comp > bins[bi] and comp < bins[bi + 1]:
return bi
return nbins - 1
def compositional_histogram_cutoff(
dir,
elements_list,
histogram_cutoff,
num_bins,
overwrite_data=False,
create_plots=True,
):
"""
Downselect LSMS data with maximum number of samples per binary composition.
"""
if dir.endswith("/"):
dir = dir[:-1]
new_dir = dir + "_histogram_cutoff/"
if os.path.exists(new_dir):
if overwrite_data:
shutil.rmtree(new_dir)
else:
print("Exiting: path to histogram cutoff data already exists")
return
if not os.path.exists(new_dir):
os.makedirs(new_dir)
comp_final = []
comp_all = np.zeros([num_bins])
for filename in tqdm(os.listdir(dir)):
path = os.path.join(dir, filename)
# This is LSMS specific - it assumes only one header line and only atoms following.
atoms = np.loadtxt(path, skiprows=1)
elements, counts = np.unique(atoms[:, 0], return_counts=True)
# Fixup for the pure component cases.
for e, elem in enumerate(elements_list):
if elem not in elements:
elements = np.insert(elements, e, elem)
counts = np.insert(counts, e, 0)
num_atoms = atoms.shape[0]
composition = counts[0] / num_atoms
b = find_bin(composition, num_bins)
comp_all[b] += 1
if comp_all[b] < histogram_cutoff:
comp_final.append(composition)
new_path = os.path.join(new_dir, filename)
os.symlink(path, new_path)
if create_plots:
plt.figure(0)
plt.hist(comp_final, bins=num_bins)
plt.savefig("composition_histogram_cutoff.png")
plt.figure(1)
w = 1 / num_bins
plt.bar(np.linspace(0, 1, num_bins), comp_all, width=w)
plt.savefig("composition_initial.png")
| 27.960526
| 91
| 0.610353
| 289
| 2,125
| 4.346021
| 0.363322
| 0.071656
| 0.017516
| 0.019108
| 0.028662
| 0
| 0
| 0
| 0
| 0
| 0
| 0.011251
| 0.288941
| 2,125
| 75
| 92
| 28.333333
| 0.819987
| 0.091294
| 0
| 0
| 0
| 0
| 0.066353
| 0.028736
| 0
| 0
| 0
| 0
| 0
| 1
| 0.035714
| false
| 0
| 0.089286
| 0
| 0.178571
| 0.017857
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9bb0067ad50b3ebfd94976cc78cce86faed75925
| 1,256
|
py
|
Python
|
PointMatcher/actions/export.py
|
daisatojp/PointMatcher
|
927bd4dd676b18da763ccaab2f429f27de281710
|
[
"MIT"
] | 2
|
2021-01-05T03:42:50.000Z
|
2022-03-16T07:17:02.000Z
|
PointMatcher/actions/export.py
|
daisatojp/PointMatcher
|
927bd4dd676b18da763ccaab2f429f27de281710
|
[
"MIT"
] | 4
|
2021-01-07T06:28:01.000Z
|
2021-01-18T11:59:56.000Z
|
PointMatcher/actions/export.py
|
daisatojp/PointMatcher
|
927bd4dd676b18da763ccaab2f429f27de281710
|
[
"MIT"
] | null | null | null |
import os.path as osp
from PyQt5.QtGui import QIcon
from PyQt5.QtWidgets import QAction
from PyQt5.QtWidgets import QFileDialog
from PointMatcher.utils.filesystem import icon_path
class ExportAction(QAction):
def __init__(self, parent):
super(ExportAction, self).__init__('Export', parent)
self.p = parent
self.setIcon(QIcon(icon_path('save')))
self.setShortcut('Ctrl+Alt+S')
self.triggered.connect(self.export)
self.setEnabled(False)
def export(self, _value=False):
if (self.p.annotDir is not None) and osp.exists(self.p.annotDir):
defaultDir = self.p.annotDir
elif (self.p.imageDir is not None) and osp.exists(self.p.imageDir):
defaultDir = self.p.imageDir
else:
defaultDir = '.'
defaultDir = self.p.settings.get('exportPath', defaultDir)
filters = 'json file (*.json)'
filename = QFileDialog.getSaveFileName(
self.p, 'choose file name to be exported', defaultDir, filters)
if filename:
if isinstance(filename, (tuple, list)):
filename = filename[0]
self.p.matching.export(filename)
self.p.settings['exportPath'] = osp.dirname(filename)
| 35.885714
| 75
| 0.642516
| 149
| 1,256
| 5.342282
| 0.442953
| 0.069095
| 0.048995
| 0.060302
| 0.065327
| 0.065327
| 0.065327
| 0.065327
| 0
| 0
| 0
| 0.004242
| 0.249204
| 1,256
| 34
| 76
| 36.941176
| 0.839873
| 0
| 0
| 0
| 0
| 0
| 0.071656
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.068966
| false
| 0
| 0.172414
| 0
| 0.275862
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9bb514fb57dd5b2a6965770909c4eb7274835dca
| 3,453
|
py
|
Python
|
secistsploit/modules/auxiliary/whatweb.py
|
reneaicisneros/SecistSploit
|
b4e1bb0a213bee39c3bb79ab36e03e19122b80c0
|
[
"MIT"
] | 15
|
2018-12-06T16:03:32.000Z
|
2021-06-23T01:17:00.000Z
|
secistsploit/modules/auxiliary/whatweb.py
|
reneaicisneros/SecistSploit
|
b4e1bb0a213bee39c3bb79ab36e03e19122b80c0
|
[
"MIT"
] | null | null | null |
secistsploit/modules/auxiliary/whatweb.py
|
reneaicisneros/SecistSploit
|
b4e1bb0a213bee39c3bb79ab36e03e19122b80c0
|
[
"MIT"
] | 6
|
2019-03-01T04:10:00.000Z
|
2020-02-26T08:43:54.000Z
|
# -*- coding: UTF-8 -*-
import os
from secistsploit.core.exploit import *
from secistsploit.core.http.http_client import HTTPClient
class Exploit(HTTPClient):
__info__ = {
"name": "whatweb",
"description": "whatweb",
"authors": (
"jjiushi",
),
"references": (
"www.422926799.github.io"
"www.422926799.github.io"
),
}
target = OptString("www.whatweb.net", "Target URl")
domain = OptString("", "Target domain or IP")
port = OptPort(443, "Target HTTP port")
files = OptString("", "Files to import")
iplist = OptString(
"", "Batch detection of IP segments, such as input like 1.1.1.")
def __init__(self):
self.endianness = "<"
def run(self):
rhost = (self.domain)
file = (self.files)
iplist = (self.iplist)
if rhost != '':
headers = {
'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.77 Safari/537.36',
'Connection': 'keep-alive',
'Content-Length': '383',
}
data = {
'target': '{}'.format(rhost),
}
response = self.http_request(
method="POST",
path="/whatweb.php",
headers=headers,
data=data,
)
if response:
print('[+] url:{}'.format(rhost))
print('[+] fingerprint:{}'.format(response.text))
if rhost == '' and file != '':
if os.path.exists(file):
print('[+] {} Open ok'.format(file))
else:
print('[-] {} Not Found'.format(file))
dk = open(file, 'r')
for rd in dk.readlines():
qc = "".join(rd.split('\n'))
headers = {
'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.77 Safari/537.36',
'Connection': 'keep-alive',
'Content-Length': '383',
}
data = {
'target': '{}'.format(qc),
}
response = self.http_request(
method="POST",
path="/whatweb.php",
headers=headers,
data=data,
)
if response:
print('[+] url:{}'.format(qc))
print('[+] fingerprint:{}'.format(response.text))
if rhost == '' and iplist != '':
for i in range(1, 255):
ip = iplist + str(i)
headers = {
'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.77 Safari/537.36',
'Connection': 'keep-alive',
'Content-Length': '383',
}
data = {
'target': '{}'.format(ip),
}
response = self.http_request(
method="POST",
path="/whatweb.php",
headers=headers,
data=data,
)
if response:
print('[+] url:{}'.format(ip))
print('[+] fingerprint:{}'.format(response.text))
| 34.188119
| 141
| 0.435274
| 316
| 3,453
| 4.708861
| 0.35443
| 0.020161
| 0.032258
| 0.046371
| 0.561828
| 0.538978
| 0.538978
| 0.538978
| 0.479839
| 0.479839
| 0
| 0.059175
| 0.417608
| 3,453
| 100
| 142
| 34.53
| 0.680756
| 0.006082
| 0
| 0.444444
| 0
| 0.033333
| 0.254811
| 0.013411
| 0
| 0
| 0
| 0
| 0
| 1
| 0.022222
| false
| 0
| 0.044444
| 0
| 0.144444
| 0.088889
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9bb942cefeb3547baf593097bb2c4998d052f1b8
| 3,285
|
py
|
Python
|
pygnss/__init__.py
|
nmerlene/pygnss
|
9dc59e57cf5a4bdf0ca56c2b6a23d622ffda4c5a
|
[
"MIT"
] | null | null | null |
pygnss/__init__.py
|
nmerlene/pygnss
|
9dc59e57cf5a4bdf0ca56c2b6a23d622ffda4c5a
|
[
"MIT"
] | null | null | null |
pygnss/__init__.py
|
nmerlene/pygnss
|
9dc59e57cf5a4bdf0ca56c2b6a23d622ffda4c5a
|
[
"MIT"
] | null | null | null |
from pathlib import Path
import logging
import xarray
from time import time
from typing import Union
#
from .io import opener
from .rinex2 import rinexnav2, _scan2
from .rinex3 import rinexnav3, _scan3
# for NetCDF compression. too high slows down with little space savings.
COMPLVL = 1
def readrinex(rinexfn: Path, outfn: Path=None, use: Union[str, list, tuple]=None, verbose: bool=True) -> xarray.Dataset:
"""
Reads OBS, NAV in RINEX 2,3. Plain ASCII text or GZIP .gz.
"""
nav = None
obs = None
rinexfn = Path(rinexfn).expanduser()
# %% detect type of Rinex file
if rinexfn.suffix == '.gz':
fnl = rinexfn.stem.lower()
else:
fnl = rinexfn.name.lower()
if fnl.endswith('n') or fnl.endswith('n.rnx'):
nav = rinexnav(rinexfn, outfn)
elif fnl.endswith('o') or fnl.endswith('o.rnx'):
obs = rinexobs(rinexfn, outfn, use=use, verbose=verbose)
elif rinexfn.suffix.endswith('.nc'):
nav = rinexnav(rinexfn)
obs = rinexobs(rinexfn)
else:
raise ValueError(f"I dont know what type of file you're trying to read: {rinexfn}")
return obs, nav
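# Hypothetical usage (the filename below is illustrative only):
# obs, nav = readrinex('demo.10o', outfn='demo.nc', use=('G',), verbose=False)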
def getRinexVersion(fn: Path) -> float:
"""verify RINEX version"""
fn = Path(fn).expanduser()
with opener(fn) as f:
ver = float(f.readline()[:9]) # yes :9
return ver
# %% Navigation file
def rinexnav(fn: Path, ofn: Path=None, group: str='NAV') -> xarray.Dataset:
""" Read RINEX 2,3 NAV files in ASCII or GZIP"""
fn = Path(fn).expanduser()
if fn.suffix == '.nc':
try:
return xarray.open_dataset(fn, group=group)
except OSError:
logging.error(f'Group {group} not found in {fn}')
return
ver = getRinexVersion(fn)
if int(ver) == 2:
nav = rinexnav2(fn)
elif int(ver) == 3:
nav = rinexnav3(fn)
else:
raise ValueError(f'unknown RINEX version {ver} {fn}')
if ofn:
ofn = Path(ofn).expanduser()
print('saving NAV data to', ofn)
wmode = 'a' if ofn.is_file() else 'w'
nav.to_netcdf(ofn, group=group, mode=wmode)
return nav
# %% Observation File
def rinexobs(fn: Path, ofn: Path=None, use: Union[str, list, tuple]=None,
group: str='OBS', verbose: bool=False) -> xarray.Dataset:
"""
Read RINEX 2,3 OBS files in ASCII or GZIP
"""
fn = Path(fn).expanduser()
if fn.suffix == '.nc':
try:
logging.debug(f'loading {fn} with xarray')
return xarray.open_dataset(fn, group=group)
except OSError:
logging.error(f'Group {group} not found in {fn}')
return
tic = time()
ver = getRinexVersion(fn)
if int(ver) == 2:
obs = _scan2(fn, use, verbose)
elif int(ver) == 3:
obs = _scan3(fn, use, verbose)
else:
raise ValueError(f'unknown RINEX version {ver} {fn}')
print(f"finished in {time()-tic:.2f} seconds")
if ofn:
ofn = Path(ofn).expanduser()
print('saving OBS data to', ofn)
wmode = 'a' if ofn.is_file() else 'w'
enc = {k: {'zlib': True, 'complevel': COMPLVL, 'fletcher32': True}
for k in obs.data_vars}
obs.to_netcdf(ofn, group=group, mode=wmode, encoding=enc)
return obs
| 28.318966
| 120
| 0.595129
| 449
| 3,285
| 4.329621
| 0.302895
| 0.018519
| 0.010802
| 0.030864
| 0.388889
| 0.375514
| 0.350823
| 0.290123
| 0.220165
| 0.175926
| 0
| 0.010879
| 0.272451
| 3,285
| 115
| 121
| 28.565217
| 0.80251
| 0.094977
| 0
| 0.4125
| 0
| 0
| 0.116541
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.05
| false
| 0
| 0.1
| 0
| 0.25
| 0.0375
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9bb96ea949af7533581d8e4cca76f381e779a9b0
| 5,201
|
py
|
Python
|
classroom/pref_graph.py
|
norabelrose/whisper
|
79642bab696f3e166b6af61a447602e8e5d58270
|
[
"MIT"
] | null | null | null |
classroom/pref_graph.py
|
norabelrose/whisper
|
79642bab696f3e166b6af61a447602e8e5d58270
|
[
"MIT"
] | null | null | null |
classroom/pref_graph.py
|
norabelrose/whisper
|
79642bab696f3e166b6af61a447602e8e5d58270
|
[
"MIT"
] | null | null | null |
from typing import TYPE_CHECKING
import networkx as nx
from .fas import eades_fas
if TYPE_CHECKING: # Prevent circular import
from .pref_dag import PrefDAG
class PrefGraph(nx.DiGraph):
"""
`PrefGraph` represents a possibly cyclic set of preferences over clips as a weighted directed graph.
Edge weights represent the strength of the preference of A over B, and indifferences are represented
as edges with zero weights. Clips are represented as string IDs. If you want to prevent cycles from
being added to the graph in an online fashion, you should probably use `PrefDAG` instead.
"""
@property
def indifferences(self) -> nx.Graph:
"""Return a read-only, undirected view of the subgraph containing only indifferences."""
edge_view = self.edges
return nx.graphviews.subgraph_view(
self,
filter_edge=lambda a, b: edge_view[a, b].get('weight', 1.0) == 0.0
).to_undirected(as_view=True)
@property
def nonisolated(self) -> 'PrefGraph':
deg_view = self.degree
return nx.graphviews.subgraph_view(
self,
filter_node=lambda n: deg_view(n) > 0
)
@property
def strict_prefs(self) -> nx.DiGraph:
"""Return a read-only view of the subgraph containing only strict preferences."""
edge_view = self.edges
return nx.graphviews.subgraph_view(
self,
filter_edge=lambda a, b: edge_view[a, b].get('weight', 1.0) > 0
)
def __repr__(self) -> str:
num_indiff = self.indifferences.number_of_edges()
num_prefs = self.strict_prefs.number_of_edges()
return f'{type(self).__name__}({num_prefs} strict prefs, {num_indiff} indifferences)'
def add_indiff(self, a: str, b: str, **attr):
"""Try to dd the indifference relation `a ~ b`, and throw an error if the expected
coherence properties of the graph would be violated."""
if attr.setdefault('weight', 0.0) != 0.0:
raise CoherenceViolation("Indifferences cannot have nonzero weight")
self.add_edge(a, b, **attr)
def add_edge(self, a: str, b: str, **attr):
"""Add an edge to the graph, and check for coherence violations. Usually you
should use the `add_pref` or `add_indiff` wrapper methods instead of this method."""
if attr.get('weight', 1) < 0:
raise CoherenceViolation("Preferences must have non-negative weight")
super().add_edge(a, b, **attr)
add_pref = add_edge
def draw(self):
"""Displays a visualization of the graph using `matplotlib`. Strict preferences
are shown as solid arrows, and indifferences are dashed lines."""
strict_subgraph = self.strict_prefs
pos = nx.drawing.spring_layout(strict_subgraph)
nx.draw_networkx_nodes(strict_subgraph, pos)
nx.draw_networkx_edges(strict_subgraph, pos)
nx.draw_networkx_edges(self.indifferences, pos, arrowstyle='-', style='dashed')
nx.draw_networkx_labels(strict_subgraph, pos)
def acyclic_subgraph(self) -> 'PrefDAG':
"""Return an acyclic subgraph of this graph as a `PrefDAG`. The algorithm will try
to remove as few preferences as possible, but it is not guaranteed to be optimal.
If the graph is already acyclic, the returned `PrefDAG` will be isomorphic to this graph."""
from .pref_dag import PrefDAG
fas = set(eades_fas(self.strict_prefs))
return PrefDAG((
(u, v, d) for u, v, d in self.edges(data=True) # type: ignore
if (u, v) not in fas
))
def is_quasi_transitive(self) -> bool:
"""Return whether the strict preferences are acyclic."""
return nx.is_directed_acyclic_graph(self.strict_prefs)
def pref_prob(self, a: str, b: str, eps: float = 5e-324) -> float:
"""Return the probability that `a` is preferred to `b`."""
a_weight = self.pref_weight(a, b)
denom = a_weight + self.pref_weight(b, a)
# If there's no strict preference between a and b, then the
# probability that A is preferred to B is 1/2.
return (a_weight + eps) / (denom + 2 * eps)
def pref_weight(self, a: str, b: str, default: float = 0.0) -> float:
"""
Return the weight of the preference `a > b`, or 0.0 if there is no such
preference. Preferences with no explicit weight are assumed to have weight 1.
"""
attrs = self.edges.get((a, b), None)
return attrs.get('weight', 1.0) if attrs is not None else default
def unlink(self, a: str, b: str):
"""Remove the preference relation between `a` and `b`."""
try:
self.remove_edge(a, b)
except nx.NetworkXError:
# Try removing the edge in the other direction.
try:
self.remove_edge(b, a)
except nx.NetworkXError:
raise KeyError(f"No preference relation between {a} and {b}")
class CoherenceViolation(Exception):
"""Raised when an operation would violate the coherence of the graph."""
pass
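# --- Hedged usage sketch (illustrative addition, not part of the original module) ---
# Exercises the public surface documented above: strict preferences, indifferences,
# preference probabilities and the acyclic projection. Clip IDs 'a', 'b', 'c' are
# hypothetical; the sibling .fas and .pref_dag modules are assumed to be importable.
def _demo_pref_graph():  # pragma: no cover - example only
    g = PrefGraph()
    g.add_pref('a', 'b', weight=2.0)   # strict preference a > b
    g.add_pref('b', 'c')               # weight defaults to 1.0
    g.add_indiff('a', 'c')             # indifference a ~ c (zero weight)
    assert g.is_quasi_transitive()     # strict prefs form a DAG here
    prob = g.pref_prob('a', 'b')       # ~1.0, since only a > b carries weight
    dag = g.acyclic_subgraph()         # already acyclic, so nothing is dropped
    return prob, dag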
| 42.284553
| 104
| 0.635455
| 715
| 5,201
| 4.513287
| 0.283916
| 0.006817
| 0.012395
| 0.013945
| 0.206384
| 0.159281
| 0.111559
| 0.076852
| 0.056399
| 0.056399
| 0
| 0.007368
| 0.269371
| 5,201
| 122
| 105
| 42.631148
| 0.841842
| 0.344357
| 0
| 0.232877
| 0
| 0
| 0.077065
| 0.010132
| 0
| 0
| 0
| 0
| 0
| 1
| 0.164384
| false
| 0.013699
| 0.068493
| 0
| 0.383562
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9bbcdfbd01a5563f9c4786b31c8c24dcfa3b565b
| 683
|
py
|
Python
|
hisitter/reviews/permissions.py
|
babysitter-finder/backend
|
5c37c6876ca13b5794ac44e0342b810426acbc76
|
[
"MIT"
] | 1
|
2021-02-25T01:02:40.000Z
|
2021-02-25T01:02:40.000Z
|
hisitter/reviews/permissions.py
|
babysitter-finder/backend
|
5c37c6876ca13b5794ac44e0342b810426acbc76
|
[
"MIT"
] | null | null | null |
hisitter/reviews/permissions.py
|
babysitter-finder/backend
|
5c37c6876ca13b5794ac44e0342b810426acbc76
|
[
"MIT"
] | 1
|
2020-11-23T20:57:47.000Z
|
2020-11-23T20:57:47.000Z
|
""" Reviews permissions."""
# Python
import logging
# Django Rest Framework
from rest_framework.permissions import BasePermission
class IsServiceOwner(BasePermission):
""" This permission allow determine if the user
is a client, if not permission is denied.
"""
def has_permission(self, request, view):
""" Manage the permission if the user is a client. """
try:
user = request.user.user_client
if user == view.service.user_client:
return True
else:
return False
        except Exception as error:
            # Deny access when the related service or client lookup fails.
            logging.info(f'Permission check failed with exception: {error}')
            return False
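# --- Hedged usage sketch (illustrative addition, not part of the original module) ---
# Shows how the permission above is typically attached to a DRF view. The view class
# below is hypothetical; it only needs to expose the `service` attribute that
# has_permission() reads.
from rest_framework import viewsets  # DRF is already a dependency of this module

class _ExampleReviewViewSet(viewsets.ViewSet):  # pragma: no cover - example only
    permission_classes = [IsServiceOwner]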
| 28.458333
| 75
| 0.628111
| 79
| 683
| 5.379747
| 0.594937
| 0.061176
| 0.042353
| 0.051765
| 0.084706
| 0.084706
| 0
| 0
| 0
| 0
| 0
| 0
| 0.297218
| 683
| 23
| 76
| 29.695652
| 0.885417
| 0.2694
| 0
| 0
| 0
| 0
| 0.098925
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.083333
| false
| 0
| 0.166667
| 0
| 0.5
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9bbd9c4b8b498fde19563e3848c89d37d52b9838
| 1,678
|
py
|
Python
|
pk.py
|
CnybTseng/SOSNet
|
9f1e96380388dde75fe0737ec0b3516669054205
|
[
"MIT"
] | null | null | null |
pk.py
|
CnybTseng/SOSNet
|
9f1e96380388dde75fe0737ec0b3516669054205
|
[
"MIT"
] | null | null | null |
pk.py
|
CnybTseng/SOSNet
|
9f1e96380388dde75fe0737ec0b3516669054205
|
[
"MIT"
] | null | null | null |
import sys
import torch
import timeit
sys.path.append('../JDE')
from mot.models.backbones import ShuffleNetV2
from sosnet import SOSNet
if __name__ == '__main__':
print('SOSNet PK ShuffleNetV2')
model1 = ShuffleNetV2(
stage_repeat={'stage2': 4, 'stage3': 8, 'stage4': 4},
stage_out_channels={'conv1': 24, 'stage2': 48, 'stage3': 96,
'stage4': 192, 'conv5': 1024}).cuda().eval()
arch={
'conv1': {'out_channels': 64},
'stage2': {'out_channels': 256, 'repeate': 2, 'out': True},
'stage3': {'out_channels': 384, 'repeate': 2, 'out': True},
'stage4': {'out_channels': 512, 'repeate': 2, 'out': True},
'conv5': {'out_channels': 1024}}
model2 = SOSNet(arch).cuda().eval()
x = torch.rand(1, 3, 224, 224).cuda()
loops = 1000
with torch.no_grad():
start = timeit.default_timer()
for _ in range(loops):
y = model1(x)
torch.cuda.synchronize()
end = timeit.default_timer()
latency = (end - start) / loops
print('ShuffleNetV2 latency: {} seconds.'.format(latency))
for yi in y:
print(yi.shape)
with torch.no_grad():
start = timeit.default_timer()
for _ in range(loops):
y = model2(x)
torch.cuda.synchronize()
end = timeit.default_timer()
latency = (end - start) / loops
print('SOSNet latency: {} seconds.'.format(latency))
for yi in y:
print(yi.shape)
with torch.autograd.profiler.profile(use_cuda=True, record_shapes=True) as prof:
model2(x)
print(prof.key_averages().table())
| 37.288889
| 85
| 0.567342
| 198
| 1,678
| 4.671717
| 0.409091
| 0.071351
| 0.077838
| 0.048649
| 0.372973
| 0.372973
| 0.372973
| 0.372973
| 0.372973
| 0.372973
| 0
| 0.056106
| 0.277712
| 1,678
| 45
| 86
| 37.288889
| 0.707096
| 0
| 0
| 0.363636
| 0
| 0
| 0.159021
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.113636
| 0
| 0.113636
| 0.136364
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9bbda2f39a11084b661e8fe58491f418c2a36b6f
| 2,255
|
py
|
Python
|
test/generate_netmhcpan_functions.py
|
til-unc/mhcgnomes
|
0bfbe193daeb7cd38d958222f6071dd657e9fb6e
|
[
"Apache-2.0"
] | 6
|
2020-10-27T15:31:32.000Z
|
2020-11-29T03:26:06.000Z
|
test/generate_netmhcpan_functions.py
|
til-unc/mhcgnomes
|
0bfbe193daeb7cd38d958222f6071dd657e9fb6e
|
[
"Apache-2.0"
] | 4
|
2020-10-27T14:57:16.000Z
|
2020-11-04T21:56:39.000Z
|
test/generate_netmhcpan_functions.py
|
pirl-unc/mhcgnomes
|
0bfbe193daeb7cd38d958222f6071dd657e9fb6e
|
[
"Apache-2.0"
] | null | null | null |
import pandas as pd
NETMHCPAN_3_0_DEST = "test_netmhcpan_3_0_alleles.py"
NETMHCPAN_3_0_SOURCE = "netmhcpan_3_0_alleles.txt"
NETMHCPAN_4_0_DEST = "test_netmhcpan_4_0_alleles.py"
NETMHCPAN_4_0_SOURCE = "netmhcpan_4_0_alleles.txt"
special_chars = " *:-,/."
def generate(src, dst, exclude=set()):
alleles = set()
with open(dst, "w") as f:
f.write("from mhcgnomes import parse, Allele, Gene, AlleleWithoutGene\n")
with open(src) as alleles_file:
for line in alleles_file:
line = line.strip()
if line.startswith("#"):
continue
elif not line:
continue
parts = line.split()
allele_name = parts[0]
if allele_name in exclude:
continue
if allele_name in alleles:
print("Skipping repeat allele: '%s'" % allele_name)
continue
alleles.add(allele_name)
fn_name = allele_name.replace("\"", "").strip()
for c in special_chars:
fn_name = fn_name.replace(c, "_")
fn_name = fn_name.replace("__", "_")
f.write(f"\ndef test_{fn_name}():")
f.write(f"\n result = parse('{allele_name}')")
if ":" in allele_name:
f.write(
f"""\n assert result.__class__ is Allele, \\
'Expected parse(\"{allele_name}\") to be Allele but got %s' % (result,)""")
else:
f.write(
f"""\n assert result.__class__ in (Gene, Allele, AlleleWithoutGene), \\
'Unexpected type for parse(\"{allele_name}\"): %s' % (result,)""")
f.write("\n")
print(f"Wrote {len(alleles)} from {src} tests to {dst}")
return alleles
netmhcpan_3_0_alleles = generate(
src=NETMHCPAN_3_0_SOURCE,
dst=NETMHCPAN_3_0_DEST)
generate(
src=NETMHCPAN_4_0_SOURCE,
dst=NETMHCPAN_4_0_DEST,
exclude=netmhcpan_3_0_alleles)
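# --- Hedged illustration (added for clarity, not part of the original script) ---
# For a hypothetical allele line "HLA-A*02:01", the writer above maps the special
# characters " *:-,/." to underscores and, because the name contains ':', emits a
# test of roughly this shape:
#
#     def test_HLA_A_02_01():
#         result = parse('HLA-A*02:01')
#         assert result.__class__ is Allele, \
#             'Expected parse("HLA-A*02:01") to be Allele but got %s' % (result,)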
| 35.234375
| 107
| 0.501552
| 247
| 2,255
| 4.267206
| 0.291498
| 0.094877
| 0.083491
| 0.068311
| 0.098672
| 0.047438
| 0.047438
| 0
| 0
| 0
| 0
| 0.021122
| 0.391131
| 2,255
| 63
| 108
| 35.793651
| 0.74654
| 0
| 0
| 0.122449
| 0
| 0
| 0.281846
| 0.07945
| 0
| 0
| 0
| 0
| 0.040816
| 1
| 0.020408
| false
| 0
| 0.040816
| 0
| 0.081633
| 0.040816
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9bbf5d23053e93f4be3618d38f8307dfe71dd5b9
| 2,156
|
py
|
Python
|
美团爬取商家信息/paquxinxi.py
|
13060923171/Crawl-Project2
|
effab1bf31979635756fc272a7bcc666bb499be2
|
[
"MIT"
] | 14
|
2020-10-27T05:52:20.000Z
|
2021-11-07T20:24:55.000Z
|
美团爬取商家信息/paquxinxi.py
|
13060923171/Crawl-Project2
|
effab1bf31979635756fc272a7bcc666bb499be2
|
[
"MIT"
] | 1
|
2021-09-17T07:40:00.000Z
|
2021-09-17T07:40:00.000Z
|
美团爬取商家信息/paquxinxi.py
|
13060923171/Crawl-Project2
|
effab1bf31979635756fc272a7bcc666bb499be2
|
[
"MIT"
] | 8
|
2020-11-18T14:23:12.000Z
|
2021-11-12T08:55:08.000Z
|
import requests
import re
import json
headers = {
"Origin": "https://bj.meituan.com",
"Host": "apimobile.meituan.com",
"Referer": "https://bj.meituan.com/s/%E7%81%AB%E9%94%85/",
"Cookie": "uuid=692a53319ce54d0c91f3.1597223761.1.0.0; ci=1; rvct=1; _lxsdk_cuid=173e1f47707c8-0dcd4ff30b4ae3-3323765-e1000-173e1f47707c8; _lxsdk_s=173e1f47708-21d-287-4d9%7C%7C35",
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0.4147.105 Safari/537.36"
}
def get_parse(url):
html = requests.get(url,headers = headers)
    if html.status_code == 200:
get_html(html)
else:
print(html.status_code)
def get_html(html):
content = html.text
    # Shop name
titles= re.compile('","title":"(.*?)",',re.S|re.I)
title = titles.findall(content)
    # Address
addresses = re.compile(',"address":"(.*?)",', re.S | re.I)
address = addresses.findall(content)
    # Rating
avgscores = re.compile(',"avgscore":(.*?),', re.S | re.I)
avgscore = avgscores.findall(content)
    # Number of reviews
commentses = re.compile(',"comments":(.*?),', re.S | re.I)
comments = commentses.findall(content)
    # Phone number
phones = re.compile('"phone":"(.*?)",', re.S | re.I)
phone = phones.findall(content)
for i in range(len(title)):
try:
t = title[i]
a = address[i]
avg = avgscore[i]
c = comments[i]
p = phone[i]
print(t,a,avg,c,p)
            download(t, a, avg, c, p)
        except:
            pass
def download(t, a, avg, c, p):
data = {
'店铺名称': t,
'店铺地址': a,
'店铺评分': avg,
'评价人数': c,
'电话': p
}
with open("美团信息.txt","a+",encoding="utf-8")as f:
f.write(json.dumps(data,ensure_ascii=False)+"\n")
print("写入成功")
if __name__ == '__main__':
    # In this URL the offset parameter increases by 32 per page, limit is the number of records per request,
    # and q is the search keyword; the 1 in poi/pcsearch/1 is the city id for Beijing.
for i in range(0,33,32):
url = "https://apimobile.meituan.com/group/v4/poi/pcsearch/1?uuid=692a53319ce54d0c91f3.1597223761.1.0.0&userid=-1&limit=32&offset={}&cateId=-1&q=%E7%81%AB%E9%94%85".format(i)
get_parse(url)
| 33.169231
| 185
| 0.590909
| 299
| 2,156
| 4.197324
| 0.474916
| 0.035857
| 0.01992
| 0.023904
| 0.105976
| 0.100398
| 0
| 0
| 0
| 0
| 0
| 0.103819
| 0.222635
| 2,156
| 65
| 186
| 33.169231
| 0.644988
| 0.042672
| 0
| 0
| 0
| 0.074074
| 0.337543
| 0.084021
| 0
| 0
| 0
| 0
| 0
| 1
| 0.055556
| false
| 0.018519
| 0.055556
| 0
| 0.111111
| 0.055556
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
32cada166139a42c2081b8a48a2bcd39a15cb5ab
| 2,612
|
py
|
Python
|
create_categories.py
|
Botomatik/JackBot
|
58651d8b5a5bcead2a2eb79849019cb4f972b7cd
|
[
"MIT"
] | null | null | null |
create_categories.py
|
Botomatik/JackBot
|
58651d8b5a5bcead2a2eb79849019cb4f972b7cd
|
[
"MIT"
] | null | null | null |
create_categories.py
|
Botomatik/JackBot
|
58651d8b5a5bcead2a2eb79849019cb4f972b7cd
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Program to batch create categories.
The program expects a generator containing a list of page titles to be used as
base.
The following command line parameters are supported:
-always (not implemented yet) Don't ask, just do the edit.
-overwrite (not implemented yet).
-parent The name of the parent category.
-basename The base to be used for the new category names.
Example:
create_categories.py
-lang:commons
-family:commons
-links:User:Multichill/Wallonia
-parent:"Cultural heritage monuments in Wallonia"
-basename:"Cultural heritage monuments in"
"""
__version__ = '$Id$'
#
# (C) Multichill, 2011
# (C) xqt, 2011
#
# Distributed under the terms of the MIT license.
#
#
import os, sys, re, codecs
import urllib, httplib, urllib2
import catlib
import time
import socket
import StringIO
import wikipedia as pywikibot
import config
import pagegenerators
def createCategory(page, parent, basename):
title = page.title(withNamespace=False)
newpage = pywikibot.Page(pywikibot.getSite(u'commons', u'commons'),
u'Category:' + basename + u' ' + title)
newtext = u''
newtext += u'[[Category:' + parent + u'|' + title + u']]\n'
newtext += u'[[Category:' + title + u']]\n'
if not newpage.exists():
#FIXME: Add user prompting and -always option
pywikibot.output(newpage.title())
pywikibot.showDiff(u'', newtext)
comment = u'Creating new category'
#FIXME: Add exception handling
newpage.put(newtext, comment)
else:
#FIXME: Add overwrite option
pywikibot.output(u'%s already exists, skipping' % (newpage.title(),))
def main(args):
'''
Main loop. Get a generator and options.
'''
generator = None
parent = u''
basename = u''
always = False
genFactory = pagegenerators.GeneratorFactory()
for arg in pywikibot.handleArgs():
if arg == '-always':
always = True
elif arg.startswith('-parent:'):
            parent = arg[len('-parent:'):].strip()
elif arg.startswith('-basename'):
            basename = arg[len('-basename:'):].strip()
else:
genFactory.handleArg(arg)
generator = genFactory.getCombinedGenerator()
if generator:
for page in generator:
createCategory(page, parent, basename)
else:
pywikibot.output(u'No pages to work on')
pywikibot.output(u'All done')
if __name__ == "__main__":
try:
main(sys.argv[1:])
finally:
pywikibot.stopme()
| 25.359223
| 78
| 0.630551
| 305
| 2,612
| 5.357377
| 0.459016
| 0.03672
| 0.029376
| 0.033048
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.005644
| 0.253828
| 2,612
| 102
| 79
| 25.607843
| 0.832735
| 0.326187
| 0
| 0.058824
| 0
| 0
| 0.106605
| 0
| 0
| 0
| 0
| 0.009804
| 0
| 1
| 0.039216
| false
| 0
| 0.176471
| 0
| 0.215686
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
32cae26d8eb99a201dc12930e81a1edb58d4cace
| 10,287
|
py
|
Python
|
avod/core/losses.py
|
Zengyi-Qin/TLNet
|
11fa48160158b550ad2dc810ed564eebe17e8f5e
|
[
"Apache-2.0"
] | 114
|
2019-03-13T01:42:22.000Z
|
2022-03-31T07:56:04.000Z
|
avod/core/losses.py
|
Zengyi-Qin/TLNet
|
11fa48160158b550ad2dc810ed564eebe17e8f5e
|
[
"Apache-2.0"
] | 12
|
2019-03-26T08:18:13.000Z
|
2021-05-19T14:36:27.000Z
|
avod/core/losses.py
|
Zengyi-Qin/TLNet
|
11fa48160158b550ad2dc810ed564eebe17e8f5e
|
[
"Apache-2.0"
] | 22
|
2019-03-22T10:44:49.000Z
|
2021-04-01T00:11:07.000Z
|
"""Classification and regression loss functions for object detection.
Localization losses:
* WeightedL2LocalizationLoss
* WeightedSmoothL1LocalizationLoss
Classification losses:
* WeightedSoftmaxClassificationLoss
* WeightedSigmoidClassificationLoss
"""
from abc import ABCMeta
from abc import abstractmethod
import tensorflow as tf
from avod.core import ops
class Loss(object):
"""Abstract base class for loss functions."""
__metaclass__ = ABCMeta
def __call__(self,
prediction_tensor,
target_tensor,
ignore_nan_targets=False,
scope=None,
**params):
"""Call the loss function.
Args:
prediction_tensor: a tensor representing predicted quantities.
target_tensor: a tensor representing regression or classification
targets.
ignore_nan_targets: whether to ignore nan targets in the loss
computation. E.g. can be used if the target
tensor is missing groundtruth data that
shouldn't be factored into the loss.
scope: Op scope name. Defaults to 'Loss' if None.
**params: Additional keyword arguments for specific implementations
of the Loss.
Returns:
loss: a tensor representing the value of the loss function.
"""
with tf.name_scope(scope, 'Loss',
[prediction_tensor, target_tensor, params]) as scope:
if ignore_nan_targets:
target_tensor = tf.where(tf.is_nan(target_tensor),
prediction_tensor,
target_tensor)
return self._compute_loss(
prediction_tensor, target_tensor, **params)
@abstractmethod
def _compute_loss(self, prediction_tensor, target_tensor, **params):
"""Method to be overriden by implementations.
Args:
prediction_tensor: a tensor representing predicted quantities
target_tensor: a tensor representing regression or classification
targets
**params: Additional keyword arguments for specific implementations
of the Loss.
Returns:
loss: a tensor representing the value of the loss function
"""
pass
class WeightedL2LocalizationLoss(Loss):
"""L2 localization loss function with anchorwise output support.
Loss[b,a] = .5 * ||weights[b,a] * (prediction[b,a,:] - target[b,a,:])||^2
"""
def _compute_loss(self, prediction_tensor, target_tensor, weights):
"""Compute loss function.
Args:
prediction_tensor: A float tensor of shape [batch_size, num_anchors,
code_size] representing the (encoded) predicted
locations of objects.
target_tensor: A float tensor of shape [batch_size, num_anchors,
code_size] representing the regression targets
weights: a float tensor of shape [batch_size, num_anchors]
Returns:
loss: a (scalar) tensor representing the value of the loss function
or a float tensor of shape [batch_size, num_anchors]
"""
weighted_diff = (prediction_tensor - target_tensor) * tf.expand_dims(
weights, 2)
square_diff = 0.5 * tf.square(weighted_diff)
return tf.reduce_sum(square_diff)
class WeightedSigmoidClassificationLoss(Loss):
"""Sigmoid cross entropy classification loss function."""
def _compute_loss(self,
prediction_tensor,
target_tensor,
weights,
class_indices=None):
"""Compute loss function.
Args:
prediction_tensor: A float tensor of shape [batch_size, num_anchors,
num_classes] representing the predicted logits for each class
target_tensor: A float tensor of shape [batch_size, num_anchors,
num_classes] representing one-hot encoded classification targets
weights: a float tensor of shape [batch_size, num_anchors]
class_indices: (Optional) A 1-D integer tensor of class indices.
If provided, computes loss only for the specified class indices.
Returns:
loss: a (scalar) tensor representing the value of the loss function
or a float tensor of shape [batch_size, num_anchors]
"""
weights = tf.expand_dims(weights, 2)
if class_indices is not None:
weights *= tf.reshape(
ops.indices_to_dense_vector(class_indices,
tf.shape(prediction_tensor)[2]),
[1, 1, -1])
per_entry_cross_ent = (tf.nn.sigmoid_cross_entropy_with_logits(
labels=target_tensor, logits=prediction_tensor))
return tf.reduce_sum(per_entry_cross_ent * weights)
class WeightedSmoothL1Loss(Loss):
"""Smooth L1 localization loss function.
The smooth L1_loss is defined elementwise as .5 x^2 if |x|<1 and |x|-.5
otherwise, where x is the difference between predictions and target.
See also Equation (3) in the Fast R-CNN paper by Ross Girshick (ICCV 2015)
"""
def _compute_loss(self, prediction_tensor, target_tensor, weight):
"""Compute loss function.
Args:
prediction_tensor: A float tensor of shape [num_anchors,
code_size] representing the (encoded) predicted
locations of objects.
target_tensor: A float tensor of shape [num_anchors,
code_size] representing the regression targets
Returns:
loss: an anchorwise tensor of shape [num_anchors] representing
the value of the loss function
"""
diff = prediction_tensor - target_tensor
abs_diff = tf.abs(diff)
abs_diff_lt_1 = tf.less(abs_diff, 1)
anchorwise_smooth_l1norm = tf.reduce_sum(
tf.where(abs_diff_lt_1, 0.5 * tf.square(abs_diff), abs_diff - 0.5),
axis=1) * weight
return anchorwise_smooth_l1norm
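# --- Hedged illustration (added for clarity, not part of the original module) ---
# Spells out the elementwise smooth-L1 definition quoted in the docstring above:
# 0.5 * x**2 when |x| < 1, and |x| - 0.5 otherwise. NumPy is assumed to be
# available (it is not imported at the top of this module).
def _smooth_l1_reference(x):  # pragma: no cover - example only
    import numpy as np
    x = np.asarray(x, dtype=float)
    return np.where(np.abs(x) < 1.0, 0.5 * x ** 2, np.abs(x) - 0.5)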
class WeightedSoftmaxLoss(Loss):
"""Softmax cross-entropy loss function."""
def _compute_loss(self, prediction_tensor, target_tensor, weight):
"""Compute loss function.
Args:
prediction_tensor: A float tensor of shape [batch_size, num_anchors,
num_classes] representing the predicted logits for each class
target_tensor: A float tensor of shape [batch_size, num_anchors,
num_classes] representing one-hot encoded classification targets
Returns:
loss: a (scalar) tensor representing the value of the loss function
"""
num_classes = prediction_tensor.get_shape().as_list()[-1]
per_row_cross_ent = (tf.nn.softmax_cross_entropy_with_logits(
labels=tf.reshape(target_tensor, [-1, num_classes]),
logits=tf.reshape(prediction_tensor, [-1, num_classes])))
per_row_cross_ent = tf.reshape(per_row_cross_ent, [-1, 1])
positive_cls = tf.cast(tf.argmax(tf.reshape(target_tensor, [-1, num_classes]), axis=1), tf.float32)
weight_cls = tf.reshape((positive_cls + 0.1) * 10, [-1, 1])
pred_argmax = tf.argmax(tf.reshape(prediction_tensor, [-1, num_classes]), axis=1)
true_argmax = tf.argmax(tf.reshape(target_tensor, [-1, num_classes]), axis=1)
accuracy_all = tf.reduce_mean(tf.cast(tf.equal(pred_argmax, true_argmax), tf.float32))
pred_positive = tf.cast(tf.greater(pred_argmax, 0), tf.float32)
true_positive = tf.cast(tf.greater(true_argmax, 0), tf.float32)
accuracy_positive = tf.reduce_sum(true_positive * tf.cast(tf.equal(pred_positive, \
true_positive), tf.float32)) / (tf.reduce_sum(true_positive) + 1e-2)
#accuracy_positive = tf.constant(num_classes)
#accuracy_positive = tf.reduce_sum(true_positive) / (tf.reduce_sum(tf.ones_like(true_positive, dtype=tf.float32)) + 1e-2)
return tf.reduce_sum(per_row_cross_ent) * weight, accuracy_all, accuracy_positive
class WeightedSoftmaxLossPiecewise(object):
"""Softmax cross-entropy loss function."""
def _compute_loss(self, prediction_tensor, target_tensor, pos_gt, neg_gt, weight, balance=1.0):
"""Compute loss function.
Args:
prediction_tensor: A float tensor of shape [batch_size, num_anchors,
num_classes] representing the predicted logits for each class
target_tensor: A float tensor of shape [batch_size, num_anchors,
num_classes] representing one-hot encoded classification targets
Returns:
loss: a (scalar) tensor representing the value of the loss function
"""
num_classes = prediction_tensor.get_shape().as_list()[-1]
per_row_cross_ent = (tf.nn.softmax_cross_entropy_with_logits(
labels=tf.reshape(target_tensor, [-1, num_classes]),
logits=tf.reshape(prediction_tensor, [-1, num_classes])))
pred_argmax = tf.argmax(tf.reshape(prediction_tensor, [-1, num_classes]), axis=1)
true_argmax = tf.argmax(tf.reshape(target_tensor, [-1, num_classes]), axis=1)
equal_all = tf.cast(tf.equal(pred_argmax, true_argmax), tf.float32)
accuracy_neg = tf.reduce_sum(equal_all * neg_gt) / (tf.reduce_sum(neg_gt) + 1e-2)
accuracy_pos = tf.reduce_sum(equal_all * pos_gt) / (tf.reduce_sum(pos_gt) + 1e-2)
#accuracy_positive = tf.constant(num_classes)
#accuracy_positive = tf.reduce_sum(true_positive) / (tf.reduce_sum(tf.ones_like(true_positive, dtype=tf.float32)) + 1e-2)
#rate = tf.reduce_sum(neg_gt) / (tf.reduce_sum(pos_gt) + 1e-2)
pos_loss = tf.reduce_sum(per_row_cross_ent * pos_gt) * weight / (tf.reduce_sum(pos_gt) + 1e-2)
neg_loss = tf.reduce_sum(per_row_cross_ent * neg_gt) * weight / (tf.reduce_sum(neg_gt) + 1e-2)
return pos_loss * balance + neg_loss, accuracy_neg, accuracy_pos
| 44.150215
| 129
| 0.636726
| 1,247
| 10,287
| 5.026464
| 0.159583
| 0.053606
| 0.035099
| 0.03127
| 0.654595
| 0.591417
| 0.577377
| 0.558392
| 0.524729
| 0.515156
| 0
| 0.013124
| 0.28152
| 10,287
| 232
| 130
| 44.340517
| 0.834934
| 0.448722
| 0
| 0.22619
| 0
| 0
| 0.000803
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.083333
| false
| 0.011905
| 0.047619
| 0
| 0.285714
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
32cd6811a8df581555a9e17bfebdb7625e6646ac
| 19,282
|
py
|
Python
|
routing/views.py
|
iqqmuT/tsari
|
343ef5cf08ee24bdb710e94c0b6fb334264e5677
|
[
"MIT"
] | null | null | null |
routing/views.py
|
iqqmuT/tsari
|
343ef5cf08ee24bdb710e94c0b6fb334264e5677
|
[
"MIT"
] | 2
|
2020-02-11T22:09:10.000Z
|
2020-06-05T18:02:28.000Z
|
routing/views.py
|
iqqmuT/tsari
|
343ef5cf08ee24bdb710e94c0b6fb334264e5677
|
[
"MIT"
] | null | null | null |
import json
from datetime import datetime, timedelta
from dateutil import parser as dateparser
from django.contrib.auth.decorators import user_passes_test
from django.db.models import Q
from django.http import HttpResponseNotFound, JsonResponse
from django.shortcuts import render
from django.utils import timezone
from avdb.models import \
Convention, \
Equipment, \
EquipmentType, \
Location, \
LocationType, \
TransportOrder, \
TransportOrderLine
import logging
logger = logging.getLogger(__name__)
# TO data structure
# {
# 'disabled': False,
# 'equipment': 1,
# 'week': '2018-05-28T09:00:00+00:00',
# 'from': {
# 'location': 9,
# 'load_out': '2018-06-19T08:00:00+00:00'
# },
# 'to': {
# 'location': 4,
# 'convention': 7,
# 'load_in': '2018-06-19T09:00:00+00:00'
# },
# 'name': 'First TO',
# 'notes': 'Notes',
# 'unitNotes': 'Uniittinotes'
# }
@user_passes_test(lambda u: u.is_superuser)
def index(request, year):
video_types = EquipmentType.objects.filter(name__istartswith='Video').order_by('name')
audio_types = EquipmentType.objects.filter(name__istartswith='Audio').order_by('name')
elec_types = EquipmentType.objects.filter(name__istartswith='Electricity').order_by('name')
#video_eqs = Equipment.objects.filter(equipment_type__name__istartswith='Video')
#audio_eqs = Equipment.objects.filter(equipment_type__name__istartswith='Audio')
#elec_eqs = Equipment.objects.filter(equipment_type__name__istartswith='Electricity')
# get time period
first = _get_first_convention(year)
last = _get_last_convention(year)
if first is None or last is None:
return HttpResponseNotFound("No conventions found for year %d" % year)
start_date = first.load_in
end_date = last.load_out
# move start_date backwards to previous monday + 1 week
start_date = start_date - timedelta(days=start_date.weekday() + 7)
# move end_date forwards by week
end_date = end_date + timedelta(weeks=2)
weeks = []
monday = start_date
while monday < end_date:
weeks.append({
'monday': _get_previous_monday(monday),
'sunday': _get_next_sunday(monday),
'number': monday.isocalendar()[1],
})
monday = monday + timedelta(weeks=1)
to_data = []
equipment_groups = []
equipment_groups.extend(_add_eq_group(video_types, weeks, to_data))
equipment_groups.extend(_add_eq_group(audio_types, weeks, to_data))
equipment_groups.extend(_add_eq_group(elec_types, weeks, to_data))
#equipment_groups = [
# _handle_equipments(video_eqs, weeks, to_data),
# _handle_equipments(audio_eqs, weeks, to_data),
# _handle_equipments(elec_eqs, weeks, to_data),
#]
return render(request, 'routing/index.html', {
'year': year,
'equipment_groups': equipment_groups,
'weeks': weeks,
'conventions': json.dumps(_get_conventions_json()),
'start': start_date,
'end': end_date,
'other_locations': _get_other_locations(),
'locations_json': json.dumps(_get_locations_json()),
'json': json.dumps(to_data),
})
def _add_eq_group(equipment_types, weeks, to_data):
groups = []
for eq_type in equipment_types:
eqs = Equipment.objects.filter(equipment_type=eq_type, disabled=False)
groups.append(_handle_equipments(eqs, weeks, to_data))
return groups
# Save JSON request
def save(request, year):
data = json.loads(request.body.decode('utf-8'))
# Disable all existing TransportOrders for this year,
# and enable only those we are editing/creating
_disable_all_tos(year)
# Remove existing TransportOrderLines for this year
# We will re-create new TransportOrderLines
eq_ids = set()
for to_data in data['tos']:
eq_ids.add(to_data['equipment'])
for id in eq_ids:
_remove_tols(id, year)
# transit_from is storage for transit information
transit_from = None
for to_data in data['tos']:
if to_data['disabled'] == False:
if 'inTransit' in to_data['from'].keys() and to_data['from']['inTransit'] == True and ('inTransit' not in to_data['to'].keys() or to_data['to']['inTransit'] == False):
# end of transit
# from.load_out is saved to last TO in transit in UI
if 'load_out' in to_data['from'].keys():
transit_from['load_out'] = to_data['from']['load_out']
# copy 'from' data from beginning of transit
to_data['from'] = transit_from
transit_from = None
# save TO data
tol = _save_to_data(to_data)
else:
if 'inTransit' in to_data['to'].keys() and to_data['to']['inTransit'] == True and ('inTransit' not in to_data['from'].keys() or to_data['from']['inTransit'] == False):
# save 'from' data from beginning of transit
transit_from = to_data['from']
return JsonResponse({ 'ok': True })
def _save_to_data(to_data):
"""Saves TransportOrder data."""
to = _get_or_create_to(to_data)
if to is None:
# could not create TO
return None
#week = dateparser.parse(to_data['week'])
#monday = _get_previous_monday(week)
#sunday = _get_next_sunday(week)
# create new TransportOrderLine
tol = TransportOrderLine(
equipment=Equipment.objects.get(pk=to_data['equipment']),
transport_order=to,
)
tol.save()
return tol
def _disable_all_tos(year):
"""Disables all TransportOrders from given year."""
start = datetime(year, 1, 1)
end = datetime(year, 12, 31, 23, 59, 59)
tos = TransportOrder.objects.filter(
Q(from_loc_load_out__range=(start, end)) | Q(to_loc_load_in__range=(start, end)) | Q(from_convention__load_out__range=(start, end)) | Q(to_convention__load_in__range=(start, end))
)
for to in tos:
to.disabled = True
to.save()
def _get_or_create_to(to_data):
"""Gets or creates TransportOrder with given data."""
from_location = None
from_convention = None
from_load_out = None
if 'from' in to_data.keys():
if 'convention' in to_data['from'].keys() and to_data['from']['convention'] is not None:
id = to_data['from']['convention']
from_convention = Convention.objects.get(pk=id)
if 'location' in to_data['from'].keys() and to_data['from']['location'] is not None:
id = to_data['from']['location']
from_location = Location.objects.get(pk=id)
if from_convention is None and 'load_out' in to_data['from'].keys() and _is_valid_datetime(to_data['from']['load_out']):
from_load_out = dateparser.parse(to_data['from']['load_out'])
to_location = None
to_convention = None
to_load_in = None
if 'from' in to_data.keys():
if 'convention' in to_data['to'].keys() and to_data['to']['convention'] is not None:
id = to_data['to']['convention']
to_convention = Convention.objects.get(pk=id)
if 'location' in to_data['to'].keys() and to_data['to']['location'] is not None:
id = to_data['to']['location']
to_location = Location.objects.get(pk=id)
if to_convention is None and 'load_in' in to_data['to'].keys() and _is_valid_datetime(to_data['to']['load_in']):
to_load_in = dateparser.parse(to_data['to']['load_in'])
if from_location is None or to_location is None:
# can't create TransportOrder with empty Locations
return None
to, created = TransportOrder.objects.get_or_create(
from_convention=from_convention,
to_convention=to_convention,
from_loc=from_location,
to_loc=to_location,
from_loc_load_out=from_load_out,
to_loc_load_in=to_load_in,
)
# update other fields
if 'name' in to_data.keys():
to.name = to_data['name']
if 'notes' in to_data.keys():
to.notes = to_data['notes']
if 'unitNotes' in to_data.keys():
to.unit_notes = to_data['unitNotes']
to.disabled = False
to.save()
return to
def _is_valid_datetime(s):
try:
dateparser.parse(s)
return True
except ValueError:
logger.error("Invalid datetime '%s'" % s)
return False
def _get_previous_monday(d):
"""Returns previous monday from given datetime."""
monday = d - timedelta(days=d.weekday())
# set time to 00:00:00
return datetime(monday.year, monday.month, monday.day, 0, 0, 0)
def _get_next_sunday(d):
sunday = d + timedelta(days=6-d.weekday())
# set time to 23:59:59
return datetime(sunday.year, sunday.month, sunday.day, 23, 59, 59)
def _get_first_convention(year):
try:
return Convention.objects.filter(load_in__year=year).earliest('load_in')
except Convention.DoesNotExist:
return None
def _get_last_convention(year):
try:
return Convention.objects.filter(load_out__year=year).latest('load_out')
except Convention.DoesNotExist:
return None
def _get_conventions_json():
data = {}
for conv in Convention.objects.all():
d = {
'name': conv.routing_name()
}
if conv.load_in is not None:
d['load_in'] = conv.load_in.isoformat()
if conv.load_out is not None:
d['load_out'] = conv.load_out.isoformat()
data[conv.id] = d
return data
def _get_locations_json():
data = {}
for loc in Location.objects.all():
d = {
'name': loc.name,
}
data[loc.id] = d
return data
def _get_earliest_to(year):
try:
return TransportOrder.objects.filter(from_loc_load_out__year=year).earliest('from_loc_load_out')
except TransportOrder.DoesNotExist:
return None
def _get_latest_to(year):
try:
return TransportOrder.objects.filter(to_loc_load_in__year=year).latest('to_loc_load_in')
except TransportOrder.DoesNotExist:
return None
def _handle_equipments(equipments, weeks, to_data):
objs = []
for equipment in equipments:
eq_weeks = []
selected = {
'name': 'Select',
'type': None
}
start_location = selected
latest_location = None
latest_convention = None
for week in weeks:
# transport data
tod = {
#'transportOrder': None,
'disabled': False,
'equipment': equipment.pk,
'week': week['monday'].isoformat(),
'from': {
#'location': None,
#'convention': None,
},
'to': {
#'location': None,
#'convention': None,
}
}
#if latest_location is not None:
# tod['from']['location'] = latest_location
# tod['to']['location'] = latest_location
#if latest_convention is not None:
# tod['from']['convention'] = latest_convention
# tod['to']['convention'] = latest_convention
# find matching TransportOrderLine and fill information from there to toData object
filter_start = week['monday'] - timedelta(days=3)
filter_end = week['sunday'] - timedelta(days=3)
tols = _find_tols(equipment.pk,
filter_start,
filter_end)
if len(tols):
to = tols.first().transport_order
tod['id'] = to.pk
tod['name'] = to.name
tod['notes'] = to.notes
tod['unitNotes'] = to.unit_notes
if to.from_loc is not None:
tod['from']['location'] = to.from_loc.pk
if start_location['type'] is None:
selected = {
'name': to.from_loc.name,
'type': 'location'
}
start_location = selected
# set selected for previous empty weeks
for old_eq_week in eq_weeks:
if old_eq_week['selected']['type'] is None:
old_eq_week['selected'] = selected
if len(eq_weeks) > 0:
eq_weeks[len(eq_weeks)-1]['selected'] = {
'name': to.from_loc.name,
'type': 'location'
}
if to.from_convention is not None:
tod['from']['convention'] = to.from_convention.pk
tod['from']['location'] = to.from_convention.location.pk
if to.from_convention.load_out is not None:
tod['from']['load_out'] = to.from_convention.load_out.isoformat()
if start_location['type'] is None:
selected = {
'name': to.from_convention.routing_name(),
'type': 'convention',
}
start_location = selected
# set selected for previous empty weeks
for old_eq_week in eq_weeks:
if old_eq_week['selected']['type'] is None:
old_eq_week['selected'] = selected
if len(eq_weeks) > 0:
eq_weeks[len(eq_weeks)-1]['selected'] = {
'name': to.from_convention.routing_name(),
'type': 'convention',
}
if to.from_loc_load_out is not None:
tod['from']['load_out'] = to.from_loc_load_out.isoformat()
# special case: in transit
transit_length = to.transit_length()
if transit_length is not None and transit_length.days > 7:
_handle_in_transit(to, tod, to_data, eq_weeks)
if to.to_loc is not None:
tod['to']['location'] = to.to_loc.pk
selected = {
'name': to.to_loc.name,
'type': 'location'
}
latest_location = to.to_loc.pk
if to.to_convention is not None:
tod['to']['convention'] = to.to_convention.pk
tod['to']['location'] = to.to_convention.location.pk
if to.to_convention.load_in is not None:
tod['to']['load_in'] = to.to_convention.load_in.isoformat()
selected = {
'name': to.to_convention.routing_name(),
'type': 'convention',
}
latest_convention = to.to_convention.pk
if 'inTransit' in tod['to'].keys() and tod['to']['inTransit'] == True:
selected = {
'name': 'In transit',
'type': 'inTransit',
}
if to.to_loc_load_in is not None:
tod['to']['load_in'] = to.to_loc_load_in.isoformat()
to_data.append(tod)
# week data
w = {
'week': week,
#'to_idx': len(to_data) - 1, # index for to_data
'convention': None,
'conventions': _get_conventions(week['monday'], week['sunday']),
'other_locations': _get_other_locations(),
'selected': selected,
}
eq_weeks.append(w)
objs.append({
'eq': equipment,
'weeks': eq_weeks,
'start_location': start_location,
})
return objs
def _handle_in_transit(to, tod, to_data, eq_weeks):
transit_weeks = to.transit_length().days / 7
convention = None
location = None
if 'convention' in tod['from'].keys():
convention = tod['from']['convention']
if 'location' in tod['from'].keys():
location = tod['from']['location']
tod['from']['inTransit'] = True
tod['from']['convention'] = None
tod['from']['location'] = None
i = 0
l = len(to_data)
l2 = len(eq_weeks)
while i < transit_weeks - 2:
to_data[l-i-1]['to']['inTransit'] = True
to_data[l-i-1]['from']['inTransit'] = True
eq_weeks[l2-i-1]['selected'] = {
'name': 'In transit',
'type': 'inTransit',
}
i = i + 1
to_data[l-i-1]['to']['inTransit'] = True
to_data[l-i-1]['from']['convention'] = convention
to_data[l-i-1]['from']['location'] = location
eq_weeks[l2-i-1]['selected'] = {
'name': 'In transit',
'type': 'inTransit',
}
convention_cache = {}
def _get_conventions(start, end):
key = start.isoformat() + end.isoformat()
if key not in convention_cache.keys():
convention_cache[key] = Convention.objects.filter(
starts__gte=start,
ends__lte=end,
)
return convention_cache[key]
location_cache = {}
def _get_other_locations():
"""Returns all locations except convention venues."""
if 'all' not in location_cache.keys():
conv_venue = LocationType.objects.get(name='Convention venue')
location_cache['all'] = Location.objects.exclude(loc_type=conv_venue)
return location_cache['all']
def _find_tols(equipment_id, start, end):
"""Returns existing TransportOrderLines matching with given arguments.
Matches only if load_in is matching between start and end."""
#logger.error('Trying to find TOL')
#logger.error(equipment_id)
#logger.error(start_time)
#logger.error(end_time)
tols = TransportOrderLine.objects.filter(
equipment__id=equipment_id).filter(
Q(transport_order__to_loc_load_in__range=(start, end)) | Q(transport_order__to_convention__load_in__range=(start, end))
#Q(transport_order__from_loc_load_out__range=(start, end)) | Q(transport_order__to_loc_load_in__range=(start, end)) | Q(transport_order__from_convention__load_out__range=(start, end)) | Q(transport_order__to_convention__load_in__range=(start, end))
)
return tols
def _remove_tols(equipment_id, year):
"""Removes all TransportOrderLines for given equipment id and from that year."""
start = datetime(year, 1, 1)
end = datetime(year, 12, 31, 23, 59, 59)
TransportOrderLine.objects.filter(
equipment__id=equipment_id,
transport_order__from_loc_load_out__range=(start, end),
).delete()
TransportOrderLine.objects.filter(
equipment__id=equipment_id,
transport_order__to_loc_load_in__range=(start, end),
).delete()
TransportOrderLine.objects.filter(
equipment__id=equipment_id,
transport_order__from_convention__load_out__range=(start, end),
).delete()
TransportOrderLine.objects.filter(
equipment__id=equipment_id,
transport_order__to_convention__load_in__range=(start, end),
).delete()
| 36.041121
| 256
| 0.587283
| 2,311
| 19,282
| 4.635223
| 0.10688
| 0.041449
| 0.01587
| 0.011202
| 0.428305
| 0.352595
| 0.289955
| 0.233383
| 0.190721
| 0.155246
| 0
| 0.009713
| 0.295198
| 19,282
| 534
| 257
| 36.108614
| 0.778514
| 0.151281
| 0
| 0.240106
| 0
| 0
| 0.087877
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.055409
| false
| 0.005277
| 0.026385
| 0
| 0.150396
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
32cf5c6af409ad539e05135e062b11460576c4f6
| 5,575
|
py
|
Python
|
my_ner.py
|
shouxieai/nlp-bilstm_crf-ner
|
907381325eeb0a2c29004e1c617bea7312579ba8
|
[
"Apache-2.0"
] | 16
|
2021-12-14T10:51:25.000Z
|
2022-03-30T10:10:09.000Z
|
my_ner.py
|
shouxieai/nlp-bilstm-ner
|
907381325eeb0a2c29004e1c617bea7312579ba8
|
[
"Apache-2.0"
] | 1
|
2022-03-23T04:28:50.000Z
|
2022-03-23T04:28:50.000Z
|
my_ner.py
|
shouxieai/nlp-bilstm-ner
|
907381325eeb0a2c29004e1c617bea7312579ba8
|
[
"Apache-2.0"
] | 2
|
2021-12-08T02:48:01.000Z
|
2021-12-13T13:03:25.000Z
|
import os
from torch.utils.data import Dataset,DataLoader
import torch
import torch.nn as nn
from sklearn.metrics import f1_score
def build_corpus(split, make_vocab=True, data_dir="data"):
"""读取数据"""
assert split in ['train', 'dev', 'test']
word_lists = []
tag_lists = []
with open(os.path.join(data_dir, split+".char.bmes"), 'r', encoding='utf-8') as f:
word_list = []
tag_list = []
for line in f:
if line != '\n':
word, tag = line.strip('\n').split()
word_list.append(word)
tag_list.append(tag)
else:
word_lists.append(word_list)
tag_lists.append(tag_list)
word_list = []
tag_list = []
word_lists = sorted(word_lists, key=lambda x: len(x), reverse=True)
tag_lists = sorted(tag_lists, key=lambda x: len(x), reverse=True)
    # If make_vocab is True, also return word2id and tag2id
if make_vocab:
word2id = build_map(word_lists)
tag2id = build_map(tag_lists)
word2id['<UNK>'] = len(word2id)
word2id['<PAD>'] = len(word2id)
tag2id['<PAD>'] = len(tag2id)
return word_lists, tag_lists, word2id, tag2id
else:
return word_lists, tag_lists
def build_map(lists):
maps = {}
for list_ in lists:
for e in list_:
if e not in maps:
maps[e] = len(maps)
return maps
class MyDataset(Dataset):
def __init__(self,datas,tags,word_2_index,tag_2_index):
self.datas = datas
self.tags = tags
self.word_2_index = word_2_index
self.tag_2_index = tag_2_index
def __getitem__(self,index):
data = self.datas[index]
tag = self.tags[index]
data_index = [self.word_2_index.get(i,self.word_2_index["<UNK>"]) for i in data]
tag_index = [self.tag_2_index[i] for i in tag]
return data_index,tag_index
def __len__(self):
assert len(self.datas) == len(self.tags)
return len(self.datas)
def batch_data_pro(self,batch_datas):
global device
data , tag = [],[]
da_len = []
for da,ta in batch_datas:
data.append(da)
tag.append(ta)
da_len.append(len(da))
max_len = max(da_len)
data = [i + [self.word_2_index["<PAD>"]] * (max_len - len(i)) for i in data]
tag = [i + [self.tag_2_index["<PAD>"]] * (max_len - len(i)) for i in tag]
data = torch.tensor(data,dtype=torch.long,device = device)
tag = torch.tensor(tag,dtype=torch.long,device = device)
return data , tag, da_len
class MyModel(nn.Module):
def __init__(self,embedding_num,hidden_num,corpus_num,bi,class_num,pad_index):
super().__init__()
self.embedding_num = embedding_num
self.hidden_num = hidden_num
self.corpus_num = corpus_num
self.bi = bi
self.embedding = nn.Embedding(corpus_num,embedding_num)
self.lstm = nn.LSTM(embedding_num,hidden_num,batch_first=True,bidirectional=bi)
if bi:
self.classifier = nn.Linear(hidden_num*2,class_num)
else:
self.classifier = nn.Linear(hidden_num, class_num)
self.cross_loss = nn.CrossEntropyLoss(ignore_index=pad_index)
def forward(self,data_index,data_len , tag_index=None):
em = self.embedding(data_index)
pack = nn.utils.rnn.pack_padded_sequence(em,data_len,batch_first=True)
output,_ = self.lstm(pack)
output,lens = nn.utils.rnn.pad_packed_sequence(output,batch_first=True)
pre = self.classifier(output)
self.pre = torch.argmax(pre, dim=-1).reshape(-1)
if tag_index is not None:
loss = self.cross_loss(pre.reshape(-1,pre.shape[-1]),tag_index.reshape(-1))
return loss
if __name__ == "__main__":
device = "cuda:0" if torch.cuda.is_available() else "cpu"
train_word_lists, train_tag_lists, word_2_index, tag_2_index = build_corpus("train")
dev_word_lists, dev_tag_lists = build_corpus("dev", make_vocab=False)
test_word_lists, test_tag_lists = build_corpus("test", make_vocab=False)
corpus_num = len(word_2_index)
class_num = len(tag_2_index)
train_batch_size = 5
dev_batch_size = len(dev_word_lists)
epoch = 100
lr = 0.001
embedding_num = 128
hidden_num = 129
bi = True
train_dataset = MyDataset(train_word_lists,train_tag_lists,word_2_index, tag_2_index)
train_dataloader = DataLoader(train_dataset,batch_size=train_batch_size,shuffle=False,collate_fn=train_dataset.batch_data_pro)
dev_dataset = MyDataset(dev_word_lists, dev_tag_lists, word_2_index, tag_2_index)
dev_dataloader = DataLoader(dev_dataset, batch_size=dev_batch_size, shuffle=False,collate_fn=dev_dataset.batch_data_pro)
model = MyModel(embedding_num,hidden_num,corpus_num,bi,class_num,word_2_index["<PAD>"])
model = model.to(device)
opt = torch.optim.Adam(model.parameters(),lr = lr)
for e in range(epoch):
model.train()
for data , tag, da_len in train_dataloader:
loss = model.forward(data,da_len,tag)
loss.backward()
opt.step()
opt.zero_grad()
        model.eval()  # F1, accuracy, precision, recall
for dev_data , dev_tag, dev_da_len in dev_dataloader:
test_loss = model.forward(dev_data,dev_da_len,dev_tag)
score = f1_score(dev_tag.reshape(-1).cpu().numpy(),model.pre.cpu().numpy(),average="micro")
print(score)
break
| 32.794118
| 130
| 0.63139
| 794
| 5,575
| 4.142317
| 0.201511
| 0.036485
| 0.033445
| 0.015202
| 0.209182
| 0.148373
| 0.09395
| 0.09395
| 0.067498
| 0.043174
| 0
| 0.013932
| 0.253274
| 5,575
| 169
| 131
| 32.988166
| 0.776123
| 0.010404
| 0
| 0.055118
| 0
| 0
| 0.019056
| 0
| 0
| 0
| 0
| 0
| 0.015748
| 1
| 0.062992
| false
| 0
| 0.03937
| 0
| 0.173228
| 0.007874
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
32cfbeee160a6e50ceb471701c99ace872cbfe2b
| 362
|
py
|
Python
|
leetcode/409.py
|
windniw/just-for-fun
|
54e5c2be145f3848811bfd127f6a89545e921570
|
[
"Apache-2.0"
] | 1
|
2019-08-28T23:15:25.000Z
|
2019-08-28T23:15:25.000Z
|
leetcode/409.py
|
windniw/just-for-fun
|
54e5c2be145f3848811bfd127f6a89545e921570
|
[
"Apache-2.0"
] | null | null | null |
leetcode/409.py
|
windniw/just-for-fun
|
54e5c2be145f3848811bfd127f6a89545e921570
|
[
"Apache-2.0"
] | null | null | null |
"""
link: https://leetcode.com/problems/longest-palindrome
problem: find the length of the longest palindrome that can be built from the characters of s
solution: use a map to count the occurrences of each character
"""
import collections


class Solution:
def longestPalindrome(self, s: str) -> int:
m, res = collections.defaultdict(int), 0
for x in s:
m[x] += 1
for x in m:
res += m[x] // 2 * 2
return min(len(s), res + 1)
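# --- Hedged usage sketch (illustrative addition) ---
# Classic example from the problem statement: the letters of "abccccdd" can build
# a palindrome such as "dccaccd", whose length is 7.
if __name__ == "__main__":  # pragma: no cover - example only
    assert Solution().longestPalindrome("abccccdd") == 7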
| 18.1
| 54
| 0.558011
| 48
| 362
| 4.208333
| 0.666667
| 0.039604
| 0.059406
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.01992
| 0.30663
| 362
| 19
| 55
| 19.052632
| 0.784861
| 0.290055
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.125
| false
| 0
| 0
| 0
| 0.375
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
32cfc631e8d4a50ff93f3a9a349602c8342fb97a
| 847
|
py
|
Python
|
nickenbot/config.py
|
brlafreniere/nickenbot
|
f13ec78057ec25823eb16df6ffab3a32eddfd3ca
|
[
"MIT"
] | 1
|
2016-08-10T12:20:58.000Z
|
2016-08-10T12:20:58.000Z
|
nickenbot/config.py
|
brlafreniere/nickenbot
|
f13ec78057ec25823eb16df6ffab3a32eddfd3ca
|
[
"MIT"
] | null | null | null |
nickenbot/config.py
|
brlafreniere/nickenbot
|
f13ec78057ec25823eb16df6ffab3a32eddfd3ca
|
[
"MIT"
] | null | null | null |
import yaml
import os
import sys
current_dir = os.path.dirname(os.path.realpath(__file__))
project_dir = os.path.realpath(os.path.join(current_dir, ".."))
class ConfigManager:
network = None
config = None
@classmethod
def load(clss):
if clss.network:
config_filepath = os.path.join(project_dir, 'config/%s.config.yaml' % clss.network)
else:
config_filepath = os.path.join(project_dir, 'config/config.yaml')
config_file = open(config_filepath, 'r')
config_yaml = config_file.read()
clss.config = yaml.load(config_yaml)
@classmethod
def get(clss, key):
if not clss.config:
clss.load()
if not clss.config:
print("Configuration not found. Exiting.")
sys.exit(1)
return clss.config[key]
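# --- Hedged usage sketch (illustrative addition, not part of the original module) ---
# The network name and config key below are hypothetical; they only illustrate how
# the class-level attributes above are meant to be driven.
def _demo_config():  # pragma: no cover - example only
    ConfigManager.network = 'example-network'   # reads config/example-network.config.yaml
    return ConfigManager.get('nickname')        # load() is called lazily on first get()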
| 27.322581
| 95
| 0.615112
| 106
| 847
| 4.764151
| 0.358491
| 0.071287
| 0.059406
| 0.079208
| 0.158416
| 0.158416
| 0.158416
| 0.158416
| 0
| 0
| 0
| 0.001621
| 0.271547
| 847
| 30
| 96
| 28.233333
| 0.816856
| 0
| 0
| 0.16
| 0
| 0
| 0.088548
| 0.024793
| 0
| 0
| 0
| 0
| 0
| 1
| 0.08
| false
| 0
| 0.12
| 0
| 0.36
| 0.04
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
32d046c8c2ed3ece0b08aa280a40083f8b7d16ab
| 2,277
|
py
|
Python
|
qna/views.py
|
channprj/KU-PL
|
7fc3719b612a819ed1bd443695d7f13f509ee596
|
[
"MIT"
] | null | null | null |
qna/views.py
|
channprj/KU-PL
|
7fc3719b612a819ed1bd443695d7f13f509ee596
|
[
"MIT"
] | null | null | null |
qna/views.py
|
channprj/KU-PL
|
7fc3719b612a819ed1bd443695d7f13f509ee596
|
[
"MIT"
] | null | null | null |
from django.shortcuts import render
from django.shortcuts import redirect
from django.shortcuts import get_object_or_404
from django.utils import timezone
from .forms import QuestionForm
from .forms import AnswerForm
from .models import Question
from .models import Answer
def question_list(request):
questions = Question.objects.filter(created_date__lte = timezone.now()).order_by('-updated_date')
return render(request, 'qna/question_list.htm', {'questions': questions})
def question_detail(request, pk):
question = get_object_or_404(Question, pk=pk)
if request.method == "POST":
form = AnswerForm(request.POST)
if form.is_valid():
answer = form.save(commit=False)
answer.question = question
answer.user = request.user
answer.updated_date = timezone.now()
answer.save()
return redirect('qna.views.question_detail', pk=question.pk)
else:
form = AnswerForm()
return render(request, 'qna/question_detail.htm', {'question': question, 'form': form})
def question_new(request):
if request.method == "POST":
form = QuestionForm(request.POST)
if form.is_valid():
question = form.save(commit=False)
question.user = request.user
question.updated_date = timezone.now()
question.save()
return redirect('qna.views.question_detail', pk=question.pk)
else:
form = QuestionForm()
return render(request, 'qna/question_edit.htm', {'form': form})
def question_edit(request, pk):
question = get_object_or_404(Question, pk=pk)
if request.method == "POST":
form = QuestionForm(request.POST, instance=question)
if form.is_valid():
question = form.save(commit=False)
question.user = request.user
question.updated_date = timezone.now()
question.save()
return redirect('qna.views.question_detail', pk=question.pk)
else:
form = QuestionForm(instance=question)
return render(request, 'qna/question_edit.htm', {'form': form})
def question_remove(request, pk):
question = get_object_or_404(Question, pk=pk)
question.delete()
return redirect('qna.views.question_list')
| 35.030769
| 101
| 0.665349
| 272
| 2,277
| 5.4375
| 0.194853
| 0.047329
| 0.02975
| 0.037863
| 0.592292
| 0.53144
| 0.515213
| 0.515213
| 0.46856
| 0.46856
| 0
| 0.00678
| 0.222661
| 2,277
| 64
| 102
| 35.578125
| 0.828814
| 0
| 0
| 0.462963
| 0
| 0
| 0.104569
| 0.080844
| 0
| 0
| 0
| 0
| 0
| 1
| 0.092593
| false
| 0
| 0.148148
| 0
| 0.388889
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
32d33f3c862ddf8043ee8ce09e1a526264e7c51a
| 1,648
|
py
|
Python
|
python/tests/test_oci.py
|
miku/labe
|
2d784f418e24ab6fef9f76791c9fdd02dd505657
|
[
"MIT"
] | null | null | null |
python/tests/test_oci.py
|
miku/labe
|
2d784f418e24ab6fef9f76791c9fdd02dd505657
|
[
"MIT"
] | null | null | null |
python/tests/test_oci.py
|
miku/labe
|
2d784f418e24ab6fef9f76791c9fdd02dd505657
|
[
"MIT"
] | 1
|
2021-09-16T10:51:00.000Z
|
2021-09-16T10:51:00.000Z
|
"""
Unit tests for labe. Most are not mocked yet, hence slow.
"""
import collections
import socket
import pytest
import requests
from labe.oci import get_figshare_download_link, get_terminal_url
def no_internet(host="8.8.8.8", port=53, timeout=3):
"""
Host: 8.8.8.8 (google-public-dns-a.google.com)
OpenPort: 53/tcp
Service: domain (DNS/TCP)
"""
try:
socket.setdefaulttimeout(timeout)
socket.socket(socket.AF_INET, socket.SOCK_STREAM).connect((host, port))
return False
except socket.error as ex:
return True
@pytest.mark.skipif(no_internet(), reason="no internet")
def test_get_redirect_url():
with pytest.raises(requests.exceptions.MissingSchema):
get_terminal_url("undefined")
assert get_terminal_url("https://google.com") == "https://www.google.com/"
assert get_terminal_url("http://google.com") == "https://www.google.com/?gws_rd=ssl"
assert (get_terminal_url("https://doi.org/10.1111/icad.12417") ==
"https://onlinelibrary.wiley.com/doi/10.1111/icad.12417")
@pytest.mark.skipif(no_internet(), reason="no internet")
def test_get_figshare_download_link():
Case = collections.namedtuple("Case", "link result")
cases = (
Case(
"https://doi.org/10.6084/m9.figshare.6741422.v11",
"https://figshare.com/ndownloader/articles/6741422/versions/11",
),
Case(
"https://doi.org/10.6084/m9.figshare.6741422.v7",
"https://figshare.com/ndownloader/articles/6741422/versions/7",
),
)
for c in cases:
assert get_figshare_download_link(c.link) == c.result
| 30.518519
| 88
| 0.662621
| 221
| 1,648
| 4.809955
| 0.434389
| 0.011289
| 0.065851
| 0.064911
| 0.374412
| 0.312324
| 0.263405
| 0.169332
| 0.169332
| 0.097836
| 0
| 0.062126
| 0.18932
| 1,648
| 53
| 89
| 31.09434
| 0.733533
| 0.086772
| 0
| 0.171429
| 0
| 0
| 0.303256
| 0
| 0
| 0
| 0
| 0
| 0.114286
| 1
| 0.085714
| false
| 0
| 0.142857
| 0
| 0.285714
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
32d559b8ce0d7d1c7f26302620ef00f9255a82dc
| 26,404
|
py
|
Python
|
pyNastran/bdf/cards/test/test_dynamic.py
|
ACea15/pyNastran
|
5ffc37d784b52c882ea207f832bceb6b5eb0e6d4
|
[
"BSD-3-Clause"
] | 293
|
2015-03-22T20:22:01.000Z
|
2022-03-14T20:28:24.000Z
|
pyNastran/bdf/cards/test/test_dynamic.py
|
ACea15/pyNastran
|
5ffc37d784b52c882ea207f832bceb6b5eb0e6d4
|
[
"BSD-3-Clause"
] | 512
|
2015-03-14T18:39:27.000Z
|
2022-03-31T16:15:43.000Z
|
pyNastran/bdf/cards/test/test_dynamic.py
|
ACea15/pyNastran
|
5ffc37d784b52c882ea207f832bceb6b5eb0e6d4
|
[
"BSD-3-Clause"
] | 136
|
2015-03-19T03:26:06.000Z
|
2022-03-25T22:14:54.000Z
|
"""tests dynamic cards and dynamic load cards"""
import unittest
from io import StringIO
import numpy as np
import pyNastran
from pyNastran.bdf.bdf import BDF, read_bdf, CrossReferenceError
from pyNastran.bdf.cards.test.utils import save_load_deck
#ROOT_PATH = pyNastran.__path__[0]
class TestDynamic(unittest.TestCase):
"""
The cards tested are:
* TSTEP
"""
def test_tstep(self):
"""tests a TSTEP card"""
model = BDF(debug=None)
sid = 42
n1 = n2 = 5
dt1 = dt2 = 0.1
no1 = no2 = 3
card = ['TSTEP', sid,
n1, dt1, no1, None, None, None, None, None,
n2, dt2, no2]
model.add_card(card, card[0], comment='tstep comment')
model.validate()
tstep = model.tsteps[42]
tstep.raw_fields()
tstep.write_card()
tstep.write_card(size=16)
sid = 43
N = 5
DT = 0.1
NO = 3
tstep2 = model.add_tstep(sid, N, DT, NO)
tstep2.raw_fields()
tstep2.write_card()
tstep2.write_card(size=16)
save_load_deck(model)
def test_tstepnl(self):
"""tests a TSTEPNL card"""
model = BDF(debug=None)
card = ['TSTEPNL', 250, 100, .01, 1, 'ADAPT', 2, 10, 'PW',
1.E-2, 1.E-3, 1.E-6, 2, 10, 2, .02, None,
5, 5, 0, 0.75, 16.0, 0.1, 20.,]
model.add_card(card, card[0], comment='tstepnl comment')
model.validate()
tstepnl = model.tstepnls[250]
tstepnl.raw_fields()
tstepnl.write_card()
tstepnl.write_card(size=16)
sid = 42
ndt = 10
dt = 3.
no = 5
tstepnl2 = model.add_tstepnl(sid, ndt, dt, no)
tstepnl2.raw_fields()
tstepnl2.write_card()
tstepnl2.write_card(size=16)
save_load_deck(model)
def test_delay(self):
"""tests a two field DELAY card"""
model = BDF(debug=False)
node1, c1, t1 = 100, 3, 0.3
node2, c2, t2 = 101, 4, 0.4
sid = 42
card_lines = ['DELAY', sid, node1, c1, t1, node2, c2, t2]
model.add_card(card_lines, card_lines[0], comment='', is_list=True,
has_none=True)
model.add_grid(100, [0., 0., 0.])
model.add_grid(101, [0., 0., 0.])
model.validate()
model.cross_reference()
#print(model.delays[42])
save_load_deck(model)
def test_dphase(self):
"""tests a two field DPHASE card"""
model = BDF(debug=False)
node1, c1, t1 = 100, 3, 0.3
node2, c2, t2 = 101, 4, 0.4
sid = 42
card_lines = ['DPHASE', sid, node1, c1, t1, node2, c2, t2]
model.add_card(card_lines, card_lines[0], comment='', is_list=True,
has_none=True)
model.add_grid(100, [0., 0., 0.])
model.add_grid(101, [0., 0., 0.])
model.validate()
model.cross_reference()
#print(model.dphases[42])
save_load_deck(model)
def test_freq(self):
"""tests FREQ, FREQ1, FREQ2, FREQ4"""
model = BDF(debug=False)
sid = 101
freqs = 0.1
freq = model.add_freq(sid, freqs, comment='freq')
#print(freq)
freqs = [2.0, 3.0]
freq = model.add_freq(sid, freqs, comment='freq')
#print(freq)
f1 = 0.
df = 2.0
freq1 = model.add_freq1(sid, f1, df, ndf=5, comment='freq1')
assert len(freq1.freqs) == 6, 'freqs=%s' % freq1.freqs
#print(freq1)
f1 = 1.
f2 = 8.0
freq2 = model.add_freq2(sid, f1, f2, nf=6, comment='freq2')
assert len(freq2.freqs) == 7, 'freqs=%s' % freq2.freqs
assert np.allclose(freq2.freqs.max(), f2), freq2.freqs
#print(freq2)
freq4 = model.add_freq4(sid, f1, f2, fspread=0.1, nfm=3, comment='freq4')
#print(model.frequencies[sid])
#print(freq4)
fractions = [0., 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0]
freq5 = model.add_freq5(sid, fractions, f1=0., f2=100., comment='freq5')
fractions = np.linspace(0., 1.)
unused_freq5b = model.add_freq5(sid, fractions, f1=0., f2=100., comment='freq5')
model.validate()
freq.raw_fields()
freq.write_card()
freq.write_card(size=16)
freq1.raw_fields()
freq1.write_card()
freq1.write_card(size=16)
freq2.raw_fields()
freq2.write_card()
freq2.write_card(size=16)
freq4.raw_fields()
freq4.write_card()
freq4.write_card(size=16)
freq5.raw_fields()
freq5.write_card()
freq5.write_card(size=16)
bdf_file = StringIO()
model.write_bdf(bdf_file, close=False)
unused_out = bdf_file.getvalue()
bdf_file.seek(0)
model2 = read_bdf(bdf_file, punch=True, debug=False)
model2.uncross_reference()
model2.safe_cross_reference()
model2.uncross_reference()
save_load_deck(model)
def test_tload(self):
"""tests DLOAD, TLOAD1, TLOAD2, TABLED2 cards"""
model = BDF(debug=False)
model.set_error_storage(nparse_errors=0, stop_on_parsing_error=True,
nxref_errors=0, stop_on_xref_error=True)
sid = 2
excite_id = 20
delay = 0
tid = 42
tload1 = model.add_tload1(sid, excite_id, tid, delay=0, Type='LOAD',
us0=0.0, vs0=0.0, comment='tload1')
tload1 = model.add_tload1(sid, excite_id, tid, delay=1., Type='DISP',
us0=0.0, vs0=0.0, comment='')
tload1 = model.add_tload1(sid, excite_id, tid, delay=2, Type='VELO',
us0=0.0, vs0=0.0, comment='')
tload1 = model.add_tload1(sid, excite_id, tid, delay=0, Type='ACC',
us0=0.0, vs0=0.0, comment='')
nid = 100
model.add_grid(nid, [0., 0., 0.])
darea_id = excite_id
component = 4
scale = 1.
model.add_darea(darea_id, nid, component, scale, comment='')
sid = 3
excite_id = 30
tload2 = model.add_tload2(sid, excite_id, delay=0, Type='LOAD',
T1=0., T2=None, frequency=0., phase=0.,
c=0., b=0., us0=0., vs0=0., comment='tload2')
tload2 = model.add_tload2(sid, excite_id, delay=1., Type='D',
T1=0., T2=None, frequency=0., phase=0.,
c=0., b=0., us0=0., vs0=0., comment='')
tload2 = model.add_tload2(sid, excite_id, delay=2, Type='V',
T1=0., T2=None, frequency=0., phase=0.,
c=0., b=0., us0=0., vs0=0., comment='')
tload2 = model.add_tload2(sid, excite_id, delay=0, Type='A',
T1=0., T2=1., frequency=0., phase=0.,
c=0., b=0., us0=0., vs0=0., comment='')
darea_id = excite_id
component = 4
scale = 1.
model.add_darea(darea_id, nid, component, scale, comment='')
delay_id = 2
nodes = 100
components = 2
delays = 1.5
delay = model.add_delay(delay_id, nodes, components, delays)
sid = 1
scale = 1.0
scale_factors = 1.
load_ids = 2
dload = model.add_dload(sid, scale, scale_factors, load_ids,
comment='dload')
x1 = 0.1
x = np.linspace(0., 1.)
y = np.sin(x)
tabled2 = model.add_tabled2(tid, x1, x, y, comment='tabled2')
model.pop_parse_errors()
delay.validate()
delay.raw_fields()
delay.write_card()
delay.write_card(size=16)
tload1.validate()
tload1.raw_fields()
tload1.write_card()
tload1.write_card(size=16)
tload2.validate()
tload2.raw_fields()
tload2.write_card()
tload2.write_card(size=16)
dload.validate()
dload.raw_fields()
dload.write_card()
dload.write_card(size=16)
tabled2.validate()
tabled2.raw_fields()
tabled2.write_card()
tabled2.write_card(size=16)
model.validate()
model.cross_reference()
model.pop_xref_errors()
bdf_file = StringIO()
model.write_bdf(bdf_file, close=False)
unused_out = bdf_file.getvalue()
bdf_file.seek(0)
unused_outs = model.get_bdf_stats(return_type='list')
unused_outs = model.get_bdf_stats(return_type='string')
time = 0.5
out1 = tload1.get_load_at_time(time, scale=1.)
out2 = tload2.get_load_at_time(time, scale=1.)
#print(out1)
assert len(out1) == 1, out1
assert len(out2) == 1, out2
#print(out1)
#print(out2)
time = [0.5, 0.9]
out1 = tload1.get_load_at_time(time, scale=1.)
out2 = tload2.get_load_at_time(time, scale=1.)
assert len(out1) == 2, out1
assert len(out2) == 2, out2
#print(out1)
#print(out2)
model2 = read_bdf(bdf_file, punch=True, debug=False)
model2.uncross_reference()
model2.safe_cross_reference()
model2.uncross_reference()
#print(out)
#print(outs)
save_load_deck(model, run_renumber=False, run_convert=False)
def test_rload(self):
"""tests DLOAD, RLOAD1, RLOAD2, TABLED2 cards"""
model = BDF(debug=False)
#model.case_control_deck = CaseControlDeck(['DLOAD=2', 'BEGIN BULK'])
sid = 2
excite_id = 20
delay = 0
tid = 42
rload1 = model.add_rload1(sid, excite_id, delay=0, dphase=0, tc=0,
td=0, Type='LOAD', comment='rload1')
rload1 = model.add_rload1(sid, excite_id, delay=1., dphase=0, tc=0,
td=0, Type='DISP', comment='rload1')
rload1 = model.add_rload1(sid, excite_id, delay=2, dphase=0, tc=0,
td=0, Type='VELO', comment='rload1')
rload1 = model.add_rload1(sid, excite_id, delay=0, dphase=0, tc=0,
td=0, Type='ACC', comment='rload1')
sid = 3
excite_id = 30
rload2 = model.add_rload2(sid, excite_id, delay=0, dphase=0, tb=0,
tp=0, Type='LOAD', comment='rload2')
rload2 = model.add_rload2(sid, excite_id, delay=1., dphase=0, tb=0,
tp=0, Type='D', comment='rload2')
rload2 = model.add_rload2(sid, excite_id, delay=2, dphase=0, tb=0,
tp=0, Type='V', comment='rload2')
rload2 = model.add_rload2(sid, excite_id, delay=0, dphase=0, tb=0,
tp=0, Type='A', comment='rload2')
excite_id = 20
nid = 21
c = 1
scale = 1.0
model.add_darea(excite_id, nid, c, scale, comment='darea')
model.add_grid(nid, [0., 0., 0.])
excite_id = 30
model.add_darea(excite_id, nid, c, scale, comment='darea')
delay_id = 2
nodes = 100
components = 2
delays = 1.5
delay = model.add_delay(delay_id, nodes, components, delays)
sid = 1
scale = 1.0
scale_factors = 1.
load_ids = 2
dload = model.add_dload(sid, scale, scale_factors, load_ids,
comment='dload')
x1 = 0.1
x = np.linspace(0., 1.)
y = np.sin(x)
tabled2 = model.add_tabled2(tid, x1, x, y, comment='tabled2')
model.pop_parse_errors()
delay.validate()
delay.raw_fields()
delay.write_card()
delay.write_card(size=16)
rload1.validate()
rload1.raw_fields()
rload1.write_card()
rload1.write_card(size=16)
rload2.validate()
rload2.raw_fields()
rload2.write_card()
rload2.write_card(size=16)
dload.validate()
dload.raw_fields()
dload.write_card()
dload.write_card(size=16)
tabled2.validate()
tabled2.raw_fields()
tabled2.write_card()
tabled2.write_card(size=16)
model.validate()
model.cross_reference()
model.pop_xref_errors()
#print(model.dareas)
bdf_file = StringIO()
model.write_bdf(bdf_file, close=False)
unused_out = bdf_file.getvalue()
bdf_file.seek(0)
unused_outs = model.get_bdf_stats(return_type='list')
unused_outs = model.get_bdf_stats(return_type='string')
freq = 0.5
out1 = rload1.get_load_at_freq(freq, scale=1.)
#out2 = rload2.get_load_at_time(freq, scale=1.)
#print(out1)
#print(out2)
assert len(out1) == 1, out1
#assert len(out2) == 1, out2
freq = [0.5, 0.9]
out1 = rload1.get_load_at_freq(freq, scale=1.)
#out2 = rload2.get_load_at_freq(freq, scale=1.)
#print(out1)
#print(out2)
assert len(out1) == 2, out1
#assert len(out2) == 2, out2
model2 = read_bdf(bdf_file, punch=True, debug=False)
model2.uncross_reference()
model2.safe_cross_reference()
model2.uncross_reference()
#print(out)
#print(outs)
save_load_deck(model, run_renumber=False, run_convert=False)
def test_ascre(self):
"""tests ASCRE, DELAY, DPHASE, TABLED2"""
model = BDF(debug=False)
sid = 1
excite_id = 2
rho = 1.0
b = 2.0
acsrce = model.add_acsrce(sid, excite_id, rho, b, delay=0, dphase=0, power=0,
comment='acsrce')
acsrce.raw_fields()
sid = 3
excite_id = 4
rho = 1.0
b = 2.0
delay = 3
dphase = 4
power = 5
unused_acsrce2 = model.add_acsrce(sid, excite_id, rho, b, delay=delay,
dphase=dphase, power=power)
nodes = 4
components = 5
delays = 6.0
delay = model.add_delay(sid, nodes, components, delays, comment='')
nodes = 4
components = 6
phase_leads = 2.0
delay = model.add_dphase(sid, nodes, components, phase_leads)
tid = power
x1 = 1.
x = np.linspace(0., 1.) + 10.
y = np.sin(x) + 2.
model.add_tabled2(tid, x1, x, y, comment='tabled2')
model.add_grid(4, [0., 0., 0.])
model.validate()
model.pop_parse_errors()
model.cross_reference()
model.pop_xref_errors()
save_load_deck(model, run_convert=False)
def test_nlparm(self):
"""tests NLPARM"""
model = BDF(debug=False)
nlparm_id = 42
model.add_nlparm(nlparm_id, comment='nlparm')
save_load_deck(model)
def test_nlpci(self):
"""tests NLPCI"""
model = BDF(debug=False)
nlpci_id = 42
nlpci = model.add_nlpci(nlpci_id, Type='CRIS', minalr=0.25, maxalr=4.,
scale=0., desiter=12, mxinc=20,
comment='nlpci')
nlpci.raw_fields()
#print(nlpci)
save_load_deck(model)
#def test_rotord(self):
#"""tests ROTORD"""
#model = BDF(debug=False)
#sid = 42
#rstart = 10.0
#rstep = 11.0
#numstep = 10
#rids = []
#rsets = [31]
#rspeeds = [None]
#rcords = []
#w3s = []
#w4s = []
#rforces = []
#brgsets = []
#rotord = model.add_rotord(
#sid, rstart, rstep, numstep,
#rids, rsets, rspeeds, rcords, w3s, w4s, rforces, brgsets,
#refsys='ROT', cmout=0.0, runit='RPM',
#funit='RPM', zstein='NO', orbeps=1.e-6,
#roprt=0, sync=1, etype=1, eorder=1.0,
#threshold=0.02, maxiter=10, comment='rotord')
#rotord.validate()
#save_load_deck(model)
def test_loadcyn(self):
"""tests LOADCYN"""
model = BDF(debug=False, log=None, mode='msc')
sid = 42
scale = 4.
segment_id = 10
scales = [1.]
load_ids = [3]
loadcyn = model.add_loadcyn(sid, scale, segment_id, scales, load_ids,
segment_type=None, comment='loadcyn')
loadcyn.validate()
model.pop_parse_errors()
card = loadcyn.write_card(size=8)
loadcyn.write_card(size=16, is_double=False)
loadcyn.write_card(size=16, is_double=True)
loadcyn.raw_fields()
str(loadcyn)
#print(model.loads)
model.loads = {}
model.add_card(card.split('\n')[1:], 'LOADCYN', comment='', is_list=False, has_none=True)
model.cross_reference()
model.uncross_reference()
model.safe_cross_reference()
save_load_deck(model, run_convert=False)
def test_deform(self):
"""tests DEFORM"""
model = BDF(debug=False, log=None, mode='msc')
sid = 42
eid = 10
deformation = 32.
deform = model.add_deform(sid, eid, deformation, comment='deform')
deform.validate()
model.pop_parse_errors()
card = deform.write_card(size=8)
deform.write_card(size=16, is_double=False)
deform.write_card(size=16, is_double=True)
deform.raw_fields()
str(deform)
model.loads = {}
model.add_card(card.split('\n')[1:], 'DEFORM', comment='', is_list=False, has_none=True)
model.pop_parse_errors()
with self.assertRaises(CrossReferenceError):
model.cross_reference()
with self.assertRaises(CrossReferenceError):
model.pop_xref_errors()
model.uncross_reference()
model.reset_errors()
model.safe_cross_reference()
delta = 0.1
eid1 = 11
eid2 = 12
eid3 = 13
fields = ['DEFORM', sid, eid1, delta, eid2, delta, eid3, delta]
model.add_card(fields, 'DEFORM')
eid = 10
nids = [2, 3]
mid = 100
model.add_grid(2, [0., 0., 0.])
model.add_grid(3, [1., 0., 0.])
E = 3.0e7
G = None
nu = 0.3
model.add_mat1(mid, E, G, nu)
model.add_conrod(eid, mid, nids, A=0.0, j=0.0, c=0.0, nsm=0.0, comment='')
model.add_conrod(eid1, mid, nids, A=0.0, j=0.0, c=0.0, nsm=0.0, comment='')
model.add_conrod(eid2, mid, nids, A=0.0, j=0.0, c=0.0, nsm=0.0, comment='')
model.add_conrod(eid3, mid, nids, A=0.0, j=0.0, c=0.0, nsm=0.0, comment='')
model.cross_reference()
save_load_deck(model)
def test_rforce(self):
"""tests RFORCE"""
model = BDF(debug=False, log=None, mode='msc')
#model._nxref_errors = 0
sid = 42
nid = 2
cid = 1
scale = 2.
r123 = [0., 1., 2.]
rforce = model.add_rforce(sid, nid, scale, r123, cid=cid,
method=1, racc=0., mb=0, idrf=0, comment='rforce')
rforce.validate()
card = rforce.write_card(size=8)
rforce.write_card(size=16, is_double=False)
rforce.write_card(size=16, is_double=True)
rforce.raw_fields()
str(rforce)
model.loads = {}
model.add_card(card.split('\n')[1:], 'RFORCE', comment='', is_list=False, has_none=True)
model.pop_parse_errors()
with self.assertRaises(CrossReferenceError):
model.cross_reference()
with self.assertRaises(CrossReferenceError):
model.pop_xref_errors()
model.uncross_reference()
model.reset_errors()
with self.assertRaises(KeyError):
model.safe_cross_reference()
model.reset_errors()
model.add_grid(2, [0., 0., 0.])
model.add_cord2r(cid, [0., 0., 0.], [0., 0., 1.], [1., 0., 0.], rid=0, comment='')
model.cross_reference()
save_load_deck(model, run_convert=False)
def test_rforce1(self):
"""tests RFORCE1"""
model = BDF(debug=False, log=None, mode='msc')
sid = 42
nid = 2
scale = 2.
#r123 = None
group_id = -4
cid = 1
rforce1 = model.add_rforce1(sid, nid, scale, group_id, cid=cid, r123=None,
racc=0., mb=0, method=2, comment='rforce1')
rforce1.validate()
rforce1b = model.add_rforce1(sid, nid, scale, group_id, cid=0, r123=[1., 2., 3.],
racc=0., mb=0, method=2, comment='rforce1')
rforce1b.validate()
model.pop_parse_errors()
card = rforce1.write_card(size=8)
rforce1.write_card(size=16, is_double=False)
rforce1.write_card(size=16, is_double=True)
rforce1.raw_fields()
str(rforce1)
model.loads = {}
model.add_card(card.split('\n')[1:], 'RFORCE1', comment='', is_list=False, has_none=True)
model.pop_parse_errors()
with self.assertRaises(CrossReferenceError):
model.cross_reference()
with self.assertRaises(CrossReferenceError):
model.pop_xref_errors()
model.uncross_reference()
model.reset_errors()
with self.assertRaises(KeyError):
model.safe_cross_reference()
model.reset_errors()
model.add_grid(2, [0., 0., 0.])
model.add_cord2r(cid, [0., 0., 0.], [0., 0., 1.], [1., 0., 0.], rid=0, comment='')
model.cross_reference()
save_load_deck(model, run_convert=False)
def _test_dynamic1(self):
"""
xref test for:
- DLOAD -> DAREA -> NID
DLOAD take priority
useful for dynamic nodal forces/disp/vel/acc
"""
msg = """
SOL 108
CEND
SUBCASE 1
DLOAD = 33
DISP(PLOT) = ALL
BEGIN BULK
$DLOAD SID S S1 L1 S2 L2
DLOAD, 33, 1.0, 1.0, 35, 1.0, 36
$RLOAD1 SID EXCITEID DELAY DPHASE TC TD TYPE
RLOAD1, 35, 29, 0.2, 5.0, 40, 0.0, 0
RLOAD1, 36, 29, 31, 32, 4.0, 41, 0
$DAREA SID GRID COMP SCALE
DAREA, 29, 30, 1, 5.2
$DELAY SID GRID COMP LAG
DELAY, 31, 30, 1, 0.2
$DPHASE SID GRID COMP ANGLE
DPHASE, 32, 30, 1, 5.0
$TABLED1 TID XAXIS YAXIS
$ x1 y1 x2 y2 x3 y3 x4 y4
TABLED1, 40, LINEAR, LINEAR
,0.0, 4.0, 2.0, 8.0, 6.0, 8.0, ENDT
TABLED1, 41, LINEAR, LINEAR
,0.0, 0.5, 0.6, 0.4, 0.8, 0.7, ENDT
GRID,30
"""
model = BDF(debug=False)
bdf_file = StringIO()
bdf_file.write(msg)
bdf_file.seek(0)
model.read_bdf(bdf_file)
#In the example:
# * The DLOAD case control command selects the loading reference
# by the DLOAD bulk entry having SID = 33 as the dynamic
# loading for the analysis.
# * The DLOAD bulk entry combines the dynamic loads defined by
# two RLOAD1 entries having SIDs of 35 and 36. Neither dynamic
# load is scaled using the DLOAD entry.
# * Both RLOAD1 entries reference the same DAREA entry. Thus,
# both dynamic loads are applied to the same degree-of-freedom.
# In this example, it is a single degree-of-freedom, Component 1
# of Grid 30. Both dynamic loads are scaled 5.2 times by the
# DAREA entry.
# * Because the dynamic loads are applied at only one
# degree-of-freedom, the time delay and phase angle can be
# defined directly on the RLOAD1 entries. This is the case
# for the RLOAD1 entry having SID = 35. However, for
# demonstration purposes, the RLOAD1 entry having SID = 36
# references DELAY and DPHASE bulk entries. Both approaches
# define a delay of 0.2 and a phase angle of 5.0 for the
# corresponding dynamic load.
# * C(f) for the RLOAD1 entry having SID = 35 is defined by the
# TABLED1 entry having TID = 40. (See Figure 6-6.) D(f) for
# this same RLOAD1 entry is defined as zero.
# * C(f) for the RLOAD1 entry having SID = 36 is a constant
# value of 4.0. D(f) for this same RLOAD entry is defined by
# the TABLED1 entry having TID = 41.
def _test_dynamic2(self):
"""
xref test for:
- LOADSET -> LSEQ -> FORCE, PLOAD
- DLOAD -> RLOAD1 -> TABLED1
LOADSET take priority
useful for generalized dynamic forces/disp/vel/acc
"""
msg = """
SOL 108
CEND
SUBCASE 1
LOADSET = 27
DLOAD = 25
DISP(PLOT) = ALL
BEGIN BULK
$LSEQ SID EXCITEID LID
LSEQ, 27, 28, 26
$RLOAD1 SID EXCITEID DELAY DPHASE TC TD
RLOAD1, 25, 28, 0.0, 10.0, 29
$FORCE SID GRID CID F N1 N2 N3
FORCE, 26, 425, , 2.5, 1.0
$PLOAD SID PRES GRID1 GRID2 GRID3 GRID4
PLOAD, 26, 50.0, 63, 64, 88, 91
$TABLED1 TID XAXIS YAXIS
$ x1 y1 x2 y2 x3 y3 x4 y4
TABLED1, 29, LINEAR, LINEAR
,0.0, 0.5, 0.6, 0.4, 0.8, 0.7, ENDT
"""
model = BDF(debug=False)
bdf_file = StringIO()
bdf_file.write(msg)
bdf_file.seek(0)
model.read_bdf(bdf_file)
#In the example:
# * The LOADSET request in case control selects the LSEQ entry
# having SID = 27.
# * The LSEQ entry references the static loads having SID = 26.
# These loads include the FORCE and PLOAD entries. The FORCE
# and PLOAD entries provide the spatial distribution of the
# dynamic loading.
# * The DLOAD request in case control selects the RLOAD1 entry
# having SID = 25.
# * The RLOAD1 entry references a TABLED1 entry having TID = 29.
# This TABLED1 entry defines C(f) for the RLOAD1 entry. Because
# the TD field on the RLOAD1 entry is undefined, D(f) defaults
# to zero.
# * The EXCITEID fields of the LSEQ and RLOAD1 entries are both
# 28, thereby linking the temporal and spatial distributions of
# the dynamic loading. Thus, the dynamic load defined by the
# RLOAD1 entry is:
# o Scaled by 2.5 and applied as a force to Component 1 of Grid 425.
# o Scaled by 50.0 and applied as a pressure to the quadrilateral
# element face defined by Grids 63, 64, 88, and 91.
if __name__ == '__main__': # pragma: no cover
unittest.main()
| 34.069677
| 97
| 0.549879
| 3,542
| 26,404
| 3.966403
| 0.121683
| 0.042708
| 0.028685
| 0.028828
| 0.581821
| 0.549434
| 0.52623
| 0.473486
| 0.451491
| 0.419105
| 0
| 0.068243
| 0.325708
| 26,404
| 774
| 98
| 34.113695
| 0.720849
| 0.15581
| 0
| 0.532491
| 0
| 0.00722
| 0.067482
| 0
| 0
| 0
| 0
| 0
| 0.030686
| 1
| 0.028881
| false
| 0
| 0.01083
| 0
| 0.041516
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
32d6f22794e1af28d1b004461271504fb7680002
| 4,691
|
py
|
Python
|
src/kv/benchmark/runbench.py
|
showapicxt/iowow
|
a29ac5b28f1b6c2817061c2a43b7222176458876
|
[
"MIT"
] | 242
|
2015-08-13T06:38:10.000Z
|
2022-03-17T13:49:56.000Z
|
src/kv/benchmark/runbench.py
|
showapicxt/iowow
|
a29ac5b28f1b6c2817061c2a43b7222176458876
|
[
"MIT"
] | 44
|
2018-04-08T07:12:02.000Z
|
2022-03-04T06:15:01.000Z
|
src/kv/benchmark/runbench.py
|
showapicxt/iowow
|
a29ac5b28f1b6c2817061c2a43b7222176458876
|
[
"MIT"
] | 18
|
2016-01-14T09:50:34.000Z
|
2022-01-26T23:07:40.000Z
|
import subprocess
import argparse
import os
import random

from collections import OrderedDict
from parse import parse

from bokeh.io import export_png
from bokeh.plotting import figure, output_file, show, save
from bokeh.models import ColumnDataSource, FactorRange
from bokeh.transform import factor_cmap
from bokeh.layouts import gridplot
from bokeh.embed import components

parser = argparse.ArgumentParser(description='IWKV Benchmarks')
parser.add_argument(
    '-b', '--basedir', help='Base directory with benchmark executables', default='.', nargs='?')
args = parser.parse_args()

basedir = os.path.abspath(args.basedir)
print('Base directory:', basedir)

benchmarks = [
    'iwkv',
    'lmdb',
    'bdb',
    'wiredtiger',
    'kyc',
    'tc'
    #'leveldb'
]

runs = []
runs += [{'b': 'fillrandom2', 'n': n, 'vz': vz, 'rs': 2853624176, 'sizestats': True}
         for n in (int(1e6),)
         for vz in (1000,)]
runs += [{'b': 'fillrandom2,readrandom,deleterandom', 'n': n, 'vz': vz, 'kz': kz, 'rs': 2105940112}
         for n in (int(2e6),)
         for vz in (40, 400,)
         for kz in (16, 1024,)]
runs += [{'b': 'fillseq,overwrite,deleteseq', 'n': n, 'kz': kz, 'rs': 570078848}
         for n in (int(2e6),)
         for vz in (400,)
         for kz in (16, 1024,)]
runs += [{'b': 'fillrandom2,readrandom,readseq,readreverse', 'n': n, 'vz': vz, 'rs': 1513135152}
         for n in (int(10e6),)
         for vz in (200,)]
runs += [{'b': 'fillrandom2', 'n': n, 'vz': vz, 'rs': 3434783568}
         for n in (int(10e3),)
         for vz in ((200 * 1024),)]

results = OrderedDict()


def fill_result(bm, run, sizestats, line):
    key = ' '.join(['-{} {}'.format(a, v) for a, v in run.items()])
    if key not in results:
        results[key] = OrderedDict()
    if bm not in results[key]:
        results[key][bm] = OrderedDict()
    res = results[key][bm]
    pval = parse('done: {} in {}', line)
    if sizestats:
        pval = parse('db size: {} ({})', line)
        if pval and 'db size' not in res:
            print(line, flush=True)
            res['db size'] = int(pval[0]) / (1024 * 1024)
    elif pval:
        print(line, flush=True)
        res[pval[0]] = int(pval[1])


def run_benchmark_run(bm, run):
    args = ['{}/{}_benchmark'.format(basedir, bm)]
    sizestats = False
    for a, v in run.items():
        if a in ('sizestats',):
            sizestats = True
            continue
        args.append('-{}'.format(a))
        args.append(str(v))
    print('Run {}'.format(' '.join(args)), flush=True)
    with subprocess.Popen(args,
                          stderr=subprocess.STDOUT,
                          stdout=subprocess.PIPE,
                          universal_newlines=True,
                          cwd=basedir,
                          bufsize=1) as output:
        for line in output.stdout:
            fill_result(bm, run, sizestats, line.strip())
        output.wait()


def run_benchmark(bm):
    for run in runs:
        run_benchmark_run(bm, run)


def run():
    for b in benchmarks:
        run_benchmark(b)


def main():
    run()
    plots = []
    palette = ["#00B377", "#e84d60", "#0054AE", "#c9d9d3",
               "#BFF500", "#555555", "#DFBFFF", "#B1D28F",
               "#FFAA00", "#A18353", "#888888", "#718dbf"]
    for bn, rmap in results.items():
        pfactors = None
        x = [(bm, brun) for bm in iter(rmap) for brun in iter(rmap[bm])]
        if len([v for v in x if v[1] == 'db size']):
            sizestats = True
        else:
            sizestats = False
        if pfactors is None:
            pfactors = [f[1] for f in x]
        counts = [rmap[bm][brun]
                  for bm in iter(rmap) for brun in iter(rmap[bm])]
        source = ColumnDataSource(data=dict(x=x, counts=counts))
        p = figure(x_range=FactorRange(*x), plot_height=350, plot_width=750,
                   title=bn)  # y_axis_type="log"
        p.vbar(x='x', top='counts', width=0.9, source=source, line_color='white',
               fill_color=factor_cmap('x', palette=palette, factors=pfactors, start=1, end=2))
        p.y_range.start = 0
        p.yaxis.axis_label = 'Time ms' if not sizestats else 'Database file size (MB)'
        p.x_range.range_padding = 0.1
        p.xaxis.major_label_orientation = 1
        p.xgrid.grid_line_color = None
        p.toolbar_location = None
        plots.append(p)
        os.makedirs("charts", exist_ok=True)
        export_png(p, filename="charts/{}.png".format(bn))
        p.toolbar_location = "right"
    grid = gridplot(plots, ncols=1, merge_tools=False)
    output_file('benchmark_results_raw.html')
    save(grid)
    show(grid)


if __name__ == '__main__':
    main()
| 31.273333
| 99
| 0.568322
| 610
| 4,691
| 4.293443
| 0.337705
| 0.020619
| 0.011455
| 0.017182
| 0.148148
| 0.113784
| 0.092402
| 0.07942
| 0.030546
| 0.030546
| 0
| 0.050723
| 0.277126
| 4,691
| 149
| 100
| 31.483221
| 0.721616
| 0.005756
| 0
| 0.080645
| 0
| 0
| 0.118833
| 0.027885
| 0
| 0
| 0
| 0
| 0
| 1
| 0.040323
| false
| 0
| 0.096774
| 0
| 0.137097
| 0.032258
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
32db89f97cc25f33ad056f8860c98d1fafd8baab
| 2,652
|
py
|
Python
|
chapt05/triangle.py
|
ohlogic/PythonOpenGLSuperBible4Glut
|
a0d01caaeb811002c191c28210268b5fcbb8b379
|
[
"MIT"
] | null | null | null |
chapt05/triangle.py
|
ohlogic/PythonOpenGLSuperBible4Glut
|
a0d01caaeb811002c191c28210268b5fcbb8b379
|
[
"MIT"
] | null | null | null |
chapt05/triangle.py
|
ohlogic/PythonOpenGLSuperBible4Glut
|
a0d01caaeb811002c191c28210268b5fcbb8b379
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python3
# Demonstrates OpenGL color triangle
# Ben Smith
# benjamin.coder.smith@gmail.com
#
# based heavily on ccube.cpp
# OpenGL SuperBible
# Program by Richard S. Wright Jr.

import math
import sys

from OpenGL.GL import *
from OpenGL.GLUT import *
from OpenGL.GLU import *

ESCAPE = b'\033'

xRot = 0.0
yRot = 0.0


def InitGL(Width, Height):
    # Black background
    glClearColor(0.0, 0.0, 0.0, 1.0)


# Called to draw scene
def DrawGLScene():
    # Clear the window with current clearing color
    glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)

    # Enable smooth shading
    glShadeModel(GL_SMOOTH)

    # Draw the triangle
    glBegin(GL_TRIANGLES)
    # Red Apex
    glColor3ub(255,0,0)
    glVertex3f(0.0,200.0,0.0)
    # Green on the right bottom corner
    glColor3ub(0,255,0)
    glVertex3f(200.0,-70.0,0.0)
    # Blue on the left bottom corner
    glColor3ub(0,0,255)
    glVertex3f(-200.0, -70.0, 0.0)
    glEnd()

    glutSwapBuffers()


def ReSizeGLScene(w, h):
    # Prevent a divide by zero
    if(h == 0):
        h = 1

    # Set Viewport to window dimensions
    glViewport(0, 0, w, h)

    # Reset coordinate system
    glLoadIdentity()

    # Window is higher than wide
    if w <= h:
        windowHeight = 250.0 * h / w
        windowWidth = 250.0
    else:
        #window wider than high
        windowWidth = 250.0 * w/h
        windowHeight = 250.0

    # Set the clipping volume
    glOrtho(-windowWidth, windowWidth, -windowHeight, windowHeight, 1.0, -1.0)


def keyPressed(key, x, y):
    if key == ESCAPE:
        glutDestroyWindow(window)
        sys.exit()


# Main program entry point
if __name__ == '__main__':
    glutInit()
    glutInitDisplayMode(GLUT_RGBA | GLUT_DOUBLE | GLUT_ALPHA | GLUT_DEPTH)
    glutInitWindowSize(640, 480)
    glutInitWindowPosition(0, 0)
    window = glutCreateWindow("RGB Triangle")
    glutDisplayFunc(DrawGLScene)
    # Uncomment this line to get full screen.
    #glutFullScreen()
    #glutIdleFunc(DrawGLScene)
    #glutTimerFunc( int(1.0/60.0), update, 0)
    glutReshapeFunc(ReSizeGLScene)
    glutKeyboardFunc(keyPressed)
    #glutSpecialFunc (specialkeyPressed);

    # Initialize our window.
    InitGL(640, 480)

    # Start Event Processing Engine
    glutMainLoop()
| 21.737705
| 83
| 0.562217
| 294
| 2,652
| 5.003401
| 0.530612
| 0.024473
| 0.014276
| 0.008158
| 0.054385
| 0.025833
| 0.025833
| 0
| 0
| 0
| 0
| 0.062245
| 0.35181
| 2,652
| 121
| 84
| 21.917355
| 0.793485
| 0.286199
| 0
| 0
| 0
| 0
| 0.013857
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.083333
| false
| 0
| 0.083333
| 0
| 0.166667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
32e2062c20d3f7d54552e963b99e3b7f219ffa2e
| 19,175
|
py
|
Python
|
ScreenTrainer.py
|
ZihaoChen0319/CMB-Segmentation
|
99c5788baacc280ca5dbe02f3e18403e399fb238
|
[
"Apache-2.0"
] | null | null | null |
ScreenTrainer.py
|
ZihaoChen0319/CMB-Segmentation
|
99c5788baacc280ca5dbe02f3e18403e399fb238
|
[
"Apache-2.0"
] | null | null | null |
ScreenTrainer.py
|
ZihaoChen0319/CMB-Segmentation
|
99c5788baacc280ca5dbe02f3e18403e399fb238
|
[
"Apache-2.0"
] | null | null | null |
import torch.nn as nn
import os
import torch.optim as optim
from tqdm import tqdm
import numpy as np
import torch
import torch.nn.functional as nnf
import SimpleITK as sitk
import json
from scipy import ndimage
import medpy.io as mio
from Utils import find_binary_object
from MyDataloader import get_train_cases, get_cmbdataloader
from MyNetwork import ScreenNet
from MyLoss import FocalLoss
from PairwiseMeasures_modified import PairwiseMeasures
class ScreenTrainer(nn.Module):
def __init__(self, data_path, model_save_path, dataset_path, device='cuda', all_epoch=50,
fold=0, bbox=(20, 20, 16), batch_size=32, loss='ce',
optimizer='sgd', init_lr=1e-3, decay_exponent=0.9, config=None, if_test=False,
random_negatives=1e5, aug_num=10, add_fp=False,
resample_num=(10000, 10000, 10000), modality=('T1', 'T2', 'T2S')):
"""
Trainer of the Screening Network.
"""
super(ScreenTrainer, self).__init__()
self.bbox = bbox
self.batch_size = batch_size
self.init_lr = init_lr
self.decay_exponent = decay_exponent
self.all_epoch = all_epoch
self.config = config
self.resample_num = resample_num
self.modality = modality
self.aug_num = aug_num
self.fold = fold
self.random_negatives = random_negatives
# path define
self.data_path = data_path
self.dataset_path = dataset_path
self.model_name = model_save_path.split('/')[-2]
self.model_save_path = model_save_path + 'fold_%d/' % fold
if not os.path.exists(self.model_save_path):
os.makedirs(self.model_save_path)
# device
self.device = device
# load division of data
if os.path.exists(dataset_path + 'fold_division.json'):
with open(dataset_path + 'fold_division.json', mode='r') as f:
splits = json.load(f)
self.train_list_sub = splits[str(fold)]['train']
self.val_list_sub = splits[str(fold)]['val']
else:
self.train_list_sub = []
self.val_list_sub = []
print('Data division is empty!')
# training and validation samples
if not if_test:
self.dataset_name = 'fold_%d/bbox-%d-%d-%d_neg-%d_aug-%d/' % \
(fold, self.bbox[0], self.bbox[1], self.bbox[2], random_negatives, aug_num)
if not os.path.exists(dataset_path + self.dataset_name):
os.makedirs(dataset_path + self.dataset_name)
# load or generate the training samples
if os.path.exists(dataset_path + self.dataset_name + 'pos.json'):
with open(dataset_path + self.dataset_name + 'pos.json', mode='r') as f:
self.train_cases_pos = json.load(f)
if os.path.exists(dataset_path + self.dataset_name + 'neg.json'):
with open(dataset_path + self.dataset_name + 'neg.json', mode='r') as f:
self.train_cases_neg = json.load(f)
else:
self.train_cases_pos, self.train_cases_neg = get_train_cases(
data_path=self.data_path, train_list=self.train_list_sub, bbox=self.bbox, seed=2021,
if_translation=True, random_negatives=random_negatives, aug_num=aug_num)
with open(dataset_path + self.dataset_name + 'pos.json', mode='w') as f:
json.dump(self.train_cases_pos, f)
with open(dataset_path + self.dataset_name + 'neg.json', mode='w') as f:
json.dump(self.train_cases_neg, f)
# load false positive samples
self.train_cases_fp = []
if add_fp:
if os.path.exists(dataset_path + 'fold_%d/fp_%s_current.json' % (self.fold, self.model_name)):
with open(dataset_path + 'fold_%d/fp_%s_current.json' % (self.fold, self.model_name), mode='r') as f:
self.train_cases_fp = json.load(f)
print('Dataset: pos %d, neg %d, fp %d' %
(len(self.train_cases_pos), len(self.train_cases_neg), len(self.train_cases_fp)))
else:
self.train_cases_fp = []
self.train_cases_pos = []
self.train_cases_neg = []
# model
self.model = ScreenNet(is_fc=False, in_channel=len(modality), num_class=2)
self.model.to(self.device)
# loss function
if loss == 'ce':
self.loss_fc = nn.CrossEntropyLoss()
elif loss == 'weighted ce':
self.loss_fc = nn.CrossEntropyLoss(weight=torch.tensor([0.25, 0.75], device=device))
elif loss == 'focal loss':
self.loss_fc = FocalLoss(alpha=0.25, gamma=2, num_classes=2)
else:
raise ValueError('No such optimizer')
# optimizer
if optimizer == 'sgd':
self.optimizer = optim.SGD(self.model.parameters(), lr=init_lr, momentum=0.99, nesterov=True)
elif optimizer == 'adam':
self.optimizer = optim.Adam(self.model.parameters(), lr=init_lr)
else:
raise ValueError('No such optimizer')
self.epoch = 1
self.lr = init_lr
self.train_metric = [0] * 3
self.test_metric = [0] * 4
def train_epoch(self):
self.model.train()
train_accum = [0] * 6
train_cases_fp = self.train_cases_fp.copy()
train_cases_pos = self.train_cases_pos.copy()
train_cases_neg = self.train_cases_neg.copy()
# randomly choose training samples, ensuring that the number of samples is fixed under different conditions
if len(self.resample_num):
train_cases_pos = np.random.choice(train_cases_pos, size=self.resample_num[0]).tolist()
train_cases_neg = np.random.choice(train_cases_neg, size=self.resample_num[1]).tolist()
if len(train_cases_fp):
train_cases_fp = np.random.choice(train_cases_fp, size=self.resample_num[2]).tolist()
data_list = train_cases_pos + train_cases_neg + train_cases_fp
dataloader = get_cmbdataloader(
data_path=self.data_path,
dataset_index=data_list,
bbox=self.bbox,
batch_size=self.batch_size,
shuffle=True,
pin_memory=True,
num_workers=2,
modality=self.modality
)
dataloader = tqdm(dataloader)
for img_batch, label_batch in dataloader:
img_batch = img_batch.to(self.device).float()
label_batch = label_batch.to(self.device)
pred_batch = self.model(img_batch)
loss = self.loss_fc(pred_batch, label_batch)
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
y_hat = pred_batch.argmax(axis=1).detach().cpu().numpy()
y = label_batch.detach().cpu().numpy()
train_accum[0] += img_batch.shape[0]
train_accum[1] += loss.detach().cpu().numpy() * img_batch.shape[0]
train_accum[2] += np.sum(y_hat == y) # acc
train_accum[3] += np.sum((y_hat == 1) & (y == 1)) # tp
train_accum[4] += np.sum((y_hat == 1) & (y != 1)) # fp
train_accum[5] += np.sum((y_hat != 1) & (y == 1)) # fn
self.train_metric[0] = train_accum[1] / train_accum[0] # loss
self.train_metric[1] = train_accum[2] / train_accum[0] # acc
self.train_metric[2] = 2 * train_accum[3] / np.clip(2 * train_accum[3] + train_accum[4] + train_accum[5],
a_min=1e-5, a_max=1e10) # f1
dataloader.set_description('Epoch: %d, ' % self.epoch +
'train loss %.4f, ' % self.train_metric[0] +
'train acc %.4f, ' % self.train_metric[1] +
'train f1 %.4f, ' % self.train_metric[2])
return self.train_metric
def val_epoch(self):
self.model.eval()
test_accum = [0] * 6
for pat in self.val_list_sub:
data_list = []
for mod in self.modality:
data_list.append(np.load(self.data_path + '%s/%s_space-T2S_%s.npy' % (pat, pat, mod)))
img = np.stack(data_list, axis=0)
cmb, h = mio.load(self.data_path + '%s/%s_space-T2S_CMB.nii.gz' % (pat, pat))
pred, pred_post, n_obj, pred_init_space, candidates_list, score_init_space = \
self.inference(img, patch_size=(160, 160, 80), thresh=0.1, size=2, if_nms=True)
pe = PairwiseMeasures(ref_img=cmb, seg_img=pred_init_space, analysis='microbleeds',
measures=('f1_score', 'tp', 'fn', 'fp'),
connectivity=3, pixdim=h.get_voxel_spacing(), empty=True,
threshold=0.5, thresh_assign=3)
tp, fn, fp = pe.m_dict['tp'][0](), pe.m_dict['fn'][0](), pe.m_dict['fp'][0]()
f1 = pe.m_dict['f1_score'][0]()
test_accum[0] += 1
test_accum[1] += tp
test_accum[2] += fn
test_accum[3] += fp
test_accum[4] += f1 if np.sum(cmb) else 0
test_accum[5] += 1 if np.sum(cmb) else 0
print('%s: TP %d, FN %d, FP %d, F1 %.4f' % (pat, tp, fn, fp, f1))
self.test_metric[0] = test_accum[1] # TP
self.test_metric[1] = test_accum[2] # FN
self.test_metric[2] = test_accum[3] / test_accum[0] # avg FP
self.test_metric[3] = test_accum[4] / test_accum[5] # avg F1
print('Epoch: %d, TP %d, FN %d, avg FP %.4f, avg F1 %.4f' %
(self.epoch, self.test_metric[0], self.test_metric[1], self.test_metric[2], self.test_metric[3]))
return self.test_metric
def get_fp(self, thresh=0.1, if_aug=False):
"""Obtain false positives by applying initial model on training data"""
print(' --- Obtaining FP --- ')
self.model.eval()
if if_aug:
if os.path.exists(self.dataset_path + 'fold_%d/fp_%s_current_aug.json' % (self.fold, self.model_name)):
with open(self.dataset_path + 'fold_%d/fp_%s_current_aug.json' % (self.fold, self.model_name), mode='r') as f:
fp = json.load(f)
with open(self.dataset_path + 'fold_%d/fp_%s_epoch-%d_aug.json' % (self.fold, self.model_name, self.epoch), mode='w') as f:
json.dump(fp, f)
else:
if os.path.exists(self.dataset_path + 'fold_%d/fp_%s_current.json' % (self.fold, self.model_name)):
with open(self.dataset_path + 'fold_%d/fp_%s_current.json' % (self.fold, self.model_name), mode='r') as f:
fp = json.load(f)
with open(self.dataset_path + 'fold_%d/fp_%s_epoch-%d.json' % (self.fold, self.model_name, self.epoch), mode='w') as f:
json.dump(fp, f)
aug_list = []
if if_aug:
for pat in self.train_list_sub:
for i in range(self.aug_num):
aug_list.append(pat + '_aug%d' % i)
fp_list = self.train_cases_fp if len(self.train_cases_fp) else []
loader = tqdm(self.train_list_sub + aug_list)
for pat in loader:
data_list = []
for mod in self.modality:
data_list.append(np.load(self.data_path + '%s/%s_space-T2S_%s.npy' % (pat, pat, mod)))
cmb = np.load(self.data_path + '%s/%s_space-T2S_CMB.npy' % (pat, pat), mmap_mode='r')
shape = cmb.shape
img = np.stack(data_list, axis=0)
pred, pred_post, n_obj, pred_init_space, candidates_list, score_init_space = \
self.inference(img, patch_size=(160, 160, 80), thresh=thresh, size=4)
for (x, y, z) in candidates_list:
if x > shape[0] - self.bbox[0] // 2 or x < self.bbox[0] // 2 or \
y > shape[1] - self.bbox[1] // 2 or y < self.bbox[1] // 2 or \
z > shape[2] - self.bbox[2] // 2 or z < self.bbox[2] // 2:
continue
if np.sum(cmb[x - 1:x + 1, y - 1:y + 1, z - 1:z + 1]):
sample = {'pat': pat, 'start': (x, y, z), 'have cmb': 1}
else:
sample = {'pat': pat, 'start': (x, y, z), 'have cmb': 0}
fp_list.append(sample)
loader.set_description('FP num: %d' % len(fp_list))
self.train_cases_fp = fp_list
with open(self.dataset_path + 'fold_%d/fp_%s_current.json' % (self.fold, self.model_name), mode='w') as f:
json.dump(fp_list, f)
print(' --- Finish, FP num: %d ---' % len(fp_list))
return fp_list
def adjust_lr(self):
"""Adjust the learning rate following ‘poly’ policy"""
self.lr = self.init_lr * (1 - self.epoch / self.all_epoch) ** self.decay_exponent
for param_group in self.optimizer.param_groups:
param_group['lr'] = self.lr
return self.lr
def save_model(self, force=False):
"""Save the model every epoch(current) and every 5 epochs(epoch_xx)"""
state = {
'epoch': self.epoch,
'state_dict': self.model.state_dict(),
'config': self.config,
}
torch.save(state, self.model_save_path + 'current.pth.tar')
if self.epoch % 5 == 0 or force:
torch.save(state, self.model_save_path + 'epoch_%d_%d_%d_%.4f_%.4f.pth.tar' %
(self.epoch, self.test_metric[0], self.test_metric[1], self.test_metric[2], self.test_metric[3]))
def load_model(self, model_name='current', silent=False):
all_saved_models = os.listdir(self.model_save_path)
matched_model = [model for model in all_saved_models if model.startswith(model_name)]
if len(matched_model) == 1:
checkpoint = torch.load(self.model_save_path + matched_model[0], map_location={'cuda:0': self.device})
self.epoch = checkpoint['epoch'] + 1
self.model.load_state_dict(checkpoint['state_dict'])
self.model.to(self.device)
# self.config = checkpoint['config']
self.adjust_lr()
elif len(matched_model) > 1:
raise ValueError('Too many matched models!')
if not silent:
print('Screen model: %s, device: %s, epoch: %d'
% (self.model_save_path + model_name, self.device, self.epoch))
def inference(self, data: np.ndarray, patch_size=None, thresh=0.5, size=2, if_nms=True):
if len(data.shape) == 3:
data = np.expand_dims(data, axis=0)
shape = [data.shape[1], data.shape[2], data.shape[3]]
# compute the output size and the patches location, exactly corresponding to the architecture of ScreenNet
if patch_size is None:
patch_size = shape
out_size = [(shape[0] - 8) // 2 - 5, (shape[1] - 8) // 2 - 5, (shape[2] - 4) // 2 - 5]
out_patch_size = [(patch_size[0] - 8) // 2 - 5, (patch_size[1] - 8) // 2 - 5, (patch_size[2] - 4) // 2 - 5]
# print(data.shape, out_size, out_patch_size)
num_xyz = [out_size[i] // out_patch_size[i] for i in range(3)]
overlap_xyz = [((num_xyz[i] + 1) * out_patch_size[i] - out_size[i]) // num_xyz[i] for i in range(3)]
x_starts = [(out_patch_size[0] - overlap_xyz[0]) * n for n in range(num_xyz[0])]
x_starts.append(out_size[0] - out_patch_size[0])
y_starts = [(out_patch_size[1] - overlap_xyz[1]) * n for n in range(num_xyz[1])]
y_starts.append(out_size[1] - out_patch_size[1])
z_starts = [(out_patch_size[2] - overlap_xyz[2]) * n for n in range(num_xyz[2])]
z_starts.append(out_size[2] - out_patch_size[2])
out_starts = [(x, y, z) for z in z_starts for y in y_starts for x in x_starts]
starts = [(2*x, 2*y, 2*z) for (x, y, z) in out_starts]
# inference by sliding window strategy
pred = np.zeros(out_size)
overlap = np.zeros(out_size)
data = torch.tensor(data).float()
for st, out_st in zip(starts, out_starts):
data_patch = data[:, st[0]:st[0] + patch_size[0], st[1]:st[1] + patch_size[1], st[2]:st[2] + patch_size[2]]
data_patch = data_patch.to(self.device).unsqueeze(0)
pred_patch = self.model(data_patch).detach()
pred_patch = nnf.softmax(pred_patch, dim=1).squeeze()[1].detach().cpu().numpy()
pred[out_st[0]:out_st[0] + out_patch_size[0],
out_st[1]:out_st[1] + out_patch_size[1],
out_st[2]:out_st[2] + out_patch_size[2]] += pred_patch
overlap[out_st[0]:out_st[0] + out_patch_size[0],
out_st[1]:out_st[1] + out_patch_size[1],
out_st[2]:out_st[2] + out_patch_size[2]] += 1
pred /= overlap
pred_th = pred.copy()
pred_th[pred_th < thresh] = 0
if if_nms:
pred_itk = sitk.GetImageFromArray(pred_th)
pred_itk = sitk.RegionalMaxima(pred_itk)
pred_post = sitk.GetArrayFromImage(pred_itk)
labeled, n_obj = find_binary_object(pred_post)
maxima_list = ndimage.center_of_mass(labeled, labeled, range(1, n_obj+1))
else:
pred_post = pred_th.copy()
pred_post[pred_post >= thresh] = 1
labeled, n_obj = find_binary_object(pred_post)
maxima_list = ndimage.center_of_mass(labeled, labeled, range(1, n_obj + 1))
# find candidates
score_init_space = np.zeros(shape)
score_init_space[9:pred.shape[0] * 2 + 9, 9:pred.shape[1] * 2 + 9, 7:pred.shape[2] * 2 + 7] = \
nnf.interpolate(torch.tensor(pred, dtype=torch.float32).unsqueeze(0).unsqueeze(0),
scale_factor=2, mode='trilinear', align_corners=False).squeeze().numpy()
# map the results back to input volume space
pred_init_space = np.zeros(shape)
candidates_list = []
for (x, y, z) in maxima_list:
x = int(2 * x + 9)
y = int(2 * y + 9)
z = int(2 * z + 7)
if x < 0 or x >= shape[0] \
or y < 0 or y >= shape[1] \
or z < 0 or z >= shape[2]:
continue
pred_init_space[max(x-size//2, 0):min(x+size//2, shape[0]),
max(y-size//2, 0):min(y+size//2, shape[1]),
max(z-size//2, 0):min(z+size//2, shape[1])] = 1
candidates_list.append((x, y, z))
return pred, pred_post, n_obj, pred_init_space, candidates_list, score_init_space
| 50.460526
| 140
| 0.557445
| 2,672
| 19,175
| 3.783308
| 0.129117
| 0.034623
| 0.027698
| 0.014245
| 0.354536
| 0.28994
| 0.238204
| 0.210703
| 0.195865
| 0.176674
| 0
| 0.026951
| 0.313064
| 19,175
| 379
| 141
| 50.593668
| 0.74051
| 0.042451
| 0
| 0.13125
| 0
| 0.003125
| 0.059674
| 0.022831
| 0
| 0
| 0
| 0
| 0
| 1
| 0.025
| false
| 0
| 0.05
| 0
| 0.09375
| 0.021875
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
32e36a60281e09d72c79ad1807ea74035aa73e60
| 534
|
py
|
Python
|
examples/earthquakes/main.py
|
admariner/beneath
|
a6aa2c220e4a646be792379528ae673f4bef440b
|
[
"MIT"
] | 65
|
2021-04-27T13:13:09.000Z
|
2022-01-24T00:26:06.000Z
|
examples/earthquakes/main.py
|
admariner/beneath
|
a6aa2c220e4a646be792379528ae673f4bef440b
|
[
"MIT"
] | 22
|
2021-10-06T10:30:40.000Z
|
2021-12-10T11:36:55.000Z
|
examples/earthquakes/main.py
|
admariner/beneath
|
a6aa2c220e4a646be792379528ae673f4bef440b
|
[
"MIT"
] | 4
|
2021-04-24T15:29:51.000Z
|
2022-03-30T16:20:12.000Z
|
import beneath
from generators import earthquakes

with open("schemas/earthquake.graphql", "r") as file:
    EARTHQUAKES_SCHEMA = file.read()

if __name__ == "__main__":
    p = beneath.Pipeline(parse_args=True)
    p.description = "Continually pings the USGS earthquake API"

    earthquakes = p.generate(earthquakes.generate_earthquakes)
    p.write_table(
        earthquakes,
        "earthquakes",
        schema=EARTHQUAKES_SCHEMA,
        description="Earthquakes fetched from https://earthquake.usgs.gov/",
    )

    p.main()
| 28.105263
| 76
| 0.700375
| 59
| 534
| 6.118644
| 0.59322
| 0.141274
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.196629
| 534
| 18
| 77
| 29.666667
| 0.841492
| 0
| 0
| 0
| 0
| 0
| 0.262172
| 0.048689
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.133333
| 0
| 0.133333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
32e3ce811bff9ec736c02ce8188ebe9e69d6a483
| 5,073
|
py
|
Python
|
examples/tf_vision/tensorflow_saved_model_service.py
|
siddharthgee/multi-model-server
|
bd795b402330b491edd5d2a235b8b8c2ef9fcb58
|
[
"Apache-2.0"
] | null | null | null |
examples/tf_vision/tensorflow_saved_model_service.py
|
siddharthgee/multi-model-server
|
bd795b402330b491edd5d2a235b8b8c2ef9fcb58
|
[
"Apache-2.0"
] | null | null | null |
examples/tf_vision/tensorflow_saved_model_service.py
|
siddharthgee/multi-model-server
|
bd795b402330b491edd5d2a235b8b8c2ef9fcb58
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#     http://www.apache.org/licenses/LICENSE-2.0
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.

"""
TensorflowSavedModelService defines an API for running a tensorflow saved model
"""
import json
import os

import tensorflow as tf

from model_handler import ModelHandler


class TensorflowSavedModelService(ModelHandler):
    """
    TensorflowSavedModelService defines the fundamental loading model and inference
    operations when serving a TF saved model. This is a base class and needs to be
    inherited.
    """

    def __init__(self):
        super(TensorflowSavedModelService, self).__init__()
        self.predictor = None
        self.labels = None
        self.signature = None
        self.epoch = 0

    # noinspection PyMethodMayBeStatic
    def get_model_files_prefix(self, context):
        return context.manifest["model"]["modelName"]

    def initialize(self, context):
        """
        Initialize model. This will be called during model loading time
        :param context: Initial context contains model server system properties.
        :return:
        """
        super(TensorflowSavedModelService, self).initialize(context)

        properties = context.system_properties
        model_dir = properties.get("model_dir")

        signature_file_path = os.path.join(model_dir, "signature.json")
        if not os.path.isfile(signature_file_path):
            raise RuntimeError("Missing signature.json file.")

        with open(signature_file_path) as f:
            self.signature = json.load(f)

        #Define signature.json and work here
        data_names = []
        data_shapes = []
        for input_data in self.signature["inputs"]:
            data_name = input_data["data_name"]
            data_shape = input_data["data_shape"]
            # Replace 0 entry in data shape with 1 for binding executor.
            for idx in range(len(data_shape)):
                if data_shape[idx] == 0:
                    data_shape[idx] = 1
            data_names.append(data_name)
            data_shapes.append((data_name, tuple(data_shape)))

        self.predictor = tf.contrib.predictor.from_saved_model(model_dir)

    def inference(self, model_input):
        """
        Internal inference methods for TF - saved model. Run forward computation and
        return output.
        :param model_input: list of dict of {name : numpy_array}
            Batch of preprocessed inputs in tensor dict.
        :return: list of dict of {name: numpy_array}
            Batch of inference output tensor dict
        """
        if self.error is not None:
            return None

        # Check input shape
        check_input_shape(model_input, self.signature)

        #Restricting to one request which contains the whole batch. Remove this line if adding custom batching support
        model_input = model_input[0]
        results = self.predictor(model_input)

        return results


def check_input_shape(inputs, signature):
    """
    Check input data shape consistency with signature.

    Parameters
    ----------
    inputs : List of dicts
        Input data in this format [{input_name: input_tensor, input2_name: input2_tensor}, {...}]
    signature : dict
        Dictionary containing model signature.
    """
    assert isinstance(inputs, list), 'Input data must be a list.'
    for input_dict in inputs:
        assert isinstance(input_dict, dict), 'Each request must be dict of input_name: input_tensor.'
        assert len(input_dict) == len(signature["inputs"]), \
            "Input number mismatches with " \
            "signature. %d expected but got %d." \
            % (len(signature['inputs']), len(input_dict))
        for tensor_name, sig_input in zip(input_dict, signature["inputs"]):
            assert len(input_dict[tensor_name].shape) == len(sig_input["data_shape"]), \
                'Shape dimension of input %s mismatches with ' \
                'signature. %d expected but got %d.' \
                % (sig_input['data_name'],
                   len(sig_input['data_shape']),
                   len(input_dict[tensor_name].shape))
            for idx in range(len(input_dict[tensor_name].shape)):
                if idx != 0 and sig_input['data_shape'][idx] != 0:
                    assert sig_input['data_shape'][idx] == input_dict[tensor_name].shape[idx], \
                        'Input %s has different shape with ' \
                        'signature. %s expected but got %s.' \
                        % (sig_input['data_name'], sig_input['data_shape'],
                           input_dict[tensor_name].shape)
| 38.431818
| 118
| 0.640647
| 623
| 5,073
| 5.070626
| 0.317817
| 0.037037
| 0.026591
| 0.030073
| 0.120924
| 0.071225
| 0.045584
| 0.045584
| 0.020893
| 0
| 0
| 0.00487
| 0.271437
| 5,073
| 131
| 119
| 38.725191
| 0.849838
| 0.342795
| 0
| 0
| 0
| 0
| 0.147666
| 0
| 0
| 0
| 0
| 0
| 0.080645
| 1
| 0.080645
| false
| 0
| 0.064516
| 0.016129
| 0.209677
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
32e861d95e4d1e621303b5ebac3624de50614805
| 4,007
|
py
|
Python
|
mazegen/solver.py
|
alekratz/mazegen
|
2799a5cf790cec4bab94a147315cc8541c5efec7
|
[
"MIT"
] | null | null | null |
mazegen/solver.py
|
alekratz/mazegen
|
2799a5cf790cec4bab94a147315cc8541c5efec7
|
[
"MIT"
] | null | null | null |
mazegen/solver.py
|
alekratz/mazegen
|
2799a5cf790cec4bab94a147315cc8541c5efec7
|
[
"MIT"
] | null | null | null |
import random
from typing import Optional

from .grid import *


class Solver:
    def __init__(self, grid: Grid):
        self._grid = grid
        self._backtrack = []
        self._pos = (0, 0)
        self._dir = None
        self._backtracking = False
        self._branches = {
            self._pos: set(self.valid_cells().keys()),
        }
        # Add entrance and exit
        self.grid.cells[0][0].remove_wall(Wall.NORTH)
        self.grid.cells[self.grid.height - 1][self.grid.width - 1].remove_wall(
            Wall.EAST
        )

    @property
    def is_done(self) -> bool:
        return self.goal == self.pos

    @property
    def goal(self):
        return (self.grid.width - 1, self.grid.height - 1)

    @property
    def grid(self):
        return self._grid

    @property
    def pos(self):
        return self._pos

    @property
    def cell(self):
        x, y = self.pos
        return self.grid.cells[y][x]

    @property
    def backtracking(self) -> bool:
        return self._backtracking

    def valid_cells(self):
        "Gets the cells that are available to move into."
        cell = self.cell
        return {w: n for w, n in self.cell.neighbors().items() if w not in cell.walls}

    def move(self, wall: Wall):
        assert wall in self.valid_cells()
        x, y = self.pos
        if wall == Wall.NORTH:
            y -= 1
        elif wall == Wall.SOUTH:
            y += 1
        elif wall == Wall.EAST:
            x += 1
        elif wall == Wall.WEST:
            x -= 1
        else:
            assert False
        # Add this motion to the backtrack list
        self._backtrack.append(self.pos)
        self._pos = (x, y)

    def step(self):
        if self.is_done:
            return
        valid_cells = self.valid_cells()
        # Register this branch if there are multiple targets to go to
        if len(valid_cells) > 1 and self.pos not in self._branches:
            self._branches[self.pos] = set(valid_cells.keys())
            # Also, if we have backtrack positions available, disable the cell that we just came
            # from.
            if self._backtrack:
                x1, y1 = self.pos
                x2, y2 = self._backtrack[-1]
                diff = (x2 - x1, y2 - y1)
                if diff == (-1, 0):
                    wall = Wall.WEST
                elif diff == (1, 0):
                    wall = Wall.EAST
                elif diff == (0, -1):
                    wall = Wall.NORTH
                elif diff == (0, 1):
                    wall = Wall.SOUTH
                else:
                    assert False
                self._branches[self.pos].remove(wall)
        if self.pos in self._branches and self._branches[self.pos]:
            # Choose a direction to move if we're at a branch
            self._backtracking = False
            self._dir = random.choice(list(self._branches[self.pos]))
            self._branches[self.pos].remove(self._dir)
        if self.backtracking:
            # Set up for backtracking, but there's no backtrack left.
            if not self._backtrack:
                self._backtracking = False
                self.step()
            else:
                self._pos = self._backtrack.pop()
        else:
            if self._dir not in valid_cells:
                # Can't move this direction, try these options in this order:
                # * Choose a random direction on this branch if we are on a branch,
                # * Start backtracking
                if self.pos in self._branches and self._branches[self.pos]:
                    self._dir = random.choice(list(self._branches[self.pos]))
                    self._branches[self.pos].remove(self._dir)
                else:
                    self._backtracking = True
                    # TODO : prevent stack overflow where we have no backtrack available
                    assert self._backtrack
                    self.step()
                    return
            self.move(self._dir)
| 32.056
| 96
| 0.520839
| 486
| 4,007
| 4.183128
| 0.236626
| 0.072307
| 0.078701
| 0.084112
| 0.201181
| 0.134776
| 0.117068
| 0.117068
| 0.117068
| 0.117068
| 0
| 0.01218
| 0.385326
| 4,007
| 124
| 97
| 32.314516
| 0.813236
| 0.143499
| 0
| 0.28866
| 0
| 0
| 0.013549
| 0
| 0
| 0
| 0
| 0.008065
| 0.041237
| 1
| 0.103093
| false
| 0
| 0.030928
| 0.051546
| 0.237113
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
32ea368fa5ba2732d1c51618d8edfc516b6eb773
| 1,224
|
py
|
Python
|
example/RunModel/Abaqus_Model_Example/process_odb.py
|
volpatto/UQpy
|
acbe1d6e655e98917f56b324f019881ea9ccca82
|
[
"MIT"
] | null | null | null |
example/RunModel/Abaqus_Model_Example/process_odb.py
|
volpatto/UQpy
|
acbe1d6e655e98917f56b324f019881ea9ccca82
|
[
"MIT"
] | null | null | null |
example/RunModel/Abaqus_Model_Example/process_odb.py
|
volpatto/UQpy
|
acbe1d6e655e98917f56b324f019881ea9ccca82
|
[
"MIT"
] | null | null | null |
from odbAccess import *
from abaqusConstants import *
from textRepr import *
import timeit
import numpy as np
import os
import sys
import errno

start_time = timeit.default_timer()

index = sys.argv[-1]
# print(index)
# index = float(index)
index = int(index)
# print(index)

odbFile = os.path.join(os.getcwd(), "single_element_simulation_" + str(index) + ".odb")
odb = openOdb(path=odbFile)

step1 = odb.steps.values()[0]
his_key = 'Element PART-1-1.1 Int Point 1 Section Point 1'
region = step1.historyRegions[his_key]
LE22 = region.historyOutputs['LE22'].data
S22 = region.historyOutputs['S22'].data

# t = np.array(LE22)[:, 0]
x = np.array(LE22)[:, 1]
y = np.array(S22)[:, 1]

fnm = os.path.join(os.getcwd(), 'Output', 'output_element_{0}.csv'.format(index))
if not os.path.exists(os.path.dirname(fnm)):
    try:
        os.makedirs(os.path.dirname(fnm))
    except OSError as exc:  # Guard against race condition
        if exc.errno != errno.EEXIST:
            raise

output_file = open(fnm, 'wb')
for k in range(len(x)):
    output_file.write('%13.6e, %13.6e\n' % (x[k], y[k]))
output_file.close()

elapsed = timeit.default_timer() - start_time
print('Finished running odb_process_script. It took ' + str(elapsed) + ' s to run.')
| 27.818182
| 87
| 0.684641
| 190
| 1,224
| 4.326316
| 0.489474
| 0.036496
| 0.043796
| 0.029197
| 0.043796
| 0
| 0
| 0
| 0
| 0
| 0
| 0.031884
| 0.154412
| 1,224
| 43
| 88
| 28.465116
| 0.762319
| 0.081699
| 0
| 0
| 0
| 0
| 0.16458
| 0.042934
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.21875
| 0
| 0.21875
| 0.03125
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
32eaa0a294af2308ff208fed9c050fd370b31fec
| 8,526
|
py
|
Python
|
analysis_methods/shuff_time.py
|
gbrookshire/simulated_rhythmic_sampling
|
5c9ed507847a75dbe38d10d78b54441ae83f5831
|
[
"MIT"
] | null | null | null |
analysis_methods/shuff_time.py
|
gbrookshire/simulated_rhythmic_sampling
|
5c9ed507847a75dbe38d10d78b54441ae83f5831
|
[
"MIT"
] | null | null | null |
analysis_methods/shuff_time.py
|
gbrookshire/simulated_rhythmic_sampling
|
5c9ed507847a75dbe38d10d78b54441ae83f5831
|
[
"MIT"
] | null | null | null |
"""
Tools to perform analyses by shuffling in time, as in Landau & Fries (2012) and
Fiebelkorn et al. (2013).
"""
import os
import yaml
import numpy as np
import statsmodels.api as sm
from statsmodels.stats.multitest import multipletests
from .utils import avg_repeated_timepoints, dft
# Load the details of the behavioral studies
_pathname = os.path.dirname(os.path.abspath(__file__))
_behav_fname = os.path.join(_pathname, '../behav_details.yaml')
behav_details = yaml.safe_load(open(_behav_fname))
def landau(x, t, fs, k_perm):
"""
Analyze the data as in Landau & Fries (2012)
Parameters
----------
x : np.ndarray
Array of Hit (1) or Miss (0) for each trial
t : np.ndarray
Time-stamp (SOA) for each trial
fs : float
Sampling rate used when computing the spectrum of the aggregated accuracy time-series
k_perm : int
The number of times to randomly shuffle the data when computing the
permuted surrogate distribution
Returns
-------
res : dict
The results of the randomization test as returned by
`time_shuffled_perm`, plus these items:
t : np.ndarray
The time-stamps of the individual trials
t_agg : np.ndarray
The time-steps for the aggregated accuracy time-series
x_agg : np.ndarray
The aggregated accuracy time-series
p_corr : np.ndarray
P-values corrected for multiple comparisons using Bonferroni
correction
"""
def landau_spectrum_trialwise(x_perm):
""" Helper to compute spectrum on shuffled data
"""
_, x_avg = avg_repeated_timepoints(t, x_perm)
f, y = landau_spectrum(x_avg, fs)
return f, y
# Compute the results
res = time_shuffled_perm(landau_spectrum_trialwise, x, k_perm)
res['t'] = t
res['t_agg'], res['x_agg'] = avg_repeated_timepoints(t, x)
# Correct for multiple comparisons across frequencies
_, p_corr, _, _ = multipletests(res['p'], method='bonferroni')
res['p_corr'] = p_corr
return res
def landau_spectrum(x, fs, detrend_ord=1):
"""
Get the spectrum of behavioral data as in Landau & Fries (2012)
The paper doesn't specifically mention detrending, but A.L. says they
always detrend with a 2nd-order polynomial. That matches the data --
without detrending, there should have been a peak at freq=0 due to the
offset from mean accuracy being above 0.
2021-06-14: AL tells me they used linear detrending.
The paper says the data were padded before computing the FFT, but doesn't
specify the padding or NFFT. I've chosen a value to match the frequency
resolution in the plots.
Parameters
----------
x : np.ndarray
The data time-series
Returns
-------
f : np.ndarray
The frequencies of the amplitude spectrum
y : np.ndarray
The amplitude spectrum
"""
details = behav_details['landau']
# Detrend the data
x = sm.tsa.tsatools.detrend(x, order=detrend_ord)
# Window the data
x = window(x, np.hanning(len(x)))
# Get the spectrum
f, y = dft(x, fs, details['nfft'])
return f, y
def fiebelkorn(x, t, k_perm):
"""
Search for statistically significant behavioral oscillations as in
Fiebelkorn et al. (2013)
Parameters
----------
x : np.ndarray
A sequence of accuracy (Hit: 1, Miss: 0) for each trial
t : np.ndarray
The time-stamps for each trial
k_perm : int
The number of times to randomly shuffle the data when computing the
permuted surrogate distribution
Returns
-------
res : dict
The results as given by `time_shuffled_perm`, plus these items:
t : np.ndarray
The original time-stamps of the raw data
p_corr : np.ndarray
P-values for each frequency, corrected for multiple comparisons
using FDR
"""
# Compute the results
res = time_shuffled_perm(lambda xx: fiebelkorn_spectrum(xx, t), x, k_perm)
res['t'] = t
# Correct for multiple comparisons across frequencies
_, p_corr, _, _ = multipletests(res['p'], method='fdr_bh')
res['p_corr'] = p_corr
return res
def fiebelkorn_binning(x_trial, t_trial):
"""
Given accuracy and time-points, find the time-smoothed average accuracy
Parameters
----------
x_trial : np.ndarray
Accuracy (Hit: 1, Miss: 0) of each trial
t_trial : np.ndarray
The time-stamp of each trial
Returns
-------
x_bin : np.ndarray
The average accuracy within each time bin
t_bin : np.ndarray
The centers of each time bin
"""
details = behav_details['fiebelkorn']
# Time-stamps of the center of each bin
t_bin = np.arange(details['t_start'],
details['t_end'] + 1e-10,
details['bin_step'])
# Accuracy within each bin
x_bin = []
for i_bin in range(len(t_bin)):
bin_center = t_bin[i_bin]
bin_start = bin_center - (details['bin_width'] / 2)
bin_end = bin_center + (details['bin_width'] / 2)
bin_sel = (bin_start <= t_trial) & (t_trial <= bin_end)
x_bin_avg = np.mean(x_trial[bin_sel])
x_bin.append(x_bin_avg)
x_bin = np.array(x_bin)
return x_bin, t_bin
def fiebelkorn_spectrum(x, t):
"""
Compute the spectrum of accuracy data as in Fiebelkorn et al. (2013)
Parameters
----------
x : np.ndarray
The data for each trial
t : np.ndarray
The time-stamp for each trial
Returns
-------
f : np.ndarray
The frequencies of the resulting spectrum
y : np.ndarray
The amplitude spectrum
"""
details = behav_details['fiebelkorn']
# Get the moving average of accuracy
x_bin, t_bin = fiebelkorn_binning(x, t)
# Detrend the binned data
x_bin = sm.tsa.tsatools.detrend(x_bin, order=2)
# Window the data
x_bin = window(x_bin, np.hanning(len(x_bin)))
# Get the spectrum
f, y = dft(x_bin, 1 / details['bin_step'], details['nfft'])
# Only keep frequencies that were reported in the paper
f_keep = f <= details['f_max']
f = f[f_keep]
y = y[f_keep]
return f, y
def time_shuffled_perm(analysis_fnc, x, k_perm):
"""
Run a permutation test by shuffling the time-stamps of individual trials.
Parameters
----------
analysis_fnc : function
The function that will be used to generate the spectrum
x : np.ndarray
The data time-series
k_perm : int
How many permutations to run
Returns
-------
res : dict
Dictionary of the results of the randomization analysis
x : np.ndarray
The raw data
x_perm : np.ndarray
The shuffled data
f : np.ndarray
The frequencies of the resulting spectrum
y_emp : np.ndarray
The spectrum of the empirical (unshuffled) data
y_avg : np.ndarray
The spectra of the shuffled permutations
y_cis : np.ndarray
Confidence intervals for the spectra, at the 2.5th, 95th, and
97.5th percentile
p : np.ndarray
P-values (uncorrected for multiple comparisons) for each frequency
"""
# Compute the empirical statistics
f, y_emp = analysis_fnc(x)
# Run a bootstrapped permutation test.
# Create a surrogate distribution by randomly shuffling resps in time.
x_perm = []
y_perm = []
x_shuff = x.copy()
for k in range(k_perm):
np.random.shuffle(x_shuff)
_, y_perm_k = analysis_fnc(x_shuff)
y_perm.append(y_perm_k)
if k < 10: # Keep a few permutations for illustration
x_perm.append(x_shuff.copy())
# Find statistically significant oscillations
# Sometimes we get p=0 if no perms are larger than emp. Note that in this
# case, a Bonferroni correction doesn't have any effect on the p-values.
p = np.mean(np.vstack([y_perm, y_emp]) > y_emp, axis=0)
# Get summary of simulated spectra
y_avg = np.mean(y_perm, 1)
y_cis = np.percentile(y_perm, [2.5, 95, 97.5], 1)
# Bundle the results together
res = {}
res['x'] = x
res['x_perm'] = np.array(x_perm)
res['f'] = f
res['y_emp'] = y_emp
res['y_perm'] = np.array(y_perm)
res['y_avg'] = y_avg
res['y_cis'] = y_cis
res['p'] = p
return res
def window(x, win):
""" Apply a window to a segment of data
Parameters
----------
x : np.ndarray
The data
win : np.ndarray
The window
Returns
-------
x : np.ndarray
The windowed data
"""
return np.multiply(win, x.T).T
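As a quick, hedged illustration of the API above (not from the repository): `landau` expects per-trial hit/miss data `x`, trial time-stamps `t`, a sampling rate `fs` for the aggregated time-series, and a permutation count. The values below are placeholders chosen only so the example is self-contained.

import numpy as np
# from analysis_methods.shuff_time import landau  # import path assumed from the header above

rng = np.random.default_rng(0)
t = np.repeat(np.arange(0.3, 1.1, 0.05), 20)   # repeated SOAs (seconds), 20 trials each
x = rng.binomial(1, 0.6, size=t.size)          # hit (1) / miss (0) per trial
res = landau(x, t, fs=20, k_perm=1000)         # fs = 1 / 0.05 s SOA step (assumed)
print(res['p_corr'])                           # Bonferroni-corrected p-value per frequency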
| 29
| 79
| 0.62327
| 1,219
| 8,526
| 4.223134
| 0.231337
| 0.052448
| 0.055944
| 0.015152
| 0.286908
| 0.21115
| 0.17366
| 0.130536
| 0.101204
| 0.101204
| 0
| 0.011265
| 0.281609
| 8,526
| 293
| 80
| 29.098976
| 0.829224
| 0.555712
| 0
| 0.144578
| 0
| 0
| 0.05601
| 0.006608
| 0
| 0
| 0
| 0
| 0
| 1
| 0.096386
| false
| 0
| 0.072289
| 0
| 0.26506
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
32ef88405f3f3c3db42531c5dfa16c38dbb4d202
| 1,405
|
py
|
Python
|
Easy/112.PathSum.py
|
YuriSpiridonov/LeetCode
|
2dfcc9c71466ffa2ebc1c89e461ddfca92e2e781
|
[
"MIT"
] | 39
|
2020-07-04T11:15:13.000Z
|
2022-02-04T22:33:42.000Z
|
Easy/112.PathSum.py
|
YuriSpiridonov/LeetCode
|
2dfcc9c71466ffa2ebc1c89e461ddfca92e2e781
|
[
"MIT"
] | 1
|
2020-07-15T11:53:37.000Z
|
2020-07-15T11:53:37.000Z
|
Easy/112.PathSum.py
|
YuriSpiridonov/LeetCode
|
2dfcc9c71466ffa2ebc1c89e461ddfca92e2e781
|
[
"MIT"
] | 20
|
2020-07-14T19:12:53.000Z
|
2022-03-02T06:28:17.000Z
|
"""
Given a binary tree and a sum, determine if the tree has a root-to-leaf path
such that adding up all the values along the path equals the given sum.
Note: A leaf is a node with no children.
Example:
Given the below binary tree and sum = 22,
5
/ \
4 8
/ / \
11 13 4
/ \ \
7 2 1
return true, as there exists a root-to-leaf path 5->4->11->2 whose sum is 22.
"""
#Difficulty: Easy
#114 / 114 test cases passed.
#Runtime: 44 ms
#Memory Usage: 15.6 MB
#Runtime: 44 ms, faster than 72.99% of Python3 online submissions for Path Sum.
#Memory Usage: 15.6 MB, less than 43.57% of Python3 online submissions for Path Sum.
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution:
def hasPathSum(self, root: TreeNode, summ: int) -> bool:
result = []
s = 0
self.summFunc(root, s, result)
return summ in result
def summFunc(self, root, s, result):
if not root:
return 0
s += root.val
self.summFunc(root.left, s, result)
self.summFunc(root.right, s, result)
if not root.left and not root.right:
result.append(s)
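The accepted solution above collects every root-to-leaf sum and then checks membership; as a hedged aside (not part of the submission), the same check can be done by subtracting node values on the way down, which short-circuits as soon as a matching leaf is found.

# Alternative sketch, assuming the same TreeNode definition as above.
def has_path_sum(root, target):
    if not root:
        return False
    if not root.left and not root.right:   # leaf: the remaining target must match
        return root.val == target
    remaining = target - root.val
    return has_path_sum(root.left, remaining) or has_path_sum(root.right, remaining)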
| 28.673469
| 84
| 0.577936
| 210
| 1,405
| 3.847619
| 0.452381
| 0.034653
| 0.059406
| 0.027228
| 0.205446
| 0.089109
| 0.089109
| 0
| 0
| 0
| 0
| 0.052239
| 0.332384
| 1,405
| 48
| 85
| 29.270833
| 0.809168
| 0.608541
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.142857
| false
| 0
| 0
| 0
| 0.357143
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
32f6cfa5b601a97d41e10a68ea610b54a023b9f0
| 864
|
py
|
Python
|
src/test.py
|
ayieko168/Arduino-Oscilloscope
|
5a0634437010f4303c86aef141f33cc6a628b3dc
|
[
"MIT"
] | null | null | null |
src/test.py
|
ayieko168/Arduino-Oscilloscope
|
5a0634437010f4303c86aef141f33cc6a628b3dc
|
[
"MIT"
] | null | null | null |
src/test.py
|
ayieko168/Arduino-Oscilloscope
|
5a0634437010f4303c86aef141f33cc6a628b3dc
|
[
"MIT"
] | null | null | null |
import pyqtgraph as pg
import pyqtgraph.exporters
import numpy as np
import math
from time import sleep
f = 10
t = 0
Samples = 1000
# while True:
# y2 = np.sin( 2* np.pi * f * t)
# print(y)
# t+=0.01
# sleep(0.25)
def update():
global f, t, y2
print(len(y2))
if len(y2) == Samples:
y2.pop(0)
y2.append(np.sin( 2 * np.pi * f * t))
t += 0.0001
c2.updateData(y2)
# define the data
theTitle = "pyqtgraph plot"
y2 = []
# create plot
plt = pg.plot()
plt.showGrid(x=True,y=True)
dat2 = []
c2 = pg.PlotCurveItem(dat2)
plt.addItem(c2)
timer = pg.QtCore.QTimer ()
timer.timeout.connect(update)
timer.start(0.1)
## Start Qt event loop.
if __name__ == '__main__':
import sys
if sys.flags.interactive != 1 or not hasattr(pg.QtCore, 'PYQT_VERSION'):
pg.QtGui.QApplication.exec_()
| 16.941176
| 76
| 0.618056
| 139
| 864
| 3.769784
| 0.532374
| 0.01145
| 0.022901
| 0.030534
| 0.045802
| 0.045802
| 0.045802
| 0
| 0
| 0
| 0
| 0.058735
| 0.231481
| 864
| 51
| 77
| 16.941176
| 0.730422
| 0.157407
| 0
| 0
| 0
| 0
| 0.047288
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.033333
| false
| 0
| 0.2
| 0
| 0.233333
| 0.033333
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
32fcb908b2dfd2baf6aec8baabfb5d1f269220d0
| 1,577
|
py
|
Python
|
src/plyer_lach/platforms/android/email.py
|
locksmith47/turing-sim-kivy
|
f57de9d52494245c56f67dd7e63121434bb0553f
|
[
"MIT"
] | null | null | null |
src/plyer_lach/platforms/android/email.py
|
locksmith47/turing-sim-kivy
|
f57de9d52494245c56f67dd7e63121434bb0553f
|
[
"MIT"
] | null | null | null |
src/plyer_lach/platforms/android/email.py
|
locksmith47/turing-sim-kivy
|
f57de9d52494245c56f67dd7e63121434bb0553f
|
[
"MIT"
] | null | null | null |
from jnius import autoclass, cast
from kivy.logger import Logger
from plyer_lach.facades import Email
from plyer_lach.platforms.android import activity
Intent = autoclass('android.content.Intent')
AndroidString = autoclass('java.lang.String')
URI = autoclass('android.net.Uri')
class AndroidEmail(Email):
def _send(self, **kwargs):
intent = Intent(Intent.ACTION_SEND)
intent.setType('*/*')
recipient = kwargs.get('recipient')
subject = kwargs.get('subject')
text = kwargs.get('text')
create_chooser = kwargs.get('create_chooser')
file_path = kwargs.get('file_path')
if recipient:
intent.putExtra(Intent.EXTRA_EMAIL, [recipient])
if subject:
android_subject = cast('java.lang.CharSequence',
AndroidString(subject))
intent.putExtra(Intent.EXTRA_SUBJECT, android_subject)
if file_path:
file_uri = URI.parse('file://' + file_path)
Logger.info(str(file_uri.toString()))
intent.putExtra(Intent.EXTRA_STREAM, cast('android.os.Parcelable', file_uri))
Logger.info('Added file')
if create_chooser:
chooser_title = cast('java.lang.CharSequence',
AndroidString('Send message with:'))
activity.startActivity(Intent.createChooser(intent,
chooser_title))
else:
activity.startActivity(intent)
def instance():
return AndroidEmail()
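For context, a hedged sketch of how this backend is typically reached from application code: upstream plyer exposes an `email` proxy whose `send()` forwards keyword arguments to the platform `_send()`. Assuming plyer_lach mirrors that facade (the import path below is an assumption), usage would look roughly like this.

# Hypothetical usage; keyword names mirror the kwargs read by _send() above.
from plyer_lach import email

email.send(
    recipient='someone@example.com',
    subject='Report',
    text='See the attached file.',
    file_path='/sdcard/report.csv',  # forwarded as EXTRA_STREAM
    create_chooser=True,             # wrap the intent in a chooser dialog
)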
| 36.674419
| 89
| 0.606848
| 160
| 1,577
| 5.85
| 0.3625
| 0.048077
| 0.064103
| 0.080128
| 0.07906
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.287254
| 1,577
| 42
| 90
| 37.547619
| 0.83274
| 0
| 0
| 0
| 0
| 0
| 0.126189
| 0.055168
| 0
| 0
| 0
| 0
| 0
| 1
| 0.055556
| false
| 0
| 0.111111
| 0.027778
| 0.222222
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fd032c799cd2f082ede61113614415437237b7bc
| 40,263
|
py
|
Python
|
src/eventail/async_service/pika/base.py
|
allo-media/eventail
|
aed718d733709f1a522fbfec7083ddd8ed7b5039
|
[
"MIT"
] | 2
|
2019-12-12T15:08:25.000Z
|
2020-05-19T08:52:06.000Z
|
src/eventail/async_service/pika/base.py
|
allo-media/eventail
|
aed718d733709f1a522fbfec7083ddd8ed7b5039
|
[
"MIT"
] | 10
|
2021-01-19T15:03:51.000Z
|
2022-03-08T15:48:22.000Z
|
src/eventail/async_service/pika/base.py
|
allo-media/eventail
|
aed718d733709f1a522fbfec7083ddd8ed7b5039
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
#
# MIT License
#
# Copyright (c) 2018-2019 Groupe Allo-Media
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
"""
A base class implementing AM service architecture and its requirements.
Inspired from pika complete examples.
"""
import functools
import json
import logging
import os
import signal
import socket
import traceback
from contextlib import contextmanager
from typing import Any, Callable, Dict, Generator, List, Optional, Sequence, Tuple
import cbor
import pika
from eventail.gelf import GELF
from eventail.log_criticity import ALERT, EMERGENCY, ERROR, WARNING
LOGGER = logging.getLogger("async_service")
JSON_MODEL = Dict[str, Any]
HEADER = Dict[str, str]
class Service(object):
"""This is an example service that will handle unexpected interactions
with RabbitMQ such as channel and connection closures.
If RabbitMQ closes the connection, this class will stop and indicate
that reconnection is necessary. You should look at the output, as
there are limited reasons why the connection may be closed, which
usually are tied to permission related issues or socket timeouts.
If the channel is closed, it will indicate a problem with one of the
commands that were issued and that should surface in the output as well.
To leverage the binary nature of AMQP messages, we use CBOR instead of
JSON as data serialization (transparent). Moreover, CBOR is much faster
and much more compact than JSON.
"""
ID = os.getpid()
HOSTNAME = socket.gethostname()
EVENT_EXCHANGE = "events"
CMD_EXCHANGE = "commands"
LOG_EXCHANGE = "logs"
EVENT_EXCHANGE_TYPE = "topic"
CMD_EXCHANGE_TYPE = "topic"
LOG_EXCHANGE_TYPE = "topic"
RETRY_DELAY = 15 # in seconds
#: Heartbeat interval; must be greater than the expected blocking processing time (in seconds).
#: Beware that the actual delay is negotiated with the broker, and the lower value is taken, so
#: configure Rabbitmq accordingly.
HEARTBEAT = 60
#: When rabbitmq is low on resources, it may temporarily block the connection.
#: We can specify a timeout if it is not acceptable to the service (in seconds)
BLOCKED_TIMEOUT = 3600
#: In production, experiment with higher prefetch values
#: for higher consumer throughput
PREFETCH_COUNT = 3
def __init__(
self,
amqp_urls: List[str],
event_routing_keys: Sequence[str],
command_routing_keys: Sequence[str],
logical_service: str,
) -> None:
"""Create a new instance of the consumer class, passing in the AMQP
URL used to connect to RabbitMQ.
:param str amqp_urls: List of AMQP urls.
The service will try to connect to one of them, in a round-robin fashion.
"""
self._urls = amqp_urls
self._event_routing_keys = event_routing_keys
self._command_routing_keys = command_routing_keys
self.logical_service = logical_service
self.url_idx = 0
self._event_queue = logical_service + ".events"
self._command_queue = logical_service + ".commands"
self.exclusive_queues = False
self._serialize: Callable[..., bytes] = cbor.dumps
self._mime_type = "application/cbor"
self._connection: pika.SelectConnection
self._channel: pika.channel.Channel
self._log_channel: pika.channel.Channel
for s in (signal.SIGHUP, signal.SIGTERM, signal.SIGINT):
signal.signal(s, lambda _s, _f: self.stop())
def reset_connection_state(self) -> None:
self._bind_count = (len(self._event_routing_keys) or 1) + (
len(self._command_routing_keys) or 1
)
self.should_reconnect = False
self.was_consuming = False
self._closing = False
self._event_consumer_tag: Optional[str] = None
self._command_consumer_tag: Optional[str] = None
self._consuming = False
# for events publishing only
self._deliveries: Dict[
int, Tuple[str, str, JSON_MODEL, str, bool, Optional[HEADER]]
] = {}
self._acked = 0
self._nacked = 0
self._message_number = 0
def connect(self) -> pika.SelectConnection:
"""This method connects to RabbitMQ, returning the connection handle.
When the connection is established, the on_connection_open method
will be invoked by pika.
:rtype: pika.SelectConnection
"""
self.reset_connection_state()
url = self._urls[self.url_idx]
self.url_idx = (self.url_idx + 1) % len(self._urls)
LOGGER.info("Connecting to %s", url)
connection_params = pika.URLParameters(url)
connection_params.heartbeat = self.HEARTBEAT
connection_params.blocked_connection_timeout = self.BLOCKED_TIMEOUT
return pika.SelectConnection(
parameters=connection_params,
on_open_callback=self.on_connection_open,
on_open_error_callback=self.on_connection_open_error,
on_close_callback=self.on_connection_closed,
)
def close_connection(self) -> None:
self._consuming = False
if self._connection.is_closing or self._connection.is_closed:
LOGGER.info("Connection is closing or already closed")
else:
LOGGER.info("Closing connection")
self._connection.close()
def on_connection_open(self, _unused_connection: pika.BaseConnection) -> None:
"""This method is called by pika once the connection to RabbitMQ has
been established. It passes the handle to the connection object in
case we need it, but in this case, we'll just mark it unused.
:param pika.SelectConnection _unused_connection: The connection
"""
LOGGER.info("Connection opened")
self.open_channels()
def on_connection_open_error(
self, _unused_connection: pika.BaseConnection, err: Exception
) -> None:
"""This method is called by pika if the connection to RabbitMQ
can't be established.
:param pika.SelectConnection _unused_connection: The connection
:param Exception err: The error
"""
LOGGER.error("Connection open failed: %s", err)
self.reconnect(True)
def on_connection_closed(
self, _unused_connection: pika.BaseConnection, reason: Exception
) -> None:
"""This method is invoked by pika when the connection to RabbitMQ is
closed unexpectedly. Since it is unexpected, we will reconnect to
RabbitMQ if it disconnects.
:param pika.connection.Connection connection: The closed connection obj
:param Exception reason: exception representing reason for loss of
connection.
"""
if self._closing:
self._connection.ioloop.stop()
else:
self.reconnect(True)
def reconnect(self, should_reconnect=True) -> None:
"""Will be invoked if the connection can't be opened or is
closed. Indicates that a reconnect is necessary then stops the
ioloop.
"""
self.should_reconnect = should_reconnect
self.stop(should_reconnect)
def open_channels(self) -> None:
"""Open a new channel with RabbitMQ by issuing the Channel.Open RPC
command. When RabbitMQ responds that the channel is open, the
on_channel_open callback will be invoked by pika.
"""
LOGGER.info("Creating channels")
self._connection.channel(
on_open_callback=functools.partial(self.on_channel_open, main=True)
)
self._connection.channel(
on_open_callback=functools.partial(self.on_channel_open, main=False)
)
def on_channel_open(self, channel: pika.channel.Channel, main: bool) -> None:
"""This method is invoked by pika when the channel has been opened.
The channel object is passed in so we can make use of it.
Since the channel is now open, we'll declare the exchanges to use.
:param pika.channel.Channel channel: The channel object
"""
LOGGER.info("Channel opened")
if main:
self._channel = channel
self.setup_exchange(self.EVENT_EXCHANGE, self.EVENT_EXCHANGE_TYPE, channel)
self.setup_exchange(self.CMD_EXCHANGE, self.CMD_EXCHANGE_TYPE, channel)
else:
self._log_channel = channel
self.setup_exchange(self.LOG_EXCHANGE, self.LOG_EXCHANGE_TYPE, channel)
self.add_on_channel_close_callback(channel)
def add_on_channel_close_callback(self, channel: pika.channel.Channel) -> None:
"""This method tells pika to call the on_channel_closed method if
RabbitMQ unexpectedly closes the channel.
"""
LOGGER.info("Adding channel close callback")
channel.add_on_close_callback(self.on_channel_closed)
def on_channel_closed(
self, channel: pika.channel.Channel, reason: Exception
) -> None:
"""Invoked by pika when RabbitMQ unexpectedly closes the channel.
Channels are usually closed if you attempt to do something that
violates the protocol, such as re-declare an exchange or queue with
different parameters. In this case, we'll close the connection
to shutdown the object.
:param pika.channel.Channel: The closed channel
:param Exception reason: why the channel was closed
"""
LOGGER.warning("Channel %i was closed: %s", channel, reason)
self.close_connection()
def setup_exchange(
self, exchange_name: str, exchange_type: str, channel: pika.channel.Channel
) -> None:
"""Setup the exchange on RabbitMQ by invoking the Exchange.Declare RPC
command. When it is complete, the on_exchange_declareok method will
be invoked by pika.
:param str|unicode exchange_name: The name of the exchange to declare
"""
LOGGER.info("Declaring exchange: %s", exchange_name)
# Note: using functools.partial is not required, it is demonstrating
# how arbitrary data can be passed to the callback when it is called
cb = functools.partial(self.on_exchange_declareok, exchange_name=exchange_name)
channel.exchange_declare(
exchange=exchange_name,
exchange_type=exchange_type,
callback=cb,
durable=True,
)
def on_exchange_declareok(
self, _unused_frame: pika.frame.Method, exchange_name: str
) -> None:
"""Invoked by pika when RabbitMQ has finished the Exchange.Declare RPC
command.
:param pika.frame.Method unused_frame: Exchange.DeclareOk response frame
:param str|unicode userdata: Extra user data (exchange name)
"""
LOGGER.info("Exchange declared: %s", exchange_name)
if (
exchange_name == self.EVENT_EXCHANGE
and self._event_routing_keys
or exchange_name == self.CMD_EXCHANGE
and self._command_routing_keys
):
self.setup_queue(exchange_name)
elif exchange_name != self.LOG_EXCHANGE:
self._bind_count -= 1
if self._bind_count == 0:
self.set_qos()
def setup_queue(self, exchange_name: str) -> None:
"""Setup the queue on RabbitMQ by invoking the Queue.Declare RPC
command. When it is complete, the on_queue_declareok method will
be invoked by pika.
:param str|unicode exchange: The name of exchange to bind.
"""
cb = functools.partial(self.on_queue_declareok, exchange_name=exchange_name)
if self.exclusive_queues:
LOGGER.info("Declaring exclusive on exchange %s", exchange_name)
self._channel.queue_declare("", exclusive=True, callback=cb)
else:
queue = (
self._event_queue
if exchange_name == self.EVENT_EXCHANGE
else self._command_queue
)
LOGGER.info("Declaring queue %s on exchange %s", queue, exchange_name)
self._channel.queue_declare(queue=queue, durable=True, callback=cb)
def on_queue_declareok(self, frame: pika.frame.Method, exchange_name: str) -> None:
"""Method invoked by pika when the Queue.Declare RPC call made in
setup_queue has completed. In this method we will bind the queue
and exchange together with the routing key by issuing the Queue.Bind
RPC command. When this command is complete, the on_bindok method will
be invoked by pika.
:param pika.frame.Method frame: The Queue.DeclareOk frame
"""
queue_name = frame.method.queue
routing_keys: Sequence[str]
if exchange_name == self.EVENT_EXCHANGE:
routing_keys = self._event_routing_keys
self._event_queue = queue_name
else:
routing_keys = self._command_routing_keys
self._command_queue = queue_name
LOGGER.info("Binding %s to %s with %s", exchange_name, queue_name, routing_keys)
for key in routing_keys:
self._channel.queue_bind(
queue_name, exchange_name, routing_key=key, callback=self.on_bindok
)
def on_bindok(self, _unused_frame: pika.frame.Method) -> None:
"""Invoked by pika when the Queue.Bind method has completed. At this
point we will set the prefetch count for the channel.
:param pika.frame.Method _unused_frame: The Queue.BindOk response frame
"""
LOGGER.info("Queue bound")
self._bind_count -= 1
if self._bind_count == 0:
self.set_qos()
def set_qos(self) -> None:
"""This method sets up the consumer prefetch to only be delivered
PREFETCH_COUNT at a time. The consumer must acknowledge this message
before RabbitMQ will deliver another one. You should experiment
with different prefetch values to achieve desired performance.
"""
self._channel.basic_qos(
prefetch_count=self.PREFETCH_COUNT, callback=self.on_basic_qos_ok
)
def on_basic_qos_ok(self, _unused_frame: pika.frame.Method) -> None:
"""Invoked by pika when the Basic.QoS method has completed. At this
point we will start consuming messages by calling start_consuming
which will invoke the needed RPC commands to start the process.
:param pika.frame.Method _unused_frame: The Basic.QosOk response frame
"""
LOGGER.info("QOS set to: %d", self.PREFETCH_COUNT)
self.enable_delivery_confirmations()
self.start_consuming()
def enable_delivery_confirmations(self) -> None:
"""Send the Confirm.Select RPC method to RabbitMQ to enable delivery
confirmations on the channel. The only way to turn this off is to close
the channel and create a new one.
When the message is confirmed from RabbitMQ, the
on_delivery_confirmation method will be invoked passing in a Basic.Ack
or Basic.Nack method from RabbitMQ that will indicate which messages it
is confirming or rejecting.
"""
LOGGER.info("Issuing Confirm.Select RPC command")
self._channel.confirm_delivery(self.on_delivery_confirmation)
def on_delivery_confirmation(self, method_frame: pika.frame.Method) -> None:
"""Invoked by pika when RabbitMQ responds to a Basic.Publish RPC
command, passing in either a Basic.Ack or Basic.Nack frame with
the delivery tag of the message that was published. The delivery tag
is an integer counter indicating the message number that was sent
on the channel via Basic.Publish. Here we're just doing housekeeping
to keep track of stats and remove message numbers that we expect
a delivery confirmation of from the list used to keep track of messages
that are pending confirmation.
BEWARE: the `ack` and `nack` received here are emitted by the broker,
not by other services! They mean the broker accepted/received the
message or not.
Unroutable messages won't raise a `nack`.
If you want to be notified of unroutable messages,
you need to set `mandatory=True` on the emitted message and
implement `handle_returned_message`. The unroutable message
will then be returned to this callback.
:param pika.frame.Method method_frame: Basic.Ack or Basic.Nack frame
"""
confirmation_type: str = method_frame.method.NAME.split(".")[1].lower()
delivery_tag: int = method_frame.method.delivery_tag
multiple: bool = method_frame.method.multiple
LOGGER.info("Received %s for delivery tag: %i", confirmation_type, delivery_tag)
confirm_range: List[int]
if multiple:
confirm_range = [
i for i in sorted(self._deliveries.keys()) if i <= delivery_tag
]
else:
confirm_range = [delivery_tag]
num_confirms = len(confirm_range)
if confirmation_type == "ack":
self._acked += num_confirms
elif confirmation_type == "nack":
self._nacked += num_confirms
# The broker is in trouble, resend later
for i in confirm_range:
self.call_later(
self.RETRY_DELAY, lambda args=self._deliveries[i]: self._emit(*args)
)
for i in confirm_range:
del self._deliveries[i]
LOGGER.info(
"Published %i messages, %i have yet to be confirmed, "
"%i were acked and %i were nacked",
self._message_number,
len(self._deliveries),
self._acked,
self._nacked,
)
def start_consuming(self) -> None:
"""This method sets up the consumer by first calling
add_on_cancel_callback so that the object is notified if RabbitMQ
cancels the consumer. It then issues the Basic.Consume RPC command
which returns the consumer tag that is used to uniquely identify the
consumer with RabbitMQ. We keep the value to use it when we want to
cancel consuming. The on_message method is passed in as a callback pika
will invoke when a message is fully received.
"""
LOGGER.info("Issuing consumer related RPC commands")
self.add_on_cancel_callback()
self.add_on_return_callback()
if self._event_routing_keys:
self._event_consumer_tag = self._channel.basic_consume(
self._event_queue, self.on_message
)
self._consuming = True
if self._command_routing_keys:
self._command_consumer_tag = self._channel.basic_consume(
self._command_queue, self.on_message
)
self._consuming = True
self.was_consuming = True
self.on_ready()
def add_on_cancel_callback(self) -> None:
"""Add a callback that will be invoked if RabbitMQ cancels the consumer
for some reason. If RabbitMQ does cancel the consumer,
on_consumer_cancelled will be invoked by pika.
"""
LOGGER.info("Adding consumer cancellation callback")
self._channel.add_on_cancel_callback(self.on_consumer_cancelled)
def add_on_return_callback(self) -> None:
"""Add a callback that will be invoked to return an unroutable message."""
LOGGER.info("Adding return callback")
self._channel.add_on_return_callback(self.on_message_returned)
def on_consumer_cancelled(self, method_frame: pika.frame.Method) -> None:
"""Invoked by pika when RabbitMQ sends a Basic.Cancel for a consumer
receiving messages.
:param pika.frame.Method method_frame: The Basic.Cancel frame
"""
LOGGER.info("Consumer was cancelled remotely, shutting down: %r", method_frame)
if not (self._channel.is_closed or self._channel.is_closing):
self._channel.close()
def on_message_returned(
self,
ch: pika.channel.Channel,
basic_return: pika.spec.Basic.Return,
properties: pika.spec.BasicProperties,
body: bytes,
):
"""Invoked by pika when a message is returned.
A message may be returned if:
* it was sent with the `mandatory` flag on True;
* the broker was unable to route it to a queue.
:param pika.channel.Channel ch: The channel object
:param pika.Spec.Basic.Return basic_deliver: method
:param pika.Spec.BasicProperties: properties
:param bytes body: The message body
"""
decoder = cbor if properties.content_type == "application/cbor" else json
# If we are not able to decode our own payload, better crash the service now
payload: JSON_MODEL = decoder.loads(body) if body else None
routing_key: str = basic_return.routing_key
envelope: Dict[str, str] = {}
if properties.reply_to:
envelope["reply_to"] = properties.reply_to
if properties.correlation_id:
envelope["correlation_id"] = properties.correlation_id
if properties.headers:
envelope.update(properties.headers)
LOGGER.info("Received returned message: %s", routing_key)
try:
self.handle_returned_message(routing_key, payload, envelope)
except Exception as e:
# unexpected error
self.log(
EMERGENCY,
"in handle_returned_message [{}] {}".format(self.logical_service, e),
conversation_id=envelope.get("conversation_id", ""),
)
# Crash the service now
self.stop()
def on_message(
self,
ch: pika.channel.Channel,
basic_deliver: pika.spec.Basic.Deliver,
properties: pika.spec.BasicProperties,
body: bytes,
) -> None:
"""Invoked by pika when a message is delivered from RabbitMQ. The
channel is passed for your convenience. The basic_deliver object that
is passed in carries the exchange, routing key, delivery tag and
a redelivered flag for the message. The properties passed in is an
instance of BasicProperties with the message properties and the body
is the message that was sent.
:param pika.channel.Channel ch: The channel object
:param pika.Spec.Basic.Deliver: basic_deliver method
:param pika.Spec.BasicProperties: properties
:param bytes body: The message body
"""
headers: HEADER = properties.headers
decoder = cbor if properties.content_type == "application/cbor" else json
routing_key: str = basic_deliver.routing_key
exchange: str = basic_deliver.exchange
if headers is None or "conversation_id" not in headers:
self.log(EMERGENCY, f"Missing headers on {routing_key}")
# unrecoverable error, send to dead letter
ch.basic_nack(delivery_tag=basic_deliver.delivery_tag, requeue=False)
return
conversation_id = headers["conversation_id"]
try:
payload: JSON_MODEL = decoder.loads(body) if body else None
except ValueError:
self.log(
EMERGENCY,
f"Unable to decode payload for {routing_key}; dead lettering.",
conversation_id=conversation_id,
)
# Unrecoverable, put to dead letter
ch.basic_nack(delivery_tag=basic_deliver.delivery_tag, requeue=False)
return
LOGGER.info("Received message from %s: %s", exchange, routing_key)
if exchange == self.CMD_EXCHANGE:
correlation_id = properties.correlation_id
reply_to = properties.reply_to
status = headers.get("status", "") if headers else ""
if not (reply_to or status):
self.log(
EMERGENCY,
"invalid enveloppe for command/result: {}; dead lettering.".format(
headers
),
conversation_id=conversation_id,
)
# Unrecoverable. Put to dead letter
ch.basic_nack(delivery_tag=basic_deliver.delivery_tag, requeue=False)
return
if reply_to:
with self.ack_policy(
ch, basic_deliver, conversation_id, reply_to, correlation_id
):
self.handle_command(
routing_key, payload, conversation_id, reply_to, correlation_id
)
else:
with self.ack_policy(
ch, basic_deliver, conversation_id, reply_to, correlation_id
):
self.handle_result(
routing_key, payload, conversation_id, status, correlation_id
)
else:
with self.ack_policy(ch, basic_deliver, conversation_id, "", ""):
self.handle_event(routing_key, payload, conversation_id)
@contextmanager
def ack_policy(
self,
ch: pika.channel.Channel,
deliver: pika.spec.Basic.Deliver,
conversation_id: str,
reply_to: str,
correlation_id: str,
) -> Generator[None, None, None]:
try:
yield None
except Exception:
error = traceback.format_exc()
self.log(
ALERT,
f"Unhandled error while processing message {deliver.routing_key}",
error,
conversation_id=conversation_id,
)
# retry once
if not deliver.redelivered:
ch.basic_nack(delivery_tag=deliver.delivery_tag, requeue=True)
else:
# dead letter
self.log(
EMERGENCY,
f"Giving up on {deliver.routing_key}",
error,
conversation_id=conversation_id,
)
ch.basic_nack(delivery_tag=deliver.delivery_tag, requeue=False)
else:
ch.basic_ack(delivery_tag=deliver.delivery_tag)
def stop_consuming(self) -> None:
"""Tell RabbitMQ that you would like to stop consuming by sending the
Basic.Cancel RPC command.
"""
if not (self._channel.is_closed or self._channel.is_closing):
LOGGER.info("Sending a Basic.Cancel RPC command to RabbitMQ")
for consumer_tag in (self._event_consumer_tag, self._command_consumer_tag):
if consumer_tag is not None:
cb = functools.partial(self.on_cancelok, userdata=consumer_tag)
self._channel.basic_cancel(consumer_tag, cb)
def on_cancelok(self, _unused_frame: pika.frame.Method, userdata: str) -> None:
"""This method is invoked by pika when RabbitMQ acknowledges the
cancellation of a consumer. At this point we will close the channel.
This will invoke the on_channel_closed method once the channel has been
closed, which will in turn close the connection.
:param pika.frame.Method _unused_frame: The Basic.CancelOk frame
:param str|unicode userdata: Extra user data (consumer tag)
"""
self._consuming = False
LOGGER.info(
"RabbitMQ acknowledged the cancellation of the consumer: %s", userdata
)
self.close_channel()
def close_channel(self) -> None:
"""Call to close the channel with RabbitMQ cleanly by issuing the
Channel.Close RPC command.
"""
LOGGER.info("Closing the channels")
self._channel.close()
self._log_channel.close()
def _emit(
self,
exchange: str,
routing_key: str,
message: JSON_MODEL,
conversation_id: str,
mandatory: bool,
reply_to: str = "",
correlation_id: str = "",
headers: Optional[HEADER] = None,
) -> None:
"""Send a message.
The `message` is any data conforming to the JSON model.
"""
if headers is None:
headers = {}
headers["conversation_id"] = conversation_id
self._channel.basic_publish(
exchange=exchange,
routing_key=routing_key,
body=self._serialize(message),
mandatory=mandatory,
properties=pika.BasicProperties(
delivery_mode=2, # make message persistent
content_type=self._mime_type,
reply_to=reply_to,
correlation_id=correlation_id,
headers=headers,
),
)
self._message_number += 1
self._deliveries[self._message_number] = (
exchange,
routing_key,
message,
conversation_id,
mandatory,
headers,
)
LOGGER.info("Published message # %i", self._message_number)
# Public interface
def use_json(self) -> None:
"""Force sending message serialized in plain JSON instead of CBOR."""
self._serialize = lambda message: json.dumps(message).encode("utf-8")
self._mime_type = "application/json"
def use_exclusive_queues(self) -> None:
"""Force usage of exclusive queues.
This is useful for debug tools that should not leave a queue behind them (overflow risk)
and that should not interfere with other instances.
"""
self.exclusive_queues = True
def log(
self,
criticity: int,
short: str,
full: str = "",
conversation_id: str = "",
additional_fields: Dict = {},
) -> None:
"""Log to the log bus.
Parameters:
- `criticity`: int, in the syslog scale
- `short`: str, short description of log
- `full`: str, the full message of the log (appears as `message` in Graylog)
- `additional_fields: Dict, data to be merged into the GELF payload as additional fields
"""
gelf = GELF(self, criticity, short, full, conversation_id, additional_fields)
LOGGER.debug("Application logged: %s\n%s", short, full)
# no persistent messages, no delivery confirmations
self._log_channel.basic_publish(
exchange=self.LOG_EXCHANGE,
routing_key=gelf.routing_key,
body=gelf.payload,
)
def send_command(
self,
command: str,
message: JSON_MODEL,
conversation_id: str,
reply_to: str,
correlation_id: str,
mandatory: bool = True,
) -> None:
"""Send a command message.
The `message` is any data conforming to the JSON model.
if `mandatory` is True (default) and you have implemented
`handle_returned_message`, then it will be called if your message
is unroutable."""
self._emit(
self.CMD_EXCHANGE,
command,
message,
conversation_id,
mandatory,
reply_to=reply_to,
correlation_id=correlation_id,
)
def return_success(
self,
destination: str,
message: JSON_MODEL,
conversation_id: str,
correlation_id: str,
mandatory: bool = True,
) -> None:
"""Send a successful result message.
The `message` is any data conforming to the JSON model.
if `mandatory` is True (default) and you have implemented
`handle_returned_message`, then it will be called if your message
is unroutable."""
headers = {"status": "success"}
self._emit(
self.CMD_EXCHANGE,
destination,
message,
conversation_id,
mandatory,
correlation_id=correlation_id,
headers=headers,
)
def return_error(
self,
destination: str,
message: JSON_MODEL,
conversation_id: str,
correlation_id: str,
mandatory: bool = True,
) -> None:
"""Send a failure result message.
The `message` is any data conforming to the JSON model.
If `mandatory` is True (default) and you have implemented
`handle_returned_message`, then it will be called if your message
is unroutable."""
headers = {"status": "error"}
self._emit(
self.CMD_EXCHANGE,
destination,
message,
conversation_id,
mandatory,
correlation_id=correlation_id,
headers=headers,
)
def publish_event(
self,
event: str,
message: JSON_MODEL,
conversation_id: str,
mandatory: bool = False,
) -> None:
"""Publish an event on the bus.
The ``event`` is the name of the event,
and the `message` is any data conforming to the JSON model.
If `mandatory` is True and you have implemented
`handle_returned_message`, then it will be called if your message
is unroutable.
The default is False because some events may be unused yet.
"""
self._emit(self.EVENT_EXCHANGE, event, message, conversation_id, mandatory)
def call_later(self, delay: int, callback: Callable) -> None:
"""Call `callback` after `delay` seconds."""
self._connection.ioloop.call_later(delay, callback)
def run(self) -> None:
"""Run the service by connecting to RabbitMQ and then
starting the IOLoop to block and allow the SelectConnection to operate.
"""
self._connection = self.connect()
self._connection.ioloop.start()
def stop(self, reconnect=False) -> None:
"""Cleanly shutdown the connection to RabbitMQ by stopping the consumer
with RabbitMQ. When RabbitMQ confirms the cancellation, on_cancelok
will be invoked by pika, which will then close the channel and
connection. The IOLoop is started again if this method is invoked
when CTRL-C is pressed raising a KeyboardInterrupt exception. This
exception stops the IOLoop which needs to be running for pika to
communicate with RabbitMQ. All of the commands issued prior to starting
the IOLoop will be buffered but not processed.
This method is automatically triggered if we receive one of
these UNIX signals: signal.SIGHUP, signal.SIGTERM, signal.SIGINT.
"""
self.should_reconnect = reconnect
if not self._closing:
if not self._connection.is_closed:
self.log(WARNING, "Shutting down…")
self._closing = True
LOGGER.info("Stopping")
if self._consuming:
self.stop_consuming()
try:
self._connection.ioloop.start()
except RuntimeError:
# already running!
pass
else:
self._connection.ioloop.stop()
LOGGER.info("Stopped")
def handle_event(
self, event: str, payload: JSON_MODEL, conversation_id: str
) -> None:
"""Handle incoming event (may be overwritten by subclasses).
The `payload` is already decoded and is a python data structure compatible with the JSON data model.
You should never do any filtering here: use the routing keys instead
(see ``__init__()``).
The default implementation dispatches the messages by calling methods in the form
``self.on_KEY(payload)`` where key is the routing key.
"""
handler = getattr(self, "on_" + event)
if handler is not None:
handler(payload, conversation_id)
else:
self.log(
ERROR,
f"unexpected event {event}; check your subscriptions!",
conversation_id=conversation_id,
)
def handle_command(
self,
command: str,
payload: JSON_MODEL,
conversation_id: str,
reply_to: str,
correlation_id: str,
) -> None:
"""Handle incoming commands (may be overwriten by subclasses).
The `payload` is already decoded and is a python data structure compatible with the JSON data model.
You should never do any filtering here: use the routing keys instead (see ``__init__()``).
Expected errors should be returned with the ``return_error`` method.
The default implementation dispatches the messages by calling methods in the form
``self.on_COMMAND(payload, reply_to, correlation_id)`` where COMMAND is what is left
after stripping the ``service.`` prefix from the routing key.
"""
handler = getattr(self, "on_" + command.split(".")[-1])
if handler is not None:
handler(payload, conversation_id, reply_to, correlation_id)
else:
# should never happen: it means we misconfigured the routing keys
self.log(
ERROR,
f"unexpected command {command}; check your subscriptions!",
conversation_id=conversation_id,
)
def handle_result(
self,
key: str,
payload: JSON_MODEL,
conversation_id: str,
status: str,
correlation_id: str,
) -> None:
"""Handle incoming result (may be overwritten by subclasses).
The `payload` is already decoded and is a python data structure compatible with the JSON data model.
You should never do any filtering here: use the routing keys instead (see ``__init__()``).
The ``key`` is the routing key and ``status`` is either "success" or "error".
The default implementation dispatches the messages by calling methods in the form
``self.on_KEY(payload, status, correlation_id)`` where KEY is what is left
after stripping the ``service.`` prefix from the routing key.
"""
handler = getattr(self, "on_" + key.split(".")[-1])
if handler is not None:
handler(payload, conversation_id, status, correlation_id)
else:
# should never happen: it means we misconfigured the routing keys
self.log(
ERROR,
f"unexpected result {key}; check your subscriptions!",
conversation_id=conversation_id,
)
# Abstract methods
def handle_returned_message(
self, key: str, payload: JSON_MODEL, envelope: Dict[str, str]
):
"""Invoked when a message is returned (to be implemented by subclasses).
A message may be returned if:
* it was sent with the `mandatory` flag on True;
* and the broker was unable to route it to a queue.
"""
pass
def on_ready(self) -> None:
"""Code to execute once the service comes online.
(to be implemented by subclasses)
"""
pass
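A minimal subclass sketch (not part of the source) showing the dispatch convention documented in `handle_event` and `handle_command`: incoming routing keys are mapped to `on_<KEY>` methods. The broker URL, routing keys, and service name below are placeholders.

class EchoService(Service):
    def on_ready(self):
        self.log(WARNING, 'echo service online')

    # events: handle_event dispatches routing key 'ping' to on_ping
    def on_ping(self, payload, conversation_id):
        self.publish_event('pong', {'echo': payload}, conversation_id)

    # commands: handle_command keeps only the part after the last dot,
    # so 'echo.do_echo' is dispatched to on_do_echo
    def on_do_echo(self, payload, conversation_id, reply_to, correlation_id):
        self.return_success(reply_to, payload, conversation_id, correlation_id)


if __name__ == '__main__':
    EchoService(
        ['amqp://guest:guest@localhost/'],  # placeholder broker URL
        ['ping'],                           # event routing keys
        ['echo.do_echo'],                   # command routing keys
        'echo',
    ).run()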
| 39.014535
| 108
| 0.634304
| 4,882
| 40,263
| 5.082548
| 0.139697
| 0.025954
| 0.009954
| 0.008222
| 0.36771
| 0.299077
| 0.255108
| 0.224922
| 0.199694
| 0.177649
| 0
| 0.001231
| 0.293967
| 40,263
| 1,031
| 109
| 39.052376
| 0.871531
| 0.367956
| 0
| 0.327616
| 0
| 0
| 0.068712
| 0.002774
| 0
| 0
| 0
| 0
| 0
| 1
| 0.080618
| false
| 0.005146
| 0.022298
| 0
| 0.132075
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fd0394b6bd7363e7ed4aa89ca0603954bd731b42
| 889
|
py
|
Python
|
CLI/mainmenue.py
|
MeatBoyed/PasswordBank2
|
f4367b22902ce1282772b184899e3d6e899c1cca
|
[
"MIT"
] | 1
|
2021-02-08T17:45:28.000Z
|
2021-02-08T17:45:28.000Z
|
CLI/mainmenue.py
|
MeatBoyed/PasswordBank2
|
f4367b22902ce1282772b184899e3d6e899c1cca
|
[
"MIT"
] | null | null | null |
CLI/mainmenue.py
|
MeatBoyed/PasswordBank2
|
f4367b22902ce1282772b184899e3d6e899c1cca
|
[
"MIT"
] | null | null | null |
from .mock_api.utils import GetSelection
from .viewAccounts import ViewAccounts
from .addAccount import AddAccount
def MainMenue():
headerMessage = (
"""\n\n=========================================================\n===================== Main Menue ========================\n""")
print(headerMessage)
accessMessage = (
"""1: Search for Account(s)\n2: Add an Account\n3: Quit\n\n=========================================================""")
print(accessMessage)
while True:
select = GetSelection()
print("---------------------------------------------------------")
if select == 1:
ViewAccounts()
elif select == 2:
AddAccount()
pass
elif select == 3:
print("Quitting account")
break
else:
print("Enter a valid select option")
| 26.939394
| 137
| 0.418448
| 69
| 889
| 5.376812
| 0.594203
| 0.016173
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.009091
| 0.257593
| 889
| 32
| 138
| 27.78125
| 0.55303
| 0
| 0
| 0
| 0
| 0
| 0.155763
| 0.088785
| 0
| 0
| 0
| 0
| 0
| 1
| 0.043478
| false
| 0.043478
| 0.130435
| 0
| 0.173913
| 0.217391
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fd06722fb8cfe07ace7e4c46b654df0346766b26
| 4,181
|
py
|
Python
|
nn_similarity_index/cwt_kernel_mat.py
|
forgi86/xfer
|
56d98a66d6adb2466d1a73b52f3b27193930a008
|
[
"Apache-2.0"
] | 244
|
2018-08-31T18:35:29.000Z
|
2022-03-20T01:12:50.000Z
|
nn_similarity_index/cwt_kernel_mat.py
|
forgi86/xfer
|
56d98a66d6adb2466d1a73b52f3b27193930a008
|
[
"Apache-2.0"
] | 26
|
2018-08-29T15:31:21.000Z
|
2021-06-24T08:05:53.000Z
|
nn_similarity_index/cwt_kernel_mat.py
|
forgi86/xfer
|
56d98a66d6adb2466d1a73b52f3b27193930a008
|
[
"Apache-2.0"
] | 57
|
2018-09-11T13:40:35.000Z
|
2022-02-22T14:43:34.000Z
|
# Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
# ==============================================================================
import os
os.environ["OMP_NUM_THREADS"] = "1"
import torch
import torch.nn as nn
import torch.backends.cudnn as cudnn
import torchvision
import torchvision.transforms as transforms
import torchvision.models as models
import numpy as np
from abc import ABC
import os
import argparse
from sketched_kernels import SketchedKernels
from utils import *
if __name__ == "__main__":
# Get arguments from the command line
parser = argparse.ArgumentParser(description='PyTorch CWT sketching kernel matrices')
parser.add_argument('--datapath', type=str,
help='absolute path to the dataset')
parser.add_argument('--modelname', type=str,
help='model name')
parser.add_argument('--pretrained', action='store_true',
help='whether to load a pretrained ImageNet model')
parser.add_argument('--seed', default=0, type=int,
help='random seed for sketching')
parser.add_argument('--task', default='cifar10', type=str, choices=['cifar10', 'cifar100', 'svhn', 'stl10'],
help='the name of the dataset, cifar10 or cifar100 or svhn or stl10')
parser.add_argument('--split', default='train', type=str,
help='split of the dataset, train or test')
parser.add_argument('--bsize', default=512, type=int,
help='batch size for computing the kernel')
parser.add_argument('--M', '--num-buckets-sketching', default=512, type=int,
help='number of buckets in Sketching')
parser.add_argument('--T', '--num-buckets-per-sample', default=1, type=int,
help='number of buckets each data sample is sketched to')
parser.add_argument('--freq_print', default=10, type=int,
help='frequency for printing the progress')
args = parser.parse_args()
# Set the backend and the random seed for running our code
device = 'cuda' if torch.cuda.is_available() else 'cpu'
torch.manual_seed(args.seed)
if device == 'cuda':
cudnn.benchmark = True
torch.cuda.manual_seed(args.seed)
# The size of images for training and testing ImageNet models
imgsize = 224
# Generate a dataloader that iteratively reads data
# Load a model, either pretrained or not
loader = load_dataset(args.task, args.split, args.bsize, args.datapath, imgsize)
net = load_model(device, args.modelname, pretrained=True)
# Set the model to be in the evaluation mode. VERY IMPORTANT!
# This step fixes the running statistics in batchnorm layers
# and disables dropout layers
net.eval()
csm = SketchedKernels(net, loader, imgsize, device, args.M, args.T, args.freq_print)
csm.compute_sketched_kernels()
# Compute sketched kernel matrices for each layer
for layer_id in range(len(csm.kernel_matrices)):
nkme = (csm.kernel_matrices[layer_id].sum() ** 0.5) / csm.n_samples
print("The norm of the kernel mean embedding of layer {:d} is {:.4f}".format(layer_id, nkme))
del net, loader
torch.cuda.empty_cache()
# Save the sketched kernel matrices
savepath = 'sketched_kernel_mat/'
if not os.path.isdir(savepath):
os.mkdir(savepath)
save_filename = '{}_{}_{}_{}.npy'.format(args.modelname, args.split, args.task, args.seed)
np.save(savepath + save_filename, csm.kernel_matrices)
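For context, a hedged example of how this script might be launched; the dataset path and model name are placeholders and the flags simply mirror the argparse definitions above.

# Hypothetical invocation (paths and model name are assumptions):
#   python cwt_kernel_mat.py --datapath /data/cifar10 --modelname resnet18 \
#       --pretrained --task cifar10 --split train --bsize 256 --M 512 --T 1
# The sketched kernel matrices end up in
# sketched_kernel_mat/<modelname>_<split>_<task>_<seed>.npy (see np.save above).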
| 40.201923
| 112
| 0.648649
| 543
| 4,181
| 4.907919
| 0.39779
| 0.033771
| 0.06379
| 0.019512
| 0.031144
| 0.019512
| 0
| 0
| 0
| 0
| 0
| 0.012805
| 0.234155
| 4,181
| 103
| 113
| 40.592233
| 0.819488
| 0.267161
| 0
| 0.034483
| 0
| 0
| 0.226645
| 0.015461
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.224138
| 0
| 0.224138
| 0.068966
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fd0c1d5bae5b02c0610c8254bb0ed033a6e6d1e5
| 1,079
|
py
|
Python
|
optaux/helper_functions/check_nonvalidated_auxs.py
|
coltonlloyd/OptAux
|
3ee1f8cdfa32f1a732ad41d5f854659159694160
|
[
"MIT"
] | 1
|
2019-06-05T10:41:06.000Z
|
2019-06-05T10:41:06.000Z
|
optaux/helper_functions/check_nonvalidated_auxs.py
|
coltonlloyd/OptAux
|
3ee1f8cdfa32f1a732ad41d5f854659159694160
|
[
"MIT"
] | null | null | null |
optaux/helper_functions/check_nonvalidated_auxs.py
|
coltonlloyd/OptAux
|
3ee1f8cdfa32f1a732ad41d5f854659159694160
|
[
"MIT"
] | null | null | null |
import cobra
from optaux import resources
resource_dir = resources.__path__[0]
met_to_rs = {'EX_pydam_e': ['PDX5PS', 'PYDXK', 'PYDXNK'],
'EX_orot_e': ['DHORTS', 'UPPRT', 'URIK2'],
'EX_thr__L_e': ['PTHRpp', 'THRS'],
'EX_pro__L_e': ['AMPTASEPG', 'P5CR'],
'EX_skm_e': ['DHQTi'],
'EX_cys__L_e': ['AMPTASECG', 'CYSS']}
for m, rs in met_to_rs.items():
ijo = cobra.io.load_json_model('%s/iJO1366.json' % resource_dir)
ijo.reactions.EX_o2_e.lower_bound = -20
biomass_reaction = list(ijo.objective.keys())[0]
biomass_reaction.lower_bound = .1
biomass_reaction.upper_bound = .1
for r in rs:
for g in [i.id for i in ijo.reactions.get_by_id(r).genes]:
print(ijo.genes.get_by_id(g).name,
[i.id for i in ijo.genes.get_by_id(g).reactions])
ijo.genes.get_by_id(g).remove_from_model()
ijo.objective = m
ijo.reactions.get_by_id(m).lower_bound = -10
ijo.optimize()
print(m, ijo.solution.f)
ijo.reactions.get_by_id(m).lower_bound = 0
| 33.71875
| 68
| 0.615385
| 168
| 1,079
| 3.630952
| 0.422619
| 0.04918
| 0.068852
| 0.083607
| 0.237705
| 0.211475
| 0.098361
| 0.098361
| 0
| 0
| 0
| 0.020457
| 0.229842
| 1,079
| 32
| 69
| 33.71875
| 0.713598
| 0
| 0
| 0
| 0
| 0
| 0.137963
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.08
| 0
| 0.08
| 0.08
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fd105e9dfaa8a1cb5dda8aab7e3ed98167bf73e4
| 10,430
|
py
|
Python
|
csv-to-mysql.py
|
LongPhan1912/Youtube-Playlist-Extractor
|
80b10e0b459c2cb264113cfaff644f5f28650813
|
[
"CC0-1.0"
] | null | null | null |
csv-to-mysql.py
|
LongPhan1912/Youtube-Playlist-Extractor
|
80b10e0b459c2cb264113cfaff644f5f28650813
|
[
"CC0-1.0"
] | null | null | null |
csv-to-mysql.py
|
LongPhan1912/Youtube-Playlist-Extractor
|
80b10e0b459c2cb264113cfaff644f5f28650813
|
[
"CC0-1.0"
] | null | null | null |
import csv
import MySQLdb
# installing MySQL: https://dev.mysql.com/doc/refman/8.0/en/osx-installation-pkg.html
# how to start, watch: https://www.youtube.com/watch?v=3vsC05rxZ8c
# or read this (absolutely helpful) guide: https://www.datacamp.com/community/tutorials/mysql-python
# this is mainly created to get a database of all the songs in my Favorites playlist
# if you wish, you can change the topic filter from 'MUSIC' to 'FILM', 'SPORTS', or 'POLITICS'
# 1/ initially, set up the MySQL connection and craft a cursor
mydb = MySQLdb.connect(host='localhost', user='root', passwd='yourPasswordHere')
cursor = mydb.cursor()
# 2/ create a database:
cursor.execute("CREATE DATABASE mydb")
mydb.commit()
# 3/ after database is created, comment out steps 1/ and 2/ and uncomment step 3/
# mydb = MySQLdb.connect(host='localhost', user='root', passwd='', database="mydb")
# cursor = mydb.cursor()
# from here on out, whenever you call `cursor.execute()`, call `mydb.commit()` right afterwards
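# e.g. (hedged sketch of the execute-then-commit pattern described above; the table
# name and values are hypothetical):
#     cursor.execute("UPDATE MainMusic SET genre = 'POP MUSIC' WHERE songID = 1")
#     mydb.commit()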
# 4/ create a table -- three options available to you
# the table's hardcoded right now, so if the columns here are changed, the other functions that read or write these columns need updating too
def initialise_main_music_table(table_name):
cursor.execute("CREATE TABLE " + table_name
+ " (songID INTEGER PRIMARY KEY AUTO_INCREMENT, \
songTitle VARCHAR(150) NOT NULL, \
artist VARCHAR(100) NOT NULL, \
genre VARCHAR(100) NOT NULL, \
videoLink VARCHAR(100) NOT NULL, \
viewCount BIGINT NOT NULL, \
likeToDislikeRatio decimal(5, 4) NOT NULL)")
mydb.commit()
# the main music table helps extract info to create sub tables for a specific music category
def initialise_custom_music_table(table_name, main_music_table_name):
cursor.execute("CREATE TABLE " + table_name
+ " (categorySongID INTEGER PRIMARY KEY AUTO_INCREMENT, \
mainSongID INTEGER NOT NULL DEFAULT 1, \
FOREIGN KEY(mainSongID) REFERENCES " + main_music_table_name + "(songID), \
songTitle VARCHAR(150) NOT NULL, \
artist VARCHAR(100) NOT NULL, \
genre VARCHAR(100) NOT NULL, \
videoLink VARCHAR(100) NOT NULL, \
viewCount BIGINT NOT NULL, \
likeToDislikeRatio decimal(5, 4) NOT NULL)")
mydb.commit()
# def create_custom_table(table_name):
# cursor.execute("CREATE TABLE " + table_name
# + " (tableID INTEGER PRIMARY KEY AUTO_INCREMENT, \
# videoTitle VARCHAR(150) NOT NULL, \
# author VARCHAR(100) NOT NULL, \
# category VARCHAR(100) NOT NULL, \
# videoLink VARCHAR(100) NOT NULL, \
# viewCount BIGINT NOT NULL, \
# likeToDislikeRatio decimal(5, 4) NOT NULL)")
# mydb.commit()
# 5/ from a list of wanted fields, the function searches for the index corresponding to each field on the list
# and stores the index inside a dict (easy to look up and flexible if the order of the columns in the csv file is changed)
def get_indices_of_csv_table_items(csv_file_name, wanted_items):
indices = {}
with open(csv_file_name) as csv_file:
csv_data = csv.reader(csv_file, delimiter=',')
csv_headings = next(csv_data)
for idx, heading in enumerate(csv_headings):
if heading in wanted_items:
indices[heading] = idx
csv_file.close()
return indices
wanted_items = ['song_name', 'artist', 'topics', 'video_link', 'view_count', 'like_to_dislike_ratio']
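# Hedged usage sketch (assumes a CSV such as 'favorite-playlist.csv' whose header row
# contains the headings listed in `wanted_items` above):
#     indices = get_indices_of_csv_table_items('favorite-playlist.csv', wanted_items)
#     print(indices['song_name'], indices['view_count'])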
# 6/ fill up our main table with the relevant data
def populate_main_music_table_from_csv(csv_file_name, table_name):
indices = get_indices_of_csv_table_items(csv_file_name, wanted_items)
with open(csv_file_name) as csv_file:
csv_data = csv.reader(csv_file, delimiter=',')
csv_headings = next(csv_data)
for idx, row in enumerate(csv_data):
song_name = row[indices['song_name']]
artist = row[indices['artist']]
if ' - Topic' in artist:
artist = artist[:artist.index(' - Topic')]
genre = row[indices['topics']][1:-1]
video_link = row[indices['video_link']]
view_count = int(row[indices['view_count']])
ratio = 0
if row[indices['like_to_dislike_ratio']]:
ratio = float(row[indices['like_to_dislike_ratio']][:-1]) / 100
if 'MUSIC' in genre:
cursor.execute(f"INSERT INTO {table_name} (songTitle, artist, genre, videoLink, viewCount, likeToDislikeRatio)\
VALUES(%s, %s, %s, %s, %s, %s)", (song_name, artist, genre, video_link, view_count, ratio))
mydb.commit() # remember to commit after populating the table
csv_file.close()
# 7/ fill up our custom table using data from the main music table
def populate_custom_music_table(your_new_table_name, main_music_table_name, column, chosen_value):
cursor.execute(f"INSERT INTO {your_new_table_name} (mainSongID, songTitle, artist, genre, videoLink, viewCount, likeToDislikeRatio)\
SELECT songID, songTitle, artist, genre, videoLink, viewCount, likeToDislikeRatio \
FROM {main_music_table_name} WHERE {column} LIKE '%{chosen_value}%'")
mydb.commit()
# -------------------------------------------------------------------
# -------------------SUPPLEMENTARY FUNCTIONS START-------------------
# -------------------------------------------------------------------
# add a field after table is created (new field placed after a specific column of a table)
def add_new_column(table_name, new_column_name, data_type, pivot_column):
cursor.execute(f"ALTER TABLE {table_name} ADD {new_column_name} {data_type} NOT NULL AFTER {pivot_column}")
mydb.commit()
# change data type for any given field
def modify_data_type(table_name, column_name, new_data_type):
cursor.execute(f"ALTER TABLE {table_name} MODIFY COLUMN {column_name} {new_data_type}")
mydb.commit()
# delete all the data from a specified table
def delete_data_from_table(table_name):
cursor.execute(f"DELETE FROM {table_name}")
mydb.commit()
def delete_selected_record_from_table(table_name, record):
cursor.execute(f"DELETE FROM {table_name} WHERE address = {record}")
mydb.commit()
# make a table disappear from existence :)
def drop_table(table_name):
cursor.execute(f"DROP TABLE {table_name}")
mydb.commit()
def print_table_plain(table_name):
cursor.execute(f"SELECT * FROM {table_name}")
result = cursor.fetchall()
for row in result:
print(row)
# print out all the songs in the playlist
# 'DESC' means descending order (most popular song on top) and 'ASC' is the opposite
def print_table_by_criteria(table_name, order_criteria, order):
if order_criteria != '' and order != '':
cursor.execute(f"SELECT * FROM {table_name} ORDER BY {order_criteria} {order}")
for item in cursor:
print(item)
# show the name of all the tables present in the database
def show_tables():
cursor.execute("SHOW TABLES")
tables = cursor.fetchall()
print(tables)
# check if a table already exists
def check_table_exists(table_name):
cursor.execute("""
SELECT COUNT(*)
FROM information_schema.tables
WHERE table_name = '{0}'
""".format(table_name.replace('\'', '\'\'')))
return cursor.fetchone()[0] == 1
# optional / not required function: should you wish to look up the different video topics
# if you want to search for all topics, leave `selected_topic` as an empty string
def get_all_selected_topics(csv_file_name, selected_topic):
res = []
indices = get_indices_of_csv_table_items(csv_file_name, wanted_items)
with open(csv_file_name) as csv_file:
csv_data = csv.reader(csv_file, delimiter=',')
csv_headings = next(csv_data)
for idx, row in enumerate(csv_data):
topics = row[indices['topics']][1:-1]
topic_list = topics.split(', ')
for item in topic_list:
if selected_topic in item and item not in res:
res.append(item)
return res
# ------------------------------------------------------------------
# -------------------SUPPLEMENTARY FUNCTIONS ENDS-------------------
# ------------------------------------------------------------------
# 8/ Create main music table
def build_main_music_table(csv_file_name, main_music_table_name):
initialise_main_music_table(main_music_table_name)
populate_main_music_table_from_csv(csv_file_name, main_music_table_name)
# 9/ Build a new music table based on the genre you love
def build_your_custom_music_table(csv_file_name, your_new_table_name, main_music_table_name, column, chosen_value):
if not check_table_exists(main_music_table_name):
build_main_music_table(csv_file_name, main_music_table_name)
initialise_custom_music_table(your_new_table_name, main_music_table_name)
populate_custom_music_table(your_new_table_name, main_music_table_name, column, chosen_value)
def main(): # example; feel free to change the variable names to your choosing
csv_file_name = 'favorite-playlist.csv' # name of csv file (use `main-extractor.py` first to create a csv file)
your_new_table_name = 'ElectronicMusic' # name your table
main_music_table_name = 'MainMusic' # name the main music table
column = 'genre' # column choices: songTitle, artist, genre, videoLink, viewCount, likeToDislikeRatio
chosen_value = 'ELECTRONIC MUSIC' # what you'd like to query, e.g. artist name or song title or genre
# to get a list of all possible video topics or music genres, you can run the function get_all_selected_topics()
# e.g. get_all_selected_topics('favorite-playlist.csv', '') -- pass an empty string to list every topic
order_criteria = 'viewCount' # e.g. viewCount or likeToDislikeRatio or artist name in alphabetical order
ascending_order = False # change to True if you want to print the table in ascending order (i.e. lowest value at the top)
order = 'ASC' if ascending_order else 'DESC'
build_your_custom_music_table(csv_file_name, your_new_table_name, main_music_table_name, column, chosen_value)
print_table_by_criteria(your_new_table_name, order_criteria, order)
if __name__ == "__main__":
main()
| 48.287037
| 136
| 0.661266
| 1,397
| 10,430
| 4.725125
| 0.219041
| 0.064081
| 0.050901
| 0.038176
| 0.395698
| 0.342524
| 0.28223
| 0.235419
| 0.215422
| 0.189062
| 0
| 0.008935
| 0.216683
| 10,430
| 215
| 137
| 48.511628
| 0.799021
| 0.346309
| 0
| 0.288889
| 0
| 0
| 0.124704
| 0.019231
| 0
| 0
| 0
| 0
| 0
| 1
| 0.133333
| false
| 0.007407
| 0.014815
| 0
| 0.17037
| 0.044444
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fd1270311d2747042f749172a656ddde2d001d75
| 1,221
|
py
|
Python
|
src/topologies/simple.py
|
sevenEng/Resolving-Consensus
|
a508701e19bd4ec0df735f5b094487983272dbb6
|
[
"MIT"
] | null | null | null |
src/topologies/simple.py
|
sevenEng/Resolving-Consensus
|
a508701e19bd4ec0df735f5b094487983272dbb6
|
[
"MIT"
] | null | null | null |
src/topologies/simple.py
|
sevenEng/Resolving-Consensus
|
a508701e19bd4ec0df735f5b094487983272dbb6
|
[
"MIT"
] | null | null | null |
from mininet.net import Mininet
from mininet.node import Controller, UserSwitch, IVSSwitch, OVSSwitch
from mininet.log import info, setLogLevel
setLogLevel("info")
import importlib
switch_num = 1
def add_switch(net):
global switch_num
res = "s%s" % str(switch_num)
switch_num += 1
return net.addSwitch(res)
host_num = 1
def add_host(net):
global host_num
res = "h%s" % str(host_num)
host_num += 1
return net.addHost(res)
client_num = 1
def add_client(net):
global client_num
res = "mc%s" % str(client_num)
client_num += 1
return net.addHost(res)
def setup(n="3", nc="10"):
n = int(n)
nc = int(nc)
# - Core setup -------
net = Mininet(controller=Controller)
info("*** Adding controller\n")
net.addController("c0")
info("*** Adding switches\n")
sw = add_switch(net)
info("*** Adding hosts and links\n")
cluster = [add_host(net) for _ in range(n)]
clients = [add_client(net) for _ in range(nc)]
for host in cluster:
net.addLink(host, sw)
for client in clients:
net.addLink(client, sw)
net.start()
cluster_ips = [host.IP() for host in cluster]
return (net, cluster, clients)
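# Hedged usage sketch (not part of the original module; requires a Mininet
# environment with root privileges, and the counts are illustrative):
#     net, cluster, clients = setup(n="3", nc="2")
#     print([host.IP() for host in cluster])
#     net.stop()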
| 18.784615
| 69
| 0.633907
| 177
| 1,221
| 4.254237
| 0.299435
| 0.031873
| 0.027888
| 0.039841
| 0.061089
| 0.061089
| 0
| 0
| 0
| 0
| 0
| 0.010741
| 0.23751
| 1,221
| 64
| 70
| 19.078125
| 0.798067
| 0.01638
| 0
| 0.04878
| 0
| 0
| 0.075897
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.097561
| false
| 0
| 0.097561
| 0
| 0.292683
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fd1396e2ed5013e365c0832fe7ee283e5e1bda20
| 856
|
py
|
Python
|
lunchapi/permissions.py
|
pesusieni999/lunchapplication
|
2aa2a4320a2ad85b39b74c5dcc3d960a46cdb6ef
|
[
"MIT"
] | null | null | null |
lunchapi/permissions.py
|
pesusieni999/lunchapplication
|
2aa2a4320a2ad85b39b74c5dcc3d960a46cdb6ef
|
[
"MIT"
] | null | null | null |
lunchapi/permissions.py
|
pesusieni999/lunchapplication
|
2aa2a4320a2ad85b39b74c5dcc3d960a46cdb6ef
|
[
"MIT"
] | null | null | null |
from rest_framework import permissions
__author__ = "Ville Myllynen"
__copyright__ = "Copyright 2017, Ohsiha Project"
__credits__ = ["Ville Myllynen"]
__license__ = "MIT"
__version__ = "1.0"
__maintainer__ = "Ville Myllynen"
__email__ = "ville.myllynen@student.tut.fi"
__status__ = "Development"
class IsOwnerOrReadOnly(permissions.BasePermission):
"""
Object-level permission to only allow owners of an object to edit it.
Assumes the model instance has an `author` attribute.
"""
def has_object_permission(self, request, view, obj):
# Read permissions are allowed to any request,
# so we'll always allow GET, HEAD or OPTIONS requests.
if request.method in permissions.SAFE_METHODS:
return True
# Instance must have an attribute named `author`.
return obj.author == request.user
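# Hedged usage sketch (not part of the original module): a DRF view could attach this
# permission alongside the built-in read-only check; `LunchViewSet`, `Lunch`, and
# `LunchSerializer` are illustrative names only.
#     from rest_framework import viewsets
#     class LunchViewSet(viewsets.ModelViewSet):
#         queryset = Lunch.objects.all()
#         serializer_class = LunchSerializer
#         permission_classes = (permissions.IsAuthenticatedOrReadOnly, IsOwnerOrReadOnly)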
| 31.703704
| 73
| 0.712617
| 103
| 856
| 5.572816
| 0.718447
| 0.090592
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.008824
| 0.205607
| 856
| 27
| 74
| 31.703704
| 0.835294
| 0.315421
| 0
| 0
| 0
| 0
| 0.20885
| 0.051327
| 0
| 0
| 0
| 0
| 0
| 1
| 0.071429
| false
| 0
| 0.071429
| 0
| 0.357143
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fd16885dbeb4939e362807cdc853aa44683b010f
| 18,284
|
py
|
Python
|
alphago/alphago.py
|
noahwaterfieldprice/alphago
|
4a7bba6d9758ccf1d2f2d7ae964b5d5d48021ee8
|
[
"MIT"
] | 4
|
2018-02-12T09:11:26.000Z
|
2022-01-24T20:46:15.000Z
|
alphago/alphago.py
|
noahwaterfieldprice/alphago
|
4a7bba6d9758ccf1d2f2d7ae964b5d5d48021ee8
|
[
"MIT"
] | null | null | null |
alphago/alphago.py
|
noahwaterfieldprice/alphago
|
4a7bba6d9758ccf1d2f2d7ae964b5d5d48021ee8
|
[
"MIT"
] | 3
|
2018-08-23T15:08:54.000Z
|
2020-03-13T14:21:08.000Z
|
from collections import OrderedDict
import numpy as np
from tqdm import tqdm
import tensorflow as tf
from .player import MCTSPlayer, RandomPlayer, OptimalPlayer
from .evaluator import evaluate
from .mcts_tree import MCTSNode, mcts
from .utilities import sample_distribution
__all__ = ["train_alphago", "self_play", "process_self_play_data",
"process_training_data"]
def compute_checkpoint_name(step, path):
return path + "{}.checkpoint".format(step)
def train_alphago(game, create_estimator, self_play_iters, training_iters,
checkpoint_path, summary_path, alphago_steps=100,
evaluate_every=1, batch_size=32, mcts_iters=100, c_puct=1.0,
replay_length=100000, num_evaluate_games=500,
win_rate=0.55, verbose=True, restore_step=None,
self_play_file_path=None):
"""Trains AlphaGo on the game.
Parameters
----------
game: object
An object that has the attributes a game needs.
create_estimator: func
Creates a trainable estimator for the game. The estimator should
have a train function.
self_play_iters: int
Number of self-play games to play each self-play step.
training_iters: int
Number of training iters to use for each training step.
checkpoint_path: str
Where to save the checkpoints to.
summary_path: str
Where to save the summaries (tensorboard) to.
alphago_steps: int
Number of steps to run the alphago loop for.
evaluate_every: int
Evaluate the network every evaluate_every steps.
batch_size: int
Batch size to train with.
mcts_iters: int
Number of iterations to run MCTS for.
c_puct: float
Parameter for MCTS. See AlphaGo paper.
replay_length: int
The amount of training data to use. Only train on the most recent
training data.
num_evaluate_games: int
Number of games to evaluate the players for.
win_rate: float
Number between 0 and 1. Only update self-play player when training
player beats self-play player by at least this rate.
verbose: bool
Whether or not to output progress.
restore_step: int or None
If given, restore the network from the checkpoint at this step.
self_play_file_path: str or None
Where to load self play data from, if given.
"""
# TODO: Do self-play, training and evaluating in parallel.
# We use a fixed estimator (the best one that's been trained) to
# generate self-play training data. We then train the training estimator
# on that data. We produce a checkpoint every 1000 training steps. This
# checkpoint is then evaluated against the current best neural network.
# If it beats the current best network by at least 55% then it becomes
# the new best network.
# 1 is the fixed player, and 2 is the training player.
self_play_estimator = create_estimator()
training_estimator = create_estimator()
graph = tf.Graph()
sess = tf.Session(graph=graph)
with graph.as_default():
tf_success_rate = tf.placeholder(
tf.float32, name='success_rate_summary')
success_rate_summary = tf.summary.scalar(
'success_rate_summary', tf_success_rate)
tf_success_rate_random = tf.placeholder(
tf.float32, name='success_rate_random')
success_rate_random_summary = tf.summary.scalar(
'success_rate_random', tf_success_rate_random)
#tf_success_rate_optimal = tf.placeholder(
# tf.float32, name='success_rate_optimal')
#success_rate_optimal_summary = tf.summary.scalar(
# 'success_rate_optimal', tf_success_rate_optimal)
#merged_summary = tf.summary.merge([success_rate_summary,
# success_rate_random_summary,
# success_rate_optimal_summary])
merged_summary = tf.summary.merge([success_rate_summary,
success_rate_random_summary])
sess.run(tf.global_variables_initializer())
writer = tf.summary.FileWriter(summary_path)
if restore_step:
restore_path = compute_checkpoint_name(restore_step, checkpoint_path)
self_play_estimator.restore(restore_path)
training_estimator.restore(restore_path)
all_losses = []
self_play_data = None
initial_step = restore_step + 1 if restore_step else 0
for alphago_step in range(initial_step, initial_step + alphago_steps):
self_play_data = generate_self_play_data(
game, self_play_estimator, mcts_iters, c_puct, self_play_iters,
verbose=verbose, data=self_play_data)
training_data = process_training_data(self_play_data, replay_length)
if len(training_data) < 100:
continue
optimise_estimator(training_estimator, training_data, batch_size,
training_iters, writer=writer, verbose=verbose)
# Evaluate the players and choose the best.
if alphago_step % evaluate_every == 0:
success_rate, success_rate_random = \
evaluate_model(game, self_play_estimator,
training_estimator, mcts_iters, c_puct,
num_evaluate_games, verbose=verbose)
summary = sess.run(merged_summary,
feed_dict=
{tf_success_rate: success_rate,
tf_success_rate_random: success_rate_random})
writer.add_summary(summary, training_estimator.global_step)
checkpoint_model(training_estimator, alphago_step, checkpoint_path)
# If training player beats self-play player by a large enough
# margin, then it becomes the new best estimator.
if success_rate > win_rate:
# Create a new self player, with the weights of the most
# recent training_estimator.
if verbose:
print("Updating self-play player.")
print("Restoring from step: {}".format(alphago_step))
self_play_estimator = create_estimator()
restore_path = compute_checkpoint_name(alphago_step,
checkpoint_path)
self_play_estimator.restore(restore_path)
return all_losses
def optimise_estimator(estimator, training_data, batch_size, training_iters,
mode='reinforcement', writer=None, verbose=True):
summary = estimator.train(training_data, batch_size, training_iters,
mode=mode, writer=writer, verbose=verbose)
return summary
def evaluate_model(game, player1, player2, mcts_iters, c_puct, num_games,
verbose=True):
# Checkpoint the model.
# TODO: Implement evaluation
# TODO: Choose tau more systematically.
if verbose:
print("Evaluating. Self-player vs training, then training vs "
"self-player")
wins1, wins2, draws = evaluate_estimators_in_both_positions(
game, player1.create_estimate_fn(), player2.create_estimate_fn(),
mcts_iters, c_puct, num_games, tau=0.01, verbose=verbose)
if verbose:
print("Self-play player wins: {}, Training player wins: {}, "
"Draws: {}".format(wins1, wins2, draws))
success_rate = (wins2 + draws) / (wins1 + wins2 + draws)
if verbose:
print("Win + draw rate for training player: {}".format(
success_rate))
# Also evaluate against a random player
wins1, wins2, draws = evaluate_mcts_against_random_player(
game, player2.create_estimate_fn(), mcts_iters, c_puct, num_games,
tau=0.01, verbose=verbose)
success_rate_random = (wins1 + draws) / (wins1 + wins2 + draws)
if verbose:
print("Training player vs random. Wins: {}, Losses: {}, "
"Draws: {}".format(wins1, wins2, draws))
## Also evaluate against an optimal player
#wins1, wins2, draws = evaluate_mcts_against_optimal_player(
# game, player2.create_estimate_fn(), mcts_iters, c_puct, num_games,
# tau=0.1, verbose=verbose)
#success_rate_optimal = (wins1 + draws) / (wins1 + wins2 + draws)
#if verbose:
# print("Training player vs optimal. Wins: {}, Losses: {}, "
# "Draws: {}".format(wins1, wins2, draws))
#return success_rate, success_rate_random, success_rate_optimal
return success_rate, success_rate_random
def checkpoint_model(player, step, path):
"""Checkpoint the training player.
"""
checkpoint_name = compute_checkpoint_name(step, path)
player.save(checkpoint_name)
def evaluate_mcts_against_optimal_player(game, estimator, mcts_iters,
c_puct, num_evaluate_games, tau,
verbose=True):
# Evaluate estimator1 vs estimator2.
players = {1: MCTSPlayer(game, estimator, mcts_iters, c_puct, tau=tau),
2: OptimalPlayer(game)}
player1_results, _ = evaluate(game, players, num_evaluate_games,
verbose=verbose)
wins1 = player1_results[1]
wins2 = player1_results[-1]
draws = player1_results[0]
# Evaluate estimator2 vs estimator1.
players = {1: OptimalPlayer(game),
2: MCTSPlayer(game, estimator, mcts_iters, c_puct, tau=tau)}
player1_results, _ = evaluate(game, players, num_evaluate_games,
verbose=verbose)
wins1 += player1_results[-1]
wins2 += player1_results[1]
draws += player1_results[0]
return wins1, wins2, draws
def evaluate_mcts_against_random_player(game, estimator, mcts_iters,
c_puct, num_evaluate_games, tau,
verbose=True):
# Evaluate estimator1 vs estimator2.
players = {1: MCTSPlayer(game, estimator, mcts_iters, c_puct, tau=tau),
2: RandomPlayer(game)}
player1_results, _ = evaluate(game, players, num_evaluate_games,
verbose=verbose)
wins1 = player1_results[1]
wins2 = player1_results[-1]
draws = player1_results[0]
# Evaluate estimator2 vs estimator1.
players = {1: RandomPlayer(game),
2: MCTSPlayer(game, estimator, mcts_iters, c_puct, tau=tau)}
player1_results, _ = evaluate(game, players, num_evaluate_games,
verbose=verbose)
wins1 += player1_results[-1]
wins2 += player1_results[1]
draws += player1_results[0]
return wins1, wins2, draws
def evaluate_estimators_in_both_positions(game, estimator1, estimator2,
mcts_iters, c_puct,
num_evaluate_games, tau,
verbose=True):
# Evaluate estimator1 vs estimator2.
players = {1: MCTSPlayer(game, estimator1, mcts_iters, c_puct, tau=tau),
2: MCTSPlayer(game, estimator2, mcts_iters, c_puct, tau=tau)}
player1_results, _ = evaluate(game, players, num_evaluate_games,
verbose=verbose)
wins1 = player1_results[1]
wins2 = player1_results[-1]
draws = player1_results[0]
# Evaluate estimator2 vs estimator1.
players = {1: MCTSPlayer(game, estimator2, mcts_iters, c_puct, tau=tau),
2: MCTSPlayer(game, estimator1, mcts_iters, c_puct, tau=tau)}
player1_results, _ = evaluate(game, players, num_evaluate_games,
verbose=verbose)
wins1 += player1_results[-1]
wins2 += player1_results[1]
draws += player1_results[0]
return wins1, wins2, draws
def generate_self_play_data(game, estimator, mcts_iters, c_puct, num_iters,
data=None, verbose=True):
"""Generates self play data for a number of iterations for a given
estimator. Saves to save_file_path, if given.
"""
# if save_file_path is not None:
# with open(save_file_path, 'r') as f:
# data = json.load(save_file_path)
# index = max(data.keys()) + 1
if data is not None:
index = max(data.keys()) + 1
else:
data = OrderedDict()
index = 0
# Collect self-play training data using the best estimator.
disable_tqdm = False if verbose else True
for _ in tqdm(range(num_iters), disable=disable_tqdm):
data[index] = self_play(
game, estimator.create_estimate_fn(), mcts_iters, c_puct)
index += 1
# if save_file_path is not None:
# with open(save_file_path, 'w') as f:
# json.dump(data, f)
return data
def self_play(game, estimator, mcts_iters, c_puct):
"""Plays a single game using MCTS to choose actions for both players.
Parameters
----------
game: Game
An object representing the game to be played.
estimator: func
An estimate function.
mcts_iters: int
Number of iterations to run MCTS for.
c_puct: float
Parameter for MCTS.
Returns
-------
game_state_list: list
A list of game states encountered in the self-play game. Starts
with the initial state and ends with a terminal state.
action_probs_list: list
A list of action probability dictionaries, as returned by MCTS
each time the algorithm has to take an action. The ith action
probabilities dictionary corresponds to the ith game_state, and
action_probs_list has length one less than game_state_list,
since we don't have to move in a terminal state.
"""
node = MCTSNode(game.initial_state, game.current_player(game.initial_state))
game_state_list = [node.game_state]
action_probs_list = []
action_list = []
move_count = 0
while not node.is_terminal:
# TODO: Choose this better.
tau = 1
if move_count >= 10:
tau = 1 / (move_count - 10 + 1)
# First run MCTS to compute action probabilities.
action_probs = mcts(node, game, estimator, mcts_iters, c_puct, tau=tau)
# Choose the action according to the action probabilities.
action = sample_distribution(action_probs)
action_list.append(action)
# Play the action
node = node.children[action]
# Add the action probabilities and game state to the list.
action_probs_list.append(action_probs)
game_state_list.append(node.game_state)
move_count += 1
data = process_self_play_data(game_state_list, action_list,
action_probs_list, game, game.action_indices)
return data
def process_training_data(self_play_data, replay_length=None):
"""Takes self play data and returns a list of tuples (state,
action_probs, utility) suitable for training an estimator.
Parameters
----------
self_play_data: dict
Dictionary with keys given by an index (int) and values given by a
log of the game. This is a list of tuples as in generate self play
data.
replay_length: int or None
If given, only return the last replay_length (state, probs, utility)
tuples.
"""
training_data = []
for index, game_log in self_play_data.items():
for (state, action, probs_vector, z) in game_log:
training_data.append((state, probs_vector, z))
print("Training data length: {}".format(len(training_data)))
print("Self play data length: {}".format(len(self_play_data)))
if replay_length is not None:
training_data = training_data[-replay_length:]
return training_data
def process_self_play_data(states_, actions_, action_probs_, game,
action_indices):
"""Takes a list of states and action probabilities, as returned by
play, and creates training data from this. We build up a list
consisting of (state, probs, z) tuples, where player is the player
in state 'state', and 'z' is the utility to 'player' in 'last_state'.
We omit the terminal state from the list as there are no probabilities to
train. TODO: Potentially include the terminal state in order to train the
value. # TODO: why the underscores in the parameter names?
Parameters
----------
states_: list
A list of n states, with the last being terminal.
actions_: list
A list of n-1 actions, being the action taken in the corresponding
state.
action_probs_: list
A list of n-1 dictionaries containing action probabilities. The ith
dictionary applies to the ith state, representing the probabilities
returned by play of taking each available action in the state.
game: Game
An object representing the game to be played.
action_indices: dict
A dictionary mapping actions (in the form of the legal_actions
function) to action indices (to be used for training the neural
network).
Returns
-------
training_data: list
A list consisting of (state, action, probs, z) tuples, where player
is the player in state 'state', and 'z' is the utility to 'player' in
'last_state'.
"""
# Get the outcome for the game. This should be the last state in states_.
last_state = states_.pop()
outcome = game.utility(last_state)
# Now action_probs_ and states_ are the same length.
training_data = []
for state, action, probs in zip(states_, actions_, action_probs_):
# Get the player in the state, and the value to this player of the
# terminal state.
player = game.current_player(state)
z = outcome[player]
# Convert the probs dictionary to a numpy array using action_indices.
probs_vector = np.zeros(len(action_indices))
for a, prob in probs.items():
probs_vector[action_indices[a]] = prob
non_nan_state = np.nan_to_num(state)
training_data.append((non_nan_state, action, probs_vector, z))
return training_data
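# Hedged usage sketch (not part of the original module): `NoughtsAndCrosses` and
# `create_trivial_estimator` are illustrative stand-ins for a game object and an
# estimator factory with the interface described in the train_alphago docstring.
#     game = NoughtsAndCrosses()
#     losses = train_alphago(game, create_trivial_estimator,
#                            self_play_iters=10, training_iters=100,
#                            checkpoint_path='checkpoints/', summary_path='summaries/',
#                            alphago_steps=5, mcts_iters=50, num_evaluate_games=20)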
| 39.152034
| 80
| 0.644553
| 2,293
| 18,284
| 4.924117
| 0.139555
| 0.030467
| 0.018599
| 0.026038
| 0.400939
| 0.347533
| 0.295279
| 0.246125
| 0.232132
| 0.211762
| 0
| 0.014066
| 0.280683
| 18,284
| 466
| 81
| 39.236052
| 0.844434
| 0.366823
| 0
| 0.280374
| 0
| 0
| 0.044378
| 0.003886
| 0
| 0
| 0
| 0.008584
| 0
| 1
| 0.056075
| false
| 0
| 0.037383
| 0.004673
| 0.14486
| 0.037383
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fd1902e85156fc45744e6e48892733db33d5f755
| 4,373
|
py
|
Python
|
extract_skip_thought.py
|
youngfly11/ReferCOCO-Pretraining-Detectron2
|
8c8536a4d822b3cf9140380442a440d42e948c38
|
[
"Apache-2.0"
] | 2
|
2020-08-14T08:00:53.000Z
|
2020-11-21T11:01:55.000Z
|
extract_skip_thought.py
|
youngfly11/ReferCOCO-Pretraining-Detectron2
|
8c8536a4d822b3cf9140380442a440d42e948c38
|
[
"Apache-2.0"
] | null | null | null |
extract_skip_thought.py
|
youngfly11/ReferCOCO-Pretraining-Detectron2
|
8c8536a4d822b3cf9140380442a440d42e948c38
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python3.6
# -*- coding: utf-8 -*-
# @Time : 2020/6/25 22:41
# @Author : Yongfei Liu
# @Email : liuyf3@shanghaitech.edu.cn
import numpy as np
import os.path as osp
import os
import pickle
from collections import OrderedDict
import torch
import json
from detectron2.data.datasets.builtin_meta import COCO_CATEGORIES
def check_objects_vocab():
with open('./flickr30k_datasets/objects_vocab.txt', 'r') as load_f:
object_vocab = load_f.readlines()
with open('./flickr30k_datasets/skip-thoughts/dictionary.txt', 'r') as load_f:
skip_dict = load_f.readlines()
object_v1 = []
for vocab in object_vocab:
object_v1.append(vocab.strip())
skip_v1 = []
for sk_dict in skip_dict:
skip_v1.append(sk_dict.strip())
for vocab in object_v1:
vocab = vocab.split(' ')
for vo in vocab:
if vo not in skip_v1:
print(vocab)
# note: this looks like a method copied out of a class -- it references `self.vocab`, so it cannot be called as a plain module-level function without a suitable `self`
def _make_emb_state_dict(self, dictionary, parameters):
weight = torch.zeros(len(self.vocab)+1, 620) # first dim = zeros -> +1
unknown_params = parameters[dictionary['UNK']]
nb_unknown = 0
for id_weight, word in enumerate(self.vocab):
if word in dictionary:
id_params = dictionary[word]
params = parameters[id_params]
else:
print('Warning: word `{}` not in dictionary'.format(word))
params = unknown_params
nb_unknown += 1
weight[id_weight+1] = torch.from_numpy(params)
state_dict = OrderedDict({'weight':weight})
if nb_unknown > 0:
print('Warning: {}/{}({}) words are not in dictionary, thus set UNK embedding parameter to init'
.format(nb_unknown, len(self.vocab), len(dictionary)))
return state_dict
def extract_embedding():
# {"color": [220, 20, 60], "isthing": 1, "id": 1, "name": "person"},
path = './RefSegDatasets/refseg_anno'
dataset = 'refcoco+'
# object_vocab = []
# for cat in COCO_CATEGORIES:
# thing = cat['isthing']
# if thing==1:
# object_vocab.append(cat['name'])
# with open('./flickr30k_datasets/objects_vocab.txt', 'r') as load_f:
# object_vocab = load_f.readlines()
with open('./flickr30k_datasets/skip-thoughts/dictionary.txt', 'r') as load_f:
skip_dict = load_f.readlines()
skip_dict = {word.strip():idx for idx, word in enumerate(skip_dict)}
path_params = './flickr30k_datasets/skip-thoughts/utable.npy'
params = np.load(path_params, encoding='latin1', allow_pickle=True) # to load from python2
# object_embed = []
# for vocab in object_vocab:
# vocab = vocab.strip().split(' ')
# vocab_eb = []
# for vb in vocab:
# vb_idx = skip_dict.get(vb)
# vocab_eb.append(params[vb_idx].squeeze())
#
# vocab_eb = np.stack(vocab_eb, axis=0).mean(0)
# object_embed.append(vocab_eb)
#
# object_embed = np.array(object_embed) ## 1600*620
# print('object_dim', object_embed.shape)
#
# with open(osp.join(path, dataset, 'skip_label.pkl'), 'wb') as pickle_dump:
# pickle.dump(object_embed, pickle_dump)
vocab_file = open(osp.join(path, dataset, 'vocab.json'))
vocab = json.load(vocab_file)
vocab_file.close()
# add_vocab = ['relate', 'butted']
# vocab.extend(add_vocab)
skip_thoughts_dict = {}
for vb in vocab:
vb = vb.strip()
vb_idx = skip_dict.get(vb)
if vb_idx is not None:
skip_thoughts_dict[vb] = params[vb_idx].squeeze()
else:
vb_split = vb.split('-')
vb_split_embed = []
for vbs in vb_split:
vbs_idx = skip_dict.get(vbs)
if vbs_idx is not None:
vb_split_embed.append(params[vbs_idx].squeeze())
else:
print(vb, 'not in dictionary')
break
if len(vb_split_embed) == len(vb_split):
# print(vb, 'are in list')
vb_split_embed = np.stack(vb_split_embed, axis=0).mean(0)
skip_thoughts_dict[vb] = vb_split_embed
print(len(vocab))
with open(osp.join(path, dataset, 'skip_vocab.pkl'), 'wb') as pickle_dump:
pickle.dump(skip_thoughts_dict, pickle_dump)
if __name__ == '__main__':
extract_embedding()
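# Hedged follow-up sketch (not part of the original script): reload the dumped
# vocabulary embeddings; the path and dataset names mirror those used above.
#     with open(osp.join('./RefSegDatasets/refseg_anno', 'refcoco+', 'skip_vocab.pkl'), 'rb') as f:
#         skip_vocab = pickle.load(f)
#     print(len(skip_vocab), np.shape(next(iter(skip_vocab.values()))))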
| 30.58042
| 104
| 0.607363
| 576
| 4,373
| 4.390625
| 0.262153
| 0.027679
| 0.02847
| 0.039541
| 0.215896
| 0.180308
| 0.166074
| 0.120996
| 0.120996
| 0.120996
| 0
| 0.01995
| 0.266408
| 4,373
| 142
| 105
| 30.795775
| 0.768392
| 0.241482
| 0
| 0.090909
| 0
| 0
| 0.125571
| 0.0637
| 0
| 0
| 0
| 0
| 0
| 1
| 0.038961
| false
| 0
| 0.103896
| 0
| 0.155844
| 0.064935
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fd1912c311e861ca371e1043073ef9f199c996c4
| 4,909
|
py
|
Python
|
pyutil_mongo/cfg.py
|
chhsiao1981/pyutil_mongo
|
facea2376b48dd7157d4633ab8128c8daf7e59ef
|
[
"MIT"
] | null | null | null |
pyutil_mongo/cfg.py
|
chhsiao1981/pyutil_mongo
|
facea2376b48dd7157d4633ab8128c8daf7e59ef
|
[
"MIT"
] | null | null | null |
pyutil_mongo/cfg.py
|
chhsiao1981/pyutil_mongo
|
facea2376b48dd7157d4633ab8128c8daf7e59ef
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Attributes:
config (dict): Description
logger (logging.Logger): Description
"""
import logging
import pymongo
logger = None
config = {}
class MongoMap(object):
"""Info about MongoDB
Attributes:
ca (None, optional): ssl-ca
cert (None, optional): ssl-cert
collection_map (dict): mapping for the collection-name in the code vs. real collection-name in the mongo.
db_name (str, optional): db-name used in the code.
ensure_index (None, optional): ensure-index
ensure_unique_index (None, optional): ensure-unique-index
hostname (str): hostname of the real mongo.
mongo_db_name (str, optional): real db-name in mongodb.
ssl (bool, optional): whether to use ssl
"""
def __init__(self, collection_map: dict, ensure_index=None, ensure_unique_index=None, db_name="mongo", hostname="localhost:27017", mongo_db_name="test", ssl=False, cert=None, ca=None):
self.db_name = db_name
self.hostname = hostname
self.mongo_db_name = mongo_db_name
self.collection_map = collection_map
self.ensure_index = ensure_index
self.ensure_unique_index = ensure_unique_index
self.ssl = ssl
self.cert = cert
self.ca = ca
def init(the_logger: logging.Logger, mongo_maps: list):
"""init
Args:
the_logger (logging.Logger): Description
mongo_maps (list): list of MongoDB info
Returns:
TYPE: Description
"""
global logger
logger = the_logger
return restart_mongo(mongo_maps=mongo_maps)
def restart_mongo(collection_name="", db_name="", mongo_maps=None):
"""restarting mongo
Args:
collection_name (str, optional): collection-name
db_name (str, optional): db-name
mongo_maps (None, optional): mongo-maps
Returns:
TYPE: Description
"""
# (re)initialize mongo
global config
if mongo_maps is None:
mongo_maps = [each['mongo_map'] for each in config.values()]
if len(mongo_maps) == 0:
return
errs = []
for idx, mongo_map in enumerate(mongo_maps):
each_err = _init_mongo_map_core(mongo_map, collection_name=collection_name, db_name=db_name)
if each_err:
errs.append(each_err)
logger.error('(%s/%s): e: %s', idx, len(mongo_maps), each_err)
if not errs:
return None
err_str = ','.join(['%s' % (each) for each in errs])
return Exception(err_str)
def _init_mongo_map_core(mongo_map: MongoMap, collection_name="", db_name=""):
"""Summary
Args:
mongo_map (MongoMap): Description
collection_name (str, optional): Description
db_name (str, optional): Description
Returns:
TYPE: Description
"""
global config
global logger
mongo_map_db_name, hostname, mongo_db_name, collection_map, ensure_index, ensure_unique_index = mongo_map.db_name, mongo_map.hostname, mongo_map.mongo_db_name, mongo_map.collection_map, mongo_map.ensure_index, mongo_map.ensure_unique_index
if db_name != '' and mongo_map_db_name != db_name:
return
if collection_name != '' and collection_name not in collection_map:
return
if collection_name == '' and mongo_map_db_name in config:
return Exception('db already in config: db_name: %s config: %s' % (mongo_map_db_name, config[mongo_map_db_name]))
if ensure_index is None:
ensure_index = {}
if ensure_unique_index is None:
ensure_unique_index = {}
# mongo_server_url
mongo_server_url = 'mongodb://%s/%s' % (hostname, mongo_db_name)
# mongo-server-client
mongo_kwargs = {}
if mongo_map.ssl:
mongo_kwargs.update({
'ssl': True,
'authSource': '$external',
'authMechanism': 'MONGODB-X509',
'ssl_certfile': mongo_map.cert,
'ssl_ca_certs': mongo_map.ca,
})
mongo_server_client = pymongo.MongoClient(
mongo_server_url,
**mongo_kwargs,
)[mongo_db_name]
# config-by-db-name
config_by_db_name = {'mongo_map': mongo_map, 'db': {}, 'url': mongo_server_url}
# collection
for (key, val) in collection_map.items():
logger.info('mongo: %s => %s', key, val)
config_by_db_name['db'][key] = mongo_server_client[val]
# ensure index
for key, val in ensure_index.items():
logger.info('to ensure_index: key: %s', key)
config_by_db_name['db'][key].create_index(val, background=True)
# ensure unique index
for key, val in ensure_unique_index.items():
logger.info('to ensure_unique_index: key: %s', key)
config_by_db_name['db'][key].create_index(val, background=True, unique=True)
config[mongo_map_db_name] = config_by_db_name
def clean():
"""Reset config
"""
global config
config = {}
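# Hedged usage sketch (not part of the original module; the collection names and the
# localhost mongod are illustrative assumptions):
#     import logging
#     the_map = MongoMap(collection_map={'users': 'users_collection'},
#                        db_name='mongo', hostname='localhost:27017',
#                        mongo_db_name='test')
#     err = init(logging.getLogger(__name__), [the_map])
#     if err is not None:
#         raise err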
| 28.375723
| 243
| 0.646771
| 648
| 4,909
| 4.621914
| 0.162037
| 0.074124
| 0.062437
| 0.032721
| 0.211352
| 0.142571
| 0.039399
| 0.039399
| 0.039399
| 0.039399
| 0
| 0.002692
| 0.243227
| 4,909
| 172
| 244
| 28.540698
| 0.803499
| 0.254023
| 0
| 0.12987
| 0
| 0
| 0.077967
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.064935
| false
| 0
| 0.025974
| 0
| 0.194805
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fd1a5012b7966cdeb8f03d71591cc6d8a74c6420
| 2,337
|
py
|
Python
|
tests/stakkr_compose_test.py
|
dwade75/stakkr
|
ae77607e84b5b305ae8f5a14eb8f22237d943a29
|
[
"Apache-2.0"
] | null | null | null |
tests/stakkr_compose_test.py
|
dwade75/stakkr
|
ae77607e84b5b305ae8f5a14eb8f22237d943a29
|
[
"Apache-2.0"
] | null | null | null |
tests/stakkr_compose_test.py
|
dwade75/stakkr
|
ae77607e84b5b305ae8f5a14eb8f22237d943a29
|
[
"Apache-2.0"
] | null | null | null |
import os
import sys
import stakkr.stakkr_compose as sc
import subprocess
import unittest
base_dir = os.path.abspath(os.path.dirname(__file__))
sys.path.insert(0, base_dir + '/../')
# https://docs.python.org/3/library/unittest.html#assert-methods
class StakkrComposeTest(unittest.TestCase):
services = {
'a': 'service_a.yml',
'b': 'service_b.yml',
}
def test_get_invalid_config_in_cli(self):
cmd = ['stakkr-compose', '-c', base_dir + '/static/config_invalid.ini']
result = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
for line in result.stdout:
self.assertRegex(line.decode(), 'Failed validating.*config_invalid.ini.*')
break
def test_get_uid(self):
uid = '1000' if os.name == 'nt' else str(os.getuid())
self.assertEqual(sc._get_uid(None), uid)
def test_get_gid(self):
gid = '1000' if os.name == 'nt' else str(os.getgid())
self.assertEqual(sc._get_gid(None), gid)
def test_get_uid_whenset(self):
self.assertEqual(sc._get_uid(1000), '1000')
def test_get_gid_whenset(self):
self.assertEqual(sc._get_gid(1000), '1000')
def test_get_wrong_enabled_service(self):
with self.assertRaises(SystemExit):
sc.get_enabled_services(['c'])
def test_get_right_enabled_service(self):
services_files = sc.get_enabled_services(['maildev'])
self.assertTrue(services_files[0].endswith('static/services/maildev.yml'))
def test_get_available_services(self):
services = sc.get_available_services()
self.assertTrue('apache' in services)
self.assertTrue('mongo' in services)
self.assertTrue('php' in services)
self.assertTrue('elasticsearch' in services)
self.assertEqual('static/services/apache.yml', services['apache'][-26:])
self.assertEqual('static/services/mongo.yml', services['mongo'][-25:])
def test_get_valid_configured_services(self):
services = sc.get_configured_services(base_dir + '/static/config_valid.ini')
self.assertTrue('maildev' in services)
self.assertTrue('php' in services)
self.assertFalse('mongo' in services)
self.assertFalse('elasticsearch' in services)
if __name__ == "__main__":
unittest.main()
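# Hedged usage note (not part of the original file): assuming the stakkr package and
# the static test fixtures referenced above are installed, the suite can be run either
# directly (`python tests/stakkr_compose_test.py`) or via discovery
# (`python -m unittest discover tests`).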
| 31.16
| 86
| 0.670946
| 297
| 2,337
| 5.043771
| 0.309764
| 0.080107
| 0.06008
| 0.053405
| 0.218959
| 0.126836
| 0.085447
| 0.085447
| 0
| 0
| 0
| 0.016498
| 0.195978
| 2,337
| 74
| 87
| 31.581081
| 0.780734
| 0.026102
| 0
| 0.04
| 0
| 0
| 0.139463
| 0.070392
| 0
| 0
| 0
| 0
| 0.34
| 1
| 0.18
| false
| 0
| 0.1
| 0
| 0.32
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fd1dc3801dfc8c9bd5d0968ee738990d98e2881b
| 2,792
|
py
|
Python
|
DQIC/backtesting/run.py
|
bladezzw/DeepQuantInChina
|
ce74a9bf8db91e3545ccc3e7af81f80796a536fa
|
[
"MIT"
] | 8
|
2019-04-14T03:05:19.000Z
|
2020-02-13T18:35:41.000Z
|
DQIC/backtesting/run.py
|
bladezzw/DeepQuantInChina
|
ce74a9bf8db91e3545ccc3e7af81f80796a536fa
|
[
"MIT"
] | null | null | null |
DQIC/backtesting/run.py
|
bladezzw/DeepQuantInChina
|
ce74a9bf8db91e3545ccc3e7af81f80796a536fa
|
[
"MIT"
] | 2
|
2019-05-08T08:23:50.000Z
|
2020-01-23T03:54:41.000Z
|
import os,sys
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
sys.path.append(BASE_DIR)
import datetime
import time
from backtesting.backtest import Backtest
from backtesting.data import HistoricCSVDataHandler
from backtesting.execution import SimulatedExecutionHandler
from backtesting.portfolio import Portfolio,Portfolio_For_futures
from Strategy.strategy import MovingAverageCrossStrategy
def exc_time(func, *args, **kwargs):
"""
计算运行时间
:param func: a function
:param args: arguements of the function
:param kwargs: arguements of the function
:return: time costed on running the function
"""
def wrapper(func, *args, **kwargs):
start_time = time.time()
func(*args, **kwargs)
stop_time = time.time()
print("run time is %s" % (stop_time - start_time))
return wrapper(func, *args, **kwargs)
if __name__ == "__main__":
csv_dir = r"~/data"
symbol_list = ['000001.SH']
initial_capital = 5000.0
start_date = datetime.datetime(2012,1,1,0,0,0) # not actually used.
heartbeat = 0.0
#default: MACS = MovingAverageCrossStrategy(short_window=10,long_window=30)
MACS = MovingAverageCrossStrategy
commission = 5
exchangeID = ''
lever = 10 # leverage (for futures, the cash value of a one-unit price move per lot; e.g. a 1-point move on one lot of rb is 10 yuan)
backtest_type = 'futures'
if backtest_type == 'stock':
backtest = Backtest(csv_dir=csv_dir,
symbol_list=symbol_list,
initial_capital=initial_capital,
heartbeat=heartbeat,
start_date=start_date,
data_handler=HistoricCSVDataHandler,
execution_handler=SimulatedExecutionHandler,
portfolio=Portfolio,
strategy=MACS,
commission=commission,
exchangeID=None,
lever=1
)
elif backtest_type == 'futures':
backtest = Backtest(csv_dir=csv_dir,
symbol_list=symbol_list,
initial_capital=initial_capital,
heartbeat=heartbeat,
start_date=start_date,
data_handler=HistoricCSVDataHandler,
execution_handler=SimulatedExecutionHandler,
portfolio=Portfolio_For_futures,
strategy=MACS,
commission=commission,
exchangeID=exchangeID,
lever=lever
)
exc_time(backtest.simulate_trading) # run time is 1.2880792617797852
| 33.638554
| 78
| 0.567335
| 255
| 2,792
| 6
| 0.34902
| 0.019608
| 0.036601
| 0.036601
| 0.321569
| 0.266667
| 0.266667
| 0.266667
| 0.266667
| 0.266667
| 0
| 0.02798
| 0.359957
| 2,792
| 82
| 79
| 34.04878
| 0.828204
| 0.112106
| 0
| 0.315789
| 0
| 0
| 0.022913
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.035088
| false
| 0
| 0.140351
| 0
| 0.192982
| 0.017544
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fd1e95f3c8250711415f3acb25bf5e3b26c63f39
| 3,306
|
py
|
Python
|
Emergency/DB/DRAW3D.py
|
LeeDaeil/CNS_Autonomous
|
2ae3688cfd654b9669893e3cdf4cdf1ac0748b9f
|
[
"Apache-2.0"
] | 2
|
2020-03-22T14:35:00.000Z
|
2020-05-26T05:06:41.000Z
|
Emergency/DB/DRAW3D.py
|
LeeDaeil/CNS_Autonomous
|
2ae3688cfd654b9669893e3cdf4cdf1ac0748b9f
|
[
"Apache-2.0"
] | null | null | null |
Emergency/DB/DRAW3D.py
|
LeeDaeil/CNS_Autonomous
|
2ae3688cfd654b9669893e3cdf4cdf1ac0748b9f
|
[
"Apache-2.0"
] | null | null | null |
import matplotlib.pylab as plt
import numpy as np
import pandas as pd
from COMMONTOOL import PTCureve
DB = pd.read_csv('0_228.txt')
# DB = pd.read_csv('../3차 검증/322.txt')
# target_time = 100
# for i in range(0, len(DB)):
# if DB['KCNTOMS'].loc[i] != target_time:
# DB.drop([i], inplace=True)
# else:
# target_time += 100
x, y, z, zero = [], [], [], []
PTY, PTX, BotZ, UpZ = [], [], [], []
RateX, RateY, RateZ = [], [], []
SaveTIMETEMP = {'Temp':0, 'Time':0}
for temp, t, pres, co1, co2 in zip(DB['UAVLEG2'].tolist(), DB['KCNTOMS'].tolist(),
DB['ZINST65'].tolist(), DB['KLAMPO6'].tolist(),
DB['KLAMPO9'].tolist()):
x.append(temp)
y.append(-t)
z.append(pres)
if co1 == 0 and co2 == 1 and t > 1500:
if SaveTIMETEMP['Time'] == 0:
SaveTIMETEMP['Time'] = t
SaveTIMETEMP['Temp'] = temp
rate = -55 / (60 * 60 * 5)
get_temp = rate * (t - SaveTIMETEMP['Time']) + SaveTIMETEMP['Temp']
RateX.append(get_temp)
RateY.append(-t)
RateZ.append(0)
zero.append(0)
Temp = []
UpPres = []
BotPres = []
for _ in range(0, 350):
uppres, botpres = PTCureve()._get_pres(_)
Temp.append([_])
UpPres.append([uppres])
BotPres.append([botpres])
PTX = np.array(Temp)
BotZ = np.array(BotPres)
UpZ = np.array(UpPres)
PTY = np.array([[0] for _ in range(0, 350)])
PTX = np.hstack([PTX[:, 0:1], Temp])
BotZ = np.hstack([BotZ[:, 0:1], BotPres])
UpZ = np.hstack([UpZ[:, 0:1], UpPres])
PTY = np.hstack([PTY[:, 0:1], np.array([[-t] for _ in range(0, 350)])])
print(np.shape(PTX))
fig = plt.figure()
ax1 = plt.axes(projection='3d')
ax1.plot3D(RateX, RateY, RateZ, color='orange', lw=1.5, ls='--')
ax1.plot3D([170, 0, 0, 170, 170],
[y[-1], y[-1], 0, 0, y[-1]],
[29.5, 29.5, 29.5, 29.5, 29.5], color='black', lw=0.5, ls='--')
ax1.plot3D([170, 0, 0, 170, 170],
[y[-1], y[-1], 0, 0, y[-1]],
[17, 17, 17, 17, 17], color='black', lw=0.5, ls='--')
ax1.plot3D([170, 170], [y[-1], y[-1]],
[17, 29.5], color='black', lw=0.5, ls='--')
ax1.plot3D([170, 170], [0, 0], [17, 29.5], color='black', lw=0.5, ls='--')
ax1.plot3D([0, 0], [y[-1], y[-1]], [17, 29.5], color='black', lw=0.5, ls='--')
ax1.plot3D([0, 0], [0, 0], [17, 29.5], color='black', lw=0.5, ls='--')
ax1.plot_surface(PTX, PTY, UpZ, rstride=8, cstride=8, alpha=0.15, color='r')
ax1.plot_surface(PTX, PTY, BotZ, rstride=8, cstride=8, alpha=0.15, color='r')
# ax1.scatter(PTX, PTY, BotZ, marker='*')
ax1.plot3D(x, y, z, color='blue', lw=1.5)
# linewidth or lw: float
ax1.plot3D([x[-1], x[-1]], [y[-1], y[-1]], [0, z[-1]], color='blue', lw=0.5, ls='--')
ax1.plot3D([0, x[-1]], [y[-1], y[-1]], [z[-1], z[-1]], color='blue', lw=0.5, ls='--')
ax1.plot3D([x[-1], x[-1]], [0, y[-1]], [z[-1], z[-1]], color='blue', lw=0.5, ls='--')
# each
ax1.plot3D(x, y, zero, color='black', lw=1, ls='--') # temp
ax1.plot3D(zero, y, z, color='black', lw=1, ls='--') # pres
ax1.plot3D(x, zero, z, color='black', lw=1, ls='--') # PT
# use absolute values for the y-axis tick labels
ax1.set_yticklabels([int(_) for _ in abs(ax1.get_yticks())])
ax1.set_xlabel('Temperature')
ax1.set_ylabel('Time [Tick]')
ax1.set_zlabel('Pressure')
ax1.set_xlim(0, 350)
ax1.set_zlim(0, 200)
plt.show()
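# Hedged note (derived from the rate used above): -55 / (60 * 60 * 5) == -55 / 18000
# ≈ -0.00306 temperature units per tick, i.e. a 55-degree drop spread over 5 hours
# if one tick corresponds to one second -- this is the reference cooldown line drawn
# in orange.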
| 30.897196
| 85
| 0.5366
| 550
| 3,306
| 3.178182
| 0.223636
| 0.017162
| 0.030892
| 0.030892
| 0.314645
| 0.262014
| 0.229405
| 0.228833
| 0.221968
| 0.221968
| 0
| 0.100379
| 0.201452
| 3,306
| 107
| 86
| 30.897196
| 0.561742
| 0.086509
| 0
| 0.055556
| 0
| 0
| 0.066201
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.055556
| 0
| 0.055556
| 0.013889
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fd22786701bf4e42b8d3932674e46c80a650982c
| 678
|
py
|
Python
|
common/common/management/commands/makemessages.py
|
FSTUM/rallyetool-v2
|
2f3e2b5cb8655abe023ed1215b7182430b75bb23
|
[
"MIT"
] | 1
|
2021-10-30T09:31:02.000Z
|
2021-10-30T09:31:02.000Z
|
common/common/management/commands/makemessages.py
|
FSTUM/rallyetool-v2
|
2f3e2b5cb8655abe023ed1215b7182430b75bb23
|
[
"MIT"
] | 9
|
2021-11-23T10:13:43.000Z
|
2022-03-01T15:04:15.000Z
|
common/common/management/commands/makemessages.py
|
CommanderStorm/rallyetool-v2
|
721413d6df8afc9347dac7ee83deb3a0ad4c01bc
|
[
"MIT"
] | 1
|
2021-10-16T09:07:47.000Z
|
2021-10-16T09:07:47.000Z
|
from django.core.management.commands import makemessages
class Command(makemessages.Command):
def build_potfiles(self):
potfiles = super().build_potfiles()
for potfile in sorted(set(potfiles)):
self._remove_pot_creation_date(potfile)
return potfiles
@staticmethod
def _remove_pot_creation_date(path):
modified_lines = []
with open(path, "rb") as file:
for line in file:
if not line.startswith(b'"POT-Creation-Date: '):
modified_lines.append(line)
with open(path, "wb") as file:
for line in modified_lines:
file.write(line)
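# Hedged usage note (not part of the original file): with this command module placed
# under an installed app's management/commands package, running e.g.
# `python manage.py makemessages -l de` behaves as usual except that the generated
# .po/.pot files no longer carry a "POT-Creation-Date" header, keeping diffs stable.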
| 27.12
| 64
| 0.610619
| 78
| 678
| 5.141026
| 0.525641
| 0.082294
| 0.112219
| 0.104738
| 0.074813
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.29941
| 678
| 24
| 65
| 28.25
| 0.844211
| 0
| 0
| 0
| 0
| 0
| 0.035398
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.117647
| false
| 0
| 0.058824
| 0
| 0.294118
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fd2333a5ba2bad8fcd4a158981a7c15852072e07
| 6,529
|
py
|
Python
|
app/api/v2/users/views_update.py
|
Raywire/iReporter
|
ac58414b84b9c96f0be5e0d477355d0811d8b9c5
|
[
"MIT"
] | 3
|
2019-01-09T15:17:28.000Z
|
2019-12-01T18:40:50.000Z
|
app/api/v2/users/views_update.py
|
Raywire/iReporter
|
ac58414b84b9c96f0be5e0d477355d0811d8b9c5
|
[
"MIT"
] | 13
|
2018-11-30T05:33:13.000Z
|
2021-04-30T20:46:41.000Z
|
app/api/v2/users/views_update.py
|
Raywire/iReporter
|
ac58414b84b9c96f0be5e0d477355d0811d8b9c5
|
[
"MIT"
] | 3
|
2018-12-02T16:10:12.000Z
|
2019-01-04T14:51:04.000Z
|
"""Views for users"""
from flask_restful import Resource
from flask import jsonify, request
from app.api.v2.users.models import UserModel
from app.api.v2.decorator import token_required, get_token
from app.api.v2.send_email import send
class UserStatus(Resource):
"""Class with method for updating a specific user admin status"""
def __init__(self):
self.db = UserModel()
@token_required
def patch(current_user, self, username):
"""method to promote a user"""
user = self.db.get_user(username)
if user is None:
return jsonify({
"status": 404,
"message": "user does not exist"
})
if current_user['isadmin'] is not True or user['id'] == 1:
return jsonify({
"status": 403,
"message": "You cannot change the status of this user"
})
if current_user['username'] == username:
return jsonify({
"status": 403,
"message": "You cannot change your own admin status"
})
user_status_updated = self.db.promote_user(username)
if user_status_updated is True:
success_message = {
"username": username,
"message": "User status has been updated"
}
return jsonify({
"status": 200,
"data": success_message
})
class UserActivity(Resource):
"""Class with method for disabling or enabling user activity"""
def __init__(self):
self.db = UserModel()
@token_required
def patch(current_user, self, username):
"""method to deactivate/activate a user"""
user = self.db.get_user(username)
if user is None:
return jsonify({
"status": 404,
"message": "user does not exist"
})
if current_user['isadmin'] is not True or user['id'] == 1:
return jsonify({
"status": 403,
"message": "You cannot change this user's active status"
})
if current_user['username'] == username:
return jsonify({
"status": 403,
"message": "You cannot change your own active status"
})
user_activity_updated = self.db.activate_user(username)
if user_activity_updated is True:
return jsonify({
"status": 200,
"data": {
"username": username,
"message": "User active status has been updated"
}
})
class UserProfilePic(Resource):
"""Class with method for uploading a user's profile picture"""
def __init__(self):
self.db = UserModel()
@token_required
def patch(current_user, self, username):
"""method to upload a user's profile picture"""
if current_user['username'] != username:
return jsonify({
"status": 403,
"message": "A user can only upload a picture to their own profile"
})
upload_status = self.db.upload_profile_pic(username)
if upload_status == 'File type not supported' or upload_status == 'select an image' or upload_status == 'no file part':
return jsonify({
"status": 400,
"message": upload_status
})
if upload_status is True:
return jsonify({
"status": 200,
"data": {
"username": username,
"message": "Your profile picture has been uploaded"
}
})
class VerifyAccount(Resource):
"""Class with method to verify a user's account"""
@token_required
def patch(current_user, self, username):
"""Method to verify a user's account"""
loggedin_user = UserModel().get_user(username)
if current_user['verification'] is False:
return {"message": "Verification failed please use the link in your email address"}, 403
if loggedin_user is None:
return jsonify({
"status": 404,
"message": "user does not exist"
})
if current_user['username'] != username:
return jsonify({
"status": 403,
"message": "A user can only verify their own account"
})
verification = UserModel().verify_user(username)
if verification == 'account verified':
return jsonify({
"status": 200,
"data": {
"username": username,
"message": "Your account has been verified"
}
})
class RequestVerification(Resource):
"""Class with method to request an account verification link"""
@token_required
def post(current_user, self, username):
"""Method to request account verification"""
unverified_user = UserModel().get_user(username)
if current_user['username'] != username:
return {
"status": 403,
"message": "A user can only request verification for their own account"
}, 403
email = unverified_user['email']
public_id = unverified_user['public_id']
json_verification_link = request.json.get('verificationlink', None)
verification_token = get_token(public_id, 30, True).decode('UTF-8')
if json_verification_link is None:
return {
"message": {
"verificationlink": "This key is required"
}
}, 400
verification_link = json_verification_link + '?username=' + username + '?token=' + verification_token
verification_message = "If you didn't ask to verify this address, you can ignore this email.\n\nThanks,\n\nYour iReporter team"
subject = "Account Verification"
body = "Follow this link within half an hour to verify your email address: {0}\n{1}".format(
verification_link, verification_message)
if send(email, subject, body) is True:
return jsonify({
"status": 200,
"message": "Verification link has been sent to your email"
})
return {
"status": 400,
"message": "Verification failed please try again"
}, 400
| 33.482051
| 135
| 0.551386
| 682
| 6,529
| 5.159824
| 0.205279
| 0.055413
| 0.080989
| 0.037511
| 0.448423
| 0.389599
| 0.366581
| 0.345553
| 0.322251
| 0.280477
| 0
| 0.017254
| 0.351968
| 6,529
| 194
| 136
| 33.654639
| 0.814465
| 0.07214
| 0
| 0.581081
| 0
| 0.006757
| 0.224592
| 0.004165
| 0
| 0
| 0
| 0
| 0
| 1
| 0.054054
| false
| 0
| 0.033784
| 0
| 0.25
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fd247bba2b56b1306086374a12025c1833517c10
| 7,357
|
py
|
Python
|
LoadDataAndPrepare/Make_Dictionaries/4_make_reviews_all_words_vocab_dictionary.py
|
ngrover2/Automatic_Lexicon_Induction
|
b58a1d55f294293161dc23ab2e6d669c1c5e90d8
|
[
"MIT"
] | null | null | null |
LoadDataAndPrepare/Make_Dictionaries/4_make_reviews_all_words_vocab_dictionary.py
|
ngrover2/Automatic_Lexicon_Induction
|
b58a1d55f294293161dc23ab2e6d669c1c5e90d8
|
[
"MIT"
] | null | null | null |
LoadDataAndPrepare/Make_Dictionaries/4_make_reviews_all_words_vocab_dictionary.py
|
ngrover2/Automatic_Lexicon_Induction
|
b58a1d55f294293161dc23ab2e6d669c1c5e90d8
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import sys
import os
import traceback
import ujson
from pprint import pprint
from textblob import TextBlob as tb
from textblob import Word as wd
import shutil
from collections import defaultdict
from gensim.corpora import Dictionary
from socialconfig import config
class get_reviews_iterable(object):
def __init__(self, rfname):
self.read_file = rfname
def __iter__(self):
try:
print("Opening file:{file}".format(file=self.read_file))
with open(self.read_file,'r') as file:
for line in file:
yield line
except:
print("Error reading {f}:".format(f=self.read_file))
raise
# print(traceback.format_exc())
class make_dictionaries(object):
"""docstring for ClassName"""
def __init__(self):
pass
def create_dictionary(self):
YELP_DATASET_DIR = config.get("YELP_DATASET_DIR",None)
SAVE_REVIEWS_BY_CATEGORY_DIRECTORY = config.get("SAVE_REVIEWS_BY_CATEGORY_DIRECTORY",None)
SAVE_DICTIONARY_DIR = config.get("SAVE_DICTIONARY_DIR",None)
SAVE_BAG_OF_WORDS_DIR = config.get("SAVE_BAG_OF_WORDS_DIR",None)
SAVE_N_BAG_OF_WORDS_DOCS_PER_FILE = int(config.get("SAVE_N_BAG_OF_WORDS_DOCS_PER_FILE",25000))
if not (YELP_DATASET_DIR and SAVE_REVIEWS_BY_CATEGORY_DIRECTORY and SAVE_DICTIONARY_DIR and SAVE_BAG_OF_WORDS_DIR and SAVE_DICTIONARY_DIR):
print("config keys are not set correctly in the config file: socialconfig.py")
exit(0)
SAVE_UNFILTERED_DICTIONARY_DIR = os.path.join(SAVE_DICTIONARY_DIR,"Unfiltered")
if not os.path.exists(SAVE_REVIEWS_BY_CATEGORY_DIRECTORY) and not os.path.isdir(SAVE_REVIEWS_BY_CATEGORY_DIRECTORY):
raise("Directory {d} does not exist".format(d=SAVE_REVIEWS_BY_CATEGORY_DIRECTORY))
if not (os.path.exists(SAVE_BAG_OF_WORDS_DIR) and os.path.isdir(SAVE_BAG_OF_WORDS_DIR)):
os.makedirs(SAVE_BAG_OF_WORDS_DIR)
if not (os.path.exists(SAVE_UNFILTERED_DICTIONARY_DIR) and os.path.isdir(SAVE_UNFILTERED_DICTIONARY_DIR)):
os.makedirs(SAVE_UNFILTERED_DICTIONARY_DIR)
for pardir, sub_dirs, files in os.walk(SAVE_REVIEWS_BY_CATEGORY_DIRECTORY):
if len(files) > 0:
error_count = 0
review_docs = []
negative_docs = []
positive_docs = []
doc_count = 0
docs_per_file = SAVE_N_BAG_OF_WORDS_DOCS_PER_FILE
file_num = str((doc_count // docs_per_file) + 1)
for file in files:
if "yelp_reviews_" in file and "category" in pardir:
reviews = get_reviews_iterable(os.path.join(pardir,file))
yelp_category = pardir.split('/')[-1]
CATEGORY_SPECIFIC_BAG_OF_WORDS_DIR = os.path.join(SAVE_BAG_OF_WORDS_DIR,yelp_category)
if not (os.path.exists(CATEGORY_SPECIFIC_BAG_OF_WORDS_DIR) and os.path.isdir(CATEGORY_SPECIFIC_BAG_OF_WORDS_DIR)):
os.makedirs(CATEGORY_SPECIFIC_BAG_OF_WORDS_DIR)
fname = os.path.join(SAVE_BAG_OF_WORDS_DIR,yelp_category,"{cat}_file_{file_num}.txt".format(cat=yelp_category,file_num=file_num))
bow_file = open(fname,'w')
print("Writing docs (in bag of words form) for {cat} to directory: {d}".format(cat=yelp_category,d=os.path.join(SAVE_BAG_OF_WORDS_DIR,yelp_category)))
for review in reviews:
try:
review_dict = ujson.loads(review)
except:
error_count += 1
continue
adjs = review_dict.get("adjectives",None)
rating = int(review_dict.get("rating",-1))
if adjs:
doc_count += 1
bow_file.write(ujson.dumps(adjs.encode("utf-8")) + "\n")
review_docs.append(adjs.strip().split())
if (doc_count%docs_per_file) == 0:
if bow_file:
bow_file.close()
file_num = str((doc_count // docs_per_file) + 1)
fname = os.path.join(SAVE_BAG_OF_WORDS_DIR,yelp_category,"{cat}_file_{file_num}.txt".format(cat=yelp_category,file_num=file_num))
bow_file = open(fname,'w')
if rating:
if rating > 3:
positive_docs.append(adjs.strip().split())
elif rating < 3:
negative_docs.append(adjs.strip().split())
else:
pass
print("Wrote {total} docs in {cat} category".format(total=str(doc_count),cat=yelp_category))
dictionary = Dictionary(review_docs)
CATEGORY_SPECIFIC_DICT_DIR = os.path.join(SAVE_UNFILTERED_DICTIONARY_DIR,yelp_category)
POSITIVE_SUB_DIR = os.path.join(CATEGORY_SPECIFIC_DICT_DIR,"positive")
NEGATIVE_SUB_DIR = os.path.join(CATEGORY_SPECIFIC_DICT_DIR,"negative")
if not (os.path.exists(CATEGORY_SPECIFIC_DICT_DIR) and os.path.isdir(CATEGORY_SPECIFIC_DICT_DIR)):
os.makedirs(CATEGORY_SPECIFIC_DICT_DIR)
os.makedirs(POSITIVE_SUB_DIR)
os.makedirs(NEGATIVE_SUB_DIR)
dictionary.save(os.path.join(CATEGORY_SPECIFIC_DICT_DIR,"{yelp_category}_dict.dict".format(yelp_category=yelp_category)))
dictionary.save_as_text(os.path.join(CATEGORY_SPECIFIC_DICT_DIR,"{yelp_category}_dict.txt".format(yelp_category=yelp_category)))
sorted_doc_freqs = sorted(dictionary.dfs.items(),key = lambda x : x[1],reverse=True)
# print("Will save file in:\n " + os.path.join(CATEGORY_SPECIFIC_DICT_DIR,"{yelp_category}_dict.txt".format(yelp_category=yelp_category)))
with open(os.path.join(CATEGORY_SPECIFIC_DICT_DIR,"{yelp_category}_words_doc_frequencies.txt".format(yelp_category=yelp_category)),'w') as df_file:
for (token_id,doc_freq) in sorted_doc_freqs:
df_file.write(str(dictionary.get(token_id,"Unknown").encode('utf-8')) + " " + str(doc_freq) + "\n")
del dictionary
del review_docs
del sorted_doc_freqs
pos_dictionary = Dictionary(positive_docs)
del positive_docs
neg_dictionary = Dictionary(negative_docs)
del negative_docs
pos_dictionary.save(os.path.join(POSITIVE_SUB_DIR,"{yelp_category}_pos_dict.dict".format(yelp_category=yelp_category)))
pos_dictionary.save_as_text(os.path.join(POSITIVE_SUB_DIR,"{yelp_category}_pos_dict.txt".format(yelp_category=yelp_category)))
sorted_pos_doc_freqs = sorted(pos_dictionary.dfs.items(),key = lambda x : x[1],reverse=True)
with open(os.path.join(POSITIVE_SUB_DIR,"{yelp_category}_pos_words_doc_frequencies.txt".format(yelp_category=yelp_category)),'w') as df_file:
for (token_id,doc_freq) in sorted_pos_doc_freqs:
df_file.write(str(pos_dictionary.get(token_id,"Unknown").encode('utf-8')) + " " + str(doc_freq) + "\n")
del pos_dictionary
del sorted_pos_doc_freqs
neg_dictionary.save(os.path.join(NEGATIVE_SUB_DIR,"{yelp_category}_neg_dict.dict".format(yelp_category=yelp_category)))
neg_dictionary.save_as_text(os.path.join(NEGATIVE_SUB_DIR,"{yelp_category}_neg_dict.txt".format(yelp_category=yelp_category)))
sorted_neg_doc_freqs = sorted(neg_dictionary.dfs.items(),key = lambda x : x[1],reverse=True)
with open(os.path.join(NEGATIVE_SUB_DIR,"{yelp_category}_neg_words_doc_frequencies.txt".format(yelp_category=yelp_category)),'w') as df_file:
for (token_id,doc_freq) in sorted_neg_doc_freqs:
df_file.write(str(neg_dictionary.get(token_id,"Unknown").encode('utf-8')) + " " + str(doc_freq) + "\n")
del neg_dictionary
del sorted_neg_doc_freqs
print("{count} {cat} reviews were discarded because of parsing errors".format(count=error_count,cat=yelp_category))
print("Created dictionary for {cat} tokens".format(cat=yelp_category))
if __name__ == "__main__":
try:
mk = make_dictionaries()
mk.create_dictionary()
except:
raise
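# A minimal sketch of consuming a dictionary produced by the script above,
# assuming gensim is installed; the file name is a placeholder.
from gensim.corpora import Dictionary

vocab = Dictionary.load('restaurants_dict.dict')        # placeholder path
vocab.filter_extremes(no_below=5, no_above=0.5)         # drop very rare / very common adjectives
bow = vocab.doc2bow(['tasty', 'friendly', 'slow'])      # list of (token_id, count) pairs
print(bow)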
| 42.773256
| 156
| 0.740791
| 1,113
| 7,357
| 4.538185
| 0.14735
| 0.099782
| 0.037616
| 0.036032
| 0.560285
| 0.484063
| 0.403286
| 0.339735
| 0.296971
| 0.249258
| 0
| 0.00413
| 0.144352
| 7,357
| 171
| 157
| 43.023392
| 0.798253
| 0.028952
| 0
| 0.128788
| 0
| 0
| 0.129905
| 0.060538
| 0
| 0
| 0
| 0
| 0
| 1
| 0.030303
| false
| 0.022727
| 0.083333
| 0
| 0.128788
| 0.060606
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fd266f079f9daa527e01e31d5df3c4df79e8150b
| 1,126
|
py
|
Python
|
day 03/day03_part1.py
|
MischaDy/PyAdventOfCode2020
|
3e0a1a61ac930d7e30a0104ac617008297508fcb
|
[
"CC0-1.0"
] | 2
|
2020-12-17T18:49:20.000Z
|
2021-02-20T16:48:14.000Z
|
day 03/day03_part1.py
|
MischaDy/PyAdventOfCode2020
|
3e0a1a61ac930d7e30a0104ac617008297508fcb
|
[
"CC0-1.0"
] | null | null | null |
day 03/day03_part1.py
|
MischaDy/PyAdventOfCode2020
|
3e0a1a61ac930d7e30a0104ac617008297508fcb
|
[
"CC0-1.0"
] | 3
|
2020-12-20T19:08:32.000Z
|
2020-12-26T22:11:15.000Z
|
from helpers.cyclic_list import CyclicList
from helpers.coordinates2d import Coordinates2D
RUN_TEST = False
TEST_SOLUTION = 7
TEST_INPUT_FILE = 'test_input_day_03.txt'
INPUT_FILE = 'input_day_03.txt'
START = Coordinates2D((0, 0)) # top left corner
TRAJECTORY = Coordinates2D((3, 1)) # right 3, down 1
ARGS = [START, TRAJECTORY]
def main_part1(input_file, start, trajectory):
with open(input_file) as file:
tree_map = list(map(lambda line: CyclicList(line.rstrip()), file.readlines()))
solution = count_trees(tree_map, trajectory, start)
return solution
def count_trees(tree_map, trajectory, start):
bottom = len(tree_map)
cur_pos = start
num_trees = 0 # start is a tree-free square
while cur_pos.y < bottom:
num_trees += tree_map[cur_pos.y][cur_pos.x] == '#'
cur_pos = cur_pos + trajectory
return num_trees
if __name__ == '__main__':
if RUN_TEST:
solution = main_part1(TEST_INPUT_FILE, *ARGS)
print(solution)
assert (TEST_SOLUTION == solution)
else:
solution = main_part1(INPUT_FILE, *ARGS)
print(solution)
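# helpers.cyclic_list is not included here; a minimal stand-in with the
# behaviour count_trees relies on (indexing wraps around the line length,
# modelling the horizontally repeating tree map) could look like this:
class CyclicList(list):
    def __getitem__(self, index):
        return super().__getitem__(index % len(self))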
| 26.186047
| 86
| 0.688277
| 156
| 1,126
| 4.679487
| 0.384615
| 0.073973
| 0.049315
| 0.035616
| 0.158904
| 0.087671
| 0
| 0
| 0
| 0
| 0
| 0.021372
| 0.21048
| 1,126
| 42
| 87
| 26.809524
| 0.799775
| 0.052398
| 0
| 0.066667
| 0
| 0
| 0.043274
| 0.019755
| 0
| 0
| 0
| 0
| 0.033333
| 1
| 0.066667
| false
| 0
| 0.066667
| 0
| 0.2
| 0.066667
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fd279c40ef3cc1786017d66b7c1e1b885d2e67e1
| 807
|
py
|
Python
|
binary_tree/e_path_sum.py
|
dhrubach/python-code-recipes
|
14356c6adb1946417482eaaf6f42dde4b8351d2f
|
[
"MIT"
] | null | null | null |
binary_tree/e_path_sum.py
|
dhrubach/python-code-recipes
|
14356c6adb1946417482eaaf6f42dde4b8351d2f
|
[
"MIT"
] | null | null | null |
binary_tree/e_path_sum.py
|
dhrubach/python-code-recipes
|
14356c6adb1946417482eaaf6f42dde4b8351d2f
|
[
"MIT"
] | null | null | null |
###############################################
# LeetCode Problem Number : 112
# Difficulty Level : Easy
# URL : https://leetcode.com/problems/path-sum/
###############################################
from binary_search_tree.tree_node import TreeNode
class BinaryTree:
def hasPathSum(self, root: TreeNode, sum: int) -> bool:
if not root:
return False
def dfs(node: TreeNode, total: int) -> bool:
nonlocal sum
res = False
total += node.val
if not node.left and not node.right:
return total == sum
if node.left:
res = dfs(node.left, total)
if node.right and not res:
res = dfs(node.right, total)
return res
return dfs(root, 0)
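# A small usage sketch with a hypothetical stand-in for TreeNode (only the
# val/left/right attributes the method relies on are provided):
class _Node:
    def __init__(self, val, left=None, right=None):
        self.val, self.left, self.right = val, left, right

# path 5 -> 4 -> 11 -> 2 sums to 22
root = _Node(5,
             _Node(4, _Node(11, _Node(7), _Node(2))),
             _Node(8, _Node(13), _Node(4)))
print(BinaryTree().hasPathSum(root, 22))   # True
print(BinaryTree().hasPathSum(root, 100))  # False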
| 25.21875
| 59
| 0.484511
| 87
| 807
| 4.45977
| 0.471264
| 0.054124
| 0.051546
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.007339
| 0.324659
| 807
| 31
| 60
| 26.032258
| 0.704587
| 0.122677
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.117647
| false
| 0
| 0.058824
| 0
| 0.470588
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fd2a6655ca5c2bb5ada546fc62616fc063ba84a1
| 3,900
|
py
|
Python
|
tests/unit/guests/linux/storage/disk.py
|
tessia-project/tessia-baselib
|
07004b7f6462f081a6f7e810954fd7e0d2cdcf6b
|
[
"Apache-2.0"
] | 1
|
2022-01-27T01:32:14.000Z
|
2022-01-27T01:32:14.000Z
|
tests/unit/guests/linux/storage/disk.py
|
tessia-project/tessia-baselib
|
07004b7f6462f081a6f7e810954fd7e0d2cdcf6b
|
[
"Apache-2.0"
] | null | null | null |
tests/unit/guests/linux/storage/disk.py
|
tessia-project/tessia-baselib
|
07004b7f6462f081a6f7e810954fd7e0d2cdcf6b
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2017 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Test module for disk module
"""
#
# IMPORTS
#
from tessia.baselib.common.ssh.client import SshClient
from tessia.baselib.common.ssh.shell import SshShell
from tessia.baselib.guests.linux.storage import disk as disk_module
from unittest import mock
from unittest import TestCase
#
# CONSTANTS AND DEFINITIONS
#
PARAMS_WITH_SYS_ATTRS = {
"system_attributes": {
"libvirt": "somexml"
},
"volume_id": "some_disk_id"
}
PARAMS_WITHOUT_SYS_ATTRS = {
"volume_id": "some_disk_id"
}
#
# CODE
#
class TestDisk(TestCase):
"""
Class that provides unit tests for the DiskBase class.
"""
def setUp(self):
"""
Create mocks that are used in all test cases.
"""
# since the class is abstract we need to define a concrete child class
# to be able to instantiate it
class DiskConcrete(disk_module.DiskBase):
"""
Concrete class of DiskBase
"""
def activate(self, *args, **kwargs):
super().activate(*args, **kwargs)
self._disk_cls = DiskConcrete
patcher = mock.patch.object(disk_module, 'sleep', autospec=True)
patcher.start()
self.addCleanup(patcher.stop)
self._mock_host_conn = mock.Mock(spec_set=SshClient)
self._mock_shell = mock.Mock(spec_set=SshShell)
self._mock_host_conn.open_shell.return_value = self._mock_shell
# setUp()
def _create_disk(self, parameters):
"""
Auxiliary method to create a disk.
"""
return self._disk_cls(parameters, self._mock_host_conn)
def test_abstract_methods(self):
"""
Confirm that abstract methods raise NotImplementedError if called.
"""
disk = self._create_disk(PARAMS_WITH_SYS_ATTRS)
self.assertRaises(NotImplementedError, disk.activate)
# test_abstract_methods()
def test_init(self):
"""
Test proper initialization
"""
disk = self._create_disk(PARAMS_WITH_SYS_ATTRS)
self.assertEqual(disk.volume_id, 'some_disk_id')
# test_init()
def test_enable_device(self):
"""
Test the protected method that enable the device.
"""
disk = self._create_disk({'volume_id': 'some_id'})
devicenr = "some device number"
self._mock_shell.run.side_effect = [(0, ""), (0, "")]
disk._enable_device(devicenr)
cmd1 = "echo free {} > /proc/cio_ignore".format(devicenr)
cmd2 = 'chccwdev -e {}'.format(devicenr)
calls = [mock.call(cmd1), mock.call(cmd2)]
self.assertEqual(self._mock_shell.run.mock_calls, calls)
# test_enable_device()
def test_enable_device_fails(self):
"""
Test the protected method that enable the device in the case
it fails to be enabled.
"""
disk = self._create_disk({'volume_id': 'some_id'})
devicenr = "some device number"
ret_output = [(0, "")]
# _enable_device perform many attempts
for _ in range(0, 6):
ret_output.append((1, ""))
self._mock_shell.run.side_effect = ret_output
self.assertRaisesRegex(RuntimeError, "Failed to activate",
disk._enable_device, devicenr)
# test_enable_device_fails()
# TestBaseDisk
| 30
| 78
| 0.646923
| 483
| 3,900
| 5.024845
| 0.403727
| 0.02637
| 0.024722
| 0.029666
| 0.181294
| 0.137618
| 0.116193
| 0.116193
| 0.116193
| 0.046148
| 0
| 0.006186
| 0.253846
| 3,900
| 129
| 79
| 30.232558
| 0.827835
| 0.319231
| 0
| 0.153846
| 0
| 0
| 0.091134
| 0
| 0
| 0
| 0
| 0
| 0.076923
| 1
| 0.134615
| false
| 0
| 0.096154
| 0
| 0.288462
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fd2ae8dc293d1b7377165b6678e015927d2d75d1
| 5,479
|
py
|
Python
|
kornia/x/trainer.py
|
AK391/kornia
|
a2535eb7593ee2fed94d23cc720804a16f9f0e7e
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
kornia/x/trainer.py
|
AK391/kornia
|
a2535eb7593ee2fed94d23cc720804a16f9f0e7e
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
kornia/x/trainer.py
|
AK391/kornia
|
a2535eb7593ee2fed94d23cc720804a16f9f0e7e
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
import logging
from typing import Callable, Dict
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
# the accelerate library is a requirement for the Trainer
# but it is optional for the general kornia user.
try:
from accelerate import Accelerator
except ImportError:
Accelerator = None
from .metrics import AverageMeter
from .utils import Configuration, TrainerState
logging.basicConfig(format='%(levelname)s:%(message)s', level=logging.DEBUG)
callbacks_whitelist = [
"preprocess", "augmentations", "evaluate", "fit", "checkpoint", "terminate"
]
class Trainer:
"""Base class to train the different models in kornia.
.. warning::
The API is experimental and subject to be modified based on the needs of kornia models.
Args:
model: the nn.Module to be optimized.
train_dataloader: the data loader used in the training loop.
valid_dataloader: the data loader used in the validation loop.
criterion: the nn.Module with the function that computes the loss.
optimizer: the torch optimizer object to be used during the optimization.
scheduler: the torch scheduler object defining the scheduling strategy.
accelerator: the Accelerator object to distribute the training.
config: a TrainerConfiguration structure containing the experiment hyper parameters.
callbacks: a dictionary containing the pointers to the functions to overrides. The
main supported hooks are ``evaluate``, ``preprocess``, ``augmentations`` and ``fit``.
.. important::
The API heavily relies on `accelerate <https://github.com/huggingface/accelerate/>`_.
In order to use it, you must: ``pip install kornia[x]``
.. seealso::
Learn how to use the API in our documentation
`here <https://kornia.readthedocs.io/en/latest/get-started/training.html>`_.
"""
def __init__(
self,
model: nn.Module,
train_dataloader: DataLoader,
valid_dataloader: DataLoader,
criterion: nn.Module,
optimizer: torch.optim.Optimizer,
scheduler: torch.optim.lr_scheduler.CosineAnnealingLR,
config: Configuration,
callbacks: Dict[str, Callable] = {},
) -> None:
# setup the accelerator
if Accelerator is None:
raise ModuleNotFoundError(
"accelerate library is not installed: pip install kornia[x]")
self.accelerator = Accelerator()
# setup the data related objects
self.model = self.accelerator.prepare(model)
self.train_dataloader = self.accelerator.prepare(train_dataloader)
self.valid_dataloader = self.accelerator.prepare(valid_dataloader)
self.criterion = criterion.to(self.device)
self.optimizer = self.accelerator.prepare(optimizer)
self.scheduler = scheduler
self.config = config
# configure callbacks
for fn_name, fn in callbacks.items():
if fn_name not in callbacks_whitelist:
raise ValueError(f"Not supported: {fn_name}.")
setattr(self, fn_name, fn)
# hyper-params
self.num_epochs = config.num_epochs
self._logger = logging.getLogger('train')
@property
def device(self) -> torch.device:
return self.accelerator.device
def backward(self, loss: torch.Tensor) -> None:
self.accelerator.backward(loss)
def fit_epoch(self, epoch: int) -> None:
# train loop
self.model.train()
losses = AverageMeter()
for sample_id, sample in enumerate(self.train_dataloader):
source, target = sample # this might change with new pytorch dataset structure
self.optimizer.zero_grad()
# perform the preprocess and augmentations in batch
img = self.preprocess(source)
img = self.augmentations(img)
# make the actual inference
output = self.model(img)
loss = self.criterion(output, target)
self.backward(loss)
self.optimizer.step()
losses.update(loss.item(), img.shape[0])
if sample_id % 50 == 0:
self._logger.info(
f"Train: {epoch + 1}/{self.num_epochs} "
f"Sample: {sample_id + 1}/{len(self.train_dataloader)} "
f"Loss: {losses.val:.3f} {losses.avg:.3f}"
)
def fit(self,) -> None:
# execute the main loop
# NOTE: Do not change and keep this structure clear for readability.
for epoch in range(self.num_epochs):
# call internally the training loop
# NOTE: override to customize your evaluation routine
self.fit_epoch(epoch)
# call internally the evaluation loop
# NOTE: override to customize your evaluation routine
valid_stats = self.evaluate()
self.checkpoint(self.model, epoch, valid_stats)
state = self.terminate(self.model, epoch, valid_stats)
if state == TrainerState.TERMINATE:
break
# END OF THE EPOCH
self.scheduler.step()
...
def evaluate(self):
...
def preprocess(self, x):
return x
def augmentations(self, x):
return x
def checkpoint(self, *args, **kwargs):
...
def terminate(self, *args, **kwargs):
...
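# A minimal wiring sketch, assuming the accelerate package is installed.
# SimpleNamespace stands in for the Configuration object and only provides
# the num_epochs attribute that this class reads; a real setup would build a
# proper kornia Configuration instead.
from types import SimpleNamespace

from torch.utils.data import DataLoader, TensorDataset

model = nn.Linear(16, 4)
dataset = TensorDataset(torch.randn(64, 16), torch.randint(0, 4, (64,)))
train_dl = DataLoader(dataset, batch_size=8)
valid_dl = DataLoader(dataset, batch_size=8)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=2)
config = SimpleNamespace(num_epochs=2)  # hypothetical stand-in for Configuration

trainer = Trainer(model, train_dl, valid_dl, nn.CrossEntropyLoss(), optimizer, scheduler, config)
trainer.fit()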
| 34.459119
| 95
| 0.632962
| 626
| 5,479
| 5.479233
| 0.354633
| 0.030612
| 0.025656
| 0.013411
| 0.069388
| 0.046647
| 0.046647
| 0.027988
| 0
| 0
| 0
| 0.002029
| 0.280343
| 5,479
| 158
| 96
| 34.677215
| 0.867867
| 0.334003
| 0
| 0.068182
| 0
| 0
| 0.083474
| 0.015792
| 0
| 0
| 0
| 0
| 0
| 1
| 0.113636
| false
| 0
| 0.102273
| 0.034091
| 0.261364
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fd2c1f1342cad7325a43f5762d5c2d1d94cfe573
| 2,795
|
py
|
Python
|
datasets/transformations/jpeg_compress.py
|
bytedance/Hammer
|
388ed20b3d9b34f33f5357d75f8fe5d726782ec8
|
[
"MIT"
] | 97
|
2022-02-08T09:00:57.000Z
|
2022-03-23T05:33:35.000Z
|
datasets/transformations/jpeg_compress.py
|
bytedance/Hammer
|
388ed20b3d9b34f33f5357d75f8fe5d726782ec8
|
[
"MIT"
] | null | null | null |
datasets/transformations/jpeg_compress.py
|
bytedance/Hammer
|
388ed20b3d9b34f33f5357d75f8fe5d726782ec8
|
[
"MIT"
] | 7
|
2022-02-08T15:13:02.000Z
|
2022-03-19T19:11:13.000Z
|
# python3.7
"""Implements JPEG compression on images."""
import cv2
import numpy as np
try:
import nvidia.dali.fn as fn
import nvidia.dali.types as types
except ImportError:
fn = None
from utils.formatting_utils import format_range
from .base_transformation import BaseTransformation
__all__ = ['JpegCompress']
class JpegCompress(BaseTransformation):
"""Applies random JPEG compression to images.
This transformation can be used as an augmentation by distorting images.
In other words, the input image(s) will be first compressed (i.e., encoded)
with a random quality ratio, and then decoded back to the image space.
The distortion is introduced in the encoding process.
Args:
quality_range: The range within which to uniformly sample a quality
value after compression. 100 means highest and 0 means lowest.
(default: (40, 60))
prob: Probability of applying JPEG compression. (default: 0.5)
"""
def __init__(self, prob=0.5, quality_range=(40, 60)):
super().__init__(support_dali=(fn is not None))
self.prob = np.clip(prob, 0, 1)
self.quality_range = format_range(quality_range, min_val=0, max_val=100)
def _CPU_forward(self, data):
# Early return if no compression is needed.
if np.random.uniform() >= self.prob:
return data
# Set compression quality.
quality = np.random.randint(*self.quality_range)
encode_param = [int(cv2.IMWRITE_JPEG_QUALITY), quality]
# Compress images.
outputs = []
for image in data:
_, encoded_image = cv2.imencode('.jpg', image, encode_param)
decoded_image = cv2.imdecode(encoded_image, cv2.IMREAD_UNCHANGED)
if decoded_image.ndim == 2:
decoded_image = decoded_image[:, :, np.newaxis]
outputs.append(decoded_image)
return outputs
def _DALI_forward(self, data):
# Set compression quality.
if self.quality_range[0] == self.quality_range[1]:
quality = self.quality_range[0]
else:
quality = fn.random.uniform(range=self.quality_range)
quality = fn.cast(quality, dtype=types.INT32)
# Compress images.
compressed_data = fn.jpeg_compression_distortion(
data, quality=quality)
if not isinstance(compressed_data, (list, tuple)):
compressed_data = [compressed_data]
# Determine whether the transformation should be applied.
cond = fn.random.coin_flip(dtype=types.BOOL, probability=self.prob)
outputs = []
for image, compressed_image in zip(data, compressed_data):
outputs.append(compressed_image * cond + image * (cond ^ True))
return outputs
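# A minimal CPU-path usage sketch on a random HWC uint8 image; the DALI branch
# is not needed for this. prob=1.0 forces the distortion to be applied.
image = np.random.randint(0, 256, size=(64, 64, 3), dtype=np.uint8)
transform = JpegCompress(prob=1.0, quality_range=(10, 20))
compressed, = transform._CPU_forward([image])
print(compressed.shape)  # (64, 64, 3), now carrying JPEG artifacts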
| 35.379747
| 80
| 0.65975
| 348
| 2,795
| 5.140805
| 0.413793
| 0.060369
| 0.053661
| 0.027949
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.01677
| 0.253309
| 2,795
| 78
| 81
| 35.833333
| 0.840441
| 0.284079
| 0
| 0.093023
| 0
| 0
| 0.008235
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.069767
| false
| 0
| 0.162791
| 0
| 0.325581
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fd2ca1a6e56d2464e000ae2d9a68e5afd6f6c238
| 2,046
|
py
|
Python
|
venv/VFR/flask_app_3d.py
|
flhataf/Virtual-Fitting-Room
|
e5b41849df963cebd3b7deb7e87d643ece5b6d18
|
[
"MIT"
] | null | null | null |
venv/VFR/flask_app_3d.py
|
flhataf/Virtual-Fitting-Room
|
e5b41849df963cebd3b7deb7e87d643ece5b6d18
|
[
"MIT"
] | null | null | null |
venv/VFR/flask_app_3d.py
|
flhataf/Virtual-Fitting-Room
|
e5b41849df963cebd3b7deb7e87d643ece5b6d18
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Mon Feb 15 00:31:35 2021
@author: RayaBit
"""
from flask import Flask, render_template, Response
from imutils.video import VideoStream
from skeletonDetector import skeleton
import cv2
from skeleton3DDetector import Skeleton3dDetector
from visualization import Visualizer
import time
app = Flask(__name__)
@app.route('/')
def index():
return render_template('index2.html')
def gen():
vs = VideoStream(src=0).start()
#vs = cv2.VideoCapture("kid3.mp4")
# sk3d = Skeleton3dDetector(width = vs.get(3), height = vs.get(4))#width and height?
# 320, 240
sk3d = Skeleton3dDetector(width = 320, height = 240)#width and height?
init_buff=[]
for _ in range(2):
frame = vs.read()
while frame is None:
frame = vs.read()
processed_fram, x = skeleton(frame)
init_buff.append(x)
sk3d.fill_buff(init_buff)
cv2.waitKey(0)
#prev_frame = processed_fram
visualizer = Visualizer(frame.shape[0], frame.shape[1])
while cv2.waitKey(1) < 0: # breaks on pressing q
t = time.time()
frame = vs.read()
if frame is None:
continue
prev_frame = processed_fram #curr actually
processed_fram, x = skeleton(frame)
kps3d = sk3d.detect(x) # buff is initialized fr3d are kps3d for prev_frame
img = visualizer.draw(prev_frame,kps3d)
cv2.putText(img, "time taken = {:.2f} sec".format(time.time() - t), (50, 50), cv2.FONT_HERSHEY_COMPLEX, .5,
(255, 50, 0), 2, lineType=cv2.LINE_AA)
(flag, encodedImage) = cv2.imencode(".jpg", img)
if not flag:
continue
yield (b'--frame\r\n' b'Content-Type: image/jpeg\r\n\r\n' +
bytearray(encodedImage) + b'\r\n')
@app.route('/video_feed')
def video_feed():
return Response(gen(), mimetype='multipart/x-mixed-replace; boundary=frame')
if __name__ == '__main__':
app.run(host="127.0.0.1", port=8087)
| 26.571429
| 115
| 0.616813
| 269
| 2,046
| 4.572491
| 0.483271
| 0.042276
| 0.026829
| 0.035772
| 0.043902
| 0
| 0
| 0
| 0
| 0
| 0
| 0.05239
| 0.253666
| 2,046
| 77
| 116
| 26.571429
| 0.753111
| 0.160313
| 0
| 0.155556
| 0
| 0
| 0.091069
| 0.015276
| 0
| 0
| 0
| 0
| 0
| 1
| 0.066667
| false
| 0
| 0.155556
| 0.044444
| 0.266667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fd2dbbfa0aac3167c6b35b08529f51283edf8826
| 8,144
|
py
|
Python
|
schedsi/threads/thread.py
|
z33ky/schedsi
|
3affe28a3e1d2001c639d7c0423cb105d1991590
|
[
"CC0-1.0"
] | 1
|
2017-08-03T12:58:53.000Z
|
2017-08-03T12:58:53.000Z
|
schedsi/threads/thread.py
|
z33ky/schedsi
|
3affe28a3e1d2001c639d7c0423cb105d1991590
|
[
"CC0-1.0"
] | null | null | null |
schedsi/threads/thread.py
|
z33ky/schedsi
|
3affe28a3e1d2001c639d7c0423cb105d1991590
|
[
"CC0-1.0"
] | null | null | null |
"""Define the :class:`Thread`."""
import threading
from schedsi.cpu import request as cpurequest
from schedsi.cpu.time import Time
#: Whether to log individual times, or only the sum
LOG_INDIVIDUAL = True
class _ThreadStats: # pylint: disable=too-few-public-methods
"""Thread statistics."""
def __init__(self):
"""Create a :class:`_ThreadStats`."""
self.finished_time = None
self.response_time = None
self.ctxsw = []
self.run = []
self.total_run = Time(0)
self.wait = [[]]
class Thread:
"""The basic thread class.
A thread has
* an associated module
* a locally unique thread id
* ready time (`None` if finished)
* response units - after how many units to set
:attr:`stats.response_time` (`None` if irrelevant)
* remaining workload (`None` if infinite)
* a lock indicating whether this thread is currently active
* :class:`_ThreadStats`
"""
def __init__(self, module, tid=None, *, ready_time=0, units=None, response_units=None):
"""Create a :class:`Thread`."""
assert ready_time >= 0
assert units is None or units >= 0
assert response_units is None or units is None or response_units <= units
self.module = module
if tid is None:
tid = str(module.num_work_threads())
self.tid = tid
self.ready_time = ready_time
self.response_units = response_units
self.remaining = units
self.is_running = threading.Lock()
self.stats = _ThreadStats()
def execute(self):
"""Simulate execution.
The thread will run for as long as it can.
Yields a :class:`~schedsi.cpurequest.Request`.
Consumes the current time.
"""
locked = self.is_running.acquire(False)
assert locked
current_time = yield cpurequest.Request.current_time()
while True:
current_time = yield from self._execute(current_time, None)
def _update_ready_time(self, current_time):
"""Update ready_time while executing.
Includes some checks to make sure the state is sane.
"""
assert self.ready_time is not None and 0 <= self.ready_time <= current_time
assert self.is_running.locked()
self.ready_time = current_time
def _execute(self, current_time, run_time):
"""Simulate execution.
Update some state.
Yields an execute :class:`~schedsi.cpurequest.Request`
respecting :attr:`remaining`, so it won't yield more than that.
Returns the next current time or None if :attr:`remaining` is 0.
"""
assert not self.is_finished()
self._update_ready_time(current_time)
if run_time is None:
run_time = self.remaining
else:
assert run_time > 0
assert self.remaining is None or run_time <= self.remaining
current_time = yield cpurequest.Request.execute(run_time)
if self.is_finished():
yield cpurequest.Request.idle()
return
return current_time
def is_finished(self):
"""Check if the :class:`Thread` is finished.
Returns True if the :class:`Thread` has nothing left to do,
False otherwise.
"""
return self.remaining == 0
def run_ctxsw(self, _current_time, run_time):
"""Update runtime state.
This should be called just after a context switch to another thread
when this was just the active thread, or when returning to this thread,
whether the context switch was successful or not.
`current_time` refers to the time just after the context switch.
"""
if not self.is_running.locked():
# this can happen if the thread was just switched to when the timer elapsed
# and we now switch away from this thread
locked = self.is_running.acquire(False)
assert locked
if LOG_INDIVIDUAL:
self.stats.ctxsw.append(run_time)
def run_background(self, _current_time, _run_time):
"""Update runtime state.
This should be called while the thread is in the context stack, but not
active (not the top).
`_current_time` refers to the time just after the active thread has run.
"""
assert self.is_running.locked()
# never called for work threads
assert False
def run_crunch(self, current_time, run_time):
"""Update runtime state.
This should be called while the thread is active.
`current_time` refers to the time just after this thread has run.
"""
assert self.is_running.locked()
self.stats.total_run += run_time
if LOG_INDIVIDUAL:
self.stats.run[-1].append(run_time)
assert self.stats.total_run == sum(map(sum, self.stats.run))
self.ready_time += run_time
assert self.ready_time == current_time
if self.response_units is not None:
self.response_units -= run_time
if self.response_units <= 0:
self.stats.response_time = current_time + self.response_units
self.response_units = None
if self.remaining is not None:
assert self.remaining >= run_time
self.remaining -= run_time
if self.is_finished():
# the job was completed within the slice
self.end()
return
def end(self):
"""End execution."""
assert self.is_finished()
assert self.response_units is None
self.stats.finished_time = self.ready_time
# never start again
self.ready_time = None
def suspend(self, current_time):
"""Become suspended.
This should be called when the thread becomes inactive,
but will be resumed later.
`current_time` refers to the time before the context switch
away from this thread.
"""
if self.is_running.locked():
if LOG_INDIVIDUAL:
# only record waiting time if the thread has executed
self.stats.wait.append([])
if self.ready_time is not None:
self.ready_time = max(self.ready_time, current_time)
else:
assert self.stats.finished_time > 0
def resume(self, current_time, returning):
"""Resume execution.
This should be called after :meth:`suspend` to become active again.
`current_time` refers to the time just after the context switch.
"""
if self.is_finished():
return
assert self.ready_time is not None
if returning:
self._update_ready_time(current_time)
else:
if current_time >= self.ready_time:
if LOG_INDIVIDUAL:
# we only want to record waiting time if the thread is ready to execute
self.stats.wait[-1].append(current_time - self.ready_time)
self.stats.run.append([])
# we can't use _update_ready_time() here because we might not yet be executing
self.ready_time = current_time
def finish(self, _current_time):
"""Become inactive.
This should be called when the thread becomes inactive.
"""
assert self.is_running.locked()
self.is_running.release()
def get_statistics(self, current_time):
"""Obtain statistics.
Not thread-safe.
"""
# the CPU should be locked during this
# this means we can read data without locking self.is_running
stats = self.stats.__dict__.copy()
if not self.is_finished() and current_time >= self.ready_time:
assert self.ready_time is not None
stats['waiting'] = current_time - self.ready_time
if stats['wait'] and stats['wait'][-1] == []:
stats['wait'].pop()
stats['remaining'] = self.remaining
return stats
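# A small usage sketch; `module` is only consulted when tid is None, so passing
# None here is purely illustrative.
work = Thread(None, tid='worker-0', units=10)
print(work.is_finished())      # False - 10 units of work remain
print(work.get_statistics(0))  # _ThreadStats fields plus 'waiting' and 'remaining'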
| 32.706827
| 94
| 0.608915
| 1,025
| 8,144
| 4.687805
| 0.187317
| 0.077836
| 0.048699
| 0.029136
| 0.31155
| 0.227888
| 0.150052
| 0.13257
| 0.091988
| 0.064932
| 0
| 0.002316
| 0.310658
| 8,144
| 248
| 95
| 32.83871
| 0.85358
| 0.336198
| 0
| 0.232759
| 0
| 0
| 0.005653
| 0
| 0
| 0
| 0
| 0
| 0.189655
| 1
| 0.12069
| false
| 0
| 0.025862
| 0
| 0.206897
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fd2e8e4f0baea2b1df6722ef44a729d6280f10cc
| 5,552
|
py
|
Python
|
application/src/initializer/auxiliary_methods.py
|
eyardley/CSC648-SoftwareEngineering-Snapster
|
6dbe1cf9b34de6d6dbc6be75db3a34583f67c01a
|
[
"MIT"
] | null | null | null |
application/src/initializer/auxiliary_methods.py
|
eyardley/CSC648-SoftwareEngineering-Snapster
|
6dbe1cf9b34de6d6dbc6be75db3a34583f67c01a
|
[
"MIT"
] | 3
|
2021-06-08T21:39:12.000Z
|
2022-01-13T02:46:20.000Z
|
application/src/initializer/auxiliary_methods.py
|
eyardley/CSC648-SoftwareEngineering-Snapster
|
6dbe1cf9b34de6d6dbc6be75db3a34583f67c01a
|
[
"MIT"
] | 1
|
2021-05-09T21:01:28.000Z
|
2021-05-09T21:01:28.000Z
|
def fill_team_about(db):
###############################
# FILL TEAM ABOUT TABLE #
###############################
print('Inserting data into team member profiles table')
add_about_team_query = ("INSERT INTO team_about "
"(name, link, position, image, description, facebook, twitter, instagram, linkedin) "
"VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s)")
team_about_entries = [
('Avery Chen', '/avery', 'Back-end Member', 'static/team_images/avery.jpg', 'Undergraduate student at SFSU',
'', '', '', ''),
('Akhil Gandu', '/akhil', 'GitHub Master', 'static/team_images/akhil.jpg', 'Graduate Student at SFSU',
'https://www.facebook.com/rockstar290', 'https://twitter.com/Rockstar5645',
'https://www.instagram.com/akhil_gandu/', 'https://www.linkedin.com/in/akhilgandu/'),
('Chris Eckhardt', '/chris', 'Back-end lead', 'static/team_images/chris.jpg', 'Undergrad at SFState', '', '',
'https://www.instagram.com/chris_evan_eckhardt/', 'https://www.linkedin.com/in/christopher-eckhardt-04abb1119/'),
('Elliot Yardley', '/elliot', 'Front-end lead', 'static/team_images/elliot.jpg', 'Graduate Student, SFSU',
'', '', '', ''),
('Thomas Yu', '/thomas', 'Front End Member', 'static/team_images/Thomas.jpg', 'Undergraduate Student, SFSU',
'', '', '', ''),
('Bakulia Kurmant', '/bakulia', 'Team Lead', 'static/team_images/bakulia.jpg', 'In my spare time I like going outdoors, techno music, reading, traveling', '', '', 'https://www.instagram.com/bakuliak/?hl=en', 'https://www.linkedin.com/in/bakulia-kurmant/')
]
for team_about_entry in team_about_entries:
db.query(add_about_team_query, team_about_entry)
db.commit()
print('Inserted data into team member profiles table')
def fill_media_types(db):
############################
# FILL MEDIA TYPES #
############################
print('Initializing media types table with an enumeration of available media types')
add_media_type_query = ("INSERT INTO media_types "
"(media_type)"
"VALUES (%s)")
media_types_entries = [
('all',),
('image',),
('video',),
('audio',),
('document',)
]
for media_type_entry in media_types_entries:
db.query(add_media_type_query, media_type_entry)
db.commit()
print('Media types table initialized')
def fill_digital_media(db):
############################
# FILL DIGITAL MEDIA #
############################
print('Inserting data test entries into digital media table')
add_digital_media_query = ("INSERT INTO digital_media "
"(owner_id, name, description, file_path, thumbnail_path, category, media_type, price, approval) "
"VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s)")
digital_media_entries = [
('1', 'sponge bob 1', 'this is the first test photo', 'M2_test_images/sb1.jpg', 'thumbnails/sb1_t.jpg',
'all', 'image', 00.00, 1),
('2', 'sponge bob 2', 'this is the second test photo', 'M2_test_images/sb2.jpg','thumbnails/sb2_t.jpg' ,
'all', 'image' , 89.99, 1),
('3', 'sponge bob 3', 'this is the third test photo', 'M2_test_images/sb3.jpg','thumbnails/sb3_t.jpg' ,
'all', 'image' , 00.00, 1),
('4', 'sponge bob 4', 'this is the fourth test photo', 'M2_test_images/sb4.jpg','thumbnails/sb4_t.jpg' ,
'all', 'image' , 69.99, 1),
('5', 'sponge bob 5', 'this is the fifth test photo', 'M2_test_images/sb5.jpg','thumbnails/sb5_t.jpg' ,
'all', 'image' , 00.00, 1),
('1', 'sponge bob 6', 'this is the sixth test photo', 'M2_test_images/sb1.jpg', 'thumbnails/sb1_t.jpg',
'all', 'image', 99.99, 1),
('2', 'sponge bob 7', 'this is the seventh test photo', 'M2_test_images/sb2.jpg','thumbnails/sb2_t.jpg' ,
'all', 'image' , 89.99, 1),
('3', 'sponge bob 8', 'this is the eight test photo', 'M2_test_images/sb3.jpg','thumbnails/sb3_t.jpg' ,
'all', 'image' , 79.99, 1),
('4', 'sponge bob 9', 'this is the ninth test photo', 'M2_test_images/sb4.jpg','thumbnails/sb4_t.jpg' ,
'all', 'image' , 69.99, 1),
('5', 'sponge bob 10', 'this is the tenth test photo', 'M2_test_images/sb5.jpg','thumbnails/sb5_t.jpg' ,
'all', 'image' , 59.99, 1),
('1', 'sponge bob 11', 'this is the eleventh test photo', 'M2_test_images/sb1.jpg', 'thumbnails/sb1_t.jpg',
'all', 'image', 99.99, 1),
('2', 'sponge bob 12', 'this is the twelfth test photo', 'M2_test_images/sb2.jpg','thumbnails/sb2_t.jpg' ,
'all', 'image' , 89.99, 1),
('3', 'sponge bob 13', 'this is the thirteenth test photo', 'M2_test_images/sb3.jpg','thumbnails/sb3_t.jpg' ,
'all', 'image' , 79.99, 1),
('4', 'sponge bob 14', 'this is the fourteenth test photo', 'M2_test_images/sb4.jpg','thumbnails/sb4_t.jpg' ,
'all', 'image' , 69.99, 1),
('5', 'sponge bob 15', 'this is the fifteenth test photo', 'M2_test_images/sb5.jpg','thumbnails/sb5_t.jpg' ,
'all', 'image' , 59.99, 0)
]
for digital_media_entry in digital_media_entries:
db.query(add_digital_media_query, digital_media_entry)
db.commit()
print('Initialization of digital media entries table complete')
| 52.377358
| 263
| 0.568444
| 719
| 5,552
| 4.23644
| 0.222531
| 0.010506
| 0.04432
| 0.073867
| 0.395272
| 0.343073
| 0.322718
| 0.317794
| 0.317794
| 0.317794
| 0
| 0.039896
| 0.237032
| 5,552
| 106
| 264
| 52.377358
| 0.679178
| 0.012968
| 0
| 0.2625
| 0
| 0.025
| 0.562654
| 0.094878
| 0
| 0
| 0
| 0
| 0
| 1
| 0.0375
| false
| 0
| 0
| 0
| 0.0375
| 0.075
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fd3187f8e540b93ec7789114fa6cc6e3969608ec
| 1,335
|
py
|
Python
|
pyBRML/pyBRML/core.py
|
anich003/brml_toolkit
|
de8218bdf333902431d4c0055fcf5cb3dc47d0c1
|
[
"MIT"
] | null | null | null |
pyBRML/pyBRML/core.py
|
anich003/brml_toolkit
|
de8218bdf333902431d4c0055fcf5cb3dc47d0c1
|
[
"MIT"
] | null | null | null |
pyBRML/pyBRML/core.py
|
anich003/brml_toolkit
|
de8218bdf333902431d4c0055fcf5cb3dc47d0c1
|
[
"MIT"
] | null | null | null |
import copy
from pyBRML import utils
from pyBRML import Array
def multiply_potentials(list_of_potentials):
"""
Returns the product of each potential in list_of_potentials, useful for
calculating joint probabilities.
For example, if the joint probability of a system is defined as
p(A,B,C) = p(C|A,B) p(A) p(B)
then, list_of_potentials should contain 3 potentials corresponding to each factor.
Since, potentials can be defined in an arbitrary order, each potential will be
reshaped and cast using numpy ndarray functions before being multiplied, taking
advantage of numpy's broadcasting functionality.
"""
# Collect the set of variables from each pot. Used to reshape each potential.table
variable_set = set(var for potential in list_of_potentials for var in potential.variables)
variable_set = list(variable_set)
# Copy potentials to avoid mutating original objects
potentials = copy.deepcopy(list_of_potentials)
# Reshape each potential prior to taking their product; store the result back,
# otherwise the reshaped value returned by format_table would be discarded
for idx, pot in enumerate(potentials):
potentials[idx] = utils.format_table(pot.variables, pot.table, variable_set)
# Multiply potentials and return
new_potential = potentials[0]
for pot in potentials[1:]:
new_potential.table = new_potential.table * pot.table
return new_potential
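# The broadcasting idea described in the docstring, shown with plain numpy
# arrays rather than pyBRML potential objects: once two factor tables share a
# common axis order, an elementwise product yields the joint table.
import numpy as np

p_a = np.array([0.3, 0.7])                # p(A), axis order [A]
p_b_given_a = np.array([[0.9, 0.2],       # p(B|A), axis order [B, A]
                        [0.1, 0.8]])
joint = p_b_given_a * p_a.reshape(1, 2)   # p(A, B) as a [B, A] table
print(joint.sum())                        # 1.0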
| 35.131579
| 94
| 0.743071
| 190
| 1,335
| 5.115789
| 0.436842
| 0.030864
| 0.082305
| 0.034979
| 0.055556
| 0
| 0
| 0
| 0
| 0
| 0
| 0.002825
| 0.204494
| 1,335
| 37
| 95
| 36.081081
| 0.912429
| 0.534831
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.076923
| false
| 0
| 0.230769
| 0
| 0.384615
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fd3394f2b7968055dc2a5d2b8bdde46ae4644c49
| 2,098
|
py
|
Python
|
lib/akf_known_uncategories.py
|
UB-Mannheim/docxstruct
|
dd6d99b6fd6f5660fdc61a14b60e70a54ac9be85
|
[
"Apache-2.0"
] | 1
|
2019-03-06T14:59:44.000Z
|
2019-03-06T14:59:44.000Z
|
lib/akf_known_uncategories.py
|
UB-Mannheim/docxstruct
|
dd6d99b6fd6f5660fdc61a14b60e70a54ac9be85
|
[
"Apache-2.0"
] | null | null | null |
lib/akf_known_uncategories.py
|
UB-Mannheim/docxstruct
|
dd6d99b6fd6f5660fdc61a14b60e70a54ac9be85
|
[
"Apache-2.0"
] | null | null | null |
import re
class KnownUncategories(object):
"""
List of known entries in test_data which are not categories,
but are recognized as such
"""
def __init__(self):
# un-category regex strings (care for commas)
self.uc = [
"Beteiligung", # 1956: is part of Beteiligungen
"Ferngespräche", # 1956: is part of Fernruf/Telefon
"Kapital", # 1956: is part of multiple top-level items
"Umstellung \d\d?", # 1956: is part of Grundkapital or other
"Dividenden ab \d{4}.*", # 1956: is part of Dividenden or other (with year or yearspan)s
"^Kurs.*", # 1956: second level tag
"ab \d{4}(\/\d{2})?" # 1956: i.e "ab 1949/50"-part of other categories
]
# non-specific keys (which get not removed from original-rest in analysis)
self.nkeys = [
"street",
"street_number",
"additional_info",
"city",
"name",
"title",
"rest",
"location",
"number_Sa.-Nr.",
"rest_info",
"bank",
"title",
"amount",
"ord_number",
"organization",
]
# create corresponding regexes
self.uc_regex = []
for item in self.uc:
regex_compiled = re.compile(item)
self.uc_regex.append(regex_compiled)
@property
def uncategories(self):
return self.uc
@property
def unkeys(self):
return self.nkeys
def check_uncategories(self, text_to_check):
"""
Allows to compare a tag against the existing uncategories
:param text_to_check: tag text
:return: True if un-category, False if not
"""
for regex_to_check in self.uc_regex:
match_result = regex_to_check.search(text_to_check)
if match_result is not None:
return True
return False
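# Usage example:
uncats = KnownUncategories()
print(uncats.check_uncategories('Beteiligung'))   # True  - matches an un-category pattern
print(uncats.check_uncategories('Grundkapital'))  # False - treated as a real category
print('street' in uncats.unkeys)                  # True  - a non-specific key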
| 31.313433
| 108
| 0.515253
| 229
| 2,098
| 4.598253
| 0.510917
| 0.034188
| 0.047483
| 0.05698
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.029088
| 0.393708
| 2,098
| 67
| 109
| 31.313433
| 0.798742
| 0.306482
| 0
| 0.088889
| 0
| 0
| 0.152738
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.088889
| false
| 0
| 0.022222
| 0.044444
| 0.222222
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fd33bdf592a5bbf5b20d72627b7e89fa294ef5bf
| 1,640
|
py
|
Python
|
maps/templatetags/mapas_tags.py
|
lsalta/mapground
|
d927d283dab6f756574bd88b3251b9e68f000ca7
|
[
"MIT"
] | null | null | null |
maps/templatetags/mapas_tags.py
|
lsalta/mapground
|
d927d283dab6f756574bd88b3251b9e68f000ca7
|
[
"MIT"
] | 3
|
2020-02-11T23:04:56.000Z
|
2021-06-10T18:07:53.000Z
|
maps/templatetags/mapas_tags.py
|
lsalta/mapground
|
d927d283dab6f756574bd88b3251b9e68f000ca7
|
[
"MIT"
] | 1
|
2021-08-20T14:49:09.000Z
|
2021-08-20T14:49:09.000Z
|
from django import template
register = template.Library()
def mostrar_resumen_mapa(context, m, order_by):
# print context
to_return = {
'mapa': m,
'order_by': order_by,
}
return to_return
@register.inclusion_tag('mapas/lista_mapas.html')
def mostrar_mapas(lista_mapas, order_by):
to_return = {
'lista_mapas': lista_mapas,
'order_by': order_by
}
return to_return
register.inclusion_tag('mapas/mapa.html', takes_context=True)(mostrar_resumen_mapa)
def quitar_char(value, arg):
return value.replace(arg, ' ')
register.filter('quitar_char',quitar_char)
def replace_text(value, replacement):
rep = replacement.split(',')
try:
return value.replace(rep[0], rep[1])
except:
return value
register.filter('replace_text',replace_text)
def truncar_string(value, max_length=0):
if max_length==0:
return value
return value[:max_length]+('...' if len(value)>max_length else '')
register.filter('truncar_string',truncar_string)
def get_range(value):
"""
Filter - returns a list containing range made from given value
Usage (in template):
<ul>{% for i in 3|get_range %}
<li>{{ i }}. Do something</li>
{% endfor %}</ul>
Results with the HTML:
<ul>
<li>0. Do something</li>
<li>1. Do something</li>
<li>2. Do something</li>
</ul>
Instead of 3 one may use the variable set in the views
"""
try:
return range( value )
except:
return None
register.filter('get_range',get_range)
def sort_by(queryset, order):
return queryset.order_by(*order.split(','))
register.filter('sort_by',sort_by)
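# The filters above can also be exercised directly from Python, for example:
print(truncar_string('MapGround', 3))              # 'Map...'
print(quitar_char('a_b_c', '_'))                   # 'a b c'
print(replace_text('hello world', 'world,there'))  # 'hello there'
print(list(get_range(3)))                          # [0, 1, 2]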
| 22.777778
| 83
| 0.668902
| 230
| 1,640
| 4.582609
| 0.33913
| 0.04649
| 0.049336
| 0.026565
| 0.135674
| 0.100569
| 0.100569
| 0.100569
| 0.100569
| 0.100569
| 0
| 0.006844
| 0.198171
| 1,640
| 71
| 84
| 23.098592
| 0.794677
| 0.215244
| 0
| 0.25
| 0
| 0
| 0.103336
| 0.017901
| 0
| 0
| 0
| 0
| 0
| 1
| 0.175
| false
| 0
| 0.025
| 0.05
| 0.45
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fd349e3352814cffd9d5b6c0c4f84624bb4c6bc6
| 1,868
|
py
|
Python
|
app/services/aggregator/aggr.py
|
maestro-server/report-app
|
0bf9014400f2979c51c1c544347d5134c73facdf
|
[
"Apache-2.0"
] | 1
|
2020-05-19T20:18:05.000Z
|
2020-05-19T20:18:05.000Z
|
app/services/aggregator/aggr.py
|
maestro-server/report-app
|
0bf9014400f2979c51c1c544347d5134c73facdf
|
[
"Apache-2.0"
] | 2
|
2019-10-21T14:56:04.000Z
|
2020-03-27T12:48:26.000Z
|
app/services/aggregator/aggr.py
|
maestro-server/report-app
|
0bf9014400f2979c51c1c544347d5134c73facdf
|
[
"Apache-2.0"
] | null | null | null |
import pandas as pd
from pydash.objects import get
class Aggregator(object):
def __init__(self, field, lens=None, sublens='_id', include=[], opts={}):
self._field = field
self._lens = lens
self._sublens = sublens
self._result = []
self._transf = []
self._include = include
self._opts = opts
self._df = None
def execute(self, df, entity='all'):
if (not self._include) or (entity in self._include):
self.aggregate(df)
def aggregate(self, df):
self._tmp_dataframe = df \
.dropna() \
.apply(self.transformData)
if "stack" in self._transf:
self._tmp_dataframe = self._tmp_dataframe.stack() \
.reset_index(level=1, drop=True)
self.groupCount()
def groupAggrCount(self):
self._result = self._tmp_dataframe \
.groupby(self._sub) \
.agg({self._aggrk: self._aggr}) \
.get(self._aggrk)
def groupCount(self):
self._result = self._tmp_dataframe \
.groupby(self._tmp_dataframe) \
.count()
def transformData(self, data):
if isinstance(data, dict):
data = get(data, self._lens)
if isinstance(data, list):
self._transf.append("stack")
data = map(self.reducev, data)
return pd.Series(data)
return data
def reducev(self, data):
if isinstance(data, dict):
return get(data, self._sublens)
return data
def getField(self):
return self._field
def uniqueField(self):
arr = [self._field]
if self._lens:
arr += self._lens.split(".")
return "_".join(arr)
def getResult(self):
return self._result
def getOpts(self):
return self._opts
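# A minimal usage sketch counting plain string values; dict/list values follow
# the same path via the `lens` argument and pydash's get.
agg = Aggregator(field='fruit')
agg.execute(pd.Series(['apple', 'banana', 'apple', None]))
print(agg.getResult())  # apple: 2, banana: 1 (missing values are dropped first)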
| 23.64557
| 77
| 0.556745
| 207
| 1,868
| 4.806763
| 0.328502
| 0.042211
| 0.096482
| 0.036181
| 0.138693
| 0.138693
| 0.082412
| 0.082412
| 0
| 0
| 0
| 0.000798
| 0.329229
| 1,868
| 78
| 78
| 23.948718
| 0.793296
| 0
| 0
| 0.109091
| 0
| 0
| 0.009636
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.2
| false
| 0
| 0.036364
| 0.054545
| 0.4
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fd3bd480b9c0a1b8e0dc9e02d722d288943bec44
| 357
|
py
|
Python
|
DataStructuresAndAlgorithms/sorting algorithms/SelectionSort.py
|
armaan2k/Training-Exercises
|
6dd94efb6cd6e0dc6c24e2b7d5e74588a74d190d
|
[
"MIT"
] | null | null | null |
DataStructuresAndAlgorithms/sorting algorithms/SelectionSort.py
|
armaan2k/Training-Exercises
|
6dd94efb6cd6e0dc6c24e2b7d5e74588a74d190d
|
[
"MIT"
] | null | null | null |
DataStructuresAndAlgorithms/sorting algorithms/SelectionSort.py
|
armaan2k/Training-Exercises
|
6dd94efb6cd6e0dc6c24e2b7d5e74588a74d190d
|
[
"MIT"
] | null | null | null |
def selection_sort(A):
n = len(A)
for i in range(n - 1):
position = i
for j in range(i + 1, n):
if A[j] < A[position]:
position = j
temp = A[i]
A[i] = A[position]
A[position] = temp
A = [3, 5, 8, 9, 6, 2]
print('Original Array: ', A)
selection_sort(A)
print('Sorted Array: ', A)
| 21
| 34
| 0.473389
| 57
| 357
| 2.929825
| 0.438596
| 0.161677
| 0.167665
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.035556
| 0.369748
| 357
| 16
| 35
| 22.3125
| 0.706667
| 0
| 0
| 0
| 0
| 0
| 0.084034
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.071429
| false
| 0
| 0
| 0
| 0.071429
| 0.142857
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fd464df7fbbebbe26bc4d827bb8cf980aecbe03a
| 13,019
|
py
|
Python
|
src/model/build_models.py
|
VinGPan/classification_model_search
|
fab7ce6fc131b858f1b79633e0f7b86d1446c93d
|
[
"MIT"
] | null | null | null |
src/model/build_models.py
|
VinGPan/classification_model_search
|
fab7ce6fc131b858f1b79633e0f7b86d1446c93d
|
[
"MIT"
] | null | null | null |
src/model/build_models.py
|
VinGPan/classification_model_search
|
fab7ce6fc131b858f1b79633e0f7b86d1446c93d
|
[
"MIT"
] | null | null | null |
import os
import os.path
import pickle
from shutil import copyfile
import numpy as np
import pandas as pd
import xgboost as xgb
from sklearn.decomposition import KernelPCA
from sklearn.decomposition import PCA
from sklearn.ensemble import AdaBoostClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.exceptions import ConvergenceWarning
from sklearn.externals import joblib
from sklearn.impute import SimpleImputer
from sklearn.linear_model import LogisticRegression, LinearRegression
from sklearn.manifold import Isomap
from sklearn.manifold import LocallyLinearEmbedding
from sklearn.metrics import accuracy_score, balanced_accuracy_score, r2_score, mean_absolute_error, mean_squared_error
from sklearn.metrics import make_scorer
from sklearn.model_selection import GridSearchCV
from sklearn.naive_bayes import GaussianNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler, MinMaxScaler
from sklearn.svm import SVC
from sklearn.utils.testing import ignore_warnings
from src.model.utils import makedir
from src.utils.logging import logger
#####################################################################
# HERE IS A LIST OF VARIOUS LIBRARIES WE STUDIED DURING THE SCS_3253_024 Machine Learning COURSE that are
# relevant to the classification problem. We will try to use as many of these ideas as possible for this project.
#####################################################################
def add_preproc_step(preproc_str, steps):
if preproc_str == 'min_max':
steps.append(('preprocs', MinMaxScaler()))
elif preproc_str == 'standard_scalar':
steps.append(('preprocs', StandardScaler()))
elif preproc_str == 'none':
pass
else:
logger.error("unsupported preprocs option " + preproc_str)
raise Exception("unsupported preprocs option " + preproc_str)
def add_transform_step(transforms_str, steps, transforms_info, param_grid):
if transforms_str == 'pca':
steps.append(('transforms', PCA()))
elif transforms_str == 'kpca':
steps.append(('transforms', KernelPCA(kernel='rbf')))
elif transforms_str == 'lle':
steps.append(('transforms', LocallyLinearEmbedding()))
# elif transforms_str == 'mds':
# steps.append(('transforms', MDS())) # DOES NOT HAVE transform() function
# param_grid[0]["transforms__n_components"] = [3, 4, 5]
elif transforms_str == 'isomap':
steps.append(('transforms', Isomap()))
# elif transforms_str == 'tsne': # DOES NOT HAVE transform() function
# steps.append(('transforms', TSNE()))
# param_grid[0]["transforms__n_components"] = [3, 4, 5]
elif transforms_str == 'none':
pass
else:
logger.error("unsupported transforms option " + transforms_str)
raise Exception("unsupported transforms option " + transforms_str)
if 'params' in transforms_info:
for trans_param in transforms_info['params']:
vals = trans_param['vals']
name = trans_param['name']
for vidx in range(len(vals)):
if vals[vidx] == 'None':
vals[vidx] = None
param_grid[0]["transforms__" + name] = vals
def add_classifier_step(clf_str, steps, clf_info, param_grid, tot_classes):
########### classifiers ################
if clf_str == 'logistic':
steps.append(('clf', LogisticRegression(multi_class='auto', random_state=0, solver='liblinear')))
elif clf_str == 'naive_bayes':
steps.append(('clf', GaussianNB()))
elif clf_str == 'knn':
steps.append(('clf', KNeighborsClassifier()))
elif clf_str == 'random_forest':
steps.append(('clf', RandomForestClassifier()))
elif clf_str == 'svc':
steps.append(('clf', SVC(class_weight='balanced', random_state=42)))
elif clf_str == 'xgboost':
steps.append(('clf', xgb.XGBClassifier(random_state=42, objective="multi:softmax", num_class=tot_classes)))
elif clf_str == 'adaboost':
steps.append(('clf', AdaBoostClassifier(random_state=42)))
elif clf_str == 'gradboost':
steps.append(('clf', GradientBoostingClassifier(random_state=42)))
#########################################
elif clf_str == 'linear':
steps.append(('clf', LinearRegression()))
else:
logger.error("Classifier option " + clf_str + " not supported")
raise Exception("Classifier option " + clf_str + " not supported")
if 'params' in clf_info:
for clf_param in clf_info['params']:
vals = clf_param['vals']
name = clf_param['name']
for vidx in range(len(vals)):
if vals[vidx] == 'None':
vals[vidx] = None
param_grid[0]["clf__" + name] = vals
def get_crashed_list(configs):
crashed_list_fname = "output/" + configs["experiment_name"] + "/" + "crashed.txt"
if os.path.exists(crashed_list_fname):
fid = open(crashed_list_fname, "r")
crashed_list = []
for line in fid:
crashed_list.append(line.strip())
fid.close()
else:
crashed_list = []
fid = open(crashed_list_fname, "w")
fid.close()
return crashed_list, crashed_list_fname
@ignore_warnings(category=ConvergenceWarning)
def build_models(configs):
mtype = configs['mtype']
data_path = "output/" + configs['experiment_name'] + "/features.csv"
train_path = "output/" + configs['experiment_name'] + "/train.csv"
val_path = train_path.replace("train.csv", "val.csv")
makedir("output/" + configs['experiment_name'] + "/interim")
logger.info('Building Models for ' + str(train_path))
# Read train and val sets
X = pd.read_csv(data_path)
y = X[configs['target']].values
X = X.drop([configs['target']], axis=1)
X = X.values
ids = list((pd.read_csv(train_path, header=None).values)[:, 0])
ids.extend(list((pd.read_csv(val_path, header=None).values)[:, 0]))
X = X[ids, :]
y = y[ids]
all_scores_path = "output/" + configs["experiment_name"] + "/all_scores.pkl"
all_scores_done_flg = "output/" + configs["experiment_name"] + "/all_scores_flg"
crashed_list, crashed_list_fname = get_crashed_list(configs)
if os.path.exists(all_scores_path) and os.path.exists(all_scores_done_flg):
# If model building is already done, just return the results.
# This is helpful for displaying results in a Jupyter notebook.
logger.info('All the models have been built already.')
else:
all_scores = []
if mtype == 'classification':
tot_classes = np.unique(y).shape[0]
else:
tot_classes = None
# For Each classifier - For Each Data transform - For each Dimensionality reduction BUILD THE MODEL!
for clf_info in configs["models"]["classifiers"]:
for preproc_str in configs["models"]["preprocs"]:
for transforms_info in configs["models"]["transforms"]:
transforms_str = transforms_info['name']
clf_str = clf_info['name']
res_path = "output/" + configs["experiment_name"] + "/interim/" + clf_str + "_" + preproc_str + \
"_" + transforms_str + ".pkl"
model_str = 'classifier "' + clf_str + '" with preprocessing "' + preproc_str + \
'" and with transform "' + transforms_str + '"'
logger.info('Building ' + model_str)
################# ADD IMPUTERS #################################
steps = [('imputer', SimpleImputer(strategy='mean'))]
param_grid = [{}]
###################################################################
################# PICK A DATA TRANSFORM ###########################
add_preproc_step(preproc_str, steps)
###################################################################
################### PICK A Dimensionality reduction method ################
add_transform_step(transforms_str, steps, transforms_info, param_grid)
###################################################################
##################### PICK A classifier #######################
add_classifier_step(clf_str, steps, clf_info, param_grid, tot_classes)
###################################################################
##################### Perform grid search #####################
pipeline = Pipeline(steps=steps)
if mtype == 'classification':
scoring = {'balanced_accuracy': make_scorer(balanced_accuracy_score),
'accuracy': make_scorer(accuracy_score),
'f1': 'f1_micro'}
refit = 'balanced_accuracy'
else:
scoring = {'r2': make_scorer(r2_score), 'mae': make_scorer(mean_absolute_error),
'mse': make_scorer(mean_squared_error)}
refit = 'r2'
clf = GridSearchCV(estimator=pipeline, cv=5, param_grid=param_grid,
verbose=1, scoring=scoring, refit=refit)
if os.path.exists(res_path):
logger.info('Model has been built already. Loading the model ' + str(res_path))
clf = joblib.load(res_path)
elif model_str in crashed_list:
logger.info('Model fails to build. Ignoring. Please consider modifying the params.')
continue
else:
try:
clf.fit(X, y)
except Exception as e:
logger.info("Model building crashed for " + model_str)
logger.error(e, exc_info=True)
fid = open(crashed_list_fname, "a")
fid.write(model_str + "\n")
fid.close()
continue
joblib.dump(clf, res_path)
###################################################################
################### Record scores ################
cv_results = clf.cv_results_
score_info = {'classifier': clf_str, 'preprocess': preproc_str, 'transform': transforms_str,
'res_path': res_path}
for scorer in scoring:
best_index = np.nonzero(cv_results['rank_test_%s' % scorer] == 1)[0][0]
best_score = cv_results['mean_test_%s' % scorer][best_index]
if scorer == 'balanced_accuracy':
score_info['balanced_accuracy'] = np.round(best_score * 100, 1)
elif scorer == 'accuracy':
score_info['accuracy'] = np.round(best_score * 100, 1)
elif scorer == 'f1':
score_info['f1_score'] = np.round(best_score, 2)
elif scorer == 'r2':
score_info['r2'] = np.round(best_score, 2)
elif scorer == 'mae':
score_info['mae'] = np.round(best_score, 2)
elif scorer == 'mse':
score_info['mse'] = np.round(best_score, 2)
if mtype == 'classification':
res_str = ' => accuracy = ' + str(score_info['accuracy']) + '%, F1 = ' + \
str(score_info['f1_score'])
else:
res_str = ' => r2 = ' + str(score_info['r2'])
logger.info(model_str + res_str)
all_scores.append(score_info)
pickle.dump(all_scores, open(all_scores_path, "wb"))
###################################################################
# end each transforms
# end each preprocs
# end each classifiers
if mtype == 'classification':
all_scores = sorted(all_scores, key=lambda x: x['balanced_accuracy'], reverse=True)
else:
all_scores = sorted(all_scores, key=lambda x: x['r2'], reverse=True)
best_model_path = "output/" + configs["experiment_name"] + "/best_model.pkl"
copyfile(all_scores[0]['res_path'], best_model_path)
fid = open(all_scores_done_flg, "wb")
fid.close()
logger.info("All the models are built.")
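# --- Added illustrative sketch (not part of the original script) -------------
# The helpers above register hyper-parameters under the pipeline step names
# 'transforms' and 'clf' using the "step__param" keys that GridSearchCV
# expects. The minimal example below shows that convention in isolation;
# the chosen estimators and parameter values are assumptions for
# demonstration only.
def _param_grid_naming_example():
    from sklearn.decomposition import PCA
    from sklearn.linear_model import LogisticRegression
    from sklearn.model_selection import GridSearchCV
    from sklearn.pipeline import Pipeline
    pipe = Pipeline(steps=[('transforms', PCA()), ('clf', LogisticRegression(solver='liblinear'))])
    # keys follow "<step name>__<estimator parameter>", exactly as built by
    # add_transform_step() / add_classifier_step() above
    grid = [{'transforms__n_components': [2, 3], 'clf__C': [0.1, 1.0]}]
    return GridSearchCV(estimator=pipe, param_grid=grid, cv=5)
# ------------------------------------------------------------------------------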
| 47.688645
| 118
| 0.541132
| 1,325
| 13,019
| 5.111698
| 0.216604
| 0.032482
| 0.018603
| 0.031891
| 0.242433
| 0.150746
| 0.111324
| 0.088882
| 0.088882
| 0.067326
| 0
| 0.006846
| 0.293187
| 13,019
| 272
| 119
| 47.863971
| 0.729189
| 0.074583
| 0
| 0.142857
| 0
| 0
| 0.133548
| 0
| 0.004762
| 0
| 0
| 0
| 0
| 1
| 0.02381
| false
| 0.009524
| 0.138095
| 0
| 0.166667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fd4d6ed01b3decd5927f1d836a338350d16f500c
| 941
|
py
|
Python
|
LC_problems/822.py
|
Howardhuang98/Blog
|
cf58638d6d0bbf55b95fe08e43798e7dd14219ac
|
[
"MIT"
] | null | null | null |
LC_problems/822.py
|
Howardhuang98/Blog
|
cf58638d6d0bbf55b95fe08e43798e7dd14219ac
|
[
"MIT"
] | null | null | null |
LC_problems/822.py
|
Howardhuang98/Blog
|
cf58638d6d0bbf55b95fe08e43798e7dd14219ac
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
"""
@File : 822.py
@Contact : huanghoward@foxmail.com
@Modify Time : 2022/3/29 13:19
------------
"""
from typing import List
class Solution:
def flipgame(self, fronts: List[int], backs: List[int]) -> int:
ans = float('+inf')
for i in range(len(fronts)):
if fronts[i] == backs[i]:
continue
for x in [fronts[i], backs[i]]:
success = True
for j in range(len(fronts)):
if j == i:
continue
if x != fronts[j] or x != backs[j]:
continue
else:
success = False
break
if success:
ans = min(ans, x)
# LC 822 expects 0 when no valid number exists, so guard against the inf sentinel
return 0 if ans == float('+inf') else ans
if __name__ == '__main__':
s = Solution()
print(s.flipgame([1],[1]))
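# --- Added illustrative sketch (not part of the original solution) -----------
# The brute-force check above rejects a candidate x only when some card shows
# x on both sides.  Collecting those "same on both sides" values into a set
# gives the usual O(n) variant of the same idea (returning 0 when no valid
# number exists); this is an alternative sketch, not the author's code.
def flipgame_linear(fronts: List[int], backs: List[int]) -> int:
    same = {f for f, b in zip(fronts, backs) if f == b}
    candidates = [x for x in fronts + backs if x not in same]
    return min(candidates) if candidates else 0
# ------------------------------------------------------------------------------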
| 27.676471
| 67
| 0.420829
| 102
| 941
| 3.803922
| 0.568627
| 0.036082
| 0.051546
| 0.082474
| 0.092784
| 0
| 0
| 0
| 0
| 0
| 0
| 0.032505
| 0.444208
| 941
| 34
| 68
| 27.676471
| 0.709369
| 0.170032
| 0
| 0.130435
| 0
| 0
| 0.015524
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.043478
| false
| 0
| 0.043478
| 0
| 0.173913
| 0.043478
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fd4d784f79a128a2168a7d3f9c317a2fb64d12f1
| 22,795
|
py
|
Python
|
result/analyze.py
|
kuriatsu/PIE_RAS
|
8dd33b4d4f7b082337a2645c0a72082374768b52
|
[
"Apache-2.0"
] | null | null | null |
result/analyze.py
|
kuriatsu/PIE_RAS
|
8dd33b4d4f7b082337a2645c0a72082374768b52
|
[
"Apache-2.0"
] | null | null | null |
result/analyze.py
|
kuriatsu/PIE_RAS
|
8dd33b4d4f7b082337a2645c0a72082374768b52
|
[
"Apache-2.0"
] | null | null | null |
#! /usr/bin/python3
# -*- coding: utf-8 -*-
import pickle
import pandas as pd
import xml.etree.ElementTree as ET
import math
import seaborn as sns
import matplotlib.pyplot as plt
import numpy as np
import csv
import glob
import scikit_posthocs as sp
from scipy import stats
import os
# statsmodels objects (stats_anova, multicomp) are referenced in the parametric branches below
from statsmodels.stats import anova as stats_anova
from statsmodels.stats import multicomp
sns.set(context='paper', style='whitegrid')
hue_order = ["traffic light", "crossing intention", "trajectory"]
eps=0.01
tl_black_list = [
"3_3_96tl",
"3_3_102tl",
"3_4_107tl",
"3_4_108tl",
"3_5_112tl",
"3_5_113tl",
"3_5_116tl",
"3_5_117tl",
"3_5_118tl",
"3_5_119tl",
"3_5_122tl",
"3_5_123tl",
"3_5_126tl",
"3_5_127tl",
"3_6_128tl",
"3_6_137tl",
"3_7_142tl",
"3_8_153tl",
"3_8_160tl",
"3_9_173tl",
"3_9_174tl",
"3_9_179tl",
"3_10_185tl",
"3_10_188tl",
"3_11_205tl",
"3_12_218tl",
"3_12_221tl",
"3_15_241tl",
"3_16_256tl",
"3_16_257tl",
]
opposite_anno_list = ["3_16_259tl", "3_16_258tl", "3_16_249tl"]
log_data = None
data_path = "/home/kuriatsu/Dropbox/data/pie202203"
for file in glob.glob(os.path.join(data_path, "log*.csv")):
buf = pd.read_csv(file)
filename = file.split("/")[-1]
count = int(filename.replace("log_data_", "").split("_")[-1].replace(".csv", ""))
print("{}".format(filename))
if count in [0, 1, 2]:
print("skipped")
continue
trial = filename.split("_")[-1].replace(".csv", "")
buf["subject"] = filename.replace("log_data_", "").split("_")[0]
buf["task"] = filename.replace("log_data_", "").split("_")[1]
# response coding used below (matches the stacked-bar legend later in the script):
# 0 = hit, 1 = miss, 2 = false alarm, 3 = correct rejection, 4 = ignored, -1 = no intervention
correct_list = []
response_list = []
for idx, row in buf.iterrows():
if row.id in tl_black_list:
row.last_state = -2
if row.last_state == -1: # no intervention
correct_list.append(-1)
response_list.append(-1)
elif int(row.last_state) == int(row.state):
if row.id in opposite_anno_list:
correct_list.append(1)
if row.last_state == 1:
response_list.append(3)
elif row.last_state == 0:
response_list.append(0)
else:
print(f"last_state{row.last_state}, state{row.state}")
response_list.append(4) # ignored=4
else:
correct_list.append(0)
if row.last_state == 1:
response_list.append(1)
elif row.last_state == 0:
response_list.append(2)
else:
print(f"last_state{row.last_state}, state{row.state}")
response_list.append(4) # ignored=4
else:
if row.id in opposite_anno_list:
correct_list.append(0)
if row.last_state == 1:
response_list.append(1)
elif row.last_state == 0:
response_list.append(2)
else:
print(f"last_state{row.last_state}, state{row.state}")
response_list.append(4) # ignored=4
else:
correct_list.append(1)
if row.last_state == 1:
response_list.append(3)
elif row.last_state == 0:
response_list.append(0)
else:
print(f"last_state{row.last_state}, state{row.state}")
response_list.append(4) # ignored=4
buf["correct"] = correct_list
buf["response"] = response_list
len(correct_list)
if log_data is None:
log_data = buf
else:
log_data = log_data.append(buf, ignore_index=True)
task_list = {"int": "crossing intention", "tl": "traffic light", "traj":"trajectory"}
subject_data = pd.DataFrame(columns=["subject", "task", "acc", "int_length", "missing"])
for subject in log_data.subject.drop_duplicates():
for task in log_data.task.drop_duplicates():
for length in log_data.int_length.drop_duplicates():
target = log_data[(log_data.subject == subject) & (log_data.task == task) & (log_data.int_length == length)]
# acc = len(target[target.correct == 1])/(len(target))
acc = len(target[target.correct == 1])/(len(target[target.correct == 0]) + len(target[target.correct == 1])+eps)
missing = len(target[target.correct == -1])/(len(target[target.correct != -2])+eps)
buf = pd.DataFrame([(subject, task_list.get(task), acc, length, missing)], columns=subject_data.columns)
subject_data = pd.concat([subject_data, buf])
subject_data.acc = subject_data.acc * 100
subject_data.missing = subject_data.missing * 100
# sns.barplot(x="task", y="acc", hue="int_length", data=subject_data, ci="sd")
# sns.barplot(x="task", y="acc", data=subject_data, ci="sd")
################################################
print("check intervene acc")
################################################
for length in subject_data.int_length.drop_duplicates():
print(f"acc : length={length}")
target_df = subject_data[subject_data.int_length == length]
_, norm_p = stats.shapiro(target_df.acc.dropna())
_, var_p = stats.levene(
target_df[target_df.task == 'trajectory'].acc.dropna(),
target_df[target_df.task == 'crossing intention'].acc.dropna(),
target_df[target_df.task == 'traffic light'].acc.dropna(),
center='median'
)
# if norm_p < 0.05 or var_p < 0.05:
# print(f"norm:{norm_p}, var:{var_p}")
# print('steel-dwass\n', sp.posthoc_dscf(target_df, val_col='acc', group_col='task'))
# else:
# multicomp_result = multicomp.MultiComparison(np.array(target_df.dropna(how='any').acc, dtype="float64"), target_df.dropna(how='any').type)
# print(f"norm:{norm_p}, var:{var_p}")
# print('levene', multicomp_result.tukeyhsd().summary())
if norm_p < 0.05 or var_p < 0.05:
_, anova_p = stats.friedmanchisquare(
target_df[target_df.task == "trajectory"].acc,
target_df[target_df.task == "crossing intention"].acc,
target_df[target_df.task == "traffic light"].acc,
)
print(f"norm:{norm_p}, var:{var_p}")
print("anova(friedman test)", anova_p)
if anova_p < 0.05:
print('conover\n', sp.posthoc_conover(target_df, val_col="acc", group_col="task"))
print('steel-dwass\n', sp.posthoc_dscf(target_df, val_col='acc', group_col='task'))
else:
# melted_df = pd.melt(target_df, id_vars=["subject", "acc", "int_length"], var_name="task", value_name="rate")
# NOTE: melted_df and nasa_df are never defined in this script, so this parametric branch raises NameError if reached.
aov = stats_anova.AnovaRM(melted_df, "missing", "subject", ["task"])
print(f"norm:{norm_p}, var:{var_p}")
print("repeated anova: ", aov.fit())
multicomp_result = multicomp.MultiComparison(melted_df[length], nasa_df.task)
print(multicomp_result.tukeyhsd().summary())
fig, ax = plt.subplots()
sns.pointplot(x="int_length", y="acc", data=subject_data, hue="task", hue_order=hue_order, ax=ax, capsize=0.1, ci="sd")
ax.set_ylim(0.0, 100.0)
ax.set_xlabel("intervention time [s]", fontsize=18)
ax.set_ylabel("intervention accuracy [%]", fontsize=18)
ax.tick_params(labelsize=14)
ax.legend(fontsize=14)
plt.show()
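# --- Added illustrative sketch (not part of the original analysis) -----------
# The loop above (and the two loops below) repeat the same decision pattern:
# check normality (Shapiro) and equal variances (Levene); if either fails,
# fall back to the non-parametric Friedman test plus a post-hoc, otherwise use
# a parametric ANOVA.  Minimal, self-contained version with synthetic data
# (the random data is an assumption; the real script uses subject_data):
def _test_selection_example():
    rng = np.random.default_rng(0)
    a, b, c = rng.normal(size=(3, 20))               # three repeated conditions
    _, norm_p = stats.shapiro(np.concatenate([a, b, c]))
    _, var_p = stats.levene(a, b, c, center='median')
    if norm_p < 0.05 or var_p < 0.05:
        _, p = stats.friedmanchisquare(a, b, c)       # non-parametric route
    else:
        _, p = stats.f_oneway(a, b, c)                # parametric sketch (the script itself uses AnovaRM)
    return p
# ------------------------------------------------------------------------------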
################################################
print("check miss rate")
################################################
for length in subject_data.int_length.drop_duplicates():
print(f"miss : length={length}")
target_df = subject_data[subject_data.int_length == length]
_, norm_p = stats.shapiro(target_df.missing.dropna())
_, var_p = stats.levene(
target_df[target_df.task == 'trajectory'].missing.dropna(),
target_df[target_df.task == 'crossing intention'].missing.dropna(),
target_df[target_df.task == 'traffic light'].missing.dropna(),
center='median'
)
# if norm_p < 0.05 or var_p < 0.05:
# print(f"norm:{norm_p}, var:{var_p}")
# print('steel-dwass\n', sp.posthoc_dscf(target_df, val_col='missing', group_col='task'))
# else:
# multicomp_result = multicomp.MultiComparison(np.array(target_df.dropna(how='any').missing, dtype="float64"), target_df.dropna(how='any').type)
# print(f"norm:{norm_p}, var:{var_p}")
# print('levene', multicomp_result.tukeyhsd().summary())
if norm_p < 0.05 or var_p < 0.05:
_, anova_p = stats.friedmanchisquare(
target_df[target_df.task == "trajectory"].missing,
target_df[target_df.task == "crossing intention"].missing,
target_df[target_df.task == "traffic light"].missing,
)
print(f"norm:{norm_p}, var:{var_p}")
print("anova(friedman test)", anova_p)
if anova_p < 0.05:
print('steel-dwass\n', sp.posthoc_dscf(target_df, val_col='missing', group_col='task'))
print('conover\n', sp.posthoc_conover(target_df, val_col="missing", group_col="task"))
else:
# melted_df = pd.melt(target_df, id_vars=["subject", "acc", "int_length"], var_name="task", value_name="rate")
# NOTE: as above, melted_df and nasa_df are never defined here, so this branch raises NameError if reached.
aov = stats_anova.AnovaRM(melted_df, "missing", "subject", ["task"])
print(f"norm:{norm_p}, var:{var_p}")
print("repeated anova: ", aov.fit())
multicomp_result = multicomp.MultiComparison(melted_df[length], nasa_df.task)
print(multicomp_result.tukeyhsd().summary())
fig, ax = plt.subplots()
sns.pointplot(x="int_length", y="missing", data=subject_data, hue="task", hue_order=hue_order, ax = ax, capsize=0.1, ci=95)
ax.set_ylim(0.0, 100.0)
ax.set_xlabel("intervention time [s]", fontsize=18)
ax.set_ylabel("intervention missing rate [%]", fontsize=18)
ax.tick_params(labelsize=14)
ax.legend(fontsize=14)
plt.show()
#####################################
# mean val show
#####################################
target = subject_data[subject_data.task == "crossing intention"]
print("int acc mean: 1.0:{}, 3.0:{}, 5.0:{}, 8.0:{}\n std {} {} {} {}".format(
target[target.int_length == 1.0].acc.mean(),
target[target.int_length == 3.0].acc.mean(),
target[target.int_length == 5.0].acc.mean(),
target[target.int_length == 8.0].acc.mean(),
target[target.int_length == 1.0].acc.std(),
target[target.int_length == 3.0].acc.std(),
target[target.int_length == 5.0].acc.std(),
target[target.int_length == 8.0].acc.std(),
))
target = subject_data[subject_data.task == "trajectory"]
print("traj acc mean: 1.0:{}, 3.0:{}, 5.0:{}, 8.0:{}\n std {} {} {} {}".format(
target[target.int_length == 1.0].acc.mean(),
target[target.int_length == 3.0].acc.mean(),
target[target.int_length == 5.0].acc.mean(),
target[target.int_length == 8.0].acc.mean(),
target[target.int_length == 1.0].acc.std(),
target[target.int_length == 3.0].acc.std(),
target[target.int_length == 5.0].acc.std(),
target[target.int_length == 8.0].acc.std(),
))
target = subject_data[subject_data.task == "traffic light"]
print("tl acc mean: 1.0:{}, 3.0:{}, 5.0:{}, 8.0:{}\n std {} {} {} {}".format(
target[target.int_length == 1.0].acc.mean(),
target[target.int_length == 3.0].acc.mean(),
target[target.int_length == 5.0].acc.mean(),
target[target.int_length == 8.0].acc.mean(),
target[target.int_length == 1.0].acc.std(),
target[target.int_length == 3.0].acc.std(),
target[target.int_length == 5.0].acc.std(),
target[target.int_length == 8.0].acc.std(),
))
target = subject_data[subject_data.task == "crossing intention"]
print("int missing mean: 1.0:{}, 3.0:{}, 5.0:{}, 8.0:{}\n std {} {} {} {}".format(
target[target.int_length == 1.0].missing.mean(),
target[target.int_length == 3.0].missing.mean(),
target[target.int_length == 5.0].missing.mean(),
target[target.int_length == 8.0].missing.mean(),
target[target.int_length == 1.0].missing.std(),
target[target.int_length == 3.0].missing.std(),
target[target.int_length == 5.0].missing.std(),
target[target.int_length == 8.0].missing.std(),
))
target = subject_data[subject_data.task == "trajectory"]
print("traj missing mean: 1.0:{}, 3.0:{}, 5.0:{}, 8.0:{}\n std {} {} {} {}".format(
target[target.int_length == 1.0].missing.mean(),
target[target.int_length == 3.0].missing.mean(),
target[target.int_length == 5.0].missing.mean(),
target[target.int_length == 8.0].missing.mean(),
target[target.int_length == 1.0].missing.std(),
target[target.int_length == 3.0].missing.std(),
target[target.int_length == 5.0].missing.std(),
target[target.int_length == 8.0].missing.std(),
))
target = subject_data[subject_data.task == "traffic light"]
print("tl missing mean: 1.0:{}, 3.0:{}, 5.0:{}, 8.0:{}\n std {} {} {} {}".format(
target[target.int_length == 1.0].missing.mean(),
target[target.int_length == 3.0].missing.mean(),
target[target.int_length == 5.0].missing.mean(),
target[target.int_length == 8.0].missing.mean(),
target[target.int_length == 1.0].missing.std(),
target[target.int_length == 3.0].missing.std(),
target[target.int_length == 5.0].missing.std(),
target[target.int_length == 8.0].missing.std(),
))
###########################################
# collect wrong intervention ids
###########################################
task_list = {"int": "crossing intention", "tl": "traffic light", "traj":"trajectory"}
id_data = pd.DataFrame(columns=["id", "task", "false_rate", "missing", "total"])
for id in log_data.id.drop_duplicates():
for task in log_data.task.drop_duplicates():
for length in log_data.int_length.drop_duplicates():
target = log_data[(log_data.id == id) & (log_data.task == task) & (log_data.int_length == length)]
# acc = len(target[target.correct == 1])/(len(target))
total = len(target)
name = id.replace("tl","")+task+"_"+str(length)
if len(target) > 0:
false_rate = len(target[target.correct == 0])/len(target)
else:
false_rate = 0.0
missing = len(target[target.correct == -1])
buf = pd.DataFrame([(name, task, false_rate, missing, total)], columns=id_data.columns)
id_data = pd.concat([id_data, buf])
pd.set_option("max_rows", None)
sort_val = id_data.sort_values(["false_rate","total"], ascending=False)
false_playlist = sort_val[(sort_val.false_rate>0.0)&(sort_val.total>1)]
print(false_playlist)
false_playlist.to_csv("/home/kuriatsu/Dropbox/data/pie202203/false_playlist.csv")
# sns.barplot(x="id", y="acc", hue="int_length", data=id_data)
###############################################################################################
print("response rate stacked bar plot")
###############################################################################################
response_summary_pred = pd.DataFrame(columns=["int_length", "task", "response", "count"])
for int_length in log_data.int_length.drop_duplicates():
for task in log_data.task.drop_duplicates():
for response in [0, 1, 2, 3, -1]:
buf = pd.Series([int_length, task, response,
len(log_data[(log_data.int_length==int_length) & (log_data.task==task) & (log_data.response <= response)])/len(log_data[(log_data.int_length==int_length) & (log_data.task==task) & (log_data.response!=4)])],
index=response_summary_pred.columns)
response_summary_pred = response_summary_pred.append(buf, ignore_index=True)
fig, axes = plt.subplots()
cr = sns.barplot(x="task", y="count", hue="int_length", data=response_summary_pred[response_summary_pred.response==3], ax=axes, palette=sns.color_palette(["turquoise"]*6), edgecolor="0.2", order=["tl", "int", "traj"])
fa = sns.barplot(x="task", y="count", hue="int_length", data=response_summary_pred[response_summary_pred.response==2], ax=axes, palette=sns.color_palette(["orangered"]*6), edgecolor="0.2", order=["tl", "int", "traj"])
miss = sns.barplot(x="task", y="count", hue="int_length", data=response_summary_pred[response_summary_pred.response==1], ax=axes, palette=sns.color_palette(["lightsalmon"]*6), edgecolor="0.2", order=["tl", "int", "traj"])
hit = sns.barplot(x="task", y="count", hue="int_length", data=response_summary_pred[response_summary_pred.response==0], ax=axes, palette=sns.color_palette(["teal"]*6), edgecolor="0.2", order=["tl", "int", "traj"])
no_int = sns.barplot(x="task", y="count", hue="int_length", data=response_summary_pred[response_summary_pred.response==-1], ax=axes, palette=sns.color_palette(["gray"]*6), edgecolor="0.2", order=["tl", "int", "traj"])
axes.set_xticks([-0.3, -0.1, 0.1, 0.3, 0.7, 0.9, 1.1, 1.3, 1.7, 1.9, 2.1, 2.3])
axes.set_xticklabels(["1.0", "3.0", "5.0", "8.0", "1.0", "3.0", "5.0", "8.0", "1.0", "3.0", "5.0", "8.0"], fontsize=14)
# axes.set_yticklabels(fontsize=14)
ax_pos = axes.get_position()
fig.text(ax_pos.x1-0.75, ax_pos.y1-0.84, "traffic light", fontsize=14)
fig.text(ax_pos.x1-0.55, ax_pos.y1-0.84, "crossing intention", fontsize=14)
fig.text(ax_pos.x1-0.25, ax_pos.y1-0.84, "trajectory", fontsize=14)
axes.tick_params(labelsize=14)
axes.set_ylabel("Response Rate", fontsize=18)
axes.set_xlabel("")
handles, labels = axes.get_legend_handles_labels()
axes.legend(handles[::4], ["CR", "FA", "miss", "hit", "no_int"], bbox_to_anchor=(1.0, 1.0), loc='upper left', fontsize=14)
plt.show()
###############################################
# Workload
###############################################
workload = pd.read_csv("{}/workload.csv".format(data_path))
workload.satisfy = 10-workload.satisfy
workload_melted = pd.melt(workload, id_vars=["subject", "type"], var_name="scale", value_name="score")
#### nasa-tlx ####
for item in workload_melted.scale.drop_duplicates():
print(item)
_, norm_p1 = stats.shapiro(workload[workload.type == "trajectory"][item])
_, norm_p2 = stats.shapiro(workload[workload.type == "crossing intention"][item])
_, norm_p3 = stats.shapiro(workload[workload.type == "traffic light"][item])
_, var_p = stats.levene(
workload[workload.type == "trajectory"][item],
workload[workload.type == "crossing intention"][item],
workload[workload.type == "traffic light"][item],
center='median'
)
# use the Levene result here; the original referenced an undefined norm_p4
if norm_p1 < 0.05 or norm_p2 < 0.05 or norm_p3 < 0.05 or var_p < 0.05:
_, anova_p = stats.friedmanchisquare(
workload[workload.type == "trajectory"][item],
workload[workload.type == "crossing intention"][item],
workload[workload.type == "traffic light"][item],
)
print("anova(friedman test)", anova_p)
if anova_p < 0.05:
print(sp.posthoc_conover(workload, val_col=item, group_col="type"))
else:
# NOTE: nasa_df is never defined in this script; this leftover melt (and the MultiComparison below) would raise NameError.
melted_df = pd.melt(nasa_df, id_vars=["name", "experiment_type"], var_name="type", value_name="score")
aov = stats_anova.AnovaRM(workload_melted[workload_melted.type == item], "score", "subject", ["type"])
print("repeated anova: ", aov.fit())
multicomp_result = multicomp.MultiComparison(workload_melted[item], nasa_df.type)
print(multicomp_result.tukeyhsd().summary())
fig, ax = plt.subplots()
sns.barplot(x="scale", y="score", data=workload_melted, hue="type", hue_order=hue_order, ax=ax)
ax.set_ylim(0, 10)
ax.legend(bbox_to_anchor=(0.0, 1.0), loc='lower left', fontsize=14)
ax.set_xlabel("scale", fontsize=18)
ax.set_ylabel("score (lower is better)", fontsize=18)
ax.tick_params(labelsize=14)
plt.show()
###############################################
# necessary time
###############################################
time = pd.read_csv("/home/kuriatsu/Dropbox/documents/subjective_time.csv")
fig, ax = plt.subplots()
# mean_list = [
# time[time.type=="crossing intention"].ideal_time.mean(),
# time[time.type=="trajectory"].ideal_time.mean(),
# time[time.type=="traffic light"].ideal_time.mean(),
# ]
# sem_list = [
# time[time.type=="crossing intention"].ideal_time.sem(),
# time[time.type=="trajectory"].ideal_time.sem(),
# time[time.type=="traffic light"].ideal_time.sem(),
# ]
_, norm_p = stats.shapiro(time.ideal_time.dropna())
_, var_p = stats.levene(
time[time.type == 'crossing intention'].ideal_time.dropna(),
time[time.type == 'trajectory'].ideal_time.dropna(),
time[time.type == 'traffic light'].ideal_time.dropna(),
center='median'
)
if norm_p < 0.05 or var_p < 0.05:
print('steel-dwass\n', sp.posthoc_dscf(time, val_col='ideal_time', group_col='type'))
else:
multicomp_result = multicomp.MultiComparison(np.array(time.dropna(how='any').ideal_time, dtype="float64"), time.dropna(how='any').type)
print('levene', multicomp_result.tukeyhsd().summary())
sns.pointplot(x="type", y="ideal_time", hue="type", hue_order=hue_order, data=time, join=False, ax=ax, capsize=0.1, ci=95)
ax.set_ylim(0.5,3.5)
plt.yticks([1, 2, 3, 4], ["<3", "3-5", "5-8", "8<"])
plt.show()
###############################################
# compare prediction and intervention
###############################################
with open("/home/kuriatsu/Dropbox/data/pie202203/database.pkl", "rb") as f:
database = pickle.load(f)
tl_result = pd.read_csv("/home/kuriatsu/Dropbox/data/pie202203/tlr_result.csv")
overall_result = pd.DataFrame(columns=["id", "task", "subject", "gt", "int", "prediction"])
log_data = None
data_path = "/home/kuriatsu/Dropbox/data/pie202203"
for file in glob.glob(os.path.join(data_path, "log*.csv")):
buf = pd.read_csv(file)
filename = file.split("/")[-1]
count = float(filename.replace("log_data_", "").split("_")[-1].replace(".csv", ""))
print("{}".format(filename))
if count in [0, 1, 2]:
print("skipped")
continue
subject = filename.replace("log_data_", "").split("_")[0]
task = filename.replace("log_data_", "").split("_")[1]
for idx, row in buf.iterrows():
if task != "tl":
database_id = row.id+task+"_"+str(float(row.int_length))
prediction = (database[database_id].get("likelihood") <= 0.5)
gt = False if row.state else True
else:
database_id = row.id+"_"+str(float(row.int_length))
prediction = 1 if float(tl_result[tl_result.id == row.id].result) == 2 else 0
gt = False if row.state else True
if row.id in tl_black_list:
intervention = -2
if row.last_state == -1: # no intervention
intervention = -1
else:
if row.id in opposite_anno_list:
intervention = False if row.last_state else True
else:
intervention = row.last_state
buf = pd.DataFrame([(row.id, task, subject, int(gt), int(intervention), int(prediction))], columns = overall_result.columns)
overall_result = pd.concat([overall_result, buf])
overall_result.to_csv("/home/kuriatsu/Dropbox/data/pie202203/acc.csv")
| 44.696078
| 222
| 0.612547
| 3,196
| 22,795
| 4.176471
| 0.098874
| 0.052592
| 0.053941
| 0.075517
| 0.737114
| 0.699655
| 0.64579
| 0.596344
| 0.54128
| 0.529293
| 0
| 0.036641
| 0.176267
| 22,795
| 509
| 223
| 44.78389
| 0.674229
| 0.087037
| 0
| 0.506173
| 0
| 0.014815
| 0.158824
| 0.021922
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.034568
| 0
| 0.034568
| 0.101235
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fd518544ef8c44c965453eb8925336fcec4f3ee3
| 3,005
|
py
|
Python
|
convert_nbrId_to_orgnr.py
|
obtitus/barnehagefakta_osm
|
4539525f6defcc67a087cc57baad996f8d76b8bd
|
[
"Apache-2.0"
] | 1
|
2018-10-05T17:00:23.000Z
|
2018-10-05T17:00:23.000Z
|
convert_nbrId_to_orgnr.py
|
obtitus/barnehagefakta_osm
|
4539525f6defcc67a087cc57baad996f8d76b8bd
|
[
"Apache-2.0"
] | 6
|
2016-05-29T09:33:06.000Z
|
2019-12-18T20:24:50.000Z
|
convert_nbrId_to_orgnr.py
|
obtitus/barnehagefakta_osm
|
4539525f6defcc67a087cc57baad996f8d76b8bd
|
[
"Apache-2.0"
] | null | null | null |
# Database switched from having nsrId to using orgnr, this script helps with this conversion.
import os
import re
import json
import subprocess
from glob import glob
from utility_to_osm import file_util
if __name__ == '__main__':
data_dir = 'data' #'barnehagefakta_osm_data/data'
nsrId_to_orgnr_filename = 'nsrId_to_orgnr.json'
if False:
# Done once, on a old dump of the database, to get mapping from nsrId to orgnr
nsrId_to_orgnr = dict()
for kommune_nr in os.listdir(data_dir):
folder = os.path.join(data_dir, kommune_nr)
if os.path.isdir(folder):
print(folder)
count = 0
for filename in glob(os.path.join(folder, 'barnehagefakta_no_nbrId*.json')):
content = file_util.read_file(filename)
if content == '404':
# cleanup
os.remove(filename)
continue
dct = json.loads(content)
nsrId = dct['nsrId']
orgnr = dct['orgnr']
if nsrId in nsrId_to_orgnr and nsrId_to_orgnr[nsrId] != orgnr:
raise ValueError('Duplicate key %s, %s != %s' % (nsrId, nsrId_to_orgnr[nsrId], orgnr))
nsrId_to_orgnr[nsrId] = orgnr
count += 1
print('Found', count)
with open(nsrId_to_orgnr_filename, 'w') as f:
json.dump(nsrId_to_orgnr, f)
content = file_util.read_file(nsrId_to_orgnr_filename)
nsrId_to_orgnr = json.loads(content)
if True:
# Rename files
for kommune_nr in os.listdir(data_dir):
folder = os.path.join(data_dir, kommune_nr)
if os.path.isdir(folder):
print(folder)
count = 0
for filename in glob(os.path.join(folder, 'barnehagefakta_no_nbrId*.json')):
reg = re.search(r'barnehagefakta_no_nbrId(\d+)', filename)
if reg:
nbrId = reg.group(1)
try:
orgnr = nsrId_to_orgnr[nbrId]
except KeyError as e:
content = file_util.read_file(filename)
print('ERROR', repr(e), filename, content)
if content == '404':
os.remove(filename)
continue
new_filename = filename.replace('barnehagefakta_no_nbrId%s' % nbrId,
'barnehagefakta_no_orgnr%s' % orgnr)
subprocess.run(['git', 'mv', filename, new_filename])
# if the file is still there, probably not version controlled
if os.path.exists(filename):
os.rename(filename, new_filename)
| 40.608108
| 110
| 0.509151
| 324
| 3,005
| 4.512346
| 0.314815
| 0.067031
| 0.106703
| 0.046512
| 0.365937
| 0.305062
| 0.262654
| 0.262654
| 0.213406
| 0.213406
| 0
| 0.00564
| 0.409983
| 3,005
| 73
| 111
| 41.164384
| 0.818951
| 0.092845
| 0
| 0.357143
| 0
| 0
| 0.082751
| 0.050018
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.107143
| 0
| 0.107143
| 0.071429
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fd543b58f8ff3f846e998d58939fe4d5bc4acf05
| 5,859
|
py
|
Python
|
main.py
|
MO-RISE/crowsnest-connector-cluon-n2k
|
11eaefd8ebe76829ec8fe91f99da9acbc84e5187
|
[
"Apache-2.0"
] | null | null | null |
main.py
|
MO-RISE/crowsnest-connector-cluon-n2k
|
11eaefd8ebe76829ec8fe91f99da9acbc84e5187
|
[
"Apache-2.0"
] | null | null | null |
main.py
|
MO-RISE/crowsnest-connector-cluon-n2k
|
11eaefd8ebe76829ec8fe91f99da9acbc84e5187
|
[
"Apache-2.0"
] | null | null | null |
"""Main entrypoint for this application"""
from pathlib import Path
from math import degrees
from datetime import datetime
import logging
import warnings
from environs import Env
from streamz import Stream
from paho.mqtt.client import Client as MQTT
from pycluon import OD4Session, Envelope as cEnvelope
from pycluon.importer import import_odvd
from marulc import NMEA2000Parser
from marulc.utils import filter_on_pgn, deep_get
from marulc.exceptions import MultiPacketInProcessError
from brefv.envelope import Envelope
from brefv.messages.observations.rudder import Rudder
from brefv.messages.observations.propeller import Propeller
# Reading config from environment variables
env = Env()
CLUON_CID = env.int("CLUON_CID", 111)
MQTT_BROKER_HOST = env("MQTT_BROKER_HOST")
MQTT_BROKER_PORT = env.int("MQTT_BROKER_PORT", 1883)
MQTT_CLIENT_ID = env("MQTT_CLIENT_ID", None)
MQTT_TRANSPORT = env("MQTT_TRANSPORT", "tcp")
MQTT_TLS = env.bool("MQTT_TLS", False)
MQTT_USER = env("MQTT_USER", None)
MQTT_PASSWORD = env("MQTT_PASSWORD", None)
MQTT_BASE_TOPIC = env("MQTT_BASE_TOPIC", "/test/test")
RUDDER_CONFIG = env.dict("RUDDER_CONFIG", default={})
PROPELLER_CONFIG = env.dict("PROPELLER_CONFIG", default={})
LOG_LEVEL = env.log_level("LOG_LEVEL", logging.WARNING)
## Import and generate code for message specifications
THIS_DIR = Path(__file__).parent
memo = import_odvd(THIS_DIR / "memo" / "memo.odvd")
# Setup logger
logging.basicConfig(level=LOG_LEVEL)
logging.captureWarnings(True)
warnings.filterwarnings("once")
LOGGER = logging.getLogger("crowsnest-connector-cluon-n2k")
mq = MQTT(client_id=MQTT_CLIENT_ID, transport=MQTT_TRANSPORT)
# Not empty filter
not_empty = lambda x: x is not None
## Main entrypoint for N2k frames
entrypoint = Stream()
parser = NMEA2000Parser()
def unpack_n2k_frame(envelope: cEnvelope):
"""Extract an n2k frame from an envelope and unpack it using marulc"""
LOGGER.info("Got envelope from pycluon")
try:
frame = memo.memo_raw_NMEA2000()
frame.ParseFromString(envelope.serialized_data)
LOGGER.debug("Frame: %s", frame.data)
msg = parser.unpack(frame.data)
LOGGER.debug("Unpacked: %s", msg)
msg["timestamp"] = envelope.sampled
return msg
except MultiPacketInProcessError:
LOGGER.debug("Multi-packet currently in process")
return None
except Exception: # pylint: disable=broad-except
LOGGER.exception("Exception when unpacking a frame")
return None
unpacked = entrypoint.map(unpack_n2k_frame).filter(not_empty)
## Rudder
def pgn127245_to_brefv(msg):
"""Converting a marulc dict to a brefv message and packaging it into a brefv construct"""
n2k_id = str(deep_get(msg, "Fields", "instance"))
if sensor_id := RUDDER_CONFIG.get(n2k_id):
crowsnest_id = list(RUDDER_CONFIG.keys()).index(n2k_id)
rud = Rudder(
sensor_id=sensor_id, angle=degrees(-1 * msg["Fields"]["angleOrder"])
) # Negating to adhere to brefv conventions
envelope = Envelope(
sent_at=datetime.utcfromtimestamp(msg["timestamp"]).isoformat(),
message_type="https://mo-rise.github.io/brefv/0.1.0/messages/observations/rudder.json",
message=rud.dict(
exclude_none=True, exclude_unset=True, exclude_defaults=True
),
)
LOGGER.info("Brefv envelope with Rudder message assembled")
LOGGER.debug("Envelope:\n%s", envelope)
return f"/observations/rudder/{crowsnest_id}", envelope
warnings.warn(f"No Rudder config found for N2k instance id: {n2k_id}")
return None
brefv_rudder = (
unpacked.filter(filter_on_pgn(127245)).map(pgn127245_to_brefv).filter(not_empty)
)
## Propeller (Using engine data for now...)
def pgn127488_to_brefv(msg):
"""Converting a marulc dict to a brefv message and packaging it into a brefv construct"""
n2k_id = str(deep_get(msg, "Fields", "instance"))
if sensor_id := PROPELLER_CONFIG.get(n2k_id):
crowsnest_id = list(PROPELLER_CONFIG.keys()).index(n2k_id)
prop = Propeller(sensor_id=sensor_id, rpm=msg["Fields"]["speed"])
envelope = Envelope(
sent_at=datetime.utcfromtimestamp(msg["timestamp"]).isoformat(),
message_type="https://mo-rise.github.io/brefv/0.1.0/messages/observations/propeller.json", # pylint: disable=line-too-long
message=prop.dict(
exclude_none=True, exclude_unset=True, exclude_defaults=True
),
)
LOGGER.info("Brefv envelope with Propeller message assembled")
LOGGER.debug("Envelope:\n%s", envelope)
return f"/observations/propeller/{crowsnest_id}", envelope
warnings.warn(f"No Propeller config found for {n2k_id}")
return None
brefv_propeller = (
unpacked.filter(filter_on_pgn(127488)).map(pgn127488_to_brefv).filter(not_empty)
)
# Finally, publish to mqtt
def to_mqtt(data):
"""Push data to a mqtt topic"""
subtopic, envelope = data
topic = f"{MQTT_BASE_TOPIC}{subtopic}"
LOGGER.debug("Publishing on %s", topic)
try:
mq.publish(
topic,
envelope.json(),
)
except Exception: # pylint: disable=broad-except
LOGGER.exception("Failed publishing to broker!")
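# --- Added illustrative sketch (not part of the original service) ------------
# RUDDER_CONFIG / PROPELLER_CONFIG are read with environs' env.dict(), which by
# default turns an environment value such as "0=rudder-port,1=rudder-stbd"
# into {"0": "rudder-port", "1": "rudder-stbd"}.  The example mirrors how
# pgn127245_to_brefv() resolves sensor_id and crowsnest_id from that mapping;
# the instance ids and sensor names are made-up placeholders.
def _config_lookup_example():
    example_config = {"0": "rudder-port", "1": "rudder-stbd"}
    n2k_id = "1"
    sensor_id = example_config.get(n2k_id)                    # "rudder-stbd"
    crowsnest_id = list(example_config.keys()).index(n2k_id)  # 1
    return sensor_id, crowsnest_id
# ------------------------------------------------------------------------------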
if __name__ == "__main__":
print("All setup done, lets start processing messages!")
# Connect remaining pieces
brefv_rudder.latest().rate_limit(0.1).sink(to_mqtt)
brefv_propeller.latest().rate_limit(0.1).sink(to_mqtt)
# Connect to broker
mq.username_pw_set(MQTT_USER, MQTT_PASSWORD)
if MQTT_TLS:
mq.tls_set()
mq.connect(MQTT_BROKER_HOST, MQTT_BROKER_PORT)
# Register triggers
session = OD4Session(CLUON_CID)
session.add_data_trigger(10002, entrypoint.emit)
mq.loop_forever()
| 30.046154
| 135
| 0.70524
| 769
| 5,859
| 5.192458
| 0.288687
| 0.010018
| 0.012021
| 0.014525
| 0.326071
| 0.282995
| 0.268971
| 0.237415
| 0.196844
| 0.196844
| 0
| 0.018235
| 0.185697
| 5,859
| 194
| 136
| 30.201031
| 0.818696
| 0.122717
| 0
| 0.166667
| 0
| 0.016667
| 0.187402
| 0.025314
| 0
| 0
| 0
| 0
| 0
| 1
| 0.033333
| false
| 0.016667
| 0.141667
| 0
| 0.233333
| 0.008333
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fd54b2677eda2400e60664de51925feee4550c09
| 7,569
|
py
|
Python
|
cocapi/client/api.py
|
bim-ba/coc-api
|
69ff957803cb991dfad8df3af752d193171f2ef0
|
[
"Unlicense"
] | 1
|
2022-03-29T12:39:36.000Z
|
2022-03-29T12:39:36.000Z
|
cocapi/client/api.py
|
bim-ba/coc-api
|
69ff957803cb991dfad8df3af752d193171f2ef0
|
[
"Unlicense"
] | null | null | null |
cocapi/client/api.py
|
bim-ba/coc-api
|
69ff957803cb991dfad8df3af752d193171f2ef0
|
[
"Unlicense"
] | null | null | null |
from typing import Any, Optional
from dataclasses import dataclass, field
import aiohttp
from ..types import aliases
from ..types import exceptions
@dataclass
class BaseMethod:
# config
base_url: aliases.Url | None = field(init=False, default=None)
default_http_method: aliases.RequestMethod | None = field(init=False, default=None)
# actual dataclass members
path: aliases.RelativeUrl
method: Optional[aliases.RequestMethod] = None
url: aliases.Url = field(init=False)
def __post_init__(self):
if not self.base_url:
raise NotImplementedError(
f"You must define static field 'base_url' for {self.__class__}"
) from None
if not self.method and not self.default_http_method:
raise NotImplementedError(
f"You must define either static field 'default_http_method' or pass it directly in {self.__class__}"
) from None
self.method = self.default_http_method if self.method is None else self.method
self.url = self.base_url + self.path
def __call__(self, **kwargs: Any):
try:
self.url = self.url.format(**kwargs)
return self
except KeyError as error:
(missing_field,) = error.args
raise KeyError(
f"Missing field: '{missing_field}' when formatting {self.url}"
) from error
class Methods:
class Method(BaseMethod):
base_url = "https://api.clashofclans.com/v1"
default_http_method = "GET"
# clans
CLANS = Method("/clans")
"""`GET`: `/clans`"""
CLAN = Method("/clans/{clantag}")
"""`GET`: `/clans/{clantag:str}`"""
CLAN_WARLOG = Method("/clans/{clantag}/warlog")
"""`GET`: `/clans/{clantag:str}/warlog`"""
CLAN_MEMBERS = Method("/clans/{clantag}/members")
"""`GET`: `/clans/{clantag:str}/members`"""
CLAN_CURRENT_WAR = Method("/clans/{clantag}/currentwar")
"""`GET`: `/clans/{clantag:str}/currentwar`"""
CLAN_CURRENT_WAR_LEAGUEGROUP = Method("/clans/{clantag}/currentwar/leaguegroup")
"""`GET`: `/clans/{clantag:str}/currentwar/leaguegroup`"""
CLAN_CURRENT_LEAGUE_WAR = Method("/clanwarleagues/wars/{wartag}")
"""`GET`: `/clanwarleagues/wars/{wartag:str}`"""
# players
PLAYER = Method("/players/{playertag}")
"""`GET`: `/players/{playertag:str}`"""
PLAYER_VERIFY_API_TOKEN = Method("/players/{playertag}/verifytoken", "POST")
"""`POST`: `/players/{playertag:str}/verifytoken`"""
# leagues
LEAGUES = Method("/leagues")
"""`GET`: `/leagues`"""
LEAGUE_INFO = Method("/leagues/{league_id}")
"""`GET`: `/leagues/{league_id:int}`"""
LEAGUE_SEASONS = Method("/leagues/{league_id}/seasons")
"""`GET`: `/leagues/{league_id:int}/seasons`"""
LEAGUE_SEASONS_RANKINGS = Method("/leagues/{league_id}/seasons/{season_id}")
"""`GET`: `/leagues/{league_id:int}/seasons/{season_id:int}`"""
WARLEAGUES = Method("/warleagues")
"""`GET`: `/warleagues`"""
WARLEAGUE_INFORMATION = Method("/warleagues/{league_id}")
"""`GET`: `/warleagues/{league_id:int}`"""
# locations
LOCATIONS = Method("/locations")
"""`GET`: `/locations`"""
LOCATION = Method("/locations/{location_id}")
"""`GET`: `/locations/{location_id:int}`"""
# rankings
CLAN_RANKINGS = Method("/locations/{location_id}/rankings/clans")
"""`GET`: `/locations/{location_id:int}/rankings/clans`"""
PLAYER_RANKINGS = Method("/locations/{location_id}/rankings/players")
"""`GET`: `/locations/{location_id:int}/rankings/players`"""
CLAN_VERSUS_RANKINGS = Method("/locations/{location_id}/rankings/clans-versus")
"""`GET`: `/locations/{location_id:int}/rankings/clans-versus`"""
PLAYER_VERSUS_RANKINGS = Method("/locations/{location_id}/rankings/players-versus")
"""`GET`: `/locations/{location_id:int}/rankings/players-versus`"""
# goldpass
GOLDPASS = Method("/goldpass/seasons/current")
"""`GET`: `/goldpass/seasons/current`"""
# labels
CLAN_LABELS = Method("/labels/clans")
"""`GET`: `/labels/clans`"""
PLAYER_LABELS = Method("/labels/players")
"""`GET`: `/labels/players`"""
# not used, but can be
class ServiceMethods:
class Method(BaseMethod):
base_url = "https://developer.clashofclans.com/api"
default_http_method = "POST"
# developer-api
LOGIN = Method("/login")
"""`POST`: `/login`"""
LIST_KEY = Method("/apikey/list")
"""`POST`: `/apikey/list`"""
CREATE_KEY = Method("/apikey/create")
"""`POST`: `/apikey/create`"""
REVOKE_KEY = Method("/apikey/revoke")
"""`POST`: `/apikey/revoke`"""
async def check_result(response: aiohttp.ClientResponse):
"""
Validate request for success.
Parameters
----------
response : aiohttp.ClientResponse
Actual response
Raises
------
``IncorrectParameters``, ``AccessDenied``, ``ResourceNotFound``,
``TooManyRequests``, ``UnknownError``, ``ServiceUnavailable``
According to the official Clash of Clans API
--------------------------------------------
- ``200`` Successful response.
- ``400`` Client provided incorrect parameters for the request.
- ``403`` Access denied, either because of missing/incorrect credentials or used API token does not grant access to the requested resource.
- ``404`` Resource was not found.
- ``429`` Request was throttled, because amount of requests was above the threshold defined for the used API token.
- ``500`` Unknown error happened when handling the request.
- ``503`` Service is temporarily unavailable because of maintenance.
"""
# because json decoding must be executed in request context manager
json = await response.json()
if not response.ok:
match response.status:
case 400:
raise exceptions.IncorrectParameters(response)
case 403:
raise exceptions.AccessDenied(response)
case 404:
raise exceptions.ResourceNotFound(response)
case 429:
raise exceptions.TooManyRequests(response)
case 503:
raise exceptions.ServiceUnavailable(response)
case _: # 500 also
response_data = {
"error": json.get("error"),
"description": json.get("description"),
"reason": json.get("reason"),
"message": json.get("message"),
}
raise exceptions.UnknownError(response, data=response_data)
return response
async def make_request(
session: aiohttp.ClientSession,
api_method: BaseMethod,
**kwargs: Any,
) -> aiohttp.ClientResponse:
"""
Parameters
----------
session : ``aiohttp.ClientSession``
Client session to be used for requests
api_method : ``BaseMethod``
Request API method
**kwargs:
This keyword arguments are compatible with :meth:``aiohttp.ClientSession.request``
Returns
-------
aiohttp.ClientResponse
Response object.
"""
params = kwargs.pop("params", None)
if params is not None:
filtered_params = {
key: value for key, value in params.items() if value is not None
}
kwargs["params"] = filtered_params
async with session.request(
method=api_method.method, # type: ignore
url=api_method.url,
**kwargs,
) as response:
return await check_result(response)
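# --- Added illustrative usage sketch (not part of the original module) -------
# Shows how a Method is parameterised and handed to make_request().  The clan
# tag and the bearer token below are placeholders, not real credentials; the
# coroutine would perform a live HTTP request if driven with asyncio.run().
async def _example_clan_lookup() -> int:
    headers = {"Authorization": "Bearer <api-token>"}  # placeholder token
    async with aiohttp.ClientSession(headers=headers) as session:
        # Methods.CLAN has path "/clans/{clantag}"; calling it fills the slot
        response = await make_request(session, Methods.CLAN(clantag="%232PP"))
        return response.status
# ------------------------------------------------------------------------------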
| 34.880184
| 143
| 0.62069
| 797
| 7,569
| 5.771644
| 0.258469
| 0.040652
| 0.041304
| 0.019565
| 0.172609
| 0.148261
| 0.08
| 0
| 0
| 0
| 0
| 0.006826
| 0.225789
| 7,569
| 216
| 144
| 35.041667
| 0.778157
| 0.027877
| 0
| 0.055556
| 0
| 0
| 0.208555
| 0.104175
| 0
| 0
| 0
| 0
| 0
| 1
| 0.018519
| false
| 0.018519
| 0.046296
| 0
| 0.444444
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fd56674cc383ba9fa6321e89c2463e251d94abf2
| 28,594
|
py
|
Python
|
ratings.py
|
struct-rgb/ratings
|
40d56455406cfee9731c564e54ed7610b5a9641c
|
[
"MIT"
] | null | null | null |
ratings.py
|
struct-rgb/ratings
|
40d56455406cfee9731c564e54ed7610b5a9641c
|
[
"MIT"
] | null | null | null |
ratings.py
|
struct-rgb/ratings
|
40d56455406cfee9731c564e54ed7610b5a9641c
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python3
import re
import json
import random
from pathlib import Path
from datetime import date
from typing import Any, Callable, Set, Tuple
import gi
gi.require_version("Gtk", "3.0")
from gi.repository import Gtk
from tags import Filter, Box, CompilationError, escape, enum_subject_parser_factory, tagset, PredicateDefinitions, DEFAULT_BOOL_PARSER, highlighting
from model import Search, Sort, Score, Status, Page, Tag, Rating, Model
#
# filter setup
#
parser_score = enum_subject_parser_factory(Score)
parser_status = enum_subject_parser_factory(Status)
####################
# RANDOM PREDICATE #
####################
def parser_random(string: str) -> float:
if string == 'nan':
raise ValueError("percentage cannot be nan")
percent = float(string)
if percent < 0.0 or 100.0 < percent:
raise ValueError(f"percentage {percent} is not in range 0 to 100 inclusive")
return percent
def action_random(percent: float, ignore: Any) -> bool:
return random.random() * 100 <= percent
PREDICATES = PredicateDefinitions(
action=lambda tag, rating: tag in rating.tags
)
###################
# COUNT PREDICATE #
###################
count_pattern = re.compile(r"^(\d+)\s+of\s+(.*)$")
def parser_count(string: str) -> Tuple[Box[int], Callable[[Any], bool]]:
items = count_pattern.fullmatch(string)
if not items:
raise ValueError(f"subject {string} is not of the form: <number> of <expression>")
integer = int(items[1])
if integer < 0:
raise ValueError(f"counting must begin from 0 or greater, not {integer}")
try:
subroutine = Filter(items[2], PREDICATES)
except CompilationError as e:
# intercept the error and change the source
# to the source that we're trying to compile
e.reason = "in quotation: " + e.reason
e.source = string
raise e
return (Box(integer), subroutine)
def action_count(subject: Tuple[Box[int], Callable[[Any], bool]], rating: Rating) -> bool:
integer, subroutine = subject
if integer.value != 0 and subroutine(rating):
integer.value -= 1
return True
return False
##################
# EVAL PREDICATE #
##################
def parser_eval(source):
try:
return Filter(source, PREDICATES)
except CompilationError as e:
# intercept the error and change the source
# to the source that we're trying to compile
e.reason = "in quotation: " + e.reason
e.source = source
raise e
def parser_lower_str(source):
return source.lower()
###################
# DATE PREDICATES #
###################
def parser_date(isoformat: str):
try:
return date.fromisoformat(isoformat)
except ValueError as e:
raise ValueError("date must be in the format YYYY-MM-DD")
(PREDICATES
.define("tag",
readme="filter for ratings with the specified tag",
action=lambda tag, rating: tag in rating.tags,
parser=lambda x: x,
)
.define("score",
readme="filter for ratings with the specified score",
action=lambda score, rating: score == rating.score,
parser=parser_score,
)
.define("minimum score",
readme="filter for ratings with at least a certain score",
action=lambda score, rating: rating.score >= score,
parser=parser_score,
)
.define("maximum score",
readme="filter for ratings with at most a certain score",
action=lambda score, rating: rating.score <= score,
parser=parser_score,
)
.define("status",
readme="filter for ratings with the specified status",
action=lambda status, rating: status == rating.status,
parser=parser_status,
)
.define("minimum status",
readme="filter for ratings with at least a certain status",
action=lambda status, rating: rating.status >= status,
parser=parser_status,
)
.define("maximum status",
readme="filter for ratings with at most a certain status",
action=lambda status, rating: rating.status <= status,
parser=parser_status,
)
.define("tags",
readme="filter for ratings with a specific number of tags",
action=lambda number, rating: len(rating.tags) == number,
parser=int,
)
.define("minimum tags",
readme="filter for ratings with at least a certain number of tags",
action=lambda number, rating: len(rating.tags) >= number,
parser=int,
)
.define("maximum tags",
readme="filter for ratings with at most a certain number of tags",
action=lambda number, rating: len(rating.tags) <= number,
parser=int,
)
.define("random",
readme="filter ratings with a percent chance to include each",
action=action_random,
parser=parser_random,
pure=False
)
.define("count", # TODO possibly remove
readme="filter for a certain number of results at most",
action=action_count,
parser=parser_count,
pure=False
)
.define("eval", # TODO possibly remove
readme="evaluate a string as an expression",
action=lambda function, rating: function(rating),
parser=parser_eval,
pure=False
)
.define("title",
readme="filter for ratings with certain text in the title (case insensitive)",
action=lambda string, rating: rating.title.lower().find(string) != -1,
parser=parser_lower_str,
)
.define("comment",
readme="filter for ratings with certain text in the comments (case insensitive)",
action=lambda string, rating: rating.comments.lower().find(string) != -1,
parser=parser_lower_str,
)
.define("text",
readme="filter for ratings with certain text in the title or the comments (case insensitive)",
action=lambda string, rating: (
rating.title.lower().find(string) != -1 or rating.comments.lower().find(string) != -1
),
parser=parser_lower_str,
)
.define("commented",
readme="filter for ratings that either have or lack a comment",
action=lambda boolean, rating: bool(rating.comments) == boolean,
parser=DEFAULT_BOOL_PARSER,
)
.define("value",
readme="a literal boolean value; true or false",
action=lambda boolean, rating: boolean,
parser=DEFAULT_BOOL_PARSER,
)
.define("modified",
readme="ratings modified on YYYY-MM-DD",
action=lambda day, rating: rating.modified == day,
parser=parser_date,
)
.define("modified after",
readme="ratings modified after YYYY-MM-DD",
action=lambda day, rating: rating.modified > day,
parser=parser_date,
)
.define("modified before",
readme="ratings modified before YYYY-MM-DD",
action=lambda day, rating: rating.modified < day,
parser=parser_date,
)
.define("created",
readme="ratings created on YYYY-MM-DD",
action=lambda day, rating: rating.created == day,
parser=parser_date,
)
.define("created after",
readme="ratings created after YYYY-MM-DD",
action=lambda day, rating: rating.created > day,
parser=parser_date,
)
.define("created before",
readme="ratings created before YYYY-MM-DD",
action=lambda day, rating: rating.created < day,
parser=parser_date,
)
# alias definitions
.alias("minimum score", "min score")
.alias("maximum score", "max score")
.alias("minimum status", "min status")
.alias("maximum status", "max status")
.alias("minimum tags", "min tags")
.alias("maximum tags", "max tags")
.alias("commented", "has comment")
)
def create_rating_filter(filter_tab):
search = filter_tab.search
if search == Search.COMMENTS:
criterion = filter_tab.query.lower()
elif search == Search.ADVANCED:
adv = filter_tab.advanced
criterion = Filter(adv if adv else filter_tab.query, PREDICATES)
elif search == Search.TITLE:
criterion = filter_tab.query.lower()
else:
pass
def function(rating):
if search == Search.COMMENTS:
if rating.comments.lower().find(criterion) == -1:
return False
elif search == Search.ADVANCED:
if not criterion(rating):
return False
elif search == Search.TITLE:
if rating.title.lower().find(criterion) == -1:
return False
else:
pass
return True
return function
def create_tagging_filter(filter_tab):
search = filter_tab.tags_search
criterion = filter_tab.tags_query.lower()
def function(tag):
if search == Search.COMMENTS:
if tag.description.lower().find(criterion) == -1:
return False
elif search == Search.TITLE:
if tag.name.lower().find(criterion) == -1:
return False
else:
pass
return True
return (lambda item: True) if criterion == "" else function
class FilterTab(object):
def __init__(self, builder, model):
self._search = builder.get_object("filter_search_combobox")
self._tags_search = builder.get_object("filter_tags_search_combobox")
self._tags_ascending = builder.get_object("filter_tags_ascending")
self._tags_descending = builder.get_object("filter_tags_descending")
self._sort = builder.get_object("filter_sort_combobox")
self._query = builder.get_object("search_entry")
self._tags_query = builder.get_object("tagging_search_entry")
self._ascending = builder.get_object("filter_ascending")
self._descending = builder.get_object("filter_descending")
self.model = model
self._advanced = builder.get_object("highlight_textview").get_buffer()
self._advanced.create_tag("operator", foreground="red", background=None)
self._advanced.create_tag("predicate", foreground="magenta")
self._advanced.create_tag("grouping", foreground="red")
self._advanced.create_tag("error", foreground="white", background="red")
self.reset()
self.reset_tags()
def reset(self):
self.advanced = ""
self.search = Search.TITLE
self.sort = Sort.TITLE
def reset_tags(self):
self.tags_search = Search.TITLE
# BUG setting sorting does not actually work
# only sorts by title regardless of function
def configure_sorting(self, tree_sortable):
# a descending order is a good default
if self.descending:
order = Gtk.SortType.DESCENDING
else:
order = Gtk.SortType.ASCENDING
tree_sortable.set_sort_column_id(0, order)
sort = self.sort
if sort == Sort.SCORE:
def sort_func(model, a, b, userdata):
x = userdata[model.get(a, 1)].score
y = userdata[model.get(b, 1)].score
return 1 if x > y else -1 if x < y else 0
elif sort == Sort.STATUS:
def sort_func(model, a, b, userdata):
x = userdata[model.get(a, 1)].status
y = userdata[model.get(b, 1)].status
return 1 if x > y else -1 if x < y else 0
elif sort == Sort.TITLE:
def sort_func(model, a, b, userdata):
x = userdata[model.get(a, 1)].title
y = userdata[model.get(b, 1)].title
return 1 if x > y else -1 if x < y else 0
else:
raise ValueError('Enum value "%s" unknown for Sort' % sort)
tree_sortable.set_sort_func(1, sort_func, self.model.ratings)
def configure_sorting_tags(self, tree_sortable):
# a descending order is a good default
if self.tags_descending:
order = Gtk.SortType.DESCENDING
else:
order = Gtk.SortType.ASCENDING
tree_sortable.set_sort_column_id(0, order)
def _check_tag(self, criterion, tag):
return True
def create(self, kind):
if kind is Rating:
return create_rating_filter(self)
elif kind is Tag:
return create_tagging_filter(self)
else:
# TODO
raise TypeError("kind must be one of Rating or Tag")
def highlight(self):
highlights = highlighting(self.advanced, PREDICATES.keys())
self._advanced.remove_all_tags(*self._advanced.get_bounds())
for position, token, tag in highlights:
start = self._advanced.get_iter_at_offset(position)
end = self._advanced.get_iter_at_offset(position + len(token))
self._advanced.apply_tag_by_name(tag, start, end)
@property
def query(self):
return self._query.get_text()
@query.setter
def query(self, value):
self._query.set_text(value)
@property
def advanced(self):
return self._advanced.get_text(*self._advanced.get_bounds(), True)
@advanced.setter
def advanced(self, value):
self._advanced.set_text(value)
self.highlight()
@property
def tags_query(self):
return self._tags_query.get_text()
@tags_query.setter
def tags_query(self, value):
self._tags_query.set_text(value)
@property
def ascending(self):
return self._ascending.get_active()
@ascending.setter
def ascending(self, value):
self._ascending.set_active(value)
@property
def descending(self):
return self._descending.get_active()
@descending.setter
def descending(self, value):
self._descending.set_active(value)
@property
def tags_ascending(self):
return self._tags_ascending.get_active()
@tags_ascending.setter
def tags_ascending(self, value):
self._tags_ascending.set_active(value)
@property
def tags_descending(self):
return self._tags_descending.get_active()
@tags_descending.setter
def tags_descending(self, value):
self._tags_descending.set_active(value)
@property
def search(self):
return Search(int(self._search.get_active_id()))
@search.setter
def search(self, value):
self._search.set_active_id(str(value.value))
@property
def tags_search(self):
return Search(int(self._tags_search.get_active_id()))
@tags_search.setter
def tags_search(self, value):
self._tags_search.set_active_id(str(value.value))
@property
def sort(self):
return Sort(int(self._sort.get_active_id()))
@sort.setter
def sort(self, value):
self._sort.set_active_id(str(value.value))
class EditorTab(object):
def __init__(self, builder, model):
self._idnum = builder.get_object("id_label")
self._title = builder.get_object("title_entry")
self._score = builder.get_object("score_combobox")
self._recommendation = builder.get_object("recommend_combobox")
self._status = builder.get_object("status_combobox")
self._comments = builder.get_object("comments_textview").get_buffer()
self._tags = builder.get_object("tags_textview").get_buffer()
self._original_tags = None
self._created_label = builder.get_object("created_label")
self._modified_label = builder.get_object("modified_label")
self._created = date.today()
self._modified = date.today()
self.model = model
def copy_to(self, rating):
modified = (
rating.score != self.score
or rating.status != self.status
or rating.title != self.title
or rating.comments != self.comments
)
rating.title = self.title
rating.score = self.score
rating.status = self.status
rating.comments = self.comments
if self._original_tags != self._tags.get_text(*self._tags.get_bounds(), True):
self.model.change_tags(rating, self.tags)
modified = True
if modified:
rating.modified = date.today()
self.modified = rating.modified
def copy_from(self, value):
self.idnum = value.idnum
self.title = value.title
self.score = value.score
self.status = value.status
self.comments = value.comments
self.tags = value.tags
# set the date attributes/labels; these are not editable
self.created = value.created
self.modified = value.modified
@property
def idnum(self):
return self._idnum.get_text()
@idnum.setter
def idnum(self, number):
self._idnum.set_text(str(number))
@property
def title(self):
return self._title.get_text()
@title.setter
def title(self, text):
self._title.set_text(text)
@property
def score(self):
return Score(int(self._score.get_active_id()))
@score.setter
def score(self, value):
self._score.set_active_id(str(value.value))
@property
def status(self):
return Status(int(self._status.get_active_id()))
@status.setter
def status(self, value):
self._status.set_active_id(str(value.value))
@property
def comments(self):
start, end = self._comments.get_bounds()
return self._comments.get_text(start, end, True)
@comments.setter
def comments(self, text):
self._comments.set_text(text)
@property
def tags(self):
start, end = self._tags.get_bounds()
text = self._tags.get_text(start, end, True)
return tagset(text)
@tags.setter
def tags(self, tag_set):
self._original_tags = ", ".join([escape(tag) for tag in tag_set])
self._tags.set_text(self._original_tags)
@property
def created(self):
return self._created
@created.setter
def created(self, day):
self._created = day
self._created_label.set_text(day.isoformat())
@property
def modified(self):
return self._modified
@modified.setter
def modified(self, day):
self._modified = day
self._modified_label.set_text(day.isoformat())
def clear(self):
self.title = ""
self.score = Score.UNSPECIFIED
self.status = Status.UNSPECIFIED
self.comments = ""
self.tags = ""
self._created = date.today()
self._modified = date.today()
class FilesTab(object):
def __init__(self, builder):
self._path = builder.get_object("filepath_entry")
self._chooser = builder.get_object("file_chooser_button")
self._chooser.set_current_folder(str(Path.home()))
# self.update_path()
@property
def path(self):
return self._path.get_text()
@path.setter
def path(self, text):
self._path.set_text(text)
def update_path(self):
self.path = self._chooser.get_filename()
class TaggingTab(object):
def __init__(self, builder, model):
self._idnum = builder.get_object("tagging_id_label")
self._name = builder.get_object("tagging_name_entry")
self._description = builder.get_object("tagging_description_textview").get_buffer()
self._original_name = None
self.selected = None
self.selected_row = None
self.model = model
def sort_func(model, a, b, userdata):
x = model.get(a, 0)
y = model.get(b, 0)
return 1 if x > y else -1 if x < y else 0
self.liststore = Gtk.ListStore(str)
self.liststore.set_sort_column_id(0, Gtk.SortType.DESCENDING)
self.liststore.set_sort_func(0, sort_func, self.model.tags)
self.treeview = builder.get_object("tagging_treeview")
self.treeview.set_model(self.liststore)
self.title_column = Gtk.TreeViewColumn("Name", Gtk.CellRendererText(), text=0)
self.treeview.append_column(self.title_column)
self.results_label = builder.get_object("tagging_results_label")
self.instances_label = builder.get_object("tagging_instances_label")
self.filter = lambda item: True
def copy_to(self, tag):
name = self.name
if self._original_name is not None and name != self._original_name:
self.model.rename(tag, name)
self.selected = None
self.refresh()
tag.description = self.description
def copy_from(self, tag):
self.idnum = tag.idnum
self.name = tag.name
self._original_name = self.name
self.description = tag.description
if tag.instances == 1:
self.instances_label.set_text("1 instance")
else:
self.instances_label.set_text("%i instances" % tag.instances)
def change_selection(self):
if self.selected is not None:
self.copy_to(self.selected)
model, tag = self.treeview.get_selection().get_selected()
if tag is None:
return
name = model.get_value(tag, 0)
self.selected = self.model.tags[name]
self.selected_row = self.liststore[tag]
self.copy_from(self.model.tags[name])
def remove(self):
self.model.remove(self.selected)
has_next_row = False
tag = self.liststore.get_iter_first()
while tag is not None:
if self.liststore.get_value(tag, 0) == self.selected.name:
has_next_row = self.liststore.remove(tag)
break
tag = self.liststore.iter_next(tag)
if has_next_row or (tag := self.liststore.get_iter_first()) is not None:
name = self.liststore.get_value(tag, 0)
self.selected = self.model.tags[name]
path = self.liststore.get_path(tag)
self.selected_row = self.liststore[path]
self.treeview.set_cursor(path, self.title_column, False)
self.copy_from(self.selected)
else:
self.selected = None
self.name = ""
self.description = ""
self.idnum = -1
self.instances_label.set_text("0 instances")
def vacuum(self):
self.model.vacuum()
self.refresh()
def refresh(self):
results = self.model.fill_in(Tag, self.liststore, self.filter)
self.results_label.set_text("%i results" % results)
if self.selected is not None:
for row in self.liststore:
if row[0] == self.selected.name:
self.selected_row = row
break
else:
self.selected = None
self.selected_row = None
@property
def idnum(self):
return self._idnum.get_text()
@idnum.setter
def idnum(self, number):
self._idnum.set_text(str(number))
@property
def name(self):
return self._name.get_text()
@name.setter
def name(self, value):
self._name.set_text(value)
@property
def description(self):
start, end = self._description.get_bounds()
return self._description.get_text(start, end, True)
@description.setter
def description(self, text):
self._description.set_text(text)
class Rater:
#
# Location of configuration file
#
CONFIG = Path.home() / ".ratings.json"
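#
# A sketch of what this file ends up containing (it is written by exit_rater
# below with json.dumps({"last_open": self.file}, indent=4)); the path shown
# here is purely illustrative:
#
#   {
#       "last_open": "/home/user/my_ratings.json"
#   }
#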
def __init__(self):
builder = Gtk.Builder.new_from_file("ratings.glade")
self.selected = None
self.selected_row = None
self.model = Model()
self.editor = EditorTab(builder, self.model)
self.filter = FilterTab(builder, self.model)
self.files = FilesTab(builder)
self.tagging = TaggingTab(builder, self.model)
self.liststore = Gtk.ListStore(str, int)
self.treeview = builder.get_object("search_treeview")
self.treeview.set_model(self.liststore)
self.title_column = Gtk.TreeViewColumn("Title", Gtk.CellRendererText(), text=0)
self.treeview.append_column(self.title_column)
self.filter.configure_sorting(self.liststore)
self.rater_window = builder.get_object("rater_window")
self.rater_window.connect("destroy", self.exit_rater)
self.rater_window.show_all()
self.results_label = builder.get_object("results_label")
self.message_window = builder.get_object("message_window")
self.query_label = builder.get_object("message_window_query_label")
self.error_label = builder.get_object("message_window_error_label")
# TODO fix it so that closing the message window via border icon doesn't destroy it
self.message_window.connect("destroy",
self.on_close_message_button_clicked
)
self.message_window.connect("delete-event",
lambda widget, event: self.close_message_window()
)
self.message_window.connect("response",
lambda widget, event: self.close_message_window()
)
builder.connect_signals(self)
if Rater.CONFIG.is_file():
config = json.loads(Rater.CONFIG.read_text())
file = config["last_open"]
self.files.path = file if file is not None else ""
if not self.open_file():
self.new_file()
else:
self.file = None
self.new_file()
def exit_rater(self, widget):
Rater.CONFIG.write_text(
json.dumps({"last_open": self.file}, indent=4)
)
Gtk.main_quit()
def close_message_window(self):
self.message_window.hide()
def open_file(self):
if not self.model.load(self.files.path):
return False
results = self.model.fill_in(Tag, self.tagging.liststore)
self.tagging.results_label.set_text("%i results" % results)
results = self.model.fill_in(Rating, self.liststore)
self.results_label.set_text("%s results" % results)
tag = self.liststore.get_iter_first()
index = self.liststore.get_value(tag, 1)
self.selected = self.model.ratings[index]
path = self.liststore.get_path(tag)
self.selected_row = self.liststore[path]
self.treeview.set_cursor(path, self.title_column, False)
self.editor.copy_from(self.selected)
self.file = self.files.path
return True
def save_file(self):
if self.selected is not None:
self.editor.copy_to(self.selected)
self.model.save(self.files.path)
def new_file(self):
self.liststore.clear()
self.model.clear()
self.new_rating()
def new_rating(self):
if self.selected is not None:
self.editor.copy_to(self.selected)
self.selected = self.model.create_rating()
tag = self.liststore.prepend(
[self.selected.title, self.selected.idnum]
)
path = self.liststore.get_path(tag)
self.selected_row = self.liststore[path]
self.treeview.set_cursor(path, self.title_column, False)
self.editor.copy_from(self.selected)
def delete_rating(self):
self.model.remove(self.selected)
has_next_row = False
tag = self.liststore.get_iter_first()
while tag is not None:
if self.liststore.get_value(tag, 1) == self.selected.idnum:
has_next_row = self.liststore.remove(tag)
break
tag = self.liststore.iter_next(tag)
if has_next_row or (tag := self.liststore.get_iter_first()) is not None:
idnum = self.liststore.get_value(tag, 1)
self.selected = self.model.ratings[idnum]
path = self.liststore.get_path(tag)
self.selected_row = self.liststore[path]
self.treeview.set_cursor(path, self.title_column, False)
self.editor.copy_from(self.selected)
else:
self.new_rating()
def change_selection(self):
if self.selected is not None:
self.editor.copy_to(self.selected)
model, tag = self.treeview.get_selection().get_selected()
if tag is None:
return
idnum = model.get_value(tag, 1)
self.selected = self.model.ratings[idnum]
self.selected_row = self.liststore[tag]
self.editor.copy_from(self.selected)
def on_new_rating_button_clicked(self, widget):
self.new_rating()
def on_delete_rating_button_clicked(self, widget):
self.delete_rating()
def on_search_treeview_row_activated(self, widget, column, index):
self.change_selection()
def on_tagging_treeview_row_activated(self, widget, column, index):
self.tagging.change_selection()
def on_title_entry_changed(self, widget):
if self.selected_row is not None:
self.selected_row[0] = self.editor.title
def on_filter_apply_clicked(self, widget, option_a=None, option_b=None):
try:
function = self.filter.create(Rating)
except CompilationError as error:
self.query_label.set_text(
"%s%s" % (
error.source if error.source else self.filter.query,
"\n" + error.underline_string() if error.position else "",
)
)
self.error_label.set_text(error.reason)
self.message_window.show_all()
return
self.liststore.clear()
results = self.model.fill_in(Rating, self.liststore, function)
self.results_label.set_text("%i results" % results)
if self.selected is not None:
for row in self.liststore:
if row[1] == self.selected.idnum:
self.selected_row = row
break
else:
self.selected = None
self.selected_row = None
self.filter.configure_sorting(self.liststore)
def on_filter_tags_apply_clicked(self, widget, option_a=None, option_b=None):
self.tagging.filter = self.filter.create(Tag)
self.filter.configure_sorting_tags(self.tagging.liststore)
self.tagging.refresh()
def on_filter_reset_clicked(self, widget):
self.filter.reset()
def on_filter_tags_reset_clicked(self, widget):
self.filter.reset_tags()
def on_switch_page(self, widget, page, index):
if self.selected is not None:
self.editor.copy_to(self.selected)
if index == Page.TAGGING.value:
self.tagging.refresh()
elif self.tagging.selected is not None:
self.tagging.copy_to(self.tagging.selected)
def on_file_chooser_button_file_set(self, other):
self.files.update_path()
def on_open_file_button_clicked(self, widget):
self.open_file()
def on_save_file_button_clicked(self, widget):
self.save_file()
def on_new_file_button_clicked(self, widget):
self.new_file()
def on_attach_tag_button_clicked(self, widget):
if self.selected is not None and self.tagging.selected is not None:
self.model.attach_tag(self.selected, self.tagging.selected)
self.editor.tags = self.selected.tags
def on_detach_tag_button_clicked(self, widget):
if self.selected is not None and self.tagging.selected is not None:
self.model.detach_tag(self.selected, self.tagging.selected)
self.editor.tags = self.selected.tags
def on_delete_tag_button_clicked(self, widget):
if self.selected is not None and self.tagging.selected is not None:
self.editor.tags = self.selected.tags
self.tagging.remove()
def on_vacuum_tag_button_clicked(self, widget):
self.tagging.vacuum()
def on_search_tag_button_clicked(self, widget):
self.filter.search = Search.ADVANCED
self.filter.advanced = ""
self.filter.query = self.tagging.name
self.on_filter_apply_clicked(widget)
def on_close_message_button_clicked(self, widget):
self.close_message_window()
def on_highlight_buffer_changed(self, widget):
self.filter.highlight()
def main():
rater = Rater()
Gtk.main()
if __name__ == '__main__':
main()
| 26.305428
| 148
| 0.708435
| 3,978
| 28,594
| 4.918301
| 0.087984
| 0.036187
| 0.026987
| 0.015742
| 0.485101
| 0.430411
| 0.363046
| 0.329415
| 0.305597
| 0.28229
| 0
| 0.003033
| 0.169721
| 28,594
| 1,087
| 149
| 26.305428
| 0.821069
| 0.023641
| 0
| 0.304956
| 0
| 0
| 0.094603
| 0.007787
| 0
| 0
| 0
| 0.00092
| 0
| 1
| 0.142313
| false
| 0.003812
| 0.012706
| 0.027954
| 0.227446
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fd567ff8b78d041903de62043964d3c66a7450a4
| 10,218
|
py
|
Python
|
K64F Python Interfacing Testing/Loop_Read.py
|
Marnold212/CamLab-K64F
|
20689b4be38aa329990dbfe13eec43d74b3ae27a
|
[
"Apache-2.0"
] | null | null | null |
K64F Python Interfacing Testing/Loop_Read.py
|
Marnold212/CamLab-K64F
|
20689b4be38aa329990dbfe13eec43d74b3ae27a
|
[
"Apache-2.0"
] | null | null | null |
K64F Python Interfacing Testing/Loop_Read.py
|
Marnold212/CamLab-K64F
|
20689b4be38aa329990dbfe13eec43d74b3ae27a
|
[
"Apache-2.0"
] | null | null | null |
import numpy as np
from serial.serialutil import SerialException
import serial.tools.list_ports as port_list
import serial
import time
def Hex_To_Dec(input): # input of form "40048024"
return int(input, 16)
def Hex_To_Bin(input): # input of form "40048024"
return bin(int(input, 16))
def Hex_To_Bytes(input): # input of form "40048024"
return bytes.fromhex(input)
# def List_All_Mbed_USB_Devices(self):
def List_All_Mbed_USB_Devices():
ports = list(port_list.comports())
Num_Serial_Devices = len(ports)
Num_Mbed_Devices = 0
COM_PORTS = []
connectionType = [] # Create a unique value for USB K64F devices which trigger new functions
# Say 11 = mbed USB, 10 = mbed ANY, 12 = mbed TCP, 14 = mbed WIFI
VID_PID = [] # USB VID:PID are the Vendor/Product ID respectively - same for each K64F Board? - You can determine HIC ID from last 8 digits of Serial Number?
# Note that the 0240 at the start of Serial Number refers to the K64F Family
ID_USB = [] # ID_USB will be the USB serial number - should be unique
Baud_Rate = [] # For now assume all operating at 9600 - may change later so might need to add later on
# IP = [] # Don't think we need this for USB Serial(Mbed) devices
if Num_Serial_Devices > 0:
for i in range(Num_Serial_Devices):
COM_Port = ports[i].usb_description() # ports[i].device outputs COM_PORT (Note port[i][0][0:16] is a particular device - port[i][0] is the COM Port of the device)
if(ports[i][1].startswith("mbed Serial Port")): # port[i] is a particular device - port[i][1] is the description of the device - port[i][1][0:16] are the characters containing the mbed Serial Port description
default_baudrate = 115200 # Assume all boards use the default baudrate of 115200
try:
Serial_device = serial.Serial(port=COM_Port, baudrate=default_baudrate, bytesize=8, timeout=1, stopbits=serial.STOPBITS_ONE)
except:
raise Exception ("Issues connecting with mbed Device on %s", COM_Port) # Need to implement proper error handling
# How can we/Do we need to check we have actually connected to device - and that it is meant to be used for what we are using it for
if(not Serial_device.readable()):
raise Exception ("Issues connecting with mbed Device on %s", COM_Port) # Need to implement proper error handling
Num_Mbed_Devices += 1
COM_PORTS.append(COM_Port)
USB_INFO = ports[i].usb_info().split('=') # USB-PID should be Unique
USB_VIDPID = USB_INFO[1].split(' ')[0]
VID_PID.append(USB_VIDPID)
USB_Serial_Number = USB_INFO[2].split(' ')[0]
ID_USB.append(USB_Serial_Number)
connectionType.append(11) # Added 10 onto definitions used by LJM library to avoid mixing up - however can change if confusing
Serial_device.close() # Close COM Port communication once info obtained
if(ports[i][1].startswith("USB Serial Device")):
default_baudrate = 115200 # Assume all boards use the default baudrate of 115200
try:
Serial_device = serial.Serial(port=COM_Port, baudrate=default_baudrate, bytesize=8, timeout=1, stopbits=serial.STOPBITS_ONE)
except:
raise Exception ("Issues connecting with mbed Device on %s", COM_Port) # Need to implement proper error handling
# How can we/Do we need to check we have actually connected to device - and that it is meant to be used for what we are using it for
if(not Serial_device.readable()):
raise Exception ("Issues connecting with mbed Device on %s", COM_Port) # Need to implement proper error handling
Num_Mbed_Devices += 1
COM_PORTS.append(COM_Port)
USB_INFO = ports[i].usb_info().split('=') # USB-PID should be Unique
USB_VIDPID = USB_INFO[1].split(' ')[0]
VID_PID.append(USB_VIDPID)
USB_Serial_Number = USB_INFO[2].split(' ')[0]
ID_USB.append(USB_Serial_Number)
connectionType.append(11) # Added 10 onto definitions used by LJM library to avoid mixing up - however can change if confusing
Serial_device.close() # Close COM Port communication once info obtained
return(Num_Mbed_Devices, COM_PORTS, connectionType, ID_USB, VID_PID)
def _Serial_Read_Raw(Serial_Device, Expected_Bytes):
serialString = "" # Used to hold data coming over UART
# NEED TO ADD A TIMEOUT TO FOLLOWING LINE
# Removing while(1) should allow the read(size=Expected_Bytes) to naturally timeout after configured time
''' Don't need this chunk since we know how many bytes we expect to get back
Therefore we can use the builtin timeout from the pyserial library
while(1):
print(self.Serial_Device.in_waiting)
if self.Serial_Device.in_waiting > 0:
# Read data out of the buffer until a carriage return / new line is found
# Note there is an inherent issue with this where, if a transmitted value has the equivalent hex value of 0a, it triggers the end of line
# If we are sending unsigned bytes across channel, inevitably some will have the value of '\n' character
# serialString = serialPort.readline()
'''
serialString = Serial_Device.read(size=Expected_Bytes)
serialString = serialString.hex() # Decode bytes into raw hex values
# if(serialString[Expected_Bytes-1] != 10): # 10 = 0x0a = '\n' LF character in Ascii
if(serialString[-2: ] != '0a'): # 10 = 0x0a = '\n' LF character in Ascii
# raise Exception ("Issue with Received Data") # Need to implement proper error handling
print("Error", serialString)
else:
return serialString[:-2]
# def Reverse_4byte_hex(input):
# reverse = ""
# if(len(input) == 4*2):
# reverse += input[6:8] + input[4:6] + input[2:4] + input[0:2]
# return reverse
def Reverse_Hex_Byte_Order(input):
reverse = ""
if(len(input) %2 == 0): # Should be twice as many hex characters as bytes therefore even number of hex
Num_Bytes = len(input) // 2
x = Num_Bytes
while(x > 0):
y = 2 * (x - 1)
reverse += input[y : y+2]
x -= 1
return reverse
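# Worked example (illustrative input only): Reverse_Hex_Byte_Order("12345678")
# walks the bytes back-to-front, so "12 34 56 78" becomes "78563412",
# i.e. it flips a little-endian hex dump into big-endian byte order.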
def ADC_8x_16_Raw_Read(Serial_Device):
Expected_Bytes = 8*2 + 4 + 1 # Include EOL Char
Raw = _Serial_Read_Raw(Serial_Device, Expected_Bytes)
data = []
seconds = Hex_To_Dec(Reverse_Hex_Byte_Order(Raw[0:8]))
for x in range(2, 8+2, 2): # Bytes
y = x+1 # Due to byte order of device -
data.append(Hex_To_Dec(Raw[(4*y) + 0 : (4*y) + 4]))
data.append(Hex_To_Dec(Raw[(4*x) + 0 : (4*x) + 4]))
return seconds, data
# Assumes the data received is a signed (two's-complement) raw ADC reading
def Convert_ADC_Raw(Raw_Reading, ADC_Resolution, Max_Min_Voltage):
Signed_Value = np.int16(Raw_Reading)
quant_step = (2 * Max_Min_Voltage) / (2**ADC_Resolution)
return Signed_Value * quant_step
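# Worked example (a sketch, using the 16-bit / +-5 V settings that appear in the
# commented-out code under __main__): quant_step = (2 * 5) / 2**16 ~ 152.6 uV,
# so a raw reading of 0x4000 (16384) decodes to roughly 16384 * 152.6e-6 ~ 2.5 V.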
def Decode_Time_In_Secs(HEX_four_bytes):
return Hex_To_Dec(Reverse_Hex_Byte_Order(HEX_four_bytes))
def Decode_8x16_Raw(HEX_sixteen_bytes):
if(len(HEX_sixteen_bytes) != 16*2):
raise ValueError
Raw_Readings = []
for x in range(8):
Raw_Readings.append(Hex_To_Dec(Reverse_Hex_Byte_Order(HEX_sixteen_bytes[4*x : 4*(x+1)])))
return Raw_Readings
def Decode_6_Compressed_PWM_Duty(HEX_six_bytes):
if(len(HEX_six_bytes) != 6*2):
raise ValueError
Duty_Cycles = []
for x in range(6):
Duty_Cycles.append(Hex_To_Dec(HEX_six_bytes[2*x : 2*x + 2]) / 100.)
return Duty_Cycles
def Decode_Raw_Data(Raw_Data):
Results = []
for sample in Raw_Data:
entry = []
time = Decode_Time_In_Secs(sample[0:(4*2)])
entry.append(time)
entry.append(Decode_8x16_Raw(sample[4 * 2 : (4 + (2*8)) * 2]))
entry.append(Decode_6_Compressed_PWM_Duty(sample[(4 + (2*8)) * 2 : 40 + 6*2]))
Results.append(entry)
return Results
# for x in range(2, 8+2, 2): # Bytes
# y = x+1 # Due to byte order of device -
# entry.append(Hex_To_Dec(sample[(4*y) + 0 : (4*y) + 4]))
# entry.append(Hex_To_Dec(sample[(4*x) + 0 : (4*x) + 4]))
# # entry.append(Convert_ADC_Raw(Hex_To_Dec(sample[(4*y) + 0 : (4*y) + 4]), 16, 5))
# # entry.append(Convert_ADC_Raw(Hex_To_Dec(sample[(4*x) + 0 : (4*x) + 4]), 16, 5))
# Results.append(entry)
# Testing
if __name__ == "__main__":
mbed_USB_info = List_All_Mbed_USB_Devices()
for i in range(5):
print(mbed_USB_info[i])
# serial_port = serial.Serial(port=mbed_USB_info[1][0], baudrate=115200, bytesize=8, timeout=1, stopbits=serial.STOPBITS_ONE)
# for x in range(1000):
# raw_data = ADC_8x_16_Raw_Read(serial_port)
# # raw_data = serial_port.read(1)
# data = []
# for x in range(8):
# data.append(Convert_ADC_Raw(raw_data[1][x], 16, 5))
# # print(raw_data)
# print(data, raw_data [0])
Bytes_Per_Sample = 32
Number_Samples = 300
Serial_Baudrate = 230400 # 962100
serial_port = serial.Serial(port=mbed_USB_info[1][0], baudrate=Serial_Baudrate, bytesize=8, timeout=1, stopbits=serial.STOPBITS_ONE)
data = []
for x in range(Number_Samples):
raw = serial_port.read(Bytes_Per_Sample).hex()
data.append(raw)
# print(data)
# print(data)
Formatted = Decode_Raw_Data(data)
print(Formatted[0], Formatted[Number_Samples - 1])
# print(Results[0:2])
#
# Serial_device = serial.Serial(port="COM4", baudrate=9600, bytesize=8, timeout=1, stopbits=serial.STOPBITS_ONE)
# Target_Register = "0x40048024"
# Received_String = Read_K64F_Hex_Register(Serial_device, Target_Register, 4)
# print("READ COMMAND (0x30): Requested Register = %s; Contents of Register(Hex) = %s" % (Target_Register , Received_String[:-2]))
| 47.305556
| 224
| 0.641124
| 1,496
| 10,218
| 4.198529
| 0.20254
| 0.028658
| 0.014011
| 0.012259
| 0.465372
| 0.422067
| 0.373348
| 0.338481
| 0.315555
| 0.307594
| 0
| 0.040964
| 0.256997
| 10,218
| 216
| 225
| 47.305556
| 0.786354
| 0.368174
| 0
| 0.276923
| 0
| 0
| 0.037518
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.092308
| false
| 0
| 0.038462
| 0.030769
| 0.215385
| 0.023077
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fd5ed16b310aacd62d38f7ed79f88685cc24b454
| 1,189
|
py
|
Python
|
senlerpy/senler.py
|
tezmen/SenlerPy
|
ce8ab8512ed795e8e6f1e7ff76f54c6aa2d3cd82
|
[
"Apache-2.0"
] | 2
|
2019-03-19T08:46:27.000Z
|
2020-11-12T10:55:59.000Z
|
senlerpy/senler.py
|
tezmen/SenlerPy
|
ce8ab8512ed795e8e6f1e7ff76f54c6aa2d3cd82
|
[
"Apache-2.0"
] | 1
|
2021-03-30T16:55:09.000Z
|
2021-03-30T16:55:09.000Z
|
senlerpy/senler.py
|
tezmen/SenlerPy
|
ce8ab8512ed795e8e6f1e7ff76f54c6aa2d3cd82
|
[
"Apache-2.0"
] | 7
|
2019-03-19T08:47:35.000Z
|
2021-08-24T11:47:41.000Z
|
# -*- coding: utf-8 -*-
import json
import logging
from .request import RequestApi
from .exceptions import ApiError, WrongId, HttpError
logger = logging.getLogger(__name__)
class Senler:
def __init__(self, secret, vk_group_id=None):
self.vk_group = vk_group_id
self.__secret = str(secret).strip()
self._rq = RequestApi(self.secret)
def __error_handler(self, response):
if bool(response['success']):
return response
raise ApiError(response)
def __call__(self, method, **kwargs):
if 'vk_group_id' not in kwargs.keys():
if self.vk_group is None:
raise WrongId('vk_group_id is not specified by any of the methods')
kwargs['vk_group_id'] = self.vk_group
response = self._rq.send(str(method), kwargs)
json_response = {}
try:
json_response = json.loads(response.text)
except Exception:
logger.debug(f'{response.status_code}:{response.text}')
raise HttpError(f'status_code:{response.status_code}, error with decode json')
return self.__error_handler(json_response)
@property
def secret(self):
return self.__secret
@property
def vk_group(self):
return self.__vk_group
@vk_group.setter
def vk_group(self, value):
self.__vk_group = str(value)
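# Minimal usage sketch (not part of the original file; the method name and ids
# below are hypothetical -- substitute a real Senler API method and group id):
#
#   senler = Senler("my-secret-key", vk_group_id=123456)
#   response = senler("subscribers/get", count=10)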
| 25.847826
| 81
| 0.735071
| 172
| 1,189
| 4.77907
| 0.372093
| 0.110706
| 0.054745
| 0.03163
| 0.043796
| 0
| 0
| 0
| 0
| 0
| 0
| 0.000988
| 0.148865
| 1,189
| 45
| 82
| 26.422222
| 0.811265
| 0.017662
| 0
| 0.055556
| 0
| 0
| 0.150086
| 0.062607
| 0
| 0
| 0
| 0
| 0
| 1
| 0.166667
| false
| 0
| 0.111111
| 0.055556
| 0.416667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fd5f3466a377d682676cf2f35cddaec4567f59df
| 11,354
|
py
|
Python
|
robinhoodbot/main.py
|
bpk9/Robinhood-Stock-Trading-Bot
|
c2ab0dd58f5236ee051ad38277c8ba5c46bd0aa4
|
[
"MIT"
] | null | null | null |
robinhoodbot/main.py
|
bpk9/Robinhood-Stock-Trading-Bot
|
c2ab0dd58f5236ee051ad38277c8ba5c46bd0aa4
|
[
"MIT"
] | null | null | null |
robinhoodbot/main.py
|
bpk9/Robinhood-Stock-Trading-Bot
|
c2ab0dd58f5236ee051ad38277c8ba5c46bd0aa4
|
[
"MIT"
] | null | null | null |
import pyotp
import robin_stocks as r
import pandas as pd
import numpy as np
import ta as ta
from pandas.plotting import register_matplotlib_converters
from ta import *
from misc import *
from tradingstats import *
from config import *
#Log in to Robinhood
#Put your username and password in a config.py file in the same directory (see sample file)
totp = pyotp.TOTP(rh_2fa_code).now()
login = r.login(rh_username,rh_password, totp)
#Safe divide by zero division function
def safe_division(n, d):
return n / d if d else 0
def get_spy_symbols():
"""
Returns: the symbol for each stock in the S&P 500 as a list of strings
"""
symbols = pd.read_html('https://en.wikipedia.org/wiki/List_of_S%26P_500_companies')[0]['Symbol']
return list(symbols.values.flatten())
def get_watchlist_symbols():
"""
Returns: the symbol for each stock in your watchlist as a list of strings
"""
my_list_names = []
symbols = []
for name in r.get_all_watchlists(info='name'):
my_list_names.append(name)
for name in my_list_names:
list = r.get_watchlist_by_name(name)
for item in list:
instrument_data = r.get_instrument_by_url(item.get('instrument'))
symbol = instrument_data['symbol']
symbols.append(symbol)
return symbols
def get_portfolio_symbols():
"""
Returns: the symbol for each stock in your portfolio as a list of strings
"""
symbols = []
holdings_data = r.get_open_stock_positions()
for item in holdings_data:
if not item:
continue
instrument_data = r.get_instrument_by_url(item.get('instrument'))
symbol = instrument_data['symbol']
symbols.append(symbol)
return symbols
def get_position_creation_date(symbol, holdings_data):
"""Returns the time at which we bought a certain stock in our portfolio
Args:
symbol(str): Symbol of the stock that we are trying to figure out when it was bought
holdings_data(dict): dict returned by r.get_open_stock_positions()
Returns:
A string containing the date and time the stock was bought, or "Not found" otherwise
"""
instrument = r.get_instruments_by_symbols(symbol)
url = instrument[0].get('url')
for dict in holdings_data:
if(dict.get('instrument') == url):
return dict.get('created_at')
return "Not found"
def get_modified_holdings():
""" Retrieves the same dictionary as r.build_holdings, but includes data about
when the stock was purchased, which is useful for the read_trade_history() method
in tradingstats.py
Returns:
the same dict from r.build_holdings, but with an extra key-value pair for each
position you have, which is 'bought_at': (the time the stock was purchased)
"""
holdings = r.build_holdings()
holdings_data = r.get_open_stock_positions()
for symbol, dict in holdings.items():
bought_at = get_position_creation_date(symbol, holdings_data)
bought_at = str(pd.to_datetime(bought_at))
holdings[symbol].update({'bought_at': bought_at})
return holdings
def golden_cross(stockTicker, n1, n2, direction=""):
"""Determine if a golden/death cross has occured for a specified stock in the last X trading days
Args:
stockTicker(str): Symbol of the stock we're querying
n1(int): Specifies the short-term indicator as an X-day moving average.
n2(int): Specifies the long-term indicator as an X-day moving average.
(n1 should be smaller than n2 to produce meaningful results, e.g n1=50, n2=200)
direction(str): "above" if we are searching for an upwards cross, "below" if we are searching for a downwards cross. Optional, used for printing purposes
Returns:
1 if the short-term indicator crosses above the long-term one
0 if the short-term indicator crosses below the long-term one
price(float): last listed close price
"""
history = get_historicals(stockTicker)
closingPrices = []
dates = []
for item in history:
closingPrices.append(float(item['close_price']))
dates.append(item['begins_at'])
price = pd.Series(closingPrices)
dates = pd.Series(dates)
dates = pd.to_datetime(dates)
ema1 = ta.trend.EMAIndicator(price, int(n1)).ema_indicator()
ema2 = ta.trend.EMAIndicator(price, int(n2)).ema_indicator()
if plot:
show_plot(price, ema1, ema2, dates, symbol=stockTicker, label1=str(n1)+" day EMA", label2=str(n2)+" day EMA")
return ema1.iat[-1] > ema2.iat[-1], closingPrices[len(closingPrices) - 1]
def get_rsi(symbol, days):
"""Determine the relative strength index for a specified stock in the last X trading days
Args:
symbol(str): Symbol of the stock we're querying
days(int): Specifies the window (number of trading days) used to compute the RSI
Returns:
rsi(float): Relative strength index value for a specified stock in the last X trading days
"""
history = get_historicals(symbol)
closingPrices = [ float(item['close_price']) for item in history ]
price = pd.Series(closingPrices)
rsi = ta.momentum.RSIIndicator(close=price, window=int(days), fillna=False).rsi()
return rsi.iat[-1]
def get_macd(symbol):
"""Determine the Moving Average Convergence/Divergence for a specified stock
Args:
symbol(str): Symbol of the stock we're querying
Returns:
macd(float): Moving Average Convergence/Divergence value for a specified stock
"""
history = get_historicals(symbol)
closingPrices = [ float(item['close_price']) for item in history ]
price = pd.Series(closingPrices)
macd = ta.trend.MACD(price).macd_diff()
return macd.iat[-1]
def get_buy_rating(symbol):
"""Determine the listed investor rating for a specified stock
Args:
symbol(str): Symbol of the stock we're querying
Returns:
rating(int): 0-100 rating of a particular stock
"""
ratings = r.get_ratings(symbol=symbol)['summary']
if ratings:
return ratings['num_buy_ratings'] / (ratings['num_buy_ratings'] + ratings['num_hold_ratings'] + ratings['num_sell_ratings']) * 100
return 0
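# Worked example (illustrative numbers only): with 6 buy, 3 hold and 1 sell
# ratings, get_buy_rating returns 6 / (6 + 3 + 1) * 100 = 60.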
def sell_holdings(symbol, holdings_data):
""" Place an order to sell all holdings of a stock.
Args:
symbol(str): Symbol of the stock we want to sell
holdings_data(dict): dict obtained from get_modified_holdings() method
"""
shares_owned = int(float(holdings_data[symbol].get("quantity")))
if not debug:
r.order_sell_market(symbol, shares_owned)
print("####### Selling " + str(shares_owned) + " shares of " + symbol + " #######")
def buy_holdings(potential_buys, profile_data, holdings_data):
""" Places orders to buy holdings of stocks. This method will try to order
an appropriate amount of shares such that your holdings of the stock will
roughly match the average for the rest of your portfolio. If the share
price is too high considering the rest of your holdings and the amount of
buying power in your account, it will not order any shares.
Args:
potential_buys(list): List of strings, the strings are the symbols of stocks we want to buy
profile_data(dict): dict obtained from the r.build_user_profile() method
holdings_data(dict): dict obtained from r.build_holdings() or get_modified_holdings() method
"""
cash = float(profile_data.get('cash'))
portfolio_value = float(profile_data.get('equity')) - cash
ideal_position_size = (safe_division(portfolio_value, len(holdings_data))+cash/len(potential_buys))/(2 * len(potential_buys))
prices = r.get_latest_price(potential_buys)
for i in range(0, len(potential_buys)):
stock_price = float(prices[i])
if(ideal_position_size < stock_price < ideal_position_size*1.5):
num_shares = int(ideal_position_size*1.5/stock_price)
elif (stock_price < ideal_position_size):
num_shares = int(ideal_position_size/stock_price)
else:
print("####### Tried buying shares of " + potential_buys[i] + ", but not enough buying power to do so#######")
break
print("####### Buying " + str(num_shares) + " shares of " + potential_buys[i] + " #######")
if not debug:
r.order_buy_market(potential_buys[i], num_shares)
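# Worked sizing example (illustrative numbers only): with portfolio_value = 9000,
# cash = 1000, 3 current holdings and 2 potential buys,
# ideal_position_size = (9000/3 + 1000/2) / (2*2) = 875; a $600 stock then gets
# int(875/600) = 1 share and a $1000 stock gets int(875*1.5/1000) = 1 share.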
def scan_stocks():
""" The main method. Sells stocks in your portfolio if their 50 day moving average crosses
below the 200 day, and buys stocks in your watchlist if the opposite happens.
###############################################################################################
WARNING: Comment out the sell_holdings and buy_holdings lines if you don't actually want to execute the trade.
###############################################################################################
If you sell a stock, this updates tradehistory.txt with information about the position,
how much you've earned/lost, etc.
"""
if debug:
print("----- DEBUG MODE -----\n")
print("----- Starting scan... -----\n")
register_matplotlib_converters()
spy_symbols = get_spy_symbols()
portfolio_symbols = get_portfolio_symbols()
holdings_data = get_modified_holdings()
potential_buys = []
sells = []
stock_data = []
print("Current Portfolio: " + str(portfolio_symbols) + "\n")
# print("Current Watchlist: " + str(watchlist_symbols) + "\n")
print("----- Scanning portfolio for stocks to sell -----\n")
print()
print("PORTFOLIO")
print("-------------------")
print()
print ("{}\t{}\t\t{}\t{}\t{}\t{}".format('SYMBOL', 'PRICE', 'RSI', 'MACD', 'RATING', 'EMA'))
print()
for symbol in portfolio_symbols:
cross, price = golden_cross(symbol, n1=50, n2=200, direction="below")
data = {'symbol': symbol, 'price': price, 'cross': cross, 'rsi': get_rsi(symbol=symbol, days=14), 'macd': get_macd(symbol=symbol), 'buy_rating': get_buy_rating(symbol=symbol)}
stock_data.append(data)
print ("{}\t${:.2f}\t\t{}\t{}\t{}\t{}".format(data['symbol'], data['price'], rsi_to_str(data['rsi']), macd_to_str(data['macd']), rating_to_str(data['buy_rating']), cross_to_str(data['cross'])))
if(cross == False):
sell_holdings(symbol, holdings_data)
sells.append(symbol)
profile_data = r.build_user_profile()
print("\n----- Scanning S&P 500 for stocks to buy -----\n")
for symbol in spy_symbols:
if(symbol not in portfolio_symbols):
cross, price = golden_cross(symbol, n1=50, n2=200, direction="above")
stock_data.append({'symbol': symbol, 'price': price, 'cross': cross, 'rsi': get_rsi(symbol=symbol, days=14), 'macd': get_macd(symbol=symbol), 'buy_rating': get_buy_rating(symbol=symbol)})
if(cross == True):
potential_buys.append(symbol)
if(len(potential_buys) > 0):
buy_holdings(potential_buys, profile_data, holdings_data)
if(len(sells) > 0):
update_trade_history(sells, holdings_data, "tradehistory.txt")
print("----- Scan over -----\n")
print_table(stock_data)
if debug:
print("----- DEBUG MODE -----\n")
#execute the scan
scan_stocks()
| 42.365672
| 201
| 0.656068
| 1,562
| 11,354
| 4.62484
| 0.211908
| 0.028239
| 0.011074
| 0.013566
| 0.332087
| 0.276993
| 0.238234
| 0.223284
| 0.181202
| 0.168605
| 0
| 0.009769
| 0.215607
| 11,354
| 267
| 202
| 42.524345
| 0.80137
| 0.353708
| 0
| 0.184211
| 0
| 0
| 0.127116
| 0.007604
| 0
| 0
| 0
| 0
| 0
| 1
| 0.085526
| false
| 0.006579
| 0.065789
| 0.006579
| 0.230263
| 0.118421
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fd618c3a159e9f99d7c6ca6d044db4a500817e13
| 1,160
|
py
|
Python
|
debug_toolbar/panels/profiling.py
|
chrismaille/fastapi-debug-toolbar
|
76d1e78eda4a23fc2b3e3d3c978ee9d8dbf025ae
|
[
"BSD-3-Clause"
] | 36
|
2021-07-22T08:11:31.000Z
|
2022-01-31T13:09:26.000Z
|
debug_toolbar/panels/profiling.py
|
chrismaille/fastapi-debug-toolbar
|
76d1e78eda4a23fc2b3e3d3c978ee9d8dbf025ae
|
[
"BSD-3-Clause"
] | 10
|
2021-07-21T19:39:38.000Z
|
2022-02-26T15:35:35.000Z
|
debug_toolbar/panels/profiling.py
|
chrismaille/fastapi-debug-toolbar
|
76d1e78eda4a23fc2b3e3d3c978ee9d8dbf025ae
|
[
"BSD-3-Clause"
] | 2
|
2021-07-28T09:55:13.000Z
|
2022-02-18T11:29:25.000Z
|
import typing as t
from fastapi import Request, Response
from pyinstrument import Profiler
from starlette.concurrency import run_in_threadpool
from debug_toolbar.panels import Panel
from debug_toolbar.types import Stats
from debug_toolbar.utils import is_coroutine, matched_endpoint
class ProfilingPanel(Panel):
title = "Profiling"
template = "panels/profiling.html"
async def process_request(self, request: Request) -> Response:
self.profiler = Profiler(**self.toolbar.settings.PROFILER_OPTIONS)
endpoint = matched_endpoint(request)
if endpoint is None:
return await super().process_request(request)
is_async = is_coroutine(endpoint)
async def call(func: t.Callable) -> None:
await run_in_threadpool(func) if not is_async else func()
await call(self.profiler.start)
try:
response = await super().process_request(request)
finally:
await call(self.profiler.stop)
return response
async def generate_stats(self, request: Request, response: Response) -> Stats:
return {"content": self.profiler.output_html()}
| 30.526316
| 82
| 0.70431
| 139
| 1,160
| 5.741007
| 0.395683
| 0.070175
| 0.06015
| 0.065163
| 0.077694
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.218103
| 1,160
| 37
| 83
| 31.351351
| 0.879824
| 0
| 0
| 0
| 0
| 0
| 0.031897
| 0.018103
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.269231
| 0
| 0.5
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fd63367d2463bae216c32c0f3162ba07be04c060
| 3,003
|
py
|
Python
|
test/system/auto/simple/compaction.py
|
marciosilva/accumulo
|
70404cbd1e0a2d2b7c2235009e158979abeef35f
|
[
"Apache-2.0"
] | 3
|
2021-11-11T05:18:23.000Z
|
2021-11-11T05:18:43.000Z
|
test/system/auto/simple/compaction.py
|
jatrost/accumulo
|
6be40f2f3711aaa7d0b68b5b6852b79304af3cff
|
[
"Apache-2.0"
] | 1
|
2021-06-22T09:52:37.000Z
|
2021-06-22T09:52:37.000Z
|
test/system/auto/simple/compaction.py
|
isabella232/accumulo-1
|
70404cbd1e0a2d2b7c2235009e158979abeef35f
|
[
"Apache-2.0"
] | 1
|
2021-06-22T09:33:38.000Z
|
2021-06-22T09:33:38.000Z
|
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import logging
import unittest
from simple.bulk import SimpleBulkTest
N = 100000
COUNT = 5
log = logging.getLogger('test.auto')
class CompactionTest(SimpleBulkTest):
"Start a clean accumulo, bulk import a lot of map files, read while a multi-pass compaction is happening"
order = 26
tableSettings = SimpleBulkTest.tableSettings.copy()
tableSettings['test_ingest'] = {
'table.compaction.major.ratio': 1.0
}
settings = SimpleBulkTest.settings.copy()
settings.update({
'tserver.compaction.major.files.open.max':4,
'tserver.compaction.major.delay': 1,
'tserver.compaction.major.concurrent.max':1,
})
def createRFiles(self, host):
handle = self.runClassOn(
self.masterHost(),
'org.apache.accumulo.server.test.CreateRFiles',
"testrf 4 0 500000 59".split())
out, err = handle.communicate()
self.assert_(handle.returncode == 0)
def runTest(self):
# initialize the database
self.createTable('test_ingest')
self.execute(self.masterHost(), 'hadoop dfs -rmr /testrf'.split())
self.execute(self.masterHost(), 'hadoop dfs -rmr /testrfFail'.split())
# insert some data
self.createRFiles(self.masterHost())
self.bulkLoad(self.masterHost(), '/testrf')
out, err, code = self.shell(self.masterHost(), "table !METADATA\nscan -b ! -c ~tab,file\n")
self.assert_(code == 0)
beforeCount = len(out.split('\n'))
log.info("Verifying Ingestion")
for c in range(5):
handles = []
for i in range(COUNT):
handles.append(self.verify(self.hosts[i%len(self.hosts)], N, i * N))
for h in handles:
out, err = h.communicate()
self.assert_(h.returncode == 0)
out, err, code = self.shell(self.masterHost(), "table !METADATA\nscan -b ! -c ~tab,file\n")
self.assert_(code == 0)
afterCount = len(out.split('\n'))
self.assert_(afterCount < beforeCount)
self.shutdown_accumulo()
def suite():
result = unittest.TestSuite()
result.addTest(CompactionTest())
return result
| 33.741573
| 109
| 0.656011
| 377
| 3,003
| 5.204244
| 0.461538
| 0.049949
| 0.033639
| 0.01631
| 0.115189
| 0.115189
| 0.115189
| 0.077472
| 0.077472
| 0.077472
| 0
| 0.014329
| 0.2331
| 3,003
| 88
| 110
| 34.125
| 0.837603
| 0.299034
| 0
| 0.075472
| 0
| 0.018868
| 0.226174
| 0.082079
| 0
| 0
| 0
| 0
| 0.09434
| 1
| 0.056604
| false
| 0.018868
| 0.09434
| 0
| 0.245283
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b5b97c67425c6b42d928076e5a8d8cb8fc8a23c8
| 12,107
|
py
|
Python
|
python/lexical_analysis.py
|
Compiler-Construction-Uni-Freiburg/lecture-notes-2021
|
56300e6649e32f0594bbbd046a2e19351c57dd0c
|
[
"BSD-3-Clause"
] | 1
|
2022-01-05T07:11:01.000Z
|
2022-01-05T07:11:01.000Z
|
python/lexical_analysis.py
|
Compiler-Construction-Uni-Freiburg/lecture-notes-2021
|
56300e6649e32f0594bbbd046a2e19351c57dd0c
|
[
"BSD-3-Clause"
] | null | null | null |
python/lexical_analysis.py
|
Compiler-Construction-Uni-Freiburg/lecture-notes-2021
|
56300e6649e32f0594bbbd046a2e19351c57dd0c
|
[
"BSD-3-Clause"
] | null | null | null |
from dataclasses import dataclass
from functools import reduce
from typing import Callable, Iterable, Iterator
'''
The first phase of a compiler is called `lexical analysis` implemented by a `scanner` or `lexer`.
It breaks a program into a sequence `lexemes`:
meaningful substrings of the input.
It also transforms lexemes into `tokens`:
symbolic representations of lexemes with some internalized information.
The classic, state-of-the-art method to specify lexemes is by regular expressions.
'''
'''
1. Representation of regular expressions.
'''
@dataclass
class Regexp:
'abstract class for AST of regular expressions'
def is_null(self):
return False
@dataclass
class Null (Regexp):
'empty set: {}'
def is_null(self):
return True
@dataclass
class Epsilon (Regexp):
'empty word: { "" }'
@dataclass
class Symbol (Regexp):
'single symbol: { "a" }'
sym: str
@dataclass
class Concat(Regexp):
'concatenation: r1.r2'
left: Regexp
right: Regexp
@dataclass
class Alternative(Regexp):
'alternative: r1|r2'
left: Regexp
right: Regexp
@dataclass
class Repeat(Regexp):
'Kleene star: r*'
body: Regexp
## smart constructors for regular expressions
## goal: construct regexps in "normal form"
## * avoid Null() subexpressions
## * Epsilon() subexpressions as much as possible
## * nest concatenation and alternative to the right
null = Null()
epsilon = Epsilon()
symbol = Symbol
def concat(r1, r2):
match (r1, r2):
case (Null(), _) | (_, Null()):
return null
case (Epsilon(), _):
return r2
case (_, Epsilon()):
return r1
case (Concat(r11, r12), _):
return Concat(r11, concat(r12, r2))
case _:
return Concat(r1, r2)
def alternative(r1, r2):
match (r1, r2):
case (Null(), _):
return r2
case (_, Null()):
return r1
case (Alternative(r11, r12), _):
return Alternative(r11, alternative(r12, r2))
case _:
return Alternative(r1, r2)
def repeat(r: Regexp) -> Regexp:
match r:
case Null() | Epsilon():
return epsilon
case Repeat(r1): # r** == r*
return r
case _:
return Repeat(r)
## utilities to construct regular expressions
def optional(r : Regexp) -> Regexp:
'construct r?'
return alternative(r, epsilon)
def repeat_one(r : Regexp) -> Regexp:
'construct r+'
return concat(r, repeat(r))
def concat_list(rs : Iterable[Regexp]) -> Regexp:
return reduce(lambda out, r: concat(out, r), rs, epsilon)
def alternative_list(rs : Iterable[Regexp]) -> Regexp:
return reduce(lambda out, r: alternative(out, r), rs, null)
## a few examples for regular expressions (taken from JavaScript definition)
'''
⟨digit⟩ ::= 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
⟨hexdigit⟩ ::= ⟨digit⟩ | A | B | C | D | E | F | a | b | c | d | e | f
⟨hexprefix⟩ ::= 0x | 0X
⟨sign⟩ ::= ⟨empty⟩ | -
⟨empty⟩ ::=
⟨integer-literal⟩ ::= ⟨sign⟩ ⟨digit⟩+ | ⟨sign⟩ ⟨hexprefix⟩ ⟨hexdigit⟩+
⟨letter⟩ ::= A | B | C | ...| Z | a | b | c | ...| z
⟨identifier-start⟩ ::= ⟨letter⟩ | $ | _
⟨identifier-part⟩ ::= ⟨identifier-start⟩ | ⟨digit⟩
⟨identifier⟩ ::= ⟨identifier-start⟩ ⟨identifier-part⟩*
'''
def class_regexp(s: str) -> Regexp:
'returns a regexp for the alternative of all characters in s'
return alternative_list(map(symbol, s))
def string_regexp(s: str) -> Regexp:
'returns a regexp for the concatenation of all characters in s'
return concat_list(map(symbol, s))
def char_range_regexp(c1: str, c2: str) -> Regexp:
return alternative_list(map(symbol, map(chr, range(ord(c1), ord(c2)+1))))
digit = class_regexp("0123456789")
hexdigit = alternative(digit, class_regexp("ABCDEFabcdef"))
hexprefix = alternative(string_regexp("0x"), string_regexp("0X"))
sign = optional(symbol('-'))
integer_literal = concat(sign, repeat_one(digit))
integer_literal_js = alternative( concat(sign, repeat_one(digit)),
concat_list([sign, hexprefix, repeat_one(hexdigit)]))
lc_letter = alternative_list(map(symbol, map(chr, range(ord('a'), ord('z')+1))))
uc_letter = alternative_list(map(symbol, map(chr, range(ord('A'), ord('Z')+1))))
letter = alternative(lc_letter, uc_letter)
identifier_start = alternative_list([letter, symbol('$'), symbol('_')])
identifier_part = alternative(identifier_start, digit)
identifier = concat(identifier_start, repeat(identifier_part))
blank_characters = "\t "
line_end_characters = "\n\r"
white_space = repeat_one(class_regexp(blank_characters + line_end_characters))
'''
2. Executing regular expressions
The standard method to 'execute' regular expressions is to transform them into finite automata.
Here we use a different method to execute them directly using `derivatives`.
This method uses regular expressions themselves as states of an automaton without constructing it.
We consider a regexp a final state if it accepts the empty word "".
This condition can be checked by a simple function on the regexp.
'''
def accepts_empty(r : Regexp) -> bool:
'check if r accepts the empty word'
match r:
case Null() | Symbol(_):
return False
case Epsilon() | Repeat(_):
return True
case Concat(r1, r2):
return accepts_empty(r1) and accepts_empty(r2)
case Alternative(r1, r2):
return accepts_empty(r1) or accepts_empty(r2)
'''
The transition function of a (deterministic) finite automaton maps
state `r0` and symbol `s` to the next state, say, `r1`.
If the state `r0` recognizes any words `w` that start with `s` (w[0] == s),
then state `r1` recognizes all those words `w` with the first letter removed (w[1:]).
This construction is called the `derivative` of a language by symbol `s`:
derivative(L, s) = { w[1:] | w in L and w[0] == s }
If L is the language recognized by regular expression `r0`,
then we can effectively compute a regular expression for derivative(L, s)!
As follows:
'''
def after_symbol(s : str, r : Regexp) -> Regexp:
'produces regexp after r consumes symbol s'
match r:
case Null() | Epsilon():
return null
case Symbol(s_expected):
return epsilon if s == s_expected else null
case Alternative(r1, r2):
return alternative(after_symbol(s, r1), after_symbol(s, r2))
case Concat(r1, r2):
return alternative(concat(after_symbol(s, r1), r2),
after_symbol(s, r2) if accepts_empty(r1) else null)
case Repeat(r1):
return concat(after_symbol(s, r1), Repeat(r1))
## matching against a regular expression
def matches(r : Regexp, ss: str) -> bool:
i = 0
while i < len(ss):
r = after_symbol(ss[i], r)
if r.is_null():
return False
i += 1
# reached end of string
return accepts_empty(r)
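# A few illustrative checks, added as a sketch; they only use regexps defined
# above and can be run by calling _derivative_examples() manually.
def _derivative_examples():
    assert matches(integer_literal, "-42")        # sign followed by digits
    assert matches(integer_literal_js, "0x1F")    # hex prefix plus hex digits
    assert matches(identifier, "foo_bar1")
    assert not matches(identifier, "1foo")        # identifiers cannot start with a digit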
########################################################################
'''
3. Lexer descriptions
A lexer (scanner) is different from a finite automaton in several aspects.
1. The lexer must classify the next lexeme from a choice of several regular expressions.
It cannot match a single regexp, but it has to keep track and manage matching for
several regexps at the same time.
2. The lexer follows the `maximum munch` rule, which says that the next lexeme is
the longest prefix that matches one of the regular expressions.
3. Once a lexeme is identified, the lexer must turn it into a token and attribute.
Re maximum munch consider this input:
ifoundsalvationinapubliclavatory
Suppose that `if` is a keyword, why should the lexer return <identifier> for this input?
Similarly:
returnSegment
would count as an identifier even though starting with the keyword `return`.
These requirements motivate the following definitions.
A lex_action
* takes some (s : str, i : int position in s, j : int pos in s)
* consumes the lexeme sitting at s[i:j]
* returns (token for s[i:j], some k >= j)
'''
class Token: pass # abstract class of tokens
Position = int # input position
lex_result = tuple[Token, Position]
lex_action = Callable[[str, Position, Position], lex_result]
# a lexer rule attaches a lex_action to a regular expression
@dataclass
class Lex_rule:
re : Regexp
action: lex_action
# a lexer tries to match its input to a list of lex rules
Lex_state = list[Lex_rule]
# reading a symbol advances the regular expression of each lex rule
def next_state(state: Lex_state, ss: str, i: int):
return list(filter(lambda rule: not (rule.re.is_null()),
[Lex_rule(after_symbol(ss[i], rule.re), rule.action)
for rule in state]))
def initial_state(rules: list[Lex_rule]) -> Lex_state:
return rules
def matched_rules(state: Lex_state) -> Lex_state:
return [rule for rule in state if accepts_empty(rule.re)]
def is_stuck(state: Lex_state) -> bool:
return not state
#####################################################################
class ScanError (Exception): pass
@dataclass
class Match:
action: lex_action
final : Position
@dataclass
class Scan:
spec: Lex_state
def scan_one(self) -> Callable[[str, Position], lex_result]:
return lambda ss, i: self.scan_one_token(ss, i)
def scan_one_token(self, ss: str, i: Position) -> lex_result:
state = self.spec
j = i
last_match = None
while j < len(ss) and not is_stuck(state):
state = next_state(state, ss, j); j += 1
all_matches = matched_rules(state)
if all_matches:
this_match = all_matches[0]
last_match = Match(this_match.action, j)
match last_match:
case None:
raise ScanError("no lexeme found:", ss[i:])
case Match(action, final):
return action(ss, i, final)
raise ScanError("internal error: last_match=", last_match)
def make_scanner(scan_one: Callable[[str, Position], lex_result], ss: str) -> Iterator[Token]:
i = 0
while i < len(ss):
(token, i) = scan_one(ss, i)
yield (token)
## example: excerpt from JavaScript scanner
escaped_char = concat(symbol('\\'), alternative(symbol('\\'), symbol('"')))
content_char = alternative_list([symbol(chr(a))
for a in range(ord(' '), 128)
if a not in [ord('\\'), ord('"')]])
string_literal = concat_list([symbol('"'), repeat(alternative(escaped_char, content_char)), symbol('"')])
@dataclass
class Return(Token): pass
@dataclass
class Intlit(Token): value: int
@dataclass
class Ident(Token): name: str
@dataclass
class Lparen(Token): pass
@dataclass
class Rparen(Token): pass
@dataclass
class Slash(Token): pass
@dataclass
class Strlit(Token): value: str
string_spec: Lex_state = [
Lex_rule(escaped_char, lambda ss, i, j: (ss[i+1], j)),
Lex_rule(content_char, lambda ss, i, j: (ss[i], j))
]
string_token = Scan(string_spec).scan_one()
def strlit(ss: str) -> Strlit:
"use subsidiary scanner to transform string content"
return Strlit("".join(make_scanner(string_token, ss)))
js_spec: Lex_state = [
Lex_rule(string_regexp("return"), lambda ss, i, j: (Return(), j)),
Lex_rule(integer_literal, lambda ss, i, j: (Intlit(int(ss[i:j])), j)),
Lex_rule(identifier, lambda ss, i, j: (Ident(ss[i:j]), j)),
Lex_rule(white_space, lambda ss, i, j: js_token(ss, j)),
Lex_rule(symbol("("), lambda ss, i, j: (Lparen(), j)),
Lex_rule(symbol(")"), lambda ss, i, j: (Rparen(), j)),
Lex_rule(symbol("/"), lambda ss, i, j: (Slash(), j)),
Lex_rule(string_literal, lambda ss, i, j: (strlit(ss[i+1:j-1]), j))
]
js_token = Scan(js_spec).scan_one()
def example1():
return js_token(" 42...", 0)
def example2():
sc = make_scanner(js_token, "return Segment (pi / 2)")
for ta in sc:
print(ta)
def example3():
sc = make_scanner(js_token, 'return "foobar\\"..."')
for ta in sc:
print(ta)
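# Expected behavior (a hedged sketch, not captured program output): assuming the
# helper regexps defined earlier (string_regexp, integer_literal, identifier,
# white_space) match what their names suggest, example2() should print roughly:
#   Return()
#   Ident(name='Segment')
#   Lparen()
#   Ident(name='pi')
#   Slash()
#   Intlit(value=2)
#   Rparen()
# Note that "return" also matches the identifier rule; the keyword wins only
# because scan_one_token breaks ties between equally long matches by rule order.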
| 32.810298
| 105
| 0.641943
| 1,712
| 12,107
| 4.468458
| 0.202103
| 0.008627
| 0.006797
| 0.013072
| 0.181961
| 0.120784
| 0.069542
| 0.059608
| 0.035033
| 0.025621
| 0
| 0.013235
| 0.22615
| 12,107
| 369
| 106
| 32.810298
| 0.798591
| 0.090278
| 0
| 0.258621
| 0
| 0
| 0.068941
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.112069
| false
| 0.025862
| 0.012931
| 0.047414
| 0.439655
| 0.008621
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b5bc0b82b561c3ccd0c214272db1e77e19243f08
| 4,003
|
py
|
Python
|
rad/rest/client/cli/zpool/cmd_zpool_list.py
|
guillermomolina/rad-rest-client
|
c22528764bdf9dddc5ff7d269d7465d34878a7e3
|
[
"Apache-2.0"
] | 1
|
2021-09-17T13:40:13.000Z
|
2021-09-17T13:40:13.000Z
|
rad/rest/client/cli/zpool/cmd_zpool_list.py
|
guillermomolina/rad-rest-client
|
c22528764bdf9dddc5ff7d269d7465d34878a7e3
|
[
"Apache-2.0"
] | null | null | null |
rad/rest/client/cli/zpool/cmd_zpool_list.py
|
guillermomolina/rad-rest-client
|
c22528764bdf9dddc5ff7d269d7465d34878a7e3
|
[
"Apache-2.0"
] | 1
|
2021-09-17T16:26:32.000Z
|
2021-09-17T16:26:32.000Z
|
# Copyright 2021, Guillermo Adrián Molina
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import logging
import json
import yaml
from rad.rest.client.util import print_table, print_parsable
from rad.rest.client.api.authentication_1 import Session
from rad.rest.client.api.zfsmgr_1 import Zpool
from rad.rest.client.api.zfsmgr_1.zpool_resource import ZpoolResource
LOG = logging.getLogger(__name__)
class CmdZpoolList:
name = 'list'
aliases = ['ls']
@staticmethod
def init_parser(subparsers, parent_parser):
parser = subparsers.add_parser(CmdZpoolList.name,
aliases=CmdZpoolList.aliases,
parents=[parent_parser],
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
description='List ZFS pools',
help='List ZFS pools')
parser.add_argument('-c', '--columns',
nargs='+',
choices=ZpoolResource.get_property_names(),
default=['name', 'size', 'allocated', 'free',
'capacity', 'dedupratio', 'health', 'altroot'],
help='Specify which columns to show in the table')
parser.add_argument('-s', '--sort-by',
choices=ZpoolResource.get_property_names(),
default='name',
help='Specify the sort order in the table')
group = parser.add_mutually_exclusive_group()
group.add_argument('-t', '--table',
action='store_true',
default=True,
help='Show output in table format')
group.add_argument('-y', '--yaml',
action='store_true',
help='Show output in yaml format')
group.add_argument('-j', '--json',
action='store_true',
help='Show output in json format')
group.add_argument('-d', '--delimiter',
help='Show output in a parsable format delimited by the string')
def __init__(self, options):
with Session(protocol=options.protocol, hostname=options.hostname, port=options.port) as session:
zpool_instances = session.list_objects(Zpool())
zpool_resources = [instance.get_properties(
options.columns) for instance in zpool_instances]
zpools = []
for zfs_resource in zpool_resources:
resource = {}
for property in zfs_resource.properties:
resource[property.name] = property
zpools.append(resource)
# sort by key
if options.sort_by is not None:
zpools = sorted(zpools, key=lambda i: i[options.sort_by])
if options.json:
resources = [resource.to_json() for resource in zpool_resources]
print(json.dumps(resources, indent=4))
elif options.yaml:
resources = [resource.to_json() for resource in zpool_resources]
print(yaml.dump(resources))
elif options.delimiter is not None:
print_parsable(zpools, options.delimiter)
elif options.table:
print_table(zpools)
| 43.043011
| 105
| 0.572321
| 422
| 4,003
| 5.308057
| 0.395735
| 0.026786
| 0.019643
| 0.030357
| 0.160714
| 0.142857
| 0.142857
| 0.049107
| 0.049107
| 0.049107
| 0
| 0.004552
| 0.341494
| 4,003
| 92
| 106
| 43.51087
| 0.84522
| 0.142143
| 0
| 0.106061
| 0
| 0
| 0.114653
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.030303
| false
| 0
| 0.121212
| 0
| 0.19697
| 0.075758
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b5bcf620df665e14fd0ade4b0917ffe41b1ea768
| 3,736
|
py
|
Python
|
Sofware/main.py
|
Mark-MDO47/PiPod
|
990042ff5ad69d9fc93d1bd5bd684db730156222
|
[
"MIT"
] | 63
|
2018-08-02T20:50:41.000Z
|
2022-03-02T02:42:48.000Z
|
Sofware/main.py
|
Mark-MDO47/PiPod
|
990042ff5ad69d9fc93d1bd5bd684db730156222
|
[
"MIT"
] | 2
|
2018-08-30T16:31:48.000Z
|
2021-12-02T01:28:23.000Z
|
Sofware/main.py
|
Mark-MDO47/PiPod
|
990042ff5ad69d9fc93d1bd5bd684db730156222
|
[
"MIT"
] | 14
|
2018-08-05T04:45:07.000Z
|
2022-02-18T10:56:20.000Z
|
#!/usr/bin/python3
import playback
import display
import navigation
import device
import pygame
done = False
music = playback.music()
view = display.view()
menu = navigation.menu()
PiPod = device.PiPod()
menu.loadMetadata()
status = PiPod.getStatus()
songMetadata = music.getStatus()
displayUpdate = pygame.USEREVENT + 1
pygame.time.set_timer(displayUpdate, 500)
view.update(status, menu.menuDict, songMetadata)
action = None  # several key branches below test `action` without assigning it first; initialize to avoid a NameError
while not done:
music.loop()
for event in pygame.event.get():
if event.type == pygame.QUIT:
done = True
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_ESCAPE:
PiPod.toggleSleep()
elif event.key == pygame.K_u:
music.volumeUp()
elif event.key == pygame.K_d:
music.volumeDown()
elif event.key == pygame.K_UP:
if status[2]:
music.volumeUp()
elif menu.menuDict["current"] == "musicController":
menu.gotomenu()
else:
action = menu.up()
elif event.key == pygame.K_DOWN:
if status[2]:
music.volumeDown()
elif menu.menuDict["current"] == "musicController":
music.shuffle()
menu.menuDict["Queue"] = music.playlist
else:
action = menu.down()
elif event.key == pygame.K_LEFT:
if status[2] or menu.menuDict["current"] == "musicController":
music.prev()
else:
action = menu.left()
elif event.key == pygame.K_RIGHT:
if status[2] or menu.menuDict["current"] == "musicController":
music.next()
else:
action = menu.right()
if action == "updateList":
music.updateList(menu.menuDict["Queue"])
elif event.key == pygame.K_RETURN:
if status[2] or menu.menuDict["current"] == "musicController":
music.playPause()
else:
action = menu.select()
if action == "play":
music.loadList(menu.menuDict["Queue"])
music.play()
elif action == "clearQueue":
menu.menuDict["Queue"] = []
music.clearQueue()
elif action == "updateLibrary":
if music.updateLibrary():
done = True
elif action == "toggleSleep":
PiPod.toggleSleep()
elif action == "shutdown":
while not PiPod.shutdown():
view.popUp("Shutdown")
elif action == "reboot":
while not PiPod.reboot():
view.popUp("Reboot")
elif action == "playAtIndex":
if menu.menuDict["selectedItem"] == 0:
music.clearQueue()
menu.menuDict["Queue"] = []
else:
music.playAtIndex(menu.menuDict["selectedItem"]-1)
status = PiPod.getStatus()
songMetadata = music.getStatus()
view.update(status, menu.menuDict, songMetadata)
# display.update() without arguments updates the entire display just like display.flip()
pygame.time.Clock().tick(30)  # Limit the framerate to 30 FPS so the loop doesn't use all of the CPU resources
| 34.592593
| 103
| 0.482869
| 334
| 3,736
| 5.374252
| 0.317365
| 0.093593
| 0.062396
| 0.066852
| 0.298607
| 0.179387
| 0.083565
| 0.083565
| 0.083565
| 0
| 0
| 0.007303
| 0.413544
| 3,736
| 107
| 104
| 34.915888
| 0.811958
| 0.05166
| 0
| 0.352273
| 0
| 0
| 0.069511
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.056818
| 0
| 0.056818
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b5bdd10944be47a0eef70a2d5c3fc45fddcfaaf6
| 5,698
|
py
|
Python
|
src/contentbase/auditor.py
|
ClinGen/clincoded
|
5624c74546ce2a44eda00ee632a8de8c2099da10
|
[
"MIT"
] | 30
|
2015-09-23T20:38:57.000Z
|
2021-03-10T03:12:46.000Z
|
src/contentbase/auditor.py
|
ClinGen/clincoded
|
5624c74546ce2a44eda00ee632a8de8c2099da10
|
[
"MIT"
] | 2,132
|
2015-06-08T21:50:35.000Z
|
2022-02-15T22:44:18.000Z
|
src/contentbase/auditor.py
|
ClinGen/clincoded
|
5624c74546ce2a44eda00ee632a8de8c2099da10
|
[
"MIT"
] | 10
|
2015-09-25T20:11:25.000Z
|
2020-12-09T02:58:44.000Z
|
""" Cross-object data auditing
Schema validation allows for checking values within a single object.
We also need to perform higher order checking between linked objects.
"""
from past.builtins import basestring
import logging
import venusian
logger = logging.getLogger(__name__)
def includeme(config):
config.registry['auditor'] = Auditor()
config.add_directive('add_audit_checker', add_audit_checker)
config.add_request_method(audit, 'audit')
# Same as logging
_levelNames = {
0: 'NOTSET',
10: 'DEBUG',
20: 'INFO',
30: 'DCC_ACTION',
40: 'WARNING',
50: 'NOT_COMPLIANT',
60: 'ERROR',
'DEBUG': 10,
'ERROR': 60,
'INFO': 20,
'NOTSET': 0,
'WARNING': 40,
'NOT_COMPLIANT': 50,
'DCC_ACTION': 30,
}
class AuditFailure(Exception):
def __init__(self, category, detail=None, level=0, path=None, name=None):
super(AuditFailure, self).__init__(category)
self.category = category
self.detail = detail
if not isinstance(level, int):
level = _levelNames[level]
self.level = level
self.path = path
self.name = name
def __json__(self, request=None):
return {
'category': self.category,
'detail': self.detail,
'level': self.level,
'level_name': _levelNames[self.level],
'path': self.path,
'name': self.name,
}
class Auditor(object):
""" Data audit manager
"""
_order = 0
def __init__(self):
self.type_checkers = {}
def add_audit_checker(self, checker, item_type, condition=None, frame='embedded'):
checkers = self.type_checkers.setdefault(item_type, [])
self._order += 1 # consistent execution ordering
if isinstance(frame, list):
frame = tuple(sorted(frame))
checkers.append((self._order, checker, condition, frame))
def audit(self, request, types, path, **kw):
if isinstance(types, basestring):
types = [types]
checkers = set()
checkers.update(*(self.type_checkers.get(item_type, ()) for item_type in types))
errors = []
system = {
'request': request,
'path': path,
'types': types,
}
system.update(kw)
for order, checker, condition, frame in sorted(checkers):
if frame is None:
uri = path
elif isinstance(frame, basestring):
uri = '%s@@%s' % (path, frame)
else:
uri = '%s@@expand?expand=%s' % (path, '&expand='.join(frame))
value = request.embed(uri)
if condition is not None:
try:
if not condition(value, system):
continue
except Exception as e:
detail = '%s: %r' % (checker.__name__, e)
failure = AuditFailure(
'audit condition error', detail, 'ERROR', path, checker.__name__)
errors.append(failure.__json__(request))
logger.warning('audit condition error auditing %s', path, exc_info=True)
continue
try:
try:
result = checker(value, system)
except AuditFailure as e:
e = e.__json__(request)
if e['path'] is None:
e['path'] = path
e['name'] = checker.__name__
errors.append(e)
continue
if result is None:
continue
if isinstance(result, AuditFailure):
result = [result]
for item in result:
if isinstance(item, AuditFailure):
item = item.__json__(request)
if item['path'] is None:
item['path'] = path
item['name'] = checker.__name__
errors.append(item)
continue
raise ValueError(item)
except Exception as e:
detail = '%s: %r' % (checker.__name__, e)
failure = AuditFailure(
'audit script error', detail, 'ERROR', path, checker.__name__)
errors.append(failure.__json__(request))
logger.warning('audit script error auditing %s', path, exc_info=True)
continue
return errors
# Imperative configuration
def add_audit_checker(config, checker, item_type, condition=None, frame='embedded'):
auditor = config.registry['auditor']
config.action(None, auditor.add_audit_checker,
(checker, item_type, condition, frame))
# Declarative configuration
def audit_checker(item_type, condition=None, frame='embedded'):
""" Register an audit checker
"""
def decorate(checker):
def callback(scanner, factory_name, factory):
scanner.config.add_audit_checker(
checker, item_type, condition, frame)
venusian.attach(checker, callback, category='auditor')
return checker
return decorate
def audit(request, types=None, path=None, context=None, **kw):
auditor = request.registry['auditor']
if path is None:
path = request.path
if context is None:
context = request.context
if types is None:
types = [context.item_type] + context.base_types
return auditor.audit(
request=request, types=types, path=path, root=request.root, context=context,
registry=request.registry, **kw)
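# Usage sketch (the item type and field below are hypothetical, not part of this
# module). A checker registered via the declarative decorator returns an
# AuditFailure (or a list of them, or None) and is picked up when venusian
# scans the package:
#
#   @audit_checker('experiment', frame='embedded')
#   def audit_title_present(value, system):
#       if not value.get('title'):
#           return AuditFailure('missing title', 'Experiment has no title.',
#                               level='WARNING', path=value.get('@id'))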
| 32.56
| 92
| 0.551071
| 584
| 5,698
| 5.207192
| 0.234589
| 0.023676
| 0.029596
| 0.039461
| 0.204209
| 0.186452
| 0.186452
| 0.146005
| 0.092733
| 0.092733
| 0
| 0.007754
| 0.343629
| 5,698
| 174
| 93
| 32.747126
| 0.805348
| 0.055985
| 0
| 0.123188
| 0
| 0
| 0.076363
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.07971
| false
| 0
| 0.021739
| 0.007246
| 0.15942
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b5cdf29e6b6b8257a8b1c9b388ba9bf3693defbc
| 726
|
py
|
Python
|
config.py
|
adesolagbenga0052/web-app
|
c6d6ca3f998897986ac25a1e93477af0a8bfacf6
|
[
"Apache-2.0"
] | null | null | null |
config.py
|
adesolagbenga0052/web-app
|
c6d6ca3f998897986ac25a1e93477af0a8bfacf6
|
[
"Apache-2.0"
] | null | null | null |
config.py
|
adesolagbenga0052/web-app
|
c6d6ca3f998897986ac25a1e93477af0a8bfacf6
|
[
"Apache-2.0"
] | null | null | null |
"""Flask configuration."""
from os import environ, path
basedir = path.abspath(path.dirname(__file__))
class Config:
"""Base config."""
SECRET_KEY = "qsZ5srBF9-j3tgdMsd11hdbg2VLUyKQYqWFQ1EZyKI6PDVVTLXduxWoM1N0wESR0zFvSPFDs9ogpMjgl9wFxXw"
STATIC_FOLDER = 'static'
TEMPLATES_FOLDER = 'templates'
class ProdConfig(Config):
FLASK_ENV = 'production'
DEBUG = False
TESTING = False
SQLALCHEMY_DATABASE_URI ="sqlite:///databank.sqlite"
SQLALCHEMY_TRACK_MODIFICATIONS = False
class DevConfig(Config):
FLASK_ENV = 'development'
DEBUG = True
TESTING = True
SQLALCHEMY_DATABASE_URI = "sqlite:///test.db"
SQLALCHEMY_TRACK_MODIFICATIONS = True
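# Usage sketch (assumes a Flask app factory defined elsewhere in the project):
#
#   from flask import Flask
#   from config import DevConfig
#
#   app = Flask(__name__)
#   app.config.from_object(DevConfig)   # or ProdConfig in production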
| 27.923077
| 106
| 0.698347
| 67
| 726
| 7.313433
| 0.58209
| 0.044898
| 0.057143
| 0.110204
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.022648
| 0.209366
| 726
| 26
| 107
| 27.923077
| 0.83101
| 0.045455
| 0
| 0
| 0
| 0
| 0.24924
| 0.168693
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.055556
| 0
| 0.944444
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b5d0213de62ed3ea48e3a10bf0cc5d6b41c2e553
| 5,979
|
py
|
Python
|
djproject/pictureupload/views.py
|
missingDown/webForUpload
|
fbd5ed9e8cfcd4ad906913f4a31c24e87919f9a3
|
[
"MIT"
] | null | null | null |
djproject/pictureupload/views.py
|
missingDown/webForUpload
|
fbd5ed9e8cfcd4ad906913f4a31c24e87919f9a3
|
[
"MIT"
] | null | null | null |
djproject/pictureupload/views.py
|
missingDown/webForUpload
|
fbd5ed9e8cfcd4ad906913f4a31c24e87919f9a3
|
[
"MIT"
] | null | null | null |
from django.shortcuts import render
from django.http import HttpResponse
import logging
import json
import base64
import time
# Create your views here.
logger = logging.getLogger(__name__)
# File upload: form-data/Multipart, POST method
def index(request):
sendfile = request.FILES.items()
haveFiles = False
for key,value in sendfile:
fileData = request.FILES.getlist(key)
if len(fileData):
haveFiles = True
for fl in fileData:
name = fl.name
with open('/home/hych007/project/serverproject/'+name, 'wb') as fp:
fp.write(bytes(fl.read()))
if not haveFiles:
result = {"errcode": 511, "errmsg": "form-data中未读取到图片信息"}
return HttpResponse(json.dumps(result), content_type="application/json")
cameraCode = request.POST.get("cameraCode", "")
if not cameraCode:
result = {"errcode": 501, "errmsg": "cameraCode摄像头编号不能为空"}
return HttpResponse(json.dumps(result), content_type="application/json")
doorCode = request.POST.get("i3310A", "")
if not doorCode:
result = {"errcode": 505, "errmsg": "i3310A门禁编码不能为空"}
return HttpResponse(json.dumps(result), content_type="application/json")
userCode = request.POST.get("i3310D", "")
if not userCode:
result = {"errcode": 507, "errmsg": "i3310D用户唯一标识码不能为空"}
return HttpResponse(json.dumps(result), content_type="application/json")
photoCodes = request.POST.get("photoCodes", "")
if not len(photoCodes):
result = {"errcode": 502, "errmsg": "photoCode照片编号不能为空"}
return HttpResponse(json.dumps(result), content_type="application/json")
dataTimes = request.POST.get("dataTimes", "")
if not len(dataTimes):
result = {"errcode": 503, "errmsg": "dataTime拍照时间不能为空"}
return HttpResponse(json.dumps(result), content_type="application/json")
result = {"errcode": 200, "errmsg": "上传成功"}
return HttpResponse(json.dumps(result), content_type="application/json")
# File upload: JSON in the request body, POST method
def body(request):
try:
jsData = json.loads(str(request.body, encoding='utf-8'))
except Exception as e:
result = {"errcode": -1, "errmsg": "数据不能为空"}
return HttpResponse(json.dumps(result), content_type="application/json")
if not isinstance(jsData, list):
result = {"errcode": 500, "errmsg": "上传的json数据错误"}
return HttpResponse(json.dumps(result), content_type="application/json")
for data in jsData:
imgData = data.get("base64String", "")
if len(imgData) <= 23:
result = {"errcode": 509, "errmsg": "照片转Base64String 编码后的字符串不能为空"}
return HttpResponse(json.dumps(result), content_type="application/json")
imgData = imgData[23:]
temp = base64.b64decode(imgData)
picName = time.strftime("%Y%m%d%H%M%S", time.localtime())
# Image files must be read/written in binary mode
with open("/home/hych007/project/serverproject/"+picName+".jpeg", 'wb') as f:
f.write(temp)
cameraCode = data.get("cameraCode", "")
if not cameraCode:
result = {"errcode": 501, "errmsg": "cameraCode摄像头编号不能为空"}
return HttpResponse(json.dumps(result), content_type="application/json")
doorCode = data.get("i3310A", "")
if not doorCode:
result = {"errcode": 505, "errmsg": "i3310A门禁编码不能为空"}
return HttpResponse(json.dumps(result), content_type="application/json")
userCode = data.get("i3310D", "")
if not userCode:
result = {"errcode": 507, "errmsg": "i3310D用户唯一标识码不能为空"}
return HttpResponse(json.dumps(result), content_type="application/json")
photoCode = data.get("photoCode", "")
if not photoCode:
result = {"errcode": 502, "errmsg": "photoCode照片编号不能为空"}
return HttpResponse(json.dumps(result), content_type="application/json")
dataTime = data.get("dataTime", "")
if not dataTime:
result = {"errcode": 503, "errmsg": "dataTime拍照时间不能为空"}
return HttpResponse(json.dumps(result), content_type="application/json")
result = {"errcode": 200, "errmsg": "上传成功"}
return HttpResponse(json.dumps(result), content_type="application/json")
# Serve an image for download
def picture(request):
file = open("/home/hych007/project/100001.png", "rb")
response = HttpResponse(content=file.read(), content_type="image/png")
return response
# def writeLog(text):
# with open('/mnt/testlog.txt', 'a+') as fp:
# fp.write(text+'\n')
# File upload: form-data/Multipart, PUT method
def puttest(request):
# writeLog('get a request')
if(request.method != "PUT"):
result = {"errcode": 1, "errmsg": "Not put method"}
# writeLog('Not put method')
return HttpResponse(json.dumps(result), content_type="application/json")
put, files = request.parse_file_upload(request.META, request)
# request.FILES.update(files)
# request.POST = put.dict()
fileitems = files.items()
haveFiles = False
for key, value in fileitems:
# fileData = files.get(key)
if value:
haveFiles = True
# Image files must be read/written in binary mode
name = value.name
with open('/home/hych007/project/serverproject/' + name, 'wb') as fp:
fp.write(bytes(value.read()))
if not haveFiles:
result = {"errcode": 2, "errmsg": "No file data"}
# writeLog('No file data')
return HttpResponse(json.dumps(result), content_type="application/json")
dataInfo = put.get("FaceDataRecord", "")
if not dataInfo:
result = {"errcode": 3, "errmsg": "no FaceDataRecord data"}
# writeLog('no FaceDataRecord data')
return HttpResponse(json.dumps(result), content_type="application/json")
result = {"errcode": 200, "errmsg": dataInfo}
# writeLog('request secceed')
return HttpResponse(json.dumps(result), content_type="application/json")
| 38.082803
| 85
| 0.632882
| 648
| 5,979
| 5.79784
| 0.239198
| 0.061485
| 0.117115
| 0.143732
| 0.566676
| 0.566676
| 0.539792
| 0.539792
| 0.520096
| 0.425872
| 0
| 0.025232
| 0.224452
| 5,979
| 157
| 86
| 38.082803
| 0.78499
| 0.078608
| 0
| 0.418182
| 0
| 0
| 0.209798
| 0.025496
| 0
| 0
| 0
| 0
| 0
| 1
| 0.036364
| false
| 0
| 0.054545
| 0
| 0.281818
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b5d0c034f7242aa14fa3baca13d703e86f187f17
| 276
|
py
|
Python
|
torrents/tests/test_file.py
|
noahgoldman/torwiz
|
213be5cf3b62d2c18c09e2fe4b869c549c263f32
|
[
"MIT"
] | 1
|
2015-03-09T01:58:23.000Z
|
2015-03-09T01:58:23.000Z
|
torrents/tests/test_file.py
|
noahgoldman/torwiz
|
213be5cf3b62d2c18c09e2fe4b869c549c263f32
|
[
"MIT"
] | 3
|
2015-04-01T22:49:58.000Z
|
2015-05-01T19:09:11.000Z
|
torrents/tests/test_file.py
|
noahgoldman/torwiz
|
213be5cf3b62d2c18c09e2fe4b869c549c263f32
|
[
"MIT"
] | null | null | null |
from bson.objectid import ObjectId
from torrents.file import TorrentFile
class TestTorrentFile:
def test_get_output_file(self):
id1 = ObjectId()
file1 = TorrentFile(id1)
assert file1.get_output_file() == 'torrent_files/' + str(id1) + '.torrent'
| 25.090909
| 82
| 0.695652
| 33
| 276
| 5.636364
| 0.606061
| 0.096774
| 0.139785
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.022831
| 0.206522
| 276
| 10
| 83
| 27.6
| 0.826484
| 0
| 0
| 0
| 0
| 0
| 0.07971
| 0
| 0
| 0
| 0
| 0
| 0.142857
| 1
| 0.142857
| false
| 0
| 0.285714
| 0
| 0.571429
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b5d13876f65729d4efb83ad2b61955efd49a0d23
| 2,444
|
py
|
Python
|
google/cloud/storage/benchmarks/storage_throughput_plots.py
|
millerantonio810/google-cloud-cpp
|
71582d922bc22b0dcbc58234f36c726ea3b7c171
|
[
"Apache-2.0"
] | 1
|
2021-01-16T02:43:50.000Z
|
2021-01-16T02:43:50.000Z
|
google/cloud/storage/benchmarks/storage_throughput_plots.py
|
millerantonio810/google-cloud-cpp
|
71582d922bc22b0dcbc58234f36c726ea3b7c171
|
[
"Apache-2.0"
] | null | null | null |
google/cloud/storage/benchmarks/storage_throughput_plots.py
|
millerantonio810/google-cloud-cpp
|
71582d922bc22b0dcbc58234f36c726ea3b7c171
|
[
"Apache-2.0"
] | 1
|
2020-05-09T20:12:05.000Z
|
2020-05-09T20:12:05.000Z
|
#!/usr/bin/env python3
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Summarize the results from running storage_throughput_benchmark."""
# %%
import argparse
import pandas as pd
import plotnine as p9
from scipy.stats import mannwhitneyu
# %%
pd.set_option("precision", 2)
# %%
def load_benchmark_output(file):
"""Loads the output generated by storage_throughput_benchmark."""
df = pd.read_csv(file, comment="#", names=["Op", "Api", "Bytes", "ElapsedMs"])
df["MiB"] = df.Bytes / 1024 / 1024
df["MiBs"] = df.MiB * 1000 / df.ElapsedMs
return df
# %%
def compare_api(df, op_name, alpha=0.05):
subset = df[df.Op == op_name]
stat, p = mannwhitneyu(
subset[subset.Api == "XML"].MiBs, subset[subset.Api == "JSON"].MiBs
)
print(
"\n\n===== %s XML vs. JSON =====\np-value=%.3f Statistics=%.3f"
% (op_name, p, stat)
)
print(subset.groupby(by="Api").MiBs.describe(percentiles=[0.50, 0.90, 0.95]))
if p > alpha:
print("%s/XML vs. READ/JSON: same distribution (fail to reject H0)" % op_name)
else:
print("%s/XML vs. READ/JSON: different distribution (reject H0)" % op_name)
# %%
parser = argparse.ArgumentParser()
parser.add_argument(
"--input-file",
type=argparse.FileType("r"),
required=True,
help="the benchmark output file to load",
)
parser.add_argument(
"--output-file", type=str, required=True, help="the name for the output plot"
)
args = parser.parse_args()
# %%
data = load_benchmark_output(args.input_file)
# %%
print(data.head())
# %%
print(data.describe())
# %%
(
p9.ggplot(
data=data[(data.Op != "CREATE") & (data.Op != "DELETE")],
mapping=p9.aes(x="Op", y="MiBs", color="Api"),
)
+ p9.facet_wrap(facets="Op", labeller="label_both", scales="free")
+ p9.geom_boxplot()
).save(args.output_file)
# %%
compare_api(data, "READ")
compare_api(data, "WRITE")
| 26.857143
| 86
| 0.657529
| 347
| 2,444
| 4.556196
| 0.495677
| 0.037951
| 0.011385
| 0.02024
| 0.024035
| 0.024035
| 0
| 0
| 0
| 0
| 0
| 0.021619
| 0.18617
| 2,444
| 90
| 87
| 27.155556
| 0.773253
| 0.296236
| 0
| 0.041667
| 0
| 0
| 0.211744
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.041667
| false
| 0
| 0.083333
| 0
| 0.145833
| 0.125
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b5d2438e72ede4149becee229525d2ab304971e9
| 939
|
py
|
Python
|
vietocr/train.py
|
lzmisscc/vietocr
|
df0d9a53e714d08d6b0b4ee52ab46fbc0b991bf3
|
[
"Apache-2.0"
] | null | null | null |
vietocr/train.py
|
lzmisscc/vietocr
|
df0d9a53e714d08d6b0b4ee52ab46fbc0b991bf3
|
[
"Apache-2.0"
] | null | null | null |
vietocr/train.py
|
lzmisscc/vietocr
|
df0d9a53e714d08d6b0b4ee52ab46fbc0b991bf3
|
[
"Apache-2.0"
] | null | null | null |
import argparse
import logging
from vietocr.model.trainer import Trainer
from vietocr.tool.config import Cfg
import sys
sys.path.insert(0, './')
from char import character
logging.basicConfig(level=logging.INFO, )
def main():
parser = argparse.ArgumentParser()
parser.add_argument('config', help='see example at ')
parser.add_argument('--checkpoint', help='your checkpoint')
args = parser.parse_args()
config_base = Cfg.load_config_from_file("config/base.yml")
config = Cfg.load_config_from_file(args.config)
config_base.update(config)
config = config_base
config['vocab'] = character
trainer = Trainer(config, pretrained=False)
# args.checkpoint = config.trainer["checkpoint"]
# if args.checkpoint:
# trainer.load_checkpoint(args.checkpoint)
# logging.info(f"Load checkpoint form {args.checkpoint}....")
trainer.train()
if __name__ == '__main__':
main()
| 28.454545
| 68
| 0.707135
| 115
| 939
| 5.591304
| 0.4
| 0.062208
| 0.052877
| 0.052877
| 0.065319
| 0
| 0
| 0
| 0
| 0
| 0
| 0.001282
| 0.169329
| 939
| 32
| 69
| 29.34375
| 0.823077
| 0.184239
| 0
| 0
| 0
| 0
| 0.102497
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.045455
| false
| 0
| 0.272727
| 0
| 0.318182
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b5d4e05a5e5fe08d9de941f7f2c1980a53f27d2a
| 598
|
py
|
Python
|
Plug-and-play module/SematicEmbbedBlock.py
|
riciche/SimpleCVReproduction
|
4075de39f9c61f1359668a413f6a5d98903fcf97
|
[
"Apache-2.0"
] | 923
|
2020-01-11T06:36:53.000Z
|
2022-03-31T00:26:57.000Z
|
Plug-and-play module/SematicEmbbedBlock.py
|
riciche/SimpleCVReproduction
|
4075de39f9c61f1359668a413f6a5d98903fcf97
|
[
"Apache-2.0"
] | 25
|
2020-02-27T08:35:46.000Z
|
2022-01-25T08:54:19.000Z
|
Plug-and-play module/SematicEmbbedBlock.py
|
riciche/SimpleCVReproduction
|
4075de39f9c61f1359668a413f6a5d98903fcf97
|
[
"Apache-2.0"
] | 262
|
2020-01-02T02:19:40.000Z
|
2022-03-23T04:56:16.000Z
|
import torch.nn as nn
"""
https://zhuanlan.zhihu.com/p/76378871
arxiv: 1804.03821
ExFuse
"""
class SematicEmbbedBlock(nn.Module):
def __init__(self, high_in_plane, low_in_plane, out_plane):
super(SematicEmbbedBlock, self).__init__()
self.conv3x3 = nn.Conv2d(high_in_plane, out_plane, 3, 1, 1)
self.upsample = nn.UpsamplingBilinear2d(scale_factor=2)
self.conv1x1 = nn.Conv2d(low_in_plane, out_plane, 1)
def forward(self, high_x, low_x):
high_x = self.upsample(self.conv3x3(high_x))
low_x = self.conv1x1(low_x)
return high_x * low_x
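# Usage sketch (shapes chosen purely for illustration): the high-level feature
# map is assumed to have half the spatial resolution of the low-level one, so
# after the 2x bilinear upsample the element-wise product is well defined:
#   import torch
#   seb = SematicEmbbedBlock(high_in_plane=256, low_in_plane=64, out_plane=128)
#   high = torch.randn(1, 256, 16, 16)   # coarse, semantically rich features
#   low = torch.randn(1, 64, 32, 32)     # fine, low-level features
#   out = seb(high, low)                 # -> torch.Size([1, 128, 32, 32])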
| 29.9
| 67
| 0.688963
| 89
| 598
| 4.314607
| 0.438202
| 0.072917
| 0.078125
| 0.117188
| 0.09375
| 0
| 0
| 0
| 0
| 0
| 0
| 0.068323
| 0.192308
| 598
| 20
| 68
| 29.9
| 0.726708
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.181818
| false
| 0
| 0.090909
| 0
| 0.454545
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b5d85732ed11a9abee1adac3c37bfb5f5d7fe0c2
| 9,874
|
py
|
Python
|
nslsii/__init__.py
|
ke-zhang-rd/nslsii
|
d3f942cda8eac713ac625dbcf4285e108c04f154
|
[
"BSD-3-Clause"
] | null | null | null |
nslsii/__init__.py
|
ke-zhang-rd/nslsii
|
d3f942cda8eac713ac625dbcf4285e108c04f154
|
[
"BSD-3-Clause"
] | null | null | null |
nslsii/__init__.py
|
ke-zhang-rd/nslsii
|
d3f942cda8eac713ac625dbcf4285e108c04f154
|
[
"BSD-3-Clause"
] | null | null | null |
from IPython import get_ipython
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
def import_star(module, ns):
def public(name):
return not name.startswith('_')
ns.update({name: getattr(module, name)
for name in dir(module) if public(name)})
def configure_base(user_ns, broker_name, *,
bec=True, epics_context=False, magics=True, mpl=True,
ophyd_logging=True, pbar=True):
"""
Perform base setup and instantiation of important objects.
This factory function instantiates essential objects to data collection
environments at NSLS-II and adds them to the current namespace. In some
cases (documented below), it will check whether certain variables already
exist in the user name space, and will avoid creating them if so. The
following are added:
* ``RE`` -- a RunEngine
This is created only if an ``RE`` instance does not currently exist in
the namespace.
* ``db`` -- a Broker (from "databroker"), subscribe to ``RE``
* ``bec`` -- a BestEffortCallback, subscribed to ``RE``
* ``peaks`` -- an alias for ``bec.peaks``
* ``sd`` -- a SupplementalData preprocessor, added to ``RE.preprocessors``
* ``pbar_manager`` -- a ProgressBarManager, set as the ``RE.waiting_hook``
And it performs some low-level configuration:
* creates a context in ophyd's control layer (``ophyd.setup_ophyd()``)
* turns on interactive plotting (``matplotlib.pyplot.ion()``)
* bridges the RunEngine and Qt event loops
(``bluesky.utils.install_kicker()``)
* logs ERROR-level log message from ophyd to the standard out
Parameters
----------
user_ns: dict
a namespace --- for example, ``get_ipython().user_ns``
broker_name : Union[str, Broker]
Name of databroker configuration or a Broker instance.
bec : boolean, optional
True by default. Set False to skip BestEffortCallback.
epics_context : boolean, optional
False by default. Set True to create a context in ophyd's control layer via ``setup_ophyd()``.
magics : boolean, optional
True by default. Set False to skip registration of custom IPython
magics.
mpl : boolean, optional
True by default. Set False to skip matplotlib ``ion()`` and event-loop
bridging.
ophyd_logging : boolean, optional
True by default. Set False to skip ERROR-level log configuration for
ophyd.
pbar : boolean, optional
True by default. Set false to skip ProgressBarManager.
Returns
-------
names : list
list of names added to the namespace
Examples
--------
Configure IPython for CHX.
>>>> configure_base(get_ipython().user_ns, 'chx');
"""
ns = {} # We will update user_ns with this at the end.
# Set up a RunEngine and use metadata backed by a sqlite file.
from bluesky import RunEngine
from bluesky.utils import get_history
# if RunEngine already defined grab it
# useful when users make their own custom RunEngine
if 'RE' in user_ns:
RE = user_ns['RE']
else:
RE = RunEngine(get_history())
ns['RE'] = RE
# Set up SupplementalData.
# (This is a no-op until devices are added to it,
# so there is no need to provide a 'skip_sd' switch.)
from bluesky import SupplementalData
sd = SupplementalData()
RE.preprocessors.append(sd)
ns['sd'] = sd
if isinstance(broker_name, str):
# Set up a Broker.
from databroker import Broker
db = Broker.named(broker_name)
ns['db'] = db
else:
db = broker_name
RE.subscribe(db.insert)
if pbar:
# Add a progress bar.
from bluesky.utils import ProgressBarManager
pbar_manager = ProgressBarManager()
RE.waiting_hook = pbar_manager
ns['pbar_manager'] = pbar_manager
if magics:
# Register bluesky IPython magics.
from bluesky.magics import BlueskyMagics
get_ipython().register_magics(BlueskyMagics)
if bec:
# Set up the BestEffortCallback.
from bluesky.callbacks.best_effort import BestEffortCallback
_bec = BestEffortCallback()
RE.subscribe(_bec)
ns['bec'] = _bec
ns['peaks'] = _bec.peaks # just as alias for less typing
if mpl:
# Import matplotlib and put it in interactive mode.
import matplotlib.pyplot as plt
ns['plt'] = plt
plt.ion()
# Make plots update live while scans run.
from bluesky.utils import install_kicker
install_kicker()
if epics_context:
# Create a context in the underlying EPICS client.
from ophyd import setup_ophyd
setup_ophyd()
if not ophyd_logging:
# Turn on error-level logging, particularly useful for knowing when
# pyepics callbacks fail.
import logging
import ophyd.ophydobj
ch = logging.StreamHandler()
ch.setLevel(logging.ERROR)
ophyd.ophydobj.logger.addHandler(ch)
# convenience imports
# some of the * imports are for 'back-compatibility' of a sort -- we have
# taught BL staff to expect LiveTable and LivePlot etc. to be in their
# namespace
import numpy as np
ns['np'] = np
import bluesky.callbacks
ns['bc'] = bluesky.callbacks
import_star(bluesky.callbacks, ns)
import bluesky.plans
ns['bp'] = bluesky.plans
import_star(bluesky.plans, ns)
import bluesky.plan_stubs
ns['bps'] = bluesky.plan_stubs
import_star(bluesky.plan_stubs, ns)
# special-case the commonly-used mv / mvr and their aliases mov / movr
ns['mv'] = bluesky.plan_stubs.mv
ns['mvr'] = bluesky.plan_stubs.mvr
ns['mov'] = bluesky.plan_stubs.mov
ns['movr'] = bluesky.plan_stubs.movr
import bluesky.preprocessors
ns['bpp'] = bluesky.preprocessors
import bluesky.callbacks.broker
import_star(bluesky.callbacks.broker, ns)
import bluesky.simulators
import_star(bluesky.simulators, ns)
user_ns.update(ns)
return list(ns)
def configure_olog(user_ns, *, callback=None, subscribe=True):
"""
Setup a callback that publishes some metadata from the RunEngine to Olog.
Also, add the public contents of pyOlog.ophyd_tools to the namespace.
This is expected to be run after :func:`configure_base`. It expects to find
an instance of RunEngine named ``RE`` in the user namespace. Additionally,
if the user namespace contains the name ``logbook``, that is expected to be
an instance ``pyOlog.SimpleOlogClient``.
Parameters
----------
user_ns: dict
a namespace --- for example, ``get_ipython().user_ns``
callback : callable, optional
a hook for customizing the logbook_cb_factory; if None a default is
used
subscribe : boolean, optional
True by default. Set to False to skip the subscription. (You still get
pyOlog.ophyd_tools.)
Returns
-------
names : list
list of names added to the namespace
Examples
--------
Configure the Olog.
>>>> configure_olog(get_ipython().user_ns);
"""
# Conceptually our task is simple: add a subscription to the RunEngine that
# publishes to the Olog using the Python wrapper of its REST API, pyOlog.
# In practice this is messy because we have deal with the many-layered API
# of pyOlog and, more importantly, ensure that slowness or errors from the
# Olog do not affect the run. Historically the Olog deployment has not been
# reliable, so it is important to be robust against these issues. Of
# course, by ignoring Olog errors, we leave gaps in the log, which is not
# great, but since all data is saved to a databroker anyway, we can always
# re-generate them later.
ns = {} # We will update user_ns with this at the end.
from bluesky.callbacks.olog import logbook_cb_factory
from functools import partial
from pyOlog import SimpleOlogClient
import queue
import threading
from warnings import warn
# This is for pyOlog.ophyd_tools.get_logbook, which simply looks for
# a variable called 'logbook' in the global IPython namespace.
if 'logbook' in user_ns:
simple_olog_client = user_ns['logbook']
else:
simple_olog_client = SimpleOlogClient()
ns['logbook'] = simple_olog_client
if subscribe:
if callback is None:
# list of logbook names to publish to
LOGBOOKS = ('Data Acquisition',)
generic_logbook_func = simple_olog_client.log
configured_logbook_func = partial(generic_logbook_func,
logbooks=LOGBOOKS)
callback = logbook_cb_factory(configured_logbook_func)
def submit_to_olog(queue, cb):
while True:
name, doc = queue.get() # waits until document is available
try:
cb(name, doc)
except Exception as exc:
warn('This olog is giving errors. This will not be logged.'
'Error:' + str(exc))
olog_queue = queue.Queue(maxsize=100)
olog_thread = threading.Thread(target=submit_to_olog,
args=(olog_queue, callback),
daemon=True)
olog_thread.start()
def send_to_olog_queue(name, doc):
try:
olog_queue.put((name, doc), block=False)
except queue.Full:
warn('The olog queue is full. This will not be logged.')
RE = user_ns['RE']
RE.subscribe(send_to_olog_queue, 'start')
import pyOlog.ophyd_tools
import_star(pyOlog.ophyd_tools, ns)
user_ns.update(ns)
return list(ns)
| 34.404181
| 79
| 0.645331
| 1,272
| 9,874
| 4.908805
| 0.284591
| 0.016336
| 0.0213
| 0.023543
| 0.110026
| 0.10394
| 0.098975
| 0.098975
| 0.090006
| 0.049648
| 0
| 0.000557
| 0.272635
| 9,874
| 286
| 80
| 34.524476
| 0.868839
| 0.480758
| 0
| 0.102362
| 0
| 0
| 0.045007
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.047244
| false
| 0
| 0.275591
| 0.007874
| 0.346457
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b5d915f6cc267b773bbe24b2332fae333a3982c5
| 714
|
py
|
Python
|
fake.py
|
Wsky51/dfs-node-restapi
|
bab7605c609d4b53cd11686a576b74c1ae2871b7
|
[
"Apache-2.0"
] | null | null | null |
fake.py
|
Wsky51/dfs-node-restapi
|
bab7605c609d4b53cd11686a576b74c1ae2871b7
|
[
"Apache-2.0"
] | null | null | null |
fake.py
|
Wsky51/dfs-node-restapi
|
bab7605c609d4b53cd11686a576b74c1ae2871b7
|
[
"Apache-2.0"
] | null | null | null |
"""create fake data to the db file"""
from config import data_nodes, get_db
from type import DataNodeStatus, DataNode
from datetime import timedelta
from config import get_second_datetime
def create_fake_data_status(data_node: DataNode):
now = get_second_datetime()
db = get_db()
for i in range(100):
status = DataNodeStatus(
# mark the node as dead once every ten samples
dead=i % 10 == 0,
capacity=1000,
used=100 + (i % 7),
datetime=now + timedelta(minutes=i)
)
db.save(data_node.node_id, status)
def create_fake_data():
for data_node in data_nodes:
create_fake_data_status(data_node)
if __name__ == '__main__':
create_fake_data()
| 24.62069
| 49
| 0.648459
| 96
| 714
| 4.5
| 0.427083
| 0.115741
| 0.162037
| 0.078704
| 0.12963
| 0.12963
| 0
| 0
| 0
| 0
| 0
| 0.026718
| 0.266106
| 714
| 28
| 50
| 25.5
| 0.79771
| 0.056022
| 0
| 0
| 0
| 0
| 0.011976
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.1
| false
| 0
| 0.2
| 0
| 0.3
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b5db0b0b72cf05ff56cc67988018bcfa4797221d
| 371
|
py
|
Python
|
tests/pull_keys.py
|
patleeman/geckoboard_push
|
52c05db22b3c630d326a9650551720f583f0168f
|
[
"MIT"
] | null | null | null |
tests/pull_keys.py
|
patleeman/geckoboard_push
|
52c05db22b3c630d326a9650551720f583f0168f
|
[
"MIT"
] | null | null | null |
tests/pull_keys.py
|
patleeman/geckoboard_push
|
52c05db22b3c630d326a9650551720f583f0168f
|
[
"MIT"
] | null | null | null |
'''
Module to pull keys from test geckoboard widgets.
'''
import os
import json
def get_keys():
settings_folder = os.path.dirname(__file__)
settings_file = os.path.join(settings_folder,'gecko_settings.json')
with open(settings_file, 'r') as file:
json_data = json.load(file)
return json_data
if __name__ == '__main__':
print(get_keys())
| 23.1875
| 71
| 0.692722
| 52
| 371
| 4.538462
| 0.596154
| 0.059322
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.191375
| 371
| 16
| 72
| 23.1875
| 0.786667
| 0.132075
| 0
| 0
| 0
| 0
| 0.088889
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.1
| false
| 0
| 0.2
| 0
| 0.4
| 0.1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b5db74f8420d00fdc906f19f599f41aad18c69af
| 2,596
|
py
|
Python
|
pajbot/web/common/menu.py
|
JoachimFlottorp/pajbot
|
4fb88c403dedb20d95be80e38da72be1ed064901
|
[
"MIT"
] | 128
|
2015-12-28T01:02:30.000Z
|
2019-05-24T21:20:50.000Z
|
pajbot/web/common/menu.py
|
JoachimFlottorp/pajbot
|
4fb88c403dedb20d95be80e38da72be1ed064901
|
[
"MIT"
] | 277
|
2015-05-03T18:48:57.000Z
|
2019-05-23T17:41:28.000Z
|
pajbot/web/common/menu.py
|
JoachimFlottorp/pajbot
|
4fb88c403dedb20d95be80e38da72be1ed064901
|
[
"MIT"
] | 96
|
2015-08-07T18:49:50.000Z
|
2019-05-20T19:49:27.000Z
|
from __future__ import annotations
from typing import Any, Dict, List, Union
import logging
from pajbot.web.utils import get_cached_enabled_modules
log = logging.getLogger(__name__)
class MenuItem:
def __init__(
self,
href: Union[str, List[MenuItem]],
menu_id: str,
caption: str,
enabled: bool = True,
level: int = 100,
) -> None:
self.href = href
self.id = menu_id
self.caption = caption
self.enabled = enabled
self.level = level
self.type = "single"
if isinstance(self.href, list):
self.type = "multi"
def init(app):
@app.context_processor
def menu() -> Dict[str, Any]:
enabled_modules = get_cached_enabled_modules()
# Menu items that are shown for normal users
menu_items: List[MenuItem] = [
MenuItem("/", "home", "Home"),
MenuItem("/commands", "commands", "Commands"),
MenuItem("/points", "points", "Points", "chatters_refresh" in enabled_modules),
MenuItem("/stats", "stats", "Stats"),
MenuItem("/decks", "decks", "Decks", "deck" in enabled_modules),
MenuItem("/playsounds", "user_playsounds", "Playsounds", "playsound" in enabled_modules),
]
# Menu items that are shown to admin when in an /admin page
admin_menu_items: List[MenuItem] = [
MenuItem("/", "home", "Home"),
MenuItem("/admin", "admin_home", "Admin Home"),
MenuItem(
[
MenuItem("/admin/banphrases", "admin_banphrases", "Banphrases"),
MenuItem("/admin/links/blacklist", "admin_links_blacklist", "Blacklisted links"),
MenuItem("/admin/links/whitelist", "admin_links_whitelist", "Whitelisted links"),
],
"filters",
"Filters",
),
MenuItem("/admin/commands", "admin_commands", "Commands"),
MenuItem("/admin/timers", "admin_timers", "Timers"),
MenuItem("/admin/moderators", "admin_moderators", "Moderators"),
MenuItem("/admin/modules", "admin_modules", "Modules"),
MenuItem("/admin/playsounds", "admin_playsounds", "Playsounds", "playsound" in enabled_modules),
MenuItem("/admin/streamer", "admin_streamer", "Streamer Info"),
]
data = {
"enabled_modules": enabled_modules,
"nav_bar_header": menu_items,
"nav_bar_admin_header": admin_menu_items,
}
return data
| 35.081081
| 108
| 0.571649
| 254
| 2,596
| 5.633858
| 0.330709
| 0.090846
| 0.044724
| 0.050314
| 0.16492
| 0.16492
| 0.11181
| 0.062893
| 0
| 0
| 0
| 0.001644
| 0.296995
| 2,596
| 73
| 109
| 35.561644
| 0.782466
| 0.038521
| 0
| 0.033898
| 0
| 0
| 0.265142
| 0.034497
| 0
| 0
| 0
| 0
| 0
| 1
| 0.050847
| false
| 0
| 0.067797
| 0
| 0.152542
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b5db8ac1529ed13c3cad056d88e711f36bbfbbe1
| 611
|
py
|
Python
|
Python/463.py
|
FlyAndNotDown/LeetCode
|
889819ff7f64819e966fc6f9dd80110cf2bf6d3c
|
[
"MIT"
] | 4
|
2018-06-18T05:39:25.000Z
|
2022-01-04T07:35:52.000Z
|
Python/463.py
|
FlyAndNotDown/LeetCode
|
889819ff7f64819e966fc6f9dd80110cf2bf6d3c
|
[
"MIT"
] | 20
|
2019-11-30T03:42:40.000Z
|
2020-05-17T03:25:43.000Z
|
Python/463.py
|
FlyAndNotDown/LeetCode
|
889819ff7f64819e966fc6f9dd80110cf2bf6d3c
|
[
"MIT"
] | 2
|
2020-02-08T14:10:42.000Z
|
2021-09-23T13:51:36.000Z
|
"""
@no 463
@name Island Perimeter
"""
class Solution:
def islandPerimeter(self, grid):
"""
:type grid: List[List[int]]
:rtype: int
"""
ans = 0
for i in range(len(grid)):
for j in range(len(grid[i])):
if grid[i][j] == 1:
if i - 1 < 0 or grid[i - 1][j] == 0: ans += 1
if i + 1 > len(grid) - 1 or grid[i + 1][j] == 0: ans += 1
if j - 1 < 0 or grid[i][j - 1] == 0: ans += 1
if j + 1 > len(grid[i]) - 1 or grid[i][j + 1] == 0: ans += 1
return ans
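# Usage sketch with the classic example grid from the problem statement
# (7 land cells sharing 6 edges, so the perimeter is 4*7 - 2*6 = 16):
#   grid = [[0, 1, 0, 0],
#           [1, 1, 1, 0],
#           [0, 1, 0, 0],
#           [1, 1, 0, 0]]
#   Solution().islandPerimeter(grid)  # -> 16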
| 30.55
| 80
| 0.392799
| 93
| 611
| 2.580645
| 0.290323
| 0.145833
| 0.116667
| 0.0875
| 0.2875
| 0.275
| 0.25
| 0.25
| 0.133333
| 0
| 0
| 0.072886
| 0.438625
| 611
| 19
| 81
| 32.157895
| 0.626822
| 0.114566
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.090909
| false
| 0
| 0
| 0
| 0.272727
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b5dde242388a3c0b90abd4420143d4c4d72acbeb
| 914
|
py
|
Python
|
docker_retag/utils/auth_helper.py
|
aiopsclub/docker_retag
|
0019917b0cdd7860c7ff79afdb78101878f5c1b1
|
[
"MIT"
] | null | null | null |
docker_retag/utils/auth_helper.py
|
aiopsclub/docker_retag
|
0019917b0cdd7860c7ff79afdb78101878f5c1b1
|
[
"MIT"
] | null | null | null |
docker_retag/utils/auth_helper.py
|
aiopsclub/docker_retag
|
0019917b0cdd7860c7ff79afdb78101878f5c1b1
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
import requests
def kv2dict(kvinfo):
kv = {}
for item in kvinfo.split(","):
item_list = item.split("=")
kv[item_list[0]] = item_list[1].strip('"')
return kv
def get_service_realm(registry_url):
registry_api_url = (
registry_url if registry_url.endswith("/v2/") else registry_url + "/v2/"
)
registry_res = requests.get(registry_api_url)
www_authenticate_header = registry_res.headers.get("Www-Authenticate")
if www_authenticate_header:
return kv2dict(www_authenticate_header.split()[-1])
return None
def required_auth(registry_url):
registry_api_url = (
registry_url if registry_url.endswith("/v2/") else registry_url + "/v2/"
)
registry_res = requests.get(registry_api_url)
return registry_res.status_code == 401
def scope_generate(image):
return "repository:{}:pull,push".format(image)
| 26.114286
| 80
| 0.682713
| 120
| 914
| 4.916667
| 0.391667
| 0.149153
| 0.094915
| 0.074576
| 0.379661
| 0.379661
| 0.379661
| 0.379661
| 0.379661
| 0.379661
| 0
| 0.016238
| 0.191466
| 914
| 34
| 81
| 26.882353
| 0.782138
| 0.021882
| 0
| 0.25
| 0
| 0
| 0.06495
| 0.025756
| 0
| 0
| 0
| 0
| 0
| 1
| 0.166667
| false
| 0
| 0.041667
| 0.041667
| 0.416667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b5de2f232c7693a7a9e178d8efeaacaaaf172cb4
| 1,081
|
py
|
Python
|
app/__init__.py
|
SomeoneLixin/api-dock
|
3958a3a3286ae7f8802df9aba5ece2908ca4361e
|
[
"MIT"
] | 4
|
2018-05-07T15:39:17.000Z
|
2019-07-03T21:28:10.000Z
|
app/__init__.py
|
SomeoneLixin/api-dock
|
3958a3a3286ae7f8802df9aba5ece2908ca4361e
|
[
"MIT"
] | 4
|
2020-09-05T10:57:19.000Z
|
2021-05-09T16:01:22.000Z
|
app/__init__.py
|
SomeoneLixin/api-dock
|
3958a3a3286ae7f8802df9aba5ece2908ca4361e
|
[
"MIT"
] | 1
|
2018-05-09T07:57:03.000Z
|
2018-05-09T07:57:03.000Z
|
from flask import Flask, g
from flask_cors import CORS
from flask_jwt_extended import JWTManager
from config import config
from app.models import db, ma
from app.models.RevokedToken import RevokedToken
def create_app(config_name):
app = Flask(__name__)
CORS(app, resources={r"/api/*": {"origins": "*"}})
app.config.from_object(config[config_name])
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
config[config_name].init_app(app)
app.secret_key = app.config['SECRET_KEY']
db.init_app(app)
ma.init_app(app)
jwt = JWTManager(app)
app.config['JWT_BLACKLIST_ENABLED'] = True
app.config['JWT_BLACKLIST_TOKEN_CHECKS'] = ['access', 'refresh']
@jwt.token_in_blacklist_loader
def check_if_token_in_blacklist(decrypted_token):
jti = decrypted_token['jti']
return RevokedToken.is_jti_blacklisted(jti)
from .api import api_blueprint
app.register_blueprint(api_blueprint)
@app.route('/')
def index():
return 'API Dock, a web application for managing and testing your APIs.'
return app
| 29.216216
| 80
| 0.719704
| 148
| 1,081
| 5
| 0.398649
| 0.072973
| 0.040541
| 0.056757
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.174838
| 1,081
| 36
| 81
| 30.027778
| 0.829596
| 0
| 0
| 0
| 0
| 0
| 0.167438
| 0.07123
| 0
| 0
| 0
| 0
| 0
| 1
| 0.107143
| false
| 0
| 0.25
| 0.035714
| 0.464286
| 0.071429
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b5df02ad3bc4934c674cd77a38e8acef0d4d0b9f
| 730
|
py
|
Python
|
Snippets/auto_scroll.py
|
ColinShark/Pyrogram-Snippets
|
50ede9ca9206bd6d66c6877217b4a80b4f845294
|
[
"WTFPL"
] | 59
|
2021-01-07T16:19:48.000Z
|
2022-02-22T06:56:36.000Z
|
Snippets/auto_scroll.py
|
Mrvishal2k2/Pyrogram-Snippets
|
d4e66876f6aff1252dfb88423fedd66e18057446
|
[
"WTFPL"
] | 4
|
2019-10-14T14:02:38.000Z
|
2020-11-06T11:47:03.000Z
|
Snippets/auto_scroll.py
|
ColinShark/Pyrogram-Snippets
|
50ede9ca9206bd6d66c6877217b4a80b4f845294
|
[
"WTFPL"
] | 26
|
2021-03-02T14:31:51.000Z
|
2022-03-23T21:19:14.000Z
|
# Send .autoscroll in any chat to automatically read all sent messages until you call
# .autoscroll again. This is useful if you have Telegram open on another screen.
from pyrogram import Client, filters
from pyrogram.types import Message
app = Client("my_account")
f = filters.chat([])
@app.on_message(f)
def auto_read(_, message: Message):
app.read_history(message.chat.id)
message.continue_propagation()
@app.on_message(filters.command("autoscroll", ".") & filters.me)
def add_keep(_, message: Message):
if message.chat.id in f:
f.remove(message.chat.id)
message.edit("Autoscroll deactivated")
else:
f.add(message.chat.id)
message.edit("Autoscroll activated")
app.run()
| 25.172414
| 85
| 0.710959
| 102
| 730
| 5
| 0.509804
| 0.086275
| 0.101961
| 0.117647
| 0.133333
| 0.133333
| 0
| 0
| 0
| 0
| 0
| 0
| 0.175342
| 730
| 28
| 86
| 26.071429
| 0.847176
| 0.221918
| 0
| 0
| 0
| 0
| 0.111504
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.117647
| false
| 0
| 0.117647
| 0
| 0.235294
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b5e13346685449cfbebc7876faf4f41723fbe5c9
| 2,977
|
py
|
Python
|
_demos/paint.py
|
imdaveho/intermezzo
|
3fe4824a747face996e301ca5190caec0cb0a6fd
|
[
"MIT"
] | 8
|
2018-02-26T16:24:07.000Z
|
2021-06-30T07:40:52.000Z
|
_demos/paint.py
|
imdaveho/intermezzo
|
3fe4824a747face996e301ca5190caec0cb0a6fd
|
[
"MIT"
] | null | null | null |
_demos/paint.py
|
imdaveho/intermezzo
|
3fe4824a747face996e301ca5190caec0cb0a6fd
|
[
"MIT"
] | null | null | null |
from intermezzo import Intermezzo as mzo
curCol = [0]
curRune = [0]
backbuf = []
bbw, bbh = 0, 0
runes = [' ', '░', '▒', '▓', '█']
colors = [
mzo.color("Black"),
mzo.color("Red"),
mzo.color("Green"),
mzo.color("Yellow"),
mzo.color("Blue"),
mzo.color("Magenta"),
mzo.color("Cyan"),
mzo.color("White"),
]
def updateAndDrawButtons(current, x, y, mx, my, n, attrf):
lx, ly = x, y
for i in range(0, n):
if lx <= mx and mx <= lx+3 and ly <= my and my <= ly+1:
current[0] = i
r, fg, bg = attrf(i)
mzo.set_cell(lx+0, ly+0, r, fg, bg)
mzo.set_cell(lx+1, ly+0, r, fg, bg)
mzo.set_cell(lx+2, ly+0, r, fg, bg)
mzo.set_cell(lx+3, ly+0, r, fg, bg)
mzo.set_cell(lx+0, ly+1, r, fg, bg)
mzo.set_cell(lx+1, ly+1, r, fg, bg)
mzo.set_cell(lx+2, ly+1, r, fg, bg)
mzo.set_cell(lx+3, ly+1, r, fg, bg)
lx += 4
lx, ly = x, y
for i in range(0, n):
if current[0] == i:
fg = mzo.color("Red") | mzo.attr("Bold")
bg = mzo.color("Default")
mzo.set_cell(lx+0, ly+2, '^', fg, bg)
mzo.set_cell(lx+1, ly+2, '^', fg, bg)
mzo.set_cell(lx+2, ly+2, '^', fg, bg)
mzo.set_cell(lx+3, ly+2, '^', fg, bg)
lx += 4
def update_and_redraw_all(mx, my):
global backbuf, runes, curRune, colors, curCol
mzo.clear(mzo.color("Default"), mzo.color("Default"))
if mx != -1 and my != -1:
backbuf[bbw*my+mx] = {"Ch": runes[curRune[0]], "Fg": colors[curCol[0]], "Bg": 0}
err = mzo.copy_into_cell_buffer(backbuf)
if err:
raise(Exception(err))
_, h = mzo.size()
def rune_cb(i):
global runes
return runes[i], mzo.color("Default"), mzo.color("Default")
def color_cb(i):
global colors
return ' ', mzo.color("Default"), colors[i]
updateAndDrawButtons(curRune, 0, 0, mx, my, len(runes), rune_cb)
updateAndDrawButtons(curCol, 0, h-3, mx, my, len(colors), color_cb)
mzo.flush()
def reallocBackBuffer(w, h):
global backbuf, bbw, bbh
bbw, bbh = w, h
backbuf = [{"Ch": "", "Fg": 0, "Bg": 0} for _ in range(w*h)]
def main():
err = mzo.init()
if err:
raise(Exception(err))
mzo.set_input_mode(mzo.input("Esc") | mzo.input("Mouse"))
w, h = mzo.size()
reallocBackBuffer(w, h)
update_and_redraw_all(-1, -1)
while True:
mx, my = -1, -1
evt = mzo.poll_event()
if evt["Type"] == mzo.event("Key"):
if evt["Key"] == mzo.key("Esc"):
break
elif evt["Type"] == mzo.event("Mouse"):
if evt["Key"] == mzo.mouse("Left"):
mx, my = evt["MouseX"], evt["MouseY"]
elif evt["Type"] == mzo.event("Resize"):
reallocBackBuffer(evt["Width"], evt["Height"])
update_and_redraw_all(mx, my)
if __name__ == "__main__":
try:
main()
finally:
mzo.close()
| 29.186275
| 88
| 0.518979
| 459
| 2,977
| 3.285403
| 0.220044
| 0.079576
| 0.079576
| 0.095491
| 0.312334
| 0.253979
| 0.167109
| 0.167109
| 0.125995
| 0.027851
| 0
| 0.024079
| 0.288546
| 2,977
| 101
| 89
| 29.475248
| 0.686025
| 0
| 0
| 0.113636
| 0
| 0
| 0.063151
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.068182
| false
| 0
| 0.011364
| 0
| 0.102273
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b5e16df4333ead8fee7050f33874cfa2a8d52eb0
| 1,896
|
py
|
Python
|
amt/media_reader_cli.py
|
lsxta/amt
|
7dcff9b1ce570abe103d0d8c50fd334f2c93af7d
|
[
"MIT"
] | 5
|
2021-12-22T08:49:23.000Z
|
2022-02-22T12:38:40.000Z
|
amt/media_reader_cli.py
|
lsxta/amt
|
7dcff9b1ce570abe103d0d8c50fd334f2c93af7d
|
[
"MIT"
] | 1
|
2022-01-30T00:51:05.000Z
|
2022-02-03T04:59:42.000Z
|
amt/media_reader_cli.py
|
lsxta/amt
|
7dcff9b1ce570abe103d0d8c50fd334f2c93af7d
|
[
"MIT"
] | 1
|
2022-01-29T09:38:16.000Z
|
2022-01-29T09:38:16.000Z
|
import logging
from .media_reader import MediaReader
from .util.media_type import MediaType
class MediaReaderCLI(MediaReader):
auto_select = False
def print_results(self, results):
for i, media_data in enumerate(results):
print("{:4}| {}\t{} {} ({})".format(i, media_data.global_id, media_data["name"], media_data.get("label", media_data["season_title"]), MediaType(media_data["media_type"]).name))
def select_media(self, term, results, prompt, no_print=False, auto_select_if_single=False):
index = 0
print("Looking for", term)
if not self.auto_select and not (len(results) == 1 and auto_select_if_single):
if not no_print:
self.print_results(results)
index = input(prompt)
try:
return results[int(index)]
except (ValueError, IndexError):
logging.warning("Invalid input; skipping")
return None
def list_some_media_from_server(self, server_id, limit=None):
self.print_results(self.get_server(server_id).get_media_list(limit=limit)[:limit])
def list_servers(self):
for id in sorted(self.state.get_server_ids()):
print(id)
def test_login(self, server_ids=None, force=False):
failures = False
for server in self.get_servers():
if server.has_login() and (not server_ids or server.id in server_ids):
if (force or server.needs_to_login()) and not server.relogin():
logging.error("Failed to login into %s", server.id)
failures = True
return not failures
def auth(self, tracker_id, just_print=False):
tracker = self.get_tracker_by_id(tracker_id)
print("Get token form", tracker.get_auth_url())
if not just_print:
self.settings.store_secret(tracker.id, input("Enter token:"))
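print_results reads global_id as an attribute and the remaining fields dict-style from each result; a tiny stand-in record (purely illustrative, not the project's real media class) makes that expected shape explicit:

# Hypothetical stand-in mimicking the shape print_results expects from each result.
class _FakeMediaData(dict):
    def __init__(self, global_id, **fields):
        super().__init__(**fields)
        self.global_id = global_id

# Example record: print_results falls back to "season_title" when no "label" key is
# present, and "media_type" must be a valid value of amt's MediaType enum.
# _FakeMediaData("server:123", name="Some Title", season_title="Season 1", media_type=1)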
| 38.693878
| 188
| 0.642405
| 252
| 1,896
| 4.615079
| 0.353175
| 0.046432
| 0.027515
| 0.030954
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.002105
| 0.248418
| 1,896
| 48
| 189
| 39.5
| 0.814035
| 0
| 0
| 0
| 0
| 0
| 0.070675
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.157895
| false
| 0
| 0.078947
| 0
| 0.368421
| 0.289474
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b5e250ffeccc9fb9e0d710d9d521ebecc7097405
| 1,272
|
py
|
Python
|
src/webapi/libs/deps/__init__.py
|
VisionTale/StreamHelper
|
29a5e5d5c68401f2c1d1b9cf54a7c68fb41d623a
|
[
"MIT"
] | null | null | null |
src/webapi/libs/deps/__init__.py
|
VisionTale/StreamHelper
|
29a5e5d5c68401f2c1d1b9cf54a7c68fb41d623a
|
[
"MIT"
] | 37
|
2020-12-16T06:30:22.000Z
|
2022-03-28T03:04:28.000Z
|
src/webapi/libs/deps/__init__.py
|
VisionTale/StreamHelper
|
29a5e5d5c68401f2c1d1b9cf54a7c68fb41d623a
|
[
"MIT"
] | null | null | null |
"""
Dependency management package.
"""
def debug_print(message: str, verbose: bool):
"""
Print if verbose is set to true.
:param message: message to print
:param verbose: whether to print
:return:
"""
if verbose:
print(message)
def download_and_unzip_archive(url: str, zip_file_fp: str, static_folder: str, remove: bool = True, verbose: bool = True):
"""
Downloads and unzips an archive.
:param url: url to request
:param zip_file_fp: filepath for zip
    :param static_folder: folder for flask's static files
:param remove: whether to remove the zip after unpacking, defaults to true.
:param verbose: whether to print information, defaults to true.
:exception OSError: os.remove, requests.get, open, TextIOWrapper.write, ZipFile, ZipFile.extractall
"""
from requests import get
r = get(url)
debug_print("Saving archive..", verbose)
with open(zip_file_fp, 'wb') as f:
f.write(r.content)
debug_print("Extracting..", verbose)
from zipfile import ZipFile
with ZipFile(zip_file_fp, 'r') as zip_file:
zip_file.extractall(static_folder)
if remove:
debug_print("Removing archive..", verbose)
from os import remove
remove(zip_file_fp)
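A short usage sketch for download_and_unzip_archive; the URL and paths below are placeholders and not part of the original module:

# Hypothetical call: download a zip, extract it into a static folder, then delete the zip.
if __name__ == "__main__":
    download_and_unzip_archive(
        url="https://example.com/vendor-assets.zip",   # placeholder URL
        zip_file_fp="/tmp/vendor-assets.zip",          # where the archive is written
        static_folder="src/webapi/static",             # extraction target (assumed layout)
        remove=True,
        verbose=True,
    )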
| 30.285714
| 122
| 0.677673
| 171
| 1,272
| 4.912281
| 0.368421
| 0.058333
| 0.053571
| 0.05
| 0.061905
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.227201
| 1,272
| 41
| 123
| 31.02439
| 0.854527
| 0.416667
| 0
| 0
| 0
| 0
| 0.073353
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.117647
| false
| 0
| 0.176471
| 0
| 0.294118
| 0.294118
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b5e50a13752cec91e8412a4602fb057eaceaa6b0
| 1,113
|
py
|
Python
|
demos/runner/validate.py
|
Tanbobobo/DL-starter
|
be4678171bd51ae9e4f61079fa6422e3378d7ce4
|
[
"Apache-2.0"
] | null | null | null |
demos/runner/validate.py
|
Tanbobobo/DL-starter
|
be4678171bd51ae9e4f61079fa6422e3378d7ce4
|
[
"Apache-2.0"
] | null | null | null |
demos/runner/validate.py
|
Tanbobobo/DL-starter
|
be4678171bd51ae9e4f61079fa6422e3378d7ce4
|
[
"Apache-2.0"
] | null | null | null |
import torch
import wandb
def val(
criterion=None,
metric=None,
loader=None,
model=None,
device=None
):
r'''
Args:
        criterion: a differentiable loss function that provides gradients for the backward pass
metric: a score to save best model
loader: a data iterator
model: model
device: calculation device, cpu or cuda.
Returns:
        a metric score reflecting the model's prediction accuracy on the unseen dataset
'''
model.eval()
model.to(device)
loss_value_mean = 0
with torch.no_grad():
for idx, data in enumerate(loader):
img = data['img'].to(device)
gt = data['gt'].to(device)
pred = model(img)
loss_value = criterion(pred, gt)
loss_value_mean += loss_value
metric.accumulate(pred, gt)
wandb.log({'val_loss': loss_value})
metric_value = metric.value
loss_value_mean = loss_value_mean / len(loader)
return model, metric_value, loss_value_mean
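A minimal driver sketch for val(), using toy tensors and a trivial stand-in metric; torch and wandb are already imported at the top of this file, and wandb runs in disabled mode so the call stays side-effect free (all names below are illustrative, not part of the project):

# Hypothetical smoke test for val(): a tiny in-memory loader, a linear model, an MSE criterion.
class _SumMetric:
    # Minimal stand-in for the project's metric object: accumulates batches, exposes .value.
    def __init__(self):
        self.total = 0.0

    def accumulate(self, pred, gt):
        self.total += float(((pred - gt) ** 2).mean())

    @property
    def value(self):
        return self.total

if __name__ == "__main__":
    wandb.init(mode="disabled")                       # no network access, no run is created
    loader = [{"img": torch.randn(2, 3), "gt": torch.randn(2, 3)} for _ in range(4)]
    model, metric_value, loss_mean = val(
        criterion=torch.nn.MSELoss(),
        metric=_SumMetric(),
        loader=loader,
        model=torch.nn.Linear(3, 3),
        device="cpu",
    )
    print(metric_value, float(loss_mean))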
| 27.146341
| 98
| 0.574124
| 135
| 1,113
| 4.607407
| 0.42963
| 0.115756
| 0.104502
| 0.054662
| 0.12701
| 0
| 0
| 0
| 0
| 0
| 0
| 0.001377
| 0.347709
| 1,113
| 40
| 99
| 27.825
| 0.855372
| 0.281222
| 0
| 0
| 0
| 0
| 0.018207
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.04
| false
| 0
| 0.08
| 0
| 0.16
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b5e76e091ee3230443db9902e3df57b4dbeb04c4
| 4,428
|
py
|
Python
|
plot_fig07e_varying.py
|
victorcroisfelt/cf-ra-spatial-separability
|
60611c85079dd13848c70e3192331ea2a9f55138
|
[
"MIT"
] | null | null | null |
plot_fig07e_varying.py
|
victorcroisfelt/cf-ra-spatial-separability
|
60611c85079dd13848c70e3192331ea2a9f55138
|
[
"MIT"
] | null | null | null |
plot_fig07e_varying.py
|
victorcroisfelt/cf-ra-spatial-separability
|
60611c85079dd13848c70e3192331ea2a9f55138
|
[
"MIT"
] | 2
|
2022-01-08T12:18:43.000Z
|
2022-02-23T07:59:18.000Z
|
########################################
# plot_fig07e_varying.py
#
# Description. Script used to plot Fig. 07 (e) of the paper.
#
# Author. @victorcroisfelt
#
# Date. December 29, 2021
#
# This code is part of the code package used to generate the numeric results
# of the paper:
#
# Croisfelt, V., Abrão, T., and Marinello, J. C., “User-Centric Perspective in
# Random Access Cell-Free Aided by Spatial Separability”, arXiv e-prints, 2021.
#
# Available on:
#
# https://arxiv.org/abs/2107.10294
#
# Comment. Please, make sure that you have the required data files. They are
# obtained by running the scripts:
#
# - data_fig07_08_bcf.py
# - data_fig07_08_cellular.py
# - data_fig07_08_cellfree.py
#
########################################
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import warnings
########################################
# Preamble
########################################
# Comment the line below to see possible warnings related to python version
# issues
warnings.filterwarnings("ignore")
axis_font = {'size':'12'}
plt.rcParams.update({'font.size': 12})
matplotlib.rc('xtick', labelsize=12)
matplotlib.rc('ytick', labelsize=12)
matplotlib.rc('text', usetex=True)
matplotlib.rcParams['text.latex.preamble'] = r"\usepackage{amsmath}"
########################################
# Loading data
########################################
data_bcf = np.load('data/fig07e_bcf.npz')
data_cellfree_est1 = np.load('data/fig07e_cellfree_est1.npz')
data_cellfree_est2 = np.load('data/fig07e_cellfree_est2.npz')
data_cellfree_est3 = np.load('data/fig07e_cellfree_est3.npz')
# Extract x-axis
L_range = data_cellfree_est1["L_range"]
N_range = data_cellfree_est1["N_range"]
# Extract ANAA
anaa_bcf = data_bcf["anaa"]
anaa_cellfree_est1 = data_cellfree_est1["anaa"]
anaa_cellfree_est2 = data_cellfree_est2["anaa"]
anaa_cellfree_est3 = data_cellfree_est3["anaa"]
########################################
# Plot
########################################
# Fig. 07e
fig, ax = plt.subplots(figsize=(4/3 * 3.15, 2))
#fig, ax = plt.subplots(figsize=(1/3 * (6.30), 3))
# Go through all values of N
for nn, N in enumerate(N_range):
plt.gca().set_prop_cycle(None)
if N == 1:
# BCF
ax.plot(L_range[:-2], anaa_bcf[:-2], linewidth=1.5, linestyle=(0, (3, 1, 1, 1, 1, 1)), color='black', label='BCF')
ax.plot(L_range[:-2], anaa_cellfree_est1[:-2, nn], linewidth=1.5, linestyle='--', color='black', label='CF-SUCRe: Est. 1')
ax.plot(L_range[:-2], anaa_cellfree_est2[:-2, nn], linewidth=1.5, linestyle='-.', color='black', label='CF-SUCRe: Est. 2')
ax.plot(L_range[:-2], anaa_cellfree_est3[:-2, nn], linewidth=1.5, linestyle=':', color='black', label='CF-SUCRe: Est. 3')
ax.plot(L_range[:-2], anaa_cellfree_est1[:-2, nn], linewidth=1.5, linestyle='--')
ax.plot(L_range[:-2], anaa_cellfree_est2[:-2, nn], linewidth=1.5, linestyle='-.')
ax.plot(L_range[:-2], anaa_cellfree_est3[:-2, nn], linewidth=1.5, linestyle=':')
elif N == 8:
ax.plot(L_range[:-2], anaa_cellfree_est1[:-2, nn], linewidth=1.5, linestyle='--')
ax.plot(L_range[:-2], anaa_cellfree_est2[:-2, nn], linewidth=1.5, linestyle='-.')
ax.plot(L_range[:-2], anaa_cellfree_est3[:-2, nn], linewidth=1.5, linestyle=':')
plt.gca().set_prop_cycle(None)
if N == 1:
ax.plot(L_range[:-2], anaa_cellfree_est1[:-2, nn], linewidth=0.0, marker='^', color='black', label='$N=1$')
ax.plot(L_range[:-2], anaa_cellfree_est1[:-2, nn], linewidth=0.0, marker='^')
ax.plot(L_range[:-2], anaa_cellfree_est2[:-2, nn], linewidth=0.0, marker='^')
ax.plot(L_range[:-2], anaa_cellfree_est3[:-2, nn], linewidth=0.0, marker='^')
elif N == 8:
ax.plot(L_range[:-2], anaa_cellfree_est1[:-2, nn], linewidth=0.0, marker='v', color='black', label='$N=8$')
ax.plot(L_range[:-2], anaa_cellfree_est1[:-2, nn], linewidth=0.0, marker='v')
ax.plot(L_range[:-2], anaa_cellfree_est2[:-2, nn], linewidth=0.0, marker='v')
ax.plot(L_range[:-2], anaa_cellfree_est3[:-2, nn], linewidth=0.0, marker='v')
def forward(x):
return x**(1/2)
def inverse(x):
return x**2
ax.set_xscale('function', functions=(forward, inverse))
ax.set_xticks(L_range[:-2])
ax.set_yticks(np.array([1, 3, 5, 7, 9, 10]))
ax.grid(visible=True, alpha=0.25, linestyle='--')
ax.set_xlabel(r'number of APs $L$')
ax.set_ylabel('ANAA')
ax.legend(fontsize='xx-small', markerscale=.5)
plt.show()
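The square-root x-axis used above relies on matplotlib's 'function' scale; a standalone sketch with synthetic data (not part of the original script, reusing the numpy and pyplot imports from the top of this file) isolates that trick:

# Hypothetical standalone example of the sqrt-spaced x-axis via set_xscale('function', ...).
def _forward(x):
    return x ** (1 / 2)

def _inverse(x):
    return x ** 2

if __name__ == "__main__":
    x_demo = np.array([1, 4, 16, 64, 100])
    fig_demo, ax_demo = plt.subplots()
    ax_demo.plot(x_demo, np.sqrt(x_demo), marker="o")
    ax_demo.set_xscale("function", functions=(_forward, _inverse))  # compresses large x values
    ax_demo.set_xticks(x_demo)
    plt.show()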
| 30.537931
| 124
| 0.630759
| 676
| 4,428
| 3.964497
| 0.284024
| 0.047015
| 0.049627
| 0.080597
| 0.428731
| 0.384701
| 0.384701
| 0.375746
| 0.375373
| 0.356716
| 0
| 0.050973
| 0.118338
| 4,428
| 144
| 125
| 30.75
| 0.635502
| 0.200316
| 0
| 0.20339
| 0
| 0
| 0.114079
| 0.027341
| 0
| 0
| 0
| 0
| 0
| 1
| 0.033898
| false
| 0
| 0.067797
| 0.033898
| 0.135593
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b5e97f4578877e1fcf5bd928b8d18930e062681c
| 6,697
|
py
|
Python
|
Meters/IEC/Datasets/get_time.py
|
Runamook/PyCharmProjects
|
1b1a063345e052451f00e3fdea82e31bdd2a0cae
|
[
"MIT"
] | null | null | null |
Meters/IEC/Datasets/get_time.py
|
Runamook/PyCharmProjects
|
1b1a063345e052451f00e3fdea82e31bdd2a0cae
|
[
"MIT"
] | null | null | null |
Meters/IEC/Datasets/get_time.py
|
Runamook/PyCharmProjects
|
1b1a063345e052451f00e3fdea82e31bdd2a0cae
|
[
"MIT"
] | null | null | null |
import datetime
from time import sleep
import re
import pytz
# MeterBase and logger come from the emhmeter module (package-relative import with a script fallback).
try:
    from .emhmeter import MeterBase, create_input_vars, logger
except (ImportError, ModuleNotFoundError):
    from emhmeter import MeterBase, create_input_vars, logger
class GetTime:
def __init__(self, input_vars):
self.input_vars = input_vars
self.meter_number = input_vars["meter"]["meterNumber"]
self.results = dict()
def _get(self, what):
# results = dict()
# Get time
if what == "time":
obis = "0.9.1"
name = "time"
elif what == "date":
obis = "0.9.2"
name = "date"
else:
logger.error(f"Incorrect input {what}, use \"time\" or \"date\"")
raise KeyError
delta = datetime.timedelta(seconds=14)
ref_time = datetime.datetime.utcnow() + delta
self.results[name] = [ref_time, None]
logger.debug(f"===================== Getting {name} ===================== ")
value = self.query("R5", f"{obis}()")
if obis not in value:
logger.error(f"Unable to receive {name}. Received: \"{value}\"")
value = f"{obis}(error)"
self.results[name][1] = value
logger.debug(f"{self.results}")
if what == "time":
self.make_pause()
return
def query(self, cmd, data):
with MeterBase(self.input_vars) as m:
m.sendcmd_and_decode_response(b"/" + b"?" + self.meter_number.encode() + b"!\r\n")
m.sendcmd_and_decode_response(MeterBase.ACK + b'051\r\n')
result = m.sendcmd_and_decode_response(cmd.encode(), data.encode())
cmd = MeterBase.SOH + b'B0' + MeterBase.ETX
m.sendcmd_and_decode_response(cmd + MeterBase.bcc(cmd))
return result
@staticmethod
def make_pause():
pause = 25
logger.debug(f"Pausing for {pause} seconds")
sleep(pause)
return
def check(self, what, value):
# 0.9.2(1190724), 0.9.1(1221856)
re_in_parenthesis = re.compile('^0.9..[(](.+?)[)]')
logger.debug(f"Checking meter {what} \"{value}\"")
reference_value = value[0]
checked_value = value[1]
found_value = re_in_parenthesis.search(checked_value).groups()[0]
if what == "date":
return self.check_date(reference_value, found_value)
elif what == "time":
return self.check_time(reference_value, found_value)
@staticmethod
def check_date(ref_value, checked_value):
# datetime object, string
if checked_value != "error":
ref_value = ref_value.strftime("%y%m%d")
else:
ref_value = "010000"
checked_value = checked_value[1:]
logger.debug(f"Checking {ref_value} == {checked_value}")
return ref_value == checked_value
@staticmethod
def check_time(ref_value, checked_value):
# datetime object, string
# Select meter TZ based on response
if checked_value[0] == "1":
local_tz = pytz.timezone("Europe/Berlin") # UTC +2
elif checked_value[0] == "2":
local_tz = pytz.timezone("UTC")
else:
local_tz = pytz.timezone("Europe/Moscow") # UTC +3
# Generate "now" time in UTC
utc_now = pytz.utc.localize(datetime.datetime.utcnow())
# ref_value is UTC already, insert TZ info into object
ref_value = pytz.utc.localize(ref_value)
# Adjust it to actual meter TZ
ref_value = ref_value.astimezone(local_tz)
if checked_value != "error":
now_date = utc_now.strftime("%y%m%d")
else:
now_date = "010000"
# Take meter TZ now date (generated by script), add to meter TZ now time (received from meter)
checked_value = datetime.datetime.strptime(now_date + checked_value[1:], "%y%m%d%H%M%S")
# Insert local_tz into datetime object
checked_value = local_tz.localize(checked_value)
# Now both objects are in local TZ
logger.debug(f"Checking {ref_value} == {checked_value}")
# Compare
delta = (checked_value - ref_value).total_seconds()
logger.debug(f"Delta = {delta}")
allowable_delta = 6 # Seconds
return abs(delta) <= allowable_delta
def get(self):
self.input_vars["get_id"] = False
self._get("time")
self._get("date")
for key in self.results.keys():
if self.check(key, self.results[key]):
logger.debug(f"{key} is correct")
self.results[key].append("0")
else:
logger.debug(f"{key} is incorrect")
self.results[key].append("1")
return self.results
def parse(self, data):
# Input
# {'time': [datetime.datetime(2019, 7, 27, 13, 43, 28, 274370), '0.9.1(1154336)', '0'],
# 'date': [datetime.datetime(2019, 7, 27, 13, 44, 20, 519825), '0.9.2(1190727)', '1']},
logger.debug(f"{self.meter_number} Parsing time output")
logger.debug(f"{self.meter_number} {data}")
results = dict()
for key in data.keys():
if key == "time":
obis = "0.9.1"
elif key == "date":
obis = "0.9.2"
epoch = data[key][0].strftime("%s")
item_value = data[key][1]
trigger_value = data[key][2]
results[epoch] = [(f"{obis}-value", item_value), (f"{obis}-trigger", trigger_value)]
# {epoch: [(obis_code, val), (), (), ...]}
final_result = {"time": results}
logger.debug(f"{final_result}")
return final_result
if __name__ == "__main__":
meter = {
"meterNumber": "04180616",
"Manufacturer": "",
"ip": "10.124.2.48",
"InstallationDate": "2018-10-10T10:00:00",
"IsActive": True,
"voltageRatio": 200,
"currentRatio": 10,
"totalFactor": 210
}
meter = {
"meterNumber": "05296170",
"Manufacturer": "EMH",
"ip": "10.124.2.120",
"InstallationDate": "2019-02-20T09:00:00",
"IsActive": True,
"voltageRatio": 200,
"currentRatio": 15,
"totalFactor": 215
}
variables = {"port": MeterBase.get_port(meter["ip"]),
"timestamp": MeterBase.get_dt(),
"data_handler": "P.01",
"exporter": "Zabbix",
"server": "192.168.33.33",
"meter": meter
}
logger.setLevel("DEBUG")
m = GetTime(variables)
data = m.get()
print(m.parse(data))
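check_time follows a localize-then-convert pattern with pytz; a small sketch of that pattern, independent of the meter classes above and reusing the datetime/pytz imports at the top of this file (function and variable names are illustrative):

# Hypothetical illustration of comparing a UTC reference against a local-time string.
def _within_allowed_delta(ref_utc_naive, local_hms, tz_name="Europe/Berlin", allowed=6):
    local_tz = pytz.timezone(tz_name)
    ref = pytz.utc.localize(ref_utc_naive).astimezone(local_tz)    # UTC -> meter timezone
    today = ref.strftime("%y%m%d")                                 # reuse the reference date
    checked = local_tz.localize(
        datetime.datetime.strptime(today + local_hms, "%y%m%d%H%M%S")
    )
    return abs((checked - ref).total_seconds()) <= allowed

# _within_allowed_delta(datetime.datetime.utcnow(), "120000") compares 12:00:00 Berlin time
# against the current UTC instant and returns True only if they are within 6 seconds.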
| 31.441315
| 102
| 0.551441
| 797
| 6,697
| 4.486826
| 0.269762
| 0.060403
| 0.040268
| 0.027964
| 0.198546
| 0.14038
| 0.095638
| 0.049217
| 0
| 0
| 0
| 0.047568
| 0.303121
| 6,697
| 212
| 103
| 31.589623
| 0.718663
| 0.124683
| 0
| 0.176871
| 0
| 0
| 0.168437
| 0.007197
| 0
| 0
| 0
| 0.004717
| 0
| 1
| 0.061224
| false
| 0
| 0.027211
| 0
| 0.156463
| 0.006803
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b5ea1cb63e2208d12c4791c91ece989cd820bf44
| 3,889
|
py
|
Python
|
instagrapi/direct.py
|
chaulaode1257/instagrapi
|
cfb8cb53d3a63092c0146f3a0b7a086c760908c9
|
[
"MIT"
] | 11
|
2021-01-09T22:52:30.000Z
|
2022-03-22T18:33:38.000Z
|
instagrapi/direct.py
|
chaulaode1257/instagrapi
|
cfb8cb53d3a63092c0146f3a0b7a086c760908c9
|
[
"MIT"
] | null | null | null |
instagrapi/direct.py
|
chaulaode1257/instagrapi
|
cfb8cb53d3a63092c0146f3a0b7a086c760908c9
|
[
"MIT"
] | 4
|
2020-12-26T06:14:53.000Z
|
2022-01-05T05:00:16.000Z
|
import re
from typing import List
from .utils import dumps
from .types import DirectThread, DirectMessage
from .exceptions import ClientNotFoundError, DirectThreadNotFound
from .extractors import extract_direct_thread, extract_direct_message
class Direct:
def direct_threads(self, amount: int = 20) -> List[DirectThread]:
"""Return last threads
"""
assert self.user_id, "Login required"
params = {
"visual_message_return_type": "unseen",
"thread_message_limit": "10",
"persistentBadging": "true",
"limit": "20",
}
cursor = None
threads = []
self.private_request("direct_v2/get_presence/")
while True:
if cursor:
params['cursor'] = cursor
result = self.private_request("direct_v2/inbox/", params=params)
inbox = result.get("inbox", {})
for thread in inbox.get("threads", []):
threads.append(extract_direct_thread(thread))
cursor = inbox.get("oldest_cursor")
if not cursor or (amount and len(threads) >= amount):
break
if amount:
threads = threads[:amount]
return threads
def direct_thread(self, thread_id: int, amount: int = 20) -> DirectThread:
"""Return full information by thread
"""
assert self.user_id, "Login required"
params = {
"visual_message_return_type": "unseen",
"direction": "older",
"seq_id": "40065", # 59663
"limit": "20",
}
cursor = None
items = []
while True:
if cursor:
params['cursor'] = cursor
try:
result = self.private_request(f"direct_v2/threads/{thread_id}/", params=params)
except ClientNotFoundError as e:
raise DirectThreadNotFound(e, thread_id=thread_id, **self.last_json)
thread = result['thread']
for item in thread['items']:
items.append(item)
cursor = thread.get("oldest_cursor")
if not cursor or (amount and len(items) >= amount):
break
if amount:
items = items[:amount]
thread['items'] = items
return extract_direct_thread(thread)
def direct_messages(self, thread_id: int, amount: int = 20) -> List[DirectMessage]:
"""Fetch list of messages by thread (helper)
"""
assert self.user_id, "Login required"
return self.direct_thread(thread_id, amount).messages
def direct_answer(self, thread_id: int, text: str) -> DirectMessage:
"""Send message
"""
assert self.user_id, "Login required"
return self.direct_send(text, [], [int(thread_id)])
def direct_send(self, text: str, user_ids: List[int] = [], thread_ids: List[int] = []) -> DirectMessage:
"""Send message
"""
assert self.user_id, "Login required"
method = "text"
kwargs = {}
if 'http' in text:
method = "link"
kwargs["link_text"] = text
kwargs["link_urls"] = dumps(
re.findall(r"(https?://[^\s]+)", text))
else:
kwargs["text"] = text
if thread_ids:
kwargs["thread_ids"] = dumps([int(tid) for tid in thread_ids])
if user_ids:
kwargs["recipient_users"] = dumps([[int(uid) for uid in user_ids]])
data = {
"client_context": self.generate_uuid(),
"action": "send_item",
**kwargs
}
result = self.private_request(
"direct_v2/threads/broadcast/%s/" % method,
data=self.with_default_data(data),
with_signature=False
)
return extract_direct_message(result["payload"])
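direct_send switches the broadcast method to "link" whenever the text contains a URL; the detection step can be sketched on its own, reusing the re and dumps imports already at the top of this module (the helper name is illustrative):

# Hypothetical illustration of the URL detection used by direct_send.
def _split_links(text: str):
    urls = re.findall(r"(https?://[^\s]+)", text)
    if urls:
        return "link", {"link_text": text, "link_urls": dumps(urls)}
    return "text", {"text": text}

# _split_links("see https://example.com")[0]  -> "link"
# _split_links("plain message")[0]            -> "text"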
| 35.678899
| 108
| 0.558498
| 411
| 3,889
| 5.121655
| 0.272506
| 0.030404
| 0.033254
| 0.038005
| 0.27886
| 0.266508
| 0.236105
| 0.178147
| 0.178147
| 0.098812
| 0
| 0.009916
| 0.325791
| 3,889
| 108
| 109
| 36.009259
| 0.792906
| 0.044484
| 0
| 0.255556
| 0
| 0
| 0.133875
| 0.036856
| 0
| 0
| 0
| 0
| 0.055556
| 1
| 0.055556
| false
| 0
| 0.066667
| 0
| 0.188889
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b5eee5ae8e8ac24bba961d0d4420546bd6f06e1d
| 26,090
|
py
|
Python
|
src/main/python/cybercaptain/visualization/bar.py
|
FHNW-CyberCaptain/CyberCaptain
|
07c989190e997353fbf57eb7a386947d6ab8ffd5
|
[
"MIT"
] | 1
|
2018-10-01T10:59:55.000Z
|
2018-10-01T10:59:55.000Z
|
src/main/python/cybercaptain/visualization/bar.py
|
FHNW-CyberCaptain/CyberCaptain
|
07c989190e997353fbf57eb7a386947d6ab8ffd5
|
[
"MIT"
] | null | null | null |
src/main/python/cybercaptain/visualization/bar.py
|
FHNW-CyberCaptain/CyberCaptain
|
07c989190e997353fbf57eb7a386947d6ab8ffd5
|
[
"MIT"
] | 1
|
2021-11-01T00:09:00.000Z
|
2021-11-01T00:09:00.000Z
|
"""
This module contains the visualization bar class.
"""
import glob
import os
import re
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.colors as colors
import matplotlib.cm as cmx
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.ticker import FuncFormatter
from cybercaptain.utils.exceptions import ValidationError
from cybercaptain.visualization.base import visualization_base
from cybercaptain.utils.jsonFileHandler import json_file_reader
from cybercaptain.utils.helpers import str2bool
class visualization_bar(visualization_base):
"""
This class handles the bar graph plotting.
**Parameters**:
kwargs:
contains a dictionary of all attributes.
**Attributes**:
type:
the defined bar plot type (Currently supported: histogram, comparedbarplot, groupedbarplot, barplot3d, barplotgroupedstacked, barplotcomparedstacked)
dataAttribute:
in which attribute the values can be found in the dataset (E.g. 'example1.test.val')
            Recommended to use the group module and reuse the value attribute that was set there.
groupNameAttribute:
in which attribute the grouped name can be found (E.g. 'example1.test.group')
            Recommended to use the group module and reuse the group attribute that was set there.
threshold:
possibility to set a value threshold to hide smaller groups for example.
figureSize:
define a tuple to set the figure size proportion (E.g. '20, 10').
rotateXTicks:
int to rotate the x-ticks names if needed (E.g. 90 or -90).
rotateYTicks:
int to rotate the y-ticks names if needed (E.g. 90 or -90).
filenamesRegexExtract:
            enables extracting parts of the filenames, for example to use on the x/y axis of file/run grouped plots (E.g. '([-+]\\d+)').
colormapAscending:
            normalizes the given values and sets a color depending on their value (ascending heat - can be combined with 'colormap')
(Supported for: comparedbarplot, groupedbarplot, barplot3d - Defaults to False)
(Important: Ascending heat colors do not make sense for every plot although it is supported!)
colormap:
set the string for the colormap to be used on the graphs (Reference: https://matplotlib.org/users/colormaps.html)
horizontal:
the bool to display the barchart horizontal to the default vertical (Supported for: comparedbarplot, groupedbarplot, barplotcomparedstacked, barplotgroupedstacked)
scaledTo100:
the bool to scale a stacked bar plot to 100 (Supported for: barplotcomparedstacked, barplotgroupedstacked)
xlabel:
the string for the x-axis.
ylabel:
the string for the y-axis.
title:
the string for the title.
zlabel:
the string for the z-axis.
showYAxisFileNames:
the bool if on the BarPlot3D plot the filenames should be shown on the y-axis.
            Can be combined with filenamesRegexExtract to show only the extracted part of each filename.
showGrid:
show the grid behind the plot (Defaults to False).
showLegend:
show the data legend for the chart (Defaults to True).
"""
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.validate(kwargs)
# If subclass needs special variables define here
        self.type = kwargs.get("type") # histogram, comparedbarplot, groupedbarplot, barplot3d, barplotcomparedstacked, barplotgroupedstacked
# General
self.data_attribute = kwargs.get("dataAttribute") # In which attribute to find the group value in the dataset
self.group_name_attribute = kwargs.get("groupNameAttribute") # In which attribute to find the group name in the dataset
self.x_label = kwargs.get("xlabel", "")
self.y_label = kwargs.get("ylabel", "")
self.title = kwargs.get("title", "")
self.threshold = kwargs.get("threshold")
self.figure_size = kwargs.get("figureSize", [20, 10])
self.filenames_regex_extract = kwargs.get("filenamesRegexExtract")
self.color_map_ascending = str2bool(kwargs.get("colormapAscending"))
self.color_map = kwargs.get("colormap")
self.rotate_xticks = kwargs.get("rotateXTicks", 0)
self.rotate_yticks = kwargs.get("rotateYTicks", 0)
self.horizontal = str2bool(kwargs.get("horizontal"))
self.show_grid = str2bool(kwargs.get("showGrid"))
self.show_legend = str2bool(kwargs.get("showLegend", True))
# Stacked Plots
self.scaled_to_100 = str2bool(kwargs.get("scaledTo100"))
# BarPlot3D
self.z_label = kwargs.get("zlabel", "")
self.show_y_axis_file_names = str2bool(kwargs.get("showYAxisFileNames"))
def run(self):
"""
The bar run method collects and bundles the data for the plotting method.
**Returns**:
``True`` if the run was successful.
        ``False`` if the run did not end successfully.
"""
self.cc_log("INFO", "Data Visualization Bar: Started")
success = False
self.cc_log("INFO", "Bar visualization type: %s" % self.type)
plt.rcParams['figure.figsize'] = (self.figure_size[0], self.figure_size[1])
files = glob.glob(self.src)
if len(files) < 1:
self.cc_log("ERROR", "No files to plot were found - maybe recheck wildcard if defined!")
return False
if self.type == "histogram":
success = self.plot_histogram(files)
elif self.type == "comparedbarplot":
success = self.plot_comparedbarplot(files)
elif self.type == "groupedbarplot":
success = self.plot_groupedbarplot(files)
elif self.type == "barplot3d":
success = self.plot_barplot3d(files)
elif self.type == "barplotcomparedstacked":
success = self.plot_barplotcomparedstacked(files)
elif self.type == "barplotgroupedstacked":
success = self.plot_barplotgroupedstacked(files)
else:
self.cc_log("ERROR", "Data Visualization Bar: An unknown bar plot type (%s) was defined!" % (self.type))
return False
if success:
self.cc_log("DEBUG", "Data Visualization Bar: The plot can be found at: %s" % self.target)
self.cc_log("INFO", "Data Visualization Bar: Finished")
return True
return False
def plot_comparedbarplot(self, files):
"""
Plots a simple compared barplot according to the groups and their values.
        Multiple files/runs will show on the X-Axis and different groups beside each other.
(colormapAscending supported - ascending heat for every group)
**Parameters**:
files : list
list of file paths.
**Returns**:
``True`` if the plot was successfully saved.
``False`` in case something failed.
"""
_, ax = plt.subplots()
file_count, names_list, data_dict = self.get_data_from_files(files)
if len(data_dict) == 0:
self.cc_log("WARNING", "Data length to plot is equal to zero - recheck dataAttribute, groupNameAttribute or threshold!")
return False
data_vals = list(data_dict.values())
data_keys = list(data_dict.keys())
self.set_color_cycle(len(data_keys), ax)
barWidth = 1/len(data_keys)
x_pos = np.arange(file_count)
for i in range(0, len(data_keys)):
custom_colormap = self.get_heat_colormap(data_vals[i]) # Ascending Heat If Activated
if self.horizontal:
plt.barh(x_pos, data_vals[i], height=barWidth, color=custom_colormap, edgecolor='white', label=data_keys[i])
x_pos = [p + barWidth for p in x_pos]
else:
plt.bar(x_pos, data_vals[i], width=barWidth, color=custom_colormap, edgecolor='white', label=data_keys[i])
x_pos = [p + barWidth for p in x_pos]
#data_keys_expanded = data_keys*file_count
#data_keys_expanded[0] = data_keys_expanded[0] + "\n"+names_list[0]
#for i in range(1, file_count):
# data_keys_expanded[i*len(data_keys)] = data_keys_expanded[i*len(data_keys)] + "\n"+names_list[i]
plt.xticks([0], names_list, rotation=self.rotate_xticks)
if self.horizontal:
plt.yticks(np.arange(file_count), names_list, rotation=self.rotate_yticks)
else:
plt.xticks(np.arange(file_count), names_list, rotation=self.rotate_xticks)
ax.set_ylabel(self.y_label, fontweight='bold')
ax.set_xlabel(self.x_label, fontweight='bold')
ax.set_title(self.title, fontweight='bold')
if self.show_legend: ax.legend(data_keys, loc = 'best')
if self.show_grid: plt.grid(linestyle='dotted')
plt.savefig(self.target, bbox_inches='tight')
plt.close('all')
return True
def plot_groupedbarplot(self, files):
"""
Plots a simple grouped barplot according to the groups and their values.
Multiple files/runs will show beside each other. X-Axis resembles the groups.
(colormapAscending supported - ascending heat colors for every run/file)
**Parameters**:
files : list
list of file paths.
**Returns**:
``True`` if the plot was successfully saved.
``False`` in case something failed.
"""
_, ax = plt.subplots()
file_count, names_list, data_dict = self.get_data_from_files(files)
if len(data_dict) == 0:
self.cc_log("WARNING", "Data length to plot is equal to zero - recheck dataAttribute, groupNameAttribute or threshold!")
return False
data_vals = list(data_dict.values())
data_keys = list(data_dict.keys())
self.set_color_cycle(len(data_keys), ax)
barWidth = 1/file_count
x_pos = np.arange(len(data_vals))
for i in range(0, file_count):
plot_values = [ x[i] for x in data_vals ]
custom_colormap = self.get_heat_colormap(plot_values) # Ascending Heat If Activated
if self.horizontal:
plt.barh(x_pos, plot_values, height=barWidth, color=custom_colormap, edgecolor='white', label=names_list[i])
x_pos = [x + barWidth for x in x_pos]
else:
plt.bar(x_pos, plot_values, width=barWidth, color=custom_colormap, edgecolor='white', label=names_list[i])
x_pos = [x + barWidth for x in x_pos]
if self.horizontal:
plt.yticks(np.arange(len(data_vals)), data_keys, rotation=self.rotate_yticks)
else:
plt.xticks(np.arange(len(data_vals)), data_keys, rotation=self.rotate_xticks)
ax.set_ylabel(self.y_label, fontweight='bold')
ax.set_xlabel(self.x_label, fontweight='bold')
ax.set_title(self.title, fontweight='bold')
if self.show_legend: ax.legend(loc = 'best')
if self.show_grid: plt.grid(linestyle='dotted')
plt.savefig(self.target, bbox_inches='tight')
plt.close('all')
return True
def plot_barplot3d(self, files):
"""
Plots a barplot plot in 3D x axis according to the groups.
Multiple files/runs will show on the z axis.
(colormapAscending supported - ascending heat colors for every run/file)
**Parameters**:
files : list
list of file paths.
**Returns**:
``True`` if the plot was successfully saved.
``False`` in case something failed.
"""
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
file_count, names_list, data_dict = self.get_data_from_files(files)
if len(data_dict) == 0:
self.cc_log("WARNING", "Data length to plot is equal to zero - recheck dataAttribute, groupNameAttribute or threshold!")
return False
data_vals = list(data_dict.values())
data_keys = list(data_dict.keys())
self.set_color_cycle(len(data_keys), ax)
for i in range(0, file_count):
plot_values = [ x[i] for x in data_vals ]
custom_colormap = self.get_heat_colormap(plot_values) # Ascending Heat If Activated
ax.bar(np.arange(len(data_keys)), plot_values, color=custom_colormap, zs=i, zdir='y', alpha=0.8)
ax.set_xticks(np.arange(len(data_keys)))
ax.set_xticklabels(data_keys, rotation=self.rotate_xticks)
ax.set_yticks(np.arange(file_count))
if self.show_y_axis_file_names:
ax.set_yticklabels(names_list, ha="left")
else:
ax.set_yticklabels([])
ax.set_xlabel(self.x_label, y=1.10, labelpad=20, fontweight='bold')
ax.set_ylabel(self.y_label, fontweight='bold')
ax.set_zlabel(self.z_label, fontweight='bold')
ax.set_title(self.title, y=1.02, fontweight='bold')
if self.show_grid: plt.grid(linestyle='dotted')
plt.savefig(self.target, bbox_inches='tight')
plt.close('all')
return True
def plot_barplotgroupedstacked(self, files):
"""
Plots a simple barplot according to the groups and their values.
Multiple files/runs will be shown stacked on each of the groups.
**Parameters**:
files : list
list of file paths.
**Returns**:
``True`` if the plot was successfully saved.
``False`` in case something failed.
"""
_, ax = plt.subplots()
file_count, names_list, data_dict = self.get_data_from_files(files)
if len(data_dict) == 0:
self.cc_log("WARNING", "Data length to plot is equal to zero - recheck dataAttribute, groupNameAttribute or threshold!")
return False
data_vals = list(data_dict.values())
data_keys = list(data_dict.keys())
self.set_color_cycle(len(data_keys), ax)
barWidth = 0.9
bottom = np.array([0] * len(data_keys))
if self.scaled_to_100: totals = [sum(x) for x in data_vals]
for i in range(0, file_count):
plot_values = [ x[i] for x in data_vals ]
if self.scaled_to_100: plot_values = [l / j * 100 for l,j in zip(plot_values, totals)]
if self.horizontal:
ax.barh(np.arange(len(data_keys)), plot_values, linewidth=0, height=barWidth, left=bottom, label=names_list[i])
bottom = bottom + plot_values
else:
ax.bar(np.arange(len(data_keys)), plot_values, linewidth=0, width=barWidth, bottom=bottom, label=names_list[i])
bottom = bottom + plot_values
if self.horizontal:
ax.set_yticks(np.arange(len(data_keys)))
ax.set_yticklabels(data_keys, rotation=self.rotate_yticks)
if self.scaled_to_100: ax.xaxis.set_major_formatter(FuncFormatter(lambda y, pos: "%d%%" % (y)))
else:
ax.set_xticks(np.arange(len(data_keys)))
ax.set_xticklabels(data_keys, rotation=self.rotate_xticks)
if self.scaled_to_100: ax.yaxis.set_major_formatter(FuncFormatter(lambda y, pos: "%d%%" % (y)))
ax.set_ylabel(self.y_label, fontweight='bold')
ax.set_xlabel(self.x_label, fontweight='bold')
ax.set_title(self.title, fontweight='bold')
if self.show_legend: ax.legend(loc = 'best')
if self.show_grid: plt.grid(linestyle='dotted')
plt.savefig(self.target, bbox_inches='tight')
plt.close('all')
return True
def plot_barplotcomparedstacked(self, files):
"""
Plots a simple barplot but with the different files/runs shown on the x_axis.
Different groups/values are stacked.
**Parameters**:
files : list
list of file paths.
**Returns**:
``True`` if the plot was successfully saved.
``False`` in case something failed.
"""
_, ax = plt.subplots()
file_count, names_list, data_dict = self.get_data_from_files(files)
if len(data_dict) == 0:
self.cc_log("WARNING", "Data length to plot is equal to zero - recheck dataAttribute, groupNameAttribute or threshold!")
return False
data_vals = list(data_dict.values())
data_keys = list(data_dict.keys())
data_to_plot_length = len(data_vals[0])
self.set_color_cycle(len(data_keys), ax)
ind = np.arange(data_to_plot_length) # the x locations for the groups
width = 0.9 # the width of the bars: can also be len(x) sequence
bottom = [0] * data_to_plot_length # init a list with zeros for bottom
plts = []
if self.scaled_to_100: totals = [sum(x) for x in zip(*data_vals)]
for single_data_set in data_vals:
if self.scaled_to_100: single_data_set = [i / j * 100 for i,j in zip(single_data_set, totals)]
if self.horizontal:
plts.append(plt.barh(ind, single_data_set, linewidth=0, height=width, left=bottom))
else:
plts.append(plt.bar(ind, single_data_set, linewidth=0, width=width, bottom=bottom))
for i in range(len(single_data_set)):
bottom[i] = bottom[i] + single_data_set[i]
if self.horizontal:
plt.yticks(ind, names_list, rotation=self.rotate_yticks)
if self.scaled_to_100: ax.xaxis.set_major_formatter(FuncFormatter(lambda y, pos: "%d%%" % (y)))
else:
plt.xticks(ind, names_list, rotation=self.rotate_xticks)
if self.scaled_to_100: ax.yaxis.set_major_formatter(FuncFormatter(lambda y, pos: "%d%%" % (y)))
plt.ylabel(self.y_label, fontweight='bold')
plt.xlabel(self.x_label, fontweight='bold')
plt.title(self.title, fontweight='bold')
if self.show_legend: plt.legend(plts, data_keys, loc='best', bbox_to_anchor=(1, 0.5))
plt.subplots_adjust(right=0.7)
if self.show_grid: plt.grid(linestyle='dotted')
plt.savefig(self.target, bbox_inches='tight')
plt.close('all')
return True
def plot_histogram(self, files):
"""
Plots a histogram.
**Parameters**:
files : list
list of file paths.
**Returns**:
``True`` if the plot was successfully saved.
``False`` in case something failed.
"""
_, ax = plt.subplots()
values_list = []
names_list = []
for file in files:
json_fr = json_file_reader(file)
values = []
while not json_fr.isEOF():
data = json_fr.readRecord()
value = data
for a in self.data_attribute.split('.'):
value = value[a]
# Threshold
if self.threshold and int(value) < int(self.threshold):
continue # Skip this line as its < threshold
values.append(value)
json_fr.close()
values_list.append(values)
names_list.append(os.path.basename(file))
self.set_color_cycle(len(names_list), ax)
ax.hist(values_list, label = names_list, bins=10, edgecolor='white')
ax.set_ylabel(self.y_label, fontweight='bold')
ax.set_xlabel(self.x_label, fontweight='bold')
ax.set_title(self.title, fontweight='bold')
if self.show_legend: ax.legend(loc = 'best')
if self.show_grid: plt.grid(linestyle='dotted')
plt.savefig(self.target, bbox_inches='tight')
plt.close('all')
return True
def get_data_from_files(self, files):
"""
        Gets and extracts the data from the given list of files.
**Parameters**:
files : list
list of filepaths to process.
**Returns**:
            ``file_count, names_list, data_dict`` the number of files, the list of file names, and the grouped data dict, zero-padded where a group has no value for a file
"""
data_dict = {}
names_list = []
file_count = 0
for file in files:
json_fr = json_file_reader(file)
while not json_fr.isEOF():
json_data = json_fr.readRecord()
value = json_data
for a in self.data_attribute.split('.'):
value = value[a]
# Threshold
if self.threshold and int(value) < int(self.threshold):
continue # Skip this line as its < threshold
group_name = json_data
for a in self.group_name_attribute.split('.'):
group_name = group_name[a]
if group_name in data_dict:
data_dict[group_name].append(value)
else:
data_dict[group_name] = [0] * file_count
data_dict[group_name].append(value)
for gn in data_dict:
if len(data_dict[gn]) < file_count+1: # Will not be appended as filecount isnt incremented yet, +1 added
data_dict[gn].append(0)
json_fr.close()
# Add filenames list to names list or extract regex if defined
name = None
if self.filenames_regex_extract:
name = re.search(self.filenames_regex_extract, os.path.basename(file))
if name: name = name.group(0)
if not name: name = os.path.basename(file)
names_list.append(name)
file_count += 1
return file_count, names_list, data_dict
def set_color_cycle(self, amount, ax, colormap_name="tab20"):
"""
Sets the color cycle for the plot according to the amount needed.
**Parameters**:
amount : int
amount of colors needed.
ax : MatplotLib Axes Object
the axes subplot object to set the colors on.
colormap_name : MatplotLib ColorMap
                the colormap to set (more info at: https://matplotlib.org/users/colormaps.html)
Default 'tab20'
"""
if self.color_map: colormap_name = self.color_map
cmap = plt.get_cmap(colormap_name)
ax.set_prop_cycle(plt.cycler('color', cmap(np.linspace(0, 1, amount))))
def get_heat_colormap(self, values, colormap="Reds"):
"""
        Returns a custom heat-ascending colormap according to the given values.
**Parameters**:
values : list
list of the values to normalize and get the heat ascending matplotlib colormap back.
colormap : str
possibility to use a custom heat ascending colormap (Default: Reds)
**Returns**:
``list`` containing a matplotlib color depending on the previous given value.
``None`` if 'colormapAscending' is not configured or False
"""
if self.color_map: colormap = self.color_map
if not self.color_map_ascending: return None
cNorm = colors.Normalize(vmin=min(values), vmax=max(values))
scalarMap = cmx.ScalarMappable(norm=cNorm, cmap=plt.get_cmap(colormap))
return [scalarMap.to_rgba(v) for v in values]
def validate(self, kwargs):
"""
        The validate method checks if all the input arguments are correct.
**Parameters**:
kwargs : dict
                Contains a dict of all the arguments for the bar chart visualisation.
"""
super().validate(kwargs)
self.cc_log("INFO", "Data Visualization Bar: started validation")
if not kwargs.get("type"):
raise ValidationError(self, ["type"], "Parameter cannot be empty!")
if not kwargs.get("dataAttribute"):
raise ValidationError(self, ["dataAttribute"], "Parameter cannot be empty!")
if not kwargs.get("groupNameAttribute") and kwargs.get("type") != "histogram":
raise ValidationError(self, ["groupNameAttribute"], "Parameter cannot be empty!")
if kwargs.get("threshold"):
try:
int(kwargs.get("threshold"))
except:
raise ValidationError(self, ["threshold"], "Parameter has to be an int!")
if kwargs.get("figureSize"):
if not isinstance(kwargs.get("figureSize"), list) or len(kwargs.get("figureSize")) != 2:
raise ValidationError(self, ["figureSize"], "Parameter has to be a list of two (E.g. 20, 10)!")
if kwargs.get("rotateXTicks"):
try:
int(kwargs.get("rotateXTicks"))
except:
raise ValidationError(self, ["rotateXTicks"], "Parameter has to be an int!")
if kwargs.get("rotateYTicks"):
try:
int(kwargs.get("rotateYTicks"))
except:
raise ValidationError(self, ["rotateYTicks"], "Parameter has to be an int!")
if kwargs.get("colormap"):
            if kwargs.get("colormap") not in plt.colormaps(): raise ValidationError(self, ["colormap"], "Colormap must exist, check the matplotlib docs!")
# Optional
#if not kwargs.get("title"):
# raise ValidationError(self, ["title"], "Parameter cannot be empty!")
#if not kwargs.get("ylabel"):
# raise ValidationError(self, ["ylabel"], "Parameter cannot be empty!")
#if not kwargs.get("xlabel"):
# raise ValidationError(self, ["xlabel"], "Parameter cannot be empty!")
#if not kwargs.get("zlabel"):
# raise ValidationError(self, ["zlabel"], "Parameter cannot be empty!")
#if not kwargs.get("horizontal"):
# raise ValidationError(self, ["horizontal"], "Parameter cannot be empty!")
#if not kwargs.get("scaledTo100"):
# raise ValidationError(self, ["scaledTo100"], "Parameter cannot be empty!")
self.cc_log("INFO", "Data Visualization Bar: finished validation")
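The class docstring above documents the accepted configuration keys; a hypothetical kwargs mapping for a grouped bar plot could look like the dict below (values are placeholders, and the source/target settings handled by visualization_base are omitted):

# Illustrative configuration only; keys mirror the attributes documented in the class docstring.
example_bar_kwargs = {
    "type": "groupedbarplot",                 # one of the six supported plot types
    "dataAttribute": "example1.test.val",
    "groupNameAttribute": "example1.test.group",
    "threshold": 10,                          # hide groups with values below 10
    "figureSize": [20, 10],
    "rotateXTicks": 90,
    "colormap": "tab20",
    "horizontal": "false",
    "showGrid": "true",
    "showLegend": "true",
    "xlabel": "group",
    "ylabel": "count",
    "title": "Grouped bar plot",
}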
| 41.086614
| 175
| 0.617555
| 3,290
| 26,090
| 4.761094
| 0.128875
| 0.022983
| 0.013023
| 0.013343
| 0.476251
| 0.448481
| 0.417582
| 0.403601
| 0.363445
| 0.331333
| 0
| 0.00825
| 0.279877
| 26,090
| 635
| 176
| 41.086614
| 0.825474
| 0.305979
| 0
| 0.452096
| 0
| 0
| 0.109569
| 0.003581
| 0
| 0
| 0
| 0
| 0
| 1
| 0.035928
| false
| 0
| 0.038922
| 0
| 0.128743
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|