hexsha string | size int64 | ext string | lang string | max_stars_repo_path string | max_stars_repo_name string | max_stars_repo_head_hexsha string | max_stars_repo_licenses list | max_stars_count int64 | max_stars_repo_stars_event_min_datetime string | max_stars_repo_stars_event_max_datetime string | max_issues_repo_path string | max_issues_repo_name string | max_issues_repo_head_hexsha string | max_issues_repo_licenses list | max_issues_count int64 | max_issues_repo_issues_event_min_datetime string | max_issues_repo_issues_event_max_datetime string | max_forks_repo_path string | max_forks_repo_name string | max_forks_repo_head_hexsha string | max_forks_repo_licenses list | max_forks_count int64 | max_forks_repo_forks_event_min_datetime string | max_forks_repo_forks_event_max_datetime string | content string | avg_line_length float64 | max_line_length int64 | alphanum_fraction float64 | qsc_code_num_words_quality_signal int64 | qsc_code_num_chars_quality_signal float64 | qsc_code_mean_word_length_quality_signal float64 | qsc_code_frac_words_unique_quality_signal float64 | qsc_code_frac_chars_top_2grams_quality_signal float64 | qsc_code_frac_chars_top_3grams_quality_signal float64 | qsc_code_frac_chars_top_4grams_quality_signal float64 | qsc_code_frac_chars_dupe_5grams_quality_signal float64 | qsc_code_frac_chars_dupe_6grams_quality_signal float64 | qsc_code_frac_chars_dupe_7grams_quality_signal float64 | qsc_code_frac_chars_dupe_8grams_quality_signal float64 | qsc_code_frac_chars_dupe_9grams_quality_signal float64 | qsc_code_frac_chars_dupe_10grams_quality_signal float64 | qsc_code_frac_chars_replacement_symbols_quality_signal float64 | qsc_code_frac_chars_digital_quality_signal float64 | qsc_code_frac_chars_whitespace_quality_signal float64 | qsc_code_size_file_byte_quality_signal float64 | qsc_code_num_lines_quality_signal float64 | qsc_code_num_chars_line_max_quality_signal float64 | qsc_code_num_chars_line_mean_quality_signal float64 | qsc_code_frac_chars_alphabet_quality_signal float64 | qsc_code_frac_chars_comments_quality_signal float64 | qsc_code_cate_xml_start_quality_signal float64 | qsc_code_frac_lines_dupe_lines_quality_signal float64 | qsc_code_cate_autogen_quality_signal float64 | qsc_code_frac_lines_long_string_quality_signal float64 | qsc_code_frac_chars_string_length_quality_signal float64 | qsc_code_frac_chars_long_word_length_quality_signal float64 | qsc_code_frac_lines_string_concat_quality_signal float64 | qsc_code_cate_encoded_data_quality_signal float64 | qsc_code_frac_chars_hex_words_quality_signal float64 | qsc_code_frac_lines_prompt_comments_quality_signal float64 | qsc_code_frac_lines_assert_quality_signal float64 | qsc_codepython_cate_ast_quality_signal float64 | qsc_codepython_frac_lines_func_ratio_quality_signal float64 | qsc_codepython_cate_var_zero_quality_signal bool | qsc_codepython_frac_lines_pass_quality_signal float64 | qsc_codepython_frac_lines_import_quality_signal float64 | qsc_codepython_frac_lines_simplefunc_quality_signal float64 | qsc_codepython_score_lines_no_logic_quality_signal float64 | qsc_codepython_frac_lines_print_quality_signal float64 | qsc_code_num_words int64 | qsc_code_num_chars int64 | qsc_code_mean_word_length int64 | qsc_code_frac_words_unique null | qsc_code_frac_chars_top_2grams int64 | qsc_code_frac_chars_top_3grams int64 | qsc_code_frac_chars_top_4grams int64 | qsc_code_frac_chars_dupe_5grams int64 | qsc_code_frac_chars_dupe_6grams int64 | qsc_code_frac_chars_dupe_7grams int64 | qsc_code_frac_chars_dupe_8grams int64 | qsc_code_frac_chars_dupe_9grams int64 | qsc_code_frac_chars_dupe_10grams int64 | qsc_code_frac_chars_replacement_symbols int64 | qsc_code_frac_chars_digital int64 | qsc_code_frac_chars_whitespace int64 | qsc_code_size_file_byte int64 | qsc_code_num_lines int64 | qsc_code_num_chars_line_max int64 | qsc_code_num_chars_line_mean int64 | qsc_code_frac_chars_alphabet int64 | qsc_code_frac_chars_comments int64 | qsc_code_cate_xml_start int64 | qsc_code_frac_lines_dupe_lines int64 | qsc_code_cate_autogen int64 | qsc_code_frac_lines_long_string int64 | qsc_code_frac_chars_string_length int64 | qsc_code_frac_chars_long_word_length int64 | qsc_code_frac_lines_string_concat null | qsc_code_cate_encoded_data int64 | qsc_code_frac_chars_hex_words int64 | qsc_code_frac_lines_prompt_comments int64 | qsc_code_frac_lines_assert int64 | qsc_codepython_cate_ast int64 | qsc_codepython_frac_lines_func_ratio int64 | qsc_codepython_cate_var_zero int64 | qsc_codepython_frac_lines_pass int64 | qsc_codepython_frac_lines_import int64 | qsc_codepython_frac_lines_simplefunc int64 | qsc_codepython_score_lines_no_logic int64 | qsc_codepython_frac_lines_print int64 | effective string | hits int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
dc53cab381b3550c5fbb70d5e5970550c8653ba8 | 10,730 | py | Python | spokenlanguageassessment.py | Shahabks/Speechat | c4cb67b26e117ab53c06aed6c56c2b46998e8193 | [
"MIT"
] | 11 | 2020-04-29T05:30:21.000Z | 2022-01-19T08:15:21.000Z | spokenlanguageassessment.py | Shahabks/Speechat | c4cb67b26e117ab53c06aed6c56c2b46998e8193 | [
"MIT"
] | 1 | 2020-04-29T05:30:54.000Z | 2020-05-06T23:09:19.000Z | spokenlanguageassessment.py | Shahabks/Speechat | c4cb67b26e117ab53c06aed6c56c2b46998e8193 | [
"MIT"
] | 5 | 2020-10-15T10:11:02.000Z | 2022-01-02T01:20:14.000Z | import sys
def my_except_hook(exctype, value, traceback):
print('There has been an error in the system')
sys.excepthook = my_except_hook
import warnings
if not sys.warnoptions:
warnings.simplefilter("ignore")
import parselmouth
from parselmouth.praat import call, run_file
import glob
import errno
import csv,sys
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import time
import os
from subprocess import check_output
from sklearn import preprocessing
import queue
import sounddevice as sd
import soundfile as sf
import _thread
import pickle
from scipy.stats import binom
from scipy.stats import ks_2samp
from scipy.stats import ttest_ind
from pandas import read_csv
pathy = input("Enter the path to the Auto-Speech_Rater directory: ")
name = input("what is your name? ")
t0 = int(input("Your desired Recording time in seconds: "))
levvel=int(input("Pick degree of difficulties between 0 to 100: "))
pa00=pathy+"/"+"dataset"+"/"+"audioFiles"+"/"
pa0=pathy+"/"+"dataset"+"/"+"audioFiles"+"/"+name+".wav"
pa1=pathy+"/"+"dataset"+"/"+"datanewchi22.csv"
pa2=pathy+"/"+"dataset"+"/"+"stats.csv"
pa3=pathy+"/"+"dataset"+"/"+"datacorrP.csv"
pa4=pathy+"/"+"dataset"+"/"+"datanewchi.csv"
pa5=pathy+"/"+"dataset"+"/"+"datanewchi33.csv"
pa6=pathy+"/"+"dataset"+"/"+"datanewchi33.csv"
pa7=pathy+"/"+"dataset"+"/"+"datanewchi44.csv"
pa8=pathy+"/"+"dataset"+"/"+"essen"+"/"+"MLTRNL.praat"
pa9=pathy+"/"+"dataset"+"/"+"essen"+"/"+"myspsolution.praat"
rere=pa0
RECORD_TIME = t0
def countdown(p,q,w):
i=p
j=q
z=w
k=0
while True:
if(j==-1):
j=59
i -=1
if(j > 9):
print(str(k)+str(i)+ " : " +str(j), "\t", end="\r")
else:
print(str(k)+str(i)+" : " + str(k)+str(j), "\t", end="\r")
time.sleep(1)
j -= 1
if(i==0 and j==-1):
break
if(i==0 and j==-1):
if z==0:
huf="Go ahead!"
print(huf)
if z==1:
huf="Time up!"
# time.sleep(1)
print("===========================================")
print("HOLD ON!! get ready, 5 seconds to go!")
print("===========================================")
countdown(0,5,0) # countdown(minutes, seconds, mode): mode 0 prints "Go ahead!", mode 1 prints "Time up!"
q = queue.Queue()
rec_start = int(time.time())
dev_info = sd.query_devices(2,'input')
#dev_info = default.device()
# samplerate = int(dev_info['default_samplerate'])
samplerate = 48000
def data_callback(input_data, frames, time, status):
if status:
print(status, file=sys.stderr)
q.put(input_data.copy())
with sf.SoundFile(rere, mode='x', samplerate=samplerate, channels=2) as file:
with sd.InputStream(samplerate=samplerate, device=2, channels=2, callback=data_callback,blocksize=20500):
rec_time = int(time.time()) - rec_start
_thread.start_new_thread(countdown,(0,t0,1))
while rec_time <= RECORD_TIME:
file.write(q.get())
rec_time = int(time.time()) - rec_start
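# Note on the recording loop above: the InputStream callback runs on a separate
# audio thread and pushes blocks onto the queue `q`; the while loop drains the
# queue into the .wav file -- a simple producer/consumer pattern that avoids
# dropping audio frames while the file is being written.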
result_array = np.empty((0, 100))
path = pa0
files = glob.glob(path)
result_array = np.empty((0, 27))
try:
def mysppron(m,p,q):
sound=m
sourcerun=p
path=q
objects= run_file(sourcerun, -20, 2, 0.3, "yes",sound,path, 80, 400, 0.01, capture_output=True)
print (objects[0]) # This will print the info from the sound object, and objects[0] is a parselmouth.Sound object
z1=str( objects[1]) # This will print the info from the textgrid object, and objects[1] is a parselmouth.Data object with a TextGrid inside
z2=z1.strip().split()
z3=int(z2[13]) # will be the integer number 10
z4=float(z2[14]) # will be the floating point number 8.3
db= binom.rvs(n=10,p=z4,size=10000)
a=np.array(db)
b=np.mean(a)*100/10
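        # E[Binomial(10, p)] = 10*p, so b is a Monte-Carlo estimate of 100*z4,
        # i.e. the score parsed from the Praat report rescaled to a percentage.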
print ("Pronunciation_posteriori_probability_score_percentage= :%.2f" % (b))
return;
def myspp(m,p,q):
sound=m
sourcerun=p
path=q
objects= run_file(sourcerun, -20, 2, 0.3, "yes",sound,path, 80, 400, 0.01, capture_output=True)
print (objects[0]) # This will print the info from the sound object, and objects[0] is a parselmouth.Sound object
z1=str( objects[1]) # This will print the info from the textgrid object, and objects[1] is a parselmouth.Data object with a TextGrid inside
z2=z1.strip().split()
z3=int(z2[13]) # will be the integer number 10
z4=float(z2[14]) # will be the floating point number 8.3
db= binom.rvs(n=10,p=z4,size=10000)
a=np.array(db)
b=np.mean(a)*100/10
return b
def myspgend(m,p,q):
sound=m
sourcerun=p
path=q
objects= run_file(sourcerun, -20, 2, 0.3, "yes",sound,path, 80, 400, 0.01, capture_output=True)
print (objects[0]) # This will print the info from the sound object, and objects[0] is a parselmouth.Sound object
z1=str( objects[1]) # This will print the info from the textgrid object, and objects[1] is a parselmouth.Data object with a TextGrid inside
z2=z1.strip().split()
        z3=float(z2[8]) # parsed as a float
        z4=float(z2[7]) # pitch-like value in Hz, judging by the 114-245 Hz thresholds below
if z4<=114:
g=101
j=3.4
elif z4>114 and z4<=135:
g=128
j=4.35
elif z4>135 and z4<=163:
g=142
j=4.85
elif z4>163 and z4<=197:
g=182
j=2.7
elif z4>197 and z4<=226:
g=213
j=4.5
elif z4>226:
g=239
j=5.3
else:
print("Voice not recognized")
exit()
def teset(a,b,c,d):
d1=np.random.wald(a, 1, 1000)
d2=np.random.wald(b,1,1000)
d3=ks_2samp(d1, d2)
c1=np.random.normal(a,c,1000)
c2=np.random.normal(b,d,1000)
c3=ttest_ind(c1,c2)
y=([d3[0],d3[1],abs(c3[0]),c3[1]])
return y
nn=0
mm=teset(g,j,z4,z3)
while (mm[3]>0.05 and mm[0]>0.04 or nn<5):
mm=teset(g,j,z4,z3)
nn=nn+1
nnn=nn
if mm[3]<=0.09:
mmm=mm[3]
else:
mmm=0.35
if z4>97 and z4<=114:
print("a Male, mood of speech: Showing no emotion, normal, p-value/sample size= :%.2f" % (mmm), (nnn))
elif z4>114 and z4<=135:
print("a Male, mood of speech: Reading, p-value/sample size= :%.2f" % (mmm), (nnn))
elif z4>135 and z4<=163:
print("a Male, mood of speech: speaking passionately, p-value/sample size= :%.2f" % (mmm), (nnn))
elif z4>163 and z4<=197:
print("a female, mood of speech: Showing no emotion, normal, p-value/sample size= :%.2f" % (mmm), (nnn))
elif z4>197 and z4<=226:
print("a female, mood of speech: Reading, p-value/sample size= :%.2f" % (mmm), (nnn))
elif z4>226 and z4<=245:
print("a female, mood of speech: speaking passionately, p-value/sample size= :%.2f" % (mmm), (nnn))
else:
print("Voice not recognized")
for soundi in files:
objects= run_file(pa8, -20, 2, 0.3, "yes", soundi, pa00, 80, 400, 0.01, capture_output=True)
#print (objects[0]) # This will print the info from the sound object, and objects[0] is a parselmouth.Sound object
z1=( objects[1]) # This will print the info from the textgrid object, and objects[1] is a parselmouth.Data object with a TextGrid inside
z3=z1.strip().split()
z2=np.array([z3])
result_array=np.append(result_array,[z3], axis=0)
np.savetxt(pa1,result_array, fmt='%s',delimiter=',')
#Data and features analysis
df = pd.read_csv(pa1,
names = ['avepauseduratin','avelongpause','speakingtot','avenumberofwords','articulationrate','inpro','f1norm','mr','q25',
'q50','q75','std','fmax','fmin','vowelinx1','vowelinx2','formantmean','formantstd','nuofwrds','npause','ins',
'fillerratio','xx','xxx','totsco','xxban','speakingrate'],na_values='?')
scoreMLdataset=df.drop(['xxx','xxban'], axis=1)
scoreMLdataset.to_csv(pa7, header=False,index = False)
newMLdataset=df.drop(['avenumberofwords','f1norm','inpro','q25','q75','vowelinx1','nuofwrds','npause','xx','totsco','xxban','speakingrate','fillerratio'], axis=1)
newMLdataset.to_csv(pa5, header=False,index = False)
namess=nms = ['avepauseduratin','avelongpause','speakingtot','articulationrate','mr',
'q50','std','fmax','fmin','vowelinx2','formantmean','formantstd','ins',
'xxx']
df1 = pd.read_csv(pa5,
names = namess)
df33=df1.drop(['xxx'], axis=1)
array = df33.values
array=np.log(array)
x = array[:,0:13]
print(" ")
print(" ")
print("===========================================")
p=pa0
c=pa9
a=pa00
bi=myspp(p,c,a)
if bi<levvel:
mysppron(p,c,a)
input("Try again, unnatural-sounding speech detected. No further result. Press any key to exit.")
exit()
mysppron(p,c,a)
myspgend(p,c,a)
print(" ")
print(" ")
print("====================================================================================================")
print("HERE ARE THE RESULTS, your spoken language level (speaking skills).")
print("a: just started, a1: beginner, a2: elementary, b1: intermediate, b2: upper intermediate, c: master")
print("====================================================================================================")
filename=pathy+"/"+"dataset"+"/"+"essen"+"/"+"CART_model.sav"
model = pickle.load(open(filename, 'rb'))
predictions = model.predict(x)
print("58% accuracy ",predictions)
#filename=pathy+"/"+"essen"+"/"+"ETC_model.sav"
#model = pickle.load(open(filename, 'rb'))
#predictions = model.predict(x)
#print("70% accuracy ",predictions)
filename=pathy+"/"+"dataset"+"/"+"essen"+"/"+"KNN_model.sav"
model = pickle.load(open(filename, 'rb'))
predictions = model.predict(x)
print("65% accuracy ",predictions)
filename=pathy+"/"+"dataset"+"/"+"essen"+"/"+"LDA_model.sav"
model = pickle.load(open(filename, 'rb'))
predictions = model.predict(x)
print("70% accuracy ",predictions)
filename=pathy+"/"+"dataset"+"/"+"essen"+"/"+"LR_model.sav"
model = pickle.load(open(filename, 'rb'))
predictions = model.predict(x)
print("67% accuracy ",predictions)
filename=pathy+"/"+"dataset"+"/"+"essen"+"/"+"NB_model.sav"
model = pickle.load(open(filename, 'rb'))
predictions = model.predict(x)
print("64% accuracy ",predictions)
#filename=pathy+"/"+"essen"+"/"+"PCA_model.sav"
#model = pickle.load(open(filename, 'rb'))
#predictions = model.predict(x)
#print("70% accuracy ",predictions)
#filename=pathy+"/"+"essen"+"/"+"RFE_model.sav"
#model = pickle.load(open(filename, 'rb'))
#predictions = model.predict(x)
#print("70% accuracy ",predictions)
filename=pathy+"/"+"dataset"+"/"+"essen"+"/"+"SVN_model.sav"
model = pickle.load(open(filename, 'rb'))
predictions = model.predict(x)
print("63% accuracy ",predictions)
except:
print(" ")
print(" ")
print("===========================================")
print("Try again, noisy background or unnatural-sounding speech detected. No result.")
print("===========================================")
input("RECORDING PROCESS IS DONE, press any key to terminate the programe")
| 33.742138 | 163 | 0.62479 | 1,590 | 10,730 | 4.175472 | 0.248428 | 0.030728 | 0.017623 | 0.025757 | 0.47959 | 0.440428 | 0.388914 | 0.381081 | 0.376563 | 0.370688 | 0 | 0.051864 | 0.162628 | 10,730 | 317 | 164 | 33.84858 | 0.687034 | 0.161137 | 0 | 0.310078 | 0 | 0.011628 | 0.288869 | 0.052309 | 0 | 0 | 0 | 0 | 0 | 1 | 0.027132 | false | 0.007752 | 0.085271 | 0 | 0.120155 | 0.155039 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
dc53eefc3c23760c67c49f90ee90173b07008f5c | 4,199 | py | Python | alpha-beta/toexe.py | YaochenS/Gomoku-AI-Agent | 3e298e86ee1e8daa9700e84ed0e326dcee018bda | [
"MIT"
] | null | null | null | alpha-beta/toexe.py | YaochenS/Gomoku-AI-Agent | 3e298e86ee1e8daa9700e84ed0e326dcee018bda | [
"MIT"
] | null | null | null | alpha-beta/toexe.py | YaochenS/Gomoku-AI-Agent | 3e298e86ee1e8daa9700e84ed0e326dcee018bda | [
"MIT"
] | null | null | null | import pisqpipe as pp
import minimax
""" This file is adapt from the example.py file provided. The only thing changed
here is the brain_turn method which uses the minimax with alpha–beta pruning
algorithm now.
"""
MAX_BOARD = 100
board = [[0 for i in range(MAX_BOARD)] for j in range(MAX_BOARD)]
def brain_init():
if pp.width < 5 or pp.height < 5:
pp.pipeOut("ERROR size of the board")
return
if pp.width > MAX_BOARD or pp.height > MAX_BOARD:
pp.pipeOut("ERROR Maximal board size is {}".format(MAX_BOARD))
return
pp.pipeOut("OK")
def brain_restart():
for x in range(pp.width):
for y in range(pp.height):
board[x][y] = 0
pp.pipeOut("OK")
def isFree(x, y):
return x >= 0 and y >= 0 and x < pp.width and y < pp.height and board[x][y] == 0
def brain_my(x, y):
if isFree(x, y):
board[x][y] = 1
else:
pp.pipeOut("ERROR my move [{},{}]".format(x, y))
def brain_opponents(x, y):
if isFree(x, y):
board[x][y] = 2
else:
pp.pipeOut("ERROR opponents's move [{},{}]".format(x, y))
def brain_block(x, y):
if isFree(x, y):
board[x][y] = 3
else:
pp.pipeOut("ERROR winning move [{},{}]".format(x, y))
def brain_takeback(x, y):
if x >= 0 and y >= 0 and x < pp.width and y < pp.height and board[x][y] != 0:
board[x][y] = 0
return 0
return 2
""" In this method, we first construct a tree with the current board,
given that no action taken, brain turn as 1 and the expansion number
as 7. If the root is not none, action is given by the getValue function
in the minimax while if the root is none (there is no suitable position),
the brain simply will make the next step in the middle of the board.
"""
def brain_turn():
root = minimax.PlantATree(board, None, 1, 10)
if root is not None:
theV, action = minimax.getValue(root, float("-inf"), float("inf"))
pp.do_mymove(action[0], action[1])
else:
pp.do_mymove(10, 10)
def brain_end():
pass
def brain_about():
pp.pipeOut(pp.infotext)
#if DEBUG_EVAL:
#import win32gui
#def brain_eval(x, y):
# TODO check if it works as expected
#wnd = win32gui.GetForegroundWindow()
#dc = win32gui.GetDC(wnd)
#rc = win32gui.GetClientRect(wnd)
#c = str(board[x][y])
#win32gui.ExtTextOut(dc, rc[2] - 15, 3, 0, None, c, ())
#win32gui.ReleaseDC(wnd, dc)
######################################################################
# A possible way how to debug brains.
# To test it, just "uncomment" it (delete enclosing """)
######################################################################
"""
# define a file for logging ...
DEBUG_LOGFILE = "/tmp/pbrain-pyrandom.log"
# ...and clear it initially
with open(DEBUG_LOGFILE,"w") as f:
pass
# define a function for writing messages to the file
def logDebug(msg):
with open(DEBUG_LOGFILE,"a") as f:
f.write(msg+"\n")
f.flush()
# define a function to get exception traceback
def logTraceBack():
import traceback
with open(DEBUG_LOGFILE,"a") as f:
traceback.print_exc(file=f)
f.flush()
raise
# use logDebug wherever
# use try-except (with logTraceBack in except branch) to get exception info
# an example of problematic function
def brain_turn():
logDebug("some message 1")
try:
logDebug("some message 2")
1. / 0. # some code raising an exception
logDebug("some message 3") # not logged, as it is after error
except:
logTraceBack()
"""
######################################################################
# "overwrites" functions in pisqpipe module
pp.brain_init = brain_init
pp.brain_restart = brain_restart
pp.brain_my = brain_my
pp.brain_opponents = brain_opponents
pp.brain_block = brain_block
pp.brain_takeback = brain_takeback
pp.brain_turn = brain_turn
pp.brain_end = brain_end
pp.brain_about = brain_about
#if DEBUG_EVAL:
#pp.brain_eval = brain_eval
def main():
pp.main()
if __name__ == "__main__":
main()
| 26.575949 | 85 | 0.589188 | 611 | 4,199 | 3.96072 | 0.302782 | 0.016529 | 0.023141 | 0.013223 | 0.102066 | 0.102066 | 0.077273 | 0.057438 | 0.057438 | 0.033884 | 0 | 0.016492 | 0.249107 | 4,199 | 157 | 86 | 26.745223 | 0.750714 | 0.107407 | 0 | 0.206349 | 0 | 0 | 0.074837 | 0 | 0 | 0 | 0 | 0.006369 | 0 | 1 | 0.174603 | false | 0.015873 | 0.031746 | 0.015873 | 0.285714 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
dc552e9ec086aa0bae663a63fe2fb64055acb058 | 5,518 | py | Python | mod/MyBankAcc.py | dev-mikevvl-ms/PyDev.05.HW | 5689ddb03e74e367ae75b7ed7a35944e6a601549 | [
"BSD-3-Clause"
] | null | null | null | mod/MyBankAcc.py | dev-mikevvl-ms/PyDev.05.HW | 5689ddb03e74e367ae75b7ed7a35944e6a601549 | [
"BSD-3-Clause"
] | null | null | null | mod/MyBankAcc.py | dev-mikevvl-ms/PyDev.05.HW | 5689ddb03e74e367ae75b7ed7a35944e6a601549 | [
"BSD-3-Clause"
] | null | null | null | import copy, sys
from mod.MVVlStd import (glSep_s, mInP_FltAVali_fefi, mMenu_c, mSupportsWrite_ca,
mCre_SFrFloat_ff)
# from mod.MVVlStd import glSep_s, mInP_FltAVali_fefi, mMenu_c
mOutStt_d = dict(kAccSum_n=0, kBuyHstT_l=[])
def mA_RefillAcc_ffmp(laSf_o, file=sys.stdout):
    # loAdd_n = mInP_FltAVali_fefi(f' Enter the amount to add to the account\n',
    lo_s = 'a positive number,\n e.g.: (10), (1_000,33), (100.15) or (1000,55)\n'
    loAdd_n = mInP_FltAVali_fefi(f' the deposit amount\n',
laInPTypeFlt_cll=lambda _s: float(_s.replace(',', '.')),
laDfV_s=mCre_SFrFloat_ff(100),
# laInPTypeFlt_cll=float, laDfV_s='100.00',
laAcceptEmptyInPAsDf_b=True, laValiInPMsg_s=lo_s,
        # laAcceptEmptyInPAsDf_b=True, laValiInPMsg_s=f'a positive number, optionally with a decimal point\n',
laVali_cll=lambda _n: 0 <= _n, file=file)[0]
mOutStt_d['kAccSum_n'] += loAdd_n #DVL: input by mInP_FltAVali_fefi
    # print(f"DBG: Balance:({mOutStt_d['kAccSum_n']:.2f}) and {len(mOutStt_d['kBuyHstT_l'])} purchase-history entries")
# lo_s = (f"{loAdd_n:_f}").replace('.', ',', 1).rstrip('0')
    print(f'Deposit of:({mCre_SFrFloat_ff(loAdd_n)}).', file=file)
return loAdd_n
def mA_Buy_ffmp(laSf_o, file=sys.stdout):
if mOutStt_d['kAccSum_n'] <= 0:
print(f"На Вашем счету:({mCre_SFrFloat_ff(mOutStt_d['kAccSum_n'])}) <= 0.",
' Пополните счет, пожалуйста.', sep='\n', file=file)
return (None, None)
    lo_s = 'a positive number,\n e.g.: (10), (1_000,33), (100.15) or (1000,55)\n'
    loCost_n = mInP_FltAVali_fefi((" the purchase amount (your balance:" +
              f"{mCre_SFrFloat_ff(mOutStt_d['kAccSum_n'])})\n"),
laInPTypeFlt_cll=lambda _s: float(_s.replace(',', '.')),
laDfV_s=mCre_SFrFloat_ff(min(100.00, mOutStt_d['kAccSum_n'])),
# laDfV_s=(f"{min(100.00, mOutStt_d['kAccSum_n']):_f}").replace('.', ',', 1).rstrip('0'),
# laDfV_s=f"{min(100.00, mOutStt_d['kAccSum_n']):.2f}",
laAcceptEmptyInPAsDf_b=True, laValiInPMsg_s=lo_s,
laVali_cll=lambda _n: 0 <= _n, file=file)[0]
if mOutStt_d['kAccSum_n'] < loCost_n: #DVL: input by mInP_FltAVali_fefi
print(f"Денег на Вашем счету:({mCre_SFrFloat_ff(mOutStt_d['kAccSum_n'])})",
f' не хватает для покупки на сумму:({mCre_SFrFloat_ff(loCost_n)}).',
' Пополните счет, пожалуйста.', sep='\n', file=file)
return (None, loCost_n)
    loDesc_s = mInP_FltAVali_fefi(f' the purchase name\n', laInPTypeFlt_cll=None,
              laDfV_s="Food", laAcceptEmptyInPAsDf_b=True, file=file)[0]
    # print(f"DBG: Balance:({mOutStt_d['kAccSum_n']}) and {len(mOutStt_d['kBuyHstT_l'])} purchase-history entries")
mOutStt_d['kAccSum_n'] -= loCost_n
mOutStt_d['kBuyHstT_l'].append((loDesc_s, loCost_n)) #DVL: input by mInP_FltAVali_fefi
    # print(f"DBG: Balance:({mOutStt_d['kAccSum_n']}) and {len(mOutStt_d['kBuyHstT_l'])} purchase-history entries")
    print(f'Purchase: "{loDesc_s}", amount:({mCre_SFrFloat_ff(loCost_n)}).', file=file)
return (loDesc_s, loCost_n)
def mA_VieHst_ffmp(laSf_o, file=sys.stdout):
print(f"История покупок (всего {len(mOutStt_d['kBuyHstT_l'])} зап.):",
*enumerate(mOutStt_d['kBuyHstT_l'], 1), '', sep='\n', file=file)
return (mOutStt_d['kAccSum_n'], len(mOutStt_d['kBuyHstT_l']))
def mA_Exit_fm(laSf_o, file=sys.stdout):
laSf_o.fRunLoop_b = False
# tMenu_d = {'1':('Deposit', mA_RefillAcc_ffmp, ??Type??(Exit, Back, SbMenu, CtrlVieMenu??)),
#            '2':('Purchase', tBuy_fm),
#            '3':('Purchase history', tVieHst_fm),
#            '4':('Exit', None)}
# def mOuP_Stt_fmp(laSf_o:mod.MVVlStd.mMenu_ca, file:mod.MVVlStd.mSupportsWrite_ca=sys.stdout):
def mOuP_Stt_fmp(laSf_o:mMenu_c, file:mSupportsWrite_ca=sys.stdout):
# def mOuP_Stt_fmp(laSf_o, file=sys.stdout):
# 2Do: CheExs(kAccSum_n, kBuyHstT_l)
if 'kAccSum_n' in mOutStt_d and 'kBuyHstT_l' in mOutStt_d:
print(f"На счету:({mCre_SFrFloat_ff(mOutStt_d['kAccSum_n'])})",
f"и в истории покупок {len(mOutStt_d['kBuyHstT_l'])} зап.",
# glSep_s[:len(glSep_s)//3 *2], sep='\n',
file=file)
def main(laArgs: list[str], *laArg_l, **laKwArg_d) -> dict:
    ''' Arg laKMenuCrePP_d: dict of base parameters used to create all menus (into self).
    Will be used with copy.deepcopy.
    '''
# Ww:laArgs(sys.argv[1:])
if 'laKMenuCrePP_d' in laKwArg_d:
loKwArg_d = copy.deepcopy(dict(laKwArg_d['laKMenuCrePP_d']))
else: loKwArg_d = {}
    loAppDesc_s = 'My bank account'
# loKwArg_d.update(dict(fOutStt_d=mOutStt_d, fPrnOutStt_cll=mOuP_Stt_fmp,
# fHeaFmt_s= glSep_s + f'\n{loAppDesc_s}:'))
loKwArg_d.update(dict(fPrnOutStt_cll=mOuP_Stt_fmp,
fHeaFmt_s= glSep_s + f'\n{loAppDesc_s}:'))
''' Arg laKMenuCrePP_d=dict(PP 4 Upd:PP(Cre All Menu In2(Sf)))
'''
# # Ww:laArgs(sys.argv[1:])
# loKwArg_d = dict(fOutStt_d=mOutStt_d, fPrnOutStt_cll=mOuP_Stt_fmp,
    # fHeaFmt_s= glSep_s + '\nMy bank account:')
# if 'laKMenuCrePP_d' in laKwArg_d:
# loKwArg_d.update(laKwArg_d['laKMenuCrePP_d'])
    loMenu_o = mMenu_c({1:('Deposit', mA_RefillAcc_ffmp),
                        '2':('Purchase', mA_Buy_ffmp),
                        '3':('Purchase history', mA_VieHst_ffmp),
                        # 'E':('Exit', mA_Exit_fm),
                        '4':('Exit', mA_Exit_fm)
}, **loKwArg_d)
    # HeaFmt_s= glSep_s[:len(glSep_s)//3 *2] + '\nMy bank account:')
# loMenu_o = mMenu_c()
# loMenu_o.add_Itm?_ffm(...)
# loRes_o = loMenu_o.run_ffpm()
loRes_o = loMenu_o()
# loRes_o = mMenu_c(...)()
print(f'DVL:loRes_o:', *loRes_o, '', sep='\n') #DVL
return loRes_o
if __name__ == '__main__':
import sys
# main(sys.argv[1:])
main(None)
| 46.369748 | 114 | 0.676151 | 874 | 5,518 | 3.965675 | 0.200229 | 0.064628 | 0.064916 | 0.069244 | 0.584824 | 0.528563 | 0.445759 | 0.380554 | 0.361512 | 0.30929 | 0 | 0.020374 | 0.146067 | 5,518 | 118 | 115 | 46.762712 | 0.715195 | 0.355564 | 0 | 0.151515 | 0 | 0.030303 | 0.303348 | 0.105386 | 0 | 0 | 0 | 0 | 0 | 1 | 0.090909 | false | 0 | 0.045455 | 0 | 0.227273 | 0.106061 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
dc55b9831e899022f7368c70886367fbd51ebde9 | 3,800 | py | Python | Exercise 10/exercise_code/networks/segmentation_nn.py | CornellLenard/Deep-Learning-Course-Exercises | db32f2b9ab93a50580e93e9dd83be1db7c4c4a19 | [
"MIT"
] | null | null | null | Exercise 10/exercise_code/networks/segmentation_nn.py | CornellLenard/Deep-Learning-Course-Exercises | db32f2b9ab93a50580e93e9dd83be1db7c4c4a19 | [
"MIT"
] | null | null | null | Exercise 10/exercise_code/networks/segmentation_nn.py | CornellLenard/Deep-Learning-Course-Exercises | db32f2b9ab93a50580e93e9dd83be1db7c4c4a19 | [
"MIT"
] | null | null | null | """SegmentationNN"""
import torch
import torch.nn as nn
from torchvision import models
class SegmentationNN(nn.Module):
def __init__(self, num_classes=23, hparams=None):
super().__init__()
self.hparams = hparams
self.num_classes = num_classes
#######################################################################
# YOUR CODE #
#######################################################################
# The encoder part
self.encoder = models.alexnet(pretrained=True).features
# The decoder part
self.decoder = nn.Sequential(
nn.Conv2d(256, 4096, kernel_size=1, padding=0, stride=1),
nn.BatchNorm2d(4096),
nn.ReLU(),
nn.Dropout(p=0.2),
nn.Upsample(scale_factor=8, mode="bilinear"),
nn.Conv2d(4096, 256, kernel_size=1, padding=0, stride=1),
nn.BatchNorm2d(256),
nn.ReLU(),
nn.Dropout(p=0.2),
nn.Upsample(scale_factor=5, mode="bilinear"),
nn.Conv2d(256, self.num_classes, kernel_size=3, padding=1, stride=1),
nn.BatchNorm2d(self.num_classes),
nn.ReLU(),
nn.Dropout(p=0.2),
nn.Conv2d(self.num_classes, self.num_classes, kernel_size=3, padding=1, stride=1),
)
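        # Note: the x8 then x5 bilinear upsampling implies 240x240 inputs --
        # AlexNet's feature extractor maps a 240x240 image down to 6x6 feature
        # maps, and 6 * 8 * 5 = 240 restores the original resolution.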
self.initialize()
def initialize(self):
for m in self.decoder.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_uniform_(m.weight, nonlinearity="relu")
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
#######################################################################
# END OF YOUR CODE #
#######################################################################
def forward(self, x):
"""
Forward pass of the convolutional neural network. Should not be called
manually but by calling a model instance directly.
Inputs:
- x: PyTorch input Variable
"""
#######################################################################
# YOUR CODE #
#######################################################################
x = self.encoder(x)
x = self.decoder(x)
#######################################################################
# END OF YOUR CODE #
#######################################################################
return x
@property
def is_cuda(self):
"""
Check if model parameters are allocated on the GPU.
"""
return next(self.parameters()).is_cuda
def save(self, path):
"""
Save model with its parameters to the given path. Conventionally the
path should end with "*.model".
Inputs:
- path: path string
"""
print('Saving model... %s' % path)
torch.save(self, path)
class DummySegmentationModel(nn.Module):
def __init__(self, target_image):
super().__init__()
def _to_one_hot(y, num_classes):
scatter_dim = len(y.size())
y_tensor = y.view(*y.size(), -1)
zeros = torch.zeros(*y.size(), num_classes, dtype=y.dtype)
return zeros.scatter(scatter_dim, y_tensor, 1)
target_image[target_image == -1] = 1
self.prediction = _to_one_hot(target_image, 23).permute(2, 0, 1).unsqueeze(0)
def forward(self, x):
return self.prediction.float()
| 33.928571 | 94 | 0.441579 | 367 | 3,800 | 4.433243 | 0.351499 | 0.055317 | 0.051629 | 0.036878 | 0.180701 | 0.157345 | 0.157345 | 0.157345 | 0.145052 | 0.097111 | 0 | 0.025829 | 0.317368 | 3,800 | 111 | 95 | 34.234234 | 0.601388 | 0.168421 | 0 | 0.178571 | 0 | 0 | 0.015619 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.142857 | false | 0 | 0.053571 | 0.017857 | 0.303571 | 0.017857 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
dc5b9bdbf31b2b0988680ab52acfcb750fb29506 | 1,464 | py | Python | bridger/display/formatting.py | intellineers/django-bridger | ed097984a99df7da40a4d01bd00c56e3c6083056 | [
"BSD-3-Clause"
] | 2 | 2020-03-17T00:53:23.000Z | 2020-07-16T07:00:33.000Z | bridger/display/formatting.py | intellineers/django-bridger | ed097984a99df7da40a4d01bd00c56e3c6083056 | [
"BSD-3-Clause"
] | 76 | 2019-12-05T01:15:57.000Z | 2021-09-07T16:47:27.000Z | bridger/display/formatting.py | intellineers/django-bridger | ed097984a99df7da40a4d01bd00c56e3c6083056 | [
"BSD-3-Clause"
] | 1 | 2020-02-05T15:09:47.000Z | 2020-02-05T15:09:47.000Z | from dataclasses import dataclass
from typing import Dict, List, Union
from bridger.enums import Operator
@dataclass(unsafe_hash=True)
class Condition:
operator: Operator
value: Union[str, float, int, bool]
def __post_init__(self):
if self.operator == Operator.EXISTS:
assert isinstance(self.value, bool), f"{Operator.EXISTS.value} is only compatible with bool"
@dataclass(unsafe_hash=True)
class FormattingRule:
icon: str = None
style: Dict = None
condition: Condition = None
def __post_init__(self):
assert self.icon or self.style, "icon and style cannot both be None."
def __iter__(self):
yield "icon", self.icon
yield "style", self.style
if self.condition:
if isinstance(self.condition, tuple):
yield "condition", self.condition
else:
yield "condition", (self.condition.operator.value, self.condition.value)
@dataclass(unsafe_hash=True)
class Formatting:
formatting_rules: List[FormattingRule]
column: str = None
def __post_init__(self):
if self.column is None:
assert all(
[not bool(rule.condition) for rule in self.formatting_rules]
), "Specifying conditions, without a reference column is not possible."
def __iter__(self):
yield "column", self.column
yield "formatting_rules", [dict(rule) for rule in self.formatting_rules]
| 29.28 | 104 | 0.661202 | 178 | 1,464 | 5.269663 | 0.342697 | 0.069296 | 0.060768 | 0.073561 | 0.21855 | 0.104478 | 0 | 0 | 0 | 0 | 0 | 0 | 0.249317 | 1,464 | 49 | 105 | 29.877551 | 0.853503 | 0 | 0 | 0.216216 | 0 | 0 | 0.137978 | 0.01571 | 0 | 0 | 0 | 0 | 0.081081 | 1 | 0.135135 | false | 0 | 0.081081 | 0 | 0.486486 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
dc5cbc73303e165afc5465203bd96aba744b5f31 | 7,585 | py | Python | Main_ES.py | MessireToaster/CoEvolution | 965050f0374bbe6f6d33b371c582a5485bd22410 | [
"Apache-2.0"
] | 2 | 2020-07-09T16:28:21.000Z | 2020-07-29T08:07:19.000Z | Main_ES.py | JeremyF-141592/CoEvolution | 965050f0374bbe6f6d33b371c582a5485bd22410 | [
"Apache-2.0"
] | null | null | null | Main_ES.py | JeremyF-141592/CoEvolution | 965050f0374bbe6f6d33b371c582a5485bd22410 | [
"Apache-2.0"
] | null | null | null | """
Evolution Strategies (Salimans 2017), evaluated by default on a random set of 20 environments at each iteration.
The environment set can be specified with a pickle file, using --load_env.
"""
from Utils.Loader import resume_from_folder, prepare_folder
from Utils.Stats import bundle_stats, append_stats
from Algorithms.NSGA2.NSGAII_tools import *
from Parameters import Configuration
import ipyparallel as ipp
import argparse
import json
import pickle
import warnings
import os
warnings.filterwarnings("ignore")
Configuration.make()
# Ipyparallel --------------------------------------------------------------------------------------------------
# Local parallelism, make sure that ipcluster is started beforehand otherwise this will raise an error.
Configuration.rc = ipp.Client()
with Configuration.rc[:].sync_imports():
from Parameters import Configuration
Configuration.rc[:].execute("Configuration.make()")
Configuration.lview = Configuration.rc.load_balanced_view()
Configuration.lview.block = True
# Parse arguments ------------------------------------------------------------------------------------------------------
parser = argparse.ArgumentParser(description='Evolution Strategies as in Salimans et al. 2017')
# General
parser.add_argument('--T', type=int, default=400, help='Iterations limit')
parser.add_argument('--resume_from', type=str, default="", help="Resume execution from folder.")
parser.add_argument('--save_to', type=str, default="./ES_execution", help="Execution save-to folder.")
parser.add_argument('--save_mode', type=str, default="all", help="Specify save mode among ['all', 'last', N] where N is"
"a number corresponding the saving's interval.")
parser.add_argument('--verbose', type=int, default=0, help="Print information.")
parser.add_argument('--max_budget', type=int, default=-1, help="Maximum number of environment evaluations.")
# Population
parser.add_argument('--pop_size', type=int, default=100, help='Population size')
parser.add_argument('--pop_env_size', type=int, default=20, help='Environment Population size')
parser.add_argument('--load_env', type=str, default="", help='Path to pickled environment')
# Local optimization
parser.add_argument('--lr_init', type=float, default=0.01, help="Learning rate initial value")
parser.add_argument('--lr_decay', type=float, default=0.9999, help="Learning rate decay")
parser.add_argument('--lr_limit', type=float, default=0.001, help="Learning rate limit")
parser.add_argument('--noise_std', type=float, default=0.1, help='Noise std for local ES-optimization')
parser.add_argument('--noise_decay', type=float, default=0.999)
parser.add_argument('--noise_limit', type=float, default=0.01)
parser.add_argument('--batch_size', type=int, default=256, help='Batch size for ES gradient descent')
parser.add_argument('--w_decay', type=float, default=0.01, help='Weight decay penalty')
parser.add_argument('--knn', type=int, default=5, help='KNN novelty')
args = parser.parse_args()
# Resume execution -----------------------------------------------------------------------------------------------------
folder = ""
start_from = 0
pop = list()
if args.resume_from != "":
# if we load arguments, args is going to change so we need a variable to store the folder name
folder = args.resume_from
if folder != "":
pop, start_from = resume_from_folder(folder, args)
else:
    prepare_folder(args)  # checks if the folder exists and proposes to erase it
def ES_Step(theta, envs, args):
"""Local optimization by Evolution Strategy steps, rank normalization and weight decay."""
og_weights = theta.get_weights()
shared_gaussian_table = [np.random.normal(0, 1, size=len(og_weights)) for i in range(args.batch_size)]
if theta.get_opt_state() is None:
theta.set_opt_state(Configuration.optimizer.default_state())
if "t" not in theta.get_opt_state().keys():
z = theta.get_opt_state().copy()
z.update({"t": 1})
theta.set_opt_state(z)
sigma = max(args.noise_limit, args.noise_std * args.noise_decay ** theta.get_opt_state()["t"])
thetas = []
for i in range(args.batch_size):
new_theta = Configuration.agentFactory.new()
new_theta.set_weights(og_weights + sigma * shared_gaussian_table[i])
thetas.append(new_theta)
scores = list()
for E in envs:
partial_scores = Configuration.lview.map(E, thetas)
if len(scores) == 0:
scores = partial_scores.copy()
else:
for i in range(len(scores)):
scores[i] += partial_scores[i]
Configuration.budget_spent[-1] += len(thetas)
scores = np.array(scores)
self_score = 0
for E in envs:
self_score += E(theta)
self_score /= len(envs)
for i in range(len(scores)):
scores[i] -= args.w_decay * np.linalg.norm(og_weights + sigma * shared_gaussian_table[i])
scores = rank_normalize(scores)
summed_weights = np.zeros(og_weights.shape)
for i in range(len(scores)):
summed_weights += scores[i] * shared_gaussian_table[i]
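    # ES gradient estimate (cf. Salimans et al. 2017): the mean of the
    # rank-normalized scores times their noise directions. The sign is flipped
    # here, presumably because Configuration.optimizer.step() descends.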
grad_estimate = -(1/(len(shared_gaussian_table))) * summed_weights
step, new_state = Configuration.optimizer.step(grad_estimate, theta.get_opt_state())
new_ag = Configuration.agentFactory.new()
new_ag.set_opt_state(new_state)
new_ag.set_weights(og_weights + step)
return new_ag, self_score
def rank_normalize(arr):
asorted = arr.argsort()
linsp = np.linspace(0, 1, num=len(asorted))
res = np.zeros(len(asorted))
for i in range(len(asorted)):
res[asorted[i]] = linsp[i]
return 2*res - 1
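# Worked example: rank_normalize(np.array([3.0, 1.0, 2.0])) -> [1.0, -1.0, 0.0]
# -- the best score maps to +1, the worst to -1, evenly spaced in between.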
envs = list()
default = True
if os.path.exists(args.load_env):
with open(args.load_env, "rb") as f:
envs = pickle.load(f)
default = False
# ES Algorithm ---------------------------------------------------------------------------------------------------------
if len(pop) == 0:
pop.append(Configuration.agentFactory.new())
for t in range(start_from, args.T):
print(f"Iteration {t} ...", flush=True)
Configuration.budget_spent.append(0)
if default:
envs = list()
for i in range(args.pop_env_size):
ev = Configuration.envFactory.new()
for j in range(30):
ev = ev.get_child()
envs.append(ev)
ag, sc = ES_Step(pop[0], envs, args)
pop = [ag]
# Save execution ----------------------------------------------------------------------------------
remove_previous = False
if args.save_mode == "last" and t > 0:
remove_previous = True
if args.save_mode.isdigit():
remove_previous = True
if t % int(args.save_mode) == 0:
remove_previous = False
if remove_previous:
os.remove(f'{args.save_to}/Iteration_{t - 1}.pickle')
with open(f'{args.save_to}/Iteration_{t}.pickle', 'wb') as f:
pickle.dump(pop, f)
with open(f"{args.save_to}/TotalBudget.json", 'w') as f:
budget_dic = dict()
budget_dic["Budget_per_step"] = Configuration.budget_spent
budget_dic["Total"] = sum(Configuration.budget_spent)
json.dump(budget_dic, f)
bundle = bundle_stats(pop, envs)
bundle["Fitness"] = sc
append_stats(f"{args.save_to}/Stats.json", bundle)
if args.verbose > 0:
print(f"\tExecution saved at {args.save_to}.")
if 0 < args.max_budget < sum(Configuration.budget_spent):
print(f"\nMaximum budget exceeded : {sum(Configuration.budget_spent)} > {args.max_budget}.\n")
break
| 39.712042 | 120 | 0.641793 | 1,003 | 7,585 | 4.696909 | 0.260219 | 0.034388 | 0.064954 | 0.02526 | 0.115687 | 0.064742 | 0.036086 | 0.011463 | 0 | 0 | 0 | 0.011317 | 0.172841 | 7,585 | 190 | 121 | 39.921053 | 0.73956 | 0.14766 | 0 | 0.107143 | 0 | 0 | 0.161596 | 0.02344 | 0 | 0 | 0 | 0 | 0 | 1 | 0.014286 | false | 0 | 0.085714 | 0 | 0.114286 | 0.021429 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
dc5fc573160c85c33e630df03ca8bf191e6e605f | 4,748 | py | Python | pypelines/task.py | Yash-Amin/pypelines | a803f1a8cada6113660532eedc5cfa76cbb38988 | [
"MIT"
] | null | null | null | pypelines/task.py | Yash-Amin/pypelines | a803f1a8cada6113660532eedc5cfa76cbb38988 | [
"MIT"
] | null | null | null | pypelines/task.py | Yash-Amin/pypelines | a803f1a8cada6113660532eedc5cfa76cbb38988 | [
"MIT"
] | null | null | null | """Abstract class for PipelineTask"""
from dataclasses import dataclass
from typing import Any, Callable, Dict, List
from pypelines import utils
from pypelines.pipeline_options import PipelineOptions
@dataclass
class TaskInputSchema:
"""Schema for task input"""
name: str
default_value: str = None
allow_parameters: bool = True
allowed_values: List[str] = None
description: str = None
# If you want to type check your inputs, set value_type in your
# task_input_schema of the task class.
# For example: if you want specific input to be integer only,
# use `value_type=int`, if you want input to be boolean only,
# use `value_type=string_to_bool`.
value_type: Callable = None
# if required is true and value is None then validation will fail
# but if required is false it will allow None value
required: bool = True
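    # Illustrative example (hypothetical field name):
    #   TaskInputSchema(name="batch_size", value_type=int, default_value="32",
    #                   description="Number of records per batch")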
class PipelineTask:
"""Base class for pipeline task"""
# Task type
task_type: str = "Task"
# Task name
name: str = None
# Task input schema
task_input_schema: List[TaskInputSchema] = []
def __init__(
self,
# Name of the task
name: str,
# input values required for given tasks
task_input_values: Dict[str, Any],
# Pipeline Parameters
pipeline_parameters: Dict[str, Any],
# PipelineOptions
pipeline_options: PipelineOptions,
# In _extra_parameters, extra/output parameters are stored
        # For example, some output parameters from a previous parent task can be
# passed via _extra_parameters
_extra_parameters: Dict[str, Any],
) -> None:
self.name = name
self.pipeline_options: PipelineOptions = pipeline_options
self.task_input_values: Dict[str, Any] = task_input_values
self._pipeline_parameters: Dict[str, Any] = pipeline_parameters
self._extra_parameters: Dict[str, Any] = _extra_parameters
        # Parameters; any parameter with the same name in _pipeline_parameters
        # will be overridden by _extra_parameters
self.parameters = {**self._pipeline_parameters, **self._extra_parameters}
def get_task_hash(self) -> str:
"""Return task hash.
        Task hash will be used when use-snapshots is true. The task hash will be
stored in the database to avoid re-running the task.
Override this method to provide custom task hash, for example, if you
want to use task inputs as part of the task hash.
"""
return utils.sha256_hash(self.name)
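    # A hypothetical override that also hashes the task's raw input values
    # (illustrative only, not part of this class):
    #
    #     class MyTask(PipelineTask):
    #         def get_task_hash(self) -> str:
    #             return utils.sha256_hash(
    #                 self.name + repr(sorted(self.task_input_values.items()))
    #             )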
def get_parsed_inputs(self) -> Dict[str, Any]:
"""Return parsed task input values."""
unique_input_keys = set([x.name for x in self.task_input_schema])
# If invalid key is provided, then raise error
for key in self.task_input_values:
if key not in unique_input_keys:
raise ValueError(
f"{key} is not a valid input for task {self.task_type}"
)
# Store input values in a dictionary
parsed_input_values: Dict[str, Any] = {}
# Set default values for each input
for task_input in self.task_input_schema:
parsed_input_values[task_input.name] = task_input.default_value
for task_input in self.task_input_schema:
val = self.task_input_values.get(task_input.name)
if val is None:
continue
if task_input.allow_parameters:
val = utils.replace_parameters_from_anything(val, self.parameters)
if task_input.value_type is not None:
try:
val = task_input.value_type(val)
except:
raise Exception(
f"{task_input.name} is not of type {task_input.value_type}"
)
parsed_input_values[task_input.name] = val
# Validate inputs
for task_input in self.task_input_schema:
if task_input.required and parsed_input_values[task_input.name] is None:
raise ValueError(f"{task_input.name} is required but not provided")
return parsed_input_values
def validate_inputs(self) -> None:
"""Validate inputs.
Basic input validation will be performed in the get_parsed_inputs() method
but to provide more complex validation, override this method.
"""
pass
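    # A hypothetical override (illustrative only; "start"/"end" are assumed
    # input names):
    #
    #     def validate_inputs(self) -> None:
    #         inputs = self.get_parsed_inputs()
    #         if inputs["start"] > inputs["end"]:
    #             raise ValueError("start must not be greater than end")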
def set_task_inputs(self) -> None:
"""Set input values.
Override this method to update values of variables from input variable dictionary.
"""
pass
def run(self) -> None:
"""Run the task."""
raise NotImplementedError("Task is not implemented")
| 34.158273 | 90 | 0.643639 | 609 | 4,748 | 4.837438 | 0.231527 | 0.088595 | 0.027155 | 0.025458 | 0.182621 | 0.089613 | 0.033605 | 0.033605 | 0 | 0 | 0 | 0.000888 | 0.288753 | 4,748 | 138 | 91 | 34.405797 | 0.871484 | 0.323926 | 0 | 0.074627 | 0 | 0 | 0.059073 | 0.007507 | 0 | 0 | 0 | 0 | 0 | 1 | 0.089552 | false | 0.029851 | 0.059701 | 0 | 0.358209 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
dc602b055dd520fa32e651e28b5cde3e5b9747bd | 4,370 | py | Python | everything_at_once/model/utils/fusion_transformer.py | ninatu/everything_at_once | b4cd3a70076ea3ea2b40832aa3e2afab50495c47 | [
"BSD-3-Clause"
] | null | null | null | everything_at_once/model/utils/fusion_transformer.py | ninatu/everything_at_once | b4cd3a70076ea3ea2b40832aa3e2afab50495c47 | [
"BSD-3-Clause"
] | null | null | null | everything_at_once/model/utils/fusion_transformer.py | ninatu/everything_at_once | b4cd3a70076ea3ea2b40832aa3e2afab50495c47 | [
"BSD-3-Clause"
] | null | null | null | import collections
from timm.models.vision_transformer import _init_vit_weights, trunc_normal_
import torch.nn as nn
from functools import partial
import torch
from everything_at_once.model.utils.layers import FusionBlock
class FusionTransformer(nn.Module):
def __init__(self, embed_dim=768, depth=1, num_heads=12, mlp_ratio=4., qkv_bias=True,
drop_rate=0., attn_drop_rate=0., drop_path_rate=0., norm_layer=None,
act_layer=None,
use_cls_token=False,
):
super().__init__()
self.embed_dim = embed_dim
if use_cls_token:
self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
else:
self.cls_token = None
self.masking_token = nn.Parameter(torch.zeros(embed_dim))
norm_layer = norm_layer or partial(nn.LayerNorm, eps=1e-6)
act_layer = act_layer or nn.GELU
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)] # stochastic depth decay rule
self.blocks = nn.Sequential(*[
FusionBlock(
dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, drop=drop_rate,
attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer, act_layer=act_layer,
)
for i in range(depth)])
self.norm = norm_layer(embed_dim) # TODO: not needed, remove?
self.init_weights()
def init_weights(self):
trunc_normal_(self.masking_token, std=.02)
if self.cls_token is not None:
trunc_normal_(self.cls_token, std=.02)
self.apply(_init_vit_weights)
def forward(self, text=None, video=None, audio=None):
# concatenate tokens
data = [text, video, audio]
tokens = [x['all_tokens'] for x in data if x is not None]
tokens = torch.cat(tokens, dim=1)
# concatenate attention masks
tokens_mask = [x['attention_mask'] for x in data if x is not None]
tokens_mask = torch.cat(tokens_mask, dim=1)
# concatenate cls token
if self.cls_token is None:
offset = 0
else:
cls_token = self.cls_token.expand(tokens.shape[0], -1, -1)
tokens = torch.cat((cls_token, tokens), dim=1)
cls_token_mask = torch.ones((1, 1)).to(tokens_mask.device).expand(tokens_mask.shape[0], -1)
tokens_mask = torch.cat((cls_token_mask, tokens_mask), dim=1)
offset = 1
for block in self.blocks:
tokens = block(tokens, attention_mask=tokens_mask)
output = collections.OrderedDict()
def _get_average(tokens, attention_mask):
attention_mask = attention_mask.unsqueeze(2).expand_as(tokens)
return (tokens * attention_mask).sum(1) / attention_mask.sum(1)
if text is not None:
n_tokens = text['all_tokens'].size(1)
attention_mask = text['attention_mask']
all_tokens = tokens[:, offset:offset + n_tokens]
offset += n_tokens
output['text'] = {
"all_tokens": all_tokens,
"attention_mask": attention_mask,
}
if video is not None:
n_tokens = video['all_tokens'].size(1)
attention_mask = video['attention_mask']
all_tokens = tokens[:, offset:offset + n_tokens]
offset += n_tokens
output['video'] = {
"all_tokens": all_tokens,
"attention_mask": attention_mask,
}
if audio is not None:
n_tokens = audio['all_tokens'].size(1)
attention_mask = audio['attention_mask']
all_tokens = tokens[:, offset: offset + n_tokens]
offset += n_tokens
output['audio'] = {
"all_tokens": all_tokens,
"attention_mask": attention_mask,
}
if self.cls_token is None:
for key, value in output.items():
output[key]['embed'] = _get_average(value["all_tokens"], value['attention_mask'])
else:
modalities = list(output.keys())
modalities = '_'.join(modalities)
if modalities not in output:
output[modalities] = {}
output[modalities]['embed'] = tokens[:, 0]
return output
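# Minimal smoke test (illustrative; dict keys and tensor shapes are inferred
# from forward() above, and FusionBlock is assumed to preserve token shapes):
if __name__ == "__main__":
    ft = FusionTransformer(embed_dim=8, depth=1, num_heads=2)
    text = {"all_tokens": torch.zeros(2, 3, 8), "attention_mask": torch.ones(2, 3)}
    video = {"all_tokens": torch.zeros(2, 4, 8), "attention_mask": torch.ones(2, 4)}
    out = ft(text=text, video=video)
    print(out["text"]["embed"].shape)  # expected: torch.Size([2, 8])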
| 36.722689 | 107 | 0.592449 | 550 | 4,370 | 4.452727 | 0.227273 | 0.106166 | 0.0343 | 0.053083 | 0.283381 | 0.208657 | 0.160065 | 0.160065 | 0.160065 | 0.103716 | 0 | 0.012825 | 0.304119 | 4,370 | 118 | 108 | 37.033898 | 0.792502 | 0.027918 | 0 | 0.184783 | 0 | 0 | 0.051155 | 0 | 0 | 0 | 0 | 0.008475 | 0 | 1 | 0.043478 | false | 0 | 0.065217 | 0 | 0.141304 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
dc648939857f1c7569eb038a6c4f655e94e9cfe6 | 7,141 | py | Python | test/test_mains.py | sriz1/mudslide | 78aa8a1bda4080eacd777da7ff6bcbfd9afe129c | [
"MIT"
] | 4 | 2020-09-05T00:17:27.000Z | 2022-01-25T19:44:32.000Z | test/test_mains.py | sriz1/mudslide | 78aa8a1bda4080eacd777da7ff6bcbfd9afe129c | [
"MIT"
] | null | null | null | test/test_mains.py | sriz1/mudslide | 78aa8a1bda4080eacd777da7ff6bcbfd9afe129c | [
"MIT"
] | 6 | 2020-11-20T15:42:03.000Z | 2022-02-10T02:43:29.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import unittest
import sys
import os
import mudslide
import mudslide.__main__
import mudslide.surface
testdir = os.path.dirname(__file__)
def print_problem(problem, file=sys.stdout):
what = problem["what"]
if what == "incorrect data":
where = problem["where"]
line1 = problem["a"]
line2 = problem["b"]
print("files differ at column: %s" % (", ".join([str(x) for x in where])), file=file)
print("< %s" % (line1.rstrip()), file=file)
print("> %s" % (line2.rstrip()), file=file)
else:
print(what, file=file)
def compare_line_by_line(f1, f2, typespec, tol=1e-3):
"""Compare two files line by line
:param f1: file like object to iterate over lines for file 1
:param f2: file like object to iterate over lines for file 2
:param types: list of f (float), d (integer), s (string)
:param tol: floating point tolerance
:returns: [ problems ]
"""
def compare(x, y, typekey):
if typekey == "f":
return abs(x-y) < tol
elif typekey == "d":
return x == y
elif typekey == "s":
return x == y
else:
raise Exception("only float, integer, and string comparisons allowed right now")
types = { "f" : float, "d" : int, "s" : str }
typelist = [ types[x] for x in typespec ]
failed = False
problems = []
for l1, l2 in zip(f1, f2):
if l1[0] == '#' and l2[0] == '#': continue
ldata = [ typ(x) for x, typ in zip(l1.split(), typelist) ]
rdata = [ typ(x) for x, typ in zip(l2.split(), typelist) ]
lineproblems = []
for i in range(len(ldata)):
if not compare(ldata[i], rdata[i], typespec[i]):
lineproblems.append(i)
if lineproblems:
problems.append( { "what" : "incorrect data", "where": lineproblems, "a": l1, "b": l2 } )
try:
next(f1) # this should throw
problems.append( { "what" : "file1 is longer than file2" } )
except StopIteration:
pass
try:
next(f2) # this should throw
problems.append( { "what" : "file2 is longer than file1" } )
except StopIteration:
pass
return problems
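# Example: floats within tol and equal integers produce no problems, e.g.
#   compare_line_by_line(iter(["1.0 2\n"]), iter(["1.0005 2\n"]), "fd") == []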
class TrajectoryTest(object):
samples = 1
method = "fssh"
x = -10
dt = 5
n = 1
seed = 200
o = "single"
j = 1
electronic = "exp"
def capture_traj_problems(self, k, tol, extra_options = []):
options = "-s {0:d} -m {1:s} -k {2:f} {2:f} -x {3:f} --dt {4:f} -n {5:d} -z {6:d} -o {7:s} -j {8:d} -a {9:s} --electronic {10:s}".format(self.samples, self.model, k, self.x, self.dt, self.n, self.seed, self.o, self.j, self.method, self.electronic).split()
options += extra_options
checkdir = os.path.join(testdir, "checks", self.method)
os.makedirs(checkdir, exist_ok=True)
outfile = os.path.join(checkdir, "{:s}_k{:d}.out".format(self.model, k))
with open(outfile, "w") as f:
mudslide.__main__.main(options, f)
if self.o == "single":
form = "f" * (6 + 2*self.nstate) + "df"
elif self.o == "averaged":
form = "ffff"
reffile = os.path.join(testdir, "ref", self.method, "{:s}_k{:d}.ref".format(self.model, k))
with open(reffile) as ref, open(outfile) as out:
problems = compare_line_by_line(ref, out, form, tol)
for p in problems:
print_problem(p)
return problems
class TestTSAC(unittest.TestCase, TrajectoryTest):
"""Test Suite for tully simple avoided crossing"""
model = "simple"
nstate = 2
def test_tsac(self):
for k in [8, 14, 20]:
with self.subTest(k=k):
probs = self.capture_traj_problems(k, 1e-3)
self.assertEqual(len(probs), 0)
class TestDual(unittest.TestCase, TrajectoryTest):
"""Test Suite for tully dual avoided crossing"""
model = "dual"
nstate = 2
def test_dual(self):
for k in [20, 50, 100]:
with self.subTest(k=k):
probs = self.capture_traj_problems(k, 1e-3)
self.assertEqual(len(probs), 0)
class TestExtended(unittest.TestCase, TrajectoryTest):
"""Test Suite for tully dual avoided crossing"""
model = "extended"
nstate = 2
def test_extended(self):
for k in [10, 15, 20]:
with self.subTest(k=k):
probs = self.capture_traj_problems(k, 1e-3)
self.assertEqual(len(probs), 0)
class TestTSACc(unittest.TestCase, TrajectoryTest):
"""Test Suite for tully simple avoided crossing with cumulative hopping"""
model = "simple"
nstate = 2
seed = 756396545
method = "cumulative-sh"
electronic = "linear-rk4"
def test_tsac_c(self):
for k in [10, 20]:
with self.subTest(k=k):
probs = self.capture_traj_problems(k, 1e-3)
self.assertEqual(len(probs), 0)
class TestEhrenfest(unittest.TestCase, TrajectoryTest):
"""Test suite for ehrenfest trajectory"""
model = "simple"
nstate = 2
method = "ehrenfest"
def test_ehrenfest(self):
k = 15
probs = self.capture_traj_problems(k, 1e-3)
self.assertEqual(len(probs), 0)
class TestES(unittest.TestCase, TrajectoryTest):
"""Test Suite for tully simple avoided crossing with cumulative hopping"""
model = "simple"
nstate = 2
dt = 20
seed = 84329
method = "even-sampling"
o = "averaged"
def test_es_tsac(self):
for k in [10, 20]:
with self.subTest(k=k):
probs = self.capture_traj_problems(k, 1e-3, extra_options=["--sample-stack", "5"])
self.assertEqual(len(probs), 0)
class TestSurface(unittest.TestCase):
"""Test Suite for surface writer"""
def test_surface(self):
tol = 1e-3
for m in [ "simple", "extended", "dual", "super", "shin-metiu", "modelx", "models", "vibronic" ]:
with self.subTest(m=m):
if m in ["vibronic"]:
options = "-m {:s} --x0 0 0 0 0 0 -s 2 -r -5 5".format(m).split()
else:
options = "-m {:s} -r -11 11 -n 200".format(m).split()
checkdir = os.path.join(testdir, "checks", "surface")
os.makedirs(checkdir, exist_ok=True)
outfile = os.path.join(checkdir, "{:s}.out".format(m))
with open(outfile, "w") as f:
mudslide.surface.main(options, f)
form = "f" * (8 if m in ["simple", "extended", "dual"] else 13)
if m in ["vibronic"]:
form = "f" * 20
reffile = os.path.join(testdir, "ref", "surface", "{:s}.ref".format(m))
with open(reffile) as ref, open(outfile) as out:
problems = compare_line_by_line(ref, out, form, tol)
for p in problems:
print_problem(p)
self.assertEqual(len(problems), 0)
if __name__ == '__main__':
unittest.main()
| 33.060185 | 263 | 0.558325 | 933 | 7,141 | 4.20686 | 0.237942 | 0.006115 | 0.033885 | 0.051975 | 0.435924 | 0.41758 | 0.344968 | 0.323057 | 0.323057 | 0.303185 | 0 | 0.029854 | 0.301078 | 7,141 | 215 | 264 | 33.213953 | 0.756562 | 0.095925 | 0 | 0.341615 | 0 | 0.006211 | 0.110224 | 0 | 0 | 0 | 0 | 0 | 0.043478 | 1 | 0.068323 | false | 0.012422 | 0.037267 | 0 | 0.36646 | 0.043478 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
dc64de93761e9199618480ad37464a2e517cd6f9 | 4,076 | py | Python | velocityhelper/delta.py | adriangrepo/velocity_modelling | 71e11675e225df7fad80543c8e8a0bfbc01a7322 | [
"Unlicense"
] | 2 | 2019-10-04T13:55:37.000Z | 2020-06-28T05:32:52.000Z | velocityhelper/delta.py | adriangrepo/velocity_modelling | 71e11675e225df7fad80543c8e8a0bfbc01a7322 | [
"Unlicense"
] | null | null | null | velocityhelper/delta.py | adriangrepo/velocity_modelling | 71e11675e225df7fad80543c8e8a0bfbc01a7322 | [
"Unlicense"
] | 1 | 2020-07-02T13:21:48.000Z | 2020-07-02T13:21:48.000Z |
from velocityhelper.api.deltamodel import DeltaModel
from velocityhelper.api.dataio import DataIO
from settings import DELTACALCSPATH
import logging
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)
class Delta(object):
def __init__(self):
self.gridPath = DELTACALCSPATH+"20150514_Grids_TWT.csv"
self.wellTopPath = DELTACALCSPATH+"20150514_Lithostrat_WellTopData.csv"
self.gridDf = None
self.wellTopDf = None
self.negativeZ = False
def calcDifferences(self):
dataIO = DataIO()
self.wellTopDf = dataIO.readCSVZeroIndex(self.wellTopPath)
wellTopDict = dataIO.getData(self.wellTopDf, self.negativeZ)
self.gridDf = dataIO.readCSVZeroIndex(self.gridPath)
gridDict = dataIO.getData(self.gridDf, self.negativeZ)
deltaList = []
        # Match grid picks to well-top picks by well name and surface name,
        # recording the TWT and depth differences for each matched pair.
        for gridList in gridDict.values():
for gridModel in gridList:
for topList in wellTopDict.values():
for topModel in topList:
if gridModel.well == topModel.well:
if gridModel.surfaceName == topModel.surfaceName:
deltaModel = DeltaModel()
deltaModel.well = gridModel.well
deltaModel.surfaceName = gridModel.surfaceName
deltaModel.gridTwt = gridModel.twtAuto
deltaModel.wellTwt = topModel.twtAuto
deltaModel.gridZ = gridModel.z
deltaModel.wellZ = topModel.z
deltaModel.deltaTWT = gridModel.twtAuto - topModel.twtAuto
                                # Equivalent to topModel.z - gridModel.z (depth signs flipped before differencing)
                                deltaModel.deltaZ = ((-1)*gridModel.z) - ((-1)*topModel.z)
deltaList.append(deltaModel.getDataList())
if len(deltaList)>0:
deltaList.insert(0, DeltaModel.HEADERS)
dataIO.writeIsoModels(deltaList, DELTACALCSPATH, "DeltaCalcs", False)
else:
logger.debug("No matching surfaces found")
'''
def writeResults(self, results, appendFlag):
dataIO = DataIO()
result = IsoModel()
resultsCSV = result.getResultsCSV(results)
dataIO.writeCSV(resultsCSV, self.filePath+results[0].calcFunction+"_calc.csv", appendFlag)
first = False
def calcLoop(self, readWb, data, functionList, domain):
first = True
for function in functionList:
result = IsoModel()
results = readWb.calcDifference(data, function)
resultsCSV = result.getResultsCSV(results)
if first:
appendFlag=False
else:
appendFlag=True
readWb.writeCSV(resultsCSV, DELTACALCSPATH+"Deltas_Output.csv", appendFlag)
first = False
'''
'''
def runFunctions(self):
dataIO = DataIO()
functionsDf = dataIO.readCSVZeroIndex(self.functionsPath)
functionList = dataIO.functionReader(functionsDf)
calculations = Calculations()
first = True
for function in functionList:
if (Function.ISOPACH == function.operation.lower()) or (Function.ISOCHRON == function.operation.lower()):
if self.markersDf == None:
self.markersDf = dataIO.readCSVZeroIndex(self.isoCalcsMarkersPath)
results = calculations.doIsoCalculations(function, self.markersDf)
elif Function.VINT == function.operation.lower():
if self.deltaTopDf == None:
self.deltaTopDf = dataIO.readCSVZeroIndex(self.deltaWellTopPath)
if self.deltaBaseDf == None:
self.deltaBaseDf = dataIO.readCSVZeroIndex(self.deltaBaseDf)
results = calculations.doVintCalculations(function, self.deltaWellTopPath, self.deltaBaseDf)
self.writeResult(results)
'''
if __name__ == '__main__':
delta = Delta()
delta.calcDifferences()
| 39.192308 | 117 | 0.599853 | 343 | 4,076 | 7.06414 | 0.335277 | 0.054478 | 0.064383 | 0.029715 | 0.072637 | 0.028064 | 0 | 0 | 0 | 0 | 0 | 0.007524 | 0.31526 | 4,076 | 103 | 118 | 39.572816 | 0.860623 | 0 | 0 | 0 | 0 | 0 | 0.045537 | 0.025699 | 0 | 0 | 0 | 0 | 0 | 1 | 0.045455 | false | 0 | 0.090909 | 0 | 0.159091 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
dc65716756447d035330c8c7078e88ce91d0d4c7 | 2,269 | py | Python | MyLib/my_digital_pin.py | NSE-labs/ESP8266-wifi-devices | d445c669c8a52ef9dd61085e1bedc61bfe6c6c3e | [
"MIT"
] | null | null | null | MyLib/my_digital_pin.py | NSE-labs/ESP8266-wifi-devices | d445c669c8a52ef9dd61085e1bedc61bfe6c6c3e | [
"MIT"
] | null | null | null | MyLib/my_digital_pin.py | NSE-labs/ESP8266-wifi-devices | d445c669c8a52ef9dd61085e1bedc61bfe6c6c3e | [
"MIT"
] | null | null | null | import machine
ARRAYSIZE = 20
class PinToWatch:
def __init__(self, pin_number, pull_up=False):
self.buffer = bytearray(ARRAYSIZE)
self.copy = bytearray(ARRAYSIZE)
self.index = 0
if pull_up:
self.pin = machine.Pin(pin_number, machine.Pin.IN,
machine.Pin.PULL_UP)
else:
self.pin = machine.Pin(pin_number, machine.Pin.IN)
# pretend the pin changed to publish the current value
self.pin_change(self.pin)
self.pin.irq(trigger=machine.Pin.IRQ_RISING | machine.Pin.IRQ_FALLING,
handler=self.pin_change)
def pin_change(self, pin):
irq_state = machine.disable_irq() # interrupts off
self.buffer[self.index] = pin.value()
self.index += 1
if self.index >= ARRAYSIZE:
self.index = ARRAYSIZE - 1
print('Buffer overflow in MyDigitalPin')
machine.enable_irq(irq_state) # interrupts back on
def check_pin(self, broker, topic, invert=False):
irq_state = machine.disable_irq() # interrupts off
i = self.index
for x in range(i):
if invert:
self.copy[x] = 1 - self.buffer[x]
else:
self.copy[x] = self.buffer[x]
self.index = 0
machine.enable_irq(irq_state) # interrupts back on
for x in range(i):
broker.publish(topic, b'{}'.format(self.copy[x]))
def publish_pin(self, broker, topic, invert=False):
""" publish pin state regardless of whether it has changed """
pin_state = self.pin.value()
if invert:
pin_state = 1 - pin_state
broker.publish(topic, b'{}'.format(pin_state))
class PinToSample:
def __init__(self, pin_number, pull_up=False):
if pull_up:
self.pin = machine.Pin(pin_number, machine.Pin.IN,
machine.Pin.PULL_UP)
else:
self.pin = machine.Pin(pin_number, machine.Pin.IN)
def publish_pin(self, broker, topic, invert=False):
pin_state = self.pin.value()
if invert:
pin_state = 1 - pin_state
broker.publish(topic, b'{}'.format(pin_state))
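

# Illustrative usage sketch (not part of the original module): `broker` is
# assumed to be an MQTT-style client exposing publish(topic, payload), e.g.
# umqtt.simple.MQTTClient; the pin number, topic and poll interval below are
# placeholders.
def example_poll_loop(broker, topic=b'sensors/button'):
    import time
    button = PinToWatch(0, pull_up=True)
    while True:
        # Drain any edges the IRQ handler recorded since the last poll.
        button.check_pin(broker, topic, invert=True)
        time.sleep_ms(100)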
| 35.453125 | 78 | 0.575143 | 285 | 2,269 | 4.424561 | 0.217544 | 0.072165 | 0.044409 | 0.053925 | 0.58525 | 0.546392 | 0.523394 | 0.463125 | 0.28866 | 0.28866 | 0 | 0.005844 | 0.321287 | 2,269 | 63 | 79 | 36.015873 | 0.812987 | 0.078008 | 0 | 0.615385 | 0 | 0 | 0.01778 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.115385 | false | 0 | 0.019231 | 0 | 0.173077 | 0.019231 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
dc66e4b99386d65bbe636360f3f9615d3f54842b | 2,479 | py | Python | jupyterlab_pyflyby/pyflyby_handler.py | Carreau/jupyterlab-pyflyby | 19887fe0d5202eb6d197bdfe783e8b238ff8813e | [
"BSD-3-Clause"
] | null | null | null | jupyterlab_pyflyby/pyflyby_handler.py | Carreau/jupyterlab-pyflyby | 19887fe0d5202eb6d197bdfe783e8b238ff8813e | [
"BSD-3-Clause"
] | null | null | null | jupyterlab_pyflyby/pyflyby_handler.py | Carreau/jupyterlab-pyflyby | 19887fe0d5202eb6d197bdfe783e8b238ff8813e | [
"BSD-3-Clause"
] | null | null | null | from __future__ import absolute_import, division
import json
from notebook.base.handlers import IPythonHandler
import os
import subprocess
class PyflybyStatus(IPythonHandler):
"""
Checks if pyflyby is loaded by default in ipython session
Return {"status": "loaded"} if included by default, else {"status": "not-loaded"}
"""
def get(self):
from IPython.terminal.ipapp import load_default_config
extensions = load_default_config().InteractiveShellApp.extensions.to_dict()
if any(["pyflyby" in val for val in extensions.values()]):
self.finish({"status": "loaded"})
else:
self.finish({"status": "not-loaded"})
class InstallPyflyby(IPythonHandler):
"""
Adds pyflyby to ipython extensions, to be included default everytime ipython is launched
"""
def post(self):
try:
subprocess.run(["py", "pyflyby.install_in_ipython_config_file"])
self.finish({"result": "Installed pyflyby successfully"})
except Exception as err:
self.send_error({"result": "Pyflyby installation failed - {}".format(err)})
class DisablePyflybyClient(IPythonHandler):
"""
Disables jupyterlab-pyflyby labextension for user
"""
def post(self):
try:
settings_dir = os.environ.get(
"JUPYTERLAB_SETTINGS_DIR",
os.path.join(os.environ.get("HOME"), ".jupyter/lab/user-settings"),
)
pyflyby_settings_file = os.path.join(
settings_dir, "@deshaw/jupyterlab-pyflyby/plugin.jupyterlab-settings"
)
installDialogDisplayed = (
True if self.get_body_argument("installDialogDisplayed") == "true" else False
)
settings = {"enabled": False}
# To remember dialog box to install pyflyby ipython extension was displayed for current user
settings["installDialogDisplayed"] = installDialogDisplayed
if os.path.exists(pyflyby_settings_file):
with open(pyflyby_settings_file, "r") as f:
settings = {**json.load(f), **settings}
with open(pyflyby_settings_file, "w") as f:
json.dump(settings, f, indent=4)
self.finish({"result": "Disabled pyflyby extension successfully"})
except Exception as err:
self.send_error({"result": "Could not disable pyflyby extension - {}".format(err)})
| 35.927536 | 104 | 0.630093 | 262 | 2,479 | 5.847328 | 0.412214 | 0.02611 | 0.049608 | 0.018277 | 0.101828 | 0.06658 | 0.06658 | 0.06658 | 0.06658 | 0 | 0 | 0.000548 | 0.263413 | 2,479 | 68 | 105 | 36.455882 | 0.838445 | 0.149254 | 0 | 0.139535 | 0 | 0 | 0.195726 | 0.089364 | 0 | 0 | 0 | 0 | 0 | 1 | 0.069767 | false | 0 | 0.139535 | 0 | 0.27907 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
dc670ac7174f08d106aa42c103460a0145f1d466 | 1,621 | py | Python | invenio_communities/records/records/models.py | lhenze/invenio-communities | 471abcf6b4429306ab39cc0c334cd78911a2dfb2 | [
"MIT"
] | null | null | null | invenio_communities/records/records/models.py | lhenze/invenio-communities | 471abcf6b4429306ab39cc0c334cd78911a2dfb2 | [
"MIT"
] | null | null | null | invenio_communities/records/records/models.py | lhenze/invenio-communities | 471abcf6b4429306ab39cc0c334cd78911a2dfb2 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
#
# Copyright (C) 2021 CERN.
#
# Invenio-Communities is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see LICENSE file for more
# details.
"""Abstract database model for modelling community/record relationships."""
from invenio_db import db
from invenio_requests.records.models import RequestMetadata
from sqlalchemy.ext.declarative import declared_attr
from sqlalchemy_utils.types import UUIDType
from ...communities.records.models import CommunityMetadata
class CommunityRelationMixin:
"""Model mixin to define a relationship between a communities and records.
Usage:
.. code-block:: python
class CommunityRecordM2M(db.Model, CommunityRelationMixin):
__record_model__ = MyParentRecord
"""
__record_model__ = None
__request_model__ = None
@declared_attr
def community_id(cls):
"""Foreign key to the related communithy."""
return db.Column(
UUIDType,
db.ForeignKey(CommunityMetadata.id, ondelete="CASCADE"),
primary_key=True,
)
@declared_attr
def record_id(cls):
"""Foreign key to the related record."""
return db.Column(
UUIDType,
db.ForeignKey(cls.__record_model__.id, ondelete="CASCADE"),
primary_key=True,
)
@declared_attr
def request_id(cls):
"""Foreign key to a related request."""
return db.Column(
UUIDType,
db.ForeignKey(RequestMetadata.id, ondelete="SET NULL"),
nullable=True,
)
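

# Sketch of a concrete association table built from the mixin (the model and
# table names below are illustrative and not defined in this module):
#
#     class CommunityRecordM2M(db.Model, CommunityRelationMixin):
#         __tablename__ = 'communities_records'
#         __record_model__ = MyParentRecord
#         __request_model__ = RequestMetadata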
| 27.474576 | 78 | 0.662554 | 180 | 1,621 | 5.788889 | 0.477778 | 0.046065 | 0.043186 | 0.043186 | 0.254319 | 0.238004 | 0.140115 | 0.088292 | 0.088292 | 0 | 0 | 0.004934 | 0.249846 | 1,621 | 58 | 79 | 27.948276 | 0.851974 | 0.361505 | 0 | 0.37931 | 0 | 0 | 0.022403 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.103448 | false | 0 | 0.172414 | 0 | 0.482759 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
dc6898bc736e3680304bdc19fa25fd6153e8ddad | 1,214 | py | Python | guides/python/pysample/subproc_tcp/parent_server.py | ToraNova/library | 20b321302868e8c2ce8723c808aa9e7a313e2cb8 | [
"MIT"
] | null | null | null | guides/python/pysample/subproc_tcp/parent_server.py | ToraNova/library | 20b321302868e8c2ce8723c808aa9e7a313e2cb8 | [
"MIT"
] | null | null | null | guides/python/pysample/subproc_tcp/parent_server.py | ToraNova/library | 20b321302868e8c2ce8723c808aa9e7a313e2cb8 | [
"MIT"
] | null | null | null | #!/usr/bin/python3
import socket
import sys
import subprocess
# Bind the socket to the port
server_address = ('localhost', 10000)
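
# Minimal client sketch for manually exercising this server (illustrative
# only, not part of the original script): run the server, then call
# example_client() from another process.
def example_client(message=b'hello world'):
    with socket.create_connection(server_address) as conn:
        conn.sendall(message)
        return conn.recv(16)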
# Listen for incoming connections
if __name__ == "__main__":
print('starting up on %s port %s' % server_address)
try:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) # Create a TCP/IP socket
sock.bind(server_address)
sock.listen(1) #listen for one
while True:
try:
print('waiting for a connection') # Wait for a connection
connection, client_address = sock.accept()
print('connection from', client_address)
data = connection.recv(16) # Receive the data in small chunks and retransmit it
connection.sendall(data)
data = data.decode("utf-8")
#processing is done here
childproc = subprocess.Popen(['./child.py',data],stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out,err = childproc.communicate(timeout=5)
print("STDOUT:",out,"/ STDERR:",err)
            except subprocess.TimeoutExpired as e:
                # On timeout, kill the child before draining its pipes
                # (per the subprocess docs), otherwise communicate() blocks.
                print("Child process timed out:", str(e))
                childproc.kill()
                out, err = childproc.communicate()
                print("STDOUT:", out, "/ STDERR:", err)
            except Exception as e:
                print("Exception has occurred:", str(e))
finally:
# Clean up the connection
connection.close()
finally:
sock.close() #shutdowns and deallocate the socket
| 31.128205 | 100 | 0.705107 | 164 | 1,214 | 5.128049 | 0.536585 | 0.046373 | 0.033294 | 0.061831 | 0.054697 | 0 | 0 | 0 | 0 | 0 | 0 | 0.010967 | 0.173806 | 1,214 | 38 | 101 | 31.947368 | 0.827517 | 0.221582 | 0 | 0.206897 | 0 | 0 | 0.161497 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.103448 | 0 | 0.103448 | 0.206897 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
dc6a56899bbf13b1b2af99495656645d4e8e5c21 | 220 | py | Python | webcam_video_capture.py | Programista3/Python-OpenCV-Examples | 73081d3a107a0f55285466a0dc9eac6605e69414 | [
"BSD-3-Clause"
] | null | null | null | webcam_video_capture.py | Programista3/Python-OpenCV-Examples | 73081d3a107a0f55285466a0dc9eac6605e69414 | [
"BSD-3-Clause"
] | null | null | null | webcam_video_capture.py | Programista3/Python-OpenCV-Examples | 73081d3a107a0f55285466a0dc9eac6605e69414 | [
"BSD-3-Clause"
] | null | null | null | import cv2
cap = cv2.VideoCapture(0)
while True:
ret, frame = cap.read()
if ret:
cv2.imshow('WebCam', frame)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
cap.release()
cv2.destroyAllWindows() | 18.333333 | 41 | 0.604545 | 30 | 220 | 4.433333 | 0.7 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.047904 | 0.240909 | 220 | 12 | 42 | 18.333333 | 0.748503 | 0 | 0 | 0 | 0 | 0 | 0.031674 | 0 | 0 | 0 | 0.0181 | 0 | 0 | 1 | 0 | false | 0 | 0.1 | 0 | 0.1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
dc6d0754a896ceebfd7161e4b326a249bb03c2bc | 1,208 | py | Python | src/models/train_model.py | zarak/domain_focused_language_model | c2906f7e02cafd40e48c23d51cffb0817a465298 | [
"MIT"
] | null | null | null | src/models/train_model.py | zarak/domain_focused_language_model | c2906f7e02cafd40e48c23d51cffb0817a465298 | [
"MIT"
] | 4 | 2020-03-31T11:14:37.000Z | 2021-08-23T20:38:21.000Z | src/models/train_model.py | zarak/domain_focused_language_model | c2906f7e02cafd40e48c23d51cffb0817a465298 | [
"MIT"
] | null | null | null | import pathlib
import pickle
from datetime import datetime
import pandas as pd
from utils import count_ngrams, create_model
PROCESSED_DATA_DIR = pathlib.Path('../data/processed/')
def read_files():
so = pd.read_csv(PROCESSED_DATA_DIR / 'tokenized.csv')
so = so.loc[so.text.dropna().index]
return so
def train_test_split(so, sample_size=None, random_state=0):
train = so.query("category != 'title'")
test = so.query("category == 'title'")
if sample_size:
train = train.sample(sample_size, random_state=random_state)
test = test.sample(int(sample_size * 0.2), random_state=random_state)
return train, test
def fit(train, n=3, save_model=False):
vocab_set = set(' '.join(train.text.tolist()))
counts = count_ngrams(train.text.tolist(), n)
model = create_model(counts, len(vocab_set))
if save_model:
print("Saving model as pickle file")
timestamp = datetime.now()
pickle.dump(model, open(f"model_n{n}_{timestamp}.p", "wb"))
return model, counts
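

def load_model(path):
    """Load a model previously pickled by fit().

    Sketch only: `path` is whatever timestamped ``model_n{n}_{timestamp}.p``
    file fit() wrote; nothing else is assumed about the model object.
    """
    with open(path, "rb") as f:
        return pickle.load(f)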
def main():
DATASET_SIZE = 1000
so = read_files()
train, test = train_test_split(so, DATASET_SIZE)
# fit(train)
if __name__ == "__main__":
main()
| 25.702128 | 77 | 0.675497 | 172 | 1,208 | 4.511628 | 0.395349 | 0.070876 | 0.041237 | 0.041237 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.008214 | 0.193709 | 1,208 | 46 | 78 | 26.26087 | 0.788501 | 0.008278 | 0 | 0 | 0 | 0 | 0.109532 | 0.020067 | 0 | 0 | 0 | 0 | 0 | 1 | 0.125 | false | 0 | 0.15625 | 0 | 0.375 | 0.03125 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
dc6d891f3fec625a891432fb90b976885a223e6c | 2,584 | py | Python | solutions/alice_in_wonderland/the_rows_of_cakes.py | roman-kachanovsky/checkio | 3134cbc04ed56e92006d1e2f09d7365e900953db | [
"BSD-3-Clause"
] | 1 | 2017-02-07T19:50:52.000Z | 2017-02-07T19:50:52.000Z | solutions/alice_in_wonderland/the_rows_of_cakes.py | roman-kachanovsky/checkio-python | 3134cbc04ed56e92006d1e2f09d7365e900953db | [
"BSD-3-Clause"
] | null | null | null | solutions/alice_in_wonderland/the_rows_of_cakes.py | roman-kachanovsky/checkio-python | 3134cbc04ed56e92006d1e2f09d7365e900953db | [
"BSD-3-Clause"
] | null | null | null | """ --- The Rows of Cakes --- Challenging
Someone has decided to bake a load of cakes and place them
on the floor. Our robots can't help but try to find a pattern
behind the cakes' disposition. Some cakes form rows, we want
to count these rows. A row is a sequence of three or more cakes
if we can draw a straight line through its centers. The greater
row takes up the smaller rows. So if we have a row with 4 cakes,
then we have only one row (not 4 by 3).
The cake locations are represented as a list of coordinates.
A coordinate is a list of two integers. You should count the rows.
Input: Coordinates as a list of lists with two integers.
Output: The quantity of rows as an integer.
How it is used: This is an example of the image and pattern
recognition. This concept can be useful for
the game mechanics or if you want to write a bot
for games, or when transposing printed text
to a digital format.
Precondition: 0 < |coordinates| < 20
coordinates: 0 <= x, y <= 10
"""
class Row(object):
def __init__(self, row):
self.p1 = row[0]
self.p2 = row[1]
def my_solution(cakes):
from itertools import combinations
from math import sqrt
def is_between(a, c, b):
def distance(m, n):
return sqrt((m[0] - n[0]) ** 2 + (m[1] - n[1]) ** 2)
return round(distance(a, c) + distance(c, b), 2) == round(distance(a, b), 2)
rows = {Row(r): 0 for r in combinations(cakes, 2)}
# Find the rows which contain more than 2 points
for k in rows.keys():
for cake in cakes:
if cake not in [k.p1, k.p2] and is_between(k.p1, cake, k.p2):
rows[k] += 1
# Drop all excess rows
rows = [k for k in rows.keys() if rows[k]]
# Find fully immersed rows
immersed_rows = []
for a in rows:
for b in rows:
if a != b and is_between(b.p1, a.p1, b.p2) and is_between(b.p1, a.p2, b.p2):
if a not in immersed_rows:
immersed_rows.append(a)
return len(rows) - len(immersed_rows)
def nickie_solution(cakes):
from itertools import combinations
def L(x, y, z): # Checks if three points are colinear
return (y[0] - x[0]) * (z[1] - x[1]) == (y[1] - x[1]) * (z[0] - x[0])
rows = set()
for p, q in combinations(cakes, 2):
colinear = frozenset(tuple(r) for r in cakes if L(p, q, r))
if len(colinear) > 2:
rows.add(colinear)
return len(rows)
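

# Quick sanity check (illustrative): four cakes on the diagonal y = x form a
# single row of four, which must be counted once, not as several sub-rows.
if __name__ == '__main__':
    cakes = [[1, 1], [3, 3], [5, 5], [8, 8]]
    assert my_solution(cakes) == 1
    assert nickie_solution(cakes) == 1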
| 34 | 88 | 0.596362 | 427 | 2,584 | 3.576112 | 0.370023 | 0.039293 | 0.013752 | 0.011788 | 0.096922 | 0.078585 | 0 | 0 | 0 | 0 | 0 | 0.025112 | 0.306502 | 2,584 | 75 | 89 | 34.453333 | 0.827009 | 0.477167 | 0 | 0.058824 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.176471 | false | 0 | 0.088235 | 0.058824 | 0.441176 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
dc70302b5aa182506c2b23050cadcf676234af2c | 954 | py | Python | lib/pre_filter.py | kdrkdrkdr/UserDictionaryForPapago | df243c949a50fea566eae0f4056170ea9a92b70f | [
"MIT"
] | 1 | 2022-03-28T14:02:54.000Z | 2022-03-28T14:02:54.000Z | lib/pre_filter.py | kdrkdrkdr/UserDict4Papago | df243c949a50fea566eae0f4056170ea9a92b70f | [
"MIT"
] | null | null | null | lib/pre_filter.py | kdrkdrkdr/UserDict4Papago | df243c949a50fea566eae0f4056170ea9a92b70f | [
"MIT"
] | null | null | null | import MeCab
from lib.util import ReplaceText
from lib.convert_dict import ConvertDictionary
class PreFilter:
def __init__(self, text: str, dictList: dict):
self.mecab = MeCab.Tagger()
self.text = text
self.dictList = dictList
self.c = ConvertDictionary()
def pre_process(self):
sep_nl = '∮'
self.text = ReplaceText(
self.text, {
'\r':'',
'\n':sep_nl,
' ':'',
'「':' "',
'」':'" '
}
)
a = self.mecab.parse(self.text).split()[:-1]
surface = a[0::2]
pos = a[1::2]
b = [(surface[i], i) for i, p in enumerate(pos) if ('固有名詞' in p) and (surface[i] in self.dictList)]
for sur, idx in b:
surface[idx] = f'^{self.c._ko2kata(self.dictList[sur])}'
pre = ''.join(surface).replace(sep_nl, '\n')
return (pre, b)
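

# Illustrative usage sketch (not part of the original module): the dictionary
# maps a Japanese proper noun to a user-supplied Korean reading, which
# pre_process() marks with '^' and converts to katakana. Both the sentence and
# the dictionary entry below are placeholders.
def example_usage():
    user_dict = {'綾波': '아야나미'}
    pf = PreFilter('綾波は笑った。', user_dict)
    return pf.pre_process()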
| 28.058824 | 107 | 0.481132 | 113 | 954 | 4 | 0.460177 | 0.088496 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.009901 | 0.36478 | 954 | 34 | 108 | 28.058824 | 0.731023 | 0 | 0 | 0 | 0 | 0 | 0.058639 | 0.039791 | 0 | 0 | 0 | 0 | 0 | 1 | 0.071429 | false | 0 | 0.107143 | 0 | 0.25 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
dc759a409824341db10f9477a7f9f77d8fb37dc1 | 11,715 | py | Python | composr/composr.py | asifr/composr | 7c8d7d312a8f2f06abb0f1c5a019626d2e0444e0 | [
"MIT"
] | null | null | null | composr/composr.py | asifr/composr | 7c8d7d312a8f2f06abb0f1c5a019626d2e0444e0 | [
"MIT"
] | null | null | null | composr/composr.py | asifr/composr | 7c8d7d312a8f2f06abb0f1c5a019626d2e0444e0 | [
"MIT"
] | null | null | null | import os
import io
import random
import string
import typing as t
import json
from jinja2 import Environment, PackageLoader, select_autoescape, contextfilter
from markdown import Markdown
# Markdown extensions
extensions = [
"markdown.extensions.fenced_code",
"markdown.extensions.footnotes",
"markdown.extensions.tables",
"markdown.extensions.codehilite",
"markdown_katex",
]
@contextfilter
def call_macro_by_name(context, macro_name, *args, **kwargs):
return context.vars[macro_name](*args, **kwargs)
def generate_key(N=6, prefix="co-") -> str:
return prefix + "".join(
random.SystemRandom().choice(
string.ascii_lowercase + string.ascii_uppercase + string.digits
)
for _ in range(N)
)
def fwrite(file_path: str, data: str):
"""Write data to file"""
with io.open(file_path, "w", encoding="utf8") as f:
f.write(data)
def fread(file_path):
"""Read data from file"""
with io.open(file_path, "r", encoding="utf8") as f:
return f.read()
def mpl_svg(fig) -> str:
"""Return the SVG string of a matplotlib figure.
Parameters
----------
fig : Figure
Matplotlib figure
Returns
-------
str
Figure SVG
"""
f = io.BytesIO()
fig.savefig(f, format="svg", bbox_inches="tight")
svg = str(f.getvalue().decode("utf-8"))
svg = "\n".join(svg.split("\n")[3:])
return svg
def mpl_png(fig) -> str:
"""Return the base64 encoded png string of a matplotlib figure.
Parameters
----------
fig : Figure
Matplotlib figure
Returns
-------
str
Figure base64 encoding
"""
import base64
f = io.BytesIO()
fig.savefig(f, format="png", bbox_inches="tight")
f.seek(0)
b = base64.b64encode(f.getvalue()).decode("utf-8").replace("\n", "")
return '<img class="mpl-figure-png" align="center" src="data:image/png;base64,%s">' % b
class Composr:
"""The composr object loads the templates, macros, extensions, and acts as
the central object for adding components and saving documents. Once it is
created it will act as the central repository for components, figure and
table numbers and much more."""
def __init__(self, basic: t.Optional[bool] = False):
self.env = Environment(
loader=PackageLoader("composr"), autoescape=select_autoescape()
)
# call macro in template using the macro name: {{name | macro(params)}}
self.env.filters["macro"] = call_macro_by_name
self.mdprocessor = Markdown(
extensions=extensions,
extension_configs={
"markdown_katex": {
"insert_fonts_css": True,
},
},
)
# global parameters for layout.html template
self.tpl_params: t.Mapping[str, t.Any] = {
"basic": basic,
"width": 980
}
# collection of components
self.components_: t.Sequence[t.Mapping[str, t.Any]] = []
# figure and table numbering
self.figure_number_: int = 0
self.table_number_: int = 0
def add_title(self, text: str):
"""Add a page title"""
self.tpl_params["title"] = text
def add_heading(self, text: str):
"""Add a H1 HTML heading"""
self.append_component("heading", value=text)
def add_subheading(self, text: str):
"""Add a H2 HTML heading"""
self.append_component("subheading", value=text)
def add_markdown(self, text: str):
"""Add markdown formatted text."""
self.append_component("markdown", value=self.mdprocessor.convert(text))
def add_markdown_file(self, file_path: str):
"""Add markdown formatted text from a file"""
assert os.path.isfile(file_path), f"{file_path} does not exist"
text = fread(file_path)
self.append_component("markdown", value=self.mdprocessor.convert(text))
def add_html(self, text: str):
"""Add raw HTML"""
self.append_component("html", value=text)
def add_text(self, text: str):
"""Add plain unformatted text"""
self.append_component("text", value=text)
def add_dataframe(
self, df, caption: t.Optional[str] = None, max_rows: t.Optional[int] = 1000
):
"""Add a Pandas dataframe as an HTML table"""
self.table_number_ += 1
try:
df = df.iloc[:max_rows]
        except Exception:
            # Tolerate inputs that don't support .iloc slicing.
            pass
self.append_component(
"dataframe",
value=df.to_html(),
caption=caption,
table_number=self.table_number_,
)
def add_tabulator(
self,
df,
caption: t.Optional[str] = None,
rows_per_page: t.Optional[int] = 20,
max_rows: t.Optional[int] = 1000,
height: t.Optional[int] = 300,
):
"""Add a pandas dataframe as a paginated table"""
# set the global tabulator variable to load the javascript and css
self.tpl_params["tabulator"] = True
self.table_number_ += 1
self.append_component(
"tabulator",
value=df.iloc[:max_rows].to_dict(orient="records"),
caption=caption,
table_number=self.table_number_,
rows_per_page=rows_per_page,
height=height,
)
def add_link(self, text: str, url: str):
"""Add a link"""
self.append_component("link", value=text, url=url)
def add_docstring(self, fun: t.Callable):
"""Add the preformatted docstring from a function or module"""
import inspect
source = inspect.cleandoc(fun.__doc__)
value = f"```text\n{source}\n```"
value = self.mdprocessor.convert(value)
self.append_component("docstring", value=value, name=fun.__name__)
def add_markdown_docstring(self, fun: t.Callable):
"""Add the markdown formatted docstring from a function or module"""
import inspect
self.append_component(
"docstring",
value=self.mdprocessor.convert(inspect.cleandoc(fun.__doc__)),
name=fun.__name__,
)
def add_comments(self, fun: t.Callable):
"""Add the markdown formatted comments from a function or module"""
import inspect
text = inspect.getcomments(fun)
# remove comment token
text = inspect.cleandoc(
"\n".join([line.lstrip("#") for line in text.split("\n")])
)
self.append_component(
"docstring", value=self.mdprocessor.convert(text), name=fun.__name__
)
def add_details(self, text: str, title: t.Optional[str] = None):
"""Add details and summary"""
self.append_component("details", value=text, title=title)
def add_patient(self, df, columns: t.List[str]=[], rows: t.List[str]=[]):
"""Add patient tables"""
vitals = df.astype(str).to_dict()
self.append_component("patient", vitals=vitals, columns=columns, rows=rows)
def add_tip_aside(self, text: str, title: t.Optional[str] = None):
"""Add an markdown formatted tip aside"""
self.append_component("tip", value=self.mdprocessor.convert(text), title=title)
def add_important_aside(self, text: str, title: t.Optional[str] = None):
"""Add an markdown formatted important aside"""
self.append_component("important", value=self.mdprocessor.convert(text), title=title)
def add_sourcecode(self, fun, lang: t.Optional[str] = "python", hidden=False):
"""Add source code from a function or module"""
import inspect
source = inspect.getsource(fun)
value = self.mdprocessor.convert(f"```{lang}\n{source}\n```")
self.append_component(
"sourcecode", value=value, name=fun.__name__, hidden=hidden
)
def add_plotly(
self,
fig,
caption: t.Optional[str] = None,
width: t.Optional[int] = 800,
height: t.Optional[int] = 600,
):
"""Embed a plotly figure"""
from plotly.io import to_json
# set the global plotly variable to load the javascript and css
self.tpl_params["plotly"] = True
self.figure_number_ += 1
ps = json.dumps(to_json(fig))
self.append_component(
"plotly",
value=ps,
width=width,
height=height,
caption=caption,
figure_number=self.figure_number_,
)
def add_svg(self, fig, caption: t.Optional[str]=None):
"""Add matplotlib figure as embedded SVG"""
svg = mpl_svg(fig)
self.figure_number_ += 1
self.append_component(
"svg", value=svg, caption=caption, figure_number=self.figure_number_
)
def add_png(self, fig, caption: t.Optional[str]=None):
"""Add matplotlib figure as base64 encoded PNG"""
png = mpl_png(fig)
self.figure_number_ += 1
self.append_component(
"png", value=png, caption=caption, figure_number=self.figure_number_
)
def add_json(self, data):
"""Show a dictionary or sequence in a JSON viewer"""
self.tpl_params["jquery"] = True
self.append_component("json", value=json.dumps(data))
def insert_custom_css_file(self, file_path: str):
"""Add custom css from a file"""
assert os.path.isfile(file_path), f"{file_path} does not exist"
css = fread(file_path)
self.tpl_params["custom_css"] = css
def replace_default_css_file(self, file_path: str):
"""Replace the default CSS from a file"""
assert os.path.isfile(file_path), f"{file_path} does not exist"
css = fread(file_path)
self.tpl_params["css"] = css
def replace_codehilite_css_file(self, file_path: str):
"""Replace the default codehilite CSS from a file"""
assert os.path.isfile(file_path), f"{file_path} does not exist"
css = fread(file_path)
self.tpl_params["codehilite_css"] = css
def append_component(self, type: str, **kwargs):
"""Add a new component"""
params = {"type": type, "id": generate_key()}
assert "type" not in kwargs, "type is a reserved component template variable"
assert "id" not in kwargs, "id is a reserved component template variable"
params.update(kwargs)
self.components_.append(params)
def generate_html(self) -> str:
"""Generate HTML"""
template = self.env.get_template("layout.html")
html = template.render(components=self.components_, **self.tpl_params)
return html
def save_html(self, file_path: str):
"""Save generated document as HTML file"""
assert file_path.endswith(".html"), "file_path must have a .html extension"
assert (
len(self.components_) > 0
), "components list is empty, add components before saving"
html = self.generate_html()
print("Creating HTML...")
fwrite(file_path, html)
def save_pdf(self, file_path: str):
"""Save generated document as a PDF"""
assert file_path.endswith(".pdf"), "file_path must have a .pdf extension"
assert (
len(self.components_) > 0
), "components list is empty, add components before saving"
import pdfkit
html = self.generate_html()
print("Creating PDF...")
pdfkit.from_string(html, file_path)
def display_notebook(self):
"""Display generated HTML in a Jupyter notebook"""
from IPython.core.display import display, HTML
html = self.generate_html()
display(HTML(html)) | 33.186969 | 93 | 0.610414 | 1,452 | 11,715 | 4.780992 | 0.192149 | 0.032267 | 0.057476 | 0.031115 | 0.423077 | 0.357246 | 0.317632 | 0.274993 | 0.218237 | 0.154134 | 0 | 0.006407 | 0.267179 | 11,715 | 353 | 94 | 33.186969 | 0.802213 | 0.166197 | 0 | 0.221739 | 0 | 0.004348 | 0.105053 | 0.022632 | 0 | 0 | 0 | 0 | 0.043478 | 1 | 0.16087 | false | 0.004348 | 0.078261 | 0.008696 | 0.269565 | 0.008696 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
dc767a70688f91630b0b889066788187fd46f4c8 | 4,838 | py | Python | attributes/tests/test_modifier_handler.py | cluebyte/nextrpi | 5927b158b318bcb0436be1cac9ecffb89c2e0dfe | [
"BSD-3-Clause"
] | 4 | 2016-07-18T21:41:40.000Z | 2020-05-03T08:35:58.000Z | attributes/tests/test_modifier_handler.py | cluebyte/nextrpi | 5927b158b318bcb0436be1cac9ecffb89c2e0dfe | [
"BSD-3-Clause"
] | null | null | null | attributes/tests/test_modifier_handler.py | cluebyte/nextrpi | 5927b158b318bcb0436be1cac9ecffb89c2e0dfe | [
"BSD-3-Clause"
] | null | null | null | """
Unit test for ModifierHandler.
"""
from django.test import TestCase
from attributes.modifier import Modifier
from attributes.modifier_handler import ModifierHandler
from mock import Mock
class ModifierHandlerTestCase(TestCase):
BASE_VAL = 10
FLOAT_VAL = 0.5
ADD_MOD = {
'desc': "add modifier",
'val': BASE_VAL,
'dbref': 1,
'typeclass': 'Script',
'operator': '+'
}
SUB_MOD = {
'desc': "subtract modifier",
'val': BASE_VAL,
'dbref': 2,
'typeclass': 'Object',
'operator': '-'
}
MULTI_MOD = {
'desc': "multiply modifier",
'val': BASE_VAL,
'dbref': 3,
'typeclass': 'Player',
'operator': '*'
}
MULTI_FLOAT_MOD = {
'desc': "multiply modifier",
'val': FLOAT_VAL,
'dbref': 4,
'typeclass': 'Script',
'operator': '*'
}
RAW_MODS = [
ADD_MOD,
SUB_MOD,
MULTI_MOD,
MULTI_FLOAT_MOD
]
def setUp(self):
self.handler = ModifierHandler(self.RAW_MODS)
self.add_mod = Modifier.factory(**self.ADD_MOD)
self.sub_mod = Modifier.factory(**self.SUB_MOD)
self.multi_mod = Modifier.factory(**self.MULTI_MOD)
self.multi_float_mod = Modifier.factory(**self.MULTI_FLOAT_MOD)
def tearDown(self):
self.handler = None
def unpack_modifiers(self, handler):
mod_list = []
for mods in handler.modifiers.values():
mod_list = mod_list + mods
return mod_list
def test_initial_state(self):
self.assertIn(self.add_mod, self.handler._raw_modifiers)
self.assertIn(self.sub_mod, self.handler._raw_modifiers)
self.assertIn(self.multi_mod, self.handler._raw_modifiers)
self.assertIn(self.multi_float_mod, self.handler._raw_modifiers)
dict_mod_values = self.unpack_modifiers(self.handler)
self.assertIn(self.add_mod, dict_mod_values)
self.assertIn(self.sub_mod, dict_mod_values)
self.assertIn(self.multi_mod, dict_mod_values)
self.assertIn(self.multi_float_mod, dict_mod_values)
def test_get(self):
self.assertEqual(self.add_mod,
self.handler.get(self.ADD_MOD['desc']))
self.assertEqual(self.sub_mod,
self.handler.get(self.SUB_MOD['desc']))
self.assertEqual(self.multi_mod,
self.handler.get(self.MULTI_MOD['desc']))
def test_filter_for_dbref(self):
self.assertEqual(self.multi_mod,
self.handler.get(self.MULTI_MOD['desc'], dbref=3))
def test_filter_for_typeclass(self):
self.assertEqual(self.multi_mod,
self.handler.get(self.MULTI_MOD['desc'],
typeclass='Player'))
def test_filter_for_val(self):
self.assertEqual(self.multi_mod,
self.handler.get(self.MULTI_MOD['desc'],
val=10))
self.assertEqual(self.multi_float_mod,
self.handler.get(self.MULTI_FLOAT_MOD['desc'],
val=0.5))
def test_filter_for_val_negative(self):
self.assertNotEqual(self.multi_float_mod,
self.handler.get(self.MULTI_MOD['desc'],
val=10))
self.assertNotEqual(self.multi_mod,
self.handler.get(self.MULTI_MOD['desc'],
val=0.5))
def test_multiple_filters(self):
self.assertEqual(self.multi_mod,
self.handler.get(self.MULTI_MOD['desc'],
dbref=3,
typeclass='Player',
val=10))
def test_remove(self):
mod = self.handler.get(self.ADD_MOD['desc'])
self.handler.remove(mod)
        # modifiers.values() holds lists of Modifier objects, so flatten
        # before the membership check (otherwise the assertion is vacuous).
        self.assertNotIn(mod, self.unpack_modifiers(self.handler))
self.assertNotIn(mod, self.handler._raw_modifiers)
def test_all(self):
mod_list = [self.add_mod, self.sub_mod, self.multi_mod,
self.multi_float_mod]
self.assertEqual(mod_list, self.handler.all())
def test_get_mod_val(self):
self.assertEqual(self.handler.get_modified_val(self.BASE_VAL),
self.BASE_VAL
* self.BASE_VAL
* self.FLOAT_VAL
+ self.BASE_VAL
- self.BASE_VAL)
| 36.37594 | 74 | 0.530798 | 515 | 4,838 | 4.739806 | 0.128155 | 0.095862 | 0.088488 | 0.076608 | 0.575174 | 0.415813 | 0.379353 | 0.32077 | 0.256862 | 0.167554 | 0 | 0.0065 | 0.363993 | 4,838 | 132 | 75 | 36.651515 | 0.786805 | 0.006201 | 0 | 0.210526 | 0 | 0 | 0.054792 | 0 | 0 | 0 | 0 | 0 | 0.192982 | 1 | 0.114035 | false | 0 | 0.035088 | 0 | 0.22807 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
dc769e64c566d93ac481beb9c2e863da89910144 | 911 | py | Python | colab_util/fs.py | liangyuanruo/colab-util | 8ce11a520d2550f08a148ef4a76b3898cdfdce55 | [
"MIT"
] | null | null | null | colab_util/fs.py | liangyuanruo/colab-util | 8ce11a520d2550f08a148ef4a76b3898cdfdce55 | [
"MIT"
] | null | null | null | colab_util/fs.py | liangyuanruo/colab-util | 8ce11a520d2550f08a148ef4a76b3898cdfdce55 | [
"MIT"
] | null | null | null | import os
from pydrive.auth import GoogleAuth
from pydrive.drive import GoogleDrive
from oauth2client.client import GoogleCredentials
from google.colab import auth
auth.authenticate_user()
gauth = GoogleAuth()
gauth.credentials = GoogleCredentials.get_application_default()
drive = GoogleDrive(gauth)
def ls():
return drive.ListFile({'q': "trashed=false"}).GetList()
def read_file_with(read_function, file_id, *args, **kwargs):
"""
Reads a file with file_id using read_function from GoogleDrive. Additional args/kwargs passed to read_function.
"""
download_path = os.path.expanduser('~/data')
    os.makedirs(download_path, exist_ok=True)
    # Every download is staged through the same fixed temp filename.
    output_file = os.path.join(download_path, 'test.csv')
temp_file = drive.CreateFile({'id': file_id})
temp_file.GetContentFile(output_file)
return read_function(output_file, *args, **kwargs)
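

def example_read_csv(file_id):
    """Sketch only: load a Drive-hosted CSV into pandas.

    `file_id` is a placeholder for a real Drive file id (see ls());
    pandas is assumed to be available in the runtime.
    """
    import pandas as pd
    return read_file_with(pd.read_csv, file_id)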
| 27.606061 | 115 | 0.738749 | 115 | 911 | 5.678261 | 0.495652 | 0.073507 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.001302 | 0.15697 | 911 | 32 | 116 | 28.46875 | 0.848958 | 0.121844 | 0 | 0 | 0 | 0 | 0.038265 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.095238 | false | 0.047619 | 0.238095 | 0.047619 | 0.428571 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
dc76f3792bc1370290bf6ce1044e7b0d5f11d474 | 10,507 | py | Python | tasks.py | crylearner/RIDE3X | 767f45b0c908f18ecc7473208def8dc7489f43b0 | [
"ECL-2.0",
"Apache-2.0"
] | 1 | 2017-08-20T14:46:02.000Z | 2017-08-20T14:46:02.000Z | tasks.py | crylearner/RIDE3X | 767f45b0c908f18ecc7473208def8dc7489f43b0 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | tasks.py | crylearner/RIDE3X | 767f45b0c908f18ecc7473208def8dc7489f43b0 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | import sys
import functools
import os
from os.path import join, exists
import re
import shutil
import tempfile
from io import StringIO
import urllib.request, urllib.error, urllib.parse
from invoke import task, run
ROOT_DIR = os.path.dirname(os.path.abspath(__file__))
SOURCE_DIR = join(ROOT_DIR, 'src')
TEST_DIR = join(ROOT_DIR, 'utest')
DIST_DIR = join(ROOT_DIR, 'dist')
BUILD_DIR = join(ROOT_DIR, 'build')
ROBOTIDE_PACKAGE = join(ROOT_DIR, 'src', 'robotide')
BUNDLED_ROBOT_DIR = join(ROBOTIDE_PACKAGE, 'lib', 'robot')
# MANIFEST = ROOT_DIR/'MANIFEST.in'
TEST_PROJECT_DIR = 'theproject'
TEST_LIBS_GENERATED = 10
# Set VERSION global variable
exec(compile(open('src/robotide/version.py').read(), 'src/robotide/version.py', 'exec'))
FINAL_RELEASE = bool(re.match('^(\d*\.){1,2}\d*$', VERSION))
wxPythonDownloadUrl = \
"http://sourceforge.net/projects/wxpython/files/wxPython/2.8.12.1/"
# Developemnt tasks
@task
def devel(args=''):
"""Start development version of RIDE."""
_set_development_path()
from robotide import main
main(*args.split(','))
@task
def test(test_filter=''):
"""Run unit tests."""
_remove_bytecode_files()
from nose import run as noserun
_set_development_path()
additional_args = []
if test_filter:
additional_args.append(test_filter)
result = noserun(defaultTest=TEST_DIR,
argv=['', '--m=^test_'] + additional_args)
assert result is True
@task
def deps(upgrade=False):
"""Fetch and install development dependencies."""
cmd = 'pip install -r requirements.txt'
if upgrade:
run('{} --upgrade'.format(cmd))
else:
run(cmd)
@task
def clean():
"""Clean bytecode files and remove `dist` and `build` directories."""
_clean()
@task
def update_robot(version=''):
"""Update robot framework to specified commit or tag.
By default, update to current master.
This task also repackages RF under `robotide.robot` to avoid
accidentally importing system installation.
`git`, `grep` and `sed` must be installed
"""
target = version if version else 'master'
run('(cd ../robotframework && git fetch && git checkout {})'.format(target))
rf_commit_hash = run('(cd ../robotframework && git rev-parse HEAD)').stdout
run('rm -rf {}'.format(BUNDLED_ROBOT_DIR))
run('cp -r ../robotframework/src/robot src/robotide/lib/')
# Prevent .pyc matching grep expressions
_clean()
# `import robot` -> `from robotide.lib import robot`
_run_sed_on_matching_files(
'import robot',
's/import robot/from robotide.lib import robot/')
# `from robot.pkg import stuff` -> `from robotide.lib.robot.pkg import stuff`
_run_sed_on_matching_files(
'from robot\..* import',
's/from robot\./from robotide.lib.robot./')
# `from robot import stuff` -> `from robotide.lib.robot import stuff`
_run_sed_on_matching_files(
'from robot import',
's/from robot import/from robotide.lib.robot import/')
with open(join(ROBOTIDE_PACKAGE, 'lib', 'robot-commit'), 'w') as rf_version_file:
rf_version_file.write('{}\n'.format(rf_commit_hash))
_log('Updated bundled Robot Framework to version {}/{}'.format(
target, rf_commit_hash))
@task
def generate_big_project(install=False, upgrade=False, args=''):
"""Generate big test data project to help perf testing."""
_remove_bytecode_files()
if install or upgrade:
rfgen_url = \
"https://raw.github.com/robotframework/Generator/master/rfgen.py"
_log("Installing/upgrading rfgen.py from github.")
f = open('rfgen.py', 'wb')
f.write(urllib.request.urlopen(rfgen_url).read())
f.close()
_log("Done.")
_set_development_path()
sys.path.insert(0, '.')
try:
import rfgen
assert rfgen.main(args.split(','))
except ImportError:
_log("Error: Did not find 'rfgen' script or installation")
_log("Use 'invoke generate_big_project --install'")
@task
def random_test():
"""Use rtest go_find_bugs.py to randomly test RIDE API."""
_remove_bytecode_files()
_set_development_path()
sys.path.insert(0, '.')
from rtest.go_find_some_bugs import main
dir = tempfile.mkdtemp()
try:
assert main(dir)
finally:
shutil.rmtree(dir, ignore_errors=True)
# Installation and distribution tasks
@task
def version(version):
"""Set `version.py` to given version."""
with open(join(ROBOTIDE_PACKAGE, 'version.py'), 'w') as version_file:
version_file.write("""# Automatically generated by `tasks.py`.
VERSION = '%s'
""" % version)
_log('Set version to %s' % version)
@task
def register():
"""Register current version to Python package index."""
_run_setup('register')
@task
def install():
"""Install development version and dependencies."""
try:
import wxversion
except ImportError:
_log("""No wxPython installation detected!
Please install wxPython before running RIDE.
You can download wxPython 2.8.12.1 from {}
""".format(wxPythonDownloadUrl))
_run_setup('install')
def _run_setup(cmd):
run('python setup.py {}'.format(cmd))
def release_notes_plugin():
changes = _download_and_format_issues()
plugin_path = os.path.join(
ROBOTIDE_PACKAGE, 'application', 'releasenotes.py')
content = open(plugin_path).read().rsplit('RELEASE_NOTES =', 1)[0]
content += 'RELEASE_NOTES = """\n%s"""\n' % changes
open(plugin_path, 'w').write(content)
@task(pre=[clean],
help={
'release-notes': 'If enabled, release notes plugin will be updated'})
def sdist(release_notes=True, upload=False):
"""Creates source distribution with bundled dependencies."""
if release_notes:
release_notes_plugin()
_run_setup('sdist{}'.format('' if not upload else ' upload'))
_after_distribution()
@task(pre=[clean])
def wininst():
"""Creates Windows installer with bundled dependencies."""
if os.sep != '\\':
sys.exit('Windows installers may only be created in Windows')
_run_setup('bdist_wininst')
_after_distribution()
@task
def release_notes():
"""Download and format issues in markdown format."""
issues = _get_issues()
_log("""ID | Type | Priority | Summary
--- | ---- | -------- | ------- """)
for i in issues:
parts = ('#{}'.format(i.number), _find_type(i), _find_priority(i),
i.title)
_log(' | '.join(parts))
# Helper functions
def _clean(keep_dist=False):
_remove_bytecode_files()
if not keep_dist and exists(DIST_DIR):
shutil.rmtree(DIST_DIR)
if exists(BUILD_DIR):
shutil.rmtree(BUILD_DIR)
def _remove_bytecode_files():
for d in SOURCE_DIR, TEST_DIR:
_remove_files_matching(d, '.*\.pyc')
def _remove_files_matching(directory, pattern):
for root, dirs, files in os.walk(directory):
for file in [x for x in files if re.match(pattern, x)]:
os.remove(join(root, file))
def _set_development_path():
sys.path.insert(0, SOURCE_DIR)
def _run_sed_on_matching_files(pattern, sed_expression):
run("grep -lr '{}' {} | xargs sed -i '' -e '{}'".format(
pattern, BUNDLED_ROBOT_DIR, sed_expression))
def _after_distribution():
_log('Created:')
for path in os.listdir(DIST_DIR):
_log(os.path.abspath(os.path.join(DIST_DIR, path)))
_clean(keep_dist=True)
def _download_and_format_issues():
try:
from robot.utils import HtmlWriter, html_format
except ImportError:
sys.exit('creating release requires Robot Framework to be installed.')
writer = HtmlWriter(StringIO())
writer.element('h2', 'Release notes for %s' % VERSION)
writer.start('table', attrs={'border': '1'})
writer.start('tr')
for header in ['ID', 'Type', 'Priority', 'Summary']:
writer.element(
'td', html_format('*{}*'.format(header)), escape=False)
writer.end('tr')
issues = _get_issues()
base_url = 'http://github.com/robotframework/RIDE/issues/'
for issue in issues:
writer.start('tr')
link_tmpl = '<a href="{}{}">Issue {}</a>'
row = [link_tmpl.format(base_url, issue.number, issue.number),
_find_type(issue),
_find_priority(issue),
issue.title]
for cell in row:
writer.element('td', cell, escape=False)
writer.end('tr')
writer.end('table')
writer.element('p', 'Altogether %d issues.' % len(issues))
return writer.output.getvalue()
def _get_issues():
import getpass
from github3 import login
milestone = re.split('[ab-]', VERSION)[0]
  username = input('Enter GitHub username for downloading issues: ')
password = getpass.getpass(
'Github password for {user}: '.format(user=username))
gh = login(username, password=password)
repo = gh.repository('robotframework', 'RIDE')
milestone_number = _get_milestone(repo, milestone)
if milestone_number is None:
_log('milestone not found')
sys.exit(1)
issues = list(repo.iter_issues(milestone=milestone_number, state='closed'))
  issues.sort(key=functools.cmp_to_key(_issue_sorter))
return issues
def _issue_sorter(i1, i2):
prio_mapping = {
'critical': 0,
'high': 1,
'medium': 2,
'low': 3
}
prio1, prio2 = _find_priority(i1), _find_priority(i2)
  # Python 3 has no builtin cmp(); return a sign-compatible difference instead.
  return prio_mapping[prio1] - prio_mapping[prio2]
def _find_type(issue):
type_labels = [l.name for l in issue.iter_labels()
if l.name in ['enhancement', 'bug', 'task']]
return type_labels[0] if type_labels else 'Unknown type'
def _find_priority(issue):
prio_labels = [l.name for l in issue.iter_labels()
if l.name.startswith('prio')]
return prio_labels[0][5:] if prio_labels else 'Unknown priority'
def _get_milestone(repo, milestone_title):
existing_milestones = list(repo.iter_milestones())
milestone = [m for m in existing_milestones if m.title == milestone_title]
if milestone:
return milestone[0].number
return None
def _log(msg):
print(msg)
| 31.178042 | 89 | 0.634053 | 1,310 | 10,507 | 4.898473 | 0.264886 | 0.011999 | 0.014025 | 0.008727 | 0.108929 | 0.066074 | 0.056413 | 0.029921 | 0.029921 | 0.029921 | 0 | 0.004958 | 0.232226 | 10,507 | 336 | 90 | 31.270833 | 0.790505 | 0.110022 | 0 | 0.168724 | 0 | 0.004115 | 0.209081 | 0.008184 | 0 | 0 | 0 | 0 | 0.012346 | 1 | 0.115226 | false | 0.016461 | 0.102881 | 0 | 0.246914 | 0.004115 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
dc7a74a227b6f6a355739f5121cab4dc02844088 | 617 | py | Python | scripts/test_assign_files_to_processes.py | ziotom78/dacapo_calibration | 9537dc41352d761d408286da956abf19315fdccf | [
"MIT"
] | 1 | 2018-12-31T05:43:53.000Z | 2018-12-31T05:43:53.000Z | scripts/test_assign_files_to_processes.py | ziotom78/dacapo_calibration | 9537dc41352d761d408286da956abf19315fdccf | [
"MIT"
] | null | null | null | scripts/test_assign_files_to_processes.py | ziotom78/dacapo_calibration | 9537dc41352d761d408286da956abf19315fdccf | [
"MIT"
] | null | null | null | from index import TODFileInfo
from calibrate import assign_files_to_processes
files = [TODFileInfo(name, 0, 12, 12) for name in ('A.fits',
'B.fits',
'C.fits')]
result = assign_files_to_processes([10, 10, 8, 8], files)
for mpi_idx, proc in enumerate(result):
for subrange in proc:
print('Process #{0}: {1}, {2:2d} |{3}|'
.format(mpi_idx + 1,
subrange.file_info.file_name,
subrange.first_idx,
'-' * subrange.num_of_samples))
| 44.071429 | 61 | 0.495948 | 70 | 617 | 4.185714 | 0.557143 | 0.075085 | 0.088737 | 0.150171 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.045455 | 0.393841 | 617 | 13 | 62 | 47.461538 | 0.737968 | 0 | 0 | 0 | 0 | 0 | 0.081037 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.153846 | 0 | 0.153846 | 0.076923 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
dc7eaaa7da7719acd5edffdbd361ae0a797ae1df | 36,966 | py | Python | lib/googlecloudsdk/command_lib/run/serverless_operations.py | bshaffer/google-cloud-sdk | f587382fd112f238c0d6d5ca3dab8f52d2b5c5f9 | [
"Apache-2.0"
] | null | null | null | lib/googlecloudsdk/command_lib/run/serverless_operations.py | bshaffer/google-cloud-sdk | f587382fd112f238c0d6d5ca3dab8f52d2b5c5f9 | [
"Apache-2.0"
] | null | null | null | lib/googlecloudsdk/command_lib/run/serverless_operations.py | bshaffer/google-cloud-sdk | f587382fd112f238c0d6d5ca3dab8f52d2b5c5f9 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*- #
# Copyright 2019 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Allows you to write surfaces in terms of logical Serverless operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import collections
import contextlib
import copy
import functools
import glob
import os
import random
import string
from apitools.base.py import exceptions as api_exceptions
from googlecloudsdk.api_lib.run import build_template
from googlecloudsdk.api_lib.run import configuration
from googlecloudsdk.api_lib.run import domain_mapping
from googlecloudsdk.api_lib.run import k8s_object
from googlecloudsdk.api_lib.run import metrics
from googlecloudsdk.api_lib.run import revision
from googlecloudsdk.api_lib.run import route
from googlecloudsdk.api_lib.run import service
from googlecloudsdk.api_lib.util import apis_internal
from googlecloudsdk.api_lib.util import exceptions as exceptions_util
from googlecloudsdk.api_lib.util import waiter
from googlecloudsdk.command_lib.run import config_changes as config_changes_mod
from googlecloudsdk.command_lib.run import deployable as deployable_pkg
from googlecloudsdk.command_lib.run import exceptions as serverless_exceptions
from googlecloudsdk.command_lib.run import pretty_print
from googlecloudsdk.core import exceptions
from googlecloudsdk.core import log
from googlecloudsdk.core import resources
from googlecloudsdk.core.console import progress_tracker
from googlecloudsdk.core.util import retry
DEFAULT_ENDPOINT_VERSION = 'v1'
_NONCE_LENGTH = 10
# Used to force a new revision, and also to tie a particular request for changes
# to a particular created revision.
NONCE_LABEL = 'client.knative.dev/nonce'
# Wait 11 mins for each deployment. This is longer than the server timeout,
# making it more likely to get a useful error message from the server.
MAX_WAIT_MS = 660000
class UnknownAPIError(exceptions.Error):
pass
# Because some terminals cannot update multiple lines of output simultaneously,
# the order of conditions in this dictionary should match the order in which we
# expect cloud run resources to complete deployment.
def _ServiceStages():
"""Return a new mapping from conditions to Stages."""
return collections.OrderedDict([
('ConfigurationsReady', progress_tracker.Stage(
'Creating Revision...')),
('RoutesReady', progress_tracker.Stage('Routing traffic...'))])
@contextlib.contextmanager
def Connect(conn_context):
"""Provide a ServerlessOperations instance to use.
If we're using the GKE Serverless Add-on, connect to the relevant cluster.
Otherwise, connect to the right region of GSE.
Arguments:
conn_context: a context manager that yields a ConnectionInfo and manages a
dynamic context that makes connecting to serverless possible.
Yields:
A ServerlessOperations instance.
"""
with conn_context as conn_info:
yield ServerlessOperations(
apis_internal._GetClientInstance( # pylint: disable=protected-access
conn_info.api_name, conn_info.api_version,
ca_certs=conn_info.ca_certs),
conn_info.api_name, conn_info.api_version)
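

def _ExampleConnectUsage(conn_context, operation):
  """Sketch only: run one logical operation against a connected client.

  `conn_context` is assumed to come from the command surface (it is not
  constructed in this module); `operation` is any callable accepting a
  ServerlessOperations instance.
  """
  with Connect(conn_context) as client:
    return operation(client)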
class ConditionPoller(waiter.OperationPoller):
"""A poller for serverless deployment.
Takes in a reference to a StagedProgressTracker, and updates it with progress.
"""
def __init__(self, resource_getter, tracker, stages, dependencies=None):
"""Initialize the ConditionPoller.
Start any unblocked stages in the tracker immediately.
Arguments:
resource_getter: function, returns a resource with conditions.
tracker: a StagedProgressTracker to keep updated
stages: List[Stage], the stages in the tracker
dependencies: Dict[str, Set[str]], The dependencies between conditions.
The condition represented by each key can only start when the set of
conditions in the corresponding value have all completed.
"""
# _dependencies is a map of condition -> {preceding conditions}
# It is meant to be checked off as we finish things.
self._dependencies = copy.deepcopy(dependencies) if dependencies else {}
self._stages = stages
self._resource_getter = resource_getter
self._tracker = tracker
self._completed_stages = set()
self._started_stages = set()
self._failed_stages = set()
self._StartUnblocked()
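
  # Illustrative shape of the `dependencies` argument (not constructed in this
  # module): routing may only start once the configuration is ready, e.g.
  #
  #   {'RoutesReady': {'ConfigurationsReady'}}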
def _IsBlocked(self, condition):
return condition in self._dependencies
def IsDone(self, conditions):
"""Overrides.
Args:
conditions: A condition.Conditions object.
Returns:
a bool indicating whether `conditions` is terminal.
"""
if conditions is None:
return False
return conditions.IsTerminal()
def Poll(self, unused_ref):
"""Overrides.
Args:
unused_ref: A string representing the operation reference. Currently it
must be 'deploy'.
Returns:
A condition.Conditions object.
"""
conditions = self.GetConditions()
if conditions is None or not conditions.IsFresh():
return None
ready_message = conditions.DescriptiveMessage()
if ready_message:
self._tracker.UpdateHeaderMessage(ready_message)
for condition in conditions.TerminalSubconditions():
message = conditions[condition]['message']
status = conditions[condition]['status']
self._PossiblyUpdateMessage(condition, message, ready_message)
if status is None:
continue
elif status:
self._PossiblyCompleteStage(condition, message, conditions.IsReady())
else:
self._PossiblyFailStage(condition, message)
if conditions.IsReady():
self._tracker.UpdateHeaderMessage('Done.')
# TODO(b/120679874): Should not have to manually call Tick()
self._tracker.Tick()
elif conditions.IsFailed():
raise serverless_exceptions.DeploymentFailedError(ready_message)
return conditions
def _PossiblyUpdateMessage(self, condition, message, ready_message):
"""Update the stage message.
Args:
condition: str, The name of the status condition.
message: str, The new message to display
ready_message: str, The ready message we're displaying.
"""
if condition in self._completed_stages or not message:
return
if self._IsBlocked(condition):
return
if message != ready_message:
self._tracker.UpdateStage(self._stages[condition], message)
def _RecordStageComplete(self, condition):
"""Take care of the internal-to-this-class bookkeeping stage complete."""
self._completed_stages.add(condition)
# Unblock anything that was blocked on this.
unblocked = []
# Strategy: "check off" each dependency as we complete it by removing from
# the set in the value. When the set of dependencies is empty, remove the
# entry from the dict.
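# E.g. completing 'ConfigurationsReady' discards it from
# {'RoutesReady': {'ConfigurationsReady'}}; the set becomes empty, so
# 'RoutesReady' is unblocked and its entry is deleted below.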
for other_condition, requirements in self._dependencies.items():
requirements.discard(condition)
if not requirements:
unblocked.append(other_condition)
for other_condition in unblocked:
del self._dependencies[other_condition]
def _PossiblyCompleteStage(self, condition, message, ready):
"""Complete the stage if it's not already complete.
Make sure the necessary internal bookkeeping is done.
Args:
condition: str, The name of the condition whose stage should be completed.
message: str, The detailed message for the condition.
ready: boolean, True if the Ready condition is true.
"""
if condition in self._completed_stages:
return
# A blocked condition is likely to remain True (indicating the previous
# operation concerning it was successful) until the blocking condition(s)
# finish and it's time to switch to Unknown (the current operation
# concerning it is in progress). Don't mark those done before they switch to
# Unknown.
if condition not in self._started_stages:
return
self._RecordStageComplete(condition)
self._StartUnblocked()
self._tracker.CompleteStage(self._stages[condition], message)
def _StartUnblocked(self):
"""Call StartStage in the tracker for any not-started not-blocked tasks.
Record the fact that they're started in our internal bookkeeping.
"""
# The set of stages that aren't marked started and don't have unsatisfied
# dependencies are "newly unblocked".
newly_unblocked = (set(self._stages.keys())
- self._started_stages - set(self._dependencies.keys()))
for unblocked in newly_unblocked:
self._started_stages.add(unblocked)
self._tracker.StartStage(self._stages[unblocked])
# TODO(b/120679874): Should not have to manually call Tick()
self._tracker.Tick()
def _PossiblyFailStage(self, condition, message):
"""Possibly fail the stage.
Args:
condition: str, The name of the status whose stage failed.
message: str, The detailed message for the condition.
Raises:
DeploymentFailedError: If the 'Ready' condition failed.
"""
# Don't fail an already failed stage.
if condition in self._failed_stages:
return
stage = self._stages[condition]
self._failed_stages.add(condition)
self._tracker.FailStage(
stage,
serverless_exceptions.DeploymentFailedError(message),
message)
def GetResult(self, conditions):
"""Overrides.
Get terminal conditions as the polling result.
Args:
conditions: A condition.Conditions object.
Returns:
A condition.Conditions object.
"""
return conditions
def GetConditions(self):
"""Returns the resource conditions wrapped in condition.Conditions.
Returns:
A condition.Conditions object.
"""
resource = self._resource_getter()
if resource is None:
return None
return resource.conditions
def _Nonce():
"""Return a random string with unlikely collision to use as a nonce."""
return ''.join(
random.choice(string.ascii_lowercase) for _ in range(_NONCE_LENGTH))
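# The result is a string of _NONCE_LENGTH lowercase letters,
# e.g. 'kqzvbswjte' (illustrative value).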
class _NewRevisionForcingChange(config_changes_mod.ConfigChanger):
"""Forces a new revision to get created by posting a random nonce label."""
def __init__(self, nonce):
self._nonce = nonce
def AdjustConfiguration(self, config, metadata):
del metadata
config.revision_labels[NONCE_LABEL] = self._nonce
def _IsDigest(url):
"""Return true if the given image url is by-digest."""
return '@sha256:' in url
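# E.g. 'gcr.io/project/app@sha256:abc123' is by-digest, while
# 'gcr.io/project/app:latest' is by-tag (illustrative image names).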
class NonceBasedRevisionPoller(waiter.OperationPoller):
"""To poll for exactly one revision with the given nonce to appear."""
def __init__(self, operations, namespace_ref):
self._operations = operations
self._namespace = namespace_ref
def IsDone(self, revisions):
return bool(revisions)
def Poll(self, nonce):
return self._operations.GetRevisionsByNonce(self._namespace, nonce)
def GetResult(self, revisions):
if len(revisions) == 1:
return revisions[0]
return None
class _SwitchToDigestChange(config_changes_mod.ConfigChanger):
"""Switches the configuration from by-tag to by-digest."""
def __init__(self, base_revision):
self._base_revision = base_revision
def AdjustConfiguration(self, config, metadata):
if _IsDigest(self._base_revision.image):
return
if not self._base_revision.image_digest:
return
annotations = k8s_object.AnnotationsFromMetadata(
config.MessagesModule(), metadata)
# Mutates through to metadata: Save the by-tag user intent.
annotations[configuration.USER_IMAGE_ANNOTATION] = self._base_revision.image
config.image = self._base_revision.image_digest
class ServerlessOperations(object):
"""Client used by Serverless to communicate with the actual Serverless API.
"""
def __init__(self, client, api_name, api_version):
self._client = client
self._registry = resources.REGISTRY.Clone()
self._registry.RegisterApiByName(api_name, api_version)
self._temporary_build_template_registry = {}
@property
def _messages_module(self):
return self._client.MESSAGES_MODULE
def IsSourceBranch(self):
# TODO(b/112662240): Remove once the build field is public
return hasattr(self._client.MESSAGES_MODULE.ConfigurationSpec, 'build')
# For internal-only source testing. Codepaths inaccessible except on
# build from dev branch.
# TODO(b/112662240): productionize when source lands
def _TemporaryBuildTemplateRegistry(self, namespace_ref):
"""Return the list of build templates available, mocking the server."""
if namespace_ref.RelativeName() in self._temporary_build_template_registry:
return self._temporary_build_template_registry[
namespace_ref.RelativeName()]
detect = build_template.BuildTemplate.New(
self._client, 'default')
detect.name = 'detect'
detect.annotations[build_template.IGNORE_GLOB_ANNOTATION] = (
'["/*", "!package.json","!Pipfile.lock"]')
nodejs_8_9_4 = build_template.BuildTemplate.New(
self._client, 'default')
nodejs_8_9_4.name = 'nodejs_8_9_4'
nodejs_8_9_4.annotations[build_template.IGNORE_GLOB_ANNOTATION] = (
'["node_modules/"]')
nodejs_8_9_4.labels[build_template.LANGUAGE_LABEL] = 'nodejs'
nodejs_8_9_4.labels[build_template.VERSION_LABEL] = '8.9.4'
nodejs_8_9_4.annotations[build_template.DEV_IMAGE_ANNOTATION] = (
'gcr.io/local-run-demo/nodejs_dev:latest')
go_1_10_1 = build_template.BuildTemplate.New(
self._client, 'default')
go_1_10_1.name = 'go_1_10_1'
go_1_10_1.labels[build_template.LANGUAGE_LABEL] = 'go'
go_1_10_1.labels[build_template.VERSION_LABEL] = '1.10.1'
lst = [detect, nodejs_8_9_4, go_1_10_1]
self._temporary_build_template_registry[namespace_ref.RelativeName()] = lst
return lst
def Detect(self, namespace_ref, source_ref, function_entrypoint=None):
"""Detects important properties and returns a Deployable.
Args:
namespace_ref: Resource, the namespace to look for build templates in.
source_ref: source_ref.SourceRef, refers to some source code.
function_entrypoint: str, if set, indicates this is a function and names
the function to run.
Returns:
a new Deployable referring to the source
"""
template = self._DetectBuildTemplate(namespace_ref, source_ref)
if (source_ref.source_type == source_ref.SourceType.IMAGE
and not template and not function_entrypoint):
return deployable_pkg.ServerlessContainer(source_ref)
if not self.IsSourceBranch():
raise serverless_exceptions.UnknownDeployableError()
# TODO(b/112662240): Put at top when source lands.
from googlecloudsdk.command_lib.run import source_deployable # pylint: disable=g-import-not-at-top
if (function_entrypoint and
template and
source_ref.source_type == source_ref.SourceType.DIRECTORY):
return source_deployable.ServerlessFunction(source_ref, template,
function_entrypoint)
if (source_ref.source_type == source_ref.SourceType.DIRECTORY and
template and
not function_entrypoint):
return source_deployable.ServerlessApp(source_ref, template)
raise serverless_exceptions.UnknownDeployableError()
def GetRevision(self, revision_ref):
"""Get the revision.
Args:
revision_ref: Resource, revision to get.
Returns:
A revision.Revision object.
"""
messages = self._messages_module
revision_name = revision_ref.RelativeName()
request = messages.RunNamespacesRevisionsGetRequest(
name=revision_name)
try:
with metrics.record_duration(metrics.GET_REVISION):
response = self._client.namespaces_revisions.Get(request)
return revision.Revision(response, messages)
except api_exceptions.HttpNotFoundError:
return None
def Upload(self, deployable):
"""Upload the code for the given deployable."""
deployable.UploadFiles()
def _GetRoute(self, service_ref):
"""Return the relevant Route from the server, or None if 404."""
messages = self._messages_module
# GET the Route
route_name = self._registry.Parse(
service_ref.servicesId,
params={
'namespacesId': service_ref.namespacesId,
},
collection='run.namespaces.routes').RelativeName()
route_get_request = messages.RunNamespacesRoutesGetRequest(
name=route_name,
)
try:
with metrics.record_duration(metrics.GET_ROUTE):
route_get_response = self._client.namespaces_routes.Get(
route_get_request)
return route.Route(route_get_response, messages)
except api_exceptions.HttpNotFoundError:
return None
def _GetBuildTemplateByName(self, namespace_ref, name):
"""Return the BuildTemplate with the given name, or None."""
# Implementation to be replaced once the concept exists on the server.
for templ in self._TemporaryBuildTemplateRegistry(namespace_ref):
if templ.name == name:
return templ
return None
def _GetBuildTemplateByLanguageVersion(self, namespace_ref,
language, version):
"""Return the BuildTemplate with the given language & version, or None."""
# Implementation to be replaced once the concept exists on the server.
for templ in self._TemporaryBuildTemplateRegistry(namespace_ref):
if (templ.language, templ.version) == (language, version):
return templ
return None
def WaitForCondition(self, getter):
"""Wait for a configuration to be ready in latest revision."""
stages = _ServiceStages()
with progress_tracker.StagedProgressTracker(
'Deploying...',
stages.values(),
failure_message='Deployment failed') as tracker:
config_poller = ConditionPoller(getter, tracker, stages, dependencies={
'RoutesReady': {'ConfigurationsReady'},
})
try:
conditions = waiter.PollUntilDone(
config_poller, None,
wait_ceiling_ms=1000)
except retry.RetryException as err:
conditions = config_poller.GetConditions()
# err.message already indicates timeout. Check ready_cond_type for more
# information.
msg = conditions.DescriptiveMessage() if conditions else None
if msg:
log.error('Still waiting: {}'.format(msg))
raise err
if not conditions.IsReady():
raise serverless_exceptions.ConfigurationError(
conditions.DescriptiveMessage())
def GetServiceUrl(self, service_ref):
"""Return the main URL for the service."""
serv = self.GetService(service_ref)
if serv.domain:
return serv.domain
# Older versions of knative don't populate domain on Service, only Route.
serv_route = self._GetRoute(service_ref)
return serv_route.domain
def GetActiveRevisions(self, service_ref):
"""Return the actively serving revisions.
Args:
service_ref: the service Resource reference.
Returns:
{str, int}, A dict mapping revisionID to its traffic percentage target.
Raises:
serverless_exceptions.NoActiveRevisionsError: if no serving revisions
were found.
"""
serv_route = self._GetRoute(service_ref)
active_revisions = serv_route.active_revisions
if len(active_revisions) < 1:
raise serverless_exceptions.NoActiveRevisionsError()
return serv_route.active_revisions
def _DetectBuildTemplate(self, namespace_ref, source_ref):
"""Determine the appropriate build template from source.
Args:
namespace_ref: Resource, namespace to find build templates in.
source_ref: SourceRef, The service's image repo or source directory.
Returns:
The detected build template, or None for images.
"""
if source_ref.source_type == source_ref.SourceType.IMAGE:
return None
elif glob.glob(os.path.join(source_ref.source_path, '*.go')):
return self._GetBuildTemplateByName(namespace_ref, 'go_1_10_1')
else:
return self._GetBuildTemplateByName(namespace_ref, 'nodejs_8_9_4')
def ListServices(self, namespace_ref):
messages = self._messages_module
request = messages.RunNamespacesServicesListRequest(
parent=namespace_ref.RelativeName())
with metrics.record_duration(metrics.LIST_SERVICES):
response = self._client.namespaces_services.List(request)
return [service.Service(item, messages) for item in response.items]
def ListConfigurations(self, namespace_ref):
messages = self._messages_module
request = messages.RunNamespacesConfigurationsListRequest(
parent=namespace_ref.RelativeName())
with metrics.record_duration(metrics.LIST_CONFIGURATIONS):
response = self._client.namespaces_configurations.List(request)
return [configuration.Configuration(item, messages)
for item in response.items]
def ListRoutes(self, namespace_ref):
messages = self._messages_module
request = messages.RunNamespacesRoutesListRequest(
parent=namespace_ref.RelativeName())
with metrics.record_duration(metrics.LIST_ROUTES):
response = self._client.namespaces_routes.List(request)
return [route.Route(item, messages) for item in response.items]
def GetService(self, service_ref):
"""Return the relevant Service from the server, or None if 404."""
messages = self._messages_module
service_get_request = messages.RunNamespacesServicesGetRequest(
name=service_ref.RelativeName())
try:
with metrics.record_duration(metrics.GET_SERVICE):
service_get_response = self._client.namespaces_services.Get(
service_get_request)
return service.Service(service_get_response, messages)
except api_exceptions.HttpNotFoundError:
return None
def GetConfiguration(self, service_or_configuration_ref):
"""Return the relevant Configuration from the server, or None if 404."""
messages = self._messages_module
if hasattr(service_or_configuration_ref, 'servicesId'):
name = self._registry.Parse(
service_or_configuration_ref.servicesId,
params={
'namespacesId': service_or_configuration_ref.namespacesId,
},
collection='run.namespaces.configurations').RelativeName()
else:
name = service_or_configuration_ref.RelativeName()
configuration_get_request = (
messages.RunNamespacesConfigurationsGetRequest(
name=name))
try:
with metrics.record_duration(metrics.GET_CONFIGURATION):
configuration_get_response = self._client.namespaces_configurations.Get(
configuration_get_request)
return configuration.Configuration(configuration_get_response, messages)
except api_exceptions.HttpNotFoundError:
return None
def GetRoute(self, service_or_route_ref):
"""Return the relevant Route from the server, or None if 404."""
messages = self._messages_module
if hasattr(service_or_route_ref, 'servicesId'):
name = self._registry.Parse(
service_or_route_ref.servicesId,
params={
'namespacesId': service_or_route_ref.namespacesId,
},
collection='run.namespaces.routes').RelativeName()
else:
name = service_or_route_ref.RelativeName()
route_get_request = (
messages.RunNamespacesRoutesGetRequest(
name=name))
try:
with metrics.record_duration(metrics.GET_ROUTE):
route_get_response = self._client.namespaces_routes.Get(
route_get_request)
return route.Route(route_get_response, messages)
except api_exceptions.HttpNotFoundError:
return None
def DeleteService(self, service_ref):
"""Delete the provided Service.
Args:
service_ref: Resource, a reference to the Service to delete
Raises:
ServiceNotFoundError: if provided service is not found.
"""
messages = self._messages_module
service_name = service_ref.RelativeName()
service_delete_request = messages.RunNamespacesServicesDeleteRequest(
name=service_name,
)
try:
with metrics.record_duration(metrics.DELETE_SERVICE):
self._client.namespaces_services.Delete(service_delete_request)
except api_exceptions.HttpNotFoundError:
raise serverless_exceptions.ServiceNotFoundError(
'Service [{}] could not be found.'.format(service_ref.servicesId))
def DeleteRevision(self, revision_ref):
"""Delete the provided Revision.
Args:
revision_ref: Resource, a reference to the Revision to delete
Raises:
RevisionNotFoundError: if provided revision is not found.
"""
messages = self._messages_module
revision_name = revision_ref.RelativeName()
request = messages.RunNamespacesRevisionsDeleteRequest(
name=revision_name)
try:
with metrics.record_duration(metrics.DELETE_REVISION):
self._client.namespaces_revisions.Delete(request)
except api_exceptions.HttpNotFoundError:
raise serverless_exceptions.RevisionNotFoundError(
'Revision [{}] could not be found.'.format(revision_ref.revisionsId))
def GetRevisionsByNonce(self, namespace_ref, nonce):
"""Return all revisions with the given nonce."""
messages = self._messages_module
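# The resulting selector reads e.g.
# 'client.knative.dev/nonce = kqzvbswjte' (illustrative nonce value).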
request = messages.RunNamespacesRevisionsListRequest(
parent=namespace_ref.RelativeName(),
labelSelector='{} = {}'.format(NONCE_LABEL, nonce))
response = self._client.namespaces_revisions.List(request)
return [revision.Revision(item, messages) for item in response.items]
def _GetBaseRevision(self, config, metadata, status):
"""Return a Revision for use as the "base revision" for a change.
When making a change that should not affect the code running, the
"base revision" is the revision that we should lock the code to - it's where
we get the digest for the image to run.
Getting this revision:
* If there's a nonce in the revisionTemplate metadata, use that
* If that query produces >1 or produces 0 after a short timeout, use
the latestCreatedRevision in status.
Arguments:
config: Configuration, the configuration to get the base revision of.
May have been derived from a Service.
metadata: ObjectMeta, the metadata from the top-level object
status: Union[ConfigurationStatus, ServiceStatus], the status of the top-
level object.
Returns:
The base revision of the configuration, or None if it is not available by
nonce and the control plane has not implemented latestCreatedRevisionName
on the Service object yet.
"""
base_revision_nonce = config.revision_labels.get(NONCE_LABEL, None)
base_revision = None
if base_revision_nonce:
try:
namespace_ref = self._registry.Parse(
metadata.namespace,
collection='run.namespaces')
poller = NonceBasedRevisionPoller(self, namespace_ref)
base_revision = poller.GetResult(waiter.PollUntilDone(
poller, base_revision_nonce,
sleep_ms=500, max_wait_ms=2000))
except retry.WaitException:
pass
# Nonce polling didn't work, because some client didn't post one or didn't
# change one. Fall back to the (slightly racy) `latestCreatedRevisionName`.
if not base_revision:
# TODO(b/117663680) Getattr -> normal access.
if getattr(status, 'latestCreatedRevisionName', None):
# Get by latestCreatedRevisionName
revision_ref = self._registry.Parse(
status.latestCreatedRevisionName,
params={'namespacesId': metadata.namespace},
collection='run.namespaces.revisions')
base_revision = self.GetRevision(revision_ref)
return base_revision
def _EnsureImageDigest(self, serv, config_changes):
"""Make config_changes include switch by-digest image if not so already."""
if not _IsDigest(serv.configuration.image):
base_revision = self._GetBaseRevision(
serv.configuration, serv.metadata, serv.status)
if base_revision:
config_changes.append(_SwitchToDigestChange(base_revision))
def _UpdateOrCreateService(self, service_ref, config_changes, with_code,
private_endpoint=None):
"""Apply config_changes to the service. Create it if necessary.
Arguments:
service_ref: Reference to the service to create or update
config_changes: list of ConfigChanger to modify the service with
with_code: bool, True if the config_changes contains code to deploy.
We can't create the service if we're not deploying code.
private_endpoint: bool, True if creating a new Service for
Cloud Run on GKE that should only be addressable from within the
cluster. False if it should be publicly addressable. None if
its existing visibility should remain unchanged.
Returns:
The Service object we created or modified.
"""
nonce = _Nonce()
config_changes = [_NewRevisionForcingChange(nonce)] + config_changes
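# The nonce label both forces a new revision and lets _GetBaseRevision tie
# this request to the revision it creates (see NONCE_LABEL above).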
messages = self._messages_module
# GET the Service
serv = self.GetService(service_ref)
try:
if serv:
if not with_code:
# Avoid changing the running code by making the new revision by digest
self._EnsureImageDigest(serv, config_changes)
if private_endpoint is None:
# Don't change the existing service visibility
pass
elif private_endpoint:
serv.labels[service.ENDPOINT_VISIBILITY] = service.CLUSTER_LOCAL
else:
del serv.labels[service.ENDPOINT_VISIBILITY]
# PUT the changed Service
for config_change in config_changes:
config_change.AdjustConfiguration(serv.configuration, serv.metadata)
serv_name = service_ref.RelativeName()
serv_update_req = (
messages.RunNamespacesServicesReplaceServiceRequest(
service=serv.Message(),
name=serv_name))
with metrics.record_duration(metrics.UPDATE_SERVICE):
updated = self._client.namespaces_services.ReplaceService(
serv_update_req)
return service.Service(updated, messages)
else:
if not with_code:
raise serverless_exceptions.ServiceNotFoundError(
'Service [{}] could not be found.'.format(service_ref.servicesId))
# POST a new Service
new_serv = service.Service.New(self._client, service_ref.namespacesId,
private_endpoint)
new_serv.name = service_ref.servicesId
pretty_print.Info('Creating new service [{bold}{service}{reset}]',
service=new_serv.name)
parent = service_ref.Parent().RelativeName()
for config_change in config_changes:
config_change.AdjustConfiguration(new_serv.configuration,
new_serv.metadata)
serv_create_req = (
messages.RunNamespacesServicesCreateRequest(
service=new_serv.Message(),
parent=parent))
with metrics.record_duration(metrics.CREATE_SERVICE):
raw_service = self._client.namespaces_services.Create(
serv_create_req)
return service.Service(raw_service, messages)
except api_exceptions.HttpBadRequestError as e:
error_payload = exceptions_util.HttpErrorPayload(e)
if error_payload.field_violations:
if (serverless_exceptions.BadImageError.IMAGE_ERROR_FIELD
in error_payload.field_violations):
exceptions.reraise(serverless_exceptions.BadImageError(e))
exceptions.reraise(e)
except api_exceptions.HttpNotFoundError as e:
# TODO(b/118339293): List available regions to check whether provided
# region is invalid or not.
raise serverless_exceptions.DeploymentFailedError(
'Deployment endpoint was not found. Perhaps the provided '
'region was invalid. Set the `run/region` property to a valid '
'region and retry. Ex: `gcloud config set run/region us-central1`')
def ReleaseService(self, service_ref, config_changes, asyn=False,
private_endpoint=None):
"""Change the given service in prod using the given config_changes.
Ensures a new revision is always created, even if the spec of the revision
has not changed.
Arguments:
service_ref: Resource, the service to release
config_changes: list, objects that implement AdjustConfiguration().
asyn: bool, if True release asynchronously.
private_endpoint: bool or None, the endpoint visibility to set; see
_UpdateOrCreateService for the meaning of each value.
"""
with_code = any(
isinstance(c, deployable_pkg.Deployable) for c in config_changes)
self._UpdateOrCreateService(
service_ref, config_changes, with_code, private_endpoint)
if not asyn:
getter = functools.partial(self.GetService, service_ref)
self.WaitForCondition(getter)
def ListRevisions(self, namespace_ref, service_name):
"""List all revisions for the given service.
Args:
namespace_ref: Resource, namespace to list revisions in
service_name: str, The service for which to list revisions.
Returns:
A list of revisions for the given service.
"""
messages = self._messages_module
request = messages.RunNamespacesRevisionsListRequest(
parent=namespace_ref.RelativeName(),
)
if service_name is not None:
# For now, same as the service name, and keeping compatible with
# 'service-less' operation.
request.labelSelector = 'serving.knative.dev/service = {}'.format(
service_name)
with metrics.record_duration(metrics.LIST_REVISIONS):
response = self._client.namespaces_revisions.List(request)
return [revision.Revision(item, messages) for item in response.items]
def ListDomainMappings(self, namespace_ref):
"""List all domain mappings.
Args:
namespace_ref: Resource, namespace to list domain mappings in.
Returns:
A list of domain mappings.
"""
messages = self._messages_module
request = messages.RunNamespacesDomainmappingsListRequest(
parent=namespace_ref.RelativeName())
with metrics.record_duration(metrics.LIST_DOMAIN_MAPPINGS):
response = self._client.namespaces_domainmappings.List(request)
return [domain_mapping.DomainMapping(item, messages)
for item in response.items]
def CreateDomainMapping(self, domain_mapping_ref, service_name):
"""Create a domain mapping.
Args:
domain_mapping_ref: Resource, domainmapping resource.
service_name: str, the service to which to map the domain.
Returns:
A domain_mapping.DomainMapping object.
"""
messages = self._messages_module
new_mapping = domain_mapping.DomainMapping.New(
self._client, domain_mapping_ref.namespacesId)
new_mapping.name = domain_mapping_ref.domainmappingsId
new_mapping.route_name = service_name
request = messages.RunNamespacesDomainmappingsCreateRequest(
domainMapping=new_mapping.Message(),
parent=domain_mapping_ref.Parent().RelativeName())
with metrics.record_duration(metrics.CREATE_DOMAIN_MAPPING):
response = self._client.namespaces_domainmappings.Create(request)
return domain_mapping.DomainMapping(response, messages)
def DeleteDomainMapping(self, domain_mapping_ref):
"""Delete a domain mapping.
Args:
domain_mapping_ref: Resource, domainmapping resource.
"""
messages = self._messages_module
request = messages.RunNamespacesDomainmappingsDeleteRequest(
name=domain_mapping_ref.RelativeName())
with metrics.record_duration(metrics.DELETE_DOMAIN_MAPPING):
self._client.namespaces_domainmappings.Delete(request)
def GetDomainMapping(self, domain_name):
"""Get a domain mapping.
Args:
domain_name: str, domain name.
Returns:
A domain_mapping.DomainMapping object.
"""
messages = self._messages_module
request = messages.RunNamespacesDomainmappingsGetRequest(
name=domain_name)
with metrics.record_duration(metrics.GET_DOMAIN_MAPPING):
response = self._client.namespaces_domainmappings.Get(request)
return domain_mapping.DomainMapping(response, messages)
| 37.643585 | 103 | 0.71885 | 4,306 | 36,966 | 5.986763 | 0.165351 | 0.015361 | 0.013965 | 0.017146 | 0.323791 | 0.253889 | 0.191784 | 0.156911 | 0.121688 | 0.093642 | 0 | 0.006102 | 0.20646 | 36,966 | 981 | 104 | 37.681957 | 0.872707 | 0.291592 | 0 | 0.227599 | 0 | 0 | 0.037687 | 0.01053 | 0 | 0 | 0 | 0.006116 | 0 | 1 | 0.098566 | false | 0.005376 | 0.060932 | 0.008961 | 0.277778 | 0.005376 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
dc7f3fd94ce0ddccf878e82df7940edbca9684dd | 1,363 | py | Python | jazz_scraper/spiders/jazz.py | palazzem/umbria-jazz-scraper | 196a3c866fc3bc5fa59fb7628d4d594717bb3979 | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | jazz_scraper/spiders/jazz.py | palazzem/umbria-jazz-scraper | 196a3c866fc3bc5fa59fb7628d4d594717bb3979 | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | jazz_scraper/spiders/jazz.py | palazzem/umbria-jazz-scraper | 196a3c866fc3bc5fa59fb7628d4d594717bb3979 | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | # -*- coding: utf-8 -*-
import scrapy
from ..items import JazzScraperItem
class JazzSpider(scrapy.Spider):
name = "jazz"
allowed_domains = ["umbriajazz.com"]
start_urls = (
'http://www.umbriajazz.com/pagine/programma-umbria-jazz',
)
def parse(self, response):
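# Each accordion list item is one festival day: an <h1> holds the date,
# followed by two tables of concerts (the first indoor, the second outdoor).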
for days in response.xpath("//div[@id='accordion']//ul//li"):
date = days.xpath(".//h1/text()").extract()[0]
indoor = days.xpath(".//table[1]")
outdoor = days.xpath(".//table[2]")
for row in indoor.xpath(".//tr"):
concert = row.xpath(".//td").extract()
time = concert[0]
description = concert[1]
item = JazzScraperItem()
item['date'] = "%s %s" % (date, time)
item['description'] = description
item['outdoor'] = False
yield item
for row in outdoor.xpath(".//tr"):
concert = row.xpath(".//td").extract()
if len(concert) == 2:
time = concert[0]
description = concert[1]
item = JazzScraperItem()
item['date'] = "%s %s" % (date, time)
item['description'] = description
item['outdoor'] = True
yield item
| 31.697674 | 69 | 0.470286 | 130 | 1,363 | 4.915385 | 0.461538 | 0.042254 | 0.043818 | 0.053208 | 0.425665 | 0.425665 | 0.425665 | 0.328639 | 0.328639 | 0.328639 | 0 | 0.011737 | 0.374908 | 1,363 | 42 | 70 | 32.452381 | 0.738263 | 0.015407 | 0 | 0.4375 | 0 | 0 | 0.156716 | 0.022388 | 0 | 0 | 0 | 0 | 0 | 1 | 0.03125 | false | 0 | 0.0625 | 0 | 0.21875 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
dc820381281a332f1868daaf42c701517f6456ca | 1,080 | py | Python | reinforcement_learning/simulator.py | thominj/reinforcement-learning | 8c2ee2fac086d4fff4f4842d123d03f5c1f89c02 | [
"MIT"
] | null | null | null | reinforcement_learning/simulator.py | thominj/reinforcement-learning | 8c2ee2fac086d4fff4f4842d123d03f5c1f89c02 | [
"MIT"
] | null | null | null | reinforcement_learning/simulator.py | thominj/reinforcement-learning | 8c2ee2fac086d4fff4f4842d123d03f5c1f89c02 | [
"MIT"
] | null | null | null |
class Simulator():
def __init__(
self,
environment_generator: 'base.EnvironmentGenerator',
agent: 'agents.Agent',
view_model: 'view_models.ViewModel',
num_scenarios: int,
num_steps: int):
self.environment_generator = environment_generator
self.agent = agent
self.view_model = view_model
self.num_scenarios = num_scenarios
self.num_steps = num_steps
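# Illustrative usage (collaborator objects assumed to satisfy the
# annotations above):
#   sim = Simulator(environment_generator=gen, agent=agent,
#                   view_model=view, num_scenarios=100, num_steps=200)
#   sim.run()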
def run(self):
# Loop over number of scenarios
for scenario in range(self.num_scenarios):
environment = self.environment_generator.new_environment()
for step in range(self.num_steps):
action = self.agent.choose_action(environment.state)
environment.update(action)
self.agent.learn(environment.state, environment.reward)
self.view_model.update(
scenario_count=scenario,
step_count=step,
environment=environment,
agent=self.agent) | 33.75 | 71 | 0.590741 | 106 | 1,080 | 5.783019 | 0.349057 | 0.130506 | 0.117455 | 0.045677 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.335185 | 1,080 | 32 | 72 | 33.75 | 0.85376 | 0.026852 | 0 | 0 | 0 | 0 | 0.055291 | 0.043851 | 0 | 0 | 0 | 0 | 0 | 1 | 0.08 | false | 0 | 0 | 0 | 0.12 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
dc826054a23704245c8e7f73c2ed7b0289e735a9 | 1,639 | py | Python | app/__init__.py | Orenoid/telegram_account_bot | d5e470ddd843698d652f3bedf51a2a54404d1810 | [
"MIT"
] | 1 | 2022-02-24T14:46:18.000Z | 2022-02-24T14:46:18.000Z | app/__init__.py | Orenoid/telegram_account_bot | d5e470ddd843698d652f3bedf51a2a54404d1810 | [
"MIT"
] | 2 | 2020-03-09T06:18:42.000Z | 2022-02-28T00:38:53.000Z | app/__init__.py | Orenoid/telegram_account_bot | d5e470ddd843698d652f3bedf51a2a54404d1810 | [
"MIT"
] | 2 | 2021-05-18T05:48:19.000Z | 2021-11-06T07:03:46.000Z | import logging
from flask import Flask
from flask.logging import default_handler
from app.api import api_bp
from app.models import db
from app.utils.middleware import log_request_params, log_response
from app.webhook import telegram_bp
from app.utils import multilog
from app.utils.error import handle_exception
from config import config_map
def create_app(config_name: str):
app = Flask(__name__)
app.config.from_object(config_map[config_name])
@app.route('/', endpoint='ping_pong')
def ping_pong():
return "I'm still alive.\n", 200
db.init_app(app)
register_logger(app)
register_hooks(app)
register_blueprints(app)
register_error_handlers(app)
return app
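# Illustrative usage (valid names depend on the keys defined in config_map):
#   app = create_app('development')
#   app.run()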
def register_blueprints(app: Flask):
app.register_blueprint(api_bp, url_prefix='/api')
app.register_blueprint(telegram_bp, url_prefix='/telegram')
def register_error_handlers(app: Flask):
app.register_error_handler(Exception, handle_exception)
def register_hooks(app: Flask):
app.before_request(log_request_params)
app.after_request(log_response)
def register_logger(app: Flask):
# Write logs to a file
app.logger.removeHandler(default_handler)
handler = multilog.MyLoggerHandler('flask', encoding='UTF-8', when='H')
logging_format = logging.Formatter(
'%(asctime)s - %(levelname)s - %(filename)s - %(lineno)s - %(message)s'
)
handler.setFormatter(logging_format)
handler.setLevel(logging.DEBUG)
app.logger.addHandler(handler)
# Also write logs to the console
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
app.logger.addHandler(ch)
app.logger.setLevel(logging.DEBUG)
| 26.435484 | 79 | 0.738865 | 220 | 1,639 | 5.290909 | 0.340909 | 0.066151 | 0.030928 | 0.041237 | 0.06701 | 0.06701 | 0 | 0 | 0 | 0 | 0 | 0.002896 | 0.157413 | 1,639 | 61 | 80 | 26.868852 | 0.839971 | 0.007322 | 0 | 0 | 0 | 0.023256 | 0.074507 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.139535 | false | 0 | 0.232558 | 0.023256 | 0.418605 | 0.093023 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
dc8ce3f1c68b0fffe3bc775df97faf034c55ad8f | 11,274 | py | Python | app/api/data/friend.py | rummens1337/federated-social-network | e9b15342e7640a0b154787303c8660fa75acba14 | [
"MIT"
] | null | null | null | app/api/data/friend.py | rummens1337/federated-social-network | e9b15342e7640a0b154787303c8660fa75acba14 | [
"MIT"
] | null | null | null | app/api/data/friend.py | rummens1337/federated-social-network | e9b15342e7640a0b154787303c8660fa75acba14 | [
"MIT"
] | null | null | null | """
This file contains api routes corresponding to a friend relations
on a data server.
"""
from urllib.parse import urlparse
from flask import Blueprint, request
from flask_jwt_extended import create_access_token, get_jwt_identity
import requests
from app.api import jwt_required_custom
from app.api.utils import good_json_response, bad_json_response
from app.database import users, posts, uploads, friends
from app.utils import ping, get_central_ip, get_own_ip, get_user_ip
blueprint = Blueprint('data_friend', __name__)
@blueprint.route('/all')
@jwt_required_custom
def all_friends():
"""Return all the friends of a user.
Returns:
All the friends of a user.
"""
username = get_jwt_identity()
if not users.exists(username=username):
return bad_json_response('user not found')
return good_json_response({
'friends': get_friends(username)
})
def get_friends(username):
"""Return all the friends of a user.
Note:
Make sure username is validated before.
Returns:
All the friends of a user.
"""
friendships = friends.export('friend', username=username, accepted=1)
friendships2 = friends.export('username', friend=username, accepted=1)
friends_array = [
{
'username': item
}
for item in friendships + friendships2
]
return friends_array
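# The returned list looks like e.g.
# [{'username': 'alice'}, {'username': 'bob'}] (illustrative usernames).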
@blueprint.route('/requests')
@jwt_required_custom
def requests_open():
"""Return all the friend requests of a user.
Including accepted and sender information.
If sender == 0: means that the request can be
accepted by the user.
If sender == 1: means that the request is pending.
Returns:
All the friend requests pending of a user.
"""
username = get_jwt_identity()
if not users.exists(username=username):
return bad_json_response('user not found')
friendships = friends.export('friend', 'accepted', 'sender', 'id',
username=username, accepted=0, sender=0)
friendships2 = friends.export('username', 'accepted', 'sender', 'id',
friend=username, accepted=0, sender=1)
friends_array = [
{
'username': item[0],
'sender': item[2],
'id': item[3]
}
for item in friendships + friendships2
]
return good_json_response({
'friends': friends_array
})
@blueprint.route('/request/insert', methods=['POST'])
@jwt_required_custom
def request_insert():
"""Insert receiving request from other data server.
Note:
Don't use directly with the frontend. Use /add in send functions
instead.
Returns:
JSON response with the status of the request.
"""
username = request.form['username']
friend = request.form['friend']
if not users.exists(username=username):
return bad_json_response('user not found')
# Check if friendship already exists
# Return a good json response, because the friend can be on
# the same data server.
if friends.exists(username=username, friend=friend) \
or friends.exists(username=friend, friend=username):
return good_json_response('friendship already exists')
# Get the friend's data server address and check if friend exists
friend_address = get_user_ip(friend)
if not friend_address:
return bad_json_response('user not found in central database')
friends.insert(username=username, friend=friend, sender=0)
return good_json_response('Friendrequest inserted')
@blueprint.route('/request/accept', methods=['POST'])
@jwt_required_custom
def request_accept():
"""Handles friend request on accept.
Note:
Don't use directly with the frontend. Use /add in send functions
instead.
Returns:
JSON response with the status of the request.
"""
username = request.form['username']
friend = request.form['friend']
accept = request.form['accept']
if friend != get_jwt_identity():
return bad_json_response('Authentication error')
if not friends.exists(username=username, friend=friend):
return bad_json_response('friendship request does not exist')
request_db = friends.export_one('accepted', 'sender', username=username,
friend=friend)
# Check if already accepted.
if int(request_db[0]) == 1:
return bad_json_response('Request already accepted')
# Only accept if it was the sender.
if int(request_db[1]) != 1:
return bad_json_response('User sent the request him/herself')
# Update friendship.
if int(accept) == 1:
friends.update({'accepted': 1}, username=username, friend=friend)
else:
friends.delete(username=username, friend=friend)
return good_json_response('Friend request accepted or declined')
@blueprint.route('/request/delete', methods=['POST'])
@jwt_required_custom
def request_delete():
"""Handles friend request on delete.
Returns:
JSON response with the status of the request.
"""
username = request.form['username']
friend = request.form['friend']
if username == friend:
return bad_json_response('Username equals friend')
if username != get_jwt_identity() and friend != get_jwt_identity():
return bad_json_response('Not allowed')
friends.delete(username=username, friend=friend)
friends.delete(username=friend, friend=username)
return good_json_response('Friend request deleted')
@blueprint.route('/add', methods=['POST'])
@jwt_required_custom
def add():
"""Adds a friendship between two users.
Sets sender to 1 for the user sending the request; accepted is set to 0.
Returns:
JSON response with the status of the request.
"""
username = get_jwt_identity()
friend = request.form['friend']
if username == friend:
return bad_json_response('Friend equals user')
if not users.exists(username=username):
return bad_json_response('user not found')
# Check if friendship already exists.
if friends.exists(username=username, friend=friend) \
or friends.exists(username=friend, friend=username):
return bad_json_response('friendship already exists')
# Get the friend's data server address and check if friend exists.
friend_address = get_user_ip(friend)
if not friend_address:
return bad_json_response('user not found in central database')
# Add the friend in current dataserver's database.
if not friends.insert(username=username, friend=friend, sender=1):
return bad_json_response('error adding friend to local database')
# Register friend in other database.
data = {
'username': friend,
'friend': username
}
try:
response = requests.post(
friend_address + '/api/friend/request/insert',
data=data,
headers=request.headers
).json()
if response['success']:
return good_json_response('Friend request sent')
except BaseException:
friends.delete(username=username, friend=friend)
return bad_json_response('Error while inserting')
return bad_json_response('friend error')
@blueprint.route('/accept', methods=['POST'])
@jwt_required_custom
def accept():
"""Handles friend request on accept.
Note:
Don't use directly with the frontend. Use /add in send functions
instead.
Returns:
JSON reponse with status of the request.
"""
username = get_jwt_identity()
request_id = request.form['id']
accept = request.form['accept']
# Check if friendship exists.
if not friends.exists(id=request_id):
return bad_json_response('friendship not found')
# Notify the other user's server that the request is accepted.
# Can only accept if the logged-in user is the friend (request receiver).
request_db = friends.export_one('username', 'friend', 'accepted', 'sender',
id=request_id)
friend = request_db[1]
# Check if already accepted.
if int(request_db[2]) == 1:
return bad_json_response('Request already accepted')
# Get the friend's data server address and check if friend exists.
friend_address = get_user_ip(friend)
if not friend_address:
return bad_json_response('user not found in central database')
if urlparse(get_own_ip()).netloc == urlparse(friend_address).netloc:
if username != friend or request_db[3] != 1:
return bad_json_response('Friend undefined error')
else:
# Check if not the sender and if the username is allowed to
# accept the current request. If so, send the request to
# the other data server.
if request_db[3] == 1 or request_db[0] != username:
return bad_json_response(
'User sent the request him/herself or not authenticated'
)
data = {
'username': friend,
'friend': username,
'accept': accept
}
try:
response = requests.post(
friend_address + '/api/friend/request/accept',
data=data,
headers=request.headers
).json()
if not response['success']:
return bad_json_response(response['reason'])
except BaseException:
return bad_json_response('Error contacting remote server while accepting')
# Update friendship in the data server's own database.
if int(accept) == 1:
friends.update({'accepted': 1}, id=request_id)
else:
friends.delete(id=request_id)
return good_json_response('Friend request accepted or declined')
@blueprint.route('/delete', methods=['POST'])
@jwt_required_custom
def delete():
"""Handles friend request on delete.
Returns:
JSON response with the status of the request.
"""
username = get_jwt_identity()
friend = request.form['friend']
# Check if friendship exists.
if not friends.exists(username=username, friend=friend) \
and not friends.exists(username=friend, friend=username):
return bad_json_response('friendship does not exist')
# Get the friend's data server address and check if friend exists.
friend_address = get_user_ip(friend)
if not friend_address:
return bad_json_response('user not found in central database')
# Delete friendship in other data server.
if urlparse(get_own_ip()).netloc != urlparse(friend_address).netloc:
data = {
'username': friend,
'friend': username
}
try:
response = requests.post(
friend_address + '/api/friend/request/delete',
data=data,
headers=request.headers
).json()
if not response['success']:
return bad_json_response('Remote server failed to delete the friendship')
except BaseException:
return bad_json_response('Error contacting remote server while deleting')
# Delete in this database.
friends.delete(username=username, friend=friend)
friends.delete(username=friend, friend=username)
return good_json_response('Friend deleted')
| 30.552846 | 79 | 0.653717 | 1,375 | 11,274 | 5.228364 | 0.122182 | 0.065099 | 0.060509 | 0.081792 | 0.657671 | 0.613159 | 0.576019 | 0.508972 | 0.433857 | 0.426624 | 0 | 0.004625 | 0.252084 | 11,274 | 368 | 80 | 30.63587 | 0.84796 | 0.219177 | 0 | 0.492611 | 0 | 0 | 0.151136 | 0.009131 | 0 | 0 | 0 | 0 | 0 | 1 | 0.044335 | false | 0 | 0.039409 | 0 | 0.270936 | 0.049261 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
dc8de08f3ec7a3431962aa722a22c9cd5a0e0bbf | 16,013 | py | Python | test.py | DevRx28/pokemon-type | 2f62d4b88856dcd9aff79bdda993a4ddc093d7b7 | [
"Apache-2.0"
] | null | null | null | test.py | DevRx28/pokemon-type | 2f62d4b88856dcd9aff79bdda993a4ddc093d7b7 | [
"Apache-2.0"
] | null | null | null | test.py | DevRx28/pokemon-type | 2f62d4b88856dcd9aff79bdda993a4ddc093d7b7 | [
"Apache-2.0"
] | null | null | null | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.linear_model import LogisticRegression
# from sklearn.tree import DecisionTreeClassifier
# from sklearn.ensemble import RandomForestClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import train_test_split
import sklearn.metrics as metrics
from sklearn.metrics import confusion_matrix, multilabel_confusion_matrix
from skmultilearn.problem_transform import ClassifierChain
from skmultilearn.problem_transform import BinaryRelevance
from skmultilearn.adapt import MLkNN
from keras.layers import Dense
from keras.models import Sequential
from keras.metrics import *
##########################################################
# Section 1 - Data Loading
##########################################################
# Getting feature data
finalData = np.array(pd.read_csv('D:/UIP/finaldata.csv', index_col='Name'))
biodata = finalData[:, 21:]
# Getting type data as dataframe for visualisations
pType = pd.read_csv('D:/UIP/primType.csv', index_col=0)
sType = pd.read_csv('D:/UIP/secondType.csv', index_col=0)
bTypes = pd.read_csv('D:/UIP/sparseTypes.csv', index_col=0)
# Getting features as numpy arrays for model inputs
primType = np.array(pType)
secType = np.array(sType)
bothTypes = np.array(bTypes)
# Train/test split of the full (bio + move) data
Xtrain, Xtest, Ytrain, Ytest = train_test_split(finalData, bothTypes, test_size=0.2, random_state=12345)
XtrainPrim, XtestPrim, YtrainPrim, YtestPrim = train_test_split(finalData, primType, test_size=0.2, random_state=12345)
XtrainSec, XtestSec, YtrainSec, YtestSec = train_test_split(finalData, secType, test_size=0.2, random_state=12345)
# Train/test split of the bio-only data
XtrainBio, XtestBio, YtrainBio, YtestBio = train_test_split(biodata, bothTypes, test_size=0.2, random_state=12345)
XtrainPrimBio, XtestPrimBio, YtrainPrimBio, YtestPrimBio = train_test_split(biodata, primType, test_size=0.2, random_state=12345)
XtrainSecBio, XtestSecBio, YtrainSecBio, YtestSecBio = train_test_split(biodata, secType, test_size=0.2, random_state=12345)
##########################################################
# Section 2 - Data Visualisation
##########################################################
# Visualising class distribution for Pokemon type
def visualiseTypeDist(typeData, nat):
# Type Categories
categories = list(typeData.columns.values)
plt.figure(figsize=(15, 8))
ax = sns.barplot(categories, typeData.sum().values)
# Axis labels
if nat == 1:
plt.title("Distribution of Primary Pokemon Types", fontsize=14)
elif nat == 2:
plt.title("Distribution of Secondary Pokemon Types", fontsize=14)
else:
plt.title("Distribution of Pokemon Types (single and dual)", fontsize=14)
plt.ylabel('Pokemon of that Type', fontsize=14)
plt.xlabel('Pokemon Type', fontsize=14)
rects = ax.patches
labels = typeData.sum().values
# Print hist labels
for rect, label in zip(rects, labels):
height = rect.get_height()
ax.text(rect.get_x() + rect.get_width()/2, height + 1,
label, ha='center', va='bottom', fontsize=12)
plt.show()
visualiseTypeDist(pType, 1)
visualiseTypeDist(sType, 2)
visualiseTypeDist(bTypes, 0)
# Function to re-encode output of Neural Network into one-hot encoding
def reEncode(predictions):
newOut = np.ndarray((len(predictions), len(predictions[0])))
for i in range(len(predictions)):
row = predictions[i]
m = max(row)
for j in range(len(predictions[0])):
if row[j] == m:
newOut[i][j] = 1
else:
newOut[i][j] = 0
return newOut
# Epsilon threshold for assigning a second type in reEncodeMulti below: the
# second-highest score is kept only if it beats the third-highest by at least
# this margin.
epsilon = 0.03
# Function to re-encode output of Neural Network into multiple-hot encoding
def reEncodeMulti(predictions):
newOut = np.ndarray((len(predictions), len(predictions[0])))
for i in range(len(predictions)):
row = predictions[i]
m = max(row)
rowAlt = [e for e in row if e != m]
tx = max(rowAlt)
rowAltB = [e for e in rowAlt if e != tx]
tb = max(rowAltB)
for j in range(len(predictions[0])):
if row[j] == m:
newOut[i][j] = 1
elif row[j] == tx:
if (tx - tb) >= epsilon:
newOut[i][j] = 1
else:
newOut[i][j] = 0
return newOut
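# Worked example (illustrative scores): for a row [0.50, 0.30, 0.20, ...]
# with epsilon = 0.03, the gap between the second- and third-highest scores
# is 0.10 >= 0.03, so the top two types are both predicted; for a row
# [0.50, 0.22, 0.20, ...] the gap is 0.02 < 0.03, so only the top type is kept.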
# ###############################################################
# # Section 3 - Multi-class classification for Type 1 of Pokemon
# ###############################################################
# Neural Network with Softmax + Categorical Crossentropy
def test_network(Xtrain, Xtest, Ytrain, Ytest):
model = Sequential()
feat = len(Xtrain[0])
# Hidden Layers
model.add(Dense(64, activation='relu', input_dim=feat))
# model.add(Dense(64, activation='relu'))
# Output layer with 18 nodes using Softmax activation (we have 18 Pokemon types)
model.add(Dense(18, activation='softmax'))
# Running the model
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
model.fit(Xtrain, Ytrain, epochs=40, batch_size=32)
# Accuracy Metrics and Predictions
score = model.evaluate(Xtest, Ytest, batch_size=16)
predictions = model.predict(Xtest)
return predictions, score
# # Decision Tree - (Deprecated)
# def test_tree(Xtrain, Xtest, Ytrain, Ytest):
# # Setting tree parameters
# classifier = DecisionTreeClassifier(criterion='entropy', max_depth=10, random_state=12345)
# classifier.fit(Xtrain, Ytrain)
# # Accuracy Metrics and Predictions
# print('Accuracy Score for Decision Tree on training set: {:.2f}'.format(classifier.score(Xtrain, Ytrain)))
# print('Accuracy Score for Decision Tree on test set: {:.2f}'.format(classifier.score(Xtest, Ytest)))
# predictions = classifier.predict(Xtest)
# return predictions
# K-Nearest Neighbours for Multi-Class classification
def test_knn(Xtrain, Xtest, Ytrain, Ytest):
# Setting k = 3
classifier = KNeighborsClassifier(n_neighbors=3)
classifier.fit(Xtrain, Ytrain)
# Accuracy Metrics and Predictions
predictions = classifier.predict(Xtest)
score = classifier.score(Xtest, Ytest)
return predictions, score
# ######################################################################
# # Section 4 - Multi-class, Multi-label approach to Type classification
# ######################################################################
# Neural Network with Softmax + Binary Crossentropy
def test_network2(Xtrain, Xtest, Ytrain, Ytest):
model = Sequential()
feat = len(Xtrain[0])
# Hidden Layers
model.add(Dense(64, activation='relu', input_dim=feat))
# model.add(Dense(64, activation='relu'))
# Output layer with 18 nodes using Softmax activation (we have 18 Pokemon types)
model.add(Dense(18, activation='softmax'))
# Running the model
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
model.fit(Xtrain, Ytrain, epochs=40, batch_size=32)
# Accuracy Metrics and Predictions
score = model.evaluate(Xtest, Ytest, batch_size=16)
predictions = model.predict(Xtest)
return predictions, score
# Multilabel k Nearest Neighbours (MLkNN)
def test_mlknn(Xtrain, Xtest, Ytrain, Ytest):
# Training the classifier and making predictions
classifier = MLkNN(k=1)
classifier.fit(Xtrain, Ytrain)
predictions = classifier.predict(Xtest)
# Measuring accuracy
scores = classifier.score(Xtest, Ytest)
loss = metrics.hamming_loss(Ytest, predictions)
return predictions, scores, loss
# Binary Relevance with Logistic Regression
def test_logistic(Xtrain, Xtest, Ytrain, Ytest):
# Setting parameters for Logistic Regression
reg = LogisticRegression(C = 1.0, solver='lbfgs', random_state=12345)
# Initialising the Binary Relevance Pipeline
classifier = BinaryRelevance(classifier=reg)
# Training the classifiers and making predictions
classifier.fit(Xtrain, Ytrain)
predictions = classifier.predict(Xtest)
# Measuring accuracy
scores = classifier.score(Xtest, Ytest)
loss = metrics.hamming_loss(Ytest, predictions)
return predictions, scores, loss
###############################################################
# Section 5 - Getting results from models
###############################################################
typeList = ['Normal', 'Fighting', 'Flying', 'Poison', 'Ground', 'Rock', 'Bug', 'Ghost',
'Steel', 'Fire', 'Water', 'Grass', 'Electric', 'Psychic', 'Ice', 'Dragon', 'Dark', 'Fairy']
pokemon = pd.read_csv('D:/UIP/testList.csv', header=0)['Name']
#### Section 5.1 - Predicting a Pokemon's primary type. First with bio + move data, then only biodata. ####
# Neural Network
primaryNet_predic, primaryNet_acc = test_network(XtrainPrim, XtestPrim, YtrainPrim, YtestPrim)
pd.DataFrame(reEncode(primaryNet_predic), index=pokemon, columns=typeList).to_csv('D:/UIP/Pred/NetPredictionsPrim.csv')
primaryNet_predicBio, primaryNet_accBio = test_network(XtrainPrimBio, XtestPrimBio, YtrainPrimBio, YtestPrimBio)
pd.DataFrame(reEncode(primaryNet_predicBio), index=pokemon, columns=typeList).to_csv('D:/UIP/Pred/NetPredictionsPrimWithoutMoves.csv')
# # Decision Tree
# primaryForest_predic = test_tree(XtrainPrim, XtestPrim, YtrainPrim, YtestPrim)
# primaryForest_predicBio = test_tree(XtrainPrimBio, XtestPrimBio, YtrainPrimBio, YtestPrimBio)
# K Nearest Neighbours
primaryKNN_predic, primaryKNN_acc = test_knn(XtrainPrim, XtestPrim, YtrainPrim, YtestPrim)
pd.DataFrame(primaryKNN_predic, index=pokemon, columns=typeList).to_csv('D:/UIP/Pred/KNNPredictionsPrim.csv')
primaryKNN_predicBio, primaryKNN_accBio = test_knn(XtrainPrimBio, XtestPrimBio, YtrainPrimBio, YtestPrimBio)
pd.DataFrame(primaryKNN_predicBio, index=pokemon, columns=typeList).to_csv('D:/UIP/Pred/KNNPredictionsPrimWithoutMoves.csv')
#### Section 5.2 - Predicting both types for Pokemon. First with bio + move data, then only biodata. ####
# Neural Network
primaryNet_predic2, primaryNet_acc2 = test_network2(Xtrain[:, :21], Xtest[:, :21], Ytrain, Ytest)
pd.DataFrame(reEncodeMulti(primaryNet_predic2), index=pokemon, columns=typeList).to_csv('D:/UIP/Pred/NetPredictions.csv')
primaryNet_predicBio2, primaryNet_accBio2 = test_network2(XtrainBio, XtestBio, YtrainBio, YtestBio)
pd.DataFrame(reEncodeMulti(primaryNet_predicBio2), index=pokemon, columns=typeList).to_csv('D:/UIP/Pred/NetPredictionsWithoutMoves.csv')
# MLkNN
mlknn_pred, mlknn_acc, mlknn_hamloss = test_mlknn(Xtrain, Xtest, Ytrain, Ytest)
pd.DataFrame(mlknn_pred.A, index=pokemon, columns=typeList).to_csv('D:/UIP/Pred/MLKNNtPredictions.csv')
mlknn_predBio, mlknn_accBio, mlknn_hamlossBio = test_mlknn(XtrainBio, XtestBio, YtrainBio, YtestBio)
pd.DataFrame(mlknn_predBio.A, index=pokemon, columns=typeList).to_csv('D:/UIP/Pred/MLKNNtPredictionsWithoutMoves.csv')
# Binary Relevance - Logistic Regression
log_pred, log_acc, log_loss = test_logistic(Xtrain, Xtest, Ytrain, Ytest)
pd.DataFrame(log_pred.A, index=pokemon, columns=typeList).to_csv('D:/UIP/Pred/LogPredictions.csv')
log_predBio, log_accBio, log_lossBio = test_logistic(XtrainBio, XtestBio, YtrainBio, YtestBio)
pd.DataFrame(log_predBio.A, index=pokemon, columns=typeList).to_csv('D:/UIP/Pred/LogPredictionsWithoutBio.csv')
###############################################################
# Section 6 - Creating Confusion Matrices
###############################################################
# Type-list for primary type
typeListB = ['Normal', 'Fighting', 'Poison', 'Ground', 'Rock', 'Bug', 'Ghost', 'Steel', 'Fire',
'Water', 'Grass', 'Electric', 'Psychic', 'Ice', 'Dragon', 'Dark', 'Fairy', 'Flying']
# Creating class labels
ylabels = np.unique(YtestPrim.argmax(axis=1))
# Function to return confusion matrix
def getCMatrix(truth, predictions, typeListA, typeListB, primary):
cm = confusion_matrix(truth.argmax(axis=1), predictions.argmax(axis=1))
    if primary:
cm = np.append(cm, np.zeros((17, 1), dtype=int), axis=1)
cm = np.append(cm, np.zeros((1, 18), dtype=int), axis=0)
cm_df = pd.DataFrame(cm, index=typeListB, columns=typeListB)
else:
cm_df = multilabel_confusion_matrix(truth, predictions)
return cm_df
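# Note on getCMatrix: the primary-type matrix is padded from 17x17 to 18x18 on
# the assumption that one type (Flying, listed last in typeListB) never occurs
# as a primary label in this test split, so confusion_matrix omits it.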
# Function to plot confusion matrix for Primary types
def getVisualsPrim(data, typeList, Prim, Title):
plt.figure(figsize=(10, 8))
sns.heatmap(data, cmap='YlGnBu', annot=True, square=True, fmt="d")
    if Prim:
plt.xticks(np.arange(0, 18), typeList, rotation=45)
plt.yticks(np.arange(0, 18), typeList, rotation=45)
plt.ylabel('True Label', fontsize=14)
plt.xlabel('Predicted Label', fontsize=14)
plt.title(Title)
plt.show()
# Function to plot confusion matrix for both types
def getVisuals(data, typeList):
for i in range(len(data)):
cm = pd.DataFrame(data[i], index=[0, 1])
plTitle = 'Confusion Matrix: {} Type'.format(typeList[i])
getVisualsPrim(cm, typeList, False, plTitle)
#### 6.1 - Confusion Matrices for Neural Network ######
# Recoding output to binary vector of length 18
neuralOutPrim = reEncode(primaryNet_predic)
neuralOut = reEncodeMulti(primaryNet_predic2)
neuralOut = np.where(neuralOut >= 0.5, 1, 0)
# Repeating process for Neural Network without move data
neuralOutPrimBio = reEncode(primaryNet_predicBio)
neuralOutBio = reEncodeMulti(primaryNet_predicBio2)
neuralOutBio = np.where(neuralOutBio > 0.5, 1, 0)
# Getting confusion matrices
neuralPrimCM = getCMatrix(YtestPrim, neuralOutPrim, typeList, typeListB, True)
neuralCM = getCMatrix(Ytest, neuralOut, typeList, typeListB, False)
# Visualising Heatmaps of Confusion Matrices
getVisualsPrim(neuralPrimCM, typeListB, True, 'Confusion Matrix - Neural Network')
getVisuals(neuralCM, typeList)
#### 6.2 - Confusion Matrices for KNN and MLkNN ######
# Getting confusion matrices
knnCM = getCMatrix(YtestPrim, primaryKNN_predic, typeList, typeListB, True)
mlknnCM = getCMatrix(Ytest, mlknn_pred.A, typeList, typeListB, False)
# Visualising Heatmaps of Confusion Matrices
getVisualsPrim(knnCM, typeListB, True, 'Confusion Matrix - KNN')
getVisuals(mlknnCM, typeList)
###############################################################
# Section 7 - Getting accuracy measures
###############################################################
# Function to print relevant measures
def getMeasures(ytrue, ypred, name, task_type):
    print("Printing accuracy measures for {} below:".format(name))
    print('Precision Score = {}'.format(metrics.precision_score(ytrue, ypred, average='macro')))
    print('Recall Score = {}'.format(metrics.recall_score(ytrue, ypred, average='macro')))
    print('F1 Macro Score = {}'.format(metrics.f1_score(ytrue, ypred, average='macro')))
    print('Accuracy Score = {}'.format(metrics.accuracy_score(ytrue, ypred)))
    if task_type == 1:
        C = top_k_categorical_accuracy(ytrue, ypred, k=2)
    else:
        C = top_k_categorical_accuracy(ytrue, ypred, k=3)
    kscore = len([i for i in C if i == 1]) / len(C)
    print('Top-K Categorical Accuracy = {}'.format(kscore))
    print('Weighted F1 Score = {}'.format(metrics.f1_score(ytrue, ypred, average='weighted')))
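# Note on getMeasures: top_k_categorical_accuracy returns a tensor, so the
# list-comprehension count above assumes TF eager execution (in graph mode,
# call .numpy() on C first).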
# Printing the measures
getMeasures(YtestPrim, neuralOutPrim, 'NeuralNet', 1)
getMeasures(YtestPrim, neuralOutPrimBio, 'NeuralNet No Moves', 1)
getMeasures(YtestPrim, primaryKNN_predic, 'KNN', 1)
getMeasures(YtestPrim, primaryKNN_predicBio, 'KNN No Moves', 1)
getMeasures(Ytest, neuralOut, 'NeuralNet BothTypes', 1)
getMeasures(Ytest, neuralOutBio, 'NeuralNet BothTypes Bio', 2)
getMeasures(Ytest, mlknn_pred.A, 'MLkNN BothTypes', 1)
getMeasures(Ytest, mlknn_predBio.A, 'MLkNN BothTypes Bio', 1)
getMeasures(Ytest, log_pred.A, 'Logis BothTypes', 1)
getMeasures(Ytest, log_predBio.A, 'Logis BothTypes Bio', 1)
| 40.642132 | 136 | 0.684694 | 1,917 | 16,013 | 5.640063 | 0.208659 | 0.005549 | 0.009711 | 0.024972 | 0.380226 | 0.354606 | 0.29236 | 0.285886 | 0.236866 | 0.207825 | 0 | 0.01645 | 0.149628 | 16,013 | 393 | 137 | 40.745547 | 0.777557 | 0.209767 | 0 | 0.263158 | 0 | 0 | 0.119873 | 0.038356 | 0 | 0 | 0 | 0 | 0 | 1 | 0.057416 | false | 0 | 0.07177 | 0 | 0.167464 | 0.033493 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
dc8eeee2658476f6c7431a52b6b630a9943aba5d | 14,601 | py | Python | macauff/misc_functions.py | Onoddil/macauff | 6184b110811dfd8a3c0ccc39e660806b3b886eac | [
"BSD-3-Clause"
] | 5 | 2021-03-03T22:03:03.000Z | 2022-03-11T05:42:18.000Z | macauff/misc_functions.py | Onoddil/macauff | 6184b110811dfd8a3c0ccc39e660806b3b886eac | [
"BSD-3-Clause"
] | 8 | 2020-07-09T09:26:17.000Z | 2022-03-30T14:24:11.000Z | macauff/misc_functions.py | Onoddil/macauff | 6184b110811dfd8a3c0ccc39e660806b3b886eac | [
"BSD-3-Clause"
] | 1 | 2022-02-09T14:01:43.000Z | 2022-02-09T14:01:43.000Z | # Licensed under a 3-clause BSD style license - see LICENSE
'''
This module provides miscellaneous scripts, called in other parts of the cross-match
framework.
'''
import os
import operator
import numpy as np
__all__ = []
def create_auf_params_grid(auf_folder_path, auf_pointings, filt_names, array_name,
len_first_axis=None):
'''
Minor function to offload the creation of a 3-D or 4-D array from a series
of 2-D arrays.
Parameters
----------
auf_folder_path : string
Location of the top-level folder in which all fourier grids are saved.
auf_pointings : numpy.ndarray
Two-dimensional array with the sky coordinates of each pointing used
in the perturbation AUF component creation.
filt_names : list or numpy.ndarray
List of ordered filters for the given catalogue.
array_name : string
The name of the individually-saved arrays, one per sub-folder, to turn
into a 3-D or 4-D array.
len_first_axis : integer, optional
Length of the initial axis of the 4-D array. If not provided or is
``None``, final array is assumed to be 3-D instead.
'''
arraylengths = np.load('{}/arraylengths.npy'.format(auf_folder_path))
longestNm = np.amax(arraylengths)
if len_first_axis is None:
grid = np.lib.format.open_memmap('{}/{}_grid.npy'.format(
auf_folder_path, array_name), mode='w+', dtype=float, shape=(
longestNm, len(filt_names), len(auf_pointings)), fortran_order=True)
grid[:, :, :] = -1
else:
grid = np.lib.format.open_memmap('{}/{}_grid.npy'.format(
auf_folder_path, array_name), mode='w+', dtype=float, shape=(
len_first_axis, longestNm, len(filt_names), len(auf_pointings)), fortran_order=True)
grid[:, :, :, :] = -1
for j in range(0, len(auf_pointings)):
ax1, ax2 = auf_pointings[j]
for i in range(0, len(filt_names)):
filt = filt_names[i]
single_array = np.load('{}/{}/{}/{}/{}.npy'.format(auf_folder_path,
ax1, ax2, filt, array_name))
if len_first_axis is None:
grid[:arraylengths[i, j], i, j] = single_array
else:
grid[:, :arraylengths[i, j], i, j] = single_array
del arraylengths, longestNm, grid
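# A minimal usage sketch (all paths and filter names here are hypothetical):
#     create_auf_params_grid('/tmp/auf', auf_pointings, ['W1', 'W2'],
#                            'fourier', len_first_axis=10)
# would stack every '<ax1>/<ax2>/<filt>/fourier.npy' into a single
# 'fourier_grid.npy' memmap, padded with -1 where arrays are shorter.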
def load_small_ref_auf_grid(modrefind, auf_folder_path, file_name_prefixes):
'''
Function to create reference index arrays out of larger arrays, based on
the mappings from the original reference index array into a larger grid,
such that the corresponding cutout reference index now maps onto the smaller
cutout 4-D array.
Parameters
----------
modrefind : numpy.ndarray
The reference index array that maps into saved array ``fourier_grid``
for each source in the given catalogue.
auf_folder_path : string
Location of the folder in which ``fourier_grid`` is stored.
file_name_prefixes : list
Prefixes of the files stored in ``auf_folder_path`` -- the parts before
"_grid" -- to be loaded as sub-arrays and returned.
Returns
-------
small_grids : list of numpy.ndarray
Small cutouts of ``*_grid`` files defined by ``file_name_prefixes``,
containing only the appropriate indices for AUF pointing, filter, etc.
modrefindsmall : numpy.ndarray
        The corresponding mappings for each source onto ``fourier_grid``, such
that each source still points to the correct entry that it did in
``fourier_grid``.
'''
nmuniqueind, nmnewind = np.unique(modrefind[0, :], return_inverse=True)
filtuniqueind, filtnewind = np.unique(modrefind[1, :], return_inverse=True)
axuniqueind, axnewind = np.unique(modrefind[2, :], return_inverse=True)
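    # np.unique(..., return_inverse=True) both picks out the slices that are
    # actually referenced and gives each source its new index into the
    # smaller cutout arrays built below.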
small_grids = []
for name in file_name_prefixes:
if len(np.load('{}/{}_grid.npy'.format(auf_folder_path, name), mmap_mode='r').shape) == 4:
small_grids.append(np.asfortranarray(np.load('{}/{}_grid.npy'.format(
auf_folder_path, name), mmap_mode='r')[:, :, :, axuniqueind][
:, :, filtuniqueind, :][:, nmuniqueind, :, :]))
else:
small_grids.append(np.asfortranarray(np.load('{}/{}_grid.npy'.format(
auf_folder_path, name), mmap_mode='r')[:, :, axuniqueind][
:, filtuniqueind, :][nmuniqueind, :, :]))
modrefindsmall = np.empty((3, modrefind.shape[1]), int, order='F')
del modrefind
modrefindsmall[0, :] = nmnewind
modrefindsmall[1, :] = filtnewind
modrefindsmall[2, :] = axnewind
return small_grids, modrefindsmall
def hav_dist_constant_lat(x_lon, x_lat, lon):
'''
Computes the Haversine formula in the limit that sky separation is only
determined by longitudinal separation (i.e., delta-lat is zero).
Parameters
----------
x_lon : float
Sky coordinate of the source in question, in degrees.
x_lat : float
Orthogonal sky coordinate of the source, in degrees.
lon : float
Longitudinal sky coordinate to calculate the "horizontal" sky separation
of the source to.
Returns
-------
dist : float
Horizontal sky separation between source and given ``lon``, in degrees.
'''
dist = np.degrees(2 * np.arcsin(np.abs(np.cos(np.radians(x_lat)) *
np.sin(np.radians((x_lon - lon)/2)))))
return dist
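# Worked example: on the equator, hav_dist_constant_lat(10.0, 0.0, 12.0)
# reduces to the plain longitude difference and returns 2.0 degrees.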
def map_large_index_to_small_index(inds, length, folder):
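    """Remap the sparse indices in ``inds`` (ignoring -1 entries) onto a
    compact 0..N-1 range, via a temporary memmap of length ``length`` stored
    in ``folder``. Returns the remapped, Fortran-ordered index array and the
    sorted unique original indices.
    """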
inds_unique_flat = np.unique(inds[inds > -1])
map_array = np.lib.format.open_memmap('{}/map_array.npy'.format(folder), mode='w+', dtype=int,
shape=(length,))
map_array[:] = -1
map_array[inds_unique_flat] = np.arange(0, len(inds_unique_flat), dtype=int)
inds_map = np.asfortranarray(map_array[inds.flatten()].reshape(inds.shape))
    os.remove('{}/map_array.npy'.format(folder))
return inds_map, inds_unique_flat
def _load_single_sky_slice(folder_path, cat_name, ind, sky_inds):
'''
Function to, in a memmap-friendly way, return a sub-set of the nearest sky
indices of a given catalogue.
Parameters
----------
folder_path : string
Folder in which to store the temporary memmap file.
cat_name : string
String defining whether this function was called on catalogue "a" or "b".
ind : float
The value of the sky indices, as defined in ``distribute_sky_indices``,
to return a sub-set of the larger catalogue. This value represents
the index of a given on-sky position, used to construct the "counterpart"
and "field" likelihoods.
sky_inds : numpy.ndarray
The given catalogue's ``distribute_sky_indices`` values, to compare
with ``ind``.
Returns
-------
sky_cut : numpy.ndarray
A boolean array, indicating whether each element in ``sky_inds`` matches
``ind`` or not.
'''
sky_cut = np.lib.format.open_memmap('{}/{}_small_sky_slice.npy'.format(
folder_path, cat_name), mode='w+', dtype=bool, shape=(len(sky_inds),))
di = max(1, len(sky_inds) // 20)
for i in range(0, len(sky_inds), di):
sky_cut[i:i+di] = sky_inds[i:i+di] == ind
return sky_cut
def _create_rectangular_slice_arrays(folder_path, cat_name, len_a):
'''
Create temporary sky slice memmap arrays for parts of the cross-match
process to use.
Parameters
----------
folder_path : string
Location of where to store memmap arrays.
cat_name : string
Unique indicator of which catalogue these arrays are for.
len_a : integer
The length of the catalogue in question, allowing for a one-to-one
mapping of sky slice per source.
'''
np.lib.format.open_memmap('{}/{}_temporary_sky_slice_1.npy'.format(
folder_path, cat_name), mode='w+', dtype=bool, shape=(len_a,))
np.lib.format.open_memmap('{}/{}_temporary_sky_slice_2.npy'.format(
folder_path, cat_name), mode='w+', dtype=bool, shape=(len_a,))
np.lib.format.open_memmap('{}/{}_temporary_sky_slice_3.npy'.format(
folder_path, cat_name), mode='w+', dtype=bool, shape=(len_a,))
np.lib.format.open_memmap('{}/{}_temporary_sky_slice_4.npy'.format(
folder_path, cat_name), mode='w+', dtype=bool, shape=(len_a,))
np.lib.format.open_memmap('{}/{}_temporary_sky_slice_combined.npy'.format(
folder_path, cat_name), mode='w+', dtype=bool, shape=(len_a,))
return
def _load_rectangular_slice(folder_path, cat_name, a, lon1, lon2, lat1, lat2, padding,
memmap_arrays):
'''
Loads all sources in a catalogue within a given separation of a rectangle
in sky coordinates, allowing for the search for all sources within a given
radius of sources inside the rectangle.
Parameters
----------
folder_path : string
Location of where the memmap files used in the slicing of the
catalogue are stored.
cat_name : string
Indication of whether we are loading catalogue "a" or catalogue "b",
for separation within a given folder.
a : numpy.ndarray
Full astrometric catalogue from which the subset of sources within
``padding`` distance of the sky rectangle are to be drawn.
lon1 : float
Lower limit on on-sky rectangle, in given sky coordinates, in degrees.
lon2 : float
Upper limit on sky region to slice sources from ``a``.
lat1 : float
Lower limit on second orthogonal sky coordinate defining rectangle.
lat2 : float
Upper sky rectangle coordinate of the second axis.
padding : float
The sky separation, in degrees, to find all sources within a distance
of in ``a``.
memmap_arrays : list of numpy.ndarray
The list of temporary arrays to use for memory-friendly sky coordinate
slicing.
Returns
-------
sky_cut : numpy.ndarray
Boolean array, indicating whether each source in ``a`` is within ``padding``
of the rectangle defined by ``lon1``, ``lon2``, ``lat1``, and ``lat2``.
'''
# Slice the memmapped catalogue, with a memmapped slicing array to
# preserve memory.
sky_cut_1, sky_cut_2, sky_cut_3, sky_cut_4, sky_cut = memmap_arrays
di = max(1, len(a) // 20)
# Iterate over each small slice of the larger array, checking for upper
# and lower longitude, then latitude, criterion matching.
for i in range(0, len(a), di):
_lon_cut(i, a, di, lon1, padding, sky_cut_1, operator.ge)
for i in range(0, len(a), di):
_lon_cut(i, a, di, lon2, padding, sky_cut_2, operator.le)
for i in range(0, len(a), di):
_lat_cut(i, a, di, lat1, padding, sky_cut_3, operator.ge)
for i in range(0, len(a), di):
_lat_cut(i, a, di, lat2, padding, sky_cut_4, operator.le)
for i in range(0, len(a), di):
sky_cut[i:i+di] = (sky_cut_1[i:i+di] & sky_cut_2[i:i+di] &
sky_cut_3[i:i+di] & sky_cut_4[i:i+di])
return sky_cut
def _lon_cut(i, a, di, lon, padding, sky_cut, inequality):
'''
Function to calculate the longitude inequality criterion for astrometric
sources relative to a rectangle defining boundary limits.
Parameters
----------
i : integer
Index into ``sky_cut`` for slicing.
a : numpy.ndarray
The main astrometric catalogue to be sliced, loaded into memmap.
di : integer
Index stride value, for slicing.
lon : float
Longitude at which to cut sources, either above or below, in degrees.
padding : float
Maximum allowed sky separation the "wrong" side of ``lon``, to allow
for an increase in sky box size to ensure all overlaps are caught in
``get_max_overlap`` or ``get_max_indices``.
sky_cut : numpy.ndarray
Array into which to store boolean flags for whether source meets the
sky position criterion.
inequality : ``operator.le`` or ``operator.ge``
Function to determine whether a source is either above or below the
given ``lon`` value.
'''
# To check whether a source should be included in this slice or not if the
# "padding" factor is non-zero, add an extra caveat to check whether
# Haversine great-circle distance is less than the padding factor. For
# constant latitude this reduces to
# r = 2 arcsin(|cos(lat) * sin(delta-lon/2)|).
# However, in both zero and non-zero padding factor cases, we always require
# the source to be above or below the longitude for sky_cut_1 and sky_cut_2
# in load_fourier_grid_cutouts, respectively.
if padding > 0:
sky_cut[i:i+di] = (hav_dist_constant_lat(a[i:i+di, 0], a[i:i+di, 1], lon) <=
padding) | inequality(a[i:i+di, 0], lon)
else:
sky_cut[i:i+di] = inequality(a[i:i+di, 0], lon)
def _lat_cut(i, a, di, lat, padding, sky_cut, inequality):
'''
Function to calculate the latitude inequality criterion for astrometric
sources relative to a rectangle defining boundary limits.
Parameters
----------
i : integer
Index into ``sky_cut`` for slicing.
a : numpy.ndarray
The main astrometric catalogue to be sliced, loaded into memmap.
di : integer
Index stride value, for slicing.
lat : float
Latitude at which to cut sources, either above or below, in degrees.
padding : float
Maximum allowed sky separation the "wrong" side of ``lat``, to allow
for an increase in sky box size to ensure all overlaps are caught in
``get_max_overlap`` or ``get_max_indices``.
sky_cut : numpy.ndarray
Array into which to store boolean flags for whether source meets the
sky position criterion.
inequality : ``operator.le`` or ``operator.ge``
Function to determine whether a source is either above or below the
given ``lat`` value.
'''
# The "padding" factor is easier to handle for constant longitude in the
# Haversine formula, being a straight comparison of delta-lat, and thus we
# can simply move the required latitude padding factor to within the
# latitude comparison.
if padding > 0:
if inequality is operator.le:
sky_cut[i:i+di] = inequality(a[i:i+di, 1] - padding, lat)
else:
sky_cut[i:i+di] = inequality(a[i:i+di, 1] + padding, lat)
else:
sky_cut[i:i+di] = inequality(a[i:i+di, 1], lat)
| 40.671309 | 98 | 0.645778 | 2,073 | 14,601 | 4.410034 | 0.177038 | 0.021658 | 0.008313 | 0.014767 | 0.380114 | 0.335594 | 0.322687 | 0.294137 | 0.273354 | 0.273354 | 0 | 0.008028 | 0.24923 | 14,601 | 358 | 99 | 40.784916 | 0.825944 | 0.531196 | 0 | 0.252252 | 0 | 0 | 0.057759 | 0.030772 | 0 | 0 | 0 | 0 | 0 | 1 | 0.081081 | false | 0 | 0.027027 | 0 | 0.162162 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
dc8f66ca7bdcea68264020e24b084175abe1349f | 4,812 | py | Python | users/views.py | amado-developer/ReadHub-RestfulAPI | 8d8b445c4a84810d52bbf78a2593e0b48351590c | [
"MIT"
] | null | null | null | users/views.py | amado-developer/ReadHub-RestfulAPI | 8d8b445c4a84810d52bbf78a2593e0b48351590c | [
"MIT"
] | 7 | 2021-03-19T03:09:53.000Z | 2022-01-13T02:48:44.000Z | users/views.py | amado-developer/ReadHub-RestfulAPI | 8d8b445c4a84810d52bbf78a2593e0b48351590c | [
"MIT"
] | null | null | null | from rest_framework import status, viewsets
from rest_framework.response import Response
from rest_framework.decorators import api_view, permission_classes, parser_classes
from .serializers import UserSerializer
from rest_framework import permissions
from .models import User
from rest_framework.parsers import MultiPartParser, FormParser, FileUploadParser, JSONParser
from django.core.files.base import ContentFile
from permissions.services import APIPermissionClassFactory
from rest_framework.decorators import action
def is_logged(user, obj, request):
return user.email == obj.email
class UserViewset(viewsets.ModelViewSet):
queryset = User.objects.all()
serializer_class = UserSerializer
permission_classes = (
APIPermissionClassFactory(
name='UserPermission',
permission_configuration={
'base': {
'create': False,
'list': False,
},
'instance': {
'retrieve': is_logged,
'destroy': False,
'update': is_logged,
'add-to-balance': is_logged,
'get_user_data' : is_logged,
'upload_profile_picture': is_logged,
'add_to_balance': is_logged,
'sign_up': True,
}
}
),
)
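    # The factory (from permissions.services) presumably compiles this table
    # into a DRF permission class: booleans allow or deny an action outright,
    # while callables such as is_logged(user, obj, request) run per request.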
@action(detail=True, url_path='add-to-balance', methods=['patch'])
def add_to_balance(self, request, pk=None):
user = self.get_object()
user.balance += float(request.data['quantity'])
user.save()
return Response({
'status': 'Balance Added'
})
@action(detail=True, url_path='get-data', methods=['get'])
def get_user_data(self, request, pk=None):
user = self.get_object()
return Response(UserSerializer(user).data)
@action(detail=True, url_path='upload-profile-picture', methods=['patch', 'put'])
def upload_profile_picture(self, request, pk):
try:
user = User.objects.get(pk=pk)
except User.DoesNotExist:
return Response(status=status.HTTP_404_NOT_FOUND)
profile_picture = request.data['profile_picture']
user.profile_picture = profile_picture
user.save()
# print(profile_picture)
return Response(str(profile_picture))
    @action(detail=False, url_path='signup', methods=['POST'])
    def sign_up(self, request):
        usuario = User(
            email=request.data['email'],
            first_name=request.data['first_name'],
            last_name=request.data['last_name'],
            age=request.data['age'],
            gender=request.data['gender'],
            occupation=request.data['occupation'],
            address_line_1=request.data['address_line_1'],
            address_line_2=request.data['address_line_2'],
            phone_number=request.data['phone_number'],
        )
        usuario.set_password(request.data['password'])
        usuario.save()
        return Response({
            'status': 'ok'
        })
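    # Note: set_password() stores a salted hash, so the plaintext password
    # from the request is never persisted on the model.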
# import base64
# @api_view(['POST'])
# @permission_classes([permissions.AllowAny])
# def registration_view(request):
# if(request.method == 'POST'):
# serializer = UserSerializer(data=request.data)
# data = {}
# if serializer.is_valid():
# user = serializer.save()
# data['response'] = "Succesfully registered!"
# else:
# data = serializer.errors
# return Response(data)
# @api_view(['GET'])
# @permission_classes([permissions.IsAuthenticated])
# def users_view(request):
# if(request.method == 'GET'):
# users = User.objects.all()
# serializer = UserSerializer(users, many=True)
# return Response(serializer.data)
# @api_view(['GET'])
# @permission_classes([permissions.IsAuthenticated])
# def user_view(request, pk):
# try:
# user = User.objects.get(pk=pk)
# except User.DoesNotExist:
# return Response(status=status.HTTP_404_NOT_FOUND)
# if request.method == 'GET':
# serializer = UserSerializer(user)
# return Response(serializer.data)
# import base64
# @api_view(['PATCH', 'PUT'])
# @permission_classes([permissions.IsAuthenticated])
# @parser_classes([ JSONParser,FormParser, MultiPartParser])
# def upload_profile_picture(request, pk):
# try:
# user = User.objects.get(pk=pk)
# except User.DoesNotExist:
# return Response(status=status.HTTP_404_NOT_FOUND)
# profile_picture = request.data['profile_picture']
# user.profile_picture = profile_picture
# user.save()
# # print(profile_picture)
# return Response(str(profile_picture))
| 34.12766 | 92 | 0.618038 | 496 | 4,812 | 5.814516 | 0.239919 | 0.07767 | 0.035368 | 0.019764 | 0.339806 | 0.274965 | 0.274965 | 0.255548 | 0.231969 | 0.190361 | 0 | 0.004791 | 0.262677 | 4,812 | 140 | 93 | 34.371429 | 0.808061 | 0.325021 | 0 | 0.103896 | 0 | 0 | 0.105625 | 0.01375 | 0 | 0 | 0 | 0 | 0 | 1 | 0.064935 | false | 0.012987 | 0.12987 | 0.012987 | 0.324675 | 0.012987 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
dc95b4598062d65a094d734f7b5a69dd90fe43af | 178 | py | Python | 605.py | ssd352/quera-solutions | 7c7b572a26c3c1648f23528bcc661dec18b44943 | [
"MIT"
] | 1 | 2020-03-16T21:13:14.000Z | 2020-03-16T21:13:14.000Z | 605.py | ssd352/quera-solutions | 7c7b572a26c3c1648f23528bcc661dec18b44943 | [
"MIT"
] | null | null | null | 605.py | ssd352/quera-solutions | 7c7b572a26c3c1648f23528bcc661dec18b44943 | [
"MIT"
] | 2 | 2020-03-27T18:40:40.000Z | 2020-07-30T14:59:55.000Z | a = 1
b = 2
n = int(input())
if n == 1:
print(a)
elif n == 2:
print(b)
else:
for cnt in range(n - 2):
c = a + b
a = b
b = c
print(c)
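# Prints the n-th term of the Fibonacci-style sequence seeded with 1 and 2:
# 1, 2, 3, 5, 8, 13, ...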
| 11.866667 | 28 | 0.376404 | 33 | 178 | 2.030303 | 0.484848 | 0.059701 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.052632 | 0.466292 | 178 | 14 | 29 | 12.714286 | 0.652632 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0.230769 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
dc98b86b73f809183ec9744a7d4b5d6852d550f9 | 10,207 | py | Python | kgcnn/ops/polynom.py | thegodone/gcnn_keras | 2009b9ab9a07c1a369849478812fcc2cb9799945 | [
"MIT"
] | null | null | null | kgcnn/ops/polynom.py | thegodone/gcnn_keras | 2009b9ab9a07c1a369849478812fcc2cb9799945 | [
"MIT"
] | null | null | null | kgcnn/ops/polynom.py | thegodone/gcnn_keras | 2009b9ab9a07c1a369849478812fcc2cb9799945 | [
"MIT"
] | null | null | null | import numpy as np
import scipy as sp
import scipy.special
import tensorflow as tf
from scipy.optimize import brentq
@tf.function
def tf_spherical_bessel_jn_explicit(x, n=0):
r"""Compute spherical bessel functions :math:`j_n(x)` for constant positive integer :math:`n` explicitly.
TensorFlow has to cache the function for each :math:`n`. No gradient through :math:`n` or very large number
of :math:`n`'s is possible.
    The spherical Bessel functions and their properties can be looked up at
https://en.wikipedia.org/wiki/Bessel_function#Spherical_Bessel_functions.
For this implementation the explicit expression from https://dlmf.nist.gov/10.49 has been used.
The definition is:
:math:`a_{k}(n+\tfrac{1}{2})=\begin{cases}\dfrac{(n+k)!}{2^{k}k!(n-k)!},&k=0,1,\dotsc,n\\
0,&k=n+1,n+2,\dotsc\end{cases}`
:math:`\mathsf{j}_{n}\left(z\right)=\sin\left(z-\tfrac{1}{2}n\pi\right)\sum_{k=0}^{\left\lfloor n/2\right\rfloor}
(-1)^{k}\frac{a_{2k}(n+\tfrac{1}{2})}{z^{2k+1}}+\cos\left(z-\tfrac{1}{2}n\pi\right)
\sum_{k=0}^{\left\lfloor(n-1)/2\right\rfloor}(-1)^{k}\frac{a_{2k+1}(n+\tfrac{1}{2})}{z^{2k+2}}.`
Args:
x (tf.Tensor): Values to compute :math:`j_n(x)` for.
n (int): Positive integer for the bessel order :math:`n`.
Returns:
tf.Tensor: Spherical bessel function of order :math:`n`
"""
sin_x = tf.sin(x - n * np.pi / 2)
cos_x = tf.cos(x - n * np.pi / 2)
sum_sin = tf.zeros_like(x)
sum_cos = tf.zeros_like(x)
for k in range(int(np.floor(n / 2)) + 1):
if 2 * k < n + 1:
prefactor = float(sp.special.factorial(n + 2 * k) / np.power(2, 2 * k) / sp.special.factorial(
2 * k) / sp.special.factorial(n - 2 * k) * np.power(-1, k))
sum_sin += prefactor*tf.pow(x, - (2*k+1))
for k in range(int(np.floor((n - 1) / 2)) + 1):
if 2 * k + 1 < n + 1:
prefactor = float(sp.special.factorial(n + 2 * k + 1) / np.power(2, 2 * k + 1) / sp.special.factorial(
2 * k + 1) / sp.special.factorial(n - 2 * k - 1) * np.power(-1, k))
sum_cos += prefactor * tf.pow(x, - (2 * k + 2))
return sum_sin*sin_x + sum_cos*cos_x
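# Sanity check (hypothetical values): for n=0 the sums reduce to sin(x)/x,
# e.g. tf_spherical_bessel_jn_explicit(tf.constant([1.0]), n=0) ~= 0.8415.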
@tf.function
def tf_spherical_bessel_jn(x, n=0):
r"""Compute spherical bessel functions :math:`j_n(x)` for constant positive integer :math:`n` via recursion.
TensorFlow has to cache the function for each :math:`n`. No gradient through :math:`n` or very large number
of :math:`n` is possible.
    The spherical Bessel functions and their properties can be looked up at
https://en.wikipedia.org/wiki/Bessel_function#Spherical_Bessel_functions.
The recursive rule is constructed from https://dlmf.nist.gov/10.51. The recursive definition is:
:math:`j_{n+1}(z)=((2n+1)/z)j_{n}(z)-j_{n-1}(z)`
:math:`j_{0}(x)=\frac{\sin x}{x}`
:math:`j_{1}(x)=\frac{1}{x}\frac{\sin x}{x} - \frac{\cos x}{x}`
:math:`j_{2}(x)=\left(\frac{3}{x^{2}} - 1\right)\frac{\sin x}{x} - \frac{3}{x}\frac{\cos x}{x}`
Args:
x (tf.Tensor): Values to compute :math:`j_n(x)` for.
n (int): Positive integer for the bessel order :math:`n`.
Returns:
tf.tensor: Spherical bessel function of order :math:`n`
"""
if n < 0:
raise ValueError("Order parameter must be >= 0 for this implementation of spherical bessel function.")
if n == 0:
return tf.sin(x) / x
elif n == 1:
return tf.sin(x) / tf.square(x) - tf.cos(x) / x
else:
j_n = tf.sin(x) / x
j_nn = tf.sin(x) / tf.square(x) - tf.cos(x) / x
for i in range(1, n):
temp = j_nn
j_nn = (2 * i + 1) / x * j_nn - j_n
j_n = temp
return j_nn
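# Caveat: this upward recurrence is known to lose accuracy for x much smaller
# than n; it is adequate for the small orders typically used here.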
@tf.function
def tf_legendre_polynomial_pn(x, n=0):
r"""Compute the (non-associated) Legendre polynomial :math:`P_n(x)` for constant positive integer :math:`n`
via explicit formula.
TensorFlow has to cache the function for each :math:`n`. No gradient through :math:`n` or very large number
of :math:`n` is possible.
Closed form can be viewed at https://en.wikipedia.org/wiki/Legendre_polynomials.
:math:`P_n(x)=\sum_{k=0}^{\lfloor n/2\rfloor} (-1)^k \frac{(2n - 2k)! \, }{(n-k)! \, (n-2k)! \, k! \, 2^n} x^{n-2k}`
Args:
x (tf.Tensor): Values to compute :math:`P_n(x)` for.
n (int): Positive integer for :math:`n` in :math:`P_n(x)`.
Returns:
tf.tensor: Legendre Polynomial of order :math:`n`.
"""
out_sum = tf.zeros_like(x)
prefactors = [
float((-1) ** k * sp.special.factorial(2 * n - 2 * k) / sp.special.factorial(n - k) / sp.special.factorial(
n - 2 * k) / sp.special.factorial(k) / 2 ** n) for k in range(0, int(np.floor(n / 2)) + 1)]
powers = [float(n - 2 * k) for k in range(0, int(np.floor(n / 2)) + 1)]
for i in range(len(powers)):
out_sum = out_sum + prefactors[i] * tf.pow(x, powers[i])
return out_sum
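# Example: for n=2 the prefactors/powers above reduce to 1.5*x**2 - 0.5,
# i.e. the familiar P_2(x) = (3*x**2 - 1) / 2.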
@tf.function
def tf_spherical_harmonics_yl(theta, l=0):
r"""Compute the spherical harmonics :math:`Y_{ml}(\cos\theta)` for :math:`m=0` and constant non-integer :math:`l`.
TensorFlow has to cache the function for each :math:`l`. No gradient through :math:`l` or very large number
of :math:`n` is possible. Uses a simplified formula with :math:`m=0` from
https://en.wikipedia.org/wiki/Spherical_harmonics:
:math:`Y_{l}^{m}(\theta ,\phi)=\sqrt{\frac{(2l+1)}{4\pi} \frac{(l -m)!}{(l +m)!}} \, P_{l}^{m}(\cos{\theta }) \,
e^{i m \phi}`
where the associated Legendre polynomial simplifies to :math:`P_l(x)` for :math:`m=0`:
:math:`P_n(x)=\sum_{k=0}^{\lfloor n/2\rfloor} (-1)^k \frac{(2n - 2k)! \, }{(n-k)! \, (n-2k)! \, k! \, 2^n} x^{n-2k}`
Args:
theta (tf.Tensor): Values to compute :math:`Y_l(\cos\theta)` for.
l (int): Positive integer for :math:`l` in :math:`Y_l(\cos\theta)`.
Returns:
        tf.tensor: Spherical harmonics for :math:`m=0` and constant non-negative integer :math:`l`.
"""
x = tf.cos(theta)
out_sum = tf.zeros_like(x)
prefactors = [
float((-1) ** k * sp.special.factorial(2 * l - 2 * k) / sp.special.factorial(l - k) / sp.special.factorial(
l - 2 * k) / sp.special.factorial(k) / 2 ** l) for k in range(0, int(np.floor(l / 2)) + 1)]
powers = [float(l - 2 * k) for k in range(0, int(np.floor(l / 2)) + 1)]
for i in range(len(powers)):
out_sum = out_sum + prefactors[i] * tf.pow(x, powers[i])
out_sum = out_sum * float(np.sqrt((2 * l + 1) / 4 / np.pi))
return out_sum
@tf.function
def tf_associated_legendre_polynomial(x, l=0, m=0):
r"""Compute the associated Legendre polynomial :math:`P_{l}^{m}(x)` for :math:`m` and constant positive
integer :math:`l` via explicit formula.
Closed Form from taken from https://en.wikipedia.org/wiki/Associated_Legendre_polynomials.
:math:`P_{l}^{m}(x)=(-1)^{m}\cdot 2^{l}\cdot (1-x^{2})^{m/2}\cdot \sum_{k=m}^{l}\frac{k!}{(k-m)!}\cdot x^{k-m}
\cdot \binom{l}{k}\binom{\frac{l+k-1}{2}}{l}`.
Args:
x (tf.Tensor): Values to compute :math:`P_{l}^{m}(x)` for.
l (int): Positive integer for :math:`l` in :math:`P_{l}^{m}(x)`.
m (int): Positive/Negative integer for :math:`m` in :math:`P_{l}^{m}(x)`.
Returns:
tf.tensor: Legendre Polynomial of order n.
"""
    if np.abs(m) > l:
        raise ValueError("Error: Legendre polynomial must have -l <= m <= l")
    if l < 0:
        raise ValueError("Error: Legendre polynomial must have l >= 0")
    if m < 0:
        m = -m
        neg_m = float(np.power(-1, m) * sp.special.factorial(l - m) / sp.special.factorial(l + m))
    else:
        neg_m = 1
    x_prefactor = tf.pow(1 - tf.square(x), m / 2) * float(np.power(-1, m) * np.power(2, l))
    sum_out = tf.zeros_like(x)
    for k in range(m, l + 1):
        sum_out += tf.pow(x, k - m) * float(sp.special.factorial(k) / sp.special.factorial(k - m) *
                                            sp.special.binom(l, k) * sp.special.binom((l + k - 1) / 2, l))
    return sum_out * x_prefactor * neg_m
def spherical_bessel_jn(r, n):
r"""Compute spherical Bessel function :math:`j_n(r)` via scipy.
    The spherical Bessel functions and their properties can be looked up at
https://en.wikipedia.org/wiki/Bessel_function#Spherical_Bessel_functions .
Args:
r (np.ndarray): Argument
n (np.ndarray): Order.
Returns:
np.array: Values of the spherical Bessel function
"""
return np.sqrt(np.pi / (2 * r)) * sp.special.jv(n + 0.5, r)
def spherical_bessel_jn_zeros(n, k):
r"""Compute the first :math:`k` zeros of the spherical bessel functions :math:`j_n(r)` up to
order :math:`n` (excluded).
Taken from the original implementation of DimeNet at https://github.com/klicperajo/dimenet.
Args:
n: Order.
k: Number of zero crossings.
Returns:
np.ndarray: List of zero crossings of shape (n, k)
"""
zerosj = np.zeros((n, k), dtype="float32")
zerosj[0] = np.arange(1, k + 1) * np.pi
points = np.arange(1, k + n) * np.pi
racines = np.zeros(k + n - 1, dtype="float32")
for i in range(1, n):
for j in range(k + n - 1 - i):
foo = brentq(spherical_bessel_jn, points[j], points[j + 1], (i,))
racines[j] = foo
points = racines
zerosj[i][:k] = racines[:k]
return zerosj
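# The seed row above uses the fact that j_0(r) = sin(r)/r vanishes exactly at
# r = pi, 2*pi, ...; zeros of each higher order are then bracketed between
# those of the previous order and refined with brentq.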
def spherical_bessel_jn_normalization_prefactor(n, k):
r"""Compute the normalization or rescaling pre-factor for the spherical bessel functions :math:`j_n(r)` up to
order :math:`n` (excluded) and maximum frequency :math:`k` (excluded).
Taken from the original implementation of DimeNet at https://github.com/klicperajo/dimenet.
Args:
n: Order.
k: frequency.
Returns:
np.ndarray: Normalization of shape (n, k)
"""
zeros = spherical_bessel_jn_zeros(n, k)
normalizer = []
for order in range(n):
normalizer_tmp = []
for i in range(k):
normalizer_tmp += [0.5 * spherical_bessel_jn(zeros[order, i], order + 1) ** 2]
normalizer_tmp = 1 / np.array(normalizer_tmp) ** 0.5
normalizer += [normalizer_tmp]
return np.array(normalizer)
| 41.323887 | 120 | 0.597335 | 1,712 | 10,207 | 3.485981 | 0.116822 | 0.057808 | 0.05429 | 0.03502 | 0.610757 | 0.551776 | 0.481568 | 0.450905 | 0.390751 | 0.354893 | 0 | 0.02358 | 0.227197 | 10,207 | 246 | 121 | 41.49187 | 0.733012 | 0.519545 | 0 | 0.174312 | 0 | 0 | 0.041194 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.073395 | false | 0 | 0.045872 | 0 | 0.211009 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
dc98e7eab553c8191e2a184a674a6f4d922eda38 | 1,915 | py | Python | anibot/plugins/watch.py | F36/anibot | a3f5f835cdffbbc49264c98815c560bd5bc8f95a | [
"MIT"
] | null | null | null | anibot/plugins/watch.py | F36/anibot | a3f5f835cdffbbc49264c98815c560bd5bc8f95a | [
"MIT"
] | null | null | null | anibot/plugins/watch.py | F36/anibot | a3f5f835cdffbbc49264c98815c560bd5bc8f95a | [
"MIT"
] | 1 | 2021-06-12T02:47:39.000Z | 2021-06-12T02:47:39.000Z | # credits to @NotThatMF on telegram for chiaki fast api
# well i also borrowed the base code from him
from pyrogram import Client, filters
from pyrogram.types import CallbackQuery, InlineKeyboardButton, InlineKeyboardMarkup, Message
from .. import BOT_NAME, HELP_DICT, TRIGGERS as trg
from ..utils.data_parser import get_wo, get_wols
from ..utils.helper import check_user
@Client.on_message(filters.command(["watch", f"watch{BOT_NAME}"], prefixes=trg))
async def get_watch_order(client, message: Message):
"""Get List of Scheduled Anime"""
    try:
        x = message.text.split(" ", 1)[1]
    except IndexError:
        return await message.reply_text("Give me an anime name to search, e.g. /watch Naruto")
user = message.from_user.id
data = get_wols(x)
msg = f"Found related animes for the query {x}"
buttons = []
for i in data:
buttons.append([InlineKeyboardButton(str(i[1]), callback_data=f"watch_{i[0]}_{x}_{user}")])
await message.reply_text(msg, reply_markup=InlineKeyboardMarkup(buttons))
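# Callback payloads are packed as "watch_<id>_<query>_<user>"; the handlers
# below unpack them with str.split("_"), which assumes the query itself
# contains no underscores.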
@Client.on_callback_query(filters.regex(pattern=r"watch_(.*)"))
@check_user
async def watch_(client, cq: CallbackQuery):
kek, id_, qry, user = cq.data.split("_")
msg = get_wo(int(id_))
buttons = [[InlineKeyboardButton("Back", callback_data=f"wol_{qry}_{user}")]]
await cq.edit_message_text(msg, reply_markup=InlineKeyboardMarkup(buttons))
@Client.on_callback_query(filters.regex(pattern=r"wol_(.*)"))
@check_user
async def wls(client, cq: CallbackQuery):
kek, qry, user = cq.data.split("_")
data = get_wols(qry)
msg = f"Found related animes for the query {qry}"
buttons = []
for i in data:
buttons.append([InlineKeyboardButton(str(i[1]), callback_data=f"watch_{i[0]}_{qry}_{user}")])
await cq.edit_message_text(msg, reply_markup=InlineKeyboardMarkup(buttons))
HELP_DICT["watch"] = """Use /watch cmd to get watch order of searched anime
**Usage:**
`/watch Detective Conan`
`!watch Naruto`""" | 39.081633 | 102 | 0.69295 | 264 | 1,915 | 4.848485 | 0.344697 | 0.021875 | 0.030469 | 0.042188 | 0.405469 | 0.377344 | 0.377344 | 0.377344 | 0.325781 | 0.325781 | 0 | 0.00379 | 0.173368 | 1,915 | 49 | 103 | 39.081633 | 0.804801 | 0.050653 | 0 | 0.222222 | 0 | 0 | 0.179827 | 0.027666 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.138889 | 0 | 0.138889 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
dc99288a26beb12365c80c46e22562c3688eac02 | 814 | py | Python | mean_bpm_bme590hrm.py | clairenied15/bme590hrm | 29545b68c6d1dbb6861783d7c0c392bd0bdd1dd0 | [
"Apache-2.0"
] | null | null | null | mean_bpm_bme590hrm.py | clairenied15/bme590hrm | 29545b68c6d1dbb6861783d7c0c392bd0bdd1dd0 | [
"Apache-2.0"
] | 9 | 2018-10-17T19:54:42.000Z | 2018-10-28T21:12:14.000Z | mean_bpm_bme590hrm.py | clairenied15/bme590hrm | 29545b68c6d1dbb6861783d7c0c392bd0bdd1dd0 | [
"Apache-2.0"
] | null | null | null | def mean_bpm(num_beats, duration, inmin=None):
"""Find the average heart rate (in bpm) for a given ECG signal
Args:
num_beats: number of detected heart beats in an ECG strip
duration: the duration of the ECG signal (in seconds)
Returns:
bpm: average heart rate in beats per minute
"""
    if inmin is None:
        inmin = input("Input number of minutes ")
    # input() always returns a string, so coerce to float and reject anything
    # non-numeric with the same TypeError the original check intended
    try:
        inmin = float(inmin)
    except (TypeError, ValueError):
        raise TypeError("Input must be a number")
sec = inmin * 60
ratio = sec/duration
nbeats = num_beats * ratio
dur = duration * ratio
bps = nbeats/dur
mean_hr_bpm = bps*60
# print(type(mean_hr_bpm))
return mean_hr_bpm
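# Example: mean_bpm(120, 60, inmin=2) -> 120.0, since the inmin rescaling of
# beats and duration cancels out and the result is num_beats / duration * 60.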
| 30.148148 | 69 | 0.619165 | 118 | 814 | 4.186441 | 0.440678 | 0.048583 | 0.054656 | 0.072874 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.00703 | 0.300983 | 814 | 26 | 70 | 31.307692 | 0.86116 | 0.356265 | 0 | 0 | 0 | 0 | 0.102784 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.071429 | false | 0 | 0 | 0 | 0.142857 | 0.071429 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
dc99fdbb35da22057482dd17c0fafbc6e7d140c9 | 20,627 | py | Python | falcon_kit/run_support.py | PacificBiosciences/falcon3 | fde93d4ed79746cd280006bca6808e6975585738 | [
"BSD-3-Clause-Clear"
] | null | null | null | falcon_kit/run_support.py | PacificBiosciences/falcon3 | fde93d4ed79746cd280006bca6808e6975585738 | [
"BSD-3-Clause-Clear"
] | null | null | null | falcon_kit/run_support.py | PacificBiosciences/falcon3 | fde93d4ed79746cd280006bca6808e6975585738 | [
"BSD-3-Clause-Clear"
] | 5 | 2020-07-22T14:10:16.000Z | 2021-04-26T17:07:05.000Z | from . import bash, functional
from .functional import cfg_tobool
from .io import NativeIO
from .util.system import (make_fofn_abs, make_dirs, cd)
import json
import logging
import logging.config
import os
import re
import io
import sys
import tempfile
import time
import uuid
logger = logging.getLogger(__name__)
from configparser import ConfigParser
def _prepend_env_paths(content, names):
"""
E.g.
names = ['PATH', 'PYTYHONPATH']
content =
echo hi
=>
export PATH=current:path:${PATH}
export PYTHON=current:path:${PYTHONPATH}
echo hi
"""
export_env_vars = ['export %(k)s=%(v)s:${%(k)s}' % dict(
k=name, v=os.environ.get(name, '')) for name in names]
return '\n'.join(export_env_vars + [content])
def update_env_in_script(fn, names):
"""Modify fn using on prepend_env_paths().
"""
with open(fn) as ifs:
content = ifs.read()
content = _prepend_env_paths(content, names)
with open(fn, 'w') as ofs:
ofs.write(content)
def use_tmpdir_for_files(basenames, src_dir, link_dir):
"""NOT USED. Kept only for reference. This will be done in pypeFLOW.
Generate script to copy db files to tmpdir (for speed).
- Choose tmp_dir, based on src_dir name.
- rsync basenames into tmp_dir # after 'flock', per file
- symlink from link_dir into tmp_dir.
Return list of script lines, sans linefeed.
"""
script = list()
unique = os.path.abspath(src_dir).replace('/', '_')
root = tempfile.gettempdir()
tmp_dir = os.path.join(root, 'falcon', unique)
script.append('mkdir -p %s' % tmp_dir)
for basename in basenames:
src = os.path.join(src_dir, basename)
dst = os.path.join(tmp_dir, basename)
rm_cmd = 'rm -f %s' % basename
# Wait on lock for up to 10 minutes, in case of very large files.
rsync_cmd = "flock -w 600 %s.lock -c 'rsync -av %s %s'" % (
dst, src, dst)
ln_cmd = 'ln -sf %s %s' % (dst, basename)
script.extend([rm_cmd, rsync_cmd, ln_cmd])
return script
def make_job_data(url, script_fn):
"""Choose defaults.
Run in same directory as script_fn.
Base job_name on script_fn.
"""
wd = os.path.dirname(script_fn)
job_name = '{0}-{1}-{2}'.format(
os.path.basename(script_fn),
url.split("/")[-1],
str(uuid.uuid4())[:8],
)
job_data = {"job_name": job_name,
"cwd": wd,
"script_fn": script_fn}
return job_data
def check_HPCdaligner_option(option):
msg = ''
if '-dal' in option:
msg += 'HPC.daligner option "-dal" has changed to "-B".\n'
if '-deg' in option:
msg += 'HPC.daligner option "-deg" has changed to "-D".\n'
if '-D' in option:
msg += 'HPC.daligner option "-D" is no longer valid.\n'
if msg:
raise Exception(msg)
def clean_falcon_options(fc):
"""Update some values in fc.
Replace _ with - in a couple places.
"""
keys = ('falcon_sense_option', 'overlap_filtering_setting', 'fc_ovlp_to_graph_option',
)
for key in keys:
update_dash_flags(fc, key)
for dk in ('pa_HPCdaligner_option', 'ovlp_HPCdaligner_option'):
if dk in fc:
check_HPCdaligner_option(fc[dk])
def get_config(config):
"""
This is only for the call from pbsmrtpipe:
upport.get_config(support.parse_config(fn))
We have changed parse_config() to return a dict.
So this is a no-op.
"""
cfg = dict(config) # already a dict now
return cfg
def dict2config(jdict, section):
config = ConfigParser()
if not config.has_section(section):
config.add_section(section)
for (k, v) in jdict.items():
config.set(section, k, str(v))
return config
def parse_config(config_fn):
"""Deprecated.
Called from pbsmrtpipe, for now.
"""
return parse_cfg_file(config_fn)
def parse_cfg_file(config_fn):
"""Return as dict.
"""
with open(config_fn) as stream:
ext = os.path.splitext(config_fn)[1]
if ext in ('.json', '.js'):
config = json.loads(stream.read())
else:
            # Parse sections (case-sensitively) into sub-dicts.
config = parse_cfg_with_sections(stream)
update_defaults(config['General'])
# Copy General section to top, for now.
#for key, val in config['General'].items():
# config[key] = val
##cfg.update(config.get('General', {}))
check_config_sections(config) # Ensure that the right sections exist.
update_job_sections(config)
return config
def process_job_defaults(job_defaults):
key = 'use_tmpdir'
use_tmpdir = job_defaults.get(key, '')
if '/' in use_tmpdir:
tempfile.tempdir = use_tmpdir
os.environ['TMPDIR'] = use_tmpdir
else:
if use_tmpdir.lower().startswith('t'):
use_tmpdir = tempfile.gettempdir()
else:
use_tmpdir = False
job_defaults[key] = use_tmpdir
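# use_tmpdir semantics: a value containing '/' is taken as an explicit tmp
# directory; otherwise any value starting with 't' (e.g. 'true') selects the
# system default tmp dir, and everything else disables tmp-dir usage.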
def update_job_defaults_section(config):
"""For backwards compatibility with stuff from 'General' section.
"""
General = config['General']
job_defaults = config['job.defaults']
if 'njobs' in General:
logger.warning('"njobs" belongs in the [job.defaults] section.')
if 'pwatcher_type' in General:
logger.warning('Please specify "pwatcher_type" only in the [job.defaults] section, not in [General].')
if 'job_type' in General:
logger.warning('Please specify "job_type" only in the [job.defaults] section, not in [General].')
if 'stop_all_jobs_on_failure' in General:
logger.warning('Please specify "stop_all_jobs_on_failure" only in the [job.defaults] section, not in [General].')
if 'use_tmpdir' in General:
logger.warning('Please specify "use_tmpdir" only in the [job.defaults] section, not in [General].')
if 'job_name_style' in General:
logger.warning('Please specify "job_name_style" only in the [job.defaults] section, not in [General].')
if 'job_queue' in General:
logger.warning('Please specify "JOB_QUEUE" only in the [job.defaults] section, not as "job_queue" in [General].')
if 'sge_option' in General:
logger.warning('Please specify "JOB_OPTS" in the [job.defaults] section, not as "sge_option" in [General].')
    pwatcher_type = General.get('pwatcher_type', 'fs_based')
job_type = job_defaults.get('job_type', General.get('job_type', '')).lower()
job_queue = General.get('job_queue', '')
sge_option = General.get('sge_option', '')
if 'pwatcher_type' not in job_defaults:
job_defaults['pwatcher_type'] = pwatcher_type
else:
pwatcher_type = job_defaults['pwatcher_type']
if 'submit' not in config['job.defaults']:
if 'blocking' == pwatcher_type:
if not job_queue or ' ' not in job_queue:
raise Exception('pwatcher_type=blocking, but "submit" is not in [job.defaults] section.')
config['job.defaults']['submit'] = job_queue
logger.warning('Please set "submit" in [job.defaults] section. (For now, we will use "job_queue" from [General], which was a hack.)')
elif 'fs_based' == pwatcher_type or 'network_based' == pwatcher_type:
if not job_type:
logger.error('job.defaults.submit is not set; pwatcher_type={}; but job_type is not set. Maybe try "job_type=local" first.'.format(pwatcher_type))
job_type = 'local'
job_defaults['job_type'] = job_type
allowed_job_types = ['sge', 'pbs', 'torque', 'slurm', 'lsf', 'local']
assert job_type in allowed_job_types, 'job_type={} not in {}'.format(
job_type, allowed_job_types)
if job_queue and 'JOB_QUEUE' not in config['job.defaults']:
job_defaults['JOB_QUEUE'] = job_queue
else:
raise Exception('Unknown pwatcher_type={}'.format(pwatcher_type))
#assert 'submit' in config['job.defaults'], repr(config)
if sge_option and 'JOB_OPTS' not in config['job.defaults']:
job_defaults['JOB_OPTS'] = sge_option
if 'njobs' not in job_defaults:
config['job.defaults']['njobs'] = int(General.get('default_concurrent_jobs', 8)) # GLOBAL DEFAULT CONCURRENCY
msg = 'Please supply a default for "njobs" (aka concurrency) in section [job.defaults]. For now, we will use {}'.format(
config['job.defaults']['njobs'])
logger.warning(msg)
def update_if_if(key):
if key not in job_defaults:
if key in General:
job_defaults[key] = General[key]
logger.warning('Found "{}" from [General] section; should be in [job.defaults] instead.'.format(key))
update_if_if('job_name_style')
update_if_if('stop_all_jobs_on_failure')
update_if_if('use_tmpdir')
legacy_names = [
'pwatcher_type', 'pwatcher_directory',
'job_type', 'job_queue', 'job_name_style',
'use_tmpdir',
]
def update_if_missing(name, sub_dict):
if General.get(name) and name not in sub_dict:
sub_dict[name] = General[name]
for name in legacy_names:
update_if_missing(name, config['job.defaults'])
process_job_defaults(job_defaults)
def update_job_sections(config):
"""More for backwards compatibility with stuff from 'General' section.
"""
update_job_defaults_section(config)
General = config['General']
# Update a few where the names change and the section is non-default.
def update_step_job_opts(name):
if General.get('sge_option_'+name) and 'JOB_OPTS' not in config['job.step.'+name]:
config['job.step.'+name]['JOB_OPTS'] = General['sge_option_'+name]
def update_step_njobs(name):
if General.get(name+'_concurrent_jobs') and 'njobs' not in config['job.step.'+name]:
config['job.step.'+name]['njobs'] = int(General[name+'_concurrent_jobs'])
for name in ['bd', 'da', 'la', 'pda', 'pla', 'cns', 'fc', 'asm']:
update_step_job_opts(name)
update_step_njobs(name)
# Prefer 'asm' to 'fc'.
asm = dict(config['job.step.asm'])
config['job.step.asm'] = config['job.step.fc']
del config['job.step.fc']
config['job.step.asm'].update(asm)
def parse_cfg_with_sections(stream):
"""Return as dict of dict of ...
"""
#Experimental:
"""
ConfigParser sections become sub-sub sections when separated by dots.
[foo.bar]
baz = 42
is equivalent to JSON
{"foo": {"bar": {"baz": 42}}}
"""
content = stream.read()
result = dict()
try:
jdict = json.loads(NativeIO(content).read())
return jdict
except ValueError:
pass #logger.exception('Could not parse stream as JSON.')
try:
config = ConfigParser(strict=False)
config.optionxform = str
config.read_file(NativeIO(content))
sections = config.sections()
for sec in sections:
result[sec] = dict(config.items(sec))
return result
except:
raise
def check_config_sections(cfg):
"""And ensure these all exist.
"""
allowed_sections = set(['General',
'job.step.dust',
'job.step.da', 'job.step.pda',
'job.step.la', 'job.step.pla',
'job.step.cns', 'job.step.fc',
'job.step.asm',
'job.defaults',
])
all_sections = set(k for k,v in list(cfg.items()) if isinstance(v, dict))
unexpected = all_sections - allowed_sections
if unexpected:
msg = 'You have {} unexpected cfg sections: {}'.format(
len(unexpected), unexpected)
raise Exception(msg)
# Guarantee they all exist.
for sec in allowed_sections:
if sec not in cfg:
cfg[sec] = dict()
def update_dash_flags(cfg, key):
if key not in cfg:
return
val = cfg[key]
cfg[key] = new_val = functional.dash_flags(cfg[key])
if val != new_val:
msg = '''\
Option contains flags with "_":
"{key}={val}". Those should be "-", as in
"{key}={new_val}". Auto-replaced.'''.format(**locals())
logger.warning(msg)
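# e.g. "--max_diff 100" becomes "--max-diff 100" (functional.dash_flags is
# assumed to swap '_' for '-' within flag names, matching the warning above).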
TEXT_FILE_BUSY = 'avoid_text_file_busy'
def update_defaults(cfg):
"""cfg is probably the General sub-dict.
"""
def set_default(key, val):
if key not in cfg:
cfg[key] = val
set_default('input_type', 'raw')
set_default('overlap_filtering_setting', '--max-diff 1000 --max-cov 1000 --min-cov 2')
#set_default('pa_daligner_option', '-e.70 -s100 -t16') # TODO: -t is a dumb default
#set_default('ovlp_daligner_option', '-e.96 -s1000 -h60 -t32') # TODO: -t is a dumb default
set_default('pa_HPCdaligner_option', '-v')
set_default('ovlp_HPCdaligner_option', '-v -l500')
set_default('pa_HPCTANmask_option', '-l500') # daligner defaults to -l1000
#set_default('ovlp_HPCTANmask_option', '-l500')
set_default('pa_REPmask_code', '0,300/0,300/0,300')
set_default('pa_DBsplit_option', '-x500 -s200 -a')
set_default('skip_checks', False)
set_default('pa_DBdust_option', '') # Gene recommends the defaults. I have tried -w128 -t2.5 -m20
set_default('pa_fasta_filter_option', 'streamed-internal-median')
set_default('pa_subsample_coverage', 0)
set_default('pa_subsample_strategy', 'random')
set_default('pa_subsample_random_seed', 12345)
set_default('dazcon', False)
set_default('pa_dazcon_option', '-j 4 -x -l 500')
set_default('ovlp_DBdust_option', '')
set_default('ovlp_DBsplit_option', '-x500 -s200 -a')
set_default('falcon_sense_option', '--output-multi --min-idt 0.70 --min-cov 2 --max-n-read 1800')
set_default('falcon_sense_skip_contained', False)
set_default('falcon_sense_greedy', False)
set_default('LA4Falcon_preload', '')
set_default('fc_ovlp_to_graph_option', '')
set_default('genome_size', 0)
set_default('seed_coverage', 20)
set_default('length_cutoff', -1)
set_default('length_cutoff_pr', 0)
set_default('bestn', 12)
set_default('target', 'assembly')
set_default(TEXT_FILE_BUSY, bash.BUG_avoid_Text_file_busy)
for bool_key in ('skip_checks', 'dazcon', 'falcon_sense_skip_contained', 'falcon_sense_greedy', 'LA4Falcon_preload', TEXT_FILE_BUSY):
cfg[bool_key] = functional.cfg_tobool(cfg.get(bool_key, False))
if 'dust' in cfg:
logger.warning(
"The 'dust' option is deprecated and ignored. We always run DBdust now. Use ovlp_/pa_DBdust_option to override DBdust default arguments.")
bash.BUG_avoid_Text_file_busy = cfg[TEXT_FILE_BUSY]
clean_falcon_options(cfg)
falcon_sense_option = cfg['falcon_sense_option']
if 'local_match_count' in falcon_sense_option or 'output_dformat' in falcon_sense_option:
raise Exception('Please remove obsolete "--local_match_count_*" or "--output_dformat"' +
' from "falcon_sense_option" in your cfg: %s' % repr(falcon_sense_option))
genome_size = int(cfg['genome_size'])
length_cutoff = int(cfg['length_cutoff'])
if length_cutoff < 0 and genome_size < 1:
raise Exception(
'Must specify either length_cutoff>0 or genome_size>0')
pa_subsample_strategy = cfg['pa_subsample_strategy']
pa_subsample_random_seed = int(cfg['pa_subsample_random_seed'])
pa_subsample_coverage = int(cfg['pa_subsample_coverage'])
if pa_subsample_coverage > 0:
if genome_size < 1:
raise Exception(
'Must specify genome_size > 0 for subsampling.')
# This one depends on length_cutoff_pr for its default.
fc_ovlp_to_graph_option = cfg['fc_ovlp_to_graph_option']
if '--min_len' not in fc_ovlp_to_graph_option and '--min-len' not in fc_ovlp_to_graph_option:
length_cutoff_pr = cfg['length_cutoff_pr']
fc_ovlp_to_graph_option += ' --min-len {}'.format(length_cutoff_pr)
cfg['fc_ovlp_to_graph_option'] = fc_ovlp_to_graph_option
target = cfg['target']
if target not in ["overlapping", "pre-assembly", "assembly"]:
msg = """ Target has to be "overlapping", "pre-assembly" or "assembly" in this verison. You have an unknown target {!r} in the configuration file. """.format(target)
raise Exception(msg)
possible_extra_keys = [
'sge_option', 'default_concurrent_jobs',
'pwatcher_type', 'pwatcher_directory',
'job_type', 'job_queue', 'job_name_style',
'use_tmpdir',
]
for step in ['dust', 'da', 'la', 'pda', 'pla', 'fc', 'cns', 'asm']:
sge_option_key = 'sge_option_' + step
possible_extra_keys.append(sge_option_key)
concurrent_jobs_key = step + '_concurrent_jobs'
possible_extra_keys.append(concurrent_jobs_key)
extra = list()
for key in possible_extra_keys:
if key in cfg:
extra.append(key)
if extra:
extra.sort()
msg = 'You have several old-style options. These should be provided in the `[job.defaults]` or `[job.step.*]` sections, and possibly renamed. See https://github.com/PacificBiosciences/FALCON/wiki/Configuration\n {}'.format(extra)
logger.warning(msg)
check_unexpected_keys(cfg)
def check_unexpected_keys(cfg):
# Warn on unused variables.
expected = (TEXT_FILE_BUSY,
'input_fofn',
'input_type',
'genome_size',
'seed_coverage',
'length_cutoff',
'length_cutoff_pr',
'dazcon',
'pa_dazcon_option',
'pa_DBdust_option',
'pa_fasta_filter_option',
'pa_subsample_coverage',
'pa_subsample_strategy',
'pa_subsample_random_seed',
'pa_DBsplit_option',
'pa_HPCTANmask_option',
'pa_HPCREPmask_option',
'pa_REPmask_code',
'pa_daligner_option',
'pa_HPCdaligner_option',
'ovlp_DBdust_option',
'ovlp_DBsplit_option',
#'ovlp_HPCTANmask_option',
'ovlp_daligner_option',
'ovlp_HPCdaligner_option',
'skip_checks',
'falcon_sense_option',
'falcon_sense_skip_contained',
'falcon_sense_greedy',
'LA4Falcon_preload',
'LA4Falcon_pre', # hidden
'LA4Falcon_post', # hidden
'LA4Falcon_dbdir', # hidden
'overlap_filtering_setting',
'fc_ovlp_to_graph_option',
'bestn',
'target',
)
unused = set(cfg.keys()) - set(expected)
if unused:
logger.warning("Unexpected keys in input config: {}".format(unused))
default_logging_config = """
[loggers]
keys=root
[handlers]
keys=stream,file_all
[formatters]
keys=form01,form02
[logger_root]
level=NOTSET
handlers=stream,file_all
[handler_stream]
class=StreamHandler
level=INFO
formatter=form02
args=(sys.stderr,)
[handler_file_all]
class=FileHandler
level=DEBUG
formatter=form01
args=('all.log', 'w')
[formatter_form01]
format=%(asctime)s - %(name)s:%(lineno)d - %(levelname)s - %(message)s
[formatter_form02]
format=[%(levelname)s]%(message)s
"""
def _setup_logging(logging_config_fn):
"""See https://docs.python.org/2/library/logging.config.html
"""
logging.Formatter.converter = time.gmtime # cannot be done in .ini
if logging_config_fn:
if logging_config_fn.endswith('.json'):
logging.config.dictConfig(
json.loads(open(logging_config_fn).read()))
# print repr(logging.Logger.manager.loggerDict) # to debug
return
logger_fileobj = open(logging_config_fn)
else:
logger_fileobj = NativeIO(default_logging_config)
defaults = {
}
logging.config.fileConfig(
logger_fileobj, defaults=defaults, disable_existing_loggers=False)
def setup_logger(logging_config_fn):
global logger
try:
_setup_logging(logging_config_fn)
logger = logging.getLogger("fc_run")
logger.info('Setup logging from file "{}".'.format(logging_config_fn))
except Exception:
logging.basicConfig()
logger = logging.getLogger()
logger.exception(
'Failed to setup logging from file "{}". Using basicConfig().'.format(logging_config_fn))
try:
import logging_tree
logger.info(logging_tree.format.build_description())
except ImportError:
pass
return logger
def get_length_cutoff(length_cutoff, fn):
if length_cutoff < 0:
length_cutoff = int(open(fn).read().strip())
logger.info('length_cutoff=%d from %r' % (length_cutoff, fn))
return length_cutoff # possibly updated
| 36.251318 | 237 | 0.645174 | 2,741 | 20,627 | 4.620212 | 0.184969 | 0.03735 | 0.017056 | 0.010265 | 0.202306 | 0.141977 | 0.11513 | 0.072805 | 0.047694 | 0.033955 | 0 | 0.008808 | 0.229457 | 20,627 | 568 | 238 | 36.315141 | 0.78797 | 0.112862 | 0 | 0.079625 | 0 | 0.023419 | 0.321037 | 0.052258 | 0 | 0 | 0 | 0.001761 | 0.002342 | 1 | 0.06089 | false | 0.004684 | 0.039813 | 0 | 0.131148 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
dc9c6ac173bb71fc3a458b96605182e981a28033 | 1,499 | py | Python | python/snpx_train_classifier.py | ahmedezzat85/SNPX_ML | 7316b0d46d39d2335b3095527a3ac81be208928d | [
"Apache-2.0"
] | null | null | null | python/snpx_train_classifier.py | ahmedezzat85/SNPX_ML | 7316b0d46d39d2335b3095527a3ac81be208928d | [
"Apache-2.0"
] | null | null | null | python/snpx_train_classifier.py | ahmedezzat85/SNPX_ML | 7316b0d46d39d2335b3095527a3ac81be208928d | [
"Apache-2.0"
] | null | null | null | """ Synaplexus Trainer Script
"""
import os
import snpx
import numpy as np
from snpx_arg_parser import snpx_parse_cmd_line_options

def main():
    args = snpx_parse_cmd_line_options()
    classifier = snpx.get_classifier(args)
    classifier.train(num_epoch     = args.num_epoch,
                     batch_size    = args.batch_size,
                     start_epoch   = args.begin_epoch,
                     optmz         = args.optimizer,
                     lr            = args.lr,
                     l2_reg        = args.l2_reg,
                     lr_decay      = args.lr_decay,
                     lr_decay_step = args.lr_step)
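

# Illustrative aside (not part of the original script): test() below does a
# random hyperparameter search with log-uniform sampling, i.e. every decade
# between the bounds is equally likely.
def _example_log_uniform_draw():
    """Sketch: one log-uniform draw over [1e-4, 1e-1], as used for lr in test()."""
    return 10 ** np.random.uniform(-4, -1)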


def test():
    args = snpx_parse_cmd_line_options()
    # lr_list = [0.1, 0.09, 0.08, 0.05, 0.04, 0.03, 0.02, 0.01, 0.009, 0.008, 0.005, 0.004, 0.001]
    for i in range(100):
        lr = 10**np.random.uniform(-4, -1)
        wd = 10**np.random.uniform(-5, -2)
        args.logs_subdir = 'mlp-' + str(i)
        print ('ITERATION = ', i, ' ===> ', lr, wd)
        classifier = snpx.get_classifier(args)
        classifier.train(num_epoch     = 10,
                         batch_size    = 128,
                         start_epoch   = 0,
                         optmz         = 'adam',
                         lr            = lr,
                         l2_reg        = wd,
                         lr_decay      = args.lr_decay,
                         lr_decay_step = args.lr_step)
        classifier.close()

if __name__ == '__main__':
    main() | 35.690476 | 98 | 0.480987 | 179 | 1,499 | 3.759777 | 0.413408 | 0.062407 | 0.053492 | 0.071322 | 0.390788 | 0.356612 | 0.276374 | 0.276374 | 0.276374 | 0.115899 | 0 | 0.071591 | 0.412942 | 1,499 | 42 | 99 | 35.690476 | 0.693182 | 0.080053 | 0 | 0.235294 | 0 | 0 | 0.026239 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.058824 | false | 0 | 0.117647 | 0 | 0.176471 | 0.029412 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0
dc9f2365e7d7585a5b88e7b6df68a1e9709c671b | 1,339 | py | Python | setup.py | wangqr/hickle | 2cdfa2b3c0f65ac04836c409946536c224c32c70 | [
"MIT"
] | null | null | null | setup.py | wangqr/hickle | 2cdfa2b3c0f65ac04836c409946536c224c32c70 | [
"MIT"
] | null | null | null | setup.py | wangqr/hickle | 2cdfa2b3c0f65ac04836c409946536c224c32c70 | [
"MIT"
] | null | null | null | # To increment version
# Check you have ~/.pypirc filled in
# git tag x.y.z
# git push && git push --tags
# rm -rf dist; python setup.py sdist bdist_wheel
# TEST: twine upload --repository-url https://test.pypi.org/legacy/ dist/*
# twine upload dist/*
from setuptools import setup, find_packages
import sys
if sys.version_info.major == 3:
    astro = "astropy<3.1"
else:
    astro = "astropy<3.0"

version = '3.4.3'
author = 'Danny Price'

with open("README.md", "r") as fh:
    long_description = fh.read()

setup(name='hickle',
      version=version,
      description='Hickle - a HDF5 based version of pickle',
      long_description=long_description,
      long_description_content_type='text/markdown',
      author=author,
      author_email='dan@thetelegraphic.com',
      url='http://github.com/telegraphic/hickle',
      download_url='https://github.com/telegraphic/hickle/archive/%s.tar.gz' % version,
      platforms='Cross platform (Linux, Mac OSX, Windows)',
      keywords=['pickle', 'hdf5', 'data storage', 'data export'],
      #py_modules = ['hickle', 'hickle_legacy'],
      install_requires=['numpy', 'h5py'],
      setup_requires=['pytest-runner', 'pytest-cov'],
      tests_require=['pytest', astro, 'scipy', 'pandas'],
      python_requires='>=2.7',
      packages=find_packages(),
      zip_safe=False,
      )
| 31.880952 | 87 | 0.661688 | 176 | 1,339 | 4.926136 | 0.647727 | 0.069204 | 0.029988 | 0.069204 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.011916 | 0.185213 | 1,339 | 41 | 88 | 32.658537 | 0.782768 | 0.207618 | 0 | 0 | 0 | 0 | 0.328897 | 0.020913 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.071429 | 0 | 0.071429 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
dc9f8a4f5130a925e65f5e003bf213962fb9a4b2 | 4,470 | py | Python | metrics_layer/core/model/join.py | Zenlytic/granite | 93cc523954b1b900d7893af803a8fb3e5fc7d343 | [
"Apache-2.0"
] | null | null | null | metrics_layer/core/model/join.py | Zenlytic/granite | 93cc523954b1b900d7893af803a8fb3e5fc7d343 | [
"Apache-2.0"
] | null | null | null | metrics_layer/core/model/join.py | Zenlytic/granite | 93cc523954b1b900d7893af803a8fb3e5fc7d343 | [
"Apache-2.0"
] | null | null | null | from copy import deepcopy
from .base import MetricsLayerBase, SQLReplacement
from .field import Field

class Join(MetricsLayerBase, SQLReplacement):
    def __init__(self, definition: dict = {}, explore=None, project=None) -> None:
        self.project = project
        self.explore = explore
        if definition.get("from") is not None:
            definition["from_"] = definition["from"]
        elif definition.get("view_name") is not None:
            definition["from_"] = definition["view_name"]
        else:
            definition["from_"] = definition["name"]

        if "type" not in definition:
            definition["type"] = "left_outer"
        if "relationship" not in definition:
            definition["relationship"] = "many_to_one"

        self.validate(definition)
        super().__init__(definition)

    def replaced_sql_on(self, query_type: str):
        if self.sql_on:
            return self.get_replaced_sql_on(self.sql_on, query_type)
        return f"{self.explore.from_}.{self.foreign_key}={self.from_}.{self.foreign_key}"

    def validate(self, definition: dict):
        required_keys = ["name", "relationship", "type"]
        for k in required_keys:
            if k not in definition:
                raise ValueError(f"Join missing required key {k}")

        neither_join_keys = "sql_on" not in definition and "foreign_key" not in definition
        both_join_keys = "sql_on" in definition and "foreign_key" in definition
        if both_join_keys or neither_join_keys:
            raise ValueError(f"Incorrect join identifiers sql_on and foreign_key (must have exactly one)")
        super().__init__(definition)

    def is_valid(self):
        if self.sql_on:
            fields_to_replace = self.fields_to_replace(self.sql_on)
            # The join isn't valid if we can't find an existing view with that name
            for field in fields_to_replace:
                _, view_name, _ = Field.field_name_parts(field)
                if view_name not in self.explore.join_names():
                    err_msg = (
                        f"Could not find view {view_name} for join {self.name} in explore {self.explore.name}"
                    )
                    print(err_msg)
                    return False
            return True
        return self.foreign_key is not None

    def required_views(self):
        if not self.sql_on:
            return [self.explore.from_, self.from_]

        views = []
        for field in self.fields_to_replace(self.sql_on):
            _, join_name, _ = Field.field_name_parts(field)
            if join_name == self.explore.name:
                views.append(self.explore.from_)
            else:
                join = self.explore.get_join(join_name)
                views.append(join.from_)
        return list(set(views))

    def to_dict(self):
        output = {**self._definition}
        return output

    def get_replaced_sql_on(self, sql: str, query_type: str):
        sql_on = deepcopy(sql)
        fields_to_replace = self.fields_to_replace(sql_on)
        for field in fields_to_replace:
            _, join_name, column_name = Field.field_name_parts(field)
            if join_name == self.explore.name:
                view_name = self.explore.from_
            else:
                join = self.explore.get_join(join_name)
                view_name = join.from_

            view = self._get_view_internal(view_name)
            if view is None:
                return

            table_name = view.name
            field_obj = self.project.get_field(
                column_name, view_name=table_name, explore_name=self.explore.name
            )
            if field_obj and table_name:
                sql_condition = field_obj.sql_query(query_type)
                replace_with = sql_condition
            elif table_name:
                replace_with = f"{table_name}.{column_name}"
            else:
                replace_with = column_name

            replace_text = "${" + field + "}"
            sql_on = sql_on.replace(replace_text, replace_with)
        return sql_on

    def _get_view_internal(self, view_name: str):
        if self.from_ is not None and view_name == self.from_:
            view = self.project.get_view(self.from_)
        elif view_name == self.explore.from_:
            view = self.project.get_view(self.explore.from_)
        else:
            view = self.project.get_view(view_name)
        return view
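

# Illustrative sketch (not part of the original module): how a ``sql_on``
# template with ${view.field} references gets resolved by get_replaced_sql_on().
# The join/view names below are hypothetical.
def _example_sql_on_substitution():
    sql_on = "${orders.customer_id}=${customers.id}"
    # Suppose both fields resolve to plain "table.column" references:
    for field, replace_with in [("orders.customer_id", "orders.customer_id"),
                                ("customers.id", "customers.id")]:
        sql_on = sql_on.replace("${" + field + "}", replace_with)
    return sql_on  # -> "orders.customer_id=customers.id"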
| 36.341463 | 110 | 0.598658 | 553 | 4,470 | 4.553345 | 0.168174 | 0.033757 | 0.0417 | 0.030183 | 0.277204 | 0.222796 | 0.158459 | 0.081017 | 0.081017 | 0.081017 | 0 | 0 | 0.314094 | 4,470 | 122 | 111 | 36.639344 | 0.821265 | 0.015436 | 0 | 0.15625 | 0 | 0.010417 | 0.099341 | 0.02205 | 0 | 0 | 0 | 0 | 0 | 1 | 0.083333 | false | 0 | 0.03125 | 0 | 0.239583 | 0.010417 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
dca77cc048d67a40b1149372b91ec26456a1dac8 | 5,404 | py | Python | mx_mul.py | majklllll/python | 09c62f86d6ebe6b437bc6fc343819956aa79f509 | [
"MIT"
] | null | null | null | mx_mul.py | majklllll/python | 09c62f86d6ebe6b437bc6fc343819956aa79f509 | [
"MIT"
] | null | null | null | mx_mul.py | majklllll/python | 09c62f86d6ebe6b437bc6fc343819956aa79f509 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
""" Utility . """
from typing import List, Union

class Matrix:
    """ Represents mathematical matrix and its operations"""

    def __init__(self, values: List[List[Union[int, float]]]):
        if not (
            isinstance(values, List) and
            all([isinstance(row, List) and len(row) == len(values[0]) for row in values]) and
            all([isinstance(cell, (int, float)) for row in values for cell in row]) and
            (len(values) > 0 and len(values[0]) > 0)
        ):
            raise TypeError("Incorrect input types in the 'values' list")
        self.values = values

    def dot(self, other: 'Matrix') -> 'Matrix':
        """ Performs matrix multiplication of this and the other matrix

        Args:
            other: Other matrix instance

        Returns:
            New matrix instance that is a result of multiplication
        """
        result_values = self._initialize_empty_matrix_values(others_columns=len(other.values[0]))
        for i in range(len(self.values)):  # i is row index of the result
            for j in range(len(other.values[0])):  # j is column index of the result
                result_values[i][j] = sum(
                    [a * [row[j] for row in other.values][index] for index, a in enumerate(self._row(i))])
        return Matrix(result_values)

    def _initialize_empty_matrix_values(self, others_columns):
        return [[0 for x in range(others_columns)] for x in range(len(self.values))]

    def _row(self, index):
        return self.values[index]

    def __str__(self):
        rows = []
        for row in self.values:
            rows.append(" ".join([str(cell) for cell in row]))
        return "\n".join(rows)

    def __mul__(self, other):
        return self.dot(other)

    def __eq__(self, other):
        return self.__class__ == other.__class__ and \
            len(self.values) == len(other.values) and \
            len(self.values[0]) == len(other.values[0]) and \
            all([row == other.values[i] for i, row in enumerate(self.values)])

    def __ne__(self, other):
        return not self.__eq__(other)
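

# Illustrative usage sketch (not part of the original module):
def _example_matrix_dot():
    """[[1, 2], [3, 4]] x [[5, 6], [7, 8]] == [[19, 22], [43, 50]]"""
    a = Matrix([[1, 2], [3, 4]])
    b = Matrix([[5, 6], [7, 8]])
    assert a * b == Matrix([[19, 22], [43, 50]])  # __mul__ delegates to dot()
    return a * b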


class MatrixCalculatorConsoleInterface:
    """ Manages user interactions via command line interface """

    def read_matrices_values(self, labels: List[str]) -> List[List[List[Union[int, float]]]]:
        """ Prompt user via console for typing matrix parameters such as width, height and individual values

        Args:
            labels: List of labels, each assigned to one matrix prompted

        Returns:
            List of matrix values (2D lists of integers or floats)
        """
        matrix_parameters = []
        for label in labels:
            width, height = self._prompt_for_dimensions(label)
            matrix_parameters.append((label, width, height))
        return self._prompt_for_values(matrix_parameters)

    def _prompt_for_dimensions(self, label):
        print("Matrix {}:".format(label))
        width = self._read_attribute('width: ')
        height = self._read_attribute('height: ')
        print('')
        return width, height

    def _prompt_for_values(self, matrices):
        result_matrices = []
        for label, width, height in matrices:
            print("Matrix {} values:".format(label))
            rows = []
            for i in range(height):
                row = self._read_matrix_row(width=width)
                rows.append(row)
                assert True
            result_matrices.append(rows)
            print('')
        return result_matrices

    @classmethod
    def _read_attribute(cls, prompt):
        readout = input(prompt)
        return cls._parse_numeric_value(readout)

    @classmethod
    def _parse_numeric_value(cls, readout):
        try:
            value = int(readout)
        except ValueError:
            try:
                value = float(readout)
            except ValueError:
                raise ValueError("Unexpected format of attribute readout")
        return value

    @classmethod
    def _read_matrix_row(cls, width):
        row_data = []
        row_readout = input()
        split_data = row_readout.split()
        if len(split_data) != width:
            raise ValueError("Incorrect number of values on the row")
        for cell_data in split_data:
            row_data.append(cls._parse_numeric_value(cell_data))
        return row_data

    @staticmethod
    def show_result(result: str):
        """ Display result in the console

        Args:
            result: Result as a text to print
        """
        print("Result:")
        print(result)


class MatrixCalculator:
    """ Represents top level of calculator application """

    def __init__(self, user_interface_class=MatrixCalculatorConsoleInterface):
        self.ui = user_interface_class()

    def multiplication(self):
        """ Perform matrix multiplication of two matrices 'A' and 'B' with data from user interface
        """
        values_a, values_b = self.ui.read_matrices_values(labels=['A', 'B'])
        result = Matrix(values_a).dot(Matrix(values_b))
        self.ui.show_result(str(result))


if __name__ == '__main__':
    calc = MatrixCalculator()
    calc.multiplication()
| 34.641026 | 109 | 0.587898 | 631 | 5,404 | 4.839937 | 0.228209 | 0.02947 | 0.010478 | 0.014735 | 0.02685 | 0 | 0 | 0 | 0 | 0 | 0 | 0.002954 | 0.310881 | 5,404 | 155 | 110 | 34.864516 | 0.817132 | 0.156551 | 0 | 0.111111 | 0 | 0 | 0.045005 | 0 | 0 | 0 | 0 | 0 | 0.010101 | 1 | 0.171717 | false | 0 | 0.010101 | 0.050505 | 0.343434 | 0.060606 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
dca8ddb5d0061fd85a00dcf1c47006bf0b4d379f | 3,639 | py | Python | documentapp/views.py | hayasilin/Document-Manager-Python | 8414e112b86d8ada32829f607e3ee4e80a8d76c2 | [
"MIT"
] | 2 | 2017-11-08T09:31:15.000Z | 2019-06-25T11:34:06.000Z | documentapp/views.py | hayasilin/Document-Manager-Python | 8414e112b86d8ada32829f607e3ee4e80a8d76c2 | [
"MIT"
] | null | null | null | documentapp/views.py | hayasilin/Document-Manager-Python | 8414e112b86d8ada32829f607e3ee4e80a8d76c2 | [
"MIT"
] | null | null | null | from django.shortcuts import render, redirect
from documentapp.models import document
from documentapp.models import functionModel
from documentapp.form import PostForm
from django.contrib.auth import authenticate
from django.contrib import auth
from django.http import HttpResponse
from django.contrib.auth.models import User
# Create your views here.
def listone(request):
    try:
        unit = document.objects.get(cClassName="TestPlayerManager")  # fetch a single record
    except:
        errormessage = " (讀取錯誤!)"
    return render(request, "listone.html", locals())


def detail(request, id=None):
    if id != None:
        if request.method == "POST":  # only handle POST submissions
            id = request.POST['cId']  # get the id entered in the form
    try:
        documents = document.objects.all().order_by('id')  # read the table, sorted by id ascending
        unit = document.objects.get(id=id)
        functions = functionModel.objects.filter(fdocument__id=id).order_by('id')
    except:
        message = "讀取錯誤!"
    return render(request, "detail.html", locals())


def listall(request):
    documents = document.objects.all().order_by('id')  # read the table, sorted by id ascending
    return render(request, "listall.html", locals())


def index(request):
    try:
        unit = document.objects.get(cClassName="TestPlayerManager")  # fetch a single record
    except:
        errormessage = " (讀取錯誤!)"
    documents = document.objects.all().order_by('id')  # read the table, sorted by id ascending
    functions = functionModel.objects.filter(fdocument__id=1).order_by('id')
    return render(request, "index.html", locals())


def post(request):  # create a record; the data must be validated
    if request.method == "POST":
        postform = PostForm(request.POST)  # build the form object
        if postform.is_valid():  # passed form validation
            cClassName = postform.cleaned_data['cClassName']  # get the submitted form data
            cClassDescription = postform.cleaned_data['cClassDescription']
            cClassOverview = postform.cleaned_data['cClassOverview']
            cAuthor = postform.cleaned_data['cAuthor']
            # create a new record
            unit = document.objects.create(cClassName=cClassName, cClassDescription=cClassDescription, cClassOverview=cClassOverview, cAuthor=cAuthor)
            unit.save()  # write it to the database
            message = '已儲存...'
            return redirect('/listall/')
        else:
            message = '驗證碼錯誤!'
    else:
        message = 'Class和Description必須輸入!'
        postform = PostForm()
    return render(request, "post.html", locals())


def delete(request, id=None):  # delete a record
    if id != None:
        if request.method == "POST":  # only handle POST submissions
            id = request.POST['cId']  # get the id entered in the form
        try:
            unit = document.objects.get(id=id)
            unit.delete()
            return redirect('/listall/')
        except:
            message = "讀取錯誤!"
    return render(request, "delete.html", locals())


def edit(request, id=None, mode=None):
    if mode == "load":
        unit = document.objects.get(id=id)
        return render(request, "edit.html", locals())
    elif mode == "save":
        unit = document.objects.get(id=id)
        unit.cClassName = request.POST['cClassName']
        unit.cClassDescription = request.POST['cClassDescription']
        unit.cClassOverview = request.POST['cClassOverview']
        unit.save()
        message = '已修改...'
        return redirect('/listall/')


def postform(request):
    postform = PostForm()
    return render(request, "postform.html", locals())


# membership system
def addUser(request, username=None, email=None, password=None, mode=None):
    if mode == "load":
        message = "請填寫資料"
        return render(request, "adduser.html", locals())
    else:
        try:
            user = User.objects.get(username=username)
        except:
            user = None
        if user != None:
            message = user.username + " 帳號已建立!"
            return render(request, "adduser.html", locals())
        else:
            user = User.objects.create_user(username, email, password)
            user.first_name = "wen"
            user.last_name = "lin"
            user.is_staff = True
            user.save()
            return redirect('/admin/')
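

# Illustrative only (not part of this file): a hypothetical urls.py wiring for
# the views above, assuming Django >= 2.0; the URL names and patterns are
# assumptions, not taken from the project.
#
#     from django.urls import path
#     from documentapp import views
#
#     urlpatterns = [
#         path('', views.index),
#         path('listall/', views.listall),
#         path('post/', views.post),
#         path('edit/<int:id>/<str:mode>/', views.edit),
#     ]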
| 30.579832 | 142 | 0.702116 | 436 | 3,639 | 5.818807 | 0.233945 | 0.059125 | 0.074892 | 0.05203 | 0.36421 | 0.336618 | 0.234135 | 0.178952 | 0.178952 | 0.178952 | 0 | 0.000326 | 0.158285 | 3,639 | 118 | 143 | 30.838983 | 0.827946 | 0.050289 | 0 | 0.454545 | 0 | 0 | 0.114086 | 0.006403 | 0 | 0 | 0 | 0 | 0 | 1 | 0.090909 | false | 0.020202 | 0.080808 | 0 | 0.313131 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
dcaaa662e77d5e17488beb153eed0fa76229c9ba | 8,500 | py | Python | EnrollmentStation/Binaries/YubikeyManager/pymodules/smartcard/ReaderMonitoring.py | dennisrahmen/EnrollmentStation | a145345f9bb91bccb2bd67b349af8cfc4ec9e290 | [
"MIT"
] | 1 | 2020-03-16T14:57:15.000Z | 2020-03-16T14:57:15.000Z | EnrollmentStation/Binaries/YubikeyManager/pymodules/smartcard/ReaderMonitoring.py | dennisrahmen/EnrollmentStation | a145345f9bb91bccb2bd67b349af8cfc4ec9e290 | [
"MIT"
] | null | null | null | EnrollmentStation/Binaries/YubikeyManager/pymodules/smartcard/ReaderMonitoring.py | dennisrahmen/EnrollmentStation | a145345f9bb91bccb2bd67b349af8cfc4ec9e290 | [
"MIT"
] | 1 | 2022-02-04T14:55:45.000Z | 2022-02-04T14:55:45.000Z | """Smart card reader monitoring classes.
ReaderObserver is a base class for objects that are to be notified
upon smartcard reader insertion/removal.
ReaderMonitor is a singleton object notifying registered ReaderObservers
upon reader insertion/removal.
__author__ = "http://www.gemalto.com"
Copyright 2001-2012 gemalto
Author: Jean-Daniel Aussel, mailto:jean-daniel.aussel@gemalto.com
This file is part of pyscard.
pyscard is free software; you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation; either version 2.1 of the License, or
(at your option) any later version.
pyscard is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with pyscard; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
"""
from __future__ import print_function
from threading import Thread, Event
from time import sleep
import traceback
import smartcard.System
from smartcard.Observer import Observer
from smartcard.Observer import Observable
from smartcard.Synchronization import *

# ReaderObserver interface
class ReaderObserver(Observer):
    """
    ReaderObserver is a base abstract class for objects that are to be notified
    upon smartcard reader insertion/removal.
    """

    def __init__(self):
        pass

    def update(self, observable, handlers):
        """Called upon reader insertion/removal.

        @param observable:
        @param handlers:
          - addedreaders: list of added readers causing notification
          - removedreaders: list of removed readers causing notification
        """
        pass


class ReaderMonitor(Observable):
    """Class that monitors reader insertion/removal and notifies observers.

    note: a reader monitoring thread will be running
    as long as the reader monitor has observers, or ReaderMonitor.stop()
    is called.

    It implements the shared state design pattern, where objects
    of the same type all share the same state, in our case essentially
    the ReaderMonitoring Thread. Thanks to Frank Aune for implementing
    the shared state pattern logics.
    """

    __shared_state = {}

    def __init__(self, startOnDemand=True, readerProc=smartcard.System.readers,
                 period=1):
        self.__dict__ = self.__shared_state
        Observable.__init__(self)
        self.startOnDemand = startOnDemand
        self.readerProc = readerProc
        self.period = period
        if self.startOnDemand:
            self.rmthread = None
        else:
            self.rmthread = ReaderMonitoringThread(self, self.readerProc,
                                                   self.period)
            self.rmthread.start()

    def addObserver(self, observer):
        """Add an observer."""
        Observable.addObserver(self, observer)

        # If self.startOnDemand is True, the reader monitoring
        # thread only runs when there are observers.
        if self.startOnDemand:
            if 0 < self.countObservers():
                if not self.rmthread:
                    self.rmthread = ReaderMonitoringThread(
                        self,
                        self.readerProc, self.period)

                    # start reader monitoring thread in another thread to
                    # avoid a deadlock; addObserver and notifyObservers called
                    # in the ReaderMonitoringThread run() method are
                    # synchronized
                    try:
                        # Python 3.x
                        import _thread
                        _thread.start_new_thread(self.rmthread.start, ())
                    except:
                        # Python 2.x
                        import thread
                        thread.start_new_thread(self.rmthread.start, ())
        else:
            observer.update(self, (self.rmthread.readers, []))

    def deleteObserver(self, observer):
        """Remove an observer."""
        Observable.deleteObserver(self, observer)

        # If self.startOnDemand is True, the reader monitoring
        # thread is stopped when there are no more observers.
        if self.startOnDemand:
            if 0 == self.countObservers():
                self.rmthread.stop()
                del self.rmthread
                self.rmthread = None

    def __str__(self):
        return self.__class__.__name__


synchronize(ReaderMonitor,
            "addObserver deleteObserver deleteObservers " +
            "setChanged clearChanged hasChanged " +
            "countObservers")


class ReaderMonitoringThread(Thread):
    """Reader insertion thread.
    This thread polls for pcsc reader insertion, since no
    reader insertion event is available in pcsc.
    """

    __shared_state = {}

    def __init__(self, observable, readerProc, period):
        self.__dict__ = self.__shared_state
        Thread.__init__(self)
        self.observable = observable
        self.stopEvent = Event()
        self.stopEvent.clear()
        self.readers = []
        self.setDaemon(True)
        self.setName('smartcard.ReaderMonitoringThread')
        self.readerProc = readerProc
        self.period = period

    def run(self):
        """Runs until stopEvent is notified, and notify
        observers of all reader insertion/removal.
        """
        while not self.stopEvent.isSet():
            try:
                # no need to monitor if no observers
                if 0 < self.observable.countObservers():
                    currentReaders = self.readerProc()
                    addedReaders = []
                    removedReaders = []

                    if currentReaders != self.readers:
                        for reader in currentReaders:
                            if reader not in self.readers:
                                addedReaders.append(reader)
                        for reader in self.readers:
                            if reader not in currentReaders:
                                removedReaders.append(reader)

                        if addedReaders or removedReaders:
                            # Notify observers
                            self.readers = []
                            for r in currentReaders:
                                self.readers.append(r)
                            self.observable.setChanged()
                            self.observable.notifyObservers((addedReaders,
                                                             removedReaders))

                # wait every second on stopEvent
                self.stopEvent.wait(self.period)

            except Exception:
                # FIXME Tighten the exceptions caught by this block
                traceback.print_exc()
                # Most likely raised during interpreter shutdown due
                # to unclean exit which failed to remove all observers.
                # To solve this, we set the stop event and pass the
                # exception to let the thread finish gracefully.
                self.stopEvent.set()

    def stop(self):
        self.stopEvent.set()
        self.join()


if __name__ == "__main__":
    print('insert or remove readers in the next 20 seconds')

    # a simple reader observer that prints added/removed readers
    class printobserver(ReaderObserver):

        def __init__(self, obsindex):
            self.obsindex = obsindex

        def update(self, observable, handlers):
            addedreaders, removedreaders = handlers
            print("%d - added: " % self.obsindex, addedreaders)
            print("%d - removed: " % self.obsindex, removedreaders)

    class testthread(Thread):

        def __init__(self, obsindex):
            Thread.__init__(self)
            self.readermonitor = ReaderMonitor()
            self.obsindex = obsindex
            self.observer = None

        def run(self):
            # create and register observer
            self.observer = printobserver(self.obsindex)
            self.readermonitor.addObserver(self.observer)
            sleep(20)
            self.readermonitor.deleteObserver(self.observer)

    t1 = testthread(1)
    t2 = testthread(2)
    t1.start()
    t2.start()
    t1.join()
    t2.join()
| 34.979424 | 79 | 0.606941 | 873 | 8,500 | 5.808706 | 0.316151 | 0.02603 | 0.02603 | 0.01124 | 0.180635 | 0.150661 | 0.128574 | 0.115165 | 0.071386 | 0.071386 | 0 | 0.006834 | 0.328588 | 8,500 | 242 | 80 | 35.123967 | 0.881724 | 0.345412 | 0 | 0.266129 | 0 | 0 | 0.038476 | 0.005948 | 0 | 0 | 0 | 0.004132 | 0 | 1 | 0.104839 | false | 0.016129 | 0.080645 | 0.008065 | 0.25 | 0.056452 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
dcabc58fa547609a0320e308b33ba46a259930fc | 559 | py | Python | setup.py | ingmferrer/jira-cloud-python | 6e0d86e1e159ae32a4d69ab9c4568d52e6a2ca86 | [
"MIT"
] | 2 | 2019-11-17T02:23:09.000Z | 2021-03-31T17:38:46.000Z | setup.py | ingmferrer/jira-cloud-python | 6e0d86e1e159ae32a4d69ab9c4568d52e6a2ca86 | [
"MIT"
] | null | null | null | setup.py | ingmferrer/jira-cloud-python | 6e0d86e1e159ae32a4d69ab9c4568d52e6a2ca86 | [
"MIT"
] | null | null | null | import os
from setuptools import setup
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
setup(name='jira-cloud-python',
version='1.0.0',
description='API wrapper for Jira Cloud written in Python',
long_description=read('README.md'),
long_description_content_type="text/markdown",
url='https://github.com/ingmferrer/jira-cloud-python',
author='Miguel Ferrer',
author_email='ingferrermiguel@gmail.com',
license='MIT',
packages=['jiracloud'],
zip_safe=False)
| 27.95 | 70 | 0.681574 | 72 | 559 | 5.152778 | 0.722222 | 0.072776 | 0.080863 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.006508 | 0.175313 | 559 | 19 | 71 | 29.421053 | 0.798265 | 0 | 0 | 0 | 0 | 0 | 0.330948 | 0.044723 | 0 | 0 | 0 | 0 | 0 | 1 | 0.066667 | false | 0 | 0.133333 | 0.066667 | 0.266667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
dcad9bb603568e376cb471bd6d09d3bb075a9195 | 8,150 | py | Python | modules/FICA/FiCALight/Code/module_lnk.py | naaya17/carpe | fa2e3cfebe20f8839c985e5b9b78b538800172a1 | [
"Apache-2.0"
] | 56 | 2019-02-07T06:21:45.000Z | 2022-03-21T08:19:24.000Z | FIVE/Code/module_lnk.py | sk-yaho/carpe | 077ef7ba1582b3de9f5c08d63431e744b77a9e09 | [
"Apache-2.0"
] | 5 | 2020-05-25T17:29:00.000Z | 2021-12-13T20:49:08.000Z | FIVE/Code/module_lnk.py | sk-yaho/carpe | 077ef7ba1582b3de9f5c08d63431e744b77a9e09 | [
"Apache-2.0"
] | 31 | 2019-03-13T10:23:49.000Z | 2021-11-04T12:14:58.000Z | #-*- coding: utf-8 -*-
#!/usr/bin/python3
#!/Author : Gibartes
from moduleInterface.defines import *
from moduleInterface.interface import ModuleComponentInterface
#from defines import *
#from interface import ModuleComponentInterface
from structureReader import structureReader as sr
#import _structureReader as sr
import os,sys,platform

class ModuleLNK(ModuleComponentInterface):

    def __init__(self):
        super().__init__()  # Initialize Module Interface
        self.fileSize = 0
        self.offset = list()
        self.missing = 0
        self.parser = sr.StructureReader()
        self.flag = None

        self.set_attrib(ModuleConstant.NAME, "lnk")
        self.set_attrib(ModuleConstant.VERSION, "0.1")
        self.set_attrib(ModuleConstant.AUTHOR, "HK")
        self.set_attrib("detailed_type", True)

    def __reinit__(self):
        self.fileSize = 0
        self.offset = list()
        self.missing = 0

    def __del__(self):
        self.parser.cleanup()

    """ Module Methods """

    def __evaluate(self):
        fn = self.attrib.get(ModuleConstant.FILE_ATTRIBUTE)
        if(fn==None):
            return ModuleConstant.Return.EINVAL_ATTRIBUTE
        try:
            fd = os.open(fn, os.O_RDONLY)
            os.close(fd)
        except:return ModuleConstant.Return.EINVAL_FILE
        return ModuleConstant.Return.SUCCESS

    # Compare the name fields of ShellItemList and FileLocationInfo
    def __read(self, offset, __encode):
        header = sr._LinkFileStructure()
        size = 0
        __flag = 0
        result = self.parser.bexecute(header.ShellLinkHeader,'int',offset,os.SEEK_SET,'little')
        if(result==False):
            return (False,0,-1,ModuleConstant.INVALID)
        flag = self.parser.get_value("flags")
        isUTF16 = flag & 0x80
        if(isUTF16==0x80):isUTF16=1
        hasRelative = flag & 0x08

        _tmp = self.parser.get_value("lti")
        if(self.parser.get_value('ltime')==0x00):
            self.parser.bgoto(-self.parser.get_field_size('lti'))
            _tmp = self.parser.byte2int(self.parser.bread_raw(0,2))

        nbase = offset+self.parser.btell()-2
        sitem = nbase+2
        size += _tmp
        size += 2
        nbase += size

        self.parser.bgoto(size-2)
        _tmp = self.parser.btell()
        result = self.parser.bexecute(header.FileLocationInfo,'int',0,os.SEEK_CUR,'little')
        if(result==False):
            return (False,0,-1,ModuleConstant.INVALID)
        size += self.parser.get_size()
        _name = self.parser.get_value("oftlp")

        if(self.parser.get_value("oftnsi")==0):
            _len = self.parser.get_value("oftcp")
        elif(self.parser.get_value("oftcp")==0):
            _len = self.parser.get_value("oftnsi")
        else:
            _len = self.parser.get_value("oftcp") if \
                   (self.parser.get_value("oftnsi") > \
                    self.parser.get_value("oftcp")) \
                   else self.parser.get_value("oftnsi")

        _len = (_len -_name) if _len -_name >= 0 else (_name-_len)
        _cmp = None

        try:
            _name = self.parser.bread_raw(nbase+_name,_len,os.SEEK_SET).split(b'\\')[-1].split(b'\x00')[0].strip()
        except:
            return (False,0,-1,ModuleConstant.INVALID)
        try:
            _name = _name.decode()
        except:
            __flag = 1
        if(__flag==1):
            try:_name = _name.decode(__encode)
            except:return (False,0,-1,ModuleConstant.INVALID)

        if(hasRelative==0x08):
            self.parser.bgoto(_tmp+self.parser.get_value("size"),os.SEEK_SET)
            _tmp = self.parser.btell()
            _len = self.parser.byte2int(self.parser.bread_raw(0,2,os.SEEK_CUR))*(isUTF16+1)
            if(_len!=0):
                cmp = self.parser.bread_raw(0,_len).split(b'\\\x00')[-1]
                if(isUTF16):
                    try:
                        cmp = cmp.decode('UTF-16').strip()
                    except:
                        return (False,0,-1,ModuleConstant.INVALID)
                if(_name==cmp):
                    return (True,offset,self.get_attrib(ModuleConstant.CLUSTER_SIZE),ModuleConstant.FILE_ONESHOT)
        else:
            self.parser.bgoto(sitem,os.SEEK_SET)
            while(self.parser.btell()<nbase):
                result = self.parser.bexecute(header.ShellItemList,'int',0,os.SEEK_CUR,'little')
                if(result==False):
                    return (False,0,-1,ModuleConstant.INVALID)
                if(self.parser.get_value("type") in sr._LinkFileStructure.CLSID.CLSID_ShellFSFolder):
                    _len = self.parser.get_value("size")-self.parser.get_size()
                    _cmp = self.parser.bread_raw(0,_len,os.SEEK_CUR)
                    try:
                        _tmp = _cmp[10:-1].split(b'\x00')[0].decode()
                        __flag = 0
                    except:__flag = 1
                    if(__flag):
                        _tmp = _cmp[10:-1].split(b'\x00\x00')[0]
                        if(len(_tmp)%2):_tmp += b'\x00'
                        try:
                            _tmp = _tmp.decode('utf-16')
                        except:
                            return (False,offset,self.get_attrib(ModuleConstant.CLUSTER_SIZE),ModuleConstant.FILE_ONESHOT)
                    if(_name==_tmp):
                        return (True,offset,self.get_attrib(ModuleConstant.CLUSTER_SIZE),ModuleConstant.FILE_ONESHOT)
                    self.parser.bgoto(-_len+self.parser.get_value("size")-self.parser.get_size())
                    continue
                elif(self.parser.get_value("size")==0):
                    return (False,0,-1,ModuleConstant.INVALID)
                self.parser.bgoto(self.parser.get_value("size")-self.parser.get_size())
        return (False,offset,self.get_attrib(ModuleConstant.CLUSTER_SIZE),ModuleConstant.FILE_ONESHOT)

    def carve(self):
        self.__reinit__()
        self.parser.get_file_handle(
            self.get_attrib(ModuleConstant.FILE_ATTRIBUTE),
            self.get_attrib(ModuleConstant.IMAGE_BASE), 1
        )
        offset = self.get_attrib(ModuleConstant.IMAGE_BASE)
        self.parser.bgoto(offset, os.SEEK_SET)
        res = self.__read(offset, self.get_attrib(ModuleConstant.ENCODE))
        if(res[0]==True):
            self.offset.append((res[1], res[2], res[3]))
            self.fileSize += res[2]
            offset += res[2]
        else:
            self.missing += 1
        self.parser.cleanup()

    """ Interfaces """

    def module_open(self, id):  # Reserved method for multiprocessing
        super().module_open()

    def module_close(self):  # Reserved method for multiprocessing
        pass

    def set_attrib(self, key, value):  # method interface for callers to change/add module attributes
        self.update_attrib(key, value)

    def get_attrib(self, key, value=None):  # method interface for callers to read module attributes
        return self.attrib.get(key)

    def execute(self, cmd=None, option=None):  # method through which callers run the module
        if(cmd=='inspect'):
            return self.flag
        else:
            self.flag = None
            ret = self.__evaluate()
            if(ret!=ModuleConstant.Return.SUCCESS):
                return [(False, ret, ModuleConstant.INVALID)]
            self.carve()
            if(self.offset==[]):
                return [(False, 0, ModuleConstant.INVALID)]
            self.flag = "lnk"
            return self.offset  # return <= 0 means error while collecting information


if __name__ == '__main__':

    lnk = ModuleLNK()
    try:
        lnk.set_attrib(ModuleConstant.FILE_ATTRIBUTE, sys.argv[1])  # Insert .lnk File
    except:
        print("This module needs exactly one parameter.")
        sys.exit(1)

    lnk.set_attrib(ModuleConstant.IMAGE_BASE, 0)  # Set offset of the file base
    lnk.set_attrib(ModuleConstant.CLUSTER_SIZE, 1024)
    lnk.set_attrib(ModuleConstant.ENCODE, 'euc-kr')
    cret = lnk.execute()
    print(cret)
    sys.exit(0)
| 35.58952 | 122 | 0.567853 | 921 | 8,150 | 4.829533 | 0.193268 | 0.107914 | 0.070144 | 0.072842 | 0.386691 | 0.28732 | 0.22527 | 0.205486 | 0.17536 | 0.131969 | 0 | 0.021349 | 0.310307 | 8,150 | 228 | 123 | 35.745614 | 0.76997 | 0.062086 | 0 | 0.251462 | 0 | 0 | 0.031892 | 0 | 0 | 0 | 0.002636 | 0 | 0 | 1 | 0.064327 | false | 0.005848 | 0.023392 | 0.005848 | 0.192982 | 0.011696 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
dcae44d0c3121800cb8e3903b0c33dc4633dadf1 | 1,037 | py | Python | mpl_plot_widget.py | ThatSnail/drummer | 259d5c9620382024ab17679c99465a8d816e186c | [
"MIT"
] | null | null | null | mpl_plot_widget.py | ThatSnail/drummer | 259d5c9620382024ab17679c99465a8d816e186c | [
"MIT"
] | 1 | 2021-09-28T19:08:02.000Z | 2021-09-28T19:34:55.000Z | mpl_plot_widget.py | ThatSnail/drummer | 259d5c9620382024ab17679c99465a8d816e186c | [
"MIT"
] | null | null | null |
from PyQt5.QtWidgets import QWidget, QVBoxLayout
import matplotlib.pyplot as plt
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.figure import Figure
import numpy as np
from scipy.interpolate import griddata

class MplPlotWidget(QWidget):

    def __init__(self, parent=None):
        QWidget.__init__(self, parent)

        self.fig = Figure()
        #self.fig.subplots_adjust(left=0.2)
        self.ax1 = self.fig.add_subplot(111)
        self.canvas = FigureCanvas(self.fig)

        self.vbl = QVBoxLayout()
        self.vbl.addWidget(self.canvas)
        self.setLayout(self.vbl)

        self.line, = self.ax1.plot([], [])
        self.ax1.set_xlim(0, 1)
        self.ax1.set_ylim(-1, 1)

    def plot(self, ts, values):
        #self.ax1.cla()

        # Normalize
        values /= np.max(np.abs(values))

        # Flip if weird
        if values[0] < 0:
            values *= -1

        self.line.set_xdata(ts)
        self.line.set_ydata(values)
        self.canvas.draw()
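

# Illustrative usage sketch (not part of the original module); assumes a
# running desktop session. Embeds the widget in a window and plots one
# normalized sine period.
def _example_show_widget():
    import sys
    from PyQt5.QtWidgets import QApplication
    app = QApplication(sys.argv)
    widget = MplPlotWidget()
    ts = np.linspace(0.0, 1.0, 200)
    widget.plot(ts, np.sin(2.0 * np.pi * ts))  # plot() normalizes the values in place
    widget.show()
    app.exec_()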
| 25.292683 | 80 | 0.633558 | 134 | 1,037 | 4.791045 | 0.447761 | 0.054517 | 0.043614 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.02458 | 0.254581 | 1,037 | 40 | 81 | 25.925 | 0.805951 | 0.069431 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.08 | false | 0 | 0.24 | 0 | 0.36 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
dcb1a3e7b118288050e6f9d6c1872b090849736a | 1,705 | py | Python | imageproc_viz/Make_20xMontages.py | hshayya/2022_Shayya_UPR_Guidance | b9a305a147a105c3ac9c0173e06b94f66e4a6102 | [
"MIT"
] | null | null | null | imageproc_viz/Make_20xMontages.py | hshayya/2022_Shayya_UPR_Guidance | b9a305a147a105c3ac9c0173e06b94f66e4a6102 | [
"MIT"
] | null | null | null | imageproc_viz/Make_20xMontages.py | hshayya/2022_Shayya_UPR_Guidance | b9a305a147a105c3ac9c0173e06b94f66e4a6102 | [
"MIT"
] | null | null | null | import csv
from ij import ImagePlus, CompositeImage, IJ, gui
from ij.plugin import ImagesToStack
#Prepare Stack of 20x Images for a given OR WT/Ctrl/cKO
#Auto-levels each panel (biology of interest here is overlap & correlation red/green, not absolute levels).
reader = csv.DictReader(open('/path/to/blinded/annotation/out','r'), delimiter = '\t') #used blinded annotation output tsv to select images
#ensured that fractions of intermixed/compartmentalized etc. on final montage ~= the observed frequencies in the blinded annotations for that OR/gt combo.
#Parse the dictionary
reader = [i for i in reader]
slides_of_interest = ['b','k','l','hh','o','e_05_13_21','jj','z','i_05_13_21']
random_codes = []
imps = []
for elem in reader:
    if elem['random_code'] in slides_of_interest:
        random_codes.append(elem['random_code'])
        imp = CompositeImage(ImagePlus(elem['file']))

        #Stretch Histogram for Each Channel
        for c in range(imp.getDimensions()[2]):
            imp.setC(c+1)  #1-based...
            IJ.run(imp, "Enhance Contrast", "saturated=0.35")

        #Flatten to RGB
        title = imp.getTitle()
        imp.setDisplayMode(1)
        out_ = imp.flatten()
        out_.setTitle(title)
        imps.append(out_)
order = [slides_of_interest.index(i) for i in random_codes]
final_imps = [x for _, x in sorted(zip(order, imps))]
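# Illustrative note (not original code) on the line above: with, say,
# order = [2, 0, 1] and imps = [a, b, c], sorted(zip(order, imps)) gives
# [(0, b), (1, c), (2, a)], so final_imps == [b, c, a]; the panels are
# re-ordered to match slides_of_interest rather than the row order read.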
#Add scale bar to last image
IJ.run(final_imps[len(final_imps)-1], "Scale Bar...", "width=100 height=8 font=28 color=White background=None location=[Lower Right] hide overlay");
final_imps[len(final_imps)-1] = final_imps[len(final_imps)-1].flatten()
out_show = ImagesToStack.run(final_imps)
out_show.show() #made the montage manually rather than programatically for these. (See Image -> Stacks -> Make Montage) | 39.651163 | 154 | 0.735484 | 269 | 1,705 | 4.546468 | 0.550186 | 0.058872 | 0.039248 | 0.041701 | 0.053966 | 0.053966 | 0 | 0 | 0 | 0 | 0 | 0.020311 | 0.133724 | 1,705 | 43 | 155 | 39.651163 | 0.807718 | 0.334897 | 0 | 0 | 0 | 0.038462 | 0.196444 | 0.027556 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.115385 | 0 | 0.115385 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
dcb39738e3bbf21548131aa39db7d619b9f3a311 | 6,385 | py | Python | fairseq/models/roberta/model_xlmr.py | leo-liuzy/fairseq-apollo | 00032398d78e90f40bb462ed62bff156205c3574 | [
"MIT"
] | 2 | 2021-08-07T00:12:30.000Z | 2021-08-09T02:17:57.000Z | fairseq/models/roberta/model_xlmr.py | leo-liuzy/fairseq-apollo | 00032398d78e90f40bb462ed62bff156205c3574 | [
"MIT"
] | null | null | null | fairseq/models/roberta/model_xlmr.py | leo-liuzy/fairseq-apollo | 00032398d78e90f40bb462ed62bff156205c3574 | [
"MIT"
] | null | null | null | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Unsupervised Cross-lingual Representation Learning at Scale
"""
import torch
from typing import List
from torch import nn
from fairseq.models import (
    FairseqEncoder,
    FairseqEncoderModel,
    register_model,
    register_model_architecture,
)

from .hub_interface import RobertaHubInterface
from .model import RobertaModel, RobertaEncoder


@register_model('xlmr')
class XLMRModel(RobertaModel):

    @classmethod
    def hub_models(cls):
        return {
            'xlmr.base': 'http://dl.fbaipublicfiles.com/fairseq/models/xlmr.base.tar.gz',
            'xlmr.large': 'http://dl.fbaipublicfiles.com/fairseq/models/xlmr.large.tar.gz',
        }

    @classmethod
    def from_pretrained(cls, model_name_or_path, checkpoint_file='model.pt', data_name_or_path='.', bpe='sentencepiece', **kwargs):
        from fairseq import hub_utils
        x = hub_utils.from_pretrained(
            model_name_or_path,
            checkpoint_file,
            data_name_or_path,
            archive_map=cls.hub_models(),
            bpe=bpe,
            load_checkpoint_heads=True,
            **kwargs,
        )
        return RobertaHubInterface(x['args'], x['task'], x['models'][0])


class Pooler(nn.Module):
    """
    Parameter-free poolers to get the sentence embedding
    'cls': [CLS] representation with BERT/RoBERTa's MLP pooler.
    'cls_before_pooler': [CLS] representation without the original MLP pooler.
    'avg': average of the last layers' hidden states at each token.
    'avg_top2': average of the last two layers.
    'avg_first_last': average of the first and the last layers.
    """

    def __init__(self, args):
        super().__init__()
        self.pooler_type = args.pooler_type
        assert self.pooler_type in ["cls", "cls_before_pooler", "cls_after_pooler",
                                    "avg", "avg_top2", "avg_first_last"], "unrecognized pooling type %s" % self.pooler_type
        if self.pooler_type in ["cls_after_pooler"]:
            self.dense = nn.Linear(args.encoder_embed_dim, args.encoder_embed_dim)
            self.activation = nn.Tanh()

    def forward(self, attention_mask: torch.tensor, hidden_states: List[torch.tensor]):
        # pooler_output = outputs.pooler_output
        # hidden_states = outputs.hidden_states
        if self.pooler_type in ['cls_before_pooler', 'cls']:
            return hidden_states[-1][0]
        elif self.pooler_type in ['cls_after_pooler']:
            first_token_tensor = hidden_states[-1][0]
            pooled_output = self.dense(first_token_tensor)
            pooled_output = self.activation(pooled_output)
            return pooled_output
        elif self.pooler_type == "avg":
            return ((hidden_states[-1] * attention_mask.unsqueeze(-1)).sum(1) / attention_mask.sum(-1).unsqueeze(-1))
        elif self.pooler_type == "avg_first_last":
            first_hidden = hidden_states[0]
            last_hidden = hidden_states[-1]
            pooled_result = ((first_hidden + last_hidden) / 2.0 * attention_mask.unsqueeze(-1)).sum(1) / attention_mask.sum(-1).unsqueeze(-1)
            return pooled_result
        elif self.pooler_type == "avg_top2":
            second_last_hidden = hidden_states[-2]
            last_hidden = hidden_states[-1]
            pooled_result = ((last_hidden + second_last_hidden) / 2.0 * attention_mask.unsqueeze(-1)).sum(1) / attention_mask.sum(-1).unsqueeze(-1)
            return pooled_result
        else:
            raise NotImplementedError
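

# Illustrative sketch (not part of fairseq): the masked mean used by Pooler for
# pooler_type == "avg", written out step by step. Shapes are assumptions here:
# last_hidden is (batch, seq_len, dim) and attention_mask is (batch, seq_len).
def _example_masked_mean(last_hidden: torch.Tensor, attention_mask: torch.Tensor) -> torch.Tensor:
    summed = (last_hidden * attention_mask.unsqueeze(-1)).sum(1)  # zero out pad positions, sum over tokens
    counts = attention_mask.sum(-1).unsqueeze(-1)                 # number of real tokens per example
    return summed / counts                                        # (batch, dim) sentence embeddings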


@register_model('xlmr_xcl')
class XLMRXCLModel(XLMRModel):

    def __init__(self, args, encoder):
        super().__init__(args, encoder)
        # TODO (Leo): add pooler
        self.pooler = Pooler(args)
        self.pooler_requring_all_hiddens = ["avg_top2", "avg_first_last"]

    @staticmethod
    def add_args(parser):
        XLMRModel.add_args(parser)
        parser.add_argument('--pooler-type', default="cls", type=str,
                            choices=["cls", "cls_before_pooler", "cls_after_pooler",
                                     "avg", "avg_top2", "avg_first_last"],
                            help='pooler used to build sentence embeddings')

    def forward(self, src_tokens,
                src_positions=None,  # set to None for subclassing
                force_positions=True,
                features_only=False, return_all_hiddens=False,
                classification_head_name=None, **kwargs):
        """
        Depending on the task, src_tokens can mean different things.
        For MLM, src_tokens is the masked sequence.
        For contrastive learning, it can mean the unmasked sequences.
        For TLM, src_tokens is the masked and concatenated sequences.

        The same applies to src_positions.
        """
        return super().forward(src_tokens,
                               src_positions=src_positions,
                               force_positions=force_positions,
                               features_only=features_only,
                               return_all_hiddens=return_all_hiddens,
                               classification_head_name=classification_head_name, **kwargs)


@register_model_architecture('xlmr_xcl', 'xlmr_xcl_base')
def base_architecture(args):
    args.encoder_layers = getattr(args, 'encoder_layers', 12)
    args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 768)
    args.encoder_ffn_embed_dim = getattr(args, 'encoder_ffn_embed_dim', 3072)
    args.encoder_attention_heads = getattr(args, 'encoder_attention_heads', 12)

    args.activation_fn = getattr(args, 'activation_fn', 'gelu')
    args.pooler_activation_fn = getattr(args, 'pooler_activation_fn', 'tanh')

    # (Leo): xlmr use learned embedding
    args.dropout = getattr(args, 'dropout', 0.1)  # (Leo): this includes embedding dropout
    args.attention_dropout = getattr(args, 'attention_dropout', 0.1)
    args.activation_dropout = getattr(args, 'activation_dropout', 0.0)
    args.pooler_dropout = getattr(args, 'pooler_dropout', 0.0)
    args.encoder_layers_to_keep = getattr(args, 'encoder_layers_to_keep', None)
    args.encoder_layerdrop = getattr(args, 'encoder_layerdrop', 0.0)
    args.pooler_type = getattr(args, "pooler_type", "cls") | 42.852349 | 147 | 0.653876 | 774 | 6,385 | 5.124031 | 0.264858 | 0.044377 | 0.03177 | 0.018154 | 0.222895 | 0.157085 | 0.136662 | 0.083207 | 0.083207 | 0.083207 | 0 | 0.010768 | 0.243696 | 6,385 | 149 | 148 | 42.852349 | 0.81052 | 0.165388 | 0 | 0.058824 | 0 | 0 | 0.142255 | 0.012636 | 0 | 0 | 0 | 0.006711 | 0.009804 | 1 | 0.078431 | false | 0 | 0.068627 | 0.009804 | 0.254902 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0
dcb593676d0036da1b378867c0d0200740f067c5 | 5,189 | py | Python | main.py | MD2Korg/CerebralCortex-DataIngestion | a9fc68bc99204beab5be81ee4607b9d6f1871daf | [
"BSD-2-Clause"
] | null | null | null | main.py | MD2Korg/CerebralCortex-DataIngestion | a9fc68bc99204beab5be81ee4607b9d6f1871daf | [
"BSD-2-Clause"
] | null | null | null | main.py | MD2Korg/CerebralCortex-DataIngestion | a9fc68bc99204beab5be81ee4607b9d6f1871daf | [
"BSD-2-Clause"
] | null | null | null | # Copyright (c) 2017, MD2K Center of Excellence
# - Nasir Ali <nasir.ali08@gmail.com>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from datetime import datetime, timedelta
from core.data_scanner.raw_data_scanner import get_files_list
from core.util.config_parser import get_configs
from core.file_processor.process_msgpack import msgpack_to_pandas
import argparse
import gzip
from core.util.spark_helper import get_or_create_sc
import os
import pandas as pd
import pyarrow as pa
import pyarrow.parquet as pq
def save_data(msg, study_name, cc_config):
    files = msg.get("files")
    data = pd.DataFrame()
    for f in files:
        with gzip.open(msg.get("file_path")+"/"+f, 'rb') as input_data:
            pdf = msgpack_to_pandas(input_data)
            data = data.append(pdf, ignore_index=True)

    hdfs_ip = cc_config['hdfs']['host']
    hdfs_port = cc_config['hdfs']['port']
    raw_files_dir = cc_config['hdfs']['raw_files_dir']
    if raw_files_dir[-1:]!="/":
        raw_files_dir = raw_files_dir+"/"
    hdfs_url = raw_files_dir+"study="+study_name+"/"+msg.get("stream_name")+"/"+msg.get("version")+"/"+msg.get("user_id")+"/"

    try:
        table = pa.Table.from_pandas(data, preserve_index=False)
        fs = pa.hdfs.connect(hdfs_ip, hdfs_port)
        pq.write_to_dataset(table, root_path=hdfs_url, filesystem=fs)
        return True
    except Exception as e:
        raise Exception("Cannot store dataframe: " + str(e))


def run():
    parser = argparse.ArgumentParser(description='CerebralCortex Kafka Message Handler.')
    parser.add_argument("-c", "--config_dir", help="Configurations directory path.", required=True)
    parser.add_argument("-dy", "--day", help="Day date to be processed. Format is MMDDYYYY.", required=True)
    parser.add_argument("-hr", "--hour", help="Hour of the day to be processed. Format is HH.", required=True)
    parser.add_argument("-bs", "--batch_size", help="Number of folders to process at a time.", required=True)
    parser.add_argument("-sn", "--study_name",
                        help="Provide a study_name.",
                        default="default",
                        required=False)
    parser.add_argument("-stn", "--stream_names",
                        help="Provide comma separated stream_names. All streams' data will be processed if no name is provided.", default=[],
                        required=False)
    parser.add_argument("-uid", "--user_ids",
                        help="Provide comma separated participant UUIDs. All participants' data will be processed if no UUID is provided.",
                        default=[],
                        required=False)
    parser.add_argument("-vr", "--versions",
                        help="Provide comma separated versions. All versions' data will be processed if no version is provided.",
                        default=[],
                        required=False)

    args = vars(parser.parse_args())

    config_dir_path = str(args["config_dir"]).strip()
    study_name = args["study_name"]
    day = args["day"].split(",")
    hour = args["hour"].split(",")
    batch_size = args["batch_size"]
    stream_names = args["stream_names"]
    user_ids = args["user_ids"]
    versions = args["versions"]

    ingestion_config = get_configs(config_dir_path, "data_ingestion.yml")
    cc_config = get_configs(config_dir_path, "cerebralcortex.yml")
    raw_data_path = ingestion_config["data_ingestion"]["raw_data_path"]

    for files in get_files_list(raw_data_path=raw_data_path, study_name=study_name, day=day, hour=hour, stream_names=stream_names, batch_size=batch_size, user_ids=user_ids, versions=versions):
        spark_context = get_or_create_sc()
        message = spark_context.parallelize(files)
        message.foreach(lambda msg: save_data(msg, study_name=study_name, cc_config=cc_config))
        print("File Iteration count:", len(files))


if __name__ == "__main__":
    run()
| 47.605505 | 192 | 0.69551 | 711 | 5,189 | 4.895921 | 0.348805 | 0.025855 | 0.039069 | 0.024131 | 0.214881 | 0.113186 | 0.066073 | 0.066073 | 0.039069 | 0.039069 | 0 | 0.001925 | 0.199075 | 5,189 | 108 | 193 | 48.046296 | 0.835659 | 0.25689 | 0 | 0.084507 | 0 | 0 | 0.238319 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.028169 | false | 0 | 0.15493 | 0 | 0.197183 | 0.014085 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
dcb66372cec4216e73548ac819b82db4993a92b9 | 679 | py | Python | code_week4_518_524/valid_palindrome_ii.py | dylanlee101/leetcode | b059afdadb83d504e62afd1227107de0b59557af | [
"Apache-2.0"
] | null | null | null | code_week4_518_524/valid_palindrome_ii.py | dylanlee101/leetcode | b059afdadb83d504e62afd1227107de0b59557af | [
"Apache-2.0"
] | null | null | null | code_week4_518_524/valid_palindrome_ii.py | dylanlee101/leetcode | b059afdadb83d504e62afd1227107de0b59557af | [
"Apache-2.0"
] | null | null | null | '''
Given a non-empty string s, you may delete at most one character. Determine whether it can become a palindrome.

Example 1:

Input: "aba"
Output: True

Example 2:

Input: "abca"
Output: True
Explanation: You could delete the character 'c'.

Note:
The string will only contain lowercase letters a-z. The maximum length of the string is 50000.

Source: LeetCode (力扣)
Link: https://leetcode-cn.com/problems/valid-palindrome-ii
'''
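# Worked example (illustrative, not part of the original solution):
# s = "abca" -> the scan below stops at the first mismatch, i=1 ('b'), j=2 ('c');
# palindrome(s, 1, 1) then checks "b" (i.e. with 'c' deleted) and returns True,
# so validPalindrome("abca") is True.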
class Solution:
    def validPalindrome(self, s: str) -> bool:
        i = 0
        j = len(s) - 1
        for l in range(len(s)):
            if i < j and s[i] == s[j]:
                i += 1
                j -= 1
        return self.palindrome(s, i, j-1) or self.palindrome(s, i+1, j)

    def palindrome(self, s, i, j):
        for l in range(len(s)):
            if i < j and s[i] == s[j]:
                i += 1
                j -= 1
        return i >= j | 19.4 | 67 | 0.499264 | 109 | 679 | 3.110092 | 0.458716 | 0.029499 | 0.026549 | 0.064897 | 0.212389 | 0.212389 | 0.212389 | 0.212389 | 0.212389 | 0.212389 | 0 | 0.033784 | 0.346097 | 679 | 35 | 68 | 19.4 | 0.72973 | 0.315169 | 0 | 0.533333 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.133333 | false | 0 | 0 | 0 | 0.333333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0
dcb76c37c09b67239430caeca59fb48fa0534454 | 9,361 | py | Python | tordatahub/models/topic.py | jasonz93/python-tordatahub | 3a9a497d5a0bebf915d7e24049dd8b06099e3c04 | [
"Apache-2.0"
] | null | null | null | tordatahub/models/topic.py | jasonz93/python-tordatahub | 3a9a497d5a0bebf915d7e24049dd8b06099e3c04 | [
"Apache-2.0"
] | null | null | null | tordatahub/models/topic.py | jasonz93/python-tordatahub | 3a9a497d5a0bebf915d7e24049dd8b06099e3c04 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import
import json
from .rest import HTTPMethod, RestModel
from .record import Schema, RecordSchema, RecordType
from .. import errors
class Topic(RestModel):
"""
Topic class, there was two topic type: ``Tuple`` and ``Blob``
:Example:
>>> topic = Topic(name=topic_name)
>>>
>>> topic.project_name = project_name
>>>
>>> topic.shard_count = 3
>>>
>>> topic.life_cycle = 7
>>>
>>> topic.record_type = RecordType.TUPLE
>>>
>>> topic.record_schema = RecordSchema.from_lists(['bigint_field', 'string_field', 'double_field', 'bool_field', 'time_field'], [FieldType.BIGINT, FieldType.STRING, FieldType.DOUBLE, FieldType.BOOLEAN, FieldType.TIMESTAMP])
.. seealso:: :class:`tordatahub.models.RecordSchema`, :class:`tordatahub.models.RecordType`, :class:`tordatahub.models.FieldType`
"""
__slots__ = ('_project_name', '_shard_count', '_life_cycle', '_record_type', '_record_schema')
def __init__(self, *args, **kwds):
super(Topic, self).__init__(*args, **kwds)
self._project_name = kwds['project_name'] if 'project_name' in kwds else ''
self._shard_count = kwds['shard_count'] if 'shard_count' in kwds else 0
self._life_cycle = kwds['life_cycle'] if 'life_cycle' in kwds else 0
self._record_type = kwds['record_type'] if 'record_type' in kwds else ''
self._record_schema = kwds['record_schema'] if 'record_schema' in kwds else None
@property
def project_name(self):
return self._project_name
@project_name.setter
def project_name(self, value):
self._project_name = value
@property
def shard_count(self):
return self._shard_count
@shard_count.setter
def shard_count(self, value):
self._shard_count = value
@property
def life_cycle(self):
return self._life_cycle
@life_cycle.setter
def life_cycle(self, value):
self._life_cycle = value
@property
def record_type(self):
return self._record_type
@record_type.setter
def record_type(self, value):
self._record_type = value
@property
def record_schema(self):
return self._record_schema
@record_schema.setter
def record_schema(self, value):
self._record_schema = value
def __str__(self):
topicjson = {
"name": "%s" % self._name,
"shard_count": self._shard_count,
"life_cycle": self._life_cycle,
"record_type": "%s" % self._record_type,
"comment": "%s" % self._comment,
"create_time": self._create_time,
"last_modify_time": self._last_modify_time
}
if RecordType.TUPLE == self._record_type:
topicjson["record_schema"] = self._record_schema.to_json_string()
return json.dumps(topicjson)
def __hash__(self):
return hash((type(self), self._name, self._shard_count, self._life_cycle, self._record_type, self._record_schema, self._comment, self._create_time, self._last_modify_time))
def throw_exception(self, response_result):
if 'TopicAlreadyExist' == response_result.error_code:
raise errors.ObjectAlreadyExistException(response_result.status_code, response_result.request_id, response_result.error_code, response_result.error_msg)
elif 'NoSuchProject' == response_result.error_code or 'NoSuchTopic' == response_result.error_code:
raise errors.NoSuchObjectException(response_result.status_code, response_result.request_id, response_result.error_code, response_result.error_msg)
elif 'InvalidParameter' == response_result.error_code:
raise errors.InvalidParameterException(response_result.status_code, response_result.request_id, response_result.error_code, response_result.error_msg)
elif response_result.status_code >= 500:
raise errors.ServerInternalError(response_result.status_code, response_result.request_id, response_result.error_code, response_result.error_msg)
else:
raise errors.DatahubException(response_result.status_code, response_result.request_id, response_result.error_code, response_result.error_msg)
def resource(self):
if not self._project_name:
raise ValueError('project name must not be empty')
return "/projects/%s/topics/%s" %(self._project_name, self._name)
def encode(self, method):
ret = {}
if HTTPMethod.POST == method:
data = {
"ShardCount": self._shard_count,
"Lifecycle": self._life_cycle,
"RecordType": "%s" % self._record_type,
"Comment": "%s" % self._comment
}
if RecordType.TUPLE == self._record_type:
if isinstance(self._record_schema, RecordSchema):
data['RecordSchema'] = self._record_schema.to_json_string()
elif isinstance(self._record_schema, dict):
data['RecordSchema'] = RecordSchema.from_dict(self._record_schema).to_json_string()
else:
data['RecordSchema'] = self._record_schema
ret["data"] = json.dumps(data)
elif HTTPMethod.PUT == method:
data = {
"Lifecycle": self._life_cycle,
"Comment": "%s" % self._comment
}
ret["body"] = json.dumps(data)
return ret
def decode(self, method, resp):
if HTTPMethod.GET == method:
content = json.loads(resp.body)
self._shard_count = content['ShardCount']
self._life_cycle = content['Lifecycle']
self._record_type = content['RecordType']
if RecordType.TUPLE == self._record_type:
self._record_schema = RecordSchema.from_jsonstring(content['RecordSchema'])
self._comment = content['Comment']
self._create_time = content['CreateTime']
self._last_modify_time = content['LastModifyTime']
class Topics(RestModel):
"""
Topics class.
Used by the "list topics of a project" interface.
"""
__slots__ = ('_project_name', '_topic_names')
def __init__(self, project_name=''):
self._project_name = project_name
self._topic_names = []
@property
def project_name(self):
return self._project_name
@project_name.setter
def project_name(self, value):
self._project_name = value
def __len__(self):
return len(self._topic_names)
def append(self, topic_name):
self._topic_names.append(topic_name)
def extend(self, topic_names):
self._topic_names.extend(topic_names)
def __setitem__(self, index, topic_name):
if index < 0 or index > len(self._topic_names) - 1:
raise ValueError('index out of range')
self._topic_names[index] = topic_name
def __getitem__(self, index):
if index < 0 or index > len(self._topic_names) - 1:
raise ValueError('index out of range')
return self._topic_names[index]
def __str__(self):
topicsjson = {}
topicsjson['TopicNames'] = []
for topic_name in self._topic_names:
topicsjson['TopicNames'].append(topic_name)
return json.dumps(topicsjson)
def __iter__(self):
for name in self._topic_names:
yield name
def throw_exception(self, response_result):
if 'NoSuchProject' == response_result.error_code:
raise errors.NoSuchObjectException(response_result.status_code, response_result.request_id, response_result.error_code, response_result.error_msg)
elif response_result.status_code >= 500:
raise errors.ServerInternalError(response_result.status_code, response_result.request_id, response_result.error_code, response_result.error_msg)
else:
raise errors.DatahubException(response_result.status_code, response_result.request_id, response_result.error_code, response_result.error_msg)
def resource(self):
if not self._project_name:
raise ValueError('project name must not be empty')
return "/projects/%s/topics" % self._project_name
def encode(self, method):
ret = {}
return ret
def decode(self, method, resp):
if HTTPMethod.GET == method:
content = json.loads(resp.body)
for topic_name in content['TopicNames']:
self.append(topic_name)
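# Hedged usage sketch; the REST client that actually issues the request lives
# outside this module, so the calls below are illustrative only:
#     topics = Topics(project_name='my_project')
#     # a client GETs topics.resource() and then calls
#     # topics.decode(HTTPMethod.GET, resp) to populate the topic names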
| 38.842324 | 227 | 0.664245 | 1,110 | 9,361 | 5.284685 | 0.190991 | 0.097852 | 0.068019 | 0.050972 | 0.405728 | 0.365326 | 0.306853 | 0.294238 | 0.282646 | 0.282646 | 0 | 0.002654 | 0.235124 | 9,361 | 240 | 228 | 39.004167 | 0.81662 | 0.157355 | 0 | 0.37037 | 0 | 0 | 0.089029 | 0.002826 | 0 | 0 | 0 | 0 | 0 | 1 | 0.191358 | false | 0 | 0.030864 | 0.049383 | 0.339506 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
dcb905b861d03d7baf6f6a0c8972e31b9ba0d8f4 | 18,875 | py | Python | foursq_utils.py | chenyang03/foursquare_crawler | 9b4d5b585e9e6bda790b80d3c6dc489906e3d64f | [
"MIT"
] | 1 | 2015-12-26T11:00:31.000Z | 2015-12-26T11:00:31.000Z | foursq_utils.py | chenyang03/foursquare_user_crawler | 9b4d5b585e9e6bda790b80d3c6dc489906e3d64f | [
"MIT"
] | null | null | null | foursq_utils.py | chenyang03/foursquare_user_crawler | 9b4d5b585e9e6bda790b80d3c6dc489906e3d64f | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import sys
import time
import httplib2
AUTO_RECONNECT_TIMES = 5
crawl_tips_json = {}
SERVER = 'http://api.cn.faceplusplus.com/'
category_Arts_Entertainment = ['Aquarium', 'Arcade', 'Art Gallery', 'Bowling Alley', 'Casino', 'Circus', 'Comedy Club',
'Concert Hall', 'Country Dance Club', 'Disc Golf', 'General Entertainment',
'Go Kart Track', 'Historic Site', 'Laser Tag', 'Mini Golf', 'Movie Theater',
'Indie Movie Theater', 'Multiplex', 'Museum', 'Art Museum', 'Erotic Museum',
'History Museum', 'Planetarium', 'Science Museum', 'Music Venue', 'Jazz Club',
'Piano Bar', 'Rock Club', 'Performing Arts Venue', 'Dance Studio', 'Indie Theater',
'Opera House', 'Theater', 'Pool Hall', 'Public Art', 'Outdoor Sculpture', 'Street Art',
'Racetrack', 'Roller Rink', 'Salsa Club', 'Stadium', 'Baseball Stadium',
'Basketball Stadium', 'Cricket Ground', 'Football Stadium', 'Hockey Arena',
'Soccer Stadium', 'Tennis Stadium', 'Track Stadium', 'Threet Art', 'Theme Park',
'Theme Park Ride / Attraction', 'Water Park', 'Zoo']
category_College_University = ['College Academic Building', 'College Arts Building', 'College Communications Building',
'College Engineering Building', 'College History Building', 'College Math Building',
'College Science Building', 'College Technology Building',
'College Administrative Building', 'College Auditorium', 'College Bookstore',
'College Cafeteria', 'College Classroom', 'College Gym', 'College Lab',
'College Library', 'College Quad', 'College Rec Center', 'College Residence Hall',
'College Stadium', 'College Baseball Diamond', 'College Basketball Court',
'College Cricket Pitch', 'College Football Field', 'College Hockey Rink',
'College Soccer Field', 'College Tennis Court', 'College Track', 'College Theater',
'Community College', 'Fraternity House', 'General College & University', 'Law School',
'Medical School', 'Sorority House', 'Student Center', 'Trade School', 'University']
category_Event = ['Conference', 'Convention', 'Festival', 'Music Festival', 'Other Event', 'Parade', 'Stoop Sale',
'Street Fair']
male_tipping_duration = []
female_tipping_duration = []
all_tip_timestamp = {}
category_Food = ['Afghan Restaurant', 'African Restaurant', 'Ethiopian Restaurant', 'American Restaurant',
'New American Restaurant', 'Arepa Restaurant', 'Argentinian Restaurant', 'Asian Restaurant',
'Dim Sum Restaurant', 'Donburi Restaurant', 'Japanese Curry Restaurant', 'Kaiseki Restaurant',
'Kushikatsu Restaurant', 'Monjayaki Restaurant', 'Nabe Restaurant', 'Okonomiyaki Restaurant',
'Ramen Restaurant', 'Shabu-Shabu Restaurant', 'Soba Restaurant', 'Sukiyaki Restaurant',
'Takoyaki Place', 'Tempura Restaurant', 'Tonkatsu Restaurant', 'Udon Restaurant', 'Unagi Restaurant',
'Wagashi Place', 'Yakitori Restaurant', 'Yoshoku Restaurant', 'Korean Restaurant',
'Malaysian Restaurant', 'Mongolian Restaurant', 'Noodle House', 'Thai Restaurant',
'Tibetan Restaurant', 'Vietnamese Restaurant', 'Australian Restaurant', 'Austrian Restaurant',
'BBQ Joint', 'Bagel Shop', 'Bakery', 'Belarusian Restaurant', 'Belgian Restaurant', 'Bistro',
'Brazilian Restaurant', 'Acai House', 'Baiano Restaurant', 'Central Brazilian Restaurant',
'Churrascaria', 'Empada House', 'Goiano Restaurant', 'Mineiro Restaurant',
'Northeastern Brazilian Restaurant', 'Northern Brazilian Restaurant', 'Pastelaria',
'Southeastern Brazilian Restaurant', 'Southern Brazilian Restaurant', 'Tapiocaria', 'Breakfast Spot',
'Bubble Tea Shop', 'Buffet', 'Burger Joint', 'Burrito Place', 'Cafeteria', u'Café',
'Cajun / Creole Restaurant', 'Cambodian Restaurant', 'Caribbean Restaurant', 'Caucasian Restaurant',
'Chinese Restaurant', 'Anhui Restaurant', 'Beijing Restaurant', 'Cantonese Restaurant',
'Chinese Aristocrat Restaurant', 'Chinese Breakfast Place', 'Dongbei Restaurant', 'Fujian Restaurant',
'Guizhou Restaurant', 'Hainan Restaurant', 'Hakka Restaurant', 'Henan Restaurant',
'Hong Kong Restaurant', 'Huaiyang Restaurant', 'Hubei Restaurant', 'Hunan Restaurant',
'Imperial Restaurant', 'Jiangsu Restaurant', 'Jiangxi Restaurant', 'Macanese Restaurant',
'Manchu Restaurant', 'Peking Duck Restaurant', 'Shaanxi Restaurant', 'Shandong Restaurant',
'Shanghai Restaurant', 'Shanxi Restaurant', 'Szechuan Restaurant', 'Taiwanese Restaurant',
'Tianjin Restaurant', 'Xinjiang Restaurant', 'Yunnan Restaurant', 'Zhejiang Restaurant', 'Coffee Shop',
'Comfort Food Restaurant', 'Creperie', 'Cuban Restaurant', 'Cupcake Shop', 'Czech Restaurant',
'Deli / Bodega', 'Dessert Shop', 'Dim Sum Restaurant', 'Diner', 'Distillery', 'Donut Shop',
'Dumpling Restaurant', 'Eastern European Restaurant', 'English Restaurant', 'Ethiopian Restaurant',
'Falafel Restaurant', 'Fast Food Restaurant', 'Filipino Restaurant', 'Fish & Chips Shop',
'Fondue Restaurant', 'Food Truck', 'French Restaurant', 'Fried Chicken Joint', 'Gastropub',
'German Restaurant', 'Gluten-free Restaurant', 'Greek Restaurant', 'Bougatsa Shop',
'Cretan Restaurant', 'Fish Taverna', 'Grilled Meat Restaurant', 'Kafenio', 'Magirio',
'Meze Restaurant', 'Modern Greek Restaurant', 'Ouzeri', 'Patsa Restaurant', 'Taverna',
'Tsipouro Restaurant', 'Halal Restaurant', 'Hawaiian Restaurant', 'Himalayan Restaurant',
'Hot Dog Joint', 'Hotpot Restaurant', 'Hungarian Restaurant', 'Ice Cream Shop', 'Indian Restaurant',
'Indonesian Restaurant', 'Acehnese Restaurant', 'Balinese Restaurant', 'Betawinese Restaurant',
'Javanese Restaurant', 'Manadonese Restaurant', 'Meatball Place', 'Padangnese Restaurant',
'Sundanese Restaurant', 'Irish Pub', 'Italian Restaurant', 'Japanese Restaurant', 'Jewish Restaurant',
'Juice Bar', 'Korean Restaurant', 'Kosher Restaurant', 'Latin American Restaurant',
'Empanada Restaurant', 'Mac & Cheese Joint', 'Malaysian Restaurant', 'Mediterranean Restaurant',
'Mexican Restaurant']
category_Food.extend(['Middle Eastern Restaurant', 'Modern European Restaurant', 'Molecular Gastronomy Restaurant',
'Mongolian Restaurant', 'Moroccan Restaurant', 'New American Restaurant', 'Pakistani Restaurant',
'Persian Restaurant', 'Peruvian Restaurant', 'Pie Shop', 'Pizza Place', 'Polish Restaurant',
'Portuguese Restaurant', 'Ramen / Noodle House', 'Restaurant', 'Romanian Restaurant',
'Russian Restaurant', 'Blini House', 'Pelmeni House', 'Salad Place', 'Sandwich Place',
'Scandinavian Restaurant', 'Seafood Restaurant', 'Snack Place', 'Soup Place',
'South American Restaurant', 'Southern / Soul Food Restaurant', 'Souvlaki Shop',
'Spanish Restaurant', 'Paella Restaurant', 'Steakhouse', 'Sushi Restaurant', 'Swiss Restaurant',
'Taco Place', 'Tapas Restaurant', 'Tatar Restaurant', 'Tea Room', 'Thai Restaurant',
'Tibetan Restaurant', 'Turkish Restaurant', 'Borek Place', 'Cigkofte Place', 'Doner Restaurant',
'Gozleme Place', 'Home Cooking Restaurant', 'Kebab Restaurant', 'Kofte Place',
u'Kokoreç Restaurant', 'Manti Place', 'Meyhane', 'Pide Place', 'Ukrainian Restaurant',
'Varenyky restaurant', 'West-Ukrainian Restaurant', 'Vegetarian / Vegan Restaurant',
'Vietnamese Restaurant', 'Winery', 'Wings Joint', 'Frozen Yogurt', 'Friterie',
'Andhra Restaurant', 'Awadhi Restaurant', 'Bengali Restaurant', 'Chaat Place',
'Chettinad Restaurant', 'Dhaba', 'Dosa Place', 'Goan Restaurant', 'Gujarati Restaurant',
'Indian Chinese Restaurant', 'Indian Sweet Shop', 'Irani Cafe', 'Jain Restaurant',
'Karnataka Restaurant', 'Kerala Restaurant', 'Maharashtrian Restaurant', 'Mughlai Restaurant',
'Multicuisine Indian Restaurant', 'North Indian Restaurant', 'Northeast Indian Restaurant',
'Parsi Restaurant', 'Punjabi Restaurant', 'Rajasthani Restaurant', 'South Indian Restaurant',
'Udupi Restaurant', 'Indonesian Meatball Place', 'Abruzzo', 'Turkish Home Cooking Restaurant',
'Sri Lankan Restaurant', 'Veneto Restaurant', 'Umbrian Restaurant', 'Tuscan Restaurant',
'Trentino Restaurant', 'Trattoria/Osteria', 'South Tyrolean Restaurant', 'Sicilian Restaurant',
'Sardinian Restaurant', 'Roman Restaurant', 'Romagna Restaurant', 'Rifugio di Montagna',
'Puglia Restaurant', 'Piedmontese Restaurant', 'Piadineria', 'Molise Restaurant',
'Marche Restaurant', 'Malga', 'Lombard Restaurant', 'Ligurian Restaurant', 'Friuli Restaurant',
'Emilia Restaurant', 'Campanian Restaurant', 'Calabria Restaurant', 'Basilicata Restaurant',
'Aosta Restaurant', 'Agriturismo', 'Abruzzo Restaurant', ''])
category_Nightlife_Spot = ['Bar', 'Beach Bar', 'Beer Garden', 'Brewery', 'Champagne Bar', 'Cocktail Bar', 'Dive Bar',
'Gay Bar', 'Hookah Bar', 'Hotel Bar', 'Karaoke Bar', 'Lounge', 'Night Market', 'Nightclub',
'Other Nightlife', 'Pub', 'Sake Bar', 'Speakeasy', 'Sports Bar', 'Strip Club', 'Whisky Bar',
'Wine Bar', 'Speakeasy']
category_Outdoors_Recreation = ['Athletics & Sports', 'Badminton Court', 'Baseball Field', 'Basketball Court',
'Bowling Green', 'Golf Course', 'Hockey Field', 'Paintball Field', 'Rugby Pitch',
'Skate Park', 'Skating Rink', 'Soccer Field', 'Sports Club', 'Squash Court',
'Tennis Court', 'Volleyball Court', 'Bath House', 'Bathing Area', 'Beach',
'Nudist Beach', 'Surf Spot', 'Botanical Garden', 'Bridge', 'Campground', 'Castle',
'Cemetery', 'Dive Spot', 'Dog Run', 'Farm', 'Field', 'Fishing Spot', 'Forest', 'Garden',
'Gun Range', 'Harbor / Marina', 'Hot Spring', 'Island', 'Lake', 'Lighthouse',
'Mountain', 'National Park', 'Nature Preserve', 'Other Great Outdoors', 'Palace',
'Park', 'Pedestrian Plaza', 'Playground', 'Plaza', 'Pool', 'Rafting',
'Recreation Center', 'River', 'Rock Climbing Spot', 'Scenic Lookout',
'Sculpture Garden', 'Ski Area', 'Apres Ski Bar', 'Ski Chairlift', 'Ski Chalet',
'Ski Lodge', 'Ski Trail', 'Stables', 'States & Municipalities', 'City', 'County',
'Country', 'Neighborhood', 'State', 'Town', 'Village', 'Summer Camp', 'Trail', 'Tree',
'Vineyard', 'Volcano', 'Well']
category_Professional_Other_Places = ['Animal Shelter', 'Auditorium', 'Building', 'Club House', 'Community Center',
'Convention Center', 'Meeting Room', 'Cultural Center', 'Distribution Center',
'Event Space', 'Factory', 'Fair', 'Funeral Home', 'Government Building',
'Capitol Building', 'City Hall', 'Courthouse', 'Embassy / Consulate',
'Fire Station', 'Monument / Landmark', 'Police Station', 'Town Hall', 'Library',
'Medical Center', 'Acupuncturist', 'Alternative Healer', 'Chiropractor',
"Dentist's Office", "Doctor's Office", 'Emergency Room', 'Eye Doctor',
'Hospital', 'Laboratory', 'Mental Health Office', 'Veterinarian', 'Military Base',
'Non-Profit', 'Office', 'Advertising Agency', 'Campaign Office',
'Conference Room', 'Coworking Space', 'Tech Startup', 'Parking', 'Post Office',
'Prison', 'Radio Station', 'Recruiting Agency', 'School', 'Circus School',
'Driving School', 'Elementary School', 'Flight School', 'High School',
'Language School', 'Middle School', 'Music School', 'Nursery School', 'Preschool',
'Private School', 'Religious School', 'Swim School', 'Social Club',
'Spiritual Center', 'Buddhist Temple', 'Church', 'Hindu Temple', 'Monastery',
'Mosque', 'Prayer Room', 'Shrine', 'Synagogue', 'Temple', 'TV Station',
'Voting Booth', 'Warehouse']
category_Residence = ['Assisted Living', 'Home (private)', 'Housing Development',
'Residential Building (Apartment / Condo)', 'Trailer Park']
category_Shop_Service = ['Construction & Lanscape', 'Event Service', 'ATM', 'Adult Boutique', 'Antique Shop',
'Arts & Crafts Store', 'Astrologer', 'Auto Garage', 'Automotive Shop', 'Baby Store', 'Bank',
'Betting Shop', 'Big Box Store', 'Bike Shop', 'Board Shop', 'Bookstore', 'Bridal Shop',
'Camera Store', 'Candy Store', 'Car Dealership', 'Car Wash', 'Carpet Store',
'Check Cashing Service', 'Chocolate Shop', 'Christmas Market', 'Clothing Store',
'Accessories Store', 'Boutique', 'Kids Store', 'Lingerie Store', "Men's Store",
'Shoe Store', "Women's Store", 'Comic Shop', 'Convenience Store', 'Cosmetics Shop',
'Costume Shop', 'Credit Union', 'Daycare', 'Department Store', 'Design Studio',
'Discount Store', 'Dive Shop', 'Drugstore / Pharmacy', 'Dry Cleaner', 'EV Charging Station',
'Electronics Store', 'Fabric Shop', 'Financial or Legal Service', 'Fireworks Store',
'Fishing Store', 'Flea Market', 'Flower Shop', 'Food & Drink Shop', 'Beer Store', 'Butcher',
'Cheese Shop', 'Farmers Market', 'Fish Market', 'Food Court', 'Gourmet Shop', 'Grocery Store',
'Health Food Store', 'Liquor Store', 'Organic Grocery', 'Street Food Gathering', 'Supermarket',
'Wine Shop', 'Frame Store', 'Fruit & Vegetable Store', 'Furniture / Home Store', 'Gaming Cafe',
'Garden Center', 'Gas Station / Garage', 'Gift Shop', 'Gun Shop', 'Gym / Fitness Center',
'Boxing Gym', 'Climbing Gym', 'Cycle Studio', 'Gym Pool', 'Gymnastics Gym', 'Gym',
'Martial Arts Dojo', 'Track', 'Yoga Studio', 'Hardware Store', 'Herbs & Spices Store',
'Hobby Shop', 'Hunting Supply', 'IT Services', 'Internet Cafe', 'Jewelry Store',
'Knitting Store', 'Laundromat', 'Laundry Service', 'Lawyer', 'Leather Goods Store',
'Locksmith', 'Lottery Retailer', 'Luggage Store', 'Mall', 'Marijuana Dispensary', 'Market',
'Massage Studio', 'Mattress Store', 'Miscellaneous Shop', 'Mobile Phone Shop',
'Motorcycle Shop', 'Music Store', 'Nail Salon', 'Newsstand', 'Optical Shop',
'Other Repair Shop', 'Outdoor Supply Store', 'Outlet Store', 'Paper / Office Supplies Store',
'Pawn Shop', 'Perfume Shop', 'Pet Service', 'Pet Store', 'Photography Lab', 'Piercing Parlor',
'Pop-Up Shop', 'Print Shop', 'Real Estate Office', 'Record Shop', 'Recording Studio',
'Recycling Facility', 'Salon / Barbershop', 'Shipping Store', 'Shoe Repair', 'Smoke Shop',
'Smoothie Shop', 'Souvenir Shop', 'Spa', 'Sporting Goods Shop', 'Stationery Store',
'Storage Facility', 'Tailor Shop', 'Tanning Salon', 'Tattoo Parlor', 'Thrift / Vintage Store',
'Toy / Game Store', 'Travel Agency', 'Used Bookstore', 'Video Game Store', 'Video Store',
'Warehouse Store', 'Watch Repair Shop']
category_Travel_Transport = ['Cruise', 'Metro Station', 'Transportation Service', 'Airport', 'Airport Food Court',
'Airport Gate', 'Airport Lounge', 'Airport Terminal', 'Airport Tram', 'Plane',
'Bike Rental / Bike Share', 'Boat or Ferry', 'Border Crossing', 'Bus Station', 'Bus Line',
'Bus Stop', 'Cable Car', 'General Travel', 'Hotel', 'Bed & Breakfast', 'Boarding House',
'Hostel', 'Hotel Pool', 'Motel', 'Resort', 'Roof Deck', 'Intersection', 'Light Rail',
'Moving Target', 'Pier', 'RV Park', 'Rental Car Location', 'Rest Area', 'Road', 'Street',
'Subway', 'Taxi Stand', 'Taxi', 'Toll Booth', 'Toll Plaza', 'Tourist Information Center',
'Train Station', 'Platform', 'Train', 'Tram', 'Travel Lounge', 'Tunnel']
#reload(sys)
#sys.setdefaultencoding('utf-8')
h = httplib2.Http(disable_ssl_certificate_validation=True)
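# Fetch `url`, retrying up to AUTO_RECONNECT_TIMES on network errors; returns
# the response body on success, -1 on a non-200 status, and -2 once the
# retries are exhausted.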
def get_raw_info(url):
success = 0
retry = 0
content = -1
while success == 0:
try:
resp, content = h.request(url, "GET")
success = 1
if resp['status'] != '200':
return -1
except Exception:
time.sleep(3)
retry += 1
if retry == AUTO_RECONNECT_TIMES:
return -2
return content
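if __name__ == '__main__':
    # Hedged smoke test: the URL is a placeholder, not a Foursquare endpoint.
    body = get_raw_info('http://example.com/')
    print('fetch failed: %s' % body if body in (-1, -2) else 'fetched %d bytes' % len(body))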
| 86.187215 | 120 | 0.566305 | 1,629 | 18,875 | 6.539595 | 0.518109 | 0.012672 | 0.003379 | 0.00582 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.001294 | 0.304053 | 18,875 | 218 | 121 | 86.582569 | 0.809683 | 0.003338 | 0 | 0 | 0 | 0 | 0.577095 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.005181 | false | 0 | 0.015544 | 0 | 0.036269 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
dcb99c68a4fea8b76fac157150b52d863fa53f64 | 321 | py | Python | setup.py | MizukiSonoko/py4slide | 563fb0d31e8912c0c3baa071a7f972a9aafa7f13 | [
"BSD-3-Clause"
] | 2 | 2015-03-28T05:46:52.000Z | 2015-03-28T05:47:48.000Z | setup.py | MizukiSonoko/py4slide | 563fb0d31e8912c0c3baa071a7f972a9aafa7f13 | [
"BSD-3-Clause"
] | null | null | null | setup.py | MizukiSonoko/py4slide | 563fb0d31e8912c0c3baa071a7f972a9aafa7f13 | [
"BSD-3-Clause"
] | null | null | null | from setuptools import setup, find_packages
requires = ['colorama']
NAME = 'py4slide'
VER = '0.0.1'
setup(
name=NAME,
version=VER,
description='slide application by python.',
author='Sonoko Mizuki',
url='http://mizuki.co/',
license='BSD',
install_requires=requires,
packages=[NAME]
)
| 16.894737 | 47 | 0.65109 | 38 | 321 | 5.447368 | 0.736842 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.015625 | 0.202492 | 321 | 18 | 48 | 17.833333 | 0.792969 | 0 | 0 | 0 | 0 | 0 | 0.255452 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.071429 | 0 | 0.071429 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
dcba0eb1a04761250ce19ba6ebbc25099a074124 | 1,852 | py | Python | resources/lib/kodiutilsitem.py | lgaravaglia999/plugin.streaming.cava | 4d55bd5196d75f6a3cc9721fb7cff11f8af77bcb | [
"MIT"
] | 1 | 2020-04-06T16:55:13.000Z | 2020-04-06T16:55:13.000Z | resources/lib/kodiutilsitem.py | lgaravaglia999/plugin.streaming.cava | 4d55bd5196d75f6a3cc9721fb7cff11f8af77bcb | [
"MIT"
] | null | null | null | resources/lib/kodiutilsitem.py | lgaravaglia999/plugin.streaming.cava | 4d55bd5196d75f6a3cc9721fb7cff11f8af77bcb | [
"MIT"
] | null | null | null | import urlparse
import urlresolver
import sys
from urllib import urlencode
import xbmc, xbmcgui, xbmcaddon, xbmcplugin
base_url = sys.argv[0]
addon_handle = int(sys.argv[1])
STREAMING_SOURCES = ["speedvideo", "openload", "rapidcrypt", "vcrypt"]
def build_url(query):
return '{0}?{1}'.format(base_url, urlencode(query))
def add_menu_item(url_dict, item_title, image=None):
url = build_url(url_dict)
if image is not None:
li = xbmcgui.ListItem(item_title, iconImage=image)
else:
li = xbmcgui.ListItem(item_title)
xbmcplugin.addDirectoryItem(handle=addon_handle, url=url,
listitem=li, isFolder=True)
def add_item(url_dict, title, is_folder=False, properties=None, info=None, arts=None):
url = build_url(url_dict)
kodi_item = xbmcgui.ListItem(title)
if arts is not None:
kodi_item.setArt(arts)
if info is not None:
kodi_item.setInfo('video', info)
else:
kodi_item.setInfo('video', {})
if properties is not None:
prop_key = properties["prop_key"]
prop_value = properties["prop_value"]
kodi_item.setProperty(prop_key, prop_value)
xbmcplugin.addDirectoryItem(handle=addon_handle, url=url,
listitem=kodi_item, isFolder=is_folder)
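# Hedged usage sketch inside a plugin route handler (names are illustrative):
#     add_menu_item({'mode': 'list_movies'}, 'Movies')
#     add_item({'mode': 'play', 'url': stream_url}, title,
#              properties={'prop_key': 'IsPlayable', 'prop_value': 'true'})
#     end_directory()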
def end_directory():
xbmcplugin.endOfDirectory(addon_handle)
def get_streaming_source_name(url):
for source in STREAMING_SOURCES:
if source in url:
return source
return "n.d."
def user_input():
kb = xbmc.Keyboard('default', 'heading')
kb.setDefault('')
kb.setHeading('CercaFilm')
kb.setHiddenInput(False)
kb.doModal()
if (kb.isConfirmed()):
search_term = kb.getText()
return search_term
else:
return None
| 29.396825 | 86 | 0.650648 | 232 | 1,852 | 5.012931 | 0.366379 | 0.041273 | 0.030954 | 0.025795 | 0.209802 | 0.135856 | 0.098022 | 0.098022 | 0 | 0 | 0 | 0.002863 | 0.24568 | 1,852 | 62 | 87 | 29.870968 | 0.829635 | 0 | 0 | 0.173077 | 0 | 0 | 0.051836 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.115385 | false | 0 | 0.115385 | 0.019231 | 0.326923 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f4926420b807a0aaccc0281f369317eb80dac7e6 | 4,715 | py | Python | niftymic/application/propagate_mask.py | martaranzini/NiftyMIC | 6bd3c914dad8f2983e84ef009b944c429e1fafb3 | [
"BSD-3-Clause"
] | 86 | 2017-11-23T01:37:42.000Z | 2022-03-10T01:46:48.000Z | niftymic/application/propagate_mask.py | martaranzini/NiftyMIC | 6bd3c914dad8f2983e84ef009b944c429e1fafb3 | [
"BSD-3-Clause"
] | 20 | 2018-10-26T04:14:53.000Z | 2022-03-31T07:44:58.000Z | niftymic/application/propagate_mask.py | martaranzini/NiftyMIC | 6bd3c914dad8f2983e84ef009b944c429e1fafb3 | [
"BSD-3-Clause"
] | 23 | 2018-01-26T12:56:37.000Z | 2022-01-24T05:20:18.000Z | ##
# \file propagate_mask.py
# \brief Script to propagate an image mask using rigid registration
#
# \author Michael Ebner (michael.ebner@kcl.ac.uk)
# \date Aug 2019
#
import os
import numpy as np
import SimpleITK as sitk
import pysitk.python_helper as ph
import pysitk.simple_itk_helper as sitkh
import niftymic.base.data_writer as dw
import niftymic.base.stack as st
import niftymic.registration.flirt as regflirt
import niftymic.registration.niftyreg as niftyreg
import niftymic.utilities.stack_mask_morphological_operations as stmorph
from niftymic.utilities.input_arparser import InputArgparser
from niftymic.definitions import V2V_METHOD_OPTIONS, ALLOWED_EXTENSIONS
def main():
time_start = ph.start_timing()
# Set print options for numpy
np.set_printoptions(precision=3)
input_parser = InputArgparser(
description="Propagate image mask using rigid registration.",
)
input_parser.add_moving(required=True)
input_parser.add_moving_mask(required=True)
input_parser.add_fixed(required=True)
input_parser.add_output(required=True)
input_parser.add_v2v_method(
option_string="--method",
help="Registration method used for the registration (%s)." % (
", or ".join(V2V_METHOD_OPTIONS)),
default="RegAladin",
)
input_parser.add_option(
option_string="--use-moving-mask",
type=int,
help="Turn on/off use of moving mask to constrain the registration.",
default=0,
)
input_parser.add_dilation_radius(default=1)
input_parser.add_verbose(default=0)
input_parser.add_log_config(default=0)
args = input_parser.parse_args()
input_parser.print_arguments(args)
if np.alltrue([not args.output.endswith(t) for t in ALLOWED_EXTENSIONS]):
raise ValueError(
"output filename invalid; allowed extensions are: %s" %
", ".join(ALLOWED_EXTENSIONS))
if args.method not in V2V_METHOD_OPTIONS:
raise ValueError("method must be in {%s}" % (
", ".join(V2V_METHOD_OPTIONS)))
if args.log_config:
input_parser.log_config(os.path.abspath(__file__))
stack = st.Stack.from_filename(
file_path=args.fixed,
extract_slices=False,
)
template = st.Stack.from_filename(
file_path=args.moving,
file_path_mask=args.moving_mask,
extract_slices=False,
)
if args.method == "FLIRT":
# Define search angle ranges for FLIRT in all three dimensions
# search_angles = ["-searchr%s -%d %d" %
# (x, args.search_angle, args.search_angle)
# for x in ["x", "y", "z"]]
# options = (" ").join(search_angles)
# options += " -noresample"
registration = regflirt.FLIRT(
registration_type="Rigid",
fixed=stack,
moving=template,
use_fixed_mask=False,
use_moving_mask=args.use_moving_mask,
# options=options,
use_verbose=False,
)
else:
registration = niftyreg.RegAladin(
registration_type="Rigid",
fixed=stack,
moving=template,
use_fixed_mask=False,
use_moving_mask=args.use_moving_mask,
# options="-ln 2",
use_verbose=False,
)
try:
registration.run()
except RuntimeError as e:
raise RuntimeError(
"%s\n\n"
"Have you tried running the script with '--use-moving-mask 0'?" % e)
transform_sitk = registration.get_registration_transform_sitk()
stack.sitk_mask = sitk.Resample(
template.sitk_mask,
stack.sitk_mask,
transform_sitk,
sitk.sitkNearestNeighbor,
0,
template.sitk_mask.GetPixelIDValue()
)
if args.dilation_radius > 0:
stack_mask_morpher = stmorph.StackMaskMorphologicalOperations.from_sitk_mask(
mask_sitk=stack.sitk_mask,
dilation_radius=args.dilation_radius,
dilation_kernel="Ball",
use_dilation_in_plane_only=True,
)
stack_mask_morpher.run_dilation()
stack.sitk_mask = stack_mask_morpher.get_processed_mask_sitk()
dw.DataWriter.write_mask(stack.sitk_mask, args.output)
elapsed_time = ph.stop_timing(time_start)
if args.verbose:
ph.show_nifti(args.fixed, segmentation=args.output)
ph.print_title("Summary")
exe_file_info = os.path.basename(os.path.abspath(__file__)).split(".")[0]
print("%s | Computational Time for Segmentation Propagation: %s" % (
exe_file_info, elapsed_time))
return 0
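# Example invocation (paths are placeholders; the flag names assume the
# add_* helpers above register like-named options):
#   python propagate_mask.py --fixed stack.nii.gz --moving template.nii.gz \
#       --moving-mask template_mask.nii.gz --output stack_mask.nii.gz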
if __name__ == '__main__':
main()
| 31.019737 | 85 | 0.654295 | 567 | 4,715 | 5.186949 | 0.320988 | 0.048623 | 0.042843 | 0.031282 | 0.15981 | 0.088405 | 0.088405 | 0.067324 | 0.067324 | 0.067324 | 0 | 0.005663 | 0.250901 | 4,715 | 151 | 86 | 31.225166 | 0.82701 | 0.10456 | 0 | 0.127273 | 0 | 0 | 0.102759 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.009091 | false | 0 | 0.109091 | 0 | 0.127273 | 0.036364 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f49397c98376fff46903507b088abc59452d83f1 | 843 | py | Python | homework-3-su21-falkishi-main/helper.py | falkishi/Python-HWs | 04504c21a7fc5dc4b9fe7820549d9cdf98c7aa91 | [
"Apache-2.0"
] | null | null | null | homework-3-su21-falkishi-main/helper.py | falkishi/Python-HWs | 04504c21a7fc5dc4b9fe7820549d9cdf98c7aa91 | [
"Apache-2.0"
] | null | null | null | homework-3-su21-falkishi-main/helper.py | falkishi/Python-HWs | 04504c21a7fc5dc4b9fe7820549d9cdf98c7aa91 | [
"Apache-2.0"
] | null | null | null | import numpy as np
import matplotlib.pyplot as plt
# bars: one entry per histogram bin, giving that bin's value
# filename: file to save plot to (in .png format)
# minrange: minimum value of leftmost bin
# maxrange: maximum value of rightmost bin
def plotHisto(bars, filename, minrange=0.0, maxrange=100.0, plotinline=False):
mrange = maxrange - minrange
binsize = mrange / len(bars)
# this is a "list comprehension" -- it's a quick way to process one
# list to produce another list
labels = [(mrange / len(bars)) * i + minrange for i in range(len(bars))]
plt.bar(labels, bars, align='edge', width=binsize)
if plotinline:
plt.show()
else:
plt.savefig(filename)
# plt.show()
plt.clf()
def getData(filename='input.txt'):
return np.loadtxt(filename)
| 29.068966 | 81 | 0.67497 | 122 | 843 | 4.663934 | 0.606557 | 0.036907 | 0.045694 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.009146 | 0.221827 | 843 | 28 | 82 | 30.107143 | 0.858232 | 0.372479 | 0 | 0 | 0 | 0 | 0.024952 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.142857 | false | 0 | 0.142857 | 0.071429 | 0.357143 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f497869d4aba903a68e4d3dd6b1837e119ebdbf3 | 3,244 | py | Python | efficientdet/run_tflite.py | sujitahirrao/automl | e82d92d9ccca72e54e4c85188345f110ca7dfc3c | [
"Apache-2.0"
] | 5,277 | 2020-03-12T23:09:47.000Z | 2022-03-30T17:28:35.000Z | _modified-EfficientDet/run_tflite.py | fedezocco/MoreEffEffDetsAndWPBB-TensorFlow | 1f5402c665f351123a9e83face33e881acebbbce | [
"MIT"
] | 988 | 2020-03-17T02:53:40.000Z | 2022-03-17T19:34:10.000Z | _modified-EfficientDet/run_tflite.py | fedezocco/MoreEffEffDetsAndWPBB-TensorFlow | 1f5402c665f351123a9e83face33e881acebbbce | [
"MIT"
] | 1,486 | 2020-03-14T05:15:22.000Z | 2022-03-29T02:28:56.000Z | # Copyright 2021 Google Research. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Run TF Lite model."""
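# Example invocation (all paths and the size below are placeholders):
#   python run_tflite.py --tflite_path=/tmp/model.tflite \
#       --sample_image=/tmp/img.jpg --output_image=/tmp/out.png \
#       --image_size=512x512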
from absl import app
from absl import flags
from PIL import Image
import tensorflow as tf
import inference
FLAGS = flags.FLAGS
def define_flags():
"""Define flags."""
flags.DEFINE_string('tflite_path', None, 'Path of tflite file.')
flags.DEFINE_string('sample_image', None, 'Sample image path')
flags.DEFINE_string('output_image', None, 'Output image path')
flags.DEFINE_string('image_size', '512x512', 'Image size "WxH".')
def load_image(image_path, image_size):
"""Loads an image, and returns numpy.ndarray.
Args:
image_path: str, path to image.
image_size: list of int, representing [width, height].
Returns:
image_batch: numpy.ndarray of shape [1, H, W, C].
"""
input_data = tf.io.gfile.GFile(image_path, 'rb').read()
image = tf.io.decode_image(input_data, channels=3, dtype=tf.uint8)
image = tf.image.resize(
image, image_size, method='bilinear', antialias=True)
return tf.expand_dims(tf.cast(image, tf.uint8), 0).numpy()
def save_visualized_image(image, prediction, output_path):
"""Saves the visualized image with prediction.
Args:
image: numpy.ndarray of shape [H, W, C].
prediction: numpy.ndarray of shape [num_predictions, 7].
output_path: str, output image path.
"""
output_image = inference.visualize_image_prediction(
image,
prediction,
label_map='coco')
Image.fromarray(output_image).save(output_path)
class TFLiteRunner:
"""Wrapper to run TFLite model."""
def __init__(self, model_path):
"""Init.
Args:
model_path: str, path to tflite model.
"""
self.interpreter = tf.lite.Interpreter(model_path=model_path)
self.interpreter.allocate_tensors()
self.input_index = self.interpreter.get_input_details()[0]['index']
self.output_index = self.interpreter.get_output_details()[0]['index']
def run(self, image):
"""Run inference on a single images.
Args:
image: numpy.ndarray of shape [1, H, W, C].
Returns:
prediction: numpy.ndarray of shape [1, num_detections, 7].
"""
self.interpreter.set_tensor(self.input_index, image)
self.interpreter.invoke()
return self.interpreter.get_tensor(self.output_index)
def main(_):
image_size = [int(dim) for dim in FLAGS.image_size.split('x')]
image = load_image(FLAGS.sample_image, image_size)
runner = TFLiteRunner(FLAGS.tflite_path)
prediction = runner.run(image)
save_visualized_image(image[0], prediction[0], FLAGS.output_image)
if __name__ == '__main__':
define_flags()
app.run(main)
| 30.037037 | 80 | 0.696979 | 452 | 3,244 | 4.845133 | 0.365044 | 0.032877 | 0.031963 | 0.043379 | 0.088584 | 0.0379 | 0.021005 | 0.021005 | 0 | 0 | 0 | 0.009967 | 0.16492 | 3,244 | 107 | 81 | 30.317757 | 0.79845 | 0.40783 | 0 | 0 | 0 | 0 | 0.085998 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.139535 | false | 0 | 0.116279 | 0 | 0.325581 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f49ae3e2dbdfbcfdf2ef4fc9931242bf6edb0d11 | 3,679 | py | Python | examples/input/joystick.py | Torxed/pyglet | 0a35e67e43d069b952e3b02773cdf5b064124069 | [
"BSD-3-Clause"
] | null | null | null | examples/input/joystick.py | Torxed/pyglet | 0a35e67e43d069b952e3b02773cdf5b064124069 | [
"BSD-3-Clause"
] | null | null | null | examples/input/joystick.py | Torxed/pyglet | 0a35e67e43d069b952e3b02773cdf5b064124069 | [
"BSD-3-Clause"
] | null | null | null | # ----------------------------------------------------------------------------
# pyglet
# Copyright (c) 2006-2008 Alex Holkner
# Copyright (c) 2008-2020 pyglet contributors
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of pyglet nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
import pyglet
from pyglet.gl import *
joysticks = pyglet.input.get_joysticks()
assert joysticks, 'No joystick device is connected'
joystick = joysticks[0]
joystick.open()
window = pyglet.window.Window()
main_batch = pyglet.graphics.Batch()
# Labels
pyglet.text.Label("Buttons:", x=15, y=window.height - 25,
font_size=14, batch=main_batch)
pyglet.text.Label("D Pad:", x=window.width - 125, y=window.height - 25,
font_size=14, batch=main_batch)
rows = len(joystick.buttons) // 2
button_labels = []
for i in range(len(joystick.buttons)):
y = window.height - 50 - 25 * (i % rows)
x = 35 + 60 * (i // rows)
label = pyglet.text.Label(f"{i}:", x=x, y=y, font_size=14,
anchor_x='right', batch=main_batch)
button_labels.append(label)
@window.event
def on_draw():
window.clear()
main_batch.draw()
x = round((.5 * joystick.x + 1), 2) * window.width / 2
y = round((-.5 * joystick.y + 1), 2) * window.height / 2
rx = (.5 * joystick.rx + 1) * 60
ry = (-.5 * joystick.ry + 1) * 60
z = joystick.z * 50
# Axes
joystick_rect = pyglet.shapes.Rectangle(x, y, 10 + rx + z, 10 + ry + z, color=(255, 0, 255))
joystick_rect.anchor_x = joystick_rect.width // 2
joystick_rect.anchor_y = joystick_rect.height // 2
joystick_rect.draw()
# Buttons
for i in range(len(joystick.buttons)):
x = button_labels[i].x
y = button_labels[i].y
rect = pyglet.shapes.Rectangle(x + 10, y + 1, 10, 10, color=(255, 0, 0))
if joystick.buttons[i]:
rect.color = (0, 255, 0)
rect.draw()
# Hat
x = window.width - 75
y = window.height - 100
d_pad_rect = pyglet.shapes.Rectangle(x + joystick.hat_x * 50, y + joystick.hat_y * 50, 10, 10)
d_pad_rect.color = (0, 0, 255)
d_pad_rect.draw()
pyglet.clock.schedule(lambda dt: None)
pyglet.app.run()
| 37.927835 | 98 | 0.654254 | 518 | 3,679 | 4.586873 | 0.378378 | 0.030303 | 0.021886 | 0.031566 | 0.167508 | 0.114478 | 0.114478 | 0.090067 | 0.090067 | 0.090067 | 0 | 0.036531 | 0.20386 | 3,679 | 96 | 99 | 38.322917 | 0.774667 | 0.466431 | 0 | 0.085106 | 0 | 0 | 0.028067 | 0 | 0 | 0 | 0 | 0 | 0.021277 | 1 | 0.021277 | false | 0 | 0.042553 | 0 | 0.06383 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f49b6ce5773d0c3d59ea23dab775ea53d196268f | 8,435 | py | Python | gradnet/layers/memory.py | imandr/gradnet | 72b9b140cb3f43224a11310b115480fb42820546 | [
"BSD-3-Clause"
] | null | null | null | gradnet/layers/memory.py | imandr/gradnet | 72b9b140cb3f43224a11310b115480fb42820546 | [
"BSD-3-Clause"
] | null | null | null | gradnet/layers/memory.py | imandr/gradnet | 72b9b140cb3f43224a11310b115480fb42820546 | [
"BSD-3-Clause"
] | null | null | null | import numpy as np
from .rnn import RNNLayer
D = 2.0
def softmax(x):
expx = np.exp(x-np.max(x, axis=-1, keepdims=True))
return expx/np.sum(expx, axis=-1, keepdims=True)
def attention(key, data, sharpness):
# ++++
# returns [mb, capacity]
return softmax(np.einsum("mcl,ml->mc", data, key)*sharpness[:, None]*D)
def step(m, key_w, s_w, p, key_r, s_r):
a_w = attention(key_w, m, s_w)
n = m*(1-a_w[:,:,None]) + p[:,None,:]*a_w[:,:,None]
a_r = attention(key_r, n, s_r)
q = np.einsum("mc,mcl->ml", a_r, n)
return q, a_r, n, a_w
def softmax_jacobian(n, a):
# returns [m,c,c]
eye = np.eye(n)[None,:,:]
return a[:,None,:]*(eye-a[:,:,None])
def da_dk(key, data, a, sharpness):
# +++
# returns [m,c,l] = dA[m,c]/dK[m,l]
jac = softmax_jacobian(data.shape[1], a)
return D*sharpness[:,None,None]*np.einsum("mcx,mxl->mcl", jac, data)
def da_ds(data, key, a):
# +++
jac = softmax_jacobian(data.shape[1], a)
return D*np.einsum("mib,mba,ma->mi", jac, data, key)
def da_dd(key, data, a, sharpness):
jac = softmax_jacobian(data.shape[1], a)
#print("sharpness:", sharpness.shape)
return D*sharpness[:,None,None,None]*np.einsum("mij,mk->mijk", jac, key)
def dq_dar(m, key_w, s_w, p, key_r, s_r, a_r, n, a_w):
# +++
return n.transpose((0,2,1))
def dq_dn(m, key_w, s_w, p, key_r, s_r, a_r, n, a_w):
# +++
eye = np.eye(m.shape[-1])
dadn = da_dd(key_r, n, a_r, s_r)
#print("dadn:", dadn.shape)
return a_r[:,None,:,None]*eye[None,:,None,:] + np.einsum("maj,maik->mjik", n, dadn)
def dn_dp(m, key_w, s_w, p, key_r, s_r, a_r, n, a_w):
# +++
eye = np.eye(m.shape[-1])
return a_w[:,:,None,None]*eye[None,None,:,:]
def dn_daw(m, key_w, s_w, p, key_r, s_r, a_r, n, a_w):
# +++
eye = np.eye(m.shape[1])
return (p[:,None,:,None] - m[:,:,:,None])*eye[None,:,None,:]
def dn_dm(m, key_w, s_w, p, key_r, s_r, a_r, n, a_w):
# +++
eye_ki = np.eye(m.shape[1])[None,:,None,:,None]
eye_jn = np.eye(m.shape[-1])[None,None,:,None,:]
dadm = da_dd(key_w, m, a_w, s_w)
return (1-a_w[:,None,None,:,None])*eye_ki*eye_jn + \
np.einsum("mikn,mij->mijkn", dadm, p[:,None,:] - m[:,:,:])
#
# derived
#
def dq_dp(m, key_w, s_w, p, key_r, s_r, a_r, n, a_w):
# +++
# [m,l,l]
return np.einsum("miab,mabj->mij",
dq_dn(m, key_w, s_w, p, key_r, s_r, a_r, n, a_w),
dn_dp(m, key_w, s_w, p, key_r, s_r, a_r, n, a_w)
)
def dq_dkr(m, key_w, s_w, p, key_r, s_r, a_r, n, a_w):
# +++
# [m,l,l]
return np.einsum("mia,maj->mij",
dq_dar(m, key_w, s_w, p, key_r, s_r, a_r, n, a_w),
da_dk(key_r, n, a_r, s_r)
)
def dq_dm(m, key_w, s_w, p, key_r, s_r, a_r, n, a_w):
# +++
# dq/dn*dn/dm: [m,l,c,l]*[m,c,l,c,l] -> [m,l,c,l]
return np.einsum("miab,mabjk->mijk",
dq_dn(m, key_w, s_w, p, key_r, s_r, a_r, n, a_w),
dn_dm(m, key_w, s_w, p, key_r, s_r, a_r, n, a_w)
)
def dq_dkw(m, key_w, s_w, p, key_r, s_r, a_r, n, a_w):
# +++
# dq/dn*dn/dAw*dAw/dKw: [m,l,c,l]*[m,c,l,c]*[m,c,l] -> [m,l,l]
return np.einsum("miab,mabc,mcj->mij",
dq_dn(m, key_w, s_w, p, key_r, s_r, a_r, n, a_w),
dn_daw(m, key_w, s_w, p, key_r, s_r, a_r, n, a_w),
da_dk(key_w, m, a_w, s_w)
)
def dq_dsw(m, key_w, s_w, p, key_r, s_r, a_r, n, a_w):
# +++
# dq/dn*dn/dAw*dAw/dSw: [m,l,c,l]*[m,c,l,c]*[m,c] -> [m,l]
return np.einsum("miab,mabc,mc->mi",
dq_dn(m, key_w, s_w, p, key_r, s_r, a_r, n, a_w),
dn_daw(m, key_w, s_w, p, key_r, s_r, a_r, n, a_w),
da_ds(m, key_w, a_w)
)
def dq_dsr(m, key_w, s_w, p, key_r, s_r, a_r, n, a_w):
# +++
# dq/dAr*dAr/dSr: [m,l,c]*[m,c] -> [m,l]
return np.einsum("mia,ma->mi",
dq_dar(m, key_w, s_w, p, key_r, s_r, a_r, n, a_w),
da_ds(n, key_r, a_r)
)
def dn_dsw(m, key_w, s_w, p, key_r, s_r, a_r, n, a_w):
# +++
# dN/dAw*dAw/dSw: [m,c,l,c]*[m,c] -> [m,c,l]
return np.einsum("mija,ma->mij",
dn_daw(m, key_w, s_w, p, key_r, s_r, a_r, n, a_w),
da_ds(m, key_w, a_w)
)
def dn_dkw(m, key_w, s_w, p, key_r, s_r, a_r, n, a_w):
# +++
# dN/dAw*dAw/dKw: [m,c,l,c]*[m,c,l] -> [m,c,l,l]
return np.einsum("mija,mak->mijk",
dn_daw(m, key_w, s_w, p, key_r, s_r, a_r, n, a_w),
da_dk(key_w, m, a_w, s_w)
)
class Memory(RNNLayer):
#
# X: <Kw, P, Kr, Sw, Sr>
# Input state: N[t-1]=M[t] from previous step
# Output state: N[t]=M[t+1]
# Output: [Q]
RSharpness = 1.0
WSharpness = 1.0
Alpha = 0.5
def __init__(self, capacity, data_length, return_sequences=False, **args):
RNNLayer.__init__(self, return_sequences=return_sequences, **args)
self.L = data_length
self.C = capacity
self.D = 5.0
self.ReturnSequences = return_sequences
def configure(self, inputs):
assert len(inputs) == 1 and inputs[0].Shape[-1] == self.L*3+2
return (inputs[0].Shape[0], self.L) if self.ReturnSequences else (self.L,)
check_confgiuration = configure
def init_state(self, mb):
data = np.zeros((mb, self.C, self.L))
return data
@property
def params(self):
return []
def init_context(self, x, state_in):
T, b, d = x.shape # T, minibatch, width (=L*3+2)
assert x.shape[-1] == self.L*3+2
data_in = state_in
data_record = np.empty((T+1,)+data_in.shape)
data_record[-1,...] = data_in
Aw = np.empty((T, b, self.C))
Ar = np.empty((T, b, self.C))
context = (data_record, Aw, Ar, x.copy())
return context
def forward(self, t, x, s, context):
# X: <Kw, P, Kr, Sw, Sr>
#print("Memory.forward: x:", x.shape, x)
L, C = self.L, self.C
N_record, Aw_record, Ar_record, x_record = context
Kw = x[:,:L]
P = x[:,L:2*L]
Kr = x[:,2*L:3*L]
Sw = x[:,3*L]
Sr = x[:,3*L+1]
M = s
Q, Ar, N, Aw = step(M, Kw, Sw, P, Kr, Sr)
N_record[t] = N
Aw_record[t] = Aw
Ar_record[t] = Ar
return Q, N, context
def backward(self, t, gy_t, gstate_t, gw, context):
# given dL/dc = gc and dL/dy = gy and accumulated dL/dw = gw return dL/dx, dL/ds and updated dL/dw
# initial gw is None
L, C = self.L, self.C
N_record, Aw_record, Ar_record, x = context
Nt = N_record[t]
Mt = N_record[t-1]
Awt = Aw_record[t]
Art = Ar_record[t]
Kwt = x[t,:,:self.L]
Pt = x[t,:,self.L:self.L*2]
Krt = x[t,:,self.L*2:self.L*3]
Swt = x[t,:,3*L]
Srt = x[t,:,3*L+1]
jac_dq_dSw = dq_dsw(Mt, Kwt, Swt, Pt, Krt, Srt, Art, Nt, Awt)
jac_dq_dKw = dq_dkw(Mt, Kwt, Swt, Pt, Krt, Srt, Art, Nt, Awt)
jac_dq_dKr = dq_dkr(Mt, Kwt, Swt, Pt, Krt, Srt, Art, Nt, Awt)
jac_dq_dP = dq_dp(Mt, Kwt, Swt, Pt, Krt, Srt, Art, Nt, Awt)
jac_dq_dSr = dq_dsr(Mt, Kwt, Swt, Pt, Krt, Srt, Art, Nt, Awt)
jac_dq_dSw = dq_dsw(Mt, Kwt, Swt, Pt, Krt, Srt, Art, Nt, Awt)
jac_dn_dP = dn_dp(Mt, Kwt, Swt, Pt, Krt, Srt, Art, Nt, Awt)
jac_dn_dSw = dn_dsw(Mt, Kwt, Swt, Pt, Krt, Srt, Art, Nt, Awt)
jac_dn_dKw = dn_dkw(Mt, Kwt, Swt, Pt, Krt, Srt, Art, Nt, Awt)
jac_dN_dM = dn_dm(Mt, Kwt, Swt, Pt, Krt, Srt, Art, Nt, Awt)
jac_dQ_dM = dq_dm(Mt, Kwt, Swt, Pt, Krt, Srt, Art, Nt, Awt)
gx = np.concatenate(
[
np.einsum("mij,mi->mj", jac_dq_dKw, gy_t) + np.einsum("mabj,mab->mj", jac_dn_dKw, gstate_t),
np.einsum("mij,mi->mj", jac_dq_dP, gy_t) + np.einsum("mabj,mab->mj", jac_dn_dP, gstate_t),
np.einsum("mij,mi->mj", jac_dq_dKr, gy_t),
(np.einsum("mi,mi->m", jac_dq_dSw, gy_t) + np.einsum("mcl,mcl->m", jac_dn_dSw, gstate_t))[:,None],
np.einsum("mi,mi->m", jac_dq_dSr, gy_t)[:,None]
],
axis=-1
)
gs = np.einsum("mijkl,mij->mkl", jac_dN_dM, gstate_t) + \
np.einsum("macl,ma->mcl", jac_dQ_dM, gy_t)
return gx, gw, gs # gx is ndarray, not list !
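if __name__ == '__main__':
    # Hedged self-check on synthetic shapes: compare the analytic dq/dKr
    # against a finite difference of step(). Run as a module (e.g.
    # `python -m gradnet.layers.memory`) so the relative import resolves.
    mb, C, L = 2, 4, 3
    rng = np.random.RandomState(0)
    m = rng.randn(mb, C, L)
    p, kw, kr = rng.randn(mb, L), rng.randn(mb, L), rng.randn(mb, L)
    sw, sr = np.ones(mb), np.ones(mb)
    q, a_r, n, a_w = step(m, kw, sw, p, kr, sr)
    jac = dq_dkr(m, kw, sw, p, kr, sr, a_r, n, a_w)
    eps = 1e-6
    kr2 = kr.copy()
    kr2[0, 0] += eps
    q2, _, _, _ = step(m, kw, sw, p, kr2, sr)
    numeric = (q2[0] - q[0]) / eps
    print('max |analytic - numeric|:', np.abs(jac[0, :, 0] - numeric).max())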
| 31.950758 | 114 | 0.516301 | 1,624 | 8,435 | 2.474138 | 0.106527 | 0.017919 | 0.021653 | 0.038825 | 0.482827 | 0.447237 | 0.414883 | 0.38004 | 0.363614 | 0.310851 | 0 | 0.008984 | 0.287374 | 8,435 | 263 | 115 | 32.072243 | 0.659458 | 0.101245 | 0 | 0.138554 | 0 | 0 | 0.040467 | 0 | 0 | 0 | 0 | 0 | 0.012048 | 1 | 0.162651 | false | 0 | 0.012048 | 0.066265 | 0.361446 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f4a0c71b3bfeeeaa10aa3ec693678f0d54ec57bd | 1,676 | py | Python | code/lihongyi/task1.py | CrazyXiao/machine-learning | 8e1e8cb9cf6f4e1c403873168f2bacbd84a106bd | [
"MIT"
] | 200 | 2019-04-23T01:13:31.000Z | 2021-08-01T07:56:46.000Z | code/lihongyi/task1.py | CrazyXiao/machine-learning | 8e1e8cb9cf6f4e1c403873168f2bacbd84a106bd | [
"MIT"
] | null | null | null | code/lihongyi/task1.py | CrazyXiao/machine-learning | 8e1e8cb9cf6f4e1c403873168f2bacbd84a106bd | [
"MIT"
] | 10 | 2019-04-24T10:18:59.000Z | 2021-04-19T12:58:59.000Z | """
Hand-rolled implementation:
solve a linear regression problem with gradient descent
"""
import numpy as np
import matplotlib.pyplot as plt
def costFunction(X, y, theta=[0, 0]):
"""
Squared-error cost function
"""
m = y.size
h = X.dot(theta)
J = 1.0 / (2 * m) * (np.sum(np.square(h - y)))
return J
def gradientDescent(X, y, theta=[0, 0], alpha=0.01, num_iters=1500):
"""
Batch gradient descent
"""
m = y.size
J_history = np.zeros(num_iters)
for iter in np.arange(num_iters):
h = X.dot(theta)
theta = theta - alpha * (1.0 / m) * (X.T.dot(h - y))
J_history[iter] = costFunction(X, y, theta)
return (theta, J_history)
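# The update above is plain batch gradient descent on the squared-error cost:
#   theta <- theta - (alpha / m) * X^T (X theta - y)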
def MaxMinNormalization(x):
"""
Min-max normalization to [0, 1]
"""
Min = np.min(x)
Max = np.max(x)
x = (x - Min) / (Max - Min)
return x
# Load an external training set instead:
# data = np.loadtxt('linear_regression_data1.txt', delimiter=',')
# X = np.c_[np.ones(data.shape[0]),data[:,0]]
# y = data[:,1]
# Build a synthetic dataset
X_row = 100 * np.random.rand(100)
X = MaxMinNormalization(X_row)
y = 0.5*X + 2 + np.random.normal(0,0.01,(100,))
# Visualize the data
plt.subplot(1, 2, 1)
plt.scatter(X_row, y, color='black')
plt.xlabel('x')
plt.ylabel('y')
X = np.c_[np.ones((X.shape[0],1)), X]
# training set
X_train = X[:80]
y_train = y[:80]
# test set
X_test = X[80:]
y_test = y[80:]
print(costFunction(X,y))
b = 0
w = 0
lr = 0.01
iteration = 10000
# Run gradient descent and record the cost at every iteration
theta , Cost_J = gradientDescent(X_train, y_train, theta=[b, w], alpha= lr, num_iters= iteration)
print('Final b, w: ', theta)
testCost = costFunction(X_test, y_test, theta)
print('Test set cost: ', testCost)
h = X.dot(theta)
plt.plot(X_row, h, "b--")
plt.subplot(1, 2, 2)
plt.plot(Cost_J)
plt.ylabel('Cost J')
plt.xlabel('Iterations')
plt.show()
| 18.021505 | 97 | 0.587112 | 281 | 1,676 | 3.412811 | 0.316726 | 0.054223 | 0.043796 | 0.031283 | 0.039625 | 0 | 0 | 0 | 0 | 0 | 0 | 0.047473 | 0.220764 | 1,676 | 92 | 98 | 18.217391 | 0.68683 | 0.127088 | 0 | 0.104167 | 0 | 0 | 0.031114 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.0625 | false | 0 | 0.041667 | 0 | 0.166667 | 0.0625 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f4a0df6758b56df460891c378671b86d0721f408 | 2,106 | py | Python | tile_split_images.py | swj0418/stylegan2-pytorch | 3a785a3681a92ecc91fc6becedd3a5429906a8e8 | [
"MIT",
"BSD-2-Clause",
"Apache-2.0"
] | null | null | null | tile_split_images.py | swj0418/stylegan2-pytorch | 3a785a3681a92ecc91fc6becedd3a5429906a8e8 | [
"MIT",
"BSD-2-Clause",
"Apache-2.0"
] | null | null | null | tile_split_images.py | swj0418/stylegan2-pytorch | 3a785a3681a92ecc91fc6becedd3a5429906a8e8 | [
"MIT",
"BSD-2-Clause",
"Apache-2.0"
] | null | null | null | import os
import sys
import rawpy
import cv2
import time
def split_image(img, side=3):
images = []
for i in range(side):
for j in range(side):
temp = img[1024*i:1024*(i+1),1024*j:1024*(j+1),:]
images.append(temp)
return images
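# split_image assumes a (side*1024) x (side*1024) input and returns the
# side*side non-overlapping 1024x1024 tiles in row-major order.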
if __name__ == '__main__':
src_name = 'asphalt-smoking-lot'
target_name = src_name + '-split'
src_folder = os.path.join('/home/sangwon/Downloads', src_name)
target_folder = os.path.join('/home/sangwon/Downloads', target_name)
try:
os.mkdir(target_folder)
except:
pass
files = [os.path.join(src_folder, f) for f in os.listdir(src_folder)]
counter = 0
for file in files:
if file.endswith('.dng'):
img = rawpy.imread(file)
img = img.postprocess()
else:
img = cv2.imread(file)
# Crop/resize according to the source photo size
# Case: 3024 x 3024 (square)
if img.shape[0] == img.shape[1] == 3024:
reshaped = cv2.resize(img, (3072, 3072))
side = 3
elif (img.shape[0] == 5760 and img.shape[1] == 4312) or (img.shape[0] == 4312 and img.shape[1] == 5760):
reshaped = img[832:832 + 4096,108:108+4096,:]
reshaped = cv2.resize(reshaped, (3072, 3072))
side = 3
elif img.shape[0] == 4032 and img.shape[1] == 3024:
reshaped = img[504:504+3024,:,:]
reshaped = cv2.resize(reshaped, (3072, 3072))
side = 3
else:
# Unhandled size: skip the file, otherwise `reshaped`/`side` below
# would be unbound.
continue
splitted = split_image(reshaped, side=side)
individual_count = 0
for i in splitted:
for rot in ['', cv2.ROTATE_90_CLOCKWISE, cv2.ROTATE_90_COUNTERCLOCKWISE, cv2.ROTATE_180]:
new_file_name = os.path.join(target_folder, str(time.time()) + '.png')
if rot != '':
rot_i = cv2.rotate(i, rot)
else:
rot_i = i
cv2.imwrite(new_file_name, rot_i)
individual_count += 1
counter += 1
print(counter / len(files))
| 29.661972 | 112 | 0.546059 | 280 | 2,106 | 3.982143 | 0.314286 | 0.071749 | 0.040359 | 0.043049 | 0.237668 | 0.211659 | 0.211659 | 0.147085 | 0.052018 | 0 | 0 | 0.105816 | 0.322412 | 2,106 | 70 | 113 | 30.085714 | 0.675543 | 0.019468 | 0 | 0.132075 | 0 | 0 | 0.042192 | 0.022308 | 0 | 0 | 0 | 0 | 0 | 1 | 0.018868 | false | 0.018868 | 0.09434 | 0 | 0.132075 | 0.018868 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f4a16fe5bef8b5d463b63caa52b444b12647f3b0 | 10,297 | py | Python | src/whylogs/app/config.py | bernease/whylogs-python | cfd2a2f71280537aae584cbd40a752fbe7da647b | [
"Apache-2.0"
] | null | null | null | src/whylogs/app/config.py | bernease/whylogs-python | cfd2a2f71280537aae584cbd40a752fbe7da647b | [
"Apache-2.0"
] | null | null | null | src/whylogs/app/config.py | bernease/whylogs-python | cfd2a2f71280537aae584cbd40a752fbe7da647b | [
"Apache-2.0"
] | null | null | null | """
Classes/functions for configuring the whylogs app
.. autodata:: ALL_SUPPORTED_FORMATS
"""
from logging import getLogger
from typing import Any, Dict, List, Optional
# import typing
import yaml
from marshmallow import Schema, fields, post_load, validate
from whylogs.app.output_formats import SUPPORTED_OUTPUT_FORMATS
WHYLOGS_YML = ".whylogs.yaml"
ALL_SUPPORTED_FORMATS = ["all"] + SUPPORTED_OUTPUT_FORMATS
"""Supported output formats for whylogs writer configuration"""
SegmentTag = Dict[str, Any]
SegmentTags = List[SegmentTag]
class WriterConfig:
"""
Config for whylogs writers
See also:
* :class:`WriterConfigSchema`
* :class:`whylogs.app.writers.Writer`
* :func:`whylogs.app.writers.writer_from_config`
Parameters
----------
type : str
Destination for the writer output, e.g. 'local' or 's3'
formats : list
All output formats. See :data:`ALL_SUPPORTED_FORMATS`
output_path : str
Prefix of where to output files. A directory for `type = 'local'`,
or key prefix for `type = 's3'`
path_template : str, optional
Templatized path output using standard python string templates.
Variables are accessed via $identifier or ${identifier}.
See :func:`whylogs.app.writers.Writer.template_params` for a list of
available identifiers.
Default = :data:`whylogs.app.writers.DEFAULT_PATH_TEMPLATE`
filename_template : str, optional
Templatized output filename using standardized python string templates.
Variables are accessed via $identifier or ${identifier}.
See :func:`whylogs.app.writers.Writer.template_params` for a list of
available identifiers.
Default = :data:`whylogs.app.writers.DEFAULT_FILENAME_TEMPLATE`
"""
def __init__(
self,
type: str,
formats: List[str],
output_path: str,
path_template: Optional[str] = None,
filename_template: Optional[str] = None,
):
self.type = type
self.formats = formats
self.output_path = output_path
self.path_template = path_template
self.filename_template = filename_template
def to_yaml(self, stream=None):
"""
Serialize this config to YAML
Parameters
----------
stream
If None (default) return a string, else dump the yaml into this
stream.
"""
dump = WriterConfigSchema().dump(self)
return yaml.dump(dump, stream)
@staticmethod
def from_yaml(stream, **kwargs):
"""
Load config from yaml
Parameters
----------
stream : str, file-obj
String or file-like object to load yaml from
kwargs
ignored
Returns
-------
config : `WriterConfig`
Generated config
"""
data = yaml.safe_load(stream)
return WriterConfigSchema().load(data)
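# Hedged round-trip sketch for WriterConfig (the values are illustrative):
#     wc = WriterConfig(type="local", formats=["all"], output_path="output")
#     text = wc.to_yaml()                      # serialize to a YAML string
#     restored = WriterConfig.from_yaml(text)  # back to a WriterConfig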
class MetadataConfig:
"""
Config for whylogs metadata
See also:
* :class:`MetadataConfigSchema`
* :class:`whylogs.app.writers.Writer`
* :func:`whylogs.app.writers.writer_from_config`
Parameters
----------
type : str
Destination for the writer output, e.g. 'local' or 's3'
output_path : str
Prefix of where to output files. A directory for `type = 'local'`,
or key prefix for `type = 's3'`
path_template : str, optional
Templatized path output using standard python string templates.
Variables are accessed via $identifier or ${identifier}.
See :func:`whylogs.app.writers.Writer.template_params` for a list of
available identifiers.
Default = :data:`whylogs.app.metadata_writer.DEFAULT_PATH_TEMPLATE`
"""
def __init__(
self,
type: str,
output_path: str,
path_template: Optional[str] = None,
):
self.type = type
self.output_path = output_path
self.path_template = path_template
def to_yaml(self, stream=None):
"""
Serialize this config to YAML
Parameters
----------
stream
If None (default) return a string, else dump the yaml into this
stream.
"""
dump = MetadataConfigSchema().dump(self)
return yaml.dump(dump, stream)
@staticmethod
def from_yaml(stream, **kwargs):
"""
Load config from yaml
Parameters
----------
stream : str, file-obj
String or file-like object to load yaml from
kwargs
ignored
Returns
-------
    config : `MetadataConfig`
Generated config
"""
data = yaml.safe_load(stream)
return MetadataConfigSchema().load(data)
class SessionConfig:
"""
Config for a whylogs session.
See also :class:`SessionConfigSchema`
Parameters
----------
project : str
Project associated with this whylogs session
pipeline : str
Name of the associated data pipeline
writers : list
A list of `WriterConfig` objects defining writer outputs
    metadata : MetadataConfig
        Config for the metadata writer
    verbose : bool, default=False
        Output verbosity
    with_rotation_time : str, optional
        Interval unit for time-based profile rotation:
        "s" for seconds, "m" for minutes, "h" for hours, "d" for days
    cache_size : int, default=1
        How many data profiles to cache in the logger during rotation
"""
def __init__(
self,
project: str,
pipeline: str,
writers: List[WriterConfig],
metadata: MetadataConfig,
verbose: bool = False,
with_rotation_time: str = None,
cache_size: int = 1,
):
self.project = project
self.pipeline = pipeline
self.verbose = verbose
self.writers = writers
self.metadata = metadata
self.with_rotation_time = with_rotation_time
self.cache_size = cache_size
def to_yaml(self, stream=None):
"""
Serialize this config to YAML
Parameters
----------
stream
If None (default) return a string, else dump the yaml into this
stream.
"""
return yaml.dump(SessionConfigSchema().dump(self), stream)
@staticmethod
def from_yaml(stream):
"""
Load config from yaml
Parameters
----------
stream : str, file-obj
String or file-like object to load yaml from
Returns
-------
config : SessionConfig
Generated config
"""
return SessionConfigSchema().load(yaml.safe_load(stream=stream))
class WriterConfigSchema(Schema):
"""
Marshmallow schema for :class:`WriterConfig` class.
"""
type = fields.Str(validate=validate.OneOf(["local", "s3"]), required=True)
formats = fields.List(
fields.Str(validate=validate.OneOf(ALL_SUPPORTED_FORMATS)),
required=True,
validate=validate.Length(min=1),
)
output_path = fields.Str(required=True)
path_template = fields.Str(required=False, allow_none=True)
filename_template = fields.Str(required=False, allow_none=True)
@post_load
def make_writer(self, data, **kwargs):
return WriterConfig(**data)
class MetadataConfigSchema(Schema):
"""
Marshmallow schema for :class:`MetadataConfig` class.
"""
type = fields.Str(validate=validate.OneOf(["local", "s3"]), required=True)
output_path = fields.Str(required=True)
path_template = fields.Str(required=False, allow_none=True)
@post_load
def make_metadata(self, data, **kwargs):
return MetadataConfig(**data)
class SessionConfigSchema(Schema):
"""
Marshmallow schema for :class:`SessionConfig` class.
"""
project = fields.Str(required=True)
pipeline = fields.Str(required=True)
with_rotation_time = fields.Str(
required=False, validate=validate.OneOf(["s", "m", "h", "d"]))
    # named `cache_size` so that SessionConfig(**data) in make_session below
    # matches the keyword accepted by SessionConfig.__init__
    cache_size = fields.Int(required=False)
verbose = fields.Bool(missing=False)
writers = fields.List(
fields.Nested(WriterConfigSchema),
validate=validate.Length(min=1),
required=True,
)
metadata = fields.Nested(MetadataConfigSchema, required=True)
@post_load
def make_session(self, data, **kwargs):
return SessionConfig(**data)
def load_config(path_to_config: Optional[str] = None):
"""
Load logging configuration, from disk and from the environment.
Config is loaded by attempting to load files in the following order. The
first valid file will be used
1. Path set in ``WHYLOGS_CONFIG`` environment variable
2. Current directory's ``.whylogs.yaml`` file
3. ``~/.whylogs.yaml`` (home directory)
4. ``/opt/whylogs/.whylogs.yaml`` path
Returns
-------
config : SessionConfig, None
Config for the logger, if a valid config file is found, else returns
`None`.
"""
import os
logger = getLogger(__name__)
if path_to_config is None:
cfg_candidates = {
"enviroment": os.environ.get("WHYLOGS_CONFIG"),
"current_dir": WHYLOGS_YML,
"home_dir": os.path.join(os.path.expanduser("~"), WHYLOGS_YML),
"opt": os.path.join("/opt/whylogs/", WHYLOGS_YML),
}
for k, f_path in cfg_candidates.items():
logger.debug(f"Attempting to load config file: {f_path}")
if f_path is None or not os.path.isfile(f_path):
continue
try:
with open(f_path, "rt") as f:
session_config = SessionConfig.from_yaml(f)
return session_config
            except IOError as e:
                logger.warning("Failed to load YAML config: %s", e)
else:
try:
with open(path_to_config, "rt") as f:
session_config = SessionConfig.from_yaml(f)
return session_config
        except IOError as e:
            logger.warning("Failed to load YAML config: %s", e)
return None
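# --- usage sketch (illustrative values, assuming the "local" writer type and
# the "all" output format; a minimal YAML round-trip through WriterConfig) ---
if __name__ == "__main__":
    _cfg = WriterConfig(type="local", formats=["all"], output_path="output")
    _restored = WriterConfig.from_yaml(_cfg.to_yaml())
    assert _restored.output_path == "output"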
| 28.924157 | 117 | 0.608041 | 1,145 | 10,297 | 5.355459 | 0.176419 | 0.019569 | 0.024951 | 0.026256 | 0.494292 | 0.462655 | 0.448956 | 0.448956 | 0.440639 | 0.426614 | 0 | 0.001925 | 0.293678 | 10,297 | 355 | 118 | 29.005634 | 0.841193 | 0.406915 | 0 | 0.426471 | 0 | 0 | 0.036758 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.095588 | false | 0.014706 | 0.044118 | 0.022059 | 0.382353 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f4a1fa1c3fe526bc68d702fae1ef800c0d1fd935 | 3,895 | py | Python | java/quality.py | tagonzalez/backend-metrics | 5f6c1a59daced6fb8e56bc8fd62c0cf6d5cf046e | [
"MIT"
] | null | null | null | java/quality.py | tagonzalez/backend-metrics | 5f6c1a59daced6fb8e56bc8fd62c0cf6d5cf046e | [
"MIT"
] | null | null | null | java/quality.py | tagonzalez/backend-metrics | 5f6c1a59daced6fb8e56bc8fd62c0cf6d5cf046e | [
"MIT"
] | null | null | null | import os
import xml.etree.ElementTree as ET
class Quality:
def __init__(self, cyclomatic_complexity, lines_of_code):
self.cyclomatic_complexity = cyclomatic_complexity
self.lines_of_code = lines_of_code
def set_variables(self):
self.pmd_report = './build/reports/pmd/main.xml'
self.cpd_report = './build/reports/cpd/cpdCheck.xml'
def calculate_duplicate_code(self):
self.set_variables()
tree = ET.parse(self.cpd_report)
root = tree.getroot()
non_duplication_score = 100
        # each duplication block reported by CPD costs half a point
        for _ in root:
            non_duplication_score -= 0.5
score = self.calculate_score_duplications(non_duplication_score)
print('Duplicated code metric:\n')
print('Non duplicate code score: ' + str(non_duplication_score))
print('------------------------------')
print('Score: ' + score)
print('------------------------------')
def calculate_code_smells(self):
self.set_variables()
tree = ET.parse(self.pmd_report)
root = tree.getroot()
total_issues = 0
high_priority = 0
medium_priority = 0
low_priority = 0
for referece_file in root:
for violation in referece_file:
priority = int(violation.attrib['priority'])
if(priority == 1 or priority == 2):
high_priority += 1
elif(priority == 3):
medium_priority += 1
elif(priority == 4 or priority == 5):
low_priority += 1
total_issues += 1
high_priority_percentage = high_priority * 100 / self.lines_of_code
medium_priority_percentage = medium_priority * 100 / self.lines_of_code
low_priority_percentage = low_priority * 100 / self.lines_of_code
high_priority_weighing = high_priority_percentage * 0.60
medium_priority_weighing = medium_priority_percentage * 0.30
low_priority_weighing = low_priority_percentage * 0.10
code_smells_ratio = round(((high_priority_weighing +
medium_priority_weighing + low_priority_weighing) / 3), 2)
score = str(self.calculate_score_code_smells(code_smells_ratio))
print('Code smells:\n')
print('Total issues: '+str(total_issues))
print('High priority: '+str(high_priority))
print('Medium priority: '+str(medium_priority))
print('Low priority: '+str(low_priority))
print('Code smells ratio: ' + str(code_smells_ratio) + "%")
print('Score: ' + score)
print('------------------------------')
return code_smells_ratio
    def calculate_score_duplications(self, non_duplication_score):
        # half-open bands: the CPD loop subtracts 0.5 per finding, so
        # fractional scores (e.g. 20.5) must not slip between ranges
        if non_duplication_score <= 20:
            return 'E'
        elif non_duplication_score <= 50:
            return 'D'
        elif non_duplication_score <= 60:
            return 'C'
        elif non_duplication_score <= 70:
            return 'B'
        return 'A'
    def calculate_score_code_smells(self, code_smells_ratio):
        # half-open bands: the ratio is rounded to two decimals, so
        # fractional values (e.g. 5.5) must not slip between ranges
        if code_smells_ratio <= 5:
            return 'A'
        elif code_smells_ratio <= 10:
            return 'B'
        elif code_smells_ratio <= 20:
            return 'C'
        elif code_smells_ratio <= 50:
            return 'D'
        return 'E'
def calculate_quality(self):
self.set_variables()
self.calculate_duplicate_code()
self.calculate_code_smells()
print('Cyclomatic complexity: ' + str(self.cyclomatic_complexity)) | 36.745283 | 94 | 0.592555 | 438 | 3,895 | 4.958904 | 0.203196 | 0.082873 | 0.089779 | 0.027624 | 0.06814 | 0.06814 | 0.032228 | 0.032228 | 0 | 0 | 0 | 0.023205 | 0.291913 | 3,895 | 106 | 95 | 36.745283 | 0.764322 | 0 | 0 | 0.292135 | 0 | 0 | 0.089836 | 0.038501 | 0 | 0 | 0 | 0 | 0 | 1 | 0.078652 | false | 0 | 0.022472 | 0 | 0.146067 | 0.157303 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f4a2893f385556b94e0663b1aaa16fb98f1c2f23 | 2,561 | py | Python | lib/python/qmk/keymap.py | jskelcy/qmk_toolbox | 594ab30ea60b637a0bdee8ca3c6f6bf7fe703e98 | [
"MIT"
] | null | null | null | lib/python/qmk/keymap.py | jskelcy/qmk_toolbox | 594ab30ea60b637a0bdee8ca3c6f6bf7fe703e98 | [
"MIT"
] | null | null | null | lib/python/qmk/keymap.py | jskelcy/qmk_toolbox | 594ab30ea60b637a0bdee8ca3c6f6bf7fe703e98 | [
"MIT"
] | null | null | null | """Functions that help you work with QMK keymaps.
"""
import os
import qmk.path
# The `keymap.c` template to use when a keyboard doesn't have its own
DEFAULT_KEYMAP_C = """#include QMK_KEYBOARD_H
/* THIS FILE WAS GENERATED!
*
* This file was generated by qmk-compile-json. You may or may not want to
* edit it directly.
*/
const uint16_t PROGMEM keymaps[][MATRIX_ROWS][MATRIX_COLS] = {
__KEYMAP_GOES_HERE__
};
"""
def template(keyboard):
"""Returns the `keymap.c` template for a keyboard.
If a template exists in `keyboards/<keyboard>/templates/keymap.c` that
text will be used instead of `DEFAULT_KEYMAP_C`.
Args:
keyboard
The keyboard to return a template for.
"""
template_name = 'keyboards/%s/templates/keymap.c' % keyboard
if os.path.exists(template_name):
with open(template_name, 'r') as fd:
return fd.read()
return DEFAULT_KEYMAP_C
def generate(keyboard, layout, layers):
"""Returns a keymap.c for the specified keyboard, layout, and layers.
Args:
keyboard
The name of the keyboard
layout
The LAYOUT macro this keymap uses.
layers
An array of arrays describing the keymap. Each item in the inner array should be a string that is a valid QMK keycode.
"""
layer_txt = []
for layer_num, layer in enumerate(layers):
        if layer_num != 0:
            # close the previous layer with a comma before starting this one
            layer_txt[-1] = layer_txt[-1] + ','
layer_keys = ', '.join(layer)
layer_txt.append('\t[%s] = %s(%s)' % (layer_num, layout, layer_keys))
keymap = '\n'.join(layer_txt)
keymap_c = template(keyboard)
return keymap_c.replace('__KEYMAP_GOES_HERE__', keymap)
def write(keyboard, keymap, layout, layers):
"""Generate the `keymap.c` and write it to disk.
Returns the filename written to.
Args:
keyboard
The name of the keyboard
keymap
The name of the keymap
layout
The LAYOUT macro this keymap uses.
layers
An array of arrays describing the keymap. Each item in the inner array should be a string that is a valid QMK keycode.
"""
keymap_c = generate(keyboard, layout, layers)
keymap_path = qmk.path.keymap(keyboard)
keymap_dir = os.path.join(keymap_path, keymap)
keymap_file = os.path.join(keymap_dir, 'keymap.c')
if not os.path.exists(keymap_dir):
os.makedirs(keymap_dir)
with open(keymap_file, 'w') as keymap_fd:
keymap_fd.write(keymap_c)
return keymap_file
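# --- usage sketch (illustrative; the keyboard name and keycodes here are
# assumptions, not taken from any real QMK keyboard) ---
if __name__ == "__main__":
    _layers = [['KC_A', 'KC_B'], ['KC_TRNS', 'KC_1']]
    # with no on-disk template this falls back to DEFAULT_KEYMAP_C
    print(generate('handwired/example', 'LAYOUT', _layers))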
| 26.402062 | 130 | 0.647794 | 362 | 2,561 | 4.441989 | 0.303867 | 0.060945 | 0.018657 | 0.022388 | 0.206468 | 0.206468 | 0.206468 | 0.166667 | 0.166667 | 0.166667 | 0 | 0.002637 | 0.259664 | 2,561 | 96 | 131 | 26.677083 | 0.845464 | 0.404139 | 0 | 0 | 0 | 0 | 0.231977 | 0.047109 | 0 | 0 | 0 | 0 | 0 | 1 | 0.078947 | false | 0 | 0.052632 | 0 | 0.236842 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f4a6c1bc203ce037e567deeaffedb6ade02f0d23 | 8,025 | py | Python | env/lib/python3.6/site-packages/nacl/bindings/crypto_generichash.py | escacan/GymTracker | cda8f821bf9e77fa442f778661fc2123cb590dc5 | [
"Apache-2.0"
] | 3 | 2018-07-04T12:21:31.000Z | 2020-10-27T09:07:00.000Z | nacl/bindings/crypto_generichash.py | EnjoyLifeFund/Debian_py36_packages | 1985d4c73fabd5f08f54b922e73a9306e09c77a5 | [
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | 11 | 2020-06-05T20:57:31.000Z | 2021-09-22T18:35:03.000Z | flask/lib/python3.6/site-packages/nacl/bindings/crypto_generichash.py | JOFLIX/grapevines | 34576e01184570d79cc140b42ffb71d322132da6 | [
"MIT",
"Unlicense"
] | 1 | 2018-09-19T05:55:27.000Z | 2018-09-19T05:55:27.000Z | # Copyright 2013 Donald Stufft and individual contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function
from six import integer_types
from nacl import exceptions as exc
from nacl._sodium import ffi, lib
from nacl.exceptions import ensure
crypto_generichash_BYTES = lib.crypto_generichash_blake2b_bytes()
crypto_generichash_BYTES_MIN = lib.crypto_generichash_blake2b_bytes_min()
crypto_generichash_BYTES_MAX = lib.crypto_generichash_blake2b_bytes_max()
crypto_generichash_KEYBYTES = lib.crypto_generichash_blake2b_keybytes()
crypto_generichash_KEYBYTES_MIN = lib.crypto_generichash_blake2b_keybytes_min()
crypto_generichash_KEYBYTES_MAX = lib.crypto_generichash_blake2b_keybytes_max()
crypto_generichash_SALTBYTES = lib.crypto_generichash_blake2b_saltbytes()
crypto_generichash_PERSONALBYTES = \
lib.crypto_generichash_blake2b_personalbytes()
crypto_generichash_STATEBYTES = lib.crypto_generichash_statebytes()
_OVERLONG = '{0} length greater than {1} bytes'
_TOOBIG = '{0} greater than {1}'
def _checkparams(digest_size, key, salt, person):
"""Check hash paramters"""
ensure(isinstance(key, bytes),
'Key must be a bytes sequence',
raising=exc.TypeError)
ensure(isinstance(salt, bytes),
'Salt must be a bytes sequence',
raising=exc.TypeError)
ensure(isinstance(person, bytes),
'Person must be a bytes sequence',
raising=exc.TypeError)
ensure(isinstance(digest_size, integer_types),
'Digest size must be an integer number',
raising=exc.TypeError)
ensure(digest_size <= crypto_generichash_BYTES_MAX,
_TOOBIG.format("Digest_size", crypto_generichash_BYTES_MAX),
raising=exc.ValueError)
ensure(len(key) <= crypto_generichash_KEYBYTES_MAX,
_OVERLONG.format("Key", crypto_generichash_KEYBYTES_MAX),
raising=exc.ValueError)
ensure(len(salt) <= crypto_generichash_SALTBYTES,
_OVERLONG.format("Salt", crypto_generichash_SALTBYTES),
raising=exc.ValueError)
ensure(len(person) <= crypto_generichash_PERSONALBYTES,
_OVERLONG.format("Person", crypto_generichash_PERSONALBYTES),
raising=exc.ValueError)
def generichash_blake2b_salt_personal(data,
digest_size=crypto_generichash_BYTES,
key=b'', salt=b'', person=b''):
"""One shot hash interface
:param data: the input data to the hash function
:param digest_size: must be at most
:py:data:`.crypto_generichash_BYTES_MAX`;
the default digest size is
:py:data:`.crypto_generichash_BYTES`
:type digest_size: int
:param key: must be at most
:py:data:`.crypto_generichash_KEYBYTES_MAX` long
:type key: bytes
:param salt: must be at most
:py:data:`.crypto_generichash_SALTBYTES` long;
will be zero-padded if needed
:type salt: bytes
    :param person: must be at most
                   :py:data:`.crypto_generichash_PERSONALBYTES` long;
                   will be zero-padded if needed
:type person: bytes
:return: digest_size long digest
:rtype: bytes
"""
_checkparams(digest_size, key, salt, person)
ensure(isinstance(data, bytes),
'Input data must be a bytes sequence',
raising=exc.TypeError)
digest = ffi.new("unsigned char[]", digest_size)
# both _salt and _personal must be zero-padded to the correct length
_salt = ffi.new("unsigned char []", crypto_generichash_SALTBYTES)
_person = ffi.new("unsigned char []", crypto_generichash_PERSONALBYTES)
ffi.memmove(_salt, salt, len(salt))
ffi.memmove(_person, person, len(person))
rc = lib.crypto_generichash_blake2b_salt_personal(digest, digest_size,
data, len(data),
key, len(key),
_salt, _person)
ensure(rc == 0, 'Unexpected failure',
raising=exc.RuntimeError)
return ffi.buffer(digest, digest_size)[:]
def generichash_blake2b_init(key=b'', salt=b'',
person=b'',
digest_size=crypto_generichash_BYTES):
"""
Create a new initialized blake2b hash state
:param key: must be at most
:py:data:`.crypto_generichash_KEYBYTES_MAX` long
:type key: bytes
:param salt: must be at most
:py:data:`.crypto_generichash_SALTBYTES` long;
will be zero-padded if needed
:type salt: bytes
    :param person: must be at most
                   :py:data:`.crypto_generichash_PERSONALBYTES` long;
                   will be zero-padded if needed
:type person: bytes
:param digest_size: must be at most
:py:data:`.crypto_generichash_BYTES_MAX`;
the default digest size is
:py:data:`.crypto_generichash_BYTES`
:type digest_size: int
    :return: an initialized state buffer
:rtype: bytes
"""
_checkparams(digest_size, key, salt, person)
statebuf = ffi.new("unsigned char[]", crypto_generichash_STATEBYTES)
# both _salt and _personal must be zero-padded to the correct length
_salt = ffi.new("unsigned char []", crypto_generichash_SALTBYTES)
_person = ffi.new("unsigned char []", crypto_generichash_PERSONALBYTES)
ffi.memmove(_salt, salt, len(salt))
ffi.memmove(_person, person, len(person))
rc = lib.crypto_generichash_blake2b_init_salt_personal(statebuf,
key, len(key),
digest_size,
_salt, _person)
ensure(rc == 0, 'Unexpected failure',
raising=exc.RuntimeError)
return statebuf
def generichash_blake2b_update(statebuf, data):
"""Update the blake2b hash state
:param statebuf: an initialized blake2b state buffer as returned from
:py:func:`.crypto_generichash_blake2b_init`
:type name: bytes
:param data:
:type data: bytes
"""
ensure(isinstance(data, bytes),
'Input data must be a bytes sequence',
raising=exc.TypeError)
rc = lib.crypto_generichash_blake2b_update(statebuf, data, len(data))
ensure(rc == 0, 'Unexpected failure',
raising=exc.RuntimeError)
def generichash_blake2b_final(statebuf, digest_size):
"""Finalize the blake2b hash state and return the digest.
:param statebuf:
:type statebuf: bytes
:param digest_size:
:type digest_size: int
:return: the blake2 digest of the passed-in data stream
:rtype: bytes
"""
_digest = ffi.new("unsigned char[]", crypto_generichash_BYTES_MAX)
rc = lib.crypto_generichash_blake2b_final(statebuf, _digest, digest_size)
ensure(rc == 0, 'Unexpected failure',
raising=exc.RuntimeError)
return ffi.buffer(_digest, digest_size)[:]
def generichash_blake2b_state_copy(statebuf):
"""Return a copy of the given blake2b hash state"""
newstate = ffi.new("unsigned char[]", crypto_generichash_STATEBYTES)
ffi.memmove(newstate, statebuf, crypto_generichash_STATEBYTES)
return newstate
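# --- usage sketch (illustrative): the one-shot and streaming interfaces
# produce the same digest for the same parameters ---
if __name__ == "__main__":
    _data = b"hello world"
    _oneshot = generichash_blake2b_salt_personal(_data)
    _state = generichash_blake2b_init()
    generichash_blake2b_update(_state, _data)
    _streamed = generichash_blake2b_final(_state, crypto_generichash_BYTES)
    assert _oneshot == _streamed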
| 37.152778 | 79 | 0.653458 | 931 | 8,025 | 5.411386 | 0.179377 | 0.172092 | 0.051608 | 0.064311 | 0.57503 | 0.457523 | 0.411473 | 0.393609 | 0.366614 | 0.366614 | 0 | 0.006775 | 0.264299 | 8,025 | 215 | 80 | 37.325581 | 0.846545 | 0.33919 | 0 | 0.382979 | 0 | 0 | 0.092839 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.06383 | false | 0 | 0.053191 | 0 | 0.159574 | 0.010638 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f4a7d4123b602e4f54a9fced92d3bb756eeae008 | 1,791 | py | Python | examples/subdag_example.py | zdgriffith/pycondor | 3daf8ca32eb206988790880e040821e15f1088f8 | [
"MIT"
] | null | null | null | examples/subdag_example.py | zdgriffith/pycondor | 3daf8ca32eb206988790880e040821e15f1088f8 | [
"MIT"
] | null | null | null | examples/subdag_example.py | zdgriffith/pycondor | 3daf8ca32eb206988790880e040821e15f1088f8 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import pycondor
if __name__ == "__main__":
# Declare the error, output, log, and submit directories for Condor Job
error = 'condor/error'
output = 'condor/output'
log = 'condor/log'
submit = 'condor/submit'
# Setting up first PyCondor Job
job1 = pycondor.Job('examplejob1', 'savelist.py',
error=error, output=output,
log=log, submit=submit, verbose=2)
# Adding arguments to job1
for i in range(10, 100, 10):
job1.add_arg('--length {}'.format(i), retry=7)
# Setting up second PyCondor Job
job2 = pycondor.Job('examplejob2', 'savelist.py',
error=error, output=output,
log=log, submit=submit, verbose=2)
    # Adding arguments to job2
job2.add_arg('--length 200', name='200jobname')
job2.add_arg('--length 400', name='400jobname', retry=3)
# Setting up a PyCondor Dagman
subdag = pycondor.Dagman('example_subdag', submit=submit, verbose=2)
    # Add job1 and job2 to the subdag
subdag.add_job(job1)
subdag.add_job(job2)
# Setting up third PyCondor Job
job3 = pycondor.Job('examplejob3', 'savelist.py',
error=error, output=output,
log=log, submit=submit, verbose=2)
    # Adding arguments to job3
for length in range(210, 220):
job3.add_arg('--length {}'.format(length))
    # Add an inter-job relationship.
# Ensure that the subdag is complete before job3 starts
subdag.add_child(job3)
# Setting up a PyCondor Dagman
dagman = pycondor.Dagman('exampledagman', submit=submit, verbose=2)
# Add jobs to dagman
dagman.add_job(job3)
dagman.add_subdag(subdag)
# Write all necessary submit files and submit job to Condor
dagman.build_submit()
| 33.166667 | 75 | 0.637074 | 230 | 1,791 | 4.878261 | 0.330435 | 0.058824 | 0.08467 | 0.089127 | 0.300357 | 0.216578 | 0.216578 | 0.216578 | 0.216578 | 0.216578 | 0 | 0.038003 | 0.250698 | 1,791 | 53 | 76 | 33.792453 | 0.798063 | 0.27359 | 0 | 0.206897 | 0 | 0 | 0.167185 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.034483 | 0 | 0.034483 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f4a8fd5090513da55d035c833ee6344677e6c096 | 14,774 | py | Python | hpbu/ment_rule_parser.py | skahl/hpbu | 72993961a7a064f59ca7c6305cd8cecb22ecc6b8 | [
"Apache-2.0"
] | null | null | null | hpbu/ment_rule_parser.py | skahl/hpbu | 72993961a7a064f59ca7c6305cd8cecb22ecc6b8 | [
"Apache-2.0"
] | null | null | null | hpbu/ment_rule_parser.py | skahl/hpbu | 72993961a7a064f59ca7c6305cd8cecb22ecc6b8 | [
"Apache-2.0"
] | null | null | null | # Copyright 2019 Sebastian Kahl
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Rule_parser module for parsing xml mental state rules
Created on 22.02.2018
@author: skahl
"""
# Imports from __future__ in case we're running Python 2
from __future__ import division, print_function
from __future__ import absolute_import, unicode_literals
import sys
try:
import xml.etree.cElementTree as ET
except:
import xml.etree.ElementTree as ET
from .ment import *
class Parser(object):
def __init__(self, filename):
try:
self.filename = filename
self.tree = ET.parse(self.filename)
self.root = self.tree.getroot()
except ET.ParseError as v:
row, column = v.position
print(str(v))
return None
except IOError as e:
print(str(e))
except:
print(str(sys.exc_info()[0]))
return None
def select_parser(self):
node = self.root
if node.tag == "RULES":
parser_id = node.get("parser", None)
if parser_id is not None:
if parser_id == "goals":
return GoalsParser(self.filename)
if parser_id == "realizations":
return RealizationsParser(self.filename)
if parser_id == "personmodel":
return PersonmodelParser(self.filename)
else:
print("No parser information found in xml. Exit!")
sys.exit(1)
class PersonmodelParser(object):
def __init__(self, filename):
try:
self.filename = filename
self.tree = ET.parse(self.filename)
self.root = self.tree.getroot()
except ET.ParseError as v:
row, column = v.position
print(str(v))
return None
except IOError as e:
print(str(e))
except:
print(str(sys.exc_info()[0]))
return None
def parse(self, node=None):
my_personmodels = {}
if node is None:
node = self.root
if node.tag == "RULES":
for personmodel in node:
my_id = personmodel.get("id", None)
# personmodel ID is necessary to identify agents in multiagent settings
if my_id is not None:
new_personmodel = {}
new_personmodel["me"] = []
new_personmodel["you"] = []
new_personmodel["we"] = []
new_personmodel["agents"] = {}
you_models = defaultdict(list)
# you_model contains several you, with several schemas, containing sequences
for state in personmodel:
if state.tag == "me":
for blf in state:
new_personmodel["me"].append( blf.text )
if state.tag == "we":
for blf in state:
new_personmodel["we"].append( blf.text )
if state.tag == "you":
you_id = state.get("id", None)
                            if you_id is not None:
                                for seq in state:
                                    you_models[you_id].append(int(seq.text))
you_present = state.get("present", None)
if you_present is not None:
if you_present == "true":
new_personmodel["agents"].update( {you_id: 0.} )
new_personmodel["you"] = you_models
my_personmodels[my_id] = PersonModel(me=new_personmodel["me"],
you=new_personmodel["you"],
we=new_personmodel["we"],
agents=new_personmodel["agents"])
return my_personmodels
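# A minimal sketch of the XML shape PersonmodelParser.parse() expects,
# inferred from the code above (tag and attribute names only; the values
# are illustrative and not taken from any real rule file):
_EXAMPLE_PERSONMODEL_XML = """\
<RULES parser="personmodel">
  <personmodel id="agent_a">
    <me><blf>belief_1</blf></me>
    <we><blf>belief_2</blf></we>
    <you id="agent_b" present="true"><seq>3</seq></you>
  </personmodel>
</RULES>
"""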
class GoalsParser(object):
def __init__(self, filename):
try:
self.filename = filename
self.tree = ET.parse(self.filename)
self.root = self.tree.getroot()
except ET.ParseError as v:
row, column = v.position
print(str(v))
return None
except IOError as e:
print(str(e))
except:
print(str(sys.exc_info()[0]))
return None
def parse(self, node=None):
goals_dict = {}
if node is None:
node = self.root
if node.tag == "RULES":
for stategoalpair in node:
new_goal = {}
new_goal["id"] = int(stategoalpair.get("id", None))
new_goal["comment"] = stategoalpair.get("comment", None)
realizations = []
for realization in stategoalpair:
if realization.tag == "state":
ms = realization.find("mentalstate")
if ms is not None:
me_blfs = []
me = ms.find("me")
if me is not None:
for blf in me:
me_blfs.append( Belief(blf.text) )
you_blfs = []
you = ms.find("you")
if you is not None:
for blf in you:
you_blfs.append( Belief(blf.text) )
we_blfs = []
we = ms.find("we")
if we is not None:
for blf in we:
we_blfs.append( Belief(blf.text) )
new_goal["state"] = MentalState(me=me_blfs, you=you_blfs, we=we_blfs)
if realization.tag == "goal":
ms = realization.find("mentalstate")
if ms is not None:
me_blfs = []
me = ms.find("me")
if me is not None:
for blf in me:
me_blfs.append( Belief(blf.text) )
you_blfs = []
you = ms.find("you")
if you is not None:
for blf in you:
you_blfs.append( Belief(blf.text) )
we_blfs = []
we = ms.find("we")
if we is not None:
for blf in we:
we_blfs.append( Belief(blf.text) )
new_goal["goal"] = MentalState(me=me_blfs, you=you_blfs, we=we_blfs)
if realization.tag == "realization":
realizations.append(int(realization.get("id")))
new_goal["realizations"] = realizations
goals_dict[new_goal["id"]] = Goals(idx=new_goal["id"],
comment=new_goal["comment"],
state=new_goal["state"],
realizations=new_goal["realizations"],
goal=new_goal["goal"])
return goals_dict
class RealizationsParser(object):
def __init__(self, filename):
try:
self.filename = filename
self.tree = ET.parse(self.filename)
self.root = self.tree.getroot()
except ET.ParseError as v:
row, column = v.position
print(str(v))
return None
except IOError as e:
print(str(e))
except:
print(str(sys.exc_info()[0]))
return None
def parse(self, node=None):
realizations_dict = {}
if node is None:
node = self.root
""" Outermost tag == RULES """
if node.tag == "RULES":
for realization in node:
new_realization = {}
new_realization["id"] = int(realization.get("id", None))
new_realization["comment"] = realization.get("comment", None)
# state and goal pairs will also be contained in the substates list
substates = []
for state in realization:
if state.tag == "state":
ms = state.find("mentalstate")
if ms is not None:
me_blfs = []
me = ms.find("me")
if me is not None:
for blf in me:
me_blfs.append( Belief(blf.text) )
you_blfs = []
you = ms.find("you")
if you is not None:
for blf in you:
you_blfs.append( Belief(blf.text) )
we_blfs = []
we = ms.find("we")
if we is not None:
for blf in we:
we_blfs.append( Belief(blf.text) )
new_realization["state"] = MentalState(me=me_blfs, you=you_blfs, we=we_blfs)
if state.tag == "goal":
ms = state.find("mentalstate")
if ms is not None:
me_blfs = []
me = ms.find("me")
if me is not None:
for blf in me:
me_blfs.append( Belief(blf.text) )
you_blfs = []
you = ms.find("you")
if you is not None:
for blf in you:
you_blfs.append( Belief(blf.text) )
we_blfs = []
we = ms.find("we")
if we is not None:
for blf in we:
we_blfs.append( Belief(blf.text) )
new_realization["goal"] = MentalState(me=me_blfs, you=you_blfs, we=we_blfs)
if state.tag == "substates":
for child in state:
# motor intention acting out beliefs
if child.tag == "intention":
belief = child.get("belief", None)
is_signaling = child.get("signaling", None)
if belief is None:
print("error parsing goal realization with id=" + str(new_realization["id"]) + ": belief of intention cannot be None")
is_signaling = True if is_signaling is not None and is_signaling == "true" else False
intention = Intention(intent=belief, signaling=is_signaling)
substates.append( intention )
# check for intermittent mental state
if child.tag == "mentalstate":
me_blfs = []
me = child.find("me")
if me is not None:
for blf in me:
me_blfs.append( Belief(blf.text) )
you_blfs = []
you = child.find("you")
if you is not None:
for blf in you:
you_blfs.append( Belief(blf.text) )
we_blfs = []
we = child.find("we")
if we is not None:
for blf in we:
we_blfs.append( Belief(blf.text) )
substates.append( MentalState(me=me_blfs, you=you_blfs, we=we_blfs) )
# add state and goal MentalStates to substates list for better comparability
substates.insert(0, new_realization["state"])
substates.append(new_realization["goal"])
realizations_dict[new_realization["id"]] = Realization(idx=new_realization["id"],
comment=new_realization["comment"],
state=new_realization["state"],
goal=new_realization["goal"],
substates=substates)
return realizations_dict
if __name__ == "__main__":
realizations_path = "../../resource/goal_realizations.xml"
goals_path = "../../resource/state_goal_tuples.xml"
while realizations_path == "":
realizations_path = input("path and name of goal realizations xml file: ")
realizations_parser = RealizationsParser(filename=realizations_path)
realizations_dict = realizations_parser.parse()
while goals_path == "":
goals_path = input("path and name of goal state tuples xml file: ")
goals_parser = GoalsParser(filename=goals_path)
goals_dict = goals_parser.parse()
# printout
for realization_id, realization in realizations_dict.items():
print(realization)
for goal_id, goal in goals_dict.items():
print(goal)
| 37.402532 | 154 | 0.43651 | 1,400 | 14,774 | 4.475 | 0.15 | 0.019154 | 0.034477 | 0.028731 | 0.431764 | 0.422027 | 0.406065 | 0.393615 | 0.388508 | 0.388508 | 0 | 0.003138 | 0.482334 | 14,774 | 395 | 155 | 37.402532 | 0.81603 | 0.072086 | 0 | 0.557971 | 0 | 0 | 0.050396 | 0.005274 | 0 | 0 | 0 | 0 | 0 | 1 | 0.028986 | false | 0 | 0.021739 | 0 | 0.115942 | 0.061594 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f4acf4fb502466b55ec177941d157a088dda3a06 | 222 | py | Python | echo-verify-adler32/test/subprocess-pipe.py | pjcon/ral-ceph-tools | ca97e3cea192727d81c924a7bb134e3738c9bc73 | [
"Apache-2.0"
] | null | null | null | echo-verify-adler32/test/subprocess-pipe.py | pjcon/ral-ceph-tools | ca97e3cea192727d81c924a7bb134e3738c9bc73 | [
"Apache-2.0"
] | null | null | null | echo-verify-adler32/test/subprocess-pipe.py | pjcon/ral-ceph-tools | ca97e3cea192727d81c924a7bb134e3738c9bc73 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python
import subprocess
# equivalent to the shell pipeline: ls -l | grep subprocess-pipe.py
ps = subprocess.Popen(('ls', '-l'), stdout=subprocess.PIPE)
output = subprocess.check_output(('grep', 'subprocess-pipe.py'), stdin=ps.stdout)
# close our copy of the pipe's read end so `ls` gets SIGPIPE if `grep` exits early
ps.stdout.close()
ps.wait()
print("success")
print(output)
| 20.181818 | 81 | 0.711712 | 30 | 222 | 5.233333 | 0.633333 | 0.178344 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.085586 | 222 | 10 | 82 | 22.2 | 0.773399 | 0.072072 | 0 | 0 | 0 | 0 | 0.160976 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.166667 | 0 | 0.166667 | 0.333333 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f4b06ecbd6e31ca7ac46a39df0c950d320096e54 | 2,146 | py | Python | main_legacy.py | ApocalyVec/ApocalyWarDrive | df9aeabbfc9710c328090739af19d868226a235f | [
"MIT"
] | null | null | null | main_legacy.py | ApocalyVec/ApocalyWarDrive | df9aeabbfc9710c328090739af19d868226a235f | [
"MIT"
] | null | null | null | main_legacy.py | ApocalyVec/ApocalyWarDrive | df9aeabbfc9710c328090739af19d868226a235f | [
"MIT"
] | null | null | null | import argparse
import rssi
import numpy as np
"""
example usage
"""
def main(args):
"""
    :param args: arguments given by the user
arguments for main:
Required:
-nwi: the name of your WIFI interface.
For MAC users, use this terminal command: system_profiler SPNetworkDataType | grep Wi-Fi -A10
The name is denoted by "BSD Device Name", in my case, it's en0
-itv: time interval between samples, unit = millisecond
-drt: the duration during which to capture samples, unit = millisecond
Optional:
-ave: take the average of give number of samples
"""
# start parsing arguments
nt_interface = args.nwInterface
sampling_interval = args.interval
sampling_duration = args.duration
num_samples = int(sampling_duration / sampling_interval)
config_msg = 'The WI-FI interface for scanning is ' + nt_interface
sampling_msg = 'This will take ' + str(num_samples) + ' samples in ' + str(
sampling_duration) + ' ms.\nPress enter to continue...'
input(config_msg + '\n' + sampling_msg)
# end of parsing arguments
# initialize scanner
rssi_scanner = rssi.RSSI_Scan(nt_interface)
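    # Possible next step (assumed API -- verify against the installed `rssi`
    # package before relying on it): sample access-point info per interval,
    # e.g. ap_info = rssi_scanner.getAPinfo(networks=True, sudo=True)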
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-nwi', '--nwInterface', type=str, required=True,
help='the duration during which to capture samples, unit = millisecond')
parser.add_argument('-i', '--interval', type=int, required=True,
help='time interval between samples, unit = millisecond')
parser.add_argument('-d', '--duration', type=int, required=True,
help='the duration during which to capture samples, unit = millisecond')
# parser.add_argument("--nice", type=str2bool, nargs='?',
# const=True, default=NICE,
# help="Activate nice mode.")
# parser.add_argument('-fl', '--full_length', type=str2bool, nargs='?',
# help='group in full length', const=True, default=False)
args = parser.parse_args()
main(args)
| 32.515152 | 105 | 0.632805 | 255 | 2,146 | 5.2 | 0.443137 | 0.041478 | 0.082956 | 0.049774 | 0.266968 | 0.244344 | 0.169683 | 0.169683 | 0.169683 | 0.129713 | 0 | 0.003161 | 0.262815 | 2,146 | 65 | 106 | 33.015385 | 0.835019 | 0.401212 | 0 | 0.086957 | 0 | 0 | 0.269167 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.043478 | false | 0 | 0.130435 | 0 | 0.173913 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f4b45bc34f7dd8a050fed193cf4002c91c78f733 | 2,089 | py | Python | config.py | DougForrest/plaquebox-paper | 8bbfbab84e022a753d26807323e2a0d776f4fb7a | [
"MIT"
] | null | null | null | config.py | DougForrest/plaquebox-paper | 8bbfbab84e022a753d26807323e2a0d776f4fb7a | [
"MIT"
] | null | null | null | config.py | DougForrest/plaquebox-paper | 8bbfbab84e022a753d26807323e2a0d776f4fb7a | [
"MIT"
] | null | null | null | from datetime import datetime
import os
from fastai.vision import models
experiment_name = 'original_w_negative'
experiment_description = """Using the original dataset including
the null observations"""
batch_size = 256
model_name = 'resnet18'
image_size = 256
model = models.resnet18
databunch_train_validation = 'databunch_train_validation.pkl'
databunch_test = 'databunch_test.pkl'
v1_epochs = 10
v2_epochs = 20
run_date = datetime.now().strftime('%Y_%m_%d')
# when running as the `jupyter` user (presumably the remote training VM),
# read tiles from the attached disk; otherwise fall back to a local layout
if os.environ.get('USER', None) == 'jupyter':
input_path = os.path.join('/mnt', 'disks', 'disk-1', 'data', 'tiles')
output_path = os.path.join('/mnt', 'disks', 'disk-1', 'data')
csv_dir = os.path.join('data', 'CSVs')
gs_bucket = "gs://plaquebox-paper/experiment"
gs_results_dir = f"gs://plaquebox-paper/experiment/{experiment_name}/results"
gs_data_dir = f"gs://plaquebox-paper/experiment/{experiment_name}/data"
gs_model_dir = f"gs://plaquebox-paper/experiment/{experiment_name}/model"
else:
input_path = os.path.join('data')
output_path = input_path
csv_dir = os.path.join(input_path, 'CSVs')
results_dir = os.path.join(os.path.join(output_path,
experiment_name,
'results'))
data_dir = os.path.join(os.path.join(output_path,
experiment_name,
'data'))
model_dir = os.path.join(os.path.join(output_path,
experiment_name,
'model'))
for dir_name in [results_dir, data_dir, model_dir]:
if not os.path.exists(dir_name):
os.makedirs(dir_name)
train = os.path.join(csv_dir, 'train_multilabel.csv')
validation = os.path.join(csv_dir, 'validation_multilabel.csv')
test = os.path.join(csv_dir, 'test_multilabel.csv')
img_path = os.path.join(input_path,
'tiles')
img_path_test = os.path.join(input_path,
'tiles')
image_classes = ['cored', 'diffuse', 'CAA']
| 33.693548 | 81 | 0.620393 | 266 | 2,089 | 4.635338 | 0.296992 | 0.082725 | 0.129765 | 0.052717 | 0.402271 | 0.310624 | 0.271695 | 0.271695 | 0.164639 | 0.114355 | 0 | 0.011509 | 0.251316 | 2,089 | 61 | 82 | 34.245902 | 0.776854 | 0 | 0 | 0.106383 | 0 | 0 | 0.258976 | 0.120632 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.06383 | 0 | 0.06383 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f4b57fe13f575b5758f30e03262479b85a342554 | 1,349 | py | Python | scripts/mir_trigger_client.py | El-Maco/mqtt_bridge | 2ac6e876de0037a7cd2a3a8a49798ca78ecff47c | [
"MIT"
] | null | null | null | scripts/mir_trigger_client.py | El-Maco/mqtt_bridge | 2ac6e876de0037a7cd2a3a8a49798ca78ecff47c | [
"MIT"
] | null | null | null | scripts/mir_trigger_client.py | El-Maco/mqtt_bridge | 2ac6e876de0037a7cd2a3a8a49798ca78ecff47c | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import rospy
from std_srvs.srv import Trigger, TriggerRequest
from std_msgs.msg import Bool, Int32, Float64
from move_base_msgs.msg import MoveBaseActionGoal
from geometry_msgs.msg import Point, Quaternion
from tf.transformations import quaternion_from_euler
# goal_x, goal_y = 11.8154, 9.47028
# orient_z, orient_w = 0.9985, -0.0552
target_point = Point(11.8154, 9.47028, 0.0)
# NOTE: quaternion_from_euler expects radians; -137.678 looks like a value
# in degrees, so it may need math.radians() -- kept unchanged here.
q = quaternion_from_euler(0, 0, -137.678)
quaternion = Quaternion(q[0], q[1], q[2], q[3])
pickup_goal = MoveBaseActionGoal()
pickup_goal.goal.target_pose.pose.position = target_point
pickup_goal.goal.target_pose.pose.orientation = quaternion
pickup_goal.goal.target_pose.header.frame_id = "map"
def callback(data):
rospy.loginfo("{}: I heard {}".format(rospy.get_caller_id(), data.data))
pickup = data.data
rospy.loginfo("msg_type: {}".format(type(pickup)))
if pickup:
pub.publish(pickup_goal)
rospy.wait_for_service('/mir_trigger_service')
mir_trigger_service = rospy.ServiceProxy('/mir_trigger_service', Trigger)
trig_req = TriggerRequest()
res = mir_trigger_service(trig_req)
print(res)
rospy.init_node('mir_trigger_client')
pub = rospy.Publisher('/move_base/goal', MoveBaseActionGoal, queue_size=10)
rospy.Subscriber('/pickup', Int32, callback)
rospy.spin()
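# To trigger a pickup from the command line with standard ROS tooling:
#   rostopic pub /pickup std_msgs/Int32 "data: 1" -1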
| 28.702128 | 81 | 0.73536 | 194 | 1,349 | 4.886598 | 0.438144 | 0.052743 | 0.07173 | 0.063291 | 0.084388 | 0.059072 | 0 | 0 | 0 | 0 | 0 | 0.048401 | 0.142328 | 1,349 | 46 | 82 | 29.326087 | 0.770959 | 0.067457 | 0 | 0 | 0 | 0 | 0.086991 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.035714 | false | 0 | 0.214286 | 0 | 0.25 | 0.035714 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f4bb459b9b8819b4aa0bdcba2e0ea135862bbe66 | 3,632 | py | Python | dynamic_disease_network_ddp/.ipynb_checkpoints/ncsr_in_ddp-checkpoint.py | sjaya09/Atrial-Fibrillation-UNCW-project-2021 | f612c130d5a4cffa5c8df197c589101e578a6447 | [
"Unlicense",
"MIT"
] | 1 | 2021-02-11T21:45:48.000Z | 2021-02-11T21:45:48.000Z | dynamic_disease_network_ddp/.ipynb_checkpoints/ncsr_in_ddp-checkpoint.py | sjaya09/Atrial-Fibrillation-UNCW-project-2021 | f612c130d5a4cffa5c8df197c589101e578a6447 | [
"Unlicense",
"MIT"
] | 1 | 2021-02-08T20:25:54.000Z | 2021-02-08T20:25:54.000Z | dynamic_disease_network_ddp/.ipynb_checkpoints/ncsr_in_ddp-checkpoint.py | sjaya09/Atrial-Fibrillation-UNCW-project-2021 | f612c130d5a4cffa5c8df197c589101e578a6447 | [
"Unlicense",
"MIT"
] | null | null | null | import pickle
import torch
import torch.optim as optim
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from dynamic_disease_network_ddp import data_loader
from dynamic_disease_network_ddp import models
import pandas as pd
ncsr = pd.read_csv('../justage_vars_init.csv', index_col=0)
pkl = open('../ncsr_for_ddp.pickle', "rb")
ddp_data = {}
ddp_data['ncsr'] = pickle.load(pkl)
pkl.close()
max_len = max([len(ddp_data['ncsr'][x]) for x in range(len(ddp_data['ncsr']))])
n_event_type = dim_process = len(ncsr.columns)
n_sample = len(ddp_data['ncsr'])
context_dim = 1
train_input = data_loader.process_seq(ddp_data, list(range(n_sample)), max_len=max_len, n_event_type=n_event_type, tag_batch = 'ncsr', dtype=np.float32)
batch_input_np = list(train_input)
df_patient_static_mat = np.ones((1, n_sample)).astype('float32')
batch_input_np.append(df_patient_static_mat)
gap = batch_input_np[0][:-1, :] - batch_input_np[0][1:, :]
gap_mean = np.mean(gap)
gap_std = np.std(gap)
alpha_init = np.float32(
np.log(
np.random.uniform(
low=0.5, high=1.5,
size=(dim_process, dim_process)
)
)
)
lambda_init = np.float32(
np.log(
np.random.uniform(
low=10.0, high=20.0,
size=(dim_process, dim_process)
)
)
)
ddp_model = models.DDP(
n_event_type=n_event_type,
n_context_dim=context_dim,
first_occurrence_only=False,
embedding_size=50,
rnn_hidden_size=50,
alpha_mat_np=alpha_init,
lambda_mat_np=lambda_init,
gap_mean=gap_mean,
gap_scale=gap_std
)
opt_ddp = optim.SGD(ddp_model.parameters(), lr=0.001, momentum=0.9)
c_hawkes_model = models.CHawkes(n_event_type=n_event_type, n_context_dim=context_dim,
first_occurrence_only=False, alpha_mat_np=alpha_init, lambda_mat_np=lambda_init)
opt_c_hawkes = optim.SGD(c_hawkes_model.parameters(), lr = 0.001, momentum=0.9)
with torch.no_grad():
test_batch = data_loader.get_whole_batch(batch_input_np)
mat_dist_ddp = list()
mat_dist_hawkes = list()
rnn_sd = list()
batch_size = 100
training_itr = 1000
report_step = 1
current_best = 10000
for i in range(training_itr):
if i % report_step == 0:
with torch.no_grad():
test_batch = data_loader.get_whole_batch(batch_input_np)
ddp_model.set_input(*test_batch)
weights = ddp_model.graph_weights_seq.numpy()
rnn_sd.append(np.std(weights))
avg_weight_list = list()
a = test_batch[4].numpy()
b = test_batch[2].numpy()
for j in range(n_event_type):
ind = np.logical_not(np.logical_and(a == 1, b == j))
weights_cp = np.copy(weights)
weights_cp[ind] = np.nan
avg_weight_list.append(np.nanmean(weights_cp))
avg_weight = np.array(avg_weight_list)
mat_dist_ddp.append(
np.sum(np.abs(torch.exp(ddp_model.alpha_mat).numpy() * avg_weight)))
mat_dist_hawkes.append(np.sum(np.abs(torch.exp(c_hawkes_model.alpha_mat).numpy())))
mini_batch = data_loader.get_mini_batch(batch_size, batch_input_np)
ddp_model.set_input(*mini_batch)
log_lik = ddp_model() * (-1.0)
models.cross_ent_one_step(log_lik, opt_ddp)
c_hawkes_model.set_input(*mini_batch)
log_lik2 = c_hawkes_model() * (-1.0)
models.cross_ent_one_step(log_lik2, opt_c_hawkes)
with open('ddp_1.pkl', 'wb') as output:
pickle.dump(ddp_model, output)
| 26.510949 | 152 | 0.675385 | 571 | 3,632 | 3.940455 | 0.259194 | 0.021333 | 0.035556 | 0.024444 | 0.389333 | 0.355556 | 0.3 | 0.259111 | 0.231556 | 0.172 | 0 | 0.022554 | 0.206498 | 3,632 | 136 | 153 | 26.705882 | 0.758154 | 0 | 0 | 0.126316 | 0 | 0 | 0.023678 | 0.012665 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.094737 | 0 | 0.094737 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f4bc22d214e74f90098990b5f144e4ab44ff73b7 | 5,295 | py | Python | src/averell/readers/ecpa.py | linhd-postdata/dalton | 8e03d5d721e3592cedde773ec5f6f9b6cff91ec0 | [
"Apache-2.0"
] | 2 | 2020-10-26T12:57:27.000Z | 2021-09-07T11:20:33.000Z | src/averell/readers/ecpa.py | linhd-postdata/dalton | 8e03d5d721e3592cedde773ec5f6f9b6cff91ec0 | [
"Apache-2.0"
] | 15 | 2020-01-09T15:48:44.000Z | 2021-07-05T09:39:24.000Z | src/averell/readers/ecpa.py | linhd-postdata/dalton | 8e03d5d721e3592cedde773ec5f6f9b6cff91ec0 | [
"Apache-2.0"
] | 1 | 2021-07-07T01:16:31.000Z | 2021-07-07T01:16:31.000Z | import json
import re
import xml.etree.ElementTree as ETree
from averell.utils import TEI_NAMESPACE as NS
from averell.utils import XML_NS
ECEP_NS = "{http://www.eighteenthcenturypoetry.org/ns}"
def get_poem_info(xml_file, lines_info, authors):
"""Poem parser for 'ECPA corpus'.
We read the data and find elements like title, author, year, etc. Then
we iterate over the poem text and we look for each stanza, line, word
and syllable data.
:param xml_file: Path for the poem xml file
:param lines_info: Path for the lines json file
:param authors: dict with authors info
:return: Dict with the data obtained from the poem
:rtype: dict
"""
poem = {}
corpus_name = xml_file.parts[-6]
tree = ETree.parse(str(xml_file))
root = tree.getroot()
manually_checked = False
metadata = root.attrib
title = root.find(f".//{NS}head[@type='main']")
poem_id = metadata.get(f"{XML_NS}id")
poem_info = authors[1].get(poem_id)
if poem_info:
title_text = poem_info.get("title")
else:
title_text = " ".join(word.text for word in title.findall(f"{NS}w"))
author = root.find(f"{NS}link[@type='author']").get("target").split("#")[1]
try:
author_name = next(aut.get("name") for aut in authors[0].values() if
aut.get("author") == author)
except StopIteration:
author_name = author
poem.update({
"poem_title": title_text,
"author": author_name,
})
alt_title = root.find(f".//{NS}head[@type='sub']")
    # ElementTree elements are falsy when they have no children, so test
    # explicitly against None
    if alt_title is not None:
alt_title_text = re.sub(r"[\n ]+", " ",
"".join(alt_title.itertext())).strip()
poem.update({"poem_alt_title": alt_title_text})
line_group_list = root.findall(f".//{NS}lg")
line_group_list2 = []
for lg_number, lg in enumerate(line_group_list):
if not lg.find(f".//{NS}lg"):
if not lg.get("type") and not lg.get("met"):
line_group_list2.append(lg)
if lg.get("met"):
line_group_list2.append(lg)
stanza_list = []
line_number = 0
for stanza_number, line_group in enumerate(line_group_list2):
stanza_type = None
stanza_text = []
line_list = []
for n, line in enumerate(line_group.findall(f"{NS}l")):
line_dict = {}
line_id = line.attrib.get(f"{XML_NS}id")
line_length = None
met = None
foot = None
metre = None
line_info = lines_info.get(line_id)
if line_info is not None:
if n == 0:
stanza_type = line_info.get("stanzas").get("id")
syllab = line_info.get("syllab")
line_length = int(syllab) if syllab else None
met = line_info.get("met").strip("/") or None
foot = line_info.get("foot").get("id")
metre = line_info.get("footnum").get("id")
real = line_info.get("real")
if real:
manually_checked = True
met = real.strip("/")
foot = line_info.get("realfoot").get("id")
metre = line_info.get("realfootnum").get("id")
line_dict.update({
"metrical_pattern": met,
"line_length": line_length,
"foot": foot,
"metre": metre,
})
word_list = []
token_list = []
for token in line:
tag = token.tag
if tag == f"{NS}w":
word_list.append({"word_text": token.text})
if tag in [f"{NS}w", f"{NS}c", f"{NS}pc"]:
token_list.append(token.text or "")
line_text = "".join(token_list).strip()
line_dict.update({
"line_number": line_number + 1,
"line_text": "".join(line_text).strip(),
"words": word_list,
})
line_list.append(line_dict)
stanza_text.append(line_text)
line_number += 1
st = "\n".join(stanza_text)
stanza_list.append({
"stanza_number": stanza_number + 1,
"stanza_type": stanza_type,
"lines": line_list,
"stanza_text": st,
})
poem.update({
"manually_checked": manually_checked,
"stanzas": stanza_list,
"corpus": corpus_name,
})
return poem
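# Sketch of the dict returned by get_poem_info() (values illustrative):
# {
#     "poem_title": "...", "author": "...", "manually_checked": False,
#     "corpus": "ecpa",
#     "stanzas": [{"stanza_number": 1, "stanza_type": "...",
#                  "stanza_text": "...",
#                  "lines": [{"line_number": 1, "line_text": "...",
#                             "metrical_pattern": "...", "line_length": 10,
#                             "foot": "...", "metre": "...",
#                             "words": [{"word_text": "..."}]}]}],
# }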
def get_features(path):
"""Function to find each poem file and parse it
:param path: Corpus Path
:return: List of poem dicts
:rtype: list
"""
authors_file = (
path / "ECPA-master" / "web" / "resources"
/ "models" / "authwork_mdp.json"
)
authors = json.loads(authors_file.read_text())
xml_files = path / "ECPA-master" / "web" / "works"
feature_list = []
for filename in xml_files.rglob("*/*.xml"):
folder = filename.parent
lines_file = f"{filename.parts[-2]}_l.json"
lines_path = folder / lines_file
lines_info = json.loads(lines_path.read_text())
result = get_poem_info(filename, lines_info, authors)
feature_list.append(result)
return feature_list
| 35.777027 | 79 | 0.551464 | 669 | 5,295 | 4.171898 | 0.230194 | 0.011824 | 0.03153 | 0.011824 | 0.075958 | 0.053744 | 0.038696 | 0.021498 | 0 | 0 | 0 | 0.003889 | 0.320113 | 5,295 | 147 | 80 | 36.020408 | 0.771389 | 0.094051 | 0 | 0.089431 | 0 | 0 | 0.117709 | 0.021133 | 0 | 0 | 0 | 0 | 0 | 1 | 0.01626 | false | 0 | 0.04065 | 0 | 0.073171 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f4bfa083c95377c776a16447e87dff7b9584198a | 5,850 | py | Python | darkness-engine/tools/codegen/ShaderCompiler.py | Karmiska/Darkness | c87eaf067a2707a0141909125ff461f69a3812e0 | [
"MIT"
] | 6 | 2019-10-17T11:31:55.000Z | 2022-02-11T08:51:20.000Z | darkness-engine/tools/codegen/ShaderCompiler.py | Karmiska/Darkness | c87eaf067a2707a0141909125ff461f69a3812e0 | [
"MIT"
] | 1 | 2020-08-11T09:01:29.000Z | 2020-08-11T09:01:29.000Z | darkness-engine/tools/codegen/ShaderCompiler.py | Karmiska/Darkness | c87eaf067a2707a0141909125ff461f69a3812e0 | [
"MIT"
] | 1 | 2020-06-02T15:48:20.000Z | 2020-06-02T15:48:20.000Z | import os
import string
import random
from optparse import OptionParser
from PreprocessorHLSL import PreprocessorException
from PreprocessorHLSL import Preprocessor
from LexicalAnalyzerHLSL import LexicalAnalyzer
from SyntaxAnalyzerHLSL import SyntaxAnalyzer
def stage_from_filename(filename):
if filename[-7:] == 'cs.hlsl':
return 'Compute'
if filename[-7:] == 'vs.hlsl':
return 'Vertex'
if filename[-7:] == 'ps.hlsl':
return 'Pixel'
if filename[-7:] == 'gs.hlsl':
return 'Geometry'
if filename[-7:] == 'hs.hlsl':
return 'Hull'
if filename[-7:] == 'ds.hlsl':
return 'Domain'
# glslangValidator stage names; D3D hull shaders correspond to GLSL
# tessellation control ('tesc') and domain shaders to tessellation
# evaluation ('tese')
VulkanStages = {
    'Compute' : 'comp',
    'Domain'  : 'tese',
    'Geometry': 'geom',
    'Hull'    : 'tesc',
    'Pixel'   : 'frag',
    'Vertex'  : 'vert'
}
class VulkanCompiler:
def __init__(self, defines, includes):
self.compiler_binary = 'C:\\VulkanSDK\\1.0.61.1\\Bin\\glslangValidator.exe'
self.input_flag = '-D --auto-map-bindings -e main -V'
self.output_flag = '-o'
self.include_paths = []
self.defines = []
if includes is not None:
self.include_paths.extend(includes)
if defines is not None:
self.defines.extend(defines)
def compile(self, input_file, output_file, bindless):
temporary_file_path = self.createPreprocessedFile(input_file)
os.system(self.compiler_binary+' -S '+VulkanStages[stage_from_filename(input_file)]+' '+self.input_flag+' '+temporary_file_path+' '+self.output_flag+' '+output_file)
self.removePreprocessedFile(temporary_file_path)
def removePreprocessedFile(self, input_file):
os.remove(input_file)
def createPreprocessedFile(self, input_file):
(dir, filename) = os.path.split(input_file)
temporary_file_path = os.path.join(dir, self.createTemporaryFilename(filename))
with open(temporary_file_path, 'w') as file:
with open(input_file, 'r') as input_file:
preprocessor = Preprocessor(input_file, self.defines, self.include_paths)
for chr in preprocessor:
file.write(chr)
return temporary_file_path
def createTemporaryFilename(self, inputFile):
random_part = ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(8))
return inputFile + '.' + random_part
DX12Stages = {
'Compute' : 'cs_5_1',
'Domain' : 'ds_5_1',
'Geometry': 'gs_5_1',
'Hull' : 'hs_5_1',
'Pixel' : 'ps_5_1',
'Vertex' : 'vs_5_1'
}
class DX12Compiler:
def __init__(self, defines, includes):
self.compilerBinary = '"C:\\Program Files (x86)\\Windows Kits\\10\\bin\\10.0.16299.0\\x64\\fxc.exe"'
self.inputFlag = ''
# /Od for disable optimization
# /Zpr = Row major
self.outputFlag = '/nologo /Zpr /Od /Zi /Fo'
self.include_paths = []
self.defines = []
if includes is not None:
self.include_paths.extend(includes)
if defines is not None:
self.defines.extend(defines)
def profile(self, inputFile):
return DX12Stages[stage_from_filename(inputFile)]
def compile(self, input_file, output_file, bindless):
# check input_file for bindless texture
# /enable_unbounded_descriptor_tables
defineStr = ''
for i in range(len(self.defines)):
defineStr += '/D'+str(self.defines[i])
if i < len(self.defines)-1:
defineStr += ' '
filename, file_extension = os.path.splitext(output_file)
if not bindless:
cmd = self.compilerBinary+' /T '+self.profile(input_file)+' '+input_file+' '+self.outputFlag+' '+output_file+' /Fd '+filename+'.pdb'
if defineStr != '':
cmd += ' '+defineStr
os.system(cmd)
else:
cmd = self.compilerBinary+' /enable_unbounded_descriptor_tables /T '+self.profile(input_file)+' '+input_file+' '+self.outputFlag+' '+output_file+' /Fd '+filename+'.pdb'
if defineStr != '':
cmd += ' '+defineStr
os.system(cmd)
class Compiler:
def __init__(self, graphicsApi, defines, includes):
if graphicsApi.lower() == "vulkan":
self.compiler = VulkanCompiler(defines, includes)
elif graphicsApi.lower() == "dx12":
self.compiler = DX12Compiler(defines, includes)
def compile(self, inputFile, outputFile, bindless):
self.compiler.compile(inputFile, outputFile, bindless)
# cd "$(ProjectDir)..\..\data\engine\graphics\shaders" &&
# del %(Filename).frag.spv &&
# C:\VulkanSDK\1.0.21.1\Bin\glslangValidator.exe -s -V "%(FullPath)" &&
# rename frag.spv %(Filename).frag.spv
# -i C:\work\darkness\darkness-engine\shaders\core\culling\OcclusionCulling.cs.hlsl -t C:\work\darkness\darkness-engine\tools\codegen\ShaderLoadInterfaceTemplate.cpp -o C:\work\darkness\darkness-engine\include\shaders\core\culling\OcclusionCulling.cs.cpp -b C:\work\darkness\darkness-engine\data\shaders\dx12\core\culling\OcclusionCulling.cs.cso -s Compute -x C:\work\darkness\darkness-engine\data\shaders\dx12\core\culling\OcclusionCulling.cs.support
def main():
parser = OptionParser()
parser.add_option("-g", "--graphics-api", dest="graphicsapi", help="select graphics api. example 1: -g VULKAN , example 2: -g DX12")
parser.add_option("-i", "--input", dest="input", help="input file. example: -i C:\\work\\Test.frag")
parser.add_option("-o", "--output", dest="output", help="output file. example: -o C:\\work\\Test.frag.spv")
parser.add_option("-D", "--define", action='append', dest="define", help="example: -DDEBUG")
parser.add_option("-I", "--include", action='append', dest="include", help="example: -I ../inc")
options, arguments = parser.parse_args()
bindless = False
with open(options.input, 'r') as file:
preprocessor = Preprocessor(file, options.define, options.include)
lexical_analyzer = LexicalAnalyzer(preprocessor)
syntax_analyzer = SyntaxAnalyzer(lexical_analyzer)
for token in syntax_analyzer.root_level_declarations():
if token.type != 'cbuffer':
if 'Bindless' in token.type:
bindless = True
compiler = Compiler(options.graphicsapi, options.define, options.include)
compiler.compile(options.input, options.output, bindless)
if __name__ == "__main__":
main()
| 36.5625 | 451 | 0.713333 | 758 | 5,850 | 5.365435 | 0.274406 | 0.03762 | 0.016228 | 0.025818 | 0.215884 | 0.178264 | 0.163511 | 0.163511 | 0.144087 | 0.144087 | 0 | 0.012402 | 0.131624 | 5,850 | 159 | 452 | 36.792453 | 0.788189 | 0.130598 | 0 | 0.176 | 0 | 0.008 | 0.158195 | 0.029551 | 0 | 0 | 0 | 0 | 0 | 1 | 0.096 | false | 0 | 0.064 | 0.008 | 0.256 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f4bfca7952db5a47886470624acc75dadc3c9a17 | 1,938 | py | Python | src/batch_pred_struct/get_146_features.py | vam-sin/deepcys | db56c43748147eeeff96d95294bf1df43fbbaf8a | [
"MIT"
] | null | null | null | src/batch_pred_struct/get_146_features.py | vam-sin/deepcys | db56c43748147eeeff96d95294bf1df43fbbaf8a | [
"MIT"
] | 4 | 2020-11-13T17:15:10.000Z | 2022-02-09T23:26:45.000Z | src/batch_pred_struct/get_146_features.py | vam-sin/deepcys | db56c43748147eeeff96d95294bf1df43fbbaf8a | [
"MIT"
] | null | null | null | '''
Compute all the 146 features
'''
# Libraries
import requests
import numpy as np
import pickle
import os
from feature_gen import get_nf1, get_nf2, get_nf3, get_nf4
from pka import get_pka
from Bf_rhpy import get_bf_rhpy
import os.path
# Take in PDB ID and residue ID. (Ex: 1b2l, 137, A, Output: Disulfide)
def get_features(pdb, res, chain):
# Parameters:
res = int(res)
# Get FASTA and PDB.
PROJECT_PATH = os.path.dirname(__file__) + "/"
print(PROJECT_PATH)
print("\nSteps.")
filename_pdb = 'PDB_Data/' + pdb.replace(' ', '') + '.pdb'
if not os.path.isfile(filename_pdb):
    url = 'https://files.rcsb.org/download/' + pdb.upper() + '.pdb'
    r = requests.get(url)
    with open(filename_pdb, 'wb') as f:
        f.write(r.content)
print("Obtained PDB. ", res)
# BF_rHpy
BF, rHpy = get_bf_rhpy(pdb, res, chain)
print("Calculated BF and rHpy: " + str(BF) + ", " + str(rHpy))
# Secondary Structure Folds
nf1_7 = get_nf1(pdb, res, chain, 7)
print("Calculated NF1.")
# Amino Acid Signatures in Interaction Shells
nf2_8, nf2_7, nf2_6, nf2_5 = get_nf2(pdb, res, chain)
print("Calculated NF2.")
# Enzyme Class
nf3 = get_nf3(pdb)
print(nf3)
print("Calculated NF3")
# Motifs
nf4_3 = get_nf4(pdb, res, chain, 3)
nf4_5 = get_nf4(pdb, res, chain, 5)
nf4_7 = get_nf4(pdb, res, chain, 7)
nf4_9 = get_nf4(pdb, res, chain, 9)
nf4_11 = get_nf4(pdb, res, chain, 11)
nf4_13 = get_nf4(pdb, res, chain, 13)
print("Calculated NF4")
# Compile X
X = [BF, rHpy]
for features in (nf1_7, nf2_5, nf2_6, nf2_7, nf2_8, nf3,
                 nf4_3, nf4_5, nf4_7, nf4_9, nf4_11, nf4_13):
    X.extend(features)
X = np.asarray(X)
print(X.shape)
return X
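# A minimal usage sketch (IDs taken from the example in the comment above;
# assumes the PDB_Data/ directory and the imported helper modules are available):
#
#     X = get_features("1b2l", 137, "A")   # -> numpy array of the 146 features
#     print(X.shape)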
| 18.634615 | 70 | 0.656863 | 354 | 1,938 | 3.443503 | 0.276836 | 0.080394 | 0.059065 | 0.099262 | 0.281378 | 0.155045 | 0.14356 | 0.029532 | 0 | 0 | 0 | 0.054768 | 0.199174 | 1,938 | 103 | 71 | 18.815534 | 0.73067 | 0.127967 | 0 | 0.179104 | 0 | 0 | 0.095267 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.014925 | false | 0 | 0.119403 | 0 | 0.149254 | 0.149254 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f4bfcaef14a24c47a59a3c83f37d8c4cb1c1127b | 2,914 | py | Python | tests/test_gui/test_layouting_gridlayout.py | Ibrahim2750mi/arcade | bf3229e64117931bffb8e50926a996a7a8fc9b8b | [
"MIT"
] | null | null | null | tests/test_gui/test_layouting_gridlayout.py | Ibrahim2750mi/arcade | bf3229e64117931bffb8e50926a996a7a8fc9b8b | [
"MIT"
] | 1 | 2022-03-21T06:24:29.000Z | 2022-03-21T06:24:29.000Z | tests/test_gui/test_layouting_gridlayout.py | Ibrahim2750mi/arcade | bf3229e64117931bffb8e50926a996a7a8fc9b8b | [
"MIT"
] | null | null | null | from arcade.gui import UIDummy
from arcade.gui.widgets import Rect
from arcade.gui.widgets.layout import UIGridLayout
def test_place_widget(window):
dummy1 = UIDummy(width=100, height=100)
dummy2 = UIDummy(width=100, height=100)
dummy3 = UIDummy(width=100, height=100)
dummy4 = UIDummy(width=100, height=100)
subject = UIGridLayout(
column_count=2,
row_count=2
)
subject.add(dummy1, 0, 0)
subject.add(dummy2, 0, 1)
subject.add(dummy3, 1, 0)
subject.add(dummy4, 1, 1)
subject.rect = Rect(0, 0, *subject.size_hint_min)
subject.do_layout()
# check that do_layout doesn't manipulate the rect
assert subject.rect == (0, 0, 200, 200)
assert dummy1.position == (0, 100)
assert dummy2.position == (0, 0)
assert dummy3.position == (100, 100)
assert dummy4.position == (100, 0)
def test_place_widget_with_different_sizes(window):
dummy1 = UIDummy(width=50, height=100)
dummy2 = UIDummy(width=100, height=100)
dummy3 = UIDummy(width=100, height=50)
dummy4 = UIDummy(width=50, height=50)
subject = UIGridLayout(
column_count=2,
row_count=2
)
subject.add(dummy1, 0, 0)
subject.add(dummy2, 0, 1)
subject.add(dummy3, 1, 0)
subject.add(dummy4, 1, 1)
subject.rect = Rect(0, 0, *subject.size_hint_min)
subject.do_layout()
assert subject.rect == (0, 0, 200, 200)
assert dummy1.position == (25, 100)
assert dummy2.position == (0, 0)
assert dummy3.position == (100, 125)
assert dummy4.position == (125, 25)
def test_place_widget_within_content_rect(window):
dummy1 = UIDummy(width=100, height=100)
subject = UIGridLayout(
column_count=1,
row_count=1
).with_padding(left=10, bottom=20)
subject.add(dummy1, 0, 0)
assert subject.size_hint_min == (110, 120)
subject.rect = Rect(0, 0, *subject.size_hint_min)
subject.do_layout()
assert dummy1.position == (10, 20)
def test_place_widgets_with_col_row_span(window):
dummy1 = UIDummy(width=100, height=100)
dummy2 = UIDummy(width=100, height=100)
dummy3 = UIDummy(width=100, height=100)
dummy4 = UIDummy(width=100, height=100)
dummy5 = UIDummy(width=200, height=100)
dummy6 = UIDummy(width=100, height=200)
subject = UIGridLayout(
column_count=3,
row_count=3,
)
subject.add(dummy1, 0, 0)
subject.add(dummy2, 0, 1)
subject.add(dummy3, 1, 0)
subject.add(dummy4, 1, 1)
subject.add(dummy5, 0, 2, col_span=2)
subject.add(dummy6, 2, 0, row_span=3)
subject.rect = Rect(0, 0, *subject.size_hint_min)
subject.do_layout()
assert dummy1.position == (0, 200)
assert dummy2.position == (0, 100)
assert dummy3.position == (100, 200)
assert dummy4.position == (100, 100)
assert dummy5.position == (0, 0)
assert dummy6.position == (200, 50)
| 26.733945 | 54 | 0.649279 | 410 | 2,914 | 4.507317 | 0.15122 | 0.097403 | 0.097403 | 0.136364 | 0.612013 | 0.602273 | 0.602273 | 0.595779 | 0.595779 | 0.566558 | 0 | 0.119048 | 0.221688 | 2,914 | 108 | 55 | 26.981481 | 0.695767 | 0.016472 | 0 | 0.544304 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.227848 | 1 | 0.050633 | false | 0 | 0.037975 | 0 | 0.088608 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f4c2e268826f31d87197f0af925b13e626b7a753 | 4,217 | py | Python | src/python/zquantum/core/interfaces/optimizer_test.py | bartubisgin/z-quantum-core | b61aef12cc86f0a8234229b9b26b21cde950d6f1 | [
"Apache-2.0"
] | null | null | null | src/python/zquantum/core/interfaces/optimizer_test.py | bartubisgin/z-quantum-core | b61aef12cc86f0a8234229b9b26b21cde950d6f1 | [
"Apache-2.0"
] | null | null | null | src/python/zquantum/core/interfaces/optimizer_test.py | bartubisgin/z-quantum-core | b61aef12cc86f0a8234229b9b26b21cde950d6f1 | [
"Apache-2.0"
] | 1 | 2022-03-19T02:23:53.000Z | 2022-03-19T02:23:53.000Z | """Test case prototypes that can be used in other projects.
Note that this file won't be executed on its own by pytest.
You need to define your own test cases that inherit from the ones defined here.
"""
import numpy as np
import pytest
from zquantum.core.interfaces.functions import FunctionWithGradient
from ..gradients import finite_differences_gradient
from ..history.recorder import recorder
def rosenbrock_function(x):
"""The Rosenbrock function"""
return sum(100.0 * (x[1:] - x[:-1] ** 2.0) ** 2.0 + (1 - x[:-1]) ** 2.0)
def sum_x_squared(x):
return sum(x ** 2.0)
class OptimizerTests(object):
"""Base class for optimizers tests.
How to use:
1. Inherit this class (remember to start name of the class with "Test"
2. In the same module define fixture called "optimizer".
Basic usage pattern:
@pytest.fixture
def optimizer():
return MyOptimizer()
class TestMyOptimizer(OptimizerTests): # Inherits all tests from this class
def test_some_new_feature(self, optimizer): # new test
....
Notice that the `optimizer` fixture can be parametrized if you wish to
perform tests for various configurations of your optimizer.
"""
def test_optimizer_succeeds_with_optimizing_rosenbrock_function(self, optimizer):
cost_function = FunctionWithGradient(
rosenbrock_function, finite_differences_gradient(rosenbrock_function)
)
results = optimizer.minimize(cost_function, initial_params=np.array([0, 0]))
assert results.opt_value == pytest.approx(0, abs=1e-4)
assert results.opt_params == pytest.approx(np.ones(2), abs=1e-3)
assert "nfev" in results
assert "nit" in results
assert "opt_value" in results
assert "opt_params" in results
assert "history" in results
def test_optimizer_succeeds_with_optimizing_sum_of_squares_function(
self, optimizer
):
cost_function = FunctionWithGradient(
sum_x_squared, finite_differences_gradient(sum_x_squared)
)
results = optimizer.minimize(cost_function, initial_params=np.array([1, -1]))
assert results.opt_value == pytest.approx(0, abs=1e-5)
assert results.opt_params == pytest.approx(np.zeros(2), abs=1e-4)
assert "nfev" in results
assert "nit" in results
assert "opt_value" in results
assert "opt_params" in results
assert "history" in results
def test_optimizer_succeeds_on_cost_function_without_gradient(self, optimizer):
cost_function = sum_x_squared
results = optimizer.minimize(cost_function, initial_params=np.array([1, -1]))
assert results.opt_value == pytest.approx(0, abs=1e-5)
assert results.opt_params == pytest.approx(np.zeros(2), abs=1e-4)
assert "nfev" in results
assert "nit" in results
assert "opt_value" in results
assert "opt_params" in results
assert "history" in results
def test_optimizer_records_history_if_keep_value_history_is_added_as_option(
self, optimizer
):
optimizer.keep_value_history = True
# To check that history is recorded correctly, we wrap cost_function
# with a recorder. Optimizer should wrap it a second time and
# therefore we can compare two histories to see if they agree.
cost_function = recorder(sum_x_squared)
result = optimizer.minimize(cost_function, np.array([-1, 1]))
assert result.history == cost_function.history
def test_optimizer_does_not_record_history_if_keep_value_history_is_set_to_false(
self, optimizer
):
if getattr(self, "always_records_history", False):
return
optimizer.keep_value_history = False
result = optimizer.minimize(sum_x_squared, np.array([-2, 0.5]))
assert result.history == []
def test_optimizer_does_not_record_history_by_default(
self, optimizer
):
if getattr(self, "always_records_history", False):
return
result = optimizer.minimize(sum_x_squared, np.array([-2, 0.5]))
assert result.history == []
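# A concrete instance of the pattern from the class docstring (ScipyOptimizer is
# a hypothetical optimizer implementing this project's optimizer interface):
#
#     @pytest.fixture
#     def optimizer():
#         return ScipyOptimizer(method="L-BFGS-B")
#
#     class TestScipyOptimizer(OptimizerTests):
#         pass  # inherits every test case defined above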
| 32.945313 | 86 | 0.68461 | 556 | 4,217 | 4.985612 | 0.273381 | 0.048701 | 0.064935 | 0.038961 | 0.488817 | 0.479798 | 0.412338 | 0.399351 | 0.371934 | 0.337662 | 0 | 0.015418 | 0.23097 | 4,217 | 127 | 87 | 33.204724 | 0.839346 | 0.225042 | 0 | 0.573529 | 0 | 0 | 0.044743 | 0.013767 | 0 | 0 | 0 | 0 | 0.352941 | 1 | 0.117647 | false | 0 | 0.073529 | 0.014706 | 0.264706 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f4c30a79671a722d061cb3fa90874faf266f592d | 1,272 | py | Python | integration-test/lit.cfg.py | m-carrasco/net-ssa | e00ba50350b4f17bb9558dd85332d16b08d7854e | [
"MIT"
] | 1 | 2022-03-28T09:41:15.000Z | 2022-03-28T09:41:15.000Z | integration-test/lit.cfg.py | m-carrasco/net-ssa | e00ba50350b4f17bb9558dd85332d16b08d7854e | [
"MIT"
] | 1 | 2022-03-14T16:39:16.000Z | 2022-03-14T16:39:16.000Z | integration-test/lit.cfg.py | m-carrasco/net-ssa | e00ba50350b4f17bb9558dd85332d16b08d7854e | [
"MIT"
] | null | null | null | import lit.formats
import shutil
import os
config.name = "Test suite"
config.test_format = lit.formats.ShTest(True)
config.suffixes = ['.cs', '.test', '.il']
config.test_source_root = os.path.dirname(__file__)
config.test_build_root = os.path.join(config.my_obj_root, 'integration-test')
config.substitutions.append(('%mono', config.mono_bin))
config.substitutions.append(('%mcs', config.mcs_bin))
config.substitutions.append(('%ilasm', config.ilasm_bin))
config.substitutions.append(('%ssa-query', os.path.join(config.souffle_bin_dir, "ssa-query")))
config.substitutions.append(('%net-ssa-cli', os.path.join(config.net_ssa_bin_dir, "net-ssa-cli")))
config.substitutions.append(('%FileCheck', os.path.join(config.llvm_bin_dir, "FileCheck")))
# This is useful if a custom dotnet installation is used.
if os.environ.get('DOTNET_ROOT') is not None:
config.environment['DOTNET_ROOT'] = os.environ.get('DOTNET_ROOT')
def _clean_test_directory(directory):
for entry in os.scandir(directory):
basename = os.path.basename(entry.path)
if basename == "lit.site.cfg.py":
continue
if entry.is_dir():
shutil.rmtree(entry.path)
else:
os.remove(entry.path)
_clean_test_directory(config.test_build_root)
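# Example of a .cs test consuming the substitutions configured above
# (hypothetical test file, standard lit %s/%t placeholders):
#
#   // RUN: %mcs %s -out:%t.exe
#   // RUN: %mono %t.exe | %FileCheck %s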
| 35.333333 | 98 | 0.71934 | 180 | 1,272 | 4.9 | 0.377778 | 0.040816 | 0.170068 | 0.072562 | 0.049887 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.128145 | 1,272 | 35 | 99 | 36.342857 | 0.795311 | 0.043239 | 0 | 0 | 0 | 0 | 0.132619 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.038462 | false | 0 | 0.115385 | 0 | 0.153846 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f4c42a14e41f330413527220aea2630b6894ca83 | 9,450 | py | Python | pyfer/crypt.py | elbydata/pyfer | ffe261514bdfd4c019d4c132830422e453c89ec9 | [
"MIT"
] | null | null | null | pyfer/crypt.py | elbydata/pyfer | ffe261514bdfd4c019d4c132830422e453c89ec9 | [
"MIT"
] | null | null | null | pyfer/crypt.py | elbydata/pyfer | ffe261514bdfd4c019d4c132830422e453c89ec9 | [
"MIT"
] | null | null | null | """
PYFER - Encrypt and Decrypt messages.
-------------------------------------
CRYPT module:
Classes
-------
Machine: encryption and decryption machine.
Class Methods:
init: creates a Pyfer Crypt Machine.
-
scramble: uses the Crypt Machine to encrypt a message.
-
unscramble: uses the Crypt Machine to decrypt a message.
"""
import numpy as np
import string
import itertools
# ------------------------------------------------------------------------
class Machine:
"""
Class representing an encryption machine.
Attributes
----------
key (str): string of 30, 40, or 45 digits serving as the encryption key.
-
char_list (list) optional/dependent on init: list of characters used
by the encryption machine.
-
char_grid (numpy-array) optional/dependent on init: unscrambled grid
version of list of characters used by the encryption machine.
-
scramble_grid (numpy-array) optional/dependent on init: scrambled
grid of characters to used for the encryption and decryption of
messages.
Methods
-------
init: constructs all the necessary attributes for the encryption
machine.
-
scramble: encrypts a message.
-
unscramble: decrypts a message.
"""
def __init__(self, key):
"""
Constructs all the necessary attributes for the Crypt encryption
machine.
Arguments:
key (str): string of 30, 40, or 45 digits serving as the
encryption key.
Returns:
Crypt encryption machine.
"""
lc_list = list(string.ascii_lowercase)
uc_list = list(string.ascii_uppercase)
d_list = list(string.digits)
p_med = ["!", "?"]
p_full = [
"!",
"?",
".",
",",
":",
";",
")",
"(",
"_",
"+",
"-",
"=",
"<",
">",
"%",
"*",
"/",
"$",
"&",
]
if not isinstance(key, str):
    raise Exception(f"key must be a string; {type(key)} given.")
if len(key) == 30:
self.key = key
self.char_list = [
x
for x in itertools.chain.from_iterable(
itertools.zip_longest(lc_list, d_list)
)
if x
]
elif len(key) == 40:
self.key = key
self.char_list = [
x
for x in itertools.chain.from_iterable(
itertools.zip_longest(
lc_list, uc_list, d_list, p_med
)
)
if x
]
elif len(key) == 45:
self.key = key
self.char_list = [
x
for x in itertools.chain.from_iterable(
itertools.zip_longest(
lc_list, uc_list, d_list, p_full
)
)
if x
]
else:
self.key = None
self.char_list = None
raise Exception(
"Invalid key type: must be string of 30, 40, or 45 digits."
)
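# Interleaving sketch for the 30-digit case: chaining the zip_longest output
# and filtering the None padding alternates letters and digits, e.g.
# ['a', '0', 'b', '1', 'c', '2', ...] for lc_list and d_list.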
if self.key is not None:
square = int(len(self.key) / 5)
# validate the key eagerly so that a malformed key fails fast here
try:
    int(self.key)
except ValueError:
    raise Exception(
        "Invalid key type: must be string of 30, 40, or 45 digits."
    )
key_x_elements = []
for i in self.key[0:square]:
key_x_elements.append(int(i))
x_key = np.argsort(np.array(key_x_elements))
key_y_elements = []
for i in self.key[square : (2 * square)]:
key_y_elements.append(int(i))
y_key = np.argsort(np.array(key_y_elements))
key_x2_elements = []
for i in self.key[(2 * square) : (3 * square)]:
key_x2_elements.append(int(i))
x2_key = np.argsort(np.array(key_x2_elements))
key_y2_elements = []
for i in self.key[(3 * square) : (4 * square)]:
key_y2_elements.append(int(i))
y2_key = np.argsort(np.array(key_y2_elements))
key_z_elements = []
for i in self.key[(-1 * square) :]:
key_z_elements.append(int(i))
z_key = np.argsort(np.array(key_z_elements))
self.char_grid = np.asarray(self.char_list).reshape(
square, square
)
reshuffle_1 = self.char_grid[:, x_key]
if len(self.key) == 40:
reshuffle_2 = reshuffle_1.reshape(
4, int((square ** 2) / 4)
).transpose()
else:
reshuffle_2 = reshuffle_1.reshape(
3, int((square ** 2) / 3)
).transpose()
reshuffle_3 = reshuffle_2.reshape(square, square)
reshuffle_4 = reshuffle_3[y_key, :]
reshuffle_5 = reshuffle_4[:, x2_key]
if len(self.key) == 40:
reshuffle_6 = reshuffle_5.reshape(
4, int((square ** 2) / 4)
).transpose()
else:
reshuffle_6 = reshuffle_5.reshape(
3, int((square ** 2) / 3)
).transpose()
reshuffle_7 = reshuffle_6.reshape(square, square)
reshuffle_8 = reshuffle_7[y2_key, :]
reshuffle_9 = reshuffle_8[:, z_key]
if len(self.key) == 40:
reshuffle_10 = reshuffle_9.reshape(
4, int((square ** 2) / 4)
).transpose()
else:
reshuffle_10 = reshuffle_9.reshape(
3, int((square ** 2) / 3)
).transpose()
reshuffle_11 = reshuffle_10.reshape(square, square)
reshuffle_12 = reshuffle_11[z_key, :]
self.scramble_grid = reshuffle_12
# ----------
def scramble(self, input_string):
"""
Scramble the input message using the Crypt Machine.
Arguments:
input_string (str): message to encrypt.
Returns:
output_string (str): encrypted message.
"""
if not isinstance(input_string, str):
    raise Exception(
        "Input must be string of even length greater than 1."
    )
if np.mod(len(input_string), 2) != 0:
    raise Exception(
        f"Input string must have even number of characters; {len(input_string)} given."
    )
if len(input_string) <= 1:
    raise Exception(
        "Input string must have length greater than 1."
    )
if not all(i in self.char_list for i in input_string):
    raise Exception(
        "Disallowed characters in input string"
    )
in_indices = []
for i in input_string:
in_indices.append(np.argwhere(self.scramble_grid == i)[0])
out_indices = np.reshape(
np.transpose(np.array(in_indices)), (len(input_string), 2)
)
output_list = []
for i in range(len(input_string)):
output_list.append(
self.scramble_grid[out_indices[i][0], out_indices[i][1]]
)
output_string = "".join(output_list)
return output_string
# ----------
def unscramble(self, input_string):
"""
Unscramble the input message using the Crypt Machine.
Arguments:
input_string (str): message to decrypt.
Returns:
output_string (str): decrypted message.
"""
if not isinstance(input_string, str):
    raise Exception(
        "Input must be string of even length greater than 1."
    )
if np.mod(len(input_string), 2) != 0:
    raise Exception(
        "Input string must have even number of characters and have length greater than 1."
    )
if len(input_string) <= 1:
    raise Exception(
        "Input string must have length greater than 1."
    )
if not all(i in self.char_list for i in input_string):
    raise Exception(
        "Disallowed characters in input string"
    )
in_indices = []
for i in input_string:
in_indices.append(np.argwhere(self.scramble_grid == i)[0])
out_indices = np.transpose(
np.reshape(np.array(in_indices), (2, len(input_string)))
)
output_list = []
for i in range(len(input_string)):
output_list.append(
self.scramble_grid[out_indices[i][0], out_indices[i][1]]
)
output_string = "".join(output_list)
return output_string
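# A minimal usage sketch (illustrative key; any 30/40/45-digit string works):
#
#     machine = Machine("123456789012345678901234567890")  # 30 digits
#     cipher = machine.scramble("hello1")                   # even-length input
#     plain = machine.unscramble(cipher)                    # "hello1" by design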
| 29.810726 | 102 | 0.473439 | 985 | 9,450 | 4.380711 | 0.147208 | 0.063731 | 0.015295 | 0.016222 | 0.614832 | 0.58007 | 0.530243 | 0.476014 | 0.412978 | 0.378216 | 0 | 0.023422 | 0.421693 | 9,450 | 316 | 103 | 29.905063 | 0.766148 | 0.176825 | 0 | 0.458937 | 0 | 0 | 0.07976 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.014493 | false | 0.014493 | 0.014493 | 0 | 0.043478 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f4c46ea0c0393f0694be241347c81ab110a1f8b4 | 5,639 | py | Python | src/pattern_creation/score_patterns.py | mnschmit/lm-lexical-inference | 85121102459a1f79ad5df68efce4987402fca415 | [
"MIT"
] | 5 | 2021-02-24T03:59:36.000Z | 2022-03-30T08:36:58.000Z | src/pattern_creation/score_patterns.py | mnschmit/lm-lexical-inference | 85121102459a1f79ad5df68efce4987402fca415 | [
"MIT"
] | 1 | 2022-03-03T15:32:17.000Z | 2022-03-11T09:01:38.000Z | src/pattern_creation/score_patterns.py | mnschmit/lm-lexical-inference | 85121102459a1f79ad5df68efce4987402fca415 | [
"MIT"
] | 2 | 2021-07-26T07:42:12.000Z | 2022-01-29T18:36:39.000Z | from typing import Iterable, Tuple, Dict
import argparse
from transformers import AutoModelForMaskedLM, PreTrainedTokenizer, AutoTokenizer
from transformers.tokenization_utils import BatchEncoding
import torch
import csv
from tqdm import tqdm
def put_on_gpu(encoding: BatchEncoding, device: int) -> Dict[str, torch.Tensor]:
res = {}
for k, v in encoding.items():
res[k] = v.cuda(device)
return res
def batch_generator(template, pos_pairs, insert_idx, tokenizer, device, batch_size):
sents, exp_words = [], []
for pair in pos_pairs:
sent = template.format(pair[insert_idx])
sents.append(sent)
expected_word: int = tokenizer.encode(
pair[1-insert_idx], add_special_tokens=False)[0]
exp_words.append(expected_word)
if len(exp_words) == batch_size:
expw_tensor = torch.LongTensor(exp_words).cuda(device)
sents_enc = tokenizer(sents, padding=True, truncation=True,
return_tensors='pt')
sents_enc = put_on_gpu(sents_enc, device)
mask_token_mask = sents_enc['input_ids'] == tokenizer.mask_token_id
mask_idx = torch.argmax(mask_token_mask.long(), dim=1)
yield sents_enc, mask_idx, expw_tensor
sents.clear()
exp_words.clear()
if sents:
expw_tensor = torch.LongTensor(exp_words).cuda(device)
sents_enc = tokenizer(sents, padding=True, truncation=True,
return_tensors='pt')
sents_enc = put_on_gpu(sents_enc, device)
mask_token_mask = sents_enc['input_ids'] == tokenizer.mask_token_id
mask_idx = torch.argmax(mask_token_mask.long(), dim=1)
yield sents_enc, mask_idx, expw_tensor
def count_hits(masked_sentence_template: str, lm_model: AutoModelForMaskedLM, k: int,
tokenizer: PreTrainedTokenizer,
pos_pairs: Iterable[Tuple[str, str]], insert_idx: int,
device: int, batch_size: int) -> int:
batches = batch_generator(
masked_sentence_template,
pos_pairs, insert_idx, tokenizer, device,
batch_size
)
hits = 0
for batch in batches:
masked_sent, mask_idx, expected_word = batch
out = lm_model(**masked_sent)
# (batch_size, seq_len, vocab_size)
logits = out[0]
# (batch_size, vocab_size)
mask_logits = torch.gather(
logits, 1, mask_idx[:, None, None].expand_as(logits)
)[:, 0, :]
# (batch_size, k)
scores, indices = mask_logits.topk(k)
hits += (indices == expected_word.unsqueeze(1).expand_as(indices)).sum().item()
return hits
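# Shape sketch for the gather above (illustrative sizes): with batch_size=2,
# seq_len=5 and vocab=10, mask_idx[:, None, None].expand_as(logits) has shape
# (2, 5, 10); torch.gather(..., dim=1) then picks the masked row for every
# vocabulary entry, and [:, 0, :] reduces the result to (2, 10) mask logits.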
def score_pattern(pattern: str, pos_pairs: Iterable[Tuple[str, str]],
lm_model: AutoModelForMaskedLM, tokenizer: PreTrainedTokenizer,
device: int, k: int = 100, batch_size: int = 2) -> int:
prem_masked_pattern = pattern.format(prem=tokenizer.mask_token, hypo='{}')
prem_hits = count_hits(prem_masked_pattern, lm_model,
k, tokenizer, pos_pairs, 1, device, batch_size)
hypo_masked_pattern = pattern.format(hypo=tokenizer.mask_token, prem='{}')
hypo_hits = count_hits(hypo_masked_pattern, lm_model,
k, tokenizer, pos_pairs, 0, device, batch_size)
return prem_hits + hypo_hits
def main(args: argparse.Namespace):
rel_idx = {}
with open(args.relation_index) as f:
for line in f:
idx, rel = line.strip().split('\t')
rel_idx[idx] = rel
pos_pairs = []
with open(args.sherliic_file) as f:
r = csv.reader(f)
next(r) # headers
for row in r:
cls = row[17] == 'yes'
if args.ent_cls != cls:
continue
prem_path = rel_idx[row[2]]
hypo_path = rel_idx[row[4]]
prem_idx = -2 if row[13] == 'True' else 1
hypo_idx = -2 if row[14] == 'True' else 1
prem = prem_path.split('___')[prem_idx]
hypo = hypo_path.split('___')[hypo_idx]
pos_pairs.append((prem, hypo))
lm_model = AutoModelForMaskedLM.from_pretrained(args.model_string)
tokenizer = AutoTokenizer.from_pretrained(args.model_string)
lm_model.cuda(args.gpu)
patterns = []
with open(args.pattern_file) as f:
for pat in f:
patterns.append(pat.strip())
if args.longest_first:
patterns.sort(key=len, reverse=True)
pattern_score = {}
for pat in tqdm(patterns):
score = score_pattern(
pat, pos_pairs, lm_model, tokenizer, args.gpu,
k=args.k, batch_size=args.batch_size
)
pattern_score[pat] = score
with open(args.scored_pattern_file, 'w') as fout:
for pat in sorted(pattern_score.keys(), key=pattern_score.__getitem__, reverse=True):
print(pattern_score[pat], pat, sep='\t', file=fout)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('pattern_file')
parser.add_argument('sherliic_file')
parser.add_argument('relation_index')
parser.add_argument('scored_pattern_file')
parser.add_argument('--negative-class',
action='store_false', dest='ent_cls')
parser.add_argument('-k', type=int, default=100)
parser.add_argument('--batch-size', type=int, default=4)
parser.add_argument('--model-string', default='roberta-base')
parser.add_argument('--gpu', type=int, default=0)
parser.add_argument('--longest-first', action='store_true')
args = parser.parse_args()
main(args)
| 38.623288 | 93 | 0.630963 | 717 | 5,639 | 4.698745 | 0.232915 | 0.034728 | 0.05046 | 0.0187 | 0.251707 | 0.217869 | 0.20184 | 0.20184 | 0.179282 | 0.150193 | 0 | 0.00763 | 0.256251 | 5,639 | 145 | 94 | 38.889655 | 0.79566 | 0.014542 | 0 | 0.113821 | 0 | 0 | 0.039265 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.04065 | false | 0 | 0.056911 | 0 | 0.121951 | 0.00813 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f4c6b1e3c2f1d9d2ca30780960968fe3d4dfd365 | 1,733 | py | Python | alfred-search-unicode/unicode_search.py | blueset/alfred-search-unicode | 19356500c2ee4ccae9e55116aad4c5d5782ca0f0 | [
"MIT"
] | 28 | 2019-12-30T14:48:33.000Z | 2022-03-28T09:44:18.000Z | alfred-search-unicode/unicode_search.py | blueset/alfred-search-unicode | 19356500c2ee4ccae9e55116aad4c5d5782ca0f0 | [
"MIT"
] | 3 | 2020-06-20T02:48:16.000Z | 2021-10-22T02:55:52.000Z | alfred-search-unicode/unicode_search.py | blueset/alfred-search-unicode | 19356500c2ee4ccae9e55116aad4c5d5782ca0f0 | [
"MIT"
] | 1 | 2020-10-07T13:01:12.000Z | 2020-10-07T13:01:12.000Z | #!/usr/bin/python3
"""
Search for Unicode 12.1 Descriptions
uni binary from: https://github.com/arp242/uni
"""
import sys
import re
import subprocess
import json
if len(sys.argv) >= 2:
query = sys.argv[1]
try:
out: str = subprocess.check_output(["./uni", "-q", "search", query]).decode()
out = out.strip().splitlines()
except subprocess.CalledProcessError:
out = []
if re.match(r"((U\+)?[0-9A-Fa-f]+ ?)+$", query):
pr_out: str = subprocess.check_output(["./uni", "-q", "print"] + query.split()).decode()
if "unknown codepoint" not in pr_out:
out = pr_out.strip().splitlines() + out
else:
out = []
data = []
for i in out[:20]:
match = re.match(
r"^'(.+?)' +(U\+[0-9A-F]+) +(\d+) +((?:[0-9a-f ]+?)) +(&.+?;) +(.+)$", i)
if not match:
continue
char, c_hex, c_int, _, _, name = match.groups()
disp_char = char
out_char = chr(int(c_int))
name = name.title()
short_name = name[:name.rindex(" (")]
data.append({
"uid": f"unicode_{c_int}",
"title": f"{disp_char} — {short_name}",
"subtitle": f"{c_hex} ({c_int}) {name}",
"arg": out_char,
"text": {
"copy": out_char,
"largetype": out_char
},
"icon": {
"path": "unicode.png"
},
"mods": {
"alt": {
"subtitle": f"Copy name: {short_name}",
"arg": short_name,
"valid": True
},
"cmd": {
"subtitle": f"Copy hex code: {c_hex}",
"arg": c_hex,
"valid": True
},
},
})
json.dump({"items": data}, sys.stdout)
| 24.069444 | 96 | 0.473745 | 205 | 1,733 | 3.882927 | 0.429268 | 0.020101 | 0.030151 | 0.052764 | 0.138191 | 0.10804 | 0.077889 | 0 | 0 | 0 | 0 | 0.014542 | 0.325447 | 1,733 | 71 | 97 | 24.408451 | 0.665526 | 0.058857 | 0 | 0.074074 | 0 | 0.018519 | 0.213185 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.074074 | 0 | 0.074074 | 0.018519 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f4c757b0b97b8ad3ff1aceebe3353c4d01b65002 | 15,586 | py | Python | ptterm/terminal.py | julio641742/ptterm | ba78356f07afab8031ef364c1213072f947cf87a | [
"BSD-3-Clause"
] | null | null | null | ptterm/terminal.py | julio641742/ptterm | ba78356f07afab8031ef364c1213072f947cf87a | [
"BSD-3-Clause"
] | null | null | null | ptterm/terminal.py | julio641742/ptterm | ba78356f07afab8031ef364c1213072f947cf87a | [
"BSD-3-Clause"
] | null | null | null | """
The layout engine. This builds the prompt_toolkit layout.
"""
from typing import Callable, Iterable, List, Optional
from prompt_toolkit.application.current import get_app, get_app_or_none
from prompt_toolkit.buffer import Buffer
from prompt_toolkit.document import Document
from prompt_toolkit.filters import Condition, has_selection
from prompt_toolkit.formatted_text import StyleAndTextTuples
from prompt_toolkit.key_binding import KeyBindings
from prompt_toolkit.keys import Keys
from prompt_toolkit.layout.containers import (
ConditionalContainer,
Float,
FloatContainer,
HSplit,
VSplit,
Window,
)
from prompt_toolkit.layout.controls import (
BufferControl,
FormattedTextControl,
UIContent,
UIControl,
)
from prompt_toolkit.layout.processors import (
HighlightIncrementalSearchProcessor,
HighlightSearchProcessor,
HighlightSelectionProcessor,
Processor,
Transformation,
)
from prompt_toolkit.layout.screen import Point
from prompt_toolkit.mouse_events import MouseEventType
from prompt_toolkit.utils import Event, is_windows
from prompt_toolkit.widgets.toolbars import SearchToolbar
from .backends import Backend
from .process import Process
__all__ = ["Terminal"]
class _TerminalControl(UIControl):
def __init__(
self,
backend: Backend,
done_callback: Optional[Callable[[], None]] = None,
bell_func: Optional[Callable[[], None]] = None,
) -> None:
def has_priority() -> bool:
# Give priority to the processing of this terminal output, if this
# user control has the focus.
app_or_none = get_app_or_none()
if app_or_none is None:
# The application has terminated before this process ended.
return False
return app_or_none.layout.has_focus(self)
self.process = Process(
lambda: self.on_content_changed.fire(),
backend=backend,
done_callback=done_callback,
bell_func=bell_func,
has_priority=has_priority,
)
self.on_content_changed = Event(self)
self._running = False
def create_content(self, width: int, height: int) -> UIContent:
# Report dimensions to the process.
self.process.set_size(width, height)
# The first time that this user control is rendered. Keep track of the
# 'app' object and start the process.
if not self._running:
self.process.start()
self._running = True
if not self.process.screen:
return UIContent()
pt_screen = self.process.screen.pt_screen
pt_cursor_position = self.process.screen.pt_cursor_position
data_buffer = pt_screen.data_buffer
cursor_y = pt_cursor_position.y
# Prompt_toolkit needs the amount of characters before the cursor in a
# UIControl. This doesn't correspond with the xpos in case of double
# width characters. That's why we compute the wcwidth.
cursor_row = data_buffer[pt_cursor_position.y]
text_before_cursor = "".join(
cursor_row[x].char for x in range(0, pt_cursor_position.x)
)
cursor_x = len(text_before_cursor)
def get_line(number: int) -> StyleAndTextTuples:
row = data_buffer[number]
empty = True
if row:
max_column = max(row)
empty = False
else:
max_column = 0
if number == cursor_y:
max_column = max(max_column, cursor_x)
empty = False
if empty:
return [("", " ")]
else:
cells = [row[i] for i in range(max_column + 1)]
return [(cell.style, cell.char) for cell in cells]
if data_buffer:
line_count = (
max(data_buffer) + 1
) # TODO: subtract all empty lines from the beginning. (If we need to. Not sure.)
else:
line_count = 1
return UIContent(
get_line,
line_count=line_count,
show_cursor=pt_screen.show_cursor,
cursor_position=Point(x=cursor_x, y=cursor_y),
)
def get_key_bindings(self) -> KeyBindings:
bindings = KeyBindings()
@bindings.add(Keys.Any)
def handle_key(event):
"""
Handle any key binding -> write it to the stdin of this terminal.
"""
self.process.write_key(event.key_sequence[0].key)
@bindings.add(Keys.BracketedPaste)
def _(event):
self.process.write_input(event.data, paste=True)
return bindings
def get_invalidate_events(self) -> Iterable[Event]:
yield self.on_content_changed
def mouse_handler(self, mouse_event) -> None:
"""
Handle mouse events in a pane. A click in a non-active pane will select
it. A click in active pane will send the mouse event to the application
running inside it.
"""
app = get_app()
process = self.process
x = mouse_event.position.x
y = mouse_event.position.y
# The containing Window translates coordinates to the absolute position
# of the whole screen, but in this case, we need the relative
# coordinates of the visible area.
y -= self.process.screen.line_offset
if not app.layout.has_focus(self):
# Focus this process when the mouse has been clicked.
if mouse_event.event_type == MouseEventType.MOUSE_UP:
app.layout.focus(self)
else:
# Already focussed, send event to application when it requested
# mouse support.
if process.screen.sgr_mouse_support_enabled:
# Xterm SGR mode.
try:
ev, m = {
MouseEventType.MOUSE_DOWN: (0, "M"),
MouseEventType.MOUSE_UP: (0, "m"),
MouseEventType.SCROLL_UP: (64, "M"),
MouseEventType.SCROLL_DOWN: (65, "M"),
}[mouse_event.event_type]
except KeyError:
pass
else:
self.process.write_input("\x1b[<%s;%s;%s%s" % (ev, x + 1, y + 1, m))
elif process.screen.urxvt_mouse_support_enabled:
# Urxvt mode.
try:
ev = {
MouseEventType.MOUSE_DOWN: 32,
MouseEventType.MOUSE_UP: 35,
MouseEventType.SCROLL_UP: 96,
MouseEventType.SCROLL_DOWN: 97,
}[mouse_event.event_type]
except KeyError:
pass
else:
self.process.write_input("\x1b[%s;%s;%sM" % (ev, x + 1, y + 1))
elif process.screen.mouse_support_enabled:
# Fall back to old mode.
if x < 96 and y < 96:
try:
ev = {
MouseEventType.MOUSE_DOWN: 32,
MouseEventType.MOUSE_UP: 35,
MouseEventType.SCROLL_UP: 96,
MouseEventType.SCROLL_DOWN: 97,
}[mouse_event.event_type]
except KeyError:
pass
else:
self.process.write_input(
"\x1b[M%s%s%s" % (chr(ev), chr(x + 33), chr(y + 33))
)
def is_focusable(self) -> bool:
return not self.process.suspended
class _Window(Window):
"""
"""
def __init__(self, terminal_control: _TerminalControl, **kw) -> None:
self.terminal_control = terminal_control
super().__init__(**kw)
def write_to_screen(self, *a, **kw) -> None:
# Make sure that the bottom of the terminal is always visible.
screen = self.terminal_control.process.screen
# NOTE: the +1 is required because max_y starts counting at 0, while
# lines counts the numbers of lines, starting at 1 for one line.
self.vertical_scroll = screen.max_y - screen.lines + 1
super().write_to_screen(*a, **kw)
def create_backend(
command: List[str], before_exec_func: Optional[Callable[[], None]]
) -> Backend:
if is_windows():
from .backends.win32 import Win32Backend
return Win32Backend()
else:
from .backends.posix import PosixBackend
return PosixBackend.from_command(command, before_exec_func=before_exec_func)
class Terminal:
"""
Terminal widget for use in a prompt_toolkit layout.
:param command: List of command line arguments.
For instance: `['python', '-c', 'print("test")']`
:param before_exec_func: Function which is called in the child process,
right before calling `exec`. Useful for instance for changing the
current working directory or setting environment variables.
"""
def __init__(
self,
command=["/bin/bash"],
before_exec_func=None,
backend: Optional[Backend] = None,
bell_func: Optional[Callable[[], None]] = None,
style: str = "",
width: Optional[int] = None,
height: Optional[int] = None,
done_callback: Optional[Callable[[], None]] = None,
) -> None:
if backend is None:
backend = create_backend(command, before_exec_func)
self.terminal_control = _TerminalControl(
backend=backend, bell_func=bell_func, done_callback=done_callback,
)
self.terminal_window = _Window(
terminal_control=self.terminal_control,
content=self.terminal_control,
wrap_lines=False,
)
# Key bindigns for copy buffer.
kb = KeyBindings()
@kb.add("c-c")
def _exit(event):
self.exit_copy_mode()
@kb.add("space")
def _reset_selection(event):
" Reset selection. "
event.current_buffer.start_selection()
@kb.add("enter", filter=has_selection)
def _copy_selection(event):
" Copy selection. "
data = event.current_buffer.copy_selection()
event.app.clipboard.set_data(data)
self.search_toolbar = SearchToolbar(
forward_search_prompt="Search down: ", backward_search_prompt="Search up: "
)
self.copy_buffer = Buffer(read_only=True)
self.copy_buffer_control = BufferControl(
buffer=self.copy_buffer,
search_buffer_control=self.search_toolbar.control,
include_default_input_processors=False,
input_processors=[
_UseStyledTextProcessor(self),
HighlightSelectionProcessor(),
HighlightSearchProcessor(),
HighlightIncrementalSearchProcessor(),
],
preview_search=True,  # XXX: not sure why we need preview_search twice.
key_bindings=kb,
)
self.copy_window = Window(content=self.copy_buffer_control, wrap_lines=False)
self.is_copying = False
@Condition
def is_copying() -> bool:
return self.is_copying
self.container = FloatContainer(
content=HSplit(
[
# Either show terminal window or copy buffer.
VSplit(
[ # XXX: this nested VSplit should not have been necessary,
# but the ConditionalContainer which width can become
# zero will collapse the other elements.
ConditionalContainer(
self.terminal_window, filter=~is_copying
),
ConditionalContainer(self.copy_window, filter=is_copying),
]
),
ConditionalContainer(self.search_toolbar, filter=is_copying),
],
style=style,
width=width,
height=height,
),
floats=[
Float(
top=0,
right=0,
height=1,
content=ConditionalContainer(
Window(
content=FormattedTextControl(
text=self._copy_position_formatted_text
),
style="class:copy-mode-cursor-position",
),
filter=is_copying,
),
)
],
)
def _copy_position_formatted_text(self) -> str:
"""
Return the cursor position text to be displayed in copy mode.
"""
render_info = self.copy_window.render_info
if render_info:
return "[%s/%s]" % (
render_info.cursor_position.y + 1,
render_info.content_height,
)
else:
return "[0/0]"
def enter_copy_mode(self) -> None:
# Suspend process.
self.terminal_control.process.suspend()
# Copy content into copy buffer.
data_buffer = self.terminal_control.process.screen.pt_screen.data_buffer
text = []
styled_lines = []
if data_buffer:
for line_index in range(min(data_buffer), max(data_buffer) + 1):
line = data_buffer[line_index]
styled_line = []
if line:
for column_index in range(0, max(line) + 1):
char = line[column_index]
text.append(char.char)
styled_line.append((char.style, char.char))
text.append("\n")
styled_lines.append(styled_line)
text.pop() # Drop last line ending.
text_str = "".join(text)
self.copy_buffer.set_document(
Document(text=text_str, cursor_position=len(text_str)), bypass_readonly=True
)
self.styled_lines = styled_lines
# Enter copy mode.
self.is_copying = True
get_app().layout.focus(self.copy_window)
def exit_copy_mode(self) -> None:
# Resume process.
self.terminal_control.process.resume()
# focus terminal again.
self.is_copying = False
get_app().layout.focus(self.terminal_window)
def __pt_container__(self) -> FloatContainer:
return self.container
@property
def process(self):
return self.terminal_control.process
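# A minimal embedding sketch for the Terminal widget (assumes a standard
# prompt_toolkit Application; not part of this module):
#
#     from prompt_toolkit import Application
#     from prompt_toolkit.layout import Layout
#
#     term = Terminal(command=["/bin/bash"])
#     app = Application(layout=Layout(term), full_screen=True)
#     app.run()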
class _UseStyledTextProcessor(Processor):
"""
In order to allow highlighting of the copy region, we use a preprocessed
list of (style, text) tuples. This processor returns just that list for the
given pane.
This processor should go before all others, because it replaces the list of
(style, text) tuples.
"""
def __init__(self, terminal: Terminal) -> None:
self.terminal = terminal
def apply_transformation(self, transformation_input) -> Transformation:
try:
line = self.terminal.styled_lines[transformation_input.lineno]
except IndexError:
line = []
return Transformation(line)
| 33.735931 | 95 | 0.57064 | 1,659 | 15,586 | 5.16094 | 0.215793 | 0.025812 | 0.027797 | 0.015183 | 0.101495 | 0.074165 | 0.055711 | 0.047302 | 0.047302 | 0.047302 | 0 | 0.006114 | 0.349416 | 15,586 | 461 | 96 | 33.809111 | 0.838264 | 0.160721 | 0 | 0.177019 | 0 | 0 | 0.01402 | 0.002401 | 0 | 0 | 0 | 0.002169 | 0 | 1 | 0.07764 | false | 0.012422 | 0.059006 | 0.012422 | 0.198758 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f4c7e60532d9f60226cbecdcec43fa7ac10677df | 2,707 | py | Python | app/main/views.py | DennisKipkirui/Pitch_app | d2272b12c61df545bf4a16e7235631becbf2a901 | [
"Unlicense"
] | null | null | null | app/main/views.py | DennisKipkirui/Pitch_app | d2272b12c61df545bf4a16e7235631becbf2a901 | [
"Unlicense"
] | null | null | null | app/main/views.py | DennisKipkirui/Pitch_app | d2272b12c61df545bf4a16e7235631becbf2a901 | [
"Unlicense"
] | null | null | null | from flask import render_template,request,redirect,url_for, abort
from . import main
from ..models import User, Pitch, Category, Comment
from flask_login import login_required, current_user
from .forms import PitchForm, CommentForm, CategoryForm
from .. import db
#Views
@main.route('/')
def index():
category = Category.get_categories()
return render_template('index.html', category = category)
@main.route('/add/category', methods=['GET','POST'])
@login_required
def new_category():
form = CategoryForm()
if form.validate_on_submit():
name = form.name.data
new_category = Category(name=name)
new_category.save_category()
return redirect(url_for('.index'))
title = 'New category'
return render_template('new_category.html', category_form = form,title=title)
@main.route('/categories/<int:id>')
def category(id):
category = Category.query.get(id)
if category is None:
abort(404)
return render_template('category.html', category=category)
@main.route('/categories/new-pitch/add/<int:id>', methods=['GET', 'POST'])
@login_required
def new_pitch(id):
form = PitchForm()
category = Category.query.filter_by(id=id).first()
if category is None:
abort(404)
if form.validate_on_submit():
pitch = form.pitch.data
title = form.title.data
new_pitch= Pitch( title=title, pitch=pitch, user_id=current_user.id)
new_pitch.save_pitch()
return redirect(url_for('.category', id=category.id))
title = 'New Pitch'
return render_template('new_pitch.html', title=title, pitch_form=form, category=category)
@main.route('/write_comment/<int:id>', methods=['GET', 'POST'])
@login_required
def post_comment(id):
'''
function to post comments
'''
form = CommentForm()
title = 'post comment'
pitches = Pitch.query.filter_by(id=id).first()
if pitches is None:
abort(404)
if form.validate_on_submit():
opinion = form.opinion.data
new_comment = Comment(opinion=opinion, user_id=current_user.id, pitches_id=pitches.id)
new_comment.save_comment()
return redirect(url_for('.view_pitch', id=pitches.id))
return render_template('post_comment.html', comment_form=form, title=title)
@main.route('/categories/view_pitch/<int:id>', methods=['GET', 'POST'])
@login_required
def view_pitch(id):
print(id)
pitch = Pitch.get_pitches(id)
if pitch is None:
    abort(404)
comment = Comment.get_comments(id)
return render_template('view-pitch.html', pitch=pitch, comment=comment, category_id=id)
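# Example request flow (illustrative): GET /categories/view_pitch/3 renders
# pitch id=3 with its comments; POST /write_comment/3 saves a new Comment and
# redirects back to the same view.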
| 26.028846 | 95 | 0.665682 | 348 | 2,707 | 5.014368 | 0.178161 | 0.05616 | 0.068768 | 0.043553 | 0.277937 | 0.210315 | 0.191977 | 0.101433 | 0.041261 | 0 | 0 | 0.005579 | 0.205393 | 2,707 | 103 | 96 | 26.281553 | 0.805672 | 0.011821 | 0 | 0.2 | 0 | 0 | 0.111027 | 0.03312 | 0 | 0 | 0 | 0 | 0 | 1 | 0.092308 | false | 0 | 0.092308 | 0 | 0.323077 | 0.015385 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f4ca9ce14e415ffd3cf457e7499bde21905e788d | 795 | py | Python | AMTraCInfo/AMTraCInfoScene.py | LeovR/amtrac-info | 319587d0c6d4665a31bface643d53de8895fdf66 | [
"Apache-2.0"
] | 1 | 2016-12-04T17:18:04.000Z | 2016-12-04T17:18:04.000Z | AMTraCInfo/AMTraCInfoScene.py | LeovR/amtrac-info | 319587d0c6d4665a31bface643d53de8895fdf66 | [
"Apache-2.0"
] | null | null | null | AMTraCInfo/AMTraCInfoScene.py | LeovR/amtrac-info | 319587d0c6d4665a31bface643d53de8895fdf66 | [
"Apache-2.0"
] | null | null | null | from _Framework.ControlSurfaceComponent import ControlSurfaceComponent
class AMTraCInfoScene(ControlSurfaceComponent):
__module__ = __name__
__doc__ = " AMTraC-Info Scene "
def __init__(self, parent, scene):
ControlSurfaceComponent.__init__(self)
self._parent = parent
self._scene = scene
scene.add_is_triggered_listener(self.is_triggered_fired)
def is_triggered_fired(self):
if self._scene.is_triggered:
self._parent.log_message(self._scene.name + " is triggered")
self._parent.send_message('{NP|' + self._scene.name.split(' ||')[0][:16])
else:
self._parent.log_message(self._scene.name + " is playing")
self._parent.send_message('{CP|' + self._scene.name.split(' ||')[0][:16])
| 37.857143 | 85 | 0.67044 | 88 | 795 | 5.556818 | 0.363636 | 0.122699 | 0.106339 | 0.08589 | 0.229039 | 0.229039 | 0.143149 | 0.143149 | 0 | 0 | 0 | 0.009539 | 0.208805 | 795 | 20 | 86 | 39.75 | 0.767886 | 0 | 0 | 0 | 0 | 0 | 0.071698 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.125 | false | 0 | 0.0625 | 0 | 0.375 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f4caec9522c036d8727e812da35c052bd3c4dd2f | 2,541 | py | Python | nicos_mlz/puma/devices/comb_ax.py | ebadkamil/nicos | 0355a970d627aae170c93292f08f95759c97f3b5 | [
"CC-BY-3.0",
"Apache-2.0",
"CC-BY-4.0"
] | 12 | 2019-11-06T15:40:36.000Z | 2022-01-01T16:23:00.000Z | nicos_mlz/puma/devices/comb_ax.py | ebadkamil/nicos | 0355a970d627aae170c93292f08f95759c97f3b5 | [
"CC-BY-3.0",
"Apache-2.0",
"CC-BY-4.0"
] | 4 | 2019-11-08T10:18:16.000Z | 2021-01-13T13:07:29.000Z | nicos_mlz/puma/devices/comb_ax.py | ISISComputingGroup/nicos | 94cb4d172815919481f8c6ee686f21ebb76f2068 | [
"CC-BY-3.0",
"Apache-2.0",
"CC-BY-4.0"
] | 6 | 2020-01-11T10:52:30.000Z | 2022-02-25T12:35:23.000Z | # -*- coding: utf-8 -*-
# *****************************************************************************
# NICOS, the Networked Instrument Control System of the MLZ
# Copyright (c) 2009-2021 by the NICOS contributors (see AUTHORS)
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; either version 2 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# Module authors:
# Oleg Sobolev <oleg.sobolev@frm2.tum.de>
#
# *****************************************************************************
"""Class for PUMA phi axis."""
from nicos.core import Attach, Moveable, Param
from nicos.devices.generic.axis import Axis
class CombAxis(Axis):
"""Class for PUMA phi axis.
Used when the psi axis must stay at the same angle relative to the
incoming beam, for example when the magnet is used.
"""
attached_devices = {
'fix_ax': Attach('axis that moves back', Moveable),
}
parameters = {
'iscomb': Param('If it is combined or normal axis',
type=bool, default=False, mandatory=True,
settable=True),
}
_fixpos = None
def doInit(self, mode):
Axis.doInit(self, mode)
self._update_fixpos(self.iscomb)
def doWriteIscomb(self, val):
self._update_fixpos(val)
def _update_fixpos(self, val):
self._fixpos = self.read(0) + self._attached_fix_ax.read(0) if val \
else None
def doIsAllowed(self, pos):
mainax = Axis.doIsAllowed(self, pos)
if not self.iscomb:
return mainax
relpos = self._fixpos - pos
fixax = self._attached_fix_ax.isAllowed(relpos)
if mainax[0] and fixax[0]:
return True, 'Ok'
return False, '%s: %s, %s: %s' % \
(self, mainax[1], self._attached_fix_ax, fixax[1])
def _postMoveAction(self):
if self.iscomb:
relpos = self._fixpos - self.read(0)
self._attached_fix_ax.maw(relpos)
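# Numeric sketch of the combined-axis invariant (illustrative values): with
# iscomb=True and _fixpos = 30 deg, moving this axis to 10 deg drives fix_ax
# to 30 - 10 = 20 deg, so the sum of both angles stays constant.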
| 33.434211 | 79 | 0.613538 | 333 | 2,541 | 4.606607 | 0.48048 | 0.016297 | 0.039113 | 0.044329 | 0.125163 | 0.083442 | 0.046936 | 0.046936 | 0.046936 | 0 | 0 | 0.016563 | 0.239669 | 2,541 | 75 | 80 | 33.88 | 0.777433 | 0.469894 | 0 | 0 | 0 | 0 | 0.061491 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.147059 | false | 0 | 0.058824 | 0 | 0.411765 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f4cb5a15a6b1b5f582e8e3be3107308992290816 | 12,945 | py | Python | NCube/NCube.py | mobigroup/ParaView-plugins | f7cf829f858dbb91f176d45b17df45cc3fe6cb99 | [
"MIT"
] | 41 | 2020-01-09T16:45:53.000Z | 2022-03-16T07:04:37.000Z | NCube/NCube.py | echinoids/ParaView-plugins | f7cf829f858dbb91f176d45b17df45cc3fe6cb99 | [
"MIT"
] | 1 | 2021-06-04T14:09:23.000Z | 2021-06-05T11:52:27.000Z | NCube/NCube.py | echinoids/ParaView-plugins | f7cf829f858dbb91f176d45b17df45cc3fe6cb99 | [
"MIT"
] | 6 | 2020-03-15T14:35:52.000Z | 2021-07-31T16:44:07.000Z | # -*- coding: utf-8 -*-
# Copyright (c) 2020 Alexey Pechnikov. All rights reserved.
# https://orcid.org/0000-0001-9626-8615 (ORCID)
# pechnikov@mobigroup.ru (email)
# License: http://opensource.org/licenses/MIT
# process [multi]geometry
def _NCubeGeometryToPolyData(geometry, dem=None):
#from shapely.geometry.base import BaseGeometry, BaseMultipartGeometry
from vtk import vtkPolyData, vtkAppendPolyData, vtkPoints, vtkCellArray, vtkStringArray, vtkIntArray, vtkFloatArray, vtkBitArray
import xarray as xr
import numpy as np
if geometry is None or geometry.is_empty:
return
vtk_points = vtkPoints()
vtk_cells = vtkCellArray()
# get part(s) of (multi)geometry
#if isinstance(geometry, (BaseMultipartGeometry)):
if geometry.type.startswith('Multi') or geometry.type == 'GeometryCollection':
geometries = [geom for geom in geometry]
else:
geometries = [geometry]
for geom in geometries:
# polygon
#print ("geom.type", geom.type)
if geom.type == 'Polygon':
coords = np.asarray(geom.exterior.coords)
else:
coords = np.asarray(geom.coords)
#print ("coords", coords)
xs = coords[:,0]
ys = coords[:,1]
if coords.shape[1] > 2:
zs = np.array(coords[:,2])
else:
zs = np.zeros(len(xs))
#print (xs)
# rasterize geometries (lines only, not points)
# alas, modern scipy or matplotlib don't work in ParaView 5.7 on MacOS
if dem is not None:
# print (dem)
if dem.res and len(xs)>1:
res = min(dem.res)
_xs = [xs[:1]]
_ys = [ys[:1]]
_zs = [zs[:1]]
for (x0,y0,z0,x,y,z) in zip(xs[:-1],ys[:-1],zs[:-1],xs[1:],ys[1:],zs[1:]):
length = max(abs(x-x0),abs(y-y0))
num = round(length/res+0.5)
# print ("num",num)
if num > 1:
_x = np.linspace(x0,x,num)
_y = np.linspace(y0,y,num)
_z = np.linspace(z0,z,num)
_xs.append(_x[1:])
_ys.append(_y[1:])
_zs.append(_z[1:])
else:
_xs.append([x])
_ys.append([y])
_zs.append([z])
xs = np.concatenate(_xs)
ys = np.concatenate(_ys)
zs = np.concatenate(_zs)
zs += dem.sel(x=xr.DataArray(xs), y=xr.DataArray(ys), method='nearest').values
#print ("xs", xs)
mask = np.where(~np.isnan(zs))[0]
mask2 = np.where(np.diff(mask)!=1)[0]+1
xs = np.split(xs[mask], mask2)
ys = np.split(ys[mask], mask2)
zs = np.split(zs[mask], mask2)
for (_xs,_ys,_zs) in zip(xs,ys,zs):
# need to have 2 points or more
#if len(_xs) <= 1:
# continue
vtk_cells.InsertNextCell(len(_xs))
for (x,y,z) in zip(_xs,_ys,_zs):
pointId = vtk_points.InsertNextPoint(x, y, z)
vtk_cells.InsertCellPoint(pointId)
# not enougth valid points
if vtk_points.GetNumberOfPoints() < 1:
return
#print ("GetNumberOfPoints", vtk_points.GetNumberOfPoints())
vtk_polyData = vtkPolyData()
vtk_polyData.SetPoints(vtk_points)
#if geometry.type in ['Point','MultiPoint']:
if geometry.type.endswith('Point'):
vtk_polyData.SetVerts(vtk_cells)
else:
vtk_polyData.SetLines(vtk_cells)
return vtk_polyData
# process geodataframe and xarray raster
def _NCubeGeometryOnTopography(df, dem):
from vtk import vtkPolyData, vtkAppendPolyData, vtkPoints, vtkCellArray, vtkStringArray, vtkIntArray, vtkFloatArray, vtkBitArray
from shapely.geometry.base import BaseGeometry, BaseMultipartGeometry
from shapely.geometry import box
#import xarray as xr
import numpy as np
#print ("_NCUBEGeometryOnTopography start")
dem_extent = dem_crs = None
if dem is not None:
# TODO: it would be better to use the NODATA values directly
if dem.values.dtype not in [np.dtype('float16'),np.dtype('float32'),np.dtype('float64'),np.dtype('float128')]:
dem.values = dem.values.astype("float32")
# dask array can't be processed by this way
dem.values[dem.values == dem.nodatavals[0]] = np.nan
# NaN border for easy lookup
dem.values[0,:] = np.nan
dem.values[-1,:] = np.nan
dem.values[:,0] = np.nan
dem.values[:,-1] = np.nan
dem_extent = box(dem.x.min(),dem.y.min(),dem.x.max(),dem.y.max())
dem_crs = dem.crs if 'crs' in dem.attrs.keys() else None
#print (dem.values)
df = _NCubeGeoDataFrameToTopography(df, dem_extent, dem_crs)
groups = df.index.unique()  # [11454:11455]
#print ("groups",groups)
# TEST
#groups = groups[:1]
# iterate blocks
vtk_blocks = []
for group in groups:
#print ("group",group)
# Python 2 string issue wrapped
if hasattr(group, 'encode'):
# select only equals
_df = df[df.index.str.startswith(group)&df.index.str.endswith(group)&(df.index.str.len()==len(group))].reset_index()
else:
_df = df[df.index == group].reset_index()
#print (_df.geometry)
vtk_appendPolyData = vtkAppendPolyData()
# iterate rows with the same attributes and maybe multiple geometries
for rowidx,row in _df.iterrows():
#print ("row", row)
vtk_polyData = _NCubeGeometryToPolyData(row.geometry, dem)
if vtk_polyData is None:
#print ("vtk_polyData is None")
continue
vtk_arrays = _NCubeGeoDataFrameRowToVTKArrays(row.to_dict())
for (vtk_arr, val) in vtk_arrays:
if val is None:
continue
# for _ in range(vtk_polyData.GetNumberOfCells()):
# vtk_arr.InsertNextValue(val)
if isinstance(val, (tuple)):
# if np.any(np.isnan(val)):
# continue
# add vector
for _ in range(vtk_polyData.GetNumberOfCells()):
vtk_arr.InsertNextTuple(val)
vtk_polyData.GetCellData().AddArray(vtk_arr)
else:
# add scalar
for _ in range(vtk_polyData.GetNumberOfCells()):
vtk_arr.InsertNextValue(val)
vtk_polyData.GetCellData().AddArray(vtk_arr)
# compose vtkPolyData
vtk_appendPolyData.AddInputData(vtk_polyData)
# nothing to process
if vtk_appendPolyData.GetNumberOfInputConnections(0) == 0:
continue
vtk_appendPolyData.Update()
vtk_block = vtk_appendPolyData.GetOutput()
vtk_blocks.append((str(group),vtk_block))
#print ("_NCUBEGeometryOnTopography end")
return vtk_blocks
def _NCubeGeoDataFrameToTopography(df, dem_extent, dem_crs=None):
import geopandas as gpd
# extract the geometry coordinate system
if df.crs is not None and df.crs != {}:
df_crs = df.crs
else:
df_crs = None
print ("df_crs",df_crs,"dem_crs",dem_crs)
# reproject when the both coordinate systems are defined and these are different
if df_crs and dem_crs:
# load error fix for paraView 5.8.1rc1 Python3
try:
# ParaView 5.7 Python 2.7
df_extent = gpd.GeoDataFrame([], crs={'init' : dem_crs}, geometry=[dem_extent])
except:
# ParaView 5.8 RC2 Python 3.7
df_extent = gpd.GeoDataFrame([], crs=dem_crs, geometry=[dem_extent])
print ("df_extent", df_extent.crs, df_extent.geometry)
extent_reproj = df_extent.to_crs(df_crs)['geometry'][0]
# if original or reprojected raster extent is valid, use it to crop geometry
print ("crop geometry", extent_reproj.is_valid,extent_reproj.wkt)
if extent_reproj.is_valid:
# geometry intersection to raster extent in geometry coordinate system
df = df[df.geometry.intersects(extent_reproj)].copy()
# dangerous operation, see https://github.com/Toblerity/Shapely/issues/553
df['geometry'] = df.geometry.intersection(extent_reproj)
try:
# ParaView 5.7 Python 2.7
# reproject [cropped] geometry to original raster coordinates if needed
return df.to_crs({'init' : dem_crs})
except:
# ParaView 5.8 RC2 Python 3.7
return df.to_crs(dem_crs)
# let's assume the coordinate systems are the same
if dem_extent is not None:
df = df[df.geometry.intersects(dem_extent)]
        # work around an issue intersecting 3D geometries with a 2D extent
# if df.geometry[0].has_z:
# print ("df.geometry[0].has_z")
# else:
# df['geometry'] = df.geometry.intersection(dem_extent)
return df
# Load shapefile or geojson
def _NCubeGeoDataFrameLoad(shapename, shapecol=None, shapeencoding=None):
import geopandas as gpd
df = gpd.read_file(shapename, encoding=shapeencoding)
    # important: drop rows with null geometry
df = df[df.geometry.notnull()]
if shapecol is not None:
df = df.sort_values(shapecol).set_index(shapecol)
else:
# to merge all geometries in output
df.index = len(df)*['None']
return df
def _NcubeDataFrameToVTKArrays(df):
from vtk import vtkStringArray, vtkIntArray, vtkFloatArray, vtkBitArray
arrays = []
# Create columns
for colname in df.columns:
dtype = df[colname].dtype
#print (colname, dtype)
if dtype in ['O','str','datetime64']:
vtk_arr = vtkStringArray()
elif dtype in ['int64']:
vtk_arr = vtkIntArray()
elif dtype in ['float64']:
vtk_arr = vtkFloatArray()
elif dtype in ['bool']:
vtk_arr = vtkBitArray()
else:
print ('Unknown Pandas column type', dtype)
vtk_arr = vtkStringArray()
vtk_arr.SetNumberOfComponents(1)
vtk_arr.SetName(colname)
for val in df[colname]:
            # various data types may be stored as strings
if isinstance(vtk_arr, vtkStringArray):
val = str(val)
vtk_arr.InsertNextValue(val)
arrays.append(vtk_arr)
return arrays
# Convert one row (as a dict) into a list of (vtkArray, value) pairs.
# Numbers in scientific notation are not detected and fall back to strings:
# https://re-thought.com/how-to-suppress-scientific-notation-in-pandas/
def _NCubeGeoDataFrameRowToVTKArrays(items):
#vtkPolyData, vtkAppendPolyData, vtkPoints, vtkCellArray,
from vtk import vtkStringArray, vtkIntArray, vtkFloatArray, vtkBitArray
from shapely.geometry.base import BaseGeometry, BaseMultipartGeometry
vtk_row = []
for (key,value) in items.items():
#print (key,value)
components = 1
# define attribute as array
if isinstance(value, (BaseMultipartGeometry)):
#print ('BaseMultipartGeometry')
continue
elif isinstance(value, (BaseGeometry)):
#print ('BaseGeometry')
continue
elif isinstance(value, (tuple)):
#print ('vtkFloatArray')
vtk_arr = vtkFloatArray()
components = len(value)
# elif isinstance(value, (int)) or (type(value)==str and value.replace('-','',1).isdigit()):
        # note: bool is a subclass of int, so exclude it here to reach the vtkBitArray branch below
        elif (isinstance(value, int) and not isinstance(value, bool)) \
                or (type(value)==str and value[:1] in ('-','+') and value[1:].isdigit()) \
                or (type(value)==str and value.isdigit()):
# ParaView category editor converts strings to numeric when it's possible
#print('vtkIntArray')
value = int(value)
vtk_arr = vtkIntArray()
# elif isinstance(value, (float)) or (type(value)==str and value.replace('-','',1).replace('.','',1).isdigit()):
        elif isinstance(value, (float)) \
                or (type(value)==str and value[:1] in ('-','+') and value[1:].replace('.','',1).isdigit()) \
                or (type(value)==str and value.replace('.','',1).isdigit()):
# ParaView category editor converts strings to numeric when it's possible
#print ('vtkFloatArray')
value = float(value)
vtk_arr = vtkFloatArray()
elif isinstance(value, (bool)):
#print ('vtkBitArray')
vtk_arr = vtkBitArray()
else:
            # various data types may be stored as strings
value = str(value)
vtk_arr = vtkStringArray()
vtk_arr.SetNumberOfComponents(components)
vtk_arr.SetName(key)
vtk_row.append((vtk_arr, value))
return vtk_row
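# Illustrative sketch (hypothetical attribute values): converting one row into
# VTK arrays and attaching them to a polydata's cell data, mirroring the usage
# in _NCubeGeometryOnTopography() above:
#   row = {'name': 'fault_1', 'dip': 45.0, 'active': True}
#   for (vtk_arr, val) in _NCubeGeoDataFrameRowToVTKArrays(row):
#       for _ in range(vtk_polyData.GetNumberOfCells()):
#           vtk_arr.InsertNextValue(val)
#       vtk_polyData.GetCellData().AddArray(vtk_arr)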
| 39.587156 | 132 | 0.586945 | 1,514 | 12,945 | 4.913474 | 0.233157 | 0.019357 | 0.017879 | 0.011292 | 0.286598 | 0.243581 | 0.204732 | 0.178922 | 0.131066 | 0.125017 | 0 | 0.015558 | 0.299884 | 12,945 | 326 | 133 | 39.708589 | 0.805252 | 0.262804 | 0 | 0.254902 | 0 | 0 | 0.022436 | 0 | 0 | 0 | 0 | 0.003067 | 0 | 1 | 0.029412 | false | 0 | 0.058824 | 0 | 0.137255 | 0.019608 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f4cbdc3057b4742a06cb7e225af01af3f5dd986a | 962 | py | Python | transform/transform_example.py | aprilove/OpenCV-Practice | d9253c79a089f036743c3cbeee617343c29fbe19 | [
"MIT"
] | null | null | null | transform/transform_example.py | aprilove/OpenCV-Practice | d9253c79a089f036743c3cbeee617343c29fbe19 | [
"MIT"
] | null | null | null | transform/transform_example.py | aprilove/OpenCV-Practice | d9253c79a089f036743c3cbeee617343c29fbe19 | [
"MIT"
] | null | null | null | from transform import four_point_transform
import numpy as np
import argparse
import cv2
# construct the argument parse and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-i", "--image", help = "path to the image file")
ap.add_argument("-c", "--coords",
help = "comma seperated list of source points")
args = vars(ap.parse_args())
# load the image and grab the source coordinates (i.e. the list
# of (x, y) points)
# NOTE: using the 'eval' function is bad form, but for this example
# let's just roll with it -- in future posts I'll show you how to
# automatically determine the coordinates without pre-supplying them
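# A safer alternative (illustrative sketch): ast.literal_eval only evaluates
# Python literals, so it accepts coordinate lists such as
# "[(0, 0), (100, 0), (100, 100), (0, 100)]" without eval's security risks:
#   import ast
#   pts = np.array(ast.literal_eval(args["coords"]), dtype = "float32")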
image = cv2.imread(args["image"])
pts = np.array(eval(args["coords"]), dtype = "float32")
# apply the four point transform to obtain a "bird's-eye view" of
# the image
warped = four_point_transform(image, pts)
# show the original and warped images
cv2.imshow("Original", image)
cv2.imshow("Warped", warped)
cv2.waitKey(0) | 40.083333 | 68 | 0.741164 | 155 | 962 | 4.554839 | 0.574194 | 0.038244 | 0.050992 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.009744 | 0.14657 | 962 | 24 | 69 | 40.083333 | 0.850183 | 0.45738 | 0 | 0 | 0 | 0 | 0.214425 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.266667 | 0 | 0.266667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f4cfa0d1aee460110dad81e1243f935703c652e3 | 5,852 | py | Python | httprequest_blueprints/execute_request.py | shipyardapp/httprequest-blueprints | 402aacd6a57d9bec594b54823665c9a9889c5b0e | [
"Apache-2.0"
] | null | null | null | httprequest_blueprints/execute_request.py | shipyardapp/httprequest-blueprints | 402aacd6a57d9bec594b54823665c9a9889c5b0e | [
"Apache-2.0"
] | null | null | null | httprequest_blueprints/execute_request.py | shipyardapp/httprequest-blueprints | 402aacd6a57d9bec594b54823665c9a9889c5b0e | [
"Apache-2.0"
] | null | null | null | import argparse
import requests
import os
import sys
import hashlib
def get_args():
parser = argparse.ArgumentParser()
parser.add_argument('--method', dest='method', required=True,
choices={'GET', 'POST', 'PUT', 'PATCH'})
parser.add_argument('--url', dest='url', required=True)
parser.add_argument('--authorization-header', dest='authorization_header',
required=False, default=None)
parser.add_argument(
'--content-type',
dest='content_type',
required=False,
default=None,
choices={
'text/plain',
'application/xml',
'application/json',
'text/html'})
parser.add_argument('--message', dest='message', required=False)
parser.add_argument(
'--print-response',
dest='print_response',
default='FALSE',
choices={
'TRUE',
'FALSE'},
required=False)
parser.add_argument(
'--destination-file-name',
dest='destination_file_name',
default='response.txt',
required=False)
parser.add_argument(
'--destination-folder-name',
dest='destination_folder_name',
default='',
required=False)
args = parser.parse_args()
return args
def combine_folder_and_file_name(folder_name, file_name):
"""
    Combine the provided folder_name and file_name into one path variable.
"""
combined_name = os.path.normpath(
f'{folder_name}{"/" if folder_name else ""}{file_name}')
combined_name = os.path.normpath(combined_name)
return combined_name
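# Examples (illustrative, POSIX-style paths):
#   combine_folder_and_file_name('reports', 'out.txt') -> 'reports/out.txt'
#   combine_folder_and_file_name('', 'out.txt')        -> 'out.txt'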
def clean_folder_name(folder_name):
"""
    Clean a folder name by removing duplicate '/' as well as leading and trailing '/' characters.
"""
folder_name = folder_name.strip('/')
if folder_name != '':
folder_name = os.path.normpath(folder_name)
return folder_name
def convert_to_boolean(string):
"""
Shipyard can't support passing Booleans to code, so we have to convert
string values to their boolean values.
"""
if string in ['True', 'true', 'TRUE']:
value = True
else:
value = False
return value
def execute_request(method, url, headers=None, message=None, params=None):
try:
if method == 'GET':
req = requests.get(url, headers=headers, params=params)
elif method == 'POST':
req = requests.post(
url,
headers=headers,
data=message,
params=params)
elif method == 'PUT':
req = requests.put(
url,
headers=headers,
data=message,
params=params)
elif method == 'PATCH':
req = requests.patch(
url, headers=headers, data=message, params=params)
except requests.exceptions.HTTPError as eh:
print(
'URL returned an HTTP Error.\n',
eh)
sys.exit(1)
except requests.exceptions.ConnectionError as ec:
print(
'Could not connect to the URL. Check to make sure that it was typed correctly.\n',
ec)
sys.exit(2)
except requests.exceptions.Timeout as et:
print('Timed out while connecting to the URL.\n', et)
sys.exit(3)
except requests.exceptions.RequestException as e:
        print('Unexpected error occurred. Please try again.\n', e)
        sys.exit(4)
return req
def add_to_headers(headers, key, value):
headers[key] = value
return headers
def create_folder_if_dne(destination_folder_name):
if not os.path.exists(destination_folder_name) and (
destination_folder_name != ''):
os.makedirs(destination_folder_name)
def write_response_to_file(req, destination_name):
with open(destination_name, 'w') as response_output:
response_output.write(req.text)
return
def print_response_to_output(req):
print(f'\n\n Response body: {req.content}')
def hash_text(text_var):
hashed_text = hashlib.sha256(text_var.encode('ascii')).hexdigest()
return hashed_text
def main():
args = get_args()
method = args.method
url = args.url
url_hash = hash_text(url)
authorization_header = args.authorization_header
content_type = args.content_type
message = args.message
print_response = convert_to_boolean(args.print_response)
artifact_directory_default = f'{os.environ.get("USER")}-artifacts'
base_folder_name = clean_folder_name(
f'{os.environ.get("SHIPYARD_ARTIFACTS_DIRECTORY",artifact_directory_default)}/httprequest-blueprints/responses')
artifact_directory_location = combine_folder_and_file_name(
base_folder_name, f'{method.lower()}_{url_hash}.txt')
create_folder_if_dne(base_folder_name)
destination_file_name = args.destination_file_name
destination_folder_name = clean_folder_name(args.destination_folder_name)
destination_name = combine_folder_and_file_name(
destination_folder_name, destination_file_name)
headers = {}
create_folder_if_dne(destination_folder_name)
if content_type:
headers = add_to_headers(headers, 'Content-Type', content_type)
if authorization_header:
headers = add_to_headers(
headers,
'Authorization',
authorization_header)
req = execute_request(method, url, headers, message)
write_response_to_file(req, destination_name)
print(
f'Successfully sent request {url} and stored response to {destination_name}.')
write_response_to_file(req, artifact_directory_location)
print(f'Artifact stored at {artifact_directory_location}')
if print_response:
        print_response_to_output(req)
if __name__ == '__main__':
main()
| 30.8 | 120 | 0.644737 | 685 | 5,852 | 5.267153 | 0.248175 | 0.074834 | 0.058204 | 0.018293 | 0.222284 | 0.104213 | 0.081486 | 0.049889 | 0.027716 | 0 | 0 | 0.0016 | 0.252563 | 5,852 | 189 | 121 | 30.962963 | 0.823274 | 0.04836 | 0 | 0.134228 | 0 | 0 | 0.173763 | 0.057257 | 0 | 0 | 0 | 0 | 0 | 1 | 0.073826 | false | 0 | 0.033557 | 0 | 0.161074 | 0.09396 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f4d1b52681aab3ad4c8a395bd364f92151952b3d | 790 | bzl | Python | locale/locale.bzl | floriankoch/distroless | 0251235107c8551087a42269fd31eed418755e6b | [
"Apache-2.0"
] | 6 | 2019-04-29T13:40:00.000Z | 2021-06-24T14:59:41.000Z | locale/locale.bzl | floriankoch/distroless | 0251235107c8551087a42269fd31eed418755e6b | [
"Apache-2.0"
] | 30 | 2019-05-06T13:46:36.000Z | 2021-09-15T17:50:36.000Z | locale/locale.bzl | floriankoch/distroless | 0251235107c8551087a42269fd31eed418755e6b | [
"Apache-2.0"
] | 19 | 2019-05-06T14:32:51.000Z | 2021-06-19T15:25:40.000Z | """A rule to unpack c locale from the debian package."""
def _impl(ctx):
ctx.actions.run(
executable = ctx.executable._extract,
arguments = [
ctx.file.deb.path,
ctx.outputs.tar.path,
],
inputs = [ctx.file.deb],
outputs = [ctx.outputs.tar],
)
locale = rule(
attrs = {
"deb": attr.label(
allow_single_file = [".deb"],
mandatory = True,
),
# Implicit dependencies.
"_extract": attr.label(
default = Label("//locale:extract_locale"),
cfg = "host",
executable = True,
allow_files = True,
),
},
executable = False,
outputs = {
"tar": "%{name}.tar",
},
implementation = _impl,
)
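# Example BUILD usage (illustrative; the deb label is hypothetical):
#
# load("//locale:locale.bzl", "locale")
#
# locale(
#     name = "locale_c",
#     deb = "@libc_bin_deb//file",
# )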
| 23.235294 | 56 | 0.489873 | 74 | 790 | 5.121622 | 0.540541 | 0.055409 | 0.05277 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.372152 | 790 | 33 | 57 | 23.939394 | 0.764113 | 0.093671 | 0 | 0.068966 | 0 | 0 | 0.078873 | 0.032394 | 0 | 0 | 0 | 0 | 0 | 1 | 0.034483 | false | 0 | 0 | 0 | 0.034483 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f4d1d07f00ec2a7661cbdb52eaf4b3bcf3a2ce68 | 2,094 | py | Python | src/dataprocessing/datasets/ds_mcd.py | bask0/h2m | 4505b7958b3bd524b059d9585294f27e8a22fc1e | [
"MIT"
] | 1 | 2022-03-27T00:43:13.000Z | 2022-03-27T00:43:13.000Z | src/dataprocessing/datasets/ds_mcd.py | bask0/h2m | 4505b7958b3bd524b059d9585294f27e8a22fc1e | [
"MIT"
] | null | null | null | src/dataprocessing/datasets/ds_mcd.py | bask0/h2m | 4505b7958b3bd524b059d9585294f27e8a22fc1e | [
"MIT"
] | 1 | 2022-03-23T14:30:07.000Z | 2022-03-23T14:30:07.000Z | """
Preprocess mcd (modis land cover) dataset.
MODIS land cover fractions
https://lpdaac.usgs.gov/product_search/?collections=Combined+MODIS&collections=Terra+MODIS&collections=Aqua+MODIS&view=list
In:
Spatial: 0.0083 deg
Out:
Spatial: 0.033 deg
Steps:
1) Harmonize
2) Regrid
"""
import os
import xarray as xr
import logging
import numpy as np
from utils.pyutils import exit_if_exists, rm_existing
from utils.cdo_wrappers import cdo_remap
from dataprocessing.plotting import plot_var
from dataprocessing.datasets.config import \
dir_source, \
dir_target, \
overwrite
logging.info('Processing dataset: mcd')
file_in = os.path.join(
dir_source, '0d0083_static/MCD12Q1/V005/Data/v005_2/MCD12Q1plusC4_fraction.GLOBAL01KM.2001001.LC.01KM.nc'
)
file_out = os.path.join(
dir_target, 'processed/0d033/static/mcd.nc'
)
file_tmp = file_out.replace('.nc', '_tmp.nc')
exit_if_exists(file_out, overwrite)
os.makedirs(os.path.dirname(file_out), exist_ok=True)
ds = xr.open_dataset(file_in)
ds = ds.rename({
'MCD12Q1plusC4_fraction': 'data',
'longitude': 'lon',
'latitude': 'lat'})
lat_attrs = dict(
long_name='Latitude',
standard_name='latitude',
units='degrees_north',
axis='Y',
valid_min=-90.0,
valid_max=90.0
)
lon_attrs = dict(
long_name='Longitude',
standard_name='longitude',
units='degrees_east',
axis='X',
modulo=360.0,
topology='circular',
valid_min=-180.0,
valid_max=180.0,
)
ds.lat.attrs.update(lat_attrs)
ds.lon.attrs.update(lon_attrs)
ds.attrs['classes'] = np.array([
l[1][1:].decode('utf-8').replace(' ', '_').lower()
for l in ds.Legend.values
])[:-2]
ds = ds.drop('Legend')
ds['data'] = ds.data.expand_dims('var', 0)
ds = ds.where(~ds.data.isnull(), 0)
ds.to_netcdf(file_tmp)
ds.close()
cdo_remap(
in_files=file_tmp,
out_files=file_out,
nlat_target=180*30,
nlon_target=360*30,
remap_alg='laf')
rm_existing(file_tmp)
plot_path = __file__.replace('.py', '.jpg')
plot_var(path=file_out, plot_path=plot_path)
logging.info('Done processing dataset: mcd')
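# Quick sanity check of the regridded output (illustrative):
#   import xarray as xr
#   print(xr.open_dataset(file_out).data.shape)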
| 21.151515 | 123 | 0.705826 | 317 | 2,094 | 4.460568 | 0.44795 | 0.029703 | 0.019802 | 0.018388 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.046343 | 0.144699 | 2,094 | 98 | 124 | 21.367347 | 0.74316 | 0.132283 | 0 | 0 | 0 | 0.015385 | 0.185841 | 0.07854 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.123077 | 0 | 0.123077 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f4d1d7f0bc80a87731763d052f9a3952586a46db | 3,618 | py | Python | source/preprocessing_functions.py | elopezfune/Road_vs_Linear | 4c4839fe9d5c51907fd2ec8712deec63e409e506 | [
"Apache-2.0"
] | null | null | null | source/preprocessing_functions.py | elopezfune/Road_vs_Linear | 4c4839fe9d5c51907fd2ec8712deec63e409e506 | [
"Apache-2.0"
] | null | null | null | source/preprocessing_functions.py | elopezfune/Road_vs_Linear | 4c4839fe9d5c51907fd2ec8712deec63e409e506 | [
"Apache-2.0"
] | null | null | null | import numpy as np
import pandas as pd
import json
from scipy.stats import ttest_ind
# Loads files provided their path
# ===============================
def load_data(path):
# Loads the data
with open(path) as f:
g = json.load(f)
# Converts json dataset from dictionary to dataframe
#print('Data loaded correctly')
df = pd.DataFrame.from_dict(g)
return df
# Replaces string by NaN and delete the missing values
# ====================================================
def replace_delete_na(df,cols,char):
df = df.copy()
for el in cols:
df[el] = df[el].replace(char,np.NaN)
df.dropna(subset = cols, inplace=True)
return df
# Checks for duplicated data and removes them
# ===========================================
def duplicated_data(df):
# Copies the dataframe
df = df.copy()
# Rows containing duplicate data
print("Removed ", df[df.duplicated()].shape[0], ' duplicate rows')
# Returns a dataframe with the duplicated rows removed
return df.drop_duplicates()
# Converts epoch time to datetime and sort by date
# Keep the %Y/%m/%d %H:%M:%S format since a priori we don't know the time scale of events
def to_datetime(df,var):
# Copies the dataframe
df = df.copy()
if df[var].dtype!=int:
df[var] = df[var].astype(int)
#df[var] = pd.to_datetime(df[var], utc=True, format = "%Y%m%d",errors = 'coerce').dt.strftime('%Y-%m-%d')
    df[var] = pd.to_datetime(df[var], unit='s', errors='coerce').dt.strftime('%Y/%m/%d %H:%M:%S') # parse epoch seconds
df.sort_values(by=[var],inplace=True)
df.reset_index(inplace=True,drop=True)
# Returns the dataframe
return df
# Checks for columns with missing values (NaNs)
def check_missing_values(df,cols=None,axis=0):
# Copies the dataframe
df = df.copy()
if cols != None:
df = df[cols]
missing_num = df.isnull().sum(axis).to_frame().rename(columns={0:'missing_num'})
missing_num['missing_percent'] = df.isnull().mean(axis)*100
result = missing_num.sort_values(by='missing_percent',ascending = False)
# Returns a dataframe with columns with missing data as index and the number and percent of NaNs
return result[result["missing_percent"]>0.0]
def id_to_road_lin(df,variable,rules):
#Copies the dataframe
df = df.copy()
# Creates a new column with the type of distance
newcol = []
for el in df[variable]:
if el[0] in rules:
newcol.append('road')
else:
newcol.append('linear')
df[variable] = newcol
# Returns the dataframe
return df
def outlier_removal(df,variables):
#Copies the dataframe
df = df.copy()
#Filters the dataframe
df_vars = df[variables]
#Outliers removal
Q1 = df_vars.quantile(0.25)
Q3 = df_vars.quantile(0.75)
IQR = Q3 - Q1
df_vars = df_vars[~((df_vars < (Q1 - 1.5 * IQR)) | (df_vars > (Q3 + 1.5 * IQR))).any(axis=1)]
df = df.iloc[df_vars.index]
df.reset_index(inplace=True,drop=True)
return df
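# Example (illustrative): rows outside [Q1 - 1.5*IQR, Q3 + 1.5*IQR] are dropped.
#   data = pd.DataFrame({'speed': [0.9, 1.0, 1.1, 1.2, 50.0]})
#   outlier_removal(data, ['speed']) # removes the 50.0 row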
def t_student_test(x,y):
stat, p = ttest_ind(x, y)
print('stat=%.3f, p=%.3f' % (stat, p))
if p > 0.05:
print('Probably the same distribution.')
else:
print('Probably different distributions.') | 30.403361 | 118 | 0.62576 | 533 | 3,618 | 4.168856 | 0.318949 | 0.021602 | 0.025203 | 0.054005 | 0.328983 | 0.308281 | 0.252925 | 0.157516 | 0.157516 | 0.157516 | 0 | 0.01126 | 0.214483 | 3,618 | 119 | 119 | 30.403361 | 0.770584 | 0.336374 | 0 | 0.30303 | 0 | 0 | 0.099198 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.136364 | false | 0 | 0.060606 | 0 | 0.318182 | 0.075758 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f4d2eb1713259d6b886df203505323126bb0b670 | 2,376 | py | Python | main.py | pan93412/tgggbot | d55eb2451bb2c7a351a7cf8e0bfdf56f3c7b5924 | [
"MIT"
] | 3 | 2018-08-21T16:10:40.000Z | 2021-02-23T02:25:13.000Z | main.py | pan93412/tgggbot | d55eb2451bb2c7a351a7cf8e0bfdf56f3c7b5924 | [
"MIT"
] | null | null | null | main.py | pan93412/tgggbot | d55eb2451bb2c7a351a7cf8e0bfdf56f3c7b5924 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
'''
GuGu Bot (implementation)

Set the token in config.py before starting,
and make sure the Privacy setting is disabled via @BotFather:
/setprivacy -> set to Disable
Then message your bot directly, or add it to your group.
'''
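# Illustrative config.py (attribute names inferred from their usage below;
# all values here are placeholders):
#   token = "123456789:AA..." # bot token from @BotFather
#   detectHelp = True # reply to /help
#   detectStart = True # reply to /start
#   detectText = ["trigger word"] # substrings that trigger a reply
#   randTxt = ["reply 1", "reply 2"] # pool of random text replies
#   sendPhoto1 = "<file_id or URL>"
#   sendPhoto2 = "<file_id or URL>"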
import config as c
import strings as s # import string resources
from libs import botHandler, randomText
import time, sys, random
# Parameters
# set up botHandler
bot = botHandler(c.token)
botInf = bot.getMe()["result"]
print(botInf)
# if the token credential or bot username is not set
if c.token == "":
raise Exception(s.tokenNotSet)
if "username" not in botInf:
raise Exception(s.tokenInvaild)
# message shown once the bot finishes initializing
print(s.initFinished.format(botInf))
# main loop
while True:
try:
        updates = bot.getUpdates() # fetch updates received by the bot
        if updates is None: # no updates
continue
        if 'message' in updates[-1] and 'text' in updates[-1]['message']: # if the received update is a text message
msg = updates[-1]['message']['text']
else:
continue
        thechat = updates[-1]['message']['chat']['id'] # chat ID of the sender
if 'username' not in updates[-1]['message']['from']:
            updates[-1]['message']['from']['username'] = s.noUsername # if the sender has not set a username
        # log the message
print(s.receivedMsgInfo.format(
updates[-1]['message']['from']['username'],
msg,
time.strftime(s.timeFormat, time.localtime(updates[-1]['message']['date']))
))
        # command handling
if c.detectHelp:
if msg == "/help" or msg == "/help" + botInf["username"]:
bot.sendMessage(thechat, helptxt) # 傳送說明
continue
        choicePhotoOrTxt = random.choice(range(0, 3)) # randomly choose which reply to send
        # if the message contains any trigger text from c.detectText
for i in c.detectText:
if msg.find(i) != -1:
if choicePhotoOrTxt == 0:
bot.sendMessage(thechat, randomText(c.randTxt))
elif choicePhotoOrTxt == 1:
bot.sendDocument(thechat, c.sendPhoto1)
else:
bot.sendDocument(thechat, c.sendPhoto2)
break
if c.detectStart:
if msg == "/start" or msg == "/start@" + botInf["username"]:
                # reply with a random message or photo
                if choicePhotoOrTxt == 0:
                    bot.sendMessage(thechat, randomText(c.randTxt))
                elif choicePhotoOrTxt == 1:
                    bot.sendDocument(thechat, c.sendPhoto1)
                else:
                    bot.sendDocument(thechat, c.sendPhoto2)
except KeyboardInterrupt:
        raise # re-raise when the user presses Ctrl-C
except:
        print(s.mainHappenErr.format(sys.exc_info())) # print the error message
| 26.10989 | 85 | 0.613215 | 280 | 2,376 | 5.196429 | 0.421429 | 0.043986 | 0.072165 | 0.06323 | 0.257045 | 0.219931 | 0.219931 | 0.219931 | 0.219931 | 0.219931 | 0 | 0.012284 | 0.246212 | 2,376 | 90 | 86 | 26.4 | 0.800112 | 0.146886 | 0 | 0.321429 | 0 | 0 | 0.081459 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.071429 | 0 | 0.071429 | 0.071429 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f4d4acf282fdf3eb291e011f3ecea98d38570113 | 461 | py | Python | PMega/Section 17 - Computer Vision/batch_resizer.py | peternewman22/Python_Courses | 07a798b6f264fc6069eb1205c9d429f00fb54bc5 | [
"MIT"
] | null | null | null | PMega/Section 17 - Computer Vision/batch_resizer.py | peternewman22/Python_Courses | 07a798b6f264fc6069eb1205c9d429f00fb54bc5 | [
"MIT"
] | null | null | null | PMega/Section 17 - Computer Vision/batch_resizer.py | peternewman22/Python_Courses | 07a798b6f264fc6069eb1205c9d429f00fb54bc5 | [
"MIT"
] | null | null | null | import os
import cv2
# get list of files:
# print(os.listdir())
file_list = os.listdir()
image_list = []
# now making my actual file list
for x in file_list:
if x[-3:] == "jpg":
image_list.append(x)
# print(image_list)
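# Equivalent selection with glob (illustrative):
#   import glob
#   image_list = glob.glob("*.jpg")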
def batch_resize(img_list):
for x in img_list:
img = cv2.imread(x,1)
resized_img = cv2.resize(img,(100,100))
cv2.imwrite("{}_batchresized.jpg".format(x[:-4],), resized_img)
batch_resize(image_list)
| 20.954545 | 71 | 0.64859 | 74 | 461 | 3.864865 | 0.459459 | 0.125874 | 0.055944 | 0.06993 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.035422 | 0.203905 | 461 | 21 | 72 | 21.952381 | 0.743869 | 0.18872 | 0 | 0 | 0 | 0 | 0.059621 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.076923 | false | 0 | 0.153846 | 0 | 0.230769 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f4d4f2942503f08f6704be2a9a81acd9cca8ac6b | 8,983 | py | Python | scripts/pcs_compute_forward_to_siem.py | lsmithpanw/pcs-toolbox | c713569fe6bf067b9284fca48e7fb0dbc395f5a9 | [
"ISC"
] | 18 | 2021-12-17T16:45:46.000Z | 2022-03-10T19:16:32.000Z | scripts/pcs_compute_forward_to_siem.py | lsmithpanw/pcs-toolbox | c713569fe6bf067b9284fca48e7fb0dbc395f5a9 | [
"ISC"
] | 13 | 2021-12-17T16:12:18.000Z | 2022-03-15T16:48:38.000Z | scripts/pcs_compute_forward_to_siem.py | lsmithpanw/pcs-toolbox | c713569fe6bf067b9284fca48e7fb0dbc395f5a9 | [
"ISC"
] | 15 | 2021-12-17T17:12:39.000Z | 2022-03-11T23:00:12.000Z | """ Collect Compute Audits, History, and Logs """
# Use this script to forward Audits, Console History, and Logs from Prisma Cloud Compute to a SIEM.
# It is expected to be called once an hour, by default, to read from the Prisma Cloud API and write to your SIEM API.
# It depends upon the SIEM to deduplicate data, and requires you to modify the `outbound_api_call()` function for your SIEM API.
import concurrent.futures
import datetime
import json
import inspect
import time
from pathlib import Path
from typing import Union
import requests
from dateutil import parser, tz
# pylint: disable=import-error
from prismacloud.api import pc_api, pc_utility
# --Configuration-- #
ENABLE_PROFILING = False
OUTER_CONCURRENCY = 1
INNER_CONCURRENCY = 1
OUTPUT_DIRECTORY = '/tmp/prisma-cloud-compute-data'
DEFAULT_HOURS = 1
DEFAULT_MINUTES_OVERLAP = 1
DEFAULT_CONSOLE_LOG_LIMIT = 32768
this_parser = pc_utility.get_arg_parser()
this_parser.add_argument(
'--hours',
type=int,
default=DEFAULT_HOURS,
help=f'(Optional) - Time period to collect, in hours, from now. (Default: {DEFAULT_HOURS})')
this_parser.add_argument(
'--minutes_overlap',
type=int,
default=DEFAULT_MINUTES_OVERLAP,
help=f'(Optional) - Minutes of overlap for time period to collect. (Default: {DEFAULT_MINUTES_OVERLAP})')
this_parser.add_argument(
'--no_audit_events',
action='store_true',
help='(Optional) - Do not collect Audit Events. (Default: disabled)')
this_parser.add_argument(
'--host_forensic_activities',
action='store_true',
help='(Optional) - Collect Host Forensic Activity. Warning: high-volume/time-intensive. (Default: disabled)')
this_parser.add_argument(
'--console_history',
action='store_true',
help='(Optional) - Collect Console History. (Default: disabled)')
this_parser.add_argument(
'--console_logs',
action='store_true',
help='(Optional) - Collect Console Logs. (Default: disabled)')
this_parser.add_argument(
'--console_log_limit',
type=int,
default=DEFAULT_CONSOLE_LOG_LIMIT,
help=f'(Optional) - Number of console logs to collect, requires --console_logs. (Default: {DEFAULT_CONSOLE_LOG_LIMIT})')
args = this_parser.parse_args()
# --User Defined Functions-- #
def outbound_api_call(data_type:str, data: Union[list, dict]):
# Transform data into the format expected by the request to your SIEM.
data['event'] = data_type
profile_log('OUTBOUND_API_CALL', 'STARTING')
req_method = 'POST'
req_url = ''
req_headers = {}
req_query_params = {}
req_body_params = data
connect_timeout = 4
retry_status_codes = [401, 429, 500, 502, 503, 504]
retry_limit = 4
retry_pause = 8
# Configure req_url to enable the request.
if not req_url:
print(f' OUTBOUND_API_CALL for {data_type} STUB ...')
profile_log('OUTBOUND_API_CALL', 'FINISHED')
return
print(f' OUTBOUND_API_CALL for {data_type} ...')
api_response = requests.request(req_method, req_url, headers=req_headers, params=req_query_params, data=json.dumps(req_body_params), timeout=connect_timeout, verify=False)
if api_response.status_code in retry_status_codes:
for _ in range(1, retry_limit):
time.sleep(retry_pause)
api_response = requests.request(req_method, req_url, headers=req_headers, params=req_query_params, data=json.dumps(req_body_params))
if api_response.ok:
break # break retry loop
if not api_response.ok:
print(f'API: {req_url} responded with an error: {api_response.status_code}')
profile_log('OUTBOUND_API_CALL', 'FINISHED')
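# Illustrative values for the req_url/req_headers placeholders above, assuming a
# Splunk HTTP Event Collector endpoint (replace with your SIEM's API):
#   req_url = 'https://splunk.example.com:8088/services/collector/event'
#   req_headers = {'Authorization': 'Splunk <hec-token>'}
#   req_body_params = {'event': data}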
# --Functions-- #
def process_audit_events(audit_type: str, query_params: dict):
audits = pc_api.audits_list_read(audit_type=audit_type, query_params=query_params)
send_data_to_siem(data_type=audit_type, data=audits)
def process_host_forensic_activities(query_params: dict):
audits = pc_api.host_forensic_activities_list_read(query_params=query_params)
send_data_to_siem(data_type='forensic/activities', data=audits)
def process_console_history(query_params: dict):
audits = pc_api.console_history_list_read(query_params=query_params)
send_data_to_siem(data_type='audits/mgmt', data=audits)
def process_console_logs(query_params: dict, time_range: dict):
matching_console_logs = []
console_logs = pc_api.console_logs_list_read(query_params=query_params)
for this_log in console_logs:
if this_log['time']:
log_datetime = parser.isoparse(this_log['time']).astimezone(tz.tzlocal())
if time_range['from'] <= log_datetime <= time_range['to']:
matching_console_logs.append(this_log)
send_data_to_siem(data_type='logs/console', data=matching_console_logs)
####
def send_data_to_siem(data_type: str, data: list, send_as_list=False):
profile_log(data_type, 'STARTING')
print(f' PROCESSING {len(data)} ({data_type}) records')
if send_as_list:
outbound_api_call(data_type, data)
else:
inner_futures = []
        with concurrent.futures.ThreadPoolExecutor(INNER_CONCURRENCY) as inner_executor:
for data_item in data:
inner_futures.append(inner_executor.submit(
#outbound_api_call(data_type, data_item)
outbound_api_call, data_type, data_item
)
)
concurrent.futures.wait(inner_futures)
profile_log(data_type, 'FINISHED')
####
def create_output_directory():
Path(OUTPUT_DIRECTORY).mkdir(parents=True, exist_ok=True)
####
def profile_log(detail: str, state: str, initialize=False):
if not ENABLE_PROFILING:
return
timestamp = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
# To output profile_log specific to each execution, use:
# log_file_name = '%s/%s_log.txt' % (OUTPUT_DIRECTORY, timestamp)
log_file_name = '%s/log.txt' % OUTPUT_DIRECTORY
if initialize:
mode = 'w'
else:
mode = 'a'
with open(log_file_name, mode) as log_file:
entry = '%s\t%s\t%s\t%s\n' % (timestamp, state, inspect.stack()[1][3], detail)
log_file.write(entry)
# --Initialize-- #
settings = pc_utility.get_settings(args)
pc_api.configure(settings)
# --Main-- #
profile_log('Collect Compute Audits, History, and Logs', 'STARTING', True)
create_output_directory()
print('Collect Compute Audits, History, and Logs')
print()
# Date Ranges
date_time_1 = datetime.datetime.now().replace(microsecond=0)
date_time_0 = date_time_1 - datetime.timedelta(hours=args.hours, minutes=args.minutes_overlap)
zone_time_1 = date_time_1.astimezone(tz.tzlocal())
zone_time_0 = zone_time_1 - datetime.timedelta(hours=args.hours, minutes=args.minutes_overlap)
audit_query_params = {
'from': f"{date_time_0.isoformat(sep='T')}Z",
'to': f"{date_time_1.isoformat(sep='T')}Z",
'sort': 'time'
}
console_log_query_params = {
'lines': args.console_log_limit
}
console_log_time_range = {
'from': zone_time_0,
'to': zone_time_1,
}
print('Query Period:')
print(f' From: {date_time_0}')
print(f' To: {date_time_1}')
print()
# Calculon Compute!
outer_futures = []
with concurrent.futures.ThreadPoolExecutor(OUTER_CONCURRENCY) as executor:
if not args.no_audit_events:
print('Collecting Audits')
print()
for this_audit_type in pc_api.compute_audit_types():
outer_futures.append(executor.submit(
#process_audit_events(this_audit_type, audit_query_params)
process_audit_events, this_audit_type, audit_query_params
)
)
concurrent.futures.wait(outer_futures)
print()
if args.host_forensic_activities:
print('Collecting Host Forensic Activity Audits (high-volume/time-intensive, please wait)')
print()
outer_futures.append(executor.submit(
#process_host_forensic_activities(audit_query_params)
process_host_forensic_activities, audit_query_params
)
)
print()
if args.console_history:
print('Collecting Console History')
print()
outer_futures.append(executor.submit(
#process_console_history(audit_query_params)
process_console_history, audit_query_params
)
)
print()
if args.console_logs:
print(f'Collecting Console History (Log Limit: {args.console_log_limit})')
print()
outer_futures.append(executor.submit(
#process_console_logs(console_log_query_params, console_log_time_range)
process_console_logs, console_log_query_params, console_log_time_range
)
)
print()
concurrent.futures.wait(outer_futures)
profile_log('Collect Compute Audits, History, and Logs', 'FINISHED')
print('Done')
print()
| 34.817829 | 175 | 0.694089 | 1,187 | 8,983 | 4.951137 | 0.195451 | 0.046793 | 0.022971 | 0.025013 | 0.380296 | 0.312915 | 0.234814 | 0.157904 | 0.117067 | 0.101072 | 0 | 0.006545 | 0.200601 | 8,983 | 257 | 176 | 34.953307 | 0.811865 | 0.115997 | 0 | 0.194737 | 0 | 0.015789 | 0.207927 | 0.03571 | 0 | 0 | 0 | 0 | 0 | 1 | 0.042105 | false | 0 | 0.052632 | 0 | 0.105263 | 0.126316 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f4d9e5c2c80d64d1658b3c322f0a7e56e7184d69 | 3,742 | py | Python | dppp/metrics.py | HedgehogCode/deep-plug-and-play-prior | d8240d6a60e11a22d67d46b99a2d17bbc59adc5b | [
"MIT"
] | 5 | 2021-06-25T12:01:35.000Z | 2022-01-14T21:19:17.000Z | dppp/metrics.py | HedgehogCode/deep-plug-and-play-prior | d8240d6a60e11a22d67d46b99a2d17bbc59adc5b | [
"MIT"
] | null | null | null | dppp/metrics.py | HedgehogCode/deep-plug-and-play-prior | d8240d6a60e11a22d67d46b99a2d17bbc59adc5b | [
"MIT"
] | 1 | 2021-07-05T01:27:24.000Z | 2021-07-05T01:27:24.000Z | import tensorflow as tf
from image_similarity_measures import quality_metrics
LPIPS_ALEX_MODEL_URL = "https://github.com/HedgehogCode/lpips-tf2/releases/download/0.1.0/lpips_lin_alex.h5"
LPIPS_ALEX_MODEL_NAME = "lpips_lin_alex_0.2.0"
LPIPS_ALEX_MODEL_MD5 = "a35b66a420f518161f715c0675d9bbfb"
lpips_model_alex = None
LPIPS_VGG_MODEL_URL = (
"https://github.com/HedgehogCode/lpips-tf2/releases/download/0.1.0/lpips_lin_vgg.h5"
)
LPIPS_VGG_MODEL_NAME = "lpips_lin_vgg_0.2.0"
LPIPS_VGG_MODEL_MD5 = "ef185d82115f86ac5736266e02f9222c"
lpips_model_vgg = None
def _handle_unbatched_inputs(metric_fn):
"""Decorator to allow using a function that is defined on batches on single images."""
def fn(imgs_a, imgs_b):
if tf.rank(imgs_a) == 3:
return metric_fn(imgs_a[None, ...], imgs_b[None, ...])[0]
return metric_fn(imgs_a, imgs_b)
return fn
@_handle_unbatched_inputs
def psnr(imgs_a, imgs_b):
return tf.image.psnr(imgs_a, imgs_b, max_val=1)
@_handle_unbatched_inputs
def ssim(imgs_a, imgs_b):
return tf.image.ssim(imgs_a, imgs_b, max_val=1)
@_handle_unbatched_inputs
def fsim(imgs_a, imgs_b):
"""FSIM: A Feature Similarity Index for Image Quality Assessment
Lin Zhang, Lei Zhang, Xuanqin Mou, and D. Zhang,
“FSIM: A Feature Similarity Index for Image Quality Assessment,”
IEEE Trans. on Image Process., vol. 20, no. 8, pp. 2378–2386, Aug. 2011,
doi: 10.1109/TIP.2011.2109730.
"""
# Function that runs FSIM on [H, W, 3, 2]
# where the last dimension has the two images to compare
def fsim_on_stacked_image(x):
fsim_val = tf.numpy_function(
quality_metrics.fsim, [x[..., 0], x[..., 1]], tf.float64
)
return tf.cast(fsim_val, tf.float32)
# Ensure the type is correct
a = tf.cast(imgs_a, tf.float32)
b = tf.cast(imgs_b, tf.float32)
# Stack the images and map the function over the batch
stacked = tf.stack([a, b], axis=-1)
return tf.map_fn(fsim_on_stacked_image, stacked) # type: ignore
@_handle_unbatched_inputs
def lpips_alex(imgs_a, imgs_b):
"""LPIPS: Learned Perceptual Image Patch Similarity metric
R. Zhang, P. Isola, A. A. Efros, E. Shechtman, and O. Wang,
“The Unreasonable Effectiveness of Deep Features as a Perceptual Metric,”
in 2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition,
Salt Lake City, UT, Jun. 2018, pp. 586–595.
doi: 10.1109/CVPR.2018.00068.
"""
if lpips_model_alex is None:
init_lpips_model_alex()
return lpips_model_alex([imgs_a, imgs_b])
@_handle_unbatched_inputs
def lpips_vgg(imgs_a, imgs_b):
"""LPIPS: Learned Perceptual Image Patch Similarity metric
R. Zhang, P. Isola, A. A. Efros, E. Shechtman, and O. Wang,
“The Unreasonable Effectiveness of Deep Features as a Perceptual Metric,”
in 2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition,
Salt Lake City, UT, Jun. 2018, pp. 586–595.
doi: 10.1109/CVPR.2018.00068.
"""
if lpips_model_vgg is None:
init_lpips_model_vgg()
return lpips_model_vgg([imgs_a, imgs_b])
def init_lpips_model_alex():
model_file = tf.keras.utils.get_file(
LPIPS_ALEX_MODEL_NAME,
LPIPS_ALEX_MODEL_URL,
file_hash=LPIPS_ALEX_MODEL_MD5,
hash_algorithm="md5",
)
global lpips_model_alex
lpips_model_alex = tf.keras.models.load_model(model_file, compile=False)
def init_lpips_model_vgg():
model_file = tf.keras.utils.get_file(
LPIPS_VGG_MODEL_NAME,
LPIPS_VGG_MODEL_URL,
file_hash=LPIPS_VGG_MODEL_MD5,
hash_algorithm="md5",
)
global lpips_model_vgg
lpips_model_vgg = tf.keras.models.load_model(model_file, compile=False)
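if __name__ == '__main__':
    # Minimal smoke test (illustrative; shapes and values are arbitrary).
    # The decorated metrics accept both batched and unbatched images.
    a = tf.random.uniform([2, 64, 64, 3])
    b = tf.random.uniform([2, 64, 64, 3])
    print(psnr(a, b).shape)  # one value per image: (2,)
    print(psnr(a[0], b[0]).shape)  # scalar: ()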
| 32.824561 | 108 | 0.708979 | 586 | 3,742 | 4.266212 | 0.273038 | 0.056 | 0.0396 | 0.044 | 0.5972 | 0.4648 | 0.4648 | 0.4464 | 0.388 | 0.312 | 0 | 0.057171 | 0.191342 | 3,742 | 113 | 109 | 33.115044 | 0.768011 | 0.327098 | 0 | 0.142857 | 0 | 0.031746 | 0.11313 | 0.026424 | 0 | 0 | 0 | 0 | 0 | 1 | 0.15873 | false | 0 | 0.031746 | 0.031746 | 0.333333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f4ddc1192c65d35664b824de7ac1aba8d2c5928a | 1,121 | py | Python | MonteCarlo/PseudoRandNumbGen.py | ssklykov/collection_numCalc | f6c69aa582fc811b998a0989b99157b8566c884f | [
"Unlicense"
] | null | null | null | MonteCarlo/PseudoRandNumbGen.py | ssklykov/collection_numCalc | f6c69aa582fc811b998a0989b99157b8566c884f | [
"Unlicense"
] | null | null | null | MonteCarlo/PseudoRandNumbGen.py | ssklykov/collection_numCalc | f6c69aa582fc811b998a0989b99157b8566c884f | [
"Unlicense"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Implementation of a simple linear congruential generator
@author: ssklykov
"""
# %% Import section
import numpy as np
import matplotlib.pyplot as plt
# %% The algorithm implementation
def simpleLCG(a: int = 1, c: int = 1, mod: int = 2**31, n: int = 10, seed: float = 0):
"""
    Return an array of n floats that, for suitable input parameters, are
    distributed within [0,1).
"""
if ((mod <= 0) or (a <= 0) or (a > mod) or (c <= 0) or (c > mod) or (n > mod) or (seed < 0) or (seed > 1)):
print("one or more input parameters is invalid")
return None
else:
x = np.zeros(n, dtype=int)
x[0] = seed
xRand = np.zeros(n, dtype=float)
xRand[0] = seed
for i in range(0, n-1):
x[i+1] = (a*x[i] + c) % mod
xRand[i+1] = x[i+1] / mod
return xRand
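# A classic full-period parameter choice (Numerical Recipes) for reference:
#   xRand = simpleLCG(a=1664525, c=1013904223, mod=2**32, n=1000, seed=0)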
# Testing the implemented algorithm
a = 5; c = 1; mod = 10**7; n = 1000; seed = 0  # integer modulus keeps the generator state integral
xRand = simpleLCG(a, c, mod, n, seed)
(counts, bins) = np.histogram(xRand, 10, [0, 1])
# demonstrate histogram
plt.figure()
plt.hist(xRand)
| 27.341463 | 111 | 0.578947 | 174 | 1,121 | 3.729885 | 0.436782 | 0.01849 | 0.012327 | 0.040062 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.045343 | 0.272079 | 1,121 | 40 | 112 | 28.025 | 0.75 | 0.289028 | 0 | 0 | 0 | 0 | 0.050914 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.05 | false | 0 | 0.1 | 0 | 0.25 | 0.05 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f4df54ce69a505121f3a25c5563bfe3ae78b4a03 | 2,890 | py | Python | aiowintest/tests/packet_test.py | hin/aiowintest | 6979033e0e27d0445af56c5729bd989c6aeb62c0 | [
"BSD-2-Clause"
] | 1 | 2020-08-15T19:21:33.000Z | 2020-08-15T19:21:33.000Z | aiowintest/tests/packet_test.py | hin/aiowintest | 6979033e0e27d0445af56c5729bd989c6aeb62c0 | [
"BSD-2-Clause"
] | null | null | null | aiowintest/tests/packet_test.py | hin/aiowintest | 6979033e0e27d0445af56c5729bd989c6aeb62c0 | [
"BSD-2-Clause"
] | null | null | null | import unittest
from ..packet import *
# Some packets as captured on the network; each string is the payload
# of one UDP packet.
summary = [
b'SUMMARY: "MULT" "" 8220 "ID" "4.23.0" 129 "SJ0X" "JO99BM" "14" 200 1 3 1 0 7 7\x89\x00',
b'SUMMARY: "MULT" "" 8220 "HEADERS" 1 5 8 10 6 14 15\x9e\x00',
b'SUMMARY: "MULT" "" 8220 "ROW" 1 "160" 28 4 25 1 28 1.00\xa5\x00',
b'SUMMARY: "MULT" "" 8220 "ROW" 2 "80" 75 12 59 0 110 1.47\xe1\x00',
b'SUMMARY: "MULT" "" 8220 "ROW" 3 "40" 533 27 93 20 702 1.32\xc4\x00',
b'SUMMARY: "MULT" "" 8220 "ROW" 4 "20" 629 19 68 9 1021 1.62\xd1\x00',
b'SUMMARY: "MULT" "" 8220 "ROW" 5 "15" 89 28 84 0 206 2.31\xec\x00',
b'SUMMARY: "MULT" "" 8220 "ROW" 6 "10" 1 1 1 0 3 3.00\xcc\x00',
b'SUMMARY: "MULT" "" 8220 "ROW" 0 "TOTAL" 1355 91 330 30 2070 1.53\xf3\x00',
b'SUMMARY: "MULT" "" 8220 "SCORE" 1540654636 930 871470\xfd\x00',
]
gab = [
b'GAB: "RUN" "" "Seeeeeeegt"\x96\x00',
b'GAB: "MULT" "" "\\345\\344\\366 \\"test\\""\xb8\x00',
]
gab_parsed = [
WintestPacket('GAB', ['RUN', '', 'Seeeeeeegt']),
WintestPacket('GAB', ['MULT', '', 'åäö "test"']),
]
spot = [
b'RCVDPKT: "TELNET" "" "DX de 9A1CIG-#: 10122.80 EA1FL/P CW 15 dB 21 WPM CQ 1724Z\n"\xf4',
]
class TestWintestPacket(unittest.TestCase):
def test_checksum(self):
for msg in summary:
data = msg[:-2]
ch = WintestPacket.checksum(data)
self.assertEqual(ch, msg[-2])
def test_split_string(self):
r = split_data('"HEJ HOPP" "4"')
self.assertEqual(r, ['HEJ HOPP', '4'])
r = split_data('"HEJ HOPP" 4')
self.assertEqual(r, ['HEJ HOPP', 4])
r = split_data('"HEJ HOPP" 4 17 19.34')
self.assertEqual(r, ['HEJ HOPP', 4, 17, 19.34])
r = split_data('"\\345\\344\\366"')
self.assertEqual(r, ['åäö'])
r = split_data('"\\""')
self.assertEqual(r, ['"'])
r = split_data('"\\345\\344\\366 \\"test\\"" 4 17 19.34')
self.assertEqual(r, ['åäö "test"', 4, 17, 19.34])
def test_encode_gab(self):
for i, msg in enumerate(gab_parsed):
data = msg.encode()
self.assertEqual(data, gab[i])
def test_decode_gab(self):
for i, packet in enumerate(gab):
msg = WintestPacket.decode(packet)
self.assertEqual(msg.frame_type, 'GAB')
self.assertSequenceEqual(msg.data, gab_parsed[i].data)
def test_encode_string(self):
s = 'åäö"'
self.assertEqual(encode_string(s), '\\345\\344\\366\\"')
def test_decode_summary_row(self):
msg = WintestPacket.decode(summary[0])
self.assertEqual(msg.data, [
'MULT', '',
8220, 'ID', '4.23.0', 129, 'SJ0X', 'JO99BM', '14', 200, 1, 3, 1, 0, 7, 7
])
    def test_decode_spot(self):
        msg = WintestPacket.decode(spot[0])
        # expected values inferred from the captured spot packet above
        self.assertEqual(msg.frame_type, 'RCVDPKT')
        self.assertEqual(msg.data[0], 'TELNET')
| 36.582278 | 112 | 0.557785 | 439 | 2,890 | 3.615034 | 0.332574 | 0.055451 | 0.075614 | 0.100819 | 0.303718 | 0.270321 | 0.140517 | 0.112161 | 0.112161 | 0.112161 | 0 | 0.164728 | 0.256401 | 2,890 | 78 | 113 | 37.051282 | 0.573755 | 0.029758 | 0 | 0 | 0 | 0.15625 | 0.384506 | 0 | 0 | 0 | 0 | 0 | 0.1875 | 1 | 0.109375 | false | 0 | 0.03125 | 0 | 0.15625 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f4df8bdce27a5516abd433c59665a1ea34e3cb94 | 3,560 | py | Python | AutotestWebD/apps/data_keyword/services/main_service.py | yangjourney/sosotest | 2e88099a829749910ca325253c9b1a2e368d21a0 | [
"MIT"
] | 422 | 2019-08-18T05:04:20.000Z | 2022-03-31T06:49:19.000Z | AutotestWebD/apps/data_keyword/services/main_service.py | LinSongJian1985/sosotest | 091863dee531b5726650bb63efd6f169267cbeb4 | [
"MIT"
] | 10 | 2019-10-24T09:55:38.000Z | 2021-09-29T17:28:43.000Z | AutotestWebD/apps/data_keyword/services/main_service.py | LinSongJian1985/sosotest | 091863dee531b5726650bb63efd6f169267cbeb4 | [
"MIT"
] | 202 | 2019-08-18T05:04:27.000Z | 2022-03-30T05:57:18.000Z | import apps.common.func.InitDjango
from all_models.models import *
from all_models.models.A0011_version_manage import TbVersionHttpInterface
from django.db import connection
from django.forms.models import model_to_dict
from apps.common.func.CommonFunc import *
from apps.common.func.ValidataFunc import *
from all_models_for_mock.models import *
from apps.common.model.Config import Config
class MainService(object):
@staticmethod
def addData(data,addBy):
newDataDict = {}
for k, v in data.items():
newDataDict[k] = data[k]
if newDataDict["keywordKey"] == "":
            # data keyword mode
newDataDict["type"] = "DATA_KEYWORD"
newDataDict["keywordKey"] = get_sub_string(data['keywordCode'], "def ", "(").strip()
if not data['keywordCode'].startswith("@keyword()\n"):
return 10001,"开头必须使用装饰器@keyword()"
if '(value,context,strTobeProcessed = ""):' not in data['keywordCode']:
return 10001,""""函数定义必须严格按照规范 def YOUR_KEYWORD_HER2E(value,context,strTobeProcessed = ""):"""
else:
newDataDict["type"] = "PYTHON_CODE"
newDataDict["addBy"] = addBy
newDataDict["status"] = 3 #默认设置为审核通过
if newDataDict["keywordKey"] == "":
return 10001,"key不能为空!"
if newDataDict["keywordKey"] == "YOUR_KEYWORD_HERE":
return 10001, "请不要使用默认函数名YOUR_KEYWORD_HERE"
if MainService.getDataKeywordByKey(newDataDict["keywordKey"]):
return 10002,"已经存在的KEY[%s]" % newDataDict["keywordKey"]
print(data['keywordCode'])
retVBl,retVMsg = verifyPythonMode(data['keywordCode'])
print(retVBl)
if retVBl == False:
return 10003,retVMsg
saveInterface = Tb4DataKeyword.objects.create(**newDataDict)
return 10000,"添加成功!"
@staticmethod
def getDataKeywordByKey(dataKey):
retdk = Tb4DataKeyword.objects.filter(keywordKey=dataKey).first()
if retdk:
return True
else:
return False
@staticmethod
def getDataById(id):
return Tb4DataKeyword.objects.filter(id=id)[0]
@staticmethod
def getDataByKey(key):
return Tb4DataKeyword.objects.filter(keywordKey=key)[0]
@staticmethod
def getDataByIdToDict(id):
return dbModelToDict(Tb4DataKeyword.objects.filter(id=id)[0])
@staticmethod
def dataSaveEdit(request,postData):
dataObj = Tb4DataKeyword.objects.filter(id=postData["id"])
if dataObj:
            if dataObj[0].addBy == "" or dataObj[0].addBy is None:
postData['addBy'] = postData['modBy']
print(postData['keywordCode'])
retVBl,retVMsg = verifyPythonMode(postData['keywordCode'])
print(retVBl)
if retVBl == False:
return 10003,retVMsg
if postData["keywordKey"] == "":
                # data keyword mode
postData["keywordKey"] = get_sub_string(postData['keywordCode'], "def ", "(").strip()
if not postData['keywordCode'].startswith("@keyword()\n"):
return 10001,"开头必须使用装饰器@keyword()"
if '(value,context,strTobeProcessed = ""):' not in postData['keywordCode']:
return 10001,""""函数定义必须严格按照规范 def YOUR_KEYWORD_HER2E(value,context,strTobeProcessed = ""):"""
dataSaveRes = dataObj.update(**postData)
return 10000,dataSaveRes
@staticmethod
def delDataById(request,id):
dataObj = Tb4DataKeyword.objects.filter(id=id)
return dataObj.update(state=0)
| 35.959596 | 109 | 0.633427 | 347 | 3,560 | 6.432277 | 0.308357 | 0.047043 | 0.072581 | 0.051971 | 0.298387 | 0.243728 | 0.243728 | 0.243728 | 0.201613 | 0.154122 | 0 | 0.02784 | 0.243258 | 3,560 | 98 | 110 | 36.326531 | 0.800668 | 0.006461 | 0 | 0.269231 | 0 | 0 | 0.172375 | 0.05293 | 0 | 0 | 0 | 0 | 0 | 1 | 0.089744 | false | 0 | 0.115385 | 0.038462 | 0.435897 | 0.051282 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f4dff16747375755b4e6a191d3d696955140401d | 2,861 | py | Python | tests/api_tests/base/test_auth.py | bdyetton/prettychart | e8b33a7dfdc8c33d15969586be7f68172795f76d | [
"Apache-2.0"
] | null | null | null | tests/api_tests/base/test_auth.py | bdyetton/prettychart | e8b33a7dfdc8c33d15969586be7f68172795f76d | [
"Apache-2.0"
] | null | null | null | tests/api_tests/base/test_auth.py | bdyetton/prettychart | e8b33a7dfdc8c33d15969586be7f68172795f76d | [
"Apache-2.0"
] | null | null | null | """
Tests related to authenticating API requests
"""
import mock
from nose.tools import * # flake8: noqa
from framework.auth import cas
from tests.base import ApiTestCase
from tests.factories import ProjectFactory, UserFactory
from api.base.settings import API_BASE
class TestOAuthValidation(ApiTestCase):
"""Test that OAuth2 requests can be validated"""
def setUp(self):
super(TestOAuthValidation, self).setUp()
self.user1 = UserFactory()
self.user2 = UserFactory()
# Test projects for which a given user DOES and DOES NOT have appropriate permissions
self.reachable_project = ProjectFactory(title="Private Project User 1", is_public=False, creator=self.user1)
self.unreachable_project = ProjectFactory(title="Private Project User 2", is_public=False, creator=self.user2)
self.reachable_url = "/{}nodes/{}/".format(API_BASE, self.reachable_project._id)
self.unreachable_url = "/{}nodes/{}/".format(API_BASE, self.unreachable_project._id)
def test_missing_token_fails(self):
res = self.app.get(self.reachable_url, auth=None, auth_type='jwt', expect_errors=True)
assert_equal(res.status_code, 403)
assert_equal(res.json.get("detail"),
'Authentication credentials were not provided.')
@mock.patch('framework.auth.cas.CasClient.profile')
def test_invalid_token_fails(self, mock_user_info):
mock_user_info.return_value = cas.CasResponse(authenticated=False, user=None)
res = self.app.get(self.reachable_url, auth='invalid_token', auth_type='jwt', expect_errors=True)
assert_equal(res.status_code, 403, msg=res.json)
@mock.patch('framework.auth.cas.CasClient.profile')
def test_valid_token_returns_unknown_user_thus_fails(self, mock_user_info):
mock_user_info.return_value = cas.CasResponse(authenticated=True, user='fail')
res = self.app.get(self.reachable_url, auth='some_valid_token', auth_type='jwt', expect_errors=True)
assert_equal(res.status_code, 403, msg=res.json)
@mock.patch('framework.auth.cas.CasClient.profile')
def test_valid_token_authenticates_and_has_permissions(self, mock_user_info):
mock_user_info.return_value = cas.CasResponse(authenticated=True, user=self.user1._id)
res = self.app.get(self.reachable_url, auth='some_valid_token', auth_type='jwt')
assert_equal(res.status_code, 200, msg=res.json)
@mock.patch('framework.auth.cas.CasClient.profile')
def test_valid_token_authenticates_but_user_lacks_permissions(self, mock_user_info):
mock_user_info.return_value = cas.CasResponse(authenticated=True, user=self.user1._id)
res = self.app.get(self.unreachable_url, auth='some_valid_token', auth_type='jwt', expect_errors=True)
assert_equal(res.status_code, 403, msg=res.json)
| 44.703125 | 118 | 0.73366 | 387 | 2,861 | 5.183463 | 0.276486 | 0.031904 | 0.047856 | 0.032403 | 0.633101 | 0.597208 | 0.528415 | 0.528415 | 0.495513 | 0.471585 | 0 | 0.010343 | 0.15519 | 2,861 | 63 | 119 | 45.412698 | 0.819611 | 0.065012 | 0 | 0.225 | 0 | 0 | 0.128947 | 0.054135 | 0 | 0 | 0 | 0 | 0.15 | 1 | 0.15 | false | 0 | 0.15 | 0 | 0.325 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f4e06142275bb0dc08b385a149b60bbb576e4e7c | 1,215 | py | Python | server.py | mindudekim/Restaurant-research | 3865b775ccc8617deaa450711024bcef9b981b6c | [
"MIT"
] | null | null | null | server.py | mindudekim/Restaurant-research | 3865b775ccc8617deaa450711024bcef9b981b6c | [
"MIT"
] | null | null | null | server.py | mindudekim/Restaurant-research | 3865b775ccc8617deaa450711024bcef9b981b6c | [
"MIT"
] | null | null | null | # import modules
from flask import Flask, jsonify
import requests
from pymongo import MongoClient
app = Flask(__name__)
mongo_uri = "mongodb://<mLab_username>:<mLab_password>@ds145299.mlab.com:45299/mydbinstance"
client = MongoClient(mongo_uri)
db = client.mydbinstance
yelp_collection = db.yelp
@app.route('/')
def index():
return "Hello"
@app.route('/LA')
def LA():
try:
query = {}
la_result = [item['restaurants']['Los Angeles'] for item in list(yelp_collection.find(query))]
except:
la_result = "failed"
finally:
return jsonify({'Restaurants':la_result})
@app.route('/SF')
def SF():
try:
query = {}
sf_result = [item['restaurants']['San Francisco'] for item in list(yelp_collection.find(query))]
except:
sf_result = "failed"
finally:
return jsonify({'Restaurants':sf_result})
@app.route('/NY')
def NY():
try:
query = {}
ny_result = [item['restaurants']['New York'] for item in list(yelp_collection.find(query))]
except:
ny_result = "failed"
finally:
return jsonify({'Restaurants':ny_result})
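# The three routes above share one shape; a single parameterized route could
# replace them (illustrative sketch; city keys as stored in the documents):
#   @app.route('/city/<name>')
#   def city(name):
#       result = [item['restaurants'][name] for item in yelp_collection.find({})]
#       return jsonify({'Restaurants': result})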
if __name__=='__main__':
app.run(host='0.0.0.0', port=8080, debug=True)
| 24.795918 | 104 | 0.641152 | 152 | 1,215 | 4.934211 | 0.388158 | 0.074667 | 0.084 | 0.052 | 0.34 | 0.34 | 0.168 | 0.168 | 0.168 | 0 | 0 | 0.01973 | 0.207407 | 1,215 | 48 | 105 | 25.3125 | 0.759086 | 0.011523 | 0 | 0.3 | 0 | 0 | 0.186822 | 0.065054 | 0 | 0 | 0 | 0 | 0 | 1 | 0.1 | false | 0.025 | 0.075 | 0.025 | 0.275 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f4e1ab0a4b6c9f9c9b5739fea75b69932c0cfa70 | 3,810 | py | Python | padertorch/contrib/examples/audio_synthesis/wavenet/train.py | sibange/padertorch | 494692d877f04c66847c2943795b23aea488217d | [
"MIT"
] | 62 | 2019-12-22T08:30:29.000Z | 2022-03-22T11:02:59.000Z | padertorch/contrib/examples/audio_synthesis/wavenet/train.py | sibange/padertorch | 494692d877f04c66847c2943795b23aea488217d | [
"MIT"
] | 47 | 2020-01-06T09:23:47.000Z | 2022-01-24T16:55:06.000Z | padertorch/contrib/examples/audio_synthesis/wavenet/train.py | sibange/padertorch | 494692d877f04c66847c2943795b23aea488217d | [
"MIT"
] | 13 | 2019-12-16T08:12:46.000Z | 2021-11-08T14:37:06.000Z | """
Example call:
export STORAGE_ROOT=<your desired storage root>
python -m padertorch.contrib.examples.wavenet.train
"""
import os
from pathlib import Path
from lazy_dataset.database import JsonDatabase
from padertorch.contrib.examples.audio_synthesis.wavenet.data import \
prepare_dataset
from padertorch.contrib.examples.audio_synthesis.wavenet.model import WaveNet
from padertorch.io import get_new_storage_dir
from padertorch.train.optimizer import Adam
from padertorch.train.trainer import Trainer
from sacred import Experiment, commands
from sacred.observers import FileStorageObserver
ex = Experiment('wavenet')
@ex.config
def config():
database_json = (
str((Path(os.environ['NT_DATABASE_JSONS_DIR']) / 'librispeech.json').expanduser())
if 'NT_DATABASE_JSONS_DIR' in os.environ else None
)
assert database_json is not None, (
'database_json cannot be None.\n'
'Either start the training with "python -m padertorch.contrib.examples.'
'audio_synthesis.wavenet.train with database_json=</path/to/json>" '
'or make sure there is an environment variable "NT_DATABASE_JSONS_DIR"'
'pointing to a directory with a "librispeech.json" in it (see README '
'for the JSON format).'
)
training_sets = ['train_clean_100', 'train_clean_360']
validation_sets = ['dev_clean']
audio_reader = {
'source_sample_rate': 16000,
'target_sample_rate': 16000,
}
stft = {
'shift': 200,
'window_length': 800,
'size': 1024,
'fading': 'full',
'pad': True,
}
max_length_in_sec = 1.
batch_size = 3
number_of_mel_filters = 80
trainer = {
'model': {
'factory': WaveNet,
'wavenet': {
'n_cond_channels': number_of_mel_filters,
'upsamp_window': stft['window_length'],
'upsamp_stride': stft['shift'],
'fading': stft['fading'],
},
'sample_rate': audio_reader['target_sample_rate'],
'stft_size': stft['size'],
'number_of_mel_filters': number_of_mel_filters,
'lowest_frequency': 50
},
'optimizer': {
'factory': Adam,
'lr': 5e-4,
},
'storage_dir': get_new_storage_dir(
'wavenet', id_naming='time', mkdir=False
),
'summary_trigger': (1_000, 'iteration'),
'checkpoint_trigger': (10_000, 'iteration'),
'stop_trigger': (200_000, 'iteration'),
}
trainer = Trainer.get_config(trainer)
resume = False
ex.observers.append(FileStorageObserver.create(trainer['storage_dir']))
@ex.automain
def main(
_run, _log, trainer, database_json, training_sets, validation_sets,
audio_reader, stft, max_length_in_sec, batch_size, resume
):
commands.print_config(_run)
trainer = Trainer.from_config(trainer)
storage_dir = Path(trainer.storage_dir)
storage_dir.mkdir(parents=True, exist_ok=True)
commands.save_config(
_run.config, _log, config_filename=str(storage_dir / 'config.json')
)
db = JsonDatabase(database_json)
training_data = db.get_dataset(training_sets)
validation_data = db.get_dataset(validation_sets)
training_data = prepare_dataset(
training_data, audio_reader=audio_reader, stft=stft,
max_length_in_sec=max_length_in_sec, batch_size=batch_size, shuffle=True
)
validation_data = prepare_dataset(
validation_data, audio_reader=audio_reader, stft=stft,
max_length_in_sec=max_length_in_sec, batch_size=batch_size, shuffle=False
)
trainer.test_run(training_data, validation_data)
trainer.register_validation_hook(validation_data)
trainer.train(training_data, resume=resume)
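# Example call overriding config entries on the command line (illustrative path),
# using sacred's "with key=value" syntax:
#   python -m padertorch.contrib.examples.audio_synthesis.wavenet.train \
#       with database_json=/path/to/librispeech.json batch_size=3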
| 34.324324 | 90 | 0.670341 | 458 | 3,810 | 5.277293 | 0.340611 | 0.033099 | 0.027307 | 0.034754 | 0.159702 | 0.141911 | 0.113364 | 0.07199 | 0.07199 | 0.07199 | 0 | 0.01661 | 0.225722 | 3,810 | 110 | 91 | 34.636364 | 0.802712 | 0.029921 | 0 | 0 | 0 | 0 | 0.217733 | 0.046909 | 0 | 0 | 0 | 0 | 0.010417 | 1 | 0.020833 | false | 0 | 0.104167 | 0 | 0.125 | 0.010417 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |