hexsha string | size int64 | ext string | lang string | max_stars_repo_path string | max_stars_repo_name string | max_stars_repo_head_hexsha string | max_stars_repo_licenses list | max_stars_count int64 | max_stars_repo_stars_event_min_datetime string | max_stars_repo_stars_event_max_datetime string | max_issues_repo_path string | max_issues_repo_name string | max_issues_repo_head_hexsha string | max_issues_repo_licenses list | max_issues_count int64 | max_issues_repo_issues_event_min_datetime string | max_issues_repo_issues_event_max_datetime string | max_forks_repo_path string | max_forks_repo_name string | max_forks_repo_head_hexsha string | max_forks_repo_licenses list | max_forks_count int64 | max_forks_repo_forks_event_min_datetime string | max_forks_repo_forks_event_max_datetime string | content string | avg_line_length float64 | max_line_length int64 | alphanum_fraction float64 | qsc_code_num_words_quality_signal int64 | qsc_code_num_chars_quality_signal float64 | qsc_code_mean_word_length_quality_signal float64 | qsc_code_frac_words_unique_quality_signal float64 | qsc_code_frac_chars_top_2grams_quality_signal float64 | qsc_code_frac_chars_top_3grams_quality_signal float64 | qsc_code_frac_chars_top_4grams_quality_signal float64 | qsc_code_frac_chars_dupe_5grams_quality_signal float64 | qsc_code_frac_chars_dupe_6grams_quality_signal float64 | qsc_code_frac_chars_dupe_7grams_quality_signal float64 | qsc_code_frac_chars_dupe_8grams_quality_signal float64 | qsc_code_frac_chars_dupe_9grams_quality_signal float64 | qsc_code_frac_chars_dupe_10grams_quality_signal float64 | qsc_code_frac_chars_replacement_symbols_quality_signal float64 | qsc_code_frac_chars_digital_quality_signal float64 | qsc_code_frac_chars_whitespace_quality_signal float64 | qsc_code_size_file_byte_quality_signal float64 | qsc_code_num_lines_quality_signal float64 | qsc_code_num_chars_line_max_quality_signal float64 | qsc_code_num_chars_line_mean_quality_signal float64 | qsc_code_frac_chars_alphabet_quality_signal float64 | qsc_code_frac_chars_comments_quality_signal float64 | qsc_code_cate_xml_start_quality_signal float64 | qsc_code_frac_lines_dupe_lines_quality_signal float64 | qsc_code_cate_autogen_quality_signal float64 | qsc_code_frac_lines_long_string_quality_signal float64 | qsc_code_frac_chars_string_length_quality_signal float64 | qsc_code_frac_chars_long_word_length_quality_signal float64 | qsc_code_frac_lines_string_concat_quality_signal float64 | qsc_code_cate_encoded_data_quality_signal float64 | qsc_code_frac_chars_hex_words_quality_signal float64 | qsc_code_frac_lines_prompt_comments_quality_signal float64 | qsc_code_frac_lines_assert_quality_signal float64 | qsc_codepython_cate_ast_quality_signal float64 | qsc_codepython_frac_lines_func_ratio_quality_signal float64 | qsc_codepython_cate_var_zero_quality_signal bool | qsc_codepython_frac_lines_pass_quality_signal float64 | qsc_codepython_frac_lines_import_quality_signal float64 | qsc_codepython_frac_lines_simplefunc_quality_signal float64 | qsc_codepython_score_lines_no_logic_quality_signal float64 | qsc_codepython_frac_lines_print_quality_signal float64 | qsc_code_num_words int64 | qsc_code_num_chars int64 | qsc_code_mean_word_length int64 | qsc_code_frac_words_unique null | qsc_code_frac_chars_top_2grams int64 | qsc_code_frac_chars_top_3grams int64 | qsc_code_frac_chars_top_4grams int64 | qsc_code_frac_chars_dupe_5grams int64 | qsc_code_frac_chars_dupe_6grams int64 | qsc_code_frac_chars_dupe_7grams int64 | qsc_code_frac_chars_dupe_8grams int64 | 
qsc_code_frac_chars_dupe_9grams int64 | qsc_code_frac_chars_dupe_10grams int64 | qsc_code_frac_chars_replacement_symbols int64 | qsc_code_frac_chars_digital int64 | qsc_code_frac_chars_whitespace int64 | qsc_code_size_file_byte int64 | qsc_code_num_lines int64 | qsc_code_num_chars_line_max int64 | qsc_code_num_chars_line_mean int64 | qsc_code_frac_chars_alphabet int64 | qsc_code_frac_chars_comments int64 | qsc_code_cate_xml_start int64 | qsc_code_frac_lines_dupe_lines int64 | qsc_code_cate_autogen int64 | qsc_code_frac_lines_long_string int64 | qsc_code_frac_chars_string_length int64 | qsc_code_frac_chars_long_word_length int64 | qsc_code_frac_lines_string_concat null | qsc_code_cate_encoded_data int64 | qsc_code_frac_chars_hex_words int64 | qsc_code_frac_lines_prompt_comments int64 | qsc_code_frac_lines_assert int64 | qsc_codepython_cate_ast int64 | qsc_codepython_frac_lines_func_ratio int64 | qsc_codepython_cate_var_zero int64 | qsc_codepython_frac_lines_pass int64 | qsc_codepython_frac_lines_import int64 | qsc_codepython_frac_lines_simplefunc int64 | qsc_codepython_score_lines_no_logic int64 | qsc_codepython_frac_lines_print int64 | effective string | hits int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
507c355484d5c5a8ddd1eb52517291afe9f90fe8 | 13,866 | py | Python | reveal/schemes.py | jasperlinthorst/reveal | 521f350260ae81eaf357c311a50b42e0fb774f64 | [
"MIT"
] | 46 | 2015-07-18T15:32:23.000Z | 2022-01-17T15:15:31.000Z | reveal/schemes.py | jasperlinthorst/reveal | 521f350260ae81eaf357c311a50b42e0fb774f64 | [
"MIT"
] | 31 | 2015-10-15T15:25:12.000Z | 2020-08-13T15:15:09.000Z | reveal/schemes.py | jasperlinthorst/reveal | 521f350260ae81eaf357c311a50b42e0fb774f64 | [
"MIT"
] | 4 | 2016-09-03T15:48:58.000Z | 2020-04-22T11:46:35.000Z | # -*- coding: utf-8 -*-
"""
Created on Tue Oct 13 17:59:26 2015
@author: jasperlinthorst
"""
from intervaltree import IntervalTree, Interval
import networkx as nx
import sys
import math
import logging
import utils
import traceback
from utils import mem2mums
import random
# from matplotlib import pyplot as plt
def chain(mums,left,right,gcmodel="sumofpairs"):
if len(mums)==0:
return []
logging.debug("Number of anchors before chaining: %d",len(mums))
#use one coordinate system for sorting
ref=mums[0][2].keys()[0]
# logging.trace("Ref is %s"%ref)
mums.append(right)
mums.sort(key=lambda mum: mum[2][ref]) #sort by reference dimension
sp2mum=dict()
for mum in mums:
sp2mum[mum[2][ref]]=mum
minscore=-1*utils.gapcost([left[2][k] for k in right[2]],[right[2][k] for k in right[2]])
logging.debug("Initial cost is: %d"%minscore)
start=left[2][ref]
end=right[2][ref]
link=dict()
score=dict({left[2][ref]:0})
active=[left]
processed=[]
for mum in mums:
trace=False
#active=[ep2mum[ep] for ep in utils.range_search(mumeptree,(0,0),[sp-1 for sp in mum[2]])].sort(key=lambda x: score[x], reverse=True)
remove=[]
for pmum in processed:
for crd in pmum[2]:
if pmum[2][crd]+pmum[0]>mum[2][crd]:
break
else:
active.append(pmum)
remove.append(pmum)
for r in remove:
processed.remove(r)
active.sort(key=lambda x: score[x[2][ref]], reverse=True) #sort active by score decreasing, kind of priority queue
w=None
for amum in active:
for crd in amum[2]:
if amum[2][crd]+amum[0]>mum[2][crd]:
break
else:
s=score[amum[2][ref]] + (args.wscore*(mum[0]*((mum[1]*(mum[1]-1))/2)))
if w!=None:
if w > s: #as input is sorted by score
break
penalty=utils.gapcost([amum[2][k]+amum[0] for k in mum[2]],[mum[2][k] for k in mum[2]],model=gcmodel)
assert(penalty>=0)
# tmpw=score[amum[2][ref]] + (args.wscore*(mum[0]*((mum[1]*(mum[1]-1))/2))) - (args.wpen*penalty)
tmpw=s - (args.wpen*penalty)
if tmpw>w or w==None:
logging.trace("mum: %s --> %s = penalty: %d and score at amum: %d, score at mum: %d"%(str(mum),str(amum),penalty,s,tmpw))
w=tmpw
best=amum
link[mum[2][ref]]=best[2][ref]
score[mum[2][ref]]=w
processed.append(mum)
logging.debug("Best score is: %d"%score[end])
logging.trace("Min score is: %d"%minscore)
#backtrack
path=[]
while end!=start:
path.append((sp2mum[end],score[end]))
end=link[end]
return path[1:]
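# NOTE: throughout this module an anchor ("mum") appears to be a tuple
# (length, n, positions), where positions maps each genome/graph id to the
# start position of the match (a dict here, or a tuple of (gid, pos) pairs
# as handled by lookup() below); this is inferred from how the tuples are unpacked.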
#determine a subset of genomes for which (length * n) is largest
def segment(mums):
d=dict()
for mum in mums:
k=tuple(sorted([gid for gid,sp in mum[2]]))
if k in d:
d[k].append(mum)
else:
d[k]=[mum]
best=0
for part in d:
z=sum([m[0] for m in d[part]])*len(part)
if z>best:
best=z
partition=part
logging.debug("Splitting input genomes: %s"%str(partition))
return d[partition]
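# Example with hypothetical numbers: given mums covering genome subsets
# (0, 1) with total length 100 and (0, 1, 2) with total length 80, the
# scores are 100*2=200 and 80*3=240, so the (0, 1, 2) partition is kept.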
def lookup(mum):
l,mmn,spd=mum
if isinstance(spd,dict):
sp=spd.values()
elif isinstance(spd,tuple):
sp=[sp for gid,sp in spd]
else:
logging.fatal("Unknown format: %s"%str(spd))
n=0
qlpoint=dict()
qrpoint=dict()
for pos in sp:
t=ts[pos]
assert(len(t)==1)
node=iter(t).next()
ndata=G.node[node]
nsamples=set([o for o in ndata['offsets'].keys() if not G.graph['id2path'][o].startswith("*")])
n+=len(nsamples)
rel=pos-node[0]
for k in nsamples:
v=ndata['offsets'][k]+rel
qlpoint[k]=v
qrpoint[k]=v+l
return (l,n,qlpoint)
def maptooffsets(mums):
mapping=dict()
relmums=[]
for mum in mums:
relmum=lookup(mum)
relmums.append(relmum)
mapping[tuple(relmum[2].values())]=mum
return relmums,mapping
def trim_overlap(mums):
coords=mums[0][2]
for coord in range(len(coords)):
if len(mums)<=1: #by definition no more overlaps
break
mums.sort(key=lambda m: (m[2][coord][1],-m[0])) #sort by start position, then -1*size
#filter the partial matches that are now contained
mums=[mum for i,mum in enumerate(mums) if (i==0 and mums[i+1][2][coord][1]+mums[i+1][0] > mum[2][coord][1]+mum[0] ) or mums[i-1][2][coord][1]+mums[i-1][0]<mum[2][coord][1]+mum[0]]
if len(mums)<=1: #by definition no more overlaps
break
trimmed=[mums[0]]
for mum in mums[1:]:
pmum=trimmed[-1]
overlap = (pmum[2][coord][1]+pmum[0]) - mum[2][coord][1]
if overlap>0:
if pmum[0]-overlap>0:
trimmed[-1] = (pmum[0]-overlap, pmum[1], pmum[2])
else:
del trimmed[-1]
if mum[0]-overlap>0:
trimmed.append( (mum[0]-overlap, mum[1], tuple((k,v+overlap) for k,v in mum[2]) ))
else:
trimmed.append(mum)
mums=trimmed
return mums
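# Example with hypothetical numbers: if a previous match of length 8 starting
# at position 10 meets a match starting at 15 in the same coordinate, the
# overlap is (10+8)-15 = 3; the previous match is shortened to length 5 and
# the new match is shortened by 3 with its start shifted to 18.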
args=None
splitchain="largest"
maxdepth=None #stop recursion when max depth is reached
def graphmumpicker(mums,idx,precomputed=False,minlength=0):
try:
if len(mums)==0:
return ()
if not precomputed:
if maxdepth!=None:
if idx.depth>maxdepth:
return ()
if args.maxsize!=None:
rpaths=[p for p in G.graph['paths'] if not p.startswith('*')]
if idx.leftnode==None:
lo={G.graph['path2id'][p]: 0 for p in rpaths}
else:
lo={k: G.node[idx.leftnode]['offsets'][k]+(idx.leftnode[1]-idx.leftnode[0]) for k in G.node[idx.leftnode]['offsets']}
if idx.rightnode==None:
ro={G.graph['path2id'][p]: G.graph['id2end'][G.graph['path2id'][p]] for p in rpaths}
else:
ro=G.node[idx.rightnode]['offsets']
for k in set(lo.keys()) & set(ro.keys()):
if ro[k]-lo[k]>args.maxsize:
break
else:
return () #no break, so all fragments in bubbles are smaller than maxsize
logging.debug("Selecting input multimums (for %d samples) out of: %d mums"%(idx.nsamples, len(mums)))
mmums=[mum for mum in mums if mum[1]==idx.nsamples] #subset only those mums that apply to all indexed genomes/graphs
if len(mmums)==0 and idx.nsamples>2:
logging.debug("No MUMS that span all input genomes, segment genomes.")
mmums=segment(mums)
logging.debug("Segmented genomes/graphs into %s, now %d MUMS for chaining."%(mmums[0][2],len(mmums)))
if args.trim:
logging.debug("Trimming overlap between mums.")
mmums=trim_overlap(mmums)
if len(mmums)==0:
return ()
mmums.sort(key=lambda mum: mum[0], reverse=True) #sort by size
logging.debug("Mapping indexed positions to relative postions within genomes.")
relmums,mapping=maptooffsets(mmums) #and convert tuple to dict for fast lookup in chaining
logging.debug("Subset to same group of samples")
        relmums.sort(key=lambda m: (m[1],m[0])) #sort by n, then l
relmums=[mum for mum in relmums if mum[2].keys()==relmums[-1][2].keys()] #subset to only those mums that apply to the same set
logging.debug("Left with %d mums"%len(relmums))
if idx.leftnode!=None:
spd=dict()
for k in relmums[-1][2].keys():
spd[k]=G.node[idx.leftnode]['offsets'][k]+(idx.leftnode[1]-idx.leftnode[0])-1
left=(0,0,spd)
else:
spd=dict()
for sid in relmums[-1][2].keys():
spd[sid]=-1
left=(0,0,spd)
if idx.rightnode!=None:
spd=dict()
for k in relmums[-1][2].keys():
spd[k]=G.node[idx.rightnode]['offsets'][k]
right=(0,0,spd)
else:
spd=dict()
for sid in relmums[-1][2].keys():
spd[sid]=G.graph['id2end'][sid]
right=(0,0,spd)
# if minlength==0: #autodetermine significant subset
# relmums=[mum for mum in relmums if 1-((1-((.25**(mum[1]-1))**mum[0]))**o)<pcutoff] #subset to only significant mums
if len(relmums)==0:
logging.debug("No more significant MUMs.")
return ()
skipleft=[]
skipright=[]
if len(relmums)==1:
splitmum=relmums[0]
else:
if len(relmums)>args.maxmums:
logging.debug("Number of MUMs exceeds cap (%d), taking largest %d"%(len(mmums),args.maxmums))
relmums=relmums[-args.maxmums:]
logging.debug("Chaining %d mums"%len(relmums))
chainedmums=chain(relmums,left,right,gcmodel=args.gcmodel)[::-1]
logging.debug("Selected chain of %d mums"%len(chainedmums))
if len(chainedmums)==0:
return ()
if splitchain=="balanced":
logging.debug("Selecting MUM from chain on position within chain.")
optsplit=None
for mum,score in chainedmums: #determine optimal split in chain
lseq=0
rseq=0
for crd in mum[2]:
lseq=mum[2][crd]
assert(lseq>=0)
rseq=right[2][crd]-mum[2][crd]+mum[0]
assert(rseq>=0)
if optsplit==None or abs(lseq-rseq)<optsplit:
optsplit=abs(lseq-rseq)
splitmum=mum
elif splitchain=="largest":
logging.debug("Selecting MUM from chain based on size.")
splitmum=sorted(chainedmums,key=lambda m:m[0][0])[-1][0]
else: #select at random
logging.debug("Selecting MUM from chain at random.")
splitmum=chainedmums[random.randint(0,len(chainedmums)-1)][0]
if args.seedsize>0:
t=skipleft
scoreatsplit=0
for mum,score in chainedmums:
if mum==splitmum:
scoreatsplit=score
t=skipright
continue
t.append( (mapping[tuple(mum[2].values())], score-scoreatsplit) )
# t.append( mapping[tuple(mum[2].values())] )
skipleft=[(mum,score) for mum,score in skipleft if mum[0]>=args.seedsize]
skipright=[(mum,score) for mum,score in skipright if mum[0]>=args.seedsize]
splitmum=mapping[tuple(splitmum[2].values())]
if minlength==0: #experimental, use significance to determine valid anchor length when minlength is set to 0
o=1
for p in left[2]:
o=o*(right[2][p]-left[2][p])
l=splitmum[0]
n=splitmum[1]
p=((.25**(n-1)))**l #probability of observing this match by random chance
if p>0:
p=1-math.exp(math.log(1-p) * o) #correct for the number of tests we actually did
if p>args.pcutoff:
logging.info("P-value for: %s (n=%d l=%d o=%d) is %.4g"%(str(splitmum),n,l,o,p))
return ()
else:
logging.debug("Selecting MUM from precomputed chain")
chainedmums=mums
splitmum=chainedmums[len(chainedmums)/2][0]
skipleft=chainedmums[:len(chainedmums)/2]
skipright=chainedmums[(len(chainedmums)/2)+1:]
logging.debug("Best MUM has length: %d"%splitmum[0])
logging.debug("Skipleft: %d"%len(skipleft))
logging.debug("Skipright: %d"%len(skipright))
return splitmum,skipleft,skipright
except Exception:
logging.fatal(traceback.format_exc())
def printSA(index,maxline=100,start=0,end=None,fn="sa.txt"):
sa=index.SA
lcp=index.LCP
t=index.T
#so=index.SO
if end==None:
end=len(sa)
with open(fn,'w') as f:
f.write("%d\t%d\n"%(len(sa), len(lcp)))
assert(len(sa)==len(lcp))
for i in xrange(len(sa)):
s=sa[i]
lcpi=lcp[i]
if i>0 and i<len(sa)-1:
l1=lcp[i]
l2=lcp[i+1]
elif i==len(sa)-1:
l1=max([lcp[i-1],lcp[i]])
l2=0
else:
l1=0
l2=lcp[i+1]
if i>=start and i<=end:
#f.write("%s\t%s\t%s\n"%(str(s).zfill(8), str(lcpi).zfill(6), t[s:s+maxline].ljust(maxline) if l1<=maxline else t[s:s+maxline]+"..."+t[s+l1-40:s+l1].ljust(maxline) ) )
f.write("%s\t%s\t%s\t%s\t%s\n"%(str(s).zfill(8), str(lcpi).zfill(6), t[s:s+maxline] ,t[s+l1-maxline:s+l1], t[s+l2-maxline:s+l2] ) )
| 35.192893 | 187 | 0.508654 | 1,850 | 13,866 | 3.81027 | 0.174054 | 0.037452 | 0.007661 | 0.010214 | 0.210101 | 0.14158 | 0.10427 | 0.079586 | 0.079586 | 0.079586 | 0 | 0.027919 | 0.349055 | 13,866 | 393 | 188 | 35.282443 | 0.753047 | 0.120078 | 0 | 0.181208 | 0 | 0.006711 | 0.085472 | 0 | 0 | 0 | 0 | 0 | 0.016779 | 1 | 0.02349 | false | 0 | 0.030201 | 0 | 0.100671 | 0.003356 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
507d99771b07b2b4a3f2b7f90338d08aa9ba2f62 | 1,352 | py | Python | ML/50-mlps/nl-intro/step2_basic_model.py | saneravi/ML_Stuff | 74e1ed7ba9f4dccb555792315a14ba6071150304 | [
"MIT"
] | 209 | 2015-01-02T03:47:12.000Z | 2022-03-06T16:54:47.000Z | ML/50-mlps/nl-intro/step2_basic_model.py | Kerwin-Xie/algorithms | 4347a9b7bf54ef378d16d26ef9e357ddc710664b | [
"MIT"
] | 3 | 2015-12-06T14:40:34.000Z | 2021-03-22T17:40:24.000Z | ML/50-mlps/nl-intro/step2_basic_model.py | Kerwin-Xie/algorithms | 4347a9b7bf54ef378d16d26ef9e357ddc710664b | [
"MIT"
] | 114 | 2015-01-31T08:37:10.000Z | 2022-02-23T04:42:28.000Z | import hasy_tools
import numpy as np
from keras.callbacks import CSVLogger, ModelCheckpoint
from keras.layers import Activation, Dense, Flatten
from keras.models import Sequential
# Load data
def load_data():
data = hasy_tools.load_data()
# One-Hot encoding
data['y_train'] = np.eye(hasy_tools.n_classes)[data['y_train'].squeeze()]
data['y_test'] = np.eye(hasy_tools.n_classes)[data['y_test'].squeeze()]
# Preprocessing
data['x_train'] = hasy_tools.preprocess(data['x_train'])
data['x_test'] = hasy_tools.preprocess(data['x_test'])
return data
data = load_data()
# Define the model
model = Sequential()
model.add(Flatten())
model.add(Dense(369, activation='softmax'))
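# NOTE: 369 matches hasy_tools.n_classes, the number of symbol classes in HASYv2.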
# Compile model
model.compile(loss='categorical_crossentropy',
optimizer='adam',
metrics=['accuracy'])
# Fit the model
csv_logger = CSVLogger('log.csv', append=True, separator=';')
checkpointer = ModelCheckpoint(filepath='checkpoint.h5',
verbose=1,
period=10,
save_best_only=True)
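# NOTE: validation_data below reuses the training set, so the reported
# validation metrics are not an independent estimate; (data['x_test'],
# data['y_test']) would give a held-out measurement.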
model.fit(data['x_train'], data['y_train'],
validation_data=(data['x_train'], data['y_train']),
epochs=2,
batch_size=128,
callbacks=[csv_logger, checkpointer])
# Serialize model
model.save('model.h5')
| 28.765957 | 77 | 0.64571 | 168 | 1,352 | 5.017857 | 0.434524 | 0.064057 | 0.04745 | 0.049822 | 0.168446 | 0.111507 | 0.064057 | 0.064057 | 0 | 0 | 0 | 0.011364 | 0.218935 | 1,352 | 46 | 78 | 29.391304 | 0.786932 | 0.074704 | 0 | 0 | 0 | 0 | 0.122285 | 0.019308 | 0 | 0 | 0 | 0 | 0 | 1 | 0.033333 | false | 0 | 0.166667 | 0 | 0.233333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
507dd525c3ddffd50a42e1db9ea15d64dc8908f9 | 13,745 | py | Python | src/sentry/coreapi.py | vperron/sentry | 4ea0c8cb120a3165f0e0b185c64213b69ab621ea | [
"BSD-3-Clause"
] | null | null | null | src/sentry/coreapi.py | vperron/sentry | 4ea0c8cb120a3165f0e0b185c64213b69ab621ea | [
"BSD-3-Clause"
] | null | null | null | src/sentry/coreapi.py | vperron/sentry | 4ea0c8cb120a3165f0e0b185c64213b69ab621ea | [
"BSD-3-Clause"
] | null | null | null | """
sentry.coreapi
~~~~~~~~~~~~~~
:copyright: (c) 2010-2014 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
# TODO: We should make the API a class, and UDP/HTTP just inherit from it
# This will make it so we can more easily control logging with various
# metadata (rather than generic log messages which aren't useful).
from __future__ import absolute_import, print_function
import base64
import logging
import six
import uuid
import zlib
from datetime import datetime, timedelta
from django.utils.crypto import constant_time_compare
from django.utils.encoding import smart_str
from gzip import GzipFile
from time import time
from sentry.app import env
from sentry.cache import default_cache
from sentry.constants import (
CLIENT_RESERVED_ATTRS, DEFAULT_LOG_LEVEL, LOG_LEVELS, MAX_TAG_VALUE_LENGTH,
MAX_TAG_KEY_LENGTH
)
from sentry.exceptions import InvalidTimestamp
from sentry.interfaces.base import get_interface
from sentry.models import Project, ProjectKey
from sentry.tasks.store import preprocess_event
from sentry.utils import is_float, json
from sentry.utils.auth import parse_auth_header
from sentry.utils.compat import StringIO
from sentry.utils.strings import decompress
logger = logging.getLogger('sentry.coreapi')
LOG_LEVEL_REVERSE_MAP = dict((v, k) for k, v in LOG_LEVELS.iteritems())
class APIError(Exception):
http_status = 400
msg = 'Invalid request'
def __init__(self, msg=None):
if msg:
self.msg = msg
def __str__(self):
return self.msg or ''
class APIUnauthorized(APIError):
http_status = 401
msg = 'Unauthorized'
class APIForbidden(APIError):
http_status = 403
class APITimestampExpired(APIError):
http_status = 410
class APIRateLimited(APIError):
http_status = 429
msg = 'Creation of this event was denied due to rate limiting.'
def __init__(self, retry_after=None):
self.retry_after = retry_after
def client_metadata(client=None, project=None, exception=None, tags=None, extra=None):
if not extra:
extra = {}
if not tags:
tags = {}
extra['client'] = client
extra['request'] = env.request
extra['tags'] = tags
if project:
extra['project_slug'] = project.slug
extra['project_id'] = project.id
if project.team:
extra['team_slug'] = project.team.slug
extra['team_id'] = project.team.id
if project.organization:
extra['organization_slug'] = project.organization.slug
extra['organization_id'] = project.organization.id
tags['client'] = client
if exception:
tags['exc_type'] = type(exception).__name__
if project and project.organization:
tags['project'] = '%s/%s' % (project.organization.slug, project.slug)
result = {'extra': extra}
if exception:
result['exc_info'] = True
return result
def extract_auth_vars(request):
if request.META.get('HTTP_X_SENTRY_AUTH', '').startswith('Sentry'):
return parse_auth_header(request.META['HTTP_X_SENTRY_AUTH'])
elif request.META.get('HTTP_AUTHORIZATION', '').startswith('Sentry'):
return parse_auth_header(request.META['HTTP_AUTHORIZATION'])
else:
return dict(
(k, request.GET[k])
for k in request.GET.iterkeys()
if k.startswith('sentry_')
)
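# The Sentry auth header has roughly this shape (illustrative values):
#   X-Sentry-Auth: Sentry sentry_version=7, sentry_key=<public>, sentry_secret=<secret>
# parse_auth_header() turns it into a dict of those sentry_* pairs; the GET
# fallback above accepts the same keys as query-string parameters.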
def project_from_auth_vars(auth_vars):
api_key = auth_vars.get('sentry_key')
if not api_key:
raise APIForbidden('Invalid api key')
try:
pk = ProjectKey.objects.get_from_cache(public_key=api_key)
except ProjectKey.DoesNotExist:
raise APIForbidden('Invalid api key')
if not constant_time_compare(pk.secret_key, auth_vars.get('sentry_secret', pk.secret_key)):
raise APIForbidden('Invalid api key')
if not pk.is_active:
raise APIForbidden('API key is disabled')
if not pk.roles.store:
raise APIForbidden('Key does not allow event storage access')
project = Project.objects.get_from_cache(pk=pk.project_id)
return project
def decompress_deflate(encoded_data):
try:
return zlib.decompress(encoded_data)
except Exception as e:
# This error should be caught as it suggests that there's a
# bug somewhere in the client's code.
logger.info(e, **client_metadata(exception=e))
raise APIForbidden('Bad data decoding request (%s, %s)' % (
e.__class__.__name__, e))
def decompress_gzip(encoded_data):
try:
        fp = StringIO(encoded_data)
        f = GzipFile(fileobj=fp)
        try:
            return f.read()
        finally:
            f.close()
except Exception as e:
# This error should be caught as it suggests that there's a
# bug somewhere in the client's code.
logger.info(e, **client_metadata(exception=e))
raise APIForbidden('Bad data decoding request (%s, %s)' % (
e.__class__.__name__, e))
def decode_and_decompress_data(encoded_data):
try:
try:
return decompress(encoded_data)
except zlib.error:
return base64.b64decode(encoded_data)
except Exception as e:
# This error should be caught as it suggests that there's a
# bug somewhere in the client's code.
logger.info(e, **client_metadata(exception=e))
raise APIForbidden('Bad data decoding request (%s, %s)' % (
e.__class__.__name__, e))
def safely_load_json_string(json_string):
try:
obj = json.loads(json_string)
except Exception as e:
# This error should be caught as it suggests that there's a
# bug somewhere in the client's code.
logger.info(e, **client_metadata(exception=e))
raise APIForbidden('Bad data reconstructing object (%s, %s)' % (
e.__class__.__name__, e))
# XXX: ensure keys are coerced to strings
return dict((smart_str(k), v) for k, v in obj.iteritems())
def process_data_timestamp(data, current_datetime=None):
if not data['timestamp']:
del data['timestamp']
return data
elif is_float(data['timestamp']):
try:
data['timestamp'] = datetime.fromtimestamp(float(data['timestamp']))
except Exception:
raise InvalidTimestamp('Invalid value for timestamp: %r' % data['timestamp'])
elif not isinstance(data['timestamp'], datetime):
if '.' in data['timestamp']:
format = '%Y-%m-%dT%H:%M:%S.%f'
else:
format = '%Y-%m-%dT%H:%M:%S'
if 'Z' in data['timestamp']:
            # support the UTC 'Z' marker, but not other timezone suffixes
format += 'Z'
try:
data['timestamp'] = datetime.strptime(data['timestamp'], format)
except Exception:
raise InvalidTimestamp('Invalid value for timestamp: %r' % data['timestamp'])
if current_datetime is None:
current_datetime = datetime.now()
if data['timestamp'] > current_datetime + timedelta(minutes=1):
raise InvalidTimestamp('Invalid value for timestamp (in future): %r' % data['timestamp'])
if data['timestamp'] < current_datetime - timedelta(days=30):
raise InvalidTimestamp('Invalid value for timestamp (too old): %r' % data['timestamp'])
data['timestamp'] = float(data['timestamp'].strftime('%s'))
return data
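# Per the branches above, 'timestamp' may be a float/int epoch, an ISO-like
# string '%Y-%m-%dT%H:%M:%S[.%f][Z]', or a datetime. Values more than one
# minute in the future or more than 30 days old raise InvalidTimestamp.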
def validate_data(project, data, client=None):
# TODO(dcramer): move project out of the data packet
data['project'] = project.id
if not data.get('message'):
data['message'] = '<no message value>'
elif not isinstance(data['message'], six.string_types):
raise APIError('Invalid value for message')
if data.get('culprit'):
if not isinstance(data['culprit'], six.string_types):
raise APIError('Invalid value for culprit')
if not data.get('event_id'):
data['event_id'] = uuid.uuid4().hex
elif not isinstance(data['event_id'], six.string_types):
raise APIError('Invalid value for event_id')
if len(data['event_id']) > 32:
logger.info(
'Discarded value for event_id due to length (%d chars)',
len(data['event_id']), **client_metadata(client, project))
data['event_id'] = uuid.uuid4().hex
if 'timestamp' in data:
try:
process_data_timestamp(data)
except InvalidTimestamp as e:
# Log the error, remove the timestamp, and continue
logger.info(
'Discarded invalid value for timestamp: %r', data['timestamp'],
**client_metadata(client, project, exception=e))
del data['timestamp']
if data.get('modules') and type(data['modules']) != dict:
logger.info(
'Discarded invalid type for modules: %s',
type(data['modules']), **client_metadata(client, project))
del data['modules']
if data.get('extra') is not None and type(data['extra']) != dict:
logger.info(
'Discarded invalid type for extra: %s',
type(data['extra']), **client_metadata(client, project))
del data['extra']
if data.get('tags') is not None:
if type(data['tags']) == dict:
data['tags'] = data['tags'].items()
elif not isinstance(data['tags'], (list, tuple)):
logger.info(
'Discarded invalid type for tags: %s',
type(data['tags']), **client_metadata(client, project))
del data['tags']
if data.get('tags'):
# remove any values which are over 32 characters
tags = []
for pair in data['tags']:
try:
k, v = pair
except ValueError:
logger.info('Discarded invalid tag value: %r',
pair, **client_metadata(client, project))
continue
if not isinstance(k, six.string_types):
try:
k = six.text_type(k)
except Exception:
logger.info('Discarded invalid tag key: %r',
type(k), **client_metadata(client, project))
continue
if not isinstance(v, six.string_types):
try:
v = six.text_type(v)
except Exception:
logger.info('Discarded invalid tag value: %s=%r',
k, type(v), **client_metadata(client, project))
continue
if len(k) > MAX_TAG_KEY_LENGTH or len(v) > MAX_TAG_VALUE_LENGTH:
logger.info('Discarded invalid tag: %s=%s',
k, v, **client_metadata(client, project))
continue
tags.append((k, v))
data['tags'] = tags
for k in data.keys():
if k in CLIENT_RESERVED_ATTRS:
continue
value = data.pop(k)
if not value:
logger.info(
'Ignored empty interface value: %s', k,
**client_metadata(client, project))
continue
try:
interface = get_interface(k)
except ValueError:
logger.info(
'Ignored unknown attribute: %s', k,
**client_metadata(client, project))
continue
if type(value) != dict:
# HACK(dcramer): the exception interface supports a list as the
# value. We should change this in a new protocol version.
if type(value) in (list, tuple):
value = {'values': value}
else:
logger.info(
'Invalid parameters for value: %s', k,
type(value), **client_metadata(client, project))
continue
try:
inst = interface.to_python(value)
data[inst.get_path()] = inst.to_json()
except Exception as e:
if isinstance(e, AssertionError):
log = logger.info
else:
log = logger.error
log('Discarded invalid value for interface: %s', k,
**client_metadata(client, project, exception=e, extra={'value': value}))
level = data.get('level') or DEFAULT_LOG_LEVEL
if isinstance(level, six.string_types) and not level.isdigit():
# assume it's something like 'warning'
try:
data['level'] = LOG_LEVEL_REVERSE_MAP[level]
except KeyError as e:
logger.info(
'Discarded invalid logger value: %s', level,
**client_metadata(client, project, exception=e))
data['level'] = LOG_LEVEL_REVERSE_MAP.get(
DEFAULT_LOG_LEVEL, DEFAULT_LOG_LEVEL)
if data.get('release'):
data['release'] = unicode(data['release'])
return data
def ensure_does_not_have_ip(data):
if 'sentry.interfaces.Http' in data:
if 'env' in data['sentry.interfaces.Http']:
data['sentry.interfaces.Http']['env'].pop('REMOTE_ADDR', None)
if 'sentry.interfaces.User' in data:
data['sentry.interfaces.User'].pop('ip_address', None)
def ensure_has_ip(data, ip_address):
if data.get('sentry.interfaces.Http', {}).get('env', {}).get('REMOTE_ADDR'):
return
if data.get('sentry.interfaces.User', {}).get('ip_address'):
return
data.setdefault('sentry.interfaces.User', {})['ip_address'] = ip_address
def insert_data_to_database(data):
cache_key = 'e:{1}:{0}'.format(data['project'], data['event_id'])
default_cache.set(cache_key, data, timeout=3600)
preprocess_event.delay(cache_key=cache_key, start_time=time())
| 33.771499 | 97 | 0.61135 | 1,685 | 13,745 | 4.84273 | 0.191098 | 0.035049 | 0.036765 | 0.046324 | 0.295098 | 0.280515 | 0.192402 | 0.146691 | 0.118995 | 0.10625 | 0 | 0.004423 | 0.276173 | 13,745 | 406 | 98 | 33.85468 | 0.81576 | 0.082503 | 0 | 0.262458 | 0 | 0 | 0.161383 | 0.013985 | 0 | 0 | 0 | 0.002463 | 0.003322 | 1 | 0.049834 | false | 0 | 0.07309 | 0.003322 | 0.219269 | 0.003322 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
507dd79ccaceb7bc0588a98f26d0beacd4de3679 | 3,708 | py | Python | src/django_prefetch_utils/identity_map/persistent.py | roverdotcom/django-prefetch-utils | f901cb2159b3e95f44baf18812d5bbb7c52afd97 | [
"BSD-3-Clause"
] | 1 | 2019-09-26T10:32:47.000Z | 2019-09-26T10:32:47.000Z | src/django_prefetch_utils/identity_map/persistent.py | roverdotcom/django-prefetch-utils | f901cb2159b3e95f44baf18812d5bbb7c52afd97 | [
"BSD-3-Clause"
] | 1 | 2019-07-23T09:25:06.000Z | 2019-07-23T09:25:06.000Z | src/django_prefetch_utils/identity_map/persistent.py | roverdotcom/django-prefetch-utils | f901cb2159b3e95f44baf18812d5bbb7c52afd97 | [
"BSD-3-Clause"
] | 1 | 2021-12-22T14:38:20.000Z | 2021-12-22T14:38:20.000Z | import threading
from contextlib import ContextDecorator
from functools import partial
import wrapt
from django.db.models.query import QuerySet
from django_prefetch_utils.identity_map import get_default_prefetch_identity_map
from django_prefetch_utils.identity_map import prefetch_related_objects_impl
from django_prefetch_utils.selector import override_prefetch_related_objects
from .wrappers import wrap_identity_map_for_queryset
_active = threading.local()
original_fetch_all = QuerySet._fetch_all
class FetchAllDescriptor(object):
"""
This descriptor replaces ``QuerySet._fetch_all`` and applies
an identity map to any objects fetched in a queryset.
"""
def __get__(self, queryset, type=None):
if queryset is None:
return self
return partial(self._fetch_all, queryset)
def _fetch_all(self, queryset):
identity_map = getattr(_active, "value", None)
if identity_map is None:
return original_fetch_all(queryset)
identity_map = wrap_identity_map_for_queryset(identity_map, queryset)
if queryset._result_cache is None:
queryset._result_cache = [identity_map[obj] for obj in queryset._iterable_class(queryset)]
if queryset._prefetch_related_lookups and not queryset._prefetch_done:
queryset._prefetch_related_objects()
def enable_fetch_all_descriptor():
"""
Replaces ``QuerySet._fetch_all`` with an instance of
:class:`FetchAllDescriptor`.
"""
QuerySet._fetch_all = FetchAllDescriptor()
def disable_fetch_all_descriptor():
"""
Sets ``QuerySet._fetch_all`` to be the original method.
"""
QuerySet._fetch_all = original_fetch_all
class use_persistent_prefetch_identity_map(ContextDecorator):
"""
A context decorator which allows the same identity map to be used
across multiple calls to ``prefetch_related_objects``.
::
with use_persistent_prefetch_identity_map():
dogs = list(Dogs.objects.prefetch_related("toys"))
# The toy.dog instances will be identitical (not just equal)
# to the ones fetched on the line above
with self.assertNumQueries(1):
toys = list(Toy.objects.prefetch_related("dog"))
"""
previous_active = None
override_context_decorator = None
def __init__(self, identity_map=None, pass_identity_map=False):
self._identity_map = identity_map
self.pass_identity_map = pass_identity_map
def _recreate_cm(self):
return self
def __enter__(self):
if self._identity_map is not None:
identity_map = self._identity_map
else:
identity_map = get_default_prefetch_identity_map()
enable_fetch_all_descriptor()
self.previous_active = getattr(_active, "value", None)
_active.value = identity_map
self.override_context_decorator = override_prefetch_related_objects(
partial(prefetch_related_objects_impl, identity_map)
)
self.override_context_decorator.__enter__()
return identity_map
def __exit__(self, exc_type, exc_value, traceback):
_active.value = self.previous_active
self.previous_active = None
self.override_context_decorator.__exit__(exc_type, exc_value, traceback)
self.override_context_decorator = None
def __call__(self, func):
@wrapt.decorator
def wrapper(wrapped, instance, args, kwargs):
with self._recreate_cm() as identity_map:
if self.pass_identity_map:
args = (identity_map,) + args
return wrapped(*args, **kwargs)
return wrapper(func)
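# A hypothetical decorator usage sketch (Dog is the illustrative model from
# the docstring above):
#
#   @use_persistent_prefetch_identity_map(pass_identity_map=True)
#   def fetch_dogs(identity_map):
#       return list(Dog.objects.prefetch_related("toys"))
#
# With pass_identity_map=True, __call__ prepends the active identity map to
# the wrapped function's positional arguments.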
| 32.526316 | 102 | 0.704153 | 442 | 3,708 | 5.50905 | 0.262443 | 0.140041 | 0.054209 | 0.045996 | 0.209446 | 0.064887 | 0.032854 | 0 | 0 | 0 | 0 | 0.000348 | 0.225728 | 3,708 | 113 | 103 | 32.814159 | 0.847788 | 0.188242 | 0 | 0.031746 | 0 | 0 | 0.003439 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.15873 | false | 0.047619 | 0.142857 | 0.015873 | 0.47619 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5084e45c999f71590d09d930c095c171c716c1fd | 1,863 | py | Python | setup.py | naojsoft/g2cam | 4f01cdccae7978d5c16af59a90ff7459ed6c2997 | [
"BSD-3-Clause"
] | null | null | null | setup.py | naojsoft/g2cam | 4f01cdccae7978d5c16af59a90ff7459ed6c2997 | [
"BSD-3-Clause"
] | null | null | null | setup.py | naojsoft/g2cam | 4f01cdccae7978d5c16af59a90ff7459ed6c2997 | [
"BSD-3-Clause"
] | null | null | null | #! /usr/bin/env python
#
from g2cam.version import version
import os
srcdir = os.path.dirname(__file__)
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
def read(fname):
buf = open(os.path.join(srcdir, fname), 'r').read()
return buf
# not yet working...
def get_docs():
docdir = os.path.join(srcdir, 'doc')
res = []
# ['../../doc/Makefile', 'doc/conf.py', 'doc/*.rst',
# 'doc/manual/*.rst', 'doc/figures/*.png']
return res
setup(
name = "g2cam",
version = version,
author = "OCS Group, Subaru Telescope, NAOJ",
author_email = "ocs@naoj.org",
description = ("A toolkit for interfacing with the Subaru Telescope Observation Control System."),
long_description = read('README.txt'),
license = "BSD",
keywords = "subaru, telescope, instrument, toolkit, interface",
url = "http://naojsoft.github.com/g2cam",
packages = ['g2cam', 'g2base',
# Misc g2cam
'g2cam.util',
'g2cam.status',
# Misc g2base
'g2base.astro',
'g2base.alarm',
'g2base.remoteObjects',
'g2base.remoteObjects.pubsubs',
'g2base.remoteObjects.packers',
],
package_data = { #'g2cam.doc': ['manual/*.html'],
},
scripts = ['scripts/g2cam', 'scripts/stubgen', 'scripts/ro_shell',
'scripts/ro_mgr_svc', 'scripts/ro_name_svc',
'scripts/ro_ps_svc', 'scripts/get_status'],
classifiers = [
"License :: OSI Approved :: BSD License",
"Operating System :: MacOS :: MacOS X",
"Operating System :: Microsoft :: Windows",
"Operating System :: POSIX",
"Topic :: Scientific/Engineering :: Astronomy",
],
)
| 31.05 | 102 | 0.560923 | 191 | 1,863 | 5.387435 | 0.565445 | 0.034985 | 0.019436 | 0.031098 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.012149 | 0.293076 | 1,863 | 59 | 103 | 31.576271 | 0.769172 | 0.115405 | 0 | 0.043478 | 0 | 0 | 0.401829 | 0.047561 | 0 | 0 | 0 | 0 | 0 | 1 | 0.043478 | false | 0 | 0.108696 | 0 | 0.195652 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
50856354f4ad0c7d2f074d903c89cd43aaff70d5 | 1,896 | py | Python | pkg/ampcor/correlators/UniformGrid.py | isce-framework/ampcor | eafadcbe4380a85320d8c7e884ebe4d6d279770e | [
"BSD-2-Clause"
] | 3 | 2019-05-08T05:48:21.000Z | 2021-09-26T23:19:45.000Z | pkg/ampcor/correlators/UniformGrid.py | isce-framework/ampcor | eafadcbe4380a85320d8c7e884ebe4d6d279770e | [
"BSD-2-Clause"
] | null | null | null | pkg/ampcor/correlators/UniformGrid.py | isce-framework/ampcor | eafadcbe4380a85320d8c7e884ebe4d6d279770e | [
"BSD-2-Clause"
] | 3 | 2019-10-28T12:26:23.000Z | 2021-09-26T23:19:55.000Z | # -*- Python -*-
# -*- coding: utf-8 -*-
#
# michael a.g. aïvázis <michael.aivazis@para-sim.com>
# parasim
# (c) 1998-2019 all rights reserved
#
# externals
import itertools
# framework
import ampcor
# my protocol
from .Domain import Domain as domain
# declaration
class UniformGrid(ampcor.component,
family="ampcor.correlators.domains.uniform", implements=domain):
"""
A domain that generates domain points on a uniform grid
"""
# user configurable state
shape = ampcor.properties.tuple(schema=ampcor.properties.int())
shape.default = (1,1)
shape.doc = "the shape of the grid of points to generate"
# protocol requirements
@ampcor.export
def points(self, bounds, **kwds):
"""
        Generate a cloud of points within {bounds} where reference tiles will be placed
"""
# get my shape
shape = self.shape
# split {bounds} into evenly spaced tiles
tile = tuple(b//s for b,s in zip(bounds, shape))
# compute the unallocated border around the raster
margin = tuple(b%s for b,s in zip(bounds, shape))
# build the sequences of coordinates for tile centers along each axis
ticks = tuple(
# by generating the locations
tuple(m//2 + n*t + t//2 for n in range(g))
# given the layout of each axis
for g, m, t in zip(shape, margin, tile)
)
# their cartesian product generates the centers of all the tiles in the grid
centers = tuple(itertools.product(*ticks))
# all done
return centers
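    # worked example: shape=(2, 2) on bounds=(10, 10) gives tile=(5, 5),
    # margin=(0, 0), ticks=((2, 7), (2, 7)), and hence centers
    # ((2, 2), (2, 7), (7, 2), (7, 7))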
# interface
def show(self, channel):
"""
Display my configuration
"""
# show who i am
channel.line(f" -- domain: {self.pyre_family()}")
channel.line(f" shape: {self.shape}")
# all done
return
# end of file
| 27.085714 | 87 | 0.604958 | 242 | 1,896 | 4.735537 | 0.520661 | 0.006981 | 0.024433 | 0.017452 | 0.048866 | 0.048866 | 0.048866 | 0.048866 | 0.048866 | 0.048866 | 0 | 0.009752 | 0.296941 | 1,896 | 69 | 88 | 27.478261 | 0.849962 | 0.389768 | 0 | 0 | 0 | 0 | 0.126984 | 0.031746 | 0 | 0 | 0 | 0 | 0 | 1 | 0.086957 | false | 0 | 0.130435 | 0 | 0.391304 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
50859a1ab4e550255d5b54a54e244b55dbd52c38 | 12,432 | py | Python | ilm/utils/del_update_db.py | kalevhark/kroonika | a2d7a80666e742e62a3d2bcf72a597bf103ab9c7 | [
"MIT"
] | 1 | 2020-05-12T05:39:03.000Z | 2020-05-12T05:39:03.000Z | ilm/utils/del_update_db.py | kalevhark/kroonika | a2d7a80666e742e62a3d2bcf72a597bf103ab9c7 | [
"MIT"
] | 8 | 2019-12-25T21:32:49.000Z | 2022-02-11T19:28:33.000Z | ilm/utils/del_update_db.py | kalevhark/kroonika | a2d7a80666e742e62a3d2bcf72a597bf103ab9c7 | [
"MIT"
] | null | null | null | #!/home/ec2-user/django/kroonika_env/bin/python3
#
# For regularly updating weather data in the database
# To run:
# /python-env-path-to/python3 /path-to-ilm-app/utils/update.py
from datetime import datetime, timedelta
import os
import re
import sys
import xml.etree.ElementTree as ET
from urllib.request import Request, urlopen
from urllib.error import URLError
from bs4 import BeautifulSoup
import psycopg2
from psycopg2.extras import RealDictCursor
from pytz import timezone
import pytz
import requests
from config import config
# from ilm.views import yrno_48h, owm_onecall
def utc2eesti_aeg(dt):
eesti_aeg = timezone('Europe/Tallinn')
return dt.astimezone(eesti_aeg)
# For converting decimal data fields that may be empty (<NULL>)
def float_or_none(value):
try:
return float(value)
except:
return None
def ilm_praegu():
    # Read the latest observation data from the Estonian Weather Service website
jaam = 'Valga'
href = 'http://www.ilmateenistus.ee/ilma_andmed/xml/observations.php'
r = requests.get(href)
try:
root = ET.fromstring(r.text)
except:
        # Check whether observation data is present after all
observation_exists = r.text.find('<observations')
if observation_exists > 0:
root = ET.fromstring(r.text[observation_exists:])
else:
return None
i = dict()
    # Time of the measurement
dt = datetime.fromtimestamp(int(root.attrib['timestamp']))
i['timestamp'] = pytz.timezone('Europe/Tallinn').localize(dt)
station = root.findall("./station/[name='"+jaam+"']")
for el in station:
for it in el:
data = it.text
            # If it is not a text field, convert it to float
if it.tag not in ['name',
'station',
'phenomenon',
'phenomenon_observer']:
data = float_or_none(data)
i[it.tag] = data
return i
def ilmaandmed_veebist(dt):
"""
Tagastab etteantud ajahetke (d) viimase möödunud täistunni ilmaandmed
ilmateenistus.ee veebilehelt
"""
jaam = 'Valga'
cols = ['airtemperature',
'relativehumidity',
'airpressure',
'airpressure_delta',
'winddirection',
'windspeed',
'windspeedmax',
'cloudiness',
'phenomenon',
'phenomenon_observer',
'precipitations',
'visibility']
href = 'http://ilmateenistus.ee/ilm/ilmavaatlused/vaatlusandmed/tunniandmed/'
dt = utc2eesti_aeg(dt)
p2ev = dt.strftime("%d.%m.%Y")
tund = dt.strftime("%H")
    # Query URL
p2ring = ''.join(
[href,
'?filter[date]=',
p2ev,
'&filter[hour]=',
tund]
)
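    # e.g. for 01.01.2020 13:00 Estonian time the query URL becomes:
    # http://ilmateenistus.ee/ilm/ilmavaatlused/vaatlusandmed/tunniandmed/?filter[date]=01.01.2020&filter[hour]=13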
andmed = dict()
    # Read the data from the web
req = Request(p2ring, headers={'User-Agent': 'Mozilla/5.0'})
try:
response = urlopen(req).read()
except URLError as e:
if hasattr(e, 'reason'):
print('We failed to reach a server.')
print('Reason: ', e.reason)
elif hasattr(e, 'code'):
print('The server couldn\'t fulfill the request.')
print('Error code: ', e.code)
else:
        # Parse the response
soup = BeautifulSoup(response, 'html.parser')
kontroll_hour = soup.find(attrs={"name": "filter[hour]"})
kontroll_date = soup.find(attrs={"name": "filter[date]"})
if kontroll_hour:
if kontroll_hour['value'].zfill(2) != tund.zfill(2) or kontroll_date['value'] != p2ev:
print(dt, 'Vale!')
                # If the response has the wrong time or date, return an empty dict
return andmed
        # Find the data table on the page
table = soup.table
        # Find the station's row in the table
row = table.find(string=re.compile(jaam))
data = row.find_parent().find_next_siblings()
for i in range(len(data)):
            if data[i]:  # if the data field is not empty
                if cols[i] in ['phenomenon', 'phenomenon_observer']:  # text field
                    andmed[cols[i]] = data[i].text.strip()
                else:  # numeric field
value = data[i].text.strip().replace(',', '.')
andmed[cols[i]] = float_or_none(value)
else:
andmed[cols[i]] = None
# andmed['station'] = Jaam.objects.filter(name=jaam).first()
andmed['station_id'] = 1
andmed['timestamp'] = pytz.timezone('Europe/Tallinn').localize(
datetime(dt.year, dt.month, dt.day, dt.hour))
    # Weather data to the database only if the core data is present
# if andmed['airtemperature'] != None:
# i = Ilm(**andmed)
# i.save()
# print('Salvestan andmebaasi:', d)
return andmed
# The following connect() function connects to the PostgreSQL database from config and prints out the server version.
def connect():
""" Connect to the PostgreSQL database server """
conn = None
try:
# read connection parameters
params = config()
# connect to the PostgreSQL server
print('Connecting to the PostgreSQL database...')
conn = psycopg2.connect(**params)
# create a cursor
cur = conn.cursor()
# execute a statement
print('PostgreSQL database version:')
cur.execute('SELECT version()')
# display the PostgreSQL database server version
db_version = cur.fetchone()
print(db_version)
# close the communication with the PostgreSQL
cur.close()
except (Exception, psycopg2.DatabaseError) as error:
print(error)
finally:
if conn is not None:
conn.close()
print('Database connection closed.')
# The latest measurement
def get_maxtimestamp(path=''):
""" query maxdate from the ilm_ilm table """
conn = None
try:
params = config(path)
conn = psycopg2.connect(**params)
cur = conn.cursor()
cur.execute("SELECT max(timestamp) FROM ilm_ilm")
print("Viimane kanne: ", cur.rowcount)
row = cur.fetchone()
# d = pytz.timezone('Europe/Tallinn').localize(datetime.now())
# while row is not None:
# # print(row[0], (d - row[0]).seconds)
# row = cur.fetchone()
cur.close()
except (Exception, psycopg2.DatabaseError) as error:
print(error)
finally:
if conn is not None:
conn.close()
return row[0]
# Measurements from the last 24 hours
def get_observations_24hours(path=''):
""" query maxdate from the ilm_ilm table """
conn = None
try:
params = config(path)
conn = psycopg2.connect(**params)
cur = conn.cursor()
cur.execute("SELECT timestamp FROM ilm_ilm WHERE timestamp > now() - interval '1 day' ORDER BY timestamp")
print("Kandeid: ", cur.rowcount)
row = cur.fetchone()
while row is not None:
# print(row)
d = row[0]
print(utc2eesti_aeg(d))
row = cur.fetchone()
cur.close()
except (Exception, psycopg2.DatabaseError) as error:
print(error)
finally:
if conn is not None:
conn.close()
# Check whether a measurement exists for a given time
def check_observation_exists(dt, path=''):
""" query if exists timestamp from the ilm_ilm table """
conn = None
row = dict()
try:
params = config(path)
conn = psycopg2.connect(**params)
cur = conn.cursor(cursor_factory=RealDictCursor)
# cur = conn.cursor()
condition_y = f"date_part('year', timestamp) = {dt.year}"
condition_m = f"date_part('month', timestamp) = {dt.month}"
condition_d = f"date_part('day', timestamp) = {dt.day}"
condition_h = f"date_part('hour', timestamp) = {dt.hour}"
condition = ' and '.join([condition_y, condition_m, condition_d, condition_h])
query = f'SELECT * FROM ilm_ilm WHERE {condition}'
cur.execute(query)
# print("Kandeid: ", cur.rowcount)
row = cur.fetchone()
# while row is not None:
# # d = row[0]
# # print(f'PostgreSQL datetime : {d}')
# # print(f'PostgreSQL timezone : {d.tzname()}')
# # print(f'PostgreSQL offset UTC ajaga : {d.utcoffset()}')
# # print(f'PostgreSQL Eesti aeg : {utc2eesti_aeg(d)}')
# # print((d - row[0]).seconds)
# # print(row)
# row = cur.fetchone()
cur.close()
except (Exception, psycopg2.DatabaseError) as error:
print(error)
finally:
if conn is not None:
conn.close()
return row
# Inserts a new observation record
def insert_new_observations(observation_dict, path=''):
if not observation_dict:
return
    # Remove the id, if present
observation_dict.pop('id', None)
    # Build the list of columns
cols = [key for key in observation_dict]
cols_str = ', '.join(cols)
# vals = [observation_dict[col] for col in cols]
vals_str = ", ".join([f"%({col})s" for col in cols])
sql = f"INSERT INTO ilm_ilm ({cols_str}) VALUES ({vals_str}) RETURNING id;"
conn = None
obs_id = None
try:
# read database configuration
params = config(path)
# connect to the PostgreSQL database
conn = psycopg2.connect(**params)
# create a new cursor
cur = conn.cursor()
# execute the INSERT statement
cur.execute(sql, {**observation_dict})
# get the generated id back
obs_id = cur.fetchone()[0]
# commit the changes to the database
conn.commit()
# close communication with the database
cur.close()
except (Exception, psycopg2.DatabaseError) as error:
print(error)
finally:
if conn is not None:
conn.close()
return obs_id
# Deletes duplicate records
def delete_duplicate_observations(path=''):
conn = None
rows_deleted = 0
try:
# read database configuration
params = config(path)
# connect to the PostgreSQL database
conn = psycopg2.connect(**params)
# create a new cursor
cur = conn.cursor()
# execute the UPDATE statement
cur.execute("DELETE FROM ilm_ilm a USING ilm_ilm b WHERE a.id < b.id AND a.timestamp = b.timestamp;")
# get the number of updated rows
rows_deleted = cur.rowcount
# Commit the changes to the database
conn.commit()
# Close communication with the PostgreSQL database
cur.close()
except (Exception, psycopg2.DatabaseError) as error:
print(error)
finally:
if conn is not None:
conn.close()
# print(f'Kustutati: {rows_deleted}')
return rows_deleted
if __name__ == '__main__':
path = os.path.dirname(sys.argv[0])
# get_maxtimestamp()
rows_deleted = delete_duplicate_observations(path)
if rows_deleted > 0:
print(f'Kustutati: {rows_deleted} kirjet')
    for hour in range(71, -1, -1):  # data for the last 72 hours
observation_time = datetime.now() - timedelta(hours=hour)
observation = check_observation_exists(observation_time, path)
# print(observation_time, end=': ')
if not observation:
ilm_observation_veebist = ilmaandmed_veebist(observation_time)
# print(ilm_observation_veebist)
if ilm_observation_veebist:
id = insert_new_observations(ilm_observation_veebist, path)
print(f'{ilm_observation_veebist["timestamp"]} lisatud {id}')
else:
print(f'{observation_time} uuendamine ebaõnnestus')
else:
# print('olemas.')
pass
# y = yrno_48h()
# y_dt = y['forecast']['dt'][6]
# y_temp = y['forecast']['temperatures'][6]
# y_prec = y['forecast']['precipitations'][6]
# o = owm_onecall()
# o_dt = o['hourly'][6]['dt']
# o_temp = o['hourly'][6]['temp']
# try:
# o_prec = o['hourly'][6]['rain']['1h']
# except:
# o_prec = None
# line = ';'.join([str(y_dt), str(y_temp), str(y_prec), str(o_dt), str(o_temp), str(o_prec)])
# with open('forecast_6h.log', 'a') as f:
# f.write(line + '\n') | 33.329759 | 117 | 0.590412 | 1,426 | 12,432 | 5.053296 | 0.26087 | 0.016236 | 0.011241 | 0.020816 | 0.264502 | 0.234527 | 0.21607 | 0.207882 | 0.207882 | 0.207882 | 0 | 0.007281 | 0.292954 | 12,432 | 373 | 118 | 33.329759 | 0.812514 | 0.265203 | 0 | 0.369295 | 0 | 0.004149 | 0.159106 | 0.004225 | 0 | 0 | 0 | 0 | 0 | 1 | 0.041494 | false | 0.004149 | 0.058091 | 0 | 0.149378 | 0.087137 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
50866a9870cf7938d6279184b94058fb2ce8fdd9 | 3,719 | py | Python | feature_selections/mrmr.py | Pandalovepeace/FeatureSelectionsAndExtractions | 477020acb5cfd35b975e56eee6cb863a6ec81861 | [
"Apache-2.0"
] | 1 | 2022-03-27T13:38:45.000Z | 2022-03-27T13:38:45.000Z | feature_selections/mrmr.py | Pandalovepeace/FeatureSelectionsAndExtractions | 477020acb5cfd35b975e56eee6cb863a6ec81861 | [
"Apache-2.0"
] | null | null | null | feature_selections/mrmr.py | Pandalovepeace/FeatureSelectionsAndExtractions | 477020acb5cfd35b975e56eee6cb863a6ec81861 | [
"Apache-2.0"
] | null | null | null | # -*- coding:utf-8 -*-
# file : mrmr.py
# author : littlely
# description:
import warnings
import numpy as np
from bases import Base
class MRMR(Base):
def __init__(self, feature_num):
"""
        mRMR is a feature selection method which maximises the feature-label correlation
        and minimises the feature-feature correlation. This implementation can only be
        applied to numeric values; to read more about mRMR, please refer to
        :ref:`https://blog.csdn.net/littlely_ll/article/details/71749776`.
:param feature_num: selected number of features
"""
self.feature_num = feature_num
self._selected_features = []
def fit(self, X, y):
"""
fit an array data
:param X: a numpy array
:param y: the label, a list or one dimension array
:return:
"""
X = self._check_array(X)
y = self._check_array(y)
assert X.shape[0] == len(y), "X and y not in the same length!"
if self.feature_num > X.shape[1]:
self.feature_num = X.shape[1]
warnings.warn("The feature_num has to be set less or equal to {}".format(X.shape[1]), UserWarning)
MIs = self.feature_label_MIs(X, y)
max_MI_arg = np.argmax(MIs)
selected_features = []
MIs = list(zip(range(len(MIs)), MIs))
selected_features.append(MIs.pop(int(max_MI_arg)))
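        # Greedily add the feature maximizing the mRMR criterion
        # theta_j = I(f_j; y) - (1/|S|) * sum_{f_s in S} I(f_j; f_s)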
while True:
max_theta = float("-inf")
max_theta_index = None
for mi_outset in MIs:
ff_mis = []
for mi_inset in selected_features:
ff_mi = self.feature_feature_MIs(X[:, mi_outset[0]], X[:, mi_inset[0]])
ff_mis.append(ff_mi)
theta = mi_outset[1] - 1 / len(selected_features) * sum(ff_mis)
if theta >= max_theta:
max_theta = theta
max_theta_index = mi_outset
selected_features.append(max_theta_index)
MIs.remove(max_theta_index)
if len(selected_features) >= self.feature_num:
break
self._selected_features = [ind for ind, mi in selected_features]
def transform(self, X):
return X[:, self._selected_features]
def fit_transform(self, X, y):
self.fit(X, y)
return self.transform(X)
def entropy(self, c):
"""
entropy calculation
:param c:
:return:
"""
c_normalized = c / float(np.sum(c))
c_normalized = c_normalized[np.nonzero(c_normalized)]
H = -sum(c_normalized * np.log2(c_normalized))
return H
def feature_label_MIs(self, arr, y):
"""
calculate feature-label mutual information
:param arr:
:param y:
:return:
"""
m, n = arr.shape
MIs = []
p_y = np.histogram(y)[0]
h_y = self.entropy(p_y)
for i in range(n):
p_i = np.histogram(arr[:, i])[0]
            p_iy = np.histogram2d(arr[:, i], y)[0]  # joint histogram of feature i and the label
h_i = self.entropy(p_i)
h_iy = self.entropy(p_iy)
MI = h_i + h_y - h_iy
MIs.append(MI)
return MIs
def feature_feature_MIs(self, x, y):
"""
calculate feature-feature mutual information
:param x:
:param y:
:return:
"""
p_x = np.histogram(x)[0]
p_y = np.histogram(y)[0]
p_xy = np.histogram2d(x, y)[0]
h_x = self.entropy(p_x)
h_y = self.entropy(p_y)
h_xy = self.entropy(p_xy)
return h_x + h_y - h_xy
@property
def important_features(self):
return self._selected_features
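# A minimal usage sketch with hypothetical random data (assumes the Base
# class from `bases` provides the _check_array validation used in fit):
if __name__ == "__main__":
    X_demo = np.random.rand(100, 10)
    y_demo = np.random.randint(0, 2, size=100)
    selector = MRMR(feature_num=3)
    X_selected = selector.fit_transform(X_demo, y_demo)
    print(selector.important_features)  # indices of the 3 chosen columns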
| 26.564286 | 110 | 0.552837 | 493 | 3,719 | 3.971602 | 0.27789 | 0.089888 | 0.036772 | 0.022472 | 0.078652 | 0.052094 | 0 | 0 | 0 | 0 | 0 | 0.011007 | 0.340414 | 3,719 | 139 | 111 | 26.755396 | 0.787199 | 0.189836 | 0 | 0.057143 | 0 | 0 | 0.030194 | 0 | 0 | 0 | 0 | 0 | 0.014286 | 1 | 0.114286 | false | 0 | 0.057143 | 0.028571 | 0.271429 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
50906519392d368cd6099c11e0b9bbaf41def05a | 7,088 | py | Python | moonlight/score/state/measure.py | gsy/moonlight | 1eca6c903b7334afca7555b9aeb7a212c76fef9d | [
"Apache-2.0"
] | null | null | null | moonlight/score/state/measure.py | gsy/moonlight | 1eca6c903b7334afca7555b9aeb7a212c76fef9d | [
"Apache-2.0"
] | null | null | null | moonlight/score/state/measure.py | gsy/moonlight | 1eca6c903b7334afca7555b9aeb7a212c76fef9d | [
"Apache-2.0"
] | null | null | null | # Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The score state which is not persisted between measures."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import enum
from protobuf import music_pb2
from moonlight.protobuf import musicscore_pb2
from moonlight.score.elements import key_signature as key_signature_module
ACCIDENTAL_PITCH_SHIFT_ = {
# TODO(ringw): Detect 2 adjacent flats as a double flat.
musicscore_pb2.Glyph.FLAT: -1,
musicscore_pb2.Glyph.NATURAL: 0,
musicscore_pb2.Glyph.NONE: 0,
musicscore_pb2.Glyph.SHARP: +1,
musicscore_pb2.Glyph.DOUBLE_SHARP: +2,
}
class _KeySignatureState(enum.Enum):
KEY_SIGNATURE = 1
ACCIDENTALS = 2
class MeasureState(object):
"""State of a single measure of a staff.
Attributes:
clef: The current clef.
key_signature: The current `KeySignature`.
chords: A map from stem (tuple `((x0, y0), (x1, y1))`) to the first note
that was read and is attached to the stem. Subsequent notes attached to
the same stem will read their start and end time from the first note.
    time: The current time in the measure, as a float measured in quarter
      notes from the start of the score.
"""
def __init__(self, start_time, clef, key_signature=None):
"""Initializes a new measure.
Args:
start_time: The start time (in quarter notes) of the measure.
clef: A `Clef`.
key_signature: The previously detected key signature (optional). If
present, do not detect a key signature in this measure. This should be
        taken from the previous measure on this staff if this is not the
first measure. It should not be propagated from one staff to the next,
because we expect the key signature to be repeated on each staff and
we will re-detect it.
"""
self.time = start_time
self.clef = clef
self.key_signature = (
key_signature or key_signature_module.KeySignature(clef))
self._accidentals = key_signature_module.Accidentals(clef)
self._key_signature_state = (
_KeySignatureState.ACCIDENTALS
if key_signature else _KeySignatureState.KEY_SIGNATURE)
self.chords = {}
def new_measure(self, start_time):
"""Constructs a new MeasureState for the next measure.
Args:
start_time: The start time of the new measure.
Returns:
A new MeasureState object.
"""
return MeasureState(
start_time,
clef=self.clef,
key_signature=copy.deepcopy(self.key_signature))
def set_accidental(self, y_position, accidental):
"""Adds a glyph to the key signature or accidentals.
Args:
y_position: The position of the accidental.
accidental: The accidental value.
"""
if self._key_signature_state == _KeySignatureState.KEY_SIGNATURE:
if self.key_signature.try_put(y_position, accidental):
return
self._key_signature_state = _KeySignatureState.ACCIDENTALS
self._accidentals.put(y_position, accidental)
def get_note(self, glyph):
"""Converts a Glyph to a Note.
Gets the note timing from an existing chord if available, or increments the
current measure time otherwise.
Args:
glyph: A Glyph message. Type must be one of NOTEHEAD_*.
Returns:
A Note message.
"""
accidental = self._accidentals.get_accidental_for_position(glyph.y_position)
if accidental == musicscore_pb2.Glyph.NONE:
accidental = self.key_signature.get_accidental_for_position(
glyph.y_position)
pitch = (
self.clef.y_position_to_midi(glyph.y_position) +
ACCIDENTAL_PITCH_SHIFT_[accidental])
first_note_in_chord = None
if glyph.HasField('stem'):
# Try to get the timing from another note in the same chord.
stem = ((glyph.stem.start.x, glyph.stem.start.y), (glyph.stem.end.x,
glyph.stem.end.y))
if stem in self.chords:
first_note_in_chord = self.chords[stem]
else:
stem = None
if first_note_in_chord:
start_time, end_time = (first_note_in_chord.start_time,
first_note_in_chord.end_time)
else:
# TODO(ringw): Check all note durations, not just the first seen in a
# chord, and use the median detected duration.
duration = _get_note_duration(glyph)
start_time, end_time = self.time, self.time + duration
self.time += duration
note = music_pb2.NoteSequence.Note(
pitch=pitch, start_time=start_time, end_time=end_time)
if stem:
self.chords[stem] = note
return note
def set_clef(self, clef):
"""Sets the clef, and resets the key signature if necessary."""
if clef != self.clef:
self._key_signature_state = _KeySignatureState.KEY_SIGNATURE
self.key_signature = key_signature_module.KeySignature(clef)
self._accidentals = key_signature_module.Accidentals(clef)
self.clef = clef
def on_read_notehead(self):
"""Called after a notehead has been read.
    The key signature should occur before any noteheads in the measure.
    Calling this method causes subsequent accidental glyphs to be read as
    accidentals, and not as part of the key signature.
"""
self._key_signature_state = _KeySignatureState.ACCIDENTALS
def _get_note_duration(note):
"""Determines the duration of a notehead glyph.
This depends on the glyph type, beams (which each halve the duration), and
dots (which each add a fractional duration). In the future, notes may be
recognized as a tuplet, which will result in a Fraction duration. For now, the
duration is a float, because the denominator is always a sum of powers of two.
Args:
note: A `Glyph` of a notehead type.
Returns:
The float duration of the note, in quarter notes.
Raises:
ValueError: If `note` is not a notehead type.
"""
if note.type == musicscore_pb2.Glyph.NOTEHEAD_FILLED:
# Quarter note: 2.0 ** 0 == 1
# Each beam halves the note duration.
duration = 2.0**-len(note.beam)
elif note.type == musicscore_pb2.Glyph.NOTEHEAD_EMPTY:
duration = 2.0
elif note.type == musicscore_pb2.Glyph.NOTEHEAD_WHOLE:
duration = 4.0
else:
raise ValueError('Expected a notehead, got: %s' % note)
# The first dot adds half the original duration, and further dots add half the
# value added by the previous dot.
dot_value = duration / 2.
for _ in note.dot:
duration += dot_value
dot_value /= 2.
return duration
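# ---------------------------------------------------------------------------
# Worked example (added): a filled notehead with one beam is an eighth note,
# 2.0 ** -1 == 0.5 quarter notes, and one augmentation dot adds half of that,
# giving 0.75. A minimal sketch, assuming beam and dot are repeated fields on
# the Glyph message:
#
#   glyph = musicscore_pb2.Glyph(type=musicscore_pb2.Glyph.NOTEHEAD_FILLED)
#   glyph.beam.add()                      # one beam -> eighth note (0.5)
#   glyph.dot.add()                       # one dot  -> 0.5 + 0.25
#   assert _get_note_duration(glyph) == 0.75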
| 35.089109 | 80 | 0.701891 | 997 | 7,088 | 4.832497 | 0.250752 | 0.082192 | 0.033209 | 0.021793 | 0.165006 | 0.149232 | 0.101287 | 0.035699 | 0.035699 | 0.035699 | 0 | 0.008009 | 0.224887 | 7,088 | 201 | 81 | 35.263682 | 0.868948 | 0.462613 | 0 | 0.1 | 0 | 0 | 0.008949 | 0 | 0 | 0 | 0 | 0.00995 | 0 | 1 | 0.077778 | false | 0 | 0.088889 | 0 | 0.255556 | 0.011111 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5090b4a91145d1039988ef3dd90819fc2e5534ec | 19,185 | py | Python | federation/tests/entities/diaspora/test_mappers.py | weex/federation | 01357aacb04b076442ce5f803a0fc65df5a74d09 | [
"BSD-3-Clause"
] | 93 | 2016-11-26T10:52:13.000Z | 2022-01-15T20:07:35.000Z | federation/tests/entities/diaspora/test_mappers.py | weex/federation | 01357aacb04b076442ce5f803a0fc65df5a74d09 | [
"BSD-3-Clause"
] | 75 | 2016-10-18T10:15:44.000Z | 2019-10-05T22:16:32.000Z | federation/tests/entities/diaspora/test_mappers.py | weex/federation | 01357aacb04b076442ce5f803a0fc65df5a74d09 | [
"BSD-3-Clause"
] | 9 | 2017-04-08T08:03:45.000Z | 2021-09-13T22:00:48.000Z | from datetime import datetime
from lxml import etree
from unittest.mock import patch, Mock
import pytest
from federation.entities.base import (
Comment, Post, Reaction, Relationship, Profile, Retraction,
Follow, Share)
from federation.entities.diaspora.entities import (
DiasporaPost, DiasporaComment, DiasporaLike,
DiasporaProfile, DiasporaRetraction, DiasporaContact, DiasporaReshare, DiasporaImage)
from federation.entities.diaspora.mappers import (
message_to_objects, get_outbound_entity, check_sender_and_entity_handle_match)
from federation.tests.fixtures.payloads import (
DIASPORA_POST_SIMPLE, DIASPORA_POST_COMMENT, DIASPORA_POST_LIKE,
DIASPORA_PROFILE, DIASPORA_POST_INVALID, DIASPORA_RETRACTION,
DIASPORA_POST_WITH_PHOTOS, DIASPORA_CONTACT,
DIASPORA_PROFILE_EMPTY_TAGS, DIASPORA_RESHARE,
DIASPORA_RESHARE_WITH_EXTRA_PROPERTIES, DIASPORA_POST_SIMPLE_WITH_MENTION,
DIASPORA_PROFILE_FIRST_NAME_ONLY, DIASPORA_POST_COMMENT_NESTED, DIASPORA_POST_ACTIVITYPUB_ID,
DIASPORA_POST_COMMENT_ACTIVITYPUB_ID, DIASPORA_PROFILE_ACTIVITYPUB_ID)
from federation.types import UserType, ReceiverVariant
class TestDiasporaEntityMappersReceive:
def test_message_to_objects_mentions_are_extracted(self):
entities = message_to_objects(
DIASPORA_POST_SIMPLE_WITH_MENTION, "alice@alice.diaspora.example.org"
)
assert len(entities) == 1
post = entities[0]
assert post._mentions == {'jaywink@jasonrobinson.me'}
def test_message_to_objects_post__with_activitypub_id(self):
entities = message_to_objects(DIASPORA_POST_ACTIVITYPUB_ID, "alice@alice.diaspora.example.org")
assert len(entities) == 1
post = entities[0]
assert isinstance(post, DiasporaPost)
assert isinstance(post, Post)
assert post.guid == "((guidguidguidguidguidguidguid))"
assert post.handle == "alice@alice.diaspora.example.org"
assert post.id == "https://alice.diaspora.example.org/posts/1"
def test_message_to_objects_simple_post(self):
entities = message_to_objects(DIASPORA_POST_SIMPLE, "alice@alice.diaspora.example.org")
assert len(entities) == 1
post = entities[0]
assert isinstance(post, DiasporaPost)
assert isinstance(post, Post)
assert post.raw_content == "((status message))"
assert post.guid == "((guidguidguidguidguidguidguid))"
assert post.handle == "alice@alice.diaspora.example.org"
        assert post.public is False
assert post.created_at == datetime(2011, 7, 20, 1, 36, 7)
assert post.provider_display_name == "Socialhome"
def test_message_to_objects_post_with_photos(self):
entities = message_to_objects(DIASPORA_POST_WITH_PHOTOS, "alice@alice.diaspora.example.org")
assert len(entities) == 1
post = entities[0]
assert isinstance(post, DiasporaPost)
photo = post._children[0]
assert isinstance(photo, DiasporaImage)
assert photo.url == "https://alice.diaspora.example.org/uploads/images/1234.jpg"
assert photo.name == ""
assert photo.raw_content == ""
assert photo.height == 120
assert photo.width == 120
assert photo.guid == "((guidguidguidguidguidguidguif))"
assert photo.handle == "alice@alice.diaspora.example.org"
assert photo.created_at == datetime(2011, 7, 20, 1, 36, 7)
@patch("federation.entities.diaspora.mappers.DiasporaComment._validate_signatures")
def test_message_to_objects_comment(self, mock_validate):
entities = message_to_objects(DIASPORA_POST_COMMENT, "alice@alice.diaspora.example.org",
sender_key_fetcher=Mock())
assert len(entities) == 1
comment = entities[0]
assert isinstance(comment, DiasporaComment)
assert isinstance(comment, Comment)
assert comment.target_guid == "((parent_guidparent_guidparent_guidparent_guid))"
assert comment.root_target_guid == ""
assert comment.guid == "((guidguidguidguidguidguid))"
assert comment.handle == "alice@alice.diaspora.example.org"
assert comment.participation == "comment"
assert comment.raw_content == "((text))"
assert comment.signature == "((signature))"
assert comment._xml_tags == [
"guid", "parent_guid", "text", "author",
]
mock_validate.assert_called_once_with()
@patch("federation.entities.diaspora.mappers.DiasporaComment._validate_signatures")
def test_message_to_objects_comment__activitypub_id(self, mock_validate):
entities = message_to_objects(DIASPORA_POST_COMMENT_ACTIVITYPUB_ID, "alice@alice.diaspora.example.org",
sender_key_fetcher=Mock())
assert len(entities) == 1
comment = entities[0]
assert isinstance(comment, DiasporaComment)
assert isinstance(comment, Comment)
assert comment.target_guid == "((parent_guidparent_guidparent_guidparent_guid))"
assert comment.root_target_guid == ""
assert comment.guid == "((guidguidguidguidguidguid))"
assert comment.handle == "alice@alice.diaspora.example.org"
assert comment.id == "https://alice.diaspora.example.org/comments/1"
mock_validate.assert_called_once_with()
@patch("federation.entities.diaspora.mappers.DiasporaComment._validate_signatures")
def test_message_to_objects_nested_comment(self, mock_validate):
entities = message_to_objects(DIASPORA_POST_COMMENT_NESTED, "alice@alice.diaspora.example.org",
sender_key_fetcher=Mock())
assert len(entities) == 1
comment = entities[0]
assert isinstance(comment, DiasporaComment)
assert isinstance(comment, Comment)
assert comment.target_guid == "((parent_guidparent_guidparent_guidparent_guid))"
assert comment.root_target_guid == "((threadparentguid))"
assert comment.guid == "((guidguidguidguidguidguid))"
assert comment.handle == "alice@alice.diaspora.example.org"
assert comment.participation == "comment"
assert comment.raw_content == "((text))"
assert comment.signature == "((signature))"
assert comment._xml_tags == [
"guid", "parent_guid", "thread_parent_guid", "text", "author",
]
mock_validate.assert_called_once_with()
@patch("federation.entities.diaspora.mappers.DiasporaLike._validate_signatures")
def test_message_to_objects_like(self, mock_validate):
entities = message_to_objects(
DIASPORA_POST_LIKE, "alice@alice.diaspora.example.org", sender_key_fetcher=Mock()
)
assert len(entities) == 1
like = entities[0]
assert isinstance(like, DiasporaLike)
assert isinstance(like, Reaction)
assert like.target_guid == "((parent_guidparent_guidparent_guidparent_guid))"
assert like.guid == "((guidguidguidguidguidguid))"
assert like.handle == "alice@alice.diaspora.example.org"
assert like.participation == "reaction"
assert like.reaction == "like"
assert like.signature == "((signature))"
assert like._xml_tags == [
"parent_type", "guid", "parent_guid", "positive", "author",
]
mock_validate.assert_called_once_with()
@patch("federation.entities.diaspora.mappers.retrieve_and_parse_profile", return_value=Mock(
id="bob@example.com",
))
def test_message_to_objects_profile(self, mock_parse):
entities = message_to_objects(DIASPORA_PROFILE, "bob@example.com")
assert len(entities) == 1
profile = entities[0]
assert profile.handle == "bob@example.com"
assert profile.name == "Bob Bobertson"
assert profile.image_urls == {
"large": "https://example.com/uploads/images/thumb_large_c833747578b5.jpg",
"medium": "https://example.com/uploads/images/thumb_medium_c8b1aab04f3.jpg",
"small": "https://example.com/uploads/images/thumb_small_c8b147578b5.jpg",
}
assert profile.gender == ""
assert profile.raw_content == "A cool bio"
assert profile.location == "Helsinki"
        assert profile.public is True
        assert profile.nsfw is False
assert profile.tag_list == ["socialfederation", "federation"]
@patch("federation.entities.diaspora.mappers.retrieve_and_parse_profile", return_value=Mock(
id="bob@example.com",
))
def test_message_to_objects_profile__activitypub_id(self, mock_parse):
entities = message_to_objects(DIASPORA_PROFILE_ACTIVITYPUB_ID, "bob@example.com")
assert len(entities) == 1
profile = entities[0]
assert profile.handle == "bob@example.com"
assert profile.id == "https://example.com/bob"
@patch("federation.entities.diaspora.mappers.retrieve_and_parse_profile", return_value=Mock(
id="bob@example.com",
))
def test_message_to_objects_profile__first_name_only(self, mock_parse):
entities = message_to_objects(DIASPORA_PROFILE_FIRST_NAME_ONLY, "bob@example.com")
assert len(entities) == 1
profile = entities[0]
assert profile.name == "Bob"
@patch("federation.entities.diaspora.mappers.retrieve_and_parse_profile", return_value=Mock(
id="bob@example.com",
))
def test_message_to_objects_profile_survives_empty_tag_string(self, mock_parse):
entities = message_to_objects(DIASPORA_PROFILE_EMPTY_TAGS, "bob@example.com")
assert len(entities) == 1
def test_message_to_objects_receivers_are_saved__followers_receiver(self):
# noinspection PyTypeChecker
entities = message_to_objects(
DIASPORA_POST_SIMPLE,
"alice@alice.diaspora.example.org",
)
entity = entities[0]
assert entity._receivers == [UserType(
id="alice@alice.diaspora.example.org", receiver_variant=ReceiverVariant.FOLLOWERS,
)]
def test_message_to_objects_receivers_are_saved__single_receiver(self):
# noinspection PyTypeChecker
entities = message_to_objects(
DIASPORA_POST_SIMPLE,
"alice@alice.diaspora.example.org",
user=Mock(id="bob@example.com")
)
entity = entities[0]
assert entity._receivers == [UserType(id="bob@example.com", receiver_variant=ReceiverVariant.ACTOR)]
def test_message_to_objects_retraction(self):
entities = message_to_objects(DIASPORA_RETRACTION, "bob@example.com")
assert len(entities) == 1
entity = entities[0]
assert isinstance(entity, DiasporaRetraction)
assert entity.handle == "bob@example.com"
assert entity.target_guid == "x" * 16
assert entity.entity_type == "Post"
def test_message_to_objects_contact(self):
entities = message_to_objects(DIASPORA_CONTACT, "alice@example.com")
assert len(entities) == 1
entity = entities[0]
assert isinstance(entity, DiasporaContact)
assert entity.handle == "alice@example.com"
assert entity.target_handle == "bob@example.org"
assert entity.following is True
def test_message_to_objects_reshare(self):
entities = message_to_objects(DIASPORA_RESHARE, "alice@example.org")
assert len(entities) == 1
entity = entities[0]
assert isinstance(entity, DiasporaReshare)
assert entity.handle == "alice@example.org"
assert entity.guid == "a0b53e5029f6013487753131731751e9"
assert entity.provider_display_name == ""
assert entity.target_handle == "bob@example.com"
assert entity.target_guid == "a0b53bc029f6013487753131731751e9"
assert entity.public is True
assert entity.entity_type == "Post"
assert entity.raw_content == ""
def test_message_to_objects_reshare_extra_properties(self):
entities = message_to_objects(DIASPORA_RESHARE_WITH_EXTRA_PROPERTIES, "alice@example.org")
assert len(entities) == 1
entity = entities[0]
assert isinstance(entity, DiasporaReshare)
assert entity.raw_content == "Important note here"
assert entity.entity_type == "Comment"
@patch("federation.entities.diaspora.mappers.logger.error")
def test_invalid_entity_logs_an_error(self, mock_logger):
entities = message_to_objects(DIASPORA_POST_INVALID, "alice@alice.diaspora.example.org")
assert len(entities) == 0
assert mock_logger.called
def test_adds_source_protocol_to_entity(self):
entities = message_to_objects(DIASPORA_POST_SIMPLE, "alice@alice.diaspora.example.org")
assert entities[0]._source_protocol == "diaspora"
@patch("federation.entities.diaspora.mappers.DiasporaComment._validate_signatures")
def test_source_object(self, mock_validate):
entities = message_to_objects(DIASPORA_POST_COMMENT, "alice@alice.diaspora.example.org",
sender_key_fetcher=Mock())
entity = entities[0]
assert entity._source_object == etree.tostring(etree.fromstring(DIASPORA_POST_COMMENT))
@patch("federation.entities.diaspora.mappers.DiasporaComment._validate_signatures")
def test_element_to_objects_calls_sender_key_fetcher(self, mock_validate):
mock_fetcher = Mock()
message_to_objects(DIASPORA_POST_COMMENT, "alice@alice.diaspora.example.org", mock_fetcher)
mock_fetcher.assert_called_once_with(
"alice@alice.diaspora.example.org",
)
@patch("federation.entities.diaspora.mappers.DiasporaComment._validate_signatures")
@patch("federation.entities.diaspora.mappers.retrieve_and_parse_profile")
def test_element_to_objects_calls_retrieve_remote_profile(self, mock_retrieve, mock_validate):
message_to_objects(DIASPORA_POST_COMMENT, "alice@alice.diaspora.example.org")
mock_retrieve.assert_called_once_with("alice@alice.diaspora.example.org")
@patch("federation.entities.diaspora.mappers.check_sender_and_entity_handle_match")
def test_element_to_objects_verifies_handles_are_the_same(self, mock_check):
message_to_objects(DIASPORA_POST_SIMPLE, "bob@example.org")
mock_check.assert_called_once_with("bob@example.org", "alice@alice.diaspora.example.org")
def test_element_to_objects_returns_no_entity_if_handles_are_different(self):
entities = message_to_objects(DIASPORA_POST_SIMPLE, "bob@example.org")
assert not entities
class TestGetOutboundEntity:
def test_already_fine_entities_are_returned_as_is(self, private_key):
entity = DiasporaPost()
entity.validate = Mock()
assert get_outbound_entity(entity, private_key) == entity
entity = DiasporaLike()
entity.validate = Mock()
assert get_outbound_entity(entity, private_key) == entity
entity = DiasporaComment()
entity.validate = Mock()
assert get_outbound_entity(entity, private_key) == entity
entity = DiasporaProfile(handle="foobar@example.com", guid="1234")
entity.validate = Mock()
assert get_outbound_entity(entity, private_key) == entity
entity = DiasporaContact()
entity.validate = Mock()
assert get_outbound_entity(entity, private_key) == entity
entity = DiasporaReshare()
entity.validate = Mock()
assert get_outbound_entity(entity, private_key) == entity
@patch.object(DiasporaPost, "validate", new=Mock())
def test_post_is_converted_to_diasporapost(self, private_key):
entity = Post()
assert isinstance(get_outbound_entity(entity, private_key), DiasporaPost)
@patch.object(DiasporaComment, "validate", new=Mock())
def test_comment_is_converted_to_diasporacomment(self, private_key):
entity = Comment()
assert isinstance(get_outbound_entity(entity, private_key), DiasporaComment)
@patch.object(DiasporaLike, "validate", new=Mock())
def test_reaction_of_like_is_converted_to_diasporalike(self, private_key):
entity = Reaction(reaction="like")
assert isinstance(get_outbound_entity(entity, private_key), DiasporaLike)
@patch.object(DiasporaProfile, "validate", new=Mock())
def test_profile_is_converted_to_diasporaprofile(self, private_key):
entity = Profile(handle="foobar@example.com", guid="1234")
assert isinstance(get_outbound_entity(entity, private_key), DiasporaProfile)
def test_other_reaction_raises(self, private_key):
entity = Reaction(reaction="foo")
with pytest.raises(ValueError):
get_outbound_entity(entity, private_key)
def test_other_relation_raises(self, private_key):
entity = Relationship(relationship="foo")
with pytest.raises(ValueError):
get_outbound_entity(entity, private_key)
@patch.object(DiasporaRetraction, "validate", new=Mock())
def test_retraction_is_converted_to_diasporaretraction(self, private_key):
entity = Retraction()
assert isinstance(get_outbound_entity(entity, private_key), DiasporaRetraction)
@patch.object(DiasporaContact, "validate", new=Mock())
def test_follow_is_converted_to_diasporacontact(self, private_key):
entity = Follow()
assert isinstance(get_outbound_entity(entity, private_key), DiasporaContact)
@patch.object(DiasporaReshare, "validate", new=Mock())
def test_share_is_converted_to_diasporareshare(self, private_key):
entity = Share()
assert isinstance(get_outbound_entity(entity, private_key), DiasporaReshare)
def test_signs_relayable_if_no_signature(self, private_key):
entity = DiasporaComment()
entity.validate = Mock()
outbound = get_outbound_entity(entity, private_key)
assert outbound.signature != ""
def test_returns_entity_if_outbound_doc_on_entity(self, private_key, diasporacomment):
entity = Comment()
entity.outbound_doc = diasporacomment.to_xml()
assert get_outbound_entity(entity, private_key) == entity
def test_entity_is_validated__fail(self, private_key):
entity = Share(
actor_id="foobar@localhost.local",
handle="foobar@localhost.local",
id="1"*16,
guid="1"*16,
created_at=datetime.now(),
target_id="2" * 16,
)
with pytest.raises(ValueError):
get_outbound_entity(entity, private_key)
def test_entity_is_validated__success(self, private_key):
entity = Share(
actor_id="foobar@localhost.local",
handle="foobar@localhost.local",
id="1" * 16,
guid="1" * 16,
created_at=datetime.now(),
target_handle="barfoo@remote.local",
target_id="2" * 16,
target_guid="2" * 16,
)
get_outbound_entity(entity, private_key)
def test_check_sender_and_entity_handle_match():
assert not check_sender_and_entity_handle_match("foo", "bar")
assert check_sender_and_entity_handle_match("foo", "foo")
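# ---------------------------------------------------------------------------
# Note (added): "private_key" and "diasporacomment" are pytest fixtures
# provided elsewhere in this suite (conftest.py). A minimal sketch of a
# compatible private_key fixture, assuming an RSA key object is what the
# signing code expects:
#
#   @pytest.fixture
#   def private_key():
#       from Crypto.PublicKey import RSA
#       return RSA.generate(2048)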
| 47.487624 | 111 | 0.699505 | 2,150 | 19,185 | 5.93814 | 0.107907 | 0.033837 | 0.055142 | 0.052244 | 0.688572 | 0.626537 | 0.564659 | 0.527297 | 0.458839 | 0.421947 | 0 | 0.01161 | 0.200834 | 19,185 | 403 | 112 | 47.605459 | 0.821093 | 0.002763 | 0 | 0.424157 | 0 | 0 | 0.187307 | 0.123007 | 0 | 0 | 0 | 0 | 0.390449 | 1 | 0.11236 | false | 0 | 0.02809 | 0 | 0.146067 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5091f460d48fb7e3ea87e1b65d7988d7b539358c | 1,221 | py | Python | test/test_edit_contact.py | dmitryvorobev/python_training | 6acbf94b3e20cff0c044b8c1f62ef60e466a33d6 | [
"Apache-2.0"
] | null | null | null | test/test_edit_contact.py | dmitryvorobev/python_training | 6acbf94b3e20cff0c044b8c1f62ef60e466a33d6 | [
"Apache-2.0"
] | null | null | null | test/test_edit_contact.py | dmitryvorobev/python_training | 6acbf94b3e20cff0c044b8c1f62ef60e466a33d6 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
from model.person import Person
import random
def test_edit_contact(app, db, check_ui):
if app.contact.count() == 0:
app.contact.add_contact_fill_form(Person(firstname="John", lastname="Doe", company="paramount",
address="23168 CA, sunbeach blvd", home_phone_num="555111000", year="1980"))
old_contacts = db.get_contact_list()
index = old_contacts.index(random.choice(old_contacts))
contact = Person(firstname="UU", lastname="UU", company="paramount",
address="23168 CA, sunbeach blvd", home_phone_num="555111000", year="1980")
contact.id = old_contacts[index].id
    app.contact.edit_contact_by_index(index, contact)
new_contacts = db.get_contact_list()
assert len(old_contacts) == len(new_contacts)
if check_ui:
        def clean(contact):
            return Person(id=contact.id, firstname=contact.firstname.strip(),
                          lastname=contact.lastname.strip())
        new_contacts = map(clean, db.get_contact_list())
        assert sorted(new_contacts, key=Person.id_or_max) == \
               sorted(app.contact.get_contact_list(), key=Person.id_or_max)
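# Note (added): Person.id_or_max is defined on the model and is assumed to
# provide a sort key that tolerates contacts without an id, roughly:
#
#   def id_or_max(self):
#       return int(self.id) if self.id else maxsize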
| 48.84 | 125 | 0.667486 | 158 | 1,221 | 4.936709 | 0.379747 | 0.070513 | 0.071795 | 0.061538 | 0.320513 | 0.182051 | 0.182051 | 0.182051 | 0.182051 | 0.182051 | 0 | 0.039256 | 0.207207 | 1,221 | 24 | 126 | 50.875 | 0.766529 | 0.017199 | 0 | 0 | 0 | 0 | 0.084307 | 0 | 0 | 0 | 0 | 0 | 0.1 | 1 | 0.1 | false | 0 | 0.15 | 0.05 | 0.3 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5092d513fdadf913d9d88d4f071d619f95265b9e | 728 | py | Python | recodex/plugins/codex/plugin_config.py | ReCodEx/cli | 8bc92c3bac2dc823a1c5f1a99ff2e252cc8578e6 | [
"MIT"
] | 4 | 2018-03-13T22:34:08.000Z | 2021-02-02T03:43:22.000Z | recodex/plugins/codex/plugin_config.py | ReCodEx/cli | 8bc92c3bac2dc823a1c5f1a99ff2e252cc8578e6 | [
"MIT"
] | 1 | 2019-03-20T16:23:55.000Z | 2019-03-20T21:15:18.000Z | recodex/plugins/codex/plugin_config.py | ReCodEx/cli | 8bc92c3bac2dc823a1c5f1a99ff2e252cc8578e6 | [
"MIT"
] | null | null | null | from ruamel import yaml
from pathlib import Path
from typing import NamedTuple, Dict
class Config(NamedTuple):
locale: str = "cs"
extension_to_runtime: Dict[str, str] = {
"cs": "mono",
"c": "c-gcc-linux",
"pas": "freepascal-linux",
"java": "java",
"cpp": "cxx-gcc-linux",
"py": "python3"
}
judges: Dict[str, str] = {
"bin/codex_judge": "recodex-judge-normal",
"bin/codex_shufflejudge": "recodex-judge-shuffle",
"diff": "diff"
}
@classmethod
def load(cls, config_path: Path):
if not config_path.exists():
return cls()
config = yaml.safe_load(config_path.open("r"))
return cls(**config)
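# ---------------------------------------------------------------------------
# Illustrative usage (added): a minimal sketch; "codex_config.yml" is a
# placeholder path. Config.load falls back to the defaults above when the
# file does not exist.
if __name__ == "__main__":
    config = Config.load(Path("codex_config.yml"))
    print(config.locale)                           # "cs" unless overridden
    print(config.extension_to_runtime.get("py"))   # "python3"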
| 22.75 | 58 | 0.56456 | 85 | 728 | 4.741176 | 0.564706 | 0.066998 | 0.049628 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.001919 | 0.284341 | 728 | 31 | 59 | 23.483871 | 0.771593 | 0 | 0 | 0 | 0 | 0 | 0.218407 | 0.059066 | 0 | 0 | 0 | 0 | 0 | 1 | 0.041667 | false | 0 | 0.125 | 0 | 0.416667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
50931c271ee761dee43a0fdded41fed4984686ee | 938 | py | Python | bots/twi/main.py | kosyachniy/dev | 39bb5c5ee10780bfcd8a59cf59cfb1a348ac52a4 | [
"Apache-2.0"
] | 13 | 2018-12-17T23:30:54.000Z | 2021-12-29T14:31:43.000Z | bots/twi/main.py | kosyachniy/dev | 39bb5c5ee10780bfcd8a59cf59cfb1a348ac52a4 | [
"Apache-2.0"
] | 36 | 2018-06-07T21:34:13.000Z | 2022-03-13T21:01:43.000Z | bots/twi/main.py | kosyachniy/dev | 39bb5c5ee10780bfcd8a59cf59cfb1a348ac52a4 | [
"Apache-2.0"
] | 2 | 2021-01-03T11:47:20.000Z | 2021-12-29T14:31:49.000Z | import tweepy
consumer_key = 'vveAVFha4hTcjUStnOf0hwEwQ'
consumer_secret = 'F8tFzORTE8DzAAYnz5hHxCBRAClPWf4ABhuGn03GHZ5w2QJtbP'
access_token = '4100776272-b1HK52akcdNp4kpWwfHsg8hFnWzbIlcAoKSstqq'
access_token_secret = 'CyRTkHMRrccPpTWRzc8LJM5nPHkp77G7w4djeUsOsEtzJ'
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
api = tweepy.API(auth)
# print([i.list_members for i in api.me().lists()][0])
# Users that are in my lists
alls = set()
for lis in ('list', 'list1'):
for member in tweepy.Cursor(api.list_members, 'kosyachniy', lis).items():
alls.add((member.id, member.screen_name))
login_all = [i[1] for i in alls]
print(login_all)
# Accounts I am following
count = 0
for i in tweepy.Cursor(api.friends, id='kosyachniy').items():
if i.screen_name not in login_all:
        # Unfollow
api.destroy_friendship(i.screen_name)
count += 1
if count == 50:
break | 24.684211 | 74 | 0.768657 | 123 | 938 | 5.691057 | 0.439024 | 0.078571 | 0.025714 | 0.062857 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.046005 | 0.119403 | 938 | 38 | 75 | 24.684211 | 0.801453 | 0.119403 | 0 | 0 | 0 | 0 | 0.242092 | 0.206813 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.047619 | 0 | 0.047619 | 0.047619 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
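# ---------------------------------------------------------------------------
# Note (added): in practice the four credentials above should be revoked and
# loaded from the environment instead of being hard-coded, e.g.:
#
#   import os
#   consumer_key = os.environ["TWITTER_CONSUMER_KEY"]
#   consumer_secret = os.environ["TWITTER_CONSUMER_SECRET"]
#   access_token = os.environ["TWITTER_ACCESS_TOKEN"]
#   access_token_secret = os.environ["TWITTER_ACCESS_TOKEN_SECRET"]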
50950b04723a596307b1faf37e24df84c204328c | 3,976 | py | Python | toontown/hood/SellbotHQAI.py | MasterLoopyBM/Toontown | ebed7fc3f2ef06a529cf02eda7ab46361aceef9d | [
"MIT"
] | 1 | 2020-02-07T18:15:12.000Z | 2020-02-07T18:15:12.000Z | toontown/hood/SellbotHQAI.py | TrueBlueDogemon/Toontown | ebed7fc3f2ef06a529cf02eda7ab46361aceef9d | [
"MIT"
] | null | null | null | toontown/hood/SellbotHQAI.py | TrueBlueDogemon/Toontown | ebed7fc3f2ef06a529cf02eda7ab46361aceef9d | [
"MIT"
] | 2 | 2020-11-08T03:38:35.000Z | 2021-09-02T07:03:47.000Z | from toontown.building import DistributedVPElevatorAI
from toontown.building import DistributedBrutalVPElevatorAI
from toontown.building import FADoorCodes
from toontown.building.DistributedBoardingPartyAI import DistributedBoardingPartyAI
from toontown.coghq.DistributedFactoryElevatorExtAI import DistributedFactoryElevatorExtAI
from toontown.hood import CogHQAI
from toontown.suit import DistributedSellbotBossAI
from toontown.suit import DistributedBrutalSellbotBossAI
from toontown.suit import DistributedSuitPlannerAI
from toontown.toonbase import ToontownGlobals
class SellbotHQAI(CogHQAI.CogHQAI):
def __init__(self, air):
CogHQAI.CogHQAI.__init__(
self, air, ToontownGlobals.SellbotHQ, ToontownGlobals.SellbotLobby,
FADoorCodes.SB_DISGUISE_INCOMPLETE,
DistributedVPElevatorAI.DistributedVPElevatorAI,
DistributedSellbotBossAI.DistributedSellbotBossAI,
DistributedBrutalVPElevatorAI.DistributedBrutalVPElevatorAI,
DistributedBrutalSellbotBossAI.DistributedBrutalSellbotBossAI)
self.factoryElevators = []
self.factoryBoardingParty = None
self.suitPlanners = []
self.startup()
def startup(self):
CogHQAI.CogHQAI.startup(self)
# Sellbot HQ has not just one, but four lobby doors:
self.cogHQDoors = [self.extDoor]
for i in xrange(3): # CogHQAI already created one of the doors for us.
extDoor = self.makeCogHQDoor(self.lobbyZoneId, 0, i + 1, self.lobbyFADoorCode)
self.cogHQDoors.append(extDoor)
self.createFactoryElevators()
if simbase.config.GetBool('want-boarding-groups', True):
self.createFactoryBoardingParty()
if simbase.config.GetBool('want-suit-planners', True):
self.createSuitPlanners()
# Our suit planner needs the Cog HQ doors as well:
for sp in self.suitPlanners:
if sp.zoneId == self.zoneId:
sp.cogHQDoors = self.cogHQDoors
def createFactoryElevators(self):
# We only have two factory elevators: the front, and side elevators.
for i in xrange(2):
factoryElevator = DistributedFactoryElevatorExtAI(
self.air, self.air.factoryMgr, ToontownGlobals.SellbotFactoryInt, i)
factoryElevator.generateWithRequired(ToontownGlobals.SellbotFactoryExt)
self.factoryElevators.append(factoryElevator)
if simbase.config.GetBool('want-brutal-factory', True):
factoryElevator = DistributedFactoryElevatorExtAI(
self.air, self.air.factoryMgr, ToontownGlobals.SellbotBrutalFactoryInt, 2)
factoryElevator.generateWithRequired(ToontownGlobals.SellbotFactoryExt)
self.factoryElevators.append(factoryElevator)
def createFactoryBoardingParty(self):
factoryIdList = []
for factoryElevator in self.factoryElevators:
factoryIdList.append(factoryElevator.doId)
self.factoryBoardingParty = DistributedBoardingPartyAI(self.air, factoryIdList, 4)
self.factoryBoardingParty.generateWithRequired(ToontownGlobals.SellbotFactoryExt)
def createSuitPlanners(self):
suitPlanner = DistributedSuitPlannerAI.DistributedSuitPlannerAI(self.air, self.zoneId)
suitPlanner.generateWithRequired(self.zoneId)
suitPlanner.d_setZoneId(self.zoneId)
suitPlanner.initTasks()
self.suitPlanners.append(suitPlanner)
self.air.suitPlanners[self.zoneId] = suitPlanner
suitPlanner = DistributedSuitPlannerAI.DistributedSuitPlannerAI(self.air, ToontownGlobals.SellbotFactoryExt)
suitPlanner.generateWithRequired(ToontownGlobals.SellbotFactoryExt)
suitPlanner.d_setZoneId(ToontownGlobals.SellbotFactoryExt)
suitPlanner.initTasks()
self.suitPlanners.append(suitPlanner)
self.air.suitPlanners[ToontownGlobals.SellbotFactoryExt] = suitPlanner
| 47.903614 | 116 | 0.732897 | 332 | 3,976 | 8.740964 | 0.319277 | 0.026533 | 0.027567 | 0.026878 | 0.254997 | 0.182633 | 0.182633 | 0.182633 | 0.049621 | 0 | 0 | 0.001887 | 0.200201 | 3,976 | 82 | 117 | 48.487805 | 0.910692 | 0.054074 | 0 | 0.149254 | 0 | 0 | 0.015176 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.074627 | false | 0 | 0.149254 | 0 | 0.238806 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5096eee54a2cdae0d7a96418a52f3656a31d5b6f | 4,357 | py | Python | brickftp/client.py | Usio-Energy/BrickFTP | 24a563e899f14120d72b68903ed8215ba865a145 | [
"Apache-2.0"
] | 2 | 2019-03-18T13:45:56.000Z | 2020-12-10T16:02:41.000Z | brickftp/client.py | Usio-Energy/BrickFTP | 24a563e899f14120d72b68903ed8215ba865a145 | [
"Apache-2.0"
] | 2 | 2020-08-13T23:11:14.000Z | 2021-06-24T22:49:30.000Z | brickftp/client.py | octoenergy/BrickFTP | 24a563e899f14120d72b68903ed8215ba865a145 | [
"Apache-2.0"
] | 5 | 2019-05-02T14:59:18.000Z | 2021-11-05T08:40:29.000Z | import logging
from pathlib import Path
from tempfile import NamedTemporaryFile
from urllib.parse import urljoin
from codecs import open
from json.decoder import JSONDecodeError
from requests.exceptions import RequestException
import requests
logger = logging.getLogger(__name__)
class BrickFTPError(Exception):
pass
class BrickFTP:
def __init__(self, *, username, password, subdomain):
self._username = username
self._password = password
self._subdomain = subdomain
self._session_id = None
self._logged_in = False
def _login(self):
start_session_resp = self._post(
'/api/rest/v1/sessions.json',
json={'username': self._username, 'password': self._password}
)
self._session_id = start_session_resp['id']
self._logged_in = True
def _path(self, path):
return str(path).lstrip('/')
def dir(self, remote_path):
if not self._logged_in:
self._login()
return self._get(f'/api/rest/v1/folders/{self._path(remote_path)}')
def mkdir(self, remote_path):
if not self._logged_in:
self._login()
return self._post(f'/api/rest/v1/folders/{self._path(remote_path)}')
def upload(self, *, upload_path, local_path, encoding='utf-8'):
# NOTE: can currently only upload upto 5MB size files
# https://developers.brickftp.com/#requesting-additional-upload-urls
if not self._logged_in:
self._login()
upload_control_url = f'/api/rest/v1/files/{self._path(upload_path)}'
# Start upload
start_upload_resp_json = self._post(
upload_control_url,
json={'action': 'put'},
)
# Upload parts
ref = start_upload_resp_json['ref']
upload_uri = start_upload_resp_json['upload_uri']
with open(local_path, encoding=encoding) as input_file:
resp = requests.put(upload_uri, data=input_file.read())
if not resp.ok:
raise BrickFTPError(
f'Failed to upload part. Resp: {resp.text}'
)
# End upload
self._post(upload_control_url, json={'action': 'end', 'ref': ref})
def download_file(self, *, remote_path, local_path=None):
if not self._logged_in:
self._login()
if local_path is None:
remote_path = Path(remote_path)
local_path = NamedTemporaryFile(
delete=False,
prefix=f'{remote_path.stem}_',
suffix=remote_path.suffix,
).name
dl_info = self._get(f'/api/rest/v1/files/{self._path(remote_path)}')
resp = requests.get(dl_info['download_uri'])
resp.raise_for_status()
file_bytes = resp.content
with open(local_path, 'wb') as file_:
file_.write(file_bytes)
return local_path
def delete(self, remote_path):
if not self._logged_in:
self._login()
self._delete(
f'/api/rest/v1/files/{self._path(remote_path)}',
headers={'Depth': 'infinity'}
)
def _post(self, path, **kwargs):
return self._request(path=path, method='post', **kwargs)
def _get(self, path):
return self._request(path=path, method='get')
def _delete(self, path, **kwargs):
return self._request(path=path, method='delete', **kwargs)
def _request(self, *, path, method, **kwargs):
url = urljoin(f'https://{self._subdomain}.brickftp.com/', path)
try:
resp = getattr(requests, method)(
url, **{**self._default_request_kwargs, **kwargs}
)
except RequestException as exc:
raise BrickFTPError(exc) from exc
try:
resp_json = resp.json()
except JSONDecodeError:
raise BrickFTPError(f'Non-valid JSON response: {resp.text}')
if not resp.ok:
error = resp_json['error']
raise BrickFTPError(error)
return resp_json
@property
def _default_request_kwargs(self):
if self._logged_in:
return {'cookies': {'BrickAPI': self._session_id}}
elif not self._logged_in and self._session_id:
return {'auth': (self._session_id, 'x')}
else:
return {}
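# ---------------------------------------------------------------------------
# Illustrative usage (added): a minimal sketch against a hypothetical account;
# the subdomain, credentials and paths below are placeholders.
#
#   client = BrickFTP(username="user", password="secret", subdomain="example")
#   client.mkdir("reports/2019")
#   client.upload(upload_path="reports/2019/jan.csv", local_path="jan.csv")
#   local = client.download_file(remote_path="reports/2019/jan.csv")
#   client.delete("reports/2019")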
| 33.259542 | 76 | 0.602479 | 515 | 4,357 | 4.838835 | 0.260194 | 0.048154 | 0.043339 | 0.036116 | 0.223114 | 0.223114 | 0.206661 | 0.149278 | 0.149278 | 0.086677 | 0 | 0.002567 | 0.284829 | 4,357 | 130 | 77 | 33.515385 | 0.797176 | 0.035345 | 0 | 0.132075 | 0 | 0 | 0.120829 | 0.059581 | 0 | 0 | 0 | 0 | 0 | 1 | 0.122642 | false | 0.037736 | 0.075472 | 0.037736 | 0.320755 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5097622beacec4aaead3994497e8bd85b367e308 | 21,052 | py | Python | eventsourcing/system.py | h11r/eventsourcing | e53ef697bfef8b78a468dc52d342b0e39b7cb889 | [
"BSD-3-Clause"
] | null | null | null | eventsourcing/system.py | h11r/eventsourcing | e53ef697bfef8b78a468dc52d342b0e39b7cb889 | [
"BSD-3-Clause"
] | null | null | null | eventsourcing/system.py | h11r/eventsourcing | e53ef697bfef8b78a468dc52d342b0e39b7cb889 | [
"BSD-3-Clause"
] | null | null | null | from abc import ABC, abstractmethod
from collections import defaultdict
from threading import Event, Lock, Thread
from typing import Dict, Iterable, Iterator, List, Set, Tuple, Type, TypeVar
from eventsourcing.application import Application, NotificationLog, Section
from eventsourcing.domain import Aggregate, AggregateEvent
from eventsourcing.persistence import (
Mapper,
Notification,
ProcessRecorder,
Tracking,
)
from eventsourcing.utils import get_topic, resolve_topic
class ProcessEvent:
"""
Keeps together a :class:`~eventsourcing.persistence.Tracking`
object, which represents the position of a domain event notification
in the notification log of a particular application, and the
new domain events that result from processing that notification.
"""
def __init__(self, tracking: Tracking):
"""
        Initialises the process event with the given tracking object.
"""
self.tracking = tracking
self.events: List[AggregateEvent] = []
def save(self, *aggregates: Aggregate) -> None:
"""
Collects pending domain events from the given aggregate.
"""
for aggregate in aggregates:
self.events += aggregate.collect_events()
class Follower(Application):
"""
Extends the :class:`~eventsourcing.application.Application` class
by using a process recorder as its application recorder, by keeping
track of the applications it is following, and pulling and processing
new domain event notifications through its :func:`policy` method.
"""
def __init__(self) -> None:
super().__init__()
self.readers: Dict[
str,
Tuple[
NotificationLogReader,
Mapper[AggregateEvent],
],
] = {}
self.recorder: ProcessRecorder
def construct_recorder(self) -> ProcessRecorder:
"""
Constructs and returns a :class:`~eventsourcing.persistence.ProcessRecorder`
for the application to use as its application recorder.
"""
return self.factory.process_recorder()
def follow(self, name: str, log: NotificationLog) -> None:
"""
Constructs a notification log reader and a mapper for
the named application, and adds them to its collection
of readers.
"""
assert isinstance(self.recorder, ProcessRecorder)
reader = NotificationLogReader(log)
mapper = self.construct_mapper(name)
self.readers[name] = (reader, mapper)
def pull_and_process(self, name: str) -> None:
"""
Pulls and processes unseen domain event notifications
        from the notification log reader of the named application.
Converts received event notifications to domain
event objects, and then calls the :func:`policy`
with a new :class:`ProcessEvent` object which
contains a :class:`~eventsourcing.persistence.Tracking`
object that keeps track of the name of the application
and the position in its notification log from which the
domain event notification was pulled. The policy will
save aggregates to the process event object, using its
:func:`~ProcessEvent.save` method, which collects pending
domain events using the aggregates'
:func:`~eventsourcing.domain.Aggregate.collect_events`
method, and the process event object will then be recorded
by calling the :func:`record` method.
"""
reader, mapper = self.readers[name]
start = self.recorder.max_tracking_id(name) + 1
for notification in reader.read(start=start):
domain_event = mapper.to_domain_event(notification)
process_event = ProcessEvent(
Tracking(
application_name=name,
notification_id=notification.id,
)
)
self.policy(
domain_event,
process_event,
)
self.record(process_event)
@abstractmethod
def policy(
self,
domain_event: AggregateEvent,
process_event: ProcessEvent,
) -> None:
"""
Abstract domain event processing policy method. Must be
implemented by event processing applications. When
processing the given domain event, event processing
applications must use the :func:`~ProcessEvent.save`
method of the given process event object (instead of
the application's :func:`~eventsourcing.application.Application.save`
method) to collect pending events from changed aggregates,
so that the new domain events will be recorded atomically
with tracking information about the position of the given
domain event's notification.
"""
def record(self, process_event: ProcessEvent) -> None:
"""
Records given process event in the application's process recorder.
"""
self.events.put(
**process_event.__dict__,
)
self.notify(process_event.events)
class Promptable(ABC):
"""
Abstract base class for "promptable" objects.
"""
@abstractmethod
def receive_prompt(self, leader_name: str) -> None:
"""
        Receives the name of a leader that has new domain
        event notifications.
"""
class Leader(Application):
"""
Extends the :class:`~eventsourcing.application.Application`
class by also being responsible for keeping track of
followers, and prompting followers when there are new
domain event notifications to be pulled and processed.
"""
def __init__(self) -> None:
super().__init__()
self.followers: List[Promptable] = []
def lead(self, follower: Promptable) -> None:
"""
Adds given follower to a list of followers.
"""
self.followers.append(follower)
def notify(self, new_events: List[AggregateEvent]) -> None:
"""
Extends the application :func:`~eventsourcing.application.Application.notify`
method by calling :func:`prompt_followers` whenever new events have just
been saved.
"""
super().notify(new_events)
if len(new_events):
self.prompt_followers()
def prompt_followers(self) -> None:
"""
Prompts followers by calling their :func:`~Promptable.receive_prompt`
methods with the name of the application.
"""
name = self.__class__.__name__
for follower in self.followers:
follower.receive_prompt(name)
class ProcessApplication(Leader, Follower, ABC):
"""
Base class for event processing applications
    that are both "leaders" and "followers".
"""
class System:
"""
Defines a system of applications.
"""
def __init__(
self,
pipes: Iterable[Iterable[Type[Application]]],
):
nodes: Dict[str, Type[Application]] = {}
edges: Set[Tuple[str, str]] = set()
# Build nodes and edges.
for pipe in pipes:
follower_cls = None
for cls in pipe:
nodes[cls.__name__] = cls
if follower_cls is None:
follower_cls = cls
else:
leader_cls = follower_cls
follower_cls = cls
edges.add(
(
leader_cls.__name__,
follower_cls.__name__,
)
)
self.edges = list(edges)
self.nodes: Dict[str, str] = {}
for name in nodes:
topic = get_topic(nodes[name])
self.nodes[name] = topic
# Identify leaders and followers.
self.follows: Dict[str, List[str]] = defaultdict(list)
self.leads: Dict[str, List[str]] = defaultdict(list)
for edge in edges:
self.leads[edge[0]].append(edge[1])
self.follows[edge[1]].append(edge[0])
# Check followers are followers.
for name in self.follows:
if not issubclass(nodes[name], Follower):
raise TypeError("Not a follower class: %s" % nodes[name])
# Check each process is a process application class.
for name in self.processors:
if not issubclass(nodes[name], ProcessApplication):
raise TypeError("Not a process application class: %s" % nodes[name])
@property
def leaders(self) -> Iterable[str]:
return self.leads.keys()
@property
def leaders_only(self) -> Iterable[str]:
for name in self.leads.keys():
if name not in self.follows:
yield name
@property
def followers(self) -> Iterable[str]:
return self.follows.keys()
@property
def processors(self) -> Iterable[str]:
return set(self.leaders).intersection(self.followers)
def get_app_cls(self, name: str) -> Type[Application]:
cls = resolve_topic(self.nodes[name])
assert issubclass(cls, Application)
return cls
def leader_cls(self, name: str) -> Type[Leader]:
cls = self.get_app_cls(name)
if issubclass(cls, Leader):
return cls
else:
cls = type(
cls.__name__,
(Leader, cls),
{},
)
assert issubclass(cls, Leader)
return cls
def follower_cls(self, name: str) -> Type[Follower]:
cls = self.get_app_cls(name)
assert issubclass(cls, Follower)
return cls
A = TypeVar("A")
class Runner(ABC):
"""
Abstract base class for system runners.
"""
def __init__(self, system: System):
self.system = system
self.is_started = False
@abstractmethod
def start(self) -> None:
"""
Starts the runner.
"""
if self.is_started:
raise RunnerAlreadyStarted()
self.is_started = True
@abstractmethod
def stop(self) -> None:
"""
Stops the runner.
"""
@abstractmethod
def get(self, cls: Type[A]) -> A:
"""
Returns an application instance for given application class.
"""
class RunnerAlreadyStarted(Exception):
"""
Raised when runner is already started.
"""
class SingleThreadedRunner(Runner, Promptable):
"""
Runs a :class:`System` in a single thread.
A single threaded runner is a runner, and so implements the
:func:`start`, :func:`stop`, and :func:`get` methods.
A single threaded runner is also a :class:`Promptable` object, and
implements the :func:`receive_prompt` method by collecting prompted
names.
"""
def __init__(self, system: System):
"""
Initialises runner with the given :class:`System`.
"""
super().__init__(system)
self.apps: Dict[str, Application] = {}
self.prompts_received: List[str] = []
self.is_prompting = False
def start(self) -> None:
"""
Starts the runner.
        The applications are constructed, and set up to lead and follow
        each other, according to the system definition.
        The followers are set up to follow the applications they follow
        (have a notification log reader with the notification log of the
        leader), and their leaders are set up to lead the runner itself
        (send prompts).
"""
super().start()
# Construct followers.
for name in self.system.followers:
self.apps[name] = self.system.follower_cls(name)()
# Construct leaders.
for name in self.system.leaders_only:
self.apps[name] = self.system.leader_cls(name)()
# Lead and follow.
for edge in self.system.edges:
leader = self.apps[edge[0]]
follower = self.apps[edge[1]]
assert isinstance(leader, Leader)
assert isinstance(follower, Follower)
leader.lead(self)
follower.follow(leader.__class__.__name__, leader.log)
def receive_prompt(self, leader_name: str) -> None:
"""
Receives prompt by appending name of
leader to list of prompted names.
Unless this method has previously been called but not
yet returned, it will then proceed to forward the prompts
received to its application by calling the application's
:func:`~Follower.pull_and_process` method for each prompted name.
"""
if leader_name not in self.prompts_received:
self.prompts_received.append(leader_name)
if not self.is_prompting:
self.is_prompting = True
while self.prompts_received:
prompt = self.prompts_received.pop(0)
for name in self.system.leads[prompt]:
follower = self.apps[name]
assert isinstance(follower, Follower)
follower.pull_and_process(prompt)
self.is_prompting = False
def stop(self) -> None:
self.apps.clear()
def get(self, cls: Type[A]) -> A:
app = self.apps[cls.__name__]
assert isinstance(app, cls)
return app
class MultiThreadedRunner(Runner):
"""
Runs a :class:`System` with a :class:`MultiThreadedRunnerThread` for each
follower in the system definition.
It is a runner, and so implements the :func:`start`, :func:`stop`,
and :func:`get` methods.
"""
def __init__(self, system: System):
"""
Initialises runner with the given :class:`System`.
"""
super().__init__(system)
self.apps: Dict[str, Application] = {}
self.threads: Dict[str, MultiThreadedRunnerThread] = {}
self.is_stopping = Event()
def start(self) -> None:
"""
Starts the runner.
        A multi-threaded runner thread is started for each
        'follower' application in the system, and an instance of
        each non-follower leader application is constructed.
        The followers are then set up to follow the
        applications they follow (have a notification log reader
        with the notification log of the leader), and their leaders
        are set up to lead the follower's thread (send prompts).
"""
super().start()
# Construct followers.
for name in self.system.followers:
app_class = self.system.follower_cls(name)
thread = MultiThreadedRunnerThread(
app_class=app_class,
is_stopping=self.is_stopping,
)
self.threads[name] = thread
thread.start()
if (not thread.is_running.wait(timeout=5)) or thread.has_stopped.is_set():
self.stop()
raise Exception(f"Thread for '{app_class.__name__}' failed to start")
self.apps[name] = thread.app
# Construct non-follower leaders.
for name in self.system.leaders_only:
app = self.system.leader_cls(name)()
self.apps[name] = app
# Lead and follow.
for edge in self.system.edges:
leader = self.apps[edge[0]]
follower = self.apps[edge[1]]
assert isinstance(leader, Leader)
assert isinstance(follower, Follower)
follower.follow(leader.__class__.__name__, leader.log)
thread = self.threads[edge[1]]
leader.lead(thread)
def stop(self) -> None:
self.is_stopping.set()
for thread in self.threads.values():
thread.is_prompted.set()
thread.join()
@property
def has_stopped(self) -> bool:
return all([t.has_stopped.is_set() for t in self.threads.values()])
def get(self, cls: Type[A]) -> A:
app = self.apps[cls.__name__]
assert isinstance(app, cls)
return app
class MultiThreadedRunnerThread(Promptable, Thread):
"""
Runs one process application for a
:class:`~eventsourcing.system.MultiThreadedRunner`.
A multi-threaded runner thread is a :class:`~eventsourcing.system.Promptable`
object, and implements the :func:`receive_prompt` method by collecting
prompted names and setting its threading event 'is_prompted'.
A multi-threaded runner thread is a Python :class:`threading.Thread` object,
and implements the thread's :func:`run` method by waiting until the
'is_prompted' event has been set and then calling its process application's
:func:`~eventsourcing.system.Follower.pull_and_process`
method once for each prompted name. It is expected that
the process application will have been set up by the runner
with a notification log reader from which event notifications
will be pulled.
"""
def __init__(
self,
app_class: Type[Follower],
is_stopping: Event,
):
super().__init__()
self.app_class = app_class
self.is_stopping = is_stopping
self.has_stopped = Event()
self.has_errored = Event()
self.is_prompted = Event()
self.prompted_names: List[str] = []
self.prompted_names_lock = Lock()
self.setDaemon(True)
self.is_running = Event()
def run(self) -> None:
"""
Begins by constructing an application instance from
given application class and then loops forever until
stopped. The loop blocks on waiting for the 'is_prompted'
event to be set, then forwards the prompts already received
to its application by calling the application's
:func:`~Follower.pull_and_process` method for each prompted name.
"""
try:
self.app: Follower = self.app_class()
except Exception:
self.has_errored.set()
self.has_stopped.set()
raise
finally:
self.is_running.set() # pragma: no cover
# -----------------------^ weird branch coverage thing with Python 3.9
try:
while True:
self.is_prompted.wait()
if self.is_stopping.is_set():
self.has_stopped.set()
break
with self.prompted_names_lock:
prompted_names = self.prompted_names
self.prompted_names = []
self.is_prompted.clear()
for name in prompted_names:
self.app.pull_and_process(name)
except Exception:
self.has_errored.set()
self.has_stopped.set()
            self.is_stopping.set()  # signal sibling threads to stop as well
raise
def receive_prompt(self, leader_name: str) -> None:
"""
Receives prompt by appending name of
leader to list of prompted names.
"""
with self.prompted_names_lock:
if leader_name not in self.prompted_names:
self.prompted_names.append(leader_name)
self.is_prompted.set()
class NotificationLogReader:
"""
Reads domain event notifications from a notification log.
"""
DEFAULT_SECTION_SIZE = 10
def __init__(
self,
notification_log: NotificationLog,
section_size: int = DEFAULT_SECTION_SIZE,
):
"""
Initialises a reader with the given notification log,
and optionally a section size integer which determines
the requested number of domain event notifications in
each section retrieved from the notification log.
"""
self.notification_log = notification_log
self.section_size = section_size
def read(self, *, start: int) -> Iterator[Notification]:
"""
Returns a generator that yields event notifications
from the reader's notification log, starting from
given start position (a notification ID).
This method traverses the linked list of sections presented by
a notification log, and yields the individual event notifications
that are contained in each section. When all the event notifications
from a section have been yielded, the reader will retrieve the next
section, and continue yielding event notification until all subsequent
event notifications in the notification log from the start position
have been yielded.
"""
section_id = "{},{}".format(start, start + self.section_size - 1)
while True:
section: Section = self.notification_log[section_id]
for item in section.items:
# Todo: Reintroduce if supporting
# sections with regular alignment?
# if item.id < start:
# continue
yield item
if section.next_id is None:
break
else:
section_id = section.next_id
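# ---------------------------------------------------------------------------
# Illustrative usage (added): wiring applications into a system and running it
# in a single thread. "Orders" and "Analytics" are hypothetical application
# classes; each inner list is one pipe of leader-to-follower edges.
#
#   system = System(pipes=[[Orders, Analytics]])
#   runner = SingleThreadedRunner(system)
#   runner.start()
#   orders = runner.get(Orders)
#   ...  # use the application; followers process events as they are saved
#   runner.stop()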
| 34.119935 | 86 | 0.610203 | 2,368 | 21,052 | 5.308277 | 0.140625 | 0.022673 | 0.00716 | 0.008274 | 0.275975 | 0.214002 | 0.187192 | 0.163166 | 0.157279 | 0.14288 | 0 | 0.001165 | 0.306859 | 21,052 | 616 | 87 | 34.175325 | 0.860266 | 0.34942 | 0 | 0.313291 | 0 | 0 | 0.009251 | 0.001785 | 0 | 0 | 0 | 0.001623 | 0.03481 | 1 | 0.126582 | false | 0 | 0.025316 | 0.012658 | 0.227848 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5099778037aa6f34d68606304b1e7bdd701654a2 | 3,103 | py | Python | project/app_quan_ly_nhap_hang.py | leoodz/do_an_m1 | e49d7bccf1bc6299a5a03385af28c4ab9d7fd0ea | [
"MIT"
] | 1 | 2021-09-02T14:16:40.000Z | 2021-09-02T14:16:40.000Z | project/app_quan_ly_nhap_hang.py | leoodz/do_an_m1 | e49d7bccf1bc6299a5a03385af28c4ab9d7fd0ea | [
"MIT"
] | null | null | null | project/app_quan_ly_nhap_hang.py | leoodz/do_an_m1 | e49d7bccf1bc6299a5a03385af28c4ab9d7fd0ea | [
"MIT"
] | null | null | null | from project import app
from flask import render_template, request, Markup, url_for, redirect, session
from datetime import datetime
from project.xu_ly.xuLy_quan_ly_nhap_hang import *
@app.route('/quan-ly-nhap-hang/dang-nhap',methods = ['GET','POST'])
def qlnh_dn():
    tendn = ''
    ketqua = ''
if request.method == 'POST':
result = request.form
tendn = result.get('TH_tenDN')
mat_khau = result.get('TH_matkhau')
nhanvien = Thong_tin_nhan_vien(tendn,mat_khau)
if nhanvien == None:
ketqua = "Dang nhap khong thanh cong"
else:
session['quan_ly_nhap_hang'] = nhanvien
return redirect('/quan-ly-nhap-hang')
return render_template("quan_ly_nhap_hang/dang_nhap.html",tendn = tendn,ketqua = ketqua)
@app.route('/quan-ly-nhap-hang',methods = ['GET','POST'])
def qlnh():
if session.get('quan_ly_nhap_hang') == None:
return redirect('/quan-ly-nhap-hang/dang-nhap')
chuoi_tim =''
danh_sach_Tivi = Doc_danh_sach_Tivi()
if request.method == 'POST':
result = request.form
chuoi_tim = result.get('TH_gt_tim')
danh_sach_Tivi = Tra_cuu_Tivi(chuoi_tim,danh_sach_Tivi)
return render_template("quan_ly_nhap_hang/index.html",danh_sach_tivi = danh_sach_Tivi,chuoi_tim = chuoi_tim)
@app.route('/quan-ly-nhap-hang/cap-nhat/<string:Ma_so>/',methods = ['GET','POST'])
def qlnh_nh(Ma_so):
    if session.get('quan_ly_nhap_hang') is None:
return redirect('/quan-ly-nhap-hang/dang-nhap')
thong_bao = ''
sl = 0
tivi = Thong_tin_tivi(Ma_so)
phieuban = ''
dangxuat = ''
if request.method == 'GET':
result = request.form
phieuban = request.values.get('TH_Phieu_Ban')
dangxuat = request.values.get('TH_Dang_Xuat')
if "phieuban" == phieuban:
return redirect('/nhan-vien-ban-hang/thong-ke-phieu-ban')
if 'dangxuat' == dangxuat:
session.pop('quan_ly_nhap_hang')
return redirect('/quan-ly-nhap-hang/dang-nhap')
        if tivi is None:
            thong_bao = Ma_so + ' khong ton tai'  # "<Ma_so> does not exist"
elif request.method == 'POST':
result = request.form
sl = result.get('TH_GiaMoi')
        if int(sl) <= 0:
            thong_bao = 'So luong ko hop le'  # "Invalid quantity"
else:
tivi['Don_gia_Nhap'] = sl
Danh_sach_slt = Dem_LT_toan_bo_nhom()
session['Thong_tin_ton'] = Danh_sach_slt
Ghi_Tivi(tivi)
return redirect('/nhan-vien-ban-hang/thong-ke-so-luong-ton')
return render_template("quan_ly_nhap_hang/ban_hang.html",tivi = tivi,thong_bao = thong_bao,sl = sl,phieuban=phieuban, dangxuat=dangxuat)
@app.route('/nhan-vien-ban-hang/thong-ke-so-luong-ton',methods = ['GET','POST'])
def qlnh_tkt():
    if session.get('quan_ly_nhap_hang') is None:
return redirect('/nhan-vien-ban-hang/dang-nhap')
Ngay = datetime.now()
Nhom = session.get('Thong_tin_ton')
return render_template("quan_ly_nhap_hang/phieu_slt.html",Nhom = Nhom) | 39.278481 | 141 | 0.624557 | 429 | 3,103 | 4.275058 | 0.228438 | 0.055616 | 0.092694 | 0.129771 | 0.454744 | 0.379498 | 0.292803 | 0.216467 | 0.130862 | 0.095965 | 0 | 0.000847 | 0.239446 | 3,103 | 79 | 142 | 39.278481 | 0.776271 | 0 | 0 | 0.202899 | 0 | 0 | 0.252148 | 0.14111 | 0 | 0 | 0 | 0 | 0 | 1 | 0.057971 | false | 0 | 0.057971 | 0 | 0.275362 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
50a005f6036c980a83989a65c38573145af82123 | 5,550 | py | Python | project_manager/omr_planner.py | MathiasMeuleman/crowd_task_manager | 3794da628ad3c55742cce6870874df258c5323f2 | [
"Apache-2.0"
] | null | null | null | project_manager/omr_planner.py | MathiasMeuleman/crowd_task_manager | 3794da628ad3c55742cce6870874df258c5323f2 | [
"Apache-2.0"
] | null | null | null | project_manager/omr_planner.py | MathiasMeuleman/crowd_task_manager | 3794da628ad3c55742cce6870874df258c5323f2 | [
"Apache-2.0"
] | null | null | null | import pika
import yaml
from datetime import datetime
import json
import sys
sys.path.append("..")
import common.settings as settings
import common.file_system_manager as fsm
from pymongo import MongoClient
from bson.objectid import ObjectId
with open("../settings.yaml", "r") as file:
config = yaml.safe_load(file.read())
rabbitmq_address = config['rabbitmq_address']
address = rabbitmq_address.split(":")
client = MongoClient(settings.mongo_address[0], int(settings.mongo_address[1]))
db = client.trompa_test
def read_message(queue_name):
connection = pika.BlockingConnection(
pika.ConnectionParameters(
            host=address[0],
            port=int(address[1])))  # ensure the port parsed from the address string is an integer
channel = connection.channel()
msg = ''
method_frame, header_frame, body = channel.basic_get(queue_name)
if method_frame:
msg = json.loads(body.decode("utf-8"))
channel.basic_ack(method_frame.delivery_tag)
channel.close()
connection.close()
return msg
def send_message(queue_name, routing_key, message):
connection = pika.BlockingConnection(
pika.ConnectionParameters(
            host=address[0],
            port=int(address[1])))  # ensure the port parsed from the address string is an integer
channel = connection.channel()
channel.queue_declare(queue=queue_name)
channel.basic_publish(exchange='', routing_key=routing_key, body=message)
connection.close()
def check_for_omr_project(queue_name):
message = read_message(queue_name)
return message
def call_module(module_name, score_name, score_id):
queue_name = module_name + '_queue'
routing_key = queue_name
message = {'score_name': score_name, '_id': score_id}
send_message(queue_name, routing_key, json.dumps(message))
def main():
try:
print(datetime.now(), 'omr_planner started!')
while True:
score = read_message('omr_planner_queue')
if len(score) > 0:
print(
datetime.now(),
'new score: ',
score['score_name'],
'sending to measure_detector')
if score['_id'] is not None:
call_module(
'measure_detector',
score['score_name'],
score['_id'])
score_status = read_message('status_queue')
if len(score_status) > 0:
if score_status['module'] == 'measure_detector':
print(
datetime.now(),
'sending ',
score_status['name'], 'to slicer')
print(
datetime.now(),
'sending ',
score_status['name'], 'to github_init')
send_message(
'slicer_queue',
'slicer_queue',
json.dumps({
'_id': score_status['_id'],
'name': score_status['name']}))
send_message(
'github_init_queue',
'github_init_queue',
json.dumps({
'_id': score_status['_id'],
'name': score_status['name']}))
continue
if score_status['module'] == 'slicer':
print(
datetime.now(),
'sending ',
score_status['name'], 'task_scheduler')
send_message(
'task_scheduler_queue',
'task_scheduler_queue',
json.dumps({
'_id': score_status['_id'],
'name': score_status['name'],
'action': 'verify_measures'}))
continue
if score_status['module'] == 'aggregator':
# send message to score_rebuilder_queue
print(
datetime.now(),
'sending ',
score_status['name'], 'to score_rebuilder')
send_message(
'score_rebuilder_queue',
'score_rebuilder_queue',
json.dumps({
'task_id': score_status['_id'],
'name': score_status['name']}))
continue
if score_status['module'] == 'github_init':
# communicate to ce that github repo has been initiated
continue
if score_status['module'] == 'github_update':
# communicate with ce that github repo has been updated
continue
if score_status['module'] == 'score_rebuilder':
# send message to github update?
print(
datetime.now(),
'sending ',
score_status['name'], 'to github_update')
send_message(
'github_queue',
'github_queue',
json.dumps({
'task_id': score_status['task_id'],
'name': score_status['name']}))
continue
except KeyboardInterrupt:
print('INTERRUPTED!')
if __name__ == "__main__":
main()
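
# Illustrative sketch of kicking off the pipeline (the payload shape is
# inferred from how read_message() output is consumed above; the id value
# is a placeholder):
#
#     send_message('omr_planner_queue', 'omr_planner_queue',
#                  json.dumps({'score_name': 'my_score',
#                              '_id': '507f1f77bcf86cd799439011'}))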
| 36.513158 | 79 | 0.487748 | 491 | 5,550 | 5.238289 | 0.246436 | 0.098367 | 0.05832 | 0.044323 | 0.365086 | 0.34409 | 0.276439 | 0.257387 | 0.206843 | 0.171073 | 0 | 0.002767 | 0.413874 | 5,550 | 151 | 80 | 36.754967 | 0.787888 | 0.031712 | 0 | 0.484848 | 0 | 0 | 0.132986 | 0.007823 | 0 | 0 | 0 | 0 | 0 | 1 | 0.037879 | false | 0 | 0.068182 | 0 | 0.121212 | 0.060606 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
50a0ec530390605bfb7a99495ecd98d96ad7c319 | 2,614 | py | Python | models/Nets.py | SohamMazumder/Federated_Segmentation | d4eb681441003ba20f8b251a42a811c8c436f04e | [
"MIT"
] | 1 | 2020-02-17T14:27:09.000Z | 2020-02-17T14:27:09.000Z | models/Nets.py | SohamMazumder/Federated_Segmentation | d4eb681441003ba20f8b251a42a811c8c436f04e | [
"MIT"
] | null | null | null | models/Nets.py | SohamMazumder/Federated_Segmentation | d4eb681441003ba20f8b251a42a811c8c436f04e | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Python version: 3.6
import torch
from torch import nn
import torch.nn.functional as F
import torchvision
from nn_common_modules import modules as sm
from squeeze_and_excitation import squeeze_and_excitation as se
class ResnetDermo(nn.Module):
def __init__(self, args):
super(ResnetDermo, self).__init__()
        self.resnet = torchvision.models.resnet18(pretrained=True)
self.resnet.fc = nn.Linear(512, args.num_classes)
def run(self):
return self.resnet
class QuickNat(nn.Module):
"""
A PyTorch implementation of QuickNAT
"""
def __init__(self, params):
"""
:param params: {'num_channels':1,
'num_filters':64,
'kernel_h':5,
'kernel_w':5,
'stride_conv':1,
'pool':2,
'stride_pool':2,
'num_classes':28
'se_block': False,
'drop_out':0.2}
"""
super(QuickNat, self).__init__()
self.encode1 = sm.EncoderBlock(params, se_block_type=se.SELayer.CSSE)
params['num_channels'] = 64
self.encode2 = sm.EncoderBlock(params, se_block_type=se.SELayer.CSSE)
self.encode3 = sm.EncoderBlock(params, se_block_type=se.SELayer.CSSE)
self.encode4 = sm.EncoderBlock(params, se_block_type=se.SELayer.CSSE)
self.bottleneck = sm.DenseBlock(params, se_block_type=se.SELayer.CSSE)
params['num_channels'] = 128
self.decode1 = sm.DecoderBlock(params, se_block_type=se.SELayer.CSSE)
self.decode2 = sm.DecoderBlock(params, se_block_type=se.SELayer.CSSE)
self.decode3 = sm.DecoderBlock(params, se_block_type=se.SELayer.CSSE)
self.decode4 = sm.DecoderBlock(params, se_block_type=se.SELayer.CSSE)
params['num_channels'] = 64
self.classifier = sm.ClassifierBlock(params)
def forward(self, input):
"""
:param input: X
:return: probabiliy map
"""
e1, out1, ind1 = self.encode1.forward(input)
e2, out2, ind2 = self.encode2.forward(e1)
e3, out3, ind3 = self.encode3.forward(e2)
e4, out4, ind4 = self.encode4.forward(e3)
bn = self.bottleneck.forward(e4)
d4 = self.decode4.forward(bn, out4, ind4)
d3 = self.decode1.forward(d4, out3, ind3)
d2 = self.decode2.forward(d3, out2, ind2)
d1 = self.decode3.forward(d2, out1, ind1)
prob = self.classifier.forward(d1)
return prob
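
# Minimal usage sketch (illustrative only): the params dict follows the
# docstring above, and the 1x1x256x256 input shape is an assumption.
#
#     params = {'num_channels': 1, 'num_filters': 64, 'kernel_h': 5,
#               'kernel_w': 5, 'stride_conv': 1, 'pool': 2, 'stride_pool': 2,
#               'num_classes': 28, 'se_block': False, 'drop_out': 0.2}
#     model = QuickNat(params)
#     prob = model(torch.rand(1, 1, 256, 256))  # per-class probability map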
| 33.948052 | 78 | 0.602907 | 320 | 2,614 | 4.759375 | 0.346875 | 0.045962 | 0.076822 | 0.10046 | 0.307945 | 0.307945 | 0.307945 | 0.307945 | 0.307945 | 0.28956 | 0 | 0.039936 | 0.281561 | 2,614 | 76 | 79 | 34.394737 | 0.771033 | 0.175593 | 0 | 0.04878 | 0 | 0 | 0.01791 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.097561 | false | 0 | 0.146341 | 0.02439 | 0.341463 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
50a297f87a36350d09e37f4b088c38cda1aa4a1e | 1,488 | py | Python | trainer/checkpoint.py | kaylode/self-driving-car-sim | dc0e991dcf201de7c355917be2dd8c6fce396950 | [
"MIT"
] | 5 | 2021-07-10T14:46:05.000Z | 2021-12-13T01:33:08.000Z | trainer/checkpoint.py | kaylode/self-driving-car-sim | dc0e991dcf201de7c355917be2dd8c6fce396950 | [
"MIT"
] | null | null | null | trainer/checkpoint.py | kaylode/self-driving-car-sim | dc0e991dcf201de7c355917be2dd8c6fce396950 | [
"MIT"
] | null | null | null | import torch
import torch.nn as nn
import os
from datetime import datetime
class Checkpoint():
"""
Checkpoint for saving model state
:param save_per_epoch: (int)
:param path: (string)
"""
def __init__(self, save_per_epoch = 1, path = None):
self.path = path
self.save_per_epoch = save_per_epoch
# Create folder
if self.path is None:
self.path = os.path.join('weights',datetime.now().strftime('%Y-%m-%d_%H-%M-%S'))
def save(self, model, **kwargs):
"""
Save model and optimizer weights
:param model: Pytorch model with state dict
"""
        if not os.path.exists(self.path):
            # makedirs also creates the parent 'weights' folder if it is missing
            os.makedirs(self.path, exist_ok=True)
epoch = kwargs['epoch'] if 'epoch' in kwargs else '0'
model_path = "_".join([model.model_name,str(epoch)])
if 'interrupted' in kwargs:
model_path +='_interrupted'
weights = {
'model': model.model.state_dict(),
'optimizer': model.optimizer.state_dict(),
}
torch.save(weights, os.path.join(self.path,model_path)+".pth")
def load_checkpoint(model, path):
"""
Load trained model checkpoint
:param model: (nn.Module)
:param path: (string) checkpoint path
"""
state = torch.load(path)
model.model.load_state_dict(state["model"])
model.optimizer.load_state_dict(state["optimizer"])
print("Loaded Successfully!")
| 28.615385 | 92 | 0.589382 | 182 | 1,488 | 4.675824 | 0.32967 | 0.056404 | 0.056404 | 0.037603 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.001876 | 0.283602 | 1,488 | 51 | 93 | 29.176471 | 0.796435 | 0.181452 | 0 | 0 | 0 | 0 | 0.097368 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.111111 | false | 0 | 0.148148 | 0 | 0.296296 | 0.037037 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
50a2998e8a59ce8e8e053df225f4908e785fd6cd | 1,194 | py | Python | deepl/settings.py | vladradishevsky/deepl-translate | 7d30bcc2b7d910411f22f9540fd72c8f4868a8c8 | [
"MIT"
] | 1 | 2022-02-18T10:12:51.000Z | 2022-02-18T10:12:51.000Z | deepl/settings.py | Lain1984/deepl-translate | ca61c63ff23031291fbaf220de92018fb85a57f0 | [
"MIT"
] | null | null | null | deepl/settings.py | Lain1984/deepl-translate | ca61c63ff23031291fbaf220de92018fb85a57f0 | [
"MIT"
] | null | null | null | API_URL = "https://www2.deepl.com/jsonrpc"
MAGIC_NUMBER = int("CAFEBABE", 16)
SUPPORTED_LANGUAGES = [
{"code": "BG", "language": "Bulgarian"},
{"code": "ZH", "language": "Chinese"},
{"code": "CS", "language": "Czech"},
{"code": "DA", "language": "Danish"},
{"code": "NL", "language": "Dutch"},
{"code": "EN", "language": "English"},
{"code": "ET", "language": "Estonian"},
{"code": "FI", "language": "Finnish"},
{"code": "FR", "language": "French"},
{"code": "DE", "language": "German"},
{"code": "EL", "language": "Greek"},
{"code": "HU", "language": "Hungarian"},
{"code": "IT", "language": "Italian"},
{"code": "JA", "language": "Japanese"},
{"code": "LV", "language": "Latvian"},
{"code": "LT", "language": "Lithuanian"},
{"code": "PL", "language": "Polish"},
{"code": "PT", "language": "Portuguese"},
{"code": "RO", "language": "Romanian"},
{"code": "RU", "language": "Russian"},
{"code": "SK", "language": "Slovak"},
{"code": "SL", "language": "Slovenian"},
{"code": "ES", "language": "Spanish"},
{"code": "SV", "language": "Swedish"},
]
SUPPORTED_FORMALITY_TONES = ["formal", "informal"]
| 36.181818 | 50 | 0.520938 | 115 | 1,194 | 5.365217 | 0.591304 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.003049 | 0.175879 | 1,194 | 32 | 51 | 37.3125 | 0.623984 | 0 | 0 | 0 | 0 | 0 | 0.469012 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
50a57cc99727e42967e5f326c32613eda7d204c4 | 670 | py | Python | algorithm/14-Average and Median.py | LeeBeral/python | 9f0d360d69ee5245e3ef13a9dc9fc666374587a4 | [
"MIT"
] | null | null | null | algorithm/14-Average and Median.py | LeeBeral/python | 9f0d360d69ee5245e3ef13a9dc9fc666374587a4 | [
"MIT"
] | null | null | null | algorithm/14-Average and Median.py | LeeBeral/python | 9f0d360d69ee5245e3ef13a9dc9fc666374587a4 | [
"MIT"
] | null | null | null | # Average, sum of the numbers divided by how many numbers are in the list.
nums = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 20, 2000]
def average(nums):
s = 0
for i in nums:
s += i
return '{:.1f}'.format(s / len(nums))
# In python, these can replace by fuction: sum
# return sum(nums)/len(nums)
print(average(nums))
# Median, the median is the value separating the higher half from the lower half of a data sample.
# Median: in a list sorted from smallest to largest, the value in the middle position;
# an odd-length list has a single middle value, while for an even-length list the
# median is the average of the two middle values.
def median(nums):
nums.sort()
if len(nums) % 2:
return nums[len(nums)//2]
else:
return (nums[len(nums)//2-1] + nums[len(nums)//2])/2
print(median(nums))
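# Worked example for the sample list above: sum([1..10, 20, 2000]) = 2075,
# so average(nums) prints '172.9' (2075 / 12 formatted to one decimal place),
# and median(nums) prints 6.5 (the mean of the two middle sorted values, 6 and 7).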
| 29.130435 | 98 | 0.623881 | 112 | 670 | 3.732143 | 0.526786 | 0.100478 | 0.105263 | 0.086124 | 0.086124 | 0 | 0 | 0 | 0 | 0 | 0 | 0.048638 | 0.232836 | 670 | 22 | 99 | 30.454545 | 0.764591 | 0.431343 | 0 | 0 | 0 | 0 | 0.016043 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.142857 | false | 0 | 0 | 0 | 0.357143 | 0.142857 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
50a57dd4d6b04e6faa4332b2ede9585ac96ce5ce | 3,715 | py | Python | webscraping/course_webscraping.py | RadiateGold/UProductivity | ddc2617ff87bc4233eab85e60482812e7a0a1ba0 | [
"MIT"
] | 1 | 2021-11-18T16:15:25.000Z | 2021-11-18T16:15:25.000Z | webscraping/course_webscraping.py | RadiateGold/UProductivity | ddc2617ff87bc4233eab85e60482812e7a0a1ba0 | [
"MIT"
] | null | null | null | webscraping/course_webscraping.py | RadiateGold/UProductivity | ddc2617ff87bc4233eab85e60482812e7a0a1ba0 | [
"MIT"
] | null | null | null | import requests
from bs4 import BeautifulSoup
import json
"""
This program attempts to gather data about every course that the University of Delaware offers using BeautifulSoup
to webscrape the courses page on the UD catalog. URL = "https://catalog.udel.edu/content.php?catoid=47&navoid=8868".
Data collected will be loaded onto the courses.json file.
"""
json_file = 'courses.json'
courses = {"": {
"credits": [
0
],
"title": "",
"group": None,
"dle": False,
"fys": False,
"multicultural": False,
"capstone": False,
"coe": False,
"pcp": False
},}
# Since there are 46 pages of courses, the program has to iterate through each page by manipulating the format of the
# URL.
first_half_of_url = "https://catalog.udel.edu/content.php?catoid=47&catoid=47&navoid=8868&filter%5Bitem_type%" \
"5D=3&filter%5Bonly_active%5D=1&filter%5B3%5D=1&filter%5Bcpage%5D="
second_half_of_url = "#acalog_template_course_filter"
for i in range(1, 47):
url = first_half_of_url + str(i) + second_half_of_url
page = requests.get(url)
soup = BeautifulSoup(page.content, "html.parser")
# Iterates through each course on the courses page to find the course prefix (code), course number, course title
# and number of credits
find_courses = soup.find_all("td", class_="width")
for find_course in find_courses:
a = find_course.find("a")
course_elements = a.text.split()
course_prefix = course_elements[0]
course_num = course_elements[1]
course_code = course_prefix + " " + course_num
title = course_elements[3:]
course_title = ""
for word in title:
course_title = course_title + " " + word
course_title = course_title[1:]
        # Normalise typographic apostrophes to plain ASCII apostrophes.
        # str.replace handles every occurrence, not just the last one found.
        course_title = course_title.replace("’", "'")
# Finds the link to the course page to find the number of credits
link = find_course.find('a', href=True).get('href')
new_url = "https://catalog.udel.edu/" + link
new_page = requests.get(new_url)
new_soup = BeautifulSoup(new_page.content, "html.parser")
# Finds the number of credits on the course page
find_credits = new_soup.find_all("p")
refined = find_credits[1].text.split()
found = False
credits_position = None
for position, word in enumerate(refined):
if not found:
if word == 'Credit(s):':
credits_position = position
found = True
num_credits = []
try:
if refined[credits_position + 1][1] == "-":
min = int(refined[credits_position + 1][0])
max = int(refined[credits_position + 1][2])
num_credits = list(range(min, max + 1))
else:
num_credits.append(int(refined[credits_position + 1][0]))
        except (TypeError, IndexError, ValueError):
            # Fall back to a single-credit value when the token is not a range.
            num_credits.append(int(refined[credits_position + 1][0]))
# Adds the course information to the courses dictionary
courses[course_code] = {"credits": num_credits, "title": course_title}
# Storing the courses dictionary on the json file
with open(json_file, 'w') as file_object:
file_object.write(json.dumps(courses, indent=2))
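# Example of a resulting courses.json entry (values are illustrative):
#     "CISC 108": {"credits": [3], "title": "Introduction to Computer Science I"}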
| 37.525253 | 118 | 0.606191 | 466 | 3,715 | 4.67382 | 0.315451 | 0.060606 | 0.036731 | 0.052801 | 0.124885 | 0.088613 | 0.076217 | 0.076217 | 0.076217 | 0 | 0 | 0.020509 | 0.291252 | 3,715 | 98 | 119 | 37.908163 | 0.806684 | 0.134051 | 0 | 0.028571 | 0 | 0.028571 | 0.119132 | 0.033784 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.042857 | 0 | 0.042857 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
50a64fc49d443a620cb242beadfc174fc8c7c939 | 1,557 | py | Python | data/dataset/cifer_10_dataset.py | ryuji0123/tmp | 030d1ae5a55657c4c29490b45011b6b42864c85b | [
"MIT"
] | 10 | 2021-02-20T17:12:35.000Z | 2022-03-23T09:51:01.000Z | data/dataset/cifer_10_dataset.py | ryuji0123/tmp | 030d1ae5a55657c4c29490b45011b6b42864c85b | [
"MIT"
] | 12 | 2021-03-01T01:04:32.000Z | 2021-04-04T12:43:36.000Z | data/dataset/cifer_10_dataset.py | ryuji0123/tmp | 030d1ae5a55657c4c29490b45011b6b42864c85b | [
"MIT"
] | 6 | 2021-03-03T08:08:35.000Z | 2021-11-09T10:47:33.000Z | import numpy as np
from torchvision.datasets import CIFAR10
from torch.utils.data.sampler import SubsetRandomSampler
class CIFAR10Dataset:
def __init__(self, root: str, transform, validation_size: float) -> None:
self.set_train_and_validation_data(
train=True,
download=True,
root=root,
transform=transform,
validation_size=validation_size,
)
self.set_test_data(
train=False, download=True, root=root, transform=transform
)
def set_train_and_validation_data(
self,
download: bool,
root: str,
train: bool,
transform,
validation_size: float,
) -> None:
train_dataset = CIFAR10(download=download, root=root, train=train, transform=transform)
train_num = len(train_dataset)
indices = list(range(train_num))
split = int(np.floor(validation_size * train_num))
train_indices, validation_indices = indices[split:], indices[:split]
self.train_data_dict = {
'dataset': train_dataset,
'train_sampler': SubsetRandomSampler(train_indices),
'validation_sampler': SubsetRandomSampler(validation_indices),
}
def set_test_data(
self,
download: bool,
root: str,
train: bool,
transform,
) -> None:
self.test_data_dict = {
'dataset': CIFAR10(
download=download, root=root, train=train, transform=transform
)
}
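
# Illustrative usage sketch (the transform choice and batch size are assumptions):
#
#     from torch.utils.data import DataLoader
#     from torchvision import transforms
#
#     ds = CIFAR10Dataset(root='./data', transform=transforms.ToTensor(),
#                         validation_size=0.1)
#     train_loader = DataLoader(ds.train_data_dict['dataset'], batch_size=64,
#                               sampler=ds.train_data_dict['train_sampler'])
#     val_loader = DataLoader(ds.train_data_dict['dataset'], batch_size=64,
#                             sampler=ds.train_data_dict['validation_sampler'])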
| 29.377358 | 95 | 0.608221 | 157 | 1,557 | 5.796178 | 0.273885 | 0.076923 | 0.075824 | 0.061538 | 0.438462 | 0.327473 | 0.243956 | 0.243956 | 0.243956 | 0.145055 | 0 | 0.007387 | 0.304432 | 1,557 | 52 | 96 | 29.942308 | 0.832872 | 0 | 0 | 0.266667 | 0 | 0 | 0.028902 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.066667 | false | 0 | 0.066667 | 0 | 0.155556 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
50a6ed9d7cd1be46ef91b5ad9ad6fcfa055225f1 | 3,344 | py | Python | STMPython/STM.py | alexlib/partracking3D | e3bb7aa48d20de8bb02a2f3549f07f3a411249f4 | [
"MIT"
] | null | null | null | STMPython/STM.py | alexlib/partracking3D | e3bb7aa48d20de8bb02a2f3549f07f3a411249f4 | [
"MIT"
] | null | null | null | STMPython/STM.py | alexlib/partracking3D | e3bb7aa48d20de8bb02a2f3549f07f3a411249f4 | [
"MIT"
] | 1 | 2021-09-06T12:00:33.000Z | 2021-09-06T12:00:33.000Z | import STMFunctions as stmf
import sys
import math
import numpy as np
import itertools as it
import copy
import struct
from datetime import datetime
if len(sys.argv) == 2:
filename = sys.argv[1] # First argument is filename
print(filename)
maxframes = 999999
tstart = datetime.now().timestamp()
neighbours = 6
boundingbox = [[0, 75], [-15, 60], [115,185]]
[nx,ny,nz] = [70,70,70]
    cammatchfunc = lambda x: len(x) >= 2  # A valid match must involve at least this many cameras (and thus rays)
maxmatchesperray = 1 # Number of matches/ray
maxdistance = 0.15 # Max distance allowed for a match.
fileout = copy.copy(filename).split(".")
fileout = ".".join(fileout[0:len(fileout)-1])
filelog = fileout + ".log"
fileout = fileout.replace("rays","matched")+".dat"
fout = open(fileout, 'wb')
fin = open(filename, 'rb')
frameid = 0
numpts = fin.read(4) # Read 4 bytes header
    while len(numpts) > 0 and frameid < maxframes:  # If something is read
frameid += 1
numpts = struct.unpack('I', numpts)[0] # Interpret header as 4 byte uint
flog = open(filelog, 'a')
flog.write("#######\n")
flog.write("Frame: " + str(frameid) + "\nNumber of rays: " + str(numpts) + "\n")
flog.close()
print("Frame:",frameid,". # of rays:", numpts)
# Read rays
        raydata = fin.read(numpts*27)  # 27 bytes per ray: 1 uint8 + 1 uint16 + 6 float32 (1 + 2 + 6*4)
        raydata = struct.unpack('='+('BH6f'*numpts),raydata)  # Format string '=BH6fBH6f...BH6f'
        raydata = list(map(lambda i: list(raydata[8*i:8*i+8]) ,range(len(raydata)//8)))  # Reshape to N rows of 8 values; np.reshape would convert everything to floats
# The actual call
output = stmf.SpaceTraversalMatching(list(raydata),boundingbox,nx=nx,nz=nz,ny=ny,cammatchfunc=cammatchfunc,neighbours=neighbours,logfile=filelog,maxdistance=maxdistance)
# Prepare output
print("Matches found:",len(output))
coutput = []
for o in output:
tmp = [len(o[0])]
tmp.extend(o[1])
tmp.append(o[2])
for j in o[0]:
tmp.extend(j)
coutput.append(tmp)
output = coutput
del coutput
# Output the output
buf = struct.pack('I', len(output)) # Write the number of matches
fout.write(buf)
for out in output:
buf = struct.pack('=B4f' + out[0]*"BH", *out) # Write each to the output file
fout.write(buf)
numpts = fin.read(4) # Read next header
#print("numpts:",numpts)
#print("type:",type(numpts))
fout.close()
fin.close()
print("Finished")
elapsed = datetime.now().timestamp() - tstart
print("Elapsed time:",elapsed)
print("Elapsed time/frame:",elapsed/frameid)
else:
print("There should be an argument with the filename!")
| 40.780488 | 178 | 0.532297 | 386 | 3,344 | 4.611399 | 0.393782 | 0.017978 | 0.018539 | 0.01573 | 0.020225 | 0 | 0 | 0 | 0 | 0 | 0 | 0.028741 | 0.344498 | 3,344 | 81 | 179 | 41.283951 | 0.783303 | 0.17494 | 0 | 0.0625 | 0 | 0 | 0.072611 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.125 | 0 | 0.125 | 0.109375 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
50a7d58532c2b2427b66d784532d0bced84e397f | 4,480 | py | Python | test/python/compiler/test_assembler.py | dominik-steenken/qiskit-terra | 1e04bad5067610abda5e7cbba36939745075f3b9 | [
"Apache-2.0"
] | null | null | null | test/python/compiler/test_assembler.py | dominik-steenken/qiskit-terra | 1e04bad5067610abda5e7cbba36939745075f3b9 | [
"Apache-2.0"
] | null | null | null | test/python/compiler/test_assembler.py | dominik-steenken/qiskit-terra | 1e04bad5067610abda5e7cbba36939745075f3b9 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright 2019, IBM.
#
# This source code is licensed under the Apache License, Version 2.0 found in
# the LICENSE.txt file in the root directory of this source tree.
"""Assembler Test."""
import unittest
import numpy as np
from qiskit.circuit import QuantumRegister, ClassicalRegister, QuantumCircuit
from qiskit.circuit import Instruction
from qiskit.compiler import assemble_circuits
from qiskit.compiler import RunConfig
from qiskit.qobj import QasmQobj
from qiskit.test import QiskitTestCase
class TestAssembler(QiskitTestCase):
"""Tests for assembling circuits to qobj."""
def test_assemble_single_circuit(self):
"""Test assembling a single circuit.
"""
qr = QuantumRegister(2, name='q')
cr = ClassicalRegister(2, name='c')
circ = QuantumCircuit(qr, cr, name='circ')
circ.h(qr[0])
circ.cx(qr[0], qr[1])
circ.measure(qr, cr)
run_config = RunConfig(shots=2000, memory=True)
qobj = assemble_circuits(circ, run_config=run_config)
self.assertIsInstance(qobj, QasmQobj)
self.assertEqual(qobj.config.shots, 2000)
self.assertEqual(qobj.config.memory, True)
self.assertEqual(len(qobj.experiments), 1)
self.assertEqual(qobj.experiments[0].instructions[1].name, 'cx')
def test_assemble_multiple_circuits(self):
"""Test assembling multiple circuits, all should have the same config.
"""
qr0 = QuantumRegister(2, name='q0')
qc0 = ClassicalRegister(2, name='c0')
circ0 = QuantumCircuit(qr0, qc0, name='circ0')
circ0.h(qr0[0])
circ0.cx(qr0[0], qr0[1])
circ0.measure(qr0, qc0)
qr1 = QuantumRegister(3, name='q1')
qc1 = ClassicalRegister(3, name='c1')
circ1 = QuantumCircuit(qr1, qc1, name='circ0')
circ1.h(qr1[0])
circ1.cx(qr1[0], qr1[1])
circ1.cx(qr1[0], qr1[2])
circ1.measure(qr1, qc1)
run_config = RunConfig(shots=100, memory=False, seed=6)
qobj = assemble_circuits([circ0, circ1], run_config=run_config)
self.assertIsInstance(qobj, QasmQobj)
self.assertEqual(qobj.config.seed, 6)
self.assertEqual(len(qobj.experiments), 2)
self.assertEqual(qobj.experiments[1].config.n_qubits, 3)
self.assertEqual(len(qobj.experiments), 2)
self.assertEqual(len(qobj.experiments[1].instructions), 6)
def test_assemble_no_run_config(self):
"""Test assembling with no run_config, relying on default.
"""
qr = QuantumRegister(2, name='q')
qc = ClassicalRegister(2, name='c')
circ = QuantumCircuit(qr, qc, name='circ')
circ.h(qr[0])
circ.cx(qr[0], qr[1])
circ.measure(qr, qc)
qobj = assemble_circuits(circ)
self.assertIsInstance(qobj, QasmQobj)
self.assertIsNone(getattr(qobj.config, 'shots', None))
def test_assemble_initialize(self):
"""Test assembling a circuit with an initialize.
"""
q = QuantumRegister(2, name='q')
circ = QuantumCircuit(q, name='circ')
circ.initialize([1/np.sqrt(2), 0, 0, 1/np.sqrt(2)], q[:])
qobj = assemble_circuits(circ)
self.assertIsInstance(qobj, QasmQobj)
self.assertEqual(qobj.experiments[0].instructions[0].name, 'initialize')
np.testing.assert_almost_equal(qobj.experiments[0].instructions[0].params,
[0.7071067811865, 0, 0, 0.707106781186])
def test_assemble_opaque_inst(self):
"""Test opaque instruction is assembled as-is"""
opaque_inst = Instruction(name='my_inst', num_qubits=4,
num_clbits=2, params=[0.5, 0.4])
q = QuantumRegister(6, name='q')
c = ClassicalRegister(4, name='c')
circ = QuantumCircuit(q, c, name='circ')
circ.append(opaque_inst, [q[0], q[2], q[5], q[3]], [c[3], c[0]])
qobj = assemble_circuits(circ)
self.assertIsInstance(qobj, QasmQobj)
self.assertEqual(len(qobj.experiments[0].instructions), 1)
self.assertEqual(qobj.experiments[0].instructions[0].name, 'my_inst')
self.assertEqual(qobj.experiments[0].instructions[0].qubits, [0, 2, 5, 3])
self.assertEqual(qobj.experiments[0].instructions[0].memory, [3, 0])
self.assertEqual(qobj.experiments[0].instructions[0].params, [0.5, 0.4])
if __name__ == '__main__':
unittest.main(verbosity=2)
| 38.62069 | 82 | 0.638393 | 567 | 4,480 | 4.964727 | 0.231041 | 0.079929 | 0.067496 | 0.079574 | 0.387211 | 0.344938 | 0.326465 | 0.217762 | 0.154174 | 0.13286 | 0 | 0.047042 | 0.226563 | 4,480 | 115 | 83 | 38.956522 | 0.765368 | 0.116295 | 0 | 0.2 | 0 | 0 | 0.020413 | 0 | 0 | 0 | 0 | 0 | 0.275 | 1 | 0.0625 | false | 0 | 0.1 | 0 | 0.175 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
50a978f04ebdc3b9bc640cf4f5ed8e19d9442112 | 2,061 | py | Python | setup.py | bollwyvl/ansible-jupyter-kernel | 09efa5d4073a72d1d56cce60574c743a1feb1954 | [
"Apache-2.0"
] | null | null | null | setup.py | bollwyvl/ansible-jupyter-kernel | 09efa5d4073a72d1d56cce60574c743a1feb1954 | [
"Apache-2.0"
] | null | null | null | setup.py | bollwyvl/ansible-jupyter-kernel | 09efa5d4073a72d1d56cce60574c743a1feb1954 | [
"Apache-2.0"
] | null | null | null | import json
import os
from setuptools import setup, find_packages
from setuptools.command.install import install
class Installer(install):
def run(self):
# Regular install
install.run(self)
# Post install
print('Installing Ansible Kernel kernelspec')
from jupyter_client.kernelspec import KernelSpecManager
from IPython.utils.tempdir import TemporaryDirectory
kernel_json = {
"argv": ["python", "-m", "ansible_kernel", "-f", "{connection_file}"],
"codemirror_mode": "yaml",
"display_name": "Ansible",
"language": "ansible"
}
with TemporaryDirectory() as td:
os.chmod(td, 0o755)
with open(os.path.join(td, 'kernel.json'), 'w') as f:
json.dump(kernel_json, f, sort_keys=True)
ksm = KernelSpecManager()
ksm.install_kernel_spec(td, 'ansible', user=self.user, replace=True, prefix=self.prefix)
setup(
name='ansible-kernel',
version='1.0.0',
description='An Ansible kernel for Jupyter notebooks',
long_description='An Ansible kernel for Jupyter notebooks',
long_description_content_type='text/plain',
packages=find_packages(),
package_data={'ansible_kernel': ['templates/ansible_playbook.tpl',
'templates/ansible_tasks.tpl',
'modules.yml',
'module_args.yml']},
cmdclass={'install': Installer},
license='Apache',
install_requires=[
'ansible',
'ansible-runner>=1.1.0',
'PyYAML',
'psutil',
'jupyter',
'tqdm',
'docopt',
'six',
'ipywidgets',
],
entry_points={
"nbconvert.exporters": [
'ansible_tasks=ansible_kernel.exporters:AnsibleTasksExporter',
'ansible_playbook=ansible_kernel.exporters:AnsiblePlaybookExporter',
'ansible_zip=ansible_kernel.exporters:AnsibleZipExporter']
},
zip_safe=False
)
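# After `pip install .`, the Installer above registers the "ansible"
# kernelspec, so an "Ansible" kernel appears in the Jupyter notebook
# kernel list.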
| 33.241935 | 100 | 0.594372 | 201 | 2,061 | 5.945274 | 0.507463 | 0.097908 | 0.05523 | 0.043515 | 0.091213 | 0.091213 | 0.091213 | 0.091213 | 0.091213 | 0 | 0 | 0.006826 | 0.28918 | 2,061 | 61 | 101 | 33.786885 | 0.808874 | 0.013586 | 0 | 0 | 0 | 0 | 0.314286 | 0.126601 | 0 | 0 | 0 | 0 | 0 | 1 | 0.018519 | false | 0 | 0.111111 | 0 | 0.148148 | 0.018519 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
50abf4a3d73b7e94dd0dee0b774daa77e26dd362 | 3,458 | py | Python | qsim/sim_parameters.py | pangtao22/quasistatic_simulator | 7c6f99cc7237dd922f6eb0b54c580303e86b5223 | [
"MIT"
] | 5 | 2021-07-15T03:58:55.000Z | 2021-12-23T17:26:16.000Z | qsim/sim_parameters.py | pangtao22/quasistatic_simulator | 7c6f99cc7237dd922f6eb0b54c580303e86b5223 | [
"MIT"
] | 5 | 2021-08-16T22:27:54.000Z | 2022-02-07T18:06:07.000Z | qsim/sim_parameters.py | pangtao22/quasistatic_simulator | 7c6f99cc7237dd922f6eb0b54c580303e86b5223 | [
"MIT"
] | null | null | null | import enum
import sys
from collections import namedtuple
from qsim_cpp import GradientMode, QuasistaticSimParametersCpp
import numpy as np
"""
:param nd_per_contact: int, number of extreme rays per contact point.
:param contact_detection_tolerance: Signed distance pairs whose distances are
greater than this value are ignored in the simulator's non-penetration
constraints. Unit is in meters.
:param is_quasi_dynamic: bool. If True, dynamics of unactuated objects is
    given by sum(F) = M @ (v_(l+1) - 0). If False, it becomes sum(F) = 0
    instead.
    The mass matrix for unactuated objects is always added when the
    unconstrained version of the problem is solved. Not having a mass
    matrix can sometimes make the unconstrained program unbounded.
/*----------------------------------------------------------------------*/
/*---------Experimental features only supported in python.--------------*/
:param mode: Union['qp_mp', 'qp_cvx', 'unconstrained'].
- 'qp_mp': solves the standard QP for system states at the next time
step, using MathematicalProgram.
 - 'qp_cvx': solves the standard QP using cvxpy.
- 'unconstrained': solves an unconstrained version of the QP, obtained by
moving inequality constraints into the objective with
log barrier functions.
:param log_barrier_weight: float, used only when is_unconstrained == True.
/*----------------------------------------------------------------------*/
:param requires_grad: whether the gradient of v_next w.r.t the parameters of
the QP are computed.
Note that this parameter is only effective in
QuasistaticSimulator.step_default(...),
which is only used by QuasistaticSystem, which almost never computes
gradients. In applications that does compute gradient, such as
QuasistaticDynamics from irs_lqr, QuasistaticSimulator.step function is
invoked and a separate GradientMode value is passed to it explicitly.
:param gradient_from_active_constraints: bool. Whether the dynamics gradient is
computed from all constraints or only the active constraints.
"""
field_names = [
"gravity", "nd_per_contact", "contact_detection_tolerance",
"is_quasi_dynamic", "mode", "log_barrier_weight", "gradient_mode",
"grad_from_active_constraints"
]
defaults = [np.array([0, 0, -9.81]), 4, 0.01,
False, "qp_mp", 1e4, GradientMode.kNone, True]
if sys.version_info >= (3, 7):
QuasistaticSimParameters = namedtuple(
"QuasistaticSimParameters",
field_names=field_names,
defaults=defaults)
else:
QuasistaticSimParameters = namedtuple(
"QuasistaticSimParameters",
field_names=field_names)
QuasistaticSimParameters.__new__.__defaults__ = tuple(defaults)
QuasistaticSimParameters = QuasistaticSimParameters
def cpp_params_from_py_params(
sim_params: QuasistaticSimParameters) -> QuasistaticSimParametersCpp:
sim_params_cpp = QuasistaticSimParametersCpp()
sim_params_cpp.gravity = sim_params.gravity
sim_params_cpp.nd_per_contact = sim_params.nd_per_contact
sim_params_cpp.contact_detection_tolerance = (
sim_params.contact_detection_tolerance)
sim_params_cpp.is_quasi_dynamic = sim_params.is_quasi_dynamic
sim_params_cpp.gradient_mode = sim_params.gradient_mode
sim_params_cpp.gradient_from_active_constraints = (
sim_params.grad_from_active_constraints)
return sim_params_cpp
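
# Illustrative sketch (the overridden field values are arbitrary): construct
# the Python-side parameters and convert them for the C++ backend.
#
#     py_params = QuasistaticSimParameters(nd_per_contact=8,
#                                          is_quasi_dynamic=True)
#     cpp_params = cpp_params_from_py_params(py_params)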
| 47.369863 | 80 | 0.717756 | 422 | 3,458 | 5.654028 | 0.42891 | 0.05658 | 0.040235 | 0.020956 | 0.150042 | 0.084661 | 0.065381 | 0 | 0 | 0 | 0 | 0.005585 | 0.171486 | 3,458 | 72 | 81 | 48.027778 | 0.827225 | 0 | 0 | 0.114286 | 0 | 0 | 0.121951 | 0.069783 | 0 | 0 | 0 | 0 | 0 | 1 | 0.028571 | false | 0 | 0.142857 | 0 | 0.2 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
50ae99b4a8d1cf4a7f08aa7af523112b994d0216 | 3,666 | py | Python | data/featurizer_benchmark.py | kyledmiller/mit_model_code | ae46141ed8aa4686c7d7ca115baa6eb8fa9a130b | [
"MIT"
] | null | null | null | data/featurizer_benchmark.py | kyledmiller/mit_model_code | ae46141ed8aa4686c7d7ca115baa6eb8fa9a130b | [
"MIT"
] | null | null | null | data/featurizer_benchmark.py | kyledmiller/mit_model_code | ae46141ed8aa4686c7d7ca115baa6eb8fa9a130b | [
"MIT"
] | null | null | null | import os
import pandas as pd
import pymatgen as mg
from glob import glob
from zipfile import ZipFile
from sklearn.metrics import mean_squared_error, explained_variance_score, r2_score
# %% set up path constant
STRUCT_FOLDER_PATH = "../data/torrance_tables/benchmark_structures"
# %% check if the benchmark_structures.zip is unzipped
if not os.path.isdir(STRUCT_FOLDER_PATH):
# if still zipped, unzip the folder containing all the cif files
with ZipFile("".join([STRUCT_FOLDER_PATH, ".zip"])) as struct_files:
struct_files.extractall(path="../data/torrance_tables/")
# %%
def initialize_benchmark_df_helper(file_path):
"""Helper function for initialize_benchmark_df()"""
# read in the original structure
struct = mg.Structure.from_file(file_path)
# get the primitive cell
struct = struct.get_primitive_structure()
# make a supercell of 2a, 2b, 2c
struct.make_supercell([2, 2, 2])
# add oxidation states using the guess routine in Pymatgen
struct.add_oxidation_state_by_guess()
return {"formula": struct.composition.reduced_formula,
"structure_oxid": struct}
def initialize_benchmark_df():
"""Return a dataframe containing all the cif files in the target directory"""
# get all the cif file paths as a list
cif_file_paths = glob(STRUCT_FOLDER_PATH + "/*.cif")
# for each file path, read in the structure and get its reduced formula
cif_lst_dict = [initialize_benchmark_df_helper(file) for file in cif_file_paths]
return pd.DataFrame(cif_lst_dict)
# %%
def process_benchmark_df(df_input):
"""Take in the featurized the benchmark dataframe and clean it up"""
# select the relevant columns
df_output = df_input[["formula", "avg_mx_dists", "avg_mm_dists", "iv", "iv_p1",
"v_m", "v_x", "est_hubbard_u", "est_charge_trans"]]
# rename the column names to match those found in torrance_tabulated.xlsx
df_output = df_output.rename(columns={"avg_mx_dists": "d_mo", "avg_mm_dists": "d_mm", "v_x": "v_o",
"est_hubbard_u": "hubbard", "est_charge_trans": "charge_transfer"})
# drop rows containing NA values, sort by the formula and reindex the dataframe
return df_output.dropna().sort_values("formula").reset_index(drop=True)
# %%
def process_torrance_df(df_torr, df_bench):
"""
Take in the processed benchmark dataframe and the unprocessed torrance dataframe.
Then match the two dataframes
"""
# drop the irrelevant columns
df_torr = df_torr.drop(columns=["spacegroup_symbol", "spacegroup_number", "ref", "_em/s", "mu",
"optical_bandgap", "class_label", "v"])
# remove duplicate rows
df_torr = df_torr.drop_duplicates(ignore_index=True)
# use left join to select rows only present in the benchmark dataframe
df_torr = df_bench[["formula"]].join(df_torr.set_index("formula"), on="formula", sort=True)
return df_torr.reset_index(drop=True)
# %% calculate rmse, explained variance and R^2 values for each feature
def evaluate_performance(df_true, df_pred):
"""
Evaluate the performance of the handbuilt featurizer
through RMSE, explained variance score and R^2 values
"""
rmse = mean_squared_error(df_true, df_pred, multioutput="raw_values", squared=False)
var_explained = explained_variance_score(df_true, df_pred, multioutput="raw_values")
r2_scores = r2_score(df_true, df_pred, multioutput="raw_values")
return pd.DataFrame(list(zip(df_pred.columns, rmse, var_explained, r2_scores)),
columns=["feature", "rmse", "var_explained", "r_squared"])
| 44.168675 | 109 | 0.708947 | 516 | 3,666 | 4.786822 | 0.354651 | 0.019433 | 0.025911 | 0.019433 | 0.100405 | 0.042915 | 0.042915 | 0.02996 | 0 | 0 | 0 | 0.004376 | 0.18958 | 3,666 | 82 | 110 | 44.707317 | 0.826994 | 0.317512 | 0 | 0 | 0 | 0 | 0.174074 | 0.027984 | 0 | 0 | 0 | 0 | 0 | 1 | 0.128205 | false | 0 | 0.153846 | 0 | 0.410256 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
50b15a76976395bb453c2a0d86d1b989f8156d7c | 2,313 | py | Python | data_science/code/plot.py | markhend/demo | fed18b752a67645780ccbb56bd41d6ec51868d28 | [
"Apache-2.0"
] | 3 | 2020-05-12T22:56:48.000Z | 2020-11-10T04:03:19.000Z | data_science/code/plot.py | markhend/demo | fed18b752a67645780ccbb56bd41d6ec51868d28 | [
"Apache-2.0"
] | 3 | 2020-04-03T16:15:11.000Z | 2020-06-07T21:25:07.000Z | data_science/code/plot.py | markhend/demo | fed18b752a67645780ccbb56bd41d6ec51868d28 | [
"Apache-2.0"
] | 3 | 2020-06-07T03:08:51.000Z | 2022-02-18T20:09:27.000Z | import collections, json, re
import click
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import pandas as pd
import numpy as np
import conducto as co
# Data is downloaded from the United States Energy Information Administration.
# https://www.eia.gov/opendata/bulkfiles.php
@click.command()
@click.option("--dataset", required=True, help="dataset name")
def plot(dataset):
"""
Read in the downloaded data, extract the specified datasets, and plot them.
"""
data_text = co.data.user.gets("steo-data")
all_data = [json.loads(line) for line in data_text.splitlines()]
DATASETS = {
"heating" : r"^STEO.ZWHD_[^_]*\.M$",
"cooling" : r"^STEO.ZWCD_[^_]*.M$",
}
regex = DATASETS[dataset]
subset_data = [d for d in all_data if "series_id" in d and re.search(regex, d["series_id"])]
# Create a pandas DataFrame with the data grouped by month of the year.
# This could be implemented with vectorized pandas logic but this data
# is small enough not to worry.
data = {}
for i, d in enumerate(subset_data):
by_month = collections.defaultdict(list)
for yyyymm, value in d["data"]:
month = int(yyyymm[-2:])
by_month[month].append(value)
y = [np.mean(by_month[month]) for month in range(1, 13)]
data[d['name']] = y
df = pd.DataFrame(data=data)
df["Month"] = ["Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep", "Oct", "Nov", "Dec"]
df.set_index("Month", inplace=True)
# Graph each dataset as one line on a single plot.
colors = [cm.viridis(z) for z in np.linspace(0, .99, len(subset_data))]
for i, column in enumerate(df.columns):
y = df[column].values
plt.plot(y, label=column, color=colors[i])
plt.title(f"{dataset}, average by month")
plt.legend(loc="best", fontsize="x-small")
# Save to disk, and then to co.data.pipeline for url.
filename = "/tmp/image.png"
dataname = f"conducto/demo/data_science/{dataset}.png"
plt.savefig(filename)
co.data.pipeline.put(dataname, filename)
# Print out results as markdown
print(f"""<ConductoMarkdown>
})
{df.transpose().round(2).to_markdown()}
</ConductoMarkdown>""")
if __name__ == "__main__":
plot()
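# Example invocation (assumes the STEO bulk download was previously stored
# under the "steo-data" key in co.data.user):
#
#     python plot.py --dataset heating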
| 32.577465 | 102 | 0.645482 | 336 | 2,313 | 4.363095 | 0.505952 | 0.020464 | 0.028649 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.004357 | 0.206226 | 2,313 | 70 | 103 | 33.042857 | 0.794118 | 0.21444 | 0 | 0 | 0 | 0 | 0.20903 | 0.066332 | 0 | 0 | 0 | 0 | 0 | 1 | 0.022222 | false | 0 | 0.155556 | 0 | 0.177778 | 0.022222 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
50b1d9d2d34355a672db53d3fa29f225e29dbf86 | 879 | py | Python | tests/fabric/test_probes.py | chaostoolkit-incubator/chaostoolkit-service-fabric | b8357705aa172c9d6d06b7f0dd41d77343a93619 | [
"Apache-2.0"
] | null | null | null | tests/fabric/test_probes.py | chaostoolkit-incubator/chaostoolkit-service-fabric | b8357705aa172c9d6d06b7f0dd41d77343a93619 | [
"Apache-2.0"
] | 3 | 2019-03-22T08:38:08.000Z | 2019-04-01T16:40:19.000Z | tests/fabric/test_probes.py | chaostoolkit-incubator/chaostoolkit-service-fabric | b8357705aa172c9d6d06b7f0dd41d77343a93619 | [
"Apache-2.0"
] | 1 | 2019-03-22T07:51:00.000Z | 2019-03-22T07:51:00.000Z | # -*- coding: utf-8 -*-
import os.path
import requests_mock
from urllib.parse import urlencode
from chaosservicefabric.cluster.probes import chaos_report
SF_BASE_URL = "https://localhost:19080"
CONFIG = {
"endpoint": SF_BASE_URL,
"verify_tls": False
}
SECRETS = {
"pem_path": os.path.abspath(
os.path.join(os.path.dirname(__file__), "cert.pem"))
}
def test_chaos_report():
# we don't match the start/end UTC
q = urlencode({
"api-version": "6.0",
"timeout": 60
})
url = "{}/Tools/Chaos/$/Report?{}".format(SF_BASE_URL, q)
with requests_mock.mock() as m:
m.get(url, json={})
result = chaos_report(
start_time_utc="4 minutes ago", end_time_utc="now",
configuration=CONFIG, secrets=SECRETS)
assert m.called
assert m.call_count == 1
assert result == {}
| 21.975 | 63 | 0.61661 | 116 | 879 | 4.474138 | 0.612069 | 0.046243 | 0.052023 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.018018 | 0.242321 | 879 | 39 | 64 | 22.538462 | 0.761261 | 0.062571 | 0 | 0 | 0 | 0 | 0.146163 | 0.031669 | 0 | 0 | 0 | 0 | 0.111111 | 1 | 0.037037 | false | 0 | 0.148148 | 0 | 0.185185 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
50b2d47ea943ce86fb0c08699433db094d42a0c7 | 11,254 | py | Python | virtual/lib/python3.8/site-packages/dns/tsig.py | Lenus254/personal_blog | aac38e4b5372c86efa8e24db2e051fef8e5feef8 | [
"Unlicense"
] | 1,666 | 2015-01-02T17:46:14.000Z | 2022-03-30T07:27:32.000Z | dns/tsig.py | felixonmars/dnspython | 2691834df42aab74914883fdf26109aeb62ec647 | [
"ISC"
] | 591 | 2015-01-16T12:19:49.000Z | 2022-03-30T21:32:11.000Z | dns/tsig.py | felixonmars/dnspython | 2691834df42aab74914883fdf26109aeb62ec647 | [
"ISC"
] | 481 | 2015-01-14T04:14:43.000Z | 2022-03-30T19:28:52.000Z | # Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
# Copyright (C) 2001-2007, 2009-2011 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""DNS TSIG support."""
import base64
import hashlib
import hmac
import struct
import dns.exception
import dns.rdataclass
import dns.name
import dns.rcode
class BadTime(dns.exception.DNSException):
"""The current time is not within the TSIG's validity time."""
class BadSignature(dns.exception.DNSException):
"""The TSIG signature fails to verify."""
class BadKey(dns.exception.DNSException):
"""The TSIG record owner name does not match the key."""
class BadAlgorithm(dns.exception.DNSException):
"""The TSIG algorithm does not match the key."""
class PeerError(dns.exception.DNSException):
"""Base class for all TSIG errors generated by the remote peer"""
class PeerBadKey(PeerError):
"""The peer didn't know the key we used"""
class PeerBadSignature(PeerError):
"""The peer didn't like the signature we sent"""
class PeerBadTime(PeerError):
"""The peer didn't like the time we sent"""
class PeerBadTruncation(PeerError):
"""The peer didn't like amount of truncation in the TSIG we sent"""
# TSIG Algorithms
HMAC_MD5 = dns.name.from_text("HMAC-MD5.SIG-ALG.REG.INT")
HMAC_SHA1 = dns.name.from_text("hmac-sha1")
HMAC_SHA224 = dns.name.from_text("hmac-sha224")
HMAC_SHA256 = dns.name.from_text("hmac-sha256")
HMAC_SHA256_128 = dns.name.from_text("hmac-sha256-128")
HMAC_SHA384 = dns.name.from_text("hmac-sha384")
HMAC_SHA384_192 = dns.name.from_text("hmac-sha384-192")
HMAC_SHA512 = dns.name.from_text("hmac-sha512")
HMAC_SHA512_256 = dns.name.from_text("hmac-sha512-256")
GSS_TSIG = dns.name.from_text("gss-tsig")
default_algorithm = HMAC_SHA256
class GSSTSig:
"""
GSS-TSIG TSIG implementation. This uses the GSS-API context established
in the TKEY message handshake to sign messages using GSS-API message
integrity codes, per the RFC.
In order to avoid a direct GSSAPI dependency, the keyring holds a ref
to the GSSAPI object required, rather than the key itself.
"""
def __init__(self, gssapi_context):
self.gssapi_context = gssapi_context
self.data = b''
self.name = 'gss-tsig'
def update(self, data):
self.data += data
def sign(self):
# defer to the GSSAPI function to sign
return self.gssapi_context.get_signature(self.data)
def verify(self, expected):
try:
# defer to the GSSAPI function to verify
return self.gssapi_context.verify_signature(self.data, expected)
except Exception:
# note the usage of a bare exception
raise BadSignature
class GSSTSigAdapter:
def __init__(self, keyring):
self.keyring = keyring
def __call__(self, message, keyname):
if keyname in self.keyring:
key = self.keyring[keyname]
if isinstance(key, Key) and key.algorithm == GSS_TSIG:
if message:
GSSTSigAdapter.parse_tkey_and_step(key, message, keyname)
return key
else:
return None
@classmethod
def parse_tkey_and_step(cls, key, message, keyname):
# if the message is a TKEY type, absorb the key material
# into the context using step(); this is used to allow the
# client to complete the GSSAPI negotiation before attempting
# to verify the signed response to a TKEY message exchange
try:
rrset = message.find_rrset(message.answer, keyname,
dns.rdataclass.ANY,
dns.rdatatype.TKEY)
if rrset:
token = rrset[0].key
gssapi_context = key.secret
return gssapi_context.step(token)
except KeyError:
pass
class HMACTSig:
"""
HMAC TSIG implementation. This uses the HMAC python module to handle the
sign/verify operations.
"""
_hashes = {
HMAC_SHA1: hashlib.sha1,
HMAC_SHA224: hashlib.sha224,
HMAC_SHA256: hashlib.sha256,
HMAC_SHA256_128: (hashlib.sha256, 128),
HMAC_SHA384: hashlib.sha384,
HMAC_SHA384_192: (hashlib.sha384, 192),
HMAC_SHA512: hashlib.sha512,
HMAC_SHA512_256: (hashlib.sha512, 256),
HMAC_MD5: hashlib.md5,
}
def __init__(self, key, algorithm):
try:
hashinfo = self._hashes[algorithm]
except KeyError:
raise NotImplementedError(f"TSIG algorithm {algorithm} " +
"is not supported")
# create the HMAC context
if isinstance(hashinfo, tuple):
self.hmac_context = hmac.new(key, digestmod=hashinfo[0])
self.size = hashinfo[1]
else:
self.hmac_context = hmac.new(key, digestmod=hashinfo)
self.size = None
self.name = self.hmac_context.name
if self.size:
self.name += f'-{self.size}'
def update(self, data):
return self.hmac_context.update(data)
def sign(self):
# defer to the HMAC digest() function for that digestmod
digest = self.hmac_context.digest()
if self.size:
digest = digest[: (self.size // 8)]
return digest
def verify(self, expected):
# re-digest and compare the results
mac = self.sign()
if not hmac.compare_digest(mac, expected):
raise BadSignature
def _digest(wire, key, rdata, time=None, request_mac=None, ctx=None,
multi=None):
"""Return a context containing the TSIG rdata for the input parameters
@rtype: dns.tsig.HMACTSig or dns.tsig.GSSTSig object
@raises ValueError: I{other_data} is too long
@raises NotImplementedError: I{algorithm} is not supported
"""
first = not (ctx and multi)
if first:
ctx = get_context(key)
if request_mac:
ctx.update(struct.pack('!H', len(request_mac)))
ctx.update(request_mac)
ctx.update(struct.pack('!H', rdata.original_id))
ctx.update(wire[2:])
if first:
ctx.update(key.name.to_digestable())
ctx.update(struct.pack('!H', dns.rdataclass.ANY))
ctx.update(struct.pack('!I', 0))
if time is None:
time = rdata.time_signed
upper_time = (time >> 32) & 0xffff
lower_time = time & 0xffffffff
time_encoded = struct.pack('!HIH', upper_time, lower_time, rdata.fudge)
other_len = len(rdata.other)
if other_len > 65535:
raise ValueError('TSIG Other Data is > 65535 bytes')
if first:
ctx.update(key.algorithm.to_digestable() + time_encoded)
ctx.update(struct.pack('!HH', rdata.error, other_len) + rdata.other)
else:
ctx.update(time_encoded)
return ctx
def _maybe_start_digest(key, mac, multi):
"""If this is the first message in a multi-message sequence,
start a new context.
@rtype: dns.tsig.HMACTSig or dns.tsig.GSSTSig object
"""
if multi:
ctx = get_context(key)
ctx.update(struct.pack('!H', len(mac)))
ctx.update(mac)
return ctx
else:
return None
def sign(wire, key, rdata, time=None, request_mac=None, ctx=None, multi=False):
"""Return a (tsig_rdata, mac, ctx) tuple containing the HMAC TSIG rdata
for the input parameters, the HMAC MAC calculated by applying the
TSIG signature algorithm, and the TSIG digest context.
@rtype: (string, dns.tsig.HMACTSig or dns.tsig.GSSTSig object)
@raises ValueError: I{other_data} is too long
@raises NotImplementedError: I{algorithm} is not supported
"""
ctx = _digest(wire, key, rdata, time, request_mac, ctx, multi)
mac = ctx.sign()
tsig = rdata.replace(time_signed=time, mac=mac)
return (tsig, _maybe_start_digest(key, mac, multi))
def validate(wire, key, owner, rdata, now, request_mac, tsig_start, ctx=None,
multi=False):
"""Validate the specified TSIG rdata against the other input parameters.
@raises FormError: The TSIG is badly formed.
@raises BadTime: There is too much time skew between the client and the
server.
@raises BadSignature: The TSIG signature did not validate
@rtype: dns.tsig.HMACTSig or dns.tsig.GSSTSig object"""
(adcount,) = struct.unpack("!H", wire[10:12])
if adcount == 0:
raise dns.exception.FormError
adcount -= 1
new_wire = wire[0:10] + struct.pack("!H", adcount) + wire[12:tsig_start]
if rdata.error != 0:
if rdata.error == dns.rcode.BADSIG:
raise PeerBadSignature
elif rdata.error == dns.rcode.BADKEY:
raise PeerBadKey
elif rdata.error == dns.rcode.BADTIME:
raise PeerBadTime
elif rdata.error == dns.rcode.BADTRUNC:
raise PeerBadTruncation
else:
raise PeerError('unknown TSIG error code %d' % rdata.error)
if abs(rdata.time_signed - now) > rdata.fudge:
raise BadTime
if key.name != owner:
raise BadKey
if key.algorithm != rdata.algorithm:
raise BadAlgorithm
ctx = _digest(new_wire, key, rdata, None, request_mac, ctx, multi)
ctx.verify(rdata.mac)
return _maybe_start_digest(key, rdata.mac, multi)
def get_context(key):
"""Returns an HMAC context for the specified key.
@rtype: HMAC context
@raises NotImplementedError: I{algorithm} is not supported
"""
if key.algorithm == GSS_TSIG:
return GSSTSig(key.secret)
else:
return HMACTSig(key.secret, key.algorithm)
class Key:
def __init__(self, name, secret, algorithm=default_algorithm):
if isinstance(name, str):
name = dns.name.from_text(name)
self.name = name
if isinstance(secret, str):
secret = base64.decodebytes(secret.encode())
self.secret = secret
if isinstance(algorithm, str):
algorithm = dns.name.from_text(algorithm)
self.algorithm = algorithm
def __eq__(self, other):
return (isinstance(other, Key) and
self.name == other.name and
self.secret == other.secret and
self.algorithm == other.algorithm)
def __repr__(self):
r = f"<DNS key name='{self.name}', " + \
f"algorithm='{self.algorithm}'"
if self.algorithm != GSS_TSIG:
r += f", secret='{base64.b64encode(self.secret).decode()}'"
r += ">"
return r
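
# Illustrative usage sketch (key name and secret are placeholders):
#
#     key = Key('keyname.example.', 'AAAAAAAAAAAAAAAAAAAAAA==', HMAC_SHA256)
#     # `key` (or a keyring containing it) can then be supplied to message
#     # signing/validation code, which calls sign()/validate() above with
#     # the message wire format.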
| 32.432277 | 79 | 0.647681 | 1,472 | 11,254 | 4.855978 | 0.220109 | 0.012731 | 0.018467 | 0.025182 | 0.210688 | 0.157387 | 0.101007 | 0.070929 | 0.059177 | 0.046726 | 0 | 0.022326 | 0.255731 | 11,254 | 346 | 80 | 32.526012 | 0.831065 | 0.292963 | 0 | 0.15 | 0 | 0 | 0.049365 | 0.013086 | 0 | 0 | 0.002073 | 0 | 0 | 1 | 0.095 | false | 0.005 | 0.04 | 0.015 | 0.285 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
50b2e7847b1ec99e0a3ddcfe13080542c95dacbd | 4,149 | py | Python | export.py | Leidos-CSS-IRAD/zfs-textfile-collector | 1da93f96310136b0de72aada206c6442970a58f6 | [
"Apache-2.0"
] | null | null | null | export.py | Leidos-CSS-IRAD/zfs-textfile-collector | 1da93f96310136b0de72aada206c6442970a58f6 | [
"Apache-2.0"
] | null | null | null | export.py | Leidos-CSS-IRAD/zfs-textfile-collector | 1da93f96310136b0de72aada206c6442970a58f6 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python3
from typing import List, Tuple
from enum import Enum, auto
import argparse
from zpool_parser import get_zpool_status, ZPoolState, DriveStatus, SubpoolType, SubpoolStatus, ZPoolStatus


def export_zfs_text(pool_data: List[ZPoolStatus]):
    return export_zfs_pool_health(pool_data) \
        + export_zfs_drive_health(pool_data) \
        + export_zfs_resilver_status(pool_data) \
        + export_zfs_resilver_time(pool_data) \
        + export_zfs_resilver_last_time(pool_data) \
        + export_zfs_scrub_status(pool_data) \
        + export_zfs_scrub_time(pool_data) \
        + export_zfs_scrub_last_time(pool_data)
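

# For reference, each exporter below emits Prometheus text-exposition blocks of
# the form (the pool name and value here are illustrative):
#
#   # HELP ZFS_Pool_Health: 0=healthy, 1=degraded
#   # TYPE ZFS_Pool_Health gauge
#   ZFS_Pool_Health{pool="tank"} 0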


def export_zfs_pool_health(pool_data: List[ZPoolStatus]) -> str:
    export = ("# HELP ZFS_Pool_Health: 0=healthy, 1=degraded\n"
              "# TYPE ZFS_Pool_Health gauge\n")
    for pool in pool_data:
        export += "ZFS_Pool_Health{{pool=\"{0}\"}} {1}\n".format(
            pool.name, pool.state.value)
    return export + "\n"


def export_zfs_drive_health(pool_data: List[ZPoolStatus]) -> str:
    export = ("# HELP ZFS_Drive_Health: 0=healthy, 1=degraded, 2=unavail\n"
              "# TYPE ZFS_Drive_Health gauge\n")
    for pool in pool_data:
        for subpool in pool.subpools:
            for drive in subpool.drives:
                export += "ZFS_Drive_Health{{pool=\"{0}\", name=\"{1}\"}} {2}\n".format(
                    pool.name, drive.name, drive.state.value)
    return export + "\n"


def export_zfs_resilver_status(pool_data: List[ZPoolStatus]) -> str:
    export = ("# HELP ZFS_Resilver_Status: 0=not resilvering, 1=resilvering\n"
              "# TYPE ZFS_Resilver_Status gauge\n")
    for pool in pool_data:
        export += "ZFS_Resilver_Status{{pool=\"{0}\"}} {1}\n".format(
            pool.name, 1 if pool.currently_resilvering else 0)
    return export + "\n"


def export_zfs_resilver_time(pool_data: List[ZPoolStatus]) -> str:
    export = ("# HELP ZFS_Resilver_Time_Remaining: time in seconds\n"
              "# TYPE ZFS_Resilver_Time_Remaining gauge\n")
    for pool in pool_data:
        export += "ZFS_Resilver_Time_Remaining{{pool=\"{0}\"}} {1}\n".format(
            pool.name, pool.resilver_time_remaining)
    return export + "\n"


def export_zfs_resilver_last_time(pool_data: List[ZPoolStatus]) -> str:
    export = ("# HELP ZFS_Resilver_Last_Time: time since epoch\n"
              "# TYPE ZFS_Resilver_Last_Time gauge\n")
    for pool in pool_data:
        export += "ZFS_Resilver_Last_Time{{pool=\"{0}\"}} {1}\n".format(
            pool.name, pool.last_resilver)
    return export + "\n"


def export_zfs_scrub_status(pool_data: List[ZPoolStatus]) -> str:
    export = ("# HELP ZFS_Scrub_Status: 0=not scrubbing, 1=scrubbing\n"
              "# TYPE ZFS_Scrub_Status gauge\n")
    for pool in pool_data:
        export += "ZFS_Scrub_Status{{pool=\"{0}\"}} {1}\n".format(
            pool.name, 1 if pool.currently_scrubbing else 0)
    return export + "\n"


def export_zfs_scrub_time(pool_data: List[ZPoolStatus]) -> str:
    export = ("# HELP ZFS_Scrub_Time_Remaining: time in seconds\n"
              "# TYPE ZFS_Scrub_Time_Remaining gauge\n")
    for pool in pool_data:
        export += "ZFS_Scrub_Time_Remaining{{pool=\"{0}\"}} {1}\n".format(
            pool.name, pool.scrub_time_remaining)
    return export + "\n"


def export_zfs_scrub_last_time(pool_data: List[ZPoolStatus]) -> str:
    export = ("# HELP ZFS_Scrub_Last_Time: time since epoch\n"
              "# TYPE ZFS_Scrub_Last_Time gauge\n")
    for pool in pool_data:
        export += "ZFS_Scrub_Last_Time{{pool=\"{0}\"}} {1}\n".format(
            pool.name, pool.last_scrub)
    return export + "\n"


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Generate Prometheus formatted list.")
    parser.add_argument("-o", "--output-file", type=str, help="path + filename to output to")
    args = parser.parse_args()
    # print(args)

    if args.output_file:
        with open(args.output_file, "w") as f:
            f.write(export_zfs_text(get_zpool_status()))
    else:
        print(export_zfs_text(get_zpool_status()))
| 34.289256 | 107 | 0.651723 | 567 | 4,149 | 4.469136 | 0.151675 | 0.095896 | 0.077348 | 0.093923 | 0.707972 | 0.676401 | 0.610892 | 0.559195 | 0.375691 | 0.286898 | 0 | 0.00958 | 0.220053 | 4,149 | 120 | 108 | 34.575 | 0.773486 | 0.00699 | 0 | 0.2 | 0 | 0 | 0.266149 | 0.090092 | 0 | 0 | 0 | 0 | 0 | 1 | 0.1125 | false | 0 | 0.05 | 0.0125 | 0.275 | 0.0125 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
50b3be040b5ac53c09fde0492bdbea38894cf9ee | 1,750 | py | Python | sigpy/mri/sim.py | kmjohnson3/sigpy | 6d5f9c66f7446a13b3615c31446bbce8adc5dfaa | [
"BSD-3-Clause"
] | 196 | 2018-07-07T00:42:42.000Z | 2022-03-22T02:30:24.000Z | sigpy/mri/sim.py | kmjohnson3/sigpy | 6d5f9c66f7446a13b3615c31446bbce8adc5dfaa | [
"BSD-3-Clause"
] | 79 | 2018-10-12T19:53:21.000Z | 2022-03-30T13:44:41.000Z | sigpy/mri/sim.py | kmjohnson3/sigpy | 6d5f9c66f7446a13b3615c31446bbce8adc5dfaa | [
"BSD-3-Clause"
] | 68 | 2018-09-26T03:46:42.000Z | 2022-03-11T03:51:49.000Z | # -*- coding: utf-8 -*-
"""MRI simulation functions.
"""
import numpy as np
__all__ = ['birdcage_maps']


def birdcage_maps(shape, r=1.5, nzz=8, dtype=complex):  # `complex` replaces the removed `np.complex` alias
    """Simulates birdcage coil sensitivities.

    Args:
        shape (tuple of ints): sensitivity maps shape,
            can be of length 3, and 4.
        r (float): relative radius of birdcage.
        nzz (int): number of coils per ring.
        dtype (Dtype): data type.

    Returns:
        array.
    """
    if len(shape) == 3:
        nc, ny, nx = shape
        c, y, x = np.mgrid[:nc, :ny, :nx]

        coilx = r * np.cos(c * (2 * np.pi / nc))
        coily = r * np.sin(c * (2 * np.pi / nc))
        coil_phs = -c * (2 * np.pi / nc)

        x_co = (x - nx / 2.0) / (nx / 2.0) - coilx
        y_co = (y - ny / 2.0) / (ny / 2.0) - coily
        rr = np.sqrt(x_co ** 2 + y_co ** 2)
        phi = np.arctan2(x_co, -y_co) + coil_phs
        out = (1.0 / rr) * np.exp(1j * phi)
    elif len(shape) == 4:
        nc, nz, ny, nx = shape
        c, z, y, x = np.mgrid[:nc, :nz, :ny, :nx]

        coilx = r * np.cos(c * (2 * np.pi / nzz))
        coily = r * np.sin(c * (2 * np.pi / nzz))
        coilz = np.floor(c / nzz) - 0.5 * (np.ceil(nc / nzz) - 1)
        coil_phs = -(c + np.floor(c / nzz)) * (2 * np.pi / nzz)

        x_co = (x - nx / 2.0) / (nx / 2.0) - coilx
        y_co = (y - ny / 2.0) / (ny / 2.0) - coily
        z_co = (z - nz / 2.0) / (nz / 2.0) - coilz
        rr = (x_co**2 + y_co**2 + z_co**2)**0.5
        phi = np.arctan2(x_co, -y_co) + coil_phs
        out = (1 / rr) * np.exp(1j * phi)
    else:
        raise ValueError('Can only generate shape with length 3 or 4')

    rss = sum(abs(out) ** 2, 0)**0.5
    out /= rss

    return out.astype(dtype)
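

# Example usage (a hedged sketch; shapes here are arbitrary):
#
#   maps = birdcage_maps((8, 64, 64))
#   maps.shape                         # -> (8, 64, 64): (coils, ny, nx)
#   np.sum(np.abs(maps) ** 2, axis=0)  # ~1 everywhere after RSS normalization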
| 28.688525 | 70 | 0.474857 | 294 | 1,750 | 2.744898 | 0.306122 | 0.02974 | 0.037175 | 0.037175 | 0.349442 | 0.270136 | 0.25031 | 0.25031 | 0.208178 | 0.208178 | 0 | 0.049479 | 0.341714 | 1,750 | 60 | 71 | 29.166667 | 0.651042 | 0.179429 | 0 | 0.1875 | 0 | 0 | 0.039654 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.03125 | false | 0 | 0.03125 | 0 | 0.09375 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
50bc40bc9362dd4e1b304db3808ee6ce2b7ed4c7 | 744 | py | Python | TF-Demo/AlexNetDemo/demo.py | iViolinSolo/DeepLearning-GetStarted | d7281804f355ab4da8a7709ed735ebd5f5c91110 | [
"Apache-2.0"
] | null | null | null | TF-Demo/AlexNetDemo/demo.py | iViolinSolo/DeepLearning-GetStarted | d7281804f355ab4da8a7709ed735ebd5f5c91110 | [
"Apache-2.0"
] | null | null | null | TF-Demo/AlexNetDemo/demo.py | iViolinSolo/DeepLearning-GetStarted | d7281804f355ab4da8a7709ed735ebd5f5c91110 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Author: violinsolo
# Created on 14/12/2017
import tensorflow as tf
# load data model
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("./tmp/data/train", one_hot=True)
# define network hyper-params
learning_rate = 0.001
training_iter = 200000
batch_size = 128
display_step = 10
# set network parameters
n_input = 784  # 28 x 28 image shape
n_classes = 10 # 10 classes, count from 1 to 10
n_dropout = 0.75 # 0.75 probability to keep input
# set placeholders
X = tf.placeholder(dtype='float32', shape=[None, n_input], name='inputX')
y = tf.placeholder(dtype='float32', shape=[None, 10], name='resultY')
keep_prob = tf.placeholder(dtype='float32')
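
# Sketch of how these placeholders would be fed during training (assumes a
# TF1-style session; `sess` and `train_op` are not defined in this file):
#
#   batch_x, batch_y = mnist.train.next_batch(batch_size)
#   sess.run(train_op, feed_dict={X: batch_x, y: batch_y, keep_prob: n_dropout})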
| 24 | 73 | 0.731183 | 117 | 744 | 4.529915 | 0.641026 | 0.073585 | 0.101887 | 0.141509 | 0.128302 | 0.128302 | 0 | 0 | 0 | 0 | 0 | 0.081761 | 0.145161 | 744 | 30 | 74 | 24.8 | 0.751572 | 0.329301 | 0 | 0 | 0 | 0 | 0.102669 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.153846 | 0 | 0.153846 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
50bc66ec3f4f317e3d7449094b39d452ee7ac94e | 1,219 | py | Python | ahem/utils.py | Axilent/oink | 623481fdac0120b4a68b74b11d2a54793d595fce | [
"BSD-3-Clause"
] | 6 | 2015-06-09T19:42:38.000Z | 2018-02-10T14:20:21.000Z | ahem/utils.py | Axilent/oink | 623481fdac0120b4a68b74b11d2a54793d595fce | [
"BSD-3-Clause"
] | 24 | 2015-06-05T20:27:04.000Z | 2021-06-10T17:41:46.000Z | ahem/utils.py | Axilent/oink | 623481fdac0120b4a68b74b11d2a54793d595fce | [
"BSD-3-Clause"
] | 4 | 2015-06-09T19:42:42.000Z | 2017-05-18T05:22:16.000Z | from __future__ import unicode_literals
from importlib import import_module
from django.conf import settings
from ahem.loader import notification_registry
from ahem.settings import AHEM_BACKENDS


def get_notification(notification_name):
    return notification_registry[notification_name]()


def get_backend(backend_name):
    if hasattr(settings, 'AHEM_BACKENDS'):
        backend_paths = settings.AHEM_BACKENDS
    else:
        backend_paths = AHEM_BACKENDS

    for path in backend_paths:
        module, backend_class = path.rsplit(".", 1)
        module = import_module(module)
        backend = getattr(module, backend_class)
        if backend.name == backend_name:
            return backend()

    raise Exception("The specified backend is not registered. Add it to AHEM_BACKENDS.")


def celery_is_available():
    try:
        import celery
    except ImportError:
        return False
    else:
        return True


def register_user(backend_name, user, **settings):
    backend = get_backend(backend_name)
    backend.register_user(user, **settings)


def schedule_notification(notification_name, **params):
    notification = get_notification(notification_name)
    notification.schedule(**params)
| 25.395833 | 88 | 0.727646 | 142 | 1,219 | 5.992958 | 0.366197 | 0.070505 | 0.098707 | 0.072855 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.001026 | 0.200164 | 1,219 | 47 | 89 | 25.93617 | 0.871795 | 0 | 0 | 0.0625 | 0 | 0 | 0.064807 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.15625 | false | 0 | 0.25 | 0.03125 | 0.53125 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
50be6d3e9a60579a13bebcb17b8d0a990168015a | 7,935 | py | Python | tests/test_compute_raw.py | zmitchell/trcdproc | dbae216deeb56774bdeba18b92657118f83928f5 | [
"MIT"
] | null | null | null | tests/test_compute_raw.py | zmitchell/trcdproc | dbae216deeb56774bdeba18b92657118f83928f5 | [
"MIT"
] | null | null | null | tests/test_compute_raw.py | zmitchell/trcdproc | dbae216deeb56774bdeba18b92657118f83928f5 | [
"MIT"
] | null | null | null | from os import remove
import h5py
import numpy as np
from pytest import fixture, raises
import trcdproc.compute.raw as comp
from trcdproc.core import Array, InputChannel, PumpStatus


@fixture(scope='function')
def data_for_compute_tests():
    """Produces an HDF5 file with the following structure:

    File
        round001
            76487
                pump
                    time
                    perp
                    par
                    ref
                nopump
                    time
                    perp
                    par
                    ref
            76715
                (same structure)
        round002
            (same structure)

    This file is intended to be used for testing the `trcdproc.compute` package, so the data in
    the file is created with known means, noises, etc.
    """
    filename = 'compute.h5'
    file = h5py.File(filename, 'w', libver='latest')
    points = 50_000
    time_data = np.linspace(0, 4e-4, points, dtype=np.float64)
    perp_mean = 0
    par_mean = 1
    ref_mean = 2
    perp_noise = 0.1
    par_noise = 0.2
    ref_noise = 0.3
    perp_data = np.random.normal(perp_mean, perp_noise, points)
    par_data = np.random.normal(par_mean, par_noise, points)
    ref_data = np.random.normal(ref_mean, ref_noise, points)
    rounds_root = file.create_group('rounds')
    for rnd in ['round001', 'round002']:
        for wav in ['76487', '76715']:
            for pump in ['pump', 'nopump']:
                group_path = f'{rnd}/{wav}/{pump}'
                group = rounds_root.require_group(group_path)
                group.create_dataset('time', data=time_data)
                group.create_dataset('perp', data=perp_data)
                group.create_dataset('par', data=par_data)
                group.create_dataset('ref', data=ref_data)
    yield file
    file.close()
    remove(filename)
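

# Note for the bounds fixtures below: with 50_000 samples, the sample mean of
# N(mu, sigma) deviates from mu by roughly sigma / sqrt(50_000) (about 0.0004
# for the perp channel's sigma of 0.1), so the windows can be fairly tight.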


@fixture(scope='function')
def mean_bounds():
    """The upper and lower bounds for which the computed mean is considered correct
    """
    bounds = {
        'perp': {
            'lower': -0.01,
            'upper': 0.01,
        },
        'par': {
            'lower': 0.99,
            'upper': 1.01,
        },
        'ref': {
            'lower': 1.98,
            'upper': 2.02,
        }
    }
    return bounds


@fixture(scope='function')
def noise_bounds():
    """The upper and lower bounds for which the computed noise is considered correct
    """
    bounds = {
        'perp': {
            'lower': 0.099,
            'upper': 0.101,
        },
        'par': {
            'lower': 0.198,
            'upper': 0.202,
        },
        'ref': {
            'lower': 0.297,
            'upper': 0.303,
        }
    }
    return bounds


def test_channel_heatmap(data_for_compute_tests, mean_bounds):
    """Does a basic test of the heatmap data generator by making a heatmap of
    the means in the pumped perpendicular channel
    """
    def pixel_mean(_: Array, signal: Array):
        """Computes the value of a pixel by computing the mean of the signal
        """
        return signal.mean()

    chan = InputChannel.perp
    pump = PumpStatus.present
    pixels = comp.channel_heatmap(data_for_compute_tests, pixel_mean, chan, pump)
    assert pixels.shape == (2, 2)
    for i in [0, 1]:
        for j in [0, 1]:
            assert pixels[i, j] < mean_bounds['perp']['upper']
            assert pixels[i, j] > mean_bounds['perp']['lower']


def test_store_individual_means(data_for_compute_tests, mean_bounds):
    """Verifies that the correct means are stored for each signal dataset
    """
    comp.store_individual_means(data_for_compute_tests)
    rounds_root = data_for_compute_tests['rounds']
    for rnd in ['round001', 'round002']:
        for wav in ['76487', '76715']:
            for pump in ['pump', 'nopump']:
                with raises(KeyError):  # make sure `mean` wasn't calculated for `time` datasets
                    time_path = f'{rnd}/{wav}/{pump}/time'
                    time_mean = rounds_root[time_path].attrs['mean']
                for sig in ['perp', 'par', 'ref']:
                    dataset_path = f'{rnd}/{wav}/{pump}/{sig}'
                    sig_mean = rounds_root[dataset_path].attrs['mean']
                    assert sig_mean < mean_bounds[sig]['upper']
                    assert sig_mean > mean_bounds[sig]['lower']


def test_store_individual_noises(data_for_compute_tests, noise_bounds):
    """Verifies that the correct noises are stored for each signal dataset
    """
    comp.store_individual_noises(data_for_compute_tests)
    rounds_root = data_for_compute_tests['rounds']
    for rnd in ['round001', 'round002']:
        for wav in ['76487', '76715']:
            for pump in ['pump', 'nopump']:
                with raises(KeyError):  # make sure `noise` wasn't calculated for `time` datasets
                    time_path = f'{rnd}/{wav}/{pump}/time'
                    time_noise = rounds_root[time_path].attrs['noise']
                for sig in ['perp', 'par', 'ref']:
                    dataset_path = f'{rnd}/{wav}/{pump}/{sig}'
                    sig_noise = rounds_root[dataset_path].attrs['noise']
                    assert sig_noise < noise_bounds[sig]['upper']
                    assert sig_noise > noise_bounds[sig]['lower']


def test_store_noise_means(data_for_compute_tests, noise_bounds):
    """Verifies that the correct file-wide means of the noise are stored for each signal channel

    Note:
        The data for a given channel is the same throughout the file, so the mean noise should be
        identical to the individual noises in the channel.
    """
    comp.store_individual_noises(data_for_compute_tests)
    comp.store_noise_means(data_for_compute_tests)
    for sig in ['perp', 'par', 'ref']:
        mean_noise = data_for_compute_tests.attrs[f'{sig}_noise_mean']
        assert mean_noise < noise_bounds[sig]['upper']
        assert mean_noise > noise_bounds[sig]['lower']


def test_store_overall_means(data_for_compute_tests, mean_bounds):
    """Verifies that the correct file-wide means are stored for each signal channel

    Note:
        The data for a given channel is the same throughout the file, so the overall mean should be
        identical to the individual means in the channel.
    """
    comp.store_individual_means(data_for_compute_tests)
    comp.store_overall_means(data_for_compute_tests)
    for sig in ['perp', 'par', 'ref']:
        mean = data_for_compute_tests.attrs[f'{sig}_mean']
        assert mean < mean_bounds[sig]['upper']
        assert mean > mean_bounds[sig]['lower']


def test_store_std_dev_of_means(data_for_compute_tests):
    """Verifies that the correct standard deviation of the individual means for each signal channel
    are stored

    Note:
        The individual means for a given channel should be identical,
        so the standard deviation should be very small
    """
    comp.store_individual_means(data_for_compute_tests)
    comp.store_overall_means(data_for_compute_tests)
    comp.store_std_dev_of_means(data_for_compute_tests)
    for sig in ['perp', 'par', 'ref']:
        std_dev = data_for_compute_tests.attrs[f'{sig}_mean_std_dev']
        assert std_dev < 1e-4


def test_store_std_dev_of_noises(data_for_compute_tests):
    """Verifies that the correct standard deviation of the individual noises for each signal channel
    are stored

    Note:
        The individual noises for a given channel should be very similar,
        so the standard deviation should be very small
    """
    comp.store_individual_noises(data_for_compute_tests)
    comp.store_noise_means(data_for_compute_tests)
    comp.store_std_dev_of_noises(data_for_compute_tests)
    for sig in ['perp', 'par', 'ref']:
        std_dev = data_for_compute_tests.attrs[f'{sig}_noise_std_dev']
        assert std_dev < 1e-2
| 36.068182 | 100 | 0.609578 | 1,034 | 7,935 | 4.462282 | 0.161509 | 0.043997 | 0.081925 | 0.111183 | 0.650629 | 0.622237 | 0.531643 | 0.493932 | 0.420676 | 0.362375 | 0 | 0.024676 | 0.290107 | 7,935 | 219 | 101 | 36.232877 | 0.794426 | 0.259105 | 0 | 0.340741 | 0 | 0 | 0.102509 | 0.016846 | 0 | 0 | 0 | 0 | 0.096296 | 1 | 0.081481 | false | 0 | 0.044444 | 0 | 0.148148 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
50c2992ea05c32d4854faddbaa46b9f79ff00df8 | 673 | py | Python | tests/node/test_atanh.py | gglin001/onnx_jax | 08e2a1181250db48f4436f6430903fc895a3a1d6 | [
"Apache-2.0"
] | 9 | 2021-04-12T02:37:14.000Z | 2022-03-28T23:31:40.000Z | tests/node/test_atanh.py | gglin001/onnx-jax | 08e2a1181250db48f4436f6430903fc895a3a1d6 | [
"Apache-2.0"
] | null | null | null | tests/node/test_atanh.py | gglin001/onnx-jax | 08e2a1181250db48f4436f6430903fc895a3a1d6 | [
"Apache-2.0"
] | null | null | null | import numpy as np
import onnx
from tests.tools import expect


class Atanh:

    @staticmethod
    def export():  # type: () -> None
        node = onnx.helper.make_node(
            'Atanh',
            inputs=['x'],
            outputs=['y'],
        )

        x = np.array([-0.5, 0, 0.5]).astype(np.float32)
        y = np.arctanh(x)  # expected output [-0.54930615, 0., 0.54930615]
        expect(node, inputs=[x], outputs=[y], name='test_atanh_example')

        x = np.random.uniform(0.0, 1.0, (3, 4, 5)).astype(np.float32)
        y = np.arctanh(x)
        expect(node, inputs=[x], outputs=[y], name='test_atanh')


if __name__ == '__main__':
    Atanh.export()
| 24.925926 | 76 | 0.548291 | 92 | 673 | 3.880435 | 0.456522 | 0.058824 | 0.117647 | 0.12605 | 0.364146 | 0.364146 | 0.364146 | 0.364146 | 0.212885 | 0 | 0 | 0.072016 | 0.27786 | 673 | 26 | 77 | 25.884615 | 0.662551 | 0.095097 | 0 | 0.105263 | 0 | 0 | 0.070957 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.052632 | false | 0 | 0.157895 | 0 | 0.263158 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
50c3e355dcb8aa480f5c338c808a8de5a2fcb2b8 | 27,777 | py | Python | python/samples/pnp_perf_comp.py | Russ76/mrpt | 4a59edd8b3250acea27fcb94bf8e29bee1ba8e1c | [
"BSD-3-Clause"
] | 1,372 | 2015-07-25T00:33:22.000Z | 2022-03-30T12:55:33.000Z | python/samples/pnp_perf_comp.py | Russ76/mrpt | 4a59edd8b3250acea27fcb94bf8e29bee1ba8e1c | [
"BSD-3-Clause"
] | 772 | 2015-07-18T19:18:54.000Z | 2022-03-27T02:45:51.000Z | python/samples/pnp_perf_comp.py | Russ76/mrpt | 4a59edd8b3250acea27fcb94bf8e29bee1ba8e1c | [
"BSD-3-Clause"
] | 588 | 2015-07-23T01:13:18.000Z | 2022-03-31T08:05:40.000Z | #!/usr/bin/env python3
import sys
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import timeit
import mpld3
from mpld3 import plugins, utils
# Install tabulate using "pip install tabulate"
from tabulate import tabulate
import os
def clear(): os.system('clear')
# Function to clear screen
import pymrpt
# Matplotlib figure parameters
mpl.rcParams['figure.figsize'] = (12.0, 8.0)
plt.rcParams.update({'axes.titlesize': 30, 'axes.labelsize': 20, 'xtick.labelsize': 15, 'ytick.labelsize': 15,
'figure.titlesize': 40})
# Define number of points and camera parameters
n = 10
sigma = 0.0005
n_range = list(range(5, 25))
sigma_range = np.arange(0.0001, 0.001, 0.0001)
f = 1.0
cx = 0.0
cy = 0.0
cam_intrinsic = np.array([[f, 0.0, cx], [0.0, f, cy], [0.0, 0.0, 1.0]])
# Define constants for serial outputting results
checkmark = '\u2713'
l_progress_bar = 50
# Instantiate pnp module
pnp = pymrpt.pnp(n)
# Define settings for comparison module
algos = [pnp.dls, pnp.epnp, pnp.p3p,
pnp.rpnp, pnp.ppnp, pnp.posit, pnp.lhm]
algo_names = ['dls', 'epnp', 'p3p', 'rpnp', 'ppnp', 'posit', 'lhm']
algo_ls = [':', '-', '--', '-', '--', '-', '-']
n_algos = len(algos)
n_iter = 100
class HighlightLines(plugins.PluginBase):
    # css format for interactive d3 plots
    """A plugin for an interactive legend.

    Inspired by http://bl.ocks.org/simzou/6439398
    """

    JAVASCRIPT = """
    mpld3.register_plugin("interactive_legend", InteractiveLegend);
    InteractiveLegend.prototype = Object.create(mpld3.Plugin.prototype);
    InteractiveLegend.prototype.constructor = InteractiveLegend;
    InteractiveLegend.prototype.requiredProps = ["line_ids", "labels"];
    InteractiveLegend.prototype.defaultProps = {}
    function InteractiveLegend(fig, props){
        mpld3.Plugin.call(this, fig, props);
    };

    InteractiveLegend.prototype.draw = function(){
        var labels = new Array();
        for(var i=0; i<this.props.labels.length; i++){
            var obj = {}
            obj.label = this.props.labels[i]
            obj.line = mpld3.get_element(this.props.line_ids[i], this.fig)
            obj.visible = false;
            labels.push(obj);
        }

        var ax = this.fig.axes[0]
        var legend = this.fig.canvas.append("svg:g")
                         .attr("class", "legend");

        // add the rectangles
        legend.selectAll("rect")
              .data(labels)
              .enter().append("rect")
              .attr("height",10)
              .attr("width", 25)
              .attr("x",ax.width+10+ax.position[0])
              .attr("y",function(d,i) {
                   return ax.position[1]+ i * 25 - 10;})
              .attr("stroke", function(d) {
                   return d.line.props.edgecolor})
              .attr("class", "legend-box")
              .style("fill", "white")
              .on("click", click)

        // add the text
        legend.selectAll("text")
              .data(labels)
              .enter().append("text")
              .attr("x", function (d) {
                   return ax.width+10+ax.position[0] + 25 + 15
              })
              .attr("y", function(d,i) {
                   return ax.position[1]+ i * 25
              })
              .text(function(d) { return d.label })

        // specify the action on click
        function click(d,i){
            d.visible = !d.visible;
            d3.select(this)
              .style("fill",function(d, i) {
                  console.log(d)
                  var color = d.line.props.edgecolor
                  return d.visible ? color : "white";
              })

            d3.select(d.line.path[0][0])
              .style("stroke-opacity", d.visible ? 1 : d.line.props.alpha);
        }
    };
    """

    def __init__(self, lines, labels, css):
        self.css_ = css or ""

        self.lines = lines
        self.dict_ = {"type": "interactive_legend",
                      "line_ids": [utils.get_id(line) for line in lines],
                      "labels": labels}
css = """
.legend-box {
cursor: pointer;
}
"""


def vector2RotMat(vec, theta=0):
    # Function to convert from axis, angle to rotation matrix
    # Rodrigues rotation formula
    n_check = np.linalg.norm(vec)
    kx = vec[0] / n_check
    ky = vec[1] / n_check
    kz = vec[2] / n_check
    K = np.matrix([[0, -kz, ky], [kz, 0, -kx], [-ky, kx, 0]])
    I = np.identity(3)
    R = I + K * np.sin(theta) + K * K * (1 - np.cos(theta))
    R = np.array(R)

    return R
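

# Hedged sanity check (illustrative only, not part of the benchmark): rotating
# the x-axis by pi/2 about the z-axis should give the y-axis, i.e.
#   vector2RotMat(np.array([0, 0, 1]), np.pi / 2).dot([1, 0, 0]) ~= [0, 1, 0]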


def quatvec2RotMat(q):
    # Function to convert from quaternion to Rotation matrix
    qw = np.sqrt(1 - np.linalg.norm(q) * np.linalg.norm(q))
    qx = q[0]
    qy = q[1]
    qz = q[2]
    R = [1 - 2 * qy * qy - 2 * qz * qz, 2 * qx * qy - 2 * qz * qw, 2 * qx * qz + 2 * qy * qw,
         2 * qx * qy + 2 * qz * qw, 1 - 2 * qx * qx - 2 * qz * qz, 2 * qy * qz - 2 * qx * qw,
         2 * qx * qz - 2 * qy * qw, 2 * qy * qz + 2 * qx * qw, 1 - 2 * qx * qx - 2 * qy * qy]
    R = np.reshape(R, [3, 3])

    return R


def RotMat2quat(R):
    # Function to convert from rotation matrix to Quaternion
    qw = np.sqrt(1 + R[0, 0] + R[1, 1] + R[2, 2]) / 2
    if qw > 0.01:
        qx = (R[2, 1] - R[1, 2]) / 4 / qw
        qy = (R[0, 2] - R[2, 0]) / 4 / qw
        qz = (R[1, 0] - R[0, 1]) / 4 / qw
    else:
        l = np.array([R[0, 0], R[1, 1], R[2, 2]])
        ind_max = np.argmax(l)
        if ind_max == 0:
            qx = np.sqrt((R[0, 0] + 1) / 2)
            qy = (R[1, 0] + R[0, 1]) / 4 / qx
            qz = (R[0, 2] + R[2, 0]) / 4 / qx
        elif ind_max == 1:
            qy = np.sqrt((R[1, 1] + 1) / 2)
            qx = (R[1, 0] + R[0, 1]) / 4 / qy
            qz = (R[2, 1] + R[1, 2]) / 4 / qy
        else:
            qz = np.sqrt((R[2, 2] + 1) / 2)
            qx = (R[0, 2] + R[2, 0]) / 4 / qz
            qy = (R[2, 1] + R[1, 2]) / 4 / qz
        qw = np.sqrt(1 - qx * qx - qy * qy - qz * qz)

    return [qw, qx, qy, qz]
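

# The two conversions above are approximately mutual inverses for rotations
# whose quaternion has qw >= 0 (quatvec2RotMat reconstructs qw from the vector
# part, assuming a unit quaternion):
#   q = RotMat2quat(R); np.allclose(quatvec2RotMat(q[1:4]), R)  # ~True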


def display_comparison_plot(t, arr, names, line_styles, title, xtitle, ytitle, ylim, figname):
    f, ax = plt.subplots()
    lines = []
    for i in np.arange(0, len(names)):
        l, = ax.plot(t, arr[:, i], label=names[i], lw=3, ls=line_styles[i])
        lines.append(l)
    leg = ax.legend(fancybox=True, shadow=True)
    leg.get_frame().set_alpha(0.8)
    lined = dict()
    for legline, origline in zip(leg.get_lines(), lines):
        legline.set_picker(10)
        lined[legline] = origline
    ax.set_xlabel(xtitle)
    ax.set_ylabel(ytitle)
    ax.set_title(title)
    ax = plt.gca()
    ax.set_ylim(ylim)
    ax.grid()

    def onpick(event):
        legline = event.artist
        origline = lined[legline]
        vis = not origline.get_visible()
        origline.set_visible(vis)
        if vis:
            legline.set_alpha(1)
        else:
            legline.set_alpha(0.2)
        f.canvas.draw()

    f.canvas.mpl_connect('pick_event', onpick)
    plt.show()
    plt.savefig(figname + '.pdf')


def display_comparison_plot_mpld3(t, arr, names, line_styles, title, xtitle, ytitle, ylim, figname):
    # Function used to generate interactive d3 plots in html
    f, ax = plt.subplots()
    lines = []
    for i in np.arange(0, len(names)):
        l, = ax.plot(t, arr[:, i], label=names[i], lw=3, ls=line_styles[i], alpha=0.2)
        lines.append(l)
    ax.set_xlabel(xtitle)
    ax.set_ylabel(ytitle)
    ax.set_title(title)
    ax = plt.gca()
    ax.set_ylim(ylim)
    ax.grid()
    plugins.connect(f, HighlightLines(lines, names, css))
    mpld3.display()
    #mpld3.save_html(f, figname + '.html')

    return mpld3.fig_to_html(f)


def printProgress(iteration, total, prefix='', suffix='', decimals=1, barLength=100):
    """
    Print iterations progress.
    Call in a loop to create a terminal progress bar.
    @params:
        iteration   - Required  : current iteration (Int)
        total       - Required  : total iterations (Int)
        prefix      - Optional  : prefix string (Str)
        suffix      - Optional  : suffix string (Str)
        decimals    - Optional  : positive number of decimals in percent complete (Int)
        barLength   - Optional  : character length of bar (Int)
    """
    formatStr = "{0:." + str(decimals) + "f}"
    percents = formatStr.format(100 * (iteration / float(total)))
    filledLength = int(round(barLength * iteration / float(total)))
    bar = "\u2588" * filledLength + '-' * (barLength - filledLength)
    sys.stdout.write('\r%s |%s| %s%s %s' % (prefix, bar, percents, '%', suffix))
    sys.stdout.flush()
    if iteration == total:
        sys.stdout.write('\n')
        sys.stdout.flush()


def printTestStatus(test_index):
    # Print overall progress of all the tests
    head_tests = ['#No.', 'Status', 'Description']
    table_tests = [[str(1), '', 'Test for 100 iterations with each algorithm'],
                   [str(2), '', 'Variation of error with image pixel noise standard deviation (' +
                    '\u03C3' + ')'],
                   [str(3), '', 'Variation in error with number of 2d/3d correspondences (n)'],
                   [str(4), '', 'Average computation time of each algorithm']]
    for i in range(0, test_index):
        table_tests[i][1] = checkmark
    print(tabulate(table_tests, head_tests, tablefmt='fancy_grid'))
    for i in range(0, test_index):
        printProgress(l_progress_bar, l_progress_bar, prefix='Test' + str(i + 1) +
                      ' Progress:', suffix='Complete', barLength=50)


def printTest1Results(vals):
    # Function to print results of test 1
    test1_headers = ['Algo', 'Translation Mean Error', 'Translation Median Error',
                     'Rotation Mean Error', 'Rotation Median Error']
    test1_table = np.empty([7, 5], dtype=object)
    test1_table[:, 1:] = vals
    test1_table[:, 0] = algo_names
    print(tabulate(test1_table, test1_headers, tablefmt='fancy_grid'))


def printTest4Results(vals):
    # Function to print results of test 4
    test4_headers = ['Algo', 'Translation Mean Error', 'Translation Median Error',
                     'Rotation Mean Error', 'Rotation Median Error']
    vals = np.random.rand(7, 1)
    test4_table = np.empty([7, 2], dtype=object)
    test4_table[:, 1:] = vals
    test4_table[:, 0] = algo_names
    print(tabulate(test4_table, test4_headers, tablefmt='fancy_grid'))


def calc_err(pose1, pose2):
    # Function to compute reprojection errors
    if np.any(np.isnan(pose1)) or np.linalg.norm(pose1) > 1000000:
        err = [0, 0]
        return err

    # Percent error in translation
    t_est = np.array(pose1[0:3])
    t = np.array(pose2[0:3])
    err_t = (np.linalg.norm(t_est - t) / np.linalg.norm(t)) * 100

    # Rotation error
    q_est = pose1[3:6, 0]
    q = pose2[3:6, 0]
    if np.linalg.norm(q) != 0 and np.linalg.norm(q_est):
        val = np.dot(q_est, q) / np.linalg.norm(q_est) / np.linalg.norm(q)
    else:
        val = 1
    if val > 1:
        val = 1
    elif val < -1:
        val = -1
    elif np.isnan(val):  # `val == np.nan` is always False; isnan is the intended check
        val = 1
    err_q = np.max(np.abs(np.arccos(val))) * 180 / np.pi
    err_q = err_q if err_q < 180 - err_q else 180 - err_q
    err = [err_t, err_q]
    if np.isnan(err_t) or np.isnan(err_q):
        print('pose_est=\n', pose2)
        print('pose=\n', pose1)

    return err
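

# calc_err reports translation error as a percentage of ||t|| and rotation
# error as the angle (in degrees) between the quaternion vector parts, folded
# into [0, 90] to ignore the q / -q sign ambiguity.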


def err_plot():
    # Test 1
    # Define object points and image points
    obj_pts = np.random.randint(-40, 40, [n, 3])
    obj_pts = obj_pts.astype(float)
    obj_pts[0, :] = [0, 0, 0]
    img_pts = np.empty([n, 3])
    img_pts[:, 2] = 1
    pose_est = np.empty([6, 1])
    pose_act = np.empty([6, 1])
    err_net_t = []
    err_net_q = []

    for it in np.arange(0, n_iter):
        # Generate random camera extrinsic matrix
        v = 2 * np.random.random([3]) - np.array([1, 1, 1])
        v = v / np.linalg.norm(v)
        #R=np.array([[0.841986, -0.352662, -0.408276],[0.308904, 0.935579, -0.171085],[0.442309, 0.0179335, 0.896683]])
        R = vector2RotMat(v, np.pi * 2 / 3)
        q = RotMat2quat(R)
        t = np.array([0.0, 0.0, 200.0])

        # Compute image points based on actual extrinsic matrix and add noise to measurements
        for i in range(0, n):
            pt = np.dot(R, obj_pts[i, :]) + t
            img_pts[i, 0:2] = np.array([pt[0] / pt[2], pt[1] / pt[2]])
        img_pts[:, 0:2] = img_pts[:, 0:2] + sigma * np.random.randn(n, 2)
        pose_act[0:3, 0] = t
        pose_act[3:6, 0] = q[1:4]

        # Use the c-library to compute the pose
        err_t = []
        err_q = []
        for i in np.arange(0, n_algos):
            algos[i](obj_pts, img_pts, n, cam_intrinsic, pose_est)
            e = calc_err(pose_est, pose_act)
            err_t.append(e[0])
            err_q.append(e[1])
            if np.isnan(e[0]) or np.isnan(e[1]):
                print('algo=\n', algo_names[i])
                print('err=\n', e)
        err_net_t.append(err_t)
        err_net_q.append(err_q)
        printProgress(it, n_iter, prefix='Test' + str(1) +
                      ' Progress:', suffix='Complete', barLength=50)

    mean_err_t = np.mean(err_net_t, axis=0)
    mean_err_q = np.mean(err_net_q, axis=0)
    median_err_t = np.median(err_net_t, axis=0)
    median_err_q = np.median(err_net_q, axis=0)
    vals = np.empty([7, 4])
    vals[:, 0] = mean_err_t
    vals[:, 1] = median_err_t
    vals[:, 2] = mean_err_q
    vals[:, 3] = median_err_q

    """
    for i in np.arange(0, n_algos):
    print 'mean_err_t_' + algo_names[i] + '=', mean_err_t[i], 'median_err_t_' + algo_names[i] + '=', median_err_t[i]
    print 'mean_err_q_' + algo_names[i] + '=', mean_err_q[i], 'median_err_q_' + algo_names[i] + '=', median_err_q[i]
    """

    it = np.arange(0, n_iter)
    err_net_t = np.array(err_net_t)
    err_net_q = np.array(err_net_q)
    s = '<h2> Translation error and Rotation error for 100 iterations (R - Randomly varying, t - fixed) </h2>'
    s1 = display_comparison_plot_mpld3(it, err_net_t, names=algo_names, line_styles=algo_ls,
                                       title='% Translation Error Plot', xtitle='Iteration', ytitle='e_t', ylim=[0, 2], figname='err_t')
    s2 = display_comparison_plot_mpld3(it, err_net_q, names=algo_names, line_styles=algo_ls,
                                       title='Rotation Error Plot (deg)', xtitle='Iteration', ytitle='e_q', ylim=[0, 1], figname='err_q')
    s = s + '\n <table > \n <tr > \n <td > \n' + s1 + \
        '</td > \n <td> \n' + s2 + '</td> \n </tr> \n </table> \n'

    return s, vals
"""
comp_arr = np.zeros([n_iter,2])
for i in np.arange(0,n_iter):
comp_arr[i,:] = 2*mean_err_p3p
nconv_epnp = float(np.sum(err_epnp>comp_arr))/n_iter*100
nconv_dls = float(np.sum(err_dls>comp_arr))/n_iter*100
nconv_ppnp = float(np.sum(err_ppnp>comp_arr))/n_iter*100
nconv_posit = float(np.sum(err_posit>comp_arr))/n_iter*100
nconv_lhm = float(np.sum(err_lhm>comp_arr))/n_iter*100
nconv_p3p = float(np.sum(err_p3p>comp_arr))/n_iter*100
plt.figure()
xvals = ['epnp', 'dls', 'ppnp', 'posit','lhm', 'p3p']
xvals_int = np.arange(0,n_algos)
yvals = [nconv_epnp, nconv_dls, nconv_ppnp, nconv_posit, nconv_lhm, nconv_p3p]
plt.bar(xvals_int, yvals, align='center')
plt.xticks(xvals_int, xvals)
plt.title('%Divergence')
plt.show()
plt.savefig('divergence.pdf')
"""


def err_statistics_fcn_n():
    # Test 3: error statistics as a function of n
    mean_err_t_net = []
    mean_err_q_net = []
    median_err_t_net = []
    median_err_q_net = []

    for n in n_range:
        # Define object points and image points
        obj_pts = np.random.randint(-40, 40, [n, 3])
        obj_pts = obj_pts.astype(float)
        obj_pts[0, :] = [0, 0, 0]
        img_pts = np.empty([n, 3])
        img_pts[:, 2] = 1
        pose_est = np.empty([6, 1])
        pose_act = np.empty([6, 1])
        err_net_t = []
        err_net_q = []

        for it in np.arange(0, n_iter):
            # Define camera extrinsic matrix
            v = 2 * np.random.random([3]) - np.array([1, 1, 1])
            v = v / np.linalg.norm(v)
            #R=np.array([[0.841986, -0.352662, -0.408276],[0.308904, 0.935579, -0.171085],[0.442309, 0.0179335, 0.896683]])
            R = vector2RotMat(v, np.pi * 2 / 3)
            q = RotMat2quat(R)
            t = np.array([0.0, 0.0, 200.0])

            # Compute image points based on actual extrinsic matrix and add noise to measurements
            for i in range(0, n):
                pt = np.dot(R, obj_pts[i, :]) + t
                img_pts[i, 0:2] = np.array([pt[0] / pt[2], pt[1] / pt[2]])
            img_pts[:, 0:2] = img_pts[:, 0:2] + sigma * np.random.randn(n, 2)
            pose_act[0:3, 0] = t
            pose_act[3:6, 0] = q[1:4]

            # Use the c-library to compute the pose
            err_t = []
            err_q = []
            for i in range(0, n_algos):
                algos[i](obj_pts, img_pts, n, cam_intrinsic, pose_est)
                e = calc_err(pose_est, pose_act)
                err_t.append(e[0])
                err_q.append(e[1])
            err_net_t.append(err_t)
            err_net_q.append(err_q)

        mean_err_t_net.append(np.mean(err_net_t, axis=0))
        mean_err_q_net.append(np.mean(err_net_q, axis=0))
        median_err_t_net.append(np.median(err_net_t, axis=0))
        median_err_q_net.append(np.median(err_net_q, axis=0))
        printProgress(n - 5, len(n_range), prefix='Test' + str(3) +
                      ' Progress:', suffix='Complete', barLength=50)

    it = np.arange(5, 25)
    mean_err_t_net = np.array(mean_err_t_net)
    mean_err_q_net = np.array(mean_err_q_net)
    median_err_t_net = np.array(median_err_t_net)
    median_err_q_net = np.array(median_err_q_net)
    s = '<h2> Mean and Median error in Translation and Rotation with varying 2d/3d correspondences (n) </h2>'
    s1 = display_comparison_plot_mpld3(it, mean_err_t_net, names=algo_names, line_styles=algo_ls,
                                       title='Mean Translation %Error Plot', xtitle='n', ytitle=r'% Translation error e_t', ylim=[0, 10], figname='mean_err_t')
    s2 = display_comparison_plot_mpld3(it, mean_err_q_net, names=algo_names, line_styles=algo_ls,
                                       title='Mean Rotation Error Plot (deg)', xtitle='n', ytitle=r'Rotation error e_q (deg)', ylim=[0, 0.5], figname='mean_err_q')
    s3 = display_comparison_plot_mpld3(it, median_err_t_net, names=algo_names, line_styles=algo_ls,
                                       title='Median Translation %Error Plot', xtitle='n', ytitle=r'% Translation error e_t', ylim=[0, 1], figname='median_err_t')
    s4 = display_comparison_plot_mpld3(it, median_err_q_net, names=algo_names, line_styles=algo_ls,
                                       title='Median Rotation Error Plot (deg)', xtitle='n', ytitle=r'Rotation error e_q(deg)', ylim=[0, 0.5], figname='median_err_q')
    s = s + '\n<table>\n <tr>\n <td>\n' + s1 + '</td>\n <td>\n' + s2 + '</td>\n </tr>\n' + \
        '\n<tr>\n <td>\n' + s3 + '\n</td>\n <td>\n' + s4 + '\n</td>\n </tr>\n </table>\n'

    return s


def err_statistics_fcn_sigma():
    # Test 2: error statistics as a function of the noise level sigma
    mean_err_t_net = []
    mean_err_q_net = []
    median_err_t_net = []
    median_err_q_net = []
    n = 10

    for sigma in sigma_range:
        # Define object points and image points
        obj_pts = np.random.randint(-40, 40, [n, 3])
        obj_pts = obj_pts.astype(float)
        obj_pts[0, :] = [0, 0, 0]
        img_pts = np.empty([n, 3])
        img_pts[:, 2] = 1
        pose_est = np.empty([6, 1])
        pose_act = np.empty([6, 1])
        err_net_t = []
        err_net_q = []

        for it in np.arange(0, n_iter):
            # Define camera extrinsic matrix
            v = 2 * np.random.random([3]) - np.array([1, 1, 1])
            v = v / np.linalg.norm(v)
            #R=np.array([[0.841986, -0.352662, -0.408276],[0.308904, 0.935579, -0.171085],[0.442309, 0.0179335, 0.896683]])
            R = vector2RotMat(v, np.pi * 2 / 3)
            q = RotMat2quat(R)
            t = np.array([0.0, 0.0, 200.0])

            # Compute image points based on actual extrinsic matrix and add noise to measurements
            for i in range(0, n):
                pt = np.dot(R, obj_pts[i, :]) + t
                img_pts[i, 0:2] = np.array([pt[0] / pt[2], pt[1] / pt[2]])
            img_pts[:, 0:2] = img_pts[:, 0:2] + sigma * np.random.randn(n, 2)
            pose_act[0:3, 0] = t
            pose_act[3:6, 0] = q[1:4]

            # Use the c-library to compute the pose
            err_t = []
            err_q = []
            for i in range(0, n_algos):
                algos[i](obj_pts, img_pts, n, cam_intrinsic, pose_est)
                e = calc_err(pose_est, pose_act)
                err_t.append(e[0])
                err_q.append(e[1])
            err_net_t.append(err_t)
            err_net_q.append(err_q)

        mean_err_t_net.append(np.mean(err_net_t, axis=0))
        mean_err_q_net.append(np.mean(err_net_q, axis=0))
        median_err_t_net.append(np.median(err_net_t, axis=0))
        median_err_q_net.append(np.median(err_net_q, axis=0))
        printProgress(sigma * 1000, len(sigma_range), prefix='Test' + str(2) +
                      ' Progress:', suffix='Complete', barLength=50)

    it = np.arange(0.001, 0.010, 0.001)
    mean_err_t_net = np.array(mean_err_t_net)
    mean_err_q_net = np.array(mean_err_q_net)
    median_err_t_net = np.array(median_err_t_net)
    median_err_q_net = np.array(median_err_q_net)
    s = '\n<h2>\n Mean and Median error in Translation and Rotation with varying noise variance (sigma)\n </h2>\n'
    s1 = display_comparison_plot_mpld3(it, mean_err_t_net, names=algo_names, line_styles=algo_ls, title='Mean Translation %Error Plot',
                                       xtitle=r'\sigma', ytitle=r'% Translation error e_t', ylim=[0, 10], figname='mean_sigma_err_t')
    s2 = display_comparison_plot_mpld3(it, mean_err_q_net, names=algo_names, line_styles=algo_ls, title='Mean Rotation Error Plot (deg)',
                                       xtitle=r'\sigma', ytitle=r'Rotation error e_q (deg)', ylim=[0, 0.5], figname='mean_sigma_err_q')
    s3 = display_comparison_plot_mpld3(it, median_err_t_net, names=algo_names, line_styles=algo_ls, title='Median Translation %Error Plot',
                                       xtitle=r'\sigma', ytitle=r'% Translation error e_t', ylim=[0, 1], figname='median_sigma_err_t')
    s4 = display_comparison_plot_mpld3(it, median_err_q_net, names=algo_names, line_styles=algo_ls, title='Median Rotation Error Plot (deg)',
                                       xtitle=r'\sigma', ytitle=r'Rotation error e_q (deg)', ylim=[0, 0.5], figname='median_sigma_err_q')
    s = s + '\n<table>\n <tr>\n <td>\n' + s1 + '\n</td>\n <td>\n' + s2 + '\n</td>\n </tr>\n' + \
        '\n<tr>\n <td>\n' + s3 + '\n</td> \n<td>\n' + s4 + '\n</td>\n </tr>\n </table>\n'

    return s


def time_comp():
    # Test 4
    obj_pts_store = []
    img_pts_store = []
    n_max = 50
    n_step = 1
    n_start = 10
    n_iter = 10
    tcomp_storage = []

    for n in np.arange(n_start, n_max, n_step):
        # Generate object points and image points
        for i in np.arange(0, n_iter):
            obj_pts = np.random.randint(-40, 40, [n, 3])
            obj_pts = obj_pts.astype(float)
            obj_pts[0, :] = [0, 0, 0]
            img_pts = np.empty([n, 3])
            img_pts[:, 2] = 1

            # Define camera extrinsic matrix
            v = 2 * np.random.random([3]) - np.array([1, 1, 1])
            v = v / np.linalg.norm(v)
            #R=np.array([[0.841986, -0.352662, -0.408276],[0.308904, 0.935579, -0.171085],[0.442309, 0.0179335, 0.896683]])
            R = vector2RotMat(v, np.pi * 2 / 3)
            t = np.array([0.0, 0.0, 200.0])

            # Compute image points based on actual extrinsic matrix
            for i in range(0, n):
                pt = np.dot(R, obj_pts[i, :]) + t
                img_pts[i, 0:2] = np.array([pt[0] / pt[2], pt[1] / pt[2]])

            # Add noise to measurements
            img_pts[:, 0:2] = img_pts[:, 0:2] + sigma * np.random.randn(n, 2)
            obj_pts_store.append(obj_pts)
            img_pts_store.append(img_pts)

        tcomp = []
        # Compute time for n_iter iterations
        for it in np.arange(1, n_algos):
            pose_est = np.empty([6, 1])
            start = timeit.default_timer()
            for i in np.arange(0, n_iter):
                obj_pts = obj_pts_store[i]
                img_pts = img_pts_store[i]
                algos[it](obj_pts, img_pts, n, cam_intrinsic, pose_est)
            end = timeit.default_timer()
            tcomp.append((end - start) / float(n_iter) * 1000.0)
        tcomp_storage.append(tcomp)
        printProgress(n - n_start, len(list(range(n_start, n_max, n_step))), prefix='Test' + str(4) +
                      ' Progress:', suffix='Complete', barLength=50)

    it = np.arange(n_start, n_max, n_step)
    tcomp_storage = np.array(tcomp_storage)
    s = '\n<h2>\n Average computational time for algorithm (ms) \n</h2>\n'
    s = s + display_comparison_plot_mpld3(it, tcomp_storage, names=algo_names[1:], line_styles=algo_ls[1:],
                                          title='Average Time for algorithm (ms)', xtitle=r'n', ytitle=r't(ms)', ylim=[0, 0.6], figname='mean_time')

    return s
# Introduction and links to various files
ss = """<!DOCTYPE html>
<html>
<body bgcolor="#E6E6FA">
<br>
<CENTER>
<embed src = "https://www.dropbox.com/s/a266gqe3o0typpg/pnp_intro1.png?raw=1 #toolbar=0&navpanes=0&scrollbar=0" width = "1000" height = "1250" ALIGN=CENTER>
<br>
<embed src = "https://www.dropbox.com/s/5v8n4edc7g8q8tu/pnp_intro2.png?raw=1 #toolbar=0&navpanes=0&scrollbar=0" width = "1000" height = "1250" ALIGN=CENTER>
<br>
<embed src = "https://www.dropbox.com/s/6bdadtn99tthth0/pnp_intro3.png?raw=1 #toolbar=0&navpanes=0&scrollbar=0" width = "1000" height = "650" ALIGN=CENTER>
<br>
<h1> Sample Pose Estimation using Camera Calib application of MRPT </h1>
<br>
<iframe src = "https://www.youtube.com/embed/aGd7ZyrcwaE" width = "960" height = "540" frameborder = "0" allowfullscreen ALIGN=CENTER> </iframe >
<br>
<br>
<h1> Performance Comparison using python interface in MRPT(pnp_perf_comp.py) </h1>
<div>
<h3> Plots are interactive <br>
* Click on legend box to solidify the particular algorithm <br>
* Pan and Zoom at lower left corner </h3>
</div>"""
# sys.stderr.write('\x1b[2J\x1b[H')
clear()
printTestStatus(0)
printProgress(0, len(list(range(5, 25))), prefix='Test' + str(1) +
' Progress:', suffix='Complete', barLength=50)
s1, vals = err_plot()
# sys.stderr.write('\x1b[2J\x1b[H')
clear()
printTestStatus(1)
printProgress(0, len(list(range(5, 25))), prefix='Test' + str(2) +
' Progress:', suffix='Complete', barLength=50)
s2 = err_statistics_fcn_sigma()
# sys.stderr.write('\x1b[2J\x1b[H')
clear()
printTestStatus(2)
printProgress(0, len(list(range(5, 25))), prefix='Test' + str(3) +
' Progress:', suffix='Complete', barLength=50)
s3 = err_statistics_fcn_n()
# sys.stderr.write('\x1b[2J\x1b[H')
clear()
printTestStatus(3)
printProgress(0, len(list(range(5, 25))), prefix='Test' + str(4) +
' Progress:', suffix='Complete', barLength=50)
s4 = time_comp()
# sys.stderr.write('\x1b[2J\x1b[H')
clear()
printTestStatus(4)
print('\n\nResults of Test 1 \n\n')
printTest1Results(vals)
s5 = """<h1> MRPT Merge Pull Request </h1>
<h3>
<a href="https://github.com/MRPT/mrpt/pull/310">Link to PnP Algorithm Pull Request </a>
</h3>
</CENTER>
</body>
</html>
"""
ss = ss + s1 + s2 + s3 + s4 + s5
f = open("pnp_perf_comp.html", "w")
f.write(ss)
f.close()
| 34.808271 | 235 | 0.568204 | 4,194 | 27,777 | 3.610157 | 0.122794 | 0.012681 | 0.010171 | 0.020606 | 0.517601 | 0.493428 | 0.470312 | 0.452678 | 0.437356 | 0.398455 | 0 | 0.056163 | 0.27566 | 27,777 | 797 | 236 | 34.851945 | 0.696372 | 0.099471 | 0 | 0.381132 | 0 | 0.016981 | 0.24919 | 0.034956 | 0 | 0 | 0 | 0 | 0 | 1 | 0.032075 | false | 0 | 0.018868 | 0 | 0.083019 | 0.050943 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
50c6012e8c46019de572aa0363ce571d6116f024 | 2,072 | py | Python | robot/robo.py | anekpattanakij/python-back-testing-trading-system | ccb157aa9f88af60c42add047ad6ce9ce090fa2b | [
"BSD-3-Clause"
] | null | null | null | robot/robo.py | anekpattanakij/python-back-testing-trading-system | ccb157aa9f88af60c42add047ad6ce9ce090fa2b | [
"BSD-3-Clause"
] | null | null | null | robot/robo.py | anekpattanakij/python-back-testing-trading-system | ccb157aa9f88af60c42add047ad6ce9ce090fa2b | [
"BSD-3-Clause"
] | null | null | null | from robot.base.robo_base import RoboTrade
from robot.base.robo_indicator import calculate_ema
from robot.base.robo_enum import PriceDataDictColumn, CommandType, OrderType, OrderSide
import math


class Ema12Cross50Robo(RoboTrade):

    def action1d(self):
        self.data1d = calculate_ema(self.data1d, PriceDataDictColumn.CLOSE, [
            {'ema_level': 12, 'ema_column': 'ema_12'}, {'ema_level': 26, 'ema_column': 'ema_26'}])
        if math.isnan(self.data1d[0]["ema_12"]) or math.isnan(self.data1d[0]["ema_26"]):
            return []
        command_list = []
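        # Strategy note: despite the class name, this is an EMA 12/26 crossover.
        # Index 0 appears to hold the most recent bar, so the first branch below
        # fires on a golden cross (EMA12 crossing above EMA26 with the close
        # above EMA12) and opens a long with the full fund; the second branch
        # exits on a cross under.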
        if self.data1d[0]["ema_12"] >= self.data1d[0]["ema_26"] and self.data1d[1]["ema_12"] < self.data1d[1]["ema_26"] and self.data1d[0][PriceDataDictColumn.CLOSE] > self.data1d[0]["ema_12"]:
            command_list.append({"type": CommandType.ALERT, "message": "EMA 12 Cross over EMA 26 in 1d at price {0}".format(
                self.data4h[0][PriceDataDictColumn.CLOSE])})
            if len(self.position_list) == 0:
                command_list.append({
                    "type": CommandType.ORDER,
                    "order": OrderType.LIMIT,
                    "side": OrderSide.LONG,
                    "qty": self.fund / self.data1d[0][PriceDataDictColumn.CLOSE],
                    "price": self.data1d[0][PriceDataDictColumn.CLOSE]
                })
        if self.data1d[0]["ema_12"] < self.data1d[0]["ema_26"] and self.data1d[1]["ema_12"] >= self.data1d[1]["ema_26"]:
            command_list.append({"type": CommandType.ALERT, "message": "EMA 12 Cross under EMA 26 in 1d at price {0}".format(
                self.data4h[0][PriceDataDictColumn.CLOSE])})
            if len(self.position_list) > 0:
                command_list.append({
                    "type": CommandType.ORDER,
                    "order": OrderType.LIMIT,
                    "side": OrderSide.SHORT,
                    "qty": self.position_list[0]["qty"],
                    "price": self.data1d[0][PriceDataDictColumn.CLOSE]
                })
        return command_list
| 51.8 | 193 | 0.579151 | 242 | 2,072 | 4.830579 | 0.239669 | 0.145423 | 0.103507 | 0.083832 | 0.654405 | 0.57485 | 0.467066 | 0.467066 | 0.467066 | 0.467066 | 0 | 0.057487 | 0.277992 | 2,072 | 39 | 194 | 53.128205 | 0.72393 | 0 | 0 | 0.352941 | 0 | 0 | 0.131274 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.029412 | false | 0 | 0.117647 | 0 | 0.235294 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
50c767b00bdf412f6414d81aaa122a7748c13a62 | 11,163 | py | Python | examples/reinforcement_learning/tutorial_DDPG.py | Helilysyt/tensorlayer | 2dc4482a13aff3833a246b4d85b69a5d9079f01d | [
"Apache-2.0"
] | 1 | 2019-12-30T03:16:26.000Z | 2019-12-30T03:16:26.000Z | examples/reinforcement_learning/tutorial_DDPG.py | Helilysyt/tensorlayer | 2dc4482a13aff3833a246b4d85b69a5d9079f01d | [
"Apache-2.0"
] | null | null | null | examples/reinforcement_learning/tutorial_DDPG.py | Helilysyt/tensorlayer | 2dc4482a13aff3833a246b4d85b69a5d9079f01d | [
"Apache-2.0"
] | null | null | null | """
Deep Deterministic Policy Gradient (DDPG)
-----------------------------------------
An algorithm concurrently learns a Q-function and a policy.
It uses off-policy data and the Bellman equation to learn the Q-function,
and uses the Q-function to learn the policy.
Reference
---------
Deterministic Policy Gradient Algorithms, Silver et al. 2014
Continuous Control With Deep Reinforcement Learning, Lillicrap et al. 2016
MorvanZhou's tutorial page: https://morvanzhou.github.io/tutorials/
Environment
-----------
Openai Gym Pendulum-v0, continual action space
Prerequisites
-------------
tensorflow >=2.0.0a0
tensorflow-probability 0.6.0
tensorlayer >=2.0.0
To run
------
python tutorial_DDPG.py --train/test
"""
import argparse
import os
import time
import gym
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
import tensorlayer as tl
parser = argparse.ArgumentParser(description='Train or test neural net motor controller.')
parser.add_argument('--train', dest='train', action='store_true', default=True)
parser.add_argument('--test', dest='train', action='store_false')
args = parser.parse_args()
##################### hyper parameters ####################
ENV_NAME = 'Pendulum-v0' # environment name
RANDOMSEED = 1 # random seed
LR_A = 0.001 # learning rate for actor
LR_C = 0.002 # learning rate for critic
GAMMA = 0.9 # reward discount
TAU = 0.01 # soft replacement
MEMORY_CAPACITY = 10000 # size of replay buffer
BATCH_SIZE = 32 # update batchsize
MAX_EPISODES = 200 # total number of episodes for training
MAX_EP_STEPS = 200 # total number of steps for each episode
TEST_PER_EPISODES = 10 # test the model per episodes
VAR = 3 # control exploration
############################### DDPG ####################################
class DDPG(object):
    """
    DDPG class
    """

    def __init__(self, a_dim, s_dim, a_bound):
        self.memory = np.zeros((MEMORY_CAPACITY, s_dim * 2 + a_dim + 1), dtype=np.float32)
        self.pointer = 0
        self.a_dim, self.s_dim, self.a_bound = a_dim, s_dim, a_bound

        W_init = tf.random_normal_initializer(mean=0, stddev=0.3)
        b_init = tf.constant_initializer(0.1)

        def get_actor(input_state_shape, name=''):
            """
            Build actor network
            :param input_state_shape: state
            :param name: name
            :return: act
            """
            inputs = tl.layers.Input(input_state_shape, name='A_input')
            x = tl.layers.Dense(n_units=30, act=tf.nn.relu, W_init=W_init, b_init=b_init, name='A_l1')(inputs)
            x = tl.layers.Dense(n_units=a_dim, act=tf.nn.tanh, W_init=W_init, b_init=b_init, name='A_a')(x)
            x = tl.layers.Lambda(lambda x: np.array(a_bound) * x)(x)
            return tl.models.Model(inputs=inputs, outputs=x, name='Actor' + name)

        def get_critic(input_state_shape, input_action_shape, name=''):
            """
            Build critic network
            :param input_state_shape: state
            :param input_action_shape: act
            :param name: name
            :return: Q value Q(s,a)
            """
            s = tl.layers.Input(input_state_shape, name='C_s_input')
            a = tl.layers.Input(input_action_shape, name='C_a_input')
            x = tl.layers.Concat(1)([s, a])
            x = tl.layers.Dense(n_units=60, act=tf.nn.relu, W_init=W_init, b_init=b_init, name='C_l1')(x)
            x = tl.layers.Dense(n_units=1, W_init=W_init, b_init=b_init, name='C_out')(x)
            return tl.models.Model(inputs=[s, a], outputs=x, name='Critic' + name)

        self.actor = get_actor([None, s_dim])
        self.critic = get_critic([None, s_dim], [None, a_dim])
        self.actor.train()
        self.critic.train()

        def copy_para(from_model, to_model):
            """
            Copy parameters for soft updating
            :param from_model: latest model
            :param to_model: target model
            :return: None
            """
            for i, j in zip(from_model.trainable_weights, to_model.trainable_weights):
                j.assign(i)

        self.actor_target = get_actor([None, s_dim], name='_target')
        copy_para(self.actor, self.actor_target)
        self.actor_target.eval()

        self.critic_target = get_critic([None, s_dim], [None, a_dim], name='_target')
        copy_para(self.critic, self.critic_target)
        self.critic_target.eval()

        self.R = tl.layers.Input([None, 1], tf.float32, 'r')

        self.ema = tf.train.ExponentialMovingAverage(decay=1 - TAU)  # soft replacement

        self.actor_opt = tf.optimizers.Adam(LR_A)
        self.critic_opt = tf.optimizers.Adam(LR_C)

    def ema_update(self):
        """
        Soft updating by exponential smoothing
        :return: None
        """
        paras = self.actor.trainable_weights + self.critic.trainable_weights
        self.ema.apply(paras)
        for i, j in zip(self.actor_target.trainable_weights + self.critic_target.trainable_weights, paras):
            i.assign(self.ema.average(j))
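
    # Note: with decay = 1 - TAU, the shadow update applied above is equivalent
    # to the standard soft-update rule
    #   theta_target <- (1 - TAU) * theta_target + TAU * theta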

    def choose_action(self, s):
        """
        Choose action
        :param s: state
        :return: act
        """
        return self.actor(np.array([s], dtype=np.float32))[0]

    def learn(self):
        """
        Update parameters
        :return: None
        """
        indices = np.random.choice(MEMORY_CAPACITY, size=BATCH_SIZE)
        bt = self.memory[indices, :]
        bs = bt[:, :self.s_dim]
        ba = bt[:, self.s_dim:self.s_dim + self.a_dim]
        br = bt[:, -self.s_dim - 1:-self.s_dim]
        bs_ = bt[:, -self.s_dim:]

        with tf.GradientTape() as tape:
            a_ = self.actor_target(bs_)
            q_ = self.critic_target([bs_, a_])
            y = br + GAMMA * q_
            q = self.critic([bs, ba])
            td_error = tf.losses.mean_squared_error(y, q)
        c_grads = tape.gradient(td_error, self.critic.trainable_weights)
        self.critic_opt.apply_gradients(zip(c_grads, self.critic.trainable_weights))

        with tf.GradientTape() as tape:
            a = self.actor(bs)
            q = self.critic([bs, a])
            a_loss = -tf.reduce_mean(q)  # maximize the q
        a_grads = tape.gradient(a_loss, self.actor.trainable_weights)
        self.actor_opt.apply_gradients(zip(a_grads, self.actor.trainable_weights))

        self.ema_update()
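
    # Summary of learn(): the critic minimizes the TD error against the
    # Bellman target y = r + GAMMA * Q'(s', mu'(s')); the actor maximizes
    # Q(s, mu(s)), hence the negated mean-Q loss above.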

    def store_transition(self, s, a, r, s_):
        """
        Store data in data buffer
        :param s: state
        :param a: act
        :param r: reward
        :param s_: next state
        :return: None
        """
        s = s.astype(np.float32)
        s_ = s_.astype(np.float32)
        transition = np.hstack((s, a, [r], s_))
        index = self.pointer % MEMORY_CAPACITY  # replace the old memory with new memory
        self.memory[index, :] = transition
        self.pointer += 1
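
    # Buffer layout: each stored row is [s (s_dim), a (a_dim), r (1), s_ (s_dim)],
    # matching the column slicing in learn(); writes wrap around MEMORY_CAPACITY
    # ring-buffer style, overwriting the oldest transitions.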

    def save_ckpt(self):
        """
        save trained weights
        :return: None
        """
        if not os.path.exists('model'):
            os.makedirs('model')
        tl.files.save_weights_to_hdf5('model/ddpg_actor.hdf5', self.actor)
        tl.files.save_weights_to_hdf5('model/ddpg_actor_target.hdf5', self.actor_target)
        tl.files.save_weights_to_hdf5('model/ddpg_critic.hdf5', self.critic)
        tl.files.save_weights_to_hdf5('model/ddpg_critic_target.hdf5', self.critic_target)

    def load_ckpt(self):
        """
        load trained weights
        :return: None
        """
        tl.files.load_hdf5_to_weights_in_order('model/ddpg_actor.hdf5', self.actor)
        tl.files.load_hdf5_to_weights_in_order('model/ddpg_actor_target.hdf5', self.actor_target)
        tl.files.load_hdf5_to_weights_in_order('model/ddpg_critic.hdf5', self.critic)
        tl.files.load_hdf5_to_weights_in_order('model/ddpg_critic_target.hdf5', self.critic_target)


if __name__ == '__main__':
    env = gym.make(ENV_NAME)
    env = env.unwrapped

    # reproducible
    env.seed(RANDOMSEED)
    np.random.seed(RANDOMSEED)
    tf.random.set_seed(RANDOMSEED)

    s_dim = env.observation_space.shape[0]
    a_dim = env.action_space.shape[0]
    a_bound = env.action_space.high

    ddpg = DDPG(a_dim, s_dim, a_bound)

    if args.train:  # train
        reward_buffer = []
        t0 = time.time()
        for i in range(MAX_EPISODES):
            t1 = time.time()
            s = env.reset()
            ep_reward = 0
            for j in range(MAX_EP_STEPS):
                # Add exploration noise
                a = ddpg.choose_action(s)
                a = np.clip(np.random.normal(a, VAR), -2, 2)  # add randomness to action selection for exploration
                s_, r, done, info = env.step(a)

                ddpg.store_transition(s, a, r / 10, s_)

                if ddpg.pointer > MEMORY_CAPACITY:
                    ddpg.learn()

                s = s_
                ep_reward += r
                if j == MAX_EP_STEPS - 1:
                    print(
                        '\rEpisode: {}/{} | Episode Reward: {:.4f} | Running Time: {:.4f}'.format(
                            i, MAX_EPISODES, ep_reward,
                            time.time() - t1
                        ), end=''
                    )
                plt.show()

            # test
            if i and not i % TEST_PER_EPISODES:
                t1 = time.time()
                s = env.reset()
                ep_reward = 0
                for j in range(MAX_EP_STEPS):
                    a = ddpg.choose_action(s)  # without exploration noise
                    s_, r, done, info = env.step(a)

                    s = s_
                    ep_reward += r
                    if j == MAX_EP_STEPS - 1:
                        print(
                            '\rEpisode: {}/{} | Episode Reward: {:.4f} | Running Time: {:.4f}'.format(
                                i, MAX_EPISODES, ep_reward,
                                time.time() - t1
                            )
                        )
                        reward_buffer.append(ep_reward)

            if reward_buffer:
                plt.ion()
                plt.cla()
                plt.title('DDPG')
                plt.plot(np.array(range(len(reward_buffer))) * TEST_PER_EPISODES, reward_buffer)  # plot the episode vt
                plt.xlabel('episode steps')
                plt.ylabel('normalized state-action value')
                plt.ylim(-2000, 0)
                plt.show()
                plt.pause(0.1)
        plt.ioff()
        plt.show()
        print('\nRunning time: ', time.time() - t0)
        ddpg.save_ckpt()

    # test
    ddpg.load_ckpt()
    while True:
        s = env.reset()
        for i in range(MAX_EP_STEPS):
            env.render()
            s, r, done, info = env.step(ddpg.choose_action(s))
            if done:
                break
| 35.325949 | 120 | 0.552002 | 1,414 | 11,163 | 4.157001 | 0.212871 | 0.029092 | 0.012249 | 0.009527 | 0.319496 | 0.254168 | 0.208234 | 0.178632 | 0.140524 | 0.117897 | 0 | 0.016428 | 0.323838 | 11,163 | 315 | 121 | 35.438095 | 0.762321 | 0.167518 | 0 | 0.175824 | 0 | 0 | 0.068694 | 0.023852 | 0 | 0 | 0 | 0 | 0 | 1 | 0.054945 | false | 0 | 0.043956 | 0 | 0.120879 | 0.016484 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
50c7e4c2e13efe2a8cb4dcf164aa5694c23d2e6a | 508 | py | Python | robot_sim/end_effectors/gripper/cobotta_pipette/pipette_show.py | wangyan-hlab/wrs | 8f81cdd33a419d5b4ffe18d13cd4cbf9f258bc7c | [
"MIT"
] | null | null | null | robot_sim/end_effectors/gripper/cobotta_pipette/pipette_show.py | wangyan-hlab/wrs | 8f81cdd33a419d5b4ffe18d13cd4cbf9f258bc7c | [
"MIT"
] | null | null | null | robot_sim/end_effectors/gripper/cobotta_pipette/pipette_show.py | wangyan-hlab/wrs | 8f81cdd33a419d5b4ffe18d13cd4cbf9f258bc7c | [
"MIT"
] | null | null | null |
if __name__ == '__main__':
    import numpy as np
    import visualization.panda.world as wd
    import modeling.geometric_model as gm

    base = wd.World(cam_pos=[1, 1, 1], lookat_pos=[0, 0, .1])
    # pipette tip mesh, shown with its default appearance
    pm_s = gm.GeometricModel("./meshes/p1000g.stl")
    pm_s.attach_to(base)
    # pipette body mesh: slightly scaled up, offset along z, and tinted blue
    pm_b_s = gm.GeometricModel("./meshes/p1000g_body.stl")
    pm_b_s.set_scale(scale=[1.03, 1.03, 1.01])
    pm_b_s.set_pos(np.array([0, 0, 0.1463]))
    pm_b_s.set_rgba(rgba=[.3, .4, .6, 1])
    pm_b_s.attach_to(base)
    base.run()
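# --- note (added; not part of the original script) ---------------------------
# The body mesh is rendered as a slightly inflated, z-offset, tinted shell over
# the tip mesh; set_scale/set_pos/set_rgba mutate the model in place before
# attach_to(base). The same calls can recolor the tip as well, e.g.
# (the rgba value here is illustrative):
#     pm_s.set_rgba(rgba=[.7, .7, .7, 1])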
| 31.75 | 61 | 0.651575 | 94 | 508 | 3.212766 | 0.446809 | 0.049669 | 0.066225 | 0.069536 | 0.192053 | 0 | 0 | 0 | 0 | 0 | 0 | 0.08134 | 0.177165 | 508 | 15 | 62 | 33.866667 | 0.641148 | 0 | 0 | 0 | 0 | 0 | 0.100592 | 0.047337 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.230769 | 0 | 0.230769 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
50c9be7f42be4933dbac60f94eabd7aff7e39c46 | 883 | py | Python | data/test/python/50c9be7f42be4933dbac60f94eabd7aff7e39c46testbroker.py | harshp8l/deep-learning-lang-detection | 2a54293181c1c2b1a2b840ddee4d4d80177efb33 | [
"MIT"
] | 84 | 2017-10-25T15:49:21.000Z | 2021-11-28T21:25:54.000Z | data/test/python/50c9be7f42be4933dbac60f94eabd7aff7e39c46testbroker.py | vassalos/deep-learning-lang-detection | cbb00b3e81bed3a64553f9c6aa6138b2511e544e | [
"MIT"
] | 5 | 2018-03-29T11:50:46.000Z | 2021-04-26T13:33:18.000Z | data/test/python/50c9be7f42be4933dbac60f94eabd7aff7e39c46testbroker.py | vassalos/deep-learning-lang-detection | cbb00b3e81bed3a64553f9c6aa6138b2511e544e | [
"MIT"
] | 24 | 2017-11-22T08:31:00.000Z | 2022-03-27T01:22:31.000Z | import sys
import feedbroker
from feedbroker import *
import hashlib  # used below; made explicit (the star import may also provide it)
import logging  # used below; made explicit (the star import may also provide it)

logging.basicConfig(level=logging.DEBUG)

# Keep handles to the originals before monkey-patching them below.
FeedConnOrig = FeedConn
FeedBrokerOrig = FeedBroker


class FeedBroker(FeedBrokerOrig):

    def initdb(self):
        pass  # skip database setup in tests


class FeedConn(FeedConnOrig):

    def auth(self, ident, hash):
        self.checkauth([{'identifier': str(ident), 'secret': 'secretsecret'}, ], hash)

    def checkauth(self, r, hash):
        akobj = r[0]
        # Python 2-era code: on Python 3 the formatted str would need .encode() before hashing
        akhash = hashlib.sha1('{0}{1}'.format(self.rand, akobj['secret'])).digest()
        self.idents.add(akobj['identifier'])
        logging.info('Auth success by {0}, {1}.'.format(akobj['identifier'], self.conn.addr))
        self.io_in(b'')

    def may_publish(self, chan):
        return True

    def may_subscribe(self, chan):
        return True


# Install the patched classes back onto the module.
feedbroker.FeedConn = FeedConn
feedbroker.FeedBroker = FeedBroker


def main():
    fb = FeedBroker()
    loop()  # event loop, presumably provided by feedbroker's star import
    return 0


if __name__ == '__main__':
    sys.exit(main())
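# --- note (added; not part of the original test) ------------------------------
# The structure above is classic monkey-patching: keep a reference to the
# original class, subclass it with the overridden behaviour, and assign the
# subclass back onto the module so later lookups resolve to the patch:
#
#     OrigThing = somemodule.Thing        # names here are illustrative
#     class Thing(OrigThing):
#         def method(self):
#             return "patched"
#     somemodule.Thing = Thing            # somemodule.Thing() is now patched
#
# Only lookups made *after* the assignment are affected; code that bound
# somemodule.Thing earlier still holds the original class.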
| 18.395833 | 87 | 0.706682 | 113 | 883 | 5.424779 | 0.495575 | 0.006525 | 0.026101 | 0.058728 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.009211 | 0.139298 | 883 | 47 | 88 | 18.787234 | 0.797368 | 0 | 0 | 0.066667 | 0 | 0 | 0.105442 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.2 | false | 0.033333 | 0.1 | 0.066667 | 0.466667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
50cc06cd882433493c8e0cdcbaffbfefb7d5d35a | 1,217 | py | Python | fooof/tests/test_data.py | varman-m/fooof | 6046c89bb3c87f30a8a368809a9d321c8c33e1a8 | [
"Apache-2.0"
] | null | null | null | fooof/tests/test_data.py | varman-m/fooof | 6046c89bb3c87f30a8a368809a9d321c8c33e1a8 | [
"Apache-2.0"
] | null | null | null | fooof/tests/test_data.py | varman-m/fooof | 6046c89bb3c87f30a8a368809a9d321c8c33e1a8 | [
"Apache-2.0"
] | null | null | null | """Tests for the fooof.data."""
from fooof.core.info import get_description
from fooof.data import *
###################################################################################################
###################################################################################################
def test_fooof_settings():

    settings = FOOOFSettings([], None, None, None, None)
    assert settings

    # Check that the object has the correct fields, given the object description
    settings_fields = get_description()['settings']
    for field in settings_fields:
        getattr(settings, field)
    assert True


def test_fooof_results():

    results = FOOOFResults([], [], None, None, [])
    assert results

    # Check that the object has the correct fields, given the object description
    results_fields = get_description()['results']
    for field in results_fields:
        getattr(results, field.strip('_'))
    assert True


def test_sim_params():

    sim_params = SimParams([1, 1], [10, 1, 1], 0.05)
    assert sim_params

    # Check that the object has the correct fields
    for field in ['aperiodic_params', 'gaussian_params', 'nlv']:
        getattr(sim_params, field)
    assert True
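# --- note (added; not part of the original tests) ------------------------------
# The getattr() loops work because the data objects are namedtuple-like, so a
# missing field raises AttributeError and fails the test. The same pattern in
# miniature (Point is illustrative, not a fooof object):
#
#     from collections import namedtuple
#     Point = namedtuple('Point', ['x', 'y'])
#     p = Point(1, 2)
#     for field in ['x', 'y']:
#         getattr(p, field)   # AttributeError here would fail the test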
| 29.682927 | 99 | 0.573541 | 132 | 1,217 | 5.143939 | 0.310606 | 0.066274 | 0.053019 | 0.079529 | 0.237113 | 0.237113 | 0.237113 | 0.237113 | 0.182622 | 0.182622 | 0 | 0.009018 | 0.179951 | 1,217 | 40 | 100 | 30.425 | 0.671343 | 0.181594 | 0 | 0.136364 | 0 | 0 | 0.063291 | 0 | 0 | 0 | 0 | 0 | 0.272727 | 1 | 0.136364 | false | 0 | 0.090909 | 0 | 0.227273 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
50cde846fc041e9e2ba6e83ee4ca4250a07327e0 | 6,430 | py | Python | csip/datasets/xview2.py | isaaccorley/contrastive-surface-image-pretraining | a918d4fd3b9cc61ec512af978fb4f086d3b46a70 | [
"MIT"
] | 1 | 2022-03-03T09:26:49.000Z | 2022-03-03T09:26:49.000Z | csip/datasets/xview2.py | isaaccorley/contrastive-surface-image-pretraining | a918d4fd3b9cc61ec512af978fb4f086d3b46a70 | [
"MIT"
] | 1 | 2022-03-09T08:50:01.000Z | 2022-03-09T08:51:11.000Z | csip/datasets/xview2.py | isaaccorley/contrastive-surface-image-pretraining | a918d4fd3b9cc61ec512af978fb4f086d3b46a70 | [
"MIT"
] | null | null | null | import glob
import os
import kornia.augmentation as K
import numpy as np
import pytorch_lightning as pl
import torch
import torchvision.transforms as T
from einops import rearrange
from PIL import Image
from torch.utils.data import DataLoader, Dataset
from torchvision.utils import draw_segmentation_masks
from csip.datasets.augmentations import default_augs
from csip.datasets.utils import dataset_split
class xView2(Dataset):

    classes = ["background", "no-damage", "minor-damage", "major-damage", "destroyed"]
    colormap = ["green", "blue", "orange", "red"]

    def __init__(self, root="data", split="train", transforms=None):
        self.root = root
        self.split = split
        self.transforms = transforms
        self.class2idx = {c: i for i, c in enumerate(self.classes)}
        self.files = self._load_files()

    def __getitem__(self, idx):
        files = self.files[idx]
        image1 = self._load_image(files["image1"])
        image2 = self._load_image(files["image2"])
        mask1 = self._load_target(files["mask1"])
        mask2 = self._load_target(files["mask2"])
        # stack the pre/post-disaster pair along a leading "time" dimension
        image = torch.stack(tensors=[image1, image2], dim=0)
        mask = torch.stack(tensors=[mask1, mask2], dim=0)
        sample = {"image": image, "mask": mask}

        if self.transforms is not None:
            sample = self.transforms(sample)

        return sample

    def __len__(self):
        return len(self.files)

    def _load_files(self):
        files = []
        image_root = os.path.join(self.root, self.split, "images")
        mask_root = os.path.join(self.root, self.split, "targets")
        images = glob.glob(os.path.join(image_root, "*.png"))
        basenames = [os.path.basename(f) for f in images]
        # strip the "_pre_disaster"/"_post_disaster" suffix to pair the scenes
        basenames = ["_".join(f.split("_")[:-2]) for f in basenames]
        for name in sorted(set(basenames)):
            image1 = os.path.join(image_root, f"{name}_pre_disaster.png")
            image2 = os.path.join(image_root, f"{name}_post_disaster.png")
            mask1 = os.path.join(mask_root, f"{name}_pre_disaster_target.png")
            mask2 = os.path.join(mask_root, f"{name}_post_disaster_target.png")
            files.append(dict(image1=image1, image2=image2, mask1=mask1, mask2=mask2))
        return files

    def _load_image(self, path):
        with Image.open(path) as img:
            array = np.array(img.convert("RGB"))
            tensor = torch.from_numpy(array)
            tensor = tensor.permute((2, 0, 1))  # HWC -> CHW
            tensor = tensor.to(torch.float)
            return tensor

    def _load_target(self, path):
        with Image.open(path) as img:
            array = np.array(img.convert("L"))
            tensor = torch.from_numpy(array)
            tensor = tensor.to(torch.long)
            return tensor


class xView2DataModule(pl.LightningDataModule):

    def __init__(
        self,
        root,
        batch_size=2,
        num_workers=0,
        num_prefetch=2,
        val_split_pct=0.2,
        patch_size=512,
        augmentations=default_augs(),
    ) -> None:
        super().__init__()
        self.root = root
        self.batch_size = batch_size
        self.num_workers = num_workers
        self.num_prefetch = num_prefetch
        self.val_split_pct = val_split_pct
        self.patch_size = patch_size
        self.augmentations = augmentations
        self.random_crop = K.AugmentationSequential(
            K.RandomCrop((self.patch_size, self.patch_size), p=1.0),
            data_keys=["input", "mask"],
        )

    def preprocess(self, sample):
        sample["image"] = sample["image"] / 255
        sample["image"] = torch.clamp(sample["image"], min=0.0, max=1.0)
        # fold the pre/post pair into the channel dim: (2, 3, H, W) -> (6, H, W)
        sample["image"] = rearrange(sample["image"], "t c h w -> (t c) h w")
        # supervise on the post-disaster mask only
        sample["mask"] = sample["mask"][1, ...]
        sample["mask"] = rearrange(sample["mask"], "h w -> () h w")
        return sample

    def crop(self, sample):
        sample["mask"] = sample["mask"].to(torch.float)
        sample["image"], sample["mask"] = self.random_crop(
            sample["image"], sample["mask"]
        )
        sample["mask"] = sample["mask"].to(torch.long)
        sample["image"] = rearrange(sample["image"], "() c h w -> c h w")
        sample["mask"] = rearrange(sample["mask"], "() c h w -> c h w")
        return sample

    def setup(self, stage=None):
        transforms = T.Compose([self.preprocess, self.crop])
        test_transforms = T.Compose([self.preprocess])
        dataset = xView2(self.root, split="train", transforms=transforms)
        self.train_dataset, self.val_dataset, _ = dataset_split(
            dataset, val_pct=self.val_split_pct, test_pct=0.0
        )
        self.test_dataset = xView2(self.root, split="test", transforms=test_transforms)

    def train_dataloader(self):
        return DataLoader(
            self.train_dataset,
            batch_size=self.batch_size,
            num_workers=self.num_workers,
            prefetch_factor=self.num_prefetch,
            shuffle=True,
        )

    def val_dataloader(self):
        return DataLoader(
            self.val_dataset,
            batch_size=1,
            num_workers=self.num_workers,
            prefetch_factor=self.num_prefetch,
            shuffle=False,
        )

    def test_dataloader(self):
        return DataLoader(
            self.test_dataset,
            batch_size=1,
            num_workers=self.num_workers,
            prefetch_factor=self.num_prefetch,
            shuffle=False,
        )

    def on_after_batch_transfer(self, batch, dataloader_idx):
        if self.trainer.training:
            if self.augmentations is not None:
                # kornia expects float masks; cast, augment, cast back
                batch["mask"] = batch["mask"].to(torch.float)
                batch["image"], batch["mask"] = self.augmentations(
                    batch["image"], batch["mask"]
                )
                batch["mask"] = batch["mask"].to(torch.long)

        batch["mask"] = rearrange(batch["mask"], "b () h w -> b h w")
        # split the stacked channels back into the (pre, post) image pair
        batch["image"] = (batch["image"][:, :3, ...], batch["image"][:, 3:, ...])
        return batch

    def plot(self, x, y):
        x = (x.cpu() * 255).to(torch.uint8)
        y = y.cpu().unsqueeze(dim=0)
        classes = torch.tensor([1, 2, 3, 4])
        class_masks = y == classes[:, None, None]
        image = draw_segmentation_masks(
            x, class_masks, alpha=0.5, colors=["green", "blue", "orange", "red"]
        )
        return image
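# --- usage sketch (added; not part of the original module) --------------------
# Minimal wiring of the datamodule above; the root path is an assumption and
# no training is performed. num_workers is kept > 0 because the loaders pass
# prefetch_factor, which PyTorch only accepts alongside worker processes.
if __name__ == "__main__":
    dm = xView2DataModule(root="data/xview2", batch_size=2, num_workers=2, patch_size=512)
    dm.setup()
    batch = next(iter(dm.train_dataloader()))
    # before on_after_batch_transfer: image is (B, 6, h, w), mask is (B, 1, h, w)
    print(batch["image"].shape, batch["mask"].shape)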
| 35.921788 | 87 | 0.591602 | 794 | 6,430 | 4.630982 | 0.202771 | 0.032635 | 0.019037 | 0.018493 | 0.306772 | 0.184933 | 0.153386 | 0.107696 | 0.090835 | 0.090835 | 0 | 0.014976 | 0.273095 | 6,430 | 178 | 88 | 36.123596 | 0.771716 | 0 | 0 | 0.155844 | 0 | 0 | 0.080404 | 0.016796 | 0 | 0 | 0 | 0 | 0 | 1 | 0.097403 | false | 0 | 0.084416 | 0.025974 | 0.285714 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
50d00645fd2b1d0aadda043128931ed03ed977a7 | 3,677 | py | Python | tests/integration/test_archiver.py | ewuerger/dbwily | bd606d480a6dbc920ede3b66fbf85fb7a5661bcd | [
"Apache-2.0"
] | null | null | null | tests/integration/test_archiver.py | ewuerger/dbwily | bd606d480a6dbc920ede3b66fbf85fb7a5661bcd | [
"Apache-2.0"
] | null | null | null | tests/integration/test_archiver.py | ewuerger/dbwily | bd606d480a6dbc920ede3b66fbf85fb7a5661bcd | [
"Apache-2.0"
] | null | null | null | import pathlib
import pytest
from git import Actor, Repo
from wily.archivers.git import DirtyGitRepositoryError, GitArchiver
from wily.config import DEFAULT_CONFIG
def test_git_end_to_end(tmpdir):
"""
Complete end-to-end test of the git integration
"""
repo = Repo.init(path=tmpdir)
tmppath = pathlib.Path(tmpdir)
index = repo.index
author = Actor("An author", "author@example.com")
committer = Actor("A committer", "committer@example.com")
# First commit
with open(tmppath / ".gitignore", "w") as ignore:
ignore.write(".wily/")
index.add([".gitignore"])
commit1 = index.commit("commit1", author=author, committer=committer)
# Second commit
with open(tmppath / "test.py", "w") as file1:
file1.write("print(1)")
index.add(["test.py"])
commit2 = index.commit("commit2", author=author, committer=committer)
repo.close()
config = DEFAULT_CONFIG
config.path = tmpdir
archiver = GitArchiver(config)
assert archiver.config == config
revisions = archiver.revisions(tmpdir, 3)
assert len(revisions) == 2
assert revisions[0].message == "commit2"
assert revisions[0].author_email == "author@example.com"
assert revisions[0].author_name == "An author"
assert (
revisions[0].key in commit2.name_rev
and revisions[0].key not in commit1.name_rev
)
assert revisions[1].message == "commit1"
assert revisions[1].author_email == "author@example.com"
assert revisions[1].author_name == "An author"
assert (
revisions[1].key in commit1.name_rev
and revisions[1].key not in commit2.name_rev
)
checkout = archiver.checkout(revisions[1], None)
assert not (tmppath / "test.py").exists()
finish = archiver.finish()
assert (tmppath / "test.py").exists()
def test_dirty_git(tmpdir):
""" Check that repository fails to initialise if unchecked files are in the repo """
repo = Repo.init(path=tmpdir)
tmppath = pathlib.Path(tmpdir)
index = repo.index
author = Actor("An author", "author@example.com")
committer = Actor("A committer", "committer@example.com")
# First commit
with open(tmppath / ".gitignore", "w") as ignore:
ignore.write(".wily/")
index.add([".gitignore"])
commit1 = index.commit("commit1", author=author, committer=committer)
# Write a test file to the repo
with open(tmppath / "blah.py", "w") as ignore:
ignore.write("*.py[co]\n")
index.add(["blah.py"])
repo.close()
config = DEFAULT_CONFIG
config.path = tmpdir
with pytest.raises(DirtyGitRepositoryError):
archiver = GitArchiver(config)
archiver.revisions(tmpdir, 2)
def test_detached_head(tmpdir):
""" Check that repo can initialize in detached head state"""
repo = Repo.init(path=tmpdir)
tmppath = pathlib.Path(tmpdir)
index = repo.index
author = Actor("An author", "author@example.com")
committer = Actor("A committer", "committer@example.com")
# First commit
with open(tmppath / "test.py", "w") as ignore:
ignore.write("print('hello world')")
index.add(["test.py"])
commit1 = index.commit("commit1", author=author, committer=committer)
# Second commit
with open(tmppath / "test.py", "w") as ignore:
ignore.write("print('hello world')\nprint(1)")
index.add(["test.py"])
commit2 = index.commit("commit2", author=author, committer=committer)
repo.git.checkout(commit2.hexsha)
repo.close()
config = DEFAULT_CONFIG
config.path = tmpdir
archiver = GitArchiver(config)
assert archiver.revisions(tmpdir, 1) is not None
| 28.952756 | 88 | 0.658961 | 461 | 3,677 | 5.212581 | 0.203905 | 0.037453 | 0.037453 | 0.043695 | 0.594257 | 0.594257 | 0.557636 | 0.52268 | 0.50437 | 0.50437 | 0 | 0.012341 | 0.20669 | 3,677 | 126 | 89 | 29.18254 | 0.81145 | 0.075333 | 0 | 0.542169 | 0 | 0 | 0.141372 | 0.018711 | 0 | 0 | 0 | 0 | 0.156627 | 1 | 0.036145 | false | 0 | 0.060241 | 0 | 0.096386 | 0.036145 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
50d20b21dc04c24519d3d04100287d3b1f62877c | 7,300 | py | Python | athina/configuration.py | athina-edu/athina | d8c879df4e1c3dec8b206373823a19fbb1adfc4a | [
"MIT"
] | 1 | 2020-01-27T01:54:47.000Z | 2020-01-27T01:54:47.000Z | athina/configuration.py | athina-edu/athina | d8c879df4e1c3dec8b206373823a19fbb1adfc4a | [
"MIT"
] | 111 | 2019-02-23T00:14:46.000Z | 2022-02-16T14:53:13.000Z | athina/configuration.py | athina-edu/athina | d8c879df4e1c3dec8b206373823a19fbb1adfc4a | [
"MIT"
] | null | null | null | # Hyper object that retains all execution parameters shared between modules
import glob
import multiprocessing
import os
import shutil
from datetime import datetime
import yaml
__all__ = ('Configuration',)
class Configuration:
logger = None
config_dir = "/tmp/athina_empty"
config_filename = "test_assignment"
auth_token = ""
course_id = 1
assignment_id = 1
total_points = 100
enforce_due_date = True
test_scripts = ["bash test", "bash test"] # this is defined as such for testing only
test_weights = [0.8, 0.2]
moss_id = 1
moss_lang = "C"
moss_pattern = "*.c"
moss_publish = False
check_plagiarism_hour = 1
git_username = "test"
git_password = "test"
group_assignment = False
same_url_limit = 1
submit_results_as_file = True
max_file_size = 1024
test_timeout = 90
no_repo = False
pass_extra_params = False
grade_update_frequency = 24
git_url = 'github.com'
processes = 1
due_date = datetime(2100, 1, 1, 0, 0)
use_docker = False
canvas_url = "www.instructure.com"
grade_publish = True
print_debug_msgs = False
docker_use_seccomp = True
docker_use_net_admin = False
use_webhook = False
gitlab_check_repo_is_private = False
docker_no_internet = False
# Set on the fly
db_filename = ""
athina_student_code_dir = ""
athina_test_tmp_dir = ""
extra_params = ""
athina_web_url = None
# global configs read through environment vars
global_memory_limit = 80
docker_memory_limit = "2g"
def __init__(self, logger):
self.logger = logger
self.default_dir()
@staticmethod
def find_yaml(directory):
if os.path.isdir(directory):
# Find a cfg file in the directory
try:
cfg_file = glob.glob('%s*.yaml' % directory)[0]
except IndexError:
cfg_file = directory # this will fail later on but we have done all that we can
else:
cfg_file = directory
return cfg_file
@staticmethod
def default_dir():
# mainly used for testing
os.makedirs("/tmp/athina_empty", exist_ok=True)
os.chmod("/tmp/athina_empty", 0o777)
@staticmethod
def in_docker():
""" Returns: True if running in a Docker container, else False """
with open('/proc/1/cgroup', 'rt') as ifh:
return 'docker' in ifh.read()
@staticmethod
def check_dependencies(packages: list):
# Verify requirements are available on OS
for software in packages:
if shutil.which(software) is None:
raise FileNotFoundError("%s is not available on the host system." % software)
return True
# This is not a static function since it accesses class items passed as parameters: configvar
def load_value(self, config, key, configvar):
value = config.get(key, None)
if value is not None:
setattr(self, key, value)
else:
pass # The default value as set in this configuration.py file remains
def load_configuration(self, directory):
# Load Configuration file
try:
with open(self.find_yaml(directory), 'r') as stream:
config = yaml.safe_load(stream)
except (yaml.YAMLError, IsADirectoryError) as exc:
self.logger.logger.error(exc)
raise yaml.YAMLError(exc)
# Global variables through environment
# Global memory limit in percentage that forked processes must obey.
self.global_memory_limit = int(os.environ.get('GLOBAL_MEMORY_LIMIT', 80))
# Max memory that can be used by docker in docker notation, 1m, 2g etc.
self.docker_memory_limit = os.environ.get('DOCKER_MEMORY_LIMIT', "2g")
# Read Configuration file
self.config_dir = os.path.dirname(directory)
self.config_filename = os.path.split(self.find_yaml(directory))[1] # cfg filename or dir name
# Set new log file
self.logger.set_assignment_log_file("%s/%s.log" % (self.config_dir, self.config_filename))
# Load arguments from config
self.load_value(config, 'print_debug_msgs', self.print_debug_msgs)
if self.print_debug_msgs:
self.logger.set_debug(True)
self.logger.logger.info("Reading %s in %s" % (self.config_filename, self.config_dir))
self.load_value(config, 'auth_token', self.auth_token)
self.load_value(config, 'course_id', self.course_id)
self.load_value(config, 'assignment_id', self.assignment_id)
self.load_value(config, 'total_points', self.total_points)
self.load_value(config, 'enforce_due_date', self.enforce_due_date)
self.load_value(config, 'test_scripts', self.test_scripts)
self.load_value(config, 'test_weights', self.test_weights)
self.load_value(config, 'moss_id', self.moss_id)
self.load_value(config, 'moss_lang', self.moss_lang)
self.load_value(config, 'moss_pattern', self.moss_pattern)
self.load_value(config, 'moss_publish', self.moss_publish)
self.load_value(config, 'git_username', self.git_username)
self.load_value(config, 'git_password', self.git_password)
self.load_value(config, 'group_assignment', self.group_assignment)
self.load_value(config, 'same_url_limit', self.same_url_limit)
self.load_value(config, 'check_plagiarism_hour', self.check_plagiarism_hour)
self.load_value(config, 'submit_results_as_file', self.submit_results_as_file)
self.load_value(config, 'max_file_size', self.max_file_size)
self.max_file_size = self.max_file_size * 1024 # Convert KB to bytes
self.load_value(config, 'test_timeout', self.test_timeout)
self.load_value(config, 'no_repo', self.no_repo)
self.load_value(config, 'pass_extra_params', self.pass_extra_params)
self.load_value(config, 'grade_update_frequency', self.grade_update_frequency)
self.grade_update_frequency -= 1
self.load_value(config, 'git_url', self.git_url)
self.load_value(config, 'canvas_url', self.canvas_url)
self.load_value(config, 'grade_publish', self.grade_publish)
self.load_value(config, 'use_docker', self.use_docker)
self.load_value(config, 'docker_use_seccomp', self.docker_use_seccomp)
self.load_value(config, 'docker_use_net_admin', self.docker_use_net_admin)
self.load_value(config, 'docker_no_internet', self.docker_no_internet)
self.load_value(config, 'use_webhook', self.use_webhook)
self.load_value(config, 'gitlab_check_repo_is_private', self.gitlab_check_repo_is_private)
self.processes = multiprocessing.cpu_count()
# If no repo then definitely pass extra params
if self.no_repo:
self.pass_extra_params = True
# If running from within a container then firejail is meaningless
if self.in_docker():
self.use_docker = True
# Verify software dependencies
packages = ["timeout", "git"]
if self.use_docker is True:
packages.append("docker")
else:
packages.append("firejail")
self.check_dependencies(packages)
| 38.020833 | 102 | 0.670548 | 968 | 7,300 | 4.794421 | 0.244835 | 0.063995 | 0.089636 | 0.131006 | 0.163111 | 0.050851 | 0.026718 | 0.012066 | 0.012066 | 0.012066 | 0 | 0.008822 | 0.239178 | 7,300 | 191 | 103 | 38.219895 | 0.826792 | 0.136027 | 0 | 0.075862 | 0 | 0 | 0.118172 | 0.014811 | 0 | 0 | 0 | 0 | 0 | 1 | 0.048276 | false | 0.041379 | 0.041379 | 0 | 0.42069 | 0.02069 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
50d3ab7476c523575810a3e5c057906ca472dfdf | 286 | py | Python | 1/1.py | rixon/aoc2021 | 00527b45f14d404c9dca34937079d6d2f2cf75d7 | [
"WTFPL"
] | null | null | null | 1/1.py | rixon/aoc2021 | 00527b45f14d404c9dca34937079d6d2f2cf75d7 | [
"WTFPL"
] | null | null | null | 1/1.py | rixon/aoc2021 | 00527b45f14d404c9dca34937079d6d2f2cf75d7 | [
"WTFPL"
] | null | null | null | #!/opt/local/bin/python
f = open('1a.txt', 'r')
l = []
for n in f:
l.append(int(n))
f.closed
last = 0
count = 0
for n in l:
#print(n)
if not last == 0:
if n > last:
count += 1
#print("greater")
#else:
# print("Skipped first")
last = n
print(count)
| 11 | 27 | 0.520979 | 49 | 286 | 3.040816 | 0.55102 | 0.053691 | 0.080537 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.024752 | 0.293706 | 286 | 25 | 28 | 11.44 | 0.712871 | 0.262238 | 0 | 0 | 0 | 0 | 0.034146 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0.076923 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
50d3f89cfcab8f952bb87adfa339b131c732e304 | 388 | py | Python | II rok/I semestr/MIA/#Code 4/A. Gleb And Pizza.py | antonitomaszewski/Studia | ff2fa59e25cf3f5c86b59de9994b80a502ec1e7b | [
"MIT"
] | null | null | null | II rok/I semestr/MIA/#Code 4/A. Gleb And Pizza.py | antonitomaszewski/Studia | ff2fa59e25cf3f5c86b59de9994b80a502ec1e7b | [
"MIT"
] | null | null | null | II rok/I semestr/MIA/#Code 4/A. Gleb And Pizza.py | antonitomaszewski/Studia | ff2fa59e25cf3f5c86b59de9994b80a502ec1e7b | [
"MIT"
] | null | null | null | pobierz = lambda : list(map(int, input().split()))
def main():
r, d = pobierz()
[n] = pobierz()
l = r-d
def isTasty(xi, yi, ri):
dist = lambda xi, yi : pow(xi**2 + yi**2, .5)
di = dist(xi, yi)
dl = di-ri
dr = di+ri
return dl >= l and dr <= r
print(sum(map(lambda xyr : isTasty(*xyr), [pobierz() for i in range(n)])))
main()
| 21.555556 | 78 | 0.489691 | 62 | 388 | 3.064516 | 0.532258 | 0.063158 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.011364 | 0.319588 | 388 | 17 | 79 | 22.823529 | 0.708333 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.153846 | false | 0 | 0 | 0 | 0.230769 | 0.076923 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
50d6572475b0aebb9532f0620c7d49b9b3c9ac66 | 447 | py | Python | linear_regression/linear_regression_poly.py | opplieam/Udemy-Lazy | 89e757152a87603d630593e13b0db4d5422222fc | [
"Apache-2.0"
] | null | null | null | linear_regression/linear_regression_poly.py | opplieam/Udemy-Lazy | 89e757152a87603d630593e13b0db4d5422222fc | [
"Apache-2.0"
] | null | null | null | linear_regression/linear_regression_poly.py | opplieam/Udemy-Lazy | 89e757152a87603d630593e13b0db4d5422222fc | [
"Apache-2.0"
] | null | null | null | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
df = pd.read_csv('./data/data_poly.csv', header=None, names=['x', 'y'])
df['b'] = 1
df['poly'] = df['x'] * df['x']
X = df[['b', 'x', 'poly']].values
Y = df['y'].values
plt.scatter(X[:, 1], Y)
plt.show()
# calculate weights
w = np.linalg.solve(np.dot(X.T, X), np.dot(X.T, Y))
y_hat = np.dot(X, w)
plt.scatter(X[:, 1], Y)
plt.plot(sorted(X[:, 1]), sorted(y_hat))
plt.show() | 21.285714 | 71 | 0.599553 | 89 | 447 | 2.966292 | 0.382022 | 0.022727 | 0.068182 | 0.090909 | 0.121212 | 0.121212 | 0 | 0 | 0 | 0 | 0 | 0.01039 | 0.138702 | 447 | 21 | 72 | 21.285714 | 0.675325 | 0.038031 | 0 | 0.266667 | 0 | 0 | 0.083916 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.2 | 0 | 0.2 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
50dab888c31c96d59e83e825951242229f8cf7db | 1,316 | py | Python | app/username/views.py | IoTServ/FlaskSimpleCMS | db0fc4464c6d514db14972156ca3e002a60a4876 | [
"MIT"
] | null | null | null | app/username/views.py | IoTServ/FlaskSimpleCMS | db0fc4464c6d514db14972156ca3e002a60a4876 | [
"MIT"
] | 4 | 2020-08-29T16:11:12.000Z | 2022-03-12T00:47:03.000Z | app/username/views.py | IoTServ/FlaskSimpleCMS | db0fc4464c6d514db14972156ca3e002a60a4876 | [
"MIT"
] | null | null | null | # coding: utf-8
from StringIO import StringIO
from flask import send_file,redirect,url_for,flash
from . import username
from flask import render_template,request
from flask_login import login_required
from ..models import User,Article
@username.route('/<int:id>')
def detials(id):
user=User.query.get_or_404(id)
if user.confirmed==False:
flash('用户未确认邮箱!','danger')
return redirect(url_for('main.index'))
if user.banded==True:
flash('用户由于某种原因处于禁止状态!','danger')
return redirect(url_for('main.index'))
page = request.args.get('page', 1, type=int)
pagination = Article.query.filter_by(author_id=id).order_by(Article.update_time.desc()).paginate(
page, per_page=3,
error_out=False)
articles = pagination.items
return render_template('username/user_info.html', user=user, articles=articles,
pagination=pagination, endpoint='.detials',id=id)
@username.route('/qrcode/<int:id>')
def qrcode(id):
import qrcode
img = qrcode.make("http://www.jiakaozuche.com/zhuye/"+str(id))
#img.save("./test.png")
return _serve_pil_image(img)
def _serve_pil_image(pil_img):
img_io = StringIO()
pil_img.save(img_io, 'PNG')
img_io.seek(0)
return send_file(img_io, mimetype='image/png', cache_timeout=0) | 32.097561 | 101 | 0.68997 | 185 | 1,316 | 4.740541 | 0.459459 | 0.022805 | 0.047891 | 0.052452 | 0.079818 | 0.079818 | 0.079818 | 0 | 0 | 0 | 0 | 0.007333 | 0.170973 | 1,316 | 41 | 102 | 32.097561 | 0.796517 | 0.026596 | 0 | 0.0625 | 0 | 0 | 0.125098 | 0.017983 | 0 | 0 | 0 | 0 | 0 | 1 | 0.09375 | false | 0 | 0.21875 | 0 | 0.46875 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
50db76451a64bb6782a83f427cb87489cf608970 | 7,153 | py | Python | common/plots2D.py | noamroze/moser_flow | 0a9fe44cd4a408371222c557a5a49c0ad11617e8 | [
"MIT"
] | 5 | 2022-03-19T14:10:52.000Z | 2022-03-30T13:11:24.000Z | common/plots2D.py | noamroze/moser_flow | 0a9fe44cd4a408371222c557a5a49c0ad11617e8 | [
"MIT"
] | null | null | null | common/plots2D.py | noamroze/moser_flow | 0a9fe44cd4a408371222c557a5a49c0ad11617e8 | [
"MIT"
] | 1 | 2022-03-25T14:52:50.000Z | 2022-03-25T14:52:50.000Z | import torch
import matplotlib.pyplot as plt
import matplotlib.colors as colors
import numpy as np
import traceback
from torchdiffeq import odeint
from common.estimators import exact_divergence
from common.utils import run_func_in_batches
MAX_BATCH_SIZE = 100000
def create_fig():
plt.tight_layout()
fig = plt.figure(figsize=(5, 5), frameon=False)
fig.patch.set_visible(False)
ax = fig.add_axes([0, 0, 1, 1])
remove_ticks(ax)
ax.axis('off')
return fig, ax
def plot_vt_divergence(dataset, model, n_samples, n_timestamps):
samples = dataset[np.random.choice(len(dataset), n_samples)][0]
timestamps = torch.linspace(1, 0, n_timestamps, device=model.device)
results = odeint(model.ode_func, torch.Tensor(samples).to(model.device), timestamps, **model.config['ode_args'])
n_cols=3
n_rows = int(np.ceil(n_samples/n_cols))
div_vt = np.empty((n_samples, n_timestamps))
for i, t in enumerate(timestamps):
vt = lambda x: model.ode_func(t, x)
div_vt[:, i] = exact_divergence(vt, results[i], create_graph=False).detach().cpu().numpy()
results = results.detach().cpu().numpy()
n_cols = 3
n_rows = int(np.ceil(n_samples / n_cols))
fig, axes = plt.subplots(n_rows, n_cols)
for i in range(n_samples):
ax2 = axes[i // n_cols, i%n_cols]
ax2.plot(timestamps.cpu().numpy(), div_vt[i], label="start point=(%2.3f, %2.3f)" %(float(samples[i, 0]), float(samples[i, 1])))
return fig, axes
def plot_density_comparison(model, n_pixels, min_x, max_x, min_y, max_y, quad_mesh=None, ode_density=None, **kwargs):
"""plotting 2D density"""
fig1, fig2, fig3, fig4 = [plt.figure() for _ in range(4)]
ax1, ax2, ax3, ax4 = [fig.subplots() for fig in [fig1, fig2, fig3, fig4]]
for ax in (ax1, ax2, ax3, ax4):
remove_ticks(ax)
fig1, ax1, qm = plot_2D_func(fig1, ax1, model.mu_plus, model.device, min_x, max_x, min_y, max_y, N=n_pixels, quad_mesh=quad_mesh)
ax1.set_title("$\mu_+$")
fig2, ax2, qm2 = plot_2D_func(fig2, ax2, lambda x: model.mu_minus(x) + 1e-8, model.device, min_x, max_x, min_y, max_y, N=n_pixels, vmin=0, vmax=0.1)
ax2.set_title("$\mu_-$")
fig2.colorbar(qm2)
x = torch.linspace(min_x, max_x, n_pixels, dtype=torch.float32)
y = torch.linspace(min_y, max_y, n_pixels, dtype=torch.float32)
grid_x, grid_y = torch.meshgrid(x, y)
data = torch.cat([grid_x.flatten().unsqueeze(1), grid_y.flatten().unsqueeze(1)], 1).to(model.device)
if ode_density is None:
try:
f = lambda x: torch.exp(model.direct_log_likelihood(x)).detach()
ode_density = run_func_in_batches(f, data, max_batch_size=100000, out_dim=1).cpu().data.numpy()
except Exception as err:
traceback.print_exc()
return (fig1, fig2, fig3, fig4), (ax1, ax2, ax3)
ode_density = ode_density.reshape(n_pixels, n_pixels)
quad_mesh = ax3.pcolormesh(grid_x, grid_y, ode_density, norm=qm.norm, shading="auto")
ax3.set_title("density by ode")
ax3.invert_yaxis()
diff = np.abs(ode_density - model.density(data).detach().cpu().numpy().reshape(n_pixels, n_pixels))
qm = ax4.pcolormesh(grid_x, grid_y, diff, shading="auto", vmin=0, vmax=0.1)
ax4.set_title("densities difference")
ax4.invert_yaxis()
print(torch.max(model.density(data)))
fig4.colorbar(qm)
return (fig1, fig2, fig3, fig4), (ax1, ax2, ax3, ax4), p
def plot_moser_vector_field(model, min_x, max_x, min_y, max_y, N=50, ax=None):
grid_x = np.linspace(min_x, max_x, N)
grid_y = np.linspace(min_y, max_y, N)
x = np.empty((N**2, 2))
for i in range(N):
for j in range(N):
x[i * N + j, 0] = grid_x[i]
x[i * N + j, 1] = grid_y[j]
if ax is None:
fig, ax = plt.subplots()
color='b'
else:
fig = None
color = 'y'
f_x = model.u(torch.Tensor(x).to(model.device)).cpu().detach().numpy()
ax.quiver(x[:, 0], x[:, 1], f_x[:, 0], f_x[:, 1], color=color)
ax.set_title("grad(a) function")
ax.set(xlim=(min_x, max_x), ylim=(min_y, max_y))
return fig, ax
def plot_ode_vector_field(model, min_x, max_x, min_y, max_y, N=50, k=5):
fig = plt.figure()
n_rows = 2
n_cols = int(np.ceil(k/n_rows))
x = np.empty((N**2, 2))
grid_x = np.linspace(min_x, max_x, N)
grid_y = np.linspace(min_y, max_y, N)
for i in range(N):
for j in range(N):
x[i * N + j, 0] = grid_x[i]
x[i * N + j, 1] = grid_y[j]
for i in range(k):
t = i / (k - 1)
f_x = model.ode_func(t, torch.Tensor(x).to(model.device)).cpu().detach().numpy()
ax = fig.add_subplot(n_rows, n_cols, i + 1)
ax.quiver(x[:, 0], x[:, 1], f_x[:, 0], f_x[:, 1])
ax.set_title("time %s" %t)
ax.set(xlim=(min_x, max_x), ylim=(min_y, max_y))
ax.invert_yaxis()
fig.suptitle("ode function over different times")
return fig, ax
def plot_2D_func(fig, ax, func, device, min_x, max_x, min_y, max_y, quad_mesh=None, N=100, plot_type="pcolormesh", calc_integral=False, logscale=False, **kwargs):
if fig is None:
assert ax is None
fig, ax = plt.subplots()
ax.axis('equal')
x = np.linspace(min_x, max_x, N, dtype=np.float32)
y = np.linspace(min_y, max_y, N, dtype=np.float32)
x, y = np.meshgrid(x, y)
x1 = x.flatten()
y1 = y.flatten()
points = torch.FloatTensor(np.concatenate([x1[None].T, y1[None].T], axis=1).astype("float32")).to(device)
z = run_func_in_batches(func, points, max_batch_size=100000, out_dim=1)[:, 0].detach().cpu().numpy()
z = z.reshape(N, N)
torch.cuda.empty_cache()
if plot_type == "pcolormesh":
if logscale:
norm = colors.LogNorm(vmin=1e-8, vmax=1e-1)
else:
norm = quad_mesh.norm if quad_mesh is not None else None
out = ax.pcolormesh(x, y, z, norm=norm, shading="auto", **kwargs)
ax.set(xlim=(min_x, max_x), ylim=(min_y, max_y))
if calc_integral:
integral=np.sum(z) * (2./N) ** 2
ax.text(min_x, max_y + 0.3, "integral=%2.3f" %integral, bbox=dict(facecolor='red', alpha=0.5))
ax.invert_yaxis()
elif plot_type == "image":
out = ax.imshow(z, **kwargs)
# ax.invert_xaxis()
ax.axis('off')
plt.tight_layout()
else:
raise NotImplementedError
return fig, ax, out
def plot_samples(fig, ax, samples, x_lim, y_lim, **kwargs):
if fig is None:
assert ax is None
fig, ax = plt.subplots()
ax.axis('equal')
if isinstance(samples, torch.Tensor):
samples = samples.numpy()
if "quad_mesh" in kwargs:
quad_mesh = kwargs.pop("quad_mesh")
norm = quad_mesh.norm
else:
norm = None
_, _, _, args = ax.hist2d(samples[:, 0], samples[:, 1], range=[x_lim, y_lim], density=True, norm=norm, **kwargs)
remove_ticks(ax)
ax.set(xlim=x_lim, ylim=y_lim)
ax.invert_yaxis()
return fig, ax, args
def remove_ticks(ax):
ax.get_xaxis().set_ticks([])
ax.get_yaxis().set_ticks([])
ax.axis('equal')
ax.axis('off')
| 38.047872 | 162 | 0.618622 | 1,184 | 7,153 | 3.55152 | 0.184122 | 0.013317 | 0.023306 | 0.024732 | 0.297503 | 0.239477 | 0.226159 | 0.208561 | 0.184304 | 0.182878 | 0 | 0.029459 | 0.221725 | 7,153 | 187 | 163 | 38.251337 | 0.725885 | 0.005312 | 0 | 0.304348 | 0 | 0 | 0.034182 | 0 | 0 | 0 | 0 | 0 | 0.012422 | 1 | 0.049689 | false | 0 | 0.049689 | 0 | 0.149068 | 0.012422 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
50dcf211fbf13088a491d0a9810044c367561c6f | 5,182 | py | Python | main.py | Anurag-Mnda/Id-card-Genrator | 03ec43a0b3e906ec58c5b6fa194345395bcaf8c8 | [
"BSD-2-Clause"
] | 1 | 2022-02-04T15:13:00.000Z | 2022-02-04T15:13:00.000Z | main.py | Anurag-Mnda/Id-card-Genrator | 03ec43a0b3e906ec58c5b6fa194345395bcaf8c8 | [
"BSD-2-Clause"
] | null | null | null | main.py | Anurag-Mnda/Id-card-Genrator | 03ec43a0b3e906ec58c5b6fa194345395bcaf8c8 | [
"BSD-2-Clause"
] | null | null | null | import csv
import png
import pyqrcode
from tkinter import *
from datetime import *
from time import strftime
date_now = date.today()
with open('index.txt', 'r') as f:
index = [int(i) for i in f.read().split()][-1]
def time():
time_string = strftime('%H:%M:%S %p')
time_now.config(text=time_string)
time_now.after(1000, time)
def id_card():
global index
index += 1
with open('index.txt', 'a') as f:
f.write('%d' % index + '\n')
data = [i for i in csv.reader(open('data.csv'))]
card_window = Toplevel()
card_window.title("ID card")
card_window.geometry("300x150")
name = Label(card_window, text="Name:")
name.grid(row=1, column=0, ipadx="30")
Label(card_window, text=data[index-1][0]).grid(row=1, column=1)
course = Label(card_window, text="Course:")
course.grid(row=2, column=0)
Label(card_window, text=data[index-1][1]).grid(row=2, column=1)
form_no = Label(card_window, text="Roll No:")
form_no.grid(row=3, column=0)
Label(card_window, text=data[index-1][3]).grid(row=3, column=1)
contact_no = Label(card_window, text="Contact No:")
contact_no.grid(row=4, column=0)
Label(card_window, text=data[index-1][4]).grid(row=4, column=1)
Name = data[index-1][0]
RollNo = data[index-1][3]
Batch = data[index-1][1]
ContactNo = data[index-1][4]
dic = f"Name: {Name} \nRoll No: {RollNo} \nBatch: {Batch} \nContact No: {ContactNo}"
pyqrcode.create(dic).png(f"{Name}.png", scale=2)
qr_code = PhotoImage(file=f"{Name}.png")
Button(card_window, image=qr_code).grid(row=0, rowspan=4, column=2)
card_window.mainloop()
def login_info():
data_window = Toplevel()
data_window.title("Enter the data")
data_window.config(padx=20, pady=20, background="#219F94")
heading = Label(data_window, text="Information", bg="#219F94")
heading.grid(row=0, column=1, columnspan=2)
name = Label(data_window, text="Name:", bg="#219F94")
name.grid(row=1, column=0, ipadx="30")
course = Label(data_window, text="Course:", bg="#219F94")
course.grid(row=2, column=0)
sem = Label(data_window, text="Semester:", bg="#219F94")
sem.grid(row=3, column=0)
form_no = Label(data_window, text="Roll No.:", bg="#219F94")
form_no.grid(row=4, column=0)
contact_no = Label(data_window, text="Contact No:", bg="#219F94")
contact_no.grid(row=5, column=0)
email_id = Label(data_window, text="Email id:", bg="#219F94")
email_id.grid(row=6, column=0)
address = Label(data_window, text="Address:", bg="#219F94")
address.grid(row=7, column=0)
name_field = Entry(data_window, highlightthickness=0)
name_field.grid(row=1, column=1)
course_field = Entry(data_window, highlightthickness=0)
course_field.grid(row=2, column=1)
sem_field = Entry(data_window, highlightthickness=0)
sem_field.grid(row=3, column=1)
form_no_field = Entry(data_window, highlightthickness=0)
form_no_field.grid(row=4, column=1)
contact_no_field = Entry(data_window, highlightthickness=0)
contact_no_field.grid(row=5, column=1)
email_id_field = Entry(data_window, highlightthickness=0)
email_id_field.grid(row=6, column=1)
address_field = Entry(data_window, highlightthickness=0)
address_field.grid(row=7, column=1)
def entries():
entry_list = [
name_field.get(),
course_field.get(),
sem_field.get(),
form_no_field.get(),
contact_no_field.get(),
email_id_field.get(),
address_field.get()
]
with open("data.csv", 'a') as data_file:
writer_object = csv.writer(data_file)
writer_object.writerow(entry_list)
data_file.close()
submit_button = Button(
data_window,
text="Submit",
command=lambda: [entries(), id_card(), data_window.destroy()],
highlightbackground="#219F94")
submit_button.grid(row=8, column=1)
data_window.mainloop()
def login_page():
login_window = Toplevel()
login_window.title("Login")
login_window.config(padx=80, pady=60, background="#D3ECA7")
Label(login_window, text="Username: ", bg="#D3ECA7").pack()
username_entry = Entry(login_window, highlightthickness=0)
username_entry.pack()
Label(login_window, text="Password: ", bg="#D3ECA7").pack()
password_entry = Entry(login_window, highlightthickness=0, show="*")
password_entry.pack()
def checking_func():
if (username_entry.get() == "admin") and (password_entry.get() == "admin"):
login_info()
login_window.destroy()
else:
Label(login_window, text="Error! Enter correct login id and password", fg="red", bg="#D3ECA7").pack()
Button(login_window, text="Login", command=checking_func, highlightbackground="#D3ECA7").pack()
login_window.mainloop()
window = Tk()
window.title("Attendance Manager")
window.config()
bg_img = PhotoImage(file="images/backdrop.png")
start_img = PhotoImage(file="images/login_button.png")
canvas = Canvas(width=750, height=500)
canvas.create_image(375, 250, image=bg_img)
canvas.create_text(380, 330, text="WELCOME TO JKLU", font=("Montserrat", 50, "bold"))
canvas.create_text(70, 15, text=f"Date: {date_now}")
canvas.create_text(640, 15, text=f"Time: ")
canvas.pack()
Button(
window,
image=start_img,
highlightbackground='#ACACAC',
command=login_page
).place(x=310, y=400)
time_now = Label(window, text="00:00:00", background='#CBCBCB')
time_now.place(x=660, y=3)
time()
window.mainloop()
| 27.860215 | 104 | 0.710729 | 807 | 5,182 | 4.406444 | 0.213135 | 0.049213 | 0.035433 | 0.042745 | 0.238189 | 0.18279 | 0.076209 | 0.044994 | 0.030371 | 0 | 0 | 0.045128 | 0.114821 | 5,182 | 185 | 105 | 28.010811 | 0.730107 | 0 | 0 | 0.029197 | 0 | 0.007299 | 0.117522 | 0.004438 | 0 | 0 | 0 | 0 | 0 | 1 | 0.043796 | false | 0.036496 | 0.043796 | 0 | 0.087591 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
50dd69ff3b294b90b3f8a07a43d8a2b9d5e0c641 | 2,492 | py | Python | revscoring/utilities/fit.py | kevinbazira/revscoring | 625f8b8048eb3c0c1c872ed9c15687c56f125747 | [
"MIT"
] | 49 | 2015-07-15T14:53:06.000Z | 2018-08-20T15:00:31.000Z | revscoring/utilities/fit.py | kevinbazira/revscoring | 625f8b8048eb3c0c1c872ed9c15687c56f125747 | [
"MIT"
] | 224 | 2015-06-14T23:22:43.000Z | 2018-08-08T22:52:46.000Z | revscoring/utilities/fit.py | kevinbazira/revscoring | 625f8b8048eb3c0c1c872ed9c15687c56f125747 | [
"MIT"
] | 36 | 2015-07-03T03:25:01.000Z | 2018-05-25T10:21:08.000Z | """
``revscoring fit -h``
::
Fits a dependent (an extractable value like a Datasource or Feature) to
observed data. These are often used along with bag-of-words
methods to reduce the feature space prior to training and testing a model
or to train a sub-model.
Usage:
fit -h | --help
fit <dependent> <label>
[--input=<path>]
[--datasource-file=<path>]
[--debug]
Options:
-h --help Prints this documentation
<dependent> The classpath to `Dependent`
that can be fit to observations
<label> The label that should be predicted
--input=<path> Path to a file containing observations
[default: <stdin>]
--datasource-file=<math> Path to a file for writing out the trained
datasource [default: <stdout>]
--debug Print debug logging.
""" # noqa
import logging
import sys
import docopt
import yamlconf
from ..dependencies import solve
from .util import read_observations
logger = logging.getLogger(__name__)
def main(argv=None):
args = docopt.docopt(__doc__, argv=argv)
logging.basicConfig(
level=logging.INFO if not args['--debug'] else logging.DEBUG,
format='%(asctime)s %(levelname)s:%(name)s -- %(message)s'
)
dependent = yamlconf.import_path(args['<dependent>'])
label_name = args['<label>']
if args['--input'] == "<stdin>":
observations = read_observations(sys.stdin)
else:
observations = read_observations(open(args['--input']))
logger.info("Reading observations...")
value_labels = [
(list(solve(dependent.dependencies, cache=ob['cache'])),
ob[label_name])
for ob in observations]
logger.debug(" -- {0} observations gathered".format(len(value_labels)))
if args['--datasource-file'] == "<stdout>":
datasource_f = sys.stdout
else:
datasource_f = open(args['--datasource-file'], 'w')
debug = args['--debug']
run(dependent, label_name, value_labels, datasource_f, debug)
def run(dependent, label_name, value_labels, datasource_f, debug):
logger.info("Fitting {0} ({1})".format(dependent, type(dependent)))
dependent.fit(value_labels)
logger.info("Writing fitted selector to {0}".format(datasource_f))
dependent.dump(datasource_f)
| 31.15 | 77 | 0.603933 | 285 | 2,492 | 5.185965 | 0.4 | 0.044655 | 0.036536 | 0.014885 | 0.064953 | 0.064953 | 0.064953 | 0.064953 | 0.064953 | 0 | 0 | 0.002215 | 0.275281 | 2,492 | 79 | 78 | 31.544304 | 0.816168 | 0.414125 | 0 | 0.055556 | 0 | 0 | 0.171488 | 0.015152 | 0 | 0 | 0 | 0 | 0 | 1 | 0.055556 | false | 0 | 0.194444 | 0 | 0.25 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
50df0f98e46081e28b836f98f598701681f78512 | 1,940 | py | Python | functions.py | PandaWhoCodes/natural_cmd | db9714b63e397ccb1ddac57e308af5aec39d3cd4 | [
"MIT"
] | null | null | null | functions.py | PandaWhoCodes/natural_cmd | db9714b63e397ccb1ddac57e308af5aec39d3cd4 | [
"MIT"
] | null | null | null | functions.py | PandaWhoCodes/natural_cmd | db9714b63e397ccb1ddac57e308af5aec39d3cd4 | [
"MIT"
] | null | null | null | """
Rule based functions. Rules are represented as a graph of
meta-edges from list of nodes to various actions, and are stored as a
lookup tree.
"""
from subprocess import call
import os
import glob
def ls(x=None):
"""
Lists the files and directories in the given directory
Parameters:
params (dict): Path from current directory to directory to list
files / directories from
Returns:
listing (list): A list of the files and directories in params
"""
try:
if x != None:
os.chdir(x)
else:
x = os.getcwd()
for f in os.listdir('.'):
print(f, end="\t")
except FileNotFoundError:
print("I couldn't find file " + x)
def text2int(textnum, numwords={}):
"""
Returns integer number from its string representation, or False if the
string doesn't represent a number.
"""
if not numwords:
units = [
"zero", "one", "two", "three", "four", "five", "six", "seven", "eight",
"nine", "ten", "eleven", "twelve", "thirteen", "fourteen", "fifteen",
"sixteen", "seventeen", "eighteen", "nineteen",
]
tens = ["", "", "twenty", "thirty", "forty", "fifty", "sixty", "seventy", "eighty", "ninety"]
scales = ["hundred", "thousand", "million", "billion", "trillion"]
numwords["and"] = (1, 0)
for idx, word in enumerate(units): numwords[word] = (1, idx)
for idx, word in enumerate(tens): numwords[word] = (1, idx * 10)
for idx, word in enumerate(scales): numwords[word] = (10 ** (idx * 3 or 2), 0)
current = result = 0
for word in textnum.split():
if word not in numwords:
return False
scale, increment = numwords[word]
current = current * scale + increment
if scale > 100:
result += current
current = 0
return result + current
| 29.393939 | 101 | 0.565464 | 234 | 1,940 | 4.688034 | 0.534188 | 0.021878 | 0.027347 | 0.032817 | 0.101185 | 0 | 0 | 0 | 0 | 0 | 0 | 0.012574 | 0.303093 | 1,940 | 65 | 102 | 29.846154 | 0.798817 | 0.25 | 0 | 0 | 0 | 0 | 0.158046 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.055556 | false | 0 | 0.083333 | 0 | 0.194444 | 0.055556 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
50dfefd4bae1e15e1de89f776dbc437b2bba618c | 9,324 | py | Python | titta/Titta.py | mintzer/pupillometry-rf-back | cfa86fa984a49dce0123798f8de5b838c02e10d5 | [
"CC-BY-4.0"
] | 25 | 2019-09-06T13:26:31.000Z | 2021-12-09T08:44:26.000Z | titta/Titta.py | mintzer/pupillometry-rf-back | cfa86fa984a49dce0123798f8de5b838c02e10d5 | [
"CC-BY-4.0"
] | 13 | 2020-02-20T22:26:42.000Z | 2022-03-24T09:30:11.000Z | titta/Titta.py | mintzer/pupillometry-rf-back | cfa86fa984a49dce0123798f8de5b838c02e10d5 | [
"CC-BY-4.0"
] | 9 | 2019-04-29T08:22:22.000Z | 2022-01-20T18:54:43.000Z | # -*- coding: utf-8 -*-
"""
Created on Mon Oct 29 08:39:57 2018
@author: Marcus
"""
from psychopy import core
import numpy as np
def get_defaults(et_name):
settings = Settings(et_name)
if et_name == 'IS4_Large_Peripheral':
settings.SAMPLING_RATE = 90
elif et_name == 'Tobii Pro Spectrum':
settings.SAMPLING_RATE = 600
settings.TRACKING_MODE = 'human'
elif et_name == 'Tobii Pro Nano':
settings.SAMPLING_RATE = 60
elif et_name == 'Tobii TX300':
settings.SAMPLING_RATE = 300
elif et_name == 'Tobii T60 XL':
settings.SAMPLING_RATE = 60
elif et_name == 'Tobii Pro X3-120 EPU':
settings.SAMPLING_RATE = 120
elif et_name == 'Tobii Pro X3-120':
settings.SAMPLING_RATE = 120
elif et_name == 'X2-60_Compact':
settings.SAMPLING_RATE = 60
elif et_name == 'X2-30_Compact':
settings.SAMPLING_RATE = 40
elif et_name == 'Tobii X60':
settings.SAMPLING_RATE = 60
elif et_name == 'Tobii X120':
settings.SAMPLING_RATE = 120
elif et_name == 'Tobii T60':
settings.SAMPLING_RATE = 60
elif et_name == 'Tobii T120':
settings.SAMPLING_RATE = 120
elif et_name == 'Tobii Pro Fusion':
settings.SAMPLING_RATE = 120
else:
print('eye tracker type not supported')
core.quit()
return settings
class Connect(object):
def __init__(self, in_arg='dummy'):
''' in_arg can be either string with eye tracker name
or 'settings' generated by calling (and optionally modifying)
the output from get_defaults()
'''
if isinstance(in_arg, str):
if 'dummy' in in_arg:
from titta import Tobii_dummy
self.__class__ = Tobii_dummy.Connect
self.__class__.__init__(self)
else:
from titta import Tobii
self.__class__ = Tobii.myTobii
self.__class__.__init__(self, in_arg)
else:
from titta import Tobii
self.__class__ = Tobii.myTobii
self.__class__.__init__(self, in_arg)
class Settings(object):
def __init__(self, et_name):
''' Default settings for eye tracker
'''
self.graphics = Graphics()
# Default name of et-data file
self.FILENAME = 'test.tsv'
# Tracking parameters
self.TRACKER_ADDRESS = '' # If none is given, find one on the network
self.SAMPLING_RATE = 600 # Set sampling rate of tracker
self.eye_tracker_name = et_name
self.TRACKING_MODE = 'Default'
# Parameters for calibration
self.PACING_INTERVAL = 1.0 # How long to present the dot until samples are collected
self.AUTO_PACE = 1 # autoaccept (2), semi autoaccept (1, accept first point, default)
# of accept with space bar (0)
self.ANIMATE_CALIBRATION = True # Static or animated calibration dots
self.RECORD_EYE_IMAGES_DURING_CALIBRATION = False
self.RECORD_EXTERNAL_SIGNAL_DURING_CALIBRATION = False
self.N_CAL_TARGETS = 5 # Valid: 0, 1, 5, 9, 13
# List all possible calibration points (in Tobii's coordinate system)
# (0.0, 0.0) is the upper left corner and (1.0, 1.0) is the lower right corner.
# Define the 13 point array (reading order)
self.CAL_TARGETS = np.array([[0.1, 0.1], [0.5, 0.1], [.9,.1],
[.3,.3], [.7,.3],
[.1,.5], [.5,.5], [.9,.5],
[.3,.7], [.7,.7],
[.1,.9], [.5,.9], [.9,.9]])
self.VAL_POS_TOBII = np.array([[0.2, 0.5], [0.5, 0.8], [0.8, 0.5], [0.5, 0.2]])
# CAL_POS_TOBII = np.array([[0.5, 0.5], [0.0, 0.0], [0.0, 1.0], [1.0, 0.0], [1.0, 1.0],
# [0.5, 1.0], [0.5, 0.0], [0.0, 0.5], [1.0, 0.5]])
# if N_CAL_TARGETS == 0:
# CAL_POS_TOBII = []
# elif N_CAL_TARGETS == 1:
# CAL_POS_TOBII = CAL_POS_TOBII[0, :]
# elif N_CAL_TARGETS == 5:
# CAL_POS_TOBII = CAL_POS_TOBII[[0, 1, 2, 3, 4], :]
# # VAL_POS_TOBII = np.array([[0.2, 0.2], [0.2, 0.8], [0.8, 0.2], [0.8, 0.8]])
# VAL_POS_TOBII = np.array([[0.2, 0.5], [0.5, 0.8], [0.8, 0.5], [0.5, 0.2]])
# # Scale the positions so they look good on the screen
# scaling = 0.7
# corr = 0.5 - (scaling * 0.5)
# self.CAL_POS_TOBII = CAL_POS_TOBII * scaling + corr
# self.VAL_POS_TOBII = VAL_POS_TOBII * scaling + corr
class Graphics(object):
def __init__(self):
''' Default settings for graphics
'''
blue = tuple(np.array([37, 97, 163]) / 255.0 * 2 - 1)
blue_active = tuple(np.array([11, 122, 244]) / 255.0 * 2 - 1)
green = tuple(np.array([0, 120, 0]) / 255.0 * 2 - 1)
red = tuple(np.array([150, 0, 0]) / 255.0 * 2 - 1)
yellow = tuple(np.array([255, 255, 0]) / 255.0 * 2 - 1)
yellow_linecolor = tuple(np.array([255, 255, 0]) / 255.0 * 2 - 1)
self.blue = blue
self.blue_active = blue_active
self.TEXT_SIZE = 0.04 # Size of text
self.TEXT_COLOR = 'white'
self.ET_SAMPLE_RADIUS = 0.1 # in deg
# SIze of calibration dots
self.TARGET_SIZE=0.6 # in deg
self.TARGET_SIZE_INNER=self.TARGET_SIZE / float(5) # inner diameter of dot
# Theses parameters are changed directly in the EThead class
# self.HEAD_POS_CIRCLE_FIXED_COLOR = blue
# self.HEAD_POS_CIRCLE_FIXED_RADIUS = 0.25
# self.HEAD_POS_CIRCLE_MOVING_COLOR = yellow
# self.HEAD_POS_CIRCLE_MOVING_FILLCOLOR = yellow
# self.HEAD_POS_CIRCLE_MOVING_RADIUS = 0.25
# self.HEAD_POS_CIRCLE_MOVING_MIN_RADIUS = 0.05
self.POS_CAL_BUTTON = (0.5, -0.8)
self.COLOR_CAL_BUTTON = green
self.WIDTH_CAL_BUTTON = 0.30
self.HEIGHT_CAL_BUTTON = 0.08
self.CAL_BUTTON = 'space'
self.CAL_BUTTON_TEXT = 'calibrate (spacebar)'
self.POS_RECAL_BUTTON = (-0.5, -0.8)
self.COLOR_RECAL_BUTTON = red
self.WIDTH_RECAL_BUTTON = 0.30
self.HEIGHT_RECAL_BUTTON = 0.08
self.RECAL_BUTTON = 'c'
self.RECAL_BUTTON_TEXT = 're-calibrate (c)'
self.POS_REVAL_BUTTON = (-0.21, -0.8)
self.COLOR_REVAL_BUTTON = red
self.WIDTH_REVAL_BUTTON = 0.30
self.HEIGHT_REVAL_BUTTON = 0.08
self.REVAL_BUTTON = 'v'
self.REVAL_BUTTON_TEXT = 're-validate (v)'
# Button for showing eye images
self.POS_SETUP_BUTTON = (-0.5, -0.8)
self.COLOR_SETUP_BUTTON = blue
self.WIDTH_SETUP_BUTTON = 0.30
self.HEIGHT_SETUP_BUTTON = 0.08
self.SETUP_BUTTON = 'e'
self.SETUP_BUTTON_TEXT = 'eye images (e)'
self.POS_ACCEPT_BUTTON = (0.5, -0.8)
self.COLOR_ACCEPT_BUTTON = green
self.WIDTH_ACCEPT_BUTTON = 0.30
self.HEIGHT_ACCEPT_BUTTON = 0.08
self.ACCEPT_BUTTON = 'space'
self.ACCEPT_BUTTON_TEXT = 'accept (spacebar)'
self.POS_BACK_BUTTON = (-0.5, -0.8)
self.COLOR_BACK_BUTTON = blue
self.WIDTH_BACK_BUTTON = 0.30
self.HEIGHT_BACK_BUTTON = 0.08
self.BACK_BUTTON = 'b'
self.BACK_BUTTON_TEXT = 'basic (b)'
self.POS_GAZE_BUTTON = (0.8, 0.8)
self.COLOR_GAZE_BUTTON = blue
self.WIDTH_GAZE_BUTTON = 0.25
self.HEIGHT_GAZE_BUTTON = 0.08
self.GAZE_BUTTON = 'g'
self.GAZE_BUTTON_TEXT = 'show gaze (g)'
self.POS_CAL_IMAGE_BUTTON = (-0.8, 0.8)
self.COLOR_CAL_IMAGE_BUTTON = (0.2, 0.2, 0.2)
self.WIDTH_CAL_IMAGE_BUTTON = 0.25
self.HEIGHT_CAL_IMAGE_BUTTON = 0.08
self.CAL_IMAGE_BUTTON = 's'
self.CAL_IMAGE_BUTTON_TEXT = 'Show calibration (s)'
self.SETUP_DOT_OUTER_DIAMETER = 0.03 # Height unit
self.SETUP_DOT_INNER_DIAMETER = 0.005
# Parameters for eye images (default values are for Spectrum)
self.EYE_IMAGE_SIZE = (0.5, 0.25)
# self.EYE_IMAGE_SIZE_PIX = (175, 496)
# self.EYE_IMAGE_SIZE_PIX_FULL_FRAME = (512, 640)
self.EYE_IMAGE_POS_L = (-0.5, -0.4)
self.EYE_IMAGE_POS_R = (0.5, -0.4)
self.EYE_IMAGE_POS_L_1 = (-0.5, -0.4) # Used for the two additional fusion images
self.EYE_IMAGE_POS_R_1 = (0.5, -0.4)
# Parameters for tracking monitor (norm units)
self.EYE_SIZE = 0.03
self.EYE_COLOR_VALID = green
self.EYE_COLOR_INVALID = red
self.TRACKING_MONITOR_SIZE = [0.5, 0.5]
self.TRACKING_MONITOR_POS = [0, 0.4]
self.TRACKING_MONITOR_COLOR = [0.2, 0.2, 0.2]
| 38.37037 | 112 | 0.547726 | 1,275 | 9,324 | 3.756078 | 0.209412 | 0.012111 | 0.014408 | 0.034454 | 0.304865 | 0.220923 | 0.19503 | 0.137189 | 0.069743 | 0.052621 | 0 | 0.076448 | 0.340626 | 9,324 | 243 | 113 | 38.37037 | 0.702505 | 0.240133 | 0 | 0.128378 | 0 | 0 | 0.056693 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.027027 | false | 0 | 0.033784 | 0 | 0.087838 | 0.006757 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
50e070140fce4298615b699e791d3992d2be779c | 4,880 | py | Python | kornia/geometry/transform/homography_warper.py | Ishticode/kornia | 974abb43ec72d12dbd244a2fb247bbbab8498de0 | [
"ECL-2.0",
"Apache-2.0"
] | 418 | 2018-10-02T22:31:36.000Z | 2019-01-16T14:15:45.000Z | kornia/geometry/transform/homography_warper.py | Ishticode/kornia | 974abb43ec72d12dbd244a2fb247bbbab8498de0 | [
"ECL-2.0",
"Apache-2.0"
] | 94 | 2019-01-17T22:10:45.000Z | 2019-05-22T23:47:58.000Z | kornia/geometry/transform/homography_warper.py | Ishticode/kornia | 974abb43ec72d12dbd244a2fb247bbbab8498de0 | [
"ECL-2.0",
"Apache-2.0"
] | 25 | 2018-10-02T22:50:04.000Z | 2019-01-13T18:14:11.000Z | from typing import Optional
import torch
import torch.nn as nn
import torch.nn.functional as F
from kornia.utils import create_meshgrid
from .imgwarp import homography_warp, warp_grid
__all__ = [
"HomographyWarper",
]
class HomographyWarper(nn.Module):
r"""Warp tensors by homographies.
.. math::
X_{dst} = H_{src}^{\{dst\}} * X_{src}
Args:
height: The height of the destination tensor.
width: The width of the destination tensor.
mode: interpolation mode to calculate output values ``'bilinear'`` | ``'nearest'``.
padding_mode: padding mode for outside grid values
``'zeros'`` | ``'border'`` | ``'reflection'``.
normalized_coordinates: whether to use a grid with normalized coordinates.
align_corners: interpolation flag.
"""
_warped_grid: Optional[torch.Tensor]
def __init__(
self,
height: int,
width: int,
mode: str = 'bilinear',
padding_mode: str = 'zeros',
normalized_coordinates: bool = True,
align_corners: bool = False,
) -> None:
super().__init__()
self.width: int = width
self.height: int = height
self.mode: str = mode
self.padding_mode: str = padding_mode
self.normalized_coordinates: bool = normalized_coordinates
self.align_corners: bool = align_corners
# create base grid to compute the flow
self.grid: torch.Tensor = create_meshgrid(height, width, normalized_coordinates=normalized_coordinates)
        # initialize the warped destination grid
self._warped_grid = None
def precompute_warp_grid(self, src_homo_dst: torch.Tensor) -> None:
r"""Compute and store internally the transformations of the points.
Useful when the same homography/homographies are reused.
Args:
src_homo_dst: Homography or homographies (stacked) to
transform all points in the grid. Shape of the homography
has to be :math:`(1, 3, 3)` or :math:`(N, 1, 3, 3)`.
The homography assumes normalized coordinates [-1, 1] if
normalized_coordinates is True.
"""
self._warped_grid = warp_grid(self.grid, src_homo_dst)
def forward(self, patch_src: torch.Tensor, src_homo_dst: Optional[torch.Tensor] = None) -> torch.Tensor:
r"""Warp a tensor from source into reference frame.
Args:
patch_src: The tensor to warp.
src_homo_dst: The homography or stack of
homographies from destination to source. The homography assumes
normalized coordinates [-1, 1] if normalized_coordinates is True.
Return:
Patch sampled at locations from source to destination.
Shape:
- Input: :math:`(N, C, H, W)` and :math:`(N, 3, 3)`
- Output: :math:`(N, C, H, W)`
Example:
>>> input = torch.rand(1, 3, 32, 32)
>>> homography = torch.eye(3).view(1, 3, 3)
>>> warper = HomographyWarper(32, 32)
>>> # without precomputing the warp
>>> output = warper(input, homography) # NxCxHxW
>>> # precomputing the warp
>>> warper.precompute_warp_grid(homography)
>>> output = warper(input) # NxCxHxW
"""
_warped_grid = self._warped_grid
if src_homo_dst is not None:
warped_patch = homography_warp(
patch_src,
src_homo_dst,
(self.height, self.width),
mode=self.mode,
padding_mode=self.padding_mode,
align_corners=self.align_corners,
normalized_coordinates=self.normalized_coordinates,
)
elif _warped_grid is not None:
            if _warped_grid.device != patch_src.device:
                raise TypeError(
                    "Patch and warped grid must be on the same device. "
                    "Got patch.device: {} warped_grid.device: {}. Either "
                    "recall precompute_warp_grid() with the correct device "
                    "for the homography or change the patch device.".format(
                        patch_src.device, _warped_grid.device
                    )
                )
warped_patch = F.grid_sample(
patch_src,
_warped_grid,
mode=self.mode,
padding_mode=self.padding_mode,
align_corners=self.align_corners,
)
        else:
            raise RuntimeError(
                "Unknown warping. If homographies are not provided "
                "they must be preset using the method: "
                "precompute_warp_grid()."
            )
return warped_patch
| 36.969697 | 111 | 0.576025 | 538 | 4,880 | 5.042751 | 0.271375 | 0.100627 | 0.025802 | 0.02101 | 0.10763 | 0.101732 | 0.101732 | 0.101732 | 0.101732 | 0.101732 | 0 | 0.008062 | 0.339139 | 4,880 | 131 | 112 | 37.251908 | 0.833178 | 0.367008 | 0 | 0.114286 | 0 | 0 | 0.010353 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.042857 | false | 0 | 0.085714 | 0 | 0.171429 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
50e6a3134d4cb7fb96e6b9f08a56700b7945939a | 3,183 | py | Python | thinkindicator/indicator.py | przemub/thinkindicator | a8fa40ea172e0f80c61b2d56a6097bc9c1cdd23b | [
"MIT"
] | null | null | null | thinkindicator/indicator.py | przemub/thinkindicator | a8fa40ea172e0f80c61b2d56a6097bc9c1cdd23b | [
"MIT"
] | null | null | null | thinkindicator/indicator.py | przemub/thinkindicator | a8fa40ea172e0f80c61b2d56a6097bc9c1cdd23b | [
"MIT"
] | null | null | null | import os
import gi
gi.require_version("Gtk", "3.0")
gi.require_version("AppIndicator3", "0.1")
from gi.repository import Gtk, Gdk, AppIndicator3, GLib
from .fan import get_level, LEVELS, set_level
APPINDICATOR_ID = "thinkindicator"
SCROLL_SPEED = 5
class ThinkIndicator:
script_dir = os.path.dirname(os.path.realpath(__file__))
def _get_icon(self):
icon = self.current_level
if icon == "auto":
icon = "A"
elif icon == "full-speed":
icon = "F"
elif icon == "disengaged":
icon = "D"
return f"{self.script_dir}/icons/{icon}.png"
def _update_level(self):
self.current_level = get_level()
self.indicator.set_icon_full(self._get_icon(), self.current_level)
    def _update_level_timeout(self):
        self._update_level()
        # GLib repeats a timeout only while its callback returns True, so
        # wrap the None-returning _update_level to keep the 1 s refresh alive
        GLib.timeout_add_seconds(1, lambda: self._update_level() or True)
def _set_level_action(self, _menu: Gtk.MenuItem, level: str):
set_level(level)
self._update_level()
def _get_menu(self):
menu = Gtk.Menu()
group = None
for level in LEVELS:
item_level = Gtk.RadioMenuItem(
group=group, label=level.replace("-", " ")
)
group = item_level
item_level.show()
item_level.connect("activate", self._set_level_action, level)
if level == self.current_level:
item_level.set_active(True)
menu.append(item_level)
item_quit = Gtk.MenuItem("Quit")
item_quit.show()
item_quit.connect("activate", Gtk.main_quit)
menu.append(item_quit)
return menu
def scroll(
self,
_ind: AppIndicator3.Indicator,
steps: int,
direction: Gdk.ScrollDirection,
):
# Check if we are in manual mode
try:
current_level_int = int(self.current_level)
except ValueError:
return
if direction == Gdk.ScrollDirection.DOWN:
self.scroll_progress -= steps
elif direction == Gdk.ScrollDirection.UP:
self.scroll_progress += steps
if self.scroll_progress >= SCROLL_SPEED:
if current_level_int != 7:
set_level(str(current_level_int + 1))
self._update_level()
self.scroll_progress = 0
elif self.scroll_progress <= -SCROLL_SPEED:
# Not below one so we don't turn off fan completely by accident
# and fry someone's computer
if current_level_int != 1:
set_level(str(current_level_int - 1))
self._update_level()
self.scroll_progress = 0
def __init__(self):
self.current_level = None
self.scroll_progress = 0
self.indicator = AppIndicator3.Indicator.new(
APPINDICATOR_ID, "", AppIndicator3.IndicatorCategory.HARDWARE
)
def run(self):
self.indicator.set_status(AppIndicator3.IndicatorStatus.ACTIVE)
self._update_level_timeout()
self.indicator.set_menu(self._get_menu())
self.indicator.connect("scroll-event", self.scroll)
Gtk.main()
| 28.936364 | 75 | 0.602576 | 371 | 3,183 | 4.90566 | 0.304582 | 0.072527 | 0.069231 | 0.026374 | 0.098901 | 0.067033 | 0.067033 | 0.067033 | 0.067033 | 0.067033 | 0 | 0.008532 | 0.300346 | 3,183 | 109 | 76 | 29.201835 | 0.808711 | 0.037386 | 0 | 0.085366 | 0 | 0 | 0.04281 | 0.011111 | 0 | 0 | 0 | 0 | 0 | 1 | 0.097561 | false | 0 | 0.04878 | 0 | 0.207317 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
50e8018a906859e15f7734520b012605d4754d6a | 612 | py | Python | Python3/Conv-Autoencoder/test.py | benchislett/Skunkworks | c673609665aeaa040f5db18173221a86526d61d5 | [
"MIT"
] | 6 | 2019-07-17T10:41:02.000Z | 2020-11-26T15:01:08.000Z | Python3/Conv-Autoencoder/test.py | benchislett/Skunkworks | c673609665aeaa040f5db18173221a86526d61d5 | [
"MIT"
] | 2 | 2019-07-17T21:47:16.000Z | 2021-02-22T21:22:46.000Z | Python3/Conv-Autoencoder/test.py | benchislett/Skunkworks | c673609665aeaa040f5db18173221a86526d61d5 | [
"MIT"
] | 1 | 2019-07-17T18:05:48.000Z | 2019-07-17T18:05:48.000Z | def test_batch(model, loss_fn, batch):
"""Test the model on a batch of data,
and return the loss and number of correct predictions
"""
x = batch[0].cuda()
out = model(x)
loss = loss_fn(out, x).item()
return loss
def test(model, loss_fn, loader):
"""Test the model over all batches in a given dataset,
    and return the mean loss over all batches
"""
model.eval()
loss_acc = 0.0
batches = 0
for batch in loader:
loss = test_batch(model, loss_fn, batch)
loss_acc += loss
batches += 1
return loss_acc / batches
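# Illustrative usage sketch (not part of the original module; assumes torch is
# imported and a trained autoencoder is at hand -- the model class and loader
# names below are placeholders, not names from this repository):
#
#   model = ConvAutoencoder().cuda()
#   loss_fn = torch.nn.MSELoss()
#   mean_loss = test(model, loss_fn, test_loader)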
| 19.741935 | 58 | 0.616013 | 92 | 612 | 4 | 0.391304 | 0.065217 | 0.089674 | 0.097826 | 0.13587 | 0.13587 | 0 | 0 | 0 | 0 | 0 | 0.011521 | 0.29085 | 612 | 30 | 59 | 20.4 | 0.836406 | 0.318627 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.142857 | false | 0 | 0 | 0 | 0.285714 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
50eaa1f2acbab12054671b3c3d50193f4f654f5c | 5,755 | py | Python | gamestonk_terminal/cryptocurrency/discovery/dappradar_model.py | minhhoang1023/GamestonkTerminal | 195dc19b491052df080178c0cc6a9d535a91a704 | [
"MIT"
] | 1 | 2022-02-18T04:02:52.000Z | 2022-02-18T04:02:52.000Z | gamestonk_terminal/cryptocurrency/discovery/dappradar_model.py | minhhoang1023/GamestonkTerminal | 195dc19b491052df080178c0cc6a9d535a91a704 | [
"MIT"
] | null | null | null | gamestonk_terminal/cryptocurrency/discovery/dappradar_model.py | minhhoang1023/GamestonkTerminal | 195dc19b491052df080178c0cc6a9d535a91a704 | [
"MIT"
] | null | null | null | """DappRadar model"""
__docformat__ = "numpy"
# pylint: disable=C0301,E1137
from typing import Optional
import logging
import pandas as pd
import requests
from gamestonk_terminal.decorators import log_start_end
from gamestonk_terminal.helper_funcs import get_user_agent
from gamestonk_terminal.rich_config import console
logger = logging.getLogger(__name__)
NFT_COLUMNS = [
"Name",
"Protocols",
"Floor Price [$]",
"Avg Price [$]",
"Market Cap [$]",
"Volume [$]",
]
DAPPS_COLUMNS = [
"Name",
"Category",
"Protocols",
"Daily Users",
"Daily Volume [$]",
]
DEX_COLUMNS = [
"Name",
"Daily Users",
"Daily Volume [$]",
]
@log_start_end(log=logger)
def _make_request(url: str) -> Optional[dict]:
"""Helper method handles dappradar api requests. [Source: https://dappradar.com/]
Parameters
----------
url: str
endpoint url
Returns
-------
dict:
dictionary with response data
"""
response = requests.get(
url, headers={"Accept": "application/json", "User-Agent": get_user_agent()}
)
if not 200 <= response.status_code < 300:
console.print(f"[red]dappradar api exception: {response.text}[/red]")
return None
try:
return response.json()
except Exception as _: # noqa: F841
console.print(f"[red]Invalid Response:: {response.text}[/red]")
return None
@log_start_end(log=logger)
def get_top_nfts() -> pd.DataFrame:
"""Get top nft collections [Source: https://dappradar.com/]
Parameters
----------
Returns
-------
pd.DataFrame
NFT collections. Columns: Name, Protocols, Floor Price [$], Avg Price [$], Market Cap [$], Volume [$]
"""
response = _make_request(
"https://nft-sales-service.dappradar.com/v2/collection/day?limit=20&page=1¤cy=USD&sort=marketCapInFiat&order=desc" # noqa
)
if response:
data = response.get("results")
df = pd.DataFrame(
data,
columns=[
"name",
"activeProtocols",
"floorPriceInFiat",
"avgPriceInFiat",
"marketCapInFiat",
"volumeInFiat",
],
)
df = df.set_axis(
NFT_COLUMNS,
axis=1,
inplace=False,
)
df["Protocols"] = df["Protocols"].apply(lambda x: ",".join(x))
return df
return pd.DataFrame()
@log_start_end(log=logger)
def get_top_dexes() -> pd.DataFrame:
"""Get top dexes by daily volume and users [Source: https://dappradar.com/]
Parameters
----------
Returns
-------
pd.DataFrame
Top decentralized exchanges. Columns: Name, Daily Users, Daily Volume [$]
"""
data = _make_request(
"https://dappradar.com/v2/api/dapps?params=WkdGd2NISmhaR0Z5Y0dGblpUMHhKbk5uY205MWNEMXRZWGdtWTNWeWNtVnVZM2s5VlZORUptWmxZWFIxY21Wa1BURW1jbUZ1WjJVOVpHRjVKbU5oZEdWbmIzSjVQV1Y0WTJoaGJtZGxjeVp6YjNKMFBYUnZkR0ZzVm05c2RXMWxTVzVHYVdGMEptOXlaR1Z5UFdSbGMyTW1iR2x0YVhROU1qWT0=" # noqa
)
if data:
arr = []
for dex in data["dapps"]:
arr.append(
[
dex["name"],
dex["statistic"]["userActivity"],
dex["statistic"]["totalVolumeInFiat"],
]
)
df = pd.DataFrame(arr, columns=DEX_COLUMNS)
return df
return pd.DataFrame()
@log_start_end(log=logger)
def get_top_games() -> pd.DataFrame:
"""Get top blockchain games by daily volume and users [Source: https://dappradar.com/]
Parameters
----------
Returns
-------
pd.DataFrame
Top blockchain games. Columns: Name, Daily Users, Daily Volume [$]
"""
data = _make_request(
"https://dappradar.com/v2/api/dapps?params=WkdGd2NISmhaR0Z5Y0dGblpUMHhKbk5uY205MWNEMXRZWGdtWTNWeWNtVnVZM2s5VlZORUptWmxZWFIxY21Wa1BURW1jbUZ1WjJVOVpHRjVKbU5oZEdWbmIzSjVQV2RoYldWekpuTnZjblE5ZFhObGNpWnZjbVJsY2oxa1pYTmpKbXhwYldsMFBUSTI=" # noqa
)
if data:
arr = []
for dex in data["dapps"]:
arr.append(
[
dex["name"],
dex["statistic"]["userActivity"],
dex["statistic"]["totalVolumeInFiat"],
]
)
df = pd.DataFrame(
arr,
columns=DEX_COLUMNS,
).sort_values("Daily Users", ascending=False)
return df
return pd.DataFrame()
@log_start_end(log=logger)
def get_top_dapps() -> pd.DataFrame:
"""Get top decentralized applications by daily volume and users [Source: https://dappradar.com/]
Parameters
----------
Returns
-------
pd.DataFrame
Top decentralized exchanges. Columns: Name, Category, Protocols, Daily Users, Daily Volume [$]
"""
data = _make_request(
"https://dappradar.com/v2/api/dapps?params=WkdGd2NISmhaR0Z5Y0dGblpUMHhKbk5uY205MWNEMXRZWGdtWTNWeWNtVnVZM2s5VlZORUptWmxZWFIxY21Wa1BURW1jbUZ1WjJVOVpHRjVKbk52Y25ROWRYTmxjaVp2Y21SbGNqMWtaWE5qSm14cGJXbDBQVEky" # noqa
)
if data:
arr = []
for dex in data["dapps"]:
arr.append(
[
dex["name"],
dex["category"],
dex["activeProtocols"],
dex["statistic"]["userActivity"],
dex["statistic"]["totalVolumeInFiat"],
]
)
df = pd.DataFrame(
arr,
columns=DAPPS_COLUMNS,
).sort_values("Daily Users", ascending=False)
df["Protocols"] = df["Protocols"].apply(lambda x: ",".join(x))
return df
return pd.DataFrame()
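# Illustrative usage sketch (not part of the original module): every helper
# above falls back to an empty DataFrame when the dappradar API is unreachable.
#
#   df_dapps = get_top_dapps()
#   df_games = get_top_games()
#   if not df_dapps.empty:
#       print(df_dapps.head())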
| 28.073171 | 280 | 0.587837 | 522 | 5,755 | 6.367816 | 0.273946 | 0.052948 | 0.040915 | 0.031588 | 0.491276 | 0.466306 | 0.449759 | 0.4287 | 0.384777 | 0.384777 | 0 | 0.024325 | 0.285665 | 5,755 | 204 | 281 | 28.210784 | 0.784237 | 0.196872 | 0 | 0.435115 | 0 | 0.007634 | 0.321002 | 0.009474 | 0 | 0 | 0 | 0 | 0 | 1 | 0.038168 | false | 0 | 0.053435 | 0 | 0.175573 | 0.015267 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
50ec927b8699c2de865108f994b3246ab53caf11 | 910 | py | Python | apps/tool/apis/bd_push.py | hpp0hpp/daydayup | 7fb61f1882a1f085a49075294c3d74b1e9366bc2 | [
"MIT"
] | null | null | null | apps/tool/apis/bd_push.py | hpp0hpp/daydayup | 7fb61f1882a1f085a49075294c3d74b1e9366bc2 | [
"MIT"
] | null | null | null | apps/tool/apis/bd_push.py | hpp0hpp/daydayup | 7fb61f1882a1f085a49075294c3d74b1e9366bc2 | [
"MIT"
] | null | null | null | import requests
import re
def push_urls(url, urls):
    '''Push links through the API provided by Baidu Webmaster Tools'''
headers = {
'User-Agent': 'curl/7.12.1',
'Host': 'data.zz.baidu.com',
'Content-Type': 'text/plain',
'Content-Length': '83'
}
try:
html = requests.post(url, headers=headers, data=urls, timeout=5).text
return html
    except requests.RequestException:
        return "{'error':404,'message':'Request timed out or the API URL is wrong!'}"
def get_urls(url):
    '''Extract every link from a site's sitemap; the argument must be the sitemap URL'''
try:
html = requests.get(url, timeout=5).text
    except requests.RequestException:
return 'miss'
else:
        urls = re.findall(r'<loc>\s*?(.*?)\s*?</loc>', html)
return '\n'.join(urls)
if __name__ == '__main__':
url = 'http://data.zz.baidu.com/urls?site=https://www.daydayupclub.co&token=e3iDTbFWgB6ZEHcO'
urls = get_urls('https://www.daydayupclub.co/sitemap.xml')
r = push_urls(url,urls)
print(r)
| 24.594595 | 97 | 0.584615 | 113 | 910 | 4.60177 | 0.557522 | 0.040385 | 0.042308 | 0.057692 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.018598 | 0.231868 | 910 | 36 | 98 | 25.277778 | 0.725322 | 0.053846 | 0 | 0.148148 | 0 | 0.037037 | 0.3298 | 0.073027 | 0 | 0 | 0 | 0 | 0 | 1 | 0.074074 | false | 0 | 0.074074 | 0 | 0.296296 | 0.037037 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
50ecd5277ee74788913306707d9f8fe468e76342 | 930 | py | Python | setup.py | hankbesser/brownian-manifold | 870b2f11995d73d7083bd937846cf33a624c62a8 | [
"MIT"
] | 1 | 2019-06-25T15:17:11.000Z | 2019-06-25T15:17:11.000Z | setup.py | hankbesser/brownian-manifold | 870b2f11995d73d7083bd937846cf33a624c62a8 | [
"MIT"
] | null | null | null | setup.py | hankbesser/brownian-manifold | 870b2f11995d73d7083bd937846cf33a624c62a8 | [
"MIT"
] | 2 | 2017-04-06T15:26:14.000Z | 2019-10-07T16:37:28.000Z | from setuptools import find_packages
from numpy.distutils.core import setup
descr = """Tools to simulate and visualize Brownian motion on manifolds."""
DISTNAME = 'brownian-manifold'
DESCRIPTION = descr
MAINTAINER = 'Hank Besser'
MAINTAINER_EMAIL = 'hbess1113@gmail.com'
LICENSE = 'MIT'
URL = 'https://github.com/hankbesser/brownian-manifold'
DOWNLOAD_URL = 'https://github.com/hankbesser/brownian-manifold.git'
VERSION = '0.1.dev0'
if __name__ == "__main__":
setup(name=DISTNAME,
maintainer=MAINTAINER,
maintainer_email=MAINTAINER_EMAIL,
description=DESCRIPTION,
license=LICENSE,
version=VERSION,
url=URL,
download_url=DOWNLOAD_URL,
long_description=open('README.md').read(),
classifiers=[
'Programming Language :: Python',
],
platforms='any',
packages=['brownian_manifold'],
)
| 30 | 75 | 0.654839 | 95 | 930 | 6.231579 | 0.578947 | 0.108108 | 0.047297 | 0.057432 | 0.14527 | 0.14527 | 0.14527 | 0 | 0 | 0 | 0 | 0.009818 | 0.233333 | 930 | 30 | 76 | 31 | 0.820477 | 0 | 0 | 0 | 0 | 0 | 0.305376 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.074074 | 0 | 0.074074 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
50ed1e794453f6867736f405d29b25f60e297168 | 21,478 | py | Python | src/hyde/driver/dataset/expert_forecast/drv_data_ef_io.py | c-hydro/hyde | 3a3ff92d442077ce353b071d5afe726fc5465201 | [
"MIT"
] | null | null | null | src/hyde/driver/dataset/expert_forecast/drv_data_ef_io.py | c-hydro/hyde | 3a3ff92d442077ce353b071d5afe726fc5465201 | [
"MIT"
] | 18 | 2020-04-07T16:34:59.000Z | 2021-07-02T07:32:39.000Z | src/hyde/driver/dataset/expert_forecast/drv_data_ef_io.py | c-hydro/fp-hyde | b0728397522aceebec3e7ff115aff160a10efede | [
"MIT"
] | null | null | null | """
Class Features
Name: drv_data_ef_io
Author(s): Fabio Delogu (fabio.delogu@cimafoundation.org)
Date: '20201202'
Version: '1.0.0'
"""
#######################################################################################
# -------------------------------------------------------------------------------------
# Libraries
import logging
import os
import time
import numpy as np
import xarray as xr
import pandas as pd
from src.hyde.algorithm.analysis.expert_forecast.lib_ef_analysis import find_slopes
from src.hyde.algorithm.settings.expert_forecast.lib_ef_conventions import conventions_vars
from src.hyde.algorithm.io.expert_forecast.lib_ef_io_generic import write_obj, read_obj, read_file_csv, \
save_file_json, create_default_dframe
from src.hyde.algorithm.utils.expert_forecast.lib_ef_generic import make_folder, fill_tags2string, list_folder, \
get_root_path, get_dict_values
#from src.hyde.algorithm.analysis.satellite.modis.lib_modis_analysis_interpolation_grid import interp_grid2index
#from src.hyde.driver.dataset.satellite.modis.cpl_data_variables_modis import DriverVariable
# -------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------
# Class driver dynamic data
class DriverData:
def __init__(self, time_step, geo_collections=None,
src_dict=None, ancillary_dict=None, dst_dict=None,
variable_src_dict=None, variable_dst_dict=None, time_dict=None, template_dict=None, info_dict=None,
flag_updating_ancillary=True, flag_updating_destination=True, flag_cleaning_tmp=True):
self.time_step = time_step
self.geo_collections = geo_collections
self.src_dict = src_dict
self.ancillary_dict = ancillary_dict
self.dst_dict = dst_dict
self.time_dict = time_dict
self.variable_src_dict = variable_src_dict
self.variable_dst_dict = variable_dst_dict
self.template_dict = template_dict
self.tag_folder_name = 'folder_name'
self.tag_file_name = 'file_name'
self.tag_file_compression = 'file_compression'
self.tag_file_path_grid = 'file_path_grid'
self.domain_info = info_dict['domain']
self.group_collection = info_dict['group']
self.group_variables_list_in = ['rain_average', 'rain_peak']
self.group_variables_list_out = ['slope_x', 'slope_y', 'slope_t']
self.group_subdomain_list = []
for group_key, group_data in self.group_collection.items():
self.group_subdomain_list.append(group_data['name'])
self.variable_src_list = list(self.variable_src_dict.keys())
self.variable_dst_list = list(self.variable_dst_dict.keys())
self.time_file_range = self.collect_file_time()
self.time_dataset_collection = self.collect_dataset_time(self.time_file_range)
self.folder_name_src_raw = self.src_dict[self.tag_folder_name]
self.file_name_src_raw = self.src_dict[self.tag_file_name]
self.file_path_src_collections = self.collect_file_list(self.folder_name_src_raw, self.file_name_src_raw,
file_time=self.time_file_range)
self.folder_name_anc_raw = self.ancillary_dict[self.tag_folder_name]
self.file_name_anc_raw = self.ancillary_dict[self.tag_file_name]
self.file_path_anc = self.collect_file_list(self.folder_name_anc_raw, self.file_name_anc_raw,
file_time=self.time_step)[0]
self.folder_name_dst_raw = self.dst_dict[self.tag_folder_name]
self.file_name_dst_raw = self.dst_dict[self.tag_file_name]
self.file_path_dst_collections = self.collect_file_list(
self.folder_name_dst_raw, self.file_name_dst_raw,
file_time=self.time_step, file_extra_info={'alert_area_name': self.group_subdomain_list})
self.flag_updating_ancillary = flag_updating_ancillary
self.flag_updating_destination = flag_updating_destination
self.flag_cleaning_tmp = flag_cleaning_tmp
self.folder_name_anc_root = get_root_path(self.folder_name_anc_raw)
self.tag_dim_time = 'time'
self.tag_coord_time = 'time'
self.tag_slopes_data = 'slopes'
self.tag_geo_data = 'terrain'
self.vm_data = self.geo_collections[self.tag_slopes_data]['vm']
self.rain_avg_array = self.geo_collections[self.tag_slopes_data]['r_avg']
self.slope_x_array = self.geo_collections[self.tag_slopes_data]['sx']
self.slope_y_array = self.geo_collections[self.tag_slopes_data]['sy']
self.slope_t_array = self.geo_collections[self.tag_slopes_data]['st']
self.var_nodata = 0.0
# -------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------
# Method to collect time(s)
def collect_file_time(self, reverse=True):
time_period = self.time_dict["time_file_period"]
time_frequency = self.time_dict["time_file_frequency"]
time_rounding = self.time_dict["time_file_rounding"]
time_eta = self.time_dict["time_file_eta"]
time_end = self.time_step.floor(time_rounding)
time_end = time_end.replace(hour=int(time_eta))
time_range = pd.date_range(end=time_end, periods=time_period, freq=time_frequency)
if reverse:
time_range = time_range[::-1]
return time_range
# -------------------------------------------------------------------------------------
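    # Example (illustrative settings, not taken from the configuration files):
    # with time_file_period=3, time_file_frequency='D', time_file_rounding='D',
    # time_file_eta='12' and time_step 2020-12-02 15:00, collect_file_time
    # returns the reversed daily range
    # [2020-12-02 12:00, 2020-12-01 12:00, 2020-11-30 12:00].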
# -------------------------------------------------------------------------------------
# Method to collect dataset time(s)
def collect_dataset_time(self, time_file_range):
time_period = self.time_dict["time_dataset_period"]
time_frequency = self.time_dict["time_dataset_frequency"]
time_rounding = self.time_dict["time_dataset_rounding"]
time_eta = self.time_dict["time_dataset_eta"]
time_dataset_collection = {}
for time_step in time_file_range:
time_start = time_step.floor(time_rounding)
time_start = time_start.replace(hour=int(time_eta))
time_dataset_range = pd.date_range(start=time_start, periods=time_period, freq=time_frequency)
time_dataset_collection[time_step] = time_dataset_range
return time_dataset_collection
# -------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------
    # Method to collect a list of file paths
def collect_file_list(self, folder_name_raw, file_name_raw, file_time=None, file_variable=None,
file_extra_info=None):
domain_info = self.domain_info
file_name_list = []
if file_time is None:
file_time = self.time_file_range
if isinstance(file_time, pd.Timestamp):
file_time = pd.DatetimeIndex([file_time])
elif isinstance(file_time, pd.DatetimeIndex):
pass
else:
logging.error(' ===> File time list format is not allowed')
raise NotImplementedError('Case not implemented yet')
for datetime_step in file_time:
template_values_step = {
'domain_name': domain_info,
'source_var_name': file_variable,
'ancillary_var_name': file_variable,
'destination_var_name': file_variable,
'source_datetime': datetime_step, 'source_sub_path_time': datetime_step,
'ancillary_datetime': datetime_step, 'ancillary_sub_path_time': datetime_step,
'destination_datetime': datetime_step, 'destination_sub_path_time': datetime_step}
if file_extra_info is None:
folder_name_def = fill_tags2string(
folder_name_raw, self.template_dict, template_values_step)
file_name_def = fill_tags2string(
file_name_raw, self.template_dict, template_values_step)
file_path_def = os.path.join(folder_name_def, file_name_def)
file_name_list.append(file_path_def)
else:
for file_key, file_fields in file_extra_info.items():
for file_field in file_fields:
template_values_step[file_key] = file_field
folder_name_def = fill_tags2string(
folder_name_raw, self.template_dict, template_values_step)
file_name_def = fill_tags2string(
file_name_raw, self.template_dict, template_values_step)
file_path_def = os.path.join(folder_name_def, file_name_def)
file_name_list.append(file_path_def)
return file_name_list
# -------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------
# Method to write dataset in dictionary format
@staticmethod
def write_dset_obj(file_name, file_obj):
if isinstance(file_obj, xr.Dataset):
file_data = file_obj.to_dict()
else:
file_data = file_obj
write_obj(file_name, file_data)
# -------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------
# Method to load dataset saved in dictionary format
@staticmethod
def read_dset_obj(file_name, data_format='dictionary'):
file_data = read_obj(file_name)
if data_format == 'dataset':
file_obj = xr.Dataset.from_dict(file_data)
else:
file_obj = file_data
return file_obj
# -------------------------------------------------------------------------------------
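    # Illustrative round trip (the file name is hypothetical): an xarray
    # Dataset is stored as a plain dictionary and restored on request.
    #
    #   DriverData.write_dset_obj('/tmp/anc.workspace', dset)
    #   dset_back = DriverData.read_dset_obj('/tmp/anc.workspace',
    #                                        data_format='dataset')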
# -------------------------------------------------------------------------------------
# Method to add global attributes
def add_global_attributes(self, global_attrs=None):
if global_attrs is None:
global_attrs = {}
if 'file_date' not in list(global_attrs.keys()):
global_attrs['file_date'] = 'Created ' + time.ctime(time.time())
if 'nodata_value' not in list(global_attrs.keys()):
global_attrs['nodata_value'] = self.var_nodata
return global_attrs
# -------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------
# Method to dump datasets
def dump_data(self):
logging.info(' ----> Dump datasets ... ')
time_step = self.time_step
var_file_path_anc = self.file_path_anc
var_file_path_dst_collections = self.file_path_dst_collections
var_dict_dst = self.variable_dst_dict
flag_upd_dst = self.flag_updating_destination
logging.info(' -----> Time ' + str(time_step) + ' ... ')
if flag_upd_dst:
for var_file_path_dst in var_file_path_dst_collections:
if os.path.exists(var_file_path_dst):
os.remove(var_file_path_dst)
if os.path.exists(var_file_path_anc):
logging.info(' ------> Save dataset object ... ')
# Get data
var_dict_anc = self.read_dset_obj(var_file_path_anc)
# Iterate over variables
for (var_key_anc, var_dframe_anc), var_file_path_dst in zip(
var_dict_anc.items(), var_file_path_dst_collections):
logging.info(' -------> Alert area ' + var_key_anc + ' ... ')
global_attrs = self.add_global_attributes({'name': var_key_anc})
var_folder_name_dst, var_file_name_dst = os.path.split(var_file_path_dst)
make_folder(var_folder_name_dst)
var_dict_anc = var_dframe_anc.to_dict()
save_file_json(var_file_path_dst, var_dict_anc, file_attrs=global_attrs)
logging.info(' -------> Alert area ' + var_key_anc + ' ... DONE')
logging.info(' ------> Save dataset object ... DONE')
logging.info(' -----> Time ' + str(time_step) + ' ... DONE.')
else:
logging.info(' -----> Time ' + str(time_step) + ' ... SKIPPED. All datasets are undefined')
logging.info(' ----> Dump datasets ... DONE')
# -------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------
# Method to organize datasets
def organize_data(self):
logging.info(' ----> Organize datasets ... ')
time_step = self.time_step
time_file_range = self.time_file_range
time_dataset_collection = self.time_dataset_collection
slopes_collections = self.geo_collections[self.tag_slopes_data]
group_collection = self.group_collection
group_variables_list_in = self.group_variables_list_in
group_variables_list_out = self.group_variables_list_out
file_path_src_list = self.file_path_src_collections
var_src_name = self.variable_src_list[0]
var_src_dict = self.variable_src_dict
flag_upd_anc = self.flag_updating_ancillary
var_file_path_anc = self.file_path_anc
if flag_upd_anc:
if os.path.exists(var_file_path_anc):
os.remove(var_file_path_anc)
if not os.path.exists(var_file_path_anc):
var_src_collection = {}
            for time_id, time_file_step in enumerate(time_file_range):
logging.info(' -----> Time ' + str(time_file_step) + ' ... ')
var_src_fields = var_src_dict[var_src_name]
var_src_mode = var_src_fields['var_mode']
var_src_name = var_src_fields['var_name']
var_src_method_compute = var_src_fields['var_method_compute']
var_src_attributes = var_src_fields['var_attributes']
                var_file_path_src = file_path_src_list[time_id]
logging.info(' ------> Get data ... ')
if var_src_mode:
if os.path.exists(var_file_path_src):
var_file_data_src = read_file_csv(var_file_path_src)
logging.info(' ------> Get data ... DONE')
else:
logging.info(' ------> Get data ... FAILED')
logging.warning(' ===> File not found ' + var_file_path_src)
var_file_data_src = None
else:
logging.info(' ------> Get data ... SKIPPED. Variable ' + var_src_name + ' not activated')
var_file_data_src = None
var_src_collection[time_file_step] = {}
var_src_collection[time_file_step] = var_file_data_src
logging.info(' -----> Time ' + str(time_file_step) + ' ... DONE')
logging.info(' -----> Compute data ... ')
for var_src_key, var_src_data in var_src_collection.items():
var_src_check = False
if var_src_data is not None:
var_src_shape = var_src_data.shape
var_src_columns = list(var_src_data.columns)
var_default_data = create_default_dframe(var_src_columns, var_src_shape,
df_nodata=self.var_nodata)
var_src_check = True
break
if var_src_check:
var_collections = None
for (var_src_key, var_src_data), var_time_data in zip(var_src_collection.items(),
time_dataset_collection.values()):
if var_collections is None:
var_collections = {}
for group_key, group_data in group_collection.items():
group_variables = group_data['variables']
var_dict = {}
for group_key_step_in in group_variables_list_in:
group_data_step = group_variables[group_key_step_in]
for var_time, (var_key, var_data) in zip(var_time_data, group_data_step.items()):
var_tag = var_data['tag']
if var_src_data is not None:
if var_tag in list(var_src_data.columns):
var_value = var_src_data[var_tag].values[0]
var_value = np.float64(var_value)
else:
var_value = self.var_nodata
else:
var_value = var_default_data[var_tag].values[0]
if group_key_step_in not in var_dict:
var_dict[group_key_step_in] = {}
var_dict[group_key_step_in][var_time] = var_value
var_df_step = pd.DataFrame.from_dict(var_dict)
var_df_step.index.name = self.tag_dim_time
# compute slope(s)
var_rain_avg_step = var_df_step[self.group_variables_list_in[0]].values
var_rain_peak_step = var_df_step[self.group_variables_list_in[1]].values
var_slope_step = find_slopes(var_rain_avg_step, var_rain_peak_step, self.rain_avg_array,
self.slope_x_array, self.slope_y_array, self.slope_t_array, self.vm_data)
for group_key_step_out, var_slope_step_out in zip(group_variables_list_out, var_slope_step):
var_df_step[group_key_step_out] = var_slope_step_out
# select datasets by time step
var_df_step = var_df_step.loc[var_df_step.index >= time_step]
if group_key not in list(var_collections.keys()):
var_collections[group_key] = var_df_step
else:
var_df_tmp = var_collections[group_key]
var_df_concatenated = pd.concat([var_df_tmp, var_df_step], axis=0)
var_df_concatenated = var_df_concatenated[~var_df_concatenated.index.duplicated(keep='first')]
var_df_concatenated.sort_index(inplace=True)
var_df_step.index.name = self.tag_dim_time
var_collections[group_key] = var_df_concatenated
if var_collections is not None:
var_folder_name_anc, var_file_name_anc = os.path.split(var_file_path_anc)
make_folder(var_folder_name_anc)
self.write_dset_obj(var_file_path_anc, var_collections)
logging.info(' -----> Compute data ... DONE')
else:
logging.info(' -----> Compute data ... SKIPPED. All datasets are not available')
logging.info(' ----> Organize datasets ... DONE')
else:
logging.info(' ----> Organize datasets ... SKIPPED. Datasets are previously computed')
# -------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------
# Method to clean temporary information
def clean_tmp(self):
file_path_anc_list = self.file_path_anc
clean_tmp = self.flag_cleaning_tmp
folder_name_anc_main = self.folder_name_anc_root
if not isinstance(file_path_anc_list, list):
file_path_anc_list = [file_path_anc_list]
if clean_tmp:
# Remove tmp file and folder(s)
for file_path_step in file_path_anc_list:
if os.path.exists(file_path_step):
if os.path.isfile(file_path_step):
os.remove(file_path_step)
var_folder_name_step, var_file_name_step = os.path.split(file_path_step)
if var_folder_name_step != '':
if os.path.exists(var_folder_name_step):
if not os.listdir(var_folder_name_step):
os.rmdir(var_folder_name_step)
# Remove empty folder(s)
folder_name_anc_list = list_folder(folder_name_anc_main)
for folder_name_anc_step in folder_name_anc_list:
if os.path.exists(folder_name_anc_step):
os.rmdir(folder_name_anc_step)
# -------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------
| 44.467909 | 122 | 0.550424 | 2,413 | 21,478 | 4.46208 | 0.100705 | 0.035665 | 0.022476 | 0.013374 | 0.4133 | 0.289681 | 0.21278 | 0.137643 | 0.053683 | 0.040866 | 0 | 0.001868 | 0.277214 | 21,478 | 482 | 123 | 44.560166 | 0.691703 | 0.135022 | 0 | 0.123779 | 0 | 0 | 0.076026 | 0.004935 | 0 | 0 | 0 | 0 | 0 | 1 | 0.032573 | false | 0.003257 | 0.032573 | 0 | 0.084691 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
50f3113900e63792311bd5b71c254cb994a34d7a | 4,961 | py | Python | bundlegen/rabbitmq/main.py | ProphetUnckleJoe/BundleGen | 27d11effc8461c924e62e7dad3ecbf633e99972e | [
"Apache-2.0"
] | null | null | null | bundlegen/rabbitmq/main.py | ProphetUnckleJoe/BundleGen | 27d11effc8461c924e62e7dad3ecbf633e99972e | [
"Apache-2.0"
] | null | null | null | bundlegen/rabbitmq/main.py | ProphetUnckleJoe/BundleGen | 27d11effc8461c924e62e7dad3ecbf633e99972e | [
"Apache-2.0"
] | null | null | null | # If not stated otherwise in this file or this component's license file the
# following copyright and licenses apply:
#
# Copyright 2021 Consult Red
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import os
import signal
import click
import pika
from time import sleep
from dotenv import load_dotenv, find_dotenv
from loguru import logger
from bundlegen.rabbitmq.message_handler import msg_received
def signal_handler(s, frame):
"""
Disconnect from rabbitmq and quit when we receive a signal
"""
logger.debug(f"Received signal {s}")
logger.info("Shutting down. . .")
try:
sys.exit(0)
except SystemExit:
os._exit(0)
@click.group()
@click.option('-v', '--verbose', count=True, help='Set logging level')
def cli(verbose):
"""RabbitMQ front-end for BundleGen
"""
# Set up logging
logger.remove()
if verbose > 3:
verbose = 3
log_levels = {
0: 'SUCCESS',
1: 'INFO',
2: 'DEBUG',
3: 'TRACE'
}
logger.add(sys.stderr, level=log_levels.get(verbose))
def create_directory_from_env_var(env_var_name):
"""
Create any directories we need to work if they don't exist
"""
dir_path = os.environ.get(env_var_name)
if not dir_path:
logger.error(f"Required setting {env_var_name} not set")
return False
if not os.path.exists(dir_path):
os.makedirs(dir_path)
return True
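# Example .env for local development (illustrative values only; the variable
# names are the ones read elsewhere in this module):
#
#   RABBITMQ_HOST=localhost
#   RABBITMQ_PORT=5672
#   BUNDLE_STORE_DIR=/tmp/bundlestore
#   BUNDLEGEN_TMP_DIR=/tmp/bundlegen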
@click.command()
def start():
"""
Starts BundleGen RabbitMQ consumer
"""
# Setup signal handlers
signal.signal(signal.SIGINT, signal_handler)
signal.signal(signal.SIGTERM, signal_handler)
# Read settings from a .env file for development
load_dotenv(find_dotenv())
# Set up our directories
if not create_directory_from_env_var("BUNDLE_STORE_DIR"):
sys.exit(1)
if not create_directory_from_env_var("BUNDLEGEN_TMP_DIR"):
sys.exit(1)
logger.info("Starting RabbitMQ BundleGen consumer. . .")
successful_connection = False
max_retry_count = 5
retry_count = 0
# Connect to RabbitMQ
while True:
try:
if os.environ.get('RABBITMQ_PORT'):
connection = pika.BlockingConnection(
pika.ConnectionParameters(host=os.environ.get('RABBITMQ_HOST'),
                                              port=int(os.environ.get('RABBITMQ_PORT')),
connection_attempts=3, retry_delay=1))
else:
connection = pika.BlockingConnection(
pika.ConnectionParameters(host=os.environ.get('RABBITMQ_HOST'),
connection_attempts=3, retry_delay=1))
channel = connection.channel()
# will only create if queue doesn't exist
channel.queue_declare(queue="bundlegen-requests", durable=True)
channel.basic_consume(queue='bundlegen-requests',
on_message_callback=msg_received)
successful_connection = True
logger.success(
"Connected to RabbitMQ broker. Waiting for messages. . .")
channel.start_consuming()
except pika.exceptions.ConnectionClosedByBroker:
# Don't recover if connection was closed by broker
logger.error("Connection was closed by broker")
sys.exit(1)
except pika.exceptions.AMQPChannelError:
# Don't recover on channel errors
logger.error("AMPQ Channel error, cannot recover")
sys.exit(1)
except pika.exceptions.AMQPConnectionError:
# Recover on all other connection errors (assuming we've managed to connect at least once before)
if successful_connection:
if retry_count < max_retry_count:
logger.warning(
f"Lost connection to rabbitmq - attempting to reconnect... ({retry_count}/{max_retry_count})")
retry_count += 1
sleep(2)
continue
else:
logger.error(
"Lost connection to rabbitmq - max retries hit, giving up")
sys.exit(1)
else:
logger.error(
f"Cannot connect to rabbitmq at {os.environ.get('RABBITMQ_HOST')}")
sys.exit(1)
cli.add_command(start)
| 31.398734 | 118 | 0.617416 | 591 | 4,961 | 5.069374 | 0.375635 | 0.016355 | 0.024032 | 0.033378 | 0.180574 | 0.130841 | 0.076101 | 0.056075 | 0.056075 | 0.056075 | 0 | 0.008631 | 0.299335 | 4,961 | 157 | 119 | 31.598726 | 0.85328 | 0.240677 | 0 | 0.206522 | 0 | 0 | 0.166847 | 0.017876 | 0 | 0 | 0 | 0 | 0 | 1 | 0.043478 | false | 0 | 0.097826 | 0 | 0.163043 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
50f4d91af70d1d2e26d37eb6369512608b006dc5 | 2,831 | py | Python | seq2seq/models/modules/state.py | B0BBB/seq2seq.pytorch | 54bb0e9f3e5c7db7f257841ed652e8ff447b8ee4 | [
"MIT"
] | null | null | null | seq2seq/models/modules/state.py | B0BBB/seq2seq.pytorch | 54bb0e9f3e5c7db7f257841ed652e8ff447b8ee4 | [
"MIT"
] | null | null | null | seq2seq/models/modules/state.py | B0BBB/seq2seq.pytorch | 54bb0e9f3e5c7db7f257841ed652e8ff447b8ee4 | [
"MIT"
] | null | null | null | import torch
from torch.autograd import Variable
class State(object):
__slots__ = ['batch_first', 'hidden', 'inputs', 'outputs', 'context',
'attention', 'attention_score', 'mask']
def __init__(self, hidden=None, inputs=None, outputs=None, context=None, attention=None,
attention_score=None, mask=None, batch_first=False):
self.hidden = hidden
self.outputs = outputs
self.inputs = inputs
self.context = context
self.attention = attention
self.mask = mask
self.batch_first = batch_first
self.attention_score = attention_score
def __select_state(self, state, i, type_state='hidden'):
if isinstance(state, tuple):
return tuple(self.__select_state(s, i, type_state) for s in state)
elif isinstance(state, Variable) or torch.is_tensor(state):
if type_state == 'hidden':
batch_dim = 0 if state.dim() < 3 else 1
else:
batch_dim = 0 if self.batch_first else 1
return state.narrow(batch_dim, i, 1)
else:
return state
def __merge_states(self, state_list, type_state='hidden'):
if state_list is None:
return None
if isinstance(state_list[0], State):
return State().from_list(state_list)
if isinstance(state_list[0], tuple):
return tuple([self.__merge_states(s, type_state) for s in zip(*state_list)])
else:
if isinstance(state_list[0], Variable) or torch.is_tensor(state_list[0]):
if type_state == 'hidden':
batch_dim = 0 if state_list[0].dim() < 3 else 1
else:
batch_dim = 0 if self.batch_first else 1
return torch.cat(state_list, batch_dim)
else:
assert state_list[1:] == state_list[:-1] # all items are equal
return state_list[0]
def __getitem__(self, index):
if isinstance(index, slice):
state_list = [self[idx] for idx in range(
index.start or 0, index.stop or len(self), index.step or 1)]
return State().from_list(state_list)
else:
item = State()
for s in self.__slots__:
value = getattr(self, s, None)
if isinstance(value, State):
selected_value = value[index]
else:
selected_value = self.__select_state(value, index, s)
setattr(item, s, selected_value)
return item
def from_list(self, state_list):
for s in self.__slots__:
values = [getattr(item, s, None) for item in state_list]
setattr(self, s, self.__merge_states(values, s))
return self
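# Illustrative usage sketch (not part of the original module): select the
# state of a single batch element, then merge per-element states back.
#
#   hidden = torch.zeros(2, 8, 16)               # (layers, batch, features)
#   state = State(hidden=hidden)
#   first = state[0]                              # hidden narrowed to batch 0
#   merged = State().from_list([state[i] for i in range(8)])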
| 39.873239 | 92 | 0.573649 | 351 | 2,831 | 4.384615 | 0.193732 | 0.099415 | 0.038986 | 0.02859 | 0.261209 | 0.179337 | 0.106563 | 0.106563 | 0.106563 | 0.063678 | 0 | 0.011094 | 0.331332 | 2,831 | 70 | 93 | 40.442857 | 0.801902 | 0.006711 | 0 | 0.238095 | 0 | 0 | 0.031673 | 0 | 0 | 0 | 0 | 0 | 0.015873 | 1 | 0.079365 | false | 0 | 0.031746 | 0 | 0.31746 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
50f53995c121fabd85967940ccea9b6966d951c2 | 7,383 | py | Python | connect-four/player.py | pietermarsman/alpha-connect-four | ba880d3dc6307edafdbc3645b701a6734889c7ed | [
"MIT"
] | 3 | 2020-05-19T08:52:57.000Z | 2021-06-25T14:42:06.000Z | connect-four/player.py | pietermarsman/alpha-connect-four | ba880d3dc6307edafdbc3645b701a6734889c7ed | [
"MIT"
] | null | null | null | connect-four/player.py | pietermarsman/alpha-connect-four | ba880d3dc6307edafdbc3645b701a6734889c7ed | [
"MIT"
] | 1 | 2021-07-01T08:30:34.000Z | 2021-07-01T08:30:34.000Z | import time
from abc import ABCMeta, abstractmethod
from operator import itemgetter
from random import choice
from typing import Union
import numpy as np
from tensorflow.python.keras import backend as K
from tensorflow.python.keras.engine.saving import load_model
from analyzer import player_value
from state import State, FOUR, Action
from tree import MiniMaxNode, MonteCarloNode, AlphaConnectNode, BatchEvaluator
from util import format_in_action_grid
class Player(metaclass=ABCMeta):
def __init__(self, name: str = None):
if name is None:
self.name = self.__class__.__name__
else:
self.name = name
def __str__(self):
return self.name
def __repr__(self):
return '%s()' % (self.__class__.__name__)
@abstractmethod
def decide(self, state: State):
pass
class ConsolePlayer(Player):
def decide(self, state: State):
while True:
print('Possible actions:')
print(format_in_action_grid({action: str(action) for action in Action.iter_actions()},
cell_format='{:.2s}', default_value=' '))
user_input = input('Choose your action: ')
try:
action = Action.from_hex(user_input)
if action in state.allowed_actions:
print()
return action
else:
print('Action %s not allowed' % action)
except ValueError:
print('User input is not an action')
class RandomPlayer(Player):
def decide(self, state: State):
actions = state.allowed_actions
return choice(list(actions))
class GreedyPlayer(Player):
def decide(self, state: State):
action_values = {}
for action in state.allowed_actions:
new_state = state.take_action(action)
action_values[action] = player_value(new_state, state.next_color)
_, max_value = max(action_values.items(), key=itemgetter(1))
best_actions = [action for action, value in action_values.items() if value == max_value]
random_best_action = choice(best_actions)
return random_best_action
class MiniMaxPlayer(Player):
def __init__(self, name: str = None, depth=2):
super().__init__(name)
self._depth = depth
self.expands = sum(((FOUR * FOUR) ** d for d in range(depth + 1)))
def __repr__(self):
return '%s(depth=%d)' % (self.__class__.__name__, self._depth)
def decide(self, state: State):
root = MiniMaxNode(state, state.next_color)
frontier = [root]
for i in range(self.expands):
if len(frontier) > 0:
next_node = frontier.pop(0)
frontier.extend(next_node.expand())
action_values = {action: node.value for action, node in root.children.items()}
_, max_value = max(action_values.items(), key=itemgetter(1))
best_actions = [action for action, value in action_values.items() if value == max_value]
random_best_action = choice(best_actions)
return random_best_action
class MonteCarloPlayer(Player):
def __init__(self, name: str = None, exploration=1.0, budget=1000):
self.root = MonteCarloNode(State.empty(), exploration=exploration)
self.exploration = exploration
self.budget = budget
super().__init__(name)
def __repr__(self):
return '%s(exploration=%.3f, budget=%d)' % (self.__class__.__name__, self.exploration, self.budget)
def decide(self, state: State):
t0 = time.time()
self.root = self.root.find_state(state)
if self.root is None:
self.root = MonteCarloNode(state, exploration=self.exploration)
self.root.parent = None
while time.time() - t0 < self.budget / 1000:
self.root.search()
return self.root.best_action()
class AlphaConnectPlayer(Player):
def __init__(self, model_path, name: str = None, exploration=1.0, start_temperature=1.0, time_budget=None,
search_budget=None, self_play=False, batch_size=16):
self._model_path = model_path
self.model = self.load_model(model_path, batch_size)
self.exploration = exploration
self._temperature = start_temperature
self.is_self_play = self_play
self.root = None # type: Union[None, AlphaConnectNode]
self.set_root_node()
if search_budget is None and time_budget is not None:
self.budget_type = 'time'
self.budget = time_budget
elif search_budget is not None and time_budget is None:
self.budget_type = 'search'
self.budget = search_budget
else:
raise ValueError('Either time_budget xor search_budget should be None, not neither or both')
self.history = []
super().__init__(name)
def __repr__(self):
if self.budget_type == 'time':
args = (self.budget, None)
else:
args = (None, self.budget)
return '%s(model_path=%r, exploration=%r, start_temperature=%r, time_budget=%r, search_budget=%r, ' \
'self_play=%r, batch_size=%r)' % (self.__class__.__name__, self._model_path, self.exploration,
self._temperature, args[0], args[1], self.is_self_play,
self.model.batch_size)
@staticmethod
def load_model(model_path, batch_size):
model = load_model(model_path)
# first prediction takes more time
model.predict(np.array([State.empty().to_numpy()]))
return BatchEvaluator(model, batch_size)
def set_root_node(self, state: State = None):
if state is None:
state = State.empty()
if self.root is not None:
self.root = self.root.find_state(state)
if self.root is None:
self.root = AlphaConnectNode(state, action_prob=1.0)
if self.is_self_play:
self.root.add_dirichlet_noise = True
if self.root.is_played:
self.root.add_dirichlet_noise_to_action_probs()
self.root.parent = None
def clear_session(self):
K.clear_session()
def decide(self, state: State):
t0 = time.time()
self.set_root_node(state)
if self.budget_type == 'time':
while time.time() - t0 < self.budget / 1000:
self.root.search(self.model, self.exploration)
else:
for _ in range(self.budget):
self.root.search(self.model, self.exploration)
self.save_policy()
action = self.root.sample_action(self.temperature(state))
return action
def temperature(self, state: State):
"""AlphaGo lowers the temperature to infinitesimal after 30 moves
Connect Four is a smaller game, so we use 16 moves
When playing for real an infinitesimal temperature is always used
"""
if self.is_self_play and state.number_of_stones < 16:
return self._temperature
return None
def save_policy(self):
self.history.append({
'policy': self.root.policy(1.0),
'total_value': self.root.total_value,
'visit_count': self.root.visit_count
})
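# Illustrative sketch (not part of the original module): two cheap players
# alternating moves from an empty board. decide()/take_action() are used as
# defined above; the loop condition is an assumption about the State API.
#
#   state = State.empty()
#   players = [RandomPlayer(), GreedyPlayer()]
#   while not state.is_end_of_game:               # hypothetical attribute
#       player = players[state.number_of_stones % 2]
#       state = state.take_action(player.decide(state))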
| 34.825472 | 110 | 0.618719 | 900 | 7,383 | 4.827778 | 0.207778 | 0.044189 | 0.028999 | 0.028999 | 0.309091 | 0.228539 | 0.164787 | 0.137629 | 0.137629 | 0.121519 | 0 | 0.00833 | 0.284573 | 7,383 | 211 | 111 | 34.990521 | 0.814275 | 0.033997 | 0 | 0.279503 | 0 | 0.006211 | 0.052958 | 0.002958 | 0 | 0 | 0 | 0 | 0 | 1 | 0.130435 | false | 0.006211 | 0.074534 | 0.024845 | 0.335404 | 0.031056 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
50f54eeaf492e134b83613d506cfd2be6bff7e34 | 2,861 | py | Python | package/tests/test_disable.py | nikoladze/prmon | 5f69f056e47119d2ed8a9379d4f4ad4290f27457 | [
"Apache-2.0"
] | 35 | 2018-03-28T11:32:16.000Z | 2022-03-11T19:05:11.000Z | package/tests/test_disable.py | nikoladze/prmon | 5f69f056e47119d2ed8a9379d4f4ad4290f27457 | [
"Apache-2.0"
] | 166 | 2018-03-13T20:19:09.000Z | 2022-01-31T08:54:22.000Z | package/tests/test_disable.py | HEP-SF/prmon | 33ce283183cbb31dc779f43fe899d26a699b1cc6 | [
"Apache-2.0"
] | 13 | 2018-03-16T09:37:26.000Z | 2022-01-26T07:38:39.000Z | #! /usr/bin/env python3
#
# Copyright (C) 2020 CERN
# License Apache2 - see LICENCE file
"""prmon test harness to check monitors can be disabled correctly"""
import argparse
import json
import os
import subprocess
import sys
import unittest
def setup_configurable_test(disable=None):
    """Wrap the class definition in a function to allow arguments to be passed"""
    # Avoid a mutable default argument; this also covers main() passing None
    # when --disable is not given on the command line
    if disable is None:
        disable = []
class ConfigurableProcessMonitor(unittest.TestCase):
"""Test class for a specific set of parameters"""
__monitor_fields__ = {
"countmon": "nprocs",
"cpumon": "stime",
"iomon": "rchar",
"memmon": "vmem",
"netmon": "rx_bytes",
"nvidiamon": "ngpus",
}
def setup_and_run(self, envdisable):
"""Baseline test run"""
burn_cmd = ["./burner", "--time", "3"]
prmon_cmd = ["../prmon", "--interval", "1"]
if envdisable:
os.environ["PRMON_DISABLE_MONITOR"] = ",".join(disable)
else:
for d in disable:
prmon_cmd.extend(("-d", d))
prmon_cmd.append("--")
prmon_cmd.extend(burn_cmd)
prmon_p = subprocess.Popen(prmon_cmd, shell=False)
prmon_rc = prmon_p.wait()
self.assertEqual(prmon_rc, 0, "Non-zero return code from prmon")
with open("prmon.json") as infile:
prmon_json = json.load(infile)
# Check we don't have fields corresponding to disabled monitors
for d in disable:
if d in ConfigurableProcessMonitor.__monitor_fields__:
self.assertFalse(
ConfigurableProcessMonitor.__monitor_fields__[d]
in prmon_json["Max"]
)
def test_disable_cli(self):
"""Thin wrapper around selective test, CLI version"""
self.setup_and_run(False)
def test_disable_envvar(self):
"""Thin wrapper around selective test, envvar version"""
self.setup_and_run(True)
return ConfigurableProcessMonitor
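# Illustrative invocation (outside the provided main()): build a test case
# with the network monitor disabled and hand it to unittest by binding it to
# a module-level name, mirroring the pattern at the bottom of this file.
#
#   TestDisableNet = setup_configurable_test(disable=["netmon"])
#   unittest.main()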
def main_parse_args_and_get_test():
"""Parse arguments and call test class generator
returning the test case (which is unusual for a
main() function)"""
parser = argparse.ArgumentParser(
description="Configurable test runner - disable monitors"
)
parser.add_argument("--disable", nargs="+")
args = parser.parse_args()
# Stop unittest from being confused by the arguments
sys.argv = sys.argv[:1]
return setup_configurable_test(args.disable)
if __name__ == "__main__":
# As unitest will only run tests in the global namespace
# we return the test instance from main()
the_test = main_parse_args_and_get_test()
unittest.main()
| 31.097826 | 81 | 0.595945 | 319 | 2,861 | 5.137931 | 0.467085 | 0.024405 | 0.020134 | 0.015863 | 0.0964 | 0.069555 | 0 | 0 | 0 | 0 | 0 | 0.005028 | 0.304789 | 2,861 | 91 | 82 | 31.43956 | 0.819005 | 0.243272 | 0 | 0.037736 | 0 | 0 | 0.112583 | 0.009934 | 0 | 0 | 0 | 0 | 0.037736 | 1 | 0.09434 | false | 0 | 0.113208 | 0 | 0.283019 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
50f65841293a134d5f3cf0743932c5665777889e | 2,352 | py | Python | src/test_vid4.py | hahazh/OFR-BRN | 0a3b71ee040b916ae94fb3dbaf7d837506344632 | [
"Apache-2.0"
] | 1 | 2022-03-24T08:41:34.000Z | 2022-03-24T08:41:34.000Z | src/test_vid4.py | hahazh/OFR-BRN | 0a3b71ee040b916ae94fb3dbaf7d837506344632 | [
"Apache-2.0"
] | null | null | null | src/test_vid4.py | hahazh/OFR-BRN | 0a3b71ee040b916ae94fb3dbaf7d837506344632 | [
"Apache-2.0"
] | null | null | null | from ast import arg
import os
import cv2
import torch
import numpy as np
from tqdm import tqdm
from OFR_BRN import Flow_STSR
from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
from mydata.load_data import get_vid4_loader
def get_model_total_params(model):
model_parameters = filter(lambda p: p.requires_grad, model.parameters())
params = sum([np.prod(p.size()) for p in model_parameters])
return (1.0*params/(1000*1000))
def main():
os.environ["CUDA_VISIBLE_DEVICES"]='2'
model = Flow_STSR()
model = model.cuda()
model.eval()
print("para ",get_model_total_params(model))
seq_name_tuple = ('calendar','city','foliage','walk')
scale = 4
seq_length_list = (41,33,49,47)
batch_size = 1
model_dict = torch.load(args.weight)
model.load_state_dict(model_dict , strict=True)
with torch.no_grad():
for q in range(4):
tag = seq_name_tuple[q]
seq_length = seq_length_list[q]
vid4_dataloader = get_vid4_loader(args.datapath,scale,tag,seq_length,batch_size,0)
for i, (gt_image,images,tag_ix,crop_shape) in enumerate(tqdm(vid4_dataloader)):
images = images.cuda()
out = model(images)
img_p = args.outputpath+'/'+tag
if not os.path.exists(img_p):
os.makedirs(img_p)
out_cp = out.squeeze(0)
for j in range(seq_length):
img = out_cp[j].permute(1,2,0).detach().cpu().clamp(0,1).numpy()*255.0
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
x1,x2,y1,y2 = crop_shape
img = img[x1:x2,y1:y2,:]
cv2.imwrite(img_p+'/'+str(int(tag_ix[j])).zfill(8)+'.png',img)
if __name__ == "__main__":
parser = ArgumentParser(description="validation script for vid4",formatter_class=ArgumentDefaultsHelpFormatter)
parser.add_argument('--datapath', default='xxx', type=str, help='dataset path')
parser.add_argument('--outputpath', default='../output/vid4', type=str, help='outputpath of test sequence')
parser.add_argument('--weight', default='../pretrained_weight/ofr-brn.pth', type=str, help='weight of model')
args = parser.parse_args()
main()
| 33.6 | 115 | 0.618622 | 314 | 2,352 | 4.426752 | 0.43949 | 0.032374 | 0.036691 | 0.027338 | 0.034532 | 0 | 0 | 0 | 0 | 0 | 0 | 0.030234 | 0.254677 | 2,352 | 70 | 116 | 33.6 | 0.762693 | 0 | 0 | 0 | 0 | 0 | 0.094348 | 0.0136 | 0 | 0 | 0 | 0 | 0 | 1 | 0.04 | false | 0 | 0.18 | 0 | 0.24 | 0.02 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
50fa110249fecb401efe96e6af26a4a70b8e561f | 702 | py | Python | gobblegobble/apps.py | ejesse/gobblegobble | 3958f9257a58e15a6c89dd6075739fa7edb150e9 | [
"MIT"
] | 1 | 2016-04-04T23:39:35.000Z | 2016-04-04T23:39:35.000Z | gobblegobble/apps.py | ejesse/gobblegobble | 3958f9257a58e15a6c89dd6075739fa7edb150e9 | [
"MIT"
] | null | null | null | gobblegobble/apps.py | ejesse/gobblegobble | 3958f9257a58e15a6c89dd6075739fa7edb150e9 | [
"MIT"
] | null | null | null | import logging
from django.apps import AppConfig
from django.conf import settings
from gobblegobble.bot import GobbleBot, import_bot_handlers
LOGGER = logging.getLogger()
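# Copies every setting whose name starts with `prefix` onto `settings_to`,
# dropping the prefix from the attribute name.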
def copy_prefixed_settings(prefix, settings_from, settings_to):
for setting in dir(settings_from):
if setting.startswith(prefix):
new_name = setting.replace(prefix,'')
LOGGER.info("Copying %s setting to slackbot as %s" %(setting, new_name))
setattr(settings_to, new_name, getattr(settings_from,setting))
class GobbleGobbleConfig(AppConfig):
name = 'gobblegobble'
def ready(self):
AppConfig.ready(self)
bot = GobbleBot()
import_bot_handlers()
| 26 | 84 | 0.706553 | 84 | 702 | 5.738095 | 0.464286 | 0.099585 | 0.074689 | 0.107884 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.205128 | 702 | 26 | 85 | 27 | 0.863799 | 0 | 0 | 0 | 0 | 0 | 0.068376 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.117647 | false | 0 | 0.294118 | 0 | 0.529412 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
50fb750d9548058a28d2610ee8ee9249514a6c2b | 4,708 | py | Python | src/bmp.py | aravmaxim/filebleacher-py | 5eb3843c05b32d2de7a578dd8c450678df4c5bd7 | [
"MIT"
] | null | null | null | src/bmp.py | aravmaxim/filebleacher-py | 5eb3843c05b32d2de7a578dd8c450678df4c5bd7 | [
"MIT"
] | null | null | null | src/bmp.py | aravmaxim/filebleacher-py | 5eb3843c05b32d2de7a578dd8c450678df4c5bd7 | [
"MIT"
] | null | null | null | BMP_HEADER_SIZE = 14
BMP_MAGIC_NUMBERS = {
'BM': 'Windows 3.1x, 95, NT, ... etc',
'BA': 'OS/2 struct bitmap array',
'CI': 'OS/2 struct color icon',
'CP': 'OS/2 const color pointer',
'IC': 'OS/2 struct icon',
'PT': 'OS/2 pointer'
}
BITMAPCOREHEADER_SIZE = 12
BITMAPCOREHEADER2_SIZE = 64
OS22XBITMAPHEADER_SIZE = 16
BITMAPINFOHEADER_SIZE = 40
BITMAPV2INFOHEADER_SIZE = 52
BITMAPV3INFOHEADER_SIZE = 56
BITMAPV4HEADER_SIZE = 108
BITMAPV5HEADER_SIZE = 124
DIB_HEADER_TYPES = {
BITMAPCOREHEADER_SIZE : 'BITMAPCOREHEADER',
BITMAPCOREHEADER2_SIZE : 'BITMAPCOREHEADER2',
OS22XBITMAPHEADER_SIZE : 'OS22XBITMAPHEADER',
BITMAPINFOHEADER_SIZE : 'BITMAPINFOHEADER',
BITMAPV2INFOHEADER_SIZE : 'BITMAPV2INFOHEADER',
BITMAPV3INFOHEADER_SIZE : 'BITMAPV3INFOHEADER',
BITMAPV4HEADER_SIZE : 'BITMAPV4HEADER',
BITMAPV5HEADER_SIZE : 'BITMAPV5HEADER'
}
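# BMP layout: a 14-byte file header followed by a DIB header whose first dword
# encodes its own size; that size identifies the DIB variant listed above.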
def __read_bmp_header(data : bytes) -> dict:
header_data : bytes = data[:BMP_HEADER_SIZE]
bmpHeader = {
'magic_number' : (header_data[:2]).decode("ascii") ,
'bmp_size' : int.from_bytes(header_data[2:6], byteorder='little', signed=False),
'reserved1' : int.from_bytes(header_data[6:8], byteorder='little', signed=False),
'reserved2' : int.from_bytes(header_data[8:10], byteorder='little', signed=False),
'offset_to_data' : int.from_bytes(header_data[10:14], byteorder='little', signed=False)
}
return bmpHeader
def __check_bmp_header(data : bytes, bmpHeader : dict) -> bool:
if (bmpHeader['magic_number'] not in BMP_MAGIC_NUMBERS):
print('Bad Magic number:%s' % bmpHeader['magic_number'])
return False
if (bmpHeader['bmp_size'] != len(data)):
print('Bad BMP length expected %d got %d' % (bmpHeader['bmp_size'], len(data)))
return False
if (bmpHeader['reserved1'] != 0 or bmpHeader['reserved2'] != 0):
print('Bad reserved')
return False
if (bmpHeader['offset_to_data'] > len(data)):
print('Bad offset')
return False
return True
def __read_dib_header(data : bytes) -> dict:
if (len(data) < BMP_HEADER_SIZE + 4):
return None
dib_size = int.from_bytes(data[BMP_HEADER_SIZE:BMP_HEADER_SIZE + 4], byteorder='little', signed=False)
if dib_size not in DIB_HEADER_TYPES:
return None
if (len(data) < BMP_HEADER_SIZE + dib_size):
return None
dib_type = DIB_HEADER_TYPES[dib_size]
if dib_type == 'BITMAPINFOHEADER':
return __read_BITMAPINFOHEADER(data[BMP_HEADER_SIZE: BMP_HEADER_SIZE + dib_size])
return None
def __read_BITMAPINFOHEADER(data : bytes) -> dict:
header = {
'type' : 'BITMAPINFOHEADER',
'width' : int.from_bytes(data[4:8], byteorder='little', signed=False),
'height ' : int.from_bytes(data[8:12], byteorder='little', signed=False),
'color_planes' : int.from_bytes(data[12:14], byteorder='little', signed=False),
'bits_per_pixel' : int.from_bytes(data[14:16], byteorder='little', signed=False),
'compression_method' : int.from_bytes(data[16:20], byteorder='little', signed=False),
'image_size' : int.from_bytes(data[20:24], byteorder='little', signed=False),
'horizontal_resolution' : int.from_bytes(data[24:28], byteorder='little', signed=False),
'vertical_resolution' : int.from_bytes(data[28:32], byteorder='little', signed=False),
'number_of_colors' : int.from_bytes(data[32:36], byteorder='little', signed=False),
'number_of_important_colors' : int.from_bytes(data[36:40], byteorder='little', signed=False)
}
return header
def __check_dib_header(data : bytes, dib_header : dict) -> bool:
if dib_header['type'] == 'BITMAPINFOHEADER':
return __check_BITMAPINFOHEADER(data, dib_header)
return False
def __check_BITMAPINFOHEADER(data : bytes, dib_header : dict) -> bool:
    # TODO: implement
return True
def __check_data(data : bytes, bmp_header : dict, dib_header : dict) -> bool:
    # TODO: implement
return True
def check_bmp(data : bytes, opts: dict) -> bool:
"""Checks given bitmap file data for format coplince
Parameters:
data (bytes): bitmap file data
opts (dict): dictionary contains configuration
Returns:
bool: If true file format is ok, else false
"""
    # Check minimum size for BMP header
if (len(data) < BMP_HEADER_SIZE):
return False
# Read bmp header
bmpHeader = __read_bmp_header(data)
# Check bmp header
if(not __check_bmp_header(data, bmpHeader)):
return False
# Reads dib header
dib_header = __read_dib_header(data)
    # Check that a DIB header was read
    if dib_header is None:
return False
# Checks dib header
if not __check_dib_header(data, dib_header):
return False
# Check data
if not __check_data(data, bmpHeader, dib_header):
return False
return True
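# Minimal usage sketch (hypothetical file name; the opts dict is currently
# unused by the checks, so an empty dict suffices):
# with open("sample.bmp", "rb") as f:
#     print(check_bmp(f.read(), {}))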
| 31.386667 | 104 | 0.707519 | 624 | 4,708 | 5.089744 | 0.205128 | 0.053841 | 0.056675 | 0.122796 | 0.265428 | 0.113035 | 0.063917 | 0.030227 | 0.030227 | 0.030227 | 0 | 0.027247 | 0.165888 | 4,708 | 149 | 105 | 31.597315 | 0.781513 | 0.078165 | 0 | 0.18 | 0 | 0 | 0.177603 | 0.010897 | 0 | 0 | 0 | 0.006711 | 0 | 1 | 0.08 | false | 0 | 0.01 | 0.02 | 0.31 | 0.04 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
50fb779ffdcd87491a692cc2c7384107c789ebd7 | 2,411 | py | Python | scripts/conll_evaluation/booknlp_vs_dekkeretal_gs.py | therosko/Thesis-NER-in-English-novels | 7988c3aa4f904e91b1e674090dbdc6487b4ad042 | [
"Apache-2.0"
] | null | null | null | scripts/conll_evaluation/booknlp_vs_dekkeretal_gs.py | therosko/Thesis-NER-in-English-novels | 7988c3aa4f904e91b1e674090dbdc6487b4ad042 | [
"Apache-2.0"
] | null | null | null | scripts/conll_evaluation/booknlp_vs_dekkeretal_gs.py | therosko/Thesis-NER-in-English-novels | 7988c3aa4f904e91b1e674090dbdc6487b4ad042 | [
"Apache-2.0"
] | null | null | null | # BookNLP Dekker
import pandas as pd
import csv
import os
# import own script
from hyphens import *
from check_inconsistencies import *
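# Aligns BookNLP token output with the Dekker et al. gold standard, keeps only
# person entities, and writes space-separated CoNLL-style files for evaluation.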
directory = os.fsencode('/mnt/book-nlp/data/tokens/overlap/')
for file in os.listdir(directory):
filename = os.fsdecode(file)
if filename.endswith(".tokens"):
booknlp_filepath = "/mnt/book-nlp/data/tokens/overlap/" + filename
dekker_filepath = "/mnt/data/gold_standard/overlap/dekker_et_al/" + str(filename.replace('.tokens','.gs'))
#####################################
# get output file BookNLP
current_file = pd.read_csv(booknlp_filepath, sep='\t', quoting=csv.QUOTE_NONE, usecols=["originalWord","ner"])
current_file = current_file.rename(columns={"originalWord": "original_word", "ner": "booknlp"})
# alternatively convert all PERSON to PER
current_file["booknlp"].replace('PERSON', 'I-PERSON', inplace = True)
# replace rest of entities with O
current_file.loc[~current_file["booknlp"].isin(['I-PERSON']), "booknlp"] = "O"
# correct hyphened words from booknlp (note: stanford CoreNLP only splits on "most hyphens")
current_file = correct_hyphened(current_file)
# reset the index to avoid all parts of hyphened words having same index
current_file = current_file.reset_index()
del current_file['index']
        # remove chapter separation with stars
if str(filename) == "AliceInWonderland.tokens":
current_file = current_file.drop(current_file.index[1911:1931])
current_file = current_file.reset_index(drop=True)
#####################################
# get gold standard - Dekker
gs_d = pd.read_csv(dekker_filepath, sep=' ', quoting=csv.QUOTE_NONE, usecols=[0,1], names=["original_word", "gs"])
gs_d = correct_hyphened(gs_d)
gs_d.loc[~gs_d["gs"].isin(['I-PERSON']), "gs"] = "O"
check_for_inconsistencies_dekker(current_file,gs_d)
# merge the two dataframes
merged_df = pd.merge(gs_d, current_file, left_index=True, right_index=True)
del merged_df['original_word_y']
        # actually it is a space-separated value
merged_df.to_csv("/mnt/Git/scripts/conll_evaluation/data_conll_format/booknlp_dekkeretal/" + str(filename.replace('.tokens','.tsv')), sep=' ', index=False, encoding='utf-8', quoting=csv.QUOTE_NONE, header=False) | 54.795455 | 219 | 0.660307 | 310 | 2,411 | 4.945161 | 0.409677 | 0.129159 | 0.046967 | 0.057404 | 0.110894 | 0.076973 | 0 | 0 | 0 | 0 | 0 | 0.005607 | 0.18623 | 2,411 | 44 | 219 | 54.795455 | 0.775739 | 0.173372 | 0 | 0 | 0 | 0 | 0.20283 | 0.109015 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.178571 | 0 | 0.178571 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
50fd8dd86b10221234f1f797db5ec6d2c7319946 | 439 | py | Python | src/sms_verifier/urls.py | ArieLevs/sms-verifier-backend | b687f0426e7fc871a7ba50d95499187e72089dce | [
"MIT"
] | null | null | null | src/sms_verifier/urls.py | ArieLevs/sms-verifier-backend | b687f0426e7fc871a7ba50d95499187e72089dce | [
"MIT"
] | 10 | 2020-02-12T02:51:31.000Z | 2022-02-10T13:33:43.000Z | src/sms_verifier/urls.py | ArieLevs/sms-verifier-backend | b687f0426e7fc871a7ba50d95499187e72089dce | [
"MIT"
] | 1 | 2022-02-22T18:56:22.000Z | 2022-02-22T18:56:22.000Z |
from django.contrib import admin
from django.conf.urls import include, url
urlpatterns = [
url(r'^', include('sms_verifier_app.urls')),
# oauth urls
url(r'^', include('oauth2_provider.urls', namespace='oauth2_provider')),
# django auth urls
url('^', include('django.contrib.auth.urls')),
# Social auth urls
url('', include('social_django.urls', namespace='social')),
url(r'^admin/', admin.site.urls),
]
| 25.823529 | 76 | 0.660592 | 56 | 439 | 5.089286 | 0.375 | 0.042105 | 0.077193 | 0.126316 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.00545 | 0.164009 | 439 | 16 | 77 | 27.4375 | 0.771117 | 0.100228 | 0 | 0 | 0 | 0 | 0.292308 | 0.115385 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.222222 | 0 | 0.222222 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
50fe54708d6b180613a1b07cf1d2cceb355cbb18 | 4,053 | py | Python | tasks/datasets.py | bit-ml/continual-learning | 5386876e01c4a05a8d88c2e4a6e762c552b017d2 | [
"MIT"
] | null | null | null | tasks/datasets.py | bit-ml/continual-learning | 5386876e01c4a05a8d88c2e4a6e762c552b017d2 | [
"MIT"
] | null | null | null | tasks/datasets.py | bit-ml/continual-learning | 5386876e01c4a05a8d88c2e4a6e762c552b017d2 | [
"MIT"
] | null | null | null | from typing import List, Tuple
import torch
from torch.utils.data import DataLoader
from torchvision import datasets, transforms
from .data_loader import InMemoryDataLoader
KNOWN_DATASETS = ["CIFAR10", "FashionMNIST", "MNIST"]
ORIGINAL_SIZE = {
"CIFAR10": (3, 32, 32),
"FashionMNIST": (1, 28, 28),
"MNIST": (1, 28, 28),
}
MEAN_STD = {
"CIFAR10": {(3, 32, 32): (0.4736, 0.2516)},
"FashionMNIST": {
(1, 28, 28): (0.2859, 0.353),
(1, 32, 32): (0.2189, 0.3318),
(3, 32, 32): (0.2189, 0.3318),
},
"MNIST": {
(1, 28, 28): (0.1305, 0.3081),
(1, 32, 32): (0.1003, 0.2752),
(3, 32, 32): (0.1003, 0.2752),
},
}
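# padding() splits the height/width difference between in_sz and out_sz into
# two near-equal halves per dimension (any odd remainder goes to the second half).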
def padding(in_sz: List[int], out_sz: List[int]) -> Tuple[int, int, int, int]:
d_h, d_w = out_sz[-2] - in_sz[-2], out_sz[-1] - in_sz[-1]
p_h1, p_w1 = d_h // 2, d_w // 2
p_h2, p_w2 = d_h - p_h1, d_w - p_w1
return p_h1, p_h2, p_w1, p_w2
def get_torch_loader( # pylint: disable=C0330
dataset_name: str,
train: bool = True,
in_size: List[int] = None,
batch_size: int = 1,
shuffle: bool = True,
normalize: bool = True,
) -> DataLoader:
transfs = []
if in_size is not None:
if in_size[-2:] != ORIGINAL_SIZE[dataset_name][-2:]:
_padding = padding(ORIGINAL_SIZE[dataset_name], in_size)
transfs.append(transforms.Pad(_padding))
transfs.append(transforms.ToTensor())
if in_size[0] != ORIGINAL_SIZE[dataset_name][0]:
transfs.append(transforms.Lambda(lambda t: t.expand(in_size)))
else:
in_size = ORIGINAL_SIZE[dataset_name]
transfs.append(transforms.ToTensor())
if normalize:
mean, std = MEAN_STD[dataset_name][tuple(in_size)]
transfs.append(transforms.Normalize((mean,), (std,)))
dataset = getattr(datasets, dataset_name)(
f"./.data/{dataset_name:s}",
train=train,
download=True,
transform=transforms.Compose(transfs),
)
return DataLoader(dataset, batch_size=batch_size, shuffle=shuffle)
def to_memory( # pylint: disable=C0330
data_loader: DataLoader,
batch_size: int = 1,
shuffle: bool = True,
order_by_class: bool = False,
shuffle_classes: bool = False,
allow_mixed_batches: bool = True,
limit: int = None,
device=None,
classes: List[int] = None,
) -> InMemoryDataLoader:
all_data, all_target = [], []
data_len = 0
for data, target in data_loader:
all_data.append(data)
all_target.append(target)
data_len += len(data)
if limit and limit <= data_len:
break
data = torch.cat(tuple(all_data), dim=0)
target = torch.cat(tuple(all_target), dim=0)
if limit and limit < len(data):
data = data[:limit]
target = target[:limit]
if device is not None:
data = data.to(device)
target = target.to(device)
return InMemoryDataLoader(
(data, target),
allow_mixed_batches=allow_mixed_batches,
batch_size=batch_size,
order_by_class=order_by_class,
shuffle=shuffle,
shuffle_classes=shuffle_classes,
classes=classes,
)
def get_loader( # pylint: disable=C0330
dataset_name: str,
train: bool = True,
in_size: List[int] = None,
batch_size: int = 1,
classes: List[int] = None,
shuffle: bool = True,
normalize: bool = True,
allow_mixed_batches: bool = True,
order_by_class: bool = False,
shuffle_classes: bool = False,
device=None,
limit=None,
):
torch_loader = get_torch_loader(
dataset_name,
train=train,
in_size=in_size,
batch_size=128,
shuffle=False,
normalize=normalize,
)
return to_memory(
torch_loader,
batch_size=batch_size,
shuffle=shuffle,
shuffle_classes=shuffle_classes,
order_by_class=order_by_class,
allow_mixed_batches=allow_mixed_batches,
limit=limit,
device=device,
classes=classes,
)
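# Usage sketch (parameter values below are illustrative, not prescribed):
# loader = get_loader("MNIST", train=True, in_size=[1, 32, 32], batch_size=64)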
| 27.201342 | 78 | 0.607698 | 537 | 4,053 | 4.374302 | 0.195531 | 0.028097 | 0.030651 | 0.039166 | 0.349936 | 0.279268 | 0.12814 | 0.11324 | 0.11324 | 0.11324 | 0 | 0.055611 | 0.263509 | 4,053 | 148 | 79 | 27.385135 | 0.731323 | 0.016038 | 0 | 0.314961 | 0 | 0 | 0.024096 | 0.006024 | 0 | 0 | 0 | 0 | 0 | 1 | 0.031496 | false | 0 | 0.03937 | 0 | 0.102362 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
50feb92114eb37983350ccb2f51df0e98ab78a44 | 952 | py | Python | addons14/remove_odoo_enterprise/models/res_config_settings.py | odoochain/addons_oca | 55d456d798aebe16e49b4a6070765f206a8885ca | [
"MIT"
] | 1 | 2021-06-10T14:59:13.000Z | 2021-06-10T14:59:13.000Z | addons14/remove_odoo_enterprise/models/res_config_settings.py | odoochain/addons_oca | 55d456d798aebe16e49b4a6070765f206a8885ca | [
"MIT"
] | null | null | null | addons14/remove_odoo_enterprise/models/res_config_settings.py | odoochain/addons_oca | 55d456d798aebe16e49b4a6070765f206a8885ca | [
"MIT"
] | 1 | 2021-04-09T09:44:44.000Z | 2021-04-09T09:44:44.000Z | # Copyright 2016 LasLabs Inc.
# Copyright 2018 Onestein (<http://www.onestein.eu>)
# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl).
from lxml import etree
from odoo import api, models
class ResConfigSettings(models.TransientModel):
_inherit = "res.config.settings"
@api.model
def fields_view_get(
self, view_id=None, view_type="form", toolbar=False, submenu=False
):
ret_val = super(ResConfigSettings, self).fields_view_get(
view_id=view_id, view_type=view_type, toolbar=toolbar, submenu=submenu
)
page_name = ret_val["name"]
if not page_name == "res.config.settings.view.form":
return ret_val
doc = etree.XML(ret_val["arch"])
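        # Strip the containers that hold Odoo's "upgrade to Enterprise" widgets.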
query = "//div[div[field[@widget='upgrade_boolean']]]"
for item in doc.xpath(query):
item.getparent().remove(item)
ret_val["arch"] = etree.tostring(doc)
return ret_val
| 28.848485 | 82 | 0.64916 | 126 | 952 | 4.746032 | 0.555556 | 0.060201 | 0.056856 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.013569 | 0.22584 | 952 | 32 | 83 | 29.75 | 0.797829 | 0.148109 | 0 | 0.1 | 0 | 0 | 0.133829 | 0.090458 | 0 | 0 | 0 | 0 | 0 | 1 | 0.05 | false | 0 | 0.1 | 0 | 0.35 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0f98aaa91b5b977fd6d211f7d9569c79ce941321 | 597 | py | Python | Challenges/Quartiles.py | adarsh2104/Hacker-Rank-Days-of-Statistics | 30a1c56dc69ae0a98c09e5075f9b6dd0b747e0f9 | [
"MIT"
] | 2 | 2021-02-26T14:28:08.000Z | 2021-02-26T18:51:51.000Z | Challenges/Quartiles.py | adarsh2104/Hacker-Rank-Days-of-Statistics | 30a1c56dc69ae0a98c09e5075f9b6dd0b747e0f9 | [
"MIT"
] | null | null | null | Challenges/Quartiles.py | adarsh2104/Hacker-Rank-Days-of-Statistics | 30a1c56dc69ae0a98c09e5075f9b6dd0b747e0f9 | [
"MIT"
] | null | null | null |
# Github : https://github.com/adarsh2104
# HR-Profile: https://www.hackerrank.com/adarsh_2104
# Challenge : https://www.hackerrank.com/challenges/s10-quartiles
# Max Score : 30
def find_median(array):
if len(array) % 2 == 1:
return array[len(array) // 2]
else:
return (array[len(array) // 2] + array[len(array) // 2 - 1]) // 2
n = input()
input_array = sorted([int(x) for x in input().split()])
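# Q1 = median of the lower half, Q2 = overall median, Q3 = median of the upper
# half; when n is odd, the middle element is excluded from both halves.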
print(find_median(input_array[:len(input_array)//2]))
print(find_median(input_array))
print(find_median(input_array[len(input_array) // 2 + len(input_array) % 2:]))
| 28.428571 | 78 | 0.658291 | 89 | 597 | 4.280899 | 0.404494 | 0.110236 | 0.094488 | 0.110236 | 0.375328 | 0.204724 | 0.204724 | 0.204724 | 0.204724 | 0 | 0 | 0.044 | 0.162479 | 597 | 20 | 79 | 29.85 | 0.718 | 0.291457 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.1 | false | 0 | 0 | 0 | 0.3 | 0.3 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0f99325732175259fcba8a736f249b44b4db79d9 | 7,900 | py | Python | fossology/license.py | thsetz/fossology-python | 1c7394624f8bf2deb0aece6ef0db443cf10c791b | [
"MIT"
] | null | null | null | fossology/license.py | thsetz/fossology-python | 1c7394624f8bf2deb0aece6ef0db443cf10c791b | [
"MIT"
] | null | null | null | fossology/license.py | thsetz/fossology-python | 1c7394624f8bf2deb0aece6ef0db443cf10c791b | [
"MIT"
] | null | null | null | # Copyright 2019-2021 Siemens AG
# SPDX-License-Identifier: MIT
import json
from json.decoder import JSONDecodeError
import logging
from urllib.parse import quote
from typing import List, Tuple
import fossology
from fossology.exceptions import FossologyApiError, FossologyUnsupported
from fossology.obj import License, LicenseType, Obligation
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
def check_empty_response(response) -> bool:
try:
message = response.json().get("message")
if message and message == "Can not exceed total pages: 0":
return True
except JSONDecodeError:
# No JSON response available
pass
return False
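# Fossology reports an empty paginated listing via the message checked above
# rather than an empty result set, hence this dedicated helper.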
class LicenseEndpoint:
"""Class dedicated to all "license" related endpoints"""
def list_licenses(
self,
active: bool = False,
kind: LicenseType = LicenseType.ALL,
page_size: int = 100,
page: int = 1,
all_pages: bool = False,
) -> List[License]:
"""Get all license from the DB
API Endpoint: GET /license
:param active: list only active licenses (default: False)
:param kind: list only licenses from type LicenseType (default: LicenseType.ALL)
:param page_size: the maximum number of results per page (default: 100)
:param page: the number of pages to be retrieved (default: 1)
:param all_pages: get all licenses (default: False)
:type active: bool
:type kind: LicenseType
:type page_size: int
:type page: int
:type all_pages: boolean
:return: a list of licenses
:rtype: List[License]
:raises FossologyApiError: if the REST call failed
"""
if fossology.versiontuple(self.version) < fossology.versiontuple("1.3.0"):
description = f"Endpoint /license is not supported by your Fossology API version {self.version}"
raise FossologyUnsupported(description)
license_list = list()
headers = {"limit": str(page_size)}
if active:
headers["active"] = json.dumps(True)
if all_pages:
# will be reset after the total number of pages has been retrieved from the API
x_total_pages = 2
else:
x_total_pages = page
while page <= x_total_pages:
headers["page"] = str(page)
response = self.session.get(
f"{self.api}/license?kind={kind.value}", headers=headers
)
if response.status_code == 200:
for license in response.json():
license_list.append(License.from_json(license))
x_total_pages = int(response.headers.get("X-TOTAL-PAGES", 0))
if not all_pages or x_total_pages == 0:
logger.info(
f"Retrieved page {page} of license, {x_total_pages} pages are in total available"
)
return license_list, x_total_pages
page += 1
else:
if check_empty_response(response):
return license_list, 0
description = (
f"Unable to retrieve the list of licenses from page {page}"
)
raise FossologyApiError(description, response)
logger.info(f"Retrieved all {x_total_pages} pages of licenses")
return license_list, x_total_pages
def detail_license(
self, shortname, group=None
) -> Tuple[int, License, List[Obligation]]:
"""Get a license from the DB
API Endpoint: GET /license/{shortname}
:param shortname: Short name of the license
:param group: the group this license belongs to (default: None)
:type name: str
:type group: int
:return: the license id, the license data and the associated obligations
:rtype: tuple(int, License, List[Obligation])
:raises FossologyApiError: if the REST call failed
"""
if fossology.versiontuple(self.version) < fossology.versiontuple("1.3.0"):
description = (
f"Endpoint /license/{shortname} is not supported by your API version ",
f"{self.version}",
)
raise FossologyUnsupported(description)
headers = dict()
if group:
headers["groupName"] = group
response = self.session.get(
f"{self.api}/license/{quote(shortname)}", headers=headers
)
if response.status_code == 200:
return License.from_json(response.json())
elif response.status_code == 404:
description = f"License {shortname} not found"
raise FossologyApiError(description, response)
else:
description = f"Error while getting license {shortname}"
raise FossologyApiError(description, response)
def add_license(self, license: License, merge_request: bool = False):
"""Add a new license to the DB
API Endpoint: POST /license
License data are added to the request body, here is an example:
>>> new_license = License(
... "GPL-1.0",
... "GNU General Public License 1.0",
... "Text of the license...",
... "http://www.gnu.org/licenses/gpl-1.0.txt",
... "red",
... "false"
... )
>>> foss.add_license(new_license, merge_request=True) # doctest: +SKIP
:param license: the license data
:param merge_request: open a merge request for the license candidate? (default: False)
:type license: License
:type merge_request: bool
:raises FossologyApiError: if the REST call failed
"""
headers = {"Content-Type": "application/json"}
license_data = license.to_dict()
if merge_request:
license_data["mergeRequest"] = json.dumps(True)
response = self.session.post(
f"{self.api}/license", headers=headers, data=json.dumps(license_data)
)
if response.status_code == 201:
logger.info(f"License {license.shortName} has been added to the DB")
elif response.status_code == 409:
logger.info(f"License {license.shortName} already exists")
else:
description = f"Error while adding new license {license.shortName}"
raise FossologyApiError(description, response)
def update_license(
self,
shortname,
fullname: str = "",
text: str = "",
url: str = "",
risk: int = 2,
):
"""Update a license
API Endpoint: PATCH /license/{shortname}
:param shortName: the short name of the license to be updated
:param fullName: the new fullName of the license (optional)
:param text: the new text of the license (optional)
:param url: the new url of the license (optional)
:param risk: the new risk of the license (default: 2)
:type shortName: str
:type fullName: str
:type text: str
:type url: str
:type risk: int
:raises FossologyApiError: if the REST call failed
"""
headers = {"Content-Type": "application/json"}
license_data = {
"fullName": fullname,
"text": text,
"url": url,
"risk": str(risk),
}
response = self.session.patch(
f"{self.api}/license/{quote(shortname)}",
data=json.dumps(license_data),
headers=headers,
)
if response.status_code == 200:
logger.info(f"License {shortname} has been updated")
return
else:
description = f"Unable to update license {shortname}"
raise FossologyApiError(description, response)
| 37.089202 | 108 | 0.593038 | 890 | 7,900 | 5.189888 | 0.211236 | 0.023815 | 0.023815 | 0.044382 | 0.311323 | 0.222775 | 0.17428 | 0.124269 | 0.092228 | 0.092228 | 0 | 0.00998 | 0.315063 | 7,900 | 212 | 109 | 37.264151 | 0.843652 | 0.289241 | 0 | 0.214286 | 0 | 0 | 0.179223 | 0.021359 | 0 | 0 | 0 | 0 | 0 | 1 | 0.039683 | false | 0.007937 | 0.063492 | 0 | 0.166667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0f9c9af62c9d06f1634a32fc2dd1234b5aff872b | 2,385 | py | Python | src/apps/backup/models.py | tuxis/BuckuPy | 1af2aa330bcfde11edbf1748af8629289cd084c0 | [
"MIT"
] | null | null | null | src/apps/backup/models.py | tuxis/BuckuPy | 1af2aa330bcfde11edbf1748af8629289cd084c0 | [
"MIT"
] | 1 | 2015-03-14T13:59:31.000Z | 2015-03-14T13:59:31.000Z | src/apps/backup/models.py | tuxis/BackuPy | 1af2aa330bcfde11edbf1748af8629289cd084c0 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from django.db import models
from django.contrib.auth.models import User
import datetime
# choice definitions used by the Backup model below
BACKUP_TYPE_CHOICES = (
(0, 'Incremental'),
(1, 'Full'),
)
BACKUP_PERIODIC_CHOICES = (
(0, 'Diario'),
(1, 'Semanal'),
(2, 'Mensual'),
)
INCREM_PERIODIC_CHOICES = (
(0, 'Semanal'),
(1, 'Mensual'),
)
BACKUP_SEMANAL_DAY_CHOICES = (
('Mon', 'Lunes'),
('Tue', 'Martes'),
('Web', 'Miercoles'),
('Thu', 'Jueves'),
('Fri', 'Viernes'),
('Sat', 'Sabado'),
('Sun', 'Domingo'),
)
BACKUP_MENSUAL_DAY_CHOICES = (
(0, 'Primer dia'),
(1, 'Ultimo dia'),
)
class Backup(models.Model):
name = models.CharField(max_length=20)
description = models.TextField(max_length=150, blank=True, null=True)
source = models.CharField(max_length=300, blank=True, null=True)
destination = models.CharField(max_length=300, blank=True, null=True)
backup_type = models.IntegerField(
choices=BACKUP_TYPE_CHOICES, default=0
)
periodic = models.IntegerField(
choices=BACKUP_PERIODIC_CHOICES, default=0
)
semanal_day = models.CharField(
choices=BACKUP_SEMANAL_DAY_CHOICES, blank=True, null=True, max_length=3
)
incremental_period = models.IntegerField(
choices=INCREM_PERIODIC_CHOICES, blank=True, null=True
)
mensual_day = models.IntegerField(
choices=BACKUP_MENSUAL_DAY_CHOICES, blank=True, null=True
)
active = models.BooleanField(default=False)
last_excecute_ok = models.BooleanField(default=False)
last_excecute_date = models.DateTimeField(default=datetime.datetime.now)
modifier_by = models.ForeignKey(
User, blank=True, null=True, related_name="modifier_backup"
)
date_modifier = models.DateTimeField(auto_now=True, blank=True, null=True)
author = models.ForeignKey(User, blank=True, null=True)
date_created = models.DateTimeField(default=datetime.datetime.now)
def __unicode__(self):
return "%s" % (self.name,)
class BackupHistory(models.Model):
date_created = models.DateTimeField(default=datetime.datetime.now)
backup = models.ForeignKey(Backup, blank=True, null=True)
result = models.TextField(max_length=350, blank=True, null=True)
last_excecute_ok = models.BooleanField(default=True)
def __unicode__(self):
return "%s" % (self.backup,)
| 29.444444 | 79 | 0.680503 | 281 | 2,385 | 5.580071 | 0.313167 | 0.063138 | 0.091199 | 0.11926 | 0.364796 | 0.34949 | 0.174745 | 0.127551 | 0.056122 | 0 | 0 | 0.013853 | 0.182809 | 2,385 | 80 | 80 | 29.8125 | 0.790662 | 0.013417 | 0 | 0.059701 | 0 | 0 | 0.065957 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.029851 | false | 0 | 0.044776 | 0.029851 | 0.432836 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0f9d8396c9b503b1773fde4adceb19ba431bbcdb | 2,072 | py | Python | Server/account_io.py | peeesspee/BitOJ | 0d67a87b71d0c8c8d3df719f1b9e176ec91cfb32 | [
"MIT"
] | 30 | 2019-07-28T18:05:33.000Z | 2021-12-27T10:19:31.000Z | Server/account_io.py | peeesspee/BitOJ | 0d67a87b71d0c8c8d3df719f1b9e176ec91cfb32 | [
"MIT"
] | 2 | 2019-09-03T19:53:03.000Z | 2019-10-18T11:00:44.000Z | Server/account_io.py | peeesspee/BitOJ | 0d67a87b71d0c8c8d3df719f1b9e176ec91cfb32 | [
"MIT"
] | 4 | 2019-10-02T04:54:50.000Z | 2020-08-10T13:28:58.000Z | # This module handles xlsx files
import openpyxl
from database_management import user_management
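# Thin openpyxl wrapper for account sheets with columns: Team, Password, Type.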
class io_manager:
    @staticmethod
    def read_file(filename='./accounts.xlsx'):
u_list = []
p_list = []
t_list = []
try:
workbook_object = openpyxl.load_workbook(filename)
# get active sheet
sheet_object = workbook_object.active
# Get number of rows
number_of_rows = sheet_object.max_row
except:
print('[ ERROR ] Could not open ' + filename)
            return u_list, ['FNF'], t_list
try:
for i in range(2, number_of_rows + 1):
cell_object = sheet_object.cell(row = i, column = 1)
u_list.append(cell_object.value)
cell_object = sheet_object.cell(row = i, column = 2)
p_list.append(cell_object.value)
cell_object = sheet_object.cell(row = i, column = 3)
t_list.append(cell_object.value)
return u_list, p_list, t_list
except Exception as error:
print('[ ERROR ] File could not be read properly!')
return [], [], []
    @staticmethod
    def write_file(u_list, p_list, t_list, filename='./accounts.xlsx'):
try:
workbook_object = openpyxl.Workbook()
# get active sheet
sheet_object = workbook_object.active
except Exception as error:
print('[ ERROR ] ' + str(error))
try:
cell_object = sheet_object.cell(row = 1, column = 1)
cell_object.value = 'Team'
cell_object = sheet_object.cell(row = 1, column = 2)
cell_object.value = 'Password'
cell_object = sheet_object.cell(row = 1, column = 3)
cell_object.value = 'Type'
for i in range(2, 2 + len(u_list)):
cell_object = sheet_object.cell(row = i, column = 1)
cell_object.value = u_list[i-2]
cell_object = sheet_object.cell(row = i, column = 2)
cell_object.value = p_list[i-2]
cell_object = sheet_object.cell(row = i, column = 3)
cell_object.value = t_list[i-2]
workbook_object.save(filename)
except Exception as error:
            print('[ ERROR ] File could not be written properly: ' + str(error))
if __name__ == '__main__':
io_manager.write_file(['team1', 'team2'], ['abc', 'def'], ['Client', 'Client'] )
io_manager.read_file()
| 29.6 | 81 | 0.677124 | 307 | 2,072 | 4.322476 | 0.234528 | 0.135644 | 0.101733 | 0.142427 | 0.608892 | 0.501884 | 0.443858 | 0.443858 | 0.29691 | 0.239638 | 0 | 0.012651 | 0.198842 | 2,072 | 69 | 82 | 30.028986 | 0.786747 | 0.040058 | 0 | 0.288462 | 0 | 0 | 0.103379 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.038462 | false | 0.019231 | 0.038462 | 0 | 0.153846 | 0.076923 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0fa1ee5901c494ffc339be76235d3eeb80fbed43 | 3,977 | py | Python | egs/sre21-av-a/v1.16k/steps_be/eval-fusion-v2.py | hyperion-ml/hyperion | c4c9eee0acab1ba572843373245da12d00dfffaa | [
"Apache-2.0"
] | 14 | 2021-12-19T04:24:15.000Z | 2022-03-18T03:24:04.000Z | egs/sre21-av-a/v1.16k/steps_be/eval-fusion-v2.py | hyperion-ml/hyperion | c4c9eee0acab1ba572843373245da12d00dfffaa | [
"Apache-2.0"
] | null | null | null | egs/sre21-av-a/v1.16k/steps_be/eval-fusion-v2.py | hyperion-ml/hyperion | c4c9eee0acab1ba572843373245da12d00dfffaa | [
"Apache-2.0"
] | 5 | 2021-12-14T20:41:27.000Z | 2022-02-24T14:18:11.000Z | #!/usr/bin/env python
"""
Copyright 2019 Johns Hopkins University (Author: Jesus Villalba)
Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
Evals greedy fusion
"""
import sys
import os
from jsonargparse import ArgumentParser, namespace_to_dict
import time
import logging
import numpy as np
from hyperion.hyp_defs import float_cpu, config_logger
from hyperion.utils import TrialScores, TrialKey, TrialNdx, Utt2Info
from hyperion.utils.list_utils import ismember
from hyperion.classifiers import GreedyFusionBinaryLR as GF
def read_ndx(ndx_file):
logging.info("load ndx: %s" % ndx_file)
try:
ndx = TrialNdx.load_txt(ndx_file)
except:
ndx = TrialKey.load_txt(ndx_file)
return ndx
def read_sources(ndx_file, ndx):
ns = np.sum(ndx.trial_mask)
src = np.zeros((ns, 3), dtype=float_cpu())
sources = ["CTS", "AFV"]
k = 0
for i in range(len(sources)):
for j in range(i, len(sources)):
li = sources[i]
lj = sources[j]
logging.info("load key: %s", f"{ndx_file}_{li}_{lj}")
try:
ndx_c = TrialKey.load_txt(f"{ndx_file}_{li}_{lj}").to_ndx()
except:
ndx_c = TrialNdx.load_txt(f"{ndx_file}_{li}_{lj}")
mask = np.zeros_like(ndx.trial_mask, dtype=float_cpu())
f, enr_idx = ismember(ndx_c.model_set, ndx.model_set)
f, test_idx = ismember(ndx_c.seg_set, ndx.seg_set)
mask[np.ix_(enr_idx, test_idx)] = ndx_c.trial_mask
cond_c = TrialScores(ndx.model_set, ndx.seg_set, mask)
src[:, k] = cond_c.scores[ndx.trial_mask]
k += 1
    assert np.all(np.sum(src, axis=1)), "not all trials have source info"
return src
def read_scores(in_score_files, ndx):
num_systems = len(in_score_files)
in_scores = []
for i in range(num_systems):
logging.info("load scores: %s", in_score_files[i])
scr = TrialScores.load_txt(in_score_files[i])
scr = scr.align_with_ndx(ndx)
in_scores.append(scr.scores[ndx.trial_mask][:, None])
in_scores = np.concatenate(tuple(in_scores), axis=1)
return in_scores
def load_models(model_file):
fusions = []
sources = ["CTS_CTS", "CTS_AFV", "AFV_AFV"]
for i in range(len(sources)):
source = sources[i]
model_file_i = f"{model_file}_{source}.h5"
logging.info("load model: %s", model_file_i)
gf = GF.load(model_file_i)
fusions.append(gf)
return fusions
def eval_fusion(in_score_files, ndx_file, model_file, out_score_file, fus_idx):
ndx = read_ndx(ndx_file)
src = read_sources(ndx_file, ndx)
in_scores = read_scores(in_score_files, ndx)
fusions = load_models(model_file)
logging.info("apply fusion")
out_scores = np.zeros((in_scores.shape[0],), dtype=float_cpu())
for i in range(3):
mask = src[:, i] == 1
if np.any(mask):
out_scores[mask] = fusions[i].predict(in_scores[mask], fus_idx=fus_idx)
scr = TrialScores(
ndx.model_set,
ndx.seg_set,
np.zeros_like(ndx.trial_mask, dtype=float_cpu()),
ndx.trial_mask,
)
scr.scores[ndx.trial_mask] = out_scores
logging.info("save scores: %s" % out_score_file)
scr.save_txt(out_score_file)
if __name__ == "__main__":
parser = ArgumentParser(
description="Evals linear fusion from greedy fusion trainer"
)
parser.add_argument("--in-score-files", required=True, nargs="+")
parser.add_argument("--out-score-file", required=True)
parser.add_argument("--ndx-file", required=True)
parser.add_argument("--model-file", required=True)
parser.add_argument("--fus-idx", required=True, type=int)
parser.add_argument("-v", "--verbose", default=1, choices=[0, 1, 2, 3], type=int)
args = parser.parse_args()
config_logger(args.verbose)
del args.verbose
logging.debug(args)
eval_fusion(**namespace_to_dict(args))
| 30.592308 | 85 | 0.648982 | 583 | 3,977 | 4.183533 | 0.25729 | 0.03444 | 0.03444 | 0.01804 | 0.209512 | 0.148831 | 0.070521 | 0.02952 | 0.02952 | 0 | 0 | 0.007424 | 0.221021 | 3,977 | 129 | 86 | 30.829457 | 0.779858 | 0.04174 | 0 | 0.063158 | 0 | 0 | 0.092368 | 0.006316 | 0 | 0 | 0 | 0 | 0.010526 | 1 | 0.052632 | false | 0 | 0.105263 | 0 | 0.2 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0fa66e1f184d6d4119c31d5e1037b03647996820 | 697 | py | Python | src/MusicTheory/scales/people/PelogScale.py | ytyaru/Python.Audio.Chord.2017081743 | f9bad6c9c013c216aff586bed56ea646f26d1236 | [
"CC0-1.0"
] | 1 | 2019-11-14T07:30:23.000Z | 2019-11-14T07:30:23.000Z | src/MusicTheory/scales/people/PelogScale.py | ytyaru/Python.Audio.Scale.201708102021 | 6f5e47c7af00ff793cce0893dff29b1e6904cb4e | [
"CC0-1.0"
] | null | null | null | src/MusicTheory/scales/people/PelogScale.py | ytyaru/Python.Audio.Scale.201708102021 | 6f5e47c7af00ff793cce0893dff29b1e6904cb4e | [
"CC0-1.0"
] | null | null | null | #https://ja.wikipedia.org/wiki/%E9%9F%B3%E9%9A%8E
#https://ja.wikipedia.org/wiki/%E3%83%9E%E3%82%AB%E3%83%BC%E3%83%A0
# Indonesian scale (Pelog)
# Originally a rough, unequal division of the octave into five parts, with two
# interval sizes, "wide" and "narrow": narrow + narrow + wide + narrow + wide.
# Roughly C#, D, E, G#, A, C# or C#, D, E, G, A, C#.
class PelogScale:
def __init__(self):
#C Db Eb G Ab
# 1 2 4 1
        self.__intervals = [[1,2,4,1]]  # (cannot represent lowering a note by a quarter tone)
        self.__names = ['Yonanuki major scale', 'Yonanuki minor scale', 'Nironuki minor scale', 'Nironuki major scale', 'Kumoi scale', 'Iwato scale']
@property
def Intervals(self): return self.__intervals
if __name__ == '__main__':
s = PelogScale()
    print('========== Indonesian (Pelog) scale ==========')
print('s.Intervals', s.Intervals)
| 34.85 | 123 | 0.606887 | 105 | 697 | 3.866667 | 0.561905 | 0.029557 | 0.078818 | 0.093596 | 0.137931 | 0 | 0 | 0 | 0 | 0 | 0 | 0.057391 | 0.175036 | 697 | 19 | 124 | 36.684211 | 0.646957 | 0.408895 | 0 | 0 | 0 | 0 | 0.2425 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.2 | false | 0 | 0 | 0.1 | 0.3 | 0.2 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0fa702ae3405bcd2ab080aaf782ee4eb3b52fd5a | 3,725 | py | Python | main/apis/views.py | burjee/mock-yo | bb1ac41006e253bf263cb266ee02bae3bb1bdf10 | [
"MIT"
] | null | null | null | main/apis/views.py | burjee/mock-yo | bb1ac41006e253bf263cb266ee02bae3bb1bdf10 | [
"MIT"
] | null | null | null | main/apis/views.py | burjee/mock-yo | bb1ac41006e253bf263cb266ee02bae3bb1bdf10 | [
"MIT"
] | null | null | null | from django.http import HttpResponse, JsonResponse, FileResponse
from django.views.decorators.csrf import csrf_exempt
from datetime import datetime
from os import path
from random import random
from .models import Kind, Item
@csrf_exempt
def directory(request, kind):
    # Retrieve the listing
if request.method == "GET":
if kind == "home":
result = Kind.objects.all()
result_dict = {"status": 200, "data": Kind.to_list(result)}
return JsonResponse(result_dict)
else:
_kind = Kind.objects.get(name=kind)
result = Item.objects.filter(kind=_kind)
result_dict = {"status": 200, "data": Item.to_list(result)}
return JsonResponse(result_dict)
    # Create a new item
elif request.method == "POST":
_kind = Kind.objects.get(name=kind)
item = Item()
        # Name and kind
item.name = request.POST["name"]
item.kind = _kind
        # Cover image
if "isImageUpload" in request.POST:
item.image = request.FILES["image"]
extension = path.splitext(item.image.name)[1]
item.image.name = "{}{}".format(get_random_name(), extension)
else:
item.image.name = "uploads/{}.jpg".format(_kind.name)
        # Content
if "isMedia" in request.POST:
extension = path.splitext(request.FILES["content"].name)[1]
media_name = "{}{}".format(get_random_name(), extension)
handle_uploaded_file(request.FILES["content"], media_name)
item.content = "uploads/{}".format(media_name)
else:
item.content = request.POST["content"]
        # For the image kind, the cover image defaults to the content itself
if _kind.name == "image":
item.image.name = item.content
item.save()
return JsonResponse({"status": 200})
else:
return HttpResponse(status=404)
@csrf_exempt
def content(request, kind, id):
if request.method == "GET":
result = Item.objects.get(kind__name=kind, id=id)
media = ["image", "video", "sound"]
if kind == "file":
size = path.getsize(result.content)
result_dict = {"status": 200, "data": {
"name": result.name, "size": size, "utime": result.date.timestamp()
}}
return JsonResponse(result_dict)
elif kind in media:
content = result.content.replace("uploads", "static")
result_dict = {"status": 200, "data": content, "name": result.name}
return JsonResponse(result_dict)
else:
result_dict = {"status": 200, "data": result.content, "name": result.name}
return JsonResponse(result_dict)
elif request.method == "DELETE":
result = Item.objects.get(kind__name=kind, id=id)
result.delete()
return JsonResponse({"status": 200})
else:
return HttpResponse(status=404)
@csrf_exempt
def like(request, kind, id):
if request.method == "PUT":
result = Item.objects.get(kind__name=kind, id=id)
result.like = not result.like
result.save()
return JsonResponse({"status": 200})
else:
return HttpResponse(status=404)
def download(request, id):
result = Item.objects.get(kind__name="file", id=id)
return FileResponse(open(result.content, "rb"), as_attachment=True, filename=result.name)
def get_random_name():
timestamp = str(int(datetime.now().timestamp() * 1000000))
random_number = str(int(random() * 1000))
name = "{}{}".format(timestamp, random_number)
return name
def handle_uploaded_file(f, name):
with open("uploads/{}".format(name), "wb+") as destination:
for chunk in f.chunks():
destination.write(chunk)
| 31.302521 | 93 | 0.599463 | 427 | 3,725 | 5.12178 | 0.23185 | 0.045725 | 0.03658 | 0.043439 | 0.395519 | 0.322817 | 0.231367 | 0.194787 | 0.149977 | 0.133516 | 0 | 0.016837 | 0.266577 | 3,725 | 118 | 94 | 31.567797 | 0.783675 | 0.008591 | 0 | 0.321839 | 0 | 0 | 0.068909 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.068966 | false | 0 | 0.068966 | 0 | 0.287356 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0fa7e822d653bf6c701bbda193b902d023f99d6a | 8,391 | py | Python | clairttn/ttn_handler.py | ClairBerlin/clair-ttn | b8bbce56e79622340b0b89a691a16295ac11872a | [
"BSD-3-Clause"
] | 2 | 2020-11-22T17:07:20.000Z | 2020-11-26T07:53:00.000Z | clairttn/ttn_handler.py | ClairBerlin/clair-ttn | b8bbce56e79622340b0b89a691a16295ac11872a | [
"BSD-3-Clause"
] | 7 | 2020-12-01T17:12:30.000Z | 2021-09-23T15:11:57.000Z | clairttn/ttn_handler.py | ClairBerlin/clair-ttn | b8bbce56e79622340b0b89a691a16295ac11872a | [
"BSD-3-Clause"
] | null | null | null | import logging
import paho.mqtt.client as mqtt
import json
import traceback
import base64
import dateutil.parser as dtparser
import clairttn.types as types
class RxMessage:
"""Core parts of the TTN message received from a node"""
def __init__(self, raw_data, device_id, device_eui, rx_datetime, rx_port, mcs):
self.raw_data = raw_data
self.device_id = device_id
self.device_eui = device_eui
self.rx_datetime = rx_datetime
self.rx_port = rx_port
self.mcs = mcs
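# Shared MQTT plumbing for TTN; subclasses supply uplink payload extraction and
# the downlink message format for the v2 and v3 stacks respectively.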
class _TtnHandler:
def _handle_message(self, ttn_rxmsg):
raise NotImplementedError("Needs to be provided as callback.")
def _on_connect(self, client, _userdata, _flags, rc):
if rc == 0:
logging.info("Connect success!")
client.subscribe(self._sub_topics)
logging.debug("Subscribed to topic %s", self._sub_topics)
else:
logging.error("Failed to connect, return code %d", rc)
def _on_message(self, _client, _userdata, message):
# Decode UTF-8 bytes to Unicode,
mqtt_payload = message.payload.decode("utf8")
# Parse the string into a JSON object.
ttn_rxmsg = json.loads(mqtt_payload)
topic = message.topic
logging.debug("Uplink message received on topic %s", topic)
logging.debug("Message payload: %s", ttn_rxmsg)
rx_message = self._extract_rx_message(ttn_rxmsg)
if not rx_message:
logging.warning("Skipping message...")
return
try:
self.handle_message(rx_message)
except Exception as e2:
logging.error("exception during message handling: %s", e2)
logging.error(traceback.format_exc())
def __init__(self, app_id, access_key, broker_host, sub_topics):
logging.debug("Application ID: %s", app_id)
self._app_id = app_id
self._broker_port = 8883 # TTN uses the default MQTT TLS port.
self._broker_host = broker_host
self._sub_topics = sub_topics
self._mqtt_client = mqtt.Client(
client_id="Clair-Berlin",
clean_session=False,
userdata=None,
protocol=mqtt.MQTTv311, # TTN supports MQTT v 3.1.1 only
transport="tcp",
)
self._mqtt_client.username_pw_set(username=app_id, password=access_key)
self._mqtt_client.tls_set()
# Attach callbacks to client.
self._mqtt_client.on_message = self._on_message
self._mqtt_client.on_connect = self._on_connect
        # Placeholder callback; must be provided by the application-layer node handler
self.handle_message = self._handle_message
def _extract_rx_message(self, ttn_rxmsg):
raise NotImplementedError("needs to be implemented by subclass")
def _create_tx_message(self, port, payload):
raise NotImplementedError("needs to be implemented by subclass")
def connect(self):
if self._broker_host:
logging.debug("Connecting to the TTN MQTT broker at %s", self._broker_host)
self._mqtt_client.connect_async(
host=self._broker_host, port=self._broker_port
)
self._mqtt_client.loop_start()
logging.debug("Message handling loop started.")
else:
raise NotImplementedError("must be called from concrete subclass")
def disconnect_and_close(self):
self._mqtt_client.loop_stop()
logging.debug("Message handling loop stopped.")
self._mqtt_client.disconnect()
logging.debug("Disconnected from %s", self._broker_host)
def send(self, dev_id, port, payload):
raise NotImplementedError("Must be implemented by subclass")
class TtnV2Handler(_TtnHandler):
def __init__(self, app_id, access_key):
logging.info("Configuring TTN Stack V2")
sub_topics = app_id + "/devices/+/up"
super().__init__(app_id, access_key, "eu.thethings.network", sub_topics)
def _extract_rx_message(self, ttn_rxmsg):
if "payload_raw" not in ttn_rxmsg:
logging.warning("Message without payload.")
return None
try:
device_eui = bytes.fromhex(ttn_rxmsg["hardware_serial"])
logging.info("device eui: %s", device_eui.hex())
device_id = ttn_rxmsg["dev_id"]
logging.info("device name: %s", device_id)
raw_payload = ttn_rxmsg["payload_raw"]
raw_data = base64.b64decode(raw_payload)
logging.debug("raw data: %s", raw_data.hex("-").upper())
metadata = ttn_rxmsg["metadata"]
rx_datetime = dtparser.parse(metadata["time"])
logging.debug("received at: %s", rx_datetime.isoformat())
rx_port = ttn_rxmsg.get("port", 5) # Default Elsys ERS uplink port is 5.
lora_rate = metadata["data_rate"]
except Exception as e1:
logging.error(
"Exception decoding the MQTT message: %s \n error %s", ttn_rxmsg, e1
)
return None
try:
mcs = types.LoRaWanMcs[lora_rate]
except KeyError:
logging.warning("message without data rate, assuming simulated uplink")
mcs = types.LoRaWanMcs.SF9BW125
logging.info("MCS: %s", mcs)
return RxMessage(raw_data, device_id, device_eui, rx_datetime, rx_port, mcs)
def _create_tx_message(self, port, payload):
"""Message format: https://www.thethingsnetwork.org/docs/applications/mqtt/api/#downlink-messages"""
tx_message = {"port": port, "payload_raw": payload}
json_tx_message = json.dumps(tx_message)
return str(json_tx_message)
def send(self, dev_id, port, payload):
topic = self._app_id + "/devices/" + dev_id + "/down"
message = self._create_tx_message(port, payload)
self._mqtt_client.publish(topic, message)
class TtnV3Handler(_TtnHandler):
def __init__(self, app_id, access_key):
logging.info("Configuring TTN Stack V3")
sub_topics = "v3/" + app_id + "@ttn/devices/+/up"
super().__init__(app_id, access_key, "eu1.cloud.thethings.network", sub_topics)
def _extract_rx_message(self, ttn_rxmsg):
if "frm_payload" not in ttn_rxmsg["uplink_message"]:
logging.warning("Message without payload.")
return None
try:
device_ids = ttn_rxmsg["end_device_ids"]
device_eui = bytes.fromhex(device_ids["dev_eui"])
logging.info("device eui: %s", device_eui.hex())
device_id = device_ids["device_id"]
logging.info("device name: %s", device_id)
uplink_message = ttn_rxmsg["uplink_message"]
raw_payload = uplink_message["frm_payload"]
raw_data = base64.b64decode(raw_payload)
logging.debug("raw data: %s", raw_data.hex("-").upper())
rx_datetime = dtparser.parse(uplink_message["received_at"])
logging.debug("received at: %s", rx_datetime.isoformat())
# Default Elsys ERS uplink port is 5.
rx_port = uplink_message.get("f_port", 5)
lora_rate = uplink_message["settings"].get("data_rate_index")
if lora_rate is None:
logging.warning("message without data rate, assuming simulated uplink")
mcs = types.LoRaWanMcs.SF9BW125
else:
mcs = types.DATA_RATE_INDEX[lora_rate]
except Exception as e1:
logging.error(
"Exception decoding the MQTT message: %s \n error %s", ttn_rxmsg, e1
)
return None
logging.info("MCS: %s", mcs)
return RxMessage(raw_data, device_id, device_eui, rx_datetime, rx_port, mcs)
def _create_tx_message(self, port, payload):
"""Message format: https://www.thethingsindustries.com/docs/reference/data-formats/#downlink-messages"""
tx_frame = {"f_port": port, "frm_payload": payload, "priority": "NORMAL"}
tx_message = {"downlinks": [tx_frame]}
json_tx_message = json.dumps(tx_message)
return str(json_tx_message)
def send(self, dev_id, port, payload):
topic = "v3/" + self._app_id + "@ttn/devices/" + dev_id + "/down/push"
message = self._create_tx_message(port, payload)
self._mqtt_client.publish(topic, message)
| 39.957143 | 112 | 0.634728 | 1,044 | 8,391 | 4.832375 | 0.211686 | 0.028543 | 0.030525 | 0.013875 | 0.426165 | 0.413875 | 0.413875 | 0.384143 | 0.342121 | 0.284044 | 0 | 0.007758 | 0.262662 | 8,391 | 209 | 113 | 40.148325 | 0.807661 | 0.065189 | 0 | 0.351515 | 0 | 0 | 0.166347 | 0.003452 | 0 | 0 | 0 | 0 | 0 | 1 | 0.109091 | false | 0.006061 | 0.042424 | 0 | 0.230303 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0fa838099d5fb55649da932f3d6fd1c83e7b8248 | 987 | py | Python | Python3/1145-Binary-Tree-Coloring-Game/soln.py | wyaadarsh/LeetCode-Solutions | 3719f5cb059eefd66b83eb8ae990652f4b7fd124 | [
"MIT"
] | 5 | 2020-07-24T17:48:59.000Z | 2020-12-21T05:56:00.000Z | Python3/1145-Binary-Tree-Coloring-Game/soln.py | zhangyaqi1989/LeetCode-Solutions | 2655a1ffc8678ad1de6c24295071308a18c5dc6e | [
"MIT"
] | null | null | null | Python3/1145-Binary-Tree-Coloring-Game/soln.py | zhangyaqi1989/LeetCode-Solutions | 2655a1ffc8678ad1de6c24295071308a18c5dc6e | [
"MIT"
] | 2 | 2020-07-24T17:49:01.000Z | 2020-08-31T19:57:35.000Z | # Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
def btreeGameWinningMove(self, root: TreeNode, n: int, x: int) -> int:
memo = {}
tree = {}
def dfs(node):
if node is not None:
tree[node.val] = node
l = dfs(node.left)
r = dfs(node.right)
memo[node.val] = l + r + 1
return l + r + 1
else:
return 0
def is_child(node, x):
if node is not None:
if node.val == x:
return True
return is_child(node.left, x) or is_child(node.right, x)
else:
return False
dfs(root)
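        # Best blue move is either x's parent side (n - red, reached via any
        # ancestor of x) or one of x's child subtrees; blue needs a strict majority.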
red = memo[x]
blue = max(memo[i] - is_child(tree[i], x) * red for i in range(1, n + 1) if i != x)
return blue > n - blue
| 30.84375 | 91 | 0.451874 | 128 | 987 | 3.421875 | 0.328125 | 0.063927 | 0.075342 | 0.050228 | 0.068493 | 0 | 0 | 0 | 0 | 0 | 0 | 0.009091 | 0.442756 | 987 | 31 | 92 | 31.83871 | 0.787273 | 0.150963 | 0 | 0.166667 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.125 | false | 0 | 0 | 0 | 0.416667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0faa8bd895cde9bf26aece88377d12329c9e189a | 4,357 | py | Python | clustering/post_process_simfile.py | rmuniappan/SPREAD_model | a1b9f2a620c0628b93ecf3a3ec02d140e172d3e2 | [
"Apache-2.0"
] | null | null | null | clustering/post_process_simfile.py | rmuniappan/SPREAD_model | a1b9f2a620c0628b93ecf3a3ec02d140e172d3e2 | [
"Apache-2.0"
] | null | null | null | clustering/post_process_simfile.py | rmuniappan/SPREAD_model | a1b9f2a620c0628b93ecf3a3ec02d140e172d3e2 | [
"Apache-2.0"
] | null | null | null | # tags: argparse pandas csv pandas python map
import argparse
import pandas as pd
import pdb
import os
DESC="""description:
This function does the following given a simulation output file (format
given below). In each case, a different output file is generated.
1. Computes expected time of infection for each cell.
2. Sorts cells in ascending order by their expected time of infection.
3. Concatenates infection probabilities into one vector ordered by cell id.
In addition, it can also filter cells by country/region and output a subset
of relevant cells.
"""
## TYPE="""sort: 1st output type (as mentioned in description).
## expected_time: 2nd output type.
## """
CELL_COUNTRY_MAP="../../cellular_automata/obj/ca_mapspam_pop.csv"
def filterCells(filename,countryFilter="",monitoredCellsFilter=False):
"""
Filters cells based on constraints. This is where cells corresponding
to a country can be selected.
"""
### read simulation output file
infectionTimeline = pd.read_csv(filename,index_col="cell_id")
### apply country filter
if countryFilter != "":
cellCountryMap=pd.read_csv(CELL_COUNTRY_MAP,index_col="cell id")
cellCountryMap=cellCountryMap[cellCountryMap['admin_id'].str.contains(countryFilter)]
infectionTimeline=infectionTimeline[infectionTimeline.index.isin(cellCountryMap.index)]
### apply monitored cells filter
return infectionTimeline
def sortByInfected(infectionTimeline):
### add an extra time step to account for probability that a cell is
### not infected within the time interval. The value is the residual
### probability.
    timeSteps=list(map(int,infectionTimeline.columns.values.tolist()))
timeSteps+=[timeSteps[-1]+1]
infectionTimeline['%d' %(timeSteps[-1])]=1-infectionTimeline.sum(axis=1)
### expected time
expectedTimes=infectionTimeline.dot(timeSteps).to_frame()
expectedTimes.columns=['t']
### sort
sortedCells=expectedTimes.sort_values(by=['t'])
### concat
dim=infectionTimeline.shape
timelineVec=infectionTimeline.sort_index().values.reshape((1,dim[0]*dim[1]))[0]
return expectedTimes.sort_index(), sortedCells.index.tolist(), timelineVec
if __name__ == "__main__":
parser=argparse.ArgumentParser(description=DESC,
formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument("input_sim_file",help="simulation file: <cell_id,t1,t2,...> where each column ti contains the empirical probability of infection at ti.")
#parser.add_argument("-m","--mode",help=TYPE,default="sort")
parser.add_argument("-o","--out_prefix",default="out",help="output file name prefix: <prefix>_time.csv and <prefix>_sorted.csv ")
parser.add_argument("-c","--country_filter",default="", help= \
"""This can be used to choose cells of a particular country or region. It is
specified by the code or its prefix in the This can be used to choose
cells of a particular country or region. It is specified by the code or
its prefix in the admin_id\"admin_id\" field of %s.""" %CELL_COUNTRY_MAP)
#parser.add_argument("--step", default=0, type=float)
args=parser.parse_args()
selectedCells=filterCells(args.input_sim_file,args.country_filter)
[expectedTimes,sortedCells,infVec]=sortByInfected(selectedCells)
    # derive the instance name from the input file name once. Note that
    # str.lstrip/str.rstrip strip character *sets*, not prefixes/suffixes,
    # so the affixes are sliced off explicitly instead.
    instanceName = os.path.basename(args.input_sim_file)
    if instanceName.startswith("res_"):
        instanceName = instanceName[len("res_"):]
    if instanceName.endswith(".csv"):
        instanceName = instanceName[:-len(".csv")]
    # expected times file
    with open(args.out_prefix+"_time.csv",'w') as f:
        f.write("instance")
        for i in expectedTimes.index.tolist():
            f.write(",%d" %i)
        f.write("\n")
        f.write(instanceName) # This step should ideally come as input.
        for e in expectedTimes["t"].tolist():
            f.write(",%g" %e)
        f.write("\n")
    # sorted cells file
    with open(args.out_prefix+"_sorted.csv",'w') as f:
        f.write(instanceName) # This step should ideally come as input.
        for e in sortedCells:
            f.write(",%d" %e)
        f.write("\n")
    # infection vector file
    with open(args.out_prefix+"_infvec.csv",'w') as f:
        f.write(instanceName.replace('_',',').replace('-',',').replace('a,','')) # This step should ideally come as input.
        for e in infVec:
            f.write(",%g" %e)
        f.write("\n")
| 42.300971 | 177 | 0.699564 | 588 | 4,357 | 5.076531 | 0.331633 | 0.024121 | 0.028476 | 0.021441 | 0.198325 | 0.198325 | 0.168844 | 0.158794 | 0.158794 | 0.158794 | 0 | 0.004699 | 0.169612 | 4,357 | 102 | 178 | 42.715686 | 0.820343 | 0.178793 | 0 | 0.133333 | 0 | 0.016667 | 0.261714 | 0.01418 | 0 | 0 | 0 | 0 | 0 | 1 | 0.033333 | false | 0 | 0.066667 | 0 | 0.133333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0fab926181dd2f0f4e60cb9a023a085e21b6e0cc | 1,604 | py | Python | examples/calc_pmv_ppd.py | ElsevierSoftwareX/SOFTX_2020_250 | 1c7d2b986f6a4ab8e2916bb2997efef36427b0a1 | [
"MIT"
] | null | null | null | examples/calc_pmv_ppd.py | ElsevierSoftwareX/SOFTX_2020_250 | 1c7d2b986f6a4ab8e2916bb2997efef36427b0a1 | [
"MIT"
] | null | null | null | examples/calc_pmv_ppd.py | ElsevierSoftwareX/SOFTX_2020_250 | 1c7d2b986f6a4ab8e2916bb2997efef36427b0a1 | [
"MIT"
] | 1 | 2021-01-21T20:28:33.000Z | 2021-01-21T20:28:33.000Z | from pythermalcomfort.models import pmv_ppd
from pythermalcomfort.psychrometrics import v_relative
from pythermalcomfort.utilities import met_typical_tasks
from pythermalcomfort.utilities import clo_individual_garments
# input variables
tdb = 27 # dry-bulb air temperature, [°C]
tr = 25 # mean radiant temperature, [°C]
v = 0.1 # average air velocity, [m/s]
rh = 50 # relative humidity, [%]
activity = "Typing" # participant's activity description
garments = ["Sweatpants", "T-shirt", "Shoes or sandals"]
met = met_typical_tasks[activity] # activity met, [met]
icl = sum([clo_individual_garments[item] for item in garments]) # calculate total clothing insulation
# calculate the relative air velocity
vr = v_relative(v=v, met=met)
# calculate PMV in accordance with ASHRAE 55-2017
results = pmv_ppd(tdb=tdb, tr=tr, vr=vr, rh=rh, met=met, clo=icl, standard="ASHRAE")
# print the results
print(results)
# print PMV value
print(f"pmv={results['pmv']}, ppd={results['ppd']}%")
# for users who want to use the IP system
results_ip = pmv_ppd(tdb=77, tr=77, vr=0.4, rh=50, met=1.2, clo=0.5, units="IP")
print(results_ip)
import pandas as pd
import os
df = pd.read_csv(os.path.join(os.getcwd(), "examples", "template-SI.csv"))
df['PMV'] = None
df['PPD'] = None
for index, row in df.iterrows():
vr = v_relative(v=row['v'], met=row['met'])
results = pmv_ppd(tdb=row['tdb'], tr=row['tr'], vr=vr, rh=row['rh'], met=row['met'], clo=row['clo'], standard="ashrae")
df.loc[index, 'PMV'] = results['pmv']
df.loc[index, 'PPD'] = results['ppd']
print(df)
df.to_csv('results.csv')
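# A small convenience wrapper (illustrative, not part of pythermalcomfort):
# the same per-row loop as above, packaged as a function. Column names are
# assumed to match the template file.
def add_pmv_ppd_columns(frame):
    frame = frame.copy()
    for ix, r in frame.iterrows():
        res = pmv_ppd(tdb=r['tdb'], tr=r['tr'], vr=v_relative(v=r['v'], met=r['met']),
                      rh=r['rh'], met=r['met'], clo=r['clo'], standard="ASHRAE")
        frame.loc[ix, 'PMV'] = res['pmv']
        frame.loc[ix, 'PPD'] = res['ppd']
    return frame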
| 32.734694 | 123 | 0.700125 | 255 | 1,604 | 4.329412 | 0.384314 | 0.027174 | 0.035326 | 0.063406 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.018827 | 0.139027 | 1,604 | 48 | 124 | 33.416667 | 0.780594 | 0.249377 | 0 | 0 | 0 | 0 | 0.140336 | 0.056303 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.2 | 0 | 0.2 | 0.133333 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0fabab9c3d439bf8059fbf7316c78d5221d18696 | 3,284 | py | Python | pets/views.py | fabrilopez/django_backend_irobot | 3085f6247c9cf5c852f973e8207066d90b940f5e | [
"Apache-2.0"
] | 1 | 2021-06-25T12:40:20.000Z | 2021-06-25T12:40:20.000Z | pets/views.py | fabrilopez/django_backend_irobot | 3085f6247c9cf5c852f973e8207066d90b940f5e | [
"Apache-2.0"
] | null | null | null | pets/views.py | fabrilopez/django_backend_irobot | 3085f6247c9cf5c852f973e8207066d90b940f5e | [
"Apache-2.0"
] | null | null | null | from django.http.response import JsonResponse
from rest_framework.parsers import JSONParser
from rest_framework import status
from .models import Pet
from .serializers import PetSerializer
from rest_framework.decorators import api_view
@api_view(['GET','POST'])
def pet_list(request):
# GET a full list of pets, POST create a new pet
if request.method == 'GET':
pets = Pet.objects.all()
user = request.user
print('user_id: ', user.id)
# read search criteria
name = request.GET.get('name', None)
age = request.GET.get('age', None)
# filters by name
if name is not None and age is None:
pets = pets.filter(name__icontains=name)
# filters by age
elif age is not None and name is None:
# just in case a STRING appears, who knows..
try:
pets = pets.filter(age=age)
except ValueError:
return JsonResponse({'message': 'Field (age) expected a number but got ({})'.format(age)}, status=status.HTTP_404_NOT_FOUND)
# serialize response
pets_serializer = PetSerializer(pets, many=True)
return JsonResponse(pets_serializer.data, safe=False)
elif request.method == 'POST':
pet_data = JSONParser().parse(request)
pet_serializer = PetSerializer(data=pet_data)
# if "valid", persist
if pet_serializer.is_valid():
pet_serializer.save()
return JsonResponse(pet_serializer.data, status=status.HTTP_201_CREATED)
# return error
return JsonResponse(pet_serializer.errors, status=status.HTTP_400_BAD_REQUEST)
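# Illustrative sketch (not part of the original app): exercising pet_list with
# Django's test client. The '/pets/' route and the payload fields are
# assumptions; the real URLconf may differ.
def _example_pet_list_roundtrip():
    from django.test import Client
    client = Client()
    created = client.post('/pets/', {'name': 'Rex', 'age': 3},
                          content_type='application/json')
    listed = client.get('/pets/', {'name': 'Rex'})
    return created.status_code, listed.json()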
@api_view(['DELETE',])
def pet_list_delete(request):
# DELETE erase all pets
if request.method == 'DELETE':
count = Pet.objects.all().delete()
return JsonResponse({'message': '{} All Pets records were deleted successfully!'.format(count[0])}, status=status.HTTP_204_NO_CONTENT)
@api_view(['GET', 'PUT', 'DELETE',])
def pet_detail(request, pk):
# GET pet detail, PUT modifies pet details, DELETE erase pet record
# first checks if find pet by pk (id)
try:
pet = Pet.objects.get(pk=pk)
except Pet.DoesNotExist:
return JsonResponse({'message': 'The pet record does not exist'}, status=status.HTTP_404_NOT_FOUND)
# done, now GET, PUT and DELETE by pk
if request.method == 'GET':
pet_serializer = PetSerializer(pet)
return JsonResponse(pet_serializer.data)
elif request.method == 'PUT':
pet_data = JSONParser().parse(request)
pet_serializer = PetSerializer(pet, data=pet_data)
if pet_serializer.is_valid():
pet_serializer.save()
return JsonResponse(pet_serializer.data)
return JsonResponse(pet_serializer.errors, status=status.HTTP_400_BAD_REQUEST)
elif request.method == 'DELETE':
pet.delete()
return JsonResponse({'message': 'Pet record was deleted successfully!'}, status=status.HTTP_204_NO_CONTENT) | 35.695652 | 142 | 0.669001 | 414 | 3,284 | 5.164251 | 0.304348 | 0.072965 | 0.052385 | 0.072498 | 0.250234 | 0.233863 | 0.182413 | 0.182413 | 0.130964 | 0.130964 | 0 | 0.009932 | 0.233557 | 3,284 | 92 | 143 | 35.695652 | 0.839491 | 0.107491 | 0 | 0.241379 | 0 | 0 | 0.084618 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.051724 | false | 0 | 0.172414 | 0 | 0.396552 | 0.017241 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0fabce9d0f51782141dfd35b544139487c5b58c4 | 3,025 | py | Python | finance/gm/main.py | jeffzhengye/pylearn | a140d0fca8a371faada194cb0126192675cc2045 | [
"Unlicense"
] | 2 | 2016-02-17T06:00:35.000Z | 2020-11-23T13:34:00.000Z | finance/gm/main.py | jeffzhengye/pylearn | a140d0fca8a371faada194cb0126192675cc2045 | [
"Unlicense"
] | null | null | null | finance/gm/main.py | jeffzhengye/pylearn | a140d0fca8a371faada194cb0126192675cc2045 | [
"Unlicense"
] | null | null | null | # coding=utf-8
from __future__ import print_function, absolute_import
from gm.api import *
"""
本策略采用布林线进行均值回归交易。当价格触及布林线上轨的时候进行卖出,当触及下轨的时候,进行买入。
使用600004在 2009-09-17 13:00:00 到 2020-03-21 15:00:00 进行了回测。
注意:
1:实盘中,如果在收盘的那一根bar或tick触发交易信号,需要自行处理,实盘可能不会成交。
"""
# the strategy must define an init method
def init(context):
    # set the three Bollinger Band parameters
    context.maPeriod = 26 # period of the BOLL middle band (moving average)
    context.stdPeriod = 26 # period of the BOLL standard deviation
    context.stdRange = 1 # multiplier for the distance between the bands and the middle band
    # set the instrument to backtest
    context.symbol = 'SHSE.600004' # subscription & trading symbol, here 600004
    context.period = max(context.maPeriod, context.stdPeriod, context.stdRange) + 1 # length of the subscribed data window
    # subscribe to market data
    subscribe(symbols= context.symbol, frequency='1d', count=context.period)
def on_bar(context, bars):
    # fetch the data window; anything subscribed in init can be read here as a pandas.DataFrame
    data = context.data(symbol=context.symbol, frequency='1d', count=context.period, fields='close')
    # compute the upper and lower Bollinger Bands
    bollUpper = data['close'].rolling(context.maPeriod).mean() \
                + context.stdRange * data['close'].rolling(context.stdPeriod).std()
    bollBottom = data['close'].rolling(context.maPeriod).mean() \
                 - context.stdRange * data['close'].rolling(context.stdPeriod).std()
    # get the current long position
    pos = context.account().position(symbol=context.symbol, side=PositionSide_Long)
    # trading logic and order placement
    # if holding a position and the price crosses above the upper band, sell.
    if data.close.values[-1] > bollUpper.values[-1] and data.close.values[-2] < bollUpper.values[-2]:
        if pos: # holding a position: sell at market.
            order_volume(symbol=context.symbol, volume=100, side=OrderSide_Sell,
                         order_type=OrderType_Market, position_effect=PositionEffect_Close)
            print('Sold one lot at market price')
    # if flat and the price crosses below the lower band, buy.
    elif data.close.values[-1] < bollBottom.values[-1] and data.close.values[-2] > bollBottom.values[-2]:
        if not pos: # no position yet: buy one lot (100 shares).
            order_volume(symbol=context.symbol, volume=100, side=OrderSide_Buy,
                         order_type=OrderType_Market, position_effect=PositionEffect_Open)
            print('Bought one lot at market price')
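# Illustrative sketch (independent of the gm SDK): the same Bollinger Band
# arithmetic as in on_bar, run on synthetic closes so the band logic can be
# checked without a live data subscription.
def example_bollinger_cross():
    import numpy as np
    import pandas as pd
    closes = pd.Series(np.sin(np.linspace(0, 6, 60)) + 10.0)
    mid = closes.rolling(26).mean()
    std = closes.rolling(26).std()
    upper, lower = mid + std, mid - std
    # a close crossing above the upper band on the latest bar would trigger a sell
    sell = bool(closes.iloc[-1] > upper.iloc[-1] and closes.iloc[-2] < upper.iloc[-2])
    buy = bool(closes.iloc[-1] < lower.iloc[-1] and closes.iloc[-2] > lower.iloc[-2])
    return sell, buy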
if __name__ == '__main__':
    '''
    strategy_id: strategy ID, generated by the platform
    filename: file name; keep it identical to this file's name
    mode: live mode MODE_LIVE, backtest mode MODE_BACKTEST
    token: ID bound to this machine, generated under System Settings - Key Management
    backtest_start_time: backtest start time
    backtest_end_time: backtest end time
    backtest_adjust: price adjustment; none: ADJUST_NONE, forward: ADJUST_PREV, backward: ADJUST_POST
    backtest_initial_cash: initial backtest capital
    backtest_commission_ratio: backtest commission ratio
    backtest_slippage_ratio: backtest slippage ratio
    '''
run(strategy_id='38faa4f9-2fdc-11ec-bc94-58961d9a4ac1',
filename='main.py',
mode=MODE_BACKTEST,
token='803ab887a9562b630907ce9a28367e280b463594',
backtest_start_time='2009-09-17 13:00:00',
backtest_end_time='2020-03-21 15:00:00',
backtest_adjust=ADJUST_PREV,
backtest_initial_cash=1000,
backtest_commission_ratio=0.0001,
backtest_slippage_ratio=0.0001) | 38.291139 | 106 | 0.671074 | 327 | 3,025 | 6.027523 | 0.489297 | 0.03653 | 0.038559 | 0.046677 | 0.289193 | 0.289193 | 0.260781 | 0.139016 | 0.139016 | 0.086251 | 0 | 0.067998 | 0.222149 | 3,025 | 79 | 107 | 38.291139 | 0.769656 | 0.093554 | 0 | 0 | 0 | 0 | 0.089588 | 0.036804 | 0 | 0 | 0 | 0 | 0 | 1 | 0.054054 | false | 0 | 0.054054 | 0 | 0.108108 | 0.081081 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0faf9720bff6effda17def72bb727c17fafe596c | 3,564 | py | Python | src/classes/Identifier.py | kikei/asin-jan-converter | 311f9a4660d3a3da6aa8114094b15d1114223bc1 | [
"MIT"
] | null | null | null | src/classes/Identifier.py | kikei/asin-jan-converter | 311f9a4660d3a3da6aa8114094b15d1114223bc1 | [
"MIT"
] | null | null | null | src/classes/Identifier.py | kikei/asin-jan-converter | 311f9a4660d3a3da6aa8114094b15d1114223bc1 | [
"MIT"
] | null | null | null | import datetime
class Identifier(object):
"""
    The EAN field may hold an EAN, JAN, ISBN, etc.
"""
SUCCESS = 0
FAILED = 1
NEW = 2
OBJ_EAN = 'ean'
OBJ_ASIN = 'asin'
OBJ_STATUS = 'status'
OBJ_UPDATED = 'updated'
OBJ_TITLE = 'title'
def __init__(self, ean=None, asin=None,
status=None, updated=None, title=None):
assert ean is not None or asin is not None
self.ean = ean
self.asin = asin
if status is None:
self.status = Identifier.NEW
else:
self.status = status
if updated is None:
self.updated = timestamp()
else:
self.updated = updated
self.title = title
def __str__(self):
return ('Identifier(asin={a}, ean={e}, status={s}, title={t})'
.format(a=self.asin, e=self.ean, s=self.status, t=self.title))
@staticmethod
def from_obj(obj):
assert obj is not None
if Identifier.OBJ_STATUS not in obj:
status = Identifier.NEW
else:
status = obj[Identifier.OBJ_STATUS]
if Identifier.OBJ_UPDATED not in obj:
updated = timestamp()
else:
updated = obj[Identifier.OBJ_UPDATED]
if Identifier.OBJ_TITLE not in obj:
title = None
else:
title = obj[Identifier.OBJ_TITLE]
return Identifier(ean=obj[Identifier.OBJ_EAN],
asin=obj[Identifier.OBJ_ASIN],
status=status,
updated=updated,
title=title)
@staticmethod
def filter_asin(asin):
assert isinstance(asin, str)
return {
Identifier.OBJ_ASIN: asin
}
@staticmethod
def filter_ean(ean):
assert isinstance(ean, str)
return {
Identifier.OBJ_EAN: ean
}
@staticmethod
def filter_intersection(ean=None, asin=None):
assert ean is not None or asin is not None
filters = []
if ean is not None:
filters.append(Identifier.filter_ean(ean=ean))
if asin is not None:
filters.append(Identifier.filter_asin(asin=asin))
assert len(filters) > 0
if len(filters) == 1:
return filters[0]
else:
return {'$or': filters}
@staticmethod
def extend(a, b):
if b.status == Identifier.SUCCESS:
# SUCCESS, SUCCESS
# FAILED , SUCCESS
# NEW , SUCCESS
status = Identifier.SUCCESS
elif a.status == Identifier.SUCCESS:
# SUCCESS, FAILED
# SUCCESS, NEW
a, b = b, a
status = Identifier.SUCCESS
elif a.status == Identifier.FAILED and b.status == Identifier.FAILED:
# FAILED , FAILED
status = Identifier.FAILED
elif b.status == Identifier.NEW:
# FAILED , NEW
# NEW , NEW
status = Identifier.NEW
elif a.status == Identifier.NEW:
# NEW , FAILED
status = b.status
a, b = b, a
ean = b.ean or a.ean
asin = b.asin or a.asin
title = b.title or a.title
updated = max(b.updated, a.updated)
return Identifier(asin=asin, ean=ean,
status=status, title=title, updated=updated)
def to_obj(self):
return {
Identifier.OBJ_EAN: self.ean,
Identifier.OBJ_ASIN: self.asin,
Identifier.OBJ_STATUS: self.status,
Identifier.OBJ_UPDATED: self.updated,
Identifier.OBJ_TITLE: self.title
}
def asin_available(self):
if self.status == Identifier.SUCCESS:
return self.asin
else:
return None
def ean_available(self):
if self.status == Identifier.SUCCESS:
return self.ean
else:
return None
def timestamp():
return datetime.datetime.now().timestamp()
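# Illustrative sketch: merging two partially filled records with
# Identifier.extend. The codes below are invented for demonstration.
def example_extend():
    a = Identifier(ean='4901234567894', status=Identifier.NEW)
    b = Identifier(ean='4901234567894', asin='B000000000',
                   status=Identifier.SUCCESS, title='Example item')
    merged = Identifier.extend(a, b)
    # the SUCCESS record wins and missing fields are filled from the other side
    assert merged.status == Identifier.SUCCESS
    assert merged.asin == 'B000000000'
    return merged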
| 25.457143 | 74 | 0.607744 | 453 | 3,564 | 4.699779 | 0.125828 | 0.112729 | 0.029591 | 0.016909 | 0.193988 | 0.160639 | 0.160639 | 0.083607 | 0.083607 | 0.034758 | 0 | 0.002372 | 0.290123 | 3,564 | 139 | 75 | 25.640288 | 0.83913 | 0.049102 | 0 | 0.25 | 0 | 0 | 0.023753 | 0 | 0 | 0 | 0 | 0 | 0.053571 | 1 | 0.098214 | false | 0 | 0.008929 | 0.026786 | 0.303571 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0fb1129d5afc521e9c9aa914b234066cbf042147 | 6,891 | py | Python | src/model/RansacAbsFromRel.py | DLR-RM/SyntheticDataLocalization | 68eda127a0ae7d6a2c98e9b6fc8b49d0e761af9c | [
"MIT"
] | 3 | 2021-06-08T14:57:32.000Z | 2021-12-03T17:20:11.000Z | src/model/RansacAbsFromRel.py | DLR-RM/SyntheticDataLocalization | 68eda127a0ae7d6a2c98e9b6fc8b49d0e761af9c | [
"MIT"
] | null | null | null | src/model/RansacAbsFromRel.py | DLR-RM/SyntheticDataLocalization | 68eda127a0ae7d6a2c98e9b6fc8b49d0e761af9c | [
"MIT"
] | null | null | null | import itertools
import numpy as np
import tensorflow as tf
from src.utils.RMatrix import RMatrix
from src.utils.TMatrix import TMatrix
from src.utils.triangulation.Triangulation import Triangulation
class RansacAbsFromRel():
def __init__(self, rel_pose_estimation_model, use_scale=True, use_uncertainty=True, debug=False):
self.rel_pose_estimation_model = rel_pose_estimation_model
self.use_scale = use_scale
self.use_uncertainty = use_uncertainty
self.debug = debug
#@tf.function
def abs_pose_from_pair(self, ref_to_query_1, ref_pose_1, ref_to_query_2, ref_pose_2):
""" Triangulate an absolute pose estimate from the two given relative pose estimates """
# Compute all possible estimates for the rotation of the query image (for ExReNet R1 = R2)
R_r1 = RMatrix.inverse(ref_pose_1["R"])
dR1_q_r1 = ref_to_query_1["R1"]
dR2_q_r1 = ref_to_query_1["R2"]
R1_q_r1 = RMatrix.apply(dR1_q_r1, R_r1)
R2_q_r1 = RMatrix.apply(dR2_q_r1, R_r1)
R_r2 = RMatrix.inverse(ref_pose_2["R"])
dR1_q_r2 = ref_to_query_2["R1"]
dR2_q_r2 = ref_to_query_2["R2"]
R1_q_r2 = RMatrix.apply(dR1_q_r2, R_r2)
R2_q_r2 = RMatrix.apply(dR2_q_r2, R_r2)
pairs = [(R1_q_r1, R1_q_r2), (R1_q_r1, R2_q_r2), (R2_q_r1, R1_q_r2), (R2_q_r1, R2_q_r2)]
# Compute difference between all rotation pairs
diffs = []
for pair in pairs:
diffs.append(RMatrix.rotation_diff(pair[0], pair[1]))
# Translation directions
dir_1 = np.expand_dims(ref_to_query_1["t"], 1)
dir_2 = np.expand_dims(ref_to_query_2["t"], 1)
# Triangulate
t_abs = Triangulation.triangulate(
ref_pose_1["t"],
RMatrix.apply(RMatrix.inverse(R_r1), RMatrix.apply(RMatrix.inverse(dR1_q_r1), dir_1))[:3, 0],
ref_pose_2["t"],
RMatrix.apply(RMatrix.inverse(R_r2), RMatrix.apply(RMatrix.inverse(dR1_q_r2), dir_2))[:3, 0],
)
# Use rotation estimates which are closest
R1_abs = np.stack([p[0] for p in pairs], 0)[np.argmin(diffs)]
R2_abs = np.stack([p[1] for p in pairs], 0)[np.argmin(diffs)]
return {"t": t_abs, "R1": R1_abs, "R2": R2_abs}
def _run_model(self, ref_obs, query_obs, use_uncertainty, legacy_pose_transform=False):
# Prepare input
selected_query_obs = query_obs["image"]
selected_ref_obs = [r["image"] for r in ref_obs]
# Run relative pose estimation model
ref_to_query_T, uncertainty = self.rel_pose_estimation_model.predict_using_raw_data(selected_ref_obs, selected_query_obs, use_uncertainty, legacy_pose_transform)
return ref_to_query_T, uncertainty
def predict(self, ref_obs, reference_cam_poses, query_obs, query_pose, legacy_pose_transform=False):
# Get relative pose estimates and optional also uncertainty
ref_to_query, t_uncertainty = self._run_model(ref_obs, query_obs, self.use_uncertainty, legacy_pose_transform)
# Sort out invalid pose estimations
valid_pairs = []
for i, delta_pair in enumerate(ref_to_query):
if delta_pair is not None:
valid_pairs.append(i)
# We need at least two
if len(valid_pairs) < 2:
return None
best_estimate = None
max_inlier = -1
max_inlier_mean_uncertainty = None
with tf.device("/cpu:0"):
# Go over all combination of pairs
pairs = list(itertools.combinations(valid_pairs, 2))
for i, j in pairs:
# Triangulate absolute pose based on the two estimates
abs_pose_est = self.abs_pose_from_pair(ref_to_query[i], reference_cam_poses[i], ref_to_query[j], reference_cam_poses[j])
# Go over all reference images
inliers = 0
for k in valid_pairs:
third_pair = ref_to_query[k]
# Compute relative rotation between pose prediction and ref image
R_r3 = RMatrix.inverse(reference_cam_poses[k]["R"])
dR1_q_r3 = third_pair["R1"]
# Compute relative translation between pose prediction and ref image
dir_3 = np.expand_dims(abs_pose_est["t"] - reference_cam_poses[k]["t"], 1)
t_pred = RMatrix.apply(R_r3, dir_3)[:3, 0]
# Compute predicted relative translation by ref image
tk = np.expand_dims(third_pair["t"], 1)
tk = -RMatrix.apply(RMatrix.inverse(dR1_q_r3), tk)[:3, 0]
                    # Compute the angle between the two directions
alpha = np.arccos(np.clip(np.dot(t_pred, tk) / (np.linalg.norm(t_pred) * np.linalg.norm(tk)), -1, 1))
alpha = alpha / np.pi * 180
# Check if we count it as an inlier:
# - check that angle < 15deg
# - check that 0.5 < (actual scale / predicted scale) < 2 (optional)
                    s_min = 0.5
                    s_max = 2
                    # NB: the trailing abs(...) < 0 term can never be true, so the scale
                    # check effectively reduces to the s_min < ratio < s_max window.
                    if alpha < 15 and (not self.use_scale or (s_min < np.linalg.norm(abs_pose_est["t"] - reference_cam_poses[k]["t"]) / np.linalg.norm(third_pair["t"]) < s_max or abs(np.linalg.norm(abs_pose_est["t"] - reference_cam_poses[k]["t"]) - np.linalg.norm(third_pair["t"])) < 0)):
inliers += 1
# Compute uncertainty of hypothesis
if self.use_uncertainty:
mean_uncertainty = t_uncertainty[i] + t_uncertainty[j]
else:
mean_uncertainty = None
                # Check whether this hypothesis has more inliers or, on a tie, lower uncertainty than the best so far
if inliers > max_inlier or (self.use_uncertainty and inliers == max_inlier and max_inlier_mean_uncertainty > mean_uncertainty):
max_inlier = inliers
best_estimate = abs_pose_est
max_inlier_mean_uncertainty = mean_uncertainty
if self.debug:
print(inliers, i, j, np.linalg.norm(abs_pose_est["t"] - query_pose[:3]), 180.0 / np.pi * RMatrix.rotation_diff(query_pose[:3, :3], RMatrix.inverse(RMatrix.from_quaternion(RMatrix.average_quaternions(np.stack((RMatrix.to_quaternion(abs_pose_est["R1"]), RMatrix.to_quaternion(abs_pose_est["R2"])), 0))))), mean_uncertainty)
if best_estimate is None:
return None
# Take the mean of the two rotation estimates of query image
best_rot = RMatrix.from_quaternion(RMatrix.average_quaternions(np.stack((RMatrix.to_quaternion(best_estimate["R1"]), RMatrix.to_quaternion(best_estimate["R2"])), 0)))
# Return the best estimate
return {"t": best_estimate["t"], "R": RMatrix.inverse(best_rot)}
| 46.560811 | 341 | 0.622116 | 969 | 6,891 | 4.133127 | 0.184727 | 0.018727 | 0.037453 | 0.032459 | 0.285643 | 0.233708 | 0.114107 | 0.08789 | 0.075406 | 0.067915 | 0 | 0.029026 | 0.280075 | 6,891 | 147 | 342 | 46.877551 | 0.778271 | 0.161225 | 0 | 0.022989 | 0 | 0 | 0.010264 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.045977 | false | 0 | 0.068966 | 0 | 0.183908 | 0.011494 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0fb1e27fa192e9d6eccde0c31ce54f32dc4cf2ee | 3,569 | py | Python | sa/interfaces/igetinventory.py | sbworth/getnoc | a9a5647df31822062db3db7afe7ae1c005d166f7 | [
"BSD-3-Clause"
] | null | null | null | sa/interfaces/igetinventory.py | sbworth/getnoc | a9a5647df31822062db3db7afe7ae1c005d166f7 | [
"BSD-3-Clause"
] | null | null | null | sa/interfaces/igetinventory.py | sbworth/getnoc | a9a5647df31822062db3db7afe7ae1c005d166f7 | [
"BSD-3-Clause"
] | null | null | null | # ---------------------------------------------------------------------
# IGetInventory
# ---------------------------------------------------------------------
# Copyright (C) 2007-2013 The NOC Project
# See LICENSE for details
# ---------------------------------------------------------------------
# NOC Modules
from noc.core.interface.base import BaseInterface
from .base import (
DictListParameter,
StringParameter,
BooleanParameter,
FloatParameter,
StringListParameter,
REStringParameter,
OIDParameter,
LabelListParameter,
)
class IGetInventory(BaseInterface):
returns = DictListParameter(
attrs={
# Object type, used in ConnectionRule
"type": StringParameter(required=False),
# Object number as reported by script
"number": StringParameter(required=False),
# Builtin modules apply ConnectionRule scopes
            # but are not submitted to the database
"builtin": BooleanParameter(default=False),
# Object vendor. Must match Vendor.code
"vendor": StringParameter(),
# List of part numbers
# May contain
# * NOC model name
# * asset.part_no* value (Part numbers)
# * asset.order_part_no* value (FRU numbers)
"part_no": StringListParameter(convert=True),
# Optional revision
"revision": StringParameter(required=False),
# Serial number
"serial": StringParameter(required=False),
#
"mfg_date": REStringParameter(r"^\d{4}-\d{2}-\d{2}$", required=False),
# Optional description
"description": StringParameter(required=False),
# Optional internal crossing
"crossing": DictListParameter(
attrs={
# Input connection name, according to model
"in": StringParameter(),
# Output connection name, according to model
"out": StringParameter(),
# Power gain, in dB
"gain": FloatParameter(),
},
required=False,
),
# Optional Sensors
"sensors": DictListParameter(
attrs={
# Sensor number inside object, for deduplicate
# "number": StringParameter(),
# Sensor name. Must be unique
"name": StringParameter(required=True),
# Sensor operational status
# True - ok (agent can obtain the sensor value)
# False - nonoperational (agent believes the sensor is broken)
"status": BooleanParameter(default=True),
# Optional description
"description": StringParameter(required=False),
#
"labels": LabelListParameter(required=False),
# MeasurementUnit Name
"measurement": StringParameter(default="Scalar"),
# Collected hints
# OID for collecting by SNMP
"snmp_oid": OIDParameter(required=False),
# ID for IPMI collected
"ipmi_id": StringParameter(required=False),
# Optional internals Thresholds ?
},
required=False,
),
}
)
preview = "NOC.sa.managedobject.scripts.ShowInventory"
| 40.101124 | 82 | 0.504063 | 260 | 3,569 | 6.892308 | 0.484615 | 0.087054 | 0.109375 | 0.050223 | 0.098214 | 0.064732 | 0 | 0 | 0 | 0 | 0 | 0.004822 | 0.360885 | 3,569 | 88 | 83 | 40.556818 | 0.780798 | 0.334828 | 0 | 0.195652 | 0 | 0 | 0.088689 | 0.017995 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.043478 | 0 | 0.108696 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0fb2cdd8380ea25ac46fe20f818293fa64be311d | 1,969 | py | Python | csv_to_tex.py | AIReproducibility2018/UTILS_tablegen | 65db8198aecc48f32859b9d78f1a65e7d8783d02 | [
"BSD-3-Clause"
] | null | null | null | csv_to_tex.py | AIReproducibility2018/UTILS_tablegen | 65db8198aecc48f32859b9d78f1a65e7d8783d02 | [
"BSD-3-Clause"
] | null | null | null | csv_to_tex.py | AIReproducibility2018/UTILS_tablegen | 65db8198aecc48f32859b9d78f1a65e7d8783d02 | [
"BSD-3-Clause"
] | null | null | null | from gentable import load_rows
import os
import pandas as pd
def df_to_tex(df, coltitles_to_wrap, savepath, escape=True):
wrap_these = lambda x: x.lower() in [y.lower() for y in coltitles_to_wrap]
latex_col_formats = ['X' if wrap_these(col) else 'l' for col in df.columns]
tex = df.to_latex(index=False, column_format=''.join(latex_col_formats), escape=escape)
tex = tex.replace("\\begin{tabular}", "\\begin{tabularx}{\\textwidth}")
tex = tex.replace("\\end{tabular}", "\\end{tabularx}")
#print(tex)
with open(savepath, 'w') as f:
print("Writing to {}".format(savepath))
f.write(tex)
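# Illustrative sketch: df_to_tex on a throwaway frame. The output path below
# is invented for demonstration.
def _example_df_to_tex():
    demo = pd.DataFrame({'Article': ['A very long article title that should wrap'],
                         'Year': [2018]})
    df_to_tex(demo, coltitles_to_wrap=['Article'], savepath='/tmp/example.tex')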
# This code is awful.
def main():
pd.set_option('display.max_colwidth', 0)
names = ['pcat.csv', 'acat.csv', 'ecat.csv', 'papers.csv']
for name in names:
df = load_rows(name)
if name == 'papers.csv':
# Move the year column right after article
cols = df.columns.tolist()
cols.remove('Year')
idx = cols.index('Article')
cols.insert(idx+1, 'Year')
df = df[cols]
coltitles_to_wrap = ['Article', 'Problem Category', 'Error Category', 'Assumption Category']
filename = "{}.tex".format(name)
savepath = os.path.join("output_figures", filename)
df_to_tex(df, coltitles_to_wrap, savepath)
all_papers_dir = 'all_papers_used'
for csv in os.listdir(all_papers_dir):
if csv[-4:] != '.csv':
continue
inpath = os.path.join(all_papers_dir, csv)
df = load_rows(inpath)
df['Citation'] = df['Citation'].map(lambda x: "{%s}" % x)
coltitles_to_wrap = ['Title']
new_filename = "{}.tex".format(csv)
savepath = os.path.join("all_papers_used", new_filename)
df_to_tex(df, coltitles_to_wrap, savepath, escape=False)
if __name__ == '__main__':
main() | 32.816667 | 100 | 0.622651 | 272 | 1,969 | 4.316176 | 0.393382 | 0.056218 | 0.076661 | 0.022998 | 0.13799 | 0.105622 | 0.105622 | 0.105622 | 0.105622 | 0 | 0 | 0.001987 | 0.233113 | 1,969 | 60 | 101 | 32.816667 | 0.775497 | 0.036059 | 0 | 0 | 0 | 0 | 0.168249 | 0.015823 | 0 | 0 | 0 | 0 | 0 | 1 | 0.046512 | false | 0 | 0.139535 | 0 | 0.186047 | 0.023256 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0fb2fac020d67c3e308aca59cffdbfed2b80ebef | 2,922 | py | Python | env.py | Arrabonae/openai_DDDQN | 33f8ba87e3c0a69381129d6ae805b3dc2b0398fe | [
"Apache-2.0"
] | 6 | 2020-10-28T09:05:55.000Z | 2022-03-17T06:27:05.000Z | env.py | Arrabonae/openai_DDDQN | 33f8ba87e3c0a69381129d6ae805b3dc2b0398fe | [
"Apache-2.0"
] | null | null | null | env.py | Arrabonae/openai_DDDQN | 33f8ba87e3c0a69381129d6ae805b3dc2b0398fe | [
"Apache-2.0"
] | 1 | 2021-12-25T05:16:55.000Z | 2021-12-25T05:16:55.000Z | import numpy as np
import gym
import collections
import cv2
class RepeatActionAndMaxFrame(gym.Wrapper):
def __init__(self, env=None, repeat=4):
super(RepeatActionAndMaxFrame, self).__init__(env)
self.repeat = repeat
self.shape = env.observation_space.low.shape
        # two-slot object buffer holding the last two raw frames
        self.frame_buffer = np.zeros_like((2,self.shape), dtype=object)
def step(self, action):
t_reward = 0.0
done = False
for i in range(self.repeat):
obs, reward, done, info = self.env.step(action)
t_reward += reward
idx = i % 2
self.frame_buffer[idx] = obs
if done:
break
max_frame = np.maximum(self.frame_buffer[0], self.frame_buffer[1])
return max_frame, t_reward, done, info
def reset(self):
obs = self.env.reset()
self.frame_buffer = np.zeros_like((2,self.shape), dtype=object)
self.frame_buffer[0] = obs
return obs
class PreprocessFrame(gym.ObservationWrapper):
def __init__(self, shape, env=None):
super(PreprocessFrame, self).__init__(env)
self.shape=(shape[2], shape[0], shape[1])
self.observation_space = gym.spaces.Box(low=np.float32(0), high=np.float32(1.0),
shape=self.shape, dtype=np.float32)
def observation(self, obs):
new_frame = cv2.cvtColor(obs, cv2.COLOR_RGB2GRAY)
resized_screen = cv2.resize(new_frame, self.shape[1:],
interpolation=cv2.INTER_AREA)
new_obs = np.array(resized_screen, dtype=np.uint8).reshape(self.shape)
new_obs = np.swapaxes(new_obs, 2,0)
new_obs = new_obs / 255.0
return new_obs
class StackFrames(gym.ObservationWrapper):
def __init__(self, env, n_steps):
super(StackFrames, self).__init__(env)
self.observation_space = gym.spaces.Box(
np.float32(env.observation_space.low.repeat(n_steps, axis=0)),
np.float32(env.observation_space.high.repeat(n_steps, axis=0)),
dtype=np.float32)
self.stack = collections.deque(maxlen=n_steps)
def reset(self):
self.stack.clear()
observation = self.env.reset()
for _ in range(self.stack.maxlen):
self.stack.append(observation)
return np.array(self.stack).reshape(self.observation_space.low.shape)
def observation(self, observation):
self.stack.append(observation)
obs = np.array(self.stack).reshape(self.observation_space.low.shape)
return obs
def make_env(env_name, shape=(84,84,1), skip=4):
    # use the stdlib warnings module (np.warnings is not a public API)
    import warnings
    warnings.filterwarnings('ignore', category=np.VisibleDeprecationWarning)
env = gym.make(env_name)
env = RepeatActionAndMaxFrame(env, skip)
env = PreprocessFrame(shape, env)
env = StackFrames(env, skip)
return env
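# Illustrative usage sketch: the env id below is an assumption; any
# pixel-observation Atari id works.
def example_rollout():
    env = make_env('PongNoFrameskip-v4')
    obs = env.reset()
    obs, reward, done, info = env.step(env.action_space.sample())
    return obs.shape # (4, 84, 84): four stacked 84x84 grayscale frames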
| 35.634146 | 92 | 0.621834 | 369 | 2,922 | 4.750678 | 0.241192 | 0.041072 | 0.051341 | 0.02567 | 0.236167 | 0.148317 | 0.111808 | 0.111808 | 0.111808 | 0.111808 | 0 | 0.022833 | 0.265572 | 2,922 | 81 | 93 | 36.074074 | 0.794035 | 0 | 0 | 0.121212 | 0 | 0 | 0.002053 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.136364 | false | 0 | 0.060606 | 0 | 0.333333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0fb49b5becb583e7009cba76cd4ece027c71be49 | 491 | py | Python | setup.py | ashafer01/python-ambisync | 4eb6d33f98a400f32f2b0bddcfd8c296d2cf96bc | [
"MIT"
] | null | null | null | setup.py | ashafer01/python-ambisync | 4eb6d33f98a400f32f2b0bddcfd8c296d2cf96bc | [
"MIT"
] | null | null | null | setup.py | ashafer01/python-ambisync | 4eb6d33f98a400f32f2b0bddcfd8c296d2cf96bc | [
"MIT"
] | null | null | null | from setuptools import setup
desc = 'Define methods that can dynamically shift between synchronous and async'
long_desc = f'''{desc}
Please star the repo on `GitHub <https://github.com/ashafer01/python-ambisync>`_!
'''
setup(
name='ambisync',
version='0.2.0',
author='Alex Shafer',
author_email='ashafer@pm.me',
url='https://github.com/ashafer01/python-ambisync',
license='MIT',
description=desc,
long_description=long_desc,
py_modules=['ambisync'],
)
| 24.55 | 81 | 0.696538 | 64 | 491 | 5.25 | 0.703125 | 0.047619 | 0.083333 | 0.136905 | 0.220238 | 0.220238 | 0 | 0 | 0 | 0 | 0 | 0.016949 | 0.158859 | 491 | 19 | 82 | 25.842105 | 0.79661 | 0 | 0 | 0 | 0 | 0.0625 | 0.515275 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.0625 | 0 | 0.0625 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0fb650a7e0ec0c918131916642eea39d14bd2220 | 1,470 | py | Python | examples/simple_example.py | florisrc/n-beats | 46828b07a08641ab5eaef8613debd0f3a3c99ed8 | [
"MIT"
] | 1 | 2020-09-20T18:23:56.000Z | 2020-09-20T18:23:56.000Z | examples/simple_example.py | florisrc/n-beats | 46828b07a08641ab5eaef8613debd0f3a3c99ed8 | [
"MIT"
] | null | null | null | examples/simple_example.py | florisrc/n-beats | 46828b07a08641ab5eaef8613debd0f3a3c99ed8 | [
"MIT"
] | 1 | 2021-04-16T09:41:58.000Z | 2021-04-16T09:41:58.000Z | import numpy as np
from nbeats_keras.model import NBeatsNet
def main():
# https://keras.io/layers/recurrent/
num_samples, time_steps, input_dim, output_dim = 50_000, 10, 1, 1
# Definition of the model.
model = NBeatsNet(backcast_length=time_steps, forecast_length=output_dim,
stack_types=(NBeatsNet.GENERIC_BLOCK, NBeatsNet.GENERIC_BLOCK), nb_blocks_per_stack=2,
thetas_dim=(4, 4), share_weights_in_stack=True, hidden_layer_units=64)
# Definition of the objective function and the optimizer.
model.compile_model(loss='mae', learning_rate=1e-5)
    # Definition of the data. The problem to solve is to find f such that |f(x) - y| -> 0.
x = np.random.uniform(size=(num_samples, time_steps, input_dim))
y = np.mean(x, axis=1, keepdims=True)
# Split data into training and testing datasets.
c = num_samples // 10
x_train, y_train, x_test, y_test = x[c:], y[c:], x[:c], y[:c]
# Train the model.
model.fit(x_train, y_train, validation_data=(x_test, y_test), epochs=2, batch_size=128)
# Save the model for later.
model.save('n_beats_model.h5')
# Predict on the testing set.
predictions = model.predict(x_test)
print(predictions.shape)
# Load the model.
model2 = NBeatsNet.load('n_beats_model.h5')
predictions2 = model2.predict(x_test)
np.testing.assert_almost_equal(predictions, predictions2)
if __name__ == '__main__':
main()
| 32.666667 | 108 | 0.683673 | 221 | 1,470 | 4.303167 | 0.502262 | 0.033649 | 0.047319 | 0.039958 | 0.056782 | 0.056782 | 0 | 0 | 0 | 0 | 0 | 0.025641 | 0.204082 | 1,470 | 44 | 109 | 33.409091 | 0.787179 | 0.227211 | 0 | 0 | 0 | 0 | 0.038188 | 0 | 0 | 0 | 0 | 0 | 0.047619 | 1 | 0.047619 | false | 0 | 0.095238 | 0 | 0.142857 | 0.047619 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0fb7fb95900ecf367a17b0e5bad2f5a6a92e2e5d | 3,961 | py | Python | workflow/scripts/VennDiagrams.py | sanjaynagi/rna-seq-pop | fedbdd374837876947be5c4d113f05a1577045ca | [
"MIT"
] | 2 | 2021-06-22T13:05:43.000Z | 2022-01-31T08:00:33.000Z | workflow/scripts/VennDiagrams.py | sanjaynagi/rna-seq-pop | fedbdd374837876947be5c4d113f05a1577045ca | [
"MIT"
] | 3 | 2021-06-18T09:22:29.000Z | 2022-03-26T19:52:11.000Z | workflow/scripts/VennDiagrams.py | sanjaynagi/rna-seq-pop | fedbdd374837876947be5c4d113f05a1577045ca | [
"MIT"
] | 1 | 2021-12-16T03:11:02.000Z | 2021-12-16T03:11:02.000Z | #!/usr/bin/env python3
"""
A script to get the intersections of Differential expression results, Fst, and differential SNPs analysis.
Draws Venn diagrams and adds columns to RNA-seq-diff.xlsx, whether the gene has high Fst/PBS/diffsnps.
"""
import sys
sys.stderr = open(snakemake.log[0], "w")
import matplotlib
matplotlib.use('agg')
import matplotlib.pyplot as plt
from matplotlib_venn import *
import pandas as pd
import numpy as np
from pathlib import Path
def plotvenn2(name, group1, group2, nboth,stat="DE_PBS", group1name='Significant up DE genes', group2name='High PBS'):
print(f"There are {group2} high Fst genes in {name}")
print(f"There are {nboth} shared in {name}")
venn2(subsets = (group1, group2, nboth), set_labels = (group1name, group2name),
set_colors=('r', 'g'),
alpha = 0.5);
venn2_circles(subsets = (group1, group2, nboth))
plt.title(f"{name}")
plt.savefig(f"results/venn/{name}_{stat}.venn.png")
plt.close()
def intersect2(one, two, df, write=True, path=None):
inter = [x for x in list(one.GeneID) if x in list(two.GeneID)]
length = len(inter)
intersected_df = df[df.GeneID.isin(inter)]
intersected_df.to_csv(f"{path}", sep="\t")
return(length, intersected_df)
def add_columns_xlsx(name, de, fst, highfst, diffsnps, diffsnpsdf=None):
rnaxlsx = pd.read_excel("results/genediff/RNA-Seq_diff.xlsx",
sheet_name=name)
highfst_bool = de.GeneID.isin(highfst.GeneID).astype(str)
rnaxlsx['HighFst'] = highfst_bool
if diffsnps:
diffsnps_bool = de.GeneID.isin(diffsnpsdf.GeneID).astype(str)
rnaxlsx['DiffSNPs'] = diffsnps_bool
# add column of number of SNPs
merged = pd.merge(de, fst, how="outer")
rnaxlsx['nSNPs'] = merged['nSNPs']
return(rnaxlsx)
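# Illustrative sketch: intersect2 on two toy gene tables (GeneIDs invented).
def _example_intersect2():
    a = pd.DataFrame({"GeneID": ["AGAP000001", "AGAP000002"]})
    b = pd.DataFrame({"GeneID": ["AGAP000002", "AGAP000003"]})
    n, shared = intersect2(a, b, a, path="/tmp/example.intersection.tsv")
    return n, shared # one shared gene: AGAP000002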
#### Main ####
# Read contrasts in and other snakemake params
comparisons = pd.DataFrame(snakemake.params['DEcontrasts'], columns=['contrast'])
comparisons = comparisons.contrast.str.split("_", expand=True)
comparisons = [list(row) for i,row in comparisons.iterrows()]
percentile = snakemake.params['percentile']
diffsnps = snakemake.params['diffsnps']
# Significance and fold-change cutoffs used below. The original script never
# defines these names, so the defaults here are assumptions.
pval_threshold = 0.05
upper_fc = 2
lower_fc = 1 / upper_fc
# Create a Pandas Excel writer using XlsxWriter as the engine.
writer = pd.ExcelWriter('results/RNA-Seq-full.xlsx', engine='xlsxwriter')
#### Differential expression v Fst venn diagram
for comp1,comp2 in comparisons:
name = comp1 + "_" + comp2
print(f"\n-------------- Venn Diagram for {name} --------------")
de = pd.read_csv(f"results/genediff/{name}.csv")
fst = pd.read_csv("results/variantAnalysis/selection/FstPerGene.tsv", sep="\t")
#compare sig DE genes and top 5% fst genes?
#get sig up and down diffexp genes
sigde = de[de['padj'] < pval_threshold]
sigde_up = sigde[sigde['FC'] > upper_fc]
sigde_down = sigde[sigde['FC'] < lower_fc]
#take top percentile of fst genes
highfst = fst.nlargest(int(fst.shape[0]*percentile),f"{name}_zFst")
#how many fst? how many sig de up and down?
nfst = highfst.shape[0]
nde_up = sigde_up.shape[0]
nde_down = sigde_down.shape[0]
print(f"There are {nde_up} significantly upregulated genes in {name}")
print(f"There are {nde_down} significantly downregulated genes in {name}")
nboth, _ = intersect2(sigde_up,
highfst,
de,
write=True,
path=f"results/venn/{name}.DE.Fst.intersection.tsv")
###### XLSX file ######
if diffsnps:
        diffsnpsDE = pd.read_csv(f"results/diffsnps/{name}.sig.kissDE.tsv", sep="\t")
sheet = add_columns_xlsx(name, de, fst, highfst, diffsnps, diffsnpsDE)
else:
        sheet = add_columns_xlsx(name, de, fst, highfst, diffsnps, diffsnpsdf=None)
# Write each dataframe to a different worksheet.
sheet.to_excel(writer, sheet_name=name)
# Close the Pandas Excel writer and output the Excel file.
writer.save() | 35.053097 | 118 | 0.666246 | 544 | 3,961 | 4.770221 | 0.347426 | 0.011561 | 0.016956 | 0.02158 | 0.082466 | 0.074759 | 0.074759 | 0.055491 | 0.040848 | 0.040848 | 0 | 0.009122 | 0.197425 | 3,961 | 113 | 119 | 35.053097 | 0.807172 | 0.171674 | 0 | 0.029412 | 0 | 0 | 0.205184 | 0.077137 | 0 | 0 | 0 | 0 | 0 | 1 | 0.044118 | false | 0 | 0.102941 | 0 | 0.147059 | 0.073529 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0fb8296b4cebc1434177ec2303ca7f75de2ac98f | 2,698 | py | Python | cogs/help_and_setup.py | samarth-1729/EngineeringTimes-Contest-Bot | f96d288e1c11f50ca4f09519c467bbad852912ce | [
"Apache-2.0"
] | null | null | null | cogs/help_and_setup.py | samarth-1729/EngineeringTimes-Contest-Bot | f96d288e1c11f50ca4f09519c467bbad852912ce | [
"Apache-2.0"
] | null | null | null | cogs/help_and_setup.py | samarth-1729/EngineeringTimes-Contest-Bot | f96d288e1c11f50ca4f09519c467bbad852912ce | [
"Apache-2.0"
] | null | null | null | # from cogs.code_chef import PORT
import discord
import os
from discord.ext import commands
# importing database manager
import psycopg2
# import datetime
# from datetime import datetime as dtime
from dotenv import load_dotenv
load_dotenv()
PASS = os.getenv("PASSWORD")
PORT = os.getenv("PORT")
conn_info = psycopg2.connect(f"dbname=guild_info.db host=localhost port={PORT} user=postgres password={PASS}")
class Help(commands.Cog):
def __init__(self, client):
self.client = client
@commands.command()
@commands.has_permissions(manage_messages=True)
async def setup(self, ctx):
cursor_info = conn_info.cursor()
channel = ctx.channel
# getting server ID as string to navigate the database
server = str(ctx.guild.id)
cursor_info.execute("SELECT CHANNEL FROM info WHERE GUILD = %s", (server, ))
# storing the row which contains this server ID
old_channel = cursor_info.fetchone()
# in case of no such row in database, new row will be made
if old_channel is None:
cursor_info.execute(("INSERT INTO info VALUES (%s, %s, %s)"), (server, str(channel.id), ctx.guild.name))
await ctx.send(f"Your channel has been set to {channel.mention}")
# if row is already there, channel ID will be updated
        else:
cursor_info.execute(("UPDATE info SET CHANNEL = %s WHERE GUILD = %s"), (str(channel.id), server))
await ctx.send(f"Your channel has been updated to {channel.mention}")
# save changes and close connection
conn_info.commit()
cursor_info.close()
# help command
@commands.command()
async def help(self, ctx):
embed = discord.Embed(
title='**Commands**',
description='',
colour=discord.Colour.dark_blue()
)
name_set = "**__setup__**: *Command to setup the new bot*"
val_set = "*No Subcommands*"
embed.add_field(name=name_set, value=val_set, inline=False)
name_cc = "**__codechef__**: *Command to give codechef data*"
val_cc = "__Subcommands__: *present/future*, *lt/lc/cf*"
embed.add_field(name=name_cc, value=val_cc, inline=False)
name_cf = "**__codeforces__**: *Command to give codeforces data*"
val_cf = "*No Subcommands*"
embed.add_field(name=name_cf, value=val_cf, inline=False)
# name_ed = "**editorials**: *Command to give codechef editorials*"
val_cf = "*No Subcommands*"
embed.add_field(name=name_cf, value=val_cf, inline=False)
await ctx.send(embed=embed)
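# Illustrative sketch: the "info" table this cog reads and writes. Column
# types and the third column's name are assumptions inferred from the
# queries above.
INFO_TABLE_DDL = """
CREATE TABLE IF NOT EXISTS info (
    GUILD   TEXT PRIMARY KEY,
    CHANNEL TEXT,
    NAME    TEXT
);
"""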
def setup(client):
client.add_cog(Help(client))
| 33.725 | 116 | 0.651594 | 361 | 2,698 | 4.703601 | 0.360111 | 0.035336 | 0.030624 | 0.040047 | 0.141932 | 0.129564 | 0.129564 | 0.109541 | 0.073027 | 0.073027 | 0 | 0.000972 | 0.237213 | 2,698 | 79 | 117 | 34.151899 | 0.824101 | 0.16086 | 0 | 0.122449 | 0 | 0 | 0.248224 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.040816 | false | 0.040816 | 0.102041 | 0 | 0.163265 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0fb9af684f7f7e5af11847504d3f706a352d0b72 | 278 | py | Python | python-101/idiomatic/idio_module.py | carmine/uni-py | b2311c311b7f6c91242888130c9c207e9d056052 | [
"Apache-2.0"
] | null | null | null | python-101/idiomatic/idio_module.py | carmine/uni-py | b2311c311b7f6c91242888130c9c207e9d056052 | [
"Apache-2.0"
] | null | null | null | python-101/idiomatic/idio_module.py | carmine/uni-py | b2311c311b7f6c91242888130c9c207e9d056052 | [
"Apache-2.0"
] | null | null | null | """idiomatic module structure"""
# imports
from os import sys
# constants
# exception classes
# interface functions
# classes
# internal functions & classes
def main():
    print("\nHello Python World\n")
    return 0
if __name__ == '__main__':
status = main()
sys.exit(status)
| 14.631579 | 35 | 0.68705 | 32 | 278 | 5.71875 | 0.78125 | 0.174863 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.194245 | 278 | 18 | 36 | 15.444444 | 0.816964 | 0.431655 | 0 | 0 | 0 | 0 | 0.204082 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.166667 | false | 0 | 0.166667 | 0 | 0.333333 | 0.166667 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0fb9b21c6640ceaf51e844038aab2022977f7de3 | 1,569 | py | Python | news_collector/news_collector/spiders/minutouno.py | mfalcon/chequeabot | 380ac0f19f27b29237c36205bdb94412eb4f7cd3 | [
"MIT"
] | 11 | 2019-02-05T06:59:21.000Z | 2021-02-04T10:00:28.000Z | news_collector/news_collector/spiders/minutouno.py | mfalcon/chequeabot | 380ac0f19f27b29237c36205bdb94412eb4f7cd3 | [
"MIT"
] | 8 | 2021-03-18T21:37:54.000Z | 2022-03-11T23:36:04.000Z | news_collector/news_collector/spiders/minutouno.py | chequeado/chequeabot | 682289952d6160aa6a6e70b002564e6b9c4be094 | [
"MIT"
] | 4 | 2019-11-18T21:48:35.000Z | 2020-11-04T13:39:39.000Z | import datetime
import newspaper
import scrapy
import locale
locale.setlocale(locale.LC_ALL, "es_AR.utf8")
BASE_URL = 'http://www.minutouno.com'
class MinutoUnoSpider(scrapy.Spider):
name = "m1"
def start_requests(self):
urls = [
'https://www.minutouno.com/politica',
'https://www.minutouno.com/economia'
]
for url in urls:
yield scrapy.Request(url=url, callback=self.parse_seccion)
def parse_seccion(self, response):
noticias = set(response.xpath('//div[@class="note"]/article//a[contains(@href,"notas")]/@href').extract())
for noticia_url in noticias:
yield scrapy.Request(url=noticia_url, callback=self.parse_noticia)
def parse_noticia(self, response):
ff = newspaper.Article(response.url)
ff.download()
ff.parse()
noticia_fecha = ff.publish_date
if not noticia_fecha:
try:
fecha_texto = response.xpath('//span[@class="date"]/text()').extract()[0].split('-')[0].lower().strip()
noticia_fecha = datetime.datetime.strptime(fecha_texto, '%d de %B de %Y')
except:
noticia_fecha = datetime.datetime.now()
noticia_cuerpo = ff.text
data = {
'titulo': ff.title,
'fecha': noticia_fecha,
'noticia_texto': noticia_cuerpo,
'noticia_url': response.url,
'source': 'minuto1',
'formato': 'web'
}
yield data
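# Illustrative sketch: running the spider standalone (assumes Scrapy >= 2.1
# for the FEEDS setting) instead of the scrapy CLI.
if __name__ == '__main__':
    from scrapy.crawler import CrawlerProcess
    process = CrawlerProcess(settings={'FEEDS': {'noticias.json': {'format': 'json'}}})
    process.crawl(MinutoUnoSpider)
    process.start()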
| 26.15 | 120 | 0.578075 | 173 | 1,569 | 5.115607 | 0.462428 | 0.067797 | 0.050847 | 0.045198 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.0045 | 0.291906 | 1,569 | 59 | 121 | 26.59322 | 0.792079 | 0 | 0 | 0.04878 | 0 | 0 | 0.170172 | 0.057361 | 0 | 0 | 0 | 0 | 0 | 1 | 0.073171 | false | 0 | 0.121951 | 0 | 0.243902 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0fba397f34a6439a61d5d532f96208d823cbbddb | 2,524 | py | Python | tests/oauth.py | chaselgrove/hypothesis | 07a9c436208ec62b6d8b33c1cb4305a67baff8c6 | [
"BSD-2-Clause"
] | 6 | 2018-11-11T02:10:56.000Z | 2022-02-07T10:21:25.000Z | tests/oauth.py | chaselgrove/hypothesis | 07a9c436208ec62b6d8b33c1cb4305a67baff8c6 | [
"BSD-2-Clause"
] | 2 | 2018-08-10T02:32:29.000Z | 2018-08-21T15:34:13.000Z | tests/oauth.py | chaselgrove/hypothesis | 07a9c436208ec62b6d8b33c1cb4305a67baff8c6 | [
"BSD-2-Clause"
] | 2 | 2018-07-24T03:31:32.000Z | 2018-08-10T02:59:09.000Z | # See file COPYING distributed with python-hypothesis for copyright and
# license.
import unittest
import h_annot.oauth
from . import config, utils
class TestOAuthBasics(unittest.TestCase):
def test_url(self):
url = h_annot.api.oauth_url('client12345')
self.assertEqual(url, 'https://hypothes.is/oauth/authorize?response_type=code&client_id=client12345')
url = h_annot.api.oauth_url('client12345', 'state54321')
self.assertEqual(url, 'https://hypothes.is/oauth/authorize?response_type=code&client_id=client12345&state=state54321')
return
def test_auth_url(self):
client = h_annot.oauth.OAuthClient('client12345', 'secret24680')
self.assertEqual(client.auth_url('state54321'), 'https://hypothes.is/oauth/authorize?response_type=code&client_id=client12345&state=state54321')
return
def test_auth_url_no_state(self):
client = h_annot.oauth.OAuthClient('client12345', 'secret24680')
self.assertEqual(client.auth_url(), 'https://hypothes.is/oauth/authorize?response_type=code&client_id=client12345')
return
class TestOAuth(unittest.TestCase):
@config.server_test
@config.oauth_test
def setUp(self, client_id, client_secret, username, password):
self.client = h_annot.oauth.OAuthClient(client_id, client_secret)
self.code = utils.get_oauth_code(client_id, username, password)
self.creds = self.client.get_credentials(self.code)
return
@config.server_test
def test_oauth_credentials(self):
self.assertIsInstance(self.creds, h_annot.oauth.OAuthCredentials)
return
@config.server_test
def test_refresh_credentials(self):
access_token_0 = self.creds.access_token
expires_0 = self.creds.expires
refresh_token_0 = self.creds.refresh_token
self.creds.refresh()
self.assertNotEqual(self.creds.access_token, access_token_0)
self.assertNotEqual(self.creds.refresh_token, refresh_token_0)
self.assertGreater(self.creds.expires, expires_0)
return
@config.server_test
def test_revoke_credentials(self):
self.assertFalse(self.creds.revoked)
self.creds.revoke()
self.assertTrue(self.creds.revoked)
self.assertIsNone(self.creds.access_token)
self.assertIsNone(self.creds.token_type)
self.assertIsNone(self.creds.scope)
self.assertIsNone(self.creds.expires)
self.assertIsNone(self.creds.refresh_token)
return
# eof
| 38.242424 | 152 | 0.717908 | 310 | 2,524 | 5.645161 | 0.225806 | 0.087429 | 0.031429 | 0.071429 | 0.404 | 0.404 | 0.336 | 0.300571 | 0.300571 | 0.300571 | 0 | 0.036644 | 0.178288 | 2,524 | 65 | 153 | 38.830769 | 0.807136 | 0.032884 | 0 | 0.254902 | 0 | 0.039216 | 0.173984 | 0 | 0 | 0 | 0 | 0 | 0.294118 | 1 | 0.137255 | false | 0.039216 | 0.058824 | 0 | 0.372549 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0fba5912b7de62c9d124952991004a24914a0f8a | 2,852 | py | Python | miditoolkit/pianoroll/parser.py | jzq2000/miditoolkit | 735007cdf2673fb56c277de79af48dca4029ea40 | [
"MIT"
] | 85 | 2020-02-03T05:28:09.000Z | 2022-03-16T02:27:13.000Z | miditoolkit/pianoroll/parser.py | jzq2000/miditoolkit | 735007cdf2673fb56c277de79af48dca4029ea40 | [
"MIT"
] | 6 | 2020-02-11T09:02:01.000Z | 2021-11-22T18:56:45.000Z | miditoolkit/pianoroll/parser.py | jzq2000/miditoolkit | 735007cdf2673fb56c277de79af48dca4029ea40 | [
"MIT"
] | 13 | 2021-02-28T15:09:42.000Z | 2022-02-14T04:39:24.000Z | import numpy as np
from copy import deepcopy
from scipy.sparse import csc_matrix
import miditoolkit.midi.containers as ct
PITCH_RANGE = 128
# def get_onsets_pianoroll():
# pass
# def get_offsets_pianoroll():
# pass
def notes2pianoroll(
note_stream_ori,
ticks_per_beat=480,
downbeat=None,
resample_factor=1.0,
resample_method=round,
binary_thres=None,
max_tick=None,
to_sparse=False,
keep_note=True):
# pass by value
note_stream = deepcopy(note_stream_ori)
# sort by end time
note_stream = sorted(note_stream, key=lambda x: x.end)
# set max tick
if max_tick is None:
max_tick = 0 if len(note_stream) == 0 else note_stream[-1].end
# resampling
if resample_factor != 1.0:
max_tick = int(resample_method(max_tick * resample_factor))
for note in note_stream:
note.start = int(resample_method(note.start * resample_factor))
note.end = int(resample_method(note.end * resample_factor))
# create pianoroll
time_coo = []
pitch_coo = []
velocity = []
for note in note_stream:
# discard notes having no velocity
if note.velocity == 0:
continue
# duration
duration = note.end - note.start
# keep notes with zero length (set to 1)
if keep_note and (duration == 0):
duration = 1
note.end += 1
# set time
time_coo.extend(np.arange(note.start, note.end))
# set pitch
pitch_coo.extend([note.pitch] * duration)
# set velocity
v_tmp = note.velocity
if binary_thres is not None:
v_tmp = v_tmp > binary_thres
velocity.extend([v_tmp] * duration)
# output
pianoroll = csc_matrix((velocity, (time_coo, pitch_coo)), shape=(max_tick, PITCH_RANGE))
pianoroll = pianoroll if to_sparse else pianoroll.toarray()
return pianoroll
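# Illustrative sketch: a single-note round trip through both converters.
# Pitch/velocity values are invented for demonstration.
def example_roundtrip():
    note = ct.Note(velocity=100, pitch=60, start=0, end=480)
    roll = notes2pianoroll([note])
    back = pianoroll2notes(roll)
    return back[0].pitch, back[0].start, back[0].end # (60, 0, 480)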
def pianoroll2notes(
pianoroll,
resample_factor=1.0):
binarized = pianoroll > 0
padded = np.pad(binarized, ((1, 1), (0, 0)), "constant")
diff = np.diff(padded.astype(np.int8), axis=0)
positives = np.nonzero((diff > 0).T)
pitches = positives[0]
note_ons = positives[1]
note_offs = np.nonzero((diff < 0).T)[1]
notes = []
for idx, pitch in enumerate(pitches):
st = note_ons[idx]
ed = note_offs[idx]
velocity = pianoroll[st, pitch]
velocity = max(0, min(127, velocity))
notes.append(
ct.Note(
velocity=int(velocity),
pitch=pitch,
start=int(st*resample_factor),
end=int(ed*resample_factor)))
notes.sort(key=lambda x: x.start)
return notes | 26.165138 | 92 | 0.589411 | 360 | 2,852 | 4.502778 | 0.308333 | 0.055521 | 0.027761 | 0.029611 | 0.041949 | 0 | 0 | 0 | 0 | 0 | 0 | 0.019408 | 0.313464 | 2,852 | 109 | 93 | 26.165138 | 0.808478 | 0.093268 | 0 | 0.029851 | 0 | 0 | 0.003113 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.029851 | false | 0 | 0.059701 | 0 | 0.119403 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |