hexsha string | size int64 | ext string | lang string | max_stars_repo_path string | max_stars_repo_name string | max_stars_repo_head_hexsha string | max_stars_repo_licenses list | max_stars_count int64 | max_stars_repo_stars_event_min_datetime string | max_stars_repo_stars_event_max_datetime string | max_issues_repo_path string | max_issues_repo_name string | max_issues_repo_head_hexsha string | max_issues_repo_licenses list | max_issues_count int64 | max_issues_repo_issues_event_min_datetime string | max_issues_repo_issues_event_max_datetime string | max_forks_repo_path string | max_forks_repo_name string | max_forks_repo_head_hexsha string | max_forks_repo_licenses list | max_forks_count int64 | max_forks_repo_forks_event_min_datetime string | max_forks_repo_forks_event_max_datetime string | content string | avg_line_length float64 | max_line_length int64 | alphanum_fraction float64 | qsc_code_num_words_quality_signal int64 | qsc_code_num_chars_quality_signal float64 | qsc_code_mean_word_length_quality_signal float64 | qsc_code_frac_words_unique_quality_signal float64 | qsc_code_frac_chars_top_2grams_quality_signal float64 | qsc_code_frac_chars_top_3grams_quality_signal float64 | qsc_code_frac_chars_top_4grams_quality_signal float64 | qsc_code_frac_chars_dupe_5grams_quality_signal float64 | qsc_code_frac_chars_dupe_6grams_quality_signal float64 | qsc_code_frac_chars_dupe_7grams_quality_signal float64 | qsc_code_frac_chars_dupe_8grams_quality_signal float64 | qsc_code_frac_chars_dupe_9grams_quality_signal float64 | qsc_code_frac_chars_dupe_10grams_quality_signal float64 | qsc_code_frac_chars_replacement_symbols_quality_signal float64 | qsc_code_frac_chars_digital_quality_signal float64 | qsc_code_frac_chars_whitespace_quality_signal float64 | qsc_code_size_file_byte_quality_signal float64 | qsc_code_num_lines_quality_signal float64 | qsc_code_num_chars_line_max_quality_signal float64 | qsc_code_num_chars_line_mean_quality_signal float64 | 
qsc_code_frac_chars_alphabet_quality_signal float64 | qsc_code_frac_chars_comments_quality_signal float64 | qsc_code_cate_xml_start_quality_signal float64 | qsc_code_frac_lines_dupe_lines_quality_signal float64 | qsc_code_cate_autogen_quality_signal float64 | qsc_code_frac_lines_long_string_quality_signal float64 | qsc_code_frac_chars_string_length_quality_signal float64 | qsc_code_frac_chars_long_word_length_quality_signal float64 | qsc_code_frac_lines_string_concat_quality_signal float64 | qsc_code_cate_encoded_data_quality_signal float64 | qsc_code_frac_chars_hex_words_quality_signal float64 | qsc_code_frac_lines_prompt_comments_quality_signal float64 | qsc_code_frac_lines_assert_quality_signal float64 | qsc_codepython_cate_ast_quality_signal float64 | qsc_codepython_frac_lines_func_ratio_quality_signal float64 | qsc_codepython_cate_var_zero_quality_signal bool | qsc_codepython_frac_lines_pass_quality_signal float64 | qsc_codepython_frac_lines_import_quality_signal float64 | qsc_codepython_frac_lines_simplefunc_quality_signal float64 | qsc_codepython_score_lines_no_logic_quality_signal float64 | qsc_codepython_frac_lines_print_quality_signal float64 | qsc_code_num_words int64 | qsc_code_num_chars int64 | qsc_code_mean_word_length int64 | qsc_code_frac_words_unique null | qsc_code_frac_chars_top_2grams int64 | qsc_code_frac_chars_top_3grams int64 | qsc_code_frac_chars_top_4grams int64 | qsc_code_frac_chars_dupe_5grams int64 | qsc_code_frac_chars_dupe_6grams int64 | qsc_code_frac_chars_dupe_7grams int64 | qsc_code_frac_chars_dupe_8grams int64 | qsc_code_frac_chars_dupe_9grams int64 | qsc_code_frac_chars_dupe_10grams int64 | qsc_code_frac_chars_replacement_symbols int64 | qsc_code_frac_chars_digital int64 | qsc_code_frac_chars_whitespace int64 | qsc_code_size_file_byte int64 | qsc_code_num_lines int64 | qsc_code_num_chars_line_max int64 | qsc_code_num_chars_line_mean int64 | qsc_code_frac_chars_alphabet int64 | qsc_code_frac_chars_comments int64 | 
qsc_code_cate_xml_start int64 | qsc_code_frac_lines_dupe_lines int64 | qsc_code_cate_autogen int64 | qsc_code_frac_lines_long_string int64 | qsc_code_frac_chars_string_length int64 | qsc_code_frac_chars_long_word_length int64 | qsc_code_frac_lines_string_concat null | qsc_code_cate_encoded_data int64 | qsc_code_frac_chars_hex_words int64 | qsc_code_frac_lines_prompt_comments int64 | qsc_code_frac_lines_assert int64 | qsc_codepython_cate_ast int64 | qsc_codepython_frac_lines_func_ratio int64 | qsc_codepython_cate_var_zero int64 | qsc_codepython_frac_lines_pass int64 | qsc_codepython_frac_lines_import int64 | qsc_codepython_frac_lines_simplefunc int64 | qsc_codepython_score_lines_no_logic int64 | qsc_codepython_frac_lines_print int64 | effective string | hits int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
408378ae2d1cd6ca599deacc2843f436a637b9b1 | 7,472 | py | Python | IRIS/IRIS_formatting.py | Xinglab/IRIS | dc3c172eae9083daf57ce0e71c4fe322ab5cc928 | [
"BSD-2-Clause-FreeBSD"
] | 7 | 2019-11-21T08:42:37.000Z | 2021-08-13T15:49:18.000Z | IRIS/IRIS_formatting.py | Xinglab/IRIS | dc3c172eae9083daf57ce0e71c4fe322ab5cc928 | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | IRIS/IRIS_formatting.py | Xinglab/IRIS | dc3c172eae9083daf57ce0e71c4fe322ab5cc928 | [
"BSD-2-Clause-FreeBSD"
] | 2 | 2021-05-08T08:22:38.000Z | 2022-01-20T23:43:03.000Z | import sys, numpy, argparse, os
def loadSamplelist(fin_samples, sample_fin_list, sample_header, sample_name_field, sample_size):
for l in open(fin_samples):
ls=l.strip()
sample_fin_list.append(ls)
for r in open(ls):
rs=map(lambda x:x.split('/')[-sample_name_field].split('.bam')[0],r.strip().strip(',').split(','))
#rs=map(lambda x:x.split('/')[-2],r.strip().strip(',').split(','))
if sample_name_field==2:
sn_list=r.strip().strip(',').split(',')
for e,sn in enumerate(rs):
if len(sn)==0:
rs[e]=sn_list[e].split('/')[-1].split('.')[0]
sample_header+=rs
sample_size[ls]=len(r.split(','))
return sample_fin_list, sample_header, sample_size
def mergeEvents(events_fin_list):
	"""Build the union of splicing events across all rMATS annotation files.

	Every non-header line of each fromGTF file is reduced to a tab-joined
	coordinate key: field 1 and field 2 with surrounding quotes stripped,
	followed by fields 3-6 and 8-9.  Returns a dict whose keys are those
	coordinate strings (values are unused '' placeholders).
	"""
	merged = {}
	for annotation_path in events_fin_list:
		annotation = open(annotation_path)
		annotation.readline()  # skip the rMATS header row
		for line in annotation:
			fields = line.strip().split('\t')
			key_parts = [fields[1].strip('"'), fields[2].strip('"')] + fields[3:7] + fields[8:10]
			merged['\t'.join(key_parts)] = ''
	return merged
def writeMergedEvents(events_fin_list, splicing_event_type, cov_cutoff, data_name, fout_path):
	"""Merge events from all annotation files, write the sorted prefilter
	events list to
	<fout_path>/prefilter_events.splicing_matrix.<type>.cov<cutoff>.<name>.txt
	and return the sorted list of event-coordinate keys.
	"""
	total_event_dict=mergeEvents(events_fin_list)
	# Python 2 print statement; reports the merged event count.
	print len(total_event_dict)
	total_event_list=sorted(total_event_dict.keys())
	fout=open(fout_path+'/prefilter_events.splicing_matrix.'+splicing_event_type+'.cov'+str(cov_cutoff)+'.'+data_name+'.txt','w')
	for e in total_event_list:
		fout.write(e.strip()+'\n')
	fout.close()
	return total_event_list
def mergeMatrixInBatch(fin_list, events_fin_list, sample_fin_list, cov_cutoff, data_name, splicing_event_type, sample_header, sample_size, total_event_list, file_batch_list, batch, fout_path):
	"""Merge per-run rMATS JC count files into PSI-matrix batch files.

	Processes total_event_list in slices of `batch` events to bound memory.
	For each slice, every JC file is scanned: inclusion (column 1) and
	skipping (column 2) comma-separated counts are converted to per-sample
	PSI values, length-normalized by columns 5 and 6 (presumably rMATS
	IncFormLen/SkipFormLen — TODO confirm against the JC file spec).
	Events whose mean junction coverage across all samples falls below
	cov_cutoff are dropped.  Appends each batch file path to
	file_batch_list and returns it.  Aborts the process if a JC row's
	sample count disagrees with the sample-order file.
	"""
	for b in range(0,len(total_event_list),batch):
		Intercep_Matrix={}
		print '[INFO] Merging in progress. Working on batch ',b
		# NOTE(review): min() is redundant — Python slicing already clamps.
		batch_event_list= total_event_list[b:min(b+batch,len(total_event_list))]
		batch_event_dict= dict.fromkeys(batch_event_list, 0)
		for n,fin in enumerate(fin_list):
			# Map this run's rMATS event IDs to the shared coordinate keys,
			# restricted to events in the current batch.
			eventID={}
			for index,event_l in enumerate(open(events_fin_list[n])):
				if index==0:
					continue
				event_ls=event_l.strip().split('\t')
				event_cord=event_ls[1].strip('"')+'\t'+event_ls[2].strip('"')+'\t'+'\t'.join(event_ls[3:7]+event_ls[8:10])
				if event_cord in batch_event_dict:
					eventID[event_ls[0]]=event_cord
			print '[INFO] Merging file: ', fin, len(eventID)
			for index,r in enumerate(open(fin)):
				if index==0:
					continue
				rs=r.strip().split('\t')
				if rs[0] not in eventID:
					continue
				Incl=map(float,rs[1].split(','))
				Skip=map(float,rs[2].split(','))
				# Per-sample junction coverage = inclusion + skipping counts.
				Cov=[num+Skip[o] for o,num in enumerate(Incl)]
				psi_values=[]
				for i,I in enumerate(Incl):
					if int(I)+int(Skip[i])==0:
						psi_values.append('NaN')
					else:
						# Length-normalized PSI, rounded to 4 decimals.
						psi_values.append(str(round(I/int(rs[5])/(I/int(rs[5])+Skip[i]/int(rs[6])),4)))
				if eventID[rs[0]] not in Intercep_Matrix:
					Intercep_Matrix[eventID[rs[0]]]={}
				if sample_fin_list[n] not in Intercep_Matrix[eventID[rs[0]]]:
					Intercep_Matrix[eventID[rs[0]]][sample_fin_list[n]]=(psi_values,Cov)
				if len(psi_values)!=sample_size[sample_fin_list[n]]:
					exit('[Abort] Sample number does not match observations in JC file.')
		file_batch_list.append(fout_path+'/splicing_matrix/splicing_matrix.'+splicing_event_type+'.cov'+str(cov_cutoff)+'.'+data_name+'.txt.batch_'+str(b)+'.txt')
		fout=open(fout_path+'/splicing_matrix/splicing_matrix.'+splicing_event_type+'.cov'+str(cov_cutoff)+'.'+data_name+'.txt.batch_'+str(b)+'.txt','w')
		fout.write('AC\tGeneName\tchr\tstrand\texonStart\texonEnd\tupstreamEE\tdownstreamES\t'+'\t'.join(sample_header)+'\n')
		for k in sorted(Intercep_Matrix.keys()):
			psi_value_all=[]
			cov_all=[]
			for sample in sample_fin_list:
				if sample in Intercep_Matrix[k]:
					psi_value_all+=Intercep_Matrix[k][sample][0]
					cov_all+=Intercep_Matrix[k][sample][1]
				else:
					# Event absent from this run: pad with NaN for its samples.
					psi_value_all+=['NaN']*sample_size[sample]
			# Coverage filter: mean over samples that actually observed the event.
			mean=numpy.mean(cov_all)
			if mean>=cov_cutoff:
				fout.write(k+'\t'+'\t'.join(psi_value_all)+'\n')
		fout.close()
	return file_batch_list
def mergeMatrixInOne(file_batch_list, cov_cutoff, data_name, splicing_event_type, fout_path):
	"""Concatenate the per-batch matrix files into one final splicing matrix.

	Writes the header once (taken from the first non-empty batch file),
	appends every data row from every batch file, deletes the intermediate
	batch files, and returns the final matrix file name (not its full path).
	"""
	matrix_name = 'splicing_matrix.'+splicing_event_type+'.cov'+str(cov_cutoff)+'.'+data_name+'.txt'
	merged = open(fout_path+'/splicing_matrix/'+matrix_name, 'w')
	header_written = False
	for batch_path in file_batch_list:
		batch_file = open(batch_path)
		first_line = batch_file.readline()
		if first_line:
			if not header_written:
				merged.write(first_line)
				header_written = True
			for line in batch_file:
				merged.write(line)
	merged.close()
	# Clean up the intermediate batch files.
	os.system('rm '+fout_path+'/splicing_matrix/'+matrix_name+'.batch_*.txt')
	return matrix_name
def index_PsiMatrix(fn, outdir, delim):
	"""Write a byte-offset index for a PSI matrix file to <outdir>/<basename>.idx.

	Each index line is '<event_id>\\t<offset>\\n', where offset is the
	character offset of the corresponding data row (header excluded) and
	event_id joins the accession (first field, truncated at '_' and '.')
	with fields 1-7 using ':'.
	"""
	index_path = outdir + '/' + fn.split('/')[-1] + '.idx'
	with open(fn, 'r') as matrix:
		with open(index_path, 'w') as index_file:
			offset = len(matrix.readline())  # skip header, start past it
			for row in matrix:
				cols = row.strip().split(delim)
				event_id = ':'.join([cols[0].split('_')[0].split('.')[0]] + cols[1:8])
				index_file.write('%s\t%s\n' % (event_id, offset))
				offset += len(row)
	return
def main(args):
	"""Drive the full rMATS -> IRIS splicing-matrix conversion.

	Reads rMATS output directories from args.rmats_mat_path_manifest,
	sample orders from args.rmats_sample_order, merges events, builds the
	PSI matrix in batches under <iris_db_path>/<data_name>/splicing_matrix,
	and finally writes a byte-offset index for the merged matrix.
	"""
	cov_cutoff=args.cov_cutoff
	data_name=args.data_name
	sample_name_field=args.sample_name_field
	splicing_event_type=args.splicing_event_type
	if sample_name_field==1:
		print '[INFO] Sample name parsed from bam file. (alternatively can be parsed from up level folder)'
	if sample_name_field==2:
		print '[INFO] Sample name parsed from folder name above the bam file. (alternatively can be parsed from bam file)'
	db_dir=args.iris_db_path.rstrip('/')
	#prepare files/folders in IRIS db directory
	os.system('mkdir -p '+db_dir+'/'+data_name+' '+db_dir+'/'+data_name+'/splicing_matrix')
	fout_path=db_dir+'/'+data_name
	print '[INFO] output path: '+fout_path
	fin_list=[]
	sample_fin_list=[]
	events_fin_list=[]
	sample_size={}
	sample_header=[]
	file_batch_list=[]
	#PARSING INPUT FILE LISTS
	# Each manifest line is an rMATS output dir holding JC counts + fromGTF annotation.
	fin_list=[l.strip().rstrip('/')+'/JC.raw.input.'+splicing_event_type+'.txt' for l in open(args.rmats_mat_path_manifest)]
	events_fin_list=[l.strip().rstrip('/')+'/fromGTF.'+splicing_event_type+'.txt' for l in open(args.rmats_mat_path_manifest)]
	sample_fin_list, sample_header, sample_size= loadSamplelist(args.rmats_sample_order,sample_fin_list, sample_header,sample_name_field, sample_size)
	#MAKING MERGED EVENTS LIST
	total_event_list= writeMergedEvents(events_fin_list, splicing_event_type, cov_cutoff, data_name, fout_path)
	if args.merge_events_only:
		exit('[INFO] Done merging events only.')
	print '[INFO] Done loading file dir', len(total_event_list)
	#START MERGING MATRICES IN BATCH MODE FOLLOWING EVENTS LIST GENERATED.
	# 20,000 events per batch bounds peak memory during the merge.
	batch=20000
	file_batch_list=mergeMatrixInBatch(fin_list, events_fin_list, sample_fin_list, cov_cutoff, data_name, splicing_event_type, sample_header, sample_size, total_event_list, file_batch_list, batch, fout_path)
	print '[INFO] Done merging matrices by batch.'
	merged_file_name=mergeMatrixInOne(file_batch_list, cov_cutoff, data_name, splicing_event_type, fout_path)
	print '[INFO] Done merging matrices: '+merged_file_name
	#create index in IRIS db directory
	index_PsiMatrix(fout_path+'/splicing_matrix/'+merged_file_name,fout_path+'/splicing_matrix','\t')
	print '[INFO] Finished. Created matrix: '+fout_path
if __name__ == '__main__':
	# Bug fix: main() requires a parsed-argument namespace, but was called with
	# no arguments, which raised TypeError immediately.  Build a CLI exposing
	# every attribute main() reads (cov_cutoff, data_name, sample_name_field,
	# splicing_event_type, iris_db_path, rmats_mat_path_manifest,
	# rmats_sample_order, merge_events_only).  Defaults are reviewer-chosen;
	# confirm against the IRIS package-level parser.
	parser = argparse.ArgumentParser(description='Merge rMATS outputs into an IRIS splicing matrix.')
	parser.add_argument('rmats_mat_path_manifest', help='File listing rMATS output directories, one per line.')
	parser.add_argument('rmats_sample_order', help='File listing sample-order files, one per line.')
	parser.add_argument('data_name', help='Name of the dataset/group being merged.')
	parser.add_argument('iris_db_path', help='Path to the IRIS db directory.')
	parser.add_argument('--cov-cutoff', dest='cov_cutoff', type=int, default=10, help='Mean junction-coverage cutoff (default: 10).')
	parser.add_argument('--splicing-event-type', dest='splicing_event_type', default='SE', help='rMATS event type, e.g. SE (default: SE).')
	parser.add_argument('--sample-name-field', dest='sample_name_field', type=int, choices=[1, 2], default=1, help='Parse sample name from bam file (1) or its parent folder (2).')
	parser.add_argument('--merge-events-only', dest='merge_events_only', action='store_true', help='Stop after writing the merged events list.')
	main(parser.parse_args())
| 42.454545 | 204 | 0.722564 | 1,196 | 7,472 | 4.233278 | 0.158027 | 0.03733 | 0.053723 | 0.04365 | 0.417737 | 0.364408 | 0.340707 | 0.301995 | 0.301995 | 0.286984 | 0 | 0.008779 | 0.115766 | 7,472 | 175 | 205 | 42.697143 | 0.75753 | 0.034529 | 0 | 0.125 | 0 | 0.006579 | 0.1447 | 0.033158 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.006579 | null | null | 0.065789 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
408784a24cae84367d1864aa02a8ff6e4a8e197a | 1,109 | py | Python | bot/conversation_handlers/stage01.py | gerbigtim/coaching_bot | 5b4ef6e207a5017f7b4274d8238550b4988d0a6e | [
"MIT"
] | null | null | null | bot/conversation_handlers/stage01.py | gerbigtim/coaching_bot | 5b4ef6e207a5017f7b4274d8238550b4988d0a6e | [
"MIT"
] | null | null | null | bot/conversation_handlers/stage01.py | gerbigtim/coaching_bot | 5b4ef6e207a5017f7b4274d8238550b4988d0a6e | [
"MIT"
] | null | null | null | # imports
from telegram.ext import (
CommandHandler,
MessageHandler,
Filters,
ConversationHandler,
)
from handler_functions.start import start
from handler_functions.bio import bio
from handler_functions.gender import gender
from handler_functions.photo import photo, skip_photo
from handler_functions.location import location, skip_location
from handler_functions.cancel import cancel
from conversation_handlers.stage_constants import *
# Adds conversation handler with the states GENDER, PHOTO, LOCATION and BIO for stage 1 of the sign up
# Stage-1 sign-up conversation: /start -> GENDER -> PHOTO -> LOCATION -> BIO.
# The state constants are imported from conversation_handlers.stage_constants.
conv_handler = ConversationHandler(
    entry_points=[CommandHandler('start', start)],
    states={
        # NOTE(review): '.' in 'I am a unicorn.' is an unescaped regex wildcard;
        # it matches any character, not only a literal dot — confirm intent.
        GENDER: [MessageHandler(Filters.regex('^(Gentleman|Lady|I am a unicorn.)$'), gender)],
        # Photo and location steps can each be skipped with /skip.
        PHOTO: [MessageHandler(Filters.photo, photo), CommandHandler('skip', skip_photo)],
        LOCATION: [
            MessageHandler(Filters.location, location),
            CommandHandler('skip', skip_location),
        ],
        # Free-text bio; commands are excluded so /cancel still works.
        BIO: [MessageHandler(Filters.text & ~Filters.command, bio)],
    },
    # /cancel aborts the conversation from any state.
    fallbacks=[CommandHandler('cancel', cancel)],
) | 38.241379 | 102 | 0.734896 | 122 | 1,109 | 6.565574 | 0.368852 | 0.082397 | 0.149813 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.001087 | 0.170424 | 1,109 | 29 | 103 | 38.241379 | 0.869565 | 0.097385 | 0 | 0 | 0 | 0 | 0.053053 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.307692 | 0 | 0.307692 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 1 |
4087ac882a0e642cb2645d67bfb2e7473130d2e9 | 265 | py | Python | python100days/day03/conversion.py | lanSeFangZhou/pythonbase | f4daa373573b2fc0a59a5eb919d02eddf5914e18 | [
"Apache-2.0"
] | null | null | null | python100days/day03/conversion.py | lanSeFangZhou/pythonbase | f4daa373573b2fc0a59a5eb919d02eddf5914e18 | [
"Apache-2.0"
] | 1 | 2021-06-02T00:58:26.000Z | 2021-06-02T00:58:26.000Z | python100days/day03/conversion.py | lanSeFangZhou/pythonbase | f4daa373573b2fc0a59a5eb919d02eddf5914e18 | [
"Apache-2.0"
] | null | null | null | # 英制单位英寸和公制单位厘米互换
value =float(input('请输入长度:'))
unit =input('请输入单位:')
if unit == 'in' or unit == '英寸':
print('%f英寸 = %f厘米' % (value, value * 2.54))
elif unit == '厘米' or unit == 'cm':
print('%f 厘米 = %f英寸' % (value, value / 2.54))
else:
print('请输入有效的单位') | 26.5 | 49 | 0.558491 | 38 | 265 | 3.894737 | 0.578947 | 0.081081 | 0.148649 | 0.175676 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.028302 | 0.2 | 265 | 10 | 50 | 26.5 | 0.669811 | 0.056604 | 0 | 0 | 0 | 0 | 0.204819 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0.375 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
4088dc579c34d53321481174879bd2850ab8f43e | 485 | py | Python | tests/models/test_dtfactory.py | surajsjain/ocean.py | 2e853db94d9aee2a0cf6b3d58f714215b83d917b | [
"Apache-2.0"
] | 4 | 2021-07-05T20:21:41.000Z | 2021-09-02T14:13:26.000Z | tests/models/test_dtfactory.py | surajsjain/ocean.py | 2e853db94d9aee2a0cf6b3d58f714215b83d917b | [
"Apache-2.0"
] | null | null | null | tests/models/test_dtfactory.py | surajsjain/ocean.py | 2e853db94d9aee2a0cf6b3d58f714215b83d917b | [
"Apache-2.0"
] | 1 | 2021-03-25T15:04:12.000Z | 2021-03-25T15:04:12.000Z | from ocean_lib.models.data_token import DataToken
from ocean_lib.models.dtfactory import DTFactory
from ocean_lib.ocean.util import to_base_18
def test1(network, alice_wallet, dtfactory_address):
    """Smoke test: DTFactory.createToken yields a resolvable DataToken whose
    blob round-trips.

    The fixtures (network, alice_wallet, dtfactory_address) are supplied by
    the project's pytest conftest — presumably a local test chain; confirm.
    """
    dtfactory = DTFactory(dtfactory_address)
    # to_base_18 scales the cap (1000) into 18-decimal base units.
    dt_address = dtfactory.createToken('foo_blob', 'DT1', 'DT1', to_base_18(1000), from_wallet=alice_wallet)
    dt = DataToken(dtfactory.get_token_address(dt_address))
    assert isinstance(dt, DataToken)
    assert dt.blob() == 'foo_blob'
| 37.307692 | 108 | 0.781443 | 68 | 485 | 5.294118 | 0.411765 | 0.075 | 0.1 | 0.1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.025822 | 0.121649 | 485 | 12 | 109 | 40.416667 | 0.819249 | 0 | 0 | 0 | 0 | 0 | 0.045361 | 0 | 0 | 0 | 0 | 0 | 0.222222 | 1 | 0.111111 | false | 0 | 0.333333 | 0 | 0.444444 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 1 |
408fa80f7b62ab2142b5ebe87fafa4317281b530 | 6,779 | py | Python | sdk/python/pulumi_aws/acm/get_certificate.py | mehd-io/pulumi-aws | 034629c3fb30dc90db65b196d115df43723df19c | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_aws/acm/get_certificate.py | mehd-io/pulumi-aws | 034629c3fb30dc90db65b196d115df43723df19c | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_aws/acm/get_certificate.py | mehd-io/pulumi-aws | 034629c3fb30dc90db65b196d115df43723df19c | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from .. import _utilities, _tables
__all__ = [
'GetCertificateResult',
'AwaitableGetCertificateResult',
'get_certificate',
]
@pulumi.output_type
class GetCertificateResult:
    """
    A collection of values returned by getCertificate.
    """
    # Auto-generated by the Pulumi Terraform bridge (see file header);
    # regenerate rather than hand-editing logic here.
    def __init__(__self__, arn=None, domain=None, id=None, key_types=None, most_recent=None, statuses=None, tags=None, types=None):
        # Validate each argument's runtime type, then store it on the output
        # object; pulumi.set backs the @pulumi.getter properties below.
        if arn and not isinstance(arn, str):
            raise TypeError("Expected argument 'arn' to be a str")
        pulumi.set(__self__, "arn", arn)
        if domain and not isinstance(domain, str):
            raise TypeError("Expected argument 'domain' to be a str")
        pulumi.set(__self__, "domain", domain)
        if id and not isinstance(id, str):
            raise TypeError("Expected argument 'id' to be a str")
        pulumi.set(__self__, "id", id)
        if key_types and not isinstance(key_types, list):
            raise TypeError("Expected argument 'key_types' to be a list")
        pulumi.set(__self__, "key_types", key_types)
        if most_recent and not isinstance(most_recent, bool):
            raise TypeError("Expected argument 'most_recent' to be a bool")
        pulumi.set(__self__, "most_recent", most_recent)
        if statuses and not isinstance(statuses, list):
            raise TypeError("Expected argument 'statuses' to be a list")
        pulumi.set(__self__, "statuses", statuses)
        if tags and not isinstance(tags, dict):
            raise TypeError("Expected argument 'tags' to be a dict")
        pulumi.set(__self__, "tags", tags)
        if types and not isinstance(types, list):
            raise TypeError("Expected argument 'types' to be a list")
        pulumi.set(__self__, "types", types)
    @property
    @pulumi.getter
    def arn(self) -> str:
        """
        Set to the ARN of the found certificate, suitable for referencing in other resources that support ACM certificates.
        """
        return pulumi.get(self, "arn")
    @property
    @pulumi.getter
    def domain(self) -> str:
        # Echo of the `domain` search argument passed to get_certificate.
        return pulumi.get(self, "domain")
    @property
    @pulumi.getter
    def id(self) -> str:
        """
        The provider-assigned unique ID for this managed resource.
        """
        return pulumi.get(self, "id")
    @property
    @pulumi.getter(name="keyTypes")
    def key_types(self) -> Optional[Sequence[str]]:
        # Echo of the `key_types` filter argument, if one was given.
        return pulumi.get(self, "key_types")
    @property
    @pulumi.getter(name="mostRecent")
    def most_recent(self) -> Optional[bool]:
        # Echo of the `most_recent` flag argument, if one was given.
        return pulumi.get(self, "most_recent")
    @property
    @pulumi.getter
    def statuses(self) -> Optional[Sequence[str]]:
        # Echo of the `statuses` filter argument, if one was given.
        return pulumi.get(self, "statuses")
    @property
    @pulumi.getter
    def tags(self) -> Mapping[str, str]:
        """
        A mapping of tags for the resource.
        """
        return pulumi.get(self, "tags")
    @property
    @pulumi.getter
    def types(self) -> Optional[Sequence[str]]:
        # Echo of the `types` filter argument, if one was given.
        return pulumi.get(self, "types")
class AwaitableGetCertificateResult(GetCertificateResult):
    # Awaitable wrapper so callers may `await` the invoke result.  The dead
    # `if False: yield` makes __await__ a generator function (satisfying the
    # awaitable protocol); it returns the already-resolved values without
    # ever suspending.
    # pylint: disable=using-constant-test
    def __await__(self):
        if False:
            yield self
        return GetCertificateResult(
            arn=self.arn,
            domain=self.domain,
            id=self.id,
            key_types=self.key_types,
            most_recent=self.most_recent,
            statuses=self.statuses,
            tags=self.tags,
            types=self.types)
def get_certificate(domain: Optional[str] = None,
                    key_types: Optional[Sequence[str]] = None,
                    most_recent: Optional[bool] = None,
                    statuses: Optional[Sequence[str]] = None,
                    tags: Optional[Mapping[str, str]] = None,
                    types: Optional[Sequence[str]] = None,
                    opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetCertificateResult:
    """
    Use this data source to get the ARN of a certificate in AWS Certificate
    Manager (ACM), you can reference
    it by domain without having to hard code the ARNs as input.
    ## Example Usage
    ```python
    import pulumi
    import pulumi_aws as aws
    issued = aws.acm.get_certificate(domain="tf.example.com",
        statuses=["ISSUED"])
    amazon_issued = aws.acm.get_certificate(domain="tf.example.com",
        most_recent=True,
        types=["AMAZON_ISSUED"])
    rsa4096 = aws.acm.get_certificate(domain="tf.example.com",
        key_types=["RSA_4096"])
    ```
    :param str domain: The domain of the certificate to look up. If no certificate is found with this name, an error will be returned.
    :param Sequence[str] key_types: A list of key algorithms to filter certificates. By default, ACM does not return all certificate types when searching. Valid values are `RSA_1024`, `RSA_2048`, `RSA_4096`, `EC_prime256v1`, `EC_secp384r1`, and `EC_secp521r1`.
    :param bool most_recent: If set to true, it sorts the certificates matched by previous criteria by the NotBefore field, returning only the most recent one. If set to false, it returns an error if more than one certificate is found. Defaults to false.
    :param Sequence[str] statuses: A list of statuses on which to filter the returned list. Valid values are `PENDING_VALIDATION`, `ISSUED`,
    `INACTIVE`, `EXPIRED`, `VALIDATION_TIMED_OUT`, `REVOKED` and `FAILED`. If no value is specified, only certificates in the `ISSUED` state
    are returned.
    :param Mapping[str, str] tags: A mapping of tags for the resource.
    :param Sequence[str] types: A list of types on which to filter the returned list. Valid values are `AMAZON_ISSUED` and `IMPORTED`.
    """
    # Marshal the arguments into the wire-format keys the provider expects.
    __args__ = dict()
    __args__['domain'] = domain
    __args__['keyTypes'] = key_types
    __args__['mostRecent'] = most_recent
    __args__['statuses'] = statuses
    __args__['tags'] = tags
    __args__['types'] = types
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        opts.version = _utilities.get_version()
    # Synchronously invoke the provider's getCertificate data-source call.
    __ret__ = pulumi.runtime.invoke('aws:acm/getCertificate:getCertificate', __args__, opts=opts, typ=GetCertificateResult).value
    return AwaitableGetCertificateResult(
        arn=__ret__.arn,
        domain=__ret__.domain,
        id=__ret__.id,
        key_types=__ret__.key_types,
        most_recent=__ret__.most_recent,
        statuses=__ret__.statuses,
        tags=__ret__.tags,
        types=__ret__.types)
| 39.184971 | 260 | 0.651424 | 845 | 6,779 | 5.022485 | 0.227219 | 0.040057 | 0.03016 | 0.05655 | 0.207352 | 0.146795 | 0.128417 | 0.095193 | 0.063619 | 0.020735 | 0 | 0.006459 | 0.246349 | 6,779 | 172 | 261 | 39.412791 | 0.824232 | 0.315828 | 0 | 0.127273 | 1 | 0 | 0.127052 | 0.014841 | 0 | 0 | 0 | 0 | 0 | 1 | 0.1 | false | 0 | 0.045455 | 0.045455 | 0.254545 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
40902a024648160483f25a5dd670916ae7cc2c01 | 2,688 | py | Python | Part 3/batch_VS_stochastic.py | m9psy/neural_network_habr_guide | 543b4bc82cfed5d5675b9ecc1cc97c2286a5562a | [
"MIT"
] | 20 | 2016-08-08T12:16:51.000Z | 2022-03-26T19:56:09.000Z | Part 3/batch_VS_stochastic.py | m9psy/neural_network_habr_guide | 543b4bc82cfed5d5675b9ecc1cc97c2286a5562a | [
"MIT"
] | null | null | null | Part 3/batch_VS_stochastic.py | m9psy/neural_network_habr_guide | 543b4bc82cfed5d5675b9ecc1cc97c2286a5562a | [
"MIT"
] | 8 | 2016-08-08T14:22:13.000Z | 2020-05-30T07:05:36.000Z | import numpy as np
import matplotlib.pyplot as plt
TOTAL = 200  # number of synthetic sample points to generate
STEP = 0.25  # x spacing between consecutive sample points
EPS = 0.1  # convergence threshold on the change in cost between iterations
INITIAL_THETA = [9, 14]  # starting point [theta0, theta1] for both descent demos
def func(x):
    """Ground-truth line used to generate the synthetic sample: y = 0.2*x + 3."""
    return x * 0.2 + 3
def generate_sample(total=TOTAL):
    """Yield noisy observations of func() at x = 0, STEP, 2*STEP, ...

    The noise is a uniform(-1, 1) sign/scale factor times a uniform(2, 8)
    magnitude, drawn fresh per point.  Yields `total` values in order.
    """
    x = 0
    while x < total * STEP:
        yield func(x) + np.random.uniform(-1, 1) * np.random.uniform(2, 8)
        x += STEP
def cost_function(A, Y, theta):
    """Sum-of-squared-residuals cost ||Y - A @ theta||^2 for linear regression."""
    residual = Y - A @ theta
    return residual.T @ residual
def batch_descent(A, Y, speed=0.001):
    """Full-batch gradient descent on the least-squares cost, from INITIAL_THETA.

    Each iteration sums the residual-weighted design columns over ALL
    observations, then updates both theta components together.  Stops when
    the cost changes by less than EPS; prints the cost per iteration and
    plots each theta step ('ro') on the current matplotlib axes.
    """
    theta = np.array(INITIAL_THETA.copy(), dtype=np.float32)
    # NOTE(review): reshape returns a new array; this call is a no-op as written.
    theta.reshape((len(theta), 1))
    previous_cost = 10 ** 6
    current_cost = cost_function(A, Y, theta)
    while np.abs(previous_cost - current_cost) > EPS:
        previous_cost = current_cost
        derivatives = [0] * len(theta)
        # ---------------------------------------------
        for j in range(len(theta)):
            summ = 0
            for i in range(len(Y)):
                summ += (Y[i] - A[i]@theta) * A[i][j]
            derivatives[j] = summ
        # Apply both updates simultaneously (translated from Russian:
        # "fulfilling the simultaneity requirement").
        theta[0] += speed * derivatives[0]
        theta[1] += speed * derivatives[1]
        # ---------------------------------------------
        current_cost = cost_function(A, Y, theta)
        print("Batch cost:", current_cost)
        plt.plot(theta[0], theta[1], 'ro')
    return theta
def stochastic_descent(A, Y, speed=0.1):
    """Stochastic gradient descent on the least-squares cost, from INITIAL_THETA.

    Each iteration picks ONE random observation i and updates theta using
    only that observation's gradient contribution.  Stops when the full
    cost changes by less than EPS; prints the cost per iteration and plots
    each theta step ('ro') on the current matplotlib axes.
    """
    theta = np.array(INITIAL_THETA.copy(), dtype=np.float32)
    previous_cost = 10 ** 6
    current_cost = cost_function(A, Y, theta)
    while np.abs(previous_cost - current_cost) > EPS:
        previous_cost = current_cost
        # --------------------------------------
        # for i in range(len(Y)):
        i = np.random.randint(0, len(Y))
        derivatives = [0] * len(theta)
        for j in range(len(theta)):
            derivatives[j] = (Y[i] - A[i]@theta) * A[i][j]
        theta[0] += speed * derivatives[0]
        theta[1] += speed * derivatives[1]
        # --------------------------------------
        current_cost = cost_function(A, Y, theta)
        print("Stochastic cost:", current_cost)
        plt.plot(theta[0], theta[1], 'ro')
    return theta
# Build the synthetic regression problem: X grid, noisy Y, design matrix A.
X = np.arange(0, TOTAL * STEP, STEP)
Y = np.array([y for y in generate_sample(TOTAL)])
# Normalization added so the paraboloid looks nice (translated from Russian).
X = (X - X.min()) / (X.max() - X.min())
A = np.empty((TOTAL, 2))
A[:, 0] = 1
A[:, 1] = X
# Closed-form least-squares solution via pseudo-inverse, for reference.
theta = np.linalg.pinv(A).dot(Y)
print(theta, cost_function(A, Y, theta))
# Time both descent variants.
# NOTE(review): time.clock() was removed in Python 3.8 — this script only
# runs on <=3.7; time.perf_counter() is the modern replacement.
import time
start = time.clock()
theta_stochastic = stochastic_descent(A, Y, 0.1)
print("St:", time.clock() - start, theta_stochastic)
start = time.clock()
theta_batch = batch_descent(A, Y, 0.001)
print("Btch:", time.clock() - start, theta_batch)
| 29.866667 | 74 | 0.553943 | 376 | 2,688 | 3.867021 | 0.223404 | 0.013755 | 0.053645 | 0.057772 | 0.514443 | 0.467675 | 0.447043 | 0.447043 | 0.430536 | 0.372765 | 0 | 0.030822 | 0.239583 | 2,688 | 89 | 75 | 30.202247 | 0.680528 | 0.105655 | 0 | 0.4 | 1 | 0 | 0.016277 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.076923 | false | 0 | 0.046154 | 0.030769 | 0.184615 | 0.076923 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
4096eff81f78a7602d75dd243df5e2e64ac51f0d | 429 | py | Python | kalachakra/saraswati/migrations/0004_ritual_people_name.py | tony-mikhailov/Kalachakra | 7a46be7e75bad0500914e5a7c44662c6740ebaa2 | [
"MIT"
] | null | null | null | kalachakra/saraswati/migrations/0004_ritual_people_name.py | tony-mikhailov/Kalachakra | 7a46be7e75bad0500914e5a7c44662c6740ebaa2 | [
"MIT"
] | 3 | 2021-03-19T01:19:04.000Z | 2021-06-04T22:44:35.000Z | kalachakra/saraswati/migrations/0004_ritual_people_name.py | tony-mikhailov/Kalachakra | 7a46be7e75bad0500914e5a7c44662c6740ebaa2 | [
"MIT"
] | null | null | null | # Generated by Django 2.2.6 on 2020-04-05 07:50
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated Django migration (see file header): adds the nullable
    # free-text 'people_name' column (max 108 chars) to the 'ritual' model.
    dependencies = [
        ('saraswati', '0003_auto_20200402_1918'),
    ]
    operations = [
        migrations.AddField(
            model_name='ritual',
            name='people_name',
            field=models.TextField(blank=True, default=None, max_length=108, null=True),
        ),
    ]
| 22.578947 | 88 | 0.617716 | 49 | 429 | 5.285714 | 0.816327 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.107937 | 0.265734 | 429 | 18 | 89 | 23.833333 | 0.714286 | 0.104895 | 0 | 0 | 1 | 0 | 0.128272 | 0.060209 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.083333 | 0 | 0.333333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
40997199af5c3427ea68e5bd37b9d827653408fe | 14,709 | py | Python | src/toil/jobStores/abstractJobStore.py | adamnovak/toil | 3a81f1114ec7f347e6e7bfd861073d897a9188ec | [
"Apache-2.0"
] | null | null | null | src/toil/jobStores/abstractJobStore.py | adamnovak/toil | 3a81f1114ec7f347e6e7bfd861073d897a9188ec | [
"Apache-2.0"
] | null | null | null | src/toil/jobStores/abstractJobStore.py | adamnovak/toil | 3a81f1114ec7f347e6e7bfd861073d897a9188ec | [
"Apache-2.0"
] | null | null | null | # Copyright (C) 2015 UCSC Computational Genomics Lab
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from abc import ABCMeta, abstractmethod
from contextlib import contextmanager
import re
try:
import cPickle
except ImportError:
import pickle as cPickle
class NoSuchJobException(Exception):
    """Raised when a jobStoreID does not refer to an existing job."""
    def __init__(self, jobStoreID):
        message = "The job '%s' does not exist" % jobStoreID
        super(NoSuchJobException, self).__init__(message)
class ConcurrentFileModificationException(Exception):
    """Raised when two writers update the same job-store file concurrently."""
    def __init__(self, jobStoreFileID):
        message = 'Concurrent update to file %s detected.' % jobStoreFileID
        super(ConcurrentFileModificationException, self).__init__(message)
class NoSuchFileException(Exception):
    """Raised when a file ID does not refer to an existing job-store file."""
    def __init__(self, fileJobStoreID):
        message = "The file '%s' does not exist" % fileJobStoreID
        super(NoSuchFileException, self).__init__(message)
class JobStoreCreationException(Exception):
    """Raised when creating or loading a job store conflicts with whether it
    already exists (see AbstractJobStore._checkJobStoreCreation)."""
    def __init__(self, message):
        super(JobStoreCreationException, self).__init__(message)
class AbstractJobStore(object):
    """
    Represents the physical storage for the jobs and associated files in a toil.

    Concrete subclasses implement the abstract methods below against a
    particular backend. The toil configuration object is persisted inside the
    store itself as the shared file "config.pickle".
    """
    # Py2-style ABC hook, kept for compatibility with this py2/py3 code base.
    # NOTE(review): under Python 3 this attribute has no effect, so the
    # @abstractmethod markers are not enforced there.
    __metaclass__ = ABCMeta

    def __init__(self, config=None):
        """
        :param config: If config is not None then the given configuration object will be
            written to the shared file "config.pickle" which can later be retrieved using
            readSharedFileStream (see writeConfigToStore). If this file already exists it
            will be overwritten. If config is None, the shared file "config.pickle" is
            assumed to exist and is retrieved.
        """
        # Now get on with reading or writing the config
        if config is None:
            with self.readSharedFileStream("config.pickle", isProtected=False) as fileHandle:
                self.__config = cPickle.load(fileHandle)
        else:
            self.__config = config
            self.writeConfigToStore()

    def writeConfigToStore(self):
        """
        Re-writes the config attribute to the jobStore, so that its values can be retrieved
        if the jobStore is reloaded.
        """
        with self.writeSharedFileStream("config.pickle", isProtected=False) as fileHandle:
            cPickle.dump(self.__config, fileHandle, cPickle.HIGHEST_PROTOCOL)

    @property
    def config(self):
        """The configuration object this store was created or loaded with."""
        return self.__config

    @staticmethod
    def _checkJobStoreCreation(create, exists, jobStoreString):
        """
        Consistency checks which will result in exceptions if we attempt to overwrite an
        existing jobStore.

        :type create: boolean
        :type exists: boolean
        :raise JobStoreCreationException: Thrown if create=True and exists=True or
            create=False and exists=False
        """
        if create and exists:
            raise JobStoreCreationException("The job store '%s' already exists. "
                                            "Use --restart or 'toil restart' to resume this jobStore, "
                                            "else remove it to start from scratch" % jobStoreString)
        if not create and not exists:
            raise JobStoreCreationException("The job store '%s' does not exist, so there "
                                            "is nothing to restart." % jobStoreString)

    @abstractmethod
    def deleteJobStore(self):
        """
        Removes the jobStore from the disk/store. Careful!
        """
        raise NotImplementedError()

    ## Cleanup functions

    def clean(self):
        """
        Function to cleanup the state of a jobStore after a restart.
        Fixes jobs that might have been partially updated.
        Resets the try counts.
        """
        # Collate any jobs that were in the process of being created/deleted
        jobsToDelete = set()
        for job in self.jobs():
            for updateID in job.jobsToDelete:
                jobsToDelete.add(updateID)
        # Delete the jobs that should be deleted
        if len(jobsToDelete) > 0:
            for job in self.jobs():
                if job.updateID in jobsToDelete:
                    self.delete(job.jobStoreID)
        # Cleanup the state of each job
        for job in self.jobs():
            changed = False  # Flag to indicate if we need to update the job on disk
            if len(job.jobsToDelete) != 0:
                job.jobsToDelete = set()
                changed = True
            # While jobs at the end of the stack are already deleted remove
            # those jobs from the stack (this cleans up the case that the job
            # had successors to run, but had not been updated to reflect this)
            while len(job.stack) > 0:
                jobs = [command for command in job.stack[-1] if self.exists(command[0])]
                if len(jobs) < len(job.stack[-1]):
                    changed = True
                    if len(jobs) > 0:
                        job.stack[-1] = jobs
                        break
                    else:
                        job.stack.pop()
                else:
                    break
            # Reset the retry count of the job
            if job.remainingRetryCount < self._defaultTryCount():
                job.remainingRetryCount = self._defaultTryCount()
                changed = True
            # This cleans the old log file which may have been left if the job
            # is being retried after a job failure.
            # BUGFIX: was `!= None`; identity comparison is the correct idiom.
            if job.logJobStoreFileID is not None:
                job.clearLogFile(self)
                changed = True
            if changed:  # Update, but only if a change has occurred
                self.update(job)
        # Remove any crufty stats/logging files from the previous run
        self.readStatsAndLogging(lambda x: None)

    ##########################################
    # The following methods deal with creating/loading/updating/writing/checking for the
    # existence of jobs
    ##########################################

    @abstractmethod
    def create(self, command, memory, cores, disk, updateID=None,
               predecessorNumber=0):
        """
        Creates a job, adding it to the store.
        Command, memory, cores, updateID, predecessorNumber
        are all arguments to the job's constructor.

        :rtype : toil.jobWrapper.JobWrapper
        """
        raise NotImplementedError()

    @abstractmethod
    def exists(self, jobStoreID):
        """
        Returns true if the job is in the store, else false.

        :rtype : bool
        """
        raise NotImplementedError()

    @abstractmethod
    def getPublicUrl(self, FileName):
        """
        Returns a publicly accessible URL to the given file in the job store.
        The returned URL starts with 'http:', 'https:' or 'file:'.
        The returned URL may expire as early as 1h after its been returned.
        Throw an exception if the file does not exist.

        :param FileName: the store file ID to link to (docstring previously
            referred to a non-existent `jobStoreFileID` parameter)
        """
        raise NotImplementedError()

    @abstractmethod
    def getSharedPublicUrl(self, jobStoreFileID):
        """
        Returns a publicly accessible URL to the given shared file in the job store.
        The returned URL starts with 'http:', 'https:' or 'file:'.
        The returned URL may expire as early as 1h after its been returned.
        Throw an exception if the file does not exist.

        :param jobStoreFileID: the shared file to link to
        """
        raise NotImplementedError()

    @abstractmethod
    def load(self, jobStoreID):
        """
        Loads a job for the given jobStoreID and returns it.

        :rtype: toil.jobWrapper.JobWrapper
        :raises: NoSuchJobException if there is no job with the given jobStoreID
        """
        raise NotImplementedError()

    @abstractmethod
    def update(self, job):
        """
        Persists the job in this store atomically.
        """
        raise NotImplementedError()

    @abstractmethod
    def delete(self, jobStoreID):
        """
        Removes from store atomically, can not then subsequently call load(), write(),
        update(), etc. with the job.

        This operation is idempotent, i.e. deleting a job twice or deleting a non-existent
        job will succeed silently.
        """
        raise NotImplementedError()

    def jobs(self):
        """
        Returns iterator on the jobs in the store.

        NOTE(review): unlike its siblings this method is not marked
        @abstractmethod, although it only raises NotImplementedError.

        :rtype : iterator
        """
        raise NotImplementedError()

    ##########################################
    # The following provide a way of creating/reading/writing/updating files
    # associated with a given job.
    ##########################################

    @abstractmethod
    def writeFile(self, localFilePath, jobStoreID=None):
        """
        Takes a file (as a path) and places it in this job store. Returns an ID that can
        be used to retrieve the file at a later time.

        jobStoreID is the id of a job, or None. If specified, when delete(job)
        is called all files written with the given job.jobStoreID will be
        removed from the jobStore.
        """
        raise NotImplementedError()

    @abstractmethod
    @contextmanager
    def writeFileStream(self, jobStoreID=None):
        """
        Similar to writeFile, but returns a context manager yielding a tuple of
        1) a file handle which can be written to and 2) the ID of the resulting
        file in the job store. The yielded file handle does not need to and
        should not be closed explicitly.
        """
        raise NotImplementedError()

    @abstractmethod
    def getEmptyFileStoreID(self, jobStoreID=None):
        """
        :rtype : string, the ID of a new, empty file.

        jobStoreID is the id of a job, or None. If specified, when delete(job)
        is called all files written with the given job.jobStoreID will be
        removed from the jobStore.

        Call to fileExists(getEmptyFileStoreID(jobStoreID)) will return True.
        """
        raise NotImplementedError()

    @abstractmethod
    def readFile(self, jobStoreFileID, localFilePath):
        """
        Copies the file referenced by jobStoreFileID to the given local file path. The
        version will be consistent with the last copy of the file written/updated.
        """
        raise NotImplementedError()

    @abstractmethod
    @contextmanager
    def readFileStream(self, jobStoreFileID):
        """
        Similar to readFile, but returns a context manager yielding a file handle which
        can be read from. The yielded file handle does not need to and should not be
        closed explicitly.
        """
        raise NotImplementedError()

    @abstractmethod
    def deleteFile(self, jobStoreFileID):
        """
        Deletes the file with the given ID from this job store.

        This operation is idempotent, i.e. deleting a file twice or deleting a
        non-existent file will succeed silently.
        """
        raise NotImplementedError()

    @abstractmethod
    def fileExists(self, jobStoreFileID):
        """
        :rtype : True if the jobStoreFileID exists in the jobStore, else False
        """
        raise NotImplementedError()

    @abstractmethod
    def updateFile(self, jobStoreFileID, localFilePath):
        """
        Replaces the existing version of a file in the jobStore. Throws an exception if
        the file does not exist.

        :raises ConcurrentFileModificationException: if the file was modified concurrently
            during an invocation of this method
        """
        raise NotImplementedError()

    ##########################################
    # The following methods deal with shared files, i.e. files not associated
    # with specific jobs.
    ##########################################

    # Shared file names must be non-empty and limited to this character set.
    sharedFileNameRegex = re.compile(r'^[a-zA-Z0-9._-]+$')

    # FIXME: Rename to updateSharedFileStream
    @abstractmethod
    @contextmanager
    def writeSharedFileStream(self, sharedFileName, isProtected=True):
        """
        Returns a context manager yielding a writable file handle to the global file
        referenced by the given name.

        :param sharedFileName: A file name matching AbstractJobStore.fileNameRegex, unique
            within the physical storage represented by this job store
        :raises ConcurrentFileModificationException: if the file was modified concurrently
            during an invocation of this method
        """
        raise NotImplementedError()

    @abstractmethod
    @contextmanager
    def readSharedFileStream(self, sharedFileName, isProtected=True):
        """
        Returns a context manager yielding a readable file handle to the global file
        referenced by the given name.
        """
        raise NotImplementedError()

    @abstractmethod
    def writeStatsAndLogging(self, statsAndLoggingString):
        """
        Adds the given statistics/logging string to the store of statistics info.
        """
        raise NotImplementedError()

    @abstractmethod
    def readStatsAndLogging(self, statsAndLoggingCallBackFn):
        """
        Reads stats/logging strings accumulated by the "writeStatsAndLogging" function.
        For each stats/logging file calls the statsAndLoggingCallBackFn with
        an open, readable file-handle that can be used to parse the stats.
        Returns the number of stat/logging strings processed.

        Stats/logging files are only read once and are removed from the
        file store after being written to the given file handle.
        """
        raise NotImplementedError()

    ## Helper methods for subclasses

    def _defaultTryCount(self):
        """Number of attempts a job gets: configured retries plus the first try."""
        return int(self.config.retryCount + 1)

    @classmethod
    def _validateSharedFileName(cls, sharedFileName):
        """Return True iff sharedFileName matches sharedFileNameRegex."""
        return bool(cls.sharedFileNameRegex.match(sharedFileName))
| 37.143939 | 102 | 0.615066 | 1,595 | 14,709 | 5.634483 | 0.253919 | 0.056081 | 0.067653 | 0.059308 | 0.258039 | 0.212529 | 0.191165 | 0.172916 | 0.161344 | 0.161344 | 0 | 0.002351 | 0.306003 | 14,709 | 395 | 103 | 37.237975 | 0.878037 | 0.441634 | 0 | 0.375 | 0 | 0 | 0.048501 | 0 | 0 | 0 | 0 | 0.002532 | 0 | 1 | 0.210526 | false | 0 | 0.046053 | 0.019737 | 0.322368 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
409c909153fb2318680014346b00ba060e9d1ace | 699 | py | Python | summary/abs_summarization.py | solarpark7346/sukjulyo | 52caaa7f49294898b055062d7c0b2cb5c5393c24 | [
"MIT"
] | null | null | null | summary/abs_summarization.py | solarpark7346/sukjulyo | 52caaa7f49294898b055062d7c0b2cb5c5393c24 | [
"MIT"
] | null | null | null | summary/abs_summarization.py | solarpark7346/sukjulyo | 52caaa7f49294898b055062d7c0b2cb5c5393c24 | [
"MIT"
] | 3 | 2021-10-31T08:23:44.000Z | 2022-01-13T03:59:22.000Z | import torch
from transformers import PreTrainedTokenizerFast
from transformers import BartForConditionalGeneration
class AbsSummarization():
    """Abstractive summarisation backed by the pretrained KoBART checkpoint."""

    def __init__(self):
        # Tokenizer and model are loaded from the same pretrained checkpoint.
        checkpoint = 'gogamza/kobart-summarization'
        self.tokenizer = PreTrainedTokenizerFast.from_pretrained(checkpoint)
        self.model = BartForConditionalGeneration.from_pretrained(checkpoint)

    def predict(self, text):
        """Return the generated summary string for *text*."""
        tokenizer = self.tokenizer
        body_ids = tokenizer.encode(text)
        # Frame the encoded text with BOS/EOS before generation.
        framed_ids = [tokenizer.bos_token_id] + body_ids + [tokenizer.eos_token_id]
        generated = self.model.generate(torch.tensor([framed_ids]))
        return tokenizer.decode(generated.squeeze().tolist(), skip_special_tokens=True)
abs_summary = AbsSummarization() | 38.833333 | 91 | 0.815451 | 81 | 699 | 6.777778 | 0.481481 | 0.118397 | 0.065574 | 0.114754 | 0.233151 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.085837 | 699 | 18 | 92 | 38.833333 | 0.859155 | 0 | 0 | 0 | 0 | 0 | 0.08 | 0.08 | 0 | 0 | 0 | 0 | 0 | 1 | 0.153846 | false | 0 | 0.230769 | 0 | 0.538462 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 |
40a02814845a829728726e29b79dfead7feb2132 | 3,401 | py | Python | PythonFiles_DataScience/demo37_pythonfordatascience.py | mahnooranjum/Programming_DataScience | f7a4215d4615b3f8460c3a1944a585628cf6930d | [
"MIT"
] | null | null | null | PythonFiles_DataScience/demo37_pythonfordatascience.py | mahnooranjum/Programming_DataScience | f7a4215d4615b3f8460c3a1944a585628cf6930d | [
"MIT"
] | null | null | null | PythonFiles_DataScience/demo37_pythonfordatascience.py | mahnooranjum/Programming_DataScience | f7a4215d4615b3f8460c3a1944a585628cf6930d | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""Demo37_PythonforDataScience.ipynb
# PYTHON FOR DATA SCIENCE
We will take our python programming skills a step further and process large data in it. Python is an excellent language for deployment. Hence we will be using open source data during the learning process!!
This will make sure we understand the challenges a Data Scientist can face and how to deal with them. In my experience, Data Preprocessing takes 70% of the time in any project. Hence it is crucial for any Data Scientist to know what it is and how it is done.
This may be the boring portion of the course but I assure you, you will feel accomplished by the end of this tutorial.
- Python Basics
- Object Oriented Python
- **Python for Data Science**
- NumPy
- Pandas
- Plotting
- Matplotlib
- Seaborn
Let's get coding !!
"""
# NOTE(review): this file is a notebook export; each group of statements below
# was a Colab cell, and several lines are deliberate errors used for teaching.
# Running the file as a script stops at the first of them.

#Variables can not start with a number
12var = 1  # deliberate SyntaxError demo: identifiers cannot start with a digit
_13var = 1  # a leading underscore makes the name valid
name = "Mahnoor"
surname = "Anjum"
age = 21
# Positional str.format placeholders
print("I'm {} {} and I am {} years old.".format(name, surname, age))
name = "Mahnoor"
surname = "Anjum"
age = 21
# Named str.format placeholders
print("I'm {_1} {_2} and I am {_3} years old.".format(_1 = name, _2= surname, _3 = age))
"""### INDEXING AND SLICING
One of the most important Python concept for data scientists is the slicing operator ':'
"""
str = "ONE TWO THREE FOUR FIVE"  # NOTE: shadows the builtin `str` (tutorial code)
print(str[0])
print(str[5])
print(str[len(str)-1])
str[:5]
str[5:]
str[1]="a"  # deliberate TypeError demo: strings are immutable
nested = [1,2,3,['_1','_2','_3',['__1']]]
nested[0]
nested[3][0]
len(nested)
len(nested[3])
nested[3][3]
nested[3][3][0]
# Backslash line continuations inside a dict literal; shadows the builtin `dict`
dict = {'key1':'value1', \
        'key2': 'value2', \
        'key3':'value3'}
dict['key1']
T = True
F = False
var = 10
for i in range(var):
    print(i)
for i in range(var):
    bool = (i==2)  # NOTE: shadows the builtin `bool` (tutorial code)
    if bool:
        break
print(i)  # the loop variable survives the loop
[1,2,3,1,1,2,3,4]
(1,2,3,1,1,2,3,4)
{1,2,3,1,1,2,3,4}  # set literal: duplicates are removed
new_set = set([1,2,3,1,1,2,3,4])
new_set.add(5)
new_set
for item in new_set:
    print(item)
list(range(4))
my_list = list(range(5,10))
# Building a list with an explicit loop...
output = []
for number in my_list:
    output.append(number**3)
output
# ...versus the equivalent list comprehension
output = [num**3 for num in my_list]
output
"""### FUNCTIONS"""
def my_function(parameter):
    print(parameter)
my_function("Jalebi (Hungry okay?)")
def my_function(parameter="Default"):  # redefinition, now with a default argument
    print(parameter)
my_function()
num = 4
def change(par):
    par =5
    return par
change(num)
num  # unchanged: rebinding the parameter does not affect the caller's int
num = 4
def change(par):
    par =5
    return par
change(num)
num
num = [4]
def change(par):
    # Mutating the list in place IS visible to the caller
    par.append(5)
    del par[0]
    return par
change(num)
num
my_list
"""### LAMBDA EXPRESSIONS"""
def square(x): return x*x
list(map(square, my_list))
list(map(lambda x:x*x, my_list))
"""### BUILT-IN FUNCTIONS"""
s = "We have a hulk !!!"
s.lower()
s.upper()
s.split()
dict = {'key1':1,'key2':2}
dict.keys()
dict.values()
dict.items()
my_list.pop()
my_list
"""### TUPLE UNPACKING"""
list_of_tuples =[(1,2),(3,4),(5,6)]
for (a,b) in list_of_tuples:
    print (a)
    print (b)
"""### WELCOME TO THE END OF THE TUTORIAL
You made it!! Hope you enjoyed taking this tutorial as much as I enjoyed coding it. From the next tutorial, we will be starting our first Data Science Library called NumPy. Until then, happy coding.
---------------------------------------------------------------------------------
Copyrights © 2018, All Rights Reserved.
- Author: Mahnoor Anjum.
- Course: The Complete Hands-On Machine Learning Course
- Date Created: 2018-06-27
- Date Modified: -
""" | 18.284946 | 259 | 0.643046 | 569 | 3,401 | 3.787346 | 0.377856 | 0.011137 | 0.015313 | 0.011137 | 0.122506 | 0.099768 | 0.099768 | 0.099768 | 0.099768 | 0.067285 | 0 | 0.043904 | 0.18965 | 3,401 | 186 | 260 | 18.284946 | 0.737663 | 0.017054 | 0 | 0.329897 | 0 | 0 | 0.118523 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0 | null | null | 0.123711 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
40a5c13d7bfe8ebdc535f6e928718db2cd73a81f | 623 | py | Python | src/11/11367.py | youngdaLee/Baekjoon | 7d858d557dbbde6603fe4e8af2891c2b0e1940c0 | [
"MIT"
] | 11 | 2020-09-20T15:17:11.000Z | 2022-03-17T12:43:33.000Z | src/11/11367.py | youngdaLee/Baekjoon | 7d858d557dbbde6603fe4e8af2891c2b0e1940c0 | [
"MIT"
] | 3 | 2021-10-30T07:51:36.000Z | 2022-03-09T05:19:23.000Z | src/11/11367.py | youngdaLee/Baekjoon | 7d858d557dbbde6603fe4e8af2891c2b0e1940c0 | [
"MIT"
] | 13 | 2021-01-21T03:19:08.000Z | 2022-03-28T10:44:58.000Z | """
11367. Report Card Time
작성자: xCrypt0r
언어: Python 3
사용 메모리: 29,380 KB
소요 시간: 64 ms
해결 날짜: 2020년 9월 18일
"""
def main():
    """Read N (name, score) pairs from stdin and print each name with its letter grade."""
    # Cutoffs checked from highest to lowest; below 60 falls through to 'F'.
    cutoffs = ((97, 'A+'), (90, 'A'), (87, 'B+'), (80, 'B'),
               (77, 'C+'), (70, 'C'), (67, 'D+'), (60, 'D'))
    for _ in range(int(input())):
        name, raw_score = input().split()
        score = int(raw_score)
        grade = 'F'
        for cutoff, letter in cutoffs:
            if score >= cutoff:
                grade = letter
                break
        print(name + ' ' + grade)
if __name__ == '__main__':
main()
| 20.096774 | 37 | 0.499197 | 87 | 623 | 3.471264 | 0.586207 | 0.208609 | 0.066225 | 0.099338 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.091358 | 0.34992 | 623 | 30 | 38 | 20.766667 | 0.654321 | 0.163724 | 0 | 0 | 0 | 0 | 0.042969 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.0625 | false | 0 | 0 | 0 | 0.0625 | 0.0625 | 0 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
40a99a0c9d4869b889926f6fe54b50b768c6cb98 | 4,160 | py | Python | git_talk/lib/changelog/main.py | cove9988/git-talk | 9f549d8565948a150834bcaa704b55ae15c094c1 | [
"MIT"
] | 5 | 2020-04-06T11:00:27.000Z | 2020-09-30T15:16:56.000Z | git_talk/lib/changelog/main.py | ggdrg/git-talk | 89ed00caa6a426ea9d5fa84cbef588d07aebc1f0 | [
"MIT"
] | 3 | 2020-09-26T02:53:30.000Z | 2020-10-09T01:46:37.000Z | git_talk/lib/changelog/main.py | ggdrg/git-talk | 89ed00caa6a426ea9d5fa84cbef588d07aebc1f0 | [
"MIT"
] | 1 | 2020-09-25T23:41:54.000Z | 2020-09-25T23:41:54.000Z |
import os
import logging
from typing import Optional
import click
from git_talk.lib.changelog import generate_changelog
from git_talk.lib.changelog.presenter import MarkdownPresenter
from git_talk.lib.changelog.repository import GitRepository
# @click.command()
# @click.option(
# "-r",
# "--repo",
# type=click.Path(exists=True),
# default=".",
# help="Path to the repository's root directory [Default: .]",
# )
# @click.option("-t", "--title", default="Changelog", help="The changelog's title [Default: Changelog]")
# @click.option("-d", "--description", help="Your project's description")
# @click.option(
# "-o",
# "--output",
# type=click.File("w"),
# default="CHANGELOG.md",
# help="The place to save the generated changelog [Default: CHANGELOG.md]",
# )
# @click.option("-r", "--remote", default="origin", help="Specify git remote to use for links")
# @click.option("-v", "--latest-version", type=str, help="use specified version as latest release")
# @click.option("-u", "--unreleased", is_flag=True, default=False, help="Include section for unreleased changes")
# @click.option("--diff-url", default=None, help="override url for compares, use {current} and {previous} for tags")
# @click.option("--issue-url", default=None, help="Override url for issues, use {id} for issue id")
# @click.option(
# "--issue-pattern",
# default=r"(#([\w-]+))",
# help="Override regex pattern for issues in commit messages. Should contain two groups, original match and ID used "
# "by issue-url.",
# )
# @click.option(
# "--tag-pattern",
# default=None,
# help="override regex pattern for release tags. "
# "By default use semver tag names semantic. "
# "tag should be contain in one group named 'version'.",
# )
# @click.option("--tag-prefix", default="", help='prefix used in version tags, default: "" ')
# @click.option("--stdout", is_flag=True)
# @click.option("--tag-pattern", default=None, help="Override regex pattern for release tags")
# @click.option("--starting-commit", help="Starting commit to use for changelog generation", default="")
# @click.option("--stopping-commit", help="Stopping commit to use for changelog generation", default="HEAD")
# @click.option(
# "--debug", is_flag=True, help="set logging level to DEBUG",
# )
def main(
    repo,
    description,
    latest_version,
    title="Changelog",
    output="CHANGELOG.md",
    remote="origin",
    unreleased=False,
    diff_url=None,
    issue_url=None,
    issue_pattern=r"(#([\w-]+))",
    tag_prefix="",
    stdout=True,
    tag_pattern=None,
    starting_commit="",
    stopping_commit="HEAD",
    debug=False,
):
    """Generate CHANGELOG.md for the git repository at *repo*.

    :param repo: path to the repository's root directory (made absolute below)
    :param description: the project's description
    :param latest_version: version string to use for the latest release
    :param title: the changelog's title
    :param remote: git remote to use for links
    :param unreleased: include a section for unreleased changes
    :param diff_url: override URL for compares ({current}/{previous} placeholders)
    :param issue_url: override URL for issues ({id} placeholder), or None
    :param issue_pattern: regex for issue references in commit messages
    :param tag_prefix / tag_pattern: control which tags count as releases
    :param starting_commit / stopping_commit: commit range for generation
    :param debug: enable DEBUG-level logging

    BUGFIX: the defaults of ``issue_url`` and ``issue_pattern`` were swapped --
    per the commented click decorators above, the two-group regex is the
    default *pattern* while the URL default is None.
    """
    if debug:
        logging.basicConfig(level=logging.DEBUG)
        logging.debug("Logging level has been set to DEBUG")

    # Convert the repository name to an absolute path
    repo = os.path.abspath(repo)
    repository = GitRepository(
        repo,
        latest_version=latest_version,
        skip_unreleased=not unreleased,
        tag_prefix=tag_prefix,
        tag_pattern=tag_pattern,
    )
    presenter = MarkdownPresenter()
    changelog = generate_changelog(
        repository,
        presenter,
        title,
        description,
        remote=remote,
        issue_pattern=issue_pattern,
        issue_url=issue_url,
        diff_url=diff_url,
        starting_commit=starting_commit,
        stopping_commit=stopping_commit,
    )
    # NOTE(review): `output` and `stdout` are currently unused -- the changelog
    # is always written to CHANGELOG.md in the repository root.
    changelog_file = os.path.join(repo, "CHANGELOG.md")
    write_changelog(changelog_file, changelog)
def write_changelog(changelog_file, changelog):
    """Write *changelog* to *changelog_file*, replacing any existing content.

    The previous implementation read the old file first but discarded the data
    (the prepend behaviour was commented out), so both its branches reduced to
    a plain overwrite -- done here in one step.
    """
    with open(changelog_file, 'w') as f:
        f.write(changelog)
if __name__ == "__main__":
    # NOTE(review): main() has three required positional parameters
    # (repo, description, latest_version), so running this module directly
    # raises TypeError -- the click CLI decorators above are commented out.
    main()
40ac4ec777b7bc387be14a996d46bdf5f0da5291 | 2,416 | py | Python | tests.py | ckelly/pybingmaps | 9214e3a4c2c9e756848fac7c0d76c46dcc64b65d | [
"MIT"
] | null | null | null | tests.py | ckelly/pybingmaps | 9214e3a4c2c9e756848fac7c0d76c46dcc64b65d | [
"MIT"
] | null | null | null | tests.py | ckelly/pybingmaps | 9214e3a4c2c9e756848fac7c0d76c46dcc64b65d | [
"MIT"
] | null | null | null | import unittest
import random
from time import sleep
import os
from bingmaps import *
class BingMapsTestError(Exception):
    """Bing Maps test exception."""

    def __init__(self, reason):
        # BUGFIX: the py2-era code called unicode(), which raises NameError on
        # Python 3; str() preserves the intent (store a text message).
        self.reason = str(reason)

    def __str__(self):
        return self.reason
# TODO: enter your key for testing
api_key = ''
class DirectionsTests(unittest.TestCase):
    """Integration tests for BingMapsAPI.routes.

    NOTE(review): these hit the live Bing Maps service, so they require a
    valid `api_key` above and network access.
    """

    def setUp(self):
        # A fresh API client for every test case.
        self.api = BingMapsAPI(api_key=api_key)

    def testBasicNav(self):
        """Driving route between two fixed San Francisco points."""
        # start - 717 Market St
        # end - Ferry Plaza, San Francisco, CA
        # we shrunk the precision to match return values for easier comparison
        start_lat = "37.786861"
        start_lon = "-122.403689"
        end_lat = "37.795556"
        end_lon = "-122.392124"
        start = start_lat+","+start_lon
        end = end_lat+","+end_lon
        ret = self.api.routes(waypoints=[start, end])
        # verify start and end points are reflected in response
        self.assertNotEqual(ret, {})
        estimated_total = ret['resourceSets'][0]['estimatedTotal']
        self.assertEqual(estimated_total, 1)
        routeLegs = ret['resourceSets'][0]['resources'][0]['routeLegs']
        self.assertEqual(len(routeLegs), 1)
        itinerary_items = routeLegs[0]['itineraryItems']
        self.assertNotEqual(itinerary_items, [])
        # skip the last step, as it doesn't have a transport Mode
        # NOTE(review): the loop below does NOT actually skip the last item,
        # despite the comment above -- confirm against the API's response shape.
        for i in itinerary_items:
            self.assertEqual(i['details'][0]['mode'], 'Driving')

    def testPedestrianNav(self):
        """Walking route between the same two points (full-precision coords)."""
        start_lat = "37.7868609332517"
        start_lon = "-122.403689949149"
        end_lat = "37.795556930015"
        end_lon = "-122.392124051039"
        start = start_lat+","+start_lon
        end = end_lat+","+end_lon
        ret = self.api.routes(waypoints=[start,end], travelMode='Walking')
        self.assertNotEqual(ret, {})
        legs = ret['resourceSets'][0]['resources'][0]['routeLegs']
        self.assertNotEqual(legs, [])
        legs = legs[0]
        itinerary_items = legs['itineraryItems']
        self.assertNotEqual(itinerary_items, [])
        # skip the last step, as it doesn't have a transport Mode
        # NOTE(review): same as above -- the loop iterates every item.
        for i in itinerary_items:
            self.assertEqual(i['details'][0]['mode'], 'Walking')
if __name__ == '__main__':
unittest.main() | 29.82716 | 78 | 0.598096 | 272 | 2,416 | 5.154412 | 0.400735 | 0.059914 | 0.034237 | 0.025678 | 0.349501 | 0.349501 | 0.349501 | 0.293866 | 0.293866 | 0.293866 | 0 | 0.061921 | 0.284768 | 2,416 | 81 | 79 | 29.82716 | 0.749421 | 0.146109 | 0 | 0.204082 | 0 | 0 | 0.133528 | 0 | 0 | 0 | 0 | 0.012346 | 0.183673 | 1 | 0.102041 | false | 0 | 0.102041 | 0.020408 | 0.265306 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
40adb16a80ad4faf260352c08db6efc0124c7ac3 | 450 | py | Python | awardapp/migrations/0004_auto_20191024_1607.py | Elisephan/Awards-project | 269bfbe45a35338fab9c71fc7d8de48b61b1580b | [
"MIT"
] | null | null | null | awardapp/migrations/0004_auto_20191024_1607.py | Elisephan/Awards-project | 269bfbe45a35338fab9c71fc7d8de48b61b1580b | [
"MIT"
] | null | null | null | awardapp/migrations/0004_auto_20191024_1607.py | Elisephan/Awards-project | 269bfbe45a35338fab9c71fc7d8de48b61b1580b | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2019-10-24 16:07
from __future__ import unicode_literals
from django.db import migrations, models
# Auto-generated Django migration: schema change only; do not edit by hand.
class Migration(migrations.Migration):

    # Must be applied after the previous awardapp migration.
    dependencies = [
        ('awardapp', '0003_auto_20191024_1606'),
    ]

    operations = [
        # NOTE(review): per Django docs, max_length on a TextField is not
        # enforced at the model or database level; it only affects the widget.
        migrations.AlterField(
            model_name='project',
            name='link',
            field=models.TextField(max_length=130),
        ),
    ]
| 21.428571 | 51 | 0.617778 | 49 | 450 | 5.469388 | 0.836735 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.106061 | 0.266667 | 450 | 20 | 52 | 22.5 | 0.706061 | 0.146667 | 0 | 0 | 1 | 0 | 0.110236 | 0.060367 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.153846 | 0 | 0.384615 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
40b182cffd8ba6689e9b3d11caa57c733d863c65 | 2,646 | py | Python | supervised_learning/analysis.py | gonzalezJohnas/SpeechCommand-recognition | d5351abe45c571a075c24bd04d328e76293f9230 | [
"MIT"
] | null | null | null | supervised_learning/analysis.py | gonzalezJohnas/SpeechCommand-recognition | d5351abe45c571a075c24bd04d328e76293f9230 | [
"MIT"
] | 2 | 2021-04-10T18:12:44.000Z | 2022-02-09T23:36:43.000Z | supervised_learning/analysis.py | gonzalezJohnas/SpeechCommand-recognition | d5351abe45c571a075c24bd04d328e76293f9230 | [
"MIT"
] | null | null | null | from global_utils import *
# target word
TARGET_WORD = 'right'
def display_lowpass_normal(wav, lowpass_signal, fs, label=''):
    """Draw two figures: raw vs low-pass-filtered signal in the time domain,
    then the periodogram (power spectral density) of each.

    wav / lowpass_signal: the unfiltered and filtered sample arrays
    fs: sample rate in Hz; label: title suffix for the raw-signal axes.
    """
    def _two_panel_figure():
        # Shared figure setup: two stacked axes with the standard size/padding.
        figure, (upper, lower) = plt.subplots(2)
        figure.tight_layout(pad=3.0)
        figure.set_figheight(FIG_HEIGHT)
        figure.set_figwidth(FIG_WIDTH)
        return upper, lower

    # Figure 1: time-domain view of both signals.
    upper, lower = _two_panel_figure()
    upper.plot(wav)
    upper.set_ylabel("Amplitude", fontsize=FONT_SIZE)
    upper.set_xlabel("Time", fontsize=FONT_SIZE)
    upper.set_title("Audio sample : {}".format(label), fontsize=FONT_SIZE)
    lower.plot(lowpass_signal)
    lower.set_ylabel("Amplitude", fontsize=FONT_SIZE)
    lower.set_xlabel("Time", fontsize=FONT_SIZE)
    lower.set_title("Audio sample with low pass filter", fontsize=FONT_SIZE)

    # Figure 2: periodograms of both signals.
    freqs, psd_raw = signal.periodogram(wav, fs)
    freqs, psd_low = signal.periodogram(lowpass_signal, fs)
    upper, lower = _two_panel_figure()
    upper.semilogy(freqs, psd_raw)
    upper.set_xlabel('frequency [Hz]', fontsize=FONT_SIZE)
    upper.set_ylabel('PSD [V**2/Hz]', fontsize=FONT_SIZE)
    upper.set_title("Periodogram raw signal", fontsize=FONT_SIZE)
    lower.semilogy(freqs, psd_low)
    lower.set_xlabel('frequency [Hz]', fontsize=FONT_SIZE)
    lower.set_ylabel('PSD [V**2/Hz]', fontsize=FONT_SIZE)
    lower.set_title("Periodogram low pass filtered signal", fontsize=FONT_SIZE)
def main(args):
    """Visualise either one .wav file (--wavfile) or the averaged samples of
    TARGET_WORD from a data directory (--indir)."""
    if args.wavfile:
        # Single file: low-pass filter it and show raw vs filtered views.
        fs, wav = wavfile.read(args.wavfile, "wb")
        filtered = low_pass_filter(wav, sample_rate=fs, cutoff_frequency=1000)
        display_lowpass_normal(wav, filtered, fs)
        plt.show()
    elif args.indir:
        # Directory: average the low-pass response over the word's samples.
        word_samples = get_data(args.indir)[TARGET_WORD]
        mean_lowpass, mean_raw = mean_low_pass_filter(word_samples, SAMPLE_RATE, CUTOFF_FREQ)
        display_lowpass_normal(mean_raw, mean_lowpass, SAMPLE_RATE, TARGET_WORD)
        plt.show()
    return 0
# CLI entry point: exactly one of --wavfile / --indir is expected by main().
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--wavfile',
        help='Path to the .wav files',
        required=False
    )
    parser.add_argument(
        '--indir',
        help='Absolute path to data directory containing .wav files',
        required=False
    )
    args = parser.parse_args()
    main(args)
| 29.730337 | 103 | 0.708239 | 368 | 2,646 | 4.769022 | 0.252717 | 0.082051 | 0.109402 | 0.08661 | 0.378348 | 0.364103 | 0.345299 | 0.259829 | 0.249573 | 0.192593 | 0 | 0.006032 | 0.185563 | 2,646 | 88 | 104 | 30.068182 | 0.808353 | 0.032502 | 0 | 0.214286 | 0 | 0 | 0.115159 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.035714 | false | 0.160714 | 0.017857 | 0 | 0.071429 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 |
40bab880835594679397baae0088587d6d0269a6 | 2,904 | py | Python | Manipulation of PDF Files/pandf_gui.py | clair513/DIY | 843770590a729c6aabf63367a3ab848e21ab78b9 | [
"MIT"
] | 1 | 2019-12-18T17:28:11.000Z | 2019-12-18T17:28:11.000Z | Manipulation of PDF Files/pandf_gui.py | clair513/DIY | 843770590a729c6aabf63367a3ab848e21ab78b9 | [
"MIT"
] | null | null | null | Manipulation of PDF Files/pandf_gui.py | clair513/DIY | 843770590a729c6aabf63367a3ab848e21ab78b9 | [
"MIT"
] | null | null | null | # Importing required packages:
import pandas as pd
from tkinter import *
from tkinter.ttk import *
root = Tk()
# To visualize input DataFrame:
def generate_plot(gui_root, df, x_axis, y_axis=None, plot=None, aesthetics=None):
    """
    DESCRIPTION: Reads an input Pandas DataFrame and draws the selected plot
    on a Tkinter canvas embedded in *gui_root*.

    PARAMETERS:
    > gui_root   : [Required] Tkinter application base class (Tk) instance.
    > df         : [Required] Pandas DataFrame to visualise.
    > x_axis     : [Required] Column name used for the x axis.
    > y_axis     : [Optional] Column name for the y axis (ignored by 'distplot').
    > plot       : [Optional] dict with keys 'type' (one of 'lineplot',
                   'regplot', 'distplot', 'barplot') and 'hue'. Missing keys
                   fall back to {'type': None, 'hue': None}.
    > aesthetics : [Optional] dict with keys 'style', 'palette', 'size', 'dpi'.
                   Missing keys fall back to the defaults below.
    """
    # Importing external dependencies:
    import matplotlib.pyplot as plt
    from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
    from matplotlib.figure import Figure
    import seaborn as sns
    import warnings
    warnings.filterwarnings('ignore')

    # BUGFIX: `plot` and `aesthetics` used to be mutable default arguments
    # (dicts shared across calls). Merge caller-supplied values over fresh
    # defaults instead, which also lets callers pass partial dicts.
    plot_settings = {'type': None, 'hue': None}
    plot_settings.update(plot or {})
    theme = {'style': 'whitegrid', 'palette': 'hsv', 'size': (10, 7), 'dpi': 100}
    theme.update(aesthetics or {})
    sns.set(style=theme['style'], palette=theme['palette'])

    # Defining Tableau colors (0-255 RGB triples):
    tableau_20 = [(31, 119, 180), (174, 199, 232), (255, 127, 14), (255, 187, 120),
                  (44, 160, 44), (152, 223, 138), (214, 39, 40), (255, 152, 150),
                  (148, 103, 189), (197, 176, 213), (140, 86, 75), (196, 156, 148),
                  (227, 119, 194), (247, 182, 210), (127, 127, 127), (199, 199, 199),
                  (188, 189, 34), (219, 219, 141), (23, 190, 207), (158, 218, 229)]
    # Scaling RGB values to the [0, 1] range Matplotlib expects:
    tableau_20 = [(r / 255., g / 255., b / 255.) for r, g, b in tableau_20]

    # Setting up the Tkinter frame that hosts the canvas:
    lf = Labelframe(gui_root)
    lf.grid(row=0, column=0, sticky='nwes', padx=3, pady=3)

    # Matplotlib figure backing the Tkinter canvas:
    fig = Figure(figsize=theme['size'], dpi=theme['dpi'])
    ax = fig.add_subplot(111)

    # Drawing the requested plot with Seaborn:
    if plot_settings['type'] == 'lineplot':
        g = sns.lineplot(x=x_axis, y=y_axis, data=df, ax=ax)
    elif plot_settings['type'] == 'regplot':
        g = sns.regplot(x=x_axis, y=y_axis, data=df, color=tableau_20[16], ax=ax)
    elif plot_settings['type'] == 'distplot':
        g = sns.distplot(a=df[x_axis].dropna(), color=tableau_20[7],
                         hist_kws=dict(edgecolor='k', linewidth=0.5), ax=ax)
    elif plot_settings['type'] == 'barplot':  # grouped barplot
        g = sns.catplot(x=x_axis, y=y_axis, hue=plot_settings['hue'], data=df,
                        kind="bar", palette='rocket', ax=ax)
        g.despine(left=True)
    else:
        # Unsupported or unspecified plot type: leave the axes empty.
        pass

    # Displaying the figure on the Tkinter canvas:
    canvas = FigureCanvasTkAgg(fig, master=lf)
    canvas.draw()
    canvas.get_tk_widget().grid(row=0, column=0)
# NOTE(review): generate_plot() is called without its required arguments
# (gui_root, df, x_axis) and therefore raises TypeError at startup — this
# looks like a demo stub; supply a DataFrame and axis names before shipping.
generate_plot()
# Enter the Tk event loop.
root.mainloop()
| 38.72 | 107 | 0.587466 | 384 | 2,904 | 4.377604 | 0.515625 | 0.032124 | 0.014277 | 0.012493 | 0.074955 | 0.028554 | 0.021416 | 0.021416 | 0 | 0 | 0 | 0.100047 | 0.266873 | 2,904 | 74 | 108 | 39.243243 | 0.689526 | 0.219353 | 0 | 0 | 1 | 0 | 0.05855 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.022222 | false | 0.022222 | 0.177778 | 0 | 0.2 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
40bc7b5a674a2b504a89d6769ec57fdcc5fda4af | 357 | py | Python | Chapter03/Activity11/fibonacci.py | vumaasha/python-workshop | 0fbc21c514a8df5bfffb8db926e451232c6c08bf | [
"MIT"
] | null | null | null | Chapter03/Activity11/fibonacci.py | vumaasha/python-workshop | 0fbc21c514a8df5bfffb8db926e451232c6c08bf | [
"MIT"
] | null | null | null | Chapter03/Activity11/fibonacci.py | vumaasha/python-workshop | 0fbc21c514a8df5bfffb8db926e451232c6c08bf | [
"MIT"
] | null | null | null | def fibonacci_iterative(n):
previous = 0
current = 1
for i in range(n - 1):
current_old = current
current = previous + current
previous = current_old
return current
def fibonacci_recursive(n):
    """Return the n-th Fibonacci number via naive double recursion (F(0)=0, F(1)=1)."""
    # Base cases F(0)=0 and F(1)=1 simply echo n back; otherwise sum the
    # two preceding Fibonacci numbers.
    return n if n in (0, 1) else fibonacci_recursive(n - 2) + fibonacci_recursive(n - 1)
| 23.8 | 70 | 0.602241 | 47 | 357 | 4.446809 | 0.404255 | 0.028708 | 0.272727 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.028689 | 0.316527 | 357 | 14 | 71 | 25.5 | 0.827869 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.153846 | false | 0 | 0 | 0 | 0.384615 | 0 | 0 | 0 | 0 | null | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
40c0c0515519976b7d3396916ff20c4b1d6edd0a | 126 | py | Python | app/domain/__init__.py | emge1/tracardi | 0a4a8a38f0f769464f50d3c1113b798107810810 | [
"MIT"
] | null | null | null | app/domain/__init__.py | emge1/tracardi | 0a4a8a38f0f769464f50d3c1113b798107810810 | [
"MIT"
] | null | null | null | app/domain/__init__.py | emge1/tracardi | 0a4a8a38f0f769464f50d3c1113b798107810810 | [
"MIT"
] | null | null | null | __all__ = [
'session',
'event',
'profile',
'consent',
'segment',
'source',
'rule',
'entity'
]
| 11.454545 | 14 | 0.444444 | 9 | 126 | 5.777778 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.349206 | 126 | 10 | 15 | 12.6 | 0.634146 | 0 | 0 | 0 | 0 | 0 | 0.388889 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
40c712bda8811c80835db84231a9e91605ae40b6 | 675 | py | Python | src/main/management/commands/create_admin_user.py | LokotamaTheMastermind/website-portfolio-django-project | 932d509428d592ee573ff82821b9490c8da9600a | [
"Apache-2.0"
] | null | null | null | src/main/management/commands/create_admin_user.py | LokotamaTheMastermind/website-portfolio-django-project | 932d509428d592ee573ff82821b9490c8da9600a | [
"Apache-2.0"
] | null | null | null | src/main/management/commands/create_admin_user.py | LokotamaTheMastermind/website-portfolio-django-project | 932d509428d592ee573ff82821b9490c8da9600a | [
"Apache-2.0"
] | null | null | null | # polls/management/commands/create_admin_user.py
import sys
import logging
from django.core.management.base import BaseCommand, CommandError
from django.contrib.auth.models import User
from django.conf import settings
class Command(BaseCommand):
    """Management command that bootstraps a superuser account named ``admin``.

    Idempotent: if the account already exists nothing is created. Exits the
    process via sys.exit() when done.
    """

    help = 'Creates the initial admin user'

    def handle(self, *args, **options):
        admin_present = User.objects.filter(username="admin").exists()
        if admin_present:
            print("admin exists")
        else:
            account = User(username='admin')
            # NOTE(review): hardcoded credential — rotate/parameterize before
            # using in production.
            account.set_password('website-portfolio-project')
            account.is_superuser = True
            account.is_staff = True
            account.save()
            print("admin created")
        sys.exit()
| 28.125 | 65 | 0.642963 | 81 | 675 | 5.296296 | 0.641975 | 0.06993 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.253333 | 675 | 23 | 66 | 29.347826 | 0.85119 | 0.068148 | 0 | 0 | 0 | 0 | 0.143541 | 0.039872 | 0 | 0 | 0 | 0 | 0 | 1 | 0.055556 | false | 0.055556 | 0.277778 | 0 | 0.444444 | 0.111111 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 |
40c7452a82c23c82f183d4188dfd8d42aa979d41 | 1,597 | py | Python | app.py | jdanper/incredipaper | 4c2d2dc1e2280f19e01d2e8faea4c9a1ae9b5449 | [
"MIT"
] | null | null | null | app.py | jdanper/incredipaper | 4c2d2dc1e2280f19e01d2e8faea4c9a1ae9b5449 | [
"MIT"
] | null | null | null | app.py | jdanper/incredipaper | 4c2d2dc1e2280f19e01d2e8faea4c9a1ae9b5449 | [
"MIT"
] | null | null | null | import unirest
import json
import requests
import os
import subprocess
import time
import argparse
# Base endpoint of the Unsplash REST API.
rootUrl = "https://api.unsplash.com/"
# Default headers sent with every unirest request.
unirest.default_header("Accept", "application/json")
unirest.default_header("Accept-Version", "v1")
# NOTE(review): placeholder token — replace with a real Unsplash client id
# before running.
unirest.default_header("Authorization","<CLIENT-ID>")
def downloadPic(randomPic_response):
    """Download the photo described by a unirest Unsplash response and hand
    the raw bytes to applyWallpaper.

    randomPic_response.body is the decoded JSON of a single photo (contains
    "user" and "urls" keys).
    """
    content = randomPic_response.body
    print 'getting an amazing photo from Unsplash by %s ' % content["user"]["username"]
    # Fetch the raw bytes of the "regular"-sized rendition over HTTP.
    picData = requests.get(randomPic_response.body["urls"]["regular"]).content#, callback=applyWallpaper)#.body["urls"]["regular"]
    applyWallpaper(picData)
def applyWallpaper(picStream):
    """Write the image bytes to ~/.tempWallpaper.jpg and set it as the
    desktop background (xfconf for Xfce/xubuntu sessions, gsettings/GNOME
    otherwise)."""
    path = os.path.expanduser('~')+'/.tempWallpaper.jpg'
    with open(path, 'wb') as handler:
        print "saving"
        handler.write(picStream)
        print "enjoy your new wallpaper."
    # Pick the desktop-environment-specific command to apply the file.
    if os.environ.get('DESKTOP_SESSION') == "xubuntu":
        os.system('xfconf-query -c xfce4-desktop -p /backdrop/screen0/monitor0/workspace0/last-image && xfconf-query -c xfce4-desktop -p /backdrop/screen0/monitor0/workspace0/last-image -s %s' %path)
    else:
        os.system('gsettings set org.gnome.desktop.background picture-uri file:///%s' % path)
while True:
parser = argparse.ArgumentParser()
parser.add_argument('integers', metavar='int', type=int, help='time between wallpaper change (in seconds)')
args = parser.parse_args()
print "waiting for %s seconds" % args.integers
time.sleep(args.integers)
downloadPic(unirest.get(rootUrl + "photos/random", params={"orientation":"landscape"}))#.body["id"]
| 40.948718 | 204 | 0.707577 | 194 | 1,597 | 5.778351 | 0.572165 | 0.037467 | 0.053524 | 0.046387 | 0.119536 | 0.119536 | 0.119536 | 0.119536 | 0.119536 | 0.119536 | 0 | 0.006608 | 0.147151 | 1,597 | 38 | 205 | 42.026316 | 0.816446 | 0.038197 | 0 | 0 | 0 | 0.030303 | 0.375082 | 0.080887 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.212121 | null | null | 0.121212 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
40c934e19e6344f536502d3f0e951d55cb483721 | 5,641 | py | Python | nc/models.py | caktus/Traffic-Stops | 2c6eda9477f1770c5ad1208a1937c3e828fbfb28 | [
"MIT"
] | 1 | 2021-12-10T14:58:11.000Z | 2021-12-10T14:58:11.000Z | nc/models.py | caktus/Traffic-Stops | 2c6eda9477f1770c5ad1208a1937c3e828fbfb28 | [
"MIT"
] | 5 | 2020-08-12T15:20:31.000Z | 2021-06-10T13:43:02.000Z | nc/models.py | caktus/Traffic-Stops | 2c6eda9477f1770c5ad1208a1937c3e828fbfb28 | [
"MIT"
] | null | null | null | from caching.base import CachingManager, CachingMixin
from django.db import models
from tsdata.models import CensusProfile
# Coded reason the traffic stop was initiated.
PURPOSE_CHOICES = (
    (1, "Speed Limit Violation"),
    (2, "Stop Light/Sign Violation"),
    (3, "Driving While Impaired"),
    (4, "Safe Movement Violation"),
    (5, "Vehicle Equipment Violation"),
    (6, "Vehicle Regulatory Violation"),
    (7, "Seat Belt Violation"),
    (8, "Investigation"),
    (9, "Other Motor Vehicle Violation"),
    (10, "Checkpoint"),
)

# Action the officer took as a result of the stop.
ACTION_CHOICES = (
    (1, "Verbal Warning"),
    (2, "Written Warning"),
    (3, "Citation Issued"),
    (4, "On-View Arrest"),
    (5, "No Action Taken"),
)

# Role of a person in the stopped vehicle.
PERSON_TYPE_CHOICES = (("D", "Driver"), ("P", "Passenger"))

GENDER_CHOICES = (("M", "Male"), ("F", "Female"))

ETHNICITY_CHOICES = (("H", "Hispanic"), ("N", "Non-Hispanic"))

RACE_CHOICES = (
    ("A", "Asian"),
    ("B", "Black"),
    ("I", "Native American"),
    ("U", "Other"),
    ("W", "White"),
)

# Legal basis on which a search was conducted.
SEARCH_TYPE_CHOICES = (
    (1, "Consent"),
    (2, "Search Warrant"),
    (3, "Probable Cause"),
    (4, "Search Incident to Arrest"),
    (5, "Protective Frisk"),
)

# Observation that justified the search.
SEARCH_BASIS_CHOICES = (
    ("ER", "Erratic/Suspicious Behavior"),
    ("OB", "Observation of Suspected Contraband"),
    ("OI", "Other Official Information"),
    ("SM", "Suspicious Movement"),
    ("TIP", "Informant Tip"),
    ("WTNS", "Witness Observation"),
)
class Stop(CachingMixin, models.Model):
    """A single traffic stop reported by a law-enforcement agency."""

    stop_id = models.PositiveIntegerField(primary_key=True)
    agency_description = models.CharField(max_length=100)
    agency = models.ForeignKey("Agency", null=True, related_name="stops", on_delete=models.CASCADE)
    # Indexed: stops are commonly filtered/aggregated by date.
    date = models.DateTimeField(db_index=True)
    purpose = models.PositiveSmallIntegerField(choices=PURPOSE_CHOICES)
    action = models.PositiveSmallIntegerField(choices=ACTION_CHOICES)
    driver_arrest = models.BooleanField(default=False)
    passenger_arrest = models.BooleanField(default=False)
    encounter_force = models.BooleanField(default=False)
    engage_force = models.BooleanField(default=False)
    officer_injury = models.BooleanField(default=False)
    driver_injury = models.BooleanField(default=False)
    passenger_injury = models.BooleanField(default=False)
    officer_id = models.CharField(max_length=15)  # todo: keys
    stop_location = models.CharField(max_length=15)  # todo: keys
    stop_city = models.CharField(max_length=20)

    # Cache-aware manager (django-cache-machine).
    objects = CachingManager()
class Person(CachingMixin, models.Model):
    """A person (driver or passenger) involved in a Stop."""

    person_id = models.IntegerField(primary_key=True)
    stop = models.ForeignKey(Stop, on_delete=models.CASCADE)
    type = models.CharField(max_length=2, choices=PERSON_TYPE_CHOICES)  # driver vs. passenger
    age = models.PositiveSmallIntegerField()
    gender = models.CharField(max_length=2, choices=GENDER_CHOICES)
    ethnicity = models.CharField(max_length=2, choices=ETHNICITY_CHOICES)
    race = models.CharField(max_length=2, choices=RACE_CHOICES)

    # Cache-aware manager (django-cache-machine).
    objects = CachingManager()
class Search(CachingMixin, models.Model):
    """A search carried out during a Stop on a particular Person."""

    search_id = models.IntegerField(primary_key=True)
    stop = models.ForeignKey(Stop, on_delete=models.CASCADE)
    person = models.ForeignKey(Person, on_delete=models.CASCADE)
    type = models.PositiveSmallIntegerField(choices=SEARCH_TYPE_CHOICES)  # legal basis for search
    vehicle_search = models.BooleanField(default=False)
    driver_search = models.BooleanField(default=False)
    passenger_search = models.BooleanField(default=False)
    property_search = models.BooleanField(default=False)
    # NOTE: "siezed"/"sized" misspellings are preserved — they map to existing
    # database columns; renaming would require a migration.
    vehicle_siezed = models.BooleanField(default=False)
    personal_property_siezed = models.BooleanField(default=False)
    other_property_sized = models.BooleanField(default=False)

    # Cache-aware manager (django-cache-machine).
    objects = CachingManager()
class Contraband(CachingMixin, models.Model):
    """Quantities of contraband found during a Search (one row per find).

    Each quantity field records the amount found in the corresponding unit;
    fields default to 0 and may be NULL.
    """

    contraband_id = models.IntegerField(primary_key=True)
    search = models.ForeignKey(Search, on_delete=models.CASCADE)
    person = models.ForeignKey(Person, on_delete=models.CASCADE)
    stop = models.ForeignKey(Stop, on_delete=models.CASCADE)
    ounces = models.FloatField(default=0, null=True)
    pounds = models.FloatField(default=0, null=True)
    pints = models.FloatField(default=0, null=True)
    gallons = models.FloatField(default=0, null=True)
    dosages = models.FloatField(default=0, null=True)
    grams = models.FloatField(default=0, null=True)
    kilos = models.FloatField(default=0, null=True)
    money = models.FloatField(default=0, null=True)
    weapons = models.FloatField(default=0, null=True)
    dollar_amount = models.FloatField(default=0, null=True)

    # Cache-aware manager (django-cache-machine).
    objects = CachingManager()
class SearchBasis(CachingMixin, models.Model):
    """An observation that justified a Search (a Search may have several)."""

    search_basis_id = models.IntegerField(primary_key=True)
    search = models.ForeignKey(Search, on_delete=models.CASCADE)
    person = models.ForeignKey(Person, on_delete=models.CASCADE)
    stop = models.ForeignKey(Stop, on_delete=models.CASCADE)
    basis = models.CharField(max_length=4, choices=SEARCH_BASIS_CHOICES)

    # Cache-aware manager (django-cache-machine).
    objects = CachingManager()
class Agency(CachingMixin, models.Model):
    """A law-enforcement agency that reports traffic stops."""

    name = models.CharField(max_length=255)
    # link to CensusProfile (no cross-database foreign key)
    census_profile_id = models.CharField(max_length=16, blank=True, default="")
    last_reported_stop = models.DateField(null=True)

    # Cache-aware manager (django-cache-machine).
    objects = CachingManager()

    class Meta(object):
        verbose_name_plural = "Agencies"

    def __str__(self):
        return self.name

    @property
    def census_profile(self):
        """Census data dict for this agency, or {} when no profile is linked."""
        if self.census_profile_id:
            # Lazy cross-database lookup by string id (see comment on the field).
            profile = CensusProfile.objects.get(id=self.census_profile_id)
            return profile.get_census_dict()
        else:
            return dict()
| 34.607362 | 99 | 0.710867 | 651 | 5,641 | 6.015361 | 0.294931 | 0.064351 | 0.089377 | 0.107252 | 0.435649 | 0.280644 | 0.162666 | 0.162666 | 0.143258 | 0.143258 | 0 | 0.010586 | 0.162737 | 5,641 | 162 | 100 | 34.820988 | 0.818548 | 0.013296 | 0 | 0.119048 | 0 | 0 | 0.113269 | 0 | 0 | 0 | 0 | 0.006173 | 0 | 1 | 0.015873 | false | 0.031746 | 0.02381 | 0.007937 | 0.611111 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 |
40d5ed5ea76d8603996be2780920650b434417e6 | 9,213 | py | Python | Utils/Matrix.py | valavanisleonidas/Machine_Learning_Toolkit | 4a66e1419189e279a82fa6a7ff7945153308842a | [
"MIT"
] | null | null | null | Utils/Matrix.py | valavanisleonidas/Machine_Learning_Toolkit | 4a66e1419189e279a82fa6a7ff7945153308842a | [
"MIT"
] | null | null | null | Utils/Matrix.py | valavanisleonidas/Machine_Learning_Toolkit | 4a66e1419189e279a82fa6a7ff7945153308842a | [
"MIT"
] | null | null | null | import os
import platform
import numpy
class Matrix:
    """Helper utilities for loading image datasets into numpy arrays and
    splitting them into train/validation sets.

    NOTE(review): this class mixes Python 2 constructs (``xrange`` in
    ``chunker``, mutating the result of ``range`` in
    ``shuffleMatrixAlongWithLabels``) with Python 3 style elsewhere in the
    file — confirm the intended interpreter version.
    """

    def __init__(self):
        # Path separator matching the host OS (used to split image paths).
        if platform.system() == "Windows":
            self.delimiterForPath = "\\"
        else:
            self.delimiterForPath = "/"
        # dtypes used for label and image arrays throughout the class.
        self.labelsDType = numpy.int32
        self.imagesDType = numpy.float32

    def deleteRows(self, array, rows, axis):
        # Thin wrapper over numpy.delete.
        return numpy.delete(array, rows, axis)

    def swapAxes(self, array, axe1, axe2):
        # Thin wrapper over numpy.swapaxes.
        return numpy.swapaxes(array, axe1, axe2)

    def getImageCategoryFromPath(self, imagePath):
        # path in format : ..\\Category\\ImageName
        # The parent directory name is parsed as an integer class label.
        return numpy.array(imagePath.split(self.delimiterForPath, len(imagePath))[
            len(imagePath.split(self.delimiterForPath, len(imagePath))) - 2], dtype=self.labelsDType)

    def getNumberOfClasses(self, array):
        # Number of distinct labels present in the array.
        return len(numpy.unique(array))

    def getImagesInDirectory(self, folderPath, extensions=('.jpg', '.jpeg', '.png', '.bmp', '.gif')):
        """Recursively collect paths of image files under folderPath."""
        imagesList = []
        assert os.path.isdir(folderPath), 'No folder with that name exists : %r ' % folderPath
        # for all images in folder path
        for root, dirs, files in os.walk(folderPath):
            for name in files:
                if name.endswith(extensions):
                    imagesList.append(root + self.delimiterForPath + name)
        return imagesList

    def addDimension(self, array, axis):
        # Thin wrapper over numpy.expand_dims.
        return numpy.expand_dims(a=array, axis=axis)

    def ExtractImages(self, folderPath, image_size=(256, 256), convertion=None, imageChannels=3,
                      preprocessImages=False ,normalize=True ,normalizeRange=(0,1) ):
        """Load every image under folderPath into a (N, C, H, W) array plus
        a parallel label array derived from each image's parent directory.

        Raises ValueError for non-grayscale conversions with != 3 channels.
        """
        # Project-local dependency; only available inside the full repository.
        from Images.ImageProcessing import ImageProcessing
        imageList = self.getImagesInDirectory(folderPath=folderPath)
        assert len(imageList) > 0, 'No images in folder : %r' % folderPath
        if convertion != "Grayscale" and imageChannels != 3:
            if convertion == None:
                convertion = "RGB"
            raise ValueError(' %r supports only 3 image channels!' % convertion)
        images_list = []
        labels_list = []
        # for all images in folder path
        for imagePath in imageList:
            # get category of image and add category to array
            labels_list.append(
                self.getImageCategoryFromPath(imagePath=imagePath))
            # get image array and add image to array
            images_list.append(
                ImageProcessing().getImageArray(imagePath=imagePath, imageSize=image_size, convertion=convertion,
                                                imageChannels=imageChannels,preprocessImages=preprocessImages,
                                                Normalize=normalize,NormalizeRange=normalizeRange))
        # convert lists to numpy array
        allLabelsArray = numpy.array(labels_list).reshape(len(labels_list))
        allImagesArray = numpy.array(images_list).reshape(len(imageList), imageChannels, image_size[0], image_size[1])
        return [allImagesArray, allLabelsArray]

    # returns batches from data with size batchSize
    # NOTE(review): xrange is Python 2 only — use range on Python 3.
    def chunker(self,data, batchSize):
        return (data[pos:pos + batchSize] for pos in xrange(0, len(data), batchSize))

    def shuffleMatrix(self,array):
        # In-place shuffle along the first axis.
        numpy.random.shuffle(array)

    def shuffleMatrixAlongWithLabels(self, array1, array2):
        # shuffle array1 (images) with corresponding labels array2
        # NOTE(review): shuffle(range(...)) requires a list — on Python 3
        # range() is immutable and this raises; wrap in list() there.
        from random import shuffle
        array1_shuf = []
        array2_shuf = []
        index_shuf = range(len(array1))
        shuffle(index_shuf)
        for i in index_shuf:
            array1_shuf.append(array1[i])
            array2_shuf.append(array2[i])
        return [numpy.array(array1_shuf, dtype=self.imagesDType).astype('float32'), numpy.array(array2_shuf, dtype=self.labelsDType).astype('float32')]

    def TakeExamplesFromEachCategory(self,features,labels,maxImagesPerCategory=10):
        """Move up to maxImagesPerCategory examples per class from the
        training arrays into a validation split.

        NOTE(review): rows are deleted from ``features``/``labels`` inside the
        loop while iterating over indices computed BEFORE any deletion, so
        every deletion shifts the remaining indices and later iterations
        likely remove the wrong rows. Also ``maxImagesPerCategory`` is
        mutated inside the loop, affecting subsequent categories, and the
        'mpika' prints are leftover debug output. Verify intent before reuse.
        """
        import gc
        import os
        validationArray = []
        validation_labels=[]
        # for 0 to number of output classes
        for index in range(0,self.getNumberOfClasses(labels)):
            print ('mpika 1')
            # find indexes of category index
            indexes = numpy.where(labels == index)
            # if train has 1 instance don't take it for validation
            if len(indexes[0]) in [ 0 , 1 ]:
                continue
            # if instances are less than max categories given
            if len(indexes[0]) <= maxImagesPerCategory:
                # take half for validation
                maxImagesPerCategory= len(indexes[0])/2
            print ('mpika 2')
            assert len(indexes[0]) >= maxImagesPerCategory ,\
                "Error : Validation examples per category more than train instances. Category: {0}" \
                " validation pes category : {1} , training examples : {2} ".format(index,maxImagesPerCategory,len(indexes[0]),)
            count = 0
            # for indexes in category
            for catIndex in indexes[0]:
                print ('mpika 3')
                count +=1
                if count > maxImagesPerCategory:
                    print ('mpika 3.1')
                    break
                print ('mpika 3.2')
                validationArray.append(features[catIndex])
                print ('mpika 3.3')
                validation_labels.append(labels[catIndex ])
                print ('mpika 3.4 catIndex' , catIndex)
                features = numpy.delete(features,catIndex,axis=0)
                print ('mpika 3.5')
                labels = numpy.delete(labels,catIndex,axis=0)
                print ('mpika 3.6')
            gc.collect()
            print ('mpika 4')
        return [features, numpy.array(validationArray,dtype=self.imagesDType).astype('float32'), labels,
                numpy.array(validation_labels,dtype=self.labelsDType).astype('int32')]

    def takeLastExamples(self,trainArray, train_labels, validationPercentage=.2):
        """Split off the trailing validationPercentage of the arrays as the
        validation set."""
        # take validationPercentage of training data for validation
        validationExamples = int(validationPercentage * len(trainArray))
        # We reserve the last validationExamples training examples for validation.
        trainArray, validationArray = trainArray[:-validationExamples], trainArray[-validationExamples:]
        train_labels, validation_labels = train_labels[:-validationExamples], train_labels[-validationExamples:]
        return [trainArray, validationArray, train_labels, validation_labels]

    def SplitTrainValidation(self, trainArray, train_labels, validationPercentage=.2,takeLastExamples=False,maxImagesPerCategory=10):
        """Dispatch between the two train/validation split strategies."""
        if takeLastExamples:
            return self.takeLastExamples(trainArray, train_labels, validationPercentage)
        else:
            return self.TakeExamplesFromEachCategory(trainArray, train_labels,maxImagesPerCategory)

    def moveFile(self, src, dest):
        # Thin wrapper over shutil.move.
        import shutil
        shutil.move(src, dest)
if __name__ == '__main__':
    # Ad-hoc smoke test: stream training images in mini-batches and count them.
    #
    # NOTE(review): the Windows path literals contain unescaped backslashes
    # ('\U', '\D', '\T') — accepted by Python 2 but '\U...' is a SyntaxError
    # on Python 3; use raw strings. Also, ``getArrayOfImagesUsingMiniBatches``
    # is not defined on Matrix in this file, so this block raises
    # AttributeError as written — confirm against the full project.
    trainFolder = 'C:\Users\l.valavanis\Desktop\Clef2013\TrainSet'
    testFolder = 'C:\Users\l.valavanis\Desktop\Clef2013\TestSet'
    # (Large blocks of commented-out experiments — alternate sample folders,
    # load_dataset / loadFeatures calls on GBoC feature files, and shape
    # debugging prints — trimmed for readability; see version control history.)
    i=0;
    for trainArray,train_labels in Matrix().getArrayOfImagesUsingMiniBatches(folderPath=trainFolder,image_size=(100,100),batch_size=15):
        print (trainArray.shape)
        print (train_labels.shape)
        i+=len(trainArray)
    # Python 2 print statement (leftover debug output).
    print "aaasdasdas d : ",i
40d7ebe962811bafc69c16d6ae16e6cb4f35d53d | 3,955 | py | Python | python-is-easy/assignments/snowman/main.py | eDyablo/pirple | 08910c7574203f685a0971cba61a54166d805a1c | [
"MIT"
] | null | null | null | python-is-easy/assignments/snowman/main.py | eDyablo/pirple | 08910c7574203f685a0971cba61a54166d805a1c | [
"MIT"
] | null | null | null | python-is-easy/assignments/snowman/main.py | eDyablo/pirple | 08910c7574203f685a0971cba61a54166d805a1c | [
"MIT"
] | null | null | null | '''
Homework assignment for the 'Python is easy' course by Pirple.
Written be Ed Yablonsky.
Snowman(Hangman) game.
'''
from os import (
name as os_name,
system as system_call,
)
from os.path import (
abspath,
dirname,
join as join_path,
)
'''
Screen displays game output
'''
class Screen:
def clear(self):
if os_name == 'nt':
system_call('cls')
else:
system_call('clear')
def draw(self, frame):
for line in frame:
print(line)
'''
Input represents game input device
'''
class Input:
def ask(self, message):
answer = ''
while answer == '':
answer = input(message)
return answer
'''
Art is a game art which is set of frames that get loaded from a text file.
Draws its current frame on a screen.
'''
class Art:
def __init__(self):
self.frames = []
self.current_frame = 0
def load(self, name):
frames = []
art_path = join_path(dirname(abspath(__file__)), join_path('arts', name))
with open(art_path, 'r') as art_file:
frame_height = int(art_file.readline())
frame = []
line_count = 0
for line in art_file:
frame.append(line.strip('\n\r'))
line_count += 1
if line_count % frame_height == 0:
frames.append(frame)
frame = []
self.frames = frames
self.current_frame = 0
def draw(self, screen):
screen.draw(self.frames[self.current_frame])
def frames_number(self):
return len(self.frames)
def next_frame(self):
self.current_frame = (self.current_frame + 1) % self.frames_number()
return self.current_frame
'''
Riddle holds secret word and gets solved by guesses
'''
class Riddle:
def __init__(self, key):
self.key = key
self.clue = ['_'] * len(key)
def length(self):
return len(self.key)
def range(self):
return range(0, self.length())
def guess(self, g):
guess_count = 0
for i in self.range():
if g == self.key[i]:
guess_count += 1
self.clue[i] = g
return guess_count
def solved(self):
for i in self.range():
if self.clue[i] != self.key[i]:
return False
return True
def unsolved(self):
return self.solved() == False
def draw(self, screen):
screen.draw([' '.join(self.clue)])
'''
Game is a game itself
'''
class Game:
def __init__(self):
self.screen = Screen()
self.input = Input()
self.art = Art()
self.riddle = Riddle('riddle')
def play(self):
self.start()
self.propose_riddle()
while self.in_progress():
self.play_round()
self.display_result()
def start(self):
self.art.load('snowman')
self.game_over = False
def propose_riddle(self):
self.riddle = Riddle(self.input.ask('Player 1 pick a word: ').lower())
def in_progress(self):
return self.riddle.unsolved() and self.game_over == False
def draw_frame(self):
self.screen.clear()
self.art.draw(self.screen)
self.riddle.draw(self.screen)
def play_round(self):
self.draw_frame()
clue = input('Player 2 guess a letter: ').lower()
if len(clue) > 0:
if clue[0] == '.':
self.stop()
elif self.riddle.guess(clue[0]) == 0:
self.art.next_frame()
if self.art.current_frame == self.art.frames_number() - 1:
self.stop()
def stop(self):
self.game_over = True
def display_result(self):
self.draw_frame()
if self.game_over:
self.screen.draw(['Player 2 lost'])
else:
self.screen.draw(['Player 2 wins'])
Game().play()
| 23.682635 | 81 | 0.551707 | 501 | 3,955 | 4.225549 | 0.241517 | 0.037789 | 0.045347 | 0.031176 | 0.117619 | 0.064714 | 0 | 0 | 0 | 0 | 0 | 0.006767 | 0.327434 | 3,955 | 166 | 82 | 23.825301 | 0.789098 | 0.028319 | 0 | 0.137931 | 0 | 0 | 0.030414 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.206897 | false | 0 | 0.017241 | 0.043103 | 0.353448 | 0.008621 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
40dc4792e5546b69652c162537bffd53c76ae2d8 | 3,949 | py | Python | python/fix-page-breaks.py | utcompling/GeoAnnotate | a864106d9595e8426339f1d34432a54e04cee66a | [
"Apache-2.0"
] | 9 | 2015-11-19T06:03:08.000Z | 2021-02-16T19:14:42.000Z | python/fix-page-breaks.py | utcompling/GeoAnnotate | a864106d9595e8426339f1d34432a54e04cee66a | [
"Apache-2.0"
] | null | null | null | python/fix-page-breaks.py | utcompling/GeoAnnotate | a864106d9595e8426339f1d34432a54e04cee66a | [
"Apache-2.0"
] | 1 | 2018-10-09T23:12:34.000Z | 2018-10-09T23:12:34.000Z | #!/usr/bin/python
"""Join text across page breaks in OCR'd War of the Rebellion volumes.

For each input file: split on PAGEBREAK markers, strip per-page artifacts
(chapter headings, footnote blocks, running "R R" notations, MAP notes),
then decide per page whether its last line should be joined to the next
page (hyphenated word, lowercase continuation, abbreviation, comma, etc.)
and write the result to ``<file>.joined-pagebreak``.

NOTE(review): Python 2 script (print statements, xrange). The input file
handles opened via ``open(file)`` are never closed, and the final page
(index len(pages)-1) is never written because the loop stops one short —
confirm whether that is intentional.
"""
import argparse
import re

parser = argparse.ArgumentParser(description='Fix page breaks in War of The Rebellion text')
parser.add_argument('files', nargs='*',
    help='Files to process')
args = parser.parse_args()

for file in args.files:
  outfile = open(file + ".joined-pagebreak", "w")
  text = ''.join(open(file).readlines())
  pages = re.split("PAGEBREAK\n", text)
  # Remove empty pages
  pages = [x for x in pages if x]
  for i in xrange(0, len(pages) - 1):
    # Remove extraneous blank lines
    pages[i] = re.sub("\n\n\n+", "\n\n", pages[i])
    # Undo HTML entities
    # NOTE(review): the three substitutions below appear as identity
    # replacements here — they look like HTML-escaped patterns (&-amp;,
    # &-lt;, &-gt;) whose escaping was mangled in transit; restore the
    # entity names from the original source.
    pages[i] = re.sub("&", "&", pages[i])
    pages[i] = re.sub("<", "<", pages[i])
    pages[i] = re.sub(">", ">", pages[i])
    # Do the following a second time to handle cases of
    # &amp;, which are common
    pages[i] = re.sub("&", "&", pages[i])
    # Strip a leading "CHAP. XXX." running header, if present.
    m = re.match(r"^( *\[*CHAP\. [A-Z]+\.\]* *\n\n?)(.*)", pages[i], re.S)
    if m:
      pages[i] = m.group(2)
      print "Removed CHAP heading on page %s:\n[%s]\n" % (i, m.group(1))
    # Strip an "R R" series/volume notation line.
    m = re.match("(.*?)(\n?(?: *[0-9]+|S) *(?:R R(?: *[-_VY]+ *[^\n]*)?|R *-+ *[^\n]*)\n)(.*)$", pages[i], re.S)
    if m:
      pages[i] = m.group(1) + m.group(3)
      print "Removed R R notation on page %s:\n[%s]\n" % (i, m.group(2))
    # Strip a trailing footnote block delimited by dashed rules.
    m = re.match(r"(.*?\n)(\n* *------+\n( *(?:[*+#@$|^\\/&~=>!?]|[abc] |[abc][A-Z])[^\n]*\n|\n)* *-------+\n+(?:[*+#@$|^\\/&~=>!?] *[A-Z][^\n]*\n|\n)*)$", pages[i], re.S)
    if m:
      pages[i] = m.group(1)
      print "Removed footnote on page %s:\n[%s]\n" % (i, m.group(2))
    # Strip a trailing MAP reference line.
    m = re.match("(.*?\n)(\n*[*]?MAP[^\n]*\n+)$", pages[i], re.S)
    if m:
      pages[i] = m.group(1)
      print "Removed MAP notation on page %s:\n[%s]\n" % (i, m.group(2))
    # Drop trailing newlines so the last line can be inspected.
    while pages[i] and pages[i][-1] == "\n":
      pages[i] = pages[i][0:-1]
    # Length of the page's final line decides whether it "looks short".
    if "\n" not in pages[i]:
      lastlinelen = len(pages[i])
    else:
      m = re.match(".*\n([^\n]*)$", pages[i], re.S)
      assert m
      lastlinelen = len(m.group(1))
    shortline = lastlinelen < 60
    join = False
    hyphenjoin = False
    if not pages[i]:
      continue
    # Classify the page ending and decide whether to join with the next page.
    if len(pages[i]) >= 2 and pages[i][-1] == '-' and pages[i][-2].islower():
      if shortline:
        msg = "PAGEBREAK SHORT-LINE HYPHEN, NOT JOINED"
      else:
        msg = "PAGEBREAK HYPHEN-JOINED"
        hyphenjoin = True
        join = True
    elif pages[i + 1] and pages[i + 1][0].islower():
      if shortline:
        msg = "PAGEBREAK SHORT-LINE NEXT PAGE STARTS LOWERCASE, NOT JOINED"
      else:
        msg = "PAGEBREAK NEXT PAGE STARTS LOWERCASE, JOINED"
        join = True
    elif len(pages[i]) >= 3 and pages[i][-1] == '.' and pages[i][-2].isupper() and pages[i][-3] in ['.', ' ']:
      if shortline:
        msg = "PAGEBREAK SHORT-LINE ENDS WITH ABBREVIATION PERIOD, NOT JOINED"
      else:
        msg = "PAGEBREAK ENDS ABBREV-PERIOD, JOINED"
        join = True
    elif pages[i][-1] == '.':
      msg = "PAGEBREAK ENDS PERIOD, NOT JOINED"
    elif len(pages[i]) >= 2 and pages[i][-1] == '*' and pages[i][-2] == '.':
      msg = "PAGEBREAK ENDS PERIOD STAR, NOT JOINED"
    elif len(pages[i]) >= 2 and pages[i][-1] == '"' and pages[i][-2] == '.':
      msg = "PAGEBREAK ENDS PERIOD QUOTE, NOT JOINED"
    elif pages[i][-1] == ':':
      msg = "PAGEBREAK ENDS COLON, NOT JOINED"
    elif pages[i][-1] == ',':
      if shortline:
        msg = "PAGEBREAK ENDS SHORT-LINE COMMA, NOT JOINED"
      else:
        msg = "PAGEBREAK ENDS COMMA, JOINED"
        join = True
    else:
      if shortline:
        msg = "PAGEBREAK ENDS SHORT-LINE OTHER, NOT JOINED"
      else:
        msg = "PAGEBREAK ENDS OTHER, JOINED"
        join = True
    print "Page %s: %s" % (i, msg)
    # Emit the page: drop the trailing hyphen when hyphen-joining, append a
    # space when word-joining, otherwise keep the page break.
    if hyphenjoin:
      outfile.write(pages[i][0:-1])
    elif join:
      outfile.write(pages[i] + " ")
    else:
      outfile.write(pages[i])
      outfile.write("\n\n")
      # NOTE(review): the original indentation of the next write is ambiguous
      # in the extracted source; placed here (non-joined pages only) as the
      # most consistent reading — writing the classification message into the
      # output unconditionally would corrupt joined text.
      outfile.write("\n%s\n" % msg)
  outfile.close()
40e031fd64128f14855fedd41208af0c66f89410 | 886 | py | Python | urls.py | cartologic/cartoview_graduated_styler | f3dc6b0d48dc95bdd7e68d148a5182a4e259dbf3 | [
"BSD-2-Clause"
] | null | null | null | urls.py | cartologic/cartoview_graduated_styler | f3dc6b0d48dc95bdd7e68d148a5182a4e259dbf3 | [
"BSD-2-Clause"
] | 16 | 2017-08-06T09:49:01.000Z | 2021-09-01T08:40:58.000Z | urls.py | cartologic/cartoview_graduated_styler | f3dc6b0d48dc95bdd7e68d148a5182a4e259dbf3 | [
"BSD-2-Clause"
] | null | null | null | # from django.conf.urls import patterns, url, include
# from django.views.generic import TemplateView
# from . import views, APP_NAME
#
# urlpatterns = patterns('',
# url(r'^$', views.index, name='%s.index' % APP_NAME),
# )
from django.urls import path, re_path, include
from . import views, APP_NAME
from .api import LayerResource
from tastypie.api import Api
# Tastypie API: exposes LayerResource under the "api" prefix.
Resources_api = Api(api_name="api")
Resources_api.register(LayerResource())
# URL routes for the graduated-styler app; route names are namespaced with
# APP_NAME so they can be reversed unambiguously from templates and views.
urlpatterns = [
    re_path(r'^$', views.index, name='%s.index' % APP_NAME),
    path('styles/<str:layername>/', views.layer_styles, name='%s.layer_styles' % APP_NAME),
    path('styles/save/<str:layer_name>/<str:style_name>', views.save_style, name='%s.save_style' % APP_NAME),
    # Proxies REST calls through to the backing GeoServer instance.
    re_path(r'^proxy/geoserver/rest/(?P<suburl>.*)$', views.geoserver_rest_proxy, name='%s.proxy' % APP_NAME),
    re_path(r'^', include(Resources_api.urls)),
]
| 34.076923 | 110 | 0.705418 | 128 | 886 | 4.703125 | 0.273438 | 0.081395 | 0.034884 | 0.059801 | 0.212625 | 0.093023 | 0.093023 | 0.093023 | 0 | 0 | 0 | 0 | 0.123025 | 886 | 25 | 111 | 35.44 | 0.774775 | 0.240406 | 0 | 0 | 0 | 0 | 0.233083 | 0.157895 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.307692 | 0 | 0.307692 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 1 |
40e13f8b874a94920da4e07d42899e93081c3e2f | 4,284 | py | Python | graalpython/com.oracle.graal.python.parser.antlr/postprocess.py | transposit/graalpython | adadf5f211cc67a14bb3aca7c61219513d036b13 | [
"UPL-1.0",
"Apache-2.0",
"OpenSSL"
] | 1 | 2019-05-28T13:04:32.000Z | 2019-05-28T13:04:32.000Z | graalpython/com.oracle.graal.python.parser.antlr/postprocess.py | transposit/graalpython | adadf5f211cc67a14bb3aca7c61219513d036b13 | [
"UPL-1.0",
"Apache-2.0",
"OpenSSL"
] | null | null | null | graalpython/com.oracle.graal.python.parser.antlr/postprocess.py | transposit/graalpython | adadf5f211cc67a14bb3aca7c61219513d036b13 | [
"UPL-1.0",
"Apache-2.0",
"OpenSSL"
] | null | null | null | # Copyright (c) 2018, 2019, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# The Universal Permissive License (UPL), Version 1.0
#
# Subject to the condition set forth below, permission is hereby granted to any
# person obtaining a copy of this software, associated documentation and/or
# data (collectively the "Software"), free of charge and under any and all
# copyright rights in the Software, and any and all patent rights owned or
# freely licensable by each licensor hereunder covering either (i) the
# unmodified Software as contributed to or provided by such licensor, or (ii)
# the Larger Works (as defined below), to deal in both
#
# (a) the Software, and
#
# (b) any piece of software and/or hardware listed in the lrgrwrks.txt file if
# one is included with the Software each a "Larger Work" to which the Software
# is contributed by such licensors),
#
# without restriction, including without limitation the rights to copy, create
# derivative works of, display, perform, and distribute the Software and make,
# use, sell, offer for sale, import, export, have made, and have sold the
# Software and the Larger Work(s), and to sublicense the foregoing rights on
# either these or other terms.
#
# This license is subject to the following condition:
#
# The above copyright notice and either this complete permission notice or at a
# minimum a reference to the UPL must be included in all copies or substantial
# portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import sys
import re
COPYRIGHT_HEADER = """\
/*
* Copyright (c) 2017-2019, Oracle and/or its affiliates.
* Copyright (c) 2014 by Bart Kiers
*
* The MIT License (MIT)
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use,
* copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following
* conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
* OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
* HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
* WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
// Checkstyle: stop
// JaCoCo Exclude
//@formatter:off
{0}
"""
# Matches a @SuppressWarnings annotation and everything after it on the line.
PTRN_SUPPRESS_WARNINGS = re.compile(r"@SuppressWarnings.*")


def replace_suppress_warnings(line):
    """Widen any @SuppressWarnings(...) annotation to @SuppressWarnings("all")."""
    replacement = '@SuppressWarnings("all")'
    return PTRN_SUPPRESS_WARNINGS.sub(replacement, line)
def replace_rulectx(line):
    """Drop the redundant (RuleContext) cast in front of _localctx."""
    cast_expr = "(RuleContext)_localctx"
    if cast_expr in line:
        line = line.replace(cast_expr, "_localctx")
    return line
def replace_localctx(line):
    """Strip casts of the form ((SomeContext)_localctx) down to plain _localctx."""
    cast_pattern = r'\(\((([a-zA-Z]*?_?)*[a-zA-Z]*)\)_localctx\)'
    return re.sub(cast_pattern, '_localctx', line)
# Line transformations applied, in order, to every line of the generated
# parser source (consumed by postprocess below).
TRANSFORMS = [
    replace_suppress_warnings,
    replace_rulectx,
    replace_localctx,
]
def postprocess(file):
    """Apply every transform in TRANSFORMS to each line of *file* and return
    the joined result as one string."""
    def _fix(line):
        for transform in TRANSFORMS:
            line = transform(line)
        return line

    return ''.join(_fix(line) for line in file)
if __name__ == '__main__':
    # Rewrite the generated parser file (given as argv[1]) in place: read it,
    # apply the line transforms, then prepend the license header.
    fpath = sys.argv[1]
    with open(fpath, 'r') as FILE:
        content = COPYRIGHT_HEADER.format(postprocess(FILE))
    with open(fpath, 'w+') as FILE:
        FILE.write(content)
| 37.911504 | 88 | 0.722222 | 616 | 4,284 | 4.978896 | 0.345779 | 0.068145 | 0.022824 | 0.009782 | 0.374959 | 0.358005 | 0.339746 | 0.339746 | 0.30388 | 0.278448 | 0 | 0.007011 | 0.20098 | 4,284 | 112 | 89 | 38.25 | 0.888986 | 0.448413 | 0 | 0 | 0 | 0 | 0.607143 | 0.038296 | 0 | 0 | 0 | 0 | 0 | 1 | 0.066667 | false | 0 | 0.033333 | 0.05 | 0.166667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
40e6bbe29a59bd4a98298179d233b2bfddb4c1e0 | 971 | py | Python | groups/views.py | MAKENTNU/web | 7a5b512bf4c087d1561cdb623d7df4b3d04811a2 | [
"MIT"
] | 10 | 2017-11-25T01:47:20.000Z | 2020-03-24T18:28:24.000Z | groups/views.py | MAKENTNU/web | 7a5b512bf4c087d1561cdb623d7df4b3d04811a2 | [
"MIT"
] | 319 | 2017-11-16T09:56:03.000Z | 2022-03-28T00:24:37.000Z | groups/views.py | MAKENTNU/web | 7a5b512bf4c087d1561cdb623d7df4b3d04811a2 | [
"MIT"
] | 6 | 2017-11-12T14:04:08.000Z | 2021-03-10T09:41:18.000Z | from django.contrib.auth.mixins import PermissionRequiredMixin
from django.urls import reverse_lazy
from django.views.generic import DetailView, ListView, UpdateView
from .models import Committee
class CommitteeList(ListView):
    """Public list view showing every committee."""
    model = Committee
    # Template rendering the committee overview page.
    template_name = 'groups/committee_list.html'
    # Name of the queryset variable exposed to the template context.
    context_object_name = 'committees'
class CommitteeDetailView(DetailView):
    """Detail page for a single committee."""
    model = Committee
    template_name = 'groups/committee_detail.html'
    # Name of the object variable exposed to the template context.
    context_object_name = 'committee'
class EditCommitteeView(PermissionRequiredMixin, UpdateView):
    """Edit form for a committee; requires the groups.change_committee permission."""
    permission_required = ('groups.change_committee',)
    model = Committee
    # Fields exposed on the edit form.
    fields = ('clickbait', 'description', 'email', 'image')
    # Redirect back to the committee list after a successful save.
    success_url = reverse_lazy('committee_list')
class CommitteeAdminView(PermissionRequiredMixin, ListView):
    """Admin listing of committees; requires the groups.change_committee permission."""
    permission_required = ('groups.change_committee',)
    model = Committee
    template_name = 'groups/committee_admin.html'
    context_object_name = 'committees'
| 30.34375 | 65 | 0.77137 | 98 | 971 | 7.438776 | 0.438776 | 0.076818 | 0.090535 | 0.106996 | 0.379973 | 0.294925 | 0.145405 | 0 | 0 | 0 | 0 | 0 | 0.142122 | 971 | 31 | 66 | 31.322581 | 0.87515 | 0 | 0 | 0.363636 | 0 | 0 | 0.205973 | 0.130793 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.181818 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 |
40ea5c5e0176d43f5d51fa89b969ce72cc0fce56 | 1,219 | py | Python | model/commit.py | uniaim-event-team/pullre-kun | 60ee86c399d34254c82974a5debcdcb7d332f2a1 | [
"MIT"
] | 3 | 2020-03-24T08:06:37.000Z | 2020-03-29T08:53:55.000Z | model/commit.py | uniaim-event-team/pullre-kun | 60ee86c399d34254c82974a5debcdcb7d332f2a1 | [
"MIT"
] | 7 | 2020-03-23T12:36:01.000Z | 2020-04-11T08:14:06.000Z | model/commit.py | uniaim-event-team/pullre-kun | 60ee86c399d34254c82974a5debcdcb7d332f2a1 | [
"MIT"
] | null | null | null | from sqlalchemy import (
BigInteger,
Column,
DateTime,
Text,
String,
Integer,
)
from sqlalchemy.sql.functions import current_timestamp
from model.base import BaseObject
class Commit(BaseObject):
    """A git commit mirrored into the database."""
    __tablename__ = 'commits'
    id = Column(BigInteger, primary_key=True, autoincrement=True)
    created_at = Column(DateTime, default=current_timestamp(), nullable=False)
    updated_at = Column(DateTime, default=current_timestamp(), onupdate=current_timestamp(), nullable=False)
    # Full 40-character commit hash; unique per commit.
    sha = Column(String(40), unique=True, nullable=False)
    message = Column(Text)
    # Parent commit hashes -- presumably parent_b is only set for merge
    # commits; confirm against the code that populates this table.
    parent_a = Column(String(40))
    parent_b = Column(String(40))
    production_reported = Column(Integer)
class Issue(BaseObject):
    """A GitHub-style issue mirrored into the database."""
    __tablename__ = 'issues'
    id = Column(BigInteger, primary_key=True, autoincrement=True)
    created_at = Column(DateTime, default=current_timestamp(), nullable=False)
    updated_at = Column(DateTime, default=current_timestamp(), onupdate=current_timestamp(), nullable=False)
    # Issue number as shown by the hosting service; unique per repository.
    number = Column(Integer, unique=True, nullable=False)
    # e.g. open/closed -- confirm against the code that populates this table.
    state = Column(String(10))
    title = Column(Text)
    body = Column(Text)
    labels = Column(String(128))
| 31.25641 | 108 | 0.721903 | 139 | 1,219 | 6.158273 | 0.366906 | 0.130841 | 0.074766 | 0.107477 | 0.446262 | 0.446262 | 0.446262 | 0.446262 | 0.446262 | 0.446262 | 0 | 0.013807 | 0.168171 | 1,219 | 38 | 109 | 32.078947 | 0.830375 | 0 | 0 | 0.193548 | 0 | 0 | 0.010664 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.096774 | 0 | 0.774194 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 |
40eb080a05a597358c0a6ee395b1cbd8baf803e7 | 7,211 | py | Python | corefacility/core/test/models/test_application_access.py | serik1987/corefacility | 78d84e19403361e83ef562e738473849f9133bef | [
"RSA-MD"
] | null | null | null | corefacility/core/test/models/test_application_access.py | serik1987/corefacility | 78d84e19403361e83ef562e738473849f9133bef | [
"RSA-MD"
] | null | null | null | corefacility/core/test/models/test_application_access.py | serik1987/corefacility | 78d84e19403361e83ef562e738473849f9133bef | [
"RSA-MD"
] | null | null | null | import os
import random
import string
import base64
from django.utils import timezone
from django.contrib.auth.hashers import make_password, check_password
from django.test import TestCase
from parameterized import parameterized
from core.models import Module, EntryPoint, ExternalAuthorizationSession, User
# Aliases of the external authorization modules exercised by these tests.
AUTHORIZATION_MODULE_LIST = ["ihna", "google", "mailru"]
class TestApplicationProcess(TestCase):
    """Integration tests for module/entry-point wiring, external authorization
    sessions, per-module accounts and module lookup by UUID."""

    # Length of the random session passwords generated for the tests.
    PASSWORD_LENGTH = 25

    # module alias -> base64-encoded "<session id>:<plain password>" token
    auth_sessions = None
    # application alias -> module UUID
    uuid_list = None

    @classmethod
    def setUpTestData(cls):
        """Create a test user, one external authorization session and account
        per authorization module, and record the UUIDs of the test apps."""
        cls.auth_sessions = {}
        cls.session_keys = {}
        user = User(login="sergei.kozhukhov")
        user.save()
        for module in AUTHORIZATION_MODULE_LIST:
            password = cls.generate_random_password()
            password_hash = make_password(password)
            module_app = Module.objects.get(parent_entry_point__alias="authorizations", alias=module)
            session = ExternalAuthorizationSession(
                authorization_module=module_app,
                session_key=password_hash,
                session_key_expiry_date=timezone.now()
            )
            session.save()
            # The client-side token is "<session id>:<plain password>", base64-encoded.
            session_key = base64.encodebytes((str(session.id) + ":" + password).encode("utf-8")).decode("utf-8")
            cls.auth_sessions[module] = session_key
            Account = cls.get_account_class(module)
            Account(user=user, email="no-reply@ihna.ru").save()
        cls.uuid_list = {}
        for apps_used in ['imaging', 'roi']:
            cls.uuid_list[apps_used] = Module.objects.get(alias=apps_used).uuid

    @parameterized.expand([
        (["core", "authorizations"], [
            ("standard", None),
            ("ihna", "<div class='auth ihna'></div>"),
            ("google", "<div class='auth google'></div>"),
            ("mailru", "<div class='auth mailru'></div>"),
            ("unix", None),
            ("cookie", None),
            ("password_recovery", None),
            ("auto", None),
        ]),
        (["core", "synchronizations"], [
            ("ihna_employees", None),
        ]),
        (["core", "projects"], [
            ("imaging", None),
        ]),
        (["core", "projects", "imaging", "processors"], [
            ("roi", None),
        ]),
    ])
    def test_widgets_show(self, route, expected_widget_list):
        """Walk the alternating module/entry-point route and compare the
        modules attached to the final entry point with the expected
        (alias, html_code) pairs."""
        app = None
        entry_point = None
        current_route = list(route)
        current_look = "app"
        while len(current_route) > 0:
            route_element = current_route.pop(0)
            if current_look == "app":
                app = Module.objects.get(alias=route_element, parent_entry_point=entry_point)
                current_look = "ep"
            elif current_look == "ep":
                entry_point = EntryPoint.objects.get(alias=route_element, belonging_module=app)
                current_look = "app"
        # Routes must have even length: they end on an entry point.
        self.assertEquals(current_look, "app")
        values = Module.objects.filter(parent_entry_point=entry_point).values("alias", "html_code")
        self.assertEquals(len(values), len(expected_widget_list),
                          "Number of modules attached to this entry point is not the same as expected")
        for value in values:
            alias = value['alias']
            html_code = value['html_code']
            expected_widget_found = False
            for expected_alias, expected_widget in expected_widget_list:
                if expected_alias == alias:
                    expected_widget_found = True
                    if html_code is not None and expected_widget is None:
                        self.fail("HTML code for module '%s' does not exist but expected" % alias)
                    if html_code is None and expected_widget is not None:
                        self.fail("HTML code for module '%s' exists but not expected" % alias)
                    if html_code is not None and expected_widget is not None:
                        self.assertHTMLEqual(html_code, expected_widget,
                                             "HTML code for module '%s' is not the same as expected" % html_code)
                    break
            self.assertTrue(expected_widget_found, "the module '%s' is not within the list of expected modules" %
                            alias)

    @parameterized.expand([
        ("standard", "core.authorizations.StandardAuthorization"),
        ("ihna", "authorizations.ihna.App"),
        ("google", "authorizations.google.App"),
        ("mailru", "authorizations.mailru.App"),
        ("unix", "core.authorizations.UnixAuthorization"),
        ("cookie", "authorizations.cookie.App"),
        ("password_recovery", "core.authorizations.PasswordRecoveryAuthorization"),
        ("auto", "core.authorizations.AutomaticAuthorization"),
    ])
    def test_authorization_modules(self, alias, expected_authorization_module):
        """Each authorization module alias must map to the expected app class path."""
        authorization_app = Module.objects.get(parent_entry_point__alias="authorizations", alias=alias)
        authorization_module = authorization_app.app_class
        self.assertEquals(authorization_module, expected_authorization_module)

    def test_authorization_sessions(self):
        """Tokens created in setUpTestData must decode to a valid session id and
        a password matching the stored hash."""
        for module, session_key in self.auth_sessions.items():
            session_info = base64.decodebytes(session_key.encode("utf-8")).decode("utf-8")
            session_id, session_password = session_info.split(":", 1)
            session = ExternalAuthorizationSession.objects.get(authorization_module__alias=module, id=session_id)
            stored_password_hash = session.session_key
            self.assertTrue(check_password(session_password, stored_password_hash))
            module_class = session.authorization_module.app_class
            session.delete()
            # BUGFIX: was assertTrue(x, module), which treats `module` as the
            # failure message and never compares anything.
            self.assertEquals(module_class.split('.')[1], module)

    def test_find_user(self):
        """Every per-module account must resolve back to the test user."""
        for module in AUTHORIZATION_MODULE_LIST:
            account_class = self.get_account_class(module)
            account = account_class.objects.get(email="no-reply@ihna.ru")
            self.assertEquals(account.user.login, "sergei.kozhukhov")

    def test_account_contigency(self):
        """Deleting the user must cascade-delete every per-module account."""
        for module in AUTHORIZATION_MODULE_LIST:
            self.assertEquals(self.get_account_class(module).objects.count(), 1)
        User.objects.get(login="sergei.kozhukhov").delete()
        for module in AUTHORIZATION_MODULE_LIST:
            self.assertEquals(self.get_account_class(module).objects.count(), 0)

    def test_access_by_uuid(self):
        """Modules looked up by UUID must report the expected app class path."""
        for module_name, uuid in self.uuid_list.items():
            module_class = Module.objects.get(uuid=uuid).app_class
            actual_module_name, module_class = module_class.split('.')
            self.assertEquals(actual_module_name, module_name)
            self.assertEquals(module_class, "App")

    @classmethod
    def generate_random_password(cls):
        """Return a random password of PASSWORD_LENGTH characters."""
        chars = string.ascii_letters + string.digits + '!@#$%^&*()'
        # BUGFIX: the original did `random.seed = (os.urandom(1024))`, which
        # REPLACED the seed() function with a bytes object instead of calling it.
        random.seed(os.urandom(1024))
        return ''.join(random.choice(chars) for _ in range(cls.PASSWORD_LENGTH))

    @classmethod
    def get_account_class(cls, module):
        """Return the Account model class of the given authorization module alias."""
        import authorizations
        auth_module = getattr(authorizations, module)
        return auth_module.models.Account
| 43.969512 | 113 | 0.627791 | 779 | 7,211 | 5.593068 | 0.215661 | 0.05233 | 0.026394 | 0.022034 | 0.202662 | 0.152169 | 0.116364 | 0.106725 | 0.084462 | 0.084462 | 0 | 0.004146 | 0.26418 | 7,211 | 163 | 114 | 44.239264 | 0.817 | 0 | 0 | 0.118881 | 0 | 0 | 0.1488 | 0.037027 | 0 | 0 | 0 | 0 | 0.083916 | 1 | 0.062937 | false | 0.090909 | 0.06993 | 0 | 0.174825 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 |
40f1379ab73e0f4b4e9297a1caebe96d0365e7e2 | 577 | py | Python | app/route/stats/route.py | LifeLaboratory/finopolis_backend | 56aac8e0b92193c627b68f3d029f6f804d001db3 | [
"MIT"
] | null | null | null | app/route/stats/route.py | LifeLaboratory/finopolis_backend | 56aac8e0b92193c627b68f3d029f6f804d001db3 | [
"MIT"
] | null | null | null | app/route/stats/route.py | LifeLaboratory/finopolis_backend | 56aac8e0b92193c627b68f3d029f6f804d001db3 | [
"MIT"
] | null | null | null | # coding=utf-8
from app.route.stats.processor import *
from app.api.base.base_router import BaseRouter
from app.api.base import base_name as names
class Stats(BaseRouter):
    """REST endpoint exposing read (GET) and update (PUT) access to post
    statistics (likes, views, comments per social network)."""

    def __init__(self):
        super().__init__()
        # Request parameters this endpoint accepts.
        self.args = [names.face, names.post, names.socnet, names.likes, names.views, names.comments]

    def get(self):
        """Read the request arguments and return the matching statistics,
        or an empty dict when nothing is found."""
        self._read_args()
        # A leftover debug `print(self.data)` was removed here; `put` below
        # never had one, so the handlers are now consistent.
        answer = get_stat(self.data)
        return answer or {}

    def put(self):
        """Read the request arguments and update the stored statistics,
        returning the result or an empty dict."""
        self._read_args()
        answer = update_stat(self.data)
        return answer or {}
| 25.086957 | 100 | 0.646447 | 79 | 577 | 4.518987 | 0.481013 | 0.058824 | 0.056022 | 0.078431 | 0.145658 | 0.145658 | 0 | 0 | 0 | 0 | 0 | 0.002283 | 0.240901 | 577 | 22 | 101 | 26.227273 | 0.812785 | 0.020797 | 0 | 0.25 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.1875 | false | 0 | 0.1875 | 0 | 0.5625 | 0.0625 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 |
40f5e193e0cc75def4b2ba8e4e082e5183a4bea7 | 4,748 | py | Python | tests/test_api_gateway/test_common/test_exceptions.py | Clariteia/api_gateway_common | e68095f31091699fc6cc4537bd6acf97a8dc6c3e | [
"MIT"
] | 3 | 2021-05-14T08:13:09.000Z | 2021-05-26T11:25:35.000Z | tests/test_api_gateway/test_common/test_exceptions.py | Clariteia/api_gateway_common | e68095f31091699fc6cc4537bd6acf97a8dc6c3e | [
"MIT"
] | 27 | 2021-05-13T08:43:19.000Z | 2021-08-24T17:19:36.000Z | tests/test_api_gateway/test_common/test_exceptions.py | Clariteia/api_gateway_common | e68095f31091699fc6cc4537bd6acf97a8dc6c3e | [
"MIT"
] | null | null | null | """
Copyright (C) 2021 Clariteia SL
This file is part of minos framework.
Minos framework can not be copied and/or distributed without the express permission of Clariteia SL.
"""
import unittest
from minos.api_gateway.common import (
EmptyMinosModelSequenceException,
MinosAttributeValidationException,
MinosConfigDefaultAlreadySetException,
MinosConfigException,
MinosException,
MinosMalformedAttributeException,
MinosModelAttributeException,
MinosModelException,
MinosParseAttributeException,
MinosRepositoryAggregateNotFoundException,
MinosRepositoryDeletedAggregateException,
MinosRepositoryException,
MinosRepositoryManuallySetAggregateIdException,
MinosRepositoryManuallySetAggregateVersionException,
MinosRepositoryNonProvidedException,
MinosRepositoryUnknownActionException,
MinosReqAttributeException,
MinosTypeAttributeException,
MultiTypeMinosModelSequenceException,
)
class TestExceptions(unittest.TestCase):
    """Verify the minos exception class hierarchy and the repr/str formatting
    of the exception classes."""
    # --- base exception ---------------------------------------------------
    def test_type(self):
        self.assertTrue(issubclass(MinosException, Exception))
    def test_base_repr(self):
        exception = MinosException("test")
        self.assertEqual("MinosException(message='test')", repr(exception))
    def test_base_str(self):
        exception = MinosException("test")
        self.assertEqual("test", str(exception))
    # --- configuration exceptions -----------------------------------------
    def test_config(self):
        self.assertTrue(issubclass(MinosConfigException, MinosException))
    def test_config_default_already_set(self):
        self.assertTrue(issubclass(MinosConfigDefaultAlreadySetException, MinosConfigException))
    # --- repository exceptions --------------------------------------------
    def test_repository_aggregate_not_found(self):
        self.assertTrue(issubclass(MinosRepositoryAggregateNotFoundException, MinosRepositoryException))
    def test_repository_deleted_aggregate(self):
        self.assertTrue(issubclass(MinosRepositoryDeletedAggregateException, MinosRepositoryException))
    def test_repository_manually_set_aggregate_id(self):
        self.assertTrue(issubclass(MinosRepositoryManuallySetAggregateIdException, MinosRepositoryException))
    def test_repository_manually_set_aggregate_version(self):
        self.assertTrue(issubclass(MinosRepositoryManuallySetAggregateVersionException, MinosRepositoryException,))
    def test_repository_bad_action(self):
        self.assertTrue(issubclass(MinosRepositoryUnknownActionException, MinosRepositoryException))
    def test_repository_non_set(self):
        self.assertTrue(issubclass(MinosRepositoryNonProvidedException, MinosRepositoryException))
    # --- model exceptions --------------------------------------------------
    def test_model(self):
        self.assertTrue(issubclass(MinosModelException, MinosException))
    def test_model_emtpy_sequence(self):
        self.assertTrue(issubclass(EmptyMinosModelSequenceException, MinosModelException))
    def test_model_multi_type_sequence(self):
        self.assertTrue(issubclass(MultiTypeMinosModelSequenceException, MinosModelException))
    # --- model attribute exceptions ----------------------------------------
    def test_model_attribute(self):
        self.assertTrue(issubclass(MinosModelAttributeException, MinosException))
    def test_required_attribute(self):
        self.assertTrue(issubclass(MinosReqAttributeException, MinosModelAttributeException))
    def test_type_attribute(self):
        self.assertTrue(issubclass(MinosTypeAttributeException, MinosModelAttributeException))
    def test_type_attribute_repr(self):
        exception = MinosTypeAttributeException("foo", float, True)
        message = (
            "MinosTypeAttributeException(message=\"The <class 'float'> expected type for 'foo' "
            "does not match with the given data type: <class 'bool'>\")"
        )
        self.assertEqual(message, repr(exception))
    def test_malformed_attribute(self):
        self.assertTrue(issubclass(MinosMalformedAttributeException, MinosModelAttributeException))
    def test_parse_attribute(self):
        self.assertTrue(issubclass(MinosParseAttributeException, MinosModelAttributeException))
    def test_attribute_parse_repr(self):
        exception = MinosParseAttributeException("foo", 34, ValueError())
        message = (
            'MinosParseAttributeException(message="ValueError() '
            "was raised while parsing 'foo' field with 34 value.\")"
        )
        self.assertEqual(message, repr(exception))
    def test_attribute_validation(self):
        self.assertTrue(issubclass(MinosAttributeValidationException, MinosModelAttributeException))
    def test_attribute_validation_repr(self):
        exception = MinosAttributeValidationException("foo", 34)
        message = "MinosAttributeValidationException(message=\"34 value does not pass the 'foo' field validation.\")"
        self.assertEqual(message, repr(exception))
if __name__ == "__main__":
    # Allow running this test module directly with the unittest runner.
    unittest.main()
| 39.566667 | 117 | 0.771272 | 377 | 4,748 | 9.525199 | 0.289125 | 0.044834 | 0.090226 | 0.140351 | 0.205792 | 0.082985 | 0.057366 | 0 | 0 | 0 | 0 | 0.00299 | 0.154802 | 4,748 | 119 | 118 | 39.89916 | 0.891851 | 0.036226 | 0 | 0.082353 | 0 | 0 | 0.067863 | 0.035026 | 0 | 0 | 0 | 0 | 0.270588 | 1 | 0.270588 | false | 0.011765 | 0.023529 | 0 | 0.305882 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
40f7a744294465f0d9fa2d8e7fd481a7d36370d7 | 977 | py | Python | native_prophet.py | 1143048123/cddh | 52d91f02359af659343b8c4ad4f2ba349de20852 | [
"MIT"
] | 177 | 2018-01-05T01:46:07.000Z | 2018-03-09T05:32:45.000Z | native_prophet.py | 1143048123/cddh | 52d91f02359af659343b8c4ad4f2ba349de20852 | [
"MIT"
] | 15 | 2018-01-05T03:28:38.000Z | 2018-01-17T03:04:06.000Z | native_prophet.py | 1143048123/cddh | 52d91f02359af659343b8c4ad4f2ba349de20852 | [
"MIT"
] | 55 | 2018-01-05T05:24:55.000Z | 2018-01-25T11:53:38.000Z | # coding: utf-8
# quoted from kmaiya/HQAutomator
# The Google-search portion is carried over verbatim from the original, unmodified.
import time
import json
import requests
import webbrowser
# Questions already seen, so each one is only searched once.
questions = []


def get_answer():
    """Poll the quiz endpoint once; open a Baidu search for any new question.

    Returns a status string while waiting, or None after opening a search
    for a freshly seen question (matches the original control flow).
    """
    resp = requests.get('http://htpmsg.jiecaojingxuan.com/msg/current', timeout=4).text
    resp_dict = json.loads(resp)
    if resp_dict['msg'] == 'no data':
        return 'Waiting for question...'
    else:
        # BUGFIX: the original re-parsed the body with eval(str(resp)).
        # json.loads above already produced the same dict, and eval() on
        # remote data is unsafe (and crashes on JSON true/false/null).
        question = resp_dict['data']['event']['desc']
        # Strip the leading question number ("N.") and the trailing "?".
        # NOTE(review): if no ASCII '?' is present, find() returns -1 and the
        # last character is dropped -- confirm the feed always uses '?'.
        question = question[question.find('.') + 1:question.find('?')]
        if question not in questions:
            questions.append(question)
            webbrowser.open("https://www.baidu.com/s?ie=UTF-8&wd=" + question)
        else:
            return 'Waiting for new question...'
def main():
    """Poll once per second, printing a timestamp and the poller's status."""
    while True:
        now = time.strftime('%H:%M:%S', time.localtime(time.time()))
        print(now)
        print(get_answer())
        time.sleep(1)


if __name__ == '__main__':
    main()
| 25.710526 | 87 | 0.58956 | 118 | 977 | 4.771186 | 0.559322 | 0.056838 | 0.056838 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.006906 | 0.258956 | 977 | 37 | 88 | 26.405405 | 0.769337 | 0.060389 | 0 | 0.076923 | 0 | 0 | 0.195205 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.153846 | null | null | 0.076923 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
40f9e62c7e463cdddcd04524566bd56b8cb59940 | 1,407 | py | Python | src/sntk/kernels/ntk.py | gear/s-ntk | 3cd72cef4c941941750e03820c9c2850b81d529e | [
"MIT"
] | null | null | null | src/sntk/kernels/ntk.py | gear/s-ntk | 3cd72cef4c941941750e03820c9c2850b81d529e | [
"MIT"
] | null | null | null | src/sntk/kernels/ntk.py | gear/s-ntk | 3cd72cef4c941941750e03820c9c2850b81d529e | [
"MIT"
] | null | null | null | import math
import numpy as np
# return an array K of size (d_max, d_max, N, N), K[i][j] is kernel value of depth i + 1 with first j layers fixed
def kernel_value_batch(X, d_max):
    """NTK values for every depth and every number of fixed bottom layers.

    Returns an array of shape (d_max, d_max, N, N) whose entry [i][j] is the
    kernel of depth i + 1 with the first j layers fixed.
    """
    n_samples = X.shape[0]
    K = np.zeros((d_max, d_max, n_samples, n_samples))
    for fix_dep in range(d_max):
        sigma = np.matmul(X, X.T)      # layer covariance, updated per depth
        theta = np.zeros_like(sigma)   # accumulated NTK contribution
        for dep in range(d_max):
            if fix_dep <= dep:
                theta += sigma
            K[dep, fix_dep] = theta
            diag = np.diag(sigma)
            norms = np.clip(np.sqrt(np.outer(diag, diag)), a_min=1e-9, a_max=None)
            cosines = np.clip(sigma / norms, a_min=-1, a_max=1)
            # Arc-cosine kernel recursion (ReLU activation), identical
            # arithmetic to the original formulation.
            sigma = (cosines * (math.pi - np.arccos(cosines)) + np.sqrt(1.0 - cosines * cosines)) * norms / 2.0 / math.pi
            theta = theta * (math.pi - np.arccos(cosines)) / 2.0 / math.pi
    return K
# return an array K of size (N, N), depth d_max, first fix_dep layers fixed
def kernel_value(X, d_max, fix_dep):
    """NTK of shape (N, N) for depth d_max with the first fix_dep layers fixed."""
    n_samples = X.shape[0]
    K = np.zeros((d_max, n_samples, n_samples))
    sigma = np.matmul(X, X.T)      # layer covariance, updated per depth
    theta = np.zeros_like(sigma)   # accumulated NTK contribution
    for dep in range(d_max):
        if fix_dep <= dep:
            theta += sigma
        K[dep] = theta
        diag = np.diag(sigma)
        norms = np.clip(np.sqrt(np.outer(diag, diag)), a_min=1e-9, a_max=None)
        cosines = np.clip(sigma / norms, a_min=-1, a_max=1)
        # Arc-cosine kernel recursion (ReLU activation), identical arithmetic
        # to the original formulation.
        sigma = (cosines * (math.pi - np.arccos(cosines)) + np.sqrt(1.0 - cosines * cosines)) * norms / 2.0 / math.pi
        theta = theta * (math.pi - np.arccos(cosines)) / 2.0 / math.pi
    return K[d_max - 1]
dc01dc4bc345b863361dbfcbff2946a74c676b49 | 1,261 | py | Python | modules/nmap_script/address_info.py | naimkowshik/reyna-eye | f729ec964e586ae3f63ff29fd524f7aed3748a74 | [
"MIT"
] | 4 | 2021-04-22T19:19:13.000Z | 2022-02-10T09:26:58.000Z | modules/nmap_script/address_info.py | naimkowshik/reyna-eye | f729ec964e586ae3f63ff29fd524f7aed3748a74 | [
"MIT"
] | null | null | null | modules/nmap_script/address_info.py | naimkowshik/reyna-eye | f729ec964e586ae3f63ff29fd524f7aed3748a74 | [
"MIT"
] | 1 | 2022-02-03T19:29:46.000Z | 2022-02-03T19:29:46.000Z | import subprocess
import sys
import time
import os
#############################
# COLORING YOUR SHELL #
#############################
R = "\033[1;31m" #
B = "\033[1;34m" #
Y = "\033[1;33m" #
G = "\033[1;32m" #
RS = "\033[0m" #
W = "\033[1;37m" #
#############################
os.system("clear")
print(" ")
print(R + "[" + G + "User Summary " + R + "]" + RS)
print("""
Shows extra information about IPv6 addresses, such as embedded MAC or IPv4 addresses when available.
Some IP address formats encode extra information; for example some IPv6 addresses encode an IPv4 address or MAC address
script can decode these address formats:
• IPv4-compatible IPv6 addresses,
• IPv4-mapped IPv6 addresses,
• Teredo IPv6 addresses,
• 6to4 IPv6 addresses,
• IPv6 addresses using an EUI-64 interface ID,
• IPv4-embedded IPv6 addresses,
• ISATAP Modified EUI-64 IPv6 addresses.
• IPv4-translated IPv6 addresses and
See RFC 4291 for general IPv6 addressing architecture and the definitions of some terms.
""")
print(" ")
webb = input("" + RS + "[" + B + "ENTER TARGET " + R + "WEBSITE " + Y + "IP" + RS + "]" + G + ": " + RS)
subprocess.check_call(['nmap', '-sV', '-sC', webb])
| 32.333333 | 120 | 0.57732 | 166 | 1,261 | 4.427711 | 0.524096 | 0.176871 | 0.114286 | 0.04898 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.062887 | 0.230769 | 1,261 | 38 | 121 | 33.184211 | 0.686598 | 0.015067 | 0 | 0.066667 | 0 | 0 | 0.66813 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.133333 | 0 | 0.133333 | 0.133333 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
dc0d2dd1628c5437389a9030a61c8c8847b09265 | 1,331 | py | Python | examples/python/fling.py | arminfriedl/fling | 909606a9960fede8951436748c20a9600819b93a | [
"MIT"
] | null | null | null | examples/python/fling.py | arminfriedl/fling | 909606a9960fede8951436748c20a9600819b93a | [
"MIT"
] | null | null | null | examples/python/fling.py | arminfriedl/fling | 909606a9960fede8951436748c20a9600819b93a | [
"MIT"
] | null | null | null | import flingclient as fc
from flingclient.rest import ApiException
from datetime import datetime
# Per default the dockerized fling service runs on localhost:3000.  In case
# you run your own instance, change the base url.  This configuration object
# is shared by every API client created below.
configuration = fc.Configuration(host="http://localhost:3000")
# Every call, with the exception of `/api/auth`, is has to be authorized by a
# bearer token. Get a token by authenticating as admin and set it into the
# configuration. All subsequent calls will send this token in the header as
# `Authorization: Bearer <token> header`
def authenticate(admin_user, admin_password):
    """Authenticate as the admin user and store the resulting bearer token on
    the shared `configuration`, so later calls are authorized."""
    with fc.ApiClient(configuration) as client:
        auth_api = fc.AuthApi(client)
        credentials = fc.AdminAuth(admin_user, admin_password)
        configuration.access_token = auth_api.authenticate_owner(admin_auth=credentials)
# Prompt for admin credentials and obtain a bearer token.
admin_user = input("Username: ")
admin_password = input("Password: ")
authenticate(admin_user, admin_password)
with fc.ApiClient(configuration) as api_client:
    # Create a new fling
    fling_client = fc.FlingApi(api_client)
    fling = fc.Fling(name="A Fling from Python", auth_code="secret",
                     direct_download=False, allow_upload=True,
                     expiration_time=datetime(2099, 12, 12))
    # NOTE(review): the Fling built above is never passed to the service --
    # post_fling() is called with no arguments, so the created fling will not
    # carry these settings.  Likely intended: fling_client.post_fling(fling=fling).
    fling = fling_client.post_fling()
    print(f"Created a new fling: {fling}")
#
| 40.333333 | 86 | 0.75432 | 189 | 1,331 | 5.174603 | 0.502646 | 0.03681 | 0.042945 | 0.067485 | 0.149284 | 0.149284 | 0.149284 | 0.149284 | 0.149284 | 0.149284 | 0 | 0.014324 | 0.160781 | 1,331 | 32 | 87 | 41.59375 | 0.861235 | 0.299775 | 0 | 0.105263 | 0 | 0 | 0.101842 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.052632 | false | 0.210526 | 0.157895 | 0 | 0.210526 | 0.052632 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 |
dc0e5e9f0de144528e9e2fd2507b7d3b024c5594 | 1,408 | py | Python | tests/TestPythonLibDir/RemotePkcs1Signer/__init__.py | q351941406/isign-1 | c24ce94fa88f15ebc6cc2dbda6852c6d17094fc6 | [
"Apache-2.0"
] | 83 | 2019-08-20T09:34:27.000Z | 2022-03-24T13:42:36.000Z | tests/TestPythonLibDir/RemotePkcs1Signer/__init__.py | q351941406/isign-1 | c24ce94fa88f15ebc6cc2dbda6852c6d17094fc6 | [
"Apache-2.0"
] | 15 | 2019-08-20T06:34:16.000Z | 2020-05-17T21:22:52.000Z | tests/TestPythonLibDir/RemotePkcs1Signer/__init__.py | q351941406/isign-1 | c24ce94fa88f15ebc6cc2dbda6852c6d17094fc6 | [
"Apache-2.0"
] | 6 | 2020-02-09T09:35:17.000Z | 2022-03-19T18:43:17.000Z | import base64
import requests
class RemotePkcs1Signer(object):
    """ Client-side Signer subclass, that calls the Signing Service over HTTP to sign things """

    # standard headers for request
    headers = {
        'Content-Type': 'application/json',
        'Accept': 'application/json'
    }

    def __init__(self, host, port, key, algorithm="SIGNATURE_RSA_PKCS1_SHA256", keyfile=None):
        """
        :param host: host of the remote HTTP service
        :param port: port of the remote HTTP service
        :param key: see signing_service.py, in our case we use the hash of the related cert to identify the key
        :param algorithm: which algorithm to use
        :param keyfile: unused, this is a wart :(
        """
        self.endpoint = "http://{}:{}/".format(host, port)
        self.key = key
        self.algorithm = algorithm

    def sign(self, data):
        """POST *data* (bytes) to the signing service and return the raw
        signature bytes it responds with."""
        # base64.b64encode returns bytes on Python 3; decode to str so the
        # payload is JSON-serializable by requests' json= keyword.
        plaintext_base64 = base64.b64encode(data).decode('ascii')
        plaintext_key = u'0'
        payload = {
            "key": self.key,
            "plaintext": [{
                "key": plaintext_key,
                "value": plaintext_base64
            }],
            "algorithm": self.algorithm
        }
        response = requests.post(self.endpoint,
                                 headers=self.__class__.headers,
                                 json=payload).json()
        # The service answers {"signature": {<plaintext_key>: <b64 sig>}}.
        signature = base64.b64decode(response[u'signature'][plaintext_key])
        return signature
| 32.744186 | 106 | 0.599432 | 157 | 1,408 | 5.267516 | 0.477707 | 0.058041 | 0.026602 | 0.036276 | 0.065296 | 0.065296 | 0 | 0 | 0 | 0 | 0 | 0.020222 | 0.297585 | 1,408 | 42 | 107 | 33.52381 | 0.815976 | 0.019886 | 0 | 0 | 0 | 0 | 0.129032 | 0.02621 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.074074 | null | null | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
dc19c0faf717f2a11500ab0d47cd0b71aa1f7557 | 4,638 | py | Python | musicscore/musicxml/types/complextypes/notations.py | alexgorji/music_score | b4176da52295361f3436826903485c5cb8054c5e | [
"MIT"
] | 2 | 2020-06-22T13:33:28.000Z | 2020-12-30T15:09:00.000Z | musicscore/musicxml/types/complextypes/notations.py | alexgorji/music_score | b4176da52295361f3436826903485c5cb8054c5e | [
"MIT"
] | 37 | 2020-02-18T12:15:00.000Z | 2021-12-13T20:01:14.000Z | musicscore/musicxml/types/complextypes/notations.py | alexgorji/music_score | b4176da52295361f3436826903485c5cb8054c5e | [
"MIT"
] | null | null | null | from musicscore.dtd.dtd import Sequence, GroupReference, Choice, Element
from musicscore.musicxml.attributes.optional_unique_id import OptionalUniqueId
from musicscore.musicxml.attributes.printobject import PrintObject
from musicscore.musicxml.groups.common import Editorial
from musicscore.musicxml.elements.xml_element import XMLElement
from musicscore.musicxml.types.complextypes.arpeggiate import ComplexTypeArpeggiate
from musicscore.musicxml.types.complextypes.articulations import ComplexTypeArticulations
from musicscore.musicxml.types.complextypes.complextype import ComplexType
from musicscore.musicxml.types.complextypes.dynamics import Dynamics
from musicscore.musicxml.types.complextypes.fermata import ComplexTypeFermata
from musicscore.musicxml.types.complextypes.ornaments import ComplexTypeOrnaments
from musicscore.musicxml.types.complextypes.slide import ComplexTypeSlide
from musicscore.musicxml.types.complextypes.slur import ComplexTypeSlur
from musicscore.musicxml.types.complextypes.technical import ComplexTypeTechnical
from musicscore.musicxml.types.complextypes.tied import ComplexTypeTied
from musicscore.musicxml.types.complextypes.tuplet import ComplexTypeTuplet
class Tied(ComplexTypeTied):
    """The <tied> notation element; all behaviour comes from ComplexTypeTied."""
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
class Slur(ComplexTypeSlur):
    """The <slur> notation element; a `type` argument is required."""
    # XML tag name passed to the base complex type.
    _TAG = 'slur'
    def __init__(self, type, *args, **kwargs):
        super().__init__(tag=self._TAG, type=type, *args, **kwargs)
class Tuplet(ComplexTypeTuplet):
    """The <tuplet> notation element; all behaviour comes from ComplexTypeTuplet."""
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
class Glissando(XMLElement):
    """Placeholder for the <glissando> element -- construction always raises."""
    def __init__(self, value, *args, **kwargs):
        super().__init__(tag='glissando', value=value, *args, **kwargs)
        # Not implemented yet: instantiating this class always fails.
        raise NotImplementedError()
class Slide(ComplexTypeSlide):
    """The <slide> notation element; a `type` argument is required."""
    _TAG = 'slide'
    def __init__(self, type, *args, **kwargs):
        super().__init__(tag=self._TAG, type=type, *args, **kwargs)
class Ornaments(ComplexTypeOrnaments):
    """The <ornaments> notation element."""
    _TAG = 'ornaments'
    def __init__(self, *args, **kwargs):
        super().__init__(tag=self._TAG, *args, **kwargs)
class Technical(ComplexTypeTechnical):
    """The <technical> notation element (performance-technique indications)."""
    _TAG = 'technical'
    def __init__(self, *args, **kwargs):
        super().__init__(tag=self._TAG, *args, **kwargs)
class Articulations(ComplexTypeArticulations):
    """The <articulations> notation element."""
    _TAG = 'articulations'
    def __init__(self, *args, **kwargs):
        super().__init__(tag=self._TAG, *args, **kwargs)
class Fermata(ComplexTypeFermata):
    """The <fermata> notation element; the value defaults to 'normal'."""
    _TAG = 'fermata'
    def __init__(self, value='normal', *args, **kwargs):
        super().__init__(tag=self._TAG, value=value, *args, **kwargs)
class Arpeggiate(ComplexTypeArpeggiate):
    """The <arpeggiate> notation element."""
    _TAG = 'arpeggiate'
    def __init__(self, *args, **kwargs):
        super().__init__(tag=self._TAG, *args, **kwargs)
class NonArpeggiate(XMLElement):
    """Placeholder for the <non-arpeggiate> element -- construction always raises."""
    def __init__(self, value, *args, **kwargs):
        super().__init__(tag='non-arpeggiate', value=value, *args, **kwargs)
        raise NotImplementedError()
class AccidentalMark(XMLElement):
    """Placeholder for the <accidental-mark> element -- construction always raises."""
    def __init__(self, value, *args, **kwargs):
        super().__init__(tag='accidental-mark', value=value, *args, **kwargs)
        raise NotImplementedError()
class OtherNotation(XMLElement):
    """Placeholder for the <other-notation> element -- construction always raises."""
    def __init__(self, value, *args, **kwargs):
        super().__init__(tag='other-notation', value=value, *args, **kwargs)
        raise NotImplementedError()
class ComplexTypeNotations(ComplexType, PrintObject, OptionalUniqueId):
    """
    Notations refer to musical notations, not XML notations. Multiple notations are allowed in order to represent
    multiple editorial levels. The print-object attribute, added in Version 3.0, allows notations to represent details
    of performance technique, such as fingerings, without having them appear in the score.
    """
    # Content model: editorial content followed by any number (0..unbounded)
    # of notation child elements, each drawn from the choice below.
    _DTD = Sequence(
        GroupReference(Editorial),
        Choice(
            Element(Tied),
            Element(Slur),
            Element(Tuplet),
            Element(Glissando),
            Element(Slide),
            Element(Ornaments),
            Element(Technical),
            Element(Articulations),
            Element(Dynamics),
            Element(Fermata),
            Element(Arpeggiate),
            Element(NonArpeggiate),
            Element(AccidentalMark),
            Element(OtherNotation),
            min_occurrence=0,
            max_occurrence=None  # None means unbounded
        )
    )
    def __init__(self, *args, **kwargs):
        super().__init__(tag='notations', *args, **kwargs)
| 30.715232 | 118 | 0.684994 | 461 | 4,638 | 6.605206 | 0.223427 | 0.091954 | 0.108374 | 0.087356 | 0.434483 | 0.293596 | 0.293596 | 0.219704 | 0.208867 | 0.208867 | 0 | 0.000798 | 0.189737 | 4,638 | 150 | 119 | 30.92 | 0.809473 | 0.067055 | 0 | 0.277778 | 0 | 0 | 0.029307 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.155556 | false | 0 | 0.177778 | 0 | 0.577778 | 0.011111 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 |
90541de92a1d97d772f070e495cb4dccfca0eef7 | 1,416 | py | Python | dev/libs.py | karimwitani/webscraping | 58d4b2587d039fcea567db2caf86bbddb4e0b96f | [
"MIT"
] | null | null | null | dev/libs.py | karimwitani/webscraping | 58d4b2587d039fcea567db2caf86bbddb4e0b96f | [
"MIT"
] | null | null | null | dev/libs.py | karimwitani/webscraping | 58d4b2587d039fcea567db2caf86bbddb4e0b96f | [
"MIT"
] | null | null | null | import selenium
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.common.exceptions import TimeoutException
def browser_init():
    """Start and return a Chrome WebDriver.
    NOTE(review): the chromedriver path is hard-coded (macOS layout); make it
    configurable before running on other machines.
    """
    option = webdriver.ChromeOptions()
    browser = webdriver.Chrome(executable_path='/Library/Application Support/Google/chromedriver', chrome_options=option)
    return browser
def insta_login(browser):
    """Log in to Instagram in *browser* and dismiss the follow-up dialogs.
    NOTE(review): account credentials are hard-coded in source below -- move
    them to environment variables or a config file before sharing this code.
    """
    browser.get('https://www.instagram.com')
    #Find username/pass fields (wait until they are clickable)
    username = WebDriverWait(browser,10).until(EC.element_to_be_clickable((By.XPATH, '//input[@name="username"]')))
    password = WebDriverWait(browser,10).until(EC.element_to_be_clickable((By.XPATH, '//input[@name="password"]')))
    #input username and pass
    username.clear()
    username.send_keys('itanikarim')
    password.clear()
    password.send_keys('1995PPrr')
    #Login
    Login_button = WebDriverWait(browser, 2).until(EC.element_to_be_clickable((By.XPATH, '//*[@id="loginForm"]/div/div[3]'))).click()
    #Skip buttons: dismiss the two "Not Now" dialogs shown after login
    not_now = WebDriverWait(browser, 10).until(EC.element_to_be_clickable((By.XPATH, '//button[contains(text(), "Not Now")]'))).click()
    not_now2 = WebDriverWait(browser, 10).until(EC.element_to_be_clickable((By.XPATH, '//button[contains(text(), "Not Now")]'))).click()
    print("everything ok")
905515ca4421e0d997a1e7e93a11455f5f918cff | 380 | py | Python | setup.py | dwastberg/osmuf | 0cef4e87401b3fc2d344d7e067b4d9ada25848a4 | [
"MIT"
] | null | null | null | setup.py | dwastberg/osmuf | 0cef4e87401b3fc2d344d7e067b4d9ada25848a4 | [
"MIT"
] | null | null | null | setup.py | dwastberg/osmuf | 0cef4e87401b3fc2d344d7e067b4d9ada25848a4 | [
"MIT"
] | null | null | null | from setuptools import setup
# Packaging metadata for the `osmuf` distribution.
setup(name='osmuf',
      version='0.1',
      # third-party packages installed alongside osmuf
      install_requires=[
          "seaborn",
      ],
      description='Urban Form analysis from OpenStreetMap',
      url='http://github.com/atelierlibre/osmuf',
      author='AtelierLibre',
      author_email='mail@atelierlibre.org',
      license='MIT',
      packages=['osmuf'],
      # install as a regular directory, not a zipped egg
      zip_safe=False)
| 25.333333 | 59 | 0.615789 | 39 | 380 | 5.923077 | 0.820513 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.006969 | 0.244737 | 380 | 14 | 60 | 27.142857 | 0.797909 | 0 | 0 | 0 | 0 | 0 | 0.342105 | 0.055263 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 0.076923 | 0 | 0.076923 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
90572919b03e5c9195f95e3b9733b72ece7106bb | 5,623 | py | Python | depimpact/tests/test_functions.py | NazBen/dep-impact | 284e72bccfb6309110df5191dfae3c0a93ce813b | [
"MIT"
] | null | null | null | depimpact/tests/test_functions.py | NazBen/dep-impact | 284e72bccfb6309110df5191dfae3c0a93ce813b | [
"MIT"
] | null | null | null | depimpact/tests/test_functions.py | NazBen/dep-impact | 284e72bccfb6309110df5191dfae3c0a93ce813b | [
"MIT"
] | null | null | null | import numpy as np
import openturns as ot
def func_overflow(X, model=1, h_power=0.6):
    """Overflow model function.

    Parameters
    ----------
    X : np.ndarray, shape : N x 8
        Input variables
        - x1 : Flow,
        - x2 : Krisler Coefficient,
        - x3 : Zv, etc...
    model : bool, optional(default=1)
        If 1, the classical model. If 2, the economic model.

    Returns
    -------
    Overflow S (if model=1) or Cost Cp (if model=2).
    """
    X = np.asarray(X)
    if X.shape[0] == X.size:
        # A single sample was passed as a flat vector.
        dim = X.size
        ids = None
    else:
        _, dim = X.shape
        ids = range(X.shape[0])
    assert dim == 8, "Incorect dimension : dim = %d != 8" % dim

    # Unpack the eight physical input variables column by column.
    Q, Ks, Zv, Zm, Hd, Cb, L, B = (X[ids, j] for j in range(8))

    # Water height and resulting overflow.
    H = (Q / (B * Ks * np.sqrt((Zm - Zv) / L)))**h_power
    S = Zv + H - Hd - Cb

    if model == 1:
        return S
    if model == 2:
        Cp = (S > 0.) + (0.2 + 0.8 * (1. - np.exp(-1000. / (S**4)))) * (S <= 0.) + 1./20. * (Hd * (Hd > 8.) + 8*(Hd <= 8.))
        return Cp
    raise AttributeError('Unknow model.')
# Marginal input distributions for the overflow model (func_overflow), in the
# same order as its eight input columns.
# Flow Q: Gumbel parameterised by (mu, sigma), truncated to [500, 3000].
tmp = ot.Gumbel()
tmp.setParameter(ot.GumbelMuSigma()([1013., 558.]))
dist_Q = ot.TruncatedDistribution(tmp, 500., 3000.)
dist_Ks = ot.TruncatedNormal(30., 8., 15., np.inf)
dist_Zv = ot.Triangular(49., 50., 51.)
dist_Zm = ot.Triangular(54., 55., 56.)
dist_Hd = ot.Uniform(7., 9.)
dist_Cb = ot.Triangular(55., 55.5, 56.)
dist_L = ot.Triangular(4990., 5000., 5010.)
dist_B = ot.Triangular(295., 300., 305.)
# Margins and display names, ordered to match func_overflow's columns.
margins_overflow = [dist_Q, dist_Ks, dist_Zv, dist_Zm, dist_Hd, dist_Cb, dist_L, dist_B]
var_names_overflow = ["Q", "K_s", "Z_v", "Z_m", "H_d", "C_b", "L", "B"]
def func_sum(x, a=None):
    """Additive weighted model function.

    Parameters
    ----------
    x : np.ndarray
        The input values.
    a : np.ndarray
        The input coefficients.

    Returns
    -------
    y : a.x^t
    """
    if isinstance(x, list):
        x = np.asarray(x)
    n_samples, dim = x.shape
    # Default to unit weights; a 1-d coefficient vector becomes a column.
    if a is None:
        a = np.ones((dim, 1))
    if a.ndim == 1:
        a = a.reshape(-1, 1)
        assert a.shape[0] == dim, "Shape not good"
    elif a.ndim > 2:
        raise AttributeError('Dimension problem for constant a')
    y = x @ a
    # Collapse trivial shapes: a scalar for one value, 1-d for one column.
    if y.size == 1:
        return y.item()
    if y.size == y.shape[0]:
        return y.ravel()
    return y
def func_prod(x, a=None):
    """Product weighted model function.

    Parameters
    ----------
    x : np.ndarray
        The input values.
    a : np.ndarray
        The input coefficients.

    Returns
    -------
    y : a.x^t
    """
    if isinstance(x, list):
        x = np.asarray(x)
    n, dim = x.shape
    if a is None:
        a = np.ones((dim, 1))
    if a.ndim == 1:
        a = a.reshape(-1, 1)
        assert a.shape[0] == dim, "Shape not good"
    elif a.ndim > 2:
        raise AttributeError('Dimension problem for constant a')
    # NOTE(review): despite the name and docstring ("product"), this computes
    # the row-wise *sum* of x and never uses the validated coefficients `a`
    # (func_sum above uses np.dot(x, a)). Either the name/doc or the formula
    # is wrong -- confirm the intended semantics (e.g. np.prod(x, axis=1))
    # before relying on this function.
    y = np.sum(x, axis=1)
    if y.size == 1:
        return y.item()
    elif y.size == y.shape[0]:
        return y.ravel()
    else:
        return y
def func_spec(x, a=[0.58, -1, -1.0, 0, 0., 0.]):
    """Product weighted model function.

    Parameters
    ----------
    x : np.ndarray
        The input values.
    a : np.ndarray
        The input coefficients.

    Returns
    -------
    y : a.x^t
    """
    if isinstance(x, list):
        x = np.asarray(x)
    n, dim = x.shape
    # Six elementary row-wise terms, combined with the coefficients in `a`.
    terms = [
        (x**2).prod(axis=1),
        x.prod(axis=1),
        (x**2).sum(axis=1),
        x.sum(axis=1),
        np.sin(x).sum(axis=1),
        np.cos(x).sum(axis=1),
    ]
    y = sum(coef * term for coef, term in zip(a, terms))
    # Collapse trivial shapes: a scalar for one value, 1-d for one column.
    if y.size == 1:
        return y.item()
    if y.size == y.shape[0]:
        return y.ravel()
    return y
def func_cum_sum_weight(x, weights=None, use_sum=True, const=[0., 0., 0., 1., 0., 0.]):
    """Additive weighted model function.

    Parameters
    ----------
    x : np.ndarray
        The input values.
    weights : np.ndarray
        The input coefficients.

    Returns
    -------
    y : a.x^t
    """
    if isinstance(x, list):
        x = np.asarray(x)
    n, dim = x.shape
    # All (i, j) index pairs with j < i, in the order the pairwise terms use.
    pairs = [(i, j) for i in range(1, dim) for j in range(i)]
    if weights is None:
        # Default lower-triangular weights 1..n_pairs, normalised by the
        # number of pairs dim*(dim-1)/2.
        weights = np.zeros((dim, dim))
        corr_dim = dim * (dim-1)/2
        for rank, (i, j) in enumerate(pairs, start=1):
            weights[i, j] = rank
        weights /= corr_dim
    if weights.ndim == 1:
        weights = weights.reshape(-1, 1)
        assert weights.shape[0] == dim, "Shape not good"
    elif weights.ndim > 2:
        raise AttributeError('Dimension problem for constant a')
    # Combine the pairwise func_spec terms multiplicatively or additively.
    if use_sum:
        y = 1
        for i, j in pairs:
            y *= (1. + weights[i, j] * func_spec(np.c_[x[:, i], x[:, j]], a=const))
    else:
        y = 0
        for i, j in pairs:
            y += weights[i, j] * func_spec(np.c_[x[:, i], x[:, j]], a=const)
    return y
def multi_output_func_sum(x, output_dim=2):
    """Additive model function with multi output.

    Parameters
    ----------
    x : np.ndarray
        The input values.
    output_dim : int
        The number of output dimension.

    Returns
    -------
    y : [i * x]
    """
    # Output column k holds k times the row sums (column 0 is all zeros).
    columns = [x.sum(axis=1) * scale for scale in range(output_dim)]
    return np.asarray(columns).T
9059540a6a1df436a316a8b4d0bf19c43271fcb4 | 1,699 | py | Python | app/main/forms.py | ingabire1/blog | 5fcee6027cee9fbdcd94057123862bd146a16e98 | [
"Unlicense"
] | null | null | null | app/main/forms.py | ingabire1/blog | 5fcee6027cee9fbdcd94057123862bd146a16e98 | [
"Unlicense"
] | null | null | null | app/main/forms.py | ingabire1/blog | 5fcee6027cee9fbdcd94057123862bd146a16e98 | [
"Unlicense"
] | null | null | null |
from flask_wtf import FlaskForm
from wtforms import StringField,TextAreaField,SubmitField
from wtforms.validators import Required
class ReviewForm(FlaskForm):
    """Form for submitting a movie review: title and body, both required."""
    title = StringField('Review title',validators=[Required()])
    review = TextAreaField('Movie review', validators=[Required()])
    submit = SubmitField('Submit')
class UpdateProfile(FlaskForm):
    """Form for updating the user's profile bio."""
    bio = TextAreaField('Tell us about you.',validators = [Required()])
    submit = SubmitField('Submit')
# class LoginForm(FlaskForm):
# email = StringField('Your Email Address',validators=[Required(),Email()])
# password = PasswordField('Password',validators =[Required()])
# remember = BooleanField('Remember me')
# submit = SubmitField('Sign In')
class BlogForm(FlaskForm):
    """Form for creating a blog post (title + body)."""
    # my_category = StringField('Category', validators=[Required()])
    title = StringField('Title', validators=[Required()])
    blog_post = TextAreaField('Type Blog here', validators=[Required()])
    post = SubmitField('Post Blog')
class CommentForm(FlaskForm):
    """Form for leaving a comment on a blog post."""
    name = StringField('Name',validators=[Required()])
    # email = StringField('Email', validators=[Required()],render_kw={"placeholder": "Email"})
    comment = TextAreaField('Enter Comment', validators=[Required()])
    post = SubmitField('Post Comment')
class SubscriptionForm(FlaskForm):
    """Form for subscribing with a name and email address."""
    name = StringField('First Name', validators=[Required()])
    subscription_data = StringField('Email', validators=[Required()])
    subscribe = SubmitField('Subscribe')
class UpdatePostForm(FlaskForm):
    """Form for editing the body of an existing blog post."""
    # title = StringField('Title', validators=[Required()])
    blog_post = TextAreaField('Type Blog here', validators=[Required()])
    submit=SubmitField('SUBMIT')
| 42.475 | 94 | 0.712772 | 162 | 1,699 | 7.438272 | 0.333333 | 0.224066 | 0.057261 | 0.087137 | 0.291286 | 0.225726 | 0.149378 | 0.149378 | 0.149378 | 0.149378 | 0 | 0 | 0.140082 | 1,699 | 39 | 95 | 43.564103 | 0.824778 | 0.268393 | 0 | 0.16 | 0 | 0 | 0.12571 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.12 | 0 | 1 | 0 | 0 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
905fb1174dc9f76a043ce3432db2989539fb3eae | 1,212 | py | Python | surface/ex_surface02.py | orbingol/NURBS-Python_Examples | c99d8cd3d20e7523694ce62f72760b260582fa11 | [
"MIT"
] | 48 | 2017-12-14T09:54:48.000Z | 2020-03-30T13:34:44.000Z | surface/ex_surface02.py | GabrielJie/NURBS-Python_Examples | c99d8cd3d20e7523694ce62f72760b260582fa11 | [
"MIT"
] | 7 | 2020-05-27T04:27:24.000Z | 2021-05-25T16:11:39.000Z | surface/ex_surface02.py | GabrielJie/NURBS-Python_Examples | c99d8cd3d20e7523694ce62f72760b260582fa11 | [
"MIT"
] | 37 | 2017-10-14T08:11:11.000Z | 2020-05-04T02:51:58.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Examples for the NURBS-Python Package
Released under MIT License
Developed by Onur Rauf Bingol (c) 2016-2017
"""
import os
from geomdl import BSpline
from geomdl import utilities
from geomdl import exchange
from geomdl import operations
from geomdl.visualization import VisPlotly
# Fix file path: work relative to this script's directory so the .cpt file is found
os.chdir(os.path.dirname(os.path.realpath(__file__)))
# Create a BSpline surface instance
surf = BSpline.Surface()
# Set degrees (cubic in both parametric directions)
surf.degree_u = 3
surf.degree_v = 3
# Set control points from the two-dimensional control-point file
surf.set_ctrlpts(*exchange.import_txt("ex_surface02.cpt", two_dimensional=True))
# Set knot vectors, generated for 6 control points in each direction
surf.knotvector_u = utilities.generate_knot_vector(surf.degree_u, 6)
surf.knotvector_v = utilities.generate_knot_vector(surf.degree_v, 6)
# Set evaluation delta (sampling step over the parametric domain)
surf.delta = 0.025
# Evaluate surface
surf.evaluate()
# Plot the control point grid and the evaluated surface with Plotly
vis_comp = VisPlotly.VisSurface()
surf.vis = vis_comp
surf.render()
# Evaluate surface tangent and normal at the given u and v
uv = [0.2, 0.9]
surf_tangent = operations.tangent(surf, uv)
surf_normal = operations.normal(surf, uv)
# Good to have something here to put a breakpoint
pass
| 22.867925 | 80 | 0.763201 | 186 | 1,212 | 4.854839 | 0.516129 | 0.055371 | 0.070875 | 0.059801 | 0.081949 | 0.081949 | 0 | 0 | 0 | 0 | 0 | 0.022201 | 0.145215 | 1,212 | 52 | 81 | 23.307692 | 0.849421 | 0.366337 | 0 | 0 | 0 | 0 | 0.02171 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0.045455 | 0.318182 | 0 | 0.318182 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 1 |
90600f2b374617aa571df4d29f498ce0b363ef8b | 1,380 | bzl | Python | dev/bazel/deps/micromkl.bzl | cmsxbc/oneDAL | eeb8523285907dc359c84ca4894579d5d1d9f57e | [
"Apache-2.0"
] | 169 | 2020-03-30T09:13:05.000Z | 2022-03-15T11:12:36.000Z | dev/bazel/deps/micromkl.bzl | cmsxbc/oneDAL | eeb8523285907dc359c84ca4894579d5d1d9f57e | [
"Apache-2.0"
] | 1,198 | 2020-03-24T17:26:18.000Z | 2022-03-31T08:06:15.000Z | dev/bazel/deps/micromkl.bzl | cmsxbc/oneDAL | eeb8523285907dc359c84ca4894579d5d1d9f57e | [
"Apache-2.0"
] | 75 | 2020-03-30T11:39:58.000Z | 2022-03-26T05:16:20.000Z | #===============================================================================
# Copyright 2020-2021 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#===============================================================================
load("@onedal//dev/bazel:repos.bzl", "repos")
# Prebuilt micro-MKL repository for the host (CPU) build.
micromkl_repo = repos.prebuilt_libs_repo_rule(
    # Header locations; %{os} is substituted with the target OS directory.
    includes = [
        "include",
        "%{os}/include",
    ],
    # Static libraries exposed by the repository.
    libs = [
        "%{os}/lib/intel64/libdaal_mkl_thread.a",
        "%{os}/lib/intel64/libdaal_mkl_sequential.a",
        "%{os}/lib/intel64/libdaal_vmlipp_core.a",
    ],
    build_template = "@onedal//dev/bazel/deps:micromkl.tpl.BUILD",
)

# Prebuilt micro-MKL repository for the DPC++ (SYCL) build; ships a single
# combined static library.
micromkl_dpc_repo = repos.prebuilt_libs_repo_rule(
    includes = [
        "include",
    ],
    libs = [
        "lib/intel64/libdaal_sycl.a",
    ],
    build_template = "@onedal//dev/bazel/deps:micromkldpc.tpl.BUILD",
)
| 33.658537 | 80 | 0.603623 | 165 | 1,380 | 4.939394 | 0.563636 | 0.07362 | 0.083436 | 0.069939 | 0.266258 | 0.186503 | 0.186503 | 0.107975 | 0 | 0 | 0 | 0.017528 | 0.173188 | 1,380 | 40 | 81 | 34.5 | 0.696757 | 0.52029 | 0 | 0.454545 | 0 | 0 | 0.451314 | 0.401855 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
9061aefc06f55a6c43c18d036ea605173b84260a | 3,580 | py | Python | opennsa/protocols/nsi2/bindings/p2pservices.py | jmacauley/opennsa | 853c0fc8e065e74815cbc3f769939f64ac6aadeb | [
"BSD-3-Clause"
] | null | null | null | opennsa/protocols/nsi2/bindings/p2pservices.py | jmacauley/opennsa | 853c0fc8e065e74815cbc3f769939f64ac6aadeb | [
"BSD-3-Clause"
] | null | null | null | opennsa/protocols/nsi2/bindings/p2pservices.py | jmacauley/opennsa | 853c0fc8e065e74815cbc3f769939f64ac6aadeb | [
"BSD-3-Clause"
] | null | null | null | ## Generated by pyxsdgen
from xml.etree import ElementTree as ET
# types
class OrderedStpType(object):
    """An STP identifier paired with an ordering index, as used in ERO lists."""

    def __init__(self, order, stp):
        self.order = order  # int
        self.stp = stp      # StpIdType -> string

    @classmethod
    def build(cls, element):
        """Construct an instance from an ElementTree element."""
        return cls(element.get('order'), element.findtext('stp'))

    def xml(self, elementName):
        """Serialise to an ElementTree element named *elementName*."""
        node = ET.Element(elementName, attrib={'order': str(self.order)})
        ET.SubElement(node, 'stp').text = self.stp
        return node
class TypeValueType(object):
    """A typed string value: a 'type' attribute qualifying the text content."""

    def __init__(self, type_, value):
        self.type_ = type_
        self.value = value

    @classmethod
    def build(cls, element):
        """Construct an instance from an ElementTree element."""
        return cls(element.get('type'), element.text)

    def xml(self, elementName):
        """Serialise to an ElementTree element named *elementName*."""
        node = ET.Element(elementName, attrib={'type': self.type_})
        node.text = self.value
        return node
class P2PServiceBaseType(object):
    """A point-to-point service request: capacity between a source and a
    destination STP, with optional ERO (ordered path) and extra parameters."""
    def __init__(self, capacity, directionality, symmetricPath, sourceSTP, destSTP, ero, parameter):
        self.capacity = capacity # long
        self.directionality = directionality # DirectionalityType -> string
        self.symmetricPath = symmetricPath # boolean
        self.sourceSTP = sourceSTP # StpIdType -> string
        self.destSTP = destSTP # StpIdType -> string
        self.ero = ero # [ OrderedStpType ]
        self.parameter = parameter # [ TypeValueType ]
    @classmethod
    def build(self, element):
        """Construct an instance from an ElementTree element; optional
        children (symmetricPath, ero, parameter) map to None when absent.
        NOTE(review): lookups here use non-namespaced child tags, while
        parseElement() matches namespace-qualified tags -- confirm against
        the documents actually exchanged."""
        return P2PServiceBaseType(
            int(element.findtext('capacity')),
            element.findtext('directionality'),
            True if element.findtext('symmetricPath') == 'true' else False if element.find('symmetricPath') is not None else None,
            element.findtext('sourceSTP'),
            element.findtext('destSTP'),
            [ OrderedStpType.build(e) for e in element.find('ero') ] if element.find('ero') is not None else None,
            [ TypeValueType.build(e) for e in element.findall('parameter') ] if element.find('parameter') is not None else None
        )
    def xml(self, elementName):
        """Serialise to an ElementTree element named *elementName*; optional
        fields are emitted only when set."""
        r = ET.Element(elementName)
        ET.SubElement(r, 'capacity').text = str(self.capacity)
        ET.SubElement(r, 'directionality').text = self.directionality
        if self.symmetricPath is not None:
            ET.SubElement(r, 'symmetricPath').text = 'true' if self.symmetricPath else 'false'
        ET.SubElement(r, 'sourceSTP').text = self.sourceSTP
        ET.SubElement(r, 'destSTP').text = self.destSTP
        if self.ero is not None:
            ET.SubElement(r, 'ero').extend( [ e.xml('orderedSTP') for e in self.ero ] )
        if self.parameter is not None:
            for p in self.parameter:
                ET.SubElement(r, 'parameter', attrib={'type': p.type_}).text = p.value
        return r
# XML namespace and qualified tag names for the NSI point-to-point service.
POINT2POINT_NS = 'http://schemas.ogf.org/nsi/2013/12/services/point2point'
p2ps = ET.QName(POINT2POINT_NS, 'p2ps')
capacity = ET.QName(POINT2POINT_NS, 'capacity')
parameter = ET.QName(POINT2POINT_NS, 'parameter')
def parse(input_):
    """Parse an XML string and build the binding object for its root element."""
    return parseElement(ET.fromstring(input_))
def parseElement(element):
    """Build the binding object registered for *element*'s tag.

    Raises ValueError for tags with no registered binding."""
    handlers = {
        str(p2ps): P2PServiceBaseType,
        str(parameter): TypeValueType
    }
    if element.tag not in handlers:
        raise ValueError('No type mapping for tag %s' % element.tag)
    return handlers[element.tag].build(element)
| 33.773585 | 134 | 0.613966 | 397 | 3,580 | 5.465995 | 0.219144 | 0.04424 | 0.047926 | 0.023502 | 0.174654 | 0.151152 | 0.063594 | 0.063594 | 0.04424 | 0 | 0 | 0.006528 | 0.272626 | 3,580 | 105 | 135 | 34.095238 | 0.826805 | 0.047486 | 0 | 0.151899 | 1 | 0 | 0.089491 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.139241 | false | 0 | 0.012658 | 0.037975 | 0.291139 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
9066b9980c0b3869cc716e1c22a3fe141c968868 | 1,705 | py | Python | myApps/test_web.py | Rocket-hodgepodge/NewsWeb | 7835b6ae4e754eb96f3f0d5983b2421c9464fee3 | [
"BSD-3-Clause"
] | null | null | null | myApps/test_web.py | Rocket-hodgepodge/NewsWeb | 7835b6ae4e754eb96f3f0d5983b2421c9464fee3 | [
"BSD-3-Clause"
] | null | null | null | myApps/test_web.py | Rocket-hodgepodge/NewsWeb | 7835b6ae4e754eb96f3f0d5983b2421c9464fee3 | [
"BSD-3-Clause"
] | 2 | 2018-07-04T01:43:36.000Z | 2018-07-04T06:12:47.000Z | from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
import unittest
class NewVisitorTest(unittest.TestCase):
    """Browser-driven test that logs in to baidu.com through the UI.
    NOTE(review): account credentials are hard-coded below -- keep them out
    of source control.
    """
    def setUp(self):
        # Fresh Chrome session per test with a generous shared timeout.
        self.timeout = 40
        self.browser = webdriver.Chrome()
        self.browser.set_page_load_timeout(self.timeout)
        self.wait = WebDriverWait(self.browser, self.timeout)
    def tearDown(self):
        self.browser.quit()
    def test_can_start_a_list_and_retrieve_it_later(self):
        self.browser.get('https://www.baidu.com')
        self.assertIn('百度', self.browser.title)
        # Open the login dialog ('登录' = "log in").
        login_link = self.wait.until(
            EC.element_to_be_clickable((By.LINK_TEXT, '登录')))
        login_link.click()
        # Switch to the username/password login form.
        login_link_2 = self.wait.until(
            EC.element_to_be_clickable((By.ID, 'TANGRAM__PSP_10__footerULoginBtn')))
        login_link_2.click()
        # Fill in the credentials.
        username_input = self.wait.until(
            EC.presence_of_element_located((By.ID, 'TANGRAM__PSP_10__userName')))
        username_input.clear()
        username_input.send_keys('橙色烟月')
        password_input = self.wait.until(
            EC.presence_of_element_located((By.ID, 'TANGRAM__PSP_10__password')))
        password_input.clear()
        password_input.send_keys('1659636840sec')
        login_submit_button = self.wait.until(
            EC.element_to_be_clickable((By.ID, 'TANGRAM__PSP_10__submit')))
        login_submit_button.click()
        # After login, the top bar should display the account's name.
        username_span = self.wait.until(
            EC.presence_of_element_located((By.CSS_SELECTOR, '#s_username_top > span')))
        self.assertEqual(username_span.text, 'PebbleApp')
        # user_login_link = self.browser.find_element_by_id('TANGRAM__PSP_10__footerULoginBtn')
        # user_login_link.click()
if __name__ == '__main__':
    # Run the test suite; suppress warnings emitted during the browser run.
    unittest.main(warnings='ignore')
| 31.574074 | 89 | 0.775367 | 242 | 1,705 | 5.07438 | 0.371901 | 0.062704 | 0.063518 | 0.07329 | 0.281759 | 0.281759 | 0.2443 | 0.2443 | 0.2443 | 0.180782 | 0 | 0.015727 | 0.104985 | 1,705 | 53 | 90 | 32.169811 | 0.788991 | 0.06393 | 0 | 0 | 0 | 0 | 0.120527 | 0.065913 | 0 | 0 | 0 | 0 | 0.052632 | 1 | 0.078947 | false | 0.105263 | 0.131579 | 0 | 0.236842 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 |
906e5ccc6b995d3e3569837e29fff36deedc118c | 1,174 | py | Python | optimal_buy_gdax/history.py | coulterj/optimal-buy-gdax | cdebd2af2cf54bdef34c0ff64a4a731e540bdcdb | [
"Unlicense"
] | null | null | null | optimal_buy_gdax/history.py | coulterj/optimal-buy-gdax | cdebd2af2cf54bdef34c0ff64a4a731e540bdcdb | [
"Unlicense"
] | null | null | null | optimal_buy_gdax/history.py | coulterj/optimal-buy-gdax | cdebd2af2cf54bdef34c0ff64a4a731e540bdcdb | [
"Unlicense"
] | null | null | null | #!/usr/bin/env python3
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import Column, String, Float, DateTime, Integer
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
# Shared declarative base for all history tables.
Base = declarative_base()
class Order(Base):
    """A buy order recorded from GDAX."""
    __tablename__ = 'orders'
    id = Column(Integer, primary_key=True)
    currency = Column(String)
    price = Column(Float)
    size = Column(Float)
    gdax_order_id = Column(String)  # identifier assigned by GDAX
    created_at = Column(DateTime)
class Withdrawal(Base):
    """A crypto withdrawal from GDAX to an external address."""
    __tablename__ = 'withdrawals'
    id = Column(Integer, primary_key=True)
    currency = Column(String)
    amount = Column(Float)
    crypto_address = Column(String)
    gdax_withdrawal_id = Column(String)  # identifier assigned by GDAX
class Deposit(Base):
    """A deposit into GDAX funded from a payment method."""
    __tablename__ = 'deposits'
    id = Column(Integer, primary_key=True)
    currency = Column(String)
    amount = Column(Float)
    payment_method_id = Column(String)
    payout_at = Column(DateTime)  # when the deposit becomes available
    gdax_deposit_id = Column(String)  # identifier assigned by GDAX
def get_session(engine):
    """Create an engine from *engine* (a database URL string), ensure all
    tables exist, and return a new Session bound to it."""
    db_engine = create_engine(engine)
    Base.metadata.create_all(db_engine)
    return sessionmaker(bind=db_engine)()
| 24.458333 | 63 | 0.721465 | 139 | 1,174 | 5.863309 | 0.359712 | 0.132515 | 0.068712 | 0.080982 | 0.222086 | 0.222086 | 0.222086 | 0.222086 | 0.222086 | 0.161963 | 0 | 0.001048 | 0.187394 | 1,174 | 47 | 64 | 24.978723 | 0.853249 | 0.017888 | 0 | 0.235294 | 0 | 0 | 0.021701 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.029412 | false | 0 | 0.117647 | 0 | 0.852941 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 |
907488d52d48e24b4d69fb2af57f6618dc2c3ce3 | 2,836 | py | Python | Calculator.py | KunalKatiyar/Calculator | 74044d32b08738ef288ccfae6bb322e6ab05f608 | [
"MIT"
] | null | null | null | Calculator.py | KunalKatiyar/Calculator | 74044d32b08738ef288ccfae6bb322e6ab05f608 | [
"MIT"
] | null | null | null | Calculator.py | KunalKatiyar/Calculator | 74044d32b08738ef288ccfae6bb322e6ab05f608 | [
"MIT"
] | null | null | null | import sys
from PyQt5.QtWidgets import QApplication, QWidget, QPushButton, QHBoxLayout, QGroupBox, QDialog, QVBoxLayout, QGridLayout,QMainWindow, QApplication, QWidget, QPushButton, QAction, QLineEdit, QMessageBox
from PyQt5.QtGui import QIcon
from PyQt5.QtCore import pyqtSlot
class App(QDialog):
    """Minimal PyQt5 calculator window: a text box above a grid of buttons.

    Note: the keypad is display-only in this version -- no button is wired
    to ``on_click`` yet.
    """

    def __init__(self):
        super().__init__()
        # Window title and geometry (x, y, width, height).
        self.title = 'Calculator'
        self.left = 10
        self.top = 10
        self.width = 640
        self.height = 480
        self.initUI()

    def initUI(self):
        """Build the window: keypad group box, outer layout, text box; then show it."""
        self.setWindowTitle(self.title)
        self.setGeometry(self.left, self.top, self.width, self.height)
        self.createGridLayout()
        outer = QVBoxLayout()
        outer.addWidget(self.horizontalGroupBox)
        self.setLayout(outer)
        self.textbox = QLineEdit(self)
        self.textbox.move(20, 40)
        self.textbox.resize(600, 35)
        self.show()

    def createGridLayout(self):
        """Populate ``self.horizontalGroupBox`` with the calculator keypad."""
        self.horizontalGroupBox = QGroupBox("Grid")
        grid = QGridLayout()
        # (label, row, column) for every key, kept in the original insertion order.
        keys = [
            ('1', 0, 0), ('2', 0, 1), ('3', 0, 2),
            ('4', 1, 0), ('5', 1, 1), ('6', 1, 2),
            ('7', 2, 0), ('8', 2, 1), ('9', 2, 2),
            ('0', 3, 1), ('.', 3, 0), ('=', 3, 2),
            ('+', 0, 4), ('-', 1, 4), ('*', 2, 4), ('/', 3, 4),
        ]
        for label, row, col in keys:
            grid.addWidget(QPushButton(label), row, col)
        self.horizontalGroupBox.setLayout(grid)

    @pyqtSlot()
    def on_click(self):
        """Demo click handler: pop a message box, then reset the text box."""
        textboxValue = "Good"
        QMessageBox.question(self, 'Message - pythonspot.com', "You typed: " + textboxValue, QMessageBox.Ok, QMessageBox.Ok)
        self.textbox.setText("Good")
if __name__ == '__main__':
    # Standard Qt bootstrap: create the application, show the calculator,
    # and exit with the event loop's return code.
    # Fix: removed dataset-metadata garbage that was fused onto the last line.
    app = QApplication(sys.argv)
    ex = App()
    sys.exit(app.exec_())
907638a652d8418902c98ee951701aa5ff8b7dc1 | 2,279 | py | Python | src/py/proto/v3/diff/UniversalDiff_pb2.py | zifter/conf_protobuf | 1a8639d6f2a2535ece30dde840c99ba8261b5d7d | [
"MIT"
] | null | null | null | src/py/proto/v3/diff/UniversalDiff_pb2.py | zifter/conf_protobuf | 1a8639d6f2a2535ece30dde840c99ba8261b5d7d | [
"MIT"
] | null | null | null | src/py/proto/v3/diff/UniversalDiff_pb2.py | zifter/conf_protobuf | 1a8639d6f2a2535ece30dde840c99ba8261b5d7d | [
"MIT"
] | null | null | null | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: v3/diff/UniversalDiff.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
# NOTE: auto-generated protobuf module (see header) -- do not hand-edit the
# descriptor data; regenerate from v3/diff/UniversalDiff.proto instead.
_sym_db = _symbol_database.Default()
from v3.diff import Transaction_pb2 as v3_dot_diff_dot_Transaction__pb2
# Top-level file descriptor; serialized_pb is the compiled .proto schema.
DESCRIPTOR = _descriptor.FileDescriptor(
  name='v3/diff/UniversalDiff.proto',
  package='v3.diff',
  syntax='proto3',
  serialized_pb=_b('\n\x1bv3/diff/UniversalDiff.proto\x12\x07v3.diff\x1a\x19v3/diff/Transaction.proto\";\n\rUniversalDiff\x12*\n\x0ctransactions\x18\x01 \x03(\x0b\x32\x14.v3.diff.Transactionb\x06proto3')
  ,
  dependencies=[v3_dot_diff_dot_Transaction__pb2.DESCRIPTOR,])
# Message descriptor: UniversalDiff has a single repeated field
# ``transactions`` (field number 1) of message type v3.diff.Transaction.
_UNIVERSALDIFF = _descriptor.Descriptor(
  name='UniversalDiff',
  full_name='v3.diff.UniversalDiff',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='transactions', full_name='v3.diff.UniversalDiff.transactions', index=0,
      number=1, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=67,
  serialized_end=126,
)
# Resolve the cross-file message type and register the descriptor.
_UNIVERSALDIFF.fields_by_name['transactions'].message_type = v3_dot_diff_dot_Transaction__pb2._TRANSACTION
DESCRIPTOR.message_types_by_name['UniversalDiff'] = _UNIVERSALDIFF
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
# Concrete message class generated from the descriptor above.
UniversalDiff = _reflection.GeneratedProtocolMessageType('UniversalDiff', (_message.Message,), dict(
  DESCRIPTOR = _UNIVERSALDIFF,
  __module__ = 'v3.diff.UniversalDiff_pb2'
  # @@protoc_insertion_point(class_scope:v3.diff.UniversalDiff)
  ))
_sym_db.RegisterMessage(UniversalDiff)
# @@protoc_insertion_point(module_scope)
| 31.219178 | 203 | 0.777095 | 283 | 2,279 | 5.957597 | 0.388693 | 0.032028 | 0.067616 | 0.071174 | 0.130486 | 0.058126 | 0.042705 | 0 | 0 | 0 | 0 | 0.031065 | 0.110136 | 2,279 | 72 | 204 | 31.652778 | 0.800296 | 0.098728 | 0 | 0.113208 | 1 | 0.018868 | 0.183594 | 0.140137 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.132075 | 0 | 0.132075 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
907746020f32a1228d26593b0db9dbd5b8907c24 | 2,087 | py | Python | dataviz/euvotes.py | Udzu/pudzu | 5a0302830b052fc54feba891eb7bf634957a9d90 | [
"MIT"
] | 119 | 2017-07-22T15:02:30.000Z | 2021-08-02T10:42:59.000Z | dataviz/euvotes.py | Udzu/pudzu | 5a0302830b052fc54feba891eb7bf634957a9d90 | [
"MIT"
] | null | null | null | dataviz/euvotes.py | Udzu/pudzu | 5a0302830b052fc54feba891eb7bf634957a9d90 | [
"MIT"
] | 28 | 2017-08-04T14:28:41.000Z | 2019-11-27T23:46:14.000Z | from pudzu.charts import *
from pudzu.sandbox.bamboo import *
import seaborn as sns
# generate map
# Per-country vote records, colour palette, and vote-count bucket thresholds.
df = pd.read_csv("datasets/euvotes.csv").set_index('country')
palette = tmap(RGBA, sns.cubehelix_palette(11, start=0.2, rot=-0.75))
# Thresholds, largest first; votecolfn maps bucket i to palette[8 - i].
ranges = [20000000,10000000,5000000,2000000,1000000,500000,200000,100000,0]
def votecolfn(n):
    """Return the palette colour for vote count ``n`` via its threshold bucket."""
    bucket = next(i for i, threshold in enumerate(ranges) if n >= threshold)
    return palette[8 - bucket]
def colorfn(c):
    """Map colour for country ``c``: vote bucket colour, white for sea/borders,
    grey for countries with no data."""
    if c in df.index:
        return votecolfn(int(df.loc[c].votes))
    return "white" if c in ['Sea', 'Borders'] else "grey"
def labelfn(c):
    """Label image for country ``c``: leader surname, two-digit year, and votes
    in millions; None when the country has no data."""
    if c not in df.index:
        return None
    row = df.loc[c]
    surname = row.leader.split(" ")[-1]
    label = "{name} '{year}\n({votes:.2g}M)".format(name=surname, year=row.year[2:], votes=int(row.votes) / 1000000)
    return Image.from_text(label, arial(14, bold=True), align="center", padding=2)
# Render the choropleth.  NOTE: this rebinds ``map``, shadowing the builtin.
map = map_chart("maps/Europe.png", colorfn, labelfn)
# legend
def box(c):
    """Return a 30x30 legend swatch filled with colour ``c``."""
    swatch = Image.new("RGBA", (30, 30), c)
    return swatch.place(Image.from_text("", arial(16, bold=True), "black", bg=c))
# Legend: one swatch + range label per vote bucket.
vote_arr = Image.from_array([
    [box(votecolfn(n)), Image.from_text("<0.1M" if n < 100000 else ">{:.2g}M".format(n/1000000), arial(16), padding=(10,0))] for n in ranges
], bg="white", xalign=0)
vote_leg = Image.from_column([Image.from_text("# votes", arial(16, bold=True)), vote_arr], bg="white", xalign=0, padding=(0,5))
note_leg = Image.from_text("Multi-party national elections for executive head or party.", arial(16), max_width=100, bg="white", padding=(0,2))
legend = Image.from_column([vote_leg, note_leg], bg="white", xalign=0, padding=5).pad(1, "black")
# Compose legend onto the map, add title, attribution, and save.
chart = map.place(legend, align=(1,0), padding=10)
title = Image.from_column([
    Image.from_text("EUROPEAN POPULAR VOTE RECORDS", arial(48, bold=True)),
    Image.from_text("candidate or party with the highest absolute popular vote", arial(36))],
    bg="white")
img = Image.from_column([title, chart], bg="white", padding=2)
img.place(Image.from_text("/u/Udzu", font("arial", 16), fg="black", bg="white", padding=5).pad((1,1,0,0), "black"), align=1, padding=10, copy=False)
img.save("output/euvotes.png")
| 44.404255 | 148 | 0.684236 | 351 | 2,087 | 4 | 0.387464 | 0.083333 | 0.074074 | 0.029915 | 0.10114 | 0.071225 | 0.031339 | 0.031339 | 0 | 0 | 0 | 0.075791 | 0.121227 | 2,087 | 46 | 149 | 45.369565 | 0.689749 | 0.009104 | 0 | 0 | 1 | 0 | 0.17046 | 0.011138 | 0 | 0 | 0 | 0 | 0 | 1 | 0.117647 | false | 0 | 0.088235 | 0.058824 | 0.352941 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
9082f22e3410593d0f53f454a62bd2d756d1a9be | 554 | py | Python | rsbroker/urls.py | land-pack/RsBroker | d556fda09582e0540cac0eabc163a984e8fc1c44 | [
"Apache-2.0"
] | null | null | null | rsbroker/urls.py | land-pack/RsBroker | d556fda09582e0540cac0eabc163a984e8fc1c44 | [
"Apache-2.0"
] | null | null | null | rsbroker/urls.py | land-pack/RsBroker | d556fda09582e0540cac0eabc163a984e8fc1c44 | [
"Apache-2.0"
] | null | null | null | from __future__ import absolute_import
import os
from tornado.web import StaticFileHandler
from rsbroker.views import websocket
from rsbroker.views.error import NotFoundErrorHandler
# Tornado application settings: template/static dirs relative to this package.
settings = dict(
    template_path=os.path.join(os.path.dirname(__file__), "templates"),
    static_path=os.path.join(os.path.dirname(__file__), "static")
)
# URL routing table; the catch-all pattern must stay last.
handlers = [
    # Http api
    # Events WebSocket API
    (r"/api/ws", websocket.BrokerServerHandler),
    # Static
    (r"/static/(.*)", StaticFileHandler),
    # Error
    (r".*", NotFoundErrorHandler)
]
| 20.518519 | 71 | 0.714801 | 63 | 554 | 6.047619 | 0.460317 | 0.062992 | 0.089239 | 0.073491 | 0.16273 | 0.16273 | 0.16273 | 0.16273 | 0 | 0 | 0 | 0 | 0.16426 | 554 | 26 | 72 | 21.307692 | 0.822894 | 0.075812 | 0 | 0 | 0 | 0 | 0.071006 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.357143 | 0 | 0.357143 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 1 |
9089cafc79c7a1e8e0abc38c3cabc190f618f305 | 1,648 | py | Python | wpa-psk/wpa-psk.py | ranisalt/rsaur | 8b8e8f596a35e8aff53ccff0fc941deacdc885a4 | [
"MIT"
] | null | null | null | wpa-psk/wpa-psk.py | ranisalt/rsaur | 8b8e8f596a35e8aff53ccff0fc941deacdc885a4 | [
"MIT"
] | null | null | null | wpa-psk/wpa-psk.py | ranisalt/rsaur | 8b8e8f596a35e8aff53ccff0fc941deacdc885a4 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import sys
from argparse import ArgumentParser
from getpass import getpass
from hashlib import pbkdf2_hmac
from signal import signal, SIGINT
def die(*_args, **_kwargs):
    """SIGINT handler: exit immediately (no KeyboardInterrupt traceback).

    Accepts and ignores the handler's (signum, frame) arguments.
    """
    sys.exit()
# Install the SIGINT handler.  NOTE: this rebinds the name ``signal`` to the
# previous handler returned by signal.signal(), shadowing the imported function.
signal = signal(SIGINT, die)
# Output template for iwd network files.
iwd = """[Security]
PreSharedKey={psk}"""
# Output template for wpa_supplicant.conf network blocks.
supplicant = """network={{
	ssid={ssid}
	#psk={passphrase}
	psk={psk}
}}"""
parser = ArgumentParser(
    description="%(prog)s pre-computes PSK entries for network configuration blocks of wpa_supplicant or iwd config. An ASCII passphrase and SSID are used to generate a 256-bit PSK."
)
parser.add_argument("ssid", help="The SSID whose passphrase should be derived.")
parser.add_argument(
    "passphrase",
    help="The passphrase to use. If not included on the command line, passphrase will be read from standard input.",
    nargs="?",
)
parser.add_argument(
    "--iwd",
    "-i",
    dest="template",
    action="store_const",
    const=iwd,
    default=supplicant,
    help="Generate for iwd (default: generate for wpa_supplicant).",
)
args = parser.parse_args()
# Fall back to reading the passphrase from stdin (no echo) when not given.
if not args.passphrase:
    print("# reading passphrase from stdin", file=sys.stderr)
    args.passphrase = getpass(prompt="")
# WPA passphrases must be 8..63 characters.
if not 8 <= len(args.passphrase) <= 63:
    print("Passphrase must be 8..63 characters", file=sys.stderr)
    sys.exit(1)
passphrase = args.passphrase.encode()
# Reject ASCII control characters and DEL.
if any(b < 32 or b == 127 for b in passphrase):
    print("Invalid passphrase character", file=sys.stderr)
    sys.exit(1)
ssid = args.ssid.encode()
# Standard WPA derivation: PBKDF2-HMAC-SHA1, 4096 iterations, 32-byte key.
psk = pbkdf2_hmac("sha1", passphrase, ssid, iterations=4096, dklen=32)
print(args.template.format(ssid=args.ssid, passphrase=args.passphrase, psk=psk.hex()))
| 28.912281 | 182 | 0.703277 | 227 | 1,648 | 5.052863 | 0.462555 | 0.061029 | 0.044464 | 0.027899 | 0.036617 | 0.036617 | 0 | 0 | 0 | 0 | 0 | 0.018841 | 0.162621 | 1,648 | 56 | 183 | 29.428571 | 0.812319 | 0.012743 | 0 | 0.085106 | 0 | 0.042553 | 0.369619 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.021277 | false | 0.340426 | 0.106383 | 0 | 0.12766 | 0.085106 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 |
908b0f1eabec4449e380288689a4979deb9e601d | 424 | py | Python | easyml/mainsite/migrations/0015_auto_20181014_1837.py | evancasey1/EasyML | 69f0c246cb7e1d6f7167eb504c30693088e703fd | [
"MIT"
] | null | null | null | easyml/mainsite/migrations/0015_auto_20181014_1837.py | evancasey1/EasyML | 69f0c246cb7e1d6f7167eb504c30693088e703fd | [
"MIT"
] | null | null | null | easyml/mainsite/migrations/0015_auto_20181014_1837.py | evancasey1/EasyML | 69f0c246cb7e1d6f7167eb504c30693088e703fd | [
"MIT"
] | 1 | 2020-10-25T08:14:33.000Z | 2020-10-25T08:14:33.000Z | # Generated by Django 2.1.2 on 2018-10-14 18:37
from django.db import migrations
import picklefield.fields
class Migration(migrations.Migration):
    """Auto-generated migration: switch MLModel.data to a PickledObjectField."""

    dependencies = [
        ('mainsite', '0014_mlmodel_type_num'),
    ]

    operations = [
        migrations.AlterField(
            model_name='mlmodel',
            name='data',
            field=picklefield.fields.PickledObjectField(editable=False),
        ),
    ]
| 21.2 | 72 | 0.629717 | 44 | 424 | 5.977273 | 0.772727 | 0.129278 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.060897 | 0.264151 | 424 | 19 | 73 | 22.315789 | 0.782051 | 0.106132 | 0 | 0 | 1 | 0 | 0.106101 | 0.055703 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.153846 | 0 | 0.384615 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
9092b9fc5566c9c58a04dd93c04224cbbceb0b64 | 1,911 | py | Python | sdl2/blendmode.py | namelivia/py-sdl2 | c1bdf43501224d5f0a125dbce70198100ec7be82 | [
"CC0-1.0"
] | 222 | 2017-08-19T00:51:59.000Z | 2022-02-05T19:39:33.000Z | sdl2/blendmode.py | namelivia/py-sdl2 | c1bdf43501224d5f0a125dbce70198100ec7be82 | [
"CC0-1.0"
] | 103 | 2017-08-20T17:13:05.000Z | 2022-02-05T20:20:01.000Z | sdl2/blendmode.py | namelivia/py-sdl2 | c1bdf43501224d5f0a125dbce70198100ec7be82 | [
"CC0-1.0"
] | 54 | 2017-08-20T17:13:00.000Z | 2022-01-14T23:51:13.000Z | from ctypes import c_int
from .dll import _bind
__all__ = [
# Enums
"SDL_BlendMode",
"SDL_BLENDMODE_NONE", "SDL_BLENDMODE_BLEND", "SDL_BLENDMODE_ADD",
"SDL_BLENDMODE_MOD", "SDL_BLENDMODE_MUL", "SDL_BLENDMODE_INVALID",
"SDL_BlendOperation",
"SDL_BLENDOPERATION_ADD", "SDL_BLENDOPERATION_SUBTRACT",
"SDL_BLENDOPERATION_REV_SUBTRACT", "SDL_BLENDOPERATION_MINIMUM",
"SDL_BLENDOPERATION_MAXIMUM",
"SDL_BlendFactor",
"SDL_BLENDFACTOR_ZERO", "SDL_BLENDFACTOR_ONE",
"SDL_BLENDFACTOR_SRC_COLOR", "SDL_BLENDFACTOR_ONE_MINUS_SRC_COLOR",
"SDL_BLENDFACTOR_SRC_ALPHA", "SDL_BLENDFACTOR_ONE_MINUS_SRC_ALPHA",
"SDL_BLENDFACTOR_DST_COLOR", "SDL_BLENDFACTOR_ONE_MINUS_DST_COLOR",
"SDL_BLENDFACTOR_DST_ALPHA", "SDL_BLENDFACTOR_ONE_MINUS_DST_ALPHA",
# Functions
"SDL_ComposeCustomBlendMode"
]
SDL_BlendMode = c_int
SDL_BLENDMODE_NONE = 0x00000000
SDL_BLENDMODE_BLEND = 0x00000001
SDL_BLENDMODE_ADD = 0x00000002
SDL_BLENDMODE_MOD = 0x00000004
SDL_BLENDMODE_MUL = 0x00000008
SDL_BLENDMODE_INVALID = 0x7FFFFFFF
SDL_BlendOperation = c_int
SDL_BLENDOPERATION_ADD = 0x1
SDL_BLENDOPERATION_SUBTRACT = 0x2
SDL_BLENDOPERATION_REV_SUBTRACT = 0x3
SDL_BLENDOPERATION_MINIMUM = 0x4
SDL_BLENDOPERATION_MAXIMUM = 0x5
SDL_BlendFactor = c_int
SDL_BLENDFACTOR_ZERO = 0x1
SDL_BLENDFACTOR_ONE = 0x2
SDL_BLENDFACTOR_SRC_COLOR = 0x3
SDL_BLENDFACTOR_ONE_MINUS_SRC_COLOR = 0x4
SDL_BLENDFACTOR_SRC_ALPHA = 0x5
SDL_BLENDFACTOR_ONE_MINUS_SRC_ALPHA = 0x6
SDL_BLENDFACTOR_DST_COLOR = 0x7
SDL_BLENDFACTOR_ONE_MINUS_DST_COLOR = 0x8
SDL_BLENDFACTOR_DST_ALPHA = 0x9
SDL_BLENDFACTOR_ONE_MINUS_DST_ALPHA = 0xA
SDL_ComposeCustomBlendMode = _bind("SDL_ComposeCustomBlendMode", [SDL_BlendFactor, SDL_BlendFactor, SDL_BlendOperation, SDL_BlendFactor, SDL_BlendFactor, SDL_BlendOperation], SDL_BlendMode, added='2.0.6')
| 31.327869 | 204 | 0.791209 | 236 | 1,911 | 5.79661 | 0.220339 | 0.266082 | 0.124269 | 0.128655 | 0.258041 | 0.243421 | 0.067982 | 0 | 0 | 0 | 0 | 0.048289 | 0.143904 | 1,911 | 60 | 205 | 31.85 | 0.787897 | 0.007849 | 0 | 0 | 0 | 0 | 0.329107 | 0.235077 | 0 | 0 | 0.055468 | 0 | 0 | 1 | 0 | false | 0 | 0.046512 | 0 | 0.046512 | 0 | 0 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
909acbc6fed7077e7d615e7ea5b4fd6ba9538288 | 954 | py | Python | CSS/spiraleFile.py | NsiLycee/premiere | 2814a21860e227e2db01ea201b1c4d99723a0562 | [
"Unlicense"
] | null | null | null | CSS/spiraleFile.py | NsiLycee/premiere | 2814a21860e227e2db01ea201b1c4d99723a0562 | [
"Unlicense"
] | null | null | null | CSS/spiraleFile.py | NsiLycee/premiere | 2814a21860e227e2db01ea201b1c4d99723a0562 | [
"Unlicense"
] | null | null | null | '''
Auteur : Joël Dendaletche
But : tracé une figure géométrique à l'aide de la bibliothèque Turtle
Le projet utilise l'objet file pour itérer le calcul de chaque nouveau point
Les coordonnées des points d'un polygone sont placés dans une file
l'algorithme consiste à calculer les coordonnées d'un point pour tracer une droite qui part du premier points
de la file et passe par le deuxième en prolongeant le segment d'une fraction déterminée de la longueur entre les
deux points. Le deuxième point est remplacé par le nouveau. A la prochaine itération, le segment va partir du
nouveau point pour passer par le suivant dans la file, qui sera remplacé par le nouveau point et ainsi de
suite.
'''
import turtle
board = turtle.Turtle()
listePoints = [(0,0),(10,0),(5, int(10*75**.5)]
print(listePoints)
for x, y in listePoints :
board.goto(x, y)
turtle.done() | 45.428571 | 121 | 0.697065 | 149 | 954 | 4.463087 | 0.563758 | 0.030075 | 0.039098 | 0.06015 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.01532 | 0.247379 | 954 | 21 | 122 | 45.428571 | 0.910864 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.142857 | null | null | 0.142857 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
909acc24e11a5c6671af7463f6c79ae6bbfe3286 | 20,420 | py | Python | network/modules/spconv_unet.py | alexisgroshenry/NPM3D_DSNet | d1a2ec071728dcb3c733ecdee3a27f4534b67f33 | [
"MIT"
] | null | null | null | network/modules/spconv_unet.py | alexisgroshenry/NPM3D_DSNet | d1a2ec071728dcb3c733ecdee3a27f4534b67f33 | [
"MIT"
] | null | null | null | network/modules/spconv_unet.py | alexisgroshenry/NPM3D_DSNet | d1a2ec071728dcb3c733ecdee3a27f4534b67f33 | [
"MIT"
] | null | null | null | # -*- coding:utf-8 -*-
# author: Xinge
# @file: spconv_unet.py
# @time: 2020/06/22 15:01
import time
import numpy as np
import spconv
import torch
import torch.nn.functional as F
from torch import nn
def conv3x3(in_planes, out_planes, stride=1, indice_key=None):
    """3x3x3 submanifold sparse conv, padding 1, no bias."""
    return spconv.SubMConv3d(
        in_planes, out_planes,
        kernel_size=3, stride=stride, padding=1,
        bias=False, indice_key=indice_key,
    )
def conv1x3(in_planes, out_planes, stride=1, indice_key=None):
    """1x3x3 asymmetric submanifold sparse conv (no z extent), no bias."""
    return spconv.SubMConv3d(
        in_planes, out_planes,
        kernel_size=(1, 3, 3), stride=stride, padding=(0, 1, 1),
        bias=False, indice_key=indice_key,
    )
def conv1x1x3(in_planes, out_planes, stride=1, indice_key=None):
    """1x1x3 submanifold sparse conv (last axis only), no bias."""
    return spconv.SubMConv3d(
        in_planes, out_planes,
        kernel_size=(1, 1, 3), stride=stride, padding=(0, 0, 1),
        bias=False, indice_key=indice_key,
    )
def conv1x3x1(in_planes, out_planes, stride=1, indice_key=None):
    """1x3x1 submanifold sparse conv (middle axis only), no bias."""
    return spconv.SubMConv3d(
        in_planes, out_planes,
        kernel_size=(1, 3, 1), stride=stride, padding=(0, 1, 0),
        bias=False, indice_key=indice_key,
    )
def conv3x1x1(in_planes, out_planes, stride=1, indice_key=None):
    """3x1x1 submanifold sparse conv (first axis only), no bias."""
    return spconv.SubMConv3d(
        in_planes, out_planes,
        kernel_size=(3, 1, 1), stride=stride, padding=(1, 0, 0),
        bias=False, indice_key=indice_key,
    )
def conv3x1(in_planes, out_planes, stride=1, indice_key=None):
    """3x1x3 asymmetric submanifold sparse conv (no middle extent), no bias."""
    return spconv.SubMConv3d(
        in_planes, out_planes,
        kernel_size=(3, 1, 3), stride=stride, padding=(1, 0, 1),
        bias=False, indice_key=indice_key,
    )
def conv1x1(in_planes, out_planes, stride=1, indice_key=None):
    """1x1x1 submanifold sparse conv (pointwise channel mixing), no bias.

    Fix: the original passed ``padding=1``, which is meaningless for a 1x1x1
    kernel (and has no effect on a SubMConv3d, whose output index set always
    equals its input index set); it is dropped here.
    """
    return spconv.SubMConv3d(in_planes, out_planes, kernel_size=1, stride=stride,
                             padding=0, bias=False, indice_key=indice_key)
class ResContextBlock(nn.Module):
    """Context block: two parallel asymmetric-conv branches summed residually.

    Shortcut branch: 1x3x3 conv -> 3x1x3 conv; main branch: 3x1x3 -> 1x3x3.
    Each conv is followed by LeakyReLU then BatchNorm1d applied to the sparse
    tensor's feature matrix (activation-before-norm ordering as written).
    ``kernel_size`` and ``stride`` are accepted but unused.
    """
    def __init__(self, in_filters, out_filters, kernel_size=(3, 3, 3), stride=1, indice_key=None):
        super(ResContextBlock, self).__init__()
        # Shortcut branch.
        self.conv1 = conv1x3(in_filters, out_filters, indice_key=indice_key+"bef")
        self.bn0 = nn.BatchNorm1d(out_filters)
        self.act1 = nn.LeakyReLU()
        self.conv1_2 = conv3x1(out_filters, out_filters, indice_key=indice_key+"bef")
        self.bn0_2 = nn.BatchNorm1d(out_filters)
        self.act1_2 = nn.LeakyReLU()
        # Main branch.
        self.conv2 = conv3x1(in_filters, out_filters, indice_key=indice_key+"bef")
        self.act2 = nn.LeakyReLU()
        self.bn1 = nn.BatchNorm1d(out_filters)
        self.conv3 = conv1x3(out_filters, out_filters, indice_key=indice_key+"bef")
        self.act3 = nn.LeakyReLU()
        self.bn2 = nn.BatchNorm1d(out_filters)
    def forward(self, x):
        """Return a sparse tensor whose features are main-branch + shortcut features."""
        shortcut = self.conv1(x)
        shortcut.features = self.act1(shortcut.features)
        shortcut.features = self.bn0(shortcut.features)
        shortcut = self.conv1_2(shortcut)
        shortcut.features = self.act1_2(shortcut.features)
        shortcut.features = self.bn0_2(shortcut.features)
        resA = self.conv2(x)
        resA.features = self.act2(resA.features)
        resA.features = self.bn1(resA.features)
        resA = self.conv3(resA)
        resA.features = self.act3(resA.features)
        resA.features = self.bn2(resA.features)
        # Residual sum (both branches share the same sparse index set).
        resA.features = resA.features + shortcut.features
        return resA
class ResBlock(nn.Module):
    """Sparse residual block with an optional strided downsampling conv.

    Two parallel asymmetric-conv branches (3x1x3 -> 1x3x3 and 1x3x3 -> 3x1x3)
    are summed as a residual.  When ``pooling`` is set, a strided SparseConv3d
    halves the x/y resolution (and z too when ``height_pooling`` is set), and
    ``forward`` returns (pooled, pre-pool) so the pre-pool tensor can feed a
    skip connection.

    Note: ``dropout_rate``, ``drop_out``, ``kernel_size`` and ``stride`` are
    accepted for signature compatibility but are currently unused (dropout was
    removed; dead commented-out code deleted in this cleanup).
    """
    def __init__(self, in_filters, out_filters, dropout_rate, kernel_size=(3, 3, 3), stride=1,
                 pooling=True, drop_out=True, height_pooling=False, indice_key=None):
        super(ResBlock, self).__init__()
        self.pooling = pooling
        self.drop_out = drop_out

        # Shortcut branch: 3x1x3 then 1x3x3 submanifold convs.
        self.conv1 = conv3x1(in_filters, out_filters, indice_key=indice_key+"bef")
        self.act1 = nn.LeakyReLU()
        self.bn0 = nn.BatchNorm1d(out_filters)

        self.conv1_2 = conv1x3(out_filters, out_filters, indice_key=indice_key+"bef")
        self.act1_2 = nn.LeakyReLU()
        self.bn0_2 = nn.BatchNorm1d(out_filters)

        # Main branch: 1x3x3 then 3x1x3 submanifold convs.
        self.conv2 = conv1x3(in_filters, out_filters, indice_key=indice_key+"bef")
        self.act2 = nn.LeakyReLU()
        self.bn1 = nn.BatchNorm1d(out_filters)

        self.conv3 = conv3x1(out_filters, out_filters, indice_key=indice_key+"bef")
        self.act3 = nn.LeakyReLU()
        self.bn2 = nn.BatchNorm1d(out_filters)

        if pooling:
            if height_pooling:
                # Downsample all three axes.
                self.pool = spconv.SparseConv3d(out_filters, out_filters, kernel_size=3, stride=2,
                                                padding=1, indice_key=indice_key, bias=False)
            else:
                # Keep the z (height) resolution.
                self.pool = spconv.SparseConv3d(out_filters, out_filters, kernel_size=3, stride=(2,2,1),
                                                padding=1, indice_key=indice_key, bias=False)

    def forward(self, x):
        """Return (downsampled, pre-pool) when pooling, else the residual output."""
        shortcut = self.conv1(x)
        shortcut.features = self.act1(shortcut.features)
        shortcut.features = self.bn0(shortcut.features)

        shortcut = self.conv1_2(shortcut)
        shortcut.features = self.act1_2(shortcut.features)
        shortcut.features = self.bn0_2(shortcut.features)

        resA = self.conv2(x)
        resA.features = self.act2(resA.features)
        resA.features = self.bn1(resA.features)

        resA = self.conv3(resA)
        resA.features = self.act3(resA.features)
        resA.features = self.bn2(resA.features)

        # Residual sum of the two branches.
        resA.features = resA.features + shortcut.features

        if self.pooling:
            resB = self.pool(resA)
            return resB, resA
        else:
            return resA
class UpBlock(nn.Module):
    """Decoder block: 3x3x3 conv, inverse-conv upsampling, skip-add, then
    three refinement convs (1x3x3, 3x1x3, 3x3x3).

    ``up_key`` must match the ``indice_key`` of the encoder's downsampling
    conv so SparseInverseConv3d can restore the pre-pool index set.
    """
    def __init__(self, in_filters, out_filters, kernel_size=(3, 3, 3), indice_key=None, up_key=None):
        super(UpBlock, self).__init__()
        # self.drop_out = drop_out
        #self.trans = nn.ConvTranspose2d(in_filters, out_filters, kernel_size, stride=(2, 2), padding=1)
        # Channel-adapting conv applied before upsampling.
        self.trans_dilao = conv3x3(in_filters, out_filters, indice_key=indice_key+"new_up")
        self.trans_act = nn.LeakyReLU()
        self.trans_bn = nn.BatchNorm1d(out_filters)

        # self.dropout1 = nn.Dropout3d(p=dropout_rate)
        # self.dropout2 = nn.Dropout3d(p=dropout_rate)
        # Refinement convs applied after the skip connection.
        self.conv1 = conv1x3(out_filters, out_filters, indice_key=indice_key)
        self.act1 = nn.LeakyReLU()
        self.bn1 = nn.BatchNorm1d(out_filters)

        self.conv2 = conv3x1(out_filters, out_filters, indice_key=indice_key)
        self.act2 = nn.LeakyReLU()
        self.bn2 = nn.BatchNorm1d(out_filters)

        self.conv3 = conv3x3(out_filters, out_filters, indice_key=indice_key)
        self.act3 = nn.LeakyReLU()
        self.bn3 = nn.BatchNorm1d(out_filters)
        # self.dropout3 = nn.Dropout3d(p=dropout_rate)

        # Inverse conv undoes the encoder downsampling identified by up_key.
        self.up_subm = spconv.SparseInverseConv3d(out_filters, out_filters, kernel_size=3, indice_key=up_key, bias=False)

    def forward(self, x, skip):
        """Upsample ``x``, add the encoder ``skip`` features, and refine."""
        upA = self.trans_dilao(x)
        #if upA.shape != skip.shape:
        #    upA = F.pad(upA, (0, 1, 0, 1), mode='replicate')
        upA.features = self.trans_act(upA.features)
        upA.features = self.trans_bn(upA.features)

        ## upsample
        upA = self.up_subm(upA)
        # upA = F.interpolate(upA, size=skip.size()[2:], mode='trilinear', align_corners=True)
        # if self.drop_out:
        #     upA = self.dropout1(upA)
        # Skip connection (index sets match after the inverse conv).
        upA.features = upA.features + skip.features
        # if self.drop_out:
        #     upB = self.dropout2(upB)

        upE = self.conv1(upA)
        upE.features = self.act1(upE.features)
        upE.features = self.bn1(upE.features)

        upE = self.conv2(upE)
        upE.features = self.act2(upE.features)
        upE.features = self.bn2(upE.features)

        upE = self.conv3(upE)
        upE.features = self.act3(upE.features)
        upE.features = self.bn3(upE.features)
        # if self.drop_out:
        #     upE = self.dropout3(upE)

        return upE
class ReconBlock(nn.Module):
    """Sigmoid-gated reconstruction block (attention-style re-weighting).

    Three axis-aligned 1-D submanifold convs (3x1x1, 1x3x1, 1x1x3) each
    produce a BatchNorm + Sigmoid gate; the three gates are summed and used
    to scale the input features elementwise.  The elementwise product
    requires ``out_filters == in_filters``.  ``kernel_size`` and ``stride``
    are accepted but unused (dead commented-out code deleted in this cleanup).
    """
    def __init__(self, in_filters, out_filters, kernel_size=(3, 3, 3), stride=1, indice_key=None):
        super(ReconBlock, self).__init__()
        # One gating branch per spatial axis.
        self.conv1 = conv3x1x1(in_filters, out_filters, indice_key=indice_key+"bef")
        self.bn0 = nn.BatchNorm1d(out_filters)
        self.act1 = nn.Sigmoid()

        self.conv1_2 = conv1x3x1(in_filters, out_filters, indice_key=indice_key+"bef")
        self.bn0_2 = nn.BatchNorm1d(out_filters)
        self.act1_2 = nn.Sigmoid()

        self.conv1_3 = conv1x1x3(in_filters, out_filters, indice_key=indice_key+"bef")
        self.bn0_3 = nn.BatchNorm1d(out_filters)
        self.act1_3 = nn.Sigmoid()

    def forward(self, x):
        """Return a sparse tensor whose features are ``x`` scaled by the summed gates."""
        shortcut = self.conv1(x)
        shortcut.features = self.bn0(shortcut.features)
        shortcut.features = self.act1(shortcut.features)

        shortcut2 = self.conv1_2(x)
        shortcut2.features = self.bn0_2(shortcut2.features)
        shortcut2.features = self.act1_2(shortcut2.features)

        shortcut3 = self.conv1_3(x)
        shortcut3.features = self.bn0_3(shortcut3.features)
        shortcut3.features = self.act1_3(shortcut3.features)

        shortcut.features = shortcut.features + shortcut2.features + shortcut3.features
        shortcut.features = shortcut.features * x.features
        return shortcut
class Spconv_salsaNet_res_cfg(nn.Module):
    """Sparse-convolutional U-Net backbone (SalsaNet-style), config-driven.

    Encoder: ResContextBlock + four downsampling ResBlocks (init_size up to
    16*init_size channels); decoder: four UpBlocks with skip connections plus
    a ReconBlock.  The final features concatenate the ReconBlock output with
    its input, giving 4 * init_size channels for the downstream heads.
    """
    def __init__(self, cfg):
        super(Spconv_salsaNet_res_cfg, self).__init__()

        output_shape = cfg.DATA_CONFIG.DATALOADER.GRID_SIZE
        # Optional input-feature compression; falls back to the grid's z size.
        if 'FEATURE_COMPRESSION' in cfg.MODEL.MODEL_FN:
            num_input_features = cfg.MODEL.MODEL_FN.FEATURE_COMPRESSION
        else:
            num_input_features = cfg.DATA_CONFIG.DATALOADER.GRID_SIZE[2]
        nclasses = cfg.DATA_CONFIG.NCLASS
        n_height = cfg.DATA_CONFIG.DATALOADER.GRID_SIZE[2]
        init_size = cfg.MODEL.BACKBONE.INIT_SIZE

        self.nclasses = nclasses
        self.nheight = n_height
        self.strict = False

        sparse_shape = np.array(output_shape)
        # sparse_shape[0] = 11
        self.sparse_shape = sparse_shape

        self.downCntx = ResContextBlock(num_input_features, init_size, indice_key="pre")
        # self.resBlock1 = ResBlock(init_size, init_size, 0.2, pooling=True, height_pooling=True, indice_key="down1")
        self.resBlock2 = ResBlock(init_size, 2 * init_size, 0.2, height_pooling=True, indice_key="down2")
        self.resBlock3 = ResBlock(2 * init_size, 4 * init_size, 0.2, height_pooling=True, indice_key="down3")
        self.resBlock4 = ResBlock(4 * init_size, 8 * init_size, 0.2, pooling=True, height_pooling=False, indice_key="down4")
        self.resBlock5 = ResBlock(8 * init_size, 16 * init_size, 0.2, pooling=True, height_pooling=False, indice_key="down5")
        # self.resBlock6 = ResBlock(16 * init_size, 16 * init_size, 0.2, pooling=False, height_pooling=False, indice_key="down6")

        # self.ReconNet = ReconBlock(16 * init_size, 16 * init_size, indice_key="recon")

        # Decoder; each up_key must match the corresponding encoder indice_key.
        self.upBlock0 = UpBlock(16 * init_size, 16 * init_size, indice_key="up0", up_key="down5")
        self.upBlock1 = UpBlock(16 * init_size, 8 * init_size, indice_key="up1", up_key="down4")
        self.upBlock2 = UpBlock(8 * init_size, 4 * init_size, indice_key="up2", up_key="down3")
        self.upBlock3 = UpBlock(4 * init_size, 2 * init_size, indice_key="up3", up_key="down2")
        # self.upBlock4 = UpBlock(4 * init_size, 2 * init_size, indice_key="up4", up_key="down2")
        # self.upBlock5 = UpBlock(2 * init_size, init_size, indice_key="up5", up_key="down1")

        self.ReconNet = ReconBlock(2*init_size, 2*init_size, indice_key="recon")

    def forward(self, voxel_features, coors, batch_size):
        """Run the encoder/decoder on a batch of sparse voxels.

        ``voxel_features``: per-voxel feature rows; ``coors``: integer voxel
        coordinates (batch index first, per the spconv convention);
        ``batch_size``: number of samples.  Returns the same sparse tensor
        twice (the semantic and instance branches currently share features).
        """
        # x = x.contiguous()
        coors = coors.int()
        ret = spconv.SparseConvTensor(voxel_features, coors, self.sparse_shape,
                                      batch_size)
        ret = self.downCntx(ret)
        # down0c, down0b = self.resBlock1(ret)
        down1c, down1b = self.resBlock2(ret)
        down2c, down2b = self.resBlock3(down1c)
        down3c, down3b = self.resBlock4(down2c)
        down4c, down4b = self.resBlock5(down3c)
        # down5b = self.resBlock6(down4c)
        # down6b = self.ReconNet(down5b)

        up4e = self.upBlock0(down4c, down4b)
        up3e = self.upBlock1(up4e, down3b)
        up2e = self.upBlock2(up3e, down2b)
        up1e = self.upBlock3(up2e, down1b)
        up0e = self.ReconNet(up1e)

        up0e.features = torch.cat((up0e.features, up1e.features), 1) # size 4 * init_size --> OK with the size of the semantic and instance heads

        return up0e, up0e
class Spconv_sem_logits_head_cfg(nn.Module):
    """Semantic head: one 3x3x3 submanifold conv producing per-class logits.

    Cleanup: removed unused locals the original copied from the backbone's
    config parsing (output_shape, num_input_features, n_height).
    """
    def __init__(self, cfg):
        super(Spconv_sem_logits_head_cfg, self).__init__()
        nclasses = cfg.DATA_CONFIG.NCLASS
        init_size = cfg.MODEL.BACKBONE.INIT_SIZE
        # Input channel count matches the backbone output (4 * init_size).
        self.logits = spconv.SubMConv3d(4 * init_size, nclasses, indice_key="logit",
                                        kernel_size=3, stride=1, padding=1, bias=True)

    def forward(self, fea):
        """Return the dense (B, nclasses, X, Y, Z) logits tensor."""
        logits = self.logits(fea)
        return logits.dense()
class Spconv_ins_offset_concatxyz_threelayers_head_cfg(nn.Module):
    """Instance head: three 3x3x3 sparse convs, then a per-point MLP that
    concatenates each point's xyz and regresses per-point instance offsets."""
    def __init__(self, cfg):
        super(Spconv_ins_offset_concatxyz_threelayers_head_cfg, self).__init__()
        init_size = cfg.MODEL.BACKBONE.INIT_SIZE
        self.pt_fea_dim = 4 * init_size  # matches the backbone's output channels
        self.embedding_dim = cfg.MODEL.INS_HEAD.EMBEDDING_CHANNEL

        self.conv1 = conv3x3(self.pt_fea_dim, self.pt_fea_dim, indice_key='offset_head_conv1')
        self.bn1 = nn.BatchNorm1d(self.pt_fea_dim)
        self.act1 = nn.LeakyReLU()
        self.conv2 = conv3x3(self.pt_fea_dim, 2 * init_size, indice_key='offset_head_conv2')
        self.bn2 = nn.BatchNorm1d(2 * init_size)
        self.act2 = nn.LeakyReLU()
        self.conv3 = conv3x3(2 * init_size, init_size, indice_key='offset_head_conv3')
        self.bn3 = nn.BatchNorm1d(init_size)
        self.act3 = nn.LeakyReLU()

        # Point-wise MLP over (voxel feature ++ xyz).
        self.offset = nn.Sequential(
            nn.Linear(init_size+3, init_size, bias=True),
            nn.BatchNorm1d(init_size),
            nn.ReLU()
        )
        self.offset_linear = nn.Linear(init_size, self.embedding_dim, bias=True)

    def forward(self, fea, batch):
        """Return (per-point predicted offsets, per-point voxel features), one
        list entry per batch sample.

        ``batch['grid']``: per-sample voxel index of each point;
        ``batch['pt_cart_xyz']``: per-sample Cartesian point coordinates
        (numpy arrays -- moved to CUDA below).
        """
        fea = self.conv1(fea)
        fea.features = self.act1(self.bn1(fea.features))
        fea = self.conv2(fea)
        fea.features = self.act2(self.bn2(fea.features))
        fea = self.conv3(fea)
        fea.features = self.act3(self.bn3(fea.features))

        grid_ind = batch['grid']
        xyz = batch['pt_cart_xyz']
        fea = fea.dense()
        # Move channels last so per-point voxel features can be gathered by index.
        fea = fea.permute(0, 2, 3, 4, 1)

        pt_ins_fea_list = []
        for batch_i, grid_ind_i in enumerate(grid_ind):
            pt_ins_fea_list.append(fea[batch_i, grid_ind[batch_i][:,0], grid_ind[batch_i][:,1], grid_ind[batch_i][:,2]])
        pt_pred_offsets_list = []
        for batch_i, pt_ins_fea in enumerate(pt_ins_fea_list):
            pt_pred_offsets_list.append(self.offset_linear(self.offset(torch.cat([pt_ins_fea,torch.from_numpy(xyz[batch_i]).cuda()],dim=1))))

        return pt_pred_offsets_list, pt_ins_fea_list
class Spconv_alsaNet_res(nn.Module):
    """Sparse-convolution U-Net segmentation network.

    Context block, four residual down blocks, four up blocks with skip
    connections, a reconstruction block whose output is concatenated with
    its input (4 * init_size channels), and a submanifold conv producing
    per-voxel class logits returned as a dense grid.
    """

    def __init__(self,
                 output_shape,
                 use_norm=True,
                 num_input_features=128,
                 nclasses=20, n_height=32, strict=False, init_size=16):
        super(Spconv_alsaNet_res, self).__init__()
        self.nclasses = nclasses
        self.nheight = n_height
        # NOTE(review): the `strict` argument is ignored and hard-wired to
        # False; preserved as-is -- confirm whether any caller relies on it.
        self.strict = False

        sparse_shape = np.array(output_shape)
        print(sparse_shape)
        self.sparse_shape = sparse_shape

        self.downCntx = ResContextBlock(num_input_features, init_size, indice_key="pre")
        self.resBlock2 = ResBlock(init_size, 2 * init_size, 0.2, height_pooling=True, indice_key="down2")
        self.resBlock3 = ResBlock(2 * init_size, 4 * init_size, 0.2, height_pooling=True, indice_key="down3")
        self.resBlock4 = ResBlock(4 * init_size, 8 * init_size, 0.2, pooling=True, height_pooling=False, indice_key="down4")
        self.resBlock5 = ResBlock(8 * init_size, 16 * init_size, 0.2, pooling=True, height_pooling=False, indice_key="down5")

        self.upBlock0 = UpBlock(16 * init_size, 16 * init_size, indice_key="up0", up_key="down5")
        self.upBlock1 = UpBlock(16 * init_size, 8 * init_size, indice_key="up1", up_key="down4")
        self.upBlock2 = UpBlock(8 * init_size, 4 * init_size, indice_key="up2", up_key="down3")
        self.upBlock3 = UpBlock(4 * init_size, 2 * init_size, indice_key="up3", up_key="down2")

        self.ReconNet = ReconBlock(2 * init_size, 2 * init_size, indice_key="recon")
        self.logits = spconv.SubMConv3d(4 * init_size, nclasses, indice_key="logit", kernel_size=3, stride=1, padding=1, bias=True)

    def forward(self, voxel_features, coors, batch_size):
        """Encode/decode the sparse voxel grid and return dense class logits."""
        coors = coors.int()  # spconv requires int32 coordinates
        # BUG FIX: removed the leftover `import pdb; pdb.set_trace()` that
        # froze every forward pass waiting on an interactive debugger.
        ret = spconv.SparseConvTensor(voxel_features, coors, self.sparse_shape,
                                      batch_size)
        ret = self.downCntx(ret)
        down1c, down1b = self.resBlock2(ret)
        down2c, down2b = self.resBlock3(down1c)
        down3c, down3b = self.resBlock4(down2c)
        down4c, down4b = self.resBlock5(down3c)

        up4e = self.upBlock0(down4c, down4b)
        up3e = self.upBlock1(up4e, down3b)
        up2e = self.upBlock2(up3e, down2b)
        up1e = self.upBlock3(up2e, down1b)

        up0e = self.ReconNet(up1e)
        # Skip concat doubles channels to 4 * init_size, matching self.logits.
        up0e.features = torch.cat((up0e.features, up1e.features), 1)

        logits = self.logits(up0e)
        return logits.dense()
| 41.588595 | 145 | 0.645495 | 2,725 | 20,420 | 4.615046 | 0.09211 | 0.072281 | 0.032204 | 0.038645 | 0.760814 | 0.708969 | 0.681218 | 0.64806 | 0.626749 | 0.596056 | 0 | 0.043211 | 0.235015 | 20,420 | 490 | 146 | 41.673469 | 0.761859 | 0.166552 | 0 | 0.503356 | 0 | 0 | 0.016119 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.077181 | false | 0 | 0.02349 | 0.02349 | 0.181208 | 0.003356 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
909b464aebeffe98a01bbc3d1080af46d979ef36 | 5,690 | py | Python | culturebank/models.py | Anaphory/culturebank | 9a408cb25fafcb14bbdd96278bebfbc898d32d00 | [
"Apache-2.0"
] | null | null | null | culturebank/models.py | Anaphory/culturebank | 9a408cb25fafcb14bbdd96278bebfbc898d32d00 | [
"Apache-2.0"
] | null | null | null | culturebank/models.py | Anaphory/culturebank | 9a408cb25fafcb14bbdd96278bebfbc898d32d00 | [
"Apache-2.0"
] | null | null | null | from zope.interface import implementer
from sqlalchemy import (
Column,
String,
Integer,
Float,
ForeignKey,
CheckConstraint,
)
from sqlalchemy.orm import relationship, backref
from clld import interfaces
from clld.db.meta import Base, CustomModelMixin
from clld.db.versioned import Versioned
from clld.db.models.common import (
Contribution, Parameter, IdNameDescriptionMixin, Language
)
from clld_glottologfamily_plugin.models import HasFamilyMixin, Family
from .interfaces import IDependency, ITransition, IStability, IDeepFamily, ISupport, IHasSupport
@implementer(interfaces.ILanguage)
class CulturebankLanguage(CustomModelMixin, Language, HasFamilyMixin):
    """A language as represented in CultureBank, linked to a family."""
    pk = Column(Integer, ForeignKey('language.pk'), primary_key=True)
@implementer(interfaces.IParameter)
class Feature(CustomModelMixin, Parameter, Versioned):
    """Parameters in CultureBank are called features. They are always related to one Designer.
    """
    pk = Column(Integer, ForeignKey('parameter.pk'), primary_key=True)
    # Free-text documentation / curation fields.
    doc = Column(String)
    patron = Column(String)
    newdoc = Column(String)
    vdoc = Column(String)
    std_comments = Column(String)
    name_french = Column(String)
    clarification = Column(String)
    alternative_id = Column(String)
    representation = Column(Integer)
    designer = Column(String)
    abbreviation = Column(String)
    # Sort keys: string and integer variants kept side by side.
    sortkey_str = Column(String)
    sortkey_int = Column(Integer)
    # "jl_" prefixed classification fields -- origin of the prefix not
    # evident from this file; TODO confirm.
    jl_relevant_unit = Column(String)
    jl_function = Column(String)
    jl_formal_means = Column(String)
    # Workflow / status tracking fields.
    legacy_status = Column(String)
    culturebank_status = Column(String)
    wip_comments = Column(String)
    nts_culturebank = Column(String)
    hard_to_deny = Column(String)
    prone_misunderstanding = Column(String)
    requires_extensive_data = Column(String)
    last_edited = Column(String)
    other_survey = Column(String)
@implementer(IStability)
class Stability(Base, CustomModelMixin):
    """Parsimony-based stability estimate attached to a single feature."""
    pk = Column(Integer, primary_key=True)
    id = Column(String)
    parsimony_stability_value = Column(Float)
    parsimony_retentions = Column(Float)
    parsimony_transitions = Column(Float)
    feature_pk = Column(Integer, ForeignKey('feature.pk'))
    feature = relationship(Feature, lazy='joined', foreign_keys = feature_pk, backref = "stability")
@implementer(IDependency)
class Dependency(Base, CustomModelMixin):
    """A (directed) dependency between two features with a strength score."""
    pk = Column(Integer, primary_key=True)
    id = Column(String)
    feature1_pk = Column(Integer, ForeignKey('feature.pk'))
    feature1 = relationship(Feature, lazy='joined', foreign_keys = feature1_pk)
    feature2_pk = Column(Integer, ForeignKey('feature.pk'))
    feature2 = relationship(Feature, lazy='joined', foreign_keys = feature2_pk)
    strength = Column(Float)
    representation = Column(Integer)
    combinatory_status = Column(String)
@implementer(ITransition)
class Transition(Base, CustomModelMixin):
    """A value transition (from node/value to node/value) observed for a
    stability estimate within a language family."""
    pk = Column(Integer, primary_key=True)
    id = Column(String)
    fromnode = Column(String)
    fromvalue = Column(String)
    tonode = Column(String)
    tovalue = Column(String)
    stability_pk = Column(Integer, ForeignKey('stability.pk'))
    stability = relationship(Stability, lazy='joined', foreign_keys = stability_pk)
    family_pk = Column(Integer, ForeignKey('family.pk'))
    family = relationship(Family, backref='transitions')
    retention_innovation = Column(String)
@implementer(interfaces.IContribution)
class CulturebankContribution(CustomModelMixin, Contribution):
    """A CultureBank contribution with an extra free-text description."""
    pk = Column(Integer, ForeignKey('contribution.pk'), primary_key=True)
    desc = Column(String)
@implementer(IDeepFamily)
class DeepFamily(Base, CustomModelMixin):
    """A hypothesized deep relationship between two families, with support
    scores and denormalized WGS84 coordinates for both families."""
    pk = Column(Integer, primary_key=True)
    id = Column(String)
    family1_pk = Column(Integer, ForeignKey('family.pk'))
    family1 = relationship(Family, lazy='joined', foreign_keys = family1_pk)
    family1_longitude = Column(
        Float(),
        CheckConstraint('-180 <= family1_longitude and family1_longitude <= 180 '),
        doc='geographical longitude in WGS84')
    family1_latitude = Column(
        Float(),
        CheckConstraint('-90 <= family1_latitude and family1_latitude <= 90'),
        doc='geographical latitude in WGS84')
    family2_pk = Column(Integer, ForeignKey('family.pk'))
    family2 = relationship(Family, lazy='joined', foreign_keys = family2_pk)
    family2_longitude = Column(
        Float(),
        CheckConstraint('-180 <= family2_longitude and family2_longitude <= 180 '),
        doc='geographical longitude in WGS84')
    family2_latitude = Column(
        Float(),
        CheckConstraint('-90 <= family2_latitude and family2_latitude <= 90'),
        doc='geographical latitude in WGS84')
    support_value = Column(Float)
    significance = Column(Float)
    geographic_plausibility = Column(Float)
@implementer(ISupport)
class Support(Base, CustomModelMixin):
    """A pair of feature values with scores supporting a deep-family link."""
    pk = Column(Integer, primary_key=True)
    id = Column(String)
    value1 = Column(String)
    value2 = Column(String)
    historical_score = Column(Float)
    independent_score = Column(Float)
    support_score = Column(Float)
    feature_pk = Column(Integer, ForeignKey('feature.pk'))
    feature = relationship(Feature, lazy='joined', foreign_keys = feature_pk)
@implementer(IHasSupport)
class HasSupport(Base, CustomModelMixin):
    """Association between a DeepFamily and a Support (composite PK)."""
    id = Column(String)
    deepfamily_pk = Column(Integer, ForeignKey('deepfamily.pk'), primary_key=True)
    deepfamily = relationship(DeepFamily, lazy='joined', foreign_keys = deepfamily_pk)
    support_pk = Column(Integer, ForeignKey('support.pk'), primary_key=True)
    support = relationship(Support, lazy='joined', foreign_keys = support_pk)
| 38.187919 | 100 | 0.727065 | 610 | 5,690 | 6.644262 | 0.229508 | 0.11547 | 0.066617 | 0.080188 | 0.288675 | 0.252159 | 0.171971 | 0.131014 | 0.131014 | 0.131014 | 0 | 0.011446 | 0.170826 | 5,690 | 148 | 101 | 38.445946 | 0.847605 | 0.01529 | 0 | 0.175573 | 0 | 0 | 0.097639 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.068702 | 0 | 0.770992 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 |
909bb64980267ae4a08d2d7a1f0a4d2581917497 | 1,579 | py | Python | sandbox/graph-size.py | maarten1983/khmer | 417aaa57f0659685c01887a6910de1c08d0a73e5 | [
"BSD-3-Clause"
] | 1 | 2019-11-02T15:12:44.000Z | 2019-11-02T15:12:44.000Z | sandbox/graph-size.py | ibest/khmer | fbc307abd64363b329745709846d77444ce0c025 | [
"BSD-3-Clause"
] | null | null | null | sandbox/graph-size.py | ibest/khmer | fbc307abd64363b329745709846d77444ce0c025 | [
"BSD-3-Clause"
] | null | null | null | #! /usr/bin/env python2
#
# This file is part of khmer, http://github.com/ged-lab/khmer/, and is
# Copyright (C) Michigan State University, 2009-2013. It is licensed under
# the three-clause BSD license; see doc/LICENSE.txt.
# Contact: khmer-project@idyll.org
#
import khmer
import sys
import screed
import os.path
from khmer.thread_utils import ThreadedSequenceProcessor, verbose_fasta_iter
K = 32                       # k-mer size passed to khmer -- presumably; confirm
HASHTABLE_SIZE = int(4e9)    # size of each hashbits table
THRESHOLD = 500              # minimum connected-graph size for a read to pass
N_HT = 4                     # number of hashtables
WORKER_THREADS = 5           # threads for ThreadedSequenceProcessor

###

GROUPSIZE = 100              # reads handed to each worker at a time

###
def main():
    """Filter a FASTA file, keeping reads whose connected k-mer graph is
    at least THRESHOLD in size (NOTE: Python 2 script)."""
    infile = sys.argv[1]
    outfile = os.path.basename(infile) + '.graphsize'
    if len(sys.argv) == 3:
        outfile = sys.argv[2]

    print 'input file to graphsize filter: %s' % infile
    print 'filtering to output:', outfile
    print '-- settings:'
    print 'K', K
    print 'HASHTABLE SIZE %g' % HASHTABLE_SIZE
    print 'N HASHTABLES %d' % N_HT
    print 'THRESHOLD', THRESHOLD
    print 'N THREADS', WORKER_THREADS
    print '--'

    print 'creating ht'
    ht = khmer.new_hashbits(K, HASHTABLE_SIZE, N_HT)
    print 'eating fa', infile
    total_reads, n_consumed = ht.consume_fasta(infile)
    outfp = open(outfile, 'w')

    ###

    def process_fn(record, ht=ht):
        # Keep the read only if the graph reachable from its first k-mer
        # is at least THRESHOLD big; otherwise drop it.
        kmer = record['sequence'][:K]
        size = ht.calc_connected_graph_size(kmer, THRESHOLD)
        if size >= THRESHOLD:
            return record['name'], record['sequence']

        return None, None

    tsp = ThreadedSequenceProcessor(process_fn, WORKER_THREADS, GROUPSIZE)

    ###

    tsp.start(verbose_fasta_iter(infile), outfp)
if __name__ == '__main__':
    main()  # script entry point
| 23.567164 | 76 | 0.664345 | 212 | 1,579 | 4.801887 | 0.537736 | 0.051081 | 0.031434 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.01937 | 0.215326 | 1,579 | 66 | 77 | 23.924242 | 0.80226 | 0.157061 | 0 | 0 | 0 | 0 | 0.135671 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.125 | null | null | 0.275 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
90a3bca5369f1537b322d1766cb9151ec9a0af0c | 3,181 | py | Python | models.py | sheldonjinqi/CIS680_BicycleGAN | a1d32ad9ba39c61e07838f5b6391b6d2ab0765c4 | [
"MIT"
] | null | null | null | models.py | sheldonjinqi/CIS680_BicycleGAN | a1d32ad9ba39c61e07838f5b6391b6d2ab0765c4 | [
"MIT"
] | null | null | null | models.py | sheldonjinqi/CIS680_BicycleGAN | a1d32ad9ba39c61e07838f5b6391b6d2ab0765c4 | [
"MIT"
] | null | null | null | from torchvision.models import resnet18
import torch.nn.functional as F
import torch.nn as nn
import numpy as np
import torch
import pdb
##############################
# Encoder
##############################
class Encoder(nn.Module):
    """VAE-style image encoder shared by cVAE-GAN and cLR-GAN.

    A pretrained resnet-18, truncated before its final stages, extracts a
    256-channel feature map from a domain-B image; the map is average
    pooled, flattened, and projected to the mean and log-variance of a
    latent Gaussian (for the reparameterization trick, applied elsewhere).

    Args (forward):
        img: image input (from domain B)
    Returns (forward):
        mu: mean of the latent code
        logvar: log-variance of the latent code
    """

    def __init__(self, latent_dim):
        super(Encoder, self).__init__()
        # Drop the last three children of resnet-18 and keep the rest as
        # the feature extractor.
        backbone = resnet18(pretrained=True)
        self.feature_extractor = nn.Sequential(*list(backbone.children())[:-3])
        self.pooling = nn.AvgPool2d(kernel_size=8, stride=8, padding=0)
        # Two linear heads produce the Gaussian parameters.
        self.fc_mu = nn.Linear(256, latent_dim)
        self.fc_logvar = nn.Linear(256, latent_dim)

    def forward(self, img):
        features = self.pooling(self.feature_extractor(img))
        flat = features.view(features.size(0), -1)
        return self.fc_mu(flat), self.fc_logvar(flat)
##############################
# Generator
##############################
class Generator(nn.Module):
    """ The generator used in both cVAE-GAN and cLR-GAN, which transform A to B
    Args in constructor:
        latent_dim: latent dimension for z
        image_shape: (channel, h, w), you may need this to specify the output dimension (optional)
    Args in forward function:
        x: image input (from domain A)
        z: latent vector (encoded B)
    Returns:
        fake_B: generated image in domain B
    """
    def __init__(self, latent_dim, img_shape):
        super(Generator, self).__init__()
        channels, self.h, self.w = img_shape
        # Layers are not implemented yet; this class is a stub.
        # (TODO: add layers...)

    def forward(self, x, z):
        # Stub: returns None until the layers above are implemented.
        # (TODO: add layers...)
        return
##############################
# Discriminator
##############################
class Discriminator(nn.Module):
    def __init__(self, in_channels=3):
        super(Discriminator, self).__init__()
        """ The discriminator used in both cVAE-GAN and cLR-GAN
        Args in constructor:
            in_channels: number of channel in image (default: 3 for RGB)
        Args in forward function:
            x: image input (real_B, fake_B)
        Returns:
            discriminator output: could be a single value or a matrix depending on the type of GAN
        """
        # Layers are not implemented yet; this class is a stub.

    def forward(self, x):
        # Stub: returns None until the layers are implemented.
        return
| 30.586538 | 106 | 0.563345 | 388 | 3,181 | 4.494845 | 0.371134 | 0.030963 | 0.018922 | 0.024083 | 0.200115 | 0.138761 | 0.138761 | 0.102064 | 0.087156 | 0 | 0 | 0.011353 | 0.307765 | 3,181 | 103 | 107 | 30.883495 | 0.780654 | 0.185476 | 0 | 0.0625 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.009709 | 0 | 1 | 0.1875 | false | 0 | 0.1875 | 0.0625 | 0.5625 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 |
90a450c6bb8a1da60bd0c096428df1ba30321115 | 1,565 | py | Python | scripts/slave/recipe_modules/v8/gclient_config.py | bopopescu/chromium-build | f8e42c70146c1b668421ee6358dc550a955770a3 | [
"BSD-3-Clause"
] | null | null | null | scripts/slave/recipe_modules/v8/gclient_config.py | bopopescu/chromium-build | f8e42c70146c1b668421ee6358dc550a955770a3 | [
"BSD-3-Clause"
] | null | null | null | scripts/slave/recipe_modules/v8/gclient_config.py | bopopescu/chromium-build | f8e42c70146c1b668421ee6358dc550a955770a3 | [
"BSD-3-Clause"
] | 1 | 2020-07-22T09:16:32.000Z | 2020-07-22T09:16:32.000Z | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import DEPS
# Shorthands pulled out of the recipe DEPS object: the gclient config
# decorator and the helper that builds chromium.googlesource.com URLs.
CONFIG_CTX = DEPS['gclient'].CONFIG_CTX
ChromiumGitURL = DEPS['gclient'].config.ChromiumGitURL
@CONFIG_CTX()
def v8(c):
    """Base gclient configuration for a V8 checkout."""
    soln = c.solutions.add()
    soln.name = 'v8'
    soln.url = ChromiumGitURL(c, 'v8', 'v8')
    c.got_revision_reverse_mapping['got_revision'] = 'v8'
    # Needed to get the testers to properly sync the right revision.
    # TODO(infra): Upload full buildspecs for every build to isolate and then use
    # them instead of this gclient garbage.
    c.parent_got_revision_mapping['parent_got_revision'] = 'got_revision'

    # Patches against 'icu' land in v8's third_party copy.
    p = c.patch_projects
    p['icu'] = ('v8/third_party/icu', 'HEAD')
@CONFIG_CTX(includes=['v8'])
def dynamorio(c):
    """Add the DynamoRIO solution on top of the base v8 config."""
    solution = c.solutions.add()
    solution.name = 'dynamorio'
    solution.url = ChromiumGitURL(c, 'external', 'dynamorio')
@CONFIG_CTX(includes=['v8'])
def llvm_compiler_rt(c):
    """Check out compiler-rt inside v8's LLVM tree as a custom dep."""
    rt_url = ChromiumGitURL(c, 'external', 'llvm.org', 'compiler-rt')
    c.solutions[0].custom_deps['v8/third_party/llvm/projects/compiler-rt'] = rt_url
@CONFIG_CTX()
def node_js(c):
    """Configure a Node.js checkout pinned to the vee-eight-lkgr branch."""
    solution = c.solutions.add()
    solution.name = 'node.js'
    solution.url = ChromiumGitURL(c, 'external', 'github.com', 'v8', 'node')
    solution.revision = 'vee-eight-lkgr:HEAD'
    c.got_revision_reverse_mapping['got_node_js_revision'] = solution.name
@CONFIG_CTX(includes=['v8'])
def v8_valgrind(c):
    """Pull the valgrind binaries into v8's third_party as a custom dep."""
    vg_url = ChromiumGitURL(c, 'chromium', 'deps', 'valgrind', 'binaries')
    c.solutions[0].custom_deps['v8/third_party/valgrind'] = vg_url
| 30.686275 | 79 | 0.709904 | 229 | 1,565 | 4.707424 | 0.39738 | 0.058442 | 0.016698 | 0.041744 | 0.306122 | 0.189239 | 0.135436 | 0.06308 | 0.06308 | 0 | 0 | 0.014064 | 0.136741 | 1,565 | 50 | 80 | 31.3 | 0.783864 | 0.212141 | 0 | 0.25 | 0 | 0 | 0.252855 | 0.051387 | 0 | 0 | 0 | 0.02 | 0 | 1 | 0.15625 | false | 0 | 0.03125 | 0 | 0.1875 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
90a5135d7b2c7cb2a555e6f77c99a227c0fdaa11 | 2,386 | py | Python | podcast/download.py | jessstringham/podcasts | 04de6cc5cd7d27ee6ab56c0c7950526b606ec201 | [
"MIT"
] | 1 | 2018-05-08T09:26:45.000Z | 2018-05-08T09:26:45.000Z | podcast/download.py | jessstringham/podcasts | 04de6cc5cd7d27ee6ab56c0c7950526b606ec201 | [
"MIT"
] | null | null | null | podcast/download.py | jessstringham/podcasts | 04de6cc5cd7d27ee6ab56c0c7950526b606ec201 | [
"MIT"
] | 1 | 2020-12-13T18:04:00.000Z | 2020-12-13T18:04:00.000Z | import typing
import urllib.error
import urllib.request
from podcast.files import download_location
from podcast.info import build_info_content
from podcast.info import InfoContent
from podcast.models import Channel
from podcast.models import get_podcast_audio_link
from podcast.models import NewStatus
from podcast.models import Podcast
from podcast.models import Radio
from podcast.models import RadioDirectory
def _download_from_url(url: str, location: str) -> bool:
try:
urllib.request.urlretrieve(url, location)
return True
except (IOError, urllib.error.ContentTooShortError):
# If a connection can't be made, IOError is raised
# If the download gets interrupted (ContentTooShortError), we
# should try again later
# TODO: can we tell if it was a bad filename (and should stop
# requesting it), or internet connectivity (and should tell
# us), or just a fluke (and should retry)?
return False
def download_podcast(
        directory: RadioDirectory,
        channel: Channel,
        podcast: Podcast) -> Podcast:
    """Fetch one episode's audio; mark the podcast New on success."""
    target = download_location(directory, channel, podcast)
    audio_url = get_podcast_audio_link(podcast)

    # Downloads run serially. Since this runs every few hours and is more
    # of a push than a pull situation for the user, simplicity wins over
    # threads/priorities for now.
    if _download_from_url(audio_url, target):
        return podcast._replace(status=NewStatus())
    return podcast
def download_channel(directory: RadioDirectory, channel: Channel) -> Channel:
    """Download every podcast in *channel* whose status is Requested."""
    # Status classes are compared by name here (presumably to avoid an
    # import); requested podcasts are downloaded, the rest pass through.
    refreshed = [
        download_podcast(directory, channel, podcast)
        if type(podcast.status).__name__ == 'RequestedStatus'
        else podcast
        for podcast in channel.known_podcasts
    ]
    return channel._replace(known_podcasts=refreshed)
def download_radio(radio: Radio) -> typing.Tuple[Radio, InfoContent]:
    """Download all channels of *radio*; return the updated radio and the
    freshly built info content."""
    refreshed_channels = []
    for channel in radio.channels:
        refreshed_channels.append(download_channel(radio.directory, channel))
    radio = radio._replace(channels=refreshed_channels)
    return (radio, build_info_content())
| 33.605634 | 79 | 0.723386 | 302 | 2,386 | 5.569536 | 0.390728 | 0.058859 | 0.060642 | 0.082045 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.209556 | 2,386 | 70 | 80 | 34.085714 | 0.891835 | 0.228835 | 0 | 0 | 0 | 0 | 0.008206 | 0 | 0 | 0 | 0 | 0.014286 | 0 | 1 | 0.090909 | false | 0 | 0.272727 | 0 | 0.5 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
90a821eadcd600fc9ceb85786e62d6539b2c7ae3 | 9,603 | py | Python | tools/netconf.py | jpfluger/radiucal | 42666478baaa93da05fdc5ab8f3b53df68b993e6 | [
"BSD-3-Clause"
] | 5 | 2019-12-15T09:47:02.000Z | 2022-03-16T03:18:55.000Z | tools/netconf.py | jpfluger/radiucal | 42666478baaa93da05fdc5ab8f3b53df68b993e6 | [
"BSD-3-Clause"
] | null | null | null | tools/netconf.py | jpfluger/radiucal | 42666478baaa93da05fdc5ab8f3b53df68b993e6 | [
"BSD-3-Clause"
] | 1 | 2021-03-27T08:11:53.000Z | 2021-03-27T08:11:53.000Z | #!/usr/bin/python
"""composes the config from user definitions."""
import argparse
import os
import users
import users.__config__
import importlib
import csv
# file indicators: user/vlan definition modules are named user_<x> / vlan_<x>
IND_DELIM = "_"
USER_INDICATOR = "user" + IND_DELIM
VLAN_INDICATOR = "vlan" + IND_DELIM
# EAP methods written into the hostapd eap_users file (phase 1 / phase 2).
AUTH_PHASE_ONE = "PEAP"
AUTH_PHASE_TWO = "MSCHAPV2"
class ConfigMeta(object):
    """Collects metadata while composing the config and cross-checks it.

    Validation failures print a message and exit(-1) immediately.
    """

    def __init__(self):
        """Create empty tracking lists."""
        self.passwords = []       # every non-inherited password seen
        self.macs = []            # MACs assigned to users
        self.vlans = []           # vlans actually referenced by users
        self.all_vlans = []       # every defined vlan name
        self.user_name = []       # bare user names
        self.vlan_users = []      # "<vlan>.<user>" combinations
        self.vlan_initiate = []   # vlans referenced as initiation targets
        self.extras = []          # bypassed/owned MACs (not user-assigned)

    def password(self, password):
        """Record a password, refusing duplicates across users."""
        if password in self.passwords:
            print("password duplicated")
            exit(-1)
        self.passwords.append(password)

    def extra(self, macs):
        """Record bypassed/owned MACs, refusing duplicates."""
        for addr in macs:
            if addr in self.extras:
                print("mac already known as extra: " + addr)
                exit(-1)
            self.extras.append(addr)

    def user_macs(self, macs):
        """Record user-assigned MACs (kept as a deduplicated list)."""
        self.macs = list(set(self.macs + macs))

    def verify(self):
        """Cross-check all collected metadata; exit(-1) on any conflict."""
        # A MAC may not be both user-assigned and extra.
        for addr in self.macs:
            if addr in self.extras:
                print("mac is flagged extra: " + addr)
                exit(-1)
        for addr in self.extras:
            if addr in self.macs:
                print("mac is user assigned: " + addr)
                exit(-1)
        # Every defined vlan must be used, and every used vlan defined.
        referenced = set(self.vlans + self.vlan_initiate)
        if len(referenced) != len(set(self.all_vlans)):
            print("unused vlans detected")
            exit(-1)
        for ref in referenced:
            if ref not in self.all_vlans:
                print("reference to unknown vlan: " + ref)
                exit(-1)

    def vlan_user(self, vlan, user):
        """Record that *user* lives on *vlan*."""
        self.vlans.append(vlan)
        self.vlan_users.append(vlan + "." + user)
        self.user_name.append(user)

    def vlan_to_vlan(self, vlan_to):
        """Record a VLAN-to-VLAN initiation target."""
        self.vlan_initiate.append(vlan_to)
def _get_mod(name):
    """import the module dynamically."""
    # Resolves e.g. "vlan_lab" -> users.vlan_lab at runtime.
    return importlib.import_module("users." + name)
def _load_objs(name, typed):
    """Yield every attribute of users.<name> that is an instance of *typed*."""
    module = _get_mod(name)
    for attr in dir(module):
        value = getattr(module, attr)
        if isinstance(value, typed):
            yield value
def _get_by_indicator(indicator):
    """Return the sorted module names in users.__all__ starting with *indicator*."""
    matches = []
    for entry in sorted(users.__all__):
        if entry.startswith(indicator):
            matches.append(entry)
    return matches
def _common_call(common, method, entity):
"""make a common mod call."""
obj = entity
if common is not None and method in dir(common):
call = getattr(common, method)
if call is not None:
obj = call(obj)
return obj
def check_object(obj):
    """Check an object."""
    # Delegates to the object's own validation; used as a truthy gate by
    # _process, so check() is expected to return a boolean.
    return obj.check()
def _process(output):
    """process the composition of users.

    Loads vlan_* and user_* definitions from the users package, validates
    them via ConfigMeta, and writes three files under *output*:
    audit.csv, eap_users (hostapd format) and manifest.
    Validation failures print a message and exit(-1).
    """
    # The optional users.common module may post-process each assignment.
    common_mod = None
    try:
        common_mod = _get_mod("common")
        print("loaded common definitions...")
    except Exception as e:
        print("defaults only...")
    vlans = None
    meta = ConfigMeta()
    # Pass 1: collect vlan definitions (name -> number as string).
    for v_name in _get_by_indicator(VLAN_INDICATOR):
        print("loading vlan..." + v_name)
        for obj in _load_objs(v_name, users.__config__.VLAN):
            if vlans is None:
                vlans = {}
            if not check_object(obj):
                exit(-1)
            num_str = str(obj.num)
            # Reject duplicate vlan numbers across definitions.
            for vk in vlans.keys():
                if num_str == vlans[vk]:
                    print("vlan number defined multiple times...")
                    exit(-1)
            vlans[obj.name] = num_str
            if obj.initiate is not None and len(obj.initiate) > 0:
                for init_to in obj.initiate:
                    meta.vlan_to_vlan(init_to)
    if vlans is None:
        raise Exception("missing required config settings...")
    meta.all_vlans = vlans.keys()
    store = Store()
    # Pass 2: collect user assignments into the store.
    for f_name in _get_by_indicator(USER_INDICATOR):
        print("composing..." + f_name)
        for obj in _load_objs(f_name, users.__config__.Assignment):
            obj = _common_call(common_mod, 'ready', obj)
            key = f_name.replace(USER_INDICATOR, "")
            if not key.isalnum():
                print("does not meet naming requirements...")
                exit(-1)
            vlan = obj.vlan
            if vlan not in vlans:
                raise Exception("no vlan defined for " + key)
            store.add_vlan(vlan, vlans[vlan])
            meta.vlan_user(vlan, key)
            # Users are addressed as <vlan>.<name> throughout.
            fqdn = vlan + "." + key
            if not check_object(obj):
                print("did not pass check...")
                exit(-1)
            if obj.disabled:
                print("account is disabled")
                continue
            macs = sorted(obj.macs)
            password = obj.password
            bypassed = sorted(obj.bypassed())
            owned = sorted(obj.owns)
            # meta checks
            meta.user_macs(macs)
            if not obj.inherits:
                meta.password(password)
            meta.extra(bypassed)
            meta.extra(owned)
            store.add_user(fqdn, macs, password)
            if obj.mab_only:
                store.set_mab(fqdn)
            if len(bypassed) > 0:
                for m in bypassed:
                    store.add_mab(m, obj.bypass_vlan(m))
            # Audit trail covers assigned, owned and bypassed MACs.
            user_all = []
            for l in [obj.macs, obj.owns, bypassed]:
                user_all += list(l)
            store.add_audit(fqdn, sorted(set(user_all)))
    meta.verify()
    # audit outputs: one row per (user, vlan, mac)
    with open(output + "audit.csv", 'w') as f:
        csv_writer = csv.writer(f, lineterminator=os.linesep)
        for a in sorted(store.get_tag(store.audit)):
            p = a[0].split(".")
            for m in a[1]:
                csv_writer.writerow([p[1], p[0], m])
    # eap_users and preauth
    manifest = []
    with open(output + "eap_users", 'w') as f:
        # PEAP login entries with vlan assignment per user...
        for u in store.get_eap_user():
            f.write('"{}" {}\n\n'.format(u[0], AUTH_PHASE_ONE))
            f.write('"{}" {} hash:{} [2]\n'.format(u[0], AUTH_PHASE_TWO, u[1]))
            write_vlan(f, u[2])
        # ...then MD5 MAB entries keyed by upper-cased MAC.
        for u in store.get_eap_mab():
            up = u[0].upper()
            f.write('"{}" MD5 "{}"\n'.format(up, up))
            write_vlan(f, u[1])
            manifest.append((u[0], u[0]))
    for u in store.get_tag(store.umac):
        manifest.append((u[0], u[1]))
    with open(output + "manifest", 'w') as f:
        for m in sorted(manifest):
            f.write("{}.{}\n".format(m[0], m[1]).lower())
def write_vlan(f, vlan_id):
    """Write the RADIUS accept attributes assigning *vlan_id* to a login."""
    # Tunnel attributes 64/65/81 -- presumably Tunnel-Type=VLAN,
    # Tunnel-Medium-Type=802 and Tunnel-Private-Group-ID carrying the id.
    attrs = (
        'radius_accept_attr=64:d:13\n'
        'radius_accept_attr=65:d:6\n'
        'radius_accept_attr=81:s:{}\n\n'
    )
    f.write(attrs.format(vlan_id))
class Store(object):
    """Accumulates users, MAB entries and vlans, and serves them back as
    tagged records for the output writers."""

    def __init__(self):
        """Create an empty store."""
        self._data = []        # flat list of [tag, key, value] records
        # Tag constants used to label records (read externally by _process).
        self.umac = "UMAC"
        self.pwd = "PWD"
        self.mac = "MAC"
        self.audit = "AUDIT"
        self._users = []       # fqdns already defined
        self._mab = []         # fqdns that are MAB-only (no login entry)
        self._macs = []        # MACs already registered for bypass
        self._vlans = {}       # vlan name -> vlan id

    def set_mab(self, username):
        """Mark *username* as MAB-only so no EAP login entry is emitted."""
        self._mab.append(username)

    def get_tag(self, tag):
        """Yield [key, value] for every record labelled *tag*."""
        for record in self._data:
            if record[0] == tag:
                yield record[1:]

    def add_vlan(self, vlan_name, vlan_id):
        """Register a vlan name -> id mapping."""
        self._vlans[vlan_name] = vlan_id

    def _add(self, tag, key, value):
        """Append one tagged record."""
        self._data.append([tag, key, value])

    def add_user(self, username, macs, password):
        """Record a user's MACs and password; duplicate users are an error."""
        if username in self._users:
            raise Exception("{} already defined".format(username))
        self._users.append(username)
        for addr in macs:
            self._add(self.umac, username, addr)
        self._add(self.pwd, username, password)

    def add_mab(self, mac, vlan):
        """Record a MAC-bypass entry; duplicate MACs are an error."""
        if mac in self._macs:
            raise Exception("{} already defined".format(mac))
        self._macs.append(mac)
        self._add(self.mac, mac, vlan)

    def add_audit(self, user, objs):
        """Record the audit object list for *user*."""
        self._add(self.audit, user, objs)

    def get_eap_mab(self):
        """Yield [mac, vlan_id], resolving vlan names to ids when needed."""
        for record in self.get_tag(self.mac):
            vlan = record[1]
            if not isinstance(vlan, int):
                vlan = self._get_vlan(vlan)
            yield [record[0], vlan]

    def get_eap_user(self):
        """Yield [fqdn, password, vlan_id] for every non-MAB-only user.

        The vlan name is the fqdn's prefix (fqdn is "<vlan>.<user>").
        """
        for record in self.get_tag(self.pwd):
            if record[0] in self._mab:
                continue
            vlan_name = record[0].split(".")[0]
            yield [record[0], record[1], self._get_vlan(vlan_name)]

    def _get_vlan(self, name):
        """Resolve a vlan name to its id."""
        return self._vlans[name]
def main():
    """CLI entry point: parse --output and compose the configuration.

    Prints success/failure and exits with 0/1 accordingly.
    """
    composed = False
    try:
        parser = argparse.ArgumentParser()
        parser.add_argument("--output", type=str, required=True)
        opts = parser.parse_args()
        _process(opts.output)
        composed = True
    except Exception as e:
        print('unable to compose')
        print(str(e))
    if not composed:
        print("failure")
        exit(1)
    print("success")
    exit(0)
if __name__ == "__main__":
    main()  # script entry point
| 30.389241 | 79 | 0.53754 | 1,229 | 9,603 | 4.042311 | 0.184703 | 0.0157 | 0.01087 | 0.008857 | 0.12037 | 0.055153 | 0.023752 | 0.011675 | 0 | 0 | 0 | 0.007927 | 0.330001 | 9,603 | 315 | 80 | 30.485714 | 0.764221 | 0.07456 | 0 | 0.103734 | 0 | 0 | 0.078472 | 0.009723 | 0 | 0 | 0 | 0 | 0 | 1 | 0.107884 | false | 0.070539 | 0.029046 | 0 | 0.165975 | 0.074689 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 |
90a9c694ad7055aeb7e214346c75ba596c28d602 | 3,673 | py | Python | twitter_scrapper.py | juanlucruz/SportEventLocator | 1ac8236f9fdd60917b9a7ee6bb6ca1fa5f6fa71e | [
"Apache-2.0"
] | null | null | null | twitter_scrapper.py | juanlucruz/SportEventLocator | 1ac8236f9fdd60917b9a7ee6bb6ca1fa5f6fa71e | [
"Apache-2.0"
] | null | null | null | twitter_scrapper.py | juanlucruz/SportEventLocator | 1ac8236f9fdd60917b9a7ee6bb6ca1fa5f6fa71e | [
"Apache-2.0"
] | null | null | null | # Import the Twython class
from twython import Twython, TwythonStreamer
import json
# import pandas as pd
import csv
import datetime
def process_tweet(tweet):
    """Reduce a raw Twitter status dict to the flat fields we keep.

    Hashtag texts are flattened into ``d['hashtags']``; the nested
    sub-objects (user, place, quoted/retweeted status, entities,
    extended_entities) are deliberately dropped.

    Bug fix: the original wrapped the whole copy loop in a single
    ``try/except KeyError``, so the first absent key silently discarded
    every remaining field.  Missing keys are now skipped individually.
    """
    # Top-level keys copied verbatim when present.
    kept_keys = (
        'created_at', 'id', 'text', 'source', 'truncated',
        'in_reply_to_status_id', 'in_reply_to_user_id',
        'in_reply_to_screen_name', 'coordinates',
        'quoted_status_id', 'is_quote_status',
        'quote_count', 'reply_count', 'retweet_count',
        'favorite_count', 'favorited', 'retweeted',
        'possibly_sensitive', 'filter_level', 'lang', 'matching_rules',
    )
    d = {}
    d['hashtags'] = [hashtag['text'] for hashtag in tweet['entities']['hashtags']]
    for key in kept_keys:
        if key in tweet:
            d[key] = tweet[key]
    return d
# Create a class that inherits TwythonStreamer
class MyStreamer(TwythonStreamer):
    """Streaming client that persists every received tweet to disk."""

    def on_success(self, data):
        """Handle a received status: log the arrival time and save it."""
        print(datetime.datetime.now())
        self.save_to_json(data)

    def on_error(self, status_code, data):
        """Handle an API problem: report it and stop the stream."""
        print(status_code, data)
        self.disconnect()

    def save_to_csv(self, tweet):
        """Append the tweet's values as one CSV row."""
        with open(r'saved_tweets_big.csv', 'a') as out_file:
            csv.writer(out_file).writerow(list(tweet.values()))

    def save_to_json(self, tweet):
        """Append the tweet as one JSON document per line (JSON Lines).

        Bug fix: the original wrote documents back to back with no
        separator, producing an unparseable ``}{`` stream.
        """
        with open('saved_tweets_big.json', 'a') as out_file:
            json.dump(tweet, out_file)
            out_file.write('\n')
def main():
    """Load Twitter credentials and stream geo-tagged statuses to disk."""
    # Load credentials from json file
    with open("twitter_credentials.json", "r") as tw_creds:
        creds = json.load(tw_creds)

    # Instantiate from our streaming class
    stream = MyStreamer(creds['CONSUMER_KEY'], creds['CONSUMER_SECRET'],
                        creds['ACCESS_TOKEN'], creds['ACCESS_SECRET'])

    # Start the stream, filtered by a bounding box
    # (SW lon, SW lat, NE lon, NE lat) — appears to cover the Iberian
    # peninsula; confirm the intended region before changing it.
    stream.statuses.filter(locations='-7.876154,37.460012,3.699873,43.374723')


if __name__ == "__main__":
    main()
| 33.390909 | 88 | 0.58263 | 429 | 3,673 | 4.773893 | 0.354312 | 0.031738 | 0.021484 | 0.014648 | 0.066406 | 0.03418 | 0 | 0 | 0 | 0 | 0 | 0.012327 | 0.271168 | 3,673 | 109 | 89 | 33.697248 | 0.752708 | 0.34985 | 0 | 0.113208 | 0 | 0 | 0.243601 | 0.054181 | 0 | 0 | 0 | 0 | 0 | 1 | 0.113208 | false | 0.113208 | 0.075472 | 0 | 0.226415 | 0.037736 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 |
90aa48820bf97867a9816268e697f65885c29466 | 389 | py | Python | tools/bin/filter_cassandra_attributes.py | fruch/scylla-tools-java | 3fdce3d357b64402799742f61d3cc33b6f8fcfbb | [
"Apache-2.0"
] | null | null | null | tools/bin/filter_cassandra_attributes.py | fruch/scylla-tools-java | 3fdce3d357b64402799742f61d3cc33b6f8fcfbb | [
"Apache-2.0"
] | null | null | null | tools/bin/filter_cassandra_attributes.py | fruch/scylla-tools-java | 3fdce3d357b64402799742f61d3cc33b6f8fcfbb | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python2
import sys;
from yaml import load, dump, load_all
from cassandra_attributes import *
def main():
    """Merge the YAML files named on the command line and print only the
    entries whose keys are known cassandra attributes (Python 2 script).
    """
    attributes = dict()
    # Merge every YAML file passed as an argument; later files win on
    # duplicate keys.  NOTE(review): yaml.load without an explicit Loader
    # is unsafe on untrusted input -- consider yaml.safe_load.  The file
    # handles opened here are never closed explicitly.
    for i in range(1, len(sys.argv)):
        attributes.update(load(open(sys.argv[i], 'r')))
    # Keep only keys present in cassandra_attributes (star-imported above).
    print dump(dict(filter(lambda (a, b): a in cassandra_attributes, attributes.items())))
if __name__ == "__main__":
    main()
| 25.933333 | 109 | 0.637532 | 54 | 389 | 4.388889 | 0.62963 | 0.160338 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.006579 | 0.218509 | 389 | 14 | 110 | 27.785714 | 0.773026 | 0.053985 | 0 | 0 | 0 | 0 | 0.024523 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.3 | null | null | 0.1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
90ab4c6f6273b660fe6334ebc9b6fb8fce97ce8e | 868 | py | Python | 2020/day04/day4_part1.py | dstjacques/AdventOfCode | 75bfb46a01487430d552ea827f0cf8ae3368f686 | [
"MIT"
] | null | null | null | 2020/day04/day4_part1.py | dstjacques/AdventOfCode | 75bfb46a01487430d552ea827f0cf8ae3368f686 | [
"MIT"
] | null | null | null | 2020/day04/day4_part1.py | dstjacques/AdventOfCode | 75bfb46a01487430d552ea827f0cf8ae3368f686 | [
"MIT"
] | null | null | null | input = """
ecl:gry pid:860033327 eyr:2020 hcl:#fffffd
byr:1937 iyr:2017 cid:147 hgt:183cm
iyr:2013 ecl:amb cid:350 eyr:2023 pid:028048884
hcl:#cfa07d byr:1929
hcl:#ae17e1 iyr:2013
eyr:2024
ecl:brn pid:760753108 byr:1931
hgt:179cm
hcl:#cfa07d eyr:2025 pid:166559648
iyr:2011 ecl:brn hgt:59in
"""
def validate(passport):
    """Return True when the passport record contains all required fields.

    The optional "cid" field is ignored; each of the seven required
    keys (byr/iyr/eyr/hgt/hcl/ecl/pid) must appear at least once.
    """
    required = {"byr", "iyr", "eyr", "hgt", "hcl", "ecl", "pid"}
    seen = set()
    for line in passport.split("\n"):
        for token in line.split(" "):
            seen.add(token.split(":")[0])
    return required.issubset(seen)
# Count the passport records (blank-line separated) that validate.
count = sum(1 for record in input.strip().split("\n\n") if validate(record))
print(count)
90af463579adb14e899b746a24caf95a35d80b1b | 3,017 | py | Python | flumine/markets/market.py | jsphon/flumine | bd5cacf9793d53a99595fe4694aeb9b8d2962abb | [
"MIT"
] | null | null | null | flumine/markets/market.py | jsphon/flumine | bd5cacf9793d53a99595fe4694aeb9b8d2962abb | [
"MIT"
] | null | null | null | flumine/markets/market.py | jsphon/flumine | bd5cacf9793d53a99595fe4694aeb9b8d2962abb | [
"MIT"
] | null | null | null | import datetime
import logging
from typing import Optional
from betfairlightweight.resources.bettingresources import MarketBook, MarketCatalogue
from .blotter import Blotter
from ..events import events
logger = logging.getLogger(__name__)
class Market:
    """State and actions for a single betting market.

    Caches the latest MarketBook/MarketCatalogue snapshots, queues order
    actions on the blotter, and derives timing properties from the
    cached data.
    """

    def __init__(
        self,
        flumine,
        market_id: str,
        market_book: MarketBook,
        market_catalogue: MarketCatalogue = None,
    ):
        self.flumine = flumine
        self.market_id = market_id
        self.closed = False
        self.date_time_closed = None  # set by close_market()
        self.market_book = market_book
        self.market_catalogue = market_catalogue
        self.context = {"simulated": {}}  # data store (raceCard / scores etc)
        self.blotter = Blotter(self)

    def __call__(self, market_book: MarketBook):
        # Refresh the cached book when a new snapshot arrives.
        self.market_book = market_book

    def open_market(self) -> None:
        """Mark the market as open."""
        self.closed = False

    def close_market(self) -> None:
        """Mark the market as closed and record the close time (UTC)."""
        self.closed = True
        self.date_time_closed = datetime.datetime.utcnow()

    # order
    def place_order(self, order, execute: bool = True) -> None:
        """Place an order, register it on the blotter and queue execution.

        An order id already present on the blotter is treated as a retry
        attempt and ignored.
        """
        order.place(self.market_book.publish_time)
        if order.id not in self.blotter:
            self.blotter[order.id] = order
            if order.trade.market_notes is None:
                order.trade.update_market_notes(self.market_book)
            self.flumine.log_control(events.TradeEvent(order.trade))  # todo dupes?
        else:
            return  # retry attempt so ignore?
        if execute:  # handles replaceOrder
            self.blotter.pending_place.append(order)

    def cancel_order(self, order, size_reduction: float = None) -> None:
        """Cancel an order (optionally partially) and queue the cancel."""
        order.cancel(size_reduction)
        self.blotter.pending_cancel.append(order)

    def update_order(self, order, new_persistence_type: str) -> None:
        """Change an order's persistence type and queue the update."""
        order.update(new_persistence_type)
        self.blotter.pending_update.append(order)

    def replace_order(self, order, new_price: float) -> None:
        """Replace an order at a new price and queue the replacement."""
        order.replace(new_price)
        self.blotter.pending_replace.append(order)

    @property
    def event_type_id(self) -> str:
        # Implicitly returns None when no market_book is cached,
        # despite the -> str annotation.
        if self.market_book:
            return self.market_book.market_definition.event_type_id

    @property
    def event_id(self) -> str:
        # Same implicit-None behaviour as event_type_id.
        if self.market_book:
            return self.market_book.market_definition.event_id

    @property
    def seconds_to_start(self):
        """Seconds until the market start time (negative once started)."""
        return (self.market_start_datetime - datetime.datetime.utcnow()).total_seconds()

    @property
    def elapsed_seconds_closed(self) -> Optional[float]:
        """Seconds since the market closed, or None if not closed."""
        if self.closed and self.date_time_closed:
            return (datetime.datetime.utcnow() - self.date_time_closed).total_seconds()

    @property
    def market_start_datetime(self):
        """Market start time, preferring catalogue data over the book.

        Falls back to the unix epoch when neither source is available.
        """
        if self.market_catalogue:
            return self.market_catalogue.market_start_time
        elif self.market_book:
            return self.market_book.market_definition.market_time
        else:
            return datetime.datetime.utcfromtimestamp(0)
| 33.153846 | 88 | 0.670534 | 360 | 3,017 | 5.377778 | 0.252778 | 0.082645 | 0.079545 | 0.051653 | 0.143595 | 0.094008 | 0.094008 | 0.094008 | 0.094008 | 0.068182 | 0 | 0.000439 | 0.244614 | 3,017 | 90 | 89 | 33.522222 | 0.849057 | 0.032483 | 0 | 0.178082 | 0 | 0 | 0.00309 | 0 | 0 | 0 | 0 | 0.011111 | 0 | 1 | 0.178082 | false | 0 | 0.082192 | 0.013699 | 0.383562 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
90b264bddefd9c5d8b81c5073da1b99d48704da6 | 2,228 | py | Python | scripts/naive_search.py | simonbowly/lp-generators | 937c44074c234333b6a5408c3e18f498c2205948 | [
"MIT"
] | 9 | 2020-01-02T23:07:36.000Z | 2022-01-26T10:04:04.000Z | scripts/naive_search.py | simonbowly/lp-generators | 937c44074c234333b6a5408c3e18f498c2205948 | [
"MIT"
] | null | null | null | scripts/naive_search.py | simonbowly/lp-generators | 937c44074c234333b6a5408c3e18f498c2205948 | [
"MIT"
] | 1 | 2020-01-02T23:08:26.000Z | 2020-01-02T23:08:26.000Z |
import itertools
import multiprocessing
import json
import numpy as np
from tqdm import tqdm
from lp_generators.features import coeff_features, solution_features
from lp_generators.performance import clp_simplex_performance
from search_operators import lp_column_neighbour, lp_row_neighbour
from seeds import cli_seeds
from search_common import condition, objective, start_instance
def calculate_features(instance):
    """Combine coefficient and solution features into one dict."""
    features = {}
    features.update(coeff_features(instance))
    features.update(solution_features(instance))
    return features
def generate_by_search(seed):
    """Run one 10,000-step greedy local search seeded by ``seed``.

    Every 100 steps the current instance's features and solver
    performance are snapshotted; a neighbour replaces the current
    instance only when it satisfies the feasibility condition and
    improves the objective.  Returns the list of snapshots.
    """
    results = []
    pass_condition = 0  # neighbours that satisfied condition()
    step_change = 0  # accepted (improving) moves
    random_state = np.random.RandomState(seed)
    current_instance = start_instance(random_state)
    current_features = calculate_features(current_instance)
    for step in range(10001):
        # Periodic snapshot (every 100th step, including step 0).
        if (step % 100) == 0:
            results.append(dict(
                **coeff_features(current_instance),
                **solution_features(current_instance),
                **clp_simplex_performance(current_instance),
                pass_condition=pass_condition,
                step_change=step_change,
                step=step, seed=seed))
        # Alternate between row and column neighbourhood moves.
        if (step % 2) == 0:
            new_instance = lp_row_neighbour(random_state, current_instance, 1)
        else:
            new_instance = lp_column_neighbour(random_state, current_instance, 1)
        new_features = calculate_features(new_instance)
        if condition(new_features):
            pass_condition += 1
            # Greedy acceptance: keep the neighbour only if it improves.
            if objective(new_features) < objective(current_features):
                step_change += 1
                current_instance = new_instance
                current_features = new_features
    return results
@cli_seeds
def run(seed_values):
    """Generate one search run per seed in parallel and store the features."""
    worker_pool = multiprocessing.Pool()
    print('Generating instances by naive search.')
    per_seed_results = list(tqdm(
        worker_pool.imap_unordered(generate_by_search, seed_values),
        total=len(seed_values), smoothing=0))
    flattened = list(itertools.chain.from_iterable(per_seed_results))
    with open('data/naive_search.json', 'w') as outfile:
        json.dump(flattened, outfile, indent=4, sort_keys=True)


run()
| 33.253731 | 81 | 0.685817 | 259 | 2,228 | 5.629344 | 0.343629 | 0.082305 | 0.037037 | 0.027435 | 0.049383 | 0.049383 | 0 | 0 | 0 | 0 | 0 | 0.01117 | 0.236535 | 2,228 | 66 | 82 | 33.757576 | 0.845973 | 0.030521 | 0 | 0 | 0 | 0 | 0.027894 | 0.010228 | 0 | 0 | 0 | 0 | 0 | 1 | 0.055556 | false | 0.055556 | 0.185185 | 0.018519 | 0.277778 | 0.018519 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 |
90b769e3d5d7b99ed6ee9f9dfa67655328ca1e58 | 1,571 | py | Python | ProgressBar.py | ArisKots1992/Similar-World-News-Articles | 426aef1d6d9566e66ad634bc8468d554d887551c | [
"MIT"
] | 1 | 2017-09-09T13:53:09.000Z | 2017-09-09T13:53:09.000Z | ProgressBar.py | ArisKots1992/Similar-World-News-Articles | 426aef1d6d9566e66ad634bc8468d554d887551c | [
"MIT"
] | null | null | null | ProgressBar.py | ArisKots1992/Similar-World-News-Articles | 426aef1d6d9566e66ad634bc8468d554d887551c | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import time
import sys
import math
#HOMEMADE WITHOUT ONLINE CODE by Aris
#LIENCE BY ARIS
class ProgressBar:
    """Console progress bar rendered with block characters.

    Bug fix: state now lives on the instance instead of being mutated on
    the class (``ProgressBar.counter`` etc.), so independent bars no
    longer clobber each other's progress.  Output is unchanged.
    """

    def __init__(self, max_size=36):
        self.max_size = max_size
        self.tick = 20.0 / max_size  # bar cells advanced per update()
        self.progress_counter = 0.0
        self.counter = 0
        self._draw(0, 0)

    def _draw(self, filled, percentage):
        # Render `filled` solid cells out of 20 plus the percentage.
        hashes = '█' * filled
        spaces = ' ' * (20 - filled)
        sys.stdout.write("\rPercent: ┃{0}┃{1}%".format(hashes + spaces, percentage))
        sys.stdout.flush()

    def update(self):
        """Advance the bar one step; the final step draws 100% and a banner."""
        self.counter += 1
        if self.counter == self.max_size:
            self._draw(20, 100)
            print("")
            print("Finished Successfully!")
            return
        elif self.counter >= self.max_size:
            # Updates past completion are ignored.
            return
        self.progress_counter += self.tick
        filled = int(self.progress_counter)
        percentage = int(round(self.progress_counter * 5))
        self._draw(filled, percentage)
        return
class SupportBar:
    """Minimal counter companion to ProgressBar.

    Bug fix: the count is stored on the instance rather than mutated on
    the class, so separate SupportBar objects track independent counts.
    """

    def __init__(self):
        self.counter = 0

    def increase(self):
        """Increment the count by one."""
        self.counter += 1

    def init(self):
        """Reset the count to zero."""
        self.counter = 0

    def get(self):
        """Return the current count."""
        return self.counter
| 29.092593 | 84 | 0.57352 | 175 | 1,571 | 5.091429 | 0.297143 | 0.047138 | 0.145903 | 0.074074 | 0.30303 | 0.222222 | 0.222222 | 0.150393 | 0.150393 | 0.150393 | 0 | 0.029412 | 0.307447 | 1,571 | 53 | 85 | 29.641509 | 0.78125 | 0.045194 | 0 | 0.195122 | 0 | 0 | 0.058824 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.073171 | null | null | 0.04878 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
90b801d343545a11009f0b5ecc8dd2af2c9f92ca | 3,189 | py | Python | ecommerce_project/apps/ecommerce/migrations/0001_initial.py | mlopezf2019/guadalupe_sowos_examen_3 | 813f960f2428ac5d753a02888134ac3992e9018e | [
"MIT"
] | null | null | null | ecommerce_project/apps/ecommerce/migrations/0001_initial.py | mlopezf2019/guadalupe_sowos_examen_3 | 813f960f2428ac5d753a02888134ac3992e9018e | [
"MIT"
] | null | null | null | ecommerce_project/apps/ecommerce/migrations/0001_initial.py | mlopezf2019/guadalupe_sowos_examen_3 | 813f960f2428ac5d753a02888134ac3992e9018e | [
"MIT"
] | null | null | null | # Generated by Django 3.1.1 on 2020-09-27 20:02
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schema for the ecommerce app (auto-generated by Django).

    Creates Category, Customer, Product, Purchase, PurchaseProducts and
    Person tables, then links Customer to Person.  Avoid hand-editing
    the operations: field order and names must match the models.
    """

    initial = True

    dependencies = [
        # Needed because Person.user_id references users.user below.
        ('users', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='Category',
            fields=[
                ('id', models.AutoField(primary_key=True, serialize=False)),
                ('name', models.CharField(max_length=200)),
                ('description', models.TextField()),
            ],
        ),
        migrations.CreateModel(
            name='Customer',
            fields=[
                ('id', models.AutoField(primary_key=True, serialize=False)),
                ('is_active', models.BooleanField(default=False)),
                ('is_deleted', models.BooleanField(default=False)),
            ],
        ),
        migrations.CreateModel(
            name='Product',
            fields=[
                ('id', models.AutoField(primary_key=True, serialize=False)),
                ('name', models.CharField(max_length=200)),
                ('description', models.TextField()),
                ('quantity', models.IntegerField()),
                ('price', models.DecimalField(decimal_places=2, max_digits=5)),
                ('category_id', models.ForeignKey(on_delete=django.db.models.deletion.RESTRICT, to='ecommerce.category')),
            ],
        ),
        migrations.CreateModel(
            name='Purchase',
            fields=[
                ('id', models.AutoField(primary_key=True, serialize=False)),
                ('iva', models.DecimalField(decimal_places=2, max_digits=5)),
                ('subtotal', models.DecimalField(decimal_places=2, max_digits=5)),
                ('total', models.DecimalField(decimal_places=2, max_digits=5)),
                ('customer_id', models.ForeignKey(on_delete=django.db.models.deletion.RESTRICT, to='ecommerce.customer')),
            ],
        ),
        # Join table between Purchase and Product with a quantity column.
        migrations.CreateModel(
            name='PurchaseProducts',
            fields=[
                ('id', models.AutoField(primary_key=True, serialize=False)),
                ('quantity', models.IntegerField()),
                ('product_id', models.ForeignKey(on_delete=django.db.models.deletion.RESTRICT, to='ecommerce.product')),
                ('purchase_id', models.ForeignKey(on_delete=django.db.models.deletion.RESTRICT, to='ecommerce.purchase')),
            ],
        ),
        migrations.CreateModel(
            name='Person',
            fields=[
                ('id', models.AutoField(primary_key=True, serialize=False)),
                ('name', models.CharField(max_length=200)),
                ('last_name', models.CharField(max_length=200)),
                ('user_id', models.ForeignKey(on_delete=django.db.models.deletion.RESTRICT, to='users.user')),
            ],
        ),
        # Added after Person exists so the FK target is defined.
        migrations.AddField(
            model_name='customer',
            name='person_id',
            field=models.ForeignKey(on_delete=django.db.models.deletion.RESTRICT, to='ecommerce.person'),
        ),
    ]
| 41.415584 | 123 | 0.54688 | 288 | 3,189 | 5.934028 | 0.253472 | 0.051492 | 0.057343 | 0.090111 | 0.609128 | 0.609128 | 0.590989 | 0.590989 | 0.492686 | 0.40316 | 0 | 0.017857 | 0.315146 | 3,189 | 76 | 124 | 41.960526 | 0.764652 | 0.014111 | 0 | 0.550725 | 1 | 0 | 0.112524 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.028986 | 0 | 0.086957 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
90bd59aae81c9889080df91dbd28e4a9b304ffd9 | 1,384 | py | Python | eahub/base/models.py | walambert/eahub.org | 21b6111b2626e4739c249d0881d16fbc818094cb | [
"MIT"
] | 36 | 2019-02-22T23:07:14.000Z | 2022-02-10T13:24:27.000Z | eahub/base/models.py | walambert/eahub.org | 21b6111b2626e4739c249d0881d16fbc818094cb | [
"MIT"
] | 717 | 2019-02-21T22:07:55.000Z | 2022-02-26T15:17:49.000Z | eahub/base/models.py | walambert/eahub.org | 21b6111b2626e4739c249d0881d16fbc818094cb | [
"MIT"
] | 19 | 2019-04-14T14:37:56.000Z | 2022-02-14T22:05:16.000Z | import uuid
from authtools import models as authtools_models
from django.core.validators import URLValidator
from django.db import models
from django.utils import timezone
from solo.models import SingletonModel
class User(authtools_models.AbstractEmailUser):
    """Email-based auth user with optional SSO name fields."""

    # django-allauth puts Google or EA.org SSO data in those fields only, not Profile
    # because they have a slightly inflexible architecture
    first_name = models.CharField(max_length=256, blank=True)
    last_name = models.CharField(max_length=256, blank=True)

    def has_profile(self) -> bool:
        """Return True if a related ``profile`` object exists for this user."""
        return hasattr(self, "profile")
class FeedbackURLConfig(SingletonModel):
    """Singleton holding the site-wide feedback URL."""

    # Validated as a URL on save; defaults to the hosted feedback site.
    site_url = models.TextField(
        default="https://feedback.eahub.org", validators=[URLValidator()]
    )

    def __str__(self):
        return "Feedback URL"

    class Meta:
        verbose_name = "Feedback URL"
class MessagingLog(models.Model):
    """Log entry for a message sent from one user to a user or group."""

    # Recipient type constants and the choices built from them.
    USER = "USER"
    GROUP = "GROUP"
    RECIPIENT_TYPE_CHOICES = [
        (USER, "User"),
        (GROUP, "Group"),
    ]

    sender_email = models.EmailField(max_length=254)
    recipient_email = models.EmailField(max_length=254)
    recipient_type = models.CharField(
        max_length=5,
        choices=RECIPIENT_TYPE_CHOICES,
        default=USER,
    )
    # Presumably groups the rows produced by a single send action
    # (one row per recipient) — confirm against the sending code.
    send_action_uuid = models.UUIDField(default=uuid.uuid4)
    time = models.DateTimeField(default=timezone.now)
| 28.833333 | 85 | 0.710983 | 166 | 1,384 | 5.777108 | 0.5 | 0.046924 | 0.056309 | 0.075078 | 0.171011 | 0.171011 | 0.171011 | 0.08342 | 0 | 0 | 0 | 0.012624 | 0.198699 | 1,384 | 47 | 86 | 29.446809 | 0.852119 | 0.095376 | 0 | 0 | 0 | 0 | 0.060048 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.057143 | false | 0 | 0.171429 | 0.057143 | 0.714286 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 |
90bee561f7ee7014b2253c39a50c061487d0ec34 | 2,106 | py | Python | scripts/math/generate_matrix_test.py | chr15murray/ledger | 85be05221f19598de8c6c58652139a1f2d9e362f | [
"Apache-2.0"
] | 96 | 2018-08-23T16:49:05.000Z | 2021-11-25T00:47:16.000Z | scripts/math/generate_matrix_test.py | chr15murray/ledger | 85be05221f19598de8c6c58652139a1f2d9e362f | [
"Apache-2.0"
] | 1,011 | 2018-08-17T12:25:21.000Z | 2021-11-18T09:30:19.000Z | scripts/math/generate_matrix_test.py | chr15murray/ledger | 85be05221f19598de8c6c58652139a1f2d9e362f | [
"Apache-2.0"
] | 65 | 2018-08-20T20:05:40.000Z | 2022-02-26T23:54:35.000Z | import numpy as np
types = ["int", "float", "double"]
def randi(*shape):
    """Return uniform random integers in [-10, 10) with the given shape."""
    return np.random.randint(-10, 10, size=shape)
rngs = {"int": randi, "float": np.random.randn, "double": np.random.randn}
embodiments = {
"function": "R.%s(A,B).AllClose(C)",
"op": "(A %s B).AllClose(C)",
"inline_op": "(R = A, R %s B).AllClose(C)",
"inline_function": "( R = A, R.%s(B) ).AllClose(C)"
}
tests = {
'+': ("Addition", "Add", [], []),
'*': ("Multiplication", "Multiply", [], []),
'-': ("Subtraction", "Subtract", [], []),
'/': ("Division", "Divide", ["int"], []),
'dp': ("Dot product", "Dot", [], ["op", "inline_op"])
}
# Emit one C++ SCENARIO per (scalar type, operation) pair, embedding
# randomly generated operand matrices and the expected result.
# Python 2 script: uses print statements and dict.iteritems().
# NOTE(review): "type" shadows the builtin throughout this loop.
for type in types:
    rng = rngs[type]
    for op, details in tests.iteritems():
        test_title, function, exclude, ignore = details
        # NOTE(review): `break` abandons ALL remaining ops for this type,
        # not just the excluded one; `continue` was probably intended,
        # and py2 dict order makes the effect nondeterministic — confirm.
        if type in exclude:
            break
        iop = op + "="
        ifunction = "Inline" + function
        names = {
            "function": function,
            "op": op,
            "inline_op": iop,
            "inline_function": ifunction
        }
        n = 7
        m = 7
        A = rng(n, m)
        B = rng(n, m)
        # Compute the expected result for the current operation.
        if op == "+":
            C = A + B
        elif op == "/":
            C = A / B
        elif op == "-":
            C = A - B
        elif op == "*":
            C = A * B
        elif op == "dp":
            C = np.dot(A, B)
        # Serialise each matrix: rows joined by " ;\n", values by spaces.
        m1 = " ;\n".join([" ".join([str(y) for y in x]) for x in A])
        m2 = " ;\n".join([" ".join([str(y) for y in x]) for x in B])
        m3 = " ;\n".join([" ".join([str(y) for y in x]) for x in C])
        print """
SCENARIO("%s") {
_M<%s> A,B,C,R;
R.Resize( %d, %d );
A = _M<%s>(R\"(\n%s\n)\");
B = _M<%s>(R\"(\n%s\n)\");
C = _M<%s>(R\"(\n%s\n)\");
""" % (test_title + " for " + type, type, n, m, type, m1, type, m2, type, m3)
        # One EXPECT per embodiment, unless this op skips it.
        for method, emb in embodiments.iteritems():
            if method in ignore:
                continue
            name = names[method]
            tt = emb % name
            print "EXPECT( %s );" % tt
        print "};"
        print
| 25.071429 | 85 | 0.417854 | 271 | 2,106 | 3.206642 | 0.280443 | 0.01611 | 0.04603 | 0.023015 | 0.212888 | 0.186421 | 0.165708 | 0.133487 | 0.133487 | 0.133487 | 0 | 0.008955 | 0.363723 | 2,106 | 83 | 86 | 25.373494 | 0.639552 | 0 | 0 | 0 | 0 | 0 | 0.254036 | 0.041311 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.015385 | null | null | 0.061538 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
90c06ceec71cc460139a2abcafcd42b40b0a56a8 | 315 | py | Python | python/aisdk/player_movement.py | THUAI-Team/thuai2022-aisdk | 84d3239f3edd13cd9ffd9ad61c12890f393d8b88 | [
"MIT"
] | null | null | null | python/aisdk/player_movement.py | THUAI-Team/thuai2022-aisdk | 84d3239f3edd13cd9ffd9ad61c12890f393d8b88 | [
"MIT"
] | null | null | null | python/aisdk/player_movement.py | THUAI-Team/thuai2022-aisdk | 84d3239f3edd13cd9ffd9ad61c12890f393d8b88 | [
"MIT"
] | null | null | null | from enum import Enum
from sys import stderr
class PlayerMovement(Enum):
    """Movement state of a player, serialised as a lowercase name."""

    STOPPED = 0
    WALKING = 1
    RUNNING = 2
    SLIPPED = 3

    def to_json_representation(self):
        """Return the member name in lowercase, e.g. "walking"."""
        return self.name.lower()
class MovementNotAllowedError(ValueError):
    """Raised when a player attempts a movement the rules forbid."""

    def __init__(self, message):
        ValueError.__init__(self, message)
90c531d029592c14df121556138eab86864faa16 | 2,927 | py | Python | user/forms.py | Zidan-Kharisma-Sakana/uts-f02 | d29652cb73829ffa63e0ca4d0e5f8d6d62500367 | [
"Unlicense"
] | null | null | null | user/forms.py | Zidan-Kharisma-Sakana/uts-f02 | d29652cb73829ffa63e0ca4d0e5f8d6d62500367 | [
"Unlicense"
] | null | null | null | user/forms.py | Zidan-Kharisma-Sakana/uts-f02 | d29652cb73829ffa63e0ca4d0e5f8d6d62500367 | [
"Unlicense"
] | null | null | null | from django import forms
from django.contrib.auth import authenticate
from django.contrib.auth.models import User
from django.contrib.auth.forms import AuthenticationForm, UserCreationForm
from django.forms import ValidationError, EmailField
from user import models
class MyAuthenticationForm(AuthenticationForm):
""""
Overide method clean from AuthenticationForm to show that a user hasn't activate their account
"""
error_messages = {
'invalid_login': (
"Please enter a correct %(username)s and password. Note that both "
"fields may be case-sensitive."
),
'inactive': ("This Account hasn't been activated yet, Please check your email :)"),
}
def confirm_login_allowed(self, user):
if not user.is_active:
raise ValidationError(
self.error_messages['inactive'],
code='inactive',
)
def clean(self):
username = self.cleaned_data.get('username')
password = self.cleaned_data.get('password')
if username is not None and password:
self.user_cache = authenticate(self.request, username=username, password=password)
if self.user_cache is None:
print(username)
try:
user_temp = User.objects.get(username=username)
except:
user_temp = None
print(user_temp)
if user_temp is not None:
self.confirm_login_allowed(user_temp)
else:
raise ValidationError(
self.error_messages['invalid_login'],
code='invalid_login',
params={'username': self.username_field.verbose_name},
)
return self.cleaned_data
class CreateUserForm(UserCreationForm):
""""
Override UserCreationForm to include email field
"""
email = EmailField(required=True, label='Email')
class Meta:
model = User
fields = ("username", "email", "password1", "password2")
error_messages = {
'password_mismatch': ('The two password fields didn’t match.'),
'email_taken': 'Your email has been taken'
}
def clean_email(self):
"""
Check if the email had already been taken
"""
email = self.cleaned_data.get('email')
num = User.objects.filter(email=email)
if num.count() > 0:
raise ValidationError(
self.error_messages['email_taken'],
code='email_taken',
)
return email
def save(self, commit= True):
user = super(CreateUserForm, self).save(commit=False)
email = self.cleaned_data.get('email')
user.email = email
user.is_active=False
if commit:
user.save()
return user
| 32.522222 | 98 | 0.585924 | 306 | 2,927 | 5.496732 | 0.349673 | 0.029727 | 0.04459 | 0.042806 | 0.099287 | 0.033294 | 0 | 0 | 0 | 0 | 0 | 0.00152 | 0.325589 | 2,927 | 89 | 99 | 32.88764 | 0.850557 | 0.064571 | 0 | 0.104478 | 0 | 0 | 0.151154 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.059701 | false | 0.089552 | 0.089552 | 0 | 0.283582 | 0.029851 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 |
90c608f7d84094a6c38930b235bc4cc2b22ca8af | 2,044 | py | Python | powerranger/files.py | clayboone/powerranger | 09315c8b37132add56ce31f1b0c1dd0b1692bd23 | [
"MIT"
] | null | null | null | powerranger/files.py | clayboone/powerranger | 09315c8b37132add56ce31f1b0c1dd0b1692bd23 | [
"MIT"
] | 8 | 2020-04-18T20:20:08.000Z | 2020-05-06T13:39:03.000Z | powerranger/files.py | clayboone/powerranger | 09315c8b37132add56ce31f1b0c1dd0b1692bd23 | [
"MIT"
] | null | null | null | import curses
import itertools
import os
from pathlib import Path
import stat
from typing import Optional, Union
import config
from colors import Colors
class Item:
"""An item inside of a Directory."""
def __init__(self, path: Union[Path, str]):
self._path = Path(path)
self._selected = False
@property
def name(self) -> str:
"""The name of the item, not including parents."""
return self._path.name
@property
def color(self) -> curses.color_pair:
"""An initialized ncurses color pair associated with the type of file
for this Item.
"""
if self.selected:
return Colors.black_on_white()
if self._path.is_dir():
return Colors.blue_on_black()
return Colors.default()
@property
def selected(self) -> Optional[bool]:
"""Return whether this item should appear as selected"""
return self._selected
@selected.setter
def selected(self, value: bool):
self._selected = value
def is_hidden(self) -> bool:
"""Return whether or not the file should be hidden."""
return self._has_hidden_attribute() or self._path.name.startswith(".")
def _has_hidden_attribute(self) -> bool:
return bool(os.stat(self._path.resolve()).st_file_attributes & stat.FILE_ATTRIBUTE_HIDDEN)
class Directory:
"""A list of items inside of a directory."""
def __init__(self, path: Union[Path, str]):
self.path = Path(path)
def __iter__(self):
elements = self.path.iterdir()
if config.SORT_FOLDERS_ON_TOP:
element1, element2 = itertools.tee(elements)
elements = itertools.chain(
(item for item in element1 if item.is_dir()),
(item for item in element2 if not item.is_dir()),
)
for element in elements:
item = Item(element)
if item.is_hidden() and not config.SHOW_HIDDEN_FILES:
continue
yield Item(element)
| 27.253333 | 98 | 0.620841 | 257 | 2,044 | 4.762646 | 0.33463 | 0.058824 | 0.014706 | 0.029412 | 0.099673 | 0.099673 | 0.099673 | 0.099673 | 0.099673 | 0.099673 | 0 | 0.002732 | 0.283757 | 2,044 | 74 | 99 | 27.621622 | 0.833333 | 0.144814 | 0 | 0.104167 | 0 | 0 | 0.000588 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.1875 | false | 0 | 0.166667 | 0.020833 | 0.541667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 |
90c806da308d72b961e15453707e27dd9643ad2b | 1,897 | py | Python | code/diva_evaluation_cli/bin/commands/actev_get_system_subcommands/git_command.py | wenhel/Argus | 39768a8d1671eb80f86bbd67e58478a4cbdcdeca | [
"MIT"
] | 4 | 2019-06-28T23:27:43.000Z | 2021-09-27T03:17:58.000Z | code/diva_evaluation_cli/bin/commands/actev_get_system_subcommands/git_command.py | wenhel/Argus | 39768a8d1671eb80f86bbd67e58478a4cbdcdeca | [
"MIT"
] | 2 | 2020-01-16T19:39:44.000Z | 2021-02-24T22:45:37.000Z | code/diva_evaluation_cli/bin/commands/actev_get_system_subcommands/git_command.py | wenhel/Argus | 39768a8d1671eb80f86bbd67e58478a4cbdcdeca | [
"MIT"
] | 1 | 2019-09-09T07:40:45.000Z | 2019-09-09T07:40:45.000Z | """Actev module: get-system git
Actev modules are used to parse actev commands in order to get arguments
before calling associated entry point methods to execute systems.
Warning: this file should not be modified: see src/entry_points to add your source code.
"""
from diva_evaluation_cli.bin.commands.actev_command import ActevCommand
class ActevGetSystemGit(ActevCommand):
    """Clones a git repository

    Command Args:
    * location or l: path to store the system
    * user or U: username to access the url
    * password or p: password to access the url
    * token or t: token to access the url
    * install-cli or i: install the cli to use it
    """
    def __init__(self):
        # 'git' is the subcommand name; get_git.sh is the entry-point script it runs.
        super(ActevGetSystemGit, self).__init__('git', "get_git.sh")
    def cli_parser(self, arg_parser):
        """Configure the description and the arguments (positional and optional) to parse.
        Args:
            arg_parser(:obj:`ArgParser`): Python arg parser to describe how to parse the command
        """
        arg_parser.description = "Downloads a git repository"
        # NOTE(review): this group is created but no argument is ever attached to
        # it, so it has no effect — attach the required options or remove it.
        required_named = arg_parser.add_argument_group('required named arguments')
        arg_parser.add_argument("-U", "--user", help="username to access the url")
        # Fix: the two implicitly-concatenated help strings had no separator and
        # rendered as "...the urlWarning: ...".
        arg_parser.add_argument("-p", "--password", help="password to access the url. "
        "Warning: if password starts with \'-\', use this: --password=<your password>")
        arg_parser.add_argument("-l", "--location", help="path to store the system")
        arg_parser.add_argument("-t", "--token", help="token to access the url. "
                                "Warning: if token starts with \'-\', use this: --token=<your token>",
                                type=str)
        arg_parser.add_argument("-i", "--install-cli", help="install the cli to use it", action='store_true')
| 43.113636 | 111 | 0.641012 | 251 | 1,897 | 4.717131 | 0.394422 | 0.076014 | 0.060811 | 0.101351 | 0.152027 | 0.072635 | 0 | 0 | 0 | 0 | 0 | 0 | 0.256194 | 1,897 | 43 | 112 | 44.116279 | 0.839121 | 0.377965 | 0 | 0 | 0 | 0 | 0.358696 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.133333 | false | 0.133333 | 0.066667 | 0 | 0.266667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 |
90c8e1c41e29404a0d0d0511d6b46db43890fb89 | 4,106 | py | Python | test/test_markdown_parser.py | Asana/SGTM | 0e9e236980ed68e80e021470da6374945bbac501 | [
"MIT"
] | 8 | 2020-12-05T00:13:03.000Z | 2022-01-11T11:35:51.000Z | test/test_markdown_parser.py | Asana/SGTM | 0e9e236980ed68e80e021470da6374945bbac501 | [
"MIT"
] | 12 | 2020-12-14T18:21:21.000Z | 2022-03-29T17:06:20.000Z | test/test_markdown_parser.py | Asana/SGTM | 0e9e236980ed68e80e021470da6374945bbac501 | [
"MIT"
] | 2 | 2021-06-27T09:32:55.000Z | 2022-02-27T23:17:36.000Z | import unittest
from html import escape
from src.markdown_parser import convert_github_markdown_to_asana_xml
class TestConvertGithubMarkdownToAsanaXml(unittest.TestCase):
    """Pin the GitHub-markdown -> Asana-XML translation rules of
    convert_github_markdown_to_asana_xml: each test feeds one markdown
    construct and asserts the exact XML string produced."""
    def test_basic_markdown(self):
        # Inline styles map to Asana's s/strong/em/code/a tags.
        md = """~~strike~~ **bold** _italic_ `code` [link](asana.com)"""
        xml = convert_github_markdown_to_asana_xml(md)
        self.assertEqual(
            xml,
            '<s>strike</s> <strong>bold</strong> <em>italic</em> <code>code</code> <a href="asana.com">link</a>\n',
        )
    def test_ul_tag(self):
        md = """* bullet one\n* bullet two"""
        xml = convert_github_markdown_to_asana_xml(md)
        self.assertEqual(
            xml, """<ul>\n<li>bullet one</li>\n<li>bullet two</li>\n</ul>\n""",
        )
    def test_ol_tag(self):
        md = """1. bullet one\n2. bullet two"""
        xml = convert_github_markdown_to_asana_xml(md)
        self.assertEqual(
            xml, """<ol>\n<li>bullet one</li>\n<li>bullet two</li>\n</ol>\n""",
        )
    def test_paragraph(self):
        # Bare text passes through with only a trailing newline (no <p> wrapper).
        md = "we don't wrap random text in p tags"
        xml = convert_github_markdown_to_asana_xml(md)
        self.assertEqual(md + "\n", xml)
    def test_block_quote(self):
        # Asana has no blockquote; the converter renders quotes as <em>.
        md = "> block quote"
        xml = convert_github_markdown_to_asana_xml(md)
        self.assertEqual(xml, "<em>> block quote\n</em>")
    def test_horizontal_rule(self):
        # Asana doesn't support <hr /> tags, so we just ignore them
        md = "hello\n\n---\nworld\n"
        xml = convert_github_markdown_to_asana_xml(md)
        self.assertEqual(xml, md)  # unchanged
    def test_auto_linking(self):
        # Bare URLs are auto-linked alongside explicit markdown links.
        md = "https://asana.com/ [still works](www.test.com)"
        xml = convert_github_markdown_to_asana_xml(md)
        self.assertEqual(
            xml,
            '<a href="https://asana.com/">https://asana.com/</a> <a href="www.test.com">still works</a>\n',
        )
    def test_converts_headings_to_bold(self):
        # Asana has no heading tags; headings become a bold line.
        md = "## heading"
        xml = convert_github_markdown_to_asana_xml(md)
        self.assertEqual(xml, "\n<b>heading</b>\n")
    def test_nested_code_within_block_quote(self):
        md = "> abc `123`"
        xml = convert_github_markdown_to_asana_xml(md)
        self.assertEqual(xml, "<em>> abc <code>123</code>\n</em>")
    def test_removes_pre_tags_inline(self):
        md = """```test```"""
        xml = convert_github_markdown_to_asana_xml(md)
        self.assertEqual(xml, "<code>test</code>\n")
    def test_removes_pre_tags_block(self):
        # Fenced code blocks keep <code> but drop the <pre> wrapper.
        md = """see:
```
function foo = () => null;
```
"""
        xml = convert_github_markdown_to_asana_xml(md)
        self.assertEqual(xml, "see:\n<code>function foo = () => null;\n</code>\n")
    def test_escapes_raw_html_mixed_with_markdown(self):
        # Raw HTML embedded in markdown is entity-escaped, not interpreted.
        md = """## <img href="link" />still here <h3>header</h3>"""
        xml = convert_github_markdown_to_asana_xml(md)
        self.assertEqual(
            xml,
            "\n<b>"
            + escape('<img href="link" />')
            + "still here "
            + escape("<h3>header</h3>")
            + "</b>\n",
        )
    def test_escapes_raw_html_on_own_lines(self):
        md = """## blah blah blah
<img href="link">
still here <h3>header</h3>"""
        xml = convert_github_markdown_to_asana_xml(md)
        self.assertEqual(
            xml,
            "\n<b>blah blah blah</b>\n"
            + escape('<img href="link">\n')
            + "still here "
            + escape("<h3>header</h3>"),
        )
    def test_escapes_raw_html(self):
        md = """<img href="link" />still here <h3>header</h3>"""
        xml = convert_github_markdown_to_asana_xml(md)
        self.assertEqual(
            xml,
            escape('<img href="link" />') + "still here " + escape("<h3>header</h3>\n"),
        )
    def test_removes_images(self):
        # NOTE(review): the markdown literal here is empty, yet the expected
        # output references https://image.com — an image link (e.g.
        # ![image](https://image.com)) was likely lost from this source; verify
        # against the repository before relying on this test.
        md = """"""
        xml = convert_github_markdown_to_asana_xml(md)
        self.assertEqual(xml, '<a href="https://image.com">image</a>\n')
if __name__ == "__main__":
    # Allow running this test module directly.
    unittest.main()
| 33.933884 | 115 | 0.585485 | 550 | 4,106 | 4.116364 | 0.198182 | 0.091873 | 0.14841 | 0.162544 | 0.563163 | 0.535336 | 0.491166 | 0.491166 | 0.491166 | 0.491166 | 0 | 0.006545 | 0.255723 | 4,106 | 120 | 116 | 34.216667 | 0.734293 | 0.016318 | 0 | 0.313131 | 0 | 0.040404 | 0.285183 | 0.028989 | 0 | 0 | 0 | 0 | 0.151515 | 1 | 0.151515 | false | 0 | 0.040404 | 0 | 0.20202 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
90cceb9760edd56688a99f6ee73a68bdf983a84e | 619 | py | Python | python/wotdbg.py | wanyancan/wot-debugserver | 4c6dd5c511659885abb355bef7c9318ed3e42937 | [
"MIT"
] | 32 | 2015-01-23T16:13:20.000Z | 2021-05-29T21:11:42.000Z | python/wotdbg.py | wanyancan/wot-debugserver | 4c6dd5c511659885abb355bef7c9318ed3e42937 | [
"MIT"
] | null | null | null | python/wotdbg.py | wanyancan/wot-debugserver | 4c6dd5c511659885abb355bef7c9318ed3e42937 | [
"MIT"
] | 16 | 2015-08-25T08:02:52.000Z | 2022-01-19T19:16:16.000Z | import os.path
import tcprepl
import BigWorld
def echo(s):
    '''Send string to client'''
    write = tcprepl.write_client
    if write is not None:
        write(s)
def exec_file(filename, exec_globals=None):
    '''
    Execute file

    Try to find file named `filename` and execute it. If `exec_globals` is
    specified it is used as globals-dict in exec context.
    '''
    if exec_globals is None:
        exec_globals = {}
    if not os.path.isfile(filename):
        # Fall back to the game's resource resolver for non-filesystem paths.
        filename = BigWorld.wg_resolveFileName(filename)
    with open(filename, 'r') as f:
        code = f.read()
    # exec(code, globals) is valid on both Python 2 and 3, unlike the old
    # Python-2-only statement form `exec code in exec_globals`.
    exec(code, exec_globals)
| 22.107143 | 74 | 0.660743 | 89 | 619 | 4.494382 | 0.47191 | 0.1375 | 0.09 | 0.075 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.248788 | 619 | 27 | 75 | 22.925926 | 0.860215 | 0 | 0 | 0 | 0 | 0 | 0.002326 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.214286 | null | null | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
90d1622a98abf59a298f4b58a00b76f812b4c744 | 604 | py | Python | books/migrations/0004_alter_book_category.py | MwinyiMoha/books-service | 31a980a8505c5d5c2acad698bb493fad8c0ce8fe | [
"MIT"
] | null | null | null | books/migrations/0004_alter_book_category.py | MwinyiMoha/books-service | 31a980a8505c5d5c2acad698bb493fad8c0ce8fe | [
"MIT"
] | 3 | 2021-04-08T17:44:07.000Z | 2021-04-12T09:38:26.000Z | books/migrations/0004_alter_book_category.py | MwinyiMoha/books-service | 31a980a8505c5d5c2acad698bb493fad8c0ce8fe | [
"MIT"
] | null | null | null | # Generated by Django 3.2 on 2021-04-10 12:34
from django.db import migrations, models
class Migration(migrations.Migration):
    """Constrain Book.category to a fixed choice set (fiction/regular/novel)
    with 'regular' as the default, capping the column at 7 characters."""
    dependencies = [("books", "0003_auto_20210410_1231")]
    operations = [
        migrations.AlterField(
            model_name="book",
            name="category",
            field=models.CharField(
                choices=[
                    ("fiction", "Fiction"),
                    ("regular", "Regular"),
                    ("novel", "Novel"),
                ],
                default="regular",
                # 7 = len("fiction"), the longest stored value.
                max_length=7,
            ),
        )
    ]
| 24.16 | 57 | 0.47351 | 50 | 604 | 5.62 | 0.8 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.086111 | 0.403974 | 604 | 24 | 58 | 25.166667 | 0.694444 | 0.071192 | 0 | 0 | 1 | 0 | 0.152057 | 0.041145 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.055556 | 0 | 0.222222 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
90d9a5726836680355d0f136ca02e9d3ff263f57 | 1,087 | py | Python | modcma/__main__.py | IOHprofiler/ModularCMAES | 5ae3310d68b7e2bc37ef10de07945e89c16d6654 | [
"MIT"
] | 2 | 2021-04-08T06:16:21.000Z | 2022-01-25T18:18:51.000Z | modcma/__main__.py | IOHprofiler/ModularCMAES | 5ae3310d68b7e2bc37ef10de07945e89c16d6654 | [
"MIT"
] | 3 | 2020-11-16T15:24:53.000Z | 2021-11-10T10:27:50.000Z | modcma/__main__.py | IOHprofiler/ModularCMAES | 5ae3310d68b7e2bc37ef10de07945e89c16d6654 | [
"MIT"
] | 2 | 2021-01-13T15:36:46.000Z | 2021-04-08T06:24:25.000Z | """Allows the user to call the library as a cli-module."""
from argparse import ArgumentParser
from .modularcmaes import evaluate_bbob
# CLI for running the modular CMA-ES on a single BBOB benchmark function.
parser = ArgumentParser(description="Run single function CMAES")
parser.add_argument(
    "-f", "--fid", type=int, help="bbob function id", required=False, default=5
)
parser.add_argument(
    "-d", "--dim", type=int, help="dimension", required=False, default=5
)
parser.add_argument(
    "-i",
    "--iterations",
    type=int,
    help="number of iterations per agent",
    required=False,
    default=50,
)
parser.add_argument(
    "-l", "--logging", required=False, action="store_true", default=False
)
parser.add_argument("-L", "--label", type=str, required=False, default="")
parser.add_argument("-s", "--seed", type=int, required=False, default=42)
parser.add_argument("-p", "--data_folder", type=str, required=False)
parser.add_argument("-a", "--arguments", nargs="+", required=False)
args = vars(parser.parse_args())
# Each "-a" value is executed as a Python statement with `args` as the local
# namespace, so e.g. -a "budget=1000" injects an extra keyword argument.
# NOTE(review): exec on raw CLI input runs arbitrary code; tolerable only
# because the invoker already controls this process — never expose remotely.
for arg in args.pop("arguments") or []:
    # pylint: disable=exec-used
    exec(arg, None, args)
# All remaining parsed options are forwarded as keyword arguments.
evaluate_bbob(**args)
| 29.378378 | 79 | 0.689052 | 146 | 1,087 | 5.041096 | 0.5 | 0.097826 | 0.184783 | 0.057065 | 0.103261 | 0.103261 | 0.103261 | 0 | 0 | 0 | 0 | 0.006369 | 0.133395 | 1,087 | 36 | 80 | 30.194444 | 0.774947 | 0.072677 | 0 | 0.142857 | 0 | 0 | 0.183633 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.071429 | 0 | 0.071429 | 0 | 0 | 0 | 0 | null | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
90decc71935f62f946a40921c43b3f8580f075de | 2,398 | py | Python | setup.py | medchemfi/sdfconf | 81b1ed383c1d4b3e633fdc555e4027091226b025 | [
"MIT"
] | 6 | 2021-12-27T07:55:16.000Z | 2022-01-26T04:36:53.000Z | setup.py | medchemfi/sdfconf | 81b1ed383c1d4b3e633fdc555e4027091226b025 | [
"MIT"
] | null | null | null | setup.py | medchemfi/sdfconf | 81b1ed383c1d4b3e633fdc555e4027091226b025 | [
"MIT"
] | 3 | 2022-01-06T13:54:48.000Z | 2022-01-26T04:36:54.000Z | # -*- coding: utf-8 -*-
from setuptools import setup, find_packages
import re
import os
def read(fname):
    """Return the text of *fname*, resolved relative to this setup.py's directory.

    Fix: the original opened the file without ever closing it (resource leak);
    a context manager guarantees the handle is released.
    """
    with open(os.path.join(os.path.dirname(__file__), fname)) as fh:
        return fh.read()
# Extract __version__ from the package's version module without importing it.
# Fix: the original error message referenced an undefined name VERSIONFILE
# (NameError instead of the intended RuntimeError), and `mo` was unbound if
# the file was empty.
VERSIONFILE = "src/sdfconf/_version.py"
VSRE = r"^__version__ = ['\"]([^'\"]*)['\"]"
verstr = None
with open(VERSIONFILE, "rt") as vf:
    for line in vf:
        mo = re.search(VSRE, line, re.M)
        if mo:
            verstr = mo.group(1)
            break
if verstr is None:
    raise RuntimeError("Unable to find version string in %s." % (VERSIONFILE,))
# Package metadata; `verstr` is parsed from src/sdfconf/_version.py above.
setup(name = 'sdfconf',
      version = verstr,
      description = ("Diverse manipulation and analysis tool for .sdf files."),
      long_description = read('README.rst'),
      install_requires = ['numpy>=1.7.1','matplotlib>=1.4.2'],
      author = 'Sakari Lätti',
      author_email = 'sakari.latti@jyu.fi',
      maintainer = 'Sakari Lätti',
      maintainer_email = 'sakari.latti@jyu.fi',
      packages = ['sdfconf'],
      # src layout: the sdfconf package lives under src/.
      package_dir = {'sdfconf':'src/sdfconf'},
      keywords = 'sdf mol2 conformation analyze histogram',
      url = 'http://users.jyu.fi/~pentikai/',
      license = 'MIT/expat',
      # Console entry point plus legacy eggsecutable hook, both to runner:main.
      entry_points =
      {'console_scripts': ['sdfconf = sdfconf.runner:main'],
       'setuptools.installation': ['eggsecutable = sdfconf.runner:main',],
       },
      classifiers= ['Development Status :: 4 - Beta',
                    'Environment :: Console',
                    'Intended Audience :: Science/Research',
                    'License :: OSI Approved :: MIT License',
                    'Natural Language :: English',
                    'Operating System :: OS Independent',
                    'Programming Language :: Python :: 2.7',
                    #'Programming Language :: Python :: 3',
                    'Topic :: Scientific/Engineering :: Bio-Informatics',
                    'Topic :: Scientific/Engineering :: Chemistry' ,
                    'Topic :: Software Development :: Libraries',
                    ],
      ##FIXME
      #'''
      #package_data = {
      #                'sample':['sample_data.sdf']
      #                },
      #'''
      )
) | 42.821429 | 97 | 0.470809 | 205 | 2,398 | 5.414634 | 0.629268 | 0.013514 | 0.028829 | 0.034234 | 0.037838 | 0 | 0 | 0 | 0 | 0 | 0 | 0.008997 | 0.397415 | 2,398 | 56 | 98 | 42.821429 | 0.75917 | 0.05171 | 0 | 0 | 0 | 0 | 0.35894 | 0.039735 | 0 | 0 | 0 | 0.017857 | 0 | 1 | 0.022727 | false | 0 | 0.068182 | 0.022727 | 0.113636 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
90e49b3af233dbd74b52999ca2aa64df02b0beff | 368 | py | Python | core/migrations/0008_auto_20190528_1802.py | peterson-dev/code-snippet-app | b5ecb7b8b679c307d361a7ce100d4115f92d99a5 | [
"MIT"
] | 2 | 2019-05-22T21:54:43.000Z | 2019-05-26T22:22:14.000Z | core/migrations/0008_auto_20190528_1802.py | peterson-dev/code-snippet-app | b5ecb7b8b679c307d361a7ce100d4115f92d99a5 | [
"MIT"
] | 14 | 2020-02-12T00:04:05.000Z | 2022-03-11T23:51:10.000Z | core/migrations/0008_auto_20190528_1802.py | peterson-dev/kode-kangaroo | b5ecb7b8b679c307d361a7ce100d4115f92d99a5 | [
"MIT"
] | null | null | null | # Generated by Django 2.2.1 on 2019-05-28 22:02
from django.db import migrations
class Migration(migrations.Migration):
    """Rename Snippet.post_content to Snippet.content (no data change)."""
    dependencies = [
        ('core', '0007_auto_20190523_1740'),
    ]
    operations = [
        migrations.RenameField(
            model_name='snippet',
            old_name='post_content',
            new_name='content',
        ),
    ]
| 19.368421 | 47 | 0.589674 | 40 | 368 | 5.25 | 0.8 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.120155 | 0.298913 | 368 | 18 | 48 | 20.444444 | 0.693798 | 0.122283 | 0 | 0 | 1 | 0 | 0.165109 | 0.071651 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.083333 | 0 | 0.333333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
90e4c87211faae293a93093b6b860b2f8d021a50 | 2,740 | py | Python | scripts/Biupdownsample/grad_check.py | dongdong93/a2u_matting | 1d0ad8e630cce50c5b36c40ad384c888d292f9a8 | [
"MIT"
] | 22 | 2021-04-28T03:48:53.000Z | 2022-01-24T09:42:53.000Z | scripts/Biupdownsample/grad_check.py | dongdong93/a2u_matting | 1d0ad8e630cce50c5b36c40ad384c888d292f9a8 | [
"MIT"
] | 1 | 2021-08-08T20:10:18.000Z | 2021-08-23T07:33:38.000Z | scripts/Biupdownsample/grad_check.py | dongdong93/a2u_matting | 1d0ad8e630cce50c5b36c40ad384c888d292f9a8 | [
"MIT"
] | 5 | 2021-09-17T08:02:06.000Z | 2022-01-24T09:43:03.000Z | import os.path as osp
import sys
import subprocess
subprocess.call(['pip', 'install', 'cvbase'])
import cvbase as cvb
import torch
from torch.autograd import gradcheck
sys.path.append(osp.abspath(osp.join(__file__, '../../')))
from biupdownsample import biupsample_naive, BiupsampleNaive
from biupdownsample import bidownsample_naive, BidownsampleNaive
def _run_gradcheck(title, module, feat, mask):
    """Numerically verify `module`'s gradients on (feat, mask) and print the result."""
    print('Gradcheck for %s...' % title)
    result = gradcheck(module, (feat, mask), atol=1e-4, eps=1e-4)
    print(result)


def _benchmark(title, op, feat, mask, op_args, loop_num=500):
    """Time forward/backward of `op(feat, mask, *op_args)` over `loop_num` runs.

    Factored out of two near-identical copy/pasted benchmark loops (one per op).
    """
    time_forward = 0
    time_backward = 0
    bar = cvb.ProgressBar(loop_num)
    timer = cvb.Timer()
    for _ in range(loop_num):
        x = op(feat.clone(), mask.clone(), *op_args)
        # Kernels are async; synchronize so the timer measures real GPU work.
        torch.cuda.synchronize()
        time_forward += timer.since_last_check()
        x.sum().backward(retain_graph=True)
        torch.cuda.synchronize()
        time_backward += timer.since_last_check()
        bar.update()
    forward_speed = (time_forward + 1e-3) * 1e3 / loop_num
    backward_speed = (time_backward + 1e-3) * 1e3 / loop_num
    print(f'\n{title} time forward: '
          f'{forward_speed} ms/iter | time backward: {backward_speed} ms/iter')


# Gradcheck on small double-precision inputs (gradcheck needs float64).
feat = torch.randn(2, 64, 2, 2, requires_grad=True, device='cuda:0').double()
mask = torch.randn(
    2, 100, 4, 4, requires_grad=True, device='cuda:0').sigmoid().double()
_run_gradcheck('biupsample naive', BiupsampleNaive(5, 4, 2), feat, mask)

# Benchmark on realistically-sized float32 tensors.
feat = torch.randn(
    2, 1024, 100, 100, requires_grad=True, device='cuda:0').float()
mask = torch.randn(
    2, 25, 200, 200, requires_grad=True, device='cuda:0').sigmoid().float()
_benchmark('Biupsample naive', biupsample_naive, feat, mask, (5, 1, 2))

# ---------------------------------------------------------------

feat = torch.randn(2, 64, 4, 4, requires_grad=True, device='cuda:0').double()
mask = torch.randn(
    2, 16, 4, 4, requires_grad=True, device='cuda:0').double()
_run_gradcheck('bidownsample naive', BidownsampleNaive(4, 1, 1), feat, mask)

feat = torch.randn(
    2, 512, 200, 200, requires_grad=True, device='cuda:0').float()
mask = torch.randn(
    2, 100, 100, 100, requires_grad=True, device='cuda:0').sigmoid().float()
_benchmark('Bidownsample naive', bidownsample_naive, feat, mask, (10, 1, 2))
| 31.494253 | 79 | 0.691606 | 400 | 2,740 | 4.5675 | 0.215 | 0.059113 | 0.048166 | 0.096333 | 0.739464 | 0.692392 | 0.692392 | 0.68856 | 0.659551 | 0.639299 | 0 | 0.049111 | 0.137956 | 2,740 | 86 | 80 | 31.860465 | 0.724386 | 0.022993 | 0 | 0.59375 | 0 | 0 | 0.125701 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.125 | 0 | 0.125 | 0.09375 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
90e95d3f579e468dcd63f6bfea79961b11c3e5b8 | 1,953 | py | Python | jupyterlab_bigquery/jupyterlab_bigquery/__init__.py | shunr/jupyter-extensions | a2fb310215664e29fd7252e5fe353f60a91a0aba | [
"Apache-2.0"
] | null | null | null | jupyterlab_bigquery/jupyterlab_bigquery/__init__.py | shunr/jupyter-extensions | a2fb310215664e29fd7252e5fe353f60a91a0aba | [
"Apache-2.0"
] | 1 | 2020-07-20T23:09:46.000Z | 2020-07-20T23:09:46.000Z | jupyterlab_bigquery/jupyterlab_bigquery/__init__.py | shunr/jupyter-extensions | a2fb310215664e29fd7252e5fe353f60a91a0aba | [
"Apache-2.0"
] | null | null | null | from notebook.utils import url_path_join
from jupyterlab_bigquery.list_items_handler import handlers
from jupyterlab_bigquery.details_handler import DatasetDetailsHandler, TablePreviewHandler, TableDetailsHandler
from jupyterlab_bigquery.version import VERSION
from jupyterlab_bigquery.pagedAPI_handler import PagedQueryHandler
from jupyterlab_bigquery.query_incell_editor import QueryIncellEditor, _cell_magic
__version__ = VERSION
def _jupyter_server_extension_paths():
return [{'module': 'jupyterlab_bigquery'}]
def load_jupyter_server_extension(nb_server_app):
  """
  Called when the extension is loaded.
  Args:
    nb_server_app (NotebookWebApplication): handle to the Notebook webserver instance.
  """
  host_pattern = '.*$'
  app = nb_server_app.web_app
  # All routes live under <base_url>/bigquery/v1/...
  gcp_v1_endpoint = url_path_join(app.settings['base_url'], 'bigquery', 'v1')
  def make_endpoint(endPoint, handler):
    # Build a (url-regex, handler) pair; '(.*)' captures the path suffix.
    return url_path_join(gcp_v1_endpoint, endPoint) + '(.*)', handler
  # Register every handler from the list_items_handler registry dict.
  app.add_handlers(
      host_pattern,
      [
          (url_path_join(gcp_v1_endpoint, k) + "(.*)", v)
          for (k, v) in handlers.items()
      ],
  )
  # NOTE(review): ListHandler below is not imported anywhere in this module's
  # visible imports — this line looks like it would raise NameError when the
  # extension loads; confirm against the package's actual imports.
  app.add_handlers(host_pattern, [
      # TODO(cbwilkes): Add auth checking if needed.
      # (url_path_join(gcp_v1_endpoint, auth'), AuthHandler)
      make_endpoint('list', ListHandler),
      make_endpoint('datasetdetails', DatasetDetailsHandler),
      make_endpoint('tabledetails', TableDetailsHandler),
      make_endpoint('tablepreview', TablePreviewHandler),
      make_endpoint('query', PagedQueryHandler)
  ])
def load_ipython_extension(ipython):
  """Called by IPython when this module is loaded as an IPython extension."""
  # Register the cell magic under both line and cell forms, in that order.
  for kind in ("line", "cell"):
    ipython.register_magic_function(
        _cell_magic, magic_kind=kind, magic_name="bigquery_editor"
    )
| 35.509091 | 111 | 0.721966 | 219 | 1,953 | 6.082192 | 0.374429 | 0.081081 | 0.041291 | 0.031532 | 0.160661 | 0.123123 | 0.069069 | 0.069069 | 0 | 0 | 0 | 0.003145 | 0.185868 | 1,953 | 54 | 112 | 36.166667 | 0.834591 | 0.152586 | 0 | 0.055556 | 0 | 0 | 0.085962 | 0 | 0 | 0 | 0 | 0.018519 | 0 | 1 | 0.111111 | false | 0 | 0.166667 | 0.055556 | 0.333333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
90eb050355216ee7d1a8b303ce6104092d1b2ec7 | 581 | py | Python | ios_notifications/migrations/0004_auto_20141105_1515.py | chillbear/django-ios-notifications | d48a7862eaa499672f27c192a3cf6f06e06f8117 | [
"BSD-3-Clause"
] | 2 | 2021-12-01T21:34:49.000Z | 2021-12-13T19:22:12.000Z | ios_notifications/migrations/0004_auto_20141105_1515.py | chillbear/django-ios-notifications | d48a7862eaa499672f27c192a3cf6f06e06f8117 | [
"BSD-3-Clause"
] | 1 | 2019-10-04T01:18:32.000Z | 2019-10-04T01:18:32.000Z | ios_notifications/migrations/0004_auto_20141105_1515.py | chillbear/django-ios-notifications | d48a7862eaa499672f27c192a3cf6f06e06f8117 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django_fields.fields
class Migration(migrations.Migration):
    """Make APNService.passphrase optional (null/blank) and encrypted,
    with a max length of 101 characters."""
    dependencies = [
        ('ios_notifications', '0003_notification_loc_payload'),
    ]
    operations = [
        migrations.AlterField(
            model_name='apnservice',
            name='passphrase',
            field=django_fields.fields.EncryptedCharField(help_text=b'Passphrase for the private key', max_length=101, null=True, blank=True),
            preserve_default=True,
        ),
    ]
| 26.409091 | 142 | 0.667814 | 60 | 581 | 6.216667 | 0.75 | 0.064343 | 0.096515 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.017897 | 0.230637 | 581 | 21 | 143 | 27.666667 | 0.816555 | 0.036145 | 0 | 0 | 0 | 0 | 0.172043 | 0.051971 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0.133333 | 0.2 | 0 | 0.4 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 |
90f5a5e0a26c00e35828acb499a24e15b010c10d | 1,574 | py | Python | awx/main/migrations/0156_capture_mesh_topology.py | ziegenberg/awx | a3e29317c5d4220fffe28370ec73c73802255246 | [
"Apache-2.0"
] | 1 | 2019-07-21T11:19:50.000Z | 2019-07-21T11:19:50.000Z | awx/main/migrations/0156_capture_mesh_topology.py | ziegenberg/awx | a3e29317c5d4220fffe28370ec73c73802255246 | [
"Apache-2.0"
] | 2 | 2022-02-10T11:57:21.000Z | 2022-02-27T22:43:44.000Z | awx/main/migrations/0156_capture_mesh_topology.py | ziegenberg/awx | a3e29317c5d4220fffe28370ec73c73802255246 | [
"Apache-2.0"
] | null | null | null | # Generated by Django 2.2.20 on 2021-12-17 19:26
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Capture the receptor mesh topology: extend Instance.node_type with
    'hop', and model directed peer links via a new InstanceLink through
    table feeding an Instance.peers many-to-many."""
    dependencies = [
        ('main', '0155_improved_health_check'),
    ]
    operations = [
        migrations.AlterField(
            model_name='instance',
            name='node_type',
            field=models.CharField(
                choices=[
                    ('control', 'Control plane node'),
                    ('execution', 'Execution plane node'),
                    ('hybrid', 'Controller and execution'),
                    ('hop', 'Message-passing node, no execution capability'),
                ],
                default='hybrid',
                max_length=16,
            ),
        ),
        migrations.CreateModel(
            name='InstanceLink',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('source', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='+', to='main.Instance')),
                ('target', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='reverse_peers', to='main.Instance')),
            ],
            options={
                # Each (source, target) edge may exist only once.
                'unique_together': {('source', 'target')},
            },
        ),
        migrations.AddField(
            model_name='instance',
            name='peers',
            field=models.ManyToManyField(through='main.InstanceLink', to='main.Instance'),
        ),
    ]
| 34.977778 | 141 | 0.540661 | 142 | 1,574 | 5.880282 | 0.549296 | 0.038323 | 0.050299 | 0.079042 | 0.153293 | 0.153293 | 0.153293 | 0.153293 | 0.153293 | 0.153293 | 0 | 0.020735 | 0.325921 | 1,574 | 44 | 142 | 35.772727 | 0.766258 | 0.029225 | 0 | 0.210526 | 1 | 0 | 0.211664 | 0.017038 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0.026316 | 0.052632 | 0 | 0.131579 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
90f9829385890920a9abc0c7d59d052db4801faf | 4,427 | py | Python | commands/data/fusion_data.py | Christ0ph990/Fusion360DevTools | fce10e34b3b92a058d275956c07d1b891ce02192 | [
"MIT"
] | 3 | 2022-02-12T21:00:39.000Z | 2022-03-18T13:17:17.000Z | commands/data/fusion_data.py | Christ0ph990/Fusion360DevTools | fce10e34b3b92a058d275956c07d1b891ce02192 | [
"MIT"
] | null | null | null | commands/data/fusion_data.py | Christ0ph990/Fusion360DevTools | fce10e34b3b92a058d275956c07d1b891ce02192 | [
"MIT"
] | null | null | null | # Copyright 2022 by Autodesk, Inc.
# Permission to use, copy, modify, and distribute this software in object code form
# for any purpose and without fee is hereby granted, provided that the above copyright
# notice appears in all copies and that both that copyright notice and the limited
# warranty and restricted rights notice below appear in all supporting documentation.
#
# AUTODESK PROVIDES THIS PROGRAM "AS IS" AND WITH ALL FAULTS. AUTODESK SPECIFICALLY
# DISCLAIMS ANY IMPLIED WARRANTY OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR USE.
# AUTODESK, INC. DOES NOT WARRANT THAT THE OPERATION OF THE PROGRAM WILL BE
# UNINTERRUPTED OR ERROR FREE.
from dataclasses import dataclass, field
import base64
import pprint
import adsk.core
app = adsk.core.Application.get()
def b64_url_safe_encode(string):
    """URL-safe base64-encode *string*, dropping any trailing '=' padding."""
    raw = string.encode("utf-8")
    encoded = base64.urlsafe_b64encode(raw).decode("utf-8")
    return encoded.rstrip("=")
def b64_url_safe_decode(string):
    """Decode a URL-safe base64 string, tolerating a leading 'a.' id prefix
    and stripped '=' padding.

    Fix: str.lstrip('a.') removes ALL leading 'a'/'.' characters — corrupting
    payloads that legitimately start with 'a' — so strip only the literal
    "a." prefix.
    """
    if string.startswith('a.'):
        string = string[2:]
    # Appending '===' restores any stripped padding; excess '=' is ignored.
    return str(base64.urlsafe_b64decode(string + "==="), "utf-8")
def link_for_url(url: str) -> str:
    """Wrap *url* in an HTML anchor whose visible text is the url itself.

    Fix: the href attribute value is now quoted; an unquoted attribute breaks
    on urls containing spaces or '>'.
    """
    return f'<a href="{url}">{url}</a>'
@dataclass
class FusionData:
    """Identifying metadata for a Fusion 360 DataFile.

    All string fields are derived in __post_init__ from `data_file`, the
    currently active hub, and the current user, including ready-to-use
    Fusion Team URLs and a fusion360:// deep link.
    """
    # This should be set at creation or at least validity checked BEFORE calling this
    data_file: adsk.core.DataFile = field(repr=False, default=None)
    # The following are computed based on current state of Fusion and are not "printed" by default
    hub: adsk.core.DataHub = field(repr=False, init=False)
    project: adsk.core.DataProject = field(repr=False, init=False)
    folder: adsk.core.DataFolder = field(repr=False, init=False)
    user: adsk.core.User = field(repr=False, init=False)
    # All String Properties
    file_name: str = field(init=False)
    user_email: str = field(init=False)
    hub_name: str = field(init=False)
    hub_id: str = field(init=False)
    hub_id_decoded: str = field(init=False)
    hub_team_name: str = field(init=False)
    project_name: str = field(init=False)
    project_id: str = field(init=False)
    project_id_decoded: str = field(init=False)
    folder_name: str = field(init=False)
    folder_id: str = field(init=False)
    lineage_urn: str = field(init=False)
    version_urn: str = field(init=False)
    base64_lineage_urn: str = field(init=False)
    base64_version_urn: str = field(init=False)
    open_from_web: str = field(init=False)
    fusion_team_url: str = field(init=False)
    fusion_team_link: str = field(init=False)
    def __post_init__(self):
        # The following are computed based on current state of Fusion and are not "printed" by default
        self.hub = app.data.activeHub
        self.project = self.data_file.parentProject
        self.folder = self.data_file.parentFolder
        self.user = app.currentUser
        # All String Properties
        self.file_name: str = self.data_file.name
        self.user_email: str = self.user.email
        self.hub_name: str = self.hub.name
        self.hub_id: str = self.hub.id
        self.hub_id_decoded: str = b64_url_safe_decode(self.hub_id)
        # Decoded hub id appears to end in ":<team name>" — the last segment
        # is used as the Fusion Team subdomain below.
        self.hub_team_name: str = self.hub_id_decoded.split(':')[-1]
        self.project_name: str = self.project.name
        self.project_id: str = self.project.id
        self.project_id_decoded: str = b64_url_safe_decode(self.project_id)
        self.folder_name: str = self.folder.name
        self.folder_id: str = self.folder.id
        self.lineage_urn: str = self.data_file.id
        self.version_urn: str = self.data_file.versionId
        self.base64_lineage_urn: str = b64_url_safe_encode(self.lineage_urn)
        self.base64_version_urn: str = b64_url_safe_encode(self.version_urn)
        team_base_url: str = 'autodesk360'
        # Deep link that opens this document in the Fusion 360 desktop app.
        self.open_from_web: str = f"fusion360://userEmail={self.user_email}&" \
                                  f"lineageUrn={self.lineage_urn}&" \
                                  f"hubUrl=https://{self.hub_team_name}.{team_base_url}.com&" \
                                  f"documentName={self.file_name}"
        # Browser URL for the document on Fusion Team, plus an HTML anchor for it.
        self.fusion_team_url: str = f"https://{self.hub_team_name}.{team_base_url}.com/g/data/{self.base64_lineage_urn}"
        self.fusion_team_link = link_for_url(self.fusion_team_url)
    def str_dict(self):
        """Return only the string-valued attributes as a plain dict."""
        return {k: v
                for k, v in self.__dict__.items()
                if isinstance(v, str)}
    def pretty_string(self):
        """Return the string attributes pretty-printed for display/logging."""
        return pprint.pformat(self.str_dict())
| 42.161905 | 120 | 0.688954 | 642 | 4,427 | 4.562305 | 0.267913 | 0.0676 | 0.073745 | 0.104473 | 0.320929 | 0.233527 | 0.114032 | 0.096279 | 0.074428 | 0.051212 | 0 | 0.013154 | 0.210075 | 4,427 | 104 | 121 | 42.567308 | 0.824421 | 0.214592 | 0 | 0 | 0 | 0 | 0.084393 | 0.028613 | 0 | 0 | 0 | 0 | 0 | 1 | 0.084507 | false | 0 | 0.056338 | 0.056338 | 0.549296 | 0.028169 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 |
29057d1781f0e8f9898d6f1c32f5772d89c7df3a | 1,889 | py | Python | darts_search_space/imagenet/rlnas/evolution_search/config.py | megvii-model/RLNAS | a7e2ef9debcd06a93b075181a027b806b737b106 | [
"MIT"
] | 17 | 2021-05-17T04:54:17.000Z | 2022-01-23T09:59:02.000Z | darts_search_space/imagenet/rlnas/evolution_search/config.py | megvii-model/RLNAS | a7e2ef9debcd06a93b075181a027b806b737b106 | [
"MIT"
] | 2 | 2021-07-09T05:14:29.000Z | 2022-02-05T10:15:31.000Z | darts_search_space/imagenet/rlnas/evolution_search/config.py | megvii-model/RLNAS | a7e2ef9debcd06a93b075181a027b806b737b106 | [
"MIT"
] | 8 | 2021-05-28T00:04:20.000Z | 2021-10-18T02:41:34.000Z | import os
class config:
    """Static configuration for the DARTS-space evolutionary search
    (message-broker endpoints, supernet checkpoints, search-space encoding,
    and evolution hyper-parameters)."""
    # RabbitMQ broker used to exchange candidate/test messages.
    host = 'zhangxuanyang.zhangxuanyang.ws2.hh-c.brainpp.cn'
    username = 'admin'
    port = 5672
    # Experiment name derived from this file's directory path.
    exp_name = os.path.dirname(os.path.abspath(__file__))
    exp_name = '-'.join(i for i in exp_name.split(os.path.sep) if i);
    test_send_pipe = exp_name + '-test-send_pipe'
    test_recv_pipe = exp_name + '-test-recv_pipe'
    # Supernet weights at epoch 50 and the untrained (epoch 0) reference.
    net_cache = 'model_and_data/checkpoint_epoch_50.pth.tar'
    initial_net_cache = 'model_and_data/checkpoint_epoch_0.pth.tar'
    layers = 14
    edges = 14
    model_input_size = (1, 3, 224, 224)
    # Candidate operators
    blocks_keys = [
        'none',
        'max_pool_3x3',
        'avg_pool_3x3',
        'skip_connect',
        'sep_conv_3x3',
        'sep_conv_5x5',
        'dil_conv_3x3',
        'dil_conv_5x5'
    ]
    op_num = len(blocks_keys)
    # Operators encoding
    NONE = 0
    MAX_POOLING_3x3 = 1
    AVG_POOL_3x3 = 2
    SKIP_CONNECT = 3
    SEP_CONV_3x3 = 4
    SEP_CONV_5x5 = 5
    DIL_CONV_3x3 = 6
    DIL_CONV_5x5 = 7
    # Candidate filtering limits: latency disabled, FLOPs capped at 600M.
    time_limit=None
    #time_limit=0.050
    speed_input_shape=[32,3,224,224]
    flops_limit=True
    max_flops=600*1e6
    # max_flops=None
    # Evolution hyper-parameters.
    max_epochs=20
    select_num = 10
    population_num = 50
    mutation_num = 25
    m_prob = 0.1
    crossover_num = 25
    momentum = 0.7
    eps = 1e-5
    # Enumerate all paths of a single cell
    paths = [[0, 2, 3, 4, 5], [0, 2, 3, 5], [0, 2, 4, 5], [0, 2, 5], [0, 3, 4, 5], [0, 3, 5], [0, 4, 5], [0, 5],
             [1, 2, 3, 4, 5], [1, 2, 3, 5], [1, 2, 4, 5], [1, 2, 5], [1, 3, 4, 5], [1, 3, 5], [1, 4, 5], [1, 5],
             [0, 2, 3, 4], [0, 2, 4], [0, 3, 4], [0, 4],
             [1, 2, 3, 4], [1, 2, 4], [1, 3, 4], [1, 4],
             [0, 2, 3], [0, 3],
             [1, 2, 3], [1, 3],
             [0, 2],
             [1, 2]]
# Echo selected config values at import time so logs identify the experiment.
for i in ['exp_name']:
    # getattr() is the safe, idiomatic way to look up an attribute by name;
    # the original eval() would execute arbitrary code if the list ever
    # contained untrusted strings.
    print('{}: {}'.format(i, getattr(config, i)))
| 25.527027 | 112 | 0.528322 | 312 | 1,889 | 2.971154 | 0.349359 | 0.01726 | 0.012945 | 0.019417 | 0.10356 | 0.075512 | 0.075512 | 0 | 0 | 0 | 0 | 0.136778 | 0.303335 | 1,889 | 73 | 113 | 25.876712 | 0.567629 | 0.056114 | 0 | 0 | 0 | 0 | 0.155881 | 0.073157 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.018182 | 0 | 0.672727 | 0.018182 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 |
290a002c458607061f9182749313cea5d389910f | 1,757 | py | Python | src/admin.py | kappa243/agh-db-proj | 73a3e69fa11e65e196b3d8a34be0b1051654a7eb | [
"MIT"
] | null | null | null | src/admin.py | kappa243/agh-db-proj | 73a3e69fa11e65e196b3d8a34be0b1051654a7eb | [
"MIT"
] | null | null | null | src/admin.py | kappa243/agh-db-proj | 73a3e69fa11e65e196b3d8a34be0b1051654a7eb | [
"MIT"
] | null | null | null | from flask import Blueprint, request, render_template, flash, redirect, url_for
from flask_login import login_user, login_required, current_user, logout_user
from models import User
from werkzeug.security import generate_password_hash, check_password_hash
from app import db, login_manager
# Blueprint grouping all admin-panel routes under the 'admin' endpoint prefix.
admin = Blueprint('admin', __name__)
@admin.route('/admin', methods=['POST', 'GET'])
@login_required
def admin_panel():
    """Render the admin panel and process user-management form submissions.

    GET lists all users. POST (depending on which fields are present in the
    form) edits a user's name/password, grants or removes admin rights, or
    deletes a user. Non-admin visitors are redirected to the index page.
    """
    # @login_required guarantees authentication; this extra check gates the
    # view on the admin flag of the logged-in user.
    if current_user.is_authenticated:
        user = User.query.get(int(current_user.get_id()))
        if not user.admin:
            return redirect(url_for('index'))
    users = User.query.order_by(User.id).all()
    if request.method == 'POST':
        if 'edit_user' in request.form:
            # Lock the targeted row (SELECT ... FOR UPDATE) for the edit.
            old_username = request.form['edit_user']
            user = db.session.query(User).filter_by(username=old_username).with_for_update().first()
            username = request.form['username']
            password = request.form['password']
            # Empty form fields mean "leave unchanged".
            if len(username) > 0:
                user.username = username
            if len(password) > 0:
                if len(password) >= 3:
                    user.password = generate_password_hash(password, method='sha256')
                else:
                    # Too-short password: warn, keep the old hash.
                    flash('Password must be minimum 3 characters long')
            # Admin-flag toggles apply to the user being edited (these
            # branches are only reachable when 'edit_user' is present).
            if 'grant_admin' in request.form:
                user.admin = True
            if 'remove_admin' in request.form:
                user.admin = False
        if 'delete' in request.form:
            # Bulk delete by username, also under a row lock.
            old_username = request.form['delete']
            User.query.filter_by(username=old_username).with_for_update().delete()
        db.session.commit()
        # Redirect after POST so a browser refresh does not re-submit the form.
        return redirect(url_for('admin.admin_panel'))
    return render_template('admin_panel.html', users=users)
| 39.931818 | 100 | 0.636881 | 216 | 1,757 | 4.976852 | 0.347222 | 0.08186 | 0.048372 | 0.037209 | 0.189767 | 0.189767 | 0.139535 | 0.074419 | 0 | 0 | 0 | 0.005356 | 0.256118 | 1,757 | 43 | 101 | 40.860465 | 0.817138 | 0 | 0 | 0 | 0 | 0 | 0.100912 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.027027 | false | 0.162162 | 0.135135 | 0 | 0.243243 | 0.054054 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 |
290c6742df1f8f4ad0e590b81b60add7140d2294 | 4,321 | py | Python | test/lib/test_map.py | oldmantaiter/inferno | 88da465625d18c6848f4be5fb37e20a5ae2c6db1 | [
"MIT"
] | 1 | 2015-10-15T04:18:14.000Z | 2015-10-15T04:18:14.000Z | test/lib/test_map.py | oldmantaiter/inferno | 88da465625d18c6848f4be5fb37e20a5ae2c6db1 | [
"MIT"
] | null | null | null | test/lib/test_map.py | oldmantaiter/inferno | 88da465625d18c6848f4be5fb37e20a5ae2c6db1 | [
"MIT"
] | null | null | null | import datetime
import types
from nose.tools import eq_
from nose.tools import ok_
from inferno.lib.map import keyset_map
from inferno.lib.rule import InfernoRule
class TestKeysetMap(object):
    """Unit tests for the ``keyset_map`` map function.

    Each test builds a ``parts`` dict plus an ``InfernoRule``, runs the map
    step, and asserts on the exact (key, value) pairs it yields.
    """

    def setUp(self):
        # NOTE: the day is written 1, not 01 -- a leading zero makes the
        # literal Python-2 octal notation and a SyntaxError on Python 3.
        self.data = {
            'city': 'toronto',
            'country': 'canada',
            'population': 100,
            'size': 1000,
            'date': datetime.date(2012, 12, 1)}
        self.rule = InfernoRule(
            key_parts=['country', 'city'],
            value_parts=['population', 'size'])

    def test_keys_and_parts(self):
        expected = [('["_default","canada","toronto"]', [100, 1000])]
        self._assert_map(self.data, self.rule, expected)

    def test_missing_key_part_should_not_yield_result(self):
        del self.data['city']
        expected = []
        self._assert_map(self.data, self.rule, expected)

    def test_missing_value_part_should_yield_result(self):
        del self.data['size']
        expected = [('["_default","canada","toronto"]', [100, 0])]
        self._assert_map(self.data, self.rule, expected)

    def test_null_key_part_should_not_yield_result(self):
        self.data['city'] = None
        expected = []
        self._assert_map(self.data, self.rule, expected)

    def test_null_value_part_should_yield_result(self):
        self.data['size'] = None
        expected = [('["_default","canada","toronto"]', [100, None])]
        self._assert_map(self.data, self.rule, expected)

    def test_empty_key_part_should_yield_result(self):
        self.data['city'] = ''
        expected = [('["_default","canada",""]', [100, 1000])]
        self._assert_map(self.data, self.rule, expected)

    def test_empty_value_part_should_yield_result(self):
        self.data['size'] = ''
        expected = [('["_default","canada","toronto"]', [100, ''])]
        self._assert_map(self.data, self.rule, expected)

    def test_map_serialization(self):
        # key parts are str casted & json serialized, value parts are are not
        # (note the difference between the key date and value date results)
        rule = InfernoRule(
            key_parts=['date'],
            value_parts=['date'])
        expected = [('["_default","2012-12-01"]', [datetime.date(2012, 12, 1)])]
        self._assert_map(self.data, rule, expected)

    def test_field_transforms(self):
        def upper(val):
            return val.upper()

        rule = InfernoRule(
            key_parts=['country', 'city'],
            value_parts=['population', 'size'],
            field_transforms={'city': upper, 'country': upper})
        expected = [('["_default","CANADA","TORONTO"]', [100, 1000])]
        self._assert_map(self.data, rule, expected)

    def test_parts_preprocess_that_yields_multiple_parts(self):
        def lookup_language(parts, params):
            for language in ['french', 'english']:
                parts_copy = parts.copy()
                parts_copy['language'] = language
                yield parts_copy

        rule = InfernoRule(
            key_parts=['country'],
            value_parts=['language'],
            parts_preprocess=[lookup_language])
        expected = [
            ('["_default","canada"]', ['french']),
            ('["_default","canada"]', ['english'])]
        self._assert_map(self.data, rule, expected)

    def test_field_transforms_happen_after_parts_preprocess(self):
        def lookup_language(parts, params):
            for language in ['french', 'english']:
                parts_copy = parts.copy()
                parts_copy['language'] = language
                yield parts_copy

        def upper(val):
            return val.upper()

        rule = InfernoRule(
            key_parts=['country'],
            value_parts=['language'],
            parts_preprocess=[lookup_language],
            field_transforms={'language': upper})
        expected = [
            ('["_default","canada"]', ['FRENCH']),
            ('["_default","canada"]', ['ENGLISH'])]
        self._assert_map(self.data, rule, expected)

    def _assert_map(self, parts, rule, expected):
        """Run keyset_map and check it yields exactly ``expected`` pairs."""
        # turn disco_debug on for more code coverage
        rule.params.disco_debug = True
        actual = keyset_map(parts, rule.params)
        ok_(isinstance(actual, types.GeneratorType))
        eq_(list(actual), expected)
| 36.008333 | 80 | 0.592918 | 480 | 4,321 | 5.079167 | 0.2 | 0.059065 | 0.063987 | 0.076702 | 0.659147 | 0.646432 | 0.621821 | 0.582855 | 0.552502 | 0.510254 | 0 | 0.019213 | 0.265216 | 4,321 | 119 | 81 | 36.310924 | 0.748661 | 0.040731 | 0 | 0.410526 | 0 | 0 | 0.127747 | 0.069548 | 0 | 0 | 0 | 0 | 0.126316 | 0 | null | null | 0 | 0.063158 | null | null | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
29145e423185507525f6b1aaf218e49896993e52 | 22,417 | py | Python | geotrek/tourism/models.py | ker2x/Geotrek-admin | 78e154894d1d78dbb35789285c7def8deaaa2dd3 | [
"BSD-2-Clause"
] | null | null | null | geotrek/tourism/models.py | ker2x/Geotrek-admin | 78e154894d1d78dbb35789285c7def8deaaa2dd3 | [
"BSD-2-Clause"
] | null | null | null | geotrek/tourism/models.py | ker2x/Geotrek-admin | 78e154894d1d78dbb35789285c7def8deaaa2dd3 | [
"BSD-2-Clause"
] | null | null | null | import os
import re
import logging
from django.conf import settings
from django.contrib.gis.db import models
from django.utils.translation import ugettext_lazy as _
from django.utils.formats import date_format
from easy_thumbnails.alias import aliases
from easy_thumbnails.exceptions import InvalidImageFormatError
from easy_thumbnails.files import get_thumbnailer
from mapentity.registry import registry
from mapentity.models import MapEntityMixin
from mapentity.serializers import plain_text, smart_plain_text
from geotrek.authent.models import StructureRelated
from geotrek.core.models import Topology
from geotrek.common.mixins import (NoDeleteMixin, TimeStampedModelMixin,
PictogramMixin, OptionalPictogramMixin,
PublishableMixin, PicturesMixin,
AddPropertyMixin)
from geotrek.common.models import Theme
from geotrek.common.utils import intersecting
from extended_choices import Choices
if 'modeltranslation' in settings.INSTALLED_APPS:
from modeltranslation.manager import MultilingualManager
else:
from django.db.models import Manager as MultilingualManager
# Module-level logger for tourism models.
logger = logging.getLogger(__name__)
def _get_target_choices():
    """ Populate choices using installed apps names.
    """
    choices = [('public', _("Public website"))]
    for model_cls, entity in registry.registry.items():
        if not entity.menu:
            continue
        app_label = model_cls._meta.app_label.lower()
        choices.append((app_label, unicode(entity.label)))
    return tuple(choices)
class InformationDeskType(PictogramMixin):
    """Typology of an information desk (with a pictogram from PictogramMixin)."""
    label = models.CharField(verbose_name=_(u"Label"), max_length=128, db_column='label')
    class Meta:
        db_table = 't_b_type_renseignement'
        verbose_name = _(u"Information desk type")
        verbose_name_plural = _(u"Information desk types")
        ordering = ['label']
    def __unicode__(self):
        return self.label
class InformationDesk(models.Model):
    """A geolocated information point (desk) with contact details and a photo."""
    name = models.CharField(verbose_name=_(u"Title"), max_length=256, db_column='nom')
    type = models.ForeignKey(InformationDeskType, verbose_name=_(u"Type"),
                             related_name='desks', db_column='type')
    description = models.TextField(verbose_name=_(u"Description"), blank=True, db_column='description',
                                   help_text=_(u"Brief description"))
    phone = models.CharField(verbose_name=_(u"Phone"), max_length=32,
                             blank=True, null=True, db_column='telephone')
    email = models.EmailField(verbose_name=_(u"Email"), max_length=256, db_column='email',
                              blank=True, null=True)
    website = models.URLField(verbose_name=_(u"Website"), max_length=256, db_column='website',
                              blank=True, null=True)
    photo = models.FileField(verbose_name=_(u"Photo"), upload_to=settings.UPLOAD_DIR,
                             db_column='photo', max_length=512, blank=True, null=True)
    street = models.CharField(verbose_name=_(u"Street"), max_length=256,
                              blank=True, null=True, db_column='rue')
    postal_code = models.CharField(verbose_name=_(u"Postal code"), max_length=8,
                                   blank=True, null=True, db_column='code')
    municipality = models.CharField(verbose_name=_(u"Municipality"),
                                    blank=True, null=True,
                                    max_length=256, db_column='commune')
    geom = models.PointField(verbose_name=_(u"Emplacement"), db_column='geom',
                             blank=True, null=True,
                             srid=settings.SRID, spatial_index=False)
    objects = models.GeoManager()
    class Meta:
        db_table = 't_b_renseignement'
        verbose_name = _(u"Information desk")
        verbose_name_plural = _(u"Information desks")
        ordering = ['name']
    def __unicode__(self):
        return self.name
    @property
    def description_strip(self):
        """Used in trek public template.

        Collapses runs of ``<br>`` tags (and surrounding whitespace) into a
        single newline, then strips remaining markup to plain text.
        """
        nobr = re.compile(r'(\s*<br.*?>)+\s*', re.I)
        newlines = nobr.sub("\n", self.description)
        return smart_plain_text(newlines)
    @property
    def serializable_type(self):
        """Minimal dict representation of the desk's type for serialization."""
        return {
            'id': self.type.id,
            'label': self.type.label,
            'pictogram': self.type.pictogram.url,
        }
    @property
    def latitude(self):
        """Latitude in the API spatial reference, or None without geometry."""
        if self.geom:
            api_geom = self.geom.transform(settings.API_SRID, clone=True)
            return api_geom.y
        return None
    @property
    def longitude(self):
        """Longitude in the API spatial reference, or None without geometry."""
        if self.geom:
            api_geom = self.geom.transform(settings.API_SRID, clone=True)
            return api_geom.x
        return None
    @property
    def thumbnail(self):
        """Thumbnail of ``photo`` via the easy-thumbnails 'thumbnail' alias.

        Returns None when there is no photo or the image is invalid/missing
        on disk (the failure is logged, not raised).
        """
        if not self.photo:
            return None
        thumbnailer = get_thumbnailer(self.photo)
        try:
            return thumbnailer.get_thumbnail(aliases.get('thumbnail'))
        except InvalidImageFormatError:
            logger.warning(_("Image %s invalid or missing from disk.") % self.photo)
            return None
    @property
    def photo_url(self):
        """Media URL of the thumbnail, or None when no thumbnail is available."""
        thumbnail = self.thumbnail
        if not thumbnail:
            return None
        return os.path.join(settings.MEDIA_URL, thumbnail.name)
# Geometry kinds a touristic content category may constrain its contents to.
# Each extended_choices triple is presumably (constant name, stored value,
# human-readable label).
GEOMETRY_TYPES = Choices(
    ('POINT', 'point', _('Point')),
    ('LINE', 'line', _('Line')),
    ('POLYGON', 'polygon', _('Polygon')),
    ('ANY', 'any', _('Any')),
)
class TouristicContentCategory(PictogramMixin):
    """Category of touristic contents.

    Defines the geometry kind its contents are allowed to use and the labels
    of the two type-choice lists shown in forms.
    """
    label = models.CharField(verbose_name=_(u"Label"), max_length=128, db_column='nom')
    geometry_type = models.CharField(db_column="type_geometrie", max_length=16,
                                     choices=GEOMETRY_TYPES, default=GEOMETRY_TYPES.POINT)
    type1_label = models.CharField(verbose_name=_(u"First list label"), max_length=128,
                                   db_column='label_type1', blank=True)
    type2_label = models.CharField(verbose_name=_(u"Second list label"), max_length=128,
                                   db_column='label_type2', blank=True)
    order = models.IntegerField(verbose_name=_(u"Order"), null=True, blank=True, db_column='tri',
                                help_text=_(u"Alphabetical order if blank"))
    # Prefix used by prefixed_id below ('C' for content categories).
    id_prefix = 'C'
    class Meta:
        db_table = 't_b_contenu_touristique_categorie'
        verbose_name = _(u"Touristic content category")
        verbose_name_plural = _(u"Touristic content categories")
        ordering = ['order', 'label']
    def __unicode__(self):
        return self.label
    @property
    def prefixed_id(self):
        """Primary key prefixed with the category marker, e.g. 'C12'."""
        return '{prefix}{id}'.format(prefix=self.id_prefix, id=self.id)
class TouristicContentType(OptionalPictogramMixin):
    """Type of touristic content, attached to one of its category's two lists."""
    label = models.CharField(verbose_name=_(u"Label"), max_length=128, db_column='nom')
    category = models.ForeignKey(TouristicContentCategory, related_name='types',
                                 verbose_name=_(u"Category"), db_column='categorie')
    # Choose in which list of choices this type will appear
    in_list = models.IntegerField(choices=((1, _(u"First")), (2, _(u"Second"))), db_column='liste_choix')
    class Meta:
        db_table = 't_b_contenu_touristique_type'
        verbose_name = _(u"Touristic content type")
        # NOTE(review): plural label repeats the singular form -- probably
        # should read "Touristic content types"; changing it alters a
        # translated UI string, so confirm before fixing.
        verbose_name_plural = _(u"Touristic content type")
        ordering = ['label']
    def __unicode__(self):
        return self.label
class TouristicContentType1Manager(MultilingualManager):
    """Manager restricting queries to types belonging to the first list."""
    def get_queryset(self):
        base_qs = super(TouristicContentType1Manager, self).get_queryset()
        return base_qs.filter(in_list=1)
class TouristicContentType2Manager(MultilingualManager):
    """Manager restricting queries to types belonging to the second list."""
    def get_queryset(self):
        base_qs = super(TouristicContentType2Manager, self).get_queryset()
        return base_qs.filter(in_list=2)
class TouristicContentType1(TouristicContentType):
    """Proxy of TouristicContentType limited to the first choice list."""
    objects = TouristicContentType1Manager()
    def __init__(self, *args, **kwargs):
        # Force new instances into list 1, matching the manager's filter.
        self._meta.get_field('in_list').default = 1
        super(TouristicContentType1, self).__init__(*args, **kwargs)
    class Meta:
        proxy = True
        verbose_name = _(u"Type")
        verbose_name_plural = _(u"First list types")
class TouristicContentType2(TouristicContentType):
    """Proxy of TouristicContentType limited to the second choice list."""
    objects = TouristicContentType2Manager()
    def __init__(self, *args, **kwargs):
        # Force new instances into list 2, matching the manager's filter.
        self._meta.get_field('in_list').default = 2
        super(TouristicContentType2, self).__init__(*args, **kwargs)
    class Meta:
        proxy = True
        verbose_name = _(u"Type")
        verbose_name_plural = _(u"Second list types")
class ReservationSystem(models.Model):
    """External booking system a touristic content may be reserved through."""
    name = models.CharField(verbose_name=_(u"Name"), max_length=256,
                            blank=False, null=False, unique=True)
    def __unicode__(self):
        return self.name
    class Meta:
        db_table = 't_b_systeme_reservation'
        verbose_name = _(u"Reservation system")
        verbose_name_plural = _(u"Reservation systems")
class TouristicContent(AddPropertyMixin, PublishableMixin, MapEntityMixin, StructureRelated,
                       TimeStampedModelMixin, PicturesMixin, NoDeleteMixin):
    """ A generic touristic content (accomodation, museum, etc.) in the park
    """
    description_teaser = models.TextField(verbose_name=_(u"Description teaser"), blank=True,
                                          help_text=_(u"A brief summary"), db_column='chapeau')
    description = models.TextField(verbose_name=_(u"Description"), blank=True, db_column='description',
                                   help_text=_(u"Complete description"))
    themes = models.ManyToManyField(Theme, related_name="touristiccontents",
                                    db_table="t_r_contenu_touristique_theme", blank=True, verbose_name=_(u"Themes"),
                                    help_text=_(u"Main theme(s)"))
    geom = models.GeometryField(verbose_name=_(u"Location"), srid=settings.SRID)
    category = models.ForeignKey(TouristicContentCategory, related_name='contents',
                                 verbose_name=_(u"Category"), db_column='categorie')
    contact = models.TextField(verbose_name=_(u"Contact"), blank=True, db_column='contact',
                               help_text=_(u"Address, phone, etc."))
    email = models.EmailField(verbose_name=_(u"Email"), max_length=256, db_column='email',
                              blank=True, null=True)
    website = models.URLField(verbose_name=_(u"Website"), max_length=256, db_column='website',
                              blank=True, null=True)
    practical_info = models.TextField(verbose_name=_(u"Practical info"), blank=True, db_column='infos_pratiques',
                                      help_text=_(u"Anything worth to know"))
    # Two independent type lists; which types are valid is constrained by the
    # category's type1_label/type2_label configuration.
    type1 = models.ManyToManyField(TouristicContentType, related_name='contents1',
                                   verbose_name=_(u"Type 1"), db_table="t_r_contenu_touristique_type1",
                                   blank=True)
    type2 = models.ManyToManyField(TouristicContentType, related_name='contents2',
                                   verbose_name=_(u"Type 2"), db_table="t_r_contenu_touristique_type2",
                                   blank=True)
    source = models.ManyToManyField('common.RecordSource',
                                    blank=True, related_name='touristiccontents',
                                    verbose_name=_("Source"), db_table='t_r_contenu_touristique_source')
    portal = models.ManyToManyField('common.TargetPortal',
                                    blank=True, related_name='touristiccontents',
                                    verbose_name=_("Portal"), db_table='t_r_contenu_touristique_portal')
    eid = models.CharField(verbose_name=_(u"External id"), max_length=1024, blank=True, null=True, db_column='id_externe')
    reservation_system = models.ForeignKey(ReservationSystem, verbose_name=_(u"Reservation system"),
                                           blank=True, null=True)
    reservation_id = models.CharField(verbose_name=_(u"Reservation ID"), max_length=1024,
                                      blank=True, db_column='id_reservation')
    approved = models.BooleanField(verbose_name=_(u"Approved"), default=False, db_column='labellise')
    objects = NoDeleteMixin.get_manager_cls(models.GeoManager)()
    class Meta:
        db_table = 't_t_contenu_touristique'
        verbose_name = _(u"Touristic content")
        verbose_name_plural = _(u"Touristic contents")
    def __unicode__(self):
        return self.name
    @property
    def districts_display(self):
        """Comma-separated list of district names."""
        return ', '.join([unicode(d) for d in self.districts])
    @property
    def type1_label(self):
        """Label of the first type list, delegated to the category."""
        return self.category.type1_label
    @property
    def type2_label(self):
        """Label of the second type list, delegated to the category."""
        return self.category.type2_label
    @property
    def type1_display(self):
        """Comma-separated names of the first-list types."""
        return ', '.join([unicode(n) for n in self.type1.all()])
    @property
    def type2_display(self):
        """Comma-separated names of the second-list types."""
        return ', '.join([unicode(n) for n in self.type2.all()])
    @property
    def prefixed_category_id(self):
        return self.category.prefixed_id
    def distance(self, to_cls):
        """Intersection margin (meters, per settings) used by `intersecting`."""
        return settings.TOURISM_INTERSECTION_MARGIN
    @property
    def type(self):
        """Fake type to simulate POI for mobile app v1"""
        return self.category
    @property
    def min_elevation(self):
        # Contents carry no altimetry; expose 0 to mimic the POI interface.
        return 0
    @property
    def max_elevation(self):
        # Contents carry no altimetry; expose 0 to mimic the POI interface.
        return 0
    @property
    def portal_display(self):
        return ', '.join([unicode(portal) for portal in self.portal.all()])
    @property
    def source_display(self):
        # NOTE(review): joins with ',' while portal_display uses ', ' --
        # inconsistent separators; confirm templates before unifying.
        return ','.join([unicode(source) for source in self.source.all()])
    @property
    def themes_display(self):
        return ','.join([unicode(source) for source in self.themes.all()])
    @property
    def extent(self):
        """Bounding box of the geometry (buffered by 10 units) in API SRID."""
        return self.geom.buffer(10).transform(settings.API_SRID, clone=True).extent
    @property
    def rando_url(self):
        """Relative URL of this content on the public (rando) website."""
        category_slug = _(u'touristic-content')
        return '{}/{}/'.format(category_slug, self.slug)
    @property
    def meta_description(self):
        """Plain-text HTML meta description, capped at 500 characters."""
        return plain_text(self.description_teaser or self.description)[:500]
# Dynamically expose touristic contents that spatially intersect a given
# object (all contents, and published-only variants) on core topologies and
# on touristic contents themselves.
Topology.add_property('touristic_contents', lambda self: intersecting(TouristicContent, self), _(u"Touristic contents"))
Topology.add_property('published_touristic_contents', lambda self: intersecting(TouristicContent, self).filter(published=True), _(u"Published touristic contents"))
TouristicContent.add_property('touristic_contents', lambda self: intersecting(TouristicContent, self), _(u"Touristic contents"))
TouristicContent.add_property('published_touristic_contents', lambda self: intersecting(TouristicContent, self).filter(published=True), _(u"Published touristic contents"))
class TouristicEventType(OptionalPictogramMixin):
    """Typology of a touristic event (conference, workshop, ...)."""
    type = models.CharField(verbose_name=_(u"Type"), max_length=128, db_column='type')
    class Meta:
        db_table = 't_b_evenement_touristique_type'
        verbose_name = _(u"Touristic event type")
        verbose_name_plural = _(u"Touristic event types")
        ordering = ['type']
    def __unicode__(self):
        return self.type
class TouristicEvent(AddPropertyMixin, PublishableMixin, MapEntityMixin, StructureRelated,
                     PicturesMixin, TimeStampedModelMixin, NoDeleteMixin):
    """ A touristic event (conference, workshop, etc.) in the park
    """
    description_teaser = models.TextField(verbose_name=_(u"Description teaser"), blank=True,
                                          help_text=_(u"A brief summary"), db_column='chapeau')
    description = models.TextField(verbose_name=_(u"Description"), blank=True, db_column='description',
                                   help_text=_(u"Complete description"))
    themes = models.ManyToManyField(Theme, related_name="touristic_events",
                                    db_table="t_r_evenement_touristique_theme", blank=True, verbose_name=_(u"Themes"),
                                    help_text=_(u"Main theme(s)"))
    geom = models.PointField(verbose_name=_(u"Location"), srid=settings.SRID)
    # Optional date range; each bound may be left open (see dates_display).
    begin_date = models.DateField(blank=True, null=True, verbose_name=_(u"Begin date"), db_column='date_debut')
    end_date = models.DateField(blank=True, null=True, verbose_name=_(u"End date"), db_column='date_fin')
    duration = models.CharField(verbose_name=_(u"Duration"), max_length=64, blank=True, db_column='duree',
                                help_text=_(u"3 days, season, ..."))
    meeting_point = models.CharField(verbose_name=_(u"Meeting point"), max_length=256, blank=True, db_column='point_rdv',
                                     help_text=_(u"Where exactly ?"))
    meeting_time = models.TimeField(verbose_name=_(u"Meeting time"), blank=True, null=True, db_column='heure_rdv',
                                    help_text=_(u"11:00, 23:30"))
    contact = models.TextField(verbose_name=_(u"Contact"), blank=True, db_column='contact')
    email = models.EmailField(verbose_name=_(u"Email"), max_length=256, db_column='email',
                              blank=True, null=True)
    website = models.URLField(verbose_name=_(u"Website"), max_length=256, db_column='website',
                              blank=True, null=True)
    organizer = models.CharField(verbose_name=_(u"Organizer"), max_length=256, blank=True, db_column='organisateur')
    speaker = models.CharField(verbose_name=_(u"Speaker"), max_length=256, blank=True, db_column='intervenant')
    type = models.ForeignKey(TouristicEventType, verbose_name=_(u"Type"), blank=True, null=True, db_column='type')
    accessibility = models.CharField(verbose_name=_(u"Accessibility"), max_length=256, blank=True, db_column='accessibilite')
    participant_number = models.CharField(verbose_name=_(u"Number of participants"), max_length=256, blank=True, db_column='nb_places')
    booking = models.TextField(verbose_name=_(u"Booking"), blank=True, db_column='reservation')
    target_audience = models.CharField(verbose_name=_(u"Target audience"), max_length=128, blank=True, null=True, db_column='public_vise')
    practical_info = models.TextField(verbose_name=_(u"Practical info"), blank=True, db_column='infos_pratiques',
                                      help_text=_(u"Recommandations / To plan / Advices"))
    source = models.ManyToManyField('common.RecordSource',
                                    blank=True, related_name='touristicevents',
                                    verbose_name=_("Source"), db_table='t_r_evenement_touristique_source')
    portal = models.ManyToManyField('common.TargetPortal',
                                    blank=True, related_name='touristicevents',
                                    verbose_name=_("Portal"), db_table='t_r_evenement_touristique_portal')
    eid = models.CharField(verbose_name=_(u"External id"), max_length=1024, blank=True, null=True, db_column='id_externe')
    approved = models.BooleanField(verbose_name=_(u"Approved"), default=False, db_column='labellise')
    objects = NoDeleteMixin.get_manager_cls(models.GeoManager)()
    # Prefix used by prefixed_category_id ('E' for events).
    category_id_prefix = 'E'
    class Meta:
        db_table = 't_t_evenement_touristique'
        verbose_name = _(u"Touristic event")
        verbose_name_plural = _(u"Touristic events")
        ordering = ['-begin_date']
    def __unicode__(self):
        return self.name
    @property
    def type1(self):
        # Mimic TouristicContent's two-list interface: single type as list 1.
        return [self.type] if self.type else []
    @property
    def type2(self):
        # Events have no second type list.
        return []
    @property
    def districts_display(self):
        """Comma-separated list of district names."""
        return ', '.join([unicode(d) for d in self.districts])
    @property
    def dates_display(self):
        """Human-readable date range, handling open-ended and one-day cases."""
        if not self.begin_date and not self.end_date:
            return u""
        elif not self.end_date:
            return _(u"starting from {begin}").format(
                begin=date_format(self.begin_date, 'SHORT_DATE_FORMAT'))
        elif not self.begin_date:
            return _(u"up to {end}").format(
                end=date_format(self.end_date, 'SHORT_DATE_FORMAT'))
        elif self.begin_date == self.end_date:
            return date_format(self.begin_date, 'SHORT_DATE_FORMAT')
        else:
            return _(u"from {begin} to {end}").format(
                begin=date_format(self.begin_date, 'SHORT_DATE_FORMAT'),
                end=date_format(self.end_date, 'SHORT_DATE_FORMAT'))
    @property
    def prefixed_category_id(self):
        return self.category_id_prefix
    def distance(self, to_cls):
        """Intersection margin (meters, per settings) used by `intersecting`."""
        return settings.TOURISM_INTERSECTION_MARGIN
    @property
    def portal_display(self):
        return ', '.join([unicode(portal) for portal in self.portal.all()])
    @property
    def source_display(self):
        return ', '.join([unicode(source) for source in self.source.all()])
    @property
    def themes_display(self):
        # NOTE(review): joins with ',' while the displays above use ', ' --
        # inconsistent separators; confirm templates before unifying.
        return ','.join([unicode(source) for source in self.themes.all()])
    @property
    def rando_url(self):
        """Relative URL of this event on the public (rando) website."""
        category_slug = _(u'touristic-event')
        return '{}/{}/'.format(category_slug, self.slug)
    @property
    def meta_description(self):
        """Plain-text HTML meta description, capped at 500 characters."""
        return plain_text(self.description_teaser or self.description)[:500]
# Cross-expose intersecting contents/events (all, and published-only) on
# touristic events, core topologies, and touristic contents.
TouristicEvent.add_property('touristic_contents', lambda self: intersecting(TouristicContent, self), _(u"Touristic contents"))
TouristicEvent.add_property('published_touristic_contents', lambda self: intersecting(TouristicContent, self).filter(published=True), _(u"Published touristic contents"))
Topology.add_property('touristic_events', lambda self: intersecting(TouristicEvent, self), _(u"Touristic events"))
Topology.add_property('published_touristic_events', lambda self: intersecting(TouristicEvent, self).filter(published=True), _(u"Published touristic events"))
TouristicContent.add_property('touristic_events', lambda self: intersecting(TouristicEvent, self), _(u"Touristic events"))
TouristicContent.add_property('published_touristic_events', lambda self: intersecting(TouristicEvent, self).filter(published=True), _(u"Published touristic events"))
TouristicEvent.add_property('touristic_events', lambda self: intersecting(TouristicEvent, self), _(u"Touristic events"))
TouristicEvent.add_property('published_touristic_events', lambda self: intersecting(TouristicEvent, self).filter(published=True), _(u"Published touristic events"))
| 43.868885 | 171 | 0.657581 | 2,503 | 22,417 | 5.636037 | 0.145825 | 0.06316 | 0.056993 | 0.040547 | 0.630963 | 0.555823 | 0.481818 | 0.436166 | 0.41008 | 0.381371 | 0 | 0.008463 | 0.23045 | 22,417 | 510 | 172 | 43.954902 | 0.809286 | 0.014498 | 0 | 0.40404 | 0 | 0 | 0.145538 | 0.026651 | 0 | 0 | 0 | 0 | 0 | 1 | 0.118687 | false | 0 | 0.05303 | 0.088384 | 0.540404 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 |
2917f428a5344543b5b9765a392fb7105a798a1a | 4,384 | py | Python | app/models.py | TrigeekSpace/academia-bknd | bd3b821240ef50868cd7d7b59c8d25e71086e70e | [
"BSD-3-Clause"
] | null | null | null | app/models.py | TrigeekSpace/academia-bknd | bd3b821240ef50868cd7d7b59c8d25e71086e70e | [
"BSD-3-Clause"
] | null | null | null | app/models.py | TrigeekSpace/academia-bknd | bd3b821240ef50868cd7d7b59c8d25e71086e70e | [
"BSD-3-Clause"
] | null | null | null | """ SQLAlchemy database models. """
from datetime import datetime
from depot.fields.sqlalchemy import UploadedFileField
from app import db
from app.util.data import many_to_many, foreign_key
from app.config import TOKEN_LEN
class User(db.Model):
    """ User model class. """
    id = db.Column(db.Integer(), primary_key=True, autoincrement=True)
    username = db.Column(db.String(32), unique=True)
    email = db.Column(db.String(64), unique=True)
    # 32 raw bytes -- presumably a password hash; hashing scheme lives elsewhere.
    password = db.Column(db.Binary(32))
    join_date = db.Column(db.DateTime(), default=datetime.now)
    # Activation flag; new accounts start inactive.
    active = db.Column(db.Boolean(), default=False)
    avatar = db.Column(UploadedFileField())
    # NOTE(review): unique=True on free-text fields below forbids two users
    # sharing the same introduction or job title -- confirm this is intended.
    self_introduction = db.Column(db.Text(), unique=True)
    contribution = db.Column(db.Integer(), default=0)
    job = db.Column(db.String(64), unique=True)
class Session(db.Model):
    """ API session class. """
    # Opaque session token (TOKEN_LEN raw bytes) used as the primary key.
    token = db.Column(db.Binary(TOKEN_LEN), primary_key=True)
    # Owning user; foreign_key() presumably returns (relationship, fk column).
    user, user_id = foreign_key("User", backref_name="sessions")
class AbstractBaseGroup(object):
    """Abstract base (marker) class for group-like models.

    Defines no fields or behavior of its own; the docstring serves as the
    class body, so no redundant ``pass`` statement is needed.
    """
class Group(db.Model, AbstractBaseGroup):
    """ Group model class. """
    id = db.Column(db.Integer(), primary_key=True, autoincrement=True)
    name = db.Column(db.String(32), unique=True)
    # Members of the group (many-to-many; User gains a 'groups' backref).
    users = many_to_many("Group", "User", backref_name="groups")
    introduction = db.Column(db.Text())
class Paper(db.Model):
    """ Paper model class. """
    id = db.Column(db.Integer(), primary_key=True, autoincrement=True)
    title = db.Column(db.String(256), unique=False)
    abstract = db.Column(db.Text(), unique=False)
    authors = db.Column(db.String(256), unique=False)
    conference = db.Column(db.String(128), unique=False)
    publish_date = db.Column(db.DateTime(), default=datetime.now) # Accurate to the day
    # Users owning the paper, groups it belongs to, and users who bookmarked it.
    owners = many_to_many("Paper", "User", backref_name="papers")
    owngroup = many_to_many("Paper", "Group", backref_name="papers")
    collectors = many_to_many("Paper", "User", backref_name="collect_papers")
    # Uploaded PDF (or similar) stored via filedepot.
    paper_file = db.Column(UploadedFileField())
class Note(db.Model):
    """ Note model class: a user's note attached to a paper. """
    id = db.Column(db.Integer(), primary_key=True, autoincrement=True)
    title = db.Column(db.String(256), unique=False)
    create_time = db.Column(db.DateTime(), default=datetime.now)
    # NOTE(review): default only sets the creation value; updating on edit
    # must be handled elsewhere (no onupdate here).
    last_modified = db.Column(db.DateTime(), default=datetime.now)
    author, author_id = foreign_key("User", backref_name="notes")
    paper, paper_id = foreign_key("Paper", backref_name="notes")
    # Users who bookmarked the note and groups it is shared with.
    collectors = many_to_many("Note", "User", backref_name="collect_notes")
    owngroup = many_to_many("Note", "Group", backref_name="notes")
    content = db.Column(db.Text(), unique=False)
    # Attached annotation file stored via filedepot.
    annotation_file = db.Column(UploadedFileField())
class Question(db.Model):
    """A question asked by a user, with up/down votes and timestamps."""
    id = db.Column(db.Integer(), primary_key=True, autoincrement=True)
    provider, provider_id = foreign_key("User", backref_name="questions_asked")
    # NOTE(review): 'titie' looks like a typo for 'title'; renaming would
    # change the column and any referencing code -- fix with a migration.
    titie = db.Column(db.String(256), unique=False)
    description = db.Column(db.Text(), unique=False)
    upvotes = many_to_many("Question", "User", backref_name="questions_upvote")
    downvotes = many_to_many("Question", "User", backref_name="questions_downvote")
    create_time = db.Column(db.DateTime(), default=datetime.now)
    last_modified = db.Column(db.DateTime(), default=datetime.now)
class Reply(db.Model):
    """An answer to a question, with up/down votes and timestamps."""
    id = db.Column(db.Integer(), primary_key=True, autoincrement=True)
    provider, provider_id = foreign_key("User", backref_name="replies")
    host_question, q_id = foreign_key("Question", backref_name="replies")
    content = db.Column(db.Text())
    upvotes = many_to_many("Reply", "User", backref_name="replies_upvote")
    downvotes = many_to_many("Reply", "User", backref_name="replies_downvote")
    create_time = db.Column(db.DateTime(), default=datetime.now)
    last_modified = db.Column(db.DateTime(), default=datetime.now)
class Comment(db.Model):
    """Comment model: a comment attached to a Question or to a Reply."""
    # Surrogate primary key.
    id = db.Column(db.Integer(), primary_key=True, autoincrement=True)
    # User who wrote the comment.
    provider, provider_id = foreign_key("User", backref_name="comments")
    # Host objects: a comment may hang off a question or off a reply.
    # NOTE(review): both FKs exist side by side — presumably only one is set
    # per row; confirm in the code that creates comments.
    host_question, q_id = foreign_key("Question", backref_name="comments")
    host_reply, r_id = foreign_key("Reply", backref_name="comments")
    # Comment body text.
    content = db.Column(db.Text(), unique=False)
    # Creation and last-edit timestamps (default to row-creation time).
    create_time = db.Column(db.DateTime(), default=datetime.now)
    last_modified = db.Column(db.DateTime(), default=datetime.now)
| 47.652174 | 87 | 0.707345 | 596 | 4,384 | 5.04698 | 0.177852 | 0.106383 | 0.123005 | 0.05984 | 0.643949 | 0.585106 | 0.550532 | 0.455785 | 0.376662 | 0.347407 | 0 | 0.006893 | 0.139599 | 4,384 | 91 | 88 | 48.175824 | 0.790562 | 0.039918 | 0 | 0.256757 | 0 | 0 | 0.078493 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0.027027 | 0.067568 | 0 | 0.864865 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 |
291d8e921326cbecc63bc712d0993323051bed1f | 691 | py | Python | tests/test_demo.py | aaronestrada/flask-restplus-swagger-relative | e951bad6a2c72522ac74f5353a7b0cbe5436f20f | [
"BSD-3-Clause"
] | 3 | 2019-09-27T18:33:54.000Z | 2020-03-31T15:32:32.000Z | tests/test_demo.py | aaronestrada/flask-restplus-swagger-relative | e951bad6a2c72522ac74f5353a7b0cbe5436f20f | [
"BSD-3-Clause"
] | 1 | 2019-10-29T20:31:33.000Z | 2019-11-04T14:25:08.000Z | tests/test_demo.py | aaronestrada/flask-restplus-swagger-relative | e951bad6a2c72522ac74f5353a7b0cbe5436f20f | [
"BSD-3-Clause"
] | 1 | 2019-09-27T18:33:55.000Z | 2019-09-27T18:33:55.000Z | import pytest
from tests.test_application import app
@pytest.fixture
def client():
    """Yield a Flask test client for the demo application."""
    yield app.test_client()
def test_hello_resource(client):
    """Check that GET /hello answers with the expected JSON greeting.

    :param client: Test client object
    """
    payload = client.get('/hello').get_json()
    assert payload['hello'] == 'world'
def test_asset_found(client):
    """
    Test if Swagger assets are accessible from the new path
    :param client: Test client object
    :return:
    """
    response = client.get('/this_is_a_new/path_for_swagger_internal_documentation/swaggerui/swagger-ui-bundle.js')
    # Bug fix: the original used `is 200`, an identity check that only worked
    # by accident of CPython's small-int caching and emits a SyntaxWarning on
    # Python 3.8+. Status codes must be compared by value.
    assert response.status_code == 200
| 23.827586 | 114 | 0.700434 | 93 | 691 | 5.043011 | 0.516129 | 0.085288 | 0.051173 | 0.089552 | 0.21322 | 0.21322 | 0.21322 | 0.21322 | 0.21322 | 0 | 0 | 0.005425 | 0.199711 | 691 | 28 | 115 | 24.678571 | 0.842676 | 0.279305 | 0 | 0 | 0 | 0 | 0.223947 | 0.18847 | 0 | 0 | 0 | 0 | 0.166667 | 1 | 0.25 | false | 0 | 0.166667 | 0 | 0.416667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
291e921dde8646cb27f33c258f33f46413f66a28 | 1,614 | py | Python | 01_Introduction to Python/3-functions-and-packages/03_multiple-arguments.py | mohd-faizy/DataScience-With-Python | 13ebb10cf9083343056d5b782957241de1d595f9 | [
"MIT"
] | 5 | 2021-02-03T14:36:58.000Z | 2022-01-01T10:29:26.000Z | 01_Introduction to Python/3-functions-and-packages/03_multiple-arguments.py | mohd-faizy/DataScience-With-Python | 13ebb10cf9083343056d5b782957241de1d595f9 | [
"MIT"
] | null | null | null | 01_Introduction to Python/3-functions-and-packages/03_multiple-arguments.py | mohd-faizy/DataScience-With-Python | 13ebb10cf9083343056d5b782957241de1d595f9 | [
"MIT"
] | 3 | 2021-02-08T00:31:16.000Z | 2022-03-17T13:52:32.000Z | '''
03 - Multiple arguments
In the previous exercise, the square brackets around imag in the documentation showed us that the
imag argument is optional. But Python also uses a different way to tell users about arguments being
optional.
Have a look at the documentation of sorted() by typing help(sorted) in the IPython Shell.
You'll see that sorted() takes three arguments: iterable, key and reverse.
key=None means that if you don't specify the key argument, it will be None. reverse=False means
that if you don't specify the reverse argument, it will be False.
In this exercise, you'll only have to specify iterable and reverse, not key. The first input you
pass to sorted() will be matched to the iterable argument, but what about the second input? To tell
Python you want to specify reverse without changing anything about key, you can use =:
sorted(___, reverse = ___)
Two lists have been created for you on the right. Can you paste them together and sort them in
descending order?
Note: For now, we can understand an iterable as being any collection of objects, e.g. a List.
Instructions:
- Use + to merge the contents of first and second into a new list: full.
- Call sorted() on full and specify the reverse argument to be True. Save the sorted list as
full_sorted.
- Finish off by printing out full_sorted.
'''
# The two lists of course areas to combine
first = [11.25, 18.0, 20.0]
second = [10.75, 9.50]
# Merge the contents of first and second into one list: full
full = [*first, *second]
# Order full from largest to smallest: full_sorted
full_sorted = sorted(full, reverse=True)
# Show the result
print(full_sorted)
291f1330f75cfc0ca15457846d8102779d88cf8f | 790 | py | Python | Taller_Algoritmos_02/Ejercicio_10.py | Angelio01/algoritmos_programacion- | 63cb4cd4cfa01f504bf9ed927dcebf2466d6f60d | [
"MIT"
] | null | null | null | Taller_Algoritmos_02/Ejercicio_10.py | Angelio01/algoritmos_programacion- | 63cb4cd4cfa01f504bf9ed927dcebf2466d6f60d | [
"MIT"
] | null | null | null | Taller_Algoritmos_02/Ejercicio_10.py | Angelio01/algoritmos_programacion- | 63cb4cd4cfa01f504bf9ed927dcebf2466d6f60d | [
"MIT"
] | 1 | 2021-10-29T19:40:32.000Z | 2021-10-29T19:40:32.000Z | """
Entradas: 3 Valores flotantes que son el valor de diferentes monedas
Chelines autriacos --> float --> x
Dramas griegos --> float --> z
Pesetas --> float --> w
Salidas 4 valores flotantes que es la conversión de las anteriores monedas
Pesetas --> float --> x
Francos franceses --> float --> z
Dolares --> float --> a
Liras italianas --> float --> b
"""
# Inputs: three currency amounts read from stdin (prompts are in Spanish).
x1 = float(input("Dime los chelines autríacos\n"))
z1 = float(input("Dime los dracmas griegos\n"))
w = float(input("Dime las pesetas\n"))
# "Black box": hard-coded conversion factors.
# NOTE(review): the exchange rates (956871/100, 22.64572381, 122499,
# 100/9289) cannot be verified from this file — confirm them against the
# exercise statement.
x = (x1 * 956871)/100
z = z1/22.64572381
a = w/122499
b = (w*100)/9289
# Outputs: print all four conversion results.
print(f"\n{x1} Chelines austríacos en pesetas son {x}\n{z1} Dracmas griegos en Francos franceses son {z}\n{w} Pesetas en Dolares son {a}\n{w} Pesetas en Liras italianas son {b}\n")
29204de0e1568db751699c8bf504b18e9d16ff4b | 4,049 | py | Python | estacionamientos/forms.py | ShadowManu/SAGE | 999626669c9a15755ed409e57864851eb27dc2c2 | [
"MIT"
] | null | null | null | estacionamientos/forms.py | ShadowManu/SAGE | 999626669c9a15755ed409e57864851eb27dc2c2 | [
"MIT"
] | null | null | null | estacionamientos/forms.py | ShadowManu/SAGE | 999626669c9a15755ed409e57864851eb27dc2c2 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from django import forms
from estacionamientos.models import Estacionamiento, Reserva, Pago
class EstacionamientosForm(forms.ModelForm):
    """ModelForm for creating/editing an Estacionamiento (parking lot).

    Every field uses a Bootstrap 'form-control' widget with a Spanish
    placeholder; placeholders are user-visible strings and must not change.
    """
    # Owner and lot identification.
    nombre_duenio = forms.CharField(widget=forms.TextInput(
        attrs={'class': 'form-control', 'placeholder': 'Nombre del Dueño'}))
    nombre_est = forms.CharField(widget=forms.TextInput(
        attrs={'class': 'form-control', 'placeholder': 'Nombre del Estacionamiento'}))
    direccion = forms.CharField(widget=forms.TextInput(
        attrs={'class': 'form-control', 'placeholder': 'Dirección'}))
    # Contact numbers: only the first is required.
    telefono1 = forms.IntegerField(widget=forms.TextInput(
        attrs={'class': 'form-control', 'placeholder': 'Teléfono 1',}))
    telefono2 = forms.IntegerField(widget=forms.TextInput(
        attrs={'class': 'form-control', 'placeholder': 'Telefono 2',}), required=False)
    telefono3 = forms.IntegerField(widget=forms.TextInput(
        attrs={'class': 'form-control', 'placeholder': 'Teléfono 3',}), required=False)
    # Contact e-mails: only the first is required.
    email1 = forms.EmailField(widget=forms.TextInput(
        attrs={'class': 'form-control', 'placeholder': 'Correo Electrónico 1',}))
    email2 = forms.EmailField(widget=forms.TextInput(
        attrs={'class': 'form-control', 'placeholder': 'Correo Electrónico 2',}), required=False)
    email3 = forms.EmailField(widget=forms.TextInput(
        attrs={'class': 'form-control', 'placeholder': 'Correo Electrónico 3',}), required=False)
    # Tax id, capacity and hourly rate.
    rif = forms.IntegerField(widget=forms.TextInput(
        attrs={'class': 'form-control', 'placeholder': 'RIF',}))
    capacidad = forms.IntegerField(widget=forms.TextInput(
        attrs={'class': 'form-control', 'placeholder': 'Capacidad',}))
    tarifa = forms.DecimalField(widget=forms.TextInput(
        attrs={'class': 'form-control', 'placeholder': 'Tarifa',}))
    # Opening/closing hours, plus an optional restricted-reservation window.
    horaI = forms.TimeField(widget=forms.TextInput(
        attrs={'class': 'form-control', 'placeholder': 'Hora Apertura',}))
    horaF = forms.TimeField(widget=forms.TextInput(
        attrs={'class': 'form-control', 'placeholder': 'Hora Cierre',}))
    reservaI = forms.TimeField(widget=forms.TextInput(
        attrs={'class': 'form-control', 'placeholder': 'Inicio Restringir Reserva',}), required=False)
    reservaF = forms.TimeField(widget=forms.TextInput(
        attrs={'class': 'form-control', 'placeholder': 'Fin Restringir Reserva',}), required=False)

    class Meta:
        # Bind the form to the Estacionamiento model, exposing every field.
        model = Estacionamiento
        fields = '__all__'
class ReservaForm(forms.ModelForm):
    """ModelForm for booking a reservation at a parking lot."""
    # Parking lot to reserve; the empty choice shows 'Estacionamiento'.
    estacionamiento = forms.ModelChoiceField(
        queryset=Estacionamiento.objects.all(),
        empty_label="Estacionamiento",
        widget=forms.Select(attrs={'class': 'form-control',}))
    # Reservation window.
    # NOTE(review): TimeField is paired with a DateInput widget — it renders
    # as a plain text input so it works, but TimeInput would be conventional.
    horaInicio = forms.TimeField(widget=forms.DateInput(
        attrs={'class': 'form-control', 'placeholder': 'Inicio de la Reserva',}))
    horaFin = forms.TimeField(widget=forms.DateInput(
        attrs={'class': 'form-control', 'placeholder': 'Fin de la Reserva',}))

    class Meta:
        # Bind to the Reserva model; only these three fields are editable.
        model = Reserva
        fields = ['horaInicio', 'horaFin', 'estacionamiento']
class PagoForm(forms.ModelForm):
    """ModelForm for entering card payment details."""
    # (value, label) choices for the card type; '' is the placeholder option.
    TARJETAS = [
        ('', 'Tipo de Tarjeta'),
        ('Vista', 'Vista'),
        ('Mister', 'Mister'),
        ('Xpres', 'Xpres')
    ]
    # Cardholder name and national id.
    nombre = forms.CharField(widget=forms.TextInput(
        attrs={'class': 'form-control', 'placeholder': 'Nombre',}))
    cedula = forms.IntegerField(widget=forms.TextInput(
        attrs={'class': 'form-control', 'placeholder': 'Cédula',}))
    tipoTarjeta = forms.ChoiceField(choices=TARJETAS, widget=forms.Select(attrs={'class': 'form-control'}))
    # Exactly 16 digits.
    # NOTE(review): the `error_message` kwarg of RegexField was deprecated in
    # Django 1.6 and removed in 1.8; modern Django expects
    # error_messages={'invalid': ...}. Confirm the project's Django version
    # before changing it.
    numeroTarjeta = forms.RegexField(min_length=16, max_length=16, regex=r'^(\d)+$',
        error_message = ("Número de tarjeta no válido."), widget=forms.TextInput(
        attrs={'class': 'form-control', 'placeholder': 'Número de Tarjeta',}))

    class Meta:
        # Bind to the Pago model; 'pago' itself is rendered with the default widget.
        model = Pago
        fields = ['nombre', 'cedula', 'tipoTarjeta', 'numeroTarjeta', 'pago']
| 49.987654 | 107 | 0.646826 | 402 | 4,049 | 6.49005 | 0.266169 | 0.096972 | 0.123419 | 0.185128 | 0.60253 | 0.60253 | 0.595631 | 0.566501 | 0.526639 | 0.526639 | 0 | 0.005152 | 0.184984 | 4,049 | 80 | 108 | 50.6125 | 0.785455 | 0.010373 | 0 | 0.044118 | 0 | 0 | 0.273159 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.029412 | 0 | 0.470588 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
292db3dd254935b6485aa3e5a0431e5e9297d7e2 | 2,328 | py | Python | test/programytest/clients/restful/test_config.py | minhdc/documented-programy | fe947d68c0749201fbe93ee5644d304235d0c626 | [
"MIT"
] | null | null | null | test/programytest/clients/restful/test_config.py | minhdc/documented-programy | fe947d68c0749201fbe93ee5644d304235d0c626 | [
"MIT"
] | null | null | null | test/programytest/clients/restful/test_config.py | minhdc/documented-programy | fe947d68c0749201fbe93ee5644d304235d0c626 | [
"MIT"
] | null | null | null | import unittest
from programy.config.file.yaml_file import YamlConfigurationFile
from programy.clients.restful.config import RestConfiguration
from programy.clients.events.console.config import ConsoleConfiguration
class RestConfigurationTests(unittest.TestCase):
    """Tests for RestConfiguration: YAML loading, defaults, and serialisation."""

    def test_init(self):
        """Values present in the YAML section are loaded verbatim."""
        yaml = YamlConfigurationFile()
        self.assertIsNotNone(yaml)
        yaml.load_from_text("""
        rest:
          host: 127.0.0.1
          port: 5000
          debug: false
          workers: 4
          use_api_keys: false
          api_key_file: apikeys.txt
        """, ConsoleConfiguration(), ".")

        rest_config = RestConfiguration("rest")
        rest_config.load_configuration(yaml, ".")

        self.assertEqual("127.0.0.1", rest_config.host)
        self.assertEqual(5000, rest_config.port)
        self.assertEqual(False, rest_config.debug)
        self.assertEqual(False, rest_config.use_api_keys)
        self.assertEqual("apikeys.txt", rest_config.api_key_file)

    def test_init_no_values(self):
        """An empty 'rest' section falls back to the built-in defaults."""
        yaml = YamlConfigurationFile()
        self.assertIsNotNone(yaml)
        yaml.load_from_text("""
        rest:
        """, ConsoleConfiguration(), ".")

        rest_config = RestConfiguration("rest")
        rest_config.load_configuration(yaml, ".")

        self.assertEqual("0.0.0.0", rest_config.host)
        self.assertEqual(80, rest_config.port)
        self.assertEqual(False, rest_config.debug)
        self.assertEqual(False, rest_config.use_api_keys)

    def test_to_yaml_with_defaults(self):
        """to_yaml(..., defaults=True) emits every default setting."""
        config = RestConfiguration("rest")

        data = {}
        config.to_yaml(data, True)
        # Bug fix: the original used assertEquals, a deprecated alias that was
        # removed in Python 3.12; assertEqual is the supported spelling.
        self.assertEqual(data['host'], "0.0.0.0")
        self.assertEqual(data['port'], 80)
        self.assertEqual(data['debug'], False)
        self.assertEqual(data['use_api_keys'], False)
        self.assertEqual(data['api_key_file'], './api.keys')
        self.assertEqual(data['ssl_cert_file'], './rsa.cert')
        self.assertEqual(data['ssl_key_file'], './rsa.keys')
        self.assertEqual(data['bot'], 'bot')
        self.assertEqual(data['license_keys'], "./config/license.keys")
        self.assertEqual(data['bot_selector'], "programy.clients.client.DefaultBotSelector")
        self.assertEqual(data['renderer'], "programy.clients.render.text.TextRenderer")
| 36.375 | 93 | 0.660653 | 262 | 2,328 | 5.683206 | 0.240458 | 0.087307 | 0.14775 | 0.064473 | 0.42176 | 0.346541 | 0.346541 | 0.346541 | 0.346541 | 0.346541 | 0 | 0.017954 | 0.210481 | 2,328 | 63 | 94 | 36.952381 | 0.792165 | 0 | 0 | 0.352941 | 0 | 0 | 0.209192 | 0.044674 | 0 | 0 | 0 | 0 | 0.431373 | 1 | 0.058824 | false | 0 | 0.078431 | 0 | 0.156863 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
29313d16ae55bd60b3205923aa0959f4632a0038 | 1,211 | py | Python | Assignments/06.py | zexhan17/Data-Structures-and-Algorithms-using-Python | b5fd3d47c2eb7bf93eb88b276799d6663cd602e4 | [
"MIT"
] | null | null | null | Assignments/06.py | zexhan17/Data-Structures-and-Algorithms-using-Python | b5fd3d47c2eb7bf93eb88b276799d6663cd602e4 | [
"MIT"
] | null | null | null | Assignments/06.py | zexhan17/Data-Structures-and-Algorithms-using-Python | b5fd3d47c2eb7bf93eb88b276799d6663cd602e4 | [
"MIT"
] | null | null | null | # Write a recursive function to count the number of nodes in a Tree. (first do your self then see code)
def count_nodes(self):
    """Return the number of nodes in the tree rooted at this node.

    Counts this node itself plus, recursively, every node in the left and
    right subtrees; a missing child contributes nothing.
    """
    total = 1  # this node
    if self.left:
        total += self.left.count_nodes()
    if self.right:
        total += self.right.count_nodes()
    return total
Q # 2:
'''The height of a tree is the maximum number of levels in the tree. So, a tree with just one node has a height of 1. If the root has children which are leaves, the height of the tree is 2.
The height of a TreeNode can be computed recursively using a simple algorithm: the height of a TreeNode with no children is 1. If it has children, the height is: the maximum of the heights of its two sub-trees + 1.
Write a clean, recursive function for the TreeNode class that calculates the height based on the above statement (first try it yourself, then check the code). '''
def get_height(self):
    """Return the height of the tree rooted at this node.

    A node with no children has height 1; otherwise the height is 1 plus
    the larger of the two subtree heights.
    """
    left_height = 0
    right_height = 0
    if self.left:
        left_height = self.left.get_height()
    if self.right:
        right_height = self.right.get_height()
    # Bug fix: the original returned `count + max(...)` but `count` is never
    # defined in this function (NameError); the node contributes exactly 1.
    return 1 + max(left_height, right_height)
print(self.val)
if self.left.val > self.val or self.right.val < self.val
return False
| 31.868421 | 201 | 0.734104 | 217 | 1,211 | 4.013825 | 0.331797 | 0.061998 | 0.050517 | 0.041332 | 0.163031 | 0.06659 | 0.06659 | 0.06659 | 0 | 0 | 0 | 0.011317 | 0.197358 | 1,211 | 37 | 202 | 32.72973 | 0.884774 | 0 | 0 | 0.181818 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0 | null | null | 0.045455 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
2934aab8985e093039352c584291d05e82d940ca | 1,629 | py | Python | checklog.py | mtibbett67/pymodules | 9a7dcd16fb2107029edaabde766c1dbdb769713c | [
"MIT"
] | null | null | null | checklog.py | mtibbett67/pymodules | 9a7dcd16fb2107029edaabde766c1dbdb769713c | [
"MIT"
] | null | null | null | checklog.py | mtibbett67/pymodules | 9a7dcd16fb2107029edaabde766c1dbdb769713c | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
NAME:
checklog.py
DESCRIPTION:
This script checks the tail of the log file and lists the disk space
CREATED:
Sun Mar 15 22:53:54 2015
VERSION:
1.0
AUTHOR:
Mark Tibbett
AUTHOR_EMAIL:
mtibbett67@gmail.com
URL:
N/A
DOWNLOAD_URL:
N/A
INSTALL_REQUIRES:
[]
PACKAGES:
[]
SCRIPTS:
[]
'''
# Standard library imports
import os
import sys
import subprocess
# Related third party imports
# Local application/library specific imports
# Console colors (ANSI escape sequences; W resets to the default style)
W = '\033[0m' # white (normal)
R = '\033[31m' # red
G = '\033[32m' # green
O = '\033[33m' # orange
B = '\033[34m' # blue
P = '\033[35m' # purple
C = '\033[36m' # cyan
GR = '\033[37m' # gray
# Section formats: a blue 80-char separator line and a newline shorthand
SEPARATOR = B + '=' * 80 + W
NL = '\n'
# Clear the terminal
os.system('clear')
# Check for root or sudo. Remove if not needed.
UID = os.getuid()
if UID != 0:
    # Not running as root: print an error (Python 2 print statement) and
    # re-exec this script under sudo; os.execvp replaces the process, so
    # nothing below runs in the non-root branch.
    # NOTE(review): the message says 'sysupdate' but this file is checklog —
    # looks copied from another script; confirm the intended name.
    print R + ' [!]' + O + ' ERROR:' + G + ' sysupdate' + O + \
        ' must be run as ' + R + 'root' + W
    # print R + ' [!]' + O + ' login as root (' + W + 'su root' + O + ') \
    # or try ' + W + 'sudo ./wifite.py' + W
    os.execvp('sudo', ['sudo'] + sys.argv)
else:
    print NL
    print G + 'You are running this script as ' + R + 'root' + W
print NL + SEPARATOR + NL
# Command argument lists executed by check(): syslog tail and disk usage.
LOG = ['tail', '/var/log/messages']
DISK = ['df', '-h']
def check(arg1, arg2):
    '''Call subprocess to check logs.

    arg1 -- heading printed (in green) above the command output
    arg2 -- command argument list passed to subprocess.check_output
    '''
    print G + arg1 + W + NL
    # check_output raises CalledProcessError on a non-zero exit status.
    item = subprocess.check_output(arg2)
    #subprocess.call(arg2)
    print item + NL + SEPARATOR + NL

# NOTE(review): 'Runing' is a typo in a user-visible string; left unchanged
# because this pass only adds documentation.
check('Runing tail on messages', LOG)
check('Disk usage', DISK)
| 16.793814 | 73 | 0.581952 | 237 | 1,629 | 3.983122 | 0.594937 | 0.01589 | 0.010593 | 0.016949 | 0.027542 | 0 | 0 | 0 | 0 | 0 | 0 | 0.052936 | 0.257827 | 1,629 | 96 | 74 | 16.96875 | 0.727874 | 0.259669 | 0 | 0 | 0 | 0 | 0.248541 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.096774 | null | null | 0.193548 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
29351a72a75c3ab6afce56723dbd2096b63f981a | 726 | py | Python | algorithms/implementation/minimum_distances.py | avenet/hackerrank | e522030a023af4ff50d5fc64bd3eba30144e006c | [
"MIT"
] | null | null | null | algorithms/implementation/minimum_distances.py | avenet/hackerrank | e522030a023af4ff50d5fc64bd3eba30144e006c | [
"MIT"
] | null | null | null | algorithms/implementation/minimum_distances.py | avenet/hackerrank | e522030a023af4ff50d5fc64bd3eba30144e006c | [
"MIT"
] | null | null | null | n = int(input().strip())
items = [
int(A_temp)
for A_temp
in input().strip().split(' ')
]
items_map = {}
result = None
for i, item in enumerate(items):
if item not in items_map:
items_map[item] = [i]
else:
items_map[item].append(i)
for _, item_indexes in items_map.items():
items_indexes_length = len(item_indexes)
if items_indexes_length > 1:
for i in range(items_indexes_length):
for j in range(i + 1, items_indexes_length):
diff = item_indexes[j] - item_indexes[i]
if result is None:
result = diff
elif diff < result:
result = diff
print(result if result else -1)
| 22.6875 | 56 | 0.566116 | 99 | 726 | 3.949495 | 0.30303 | 0.102302 | 0.184143 | 0.076726 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.006173 | 0.330579 | 726 | 31 | 57 | 23.419355 | 0.798354 | 0 | 0 | 0.083333 | 0 | 0 | 0.001377 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0.041667 | 0 | 0 | 0 | null | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
29452ec5be15d28b45cb5711c4822ec7f8c5c51e | 1,001 | py | Python | 233_number_of_digt_one.py | gengwg/leetcode | 0af5256ec98149ef5863f3bba78ed1e749650f6e | [
"Apache-2.0"
] | 2 | 2018-04-24T19:17:40.000Z | 2018-04-24T19:33:52.000Z | 233_number_of_digt_one.py | gengwg/leetcode | 0af5256ec98149ef5863f3bba78ed1e749650f6e | [
"Apache-2.0"
] | null | null | null | 233_number_of_digt_one.py | gengwg/leetcode | 0af5256ec98149ef5863f3bba78ed1e749650f6e | [
"Apache-2.0"
] | 3 | 2020-06-17T05:48:52.000Z | 2021-01-02T06:08:25.000Z | # Given an integer n, count the total number of digit 1 appearing
# in all non-negative integers less than or equal to n.
#
# For example:
# Given n = 13,
# Return 6, because digit 1 occurred in the following numbers:
# 1, 10, 11, 12, 13.
#
class Solution:
    def countDigitOne(self, n):
        """Count occurrences of the digit 1 in all integers in [1, n].

        :type n: int
        :rtype: int
        """
        # Sum the '1' digits of every number up to and including n.
        count = 0
        for i in range(1, n + 1):  # count including n
            count += self.numberOfDigitOne(i)
        return count

    def numberOfDigitOne(self, n):
        """Count how many digits of n equal 1.

        Test the last digit with ``% 10`` and strip it with floor division
        until no digits remain.
        """
        result = 0
        while n:
            if n % 10 == 1:
                result += 1
            # Bug fix: ``n = n / 10`` is float division on Python 3, so n
            # never becomes an int 0 cleanly and the digit tests break;
            # floor division keeps n an integer (same behavior as Python 2).
            n //= 10
        return result
if __name__ == "__main__":
    # Fix: the original used a Python-2 print statement, a SyntaxError on
    # Python 3. The parenthesized form behaves identically on both versions
    # (Python 2 prints the parenthesized expression).
    print(Solution().countDigitOne(13))
| 22.75 | 65 | 0.548452 | 143 | 1,001 | 3.783217 | 0.475524 | 0.022181 | 0.048059 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.053628 | 0.366633 | 1,001 | 43 | 66 | 23.27907 | 0.799685 | 0.280719 | 0 | 0 | 0 | 0 | 0.017978 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0 | null | null | 0.066667 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
294bff20d8c499704a706ccaf6f51e0e5fd8ce4d | 5,821 | py | Python | exercises/ali/cartpole-MCTS/cartpole.py | alik604/ra | 6058a9adb47db93bb86bcb2c224930c5731d663d | [
"Unlicense"
] | null | null | null | exercises/ali/cartpole-MCTS/cartpole.py | alik604/ra | 6058a9adb47db93bb86bcb2c224930c5731d663d | [
"Unlicense"
] | 5 | 2021-03-26T01:30:13.000Z | 2021-04-22T22:19:03.000Z | exercises/ali/cartpole-MCTS/cartpole.py | alik604/ra | 6058a9adb47db93bb86bcb2c224930c5731d663d | [
"Unlicense"
] | 1 | 2021-05-05T00:57:43.000Z | 2021-05-05T00:57:43.000Z | # from https://github.com/kvwoerden/mcts-cartpole
# ---------------------------------------------------------------------------- #
# Imports #
# ---------------------------------------------------------------------------- #
import os
import time
import random
import argparse
from types import SimpleNamespace

import gym
from gym import logger
from gym.wrappers.monitoring.video_recorder import VideoRecorder

from Simple_mcts import MCTSAgent
from Agent import dqn_agent

# ---------------------------------------------------------------------------- #
#                                   Constants                                  #
# ---------------------------------------------------------------------------- #
# Bug fix: this region contained unresolved git merge-conflict markers
# (<<<<<<< HEAD / ======= / >>>>>>> MCTS), which are a SyntaxError. The
# conflict is resolved keeping the MCTS branch (SimpleNamespace args,
# dqn_agent-backed MCTSAgent); the HEAD-side argparse CLI is dropped.
LOGGER_LEVEL = logger.WARN

args = dict()
args['env_name'] = 'CartPole-v0'
args['episodes'] = 10
args['seed'] = 28
args['iteration_budget'] = 8000  # The number of iterations for each search step. Increasing this should lead to better performance.
args['lookahead_target'] = 10000  # The target number of steps the agent aims to look forward.
args['max_episode_steps'] = 1500  # The maximum number of steps to play.
args['video_basepath'] = '.\\video'  # './video'
args['start_cp'] = 20  # The start value of C_p, the value that the agent changes to try to achieve the lookahead target. Decreasing this makes the search tree deeper, increasing this makes the search tree wider.
args = SimpleNamespace(**args)

# ---------------------------------------------------------------------------- #
#                                   Main loop                                  #
# ---------------------------------------------------------------------------- #
if __name__ == '__main__':
    logger.set_level(LOGGER_LEVEL)
    random.seed(args.seed)
    env = gym.make(args.env_name)
    env.seed(args.seed)
    Q_net = dqn_agent()
    agent = MCTSAgent(args.iteration_budget, env, Q_net)
    timestr = time.strftime("%Y%m%d-%H%M%S")
    reward = 0
    done = False
    for i in range(args.episodes):
        ob = env.reset()
        env._max_episode_steps = args.max_episode_steps
        video_path = os.path.join(
            args.video_basepath, f"output_{timestr}_{i}.mp4")
        # Video recording is disabled in the MCTS branch; re-enable by
        # restoring the VideoRecorder lines below.
        # rec = VideoRecorder(env, path=video_path)
        try:
            sum_reward = 0
            node = None
            all_nodes = []
            C_p = args.start_cp
            while True:
                print("################")
                env.render()
                # rec.capture_frame()
                # The agent returns the chosen action, the reused search-tree
                # node, and an updated C_p exploration constant.
                action, node, C_p = agent.act(env.state, n_actions=env.action_space.n, node=node, C_p=C_p, lookahead_target=args.lookahead_target)
                ob, reward, done, _ = env.step(action)
                print("### observed state: ", ob)
                sum_reward += reward
                print("### sum_reward: ", sum_reward)
                if done:
                    # rec.close()
                    break
        except KeyboardInterrupt as e:
            # rec.close()
            env.close()
            raise e
    env.close()
| 39.067114 | 219 | 0.519842 | 608 | 5,821 | 4.827303 | 0.28125 | 0.040886 | 0.046337 | 0.02862 | 0.326065 | 0.274957 | 0.241908 | 0.222147 | 0.222147 | 0.190119 | 0 | 0.009093 | 0.263185 | 5,821 | 148 | 220 | 39.331081 | 0.675216 | 0 | 0 | 0.285714 | 0 | 0.008929 | 0.214567 | 0.005567 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.089286 | null | null | 0.026786 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.