blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2
values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 220
values | src_encoding stringclasses 30
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 2 10.3M | extension stringclasses 257
values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
6798d9a3adab57003ce56bdaa03cc9394888b242 | 3471378dcdf9e26e1c19905beac48b32edfa40f0 | /scripts/after_processing.py | 693ac9b9fb1041a62ab4082a871f411c9b45e23a | [
"MIT"
] | permissive | shohei-kojima/MEGAnE | 19a197533ffbce6618c2133242a766057949b45e | 478671a606e89782b4f3f57f7f97f63c2b03602b | refs/heads/master | 2023-04-13T18:22:23.651841 | 2023-03-07T07:15:48 | 2023-03-07T07:15:48 | 229,880,456 | 10 | 1 | null | null | null | null | UTF-8 | Python | false | false | 31,762 | py | #!/usr/bin/env python
'''
Author: Shohei Kojima @ RIKEN
Copyright (c) 2020 RIKEN
All Rights Reserved
See file LICENSE for details.
'''
import os,gzip
import utils
import math
from statistics import mean
import pybedtools
from pybedtools import BedTool
import log,traceback
def grouped_mei_to_bed(args, params, filenames):
    """Convert grouped MEI (mobile element insertion) breakpoint tables into
    BED-like result files, one line per insertion candidate.

    Reads `filenames.bp_merged_group*` tables and writes `filenames.bp_final_*`
    files (which set depends on `args.gaussian_executed` / `args.threshold`).
    On any failure the traceback is logged and the process exits with status 1.

    Fixes vs. previous revision (behavior otherwise unchanged):
    - nested `summarize_trans_pos` no longer shadows the builtin `list`
    - typo'd local `breapoint` renamed to `bp_mean`
    - the left-breakpoint TE loop for the pT case now also skips 'NA' entries,
      matching every other TE-parsing loop (avoids IndexError on ts[3])
    - `pred_status`/`pred_res` get defensive defaults so a line whose both ends
      are poly-A cannot raise NameError when the output row is assembled
    """
    log.logger.debug('started')
    try:
        pybedtools.set_tempdir(args.pybedtools_tmp)
        me_to_class,_=utils.load_me_classification(filenames.reshaped_rep)

        def rename_L1(me):
            # Collapse L1 sub-region names (_5end/_3end/_orf2) to the bare element name.
            if ('_5end' in me) or ('_3end' in me) or ('_orf2' in me):
                if me_to_class[me] == 'LINE/L1':
                    me=me.replace('_5end', '').replace('_3end', '').replace('_orf2', '')
            return me

        def predict_shared_te(R_list, L_list, r_strand, l_strand):
            # For TEs supported by both breakpoints, report mean start/end positions
            # as 'TE,start/end,r_strand/l_strand' strings.
            # NOTE(review): both position loops below branch on r_strand; the L_list
            # loop arguably should use l_strand — confirm intended semantics.
            # NOTE(review): `shared_tes` holds rename_L1()-normalized names while the
            # loops compare raw names, so renamed L1 entries fall through to 'NA'.
            cands=[]
            R_tes,L_tes=set(),set()
            for l in R_list:
                R_tes.add(rename_L1(l[0]))
            for l in L_list:
                L_tes.add(rename_L1(l[0]))
            shared_tes= R_tes & L_tes
            if len(shared_tes) >= 1:
                tes=sorted(list(shared_tes))
                d={}
                for te in tes:
                    d[te]=[[], []]
                for te,s,e,_ in R_list:
                    if te in tes:
                        if r_strand == '+':
                            d[te][0].append(int(s))
                        else:
                            d[te][0].append(int(e))
                for te,s,e,_ in L_list:
                    if te in tes:
                        if r_strand == '+':
                            d[te][1].append(int(e))
                        else:
                            d[te][1].append(int(s))
                for te in d:
                    if len(d[te][0]) >= 1:
                        d[te][0]=str(round(mean(d[te][0])))
                    else:
                        d[te][0]='NA'
                    if len(d[te][1]) >= 1:
                        d[te][1]=str(round(mean(d[te][1])))
                    else:
                        d[te][1]='NA'
                    cands.append('%s,%s/%s,%s/%s' % (te, d[te][0], d[te][1], r_strand, l_strand))
            return cands

        def search_transduction(params, filenames, infilename):
            # Deprecated (see convert_to_bed): look for 3' transduction evidence by
            # intersecting flanking windows with distant-read mapping positions.
            bed=[]
            with open(filenames.tmp_for_3transd, 'w') as outfile:
                with gzip.open(filenames.distant_txt +'.gz') as infile:
                    for line in infile:
                        line=line.decode()
                        ls=line.split()
                        # each entry in ls[1] is 'chr:start-end/strand'
                        for pos in ls[1].split(';'):
                            chr,tmp=pos.split(':', 1)
                            start,tmp=tmp.split('-', 1)
                            end,dir=tmp.split('/', 1)
                            outfile.write('%s\t%s\t%s\t%s\t.\t%s\n' % (chr, start, end, ls[0], dir))
            bed=BedTool(filenames.tmp_for_3transd)

            def retrieve_read_names(bedobj):
                # Split intersected reads by mate number ('/1' vs '/2').
                names_1,names_2={},{}
                for line in bedobj:
                    line=str(line)
                    ls=line.split()
                    read_name,num=ls[9].rsplit('/', 1)
                    if num == '1':
                        names_1[read_name]=line
                    else:
                        names_2[read_name]=line
                return names_1, names_2

            def summarize_trans_pos(pos_list):
                # Merge overlapping positions (stranded) and render them as
                # 'chr:start-end(strand)' joined by '|'.
                bed=[]
                for poss in pos_list:
                    for pos in poss.split(';'):
                        chr,tmp=pos.split(':', 1)
                        start,tmp=tmp.split('-', 1)
                        end,dir=tmp.split('/', 1)
                        bed.append('\t'.join([chr, start, end, '.', '.', dir +'\n']))
                bed=BedTool(''.join(bed), from_string=True).sort().merge(s=True, c='6', o='distinct')
                pos=[]
                for line in bed:
                    line=str(line)
                    ls=line.split()
                    pos.append('%s:%s-%s(%s)' % (ls[0], ls[1], ls[2], ls[3]))
                return '|'.join(pos)

            def search(infilename):
                # Build flanking windows for candidates whose one side lacks e-values
                # (i.e. is poly-A/poly-T) and pair them with distant-read mates.
                flank=[]
                with open(infilename) as infile:
                    for line in infile:
                        ls=line.split()
                        r_evals=[ float(i) for i in ls[8].split(';') if not i == 'NA' and not i == '' ]
                        l_evals=[ float(i) for i in ls[9].split(';') if not i == 'NA' and not i == '' ]
                        if len(r_evals) == 0:
                            r_bp=ls[3].split(':')[0]
                            start= int(r_bp) - params.length_for_3transduction_search
                            start='0' if start < 0 else str(start)
                            flank.append('\t'.join([ls[0], start, r_bp, ls[7], '.', '+\n']))
                        elif len(l_evals) == 0:
                            l_bp=ls[4].split(':')[0]
                            end= int(l_bp) + params.length_for_3transduction_search
                            end=str(end)
                            flank.append('\t'.join([ls[0], l_bp, end, ls[7], '.', '-\n']))
                flank=BedTool(''.join(flank), from_string=True)
                flank_intersect=flank.intersect(bed, s=True, wa=True, wb=True, nonamecheck=True)
                flank_read_names_d1,flank_read_names_d2=retrieve_read_names(flank_intersect)
                trans_d={}
                with gzip.open(filenames.distant_txt +'.gz') as infile:
                    for line in infile:
                        line=line.decode()
                        ls=line.split()
                        read_name,read_num=ls[0].rsplit('/', 1)
                        # a mate-1 hit in the flank pairs with this read's mate-2, and vice versa
                        if (read_name in flank_read_names_d1) and (read_num == '2'):
                            mei_line=flank_read_names_d1[read_name]
                            mls=mei_line.split()
                            if mls[5] == '+':
                                id='\t'.join([mls[0], mls[2], mls[3], mls[5]])
                            else:
                                id='\t'.join([mls[0], mls[1], mls[3], mls[5]])
                            if not id in trans_d:
                                trans_d[id]=[]
                            trans_d[id].append(ls[1])
                        elif (read_name in flank_read_names_d2) and (read_num == '1'):
                            mei_line=flank_read_names_d2[read_name]
                            mls=mei_line.split()
                            if mls[5] == '+':
                                id='\t'.join([mls[0], mls[2], mls[3], mls[5]])
                            else:
                                id='\t'.join([mls[0], mls[1], mls[3], mls[5]])
                            if not id in trans_d:
                                trans_d[id]=[]
                            trans_d[id].append(ls[1])
                for id in trans_d:
                    trans_d[id]=summarize_trans_pos(trans_d[id])
                return trans_d
            # search_transduction, main
            trans_d=search(infilename)
            return trans_d

        def convert_to_bed(params, filenames, infilename, outfilename):
            # search transduction, deprecated
            # trans_d=search_transduction(params, filenames, infilename)
            # summarize
            with open(outfilename, 'w') as outfile:
                with open(infilename) as infile:
                    for line in infile:
                        ls=line.split()
                        len_status='NA'
                        transd_status='3transduction:no'
                        # defensive defaults: a line whose both ends are poly-A would
                        # otherwise leave these unset and crash when the row is written
                        pred_status='NA'
                        pred_res='NA'
                        # if breakpoints are not pA
                        r_evals=[ float(i) for i in ls[8].split(';') if not i == 'NA' and not i == '' ]
                        l_evals=[ float(i) for i in ls[9].split(';') if not i == 'NA' and not i == '' ]
                        if (len(r_evals) >= 1) and (len(l_evals) >= 1):
                            # find shared TE between R and L
                            R_tes,L_tes=set(),set()
                            for tes in ls[10].split(';;'):
                                for te in tes.split(';'):
                                    if not te == 'NA' and not te == '':
                                        ts=te.split(',')
                                        R_tes.add(rename_L1(ts[0]))
                            for tes in ls[11].split(';;'):
                                for te in tes.split(';'):
                                    if not te == 'NA' and not te == '':
                                        ts=te.split(',')
                                        L_tes.add(rename_L1(ts[0]))
                            shared_tes= R_tes & L_tes
                            # if shared TEs exist
                            if len(shared_tes) >= 1:
                                # collect per-TE e-values from both sides, keep TEs with the global minimum
                                evals_d={}
                                for te in shared_tes:
                                    evals_d[te]=[]
                                for eval,tes in zip(ls[8].split(';'), ls[10].split(';;')):
                                    if not eval == 'NA' and not eval == '':
                                        for te in tes.split(';'):
                                            if not te == 'NA' and not te == '':
                                                te_name=te.split(',')[0]
                                                te_name=rename_L1(te_name)
                                                if te_name in evals_d:
                                                    evals_d[te_name].append(float(eval))
                                for eval,tes in zip(ls[9].split(';'), ls[11].split(';;')):
                                    if not eval == 'NA' and not eval == '':
                                        for te in tes.split(';'):
                                            if not te == 'NA' and not te == '':
                                                te_name=te.split(',')[0]
                                                te_name=rename_L1(te_name)
                                                if te_name in evals_d:
                                                    evals_d[te_name].append(float(eval))
                                for te in evals_d:
                                    evals_d[te]=min(evals_d[te])
                                min_eval=min(list(evals_d.values()))
                                tes_min_eval=[]
                                for te in evals_d:
                                    if evals_d[te] == min_eval:
                                        tes_min_eval.append(te)
                                tes_min_eval=sorted(tes_min_eval)
                                R_plus,L_plus=[],[]
                                R_minus,L_minus=[],[]
                                # R breakpoint
                                for tes in ls[10].split(';;'):
                                    for te in tes.split(';'):
                                        if not te == 'NA' and not te == '':
                                            ts=te.split(',')
                                            _me=rename_L1(ts[0])
                                            if _me in tes_min_eval:
                                                if ts[3] == '+':
                                                    R_plus.append(ts)
                                                else:
                                                    R_minus.append(ts)
                                # L breakpoint
                                for tes in ls[11].split(';;'):
                                    for te in tes.split(';'):
                                        if not te == 'NA' and not te == '':
                                            ts=te.split(',')
                                            _me=rename_L1(ts[0])
                                            if _me in tes_min_eval:
                                                if ts[3] == '+':
                                                    L_plus.append(ts)
                                                else:
                                                    L_minus.append(ts)
                                # prefer strand-concordant pairings over discordant ones
                                cands=[]
                                if (len(R_plus) >= 1) and (len(L_plus) >= 1):
                                    tmp=predict_shared_te(R_plus, L_plus, '+', '+')
                                    cands.extend(tmp)
                                elif (len(R_minus) >= 1) and (len(L_minus) >= 1):
                                    tmp=predict_shared_te(R_minus, L_minus, '-', '-')
                                    cands.extend(tmp)
                                elif (len(R_plus) >= 1) and (len(L_minus) >= 1):
                                    tmp=predict_shared_te(R_plus, L_minus, '+', '-')
                                    cands.extend(tmp)
                                elif (len(R_minus) >= 1) and (len(L_plus) >= 1):
                                    tmp=predict_shared_te(R_minus, L_plus, '-', '+')
                                    cands.extend(tmp)
                                # flag suspiciously short (10-50 bp) same-strand insertions
                                len_short=False
                                for cand in cands:
                                    if ('+/+' in cand) or ('-/-' in cand):
                                        rbp,lbp=cand.split(',')[1].split('/')
                                        if not rbp == 'NA' and not lbp == 'NA':
                                            inslen= int(lbp) - int(rbp)
                                            if 10 < inslen < 50:
                                                len_short=True
                                len_status='no' if len_short is True else 'yes'
                                pred_status='PASS'
                                pred_res='MEI=' + '|'.join(cands)
                            # if not shared TE exist
                            else:
                                R_plus,L_plus=[],[]
                                R_minus,L_minus=[],[]
                                # R breakpoint
                                min_eval=min(r_evals)
                                for eval,tes in zip(ls[8].split(';'), ls[10].split(';;')):
                                    if not eval == 'NA' and not eval == '':
                                        for te in tes.split(';'):
                                            if not te == 'NA' and not te == '':
                                                ts=te.split(',')
                                                if float(eval) == min_eval:
                                                    if ts[3] == '+':
                                                        R_plus.append(ts)
                                                    else:
                                                        R_minus.append(ts)
                                # L breakpoint
                                min_eval=min(l_evals)
                                for eval,tes in zip(ls[9].split(';'), ls[11].split(';;')):
                                    if not eval == 'NA' and not eval == '':
                                        for te in tes.split(';'):
                                            if not te == 'NA' and not te == '':
                                                ts=te.split(',')
                                                if float(eval) == min_eval:
                                                    if ts[3] == '+':
                                                        L_plus.append(ts)
                                                    else:
                                                        L_minus.append(ts)
                                # reshape: report mean breakpoint per TE and strand on each side
                                R_str_l,L_str_l=[],[]
                                if len(R_plus) >= 1:
                                    d={}
                                    for te,s,_,_ in R_plus:
                                        if not te in d:
                                            d[te]=[]
                                        d[te].append(int(s))
                                    for te in d:
                                        bp_mean=round(mean(d[te]))
                                        R_str_l.append('%s,%d,+' % (te, bp_mean))
                                if len(R_minus) >= 1:
                                    d={}
                                    for te,s,_,_ in R_minus:
                                        if not te in d:
                                            d[te]=[]
                                        d[te].append(int(s))
                                    for te in d:
                                        bp_mean=round(mean(d[te]))
                                        R_str_l.append('%s,%d,-' % (te, bp_mean))
                                if len(L_plus) >= 1:
                                    d={}
                                    for te,_,e,_ in L_plus:
                                        if not te in d:
                                            d[te]=[]
                                        d[te].append(int(e))
                                    for te in d:
                                        bp_mean=round(mean(d[te]))
                                        L_str_l.append('%s,%d,+' % (te, bp_mean))
                                if len(L_minus) >= 1:
                                    d={}
                                    for te,_,e,_ in L_minus:
                                        if not te in d:
                                            d[te]=[]
                                        d[te].append(int(e))
                                    for te in d:
                                        bp_mean=round(mean(d[te]))
                                        L_str_l.append('%s,%d,-' % (te, bp_mean))
                                pred_status='Complex_structure'
                                pred_res='MEI_left_breakpoint=' + '|'.join(R_str_l) +';'+ 'MEI_right_breakpoint=' + '|'.join(L_str_l)
                        # if either end is pA
                        elif len(l_evals) == 0:
                            if ls[13] == '0':
                                transd_status='3transduction:need_check,MEI_right'
                            bp_plus,bp_minus=[],[]
                            # R breakpoint
                            evals=[ float(i) for i in ls[8].split(';') if not i == 'NA' and not i == '' ]
                            min_eval=min(evals)
                            for eval,tes in zip(ls[8].split(';'), ls[10].split(';;')):
                                if not eval == 'NA' and not eval == '':
                                    for te in tes.split(';'):
                                        if not te == 'NA' and not te == '':
                                            ts=te.split(',')
                                            if float(eval) == min_eval:
                                                if ts[3] == '+':
                                                    bp_plus.append(ts)
                                                else:
                                                    bp_minus.append(ts)
                            # normal structure
                            if len(bp_plus) >= 1:
                                bp_str_l=[]
                                d={}
                                for te,s,_,_ in bp_plus:
                                    if not te in d:
                                        d[te]=[]
                                    d[te].append(int(s))
                                for te in d:
                                    bp_mean=round(mean(d[te]))
                                    bp_str_l.append('%s,%d,+' % (te, bp_mean))
                                pred_status='PASS'
                                pred_res='MEI_left_breakpoint=' + '|'.join(bp_str_l) +';'+ 'MEI_right_breakpoint=pA'
                            # complex structure
                            else:
                                bp_str_l=[]
                                d={}
                                for te,_,e,_ in bp_minus:
                                    if not te in d:
                                        d[te]=[]
                                    d[te].append(int(e))
                                for te in d:
                                    bp_mean=round(mean(d[te]))
                                    bp_str_l.append('%s,%d,-' % (te, bp_mean))
                                pred_status='Complex_structure'
                                pred_res='MEI_left_breakpoint=' + '|'.join(bp_str_l) +';'+ 'MEI_right_breakpoint=pA'
                        elif len(r_evals) == 0:
                            if ls[12] == '0':
                                transd_status='3transduction:need_check,MEI_left'
                            bp_plus,bp_minus=[],[]
                            # L breakpoint
                            evals=[ float(i) for i in ls[9].split(';') if not i == 'NA' and not i == '' ]
                            min_eval=min(evals)
                            for eval,tes in zip(ls[9].split(';'), ls[11].split(';;')):
                                if not eval == 'NA' and not eval == '':
                                    for te in tes.split(';'):
                                        # fixed: also skip 'NA' entries, consistent with every
                                        # other TE loop (ts[3] would IndexError on 'NA')
                                        if not te == 'NA' and not te == '':
                                            ts=te.split(',')
                                            if float(eval) == min_eval:
                                                if ts[3] == '+':
                                                    bp_plus.append(ts)
                                                else:
                                                    bp_minus.append(ts)
                            # normal structure
                            if len(bp_minus) >= 1:
                                bp_str_l=[]
                                d={}
                                for te,_,e,_ in bp_minus:
                                    if not te in d:
                                        d[te]=[]
                                    d[te].append(int(e))
                                for te in d:
                                    bp_mean=round(mean(d[te]))
                                    bp_str_l.append('%s,%d,-' % (te, bp_mean))
                                pred_status='PASS'
                                pred_res='MEI_left_breakpoint=pT' +';'+ 'MEI_right_breakpoint=' + '|'.join(bp_str_l)
                            # complex structure
                            else:
                                bp_str_l=[]
                                d={}
                                for te,s,_,_ in bp_plus:
                                    if not te in d:
                                        d[te]=[]
                                    d[te].append(int(s))
                                for te in d:
                                    bp_mean=round(mean(d[te]))
                                    bp_str_l.append('%s,%d,+' % (te, bp_mean))
                                pred_status='Complex_structure'
                                pred_res='MEI_left_breakpoint=pT' +';'+ 'MEI_right_breakpoint=' + '|'.join(bp_str_l)
                        # assemble the output row; chimeric counts = total minus hybrid
                        l_pos,l_num=ls[3].split(':')
                        r_pos,r_num=ls[4].split(':')
                        l_chim= int(l_num) - int(ls[12])
                        r_chim= int(r_num) - int(ls[13])
                        uniq='yes' if ls[16] == 'singleton' else 'no,%s' % ls[16]
                        if len_status == 'no':
                            confidence_status='low'
                        else:
                            confidence_status=ls[15]
                        tmp=[ls[0], ls[1], ls[2], ls[7], 'MEI_left:ref_pos=%s,chimeric=%d,hybrid=%s' % (l_pos, l_chim, ls[12]), 'MEI_right:ref_pos=%s,chimeric=%d,hybrid=%s' % (r_pos, r_chim, ls[13]), 'confidence:%s' % confidence_status, 'unique:%s,50bp_or_longer:%s,orig_conf:%s' % (uniq, len_status, ls[15]), 'subfamily_pred:status=%s,%s' % (pred_status, pred_res), transd_status, 'ID=%s' % ls[14]]
                        tmp= [ str(i) for i in tmp ]
                        outfile.write('\t'.join(tmp) +'\n')
                outfile.flush()
                os.fdatasync(outfile.fileno())
        # main
        if args.gaussian_executed is True:
            convert_to_bed(params, filenames, filenames.bp_merged_groupg, filenames.bp_final_g)
            convert_to_bed(params, filenames, filenames.bp_merged_groupp, filenames.bp_final_p)
        else:
            convert_to_bed(params, filenames, filenames.bp_merged_groupf, filenames.bp_final_f)
        if args.threshold is not None:
            convert_to_bed(params, filenames, filenames.bp_merged_groupu, filenames.bp_final_u)
        pybedtools.cleanup()
    except:
        log.logger.error('\n'+ traceback.format_exc())
        exit(1)
def retrieve_3transd_reads(args, params, filenames):
    """Collect distant-read mates supporting 3' transduction candidates and
    write them to `filenames.transd_master`.

    Candidates come from the `bp_final_*` files produced by grouped_mei_to_bed;
    which files are read depends on `args.gaussian_executed` / `args.threshold`.
    On any failure the traceback is logged and the process exits with status 1.

    Fixes vs. previous revision:
    - `args.threshold` is tested with `is not None` (a numeric threshold value
      previously failed the `is True` identity check, silently skipping the
      user-defined split that grouped_mei_to_bed had written)
    - the residual chunk of reads (< 100000 lines) left after the chunked
      processing loop is now flushed through process_bed; previously inputs
      smaller than one chunk produced no output at all
    """
    log.logger.debug('started')
    try:
        pybedtools.set_tempdir(args.pybedtools_tmp)

        def convert_line(params, ls):
            # Build a stranded BED line covering the hybrid-read search window
            # adjacent to the breakpoint flagged for transduction check.
            if 'MEI_left' in ls[9]:
                end=ls[4].split(',')[0].replace('MEI_left:ref_pos=', '')
                end=int(end)
                start= end - params.hybrid_read_range_from_breakpint
                if start < 0:
                    start=0
                pos_for_hybrid='\t'.join([ls[0], str(start), str(end), ls[10], '.', '+'])
            elif 'MEI_right' in ls[9]:
                start=ls[5].split(',')[0].replace('MEI_right:ref_pos=', '')
                start=int(start)
                end= start + params.hybrid_read_range_from_breakpint
                pos_for_hybrid='\t'.join([ls[0], str(start), str(end), ls[10], '.', '-'])
            return pos_for_hybrid

        def retrieve_read_ids(params, infilepath):
            # One BED line per candidate flagged '3transduction:need_check'.
            poss_for_hybrid=set()
            with open(infilepath) as infile:
                for line in infile:
                    if '3transduction:need_check' in line:
                        ls=line.split()
                        for_hybrid=convert_line(params, ls)
                        poss_for_hybrid.add(for_hybrid)
            return poss_for_hybrid

        poss_for_hybrid=[]
        if args.gaussian_executed is True:
            poss_for_hybrid_gaussian=retrieve_read_ids(params, filenames.bp_final_g)
            poss_for_hybrid_percentile=retrieve_read_ids(params, filenames.bp_final_p)
            if args.threshold is not None:  # fixed: was `is True`, see docstring
                poss_for_hybrid_user=retrieve_read_ids(params, filenames.bp_final_u)
                overlap= poss_for_hybrid_gaussian | poss_for_hybrid_percentile | poss_for_hybrid_user
                for line in overlap:
                    tmp=[]
                    if line in poss_for_hybrid_gaussian:
                        tmp.append('gaussian')
                    if line in poss_for_hybrid_percentile:
                        tmp.append('percentile')
                    if line in poss_for_hybrid_user:
                        tmp.append('user_defined')
                    poss_for_hybrid.append(line +'\t'+ ';'.join(tmp))
            else:
                overlap= poss_for_hybrid_gaussian | poss_for_hybrid_percentile
                for line in overlap:
                    tmp=[]
                    if line in poss_for_hybrid_gaussian:
                        tmp.append('gaussian')
                    if line in poss_for_hybrid_percentile:
                        tmp.append('percentile')
                    poss_for_hybrid.append(line +'\t'+ ';'.join(tmp))
        else:
            poss_for_hybrid_failed=retrieve_read_ids(params, filenames.bp_final_f)
            if args.threshold is not None:  # fixed: was `is True`, see docstring
                poss_for_hybrid_user=retrieve_read_ids(params, filenames.bp_final_u)
                overlap= poss_for_hybrid_failed | poss_for_hybrid_user
                for line in overlap:
                    tmp=[]
                    if line in poss_for_hybrid_failed:
                        tmp.append('failed')
                    if line in poss_for_hybrid_user:
                        tmp.append('user_defined')
                    poss_for_hybrid.append(line +'\t'+ ';'.join(tmp))
            else:
                for line in poss_for_hybrid_failed:
                    poss_for_hybrid.append(line +'\tfailed')
        if len(poss_for_hybrid) >= 1:
            poss_for_hybrid=BedTool('\n'.join(poss_for_hybrid) +'\n', from_string=True)
            # pairing of distant reads: keep only read names with both mates present
            d={}
            with gzip.open(filenames.distant_txt +'.gz') as infile:
                for line in infile:
                    line=line.decode()
                    ls=line.split()
                    id,dir=ls[0].split('/')
                    if not id in d:
                        d[id]=set()
                    d[id].add(dir)
            retain=set()
            for id in d:
                if len(d[id]) == 2:
                    retain.add(id)

            def process_bed(bed):
                # Intersect a chunk of read positions with the candidate windows;
                # returns {candidate_id: [[read_name, 'chr:s-e(strand)'], ...]}.
                ids={}
                bed=BedTool(bed, from_string=True)
                bed=bed.intersect(poss_for_hybrid, wa=True, wb=True, s=True, nonamecheck=True)
                if len(bed) >= 1:
                    for line in bed:
                        line=str(line)
                        ls=line.split()
                        if not ls[9] in ids:
                            ids[ls[9]]=[]
                        mapped_pos= '%s:%s-%s(%s)' % (ls[0], ls[1], ls[2], ls[5])
                        ids[ls[9]].append([ls[3], mapped_pos])
                return ids

            # extract reads intersect with 3'transduction candidates
            d={}
            bed=[]
            with gzip.open(filenames.distant_txt +'.gz') as infile:
                for line in infile:
                    line=line.decode()
                    ls=line.split()
                    rn,dir=ls[0].split('/')
                    if rn in retain:
                        id,poss=line.split()
                        for pos in poss.split(';'):
                            tmp,strand=pos.rsplit('/', 1)
                            tmp,end=tmp.rsplit('-', 1)
                            chr,start=tmp.rsplit(':', 1)
                            bed.append('\t'.join([chr, start, end, id, '.', strand]))
                        if len(bed) >= 100000: # chunk
                            bed='\n'.join(bed)
                            tmp=process_bed(bed)
                            for id in tmp:
                                if id in d:
                                    d[id].extend(tmp[id])
                                else:
                                    d[id]=tmp[id]
                            bed=[]
            # fixed: flush the residual chunk; it was previously discarded,
            # dropping all candidates when the input had < 100000 lines
            if len(bed) >= 1:
                tmp=process_bed('\n'.join(bed))
                for id in tmp:
                    if id in d:
                        d[id].extend(tmp[id])
                    else:
                        d[id]=tmp[id]
                bed=[]

            def convert_read_mate(readname):
                # 'xxx/1' <-> 'xxx/2' (operates on the trailing mate digit)
                converted='%s2' % readname[:-1] if readname[-1] == '1' else '%s1' % readname[:-1]
                return converted

            out=[]
            for id in d:
                for rn,pos in d[id]:
                    c=convert_read_mate(rn)
                    out.append('%s\tmapped=%s,%s\tmate=%s\n' % (id, rn, pos, c))
            with open(filenames.transd_master, 'w') as outfile:
                outfile.write(''.join(out))
                outfile.flush()
                os.fdatasync(outfile.fileno())
        else:
            log.logger.debug('No candidate for 3transduction.')
        pybedtools.cleanup()
    except:
        log.logger.error('\n'+ traceback.format_exc())
        exit(1)
| [
"26kojima@gmail.com"
] | 26kojima@gmail.com |
1f44b4b6d3e46f04b442fb65029b4ba093647a51 | 9ae936a9689832a5b22cd12998c5dc5047aee164 | /December_2020/December_2020/5_dec_2020/test.py | b410a47a0763433f3d18e060eca479c4c3ca3919 | [] | no_license | inderdevkumar/2020-Python-Projects | 218320335f352dc340877d1ef62b65605ce4ccfd | 210154d092021d8de5f30797af9ad8e193e3c68e | refs/heads/master | 2023-02-04T08:47:05.952342 | 2020-12-25T15:33:27 | 2020-12-25T15:33:27 | 322,878,822 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,596 | py | #===================== Function Defination to check prime numbers=================================
from itertools import count, islice

# Infinite stream of primes by trial division: a candidate survives when no
# divisor in [2, candidate) divides it evenly (vacuously true for 2).
primes = (candidate for candidate in count(2)
          if all(candidate % divisor for divisor in range(2, candidate)))

# Skip the first 99 primes and take the next one: the 100th prime.
print("100th prime is %d" % next(islice(primes, 99, 100)))
#===================== Function Defination to Euler of n =================================
def pi_euler1(n):
    """Approximate pi with Euler's product over the first `n` odd primes.

    For each odd prime p the factor is p / m, where m is the multiple of 4
    nearest to p; the product of the first `n` factors times 4 converges to pi.

    Prints the primes, the denominators and the approximation (as before) and
    additionally returns the approximation as a float.

    Fixes vs. previous revision:
    - the collection loop bound used the global `user_input_number` instead of
      the parameter `n` (NameError when called directly)
    - `primetest` was referenced but never defined; a local trial-division
      helper is provided
    - the nearest multiple of 4 is computed directly instead of searching a
      `range(1, n)`-capped list, which crashed for n == 1 (empty min()) and
      picked a wrong, too-small denominator once primes exceeded 4*(n-1)
    """
    def primetest(number):
        # Trial division up to sqrt(number); sufficient for the small values used here.
        if number < 2:
            return False
        for divisor in range(2, int(number ** 0.5) + 1):
            if number % divisor == 0:
                return False
        return True

    prime_lists = []   # the first n odd primes, in order
    deno_list = []     # matching nearest-multiple-of-4 denominators
    product = 1
    potentialprime = 3
    while len(prime_lists) < n:
        if primetest(potentialprime):
            prime_lists.append(potentialprime)
        potentialprime += 1
    for value in prime_lists:
        # Odd primes never sit exactly between two multiples of 4, so round()
        # is unambiguous; max(1, ...) keeps the denominator at least 4.
        denominator = 4 * max(1, round(value / 4))
        deno_list.append(denominator)
        product = product * (value / denominator)
    print("Prime Lists are: ", prime_lists)
    print("Denominator Lists are: ", deno_list)
    print(f"pi euler1 for {n} is: ", product * 4)
    return product * 4
# Script entry point: read the number of product terms from stdin.
if __name__ == "__main__":
    user_input_number= int(input("Enter the number of terms: "))
    # NOTE(review): the computation call is commented out, so the script as
    # shipped only prompts for input and exits without calling pi_euler1.
    #pi_euler1(user_input_number)
| [
"id0102yadav@gmail.com"
] | id0102yadav@gmail.com |
b0686e980691f4bc751d90ea12b21028fa20e7a0 | 8ca36d091c1592c57aba7fac8d3fb908282a9123 | /src/model.py | a27988fc08a09bf3ce45ad81819e7f9b3dc6ce68 | [
"MIT"
] | permissive | ocastel/exact-extract | 2c1870de25c29940599eacf191535b88e9fd09c8 | ccce4305ee3aaa4a30af197e36c55a06f11d8c23 | refs/heads/main | 2023-07-18T00:21:50.528314 | 2021-09-02T06:47:26 | 2021-09-02T06:47:26 | 394,207,700 | 12 | 1 | null | null | null | null | UTF-8 | Python | false | false | 28,289 | py | from __future__ import annotations
import itertools
import pickle
import sys
import os
from timeit import default_timer as timer
import traceback
from typing import List, Optional, Tuple, Union
import json
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from torch.utils.data import DataLoader
from pytorch_lightning import LightningModule, Trainer, seed_everything
from transformers import Adafactor
import math
from transformers.data.data_collator import DataCollatorForSeq2Seq
from pytorch_lightning.callbacks import ModelCheckpoint, LearningRateMonitor
from pytorch_lightning.loggers import NeptuneLogger
from argparse import ArgumentParser
import torch
import numpy as np
from pytorch_lightning.callbacks import Callback
import os
import time
sys.path.insert(0,os.getcwd())
from src.decoding.exact_extract import mlspan, InputAndAttention
from src.evaluation.metric import SquadMetricWrapper
from src.preprocessing.data_loading import FewShotDataLoader
from src.preprocessing.encoding import DatasetProcessor
from src.utils.outputs import PredictionWithMetadata, ExtractionResults, AggregatedPredictionsOfType, SpanType
# Disable the HF tokenizers' internal parallelism before any DataLoader workers
# fork; set via environment variable so the tokenizers library picks it up.
print('setting env variable "TOKENIZERS_PARALLELISM" to "false"')
os.environ['TOKENIZERS_PARALLELISM'] = 'false'
class ValEveryNSteps(Callback):
    """Lightning callback that runs evaluation every `every_n_step` optimizer steps.

    Also evaluates once before training starts (logged under step -1) and, when
    training ran to `hparams.max_steps`, dumps the module's accumulated
    `results` to `hparams.results_path` as JSON lines.

    Fixes vs. previous revision: removed a pointless single-argument
    `os.path.join` around `results_path`, and corrected the "existing."/
    "exiting." typo in the raised error message.
    """
    def __init__(self, every_n_step):
        self.every_n_step = every_n_step

    def on_batch_end(self, trainer, pl_module):
        # If the results file already exists, another process finished this
        # experiment first; abort instead of overwriting its output.
        if os.path.exists(pl_module.hparams.results_path):
            print(f'Seems like another process have written results for {pl_module.hparams.exp_name} in {pl_module.hparams.results_path}, exiting.')
            raise RuntimeError(f'Seems like another process have written results for {pl_module.hparams.exp_name} in {pl_module.hparams.results_path}, exiting.')
        # With gradient accumulation, only act on the batch that completes a step.
        if trainer.accumulate_grad_batches > 1 and (trainer.batch_idx % trainer.accumulate_grad_batches) != 0:
            return
        if self.every_n_step > 0:
            if (trainer.global_step+1) % self.every_n_step == 0 and trainer.global_step > 0:
                print(f'Finished step {trainer.global_step+1} (1-based), running evaluation:')
                self.run_evaluation_and_log(trainer=trainer, pl_module=pl_module, steps=trainer.global_step + 1)

    def on_train_start(self, trainer, pl_module):
        # Baseline evaluation of the non-finetuned model, logged as step -1.
        print(f'Running evaluation for non-finetuned model:')
        self.run_evaluation_and_log(trainer, pl_module, -1)

    def on_train_end(self, trainer, pl_module):
        if trainer.global_step < pl_module.hparams.max_steps:
            return # skip saving if training end too early
        with open(pl_module.hparams.results_path, 'w+') as f:
            for r in pl_module.results:
                f.write(json.dumps(r)+'\n')

    def run_evaluation_and_log(self, trainer, pl_module, steps):
        """Run trainer evaluation, log every validation metric, reset metrics."""
        start = timer()
        trainer.run_evaluation()
        for metric_name, metric_obj in pl_module.validation_metrics():
            pl_module.compute_and_log(metric_obj, metric_name, str(steps))
        pl_module.reset_metrics()
        end = timer()
        print(f'Evaluation took {end - start} seconds on step {steps}')
class SquadModel(LightningModule):
def configure_optimizers(self):
if self.hparams.optimizer == 'adafactor_const':
optimizer = Adafactor(self.model.parameters(), lr=self.hparams.lr, relative_step=False,
scale_parameter=False)
return optimizer
else:
raise RuntimeError(f'optimizer {self.hparams.optimizer} is not supported.')
def checkpoint_path(self):
return os.path.join(self.results_dir,'checkpoint.ckpt')
    def __init__(self, hparams):
        """Instantiate the seq2seq model, tokenizer and evaluation metrics from `hparams`."""
        super().__init__()
        self.hparams = hparams
        # accumulated evaluation rows; ValEveryNSteps.on_train_end writes them out
        self.results = []
        self.fewshot_dataloder = FewShotDataLoader()
        self.model = AutoModelForSeq2SeqLM.from_pretrained(self.hparams.model_name, cache_dir=self.hparams.cache_dir)
        # fall back to the model's own tokenizer unless one is named explicitly
        if self.hparams.tokenizer is None:
            self.tokenizer = AutoTokenizer.from_pretrained(self.hparams.model_name, cache_dir=self.hparams.cache_dir)
        else:
            self.tokenizer = AutoTokenizer.from_pretrained(self.hparams.tokenizer, cache_dir=self.hparams.cache_dir)
        # directory component of results_path (everything before the last separator)
        self.results_dir = os.path.sep.join(self.hparams.results_path.split(os.path.sep)[:-1])
        self.reset_metrics()
        self.reset_test_metrics()
        # this map is used for dynamically trying out different chunk sizes per passage length
        self.context_size_to_chunks = dict([(i, 1) for i in range(0, 520, 10)])
        # NOTE(review): index 1 of additional_special_tokens_ids is assumed to be the
        # sentinel used as end-of-answer marker — confirm for the tokenizer in use
        self.eos_token_id = self.tokenizer.additional_special_tokens_ids[1]
def create_metric(self):
return SquadMetricWrapper(name=self.hparams.exp_name)
def reset_metrics(self):
self.val_ml_span_metric = self.create_metric()
self.val_ml_span_norm_metric = self.create_metric()
self.val_greedy_metric = self.create_metric()
self.val_beam_metric = self.create_metric()
def reset_test_metrics(self):
self.test_ml_span_metric = self.create_metric()
self.test_ml_span_norm_metric = self.create_metric()
self.test_greedy_metric = self.create_metric()
self.test_beam_metric = self.create_metric()
def validation_metrics(self):
names = [f for f in vars(self).keys() if f.startswith('val_') and f.endswith('_metric')]
return [(name.replace('_metric',''), getattr(self,name)) for name in names]
def test_metrics(self):
names = [f for f in vars(self).keys() if f.startswith('test_') and f.endswith('_metric')]
return [(name.replace('_metric',''), getattr(self,name)) for name in names]
    def train_dataloader(self):
        """Build the shuffled training DataLoader.

        Three data sources, chosen by hparams: RSS pretraining data
        (`train_rss`), the full training split (`train_samples == -1`), or a
        Splinter few-shot sample of `train_samples` examples.
        """
        print('loading train dataloader')
        if self.hparams.train_rss:
            # RSS data comes pre-encoded from the loader
            print(f'loading {self.hparams.train_samples} from rss data')
            train_data_enc = self.fewshot_dataloder.load_rss(self.hparams.train_samples)
        else:
            print(f'loading train samples ({self.hparams.dataset}, {self.hparams.train_samples})')
            if self.hparams.train_samples == -1:
                train_samples = self.fewshot_dataloder.load_train_ds(self.hparams.dataset, cache_dir=self.hparams.cache_dir)
            else:
                train_samples = self.fewshot_dataloder.load_splinter_ds(self.hparams.splinter_data, self.hparams.dataset.lower(), self.hparams.seed,
                                                                        self.hparams.train_samples, hp_search=False, is_train=True, is_test=False, is_val=False)
            # long passages may be split into multiple encoded entries
            train_data_enc = DatasetProcessor.prepare_dataset(tokenizer=self.tokenizer, dataset=train_samples, template=self.hparams.pattern)
            print(f'total number of original training samples (not multiple from a single (long) sample: {len(train_samples)}')
            print(f'total number of training samples (including multiple from a single (long) sample: {len(train_data_enc)}')
        return DataLoader(train_data_enc, num_workers=4, shuffle=True, batch_size=self.hparams.batch_size,
                          collate_fn=DataCollatorForSeq2Seq(tokenizer=self.tokenizer, padding=True))
    def val_dataloader(self):
        """Build the (unshuffled) validation DataLoader.

        Source selection: Splinter val sample during hyper-parameter search,
        Splinter val split for datasets without a public dev split
        (textbookqa/bioasq), otherwise the original dev split.
        Side effect: caches gold answers in `self.val_answers`, indexed by qid.
        """
        print('Loading val split')
        if self.hparams.hp_search:
            print('loading validation from splinter val sample.')
            val_samples = self.fewshot_dataloder.load_splinter_ds(self.hparams.splinter_data, self.hparams.dataset.lower(), self.hparams.seed,
                                                                  self.hparams.val_samples, hp_search=self.hparams.hp_search, is_val=True, is_test=False, is_val=False) if False else self.fewshot_dataloder.load_splinter_ds(self.hparams.splinter_data, self.hparams.dataset.lower(), self.hparams.seed,
                                                                  self.hparams.val_samples, hp_search=self.hparams.hp_search, is_val=True, is_test=False, is_train=False)
        else:
            if self.hparams.dataset.lower() in ('textbookqa', 'bioasq'):
                val_samples = self.fewshot_dataloder.load_splinter_ds(self.hparams.splinter_data, self.hparams.dataset.lower(),
                                                                      self.hparams.seed,
                                                                      self.hparams.val_samples, hp_search=False, is_val=True, is_test=False, is_train=False)
            else:
                val_samples = self.fewshot_dataloder.load_dev_ds(self.hparams.dataset, self.hparams.val_samples,
                                                                 cache_dir=self.hparams.cache_dir, seed=self.hparams.val_seed)
                print('loading validation from original dev split.')
        print(f'Loaded {len(val_samples)} raw val samples, encoding...')
        print('template is '+self.hparams.pattern)
        self.val_answers = val_samples.set_index(['qid']).answers
        val_data_enc = DatasetProcessor.prepare_dataset(self.tokenizer, val_samples, self.hparams.pattern)
        print(f'After encoding and duplicating long samples, validation set containes {len(val_data_enc)} entries')
        return DataLoader(val_data_enc, num_workers=4, shuffle=False, batch_size=self.hparams.val_batch_size,
                          collate_fn=DataCollatorForSeq2Seq(tokenizer=self.tokenizer, padding=True))
    def test_dataloader(self):
        """Build the (unshuffled) test DataLoader.

        textbookqa/bioasq come from the Splinter test split; other datasets use
        the original dev split as test data. Side effect: caches gold answers
        in `self.test_answers`, indexed by qid.
        """
        print('Loading test split')
        if self.hparams.dataset.lower() in ('textbookqa', 'bioasq'):
            test_samples = self.fewshot_dataloder.load_splinter_ds(self.hparams.splinter_data, self.hparams.dataset.lower(), self.hparams.test_seed,
                                                                   self.hparams.test_samples, is_test=True, is_train=False, is_val=False, hp_search=False) # hp_search true will get data from dev split
        else:
            test_samples = self.fewshot_dataloder.load_dev_ds(self.hparams.dataset, self.hparams.test_samples,
                                                              cache_dir=self.hparams.cache_dir, seed=self.hparams.test_seed)
            # NOTE(review): load_dev_ds already receives test_samples; this extra
            # sample() looks redundant unless load_dev_ds ignores the count — confirm
            if self.hparams.test_samples > 0:
                test_samples = test_samples.sample(n=self.hparams.test_samples, random_state=self.hparams.test_seed)
        print(f'Loaded {len(test_samples)} raw test samples, encoding...')
        self.test_answers = test_samples.set_index(['qid']).answers#.apply(lambda x: x[0])
        test_data_enc = DatasetProcessor.prepare_dataset(self.tokenizer, test_samples, self.hparams.pattern)
        print(f'Resulted with {len(test_data_enc)} encoded test samples.')
        return DataLoader(test_data_enc, num_workers=4, shuffle=False, batch_size=self.hparams.val_batch_size,
                          collate_fn=DataCollatorForSeq2Seq(tokenizer=self.tokenizer, padding=True))
def forward(self, args):
res = self.model.generate(input_ids=args['input_ids'], attention_mask=args['attention_mask'],
max_length=self.hparams.trim_context, min_length=3)
return res
def training_step(self, batch, batch_idx):
loss = self.model(input_ids=batch['input_ids'], attention_mask=batch['attention_mask'],
labels=batch['labels']).loss
self.log('train_loss', loss, sync_dist=True)
return loss
    def predict_from_span(self, batch) -> List[Tuple[ExtractionResults, int]]:
        """Run exhaustive span extraction (mlspan) per sample, with adaptive chunking.

        `self.context_size_to_chunks` maps context length (rounded down to 10s)
        to a chunk count; on CUDA OOM the chunk count for that length — and all
        larger lengths — is increased and the sample is retried.
        Returns (ExtractionResults, decoded qid) per sample.
        """
        span_selections = []
        for sample_id in range(len(batch['input_ids'])):
            input_ids = batch['input_ids'][sample_id]
            attention_mask = batch['attention_mask'][sample_id]
            context = batch['context_input_ids'][sample_id]
            # strip padding from the stored context ids
            context = context[context != self.tokenizer.pad_token_id]
            extraction_result = None
            context_rounded_size = context.shape[-1] // 10 * 10
            # retry loop: shrink chunk size until mlspan fits in memory
            while extraction_result is None:
                try:
                    chunk_size = math.ceil(512 / self.context_size_to_chunks[context_rounded_size])
                    extraction_result = mlspan(model=self.model, tokenizer=self.tokenizer,
                                               encoder_input_attention=InputAndAttention(input_ids,attention_mask),
                                               context=context, chunk_size=chunk_size,
                                               device=self.device, trim_context=self.hparams.trim_context,
                                               eos_token_id=self.eos_token_id)
                except RuntimeError as e:
                    # only OOM errors are retried; anything else propagates
                    if 'out of memory' not in str(e):
                        traceback.print_exc()
                        raise e
                    # bump the chunk count until the resulting chunk size actually shrinks
                    next_chunks = self.context_size_to_chunks[context_rounded_size] + 1
                    next_chunk_size = math.ceil(512 / next_chunks)
                    while next_chunk_size == chunk_size:
                        next_chunks = next_chunks + 1
                        next_chunk_size = math.ceil(512 / next_chunks)
                    print(
                        f'Decreasing chunks size for context of size {context_rounded_size} from {chunk_size} to {next_chunk_size} ; {self.context_size_to_chunks[context_rounded_size]} -> {next_chunks}')
                    self.context_size_to_chunks[context_rounded_size] = next_chunks
                    # longer contexts need at least as many chunks as this one
                    for i in range(context_rounded_size, 520, 10):
                        self.context_size_to_chunks[i] = np.max(
                            (self.context_size_to_chunks[context_rounded_size], self.context_size_to_chunks[i]))
                    print(self.context_size_to_chunks)
                    if self.context_size_to_chunks[context_rounded_size] > context.shape[-1]:
                        raise RuntimeError(
                            "Tried calculating mlspan for chunk made of a single item but failed; consider "
                            "setting trim_context at a lower value.")
            span_selections.append((extraction_result, self.tokenizer.decode(batch['id'][sample_id], skip_special_tokens=True)))
        return span_selections
def predict_greedy(self, batch) -> Tuple[List[List[int]], List[List[int]]]:
    """Greedy-decode answers for *batch* and score each sequence.

    Returns ``(predicted_token_ids, per_sequence_nll)`` where the NLL is the
    cross-entropy of the generated tokens summed over non-pad positions
    (token id 0 is masked out; NaNs are zeroed before summation).
    """
    pred_dicts = self.model.generate(input_ids=batch['input_ids'], attention_mask=batch['attention_mask'],
                                     eos_token_id=self.eos_token_id,
                                     return_dict_in_generate=True,
                                     output_scores=True,
                                     min_length=self.hparams.min_length
                                     )
    # Drop the decoder start token from the generated sequences.
    preds_ids = pred_dicts.sequences[:, 1:]
    pred_scores = pred_dicts.scores
    # NLL of the greedily chosen tokens, masked so pad positions contribute 0.
    pred_probs = ((preds_ids != 0).int() * torch.nn.CrossEntropyLoss(reduction='none')(input=torch.stack(pred_scores, dim=1).permute(0, 2, 1), target=preds_ids)).nan_to_num().sum(dim=1)
    return preds_ids, pred_probs.tolist()
def predict_beam(self, batch) -> Tuple[List[List[int]], List[List[int]]]:
    """Beam-search decode answers for *batch* (8 beams, best sequence only).

    Returns ``(predicted_token_ids, per_sequence_nll)``.
    """
    pred_dicts = self.model.generate(input_ids=batch['input_ids'], attention_mask=batch['attention_mask'],
                                     eos_token_id=self.eos_token_id,
                                     early_stopping=False,
                                     num_beams=8,
                                     return_dict_in_generate=True,
                                     output_scores=True,
                                     num_return_sequences=1
                                     )
    # Drop the decoder start token from the generated sequences.
    preds_ids = pred_dicts.sequences[:, 1:]
    pred_probs = -1 * pred_dicts.sequences_scores  # beam already process this for us, but returns maximized scores, so negate
    return preds_ids, pred_probs.tolist()
def build_references(self, ids, is_validation):
    """Build SQuAD-metric reference dicts for the given sample *ids*.

    Gold answers are looked up by id in ``self.val_answers`` or
    ``self.test_answers``.  Character offsets are not tracked, so every
    ``answer_start`` entry is 0.
    """
    answer_table = self.val_answers if is_validation else self.test_answers
    references = []
    for sample_id in ids:
        gold_answers = list(answer_table.loc[sample_id])
        references.append({
            'id': sample_id,
            'answers': {'text': gold_answers, 'answer_start': [0] * len(gold_answers)},
        })
    return references
def to_predictions(self, span_preds, field_name) -> Optional[List[dict]]:
    """Convert ``(ExtractionResults, sample_id)`` pairs to SQuAD prediction dicts.

    Returns ``None`` when the requested prediction type (*field_name*) was not
    produced for this batch (checked on the first sample).
    """
    first_result, _first_id = span_preds[0]
    if getattr(first_result, field_name) is None:
        return None
    predictions = []
    for result, sample_id in span_preds:
        aggregated = getattr(result, field_name)
        predictions.append({
            'id': str(sample_id),
            'prediction_text': aggregated.top.decoded,
            'nll': aggregated.top.nll,
        })
    return predictions
def validation_or_test_step(self, batch, is_validation) -> Tuple[dict, List[ExtractionResults]]:
    """Shared evaluation routine for validation and test.

    Depending on hparams, runs ML-span extraction, greedy decoding and/or
    beam decoding for the batch, attaches greedy/beam results to each
    sample's ``ExtractionResults``, feeds predictions into the SQuAD
    metrics, and returns ``(logs_dict, per_sample_extraction_results)``.
    """
    if is_validation:
        # Teacher-forced loss is only computed (and logged) for validation.
        loss = self.model(input_ids=batch['input_ids'], labels=batch['labels']).loss
        self.log('val_loss', loss, sync_dist=True)
    ids = self.tokenizer.batch_decode(batch['id'], skip_special_tokens=True)  #[str(i) for i in batch['num_id'].cpu().numpy().reshape(-1)]
    references = self.build_references(ids, is_validation)
    if self.hparams.decode_ml_span:
        span_predictions = self.predict_from_span(batch)
    else:
        # Placeholder results so greedy/beam outputs still have a carrier.
        span_predictions = [(ExtractionResults.empty(), self.tokenizer.decode(id_enc, skip_special_tokens=True)) for id_enc in batch['id']]
    if self.hparams.decode_greedy:
        batch_generate_greedy_ids, batch_generate_greedy_log_probs = self.predict_greedy(batch)
    if self.hparams.decode_beam:
        batch_generate_beam_ids, batch_generate_beam_log_probs = self.predict_beam(batch)
    # Attach greedy/beam outputs to each sample's ExtractionResults.
    for i in range(len(batch['input_ids'])):
        span_prediction = span_predictions[i]
        if self.hparams.decode_greedy:
            decoded = self.tokenizer.decode(batch_generate_greedy_ids[i], skip_special_tokens=True)
            nll = batch_generate_greedy_log_probs[i]
            span_prediction[0].greedy = AggregatedPredictionsOfType(
                top_k=[PredictionWithMetadata(tokens=[], tokens_ids=batch_generate_greedy_ids[i].tolist(),
                                              tokens_nlls=[], decoded=decoded, nll=nll)], tpe=SpanType.GREEDY, k=1)
        if self.hparams.decode_beam:
            span_prediction[0].beam = AggregatedPredictionsOfType(
                top_k=[PredictionWithMetadata(tokens=[], tokens_ids=batch_generate_beam_ids[i].tolist(),
                                              tokens_nlls=[], decoded=self.tokenizer.decode(batch_generate_beam_ids[i],
                                                                                            skip_special_tokens=True),
                                              nll=batch_generate_beam_log_probs[i])], tpe=SpanType.BEAM, k=1)
    ret_dict = {}
    if self.hparams.decode_greedy:
        # Track how often the (non-empty) greedy answer is literally
        # contained in the original context string.
        original_contexts = self.tokenizer.batch_decode(batch['context_input_ids'], skip_special_tokens=True)
        greedy_predictions = self.to_predictions(span_preds=span_predictions, field_name='greedy')
        greedy_in_context = [(greedy_prediction['prediction_text'] in original_context and len(greedy_prediction['prediction_text']) > 0) for
                             greedy_prediction, original_context in zip(greedy_predictions, original_contexts)]
        k = 'greedy_in_context_acc' if is_validation else 'test_greedy_in_context_acc'
        ret_dict[k] = greedy_in_context
    # Metric names double as ExtractionResults field names once the
    # 'val_'/'test_' prefix is stripped.
    metrics = self.validation_metrics() if is_validation else self.test_metrics()
    for (metric_name, metric_obj) in metrics:
        metric_name = metric_name.replace('test_', '').replace('val_', '')
        predictions = self.to_predictions(span_predictions, metric_name)
        if predictions is not None:
            metric_obj.add_batch(predictions=predictions, references=references)
    span_prediction_results = list(map(lambda x: x[0], span_predictions))
    return ret_dict, span_prediction_results
def validation_step(self, batch, batch_idx):
    """Lightning validation hook: delegate to the shared eval routine.

    Per-sample extraction results are discarded; only the logs dict is
    returned for epoch-level aggregation.
    """
    logs, _extraction_results = self.validation_or_test_step(batch=batch, is_validation=True)
    return logs
def test_step(self, batch, batch_idx):
logs_dict, prediction_results = self.validation_or_test_step(batch=batch, is_validation=False)
return logs_dict
def compute_and_log(self, metric, metric_name, step=None):
    """Compute *metric*, log its F1/EM values and record a JSON result row.

    The logged metric name is ``metric_name`` when *step* is None, otherwise
    ``'<step>_<metric_name>'``.  Each call also appends a JSON-serializable
    dict (hparams + scores) to ``self.results``.  Does nothing when
    ``metric.compute()`` returns None.
    """
    met = metric.compute()
    if met is not None:
        # BUGFIX: the condition was inverted — with step=None the name came
        # out as 'None_<metric_name>' and a supplied step was dropped.
        full_metric_name = metric_name if step is None else str(step) + '_' + metric_name
        self.log(full_metric_name + '_f1', met['f1'])
        self.log(full_metric_name + '_EM', met['exact_match'])
        print(full_metric_name + ':' + str(met))
        json_dict = dict()
        for k in self.hparams:
            try:  # avoid unserializable keys gracefully
                json_dict[k] = json.dumps(self.hparams[k])
            except TypeError:
                pass
        json_dict['metric_name'] = metric_name
        json_dict['step'] = step
        json_dict['f1'] = met['f1']
        json_dict['EM'] = met['exact_match']
        self.results.append(json_dict)
def pickle_path(self, metric_name, rank):
    """Return ``(dir, file)`` paths used to exchange pickled metric state
    between ranks: ``<results_dir>/<metric_name>/<rank>.pkl``."""
    sep = os.path.sep
    results_dir_parts = self.hparams.results_path.split(sep)[:-1]
    dir_path = sep.join(results_dir_parts + [metric_name])
    file_path = sep.join([dir_path, str(rank) + '.pkl'])
    return dir_path, file_path
def checkpoint_dir(self):
    """Directory (sibling of the results file) where the final model
    checkpoint is saved: ``<results_dir>/final_checkpoint``."""
    sep = os.path.sep
    results_dir_parts = self.hparams.results_path.split(sep)[:-1]
    return sep.join(results_dir_parts + ['final_checkpoint'])
def pickle_metric(self, metric_name, metric_obj):
    """Serialize this rank's metric state to disk so rank 0 can aggregate it."""
    target_dir, target_file = self.pickle_path(metric_name, self.global_rank)
    print(f'pickling to {target_file}; contains {len(metric_obj)} predictions.')
    os.makedirs(target_dir, exist_ok=True)
    with open(target_file, 'wb') as handle:
        pickle.dump(metric_obj, handle)
def unpickle_metrics(self, metric_name, world_size):
    """Collect the pickled metric state of all non-zero ranks (run on rank 0).

    Polls the filesystem until each rank's pickle file appears, then loads
    it.  NOTE: blocks the caller — 2s polling plus a fixed 30s grace sleep
    per rank to avoid reading a file that is still being written.
    """
    unpickled_metrics = []
    for rank in range(1, world_size):
        dir_path, file_path = self.pickle_path(metric_name, rank)
        while not os.path.exists(file_path):
            print(f'{file_path} does not exist, sleeping for 2 seconds')
            time.sleep(2)
        time.sleep(30)  # to avoid race condition, sleep for some time
        with open(file_path, 'rb') as f:
            print(f'unpickling {file_path}')
            other_metric: SquadMetricWrapper = pickle.load(f)
            unpickled_metrics.append(other_metric)
    return unpickled_metrics
# def update_metric_from_pickles(self, ):
def test_epoch_end(self, outputs):
    """Aggregate test metrics at epoch end, across ranks when distributed.

    Single-process runs compute and log directly.  Multi-process runs have
    every rank pickle its state; rank 0 then merges all ranks' predictions
    before computing, logging and saving.  Finally all accumulated result
    rows are written as JSON lines to ``self.hparams.results_path``.
    """
    # world_size = gpus per node * nodes; hparams.gpus may be a CSV string.
    gpus = len(self.hparams.gpus.split(',')) if type(self.hparams.gpus) == str else self.hparams.gpus
    world_size = gpus * self.hparams.num_nodes
    print(f'global rank is {self.global_rank}, world size is {world_size}')
    for metric_name, metric_obj in self.test_metrics():
        if world_size == 1:
            self.compute_and_log(metric_obj, metric_name)
            metric_obj.save(self.hparams.results_path, metric_name)
        else:
            # Every rank dumps its state; rank 0 gathers and aggregates.
            self.pickle_metric(metric_name=metric_name, metric_obj=metric_obj)
            if self.global_rank == 0:
                unpickled_metrics = self.unpickle_metrics(metric_name, world_size)
                for unpickled_metric in unpickled_metrics:
                    metric_obj.add_batch(unpickled_metric.predictions.values(), unpickled_metric.references.values())
                metric_obj.save(self.hparams.results_path, metric_name)
                self.compute_and_log(metric_obj, metric_name)
    if self.hparams.decode_greedy:
        # Flatten the per-batch "greedy answer appears in context" booleans.
        bool_list = list(itertools.chain.from_iterable([d['test_greedy_in_context_acc'] for d in outputs]))
        if world_size == 1:
            self.log('test_greedy_in_context_acc', np.mean(bool_list))
        else:
            self.pickle_metric('test_greedy_in_context', bool_list)
            if self.global_rank == 0:
                in_context_bool_lists = self.unpickle_metrics('test_greedy_in_context', world_size)
                for l in in_context_bool_lists:
                    bool_list += l
                self.log('test_greedy_in_context_acc', np.mean(bool_list))
    # Persist all result rows accumulated by compute_and_log as JSON lines.
    with open(self.hparams.results_path, 'w+') as f:
        for r in self.results:
            f.write(json.dumps(r) + '\n')
@classmethod
def add_model_specific_args(cls, parent_parser):
    """Extend *parent_parser* with all model, data and decoding CLI options."""
    parser = ArgumentParser(parents=[parent_parser], add_help=False)
    # Model / tokenizer selection.
    parser.add_argument('--model_name', type=str, default='patrickvonplaten/t5-tiny-random')
    parser.add_argument('--tokenizer', default=None, type=str)
    parser.add_argument('--seed', type=int, default=0)
    # Dataset sizes.
    parser.add_argument('--train_samples', type=int, default=64, help="negative for full dataset")
    parser.add_argument('--val_samples', type=int, default=1024)
    parser.add_argument('--test_samples', type=int, default=10)
    # Prompt pattern used to build model inputs.
    parser.add_argument('--pattern', type=str, default="Text: <context>\nQuestion: <question>\nAnswer:<mask>.")
    parser.add_argument('--exp_name', type=str, default='test')
    parser.add_argument('--cache_dir', type=str, default=None)
    parser.add_argument('--batch_size', type=int, default=2)
    parser.add_argument('--val_batch_size', type=int, default=2)
    parser.add_argument('--lr', type=float, default=0.001)
    parser.add_argument('--trim_context', type=int, default=512)
    parser.add_argument('--dataset', type=str, default='SQuAD')
    parser.add_argument('--splinter_data', type=str, default='./data')
    parser.add_argument('--optimizer', default='adamw', type=str)
    parser.add_argument('--tags', default='', type=str)
    # Decoding strategies (any combination may be enabled).
    parser.add_argument('--decode_beam', default=False, action='store_true')
    parser.add_argument('--decode_greedy', default=False, action='store_true')
    parser.add_argument('--decode_ml_span', default=False, action='store_true')
    parser.add_argument('--val_seed', default=1, type=int)
    parser.add_argument('--test_seed', default=0, type=int)
    parser.add_argument('--check_val_every_n_steps', default=64, type=int)
    parser.add_argument('--results_path', type=str, default='./results')
    parser.add_argument('--hp_search', default=False, action='store_true')
    # NOTE(review): no type= here, so a CLI-supplied --min_length arrives as
    # a string — confirm downstream handling in model.generate.
    parser.add_argument('--min_length', default=None)
    parser.add_argument('--train_rss', default=False, action='store_true')
    parser.add_argument('--save_model', default=False, action='store_true')
    return parser
def neptune_tags(args):
    """Build the Neptune experiment tag list: user tags plus key run settings."""
    user_tags = args.tags.split(',')
    setting_tags = [
        args.optimizer,
        f'bsz_{args.batch_size}',
        f'accum_{args.accumulate_grad_batches}',
        f'seed_{args.seed}',
        f'lr_{args.lr}',
        f'steps{args.max_steps}',
    ]
    return user_tags + setting_tags
def main(args):
    """Build the model, trainer and loggers, then run training and testing.

    Training is skipped when ``args.train_samples == 0`` and testing when
    ``args.test_samples == 0``; the final checkpoint is saved only when
    ``args.save_model`` is set.
    """
    seed_everything(0)
    # Checkpoints/artifacts live under <cache_dir or cwd>/<exp_name>.
    save_dir = os.path.sep.join((os.getcwd(), args.exp_name)) if args.cache_dir is None else os.path.sep.join(
        (args.cache_dir, args.exp_name))
    checkpoint_callback = ModelCheckpoint(
        dirpath=save_dir,
        save_top_k=0,
        verbose=True,
        monitor='val_loss',
        mode='min',
        prefix=args.exp_name
    )
    if args.max_steps is not None:
        args.max_epochs = 99999  # overriding to have full control through max_steps
    neptune_logger = NeptuneLogger(
        close_after_fit=False,
        experiment_name=args.exp_name,
        params=vars(args),
        tags=neptune_tags(args),
    )
    trainer = Trainer.from_argparse_args(args, deterministic=True,
                                         checkpoint_callback=checkpoint_callback,
                                         logger=neptune_logger,
                                         progress_bar_refresh_rate=64
                                         )
    model = SquadModel(args)
    if args.train_samples != 0:
        trainer.fit(model)
    if args.save_model:
        print(f'saving checkpoint to {model.checkpoint_dir()}')
        model.model.save_pretrained(model.checkpoint_dir())
    if args.test_samples != 0:
        trainer.test(model)
def update_local_args(args):
    """Ensure the directory that will hold ``args.results_path`` exists.

    Uses ``os.path.dirname`` instead of splitting on a hard-coded ``'/'`` so
    native path separators work, and tolerates a bare filename (no directory
    component), which previously crashed ``os.makedirs('')``.
    """
    results_dir = os.path.dirname(args.results_path)
    if results_dir:  # bare filename -> nothing to create
        os.makedirs(results_dir, exist_ok=True)
    return args
if __name__ == '__main__':
    start = timer()
    import platform
    # Compose CLI options: Lightning Trainer flags + model-specific flags.
    parser = ArgumentParser(add_help=True)
    parser = Trainer.add_argparse_args(parser)
    parser = SquadModel.add_model_specific_args(parser)
    args = parser.parse_args()
    # Hard-coded kernel release string — presumably identifies the author's
    # local development machine; only there are local paths prepared.
    if platform.release() == '5.8.0-7642-generic':
        args = update_local_args(args)
    # No validation samples -> disable the validation loop entirely.
    if args.val_samples == 0:
        args.val_percent_check = 0
    main(args)
    end = timer()
    # Report total wall-clock runtime in seconds.
    print(end - start)
| [
"or.castel@gmail.com"
] | or.castel@gmail.com |
29c0a30e3982af6079d53b392785579d23b7ea4b | e757ad85c2ef3c06ed284695091becad00ec8fc7 | /EjecForm2.py | fb8dd6d6ef14894354d860cb90c490f8b6beae77 | [] | no_license | jonasdominguez1402/Formulario_Evaluacion | 93bfc6e3e11c003dcfcd31d0aae61f6a4c11e1c4 | 33e5fb38b0ca1cdc4e7a0e435ffc7bf8c7e6420f | refs/heads/master | 2020-08-24T01:07:32.816824 | 2019-10-22T06:26:18 | 2019-10-22T06:26:18 | 216,738,112 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,835 | py | """
Formulario De Profesores
Jonathan Eliseo Dominguez Hdz.
GITI11071-E
Version 1.0
22/Octubre/2019
"""
from PyQt5 import QtCore, QtGui, QtWidgets, QtSql
from PyQt5.Qt import QSqlDatabase
import sqlite3
from pprint import pprint
class Ui_mainWindow(object):
    """Qt Designer-style UI class for the "Formulario de Profesores" window.

    Builds the main-window widgets, creates and seeds a local SQLite database
    (``Profesores.db``) and wires the buttons to view / add / delete rows
    through a :class:`QtSql.QSqlTableModel`.
    """

    def setupUi(self, mainWindow):
        """Create all widgets, connect signals and initialise the database."""
        mainWindow.setObjectName("mainWindow")
        mainWindow.resize(640, 515)
        self.centralwidget = QtWidgets.QWidget(mainWindow)
        self.centralwidget.setObjectName("centralwidget")
        self.label = QtWidgets.QLabel(self.centralwidget)
        self.label.setGeometry(QtCore.QRect(170, 0, 291, 41))
        font = QtGui.QFont()
        font.setPointSize(16)
        self.label.setFont(font)
        self.label.setObjectName("label")
        self.layoutWidget = QtWidgets.QWidget(self.centralwidget)
        self.layoutWidget.setGeometry(QtCore.QRect(70, 50, 481, 131))
        self.layoutWidget.setObjectName("layoutWidget")
        self.verticalLayout = QtWidgets.QVBoxLayout(self.layoutWidget)
        self.verticalLayout.setContentsMargins(0, 0, 0, 0)
        self.verticalLayout.setObjectName("verticalLayout")
        self.ver_registro = QtWidgets.QPushButton(self.layoutWidget)
        self.ver_registro.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
        self.ver_registro.setObjectName("ver_registro")
        self.verticalLayout.addWidget(self.ver_registro)
        self.Agregar = QtWidgets.QPushButton(self.layoutWidget)
        self.Agregar.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
        self.Agregar.setObjectName("Agregar")
        self.verticalLayout.addWidget(self.Agregar)
        self.eliminar = QtWidgets.QPushButton(self.layoutWidget)
        self.eliminar.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
        self.eliminar.setObjectName("eliminar")
        self.verticalLayout.addWidget(self.eliminar)
        self.frame = QtWidgets.QFrame(self.centralwidget)
        self.frame.setGeometry(QtCore.QRect(0, 0, 631, 491))
        self.frame.setFrameShape(QtWidgets.QFrame.StyledPanel)
        self.frame.setFrameShadow(QtWidgets.QFrame.Raised)
        self.frame.setObjectName("frame")
        self.tableView = QtWidgets.QTableView(self.frame)
        self.tableView.setGeometry(QtCore.QRect(10, 190, 611, 301))
        self.tableView.setObjectName("tableView")
        self.frame.raise_()
        self.label.raise_()
        self.layoutWidget.raise_()
        mainWindow.setCentralWidget(self.centralwidget)
        self.statusbar = QtWidgets.QStatusBar(mainWindow)
        self.statusbar.setObjectName("statusbar")
        mainWindow.setStatusBar(self.statusbar)
        self.retranslateUi(mainWindow)
        QtCore.QMetaObject.connectSlotsByName(mainWindow)
        # Create (and seed) the SQLite database.
        self.create_DB()
        self.ver_registro.clicked.connect(self.print_data)
        # The table model is created lazily the first time rows are shown.
        self.model = None
        self.ver_registro.clicked.connect(self.vizualisacion_tabla)
        self.Agregar.clicked.connect(self.agregar_registro)
        self.eliminar.clicked.connect(self.eliminar_registro)

    # Delete the currently selected record.
    def eliminar_registro(self):
        if self.model:
            self.model.removeRow(self.tableView.currentIndex().row())
        else:
            # BUGFIX: the original referenced the non-existent attribute
            # ``self.sql_tableview_model``; initialise the model instead.
            self.vizualisacion_tabla()

    # Append a new (empty) record at the end of the table.
    def agregar_registro(self):
        if self.model:
            self.model.insertRows(self.model.rowCount(), 1)
        else:
            # BUGFIX: the original called the non-existent method
            # ``self.sql_tableview_model()``; initialise the model instead.
            self.vizualisacion_tabla()

    # Show the records in the table view through a QSqlTableModel.
    def vizualisacion_tabla(self):
        db = QtSql.QSqlDatabase.addDatabase('QSQLITE')
        db.setDatabaseName('Profesores.db')
        tableview = self.tableView
        self.model = QtSql.QSqlTableModel()
        tableview.setModel(self.model)
        self.model.setTable('Profesores')
        # Edits in the view are written back to the DB field by field.
        self.model.setEditStrategy(QtSql.QSqlTableModel.OnFieldChange)
        self.model.select()
        self.model.setHeaderData(0, QtCore.Qt.Horizontal, "id")
        self.model.setHeaderData(1, QtCore.Qt.Horizontal, "Nombre")
        self.model.setHeaderData(2, QtCore.Qt.Horizontal, "Apellido")
        self.model.setHeaderData(3, QtCore.Qt.Horizontal, "Direccion")
        self.model.setHeaderData(4, QtCore.Qt.Horizontal, "Telefono")
        self.model.setHeaderData(5, QtCore.Qt.Horizontal, "Email")

    # Debug helper: dump every record to stdout via sqlite3 directly.
    def print_data(self):
        sqlite_file = 'Profesores.db'
        conn = sqlite3.connect(sqlite_file)
        cursor = conn.cursor()
        cursor.execute("SELECT * FROM 'Profesores' ORDER BY id")
        all_rows = cursor.fetchall()
        pprint(all_rows)
        conn.commit()
        conn.close()

    # Create the database table and seed it with sample rows.  exec_() fails
    # silently when the table/rows already exist, so this is re-run safe.
    def create_DB(self):
        db = QtSql.QSqlDatabase.addDatabase('QSQLITE')
        db.setDatabaseName('Profesores.db')
        db.open()
        query = QtSql.QSqlQuery()
        query.exec_("create table Profesores(id int primary key,"
                    "Nombre varchar(20), Apellido varchar(20), Direccion varchar(30), Telefono int(20), Email varchar(20))")
        query.exec_("insert into Profesores values(1, 'Jonathan', 'Dominguez', 'Miguel Hidalgo 42', '4181556335', 'jonas12gmail.com' )")
        query.exec_("insert into Profesores values(2, 'Jonas', 'Hernandez', 'Miguel Hidalgo 52', '4181556335', 'jonas12gmail.com' )")
        query.exec_("insert into Profesores values(3, 'Jose', 'Domingo', 'Miguel Hidalgo 40', '4181556335', 'jonas12gmail.com' )")
        query.exec_("insert into Profesores values(4, 'Juan', 'Cano', 'Miguel Hidalgo 46', '4181556335', 'jonas12gmail.com' )")

    # Set all user-visible (Spanish) strings.
    def retranslateUi(self, mainWindow):
        _translate = QtCore.QCoreApplication.translate
        mainWindow.setWindowTitle(_translate("mainWindow", "Formulario de Profesores"))
        self.label.setText(_translate("mainWindow", "Formulario de Profesores"))
        self.ver_registro.setText(_translate("mainWindow", "Ver Registro"))
        self.Agregar.setText(_translate("mainWindow", "Agregar"))
        self.eliminar.setText(_translate("mainWindow", "Eliminar"))
if __name__ == "__main__":
import sys
app = QtWidgets.QApplication(sys.argv)
mainWindow = QtWidgets.QMainWindow()
ui = Ui_mainWindow()
ui.setupUi(mainWindow)
mainWindow.show()
sys.exit(app.exec_())
| [
"noreply@github.com"
] | jonasdominguez1402.noreply@github.com |
62b2d3ae8b6c692e897b88f46edcf24719c689dd | 0262f25e1300c5137a0f6e04ca47610ffb9d2d6e | /conf/default.py | 53c878026bf033293e4abc8c90a59782c4871aa7 | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference",
"MIT",
"Apache-2.0"
] | permissive | crazyjazchu/zhuzhigang | fc5167cdc6bd69a31d255dc722d356b5471839c5 | 3277b7e3d90ffb1d606d28189931f502ba019f47 | refs/heads/master | 2020-05-14T14:54:16.448263 | 2019-04-17T14:28:38 | 2019-04-17T14:28:38 | 181,842,704 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,190 | py | # -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making 蓝鲸智云(BlueKing) available.
Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and limitations under the License.
Django settings for app-framework project.
Generated by 'django-admin startproject' using Django 1.8.3.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
import os
import sys
# Import global settings to make it easier to extend settings.
from django.conf.global_settings import * # noqa
# ==============================================================================
# Basic application configuration (edit per the platform instructions)
# ==============================================================================
# Obtain APP_ID and APP_TOKEN from the BlueKing developer center:
# click the application ID -> basic information.
APP_ID = 'zhuzhigang'
APP_TOKEN = '38d90fb3-ef8e-4172-8602-4ea502fc3cca'
# Domain of the BlueKing developer center, e.g. http://paas.bking.com
BK_PAAS_HOST = 'https://paas.blueking.com'
# Default version for official API requests: "v2" (standardized API) or ""
# (non-standardized API).
DEFAULT_BK_API_VER = 'v2'
# Whether celery tasks are enabled.
IS_USE_CELERY = True
# Celery message queue (RabbitMQ) for local development.
BROKER_URL_DEV = 'amqp://guest:guest@127.0.0.1:5672/'
# TOCHANGE Module paths containing celery tasks; list of modules to import
# when celery starts.
CELERY_IMPORTS = (
    'home_application.celery_tasks',
)
# ==============================================================================
# Application runtime environment configuration
# ==============================================================================
ENVIRONMENT = os.environ.get('BK_ENV', 'development')
# Basic app info is read from environment variables; when unset (e.g. local
# development) the values filled in at the top of this file are used.
APP_ID = os.environ.get('APP_ID', APP_ID)
APP_TOKEN = os.environ.get('APP_TOKEN', APP_TOKEN)
BK_PAAS_HOST = os.environ.get('BK_PAAS_HOST', BK_PAAS_HOST)
BK_PAAS_INNER_HOST = os.environ.get('BK_PAAS_INNER_HOST', BK_PAAS_HOST)
# URL path under which the application is served.
SITE_URL = '/'
# Run mode: DEVELOP (development), TEST (testing), PRODUCT (production).
RUN_MODE = 'DEVELOP'
if ENVIRONMENT.endswith('production'):
    RUN_MODE = 'PRODUCT'
    DEBUG = False
    SITE_URL = '/o/%s/' % APP_ID
elif ENVIRONMENT.endswith('testing'):
    RUN_MODE = 'TEST'
    DEBUG = False
    SITE_URL = '/t/%s/' % APP_ID
else:
    RUN_MODE = 'DEVELOP'
    DEBUG = True
# Use PyMySQL as a drop-in replacement for MySQLdb when it is installed;
# environments without it (e.g. local SQLite development) simply skip this.
try:
    import pymysql
    pymysql.install_as_MySQLdb()
except ImportError:
    # BUGFIX: was a bare ``except:`` which would also hide real errors
    # raised by install_as_MySQLdb() and even KeyboardInterrupt.
    pass
# ===============================================================================
# Application secrets / request-host configuration
# ===============================================================================
# Application secret key.
# NOTE(review): secret committed to source control — should be rotated and
# loaded from an environment variable instead; confirm with the deployment.
SECRET_KEY = 'MQtd_0cw&AiY5jT&&#w7%9sCK=HW$O_e%ch4xDd*AaP(xU0s3X'
# CSRF cookie domain; defaults to the current domain.
# CSRF_COOKIE_DOMAIN =''
CSRF_COOKIE_PATH = SITE_URL
ALLOWED_HOSTS = ['*']
# ==============================================================================
# Middleware and apps
# ==============================================================================
MIDDLEWARE_CLASSES = (
    'django.middleware.common.CommonMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    # NOTE(review): CommonMiddleware appears twice (also first in this
    # tuple) — likely unintentional; confirm before removing.
    'django.middleware.common.CommonMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
    'django.middleware.security.SecurityMiddleware',
    'account.middlewares.LoginMiddleware',  # login authentication middleware
    'common.middlewares.CheckXssMiddleware',  # XSS attack handling middleware
)

INSTALLED_APPS = (
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.sites',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'django.contrib.admin',
    # OTHER 3rd Party App
    'app_control',
    'account',
    'home_application',
)
# ==============================================================================
# Django project configuration
# ==============================================================================
TIME_ZONE = 'Asia/Shanghai'
LANGUAGE_CODE = 'zh-CN'
SITE_ID = 1
USE_I18N = True
USE_L10N = True

# Project paths derived from this settings file's location.
PROJECT_PATH = os.path.dirname(os.path.abspath(__file__))
PROJECT_ROOT, PROJECT_MODULE_NAME = os.path.split(PROJECT_PATH)
BASE_DIR = os.path.dirname(os.path.dirname(PROJECT_PATH))
PYTHON_BIN = os.path.dirname(sys.executable)
# ===============================================================================
# Static resource settings
# ===============================================================================
# After a release, browsers may still cache static files (js, css, ...).
# Every static reference must therefore append this version number, e.g.
# <script src="/a.js?v=${STATIC_VERSION}"></script>; bump it whenever static
# resources change, before going live.
STATICFILES_DIRS = (
    os.path.join(PROJECT_ROOT, 'static'),
)
STATIC_VERSION = 0.1
# Local static resource directory of the application.
STATIC_URL = '%sstatic/' % SITE_URL

ROOT_URLCONF = 'urls'
# ==============================================================================
# Templates
# ==============================================================================
# mako template dir
MAKO_TEMPLATE_DIR = os.path.join(PROJECT_ROOT, 'templates')
MAKO_TEMPLATE_MODULE_DIR = os.path.join(BASE_DIR, 'templates_module', APP_ID)
if RUN_MODE not in ['DEVELOP']:
    MAKO_TEMPLATE_MODULE_DIR = os.path.join(PROJECT_ROOT, 'templates_module', APP_ID)

# Django TEMPLATES configuration.
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [os.path.join(PROJECT_ROOT, 'templates')],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                # the context to the templates
                'django.contrib.auth.context_processors.auth',
                'django.template.context_processors.request',
                'django.template.context_processors.csrf',
                # Custom template context; exposes STATIC_URL etc. in pages.
                'common.context_processors.mysetting',
                'django.template.context_processors.i18n',
            ],
            'debug': DEBUG
        },
    },
]
# ==============================================================================
# session and cache
# ==============================================================================
# Defaults to False; when True, SESSION_COOKIE_AGE has no effect.
SESSION_EXPIRE_AT_BROWSER_CLOSE = True
# NOTE do not change: otherwise the cookie path may collide with other apps
# on the platform, which breaks login.
SESSION_COOKIE_PATH = SITE_URL

# ===============================================================================
# Authentication
# ===============================================================================
AUTH_USER_MODEL = 'account.BkUser'
AUTHENTICATION_BACKENDS = ('account.backends.BkBackend', 'django.contrib.auth.backends.ModelBackend')
LOGIN_URL = "%s/login/?app_id=%s" % (BK_PAAS_HOST, APP_ID)
LOGOUT_URL = '%saccount/logout/' % SITE_URL
LOGIN_REDIRECT_URL = SITE_URL
REDIRECT_FIELD_NAME = "c_url"
# Name of the cookie used to verify login state.
BK_COOKIE_NAME = 'bk_token'
# Administrator list used when initializing the database.
ADMIN_USERNAME_LIST = ['admin']
# ===============================================================================
# CELERY configuration
# ===============================================================================
# Best-effort setup: if djcelery (or any part of the celery wiring) is
# unavailable, the application still starts without async task support.
if IS_USE_CELERY:
    try:
        import djcelery
        INSTALLED_APPS += (
            'djcelery',  # djcelery
        )
        djcelery.setup_loader()
        CELERY_ENABLE_UTC = False
        CELERYBEAT_SCHEDULER = "djcelery.schedulers.DatabaseScheduler"
        if "celery" in sys.argv:
            DEBUG = False
        # Celery message queue (RabbitMQ) connection info.
        BROKER_URL = os.environ.get('BK_BROKER_URL', BROKER_URL_DEV)
        if RUN_MODE == 'DEVELOP':
            from celery.signals import worker_process_init

            @worker_process_init.connect
            def configure_workers(*args, **kwargs):
                import django
                django.setup()
    except Exception:
        # BUGFIX: was a bare ``except:`` which also swallowed SystemExit
        # and KeyboardInterrupt; keep the deliberate best-effort behaviour
        # but only for ordinary exceptions.
        pass
# ==============================================================================
# logging
# ==============================================================================
# Application log configuration: log directory and level depend on run mode.
BK_LOG_DIR = os.environ.get('BK_LOG_DIR', '/data/paas/apps/logs/')
LOGGING_DIR = os.path.join(BASE_DIR, 'logs', APP_ID)
LOG_CLASS = 'logging.handlers.RotatingFileHandler'
if RUN_MODE == 'DEVELOP':
    LOG_LEVEL = 'DEBUG'
elif RUN_MODE == 'TEST':
    LOGGING_DIR = os.path.join(BK_LOG_DIR, APP_ID)
    LOG_LEVEL = 'INFO'
elif RUN_MODE == 'PRODUCT':
    LOGGING_DIR = os.path.join(BK_LOG_DIR, APP_ID)
    LOG_LEVEL = 'ERROR'
# Create the log directory automatically if it does not exist yet.
if not os.path.exists(LOGGING_DIR):
    try:
        os.makedirs(LOGGING_DIR)
    except OSError:
        # BUGFIX: was a bare ``except:``.  Creation is best-effort (the
        # process may lack permissions); only filesystem errors are ignored.
        pass
# Django logging configuration: rotating file handlers per concern
# (application, component calls, MySQL queries) plus console/mail handlers.
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'formatters': {
        'verbose': {
            'format': '%(levelname)s [%(asctime)s] %(pathname)s %(lineno)d %(funcName)s %(process)d %(thread)d \n \t %(message)s \n',  # noqa
            'datefmt': '%Y-%m-%d %H:%M:%S'
        },
        'simple': {
            'format': '%(levelname)s %(message)s \n'
        },
    },
    'handlers': {
        # NOTE(review): 'django.utils.log.NullHandler' was removed in newer
        # Django versions (use 'logging.NullHandler') — confirm the Django
        # version pinned for this project.
        'null': {
            'level': 'DEBUG',
            'class': 'django.utils.log.NullHandler',
        },
        'mail_admins': {
            'level': 'ERROR',
            'class': 'django.utils.log.AdminEmailHandler'
        },
        'console': {
            'level': 'DEBUG',
            'class': 'logging.StreamHandler',
            'formatter': 'simple'
        },
        # Main application log, rotated at 10 MB with 5 backups.
        'root': {
            'class': LOG_CLASS,
            'formatter': 'verbose',
            'filename': os.path.join(LOGGING_DIR, '%s.log' % APP_ID),
            'maxBytes': 1024 * 1024 * 10,
            'backupCount': 5
        },
        # Log for calls to platform components.
        'component': {
            'class': LOG_CLASS,
            'formatter': 'verbose',
            'filename': os.path.join(LOGGING_DIR, 'component.log'),
            'maxBytes': 1024 * 1024 * 10,
            'backupCount': 5
        },
        # Log for database (MySQL) backend output.
        'wb_mysql': {
            'class': LOG_CLASS,
            'formatter': 'verbose',
            'filename': os.path.join(LOGGING_DIR, 'wb_mysql.log'),
            'maxBytes': 1024 * 1024 * 4,
            'backupCount': 5
        },
    },
    'loggers': {
        'django': {
            'handlers': ['null'],
            'level': 'INFO',
            'propagate': True,
        },
        'django.request': {
            'handlers': ['console'],
            'level': 'ERROR',
            'propagate': True,
        },
        # The root logger, used by the whole project.
        'root': {
            'handlers': ['root'],
            'level': LOG_LEVEL,
            'propagate': True,
        },
        # Component call log.
        'component': {
            'handlers': ['component'],
            'level': 'WARN',
            'propagate': True,
        },
        # other loggers...
        'django.db.backends': {
            'handlers': ['wb_mysql'],
            'level': 'DEBUG',
            'propagate': True,
        },
    }
}
| [
"zzg.script@gmail.com"
] | zzg.script@gmail.com |
283875cea940fcedf35028d030e797b5b1dccc75 | 763fc9a03883fce8cd333d3503314c536a25cdc9 | /palindromesrecursive.py | acffa9577c4c83a2da02bff121a00b3d6db2b5f8 | [] | no_license | kegenrodrigues/ShortPrograms | ac367206f724a318062b7eecad62f58caa37066f | 0f2cb7ace72e0d3ab4a7bda7ec3535e7d7ee0889 | refs/heads/master | 2021-01-22T05:47:55.755458 | 2017-02-12T07:54:07 | 2017-02-12T07:54:07 | 81,711,371 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 606 | py | # Define a procedure is_palindrome, that takes as input a string, and returns a
# Boolean indicating if the input string is a palindrome.
# Base Case: '' => True
# Recursive Case: if first and last characters don't match => False
# if they do match, is the middle a palindrome?
def is_palindrome(s):
    """Return True if string s reads the same forwards and backwards.

    The empty string counts as a palindrome.  Implemented iteratively with
    two pointers converging from both ends.
    """
    left = 0
    right = len(s) - 1
    while left < right:
        if s[left] != s[right]:
            return False
        left += 1
        right -= 1
    return True
# Demo output; note these are Python 2 print statements.
print is_palindrome('')
#>>> True
print is_palindrome('abab')
#>>> False
print is_palindrome('abba')
#>>> True
"kegenrodrigues95@gmail.com"
] | kegenrodrigues95@gmail.com |
51e85eef5df89ea6e897b123a36fc3c3e8ca8598 | c15849c46c383f725c83d7f339f39f64a3f0e069 | /review/models.py | 44f25a57f572fea446259641e24bd4794c0a0c17 | [] | no_license | Tariqalrehily/iShop | 3ff9658a4a0dd1de991e860f44c3ff2c1eb5cf8c | 0454106c1ff5b508b734b95f3f6cfe1008d630b5 | refs/heads/master | 2022-11-24T02:34:03.956204 | 2020-02-18T19:13:59 | 2020-02-18T19:13:59 | 231,911,435 | 0 | 1 | null | 2022-11-22T05:14:54 | 2020-01-05T12:07:28 | HTML | UTF-8 | Python | false | false | 445 | py | from django.db import models
class Review(models.Model):
    """A user-submitted review with a 1-5 star rating and free-text comment."""

    # Ratings are restricted to whole stars from 1 to 5.
    RATING_CHOICES = (
        (1, '1'),
        (2, '2'),
        (3, '3'),
        (4, '4'),
        (5, '5'),
    )
    pub_date = models.DateTimeField('date published')
    user_name = models.CharField(max_length=100)
    rating = models.IntegerField(choices=RATING_CHOICES)
    comment = models.CharField(max_length=1000)

    def __str__(self):
        # BUGFIX: only __unicode__ was defined, which Python 3 / modern
        # Django ignore — objects displayed with the default repr.
        return self.user_name

    # Kept for backwards compatibility with Python 2 style Django code.
    def __unicode__(self):
        return self.user_name
| [
"Tariqalrehily@gmail.com"
] | Tariqalrehily@gmail.com |
3ba42d75b8d7773ba4b0b673f1dbbbdaf2f8c9ec | 4a41223e8c8ab33d83c6f213692c6097bb96540d | /eelbrain/_stats/permutation.py | b4e3ec6a2a339c793de3982c33ed7a8d87dbda5e | [
"BSD-3-Clause"
] | permissive | rbaehr/Eelbrain | 33ceeee24533581ab3e7569c31e0f6a6c6dfcda1 | 6301dc256e351fdbb58bbe13ab48fde7bfcf192a | refs/heads/master | 2021-07-05T19:19:20.573231 | 2017-10-03T04:35:23 | 2017-10-03T04:35:23 | 104,907,464 | 0 | 0 | null | 2017-09-26T16:03:20 | 2017-09-26T16:03:20 | null | UTF-8 | Python | false | false | 6,505 | py | # Author: Christian Brodbeck <christianbrodbeck@nyu.edu>
from itertools import izip
from math import ceil
import random
import numpy as np
from .._data_obj import NDVar, Var
from .._utils import intervals
_YIELD_ORIGINAL = 0
# for testing purposes, yield original order instead of permutations
def _resample_params(N, samples):
"""Decide whether to do permutations or random resampling
Parameters
----------
N : int
Number of observations.
samples : int
``samples`` parameter (number of resampling iterations, or < 0 to
sample all permutations).
Returns
-------
actual_n_samples : int
Adapted number of resamplings that will be done.
samples_param : int
Samples parameter for the resample function (-1 to do all permutations,
otherwise same as n_samples).
"""
n_perm = 2 ** N
if n_perm - 1 <= samples:
samples = -1
if samples < 0:
n_samples = n_perm - 1
else:
n_samples = samples
return n_samples, samples
def permute_order(n, samples=10000, replacement=False, unit=None, seed=0):
"""Generator function to create indices to shuffle n items
Parameters
----------
n : int
Number of cases.
samples : int
Number of samples to yield. If < 0, all possible permutations are
performed.
replacement : bool
whether random samples should be drawn with replacement or without.
unit : categorial
Factor specifying unit of measurement (e.g. subject). If unit is
specified, resampling proceeds by first resampling the categories of
unit (with or without replacement) and then shuffling the values
within units (no replacement).
seed : None | int
Seed the random state of the relevant randomization module
(:mod:`random` or :mod:`numpy.random`) to make replication possible.
None to skip seeding (default 0).
Returns
-------
Iterator over index.
"""
n = int(n)
samples = int(samples)
if samples < 0:
err = "Complete permutation for resampling through reordering"
raise NotImplementedError(err)
if _YIELD_ORIGINAL:
original = np.arange(n)
for _ in xrange(samples):
yield original
return
if seed is not None:
np.random.seed(seed)
if unit is None:
if replacement:
for _ in xrange(samples):
yield np.random.randint(n, n)
else:
index = np.arange(n)
for _ in xrange(samples):
np.random.shuffle(index)
yield index
else:
if replacement:
raise NotImplementedError("Replacement and units")
else:
idx_orig = np.arange(n)
idx_perm = np.arange(n)
unit_idxs = [np.nonzero(unit == cell)[0] for cell in unit.cells]
for _ in xrange(samples):
for idx_ in unit_idxs:
v = idx_orig[idx_]
np.random.shuffle(v)
idx_perm[idx_] = v
yield idx_perm
def permute_sign_flip(n, samples=10000, seed=0, out=None):
    """Iterate over indices for ``samples`` permutations of the data

    Parameters
    ----------
    n : int
        Number of cases.
    samples : int
        Number of samples to yield. If < 0, all possible permutations are
        performed.
    seed : None | int
        Seed the random state of the :mod:`random` module to make replication
        possible. None to skip seeding (default 0).
    out : array of int8 (n,)
        Buffer for the ``sign`` variable that is yielded in each iteration.

    Yields
    ------
    sign : array of int8 (n,)
        Sign for each case (``1`` or ``-1``; ``sign`` is the same array object
        but its content modified in every iteration).
    """
    # Compatibility fallbacks so the function also runs on Python 3.
    # Laziness of the zip matters: each step must advance every
    # sub-generator (which writes into a slice of ``out``) before yielding.
    try:
        lazy_zip = izip  # Python 2 (module-level itertools import)
    except NameError:
        lazy_zip = zip  # Python 3: builtin zip is lazy
    try:
        lazy_range = xrange  # Python 2
    except NameError:
        lazy_range = range  # Python 3: range is lazy like xrange
    n = int(n)
    if seed is not None:
        random.seed(seed)
    if out is None:
        out = np.empty(n, np.int8)
    else:
        assert out.shape == (n,)
    if n > 62:  # 2 ** n exceeds what xrange/random.sample can index
        if samples < 0:
            raise NotImplementedError("All possibilities for more than 62 cases")
        # Split the cases into groups of at most 62 and flip each group
        # independently, recursing into slices of the shared buffer.
        n_groups = ceil(n / 62.)
        group_size = int(ceil(n / n_groups))
        for _ in lazy_zip(*(permute_sign_flip(stop - start, samples, None,
                                              out[start: stop]) for
                            start, stop in
                            intervals(list(range(0, n, group_size)) + [n]))):
            yield out
        return
    # determine possible number of permutations
    n_perm_possible = 2 ** n
    if samples < 0:
        # do all permutations (sequence 0, i.e. all ones, is excluded)
        sample_sequences = lazy_range(1, n_perm_possible)
    else:
        # random resampling without replacement
        sample_sequences = random.sample(lazy_range(1, n_perm_possible),
                                         samples)
    for seq in sample_sequences:
        out.fill(1)
        # Each set bit of ``seq`` (least significant bit first) flips the
        # sign of the corresponding case.
        for i in (i for i, s in enumerate(bin(seq)[-1:1:-1]) if s == '1'):
            out[i] = -1
        yield out
def resample(Y, samples=10000, replacement=False, unit=None, seed=0):
    """
    Generator function to resample a dependent variable (Y) multiple times

    Parameters
    ----------
    Y : Var | NDVar
        Variable which is to be resampled.
    samples : int
        Number of samples to yield. If < 0, all possible permutations are
        performed.
    replacement : bool
        whether random samples should be drawn with replacement or without.
    unit : categorial
        Factor specifying unit of measurement (e.g. subject). If unit is
        specified, resampling proceeds by first resampling the categories of
        unit (with or without replacement) and then shuffling the values
        within units (no replacement).
    seed : None | int
        Seed the random state of the relevant randomization module
        (:mod:`random` or :mod:`numpy.random`) to make replication possible.
        None to skip seeding (default 0).

    Returns
    -------
    Iterator over Y_resampled. One copy of ``Y`` is made, and this copy is
    yielded in each iteration with shuffled data.
    """
    # Validate the input type up front.
    if isinstance(Y, NDVar):
        if not Y.has_case:
            raise ValueError("Need NDVar with cases")
    elif not isinstance(Y, Var):
        raise TypeError("Need Var or NDVar")
    out = Y.copy('{name}_resampled')
    permutations = permute_order(len(out), samples, replacement, unit, seed)
    for index in permutations:
        # Scatter the original data into the permuted positions of the copy.
        out.x[index] = Y.x
        yield out
| [
"christianmbrodbeck@gmail.com"
] | christianmbrodbeck@gmail.com |
bc36aa2af207337b1f32972a1c363e82700e8bdf | 4ad771bef6d1c2f7bb462988bc0af8ec491ce14c | /GO_score/GO_enrichment/myGOenrichment.py | e2f130aad3c464c3836596dc4201f2e618eb2a2c | [] | no_license | DabinJeong/pan-cancer_DMR | aca2baa2668c6e37c85f3d90f30d8f9a99e5510e | 63cb2a1586ce4a8ccac176bbce5f9b392e4534dd | refs/heads/master | 2023-05-30T19:07:12.182295 | 2023-05-26T03:05:31 | 2023-05-26T03:05:31 | 192,674,793 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,259 | py | #!/usr/bin/env python
import sys
import argparse
import scipy.stats as stats
# Command-line interface.
parser=argparse.ArgumentParser(
    usage='''\
%(prog)s [options] gene2subtype -trait2genes trait2genes
example: %(prog)s gene2subtype.txt -trait2genes trait2genes.txt -backgroundGenes backgroundGemes.txt -pcut 0.05 -topK None -o outfile.txt
''')
parser.add_argument('gene2subtype', metavar='str', help='gene2subtype file')
parser.add_argument('-trait2genes', required=False, metavar='str', default='/data1/project/hongryul/GOanalysis/GOBPname2gene.arabidopsis.BP.20180907.txt', help='trait2genes file')
parser.add_argument('-backgroundGenes', required=False, metavar='str', default='None', help='allgenes in first column file')
parser.add_argument('-method', required=False, metavar='[fisher|binomial]', default='fisher', help='method for statistical test')
parser.add_argument('-s', required=False, type=int, metavar='N', default=0, help='skipped lines for allgenes file')
parser.add_argument('-pcut', required=False, type=float, metavar='N', default=1.0, help='pvalue cutoff')
parser.add_argument('-topK', required=False, type=int, metavar='N', default=None, help='show top K result')
parser.add_argument('-o', dest='outfile', required=False, metavar='str', default='stdout', help='outfile')
args=parser.parse_args()

# Output stream: stdout by default, otherwise the requested file.
if args.outfile == 'stdout':
    OF=sys.stdout
else:
    OF=open(args.outfile,'w')

# Optional background gene universe: first tab-separated column of the file.
if args.backgroundGenes != 'None':
    set_gene=set()
    IF=open(args.backgroundGenes,'r')
    for line in IF:
        gene=line.rstrip('\n').split('\t',1)[0]
        set_gene.add(gene)

# Build trait -> genes and gene -> traits maps from the trait2genes file
# (format: "<trait>\t<gene1,gene2,...>"), restricted to the background
# universe when one was given.
dic_gene2trait={}
dic_trait2gene={}
IF=open(args.trait2genes,'r')
for line in IF:
    trait,genes=line.rstrip('\n').split('\t')
    lst_gene=genes.split(',')
    for gene in lst_gene:
        if args.backgroundGenes != 'None' and gene not in set_gene:
            continue
        if trait not in dic_trait2gene:
            dic_trait2gene[trait]=set()
        if gene not in dic_gene2trait:
            dic_gene2trait[gene]=set()
        dic_gene2trait[gene].add(trait)
        dic_trait2gene[trait].add(gene)

# Fraction of annotated genes carrying each trait (the binomial success
# probability used by GO_enrichment when -method binomial is selected).
dic_trait2ratio={}
for trait in dic_trait2gene.keys():
    dic_trait2ratio[trait]=float(len(dic_trait2gene[trait]))/len(dic_gene2trait)
def GO_enrichment(lst_gene):
    """Test the gene list for over-represented traits.

    Uses the module-level maps (dic_gene2trait, dic_trait2gene,
    dic_trait2ratio) and ``args.method`` to score each trait observed in
    ``lst_gene``; returns rows
    [trait, pval, n_hit, n_tested, n_background_hit, n_background]
    sorted by ascending p-value.
    """
    trait_counts = {}
    tested_genes = set()
    for gid in lst_gene:
        traits = dic_gene2trait.get(gid)
        if traits is None:
            # Gene has no trait annotation: it is not counted as tested.
            continue
        for trait in traits:
            trait_counts[trait] = trait_counts.get(trait, 0) + 1
            tested_genes.add(gid)
    results = []
    n_tested = len(tested_genes)
    n_background = len(dic_gene2trait)
    for trait, n_hit in trait_counts.items():
        n_bg_hit = len(dic_trait2gene[trait])
        if n_hit == 0:
            pval = 1.0
        else:
            if args.method == 'binomial':
                # ``n_hit - 1`` so the upper tail includes n_hit itself,
                # i.e. P(X >= n_hit).
                pval = 1.0 - stats.binom.cdf(n_hit - 1, n_tested,
                                             dic_trait2ratio[trait])
            elif args.method == 'fisher':
                # One-sided 2x2 Fisher's exact test for over-representation.
                _odds, pval = stats.fisher_exact(
                    [[n_hit, n_tested - n_hit],
                     [n_bg_hit - n_hit,
                      n_background - n_tested - n_bg_hit + n_hit]],
                    alternative='greater')
        results.append([trait, pval, n_hit, n_tested, n_bg_hit, n_background])
    return sorted(results, key=lambda row: row[1])
# Read the gene -> subtype assignments (skipping the first -s lines) and
# group genes per subtype.
IF=open(args.gene2subtype,'r')
for i in range(args.s):
    IF.readline()
OF.write('\t'.join(['subtype','setid','pval','#occured_in_tested','#total_tested','#occured_in_background','#total_background'])+'\n')
dic_subtype2gene={}
for line in IF:
    s=line.rstrip().split('\t')
    if len(s) == 1:
        # No subtype column: lump everything under the 'None' subtype.
        gene,subtype=s[0],'None'
    else:
        gene,subtype=s[0:2]
    if subtype not in dic_subtype2gene:
        dic_subtype2gene[subtype]=set()
    dic_subtype2gene[subtype].add(gene)

# Run the enrichment per subtype (numeric labels sort numerically) and
# report rows passing the p-value cutoff, optionally only the top K.
for subtype, lst_gene in sorted(dic_subtype2gene.items(),key=lambda x:float(x[0]) if x[0].isdigit() else x[0]):
    lst_out=GO_enrichment(lst_gene)
    if len(lst_out) == 0:
        # No annotated gene in this subtype: emit a placeholder row.
        OF.write('\t'.join(map(str, [subtype]+['nan']*6))+'\n')
    if args.topK == None:
        for out in lst_out:
            if float(out[1]) <= args.pcut:
                OF.write('\t'.join(map(str, [subtype, out[0]]+out[1:]))+'\n')
    else:
        for out in lst_out[0:args.topK]:
            if float(out[1]) <= args.pcut:
                OF.write('\t'.join(map(str, [subtype, out[0]]+out[1:]))+'\n')
| [
"noreply@github.com"
] | DabinJeong.noreply@github.com |
67768ec9817d3fb0cdb5ad569a52d5eb2cf99485 | cab68de86c14ac17a93fb197de5264b0230d064a | /ImageProcessing/edge.py | 0a28b64d79f26a6c025958814440b4302bbc8662 | [] | no_license | Munins-eye/OpenCVWorkshop | b7723958520de2d44b0f1a4e73b55dfaa2733164 | 9de3f3c9c8616b3a5246276cb9a91cac256a2b6e | refs/heads/master | 2020-08-23T18:48:25.594830 | 2015-10-04T00:09:48 | 2015-10-04T00:09:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,127 | py | #/usr/bin/env python
#source - opencv 2.4.6 python samples
'''
This sample demonstrates Canny edge detection.
Usage:
edge.py [<video source>]
Trackbars control edge thresholds.
'''
import cv2
import video
import sys
if __name__ == '__main__':
    # print(...) works as a statement on Python 2 and a call on Python 3.
    print(__doc__)

    # Video source: first CLI argument, or camera 0 when none is given.
    try:
        fn = sys.argv[1]
    except IndexError:
        fn = 0

    def nothing(*arg):
        """No-op trackbar callback (OpenCV requires a callable)."""
        pass

    cv2.namedWindow('edge')
    cv2.createTrackbar('thrs1', 'edge', 2000, 5000, nothing)
    cv2.createTrackbar('thrs2', 'edge', 4000, 5000, nothing)
    cap = video.create_capture(fn)
    while True:
        # Retry until a frame arrives (some cameras are slow to start).
        flag = False
        while not flag:
            flag, img = cap.read()
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        thrs1 = cv2.getTrackbarPos('thrs1', 'edge')
        thrs2 = cv2.getTrackbarPos('thrs2', 'edge')
        edge = cv2.Canny(gray, thrs1, thrs2, apertureSize=5)
        vis = img
        # vis = img.copy()
        # Darken the frame in place; '//' keeps integer division valid on
        # Python 3 (in-place true division on a uint8 array raises there).
        vis //= 2
        vis[edge != 0] = (0, 255, 0)
        cv2.imshow('edge', vis)
        ch = cv2.waitKey(5)
        if ch == 27:  # ESC quits
            break
    cv2.destroyAllWindows()
| [
"strawdog3@gmail.com"
] | strawdog3@gmail.com |
04b47d010396b5123f0750112280351703d8512d | 99836c9478dfbc42bbc48da13d562ed088ab9e69 | /Ecomwebsite/Ecomwebsite/settings.py | 622473658aa8b7c068f6293d51935635f566c153 | [] | no_license | Labannya969/SHOPEE | 9b095aa2d259e11154c2baab74731834f9659f7a | 0864d26911b642f097a5e66a9e366ac634a1bca2 | refs/heads/main | 2023-07-09T00:00:22.864273 | 2021-08-16T16:04:29 | 2021-08-16T16:04:29 | 380,132,914 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,384 | py | """
Django settings for Ecomwebsite project.
Generated by 'django-admin startproject' using Django 3.2.4.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
from pathlib import Path
import os
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent

# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control; rotate it and load
# it from the environment before any real deployment.
SECRET_KEY = 'django-insecure-@zant!e^35_z#eze779!9lecvlj+d-b7(#m6xw+4m!g)c3t+f_'

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

ALLOWED_HOSTS = []

# Application definition

INSTALLED_APPS = [
    'shop.apps.ShopConfig',
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
]

MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]

ROOT_URLCONF = 'Ecomwebsite.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'Ecomwebsite.wsgi.application'

# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': BASE_DIR / 'db.sqlite3',
    }
}

# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]

# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
# The module previously assigned STATIC_URL twice with the same value; the
# redundant second assignment has been removed.
STATIC_URL = '/static/'

# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'

# User-uploaded media.
MEDIA_ROOT = os.path.join(BASE_DIR, "media")
MEDIA_URL = "/media/"
| [
"64127385+Labannya969@users.noreply.github.com"
] | 64127385+Labannya969@users.noreply.github.com |
5af2f7fd4d5c06d6bf0a729619f19050f2580c44 | 7321af910c4e0db827fd36bd1dd82ea13f8d0815 | /rss2pdf/settings.py | ccb6afb2bdb997fbdc5a1d238a87b16a00d16996 | [] | no_license | ggfloq3/rss2pdf | 057816ad8b6aa3816470de01d2d88084148852e3 | 1db80735eebb040c613436b1972232779394297d | refs/heads/master | 2021-01-17T12:17:52.388476 | 2017-10-31T22:06:30 | 2017-10-31T22:06:30 | 84,064,510 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,597 | py | """
Django settings for rss2pdf project.
Generated by 'django-admin startproject' using Django 1.10.6.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): the key is committed to source control; rotate it and load
# it from the environment before deploying.
SECRET_KEY = '^zsl6b*f5o31#z#v2y7d1hz5v$v@vuifqwq&a&q)+zt(o@we52'

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

# NOTE(review): '*' accepts any Host header; restrict this in production.
ALLOWED_HOSTS = ['*']

# Application definition

INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'app',
]

MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]

ROOT_URLCONF = 'rss2pdf.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': ['templates'],
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
        'APP_DIRS': True,
    },
]

WSGI_APPLICATION = 'rss2pdf.wsgi.application'

# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}

# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]

# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = [
    os.path.join(BASE_DIR, "static"),
]

# Celery settings
# NOTE(review): BROKER_URL is the pre-4.0 Celery setting name; newer Celery
# expects CELERY_BROKER_URL — confirm the Celery version in use.
BROKER_URL = 'redis://localhost:6379/0'
# use json format for everything
CELERY_ACCEPT_CONTENT = ['json']
CELERY_TASK_SERIALIZER = 'json'
CELERY_RESULT_SERIALIZER = 'json'
CELERY_IMPORTS = (
    'app.tasks',
)

EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
EMAIL_HOST = 'smtp.gmail.com'
EMAIL_PORT = 587
# Credentials are taken from the environment when set; the committed
# placeholder values remain as fallbacks so existing behaviour is unchanged.
EMAIL_HOST_USER = os.environ.get('EMAIL_HOST_USER', 'USER')
EMAIL_HOST_PASSWORD = os.environ.get('EMAIL_HOST_PASSWORD', 'PASSWORD')
EMAIL_USE_TLS = True | [
"ggfloq3@gmail.com"
] | ggfloq3@gmail.com |
7cd19258b70e7a03a3c1805e5604ce82897597bc | c3562f863c139e829f5e49bdd231414ab8cce3e6 | /Fern.py | 453d0b727d7064d76344d701720a85b84820e963 | [] | no_license | ZakaryL/Fractals | e733cb7a1b2ce3f77e82f7a52712387c59a2595a | 7ee76c5e093bf461a9b263912064fc768bacef03 | refs/heads/master | 2021-07-02T08:56:10.472676 | 2020-12-29T04:59:19 | 2020-12-29T04:59:19 | 210,098,441 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 621 | py | import random
import matplotlib.pyplot as plt
# Chaos game for the Barnsley fern: repeatedly apply one of four affine
# maps, chosen with fixed probabilities, to the most recent point.
X = [0]
Y = [0]
for _ in range(10000):
    # Most recently generated point.  The original indexed X[n-1]/Y[n-1],
    # which (except at n == 0) is the point from *two* iterations back.
    x_prev = X[-1]
    y_prev = Y[-1]
    r = random.uniform(0, 100)
    if r < 1.0:
        # Stem map (1%).
        x = 0
        y = 0.16 * y_prev
    elif r < 86.0:
        # Main frond copy (85%).
        x = 0.85 * x_prev + 0.04 * y_prev
        y = -0.04 * x_prev + 0.85 * y_prev + 1.6
    elif r < 93.0:
        # Left leaflet (7%).
        x = 0.2 * x_prev - 0.26 * y_prev
        y = 0.23 * x_prev + 0.22 * y_prev + 1.6
    else:
        # Right leaflet (7%).
        x = -0.15 * x_prev + 0.28 * y_prev
        y = 0.26 * x_prev + 0.24 * y_prev + 0.44
    X.append(x)
    Y.append(y)
# Render the generated points as a green scatter plot.
'''Make a plot'''
#plt.figure(figsize=[15, 15])
plt.scatter(X,Y, color='g', marker='.')
plt.show() | [
"noreply@github.com"
] | ZakaryL.noreply@github.com |
9a1fb6a9e0f65452bf2b9a16236ff552c2ebb6e6 | 4d171efb775ccd6bdc997ecbe70467e600f8891a | /main.py | 98842ee4ea1c07d41b30e8cb5751bce6e3e68164 | [] | no_license | croodsthecraz/dual_paddle | 68be1ce90faef30ba5e8108eda69319e732dfc98 | 4a60eaf09862c4dfa3510d4bec0bf0e45e3ea4d3 | refs/heads/main | 2023-07-09T05:16:30.583288 | 2021-08-12T13:36:28 | 2021-08-12T13:36:28 | 395,325,854 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,123 | py | from kivy.app import App
from kivy.lang import Builder
from kivy.uix.screenmanager import Screen,ScreenManager,WipeTransition
from kivy.uix.widget import Widget
from kivy.animation import Animation
from kivy.properties import NumericProperty,ReferenceListProperty,ObjectProperty
from kivy.vector import Vector
from kivy.clock import Clock
from random import randint
from kivy.uix.label import Label
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.button import Button
from kivy.uix.popup import Popup
from kivy.core.window import Window
from kivy.core.audio import SoundLoader
class tennisPaddle(Widget):
    """Paddle widget: tracks the player's score and rebounds the ball."""

    score = NumericProperty(0)

    def bounce_ball(self, ball):
        """Reflect the ball horizontally when it overlaps this paddle,
        speeding it up and deflecting by the vertical hit offset."""
        if not self.collide_widget(ball):
            return
        vx, vy = ball.velocity
        # Hit position relative to paddle center, in [-1, 1].
        offset = (ball.center_y - self.center_y) / (self.height / 2)
        reflected = Vector(-1 * vx, vy) * 1.1
        ball.velocity = reflected.x, reflected.y + offset
class tennisBall(Widget):
    """Ball widget advancing by its velocity vector every frame."""

    velocity_x = NumericProperty(0)
    velocity_y = NumericProperty(0)
    velocity = ReferenceListProperty(velocity_x, velocity_y)

    def move(self):
        """Take one step along the current velocity."""
        vx, vy = self.velocity
        self.pos = Vector(vx, vy) + self.pos
class tennisGame(Widget):
    """Root game widget: owns the ball and both paddles, runs the loop."""
    ball = ObjectProperty(None)
    player1 = ObjectProperty(None)
    player2 = ObjectProperty(None)

    def __init__(self, *args, **kwargs):
        super(tennisGame, self).__init__(*args, **kwargs)
        # Drive the game loop at 60 frames per second.
        Clock.schedule_interval(self.update, 1.0 / 60.0)

    def serve_ball(self, vel=None):
        """Center the ball and launch it with velocity ``vel``.

        When ``vel`` is omitted a fresh random direction is chosen on every
        call.  The original signature used
        ``vel=Vector(4, 0).rotate(randint(0, 360))`` as the default, which
        Python evaluates once at class-definition time, so every default
        serve reused one fixed angle.
        """
        if vel is None:
            vel = Vector(4, 0).rotate(randint(0, 360))
        self.ball.center = self.center
        self.ball.velocity = vel

    def update(self, dt):
        """Advance one frame: move the ball, handle bounces and scoring."""
        self.ball.move()
        # bounce off the paddles
        self.player1.bounce_ball(self.ball)
        self.player2.bounce_ball(self.ball)
        # bounce ball off bottom or top
        if (self.ball.y < self.y) or (self.ball.top > self.top):
            self.ball.velocity_y *= -1
        # ball left the field on a side: the other player scores, re-serve
        if self.ball.x < self.x:
            self.player2.score += 1
            self.serve_ball(Vector(-4, 0).rotate(randint(0, 360)))
        if self.ball.x > self.width:
            self.player1.score += 1
            self.serve_ball(Vector(-4, 0).rotate(randint(0, 360)))
        if self.player1.score == 1:
            # NOTE(review): this instantiates a ScreenManager and discards
            # it, which has no visible effect — confirm the intended
            # behaviour (e.g. switching screens) and wire it up properly.
            Manager()

    def on_touch_move(self, touch):
        """Drag on the left/right third of the screen to move that paddle."""
        if touch.x < self.width / 3:
            self.player1.center_y = touch.y
        if touch.x > self.width - self.width / 3:
            self.player2.center_y = touch.y
class Manager(ScreenManager):
    # Root screen manager; its screens and layout come from the kv file.
    pass
class tennisApp(App):
    """Kivy application: builds the screen manager, prepares background
    music, and installs a confirm-on-exit popup."""
    def build(self):
        self.icon = 'blackboard.png'
        self.load_kv('my.kv')
        # load the mp3 music
        music = SoundLoader.load('music.mp3')
        # check the exisitence of the music
        music.loop = True
        # NOTE(review): the track is stopped right after loading and never
        # (re)started in this class — confirm playback is triggered
        # elsewhere (e.g. from the kv file) or this is dead code.
        music.stop()
        #On request of Exiting section
        Window.bind(on_request_close=self.on_request_close)
        return Manager(transition=WipeTransition())
    #calling exiting function
    def on_request_close(self, *args):
        # Returning True tells Kivy the close request was handled here.
        self.textpopup(title='Exit', text='Are you sure?')
        return True
    #Exiting kivy App class defined
    def textpopup(self, title='', text=''):
        """Show a yes/no popup: 'yes' stops the app, 'no' dismisses it."""
        box = BoxLayout(orientation='vertical')
        box.add_widget(Label(text=text))
        mybutton1 = Button(text='yes', size_hint=(1, 0.25),pos_hint={'x':0,'y':.6})
        mybutton2 = Button(text='no', size_hint=(1, 0.25), pos_hint={'x':0, 'y': .6})
        box.add_widget(mybutton1)
        box.add_widget(mybutton2)
        popup = Popup(title=title, content=box, size_hint=(None, None), size=(600, 300))
        mybutton1.bind(on_release = self.stop)
        mybutton2.bind(on_release = popup.dismiss)
        popup.open()
# Application entry point: start the Kivy event loop.
if __name__ == '__main__':
    tennisApp().run()
| [
"noreply@github.com"
] | croodsthecraz.noreply@github.com |
fb2b2686748656c4270f7e7e1cbdde51a145fb32 | 0ca43b1752d3324bf930bd4d2ec22e9d07d92f86 | /desktop/admin.py | b161369fff8dba59c5354bc8b2c15433c9f44088 | [] | no_license | ammandas/Molecular_Methods_App | 3a0990e81d046b7f1ba27ecb9a9300b6e8d5d235 | b1fe907c4ad44d314d82f5837853d2dd05deeaf0 | refs/heads/master | 2021-01-01T18:17:32.954166 | 2015-02-23T15:19:51 | 2015-02-23T15:19:51 | 30,081,177 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 402 | py | # Registers the models with the admin interface
from django.contrib import admin
from desktop.models import User, Result, Glossary, QQuestion, Answer, Lab, MQuestion, Video
#admin.site.register(User)
# Register the desktop models with the default ModelAdmin, in the same
# order as the original one-per-line calls.
for _model in (Result, Glossary, QQuestion, Answer, Lab, MQuestion, Video):
    admin.site.register(_model)
| [
"mattfrost_86@yahoo.co.uk"
] | mattfrost_86@yahoo.co.uk |
2babfffb9e6765a49d67d7f9fc2e01dd91bd7767 | 3ecdb2eece2d958c3bcf9d458a7a80308dd1d890 | /python_fundamentals/functions_basic_II.py | 6a23fc5b5f77714c3ee1601eb5ac5b9bb6f07d6a | [] | no_license | pikap3w/python_oct_2018 | a06d31fdb19102a6b186841c32cc74b0a704fa2a | c72d2fb1d4656b39e1b9441aea534d2dd7a708d2 | refs/heads/master | 2020-03-30T13:25:13.724792 | 2018-10-21T04:08:12 | 2018-10-21T04:08:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 716 | py | # 1) Countdown
def countdown(num):
    """Return the list counting down from ``num`` to 0 (empty if num < 0)."""
    # range produces the descending sequence directly; no append loop needed.
    return list(range(num, -1, -1))
# 2) Print and Return
def printReturn(arr):
    """Print the first element of ``arr`` and return the second."""
    first, second = arr[0], arr[1]
    print(first)
    return second
# 3) First Plus Length
def firstPlusLength(arr):
    """Return the first element plus the length of ``arr``."""
    # Return directly instead of binding to a local named ``sum``,
    # which shadowed the builtin.
    return arr[0] + len(arr)
# 4) Values Greater than Second
def greaterThan2nd(arr):
    """Return the values of ``arr`` strictly greater than its second
    element, printing how many there are.  Returns False when there is no
    second element (the original returned False for one element but
    crashed with IndexError on an empty list)."""
    if len(arr) < 2:
        return False
    pivot = arr[1]
    bigger = [value for value in arr if value > pivot]
    print(len(bigger))
    return bigger
# 5) This Length, That Value
def lengthAndValue(size, value):
list = []
for _ in range(size):
list.append(value)
return list | [
"pikap3w@Kristens-MacBook-Pro.local"
] | pikap3w@Kristens-MacBook-Pro.local |
73c4e490daff71c9c65cf6d1ea1fffcfe7253ff8 | cc40dc1725079b19cd93605e236f7aad57e533f9 | /reviewboard/oauth/apps.py | 7a3748b50cc0563d291513a5eb8322a2ae5aed45 | [
"MIT"
] | permissive | bolariin/reviewboard | 73ea3a8f9a36d344a9943db2ecb391ddf316e52b | 3dde7af0c3dd0ecfac04b2d617fd4fe84afbeb82 | refs/heads/master | 2020-04-29T09:02:04.860098 | 2019-03-19T08:21:53 | 2019-03-19T08:21:53 | 176,008,790 | 0 | 0 | null | 2019-03-16T18:22:59 | 2019-03-16T18:22:58 | null | UTF-8 | Python | false | false | 292 | py | """The app definition for reviewboard.oauth."""
from __future__ import unicode_literals
# Compatibility shim: AppConfig exists only on Django >= 1.7.
try:
    from django.apps import AppConfig
except ImportError:
    # Django < 1.7: fall back to a plain ``object`` base class so the
    # module still imports.
    AppConfig = object
class OAuthAppConfig(AppConfig):
name = 'reviewboard.oauth'
label = 'reviewboard_oauth' | [
"barret@beanbaginc.com"
] | barret@beanbaginc.com |
01ebc7eb291e960ec42d8dc23255d61ec4d1af5c | 71a28d4bc1c0f32dc9185332ba2142ba823d3e53 | /core/urls.py | b9dba265ed605e2da34c2ae8c0cdaf56b88c141d | [] | no_license | ekeydar/train_stops_map | 51b1e3a86967851ea16f2e822867f881b91d24fe | acbc1a1a250ca6c3e7f5dde8932301bd4b67c96d | refs/heads/master | 2016-09-12T21:17:13.771035 | 2016-05-27T12:54:46 | 2016-05-27T12:54:46 | 59,438,833 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 209 | py | from django.conf.urls import url
from . import views
# URL routes: list view at /stops/ and detail view at /stops/<pk>/.
urlpatterns = [
    url(r'^stops/$', views.StopList.as_view(), name='stops'),
    url(r'^stops/(?P<pk>\d+)/$', views.StopDetail.as_view(), name='stop'),
]
| [
"ekeydar@gmail.com"
] | ekeydar@gmail.com |
ff792eaaf6aab13d51935a99da6ce2da3404f064 | 7f37a4da60257917a86e7ba7d34a1107a9bdbba4 | /code/models/gaze_base.py | 6560ed2ed20873936c44e15bb2efd5d1f4087d1c | [] | no_license | iszff/RGBD-Gaze | 9d0545611a68a0ce0d25ac6b4d87aa17b4e87ac7 | ee768bf53b0507bcfa6d7a1916fa1aaede30d132 | refs/heads/master | 2022-12-09T23:59:20.744055 | 2020-09-10T08:25:42 | 2020-09-10T08:25:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,026 | py | from torchvision.models.resnet import BasicBlock, Bottleneck, ResNet, model_urls
import torch.utils.model_zoo as model_zoo
from torch import nn
import torch as th
class ResNetEncoder(ResNet):
    """torchvision ResNet reused as a feature extractor: same pipeline as
    ``ResNet.forward`` through global average pooling, but the final ``fc``
    classifier is never applied and the flattened feature vector is
    returned instead."""
    def forward(self, x):
        # Stem: 7x7 conv -> batch norm -> ReLU -> max pool.
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        # x112_64 = x
        x = self.maxpool(x)
        # Four residual stages.
        x = self.layer1(x)
        # x56_64 = x
        x = self.layer2(x)
        # x28_128 = x
        x = self.layer3(x)
        # x14_256 = x
        x = self.layer4(x)
        # Global average pool and flatten to (batch, channels).
        x = self.avgpool(x)
        x = x.view(x.size(0), -1)
        # NOTE(review): standard ResNet stages already end in ReLU, which
        # would make this extra ReLU a no-op — confirm it is intentional.
        x = self.relu(x)
        return x#, x112_64, x56_64, x28_128, x14_256
def _resnet_encoder(arch, block, layers, pretrained, **kwargs):
    """Build a ResNetEncoder and optionally load pretrained weights.

    ``arch`` keys into torchvision's ``model_urls`` table; ``block`` and
    ``layers`` are forwarded to the ResNet constructor.  Shared helper for
    the five public factories below, which previously repeated this body.
    """
    model = ResNetEncoder(block, layers, **kwargs)
    if pretrained:
        model.load_state_dict(model_zoo.load_url(model_urls[arch]))
    return model


def resnet18(pretrained=False, **kwargs):
    """Constructs a ResNet-18 model.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
    """
    return _resnet_encoder('resnet18', BasicBlock, [2, 2, 2, 2], pretrained,
                           **kwargs)


def resnet34(pretrained=False, **kwargs):
    """Constructs a ResNet-34 model.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
    """
    return _resnet_encoder('resnet34', BasicBlock, [3, 4, 6, 3], pretrained,
                           **kwargs)


def resnet50(pretrained=False, **kwargs):
    """Constructs a ResNet-50 model.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
    """
    return _resnet_encoder('resnet50', Bottleneck, [3, 4, 6, 3], pretrained,
                           **kwargs)


def resnet101(pretrained=False, **kwargs):
    """Constructs a ResNet-101 model.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
    """
    return _resnet_encoder('resnet101', Bottleneck, [3, 4, 23, 3], pretrained,
                           **kwargs)


def resnet152(pretrained=False, **kwargs):
    """Constructs a ResNet-152 model.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
    """
    return _resnet_encoder('resnet152', Bottleneck, [3, 8, 36, 3], pretrained,
                           **kwargs)
class Decoder(nn.Module):
    """MLP head: compress a feature vector, append a 3-D info vector, and
    regress a 2-D output."""

    def __init__(self, feat_dim=512):
        super(Decoder, self).__init__()
        # Feature compressor: feat_dim -> 256 -> 128.
        self.decoder1 = nn.Sequential(
            nn.Linear(feat_dim, 256),
            nn.ReLU(),
            nn.Linear(256, 128),
            nn.ReLU(),
        )
        # Final regressor over the concatenated [features, info] vector.
        self.decoder2 = nn.Sequential(
            nn.Linear(128 + 3, 2)
        )

    def forward(self, feat, info):
        compressed = self.decoder1(feat)
        combined = th.cat([compressed, info], 1)
        return self.decoder2(combined)
| [
"liandz@shanghaitech.edu.cn"
] | liandz@shanghaitech.edu.cn |
8305cb8cffa7575585552abc217622a9595cd7a0 | 9d30b2ee32c41b07e34aa58b249929262d007c80 | /2.py | 67095890a963bf457fa059acb6928ef72346d78b | [] | no_license | Dmi-Dor/PYbasics7leasson | 540ae76469fe1dae607faff16d12ae0d62430a29 | 97e204b4c512aba8e01d3571f11e1a867cf0a4bb | refs/heads/master | 2022-11-21T15:49:41.401073 | 2020-07-19T16:49:15 | 2020-07-19T16:49:15 | 280,908,840 | 0 | 0 | null | 2020-07-19T16:50:41 | 2020-07-19T16:48:27 | Python | UTF-8 | Python | false | false | 1,140 | py | class Textile:
    def __init__(self, width, height):
        # Fabric dimensions (units as supplied by the caller) used by the
        # area formulas below.
        self.width = width
        self.height = height
def get_square_c(self):
return self.width / 6.5 + 0.5
def get_square_j(self):
return self.height * 2 + 0.3
@property
def get_sq_full(self):
return str(f'Общая площадь ткани \n'
f' {(self.width / 6.5 + 0.5) + (self.height * 2 + 0.3)}')
class Coat(Textile):
    """Coat cut from a textile; caches its rounded fabric area."""

    def __init__(self, width, height):
        super().__init__(width, height)
        # Same width-based formula as Textile.get_square_c, rounded.
        self.square_c = round(self.get_square_c())

    def __str__(self):
        return f'Площадь для пальто {self.square_c}'
class Jacket(Textile):
    """Jacket cut from a textile; caches its rounded fabric area."""

    def __init__(self, width, height):
        super().__init__(width, height)
        # Same height-based formula as Textile.get_square_j, rounded.
        self.square_j = round(self.get_square_j())

    def __str__(self):
        return f'Площадь для костюма {self.square_j}'
# Demo: build one coat and one jacket and print their areas.
coat = Coat(2, 4)
jacket = Jacket(1, 2)
print(coat)
print(jacket)
print(coat.get_sq_full)
print(jacket.get_sq_full)
print(jacket.get_square_c())
print(jacket.get_square_j()) | [
"noreply@github.com"
] | Dmi-Dor.noreply@github.com |
f4de03d4f594c373e50136f0ce0233ff7e897e9c | e3a91ecd77ab23c2ada0cd7702a5eef2f7b8039e | /Dice/most_six.py | 4cf6d40bd4ee20ee876db1ad3f6d940369560b19 | [] | no_license | varunthind/Dice-program | ad07eb78564dc4ad5e1770c3ddeb1cd9cf0389d6 | 5a061a65f1bfa33de0dbdf4e441de960ff38a182 | refs/heads/main | 2023-06-04T01:26:44.564107 | 2021-06-27T12:41:53 | 2021-06-27T12:41:53 | 380,736,697 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 387 | py | import random
# Loaded-dice game: keeps rolling until the player enters an exit word.
# Two extra hidden rolls make a reported "6" much more likely than fair.
while True:
    diceNo = random.randint(1, 6)
    randNo = random.randint(1, 6)
    otherRand = random.randint(1, 6)
    roll = input('Press Enter(play) Or Space and then Enter(exit):')
    if roll == 'close' or roll == ' ' or roll == 'exit':
        exit()
    elif randNo == diceNo or randNo == otherRand:
        # Either hidden roll matching forces the reported value to six.
        print(6)
    else:
        print(diceNo)
| [
"noreply@github.com"
] | varunthind.noreply@github.com |
ec6e6ad22ab5a96295739ceb492638e945ef5cdd | 2734b77a68f6d7e22e8b823418ad1c59fe1a34af | /opengever/dossier/upgrades/to4303.py | 0a0901f85458945e5a68304d5dff1c762b0b9236 | [] | no_license | 4teamwork/opengever.core | 5963660f5f131bc12fd0a5898f1d7c8f24a5e2b1 | a01bec6c00d203c21a1b0449f8d489d0033c02b7 | refs/heads/master | 2023-08-30T23:11:27.914905 | 2023-08-25T14:27:15 | 2023-08-25T14:27:15 | 9,788,097 | 19 | 8 | null | 2023-09-14T13:28:56 | 2013-05-01T08:28:16 | Python | UTF-8 | Python | false | false | 964 | py | from ftw.upgrade import ProgressLogger
from ftw.upgrade import UpgradeStep
from opengever.dossier.templatedossier import TemplateDossier
from plone import api
from zope.event import notify
from zope.lifecycleevent import ObjectModifiedEvent
class MigrateTemplateDossierClass(UpgradeStep):
def __call__(self):
self.setup_install_profile('profile-opengever.dossier.upgrades:4303')
self.migrate_template_dossiers()
def migrate_template_dossiers(self):
catalog = api.portal.get_tool('portal_catalog')
brains = catalog.unrestrictedSearchResults(
portal_type='opengever.dossier.templatedossier')
with ProgressLogger('Migrating templatedossier class', brains) as step:
for brain in brains:
self.migrate_object(brain.getObject())
step()
def migrate_object(self, obj):
self.migrate_class(obj, TemplateDossier)
notify(ObjectModifiedEvent(obj))
| [
"david.erni@4teamwork.ch"
] | david.erni@4teamwork.ch |
51c843f65c60601d922c917dd8e9df84d7c0f482 | 77752f9e5b8c5f854afe44c10c7339d8a02368a3 | /intervalosConfianca.py | 3c02095b048bf3543040d065e699a46b20016822 | [] | no_license | ajffdelgado/projeto1_AM | afc45dadc613c8736314597d4ccde5b17e3e8b3a | 73879134bd3f116717897af34d9c79fb653f6a4a | refs/heads/master | 2020-04-03T00:05:26.581870 | 2018-11-29T16:27:39 | 2018-11-29T16:27:39 | 154,888,201 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,958 | py | # -*- coding: utf-8 -*-
"""
Created on Wed Nov 28 20:49:49 2018
@author: Delgado
"""
import numpy as np
import scipy as sp
import scipy.stats
acuracias = [[78.0952380952381, 78.0952380952381, 84.76190476190476, 74.76190476190476, 75.71428571428571, 80.47619047619048, 80.95238095238095, 79.52380952380952, 82.38095238095238, 80.47619047619048], [78.0952380952381, 78.0952380952381, 84.76190476190476, 74.76190476190476, 75.71428571428571, 80.47619047619048, 80.95238095238095, 79.52380952380952, 82.38095238095238, 80.47619047619048], [78.0952380952381, 78.0952380952381, 84.76190476190476, 74.76190476190476, 75.71428571428571, 80.47619047619048, 80.95238095238095, 79.52380952380952, 82.38095238095238, 80.47619047619048], [78.0952380952381, 78.0952380952381, 84.76190476190476, 74.76190476190476, 75.71428571428571, 80.47619047619048, 80.95238095238095, 79.52380952380952, 82.38095238095238, 80.47619047619048], [78.0952380952381, 78.0952380952381, 84.76190476190476, 74.76190476190476, 75.71428571428571, 80.47619047619048, 80.95238095238095, 79.52380952380952, 82.38095238095238, 80.47619047619048], [78.0952380952381, 78.0952380952381, 84.76190476190476, 74.76190476190476, 75.71428571428571, 80.47619047619048, 80.95238095238095, 79.52380952380952, 82.38095238095238, 80.47619047619048], [78.0952380952381, 78.0952380952381, 84.76190476190476, 74.76190476190476, 75.71428571428571, 80.47619047619048, 80.95238095238095, 79.52380952380952, 82.38095238095238, 80.47619047619048], [78.0952380952381, 78.0952380952381, 84.76190476190476, 74.76190476190476, 75.71428571428571, 80.47619047619048, 80.95238095238095, 79.52380952380952, 82.38095238095238, 80.47619047619048], [78.0952380952381, 78.0952380952381, 84.76190476190476, 74.76190476190476, 75.71428571428571, 80.47619047619048, 80.95238095238095, 79.52380952380952, 82.38095238095238, 80.47619047619048], [78.0952380952381, 78.0952380952381, 84.76190476190476, 74.76190476190476, 75.71428571428571, 80.47619047619048, 80.95238095238095, 79.52380952380952, 82.38095238095238, 80.47619047619048], [78.0952380952381, 78.0952380952381, 84.76190476190476, 74.76190476190476, 
75.71428571428571, 80.47619047619048, 80.95238095238095, 79.52380952380952, 82.38095238095238, 80.47619047619048], [78.0952380952381, 78.0952380952381, 84.76190476190476, 74.76190476190476, 75.71428571428571, 80.47619047619048, 80.95238095238095, 79.52380952380952, 82.38095238095238, 80.47619047619048], [78.0952380952381, 78.0952380952381, 84.76190476190476, 74.76190476190476, 75.71428571428571, 80.47619047619048, 80.95238095238095, 79.52380952380952, 82.38095238095238, 80.47619047619048], [78.0952380952381, 78.0952380952381, 84.76190476190476, 74.76190476190476, 75.71428571428571, 80.47619047619048, 80.95238095238095, 79.52380952380952, 82.38095238095238, 80.47619047619048], [78.0952380952381, 78.0952380952381, 84.76190476190476, 74.76190476190476, 75.71428571428571, 80.47619047619048, 80.95238095238095, 79.52380952380952, 82.38095238095238, 80.47619047619048], [78.0952380952381, 78.0952380952381, 84.76190476190476, 74.76190476190476, 75.71428571428571, 80.47619047619048, 80.95238095238095, 79.52380952380952, 82.38095238095238, 80.47619047619048], [78.0952380952381, 78.0952380952381, 84.76190476190476, 74.76190476190476, 75.71428571428571, 80.47619047619048, 80.95238095238095, 79.52380952380952, 82.38095238095238, 80.47619047619048], [78.0952380952381, 78.0952380952381, 84.76190476190476, 74.76190476190476, 75.71428571428571, 80.47619047619048, 80.95238095238095, 79.52380952380952, 82.38095238095238, 80.47619047619048], [78.0952380952381, 78.0952380952381, 84.76190476190476, 74.76190476190476, 75.71428571428571, 80.47619047619048, 80.95238095238095, 79.52380952380952, 82.38095238095238, 80.47619047619048], [78.0952380952381, 78.0952380952381, 84.76190476190476, 74.76190476190476, 75.71428571428571, 80.47619047619048, 80.95238095238095, 79.52380952380952, 82.38095238095238, 80.47619047619048], [78.0952380952381, 78.0952380952381, 84.76190476190476, 74.76190476190476, 75.71428571428571, 80.47619047619048, 80.95238095238095, 79.52380952380952, 82.38095238095238, 
80.47619047619048], [78.0952380952381, 78.0952380952381, 84.76190476190476, 74.76190476190476, 75.71428571428571, 80.47619047619048, 80.95238095238095, 79.52380952380952, 82.38095238095238, 80.47619047619048], [78.0952380952381, 78.0952380952381, 84.76190476190476, 74.76190476190476, 75.71428571428571, 80.47619047619048, 80.95238095238095, 79.52380952380952, 82.38095238095238, 80.47619047619048], [78.0952380952381, 78.0952380952381, 84.76190476190476, 74.76190476190476, 75.71428571428571, 80.47619047619048, 80.95238095238095, 79.52380952380952, 82.38095238095238, 80.47619047619048], [78.0952380952381, 78.0952380952381, 84.76190476190476, 74.76190476190476, 75.71428571428571, 80.47619047619048, 80.95238095238095, 79.52380952380952, 82.38095238095238, 80.47619047619048], [78.0952380952381, 78.0952380952381, 84.76190476190476, 74.76190476190476, 75.71428571428571, 80.47619047619048, 80.95238095238095, 79.52380952380952, 82.38095238095238, 80.47619047619048], [78.0952380952381, 78.0952380952381, 84.76190476190476, 74.76190476190476, 75.71428571428571, 80.47619047619048, 80.95238095238095, 79.52380952380952, 82.38095238095238, 80.47619047619048], [78.0952380952381, 78.0952380952381, 84.76190476190476, 74.76190476190476, 75.71428571428571, 80.47619047619048, 80.95238095238095, 79.52380952380952, 82.38095238095238, 80.47619047619048], [78.0952380952381, 78.0952380952381, 84.76190476190476, 74.76190476190476, 75.71428571428571, 80.47619047619048, 80.95238095238095, 79.52380952380952, 82.38095238095238, 80.47619047619048], [78.0952380952381, 78.0952380952381, 84.76190476190476, 74.76190476190476, 75.71428571428571, 80.47619047619048, 80.95238095238095, 79.52380952380952, 82.38095238095238, 80.47619047619048]]
melhor_indice = None
maior_media = 0
medias = []
for i in range(len(acuracias)):
media = np.mean(acuracias[i])
medias.append(media)
if (media > maior_media):
maior_media = media
melhor_indice = i
print(len(medias))
media_final = np.mean(medias)
minimo = min(medias)
maximo = max(medias)
conjunto = np.array(medias)
print(conjunto)
#calcular a matriz média
media = conjunto.mean(axis=0)
print('media:{}'.format(media))
desvio_padrao = np.std(conjunto)
conf_int = scipy.stats.norm.interval(0.95, loc=media, scale=desvio_padrao)
print("intervalo de confiança: "+str(conf_int))
print(conf_int)
print("Máximo: "+str(maximo))
print("Mínimo: "+str(minimo)) | [
"ajffdelgado@hotmail.com"
] | ajffdelgado@hotmail.com |
5b3558a023a6725d52a2b47aa167635dd4cac157 | 207f0d9f88f7c728dd16eea40288f40d49795325 | /Main.py | e3890c046baa4c5c24fa3c2d8c58630588c23c50 | [] | no_license | shaimove/BraTS2020-MRI-Brain-Segmentaion | acbf29e58dd584480be994c24d10bb50e778def7 | a4c4770e887011ce664e88b935f3b4f485644ba7 | refs/heads/main | 2023-02-27T08:16:04.506503 | 2021-01-17T07:43:57 | 2021-01-17T07:43:57 | 330,183,942 | 4 | 3 | null | null | null | null | UTF-8 | Python | false | false | 5,671 | py | # #Main.py
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import nibabel as nib
import torch
from torch.utils import data
import model
import utils
import MetricAndLoss
from Dataset import DatasetMRI
from Log import SegmentationLoss
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
#%% Dataset
# Define folders
folder_training = '../Training/'
folder_validation = '../Validation/'
# create DataFrames with data about our dataset
tableTraining = utils.CreateDataTable(folder_training,True)
# now, split the tableTraining TO 80-20 split for validation set
num_training = 269
tableValidation = tableTraining.iloc[num_training:]
tableTraining = tableTraining.iloc[:num_training]
# calculate the z-score normalization for every input type
Dict_stats = utils.CalculateStats(tableTraining,True)
#%% Create dataset and data loade
# define batch size
batch_size_train = 2
batch_size_validation = 2
# define dataset and dataloader for training
train_dataset = DatasetMRI(tableTraining,Dict_stats)
train_loader = data.DataLoader(train_dataset,batch_size=batch_size_train,shuffle=True)
# define dataset and dataloader for validation
validation_dataset = DatasetMRI(tableValidation,Dict_stats)
validation_loader = data.DataLoader(validation_dataset,batch_size=batch_size_validation,shuffle=True)
#%% Define parameters
# number of epochs
num_epochs = 20
# load model
model = model.MRIModel().to(device)
utils.count_parameters(model)
# send parameters to optimizer
learning_rate = 0.001
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
# define loss function
#criterion = MetricAndLoss.DiceLoss()
# initiate logs
trainLog = SegmentationLoss()
validationLog = SegmentationLoss()
#%% Training
for epoch in range(num_epochs):
##################
### TRAIN LOOP ###
##################
# set the model to train mode
model.train()
# initiate training loss
train_loss = 0
i = 0 # index for log
for batch in train_loader:
# get batch images and labels
T1 = batch['T1'].to(device)
T1_ce = batch['T1 ce'].to(device)
T2 = batch['T2'].to(device)
FLAIR = batch['FLAIR'].to(device)
labels = batch['Label'].to(device)
# clear the old gradients from optimizer
optimizer.zero_grad()
# forward pass: feed inputs to the model to get outputs
output = model(T1,T1_ce,T2,FLAIR)
# calculate the training batch loss
#loss = criterion(output, torch.max(labels, 1)[1])
loss = MetricAndLoss.DiceLoss(output,labels)
# backward: perform gradient descent of the loss w.r. to the model params
loss.backward()
# update the model parameters by performing a single optimization step
optimizer.step()
# accumulate the training loss
train_loss += loss.item()
# update training log
print('Epoch %d, Batch %d/%d, loss: %.4f' % (epoch,i,len(train_loader),loss))
trainLog.BatchUpdate(epoch,i,loss)
i += 1 # update index
#######################
### VALIDATION LOOP ###
#######################
# set the model to eval mode
model.eval()
# initiate validation loss
valid_loss = 0
i = 0 # index for Log
# turn off gradients for validation
with torch.no_grad():
for batch in validation_loader:
# get batch images and labels
T1 = batch['T1'].to(device)
T1_ce = batch['T1 ce'].to(device)
T2 = batch['T2'].to(device)
FLAIR = batch['FLAIR'].to(device)
labels = batch['Label'].to(device)
# forward pass
output = model(T1,T1_ce,T2,FLAIR)
# validation batch loss
#loss = criterion(output, torch.max(labels, 1)[1])
loss = MetricAndLoss.DiceLoss(output,labels)
# accumulate the valid_loss
valid_loss += loss.item()
# update validation log
print('Epoch %d, Batch %d/%d, loss: %.4f' % (epoch,i,len(validation_loader),loss))
validationLog.BatchUpdate(epoch,i,loss)
i += 1 # update loss
#########################
## PRINT EPOCH RESULTS ##
#########################
train_loss /= len(train_loader)
valid_loss /= len(validation_loader)
# update training and validation loss
trainLog.EpochUpdate(epoch,train_loss)
validationLog.EpochUpdate(epoch,valid_loss)
# print results
print('Epoch: %s/%s: Training loss: %.3f. Validation Loss: %.3f.'
% (epoch+1,num_epochs,train_loss,valid_loss))
#%% Save the model
PATH = '../model_16_01_2020_3D_U_net.pt'
train_loss = trainLog.getLoss()
validation_loss = validationLog.getLoss()
torch.save({'epoch': epoch,
'model_state_dict': model.state_dict(),
'optimizer_state_dict': optimizer.state_dict(),
'train_loss': train_loss,
'validation_loss': validation_loss}, PATH)
#%% Load the model
if False:
PATH = '../model_16_01_2020_3D_U_net.pt'
checkpoint = torch.load(PATH)
import model
model2 = model.MRIModel()
model2.load_state_dict(checkpoint['model_state_dict'])
#%%
plt.figure()
plt.plot(range(num_epochs),train_loss,label='Training Loss')
plt.plot(range(num_epochs),validation_loss,label='Validation Loss')
plt.grid(); plt.xlabel('Number of epochs'); plt.ylabel('Loss')
plt.title('Loss for 3D-Unet for BraTS2020 Brain MRI Segmentation')
plt.legend()
| [
"47494709+shaimove@users.noreply.github.com"
] | 47494709+shaimove@users.noreply.github.com |
0a46033393c86559e612e6b1b103cf9acf25da44 | 6519fdac61df9bb04350f2cd2ccb497879926b5a | /cloudsolv/wsgi.py | c3ede4c3eba8388a7dc52d5651118a021e7e6f32 | [] | no_license | Avinash517/cloudsolv | ae3e770f515d1df77a0582df7f0bc5fd5bf33d5f | d280b460126998a2014f5960c765e495851fa6d7 | refs/heads/master | 2020-08-02T06:26:14.981516 | 2019-09-27T07:45:43 | 2019-09-27T07:45:43 | 210,820,078 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 395 | py | """
WSGI config for cloudsolv project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'cloudsolv.settings')
application = get_wsgi_application()
| [
"avinashjadhav@gmail.com"
] | avinashjadhav@gmail.com |
f898bc011b7d9345fbef96b0f970ceb599a3409a | 6f1d57238f3b395b04696a16768bcc507f00630c | /A_Comparing_Strings.py | 283a2e89d3cfcd5b5fe35998a4154d040395da59 | [] | no_license | FazleRabbbiferdaus172/Codeforces_Atcoder_Lightoj_Spoj | 024a4a2a627de02e4698709d6ab86179b8301287 | 6465e693337777e7bd78ef473b4d270ce757a3a2 | refs/heads/master | 2023-07-01T06:32:14.775294 | 2021-07-27T17:07:37 | 2021-07-27T17:07:37 | 271,202,781 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 396 | py | import sys
s1 = list(input())
s2 = list(input())
d1, d2 = dict(), dict()
miss = 0
if len(s1) != len(s2):
print("NO")
sys.exit(0)
for i in range(97, 97+26):
d1[chr(i)] = 0
d2[chr(i)] = 0
for i in range(len(s1)):
d1[s1[i]] += 1
d2[s2[i]] += 1
if s1[i] != s2[i]:
miss += 1
if d1 == d2 and miss == 2:
print("YES")
else:
print("NO")
| [
"noreply@github.com"
] | FazleRabbbiferdaus172.noreply@github.com |
76899adc467a3809feeb552b788af2eb41cb4d7a | 7e3921d82b3b242704081ef2b01d7c4c07a4daf3 | /django_read/tests.py | 2d9bda70a30666b4762dfd99cdff7825cae899e5 | [] | no_license | JoeJasinski/django_read | 45dcae153deb29f2c17835b859364a9300a3ab48 | 0bd34c2b475ee9b3e0b45f94ea5dac4f1216107d | refs/heads/master | 2021-01-20T06:55:07.030539 | 2014-03-08T21:59:02 | 2014-03-08T21:59:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 199 | py | from django.test import TestCase
class SimpleTest(TestCase):
def test_basic_addition(self):
"""
Tests that 1 + 1 always equals 2.
"""
self.assertEqual(1 + 1, 2) | [
"joe.jasinski@gmail.com"
] | joe.jasinski@gmail.com |
fac0c2b9f5ddb4a198cff2aae5c946f09c610ce1 | c78d51931847c2164a4dea08c087732c780ac75f | /dunzo/geek_ans_new_job.py | 269e1e9b731fa4a7dd8fb4904adf33f5c0b01b67 | [
"MIT"
] | permissive | Akash671/coding | bb9839810a8b218fbc360ea53f7d3ac8d634180b | 4ef047f8e227074b660a2c7b41aefa377fdc0552 | refs/heads/main | 2023-08-07T12:10:15.439001 | 2021-09-30T09:50:37 | 2021-09-30T09:50:37 | 401,483,291 | 0 | 0 | null | 2021-08-30T20:53:42 | 2021-08-30T20:53:41 | null | UTF-8 | Python | false | false | 362 | py | #code
for t in range(int(input())):
s=str(input())
ans=0
for i in s:
if i.isupper():
ans+=1
break
for i in s:
if i.islower():
ans+=1
break
for i in s:
if i.isdigit():
ans+=1
break
if ans==3:
print("YES")
else:
print("NO")
| [
"akashsaini454545@gmail.com"
] | akashsaini454545@gmail.com |
3bcf98c39c2c723ff9cd0e2bfb64eec449d7d39e | 88694aac418f2741d2b6a8192af4d71841e2c715 | /docs/source/conf.py | 45658872d357313fe1a5d4fdfd15e2bcfc1cd54a | [] | no_license | dydona/turnstatdocs | cbd3a368d716de2f9590da347c02abbb213a7d55 | 01970888c3158128a2a2b142dc73df46aed63586 | refs/heads/master | 2021-01-17T17:40:35.552998 | 2016-07-25T21:53:01 | 2016-07-25T21:53:01 | 63,708,127 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,193 | py | # -*- coding: utf-8 -*-
#
# turnstatdocs documentation build configuration file, created by
# sphinx-quickstart on Thu Jul 21 17:37:32 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'turnstatdocs'
copyright = u'2016, Daniel Cardona'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.0.1'
# The full version, including alpha/beta/rc tags.
release = '0.0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'turnstatdocsdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'turnstatdocs.tex', u'turnstatdocs Documentation',
u'Daniel Cardona', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'turnstatdocs', u'turnstatdocs Documentation',
[u'Daniel Cardona'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'turnstatdocs', u'turnstatdocs Documentation',
u'Daniel Cardona', 'turnstatdocs', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| [
"d.cardona.rojas@gmail.com"
] | d.cardona.rojas@gmail.com |
10e7ac562d394f627511fc1bc4107af7c2f80d90 | c2105bde9173e467e21ad75a2cb95511944468e9 | /Projects/Calculator/main.py | 55ce3e538421ca2af4191f17ba0db8dad7c74594 | [] | no_license | BryanHoffmaster/learning-python | 8528864e7840bffff38af5a76c6636cdc3b81c04 | 18f2de765bbf9784254be5902bca6c751e12227b | refs/heads/master | 2020-05-21T09:15:06.524454 | 2017-10-04T04:29:45 | 2017-10-04T04:29:45 | 84,609,301 | 0 | 0 | null | 2017-10-04T04:29:46 | 2017-03-10T23:29:50 | Python | UTF-8 | Python | false | false | 1,025 | py | import math_functions
import sys
quit_calls = ('q', 'quit')
help_calls = ('h', 'help')
history = []
def print_menu():
_help_text = "\n-----------\nPlease Add, Subtract, "\
"Multiply, or Divide two numbers.\n\n"\
"'f' or 'float' to change the length of the float "\
"dividend acuracy\n\n"\
"'h' or 'help' for this menu.\n\n"\
"'q' or 'quit' to exit this program.\n-----------\n"
print(_help_text)
if __name__ == "__main__":
print_menu()
while True:
user_input = input("Calculate: ")
if user_input.lower() in quit_calls:
print("\nExiting main loop of the program.\n")
sys.exit(0)
elif user_input.lower() in help_calls:
user_input
print_menu()
else:
answer = math_functions.parse_input(user_input)
hist_tuple = (answer, user_input)
history.append(hist_tuple)
print("Answer: %.3f" % answer)
print()
print(history)
| [
"bryan.hoffmaster@gmail.com"
] | bryan.hoffmaster@gmail.com |
59aafe5b7c631da8b47c6bd9ceb8815996636c35 | 061fd052e35a96b854e21b25aea8c23c79d45829 | /apps/support/views.py | a41ab95726f7efba471e3acac5e07983d9a9e3e4 | [] | no_license | lexsos/avia35inner | 263c3d38ad63656859d7be02e352f9b17fb2117d | d68eb73d7e9b558286cf410b682ee64a6d7f42d1 | refs/heads/master | 2020-06-05T20:33:13.434483 | 2014-12-10T06:31:47 | 2014-12-10T06:31:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,874 | py | from django.views.generic import (
ListView,
DetailView,
FormView,
TemplateView,
)
from django.db.models import Max
from django.shortcuts import redirect
from django.core.urlresolvers import reverse
from django.shortcuts import get_object_or_404
from .models import Ticket, Comment
from .forms import CommentForm, TicketForm
from .settings import CONFIG
class TicketSecurityMixin(object):
def dispatch(self, request, *args, **kwargs):
if not request.user.is_authenticated():
return redirect('support_index')
return super(TicketSecurityMixin, self).dispatch(
request,
*args,
**kwargs
)
def get_queryset(self):
queryset = super(TicketSecurityMixin, self).get_queryset()
# if not admin show only own tickets
if not self.request.user.is_superuser:
queryset = queryset.filter(author=self.request.user)
return queryset
class TicketLastUpdateMixin(object):
def get_queryset(self):
queryset = super(TicketLastUpdateMixin, self).get_queryset()
# Latest ticket update (latest comment)
queryset = queryset.annotate(last_update=Max('comment__create_date'))
return queryset
class TicketNewCommentCountMixin(object):
sql_new_count_tmpl = """SELECT COUNT(support_comment.id)
FROM support_comment
WHERE (NOT support_comment.readed)
AND (support_comment.ticket_id = support_ticket.id)
AND (support_comment.author_id != {0})"""
def get_queryset(self):
queryset = super(TicketNewCommentCountMixin, self).get_queryset()
# Count of new comment for current user
sql_new_count = self.sql_new_count_tmpl.format(self.request.user.pk)
queryset = queryset.extra(select={'new_count': sql_new_count})
return queryset
class TicketListView(TicketSecurityMixin,
TicketLastUpdateMixin,
TicketNewCommentCountMixin,
ListView):
model = Ticket
paginate_by = CONFIG['PAGINATE_BY']
def get_queryset(self):
queryset = super(TicketListView, self).get_queryset()
queryset = queryset.order_by('-last_update')
return queryset
class TicketDetailView(TicketSecurityMixin,
TicketLastUpdateMixin, DetailView):
model = Ticket
class CommentsView(TicketSecurityMixin, DetailView):
model = Ticket
template_name = 'support/ajax/comment_list.html'
def get(self, request, *args, **kwargs):
ticket = self.get_object()
comments = ticket.comment_set.exclude(author=request.user)
comments.update(readed=True)
return super(CommentsView, self).get(request, *args, **kwargs)
class AddCommentView(FormView):
form_class = CommentForm
template_name = 'support/ajax/comment_form.html'
def get_context_data(self, **kwargs):
context = super(AddCommentView, self).get_context_data(**kwargs)
context['ticket_pk'] = self.kwargs['pk']
return context
def get_success_url(self):
return reverse('support_comment_add', kwargs=self.kwargs)
def form_valid(self, form):
user = self.request.user
ticket = get_object_or_404(Ticket, pk=self.kwargs['pk'])
can_save = user.is_authenticated() and \
(user.is_superuser or (ticket.author == user and ticket.opened))
if can_save:
instance = form.save(commit=False)
instance.author = self.request.user
instance.ticket = ticket
instance.save()
return super(AddCommentView, self).form_valid(form)
class TicketNewView(TicketSecurityMixin, TemplateView):
template_name = 'support/ticket_new.html'
class AddTicketView(FormView):
form_class = TicketForm
template_name = 'support/ajax/ticket_form.html'
def form_valid(self, form):
user = self.request.user
if user.is_authenticated():
ticket = form.save(commit=False)
ticket.author = user
ticket.save()
comment = Comment(
ticket=ticket,
content=form.cleaned_data['content'],
author=user
)
comment.save()
self.new_pk = ticket.pk
print dir(form)
return super(AddTicketView, self).form_valid(form)
def get_success_url(self):
return reverse('support_ticket_added', kwargs={'pk': self.new_pk})
class AddedTicketView(TemplateView):
template_name = 'support/ajax/ticket_added.html'
def get_context_data(self, **kwargs):
context = super(AddedTicketView, self).get_context_data(**kwargs)
context['ticket_pk'] = self.kwargs['pk']
return context
| [
"ale-sosnin@yandex.ru"
] | ale-sosnin@yandex.ru |
cabc57c3b8c8f2123a5290d77059ddeed22334e2 | 6f5d594c26d022483f9e0b91b95c9f7a74ded205 | /Image_Compression_with_Kmeans_Clustering/main.py | 17815ca30b5eb0ecaff0d8a6228b67fe2d97223e | [] | no_license | FASLADODO/Data_Science_Personal_Project | ee9af21f19f8026368a8bb3b70f6c63f1a0653a0 | 2cf7a0d8e6f55114d631d1165ab8ad7ab2ec6815 | refs/heads/master | 2022-11-28T17:32:07.091237 | 2020-07-30T16:12:12 | 2020-07-30T16:12:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,286 | py | # -*- coding: utf-8 -*-
"""
@author: Marcellus Ruben Winastwan
"""
import numpy as np
import os
import matplotlib.pyplot as plt
import function as f
os.getcwd()
# read the image
image = plt.imread('mutu.jpg')
if image[0,0,0] > 1:
image = image/255
else:
image
# Store the image size
imageSize = np.shape(image)
# Initialize xData as our feature set for k-means clustering with size number of pixel*3
xData = image.reshape(imageSize[0]*imageSize[1],3)
# Define max number of iterations and number of centroids K
K= 16
maxIterations = 2
# Initialize centroid
initialCentroids = f.initializeCentroids(K, xData)
finalCentroids = f.runKMeans(xData, initialCentroids, maxIterations)
print(finalCentroids)
# Next, assign each of the pixel into its closest final centroids.
idx = f.findClosestCentroids(xData, finalCentroids)
# Then, assign the value of final centroids (representation of the colors) to form a compressed image
xCompressed = finalCentroids[idx]
print(np.shape(xCompressed))
# Next, reshape the xCompressed array back to the shape of its original image
xCompressed = xCompressed.reshape(imageSize[0],imageSize[1],3)
print(xCompressed)
plt.subplot(1,2,1)
plt.imshow(image)
plt.subplot(1,2,2)
plt.imshow(xCompressed)
plt.show()
#
#
| [
"marcellusruben@gmail.com"
] | marcellusruben@gmail.com |
c08533a8fde1b415a37ea7b0568b0eb4933ee1cf | 3a176a671e21154288984c43444808be5d59ecf8 | /scripts/run_augment_bin.py | 64c0f13d8bf5ca1a104ada77d6754c7a197fb526 | [] | no_license | enikon/pug | a9c45975add53c383d79278f9c6183329901c11e | 91089b37fa0740b61a1dcb646d0f4490e2bb8947 | refs/heads/master | 2023-02-15T00:32:08.696515 | 2021-01-17T12:13:02 | 2021-01-17T12:13:02 | 316,448,996 | 1 | 0 | null | 2021-01-17T12:13:03 | 2020-11-27T08:55:41 | Python | UTF-8 | Python | false | false | 1,434 | py | import argparse
import os
import data_augmentation_smote_under
import data_augmentation_extension
def main(_args):
    """Run SMOTE + undersampling augmentation over the train/eval/test splits.

    :param _args: container that is *-unpacked into ``parser.parse_args``;
        an empty container (as used by ``main({})``) falls back to sys.argv.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('-i', '--input', help="input folder", default='../dataset_bin')
    parser.add_argument('-e', '--extension', help="file extension", default='_base')
    args = parser.parse_args(*_args)

    # (An alternative pipeline via data_augmentation_extension was previously
    # kept here commented out; see VCS history if it is ever needed again.)

    # FIX: collapsed three copy-pasted call blocks into one loop.  The same
    # pipeline runs for each split; only the target sample count differs
    # ('-1' means keep the split's natural size).
    for split, target_count in (('train', '100000'),
                                ('eval', '40000'),
                                ('test', '-1')):
        data_augmentation_smote_under.main(
            ([
                "-i", os.path.join(args.input, split + '.npy'),
                "-o", os.path.join(args.input, split + args.extension + '.npy'),
                "-cn", '2',
                "-tc", target_count
            ],)
        )
if __name__ == "__main__":
main({})
print("DONE")
| [
"templar.of.silence@gmail.com"
] | templar.of.silence@gmail.com |
9e9add37ee462a69c648d55aa51fae492d73be5e | bc06ce70f17f49f50344f261afb6ab323cf11d62 | /hostel_project/hostel_webapp/migrations/0007_auto_20201126_1135.py | 5e2ce70bf7305288fe3f63b52014693578990f4a | [
"MIT"
] | permissive | RidhimaKohli/hostel-web-app | 83cbed259f4f56d756d7495286bc77adb2321846 | a57fcb356c5665d83b146c6170b075ea898303e9 | refs/heads/master | 2023-07-31T08:02:04.293674 | 2020-11-26T19:36:30 | 2020-11-26T19:36:30 | 283,858,182 | 0 | 4 | MIT | 2021-09-22T19:32:05 | 2020-07-30T19:16:06 | CSS | UTF-8 | Python | false | false | 385 | py | # Generated by Django 3.0.8 on 2020-11-26 11:35
from django.db import migrations
class Migration(migrations.Migration):
    # Renames Complaint.author to Complaint.student (schema-only change).

    dependencies = [
        ('hostel_webapp', '0006_remove_complaint_complaint_pic'),
    ]

    operations = [
        migrations.RenameField(
            model_name='complaint',
            old_name='author',
            new_name='student',
        ),
    ]
| [
"saptashrungib@gmail.com"
] | saptashrungib@gmail.com |
01339e9925a598cd8837f7053d0d0160959048cd | 8b268f5d2deee5a576867d25fcc47fbdef89c80f | /model.py | a41dd961279c9c5363b171eb8c6ac6edd99ec9c5 | [
"MIT"
] | permissive | teodorailic98/Vislice | de3bed161e65faa3e26fb779550a24a7adb395d6 | d4ea273426af7b204dbd3655f69c6a9817dd516e | refs/heads/main | 2023-04-08T02:49:49.633361 | 2021-04-20T16:27:07 | 2021-04-20T16:27:07 | 359,865,781 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,682 | py | import random
# Maximum number of wrong guesses before the game is lost.
STEVILO_DOVOLJENIH_NAPAK = 10

# Outcome codes returned by Igra.ugibaj().
PRAVILNA_CRKA = '+'    # correct letter
PONOVLJENA_CRKA = 'o'  # letter already guessed
NAPACNA_CRKA = '-'     # wrong letter

ZMAGA = 'W'  # win
PORAZ = 'X'  # loss


class Igra:
    """State of one hangman game: the secret word and the guessed letters."""

    def __init__(self, geslo, crke=None):
        self.geslo = geslo  # secret word (upper-case)
        # Avoid a shared mutable default: each game gets its own list.
        if crke is None:
            self.crke = []
        else:
            self.crke = crke

    def napacne_crke(self):
        """Letters guessed so far that are NOT in the word."""
        return [crka for crka in self.crke if crka not in self.geslo]

    def pravilne_crke(self):
        """Letters guessed so far that appear in the word."""
        # BUG FIX: previously iterated the non-existent attribute
        # ``self.crka``, which raised AttributeError on every call.
        return [crka for crka in self.crke if crka in self.geslo]

    def stevilo_napak(self):
        """Number of wrong guesses so far."""
        return len(self.napacne_crke())

    def zmaga(self):
        """True when every letter of the word has been guessed."""
        return all(crka in self.crke for crka in self.geslo)

    def poraz(self):
        """True when the number of wrong guesses exceeds the allowed limit."""
        return self.stevilo_napak() > STEVILO_DOVOLJENIH_NAPAK

    def pravilni_del_gesla(self):
        """The word with unguessed letters masked, e.g. ``'O M _ _ _ '``."""
        s = ''
        for crka in self.geslo:
            if crka in self.crke:
                s += crka + ' '
            else:
                s += '_ '
        return s

    def nepravilni_ugibi(self):
        """Wrong guesses joined with spaces, for display."""
        return ' '.join(self.napacne_crke())

    def ugibaj(self, crka):
        """Process one guess (case-insensitive); return an outcome code."""
        crka = crka.upper()
        if crka in self.crke:
            return PONOVLJENA_CRKA
        else:
            self.crke.append(crka)
            if crka in self.geslo:
                if self.zmaga():
                    return ZMAGA
                else:
                    return PRAVILNA_CRKA
            else:
                if self.poraz():
                    return PORAZ
                else:
                    return NAPACNA_CRKA
# Word pool, loaded once at import time (one word per line, upper-cased).
with open('besede.txt', encoding='utf-8') as f:
    bazen_besed = [vrstica.strip().upper() for vrstica in f]


def nova_igra():
    # Start a fresh game with a random word from the pool.
    return Igra(random.choice(bazen_besed))
"teodorailic1998@gmail.com"
] | teodorailic1998@gmail.com |
123e5f4f70646928dc69b64772839a25e02b5f58 | f78137c438b5cf2e13e49fba9cbca5403eac0c27 | /testCase/aigao/testUpdateMediaSdk.py | f34ff0a62ec8bdfb16cfe8637c32f1a47ac3d2b0 | [] | no_license | Mayjoy-jiao/smoketest | cfd52db32700979b01e710f32372ed00f9b4d232 | f1b1c56d21b83d8f704ee220349bbe792377e0f9 | refs/heads/master | 2020-06-19T22:55:48.524394 | 2020-04-09T02:03:24 | 2020-04-09T02:03:24 | 196,905,167 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,605 | py | """
测试媒体包升级
用例运行前,后台页面设置如下:
1.[SDK广告管理_媒体管理]选择:联调媒体包
2.确保联调媒体包的媒体包管理中各控制版本号有对应的正式运行的媒体包
Author: Sun Yaping
date: 2019-08-21
"""
# -*- coding: utf-8 -*-
import time
import unittest
import paramunittest
from common import common
from common.Log import MyLog
import readConfig as readConfig
from common import configHttp as configHttp
from common import configDB as configDB
import requests
# Spreadsheet-driven test fixtures, created once at import time.
aigao_xls = common.get_xls("aigao.xlsx", "aigao")
localReadConfig = readConfig.ReadConfig()
localConfigHttp = configHttp.ConfigHttp()
ConfigDB = configDB.MyDB()
@paramunittest.parametrized(aigao_xls[2])
class UpdateMediaSdk(unittest.TestCase):
    """Smoke tests for the media-package upgrade endpoint (updateMediaSdk.do).

    Preconditions (configured in the admin backend, see module docstring):
    the integration media package is selected, and every control version
    has a corresponding live ("state 3") media package.
    """

    def setParameters(self, case_name, url, method, token, goods_id, result, code, msg):
        """Bind one spreadsheet row of test parameters to this case."""
        self.case_name = str(case_name)
        self.url = str(url)
        self.method = str(method)
        self.token = str(token)
        self.goodsId = str(goods_id)
        self.result = str(result)
        self.code = str(code)
        self.msg = str(msg)
        self.response = None
        self.info = None

    def description(self):
        """Case description hook (the value is only referenced, not returned)."""
        self.case_name

    def setUp(self):
        """Create a per-test logger."""
        self.log = MyLog.get_log()
        self.logger = self.log.get_logger()

    def postdata(self, controlversion, mediapkgversion, version, androidversion):
        """Build the request body for the media-package upgrade API.

        :param controlversion: media-package control version
        :param mediapkgversion: media-package version currently on the client
        :param version: SDK protocol version
        :param androidversion: client Android API level
        :return: request dict
        """
        data = {
            "appid": "1139",
            "channelId": "suny999",
            "subChannelId": "1",
            "mid": "ssiddca98c74423142258b994801ed9f1111",
            "imei": "865625020257042",
            "imsi": "460011207707575",
            "controlVersion": controlversion,
            "mediaPkgName": "com.android.test",
            "mediaPkgVersion": mediapkgversion,
            "version": str(version),
            "androidVersion": androidversion
        }
        # url = 'http://test.iad.zzay.net/ps/updateMediaSdk.do'
        # res = common.runaigaoapi_1(data, url)
        return data

    def checkResult(self, req_controlversion, req_androidversion, updatemediasdk_res):
        """Compare the API response against the live package row in the DB.

        Queries the highest live media-package version of 'com.android.test'
        for the requested control version / Android level, then asserts the
        response's download URL and MD5 match that row.
        """
        # NOTE(review): DB credentials are hard-coded below -- move to config.
        if req_androidversion == 23:
            self.sql = "select media_pkg_version,download_pkg,MD5 from sdk_media_pkg where control_version = '%s' and state = '3' and media_pkg_name = 'com.android.test' AND android_version = '23' order by CAST(media_pkg_version as SIGNED) desc limit 1" % (
                req_controlversion)
        else:
            self.sql = "select media_pkg_version,download_pkg,MD5 from sdk_media_pkg where control_version = '%s' and state = '3' and media_pkg_name = 'com.android.test' AND android_version = '19,20,21,22,24,25,26,27,28,29' order by CAST(media_pkg_version as SIGNED) desc limit 1" % (
                req_controlversion)
        dbresult = ConfigDB.mysqlDB("113.31.86.153", "zzmanager", "iadMOB-2013@0622)", 3306, "iadsupport", self.sql)
        print(f"当前正式运行的包名为'com.android.test'的控制版本号为{req_controlversion}的支持安卓{req_androidversion}的最高媒体包版本号为: {dbresult[0]}")
        time.sleep(3)
        current_downloadurl = updatemediasdk_res['data']['url']
        current_md5 = updatemediasdk_res['data']['md5']
        self.assertEqual(dbresult[1], current_downloadurl)
        self.assertEqual(dbresult[2], current_md5)

    # @unittest.skip("该用例暂不执行")
    def testControlVersion1_firsttime(self):
        """First-time fetch of the control-version-1 media package."""
        # set url
        self.url = aigao_xls[2][3]
        print("第一步:设置url:\n" + self.url)
        # set post data
        print("第二步:设置请求上行:\n")
        data = self.postdata(1, 0, "1.0.6", 23)
        print("媒体包升级请求上行:\n", data)
        # call the API
        # common.runJVM()
        res = common.runaigaoapi(data, self.url)
        print("媒体包升级请求下行:\n", res)
        # check result
        self.checkResult(1, 23, res)

    # @unittest.skip("该用例暂不执行")
    def testControlVersion1_notfirsttime(self):
        """Repeat fetch of the control-version-1 media package."""
        # set url
        self.url = aigao_xls[2][3]
        print("第一步:设置url:\n" + self.url)
        # set post data
        print("第二步:设置请求上行:\n")
        data = self.postdata(1, 45, "1.0.6", 24)
        print("媒体包升级请求上行:\n", data)
        # call the API
        res = common.runaigaoapi(data, self.url)
        print("媒体包升级请求下行:\n", res)
        # check result
        self.checkResult(1, 24, res)

    # @unittest.skip("该用例暂不执行")
    def testControlVersion2_firsttime(self):
        """First-time fetch of the control-version-2 media package."""
        # set url
        self.url = aigao_xls[2][3]
        print("第一步:设置url:\n" + self.url)
        # set post data
        print("第二步:设置请求上行:\n")
        data = self.postdata(2, 0, "1.0.7", 23)
        print("媒体包升级请求上行:\n", data)
        # call the API
        res = common.runaigaoapi(data, self.url)
        print("媒体包升级请求下行:\n", res)
        # check result
        self.checkResult(2, 23, res)

    # @unittest.skip("该用例暂不执行")
    def testControlVersion2_notfirsttime(self):
        """Repeat fetch of the control-version-2 media package."""
        # set url
        self.url = aigao_xls[2][3]
        print("第一步:设置url:\n" + self.url)
        # set post data
        print("第二步:设置请求上行:\n")
        data = self.postdata(2, 19082100, "1.0.7", 25)
        print("媒体包升级请求上行:\n", data)
        # call the API
        res = common.runaigaoapi(data, self.url)
        print("媒体包升级请求下行:\n", res)
        # check result
        self.checkResult(2, 25, res)

    # @unittest.skip("该用例暂不执行")
    def testControlVersion3_firsttime(self):
        """First-time fetch of the control-version-3 media package."""
        # set url
        self.url = aigao_xls[2][3]
        print("第一步:设置url:\n" + self.url)
        # set post data
        print("第二步:设置请求上行:\n")
        data = self.postdata(3, 0, "1.0.7", 23)
        print("媒体包升级请求上行:\n", data)
        # call the API
        res = common.runaigaoapi(data, self.url)
        print("媒体包升级请求下行:\n", res)
        # check result
        self.checkResult(3, 23, res)

    # @unittest.skip("该用例暂不执行")
    def testControlVersion3_notfirsttime(self):
        """Repeat fetch of the control-version-3 media package."""
        # set url
        self.url = aigao_xls[2][3]
        print("第一步:设置url:\n" + self.url)
        # set post data
        print("第二步:设置请求上行:\n")
        data = self.postdata(3, 19083000, "1.0.7", 29)
        print("媒体包升级请求上行:\n", data)
        # call the API
        res = common.runaigaoapi(data, self.url)
        print("媒体包升级请求下行:\n", res)
        # check result
        self.checkResult(3, 29, res)

    # @unittest.skip("该用例暂不执行")
    def testControlVersion4_firsttime(self):
        """First-time fetch of the control-version-4 media package."""
        # set url
        self.url = aigao_xls[2][3]
        print("第一步:设置url:\n" + self.url)
        # set post data
        print("第二步:设置请求上行:\n")
        data = self.postdata(4, 0, "1.0.7", 23)
        print("媒体包升级请求上行:\n", data)
        # call the API
        res = common.runaigaoapi(data, self.url)
        print("媒体包升级请求下行:\n", res)
        # check result
        self.checkResult(4, 23, res)

    # @unittest.skip("该用例暂不执行")
    def testControlVersion4_notfirsttime(self):
        """Repeat fetch of the control-version-4 media package."""
        # set url
        self.url = aigao_xls[2][3]
        print("第一步:设置url:\n" + self.url)
        # set post data
        print("第二步:设置请求上行:\n")
        data = self.postdata(4, 19090300, "1.0.7", 28)
        print("媒体包升级请求上行:\n", data)
        # call the API
        res = common.runaigaoapi(data, self.url)
        print("媒体包升级请求下行:\n", res)
        # check result
        self.checkResult(4, 28, res)

    def tearDown(self):
        """Mark the end of the case in the console output."""
        print("测试结束,输出log完结\n\n")
| [
"zhangjj@chinamobiad.com"
] | zhangjj@chinamobiad.com |
5d080ad148ecc425f0554f9c07c6decbb89a47ca | 06d7a6d5a1b136aa9e2145805275b5e74f826a2b | /apps/menu/migrations/0005_category_is_active.py | 339410a056a4bcc91d3918c808780581d9cf53eb | [] | no_license | pyeye/gr-backend | 6662831bae8a24d6cbd0c5c5d553a4c3193d75a4 | d09e1f59c248c3f32ba2e856c415f46c8b6fb313 | refs/heads/master | 2021-01-02T23:36:10.081027 | 2019-08-26T16:08:34 | 2019-08-26T16:08:34 | 99,502,779 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 496 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.1 on 2017-09-24 18:40
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Adds Category.is_active; existing rows default to True.

    dependencies = [
        ('menu', '0004_auto_20170923_1335'),
    ]

    operations = [
        migrations.AddField(
            model_name='category',
            name='is_active',
            field=models.BooleanField(default=True, verbose_name='Активированно'),
        ),
    ]
| [
"agw.174@gmail.com"
] | agw.174@gmail.com |
213d25bf84577a6d3302247cb04c2a0af37c66c0 | 1abd2d4fe2f01584bf0aab44d7e98e76f7280f9f | /setup.py | 5f1ae46c490365023591c75ca903926ea2fd28c3 | [] | no_license | yychuang/GenIce | b370c046cb4eec134ab80f7faa36aeb00f52786e | 80b836df7208be3d830bd276924a0a91635eded7 | refs/heads/main | 2023-06-06T18:24:23.242385 | 2021-06-28T08:57:25 | 2021-06-28T08:57:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,652 | py | #!/usr/bin/env python3
from setuptools import setup, find_packages
import os
import codecs
import re

# Copied from wheel package
here = os.path.abspath(os.path.dirname(__file__))
#README = codecs.open(os.path.join(here, 'README.txt'), encoding='utf8').read()
#CHANGES = codecs.open(os.path.join(here, 'CHANGES.txt'), encoding='utf8').read()

# Extract __version__ (and any other __dunder__ string) from the package
# source without importing it.
with codecs.open(os.path.join(os.path.dirname(__file__), 'genice2', '__init__.py'),
                 encoding='utf8') as version_file:
    metadata = dict(
        re.findall(
            r"""__([a-z]+)__ = "([^"]+)""",
            version_file.read()))

long_desc = "".join(open("README.md").readlines())

# Runtime dependencies come straight from requirements.txt.
with open("requirements.txt") as f:
    requires = [x.strip() for x in f.readlines()]

setup(name='GenIce2',
      python_requires='>=3.6',
      version=metadata['version'],
      description='A Swiss army knife to generate hydrogen-disordered ice structures.',
      long_description=long_desc,
      long_description_content_type="text/markdown",
      classifiers=[
          "Development Status :: 4 - Beta",
          "Intended Audience :: Developers",
          "Programming Language :: Python",
          "Programming Language :: Python :: 3.6",
      ],
      author='Masakazu Matsumoto',
      author_email='vitroid@gmail.com',
      url='https://github.com/vitroid/GenIce/',
      keywords=['genice2', ],
      license='MIT',
      packages=find_packages(),
      install_requires=requires,
      entry_points={
          'console_scripts': [
              'genice2 = genice2.cli.genice:main',
              'analice2 = genice2.cli.analice:main'
          ]
      }
      )
| [
"vitroid@gmail.com"
] | vitroid@gmail.com |
57268792322c40054acf66a0c27b41c53cf231bb | 4455bf0616ec573c8e2d143130f41886d598bcbe | /mms/tests/unit_tests/test_model_loader.py | b609dfb00bd8f8af53a3e494d74a937494a67b4b | [
"Apache-2.0"
] | permissive | lupesko/mxnet-model-server-1 | dc7bab8c9d64e9fb12dd55a8e7c434a6dfacfb2a | 3346199648792afdc5ed6e5eec7b8f4c1dcdc3b7 | refs/heads/master | 2021-05-11T10:27:16.478058 | 2018-06-10T10:59:07 | 2018-06-10T10:59:07 | 118,103,397 | 0 | 0 | null | 2018-01-19T09:13:42 | 2018-01-19T09:13:42 | null | UTF-8 | Python | false | false | 383 | py | import os
import pytest
import sys
curr_path = os.path.dirname(os.path.abspath(__file__))
sys.path.append(curr_path + '/../..')
from mms.model_loader import ModelLoader
def test_onnx_fails_fast():
    """ONNX model URLs must be rejected immediately with a conversion hint."""
    onnx_models = {'onnx': 's3://bucket/prefix/whatever.onnx'}
    with pytest.raises(ValueError) as excinfo:
        ModelLoader.load(onnx_models)
    assert 'Convert ONNX model' in str(excinfo.value)
| [
"yuruofeifei@gmail.com"
] | yuruofeifei@gmail.com |
83e984bf7313b99dd2e24c39e24640b35d45e344 | a2706c66c4f2769c00fc5f67e1a85742cfa7e17c | /WebSocket/Handle/console.py | 71d912effb8fa4a6b590eaf8e05ac2ba4968e4fc | [
"BSD-3-Clause"
] | permissive | Jeromeyoung/viperpython | 48800312dcbdde17462d28d45865fbe71febfb11 | ba794ee74079285be32191e898daa3e56305c8be | refs/heads/main | 2023-09-01T18:59:23.464817 | 2021-09-26T04:05:36 | 2021-09-26T04:05:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,131 | py | # -*- coding: utf-8 -*-
# @File : console.py
# @Date : 2021/2/26
# @Desc :
from Lib.configs import RPC_FRAMEWORK_API_REQ
from Lib.log import logger
from Lib.method import Method
from Lib.rpcclient import RpcClient
from Lib.xcache import Xcache
class Console(object):
    """Stateless facade over the framework's RPC console API.

    The active console id is cached in Xcache; every method lazily
    (re)creates a console when the cache is empty or an RPC call fails.
    Methods return either a bool or a (success, payload) tuple.
    """

    def __init__(self):
        pass

    @staticmethod
    def get_active_console():
        # Reuse the first existing console, or create one if none exist;
        # cache the resulting id (or None on failure) in Xcache.
        result = RpcClient.call(Method.ConsoleList, [], timeout=RPC_FRAMEWORK_API_REQ)
        if result is None:
            Xcache.set_console_id(None)
            return False
        else:
            consoles = result.get("consoles")
            if len(consoles) == 0:
                consoles_create_opt = {"SkipDatabaseInit": True, 'AllowCommandPassthru': False}
                result = RpcClient.call(Method.ConsoleCreate, [consoles_create_opt], timeout=RPC_FRAMEWORK_API_REQ)
                if result is None:
                    Xcache.set_console_id(None)
                    return False
                else:
                    active_id = int(result.get("id"))
                    Xcache.set_console_id(active_id)
                    return True
            else:
                active_id = int(consoles[0].get("id"))
                Xcache.set_console_id(active_id)
                return True

    @staticmethod
    def reset_active_console():
        # Destroy every known console, then create and cache a fresh one.
        result = RpcClient.call(Method.ConsoleList, [], timeout=RPC_FRAMEWORK_API_REQ)
        if result is None:
            Xcache.set_console_id(None)
        else:
            consoles = result.get("consoles")
            if len(consoles) == 0:
                pass
            else:
                for console in consoles:  # destroy all known consoles
                    cid = int(console.get("id"))
                    params = [cid]
                    RpcClient.call(Method.ConsoleDestroy, params, timeout=RPC_FRAMEWORK_API_REQ)

        # NOTE(review): unlike get_active_console, no create options are
        # passed here -- confirm this is intentional.
        result = RpcClient.call(Method.ConsoleCreate, timeout=RPC_FRAMEWORK_API_REQ)
        if result is None:
            Xcache.set_console_id(None)
        else:
            active_id = int(result.get("id"))
            Xcache.set_console_id(active_id)

    @staticmethod
    def write(data=None):
        """Send one line of input to the active console; retry once after
        refreshing the console id if the first attempt fails."""
        cid = Xcache.get_console_id()
        if cid is None:
            get_active_console_result = Console.get_active_console()
            if get_active_console_result:
                cid = Xcache.get_console_id()
            else:
                return False, None

        params = [cid, data.replace("\r\n", "\n")]
        result = RpcClient.call(Method.ConsoleWrite, params, timeout=RPC_FRAMEWORK_API_REQ)
        if result is None or result.get("result") == "failure":
            # First attempt failed: refresh the console id and retry once.
            get_active_console_result = Console.get_active_console()
            if get_active_console_result:
                cid = Xcache.get_console_id()
                params = [cid, data.replace("\r\n", "\n")]
                result = RpcClient.call(Method.ConsoleWrite, params, timeout=RPC_FRAMEWORK_API_REQ)
                if result is None or result.get("result") == "failure":
                    return False, None
                else:
                    return True, result
            else:
                return False, result
        else:
            return True, result

    @staticmethod
    def read():
        """Read pending output from the active console."""
        cid = Xcache.get_console_id()
        if cid is None:
            return False, {}
        params = [cid]
        result = RpcClient.call(Method.ConsoleRead, params, timeout=RPC_FRAMEWORK_API_REQ)
        if result is None:
            return False, {}
        elif result.get("result") == "failure":
            logger.warning(f"Cid: {cid}错误")
            return False, {}
        else:
            return True, result

    @staticmethod
    def tabs(line=None):
        """Ask the console for tab-completion candidates for *line*."""
        cid = Xcache.get_console_id()
        if cid is None:
            return False, {}
        params = [cid, line]
        result = RpcClient.call(Method.ConsoleTabs, params, timeout=RPC_FRAMEWORK_API_REQ)
        if result is None or result.get("result") == "failure":
            logger.warning(f"Cid: {cid}错误")
            return False, {}
        else:
            return True, result

    @staticmethod
    def session_detach():
        """Detach the session currently attached to the active console."""
        cid = Xcache.get_console_id()
        if cid is None:
            return False, {}
        params = [cid]
        result = RpcClient.call(Method.ConsoleSessionDetach, params, timeout=RPC_FRAMEWORK_API_REQ)
        if result is None:
            return False, {}
        elif result.get("result") == "failure":
            logger.warning(f"Cid: {cid}错误")
            return False, {}
        else:
            return True, result

    @staticmethod
    def session_kill():
        """Kill the session currently attached to the active console."""
        cid = Xcache.get_console_id()
        if cid is None:
            return False, {}
        params = [cid]
        result = RpcClient.call(Method.ConsoleSessionKill, params, timeout=RPC_FRAMEWORK_API_REQ)
        if result is None:
            return False, {}
        elif result.get("result") == "failure":
            logger.warning(f"Cid: {cid}错误")
            return False, {}
        else:
            return True, result
| [
"yu5890681@gmail.com"
] | yu5890681@gmail.com |
432e4d7dfb9d1711e8dc1a8ddffa9cd8f54be76e | da34178acfaf567a7faa0fb10ea9d50c8ddf0d98 | /ccc05j1.py | e1ffdd91408203370c061aa3683e88add9505398 | [] | no_license | csgregorian/competition | d81890b940a8912aefd3c724760bef421dcea38e | bd87ca6b104ede1d36b8d3fc475c96694078ebfb | refs/heads/master | 2020-05-31T03:56:51.174808 | 2015-10-15T19:44:17 | 2015-10-15T19:44:17 | 24,005,009 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 398 | py | d, e, w = int(input()), int(input()), int(input())
a = 0
b = 0
a += max(0, d-100) * 25
a += e * 15
a += w * 20
b += max(0, d-250) * 45
b += e * 35
b += w * 25
a = float(a)/100
b = float(b)/100
print("Plan A costs %.2f" % a)
print("Plan B costs %.2f" % b)
if a == b:
print("Plan A and B are the same price.")
elif a > b:
print("Plan B is cheapest.")
elif a < b:
print("Plan A is cheapest.") | [
"csgregorian@gmail.com"
] | csgregorian@gmail.com |
60336c737e406385fe01a079b58435791bb872c7 | faa4ce06fdc64bcd3490d8c244cdad4f7bba8d7e | /shop/migrations/0001_initial.py | 77c2b82fa8d279c48477a15bd1b57612404899d3 | [] | no_license | menyibing/lutto_django_project | 71982b0ada0597f7f7fec58ea0c28c0853cbf601 | 2852784b90fef0d675e79c75c17569cc16a0a6b2 | refs/heads/master | 2020-04-05T15:40:01.636476 | 2018-11-07T13:38:50 | 2018-11-07T13:38:50 | 156,978,314 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,011 | py | # Generated by Django 2.1.1 on 2018-10-17 01:10
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Initial schema for the shop app: GoodBrand, GoodClass, GoodPicture,
    # Goods, plus the GoodPicture -> Goods foreign key.

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='GoodBrand',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=50)),
            ],
        ),
        migrations.CreateModel(
            name='GoodClass',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=50)),
            ],
        ),
        migrations.CreateModel(
            name='GoodPicture',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('url', models.CharField(max_length=255)),
            ],
        ),
        migrations.CreateModel(
            name='Goods',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=50)),
                # NOTE(review): 'intergal' looks like a typo of 'integral';
                # the name must stay in sync with the model, so it is kept.
                ('intergal', models.CharField(max_length=50)),
                # ('note1', models.CharField(max_length=50, null=True)),
                # ('note2', models.CharField(max_length=50, null=True)),
                ('goodbrand', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='shop.GoodBrand')),
                ('goodclass', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='shop.GoodClass')),
            ],
        ),
        migrations.AddField(
            model_name='goodpicture',
            name='good',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='shop.Goods'),
        ),
    ]
| [
"2270487537@qq.com"
] | 2270487537@qq.com |
1b8b1bb6dfa86f27bd84c71c3ecefe0522283f79 | 62af4b966c2052d529906bd5ea68c574e33ea5d1 | /preprocessing/parse.py | 23783d00a855b532bc0469537a56453fd8c34d62 | [
"Apache-2.0"
] | permissive | som-shahlab/trove | 1bea40e12450ba4d1158b15b43ec59ad35359cb4 | 291851b45666ad46f30c6c3103295bb8f64f1c8a | refs/heads/main | 2023-04-13T21:14:33.743306 | 2022-10-24T20:28:34 | 2022-10-24T20:28:34 | 316,359,795 | 72 | 23 | Apache-2.0 | 2022-09-30T20:45:24 | 2020-11-26T23:56:02 | Python | UTF-8 | Python | false | false | 6,171 | py | import os
import sys
import glob
import json
import time
import logging
import argparse
from functools import partial, wraps
from pathlib import Path
from typing import List, Set, Dict, Tuple, Optional, Union, Callable, Generator

import pandas as pd
from joblib import Parallel, delayed
from spacy.util import minibatch

from pipes.tokenizers import get_parser, parse_doc
logger = logging.getLogger(__name__)
def timeit(f):
    """
    Decorator that logs the wall-clock duration of each call to *f*.

    :param f: function to wrap
    :return: wrapper forwarding all arguments and the return value
    """
    # FIX: preserve f's __name__/__doc__ on the wrapper (used by the log
    # line below and by introspection) instead of clobbering them.
    @wraps(f)
    def timed(*args, **kw):
        ts = time.time()
        result = f(*args, **kw)
        te = time.time()
        logger.info(f'{f.__name__} took: {te - ts:2.4f} sec')
        return result

    return timed
def transform_texts(nlp,
                    batch_id,
                    corpus,
                    output_dir: str,
                    disable: Set[str] = None,
                    prefix: str = ''):
    """
    Parse one batch of documents with spaCy and dump them as JSON lines.

    :param nlp: loaded spaCy pipeline
    :param batch_id: index of this batch (used in the output file name)
    :param corpus: iterable of (doc_name, text, metadata) triples
    :param output_dir: directory for the output file
    :param disable: components passed through to parse_doc
    :param prefix: optional file-name prefix
    :return: None; writes "<prefix.><batch_id>.json", one document per line
    """
    out_path = Path(output_dir) / (
        f"{prefix + '.' if prefix else ''}{batch_id}.json")
    print("Processing batch", batch_id)
    with out_path.open("w", encoding="utf8") as f:
        doc_names, texts, metadata = zip(*corpus)
        for i, doc in enumerate(nlp.pipe(texts)):
            sents = list(parse_doc(doc, disable=disable))
            f.write(
                json.dumps(
                    {'name': str(doc_names[i]),
                     'metadata': metadata[i],
                     'sentences': sents}
                )
            )
            f.write("\n")
    print("Saved {} texts to JSON {}".format(len(texts), batch_id))
def dataloader(inputpath: str,
               primary_key: str = 'DOC_NAME',
               text_key: str = 'TEXT',
               preprocess: Callable = lambda x: x):
    """
    Yield (doc_name, preprocessed_text, metadata) triples from TSV file(s).

    :param inputpath: one .tsv/.csv file, or a directory of them
    :param primary_key: column holding the document name
    :param text_key: column holding the document text
    :param preprocess: transform applied to each text (default: identity)
    """
    # directory or single file
    filelist = glob.glob(inputpath + "/*.[tc]sv") \
        if os.path.isdir(inputpath) else [inputpath]

    for fpath in filelist:
        print(fpath)
        df = pd.read_csv(fpath, delimiter='\t', header=0, quotechar='"')
        for i, row in df.iterrows():
            doc_name = row[primary_key]
            text = row[text_key]
            # Un-escape literal \n, \t, \r sequences stored in the cell.
            text = text.replace('\\n', '\n').replace('\\t', '\t').replace('\\r', '\r')
            if not text.strip():
                logger.error(
                    f"Document {doc_name} contains no text -- skipping")
                continue
            # add any other columns as metadata
            metadata = {
                name: value for name, value in row.to_dict().items()
                if name not in [text_key]
            }
            yield (doc_name, preprocess(text), metadata)
def load_merge_terms(fpath: str, sep: str = '\t') -> Set[str]:
    """Read a delimited term file and return the set of first-column terms."""
    with open(fpath, 'r') as fp:
        return {line.strip().split(sep)[0] for line in fp}
@timeit
def main(args):
    """End-to-end driver: load docs, parse in parallel batches, dump JSON."""
    # Phrases that must never be split across sentence boundaries.
    merge_terms = load_merge_terms(args.merge_terms) \
        if args.merge_terms else {}

    nlp = get_parser(disable=args.disable.split(','),
                     merge_terms=merge_terms,
                     max_sent_len=args.max_sent_len)

    identity_preprocess = lambda x: x
    corpus = dataloader(
        args.inputdir,
        primary_key = args.primary_key,
        text_key = args.text_key,
        preprocess=identity_preprocess
    )
    # Fan batches out to worker processes; each writes its own JSON file.
    partitions = minibatch(corpus, size=args.batch_size)
    executor = Parallel(n_jobs=args.n_procs,
                        backend="multiprocessing",
                        prefer="processes")
    do = delayed(partial(transform_texts, nlp))
    tasks = (do(i, batch, args.outputdir, args.disable, args.prefix) for
             i, batch in enumerate(partitions))
    executor(tasks)
if __name__ == '__main__':
    argparser = argparse.ArgumentParser()
    argparser.add_argument("-i", "--inputdir", type=str, default=None,
                           help="input directory")
    argparser.add_argument("-o", "--outputdir", type=str, default=None,
                           help="output directory")
    argparser.add_argument("-F", "--fmt", type=str, default="single",
                           help="document format (single|row)")
    argparser.add_argument("-p", "--prefix", type=str, default="",
                           help="json name prefix")
    argparser.add_argument("-n", "--n_procs", type=int, default=2,
                           help="number of processes")
    argparser.add_argument("-b", "--batch_size", type=int, default=1000,
                           help="batch size")
    argparser.add_argument("-d", "--disable", type=str,
                           default="ner,parser,tagger",
                           help="disable spaCy components")
    argparser.add_argument("--keep_whitespace", action='store_true',
                           help='retain whitespace tokens')
    argparser.add_argument("-m", "--max_sent_len", type=int, default=150,
                           help='Max sentence length (in words)')
    argparser.add_argument("-M", "--merge_terms", type=str, default=None,
                           help='Do not split lines spanning these phrases')
    argparser.add_argument("--quiet", action='store_true',
                           help="suppress logging")
    argparser.add_argument("--primary_key", type=str,
                           default="DOC_NAME",
                           help="primary document key")
    argparser.add_argument("--text_key", type=str,
                           default="TEXT",
                           help="text key column name")
    args = argparser.parse_args()

    if not args.quiet:
        FORMAT = '%(message)s'
        logging.basicConfig(format=FORMAT, stream=sys.stdout,
                            level=logging.INFO)

    # Echo the run configuration before starting.
    logger.info(f'Python: {sys.version}')
    for attrib in args.__dict__.keys():
        v = 'None' if not args.__dict__[attrib] else args.__dict__[attrib]
        logger.info("{:<15}: {:<10}".format(attrib, v))

    main(args)
| [
"fries.jason@gmail.com"
] | fries.jason@gmail.com |
094a892a9dab6508e6fb0e607e408cbaa20a4c05 | b683b7694a8b5f81e13f5c6b648c7a33b442c79a | /myprofile/migrations/0002_profile_email.py | 06319d4507ba114c1f646b2fa3cdd59b14c0ca4a | [] | no_license | murat-dev/ShopDRF | ca12dacfbb6308c2e09642388da99f8f9cc68718 | 23f690677bcd3ad22d5c13ae52b0883891ee936b | refs/heads/master | 2023-04-13T13:08:34.361549 | 2021-04-27T09:58:25 | 2021-04-27T09:58:25 | 362,063,882 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 435 | py | # Generated by Django 3.1 on 2021-03-11 15:25
from django.db import migrations, models
class Migration(migrations.Migration):
    # Adds a unique, required Profile.email column.

    dependencies = [
        ('myprofile', '0001_initial'),
    ]

    operations = [
        migrations.AddField(
            model_name='profile',
            name='email',
            # default=1 only seeds existing rows during this migration;
            # preserve_default=False drops it from the field definition after.
            field=models.EmailField(default=1, max_length=100, unique=True),
            preserve_default=False,
        ),
    ]
]
| [
"muratbegalyev@gmail.com"
] | muratbegalyev@gmail.com |
d7c2b34c95ae59903eaf3e564e0a080c96466b09 | bbb57d8224e3df1e89cfa64892864e6fc3538c47 | /PY-21/hw_8_cookbook.py | 866c8cae9b935ef7e970db799288068c12a90f35 | [] | no_license | D2ntes/netology_learn | 08848d7304d4091d1fe828a343c4df26013c2610 | 88cad90d1de279beda5fdecf50cf6578c848b683 | refs/heads/master | 2022-12-03T17:25:29.039034 | 2020-03-10T15:29:46 | 2020-03-10T15:29:46 | 166,715,195 | 0 | 1 | null | 2022-11-22T05:07:36 | 2019-01-20T22:14:35 | CSS | UTF-8 | Python | false | false | 7,706 | py | # Домашнее задание к лекции 2.1 «Открытие и чтение файла, запись в файл»
# Необходимо написать программу для кулинарной книги.
#
# Список рецептов должен храниться в отдельном файле в следующем формате:
#
# Название блюда
# Kоличество ингредиентов в блюде
# Название ингредиента | Количество | Единица измерения
# Название ингредиента | Количество | Единица измерения
# ...
# В одном файле может быть произвольное количество блюд.
# Читать список рецептов из этого файла.
# Соблюдайте кодстайл, разбивайте новую логику на функции и не используйте глобальных переменных.
# Задача №1
# Должен получится следующий словарь
#
# cook_book = {
# 'Омлет': [
# {'ingridient_name': 'Яйцо', 'quantity': 2, 'measure': 'шт.'},
# {'ingridient_name': 'Молоко', 'quantity': 100, 'measure': 'мл'},
# {'ingridient_name': 'Помидор', 'quantity': 2, 'measure': 'шт'}
# ],
# 'Утка по-пекински': [
# {'ingridient_name': 'Утка', 'quantity': 1, 'measure': 'шт'},
# {'ingridient_name': 'Вода', 'quantity': 2, 'measure': 'л'},
# {'ingridient_name': 'Мед', 'quantity': 3, 'measure': 'ст.л'},
# {'ingridient_name': 'Соевый соус', 'quantity': 60, 'measure': 'мл'}
# ],
# 'Запеченный картофель': [
# {'ingridient_name': 'Картофель', 'quantity': 1, 'measure': 'кг'},
# {'ingridient_name': 'Помидор', 'quantity': 2, 'measure': 'шт'},
# {'ingridient_name': 'Сыр гауда', 'quantity': 100, 'measure': 'г'},
# ]
# }
from pprint import pprint
def load_book_from_file(book_file='cookbook.txt'):
# Читаем и преобразуем данные в заданный словарь из файла
with open(book_file) as book:
cook_dict = dict()
key_ingridient_dict = ['ingridient_name', 'quantity', 'measure']
book.seek(0, 2) # Определяем конец файла и возвращаем указатель в начало файла
eof = book.tell()
book.seek(0, 0)
while book.tell() != eof: # Проверяем конец файла
ingridient_list = []
key = book.readline().strip() # Наименование блюда
for ingridient in range(int(book.readline().strip())):
value_ingridient_dict = book.readline().strip().split(' | ')
value_ingridient_dict[1] = int(value_ingridient_dict[1])
ingridient_list.append(dict(zip(key_ingridient_dict, value_ingridient_dict)))
cook_dict.setdefault(key, ingridient_list)
book.readline()
pprint(cook_dict)
return cook_dict
# Задача №2
# Нужно написать функцию, которая на вход принимает список блюд из cook_book
# и количество персон для кого мы будем готовить
# get_shop_list_by_dishes(dishes, person_count)
# На выходе мы должны получить словарь с названием ингредиентов и его количетсва для блюда. Например, для такого вызова
#
# get_shop_list_by_dishes(['Запеченный картофель', 'Омлет'], 2)
# Должен быть следующий результат:
#
# {
# 'Картофель': {'measure': 'кг', 'quantity': 2},
# 'Молоко': {'measure': 'мл', 'quantity': 200},
# 'Помидор': {'measure': 'шт', 'quantity': 8},
# 'Сыр гауда': {'measure': 'г', 'quantity': 200},
# 'Яйцо': {'measure': 'шт', 'quantity': 4},
# 'Чеснок': {'measure': 'зубч', 'quantity': 6}
# }
# Обратите внимание, что ингредиенты могут повторяться
def get_shop_list_by_dishes(args):
dishes = args[0]
person_count = args[1]
cook_book = args[-1]
ingridients_diner_dict = {}
for dish in dishes: # Перечисляем все блюда в книге
for ingridient in cook_book[dish]: # Перечисляем все индигриенты в блюде
if ingridient['ingridient_name'] in ingridients_diner_dict: # При совпедении индигрента, добавляем кол-во
ingridients_diner_dict[ingridient['ingridient_name']]['quantity'] += ingridient[
'quantity'] * person_count
# Добавляем индигрент и количество(с ед. изм.) в словарь, с учетом кол-ва персон
ingridients_diner_dict.setdefault(ingridient['ingridient_name'],
{'measure': ingridient['measure'],
'quantity': ingridient['quantity'] * person_count})
# Выводим кол-во требуемых индигриентов
print(f"Для приготовления блюд: {', '.join(dishes)}\n"
f"Количество персон: {person_count}\n"
f"Потреюуется:"
)
for ingridient in ingridients_diner_dict: #
print(f"{ingridient} | {ingridients_diner_dict[ingridient]['quantity']} "
f"{ingridients_diner_dict[ingridient]['measure']}")
def choice_of_dishes(book):
i = 0
menu = dict()
dishes = []
person = 0
print('Кулинарная книга\nСодержание:')
for dish in book: # Выводим нумерованный список блюд и создаем словарь для цифрового ввода блюд
i += 1
menu.setdefault(str(i), dish)
print(f'{i}. {dish}')
while True:
try:
# Ввод номера блюд из существующих и кол-во персон
number_dishes = list(input(f'Введите номера блюд(через пробелы от 1 до {i}): ').split())
except TypeError: # Проверяем правильность ввода типа данных
print("!!!Cледует ввести номер блюд цифрами!!!")
try:
person = int(input("На сколько персон? "))
except TypeError: # Проверяем правильность ввода типа данных
print("!!!Cледует ввести кол-во персон цифрами!!!")
except ValueError:
print("!!!Cледует ввести кол-во персон цифрами!!!")
try:
for numer_dish in number_dishes: # Добавляем блюда по цифровым указателям
dishes.append(menu[numer_dish])
except KeyError: # Проверяем правильность ввода указателей на блюда
print("!!!Блюда под таким номером в книге нет!!!")
else:
break
return dishes, person, book
get_shop_list_by_dishes(choice_of_dishes(load_book_from_file()))
| [
"gruzdev-n@mail.ru"
] | gruzdev-n@mail.ru |
43bb031457d50e9122740259e04359b535cb713f | 97d65ce909cee03ed4c8cc9770a5cf55a12a0b90 | /motion.py | 8613f0735d153c4dc4a45e3ab7178f2fe86a165f | [] | no_license | jdcasey/rpi-motion | 499c728d312192bfb67d700e89b6067145ea5bb2 | e503ca67673c6d36f6a2bb8cdf3b6869c0242562 | refs/heads/master | 2020-04-21T20:13:32.422320 | 2019-02-09T22:37:59 | 2019-02-09T22:37:59 | 169,836,141 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 363 | py | #!/usr/bin/env python3
import RPi.GPIO as gpio
import datetime
import time

# Use Broadcom (BCM) pin numbering; PIR motion sensor output on GPIO 23.
gpio.setmode(gpio.BCM)
gpio.setup(23, gpio.IN)

try:
    # Give the PIR sensor time to settle before the first read.
    time.sleep(2)
    while True:
        if gpio.input(23):
            # Motion detected: log a timestamp, then back off so one event
            # does not produce a burst of prints.
            print("PING: " + str(datetime.datetime.now()))
            time.sleep(6)
        time.sleep(0.1)  # polling interval
except Exception as e:
    print(str(e))
finally:
    # Original only cleaned up inside the except block, so a KeyboardInterrupt
    # (Ctrl-C) or normal exit left the GPIO pins claimed. finally always runs.
    gpio.cleanup()
"jdcasey@commonjava.org"
] | jdcasey@commonjava.org |
289bc0edfd52e02ce3efd5b777628799617507a9 | d8bdb06686a2c81ef290afff9cd950eef24ef3b9 | /day_test08/test_001.py | fc99ffe2b457f1e6a02a923f80dc2c67db603beb | [] | no_license | sunadruey/hkes_test | 7f2622d658bc9289b4278a1eb1bbf5002ad5b0da | b6fa3b63dd5e58e9b74176fcd078fc95cfbfbf59 | refs/heads/master | 2023-05-04T20:06:05.832422 | 2021-05-26T14:30:32 | 2021-05-26T14:30:32 | 369,708,099 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 219 | py |
from day_test08.hogwos import Huogwos
class Main:
    """Stub for a page object of the app's main page (test scaffolding).

    All methods are placeholders; presumably they will wrap Selenium-style
    interactions — TODO confirm against the test suite that uses this class.
    """

    def send_keys(self):
        # placeholder — not yet implemented
        pass

    def click(self):
        # placeholder — not yet implemented
        pass

    def title(self):
        # placeholder — not yet implemented
        pass

    def click_first_link(self):
        # Returns the next page object (Huogwos), modelling navigation.
        return Huogwos()
"sunadruey@163.com"
] | sunadruey@163.com |
c03381a3eb66d32c05604a2226fbaea846f8e98c | c7a6f8ed434c86b4cdae9c6144b9dd557e594f78 | /ECE364/.PyCharm40/system/python_stubs/348993582/dbm.py | ac53fcf86f8ad981eb28108dc15d60478a2542de | [] | no_license | ArbalestV/Purdue-Coursework | 75d979bbe72106975812b1d46b7d854e16e8e15e | ee7f86145edb41c17aefcd442fa42353a9e1b5d1 | refs/heads/master | 2020-08-29T05:27:52.342264 | 2018-04-03T17:59:01 | 2018-04-03T17:59:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 667 | py | # encoding: utf-8
# module dbm
# from /usr/lib64/python2.6/lib-dynload/dbm.so
# by generator 1.136
# no doc
# no imports
# Variables with simple values
library = 'GNU gdbm'
# functions
# NOTE: this file is an IDE-generated stub for the C `dbm` extension module;
# the bodies are intentionally empty and only document the real signatures.
def open(path, flag=None, mode=None): # real signature unknown; restored from __doc__
    """
    open(path[, flag[, mode]]) -> mapping
    Return a database object.

    Stub only — the real implementation lives in the compiled dbm.so.
    (This deliberately shadows the builtin ``open`` inside this stub module.)
    """
    pass
# classes
class error(Exception):
    """Stub for the dbm module's exception type (raised on database errors)."""
    def __init__(self, *args, **kwargs): # real signature unknown
        pass

    __weakref__ = property(lambda self: object(), lambda self, v: None, lambda self: None)  # default
    """list of weak references to the object (if defined)"""
| [
"pkalita@princeton.edu"
] | pkalita@princeton.edu |
df8fa1dadaeb9af62097dca1ce6ed2f84718704d | bee7d7bcb1fbeedca4421ab081a330b7cbf17ea8 | /Project6/partition.py | d6a8cdfd0ca37e4de02106375b78d650fa9e7fd8 | [] | no_license | vincy0320/School_Intro_to_ML | a576d3d65aa0223c850db9f8c34c76b6656316ac | 9ae339f81fc7134ba9058fe975dec9ac7e3aaba4 | refs/heads/master | 2020-04-18T21:43:28.444567 | 2019-01-27T06:11:47 | 2019-01-27T06:11:47 | 167,772,856 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,800 | py | #!/usr/bin/python3
import random
import math
import util
def __get_class_indices_dict(data, class_index):
    """
    Get a dictionary whose key is the class name and the value is a list of
    row indices in the given dataset that has that class name.

    Thin private wrapper: delegates entirely to util.get_class_indices_dict.
    (Double leading underscore only signals "module-private" here.)
    """
    return util.get_class_indices_dict(data, class_index)
def get_stratified_partitions(dataset, n):
    """Split *dataset* into n stratified folds of (roughly) equal size.

    Builds n equal percentage shares and delegates to the percentage-based
    stratified partitioner.
    """
    share = round(1 / n, 2)
    # verify_partition(data, partitions, class_index)
    return get_stratified_partitions_by_percentage(dataset, [share] * n)
def get_stratified_partitions_by_percentage(dataset, percentages):
    """
    Get stratified partitions based on the given percentages.

    Each partition receives round(percentage * class_count) row indices from
    every class; the LAST partition receives whatever indices remain, which
    absorbs rounding error. Returns a list of lists of row indices.
    """
    # Verity percentage sums to 1
    total = sum(percentages)
    if round(total) != 1:
        raise Exception("Error: Percentages must sum to 1")

    class_indices_dict = util.get_class_indices_dict(dataset)
    count_dict = util.get_class_count_dict(class_indices_dict)
    partitions = []
    for index in range(len(percentages)):
        percentage = percentages[index]
        part = []
        for class_name in class_indices_dict:
            if index == len(percentages) - 1:
                # Last fold: take all remaining indices of this class.
                part += class_indices_dict[class_name]
            else:
                count = round(count_dict[class_name] * percentage)
                # NOTE: get_random_n_and_rest mutates the per-class index list
                # in place, so later folds only see the leftover indices.
                selection = get_random_n_and_rest(
                    class_indices_dict[class_name], count)
                part += selection
        partitions.append(part)
    # verify_partition(data, partitions, class_index)
    return partitions
def get_random_partitions(data, n):
    """Randomly split the row indices of *data* into n disjoint parts.

    Each part holds floor(len(data)/n) indices; any remainder is appended
    to the last part.
    """
    indices = list(range(len(data)))
    size = math.floor(len(indices) / n)
    # get_random_n_and_rest removes the chosen indices from `indices`,
    # so successive draws are automatically disjoint.
    parts = [get_random_n_and_rest(indices, size) for _ in range(n)]
    parts[-1] += indices  # leftovers go to the final part
    return parts
def get_random_n_and_rest(all_values, count):
    """Remove and return *count* uniformly random elements from *all_values*.

    Mutates *all_values* in place: the chosen elements are deleted, leaving
    the "rest" behind.
    """
    chosen = []
    for _ in range(count):
        # randrange(n) is exactly what randint(0, n-1) calls internally,
        # so the random stream is unchanged.
        chosen.append(all_values.pop(random.randrange(len(all_values))))
    return chosen
# Test
# def verify_partition(data, partitions, class_index):
# for part in partitions:
# print("part:")
# count_map = {}
# for index in part:
# class_name = data[index][class_index]
# if class_name in count_map:
# count_map[class_name] += 1
# else:
# count_map[class_name] = 0
# print(count_map)
| [
"vincy.wscheng@gmail.com"
] | vincy.wscheng@gmail.com |
5144aade36d0dec75c11c658719a8e512384ddb9 | 1870335a3175750d0e3f928a3b1dbcacb7177eee | /game_states.py | 4d9c5cfc153f9b621634ebcb24726e60525a6a91 | [] | no_license | Abdel-Oued/Projet_Roguelike | b7609e2e01e190bfa0c06c6541cd2d7f391bbcf4 | fbcf5da6facb31091ddefe4bbc55313035574201 | refs/heads/master | 2023-03-22T17:48:05.478295 | 2021-03-04T14:40:38 | 2021-03-04T14:40:38 | 220,295,971 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 182 | py | from enum import Enum
class GameStates(Enum):
    """Finite states of the roguelike's main loop.

    Member names suggest turn ownership plus UI modes (inventory menus and
    targeting); the integer values are arbitrary identifiers.
    """
    PLAYERS_TURN = 1
    ENEMY_TURN = 2
    PLAYER_DEAD = 3
    SHOW_INVENTORY = 4
    DROP_INVENTORY = 5
    TARGETING = 6
| [
"noreply@github.com"
] | Abdel-Oued.noreply@github.com |
7afe7c980fc2beac69f9d9f0ad9c1bd27ccfa9d8 | 21a1e6ce3a04b4897b7b64a73f8b25ddee5323f8 | /multibHistograms.py | 47179516e6b84f90d4a0080941325d83ef72fb37 | [] | no_license | fedorov/pyhelpers | a0db84a9762ab60d1b6c5bc379a2aaa3cdfed0e2 | 142db8ac338d189e465d7550f5a918ab1fb9c4f0 | refs/heads/master | 2021-01-21T19:09:13.926654 | 2017-05-24T17:55:33 | 2017-05-24T17:55:33 | 92,122,525 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,171 | py | import makeHistogram, sys
def main(argv):
import os
rootDir = argv[1]
subjects = os.listdir(rootDir)
label = "OPT. Ax DWI 13 B VALUES - as a 15 frames MultiVolume by GE.B-value frame 0-label.nrrd"
maps = ["Slow diff fraction.nrrd","Slow diffusion map.nrrd","Fast diffusion map.nrrd"]
for s in subjects:
# ignore .DS_Store on Mac ...
if s.startswith("."):
continue
for m in maps:
labelFile = os.path.join(rootDir,s,label)
mapFile = os.path.join(rootDir,s,m)
labels = makeHistogram.getMaskImageLabels(labelFile)
print "Labels:",labels
for l in labels:
if l == 1:
labelName = "TumorTZ (label 1)"
elif l == 3:
labelName = "NormalTZ (label 3)"
else:
labelName = "Label"+str(l)
labelName = "Label"+str(l)
histoFile = os.path.join(rootDir,s,m.split(".")[0]+"-histogram-"+labelName+".pdf")
array = makeHistogram.getMaskedImageArray(mapFile,labelFile,label=l)
makeHistogram.makeHisto(array,histoFile,title=m.split(".")[0]+" histogram, "+labelName)
#print m,l," array ",array
if __name__ == "__main__":
main(sys.argv)
| [
"andrey.fedorov@gmail.com"
] | andrey.fedorov@gmail.com |
ab0ec41bd693d803f1b8291982b7e7f8e9b98432 | 76615f88840948671ce7a0944d04c335378bfd59 | /bluesky/tutorial-2.py | 4fd44709646c2f8b32e245b07f7459512c6f2413 | [] | no_license | NSLS-II/NSLS-II.github.io | 802e7e29b2a8cd8ab6d720de9f0cce154ecf9d82 | f2ec8b415e59d5135517adaadbc3627f7692ad92 | refs/heads/master | 2023-08-30T21:33:20.808997 | 2023-08-19T17:03:59 | 2023-08-19T17:03:59 | 29,092,632 | 5 | 6 | null | 2023-06-05T15:48:53 | 2015-01-11T13:30:39 | HTML | UTF-8 | Python | false | false | 268 | py | from bluesky.plans import scan
from ophyd.sim import det4, motor1, motor2
dets = [det4]
RE(scan(dets,
motor1, -1.5, 1.5, # scan motor1 from -1.5 to 1.5
motor2, -0.1, 0.1, # ...while scanning motor2 from -0.1 to 0.1
11)) # ...both in 11 steps | [
"Travis@nomail"
] | Travis@nomail |
08aa43322c4c2c38e87cc1e6aa5eac604c2e6150 | 37de565d394ef27dc7d6e43f0e0e4c3a98293bbd | /p9.py | 7ba7c056548f23d3ed726618621f831a5b7fff29 | [] | no_license | Hemangi3598/chap-3_p9 | 340ec3bb8b18f1ce3388ff98291226ec9cba7712 | 7af8da997891732096cfd7bb6bc90960aa66b920 | refs/heads/main | 2023-08-04T17:18:03.463246 | 2021-09-17T07:54:23 | 2021-09-17T07:54:23 | 407,454,414 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 399 | py | # wapp to find the rev of the given number
# wapp to find the rev of the given number
num = int(input("enter the number"))
if num < 0:
    print("invalid number")
else:
    print("num = ", num)
    # Reverse the decimal digits via string slicing; int() drops any
    # leading zeros, matching the arithmetic digit-by-digit approach.
    rev = int(str(num)[::-1])
    print("rev= ", rev)
"noreply@github.com"
] | Hemangi3598.noreply@github.com |
5bd0a4a788e9d7993b80bcafdfc417e5da471f70 | 1e25bbceebd3faf748c962325c572722a1d24e9c | /aicollege/message/models.py | 419e1022d3345f8531c2093d7005582b5523a627 | [] | no_license | qzhsjz/AICollege_backend | 31a8c9db5e6f63206903db2c6247c14eecaba15a | 4304a193e5b93716b9fb43103eaa1765e24fbdbf | refs/heads/master | 2020-03-17T10:27:11.764380 | 2018-09-09T07:20:12 | 2018-09-09T07:20:12 | 133,512,284 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 718 | py | from django.db import models
from user.models import User
# Create your models here.
class Message(models.Model):
id = models.AutoField(primary_key=True)
sender = models.ForeignKey(User, on_delete=models.CASCADE, related_name='sender', db_constraint=True)
receiver = models.ForeignKey(User, on_delete=models.CASCADE, related_name='receiver', db_constraint=True)
time = models.DateTimeField(auto_now_add=True)
subject = models.CharField(max_length=255)
content = models.TextField()
class Announcement(models.Model):
    """A broadcast notice with no sender/receiver — visible to everyone."""
    id = models.AutoField(primary_key=True)
    time = models.DateTimeField(auto_now_add=True)  # set once at creation
    subject = models.CharField(max_length=255)
    content = models.TextField()
"qzhsjz@gmail.com"
] | qzhsjz@gmail.com |
583db200a1bda895ae3715b39413a5e19bd97fc1 | 69b5c22313044010653f57218b612f3536477ca9 | /introduction to python (3)/chapter 3/remove_duplicates.py | 100e46905d1a45fbb30e6f407b81617b57d17887 | [] | no_license | AmeyLaddad/A-Udacity | 91700500585069a76ffbf87a2d886b6426d76438 | 219068014af3f50a1bc91e004804f814e632fa71 | refs/heads/master | 2020-03-26T01:56:40.737210 | 2019-02-23T03:17:10 | 2019-02-23T03:17:10 | 144,389,509 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 583 | py | def remove_duplicates(input_list):
final = []
for x in input_list:
if x not in final:
final.append(x)
return final
print(remove_duplicates(["india","pakistan","nepal","bhutan","USA","USA"]))
"""
"This is the second way"
def remove_duplicates(input_list):
for x in input_list:
y = 0
while y<len(input_list):
if x == input_list[y]:
del input_list[y]
return input_list
print(remove_duplicates(["india","pakistan","nepal","bhutan","USA","UK"]))
""" | [
"31889511+AmeyLaddad@users.noreply.github.com"
] | 31889511+AmeyLaddad@users.noreply.github.com |
c5c326fef01f68b470319f1e670f0c87cb312bb0 | 285c6c329dd20614f3f014dd85006a4b74ee94fc | /MatasanoCrypto/cryptools.py | 5fb0c32dc35af7abffa52d782ce8a19693522f51 | [] | no_license | sbedell/CodingChallenges | 60bc6357d008478b26f5e9428748105dff4550c9 | 531975ea9fea1b35e58464ad96ff2d6a2b72fa99 | refs/heads/master | 2021-01-18T05:33:55.403524 | 2020-06-01T05:47:02 | 2020-06-01T05:47:02 | 29,806,946 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,914 | py | import base64
import binascii
import string
import random
import os
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
## Text processing functions:
def splitArray(array, n):
    """Split a sequence (string, list, bytes, ...) into chunks of length n.

    The final chunk is shorter when len(array) is not a multiple of n.
    """
    chunks = []
    for start in range(0, len(array), n):
        chunks.append(array[start:start + n])
    return chunks
def hammingDistance(s1, s2):
    """Return the bitwise Hamming distance between two equal-length strings.

    Counts the number of differing BITS between the UTF-8 encodings, which is
    what the Matasano repeating-key-XOR keysize search needs. The previous
    implementation summed differing characters plus differing hex digits,
    which is not the Hamming distance (the canonical pair below must give 37).

    Raises ValueError when the strings differ in length.
    """
    if len(s1) != len(s2):
        raise ValueError("String lengths are not equal")
    return sum(bin(b1 ^ b2).count("1")
               for b1, b2 in zip(s1.encode(), s2.encode()))
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# PKCS Stuff:
# Just a custom exception, no error message values
class PaddingError(Exception):
    """Raised when PKCS#7 padding validation fails.

    The previous no-op __init__(self) accidentally made PaddingError("msg")
    raise TypeError; inheriting Exception's constructor keeps the existing
    no-argument usage working and additionally allows a message.
    """
def paddingPKCS7(s, n):
    """Pad byte string *s* with PKCS#7-style padding up to total length *n*.

    Appends (n - len(s)) copies of the byte (n - len(s)). When len(s) == n
    nothing is appended. NOTE(review): callers are assumed to pass
    len(s) <= n and n < 256 — a longer input would fail on bytes([negative]).
    """
    pad_len = n - len(s)
    return s + bytes([pad_len]) * pad_len
def validatePKCS7(bytestring):
    """
    Takes in a plaintext (bytes).
    Determines if it has valid PKCS#7 padding.
    If so, it strips the padding off and returns the plaintext.
    If not, it raises PaddingError.

    Fixes over the original: a last byte of 0 previously made
    bytestring[-0:] select the WHOLE string (and [:-0] return b""), and a
    pad length larger than the input was never rejected. Both now raise.
    """
    if not bytestring:
        raise PaddingError
    pad_len = bytestring[-1]
    # A valid PKCS#7 pad length is between 1 and the message length.
    if pad_len < 1 or pad_len > len(bytestring):
        raise PaddingError
    if any(b != pad_len for b in bytestring[-pad_len:]):
        raise PaddingError
    return bytestring[:-pad_len]
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Challenge 1:
def hexToBase64(hexStr):
    """Challenge 1: decode a hex string and re-encode it as base64 (bytes)."""
    raw = binascii.unhexlify(hexStr)
    return base64.b64encode(raw)
# Challenge 2:
def fixedXOR(s1, s2):
    """Challenge 2: XOR two equal-length hex strings, returning raw chars."""
    pairs = zip(binascii.unhexlify(s1), binascii.unhexlify(s2))
    return "".join(chr(a ^ b) for a, b in pairs)
# Logically Equivalent:
def fixedXOR2(s1, s2):
    """Same as fixedXOR, written with an explicit accumulator loop."""
    out = []
    for x, y in zip(binascii.unhexlify(s1), binascii.unhexlify(s2)):
        out.append(chr(x ^ y))
    return "".join(out)
# Not Unhexlifying:
def fixedXorNoUnhex(s1, s2):
    """XOR two plain strings character-by-character (no hex decoding)."""
    result = ""
    for x, y in zip(s1, s2):
        result += chr(ord(x) ^ ord(y))
    return result
# Challenge 3:
def singleCharXOR(s1, c):
    """Challenge 3: XOR every byte of hex string *s1* with key byte *c*."""
    return "".join(chr(c ^ byte) for byte in binascii.unhexlify(s1))
# Challenge 4?
def freqAnalysis(file):
    """Return lowercased words that appear more than 25 times in *file*.

    Fixes two defects in the original: membership was tested on the RAW word
    while the dict key was the punctuation-stripped word (so variants with
    trailing punctuation corrupted counts), and the first occurrence was
    stored as 0, making every count off by one.
    """
    wordFreq = {}
    with open(file) as f:
        for line in f:
            for raw in line.split():
                word = raw.strip().strip("();,?.\"")
                wordFreq[word] = wordFreq.get(word, 0) + 1
    # tune this threshold to whatever num you want
    return [word.lower() for word, count in wordFreq.items() if count > 25]
# Challenge 5:
def repeatingXOR(plaintext, key):
    """Challenge 5: repeating-key XOR.

    plaintext = byte string to encrypt
    key       = byte string key, cycled over the plaintext
    Returns the hex encoding (bytes) of the XOR result.

    The original built a str via chr() and re-encoded it with .encode(),
    which UTF-8-expands any XOR byte > 0x7F into TWO bytes and corrupts the
    ciphertext; working purely in bytes fixes that while producing identical
    output for ASCII-range results.
    """
    out = bytes(c ^ key[i % len(key)] for i, c in enumerate(plaintext))
    return binascii.hexlify(out)
# Challenge 8:
def ecbDetected(ciphertext):
    """Challenge 8: heuristic ECB detector.

    Returns True if any 16-byte block of *ciphertext* repeats (a hallmark of
    ECB mode), False otherwise.
    """
    blocks = [ciphertext[i:i + 16] for i in range(0, len(ciphertext), 16)]
    # A set collapses duplicates, so fewer unique blocks => repetition.
    return len(set(blocks)) < len(blocks)
# Garbage for challenge 6:
def getXorKeysize(ciphertext):
    """Guess the repeating-XOR key size (challenge 6).

    Tries key sizes 2..39 and returns the one with the smallest
    length-normalized edit distance between the first two blocks.

    The original referenced `smallestKeySize` before assigning it (NameError
    on the very first iteration), kept an unused dict, and returned nothing;
    this version tracks and returns the best candidate.
    """
    best_size = None
    best_dist = float("inf")
    for ks in range(2, 40):
        dist = hammingDistance(ciphertext[:ks].decode(),
                               ciphertext[ks:2 * ks].decode()) / ks
        if dist < best_dist:
            best_dist = dist
            best_size = ks
    return best_size
##def getXorKeysize2(ciphertext):
## blocks = splitArray(ciphertext, 2)
def bruteXorKey():
    # TODO: unimplemented stub — intended to brute-force a single-byte XOR key.
    pass
def breakRepeatingKeyXOR():
    # TODO: unimplemented stub — intended to solve challenge 6 end-to-end.
    pass
| [
"bedell.369@osu.edu"
] | bedell.369@osu.edu |
a9bd392f278b6f226a6bf011cbda24a2e6f9cf25 | f70b4ab08835d7bd59cb468b9a795fd73d1c9d46 | /accounts/urls.py | 2781dcd916847f2a9cc6b1eebf85e1f514faeece | [] | no_license | xxkaminaxx/XxRelix4U | c4638c7009ca4fd049a31d144acd407c061c36a2 | 56fe6f0bfd4e7076f1224c9851d28e1b4af28a4e | refs/heads/master | 2022-12-10T08:38:36.798928 | 2020-03-08T08:06:48 | 2020-03-08T08:06:48 | 239,625,285 | 0 | 1 | null | 2022-11-22T05:21:11 | 2020-02-10T22:18:30 | Python | UTF-8 | Python | false | false | 424 | py | from django.conf.urls import url, include
from accounts.views import logout, login, user_registration
from accounts import url_reset
# account related urls here then imported into main url.
urlpatterns = [
url(r'^logout/', logout, name="logout"),
url(r'^login/', login, name="login"),
url(r'^user_registration/', user_registration, name="user_registration"),
url(r'^password-reset/', include(url_reset))
]
| [
"kasolos@hotmail.co.uk"
] | kasolos@hotmail.co.uk |
9726a9fbf16f8881db41c4e2da04c76e619bcd5f | 00c6ded41b84008489a126a36657a8dc773626a5 | /.history/Sizing_Method/ConstrainsAnalysis/ConstrainsAnalysisPD_20210712183638.py | 3a1a9c53b9d9a89641bbb3d4af878f7d702672f8 | [] | no_license | 12libao/DEA | 85f5f4274edf72c7f030a356bae9c499e3afc2ed | 1c6f8109bbc18c4451a50eacad9b4dedd29682bd | refs/heads/master | 2023-06-17T02:10:40.184423 | 2021-07-16T19:05:18 | 2021-07-16T19:05:18 | 346,111,158 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,856 | py | # author: Bao Li #
# Georgia Institute of Technology #
import sys
import os
sys.path.insert(0, os.getcwd())
import numpy as np
import matplotlib.pylab as plt
import Sizing_Method.Other.US_Standard_Atmosphere_1976 as atm
import Sizing_Method.Aerodynamics.ThrustLapse as thrust_lapse
import Sizing_Method.Aerodynamics.Aerodynamics as ad
import Sizing_Method.ConstrainsAnalysis.ConstrainsAnalysis as ca
from scipy.optimize import curve_fit
"""
The unit use is IS standard
"""
class ConstrainsAnalysis_Mattingly_Method_with_DP:
"""This is a power-based master constraints analysis"""
def __init__(self, altitude, velocity, beta, wing_load, Hp=0.2, number_of_motor=12, C_DR=0):
"""
:param beta: weight fraction
:param Hp: P_motor/P_total
:param n: number of motor
:param K1: drag polar coefficient for 2nd order term
:param K2: drag polar coefficient for 1st order term
:param C_D0: the drag coefficient at zero lift
:param C_DR: additional drag caused, for example, by external stores,
braking parachutes or flaps, or temporary external hardware
:return:
power load: P_WTO
"""
self.h = altitude
self.v = velocity
self.rho = atm.atmosphere(geometric_altitude=self.h).density()
self.beta = beta
self.hp = Hp
self.n = number_of_motor
# power lapse ratio
self.alpha = thrust_lapse.thrust_lapse_calculation(altitude=self.h,
velocity=self.v).high_bypass_ratio_turbofan()
self.k1 = ad.aerodynamics_without_pd(self.h, self.v).K1()
self.k2 = ad.aerodynamics_without_pd(self.h, self.v).K2()
self.cd0 = ad.aerodynamics_without_pd(self.h, self.v).CD_0()
self.cdr = C_DR
self.w_s = wing_load
self.g0 = 9.80665
self.coefficient = (1 - self.hp) * self.beta * self.v / self.alpha
# Estimation of ΔCL and ΔCD
pd = ad.aerodynamics_with_pd(self.h, self.v, Hp=self.hp, n=self.n, W_S=self.w_s)
self.q = 0.5 * self.rho * self.v ** 2
self.cl = self.beta * self.w_s / self.q
# print(self.cl)
self.delta_cl = pd.delta_lift_coefficient(self.cl)
self.delta_cd0 = pd.delta_CD_0()
def master_equation(self, n, dh_dt, dV_dt):
cl = self.cl * n + self.delta_cl
cd = self.k1 * cl ** 2 + self.k2 * cl + self.cd0 + self.cdr + self.delta_cd0
p_w = self.coefficient * (self.q / (self.beta * self.w_s) * cd + dh_dt / self.v + dV_dt / self.g0)
return p_w
def cruise(self):
p_w = ConstrainsAnalysis_Mattingly_Method_with_DP.master_equation(self, n=1, dh_dt=0, dV_dt=0)
return p_w
def climb(self, roc):
p_w = ConstrainsAnalysis_Mattingly_Method_with_DP.master_equation(self, n=1, dh_dt=roc, dV_dt=0)
return p_w
def level_turn(self, turn_rate=3, v=100):
"""
assume 2 min for 360 degree turn, which is 3 degree/seconds
assume turn at 300 knots, which is about 150 m/s
"""
load_factor = (1 + ((turn_rate * np.pi / 180) * v / self.g0) ** 2) ** 0.5
p_w = ConstrainsAnalysis_Mattingly_Method_with_DP.master_equation(self, n=load_factor, dh_dt=0, dV_dt=0)
return p_w
def take_off(self):
"""
A320neo take-off speed is about 150 knots, which is about 75 m/s
required runway length is about 2000 m
K_TO is a constant greater than one set to 1.2 (generally specified by appropriate flying regulations)
"""
Cl_max_to = 2.3 # 2.3
K_TO = 1.2 # V_TO / V_stall
s_G = 1266
p_w = 2 / 3 * self.coefficient / self.v * self.beta * K_TO ** 2 / (
s_G * self.rho * self.g0 * Cl_max_to) * self.w_s ** (
3 / 2)
return p_w
def stall_speed(self, V_stall_to=65, Cl_max_to=2.3):
V_stall_ld = 62
Cl_max_ld = 2.87
W_S_1 = 1 / 2 * self.rho * V_stall_to ** 2 * (Cl_max_to + self.delta_cl)
W_S_2 = 1 / 2 * self.rho * V_stall_ld ** 2 * (Cl_max_ld + self.delta_cl)
W_S = min(W_S_1, W_S_2)
return W_S
def service_ceiling(self, roc=0.5):
p_w = ConstrainsAnalysis_Mattingly_Method_with_DP.master_equation(self, n=1, dh_dt=roc, dV_dt=0)
return p_w
allFuncs = [take_off, stall_speed, cruise, service_ceiling, level_turn, climb]
class ConstrainsAnalysis_Gudmundsson_Method_with_DP:
    """This is a power-based master constraints analysis based on Gudmundsson_method.

    SI units throughout; constraint methods return required power loading
    P/W_TO (W/N), except stall_speed which returns a wing-loading limit.
    """

    def __init__(self, altitude, velocity, beta, wing_load, Hp=0.2, number_of_motor=12, e=0.75, AR=10.3):
        """
        :param tau: power fraction of i_th power path
        :param beta: weight fraction
        :param e: wing planform efficiency factor is between 0.75 and 0.85, no more than 1
        :param AR: wing aspect ratio, normally between 7 and 10
        :return:
            power load: P_WTO
        """
        self.h = altitude
        self.v = velocity
        self.beta = beta
        self.w_s = wing_load
        self.g0 = 9.80665  # standard gravity (m/s^2)
        self.beta = beta  # (duplicate of the assignment above; harmless)
        self.hp = Hp
        self.n = number_of_motor
        self.rho = atm.atmosphere(geometric_altitude=self.h).density()

        # power lapse ratio
        self.alpha = thrust_lapse.thrust_lapse_calculation(altitude=self.h,
                                                           velocity=self.v).high_bypass_ratio_turbofan()

        h = 2.43  # height of winglets
        b = 35.8  # wing span (m) — presumably A320-class; TODO confirm
        ar_corr = AR * (1 + 1.9 * h / b)  # equation 9-88, If the wing has winglets the aspect ratio should be corrected
        self.k = 1 / (np.pi * ar_corr * e)  # induced-drag factor

        self.coefficient = (1 - self.hp) * self.beta * self.v / self.alpha

        # Estimation of ΔCL and ΔCD from distributed propulsion (DP) effects.
        pd = ad.aerodynamics_with_pd(self.h, self.v, Hp=self.hp, n=self.n, W_S=self.w_s)
        self.q = 0.5 * self.rho * self.v ** 2  # dynamic pressure
        cl = self.beta * self.w_s / self.q
        self.delta_cl = pd.delta_lift_coefficient(cl)
        self.delta_cd0 = pd.delta_CD_0()

        # TABLE 3-1 Typical Aerodynamic Characteristics of Selected Classes of Aircraft
        cd_min = 0.02
        cd_to = 0.03
        cl_to = 0.8

        self.v_to = 68   # take-off speed (m/s)
        self.s_g = 1480  # ground-roll distance (m)
        self.mu = 0.04   # rolling-friction coefficient

        # DP increments folded into the baseline aerodynamic coefficients.
        self.cd_min = cd_min + self.delta_cd0
        self.cl = cl + self.delta_cl
        self.cd_to = cd_to + self.delta_cd0
        self.cl_to = cl_to + self.delta_cl

    def cruise(self):
        p_w = self.q / self.w_s * (self.cd_min + self.k * self.cl ** 2)
        return p_w * self.coefficient

    def climb(self, roc):
        # roc: rate of climb (m/s).
        p_w = roc / self.v + self.q * self.cd_min / self.w_s + self.k * self.cl
        return p_w * self.coefficient

    def level_turn(self, turn_rate=3, v=100):
        """
        assume 2 min for 360 degree turn, which is 3 degree/seconds
        assume turn at 100 m/s
        """
        load_factor = (1 + ((turn_rate * np.pi / 180) * v / self.g0) ** 2) ** 0.5
        q = 0.5 * self.rho * v ** 2
        p_w = q / self.w_s * (self.cd_min + self.k * (load_factor / q * self.w_s + self.delta_cl) ** 2)
        return p_w * self.coefficient

    def take_off(self):
        # Gudmundsson ground-roll constraint at V_TO/sqrt(2) dynamic pressure.
        q = self.q / 2
        p_w = self.v_to ** 2 / (2 * self.g0 * self.s_g) + q * self.cd_to / self.w_s + self.mu * (
                1 - q * self.cl_to / self.w_s)
        return p_w * self.coefficient

    def service_ceiling(self, roc=0.5):
        # vy: best-rate-of-climb speed at this altitude/wing loading.
        vy = (2 / self.rho * self.w_s * (self.k / (3 * self.cd_min)) ** 0.5) ** 0.5
        q = 0.5 * self.rho * vy ** 2
        p_w = roc / vy + q / self.w_s * (self.cd_min + self.k * (self.w_s / q + self.delta_cl) ** 2)
        # p_w = roc / (2 / self.rho * self.w_s * (self.k / (3 * self.cd_min)) ** 0.5) ** 0.5 + 4 * (
        #         self.k * self.cd_min / 3) ** 0.5
        return p_w * self.coefficient

    def stall_speed(self, V_stall_to=65, Cl_max_to=2.3):
        # Returns a wing-loading LIMIT (N/m^2), not a power loading.
        V_stall_ld = 62
        Cl_max_ld = 2.87
        W_S_1 = 1 / 2 * self.rho * V_stall_to ** 2 * (Cl_max_to + self.delta_cl)
        W_S_2 = 1 / 2 * self.rho * V_stall_ld ** 2 * (Cl_max_ld + self.delta_cl)
        W_S = min(W_S_1, W_S_2)
        return W_S

    # Dispatch table used by the plotting driver below; order matters there.
    allFuncs = [take_off, stall_speed, cruise, service_ceiling, level_turn, climb]
if __name__ == "__main__":
    # Sweep wing loading over [100, 9000] N/m^2 and plot the power-loading
    # requirement of each constraint, for three comparison figures:
    # k=0 Gudmundsson (DP vs no-DP), k=1 Mattingly (DP vs no-DP),
    # k=2 Gudmundsson-with-DP vs Mattingly-with-DP.
    n = 100
    w_s = np.linspace(100, 9000, n)
    constrains_name = ['take off', 'stall speed', 'cruise', 'service ceiling', 'level turn @3000m',
                       'climb @S-L', 'climb @3000m', 'climb @7000m']
    # Each row: [altitude (m), velocity (m/s), beta weight fraction].
    constrains = np.array([[0, 68, 0.988], [0, 80, 1], [11300, 230, 0.948],
                           [11900, 230, 0.78], [3000, 100, 0.984], [0, 100, 0.984],
                           [3000, 200, 0.975], [7000, 230, 0.96]])
    color = ['c', 'k', 'b', 'g', 'y', 'plum', 'violet', 'm']
    label = ['feasible region with PD', 'feasible region with PD', 'feasible region Gudmundsson',
             'feasible region without PD', 'feasible region without PD', 'feasible region Mattingly']

    m = constrains.shape[0]
    # Rows 0..m-1 hold problem1's curves, rows m..2m-1 hold problem2's.
    p_w = np.zeros([2 * m, n])

    for k in range(3):
        plt.figure(figsize=(12, 8))
        for i in range(m):
            for j in range(n):
                h = constrains[i, 0]
                v = constrains[i, 1]
                beta = constrains[i, 2]
                if k == 0:
                    problem1 = ConstrainsAnalysis_Gudmundsson_Method_with_DP(h, v, beta, w_s[j])
                    problem2 = ca.ConstrainsAnalysis_Gudmundsson_Method(h, v, beta, w_s[j])
                    plt.title(r'Constraint Analysis: $\bf{Gudmundsson-Method}$ - Normalized to Sea Level')
                elif k == 1:
                    problem1 = ConstrainsAnalysis_Mattingly_Method_with_DP(h, v, beta, w_s[j])
                    problem2 = ca.ConstrainsAnalysis_Mattingly_Method(h, v, beta, w_s[j])
                    plt.title(r'Constraint Analysis: $\bf{Mattingly-Method}$ - Normalized to Sea Level')
                else:
                    problem1 = ConstrainsAnalysis_Gudmundsson_Method_with_DP(h, v, beta, w_s[j])
                    problem2 = ConstrainsAnalysis_Mattingly_Method_with_DP(h, v, beta, w_s[j])
                    plt.title(r'Constraint Analysis: $\bf{with}$ $\bf{DP}$ - Normalized to Sea Level')
                if i >= 5:
                    # Rows 5..7 are the climb constraints at 15/10/5 m/s.
                    p_w[i, j] = problem1.allFuncs[-1](problem1, roc=15 - 5 * (i - 5))
                    p_w[i + m, j] = problem2.allFuncs[-1](problem2, roc=15 - 5 * (i - 5))
                else:
                    p_w[i, j] = problem1.allFuncs[i](problem1)
                    p_w[i + m, j] = problem2.allFuncs[i](problem2)

            if i == 1:
                # Stall speed returns a W/S limit, so it is drawn as a
                # (nearly) vertical line with its own two-entry legend.
                l1a, = plt.plot(p_w[i, :], np.linspace(0, 250, n), color=color[i], label=constrains_name[i])
                l1b, = plt.plot(p_w[i + m, :], np.linspace(0, 250, n), color=color[i], linestyle='--')
                if k != 2:
                    l1 = plt.legend([l1a, l1b], ['with DP', 'without DP'], loc="upper right")
                else:
                    l1 = plt.legend([l1a, l1b], ['Gudmundsson method', 'Mattingly method'], loc="upper right")
            else:
                plt.plot(w_s, p_w[i, :], color=color[i], label=constrains_name[i])
                plt.plot(w_s, p_w[i + m, :], color=color[i], linestyle='--')

            # p_w[1, :] = 200 / (p_w[1, -1] - p_w[1, 20]) * (w_s - p_w[1, 2])

            def func(x, a, b, c, d, e):
                # Quartic used to re-express the stall curve as p_w(w_s).
                return a + b*x + c*x**2 + d*x**3 + e*x**4
                #return a * np.exp(b * x) + c

            if i == 1 or i == 1+m :
                # NOTE(review): i never reaches 1+m inside this loop, so only
                # the first branch ever fires — presumably intentional leftover.
                xdata, ydata = p_w[i, :], np.linspace(0, 250, n)
                popt, _ = curve_fit(func, xdata, ydata)
                p_w[i, :] = func(w_s, popt[0], popt[1], popt[2], popt[3], popt[4])

        #if k != 2:
        #    p_w[1 + m, :] = 10 ** 10 * (w_s - p_w[1 + m, 2])
        #else:
        #    p_w[1 + m, :] = 200 / (p_w[1 + m, -1] - p_w[1 + m, 20]) * (w_s - p_w[1 + m, 2])

        # Shade the feasible regions above the envelope of all constraints.
        plt.fill_between(w_s, np.amax(p_w[0:m, :], axis=0), 200, color='b', alpha=0.25,
                         label=label[k])
        plt.fill_between(w_s, np.amax(p_w[m:2 * m, :], axis=0), 200, color='r', alpha=0.25,
                         label=label[k + 3])
        plt.xlabel('Wing Load: $W_{TO}$/S (N/${m^2}$)')
        plt.ylabel('Power-to-Load: $P_{SL}$/$W_{TO}$ (W/N)')
        plt.legend(bbox_to_anchor=(1.002, 1), loc="upper left")
        plt.gca().add_artist(l1)
        plt.xlim(100, 9000)
        plt.ylim(0, 200)
        plt.tight_layout()
        plt.grid()
        plt.show()
| [
"libao@gatech.edu"
] | libao@gatech.edu |
cb138abac8d2b7e034cc1403158c58a81f2da182 | 2785af72c4bd533914e7e9e32cf1a2da81e3270d | /resources/songstolink.py | d7bb58f33741ed988964558dbc18db9c025f5a16 | [] | no_license | zackmuc/MediaPortal | 6a6db838c597e9b0929e2d767953d7abacf2b2d5 | c97abc9d12fa5fdda8dea60aa55d7b499515b005 | refs/heads/master | 2021-01-16T22:07:54.689689 | 2013-06-24T16:54:59 | 2013-06-24T16:54:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,554 | py | # -*- coding: utf-8 -*-
from Plugins.Extensions.MediaPortal.resources.imports import *
class SongstoLink:
def __init__(self, session):
print "SongstoLink:"
self.session = session
self._callback = None
self._errback = None
self._baseurl = "http://s.songs.to/data.php?id="
self.imgurl = ''
def getLink(self, cb_play, cb_err, sc_title, sc_artist, sc_album, token, imgurl):
self._callback = cb_play
self._errback = cb_err
self.imgurl = imgurl
if token != '':
scStream = self._baseurl+token
print "hash: ",token
self._callback(sc_title, scStream, sc_album, sc_artist, imgurl)
else:
title = urllib2.quote(sc_title.encode("utf8"))
artist = urllib2.quote(sc_artist.encode("utf8"))
url = "http://songs.to/json/songlist.php?quickplay=1"
dataPost = "data=%7B%22data%22%3A%5B%7B%22artist%22%3A%22"+artist+"%22%2C%20%22album%22%3A%22%22%2C%20%22title%22%3A%22"+title+"%22%7D%5D%7D"
getPage(url, method='POST', postdata=dataPost, headers={'Content-Type':'application/x-www-form-urlencoded'}).addCallback(self.scDataPost).addErrback(cb_err)
def scDataPost(self, data):
findSongs = re.findall('"hash":"(.*?)","title":"(.*?)","artist":"(.*?)","album":"(.*?)"', data)
found = False
if findSongs:
print findSongs
(scHash, scTitle, scArtist, scAlbum) = findSongs[0]
if scHash:
found = True
print "hash: ",scHash
scStream = self._baseurl+scHash
print scHash
self._callback(scTitle, scStream, scAlbum, scArtist, self.imgurl)
if not found:
self._errback('scHash not found!') | [
"kschmidt2007@googlemail.com"
] | kschmidt2007@googlemail.com |
923407afb596f24f329293846a992525aa6538e4 | 4ff9c71749de4a1311476159190184a5a994b1fa | /constant.py | c6f2fd1b705172d008a54bd220fc706d3560688b | [] | no_license | JayCax/CS-362-Portfolio-Project | a5c364c19dca915637b0b5268e64c0a78e29bd61 | 5a5d199c38bda2c03ef064c2fdae91c2835bd90f | refs/heads/main | 2023-01-27T11:40:59.861000 | 2020-12-10T08:07:28 | 2020-12-10T08:07:28 | 320,201,914 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 21 | py | SECS_IN_DAY = 86400
| [
"noreply@github.com"
] | JayCax.noreply@github.com |
f2d88ede145a55a634404601a3248fdd20b69f0c | 45de3aa97525713e3a452c18dcabe61ac9cf0877 | /src/primaires/perso/stat.py | 376ea04b6dc9b0fedb3545626465b19dc441e1d2 | [
"BSD-3-Clause"
] | permissive | stormi/tsunami | 95a6da188eadea3620c70f7028f32806ee2ec0d1 | bdc853229834b52b2ee8ed54a3161a1a3133d926 | refs/heads/master | 2020-12-26T04:27:13.578652 | 2015-11-17T21:32:38 | 2015-11-17T21:32:38 | 25,606,146 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,954 | py | # -*-coding:Utf-8 -*
# Copyright (c) 2010 LE GOFF Vincent
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Ce fichier contient la classe Stat, détaillée plus bas."""
from abstraits.obase import BaseObj
from primaires.perso.exceptions.stat import *
# Flags :
NX = 0 # aucune exception ne sera levée
I0 = 1 # lève une exception si strictement inférieure à 0
IE0 = 2 # lève une exception si inférieure ou égale à 0
SM = 4 # lève une exception si strictement supérieure au MAX
SEM = 8 # lève une exception si supérieure ou égale au MAX
class Stat(BaseObj):
"""Cette classe définit une stat (ou caractéristique).
Les attributs d'une stat sont :
nom -- son nom
symbole -- son symbole (utile pour le prompt)
defaut -- sa valeur par défaut, celle donnée à un joueur à sa création
marge -- la marge maximale
max -- une chaîne de caractère représentant une autre stat
flags -- les flags indiquant quand une exception doit être levée
parent -- le parent hébergeant les stats
"""
_nom = "stat"
_version = 1
def __init__(self, nom, symbole, defaut, marge, max, flags=I0, parent=None):
"""Constructeur d'une stat.
Elle prend les mêmes paramètres que ceux passés dans l'ordre, dans
la configuration.
Voir : ./cfg_stats.py
"""
BaseObj.__init__(self)
self.nom = nom
self.symbole = symbole
self.defaut = defaut
self.marge_min = 0
self.marge_max = marge
self.nom_max = max
self.flags = flags
self.parent = parent
# Valeurs
self.__base = self.defaut
self.__variable = 0
self.__max = None
if self.parent and max:
self.__max = getattr(self.parent, "_{}".format(max))
self._construire()
def __getnewargs__(self):
return ("", "", "", 0, "")
def __repr__(self):
return "<stat {}={}>".format(self.nom, self.courante)
def __str__(self):
return "{}={} (base={}, variable={}, max={})".format(
self.nom, self.courante, self.base, self.variable, self.max)
@property
def base(self):
return self.__base
def _get_variable(self):
return self.__variable
def _set_variable(self, variable):
self.__variable = variable
variable = property(_get_variable, _set_variable)
@property
def max(self):
max = self.__max
if max:
max = max.courante
return max
def _get_courante(self):
return self.__base + self.__variable
def _set_courante(self, courante):
"""C'est dans cette propriété qu'on change la valeur courante
de la stat.
On passe par une méthode 'set' qui fait le travail.
"""
self.set(courante, self.flags)
courante = property(_get_courante, _set_courante)
def set(self, courante, flags):
"""Modifie la stat courante.
C'est dans cette méthode qu'on lève des exceptions en fonction des
valeurs modifiées.
NOTE IMPORTANTE: la valeur est modifiée quelque soit l'exception
levée. L'exception est levée pour réagir à un certain comportement
(par exemple, le joueur n'a plus de vitalité) mais elle n'empêchera
pas la stat d'être modifiée.
En revanche, on test bel et bien que la stat de base ne dépasse ni
le max ni la marge.
"""
base = courante - self.__variable
if self.parent and self.parent.parent and \
not self.parent.parent.est_immortel():
# Levée d'exceptions
if base < 0 and flags & I0:
self.__base = 0
raise StatI0
if base <= 0 and flags & IE0:
self.__base = 0
raise StatIE0
if self.max and flags & SM and base > self.max:
raise StatSM
if self.max and flags & SEM and base >= self.max:
raise StatSEM
if base > self.marge_max:
base = self.marge_max
if base < self.marge_min:
base = self.marge_min
if self.max and base > self.max:
base = self.max
if self.parent and self.parent.parent and \
self.parent.parent.est_immortel() and self.max:
base = self.max
self.__base = base
def __setattr__(self, nom, val):
BaseObj.__setattr__(self, nom, val)
| [
"kredh@free.fr"
] | kredh@free.fr |
eebaf1cc5939bf3397f44b7abae4b3301b3f9927 | f9d564f1aa83eca45872dab7fbaa26dd48210d08 | /huaweicloud-sdk-cc/huaweicloudsdkcc/v3/model/update_network_instance_request_body.py | ccb6925b1dca4650bfa9c81651ceef569cd52c3e | [
"Apache-2.0"
] | permissive | huaweicloud/huaweicloud-sdk-python-v3 | cde6d849ce5b1de05ac5ebfd6153f27803837d84 | f69344c1dadb79067746ddf9bfde4bddc18d5ecf | refs/heads/master | 2023-09-01T19:29:43.013318 | 2023-08-31T08:28:59 | 2023-08-31T08:28:59 | 262,207,814 | 103 | 44 | NOASSERTION | 2023-06-22T14:50:48 | 2020-05-08T02:28:43 | Python | UTF-8 | Python | false | false | 3,407 | py | # coding: utf-8
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class UpdateNetworkInstanceRequestBody:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'network_instance': 'UpdateNetworkInstance'
}
attribute_map = {
'network_instance': 'network_instance'
}
def __init__(self, network_instance=None):
"""UpdateNetworkInstanceRequestBody
The model defined in huaweicloud sdk
:param network_instance:
:type network_instance: :class:`huaweicloudsdkcc.v3.UpdateNetworkInstance`
"""
self._network_instance = None
self.discriminator = None
self.network_instance = network_instance
@property
def network_instance(self):
"""Gets the network_instance of this UpdateNetworkInstanceRequestBody.
:return: The network_instance of this UpdateNetworkInstanceRequestBody.
:rtype: :class:`huaweicloudsdkcc.v3.UpdateNetworkInstance`
"""
return self._network_instance
@network_instance.setter
def network_instance(self, network_instance):
"""Sets the network_instance of this UpdateNetworkInstanceRequestBody.
:param network_instance: The network_instance of this UpdateNetworkInstanceRequestBody.
:type network_instance: :class:`huaweicloudsdkcc.v3.UpdateNetworkInstance`
"""
self._network_instance = network_instance
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
import simplejson as json
if six.PY2:
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
def __repr__(self):
"""For `print`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, UpdateNetworkInstanceRequestBody):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| [
"hwcloudsdk@huawei.com"
] | hwcloudsdk@huawei.com |
00a260febb6f5bdbad5f3e248f26b978211197fb | 115fa8e854120660b395180dbfe48461d824a7dd | /test_numba.py | f624eda17d63c6d47e8c3853111e30f4d2d87748 | [] | no_license | zafercavdar/python2c-compilers | 8a123105e0ee148e400dd6258e02950cca08f614 | a6cc7de2bb430566b170ec6ed41953faa537d498 | refs/heads/master | 2020-03-18T16:57:44.677608 | 2018-05-26T21:42:52 | 2018-05-26T21:42:52 | 134,996,222 | 1 | 2 | null | null | null | null | UTF-8 | Python | false | false | 3,895 | py | from numba import jit
from numba.decorators import autojit
from time import time
from math import log10
import re
import numpy as np
regex = r"\s+"
def op(x):
return 2 * x - 1
def split_by_pattern(x):
return re.split(regex, x)
def read_file(path):
f = open(path, "r")
return [line for line in f]
def iterable(size):
return [i for i in range(0, size)]
def numba_apply(func, iterable):
def pure_python_call():
return [func(x) for x in iterable]
@jit
def numba_call():
return [func(x) for x in iterable]
s = time()
pure_python_call()
f = time()
py = f - s
s = time()
numba_call()
f = time()
nb = f - s
print("{} {} {}".format(log10(len(iterable)), py, nb))
def test_numba_vs_python_simple_op():
for x in range(0, 17):
size = pow(3, x)
iterables = iterable(size)
numba_apply(op, iterables)
def test_numba_vs_python_The_Idiot():
for k in range(1, 50):
dataset = read_file("The_Idiot.txt") * k
numba_apply(split_by_pattern, dataset)
def cos_sim_matrix(arr):
N = arr.shape[0]
dist = np.zeros((N, N))
for i, row in enumerate(arr):
for j, col in enumerate(arr):
dot_product = np.dot(row, col)
norm_a = np.linalg.norm(row)
norm_b = np.linalg.norm(col)
dist[i][j] = dot_product / (norm_a * norm_b)
return dist
cos_sim_matrix_numba = autojit(cos_sim_matrix)
def matrix_multiplication(m1, m2):
return m1 * m2
matrix_multiplication_numba = autojit(matrix_multiplication)
def matrix_multiplication_loops(m1, m2):
M = len(m1)
N = len(m1[0])
Q = len(m2[0])
res = np.zeros((M, Q))
sum = 0
for c in range(M):
for d in range(Q):
for k in range(N):
sum += m1[c][k] * m2[k][d]
res[c][d] = sum
sum = 0
return res
matrix_multiplication_loops_numba = autojit(matrix_multiplication_loops)
def matrix_substraction(m1, m2):
return m1 ** 5
matrix_substraction_numba = autojit(matrix_substraction)
def test_numba_with_numpy():
# N = 1000
D = 20
sizes = [x * 100 for x in range(1, 11)]
for size in sizes:
arr = np.random.rand(size, D)
s = time()
cos_sim_matrix(arr)
f = time()
temp = f - s
s = time()
cos_sim_matrix_numba(arr)
f = time()
print("numpy: {} numpy+numba: {}".format(temp, f - s))
def test_numba_matrix_multiplication():
Ns = [x * 100 for x in range(1, 18)]
compiler = np.random.rand(5, 5)
matrix_multiplication_numba(compiler, compiler)
for N in Ns:
m = np.matrix(np.random.rand(N, N))
s = time()
matrix_multiplication(m, m)
f = time()
temp = f - s
s = time()
matrix_multiplication_numba(m, m)
f = time()
print("numpy: {} numpy+numba: {}".format(temp, f - s))
def test_numba_matrix_multiplication_loops():
Ns = [x * 100 for x in range(1, 18)]
compiler = np.random.rand(5, 5)
matrix_multiplication_loops_numba(compiler, compiler)
for N in Ns:
m = np.random.rand(N, N)
s = time()
matrix_multiplication_loops(m, m)
f = time()
temp = f - s
s = time()
matrix_multiplication_loops_numba(m, m)
f = time()
print("python-loops: {} python-loops+numba: {}".format(temp, f - s))
def test_numba_matrix_substraction():
sizes = [x * 500 for x in range(1, 21)]
for size in sizes:
m = np.matrix(np.random.rand(size, size))
s = time()
matrix_substraction(m, m)
f = time()
temp = f - s
s = time()
matrix_substraction_numba(m, m)
f = time()
print("numpy: {} numpy+numba: {}".format(temp, f - s))
test_numba_matrix_multiplication_loops()
| [
"zafercavdar@yahoo.com"
] | zafercavdar@yahoo.com |
6ff299c2cc6c8b9893253d3ebe9d3ea491400c72 | 60be3894ad491bde502b8f6909a026ee115d952e | /aiosmb/network/tcp.py | 3da2e5cbc315e7cbcfde7804212c83c4942ef225 | [] | no_license | topotam/aiosmb | 7c97c6a9806c84a9fae28fa372cc6903fa6ec0c5 | e2ece67bbf380f576b154b09ea5fd63d9b4ecf4c | refs/heads/master | 2023-06-25T17:41:03.605226 | 2021-07-27T18:31:12 | 2021-07-27T18:31:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,676 | py | import enum
import asyncio
from aiosmb import logger
from aiosmb.commons.exceptions import *
class TCPSocket:
"""
Generic asynchronous TCP socket class, nothing SMB related.
Creates the connection and channels incoming/outgoing bytes via asynchonous queues.
"""
def __init__(self, socket = None, target = None):
self.settings = target
self.socket = socket #for future, if we want a custom soscket
self.reader = None
self.writer = None
self.out_queue = asyncio.Queue()
self.in_queue = asyncio.Queue()
self.disconnected = asyncio.Event()
self.incoming_task = None
self.outgoing_task = None
async def disconnect(self):
"""
Disconnects from the socket.
Stops the reader and writer streams.
"""
if self.disconnected.is_set():
return
if self.outgoing_task is not None:
self.outgoing_task.cancel()
if self.incoming_task is not None:
self.incoming_task.cancel()
if self.writer is not None:
try:
self.writer.close()
except:
pass
self.writer = None
self.reader = None
self.disconnected.set()
async def handle_incoming(self):
"""
Reads data bytes from the socket and dispatches it to the incoming queue
"""
try:
lasterror = None
msgsize = None
while not self.disconnected.is_set():
try:
data = await self.reader.readexactly(4)
msgsize = int.from_bytes(data[1:], byteorder='big', signed = False)
data = await self.reader.readexactly(msgsize)
await self.in_queue.put( (data, None) )
if data == b'':
return
except asyncio.CancelledError as e:
lasterror = e
break
except Exception as e:
logger.debug('[TCPSocket] handle_incoming %s' % str(e))
lasterror = e
break
except asyncio.CancelledError:
return
except Exception as e:
lasterror = e
finally:
if self.in_queue is not None:
await self.in_queue.put( (None, lasterror) )
await self.disconnect()
async def handle_outgoing(self):
"""
Reads data bytes from the outgoing queue and dispatches it to the socket
"""
try:
while not self.disconnected.is_set():
data = await self.out_queue.get()
self.writer.write(data)
await self.writer.drain()
except asyncio.CancelledError:
#the SMB connection is terminating
return
except Exception as e:
logger.exception('[TCPSocket] handle_outgoing %s' % str(e))
await self.disconnect()
#async def connect(self, settings): #not needed parameter because we have networkselector now...
async def connect(self):
"""
Main function to be called, connects to the target specified in settings, and starts reading/writing.
"""
#self.settings = settings
try:
con = asyncio.open_connection(self.settings.get_ip(), self.settings.get_port())
try:
self.reader, self.writer = await asyncio.wait_for(con, int(self.settings.timeout))
except asyncio.TimeoutError:
logger.debug('[TCPSocket] Connection timeout')
raise SMBConnectionTimeoutException('[TCPSocket] Connection timeout')
except ConnectionRefusedError:
logger.debug('[TCPSocket] Connection refused')
raise SMBConnectionRefusedException('[TCPSocket] Connection refused')
except asyncio.CancelledError:
#the SMB connection is terminating
raise asyncio.CancelledError
except Exception as e:
logger.debug('[TCPSocket] connect generic exception')
raise e
self.incoming_task = asyncio.create_task(self.handle_incoming())
self.outgoing_task = asyncio.create_task(self.handle_outgoing())
return True, None
except Exception as e:
try:
self.writer.close()
except:
pass
return False, e
| [
"info@skelsec.com"
] | info@skelsec.com |
88bb5ceff669f2aa39cc2b11b65ea78a81eef6ce | 1d230067a680871af1127f89cc23dda6901f02e3 | /python/cursoemvideo/ex095.py | 5e8568b9d4e0b1020d9da0c0b584dd403076a433 | [] | no_license | pedrocambui/exercicios_e_desafios | 981f47d8ef5b37eda5c637da76918c29f4a4d700 | 65eb86f232aad4abc7cc2e95897ca0d5bb72dcf2 | refs/heads/main | 2023-03-13T13:28:46.058769 | 2021-03-30T16:25:08 | 2021-03-30T16:25:08 | 353,064,135 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,339 | py | jogador = {}
jogadores = list()
gols = list()
cont = 0
while True:
jogador.clear()
gols.clear()
jogador['nome'] = str(input('Nome do jogador: '))
partidas = int(input(f'Quantas partidas {jogador["nome"]} jogou? '))
for c in range(0, partidas):
gols.append(int(input(f' Quantos gols na partida {c+1}? ')))
jogador['gols'] = gols[:]
jogador['total'] = sum(gols)
jogadores.append(jogador.copy())
while True:
continuar = str(input('Quer continuar? [S/N] ')).strip().lower()[0]
if continuar in 'sn':
break
print('ERRO! Responda apenas S ou N.')
if continuar == 'n':
break
print('-='*30)
print('cod', end='')
for i in jogador.keys():
print(f'{i:<15}', end='')
print()
for k, v in enumerate(jogadores):
print(f'{k:>3} ', end='')
for d in v.values():
print(f'{str(d):<15}', end='')
print()
print('-='*30)
while True:
lev = int(input('Mostrar dados de qual jogador?(999 para parar): '))
if lev == 999:
break
if lev > len(jogadores):
print(f'ERRO! Não existe jogador com o código {lev}!')
else:
print(f'-- LEVANTAMENTO DO JOGADOR {jogadores[lev]["nome"]} --')
for i, v in enumerate(jogadores[lev]['gols']):
print(f' No jogo {i+1} fez {v} gols.')
print('-='*30) | [
"pcambuicorrea@gmail.com"
] | pcambuicorrea@gmail.com |
f32fd1e50a5d38c4d0457aeee1a638d7bfa345c0 | f79efc86aa0f1ca5a5a840e2cea8abf4086ea603 | /FproPlay/Dictionaries/fight.py | 953da27c15da30a945c5c27f1df42e8ccf675d9a | [] | no_license | tiago-falves/FPRO-Python | 6987bc8aa93bd2fb9c1ff16f9c2142c92d776767 | 28d7b94314cd6403c5f525358ecdfda5f7dcc03a | refs/heads/master | 2020-05-28T07:06:16.316112 | 2019-06-29T20:33:01 | 2019-06-29T20:33:01 | 188,916,503 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 414 | py | def fight(heroes,villain):
for i in heroes:
if i["category"]==villain["category"]:
if i["health"]>=villain["health"]:
return "{} defeated the villain and now has a score of {}".format(i["name"],i["score"]+1)
else:
villain["health"]=villain["health"]-i["health"]/2
return "{} prevailed with {}HP left".format(villain["name"],villain["health"]) | [
"tiago.falves98@gmail.com"
] | tiago.falves98@gmail.com |
a3092c3007fd7565350f5de588f2ca1ec176508d | d942f8a8cc99fa32b81783b2097b71fbee7981ed | /code/working_files/coldstart_pyspark_matmul.py | 49d1b53da026d6242546e4781a8b83d81414c1d8 | [] | no_license | jack-epstein/nyucds_1004_finalproject | 6cc50fe7785fa90d84f2e5cb30aed2d58510dc39 | e4430c48fb165248e52179dd770d4e900091b18d | refs/heads/main | 2023-05-04T07:46:27.862210 | 2021-05-18T22:43:24 | 2021-05-18T22:43:24 | 368,677,089 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,664 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''Attempt to take in U and V' matrices and get a new R' matrix. Ultimately did not succeed
Usage:
$ spark-submit --driver-memory 8g --executor-memory 8g code/model/coldstart_pyspark_matmul.py hdfs:/user/jte2004/userFactors_r200 hdfs:/user/jte2004/itemFactors_r200_updated
'''
# Import packages
import sys
import pyspark.sql.functions as func
from pyspark.sql import SparkSession
from pyspark.ml.evaluation import RegressionEvaluator
from pyspark.mllib.evaluation import RankingMetrics
from pyspark.mllib.linalg import distributed
from pyspark.ml.recommendation import ALS
from pyspark.ml.feature import StringIndexer, OneHotEncoder
#from pyspark.mllib.recommendation import MatrixFactorizationModel
import time
def main(spark, file_path_users, file_path_items):
'''Main routine for Lab Solutions
Parameters
----------
spark : SparkSession object
file_path_users: parquet files with user latent factors hdfs:/user/jte2004/userFactors_r200
file_path_items: parquet files with item latent factors hdfs:/user/jte2004/itemFactors_r200_updated
'''
# Loads the parquet files
user_factors = spark.read.parquet(file_path_users)
item_factors = spark.read.parquet(file_path_items)
print('User Sample')
user_factors.printSchema()
user_factors.limit(3).show()
print('')
print('Item Sample')
item_factors.printSchema()
item_factors.limit(3).show()
print('')
"https://stackoverflow.com/questions/45789489/how-to-split-a-list-to-multiple-columns-in-pyspark"
'''#test2 = item_factors.select(item_factors.id, item_factors.features[0],item_factors.features[1],item_factors.features[2])
test2 = item_factors.select(item_factors.id, [item_factors.features[i] for i in range(200)])
print('new test')
test2.printSchema()
print('')'''
testU = user_factors.select(user_factors.id, user_factors.features[0],user_factors.features[1],user_factors.features[2])
testV = item_factors.select(item_factors.id, item_factors.features[0],item_factors.features[1],item_factors.features[2])
check = distributed.DistributedMatrix(testU)
#check = testU.multiply(testV.transpose())
print('matmul test')
print('rows in U',check.numRows())
print('')
# Only enter this block if we're in main
if __name__ == "__main__":
# Create the spark session object
spark = SparkSession.builder.appName('matmul').getOrCreate()
# Get file_path for dataset to analyze
file_path_users = sys.argv[1]
file_path_items = sys.argv[2]
main(spark, file_path_users, file_path_items)
| [
"jte2004@nyu.edu"
] | jte2004@nyu.edu |
a0dfa996b3c2096b9b715337c068974c637741c9 | 21907b665a7765b4ce65061efe7d149257940b06 | /flask/bin/migrate-repository | 4cde0f28b4f9a0e98edca683226fedc941925e4d | [] | no_license | 692309199/microblog | 40c1b9d8af8abf0265602ce9684c482cb87a4ddf | be148949e07b863e099e4802edd0a87d317817c6 | refs/heads/master | 2021-05-11T10:31:39.467201 | 2018-01-22T10:08:51 | 2018-01-22T10:08:51 | 118,103,125 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 255 | #!/opt/microblog/flask/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from migrate.versioning.migrate_repository import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"692309199@qq.com"
] | 692309199@qq.com | |
74c7bb5a685440da68dd5f60b5565430cafaa304 | c2ddea0be67445819b85559823bb7b2232b52727 | /venv/Scripts/pip3.7-script.py | a2dc6998de19444febefe5feaf8ff0fb84a7afa6 | [] | no_license | aaron6347/CPT212-Asn2 | eed93526bf5d02c0800d7f3dda12ce90bb74790a | 154741c220068aad29e8ceecc026793394f3f038 | refs/heads/master | 2022-10-08T01:09:23.756596 | 2020-06-01T11:45:59 | 2020-06-01T11:45:59 | 260,435,523 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 421 | py | #!C:\Users\Aaron\PycharmProjects\CPT212-Asn2\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==19.0.3','console_scripts','pip3.7'
__requires__ = 'pip==19.0.3'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==19.0.3', 'console_scripts', 'pip3.7')()
)
| [
"49943930+aaron6347@users.noreply.github.com"
] | 49943930+aaron6347@users.noreply.github.com |
82f974dda587b52858bedd38326563eaa89c4553 | d556c1d6d334a52624684a294a8e520742bf985d | /nutriplus_back/registration/app_settings.py | 3fdd918bf82d88b64b7434ea83b6b8c688c9b56b | [] | no_license | mouradfelipe/nutriplus | db5e5da81db32a54c08de207c938a58111ec2e5e | 994d3d01759ceb887f5ee39dc73209a43e7d9ce0 | refs/heads/master | 2021-06-19T09:00:50.776808 | 2020-03-07T12:08:17 | 2020-03-07T12:08:17 | 206,334,503 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 720 | py | from django.conf import settings
from rest_framework.permissions import AllowAny
#from rest_auth.registration.serializers import RegisterSerializer as DefaultRegisterSerializer
from .serializers import RegisterSerializer as DefaultRegisterSerializer
from .utils import import_callable
serializers = getattr(settings, 'REST_AUT_REGISTER_SERIALIZERS', {})
RegisterSerializer = import_callable(
serializers.get('REGISTER_SERIALIZER', DefaultRegisterSerializer)
)
def register_permission_classes():
permission_classes = [AllowAny, ]
for klass in getattr(settings, 'REST_AUTH_REGISTER_PERMISSION_CLASSES', tuple()):
permission_classes.append(import_callable(klass))
return tuple(permission_classes) | [
"lucas.tnagel@gmail.com"
] | lucas.tnagel@gmail.com |
5585928a7e726d630fd8b22b45e5ffb5b9cd040a | b88b96d96c096da3d69858dfdfaaab5de002bdcf | /main.py | 2243c761f7da3588d991e11ec054feec66530593 | [] | no_license | BryanArmando/Ocurrencias-de-un-texto | 174fc2f8721d684f0dbeec662f1e267e40acf39f | e7c5bbf0815176101790be6d217149cce10a5de2 | refs/heads/master | 2023-06-02T08:58:59.290275 | 2021-06-17T02:04:14 | 2021-06-17T02:04:14 | 377,676,054 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 594 | py | import re
print("Ocurrencias de un fragmento de un libro\n")
archivo=open('Harry.txt',encoding='utf8')
linea=archivo.readline()
diccionario={}
while linea!="":
palabras=linea.split()
for i in range(len(palabras)):
palabra=re.sub('[?|.|!|\|/|;|:|,|”|“|(|)|«|»|-|_]','',palabras[i])
if palabra in diccionario:
diccionario[palabra]+=1
else:
diccionario.update({palabra:1})
linea=archivo.readline()
print("Palabras y repeticiones")
for x in diccionario:
print('({},{}) '.format(x, diccionario.get(x)))
archivo.close()
| [
"66704761+BryanArmando@users.noreply.github.com"
] | 66704761+BryanArmando@users.noreply.github.com |
39057d7f01b675b8b5992785673f879a433ca69a | e0234338c27428668ac180c6baf475a870dbe7f8 | /esp32_drone/python/tcp_test.py | 2b3cedc5c4ceebbc8e3c4d01f082a63243591d04 | [] | no_license | darcyg/esp32_projects | 9cc4245bb19f23a12a4a2b1399c221a004bfc415 | 4d008b9070a0664ba5de69cb39ba44b66ad178a2 | refs/heads/master | 2023-05-19T22:05:48.368668 | 2021-06-06T15:19:48 | 2021-06-06T15:19:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,269 | py | import socket
import struct
import asyncio
# Create a TCP/IP socket
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# Bind the socket to the port
server_address = ('192.168.178.68', 3333)
print(f'starting up on {server_address[0]} port {server_address[1]}')
sock.bind(server_address)
# Listen for incoming connections
sock.listen(1)
while True:
# Wait for a connection
print('waiting for a connection')
connection, client_address = sock.accept()
try:
print(f'connection from {client_address}')
# Receive the data in small chunks
while True:
data = connection.recv(16)
# expecting 4 floats (quaternions)
data_float = struct.unpack('<4f', data)
print(f'received: {data_float}')
# if data:
# connection.sendall(data)
# print('sending data back to the client')
# print('Enter throttle: ')
# message = input()
# print(f'You wrote: {message}')
# connection.sendall(message.encode())
# else:
# print(f'no more data from {client_address}')
# break
finally:
# Clean up the connection
connection.close()
| [
"dominik.fohrmann@gmail.com"
] | dominik.fohrmann@gmail.com |
c61c925a0e0f9a7e7cfb8f89789e3bb008228a43 | f55ea1cfc520e6e2ceda961e7400fe2de0ad4e69 | /src/rosegraphics.py | c2138ca1fdd9002c0686a7fb42372f18f779db0e | [
"MIT"
] | permissive | chenx16/05b-Exam1Practice | 231081cc1284ccdd27b1da57cd19bc656a648eb1 | 82dbe6a5677732820f32dbb8264c04a028a1a459 | refs/heads/master | 2020-04-09T20:15:08.721532 | 2018-12-12T19:01:54 | 2018-12-12T19:01:54 | 160,568,393 | 0 | 0 | null | 2018-12-05T19:31:56 | 2018-12-05T19:31:56 | null | UTF-8 | Python | false | false | 65,517 | py | """
rosegraphics.py - a simple Graphics library for Python.
Its key feature is:
-- USING this library provides a simple introduction to USING objects.
Other key features include:
-- It has a rich set of classes, methods and instance variables.
-- In addition to classes like Circles that are natural for
students, it has other kinds of classes like RoseWindow
and FortuneTeller to provide a richer set of examples
than "just" a graphics library.
-- It allows one to do a reasonable set of graphics operations
with reasonable efficiency. The API mimics Java's Shape API
for the most part.
-- It is built on top of tkinter and its extension ttk
(the standard graphics libraries that come with Python).
-- Unlike tkinter, it is NOT event-driven and hence can be used
before students see that paradigm. (There is a behind-the-scenes
facilty for listening for and responding to events,
for those who want to do so.)
-- It attempts to be as bullet-proof as possible, to make it easy
for beginners to use it. In particular, it attempts to provide
reasonable error messages when a student misuses the API.
-- It was inspired by zellegraphics but is a complete re-implemenation
that attempts to:
-- Be more bullet-proof.
-- Provide a richer set of examples for using objects.
-- Have an API that is more like Java's Shape API than tkinter's
(older) API.
-- While it can serve as an example for defining classes,
it is NOT intended to do so for beginners.
It is excellent for helping students learn to USE objects;
it is NOT perfect for helping students learn to WRITE CLASSES.
See the MAIN function below for typical examples of its use.
Authors: David Mutchler, Mark Hays, Michael Wollowswki, Matt Boutell,
Chandan Rupakheti, Claude Anderson and their colleagues,
with thanks to John Zelle for inspiration and hints.
First completed version: September 2014.
"""
# FIXME (errors):
# -- clone() does not really make a copy; it just makes a new one
# but without cloning all the attributes.
# -- _ShapeWithCenter claims that things like Ellipse are subclasses,
# but they are not at this point, I think. In general, need to
# deal with overlap between _ShapeWithCenter and _RectangularShape.
# KEEP both of them to have some classes have corner_1 and corner_2
# while others have center and ...
# FIXME (things that have yet to be implemented):
# -- Allow multiple canvasses.
# -- Better close_on ... ala zellegraphics.
# -- Keyboard.
# -- Better Mouse.
# -- Add type hints.
# -- Catch all Exceptions and react appropriately.
# -- Implement unimplemented classes.
# -- Add and allow FortuneTellers and other non-canvas classes.
import tkinter
from tkinter import font as tkinter_font
import time
import turtle
# ----------------------------------------------------------------------
# All the windows that are constructed during a run share the single
# _master_Tk (a tkinter.Tk object)
# as their common root. The first construction of a RoseWindow
# sets this _master_Tk to a Tkinter.Tk object.
# ----------------------------------------------------------------------
_master_Tk = None
# ----------------------------------------------------------------------
# At the risk of not being Pythonic, we provide a simple type-checking
# facility that attempts to provide meaningful error messages to
# students when they pass arguments that are not of the expected type.
# ----------------------------------------------------------------------
class WrongTypeException(Exception):
    """
    Raised (eventually) when a caller passes an argument of the
    wrong type to a rosegraphics operation.  Not yet implemented.
    """
def check_types(pairs):
    """
    Rudimentary type checking.  Not yet implemented fully.

    Given a sequence of (value, expected_type) pairs, raises a
    WrongTypeException (carrying the offending pair) for the first
    value that is not an instance of its expected type.
    """
    for pair in pairs:
        # Index (rather than tuple-unpack) so that pairs with extra
        # trailing elements are still accepted.
        value, expected_type = pair[0], pair[1]
        if not isinstance(value, expected_type):
            raise WrongTypeException(pair)
# ----------------------------------------------------------------------
# Serialization facility
# ----------------------------------------------------------------------
def _serialize_shapes(self):
    """Returns a list of strings representing the shapes in sorted order."""
    # Dictionaries iterate in insertion order, so we sort each shape's
    # keys and emit them manually for a stable, comparable dump.
    shape_dicts = [s.__dict__ for s in self.initial_canvas.shapes]
    # Capture the key sets BEFORE nulling the canvas-specific fields,
    # so keys that were absent originally stay absent from the output.
    keys_per_shape = [sorted(d) for d in shape_dicts]

    lines = []
    for d, keys in zip(shape_dicts, keys_per_shape):
        # These two fields vary per canvas/run; blank them out so dumps
        # from different runs compare equal.  (This mutates the shapes.)
        d['_method_for_drawing'] = None
        d['shape_id_by_canvas'] = None
        lines.append(str([str(k) + ":" + str(d[k]) for k in keys]))
    return "\n".join(sorted(lines))
# ----------------------------------------------------------------------
# RoseWindow is the top-level object.
# It starts with a single RoseCanvas.
# ----------------------------------------------------------------------
class RoseWindow(object):
    """
    A RoseWindow is a window that pops up when constructed.
    It can have RoseWidgets on it and starts by default with
    a single RoseCanvas upon which one can draw shapes.

    To construct a RoseWindow, use:
    - rg.RoseWindow()
    or use any of its optional arguments, as in these examples:
    window = rg.RoseWindow(400, 300)  # 400 wide by 300 tall
    window = rg.RoseWindow(400, 300, 'Funny window')  # with a title

    Instance variables include:
    width: width of this window (in pixels)
    height: height of this window (in pixels)
    title: displayed on the window's bar
    widgets: the things attached to this window
    """

    def __init__(self, width=400, height=300, title='Rose Graphics',
                 color='black', canvas_color=None,
                 make_initial_canvas=True):
        """
        Pops up a tkinter.Toplevel window with (by default)
        a RoseCanvas (and associated tkinter.Canvas) on it.

        Arguments are:
        -- width, height: dimensions of the window (in pixels).
        -- title: title displayed on the window.
        -- color: background color of the window
        -- canvas_color: background color of the canvas
             displayed on the window by default
        -- make_initial_canvas:
             -- If True, a default canvas is placed on the window.
             -- Otherwise, no default canvas is placed on the window.

        If this is the first RoseWindow constructed, then a
        hidden Tk object is constructed to control the event loop.

        Preconditions:
          :type width: int
          :type height: int
          :type title: str
          :type color: Color
          :type canvas_color: Color
          :type make_initial_canvas: bool
        """
        # check_types([(width, (int, float)),
        #              (height, (int, float)),
        #              (title, (Color, str)
        # --------------------------------------------------------------
        # The _master_Tk controls the mainloop for ALL the RoseWindows.
        # If this is the first RoseWindow constructed in this run,
        # then construct the _master_Tk object.
        # --------------------------------------------------------------
        global _master_Tk
        if not _master_Tk:
            _master_Tk = tkinter.Tk()
            # Hide the root window; only Toplevel windows are shown.
            _master_Tk.withdraw()
        else:
            time.sleep(0.1)  # Helps the window appear on TOP of Eclipse

        # --------------------------------------------------------------
        # Has a tkinter.Toplevel, and a tkinter.Canvas on the Toplevel.
        # --------------------------------------------------------------
        self.toplevel = tkinter.Toplevel(_master_Tk,
                                         background=color,
                                         width=width, height=height)
        self.toplevel.title(title)
        self._is_closed = False
        # Clicking the window-manager's X runs self.close (clean shutdown).
        self.toplevel.protocol("WM_DELETE_WINDOW", self.close)

        # FIXME: The next two need to be properties to have
        # setting happen correctly. Really belongs to RoseCanvas.
        # See comments elsewhere on this.
        self.width = width
        self.height = height

        if make_initial_canvas:
            self.initial_canvas = RoseCanvas(self, width, height,
                                             canvas_color)
        else:
            self.initial_canvas = None
        # NOTE(review): when make_initial_canvas is False this list
        # starts as [None]; render() filters by type, so it is harmless.
        self.widgets = [self.initial_canvas]

        # FIXME: Do any other tailoring of the toplevel as desired,
        # e.g. borderwidth and style...

        # --------------------------------------------------------------
        # Catch mouse clicks and key presses.
        # --------------------------------------------------------------
        self.mouse = Mouse()
        self.keyboard = Keyboard()
        self.toplevel.bind('<Button>', self._on_mouse_click)
        self.toplevel.bind('<KeyPress>', self._on_key_press)

        self.update()

    def close(self):
        """ Closes this RoseWindow. """
        if self.toplevel:
            self.toplevel.destroy()
            self.toplevel = None
        self.update()
        self._is_closed = True

    def update(self):
        """
        Checks for and handles events that has happened
        in this RoseWindow (e.g. mouse clicks, drawing shapes).
        """
        global _master_Tk
        _master_Tk.update()

    def render(self, seconds_to_pause=None):
        """
        Updates all the Shapes attached to RoseCanvas objects
        associated with this RoseWindow, then draws all those Shapes.
        After doing so, pauses the given number of seconds.

        :type seconds_to_pause: float
        """
        for widget in self.widgets:
            # Only canvases know how to render; skip other widget types
            # (and the None placeholder when there is no initial canvas).
            if type(widget) == RoseCanvas:
                widget.render()
        self.update()
        if seconds_to_pause:
            time.sleep(seconds_to_pause)

    def close_on_mouse_click(self):
        """
        Displays a message at the bottom center of the window and waits
        for the user to click the mouse anywhere in the window.
        Then closes this RoseWindow.
        Returns an rg.Point that specifies where the user clicked the mouse.
        """
        message = 'To exit, click anywhere in this window'
        click_position = self.continue_on_mouse_click(message=message,
                                                      close_it=True)
        return click_position

    def continue_on_mouse_click(self,
                                message='To continue, click anywhere in this window',
                                x_position=None,
                                y_position=None,
                                close_it=False,
                                erase_it=True):
        """
        Displays a message at the bottom center of the window and waits
        for the user to click the mouse, then erases the message.

        Optional parameters let you:
        -- Display a different message
        -- Place the message at a different place in the window
           (xpos and ypos are as in Text)
        -- Close the window after the mouse is clicked (and ignore the
           GraphicsError that results if the user instead chooses to
           click the X in the window)
        -- NOT erase the message when done
        """
        # Silently ignore the request if the window is already gone.
        if self._is_closed:
            return
        if x_position is None:
            x_position = self.width / 2
        if y_position is None:
            y_position = self.height - 20
        anchor_point = Point(x_position, y_position)

        text = Text(anchor_point, message)

        # FIXME: Really should do all this on a per-RoseCanvas basis.
        if self.initial_canvas:
            text.attach_to(self.initial_canvas)
            self.initial_canvas._renderShape(text, render_NOW=True)

        click_position = self.get_next_mouse_click()

        if erase_it and self.initial_canvas:
            text.detach_from(self.initial_canvas)

        if close_it:
            self.close()  # then close the window

        return click_position

    def get_next_mouse_click(self):
        """
        Waits for the user to click in the window.
        Then returns the rg.Point that represents the point
        where the user clicked.

        Example:
        If this method is called and then the user clicks near the
        upper-right corner of a 300 x 500 window, this function would
        return something like rg.Point(295, 5).
        """
        # Clear any stale click, then poll until _on_mouse_click
        # stores a new position (or the window is closed).
        self.mouse.position = None
        while True:
            if self._is_closed:
                return None
            if self.mouse.position is not None:
                break
            self.update()
            time.sleep(.05)  # allow time for other events to be handled

        click_point = self.mouse.position
        self.mouse.position = None

        return click_point

    def _on_mouse_click(self, event):
        # tkinter '<Button>' callback: delegate to the Mouse tracker.
        self.mouse._update(event)

    def _on_key_press(self, event):
        # tkinter '<KeyPress>' callback: delegate to the Keyboard tracker.
        self.keyboard._update(event)

    # def add_canvas(self, width=None, height=None, background_color=0):
    # FIXME: Set defaults based on the main canvas.
    #     new_canvas = RoseCanvas(self, background_color='white')
    #     self.widgets.append(new_canvas)
    #
    #     _root.update()

    def __serialize_shapes(self):
        """Returns a list of strings representing the shapes in sorted order."""
        return _serialize_shapes(self)
class RoseWidget():
    """
    A RoseWidget is anything that can be placed upon a RoseWindow,
    e.g. a RoseCanvas, a FortuneTeller, etc.
    """

    def __init__(self, window):
        """ Remembers the RoseWindow that owns this widget. """
        self._window = window

    def get_window(self):
        """ Returns the RoseWindow that owns this widget. """
        return self._window
class RoseCanvas(RoseWidget):
    # Default background colors cycled through by canvas count
    # (currently unused; see the commented-out code in __init__).
    defaults = {'colors': [None, 'yellow', 'light blue', 'dark grey']}
    # Number of RoseCanvas objects constructed so far in this run.
    count = 0
    """
    A RoseCanvas is a RoseWidget (i.e., a thing on a RoseWindow)
    upon which one can draw shapes and other Drawable things.
    """
    # NOTE(review): the string above appears AFTER the class-variable
    # assignments, so it is a no-op expression rather than the class
    # docstring (RoseCanvas.__doc__ stays None).  Left in place to
    # avoid changing runtime attributes.

    def __init__(self, window, width=200, height=200,
                 background_color=0):
        """
        Places a tkinter.Canvas of the given size and background color
        onto the given RoseWindow.
        """
        super().__init__(window)

        RoseCanvas.count = RoseCanvas.count + 1

        # FIXME: Deal with default background colors.
        # FIXME: Store background color as a property
        # so that modifying it changes the tkinter canvas.
        # Ditto width and height.
        # if background_color == 0:
        #     index = RoseCanvas.count % len(defaults['colors'])
        #     self.background_color = defaults['colors'][index]
        # else:
        #     self.background_color = background_color

        tk_canvas = tkinter.Canvas(window.toplevel,
                                   width=width, height=height,
                                   background=background_color)
        self._tkinter_canvas = tk_canvas

        # FIXME: Automate gridding better.
        self._tkinter_canvas.grid(padx=5, pady=5)

        # Shapes currently attached to (queued for drawing on) this canvas.
        self.shapes = []

    def render(self, seconds_to_pause=None):
        """
        Updates all the Shapes attached to this RoseCanvas,
        then draws all those Shapes.
        After doing so, pauses the given number of seconds.

        :type seconds_to_pause: float
        """
        self._update_shapes()
        self._window.update()

        if seconds_to_pause:
            time.sleep(seconds_to_pause)

    def _renderShape(self, shape, render_NOW=False):
        """Renders a shape."""
        coordinates = shape._get_coordinates_for_drawing()
        options = shape._get_options_for_drawing()

        # A shape_id of None means this shape has never been drawn on
        # this canvas; create the tkinter item and remember its id.
        if shape.shape_id_by_canvas[self] is None:
            shape.shape_id_by_canvas[self] = \
                shape._method_for_drawing(self._tkinter_canvas, *coordinates)

        try:
            # Move/resize the existing tkinter item to the new coordinates.
            self._tkinter_canvas.coords(shape.shape_id_by_canvas[self],
                                        *coordinates)
        except tkinter.TclError:
            # The underlying tkinter canvas was destroyed (window closed).
            msg = 'Could not place the shape\n'
            msg += 'on the given window.\n'
            msg += 'Did you accidentally close a window\n'
            msg += 'that later needed to be rendered again?'
            raise Exception(msg) from None

        self._tkinter_canvas.itemconfigure(shape.shape_id_by_canvas[self],
                                           options)
        if render_NOW:
            # redraw NOW
            self._window.update()

    def _draw(self, shape):
        """Queues a shape for being drawn. Does NOT draw it just yet."""
        # Identity (not ==) check, so equal-but-distinct shapes can
        # each be attached to the canvas.
        shapeInList = False
        for listShape in self.shapes:
            if listShape is shape:
                shapeInList = True
                break

        if not shapeInList:
            shape.shape_id_by_canvas[self] = None
            self.shapes.append(shape)

    def _undraw(self, shape):
        # Removes the shape's tkinter item and drops the shape from the
        # queue (first identical occurrence only).
        # NOTE(review): the membership test uses == (Shape.__eq__) while
        # the loop below uses `is`; if an equal-but-distinct shape is in
        # the list, the test passes but nothing is removed.
        if shape in self.shapes:
            for i in range(len(self.shapes)):
                if self.shapes[i] is shape:
                    self._tkinter_canvas.delete(shape.shape_id_by_canvas[self])
                    del self.shapes[i]
                    break

    def _update_shapes(self):
        # Re-render every queued shape, picking up any mutations made
        # to the shapes since the last render.
        for shape in self.shapes:
            self._renderShape(shape)
class Mouse(object):
    """
    Tracks the position of the most recent mouse click in a
    RoseWindow, as an rg.Point (or None if no click has happened yet).
    """

    def __init__(self):
        # No click has been recorded yet.
        self.position = None

    def _update(self, event):
        # Called by RoseWindow._on_mouse_click with a tkinter event;
        # records where the click occurred (pixel coordinates).
        self.position = Point(event.x, event.y)
class Keyboard(object):
    """
    Placeholder for keyboard support; intended (eventually) to record
    the most recent key pressed in a RoseWindow.
    """

    def __init__(self):
        # No key press has been recorded yet.
        self.key_pressed = None

    def _update(self, event):
        # Called by RoseWindow._on_key_press; not yet implemented.
        pass
class __FreezeClass__ (type):
    """
    Prevents class variable assignment.

    Metaclass whose __setattr__ raises, so that a student who writes
    e.g.  Circle.radius = 5  (setting an attribute on the CLASS, which
    usually means the () after the class name was forgotten) gets a
    helpful error message instead of silently mutating the class.
    """
    def __setattr__(self, name, _ignored):  # last parameter is the value
        err = "You tried to set the instance variable '" + name + "'\n"
        err += "   on the CLASS '" + self.__name__ + "'"
        err += ", which is not an OBJECT.\n"
        err += "   Did you forget the () after the word "
        err += self.__name__ + ",\n"
        err += "   on the line where you constructed the object?"
        raise SyntaxError(err)
class _Shape(object, metaclass=__FreezeClass__):
    """
    A Shape is a thing that can be drawn on a RoseCanvas
    (which itself draws on a tkinter Canvas).

    Its constructor provides the tkinter method to be used to
    draw this Shape.

    This abstract type has concrete subclasses that include:
    Arc, Bitmap, Circle, Ellipse, Image, Line, Path, Polygon,
    Rectangle, RoundedRectangle, Square, Text and Window.

    Public data attributes: None.
    Public methods: attach_to.
    """

    def __init__(self, method_for_drawing):
        """ Arguments:
        -- the tkinter method for drawing the Shape.
        """
        self._method_for_drawing = method_for_drawing
        # Maps each RoseCanvas this shape is attached to -> the
        # tkinter item id of this shape on that canvas (None until
        # the shape is first rendered there).
        self.shape_id_by_canvas = {}

    def __eq__(self, other):
        """
        Two Shape objects are equal (==) if all their attributes
        are equal to each other.
        """
        # check before we go deleting keys that may or may not exist
        if(not isinstance(other, self.__class__)):
            return False
        # Compare copies of the instance dicts with the per-canvas
        # bookkeeping removed, since item ids differ between canvases
        # even for otherwise-identical shapes.
        self_dict = self.__dict__.copy()
        other_dict = other.__dict__.copy()
        del self_dict["shape_id_by_canvas"]
        del other_dict["shape_id_by_canvas"]

        return (self_dict == other_dict)

    def __ne__(self, other):
        return not self.__eq__(other)

    def attach_to(self, window_or_canvas):
        """
        'draws' this Shape. More precisely:
        Attaches this Shape to the given
        RoseWindow or RoseCanvas. When that
        RoseWindow/RoseCanvas is rendered, this shape
        will appear on that RoseWindow/RoseCanvas.
        """
        # A RoseWindow delegates to its default canvas.
        if isinstance(window_or_canvas, RoseWindow):
            window_or_canvas = window_or_canvas.initial_canvas
        window_or_canvas._draw(self)

    def detach_from(self, rose_canvas):
        """
        'undraws' this Shape. More precisely:
        Detaches this Shape from the given
        RoseWindow or RoseCanvas. When that
        RoseWindow/RoseCanvas is rendered,
        this shape will no longer appear
        on that RoseWindow/RoseCanvas.
        """
        if type(rose_canvas) == RoseWindow:
            rose_canvas = rose_canvas.initial_canvas
        rose_canvas._undraw(self)
class _ShapeWithOutline(object):
    """
    Mixin for Shapes that have an interior (fillable with a color)
    and an outline (with its own color and thickness).

    Concrete subclasses include: Arc, Circle, Ellipse, Image, Line,
    Path, Polygon, Rectangle, Square, Text and Window.

    Public data attributes: fill_color, outline_color, outline_thickness.
    Public methods: _initialize_options.
    """
    defaults = {'fill_color': None,
                'outline_color': 'black',
                'outline_thickness': 1}

    def _initialize_options(self):
        """ Sets each option to its class-wide default value. """
        defaults = _ShapeWithOutline.defaults
        self.fill_color = defaults['fill_color']
        self.outline_color = defaults['outline_color']
        self.outline_thickness = defaults['outline_thickness']

    def _get_options_for_drawing(self):
        """ Returns the tkinter option dict for drawing this shape. """
        # A color of None means transparent here, which tkinter
        # expresses as the empty string.
        return {'fill': self.fill_color or '',
                'outline': self.outline_color or '',
                'width': self.outline_thickness}
class _ShapeWithThickness(object):
    """
    Mixin for Shapes that are drawn as a colored stroke with a
    thickness but no separate outline.

    Concrete subclasses include: Line and Path.

    Public data attributes: color, thickness.
    Public methods: _initialize_options.
    """
    defaults = {'color': 'black',
                'thickness': 1,
                'arrow': None}

    def _initialize_options(self):
        """ Sets each option to its class-wide default value. """
        defaults = _ShapeWithThickness.defaults
        self.color = defaults['color']
        self.thickness = defaults['thickness']
        self.arrow = defaults['arrow']

    def _get_options_for_drawing(self):
        """ Returns the tkinter option dict for drawing this shape. """
        # A color of None means 'black' here (the stroke is the shape,
        # so it should stay visible).
        fill = 'black' if self.color is None else self.color
        return {'fill': fill,
                'width': self.thickness,
                'arrow': self.arrow}
class _ShapeWithText(object):
    """
    A Shape that has text and a font for displaying that text.

    This abstract type has concrete subclasses that include:
    Text.

    Public data attributes: font_family, font_size,
    is_bold, is_italic, is_underline, is_overstrike.
    Public methods: _initialize_options.
    """
    # FIXME: Add more to the above docstring.

    # Class-wide defaults; the weight/slant/underline/overstrike
    # entries use tkinter's own conventions and are converted to
    # booleans in _initialize_options.
    defaults = {'font_family': 'helvetica',
                'font_size': 14,
                'weight': 'normal',
                'slant': 'roman',
                'underline': 0,
                'overstrike': 0,
                'justify': tkinter.CENTER,
                'text_box_width': None,
                'text_color': 'black',
                'text': ''}

    def _initialize_options(self):
        # Copy each default onto the instance, converting tkinter's
        # 'bold'/'italic'/1 conventions into is_* booleans.
        self.font_family = _ShapeWithText.defaults['font_family']
        self.font_size = _ShapeWithText.defaults['font_size']
        self.is_bold = _ShapeWithText.defaults['weight'] == 'bold'
        self.is_italic = _ShapeWithText.defaults['slant'] == 'italic'
        self.is_underline = _ShapeWithText.defaults['underline'] == 1
        self.is_overstrike = _ShapeWithText.defaults['overstrike'] == 1

        self.justify = _ShapeWithText.defaults['justify']
        self.text_box_width = _ShapeWithText.defaults['text_box_width']
        self.text_color = _ShapeWithText.defaults['text_color']
        self.text = _ShapeWithText.defaults['text']

    def _get_options_for_drawing(self):
        # Convert the is_* booleans back into the string/int values
        # that tkinter_font.Font expects.
        weight = 'bold' if self.is_bold else 'normal'
        slant = 'italic' if self.is_italic else 'roman'
        underline = 1 if self.is_underline else 0
        overstrike = 1 if self.is_overstrike else 0
        font = tkinter_font.Font(family=self.font_family,
                                 size=self.font_size,
                                 weight=weight,
                                 slant=slant,
                                 underline=underline,
                                 overstrike=overstrike)

        options = {'font': font,
                   'justify': self.justify,
                   'fill': self.text_color,
                   'text': self.text}

        # Only include 'width' when a text-box width has been set.
        if self.text_box_width:
            options['width'] = self.text_box_width

        return options
class _ShapeWithCenter(_Shape):
    """
    A Shape that has a center (and for which moving its center
    moves the entire Shape). Its constructor provides the center
    of the Shape along with its method for drawing this Shape.

    This abstract type has concrete subclasses that include:
    Arc, Bitmap, Circle, Ellipse, Image,
    Rectangle, RoundedRectangle, Square, Text and Window.

    Public data attributes: center.
    Public methods: move_by, move_center_to.
    """

    def __init__(self, center, method_for_drawing):
        """
        Arguments:
        -- the Point that is the center of the Shape
           (the Shape stores a CLONE of that Point)
        -- the tkinter method for drawing the Shape.
        """
        # Clone the center argument, so that if the caller
        # mutates the argument, it does NOT affect this Shape.
        super().__init__(method_for_drawing)
        self.center = center.clone()

    def move_by(self, dx, dy):
        """
        Moves this _Shape to the right by dx and down by dy.
        Negative values move it to the left/up instead.
        Does NOT return a value; instead, it mutates this shape.

        :type dx: float
        :type dy: float
        """
        # Subclasses derive their drawing coordinates from the center,
        # so moving the center moves the whole shape.
        self.center.move_by(dx, dy)

    def move_center_to(self, x, y):
        """
        Moves this _Shape's center to (x, y),
        thus translating the entire Shape
        by however much its center moved.

        :type x: float
        :type y: float
        """
        self.center.move_to(x, y)
class _RectangularShape(_Shape):
    """
    A _Shape determined by its rectangular bounding box (plus possibly
    other information).

    Concrete sub-classes include: rg.Ellipse, rg.Rectangle.

    Examples:
    These all assume that the variable shape is a _RectangularShape
    (e.g. an rg.Ellipse or a rg.Rectangle):

    The methods in these examples all return rg.Point objects that are
    copies of a corner/center of the _RectangularShape:
        ul = shape.get_upper_left_corner()
        ur = shape.get_upper_right_corner()
        ll = shape.get_lower_left_corner()
        lr = shape.get_lower_right_corner()
        center = shape.get_center()

    The methods in these examples return a positive number:
        h = shape.get_height()
        w = shape.get_width()

    The method in this example returns an rg.Rectangle that encloses
    this _RectangularShape:
        bbox = shape.get_bounding_box()

    This example moves this _RectangularShape right 100 and up 50:
        shape.move_by(100, -50)

    This example does the same thing another way:
        shape.corner_1 = shape.corner_1 + 100
        shape.corner_2 = shape.corner_2 - 50
    """

    def __init__(self, corner_1, corner_2, method_for_drawing):
        """
        :type corner_1: Point
        :type corner_2: Point
        :type method_for_drawing: callable(int, int, int, int) -> int
        """
        super().__init__(method_for_drawing)

        # Clones, so caller-side mutation cannot affect this Shape.
        self.corner_1 = corner_1.clone()
        self.corner_2 = corner_2.clone()

        self._update_corners()

    def __repr__(self):
        """ Returns a string representation of this shape. """
        f_string = ''
        f_string += '{}: corner_1=({}, {}), corner_2=({}, {}),'
        f_string += ' fill_color={},'
        f_string += ' outline_color={}, outline_thickness={}.'
        return f_string.format(self.__class__.__name__,
                               self.corner_1.x, self.corner_1.y,
                               self.corner_2.x, self.corner_2.y,
                               self.fill_color, self.outline_color,
                               self.outline_thickness)

    def move_by(self, dx, dy):
        """
        Moves this _Shape to the right by dx and down by dy.
        Negative values move it to the left/up instead.
        Does NOT return a value; instead, it mutates this shape.

        :type dx: float
        :type dy: float
        """
        self.corner_1.x += dx
        self.corner_1.y += dy
        self.corner_2.x += dx
        self.corner_2.y += dy

    def clone(self):
        """
        Returns a copy of this _RectangularShape.
        """
        # NOTE(review): per the module-level FIXME, this copies only
        # the corners -- attributes like fill_color are NOT copied
        # (the new object gets the class defaults instead).
        return self.__class__(self.corner_1.clone(),
                              self.corner_2.clone())

    def get_upper_left_corner(self):
        """
        Returns a copy of the ** upper-left **
        corner of this _RectangularShape.
        The returned value is an rg.Point.
        """
        self._update_corners()
        return self._upper_left_corner

    def get_lower_left_corner(self):
        """
        Returns a copy of the ** lower-left **
        corner of this _RectangularShape.
        The returned value is an rg.Point.
        """
        self._update_corners()
        return self._lower_left_corner

    def get_upper_right_corner(self):
        """
        Returns a copy of the ** upper-right **
        corner of this _RectangularShape.
        The returned value is an rg.Point.
        """
        self._update_corners()
        return self._upper_right_corner

    def get_lower_right_corner(self):
        """
        Returns a copy of the ** lower-right **
        corner of this _RectangularShape.
        The returned value is an rg.Point.
        """
        self._update_corners()
        return self._lower_right_corner

    def get_center(self):
        """
        Returns a copy of the ** center ** of this _RectangularShape.
        The returned value is an rg.Point.
        """
        return Point((self.corner_1.x + self.corner_2.x) / 2,
                     (self.corner_1.y + self.corner_2.y) / 2)

    def get_height(self):
        """
        Returns the height (i.e., the size in
        the y-direction) of this _RectangularShape.
        The returned value is always positive.
        """
        return abs(self.corner_1.y - self.corner_2.y)

    def get_width(self):
        """
        Returns the width (i.e., the size in
        the x-direction) of this _RectangularShape.
        The returned value is always positive.
        """
        return abs(self.corner_1.x - self.corner_2.x)

    def get_bounding_box(self):
        """
        Returns an rg.Rectangle that encloses this _RectangularShape.
        """
        return Rectangle(self.corner_1, self.corner_2)

    def _update_corners(self):
        # Recompute the four named corners from corner_1/corner_2,
        # which may be ANY two opposite corners in any order.
        min_x = min(self.corner_1.x, self.corner_2.x)
        min_y = min(self.corner_1.y, self.corner_2.y)
        max_x = max(self.corner_1.x, self.corner_2.x)
        max_y = max(self.corner_1.y, self.corner_2.y)

        self._upper_left_corner = Point(min_x, min_y)
        self._upper_right_corner = Point(max_x, min_y)
        self._lower_left_corner = Point(min_x, max_y)
        self._lower_right_corner = Point(max_x, max_y)

    def _get_coordinates_for_drawing(self):
        # tkinter draws from the upper-left to the lower-right corner.
        return [self.get_upper_left_corner().x,
                self.get_upper_left_corner().y,
                self.get_lower_right_corner().x,
                self.get_lower_right_corner().y]
# Placeholder: see the module-level FIXME list of unimplemented classes.
class Arc(_RectangularShape, _ShapeWithOutline):
    """ Not yet implemented. """
# Placeholder: see the module-level FIXME list of unimplemented classes.
class Bitmap(_Shape):
    """ Not yet implemented. """
class Circle(_ShapeWithCenter, _ShapeWithOutline):
    """
    A Shape that is a circle.

    To construct a Circle, use:
    -   rg.Circle(center, radius)
    where center is an rg.Point object and radius is a positive
    integer.  For example:
    -   rg.Circle(rg.Point(100, 75), 30)
    specifies the circle whose center is at (100, 75)
    and whose radius is 30.

    Instance variables include:

      center: An rg.Point that specifies the center of the Circle.
      radius: The radius of the Circle.
      fill_color: The Circle is filled with this color.
          Example: circle.fill_color = 'green'
      outline_color: The outline of the Circle is this color.
          Example: circle.outline_color = 'blue'
      outline_thickness: The thickness (in pixels)
          of the outline of the Circle.

    Examples:
       circle = rg.Circle(rg.Point(100, 75), 30)
       print(circle.center, circle.radius)

       circle.fill_color = 'blue'
       circle.outline_color = 'black'
       circle.outline_thickness = 5

       window = rg.RoseWindow()
       circle.attach_to(window)

       circle.move_center_to(300, 200)
       circle.move_by(-50, 60)

       # Another way to move the Circle:
       x = circle.center.x
       y = circle.center.y
       circle.center = rg.Point(x - 50, y + 60)
    """

    def __init__(self, center, radius):
        """
        :type center: rg.Point
        :type radius: int
        """
        # _ShapeWithCenter stores a CLONE of the given center and
        # remembers the tkinter method used to draw this shape.
        super().__init__(center, tkinter.Canvas.create_oval)
        # Sets fill_color, outline_color and outline_thickness
        # to their default values.
        super()._initialize_options()
        self.radius = radius

    def __repr__(self):
        """ Returns a string representation of this Circle. """
        template = ('Circle: center=({}, {}), radius={}, fill_color={}, '
                    'outline_color={}, outline_thickness={}.')
        return template.format(self.center.x, self.center.y, self.radius,
                               self.fill_color, self.outline_color,
                               self.outline_thickness)

    def clone(self):
        """ Returns a copy of this Circle. """
        return Circle(self.center, self.radius)

    def get_bounding_box(self):
        """
        Returns an rg.Rectangle that encloses this Circle.
        """
        r = self.radius
        upper_left = Point(self.center.x - r, self.center.y - r)
        lower_right = Point(self.center.x + r, self.center.y + r)
        return Rectangle(upper_left, lower_right)

    def _get_coordinates_for_drawing(self):
        # A circle is drawn as the oval inscribed in its bounding box.
        return self.get_bounding_box()._get_coordinates_for_drawing()
class Ellipse(_RectangularShape, _ShapeWithOutline):
    """
    A Shape that is an ellipse (aka oval).

    To construct an Ellipse, use:
    -   rg.Ellipse(corner1, corner2)
    where corner1 and corner2 are rg.Point objects that specify
    opposite corners of the imaginary rectangle that encloses the
    Ellipse.  For example:
    -   rg.Ellipse(rg.Point(100, 50),
    -              rg.Point(300, 200))
    specifies the ellipse whose enclosing imaginary rectangle:
    - has upper-left corner (100, 50) and
    - lower-right corner (300, 200).
    Another example:
    -   rg.Ellipse(rg.Point(300, 50),
    -              rg.Point(100, 200))
    specifies the same ellipse.
    Any two opposite corners can be used.

    Instance variables include:

      corner_1: An rg.Point that specifies one corner of the
          imaginary rectangle that encloses the Ellipse.
      corner_2: An rg.Point that specifies an opposite corner of the
          imaginary rectangle that encloses the Ellipse.
      fill_color: The Ellipse is filled with this color.
          Example: ellipse.fill_color = 'green'
      outline_color: The outline of the Ellipse is this color.
          Example: ellipse.outline_color = 'blue'
      outline_thickness: The thickness (in pixels)
          of the outline of the Ellipse.

    Examples:
       p1 = rg.Point(100, 50)
       p2 = rg.Point(300, 200)
       ellipse = rg.Ellipse(p1, p2)
       print(ellipse.corner_1, ellipse.corner_2)

       ellipse.fill_color = 'blue'
       ellipse.outline_color = 'black'
       ellipse.outline_thickness = 5

       window = rg.RoseWindow()
       ellipse.attach_to(window)

       ellipse.move_by(-50, 60)

       # Another way to move the Ellipse:
       ellipse.corner_1 = ellipse.corner_1 - 50
       ellipse.corner_2 = ellipse.corner_2 + 60

       # To get rg.Points for the corners/center:
       ul = ellipse.get_upper_left_corner()
       ur = ellipse.get_upper_right_corner()
       ll = ellipse.get_lower_left_corner()
       lr = ellipse.get_lower_right_corner()
       center = ellipse.get_center()

       # To get the width/height (always positive):
       h = ellipse.get_height()
       w = ellipse.get_width()
    """

    def __init__(self, corner_1, corner_2):
        """
        :type corner_1: rg.Point
        :type corner_2: rg.Point
        """
        # _RectangularShape stores CLONES of the two given corners
        # and remembers the tkinter method used to draw this shape.
        super().__init__(corner_1, corner_2, tkinter.Canvas.create_oval)
        # Sets fill_color, outline_color and outline_thickness
        # to their default values.
        super()._initialize_options()
class Line(_Shape, _ShapeWithThickness):
    """
    A Shape that is a line segment.

    To construct a Line, use:
    -   rg.Line(start, end)
    where start and end are rg.Point objects that specify the
    endpoints of the Line.  For example:
    -   rg.Line(rg.Point(100, 50),
    -           rg.Point(200, 30)
    specifies the Line that starts at (100, 50) and ends at (200, 30).
    Another example:
    -   rg.Line(rg.Point(200, 30),
    -           rg.Point(100, 50)
    specifies the same segment with start and end reversed --
    which matters if the Line's "arrow" option is not None.

    Instance variables include:

      start: The rg.Point that is one end of the Line.
      end: The rg.Point that is the other end of the Line.
      color: The Line is drawn with this color.
      thickness: The thickness (in pixels) of the Line.
      arrow: Specifies whether or not the Line is drawn as an arrow.
          Possible values are:
          - None     draw the Line without arrow-heads
          - 'first'  draw an arrow-head at the start
          - 'last'   draw an arrow-head at the end
          - 'both'   draw an arrow-head at both
          For example, if my_line is a Line, then
          - my_line.arrow = 'last'
          makes the Line be drawn as an arrow
          from its start point to its end point.

    Examples:
       start = rg.Point(100, 50)
       end = rg.Point(200, 30)
       line = rg.Line(start, end)

       line.color = 'blue'
       line.thickness = 3

       line.arrow = 'both'   # A double-sided arrow
       line.arrow = None     # Just a line (no arrow)
       line.arrow = 'first'  # Arrow from end to start
       line.arrow = 'last'   # Arrow from start to end

       window = rg.RoseWindow()
       line.attach_to(window)

       line.move_by(-50, 60)
    """

    def __init__(self, start, end):
        """
        :type start: rg.Point
        :type end: rg.Point
        """
        super().__init__(tkinter.Canvas.create_line)
        # Sets color, thickness and arrow to their default values.
        super()._initialize_options()
        # Store CLONES of the endpoints, so that the caller mutating
        # its Points cannot change this Line.
        self.start = start.clone()
        self.end = end.clone()

    def __repr__(self):
        """ Returns a string representation of this Line. """
        template = ('Line: start=({}, {}), end=({}, {}), color={}, '
                    'thickness={}, arrow={}.')
        return template.format(self.start.x, self.start.y,
                               self.end.x, self.end.y,
                               self.color, self.thickness, self.arrow)

    def clone(self):
        """ Returns a copy of this Line. """
        return Line(self.start, self.end)

    def move_by(self, dx, dy):
        """
        Moves both endpoints of this Line
        (and hence the entire Line as well)
        to the right by dx and down by dy.
        Negative values move it to the left/up instead.
        Does NOT return a value; instead, it mutates this Line.

        :type dx: float
        :type dy: float
        """
        for endpoint in (self.start, self.end):
            endpoint.move_by(dx, dy)

    def get_midpoint(self):
        """
        Returns an rg.Point at the midpoint (center) of this Line.
        """
        mid_x = (self.start.x + self.end.x) / 2
        mid_y = (self.start.y + self.end.y) / 2
        return Point(mid_x, mid_y)

    def _get_coordinates_for_drawing(self):
        return [self.start.x, self.start.y, self.end.x, self.end.y]
# Placeholder: see the module-level FIXME list of unimplemented classes.
class Path(_Shape, _ShapeWithThickness):
    """ Not yet implemented. """
class Point(_Shape, _ShapeWithOutline):
    """
    A Shape that is a point in two-dimensional space.
    It is drawn as a small circle (dot).

    To construct a Point, use:
    -   rg.Point(x, y)
    where x and y are the Point's coordinates.
    For example:
    -   rg.Point(100, 50)
    specifies the point whose x value is 100
    and whose y value is 50.

    Instance variables include the following:
      x: The x-coordinate of the Point.
      y: The y-coordinate of the Point.
      fill_color: The Point is filled with this color.
          Note that a Point is drawn as a small, filled circle,
          which is why it has a fill_color, etc.
          Example: p.fill_color = 'green'
      outline_color: The outline of the Point is this color.
          Example: p.outline_color = 'blue'
      outline_thickness: The thickness (in pixels)
          of the outline of the Point.

    Examples:
       p = rg.Point(100, 50)
       print(p.x, p.y)

       window = rg.RoseWindow()
       p.attach_to(window)

       p.move_to(300, 200)
       p.move_by(-50, 60)

       # Another way to move the Point:
       p.x = p.x - 50
       p.y = p.y + 60

       p.fill_color = 'blue'
       p.outline_color = 'black'
       p.outline_thickness = 1
    """
    # width/height_for_drawing give the size of the dot that
    # represents this Point when it is drawn.
    defaults = {'width_for_drawing': 5,
                'height_for_drawing': 5,
                'fill_color': 'black',
                'outline_color': 'black',
                'outline_thickness': 1}

    def __init__(self, x, y):
        """
        :type x: float
        :type y: float
        """
        super().__init__(tkinter.Canvas.create_oval)
        # Unlike most shapes, a Point defaults to a BLACK fill,
        # so it sets its options directly rather than using
        # _ShapeWithOutline._initialize_options.
        self.fill_color = Point.defaults['fill_color']
        self.outline_color = Point.defaults['outline_color']
        self.outline_thickness = Point.defaults['outline_thickness']

        self.x = x
        self.y = y

        self.width_for_drawing = Point.defaults['width_for_drawing']
        self.height_for_drawing = Point.defaults['height_for_drawing']

    def __repr__(self):
        """ Returns a string representation of this Point. """
        return 'Point({:.1f}, {:.1f})'.format(self.x, self.y)

    def clone(self):
        """ Returns a copy of this Point. """
        return Point(self.x, self.y)

    def move_by(self, dx, dy):
        """
        Moves this Point to the right by dx and down by dy.
        Negative values move it to the left/up instead.
        Does NOT return a value; instead, it mutates this Point.

        :type dx: float
        :type dy: float
        """
        self.x = self.x + dx
        self.y = self.y + dy

    def move_to(self, x, y):
        """
        Moves this Point to (x, y).
        Does NOT return a value; instead, it mutates this Point.

        :type x: float
        :type y: float
        """
        self.x = x
        self.y = y

    def get_bounding_box(self):
        """
        Returns an rg.Rectangle that encloses
        this Point (viewing it as a dot).
        """
        # BUG FIX: the original used width_for_drawing for BOTH axes of
        # corner 1 and height_for_drawing for BOTH axes of corner 2.
        # Correct is width on the x-axis and height on the y-axis
        # (identical results at the equal defaults of 5 x 5).
        half_width = self.width_for_drawing / 2
        half_height = self.height_for_drawing / 2
        c1 = Point(self.x - half_width, self.y - half_height)
        c2 = Point(self.x + half_width, self.y + half_height)
        return Rectangle(c1, c2)

    def _get_coordinates_for_drawing(self):
        return self.get_bounding_box()._get_coordinates_for_drawing()
class Polygon(_Shape, _ShapeWithOutline):
    """ Not yet implemented.  Stub Shape subclass reserved for polygons. """
class Rectangle(_RectangularShape, _ShapeWithOutline):
    """
    A Shape that is a rectangle.
    To construct a Rectangle, use:
      - rg.Rectangle(corner1, corner2)
    where corner1 and corner2 are
    rg.Point objects that specify opposite
    corners of the rectangle.
    For example:
      - rg.Rectangle(rg.Point(100, 50),
                     rg.Point(300, 200))
    specifies the rectangle:
      - whose upper-left corner is (100, 50) and
      - whose lower-right corner is (300, 200).
    Another example:
      - rg.Rectangle(rg.Point(300, 50),
                     rg.Point(100, 200))
    specifies the same rectangle.
    Any two opposite corners can be used.
    Instance variables include:
      corner_1: An rg.Point that specifies
          one corner of the Rectangle.
      corner_2: An rg.Point that specifies
          an opposite corner of the Rectangle.
      fill_color: The Rectangle is filled with this color.
          Example: rect.fill_color = 'green'
      outline_color: The outline of the Rectangle is this color.
          Example: rect.outline_color = 'blue'
      outline_thickness: The thickness (in pixels)
          of the outline of the Rectangle.
    Examples:
        p1 = rg.Point(100, 50)
        p2 = rg.Point(300, 200)
        rect = rg.Rectangle(p1, p2)
        print(rect.corner_1, rect.corner_2)
        rect.fill_color = 'blue'
        rect.outline_color = 'black'
        rect.outline_thickness = 5
        window = rg.RoseWindow()
        rect.attach_to(window)
        rect.move_to(300, 200)
        rect.move_by(-50, 60)
        # Another way to move the Rectangle:
        rect.corner_1 = rect.corner_1 - 50
        rect.corner_2 = rect.corner_2 + 60
        # To get rg.Points for the corners/center:
        ul = rect.get_upper_left_corner()
        ur = rect.get_upper_right_corner()
        ll = rect.get_lower_left_corner()
        lr = rect.get_lower_right_corner()
        center = rect.get_center()
        # To get the width/height (always positive):
        h = rect.get_height()
        w = rect.get_width()
    """
    def __init__(self, corner_1, corner_2):
        """
        :type corner_1: rg.Point
        :type corner_2: rg.Point
        """
        # The following sets instance variables
        # self.corner_1
        # self.corner_2
        # to clones (copies) of the given rg.Points.
        super().__init__(corner_1, corner_2,
                         tkinter.Canvas.create_rectangle)
        # The following sets default values for:
        # self.fill_color
        # self.outline_color
        # self.outline_thickness
        super()._initialize_options()
    def get_bounding_box(self):
        """
        Returns a new rg.Rectangle with the same corners as this one.
        """
        # clone() is inherited from a base class (not visible here);
        # presumably it copies the corners -- TODO confirm it also
        # copies the color/thickness options.
        return self.clone()
class RoundedRectangle(_RectangularShape, _ShapeWithOutline):
    """ Not yet implemented.  Stub for a rectangle with rounded corners. """
class Square(_ShapeWithCenter, _ShapeWithOutline):
    """
    A Shape that is an square.
    To construct a Square, use:
      - rg.Square(center, length_of_each_side)
    where center is an rg.Point object
    and length_of_each_side is a positive integer.
    For example:
      - rg.Square(rg.Point(100, 75), 60)
    specifies the square whose center
    is at (100, 75) and whose length of
    each side is 60. Its corners are at:
    (70, 35), (70, 105), (130, 35), (130, 105).
    Instance variables include:
      center: An rg.Point that specifies
          the center of the Square.
      length_of_each_side: The length of each side of the Square.
      fill_color: The Square is filled with this color.
          Example: square.fill_color = 'green'
      outline_color: The outline of the Square is this color.
          Example: square.outline_color = 'blue'
      outline_thickness: The thickness (in pixels)
          of the outline of the Square.
    Examples:
        square = rg.Square(rg.Point(100, 75), 60)
        print(square.center, square.length_of_each_side)
        square.fill_color = 'blue'
        square.outline_color = 'black'
        square.outline_thickness = 5
        window = rg.RoseWindow()
        square.attach_to(window)
        square.move_center_to(300, 200)
        square.move_by(-50, 60)
        # Another way to move the Square:
        x = square.center.x
        y = square.center.y
        square.center = rg.Point(x - 50, y + 60)
    """
    def __init__(self, center, length_of_each_side):
        """
        :type center: rg.Point
        :type length_of_each_side: int
        """
        # The following sets instance variable
        # self.center
        # to a clone (copy) of the given rg.Point.
        super().__init__(center, tkinter.Canvas.create_rectangle)
        # The following sets default values for:
        # self.fill_color
        # self.outline_color
        # self.outline_thickness
        super()._initialize_options()
        # The length of each side is also stored in an instance variable
        self.length_of_each_side = length_of_each_side
    def __repr__(self):
        """ Returns a string representation of this Square. """
        f_string = ''
        f_string += 'Square: center=({}, {}), side-lengths={}, '
        f_string += 'fill_color={}, outline_color={}, outline_thickness={}.'
        return f_string.format(self.center.x, self.center.y,
                               self.length_of_each_side,
                               self.fill_color, self.outline_color,
                               self.outline_thickness)
    def clone(self):
        """ Returns a copy of this Square (geometry only; the copy's
        colors/thickness come from the defaults set in __init__). """
        return Square(self.center, self.length_of_each_side)
    def get_bounding_box(self):
        """
        Returns a rg.Rectangle with the same corners as this Square.
        """
        c1 = Point(self.center.x - self.length_of_each_side / 2,
                   self.center.y - self.length_of_each_side / 2)
        c2 = Point(self.center.x + self.length_of_each_side / 2,
                   self.center.y + self.length_of_each_side / 2)
        return Rectangle(c1, c2)
    def _get_coordinates_for_drawing(self):
        # Draw a Square via its bounding Rectangle's coordinates.
        return self.get_bounding_box()._get_coordinates_for_drawing()
class Text(_ShapeWithCenter, _ShapeWithText):
    """
    A Shape that has a string of text on it, displayed horizontally.
    Its constructor specifies the rg.Point at which the text
    is centered and the string that is to be displayed.
    Public data attributes: center (an rg.Point),
    font_size (an integer, 5 to 80 or so are reasonable values),
    is_bold (True if the text is to be displayed in BOLD, else False),
    is_italic (True or False),
    is_underline (True or False),
    is_overstrike (True or False),
    text_color (color used to display the text, default is 'black'),
    text (the string to be displayed).
    Public methods: attach_to, move_by, move_center_to.
    """
    def __init__(self, center, text):
        """
        The first argument must be a rg.Point.
        The second argument must be a string.
        When this Text object is rendered on a window,
        the string (2nd argument) is drawn horizontally on the window,
        centered at the rg.Point that is the 1st argument.
        Preconditions:
        :type center: rg.Point
        :type text: str
        """
        super().__init__(center, tkinter.Canvas.create_text)
        super()._initialize_options()
        self.text = text
        # FIXME: Allow __init__ to set the options.
    def __repr__(self):
        return "Text displaying '{}' at position {}".format(self.text,
                                                            self.center)
    # FIXME: Have repr include characteristics??
    # FIXME: Do a clone?
    # The following clone/get_bounding_box drafts are intentionally
    # disabled (note they still refer to Square's attributes):
    # def clone(self):
    # return Square(self.center, self.length_of_each_side)
    # def get_bounding_box(self):
    # return Rectangle(self.center,
    # 2 * self.length_of_each_side,
    # 2 * self.length_of_each_side)
    # FIXME: Implement bounding_box using the tkinter function for it.
    def _get_coordinates_for_drawing(self):
        # create_text takes a single (x, y) anchor point.
        return [self.center.x, self.center.y]
# Mark: Window/RoseWindow naming collision is causing mass confusion.
# class Window(_Shape):
# """ Not yet implemented. """
# default_options = {}
# CONSIDER: Are these right for here?
class Button(_Shape):
    """ Not yet implemented.  Stub for a clickable button widget. """
    default_options = {}  # no options defined yet
class Entry(_Shape):
    """ Not yet implemented.  Stub for a text-entry widget. """
    default_options = {}  # no options defined yet
class Color(object):
    """
    A fill or outline color assembled from explicit amounts of red,
    green, and blue light, each expected in the range 0-255.
    Instances can be given anywhere RoseGraphics accepts a color
    (fill_color, outline_color, ...); repr() yields the equivalent
    '#rrggbb' hexadecimal form.
    """
    def __init__(self, red, green=None, blue=None):
        self.red, self.green, self.blue = red, green, blue

    def __repr__(self):
        components = (self.red, self.green, self.blue)
        return '#' + ''.join('{:02x}'.format(c) for c in components)
# begin STUB code for testing
class _RoseWindowStub(RoseWindow):
    """Test double for RoseWindow: records state and collects shapes
    without opening a real tkinter window, so tests can run headless."""
    def __init__(self, width=400, height=300, title='Rose Graphics',
                 color='black', canvas_color=None,
                 make_initial_canvas=True):
        canvas_color = "white" # FIXME
        self._is_closed = False
        self.width = width
        self.height = height
        self.initial_canvas = _RoseCanvasStub(
            self, width, height, canvas_color)
    def render(self, seconds_to_pause=None):
        # Rendering is a no-op in the stub.
        pass
    def get_next_mouse_click(self):
        # Pretend the user always clicks at the origin.
        return Point(0, 0)
    def close_on_mouse_click(self):
        return None
    def continue_on_mouse_click(self,
                                message='To continue, click anywhere in this window',
                                x_position=None,
                                y_position=None,
                                close_it=False,
                                erase_it=True):
        return None
    def _serialize_shapes(self):
        """Returns a list of strings representing the shapes in sorted order."""
        return _serialize_shapes(self)
class _RoseCanvasStub(RoseCanvas):
    """Test double for RoseCanvas: just accumulates drawn shapes in a
    list instead of drawing on a real tkinter canvas."""
    def __init__(self, window, width, height, canvas_color):
        # super().__init__(window, width, height, canvas_color)
        # canvases.append(self)
        self.shapes = []
    def _draw(self, shape):
        # super()._draw(shape)
        self.shapes.append(shape)
    def render(self, seconds_to_pause=None):
        # super().render() # don't pause
        pass
class TurtleWindow(object):
    """Thin wrapper around a turtle.Screen that SimpleTurtles draw on."""
    def __init__(self):
        self._screen = turtle.Screen()
        # Make subsequently-constructed Turtles use this screen.
        turtle.Turtle._screen = self._screen
    def close_on_mouse_click(self):
        """Closes this window when the user next clicks inside it."""
        self._screen.exitonclick()
        # We may need the statement:
        # turtle.TurtleScreen._RUNNING = True
        # in case we open a subsequent TurtleWindow during this run.
        # The turtle library seems not to allow for that possibility
        # (it uses a CLASS variable _RUNNING where I would have expected
        # an INSTANCE variable).
        # The next statement appeared to have a visible effect
        # (something flashed) but nothing worse. At time time
        # it is commented-out, since we need only a single TurtleWindow.
        # turtle.TurtleScreen._RUNNING = True
    def delay(self, milliseconds=None):
        """Delegates to turtle.Screen.delay (drawing delay in ms)."""
        self._screen.delay(milliseconds)
    def tracer(self, n=None, delay=None):
        """Delegates to turtle.Screen.tracer (screen-update batching)."""
        self._screen.tracer(n, delay)
class ShapesWindow(RoseWindow):
    """A RoseWindow under an alternative, shape-oriented name."""
    pass
class SimpleTurtle(object):
    """
    A SimpleTurtle is a Turtle with restricted (simpler) functionality.
    It can move forward/backward (units are pixels), turn (spin)
    left/right (units are degrees), and more.
    To construct a SimpleTurtle, use:
        rg.SimpleTurtle(shape)
    where shape is OPTIONAL and can be any of: 'turtle'
    'arrow' 'classic' 'square' 'circle' 'triangle' 'blank'
    Instance variables include:
      speed: An integer from 1 (slowest) to 10 (fastest) that
          determines how fast the SimpleTurtle moves.
      pen: an rg.Pen object (see example below) that determines
          the color and thickness of the line
          that the SimpleTurtle draws when moving
      paint_bucket: an rg.PaintBucket object (see example below)
          that determines the color with which the SimpleTurtle
          "fills" shapes indicated by using the begin_fill and
          end_fill methods.
    Examples:
        natacha = rg.SimpleTurtle()
        natacha.forward(100)
        boris = rg.SimpleTurtle('turtle')
        boris.speed = 8
        boris.pen = rg.Pen('blue', 5) # blue line 5 pixels thick
        boris.paint_bucket = rg.PaintBucket('red')
        # Moves with pen down, then with pen up, then with pen down again:
        boris.left(90)
        boris.forward(-300)
        boris.pen_up()
        boris.go_to(rg.Point(100, -50))
        boris.pen_down()
        boris.backward(75)
        # Moves with the enclosed space "filled" with the paint_bucket
        boris.begin_fill()
        ... movements ...
        boris.end_fill()
    """
    def __init__(self, shape='classic'):
        """
        What comes in:
        A turtle.Shape that determines how the Turtle looks. Defaults to
        a Bitmap of the "classic" Turtle (an arrowhead) from early Turtle Graphics.
        Side effects: Constructs and stores in self._turtle the "real" Turtle
        to do all the work on behalf of this SimpleTurtle. This (purposely)
        restricts what this SimpleTurtle knows and can do.
        :type shape: str
        """
        self.speed = 1
        self.pen = Pen('black', 1)
        self.paint_bucket = PaintBucket('black')
        self._turtle = turtle.Turtle(shape)
        self._update_real_turtle()
    def forward(self, distance):
        """
        Makes this SimpleTurtle go forward the given distance
        (in pixels). Example (assuming sally is an rg.SimpleTurtle):
            sally.forward(200)
        """
        self._update_real_turtle()
        self._turtle.forward(distance)
    def backward(self, distance):
        """
        Makes this SimpleTurtle go backward the given distance
        (in pixels). Example (assuming sally is an rg.SimpleTurtle):
            sally.backward(200)
        """
        self._update_real_turtle()
        self._turtle.backward(distance)
    def left(self, angle):
        """
        Makes this SimpleTurtle turn (i.e. spin) left the given amount
        (in degrees). Example (assuming sally is an rg.SimpleTurtle):
            sally.left(45)
        """
        self._update_real_turtle()
        self._turtle.left(angle)
    def right(self, angle):
        """
        Makes this SimpleTurtle turn (i.e. spin) right the given amount
        (in degrees). Example (assuming sally is an rg.SimpleTurtle):
            sally.right(45)
        """
        self._update_real_turtle()
        self._turtle.right(angle)
    def go_to(self, point):
        """
        Makes this SimpleTurtle go to the given rg.Point.
        (0, 0) is at the center of the window.
        Example (assuming sally is an rg.SimpleTurtle):
            sally.go_to(rg.Point(100, -50))
        """
        self._update_real_turtle()
        self._turtle.goto(point.x, point.y)
    def draw_circle(self, radius):
        """
        Makes this SimpleTurtle draw a circle with the given radius.
        Example (assuming sally is an rg.SimpleTurtle):
            sally.draw_circle(40)
        """
        self._update_real_turtle()
        self._turtle.circle(radius)
    def draw_square(self, length_of_sides):
        """
        Makes this SimpleTurtle draw a square with the given value
        for the length of each of its sides.
        Example (assuming sally is an rg.SimpleTurtle):
            sally.draw_square(100)
        """
        for _ in range(4):
            self.forward(length_of_sides)
            self.left(90)
    def draw_regular_polygon(self, number_of_sides, length_of_sides):
        """
        Makes this SimpleTurtle draw a regular polygon with the given
        number of sides and the given length for each of its sides.
        Example (assuming sally is an rg.SimpleTurtle):
            sally.draw_regular_polygon(8, 75) # octagon
            sally.draw_regular_polygon(3, 75) # triangle
        """
        # Exterior angles of a regular n-gon sum to 360 degrees.
        for _ in range(number_of_sides):
            self.forward(length_of_sides)
            self.left(360 / number_of_sides)
    def pen_up(self):
        """
        Lifts up this SimpleTurtle's pen. Subsequent movements
        will NOT draw a line (until pen_down is called).
        Example (assuming sally is an rg.SimpleTurtle):
            sally.pen_up()
        """
        self._update_real_turtle()
        self._turtle.penup()
    def pen_down(self):
        """
        Puts down this SimpleTurtle's pen. Subsequent movements
        WILL draw a line using this SimpleTurtle's pen (until pen_up
        is called). Example (assuming sally is an rg.SimpleTurtle):
            sally.pen_down()
        """
        self._update_real_turtle()
        self._turtle.pendown()
    def x_cor(self):
        """
        Returns the x-coordinate of this SimpleTurtle's current position.
        Example (assuming sally is an rg.SimpleTurtle):
            x = sally.x_cor()
        """
        return self._turtle.xcor()
    def y_cor(self):
        """
        Returns the y-coordinate of this SimpleTurtle's current position.
        Example (assuming sally is an rg.SimpleTurtle):
            y = sally.y_cor()
        """
        return self._turtle.ycor()
    def begin_fill(self):
        """
        Begins "filling" the shape that this SimpleTurtle draws,
        using this SimpleTurtle's paint_bucket as the fill.
        Example (assuming sally is an rg.SimpleTurtle) that fills
        a triangle with green:
            sally.paint_bucket = rg.PaintBucket('green')
            sally.begin_fill()
            sally.forward(100)
            sally.left(120)
            sally.forward(100)
            sally.left(120)
            sally.forward(100)
            sally.end_fill()
        """
        self._update_real_turtle()
        self._turtle.begin_fill()
    def end_fill(self):
        """
        Completes "filling" the shape that this SimpleTurtle draws,
        using this SimpleTurtle's paint_bucket as the fill.
        Example (assuming sally is an rg.SimpleTurtle) that fills
        a triangle with green:
            sally.paint_bucket = rg.PaintBucket('green')
            sally.begin_fill()
            sally.forward(100)
            sally.left(120)
            sally.forward(100)
            sally.left(120)
            sally.forward(100)
            sally.end_fill()
        """
        self._update_real_turtle()
        self._turtle.end_fill()
    def clear(self):
        """ Not yet implemented. """
    def clone(self):
        """ Not yet implemented. """
        pass
    def write_text(self):
        """ Not yet implemented. """
        pass
    def _update_real_turtle(self):
        # Push this SimpleTurtle's current pen, fill and speed settings
        # down to the underlying turtle.Turtle before each action.
        self._turtle.pencolor(self.pen.color)
        self._turtle.pensize(self.pen.thickness)
        self._turtle.fillcolor(self.paint_bucket.color)
        self._turtle.speed(self.speed)
class Pen(object):
    """
    The drawing pen a SimpleTurtle uses: a color plus a line thickness.
    To construct a Pen, use:
        rg.Pen(color, thickness)
    where color is a color (e.g. 'red')
    and thickness is a small positive integer (pixels).
    Instance variables:
        color: The color of the Pen
        thickness: The thickness of the Pen
    Examples:
        thick_blue = rg.Pen('blue', 14)
        thin_red = rg.Pen('red', 1)
    """
    def __init__(self, color, thickness):
        self.color = color
        self.thickness = thickness
class PaintBucket(object):
    """
    The fill a SimpleTurtle uses when filling shapes: simply a color.
    To construct a PaintBucket, use:
        rg.PaintBucket(color)
    where color is a color (e.g. 'red').
    Instance variables:
        color: The color of the PaintBucket
    Example:
        paint = rg.PaintBucket('green')
    """
    def __init__(self, color):
        self.color = color
| [
"mutchler@rose-hulman.edu"
] | mutchler@rose-hulman.edu |
b563a862cc39ee319fc9a4a292dee5f03239fcdc | 4dd95ff4e685ad4937651eef199171a58563b23d | /venv/Lib/site-packages/truedata_ws/websocket/internal_tests.py | 1d890a276b8cf3f0a4030c600ed43968494f07e9 | [] | no_license | webclinic017/Stock-test1 | 8a7dff8408679ddb3e732a8dd449138236127814 | 27e212da22d5b2610a581bdfbf9dc49a325abb1d | refs/heads/master | 2023-02-09T23:43:13.687509 | 2021-01-02T15:02:58 | 2021-01-02T15:02:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,512 | py | from .base_tests import get_connection, run_live_tests, run_historical_tests
from .defaults import DEFAULT_LIVE_PORT, DEFAULT_HIST_PORT, DEFAULT_TEST_SYMBOL_LIST
from colorama import Style, Fore
def run_all_tests(user_a, pass_a, user_b, pass_b, live_port, historical_port, symbols_to_test):
    """
    Resolve any unspecified settings to their defaults (announcing each
    choice in bright blue), open one connection per credential pair and
    run the historical test suite against both connections.
    """
    if live_port is None:
        live_port = DEFAULT_LIVE_PORT
        print(f'{Style.BRIGHT}{Fore.BLUE}Setting to default live port = {DEFAULT_LIVE_PORT}{Style.RESET_ALL}')
    else:
        print(f'{Style.BRIGHT}{Fore.BLUE}Using given live port = {live_port}{Style.RESET_ALL}')
    if historical_port is None:
        historical_port = DEFAULT_HIST_PORT
        print(f'{Style.BRIGHT}{Fore.BLUE}Setting to default hist port = {DEFAULT_HIST_PORT}{Style.RESET_ALL}')
    else:
        print(f'{Style.BRIGHT}{Fore.BLUE}Using given historical port = {historical_port}{Style.RESET_ALL}')
    if symbols_to_test is None:
        symbols_to_test = DEFAULT_TEST_SYMBOL_LIST
        print(f'{Style.BRIGHT}{Fore.BLUE}Setting to default symbol list = {DEFAULT_TEST_SYMBOL_LIST}{Style.RESET_ALL}')
    else:
        print(f'{Style.BRIGHT}{Fore.BLUE}Using given symbol list = {symbols_to_test}{Style.RESET_ALL}')
    # One connection per credential pair (with / without bid-ask data).
    td_with_bidask = get_connection(user_a, pass_a, live_port, historical_port)
    td_without_bidask = get_connection(user_b, pass_b, live_port, historical_port)
    run_historical_tests(td_with_bidask, symbols=symbols_to_test)
    run_historical_tests(td_without_bidask, symbols=symbols_to_test)
| [
"shayak1@gmail.com"
] | shayak1@gmail.com |
ec9d27e688222690aed28a30726f4cc759f5e9af | 9dfae7205fa6edc5ddff876eee08a34b83e37830 | /Exercícios/ex057.py | 2f79bcf133682f7722bab1f5beb4dc24d12b182b | [
"MIT"
] | permissive | rbpope/AulasPythonMundo2 | 1ccb49c8e762cda0085e4d92e6b1fcd13bc08115 | 1120d8e6df37fb54e2f113bb3e2b929781915ba7 | refs/heads/main | 2023-05-13T11:53:32.594884 | 2021-06-10T13:46:08 | 2021-06-10T13:46:08 | 304,985,886 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 294 | py | gen = str(input('Qual o seu gênero? [M/F] ').strip().upper()[0])
# Keep prompting until a valid single-letter answer (M or F) is given.
while gen not in ('M', 'F'):
    # [:1] instead of [0]: an empty reply yields '' (which re-prompts)
    # rather than raising IndexError.  Tuple membership instead of the
    # old substring test `in 'MF'`, which '' would wrongly satisfy.
    gen = input('Opção inválida, tente novamente: ').strip().upper()[:1]
if gen == 'M':
    print('Você informou o genero masculino.')
if gen == 'F':
    print('Você informou o genero feminino.')
| [
"renato@pope.med.br"
] | renato@pope.med.br |
482928edaa2e06cd3b7bed4f4eaec7daf1bdda60 | 2c74bb301f1ed83b79254944183ac5a18a639fdf | /homeassistant/components/esphome/domain_data.py | 01f0a4d6b1369b6f6908d943c821bb3805e59e57 | [
"Apache-2.0"
] | permissive | Adminiuga/home-assistant | 5bec93007ddac1a268cc359bf7e48530c5f73b38 | dcf68d768e4f628d038f1fdd6e40bad713fbc222 | refs/heads/dev | 2023-02-22T22:03:31.013931 | 2022-11-09T00:27:20 | 2022-11-09T00:27:20 | 123,929,062 | 5 | 4 | Apache-2.0 | 2023-02-22T06:14:31 | 2018-03-05T14:11:09 | Python | UTF-8 | Python | false | false | 2,885 | py | """Support for esphome domain data."""
from __future__ import annotations
from dataclasses import dataclass, field
from typing import TypeVar, cast
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import HomeAssistant
from homeassistant.helpers.json import JSONEncoder
from homeassistant.helpers.storage import Store
from .entry_data import RuntimeEntryData
# Schema version for data persisted through the Store helpers below.
STORAGE_VERSION = 1
# Integration domain; also the hass.data key where DomainData lives.
DOMAIN = "esphome"
# TypeVar bound so DomainData.get() returns the subclass it is called on.
_DomainDataSelfT = TypeVar("_DomainDataSelfT", bound="DomainData")
@dataclass
class DomainData:
    """Define a class that stores global esphome data in hass.data[DOMAIN]."""

    # Per-config-entry runtime data, keyed by entry_id.
    _entry_datas: dict[str, RuntimeEntryData] = field(default_factory=dict)
    # Lazily-created Store instances, keyed by entry_id.
    _stores: dict[str, Store] = field(default_factory=dict)
    # Reverse index: config entry unique_id -> the entry itself.
    _entry_by_unique_id: dict[str, ConfigEntry] = field(default_factory=dict)

    def get_by_unique_id(self, unique_id: str) -> ConfigEntry:
        """Get the config entry by its unique ID.

        Raises KeyError if no loaded entry has that unique ID.
        """
        return self._entry_by_unique_id[unique_id]

    def get_entry_data(self, entry: ConfigEntry) -> RuntimeEntryData:
        """Return the runtime entry data associated with this config entry.

        Raises KeyError if the entry isn't loaded yet.
        """
        return self._entry_datas[entry.entry_id]

    def set_entry_data(self, entry: ConfigEntry, entry_data: RuntimeEntryData) -> None:
        """Set the runtime entry data associated with this config entry.

        Raises ValueError if entry data is already set for this entry.
        """
        if entry.entry_id in self._entry_datas:
            raise ValueError("Entry data for this entry is already set")
        self._entry_datas[entry.entry_id] = entry_data
        if entry.unique_id:
            self._entry_by_unique_id[entry.unique_id] = entry

    def pop_entry_data(self, entry: ConfigEntry) -> RuntimeEntryData:
        """Pop the runtime entry data instance associated with this config entry."""
        if entry.unique_id:
            del self._entry_by_unique_id[entry.unique_id]
        return self._entry_datas.pop(entry.entry_id)

    def is_entry_loaded(self, entry: ConfigEntry) -> bool:
        """Check whether the given entry is loaded."""
        return entry.entry_id in self._entry_datas

    def get_or_create_store(self, hass: HomeAssistant, entry: ConfigEntry) -> Store:
        """Get or create a Store instance for the given config entry."""
        # dict.setdefault evaluates its default argument eagerly, which
        # would construct (and immediately discard) a Store on every
        # call after the first; build the Store only when it is missing.
        store = self._stores.get(entry.entry_id)
        if store is None:
            store = Store(
                hass, STORAGE_VERSION, f"esphome.{entry.entry_id}", encoder=JSONEncoder
            )
            self._stores[entry.entry_id] = store
        return store

    @classmethod
    def get(cls: type[_DomainDataSelfT], hass: HomeAssistant) -> _DomainDataSelfT:
        """Get the global DomainData instance stored in hass.data."""
        # Don't use setdefault - this is a hot code path
        if DOMAIN in hass.data:
            return cast(_DomainDataSelfT, hass.data[DOMAIN])
        ret = hass.data[DOMAIN] = cls()
        return ret
| [
"noreply@github.com"
] | Adminiuga.noreply@github.com |
a1963e2438b34787fe6b7416956480d82b70424c | 9aed3c36c72133860001c4578de003245ea00170 | /zui/messager.py | e26e8ca8260e4c06b2f0c93f6d3f0936c5554ba3 | [] | no_license | seanh/PandaZUI | 78ffe263b4b4b83488c07521be131d53b1575c58 | 7365ec19185273d2fa7bd52387c43a70dd8abb92 | refs/heads/master | 2020-04-01T09:31:12.888689 | 2009-05-16T14:49:13 | 2009-05-16T14:49:13 | 202,614 | 4 | 2 | null | null | null | null | UTF-8 | Python | false | false | 8,610 | py | """
messager.py -- a simple message-passing pattern for one-many or many-many
dependencies. Useful for event notifications, for example.
To send a message use the singleton messager instance:
from messager import messager
messager.send('message name',argument)
You can pass a single argument with a message, and this argument can be anything
you like. For example, event objects that simply hold a number of attributes can
be constrcuted and passed as arguments with messages.
Messager maintains a mapping of message names to lists of functions (and their
arguments). When a message is sent, all of the functions subscribed to that
message are called and passed the argument given when the function was
subscribed followed by argument given when the message was sent. To subscribe a
function you must subclass Receiver and call the accept(...) or acceptOnce(...)
methods:
self.accept('message name',function,argument)
self.acceptOnce('message name',function,argument)
You don't need to call Receiver.__init__() when you subclass Receiver, it has no
__init__. Receiver works by maintaining a list of the message subscriptions you
have made.
It is up to you to make sure that functions that accept messages take the right
number of arguments, 0, 1 or 2 depending on whether the accept(...) and
send(...) methods were called with an argument or not.
To unsubscribe a function from a message name use ignore:
# Unsubscribe a particular function from a particular message name.
self.ignore('message name',function)
# Unsubscribe all functions that this object has subscribed to a particular
# message name.
self.ignore('message name')
# Unsubscribe all functions that this object has subscribed to any message
# name.
self.ignoreAll()
You can unsubscribe all functions from all Receiver objects with:
messager.clear()
If you do `messager.verbose = True` the messager will print whenever it
receives a message or subscription, and if you do `print messager` the messager
will print out a list of all the registered message names and their subscribers.
One last thing to be aware of is that messager keeps references to (functions
of) all objects that subscribe to accept messages. For an object to be deleted
it must unsubscribe all of its functions from all messages (the ignoreAll()
method will do this).
"""
class Messager:
"""Singleton messager object."""
def __init__(self):
"""Initialise the dictionary mapping message names to lists of receiver
functions."""
self.receivers = {}
self.one_time_receivers = {}
self.verbose = False
def send(self,name,sender_arg='no arg'):
"""Send a message with the given name and the given argument. All
functions registered as receivers of this message name will be
called."""
if self.verbose:
print 'Sending message',name
if self.receivers.has_key(name):
for receiver in self.receivers[name]:
args = []
if receiver['arg'] != 'no arg':
args.append(receiver['arg'])
if sender_arg != 'no arg':
args.append(sender_arg)
receiver['function'](*args)
if self.verbose:
print ' received by',receiver['function']
if self.one_time_receivers.has_key(name):
for receiver in self.one_time_receivers[name]:
args = []
if receiver['arg'] != 'no arg':
args.append(receiver['arg'])
if sender_arg != 'no arg':
args.append(sender_arg)
receiver['function'](*args)
if self.verbose:
print ' received by',receiver['function']
del self.one_time_receivers[name]
def _accept(self,name,function,arg='no arg'):
"""Register with the messager to receive messages with the given name,
messager will call the given function to notify of a message. The arg
object given to accept will be passed to the given function first,
followed by the arg object given to send by the sender object."""
if not self.receivers.has_key(name):
self.receivers[name] = []
self.receivers[name].append({'function':function,'arg':arg})
if self.verbose:
print '',function,'subscribed to event',name,'with arg',arg
def _acceptOnce(self,name,function,arg=None):
"""Register to receive the next instance only of a message with the
given name."""
if not self.one_time_receivers.has_key(name):
self.one_time_receivers[name] = []
self.one_time_receivers[name].append({'function':function,'arg':arg})
if self.verbose:
print '',function,'subscribed to event',name,'with arg',arg,'once only'
def _ignore(self,name,function):
"""Unregister the given function from the given message name."""
if self.receivers.has_key(name):
# FIXME: Could use a fancy list comprehension here.
temp = []
for receiver in self.receivers[name]:
if receiver['function'] != function:
temp.append(receiver)
self.receivers[name] = temp
if self.one_time_receivers.has_key(name):
temp = []
for receiver in self.one_time_receivers[name]:
if receiver['function'] != function:
temp.append(receiver)
self.one_time_receivers[name] = temp
if self.verbose:
print '',function,'unsubscribed from',name
def clear(self):
"""Clear all subscriptions with the messager."""
self.receivers = {}
self.one_time_receivers = {}
def __str__(self):
"""Return a string showing which functions are registered with
which event names, useful for debugging."""
string = 'Receivers:\n'
string += self.receivers.__str__() + '\n'
string += 'One time receivers:\n'
string += self.one_time_receivers.__str__()
return string
# Create the single shared instance of Messager; import this object
# (`from messager import messager`) rather than constructing new ones.
messager = Messager()
class Receiver:
    """A class to inherit if you want to register with the messager to receive
    messages. You don't have to inherit this to register for messages, you can
    just call messager directly, but this class maintains a list of your message
    subscriptions and provides a handy ignoreAll() method, and an enhanced
    ignore(...) method."""
    def accept(self,name,function,arg='no arg'):
        # We initialise subscriptions when we first need it, to avoid having an
        # __init__ method that subclasses would need to call.
        if not hasattr(self,'subscriptions'):
            self.subscriptions = []
        messager._accept(name,function,arg)
        self.subscriptions.append((name,function))
    def acceptOnce(self,name,function,arg='no arg'):
        # Same lazy initialisation as accept(), but for one-shot messages.
        if not hasattr(self,'subscriptions'):
            self.subscriptions = []
        messager._acceptOnce(name,function,arg)
        self.subscriptions.append((name,function))
    def ignore(self,*args):
        # Accepts either (name) -- drop every subscription to that
        # message name -- or (name, function) for a single subscription.
        if not hasattr(self,'subscriptions'):
            return
        if len(args) == 1:
            name = args[0]
            function = None
        elif len(args) == 2:
            name,function = args
        else:
            raise Exception('Wrong number of arguments to Receiver.ignore')
        if function is None:
            # Remove all of this object's function subscriptions to the given
            # message name.
            temp = []
            for subscription in self.subscriptions:
                n,f = subscription
                if n == name:
                    messager._ignore(n,f)
                else:
                    temp.append(subscription)
            self.subscriptions = temp
        else:
            # Remove the single subscription (name,function)
            # NOTE(review): list.remove raises ValueError if this exact
            # pair was never subscribed by this object -- confirm that
            # callers only ignore pairs they previously accepted.
            messager._ignore(name,function)
            self.subscriptions.remove((name,function))
    def ignoreAll(self):
        # Unsubscribe every (name, function) pair this object registered.
        if not hasattr(self,'subscriptions'):
            return
        for subscription in self.subscriptions:
            messager._ignore(*subscription)
        self.subscriptions = []
"seanh@sdf.lonestar.org"
] | seanh@sdf.lonestar.org |
5a1ed0cd70c637628613bcdc2591471ce0eebf24 | b3c8678c1db0b3e256de97e560d7d4d26c1dd6eb | /src/jpl.mcl.site.sciencedata/src/jpl/mcl/site/sciencedata/testing.py | 4325448b2a43549da50926c2e47a4028a8f43d2d | [
"GPL-2.0-only",
"Apache-2.0",
"GPL-1.0-or-later"
] | permissive | MCLConsortium/mcl-site | e4a127235504e7ac5575ef3d73c8fd1bdf02824b | 5eb9c16a7fe322192a03461a9f22ecb8c17307fd | refs/heads/master | 2021-06-09T23:49:35.775652 | 2021-04-30T22:59:59 | 2021-04-30T22:59:59 | 49,965,919 | 1 | 0 | Apache-2.0 | 2020-08-25T15:58:27 | 2016-01-19T16:24:58 | Python | UTF-8 | Python | false | false | 1,896 | py | # encoding: utf-8
from plone.app.testing import PloneSandboxLayer, IntegrationTesting, FunctionalTesting, PLONE_FIXTURE
from . import PACKAGE_NAME
import pkg_resources, urllib2, urllib, httplib, plone.api
class TestSchemeHandler(urllib2.BaseHandler):
    u'''A special URL handler for the testing-only scheme ``testscheme``.'''
    def testscheme_open(self, req):
        # Map testscheme://.../<name> onto the packaged fixture file
        # tests/data/<name>.json so tests need no network access.
        try:
            selector = req.get_selector()
            path = 'tests/data/' + selector.split('/')[-1] + '.json'
            if pkg_resources.resource_exists(PACKAGE_NAME, path):
                # Emulate a successful HTTP-style response (status 200)
                # whose body is the fixture stream; the headers object
                # is an empty HTTPMessage read from /dev/null.
                return urllib.addinfourl(
                    pkg_resources.resource_stream(PACKAGE_NAME, path),
                    httplib.HTTPMessage(open('/dev/null')),
                    req.get_full_url(),
                    200
                )
            else:
                raise urllib2.URLError('Not found')
        except Exception:
            # NOTE(review): every failure (including the URLError above)
            # is collapsed into a generic 'Not found'; the original
            # exception detail is deliberately discarded.
            raise urllib2.URLError('Not found')
class JPLMCLSiteSciencedataLayer(PloneSandboxLayer):
defaultBases = (PLONE_FIXTURE,)
def setUpZope(self, app, configurationContext):
import jpl.mcl.site.sciencedata
self.loadZCML(package=jpl.mcl.site.sciencedata)
urllib2.install_opener(urllib2.build_opener(TestSchemeHandler))
def setUpPloneSite(self, portal):
wfTool = plone.api.portal.get_tool('portal_workflow')
wfTool.setDefaultChain('plone_workflow')
self.applyProfile(portal, 'jpl.mcl.site.sciencedata:default')
JPL_MCL_SITE_SCIENCEDATA_FIXTURE = JPLMCLSiteSciencedataLayer()
JPL_MCL_SITE_SCIENCEDATA_INTEGRATION_TESTING = IntegrationTesting(
bases=(JPL_MCL_SITE_SCIENCEDATA_FIXTURE,),
name='JPLMCLSiteSciencedataLayer:IntegrationTesting'
)
JPL_MCL_SITE_SCIENCEDATA_FUNCTIONAL_TESTING = FunctionalTesting(
bases=(JPL_MCL_SITE_SCIENCEDATA_FIXTURE,),
name='JPLMCLSiteSciencedataLayer:FunctionalTesting'
)
| [
"kelly@seankelly.biz"
] | kelly@seankelly.biz |
7a460b6195a2009a5f4250eefc0f10ed3be48dac | 8f4317101fa4b1a86ffd2fc416c51b2e09e6bc9c | /Exercise IULI/Exercise 6 ( Template Matching )/face template matching.py | 62a38f4fef0ad706ea900032a7670a03865faf12 | [] | no_license | Jphartogi/OpenCV_Uni | 90e5234d523aa92f157f25b34a46867e333707a8 | 0e28426e90c2a1d20a31ef0c44f982ff047c449c | refs/heads/master | 2020-05-18T18:19:05.032737 | 2019-05-02T13:02:49 | 2019-05-02T13:02:49 | 184,580,444 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 742 | py | # -*- coding: utf-8 -*-
"""
Created on Tue Mar 20 13:44:38 2018
@author: MSI
"""
import cv2
import numpy as np
from matplotlib import pyplot as plt
cap = cv2.VideoCapture(0)
while(True):
# Capture frame by frame
ret, frame = cap.read()
gray = cv2.cvtColor(frame,cv2.COLOR_BGR2GRAY)
template = cv2.imread('face.jpg',0)
w,h = template.shape[::-1]
res = cv2.matchTemplate(gray,template, cv2.TM_CCOEFF_NORMED)
threshold = 0.4
loc = np.where(res >= threshold)
for pt in zip(*loc[::-1]):
cv2.rectangle(frame,pt,(pt[0]+w,pt[1]+h),(0,255,200),2)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
cv2.imshow('detected',frame)
cap.release()
cv2.destroyAllWindows()
| [
"joshuaphartogi@gmail.com"
] | joshuaphartogi@gmail.com |
1c2efb6403268bce7bdbf32ec39453b0052157f8 | 94cac43dd648921a14e8ed022df1e86da3c0a413 | /高级数据类型/hm_11_字典的其他操作.py | de057130c4764d65530fdb8b508d143ee2238a1a | [] | no_license | HAOWEM97/Python_learning | a0642d091e724ec1b6778795587c96d18beab834 | 64ef7141e7c003004c47e80bcac2ced5f446d53c | refs/heads/master | 2022-07-16T19:50:11.175501 | 2020-05-17T19:48:59 | 2020-05-17T19:48:59 | 264,741,225 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 304 | py | xiaoming_dict = {"name":"小明",
"age":18}
# 1.统计健值对数量
print(len(xiaoming_dict))
# 2.合并字典
temp_dict = {"gender": True,
"height": 1.75,
"age":19}
xiaoming_dict.update(temp_dict)
# 3.清空字典
xiaoming_dict.clear()
print(xiaoming_dict) | [
"noreply@github.com"
] | HAOWEM97.noreply@github.com |
d555da4df0ff92fad94428138c04e5725366861c | 47542e6b98c19592f44ce44297771c698d4987f7 | /ch09/09_08.py | 42a496a958555522889a656ddd4e96b4f567131b | [
"Apache-2.0"
] | permissive | sharebook-kr/book-cryptocurrency | 235b6998668265ec804451afddd245a52824f51a | 847ba97ba096c257b35f5e507cd33fa6a0724860 | refs/heads/master | 2022-12-14T05:24:52.765589 | 2022-11-30T01:35:08 | 2022-11-30T01:35:08 | 128,632,349 | 162 | 141 | Apache-2.0 | 2022-11-30T01:35:09 | 2018-04-08T11:05:17 | Python | UTF-8 | Python | false | false | 301 | py | import websockets
import asyncio
async def bithumb_ws_client():
uri = "wss://pubwss.bithumb.com/pub/ws"
async with websockets.connect(uri) as websocket:
greeting = await websocket.recv()
print(greeting)
async def main():
await bithumb_ws_client()
asyncio.run(main()) | [
"brayden.jo@outlook.com"
] | brayden.jo@outlook.com |
5606d5e545c1fac58c09a834fd3e0717b1e9cf54 | 7f57d9317c44a7b003fa312dff6f521b2f5eaeb1 | /043.py | d95c522700b72af416782c80801e8f364243152e | [] | no_license | concreted/project-euler | ee85e5cd13d2a94df638f1d0894404be24f1007e | cedd1c766bbd6ff8b2abb50734cf2aebeeb2e3f7 | refs/heads/master | 2016-09-06T05:02:09.903064 | 2014-08-19T18:44:56 | 2014-08-19T18:44:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,460 | py | """
Project Euler Problem 43
========================
The number, 1406357289, is a 0 to 9 pandigital number because it is made
up of each of the digits 0 to 9 in some order, but it also has a rather
interesting sub-string divisibility property.
Let d[1] be the 1st digit, d[2] be the 2nd digit, and so on. In this
way, we note the following:
* d[2]d[3]d[4]=406 is divisible by 2
* d[3]d[4]d[5]=063 is divisible by 3
* d[4]d[5]d[6]=635 is divisible by 5
* d[5]d[6]d[7]=357 is divisible by 7
* d[6]d[7]d[8]=572 is divisible by 11
* d[7]d[8]d[9]=728 is divisible by 13
* d[8]d[9]d[10]=289 is divisible by 17
Find the sum of all 0 to 9 pandigital numbers with this property.
"""
def permutations(digits):
if len(digits) == 1:
return [digits]
else:
results = []
for i in range(len(digits)):
subperms = permutations(digits[:i] + digits[i+1:])
for s in subperms:
results.append([digits[i]] + s)
return results
def property(digits):
return int(''.join(digits[1:4])) % 2 == 0 and int(''.join(digits[2:5])) % 3 == 0 and int(''.join(digits[3:6])) % 5 == 0 and int(''.join(digits[4:7])) % 7 == 0 and int(''.join(digits[5:8])) % 11 == 0 and int(''.join(digits[6:9])) % 13 == 0 and int(''.join(digits[7:10])) % 17 == 0
perms = permutations(['0','1','2','3','4','5','6','7','8','9'])
perms = [int(''.join(perm)) for perm in perms if property(perm)]
print sum(perms)
| [
"arichuang@gmail.com"
] | arichuang@gmail.com |
fb6392d17610ef3bd463e5e9c2c3d3564776dbff | 40c9545a2e3b33051522d39804dedadd2f870282 | /whatsapp_with_python.py | ce6aa30a8f891d1349fdb4a6b5a355d0cc60dab1 | [] | no_license | kavyasri-anupa/voice_assistant | 8cacd03b66f22b41c0d41897610122af17e5d95b | 81e0935f77b5ce5d25230532e06eb333f7f1349a | refs/heads/master | 2020-09-01T12:47:22.055550 | 2019-11-01T14:05:55 | 2019-11-01T14:05:55 | 218,960,807 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,314 | py | from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as ec
import time
import pyttsx3
import speech_recognition as sr
import pyaudio
r1=sr.Recognizer()
r2=sr.Recognizer()
r3=sr.Recognizer()
r4=sr.Recognizer()
def whatsapp_msg():
driver = webdriver.Chrome()
speech=pyttsx3.init()
driver.get("https://web.whatsapp.com/")
speech.say('scan qr')
speech.runAndWait()
time.sleep(10)
speech.say('to whom should i send message')
speech.runAndWait()
with sr.Microphone() as source:
audio=r1.listen(source)
try:
name=r1.recognize_google(audio)
print(name)
except sr.UnknownValueError:
speech.say("couldn't recognize ur voice")
speech.runAndWait()
print("couldn't recognize ur voice")
speech.say('what should i say')
speech.runAndWait()
with sr.Microphone() as source:
audio=r2.listen(source)
try:
msg=r1.recognize_google(audio)
print(msg)
except sr.UnknownValueError:
speech.say("couldn't recognize ur voice")
speech.runAndWait()
print("couldn't recognize ur voice")
speech.say("should i repeat")
speech.runAndWait()
user=driver.find_element_by_xpath('//span[contains(@title,'+'"'+name+'"'+')]')
wait=WebDriverWait(driver,20)
user.click()
with sr.Microphone() as source:
audio=r3.listen(source)
try:
cmnd=r3.recognize_google(audio)
print(cmnd)
if 'yes' in cmnd:
speech.say('enter how many times should i repeat?')
speech.runAndWait()
count= int(input())
msg_box=driver.find_element(By.XPATH,'//*[@id="main"]/footer/div[1]/div[2]/div/div[2]')
wait=WebDriverWait(driver,20)
msg_box.click()
for i in range(count):
msg_box.send_keys(msg)
button=driver.find_element(By.XPATH,'//*[@id="main"]/footer/div[1]/div[3]/button/span')
wait=WebDriverWait(driver,20)
button.click()
speech.say('message sent')
speech.runAndWait()
elif 'no' in cmnd:
msg_box=driver.find_element(By.XPATH,'//*[@id="main"]/footer/div[1]/div[2]/div/div[2]')
wait=WebDriverWait(driver,20)
msg_box.click()
msg_box.send_keys(msg)
button=driver.find_element(By.XPATH,'//*[@id="main"]/footer/div[1]/div[3]/button/span')
wait=WebDriverWait(driver,20)
button.click()
speech.say('message sent')
speech.runAndWait()
except sr.UnknownValueError:
speech.say("couldn't recognize ur voice")
speech.runAndWait()
print("couldn't recognize ur voice")
#//*[@id="pane-side"]/div[1]/div/div/div[10]/div/div/div[2]/div[1]/div[1]/span/span
whatsapp_msg()
| [
"noreply@github.com"
] | kavyasri-anupa.noreply@github.com |
8b68a20b8f3bbe0e4e6c7d6e85f4c3d5dfafa5f9 | a876b0eec9d4305640850e0813385f148a4fa806 | /3.6_More_Guests.py | a8e80141ea505da0c0f2f3895ff91cea1f130e5e | [] | no_license | Ken0706/Crash_Course_Book_Python | 8f422b0ad7041b4b09ac0ac4a1382782c91c91bb | 76a43da3cfebbf88d3b13aea61fe8ae68c2c8b52 | refs/heads/master | 2023-05-31T22:48:47.438291 | 2021-06-18T12:34:57 | 2021-06-18T12:34:57 | 368,716,624 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,076 | py | from random import randrange
guests = ["ken", "nah", "ben", "Sonat", "Luck"]
for guest in range(len(guests)):
print(f"\t\tWelcome {guests[guest].title()}, welcome to dinner with us - \
Your seat number is {guest + 1}!")
guest_cant_go = guests[randrange(len(guests))]
seat = guests.index(guest_cant_go)
print(f"--> {guest_cant_go.title()} cant go with seat number is {seat + 1} <--")
print(f"--> Tayah replace for {guest_cant_go.title()} <--")
for guest in range(len(guests)):
guests[seat] = "Tayah"
print(f"\t\tWelcome {guests[guest].title()}, welcome to dinner with us - \
Your seat number is {guest + 1}!")
msg = "\t--- Your seat numbers are changed as we have found \
a larger dinner table ---"
print(f"{msg.upper()}")
mid_seat = int(len(guests) / 2)
guests.insert(0,"Macro")
guests.insert(mid_seat,"Middle")
guests.append("Late")
"""guests.remove("Middle")
guests.remove("Late")
guests.remove("Macro")"""
for guest in range(len(guests)):
print(f"\t\tWelcome {guests[guest].title()}, welcome to dinner with us - \
Your seat number changed is {guest + 1}!")
| [
"duc.nguyenhuyhcm@gmail.com"
] | duc.nguyenhuyhcm@gmail.com |
b158bdf46a2603c770aaf9019b76ed11ecc472fd | ee2ad70e60a4cf5684eb1c85db99a9e4cb0b4641 | /caldera/views.py | d4e3eb9b5c050bab5a621312f64cb0e752182fea | [] | no_license | rahmatwidhiyana/caldera-id | 05998c032077d90bcaef225a3986b391feab6f5c | 227b99f33b608bac11d1bd0c90e91c46d9e8beb8 | refs/heads/master | 2022-10-17T10:49:41.344048 | 2020-06-09T14:31:07 | 2020-06-09T14:31:07 | 220,808,237 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,323 | py | from django.shortcuts import render, get_object_or_404
from .models import Category, Product
from cart.forms import CartAddProductForm
from cart.cart import Cart
# Create your views here.
def product_list(request, category_slug=None):
category = None
categories = Category.objects.all()
products = Product.objects.filter(available=True)
if category_slug:
category = get_object_or_404(Category, slug=category_slug)
products = Product.filter(category=category)
context={
'category': category,
'categories': categories,
'products': products
}
return render(request,'category.html',context)
def product_detail(request, id, slug):
product = get_object_or_404(Product, id=id, slug=slug, available=True)
cart_product_form = CartAddProductForm()
return render(request, 'single.html',{'product': product, 'cart_product_form': cart_product_form})
def product_list_by_category(request, category_slug):
category = get_object_or_404(Category, slug=category_slug)
products = Product.objects.filter(category=category)
categories = Category.objects.all()
context={
'category': category,
'categories': categories,
'products': products
}
return render(request,'category_list.html',context)
| [
"rahmatwidhiyana@gmail.com"
] | rahmatwidhiyana@gmail.com |
0907e493938b08ca78cb28ca7cb0ad08c3cee610 | 0a481a58880071eed558dca7983b54c1263a51ee | /Web-Scraper.py | 64d5701e89fa4ef9da38fd51ce7bcf38bcbfdf45 | [] | no_license | thermaltp/class | 0c528025f878dc65721d2fb06232c5a5cfc12be7 | 81b732552d21fa0b0394c412373bfbb95c298d18 | refs/heads/master | 2020-03-20T01:16:25.521237 | 2018-07-16T16:04:58 | 2018-07-16T16:04:58 | 137,071,100 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 517 | py | import requests
from bs4 import BeautifulSoup
url = 'https://pastebin.com/archive'
#url = 'https://www.yelp.com/search?find_desc=&find_loc=Augusta+GA'
paste_r = requests.get(url)
print(paste_r)
#paste_r = (paste_r.text)
soup = BeautifulSoup(paste_r.text, 'html.parser')
#print(soup.findAll('a'))
#for link in soup.findAll('a'):
# print(link)
#for name in soup.findAll('a', {'class': 'biz-name'}):
# print(name.text)
for name in soup.findAll('table', {'class': 'maintable'}):
print(name.text) | [
"noreply@github.com"
] | thermaltp.noreply@github.com |
0076fb29f2309bfda8ab09cfaf850d6ec7f8cfcd | f4a5f3b3a8a038fe5c9b49f91a678a6d0cce14fa | /ManagementSystem/webservice/vehicles/views.py | 23cf396c819047587a11bd2f15ae93b666c0c608 | [] | no_license | nikita-reva/SystemArchitectureProject | 803a46496e0d5f398a63a9ae3dc8c2ffd3465c0f | 8a6b43fcc0c8f6dea49b649c1023b871a39c0cad | refs/heads/master | 2022-11-23T19:37:02.034132 | 2020-07-30T11:36:17 | 2020-07-30T11:36:17 | 281,161,664 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 365 | py | from django.shortcuts import render
from django.views.generic import ListView, DetailView
from .models import Vehicle
class VehicleListView(ListView):
model = Vehicle
template_name = 'vehicles/vehicles.html' # <app>/<model>_<viewtype>.html
context_object_name = 'vehicles'
ordering = ['id']
class VehicleDetailView(DetailView):
model = Vehicle | [
"57528503+NikoReva@users.noreply.github.com"
] | 57528503+NikoReva@users.noreply.github.com |
953322509d2314ae1bdedfc3fbc4e2d39e83c00d | 19bebc7530516885928ced22b08357813fa48593 | /fab/common_features/__init__.py | b324d4122751dce608eceaf383398854b005dc81 | [] | no_license | funkystyle/fbb | 97af1187db5280e37d740e95abb0b367ec22d1b6 | 683dd67af4519b15679881eeb06d49a1554ae1d7 | refs/heads/master | 2021-01-21T01:02:48.073823 | 2017-04-22T18:50:04 | 2017-04-22T18:50:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 89 | py | from login_decorators import user_login_required
from views import *
from hooks import *
| [
"c_sveges@qti.qualcomm.com"
] | c_sveges@qti.qualcomm.com |
6b1308a92bd30267af62dd4f25cabf61610cbbe2 | eb5b9791349f1cc75b8e47fd80896e4fe9bf6061 | /games/tetris/games.py | 8beff569c277ec816ecc06d50003d1e6958bd3ba | [
"MIT"
] | permissive | Hellorelei/oc-2018 | 4cbaacbe443886e6c93cf74486f0027e21fc462a | 7961de5ba9923512bd50c579c37f1dadf070b692 | refs/heads/master | 2022-07-09T07:36:04.121651 | 2019-06-21T08:05:41 | 2019-06-21T08:05:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,655 | py | """
File: games.py
Author: Raphael Holzer
Date: 27.01.2019
This module imports 6 SenseHAT game modules :
- morpion
- game2048
- mines
- tetris
- connect4
- labyrinth
"""
from sense_hat import SenseHat
from gamelib import *
import morpion
import game2048
import mines
import tetris
import connect4
import labyrinth
def main():
"""Present a question mark (?) and allow to choose a game.
The left/right button increments/decrements the index number.
The up button displays the game name.
The middle button starts the selected game.
"""
active = True
sense = SenseHat()
games = ['morption', '2048', 'mines', 'connect4', 'tetris', 'labyrinth']
functions = [morpion.main, game2048.main, mines.main, connect4.main,
tetris.main, labyrinth.main]
i = 0
n = len(games)
sense.show_message('games', text_colour=BLUE)
sense.show_letter('?')
while active:
event = sense.stick.wait_for_event()
if event.action == 'pressed':
if event.direction == 'right':
i = (i+1) % n
elif event.direction == 'left':
i = (i-1) % n
elif event.direction == 'up':
sense.show_message(games[i], text_colour=GREEN)
elif event.direction == 'middle':
games[i]()
sense.stick.get_events()
sense.show_letter(str(i), text_colour=RED)
# Execute the main() function when the file is executed,
# but do not execute when the module is imported as a module.
print('module name =', __name__)
if __name__ == '__main__':
main() | [
"hugo.ducommunditverron@bugnon.educanet2.ch"
] | hugo.ducommunditverron@bugnon.educanet2.ch |
647b95ab69e4db8e87fa88c1ddb61b2b286a953d | f5816d67d235c992af171831f07e273cca56ec40 | /books/books/urls.py | fdef9f4efdb703778cd1c44f8e2779e0b3988a37 | [
"MIT"
] | permissive | ToteBrick/DRF | 19e27f40ccef2251c4513c56f75c54e0c04c01b3 | 1b6a51314ecf3784bd6fe84489f6564e1a30a4d4 | refs/heads/master | 2020-06-05T02:17:20.728393 | 2019-06-19T02:09:12 | 2019-06-19T02:09:12 | 192,279,099 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 833 | py | """books URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url,include
from django.contrib import admin
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^', include('apps.urls')), # 新增restful风格
]
| [
"402981948@qq.com"
] | 402981948@qq.com |
f53a88ff3344ca5bafbd2aaf313dd87c96ad25d5 | a0b9d5b509602f3bd5fd17d8987df76df2d800be | /AntiEvilTwin.py | 0ebeaa890b6849feb56548bf746062534fa80496 | [
"Apache-2.0"
] | permissive | Danielgimp/Evil-Twin-AP | 8503d3cf16f022b2028904f225ce51a67f7b6d75 | 3eedddd0361f70565c611853286f9e962eadc184 | refs/heads/main | 2023-06-30T22:33:42.154936 | 2021-07-29T10:32:24 | 2021-07-29T10:32:24 | 390,113,356 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,133 | py | import os
import sys
import time
from scapy.layers.dot11 import Dot11Beacon
from scapy.sendrecv import sniff
WifiNetworks = {}
EvilTwinWifiNetworks = {}
def scanEvilAP(pkt):
if pkt.haslayer(Dot11Beacon): # Check if the packet has an Wireless information
if pkt.type == 0 and pkt.subtype == 8:
# Subtype 8 is a beacon frame is one of the management frames (type=0) in IEEE 802.11 based WLANs.
# It contains all the information about the network. Beacon frames are transmitted periodically,
# they serve to announce the presence of a wireless LAN and to synchronise the members of the service set.
# Beacon frames are transmitted by the access point (AP)
# Check if there are SSID's in the network
if not (pkt.info.decode("utf-8") in WifiNetworks):
# if the SSID is encrypted in any way [OPN,WEP,WPA,WPA2] add them to WifiNetworks
if not ('OPN' in pkt[Dot11Beacon].network_stats()['crypto']):
WifiNetworks[pkt.info.decode("utf-8")] = pkt.addr3
# If those SSID's do not include any security measurements add them to the EvilTwinWifiNetworks dictionary
# This is true since in our Evil Twin attack Hostpad initiates an unencrypted Wifi Hotspot
elif (WifiNetworks[pkt.info.decode(
"utf-8")] != pkt.addr3): # check if the MAC not equals to the ap's mac in the dict
if 'OPN' in pkt[Dot11Beacon].network_stats()['crypto']:
if not (pkt.info.decode("utf-8") in EvilTwinWifiNetworks):
print("%s" % (pkt.info.decode("utf-8")))
EvilTwinWifiNetworks[pkt.info.decode("utf-8")] = pkt.info.decode("utf-8")
else:
pass
else:
pass
def makeMonitorMode(adapterName):
print('Changing %s to Monitoring Mode' % adapterName)
# First we need to enter into Monitor mode for the given adapter (0.5 of waiting was added to make sure it is not too fast)
os.system('sudo ifconfig %s down' % adapterName) # turn off the adapter (software)
time.sleep(0.5)
os.system('sudo iwconfig %s mode monitor' % adapterName) # set into monitoring mode
time.sleep(0.5)
os.system('sudo ifconfig %s up' % adapterName) # turn on the adapter (software)
time.sleep(0.5)
def main():
adapterName = sys.argv[1]
print('Changing %s to Monitoring Mode' % adapterName)
# First we need to enter into Monitor mode for the given adapter (0.5 of waiting was added to make sure it is not too fast)
os.system('sudo ifconfig %s down' % adapterName) # turn off the adapter (software)
time.sleep(0.5)
os.system('sudo iwconfig %s mode monitor' % adapterName) # set into monitoring mode
time.sleep(0.5)
os.system('sudo ifconfig %s up' % adapterName) # turn on the adapter (software)
time.sleep(0.5)
print("These are the Evil Twin Wifi Acess Points:")
sniff(prn=scanEvilAP, iface=adapterName, count=5000) # iface - interface to sniff , prn - function
if __name__ == '_main_':
main()
| [
"noreply@github.com"
] | Danielgimp.noreply@github.com |
1f80b008f647abe0f8150c2b78046cdac3ed2395 | ce8b0b2aca794d7c5b68a94806eb50dab8bfeb98 | /model.py | 8e69d2a73cf5f3aa9014fb2da277ae80f621b732 | [] | no_license | rtiwari5317/Salary-Prediction-Problem-Till-Deployment | 9b350fb59c81a947a9a2bb74460e91fa3b3db27c | 1574df54e7ce98a0b2d1401ef27a46e11b32dc29 | refs/heads/master | 2023-03-20T03:40:39.368275 | 2020-06-12T16:29:19 | 2020-06-12T16:29:19 | 271,839,199 | 0 | 0 | null | 2021-03-20T04:19:39 | 2020-06-12T16:07:51 | Python | UTF-8 | Python | false | false | 526 | py | # -*- coding: utf-8 -*-
"""
Created on Fri Jun 12 14:46:30 2020
@author: Rahul3.Tiwari
"""
import pandas as pd
#mport numpy as np
import pickle
dta_st = pd.read_csv('C:\\Users\\rahul3.tiwari\\Desktop\\salaries.csv')
X = dta_st.drop(['Salary'],axis=1)
y = dta_st['Salary']
from sklearn.linear_model import LinearRegression
reg = LinearRegression()
reg.fit(X,y)
pickle.dump(reg, open('model-salary.pkl','wb'))
model = pickle.load(open('model-salary.pkl','rb'))
print(model.predict([[3.5]]))
| [
"noreply@github.com"
] | rtiwari5317.noreply@github.com |
9a4b55c49ddbd6bf15ee9f95c0e49e1c0aa461d2 | 0c7e54178e89a4dad98deb8265c2cb41ca3399b9 | /backend/strawberry_forms/tests/test_mutations.py | 470971ab95ea4f9bf7f8c47ae52ac75af4feafda | [
"MIT"
] | permissive | marlenebDC/pycon | 4394bf7a0aecc5aa8ae0b378900d86c2afc7fab7 | 175f2ee9e8698bbcf15dd33d2eb4739fee04c6d7 | refs/heads/master | 2023-07-07T00:34:39.932779 | 2020-01-21T01:00:24 | 2020-01-21T01:00:24 | 235,290,754 | 0 | 0 | MIT | 2023-06-23T23:35:11 | 2020-01-21T08:30:15 | null | UTF-8 | Python | false | false | 1,670 | py | import strawberry
from django.forms import Form, IntegerField
from strawberry_forms.mutations import FormMutation
def test_form_mutation_without_context():
class TestForm(Form):
a = IntegerField()
def save(self, *args, **kwargs):
return "hello"
class TestMutation(FormMutation):
class Meta:
form_class = TestForm
@strawberry.input
class TestInput:
a: int
assert TestMutation.Mutation(None, TestInput(a=1)) == "hello"
def test_form_mutation_response_can_be_converted_using_transform_method():
class TestForm(Form):
a = IntegerField()
def save(self, *args, **kwargs):
return "hello"
class TestMutation(FormMutation):
@classmethod
def transform(cls, result):
return "world"
class Meta:
form_class = TestForm
@strawberry.input
class TestInput:
a: int
assert TestMutation.Mutation(None, TestInput(a=1)) == "world"
def test_form_mutation_transform_is_not_required():
class TestForm(Form):
a = IntegerField()
def save(self, *args, **kwargs):
return "hello"
class TestMutation(FormMutation):
class Meta:
form_class = TestForm
@strawberry.input
class TestInput:
a: int
assert TestMutation.Mutation(None, TestInput(a=1)) == "hello"
def test_mutation_without_input():
class TestForm(Form):
def save(self, *args, **kwargs):
return "ciao"
class TestMutation(FormMutation):
class Meta:
form_class = TestForm
assert TestMutation.Mutation(None) == "ciao"
| [
"marcoaciernoemail@gmail.com"
] | marcoaciernoemail@gmail.com |
225c14407e2eba431953f219ed8ecc4582a965c5 | 8b54570140861ffbe464e244f9f49ba55e341577 | /linux/ovirt-guest-tray.py | cdc740419735bb0eb6c99bfa8f3a70c09adf3c55 | [
"Apache-2.0"
] | permissive | vinzenz/ovirt-guest-agent-tray | 36569d149b7082e8129fbe5c462869bfeb8bf779 | 581a73f3ff4431a6a17f6ff9bc3d64f2b23ff586 | refs/heads/master | 2016-09-06T07:41:07.988384 | 2014-07-30T13:55:46 | 2014-07-30T13:57:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,422 | py | #!/usr/bin/env python
import gtk
class TrayIcon:
def __init__(self, *args, **kwargs):
self.icon = gtk.StatusIcon()
self.icon.set_from_file('ovirt-icon-48.svg')
self.icon.connect('popup-menu', self.on_popup_menu)
def on_about(self, *args, **kwargs):
dlg = gtk.Dialog("About the oVirt Guest Agent",
None,
gtk.DIALOG_MODAL | gtk.DIALOG_DESTROY_WITH_PARENT,
(gtk.STOCK_OK, gtk.RESPONSE_ACCEPT))
label1 = gtk.Label("oVirt Guest Agent for Linux")
label1.show()
label2 = gtk.Label("Version 3.6.0")
label2.show()
label3 = gtk.Label("oVirt Guest Agent is running.")
label3.show()
sw = gtk.ScrolledWindow()
sw.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)
sw.set_shadow_type(gtk.SHADOW_ETCHED_IN)
textview = gtk.TextView()
textview.set_editable(False)
textview.set_cursor_visible(False)
textview.set_sensitive(False)
sw.add(textview)
buffer = textview.get_buffer()
lic = '<Copyright information here>'
try:
f = open('/usr/share/ovirt-guest-agent/COPYING', 'r')
lic = f.read()
f.close()
except (OSError,IOError):
pass
buffer.insert(buffer.get_end_iter(), lic)
textview.show()
sw.show()
dlg.vbox.set_homogeneous(False)
dlg.vbox.pack_start(label1, fill=False, expand=False, padding=4)
dlg.vbox.pack_start(label2, fill=False, expand=False, padding=4)
dlg.vbox.pack_start(sw, fill=True, expand=True, padding=4)
dlg.vbox.pack_start(label3, fill=False, expand=False, padding=4)
dlg.set_default_size(640, 480)
dlg.run()
dlg.destroy()
def on_popup_menu(self, icon, event_button, event_time):
menu = gtk.Menu()
about = gtk.MenuItem('About')
about.show()
about.connect('activate', self.on_about)
menu.append(about)
sep = gtk.SeparatorMenuItem()
sep.show()
menu.append(sep)
quit = gtk.MenuItem('Quit')
quit.show()
menu.append(quit)
quit.connect('activate', gtk.main_quit)
menu.popup(None, None, gtk.status_icon_position_menu, event_button, event_time, self.icon)
if __name__ == '__main__':
icon = TrayIcon()
gtk.main()
| [
"vfeenstr@redhat.com"
] | vfeenstr@redhat.com |
9a6f1583701d24e3546563c643bd0c71b2b8b0e9 | 462d0746a917974ab8a166ef060fe665a40eedb6 | /multivar-multiply.py | dfa490e6e5b046b4b5b27bc55485259dd53b8242 | [
"MIT"
] | permissive | meghdadFar/mv-ncc-extractor | 4c2f84007782cbeb9aeb4cd6cb006ed7b21522fa | 5e9c4ef1ce2372cf8be7c7ec2e3d923c8c5337e0 | refs/heads/master | 2020-04-11T19:17:54.194796 | 2018-12-16T18:34:50 | 2018-12-16T18:34:50 | 162,029,450 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,274 | py | import scipy
from IO import read_sdma, read_pmi, read_score, reddy_ncs
from util import element_wise
if __name__ == '__main__':
sdmas = read_sdma('/Users/svm/Resources/non-comp/ncs/reddy_sdma2.txt')
pmis, npmis = read_pmi('/Users/svm/Resources/non-comp/ncs/_pmi_npmi.txt')
additive = read_score('/Users/svm/Resources/non-comp/scores/additive_scores.txt')
reg = read_score('/Users/svm/Resources/non-comp/scores/reg_scores.txt')
eval_ncs, eval_scores = reddy_ncs('/Users/svm//Resources/non-comp/ncs/MeanAndDeviations.clean.txt')
sdmas_list = []
pmis_list = []
npmis_list = []
additive_list = []
reg_list = []
for k in eval_ncs:
sdmas_list.append(float(sdmas[k]))
pmis_list.append(float(pmis[k]))
npmis_list.append(float(npmis[k]))
additive_list.append(float(additive[k]))
reg_list.append(float(reg[k]))
print 'Spearman rho bet. human score and additive score ', scipy.stats.spearmanr(additive_list, eval_scores)
print 'Spearman rho bet. human score and reg score ', scipy.stats.spearmanr(reg_list, eval_scores)
mult = element_wise(reg_list, sdmas_list, npmis_list)
print 'Spearman rho bet. human score and mult score ', scipy.stats.spearmanr(mult, eval_scores)
| [
"meghdad.farahmand@gmail.com"
] | meghdad.farahmand@gmail.com |
6a99400f10f09d4c70a14c23a6f38abae3506ed9 | a4cf5929b8e635555f692406e8add1095a8485f5 | /benchmark/indexes/containment/utils.py | a3872932acfc9c0df52d2208f871c378071bae53 | [
"MIT"
] | permissive | ekzhu/datasketch | 6004513baa14bc22b4b7bed102a1b515c77bd6e4 | d24c9838be9020090c8525ca29e7447e2289b00a | refs/heads/master | 2023-09-04T00:10:33.286427 | 2023-09-02T02:40:25 | 2023-09-02T02:40:25 | 32,555,448 | 2,208 | 320 | MIT | 2023-09-12T07:57:40 | 2015-03-20T01:21:46 | Python | UTF-8 | Python | false | false | 827 | py | import numpy as np
def get_precision_recall(found, reference):
reference = set(reference)
intersect = sum(1 for i in found if i in reference)
if len(found) == 0:
precision = 0.0
else:
precision = float(intersect) / float(len(found))
if len(reference) == 0:
recall = 1.0
else:
recall = float(intersect) / float(len(reference))
if len(found) == len(reference) == 0:
precision = 1.0
recall = 1.0
return [precision, recall]
def fscore(precision, recall):
if precision == 0.0 and recall == 0.0:
return 0.0
return 2.0 / (1.0 / precision + 1.0 / recall)
def average_fscore(founds, references):
return np.mean([fscore(*get_precision_recall(found, reference))
for found, reference in zip(founds, references)])
| [
"noreply@github.com"
] | ekzhu.noreply@github.com |
18eca07b025774850fbd69e33abf2544c170f3ae | 58d9da41bacd775007c97e639d6051cd443a1148 | /user/views.py | a4ca5b4705bcd5144ff54527dae6993548a29872 | [] | no_license | arhamshaikhSWE/Django-Rest-API | bd5b875bdf162b7aae46167444cd3102684a6f04 | 25fe833bf884a66240fb87b88d174d894545d1ae | refs/heads/main | 2023-04-17T05:17:01.804788 | 2020-10-30T22:14:12 | 2020-10-30T22:14:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,282 | py | from django.contrib.auth import get_user_model
from rest_framework import generics, authentication, permissions
from rest_framework.authtoken.views import ObtainAuthToken
from rest_framework.settings import api_settings
from user.serializers import UserSerializer, AuthTokenSerializer
class ListUserView(generics.ListAPIView):
"""Create a new user in the system"""
queryset = get_user_model().objects.all().order_by('-id')
serializer_class = UserSerializer
authentication_classes = (authentication.TokenAuthentication,)
permission_classes = (permissions.IsAuthenticated,)
class CreateUserView(generics.CreateAPIView):
"""Create a new user in the system"""
serializer_class = UserSerializer
class CreateTokenView(ObtainAuthToken):
"""Create a new auth token for user"""
serializer_class = AuthTokenSerializer
renderer_classes = api_settings.DEFAULT_RENDERER_CLASSES
class ManageUserView(generics.RetrieveUpdateAPIView):
"""Manage the authenticated user"""
serializer_class = UserSerializer
authentication_classes = (authentication.TokenAuthentication,)
permission_classes = (permissions.IsAuthenticated,)
def get_object(self):
"""Retrieve and return authentication user"""
return self.request.user
| [
"med.elkhalki0@gmail.com"
] | med.elkhalki0@gmail.com |
899792034c993806533598690dee73c774e0f116 | 5c52e6c32477a75334ff144e4d1fa23031cd62e3 | /create_features.py | cc87c2b7e3f42b3c69b1089a6ffe981086fd4647 | [] | no_license | mlaugharn/WideDeep-MIND-metric-learning | f66eae48111fa6e11472e3b3d663f3d6401ef0c8 | 7f1ac584629162f1a3874fd6900a23dee77fb9ae | refs/heads/main | 2023-04-30T05:29:32.423042 | 2021-05-16T20:21:06 | 2021-05-16T20:21:06 | 367,757,361 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,537 | py | # creates X_wide, X_tab, X_wide_te, X_tab_te and saves them to csv files
import pathlib
# Paths to the dataset splits; each folder is expected to contain
# behaviors.tsv (impression logs) and news.tsv (article metadata).
config = {
    'train_folder': pathlib.Path('./smalldataset/train/'),
    'test_folder': pathlib.Path('./smalldataset/val/')
}
from tqdm.autonotebook import tqdm
import os
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from pytorch_widedeep import Trainer
from pytorch_widedeep.preprocessing import WidePreprocessor, TabPreprocessor
from pytorch_widedeep.models import Wide, TabMlp, WideDeep
from pytorch_widedeep.metrics import Accuracy
import pandas as pd
import logging
logging.basicConfig(format='%(asctime)s %(message)s', level=logging.DEBUG)
# silly thing required to use dict arguments w/ LRU cache
def hash_dict(func):
    """Decorator that makes dict arguments hashable before calling *func*.

    Every ``dict`` passed positionally or by keyword is replaced with a
    hashable dict subclass, so the wrapped callable is compatible with
    ``functools.lru_cache``.
    """

    class HDict(dict):
        # Hash on the frozen item set: equal dicts hash equally.
        def __hash__(self):
            return hash(frozenset(self.items()))

    def _freeze(value):
        return HDict(value) if isinstance(value, dict) else value

    @functools.wraps(func)
    def wrapped(*args, **kwargs):
        frozen_args = tuple(_freeze(a) for a in args)
        frozen_kwargs = {name: _freeze(v) for name, v in kwargs.items()}
        return func(*frozen_args, **frozen_kwargs)

    return wrapped
# --- Load the raw TSV files for both splits --------------------------------
logging.debug('loading impressions df...')
impressions_df = pd.read_csv(config['train_folder'] / 'behaviors.tsv', sep='\t', header=None)
logging.debug('loaded impressions df')
logging.debug('loading impressions_df for test..')
impressions_df_te = pd.read_csv(config['test_folder'] / 'behaviors.tsv', sep='\t', header=None)
logging.debug('loaded impressions df for test')
news_df = pd.read_csv(config['train_folder'] / 'news.tsv', sep='\t', header=None)
logging.debug('loaded news df')
news_df_te = pd.read_csv(config['test_folder'] / 'news.tsv', sep='\t', header=None)
logging.debug('loaded news df for test')
# The TSVs ship without a header row; assign the expected column names.
impressions_cols = ['impression_id', 'user_id', 'time', 'history', 'impressions']
news_cols = ['news_id', 'category', 'subcategory', 'title', 'abstract', 'url', 'title_entities', 'abstract_entities']
impressions_df.columns = impressions_cols
impressions_df_te.columns = impressions_cols
news_df.columns = news_cols
news_df_te.columns = news_cols
# replace NaN title entities with []
# replace NaN abstract entities with []
# replace NaN abstracts with ''
from ast import literal_eval
def clean_news(news_df):
    """Normalise missing fields of a news frame, in place.

    Null entity columns become empty lists, null abstracts become '', and
    the stringified entity columns are parsed into Python objects.  The
    same frame is returned for chaining.
    """
    # Backfill nulls before parsing: the entity columns hold stringified
    # lists, so null -> '[]' keeps literal_eval happy.
    missing_title = news_df.loc[news_df.title_entities.isnull(), 'title_entities'].index
    for idx in missing_title:
        news_df.at[idx, 'title_entities'] = '[]'
    missing_abstract = news_df.loc[news_df.abstract_entities.isnull(), 'abstract_entities'].index
    for idx in missing_abstract:
        news_df.at[idx, 'abstract_entities'] = '[]'
    news_df.fillna({'abstract': ''}, inplace=True)
    # Parse the stringified entity lists into real lists of dicts.
    news_df['title_entities'] = news_df['title_entities'].map(literal_eval)
    news_df['abstract_entities'] = news_df['abstract_entities'].map(literal_eval)
    return news_df
news_df = clean_news(news_df)
logging.debug('cleaned news')
news_df_te = clean_news(news_df_te)
logging.debug('cleaned news for test')
def clean_impressions(impressions_df):
    """Normalise a behaviors frame, in place.

    Null click histories become empty strings, then both the history and
    impressions columns are split on whitespace into lists of news ids.
    The same frame is returned for chaining.
    """
    null_history = impressions_df.loc[impressions_df.history.isnull(), 'history'].index
    for idx in tqdm(null_history):
        impressions_df.at[idx, 'history'] = ''
    # ''.split() yields [], so users with no history get an empty list.
    impressions_df['history'] = impressions_df['history'].str.split()
    impressions_df['impressions'] = impressions_df['impressions'].str.split()
    return impressions_df
impressions_df = clean_impressions(impressions_df)
logging.debug('cleaned impressions')
impressions_df_te = clean_impressions(impressions_df_te)
logging.debug('cleaned impressions for test')
# Map news_id -> row index for O(1) article lookups in get_news_features.
news_id_idxer = dict(zip(news_df['news_id'], news_df.index)) #{newsid:idx for idx, newsid in news_df['news_id'].to_dict().items()}
logging.debug('indexed news ids')
news_id_idxer_te = dict(zip(news_df_te['news_id'], news_df_te.index)) # {newsid:idx for idx, newsid in news_df_te['news_id'].to_dict().items()}
logging.debug('indexed news ids for test')
import functools
@functools.lru_cache(maxsize=5000000)
def get_news_features(news_id, kind = 'train'):
    """Return {'category', 'subcategory', 'title', 'abstract'} for a news id.

    The id is looked up in the train index first, then the test index;
    ids present in neither get a placeholder record of 'None' strings so
    downstream feature code still works.  Results are memoised because the
    same article appears in many impressions.

    NOTE(review): ``kind`` is never read in the body (the split is chosen
    by index membership instead), but it still widens the lru_cache key —
    the same article can be cached once per kind.
    """
    # category
    global news_df
    global news_df_te
    if news_id in news_id_idxer:
        df = news_df
        idxer = news_id_idxer
    elif news_id in news_id_idxer_te:
        df = news_df_te
        idxer = news_id_idxer_te
    else:
        # Unknown article: string placeholders instead of raising.
        return {'category': 'None', 'subcategory': 'None', 'title': 'None', 'abstract': 'None'}
    # idxer = news_id_idxer if kind == 'train' else news_id_idxer_te
    # df = news_df if kind == 'train' else news_df_te
    idx = idxer[news_id]
    news = df.iloc[idx]
    category = news.category
    subcategory = news.subcategory
    title = news.title
    abstract = news.abstract
    # # first 4 title entities and first 8 abstract entities should normally be more than enough
    # title_entity1 = news.title_entities[0]['Label'] if len(news.title_entities) > 0 else None
    # title_entity2 = news.title_entities[1]['Label'] if len(news.title_entities) > 1 else None
    # title_entity3 = news.title_entities[2]['Label'] if len(news.title_entities) > 2 else None
    # title_entity4 = news.title_entities[3]['Label'] if len(news.title_entities) > 3 else None
    # abstract_entity1 = news.abstract_entities[0]['Label'] if len(news.abstract_entities) > 0 else None
    # abstract_entity2 = news.abstract_entities[1]['Label'] if len(news.abstract_entities) > 1 else None
    # abstract_entity3 = news.abstract_entities[2]['Label'] if len(news.abstract_entities) > 2 else None
    # abstract_entity4 = news.abstract_entities[3]['Label'] if len(news.abstract_entities) > 3 else None
    # abstract_entity5 = news.abstract_entities[4]['Label'] if len(news.abstract_entities) > 4 else None
    # abstract_entity6 = news.abstract_entities[5]['Label'] if len(news.abstract_entities) > 5 else None
    # abstract_entity7 = news.abstract_entities[6]['Label'] if len(news.abstract_entities) > 6 else None
    # abstract_entity8 = news.abstract_entities[7]['Label'] if len(news.abstract_entities) > 7 else None
    return {'category': category,
            'subcategory': subcategory,
            'title': title,
            'abstract': abstract,
            # 'title_entity1': title_entity1,
            # 'title_entity2': title_entity2,
            # 'title_entity3': title_entity3,
            # 'title_entity4': title_entity4,
            # 'abstract_entity1': abstract_entity1,
            # 'abstract_entity2': abstract_entity2,
            # 'abstract_entity3': abstract_entity3,
            # 'abstract_entity4': abstract_entity4,
            # 'abstract_entity5': abstract_entity5,
            # 'abstract_entity6': abstract_entity6,
            # 'abstract_entity7': abstract_entity7,
            # 'abstract_entity8': abstract_entity8
            }
#impression_id_idxer = {impression['impression_id']:i for i, impression in impressions_df.iterrows()}
#impression_id_idxer_te = {impression['impression_id']:i for i, impression in impressions_df_te.iterrows()}
# --- Aggregate clicked articles per user -----------------------------------
# A user's click set is their full history plus every impression entry whose
# trailing label character is '1' (clicked).
logging.debug('processing clicks..')
user_clicks = {} # list of news articles that a user has clicked
for i, impression in tqdm(impressions_df.iterrows(), total=len(impressions_df)):
    user = impression.user_id
    if user not in user_clicks: user_clicks[user] = set()
    user_clicks[user].update(impression.history)
    # for h in impression.history:
    #     user_clicks[user].add(h)
    #     print(impression.history)
    for x in impression.impressions:
        if x[-1] == '1':
            # Entries look like 'N12345-1'; strip the '-label' suffix.
            n, click = x[:-2], x[-1]
            user_clicks[user].add(n)
print('processing test clicks..')
# NOTE(review): this aliases the SAME dict object, so test-split clicks are
# merged into the train click sets rather than tracked separately — confirm
# this is intended before relying on a clean train/test separation.
user_clicks_te = user_clicks
for i, impression in tqdm(impressions_df_te.iterrows(), total=len(impressions_df_te)):
    user = impression.user_id
    if user not in user_clicks_te: user_clicks_te[user] = set()
    user_clicks_te[user].update(impression.history)
    # # else:
    # #     for h in impression.history:
    # #         user_clicks_te[user].add(h)
    # #         print(impression.history)
    for x in impression.impressions:
        if x[-1] == '1':
            n, click = x[:-2], x[-1]
            user_clicks_te[user].add(n)
@functools.lru_cache(maxsize=500000)
def get_user_feats(user, kind = 'train'):
    """Build aggregate click features for one user.

    Returns a dict with the user's id, total click count, and the top-3
    categories/subcategories (by click frequency) with their counts,
    padded with '' / 0 when the user has fewer than three distinct values.

    Users absent from the click dict get a placeholder record.
    NOTE(review): the placeholder uses 'None' strings where the padded
    path uses '' — downstream code presumably treats both as missing;
    confirm before changing either.
    """
    # kind 'train'/'val' reads user_clicks; anything else reads user_clicks_te.
    clicks = user_clicks if (kind == 'train' or kind == 'val') else user_clicks_te
    if user not in clicks:
        return {'cat0': 'None', 'cat1': 'None', 'cat2': 'None', 'cat_counts0': 0, 'cat_counts1': 0, 'cat_counts2': 0, 'subcat0': 'None', 'subcat1': 'None', 'subcat2': 'None', 'subcat_counts0': 0, 'subcat_counts1': 0, 'subcat_counts2': 0,
                'uid': user, 'clicks': 0}
    articles_feats = [get_news_features(article, kind) for article in clicks[user]]
    articles_feats_df = pd.DataFrame(articles_feats)
    # Rank categories by click frequency and pad to exactly three entries.
    cat_counts = articles_feats_df['category'].value_counts()
    cats = cat_counts.index.tolist()
    spaces_to_add = max(3 - len(cats), 0)
    cats = cats + [''] * spaces_to_add
    cat_counts = cat_counts.values.tolist() + [0] * spaces_to_add
    subcat_counts = articles_feats_df['subcategory'].value_counts()
    spaces_to_add = max(3 - len(subcat_counts), 0)
    subcats = subcat_counts.index.tolist() + [''] * spaces_to_add
    subcat_counts = subcat_counts.values.tolist() + [0] * spaces_to_add
    click_counts = len(articles_feats_df)
    return_val = {'uid': user, 'clicks': click_counts}
    # (A previous no-op loop that reassigned cats[i] = cats[i] etc. was
    # removed; the padding above already guarantees three entries.)
    return_val['cat0'] = cats[0]
    return_val['cat1'] = cats[1]
    return_val['cat2'] = cats[2]
    return_val['cat_counts0'] = cat_counts[0]
    return_val['cat_counts1'] = cat_counts[1]
    return_val['cat_counts2'] = cat_counts[2]
    return_val['subcat0'] = subcats[0]
    return_val['subcat1'] = subcats[1]
    return_val['subcat2'] = subcats[2]
    return_val['subcat_counts0'] = subcat_counts[0]
    return_val['subcat_counts1'] = subcat_counts[1]
    return_val['subcat_counts2'] = subcat_counts[2]
    return return_val
import functools
#
# user features + news article features + impression features | clicked?
# user features + news article features + impression features | clicked?
def create_data_df(impressions_df, news_df, kind = 'train'):
    """Flatten impressions into one row per (user, candidate article).

    Each row joins the user's aggregate click features, the candidate
    article's features and per-impression metadata with a boolean click
    label.  For kind 'train'/'val' the label is parsed from the trailing
    '-0'/'-1' of each impression entry; for kind 'test' labels are unknown
    and set to False.

    Note: ``news_df`` is not read here — article features come from
    get_news_features, which reads the module-level frames.  (A dead
    ``user_newsid_histories`` accumulator, only ever written, was removed.)
    """
    examples = []
    labels_known = kind == 'train' or kind == 'val'
    for i, impression in tqdm(impressions_df.iterrows(), total=len(impressions_df)):
        user_id = impression.user_id
        impression_id = impression.impression_id
        user_features = get_user_feats(user_id, kind)
        impression_feats = {'time': impression['time'], 'impression_id': impression_id, 'numchoices': len(impression['impressions'])}
        if kind in ('train', 'val', 'test'):
            for impression_news in impression.impressions:
                # Entries look like 'N12345-1'; the suffix is the click label.
                news_id, clicked = impression_news[:-2], bool(int(impression_news[-1]))
                if not labels_known:
                    clicked = False
                news_feats = get_news_features(news_id, kind)
                example = {**user_features, **news_feats, **impression_feats, 'clicked': clicked, 'history': False}
                examples.append(example)
    return pd.DataFrame(examples)
print('creating train df...')
df_train = create_data_df(impressions_df, news_df, 'train')
print('created train df')
print('creating test df...')
# NOTE(review): the held-out split is built with kind='val', so its click
# labels are parsed from the impressions rather than zeroed out.
df_test = create_data_df(impressions_df_te, news_df_te, 'val')
print('created test df')
# df_train, df_test = train_test_split(train_df, test_size=0.2, stratify=train_df.clicked)
print(df_train.head())
print(df_test.head())
print('created df_train, df_test')
# Columns one-hot/cross-encoded for the wide (linear) part of the model.
wide_cols = [
    'cat0', 'cat1', 'cat2',
    # 'cat_counts0', 'cat_counts1',
    'history',
    # 'cat_counts2',
    'subcat0', 'subcat1', 'subcat2',
    # 'subcat_counts0',
    # 'subcat_counts1', 'subcat_counts2',
    'category', 'subcategory',
    'numchoices'
]
cross_cols = [('cat0', 'cat1'), ('cat0', 'subcat0')]
# (column, embedding dimension) pairs for the deep/tabular component.
embed_cols = [
    ('cat0', 16),
    ('cat1', 16),
    ('cat2', 16),
    ('subcat0', 16),
    ('subcat1', 16),
    ('subcat2', 16)
]
cont_cols = ['clicks', 'cat_counts0', 'cat_counts1', 'cat_counts2', 'subcat_counts0', 'subcat_counts1', 'subcat_counts2', 'numchoices']
target_col = 'clicked'
target = df_train['clicked'].values
wide_preprocessor = WidePreprocessor(wide_cols=wide_cols, crossed_cols=cross_cols)
print('fitting x_wide...')
X_wide = wide_preprocessor.fit_transform(df_train)
print('made X_wide')
# wide_dim is the vocabulary size of the encoded wide features;
# pred_dim=128 sets the wide component's output dimension.
wide = Wide(wide_dim = np.unique(X_wide).shape[0], pred_dim=128)
tab_preprocessor = TabPreprocessor(embed_cols=embed_cols, continuous_cols=cont_cols)
print('making x_tab..')
X_tab = tab_preprocessor.fit_transform(df_train)
print('made X_tab')
print('making x_wide_te...')
# Test-split features use transform (not fit_transform) so the vocab/stats
# fitted on the train split are reused.
X_wide_te = wide_preprocessor.transform(df_test)
print('made X_wide_te')
print('making x_tab_te..')
X_tab_te = tab_preprocessor.transform(df_test)
print('made X_tab_te')
import pickle
# Persist the feature matrices, fitted preprocessors and the wide model so
# the training script can load them without re-running this pipeline.
with open('X_wide.pkl', 'wb') as f: pickle.dump(X_wide, f)
with open('X_tab.pkl', 'wb') as f: pickle.dump(X_tab, f)
with open('X_wide_te.pkl', 'wb') as f: pickle.dump(X_wide_te, f)
with open('X_tab_te.pkl', 'wb') as f: pickle.dump(X_tab_te, f)
with open('wide_preprocessor.pkl', 'wb') as f: pickle.dump(wide_preprocessor, f)
with open('wide.pkl', 'wb') as f: pickle.dump(wide, f)
with open('tab_preprocessor.pkl', 'wb') as f: pickle.dump(tab_preprocessor, f)
df_train.to_csv('df_train.csv')
df_test.to_csv('df_test.csv')
# import IPython
# IPython.embed()
"mlaugharn@gmail.com"
] | mlaugharn@gmail.com |
a910b5b585ed60bf665f352a55e4b400b57059b2 | 5297baeb39c46e385233668dac0c59c3733f14c5 | /code/careerjet.py | bc9c6577fb8716c86223bbffaa295d2874704f68 | [
"MIT"
] | permissive | sethchart/DataJobs | 03f9623eb966f2473cbd00a0ebd660d3445333a5 | 4cb2b03c22ad319eb675106f3dde6d56032c98cd | refs/heads/main | 2023-03-07T16:01:32.386898 | 2021-01-28T22:03:36 | 2021-01-28T22:03:36 | 322,624,973 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,534 | py | """
This module provides the Scraper class, which encapsulates interaction with
careerjet.com through the selenium webdriver. It provides the key methods
required for executing a scrape of job postings.
"""
__author__ = "Seth Chart"
__version__ = "0.1.0"
__license__ = "MIT"
from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
from bs4 import BeautifulSoup
from time import sleep
class Scraper(object):
    """Scraper. Wraps interactions with careerjet.com through the selenium
    webdriver.
    """

    def __init__(self):
        """Open a browser and navigate to the first job posting."""
        self.browser = None
        self._initialize_scraper()

    def scrape_page(self):
        """Scrape the current job posting.

        Returns a dict with the posting's title, url and description.
        """
        soup = self._get_current_page_soup()
        page_data = {
            'title': self._get_title(soup),
            'url': self.browser.current_url,
            'description': self._get_description(soup)
        }
        return page_data

    def next_page(self):
        """Advance the browser to the next job posting.

        Clicks the 'next' button in the navigation bar, then blocks (up to
        10 seconds) until the browser URL actually changes.
        """
        # (Fixed: this method previously carried a second, redundant
        # docstring string literal that was a no-op statement.)
        nav_bar = self.browser.find_element_by_class_name('nav')
        next_button = nav_bar.find_element_by_class_name('next')
        current_url = self.browser.current_url
        next_button.click()
        wait = WebDriverWait(self.browser, 10)
        wait.until(lambda x: x.current_url != current_url)

    @staticmethod
    def _get_title(soup):
        """Extract the job title from the parsed posting."""
        title = soup.find('h1').text
        return title

    @staticmethod
    def _get_description(soup):
        """Extract the description text from the parsed posting."""
        description = soup.find('section', class_='content').text
        return description

    def _get_current_page_soup(self):
        """Parse the current job posting's HTML with BeautifulSoup."""
        page = self.browser.page_source
        soup = BeautifulSoup(page, 'html.parser')
        return soup

    def _initialize_scraper(self):
        """Open a new browser with the first page of careerjet search results.

        Then click on the first job posting, leaving the scraper ready to
        scrape the first job listing.
        """
        self.browser = webdriver.Chrome()
        self.browser.get('https://www.careerjet.com/search/jobs?l=USA&s=data')
        first_job_posting = self.browser.find_element_by_class_name('job')
        first_job_link = first_job_posting.find_element_by_tag_name('a')
        first_job_link.click()
| [
"seth.chart@protonmail.com"
] | seth.chart@protonmail.com |
3b5944c68a5832d155a5560b5d9b36cd9f4092f3 | a38d4dc96dbb6bebfe35c6379640d736144d6046 | /app/helpers/__init__.py | 5941b090c16bc2e25d61d1a55b05dd5fe2b84271 | [] | no_license | RoadRunner11/Adaya | efdc5199692f47e9143765109a71ac872b9b0cb6 | a0a3af719988321adac7d9c60f59d1a37a19a014 | refs/heads/master | 2023-03-28T19:40:01.359741 | 2021-04-01T20:53:59 | 2021-04-01T20:53:59 | 353,808,140 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 140 | py | from app.helpers import utility
from app.helpers.singleton import SingletonMetaClass
from app.helpers.enum import Messages, Roles,Responses
| [
"hao@sporule.com"
] | hao@sporule.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.