seq_id
stringlengths
4
11
text
stringlengths
113
2.92M
repo_name
stringlengths
4
125
sub_path
stringlengths
3
214
file_name
stringlengths
3
160
file_ext
stringclasses
18 values
file_size_in_byte
int64
113
2.92M
program_lang
stringclasses
1 value
lang
stringclasses
93 values
doc_type
stringclasses
1 value
stars
int64
0
179k
dataset
stringclasses
3 values
pt
stringclasses
78 values
74630975144
import math import os.path import re import html def calcScore(now, best): if now<1e-9: return 0 #return now / best return best / now def isBetter(now, best): if now<1e-9: return False #return best < now return now < best def main(): import sqlite3 db = sqlite3.connect('mm.sqlite3') cur = db.cursor() cur.execute('select run_id, name, source, created_at from runs') CREATED_AT = {} NAME = {} SRC = {} for run_id, name, source, created_at in cur.fetchall(): NAME[run_id] = name SRC[run_id] = source CREATED_AT[run_id] = created_at cur.execute('select run_id, test_id, sec, stdout, stderr from results order by run_id, test_id') R = {} for run_id, test_id, sec, text_stdout, text_stderr in cur.fetchall(): if run_id not in R: R[run_id] = 0 R[run_id] += 1 pattern = re.compile(r'(\w+) *[=:] *([\d\.]+)') T = {} S = {} TIME = {} cur.execute('select run_id, test_id, sec, stdout, stderr from results order by run_id, test_id') for run_id, test_id, sec, text_stdout, text_stderr in cur.fetchall(): if R[run_id] != 100: continue if run_id not in S: S[run_id] = {} if test_id not in T: T[test_id] = {} if run_id not in TIME: TIME[run_id] = [sec, sec, sec] else: TIME[run_id][0] = min(TIME[run_id][0], sec) TIME[run_id][1] += sec TIME[run_id][2] = max(TIME[run_id][2], sec) S[run_id][test_id] = -1 for text in (text_stdout, text_stderr): for line in text.split("\n"): m = pattern.match(line) if m: if m.group(1).lower()=='score': S[run_id][test_id] = float(m.group(2)) else: T[test_id][m.group(1)] = float(m.group(2)) BEST = {} BEST_COUNT = {} for run_id in S: for test_id in S[run_id]: if test_id not in BEST or isBetter(S[run_id][test_id], BEST[test_id]): BEST[test_id] = S[run_id][test_id] BEST_COUNT[test_id] = 1 elif BEST[test_id] == S[run_id][test_id]: BEST_COUNT[test_id] += 1 T2 = {} for test_id in T: for name in T[test_id]: if name not in T2: T2[name] = [] T2[name].append(T[test_id][name]) T2 = {name: sorted(T2[name]) for name in T2} print(T2) def splitKind(values): target = len(values) / 3 
best = len(values) best_i = 0 for i in range(1, len(values)): if values[i-1]!=values[i]: sc = abs(i-target) if best is None or sc<best: best = sc best_i = i assert best_i is not None for j in range(10): sep = ('{:.%df}' % (j, )).format((values[best_i-1]+values[best_i])/2) sep_f = float(sep) if values[best_i-1] < sep_f < values[best_i]: break best = len(values) best_i = len(values)-1 for i in range(len(values)-1, 0, -1): if values[i-1]!=values[i]: sc = abs(len(values)-i-target) if best is None or sc<best: best = sc best_i = i assert best_i is not None for j in range(10): sep2 = ('{:.%df}' % (j, )).format((values[best_i-1]+values[best_i])/2) sep2_f = float(sep2) if values[best_i-1] < sep2_f < values[best_i]: break return sep, sep2 T3 = {name: splitKind(T2[name]) for name in T2} print(T3) import http.server import urllib.parse class MyHandler(http.server.BaseHTTPRequestHandler): def getSource(self, query): self.send_response(200) self.send_header('Content-Type', 'text/plain; charset=utf-8') self.end_headers() run_id = int(query.get('id', [])[0]) self.wfile.write(SRC[run_id].encode()) def getDetail(self, query): self.send_response(200) self.send_header('Content-Type', 'text/html; charset=utf-8') self.end_headers() param = query.get('PARAM', []) run_id = int(query.get('id', [])[0]) query.pop('id') htmls = [] htmls.append('<html>') htmls.append('<head>') htmls.append('<title>MM Analytics</title>') htmls.append('</head>') htmls.append('<body>') htmls.append(f'<h3>Name: {html.escape(f"{NAME[run_id]}")}</h3>') htmls.append(f'<a href="/?{urllib.parse.urlencode(query, True)}">[TOP]</a>') htmls.append(f'<a href="/source?id={run_id}">[SOURCE]</a>') htmls.append('<hr />') if 2<=len(param): sum_score = [0]*9 sum2_score = [0]*9 count_score = [0]*9 bests = [0]*9 uniques = [0]*9 fails = [0]*9 for test_id in S[run_id]: kind = 4 if T[test_id][param[0]]<float(T3[param[0]][0]): kind -= 1 elif float(T3[param[0]][1])<T[test_id][param[0]]: kind += 1 if 
T[test_id][param[1]]<float(T3[param[1]][0]): kind -= 3 elif float(T3[param[1]][1])<T[test_id][param[1]]: kind += 3 if 0 < S[run_id][test_id]: sc1 = calcScore(S[run_id][test_id], BEST[test_id]) sum_score[kind] += sc1 sum2_score[kind] += sc1*sc1 else: fails[kind] += 1 count_score[kind] += 1 if BEST[test_id] == S[run_id][test_id]: bests[kind] += 1 if BEST_COUNT[test_id]==1: uniques[kind] += 1 #for kind in range(3): # score = '{:.3f}'.format(100 * sum_score[kind] / count_score[kind]) # htmls.append(f'<td align="right">{score}</td><td align="right">{bests[kind]}</td><td align="right">{uniques[kind]}</td><td align="right">{fails[kind]}</td>') htmls.append('<table border="1">') htmls.append(f'<tr><td rowspan="2"></td><th colspan="6">{T3[param[0]][0]}&gt;</th><th colspan="6">{param[0]}</th><th colspan="6">&gt;{T3[param[0]][1]}</th></tr>') htmls.append('<tr>') for i in range(3): htmls.append('<th>Score</th><th>Std</th><th>Bests</th><th>Uniqs</th><th>Fails</th><th>Tests</th>') htmls.append('</tr>') labels = [f'{T3[param[1]][0]}&gt;', f'{param[1]}', f'&gt;{T3[param[1]][1]}'] for y in range(3): htmls.append(f'<tr><th>{labels[y]}</th>') for x in range(3): kind = y * 3 + x avg_score = sum_score[kind] / count_score[kind] score = '{:.3f}'.format(100 * avg_score) std_score = '{:.3f}'.format(100 * math.sqrt((sum2_score[kind] - sum_score[kind]*avg_score) / count_score[kind])) htmls.append(f'<td align="right">{score}</td><td align="right">{std_score}</td><td align="right">{bests[kind]}</td><td align="right">{uniques[kind]}</td><td align="right">{fails[kind]}</td><td align="right">{count_score[kind]}</td>') htmls.append('</tr>') htmls.append('</table>') htmls.append('</body>') htmls.append('</html>') self.wfile.write("\n".join(htmls).encode()) def getIndex(self, query): if 'id' in query or 'name' in query: if 'id' in query and 'name' in query: cur.execute('update runs set name = ? 
where run_id = ?', (query['name'][-1], int(query['id'][-1]))) NAME[int(query['id'][-1])] = query['name'][-1] db.commit() query.pop('id') query.pop('name') self.send_response(302) self.send_header('Location', '/?' + urllib.parse.urlencode(query, True)) self.end_headers() return self.send_response(200) self.send_header('Content-Type', 'text/html; charset=utf-8') self.end_headers() param = query.get('PARAM', []) htmls = [] htmls.append('<html>') htmls.append('<head>') htmls.append('<title>MM Analytics</title>') htmls.append('</head>') htmls.append('<body>') htmls.append(''' <script> function change_name(id, value) { var new_value = window.prompt(id + "'s name =", value); if(new_value===null) { return false; } var href = window.location.href; if(0<=href.indexOf("?")) { href = href + "&"; } else { href = href + "?"; } window.location.href = href + new URLSearchParams({id: id, name: new_value}).toString(); } </script> ''') for name in T3: if name not in param: htmls.append(f'<p>_ <a href="/?{urllib.parse.urlencode({**query, "PARAM": param + [name]}, True)}">{name}: {T3[name][0]}, {T3[name][1]}</a></p>') else: param2 = list(param) param2.remove(name) htmls.append(f'<p>v <a href="/?{urllib.parse.urlencode({**query, "PARAM": param2}, True)}">{name}: {T3[name][0]}, {T3[name][1]}</a></p>') htmls.append('<table border="1">') htmls.append('<tr><th rowspan="2">ID</th><th rowspan="2">CREATED_AT</th><th rowspan="2">NAME</th><th colspan="3">Time</th><th colspan="6">Whole</th>') for name in param: htmls.append(f'<th colspan="6">{T3[name][0]}&gt;</th>') htmls.append(f'<th colspan="6">{name}</th>') htmls.append(f'<th colspan="6">&gt;{T3[name][1]}</th>') htmls.append('</tr>') htmls.append('<tr>') htmls.append('<th>MIN</th><th>AVG</th><th>MAX</th>') htmls.append('<th>Score</th><th>Std</th><th>Bests</th><th>Uniqs</th><th>Fails</th><th>Tests</th>') for name in param: htmls.append('<th>Score</th><th>Std</th><th>Bests</th><th>Uniqs</th><th>Fails</th><th>Tests</th>') 
htmls.append('<th>Score</th><th>Std</th><th>Bests</th><th>Uniqs</th><th>Fails</th><th>Tests</th>') htmls.append('<th>Score</th><th>Std</th><th>Bests</th><th>Uniqs</th><th>Fails</th><th>Tests</th>') htmls.append('</tr>') for run_id in reversed(list(S.keys())): sum_score = 0 sum2_score = 0 count_score = 0 bests = 0 uniques = 0 fails = 0 for test_id in S[run_id]: if 0 < S[run_id][test_id]: sc1 = calcScore(S[run_id][test_id], BEST[test_id]) sum_score += sc1 sum2_score += sc1*sc1 else: fails += 1 count_score += 1 if BEST[test_id] == S[run_id][test_id]: bests += 1 if BEST_COUNT[test_id]==1: uniques += 1 avg_score = sum_score / count_score score = '{:.3f}'.format(100 * avg_score) std_score = '{:.3f}'.format(100 * math.sqrt((sum2_score - sum_score*avg_score) / count_score)) sec_min = '{:.3f}'.format(TIME[run_id][0]) sec_avg = '{:.3f}'.format(TIME[run_id][1] / count_score) sec_max = '{:.3f}'.format(TIME[run_id][2]) htmls.append(f'<tr><td><a href="/detail?{urllib.parse.urlencode({**query, "id": run_id}, True)}">{run_id}</a></td><td>{CREATED_AT[run_id]}</td><td><a href="javascript: change_name({run_id}, &quot;{urllib.parse.quote(f"{NAME[run_id]}")}&quot;)">{html.escape(f"{NAME[run_id]}")}</a></td><td align="right">{sec_min}</td><td align="right">{sec_avg}</td><td align="right">{sec_max}</td><td align="right">{score}</td><td align="right">{std_score}</td><td align="right">{bests}</td><td align="right">{uniques}</td><td align="right">{fails}</td><td align="right">{count_score}</td>') for name in param: sum_score = [0]*3 sum2_score = [0]*3 count_score = [0]*3 bests = [0]*3 uniques = [0]*3 fails = [0]*3 for test_id in S[run_id]: kind = 1 if T[test_id][name]<float(T3[name][0]): kind = 0 elif float(T3[name][1])<T[test_id][name]: kind = 2 if 0 < S[run_id][test_id]: sc1 = calcScore(S[run_id][test_id], BEST[test_id]) sum_score[kind] += sc1 sum2_score[kind] += sc1*sc1 else: fails[kind] += 1 count_score[kind] += 1 if BEST[test_id] == S[run_id][test_id]: bests[kind] += 1 if 
BEST_COUNT[test_id]==1: uniques[kind] += 1 for kind in range(3): avg_score = sum_score[kind] / count_score[kind] score = '{:.3f}'.format(100 * avg_score) std_score = '{:.3f}'.format(100 * math.sqrt((sum2_score[kind] - sum_score[kind]*avg_score) / count_score[kind])) htmls.append(f'<td align="right">{score}</td><td align="right">{std_score}</td><td align="right">{bests[kind]}</td><td align="right">{uniques[kind]}</td><td align="right">{fails[kind]}</td><td align="right">{count_score[kind]}</td>') htmls.append(f'</tr>') htmls.append('</table>') htmls.append('</body>') htmls.append('</html>') self.wfile.write("\n".join(htmls).encode()) def do_GET(self): path, qs = (self.path.split('?') + [''])[:2] query = urllib.parse.parse_qs(qs) #query = {q: (query[q]+[''])[-1] for q in query} if path=='/': return self.getIndex(query) if path=='/detail': return self.getDetail(query) elif path=='/source': return self.getSource(query) elif path=='/favicon.ico': self.send_response(200) self.send_header('Content-Type', 'image/x-icon') self.end_headers() self.wfile.write(open(os.path.join(os.path.dirname(__file__), 'favicon.ico'), 'rb').read()) else: self.send_response(200) self.send_header('Content-Type', 'text/html; charset=utf-8') self.end_headers() htmls = [] htmls.append('<html>') htmls.append('<body>') htmls.append(self.path) htmls.append(f'{query}') htmls.append('</body>') htmls.append('</html>') self.wfile.write("\n".join(htmls).encode()) with http.server.HTTPServer(('', 8080), MyHandler) as server: print('start httpd ...') server.serve_forever()
colun/mmlang
src/mmhttpd.py
mmhttpd.py
py
15,984
python
en
code
21
github-code
36
70010896105
# 구구단2단 출력하는 함수를 만들어 보세요 # gugudan def gugudan(dan) : for j in range(1,10): print(dan,'x', j, '=', dan*j) for dan in range(2, 20+1): gugudan(dan) # 이거 하는데 1시간 걸림
Kyeongrok/python_yla
com/week2/am/01_gugudan.py
01_gugudan.py
py
232
python
ko
code
1
github-code
36
4578620215
import numpy as np def main(x): result = 1 k = 2 while x> 1.99: if int(x**(1/k)) == 1: break elif int(x**(1/k)) == x**(1/k): result *= k x = x**(1/k) k = 2 else: k+=1 if result<=1: return "NO" return result print(1000**(1/3)) print(int(1000**(1/3))) n = int(input()) for i in range(n): print(main(int(input())))
naphattar/Betaprogramming
Chapter 1/1043.py
1043.py
py
428
python
en
code
0
github-code
36
38715715582
#!/usr/bin/env python3 import hashlib key = 'iwrupvqb' i = 1 while True: s = '{}{}'.format(key, i) h = hashlib.md5(s.encode('ascii')).hexdigest() if h.startswith('00000'): print("Answer for 5 zeros is {} ({})".format(i, s)) break i += 1 i = 1 while True: s = '{}{}'.format(key, i) h = hashlib.md5(s.encode('ascii')).hexdigest() if h.startswith('000000'): print("Answer for six zeros is {} ({})".format(i, s)) break i += 1
lvaughn/advent
2015/4/advent_coin.py
advent_coin.py
py
488
python
en
code
1
github-code
36
8660975044
from PYmodule import * log10Ms = [9,10,11,12] typenames = ['H'+r'$_2$', 'H-H'+r'$_2$', 'H-H'] pres = ['./data/1e9','./data/1e10','./data/1e11','./data/1e12','./data/1e13'] print('z=6 to 4, t in Myr: ', (t_from_z(4)-t_from_z(6))/Myr) M_grow_ratio = 5. f_lambda = np.log(M_grow_ratio) * t_Edd / (t_from_z(4)-t_from_z(6)) print(f_lambda) exit(0) i_bsm = 0 iM = 2 T_tell = 8000 eta = 0.3 T=ascii.read(pres[iM]+'Jcol_'+str(i_bsm)+'.txt', guess=False,delimiter=' ') # None has np.where(T['z_col']==-1) T['Mdot'] = (k_B*T['Tg_loi']*T['f_loi']/(mu*m_H))**1.5/G/(Ms/yr) T['Mstar0'] = np.zeros(len(T)) T['Mstar_z'] = np.zeros(len(T)) T['Lbol_z'] = np.zeros(len(T)) T['M1450_z'] = np.zeros(len(T)) for i in range(len(T)): T['Mstar0'][i] = Mdot2M(T['Mdot'][i]*eta) T['on'] = np.ones(len(T)) T['Edd_ratio'] = np.ones(len(T)) # print("max T['Edd_ratio']=",np.max(T['Edd_ratio'])) abin_mf = np.logspace(2,12,num=40) # default endpoint=True wid_mf = abin_mf[1:]-abin_mf[:-1] dlog10M = np.log10(abin_mf[1]/abin_mf[0]) # print(dlog10M) for z in [4,6]: f_duty = 0.5 mu_fit = .3 sigma_fit = .15 N_concatenate = int(1e0) # if z==6: # N_concatenate = int(1e4) # else: # N_concatenate = int(1e0) h0_mf = np.zeros(len(abin_mf)-1); h1_mf = np.zeros(len(abin_mf)-1); h2_mf = np.zeros(len(abin_mf)-1) for i_concatenate in range(N_concatenate): # T = T[T['z_col']>z] T['Edd_ratio'] = lognorm.rvs(sigma_fit*np.log(10), scale=mu_fit, size=len(T)) # scatter=0.1dex; center=scale for i in range(len(T)): # T['Edd_ratio'][i] = .3 T['Mstar_z'][i] = T['Mstar0'][i] * np.exp( (t_from_z(z)-t_from_z(T['z_col'][i])) / t_Edd * f_duty* T['Edd_ratio'][i] ) # print(np.argmax(T['Mstar_z'])," max Mstar:", np.max(T['Mstar_z']), "corresponding Edding_ratio",T['Edd_ratio'][np.argmax(T['Mstar_z'])], "t_grow Myr",(t_from_z(6)-t_from_z(T['z_col'][np.argmax(T['Mstar_z'])]))/Myr) T_H2 = T[T['Tg_max']<=T_tell] T_isofail = T[np.logical_and(T['Tg_max']>T_tell, T['iso_col']==0)] T_isoOK = T[np.logical_and(T['Tg_max']>T_tell, T['iso_col']==1)] 
hist0_mf, bin_edges = np.histogram(T_H2['Mstar_z'],bins=abin_mf,density=False) hist1_mf, bin_edges = np.histogram(T_isofail['Mstar_z'],bins=abin_mf,density=False) hist2_mf, bin_edges = np.histogram(T_isoOK['Mstar_z'],bins=abin_mf,density=False) h0_mf += hist0_mf*n_base[iM]/(1e4*N_concatenate)/dlog10M*f_duty h1_mf += hist1_mf*n_base[iM]/(1e4*N_concatenate)/dlog10M*f_duty h2_mf += hist2_mf*n_base[iM]/(1e4*N_concatenate)/dlog10M*f_duty fig, ax = plt.subplots(1,2,figsize=(20,10),dpi=400) ax[0].plot(abin_mf, MF(abin_mf,z),label='Willott 2010 or extrapolate') x = (abin_mf[:-1]+abin_mf[1:])/2. ax[0].bar(x, h0_mf,width=wid_mf,color='C'+str(0),alpha=0.5,label=typenames[0]) ax[0].bar(x, h1_mf,width=wid_mf,bottom=h0_mf,color='C'+str(1),alpha=0.5,label=typenames[1]) ax[0].bar(x, h2_mf,width=wid_mf,bottom=(h0_mf+h1_mf),color='C'+str(2),alpha=0.5,label=typenames[2]) ax[0].tick_params(labelsize=fstick) ax[0].set_xlabel(r'$\mathrm{M_{\bullet}}$',fontsize=fslabel) ax[0].set_ylabel(r'$\mathrm{\Phi}$'+' '+r'$\mathrm{[Mpc^{-3}dex^{-1}]}$',fontsize=fslabel) ax[0].set_xscale('log'); ax[0].set_yscale('log') ax[0].set_title('z='+str(int(z)),fontsize=fslabel) ax[0].set_xlim(1e7,10**10.5); ax[0].set_ylim(1.e-9,1e-4) ax[0].grid(True) ax[0].legend(fontsize=fslegend,loc='best') ax[1].plot(abin_mf, MF(abin_mf,z),label='Willott 2010 or extrapolate') ax[1].bar(x, h0_mf,width=wid_mf,color='C'+str(0),alpha=0.5,label=typenames[0]) ax[1].bar(x, h1_mf,width=wid_mf,bottom=h0_mf,color='C'+str(1),alpha=0.5,label=typenames[1]) ax[1].bar(x, h2_mf,width=wid_mf,bottom=(h0_mf+h1_mf),color='C'+str(2),alpha=0.5,label=typenames[2]) ax[1].tick_params(labelsize=fstick) ax[1].set_xlabel(r'$\mathrm{M_{\bullet}}$',fontsize=fslabel) ax[1].set_ylabel(r'$\mathrm{\Phi}$'+' '+r'$\mathrm{[Mpc^{-3}dex^{-1}]}$',fontsize=fslabel) ax[1].set_xscale('log'); ax[1].set_yscale('log') ax[1].set_title('z='+str(int(z)),fontsize=fslabel) ax[1].set_xlim(abin_mf[0],abin_mf[1]); ax[1].set_ylim() ax[1].grid(True) 
ax[1].legend(fontsize=fslegend,loc='best') plt.savefig(figpre+'MF_z'+str(int(z))+'eta'+str(int(10*eta))+'f'+str(f_duty)+'mu'+str(mu_fit)+'sigma'+str(sigma_fit)+'N'+str(int(np.log10(N_concatenate)))+'.png') ascii.write(Table([(abin_mf[:-1]+abin_mf[1:])/2.,h0_mf,h1_mf,h2_mf, ],names=['bin_center','hist_0','hist_1','hist_2']), datapre+'histBHmass_z'+str(int(z))+'eta'+str(int(10*eta))+'f'+str(f_duty)+'mu'+str(mu_fit)+'sigma'+str(sigma_fit)+'N'+str(int(np.log10(N_concatenate))),overwrite=True)
lovetomatoes/BHMF
Mdist_grow.py
Mdist_grow.py
py
4,788
python
en
code
0
github-code
36
26425710929
#coding: latin-1 import numpy as np def shell_tube_eff(NTU,Cr,nshell=1): # # shell and tube (nshell shell pass) # NTU is the total NTU NTUn = NTU/nshell G = np.sqrt(1+Cr**2) y = np.exp(-NTUn*G) ep1 = 2/(1+Cr+G*(1+y)/(1-y)) if nshell > 1: if Cr == 1: ep = nshell*ep1/(1+ep1*(n-1)) else: z = (1-ep1*Cr)/(1-ep1) ep = (z**nshell-1)/(z**nshell-Cr) else: ep = ep1 return ep def shell_tube_NTU(ep,Cr,nshell=1): # # shell and tube (nshell shell pass) # NTU is the total NTU G = np.sqrt(1+Cr**2) if nshell > 1: if Cr ==1: ep1 = ep/(nshell - ep*(n-1)) else: F = ((ep*Cr -1)/(ep-1))**(1/nshell) ep1 = (F-1)/(F-Cr) else: ep1 = ep E = (2/ep1 - (1+Cr))/G if E > 1: NTU1 = -np.log((E-1)/(E+1))/G NTU = nshell*NTU1 else: print('impossible') NTU = -999 return NTU def F_coef_shell_tube(Tci,Tco,Thi,Tho,N): P = (Tco-Tci)/(Thi-Tci) R = (Thi-Tho)/(Tco-Tci) A = 2/P - 1 - R B = 2/P*np.sqrt((1-P)*(1-P*R)) num = np.sqrt(R**2 + 1)/(R-1)*np.log((1-P)/(1-P*R)) if N == 1: den = np.log((A+np.sqrt(R**2+1))/(A-np.sqrt(R**2+1))) F = num/den else: den = 2*np.log((A+B+np.sqrt(R**2+1))/(A+B-np.sqrt(R**2+1))) F = num/den return F def counter_flow_NTU(ep,Cr): if Cr < 1: NTU = 1/(Cr-1)*np.log((ep-1)/(ep*Cr-1)) else: NTU = ep/(1.0-ep) return NTU def counter_flow_eff(NTU,Cr): if Cr < 1: ep = (1-np.exp(-NTU*(1-Cr)))/(1-Cr*np.exp(-NTU*(1-Cr))) else: ep = NTU/(1+NTU) return ep
LouisLamarche/Fundamentals-of-Geothermal-Heat-Pump-Systems
lib/heat_exchanger_md.py
heat_exchanger_md.py
py
1,678
python
en
code
1
github-code
36
38930582027
import torch from torch import nn def conv_block(in_channel, channel, kernel_size=3, stride=1, padding=1, inplace=False): norm = nn.InstanceNorm2d return nn.Sequential( nn.Conv2d(in_channel, channel, kernel_size=kernel_size, stride=stride, padding=padding, bias=True), norm(channel, affine=True, momentum=0.4), nn.ELU(inplace=inplace) ) class ResNextBlock(nn.Module): """ resnext不是太好,将原先的+改为了cat,残差不变 我想命名为 ResNextX """ def __init__(self, in_channel, out_channel, split_num: int = 4, **kwargs): super().__init__() mid_channel = 4 self.split_num = split_num blocks = [] kernel_style = [ (1, 1), (1, 3), (3, 1), (3, 3) ] padding_style = [ (k1 // 2, k2 // 2) for k1, k2 in kernel_style ] inplace = kwargs.get("inplace", True) for i in range(split_num): blocks.append(nn.Sequential( conv_block(in_channel, mid_channel, kernel_size=kernel_style[i], stride=1, padding=padding_style[i], inplace=inplace), conv_block(mid_channel, mid_channel, kernel_size=3, stride=1, padding=1, inplace=inplace), conv_block(mid_channel, out_channel, kernel_size=kernel_style[i], stride=1, padding=padding_style[i], inplace=inplace), # conv_block(out_channel, mid_channel, kernel_size=1, stride=1, padding=0, inplace=inplace), # conv_block(mid_channel, mid_channel, kernel_size=3, stride=1, padding=1, inplace=inplace), # conv_block(mid_channel, out_channel, kernel_size=1, stride=1, padding=0, inplace=inplace) )) self.scale = conv_block(out_channel * split_num, out_channel, kernel_size=1, stride=1, padding=0, inplace=inplace) self.blocks = nn.ModuleList(blocks) if in_channel != out_channel: self.skip = nn.Conv2d(in_channel, out_channel, kernel_size=1, stride=1, padding=0) else: self.skip = None def forward(self, x): if self.skip: res = self.skip(x) else: res = x outputs = [] for i in range(self.split_num): outputs.append(torch.add(self.blocks[i](x), res)) x = self.scale(torch.cat(outputs, dim=1)) return torch.add(x, res)
QiliangFan/Drive
models/resnetx.py
resnetx.py
py
2,452
python
en
code
0
github-code
36
8230873640
import sys import parser ''' Grammar: program = statement statement = seq | single seq = single; seq | single; single single = assign | if | for assign = var = expr var = x | y const = 0 | 1 expr = var | const | expr + var | expr + const if = if cmp then statement else statement endif for = for var = expr; cmp; expr do statement endfor cmp = expr == expr | expr > expr | expr < expr ''' class ForLang(object): def parsing_error(self, tok1, tok2): print(self.input) print("Error: expecting %s but get %s at location %d" % (tok1, str(tok2), self.p.cur)) sys.exit(1) def debug(self, msg): if self.is_debug: print(msg) def lookAHead(self): if self.nextToken is None: self.nextToken = self.p.next_token() return self.nextToken def fetch(self): token = self.lookAHead() self.nextToken = self.p.next_token() return token def parse(self, input, debug=False): self.is_debug = debug self.input = input self.p = parser.tokenizer(input) self.nextToken = None return self.parseProgram() def parseProgram(self): return self.parseStatement() def parseStatement(self): self.debug('parsing statement') s1 = self.parseSingle() t = self.lookAHead() if t == ';': self.fetch() s2 = self.parseStatement() return ['<SEQ>', s1, s2] else: return s1 def parseSingle(self): self.debug('parsing single') t = self.lookAHead() if t == 'if': return self.parseIf() elif t == 'for': return self.parseFor() else: return self.parseAssign() def match(self, token): t = self.fetch() if t != token: self.parsing_error(token, t) def parseIf(self): self.debug('parsing if') self.match('if') cmp = self.parseCmp() self.match('then') then_branch = self.parseStatement() self.match('else') else_branch = self.parseStatement() self.match('endif') return ['<IF>', cmp, then_branch, else_branch] def parseFor(self): self.debug('parsing for') self.match('for') var = self.parseVar() self.match('=') init = self.parseExpr() self.match(';') cond = self.parseCmp() self.match(';') inc = self.parseExpr() self.match('do') stat = 
self.parseStatement() self.match('endfor') return ['<FOR>', var, init, cond, inc, stat] def parseAssign(self): self.debug('parsing assign') var = self.parseVar() self.match('=') expr = self.parseExpr() return ['<ASSIGN>', var, expr] def parseVar(self): self.debug('parsing var') v = self.fetch() if type(v) != type('a'): self.parsing_error('<var>', v) return v def parseConst(self): v = self.fetch() if type(v) != type(10): self.parsing_error('<num>', v) return v def parseExpr(self): self.debug('parsing expr') v1 = self.fetch() while self.lookAHead() == '+' or self.lookAHead() == '-': if self.lookAHead() == '+': self.match('+') v2 = self.fetch() v1 = ['<Op+>', v1, v2] else: self.match('-') v2 = self.fetch() v1 = ['<Op->', v1, v2] return ['<Expr>', v1] def parseCmp(self): self.debug('parsing cmp') e1 = self.parseExpr() op = self.lookAHead() if op != '==' and op != '>' and op != '<': self.parsing_error('==|>|<', op) op = self.fetch() e2 = self.parseExpr() return ['<CMP>', e1, op, e2] def printast(self, ast, indent): id = ast[0] if type(ast) == type([]) else ast print('\t'*indent + str(id)) if type(ast) == type([]): for i in range(1, len(ast)): self.printast(ast[i], indent + 1) def test1(): program = '''for i=1; i<10; i+1 do if x>0 then y=y+1 else y=y endif endfor; a = a + 1 ''' forlang = ForLang() p = forlang.parse(program, debug=True) forlang.printast(p, 0) if __name__ == '__main__': test1() pass
lsiddiqsunny/Undergraduate-Thesis
Code and data set of Tree to tree neural network progration/Tree2Tree-master/src/For2Lam/lang_for.py
lang_for.py
py
4,461
python
en
code
3
github-code
36
14825110137
import sys import os import itertools import numpy as np import enum OUTPUT_FILE = 'vector_type_predef.hpp' STATIC_BUFFER_TYPE = 'static_buffer' DEVICE_HOST_MACRO = 'DEVICE_HOST' def gen_p2_array(n): i = 1 rtn = [] while i <= n: rtn.append(i) i = i * 2 return rtn class vector_type(object): def __init__(self, n): self.n = n def __call__(self, fp): n = self.n fp.write(f'template <typename T> struct vector_type<T, {n}> {{\n') for d in gen_p2_array(n): if d == 1: fp.write(f' using d1_t = T;\n') else: fp.write(f' typedef T d{d}_t __attribute__((ext_vector_type({d})));\n') fp.write(f' using type = d{n}_t;\n') fp.write(f' union {{\n') fp.write(f' type d{n}_;\n') for d in gen_p2_array(n): fp.write(f' {STATIC_BUFFER_TYPE}<d{d}_t, {n // d}> d{d}x{n // d}_;\n') fp.write(f' }} data_;\n') fp.write(f' {DEVICE_HOST_MACRO} constexpr vector_type() : data_{{type{{0}}}} {{}}\n') fp.write(f' {DEVICE_HOST_MACRO} constexpr vector_type(type v) : data_{{v}} {{}}\n') fp.write(f' template<typename VEC> {DEVICE_HOST_MACRO} constexpr const auto& to_varray() const {{ return data_.d{n}_; }}\n') fp.write(f' template<typename VEC> {DEVICE_HOST_MACRO} constexpr auto& to_varray() {{ return data_.d{n}_; }}\n') for d in gen_p2_array(n): fp.write(f' template<> {DEVICE_HOST_MACRO} constexpr const auto& to_varray<d{d}_t>() const {{ return data_.d{d}x{n//d}_;}}\n') fp.write(f' template<> {DEVICE_HOST_MACRO} constexpr auto& to_varray<d{d}_t>() {{ return data_.d{d}x{n//d}_;}}\n') fp.write(f'}};\n') def gen(file_name): fp = None try: fp = open(file_name, "w") except IOError as e: print("can't open file:{}({})".format(file_name, e)) sys.exit() fp.write(f'template <typename T, index_t N>\n') fp.write(f'struct vector_type;\n') fp.write(f'\n') fp.write(f'// clang-format off\n') for n in [1, 2, 4, 8, 16, 32, 64, 128, 256]: vector_type(n)(fp) fp.write(f'\n') fp.write(f'// clang-format on\n') if __name__ == '__main__': output_file = OUTPUT_FILE if len(sys.argv) >= 2: output_file = sys.argv[1] gen(output_file)
carlushuang/gcnasm
hgemm_mfma/gen_vec_type.py
gen_vec_type.py
py
2,420
python
en
code
6
github-code
36
27900592036
# -*- coding: utf-8 -*- import numpy as np def moranI(W,X): ''' W:空间权重矩阵 X:观测值矩阵 归一化空间权重矩阵后进行moran检验 ''' W = np.array(W) X = np.array(X) X = X.reshape(1,-1) print('===========w:{}',W) print('===========x:{}',X) #归一化 print('W.sum:{}',W.sum(axis=1)) W = W/W.sum(axis=1) print('===========W归一化:{}',W) #空间单元数 n = W.shape[0] print('===========空间单元数:{}',n) #离差阵 Z = X - X.mean() print('===========离差证:{}',Z) S0 = W.sum() S1 = 0 for i in range(n): for j in range(n): S1 += 0.5*(W[i,j]+W[j,i])**2 S2 = 0 for i in range(n): S2 += (W[i,:].sum()+W[:,i].sum())**2 #全局moran指数 I = np.dot(Z,W) I = np.dot(I,Z.T) I = n/S0*I/np.dot(Z,Z.T) #在正太分布假设下的检验数 EI_N = -1/(n-1) VARI_N = (n**2*S1-n*S2+3*S0**2)/(S0**2*(n**2-1))-EI_N**2 ZI_N = (I-EI_N)/(VARI_N**0.5) #在随机分布假设下检验数 EI_R = -1/(n-1) b2 = 0 for i in range(n): b2 += n*Z[0,i]**4 b2 = b2/((Z*Z).sum()**2) VARI_R = n*((n**2-3*n+3)*S1-n*S2+3*S0**2)-b2*((n**2-n)*S1-2*n*S2+6*S0**2) VARI_R = VARI_R/(S0**2*(n-1)*(n-2)*(n-3))-EI_R**2 ZI_R = (I-EI_R)/(VARI_R**0.5) #计算局部moran指数 Ii = list() for i in range(n): Ii_ = n*Z[0,i] Ii__ = 0 for j in range(n): Ii__ += W[i,j]*Z[0,j] Ii_ = round(Ii_*Ii__/((Z*Z).sum()),3) Ii.append(Ii_) Ii = np.array(Ii) #局部检验数 ZIi = list() EIi = Ii.mean() VARIi = Ii.var() for i in range(n): ZIi_ = (Ii[i]-EIi)/(VARIi**0.5) ZIi.append(round(ZIi_,3)) ZIi = np.array(ZIi) result={} # 全局moran指数 result["I"]=round(I[0,0],3) #正太分布假设下检验数 result["ZI_N"]=round(ZI_N[0,0],3) #随机分布假设下检验数 result["ZI_R"]=round(ZI_R[0,0],3) #局部moran指数 result["Ii"]=Ii.tolist() #局部检验数 result["ZIi"]=ZIi.tolist() return result # return { # "I":{"value":I[0,0],"desc":"全局moran指数"}, # "ZI_N":{"value":ZI_N[0,0],"desc":"正太分布假设下检验数"}, # "ZI_R":{"value":ZI_R[0,0],"desc":"随机分布假设下检验数"}, # "Ii":{"value":Ii,"desc":"局部moran指数"}, # "ZIi":{"value":ZIi,"desc":"局部检验数"}, # "img":{"path":imgPath,"desc":"莫兰散点图路径"} # } if __name__ == "__main__": w = [ [0,1,1,0,0], [1,0,1,1,0], [1,1,0,1,0], [0,1,1,0,1], [0,0,0,1,0] ] w = np.array(w) x = [ [8,6,6,3,2] ] x = np.array(x) print(moranI(w,x))
fangweilong/python-algorithm
莫兰指数/MoranI.py
MoranI.py
py
2,858
python
en
code
0
github-code
36
31243963609
import argparse import datetime import pycloudlib CI_DEFAULT_TAG = "uaclient" def get_parser(): parser = argparse.ArgumentParser() parser.add_argument( "-t", "--tag", dest="tag", action="store", default=CI_DEFAULT_TAG, help=( "Tag to determine which instances will be deleted." "If the tag is present in the instance name, it will " "be marked for deletion. " "Default: {}".format(CI_DEFAULT_TAG)) ) parser.add_argument( "-b", "--before-date", dest="before_date", action="store", help=("Resources created before this date will be deleted." " Format: MM/DD/YY") ) parser.add_argument( "--credentials-path", dest="credentials_path", help=""" Path to json file representing the GCP credentials. That file must a be a json dict containing all the necessary credentials to manage GCP resources.""" ) parser.add_argument( "--project-id", dest="project_id", help="Name of the project id this script will operate on" ) parser.add_argument( "--region", dest="region", help="Name of the region this script will operate on" ) parser.add_argument( "--zone", dest="zone", help="Name of the zone this script will operate on" ) return parser def clean_gcp(credentials_path, project_id, tag, before_date, region, zone): gce = pycloudlib.GCE( tag='cleanup', credentials_path=credentials_path, project=project_id, region=region, zone=zone ) all_instances = gce.compute.instances().list( project=gce.project, zone=gce.zone ).execute() for instance in all_instances.get('items', []): created_at = datetime.datetime.strptime( instance["creationTimestamp"].split("T")[0], "%Y-%M-%d" ) # If the machine is running for more than 2 days, we should # delete it, regardless of the name tag if created_at < before_date - datetime.timedelta(days=2): print("Deleting instance {} ...".format( instance['name'])) instance = gce.get_instance( instance_id=instance['name'] ) instance.delete() elif tag in instance['name'] and created_at < before_date: print("Deleting instance {} ...".format( instance['name'])) instance = 
gce.get_instance( instance_id=instance['name'] ) instance.delete() if __name__ == '__main__': parser = get_parser() args = parser.parse_args() if args.before_date: before_date = datetime.datetime.strptime( args.before_date, "%m/%d/%Y" ) else: before_date = datetime.datetime.today() - datetime.timedelta(days=1) clean_gcp( credentials_path=args.credentials_path, project_id=args.project_id, tag=args.tag, before_date=before_date, region=args.region, zone=args.zone )
canonical/server-test-scripts
ubuntu-advantage-client/gcp_cleanup.py
gcp_cleanup.py
py
3,138
python
en
code
8
github-code
36
36955105209
import wttest
from suite_subprocess import suite_subprocess
from wtdataset import SimpleDataSet, ComplexDataSet
from wiredtiger import stat
from wtscenario import make_scenarios

# test_compact.py
# session level compact operation
class test_compact(wttest.WiredTigerTestCase, suite_subprocess):
    """Exercise compaction on files and tables, driven either through the
    session API or the standalone 'wt compact' utility."""
    name = 'test_compact'

    # We don't want to set the page size too small as compaction doesn't work on tables with many
    # overflow items, furthermore eviction can get very slow with overflow items. We don't want the
    # page size to be too big either as there won't be enough pages to rewrite.
    config = 'leaf_page_max=8KB,key_format=S'
    nentries = 50000

    # The table is a complex object, give it roughly 5 pages per underlying
    # file.
    types = [
        ('file', dict(type='file:', dataset=SimpleDataSet, maxpages=5)),
        ('table', dict(type='table:', dataset=ComplexDataSet, maxpages=50))
    ]
    # How compaction is invoked: via session.compact (optionally after a
    # connection reopen) or via the external utility.
    compact = [
        ('method', dict(utility=0,reopen=0)),
        ('method_reopen', dict(utility=0,reopen=1)),
        ('utility', dict(utility=1,reopen=0)),
    ]
    scenarios = make_scenarios(types, compact)

    # Configure the connection so that eviction doesn't happen (which could
    # skew our compaction results).
    conn_config = 'cache_size=1GB,eviction_checkpoint_target=80,' +\
        'eviction_dirty_target=80,eviction_dirty_trigger=95,statistics=(all)'

    # Return stats that track the progress of compaction.
    def getCompactProgressStats(self, uri):
        """Read the btree compaction progress counters for *uri* into a dict."""
        cstat = self.session.open_cursor(
            'statistics:' + uri, None, 'statistics=(all)')
        statDict = {}
        statDict["pages_reviewed"] = cstat[stat.dsrc.btree_compact_pages_reviewed][2]
        statDict["pages_skipped"] = cstat[stat.dsrc.btree_compact_pages_skipped][2]
        statDict["pages_rewritten"] = cstat[stat.dsrc.btree_compact_pages_rewritten][2]
        cstat.close()
        return statDict

    # Test compaction.
    @wttest.skip_for_hook("timestamp", "removing timestamped items will not free space")
    def test_compact(self):
        """Populate, truncate most rows, compact, and check the tree shrank."""
        # Populate an object
        uri = self.type + self.name
        ds = self.dataset(self, uri, self.nentries - 1, config=self.config)
        ds.populate()

        # Reopen the connection to force the object to disk.
        self.reopen_conn()

        # Confirm the tree starts big
        stat_cursor = self.session.open_cursor('statistics:' + uri, None, None)
        self.assertGreater(stat_cursor[stat.dsrc.btree_row_leaf][2], self.maxpages)
        stat_cursor.close()

        # Remove most of the object: truncate the range [key(5), key(nentries-5)].
        c1 = ds.open_cursor(uri, None)
        c1.set_key(ds.key(5))
        c2 = ds.open_cursor(uri, None)
        c2.set_key(ds.key(self.nentries - 5))
        ds.truncate(None, c1, c2, None)
        c1.close()
        c2.close()

        # Compact it, using either the session method or the utility. Generated files are ~2MB, set
        # the minimum threshold to a low value to make sure compaction can be executed.
        compact_cfg = "free_space_target=1MB"
        if self.utility == 1:
            # Utility path: must checkpoint and close the connection first.
            self.session.checkpoint(None)
            self.close_conn()
            self.runWt(["compact", "-c", compact_cfg, uri])
        else:
            # Optionally reopen the connection so we do more on-disk tests.
            if self.reopen == 1:
                self.session.checkpoint(None)
                self.reopen_conn()

            self.session.compact(uri, compact_cfg)

        # Verify compact progress stats. We can't do this with utility method as reopening the
        # connection would reset the stats.
        if self.utility == 0 and self.reopen == 0 and not self.runningHook('tiered'):
            statDict = self.getCompactProgressStats(uri)
            self.assertGreater(statDict["pages_reviewed"],0)
            self.assertGreater(statDict["pages_rewritten"],0)
            # Every reviewed page was either skipped or rewritten.
            self.assertEqual(statDict["pages_rewritten"] + statDict["pages_skipped"],
                statDict["pages_reviewed"])

        # Confirm compaction worked: check the number of on-disk pages
        self.reopen_conn()
        stat_cursor = self.session.open_cursor('statistics:' + uri, None, None)
        self.assertLess(stat_cursor[stat.dsrc.btree_row_leaf][2], self.maxpages)
        stat_cursor.close()

if __name__ == '__main__':
    wttest.run()
mongodb/mongo
src/third_party/wiredtiger/test/suite/test_compact01.py
test_compact01.py
py
4,370
python
en
code
24,670
github-code
36
28657759948
from torchvision import transforms
from torch.utils.data import dataset, dataloader
from torchvision.datasets.folder import default_loader
from utils.RandomErasing import RandomErasing
from utils.RandomSampler import RandomSampler
from opt import opt
import glob
import pandas as pd
import numpy as np
import os.path as osp


class Data(object):
    """Bundle the train/test/query AICity datasets and their DataLoaders.

    All options (paths, batch sizes) come from the project-wide `opt` object.
    """
    def __init__(self):
        # paper is (384, 128)
        train_transform = transforms.Compose([
            # interpolation=3 is PIL's bicubic resampling code.
            transforms.Resize((256, 256), interpolation=3),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
            RandomErasing(probability=0.5, mean=[0.0, 0.0, 0.0])
        ])

        test_transform = transforms.Compose([
            transforms.Resize((256, 256), interpolation=3),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
        ])

        self.trainset = EvaluationAICityCar(train_transform, 'train', opt.data_path)
        self.testset = EvaluationAICityCar(test_transform, 'test', opt.data_path)
        self.queryset = EvaluationAICityCar(test_transform, 'query', opt.data_path)

        # Identity-balanced sampling: batch = batch_id identities x batch_image images.
        self.train_loader = dataloader.DataLoader(self.trainset,
                                                  sampler=RandomSampler(self.trainset, batch_id=opt.batchid,
                                                                        batch_image=opt.batchimage),
                                                  batch_size=opt.batchid * opt.batchimage, num_workers=8,
                                                  pin_memory=True)
        self.test_loader = dataloader.DataLoader(self.testset, batch_size=opt.batchtest, num_workers=1,
                                                 pin_memory=True)
        self.query_loader = dataloader.DataLoader(self.queryset, batch_size=opt.batchtest, num_workers=1,
                                                  pin_memory=True)


def process(data_path, dtype, num):
    """Collect (image path, car id) lists for one fold split.

    dtype: 'train' uses all folders except folder<num>; 'test'/'query' read
    folder<num>/test.txt or query.txt. Labels come from train_label.csv.
    """
    img = []
    car_id = []
    id_list = np.array(pd.read_csv(data_path + '/train_label.csv', header=None))
    if dtype == 'train':
        for i in range(10):
            if i != num:
                path = data_path + '/folder' + str(i) + '/'
                dir_path = glob.glob(osp.join(path, '*.jpg'))
                for j in range(len(dir_path)):
                    img.append(dir_path[j])
                    tmp = str(dir_path[j])
                    # tmp[-10:-4] assumes filenames end in a 6-digit id + '.jpg'
                    # (e.g. .../001234.jpg) — TODO confirm against the dataset layout.
                    car_id.append(id_list[int(tmp[-10:-4])-1][0])
    elif dtype == 'test':
        path = data_path + '/folder' + str(num) + '/test.txt'
        data = np.array(pd.read_csv(path, header=None))
        for i in range(data.shape[0]):
            # data[i] is a one-element row; tmp[0] is the full path string.
            tmp = data_path + '/folder' + str(num) + '/' + data[i]
            img.append(tmp[0])
            tmp = str(data[i])
            # tmp[-12:-6] skips the trailing "'.jpg']"-style repr suffix — assumes
            # a 6-digit id in the listed name; verify against test.txt contents.
            car_id.append(id_list[int(tmp[-12:-6]) - 1][0])
    elif dtype == 'query':
        path = data_path + '/folder' + str(num) + '/query.txt'
        data = np.array(pd.read_csv(path, header=None))
        for i in range(data.shape[0]):
            tmp = data_path + '/folder' + str(num) + '/' + data[i]
            img.append(tmp[0])
            tmp = str(data[i])
            car_id.append(id_list[int(tmp[-12:-6]) - 1][0])
    return img, car_id


class EvaluationAICityCar(dataset.Dataset):
    """Dataset of AICity car crops; fold index is hard-coded to num=6 here."""
    def __init__(self, transform, dtype, data_path):
        super(EvaluationAICityCar, self).__init__()
        self.transform = transform
        self.loader = default_loader
        self.data_path = data_path

        self.imgs, self.id = process(self.data_path, dtype, num=6)

    def __getitem__(self, index):
        path = self.imgs[index]
        # Remap raw car ids to contiguous targets; the 95/146 split presumably
        # reflects a gap in the id space — TODO confirm against train_label.csv.
        if self.id[index] <= 95:
            target = self.id[index] - 1
        else:
            target = self.id[index] - 146
        img = self.loader(path)
        if self.transform is not None:
            img = self.transform(img)
        return img, target

    def __len__(self):
        return len(self.imgs)
DavisonHu/AICity-track2-Re-id
loader/Evaluation_AICity_data.py
Evaluation_AICity_data.py
py
4,017
python
en
code
1
github-code
36
37217931593
def time_main():
    """Benchmark mult_optim_cython.main: five independent runs, report all
    timings and their median."""
    from mult_optim_cython import main
    import timeit
    import numpy as np

    samples = timeit.repeat(main, repeat=5, number=1)
    print('Times:', samples)
    print('Median:', np.median(samples))


if __name__ == "__main__":
    time_main()
RohanBh/cs263_project
programs/cython_np/time_mult_optim.py
time_mult_optim.py
py
280
python
en
code
0
github-code
36
25822109904
#!/usr/bin/env python3
# -*- coding: utf-8 -*-


def read_info(file):
    """Yield lines from *file* one at a time until EOF."""
    while True:
        data = file.readline()
        if not data:
            break
        yield data


class Contact:
    """One phone-book entry: first name, last name and phone number."""

    # Only these attributes may be edited through update().
    _EDITABLE = ('first_name', 'last_name', 'number')

    def __init__(self, first_name='', last_name='', number=0):
        self.first_name = first_name
        self.last_name = last_name
        self.number = number

    def __str__(self) -> str:
        return "{} {}: {}".format(self.first_name, self.last_name, self.number)

    def __gt__(self, another_contact):
        # Order by last name first, then by first name.
        if self.last_name != another_contact.last_name:
            return self.last_name > another_contact.last_name
        return self.first_name > another_contact.first_name

    def add_from_console(self):
        """Fill in this contact from interactive input."""
        self.first_name = input('Enter first name: ')
        self.last_name = input('Enter last name: ')
        self.number = input('Enter phone number: ')

    def __eq__(self, o: object) -> bool:
        # Contacts are considered equal when their phone numbers match.
        return self.number == o.number

    def update(self, property):
        """Re-prompt for one attribute.

        Bug fix: the original wrapped setattr in a bare try/except, but setattr
        never raises for a bad name — a typo silently created a brand-new
        attribute. Validate the field name explicitly instead.
        """
        if property in self._EDITABLE:
            setattr(self, property, input("Enter " + property + ": "))
        else:
            print('Invalid property')


class PhoneBook:
    """Collection of Contact records persisted in ./phone_book.txt."""

    def __init__(self):
        self.records = []
        try:
            # `with` guarantees the file is closed even if a line is malformed.
            with open('./phone_book.txt') as f:
                for line in read_info(f):
                    args = line.split()
                    self.records.append(Contact(args[0], args[1], args[2]))
        except (OSError, IndexError) as e:
            # Missing/unreadable file, or a line with fewer than 3 fields.
            print("Something wrong!\n" + str(e))

    def add_to_file(self):
        """Persist all records, one 'first last number' line each."""
        with open('./phone_book.txt', 'wt', encoding='utf-8') as f:
            for item in self.records:
                f.write('{} {} {}\n'.format(item.first_name, item.last_name, item.number))

    def add(self):
        """Append a new contact filled in from the console."""
        record = Contact()
        record.add_from_console()
        self.records.append(record)

    def remove_by_id(self, id):
        """Remove the record at positional index *id* (string or int)."""
        self.records.pop(int(id))

    def search_by_name(self, name):
        """Return all records whose 'first last' name contains *name*
        (case-sensitive substring match), one per line."""
        result = ''
        for item in self.records:
            # Idiomatic membership test instead of calling __contains__ directly.
            if name in item.first_name + " " + item.last_name:
                result += '{}\n'.format(item)
        return result

    def update(self, id):
        """Interactively update one field of the record at index *id*."""
        property = input('Enter property name (first_name, last_name, number): ')
        self.records[int(id)].update(property)

    def __str__(self) -> str:
        result = ''
        for idx, val in enumerate(self.records):
            result += '{} - {}\n'.format(idx, val)
        return result
# Interactive entry point: a simple console menu driving one PhoneBook.
phone_book = PhoneBook()
while True:
    action = input("1 - Add contact\n2 - Remove by id\n3 - Show contacts\n"
                   "4 - Update contact\n5 - Search by name\n6 - Exit\n")
    try:
        if action == '1':
            phone_book.add()
        elif action == '2':
            phone_book.remove_by_id(input('Enter id: '))
        elif action == '3':
            print(phone_book)
        elif action == '4':
            phone_book.update(input('Enter id: '))
        elif action == '5':
            print(phone_book.search_by_name(input('Enter name: ')))
        elif action == '6':
            # Records are persisted to disk only on explicit exit.
            phone_book.add_to_file()
            break
    except Exception as e:
        # Catch-all keeps the menu alive on bad ids / malformed input.
        print("Something wrong!\n" + str(e))
LiudaShevliuk/python
lab14_2/lab14_2.py
lab14_2.py
py
3,254
python
en
code
0
github-code
36
43046392244
import sys


def search_value(my_dict, elem):
    """Return the stored value equal to *elem*, compared case-insensitively,
    or None when absent.  (Fixed PERF102: only values are needed here.)"""
    for value in my_dict.values():
        if value.lower() == elem.lower():
            return value
    return None


def search_key(my_dict, elem):
    """Return the stored key equal to *elem*, compared case-insensitively,
    or None when absent.  (Fixed PERF102: only keys are needed here.)"""
    for key in my_dict:
        if key.lower() == elem.lower():
            return key
    return None


def search(my_dict, val):
    """Reverse lookup: return the first key mapped to *val* (exact match),
    or None when absent."""
    for key, value in my_dict.items():
        if value == val:
            return key
    return None


def my_search(elem):
    """Print whether *elem* names a capital city or a state (or neither)."""
    states = {
        "Oregon": "OR",
        "Alabama": "AL",
        "New Jersey": "NJ",
        "Colorado": "CO"
    }
    capital_cities = {
        "OR": "Salem",
        "AL": "Montgomery",
        "NJ": "Trenton",
        "CO": "Denver"
    }
    # Maps capital -> state name; relies on both dicts listing the states in
    # the same order (guaranteed insertion order since Python 3.7).
    my_dict = dict(zip(capital_cities.values(), states.keys()))
    key = search_key(my_dict, elem)
    if key:
        print(f'{key} is the capital of {my_dict[key]}')
    if key is None:  # fixed: identity check instead of `== None`
        value = search_value(my_dict, elem)
        if value is not None:
            key = search(my_dict, value)
            print(f'{key} is the capital of {my_dict[key]}')
        else:
            print(f'{elem} is neither a capital city nor a state')


def state():
    """Parse argv[1] as a comma-separated list and look each item up."""
    if len(sys.argv) != 2:
        return
    state = [elem.strip() for elem in sys.argv[1:][0].split(',') if elem.strip() != '']
    for elem in state:
        my_search(elem)


if __name__ == '__main__':
    state()
GoryachevDaniil/ft_Python_Django_Piscine
day01/ex05/all_in.py
all_in.py
py
1,368
python
en
code
0
github-code
36
22450452976
""" Team 46 Haoyue Xie 1003068 @Melbourne Jiayu Li 713551 @Melbourne Ruqi Li 1008342 @Melbourne Yi Zhang 1032768 @Melbourne Zimeng Jia 978322 @Hebei, China """ import json from shapely.geometry import shape, Point #current_region is a dictionary def streaming_region(current_region, tweet): if current_region != {}: return current_region else: area_list = [] with open("City_geojson.json") as f: data = json.load(f) for area in data["features"]: if area["geometry"] != None: polygon = shape(area["geometry"]) area_list.append([polygon,area["properties"]]) if tweet["coordinates"] != None: point = Point(tweet["coordinates"]["coordinates"][0],tweet["coordinates"]["coordinates"][1]) for plg in area_list: if plg[0].contains(point): return plg[1] print("no sa4 area defined") elif tweet["place"] != None: coor1 = tweet["place"]["bounding_box"]["coordinates"][0][0] coor2 = tweet["place"]["bounding_box"]["coordinates"][0][2] point = Point((coor1[0]+coor2[0])/2,(coor1[1]+coor2[1])/2) for plg in area_list: if plg[0].contains(point): return plg[1] print("no sa4 area defined") else: print("no location info!") return {}
yzzhan4/COMP90024-AuzLife
TwitterStreaming/streaming_region.py
streaming_region.py
py
1,436
python
en
code
0
github-code
36
21366302281
""" URL: https://www.lintcode.com/problem/invert-binary-tree/description Definition of TreeNode: class TreeNode: def __init__(self, val): self.val = val self.left, self.right = None, None """ # My own solution, simple recursion. class Solution: """ @param root: a TreeNode, the root of the binary tree @return: nothing """ def invertBinaryTree(self, root): # write your code here if root is None: return self.invertBinaryTree(root.left) self.invertBinaryTree(root.right) root.left, root.right = root.right, root.left # I referred to a solution provided by a student on Jiuzhang.com. It uses BFS, very simple. Next time I should # think about using BFS first when facing problems related to trees. I was always thinking about DFS and cannot # figure out a way to do it non-recursively. from collections import deque class Solution: """ @param root: a TreeNode, the root of the binary tree @return: nothing """ def invertBinaryTree(self, root): # write your code here if root is None: return queue = deque() queue.append(root) while len(queue) > 0: node = queue.popleft() node.left, node.right = node.right, node.left if node.left: queue.append(node.left) if node.right: queue.append(node.right)
simonfqy/SimonfqyGitHub
lintcode/easy/175_invert_binary_tree.py
175_invert_binary_tree.py
py
1,448
python
en
code
2
github-code
36
37655164252
from django.shortcuts import render
from django.http import HttpResponse
from hello.models import User
import random

# Create your views here.

# Module-level counter used as the next user's age.
# NOTE(review): resets on process restart and is not shared across workers.
count = 0


def World(request):
    """Plain-text sanity endpoint for this app."""
    return HttpResponse('this is app2')


def Add_user(request):
    """Create one User row with a running age, random name and random gender."""
    global count
    count += 1
    user = User()
    user.user_age = count
    user.user_name = random.choice(['Wang', 'Chan', 'Liu', 'Lin'])
    # Random boolean from one random bit.
    user.user_gender = not random.getrandbits(1)
    user.save()
    return render(request, ('add_user.html'))


def Get_user(request):
    """Render all users via user_list.html (also printed for debugging)."""
    user1 = User.objects.values()
    context = {
        "sqllist": user1
    }
    print(user1)
    return render(request, ('user_list.html'), context=context)


def Update_user(request):
    """Rename one randomly chosen user to 'Change'.

    NOTE(review): the pkv>1 and pkv==1 branches do essentially the same work
    and could be merged; the first values_list() call below is overwritten.
    """
    pkv = User.objects.values_list()
    # randompk = pkv[random.randint(0,len(pkv) -1)][0]
    # user = User.objects.get(pk = randompk)
    # user.user_name = 'Change'
    # user.save()
    # response = 'date has been updated'
    # return HttpResponse(response)
    pkv = len(User.objects.values_list())
    print(pkv)
    if(pkv > 1):
        pkv = User.objects.values_list()
        randompk = pkv[random.randint(0,len(pkv) -1)][0]
        user = User.objects.get(pk = randompk)
        user.user_name = 'Change'
        user.save()
        response = 'date has been updated'
        return HttpResponse(response)
    elif(pkv == 1) :
        a = User.objects.values_list()[0][0]
        user = User.objects.get(pk = a)
        user.user_name = 'Change'
        user.save()
        response = 'date has been updated'
        return HttpResponse(response)
    else:
        return HttpResponse('no information')


def Del_All(request):
    """Delete ONE randomly chosen user.

    NOTE(review): despite the name, this deletes a single row per call, not
    all rows.
    """
    pkv = len(User.objects.values_list())
    print(pkv)
    if(pkv > 1):
        pkv = User.objects.values_list()
        randompk = pkv[random.randint(0,len(pkv) -1)][0]
        user = User.objects.get(pk = randompk)
        user.delete()
        response = 'PK ' + str(randompk) + ' has been delete'
        return HttpResponse(response)
    elif(pkv == 1) :
        a = User.objects.values_list()[0][0]
        user = User.objects.get(pk = a)
        user.delete()
        response = 'the ' +str(a)+ ' date has been delete'
        return HttpResponse(response)
    else:
        return HttpResponse('no information')
yy1110/Mydjango
django_first/app2/views.py
views.py
py
2,338
python
en
code
1
github-code
36
22714296408
import sys
sys.path += ['.']  # noqa

from unittest.mock import Mock

from mycroft.services.paths_service import resolve_refs, StringGetter
from mycroft.services import paths_service

PathsManager = paths_service.PathsService
# Stub out resource_filename so tests never touch real package data files.
paths_service.resource_filename = lambda *args: ''


class TestResolver:
    """resolve_refs substitutes $name references using other keys in the dict."""

    def test_1(self):
        # No $refs at all: the config must be left untouched.
        config = {
            'this': 'is',
            'a test': None
        }
        orig = config.copy()
        resolve_refs(config)
        assert orig == config

    def test_2(self):
        # A simple one-level reference is resolved in place.
        config = {
            'a': '$b',
            'b': 'c'
        }
        resolve_refs(config)
        assert config == {'a': 'c', 'b': 'c'}

    def test_3(self):
        # Unknown references (here $d) are left as-is; known ones resolve.
        config = {
            'a': '1',
            'b': '2$a',
            'c': '$a/$b:$d'
        }
        resolve_refs(config)
        assert config == {'a': '1', 'b': '21', 'c': '1/21:$d'}


class TestStringGetter:
    """StringGetter turns a '$a:$b' template into a callable formatter."""

    def test_1(self):
        s = StringGetter('$a:$b')
        # Accepts either keyword or positional arguments.
        assert s(a='1', b='2') == '1:2'
        assert s(3, 4) == '3:4'


class TestPathsManager:
    """PathsService exposes resolved config paths as attributes."""

    def setup(self):
        self.rt = Mock()
        self.rt.config = {'lang': 'en-us'}
        PathsManager._attr_name = 'paths'

    def test_1(self):
        self.rt.config['paths'] = {'a': '1', 'b': '2$a'}
        assert PathsManager(self.rt).b == '21'

    def test_2(self):
        # Unresolvable $d survives in str() form but can be filled in by call.
        self.rt.config['paths'] = {'a': '1', 'b': '2$a', 'c': '$b$d'}
        assert str(PathsManager(self.rt).c) == '21$d'
        assert PathsManager(self.rt).c(3) == '213'
MatthewScholefield/mycroft-light
tests/managers/test_paths_manager.py
test_paths_manager.py
py
1,517
python
en
code
6
github-code
36
38336151204
from streamlit_webrtc import webrtc_streamer
import av
import cv2

# Module-level cascade (unused below; recv builds its own copy).
cascade = cv2.CascadeClassifier("haarcascade_frontalface_default.xml")


class VideoProcessor:
    # NOTE(review): this method has several apparent defects, preserved as-is:
    #   * `random` is used but never imported in this file.
    #   * `ret, frame = frm` unpacks a numpy frame as if it were a
    #     cv2.VideoCapture.read() result — looks like copied webcam-loop code.
    #   * the `while True` loop with `yield` makes recv() a generator, so the
    #     trailing `return av.VideoFrame...` expected by streamlit_webrtc is
    #     unreachable. TODO: confirm intent and rewrite as a single-frame recv.
    def recv(self, frame):
        frm = frame.to_ndarray(format="bgr24")
        # Detection thresholds (currently unused in this body).
        CONFIDENCE = 0.5
        SCORE_THRESHOLD = 0.5
        IOU_THRESHOLD = 0.5
        font = cv2.FONT_HERSHEY_COMPLEX
        face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
        # Pre-rolled "seniority" percentages shown after the first 100 frames.
        rand_lvl = [random.randrange(70, 100) for i in range(0, 50)]
        frame_cntr = 0
        while True:
            ret, frame = frm
            gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            faces = face_cascade.detectMultiScale(gray, 1.1, 4)
            i = 0
            for (x, y, z, h) in faces:
                frame_cntr += 1
                cv2.rectangle(frm, (x, y), (x + z, y + h), (255, 0, 0), 2)
                if frame_cntr < 100:
                    cv2.putText(frame, f'Вы junior-разработчик на:{random.randrange(0, 100)}%', (x - 6, y),
                                font, 0.7, (255, 255, 255), 2, cv2.LINE_AA)
                else:
                    i += 1
                    cv2.putText(frame, f'Вы junior-разработчик на:{rand_lvl[i]}%', (x - 6, y),
                                font, 0.7, (255, 255, 255), 2, cv2.LINE_AA)
            # MJPEG-style multipart chunk, as used by Flask video streaming.
            x, imag = cv2.imencode('.jpg', frame)
            yield (b'--frame\r\n'
                   b'Content-Type: image/jpeg\r\n\r\n' + imag.tobytes() + b'\r\n\r\n')
        # NOTE(review): unreachable — the loop above never terminates.
        for x, y, w, h in faces:
            cv2.rectangle(frm, (x, y), (x + w, y + h), (0, 255, 0), 3)
        return av.VideoFrame.from_ndarray(frm, format='bgr24')


webrtc_streamer(key="key", video_processor_factory=VideoProcessor)
NeTRooo/CyberGarden2022-Atom
rtc_test.py
rtc_test.py
py
1,710
python
en
code
0
github-code
36
4201928823
class Game:
    """Console flow skeleton for Rock, Paper, Scissors, Lizard, Spock."""

    def run_game(self):
        """Run one session: title, rules, mode selection, then the winner.

        Bug fix: the original announced the winner *before* the mode was
        chosen (and before any play happened).
        """
        self.display_title()
        self.game_rules()
        self.play_mode()
        # NOTE(review): nothing assigns self.winner yet — gameplay is still TODO,
        # so display_winner will fail until the play loop is implemented.
        self.display_winner()

    def display_title(self):
        print("\nWelcome to Rock, Paper, Scissors, Lizard, Spock \n")

    def game_rules(self):
        print(""" The rules are simple:\n Scissors cuts Paper \n Paper covers Rock \n Rock crushes Lizard \n Lizard poisons Spock \n Spock smashes Scissors \n Scissors decapitates Lizard \n Lizard eats Paper \n Paper disproves Spock \n Spock vaporizes Rock \n and of course, \n Rock crushes Scissors""")

    def play_mode(self):
        """Ask for the opponent type and set self.use_ai accordingly.

        Bug fix: the original assigned `self.input = ()` (never reading input)
        and then compared the *bound method* `self.play_mode` against "1"/"2",
        which is always False. Read the user's choice and compare that instead.
        """
        print("Play against a person or AI? 1 for Person or 2 for AI")
        choice = input()
        if choice == "1":
            self.use_ai = False
        if choice == "2":
            self.use_ai = True
        # TODO: loop back if the selection is anything other than 1 or 2.

    def display_winner(self):
        # TODO: self.winner must be set by the (not yet written) play loop.
        print(f"{self.winner} wins the set!")

    # TODO notes from the original author, kept for future work:
    # - write input for player(s); loop until a valid 0-4 gesture is chosen
    #   (human player only) and echo the choice, e.g. 'You choose 1 "paper"'.
    # - parent Player class: name, score, gesture list, selected gesture;
    #   AI differs only in HOW it picks gestures.
    # - compare the two selected gestures in game.py; three wins decide a set.
Lorena-Valdez/RPSLS_1
RPSLS/game.py
game.py
py
1,694
python
en
code
0
github-code
36
20858123687
#https://leetcode.com/problems/masking-personal-information/ class Solution: def solveEmail(self,s): print(s) name,domain=s.split("@")[0].lower(),s.split("@")[1].lower() print(name,domain) name=name[0:1]+"*****"+name[-1] return name+"@"+domain def solvePhone(self,s): tel="" for i in range(0,len(s),1): cr=s[i:i+1] if cr=="+" or cr=="-" or cr=="(" or cr==")" or cr==" ": tel=tel+"" else: tel=tel+cr if len(tel)==10: return "***-***-"+tel[6:] elif len(tel)==11: return "+*-***-***-"+tel[7:] elif len(tel)==12: return "+**-***-***-"+tel[8:] elif len(tel)==13: return "+***-***-***-"+tel[9:] def maskPII(self, s: str) -> str: if "@" in s: ans=self.solveEmail(s) return ans else: ans=self.solvePhone(s) return ans
manu-karenite/Problem-Solving
Strings/maskingPersonalInformation.py
maskingPersonalInformation.py
py
1,091
python
en
code
0
github-code
36
639825382
# Description: 2. project 'Bulls and cows' in Engeto Online Python Academy
# Author: Jiri Gloza

import random

# Digits the secret is drawn from; random.sample guarantees no repeats.
nums = [1, 2, 3, 4, 5, 6, 7, 8, 9, 0]

Welcome_message = '''
Hi there !
I've generated a random 4 digit number for you.
Let's play a bulls and cows game.
Enter a number'''


def main():
    """Play one game: generate a secret, loop over guesses until 4 bulls,
    then rate the player by the number of attempts."""
    print(Welcome_message)
    rand_number = list(map(int, random.sample(nums, 4)))
    guess_counter = 0
    user_word_score = ""
    while True:
        user_guess = user_input(rand_number)
        bulls, cows = bulls_and_cows(user_guess, rand_number)
        print(f" {bulls} Bulls, {cows} Cows")
        guess_counter = guess_counter + 1
        if bulls == 4:
            if guess_counter < 2:
                user_word_score = "Amazing"
            elif guess_counter < 10:
                user_word_score = "Average"
            else:
                user_word_score = "Not so good"
            print(f"Correct, you've guessed the right number in {guess_counter} guesses! \nThats {user_word_score}")
            break


def user_input(rand_number):
    """Prompt until the user enters exactly 4 digits; return them as ints.

    Bug fix: the original appended digits to one list shared across retries,
    so a partially-numeric attempt (e.g. '12a4') left stray digits behind and
    the next valid guess came back with more than 4 digits. The result list
    is now rebuilt from scratch for every attempt.
    (rand_number is unused but kept for call compatibility.)
    """
    while True:
        guess_num = input(" -> ")
        if len(str(guess_num)) != 4:
            print("Please enter 4 digits only")
        elif not str(guess_num).isdigit():
            print("Bad formatting -> only digits are allowed")
        else:
            return [int(ch) for ch in str(guess_num)]


def bulls_and_cows(guess_num, rand_num):
    """Return (bulls, cows) for one guess.

    A bull is a matching digit in the matching position; a cow is a matching
    digit in a different position (pairwise, as in the original).
    """
    bulls = sum(1 for g, r in zip(guess_num, rand_num) if g == r)
    cows = sum(1 for i, x in enumerate(rand_num)
               for j, y in enumerate(guess_num)
               if x == y and i != j)
    return bulls, cows


if __name__ == '__main__':
    # Guarded so importing this module (e.g. for tests) doesn't start a game.
    main()
Globerx/Engeto_academy_Project2
Project2.py
Project2.py
py
2,291
python
en
code
0
github-code
36
24888015371
from typing import Iterable  # NOTE(review): currently unused in this module.

import torch
import numpy as np
from scipy.spatial.distance import cdist
from tqdm import tqdm
import ot


def cost_matrix(
    data: np.ndarray, cost: str = 'correlation',
    normalize_features: bool = True) -> np.ndarray:
    """Compute an empirical ground cost matrix, i.e. a pairwise distance
    matrix between the rows of the dataset (l1-normalized by default).
    Accepted distances are the ones compatible with Scipy's `cdist`.

    Args:
        data (np.ndarray): The input data, samples as columns and features as rows.
        cost (str): The metric use. Defaults to `correlation`.
        normalize_features (bool, optional): Whether to divide the rows by
            their sum before computing distances. Defaults to True.

    Returns:
        np.ndarray: The pairwise cost matrix.
    """
    if normalize_features:
        # l1-normalize each feature (row) before measuring distances.
        sums = data.sum(1).reshape(-1, 1)
        C = cdist(data/sums, data/sums, metric=cost)
    else:
        C = cdist(data, data, metric=cost)
    # Scale costs into [0, 1].
    return C/C.max()


def OT_distance_matrix(
    data: np.ndarray, cost: np.ndarray, eps: float = .1,
    dtype: torch.dtype = torch.double, device: str = 'cuda',
    divide_max: bool = False, numItermax: int = 500,
    stopThr: float = 1e-5, batch_size: int = 200) -> np.ndarray:
    """Compute the pairwise Optimal Transport distance matrix.

    We compute Sinkhorn Divergences using POT's implementation of the Sinkhorn
    algorithm. Computations are done using PyTorch on a specified device. But
    the result is a numpy array. This allows not saturating the GPU for large
    matrices.

    NOTE(review): despite the annotated return type, this function actually
    returns a tuple ``(D.numpy(), errors)`` — see the final line.

    Args:
        data (np.ndarray): The input data, as a numpy array.
        cost (np.ndarray): The ground cost between features.
        eps (float, optional): The entropic regularization parameter. Small
            regularization requires more iterations and double precision.
            Defaults to .1.
        dtype (torch.dtype, optional): The torch dtype used for computations.
            Double is more precise but takes up more space. Defaults to
            torch.double.
        device (str, optional): The torch device to compute on, typically
            'cpu' or 'cuda'. Defaults to 'cuda'.
        divide_max (bool, optional): Whether to divide the resulting matrix by
            its maximum value. This can be useful to compare matrices.
            Defaults to False.
        numItermax (int, optional): Used by POT, maximum number of Sinkhorn
            iterations. Defaults to 500.
        stopThr (float, optional): Used by POT, tolerance for early stopping
            in the Sinkhorn iterations. Defaults to 1e-5.
        batch_size (int, optional): The batch size, i.e. how many distances
            can be computed at the same time. Should be as large as possible
            on your hardware. Defaults to 200.

    Returns:
        np.ndarray: The pairwise OT distance matrix.
    """
    # Move the cost to PyTorch.
    C = torch.from_numpy(cost)
    C = C.to(device=device, dtype=dtype)

    # Compute the kernel
    K = torch.exp(-C/eps)

    data_tensor = torch.from_numpy(data)
    data_tensor = data_tensor.to(device=device, dtype=dtype)

    # Result accumulated on CPU to keep GPU memory flat.
    D = torch.zeros(data_tensor.shape[1], data_tensor.shape[1],
                    device='cpu', dtype=dtype)

    # Progress over the upper triangle (D is symmetric).
    pbar = tqdm(total=data_tensor.shape[1]*(data_tensor.shape[1] - 1)//2, leave=False)
    errors = []

    # Iterate over the lines.
    for i in range(data_tensor.shape[1]):
        # Columns 0..i processed in chunks of batch_size.
        for ii in np.split(range(i+1), np.arange(batch_size, i+1, batch_size)):

            # Compute the Sinkhorn dual variables
            _, wass_log = ot.sinkhorn(
                data_tensor[:,i].contiguous(), # This is the source histogram.
                data_tensor[:,ii].contiguous(), # These are the target histograms.
                C, # This is the ground cost.
                eps, # This is the regularization parameter.
                log=True, # Return the dual variables
                stopThr=stopThr,
                numItermax=numItermax
            )

            # Compute the exponential dual potentials.
            f, g = eps*wass_log['u'].log(), eps*wass_log['v'].log()
            if len(wass_log['err']) > 0:
                errors.append(wass_log['err'][-1])

            # Compute the Sinkhorn costs.
            # These will be used to compute the Sinkhorn divergences
            wass = (
                f*data_tensor[:,[i]*len(ii)] +
                g*data_tensor[:,ii] -
                eps*wass_log['u']*(K@wass_log['v'])
            ).sum(0)

            # Add them in the distance matrix (including symmetric values).
            D[i,ii] = D[ii,i] = wass.cpu()

            pbar.update(len(ii))
    pbar.close()

    # Get the diagonal terms OT_eps(a, a).
    d = torch.diagonal(D)

    # The Sinkhorn divergence is OT(a, b) - (OT(a, a) + OT(b, b))/2.
    D = D - .5*(d.view(-1, 1) + d.view(1, -1))

    # Make sure there are no negative values.
    assert((D < 0).sum() == 0)

    # Make sure the diagonal is zero.
    D.fill_diagonal_(0)

    if divide_max:
        D /= torch.max(D)

    return D.numpy(), errors


def C_index(D: np.ndarray, clusters: np.ndarray) -> float:
    """Compute the C index, a measure of how well the pairwise distances
    reflect ground truth clusters. Implemented here for reference, but the
    silhouette score (aka Average Silhouette Width) is a more standard metric
    for this.

    Args:
        D (np.ndarray): The pairwise distances.
        clusters (np.ndarray): The ground truth clusters.

    Returns:
        float: The C index.
    """
    # Sw: sum of within-cluster distances; Nw: number of within-cluster pairs.
    Sw = Nw = 0
    for c in np.unique(clusters):
        idx = np.where(clusters == c)[0]
        Sw += D[idx][:,idx].sum()/2
        Nw += int(len(idx)*(len(idx) - 1)/2)

    # All pairwise distances from the strict lower triangle.
    els = []
    for i in range(len(D)):
        for j in range(i):
            els.append(D[i, j])

    # Smin/Smax: sums of the Nw smallest / largest distances overall.
    Smin = np.sort(np.array(els))[:Nw].sum()
    Smax = np.sort(np.array(els))[::-1][:Nw].sum()

    return (Sw - Smin)/(Smax - Smin)
cantinilab/OT-scOmics
src/otscomics/__init__.py
__init__.py
py
5,668
python
en
code
31
github-code
36
71404383785
import random, sys
# random.seed(42)
from person import Person
from logger import Logger
from virus import Virus
import argparse


class Simulation(object):
    """Agent-based herd-immunity simulation.

    Holds a population of Person objects, seeds an outbreak of `virus`,
    and steps it forward until everyone still alive is vaccinated (or the
    population is gone). Per-step statistics go to a Logger.
    """

    def __init__(self, pop_size, vacc_percentage, initial_infected, virus):
        # Logger writes all statistics to "<virus name>.txt".
        self.logger = Logger(virus.name + ".txt")
        # The Virus instance being simulated.
        self.virus = virus
        # Keep both the starting size and the current (living) size.
        self.original_pop_size = pop_size
        self.pop_size = pop_size
        # Fraction of the population vaccinated before the outbreak starts.
        self.vacc_percentage = vacc_percentage
        self.vaccinated = []
        self.initial_infected = initial_infected
        # to speed up looking for infected persons they are all stored here
        self.infected = []
        # Build the Person list (some vaccinated, some infected).
        self.population = self._create_population(initial_infected)

    def _create_population(self, initial_infected):
        """Create the list of Person objects making up the population.

        NOTE(review): random.choices samples WITH replacement, so duplicate
        indices mean the effective vaccinated/infected counts can be lower
        than requested, and index 0 can never be selected — TODO confirm.
        NOTE(review): the "vaccinated" persons are constructed with
        is_vaccinated=False and infection=self.virus, same as the infected
        ones — this looks like a bug (expected is_vaccinated=True,
        infection=None); verify against Person's constructor.
        """
        population = []
        for i in range(self.pop_size):
            population.append(Person(i))
        vaccinated_i = (random.choices(range(1, self.pop_size), k=int(self.vacc_percentage*self.pop_size//1)))
        self.vaccinated = []
        for vaccinated in vaccinated_i:
            population[vaccinated] = (Person(vaccinated, is_vaccinated=False, infection=self.virus))
            self.vaccinated.append(population[vaccinated])
        initial_infected_i = (random.choices(range(1, self.pop_size), k=initial_infected))
        self.infected = []
        for infected in initial_infected_i:
            population[infected] = (Person(infected, is_vaccinated=False, infection=self.virus))
            self.infected.append(population[infected])
        return population

    def _simulation_should_continue(self):
        """Return False once everyone is dead or every survivor is vaccinated."""
        if self.pop_size <= 0 or len(self.vaccinated) >= self.pop_size:
            return False
        return True

    def run(self):
        """Run the simulation: write metadata, then repeat time steps until
        _simulation_should_continue() says stop, logging after each step."""
        should_continue = True
        self.step_number = 0
        self.logger.write_metadata(self.pop_size, self.virus, self.initial_infected)
        while should_continue:
            self.time_step()
            should_continue = self._simulation_should_continue()
            self.logger.log_time_step(self.step_number, self.pop_size)

    def time_step(self):
        """One simulation round.

        Every living infected person interacts with up to 100 unprotected
        people (possibly infecting them), then either survives the infection
        (gaining immunity) or dies.

        NOTE(review): number_of_new_interactions / number_of_new_infections
        are initialised to 0 but never updated from new_interactions /
        new_infections, so log_interactions and log_infections always record
        zero — TODO confirm whether this is intentional.
        """
        new_deaths = 0
        new_survivors = 0
        number_of_new_interactions = 0
        number_of_new_infections = 0
        current_infected = []
        for person in self.infected:
            if person.is_alive:
                current_infected.append(person)
        for infected in current_infected:
            new_interactions = self.interaction(100)
            new_infections = self._infect_newly_infected(new_interactions)
            if infected.did_survive_infection():
                infected.is_vaccinated = True  # since surviving a virus gives similar results to vaccine
                self.vaccinated.append(infected)
                new_survivors += 1
            else:
                infected.is_alive = False
                self.pop_size -= 1
                new_deaths += 1
        self.step_number += 1
        self.logger.log_interactions(self.step_number, self.pop_size, number_of_new_interactions)
        self.logger.log_infections(self.step_number, self.pop_size, number_of_new_infections)
        self.logger.log_infection_survival(self.step_number, self.pop_size, new_deaths)

    def interaction(self, num_interactions):
        """Return up to 100 people who are neither infected nor vaccinated
        for one infected person to interact with.

        NOTE(review): the num_interactions parameter is ignored — the sample
        size is hard-coded to 100. random.choices samples with replacement,
        so the same person may be picked more than once.
        """
        infectable = list(set(self.population).difference(set(self.infected).union(set(self.vaccinated))))
        if len(infectable) >= 100:
            interacted_with = random.choices(infectable, k=100)
        else:
            interacted_with = random.choices(infectable, k=len(infectable))
        return interacted_with

    def _infect_newly_infected(self, interacted_with):
        """Infect each contacted, still-healthy person with probability
        virus.repro_rate; return the list of newly infected Person objects."""
        newly_infected = []
        for infected in interacted_with:
            if random.random() < self.virus.repro_rate and infected.infection == None:
                newly_infected.append(infected)
                self.population[infected.id].infection = self.virus
                self.infected.append(infected)
        return newly_infected


if __name__ == "__main__":
    # CLI entry point: all simulation parameters come from the command line.
    parser = argparse.ArgumentParser()
    parser.add_argument("population_size", help="size of the population you wish to simulate", type=int)
    parser.add_argument("vacc_percentage", help="percent of people who start vaccinated within given population", type=float)
    parser.add_argument("virus", help="name of the virus")
    parser.add_argument("mortality_rate", help="the percent chance of dying after contracting the virus", type=float)
    parser.add_argument("reproduction_rate", help="the percent chance of transmission per interaction", type=float)
    parser.add_argument("initial_infected", help="the number of people who start with the virus", type=int)
    args = parser.parse_args()

    virus = Virus(args.virus, repro_rate=args.reproduction_rate, mortality_rate=args.mortality_rate)
    sim = Simulation(args.population_size, args.vacc_percentage, args.initial_infected, virus)
    sim.run()
b3fr4nk/Herd-Immunity-Sim
simulation.py
simulation.py
py
9,203
python
en
code
0
github-code
36
73011952423
#-*- coding: utf-8 -*-
import csv
import os
import pymysql
import pandas as pd


def make_table_sql(df):
    """Map each DataFrame column to a MySQL column definition.

    dtype mapping: int* -> INT, float* -> FLOAT, object -> longtext,
    datetime* -> DATETIME; anything else falls back to longtext.

    Args:
        df (pd.DataFrame): frame whose columns define the table schema.

    Returns:
        str: comma-joined "name TYPE" pairs for a CREATE TABLE statement.
    """
    columns = df.columns.tolist()
    # FIX: DataFrame.ftypes was removed in pandas 1.0. Stringified dtypes
    # give the same "int64"/"float64"/"object"/"datetime64[ns]" names and
    # work on both old and new pandas.
    types = df.dtypes.astype(str)
    make_table = []
    for item in columns:
        if 'int' in types[item]:
            char = item + ' INT'
        elif 'float' in types[item]:
            char = item + ' FLOAT'
        elif 'object' in types[item]:
            char = item + ' longtext'
        elif 'datetime' in types[item]:
            char = item + ' DATETIME'
        else:
            # FIX: previously an unmatched dtype (e.g. bool) silently reused
            # the previous column's definition or raised NameError on the
            # first column; default such columns to longtext.
            char = item + ' longtext'
        make_table.append(char)
    return ','.join(make_table)


def csv2mysql(db_name, table_name, df):
    """(Re)create db_name.table_name from the DataFrame and bulk-insert it.

    Uses the module-level `conn`/`cursor`. Any existing table of the same
    name is dropped first.
    """
    cursor.execute('CREATE DATABASE IF NOT EXISTS {}'.format(db_name))
    conn.select_db(db_name)
    cursor.execute('DROP TABLE IF EXISTS {}'.format(table_name))
    cursor.execute('CREATE TABLE {}({})'.format(table_name, make_table_sql(df)))
    values = df.values.tolist()
    # One %s placeholder per column; executemany is much faster than
    # inserting row by row.
    s = ','.join(['%s' for _ in range(len(df.columns))])
    cursor.executemany('INSERT INTO {} VALUES ({})'.format(table_name, s), values)


if __name__ == '__main__':
    # DictCursor makes fetch results dictionaries; connect to local root.
    config = dict(host='localhost',
                  user='root',
                  password='kellydc',
                  cursorclass=pymysql.cursors.DictCursor)
    conn = pymysql.Connect(**config)
    conn.autocommit(1)  # commit automatically after every statement
    cursor = conn.cursor()

    df = pd.read_csv('/Users/daven/Github/MedDataPro/sampleData/clear/clear_set.csv',
                     encoding='utf-8', low_memory=False)
    # Replace NaN with None so pymysql writes SQL NULLs.
    df = df.astype(object).where(pd.notnull(df), None)
    csv2mysql("MedData", "RM_Report", df)

    cursor.execute('SELECT * FROM RM_Report LIMIT 5')
    cursor.scroll(4)
    cursor.fetchall()
cyj-user/MedData
sampleData/data_input.py
data_input.py
py
2,208
python
en
code
0
github-code
36
10350206274
#*************************************************************************
# 2. Dictionary
#*************************************************************************

# How to define an empty dictionary
mydict = {}

# How to initialize a dictionary
# setdefault(key, default_value) inserts the default value for the key
# the first time the key is introduced (no-op if the key already exists)
# mydict = {'a' : None, 'b' : None, 'c' : None, 'd' : None, 'e' : None}
mydict.setdefault('a', None)
mydict.setdefault('b', None)
mydict.setdefault('c', None)
mydict.setdefault('d', None)
mydict.setdefault('e', None)

# How to assign values to a dict
# examples of different data types
mydict = {'a': 'apple', 'b' : 'ball', 'c' : 'corn'}
mydict1 = {'name' : 'John', 1 : [2,3,4]}

# using the dict() constructor
# Note: {} requires ":"  {a:b,...}
#       [] requires ","  [(a,b),...]
mydict2 = dict({'a': 'apple', 'b': 'ball', 'c': 'corn'})
mydict3 = dict([('a','apple'),('b','ball'),('c','corn')])

# How to access values in a dictionary
# access via keys
mydict['a']
mydict.get('b')

#*************************************************************************
# accessing a dict
# key: value, key: value, key: value...
person = {'name': 'John', 'age' : 22, 'status': 'single'}
print(person.get('name'))   # get value for key 'name'
print(person.get('age'))    # get value for key 'age'
print(person.keys())        # get all keys in person
print(person.values())      # get all values in person
person['age'] = 20          # change the value of 'age' to 20
print(person['age'])        # another way to get the value for 'age'
# get() returns a default value if the key doesn't exist
# dict[key] raises an error if the key doesn't exist

# accessing a tuple
# similar to a list
mytuple = (1,2,3)
another_tuple = (4,5,6)
print(mytuple[0])                # value in the first position -- 1 here
print(mytuple[-1])               # value of the last item -- 3 here
print(mytuple+another_tuple)     # concatenates both tuples -- (1,2,3,4,5,6) here
print(max(mytuple))              # prints the LARGEST value -- 3 here
if 3 in mytuple:
    print('yes')                 # prints 'yes' if the value exists in mytuple

# a tuple cannot be modified directly:
# can't add items, can't delete items
# CAN delete the entire tuple

# membership testing
'a' in mydict.keys()              # checking for 'a' as a key
'apple' in mydict.values()        # checking for 'apple' as a value
('a', 'apple') in mydict.items()  # checking for the key-value pair 'a':'apple'

# sorting a dictionary by key
for key in sorted(mydict2):
    print(key, '->', mydict2[key])

# sorting a dictionary by value, then finding the associated key by
# listifying the dictionary
for value in sorted(mydict2.values()):
    KL = list(mydict2.keys())
    VL = list(mydict2.values())
    key = KL[VL.index(value)]
    print(key, '->', value)

# a few more examples
# cloning a dictionary with fromkeys()
# (fromkeys returns a NEW dict; the results below are discarded)
mydict3 = {}
mykeys = mydict2.keys()
mydict.fromkeys(mykeys)
values = 0
mydict3.fromkeys(mykeys, values)

# Conversions list -> tuple
# Conversions tuple -> list
# Conversions dict -> tuple
dict1 = {'a': 1, 'b': 2}
tuple(dict1.items())
dict1 = {'a': 1, 'b': 2}
list(dict1.items())
list(dict1.keys())
list(dict1.values())

# no slicing available for dictionaries,
# since slicing is possible only with integer indices

# How to add an entry to a dictionary
mydict.setdefault('a', None)
mydict.setdefault('b', None)
mydict.setdefault('c', None)
mydict.setdefault('d', None)
mydict.setdefault('e', None)
mydict['a'] = 'animal'
mydict['b'] = 'beach'
mydict['c'] = 'cat'
# FIX: was mydict['c'] = 'desk', which overwrote 'cat' and left 'd' unset,
# contradicting the expected state shown below.
mydict['d'] = 'desk'
# mydict = {'a': 'animal', 'b': 'beach', 'c': 'cat', 'd': 'desk', 'e': None}

# How to remove an entry from a dictionary
# pop(key) removes the entry and returns its associated value
mydict.pop('d')
# popitem() removes the last entry, returning both key and value
mydict.popitem()
# clear() removes all the items
mydict.clear()

# How to traverse a dictionary
mydict2 = dict({'z': 'zebra', 'b': 'ball', 'c': 'corn'})
# iteration via unpacking each (key, value) tuple
for key, value in mydict2.items():
    print(key, '->', value)
# iteration via items
for item in mydict2.items():
    print(item)
# iteration via keys
for key in mydict2.keys():
    print(key)
# iteration via values
for value in mydict2.values():
    print(value)
gregsurber/Practice
Week_2/wk2_code_demo.py
wk2_code_demo.py
py
4,444
python
en
code
0
github-code
36
556091123
# -*- coding: utf-8 -*- # Define your item pipelines here # # Don't forget to add your pipeline to the ITEM_PIPELINES setting # See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html import os import scrapy import json from urllib.parse import urlparse from pymongo import MongoClient from scrapy.pipelines.images import ImagesPipeline class DataEditPipeline(object): @staticmethod def process_item(item, spider): data = json.loads('{' + item['data'].split(';')[0].split('{', maxsplit=1)[1]) item['price'] = int(data['entities']['products'][0]['discountedPrice'])/100 item['photos'] = [itm['url'] for itm in data['entities']['products'][0]['images']] item['name'] = data['entities']['products'][0]['name'] item['params'] = {itm['slug']: itm['rawValue'] for itm in data['entities']['products'][0]['attributes']} del(item['data']) return item class YoulaPhotosPipeline(ImagesPipeline): def get_media_requests(self, item, info): if item['photos']: for img in item['photos']: try: yield scrapy.Request(img) except Exception as e: print(e) def file_path(self, request, response=None, info=None): return info.spider.start_urls[0].split('/')[-1] + '/' + request.url.split('/')[-1][:5] + '/' + \ os.path.basename(urlparse(request.url).path) def item_completed(self, results, item, info): if results: item['photos'] = [itm[1] for itm in results if itm[0]] return item class DataBasePipeline(object): def __init__(self): client = MongoClient('localhost', 27017) self.mongo_base = client.youla def process_item(self, item, spider): collection = self.mongo_base[spider.start_urls[0].split('/')[-1]] collection.insert_one(item) return item
GruXsqK/Methods_scraping
Lesson_6/Youla_parser_project/youlaparser/pipelines.py
pipelines.py
py
1,918
python
en
code
0
github-code
36
4413470363
'''
AtCoder ABC219 C.

Sort the given names lexicographically under a permuted alphabet:
build a char -> rank table from the permutation x, map every name to its
rank sequence, sort by it, and print the names in that order.
'''


def sort_by_custom_order(order, words):
    """Return *words* sorted lexicographically under the alphabet *order*.

    Args:
        order (str): a string whose character positions define the ranks
            (e.g. a permutation of 'a'..'z').
        words (list[str]): words made only of characters in *order*.

    Returns:
        list[str]: a new, sorted list; *words* is left untouched.
    """
    # Plain dict comprehension instead of the original defaultdict(dict),
    # whose default factory was never meaningful for a char->int table.
    rank = {ch: i for i, ch in enumerate(order)}
    # Comparing rank sequences gives exactly the custom lexicographic order.
    return sorted(words, key=lambda word: [rank[ch] for ch in word])


def main():
    """Read the permutation and the names from stdin, print sorted names."""
    order = input()
    n = int(input())
    words = [input() for _ in range(n)]
    for word in sort_by_custom_order(order, words):
        print(word)


if __name__ == "__main__":
    main()
burioden/atcoder
submissions/abc219/c.py
c.py
py
566
python
ja
code
4
github-code
36
13989800577
# -*- coding: utf-8 -*-

import copy

from io import BytesIO
from datetime import datetime

from xlwt import Workbook, XFStyle, Borders, Pattern


class ExcelWT(Workbook):
    """Excel (.xls) generation helper built on xlwt.

    Maintains a default bordered cell style plus a bold, shaded title style,
    and tracks the sheet most recently created via create_sheet().
    """

    def __init__(self, name, encoding=r'utf-8', style_compression=0):
        super().__init__(encoding, style_compression)

        self._book_name = name  # used to build the download filename
        self._current_sheet = None

        # Default cell style: thin borders on all sides, solid white fill.
        self._default_style = XFStyle()
        self._default_style.borders.left = Borders.THIN
        self._default_style.borders.right = Borders.THIN
        self._default_style.borders.top = Borders.THIN
        self._default_style.borders.bottom = Borders.THIN
        self._default_style.pattern.pattern = Pattern.SOLID_PATTERN
        self._default_style.pattern.pattern_fore_colour = 0x01

        # Title style: same borders, bold font, darker fill colour.
        self._default_title_style = copy.deepcopy(self._default_style)
        self._default_title_style.font.bold = True
        self._default_title_style.pattern.pattern_fore_colour = 0x16

    def create_sheet(self, name, titles=()):
        """Add a sheet named *name*, write *titles* as its styled header row
        and remember it as the current sheet.

        The default is an immutable tuple (was a mutable [] default — safe
        here because it was only read, but a tuple is more defensive).
        """
        sheet = self._current_sheet = self.add_sheet(name)
        style = self._default_title_style
        for index, title in enumerate(titles):
            sheet.write(0, index, title, style)
            sheet.col(index).width = 0x1200

    def add_sheet_row(self, *args):
        """Append one row of cell values to the current sheet."""
        sheet = self._current_sheet
        style = self._default_style
        nrow = len(sheet.rows)  # next free row index
        for index, value in enumerate(args):
            sheet.write(nrow, index, value, style)

    def get_file(self):
        """Serialize the workbook and return its bytes."""
        with BytesIO() as stream:
            self.save(stream)
            result = stream.getvalue()
        return result

    def write_request(self, request):
        """Send the workbook as an .xls attachment on *request*.

        The filename is "<book name>.<yymmdd.HHMMSS>.xls".
        """
        filename = f"{self._book_name}.{datetime.today().strftime('%y%m%d.%H%M%S')}.xls"
        request.set_header(r'Content-Type', r'application/vnd.ms-excel')
        # FIX: the computed filename was never interpolated into the
        # Content-Disposition header (the f-string had lost its placeholder,
        # leaving `filename` unused).
        request.set_header(r'Content-Disposition', f'attachment;filename={filename}')
        return request.finish(self.get_file())
wsb310/hagworm
hagworm/extend/excel.py
excel.py
py
2,028
python
en
code
13
github-code
36
73903114025
import pandas as pd
from datetime import date, timedelta, datetime
from meteostat import Point, Daily
import statsmodels.api as sm


def read_data():
    """Download daily weather for Vancouver, BC from 2010-01-01 to today.

    Returns:
        pd.DataFrame: columns 'tavg' (daily mean temperature) and
        'prcp' (precipitation), indexed by day.
    """
    start = datetime(2010, 1, 1)
    end = pd.to_datetime(datetime.now().strftime("%Y-%m-%d"))

    # Create a Point for Vancouver, BC (lat, lon, elevation in metres).
    vancouver = Point(49.2497, -123.1193, 70)
    # Other candidate locations kept for reference:
    # campinas = Point(-22.9056, -47.0608, 686)
    # saopaulo = Point(-23.5475, -46.6361, 769)

    data = Daily(vancouver, start, end).fetch()
    return data[['tavg', 'prcp']]


def predict():
    """Fit SARIMAX(1,1,3)x(0,1,1,7) on daily mean temperature and forecast
    one day ahead.

    Returns:
        list: [tomorrow's date (ISO string), point forecast,
               lower 95% bound, upper 95% bound], all rounded to 4 decimals.
    """
    data = read_data()
    series = data['tavg']

    model = sm.tsa.statespace.SARIMAX(
        series,
        order=(1, 1, 3),
        seasonal_order=(0, 1, 1, 7),  # weekly seasonality
        enforce_stationarity=False,
        enforce_invertibility=False,
        freq='D',
    )
    model = model.fit()

    forecast = model.get_forecast(steps=1)          # one step ahead
    conf_interval = forecast.conf_int(alpha=0.05)   # 95% confidence interval

    # FIX: use positional .iloc access — implicit float()/[0] conversion of
    # a one-element, datetime-indexed Series is deprecated in modern pandas.
    prediction = round(float(forecast.predicted_mean.iloc[0]), 4)
    lower_bound = round(float(conf_interval.iloc[0, 0]), 4)
    upper_bound = round(float(conf_interval.iloc[0, 1]), 4)

    tomorrow = date.today() + timedelta(days=1)
    return [str(tomorrow), prediction, lower_bound, upper_bound]
Marcosgrosso/automation_series
predict_model.py
predict_model.py
py
1,724
python
en
code
0
github-code
36
73947800422
from django.contrib import admin
from django.urls import path
from . import views

# URL routes for the purchase app; each route name matches its view.
urlpatterns = [
    path('', views.ProductList, name='ProductList'),                      # product listing (app root)
    path('productdetails', views.productdetails, name='productdetails'),  # single product detail
    path('orderslist', views.OrdersList, name='OrdersList'),              # list of orders
    path('addcolumns', views.AddColumns, name='AddColumns'),              # add table columns
    path('addproduct', views.addproduct, name='addproduct'),              # create a product
]
Fawazk/VofoxSolutions-test
vofox/purchase/urls.py
urls.py
py
403
python
en
code
1
github-code
36
4193041157
# put your python code here


def read_matrix():
    """Read whitespace-separated integer rows from stdin until 'end'."""
    rows = []
    while True:
        tokens = input().split()
        if tokens == ['end']:
            break
        rows.append([int(t) for t in tokens])
    return rows


def neighbor_sums(matrix):
    """For each cell, sum its four neighbours with toroidal wraparound.

    Generalized from the original (which used len(matrix) for the column
    wrap and therefore only worked for square matrices) to any rectangular
    matrix; results are identical for square input.

    Args:
        matrix (list[list[int]]): non-empty rectangular integer matrix.

    Returns:
        list[list[int]]: matrix of left+right+up+down neighbour sums.
    """
    height = len(matrix)
    result = []
    for i, row in enumerate(matrix):
        width = len(row)
        out_row = []
        for j in range(width):
            out_row.append(
                row[(j + 1) % width]        # right neighbour (wraps)
                + row[j - 1]                # left neighbour (negative index wraps)
                + matrix[(i + 1) % height][j]  # lower neighbour (wraps)
                + matrix[i - 1][j]          # upper neighbour (negative index wraps)
            )
        result.append(out_row)
    return result


if __name__ == "__main__":
    matrix = read_matrix()
    # NOTE(review): the original echoed the parsed matrix before printing
    # the sums — kept for output compatibility, but it looks like leftover
    # debugging.
    print(matrix)
    for sums in neighbor_sums(matrix):
        for value in sums:
            print(value, end=' ')
        print('')
Eduard-z/stepic
spisok.py
spisok.py
py
343
python
en
code
0
github-code
36
4492015736
## Load training SDFs
import argparse
import colorsys
import os
import numpy as np
import pathlib
import tqdm
import open3d as o3d
import random

from CARTO.simnet.lib.datapoint import decompress_datapoint
from CARTO.Decoder import utils
from CARTO.Decoder.data import dataset
from CARTO.Decoder import config
from CARTO.Decoder.visualizing import code_vis
from PIL import Image
import seaborn as sns


def main(args):
    """Render up to args.n SDF sample clouds from args.file_dir as PNGs.

    Each .zstd datapoint is decompressed, its sample points coloured by
    SDF sign (inside vs outside), normalised by the dataset's max extent,
    and rendered with Open3D into args.out_dir/<i>.png.
    """
    file_dir = pathlib.Path(args.file_dir)
    out_dir = pathlib.Path(args.out_dir)
    out_dir.mkdir(exist_ok=True, parents=True)

    dataset_cfg: config.GenerationConfig = utils.load_cfg(
        file_dir, cfg_class=config.GenerationConfig
    )

    all_files = list(file_dir.glob("*.zstd"))

    if args.latest or args.earliest:
        # NOTE(review): reverse=args.earliest means --latest (reverse=False)
        # puts the OLDEST files first — confirm the intended polarity.
        all_files.sort(key=lambda x: os.path.getmtime(x), reverse=args.earliest)
    else:
        print("Shuffling object list")
        random.shuffle(all_files)

    # Count files per the token before the last "_" in their path.
    counts = utils.AccumulatorDict()
    for file_name in all_files:
        counts.increment(str(file_name).split("_")[-2], 1)
    print(counts)

    render = code_vis.get_o3d_render(frame_width=600, frame_height=600)

    for i, file_path in tqdm.tqdm(enumerate(all_files[: args.n])):
        with open(file_path, "rb") as fh:
            buf = fh.read()
        data_point: dataset.DataPoint = decompress_datapoint(buf)

        sdf = data_point.sdf_values[:, None]
        points = data_point.points

        # Colour by SDF sign: inside (sdf < 0) gets the first tab10 colour,
        # outside the second.
        colors = np.where(
            sdf < 0.0,
            np.ones_like(points) * sns.color_palette("tab10")[0],
            np.ones_like(points) * sns.color_palette("tab10")[1],
        )

        if len(points) == 0:
            continue

        # Normalise coordinates so all clouds share a comparable scale.
        points /= dataset_cfg.max_extent

        pcd = o3d.geometry.PointCloud()
        pcd.points = o3d.utility.Vector3dVector(points)
        pcd.colors = o3d.utility.Vector3dVector(colors)

        img_np = code_vis.render_o3d_mesh(pcd, height_coloring=False, render=render)
        img_PIL = Image.fromarray(img_np)
        img_PIL.save(str(out_dir / f"{i}.png"))


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("file_dir")
    parser.add_argument("out_dir")
    parser.add_argument("-n", type=int, default=100)
    parser.add_argument("-l", "--latest", action="store_true", default=False)
    parser.add_argument("-e", "--earliest", action="store_true", default=False)
    args = parser.parse_args()
    main(args)
robot-learning-freiburg/CARTO
CARTO/Decoder/visualizing/visualize_sdf_values.py
visualize_sdf_values.py
py
2,511
python
en
code
10
github-code
36
74973903784
#Question 5


def subsequence(st):
    """Return the non-empty subsequences of *st* whose characters are
    strictly increasing, in the same order the original recursion produced.

    The original iterated over `subs` while appending to it; that was safe
    only because every appended item ends with st[-1] and can never satisfy
    `element[-1] < st[-1]` again. Iterating over a snapshot makes that
    invariant explicit while producing the identical list.

    Args:
        st (str): input string.

    Returns:
        list[str]: increasing subsequences ('' yields [] — the original
        recursed forever on empty input).
    """
    if not st:
        return []  # FIX: empty input previously caused infinite recursion
    if len(st) == 1:
        return list(st)
    last = st[-1]
    subs = subsequence(st[:len(st) - 1])
    for element in list(subs):  # snapshot: appended items never re-qualify
        if element[-1] < last:
            subs += [element + last]
    subs += [last]
    return subs


def long_com_seq(s1, s2):
    """Length of the longest common subsequence of *s1* and *s2*.

    Classic O(len(s1) * len(s2)) dynamic program: val[i][j] holds the LCS
    length of s1[:i] and s2[:j].

    Args:
        s1 (str): first string.
        s2 (str): second string.

    Returns:
        int: LCS length (0 if either string is empty).
    """
    x, y = len(s1), len(s2)
    # Zero-initialised table replaces the original "empty" placeholders,
    # which were immediately overwritten anyway.
    val = [[0] * (y + 1) for _ in range(x + 1)]
    for i in range(1, x + 1):
        for j in range(1, y + 1):
            if s1[i - 1] == s2[j - 1]:
                val[i][j] = val[i - 1][j - 1] + 1
            else:
                val[i][j] = max(val[i - 1][j], val[i][j - 1])
    return val[x][y]
SimoneFiorellino/ADM-HW3
q5.py
q5.py
py
780
python
en
code
0
github-code
36
72508017705
'''
liguangyao 10/25/2023
guangyaoli@ruc.edu.cn
'''
import argparse  # FIX: used in __main__ below but was never imported
import os
import torch
from torchvision import transforms, utils
from PIL import Image
import numpy as np
import glob

from imagebind import data
from imagebind.models import imagebind_model
from imagebind.models.imagebind_model import ModalityType

device = "cuda:1" if torch.cuda.is_available() else "cpu"

# Instantiate the ImageBind model once at import time.
model = imagebind_model.imagebind_huge(pretrained=True)
model.eval()
model.to(device)


def VideoLevelPrompt(video_label_list, video_name):
    """Build a video-level text prompt.

    NOTE: currently a fixed placeholder sentence — the label list and video
    name are ignored. Any declarative sentence works; customise as needed.
    """
    video_level_prompt = 'A photo of a dog.'
    return video_level_prompt


def ImageBind_feat_extract(args, dir_audio_path, dir_viusal_path, dir_text_path,
                           dst_audio_path, dst_visual_path, dst_text_path):
    """Extract ImageBind text/audio/visual features for every video and save
    each modality as a <video_name>.npy file in its destination directory.

    Videos whose audio feature file already exists are skipped.
    """
    # Text labels: one per line of the label file.
    # NOTE(review): the default dir_text_path is a .json, but it is read
    # line-by-line as plain text — confirm the expected file format.
    video_label_list = []
    with open(dir_text_path, 'r') as dpp:
        for line in dpp:
            video_label_list.append(line.replace("\n", ""))

    video_list = os.listdir(dir_viusal_path)
    video_idx = 0
    total_nums = len(video_list)
    for video_name in video_list:
        video_idx = video_idx + 1
        print("\n--> ", video_idx, video_name)

        audio_save_file = os.path.join(dst_audio_path, video_name + '.npy')
        frame_save_file = os.path.join(dst_visual_path, video_name + '.npy')
        text_save_file = os.path.join(dst_text_path, video_name + '.npy')
        if os.path.exists(audio_save_file):
            print(video_name + '.npy', "is already processed!")
            continue

        frame_list_load = sorted(glob.glob(os.path.join(dir_viusal_path, video_name, '*.jpg')))
        audio_list_load = sorted(glob.glob(os.path.join(dir_audio_path, video_name, '*.wav')))
        # e.g. 'A photo of a dog.' — keep the prompt a declarative sentence.
        text_list = VideoLevelPrompt(video_label_list, video_name)

        # To allow batched training downstream, every sample must have the
        # same length, so since videos differ in length we resample each one
        # to a fixed number of frames/audio clips.
        frame_nums = len(frame_list_load)
        if frame_nums < args.frame_nums:
            frame_samples = np.round(np.linspace(0, frame_nums-2, args.frame_nums))
        else:
            # NOTE(review): this samples only the first args.frame_nums
            # frames instead of spreading over the whole video — confirm.
            frame_samples = np.round(np.linspace(0, args.frame_nums-1, args.frame_nums))
        frame_list = [frame_list_load[int(sample)] for sample in frame_samples]

        audio_nums = len(audio_list_load)
        if audio_nums < args.audio_nums:
            audio_samples = np.round(np.linspace(0, audio_nums-2, args.audio_nums))
        else:
            audio_samples = np.round(np.linspace(0, args.audio_nums-1, args.audio_nums))
        audio_list = [audio_list_load[int(sample)] for sample in audio_samples]

        # Load data for all three modalities.
        inputs = {
            ModalityType.TEXT: data.load_and_transform_text(text_list, device),
            ModalityType.VISION: data.load_and_transform_vision_data(frame_list, device),
            ModalityType.AUDIO: data.load_and_transform_audio_data(audio_list, device),
        }

        with torch.no_grad():
            embeddings = model(inputs)

        text_feat = embeddings['text']
        audio_feat = embeddings['audio']
        visual_feat = embeddings['vision']

        text_feat = text_feat.float().cpu().numpy()
        np.save(text_save_file, text_feat)
        audio_feat = audio_feat.float().cpu().numpy()
        np.save(audio_save_file, audio_feat)
        visual_feat = visual_feat.float().cpu().numpy()
        np.save(frame_save_file, visual_feat)

        print("Process: ", video_idx, " / ", total_nums, " ----- video id: ", video_idx)
        print("T-A-V Feat shape: ", text_feat.shape, audio_feat.shape, visual_feat.shape)


def ImageBind_visaul_feat_extract(args, dir_viusal_path, dst_visual_path):
    """Extract only the visual ImageBind features for every video.

    (Function name kept for backward compatibility; a correctly-spelled
    alias is defined below.)
    """
    video_list = os.listdir(dir_viusal_path)
    video_idx = 0
    total_nums = len(video_list)
    for video_name in video_list:
        video_idx = video_idx + 1
        print("\n--> ", video_idx, video_name)

        frame_save_file = os.path.join(dst_visual_path, video_name + '.npy')
        if os.path.exists(frame_save_file):
            print(video_name + '.npy', "is already processed!")
            continue

        frame_list_load = sorted(glob.glob(os.path.join(dir_viusal_path, video_name, '*.jpg')))
        frame_nums = len(frame_list_load)
        if frame_nums < args.frame_nums:
            frame_samples = np.round(np.linspace(0, frame_nums-2, args.frame_nums))
        else:
            frame_samples = np.round(np.linspace(0, args.frame_nums-1, args.frame_nums))
        frame_list = [frame_list_load[int(sample)] for sample in frame_samples]

        inputs = {ModalityType.VISION: data.load_and_transform_vision_data(frame_list, device), }

        with torch.no_grad():
            embeddings = model(inputs)

        visual_feat = embeddings['vision']
        visual_feat = visual_feat.float().cpu().numpy()
        np.save(frame_save_file, visual_feat)

        print("Process: ", video_idx, " / ", total_nums, " ----- video id: ", video_idx)
        print("V Feat shape: ", visual_feat.shape)


# Correctly-spelled alias for the function above.
ImageBind_visual_feat_extract = ImageBind_visaul_feat_extract


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--dir_audio_path", type=str, default='data/users/guangyao_li/MUSIC-AVQA/audio_16kHz_2sec',
                        help='audio file path')
    parser.add_argument("--dir_visual_path", type=str, default='/data/users/guangyao_li/MUSIC-AVQA/avqa-frames-1fps',
                        help='visual frames path')
    parser.add_argument("--dir_text_path", type=str, default='../../dataset/split_que_id/music_avqa.json',
                        help='text file path')
    parser.add_argument("--dst_audio_path", type=str, default='/data/users/guangyao_li/MUSIC-AVQA/imagebind_feat/imagebind_audio_16kHz',
                        help='audio feature path')
    parser.add_argument("--dst_visual_path", type=str, default='/data/users/guangyao_li/MUSIC-AVQA/imagebind_feat/imagebind_frame_1fps',
                        help='visual frames feature path')
    parser.add_argument("--dst_text_path", type=str, default='/data/users/guangyao_li/MUSIC-AVQA/imagebind_feat/imagebind_text',
                        help='text feature path')
    parser.add_argument("--frame_nums", type=int, default=60, help='frame sample numbers')
    parser.add_argument("--audio_nums", type=int, default=60, help='audio clip sample numbers')
    # parser.add_argument("--gpu", dest='gpu', type=str, default='0',
    #                     help='Set CUDA_VISIBLE_DEVICES environment variable, optional')

    args = parser.parse_args()
    # os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
    params = vars(args)

    # Extract audio, visual and text features together.
    ImageBind_feat_extract(args, args.dir_audio_path, args.dir_visual_path, args.dir_text_path,
                           args.dst_audio_path, args.dst_visual_path, args.dst_text_path)

    # Extract features for a single modality, e.g. visual only.
    # FIX: the original called a non-existent name with undefined variables
    # (ImageBind_visual_feat_extract(args, dir_viusal_path, dst_visual_path)),
    # which raised NameError at runtime.
    ImageBind_visual_feat_extract(args, args.dir_visual_path, args.dst_visual_path)
ayameyao/ResearchToolCode
FeatureExtraction/Extract_ImageBind_Features/extract_imagebind_feats.py
extract_imagebind_feats.py
py
7,418
python
en
code
2
github-code
36
11425205526
"""Unit tests for edera's TargetCacher workflow processor.

Each test counts how many times a task's target Condition is actually
checked, to verify the cacher's memoisation and its behaviour when the
backing storage fails to read or write.
"""
from edera import Condition
from edera import Task
from edera.exceptions import StorageOperationError
from edera.requisites import shortcut
from edera.storages import InMemoryStorage
from edera.workflow import WorkflowBuilder
from edera.workflow.processors import TargetCacher


def test_target_cacher_checks_target_only_once():
    # A satisfied (True) target is checked once; repeat checks are served
    # from the cache, so the counter must not advance.

    class C(Condition):

        def check(self):
            counter[0] += 1
            return True

    class T(Task):
        target = C()

    class X(Task):

        @shortcut
        def requisite(self):
            return T()

    counter = [0]
    workflow = WorkflowBuilder().build(X())
    cache = InMemoryStorage()
    TargetCacher(cache).process(workflow)
    assert workflow[X()].item.phony
    assert workflow[T()].item.target.check()
    assert counter[0] == 1
    assert workflow[T()].item.target.check()
    assert counter[0] == 1


def test_target_cacher_skips_false_targets():
    # An unsatisfied (False) target must NOT be cached: every check hits
    # the real condition, so the counter advances each time.

    class C(Condition):

        def check(self):
            counter[0] += 1
            return False

    class T(Task):
        target = C()

    counter = [0]
    workflow = WorkflowBuilder().build(T())
    cache = InMemoryStorage()
    TargetCacher(cache).process(workflow)
    assert not workflow[T()].item.target.check()
    assert counter[0] == 1
    assert not workflow[T()].item.target.check()
    assert counter[0] == 2


def test_target_cacher_prevents_flooding():
    # If the storage cannot be read, the cacher must fall back to checking
    # the real condition every time instead of failing.

    class WriteOnlyStorage(InMemoryStorage):

        def get(self, key, since=None, limit=None):
            raise StorageOperationError("no")

    class C(Condition):

        def check(self):
            counter[0] += 1
            return True

    class T(Task):
        target = C()

    counter = [0]
    workflow = WorkflowBuilder().build(T())
    cache = WriteOnlyStorage()
    TargetCacher(cache).process(workflow)
    assert workflow[T()].item.target.check()
    assert counter[0] == 1
    assert workflow[T()].item.target.check()
    assert counter[0] == 2


def test_target_cacher_ignores_errors():
    # If the storage cannot be written, check() must still succeed; the
    # result simply never gets cached, so the counter keeps advancing.

    class ReadOnlyStorage(InMemoryStorage):

        def put(self, key, value):
            raise StorageOperationError("no")

    class C(Condition):

        def check(self):
            counter[0] += 1
            return True

    class T(Task):
        target = C()

    counter = [0]
    workflow = WorkflowBuilder().build(T())
    cache = ReadOnlyStorage()
    TargetCacher(cache).process(workflow)
    assert workflow[T()].item.target.check()
    assert counter[0] == 1
    assert workflow[T()].item.target.check()
    assert counter[0] == 2
thoughteer/edera
tests/unit/workflow/processors/test_target_cacher.py
test_target_cacher.py
py
2,571
python
en
code
3
github-code
36
19453386854
import requests #Requests é um biblioteca, um pacote de código. Para instalar usar: pip install requests from tkinter import * #Pegando todas as informações da biblioteca tkinter. def pegar_cotacoes(): requisicao = requests.get("https://economia.awesomeapi.com.br/last/USD-BRL,EUR-BRL,BTC-BRL") requisicao_dic = requisicao.json() cotacao_dolar = requisicao_dic['USDBRL']['bid'] cotacao_euro = requisicao_dic['EURBRL']['bid'] cotacao_btc = requisicao_dic['BTCBRL']['bid'] texto = f''' Dólar: {cotacao_dolar} Euro: {cotacao_euro} BTC: {cotacao_btc}''' texto_cotacoes["text"] = texto #editanto o parâmetro text do texto_cotacoes janela = Tk() #Criando uma janela com tk. TK é um código do tkinter que cria a janela. janela.title("Cotação Atual das Moedas") #Adicionando o título da janela. texto_orientecao = Label(janela, text="Clique no botão para ver as cotações das moedas.") #Um pedaço de texto dentro da janela é chamado de Label. texto_orientecao.grid(column=0, row=0, padx=10, pady=10) #grid, usado para escolher a posição do texto. Pad é a distância do texto e o que será inserido depois. botao = Button(janela, text="Buscar cotações Dólar/Euro/BTC", command=pegar_cotacoes) #Button está na biblioteca do tkinter. Janela, lugar onde o botão vai ficar. Command, comando que irá executar a função pegar_cotacoes. botao.grid(column=0, row=1, padx=10, pady=10) texto_cotacoes = Label(janela, text="") texto_cotacoes.grid(column=0, row=2, padx=10, pady=10) janela.mainloop() #mainloop deixa a janela exibida. Garante que a janela vai funcionar.
jessicarios-DevOps/Tkinter-python
janela.py
janela.py
py
1,628
python
pt
code
0
github-code
36
21803177273
""" receives the fieldnames and dimension values for a single species calculates parameters like Volume, Area, and returns them together with sorted Dimensions (min, mid, max) Disclaimer: calculations of A and V are pythonized from the matlab script cellgeom.m by A. Ryabov Tested for: python 3.6 """ # Library imports import numpy as np import re __author__ = "Onur Kerimoglu" __email__ = "onur.kerimoglu@uol.de" __credits__ = ["Onur Kerimoglu", "Alexey Ryabov"] __version__ = "1.0.0" # December 2020 # global parameters and aliases pi = np.pi sqrt = np.sqrt asin = np.arcsin nan = np.nan def calc_geom(fields,headersin,dataset): shapecol = headersin.index('Geometric_shape') gshape = fields[shapecol] if gshape[0] == ' ': gshape = gshape[1:] gshape.replace(' ', '_') gshape.replace(' ', '_') #todo: decapitalize Found = True #default: assume the shape is recognized and dimensions can be extracted if (gshape in ['1','sphere','sphere-10%','sphere-20%','sphere-25%']): # In Olenina dataset: sphere/-10%/-20%/-25% (regex search does not work because of confounding instances) if dataset=='Olenina': d = get_dim(fields, headersin, 'D1',dataset) elif dataset=='Roselli': Found = False unable2extrdim(dataset,gshape) if Found: V,A,Dsorted = shape_1(d) elif (gshape in ['2','prolate spheroid']) or (re.search('rotational ellipsoid',gshape)): # In Olenina dataset (=prolate spheroid): rotational ellipsoid/ x 0.5/-20% if dataset == 'Olenina': d = get_dim(fields, headersin, 'D1',dataset) #check: /2 ? h = get_dim(fields, headersin, 'L1',dataset) #check: /2 ? 
if h<d: #this results in sqrt of negative number for area calculation (sqrt (h^2 - d^2)) #solution: swap dimensions (check) d=get_dim(fields, headersin, 'L1',dataset) h=get_dim(fields, headersin, 'D1',dataset) elif dataset=='Roselli': Found = False unable2extrdim(dataset,gshape) if Found: if d == h: gshape='sphere' V, A, Dsorted = shape_1(d) #calculating for shape_2 with d==h results in division by 0 else: V, A, Dsorted = shape_2(d,h) elif gshape in ['3', 'cylinder']: if dataset == 'Olenina': d = get_dim(fields, headersin, 'D1',dataset) h = get_dim(fields, headersin, 'H',dataset) elif dataset == 'Roselli': Found = False unable2extrdim(dataset,gshape) if Found: V, A, Dsorted = shape_3(d, h) elif (gshape in ['4', 'ellipsoid']) or (re.search('flattened ellipsoid',gshape) is not None): # In Olenina dataset (=ellipsoid): flattened ellipsoid/ - 20%/-20% if dataset == 'Olenina': b = get_dim(fields, headersin, 'D1') #check: / 2 c = get_dim(fields, headersin, 'D2') #check: / 2 h = get_dim(fields, headersin, 'L1') #check: / 2 if np.isnan(h): h = get_dim(fields, headersin, 'H') # check: / 2 if np.isnan(h): print('!') elif dataset == 'Roselli': Found = False unable2extrdim(dataset,gshape) if Found: V, A, Dsorted = shape_4(b, c, h) elif (gshape in ['5', 'cone', 'cone-10%']): # In Olenina dataset: cone/-10% if dataset == 'Olenina': d = get_dim(fields, headersin, 'D1') h = get_dim(fields, headersin, 'H') elif dataset == 'Roselli': Found = False unable2extrdim(dataset,gshape) if Found: V, A, Dsorted = shape_5(d, h) elif (gshape in ['7','parallelepiped']) or (re.search('parallelipiped-',gshape) is not None): # In Olenina dataset: parallelipiped/-10%/-20%/-25%/-30%/-40% if dataset == 'Olenina': a = get_dim(fields, headersin, 'H') b = get_dim(fields, headersin, 'L1') c = get_dim(fields, headersin, 'W') elif dataset == 'Roselli': Found = False unable2extrdim(dataset,gshape) if Found: V, A, Dsorted = shape_7(a, b, c) elif (gshape in ['8','prism on elliptic base']) or (re.search('oval 
cylinder', gshape)): # In Olenina dataset (=prism on elliptic base): oval cylinder/-30% if dataset == 'Olenina': a = get_dim(fields, headersin, 'D1') b = get_dim(fields, headersin, 'D2') c = get_dim(fields, headersin, 'H') elif dataset == 'Roselli': Found = False unable2extrdim(dataset,gshape) if Found: V, A, Dsorted = shape_8(a, b, c) elif gshape in ['9','prism on parallelogram base'] : if dataset == 'Olenina': Found = False shapenotrec(gshape, dataset) elif dataset == 'Roselli': Found = False unable2extrdim(dataset, gshape) if Found: V, A, Dsorted = shape_9(a, b, c) elif gshape in ['10','cube']: if dataset == 'Olenina': Found = False shapenotrec(gshape, dataset) elif dataset == 'Roselli': Found = False unable2extrdim(dataset, gshape) if Found: V, A, Dsorted = shape_10(a) elif gshape in ['11','prism on triangle base 1', 'parallelipiped/2', 'half parallelipiped']: # In Olenina dataset: parallelipiped/2, half parallelipiped if dataset == 'Olenina': #Check: for these shapes, 3 parameters are provided in Olenina datasets H = get_dim(fields, headersin, 'H') L1 = get_dim(fields, headersin, 'L1') #W = get_dim(fields, headersin, 'W') elif dataset == 'Roselli': Found = False unable2extrdim(dataset,gshape) if Found: V, A, Dsorted = shape_11(a, c) elif gshape in ['12','half prism on elliptic base']: if dataset == 'Olenina': Found = False shapenotrec(gshape, dataset) elif dataset == 'Roselli': Found = False unable2extrdim(dataset, gshape) if Found: V, A, Dsorted = shape_12(a, b, c) elif gshape in ['14','double cone', '2 cones-30%', '2 cones']: # In Olenina dataset: 2 cones-30%, 2 cones if dataset == 'Olenina': d = get_dim(fields, headersin, 'D1') h = get_dim(fields, headersin, 'H') elif dataset == 'Roselli': Found = False unable2extrdim(dataset,gshape) if Found: V, A, Dsorted = shape_14(d, h) elif gshape in ['15','2 truncated cones', 'two truncated cones']: if dataset == 'Olenina': Found = False unable2extrdim(dataset, gshape) elif dataset == 'Roselli': Found = False 
unable2extrdim(dataset,gshape) if Found: V, A, Dsorted = shape_15(d1, d2, h) elif gshape in ['16','prolate spheroid + 2 Cylinders']: if dataset == 'Olenina': Found = False shapenotrec(gshape,dataset) elif dataset == 'Roselli': Found = False unable2extrdim(dataset, gshape) if Found: V, A, Dsorted = shape_16(d,d1,d2,h,h1,h2) elif gshape in ['17','cylinder + 2 cones']: if dataset == 'Olenina': Found = False shapenotrec(gshape,dataset) elif dataset == 'Roselli': Found = False unable2extrdim(dataset, gshape) if Found: V, A, Dsorted = shape_17(d,h) elif (gshape in ['19', 'cone + half sphere']) or (re.search('cone + half sphere', gshape)): # In Olenina dataset: cone + half sphere/-20%/25%/40% if dataset == 'Olenina': d = get_dim(fields, headersin, 'D1') h = get_dim(fields, headersin, 'H') elif dataset == 'Roselli': Found = False unable2extrdim(dataset,gshape) if Found: V, A, Dsorted = shape_19(d,h) elif gshape in ['20', 'half ellipsoid + cone']: if dataset == 'Olenina': Found = False shapenotrec(gshape,dataset) elif dataset == 'Roselli': Found = False unable2extrdim(dataset, gshape) if Found: V, A, Dsorted = shape_20(b,c,h,h1) elif gshape in ['21','prism on elliptic base+ box']: if dataset == 'Olenina': Found = False shapenotrec(gshape,dataset) elif dataset == 'Roselli': Found = False unable2extrdim(dataset, gshape) if Found: V, A, Dsorted = shape_21(a,a1,b,b1,c) elif gshape in ['22', 'cylinder + 2 half spheres']: if dataset == 'Olenina': Found = False shapenotrec(gshape,dataset) elif dataset == 'Roselli': Found = False unable2extrdim(dataset, gshape) if Found: V, A, Dsorted = shape_22(d,h) elif gshape in ['23', 'ellipsoid+2cones+cylinder']: if dataset == 'Olenina': Found = False shapenotrec(gshape,dataset) elif dataset == 'Roselli': Found = False unable2extrdim(dataset, gshape) if Found: V, A, Dsorted = shape_23(b, c, d1, d2, d3, h, h1, h2, h3) elif gshape in ['24', 'ellipsoid + cone']: if dataset == 'Olenina': Found = False shapenotrec(gshape,dataset) elif dataset == 
'Roselli': Found = False unable2extrdim(dataset, gshape) if Found: V, A, Dsorted = shape_24(b,c,d1,h,h1) elif gshape in ['25', 'cylinder + 3 cones']: if dataset == 'Olenina': Found = False shapenotrec(gshape,dataset) elif dataset == 'Roselli': Found = False unable2extrdim(dataset, gshape) if Found: V, A, Dsorted = shape_25(d1,d2,d3,d4,h1,h2,h3,h4) elif gshape in ['27', 'half sphere', 'half sphere-30%']: if dataset == 'Olenina': #Check: For half sphere, 2 parameters are provided in the Olenina dataset: D1 and H d = get_dim(fields, headersin, 'D1') #H = get_dim(fields, headersin, 'H') elif dataset == 'Roselli': Found = False unable2extrdim(dataset, gshape) if Found: V, A, Dsorted = shape_27(d) elif gshape in ['34', '2 half ellipsoids + prism on elliptic base']: if dataset == 'Olenina': Found = False shapenotrec(gshape,dataset) elif dataset == 'Roselli': Found = False unable2extrdim(dataset, gshape) if Found: V, A, Dsorted = shape_34(a,b1,b2,c,c1,c2,h1,h2) elif gshape in ['35', 'cymbelloid']: if dataset == 'Olenina': Found = False shapenotrec(gshape,dataset) elif dataset == 'Roselli': Found = False unable2extrdim(dataset, gshape) if Found: V, A, Dsorted = shape_35(a,b,c) elif gshape in ['38', 'half cone']: if dataset == 'Olenina': d = get_dim(fields, headersin, 'D1') h = get_dim(fields, headersin, 'H') elif dataset == 'Roselli': Found = False unable2extrdim(dataset, gshape) if Found: V, A, Dsorted = shape_38(d,h) # WARNING: Was not available in cellgeom.m! 
elif gshape in ['40', 'gomphonemoid']: if dataset == 'Olenina': Found = False shapenotrec(gshape,dataset) elif dataset == 'Roselli': Found = False unable2extrdim(dataset, gshape) if Found: V, A, Dsorted = shape_40(b,c,h) elif gshape in ['41', 'sickle-shaped prism']: if dataset == 'Olenina': Found = False shapenotrec(gshape,dataset) elif dataset == 'Roselli': Found = False unable2extrdim(dataset, gshape) if Found: V, A, Dsorted = shape_41(a,c,h) elif gshape in ['43', 'prism on elliptic base + 4 cones']: if dataset == 'Olenina': Found = False shapenotrec(gshape,dataset) elif dataset == 'Roselli': Found = False unable2extrdim(dataset,gshape) if Found: V, A, Dsorted = shape_43(a,b,c,d1,d2,d3,d4,h1,h2,h3,h4) elif gshape in ['44', 'pyramid']: if dataset == 'Olenina': Found = False shapenotrec(gshape,dataset) elif dataset == 'Roselli': Found = False unable2extrdim(dataset, gshape) if Found: V, A, Dsorted = shape_44(d,h) elif gshape in ['46', 'prism on triangular base', 'prism on triangle base 2',]: if dataset == 'Olenina': b = get_dim(fields, headersin, 'H') a = get_dim(fields, headersin, 'L1') # in almost every case W=L (Hillebrand99) elif dataset == 'Roselli': Found = False unable2extrdim(dataset, gshape) if Found: V, A, Dsorted = shape_46(a,b) elif gshape in ['51', '2 half ellipsoids', '2 rotational ellipsoids']: if dataset == 'Olenina': Found = False unable2extrdim(dataset, gshape) elif dataset == 'Roselli': Found = False unable2extrdim(dataset, gshape) if Found: V, A, Dsorted = shape_51(b1,b2,c1,c2,h1,h2) else: Found = False if not Found: Warning('Unrecognized shape: '+gshape) V = nan; A = nan; Dsorted = [nan,nan,nan] return gshape, V, A, Dsorted def unable2extrdim(dataset,gshape): Warning('Extracting dims for %s from %s datasets not yet implemented' %(gshape,dataset)) def shapenotrec(dataset,gshape): raise(ValueError('%s is not recognized for %s dataset' %(gshape,dataset))) def shape_1(d): # Sphere V = pi / 6 * d ** 3 A = pi * d ** 2 D1 = d D2 = d D3 = d return V, 
A, np.sort([D1, D2, D3]) def shape_2(d, h): # Prolate spheroid A = pi * d / 2 * (d + h ** 2 / sqrt(h ** 2 - d ** 2) * asin(sqrt(h ** 2 - d ** 2) / h)) # equation from maple gives exactly the same result. TESTED # A=(1/2)*pi*d*(d+2*h**2*asin(sqrt(-4*d**2+4*h**2)/(2*h))/sqrt(-4*d**2+4*h**2)) V = pi / 6 * d ** 2 * h D1 = d D2 = d D3 = h return V, A, np.sort([D1, D2, D3]) def shape_3(d, h): # Cylinder A = pi * d * (d / 2 + h) V = pi / 4 * d ** 2 * h D1 = d D2 = d D3 = h return V, A, np.sort([D1, D2, D3]) def shape_4(b, c, h): # Ellipsoid A = (pi / 4 * (b + c) * ((b + c) / 2 + 2 * h ** 2 / sqrt(4 * h ** 2 - (b + c) ** 2) * asin(sqrt(4 * h ** 2 - (b + c) ** 2) / (2 * h)))) if np.isnan(A): Warning('sqrt results in negative value since: h<b+c' ) V = pi / 6 * b * c * h D1 = b D2 = c D3 = h return V, A, np.sort([D1, D2, D3]) def shape_5(d, h): # Cone A = pi / 4 * d * (d + sqrt(4 * h ** 2 + d ** 2)) V = pi / 12 * d ** 2 * h D1 = d D2 = d D3 = h return V, A, np.sort([D1, D2, D3]) def shape_7(a, b, c): # Parallelepiped A = 2 * a * b + 2 * b * c + 2 * a * c V = a * b * c D1 = a D2 = b D3 = c return V, A, np.sort([D1, D2, D3]) def shape_8(a, b, c): # Prism on elliptic base # A = pi/2 * (a*b + c * (a + b)) # this formula from http://phytobioimaging.unisalento.it/en-us/products/AtlasOfShapes.aspx?ID_Tipo=3 # suggests a first order approximation for the Area, better this A = c * pi * ((1 / 2) * a + (1 / 2) * b) * (1 + (a - b) ** 2 / (4 * (a + b) ** 2)) + (1 / 2) * pi * a * b V = (1 / 4) * pi * a * b * c D1 = a D2 = b D3 = c return V, A, np.sort([D1, D2, D3]) def shape_9(a, b, c): # Prism on parallelogram base # A = a*b + sqrt(a**2 + b**2)/4 * c #this # formula from Hillebrand 1999 is wrong, it gives approximately 50# error A = a * b + 2 * sqrt(a ** 2 + b ** 2) * c V = (1 / 2) * a * b * c D1 = a D2 = b D3 = c return V, A, np.sort([D1, D2, D3]) def shape_10(a): # Cube A = 6 * a ** 2 V = a ** 3 D1 = a D2 = a D3 = a return V, A, np.sort([D1, D2, D3]) def shape_11(a, c): # Prism on 
triangle base 1 # there is a typo in the formula for area 3 ab + \sqrt(3)/2 * # a^2 on web, it should be A = 3 * a * c + (1 / 2) * sqrt(3) * a ** 2 V = (1 / 4) * sqrt(3) * a ** 2 * c D1 = a D2 = sqrt(3) / 2 * a # height D3 = c return V, A, np.sort([D1, D2, D3]) def shape_12(a, b, c): # Half prism on elliptic base # A = pi/4 * (a * b + b * c + a*c ) + a*c # this formula from http://phytobioimaging.unisalento.it/en-us/products/AtlasOfShapes.aspx?ID_Tipo=3 # is wrong (also in Hillebrand 1999) # one can use A = pi/2 * a*b + pi/2 * (a/2 + b)*c + a*c # a more precise formula is A = (1 / 2) * pi * ((1 / 2) * a + b) * (1 + (a - 2 * b) ** 2 / (4 * (a + 2 * b) ** 2)) * c + a * c + ( 1 / 2) * pi * a * b V = (1 / 4) * pi * a * b * c D1 = a D2 = b D3 = c return V, A, np.sort([D1, D2, D3]) def shape_14(d, h): # Double cone A = (1 / 2) * pi * d * sqrt(d ** 2 + h ** 2) V = (1 / 12) * pi * d ** 2 * h D1 = d D2 = d D3 = h return V, A, np.sort([D1, D2, D3]) def shape_15(d1, d2, h): # 2 truncated cones. I did not check it (there is only one cell) l = sqrt((d2 / 2 - d1 / 2) ** 2 + h ** 2) A = pi * l * (d1 + d2) + pi / 2 * d1 ** 2 V = pi / 6 * h * (d1 ** 2 + d1 * d2 + d2 ** 2) D1 = max(d1, d2) D2 = max(d1, d2) D3 = 2 * h return V, A, np.sort([D1, D2, D3]) def shape_16(d, d1, d2, h, h1, h2): # Prolate spheroid + 2 Cylinders A = pi * d1 * h1 + pi * d2 * h2 + pi * d * h ** 2 * asin(sqrt(-d ** 2 + h ** 2) / h) / ( 2 * sqrt(-d ** 2 + h ** 2)) + (1 / 2) * pi * d ** 2 V = (1 / 4) * pi * d1 ** 2 * h1 + (1 / 4) * pi * d2 ** 2 * h2 + (1 / 6) * pi * d ** 2 * h D1 = d D2 = d D3 = h1 + h2 + h return V, A, np.sort([D1, D2, D3]) def shape_17(d, h): # Cylinder +2 cones # I assume that the cones are equilateral # Both Hillebrand 1999 and web have mistakes in the formulas # A = pi*d**2+pi*d*(h-sqrt(3)*d) # V = (1/12)*pi*d**3*sqrt(3)+(1/4)*pi*d**2*(h-sqrt(3)*d) # here I assume that the cone height equals d/2 A = (1 / 2) * pi * d ** 2 * sqrt(2) + pi * d * (h - d) V = -(1 / 6) * pi * d ** 3 + (1 / 4) * pi * 
d ** 2 * h D1 = d D2 = d D3 = h return V, A, np.sort([D1, D2, D3]) def shape_19(d, h): # cone+half sphere. The Volume is wrong on web, Area is wrong on web A = (1 / 4) * pi * d * (sqrt(2 * d ** 2 - 4 * d * h + 4 * h ** 2) + 2 * d) V = (1 / 24) * pi * d ** 2 * (d + 2 * h) D1 = d D2 = d D3 = h return V, A, np.sort([D1, D2, D3]) def shape_20(b, c, h, h1): # Half ellipsoid + Cone (on elliptic base) # on web there was only Volume ConeSideArea = (1 / 2) * pi * ((1 / 4) * b * sqrt(b ** 2 + 4 * h1 ** 2) + (1 / 4) * c * sqrt(c ** 2 + 4 * h1 ** 2)) HalfEllArea = ((1 / 8) * pi * (b + c) * ((1 / 2) * b + (1 / 2) * c + 2 * h ** 2 * asin(sqrt(4 * h ** 2 - (b + c) ** 2) / (2 * h)) / sqrt(4 * h ** 2 - (b + c) ** 2))) A = ConeSideArea + HalfEllArea V = (1 / 12) * pi * c * b * h1 + (1 / 12) * pi * b * c * h D1 = b D2 = c D3 = h / 2 + h1 return V, A, np.sort([D1, D2, D3]) def shape_21(a, a1, b, b1, c): # Prism on elliptic base+ box, ASh gives a correct formula for V, but a bit wrong for A P = pi * ((1 / 2) * a + (1 / 2) * b) * (1 + (a - b) ** 2 / (4 * (a + b) ** 2)) + 2 * a1 A = P * c + 2 * a1 * b1 + (1 / 2) * pi * a * b V = a1 * b1 * c + (1 / 4) * pi * a * b * c D1 = b D2 = c D3 = a + a1 return V, A, np.sort([D1, D2, D3]) def shape_22(d, h): # Cylinder + 2 Half spheres A = d * pi * (d + h) V = (1 / 6) * pi * d ** 3 + (1 / 4) * pi * d ** 2 * h D1 = d D2 = d D3 = h + d return V, A, np.sort([D1, D2, D3]) def shape_23(b, c, d1, d2, d3, h, h1, h2, h3): # Ellipsoid+2cones+cylinder A = ((1 / 4) * pi * (b + c) * ((1 / 2) * b + (1 / 2) * c + 2 * h ** 2 * asin(sqrt(4 * h ** 2 - (b + c) ** 2) / (2 * h)) / sqrt( 4 * h ** 2 - (b + c) ** 2)) - (1 / 4) * pi * d2 ** 2 - (1 / 4) * pi * d3 ** 2 + h1 * d1 * pi + (1 / 2) * pi * d2 * sqrt( h2 ** 2 + (1 / 4) * d2 ** 2) + (1 / 2) * pi * d3 * sqrt(h3 ** 2 + (1 / 4) * d3 ** 2)) V = (1 / 6) * pi * b * c * h + (1 / 4) * pi * d1 ** 2 * h1 + (1 / 12) * pi * d2 ** 2 * h2 + ( 1 / 12) * pi * d3 ** 2 * h3 D1 = b D2 = c D3 = h + h1 + max(h2, h3) return V, A, 
np.sort([D1, D2, D3]) def shape_24(b, c, d1, h, h1): # Ellipsoid + Cone A = ((1 / 4) * pi * (b + c) * ((1 / 2) * b + (1 / 2) * c + 2 * h ** 2 * asin(sqrt(4 * h ** 2 - (b + c) ** 2) / (2 * h)) / sqrt(4 * h ** 2 - (b + c) ** 2)) - (1 / 4) * pi * d1 ** 2 + (1 / 2) * pi * d1 * sqrt(h1 ** 2 + (1 / 4) * d1 ** 2)) V = (1 / 6) * pi * b * c * h + (1 / 12) * pi * d1 ** 2 * h1 D1 = b D2 = c D3 = h + h1 return V, A, np.sort([D1, D2, D3]) def shape_25(d1, d2, d3, d4, h1, h2, h3, h4): # Cylinder + 3 Cones A = (pi * ((1 / 2) * d1 + (1 / 2) * d4) * sqrt(((1 / 2) * d4 - (1 / 2) * d1) ** 2 + h4 ** 2) + (1 / 4) * pi * d4 ** 2 + h1 * d1 * pi + (1 / 4) * pi * d1 ** 2 - (1 / 4) * pi * d3 ** 2 - ( 1 / 4) * pi * d2 ** 2 + (1 / 2) * pi * d3 * sqrt(h3 ** 2 + (1 / 4) * d3 ** 2) + (1 / 2) * pi * d2 * sqrt( h2 ** 2 + (1 / 4) * d2 ** 2)) V = ((1 / 12) * pi * h4 * (d1 ** 2 + d1 * d4 + d4 ** 2) + (1 / 4) * pi * d1 ** 2 * h1 + (1 / 12) * pi * d2 ** 2 * h2 + (1 / 12) * pi * d3 ** 2 * h3) D1 = d4 D2 = d4 D3 = h1 + h4 + max(h2, h3) return V, A, np.sort([D1, D2, D3]) def shape_27(d): # Half sphere A = 3 * pi * d ** 2 * (1 / 4) V = (1 / 12) * pi * d ** 3 D1 = d D2 = d D3 = d / 2 return V, A, np.sort([D1, D2, D3]) def shape_34(a, b1, b2, c, c1, c2, h1, h2): # 2 half ellipsoids + prism on elliptic base A = ((1 / 8) * pi * (2 * b1 + c1) * (b1 + (1 / 2) * c1 + 2 * h1 ** 2 * asin(sqrt(4 * h1 ** 2 - (2 * b1 + c1) ** 2) / (2 * h1)) / sqrt( 4 * h1 ** 2 - (2 * b1 + c1) ** 2)) + (1 / 4) * pi * h1 * c1 - (1 / 4) * pi * a * c1 + (1 / 8) * pi * (2 * b2 + c2) * (b2 + (1 / 2) * c2 + 2 * h2 ** 2 * asin(sqrt(4 * h2 ** 2 - (2 * b2 + c2) ** 2) / (2 * h2)) / sqrt( 4 * h2 ** 2 - (2 * b2 + c2) ** 2)) + (1 / 4) * pi * h2 * c2 - (1 / 4) * pi * a * c2 + pi * ((1 / 2) * a + (1 / 2) * c1) * ( 1 + (a - c1) ** 2 / (4 * (a + c1) ** 2)) * c) # V = (1/12)*pi*b1*c1*h1+(1/12)*pi*b2*c2*h2+(1/4)*pi*a*b*c V = (1 / 6) * pi * b1 * c1 * h1 + (1 / 6) * pi * b2 * c2 * h2 + (1 / 4) * pi * a * c1 * c D1 = np.mean([c1, c2], 2) D2 = b1 + c + b2 D3 
= np.mean([h1, h2], 2) return V, A, np.sort([D1, D2, D3]) def shape_35(a, b, c): # Cymbelloid. in Hi99 we do not have area, in ASh the area is wrongly found # (it should be b instead of c and arcsin(beta)) A = (b * (2 * b + 2 * a ** 2 * asin(sqrt(4 * a ** 2 - 16 * b ** 2) / (2 * a)) / sqrt(4 * a ** 2 - 16 * b ** 2)) * asin(c / (2 * b)) + (1 / 2) * pi * a * b) V = 2 * a * b ** 2 * asin(c / (2 * b)) * (1 / 3) D1 = b D2 = c D3 = a return V, A, np.sort([D1, D2, D3]) def shape_38(d, h): # Half Cone (WARNING: Not provided in cellgeom) # V=pi/6*D^2*G (Hillebrand99) # A=pi*D*l (Hillebrand99) l = np.sqrt((h ** 2 + (d / 2) ** 2)) # l: length of the diagonal connecting the tip of the cone to any point around the circular base V = pi / 12 * d ** 2 * h /2 A = pi / 2 * d * (d / 2 + l) / 2 + d*h/2 # Last term is the triangle D1 = d/2 D2 = l D3 = h return V, A, np.sort([D1, D2, D3]) def shape_40(b, c, h): # Gomphonemoid A = (1 / 2) * b * (2 * h + pi * h * asin(c / (2 * h)) + ((1 / 2) * pi - 2) * b) V = (1 / 4) * h * b * (h + ((1 / 4) * pi - 1) * b) * asin(c / (2 * h)) D1 = b D2 = c D3 = h return V, A, np.sort([D1, D2, D3]) def shape_41(a, c, h): # Sickle-shaped prism b = a b2 = 0 * b # assume the inner semi axis equals 0 A = (1 / 2) * pi * (b * c + b * h + b2 * c - b2 * h + c * h) V = (1 / 4) * pi * c * h * a D1 = a D2 = c D3 = h return V, A, np.sort([D1, D2, D3]) def shape_43(a, b, c, d1, d2, d3, d4, h1, h2, h3, h4): # Prism on elliptic base + 4 Cones A = (c * pi * ((1. / 2.) * a + (1. / 2.) * b) * (1 + (a - b) ** 2. / (4 * (a + b) ** 2)) + (1. / 2.) * pi * a * b - (1. / 4.) 
* pi * d1 ** 2 - (1 / 4) * pi * d2 ** 2 - (1 / 4) * pi * d3 ** 2 - ( 1 / 4) * pi * d4 ** 2 + (1 / 2) * pi * d1 * sqrt(h1 ** 2 + (1 / 4) * d1 ** 2) + (1 / 2) * pi * d2 * sqrt(h2 ** 2 + (1 / 4) * d2 ** 2) + (1 / 2) * pi * d3 * sqrt(h3 ** 2 + (1 / 4) * d3 ** 2) + (1 / 2) * pi * d4 * sqrt( h4 ** 2 + (1 / 4) * d4 ** 2)) V = ((1 / 4) * pi * a * b * c + (1 / 12) * pi * d1 ** 2 * h1 + (1 / 12) * pi * d2 ** 2 * h2 + (1 / 12) * pi * d3 ** 2 * h3 + (1 / 12) * pi * d4 ** 2 * h4) D1 = a D2 = b D3 = max(h1, h2) + c + max(h3, h4) return V, A, np.sort([D1, D2, D3]) def shape_44(d, h): # Pyramid (rectangular base) A = sqrt(d ** 2 + 4 * h ** 2) * d + d ** 2 V = (1 / 3) * d ** 2 * h D1 = d D2 = d D3 = h return V, A, np.sort([D1, D2, D3]) def shape_46(a, b): # Prism on triangle-base 2 A = 3 * a * b + (1 / 2) * a ** 2 * sqrt(3) V = (1 / 4) * a ** 2 * sqrt(3) * b D1 = a D2 = sqrt(3) / 2 * a D3 = b return V, A, np.sort([D1, D2, D3]) def shape_51(b1, b2, c1, c2, h1, h2): # 2 Half ellipsoids A = ((1 / 8) * pi * (2 * b1 + c1) * (b1 + (1 / 2) * c1 + 2 * h1 ** 2 * asin(sqrt(4 * h1 ** 2 - (2 * b1 + c1) ** 2) / (2 * h1)) / sqrt( 4 * h1 ** 2 - (2 * b1 + c1) ** 2)) + (1 / 8) * pi * (2 * b2 + c2) * (b2 + (1 / 2) * c2 + 2 * h2 ** 2 * asin(sqrt(4 * h2 ** 2 - (2 * b2 + c2) ** 2) / (2 * h2)) / sqrt( 4 * h2 ** 2 - (2 * b2 + c2) ** 2))) V = (1 / 6) * pi * b1 * c1 * h1 + (1 / 6) * pi * b2 * c2 * h2 D1 = np.mean([c1, c2], 2) D2 = b1 + b2 D3 = np.mean([h1, h2], 2) return V, A, np.sort([D1, D2, D3]) def get_dim(fields,headersin,dim,dataset='Olenina'): col = -1 #try to find the dimension if dataset=='Olenina': if dim=='L1': for coli,colname in enumerate(headersin): if colname[0:10]=='Length(l1)': col=coli if dim=='L2': for coli,colname in enumerate(headersin): if colname[0:10]=='Length(l2)': col=coli if dim=='W': for coli,colname in enumerate(headersin): if colname[0:8]=='Width(w)': col=coli if dim=='H': for coli,colname in enumerate(headersin): if colname[0:9]=='Height(h)': col=coli if dim=='D1': for 
coli,colname in enumerate(headersin): if colname[0:12]=='Diameter(d1)': col=coli if dim=='D2': for coli,colname in enumerate(headersin): if colname[0:12]=='Diameter(d2)': col=coli elif dataset == 'Roselli': raise(ValueError('Extracting dimensions from Roselli datasets not yet implemented')) #if found, extract the string and attempt converting to float if col == -1: # print ('dimension not found: %s' % (dim)) val = np.nan else: valstr=fields[col] try: val=float(valstr) except: val=np.nan return val
AlexRyabov/Cell-shape
python/calc_geom_funcs.py
calc_geom_funcs.py
py
28,215
python
en
code
2
github-code
36
19738731229
from vigilo.models.session import DBSession, MigrationDDL
from vigilo.models import tables


def upgrade(migrate_engine, actions):
    """
    Migrates the model: renames Host.mainip to "address" and widens the
    column to varchar(255).

    @param migrate_engine: Database connection that may be used during
        the migration.
    @type migrate_engine: C{Engine}
    @param actions: Container listing the actions to perform once this
        migration has been applied.
    @type actions: C{MigrationActions}
    """
    # %(fullname)s is substituted by MigrationDDL with the table's full name.
    statements = [
        "ALTER TABLE %(fullname)s RENAME COLUMN mainip TO address",
        "ALTER TABLE %(fullname)s ALTER COLUMN address TYPE varchar(255)",
    ]
    MigrationDDL(statements).execute(DBSession, tables.Host.__table__)

    # Requires an update of VigiReport.
    actions.upgrade_vigireport = True
vigilo/models
src/vigilo/models/migration/002_Host_mainip_is_really_an_address.py
002_Host_mainip_is_really_an_address.py
py
803
python
fr
code
4
github-code
36
22620837649
class Student:
    # Simple record of a student; attributes are set verbatim from the arguments.
    def __init__(self, name, age, gpa, adviser, email):
        self.name = name
        self.age = age
        self.gpa = gpa
        self.adviser = adviser
        self.email = email


# Seed database of students (age: int, gpa: float).
students = [
    Student("Dimash", 19, 3.7, "Abdygalym", "dimash@gmail.com"),
    Student("Ilyas", 19, 3.1, "Abdygalym", "ilyas@gmail.com"),
    Student("Islam", 19, 3.61, "Asem", "islam@gmail.com"),
    Student("Batyr", 20, 3.15, "Asem", "batyr@gmail.com"),
]

a = str(input("Введите имя ",))  # prompt: "Enter a name"


def find_person(students, a):
    # Linear search by exact name match; returns the Student or None.
    for student in students:
        if student.name == a:
            return student
    return None


result = find_person(students, a);
if result:
    print(f"Имя студента {result.name}")  # Print the student's name
    print(f"Возраст студента {result.age}")  # Print the student's age
    print(f"GPA студента {result.gpa}")  # Print the student's GPA
    print(f"Эдвайзер студента {result.adviser}")  # Print the student's adviser
    print(f"Почта студента {result.email}")  # Print the student's email
else:
    print(f"Студент по имени {a} отсутствует в базе данных")  # "Student <a> not found"

# NOTE(review): original indentation was lost in extraction; this add-student
# section may have belonged inside the `else` branch above — confirm upstream.
print("Добавление нового студента")  # "Adding a new student"
name = str(input("Введите имя студента ",))
# NOTE(review): age/gpa are stored as str here, while the seed records above
# use int/float — inconsistent types in the same list; confirm intent.
age = str(input("Введите возраст студента ",))
gpa = str(input("Введите GPA студента ",))
adviser = str(input("Введите эдвайзера студента ",))
email = str(input("Введите почту студента ",))
newStudent = Student(name, age, gpa, adviser, email)
students.append(newStudent)
for student in students:
    print(f"Имя студента {student.name}")  # Print the student's name
    print(f"Возраст студента {student.age}")  # Print the student's age
    print(f"GPA студента {student.gpa}")  # Print the student's GPA
    print(f"Эдвайзер студента {student.adviser}")  # Print the student's adviser
    print(f"Почта студента {student.email}")  # Print the student's email
    print(f"\n")
AlikhanIT/func2
main.py
main.py
py
2,255
python
ru
code
0
github-code
36
20655962047
import dataclasses import subprocess from typing import Any, ClassVar, List, Optional from fancy_dataclass.utils import DataclassMixin, issubclass_safe, obj_class_name class SubprocessDataclass(DataclassMixin): """Mixin class providing a method for converting dataclass fields to command-line args that can be used to make a subprocess call. Other arguments can be passed into the `metadata` argument of a `dataclasses.field`, namely: - `exec` (boolean flag indicating that this field should be treated as the name of the executable, rather than an argument) - `args` (list of command-line arguments corresponding to the field—only the first will be used, and only if it starts with a hyphen) - `exclude` (boolean flag indicating that the field should not be included in the args)""" def __post_init__(self) -> None: exec_field = None for (name, field) in self.__dataclass_fields__.items(): if field.metadata.get('exec', False): if (exec_field is None): exec_field = name else: raise TypeError("cannot have more than one field with 'exec' flag set to True") def get_arg(self, name: str, suppress_defaults: bool = False) -> List[str]: """Given the name of a dataclass field, gets the command-line args for that field. 
Args: name: Name of dataclass field suppress_defaults: If `True`, suppresses arguments that are equal to the default values Returns: List of command-line args corresponding to the field""" field = self.__dataclass_fields__[name] if field.metadata.get('exclude', False): # exclude the argument return [] if getattr(field.type, '__origin__', None) is ClassVar: # ignore fields associated with the class, rather than the instance return [] val = getattr(self, name, None) if (val is None): # optional value is None return [] if issubclass_safe(field.type, SubprocessDataclass): # get args via nested SubprocessDataclass return val.args(suppress_defaults = suppress_defaults) if field.metadata.get('exec', False): # this field is the executable, so return no arguments return [] if suppress_defaults: # if value matches the default, suppress the argument default = None has_default = True if (field.default == dataclasses.MISSING): if (field.default_factory == dataclasses.MISSING): has_default = False else: default = field.default_factory() else: default = field.default if has_default and (val == default): return [] if field.metadata.get('args'): # use arg name provided by the metadata arg = field.metadata['args'][0] if (not arg.startswith('-')): arg = None else: # use the field name (assume a single dash if it is a single letter) prefix = '-' if (len(name) == 1) else '--' arg = prefix + name.replace('_', '-') if isinstance(val, bool): # make it a boolean flag if True, otherwise omit it if (not val): arg = None val = [] elif isinstance(val, (list, tuple)): if val: val = [str(x) for x in val] else: arg = None elif (val is not None): # convert the field value to a string val = str(val) args = [arg] if arg else [] args += val if isinstance(val, list) else [val] return args def get_executable(self) -> Optional[str]: """Gets the name of an executable to run with the appropriate arguments. 
By default, this returns the name of the first dataclass field whose `exec` metadata flag is set to `True`, if one exists, and `None` otherwise. Returns: Name of the executable to run""" name = None for (name, field) in self.__dataclass_fields__.items(): if field.metadata.get('exec', False): return getattr(self, name, None) return None def args(self, suppress_defaults: bool = False) -> List[str]: """Converts dataclass fields to a list of command-line arguments for a subprocess call. Args: suppress_defaults: If `True`, suppresses arguments that are equal to the default values Returns: List of command-line args corresponding to the dataclass fields""" args = [] for name in self.__dataclass_fields__: args += [arg for arg in self.get_arg(name, suppress_defaults = suppress_defaults) if arg] return args def run_subprocess(self, **kwargs: Any) -> subprocess.CompletedProcess: """Executes the full subprocess command corresponding to the dataclass parameters. Args: kwargs: Keyword arguments passed to `subprocess.run` Returns: `CompletedProcess` object produced by `subprocess.run` Raises: ValueError: If no executable was found from the `get_executable` method""" executable = self.get_executable() if (not executable): raise ValueError(f'No executable identified for use with {obj_class_name(self)!r} instance') args = [executable] + self.args() return subprocess.run(args, **kwargs)
jeremander/fancy-dataclass
fancy_dataclass/subprocess.py
subprocess.py
py
5,568
python
en
code
0
github-code
36
2360892821
""" 字符串中字母大小写互换 【问题描述】编写程序,功能是把输入的字符串的大写字母变成小写字母,小写字母变成大写字母,非字母的字符不作变换。输出变换后的结果。 【输入形式】字符串,包含字母和非字母字符。 【输出形式】字符串,字母的大小写已经发生变换。 【样例输入】abcABC 【样例输出】ABCabc """ n = input() m = "" for i in n : if i.isupper(): i = i.lower() m = m + i elif i.lower(): i = i.upper() m = m + i print(m)
xzl995/Python
CourseGrading/5.1.8字符串中字母大小写互换.py
5.1.8字符串中字母大小写互换.py
py
579
python
zh
code
3
github-code
36
4393844283
# 클레어와 물약 # r1 x # https://www.acmicpc.net/problem/20119 # https://welog.tistory.com/256 import sys from collections import deque input = sys.stdin.readline n, m = map(int, input().split()) graph = [set() for _ in range(n + 1)] recipe_dict = {} for _ in range(m): data = list(map(int, input().split())) if data[-1] not in recipe_dict: recipe_dict[data[-1]] = [[data[1:-1], data[0]]] else: recipe_dict[data[-1]].append([data[1:-1], data[0]]) for i in range(1, len(data) - 1): graph[data[i]].add(data[-1]) l = int(input()) l_list = list(map(int, input().split())) check = [False] * (n + 1) result = set() for i in l_list: check[i] = True result.add(i) q = deque(l_list) while q: now = q.popleft() for i in graph[now]: if check[i]: continue for idx in range(len(recipe_dict[i])): recipe, cnt = recipe_dict[i][idx] if now in recipe: recipe.remove(now) cnt -= 1 recipe_dict[i][idx] = [recipe, cnt] if cnt == 0: check[i] = True q.append(i) result.add(i) print(len(result)) result = list(result) result.sort() print(*result)
sjjam/Algorithm-Python
baekjoon/20119.py
20119.py
py
1,292
python
en
code
0
github-code
36
32296716535
import setuptools with open("README.md", "r") as fh: long_description = fh.read() setuptools.setup( name="trello_client-basics-api-denisshvayko", version="0.0.1", author="denis", author_email="denis.shvayko@phystech.edu", description="Обертка для trello API", long_description=long_description, long_description_content_type="text/markdown", url="https://github.com/denisshvayko/D1.8.git", packages=setuptools.find_packages(), classifiers=["Programming Language :: Python :: 3", "License :: OSI Approved :: MIT License", "Operating System :: OS Independent", ], python_requires='>=3.6', )
denisshvayko/D1.8
setup.py
setup.py
py
640
python
en
code
0
github-code
36
31757699716
print('Bom dia') count=0 soma =0 for idade in range(10): idade=int(input('Digite sua idade:')) if(idade>=18): count=count+1 soma=idade+soma media=soma/10 print('a quantidade de pessoas é:',count) print('A media:', media)
4ntonio19/PythonExercises
PythonExercises/ListaRevisão6.py
ListaRevisão6.py
py
240
python
pt
code
0
github-code
36
9284740252
a = {'timezone': 'UTC', 'serverTime': 1570802268092, 'rateLimits': [{'rateLimitType': 'REQUEST_WEIGHT', 'interval': 'MINUTE', 'intervalNum': 1, 'limit': 1200}, {'rateLimitType': 'ORDERS', 'interval': 'MINUTE', 'intervalNum': 1, 'limit': 1200}], 'exchangeFilters': [], 'symbols': [{'symbol': 'BTCUSDT', 'status': 'TRADING', 'maintMarginPercent': '2.5000', 'requiredMarginPercent': '5.0000', 'baseAsset': 'BTC', 'quoteAsset': 'USDT', 'pricePrecision': 2, 'quantityPrecision': 3, 'baseAssetPrecision': 8, 'quotePrecision': 8, 'filters': [ {'minPrice': '0.01', 'maxPrice': '100000', 'filterType': 'PRICE_FILTER', 'tickSize': '0.01'}, {'stepSize': '0.001', 'filterType': 'LOT_SIZE', 'maxQty': '1000', 'minQty': '0.001'}, {'stepSize': '0.001', 'filterType': 'MARKET_LOT_SIZE', 'maxQty': '1000', 'minQty': '0.001'}, {'limit': 200, 'filterType': 'MAX_NUM_ORDERS'}, {'multiplierDown': '0.8500', 'multiplierUp': '1.1500', 'multiplierDecimal': '4', 'filterType': 'PERCENT_PRICE'}], 'orderTypes': ['LIMIT', 'MARKET', 'STOP'], 'timeInForce': ['GTC', 'IOC', 'FOK', 'GTX']}] } symbols_dict = {} items = a.get('symbols', []) for item in items: if item.get('status') == 'TRADING': symbol = item['symbol'] symbol_data = {'symbol': symbol} for f in item['filters']: if f['filterType'] == 'PRICE_FILTER': symbol_data['min_price'] = float(f['tickSize']) elif f['filterType'] == 'LOT_SIZE': symbol_data['min_qty'] = float(f['stepSize']) elif f['filterType'] == 'MIN_NOTIONAL': symbol_data['min_notional'] = float(f['minNotional']) # # symbols_dict[symbol] = symbol_data # # b = a.get('symbols', 'Not Found') # print(b)
Sam-0225/Quant_Grid
test.py
test.py
py
2,134
python
en
code
1
github-code
36
30466798237
class Solution: def shortestWordDistance(self, words, word1: str, word2: str) -> int: res = float('inf') res1 = [] res2 = [] for i in range(len(words)): if words[i] == word1: res1.append(i) if words[i] == word2: res2.append(i) if word1 == word2 and len(res1) <= 1: return 2147483647 i = 0 j = 0 while i < len(res1) and j < len(res2): if res1[i] == res2[j]: i += 1 continue res = min(res, abs(res1[i] - res2[j])) if res1[i] > res2[j]: j += 1 else: i += 1 return res s = Solution() w1 = 'a' w2 = 'a' words = ['a', 'a', 'a'] print(s.shortestWordDistance(words, w1, w2)) w1 = 'makes' w2 = 'makes' words = ["practice", "makes", "perfect", "coding", "makes"] print(s.shortestWordDistance(words, w1, w2)) w1 = 'practice' w2 = 'practice' words = ["practice", "makes", "perfect", "coding", "makes"] print(s.shortestWordDistance(words, w1, w2))
dundunmao/LeetCode2019
245. Shortest Word Distance III.py
245. Shortest Word Distance III.py
py
1,097
python
en
code
0
github-code
36
42493212575
""" WRITEME """ from __future__ import absolute_import, print_function, division from copy import copy, deepcopy from sys import getsizeof import sys import traceback import numpy as np import theano from theano.compat import izip from six import reraise from six.moves import StringIO from theano.gof import utils from theano.gof import graph from theano.gof.type import Type from .utils import undef __excepthook = sys.excepthook def log_thunk_trace(value, f=sys.stderr): """ Log Theano's diagnostic stack trace for an exception raised by raise_with_op. """ # in future, consider accepting `write` as arg rather than file # to support writing to a logger def write(msg): print("log_thunk_trace: %s" % msg.strip(), file=f) if hasattr(value, '__thunk_trace__'): trace2 = value.__thunk_trace__ write("There was a problem executing an Op.") if trace2 is None: write("Could not find where this Op was defined.") write(" * You might have instantiated this Op " "directly instead of using a constructor.") write(" * The Op you constructed might have been" " optimized. Try turning off optimizations.") elif trace2: write("Definition in: ") for line in traceback.format_list(trace2): write(line) write("For the full definition stack trace set" " the Theano flags traceback.limit to -1") def thunk_hook(type, value, trace): """ This function is meant to replace excepthook and do some special work if the exception value has a __thunk_trace__ field. In that case, it retrieves the field, which should contain a trace as returned by L{traceback.extract_stack}, and prints it out on L{stderr}. The normal excepthook is then called. Parameters: ---------- type Exception class value Exception instance trace Traceback object Notes ----- This hook replaced by nosetests, so it does not run in nose tests. 
""" log_thunk_trace(value) __excepthook(type, value, trace) sys.excepthook = thunk_hook # TODO: Make this work with linker defined schedule def raise_with_op(node, thunk=None, exc_info=None, storage_map=None): """ Re-raise an exception while annotating the exception object with debug info. Parameters ---------- node : Apply node The Apply node object that resulted in the raised exception. exc_info : tuple, optional A tuple containing the exception type, exception object and associated traceback, as would be returned by a call to `sys.exc_info()` (which is done if `None` is passed). storage_map: dict, optional storage map of the theano function that resulted in the raised exception. Notes ----- This re-raises the exception described by `exc_info` (or the last one raised, if `exc_info` is omitted) and annotates the exception object with several new members which may be helpful for debugging Theano graphs. They are: * __op_instance__: The Op that is responsible for the exception being raised. * __thunk_trace__: A traceback corresponding to the code that actually generated the exception, if it is available. * __applynode_index__: The index of the Apply node corresponding to this op in `op.fgraph.toposort()`. The exception is not annotated if it is of type `KeyboardInterrupt`. 
""" if exc_info is None: exc_info = sys.exc_info() exc_type, exc_value, exc_trace = exc_info if exc_type == KeyboardInterrupt: # print a simple traceback from KeyboardInterrupt reraise(exc_type, exc_value, exc_trace) try: trace = node.outputs[0].tag.trace except AttributeError: try: trace = node.op.tag.trace except AttributeError: trace = () exc_value.__thunk_trace__ = trace exc_value.__op_instance__ = node topo = node.fgraph.toposort() if node in topo: node_index = topo.index(node) else: node_index = None exc_value.__applynode_index__ = node_index hints = [] detailed_err_msg = "\nApply node that caused the error: " + str(node) if exc_value.__applynode_index__ is not None: detailed_err_msg += "\nToposort index: %d" % node_index types = [getattr(ipt, 'type', 'No type') for ipt in node.inputs] detailed_err_msg += "\nInputs types: %s\n" % types if thunk is not None: if hasattr(thunk, 'inputs'): shapes = [getattr(ipt[0], 'shape', 'No shapes') for ipt in thunk.inputs] strides = [getattr(ipt[0], 'strides', 'No strides') for ipt in thunk.inputs] scalar_values = [] for ipt in thunk.inputs: if getattr(ipt[0], "size", -1) <= 5: scalar_values.append(ipt[0]) else: scalar_values.append("not shown") else: shapes = "The thunk don't have an inputs attributes." 
strides = "So we can't access the strides of inputs values" scalar_values = "And can't print its inputs scalar value" clients = [[c[0] for c in var.clients] for var in node.outputs] detailed_err_msg += ("Inputs shapes: %s" % shapes + "\nInputs strides: %s" % strides + "\nInputs values: %s" % scalar_values) if theano.config.exception_verbosity == 'high': detailed_err_msg += "\nInputs type_num: %s" % str( [getattr(getattr(i[0], 'dtype', ''), 'num', '') for i in thunk.inputs]) if hasattr(node.op, '__input_name__'): detailed_err_msg += "\nInputs name: %s\n" % str(node.op.__input_name__) detailed_err_msg += "\nOutputs clients: %s\n" % clients else: hints.append( "HINT: Use another linker then the c linker to" " have the inputs shapes and strides printed.") # Print node backtraces tr = getattr(node.outputs[0].tag, 'trace', []) if isinstance(tr, list) and len(tr) > 0: detailed_err_msg += "\nBacktrace when the node is created(use Theano flag traceback.limit=N to make it longer):\n" # Print separate message for each element in the list of batcktraces sio = StringIO() for subtr in tr: traceback.print_list(subtr, sio) detailed_err_msg += str(sio.getvalue()) else: hints.append( "HINT: Re-running with most Theano optimization disabled could" " give you a back-trace of when this node was created. This can" " be done with by setting the Theano flag" " 'optimizer=fast_compile'. 
If that does not work," " Theano optimizations can be disabled with 'optimizer=None'.") if theano.config.exception_verbosity == 'high': f = StringIO() theano.printing.debugprint(node, file=f, stop_on_name=True, print_type=True) detailed_err_msg += "\nDebugprint of the apply node: \n" detailed_err_msg += f.getvalue() # Prints output_map if theano.config.exception_verbosity == 'high' and storage_map is not None: detailed_err_msg += "\nStorage map footprint:\n" shared_input_list = [ item for item in node.fgraph.inputs if isinstance(item, theano.compile.SharedVariable)] nonshared_input_list = [ item for item in node.fgraph.inputs if not isinstance(item, theano.compile.SharedVariable)] storage_map_list = [] total_size = 0 total_size_inputs = 0 for k in storage_map: storage_map_item = [] # storage_map_item[0]: the variable storage_map_item.append(str(k)) # storage_map_item[1]: the shape shapeinfo = None if hasattr(storage_map[k][0], 'shape'): shapeinfo = storage_map[k][0].shape if len(shapeinfo) != 0: storage_map_item.append(shapeinfo) else: storage_map_item.append(tuple()) else: storage_map_item.append(None) # storage_map_item[2]: itemsize # storage_map_item[3]: bytes if hasattr(storage_map[k][0], 'dtype'): dtype = storage_map[k][0].dtype storage_map_item.append(np.dtype(dtype).itemsize) if shapeinfo is None: storage_map_item.append(-1) else: sz = np.dtype(dtype).itemsize * np.prod(shapeinfo) storage_map_item.append(sz) total_size += sz if not k.owner: total_size_inputs += sz else: # If it is a view, don't count it twice. if getattr(k.owner.op, 'view_map', None): vmap = k.owner.op.view_map out_idx = k.owner.outputs.index(k) data = storage_map[k][0] if out_idx in vmap: assert len(vmap[out_idx]) == 1 input_data = storage_map[ k.owner.inputs[vmap[out_idx][0]]][0] if k.type.may_share_memory(data, input_data): total_size -= sz # If it is a destroyed input, the input # shouldn't be in the storage_map anymore # except if there is a special flag used. 
So # we still must check it. if getattr(k.owner.op, 'destroy_map', None): vmap = k.owner.op.destroy_map out_idx = k.owner.outputs.index(k) data = storage_map[k][0] if out_idx in vmap: assert len(vmap[out_idx]) == 1 input_data = storage_map[ k.owner.inputs[vmap[out_idx][0]]][0] if k.type.may_share_memory(data, input_data): total_size -= sz else: bytes = getsizeof(storage_map[k][0]) storage_map_item.append(bytes) storage_map_item.append(-1) # Flag of shared val # storage_map_item[4] if k in shared_input_list: storage_map_item.append(True) elif k in nonshared_input_list: storage_map_item.append(False) else: storage_map_item.append(None) storage_map_list.append(storage_map_item) from operator import itemgetter storage_map_list.sort(key=itemgetter(3), reverse=True) for item in storage_map_list: if item[3] == -1: continue detailed_err_msg += " - " + item[0] + ", " if item[4] is True: detailed_err_msg += "Shared Input, " elif item[4] is False: detailed_err_msg += "Input, " if item[1] is not None: detailed_err_msg += "Shape: %s, " % str(item[1]) detailed_err_msg += "ElemSize: %s Byte(s)" % item[2] if item[3] is not None: detailed_err_msg += ", TotalSize: %s Byte(s)\n" % item[3] else: detailed_err_msg += "\n" detailed_err_msg += " TotalSize: %s Byte(s) %.3f GB\n" % ( total_size, total_size / 1024. / 1024 / 1024) detailed_err_msg += " TotalSize inputs: %s Byte(s) %.3f GB\n" % ( total_size_inputs, total_size_inputs / 1024. / 1024 / 1024) else: hints.append( "HINT: Use the Theano flag 'exception_verbosity=high'" " for a debugprint and storage map footprint of this apply node.") try: exc_value = exc_type(str(exc_value) + detailed_err_msg + '\n' + '\n'.join(hints)) except TypeError: print("WARNING: %s error does not allow us to add extra error message" % str(exc_type)) # Some exception need extra parameter in inputs. So forget the # extra long error message in that case. 
pass reraise(exc_type, exc_value, exc_trace) class Linker(object): """ WRITEME """ def clone(self, allow_gc=undef): new = copy(self) if allow_gc is not undef: new.allow_gc = allow_gc return new def make_thunk(self): """ This function must return a triplet (function, input_variables, output_variables) where function is a thunk that operates on the returned variables. If inplace is True, the input_variables and output_variables lists will be the same as the inputs and outputs of the graph provided to the L{Linker}. Else, independent variables will be returned. Examples -------- x, y = Variable(Double), Variable(Double) e = x + y fgraph = FunctionGraph([x, y], [e]) fn, (new_x, new_y), (new_e, ) = MyLinker(fgraph).make_thunk(inplace) new_x.data = 1.0 new_y.data = 2.0 fn() print new_e.data # 3.0 print e.data # 3.0 iff inplace == True (else unknown) """ raise utils.MethodNotDefined("make_thunk", type(self), self.__class__.__name__) # DELETEME # def make_function(self, unpack_single=True, **kwargs): """ Returns a function that takes values corresponding to the inputs of the fgraph used by this L{Linker} and returns values corresponding the the outputs of that fgraph. If inplace is True, the calculations will operate in the same storage the fgraph uses, else independent storage will be allocated for the function. Example ------- e = x + y fgraph = FunctionGraph([x, y], [e]) fn = MyLinker(fgraph).make_function(inplace) print fn(1.0, 2.0) # 3.0 print e.data # 3.0 iff inplace == True (else unknown) If unpack_single is True (default) and that the function has only one output, then that output will be returned. Else, a list or tuple of length 1 will be returned. 
""" thunk, inputs, outputs = self.make_thunk(**kwargs) def execute(*args): def e_arity(takes, got): return 'Function call takes exactly %i %s (%i given)' % ( takes, ['argument', 'arguments'][takes > 1], got) if (len(args) != len(inputs)): raise TypeError(e_arity(len(inputs), len(args))) for arg, variable in izip(args, inputs): variable.data = arg thunk() if unpack_single: return utils.to_return_values([variable.data for variable in outputs]) else: return [variable.data for variable in outputs] execute.thunk = thunk execute.inputs = inputs execute.outputs = outputs return execute def schedule(self, fgraph): return fgraph.toposort() # TODO: Move this class to the compile module, where it is used (and for which it exists). class Container(object): """ This class joins a variable with its computed value. It is used in linkers, especially for the inputs and outputs of a Function. Parameters ---------- r : a Variable or a Type storage A list of length 1, whose element is the value for `r`. readonly : bool True indicates that this should not be setable by Function[r] = val. strict : bool If True, we don't allow type casting. allow_downcast If True (and `strict` is False), allow upcasting of type, but not downcasting. If False, prevent it. If None (default), allows only downcasting of float to floatX scalar. name : str A string (for pretty-printing?) """ def __init__(self, r, storage, readonly=False, strict=False, allow_downcast=None, name=None): if not isinstance(storage, list) or not len(storage) >= 1: raise TypeError("storage must be a list of length at least one") # self.r = r if isinstance(r, Type): self.type = r else: self.type = r.type if name is None: # Some Type do not have a name field. 
self.name = getattr(r, 'name', None) else: self.name = name self.storage = storage self.readonly = readonly self.strict = strict self.allow_downcast = allow_downcast def __get__(self): return self.storage[0] def __set__(self, value): if self.readonly: raise Exception("Cannot set readonly storage: %s" % self.name) try: if value is None: self.storage[0] = None return kwargs = {} if self.strict: kwargs['strict'] = True if self.allow_downcast is not None: kwargs['allow_downcast'] = self.allow_downcast if hasattr(self.type, 'filter_inplace'): self.storage[0] = self.type.filter_inplace(value, self.storage[0], **kwargs) else: self.storage[0] = self.type.filter(value, **kwargs) except Exception as e: e.args = e.args + (('Container name "%s"' % self.name),) raise data = property(__get__, __set__) value = property(__get__, __set__) def __str__(self): return "<" + str(self.storage[0]) + ">" def __repr__(self): return "<" + repr(self.storage[0]) + ">" def __deepcopy__(self, memo): data_was_in_memo = id(self.storage[0]) in memo r = type(self)( deepcopy(self.type, memo=memo), deepcopy(self.storage, memo=memo), deepcopy(self.readonly, memo=memo), deepcopy(self.strict, memo=memo), deepcopy(self.allow_downcast, memo=memo), deepcopy(self.name, memo=memo), ) # Work around NumPy deepcopy of ndarray with 0 dimension that # don't return an ndarray. if (r.storage[0] is not None and not self.type.is_valid_value(r.storage[0])): assert not data_was_in_memo assert self.type.is_valid_value(self.storage[0]) # This should also work for read only container. r.storage[0] = self.type.filter(r.storage[0], strict=False, allow_downcast=False) memo[id(self.storage[0])] = r.storage[0] return r def map_storage(fgraph, order, input_storage, output_storage, storage_map=None): """Ensure there is storage (a length-1 list) for inputs, outputs, and interior nodes. :param fgraph: The current fgraph. This function uses the inputs and outputs attributes. 
:param order: an iterable over Apply instances (in program running order) :param input_storage: None or existing input storage (see below) :param output_storage: None or existing output storage (see below) :rtype: 3-tuple :returns: (list of storage for inputs, list of storage for outputs, and the `storage_map`) Parameters ---------- fgraph The current fgraph. This function uses the inputs and outputs attributes. order An iterable over Apply instances (in program running order). input_storage None or existing input storage (see below). output_storage None or existing output storage (see below). Returns ------- 3-tuple List of storage for inputs, list of storage for outputs, and the `storage_map`. Extended summary ---------------- This function iterates over the nodes in `order` and ensures that for every input and output `Variable`, there is a unique storage container. This is returned as a dictionary Variable -> storage called the `storage_map`. This function also returns `input_storage`, which is a list of storages corresponding to fgraph.inputs. This function also returns `output_storage`, which is a list of storages corresponding to fgraph.outputs. """ # each Apply argument's data is stored in a list of length 1 (these lists act like pointers) if storage_map is None: storage_map = {} # input_storage is a list of data-containers for the inputs. if input_storage is None: input_storage = [[None] for input in fgraph.inputs] else: assert len(fgraph.inputs) == len(input_storage) # add input storage into storage_map for r, storage in zip(fgraph.inputs, input_storage): if r in storage_map: assert storage_map[r] is storage, ("Given input_storage conflicts " "with storage in given storage_" "map. 
Given input_storage: ", storage, "Storage in storage_ma" "p: ", storage_map[r]) else: storage_map[r] = storage # for orphan in fgraph.orphans: # if not isinstance(orphan, Constant): # raise TypeError("Cannot link a graph with non-constant orphans.", orphan) # storage_map[orphan] = [orphan.data] # allocate output storage if output_storage is not None: assert len(fgraph.outputs) == len(output_storage) for r, storage in zip(fgraph.outputs, output_storage): if r in storage_map: assert storage_map[r] is storage, ("Given output_storage confl" "icts with storage in given" " storage_map. Given output" "_storage: ", storage, "Sto" "rage in storage_map: ", storage_map[r]) else: storage_map[r] = storage # allocate storage for intermediate computation for node in order: for r in node.inputs: if r not in storage_map: assert isinstance(r, graph.Constant) storage_map[r] = [r.data] for r in node.outputs: storage_map.setdefault(r, [None]) for r in fgraph.outputs: if isinstance(r, graph.Constant): storage_map.setdefault(r, [r.data]) # extract output storage if output_storage is None: output_storage = [storage_map[r] for r in fgraph.outputs] return input_storage, output_storage, storage_map def streamline(fgraph, thunks, order, post_thunk_old_storage=None, no_recycling=None, nice_errors=True): """ WRITEME Parameters ---------- fgraph thunks The list of program instructions. order The list of apply instances that gave rise to the thunks (same order as thunks). post_thunk_old_storage A list (corresponding to thunks, order) whose elements are lists of storage cells, that should be cleared after running thecorresponding thunk. A value of None disables this functionality. no_recycling Storage elements that cannot be 'recycled' by repeatedly executing the program. These storage elements are cleared before re-running. nice_errors Run in such a way that the double-traceback is printed. This costs a bit of performance in the inner python loop. 
""" if no_recycling is None: no_recycling = [] if len(thunks) != len(order): raise ValueError('Length of thunks and order must match', (len(thunks), len(order))) if post_thunk_old_storage: if len(thunks) != len(post_thunk_old_storage): raise ValueError( 'Length of thunks and post_thunk_old_storage must match', (len(thunks), len(post_thunk_old_storage))) def streamline_default_f(): for x in no_recycling: x[0] = None try: for thunk, node, old_storage in izip(thunks, order, post_thunk_old_storage): thunk() for old_s in old_storage: old_s[0] = None except Exception: raise_with_op(node, thunk) f = streamline_default_f elif nice_errors: def streamline_nice_errors_f(): for x in no_recycling: x[0] = None try: for thunk, node in izip(thunks, order): thunk() except Exception: raise_with_op(node, thunk) f = streamline_nice_errors_f else: # don't worry about raise_with_op, just go a little faster. # there is a mix of python and c thunks def streamline_fast_f(): for x in no_recycling: x[0] = None for thunk in thunks: thunk() f = streamline_fast_f return f class LocalLinker(Linker): """ Useful base class for L{Linker}s which keep all nodes in the graph, and run a thunk associated with each node. """ def make_thunk(self, input_storage=None, output_storage=None, storage_map=None): return self.make_all(input_storage=input_storage, output_storage=output_storage, storage_map=storage_map)[:3] def make_all(self, input_storage, output_storage): # By convention, subclasses of LocalLinker should implement this function! # # This function should return a tuple of 5 things # 1. function to run the program # 2. input storage # 3. output storage # 4. thunks: list of nodes' functions in the order they will be run by the function in (1) # 5. order: list of nodes, in the order they will be run by the function in (1) raise utils.MethodNotDefined("make_all", type(self), self.__class__.__name__) def gc_helper(node_list): """ Return the set of Variable instances which are computed by node_list. 
Parameters ---------- node_list List of Apply instances in program execution order. Returns ------- 2-tuple FIRST, the set of Variable instances which are computed by node_list, and SECOND a dictionary that maps each Variable instance to a the last node to use Variable as an input. Extended Summary ---------------- This is used to allow garbage collection within graphs. It ignores view_map and destroy_map. This isn't needed as python have reference count. In Theano gc, we should not take into account view_map and destroy_map as if the thunk decided to create a new output, we would delay uselessly its gc by Python. """ # for freeing memory last_user = {} computed = set() for node in node_list: for input in node.inputs: last_user[input] = node for output in node.outputs: computed.add(output) return computed, last_user class PerformLinker(LocalLinker): """ Basic L{Linker} subclass that calls the perform method on each L{Op} in the L{FunctionGraph} in the order given by L{Linker.schedule}. """ def __init__(self, allow_gc=None, schedule=None): if allow_gc is None: allow_gc = theano.config.allow_gc self.fgraph = None self.allow_gc = allow_gc if schedule: self.schedule = schedule def accept(self, fgraph, no_recycling=None, profile=None): """ Parameters ---------- fgraph A PerformLinker can have accepted one FunctionGraph instance at a time. no_recycling WRITEME Returns ------- object self (TODO: WHY? Who calls this function?) 
""" if no_recycling is None: no_recycling = [] if self.fgraph is not None and self.fgraph is not fgraph: return type(self)(allow_gc=self.allow_gc).accept( fgraph, no_recycling, profile) # raise Exception("Cannot accept from a Linker that is already tied to another FunctionGraph.") self.fgraph = fgraph self.no_recycling = no_recycling return self def make_all(self, input_storage=None, output_storage=None, storage_map=None): """ Returns Function to run all nodes, list of input containers, list of outputs Parameters ---------- input_storage list of storages corresponding to fgraph.inputs output_storage list of storages corresponding to fgraph.outputs Returns ------- object Function to run all nodes, list of input containers, list of output containers, list of thunks (for all programs), list of nodes (for all programs). """ fgraph = self.fgraph order = self.schedule(fgraph) no_recycling = self.no_recycling input_storage, output_storage, storage_map = map_storage(fgraph, order, input_storage, output_storage, storage_map) compute_map = {} for k in storage_map: compute_map[k] = [k.owner is None] thunks = [] for node in order: # Maker sure we don't use C version of the code, but rather only # the python version # Note : ops that implement their own make thunk don't usually # have this attribute defiend !! thunks += [node.op.make_thunk(node, storage_map, compute_map, no_recycling, 'py')] thunks[-1].inputs = [storage_map[v] for v in node.inputs] thunks[-1].outputs = [storage_map[v] for v in node.outputs] computed, last_user = gc_helper(order) if self.allow_gc: post_thunk_old_storage = [] else: post_thunk_old_storage = None for node in order: if self.allow_gc: post_thunk_old_storage.append( [storage_map[input] for input in node.inputs if (input in computed) and ( input not in fgraph.outputs) and ( node == last_user[input])]) if no_recycling is True: # True seems like some special code for *everything*?? 
-JB # FunctionMaker always passes a list I think -JB no_recycling = list(storage_map.values()) no_recycling = utils.difference(no_recycling, input_storage) else: no_recycling = [storage_map[r] for r in no_recycling if r not in fgraph.inputs] # The function that actually runs your program is one of the f's in streamline. f = streamline(fgraph, thunks, order, post_thunk_old_storage, no_recycling=no_recycling) f.allow_gc = self.allow_gc # HACK: this is a way of passing an arg to Function.__call__ add_clear_storage(f, computed, storage_map) f.storage_map = storage_map return (f, [Container(input, storage) for input, storage in izip(fgraph.inputs, input_storage)], [Container(output, storage, True) for output, storage in izip(fgraph.outputs, output_storage)], thunks, order) def add_clear_storage(f, computed, storage_map): def clear_storage(): for c in computed: storage_map[c][0] = None f.clear_storage = clear_storage class WrapLinker(Linker): """ This class makes it easier to run several L{LocalLinker}s in parallel, and offers some control over how each thunk is run. A wrapper function must be provided, and it can be used to execute the thunks, inspect the nodes, print stuff out, etc. The constructor initializes a WrapLinker. Parameters ---------- linkers : list of L{LocalLinker} subclasses, whose make_all() method returns thunks in the same order. For each node in the graph, each linker will provide a thunk. This class makes it possible to iterate over each linker's program in parallel. wrapper : lambda (i, i_node, i_thunk1, i_thunk2, ...) : None Does some user-defined action for the i'th element of the program. i_thunk<n> is the thunk returned by the n'th linker. (If you want to run the program, make sure to call the necessary thunks in this function.) Notes ----- The outputs of the first linker will be returned. This linker ensures that each linker has its own storage for inputs and outputs and intermediate variables. There is no interference between linkers. 
""" def __init__(self, linkers, wrapper): self.fgraph = None self.linkers = linkers self.wrapper = wrapper def __copy__(self): """ Shallow copy of a WrapLinker. Returns ------- object A copy of self, where each of the linkers in self.linkers have been shallow-copied. It is useful because in FunctionMaker, copy.copy is called on the Mode's linker, so that it is not modified inplace when linker.accept() is called. In this case, we want the wrapped linkers to be copied too. """ other = self.__class__( linkers=[copy(l) for l in self.linkers], wrapper=self.wrapper) return other def clone(self, allow_gc=undef): return self.__class__( linkers=[l.clone(allow_gc=allow_gc) for l in self.linkers], wrapper=self.wrapper) def accept(self, fgraph, no_recycling=None, profile=None): """ Parameters ---------- fgraph : gof.FunctionGraph The fgraph which we will link. no_recycling : a list of Variables that belong to fgraph. If a Variable is in no_recycling, L{WrapLinker} will clear the output storage associated to it (for each linker in linkers) during the computation to avoid reusing it. 
""" if no_recycling is None: no_recycling = [] if self.fgraph is not None and self.fgraph is not fgraph: return type(self)(self.linkers, self.wrapper).accept(fgraph, no_recycling) self.fgraph = fgraph self.no_recycling = no_recycling self.linkers = [linker.accept(fgraph, no_recycling) for linker in self.linkers] return self def pre(self, f, inputs, order, thunk_groups): pass def make_thunk(self, **kwargs): no_recycling = self.no_recycling make_all = [self.linkers[0].make_all(**kwargs)] kwargs.pop('input_storage', None) make_all += [l.make_all(**kwargs) for l in self.linkers[1:]] fns, input_lists, output_lists, thunk_lists, order_lists \ = zip(*make_all) order_list0 = order_lists[0] for order_list in order_lists[1:]: if not order_list0 == order_list: raise Exception( "All linkers to WrapLinker should execute operations in the same order.") inputs0 = input_lists[0] outputs0 = output_lists[0] thunk_groups = list(zip(*thunk_lists)) order = [x[0] for x in zip(*order_lists)] to_reset = [] for thunks, node in izip(thunk_groups, order): for j, output in enumerate(node.outputs): if output in no_recycling: for thunk in thunks: to_reset.append(thunk.outputs[j]) wrapper = self.wrapper pre = self.pre def f(): for inputs in input_lists[1:]: for input1, input2 in izip(inputs0, inputs): input2.storage[0] = copy(input1.storage[0]) for x in to_reset: x[0] = None pre(self, [input.data for input in input_lists[0]], order, thunk_groups) for i, (thunks, node) in enumerate(izip(thunk_groups, order)): try: wrapper(i, node, *thunks) except Exception: raise_with_op(node, *thunks) f.thunk_groups = thunk_groups return f, inputs0, outputs0 def WrapLinkerMany(linkers, wrappers): """ Variant on WrapLinker that runs a series of wrapper functions instead of just one. """ def wrapper(*args): for f in wrappers: f(*args) return WrapLinker(linkers, wrapper)
Theano/Theano
theano/gof/link.py
link.py
py
38,073
python
en
code
9,807
github-code
36
70308798185
import wx class SlideshowFrame(wx.Frame): def __init__(self,**kwargs): wx.Frame.__init__(self, **kwargs) self.SetBackgroundColour(wx.BLACK) self.panel = wx.Panel(self, pos=self.Rect.GetPosition(), size=self.Rect.GetSize()) self.empty_img = wx.EmptyImage(self.Rect.GetWidth(), self.Rect.GetHeight()) self.imageCtrl = wx.StaticBitmap(self.panel, wx.ID_ANY, wx.BitmapFromImage(self.empty_img)) #self.verSizer = wx.BoxSizer(wx.VERTICAL) #self.horSizer = wx.BoxSizer(wx.HORIZONTAL) #self.mainSizer.Add(self.imageCtrl, 0, wx.ALL|wx.ALIGN_CENTER, 0) #self.panel.SetSizer(self.mainSizer) #self.mainSizer.Fit(self) #self.panel.Layout() def load_img(self, img_path): if img_path is None: img = self.empty_img else: img = wx.Image(img_path, wx.BITMAP_TYPE_ANY) # # scale the image, preserving the aspect ratio # w = img.GetWidth() h = img.GetHeight() W = self.Rect.GetWidth() H = self.Rect.GetHeight() # scale w to match W, and see if height is over/under H. If so, scale # h to match H instead. w2, h2 = W, h*(float(W)/w) if h2 > H: w2, h2 = w*(float(H)/h), H img = img.Scale(w2,h2,quality=wx.IMAGE_QUALITY_HIGH) self.imageCtrl.SetBitmap(wx.BitmapFromImage(img)) #self.panel.Layout() O = self.Rect.GetPosition() # frame origin X,Y = (O[0] + (W-w2)/2, O[1] + (H-h2)/2) self.panel.SetRect((X,Y,w2,h2)) #self.mainSizer.Fit(self) #self.panel.Layout() self.panel.Refresh()
jamestunnell/auto-slideshow
slideshow_frame.py
slideshow_frame.py
py
1,851
python
en
code
1
github-code
36
21114082657
from src import app from flask import jsonify, request import requests import json import os slackToken = os.environ['SLACK_TOKEN'] botAccessToken = os.environ['BOT_ACCESS_TOKEN'] hasuraDataUrl = "http://data.hasura/v1/query" chatUrl = "https://slack.com/api/chat.postMessage" ##################### APIs ###################### @app.route('/', methods=['GET']) def test(): return "Slackbot is running." @app.route('/echo', methods=['POST']) def event(): data = request.form.to_dict() print(data) print("SlackToken: " + slackToken) receivedToken = data["token"] print("ReceivedToken: " + receivedToken) if (receivedToken==slackToken): receivedMessage= data["text"] id = storeMsgToDB(receivedMessage) sendConfirmation(id, receivedMessage, data["response_url"]) return "Waiting for confirmation" else: return "Invalid Token" @app.route('/repo', methods=['POST']) def repos(): data = request.form.to_dict() print(data) print("SlackToken: " + slackToken) receivedToken = data["token"] print("ReceivedToken: " + receivedToken) if (receivedToken==slackToken): receivedMessage= data["text"] return getRepo(receivedMessage) else: return "Invalid Token" @app.route('/issue', methods=['POST']) def issues(): data = request.form.to_dict() print(data) print("SlackToken: " + slackToken) receivedToken = data["token"] print("ReceivedToken: " + receivedToken) if (receivedToken==slackToken): receivedMessage= data["text"] return getIssue(receivedMessage) else: return "Invalid Token" @app.route('/branch', methods=['POST']) def branches(): data = request.form.to_dict() print(data) print("SlackToken: " + slackToken) receivedToken = data["token"] print("ReceivedToken: " + receivedToken) if (receivedToken==slackToken): receivedMessage= data["text"] return getBranch(receivedMessage) else: return "Invalid Token" @app.route('/helpme', methods=['POST']) def helps(): data = request.form.to_dict() print(data) print("SlackToken: " + slackToken) receivedToken = data["token"] print("ReceivedToken: " + receivedToken) 
if (receivedToken==slackToken): receivedMessage= data["text"] return getHelp(receivedMessage) else: return "Invalid Token" @app.route('/member', methods=['POST']) def members(): data = request.form.to_dict() print(data) print("SlackToken: " + slackToken) receivedToken = data["token"] print("ReceivedToken: " + receivedToken) if (receivedToken==slackToken): receivedMessage= data["text"] return getMember(receivedMessage) else: return "Invalid Token" @app.route('/tag', methods=['POST']) def tags(): data = request.form.to_dict() print(data) print("SlackToken: " + slackToken) receivedToken = data["token"] print("ReceivedToken: " + receivedToken) if (receivedToken==slackToken): receivedMessage= data["text"] return getTag(receivedMessage) else: return "Invalid Token" @app.route('/confirm', methods=['POST']) def confirm(): req = request.form.to_dict() data = json.loads(req["payload"]) print (data) receivedToken = data["token"] channel = data["channel"]["id"] if (receivedToken == slackToken): if (data["actions"][0]["value"] == "yes"): message = fetchFromDBAndSend(data["callback_id"], channel) return ("Message Sent: " + str(message)) else: return "Ok. Not sending. 
:confused:" ##################### Utility functions ###################### def getRepo(text): strtext = "" slashparts = text.split('/') if text == "" or len(slashparts)<=1 or slashparts[1] == "": strtext = "Please enter the deatils in proper order" return strtexts url = 'https://api.github.com/repos/'+ slashparts[0] + '/' + slashparts[1] req = requests.get(url) resp = req.json() finalstr = "" if 'message' not in resp: resplist = [resp['language'],str(resp['forks']),str(resp['open_issues']),resp['html_url']] strlist = ["Majority of the repo is written in ","No of Forks made ","No of open issues for this repo is ","Check here: "] for i in range(0,4): strlist[i] = strlist[i] + resplist[i] for j in range(0,3): finalstr = finalstr + strlist[j] + '\n' finalstr = finalstr + strlist[3] return finalstr else: finalstr = "We could not find the result" + '\n' + "Make sure you entered the correct details :confused:" return finalstr def getIssue(text): strtext = "" slashparts = text.split('/') if text == "" or len(slashparts)<=2 or slashparts[2] == "": strtext = "Please enter the deatils in proper order" return strtext url = 'https://api.github.com/repos/'+ slashparts[0] + '/' + slashparts[1] + '/issues/' + slashparts[2] r = requests.get(url) resp = r.json() finalstr = "" if 'message' not in resp: resplist = [resp['title'],resp['user']['login'],resp['state'],resp['html_url']] strlist = ["Issue title: ","Issue was opened by ","The issue is ","Check here: "] for i in range(0,4): strlist[i] = strlist[i] + resplist[i] for j in range(0,3): finalstr = finalstr + strlist[j] + '\n' finalstr = finalstr + strlist[3] return finalstr else: finalstr = "We could not find the result" + '\n' + "Make sure that the particular issue exists :confused:" return finalstr def getHelp(text): str1 = ":robot_face: Bot works on the following Slash commands: \n" sl_str = ["/repo <org_name>/<repo_name> \n","/issue <org_name>/<repo_name>/<issue_no> \n","/branch <org_name>/<repo_name>/<branch_name> 
\n","/member <org_name> \n","/tag <org_name>/<repo_name>"] for i in range(0,5): str1 = str1 + sl_str[i] return str1 def getBranch(text): strtext = "" slashparts = text.split('/') if text == "" or len(slashparts)<=2 or slashparts[2] == "": strtext = "Please enter the deatils in proper order" return strtext url = 'https://api.github.com/repos/'+ slashparts[0] + '/' + slashparts[1] + '/branches/' + slashparts[2] r = requests.get(url) resp = r.json() finalstr = "" if 'message' not in resp: resplist = [resp['commit']['author']['login'],resp['commit']['commit']['message'],resp['commit']['html_url']] strlist = ["Author of this branch: ","Message: ","Check here: "] for i in range(0,3): strlist[i] = strlist[i] + resplist[i] for j in range(0,2): finalstr = finalstr + strlist[j] + '\n' finalstr = finalstr + strlist[2] return finalstr else: finalstr = "We could not find the result" + '\n' + "Are u sure about the typo :confused:??" return finalstr def getMember(text): strtext = "" if text == "": strtext = "Please enter the deatils in proper order" return strtext url = 'https://api.github.com/orgs/'+text+'/public_members' r = requests.get(url) resp = r.json() finalstr = "" fstr = "" if 'message' not in resp: i = len(resp) for j in range(0,i): fstr = fstr + resp[j]['login'] + " " finalstr = "Your organisation has " + fstr + "as their public members" return finalstr else: finalstr = "We could not find the result" + '\n' + "Make sure that the particular organisation exists :confused:" return finalstr def getTag(text): strtext = "" slashparts = text.split('/') if text == "" or len(slashparts)<=1 or slashparts[1] == "": strtext = "Please enter the deatils in proper order" return strtexts url = 'https://api.github.com/repos/'+ slashparts[0] + '/' + slashparts[1] +'/tags' req = requests.get(url) resp = req.json() finalstr = "" if 'message' not in resp: i = len(resp) if i != 0: finalstr = "The most recent release present for this repo is " + resp[0]['name'] else: finalstr = "No tags are 
present in this repo :disappointed:" return finalstr else: finalstr = "We could not find the result" + '\n' + "Make sure you entered the correct details :confused:" return finalstr def sendConfirmation(id, message, responseUrl): payload = { "text": "Are you sure you want to send a message?", "attachments": [ { "text": '"'+message+'"', "fallback": "You are indecisive", "callback_id": id, "color": "#3AA3E3", "attachment_type": "default", "actions": [ { "name": "yes", "text": "Yep", "type": "button", "value": "yes" }, { "name": "no", "text": "Nope", "type": "button", "value": "no" } ] } ] } headers = { 'content-type': "application/json", } response = requests.request("POST", responseUrl, data=json.dumps(payload), headers=headers) print(response.text) def storeMsgToDB(text): """ This function stores 'text' in the database, and takes note of the auto-generated unique id for the message. The table it stores it in is: +-------------------------+----------------+ | id (auto-increment int) | message (text) | +-------------------------+----------------+ Instead of contacting the postgres database directly this function uses the Hasura Data APIs. Try out the data APIs by running this from your terminal: $ hasura api-console Use the query builder and the API explorer to try out the data APIs. """ requestPayload = { "type": "insert", "args": { "table": "slack_messages", "objects": [ { "message": text, } ], "returning": [ "id" ] } } # Setting headers headers = { "Content-Type": "application/json", "X-Hasura-User-Id": "1", "X-Hasura-Role": "admin" } # Make the query and store response in resp resp = requests.request("POST", hasuraDataUrl, data=json.dumps(requestPayload), headers=headers) respObj = resp.json() print(respObj) id = respObj["returning"][0]["id"] return id def fetchFromDBAndSend(id, channel): """ This function fetches the stored message from the database. 
The table it fetches from is: +-------------------------+----------------+ | id (auto-increment int) | message (text) | +-------------------------+----------------+ Instead of contacting the postgres database directly this function uses the Hasura Data APIs. Try out the data APIs by running this from your terminal: $ hasura api-console Use the query builder and the API explorer to try out the data APIs. """ requestPayload = { "type": "select", "args": { "table": "slack_messages", "columns": [ "message", ], "where": { "id": { "$eq": id } } } } # Setting headers headers = { "Content-Type": "application/json", "X-Hasura-User-Id": "1", "X-Hasura-Role": "admin" } # Make the query and store response in resp resp = requests.request("POST", hasuraDataUrl, data=json.dumps(requestPayload), headers=headers) respObj = resp.json() print(respObj) message = respObj[0]["message"] return sendSlackMessage(message, channel) def sendSlackMessage(message, channel): payload = { "token": botAccessToken, "text": message, "channel": channel } headers = { 'content-type': "application/json", 'Authorization': 'Bearer '+botAccessToken } response = requests.request("POST", chatUrl, data=json.dumps(payload), headers=headers) print(response.json()) return message
Satyabrat35/SlackGitBot
microservices/bot/app/src/server.py
server.py
py
12,668
python
en
code
2
github-code
36
29654762562
import re from io import StringIO from flask import Flask, request, Response, redirect import pandas as pd app = Flask(__name__) def is_valid_query(q): ''' A query is valid if it is strictly consisted of the following three entries: [(A-Z, a-z, 0-9)+ or *] [==, !=, $=, &=] ["..."] Queries can be concat with 'and' & 'or'. The 'and' 'or' operators are executed in sequential order. Entries and operators must be separated by at least one single space. i.e. {C1=="a"} is not acceptable. Additional white spaces are allowed between entries. For the "" inside the query, the query term is the content wrapped by the first and last occurance of "". In the processing of query term, any sequence of consecutive spaces is reduced to a single space for clarity. (since consecutive spaces usually do not convey any semantic meanings in phrases) Output: a message indicating the validity of query ("valid" or error message), a list of valid queries (each query is represented by a 3-element list), a list of and/or operators ''' entries = q.split() valid_q = [] # a 3-element list consist of 3 valid entries queries = [] # list of valid_q operators = [] # operators between queries operand = ['==', '!=', '$=', '&='] # valid operand # check the valid status of three entries defined above valid_first = False valid_second = False valid_third = False i = 0 while(i < len(entries)): if not valid_first: column = re.findall('[A-Za-z0-9]+|\*', entries[i]) # if valid, must be exactly one match # i.e. 
"abc*123" will give three matches and is invalid if len(column) != 1 or column[0] != entries[i]: return "Invalid column name", queries, operators else: valid_q.append(entries[i]) valid_first = True elif not valid_second: if entries[i] not in operand: return "Invalid operator, must be ==, !=, $=, &=", queries, operators else: # store as int if only numbers valid_q.append(entries[i]) valid_second = True elif not valid_third: if entries[i][0] != '\"': return "Invalid query term, must begin with \"", queries, operators else: # traverse the list to find the last " before the next query term = "" # find the string before next query if entries[i:].count('and') > 0: end = entries[i:].index('and') + i term = " ".join(entries[i:end]) elif entries[i:].count('or') > 0: end = entries[i:].index('or') + i term = " ".join(entries[i:end]) else: end = len(entries) term = " ".join(entries[i:]) # test the validity of term if term[-1] != '\"': return "Invalid query term, must end with \"", queries, operators else: i = end valid_q.append(term[1:-1]) # remove the front and end "" when storing valid_third = True continue else: if i == len(queries) - 1: return "Extra term after queries", queries, operators if entries[i] == 'and' or entries[i] == 'or': queries.append(valid_q) operators.append(entries[i]) valid_q = [] valid_first = valid_second = valid_third = False else: return "Invalid and/or operand between queries", queries, operators i += 1 # append the last valid query and check incomplete query if valid_first and valid_second and valid_third: queries.append(valid_q) else: return "Missing entries in queries", queries, operators return "valid", queries, operators def match_query(queries, operators, df): ''' This function matches the queries associated with the operators to df. 
Output: a message indicating the validity of query matching ('valid' or error message) matched rows in df ''' columns = df.columns.tolist() res_df = pd.DataFrame(columns = columns) # empty df to append matching rows for i,q in enumerate(queries): # if this is the first query or the operator connecting pervious query is 'or', check the entire df if i - 1 < 0 or operators[i - 1] == 'or': cur_df = df.astype(str) # convert the content of df to string for comparison elif operators[i - 1] == 'and': cur_df = res_df # select rows from df if q[0] == "*": select_df = pd.DataFrame(columns = columns) # empty df to append matching rows for (col, _) in cur_df.iteritems(): if q[1] == "==": select_df = select_df.append(cur_df[cur_df[col] == q[2]]) elif q[1] == "!=": drop_df = cur_df[cur_df[col] == q[2]] select_df = select_df.append(cur_df.drop(index=drop_df.index.tolist())) elif q[1] == "$=": select_df = select_df.append(cur_df[cur_df[col].str.lower().isin([q[2].lower()])]) elif q[1] == "&=": select_df = select_df.append(cur_df[cur_df[col].str.contains(q[2], case=True)]) cur_df = select_df.drop_duplicates(keep='first') else: if q[0] not in columns: return 'No corresponding column name in data', res_df elif q[0] not in cur_df.columns: cur_df = pd.DataFrame(columns = columns) # no matching column, set the cur_df to empty else: if q[1] == "==": cur_df = cur_df[cur_df[q[0]] == q[2]] elif q[1] == "!=": drop_df = cur_df[cur_df[q[0]] == q[2]] cur_df = cur_df.drop(index=drop_df.index.tolist()) elif q[1] == "$=": cur_df = cur_df[cur_df[q[0]].str.lower().isin([q[2].lower()])] elif q[1] == "&=": cur_df = cur_df[cur_df[q[0]].str.contains(q[2], case=True)] # update res_df according to 'and' 'or' operators if i - 1 < 0 or operators[i - 1] == 'or': res_df = res_df.append(cur_df) res_df.drop_duplicates(keep='first',inplace=True) elif operators[i - 1] == 'and': res_df = cur_df if res_df.empty: return 'No corresponding items for the query', res_df return 'valid', res_df @app.route('/') def 
get_info(): args = request.args query = args['query'] # '&' will separate the query to two items, append it back for key, val in args.items(): if key == 'query': continue if key == '': query += '&=' + val else: query += '&' + key print(query) # Query error checking and parsing mes, queries, operators = is_valid_query(query) if mes != "valid": return mes print(queries) print(operators) # Query match df = pd.read_csv('data.csv') mes, res_df = match_query(queries, operators, df) if mes != "valid": return mes res_df.to_csv('res.csv') return ''' <html><body> The query has been successfully processed. To download the extracted results in a csv file, <a href="/getCSV">click me.</a> </body></html> ''' @app.route("/getCSV") def getCSV(): output = StringIO() df = pd.read_csv('res.csv') df.to_csv(output) return Response( output.getvalue(), mimetype="text/csv", headers={"Content-disposition":"attachment; filename=res.csv"}) if __name__ == '__main__': app.run(host='127.0.0.1', port=9527)
CandiceD17/Http-Server-Query-Retrieval
my_server.py
my_server.py
py
8,078
python
en
code
0
github-code
36
28891405121
import collections import difflib import logging import os import re from pytype.platform_utils import path_utils from pytype.tools.merge_pyi import merge_pyi import unittest __all__ = ('TestBuilder', 'load_tests') PY, PYI, EXPECTED = 'py', 'pyi', 'pep484.py' OVERWRITE_EXPECTED = 0 # flip to regenerate expected files def load_tests(unused_loader, standard_tests, unused_pattern): root = path_utils.join(path_utils.dirname(__file__), 'test_data') standard_tests.addTests(TestBuilder().build(root)) return standard_tests class TestBuilder: def build(self, data_dir): """Return a unittest.TestSuite with tests for the files in data_dir.""" suite = unittest.TestSuite() files_by_base = self._get_files_by_base(data_dir) for base, files_by_ext in sorted(files_by_base.items()): if not (PY in files_by_ext and PYI in files_by_ext): continue if not OVERWRITE_EXPECTED and EXPECTED not in files_by_ext: continue py, pyi = (files_by_ext[x] for x in (PY, PYI)) outfile = path_utils.join(data_dir, base + '.' + EXPECTED) test = build_regression_test(py, pyi, outfile) suite.addTest(test) return suite def _get_files_by_base(self, data_dir): files = os.listdir(data_dir) file_pat = re.compile(r'(?P<filename>(?P<base>.+?)\.(?P<ext>.*))$') matches = [m for m in map(file_pat.match, files) if m] ret = collections.defaultdict(dict) for m in matches: base, ext, filename = m.group('base'), m.group('ext'), m.group('filename') ret[base][ext] = path_utils.join(data_dir, filename) return ret def build_regression_test(py, pyi, outfile): def regression_test(test_case): py_input, pyi_src = (_read_file(f) for f in (py, pyi)) try: output = merge_pyi.merge_sources(py=py_input, pyi=pyi_src) except merge_pyi.MergeError: pass if OVERWRITE_EXPECTED: with open(outfile, 'w') as f: f.write(output) else: expected = _read_file(outfile) test_case.assertEqual(expected, output, _get_diff(expected, output)) name = path_utils.splitext(path_utils.basename(outfile))[0].replace('.', '_') test = f'test_{name}' case = 
type('RegressionTest', (unittest.TestCase,), {test: regression_test}) return case(test) def _read_file(filename): with open(filename) as f: return f.read() def _get_diff(a, b): a, b = a.split('\n'), b.split('\n') diff = difflib.Differ().compare(a, b) return '\n'.join(diff) if __name__ == '__main__': logging.basicConfig(level=logging.CRITICAL) unittest.main()
google/pytype
pytype/tools/merge_pyi/merge_pyi_test.py
merge_pyi_test.py
py
2,585
python
en
code
4,405
github-code
36
39265812608
import pandas as pd import plotly.graph_objects as go import prepare_data population = { 'NSW':8089526, 'QLD':5095100, 'VIC':6594804, 'SA':1751693, 'WA':2621680, 'TAS':534281, 'ACT':426709, 'NT':245869, 'Total':25359662, 'DeathsNationally':25359662, } df_aus = prepare_data.australia() df_aus_change = prepare_data.australia_change(df_aus) # Let's plot this mofo fig = go.Figure() # Plot all the states! for state in list(df_aus): fig.add_trace(go.Scatter( x=df_aus.index, y=pd.to_numeric(df_aus[state]).divide(population[state])*100000, name=state, )) # Make the plot look fancy. fig.update_layout(title='Per Capita COVID-19 Cases by State/Territory in Austalia', xaxis_title='Date', yaxis_title='Cases per 100,000 people') fig.show() # Let's plot this mofo fig_change = go.Figure() # Plot all the states! for state in list(df_aus_change): fig_change.add_trace(go.Scatter( x=df_aus_change.index, y=pd.to_numeric(df_aus_change[state]).divide(population[state])*100000, name=state, )) # Make the plot look fancy. fig_change.update_layout(title='Per Capita Change in COVID-19 Cases by State/Territory in Austalia', xaxis_title='Date', yaxis_title='Change in cases per 100,000 people') fig_change.show() # Roll those numbers over a week df_aus_change = df_aus_change.rolling(7).mean() # Let's plot this mofo fig_rolling_change = go.Figure() # Plot all the states! for state in list(df_aus): fig_rolling_change.add_trace(go.Scatter( x=df_aus_change.index, y=pd.to_numeric(df_aus_change[state]).divide(population[state])*100000, name=state, )) # Make the plot look fancy. fig_rolling_change.update_layout( title='7-day Rolling Per Capita Change in COVID-19 Cases by State/Territory in Austalia', xaxis_title='Date', yaxis_title='Change in cases per 100,000 people' ) fig_rolling_change.show()
explodingdinosaurs/corona
aus_states_per_capita.py
aus_states_per_capita.py
py
2,043
python
en
code
1
github-code
36
27894940347
class Node(object): def __init__(self, key, val): self.val = val self.key = key self.next = None self.prev = None class List(object): def __init__(self): self.head = Node(None, None) self.tail = None def append(self, node): if self.tail is None: self.head.next = node node.prev = self.head self.tail = node else: self.tail.next = node node.prev = self.tail self.tail = node def push_to_front(self, node): if node is None: return if self.tail is node: return node.prev.next = node.next node.next.prev = node.prev node.prev = self.tail node.next = None self.tail.next = node self.tail = self.tail.next def evict(self): node = self.head.next self.head.next = self.head.next.next self.head.next.prev = self.head return node class LRUCache(object): def __init__(self, capacity): self.capacity = capacity self.size = 0 self.map = {} self.list = List() def get(self, key): if key not in self.map: return None node = self.map[key] self.list.push_to_front(node) if node: return node.val return None def set(self, key, value): if key in self.map: node = self.map[key] self.list.push_to_front(node) return if self.size == self.capacity: node = self.list.evict() del self.map[node.key] else: self.size += 1 node = Node(key, value) self.list.append(node) self.map[key] = node if __name__ == "__main__": cache = LRUCache(3) cache.set(1, "hello") cache.set(2, "world") cache.set(3, "!!!") cache.set(4, "!!!") print(cache.get(1)) print(cache.get(2)) print(cache.get(3)) print(cache.get(4)) print(cache.get(2)) cache.set(5, "match") cache.set(6, "all") print(cache.get(5)) print(cache.get(6)) print(cache.get(2)) print(cache.get(4)) print(cache.get(3))
stgleb/algorithms-and-datastructures
hashmaps/lru_cache.py
lru_cache.py
py
2,224
python
en
code
0
github-code
36
12032981505
import itertools import numpy as np import networkx as nx from sklearn.neighbors import kneighbors_graph from sklearn.metrics.pairwise import euclidean_distances from scipy.sparse.csgraph import minimum_spanning_tree from ggc.utils import * def knn_graph(X, k): """Returns k-Nearest Neighbor (MkNN) graph from the feature matrix. Parameters ---------- X : ndarray, shape (N, F) N samples and F-dimensional features. k : int, k >= 1 Parameter for knn: the k-th nearest neighbour. Returns ------- adj : ndarray, shape (N, N) The adjacency matrix of the constructed knn graph. """ assert k < X.shape[0] adj_directed = kneighbors_graph(X=X, n_neighbors=k, p=2, include_self=False, ).toarray() adj = adj_directed + adj_directed.T adj[adj > 0] = 1 np.fill_diagonal(adj,0) return adj def mknn_graph(X, k): """Returns Mutual k-Nearest Neighbor (MkNN) graph from the feature matrix. Parameters ---------- X : ndarray, shape (N, F) N samples and F-dimensional features. k : int, k >= 1 Parameter for mknn: the k-th nearest neighbour. Returns ------- adj : ndarray, shape (N, N) The adjacency matrix of the constructed mknn graph. """ assert k < X.shape[0] adj_directed = kneighbors_graph(X=X, n_neighbors=k, p=2, include_self=False, ).toarray() adj = adj_directed + adj_directed.T adj[adj < 2] = 0 adj[adj >= 2] = 1 np.fill_diagonal(adj,0) return adj def cknn_graph(X, delta, k): """Returns Continuous k-Nearest Neighbor (CkNN) graph from the feature matrix. Parameters ---------- X : ndarray, shape (N, F) N samples and F-dimensional features. delta : float, delta > 0 Parameter for cknn. k : int, k >= 1 Parameter for cknn: the k-th nearest neighbour. Returns ------- adj : ndarray, shape (N, N) The adjacency matrix of the constructed cknn graph. References ---------- .. [1] Tyrus Berry, Timothy Sauer. Consistent manifold representation for topological data analysis. Foundations of Data Science, 2019, 1 (1) : 1-38. 
doi: 10.3934/fods.2019001 """ assert k < X.shape[0] D = euclidean_distances(X, X) N = D.shape[0] np.fill_diagonal(D,0) D_k = np.sort(D) adj = np.zeros([N, N]) adj[np.square(D) < delta * delta * np.dot(D_k[:,k].reshape(-1,1),D_k[:,k].reshape(1,-1))] = 1 np.fill_diagonal(adj,0) return adj def mst_graph(X): """Returns Minimum Spanning Tree (MST) graph from the feature matrix. Parameters ---------- X : ndarray, shape (N, F) N samples and F-dimensional features. Returns ------- adj : ndarray, shape (N, N) The adjacency matrix of the constructed mst graph. """ D = euclidean_distances(X, X) adj_directed = minimum_spanning_tree(D).toarray() adj = adj_directed + adj_directed.T adj[adj > 0] = 1 np.fill_diagonal(adj,0) return adj def rmst_graph(X, gamma, k): """Returns Relaxed Minimum Spanning Tree (RMST) graph from the feature matrix. Parameters ---------- X : ndarray, shape (N, F) N samples and F-dimensional features. gamma : float, gamma > 0 Parameter for rmst. k : int, k >= 1 Parameter for rmst: the k-th nearest neighbour. Returns ------- adj : ndarray, shape (N, N) The adjacency matrix of the constructed rmst graph. References ---------- .. [1] Beguerisse-Díaz, Mariano, Borislav Vangelov, and Mauricio Barahona. "Finding role communities in directed networks using role-based similarity, markov stability and the relaxed minimum spanning tree." 2013 IEEE Global Conference on Signal and Information Processing. IEEE, 2013. 
""" D = euclidean_distances(X, X) N = D.shape[0] assert k < N np.fill_diagonal(D,0) adj = np.zeros([N, N]) D_k = np.sort(D) D_k = np.tile(D_k[:,k],(N,1)) D_k = gamma * (D_k + D_k.T) np.fill_diagonal(D_k,0) max_weight = np.zeros((N,N)) G = nx.Graph(D) T = nx.minimum_spanning_tree(G) path = dict(nx.all_pairs_dijkstra_path(T)) for i,j in itertools.combinations(range(N),2): p = path[i][j] path_weight = np.zeros(len(p)-1) for k in range(len(p)-1): path_weight[k] = T.edges[p[k],p[k+1]]['weight'] max_weight[i][j] = np.amax(path_weight) max_weight = max_weight + max_weight.T np.fill_diagonal(max_weight,0) adj[D < (max_weight + D_k)] = 1 np.fill_diagonal(adj,0) return adj
haczqyf/ggc
ggc/graphs.py
graphs.py
py
4,902
python
en
code
6
github-code
36
19665493360
import re def get_puzzle_input(file: str) -> list[str]: return [line.strip() for line in open(f"{file}.txt", "r").readlines()] def create_list(r1: int, r2: int) -> list: return list(range(r1, r2 + 1)) def parse_input(sections: list) -> list: ranges = [] for section in sections: input = re.split(",|-", section) elf_1 = create_list(int(input[0]), int(input[1])) elf_2 = create_list(int(input[2]), int(input[3])) ranges.append([elf_1, elf_2]) return ranges def sort_sections(elves: list) -> list: sorted_sections = [] for elf in elves: pass if len(elf[0]) >= len(elf[1]): outer, inner = elf[0], elf[1] else: outer, inner = elf[1], elf[0] sorted_sections.append([outer, inner]) return sorted_sections def solution(sections: list) -> int: dubble_work = 0 overlapping = 0 for outer, inner in sections: all_in = [] for section in inner: if section in outer: all_in.append(True) else: all_in.append(False) if all(all_in): dubble_work += 1 if any(all_in): overlapping += 1 return dubble_work, overlapping sections = get_puzzle_input("input") ranges = parse_input(sections) sorted_sections = sort_sections(ranges) solutions = solution(sorted_sections) print(solutions)
jonnaliesel/Aoc2022
4/solution.py
solution.py
py
1,422
python
en
code
0
github-code
36
34993839592
# Thư viện import pygame, sys import numpy as np import time # Khởi tạo game pygame.init() # --------- # CÁC HẰNG SỐ # --------- WIDTH = 600 HEIGHT = WIDTH LINE_WIDTH = 15 WIN_LINE_WIDTH = 8 BOARD_ROWS = 5 BOARD_COLS = BOARD_ROWS SQUARE_SIZE = WIDTH/BOARD_ROWS CIRCLE_RADIUS = SQUARE_SIZE/3 CIRCLE_WIDTH = 15 CROSS_WIDTH = 25 SPACE = SQUARE_SIZE/4 RED = (235, 47, 6) BG_COLOR = (72, 84, 96) LINE_COLOR = (23, 145, 135) CIRCLE_COLOR = (255, 211, 42) CROSS_COLOR = (186, 220, 88) WIN_COLOR = (66, 66, 66) # --------- # VARIABLES # --------- player = 1 game_over = False # ------ # SCREEN # ------ screen = pygame.display.set_mode( (WIDTH, HEIGHT) ) pygame.display.set_caption( 'Isolation' ) screen.fill( BG_COLOR ) # ------------- # CONSOLE BOARD # ------------- board = np.zeros( (BOARD_ROWS, BOARD_COLS) ) # --------- # FUNCTIONS # --------- def draw_lines(): for i in range(1,BOARD_ROWS): # horizontal pygame.draw.line( screen, LINE_COLOR, (0, SQUARE_SIZE*i), (WIDTH, SQUARE_SIZE*i), LINE_WIDTH ) for i in range(1,BOARD_COLS): # vertical pygame.draw.line( screen, LINE_COLOR, (i * SQUARE_SIZE, 0), (i * SQUARE_SIZE, HEIGHT), LINE_WIDTH ) def draw_figures(): for row in range(BOARD_ROWS): for col in range(BOARD_COLS): if board[row][col] == 1: pygame.draw.circle( screen, CIRCLE_COLOR, (int( col * SQUARE_SIZE + SQUARE_SIZE//2 ), int( row * SQUARE_SIZE + SQUARE_SIZE//2 )), CIRCLE_RADIUS, CIRCLE_WIDTH ) elif board[row][col] == 2: pygame.draw.line( screen, CROSS_COLOR, (col * SQUARE_SIZE + SPACE, row * SQUARE_SIZE + SQUARE_SIZE - SPACE), (col * SQUARE_SIZE + SQUARE_SIZE - SPACE, row * SQUARE_SIZE + SPACE), CROSS_WIDTH ) pygame.draw.line( screen, CROSS_COLOR, (col * SQUARE_SIZE + SPACE, row * SQUARE_SIZE + SPACE), (col * SQUARE_SIZE + SQUARE_SIZE - SPACE, row * SQUARE_SIZE + SQUARE_SIZE - SPACE), CROSS_WIDTH ) def mark_square(row, col, player): board[row][col] = player # print ("----------------------------------------------------") # print("Player " + str(player) + " marked square : (" + 
str(row) + "," + str(col) + ")") # print(board) # print ("----------------------------------------------------") def available_square(row, col): return board[row][col] == 0 def is_board_full(): for row in range(BOARD_ROWS): for col in range(BOARD_COLS): if board[row][col] == 0: return False return True WIN_LENGTH = 4 def check_win(player): # Dọc for col in range(BOARD_COLS): for row in range(BOARD_ROWS - (WIN_LENGTH - 1)): if all(board[row+i][col] == player for i in range(WIN_LENGTH)): draw_vertical_winning_line(col, row, player) return True # Ngang for row in range(BOARD_ROWS): for col in range(BOARD_COLS - (WIN_LENGTH - 1)): if all(board[row][col+i] == player for i in range(WIN_LENGTH)): draw_horizontal_winning_line(col, row, player) return True # Chéo trái for row in range(BOARD_ROWS - (WIN_LENGTH - 1)): for col in range(BOARD_COLS - (WIN_LENGTH - 1)): if all(board[row+i][col+i] == player for i in range(WIN_LENGTH)): draw_asc_diagonal(col, row, player) return True # Chéo phải for row in range((WIN_LENGTH - 1),BOARD_ROWS): for col in range(BOARD_ROWS - (WIN_LENGTH - 1)): if all(board[row-i][col+i] == player for i in range(WIN_LENGTH)): draw_desc_diagonal(row, col, player) # print(row,col) return True return False # ========= # Hàm vẽ đường win # ========= def draw_vertical_winning_line(col, row, player): x = int(col * SQUARE_SIZE + SQUARE_SIZE / 2) y1 = int(row * SQUARE_SIZE + SQUARE_SIZE / 2) - 48 y2 = int((row + (WIN_LENGTH - 1)) * SQUARE_SIZE + SQUARE_SIZE / 2) + 48 pygame.draw.line(screen, WIN_COLOR, (x, y1), (x, y2), WIN_LINE_WIDTH) def draw_horizontal_winning_line(col, row, player): x1 = int(col * SQUARE_SIZE + SQUARE_SIZE / 2) - 48 x2 = int((col + (WIN_LENGTH - 1)) * SQUARE_SIZE + SQUARE_SIZE / 2) + 48 y = int(row * SQUARE_SIZE + SQUARE_SIZE / 2) pygame.draw.line(screen, WIN_COLOR, (x1, y), (x2, y), WIN_LINE_WIDTH) def draw_asc_diagonal(col, row, player): x1 = int(col * SQUARE_SIZE + SQUARE_SIZE / 2) y1 = int(row * SQUARE_SIZE + SQUARE_SIZE / 2) x2 = 
int((col + (WIN_LENGTH - 1)) * SQUARE_SIZE + SQUARE_SIZE / 2) y2 = int((row + (WIN_LENGTH - 1)) * SQUARE_SIZE + SQUARE_SIZE / 2) pygame.draw.line(screen, WIN_COLOR, (x1, y1), (x2, y2), WIN_LINE_WIDTH) def draw_desc_diagonal(row,col, player): x1 = int(col * SQUARE_SIZE + SQUARE_SIZE / 2) y1 = int(row * SQUARE_SIZE + SQUARE_SIZE / 2) x2 = int((col+WIN_LENGTH-1)* SQUARE_SIZE + SQUARE_SIZE / 2) y2 = int((row-WIN_LENGTH+1)* SQUARE_SIZE + SQUARE_SIZE / 2) pygame.draw.line(screen, WIN_COLOR, (x1,y1), (x2, y2), WIN_LINE_WIDTH) def restart(): screen.fill( BG_COLOR ) draw_lines() for row in range(BOARD_ROWS): for col in range(BOARD_COLS): board[row][col] = 0 def checkWinner(): # Ngang for row in range(BOARD_ROWS): for col in range(BOARD_COLS - 3): if board[row][col] == board[row][col+1] == board[row][col+2] == board[row][col+3] != 0: return board[row][col] # Dọc for row in range(BOARD_ROWS - 3): for col in range(BOARD_COLS): if board[row][col] == board[row+1][col] == board[row+2][col] == board[row+3][col] != 0: return board[row][col] # Chéo xuống for row in range(BOARD_ROWS - 3): for col in range(BOARD_COLS - 3): if board[row][col] == board[row+1][col+1] == board[row+2][col+2] == board[row+3][col+3] != 0: return board[row][col] # Chéo lên for row in range(3, BOARD_ROWS): for col in range(BOARD_COLS - 3): if board[row][col] == board[row-1][col+1] == board[row-2][col+2] == board[row-3][col+3] != 0: return board[row][col] # Đường chéo từ phải xuống trái for row in range(BOARD_ROWS - 3): for col in range(BOARD_COLS - 3): if board[row][col+3] == board[row+1][col+2] == board[row+2][col+1] == board[row+3][col] != 0: return board[row][col+3] # Hòa for row in range(BOARD_ROWS): for col in range(BOARD_COLS): if board[row][col] == 0: return None return 0 def numberplay(): n = 0 for row in range(BOARD_ROWS): for col in range(BOARD_COLS): if(board[row][col] == 2): n = n + 1 if((n//2)%2==0): return(n//2) else: return ((n//2)-1) mytime = 0 def bestMove(): global mytime n = 3 start_time = 
time.time() bestScore = -100000 move = None empty_cells = [(row, col) for row in range(BOARD_ROWS) for col in range(BOARD_COLS) if board[row][col] == 0] if not empty_cells: return (-1, -1) for row, col in empty_cells: board[row][col] = 2 score = minimax(board, 0,n, -100000, 100000, False) board[row][col] = 0 if score > bestScore: bestScore = score move = (row, col) if move: mark_square(move[0], move[1], 2) draw_figures() end_time = time.time() elapsed_time = end_time - start_time mytime = mytime + elapsed_time print("Caro_5x5:time to make first move :%f"%(mytime)) return move scores = { 1: -10, 2: 10, 0: 0 } i = 0 def minimax(board, depth,depthmax, alpha, beta, isMaximizing): global i i = i+1 print(i) result = checkWinner() if result is not None: return scores[result] if isMaximizing: bestScore = -100000 for row in range(BOARD_ROWS): for col in range(BOARD_COLS): if board[row][col] == 0: board[row][col] = 2 if(depth > depthmax): board[row][col] = 0 break score = minimax(board,depth+1,depthmax, alpha, beta, False) board[row][col] = 0 bestScore = max(score, bestScore) alpha = max(alpha, bestScore) if beta <= alpha: break return bestScore else: bestScore = 100000 for row in range(BOARD_ROWS): for col in range(BOARD_COLS): if board[row][col] == 0: board[row][col] = 1 if(depth > depthmax): board[row][col] = 0 break score = minimax(board, depth+1,depthmax, alpha, beta, True) board[row][col] = 0 bestScore = min(score, bestScore) beta = min(beta, bestScore) if beta <= alpha: break return bestScore draw_lines() # -------- # MAINLOOP # -------- while True: for event in pygame.event.get(): if event.type == pygame.QUIT: sys.exit() if event.type == pygame.MOUSEBUTTONDOWN and not game_over: mouseX = event.pos[0] # x mouseY = event.pos[1] # y clicked_row = int(mouseY // SQUARE_SIZE) clicked_col = int(mouseX // SQUARE_SIZE) if available_square( clicked_row, clicked_col ): player = 1 mark_square( clicked_row, clicked_col, player ) draw_figures() if check_win( player ): font = 
pygame.font.SysFont(None, 100) text = font.render("You win", True, pygame.Color(RED)) text_rect = text.get_rect(center=(WIDTH/2, HEIGHT/2)) screen.blit(text, text_rect) game_over = True elif is_board_full(): font = pygame.font.SysFont(None, 100) text = font.render("Hòa", True, pygame.Color(RED)) text_rect = text.get_rect(center=(WIDTH/2, HEIGHT/2)) screen.blit(text, text_rect) game_over = True else: player = 2 draw_figures() pygame.display.update() bestMove() draw_figures() if check_win( player ): font = pygame.font.SysFont(None, 100) text = font.render("Máy win", True, pygame.Color(RED)) text_rect = text.get_rect(center=(WIDTH/2, HEIGHT/2)) screen.blit(text, text_rect) game_over = True if event.type == pygame.KEYDOWN: if event.key == pygame.K_r: restart() player = 1 game_over = False draw_figures() pygame.display.update()
LeVan102/AI_Caro
Caro5x5.py
Caro5x5.py
py
11,268
python
en
code
0
github-code
36
42154458258
# 1,2,3 더하기 import sys input = sys.stdin.readline case = [] for i in range(int(input())): case.append(int(input())) maxNum = max(case) dp = [0]*(maxNum+1) dp[1] = 1 dp[2] = 2 dp[3] = 4 for i in range(4, maxNum+1): dp[i] = dp[i-3]+dp[i-2]+dp[i-1] for i in case: print(dp[i])
FeelingXD/algorithm
beakjoon/9095.py
9095.py
py
295
python
en
code
2
github-code
36
5663440887
import math def merge_sort(array, left_bound, right_bound): if left_bound < right_bound: middle_bound = math.floor((left_bound + right_bound)/2) merge_sort(array, left_bound, middle_bound) merge_sort(array, middle_bound + 1, right_bound) merge(array, left_bound, middle_bound, right_bound) def merge(array, left_bound, middle_bound, right_bound): temp_left = [] temp_right = [] temp_left.append(list(array[0][left_bound : middle_bound + 1])) temp_left.append(list(array[1][left_bound : middle_bound + 1])) temp_right.append(list(array[0][middle_bound + 1 : right_bound + 1])) temp_right.append(list(array[1][middle_bound + 1 : right_bound + 1])) i = 0 j = 0 for k in range(left_bound, right_bound): if (temp_left[1][i] <= temp_right[1][j]): array[0][k] = temp_left[0][i] array[1][k] = temp_left[1][i] i+=1 if (i == len(temp_left[0])): array[0][k + 1 : right_bound + 1] = temp_right[0][j:] array[1][k + 1 : right_bound + 1] = temp_right[1][j:] break else: array[0][k] = temp_right[0][j] array[1][k] = temp_right[1][j] j+=1 if (j == len(temp_right[0])): array[0][k + 1 : right_bound + 1] = temp_left[0][i:] array[1][k + 1 : right_bound + 1] = temp_left[1][i:] break def count_inv(array, left_bound, right_bound): inversions = 0 if left_bound < right_bound: middle_bound = math.floor((left_bound + right_bound)/2) inversions += count_inv(array, left_bound, middle_bound) inversions += count_inv(array, middle_bound + 1, right_bound) inversions += count_split_inv(array, left_bound, middle_bound, right_bound) return inversions def count_split_inv(array, left_bound, middle_bound, right_bound): temp_left = array[left_bound : middle_bound + 1] temp_right = array[middle_bound + 1 : right_bound + 1] i = 0 j = 0 split_inversions = 0 for k in range(left_bound, right_bound): if (temp_left[i] <= temp_right[j]): array[k] = temp_left[i] i+=1 if (i == len(temp_left)): array[k + 1 : right_bound + 1] = temp_right[j:] break else: array[k] = temp_right[j] split_inversions += len(temp_left) - i j+=1 if (j == 
len(temp_right)): array[k + 1 : right_bound + 1] = temp_left[i:] break return split_inversions def read_first_row(filename): data = [] with open(filename, "r") as file_object: data = file_object.readline().split(' ') data = list(map(int, data)) return data def read_comp_row(filename, user_index): user_data = [] row = 0 with open(filename, "r") as file_object: while (row != user_index): file_object.readline() row+=1 user_data = file_object.readline().split(' ') user_data = list(map(int, user_data)) user_data = user_data[1:] return user_data def read_row(filename, user_index, order_user_data): user_data = [] row = 0 with open(filename, "r") as file_object: while (row != user_index): file_object.readline() row+=1 user_data = file_object.readline().split(' ') user_data = list(map(int, user_data)) user_data = user_data[1:] temp_data = user_data[:] for i in range(len(user_data)): user_data[order_user_data[i]-1] = temp_data[i] return user_data def write_results(filename, user_index, inversions_array): output_file = open(filename, "w"); output_file.writelines([str(user_index), '\n']) for i in range(0, len(inversions_array[0])): output_file.writelines([str(inversions_array[0][i]), ' ', str(inversions_array[1][i]), '\n']) output_file.close()
Melkye/Labs
Algorithms/Lab_3_inversions/Lab_3_inversions/fun.py
fun.py
py
4,037
python
en
code
0
github-code
36
28148567000
import datetime import time from gpiozero import LED, Device from gpiozero.pins.pigpio import PiGPIOFactory Device.pin_factory = PiGPIOFactory() # NOTE: Change this to match the GPIO pin you're connecting the LED to led = LED(18) # NOTE: Change these values to set the time you want the light to turn on and off at weekday_on_time = datetime.time(hour=7, minute=0, second=0) weekday_off_time = datetime.time(hour=17, minute=0, second=0) weekend_on_time = datetime.time(hour=7, minute=30, second=0) weekend_off_time = datetime.time(hour=17, minute=0, second=0) while True: dayOfWeek = datetime.datetime.now().weekday() currentTime = datetime.datetime.now().time() on_time = weekday_on_time if dayOfWeek < 5 else weekend_on_time off_time = weekday_off_time if dayOfWeek < 5 else weekend_off_time if currentTime > on_time and currentTime < off_time: led.on() else: led.off() time.sleep(60)
szh/pi-timedlight
timedlight.py
timedlight.py
py
936
python
en
code
1
github-code
36
39139414792
""" This module contains testcase_32_ephemeral test """ from testcase import Testcase from os.path import basename class testcase_32_ephemeral(Testcase): """ It should be possible to use ephemeral device (if we have one) Note that in rhel6.5 there is no shift letter in dick device name """ stages = ['stage1'] tags = ['default'] after = ['testcase_21_disk_size_format'] def test(self, connection, params): """ Perform test """ prod = params['product'].upper() ver = params['version'].upper() ephemerals = [] if 'bmap' in params: ephemerals = [bdev for bdev in params['bmap'] if 'ephemeral_name' in bdev] if not ephemerals: # are there are some devices to check? self.log.append({'result': 'skip', 'comment': 'no ephemeral devices in block map' }) return self.log # figure out what fstype to use with focus on mkfs.<fstype> speed fstype = None for fstype in ['vfat', 'xfs', 'ext3']: if self.get_result(connection, 'ls -la /sbin/mkfs.%s 2> /dev/null | wc -l' % fstype, 5) == '1': break if fstype == 'vfat': # so that mkfs.vfat /dev/<device> doesn't complain # because of making fs on whole drive instead of a partition mkfs_opt = '-I' else: mkfs_opt = '' devices = [] for bdev in ephemerals: name = bdev['name'] if (prod in ['RHEL', 'BETA']) and (ver.startswith('5.')): if name.startswith('/dev/xvd'): # no xvd* for RHEL5 continue elif (prod in ['RHEL', 'BETA']) and (ver.startswith('6.') and ver != '6.0'): if name.startswith('/dev/sd'): name = '/dev/xvd' + name[7:] if params['virtualization'] != 'hvm' and ver != '6.5' and len(name) == 9 and ord(name[8]) < ord('w'): # there is a 4-letter shift name = name[:8] + chr(ord(name[8]) + 4) else: # Fedora and newer RHELs if name.startswith('/dev/sd'): name = '/dev/xvd' + name[7:] # test: check device presence self.get_return_value(connection, 'fdisk -l %s | grep \'^Disk\'' % name, 30) if self.get_result(connection, 'grep \'%s \' /proc/mounts | wc -l' % name, 5) == '0': # device is not mounted, doing fs creation devices.append(name) if not 
devices: return self.log mkfs_commands = ['mkfs.%s %s %s' % (fstype, mkfs_opt, name) for name in devices] assert self.ping_pong(connection, ' & '.join(mkfs_commands) + ' & echo MKFS', '(?s).*\r\nMKFS.*'), "call mkfs_commands failed" assert self.ping_pong(connection, 'wait && echo WAIT', '(?s).*\r\nWAIT.*', 1000), "wait failed?!?!" dest_names = ['/tmp/mnt-%s' % basename(name) for name in devices] self.get_return_value(connection, 'mkdir -p ' + ' '.join(dest_names)) mount_commands = ['mount -t %s %s /tmp/mnt-%s' % (fstype, name, basename(name)) for name in devices] self.get_return_value(connection, ' && '.join(mount_commands)) return self.log
dparalen/dva
dva/test/testcase_32_ephemeral.py
testcase_32_ephemeral.py
py
3,371
python
en
code
0
github-code
36
70943213544
"""Module to index columns of the paper-summarized CSV file.""" import pandas as pd from loguru import logger from omegaconf import OmegaConf from utils import create_embeddings # Load the configuration cfg = OmegaConf.load("conf/config.yaml") FILE_PATH = cfg.data.path INDEXED_FILE_PATH = cfg.data.indexed_path df = pd.read_csv(cfg.data.path, compression="gzip", header=0) logger.info(f"Loaded {len(df)} rows from {cfg.data.path}") logger.info(f"columns: {df.columns}") logger.info("Creating embeddings for experiment time") df = create_embeddings(df, ["experiment time"]) logger.info("Creating embeddings for device") df = create_embeddings(df, ["device"]) print(df.head(5)) logger.info(f"Saving indexed file to {INDEXED_FILE_PATH}") df.to_csv(INDEXED_FILE_PATH, compression="gzip", index=False) logger.success("Done!")
naarkhoo/LiteGrave
src/index_csv_columns.py
index_csv_columns.py
py
831
python
en
code
0
github-code
36
36709037388
import rclpy import rclpy.node from airobot_interfaces.srv import StringCommand from gtts import gTTS import speech_recognition as sr import subprocess class SpeechService(rclpy.node.Node): def __init__(self): super().__init__('speech_service') self.get_logger().info('音声サーバーを起動しました') self.init_rec = sr.Recognizer() self.service = self.create_service( StringCommand, '/speech_service/wake_up', self.command_callback) def command_callback(self, request, response): self.synthesis('I\'m ready.') text = None while text is None: text = self.recognition() self.synthesis(text) response.answer = text return response def recognition(self): text = '' with sr.Microphone() as source: while text == '': audio_data = self.init_rec.record(source, duration=5) self.get_logger().info(f'音声認識を行います') try: text = self.init_rec.recognize_google(audio_data) except sr.UnknownValueError: pass self.get_logger().info(f'認識したテキストは "{text}" です') return text def synthesis(self, text): self.get_logger().info(f'音声合成を実行します') self.get_logger().info(f'発話内容は "{text}"') gTTS(text, lang='en').save('voice.mp3') subprocess.run(['mpg123 voice.mp3'], shell=True) def main(): rclpy.init() speech_service = SpeechService() try: rclpy.spin(speech_service) except KeyboardInterrupt: pass rclpy.shutdown()
AI-Robot-Book/chapter3
speech_service/speech_service/speech_service_mpg123.py
speech_service_mpg123.py
py
1,728
python
en
code
2
github-code
36
31058374968
import networkx as nx import pandas as pd from matplotlib import pyplot as plt from networkx.generators.ego import ego_graph from pyvis.network import Network from sklearn.decomposition import PCA def plot_network_with_edge_weights(G, figsize=(10, 10)): elarge = [(u, v) for (u, v, d) in G.edges(data=True) if (d["weight"] > 0.8)] emedium = [ (u, v) for (u, v, d) in G.edges(data=True) if (0.8 >= d["weight"] >= 0.5) ] esmall = [(u, v) for (u, v, d) in G.edges(data=True) if (d["weight"] < 0.5)] plt.figure(figsize=figsize) pos = nx.spring_layout(G) nx.draw_networkx_nodes(G, pos, node_color="red", node_size=300) nx.draw_networkx_edges(G, pos, edgelist=elarge, width=8, alpha=0.2) nx.draw_networkx_edges(G, pos, edgelist=emedium, width=5, alpha=0.2) nx.draw_networkx_edges(G, pos, edgelist=esmall, width=2, alpha=0.2) nx.draw_networkx_labels( G, pos, font_size=10, font_weight="bold", font_family="sans-serif", font_color="white", ) plt.axis("off") plt.show() def plot_ego_network(G, n, radius, **options): """ plot ego network around a node n depending on radius setting i.e. 
only include upto n nodes directly or indirectly connected to this node """ hub_ego = ego_graph(G, n, radius=radius) pos = nx.spring_layout(hub_ego) nx.draw(hub_ego, pos, node_color="b", node_size=50, with_labels=False) nx.draw_networkx_nodes(hub_ego, pos, nodelist=[n], **options) plt.show() return hub_ego def plot_centrality_hist(centrality, name): plt.figure(figsize=(15, 8)) plt.hist(centrality.values(), bins=60) plt.xticks(ticks=[0, 0.01, 0.02, 0.04, 0.06, 0.08]) plt.title(f"Histogram - {name} ", fontdict={"size": 35}, loc="center") plt.xlabel(f"{name}", fontdict={"size": 20}) plt.ylabel("Counts", fontdict={"size": 20}) plt.show() def interactive_network_vis( dag, *widgets, options=None, weights=False, notebook=True, directed=True ): nt = Network("800px", "800px", directed=directed, notebook=notebook) nt.from_nx(dag) if weights: for edge in nt.edges: edge["value"] = edge["weight"] if options is not None: nt.set_options(options=options) return nt else: nt.show_buttons(filter=widgets) return nt def plot_community_class_count(communities): count_list = [] class_list = [] for i, c in enumerate(communities): class_list.append(i) count_list.append(len(list(c))) df = pd.DataFrame({"class": class_list, "count": count_list}) df.plot.bar(x="class", y="count") return df def plot_link_features_projection(n_components, link_features, labels_test): pca = PCA(n_components=n_components) X_transformed = pca.fit_transform(link_features) plt.figure(figsize=(16, 12)) col = [] for label in labels_test: if label == 1: col.append("red") else: col.append("blue") plt.scatter( X_transformed[:, 0], X_transformed[:, 1], c=col, alpha=0.5, ) plt.show() def plot_shortest_paths_hist(frequencies): plt.figure(figsize=(15, 8)) plt.bar(x=[i + 1 for i in range(8)], height=frequencies) plt.title( "Percentages of Shortest Path Lengths", fontdict={"size": 35}, loc="center" ) plt.xlabel("Shortest Path Length", fontdict={"size": 22}) plt.ylabel("Percentage", fontdict={"size": 22}) plt.show() def 
plot_degree_freq_log_log(G, m=0): degree_freq = G.degree_historgam(G) degrees = range(len(degree_freq)) plt.figure(figsize=(10, 6)) plt.loglog(degrees[m:], degree_freq[m:], "go-") plt.title("log log plot for degree freq") plt.xlabel("degree") plt.ylabel("frequency") plt.show()
ryankarlos/networks_algos
vis/visualize.py
visualize.py
py
3,867
python
en
code
1
github-code
36
3530324591
# from threading import Thread import speech_recognition as sr import keyboard as k import spotipy import os import pyttsx3 import random import credentials from spotipy.oauth2 import SpotifyOAuth from spotipy.oauth2 import SpotifyClientCredentials # from refresh import Refresh # from googleText2Speech import synthesize_text os.environ["SPOTIPY_CLIENT_ID"] = credentials.SPOTIPY_CLIENT_ID os.environ["SPOTIPY_CLIENT_SECRET"] = credentials.SPOTIPY_CLIENT_SECRET os.environ["SPOTIPY_REDIRECT_URI"] = credentials.SPOTIPY_REDIRECT_URI os.environ["GOOGLE_APPLICATION_CREDENTIALS"] = credentials.GOOGLE_APPLICATION_CREDENTIALS deviceId = credentials.DEVICE_ID scope = "user-modify-playback-state" auth_manager = SpotifyClientCredentials() sp = spotipy.Spotify(auth_manager=SpotifyOAuth(scope=scope)) # TTS engine engine = pyttsx3.init('sapi5') voices = engine.getProperty('voices') engine.setProperty('voice', voices[1].id) # Mic init r = sr.Recognizer() mic = sr.Microphone(device_index=2) jarvisResponses = ["I'm on it.", "Consider it done.", "Right away, Sir.", "Yes sir."] def speak(text): engine.say(text) engine.runAndWait() def main(): while 1: try: with mic as source: r.adjust_for_ambient_noise(source) audio = r.listen(source) response = (r.recognize_google(audio)) print(response) if any(x in response for x in ["Jarvis", "Yaris", "Garvais", "Taurus"]): speak("Sir?") audio = r.listen(source) response = (r.recognize_google(audio)) # print(response) # Discord Functionality if any(x in response for x in ["mute", "unmute", "mutiny"]): k.press_and_release('F8') speak("It's done.") elif any(x in response for x in ["deafen", "undeafen", "quiet"]): k.press_and_release('F9') speak("It's done.") # Spotify Functionality if any(x in response for x in ["next", "skip"]): speak(jarvisResponses[random.randint(0, 3)]) sp.next_track(deviceId) if any(x in response for x in ["previous", "last", "replay"]): speak(jarvisResponses[random.randint(0, 3)]) sp.previous_track(deviceId) if any(x in response 
for x in ["pause", "stop"]): try: speak(jarvisResponses[random.randint(0, 3)]) sp.pause_playback(deviceId) except spotipy.exceptions.SpotifyException: pass elif any(x in response for x in ["resume", "continue", "play"]): try: speak(jarvisResponses[random.randint(0, 3)]) sp.start_playback(deviceId) except spotipy.exceptions.SpotifyException: pass if any(x in response for x in ["increase", "lower", "raise", "set", "volume"]) and any( char.isdigit() for char in response): speak(jarvisResponses[random.randint(0, 3)]) volume = [int(s) for s in response.split() if s.isdigit()] sp.volume(volume[0], deviceId) if any(x in response for x in ["fast-forward", "fast", "forward"]) and any( char.isdigit() for char in response): speak(jarvisResponses[random.randint(0, 3)]) time = [int(s) for s in response.split() if s.isdigit()] sp.seek_track(time[0] * 1000, deviceId) # Application Functionality if "open" in response: if "valorant" in response: speak(jarvisResponses[random.randint(0, 3)]) os.startfile(r"C:\Users\Public\Desktop\VALORANT.lnk") if any(x in response for x in ["Apex", "Legends", "legend"]): speak(jarvisResponses[random.randint(0, 3)]) os.startfile(r"C:\Users\Nasir\Desktop\Apex Legends.url") if any(x in response for x in ["aim", "labs", "lab"]): speak(jarvisResponses[random.randint(0, 3)]) os.startfile(r"C:\Users\Nasir\Desktop\Aim Lab.url") if "Spotify" in response: speak(jarvisResponses[random.randint(0, 3)]) os.startfile(r"C:\Users\Nasir\AppData\Roaming\Spotify\Spotify.exe") # PC Functionality if "sleep" in response: speak("Goodbye for now, sir.") os.system("rundll32.exe powrprof.dll,SetSuspendState 0,1,0") if "quit" in response: speak("Goodbye for now, sir.") break except sr.RequestError: # print("API unavailable") pass except sr.UnknownValueError: # print("Unable to recognize speech or nothing said") pass if __name__ == '__main__': main()
nsrehman/Virtual-Assistant
voiceRecognition.py
voiceRecognition.py
py
5,520
python
en
code
0
github-code
36
29730523882
#coding:utf8 #login import logging logging.basicConfig(level=logging.DEBUG) _logger = logging.getLogger(__name__) #flask frame from flask_restplus import Resource #wechat frame import flask_wechat_utils from flask_wechat_utils.user.utils import auth from flask_wechat_utils.config import api #application config import config as config_application #application model from models import MessageTemplate #application from utils import get_formid_and_delete #------------------------------------------- # blueprint/api/ns #------------------------------------------- ns = api.namespace( config_application.APPLICATION_NAME, description=config_application.APPLICATION_DESCRIPTION ) # api = flask_wechat_utils.create_api() # ns = api.namespace( # config_application.APPLICATION_NAME, # description=config_application.APPLICATION_DESCRIPTION # ) #------------------------------------------- # /parser/marshal #------------------------------------------- parser_messageTemplate_create = api.parser() parser_messageTemplate_create.add_argument('form_id',type=str,required=True) #------------------------------------------- # route #------------------------------------------- @ns.route('/') class MessageTemplateRoute(Resource): @api.doc(parser=parser_messageTemplate_create) @auth def post(self): args = parser_messageTemplate_create.parse_args() message_template = MessageTemplate( openid=self.wechat_user.openid, form_id=args.get('form_id'), ) message_template.save() return { 'code':0, } @auth def get(self): form_id_result = get_formid_and_delete(self.wechat_user.openid) return { 'code':0, 'openid':form_id_result.openid, 'created_ts':str(form_id_result.created_ts), '_id':str(form_id_result.id), }
synctrust/flask-wechat-utils
flask_wechat_utils/message_template/routes.py
routes.py
py
1,768
python
en
code
0
github-code
36
14774852874
import streamlit as st from src.plotgraphs import make_radar_graph from src.sentanalysis import hf_analysis from src.sentanalysis import spacy_sentiment if __name__ == "__main__": st.write("Welcome") user_input = st.text_input("Enter a sentence", key="name") result = st.button("Submit") if result: new_sentiment = hf_analysis(user_input) output = new_sentiment.pop() doc = spacy_sentiment(user_input) st.plotly_chart(make_radar_graph(doc)) st.write(doc) st.write( "Prediction by using SiEBERT - English-Language Sentiment Classification" ) st.write(output)
yugant10-commits/sentiment-analysis
main.py
main.py
py
652
python
en
code
0
github-code
36
44034854335
import sys n = int(input()) card = list(map(int, sys.stdin.readline().split())) m = int(input()) d = list(map(int, sys.stdin.readline().split())) card.sort() def binary_search(left, right, t): if left > right: print(0, end = " ") return 0 else: mid = (left + right) // 2 if card[mid] == t: print(1, end = " ") return 1 else: if card[mid] < t: binary_search(mid + 1, right, t) else: binary_search(left, mid - 1, t) for i in d: binary_search(0, n-1, i)
GluteusStrength/Algorithm
백준/Silver/10815. 숫자 카드/숫자 카드.py
숫자 카드.py
py
605
python
en
code
0
github-code
36
17192006071
from typing import List from app.api.validators import ValidatorsClass from app.core.db import get_async_session from app.core.user import current_superuser from app.crud.charity_project import charity_crud from app.models import Donation from app.schemas.charity_project import CharityCreate, CharityDB, CharityUpdate from app.services.investing import investing_process from fastapi import APIRouter, Depends from sqlalchemy.ext.asyncio import AsyncSession router = APIRouter() @router.get( '/', response_model=List[CharityDB], response_model_exclude_none=True ) async def get_all_charity_projects(session: AsyncSession = Depends(get_async_session)) -> List[CharityDB]: """ Получает список всех благотворительных проектов из базы данных. Args: session (AsyncSession, optional): Сессия базы данных. Defaults to Depends(get_async_session). Returns: List[CharityDB]: Список объектов благотворительных проектов из базы данных. """ all_charity_projects = await charity_crud.get_all_objects(session) return all_charity_projects @router.post( '/', response_model=CharityDB, response_model_exclude_none=True, dependencies=[Depends(current_superuser)] ) async def create_charity_project( charity_project: CharityCreate, session: AsyncSession = Depends(get_async_session), ) -> CharityDB: """ Создает новый благотворительный проект в базе данных. Args: charity_project (CharityCreate): Объект создаваемого благотворительного проекта. session (AsyncSession, optional): Сессия базы данных. Defaults to Depends(get_async_session). Returns: CharityDB: Объект созданного благотворительного проекта. 
""" await ValidatorsClass.check_name_duplicate(charity_project.name, session) new_charity = await charity_crud.create( charity_project, session ) await investing_process(new_charity, Donation, session) return new_charity @router.delete( '/{project_id}', response_model=CharityDB, dependencies=[Depends(current_superuser)] ) async def delete_charity_project( project_id: int, session: AsyncSession = Depends(get_async_session) ) -> CharityDB: """ Удаляет благотворительный проект из базы данных. Args: project_id (int): Идентификатор удаляемого благотворительного проекта. session (AsyncSession, optional): Сессия базы данных. Defaults to Depends(get_async_session). Returns: CharityDB: Объект удаленного благотворительного проекта. """ delete_charity = await ValidatorsClass.check_charity_project_exists(project_id, session) ValidatorsClass.check_invested_amount_in_project(delete_charity) delete_charity = await charity_crud.delete(delete_charity, session) return delete_charity @router.patch( '/{project_id}', response_model=CharityDB, dependencies=[Depends(current_superuser)] ) async def update_charity_project( project_id: int, obj_in: CharityUpdate, session: AsyncSession = Depends(get_async_session), ) -> CharityDB: """ Обновляет информацию о благотворительном проекте в базе данных. Args: project_id (int): Идентификатор благотворительного проекта для обновления. obj_in (CharityUpdate): Объект с информацией для обновления благотворительного проекта. session (AsyncSession, optional): Сессия базы данных. Defaults to Depends(get_async_session). Returns: CharityDB: Объект обновленного благотворительного проекта. 
""" charity_project = await ValidatorsClass.check_charity_project_exists( project_id, session ) ValidatorsClass.check_charity_project_closed(charity_project) if obj_in.name is not None: await ValidatorsClass.check_name_duplicate( obj_in.name, session ) if obj_in.full_amount is not None: ValidatorsClass.count_sum_in_invested_amount( charity_project, obj_in.full_amount ) charity_project = await charity_crud.update( charity_project, obj_in, session ) return charity_project
Lexxar91/QRkot_spreadsheets
app/api/endpoints/charity_project.py
charity_project.py
py
4,780
python
ru
code
0
github-code
36
912034710
import cv2 cap = cv2.VideoCapture('vtest.avi') hog = cv2.HOGDescriptor() # 클래스 호출을 통해 객체 생성 # SVM: 머신러닝 기술 이름 hog.setSVMDetector(cv2.HOGDescriptor_getDefaultPeopleDetector()) while True: ret, frame = cap.read() # 프레임을 읽어서 반환 # ret: true / false, true: 동영상 frame을 정상적으로 읽었을 때, false: 비정상적으로 읽었을 때 if not ret: break detected, _ = hog.detectMultiScale(frame) for (x, y, w, h) in detected: cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 0, 200), 3) cv2.imshow('CCTV', frame) if cv2.waitKey(10) == 27: # 10: ascii 코드로 esc키를 의미, esc키를 누르면 waitKey 함수는 27 반환 break cv2.destroyAllWindows()
yousung1020/OpenCV
실습자료/chapter 13/hog.py
hog.py
py
782
python
ko
code
0
github-code
36
28923214371
#10815 .숫자카드 import sys input = sys.stdin.readline from bisect import bisect_left, bisect_right N = int(input().rstrip()) nums = list(map(int,input().rstrip().split())) nums.sort() def find(arr, target): if len(arr) == 1: return 1 if arr[0] == target else 0 lo = 0 hi = len(arr)-1 while lo+1 < hi: mid = (lo+hi)//2 if arr[mid] > target: hi = mid elif arr[mid] < target: lo = mid else: return 1 if lo == hi: return 1 if arr[mid] == target else 0 else: if arr[lo] ==target : return 1 elif arr[lo+1] == target: return 1 else: return 0 M= int(input().rstrip()) targets= list(map(int,input().rstrip().split())) for target in targets: print(find(nums,target), end = " ")
GuSangmo/BOJ_practice
BOJ/10815.py
10815.py
py
833
python
en
code
0
github-code
36
37406492523
import logging logging.basicConfig(filename='test_logs.log', encoding='utf-8', level=logging.INFO) logger = logging.getLogger('selenium') logger.setLevel(logging.INFO) disable_loggers = ['urllib3.connectionpool','faker.factory'] def pytest_configure(): for logger_name in disable_loggers: logger_not = logging.getLogger(logger_name) logger_not.disabled = True
AlejandroPadilla99/mentoringPython
conftest.py
conftest.py
py
383
python
en
code
0
github-code
36
35854946253
""" The flask application package. """ # newest 1.4 version of sqlalchemy not working please install 1.3.24 #pip install SQLAlchemy==1.3.24 async_mode = None if async_mode is None: try: import gevent async_mode = 'gevent' except ImportError: pass if async_mode is None: try: import eventlet async_mode = 'eventlet' except ImportError: pass if async_mode is None: async_mode = 'threading' print('async_mode is ' + async_mode) if __name__ == '__main__': if async_mode == 'eventlet': import eventlet eventlet.monkey_patch() if async_mode == 'gevent': from gevent import monkey monkey.patch_all() from flask import Flask, redirect, url_for from flask_sqlalchemy import SQLAlchemy from flask_socketio import SocketIO from flask_login import LoginManager,current_user import HrnestBoss.app_config from flask_session import Session import sqlalchemy_utils import os import flask_admin as admin from flask_admin import Admin, helpers, expose from flask_admin.contrib.sqla import ModelView #from flask_talisman import Talisman import functools #Set Main Configuration Type #Conf_type='Development' Conf_type='Production' #Configuration Of working enviroment #Developer_SQLALCHEMY_DATABASE_URI ='postgresql://TestAdmin:test@localhost/HrnestBoss_dev' Production_SQLALCHEMY_DATABASE_URI = 'NEWDATABASE' app = Flask(__name__) app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False app.config['SECRET_KEY'] = 'Hrnest!' 
app.config.from_object(HrnestBoss.app_config) Developer_SQLALCHEMY_DATABASE_URI =app.config['DATABASE_URL'] Session(app) #Talisman(app) #app.logger.level=logging.INFO # Set enviromets from if Conf_type=='Development': app.config.update( TESTING=False, ENV='development', DEBUG=True) app.config['SQLALCHEMY_DATABASE_URI']=Developer_SQLALCHEMY_DATABASE_URI else: app.config.update( TESTING=False, ENV='production', DEBUG=False) app.config['SQLALCHEMY_DATABASE_URI']=Developer_SQLALCHEMY_DATABASE_URI app.config['SQLALCHEMY_ENGINE_OPTIONS']={"connect_args": {"timeout": 100}} app.jinja_env.add_extension('pyjade.ext.jinja.PyJadeExtension') socket_ = SocketIO(app, async_mode=async_mode) db = SQLAlchemy(app) login = LoginManager(app) def hrnestAccess(f): @functools.wraps(f) def wrapped(*args, **kwargs): if not current_user.is_hrnest_access: return {'message': 'Access Denied'} else: return f(*args, **kwargs) return wrapped import HrnestBoss.DbModel.populateTypesOfWork as check # Check table Shift_typesfor presets data check.check_Values() import HrnestBoss.routes.views import HrnestBoss.routes.user_routing import HrnestBoss.routes.timetable_routing import HrnestBoss.routes.request_routing from HrnestBoss.DbModel.models import default_privileges, user, shift_type, work_group, user_request ,users , timetable import uuid class MyAdminIndexView(admin.AdminIndexView): @expose('/') def index(self): if not current_user.is_authenticated: return redirect(url_for('login')) else : if current_user.is_admin: return super(MyAdminIndexView,self).index() else: return redirect(url_for('login')) _admin = Admin(app,'HRnestBOSS Panel',index_view=MyAdminIndexView()) _admin.add_view(ModelView(default_privileges, db.session)) _admin.add_view(ModelView(user, db.session)) _admin.add_view(ModelView(shift_type, db.session)) _admin.add_view(ModelView(work_group, db.session)) _admin.add_view(ModelView(user_request, db.session)) _admin.add_view(ModelView(users, db.session)) 
_admin.add_view(ModelView(timetable, db.session)) if app.config['ENABLE_ANYMOUS_USERS']: _user = user.query.filter_by(email='no_email@none.com',login='anymous').first() if _user is None: _user = user(login='anymous', email='no_email@none.com', uid=str(uuid.uuid4()),active=True, is_admin=False, hrnest_access=False ) _user.set_password('None') db.session.add(_user) db.session.commit() _user = user.query.filter_by(email='no_validate@none.com',login='adminHB').first() if _user is None: _user = user(login='adminHB', email='no_validate@none.com', uid=str(uuid.uuid4()),active=True, is_admin=True, hrnest_access=True ) _user.set_password('adminHB') db.session.add(_user) db.session.commit()
Radkos1976/Hrnest-FLask-enchacment
HrnestBoss/HrnestBoss/__init__.py
__init__.py
py
4,549
python
en
code
0
github-code
36
11370466883
import requests from time import sleep class Ark(object): """This is a python wrapper for the ARK api""" def __init__(self,api_token): self.api_token = api_token self.header = {'api_token' : self.api_token } def check_token(self,full_object=False): """Checks the number of calls your token has left""" base_url = "https://testapi.ark.com" url = base_url + "/token_request" request = requests.get(url,headers=self.header) while request.status_code == 302: sleep(1) request = requests.get(url,headers=self.header) if request.status_code != 200: return request.status_code if full_object: return request else: return request.json()['left'] def email(self, email, full_object=False): """Fetches a user profile via email""" base_url = "https://testapi.ark.com/email/" url = base_url + email request = requests.get(url,headers=self.header) while request.status_code == 302: sleep(1) request = requests.get(url,headers=self.header) if request.status_code != 200: return request.status_code if full_object: return request else: return request.json() def twitter(self, handle, full_object=False): """Fetches a user profile via twitter handle""" base_url = "https://testapi.ark.com/network/tw:" url = base_url + handle request = requests.get(url,headers=self.header) while request.status_code == 302: sleep(1) request = requests.get(url,headers=self.header) if request.status_code != 200: return request.status_code if full_object: return request else: return request.json() def facebook(self, facebook_url, full_object=False): """Fetches user profile via facebook url""" base_url = "https://testapi.ark.com/network/fb:" url = base_url + facebook_url request = requests.get(url,headers=self.header) while request.status_code == 302: sleep(1) request = requests.get(url,headers=self.header) if request.status_code != 200: return request.status_code if full_object: return request else: return request.json()
gregimba/Ark
ark.py
ark.py
py
2,070
python
en
code
2
github-code
36
36219148196
import numpy as np from numpy import array from mSimplexFaseII import solve from scipy.optimize import linprog import pprint from math import log, exp from numpy.random import rand, normal from numpy import round, int, abs, array, transpose def main(): #Primer test A = array([[1,0], [0, 2], [3, 2]]) b = [4, 12, 18] c = array([-3, -5]) print('\n - - - - - - - - - - - \n') print('TEST 1:\n') print('Our solution:') r = solve(A,b,c) print("\n".join("{}:\t{}".format(k, v) for k, v in r.items())) print('\nPython solution:') print(linprog(c, A_ub=A, b_ub=b)) print('\n - - - - - - - - - - - \n') #Segundo test A = array([[-1, 1], [1, 0]]) b = [0, 2] c = array([0, -1]) print('TEST 2:\n') r = solve(A,b,c) print('Our solution:') print("\n".join("{}:\t{}".format(k, v) for k, v in r.items())) print('\nPython solution:') print(linprog(c, A_ub=A, b_ub=b)) #Random tests num_random_tests = 5 eps = 1e-6 k = 1 for i in range(5): print('\n - - - - - - - - - - - \n') print('RANDOM TEST ', k,': ') k += 1 m = int(round(10*exp(log(20)*rand()))) n = int(round(10*exp(log(20)*rand()))) sigma = 100 A = round(sigma*normal(0,1,(n,n))) b = round(sigma*abs(normal(0,1,(n,1)))) b = b[:,0] c = round(sigma*normal(0,1,(n,1))) c = c[:,0] our_ans = solve(A,b,c) python_ans = linprog(c, A_ub=A, b_ub=b) if our_ans['x0'] is None: if 'The problem appears to be unbounded' in python_ans['message'] and our_ans['ban'] == 1: print('Successfull test!') else: print('Something went wrong') continue if abs(python_ans['fun'] - our_ans['z0']) > eps: print('Something went wrong') continue print('Successfull test!') if __name__ == '__main__': main()
SergioArnaud/Linear-programming
Practica1/testFaseII.py
testFaseII.py
py
2,007
python
en
code
0
github-code
36
4861207569
from dataclasses import dataclass from datetime import datetime,date import pytz import dateparser from typing import Union import pandas as pd from sqlalchemy import Column,Integer,DateTime,Text,TIMESTAMP,MetaData,Table from sqlalchemy.engine import create_engine from sqlalchemy.exc import OperationalError from businessindia.helpers.exceptions import InvalidDateFormatException import os import logging logger=logging.getLogger(__name__) class DateHandler: @staticmethod def parse_date(datevalue:Union[datetime,date,str],return_string:bool=False,return_time:bool=False,use_DMY_order:bool=True): parser_settings={ 'DATE_ORDER': 'DMY', 'TIMEZONE': 'UTC', 'RETURN_AS_TIMEZONE_AWARE': True } if datevalue is None: parsed_datetime=datetime.utcnow() if return_string: if return_time: return parsed_datetime.strftime('&d-%m-%Y-%H:%M') return parsed_datetime.date().strftime('%d-%m-%Y') else: if return_time: return parsed_datetime return parsed_datetime.date() if isinstance(datevalue,str): try: if not use_DMY_order: parser_settings.pop('DATE_ORDER') parsed_datetime=dateparser.parse(datevalue,settings=parser_settings) if return_string: if return_time: return parsed_datetime.strftime('%d-%m-%Y-%H:%M') return parsed_datetime.date().strftime('%d-%m-%Y') else: if return_time: return parsed_datetime return parsed_datetime.date() except AttributeError: raise InvalidDateFormatException(f'Pass valid date in dd-mm-yyyy format only. 
Got:{datevalue}') if isinstance(datevalue,datetime) or isinstance(datevalue,date): if isinstance(datevalue,date): datevalue=datetime.combine(datevalue,datetime.min.time()) localizeddt=pytz.utc.localize(datevalue) if return_string: if return_time: return localizeddt.strftime('%d-%m-%Y-%H:%M') return localizeddt.date().strftime('%d-%m-%Y') else: if return_time: return localizeddt return localizeddt.date() @staticmethod def parse_db_date(datevalue:str): try: date=datetime.strptime(datevalue,'%Y-%m-%d %H:%M:%S.%f').date() return date except AttributeError: raise InvalidDateFormatException('Unable to parse the database datetime format try changing it.') class ChecksumHandler: def __init__(self,conn_string:str=None) -> None: self.conn_string=os.environ.get('CHECKSUM_DB_CONN_STRING') if self.conn_string is None: self.conn_string=conn_string if conn_string else 'sqlite:///./checksum.db' logger.info('Connected to Checksum Database') self.engine=create_engine(self.conn_string) def fetch_latest_date(self,org_url:str,datecolname:str='published_date',tablename:str='checksum_business'): self.create_non_exist_table(tablename) try: unique_identifier=org_url.strip() query=f"SELECT MAX({datecolname}) FROM {tablename} WHERE org_url='{unique_identifier}'" with self.engine.connect() as conn: max_date=None for res in conn.execute(query): max_date=res[0] return max_date except Exception as e: logger.info(f'Unable to fetch latest date returning None Exception:{e}') return None def get_unique_csums(self,data:pd.DataFrame,tablename:str='checksum_business'): #Generate csums for every provided data as hash of str and str and remove those that match in db and keep those that does not match res=pd.read_sql(f'SELECT * FROM {tablename}',self.engine) df = pd.merge(data,res,how='left',on=['news_url'],suffixes=('','_db'),indicator=True) df=df[[c for c in df.columns if not c.endswith('_db')]] df=df.loc[df._merge=='left_only',:] df=df.drop(['_merge'],axis=1) 
df=df.drop_duplicates().reset_index(drop=True) final=df final.columns=final.columns.str.strip() return final def create_non_exist_table(self,tablename:str): meta=MetaData() checksumtb=Table( tablename, meta, Column('id',Integer,primary_key=True,autoincrement=True), Column('org_url',Text,index=True), Column('news_url',Text,index=True), Column('published_date',DateTime,index=True), Column('created_date',DateTime,server_default='now()') ) meta.create_all(self.engine,checkfirst=True) def push_to_business_table(self,df:pd.DataFrame,tablename:str='checksum_business'): df=df.rename(columns={'org_url':'org_url','news_url':'news_url','published_date':'published_date'}) df['published_date']=pd.to_datetime(df['published_date']) df['created_date']=datetime.utcnow() ############# try: final_df=self.get_unique_csums(df) except OperationalError: final_df=df #print(final_df.shape) df=final_df ################## df.to_sql(tablename,self.engine,chunksize=1000,if_exists='append',index=False) logger.info(f'Pushed to checksumdb df of shape {df.shape}') class ProdDBPushHandler: def __init__(self,conn_string:str=None) -> None: self.conn_string=os.environ.get('PROD_DB_CONN_STRING') if not self.conn_string: self.conn_string=conn_string if conn_string else 'sqlite:///./prod.db' logger.info('Connected to Production Database')
nitesh1489/test
helpers/handlers.py
handlers.py
py
6,204
python
en
code
1
github-code
36
42776999573
"""canaryAPI URL Configuration The `urlpatterns` list routes URLs to views. For more information please see: https://docs.djangoproject.com/en/2.1/topics/http/urls/ Examples: Function views 1. Add an import: from my_app import views 2. Add a URL to urlpatterns: path('', views.home, name='home') Class-based views 1. Add an import: from other_app.views import Home 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home') Including another URLconf 1. Import the include() function: from django.urls import include, path 2. Add a URL to urlpatterns: path('blog/', include('blog.urls')) """ from django.urls import path from django.conf.urls import url, include from rest_framework.permissions import IsAuthenticated from rest_framework.documentation import include_docs_urls from manage_api.admin import admin_site from manage_api.views import AddExternalAPISetting from manage_api.views import TriggerItem, DownloadItem from manage_api.views import SysmonAlertItems, UserItem, UserItems from canary_log_api.views import ViewLog from canary_files.views import GenerateCanaryItem, DownloadCanaryItem from alert_api.views import CanaryAlertItems, SysmonIncoming, FileItem managepatterns = [ path('', UserItem.as_view(), name='user_item'), path('users/', UserItems.as_view(), name='user_items'), path('sysmon/', SysmonAlertItems.as_view(), name='sysmon_alert_items'), path('sysmon/<int:id>', SysmonAlertItems.as_view(), name='sysmon_alert_item'), path('trigger/', TriggerItem.as_view(), name='trigger_item'), path('trigger/<int:id>', TriggerItem.as_view(), name='trigger_item'), path('api_settings/', AddExternalAPISetting.as_view(), name='external-setting'), path(r'download/<md5>', DownloadItem.as_view(), name='download-sample'), ] apipatterns = [ path('alert/', SysmonIncoming.as_view(), name='incoming-mimialert'), path('alert/log/', CanaryAlertItems.as_view()), path('alert/log/<int:id>', CanaryAlertItems.as_view(), name='triggered-alerts'), path('alert/upload/<str:filename>/', 
FileItem.as_view(), name='incoming-sample'), path('manage/', include(managepatterns)), path('canary/', GenerateCanaryItem.as_view(), name='canary'), path('log/', ViewLog.as_view(), name='logs'), path('canary/download/<identifier>', DownloadCanaryItem.as_view(), name='download-canary'), ] urlpatterns = [ path('admin/', admin_site.urls), path('api/', include(apipatterns)), path('api-docs/', include_docs_urls(title='Canary API', public=False, permission_classes=[IsAuthenticated])), url(r'^api-auth/', include('rest_framework.urls')), ]
toucan-project/TOUCAN
toucan/canary_api/urls.py
urls.py
py
2,670
python
en
code
3
github-code
36
33277423381
# -*- coding: utf-8 -*- """ Created on Fri Jul 28 14:15:31 2017 @author: Simon """ import sleeploader import imp imp.reload(sleeploader) if __name__=='__main__': sleep = sleeploader.SleepDataset('D:/sleep/isruc') channels = {'EEG':['C4-A1','C4-M1'], 'EMG':'X1','EOG':['LOC-A2','E1-M2']} references = {'RefEEG':False, 'RefEMG':False,'RefEOG':False} sleep.load(channels=channels) sleep.save_object()
skjerns/AutoSleepScorerDev
tmp.py
tmp.py
py
424
python
en
code
8
github-code
36
12639565741
from mainapp.model.Event import Event from datetime import datetime from django.core.cache import cache from mainapp.Common import CacheUtil from django.conf import settings from django.utils import timezone KEY_CACHE_DAO_GET_ALL_EVENT_ACTIVE = 'context-dao-all-event-active' KEY_CACHE_DAO_GET_ALL_EVENT_NOT_ACTIVE = 'context-dao-all-event-not-active' def get_all_event_not_active(): """ Get all event active """ list_event = Event.objects.filter(active=False) return list_event def get_all_event_active(): """ Get all event active """ list_event = Event.objects.filter(active=True) return list_event def get_all_event_active_running(): """ Get all event running """ # Get current date now = datetime.now(tz=timezone.utc) list_event = Event.objects.filter(active=True, event_start__lte=now, event_end__gte=now) return list_event def get_all_event_active_is_comming(): """ Get all event is comming """ # Get current date now = datetime.now(tz=timezone.utc) list_event = Event.objects.filter(active=True, event_start__gte=now, event_end__gte=now) return list_event def get_all_event_active_is_passed(): """ Get all event is passed """ # Get current date now = datetime.now(tz=timezone.utc) list_event = Event.objects.filter(active=True, event_start__lte=now, event_end__lte=now) return list_event def get_event_detail_by_id(event_id): """ Get event detail by id """ event = Event.objects.get(pk=event_id) return event def insert_event(event): """ Insert event """ e = Event(event_name=event.event_name, event_note=event.event_note, event_slogun=event.event_slogun, event_description=event.event_description, event_image_banner_name=event.event_image_banner_name, event_image_banner_path=event.event_image_banner_path, active=event.active, event_start=event.event_start, event_end=event.event_end, created_at=datetime.now().strftime("%Y-%m-%d %H:%M:%S")) e.save() return e def update_event(event): """ Update event """ e = Event.objects.get(pk=event.event_id) e.event_name = event.event_name 
e.event_note = event.event_note e.event_slogun = event.event_slogun e.event_description = event.event_description e.event_image_banner_name = event.event_image_banner_name e.event_image_banner_path = event.event_image_banner_path e.active = event.active e.event_start = event.event_start e.event_end = event.event_end e.save() return e def delete_event(event_id): """ Delete event by id """ e = Event.objects.get(pk=event_id) e.delete()
trunganhvu/personalweb
mainapp/dao/Event/EventDao.py
EventDao.py
py
2,795
python
en
code
0
github-code
36
25770685168
import numpy as np
import seaborn
from PIL import Image
import matplotlib.pyplot as plt
import tensorflow as tf
from keras import layers, models
from sklearn.metrics import confusion_matrix
from sklearn.preprocessing import StandardScaler, Normalizer
from sklearn import svm
from sklearn.metrics import f1_score, accuracy_score
import seaborn as sns


def get_images_and_labels(file, name_folder, x):
    """Read images (and optionally labels) listed in *file* from *name_folder*.

    file: text file whose first line is a "id,label" header; each further
        line is "<image_name>,<label>" when x == 1 or "<image_name>" when
        x == 0.
    name_folder: directory containing the image files.
    x: 1 -> file contains labels, 0 -> names only.

    Returns a list of images (x == 0) or an (images, labels) tuple (x == 1).
    """
    training_images = []
    training_labels = []
    # BUG FIX: use a context manager so the file is closed even on error.
    with open(file, "r") as f:
        for line in f.readlines()[1:]:  # skip the "id,label" header line
            if x == 1:
                name, label = line.rstrip("\n").split(",")
                training_labels.append(int(label))
            else:
                name = line.rstrip("\n")
            # Load the image via PIL and convert it to a pixel ndarray.
            training_images.append(np.array(Image.open(f"./{name_folder}/{name}")))
    if x == 0:
        return training_images
    return training_images, training_labels


# Load the three splits (1 -> with labels, 0 -> without labels).
(training_images, training_labels) = get_images_and_labels("train.txt", "train+validation", 1)
(validation_images, validation_labels) = get_images_and_labels("validation.txt", "train+validation", 1)
test_images = get_images_and_labels("test.txt", "test", 0)

# Convert the Python lists of arrays into single ndarrays.
training_images = np.array(training_images)
training_labels = np.array(training_labels)
validation_images = np.array(validation_images)
validation_labels = np.array(validation_labels)
test_images = np.array(test_images)

class_names = [0, 1, 2, 3, 4, 5, 6]

# One-hot encode labels, e.g. class 5 -> [0. 0. 0. 0. 0. 1. 0.].
training_labels_one_hot = tf.keras.utils.to_categorical(training_labels)
validation_labels_one_hot = tf.keras.utils.to_categorical(validation_labels)

# Scale pixel values into [0, 1].
training_images, validation_images, test_images = (
    training_images / 255.0, validation_images / 255.0, test_images / 255.0)

# CNN: two conv/pool stages followed by a dense head with dropout.
# (An earlier commented-out architecture and an SVM baseline were removed;
# see repository history if needed.)
model = models.Sequential()
model.add(layers.Conv2D(32, 2, activation="relu", input_shape=(16, 16, 3)))
model.add(layers.MaxPooling2D())
model.add(layers.Conv2D(32, 2, activation="relu"))
model.add(layers.MaxPooling2D())
model.add(layers.Flatten())
model.add(layers.Dense(256, activation="relu"))
model.add(layers.Dropout(0.25))
model.add(layers.Dense(128, activation="relu"))
model.add(layers.Dropout(0.25))
model.add(layers.Dense(64, activation="relu"))
model.add(layers.Dense(7, activation="softmax"))

# Compile defines the loss function, the optimizer and the metrics.
model.compile(loss="categorical_crossentropy", optimizer="adam", metrics=["accuracy"])

# batch_size groups images to approximate the loss and back-propagate
# gradients per mini-batch.
hist = model.fit(training_images, training_labels_one_hot, epochs=15, batch_size=32,
                 validation_data=(validation_images, validation_labels_one_hot))

# Plot accuracy curves.
plt.plot(hist.history['accuracy'], label='accuracy')
plt.plot(hist.history['val_accuracy'], label='val_accuracy')
plt.xlabel('Epoch')
plt.ylabel('Accuracy')
plt.ylim([0.5, 1])
plt.legend(loc='lower right')
plt.show()

# Plot loss curves.
plt.plot(hist.history['loss'], label='loss')
plt.plot(hist.history['val_loss'], label='val_loss')
plt.xlabel('Epoch')
plt.ylabel('loss')
plt.ylim([1, 2])
plt.legend(loc='lower right')
plt.show()

loss, accuracy = model.evaluate(validation_images, validation_labels_one_hot)
print(f"Loss:{loss}")
print(f"Accuracy:{accuracy}")

# argmax over the softmax outputs picks the predicted class per image.
predictions_test = [class_names[i] for i in np.argmax(model.predict(test_images), axis=1)]
predictions_val = [class_names[i] for i in np.argmax(model.predict(validation_images), axis=1)]


def sample_submission(file_r, file_w):
    """Write a submission file: one "<image_name>,<label>" line per test image.

    file_r: file listing the test image names (first line is a header).
    file_w: output path; first line written is "id,label".
    """
    with open(file_r) as r:
        test_data = [line.rstrip("\n") for line in r.readlines()[1:]]
    with open(file_w, "w") as w:
        w.write("id,label\n")
        for name, label in zip(test_data, predictions_test):
            # BUG FIX: predictions_test already holds class names; the
            # original wrote class_names[predictions_test[i]], a double
            # index that only worked because class_names[i] == i.
            w.write(f"{name},{label}\n")


sample_submission("test.txt", "date_out.txt")

# Confusion matrix on the validation split.
cf_matrix = confusion_matrix(validation_labels, predictions_val, labels=class_names)
f = seaborn.heatmap(cf_matrix, annot=True, fmt="d")
plt.show()
AndrewSSB/KaggleCompetition
main.py
main.py
py
10,932
python
en
code
0
github-code
36
9228040497
import torch from torch._C import Value import torch.nn as nn import torch.nn.functional as F from torch.nn.modules.loss import PoissonNLLLoss from .MultiHeadAttention import MultiHeadAttention from .Block import Block class Decoder(nn.Module): """ add another attention between encoder's out and decoder add one more normalize layer """ def __init__(self, d_model:int, q:int, v:int, h:int, dropout:float = 0.3) -> None: super().__init__() self._selfAttention = MultiHeadAttention(d_model, q, v, h) self._encoderDecoderAttention = MultiHeadAttention(d_model, q, v, h) self._feedforward = Block(d_model) self._layerNorm1 = nn.LayerNorm(d_model) self._layerNorm2 = nn.LayerNorm(d_model) self._layerNorm3 = nn.LayerNorm(d_model) self._dropout = nn.Dropout(dropout) def forward(self, x:torch.Tensor, memory:torch.Tensor) -> torch.Tensor: out = self._selfAttention(query=x, key=x, value=x, mask="subsequent") out = self._dropout(out) out = self._layerNorm1(out + x) out1 = self._encoderDecoderAttention(query=x, key=x, value=memory) out1 = self._dropout(out1) out1 = self._layerNorm2(out1 + out) out2 = self._feedforward(out1) out2 = self._dropout(out2) out2 = self._layerNorm3(out2 + out1) return out2
chenzhike110/Transformer
Tranformer/Modules/Decoder.py
Decoder.py
py
1,380
python
en
code
0
github-code
36
34444799833
"""Further improved the emphasis - drawing the user's attention when there is only one available seat left """ # initialize loop so that it runs at least once name = "" count = 0 MAX_TICKETS = 5 while name != "Xxx" and count < MAX_TICKETS: if MAX_TICKETS - count > 1: print(f"\nYou have {MAX_TICKETS - count} seats left.") else: # Warns the user there is only one seat left print(f"\n***** You have ONLY ONE seat left! *****") # get details name = input("Please enter your name: ").title() if name != "Xxx": count += 1 # don't want to include escape code in the count if count < MAX_TICKETS: print(f"\nYou have sold {count} tickets\nThere are still" f" {MAX_TICKETS - count} tickets left") else: print(f"\nYou have sold all the available tickets")
yis1234/Mega_Movie_Fundraiser
02_ticket_loop_v4.py
02_ticket_loop_v4.py
py
822
python
en
code
0
github-code
36
43041308146
from PyQt5.QtCore import Qt from PyQt5.QtWidgets import QApplication,QWidget,QHBoxLayout,QVBoxLayout,QRadioButton,QGroupBox,QPushButton,QLabel,QListWidget,QLineEdit from second_win import * from instr import * class FinalWin(QWidget): def __init__(self,exp): super().__init__() self.exp = exp self.set_appear() self.initUI() self.show() def initUI(self): self.work_text = QLabel(txt_workheart + self.results()) self.index_text = QLabel(txt_index + str(self.index)) self.layout_line = QVBoxLayout() self.layout_line.addWidget(self.index_text, alignment = Qt.AlignCenter) self.layout_line.addWidget(self.work_text, alignment = Qt.AlignCenter) self.setLayout(self.layout_line) def set_appear(self): self.setWindowTitle(txt_finalwin) self.resize(win_width,win_height) self.move(win_x,win_y)
AlexanderKudelya/indexruf
index/final_win.py
final_win.py
py
976
python
en
code
0
github-code
36
69801320106
import csv
import sys
from collections import defaultdict

# The recursive DFS below can descend once per reachable word, so the
# default recursion limit is far too small for a large word graph.
sys.setrecursionlimit(10**9)

# Load the word list (one word per line).
array_words = []
with open('sgb-words.txt') as csv_file:
    csv_reader = csv.reader(csv_file)
    for row in csv_reader:
        array_words.append(row[0])


def list_incident(word, array_words):
    """Return the words of *array_words* that *word* has a directed edge to.

    A candidate w qualifies when every letter of word[1:] (all letters of
    *word* except the first) occurs in w with at least the same
    multiplicity.  The hard-coded cnt == 4 assumes 5-letter words
    (sgb-words is presumably the Stanford 5-letter list — confirm).
    """
    array = []
    for w in array_words:
        cnt = 0
        for i in range(len(word)):
            # Skip position 0: the first letter of word is deliberately ignored.
            if i != 0 and word[i] in w:
                if word[1:].count(word[i]) <= w.count(word[i]):
                    cnt += 1
        if cnt == 4:  # all four remaining positions matched
            array.append(w)
    return array


class GraphDirected:
    """Directed graph with Kosaraju's strongly-connected-components search."""

    def __init__(self):
        # Adjacency list: word -> list of successor words.
        self.graph = defaultdict(list)

    def add_edge(self, word, incident):
        """Add the directed edge word -> incident."""
        self.graph[word].append(incident)

    def DFS(self, start, discovered):
        """Depth-first search from *start*, marking reached nodes in *discovered*.

        Each newly reached node v is stored as discovered[v] = [parent, v],
        so the predecessor chain can be walked back to the DFS-tree root.
        """
        for v in self.graph[start]:
            if v not in discovered:
                discovered[v] = [start, v]
                self.DFS(v, discovered)

    def fillOrder(self, start, discovered, stack):
        """First Kosaraju pass: push nodes onto *stack* in DFS finish order."""
        for v in self.graph[start]:
            if v not in discovered:
                discovered[v] = [start, v]
                self.fillOrder(v, discovered, stack)
        stack.append(start)

    def getTranpose(self):
        """Return the transposed graph (every edge reversed)."""
        g = GraphDirected()
        for w in self.graph.keys():
            for u in self.graph[w]:
                g.add_edge(u, w)
        return g

    def countandfindSCCs(self, word=None):
        """Count SCCs, or list the SCC containing *word* when one is given.

        With word=None: return the number of strongly connected components.
        With a word: return the list of words in that word's component
        (empty list when the word is not reachable in the graph).
        """
        # Pass 1: order all vertices by DFS finish time.
        stack = []
        discovered = {}
        for w in self.graph.keys():
            if w not in discovered:
                discovered[w] = None
                self.fillOrder(w, discovered, stack)
        # Pass 2: DFS on the transpose in reverse finish order; every DFS
        # tree rooted here is exactly one strongly connected component.
        graph = self.getTranpose()
        discovered = {}
        count = 0
        while len(stack) > 0:
            i = stack.pop()
            if i not in discovered:
                discovered[i] = None  # roots are marked None (chain terminator)
                graph.DFS(i, discovered)
                count += 1
        if word is not None:
            array = []
            if word in discovered:
                # Walk the predecessor chain back to the root of the pass-2
                # DFS tree that discovered *word* (its component's root).
                root_word = word
                walk_edge = discovered[word]
                while walk_edge is not None:
                    walk = walk_edge[0]
                    root_word = walk
                    walk_edge = discovered[walk]
                # Re-run DFS from that root to collect the whole component.
                small_discovered = {}
                small_discovered[root_word] = None
                graph.DFS(root_word, small_discovered)
                for w in small_discovered.keys():
                    array.append(w)
            return array
        return count


# Build the word graph: one edge per incident word.
g = GraphDirected()
for word in array_words:
    array_incident = list_incident(word, array_words)
    for w in array_incident:
        g.add_edge(word, w)

print(g.countandfindSCCs("words"))  # list of words in the same SCC as the input word
print(g.countandfindSCCs())  # number of strongly connected components in graph g
Chidt12/discreteMath
Bai3_Searching_on_graph/bai3b_searching_on_graph.py
bai3b_searching_on_graph.py
py
2,846
python
en
code
0
github-code
36
21365527624
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import Tensor
from dptb.nnet.mlp import MLP
from dptb.utils.tools import _get_activation_fn
from typing import Optional, Any, Union, Callable


class ResBlock(nn.Module):
    """Residual block: an MLP whose output is added to a width-matched shortcut.

    The shortcut path adapts the input width to n_out: interpolation when
    widening, identity when widths are equal, adaptive average pooling when
    narrowing.
    """

    def __init__(self, n_in, n_hidden, n_out, activation: Union[str, Callable[[Tensor], Tensor]] = F.relu,
                 if_batch_normalized=False, device='cpu', dtype=torch.float32):
        # n_in / n_hidden / n_out: widths of the wrapped MLP.
        # activation: a name resolved via _get_activation_fn, or a callable.
        super(ResBlock, self).__init__()
        self.layer = MLP(n_in, n_hidden, n_out, if_batch_normalized=if_batch_normalized,
                         device=device, dtype=dtype, activation=activation)
        self.n_out = n_out
        self.n_in = n_in
        if isinstance(activation, str):
            self.activation = _get_activation_fn(activation)
        else:
            self.activation = activation

    def __setstate__(self, state):
        # NOTE(review): deliberately a no-op — the superclass call below is
        # commented out, so unpickled state is discarded.  Confirm intent.
        pass
        # super(ResBlock, self).__setstate__(state)

    def forward(self, x):
        """Return activation(MLP(x) + shortcut(x)), shortcut resized to n_out."""
        out = self.layer(x)
        if self.n_in < self.n_out:
            # Widen the shortcut by linear interpolation along the last dim.
            out = nn.functional.interpolate(x.unsqueeze(1), size=[self.n_out]).squeeze(1) + out
        elif self.n_in == self.n_out:
            out = x + out
        else:
            # Narrow the shortcut by adaptive average pooling.
            out = nn.functional.adaptive_avg_pool1d(input=x, output_size=self.n_out) + out
        out = self.activation(out)
        return out


class ResNet(nn.Module):
    """Stack of ResBlocks followed by an output layer.

    config: list of dicts with n_in / n_hidden / n_out per layer.  The last
    entry describes the output layer: a plain nn.Linear when its 'n_hidden'
    key is absent/None, otherwise an MLP.
    """

    def __init__(self, config, activation, if_batch_normalized=False, device='cpu', dtype=torch.float32):
        super(ResNet, self).__init__()
        self.layers = nn.ModuleList([])
        # All config entries except the last become residual blocks.
        for kk in range(len(config)-1):
            self.layers.append(ResBlock(**config[kk], if_batch_normalized=if_batch_normalized,
                                        activation=activation, device=device, dtype=dtype))
        if isinstance(activation, str):
            self.activation = _get_activation_fn(activation)
        else:
            self.activation = activation
        if config[-1].get('n_hidden') is None:
            # Output head without a hidden layer: a single Linear mapping.
            self.out_layer = nn.Linear(in_features=config[-1]['n_in'],
                                       out_features=config[-1]['n_out'], device=device, dtype=dtype)
            # nn.init.normal_(self.out_layer.weight, mean=0, std=1e-3)
            # nn.init.normal_(self.out_layer.bias, mean=0, std=1e-3)
        else:
            self.out_layer = MLP(**config[-1], if_batch_normalized=False, activation=activation,
                                 device=device, dtype=dtype)

    def forward(self, x):
        """Pass x through every residual block, then the output layer."""
        for layer in self.layers:
            x = layer(x)
            # NOTE(review): each ResBlock already ends with this activation,
            # so it is applied twice per block here — confirm this is intended.
            x = self.activation(x)
        return self.out_layer(x)


if __name__ == '__main__':
    # Smoke test: one residual block (3 -> 8) plus an MLP head (8 -> 4).
    config = [
        {'n_in': 3, 'n_hidden': 4, 'n_out': 8},
        {'n_in': 8, 'n_hidden': 6, 'n_out': 4}
    ]
    net = ResNet(config, activation='relu', if_batch_normalized=True)
    a = torch.randn(100, 3)
    print(net(a).size())
deepmodeling/DeePTB
dptb/nnet/resnet.py
resnet.py
py
2,761
python
en
code
21
github-code
36
31197803049
""" Cryptocurrency network definitions """ class Network: """ Represents a cryptocurrency network (e.g. Bitcoin Mainnet) """ def __init__(self, description, version_priv, version_pub, pub_key_hash, wif): self.description = description self.version_priv = version_priv self.version_pub = version_pub self.pub_key_hash = pub_key_hash self.wif = wif @classmethod def get_supported_networks(cls): """ Returns the list of supported networks :return: list of supported networks """ return cls.__NETWORK_LIST @classmethod def set_supported_networks(cls, network_list): """ Sets up the list of supported networks :param network_list: New list of supported networks """ cls.__NETWORK_LIST = network_list def __eq__(self, other): return self.description == other.description and \ self.version_priv == other.version_priv and \ self.version_pub == other.version_pub and \ self.pub_key_hash == other.pub_key_hash and \ self.wif == other.wif def __str__(self): return self.description BITCOIN_MAINNET = Network(description="Bitcoin Mainnet", version_priv=0x0488ADE4, version_pub=0x0488B21E, pub_key_hash=b"\x00", wif=b"\x80") BITCOIN_TESTNET = Network(description="Bitcoin Testnet", version_priv=0x04358394, version_pub=0x043587CF, pub_key_hash=b"\x6F", wif=b"\xEF") # supported networks Network.set_supported_networks([BITCOIN_MAINNET, BITCOIN_TESTNET])
henriquetft/pyhdwallet
pyhdwallet/networks.py
networks.py
py
1,814
python
en
code
5
github-code
36
13536557102
'''
Operating Systems laboratory practical exam, 19 July 2010, Exercise 3
URL: http://www.cs.unibo.it/~renzo/so/pratiche/2010.09.13.pdf

@author: Tommaso Ognibene
'''
import os, sys, hashlib, difflib


def main(argv):
    """Compare two directory trees and print the differences between files
    that exist in both (same relative path) but have different content.

    argv: [program_name, first_directory, second_directory]
    """
    # Check number of parameters
    if len(argv) != 3:
        sys.exit("The function needs two parameters to be passed in.")
    # Check parameters
    if not (os.path.isdir(argv[1])):
        sys.exit("The first parameter should be an existing directory.")
    if not (os.path.isdir(argv[2])):
        sys.exit("The second parameter should be an existing directory.")

    # Build a dictionary with key-value pair { relative file path - MD5 hash }.
    # BUG FIX: the base directories are passed explicitly — the original
    # referenced the global ``argv`` inside compareDirectories, which raised
    # NameError whenever the function was used outside the __main__ script.
    fileHash = {}
    for index in range(1, 3):
        compareDirectories(fileHash, argv[index], argv[1], argv[2])
    print("Done!")


def compareDirectories(fileHash, topDir, dirA=None, dirB=None):
    """Populate *fileHash* ({ relative path: MD5 hash }) from *topDir*.

    When a relative path is already present with a different hash, the
    differences between the two copies are printed (requires dirA and dirB,
    the two directories being compared; both default to None for
    backward-compatible hash-collection-only use).
    """
    for dirPath, _, fileNames in os.walk(topDir):
        for fileName in fileNames:
            filePath = os.path.join(dirPath, fileName)
            # Renamed from ``hash`` to avoid shadowing the builtin.
            digest = GetMd5Hash(filePath)
            relativePath = os.path.join(os.path.relpath(dirPath, topDir), fileName)
            if not fileHash.get(relativePath, ""):
                fileHash[relativePath] = digest
            elif fileHash[relativePath] != digest and dirA is not None and dirB is not None:
                printDifferences(relativePath, dirA, dirB)


def GetMd5Hash(filePath, chunkSize=2 ** 20):
    """Return the MD5 hex digest of *filePath* without loading the whole file.

    The file is read in chunks whose size is a multiple of 128, taking
    advantage of MD5's 128-byte digest blocks.

    :param filePath: physical address of a file
    :param chunkSize: chunk size in Bytes
    :return: MD5 digest (hex string)
    """
    digest = hashlib.md5()
    with open(filePath, 'rb') as file:
        chunk = file.read(chunkSize)
        while chunk:
            digest.update(chunk)
            chunk = file.read(chunkSize)
    return digest.hexdigest()


def printDifferences(relativePath, dirA, dirB):
    """Print the line-by-line differences between the two copies of
    *relativePath* found under *dirA* and *dirB*."""
    # Compute the absolute paths
    filePathA = os.path.join(dirA, relativePath)
    filePathB = os.path.join(dirB, relativePath)
    # Compute the lines of text
    with open(filePathA) as fileA, open(filePathB) as fileB:
        linesA = fileA.readlines()
        linesB = fileB.readlines()
    # Compute and print the differences
    delta = difflib.Differ().compare(linesA, linesB)
    print("\n".join(delta))


if __name__ == "__main__":
    argv = ['showDifferences', 'd1', 'd2']
    sys.exit(main(argv))
OS-Python/2004-01-27/showDifferences.py
showDifferences.py
py
3,105
python
en
code
0
github-code
36
73080924903
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Aug 31 04:34:53 2018

@author: can

Compare a measured emission spectrum ("guess") against the reference
spectrum of each element in indElement, using several vector similarity /
distance measures plus a graph-based similarity, and print the result table.
"""
import pandas as pd
import numpy as np
import SimilartyLib as sim
import createGraph as cG
import graph_tool.all as gt

# Elements whose reference spectra are compared against the measurement.
indElement = ["al2", "cu2", "fe2", "Mg2", "teb", "Ti2"]  # ,"Kk","Lit","Naa","Nn","Oo","Ti"
# One result column per similarity measure computed below.
coll = ["cos", "euclidean", "manhattan", "minkowski", "jaccard", "graph"]
result = pd.DataFrame(index=indElement, columns=coll)
measures = sim.Similarity()

# Measured spectrum: wavelength vs. summed intensity, header rows skipped.
# NOTE(review): "olcumler" / "datalar" are presumably Turkish for
# "measurements" / "data" — confirm the directory layout.
guess = pd.read_excel("olcumler/al2.xlsx", header=None, names=["Wavelength", "Sum"], skiprows=4)
start = 100  # intensity threshold: keep only peaks with Sum > 100
guess = guess.query('Sum > ' + str(start))
guess = guess.sort_values('Sum', ascending=False)
g = guess['Wavelength'].values

for el in indElement:
    # Reference spectrum of this element, filtered/sorted the same way.
    source = pd.read_excel("datalar/" + el + ".xlsx", header=None, names=["Wavelength", "Sum"], skiprows=4)
    source = source.query('Sum > ' + str(start))
    source = source.sort_values('Sum', ascending=False)
    s = source['Wavelength'].values
    if len(s) == 0:
        continue  # no peaks above threshold: nothing to compare
    # Graph-based similarity between graphs built from the two spectra.
    grapg_g = cG.create(guess)
    grapg_s = cG.create(source)
    graph_sim = gt.similarity(grapg_g, grapg_s)
    result.xs(el)['graph'] = graph_sim
    # Vector measures computed on the peak-wavelength arrays.
    result.xs(el)['cos'] = measures.cosine_similarity(s, g)
    result.xs(el)['jaccard'] = measures.jaccard_similarity(s, g)
    result.xs(el)['euclidean'] = measures.euclidean_distance(s, g)
    result.xs(el)['manhattan'] = measures.manhattan_distance(s, g)
    result.xs(el)['minkowski'] = measures.minkowski_distance(s, g, 3)

print(result)
cantek41/SpectrumSimilarty
peakSim.py
peakSim.py
py
2,223
python
en
code
0
github-code
36
3494741214
#!/usr/bin/env python3 def make_list(lst): a, b = [], [] lst = sorted(lst) a.append(lst.pop(len(lst)//2)) if len(lst)>0: b.append(lst[:len(lst)//2]) b.append(lst[len(lst)//2:]) while len(a) != len(lst): if b[0] != []: a.append(b[0][len(b[0])//2]) a = b.pop(0) b.append(a[:len(a)//2]) b.append(a[len(a)//2+1:]) else: b.pop(0) return a
debuitc4/CA268
week5/student_test.py
student_test.py
py
371
python
zh
code
0
github-code
36
7549303047
#### import ####
import sys
sys.path.append("../")
import App
from pprint import pprint as pp
import time
import json


class LogiViewSum:
    """Summarize prediction results.

    For every (entry_id, a_type) pair found in ``analyze_predict_1`` and for
    each accuracy ("seido") threshold, count how many open / not-open
    predictions matched the actual result and save the summary into
    ``analyze_match_1``.

    Per-run instance attributes (set by main() before get_summary()):
      entry_id        - mail entry being summarized
      a_type          - analysis (model) type
      seido           - minimum model accuracy used as a filter
      sql_common_cond - shared WHERE fragment built from the three above
    """

    def __init__(self):
        # Obtain the DB connection from the application container.
        app = App.AppClass()
        self.db = app.db_con()
        self.start_time = time.time()

    def main(self):
        """Entry point: summarize every (entry_id, a_type, seido) combination."""
        pp("start")
        # Accuracy thresholds to evaluate.
        arr_seido = ['0.8', '0.9', '0.95']
        arr_entry_atype = self.get_entry_atype_list()
        pp(arr_entry_atype)
        for r in arr_entry_atype:
            for seido in arr_seido:
                self.entry_id = r['entry_id']
                self.a_type = r['a_type']
                self.seido = seido
                # exe
                self.get_summary()
    # end main

    ########################
    ### Target data list ###
    ########################
    def get_entry_atype_list(self):
        """Return every distinct (entry_id, a_type) pair to be summarized."""
        sql = """SELECT entry_id, a_type FROM analyze_predict_1 GROUP BY entry_id, a_type """
        cur = self.db.cursor()  # declare cursor
        cur.execute(sql)  # execute
        row = cur.fetchall()
        return row
    # def

    ############################
    ### Save summary results ###
    ############################
    def get_summary(self):
        """Collect the counts for the current entry_id / a_type, filtered by
        the model-accuracy threshold (seido), and save one summary row.
        """
        ## Shared SQL WHERE fragment.
        self.sql_common_cond = """
            a_type = '{a_type}'
            AND entry_id = '{entry_id}'
            AND seido >= {seido}
        """.format(a_type=self.a_type, entry_id=self.entry_id, seido=self.seido)

        """ collect counts """
        data = {}
        data['entry_id'] = self.entry_id
        data['a_type'] = self.a_type
        data['seido'] = self.seido
        # Number of predicted rows in total.
        data['target_count'] = self.get_target_count()
        # Predicted "open".
        data['open_predict_count'] = self.get_open_predict_count()
        # Predicted "open" and matched the actual result.
        data['open_match_count'] = self.get_open_match_count()
        # Open hit rate.  BUG FIX: guard against ZeroDivisionError when no
        # rows were predicted as open at this threshold.
        if data['open_predict_count']:
            data['open_match_rate'] = data['open_match_count'] / data['open_predict_count']
        else:
            data['open_match_rate'] = 0.0
        # Predicted "not open".
        data['not_open_predict_count'] = self.get_not_open_predict_count()
        # Predicted "not open" and matched the actual result.
        data['not_open_match_count'] = self.get_not_open_match_count()
        # Not-open hit rate, with the same zero guard.
        if data['not_open_predict_count']:
            data['not_open_match_rate'] = data['not_open_match_count'] / data['not_open_predict_count']
        else:
            data['not_open_match_rate'] = 0.0

        """ save """
        self.save(data)
        pp(data)
    # end

    """ All prediction targets """
    def get_target_count(self):
        sql = """
            SELECT count(*) as count FROM analyze_predict_1
            WHERE {cond}
        """.format(cond=self.sql_common_cond)
        res = self.query_fetch_one(sql)
        return res['count']
    # end

    """ Predicted open """
    def get_open_predict_count(self):
        sql = """
            SELECT count(*) as count FROM analyze_predict_1
            WHERE {cond}
            AND predict_label = 1
        """.format(cond=self.sql_common_cond)
        res = self.query_fetch_one(sql)
        return res['count']
    # end

    """ Predicted open and matched the result """
    def get_open_match_count(self):
        sql = """
            SELECT count(*) as count FROM analyze_predict_1
            WHERE {cond}
            AND predict_label = 1
            AND result_match = 1
        """.format(cond=self.sql_common_cond)
        res = self.query_fetch_one(sql)
        return res['count']
    # end

    """ Predicted not-open """
    def get_not_open_predict_count(self):
        sql = """
            SELECT count(*) as count FROM analyze_predict_1
            WHERE {cond}
            AND predict_label = 0
        """.format(cond=self.sql_common_cond)
        res = self.query_fetch_one(sql)
        return res['count']
    # end

    """ Predicted not-open and matched the result """
    def get_not_open_match_count(self):
        sql = """
            SELECT count(*) as count FROM analyze_predict_1
            WHERE {cond}
            AND predict_label = 0
            AND result_match = 1
        """.format(cond=self.sql_common_cond)
        res = self.query_fetch_one(sql)
        return res['count']
    # end

    """ Save the summary row """
    def save(self, data):
        sql = """
            REPLACE INTO analyze_match_1 VALUES (
                %s, %s, %s, %s, %s, %s, %s, %s, %s, %s
            )
        """
        param = [
            data['entry_id'],
            data['a_type'],
            data['seido'],
            data['target_count'],
            data['open_predict_count'],
            data['open_match_count'],
            data['open_match_rate'],
            data['not_open_predict_count'],
            data['not_open_match_count'],
            data['not_open_match_rate'],
        ]
        cur = self.db.cursor()  # declare cursor
        res = cur.execute(sql, param)  # execute
        self.db.commit()
        pp(res)

    """ util """
    def query_fetch_one(self, sql, param=[]):
        # NOTE: mutable default is kept for interface compatibility; it is
        # never mutated here, so sharing is harmless.
        cur = self.db.cursor()  # declare cursor
        cur.execute(sql, param)  # execute
        row = cur.fetchone()
        return row
    # def
# class


obj = LogiViewSum()
obj.main()
python/machine_learning_study/logistic/3_logi_view_sum.py
3_logi_view_sum.py
py
6,061
python
en
code
0
github-code
36
72237218663
"""Form definitions.""" from braces.forms import UserKwargModelFormMixin from crispy_forms.helper import FormHelper, Layout from crispy_forms.layout import Fieldset, Submit from django import forms from django.utils.translation import gettext_lazy as _ from .models import Sheet class SheetForm(UserKwargModelFormMixin, forms.ModelForm): """ModelForm for the Sheet model.""" class Meta: # noqa: D101 model = Sheet fields = ['exercises'] def __init__(self, *args, **kwargs): """Add crispy-forms helper and layout to form.""" super(SheetForm, self).__init__(*args, **kwargs) # add Crispy Forms foo self.helper = FormHelper() self.helper.form_id = 'id-SheetForm' self.helper.add_input(Submit('continue', 'Save & continue editing')) self.helper.add_input(Submit('submit', 'Save')) self.helper.layout = Layout( Fieldset( _('sheet form'), 'exercises', ), )
FlowFX/unkenmathe.de
src/um/sheets/forms.py
forms.py
py
1,017
python
en
code
1
github-code
36
17484175139
from Modules.Fint import Fint from Modules.PMS import PMS import unittest class TestPMS(unittest.TestCase): def test_set_fint(self): pms = PMS() fint = Fint() pms.set_fint(fint) self.assertEqual(fint, pms.fint) def test_set_data(self): pms = PMS() fint = Fint() pms.set_fint(fint) self.assertTrue(pms.set_data(current_post="1", answer_post="2")) self.assertFalse(pms.set_data(current_post="1", answer_post="2")) pms.has_data = False self.assertTrue(pms.set_data(current_post="1", answer_post="2")) self.assertFalse(pms.has_data) self.assertTrue(pms.set_data(current_post="3", answer_post="4")) def test_run(self): pms = PMS() fint = Fint() pms.set_fint(fint) pms.set_data("1", "2") pms.run() self.assertFalse(pms.has_data) pms.has_unsent_data = True pms.run() self.assertFalse(pms.has_unsent_data) self.assertFalse(pms.has_data) if __name__ == "__main__": unittest.main()
alexamar0714/TRYBOT
TestModules/TestPMS.py
TestPMS.py
py
1,078
python
en
code
1
github-code
36
12996324236
from Lib.HelpFunction import stop_stopwatch, start_stopwatch from Lib.data.DataCSV import DataCSV from Lib.PreprocessClass import Preprocess """Method manage getting data and preprocessing for both tests.""" def get_and_preprocess_data(arguments, min_items_for_user = 1): preprocess_class = Preprocess(arguments=arguments) preprocess_class.add_data(DataCSV().get_data(conf_dic=arguments)) arguments["logp"].p("Count of rows in ", arguments["infile"], " data: ", preprocess_class.get_count(), print_level=1) # Preprocessing raw data for algorithm, create dictionaries, map users and item to int id, # count weight of items, and count sum of user vectors for cosine similarity. time_p = start_stopwatch() preprocess_base_data, test_users, test_users_id = preprocess_class.preprocessing(min_items_for_user=min_items_for_user) arguments["logp"].p("Count of users in base data: ", len(preprocess_base_data.get_rawdata()), print_level=1) arguments["logp"].p("Count of items in base data: ", preprocess_base_data.count_of_base_data_items(), print_level=1) arguments["logp"].p("Time of preprocess: ", format(stop_stopwatch(time_p), '.2f'), " s", print_level=1) arguments["logp"].p("Count of users in test set: ", len(test_users), print_level=1) arguments["logp"].p("Percentage of matrix density: ", format(preprocess_class.get_count() / (len(preprocess_base_data.get_user_id_dict()) * len(preprocess_base_data.get_item_id_dict())) * 100,'.5f'), " %", print_level=1) return preprocess_base_data, test_users, test_users_id
recombee/lsh-library
src/DataDbgetParse.py
DataDbgetParse.py
py
1,706
python
en
code
0
github-code
36
15480079320
import io
from unittest.mock import patch

import numpy as np
from PIL import Image
from django.test import TestCase, Client
from django.urls import reverse

from mnist_predictor.views import make_prediction


class PredictViewTestCase(TestCase):
    """Tests for the predict view and the make_prediction helper."""

    def setUp(self):
        self.client = Client()
        # A blank 28x28 grayscale PNG held in memory for the POST requests.
        self.image = Image.new('L', (28, 28), color=255)
        self.image_bytes = io.BytesIO()
        self.image.save(self.image_bytes, format='PNG')
        self.image_bytes.seek(0)

    def test_predict_view_with_valid_data(self):
        # Stub the model call so the view returns a known digit.
        with patch('mnist_predictor.views.make_prediction', return_value=3):
            resp = self.client.post(reverse('predict'),
                                    {'image': self.image_bytes},
                                    format='multipart')
        self.assertEqual(resp.status_code, 200)
        self.assertEqual(resp.json(), {'prediction': 3})

    def test_predict_view_with_invalid_data(self):
        # A POST without an image payload must be rejected.
        resp = self.client.post(reverse('predict'))
        self.assertEqual(resp.status_code, 400)

    def test_make_prediction_function(self):
        # Feed a single all-ones 28x28 input tensor through the helper.
        prediction = make_prediction(np.ones((1, 28, 28, 1)))
        # The prediction must be an int within the digit range 0-9.
        self.assertIsInstance(prediction, int)
        self.assertGreaterEqual(prediction, 0)
        self.assertLessEqual(prediction, 9)
MichelWakim/mnist-api
mnist_predictor/tests.py
tests.py
py
1,514
python
en
code
0
github-code
36
2875218070
#!/usr/bin/python3
"""Query the Reddit API for the subscriber count of a subreddit."""
import requests


def number_of_subscribers(subreddit):
    """Return the subscriber count of *subreddit*, or 0 on any failure.

    subreddit -- name of the subreddit (without the /r/ prefix)
    """
    # A custom User-Agent prevents Reddit from rejecting the request.
    headers = {'User-Agent': 'MyRedditBot/1.0'}
    url = f'https://www.reddit.com/r/{subreddit}/about.json'
    response = requests.get(url, headers=headers)
    # Anything other than a clean 200 is treated as "unknown subreddit".
    if response.status_code != 200:
        return 0
    try:
        data = response.json()
        return data['data']['subscribers']
    except (KeyError, ValueError):
        # Malformed or unexpected payload (e.g. an error page).
        return 0


if __name__ == "__main__":
    # Manual test: only runs when executed directly, never on import.
    # (Previously this ran at module level, making a network call on import.)
    subreddit_name = 'python'
    subscribers = number_of_subscribers(subreddit_name)
    print(f"Subscribers in /r/{subreddit_name}: {subscribers}")
Ojobumiche/alx-higher_level_programming
0x16-api_advanced/0-subs.py
0-subs.py
py
933
python
en
code
0
github-code
36
13100959928
from __future__ import print_function # import logging import json import sys import uuid from random import randrange # TODO remove this import requests import logging from cakework import exceptions from urllib3.exceptions import NewConnectionError import os # TODO: need to re-enable TLS for the handlers in the fly.toml file. Try these settings: https://community.fly.io/t/urgent-grpc-server-unreachable-via-grpcurl/2694/12 for alpn # TODO figure out how to configure the settings for fly.toml for grpc! # TODO also need to make sure different runs don't interfere with each other # TODO add a parameter for an entry point into the system (currently, assume that using cakework_app.py) logging.basicConfig(level=logging.INFO) class Client: def __init__(self, project, client_token, local=False): # TODO: infer user id // TODO revert local back to False self.project = project self.client_token = client_token if local: self.frontend_url = "http://localhost:8080" else: self.frontend_url = "https://cakework-frontend.fly.dev" self.local = local def get_run_status(self, run_id): response = None try: # Q: status 200 vs 201??? what's the diff? 
# TODO strip app from everywhere response = requests.get(f"{self.frontend_url}/client/runs/{run_id}/status", params={"token": self.client_token}) response.raise_for_status() # TODO: handle http error, or request id not found error except requests.exceptions.HTTPError as err: raise exceptions.CakeworkError("Http error while connecting to Cakework frontend") from err except requests.exceptions.Timeout as err: raise exceptions.CakeworkError("Timed out connecting to Cakework frontend") from err except requests.exceptions.RequestException as err: raise exceptions.CakeworkError("Request exception connecting Cakework frontend") from err except (ConnectionRefusedError, ConnectionResetError) as err: raise exceptions.CakeworkError("Failed to connect to Cakework frontend service") from err except Exception as err: # TODO catch and raise specific errors? raise exceptions.CakeworkError("Error happened while getting status") from err if response is not None: if response.status_code == 200: status = response.text return json.loads(status) elif response.status_code == 404: return None else: raise exceptions.CakeworkError("Internal server exception") else: raise exceptions.CakeworkError("Internal server exception") # TODO figure out how to refactor get_result and get_status def get_run_result(self, run_id): response = None try: # Q: status 200 vs 201??? what's the diff? response = requests.get(f"{self.frontend_url}/client/runs/{run_id}/result", params={"token": self.client_token}) response.raise_for_status() # TODO delete this? 
# TODO: handle http error, or request id not found error except requests.exceptions.HTTPError as errh: raise exceptions.CakeworkError("Http error while connecting to Cakework frontend") except requests.exceptions.Timeout as errt: raise exceptions.CakeworkError("Timed out connecting to Cakework frontend") except requests.exceptions.RequestException as err: raise exceptions.CakeworkError("Request exception connecting Cakework frontend") except (ConnectionRefusedError, ConnectionResetError) as e: raise exceptions.CakeworkError("Failed to connect to Cakework frontend service") except Exception as e: # TODO catch and raise specific errors? raise exceptions.CakeworkError("Something unexpected happened") if response is not None: if response.status_code == 200: result = json.loads(response.json()) return result elif response.status_code == 404: return None else: raise exceptions.CakeworkError("Internal server exception") else: raise exceptions.CakeworkError("Internal server exception") def run(self, task, params, compute ={"cpu":1, "memory": 256}): request = { "parameters": params, "compute": {} } cpu = compute.get("cpu") if cpu is not None: if cpu < 1 or cpu > 8: raise exceptions.CakeworkError("Number of cpus must be between 1 and 8") else: request["compute"]["cpu"] = cpu else: request["compute"]['cpu'] = 1 memory = compute.get("memory") if memory is not None: if memory < 256 or memory > 16384: raise exceptions.CakeworkError("Amount of memory must be between 256 and 16384 mb") else: request["compute"]["memory"] = memory else: request["compute"]['memory'] = 256 request["token"] = self.client_token response = requests.post(f"{self.frontend_url}/client/projects/{self.project}/tasks/{task}/runs", json=request, params={"token": self.client_token}) response_json = response.json() if response is None: raise exceptions.CakeworkError("Did not get a response from the frontend") if response.status_code == 201: run_id = response_json["runId"] return run_id elif response.status_code 
== 404: raise exceptions.CakeworkError("Task " + task + " for project " + self.project + " not found. Have you tried running `cakework deploy` first?") else: print(response) # TODO delete? raise exceptions.CakeworkError("Internal server exception")
usecakework/async-backend
sdk/python/src/cakework/client.py
client.py
py
6,074
python
en
code
3
github-code
36
44298786313
# -*- coding: utf-8 -*-
"""
Created on Fri Nov 30 08:29:53 2018

@author: Ahsan
"""
from qiskit import QuantumCircuit, ClassicalRegister, QuantumRegister
from qiskit import compile, Aer
from QGates import gateArity, gateName


class QCircuit:
    def __init__(self, qBit, cBit, shot=1):
        """Build the base quantum circuit with its registers.

        qBit -- number of quantum bits (int)
        cBit -- number of classical bits (int)
        shot -- shots per evaluation, default 1 (int)

        The backend defaults to 'qasm_simulator'.
        NOTE: you can change the backend, but evaluate() may need to be
        adjusted as well.
        """
        self.qBit = qBit
        self.cBit = cBit
        self.shot = shot
        self.backend = Aer.get_backend('qasm_simulator')
        # self.backend=Aer.get_backend('statevector_simulator_py')
        self.qr = QuantumRegister(qBit)
        self.cr = ClassicalRegister(cBit)
        self.qCircuit = QuantumCircuit(self.qr, self.cr)

    def evaluate(self):
        """Compile and run the constructed circuit; return the result."""
        compiled = compile(self.qCircuit, self.backend, shots=self.shot)
        return self.backend.run(compiled).result()

    def constructCircuit(self, code):
        """Append the gates described by *code* to the circuit.

        code -- list of tuples; element 0 names the gate, the remaining
        elements are qubit positions (their count depends on gate arity).
        """
        for entry in code:
            arity = gateArity.get(entry[0])
            method = gateName.get(entry[0])
            if arity == 1:
                getattr(self.qCircuit, method)(self.qr[int(entry[1])])
            elif arity == 2:
                getattr(self.qCircuit, method)(self.qr[int(entry[1])],
                                               self.qr[int(entry[2])])

    def measurement(self, m, useHadamard=True):
        """Measure qubits onto their associated classical bits.

        m           -- list of (qubit, classical_bit) tuples
        useHadamard -- when True, first apply a Hadamard to every qubit
        """
        if useHadamard:
            # One Hadamard per qubit, appended just before measurement.
            self.constructCircuit([('Hadamard', i) for i in range(self.qBit)])
        for pair in m:
            self.qCircuit.measure(self.qr[pair[0]], self.cr[pair[1]])
usamaahsan93/AutoQP
myQFn.py
myQFn.py
py
2,868
python
en
code
0
github-code
36
26122545244
# -*- coding: utf-8 -*-
"""
Created on Thu Jan 14 09:43:42 2016

@author: sampepose

Measure kNN validation accuracy as the training sample size grows.
"""
import csv

import numpy as np
from sklearn.neighbors import KNeighborsClassifier
import matplotlib.pyplot as plt

# Read the training data, skipping the header row.
with open('data/train.csv') as f:
    reader = csv.reader(f)
    next(reader, None)
    rows = [row for row in reader]

# First column is the label; the rest are pixel features.
X = np.array([row[1:] for row in rows])
y = np.array([row[0] for row in rows])
del rows  # free up the memory
print('loaded training data')

# k-nearest-neighbor classifier, using all available cores.
kNeigh = KNeighborsClassifier(n_neighbors=5, n_jobs=-1)

# Hold out the last 12000 rows for validation.
validation_X = X[-12000:]
validation_y = y[-12000:]
X = X[:-12000]
y = y[:-12000]

x_plot = []
y_plot = []
maxN = 30
for n in range(1, maxN + 1):
    # Fit on a random sample of n*1000 training rows.
    sample = np.random.choice(X.shape[0], n * 1000, replace=False)
    kNeigh.fit(X[sample, :], y[sample])

    # Score the held-out validation set.
    predictions = kNeigh.predict(validation_X)
    correct = sum(1 for p, t in zip(predictions, validation_y) if p == t)

    x_plot.append(n)
    y_plot.append(100.0 * (correct / validation_y.shape[0]))
    print('finished n=', n)

print(x_plot)
print(y_plot)

plt.axis([1, maxN + 1, 85, 100])
plt.xlabel('training sample size (thousands)')
plt.ylabel('percent accuracy')
plt.scatter(x_plot, y_plot, marker='o')
plt.show()
sampepose/digit-recognizer
kNearestNeighbor/test_increasing_sample_size.py
test_increasing_sample_size.py
py
1,447
python
en
code
0
github-code
36